I am implementing a batch job in Azure Container Apps.
When a message arrives on a Service Bus queue, my batch is run.
For this, I added a scale rule to scale automatically when a message arrives on the queue.
It works well: when there is a message, the app scales out from 0 to 1 replica. But when my batch terminates, the replica restarts the container several times until it is scaled back in to 0.
Here is my Terraform script that creates the container app:
resource "azapi_resource" "container_app" {
name = var.container_app_name
location = "northeurope"
parent_id = data.azurerm_resource_group.resource_group.id
identity {
type = "UserAssigned"
identity_ids = [data.azurerm_user_assigned_identity.aca_identity.id]
}
type = "Microsoft.App/containerApps#2022-03-01"
body = jsonencode({
properties: {
managedEnvironmentId = data.azapi_resource.container_environment.id
configuration = {
secrets = [
{
name = "regitry-password"
value = data.azurerm_container_registry.acr.admin_password
},
{
name = "service-bus-connection-string"
value = data.azurerm_servicebus_namespace.servicebus.default_primary_connection_string
}
]
ingress = null
registries = [
{
server = data.azurerm_container_registry.acr.login_server
username = data.azurerm_container_registry.acr.admin_username,
passwordSecretRef = "registry-password"
}]
}
template = {
containers = [{
image = "${data.azurerm_container_registry.acr.login_server}/${var.container_repository}:${var.container_image_tag}"
name = "dbt-instance"
resources = {
cpu = var.container_cpu
memory = var.container_memory
}
env = [
{
name = "APP_CONFIG_NAME"
value = var.app_configuration_name
},
{
name = "AZURE_CLIENT_ID"
value = data.azurerm_user_assigned_identity.aca_identity.client_id
}
]
}]
scale = {
minReplicas = 0
maxReplicas = 5
rules = [{
name = "queue-based-autoscaling"
custom = {
type = "azure-servicebus"
metadata = {
queueName = var.service_bus_queue_name
messageCount = "1"
}
auth = [{
secretRef = "service-bus-connection-string"
triggerParameter = "connection"
}]
}
}]
}
}
}
})
}
How can I run my container only once?
I managed to do it with Azure Container Instances with the property restartPolicy = "Never".
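For comparison, the Container Instances behaviour mentioned above corresponds to the restart_policy argument of azurerm_container_group. A minimal sketch, reusing the data sources from the script above (the resource name and group name are illustrative, not taken from the question):
resource "azurerm_container_group" "batch" {
  name                = "dbt-batch-aci" # illustrative name
  location            = "northeurope"
  resource_group_name = data.azurerm_resource_group.resource_group.name
  os_type             = "Linux"
  restart_policy      = "Never" # run the container once and never restart it

  image_registry_credential {
    server   = data.azurerm_container_registry.acr.login_server
    username = data.azurerm_container_registry.acr.admin_username
    password = data.azurerm_container_registry.acr.admin_password
  }

  container {
    name   = "dbt-instance"
    image  = "${data.azurerm_container_registry.acr.login_server}/${var.container_repository}:${var.container_image_tag}"
    cpu    = "1"
    memory = "1.5"
  }
}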
I have this Terraform script that works perfectly fine for the whole S3 module, but it cannot create the Object Lock configuration resource and returns the message:
error creating S3 bucket (bucket-name) Object lock configuration: AccessDenied: AccessDenied
Status code 403, request id: ..., host id: ...
Despite the message, the S3 bucket is actually created, but I still get this error. Maybe there is something missing in the policy?
Here is my code.
module "s3_bucket" {
source = "terraform-aws-modules/s3-bucket/aws"
version = "3.4.0"
bucket = local.bucket_name
...
object_lock_enabled = true
attach_policy = true
policy = data.aws_iam_policy_document.voucher_s3_bucket.json
versioning = {
status = var.status
mfa_delete = var.mfa_delete
}
server_side_encryption_configuration = {
rule = {
apply_server_side_encryption_by_default = {
kms_master_key_id = aws_kms_key.s3_bucket.arn
sse_algorithm = "aws:kms"
}
}
}
}
data "aws_iam_policy_document" "s3_bucket_kms_key" {
statement {
sid = "AllowPutRoles"
effect = "Allow"
actions = ["kms:GenerateDataKey"]
principals {
identifiers = local.put_object_roles #we can use event_gateway iam_role for now
type = "AWS"
}
resources = ["*"]
}
statement {
sid = "AllowAdmin"
effect = "Allow"
actions = [
"kms:*",
]
principals {
identifiers = [data.aws_iam_role.admin_role.arn, data.aws_iam_role.default_role.arn, data.aws_iam_role.automation_role.arn]
type = "AWS"
}
resources = ["*"]
}
}
resource "aws_kms_key" "s3_bucket" {
tags = {
"s3_bucket" = local.bucket_name
}
enable_key_rotation = true
policy = data.aws_iam_policy_document.s3_bucket_kms_key.json
}
resource "aws_s3_bucket_object_lock_configuration" "s3_bucket_object_lock_configuration" {
bucket = local.bucket_name
rule {
default_retention {
mode = "GOVERNANCE"
years = 10
}
}
}
data "aws_iam_policy_document" "voucher_s3_bucket" {
statement {
sid = "DenyNoKMSEncryption"
effect = "Deny"
actions = ["s3:PutObject"]
principals {
identifiers = ["*"]
type = "*"
}
resources = ["${module.voucher_s3_bucket.s3_bucket_arn}/*"]
condition {
test = "StringNotEqualsIfExists"
values = ["aws:kms"]
variable = "s3:x-amz-server-side-encryption"
}
condition {
test = "Null"
values = ["false"]
variable = "s3:x-amz-server-side-encryption"
}
}
statement {
sid = "DenyWrongKMSKey"
effect = "Deny"
actions = ["s3:PutObject"]
principals {
identifiers = ["*"]
type = "*"
}
resources = ["${module.s3_bucket.s3_bucket_arn}/*"]
condition {
test = "StringNotEquals"
values = [aws_kms_key.s3_bucket.arn]
variable = "s3:x-amz-server-side-encryption-aws-kms-key-id"
}
}
statement {
sid = "AllowAdminDefault"
effect = "Allow"
actions = ["s3:*"]
principals {
identifiers = [data.aws_iam_role.admin_role.arn, data.aws_iam_role.default_role.arn]
type = "AWS"
}
resources = [
"${module.voucher_s3_bucket.s3_bucket_arn}/*",
module.voucher_s3_bucket.s3_bucket_arn,
]
}
statement {
sid = "DenyDeleteActions"
effect = "Deny"
actions = ["s3:DeleteBucket", "s3:DeleteObject", "s3:DeleteObjectVersion", "s3:PutBucketObjectLockConfiguration"]
principals {
identifiers = ["*"]
type = "AWS"
}
resources = [
"${module.s3_bucket.s3_bucket_arn}/*",
module.s3_bucket.s3_bucket_arn,
]
}
}
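One observation on the policy above, offered as a hedged guess rather than a confirmed fix: the DenyDeleteActions statement explicitly denies s3:PutBucketObjectLockConfiguration to every AWS principal, which would also block the role that runs Terraform when it applies aws_s3_bucket_object_lock_configuration, and could explain the AccessDenied. A sketch of that statement with an exemption for the deploying role; it would replace the DenyDeleteActions statement inside the existing policy document, and the assumption that automation_role is the role Terraform runs as is mine:
statement {
  sid     = "DenyDeleteActions"
  effect  = "Deny"
  actions = ["s3:DeleteBucket", "s3:DeleteObject", "s3:DeleteObjectVersion", "s3:PutBucketObjectLockConfiguration"]
  principals {
    identifiers = ["*"]
    type        = "AWS"
  }
  resources = [
    "${module.s3_bucket.s3_bucket_arn}/*",
    module.s3_bucket.s3_bucket_arn,
  ]
  # Exempt the role applying Terraform so it can still set the Object Lock
  # default retention (assumption: automation_role is that role).
  condition {
    test     = "ArnNotEquals"
    variable = "aws:PrincipalArn"
    values   = [data.aws_iam_role.automation_role.arn]
  }
}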
So I'm using Terraform to create an Atlas cluster, but the output I'm getting is incomplete for my purposes. Terraform is giving me this:
mongodb+srv://esc-app-dbcluster-devel.b59mwv7.mongodb.net
and what I need should be more like this:
mongodb+srv://admin:admin@esc-app-dbcluster-devel.b59mwv7.mongodb.net/development?retryWrites=true&w=majority
or at least that's the format that works with what I'm testing.
This is my Terraform code:
terraform {
required_providers {
mongodbatlas = {
source = "mongodb/mongodbatlas"
version = "1.4.6"
}
}
}
provider "mongodbatlas" {
public_key = var.atlas_public_key
private_key = var.atlas_private_key
}
resource "mongodbatlas_cluster" "db-cluster" {
project_id = var.atlas_project_id
name = var.db_cluster_name
# Provider Settings "block"
provider_name = "TENANT" //free tier
backing_provider_name = "AWS"
provider_region_name = "US_EAST_1" //free tier
provider_instance_size_name = "M0" //free tier
}
resource "mongodbatlas_database_user" "dbuser" {
username = var.db_user
password = var.db_password
project_id = var.atlas_project_id
auth_database_name = "admin"
roles {
role_name = "readWrite"
database_name = var.environment
}
}
resource "mongodbatlas_project_ip_access_list" "test" {
project_id = var.atlas_project_id
cidr_block = var.cidr
}
output "db_cn_string" {
value = mongodbatlas_cluster.db-cluster.connection_strings.0.standard_srv
}
Code I use to connect:
const environment = process.env.ENVIRONMENT;
const uridb = "mongodb+srv://admin:admin@esc-app-dbcluster-devel.b59mwv7.mongodb.net/development?retryWrites=true&w=majority" // working format
// uridb = "mongodb+srv://esc-app-dbcluster-devel.b59mwv7.mongodb.net" --- format from terraform
console.log('environment:::::', environment);
let ENVIRONMENT_VARIABLES = {
'process.env.ENVIRONMENT': JSON.stringify(environment),
'process.env.PORT': JSON.stringify('80'),
'process.env.MONGO_CONNECTION_STRING': JSON.stringify(uridb)
};
I need a way to generate the proper connection string.
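A hedged sketch of one way to assemble the full URI on the Terraform side from the values already declared above (the output name is illustrative; it trims the scheme off standard_srv and re-adds credentials, the database name, and the options):
output "db_cn_string_full" {
  # mongodb+srv://<user>:<password>@<host>/<database>?retryWrites=true&w=majority
  value = format(
    "mongodb+srv://%s:%s@%s/%s?retryWrites=true&w=majority",
    mongodbatlas_database_user.dbuser.username,
    var.db_password,
    replace(mongodbatlas_cluster.db-cluster.connection_strings.0.standard_srv, "mongodb+srv://", ""),
    var.environment
  )
  sensitive = true
}
Marking the output as sensitive keeps the credentials out of the plan and apply output.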
Trying to do an Azure Terraform deployment and failing horribly; looking for some ideas on what I am missing. Basically I am trying to deploy 2 (maybe more later) VMs with a variable size of disks, join them to the domain, and add SQL Server to them. (Be gentle with me, I come from a VMware/Terraform background; this is my first SQL deployment on Azure!)
My module:
## main.tf:
# ----------- NIC --------------------------------
resource "azurerm_network_interface" "nic" {
name = "${var.vm_name}-nic"
resource_group_name = var.rg.name
location = var.location
ip_configuration {
name = "${var.vm_name}-internal"
subnet_id = var.subnet_id
private_ip_address_allocation = "Static"
private_ip_address = var.private_ip
}
dns_servers = var.dns_servers
}
# ----------- VM --------------------------------
resource "azurerm_windows_virtual_machine" "vm" {
/* count = length(var.instances) */
name = var.vm_name
location = var.location
resource_group_name = var.rg.name
network_interface_ids = [azurerm_network_interface.nic.id]
size = var.size
zone = var.zone
admin_username = var.win_admin_user
admin_password = var.win_admin_pw # data.azurerm_key_vault_secret.vmadminpwd.value
enable_automatic_updates = "false"
patch_mode = "Manual"
provision_vm_agent = "true"
tags = var.vm_tags
source_image_reference {
publisher = "MicrosoftSQLServer"
offer = "sql2019-ws2019"
sku = "enterprise"
version = "latest"
}
os_disk {
name = "${var.vm_name}-osdisk"
caching = "ReadWrite"
storage_account_type = "StandardSSD_LRS"
disk_size_gb = 250
}
}
# ----------- DOMAIN JOIN --------------------------------
// Waits for up to 1 hour for the domain to become available. Returns error 1 if unsuccessful, preventing the member from attempting to join.
resource "azurerm_virtual_machine_extension" "wait-for-domain-to-provision" {
name = "TestConnectionDomain"
publisher = "Microsoft.Compute"
type = "CustomScriptExtension"
type_handler_version = "1.9"
virtual_machine_id = azurerm_windows_virtual_machine.vm.id
settings = <<SETTINGS
{
"commandToExecute": "powershell.exe -Command \"while (!(Test-Connection -ComputerName ${var.active_directory_domain_name} -Count 1 -Quiet) -and ($retryCount++ -le 360)) { Start-Sleep 10 } \""
}
SETTINGS
}
resource "azurerm_virtual_machine_extension" "join-domain" {
name = azurerm_windows_virtual_machine.vm.name
publisher = "Microsoft.Compute"
type = "JsonADDomainExtension"
type_handler_version = "1.3"
virtual_machine_id = azurerm_windows_virtual_machine.vm.id
settings = <<SETTINGS
{
"Name": "${var.active_directory_domain_name}",
"OUPath": "",
"User": "${var.active_directory_username}#${var.active_directory_domain_name}",
"Restart": "true",
"Options": "3"
}
SETTINGS
protected_settings = <<SETTINGS
{
"Password": "${var.active_directory_password}"
}
SETTINGS
depends_on = [azurerm_virtual_machine_extension.wait-for-domain-to-provision]
}
# ----------- DISKS --------------------------------
resource "azurerm_managed_disk" "data" {
for_each = var.disks
name = "${var.vm_name}-${each.value.name}"
location = var.location
resource_group_name = var.rg.name
storage_account_type = each.value.sa
create_option = each.value.create
disk_size_gb = each.value.size
zone = var.zone
}
resource "azurerm_virtual_machine_data_disk_attachment" "disk-attachment" {
for_each = var.disks
managed_disk_id = azurerm_managed_disk.data[each.key].id
virtual_machine_id = azurerm_windows_virtual_machine.vm.id
lun = each.value.lun
caching = "ReadWrite"
depends_on = [azurerm_windows_virtual_machine.vm]
}
# ----------- SQL --------------------------------
# configure the SQL side of the deployment
resource "azurerm_mssql_virtual_machine" "sqlvm" {
/* count = length(var.instances) */
virtual_machine_id = azurerm_windows_virtual_machine.vm.id
sql_license_type = "PAYG"
r_services_enabled = true
sql_connectivity_port = 1433
sql_connectivity_type = "PRIVATE"
/* sql_connectivity_update_username = var.sqladmin
sql_connectivity_update_password = data.azurerm_key_vault_secret.sqladminpwd.value */
#The storage_configuration block supports the following:
storage_configuration {
disk_type = "NEW" # (Required) The type of disk configuration to apply to the SQL Server. Valid values include NEW, EXTEND, or ADD.
storage_workload_type = "OLTP" # (Required) The type of storage workload. Valid values include GENERAL, OLTP, or DW.
data_settings {
default_file_path = "F:\\Data"
luns = [1]
}
log_settings {
default_file_path = "G:\\Log"
luns = [2]
}
temp_db_settings {
default_file_path = "D:\\TempDb"
luns = [0]
}
}
}
## provider.tf
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = ">=3.0.1"
#configuration_aliases = [azurerm.corp]
}
}
}
## variables.tf
# ----------- COMMON --------------------------------
variable "vm_name" {
type = string
}
variable "rg" {
/* type = string */
description = "STACK - resource group"
}
variable "location" {
type = string
description = "STACK - location"
}
# ----------- NIC --------------------------------
variable "subnet_id" {
type = string
description = "STACK - subnet"
}
variable "private_ip" {
}
variable "dns_servers" {
}
# ----------- VM --------------------------------
variable "size" {
description = "VM - size"
type = string
}
variable "win_admin_user" {
sensitive = true
type = string
}
variable "win_admin_pw" {
sensitive = true
type = string
}
variable "os_storage_type" {
type = string
}
variable "vm_tags" {
type = map(any)
}
variable "zone" {
#type = list
description = "VM AZ"
}
# ----------- DOMAIN JOIN --------------------------------
variable "active_directory_domain_name" {
type = string
}
variable "active_directory_username" {
sensitive = true
}
variable "active_directory_password" {
sensitive = true
}
# ----------- SQL --------------------------------
variable "sql_maint_day" {
type = string
description = "SQL - maintenance day"
}
variable "sql_maint_length_min" {
type = number
description = "SQL - maintenance duration (min)"
}
variable "sql_maint_start_hour" {
type = number
description = "SQL- maintenance start (hour of the day)"
}
# ----------- DISKS --------------------------------
/* variable "disk_storage_account" {
type = string
default = "Standard_LRS"
description = "DATA DISKS - storage account type"
}
variable "disk_create_method" {
type = string
default = "Empty"
description = "DATA DISKS - creation method"
}
variable "disk_size0" {
type = number
}
variable "disk_size1" {
type = number
}
variable "disk_size2" {
type = number
}
variable "lun0" {
type = number
default = 0
}
variable "lun1" {
type = number
default = 1
}
variable "lun2" {
default = 2
type = number
} */
/* variable "disks" {
description = "List of disks to create"
type = map(any)
default = {
disk0 = {
name = "data0"
size = 200
create = "Empty"
sa = "Standard_LRS"
lun = 0
}
disk1 = {
name = "data1"
size = 500
create = "Empty"
sa = "Standard_LRS"
lun = 1
}
}
} */
variable "disks" {
type = map(object({
name = string
size = number
create = string
sa = string
lun = number
}))
}
The actual deployment:
## main.tf
/*
PS /home/fabrice> Get-AzVMSize -Location northeurope | where-object {$_.Name -like "*ds13*"}
*/
module "uat_set" {
source = "../modules/vm"
providers = {
azurerm = azurerm.cbank-test
}
for_each = var.uat_set
active_directory_domain_name = local.uat_ad_domain
active_directory_password = var.domain_admin_password
active_directory_username = var.domain_admin_username
disks = var.disk_allocation
dns_servers = local.dns_servers
location = local.uat_location
os_storage_type = local.uat_storage_type
private_ip = each.value.private_ip
rg = data.azurerm_resource_group.main
size = each.value.vm_size
sql_maint_day = local.uat_sql_maintenance_day
sql_maint_length_min = local.uat_sql_maintenance_min
sql_maint_start_hour = local.uat_sql_maintenance_start_hour
subnet_id = data.azurerm_subnet.main.id
vm_name = each.key
vm_tags = var.default_tags
win_admin_pw = var.admin_password
win_admin_user = var.admin_username
zone = each.value.zone[0]
}
variable "uat_set" {
description = "List of VM-s to create"
type = map(any)
default = {
UAT-SQLDB-NE-01 = {
private_ip = "192.168.32.8"
vm_size = "Standard_DS13-4_v2"
zone = ["1"]
}
UAT-SQLDB-NE-02 = {
private_ip = "192.168.32.10"
vm_size = "Standard_DS13-4_v2"
zone = ["2"]
}
}
}
variable "disk_allocation" {
type = map(object({
name = string
size = number
create = string
sa = string
lun = number
}))
default = {
"temp" = {
name = "temp"
size = 200
create = "Empty"
sa = "Standard_LRS"
lun = 0
},
"disk1" = {
name = "data1"
size = 500
create = "Empty"
sa = "Standard_LRS"
lun = 1
},
"disk2" = {
name = "data2"
size = 500
create = "Empty"
sa = "Standard_LRS"
lun = 2
}
}
}
locals {
dns_servers = ["192.168.34.5", "192.168.34.10"]
uat_storage_type = "Standard_LRS"
uat_sql_maintenance_day = "Saturday"
uat_sql_maintenance_min = 180
uat_sql_maintenance_start_hour = 23
uat_ad_domain = "civbdev.local"
uat_location = "North Europe"
}
## variables.tf
# new build variables
variable "Environment" {
default = "DEV"
description = "this is the environment variable used to intperpolate with others vars"
}
variable "default_tags" {
type = map(any)
default = {
Environment = "DEV"
Product = "dev-XXXtemplateXXX"
Terraformed = "https://AllicaBankLtd@dev.azure.com/XXXtemplateXXX/Terraform/DEV"
}
}
variable "admin_username" {
sensitive = true
}
variable "admin_password" {
sensitive = true
}
variable "domain_admin_username" {
sensitive = true
}
variable "domain_admin_password" {
sensitive = true
}
Resources create OK, except for the SQL part:
│ Error: A resource with the ID "/subscriptions/<..redacted...>/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/UAT-SQLDB-NE-02" already exists - to be managed via Terraform this resource needs to be imported into the State. Please see the resource documentation for "azurerm_mssql_virtual_machine" for more information.
│
│ with module.uat_set["UAT-SQLDB-NE-02"].azurerm_mssql_virtual_machine.sqlvm,
│ on ../modules/vm/main.tf line 115, in resource "azurerm_mssql_virtual_machine" "sqlvm":
│ 115: resource "azurerm_mssql_virtual_machine" "sqlvm" {
│
╵
╷
│ Error: A resource with the ID "/subscriptions/<..redacted...>/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/UAT-SQLDB-NE-01" already exists - to be managed via Terraform this resource needs to be imported into the State. Please see the resource documentation for "azurerm_mssql_virtual_machine" for more information.
│
│ with module.uat_set["UAT-SQLDB-NE-01"].azurerm_mssql_virtual_machine.sqlvm,
│ on ../modules/vm/main.tf line 115, in resource "azurerm_mssql_virtual_machine" "sqlvm":
│ 115: resource "azurerm_mssql_virtual_machine" "sqlvm" {
│
╵
Any notions, please, as to what I might be missing?
Ta,
Fabrice
UPDATE:
Thanks to those who replied. Just to confirm: it is not an already existing resource. I get this error right at the time these VMs are created.
For example, these are my VMs after the Terraform run (none of them has the SQL extension).
The plan even states it will create these:
Terraform will perform the following actions:
# module.uat_set["UAT-SQLDB-NE-01"].azurerm_mssql_virtual_machine.sqlvm will be created
+ resource "azurerm_mssql_virtual_machine" "sqlvm" {
+ id = (known after apply)
+ r_services_enabled = true
+ sql_connectivity_port = 1433
+ sql_connectivity_type = "PRIVATE"
+ sql_license_type = "PAYG"
+ virtual_machine_id = "/subscriptions/..../providers/Microsoft.Compute/virtualMachines/UAT-SQLDB-NE-01"
+ storage_configuration {
+ disk_type = "NEW"
+ storage_workload_type = "OLTP"
+ data_settings {
+ default_file_path = "F:\\Data"
+ luns = [
+ 1,
]
}
+ log_settings {
+ default_file_path = "G:\\Log"
+ luns = [
+ 2,
]
}
+ temp_db_settings {
+ default_file_path = "Z:\\TempDb"
+ luns = [
+ 0,
]
}
}
}
# module.uat_set["UAT-SQLDB-NE-02"].azurerm_mssql_virtual_machine.sqlvm will be created
+ resource "azurerm_mssql_virtual_machine" "sqlvm" {
+ id = (known after apply)
+ r_services_enabled = true
+ sql_connectivity_port = 1433
+ sql_connectivity_type = "PRIVATE"
+ sql_license_type = "PAYG"
+ virtual_machine_id = "/subscriptions/..../providers/Microsoft.Compute/virtualMachines/UAT-SQLDB-NE-02"
+ storage_configuration {
+ disk_type = "NEW"
+ storage_workload_type = "OLTP"
+ data_settings {
+ default_file_path = "F:\\Data"
+ luns = [
+ 1,
]
}
+ log_settings {
+ default_file_path = "G:\\Log"
+ luns = [
+ 2,
]
}
+ temp_db_settings {
+ default_file_path = "Z:\\TempDb"
+ luns = [
+ 0,
]
}
}
}
Plan: 2 to add, 0 to change, 0 to destroy.
Presumably, if these resources somehow already existed (which would be odd, as Terraform just created the VMs), then the plan would not say it will create them now, would it?
So the error is quite a source of confusion: if the VM has only just been created and the creation of the extension failed, how could the extension possibly already exist?
In this case you should probably just import the resources into your Terraform state, as the error suggests.
For example
terraform import module.uat_set[\"UAT-SQLDB-NE-02\"].azurerm_mssql_virtual_machine.sqlvm "/subscriptions/<..redacted...>/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/UAT-SQLDB-NE-02"
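If the project is on Terraform 1.5 or later, the same thing can be expressed as a config-driven import block instead of the CLI command (a sketch; the ID is the redacted one from the error message):
import {
  to = module.uat_set["UAT-SQLDB-NE-02"].azurerm_mssql_virtual_machine.sqlvm
  id = "/subscriptions/<..redacted...>/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/UAT-SQLDB-NE-02"
}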
I have a question regarding the following.
I am using Terraform with the FortiOS provider.
tree:
These are my providers in the root (prod):
provider "fortios" {
hostname = "xxxxx"
token = "xxxxx"
insecure = "true"
vdom = "PROD"
}
provider "fortios" {
hostname = "xxxx"
token = "xxxx"
insecure = "true"
vdom = "OPS"
alias = "isops"
}
Here is my root module (prod):
module "AWS_xxx"{
source = "../modules"
name = "AWS_PROD"
prefix_lists = local.aws_prod
providers = {
fortios.dc1 = fortios
fortios.dc2 = fortios.isops
}
}
Provider and resource within the child module:
terraform {
required_providers {
fortios = {
source = "fortinetdev/fortios"
version = "1.13.1"
configuration_aliases = [ fortios.dc1, fortios.dc2 ]
}
}
}
resource "fortios_router_prefixlist" "prefix_lists" {
name = var.name
dynamic "rule" {
for_each = var.prefix_lists
content {
id = rule.value["id"]
action = rule.value["action"]
prefix = rule.value["prefix"]
ge = rule.value["ge"]
le = rule.value["le"]
}
}
}
My goal is for the above module to create two instances of the resource, one in each of the declared providers.
My issue is that while the resource is created in the first provider (PROD), it is not created in OPS.
Do you have any clue on this?
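For reference, a minimal sketch of one way the child module could pin a copy of the resource to each alias it declares (not tested against FortiOS; it simply duplicates the resource shown above with an explicit provider argument per copy):
resource "fortios_router_prefixlist" "prefix_lists_dc1" {
  provider = fortios.dc1
  name     = var.name

  dynamic "rule" {
    for_each = var.prefix_lists
    content {
      id     = rule.value["id"]
      action = rule.value["action"]
      prefix = rule.value["prefix"]
      ge     = rule.value["ge"]
      le     = rule.value["le"]
    }
  }
}

resource "fortios_router_prefixlist" "prefix_lists_dc2" {
  provider = fortios.dc2
  name     = var.name

  dynamic "rule" {
    for_each = var.prefix_lists
    content {
      id     = rule.value["id"]
      action = rule.value["action"]
      prefix = rule.value["prefix"]
      ge     = rule.value["ge"]
      le     = rule.value["le"]
    }
  }
}
With the providers block shown in the root module, the dc1 copy would land in the PROD vdom and the dc2 copy in OPS.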
Not really; I did not work it out through Terraform multi-providers.
In our case, I found a way through Jenkins parallelism.
We launch multiple environments in parallel, with the credentials saved encrypted on the Jenkins server.
It is easy to configure a web App Service to use Azure AD login manually via the official documentation. However, how can I achieve this from Terraform? I've searched for a while and didn't find any examples; if you happen to know of one, it would be nice of you to share it with me.
The following code is how I created the resource group and provisioned the web application.
terraform {
backend "azurerm" {}
}
terraform {
required_version = ">= 0.13"
}
resource "azurerm_resource_group" "tf_resource_group" {
name = "RG_${var.application_name}_${var.environment}"
location = var.location
tags = {
environment = var.environment
DeployedBy = "terraform"
}
}
resource "azurerm_app_service_plan" "tf_service_plan" {
name = "${var.application_name}-${var.environment}-asp"
location = azurerm_resource_group.tf_resource_group.location
resource_group_name = azurerm_resource_group.tf_resource_group.name
kind = "Linux"
reserved = true
sku {
tier = "Standard"
size = "S1"
}
tags = {
environment = var.environment
DeployedBy = "terraform"
}
}
resource "azurerm_app_service" "tf_app_service" {
name = var.application_name
location = azurerm_resource_group.tf_resource_group.location
resource_group_name = azurerm_resource_group.tf_resource_group.name
app_service_plan_id = azurerm_app_service_plan.tf_service_plan.id
site_config {
always_on = true
linux_fx_version = "DOCKER|${var.acr_name}.azurecr.io/${var.img_repo_name}:${var.tag}"
}
app_settings = {
DOCKER_REGISTRY_SERVER_URL = "$DRSRUL"
WEBSITES_ENABLE_APP_SERVICE_STORAGE = "false"
DOCKER_REGISTRY_SERVER_USERNAME = "$ACRNAME"
DOCKER_REGISTRY_SERVER_PASSWORD = "$PW"
}
identity {
type = "SystemAssigned"
}
}
I believe your "azurerm_app_service" resource block needs an auth_settings block with an active_directory block. Example:
auth_settings {
  enabled = true
  active_directory {
    client_id = azuread_application.example.application_id
  }
  default_provider = "AzureActiveDirectory"
  issuer           = "https://sts.windows.net/xxxxxxx-xxxx-xxx-xxxx-xxxtenantID/"
}