Terraform: problem with an AWS ALB managing multiple Fargate services behind API Gateway

Hi, I have the following infrastructure on AWS:
API Gateway
A VPC link on an NLB
An ALB
A target group with target_type = "alb"
An ECS cluster with 3 services (on ports 8083, 8084 and 8085), each service running a Fargate task
Two security groups: one for the ALB and one for the ECS tasks
A VPC with 2 public subnets and 2 private subnets
An Internet Gateway
A NAT gateway (for outbound access from the private subnets)
I built this configuration with the AWS Console in the eu-south-1 region and everything works. Now I would like to migrate everything to the us-east-1 region, so I wrote the following Terraform scripts.
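The two security groups are only described, not shown. For context, a minimal sketch of what the ECS task security group could look like for the three service ports (resource and variable names here are illustrative assumptions, not part of the scripts below):
resource "aws_security_group" "ecs_tasks" {
  name   = "ecs-tasks-sg"                          # illustrative name
  vpc_id = var.vpc_id

  # Allow the ALB to reach every service port, which also covers health checks
  ingress {
    from_port       = 8083
    to_port         = 8085
    protocol        = "tcp"
    security_groups = [var.alb_security_group_id]  # assumed variable holding the ALB security group id
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}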
ALB
resource "aws_lb" "alb-block-forgery" {
name = "${var.name}-alb-${var.environment}"
desync_mitigation_mode = "defensive"
drop_invalid_header_fields = "false"
enable_deletion_protection = "false"
enable_http2 = "true"
enable_waf_fail_open = "false"
idle_timeout = "60"
internal = "true"
ip_address_type = "ipv4"
load_balancer_type = "application"
preserve_host_header = "false"
security_groups = var.alb_security_groups
subnets = var.subnets.*.id
}
NLB
resource "aws_lb" "nlb-block-forgery" {
name = "${var.name}-nlb-${var.environment}"
enable_cross_zone_load_balancing = "true"
enable_deletion_protection = "false"
internal = "false"
ip_address_type = "ipv4"
load_balancer_type = "network"
subnets = var.subnets.*.id
}
ALB TG
resource "aws_lb_target_group" "alb-block-forgery-tg" {
name = "${var.name}-alb-tg-${substr(uuid(), 0, 3)}-${var.environment}"
deregistration_delay = "300"
health_check {
enabled = "true"
healthy_threshold = "5"
interval = "30"
matcher = "200"
path = "/"
port = "traffic-port"
protocol = "HTTP"
timeout = "5"
unhealthy_threshold = "2"
}
load_balancing_algorithm_type = "round_robin"
port = "80"
protocol = "HTTP"
protocol_version = "HTTP1"
slow_start = "0"
stickiness {
cookie_duration = "86400"
enabled = "false"
type = "lb_cookie"
}
target_type = "ip"
vpc_id = var.vpc_id
lifecycle {
ignore_changes = [name]
create_before_destroy = true
}
}
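As an aside, the uuid() naming plus ignore_changes is only there to get unique names during create_before_destroy; the provider's built-in name_prefix argument (at most 6 characters) achieves the same thing. A minimal sketch, with the other arguments kept as above:
resource "aws_lb_target_group" "alb-block-forgery-tg" {
  name_prefix = "albtg-"   # Terraform appends a random suffix, replacing the uuid() trick
  # ... same arguments as the resource above ...

  lifecycle {
    create_before_destroy = true
  }
}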
NLB TG
resource "aws_lb_target_group" "nlb-block-forgery-tg" {
name = "${var.name}-nlb-tg-${substr(uuid(), 0, 3)}-${var.environment}"
port = 80
protocol = "TCP"
target_type = "alb"
vpc_id = var.vpc_id
lifecycle {
ignore_changes = [name]
create_before_destroy = true
}
}
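With target_type = "alb", the ALB itself has to be registered in this target group. That attachment is not shown above, so for completeness this is roughly what it looks like (assuming it lives elsewhere in the configuration):
resource "aws_lb_target_group_attachment" "nlb-to-alb" {
  target_group_arn = aws_lb_target_group.nlb-block-forgery-tg.arn
  target_id        = aws_lb.alb-block-forgery.arn
  port             = 80   # must match the ALB listener port
}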
DEFAULT TG
resource "aws_lb_target_group" "backend-tg" {
name = "${var.name}-backend-tg-${var.environment}"
deregistration_delay = var.deregistration_delay
vpc_id = var.vpc_id
health_check {
enabled = "true"
healthy_threshold = "5"
interval = var.health_check_interval
matcher = "200"
path = "/api/v1/blockforgery/signUp/healthCheck"
port = "traffic-port"
protocol = "HTTP"
timeout = "5"
unhealthy_threshold = "2"
}
load_balancing_algorithm_type = "round_robin"
port = "8083"
protocol = "HTTP"
protocol_version = "HTTP1"
slow_start = "0"
stickiness {
cookie_duration = "86400"
enabled = "false"
type = "lb_cookie"
}
target_type = "ip"
tags = {
Name = "${var.name}-backend-tg-${var.environment}"
Environment = var.environment
}
}
FARGATE SERVICE1 TG
resource "aws_lb_target_group" "venly-srv-tg" {
name = "venly-srv-tg-${var.environment}"
deregistration_delay = var.deregistration_delay
vpc_id = var.vpc_id
health_check {
enabled = "true"
healthy_threshold = "5"
interval = var.health_check_interval
matcher = "200"
path = "/api/v1/blockforgery/create-wallet/healthCheck"
port = "traffic-port"
protocol = "HTTP"
timeout = "5"
unhealthy_threshold = "2"
}
load_balancing_algorithm_type = "round_robin"
port = "8084"
protocol = "HTTP"
protocol_version = "HTTP1"
slow_start = "0"
stickiness {
cookie_duration = "86400"
enabled = "false"
type = "lb_cookie"
}
target_type = "ip"
tags = {
Name = "venly-srv-tg-${var.environment}"
Environment = var.environment
}
}
FARGATE SERVICE2 TG
resource "aws_lb_target_group" "blockchain-srv-tg" {
name = "blockchain-srv-tg-${var.environment}"
deregistration_delay = var.deregistration_delay
vpc_id = var.vpc_id
health_check {
enabled = "true"
healthy_threshold = "5"
interval = var.health_check_interval
matcher = "200"
path = "/api/v1/blockforgery/feed/healthCheck"
port = "traffic-port"
protocol = "HTTP"
timeout = "5"
unhealthy_threshold = "2"
}
load_balancing_algorithm_type = "round_robin"
port = "8085"
protocol = "HTTP"
protocol_version = "HTTP1"
slow_start = "0"
stickiness {
cookie_duration = "86400"
enabled = "false"
type = "lb_cookie"
}
target_type = "ip"
tags = {
Name = "blockchain-srv-tg-${var.environment}"
Environment = var.environment
}
}
ALB AND NLB LISTENERS
resource "aws_lb_listener" "tcp-alb" {
default_action {
order = "1"
target_group_arn = aws_lb_target_group.backend-tg.id
type = "forward"
}
load_balancer_arn = aws_lb.alb-block-forgery.arn
port = "80"
protocol = "HTTP"
}
resource "aws_lb_listener" "tcp-nlb" {
default_action {
target_group_arn = aws_lb_target_group.nlb-block-forgery-tg.id
type = "forward"
}
load_balancer_arn = aws_lb.nlb-block-forgery.arn
port = "80"
protocol = "TCP"
}
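The API Gateway VPC link from the list at the top is not part of these scripts. For completeness, a REST API VPC link pointing at this NLB would look roughly like the following (resource name is an assumption):
resource "aws_api_gateway_vpc_link" "this" {
  name        = "${var.name}-vpc-link-${var.environment}"
  target_arns = [aws_lb.nlb-block-forgery.arn]
}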
ALB LISTENER RULES
resource "aws_lb_listener_rule" "blockchain-srv" {
action {
order = "1"
target_group_arn = aws_lb_target_group.blockchain-srv-tg.arn
type = "forward"
}
condition {
path_pattern {
values = ["/api/v1/blockforgery/feed", "/api/v1/blockforgery/feed/*"]
}
}
listener_arn = aws_lb_listener.tcp-alb.id
priority = "2"
}
resource "aws_lb_listener_rule" "venly-srv" {
action {
order = "1"
target_group_arn = aws_lb_target_group.venly-srv-tg.arn
type = "forward"
}
condition {
path_pattern {
values = ["/api/v1/blockforgery/create-wallet", "/api/v1/blockforgery/create-wallet/*"]
}
}
listener_arn = aws_lb_listener.tcp-alb.id
priority = "3"
}
ECS SERVICE 1 (OK)
resource "aws_ecs_service" "backend" {
name = "${var.name}-backend"
cluster = aws_ecs_cluster.main.id
task_definition = aws_ecs_task_definition.backend.arn
desired_count = var.replicas_backend
deployment_maximum_percent = "200"
deployment_minimum_healthy_percent = "100"
enable_ecs_managed_tags = "true"
enable_execute_command = "false"
health_check_grace_period_seconds = "25"
launch_type = "FARGATE"
load_balancer {
container_name = var.task_name_backend
container_port = var.container_blockforgery_backend_port
target_group_arn = var.aws_alb_target_group_backend_arn
}
network_configuration {
assign_public_ip = "false"
security_groups = var.ecs_service_security_groups
subnets = var.subnets.*.id
}
deployment_circuit_breaker {
enable = "false"
rollback = "false"
}
deployment_controller {
type = "ECS"
}
platform_version = "LATEST"
propagate_tags = "TASK_DEFINITION"
scheduling_strategy = "REPLICA"
lifecycle {
ignore_changes = [task_definition, desired_count]
}
}
ECS SERVICE 2 (KO)
resource "aws_ecs_service" "venly-srv" {
name = "${var.name}-venly-srv"
cluster = aws_ecs_cluster.main.id
task_definition = aws_ecs_task_definition.venly-srv-task.arn
desired_count = var.replicas_venly_srv
deployment_maximum_percent = "200"
deployment_minimum_healthy_percent = "100"
enable_ecs_managed_tags = "true"
enable_execute_command = "false"
health_check_grace_period_seconds = "0"
launch_type = "FARGATE"
load_balancer {
container_name = var.task_name_venly_srv
container_port = var.container_blockforgery_venly_srv_port
target_group_arn = var.aws_alb_target_group_venly_srv_arn
}
network_configuration {
assign_public_ip = "false"
security_groups = var.ecs_service_security_groups
subnets = var.subnets.*.id
}
deployment_circuit_breaker {
enable = "false"
rollback = "false"
}
deployment_controller {
type = "ECS"
}
platform_version = "LATEST"
propagate_tags = "TASK_DEFINITION"
scheduling_strategy = "REPLICA"
lifecycle {
ignore_changes = [task_definition, desired_count]
}
}
ECS SERVICE 3 (KO)
resource "aws_ecs_service" "blockchain-srv" {
name = "${var.name}-blockchain-srv"
cluster = aws_ecs_cluster.main.id
task_definition = aws_ecs_task_definition.blockchain-srv-task.arn
desired_count = var.replicas_blockchain_srv
deployment_maximum_percent = "200"
deployment_minimum_healthy_percent = "100"
enable_ecs_managed_tags = "true"
enable_execute_command = "false"
health_check_grace_period_seconds = "0"
launch_type = "FARGATE"
scheduling_strategy = "REPLICA"
network_configuration {
assign_public_ip = "false"
security_groups = var.ecs_service_security_groups
subnets = var.subnets.*.id
}
load_balancer {
container_name = var.task_name_blockchain_srv
container_port = var.container_blockforgery_blockchain_srv_port
target_group_arn = var.aws_alb_target_group_blockchain_srv_arn
}
deployment_circuit_breaker {
enable = "false"
rollback = "false"
}
deployment_controller {
type = "ECS"
}
propagate_tags = "TASK_DEFINITION"
platform_version = "LATEST"
}
resource "aws_appautoscaling_target" "blockchain-srv" {
service_namespace = "ecs"
resource_id = "service/${aws_ecs_cluster.main.name}/${aws_ecs_service.blockchain-srv.name}"
scalable_dimension = "ecs:service:DesiredCount"
max_capacity = var.ecs_autoscale_max_instances
min_capacity = var.ecs_autoscale_min_instances
}
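The scalable target above is registered without a scaling policy in this snippet; if one is defined elsewhere, a target-tracking policy would typically look like this (metric and threshold are illustrative):
resource "aws_appautoscaling_policy" "blockchain-srv-cpu" {
  name               = "blockchain-srv-cpu-scaling"
  policy_type        = "TargetTrackingScaling"
  service_namespace  = aws_appautoscaling_target.blockchain-srv.service_namespace
  resource_id        = aws_appautoscaling_target.blockchain-srv.resource_id
  scalable_dimension = aws_appautoscaling_target.blockchain-srv.scalable_dimension

  target_tracking_scaling_policy_configuration {
    predefined_metric_specification {
      predefined_metric_type = "ECSServiceAverageCPUUtilization"
    }
    target_value = 70
  }
}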
Only the service reachable through the default target group works: its health check is OK, while on the two remaining target groups (the ones behind the path-based rules) the health checks keep failing. The ALB should route my requests to those remaining target groups as well.
Any help and suggestions will be very welcome. Thank you.

Related

Hashicorp Vault machines using Raft not talking to each other

I am trying to set up a Vault cluster using the integrated storage feature, meaning Raft as the backend. I have 3 machines in the same network (running Ubuntu 20.04). Let's assume they have the following names:
r1.gs.com
r2.gs.com
r3.gs.com
My config in 1st machine is as follows:
cluster_addr = "r1.gs.com:8201"
api_addr = "r1.gs.com:8200"
disable_mlock = true
listener "tcp" {
address = "0.0.0.0:8200"
cluster_address = "r1.gs.com:8201"
tls_cert_file = "/opt/vault/tls/server.crt"
tls_key_file = "/opt/vault/tls/server.key"
}
storage "raft" {
path = "/opt/vault/data"
node_id = "1"
retry_join {
leader_api_addr = "r2.gs.com:8200"
leader_client_cert_file = "/opt/vault/tls/server.crt"
leader_client_key_file = "/opt/vault/tls/server.key"
leader_ca_cert_file = "/opt/vault/tls/ca.crt"
}
retry_join {
leader_api_addr = "r3.gs.com:8200"
leader_client_cert_file = "/opt/vault/tls/server.crt"
leader_client_key_file = "/opt/vault/tls/server.key"
leader_ca_cert_file = "/opt/vault/tls/ca.crt"
}
}
ui = true
My config in 2nd machine is as follows:
cluster_addr = "r2.gs.com:8201"
api_addr = "r2.gs.com:8200"
disable_mlock = true
listener "tcp" {
address = "0.0.0.0:8200"
cluster_address = "r2.gs.com:8201"
tls_cert_file = "/opt/vault/tls/server.crt"
tls_key_file = "/opt/vault/tls/server.key"
}
storage "raft" {
path = "/opt/vault/data"
node_id = "1"
retry_join {
leader_api_addr = "r1.gs.com:8200"
leader_client_cert_file = "/opt/vault/tls/server.crt"
leader_client_key_file = "/opt/vault/tls/server.key"
leader_ca_cert_file = "/opt/vault/tls/ca.crt"
}
retry_join {
leader_api_addr = "r3.gs.com:8200"
leader_client_cert_file = "/opt/vault/tls/server.crt"
leader_client_key_file = "/opt/vault/tls/server.key"
leader_ca_cert_file = "/opt/vault/tls/ca.crt"
}
}
ui = true
My config in 3rd machine is as follows:
cluster_addr = "r3.gs.com:8201"
api_addr = "r3.gs.com:8200"
disable_mlock = true
listener "tcp" {
address = "0.0.0.0:8200"
cluster_address = "r3.gs.com:8201"
tls_cert_file = "/opt/vault/tls/server.crt"
tls_key_file = "/opt/vault/tls/server.key"
}
storage "raft" {
path = "/opt/vault/data"
node_id = "1"
retry_join {
leader_api_addr = "r1.gs.com:8200"
leader_client_cert_file = "/opt/vault/tls/server.crt"
leader_client_key_file = "/opt/vault/tls/server.key"
leader_ca_cert_file = "/opt/vault/tls/ca.crt"
}
retry_join {
leader_api_addr = "r2.gs.com:8200"
leader_client_cert_file = "/opt/vault/tls/server.crt"
leader_client_key_file = "/opt/vault/tls/server.key"
leader_ca_cert_file = "/opt/vault/tls/ca.crt"
}
}
ui = true
My Vault service starts up, I unseal each machine, and each machine becomes the leader. They are not talking to each other, even though I can telnet from each machine to the others on port 8200.
TLS is enabled. I am using a private ca.crt, and I generated the server.crt and server.key files myself.
If I tell r2.gs.com to join the Raft cluster on r1.gs.com, the command returns success, but the peer list on r1.gs.com doesn't show it.
Any idea where I am going wrong?
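One detail worth comparing against the Vault documentation: its retry_join examples give leader_api_addr as a full URL, scheme included (and api_addr likewise). A sketch of the first stanza written that way, assuming TLS is served on 8200:
retry_join {
  leader_api_addr         = "https://r2.gs.com:8200"
  leader_client_cert_file = "/opt/vault/tls/server.crt"
  leader_client_key_file  = "/opt/vault/tls/server.key"
  leader_ca_cert_file     = "/opt/vault/tls/ca.crt"
}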

Terraform Object Lock Configuration: AccessDenied

I have this Terraform script that works perfectly fine for the whole S3 module, but it cannot create the Object Lock configuration resource and returns the message:
error creating S3 bucket (bucket-name) Object lock configuration: AccessDenied: AccessDenied
Status code 403, request id: ..., host id: ...
Despite the message, the S3 bucket is actually created, but I still get this error; maybe there is something missing in the policy?
Here is my code.
module "s3_bucket" {
source = "terraform-aws-modules/s3-bucket/aws"
version = "3.4.0"
bucket = local.bucket_name
...
object_lock_enabled = true
attach_policy = true
policy = data.aws_iam_policy_document.voucher_s3_bucket.json
versioning = {
status = var.status
mfa_delete = var.mfa_delete
}
server_side_encryption_configuration = {
rule = {
apply_server_side_encryption_by_default = {
kms_master_key_id = aws_kms_key.voucher_s3_bucket.arn
sse_algorithm = "aws:kms"
}
}
}
}
data "aws_iam_policy_document" "s3_bucket_kms_key" {
statement {
sid = "AllowPutRoles"
effect = "Allow"
actions = ["kms:GenerateDataKey"]
principals {
identifiers = local.put_object_roles #we can use event_gateway iam_role for now
type = "AWS"
}
resources = ["*"]
}
statement {
sid = "AllowAdmin"
effect = "Allow"
actions = [
"kms:*",
]
principals {
identifiers = [data.aws_iam_role.admin_role.arn, data.aws_iam_role.default_role.arn, data.aws_iam_role.automation_role.arn]
type = "AWS"
}
resources = ["*"]
}
}
resource "aws_kms_key" "s3_bucket" {
tags = {
"s3_bucket" = local.bucket_name
}
enable_key_rotation = true
policy = data.aws_iam_policy_document.voucher_s3_bucket_kms_key.json
}
resource "aws_s3_bucket_object_lock_configuration" "s3_bucket_object_lock_configuration" {
bucket = local.bucket_name
rule {
default_retention {
mode = "GOVERNANCE"
years = 10
}
}
}
data "aws_iam_policy_document" "voucher_s3_bucket" {
statement {
sid = "DenyNoKMSEncryption"
effect = "Deny"
actions = ["s3:PutObject"]
principals {
identifiers = ["*"]
type = "*"
}
resources = ["${module.voucher_s3_bucket.s3_bucket_arn}/*"]
condition {
test = "StringNotEqualsIfExists"
values = ["aws:kms"]
variable = "s3:x-amz-server-side-encryption"
}
condition {
test = "Null"
values = ["false"]
variable = "s3:x-amz-server-side-encryption"
}
}
statement {
sid = "DenyWrongKMSKey"
effect = "Deny"
actions = ["s3:PutObject"]
principals {
identifiers = ["*"]
type = "*"
}
resources = ["${module.s3_bucket.s3_bucket_arn}/*"]
condition {
test = "StringNotEquals"
values = [aws_kms_key.voucher_s3_bucket.arn]
variable = "s3:x-amz-server-side-encryption-aws-kms-key-id"
}
}
statement {
sid = "AllowAdminDefault"
effect = "Allow"
actions = ["s3:*"]
principals {
identifiers = [data.aws_iam_role.admin_role.arn, data.aws_iam_role.default_role.arn]
type = "AWS"
}
resources = [
"${module.voucher_s3_bucket.s3_bucket_arn}/*",
module.voucher_s3_bucket.s3_bucket_arn,
]
}
statement {
sid = "DenyDeleteActions"
effect = "Deny"
actions = ["s3:DeleteBucket", "s3:DeleteObject", "s3:DeleteObjectVersion", "s3:PutBucketObjectLockConfiguration"]
principals {
identifiers = ["*"]
type = "AWS"
}
resources = [
"${module.s3_bucket.s3_bucket_arn}/*",
module.s3_bucket.s3_bucket_arn,
]
}
}
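A hedged aside, not a confirmed fix: wiring the object lock resource to the module's documented s3_bucket_id output instead of local.bucket_name at least makes the dependency on the module-managed bucket explicit, which rules ordering out as a factor:
resource "aws_s3_bucket_object_lock_configuration" "s3_bucket_object_lock_configuration" {
  bucket = module.s3_bucket.s3_bucket_id   # ties this resource to the module-managed bucket

  rule {
    default_retention {
      mode  = "GOVERNANCE"
      years = 10
    }
  }
}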

azurerm_mssql_virtual_machine - already exists

Trying to do an Azure Terraform deployment, and failing horribly - looking for some ideas about what I am missing. Basically I am trying to deploy 2 (maybe more later) VMs with a variable number and size of disks, join them to the domain, and add SQL Server to them. (Be gentle with me, I come from a VMware/Terraform background; this is my first SQL deployment on Azure!)
My module:
## main.tf:
# ----------- NIC --------------------------------
resource "azurerm_network_interface" "nic" {
name = "${var.vm_name}-nic"
resource_group_name = var.rg.name
location = var.location
ip_configuration {
name = "${var.vm_name}-internal"
subnet_id = var.subnet_id
private_ip_address_allocation = "Static"
private_ip_address = var.private_ip
}
dns_servers = var.dns_servers
}
# ----------- VM --------------------------------
resource "azurerm_windows_virtual_machine" "vm" {
/* count = length(var.instances) */
name = var.vm_name
location = var.location
resource_group_name = var.rg.name
network_interface_ids = [azurerm_network_interface.nic.id]
size = var.size
zone = var.zone
admin_username = var.win_admin_user
admin_password = var.win_admin_pw # data.azurerm_key_vault_secret.vmadminpwd.value
enable_automatic_updates = "false"
patch_mode = "Manual"
provision_vm_agent = "true"
tags = var.vm_tags
source_image_reference {
publisher = "MicrosoftSQLServer"
offer = "sql2019-ws2019"
sku = "enterprise"
version = "latest"
}
os_disk {
name = "${var.vm_name}-osdisk"
caching = "ReadWrite"
storage_account_type = "StandardSSD_LRS"
disk_size_gb = 250
}
}
# ----------- DOMAIN JOIN --------------------------------
// Waits for up to 1 hour for the domain to become available. Returns error 1 if unsuccessful, preventing the member from attempting to join.
resource "azurerm_virtual_machine_extension" "wait-for-domain-to-provision" {
name = "TestConnectionDomain"
publisher = "Microsoft.Compute"
type = "CustomScriptExtension"
type_handler_version = "1.9"
virtual_machine_id = azurerm_windows_virtual_machine.vm.id
settings = <<SETTINGS
{
"commandToExecute": "powershell.exe -Command \"while (!(Test-Connection -ComputerName ${var.active_directory_domain_name} -Count 1 -Quiet) -and ($retryCount++ -le 360)) { Start-Sleep 10 } \""
}
SETTINGS
}
resource "azurerm_virtual_machine_extension" "join-domain" {
name = azurerm_windows_virtual_machine.vm.name
publisher = "Microsoft.Compute"
type = "JsonADDomainExtension"
type_handler_version = "1.3"
virtual_machine_id = azurerm_windows_virtual_machine.vm.id
settings = <<SETTINGS
{
"Name": "${var.active_directory_domain_name}",
"OUPath": "",
"User": "${var.active_directory_username}#${var.active_directory_domain_name}",
"Restart": "true",
"Options": "3"
}
SETTINGS
protected_settings = <<SETTINGS
{
"Password": "${var.active_directory_password}"
}
SETTINGS
depends_on = [azurerm_virtual_machine_extension.wait-for-domain-to-provision]
}
# ----------- DISKS --------------------------------
resource "azurerm_managed_disk" "data" {
for_each = var.disks
name = "${var.vm_name}-${each.value.name}"
location = var.location
resource_group_name = var.rg.name
storage_account_type = each.value.sa
create_option = each.value.create
disk_size_gb = each.value.size
zone = var.zone
}
resource "azurerm_virtual_machine_data_disk_attachment" "disk-attachment" {
for_each = var.disks
managed_disk_id = azurerm_managed_disk.data[each.key].id
virtual_machine_id = azurerm_windows_virtual_machine.vm.id
lun = each.value.lun
caching = "ReadWrite"
depends_on = [azurerm_windows_virtual_machine.vm]
}
# ----------- SQL --------------------------------
# configure the SQL side of the deployment
resource "azurerm_mssql_virtual_machine" "sqlvm" {
/* count = length(var.instances) */
virtual_machine_id = azurerm_windows_virtual_machine.vm.id
sql_license_type = "PAYG"
r_services_enabled = true
sql_connectivity_port = 1433
sql_connectivity_type = "PRIVATE"
/* sql_connectivity_update_username = var.sqladmin
sql_connectivity_update_password = data.azurerm_key_vault_secret.sqladminpwd.value */
#The storage_configuration block supports the following:
storage_configuration {
disk_type = "NEW" # (Required) The type of disk configuration to apply to the SQL Server. Valid values include NEW, EXTEND, or ADD.
storage_workload_type = "OLTP" # (Required) The type of storage workload. Valid values include GENERAL, OLTP, or DW.
data_settings {
default_file_path = "F:\\Data"
luns = [1]
}
log_settings {
default_file_path = "G:\\Log"
luns = [2]
}
temp_db_settings {
default_file_path = "D:\\TempDb"
luns = [0]
}
}
}
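As an aside, the sql_maint_* variables declared in variables.tf below are not referenced anywhere in this resource; if the intent was to configure SQL maintenance windows, they would typically feed the provider's auto_patching block. A hedged sketch of that mapping (not part of the original module):
resource "azurerm_mssql_virtual_machine" "sqlvm" {
  # ... existing arguments as above ...

  auto_patching {
    day_of_week                            = var.sql_maint_day
    maintenance_window_duration_in_minutes = var.sql_maint_length_min
    maintenance_window_starting_hour       = var.sql_maint_start_hour
  }
}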
## provider.tf
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = ">=3.0.1"
#configuration_aliases = [azurerm.corp]
}
}
}
variables.tf
# ----------- COMMON --------------------------------
variable "vm_name" {
type = string
}
variable "rg" {
/* type = string */
description = "STACK - resource group"
}
variable "location" {
type = string
description = "STACK - location"
}
# ----------- NIC --------------------------------
variable "subnet_id" {
type = string
description = "STACK - subnet"
}
variable "private_ip" {
}
variable "dns_servers" {
}
# ----------- VM --------------------------------
variable "size" {
description = "VM - size"
type = string
}
variable "win_admin_user" {
sensitive = true
type = string
}
variable "win_admin_pw" {
sensitive = true
type = string
}
variable "os_storage_type" {
type = string
}
variable "vm_tags" {
type = map(any)
}
variable "zone" {
#type = list
description = "VM AZ"
}
# ----------- DOMAIN JOIN --------------------------------
variable "active_directory_domain_name" {
type = string
}
variable "active_directory_username" {
sensitive = true
}
variable "active_directory_password" {
sensitive = true
}
# ----------- SQL --------------------------------
variable "sql_maint_day" {
type = string
description = "SQL - maintenance day"
}
variable "sql_maint_length_min" {
type = number
description = "SQL - maintenance duration (min)"
}
variable "sql_maint_start_hour" {
type = number
description = "SQL- maintenance start (hour of the day)"
}
# ----------- DISKS --------------------------------
/* variable "disk_storage_account" {
type = string
default = "Standard_LRS"
description = "DATA DISKS - storage account type"
}
variable "disk_create_method" {
type = string
default = "Empty"
description = "DATA DISKS - creation method"
}
variable "disk_size0" {
type = number
}
variable "disk_size1" {
type = number
}
variable "disk_size2" {
type = number
}
variable "lun0" {
type = number
default = 0
}
variable "lun1" {
type = number
default = 1
}
variable "lun2" {
default = 2
type = number
} */
/* variable "disks" {
description = "List of disks to create"
type = map(any)
default = {
disk0 = {
name = "data0"
size = 200
create = "Empty"
sa = "Standard_LRS"
lun = 0
}
disk1 = {
name = "data1"
size = 500
create = "Empty"
sa = "Standard_LRS"
lun = 1
}
}
} */
variable "disks" {
type = map(object({
name = string
size = number
create = string
sa = string
lun = number
}))
}
the actual deployment:
main.tf
/*
PS /home/fabrice> Get-AzVMSize -Location northeurope | where-object {$_.Name -like "*ds13*"}
*/
module "uat_set" {
source = "../modules/vm"
providers = {
azurerm = azurerm.cbank-test
}
for_each = var.uat_set
active_directory_domain_name = local.uat_ad_domain
active_directory_password = var.domain_admin_password
active_directory_username = var.domain_admin_username
disks = var.disk_allocation
dns_servers = local.dns_servers
location = local.uat_location
os_storage_type = local.uat_storage_type
private_ip = each.value.private_ip
rg = data.azurerm_resource_group.main
size = each.value.vm_size
sql_maint_day = local.uat_sql_maintenance_day
sql_maint_length_min = local.uat_sql_maintenance_min
sql_maint_start_hour = local.uat_sql_maintenance_start_hour
subnet_id = data.azurerm_subnet.main.id
vm_name = each.key
vm_tags = var.default_tags
win_admin_pw = var.admin_password
win_admin_user = var.admin_username
zone = each.value.zone[0]
}
variable "uat_set" {
description = "List of VM-s to create"
type = map(any)
default = {
UAT-SQLDB-NE-01 = {
private_ip = "192.168.32.8"
vm_size = "Standard_DS13-4_v2"
zone = ["1"]
}
UAT-SQLDB-NE-02 = {
private_ip = "192.168.32.10"
vm_size = "Standard_DS13-4_v2"
zone = ["2"]
}
}
}
variable "disk_allocation" {
type = map(object({
name = string
size = number
create = string
sa = string
lun = number
}))
default = {
"temp" = {
name = "temp"
size = 200
create = "Empty"
sa = "Standard_LRS"
lun = 0
},
"disk1" = {
name = "data1"
size = 500
create = "Empty"
sa = "Standard_LRS"
lun = 1
},
"disk2" = {
name = "data2"
size = 500
create = "Empty"
sa = "Standard_LRS"
lun = 2
}
}
}
locals {
dns_servers = ["192.168.34.5", "192.168.34.10"]
uat_storage_type = "Standard_LRS"
uat_sql_maintenance_day = "Saturday"
uat_sql_maintenance_min = 180
uat_sql_maintenance_start_hour = 23
uat_ad_domain = "civbdev.local"
uat_location = "North Europe"
}
## variables.tf
# new build variables
variable "Environment" {
default = "DEV"
description = "this is the environment variable used to intperpolate with others vars"
}
variable "default_tags" {
type = map(any)
default = {
Environment = "DEV"
Product = "dev-XXXtemplateXXX"
Terraformed = "https://AllicaBankLtd#dev.azure.com/XXXtemplateXXX/Terraform/DEV"
}
}
variable "admin_username" {
sensitive = true
}
variable "admin_password" {
sensitive = true
}
variable "domain_admin_username" {
sensitive = true
}
variable "domain_admin_password" {
sensitive = true
}
The resources are created OK, except for the SQL part:
│ Error: A resource with the ID "/subscriptions/<..redacted...>/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/UAT-SQLDB-NE-02" already exists - to be managed via Terraform this resource needs to be imported into the State. Please see the resource documentation for "azurerm_mssql_virtual_machine" for more information.
│
│ with module.uat_set["UAT-SQLDB-NE-02"].azurerm_mssql_virtual_machine.sqlvm,
│ on ../modules/vm/main.tf line 115, in resource "azurerm_mssql_virtual_machine" "sqlvm":
│ 115: resource "azurerm_mssql_virtual_machine" "sqlvm" {
│
╵
╷
│ Error: A resource with the ID "/subscriptions/<..redacted...>/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/UAT-SQLDB-NE-01" already exists - to be managed via Terraform this resource needs to be imported into the State. Please see the resource documentation for "azurerm_mssql_virtual_machine" for more information.
│
│ with module.uat_set["UAT-SQLDB-NE-01"].azurerm_mssql_virtual_machine.sqlvm,
│ on ../modules/vm/main.tf line 115, in resource "azurerm_mssql_virtual_machine" "sqlvm":
│ 115: resource "azurerm_mssql_virtual_machine" "sqlvm" {
│
╵
Any ideas what I might be missing?
Ta,
Fabrice
UPDATE:
Thanks to those who replied. Just to confirm: it is not an already existing resource. I get this error right at the time these VMs are created.
For example, these are my VMs after the Terraform run (none of them has the SQL extension).
The plan even states it will create them:
Terraform will perform the following actions:
# module.uat_set["UAT-SQLDB-NE-01"].azurerm_mssql_virtual_machine.sqlvm will be created
+ resource "azurerm_mssql_virtual_machine" "sqlvm" {
+ id = (known after apply)
+ r_services_enabled = true
+ sql_connectivity_port = 1433
+ sql_connectivity_type = "PRIVATE"
+ sql_license_type = "PAYG"
+ virtual_machine_id = "/subscriptions/..../providers/Microsoft.Compute/virtualMachines/UAT-SQLDB-NE-01"
+ storage_configuration {
+ disk_type = "NEW"
+ storage_workload_type = "OLTP"
+ data_settings {
+ default_file_path = "F:\\Data"
+ luns = [
+ 1,
]
}
+ log_settings {
+ default_file_path = "G:\\Log"
+ luns = [
+ 2,
]
}
+ temp_db_settings {
+ default_file_path = "Z:\\TempDb"
+ luns = [
+ 0,
]
}
}
}
# module.uat_set["UAT-SQLDB-NE-02"].azurerm_mssql_virtual_machine.sqlvm will be created
+ resource "azurerm_mssql_virtual_machine" "sqlvm" {
+ id = (known after apply)
+ r_services_enabled = true
+ sql_connectivity_port = 1433
+ sql_connectivity_type = "PRIVATE"
+ sql_license_type = "PAYG"
+ virtual_machine_id = "/subscriptions/..../providers/Microsoft.Compute/virtualMachines/UAT-SQLDB-NE-02"
+ storage_configuration {
+ disk_type = "NEW"
+ storage_workload_type = "OLTP"
+ data_settings {
+ default_file_path = "F:\\Data"
+ luns = [
+ 1,
]
}
+ log_settings {
+ default_file_path = "G:\\Log"
+ luns = [
+ 2,
]
}
+ temp_db_settings {
+ default_file_path = "Z:\\TempDb"
+ luns = [
+ 0,
]
}
}
}
Plan: 2 to add, 0 to change, 0 to destroy.
Presumably, if these resources somehow existed already - which would be odd, as Terraform has just created the VMs - then the plan would not say it is going to create them now, would it?
So the error is quite the source of my confusion: if the VM has just been created and the creation of the extension failed, how could it possibly already exist?
In this case you should probably just import the resources into your Terraform state, as the error suggests. For example:
terraform import module.uat_set[\"UAT-SQLDB-NE-02\"].azurerm_mssql_virtual_machine.sqlvm "/subscriptions/<..redacted...>/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/UAT-SQLDB-NE-02"

Azure Container Apps restart serveral times even if my batch is terminated

I am implementing a batch job in Azure Container Apps.
When a message arrives on a Service Bus queue, my batch runs.
For this, I added a scale rule to scale automatically when a message arrives on the queue.
It works well: when there is a message, the app is scaled out from 0 to 1 replica. But when my batch has finished, the replica restarts the container several times until it is scaled back in to 0.
Here is my Terraform script to create the Container App:
resource "azapi_resource" "container_app" {
name = var.container_app_name
location = "northeurope"
parent_id = data.azurerm_resource_group.resource_group.id
identity {
type = "UserAssigned"
identity_ids = [data.azurerm_user_assigned_identity.aca_identity.id]
}
type = "Microsoft.App/containerApps#2022-03-01"
body = jsonencode({
properties: {
managedEnvironmentId = data.azapi_resource.container_environment.id
configuration = {
secrets = [
{
name = "regitry-password"
value = data.azurerm_container_registry.acr.admin_password
},
{
name = "service-bus-connection-string"
value = data.azurerm_servicebus_namespace.servicebus.default_primary_connection_string
}
]
ingress = null
registries = [
{
server = data.azurerm_container_registry.acr.login_server
username = data.azurerm_container_registry.acr.admin_username,
passwordSecretRef = "regitry-password"
}]
}
template = {
containers = [{
image = "${data.azurerm_container_registry.acr.login_server}/${var.container_repository}:${var.container_image_tag}"
name = "dbt-instance"
resources = {
cpu = var.container_cpu
memory = var.container_memory
}
env = [
{
name = "APP_CONFIG_NAME"
value = var.app_configuration_name
},
{
name = "AZURE_CLIENT_ID"
value = data.azurerm_user_assigned_identity.aca_identity.client_id
}
]
}]
scale = {
minReplicas = 0
maxReplicas = 5
rules = [{
name = "queue-based-autoscaling"
custom = {
type = "azure-servicebus"
metadata = {
queueName = var.service_bus_queue_name
messageCount = "1"
}
auth = [{
secretRef = "service-bus-connection-string"
triggerParameter = "connection"
}]
}
}]
}
}
}
})
}
How can I run my container only once?
I managed to do it with Azure Container Instances using the property "restartPolicy=Never".
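For reference, the restartPolicy=Never behaviour mentioned above maps to the restart_policy argument of azurerm_container_group in Terraform; a minimal sketch (names, image reference and sizes are illustrative assumptions):
resource "azurerm_container_group" "batch" {
  name                = "dbt-batch"
  location            = "northeurope"
  resource_group_name = data.azurerm_resource_group.resource_group.name
  os_type             = "Linux"
  ip_address_type     = "None"    # no ingress needed for a batch container
  restart_policy      = "Never"   # run-once behaviour referred to above

  container {
    name   = "dbt-instance"
    image  = "${data.azurerm_container_registry.acr.login_server}/${var.container_repository}:${var.container_image_tag}"
    cpu    = 1
    memory = 2
  }
}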

How to configure App Service to use Azure AD login from Terraform

It is easy to configure a web App Service to use Azure AD login manually via the official documentation. However, how can I achieve this from Terraform? I've searched for a while and didn't find any examples; if you happen to know of one, it would be nice if you could share it.
The following code is how I created the resource group and provisioned the web application:
terraform {
backend "azurerm" {}
}
terraform {
required_version = ">= 0.13"
}
resource "azurerm_resource_group" "tf_resource_group" {
name = "RG_${var.application_name}_${var.environment}"
location = var.location
tags = {
environment = var.environment
DeployedBy = "terraform"
}
}
resource "azurerm_app_service_plan" "tf_service_plan" {
name = "${var.application_name}-${var.environment}-asp"
location = azurerm_resource_group.tf_resource_group.location
resource_group_name = azurerm_resource_group.tf_resource_group.name
kind = "Linux"
reserved = true
sku {
tier = "Standard"
size = "S1"
}
tags = {
environment = var.environment
DeployedBy = "terraform"
}
}
resource "azurerm_app_service" "tf_app_service" {
name = var.application_name
location = azurerm_resource_group.tf_resource_group.location
resource_group_name = azurerm_resource_group.tf_resource_group.name
app_service_plan_id = azurerm_app_service_plan.tf_service_plan.id
site_config {
always_on = true
linux_fx_version = "DOCKER|${var.acr_name}.azurecr.io/${var.img_repo_name}:${var.tag}"
}
app_settings = {
DOCKER_REGISTRY_SERVER_URL = "$DRSRUL"
WEBSITES_ENABLE_APP_SERVICE_STORAGE = "false"
DOCKER_REGISTRY_SERVER_USERNAME = "$ACRNAME"
DOCKER_REGISTRY_SERVER_PASSWORD = "$PW"
}
identity {
type = "SystemAssigned"
}
}
I believe your "azurerm_app_service" resource block needs an auth_settings block with an active_directory block. Example:
auth_settings {
enabled = true
active_directory {
client_id = "${azuread_application.example.application_id}"
}
default_provider = "AzureActiveDirectory"
issuer = "https://sts.windows.net/xxxxxxx-xxxx-xxx-xxxx-xxxtenantID/"
}
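The snippet above assumes an Azure AD application already exists to supply application_id; a minimal sketch of one (display name is illustrative, argument name per azuread provider 2.x):
resource "azuread_application" "example" {
  display_name = "example-appservice-auth"
}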