I am having trouble provisioning a VM in vSphere 6.5 using Terraform v0.11.13 and provider.vsphere v1.9.1 using a static ipv4 address. When the template gets cloned it configures the VM for DHCP. The vsphere template is configured with a static ip too. But I've noticed that after the clone it's set back to DHCP. This works with the linux_options.
I've set the static IP for the template to match the final VM's IP. I removed the vm_count thinking that it expects multiple VMs so it uses DHCP.
This is the Module vars.tf:
variable "vsphere_user" {
  type        = "string"
  description = "The terraform vsphere service account."
  # NOTE(review): the original default read "vsphereUser#acme.com"; a vSphere
  # UPN uses "@" — the "#" looks like a copy/paste artifact. Confirm in vCenter.
  default     = "vsphereUser@acme.com"
}
variable "vsphere_password" {
type = "string"
description = "The terraform vsphere service account password."
default = ""
}
variable "vsphere_server" {
type = "string"
description = "The vsphere virtual center server."
default = "vcenter.acme.com"
}
variable "vsphere_guest_id" {
type = "string"
description = "The vsphere guest id to assign Windows servers."
default = "windows9Server64Guest"
}
variable "datacenter" {
type = "string"
description = "The datacenter to deploy the virtual machines to."
default = "HCI"
}
variable "datastore" {
type = "string"
description = "The datastore to deploy the virtual machines to."
default = "DS-01"
}
variable "linux_template" {
type = "string"
description = "The default linux template"
default = "CENT-7.5-50GB"
}
variable "windows_template" {
type = "string"
description = "The default windows template"
default = "Packer-Win2016-HCI-20190424"
}
variable "network" {
  type        = "string"
  # Fixed typo: "vshpere" -> "vsphere".
  description = "The vsphere network to assign to the VM."
  default     = "VLAN_99"
}
variable "cluster" {
type = "string"
description = "The vsphere cluster to assign."
default = "General-VMs"
}
variable "vm_root_password" {
type = "string"
description = "The initial Linux server root password set prior to puppet inline provisioning."
default = "xxxxx"
}
variable "vm_default_password" {
type = "string"
description = "The initial Windows server root password set prior to the SCCM provisioning."
default = "xxxxx"
}
variable "vm_domain_name" {
type = "string"
description = "The domain name to assign to the VM."
default = "acme.com"
}
variable "vm_time_zone_linux" {
type = "string"
description = "The time zone to assign to the VM."
default = "America/New_York"
}
variable "vm_time_zone_windows" {
type = "string"
description = "The time zone to assign to the VM."
default = "35"
}
variable "vm_name" {
type = "string"
description = "The name to use for vsphere virtual machines created with this module."
default = ""
}
variable "vm_host_name" {
type = "string"
description = "The hostname to provision the server with."
default = ""
}
variable "vm_ip_address" {
  type        = "string"
  # Fixed typo: "assing" -> "assign".
  description = "The IP address to assign to the VM."
  default     = ""
}
variable "vm_netmask" {
type = "string"
description = "The netmask to assign to the VM."
default = "24"
}
variable "vm_ip_gateway" {
  type        = "string"
  # Fixed typo: "assing" -> "assign".
  description = "The gateway address to assign to the VM."
  default     = ""
}
variable "vm_num_cpus" {
type = "string"
description = "The number of CPU to allocate the VM."
default = "2"
}
variable "vm_memory" {
type = "string"
description = "The amount of RAM to allocate the VM."
default = "4096"
}
variable "vm_count" {
description = "The number of virtual machines to create."
default = "1"
}
variable "folder" {
description = "The folder to place the VM in vSphere."
default = "Servers/3Test"
}
variable "vm_cpu_hotadd" {
description = "Allows CPU to be added realtime."
default = "true"
}
variable "vm_memory_hotadd" {
description = "Allows memory to be added realtime."
default = "true"
}
variable "vm_disk0_size" {
description = "The size of disk 0."
default = "50"
}
variable "vm_disk1_size" {
description = "The size of disk 1."
default = "10"
}
variable "vm_notes" {
description = "Description of VM."
default = "terraform"
}
This is the root vars.tf: (they are very similar, still working on which are needed for which folder)
variable "vsphere_user" {
  type        = "string"
  description = "The terraform vsphere service account."
  # NOTE(review): the original default read "vsphereUser#acme.com"; a vSphere
  # UPN uses "@" — the "#" looks like a copy/paste artifact. Confirm in vCenter.
  default     = "vsphereUser@acme.com"
}
variable "vsphere_password" {
type = "string"
description = "The terraform vsphere service account password."
default = ""
}
variable "vsphere_server" {
type = "string"
description = "The vsphere virtual center server."
default = "vcenter.acme.com"
}
variable "vsphere_guest_id" {
type = "string"
description = "The vsphere guest id to assign Windows servers."
default = "windows9Server64Guest"
}
variable "datacenter" {
type = "string"
description = "The datacenter to deploy the virtual machines to."
default = "HCI"
}
variable "datastore" {
type = "string"
description = "The datastore to deploy the virtual machines to."
default = "DS-01"
}
variable "linux_template" {
type = "string"
description = "The default linux template"
default = "CENT-7.6-50GB"
}
variable "network" {
  type        = "string"
  # Fixed typo: "vshpere" -> "vsphere".
  description = "The vsphere network to assign to the VM."
  default     = "VLAN_99"
}
variable "cluster" {
type = "string"
description = "The vsphere cluster to assign."
default = "General-VMs"
}
variable "vm_root_password" {
  type        = "string"
  # Fixed typo: "intial" -> "initial".
  description = "The initial Linux server root password set prior to puppet inline provisioning."
  default     = "xxxxx"
}
variable "vm_default_password" {
  type        = "string"
  # Fixed typo: "intial" -> "initial".
  description = "The initial Windows server root password set prior to the SCCM provisioning."
  default     = "xxxxx"
}
variable "vm_domain_name" {
type = "string"
description = "The domain name to assign to the VM."
default = "acme.com"
}
variable "vm_time_zone_linux" {
type = "string"
description = "The time zone to assign to the VM."
default = "America/New_York"
}
variable "vm_time_zone_windows" {
type = "string"
description = "The time zone to assign to the VM."
default = "35"
}
variable "vm_name" {
type = "string"
description = "The name to use for vsphere virtual machines created with this module."
default = ""
}
variable "vm_host_name" {
type = "string"
description = "The hostname to provision the server with."
default = ""
}
variable "vm_ip_address" {
  type        = "string"
  # Fixed typo: "assing" -> "assign".
  description = "The IP address to assign to the VM."
  default     = ""
}
variable "vm_netmask" {
type = "string"
description = "The netmask to assign to the VM."
default = "24"
}
variable "vm_ip_gateway" {
  type        = "string"
  # Fixed typo: "assing" -> "assign".
  description = "The gateway address to assign to the VM."
  default     = ""
}
variable "vm_num_cpus" {
type = "string"
description = "The number of CPU to allocate the VM."
default = "2"
}
variable "vm_memory" {
type = "string"
description = "The amount of RAM to allocate the VM."
default = "4096"
}
variable "vm_count" {
description = "The number of virtual machines to create."
default = "1"
}
variable "folder" {
description = "The folder to place the VM in vSphere."
default = "Servers/1Prod"
}
variable "vm_cpu_hotadd" {
description = "Allows CPU to be added realtime."
default = "true"
}
variable "vm_memory_hotadd" {
description = "Allows memory to be added realtime."
default = "true"
}
variable "vm_notes" {
type = "string"
description = "Description of VM."
default = "terraform"
}
This is the VMs module tf:
module "testv11" {
  # NOTE(review): the original source read "git::ssh://git#gitlab/..." with a
  # stray space in "terraform- modules.git" — both look like copy/paste
  # artifacts; a git SSH source uses "git@<host>" and no spaces in the path.
  source = "git::ssh://git@gitlab/iac/terraform-modules.git//windows-base?ref=windows-base-prod-v1.0.2"

  vm_name       = "TESTV11"
  vm_host_name  = "testv11"
  vm_ip_address = "10.255.187.45"
  vm_ip_gateway = "10.255.187.1"
  network       = "VLAN_99"
  vm_disk0_size = "50"
  vm_disk1_size = "10"
  folder        = "Servers/1Prod"
}
In the terraform plan, I expect to see the static IP address.
Windows plan not showing static IP:
clone.#: "" => "1"
clone.0.customize.#: "" => "1"
clone.0.customize.0.network_interface.#: "" => "1"
clone.0.customize.0.timeout: "" => "10"
clone.0.customize.0.windows_options.#: "" => "1"
clone.0.customize.0.windows_options.0.admin_password: "<sensitive>" => "<sensitive>"
clone.0.customize.0.windows_options.0.auto_logon_count: "" => "1"
clone.0.customize.0.windows_options.0.computer_name: "" => "testv11"
clone.0.customize.0.windows_options.0.full_name: "" =>
"Administrator"
clone.0.customize.0.windows_options.0.organization_name: "" => "Managed by Terraform"
clone.0.customize.0.windows_options.0.time_zone: "" => "35"
clone.0.customize.0.windows_options.0.workgroup: "" => "workgroup"
clone.0.template_uuid: "" => "422912a4-a156-2e28-10a8-2684d835c834"
clone.0.timeout: "" => "30"
Linux plan showing static IP:
clone.#: "" => "1"
clone.0.customize.#: "" => "1"
clone.0.customize.0.ipv4_gateway: "" => "10.255.187.1"
clone.0.customize.0.linux_options.#: "" => "1"
clone.0.customize.0.linux_options.0.domain: "" => "acme.com"
clone.0.customize.0.linux_options.0.host_name: "" => "linuxv01"
clone.0.customize.0.linux_options.0.hw_clock_utc: "" => "true"
clone.0.customize.0.linux_options.0.time_zone: "" => "America/New_York"
clone.0.customize.0.network_interface.#: "" => "1"
clone.0.customize.0.network_interface.0.ipv4_address: "" => "10.255.187.38"
clone.0.customize.0.network_interface.0.ipv4_netmask: "" => "24"
clone.0.customize.0.timeout: "" => "10"
clone.0.template_uuid: "" => "4229c06d-522d-88d5-c158-a53fca0a96cb"
clone.0.timeout: "" => "30"
Related
I have a sample code below from terraform but I'm having some issues trying to declare a variable that the argument is a block
basic {}
and moving to production will be something like
dedicated {
cku = 2
}
DEV
resource "confluent_kafka_cluster" "basic" {
display_name = "basic_kafka_cluster"
availability = "SINGLE_ZONE"
cloud = "GCP"
region = "us-central1"
basic {} <<<< # I want this block to be declared as variable
# Calling the variable
local.cluster_type["dev"] <<<< # this approach is not supported. how can I call the variable directly if there is no argument?
}
PROD
resource "confluent_kafka_cluster" "dedicated" {
display_name = "dedicated_kafka_cluster"
availability = "MULTI_ZONE"
cloud = "GCP"
region = "us-central1"
# For Production it is using a different block
dedicated {
cku = 2
}
# Calling the variable
local.cluster_type["prod"] <<<<< # this approach is not supported. how can I call the variable directly if there is no argument?
}
Local variables
locals {
cluster_type = {
prod = "dedicated {
cku = 2
}"
dev = "basic {}"
}
}
You have some issues with your script:
confluent_kafka_cluster is deprecated, it should be replaced by confluentcloud_kafka_cluster
To use the environment, you can create confluentcloud_environment:
resource "confluentcloud_environment" "env" {
display_name = var.environment
}
To solve the issue of the block, you can use dynamic with conditions, like this:
dynamic "basic" {
for_each = var.environment == "dev" ? [1] : []
content {}
}
dynamic "dedicated" {
for_each = var.environment == "prod" ? [1] : []
content {
cku = 2
}
}
Your code can be like this:
resource "confluentcloud_environment" "env" {
display_name = var.environment
}
resource "confluentcloud_kafka_cluster" "basic" {
display_name = "basic_kafka_cluster"
availability = "SINGLE_ZONE"
cloud = "GCP"
region = "us-central1"
dynamic "basic" {
for_each = var.environment == "dev" ? [1] : []
content {}
}
dynamic "dedicated" {
for_each = var.environment == "prod" ? [1] : []
content {
cku = 2
}
}
environment {
id = confluentcloud_environment.env.id
}
}
variable "environment" {
default = "dev"
}
Trying to do an AZ Terraform deployment, and failing horribly - looking for some ideas what am I missing. Basically I am trying to deploy 2 (maybe later more) VM-s with variable size of disks, joining them to the domain and add SQL server to them. (Be gentle with me, I am from VMWare-Tf background, this is my first SQL deployment on AZ!)
My module:
## main.tf:
# ----------- NIC --------------------------------
# One NIC per VM, named "<vm_name>-nic", placed in the caller-supplied
# resource group / location.
resource "azurerm_network_interface" "nic" {
name = "${var.vm_name}-nic"
resource_group_name = var.rg.name
location = var.location
# Static private IP supplied by the caller (per-VM in the uat_set map).
ip_configuration {
name = "${var.vm_name}-internal"
subnet_id = var.subnet_id
private_ip_address_allocation = "Static"
private_ip_address = var.private_ip
}
# Custom DNS servers (domain controllers) so the VM can resolve the AD
# domain before the join-domain extension runs.
dns_servers = var.dns_servers
}
# ----------- VM --------------------------------
resource "azurerm_windows_virtual_machine" "vm" {
/* count = length(var.instances) */
name = var.vm_name
location = var.location
resource_group_name = var.rg.name
network_interface_ids = [azurerm_network_interface.nic.id]
size = var.size
zone = var.zone
admin_username = var.win_admin_user
admin_password = var.win_admin_pw # data.azurerm_key_vault_secret.vmadminpwd.value
enable_automatic_updates = "false"
patch_mode = "Manual"
provision_vm_agent = "true"
tags = var.vm_tags
source_image_reference {
publisher = "MicrosoftSQLServer"
offer = "sql2019-ws2019"
sku = "enterprise"
version = "latest"
}
os_disk {
name = "${var.vm_name}-osdisk"
caching = "ReadWrite"
storage_account_type = "StandardSSD_LRS"
disk_size_gb = 250
}
}
# ----------- DOMAIN JOIN --------------------------------
// Waits for up to 1 hour for the Domain to become available. Will return an error 1 if unsuccessful preventing the member attempting to join.
resource "azurerm_virtual_machine_extension" "wait-for-domain-to-provision" {
name = "TestConnectionDomain"
publisher = "Microsoft.Compute"
type = "CustomScriptExtension"
type_handler_version = "1.9"
virtual_machine_id = azurerm_windows_virtual_machine.vm.id
settings = <<SETTINGS
{
"commandToExecute": "powershell.exe -Command \"while (!(Test-Connection -ComputerName ${var.active_directory_domain_name} -Count 1 -Quiet) -and ($retryCount++ -le 360)) { Start-Sleep 10 } \""
}
SETTINGS
}
resource "azurerm_virtual_machine_extension" "join-domain" {
  name                 = azurerm_windows_virtual_machine.vm.name
  publisher            = "Microsoft.Compute"
  type                 = "JsonADDomainExtension"
  type_handler_version = "1.3"
  virtual_machine_id   = azurerm_windows_virtual_machine.vm.id

  # NOTE(review): "User" must be a UPN ("user@domain"); the original joined the
  # two variables with "#", which the extension rejects — looks like a
  # copy/paste artifact. "Options": "3" is the usual join-and-create-account
  # flag combination — confirm against the extension docs.
  settings = <<SETTINGS
{
  "Name": "${var.active_directory_domain_name}",
  "OUPath": "",
  "User": "${var.active_directory_username}@${var.active_directory_domain_name}",
  "Restart": "true",
  "Options": "3"
}
SETTINGS

  # Credential goes in protected_settings so it is not exposed in logs/state output.
  protected_settings = <<SETTINGS
{
  "Password": "${var.active_directory_password}"
}
SETTINGS

  # Only attempt the join once the domain is reachable (see the wait extension).
  depends_on = [azurerm_virtual_machine_extension.wait-for-domain-to-provision]
}
# ----------- DISKS --------------------------------
resource "azurerm_managed_disk" "data" {
for_each = var.disks
name = "${var.vm_name}-${each.value.name}"
location = var.location
resource_group_name = var.rg.name
storage_account_type = each.value.sa
create_option = each.value.create
disk_size_gb = each.value.size
zone = var.zone
}
resource "azurerm_virtual_machine_data_disk_attachment" "disk-attachment" {
for_each = var.disks
managed_disk_id = azurerm_managed_disk.data[each.key].id
virtual_machine_id = azurerm_windows_virtual_machine.vm.id
lun = each.value.lun
caching = "ReadWrite"
depends_on = [azurerm_windows_virtual_machine.vm]
}
# ----------- SQL --------------------------------
# configure the SQL side of the deployment
resource "azurerm_mssql_virtual_machine" "sqlvm" {
/* count = length(var.instances) */
virtual_machine_id = azurerm_windows_virtual_machine.vm.id
sql_license_type = "PAYG"
r_services_enabled = true
sql_connectivity_port = 1433
sql_connectivity_type = "PRIVATE"
/* sql_connectivity_update_username = var.sqladmin
sql_connectivity_update_password = data.azurerm_key_vault_secret.sqladminpwd.value */
#The storage_configuration block supports the following:
storage_configuration {
disk_type = "NEW" # (Required) The type of disk configuration to apply to the SQL Server. Valid values include NEW, EXTEND, or ADD.
storage_workload_type = "OLTP" # (Required) The type of storage workload. Valid values include GENERAL, OLTP, or DW.
data_settings {
default_file_path = "F:\\Data"
luns = [1]
}
log_settings {
default_file_path = "G:\\Log"
luns = [2]
}
temp_db_settings {
default_file_path = "D:\\TempDb"
luns = [0]
}
}
}
## provider.tf
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = ">=3.0.1"
#configuration_aliases = [azurerm.corp]
}
}
}
variables.tf
# ----------- COMMON --------------------------------
variable "vm_name" {
type = string
}
variable "rg" {
/* type = string */
description = "STACK - resource group"
}
variable "location" {
type = string
description = "STACK - location"
}
# ----------- NIC --------------------------------
variable "subnet_id" {
type = string
description = "STACK - subnet"
}
variable "private_ip" {
}
variable "dns_servers" {
}
# ----------- VM --------------------------------
variable "size" {
description = "VM - size"
type = string
}
variable "win_admin_user" {
sensitive = true
type = string
}
variable "win_admin_pw" {
sensitive = true
type = string
}
variable "os_storage_type" {
type = string
}
variable "vm_tags" {
type = map(any)
}
variable "zone" {
#type = list
description = "VM AZ"
}
# ----------- DOMAIN JOIN --------------------------------
variable "active_directory_domain_name" {
type = string
}
variable "active_directory_username" {
sensitive = true
}
variable "active_directory_password" {
sensitive = true
}
# ----------- SQL --------------------------------
variable "sql_maint_day" {
type = string
description = "SQL - maintenance day"
}
variable "sql_maint_length_min" {
type = number
description = "SQL - maintenance duration (min)"
}
variable "sql_maint_start_hour" {
type = number
description = "SQL- maintenance start (hour of the day)"
}
# ----------- DISKS --------------------------------
/* variable "disk_storage_account" {
type = string
default = "Standard_LRS"
description = "DATA DISKS - storage account type"
}
variable "disk_create_method" {
type = string
default = "Empty"
description = "DATA DISKS - creation method"
}
variable "disk_size0" {
type = number
}
variable "disk_size1" {
type = number
}
variable "disk_size2" {
type = number
}
variable "lun0" {
type = number
default = 0
}
variable "lun1" {
type = number
default = 1
}
variable "lun2" {
default = 2
type = number
} */
/* variable "disks" {
description = "List of disks to create"
type = map(any)
default = {
disk0 = {
name = "data0"
size = 200
create = "Empty"
sa = "Standard_LRS"
lun = 0
}
disk1 = {
name = "data1"
size = 500
create = "Empty"
sa = "Standard_LRS"
lun = 1
}
}
} */
variable "disks" {
type = map(object({
name = string
size = number
create = string
sa = string
lun = number
}))
}
the actual deployment:
main.tf
/*
PS /home/fabrice> Get-AzVMSize -Location northeurope | where-object {$_.Name -like "*ds13*"}
*/
module "uat_set" {
source = "../modules/vm"
providers = {
azurerm = azurerm.cbank-test
}
for_each = var.uat_set
active_directory_domain_name = local.uat_ad_domain
active_directory_password = var.domain_admin_password
active_directory_username = var.domain_admin_username
disks = var.disk_allocation
dns_servers = local.dns_servers
location = local.uat_location
os_storage_type = local.uat_storage_type
private_ip = each.value.private_ip
rg = data.azurerm_resource_group.main
size = each.value.vm_size
sql_maint_day = local.uat_sql_maintenance_day
sql_maint_length_min = local.uat_sql_maintenance_min
sql_maint_start_hour = local.uat_sql_maintenance_start_hour
subnet_id = data.azurerm_subnet.main.id
vm_name = each.key
vm_tags = var.default_tags
win_admin_pw = var.admin_password
win_admin_user = var.admin_username
zone = each.value.zone[0]
}
variable "uat_set" {
description = "List of VM-s to create"
type = map(any)
default = {
UAT-SQLDB-NE-01 = {
private_ip = "192.168.32.8"
vm_size = "Standard_DS13-4_v2"
zone = ["1"]
}
UAT-SQLDB-NE-02 = {
private_ip = "192.168.32.10"
vm_size = "Standard_DS13-4_v2"
zone = ["2"]
}
}
}
variable "disk_allocation" {
type = map(object({
name = string
size = number
create = string
sa = string
lun = number
}))
default = {
"temp" = {
name = "temp"
size = 200
create = "Empty"
sa = "Standard_LRS"
lun = 0
},
"disk1" = {
name = "data1"
size = 500
create = "Empty"
sa = "Standard_LRS"
lun = 1
},
"disk2" = {
name = "data2"
size = 500
create = "Empty"
sa = "Standard_LRS"
lun = 2
}
}
}
locals {
dns_servers = ["192.168.34.5", "192.168.34.10"]
uat_storage_type = "Standard_LRS"
uat_sql_maintenance_day = "Saturday"
uat_sql_maintenance_min = 180
uat_sql_maintenance_start_hour = 23
uat_ad_domain = "civbdev.local"
uat_location = "North Europe"
}
## variables.tf
# new build variables
variable "Environment" {
default = "DEV"
description = "this is the environment variable used to intperpolate with others vars"
}
variable "default_tags" {
  type = map(any)
  default = {
    Environment = "DEV"
    Product     = "dev-XXXtemplateXXX"
    # NOTE(review): original URL read "AllicaBankLtd#dev.azure.com"; Azure
    # DevOps org URLs use "@" — "#" appears to be a copy/paste artifact.
    Terraformed = "https://AllicaBankLtd@dev.azure.com/XXXtemplateXXX/Terraform/DEV"
  }
}
variable "admin_username" {
sensitive = true
}
variable "admin_password" {
sensitive = true
}
variable "domain_admin_username" {
sensitive = true
}
variable "domain_admin_password" {
sensitive = true
}
Resources create OK, except the SQL-part
│ Error: A resource with the ID "/subscriptions/<..redacted...>/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/UAT-SQLDB-NE-02" already exists - to be managed via Terraform this resource needs to be imported into the State. Please see the resource documentation for "azurerm_mssql_virtual_machine" for more information.
│
│ with module.uat_set["UAT-SQLDB-NE-02"].azurerm_mssql_virtual_machine.sqlvm,
│ on ../modules/vm/main.tf line 115, in resource "azurerm_mssql_virtual_machine" "sqlvm":
│ 115: resource "azurerm_mssql_virtual_machine" "sqlvm" {
│
╵
╷
│ Error: A resource with the ID "/subscriptions/<..redacted...>/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/UAT-SQLDB-NE-01" already exists - to be managed via Terraform this resource needs to be imported into the State. Please see the resource documentation for "azurerm_mssql_virtual_machine" for more information.
│
│ with module.uat_set["UAT-SQLDB-NE-01"].azurerm_mssql_virtual_machine.sqlvm,
│ on ../modules/vm/main.tf line 115, in resource "azurerm_mssql_virtual_machine" "sqlvm":
│ 115: resource "azurerm_mssql_virtual_machine" "sqlvm" {
│
╵
Any notions please what I might be missing?
Ta,
Fabrice
UPDATE:
Thanks for those who replied. Just to confirm: it is not an already existing resource. I get this error straight at the time of the creation of these VM-s.
For example, these are my vm-s after the Terraform run (none of them has the sql extension)
Plan even states it will create these:
Terraform will perform the following actions:
# module.uat_set["UAT-SQLDB-NE-01"].azurerm_mssql_virtual_machine.sqlvm will be created
+ resource "azurerm_mssql_virtual_machine" "sqlvm" {
+ id = (known after apply)
+ r_services_enabled = true
+ sql_connectivity_port = 1433
+ sql_connectivity_type = "PRIVATE"
+ sql_license_type = "PAYG"
+ virtual_machine_id = "/subscriptions/..../providers/Microsoft.Compute/virtualMachines/UAT-SQLDB-NE-01"
+ storage_configuration {
+ disk_type = "NEW"
+ storage_workload_type = "OLTP"
+ data_settings {
+ default_file_path = "F:\\Data"
+ luns = [
+ 1,
]
}
+ log_settings {
+ default_file_path = "G:\\Log"
+ luns = [
+ 2,
]
}
+ temp_db_settings {
+ default_file_path = "Z:\\TempDb"
+ luns = [
+ 0,
]
}
}
}
# module.uat_set["UAT-SQLDB-NE-02"].azurerm_mssql_virtual_machine.sqlvm will be created
+ resource "azurerm_mssql_virtual_machine" "sqlvm" {
+ id = (known after apply)
+ r_services_enabled = true
+ sql_connectivity_port = 1433
+ sql_connectivity_type = "PRIVATE"
+ sql_license_type = "PAYG"
+ virtual_machine_id = "/subscriptions/..../providers/Microsoft.Compute/virtualMachines/UAT-SQLDB-NE-02"
+ storage_configuration {
+ disk_type = "NEW"
+ storage_workload_type = "OLTP"
+ data_settings {
+ default_file_path = "F:\\Data"
+ luns = [
+ 1,
]
}
+ log_settings {
+ default_file_path = "G:\\Log"
+ luns = [
+ 2,
]
}
+ temp_db_settings {
+ default_file_path = "Z:\\TempDb"
+ luns = [
+ 0,
]
}
}
}
Plan: 2 to add, 0 to change, 0 to destroy.
Presumably, if these resources would exist somehow - which would be odd, as Tf just created the VM-s - then it would not say in the plan that it will create it now, would it?
So the error is quite the source of my confusion, since if the VM just got created, the creation of the extension failed - how could it possibly be existing?
In this case you should probably just import the modules as the error suggest to your terraform state.
For example
terraform import module.uat_set[\"UAT-SQLDB-NE-02\"].azurerm_mssql_virtual_machine.sqlvm "/subscriptions/<..redacted...>/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/UAT-SQLDB-NE-02"
I am implementing a batch in Azure Container Apps.
When a message comes from a queue in service bus then my batch is run.
For this, I added a scale rule to automatic scale when a message comes from the queue.
It works well, when there is a message it is scaled out from 0 to 1 replica. But when my batch is terminated, the replica restarts the container several times until it is scaled in to 0.
Here is my terraform script to create the container apps :
resource "azapi_resource" "container_app" {
  name      = var.container_app_name
  location  = "northeurope"
  parent_id = data.azurerm_resource_group.resource_group.id

  identity {
    type         = "UserAssigned"
    identity_ids = [data.azurerm_user_assigned_identity.aca_identity.id]
  }

  # azapi types are "<resource-type>@<api-version>"; the original
  # "Microsoft.App/containerApps#2022-03-01" used "#" instead of "@"
  # (copy/paste artifact) and would not resolve.
  type = "Microsoft.App/containerApps@2022-03-01"

  body = jsonencode({
    properties = {
      managedEnvironmentId = data.azapi_resource.container_environment.id
      configuration = {
        secrets = [
          {
            # Fixed spelling ("regitry" -> "registry"); must stay in sync with
            # passwordSecretRef below.
            name  = "registry-password"
            value = data.azurerm_container_registry.acr.admin_password
          },
          {
            name  = "service-bus-connection-string"
            value = data.azurerm_servicebus_namespace.servicebus.default_primary_connection_string
          }
        ]
        # Batch job: no inbound traffic, so no ingress.
        ingress = null
        registries = [
          {
            server            = data.azurerm_container_registry.acr.login_server
            username          = data.azurerm_container_registry.acr.admin_username
            passwordSecretRef = "registry-password"
          }
        ]
      }
      template = {
        containers = [{
          image = "${data.azurerm_container_registry.acr.login_server}/${var.container_repository}:${var.container_image_tag}"
          name  = "dbt-instance"
          resources = {
            cpu    = var.container_cpu
            memory = var.container_memory
          }
          env = [
            {
              name  = "APP_CONFIG_NAME"
              value = var.app_configuration_name
            },
            {
              name  = "AZURE_CLIENT_ID"
              value = data.azurerm_user_assigned_identity.aca_identity.client_id
            }
          ]
        }]
        # KEDA service-bus scaler: scale out from 0 when >= 1 message is queued.
        scale = {
          minReplicas = 0
          maxReplicas = 5
          rules = [{
            name = "queue-based-autoscaling"
            custom = {
              type = "azure-servicebus"
              metadata = {
                queueName    = var.service_bus_queue_name
                messageCount = "1"
              }
              auth = [{
                secretRef        = "service-bus-connection-string"
                triggerParameter = "connection"
              }]
            }
          }]
        }
      }
    }
  })
  # NOTE(review): the quoted snippet was missing this closing brace.
}
How to run my container only one time ?
I managed to do it with Azure Container Instance with the property "restartPolicy=Never"
in terraform documentation i found the follow example:
resource "azurerm_data_factory_pipeline" "test" {
name = .....
resource_group_name = ...
data_factory_id = ...
variables = {
"bob" = "item1"
}
but I need to create a boolean variable, in the portal Azure I have the type field.
how can I set the variable like this:
"variables": {
"END": {
"type": "Boolean",
"defaultValue": false
}
}
Based on your question, if you are asking how to create a variable of type boolean in Terraform, that is done like this:
variable "END" {
type = bool
description = "End variable."
default = false
}
You can reference that variable then in the resource definition:
resource "azurerm_data_factory_pipeline" "test" {
name = .....
resource_group_name = ...
data_factory_id = ...
variables = {
"END" = var.END
}
}
Or alternatively you can set it without defining the Terraform variable like this:
resource "azurerm_data_factory_pipeline" "test" {
name = .....
resource_group_name = ...
data_factory_id = ...
variables = {
"END" = false
}
}
I am having difficulties with defining a variable inside the ami=data.aws_ami.$var.ami_name.id line.
I have tried ami= "${data.aws_ami.(var.ami_name).id}"but in both cases I am getting the:
79: ami = data.aws_ami.(var.ami_name)).id
│
│ An attribute name is required after a dot.
It only works with the string value data.aws_ami.ubuntu-1804.id.
My question is how to concat the variable to the data.aws_ami?
The end goal is to provision based on different OS ec2 instances (Suse,Ubuntu,RHEL) All depending on the variable provided when deploying it.
variable "ami_name" {
default = "ubuntu"
}
data "aws_ami" "amazon" {
most_recent = true
owners = ["amazon"]
filter {
name = "name"
values = ["amzn2-ami-hvm*"]
}
}
data "aws_ami" "ubuntu" {
most_recent = true
owners = ["099720109477"] # Canonical
filter {
name = "name"
values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
}
resource "aws_instance" "linux" {
key_name = var.ami_key_pair_name
//ami = var.ami_id
//I want this to be dynamic so I can deploy either Amazon or Ubuntu in all regions.
ami = data.aws_ami.$var.ami_name.id
//ami = data.aws_ami.ubuntu.id # this works
instance_type = "t2.micro"
tags = {
Name = var.instance_name
}
vpc_security_group_ids = [
aws_security_group.allow-ssh-http.id
]
}
I did the search but could not find anything related. I am using Terraform v0.15.4
The code you show: data.aws_ami.$var.ami_name.id that is not valid terraform syntax.
Here is a possibility for what you are asking:
provider "aws" { region = "us-east-2" }
locals {
# Map of selectable OS names -> AMI lookup parameters. Each entry supplies the
# AMI owner account and a name filter consumed by data.aws_ami.os below.
# Add entries here (and to the ami_name validation regex) to support more OSes.
allowed_os = {
"amazon": {owner: "amazon", filter: "amzn2-ami-hvm*"},
"suse": {owner: "amazon", filter: "*suse*"},
"RHEL": {owner: "amazon", filter: "*RHEL*"},
# 099720109477 is Canonical's AWS account (official Ubuntu AMIs).
"ubuntu": {owner: "099720109477", filter: "*ubuntu-bionic-18.04-amd64-*"},
}
}
variable "ami_name" {
default = "ubuntu"
validation {
condition = can(regex("amazon|suse|RHEL|ubuntu", var.ami_name))
error_message = "Invalid ami name, allowed_values = [amazon suse RHEL ubuntu]."
}
}
# One AMI lookup per entry in local.allowed_os; referenced later as
# data.aws_ami.os[var.ami_name].id, so the OS choice is driven by a variable
# instead of a hardcoded data-source name.
data "aws_ami" "os" {
for_each = local.allowed_os
# Pick the newest AMI matching the owner + name filter.
most_recent = true
owners = [each.value.owner]
filter {
name = "name"
values = [each.value.filter]
}
}
resource "aws_instance" "linux" {
ami = data.aws_ami.os[var.ami_name].id
instance_type = "t2.micro"
# ... todo add arguments here
}
My approach here is to use a for_each in the aws_ami, that will give us an array, we can consume that later in the aws_instance resource:
data.aws_ami.os["ubuntu"].id
Here we use a hardcoded value to access a specific AMI in your code.
data.aws_ami.os[var.ami_name].id
Or this way with the variable that will be provided by user or a config file.
You can add more items to the array to add other operating systems, and same with the filters, you can just change the allowed_os local variable to suit your needs.
As an extra, I added validation to your ami_name variable to match the allowed different OS we use in the for_each, that way we prevent any issues right before they can cause errors.