✦ ➜ terraform --version
Terraform v0.12.28
+ provider.aws v2.60.0
+ provider.kubernetes v1.11.3
+ provider.local v1.4.0
+ provider.null v2.1.2
+ provider.random v2.2.1
+ provider.template v2.1.2
I just added two new files for an SSL certificate, and now the plan shows:
# module.ssl-certificate.aws_iam_server_certificate.cert must be replaced
+/- resource "aws_iam_server_certificate" "cert" {
~ arn = "arn:aws:iam::XXX:server-certificate/xxx-ssl-certxxx" -> (known after apply)
~ certificate_body = "721e444119806928d19ef830740057c52580ba71" -> "cd6882dff1edb0223a20fe5f1c2b4b594f07526f" # forces replacement
- certificate_chain = "7e85cb3e40dff5a9f83ff75576d71fd98fdfdd89" -> null # forces replacement
~ id = "XXX" -> (known after apply)
~ name = "XXX-ssl-cert20200716210119477600000001" -> (known after apply)
name_prefix = "XXX-ssl-cert"
path = "/"
private_key = (sensitive value)
}
Each time I run terraform apply I am asked to "replace" the certificate, and each time a new one is created.
The files (.crt, .key) are not changing.
/main.tf
module "ssl-certificate" {
source = "./modules/certificates"
certificate = {
name = "xxx-ssl-cert"
body = file("assets/ssl/_.xxx.com/xxx.crt")
private_key = file("assets/ssl/_.xxx.com/xxx.key")
}
team = var.team
project = var.project
component = ""
environment = var.environment
tags = module.project_config.tags
}
/modules/certificates/main.tf
resource "aws_iam_server_certificate" "cert" {
name_prefix = var.certificate.name
certificate_body = var.certificate.body
private_key = var.certificate.private_key
lifecycle {
create_before_destroy = true
}
}
What is wrong? Prior to this I had a self-signed cert and never saw this behavior. I added the new certs, and started getting these "replace" plans on every apply.
Any ideas?
I would suggest using a lifecycle block with ignore_changes. Example:
lifecycle {
  ignore_changes = [certificate_body]
}
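In the questioner's module, that would go inside the existing lifecycle block, roughly like this (a sketch; note that ignore_changes will also mask genuine certificate updates, so you have to force a replacement yourself when you really do rotate the cert):
resource "aws_iam_server_certificate" "cert" {
  name_prefix      = var.certificate.name
  certificate_body = var.certificate.body
  private_key      = var.certificate.private_key

  lifecycle {
    create_before_destroy = true
    # Suppress the spurious diff on the certificate body.
    ignore_changes = [certificate_body]
  }
}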
To prevent Terraform from recreating the certificate when its contents haven't changed:
Move the certificate chain contents out of certificate_body and into the certificate_chain argument of the aws_iam_server_certificate resource, and
ensure the line endings in the certificate_body and certificate_chain contents match the actual files (in my case the certs used LF line endings).
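A sketch of what that split might look like, assuming the module's certificate object gains a hypothetical chain attribute and the intermediates live in a separate, hypothetical xxx-chain.crt file:
# /main.tf (other module arguments omitted)
module "ssl-certificate" {
  source = "./modules/certificates"

  certificate = {
    name        = "xxx-ssl-cert"
    body        = file("assets/ssl/_.xxx.com/xxx.crt")        # leaf certificate only
    chain       = file("assets/ssl/_.xxx.com/xxx-chain.crt")  # hypothetical file holding the intermediate chain
    private_key = file("assets/ssl/_.xxx.com/xxx.key")
  }
}

# /modules/certificates/main.tf
resource "aws_iam_server_certificate" "cert" {
  name_prefix       = var.certificate.name
  certificate_body  = var.certificate.body
  certificate_chain = var.certificate.chain
  private_key       = var.certificate.private_key
}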
As #beta mentioned, the solution is to run the dos2unix command on your cert file to convert it from DOS to UNIX format, in particular the end-of-line characters.
I'm trying to define a config block for two environments, local and cloud, using a conditional expression, but I get an error for the encrypt attribute of the S3 backend: 'encrypt' expected type 'bool', got unconvertible type 'string'.
If I remove the conditional it works, but I need to choose between the two environments, so I have to use the condition.
The config block code:
config = local.is_local_environment ? {
    # Local configuration
    path = "${path_relative_to_include()}/terraform.tfstate"
  } : {
    # Cloud configuration
    bucket         = "my-bucket"
    key            = "terraform/${path_relative_to_include()}/terraform.tfstate"
    region         = local.region
    encrypt        = true
    dynamodb_table = "terraform-lock"
  }
}
The error most likely happens because both results of a conditional must have the same type, so the two objects get unified by converting their values to strings, and encrypt is no longer a bool. Beyond that, the local backend doesn't take any of this configuration, so use null for the local case:
config = local.is_local_environment ? null : {
    # Cloud configuration
    bucket         = "my-bucket"
    key            = "terraform/${path_relative_to_include()}/terraform.tfstate"
    region         = local.region
    encrypt        = true
    dynamodb_table = "terraform-lock"
  }
}
I'm running Terraform from the VS Code editor, which uses PowerShell as its default shell, and I get the same error whether I validate or run terraform init/plan/apply through VS Code, an external PowerShell window, or CMD.
The code ran without any issues until I added the virtual machine creation code. I have included variables.tf, terraform.tfvars, and the main Terraform code below.
terraform.tfvars
web_server_location = "West US 2"
resource_prefix = "web-server"
web_server_address_space = "1.0.0.0/22"
web_server_address_prefix = "1.0.1.0/24"
Environment = "Test"
variables.tf
variable "web_server_location" {
type = string
}
variable "resource_prefix" {
type = string
}
variable "web_server_address_space" {
type = string
}
#variable for network range
variable "web_server_address_prefix" {
type = string
}
#variable for Environment
variable "Environment" {
type = string
}
terraform_example.tf
# Configure the Azure Provider
provider "azurerm" {
# whilst the `version` attribute is optional, we recommend pinning to a given version of the Provider
version = "=2.0.0"
features {}
}
# Create a resource group
resource "azurerm_resource_group" "example_rg" {
name = "${var.resource_prefix}-RG"
location = var.web_server_location
}
# Create a virtual network within the resource group
resource "azurerm_virtual_network" "example_vnet" {
name = "${var.resource_prefix}-vnet"
resource_group_name = azurerm_resource_group.example_rg.name
location = var.web_server_location
address_space = [var.web_server_address_space]
}
# Create a subnet within the virtual network
resource "azurerm_subnet" "example_subnet" {
name = "${var.resource_prefix}-subnet"
resource_group_name = azurerm_resource_group.example_rg.name
virtual_network_name = azurerm_virtual_network.example_vnet.name
address_prefix = var.web_server_address_prefix
}
# Create a Network Interface
resource "azurerm_network_interface" "example_nic" {
name = "${var.resource_prefix}-NIC"
location = azurerm_resource_group.example_rg.location
resource_group_name = azurerm_resource_group.example_rg.name
ip_configuration {
name = "internal"
subnet_id = azurerm_subnet.example_subnet.id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.example_public_ip.id
}
}
# Create a Public IP
resource "azurerm_public_ip" "example_public_ip" {
name = "${var.resource_prefix}-PublicIP"
location = azurerm_resource_group.example_rg.location
resource_group_name = azurerm_resource_group.example_rg.name
allocation_method = var.Environment == "Test" ? "Static" : "Dynamic"
tags = {
environment = "Test"
}
}
# Creating resource NSG
resource "azurerm_network_security_group" "example_nsg" {
name = "${var.resource_prefix}-NSG"
location = azurerm_resource_group.example_rg.location
resource_group_name = azurerm_resource_group.example_rg.name
# Security rule can also be defined with resource azurerm_network_security_rule, here just defining it inline.
security_rule {
name = "RDPInbound"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "3389"
source_address_prefix = "*"
destination_address_prefix = "*"
}
tags = {
environment = "Test"
}
}
# NIC and NSG association
resource "azurerm_network_interface_security_group_association" "example_nsg_association" {
network_interface_id = azurerm_network_interface.example_nic.id
network_security_group_id = azurerm_network_security_group.example_nsg.id
}
# Creating Windows Virtual Machine
resource "azurerm_virtual_machine" "example_windows_vm" {
name = "${var.resource_prefix}-VM"
location = azurerm_resource_group.example_rg.location
resource_group_name = azurerm_resource_group.example_rg.name
network_interface_ids = [azurerm_network_interface.example_nic.id]
vm_size = "Standard_B1s"
delete_os_disk_on_termination = true
storage_image_reference {
publisher = "MicrosoftWindowsServer"
offer = "WindowsServerSemiAnnual"
sku = "Datacenter-Core-1709-smalldisk"
version = "latest"
}
storage_os_disk {
name = "myosdisk1"
caching = "ReadWrite"
create_option = "FromImage"
storage_account_type = "Standard_LRS"
}
os_profile {
computer_name = "hostname"
admin_username = "adminuser"
admin_password = "Password1234!"
}
os_profile_windows_config {
disable_password_authentication = false
}
tags = {
environment = "Test"
}
}
Error:
PS C:\Users\e5605266\Documents\MyFiles\Devops\Terraform> terraform init
There are some problems with the configuration, described below.
The Terraform configuration must be valid before initialization so that
Terraform can determine which modules and providers need to be installed.
Error: Invalid character
on terraform_example.tf line 89, in resource "azurerm_virtual_machine" "example_windows_vm":
89: location = azurerm_resource_group.example_rg.location
This character is not used within the language.
Error: Invalid expression
on terraform_example.tf line 89, in resource "azurerm_virtual_machine" "example_windows_vm":
89: location = azurerm_resource_group.example_rg.location
Expected the start of an expression, but found an invalid expression token.
Error: Argument or block definition required
on terraform_example.tf line 90, in resource "azurerm_virtual_machine" "example_windows_vm":
90: resource_group_name = azurerm_resource_group.example_rg.name
An argument or block definition is required here. To set an argument, use the
equals sign "=" to introduce the argument value.
Error: Invalid character
on terraform_example.tf line 90, in resource "azurerm_virtual_machine" "example_windows_vm":
90: resource_group_name = azurerm_resource_group.example_rg.name
This character is not used within the language.
I've encountered this problem myself in several different contexts, and it does have a common solution which is no fun at all: manually typing the code back in...
This resource block seems to be where it runs into problems:
resource "azurerm_virtual_machine" "example_windows_vm" {
name = "${var.resource_prefix}-VM"
location = azurerm_resource_group.example_rg.location
resource_group_name = azurerm_resource_group.example_rg.name
network_interface_ids = [azurerm_network_interface.example_nic.id]
vm_size = "Standard_B1s"
delete_os_disk_on_termination = true
storage_image_reference {
publisher = "MicrosoftWindowsServer"
offer = "WindowsServerSemiAnnual"
sku = "Datacenter-Core-1709-smalldisk"
version = "latest"
}
storage_os_disk {
name = "myosdisk1"
caching = "ReadWrite"
create_option = "FromImage"
storage_account_type = "Standard_LRS"
}
os_profile {
computer_name = "hostname"
admin_username = "adminuser"
admin_password = "Password1234!"
}
os_profile_windows_config {
disable_password_authentication = false
}
tags = {
environment = "Test"
}
}
Try copying that back into your editor as is. I cannot see any problematic characters in it, and ironically StackOverflow may have done you a solid and filtered them out. Literally copy/pasting it over the existing block may remedy the situation.
I have seen Terraform examples online with stylish double quotes (which aren't ASCII double quotes and won't work) many times. That may be what you are seeing.
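For illustration, a hypothetical snippet pasted with typographic quotes fails with this same kind of "Invalid character" error, while the ASCII-quoted version parses fine:
# Fails to parse: “ and ” are typographic quotes, not ASCII double quotes
# name     = “web-server-RG”

# Parses fine: plain ASCII double quotes and ordinary spaces
resource "azurerm_resource_group" "example_rg" {
  name     = "web-server-RG"
  location = "West US 2"
}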
Beyond that, you'd need to push your code to GitHub or similar so I can see the raw bytes for myself.
On the off chance this helps someone who runs into this error and finds it on Google, I thought I would post my situation and how I fixed it.
I have an old demo Terraform infrastructure that I revisited after months and, long story short, I issued this command two days ago and forgot about it:
terraform plan -out=plan.tf
This creates a binary (zip) archive of the plan. When I came back two days later and ran terraform init, my terminal scrolled garbage and "This character is not used within the language." for about 7 seconds. Because of the .tf extension, Terraform was trying to parse the zip data as configuration and choking on it.
By moving individual .tf files to a temp directory and checking their validity with terraform init, I found the culprit, deleted it, and functionality was restored.
Be careful when exporting your plan files, folks!
I ran into the same problem and found this page.
I solved the issue and decided to post my fix here.
I opened my file in Notepad++ and selected View → Show Symbol → Show All Characters.
I removed all the TAB characters and replaced them with spaces.
In my case, this fully resolved the problem.
In my case, when I ran into the same problem ("This character is not used within the language"), I found the encoding of the file was UTF-16 (it had been generated from PowerShell). Changing the file encoding to UTF-8 (as mentioned in this question) solved the issue.
I found I got this most often when moving from Windows to Linux; the *.tf files don't like the Windows tabs and line breaks.
I tried some of the same tools I use when I have this problem with *.sh files, but so far I've resorted to manually cleaning up the lines shown in the error.
In my case, the .tf file was generated by the command terraform show -no-color > my_problematic.tf, and its encoding was UTF-16 LE with BOM; converting it to UTF-8 fixed my issue.
I want to be able to change the auth_token of a Redis cluster, but when I change it, the terraform plan tells me it wants to recreate the cluster:
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
-/+ destroy and then create replacement
Terraform will perform the following actions:
# module.live_presentation_elasticache.aws_elasticache_replication_group.main[0] must be replaced
-/+ resource "aws_elasticache_replication_group" "main" {
+ apply_immediately = (known after apply)
at_rest_encryption_enabled = true
~ auth_token = (sensitive value)
auto_minor_version_upgrade = false
automatic_failover_enabled = true
~ configuration_endpoint_address = "clustercfg.my-project.abcde5.use1.cache.amazonaws.com" -> (known after apply)
engine = "redis"
engine_version = "5.0.3"
~ id = "my-project" -> (known after apply)
maintenance_window = "sun:07:00-sun:08:00"
~ member_clusters = [
- "my-project-0001-001",
- "my-project-0002-001",
- "my-project-0003-001",
] -> (known after apply)
node_type = "cache.t2.micro"
~ number_cache_clusters = 3 -> (known after apply)
parameter_group_name = "default.redis5.0.cluster.on"
port = 6379
+ primary_endpoint_address = (known after apply)
replication_group_id = "my-project"
security_group_ids = [
"sg-0600b285b055c64b1",
]
~ security_group_names = [] -> (known after apply)
snapshot_retention_limit = 30
snapshot_window = "06:00-07:00"
subnet_group_name = "my-project-redis-subnet"
tags = {
"Flavor" = "dev"
"Name" = "live-presentation"
}
transit_encryption_enabled = true
cluster_mode {
num_node_groups = 3
replicas_per_node_group = 0
}
}
Plan: 1 to add, 0 to change, 1 to destroy.
I want to avoid recreating the Redis cluster, for the obvious reasons: I don't want to lose data or incur downtime.
Is there a less destructive way to update the auth_token? Preferably a Terraform solution, but that's not a requirement.
There is something I am not understanding about Terraform variables. I am getting prompted for two variables when I run terraform apply. I don't think I should be prompted for any, since I defined them all in terraform.tfvars. I am prompted for applicationNamespace and staticIpName, but I am not sure why. What am I misunderstanding?
I created a file (terraform.tfvars):
#--------------------------------------------------------------
# General
#--------------------------------------------------------------
cluster = "reddiyo-development"
project = "<MYPROJECTID>"
region = "us-central1"
credentialsLocation = "<MYCERTLOCATION>"
bucket = "reddiyo-terraform-state"
vpcLocation = "us-central1-b"
network = "default"
staticIpName = "dev-env-ip"
#--------------------------------------------------------------
# Specific To NODE
#--------------------------------------------------------------
terraformPrefix = "development"
mainNodeName = "primary-pool"
nodeMachineType = "n1-standard-1"
#--------------------------------------------------------------
# Specific To Application
#--------------------------------------------------------------
applicationNamespace = "application"
I also have a Terraform script:
variable "cluster" {}
variable "project" {}
variable "region" {}
variable "bucket" {}
variable "terraformPrefix" {}
variable "mainNodeName" {}
variable "vpcLocation" {}
variable "nodeMachineType" {}
variable "credentialsLocation" {}
variable "network" {}
variable "applicationNamespace" {}
variable "staticIpName" {}
data "terraform_remote_state" "remote" {
backend = "gcs"
config = {
bucket = "${var.bucket}"
prefix = "${var.terraformPrefix}"
}
}
provider "google" {
//This needs to be updated to wherever you put your credentials
credentials = "${file("${var.credentialsLocation}")}"
project = "${var.project}"
region = "${var.region}"
}
resource "google_container_cluster" "gke-cluster" {
name = "${var.cluster}"
network = "${var.network}"
location = "${var.vpcLocation}"
remove_default_node_pool = true
# node_pool {
# name = "${var.mainNodeName}"
# }
node_locations = [
"us-central1-a",
"us-central1-f"
]
//Get your credentials for the newly created cluster so that microservices can be deployed
provisioner "local-exec" {
command = "gcloud config set project ${var.project}"
}
provisioner "local-exec" {
command = "gcloud container clusters get-credentials ${var.cluster} --zone ${var.vpcLocation}"
}
}
resource "google_container_node_pool" "primary_pool" {
name = "${var.mainNodeName}"
cluster = "${var.cluster}"
location = "${var.vpcLocation}"
node_count = "2"
node_config {
machine_type = "${var.nodeMachineType}"
oauth_scopes = [
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/service.management.readonly",
"https://www.googleapis.com/auth/servicecontrol",
"https://www.googleapis.com/auth/trace.append",
]
}
management {
auto_repair = true
auto_upgrade = true
}
autoscaling {
min_node_count = 2
max_node_count = 10
}
}
# //Reserve a Static IP
resource "google_compute_address" "ip_address" {
name = "${var.staticIpName}"
}
//Install Ambassador
module "ambassador" {
source = "modules/ambassador"
applicationNamespace = "${var.applicationNamespace}"
}
You can try to force it to read your variables by using:
terraform apply -var-file=<path_to_your_vars>
For reference, in case anybody faces a similar issue:
terraform.tfvars is the default variable file name, from which Terraform reads variables automatically.
If any other file name is used, it needs to be passed on the command line, e.g. terraform plan -var-file=whateverName.tfvars (see the example after the list below).
Terraform loads variables in the following order:
Environment variables (TF_VAR_name)
terraform.tfvars
terraform.tfvars.json
Any *.auto.tfvars or *.auto.tfvars.json files
Any -var or -var-file options on the command line, in the order they are provided
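For instance, variables can also be placed in an automatically loaded file; a hypothetical dev.auto.tfvars next to the configuration is picked up without any extra command-line flags:
# dev.auto.tfvars (hypothetical file name) -- loaded automatically because of the .auto.tfvars suffix
applicationNamespace = "application"
staticIpName         = "dev-env-ip"
Note that later sources in the list above override earlier ones when the same variable is set in several places.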
I'm trying to make an HTTPS request in Lua with a certificate.
I recently got a COMODO SSL certificate.
I've tried many tutorials on the internet, but to no avail.
I found this blog's proposal very interesting:
I am not able to execute the request on Linux/OpenWRT/Lua 5.1.
COMODO has provided me with the following files:
AddTrustExternalCARoot.crt
my_domain_com.crt
COMODORSAAddTrustCA.crt
COMODORSADomainValidationSecureServerCA.crt
And in this blog he mentions these files:
key = "/root/client.key"
certificate="/root/client.crt",
cafile="/root/ca.crt"
How do I convert COMODO's .crt files to those mentioned in the blog?
Note: I tried to download with curl and get, but it did not work.
I've described the details in a blog post; basically, you need to specify the mode and the certificate file for the ssl.wrap call:
local params = {
  mode = "client",
  protocol = "tlsv1",
  cafile = "/path/to/downloaded/cacert.pem", --<-- added cafile parameters
  verify = "peer", --<-- changed "none" to "peer"
  options = "all",
}
If you need to convert a .crt to a .pem file, then the following SO answer may help. I haven't tried with .crt files, but the examples I have work with .pem files.
I solved it with this code:
module("https", package.seeall)
local socket = require "socket"
local http = require "socket.http"
local ssl = require "ssl"
local ltn12 = require "ltn12"
local try = socket.try
local protect = socket.protect
local DEFAULT_PROTOCOL = "sslv23"
local DEFAULT_CAFILE = "/etc/ssl/certs/ca-certificates.crt"
local DEFAULT_VERIFY = "peer"
local DEFAULT_OPTIONS = "all"
local DEFAULT_CIPHERS = "ADH-AES256-SHA:ADH-AES128-SHA:HIGH:MEDIUM"
local DEFAULT_HTTPS_PORT = 443
local https_mt = {
-- Create proxy functions for each call through the metatable
__index = function(tbl, key)
local f = function(prxy, ...)
local c = prxy.c
return c[key](c, ...)
end
tbl[key] = f -- Save new proxy function in cache for speed
return f
end
}
local function new_create(params)
return function()
local t = { c = try(socket.tcp()) }
function t:connect(host, port)
try(self.c:connect(host, port))
self.c = try(ssl.wrap(self.c, params))
try(self.c:dohandshake())
return 1
end
return setmetatable(t, https_mt)
end
end
local function request_generic(args)
local sslparams = {
mode = "client",
protocol = args.protocol or DEFAULT_PROTOCOL,
cafile = args.cafile or DEFAULT_CAFILE,
verify = args.verify or DEFAULT_VERIFY,
options = args.options or DEFAULT_OPTIONS,
ciphers = args.ciphers or DEFAULT_CIPHERS
}
local req = {
url = args.url,
port = args.port or DEFAULT_HTTPS_PORT,
sink = args.sink,
method = args.method,
headers = args.headers,
source = args.source,
step = args.step,
proxy = args.proxy, -- Buggy?
redirect = args.redirect,
create = new_create(sslparams)
}
return http.request(req)
end
local function request_simple(url, body)
local tbl = { }
local req = {
url = url,
sink = ltn12.sink.table(tbl)
}
if body then
req.method = "POST"
req.source = ltn12.source.string(body)
req.headers = {
["Content-length"] = #body,
["Content-type"] = "application/x-www-form-urlencoded"
}
end
local _, status, headers = request_generic(req)
return table.concat(tbl), status, headers
end
function request(req_or_url, body)
if type(req_or_url) == "string" then
return request_simple(req_or_url, body)
else
return request_generic(req_or_url)
end
end