Module with multiple providers

I have a question regarding the following.
I am using Terraform with the FortiOS provider.
These are my providers in the root-prod:
provider "fortios" {
hostname = "xxxxx"
token = "xxxxx"
insecure = "true"
vdom = "PROD"
}
provider "fortios" {
hostname = "xxxx"
token = "xxxx"
insecure = "true"
vdom = "OPS"
alias = "isops"
}
This is my root-module-prod:
module "AWS_xxx"{
source = "../modules"
name = "AWS_PROD"
prefix_lists = local.aws_prod
providers = {
fortios.dc1 = fortios
fortios.dc2 = fortios.isops
}
}
provider & resource within-child-modules:
terraform {
  required_providers {
    fortios = {
      source                = "fortinetdev/fortios"
      version               = "1.13.1"
      configuration_aliases = [fortios.dc1, fortios.dc2]
    }
  }
}
resource "fortios_router_prefixlist" "prefix_lists" {
name = var.name
dynamic "rule" {
for_each = var.prefix_lists
content {
id = rule.value["id"]
action = rule.value["action"]
prefix = rule.value["prefix"]
ge = rule.value["ge"]
le = rule.value["le"]
}
}
}
My goal is for the above module to create two instances of the resource, one in each of the declared providers.
My issue is that while the resource is created in the first provider (PROD), it isn't created in OPS.
Do you have any clue on this?
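For reference, a resource block inside a child module only attaches to one provider configuration at a time, so a single block will not fan out across both aliases. A minimal sketch (assuming the dc1/dc2 aliases declared above; the resource names here are made up) of creating one copy per alias would be:
# Sketch only, not from the original post: one resource per provider alias.
resource "fortios_router_prefixlist" "prefix_lists_dc1" {
  provider = fortios.dc1
  name     = var.name

  dynamic "rule" {
    for_each = var.prefix_lists
    content {
      id     = rule.value["id"]
      action = rule.value["action"]
      prefix = rule.value["prefix"]
      ge     = rule.value["ge"]
      le     = rule.value["le"]
    }
  }
}

resource "fortios_router_prefixlist" "prefix_lists_dc2" {
  provider = fortios.dc2
  name     = var.name

  # same dynamic "rule" block as above
}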

I never really got this working through Terraform multi-providers.
In our case, I found a way around it through Jenkins parallelism.
We launch multiple environments in parallel, with the credentials saved encrypted on the Jenkins server.

Related

Connection string generated by Atlas cluster using Terraform not in correct format

So I'm using Terraform to create an Atlas cluster, but the output I'm getting is incomplete for my request. Terraform is giving me this:
mongodb+srv://esc-app-dbcluster-devel.b59mwv7.mongodb.net
and what I need should look more like this:
mongodb+srv://admin:admin#esc-app-dbcluster-devel.b59mwv7.mongodb.net/development?retryWrites=true&w=majority
or at least that's the format that works with what I'm testing.
This is my Terraform code:
terraform {
  required_providers {
    mongodbatlas = {
      source  = "mongodb/mongodbatlas"
      version = "1.4.6"
    }
  }
}

provider "mongodbatlas" {
  public_key  = var.atlas_public_key
  private_key = var.atlas_private_key
}

resource "mongodbatlas_cluster" "db-cluster" {
  project_id = var.atlas_project_id
  name       = var.db_cluster_name

  # Provider Settings "block"
  provider_name                = "TENANT"    //free tier
  backing_provider_name        = "AWS"
  provider_region_name         = "US_EAST_1" //free tier
  provider_instance_size_name  = "M0"        //free tier
}

resource "mongodbatlas_database_user" "dbuser" {
  username           = var.db_user
  password           = var.db_password
  project_id         = var.atlas_project_id
  auth_database_name = "admin"

  roles {
    role_name     = "readWrite"
    database_name = var.environment
  }
}

resource "mongodbatlas_project_ip_access_list" "test" {
  project_id = var.atlas_project_id
  cidr_block = var.cidr
}

output "db_cn_string" {
  value = mongodbatlas_cluster.db-cluster.connection_strings.0.standard_srv
}
Code I use to connect:
const environment = process.env.ENVIRONMENT;
const uridb = "mongodb+srv://admin:admin#esc-app-dbcluster-devel.b59mwv7.mongodb.net/development?retryWrites=true&w=majority" // working format
//dburi = "mongodb+srv://esc-app-dbcluster-devel.b59mwv7.mongodb.net" --- format from terraform
console.log('environment:::::', environment);
let ENVIRONMENT_VARIABLES = {
  'process.env.ENVIRONMENT': JSON.stringify(environment),
  'process.env.PORT': JSON.stringify('80'),
  'process.env.MONGO_CONNECTION_STRING': JSON.stringify(uridb)
};
I need a way to generate the proper connection string.
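Not part of the original post, but as a sketch: the standard_srv attribute only carries the scheme and host, so one option is to splice the credentials and database name in yourself within an output. The output name below is made up, and the resulting string should normally be marked sensitive because it embeds a password:
output "db_cn_string_with_creds" {
  sensitive = true

  # Splice "user:password@" into the host-only SRV string and append the
  # database name plus the query options the application expects.
  value = format(
    "%s/%s?retryWrites=true&w=majority",
    replace(
      mongodbatlas_cluster.db-cluster.connection_strings.0.standard_srv,
      "mongodb+srv://",
      "mongodb+srv://${var.db_user}:${var.db_password}@"
    ),
    var.environment
  )
}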

Issues using certificates in Terraform

I'm facing some issues while dealing with certificates in Terraform.
Before writing the code below, I had already made a CSR request.
I should mention that certificate_pem and private_key are both encoded in base64; in particular, private_key is encrypted.
In the code below, I would like to use private_key and certificate_pem.
resource "kubernetes_secret" "my-secret" {
data = {
"tls.crt" = data.my_data.my-configuration-secret.data["certificate_pem"]
"tls.key" = data.my_data.my-configuration-secret.data["private_key"]
}
metadata {
name = "my-secret"
namespace = "my-namespace"
}
}
Now, in the Ingress resource, I use this secret name:
resource "kubernetes_ingress" "my-sni" {
metadata {
name = "my-sni"
namespace = "my_namespace"
annotations = {
"kubernetes.io/ingress.class" = "my_namespace"
"kubernetes.io/ingress.allow-http" = "true"
"nginx.ingress.kubernetes.io/ssl-redirect" = "false"
"nginx.ingress.kubernetes.io/force-ssl-redirect" = "false"
"nginx.ingress.kubernetes.io/ssl-passthrough" = "false"
"nginx.ingress.kubernetes.io/secure-backends" = "false"
"nginx.ingress.kubernetes.io/proxy-body-size" = "0"
"nginx.ingress.kubernetes.io/proxy-read-timeout" = "3600000"
"nginx.ingress.kubernetes.io/rewrite-target" = "/$1"
"nginx.ingress.kubernetes.io/proxy-send-timeout" = "400000"
"nginx.ingress.kubernetes.io/backend-protocol" = "HTTP"
}
}
spec {
tls {
hosts = ["my_host"]
secret_name = "my-secret"
}
rule {
host = "my_host"
http {
path {
path = "/?(.*)"
backend {
service_name = "my-service"
service_port = 8080
}
}
}
}
}
}
Everything is fine with terraform apply, but I can't reach the host to check whether I can access the microservice.
Someone told me I have to decrypt the private_key, but I don't know how to do that.
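Not from the original post, but as a hedged sketch: if the stored values are base64-encoded PEM, Terraform's built-in base64decode() can strip that extra layer before the kubernetes provider encodes the secret itself. Note this only removes the base64 wrapping; an encrypted private key still has to be decrypted (for example with openssl) before it is stored:
resource "kubernetes_secret" "my-secret" {
  type = "kubernetes.io/tls"

  data = {
    # The provider base64-encodes "data" on its own, so decode the already-encoded
    # PEM values here to avoid a double-encoded (and therefore unusable) TLS secret.
    "tls.crt" = base64decode(data.my_data.my-configuration-secret.data["certificate_pem"])
    "tls.key" = base64decode(data.my_data.my-configuration-secret.data["private_key"])
  }

  metadata {
    name      = "my-secret"
    namespace = "my-namespace"
  }
}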

Terraform for_each Azure customer managed keys

I am trying to use a for_each to create multiple Azure storage accounts together with their secrets and keys.
So far so good; I managed to create everything as intended using this code:
variable "storage-foreach" {
type = list(string)
default = ["storage1", "storage2"]
}
variable "key-name" {
type = list(string)
default = ["key1", "key2"]
}
resource "azurerm_storage_account" "storage-foreach" {
for_each = toset(var.storage-foreach)
access_tier = "Hot"
account_kind = "StorageV2"
account_replication_type = "LRS"
account_tier = "Standard"
location = var.location
name = each.value
resource_group_name = azurerm_resource_group.tenant-testing-test.name
lifecycle {
prevent_destroy = false
}
}
resource "azurerm_key_vault_secret" "storagesctforeach" {
for_each = toset(var.storage-foreach)
key_vault_id = azurerm_key_vault.tenantsnbshared.id
name = each.value
value = azurerm_storage_account.storage-foreach[each.key].primary_connection_string
content_type = "${each.value} Storage Account Connection String"
lifecycle {
prevent_destroy = false
}
}
resource "azurerm_storage_table" "tableautomation" {
for_each = toset(var.storage-foreach)
name = "UserAnswer"
storage_account_name = azurerm_storage_account.storage-foreach[each.key].name
lifecycle {
prevent_destroy = false
}
}
resource "azurerm_key_vault_key" "client-key" {
for_each = toset(var.key-name)
key_vault_id = azurerm_key_vault.tenantsnbshared.id
name = "Key-Client-${each.value}"
key_opts = [
"decrypt",
"encrypt",
"sign",
"unwrapKey",
"verify",
"wrapKey",
]
key_type = "RSA"
key_size = 2048
}
This block of code works perfectly fine until I try to create a customer managed key resource and automatically assign the keys to the storage accounts.
resource "azurerm_storage_account_customer_managed_key" "storage-managed-key" {
for_each = toset(var.key-name)
key_name = each.value
key_vault_id = azurerm_key_vault.tenantsnbshared.id
storage_account_id = azurerm_storage_account.storage-foreach[each.value].id
key_version = "current"
}
The problem I am facing is that, since I created all the previous resources with a for_each, the resource above expects an [each.value] in the storage account id. I supplied it, but that expression iterates over var.key-name, so it throws an error because those strings don't exist among my storage accounts.
I was wondering if you can help me think of a good practice to automate this procedure and make sure it picks up the correct key to encrypt the correct storage account in the resource group.
Thank you very much in advance, everyone; I'm sorry, but I have been struggling with this block of code and how to automate it.
The problem is that you are trying to access var.storage-foreach items by using var.key-name keys.
I think the following works for you:
resource "azurerm_storage_account_customer_managed_key" "storage-managed-key" {
count = length(var.key-name)
key_name = var.key-name[count.index]
key_vault_id = azurerm_key_vault.tenantsnbshared.id
storage_account_id = azurerm_storage_account.storage-foreach[var.storage-foreach[count.index]].id
key_version = "current"
}
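If you'd rather keep for_each, an equivalent sketch pairs the two lists with zipmap. This assumes both lists have the same length and are meant to match positionally:
resource "azurerm_storage_account_customer_managed_key" "storage-managed-key" {
  # zipmap builds { "storage1" = "key1", "storage2" = "key2" }, so each.key is
  # the storage account name and each.value is the matching key vault key name.
  for_each = zipmap(var.storage-foreach, var.key-name)

  key_name           = each.value
  key_vault_id       = azurerm_key_vault.tenantsnbshared.id
  storage_account_id = azurerm_storage_account.storage-foreach[each.key].id
  key_version        = "current"
}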

How to configure App Service to use Azure AD login from Terraform

It is easy to configure a web App Service to use Azure AD login manually via the official documentation. However, how can I achieve this from Terraform? I've searched for a while and didn't find any examples; if you happen to know of one, it would be nice to share it with me.
The following code is how I created the resource group and provisioned the web application:
terraform {
  backend "azurerm" {}
}

terraform {
  required_version = ">= 0.13"
}

resource "azurerm_resource_group" "tf_resource_group" {
  name     = "RG_${var.application_name}_${var.environment}"
  location = var.location

  tags = {
    environment = var.environment
    DeployedBy  = "terraform"
  }
}

resource "azurerm_app_service_plan" "tf_service_plan" {
  name                = "${var.application_name}-${var.environment}-asp"
  location            = azurerm_resource_group.tf_resource_group.location
  resource_group_name = azurerm_resource_group.tf_resource_group.name
  kind                = "Linux"
  reserved            = true

  sku {
    tier = "Standard"
    size = "S1"
  }

  tags = {
    environment = var.environment
    DeployedBy  = "terraform"
  }
}

resource "azurerm_app_service" "tf_app_service" {
  name                = var.application_name
  location            = azurerm_resource_group.tf_resource_group.location
  resource_group_name = azurerm_resource_group.tf_resource_group.name
  app_service_plan_id = azurerm_app_service_plan.tf_service_plan.id

  site_config {
    always_on        = true
    linux_fx_version = "DOCKER|${var.acr_name}.azurecr.io/${var.img_repo_name}:${var.tag}"
  }

  app_settings = {
    DOCKER_REGISTRY_SERVER_URL          = "$DRSRUL"
    WEBSITES_ENABLE_APP_SERVICE_STORAGE = "false"
    DOCKER_REGISTRY_SERVER_USERNAME     = "$ACRNAME"
    DOCKER_REGISTRY_SERVER_PASSWORD     = "$PW"
  }

  identity {
    type = "SystemAssigned"
  }
}
I believe your "azurerm_app_service" resource block needs an auth_settings block with an active_directory block. Example:
auth_settings {
  enabled = true

  active_directory {
    client_id = "${azuread_application.example.application_id}"
  }

  default_provider = "AzureActiveDirectory"
  issuer           = "https://sts.windows.net/xxxxxxx-xxxx-xxx-xxxx-xxxtenantID/"
}

Terraform AWS optional logging for S3 bucket

I am trying to create an S3 bucket using Terraform from the examples in the link
https://www.terraform.io/docs/providers/aws/r/s3_bucket.html
I have created an S3 module.
The issue I am facing is that for certain buckets I do not want logging enabled.
How can this be accomplished in Terraform?
logging {
  target_bucket = "${aws_s3_bucket.log_bucket.id}"
  target_prefix = "log/"
}
Using an empty string for target_bucket and target_prefix causes Terraform to attempt to create the target bucket.
Also, I am trying to use a module.
Using the newer dynamic block support in Terraform 0.12+, we pass a single-item list containing the logging settings if we want logging, like so:
variable "logging" {
type = list
default = []
description = "to enable logging set this to [{target_bucket = 'xxx' target_prefix = 'logs/'}]"
}
resource "aws_s3_bucket" "s3bucket" {
dynamic "logging" {
for_each = [for l in var.logging : {
target_bucket = l.target_bucket
target_prefix = l.target_prefix
}]
content {
target_bucket = logging.value.target_bucket
target_prefix = logging.value.target_prefix
}
}
}
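As a usage sketch (the module path and bucket names here are made up), a caller that wants logging passes the single-item list, and a caller that doesn't simply omits the variable:
module "s3_with_logs" {
  source = "./modules/s3"

  logging = [
    {
      target_bucket = aws_s3_bucket.log_bucket.id
      target_prefix = "logs/"
    },
  ]
}

module "s3_without_logs" {
  source = "./modules/s3"
  # logging defaults to [], so the dynamic block renders nothing.
}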
If you want to make the values of logging optional, first make your module aws_s3_bucket.tf:
resource "aws_s3_bucket" "b" {
bucket = "my-tf-test-bucket"
acl = "private"
logging = "${var.logging}"
}
variable "logging" {
type = "list"
default = []
}
Then, in a sub-folder (e.g. example/), add your template module.tf:
module "s3" {
source = "../"
logging = [
{
target_bucket = "loggingbucketname"
target_prefix = "log/"
},
]
}
provider "aws" {
region = "eu-west-1"
version = "2.4.0"
}
This is your version that has logging.
Next modify your module.tf to look like
module "s3" {
source = "../"
}
provider "aws" {
region = "eu-west-1"
version = "2.4.0"
}
That's your version without logging. This worked with:
Terraform v0.11.11
+ provider.aws v2.4.0
Updated: this is the answer for v0.12.5.
The module is now:
resource "aws_s3_bucket" "b" {
bucket = "my-tf-test-bucket"
acl = "private"
logging {
target_bucket = var.logging["target_bucket"]
target_prefix = var.logging["target_prefix"]
}
}
variable "logging" {
type=map
default={
target_bucket = ""
target_prefix = ""
}
}
Using the module with logging becomes (your path to modules might differ):
module "s3" {
source = "../"
logging={
target_bucket = aws_s3_bucket.log_bucket.id
target_prefix = "log/"
}
}
provider "aws" {
region = "eu-west-1"
version = "2.34.0"
}
resource "aws_s3_bucket" "log_bucket" {
bucket = "my-tf-log-bucket"
acl = "private"
}
and without:
module "s3" {
source = "../"
}
provider "aws" {
region = "eu-west-1"
version = "2.34.0"
}