Terraform Object Lock Configuration: AccessDenied - amazon-s3

I have this Terraform script that works perfectly fine for the whole S3 module, but it cannot create the Object Lock configuration resource and returns this message:
error creating S3 bucket (bucket-name) Object lock configuration: AccessDenied: AccessDenied
Status code 403, request id: ..., host id: ...
Despite the message, the S3 bucket is actually created, but I still get this error. Maybe there is something missing in the policy?
Here is my code.
module "s3_bucket" {
source = "terraform-aws-modules/s3-bucket/aws"
version = "3.4.0"
bucket = local.bucket_name
...
object_lock_enabled = true
attach_policy = true
policy = data.aws_iam_policy_document.voucher_s3_bucket.json
versioning = {
status = var.status
mfa_delete = var.mfa_delete
}
server_side_encryption_configuration = {
rule = {
apply_server_side_encryption_by_default = {
kms_master_key_id = aws_kms_key.voucher_s3_bucket.arn
sse_algorithm = "aws:kms"
}
}
}
}
data "aws_iam_policy_document" "s3_bucket_kms_key" {
statement {
sid = "AllowPutRoles"
effect = "Allow"
actions = ["kms:GenerateDataKey"]
principals {
identifiers = local.put_object_roles #we can use event_gateway iam_role for now
type = "AWS"
}
resources = ["*"]
}
statement {
sid = "AllowAdmin"
effect = "Allow"
actions = [
"kms:*",
]
principals {
identifiers = [data.aws_iam_role.admin_role.arn, data.aws_iam_role.default_role.arn, data.aws_iam_role.automation_role.arn]
type = "AWS"
}
resources = ["*"]
}
}
resource "aws_kms_key" "s3_bucket" {
tags = {
"s3_bucket" = local.bucket_name
}
enable_key_rotation = true
policy = data.aws_iam_policy_document.voucher_s3_bucket_kms_key.json
}
resource "aws_s3_bucket_object_lock_configuration" "s3_bucket_object_lock_configuration" {
bucket = local.bucket_name
rule {
default_retention {
mode = "GOVERNANCE"
years = 10
}
}
}
data "aws_iam_policy_document" "voucher_s3_bucket" {
statement {
sid = "DenyNoKMSEncryption"
effect = "Deny"
actions = ["s3:PutObject"]
principals {
identifiers = ["*"]
type = "*"
}
resources = ["${module.voucher_s3_bucket.s3_bucket_arn}/*"]
condition {
test = "StringNotEqualsIfExists"
values = ["aws:kms"]
variable = "s3:x-amz-server-side-encryption"
}
condition {
test = "Null"
values = ["false"]
variable = "s3:x-amz-server-side-encryption"
}
}
statement {
sid = "DenyWrongKMSKey"
effect = "Deny"
actions = ["s3:PutObject"]
principals {
identifiers = ["*"]
type = "*"
}
resources = ["${module.s3_bucket.s3_bucket_arn}/*"]
condition {
test = "StringNotEquals"
values = [aws_kms_key.voucher_s3_bucket.arn]
variable = "s3:x-amz-server-side-encryption-aws-kms-key-id"
}
}
statement {
sid = "AllowAdminDefault"
effect = "Allow"
actions = ["s3:*"]
principals {
identifiers = [data.aws_iam_role.admin_role.arn, data.aws_iam_role.default_role.arn]
type = "AWS"
}
resources = [
"${module.voucher_s3_bucket.s3_bucket_arn}/*",
module.voucher_s3_bucket.s3_bucket_arn,
]
}
statement {
sid = "DenyDeleteActions"
effect = "Deny"
actions = ["s3:DeleteBucket", "s3:DeleteObject", "s3:DeleteObjectVersion", "s3:PutBucketObjectLockConfiguration"]
principals {
identifiers = ["*"]
type = "AWS"
}
resources = [
"${module.s3_bucket.s3_bucket_arn}/*",
module.s3_bucket.s3_bucket_arn,
]
}
}

Related

Write a dynamic Terraform block for a load balancer listener rule

I'm new to dynamic blocks and am having some trouble writing rules for listeners on a load balancer that was created using for_each.
Below are the resources I created:
resource "aws_lb_listener" "app_listener_forward" {
for_each = toset(var.app_listener_ports)
load_balancer_arn = aws_lb.app_alb.arn
port = each.value
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06"
certificate_arn = var.ssl_cert
default_action {
type = "forward"
forward {
dynamic "target_group" {
for_each = aws_lb_target_group.app_tg
content {
arn = target_group.value["arn"]
}
}
stickiness {
enabled = true
duration = 86400
}
}
}
}
resource "aws_lb_listener_rule" "app_https_listener_rule" {
for_each = toset(var.app_listener_ports)
listener_arn = aws_lb_listener.app_listener_forward[each.value].arn
action {
type = "forward"
forward {
dynamic "target_group" {
for_each = aws_lb_target_group.app_tg
content {
arn = target_group.value["arn"]
}
}
}
}
dynamic "condition" {
for_each = var.images
path_pattern {
content {
values = condition.value["paths"]
}
}
}
}
resource "aws_lb_target_group" "app_tg" {
for_each = var.images
name = each.key
port = each.value.port
protocol = "HTTP"
target_type = "ip"
vpc_id = aws_vpc.app_vpc.id
health_check {
interval = 130
timeout = 120
healthy_threshold = 10
unhealthy_threshold = 10
}
stickiness {
type = "lb_cookie"
cookie_duration = 86400
}
}
Below is how the variables are defined:
variable "images" {
  type = map(object({
    app_port = number
    paths    = set(string)
  }))
  default = {
    "app-one" = {
      app_port = 3000
      paths = [
        "/appOne",
        "/appOne/*"
      ]
    }
    "app-two" = {
      app_port = 4000
      paths = [
        "/appTwo",
        "/appTwo/*"
      ]
    }
  }
}
variable "app_listener_ports" {
  type = list(string)
  default = [
    80, 443, 22, 7999, 8999
  ]
}
Upon executing, I am getting an error about the path_pattern block being unexpected:
Error: Unsupported block type
│
│   on alb.tf line 78, in resource "aws_lb_listener_rule" "app_https_listener_rule":
│   78: path_pattern {
│
│ Blocks of type "path_pattern" are not expected here.
I've tried a few ways to get this dynamic block working but am having some difficulty. Any advice would be appreciated.
Thank you!
Try it like this:
dynamic "condition" {
  for_each = var.images
  content {
    path_pattern {
      values = condition.value.paths
    }
  }
}
And change the type of paths from set(string) to list(string).
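For reference, a minimal sketch of the adjusted variable, using the same values as in the question with only the paths type changed:
variable "images" {
  type = map(object({
    app_port = number
    paths    = list(string) # was set(string)
  }))
  default = {
    "app-one" = {
      app_port = 3000
      paths    = ["/appOne", "/appOne/*"]
    }
    "app-two" = {
      app_port = 4000
      paths    = ["/appTwo", "/appTwo/*"]
    }
  }
}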
This is also completely acceptable:
dynamic "condition" {
  for_each = var.images
  content {
    path_pattern {
      values = condition.value["paths"]
    }
  }
}
However, in my opinion it's better not to use a dynamic block for the condition here, for the sake of readability and maintainability:
condition {
  path_pattern {
    values = [
      "/appOne",
      "/appOne/*" ## can also use variables if you prefer !!
    ]
  }
}
I have already answered your original post about the problem you had after fixing the dynamic syntax.
Post URL: Error when creating dynamic terraform rule for alb listener rule

Terraform flappy state

I have created a Terraform project in order to create S3 buckets, cross-region replication, versioning, and a lifecycle rule to delete old versions.
The problem is that whenever I run Terraform, it wants to delete the cross-region replication and lifecycle rule if they exist in AWS, and whenever they no longer exist, it wants to re-add them. This happens without any code change.
It seems that the state is not accurate.
I already deleted everything in AWS and started from scratch, but it didn't help. I always run into the flappy situation.
All the details:
$ terraform init && terraform plan -var-file xyz.tfvars
...
Terraform will perform the following actions:

  # module.test_s3_bucket["bnpl-docs"].aws_s3_bucket.s3_bucket will be updated in-place
  ~ resource "aws_s3_bucket" "s3_bucket" {
        id   = "bnpl-docs"
        tags = {}
        # (11 unchanged attributes hidden)

      - replication_configuration {
          - role = "arn:aws:iam::....:role/bnpl-docs-s3-bucket-replication" -> null

          - rules {
              - id       = "version-replication" -> null
              - priority = 0 -> null
              - status   = "Enabled" -> null

              - destination {
                  - bucket        = "arn:aws:s3:::bnpl-docs-crr" -> null
                  - storage_class = "STANDARD" -> null
                }
            }
        }

        # (1 unchanged block hidden)
    }

  # module.test_s3_bucket["bnpl-docs"].aws_s3_bucket_lifecycle_configuration.s3_bucket[0] will be created
  + resource "aws_s3_bucket_lifecycle_configuration" "s3_bucket" {
      + bucket = "bnpl-docs"
      + id     = (known after apply)

      + rule {
          + id     = "version-retention"
          + status = "Enabled"

          + expiration {
              + days                         = 0
              + expired_object_delete_marker = true
            }

          + noncurrent_version_expiration {
              + noncurrent_days = 30
            }
        }
    }

Plan: 1 to add, 1 to change, 0 to destroy.
But the result is that both are missing in AWS after running apply.
When I rerun, I get the same output:
Terraform will perform the following actions:

  # module.test_s3_bucket["bnpl-docs"].aws_s3_bucket.s3_bucket will be updated in-place
  ~ resource "aws_s3_bucket" "s3_bucket" {
        id   = "bnpl-docs"
        tags = {}
        # (11 unchanged attributes hidden)

      - replication_configuration {
          - role = "arn:aws:iam::......:role/bnpl-docs-s3-bucket-replication" -> null

          - rules {
              - id       = "version-replication" -> null
              - priority = 0 -> null
              - status   = "Enabled" -> null

              - destination {
                  - bucket        = "arn:aws:s3:::bnpl-docs-crr" -> null
                  - storage_class = "STANDARD" -> null
                }
            }
        }

        # (1 unchanged block hidden)
    }

  # module.test_s3_bucket["bnpl-docs"].aws_s3_bucket_lifecycle_configuration.s3_bucket[0] will be created
  + resource "aws_s3_bucket_lifecycle_configuration" "s3_bucket" {
      + bucket = "bnpl-docs"
      + id     = (known after apply)

      + rule {
          + id     = "version-retention"
          + status = "Enabled"

          + expiration {
              + days                         = 0
              + expired_object_delete_marker = true
            }

          + noncurrent_version_expiration {
              + noncurrent_days = 30
            }
        }
    }

Plan: 1 to add, 1 to change, 0 to destroy.
But the result is that both are created in AWS after running apply.
I have created several modules to realize what I want. The involved code:
module "test_s3_bucket" {
source = "./modules/test-s3-bucket"
for_each = local.aws_s3_bucket_map
bucket_name = each.key
versioning = each.value.version_config
}
module "test_s3_bucket_repli" {
source = "./modules/test-s3-bucket"
providers = {
aws = aws.repli
}
for_each = local.aws_s3_bucket_map_repli
bucket_name = each.key
versioning = each.value.version_config
}
module "test_s3_bucket_repli_config" {
source = "./modules/test-s3-bucket-replication"
for_each = local.aws_s3_bucket_map_repli
src_bucket = {
name = module.test_s3_bucket[each.value.src_bucket_name].name
arn = module.test_s3_bucket[each.value.src_bucket_name].arn
}
dest_bucket = {
name = module.test_s3_bucket_repli[each.key].name
arn = module.test_s3_bucket_repli[each.key].arn
}
}
Contents of the test-s3-bucket module:
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 3.0"
    }
  }
}

##
# Bucket with configuration
##
resource "aws_s3_bucket" "s3_bucket" {
  bucket = var.bucket_name
}

resource "aws_s3_bucket_acl" "s3_bucket" {
  bucket = aws_s3_bucket.s3_bucket.id
  acl    = "private"
}

resource "aws_s3_bucket_versioning" "s3_bucket" {
  bucket = aws_s3_bucket.s3_bucket.id
  versioning_configuration {
    status = "Enabled"
  }
}

resource "aws_s3_bucket_lifecycle_configuration" "s3_bucket" {
  count  = var.versioning == null ? 0 : 1
  bucket = aws_s3_bucket.s3_bucket.id
  rule {
    id = var.versioning.rule_id
    expiration {
      expired_object_delete_marker = true
    }
    noncurrent_version_expiration {
      noncurrent_days = var.versioning.expiration_days
    }
    status = "Enabled"
  }
}

resource "aws_s3_bucket_public_access_block" "s3_bucket" {
  bucket                  = aws_s3_bucket.s3_bucket.id
  block_public_acls       = true
  block_public_policy     = true
  ignore_public_acls      = true
  restrict_public_buckets = true
}
The contents of the test-s3-bucket-replication module:
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 3.0"
    }
  }
}
resource "aws_s3_bucket_replication_configuration" "bucket_main" {
bucket = var.src_bucket.name
role = aws_iam_role.s3_bucket_main_replication.arn
rule {
id = "version-replication"
status = "Enabled"
destination {
bucket = var.dest_bucket.arn
storage_class = "STANDARD"
}
}
}
resource "aws_iam_role" "s3_bucket_main_replication" {
name = "${var.src_bucket.name}-s3-bucket-replication"
assume_role_policy = file("${path.module}/files/policies/sts-s3-assume.json")
}
resource "aws_iam_policy" "s3_bucket_main_replication" {
name = "${var.src_bucket.name}-s3-bucket-replication"
policy = templatefile("${path.module}/files/policies/s3-bucket-replication.json", {
source_bucket_arn = var.src_bucket.arn,
destination_bucket_arn = var.dest_bucket.arn,
})
}
resource "aws_iam_role_policy_attachment" "s3_bucket_main_replication" {
role = aws_iam_role.s3_bucket_main_replication.name
policy_arn = aws_iam_policy.s3_bucket_main_replication.arn
}
You can see that the two configuration parts which seem to be in conflict are spread over two modules. I'm unsure if this is a problem. I'm kinda new to Terraform :)
After increasing the log level via TF_LOG="DEBUG", I found out that those two resources conflicted with the bucket configuration: the aws_s3_bucket configuration did not include any inline configuration for lifecycle or replication (as suggested by the documentation), but Terraform somehow still expected this legacy declaration.
I was using AWS provider 3.75 and upgraded to 4.x. After this upgrade, everything works as expected.
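In practice that means bumping the provider constraint in each module and re-initializing; a minimal sketch:
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.0" # was "~> 3.0"
    }
  }
}
Then run terraform init -upgrade so the dependency lock file picks up the new provider version. In 4.x the standalone aws_s3_bucket_* resources are the canonical way to manage lifecycle and replication, so they no longer fight with the legacy inline attributes of aws_s3_bucket.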

Dynamically AWS IAM policy document with principals

I am converting a static AWS IAM policy document ("FROM") into a dynamic one ("TO"), but the principals part gives "An argument named "principals" is not expected here".
If I delete "principals" from the aws_iam_policy_document, it works. Any suggestion would be helpful.
FROM
data "aws_iam_policy_document" "bucket_policy" {
statement {
principals {
type = "AWS"
identifiers = [
"arn:aws:iam::sdfsdfsdeploy",
"arn:aws:iam::sdfsdfsdeploy/OrganizationAccountAccessRole"
]
}
actions = [
"s3:GetObject",
"s3:PutObject"
]
resources = formatlist("arn:aws:s3:::%s/*", var.bucket_name)
}
}
TO
This code is in source = "../../modules/s3/main.tf":
data "aws_iam_policy_document" "bucket_policy" {
dynamic "statement" {
for_each = var.policies_list
iterator = role
content {
effect = lookup(role.value, "effect", null)
principals = lookup(role.value, "principals", null)
actions = lookup(role.value, "actions", null)
resources = lookup(role.value, "resources", null)
}
}
}
module "s3_test" {
source = "../../modules/s3"
region = var.region
policies_list = [
{
effect = "Allow"
principals = {
type = "AWS"
identifiers = [
"arn:aws:iam::3ssdfsdfy",
"arn:aws:iam::3ssdfsdfy:role/OrganizationAccountAccessRole"
]
}
actions = [
"s3:GetObject",
"s3:PutObject"
]
resources = formatlist("arn:aws:s3:::%s/*", "teskjkjsdkfkjskdjhkjfhkjhskjdf")
}
]
}
Found it. principals is a nested block rather than an argument, so it needs its own dynamic "principals" block inside the statement:
variable "policies_list" {
description = "nested block: s3_aws_iam_policy_document"
type = set(object(
{
actions = list(string)
effect = string
principals = set(object(
{
type = string
identifiers = list(string)
}
))
resources = list(string)
}
))
default = []
}
data "aws_iam_policy_document" "bucket_policy" {
dynamic "statement" {
for_each = var. policies_list
iterator = role
content {
effect = lookup(role.value, "effect", null)
actions = lookup(role.value, "actions", null)
dynamic "principals" {
for_each = role.value.principals
content {
type = principals.value["type"]
identifiers = principals.value["identifiers"]
}
}
resources = lookup(role.value, "resources", null)
}
}
}
based on
https://github.com/niveklabs/tfwriter/blob/1ea629ed386bbe6a8f21617a430dae19ba536a98/google-beta/r/google_storage_bucket.md
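One thing to watch with the variable type above: principals is typed as a set of objects, so each element of policies_list has to pass it as a list rather than a single object (Terraform won't convert a bare object into a one-element set). A hedged sketch of the module call, reusing the question's values:
module "s3_test" {
  source = "../../modules/s3"
  region = var.region
  policies_list = [
    {
      effect = "Allow"
      principals = [{ # note the surrounding brackets
        type = "AWS"
        identifiers = [
          "arn:aws:iam::3ssdfsdfy",
          "arn:aws:iam::3ssdfsdfy:role/OrganizationAccountAccessRole"
        ]
      }]
      actions   = ["s3:GetObject", "s3:PutObject"]
      resources = formatlist("arn:aws:s3:::%s/*", "teskjkjsdkfkjskdjhkjfhkjhskjdf")
    }
  ]
}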

How to configure App Service to use Azure AD login from Terraform

It is easy to configure a web App Service to use Azure AD login manually via the official documentation. However, how can I achieve this from Terraform? I've searched for a while but didn't find any examples; if you happen to know of one, it would be nice to share it with me.
The following code is how I created the resource group and provisioned the web application:
terraform {
  backend "azurerm" {}
}

terraform {
  required_version = ">= 0.13"
}

resource "azurerm_resource_group" "tf_resource_group" {
  name     = "RG_${var.application_name}_${var.environment}"
  location = var.location
  tags = {
    environment = var.environment
    DeployedBy  = "terraform"
  }
}

resource "azurerm_app_service_plan" "tf_service_plan" {
  name                = "${var.application_name}-${var.environment}-asp"
  location            = azurerm_resource_group.tf_resource_group.location
  resource_group_name = azurerm_resource_group.tf_resource_group.name
  kind                = "Linux"
  reserved            = true
  sku {
    tier = "Standard"
    size = "S1"
  }
  tags = {
    environment = var.environment
    DeployedBy  = "terraform"
  }
}

resource "azurerm_app_service" "tf_app_service" {
  name                = var.application_name
  location            = azurerm_resource_group.tf_resource_group.location
  resource_group_name = azurerm_resource_group.tf_resource_group.name
  app_service_plan_id = azurerm_app_service_plan.tf_service_plan.id
  site_config {
    always_on        = true
    linux_fx_version = "DOCKER|${var.acr_name}.azurecr.io/${var.img_repo_name}:${var.tag}"
  }
  app_settings = {
    DOCKER_REGISTRY_SERVER_URL          = "$DRSRUL"
    WEBSITES_ENABLE_APP_SERVICE_STORAGE = "false"
    DOCKER_REGISTRY_SERVER_USERNAME     = "$ACRNAME"
    DOCKER_REGISTRY_SERVER_PASSWORD     = "$PW"
  }
  identity {
    type = "SystemAssigned"
  }
}
I believe your "azurerm_app_service" resource block needs an auth_settings block with an active_directory block. Example:
auth_settings {
  enabled = true
  active_directory {
    client_id = azuread_application.example.application_id
  }
  default_provider = "AzureActiveDirectory"
  issuer           = "https://sts.windows.net/xxxxxxx-xxxx-xxx-xxxx-xxxtenantID/"
}
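The azuread_application.example referenced above is not shown in the answer; a minimal sketch of that app registration, assuming the azuread provider (2.x argument names) and a hypothetical display name:
resource "azuread_application" "example" {
  display_name = "tf-app-service-login" # hypothetical name
  web {
    # App Service's built-in authentication listens on this callback path
    redirect_uris = ["https://${var.application_name}.azurewebsites.net/.auth/login/aad/callback"]
  }
}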

AWS S3 Object Lifecycle Exclusion

I'm working in Terraform and am creating an S3 object/folder-with-content. I would like to exclude that object from my lifecycle policy, but I'm not sure how to exclude the object (folder-object/sample) from the lifecycle policy (Terraform code below):
resource "aws_s3_bucket" "s3_test" {
bucket = "test-bucket-upload"
acl = "private"
key = "folder-object/sample"
tags {
Name = "test-bucket"
Environment = "lab"
}
server_side_encryption_configuration {
rule {
apply_server_side_encryption_by_default {
sse_algorithm = "AES256"
}
}
}
lifecycle_rule {
id = "glacier-transfer"
enabled = true
transition {
days = 360
storage_class = "GLACIER"
}
}
}
Instead of excluding, use prefix to identify the objects your lifecycle rule should apply to. For example, the rule below would only apply to objects in the new_objects folder in your bucket:
...
lifecycle_rule {
  id      = "glacier-transfer"
  enabled = true
  prefix  = "new_objects/"
  transition {
    days          = 360
    storage_class = "GLACIER"
  }
}
...
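If you are on AWS provider 4.x, where lifecycle rules move to the standalone aws_s3_bucket_lifecycle_configuration resource, the same prefix scoping is expressed with a filter block; a sketch, assuming the bucket from the question:
resource "aws_s3_bucket_lifecycle_configuration" "s3_test" {
  bucket = aws_s3_bucket.s3_test.id
  rule {
    id     = "glacier-transfer"
    status = "Enabled"
    # only objects under this prefix are transitioned
    filter {
      prefix = "new_objects/"
    }
    transition {
      days          = 360
      storage_class = "GLACIER"
    }
  }
}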