terraform import multiple S3 buckets into Terraform

I have multiple (more than 5) S3 buckets. How can I import all of them at once into the configuration below?
resource "aws_s3_bucket" "bucket" {
count = "${length(var.bucket_names)}"
bucket = "${element(var.bucket_names, count.index)}"
}
variable "bucket_names" {
type = list(string)
default = [
"something",
"something",
"something",
"something",
"something",
"something",
"something",
"something"
]
}
What will be the command for importing all at once?
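There is no single command that imports every bucket in one shot; terraform import takes one resource address and one ID at a time, so each count index has to be imported separately. A sketch of what that would look like for the configuration above (the bucket names are placeholders for the real values in var.bucket_names; the bracketed index syntax is Terraform 0.12+):

terraform import 'aws_s3_bucket.bucket[0]' first-real-bucket-name
terraform import 'aws_s3_bucket.bucket[1]' second-real-bucket-name
# ...one import per index, in the same order as var.bucket_names

# or loop over the names in a shell, assuming a hypothetical buckets.txt
# that lists them in the same order as the variable:
i=0
while read -r name; do
  terraform import "aws_s3_bucket.bucket[$i]" "$name"
  i=$((i + 1))
done < buckets.txt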

Related

Create an IAM role with Terraform to access S3 bucket over EKS

In my EKS app I have this minimal example to reproduce:
const tempCredentials = new aws.ChainableTemporaryCredentials({
  params: {
    RoleArn: 'some-other-arn',
  },
});
tempCredentials.get(console.error);
console.log(tempCredentials.accessKeyId);
console.log(tempCredentials.secretAccessKey);
I get the following error:
AccessDenied: User: (eks-arn-role) is not authorized to perform: sts:AssumeRole on resource: some-other-arn
This is my terraform file:
# 0. Create another role that my app will assume
resource "aws_iam_role" "techdocs_iam_role" {
  name = var.techdocs_iam_role
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = "sts:AssumeRole"
        Effect = "Allow"
        Sid    = ""
        Principal = {
          Service = "eks.amazonaws.com"
        }
      },
    ]
  })
}

# 1. My App should be able to AssumeRole only to 'some-other-arn' resource.
data "aws_iam_policy_document" "app_iam_policy" {
  statement {
    actions = [
      "sts:AssumeRole"
    ]
    resources = [
      "some-other-arn" # The role created in Step 0.
    ]
  }
}

# 2. Create S3 bucket
resource "aws_s3_bucket" "bucket" {
  bucket = var.techdocs_bucket_name
}

# 3. Create policy to S3 bucket
resource "aws_iam_policy" "policy" {
  name        = "policy"
  description = "Policy to access S3 bucket"
  policy = jsonencode({
    "Version" : "2012-10-17",
    "Statement" : [
      {
        "Effect" : "Allow",
        "Action" : [
          "s3:PutObject",
          "s3:GetObject",
          "s3:DeleteObjectVersion",
          "s3:ListBucket",
          "s3:DeleteObject",
          "s3:PutObjectAcl"
        ],
        "Resource" : ["arn:aws:s3:::${var.techdocs_bucket_name}", "arn:aws:s3:::${var.techdocs_bucket_name}/*"]
      }
    ]
  })
}
Step 1 is working because I have the AWS_ environment variables injected in EKS and the Web Identity Token File is created, but it can't assume the role created in Terraform.
I need to create a new role that my EKS app can assume, and apply certain S3 actions either to that bucket or to that role.
Thanks for the help!
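One possible direction, not confirmed against the original setup: the assume_role_policy above only trusts the eks.amazonaws.com service principal, so the role the pod actually runs as (the eks-arn-role in the error) is never allowed to assume the new role. A hedged sketch of a trust policy that trusts the pod's role instead, plus attaching the step 1 document to that role so it is permitted to call sts:AssumeRole (both ARNs and names below are placeholders):

# Sketch only: trust the role the EKS pod already runs as (placeholder ARN).
resource "aws_iam_role" "techdocs_iam_role" {
  name = var.techdocs_iam_role
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = "sts:AssumeRole"
        Effect = "Allow"
        Principal = {
          AWS = "arn:aws:iam::111111111111:role/eks-app-role" # placeholder for eks-arn-role
        }
      },
    ]
  })
}

# Sketch: give the pod's role permission to assume it, reusing the step 1 document.
resource "aws_iam_role_policy" "app_assume_role" {
  name   = "allow-assume-techdocs-role"
  role   = "eks-app-role" # placeholder: name of the role the pod runs as
  policy = data.aws_iam_policy_document.app_iam_policy.json
}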

How to grant lambda permission to upload file to s3 bucket in `terraform`?

I have the below Lambda function configuration in Terraform:
resource "aws_lambda_function" "test_lambda" {
# filename = "crawler/dist/deploy.zip"
s3_bucket = "${var.s3-bucket}"
s3_key = "${aws_s3_bucket_object.file_upload.key}"
# source_code_hash = "${filebase64sha256("file.zip")}"
function_name = "quote-crawler"
role = "arn:aws:iam::773592622512:role/LambdaRole"
handler = "handler.handler"
source_code_hash = "${data.archive_file.zipit.output_base64sha256}"
runtime = "${var.runtime}"
timeout = 180
environment {
variables = {
foo = "bar"
}
}
}
When I run the Lambda I get the error "errorMessage": "An error occurred (AccessDenied) when calling the PutObject operation: Access Denied" when it tries to upload a file to the S3 bucket. It seems that the Lambda function doesn't have permission to access S3. The Terraform docs are not clear about how to configure this, and the permission configuration panel doesn't appear in the Lambda console either. It seems that a Lambda created by Terraform leaves me with limited configuration options. So how can I grant S3 permission to the Lambda?
To make it easy you can do this in four steps:
create a role
create a policy
attach the policy to the role
attach the role to the Lambda
Create the role:
resource "aws_iam_role" "role" {
name = "${var.env_prefix_name}-alb-logs-to-elk"
path = "/"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
Create a policy that grants the specified access to S3:
# Created Policy for IAM Role
resource "aws_iam_policy" "policy" {
  name        = "${var.env_prefix_name}-test-policy"
  description = "A test policy"

  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "logs:*"
      ],
      "Resource": "arn:aws:logs:*:*:*"
    },
    {
      "Effect": "Allow",
      "Action": [
        "s3:*"
      ],
      "Resource": "arn:aws:s3:::*"
    }
  ]
}
EOF
}
Attach the IAM role and the newly created policy:
resource "aws_iam_role_policy_attachment" "test-attach" {
role = "${aws_iam_role.role.name}"
policy_arn = "${aws_iam_policy.policy.arn}"
}
Now attach the role to the Lambda resource:
resource "aws_lambda_function" "test_lambda" {
# filename = "crawler/dist/deploy.zip"
s3_bucket = "${var.s3-bucket}"
s3_key = "${aws_s3_bucket_object.file_upload.key}"
# source_code_hash = "${filebase64sha256("file.zip")}"
function_name = "quote-crawler"
role = "${aws_iam_role.role.arn}"
handler = "handler.handler"
source_code_hash = "${data.archive_file.zipit.output_base64sha256}"
runtime = "${var.runtime}"
timeout = 180
environment {
variables = {
foo = "bar"
}
}
}
The IAM role associated with the function is not allowed to upload to S3.
The solution is to create an IAM policy allowing S3 access to your bucket (say read/write), which would look something like:
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "ListObjectsInBucket",
      "Effect": "Allow",
      "Action": ["s3:ListBucket"],
      "Resource": ["arn:aws:s3:::bucket-name"]
    },
    {
      "Sid": "AllObjectActions",
      "Effect": "Allow",
      "Action": "s3:*Object",
      "Resource": ["arn:aws:s3:::bucket-name/*"]
    }
  ]
}
Then, you need to attach this policy to the role used by your lambda function.
More info at:
https://www.terraform.io/docs/providers/aws/r/iam_role_policy.html
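A sketch of how attaching it might look in Terraform, using the aws_iam_role_policy resource linked above (the role reference and bucket name are placeholders for your own):

# Sketch: attach the policy above inline to the Lambda's execution role.
resource "aws_iam_role_policy" "lambda_s3_access" {
  name = "lambda-s3-access"
  role = aws_iam_role.lambda_role.id # placeholder: your Lambda's role

  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "ListObjectsInBucket",
      "Effect": "Allow",
      "Action": ["s3:ListBucket"],
      "Resource": ["arn:aws:s3:::bucket-name"]
    },
    {
      "Sid": "AllObjectActions",
      "Effect": "Allow",
      "Action": "s3:*Object",
      "Resource": ["arn:aws:s3:::bucket-name/*"]
    }
  ]
}
EOF
}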
I would do it in the following order (this code is using Terraform 0.12.*).
Create policy documents for the assume-role and S3 permissions:
data "aws_iam_policy_document" "lambda_assume_role" {
  statement {
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["lambda.amazonaws.com"]
    }
  }
}

data "aws_iam_policy_document" "lambda_s3" {
  statement {
    actions = [
      "s3:PutObject",
      "s3:PutObjectAcl"
    ]

    resources = [
      "arn:aws:s3:::bucket/*"
    ]
  }
}
Create an IAM policy
resource "aws_iam_policy" "lambda_s3" {
  name        = "lambda-s3-permissions"
  description = "Contains S3 put permission for lambda"
  policy      = data.aws_iam_policy_document.lambda_s3.json
}
Create a role
resource "aws_iam_role" "lambda_role" {
  name               = "lambda-role"
  assume_role_policy = data.aws_iam_policy_document.lambda_assume_role.json
}
Attach policy to role
resource "aws_iam_role_policy_attachment" "lambda_s3" {
  role       = aws_iam_role.lambda_role.name
  policy_arn = aws_iam_policy.lambda_s3.arn
}
Attach role to lambda
resource "aws_lambda_function" "test_lambda" {
# filename = "crawler/dist/deploy.zip"
s3_bucket = var.s3-bucket
s3_key = aws_s3_bucket_object.file_upload.key
# source_code_hash = "${filebase64sha256("file.zip")}"
function_name = "quote-crawler"
role = aws_iam_role.lambda_role.arn
handler = "handler.handler"
source_code_hash = data.archive_file.zipit.output_base64sha256
runtime = var.runtime
timeout = 180
environment {
variables = {
foo = "bar"
}
}
}

Terraform bucket_notification throwing an error

So I followed this link to create a Lambda function that is triggered by an S3 event in Terraform. When I run apply I keep getting the following error:
module.elb_logs_to_es.aws_s3_bucket_notification.elb_logs_bucket: 1 error(s) occurred:
aws_s3_bucket_notification.elb_logs_bucket: Error putting S3 notification configuration: AccessDenied: Access Denied
Here's my configuration:
resource "aws_lambda_function" "elb_logs_to_es" {
function_name = "send-elb-logs-to-es"
s3_bucket = "xxxxxxxxxxxx"
s3_key = "elb_logs_to_es/elb_to_es.zip"
handler = "elb_to_es.lambda_handler"
role = "${aws_iam_role.elb_logs_role.arn}"
runtime = "python3.6"
}
resource "aws_s3_bucket_notification" "elb_logs_bucket" {
bucket = "xxxxxxxxxxxx"
lambda_function {
lambda_function_arn = "${aws_lambda_function.elb_logs_to_es.arn}"
events = ["s3:ObjectCreated:*"]
}
}
resource "aws_lambda_permission" "elb_bucket_access" {
statement_id = "AllowExecutionFromS3Bucket"
action = "lambda:InvokeFunction"
function_name = "${aws_lambda_function.elb_logs_to_es.arn}"
principal = "s3.amazonaws.com"
source_arn = "arn:aws:s3:::xxxxxxxxxxxx"
}
resource "aws_iam_role" "elb_logs_role" {
name = "elb_logs_sending_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Effect": "Allow"
}
]
}
EOF
}
I also tried changing the bucket policy manually to allow the role to PutObject into it, but still nothing. Any ideas?
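One assumption worth checking, not stated in the question: the AccessDenied comes from the PutBucketNotificationConfiguration call that Terraform itself makes during apply, so it is the credentials running Terraform, not the Lambda role, that need s3:PutBucketNotification on the bucket. If the bucket is owned by another account, its bucket policy would need a statement along these lines (the principal ARN is a placeholder):

{
  "Effect": "Allow",
  "Principal": { "AWS": "arn:aws:iam::123456789012:role/terraform-runner" },
  "Action": "s3:PutBucketNotification",
  "Resource": "arn:aws:s3:::xxxxxxxxxxxx"
}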

Configure AWS S3 bucket for Django static files with Terraform

I'm new to Terraform.
I'm trying to configure an S3 bucket to serve Django static files.
There should be unrestricted access for HTTP GET requests to these static files, but there should also be an AWS user - this user account will be used by Django to upload updated static files to the S3 bucket.
I've written this:
resource "aws_iam_user" "integrations_lite_staticfiles_s3_bucket_user" {
name = "Integrations-Lite-staticfiles-user"
}
resource "aws_iam_access_key" "integrations_lite_staticfiles_s3_bucket_user_key" {
user = "${aws_iam_user.integrations_lite_staticfiles_s3_bucket_user.name}"
}
data "aws_iam_policy_document" "integrations_lite_staticfiles_s3_user_policy" {
statement {
effect = "Allow"
actions = ["s3:*"]
resources = ["${aws_s3_bucket.integrations_lite_staticfiles_s3_bucket.arn}"]
}
}
resource "aws_iam_user_policy" "integrations_lite_staticfiles_s3_user_policy" {
name = "Integrations-Lite-staticfiles-user-policy"
user = "${aws_iam_user.integrations_lite_staticfiles_s3_bucket_user.name}"
policy = "${data.aws_iam_policy_document.integrations_lite_staticfiles_s3_user_policy.json}"
}
data "aws_iam_policy_document" "integrations_lite_staticfiles_s3_bucket_policy" {
"statement" {
sid = "PublicReadForGetBucketObjects"
effect = "Allow"
actions = ["s3:GetObject"]
resources = ["${aws_s3_bucket.integrations_lite_staticfiles_s3_bucket.arn}"]
principals {
identifiers = ["*"]
type = "AWS"
}
}
}
resource "aws_s3_bucket_policy" "integrations_lite_staticfiles_s3_bucket_policy" {
bucket = "${aws_s3_bucket.integrations_lite_staticfiles_s3_bucket.id}"
policy = "${data.aws_iam_policy_document.integrations_lite_staticfiles_s3_user_policy.json}"
}
resource "aws_s3_bucket" "integrations_lite_staticfiles_s3_bucket" {
region = "${var.region}"
bucket = "integrations-lite-staticfiles"
acl = "public-read"
cors_rule {
allowed_headers = ["*"]
allowed_methods = ["PUT","POST"]
allowed_origins = ["*"]
expose_headers = ["ETag"]
max_age_seconds = 3000
}
website {
index_document = "index.html"
}
}
but terraform apply results in:
* aws_s3_bucket_policy.integrations_lite_staticfiles_s3_bucket_policy: 1 error(s) occurred:
* aws_s3_bucket_policy.integrations_lite_staticfiles_s3_bucket_policy: Error putting S3 policy: MalformedPolicy: Missing required field Principal
status code: 400, request id: 724BC650DFFCE3B7, host id: ####
However adding principals to aws_s3_bucket_policy.integrations_lite_staticfiles_s3_bucket_policy results in:
Error: aws_s3_bucket_policy.integrations_lite_staticfiles_s3_bucket_policy: : invalid or unknown key: principals
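It may be worth noting that the aws_s3_bucket_policy above references the user policy document (which declares no principals) rather than the bucket policy document that does declare them, which would explain the "Missing required field Principal" error. A sketch with the reference swapped, not confirmed against the original setup:

resource "aws_s3_bucket_policy" "integrations_lite_staticfiles_s3_bucket_policy" {
  bucket = "${aws_s3_bucket.integrations_lite_staticfiles_s3_bucket.id}"
  # Reference the bucket policy document (the one with principals),
  # not the user policy document.
  policy = "${data.aws_iam_policy_document.integrations_lite_staticfiles_s3_bucket_policy.json}"
}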
I've found a solution:
resource "aws_iam_group" "manage-integrations-lite-staticfiles-s3-bucket" {
name = "Manage-Integrations-Lite-static-files"
}
resource "aws_iam_user" "manage-integrations-lite-staticfiles-s3-bucket" {
name = "Manage-Integrations-Lite-static-files"
}
resource "aws_iam_group_membership" "manage-integrations-lite-staticfiles-s3-bucket" {
group = "${aws_iam_group.manage-integrations-lite-staticfiles-s3-bucket.name}"
name = "Manage-Integrations-Lite-static-files"
users = ["${aws_iam_user.manage-integrations-lite-staticfiles-s3-bucket.name}"]
}
resource "aws_iam_group_policy" "manage-integrations-lite-staticfiles-s3-bucket" {
group = "${aws_iam_group.manage-integrations-lite-staticfiles-s3-bucket.name}"
policy =<<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "ManageIntegrationsLiteStaticfilesBucket",
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::integrations-lite-staticfiles",
"arn:aws:s3:::integrations-lite-staticfiles/*"
]
}
]
}
POLICY
}
resource "aws_s3_bucket" "integrations-lite-staticfiles-s3-bucket" {
region = "${var.region}"
bucket = "integrations-lite-staticfiles"
acl = "public-read"
cors_rule {
allowed_headers = ["*"]
allowed_methods = ["GET", "HEAD"]
allowed_origins = ["*"]
expose_headers = ["ETag"]
max_age_seconds = 3000
}
website {
index_document = "index.html"
}
policy =<<POLICY
{
"Version":"2012-10-17",
"Statement":[{
"Sid":"PublicReadGetObject",
"Effect":"Allow",
"Principal": "*",
"Action":["s3:GetObject"],
"Resource":[
"arn:aws:s3:::integrations-lite-staticfiles",
"arn:aws:s3:::integrations-lite-staticfiles/*"
]
}]
}
POLICY
}
Note: I intentionally removed the access key part. I prefer to generate the key ID and secret manually via the AWS console.

Terraform s3 bucket notification having multiple topics

I am trying to write a flexible/dynamic resource for aws_s3_bucket_notification which may have a variable number of topics for the specified S3 bucket. For one bucket I may have just 2 prefixes and 2 topics, for others 4 or 5, and so on. I was thinking of using a map that stores the prefix and SNS ARN for each prefix type, since the events will be the same. I need to create a single s3_bucket_notification which has all the topics in it without having to write each topic by hand. Any suggestions?
Example
resource "aws_s3_bucket_notification" "bucket_notification" {
bucket = "${aws_s3_bucket.bucket.id}"
topic {
topic_arn = "$map.value" ###prototype
events = ["s3:ObjectCreated:*"]
filter_suffix = "$map.key" ###prototype
}
}
If my understanding is correct, the code should look like this:
variable "sns_top" {
type = "map"
default = {
dev = "topic1"
uat = "topic2"
prod = "topic3"
}
}
variable "bucket_name" {
default = "my-tf-test-bucket-dfsfddsf"
}
data "aws_caller_identity" "current" {}
resource "aws_sns_topic" "sns_topic" {
count = "${length(keys(var.sns_top))}"
name = "sns-topic-${element(values(var.sns_top),count.index)}"
}
resource "aws_sns_topic_policy" "custom" {
count = "${length(keys(var.sns_top))}"
arn = "${element(aws_sns_topic.sns_topic.*.arn, count.index)}"
policy = <<POLICY
{
"Version": "2012-10-17",
"Id": "default",
"Statement":[{
"Sid": "default",
"Effect": "Allow",
"Principal": {"AWS":"*"},
"Action": [
"SNS:GetTopicAttributes",
"SNS:SetTopicAttributes",
"SNS:AddPermission",
"SNS:RemovePermission",
"SNS:DeleteTopic"
],
"Resource": "${element(aws_sns_topic.sns_topic.*.arn, count.index)}"
}]
}
POLICY
depends_on = ["aws_sns_topic.sns_topic"]
}
resource "aws_s3_bucket" "bucket" {
bucket = "${var.bucket_name}"
}
data "aws_iam_policy_document" "default" {
statement {
effect = "Allow"
actions = [
"s3:PutObject",
]
resources = [
"${aws_s3_bucket.bucket.arn}/*",
]
principals {
type = "AWS"
identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
}
}
}
resource "aws_s3_bucket_policy" "default" {
bucket = "${aws_s3_bucket.bucket.id}"
policy = "${data.aws_iam_policy_document.default.json}"
}
resource "aws_s3_bucket_notification" "bucket_notification" {
count = "${length(keys(var.sns_top))}"
bucket = "${aws_s3_bucket.bucket.id}"
topic {
topic_arn = "${element(aws_sns_topic.sns_topic.*.arn, count.index)}"
events = ["s3:ObjectCreated:*"]
filter_suffix = "${element(keys(var.sns_top),count.index)}"
}
}
The code hits an error explained in the link below, but it should still be fine to use as a reference for further coding, such as how to use count.index with maps.
* aws_s3_bucket_notification.bucket_notification.0: Error putting S3 notification configuration: InvalidArgument: Unable to validate the following destination configurations
Refer:
How do I avoid the error "Unable to validate the following destination configurations" when using S3 event notifications in CloudFormation?
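For what it's worth, since a bucket can only hold a single notification configuration, a commonly used alternative on Terraform 0.12+ is one aws_s3_bucket_notification resource with a dynamic topic block, one entry per map element. A minimal sketch, assuming a hypothetical map variable of suffix => topic ARN (the ARNs and suffixes below are placeholders):

# Sketch only, Terraform 0.12+ syntax.
variable "notification_topics" {
  type = map(string)

  default = {
    ".log" = "arn:aws:sns:us-east-1:111111111111:topic1"
    ".csv" = "arn:aws:sns:us-east-1:111111111111:topic2"
  }
}

resource "aws_s3_bucket_notification" "bucket_notification" {
  bucket = aws_s3_bucket.bucket.id

  # One topic block per map entry, so all topics live in a single
  # notification configuration for the bucket.
  dynamic "topic" {
    for_each = var.notification_topics

    content {
      topic_arn     = topic.value
      events        = ["s3:ObjectCreated:*"]
      filter_suffix = topic.key
    }
  }
}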