can cognito attributes also be used in an assumed IAM role in another AWS account? - amazon-cognito

I am using the ${cognito-identity.amazonaws.com:sub} placeholder in the authenticated role of a Cognito identity pool to give the Cognito users fine-grained access to their resources. As described, for example, here, this works great if the resources are in the same account as the IAM role.
Now I want to do the same thing, but for resources in another account. How would I, for example, do the following?
"userX" from userpool in accountA can access logGroup with name "userX" in accountB

So apparently it works with S3 buckets, for example, but you also need to give the mentioned S3 bucket in account B a resource policy that allows principals from account A to access the bucket. For example, with Terraform (by the way, this is to make Amplify's upload feature work with a bucket in a different AWS account from the Amplify project):
# IAM role assumed by authenticated Cognito identities in account A.
# The inline policy grants per-user access to prefixes in the account-B bucket.
resource "aws_iam_role" "authenticated_role" {
name = "${local.identity_pool_name}-authenticated-user"
# Trust (assume-role) policy is defined in a separate policy-document data source.
assume_role_policy = data.aws_iam_policy_document.authenticated_role_document.json
tags = var.tags
inline_policy {
name = "authenticated_role_policy"
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
"Action": [
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject"
],
# "$${...}" escapes Terraform interpolation so the literal IAM policy
# variable ${cognito-identity.amazonaws.com:sub} reaches AWS, scoping
# each identity to its own protected/private prefix.
"Resource": [
"arn:aws:s3:::${var.accountBBucket}/public/*",
"arn:aws:s3:::${var.accountBBucket}/protected/$${cognito-identity.amazonaws.com:sub}/*",
"arn:aws:s3:::${var.accountBBucket}/private/$${cognito-identity.amazonaws.com:sub}/*"
],
"Effect": "Allow"
},
... # placeholder: further statements elided by the author
]
})
}
}
# Bucket policy in account B that delegates access to account A's root
# principal. Delegating to the account root means account A can then grant
# the access onward to its own IAM roles (e.g. the Cognito authenticated
# role) via their identity policies.
resource "aws_s3_bucket_policy" "shared_customer_artefacts_pol" {
  provider = aws.accountB
  bucket   = aws_s3_bucket.shared_customer_artefacts.id
  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Sid    = "1"
        Effect = "Allow"
        Principal = {
          AWS = "arn:aws:iam::${var.accountAId}:root"
        }
        Action = [
          "s3:ListBucket",
          "s3:GetObject",
          "s3:PutObject",
        ]
        # Fixed: the original read "${var.var.accountBBucket}" (a stray
        # second "var."), which is not a valid variable reference.
        Resource = [
          "arn:aws:s3:::${var.accountBBucket}",
          "arn:aws:s3:::${var.accountBBucket}/*",
        ]
      }
    ]
  })
}

Related

Create an IAM role with Terraform to access S3 bucket over EKS

In my EKS app I have this minimal example to reproduce:
// Assume the target role via STS using the SDK's chainable temporary credentials.
const tempCredentials = new aws.ChainableTemporaryCredentials({
  params: {
    RoleArn: 'some-other-arn',
  },
});
// get() resolves the credentials asynchronously: accessKeyId/secretAccessKey
// are only populated once the callback fires, so read them there rather than
// immediately after the call (where they would still be undefined).
tempCredentials.get((err) => {
  if (err) {
    console.error(err);
    return;
  }
  console.log(tempCredentials.accessKeyId);
  console.log(tempCredentials.secretAccessKey);
});
I get the following error:
AccessDenied: User: (eks-arn-role) is not authorized to perform: sts:AssumeRole on resource: some-other-arn
This is my terraform file:
# 0. Create another role that my app will assume
# Role intended to be assumed by the app (step 0 of the author's setup).
# NOTE(review): Principal.Service = "eks.amazonaws.com" trusts the EKS
# control-plane service, not pods. For pod-level access via IRSA the trust
# policy normally needs a Federated principal (the cluster's OIDC provider)
# and sts:AssumeRoleWithWebIdentity — confirm which is intended; this mismatch
# is consistent with the AccessDenied on sts:AssumeRole reported above.
resource "aws_iam_role" "techdocs_iam_role" {
name = var.techdocs_iam_role
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = "sts:AssumeRole"
Effect = "Allow"
Sid = ""
Principal = {
Service = "eks.amazonaws.com"
}
},
]
})
}
# 1. My App should be able to AssumeRole only to 'some-other-arn' resource.
# Identity policy document for the app's own role: permits it to call
# sts:AssumeRole on the step-0 role only.
data "aws_iam_policy_document" "app_iam_policy" {
statement {
actions = [
"sts:AssumeRole"
]
resources = [
"some-other-arn" # The role created in Step 0.
]
}
}
# 2. Create S3 bucket
# Bucket that will hold the techdocs artifacts.
resource "aws_s3_bucket" "bucket" {
bucket = var.techdocs_bucket_name
}
# 3. Create policy to S3 bucket
# Object read/write/delete plus bucket listing, scoped to the techdocs bucket.
# NOTE(review): no policy attachment to any role is shown for this policy in
# the snippet — confirm it is attached to the role the app actually uses.
resource "aws_iam_policy" "policy" {
name = "policy"
description = "Policy to access S3 bucket"
policy = jsonencode({
"Version" : "2012-10-17",
"Statement" : [
{
"Effect" : "Allow",
"Action" : ["s3:PutObject",
"s3:GetObject",
"s3:DeleteObjectVersion",
"s3:ListBucket",
"s3:DeleteObject",
"s3:PutObjectAcl"],
"Resource" : ["arn:aws:s3:::${var.techdocs_bucket_name}", "arn:aws:s3:::${var.techdocs_bucket_name}/*"]
}
]
})
}
Step 1 is working because I have the AWS_ environment variables injected in EKS and the Web Identity Token File is created, but it can't assume the role created on Terraform.
I need to create a new role that my EKS app can assume, and grant certain S3 actions either to that bucket or to that role.
thanks for the help!

Grant Lambda access to private S3 bucket

We have a Lambda function that needs to be able to access a private S3 bucket.
The bucket has 'block all public access' enabled and the following resource policy:
{
"Version": "2012-10-17",
"Id": "Policy1620740846405",
"Statement": [
{
"Sid": "Stmt1620740843181",
"Effect": "Allow",
"Principal": {
"AWS": [
"arn:aws:iam::'''''':role/integrations-shopifyBucketOrdersFunctionRole-*****",
"arn:aws:iam::'''''':root",
"arn:aws:iam::''''''':user/transalisS3"
]
},
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::bucket/*",
"arn:aws:s3:::bucket"
]
}
]
}
I have also attached the AmazonS3FullAccess policy directly to the IAM role that the Lambda uses.
However, when the Lambda function tries to access the S3 bucket it gives an access denied error:
AccessDenied: Access Denied
An external system that connects to S3 using IAM User credentials also gets the same error when it tries to access the bucket.
Does anybody know what might be causing this error?
Below is the Lambda code that is erroring:
const AWS = require('aws-sdk');
const s3 = new AWS.S3();
exports.bucketOrders = async (event, context) => {
let response = {};
let eventBucket = event.Records[0].s3.bucket.name;
let eventFile = event.Records[0].s3.object.key;
let decodedKey = decodeURIComponent(eventFile);
try {
let objectData = await s3.getObject({
Bucket: eventBucket,
Key: decodedKey,
}).promise();
When the Lambda application is created there is an option to auto generate the IAM role, this role has a permission boundary which had an invalid resource attached - causing everything to fail.

Can't access images in S3 bucket using cognito identity

I'm testing to display images from an s3 bucket using javascript, prior to making this part of an application.
I have an s3 bucket (non-public), named for this post: IMAGE-BUCKET
Created an identity role : GET-IMAGE.
I have temporarily given full s3 access to GET-IMAGE role.
I have CORS defined for the bucket.
While testing I have disabled the browser cache.
3 issues:
Getting "403 Forbidden" response when images are accessed from the
html/script below.
If I make a particular image public, that image displays -- an issue with large # of images.
If I make the entire bucket public, images do not display
It seems the Cognito identity is not able to access the bucket, or there's an issue in the script below.
Also, setting the bucket public doesn't work either, unless each image is also set public. This bucket will be used privately, so this is only an issue while troubleshooting.
I have attached AmazonS3FullAccess to GET-IMAGE, I also added the following policy:
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AccessS3BucketIMAGEBUCKET",
"Action": [
"s3:GetObject",
"s3:PutObject",
"s3:ListBucket"
],
"Effect": "Allow",
"Resource": "*"
}
]
}
Using html and script from AWS documentation (modified):
<script src="https://sdk.amazonaws.com/js/aws-sdk-2.487.0.js"></script>
<script>
// Bucket the album viewer reads from.
var albumBucketName = 'IMAGE-BUCKET';
// Initialize the Amazon Cognito credentials provider for GET-IMAGE:
// No Logins map is supplied, so these resolve to unauthenticated-identity
// credentials for the pool.
AWS.config.region = 'us-east-1'; // Region
AWS.config.credentials = new AWS.CognitoIdentityCredentials({
IdentityPoolId: 'us-east-1:43ba4c15-ab2f-8880-93be-xxx',
});
// Create a new service object
// The bound params make every call on this client default to the bucket.
var s3 = new AWS.S3({
apiVersion: '2006-03-01',
params: { Bucket: albumBucketName }
});
// Join an array of HTML fragments into one newline-separated markup string.
function getHtml(fragments) {
  return fragments.join('\n');
}
// Show the photos that exist in an album.
// Lists every object in the bucket bound to the s3 service object and renders
// each one as an <img> inside the #viewer element.
function viewAlbum(albumName) {
var albumPhotosKey = '/';
// No params are passed here, so listObjects uses the Bucket bound on the
// service object and lists the whole bucket (no Prefix filter for albumName).
s3.listObjects(function (err, data) {
if (err) {
return alert('There was an error viewing your album: ' + err.message);
}
// 'this' references the AWS.Response instance that represents the response
var href = this.request.httpRequest.endpoint.href;
var bucketUrl = href + albumBucketName + '/';
// Build an <img> snippet per object; the URL is the plain (unsigned)
// bucket URL, so the object must be readable by the browser.
var photos = data.Contents.map(function (photo) {
var photoKey = photo.Key;
var photoUrl = bucketUrl + encodeURIComponent(photoKey);
return getHtml([
'<span>',
'<div>',
'<br/>',
'<img style="width:128px;height:128px;" src="' + photoUrl + '"/>',
'</div>',
'<div>',
'<span>',
photoKey.replace(albumPhotosKey, ''),
'</span>',
'</div>',
'</span>',
]);
});
var message = photos.length ?
'<p>The following photos are present.</p>' :
'<p>There are no photos in this album.</p>';
var htmlTemplate = [
'<div>',
'<button onclick="listAlbums()">',
'Back To Albums',
'</button>',
'</div>',
'<h2>',
'Album: ' + albumName,
'</h2>',
message,
'<div>',
getHtml(photos),
'</div>',
'<h2>',
'End of Album: ' + albumName,
'</h2>',
'<div>',
'<button onclick="listAlbums()">',
'Back To Albums',
'</button>',
'</div>',
]
document.getElementById('viewer').innerHTML = getHtml(htmlTemplate);
// NOTE(review): this unconditionally hides the first <img> on the page —
// presumably a placeholder workaround; verify it is intentional.
document.getElementsByTagName('img')[0].setAttribute('style', 'display:none;');
});
}
</script>
</head>
<body>
<h1>Photo Album Viewer</h1>
<!-- Fixed: <div id="viewer" /> is invalid HTML — div is not a void element,
     so the "self-closing" form is parsed as an open tag that swallows the
     following elements. Use an explicit closing tag. -->
<div id="viewer"></div>
<button onclick="viewAlbum('');">View All Images</button>
</body>
</html>
UPDATE:
If I grant public read in S3 Bucket Policy:
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "PublicRead",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::IMAGE-BUCKET/*"
}
]
}
It allows to access each image; solving the issue #2 and #3.
But this makes the bucket basically public.
If I change the Bucket policy to limit to the Cognito identity, changing the principal as follows, again I am not able to access images via the html/script, getting 403 errors.
"Principal": {
"AWS": "arn:aws:iam::547299998870:role/Cognito_GET-IMAGEIDUnauth_Role"
}
UPDATE:
I've been reading online, checking some of the other related posts ...
I've reduced it to the basic components; here's the latest configuration. The configuration should be as simple as giving access to the GET-IMAGE role, based on the documentation:
Under IAM Management Console > Roles > GET-IMAGE role (unauthenticated)
I added an inline policy:
{
"Sid": "VisualEditor2",
"Effect": "Allow",
"Action": ["s3:GetObject","s3:ListBucket"],
"Resource": "arn:aws:s3:::IMAGE-BUCKET/*"
}
I removed the Bucket policy -- this shouldn't be needed, the GET-IMAGE role already has access. Role trust is already included by default. HTML contains the credential:
IdentityPoolId: 'us-east-1:9bfadd6a-xxxx-41d4-xxxx-79ad7347xxa1
Those are the most basic components, nothing else should be needed. However, it does not work. I made 1 of the images public and that image is displayed, other images error with 403 Forbidden.
I've resolved the S3 access issue; I'm including all the settings and methods I used. To troubleshoot, I started testing with an actual AWS user, then stepped back to the Cognito identity. I included notes regarding access by an AWS user for reference. I also abandoned the AWS sample HTML code and used a simple short function to display output in the console, utilizing the getSignedUrl function. I'm not familiar with the AWS libraries, and finding getSignedUrl helped speed up testing and finally resolving the issue.
Used the following sample names throughout:
Cognito Role: GET-IMAGE
S3 Bucket: IMAGE-BUCKET
I'll go over both Cognito and AWS user access to S3, using HTML for simple demo and testing.
With Cognito:
SETTINGS:
Role: Create a Cognito Identity. For instructions and to create, follow this wizard:
https://console.aws.amazon.com/cognito/create/
Take a note of the sample code AWS provides after it's created-- you'll need the pool ID.
Permissions: Add Role level AND S3 level permissions
Role level: IAM > Roles > GET-IMAGE_Unauth_Role
Add (JSON) to both Auth and UnAuth Roles
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "s3Access",
"Effect": "Allow",
"Action": [
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::IMAGE-BUCKET/*"
]}
]
}
S3:
IMAGE-BUCKET > Permissions > Bucket Policy:
(JSON)
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "PublicRead",
"Effect": "Allow",
"Principal": {
"AWS": [
"arn:aws:iam::547999998899:user/anAWSUser",
"arn:aws:iam::547999998899:role/Cognito_GET-IMAGEIDUnauth_Role",
"arn:aws:iam::547999998899:role/Cognito_GET-IMAGEIDAuth_Role"
]
},
"Action": [
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::IMAGE-BUCKET/*"
]
}
]}
** Note: I also added an AWSUser, for credential version, for use in the next section "With Credentials"
CORS:
IMAGE-BUCKET> Permissions > CORS
<?xml version="1.0" encoding="UTF-8"?>
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<CORSRule>
<AllowedOrigin>*</AllowedOrigin>
<AllowedMethod>GET</AllowedMethod>
<AllowedMethod>HEAD</AllowedMethod>
<AllowedHeader>*</AllowedHeader>
</CORSRule>
</CORSConfiguration>
**Note: You can restrict the origin in the AllowedOrigin parameter.
HTML:
<!DOCTYPE html>
<html>
<head>
<script src="https://sdk.amazonaws.com/js/aws-sdk-2.487.0.js"></script>
<script>
// Replace IMAGE-BUCKET with your bucket name.
var BucketName = 'IMAGE-BUCKET';
// Cognito credentials (from Cognito ID creation sample code)
// No Logins map is supplied, so these are unauthenticated-identity credentials.
AWS.config.region = 'us-east-1'; // Region
AWS.config.credentials = new AWS.CognitoIdentityCredentials({
IdentityPoolId: 'us-east-1:9999996a-f099-99d4-b999-79a99999aaa1',
});
// Create a new service object
// Binding params makes every call on this client default to the bucket.
var s3 = new AWS.S3({
apiVersion: '2006-03-01',
params: { Bucket: BucketName }
});
// Test Function. REPLACE Key with a test file name from S3.
// Generates a short-lived pre-signed URL for one known object (so the private
// object can be fetched directly by the browser) and renders it in #viewer.
function show1() {
  var urlExpireSeconds = 180; // signed-URL validity window, in seconds
  var params = {
    Bucket: BucketName,
    Key: "20190815_file_name.jpg",
    Expires: urlExpireSeconds
  };
  var url = s3.getSignedUrl('getObject', params);
  console.log('The URL is', url);
  // Fixed: the original markup left a trailing '<div>' + '<span>' open and
  // never closed the outer span/div; emit balanced HTML instead.
  document.getElementById('viewer').innerHTML =
    '<span>' +
    '<div>' +
    '<br/>' +
    '<img style="width:128px;height:128px;" src="' + url + '"/>' +
    '</div>' +
    '</span>';
}
show1();
</script>
</head>
<body>
<h1>S3 Test Image Display</h1>
<div id="viewer" />
<button onclick="show1();">View Image</button>
</body>
</html>
With User Credentials
You can also use credentials to authenticate a user to access s3. In the javascript above,
comment out the cognito credentials and use the following instead:
//access key ID, Secret
var cred = new AWS.Credentials('AKXXX283988CCCAA-ACCESS-KEY','kKsCuq7a9WNohmOYY8SApewie77493LgV-SECRET');
AWS.config.credentials=cred;
To get the access key and secret from AWS console:
IAM > Users > an-AWSUser > Security-Credentials
Under "Access Keys", click "Create Access Key"
===================
Note that the trust policy for an unauthenticated role is automatically created by AWS when you create a Cognito ID Role; it doesn't have to be defined manually as mentioned before.
Also listbucket and bucket level resource permissions, as in "IMAGE-BUCKET", are not required; Getobject is all that's needed to access a file directly. In my case, I'm accessing images by key, do not need to list bucket contents.
I set both the Role and S3 bucket permissions; I did not test without the role permissions, bucket policy may be sufficient.
You are not defining the trust policy for your unauthenticated role correctly.
As per this documentation on cognito role trust and permissions, the trust policy for an unauthenticated role can be defined as follows:
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Federated": "cognito-identity.amazonaws.com"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"cognito-identity.amazonaws.com:aud": "YOUR_IDENTITY_POOL_ID"
},
"ForAnyValue:StringLike": {
"cognito-identity.amazonaws.com:amr": "unauthenticated"
}
}
}
]
}
When you use AWS.CognitoIdentityCredentials, under the hood your Cognito Identity Pool will first get a web identity id for your user. As you don't provide a login with an authenticated token from an identity provider such as Cognito User Pools, or Facebook, the id is for an unauthenticated web identity.
Cognito will then call the security token service's assumeRoleWithWebIdentity method on your behalf in order to get credentials with the permissions that you defined in the unauthenticated role's access policy that will allow the web identity to access the s3 bucket.
This is why the principal in the trust policy needs to be cognito-identity.amazonaws.com. It is to give cognito identity pools the permission to call the sts:AssumeRoleWithWebIdentity method on behalf of the web identity in order to obtain IAM credentials.
The access policy part of the role, which defines what unauthenticated users can actually do, will continue to be as you originally defined it in your post:
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AccessS3BucketIMAGEBUCKET",
"Action": [
"s3:GetObject",
"s3:PutObject",
"s3:ListBucket"
],
"Effect": "Allow",
"Resource": "*"
}
]
}
Update
I notice that the last inline policy you have posted for your unauthenticated role won't work for s3.listObjects(). It will return a 403 because it needs a slightly different resource statement to indicate the bucket itself, rather than the buckets content.
You can update your policy as follows:
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::IMAGE-BUCKET/*"
]
},
{
"Effect": "Allow",
"Action": [
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::IMAGE-BUCKET"
]
}
]
}

How to grant lambda permission to upload file to s3 bucket in `terraform`?

I have below lambda function configuration in TerraForm:
# Lambda packaged from an object in S3. The execution role is a hard-coded
# pre-existing ARN; that role must carry whatever S3 permissions the function
# needs at runtime (the reported PutObject AccessDenied comes from it lacking
# them).
resource "aws_lambda_function" "test_lambda" {
# filename = "crawler/dist/deploy.zip"
s3_bucket = "${var.s3-bucket}"
s3_key = "${aws_s3_bucket_object.file_upload.key}"
# source_code_hash = "${filebase64sha256("file.zip")}"
function_name = "quote-crawler"
role = "arn:aws:iam::773592622512:role/LambdaRole"
handler = "handler.handler"
source_code_hash = "${data.archive_file.zipit.output_base64sha256}"
runtime = "${var.runtime}"
timeout = 180
environment {
variables = {
foo = "bar"
}
}
}
when I run the lambda I got the error "errorMessage": "An error occurred (AccessDenied) when calling the PutObject operation: Access Denied", when it tries to upload file to s3 bucket. It seems that the lambda function doesn't have permission to access s3. TerraForm doc is not clear about how to configure them. The permission configuration panel doesn't appear on lambda console either. It seems that lambda that created by TerraForm has limited configuration for me to use. So how can I grant s3 permission to lambda?
To make it easy, you can do this in four steps:
create a role
create a policy
attach the policy to the role
attach the role to the lambda
Create role.
# Execution role for the Lambda function.
# The trust policy lets only the Lambda service assume this role.
resource "aws_iam_role" "role" {
name = "${var.env_prefix_name}-alb-logs-to-elk"
path = "/"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
Create a policy that has specified access to s3
#Created Policy for IAM Role
# Grants CloudWatch Logs access (for function logging) plus s3:* on every
# bucket. NOTE(review): "s3:*" on "arn:aws:s3:::*" is very broad — scope the
# statement to the specific bucket ARN(s) for production use.
resource "aws_iam_policy" "policy" {
name = "${var.env_prefix_name}-test-policy"
description = "A test policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:*"
],
"Resource": "arn:aws:logs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": "arn:aws:s3:::*"
}
]
}
EOF
}
Attached IAM Role and the new created Policy
# Bind the logs/S3 policy to the Lambda execution role.
resource "aws_iam_role_policy_attachment" "test-attach" {
role = "${aws_iam_role.role.name}"
policy_arn = "${aws_iam_policy.policy.arn}"
}
Now attached the role to Lamba source
# Same function as in the question, but now wired to the role created above
# (which carries the attached logs/S3 policy instead of the hard-coded ARN).
resource "aws_lambda_function" "test_lambda" {
# filename = "crawler/dist/deploy.zip"
s3_bucket = "${var.s3-bucket}"
s3_key = "${aws_s3_bucket_object.file_upload.key}"
# source_code_hash = "${filebase64sha256("file.zip")}"
function_name = "quote-crawler"
role = "${aws_iam_role.role.arn}"
handler = "handler.handler"
source_code_hash = "${data.archive_file.zipit.output_base64sha256}"
runtime = "${var.runtime}"
timeout = 180
environment {
variables = {
foo = "bar"
}
}
}
The IAM role associated to the function is not allowed to upload to S3.
The solution is to create an IAM policy allowing S3 access to your bucket (say read/write), which would look something like:
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "ListObjectsInBucket",
"Effect": "Allow",
"Action": ["s3:ListBucket"],
"Resource": ["arn:aws:s3:::bucket-name"]
},
{
"Sid": "AllObjectActions",
"Effect": "Allow",
"Action": "s3:*Object",
"Resource": ["arn:aws:s3:::bucket-name/*"]
}
]
}
Then, you need to attach this policy to the role used by your lambda function.
More info at:
https://www.terraform.io/docs/providers/aws/r/iam_role_policy.html
I would do it in the following order:
this code is using terraform 0.12.*
Create policy documents for assume role and s3 permissions
# Trust policy document: allows the Lambda service to assume the role.
data aws_iam_policy_document lambda_assume_role {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["lambda.amazonaws.com"]
}
}
}
# Permission document: upload objects (and set their ACLs) in the bucket.
data aws_iam_policy_document lambda_s3 {
statement {
actions = [
"s3:PutObject",
"s3:PutObjectAcl"
]
resources = [
"arn:aws:s3:::bucket/*" # object-level ARN; replace "bucket" with the real name
]
}
}
Create an IAM policy
# Managed policy wrapping the S3 permission document above.
resource aws_iam_policy lambda_s3 {
name = "lambda-s3-permissions"
description = "Contains S3 put permission for lambda"
policy = data.aws_iam_policy_document.lambda_s3.json
}
Create a role
# Execution role carrying the Lambda trust policy.
resource aws_iam_role lambda_role {
name = "lambda-role"
assume_role_policy = data.aws_iam_policy_document.lambda_assume_role.json
}
Attach policy to role
# Attach the S3 policy to the execution role.
resource aws_iam_role_policy_attachment lambda_s3 {
role = aws_iam_role.lambda_role.name
policy_arn = aws_iam_policy.lambda_s3.arn
}
Attach role to lambda
# The function itself, using the role created above (Terraform 0.12 syntax —
# bare expressions instead of "${...}" interpolation quoting).
resource "aws_lambda_function" "test_lambda" {
# filename = "crawler/dist/deploy.zip"
s3_bucket = var.s3-bucket
s3_key = aws_s3_bucket_object.file_upload.key
# source_code_hash = "${filebase64sha256("file.zip")}"
function_name = "quote-crawler"
role = aws_iam_role.lambda_role.arn
handler = "handler.handler"
source_code_hash = data.archive_file.zipit.output_base64sha256
runtime = var.runtime
timeout = 180
environment {
variables = {
foo = "bar"
}
}
}

Terraform s3 bucket notification having multiple topics

I am trying to write a flexible/dynamic resource for aws_s3_bucket_notification which may have variable topics for the specified s3 bucket. For one bucket, I may have just 2 prefixes and 2 topics and for others 4 or 5 and so on... I was thinking of using a map function which will store the "prefix" and "SNS ARN" for each prefix type as the events will be the same. I need to create a s3_bucket_notification which will have all the topics in it without having to manually write each of the topics. Any suggestions?
Example
# Sketch only (not valid HCL): the author wants topic_arn and filter_suffix
# filled in from each entry of a prefix-to-topic map.
resource "aws_s3_bucket_notification" "bucket_notification" {
bucket = "${aws_s3_bucket.bucket.id}"
topic {
topic_arn = "$map.value" ###prototype
events = ["s3:ObjectCreated:*"]
filter_suffix = "$map.key" ###prototype
}
}
If my understanding is correct, the code should look like this:
# Map of environment => topic suffix; one SNS topic is created per entry.
variable "sns_top" {
type = "map"
default = {
dev = "topic1"
uat = "topic2"
prod = "topic3"
}
}
variable "bucket_name" {
default = "my-tf-test-bucket-dfsfddsf"
}
data "aws_caller_identity" "current" {}
# One topic per map value, fanned out with count over the map size.
resource "aws_sns_topic" "sns_topic" {
count = "${length(keys(var.sns_top))}"
name = "sns-topic-${element(values(var.sns_top),count.index)}"
}
# Topic policy applied to each created topic. NOTE(review): Principal "AWS":"*"
# is wide open — tighten to the S3 service principal (plus a source-account or
# source-ARN condition) before real use.
resource "aws_sns_topic_policy" "custom" {
count = "${length(keys(var.sns_top))}"
arn = "${element(aws_sns_topic.sns_topic.*.arn, count.index)}"
policy = <<POLICY
{
"Version": "2012-10-17",
"Id": "default",
"Statement":[{
"Sid": "default",
"Effect": "Allow",
"Principal": {"AWS":"*"},
"Action": [
"SNS:GetTopicAttributes",
"SNS:SetTopicAttributes",
"SNS:AddPermission",
"SNS:RemovePermission",
"SNS:DeleteTopic"
],
"Resource": "${element(aws_sns_topic.sns_topic.*.arn, count.index)}"
}]
}
POLICY
depends_on = ["aws_sns_topic.sns_topic"]
}
# The bucket whose events will be fanned out to the topics.
resource "aws_s3_bucket" "bucket" {
bucket = "${var.bucket_name}"
}
# Bucket policy: lets this account's root principal put objects into the bucket.
data "aws_iam_policy_document" "default" {
statement {
effect = "Allow"
actions = [
"s3:PutObject",
]
resources = [
"${aws_s3_bucket.bucket.arn}/*",
]
principals {
type = "AWS"
identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
}
}
}
resource "aws_s3_bucket_policy" "default" {
bucket = "${aws_s3_bucket.bucket.id}"
policy = "${data.aws_iam_policy_document.default.json}"
}
# NOTE(review): using count here creates several notification resources for the
# SAME bucket; S3 keeps one notification configuration per bucket, so each
# instance overwrites the previous one — the error quoted below is consistent
# with this. Prefer a single resource containing multiple topic blocks.
resource "aws_s3_bucket_notification" "bucket_notification" {
count = "${length(keys(var.sns_top))}"
bucket = "${aws_s3_bucket.bucket.id}"
topic {
topic_arn = "${element(aws_sns_topic.sns_topic.*.arn, count.index)}"
events = ["s3:ObjectCreated:*"]
filter_suffix = "${element(keys(var.sns_top),count.index)}"
}
}
The code hits an error explained in the link below, but you should be fine to use it as a basis for further work, such as how to use count.index with maps.
* aws_s3_bucket_notification.bucket_notification.0: Error putting S3 notification configuration: InvalidArgument: Unable to validate the following destination configurations
Refer:
How do I avoid the error "Unable to validate the following destination configurations" when using S3 event notifications in CloudFormation?