Pass credentials / .json file stored in S3 bucket to GoogleServiceAccountClient - google-oauth

Here is the code I use to initialize the GoogleServiceAccountClient using the credentials from a JSON key file:
oauth2_client = oauth2.GoogleServiceAccountClient(key_file, oauth2.GetAPIScope('ad_manager'))
The key_file .json is stored in an S3 bucket.
Is there any way to pass the .json file (with credentials) stored in S3 to GoogleServiceAccountClient?

You can read the file from S3 and write it as a JSON file to your /tmp folder:
import json
from pathlib import Path

import boto3

def readFileFromS3(file_name):
    tmp_path = "/tmp/" + file_name
    file_path = Path(tmp_path)
    # Reuse a previously downloaded copy if it exists
    if file_path.is_file():
        return tmp_path
    s3 = boto3.resource(
        's3',
        aws_access_key_id=<AWS_ACCESS_KEY>,
        aws_secret_access_key=<AWS_SECRET>,
        region_name=<YOUR_REGION_NAME>
    )
    content_object = s3.Object(<BUCKET_NAME>, file_name)
    file_content = content_object.get()['Body'].read().decode('utf-8')
    json_content = json.loads(file_content)
    with open(tmp_path, 'w') as res_file:
        json.dump(json_content, res_file, indent=4)
    return tmp_path
Then use the path returned from the above function in GoogleServiceAccountClient
key_file = readFileFromS3(<key_file_name_in_s3>)
oauth2_client = oauth2.GoogleServiceAccountClient(key_file, oauth2.GetAPIScope('ad_manager'))
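If you do not need to inspect the JSON on the way through, a slightly simpler variant is to let boto3 stream the object straight to /tmp with Object.download_file and hand that path to the client. This is only a sketch using the same placeholder bucket/key names as above, and it assumes the oauth2 module here is googleads.oauth2:
from pathlib import Path

import boto3
from googleads import oauth2

def download_key_from_s3(file_name):
    # Hypothetical helper: fetch the service-account key into /tmp and return the local path
    tmp_path = "/tmp/" + file_name
    if not Path(tmp_path).is_file():
        boto3.resource('s3').Object(<BUCKET_NAME>, file_name).download_file(tmp_path)
    return tmp_path

key_file = download_key_from_s3(<key_file_name_in_s3>)
oauth2_client = oauth2.GoogleServiceAccountClient(key_file, oauth2.GetAPIScope('ad_manager'))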

Related

Extracting tar files from an S3 bucket to another S3 bucket using Python

We need to extract the contents of zip and tar files to another S3 bucket.
We have the code to extract the zip files working.
We need to use meta.client.upload_fileobj or meta.client.copy so that a multipart upload or copy is used when necessary.
import zipfile
from io import BytesIO

import boto3

def unzip_file(source_bucketname, source_file_name, target_bucketname):
    s3_resource = boto3.resource('s3')
    s3_client = boto3.client('s3')
    target_directory = source_file_name + '/'
    zip_obj = s3_resource.Object(
        bucket_name=source_bucketname, key=source_file_name)
    buffer = BytesIO(zip_obj.get()["Body"].read())
    with zipfile.ZipFile(buffer, mode='r', allowZip64=True) as z:
        for filename in z.namelist():
            file_info = z.getinfo(filename)
            s3_resource.meta.client.upload_fileobj(
                z.open(filename),
                Bucket=target_bucketname,
                Key=f'{source_file_name}/{filename}'
            )
We can't get the extraction of tar files to work.
import io
import tarfile

import boto3

def untar_file(source_bucketname, source_file_name, target_bucketname):
    s3_resource = boto3.resource('s3')
    s3_client = boto3.client('s3')
    target_directory = source_file_name + '/'
    s3_object = s3_client.get_object(Bucket=source_bucketname, Key=source_file_name)
    tar_file = s3_object['Body'].read()
    file_object = io.BytesIO(tar_file)
    with tarfile.open(fileobj=file_object, mode='r:gz') as z:
        for filename in z.getmembers():
            s3_resource.meta.client.upload_fileobj(
                filename,  # z.open(filename)
                Bucket=target_bucketname,
                Key=f'{source_file_name}/{filename}'
            )
The problem is specifying the filename object in the meta.client.upload_fileobj command.
We have tried z.open(filename)
We would be very grateful if anyone has any ideas.
Anon Coward answered this but the answer seems to have been deleted.
s3_resource.meta.client.upload_fileobj(
    filename,  # z.open(filename)
    Bucket=target_bucketname,
    Key=f'{source_file_name}/{filename}'
)
needs to be
s3_resource.meta.client.upload_fileobj(
    z.extractfile(filename),
    Bucket=target_bucketname,
    Key=f'{source_file_name}/{filename.name}'
)
The source file needs to be z.extractfile(filename) and the destination filename needs to be filename.name.
Many thanks Anon Coward
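Putting the fix together, a minimal sketch of the corrected untar_file could look like the following; skipping non-file members is an extra precaution added here, since extractfile() returns None for directory entries:
import io
import tarfile

import boto3

def untar_file(source_bucketname, source_file_name, target_bucketname):
    s3_resource = boto3.resource('s3')
    s3_client = boto3.client('s3')
    s3_object = s3_client.get_object(Bucket=source_bucketname, Key=source_file_name)
    file_object = io.BytesIO(s3_object['Body'].read())
    with tarfile.open(fileobj=file_object, mode='r:gz') as z:
        for member in z.getmembers():
            # extractfile() returns None for directories and other non-file members
            if not member.isfile():
                continue
            s3_resource.meta.client.upload_fileobj(
                z.extractfile(member),
                Bucket=target_bucketname,
                Key=f'{source_file_name}/{member.name}'
            )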

downloading file from S3 using boto3 key error

I am trying to download a joblib file from S3 but am getting errors with the key format.
This is my S3 path to the file:
"s3://v1/v2/v3/v4/model.joblib"
This is my code:
import boto3
bucketname = "v1"
key = "v2/v3/v4"
filename = "model.joblib"
s3 = boto3.resource('s3')
obj = s3.Object(bucketname, key)
body = obj.get()['label_model.joblib'].read()
Ultimately I want to be able to do:
from joblib import load
model = load("model.joblib")
The error I got:
NoSuchKey: An error occurred (NoSuchKey) when calling the GetObject operation: The specified key does not exist.
You are trying to access the object without including the filename in the key.
Your code is:
import boto3
bucketname = "v1"
key = "v2/v3/v4"
filename = "model.joblib"
s3 = boto3.resource('s3')
obj = s3.Object(bucketname, key)
body = obj.get()['label_model.joblib'].read()
But you need to add the filename to the key. Here is an example that downloads the file from S3:
import boto3

bucketname = "v1"
key = "v2/v3/v4"
filename = "model.joblib"
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucketname)
with open(filename, 'wb') as f:
    bucket.download_fileobj(f'{key}/{filename}', f)
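With the object downloaded, the load call from the question works against the local file. A minimal sketch, assuming the download above has completed in the current working directory:
from joblib import load

# filename was written by bucket.download_fileobj() above
model = load(filename)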

Python boto3 load model tar file from s3 and unpack it

I am using SageMaker and have a bunch of model.tar.gz files that I need to unpack and load in sklearn. I've been testing using list_objects with a delimiter to get to the tar.gz files:
response = s3.list_objects(
    Bucket = bucket,
    Prefix = 'aleks-weekly/models/',
    Delimiter = '.csv'
)

for i in response['Contents']:
    print(i['Key'])
And then I plan to extract with
import tarfile
tf = tarfile.open(model.read())
tf.extractall()
But how do I get the actual tar.gz file from S3 instead of some boto3 object?
You can download objects to files using s3.download_file(). This will make your code look like:
import os

import boto3

s3 = boto3.client('s3')
bucket = 'my-bukkit'
prefix = 'aleks-weekly/models/'

# List objects matching your criteria
response = s3.list_objects(
    Bucket = bucket,
    Prefix = prefix,
    Delimiter = '.csv'
)

# Iterate over each file found and download it
for i in response['Contents']:
    key = i['Key']
    dest = os.path.join('/tmp', key)
    # Make sure the local directory mirroring the key prefix exists
    os.makedirs(os.path.dirname(dest), exist_ok=True)
    print("Downloading file", key, "from bucket", bucket)
    s3.download_file(
        Bucket = bucket,
        Key = key,
        Filename = dest
    )
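To cover the "unpack and load in sklearn" part of the question, each downloaded archive can then be opened with tarfile and the extracted model loaded with joblib. This is only a sketch: it assumes dest is the local path of one archive downloaded by the loop above, and 'model.joblib' is a placeholder for whatever file the archive actually contains:
import os
import tarfile

from joblib import load

# dest is the local path of one archive downloaded by the loop above
extract_dir = dest.replace('.tar.gz', '')
with tarfile.open(dest, mode='r:gz') as tar:
    tar.extractall(path=extract_dir)

# 'model.joblib' is a placeholder for the actual file name inside the archive
model = load(os.path.join(extract_dir, 'model.joblib'))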

S3 Boto3 python - change all files acl to public read

I am trying to change the ACL of 500k files within an S3 bucket folder from 'private' to 'public-read'.
Is there any way to speed this up?
I am using the below snippet.
from boto3.session import Session
from multiprocessing.pool import ThreadPool

pool = ThreadPool(processes=100)
BUCKET_NAME = ""
aws_access_key_id = ""
aws_secret_access_key = ""
Prefix = 'pics/'

session = Session(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
_s3 = session.resource("s3")
_bucket = _s3.Bucket(BUCKET_NAME)

def upload(eachObject):
    eachObject.Acl().put(ACL='public-read')

counter = 0
filenames = []
for eachObject in _bucket.objects.filter(Prefix=Prefix):
    counter += 1
    filenames.append(eachObject)
    if counter % 100 == 0:
        pool.map(upload, filenames)
        print(counter)

if filenames:
    pool.map(upload, filenames)
As far as I can tell, without applying the ACL to the entire bucket, there is no way to apply the ACL to all items under the same prefix without iterating over each item, as below:
import boto3

bucketName = 'YOUR_BUCKET_NAME'
prefix = 'YOUR_FOLDER_PREFIX'
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucketName)
[obj.Acl().put(ACL='public-read') for obj in bucket.objects.filter(Prefix=prefix).all()]
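If the goal is mainly throughput, one option (a sketch, not a benchmarked solution; the bucket name is a placeholder) is to stream the paginated listing straight into a thread pool rather than batching keys by hand:
from concurrent.futures import ThreadPoolExecutor

import boto3

s3 = boto3.client('s3')
bucket_name = 'YOUR_BUCKET_NAME'
prefix = 'pics/'

def make_public(key):
    # One PutObjectAcl request per key; boto3 clients are generally thread-safe
    s3.put_object_acl(Bucket=bucket_name, Key=key, ACL='public-read')

paginator = s3.get_paginator('list_objects_v2')
with ThreadPoolExecutor(max_workers=100) as executor:
    for page in paginator.paginate(Bucket=bucket_name, Prefix=prefix):
        keys = [obj['Key'] for obj in page.get('Contents', [])]
        # Consume the iterator so any per-object errors surface here
        list(executor.map(make_public, keys))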

List directories in amazon S3 with AWS SDK

I am trying to list folders in S3:
string delimiter = "/";
string folder = "a/";

ListObjectsResponse r = s3Client.ListObjects(new Amazon.S3.Model.ListObjectsRequest()
{
    BucketName = BucketName,
    Prefix = folder,
    MaxKeys = 1000,
    Delimiter = delimiter
});
and I expect a list of directories such as:
a/Folder1
a/Folder2
....
a/FolderN
but my actual result is only 1 object:
'a1'
Folders are not treated as objects in S3.
Instead, you need to read the CommonPrefixes property of the response, which contains the subfolder prefixes.
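For comparison with the rest of this page, the same delimiter-based listing in Python with boto3 looks roughly like this (the bucket name is a placeholder); the "folders" come back under CommonPrefixes here as well:
import boto3

s3 = boto3.client('s3')
response = s3.list_objects_v2(Bucket='YOUR_BUCKET_NAME', Prefix='a/', Delimiter='/')

# Each entry is a dict like {'Prefix': 'a/Folder1/'}
for cp in response.get('CommonPrefixes', []):
    print(cp['Prefix'])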