Problem with write dataframe into blob storage in Azure - pandas

I am trying to write a pandas dataframe to blob storage in Azure with Databricks. I am able to make the connection to the blob storage, but I am not able to write my dataframe with the BlobClient class from "azure.storage.blob".
I've read tutorials on the internet, but they use deprecated libraries, and I can't find a current solution. I am using version "azure-storage-blob==12.12.0" and my code is as follows:
from azure.storage.blob import BlobClient
import pandas as pd
import io
output = io.StringIO()
head = ["col1" , "col2" , "col3"]
l = [[1 , 2 , 3],[4,5,6] , [8 , 7 , 9]]
df = pd.DataFrame (l , columns = head)
print(df)
output = df.to_csv (index_label="idx", encoding = "utf-8")
print(output)
accountName = "https://xxxxxx.blob.core.windows.net/"
accountKey = 'xxxxxx'
containerName = "mycontainer"
blobName = "test3.csv"
blobService = BlobClient(account_url=accountName, account_key=accountKey, container_name="mycontainer", blob_name = blobName, output) # <-----HERE IS THE PROBLEM
What am I doing wrong?
Do you have an example that can work with my current version please?
Many thanks in advance.
Regards
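For reference, with azure-storage-blob 12.x the account key is normally passed through the credential argument and the CSV text is written with upload_blob. A minimal sketch under that assumption (the account URL, key and container names below are placeholders, not verified against the poster's environment):
from azure.storage.blob import BlobClient
import pandas as pd

head = ["col1", "col2", "col3"]
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [8, 7, 9]], columns=head)

# Serialize the dataframe to CSV text in memory
csv_data = df.to_csv(index_label="idx", encoding="utf-8")

blob_client = BlobClient(
    account_url="https://xxxxxx.blob.core.windows.net/",  # placeholder
    container_name="mycontainer",
    blob_name="test3.csv",
    credential="xxxxxx",  # account key or SAS token (placeholder)
)
# Upload the CSV text; overwrite=True replaces an existing blob with the same name
blob_client.upload_blob(csv_data, overwrite=True)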

Related

ValueError: NaTType does not support timetuple when converting a dataframe to dictionary using to_dict('records')

I'm running this Flask app:
from flask import Flask, request, jsonify, render_template
from flask_cors import CORS, cross_origin
import json
import pandas as pd
# Create the app object
app = Flask(__name__)
cors = CORS(app, resources= {r"/*": {'origins' : "*"}})
# importing function for calculations
from Record_Matching import Matching
@app.route("/query", methods=['get'])
@cross_origin()
def query():
    # service_account_creds = request.json
    query1 = request.args.get('query1', type=str)
    query2 = request.args.get('query2', type=str)
    querycolumns = request.args.get('querycolumns')
    project_id = request.args.get('project_id', type=str)
    service_account_creds = request.args.get('service_account')
    SS = request.args.get('SS', type=float)
    TT = request.args.get('TT', type=float)
    result = Matching(query1, query2, SS, TT, service_account_creds, project_id, querycolumns)
    return result

if __name__ == "__main__":
    app.run(host="localhost", port=8080, debug=True)
and I'm importing the Matching function from this Python script:
import pandas as pd
from google.cloud import bigquery
from google.oauth2 import service_account
import recordlinkage
from recordlinkage.preprocessing import phonetic
from pandas.io.json import json_normalize
import uuid
from uuid import uuid4
import random
import string
import json
import ast
# Results to data frame function
def gcp2df(sql, client):
    query = client.query(sql)
    results = query.result()
    return results.to_dataframe()

# Exporting df to bigquery - table parameter example: "dataset.tablename"
# def insert(df, table):
#     client = bigquery.Client()
#     job_config = bigquery.LoadJobConfig(write_disposition=bigquery.job.WriteDisposition.WRITE_TRUNCATE)
#     return client.load_table_from_dataframe(df, table, job_config=job_config)
def pair(df1, df2, TT, querycolumns):
    # function to take pair from list and compare:
    L = querycolumns
    l = len(querycolumns)
    p1 = 0
    p2 = 1
    # To generate phonetics we need to make sure all names are in English,
    # thus we'll replace non-English words by random English strings
    df1[L[p1]] = df1[L[p1]].astype(str)
    df2[L[p2]] = df2[L[p2]].astype(str)
    for i in range(0, len(df1)):
        if df1[L[p1]][i].isascii() == False:
            df1[L[p1]][i] = ''.join(random.choices(string.ascii_lowercase, k=5))
    for i in range(0, len(df2)):
        if df2[L[p2]][i].isascii() == False:
            df2[L[p2]][i] = ''.join(random.choices(string.ascii_lowercase, k=5))
    compare = recordlinkage.Compare()
    df1["phonetic_given_name"] = phonetic(df1[L[p1]], "soundex")
    df2["phonetic_given_name"] = phonetic(df2[L[p2]], "soundex")
    df1["initials"] = (df1[L[p1]].str[0] + df1[L[p1]].str[-1])
    df2["initials"] = (df2[L[p2]].str[0] + df2[L[p2]].str[-1])
    indexer = recordlinkage.Index()
    indexer.block('initials')
    candidate_links = indexer.index(df1, df2)
    compare.exact('phonetic_given_name', 'phonetic_given_name', label="phonetic_given_name")
    # O(n): a function that uses two pointers to track consecutive pairs from the input list
    while p2 <= l:
        compare.string(L[p1], L[p2], method='jarowinkler', threshold=TT, label=L[p1])
        p1 += 2
        p2 += 2
    features = compare.compute(candidate_links, df1, df2)
    return features
def Matching(query1, query2, SS, TT, service_account_creds, project_id, querycolumns):
    service_account_creds = ast.literal_eval(service_account_creds)
    credentials = service_account.Credentials(service_account_creds, service_account_creds['client_email'],
                                              service_account_creds['token_uri'])
    job_config = bigquery.LoadJobConfig()
    client = bigquery.Client(project=project_id)
    SS = int(SS)
    TT = float(TT)
    df1 = gcp2df("""{}""".format(query1), client)
    df2 = gcp2df("""{}""".format(query2), client)
    querycolumns = json.loads(querycolumns)
    querycolumns = list(querycolumns.values())
    features = pair(df1, df2, TT, querycolumns)
    features['Similarity_score'] = features.sum(axis=1)
    features = features[features['Similarity_score'] >= SS].reset_index()
    final = features[['level_0', 'level_1']]
    final.rename(columns={'level_0': 'df1_index', 'level_1': 'df2_index'}, inplace=True)
    final['Unique_ID'] = [uuid.uuid4() for _ in range(len(final.index))]
    final['Unique_ID'] = final['Unique_ID'].astype(str)
    final['Similarity_Score'] = SS
    final_duplicates = final['df1_index'].value_counts().max()
    # insert(final, "test-ahmed-project.Record_Linkage.Matching_Indices")
    message = "Mission accomplished!, your highest number of duplicates is " + str(final_duplicates)
    return {'message': message, 'final': final.to_dict('records'), 'df1': df1.to_dict('records')}
I'm not sure why returning df1 as a dictionary raises this ValueError when I call the function from the Flask app, but when I run it in a Jupyter notebook using the same dataframe taken from BigQuery, it works just fine. Why does it not work in the Flask app?
I tried to_dict('records') to convert the dataframe to a dictionary.
Looking online, many resources suggest the error exists because the data contains missing values, but that shouldn't be the problem here, because converting the same dataframe to a dictionary in a Jupyter notebook works just fine.
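As a hedged aside: this error usually comes from pd.NaT (missing timestamps) hitting Flask's JSON encoder when the returned dict is serialized to the HTTP response, which is why the plain to_dict() call succeeds in a notebook. A minimal sketch of one common workaround, assuming the BigQuery result contains nullable datetime columns (the helper name below is only for illustration):
import pandas as pd

def df_to_json_safe_records(df):
    # Cast to object so NaT/NaN can be replaced, then swap missing values for None,
    # which Flask serializes as JSON null.
    clean = df.astype(object).where(pd.notnull(df), None)
    return clean.to_dict('records')

# e.g. at the end of Matching():
# return {'message': message, 'final': df_to_json_safe_records(final),
#         'df1': df_to_json_safe_records(df1)}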

H2O sparkling water write performance issue

When I write the data (100000 rows, 6 columns: 13.09 MB) in CSV format using Spark (3.1.2), it takes around 5 s. But if I include AutoML training and prediction using the Sparkling Water internal backend in Spark, it takes around 12 min. I have checked the datatype of the AutoML prediction; it is a PySpark DataFrame. Is this behavior due to the conversion between an H2OFrame and a Spark DataFrame, since Sparkling Water uses a wrapper around the H2OFrame which goes through the RDD/DataFrame API? Is there any way to improve the write speed? Any help on this issue would be really helpful. I have attached the sample script below.
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from pysparkling import *
from pysparkling.ml import *
import time
import logging

spark = SparkSession.builder.getOrCreate()
hc = H2OContext.getOrCreate()

my_logger = logging.getLogger(__name__)
my_logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s : %(name)s : %(levelname)s : %(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
my_logger.addHandler(stream_handler)

read_location = "XXX"
write_location = "XXX"

train_config = {
    "labelCol": "XXX",
    "maxModels": 2,
    "excludeAlgos": ["XGBoost", "StackedEnsemble", "GLM", "DeepLearning"],
    "maxRuntimeSecsPerModel": 300,
    "maxRuntimeSecs": 600
}

# bq config
my_logger.info("Reading CSV")
df = spark.read.format("csv").option("header", True).load(read_location)
my_logger.info("Read CSV")
my_logger.info("Printing input DF schema")
df.printSchema()
my_logger.info("Printed input DF schema")

my_logger.info("Starting model training")
autoMLEstimator = H2OAutoML(**train_config)
model = autoMLEstimator.fit(df)
my_logger.info("Model training finished")

my_logger.info("Starting prediction")
outputDf = model.transform(df)
my_logger.info("Prediction done")

my_logger.info("Printing output DF schema")
outputDf.printSchema()
my_logger.info("Printed output DF schema")

startTime = time.time()
my_logger.info("Starting to write")
outputDf.write.csv(write_location, header='true')
my_logger.info("Writing finished")
seconds = time.time() - startTime
my_logger.info("Total Time Taken: " + str(time.strftime("%H:%M:%S", time.gmtime(seconds))))

Copy/assign a Pandas dataframe based on their name in a for loop

I am relatively new to Python, and I am struggling to do the following:
I have a set of dataframes with sequential names (df_i), which I want to access in a for loop based on their name (as a string). How can I do that? e.g.
df_1 = pd.read_csv('...')
df_2 = pd.read_csv('...')
df_3 = pd.read_csv('...')
....
n_df = 3
for i in range(n_df):
    df_namestr = 'df_' + str(i+1)
    # ---------------------
    df_temp = df_namestr
    # ---------------------
    # Operate with df_temp. For i+1 = 1, df_temp should be df_1
Kind regards,
DF
You can try something like that:
for n in range(1, n_df+1):
    df_namestr = f"df_{n}"
    df_tmp = locals().get(df_namestr)
    if not isinstance(df_tmp, pd.DataFrame):
        continue
    print(df_namestr)
    print(df_tmp)
Refer to the documentation of locals() to know more.
Would it be better to approach accessing the multiple dataframes by reading them into a list?
You could put all the csv files required in a subfolder and read them all in. Then they are in a list and you can access each one as an item in that list.
Example:
import pandas as pd
import glob
path = r'/Users/myUsername/Documents/subFolder'
csv_files = glob.glob(path + "/*.csv")
dfs = []
for filename in csv_files:
    df = pd.read_csv(filename)
    dfs.append(df)
print(len(dfs))
print(dfs[1].head())
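A related option (a suggestion of mine, not from the answers above): keeping the frames in a dictionary keyed by name preserves the "access by string name" pattern without reaching into locals(). A minimal sketch, reusing the placeholder folder from the example above:
import glob
import os
import pandas as pd

path = r'/Users/myUsername/Documents/subFolder'  # placeholder folder, as above
dfs_by_name = {}
for filename in glob.glob(os.path.join(path, "*.csv")):
    # e.g. 'df_1.csv' -> key 'df_1'
    name = os.path.splitext(os.path.basename(filename))[0]
    dfs_by_name[name] = pd.read_csv(filename)

# Access by constructed name, mirroring the original loop
for i in range(1, len(dfs_by_name) + 1):
    df_temp = dfs_by_name.get(f"df_{i}")
    if df_temp is not None:
        print(f"df_{i}", df_temp.shape)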

Pandas combine multiple columns in a BQ table to generate payload for FB conversions api

I am reading from a BigQuery table to generate a payload to upload to the FB Conversions API.
cols=["payload","client_user_agent","event_source_url"]
I am copying the column values directly from the BQ table, as I am unable to print the full output of the dataframe in a notebook.
payload="{"pageDetail":{"pageName":"Confirmation","pageContentType":"cart","pageSiteSection":"cart","breadcrumbs":[{"title":"Home","url":"/en/home.html"},{"title":"Cart","url":"/cart"},{"title":"Confirmation","url":"/order-confirmation="}],"pageCategory":"Home","pageCategory1":"Cart","pageCategory2":"Confirmation","proBtbGlobalHeader":false},"orderDetails":{"hceid":"3b94a","orderConfirmed":true,"orderDate":"2021-01-15","orderId":"0123","unique":2,"pricingSummary":{"total":54.01},"items":[{"productId":"0456","quantity":1,"shippingAddress":{"postalCode":"V4N 3X3"},"promotion":{"voucherCode":null},"clickToInstall":{"eligible":false}},{"productId":"0789","quantity":1,"fulfillment":{"fulfillmentCost":""},"shippingAddress":{"postalCode":"A4N 3Y3"},"promotion":{"voucherCode":null},"clickToInstall":{"eligible":false}}],"billingAddress":{"postalCode":"M$X1A7"}},"event":{"type":"Load","page":"Confirmation","timestamp":1610706772998,"language":"English","url":"https://www"}}"
client_user_agent="Mozilla/5.0"
event_source_url= "https://www.def.com="
I need the value for email = ["orderDetails"]["hceid"] and value = ["orderDetails"]["pricingSummary"]["total"].
Initially, all the payload I wanted was in a single column, and I was able to achieve the uploads with the following code:
import time
from facebook_business.adobjects.serverside.event import Event
from facebook_business.adobjects.serverside.event_request import EventRequest
from facebook_business.adobjects.serverside.user_data import UserData
from facebook_business.adobjects.serverside.custom_data import CustomData
from facebook_business.api import FacebookAdsApi
import pandas as pd
import json

FacebookAdsApi.init(access_token=access_token)

query = '''SELECT JSON_EXTRACT(payload, '$') AS payload FROM `project.dataset.events` WHERE eventType = 'Page Load' AND pagename = "Confirmation" limit 1'''
df = pd.read_gbq(query, project_id=project, dialect='standard')
payload = df.to_dict(orient="records")

for i in payload:
    # print(type(i["payload"]))
    k = json.loads(i["payload"])
    email = k["orderDetails"]["hcemuid"]
    user_data = UserData(email)
    value = k["orderDetails"]["pricingSummary"]["total"]
    order_id = k["orderDetails"]["orderId"]
    custom_data = CustomData(
        currency='CAD',
        value=value)
    event = Event(
        event_name='Purchase',
        event_time=int(time.time()),
        user_data=user_data,
        custom_data=custom_data,
        event_id=order_id,
        data_processing_options=[])
    events = [event]
    # print(events)
    event_request = EventRequest(
        events=events,
        test_event_code='TEST8609',
        pixel_id=pixel_id)
    # print(event_request)
    a = event_request.execute()
    print(a)
Now there are two additional values: client_user_agent, which needs to be part of the user data, and event_source_url, which needs to be part of the event in the code above. They are present as two different columns in the GBQ table.
I have tried similar code as above for multiple columns, but I am receiving a
TypeError: Object of type Series is not JSON serializable
So I tried concatenating the columns and then creating a JSON-serializable object, but I am not able to do an upload.
Below is where I am stuck; I am not sure how to proceed further, so any inputs are appreciated.
import time
from facebook_business.adobjects.serverside.event import Event
from facebook_business.adobjects.serverside.event_request import EventRequest
from facebook_business.adobjects.serverside.user_data import UserData
from facebook_business.adobjects.serverside.custom_data import CustomData
from facebook_business.api import FacebookAdsApi
import pandas as pd
import json

FacebookAdsApi.init(access_token=access_token)

query = '''SELECT payload AS payload, location.userAgent as client_user_agent, location.referrer as event_source_url FROM `project.Dataset.events` WHERE eventType = 'Page Load' AND pagename = "Confirmation" limit 1'''
df = pd.read_gbq(query, project_id=project, dialect='standard')
df.reset_index(drop=True, inplace=True)
payload = df.to_dict(orient="records")
print(payload)

## cols = ['payload', 'client_user_agent', 'event_source_url']
## df['combined'] = df[cols].apply(lambda row: ','.join(row.values.astype(str)), axis=1)
## del df["payload"]
## del df["client"]
## del df["source"]
## payload = df.to_dict(orient="records")

# tried concatenating all columns in the dataframe but not able to create a valid json object for upload
columns = ['payload', 'client_user_agent', 'event_source_url']
df['payload'] = df['payload'].str.replace(r'}"$', '')
payload = df[columns].to_dict(orient='records')
print(payload)

## df = df.drop(columns=columns)
## pd.options.display.max_rows = 4000
# # print(payload)
# for i in payload:
#     print(i["payload"])
#     k = json.loads(i["payload"])
#     email = k["orderDetails"]["hcemuid"]
#     print(email)
I am following the instructions from this page: https://developers.facebook.com/docs/marketing-api/conversions-api
I have used the BigQuery JSON_EXTRACT_SCALAR function to extract data from the nested column instead of pandas, which is a relatively better solution for my scenario.
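For completeness, a hedged sketch (not the poster's final solution) of how the working per-row loop could carry the two extra columns: to_dict(orient="records") already yields one plain dict per row, so each value is a scalar rather than a Series. The client_user_agent and event_source_url parameter names below come from the Conversions API SDK and should be double-checked against the docs linked above:
for row in df.to_dict(orient="records"):
    k = json.loads(row["payload"])
    user_data = UserData(
        email=k["orderDetails"]["hcemuid"],
        client_user_agent=row["client_user_agent"])  # per-row scalar, not a Series
    custom_data = CustomData(
        currency='CAD',
        value=k["orderDetails"]["pricingSummary"]["total"])
    event = Event(
        event_name='Purchase',
        event_time=int(time.time()),
        event_source_url=row["event_source_url"],
        user_data=user_data,
        custom_data=custom_data,
        event_id=k["orderDetails"]["orderId"])
    EventRequest(events=[event], test_event_code='TEST8609',
                 pixel_id=pixel_id).execute()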

Read XML file to Pandas DataFrame [duplicate]

This question already has answers here:
How to convert an XML file to nice pandas dataframe?
(5 answers)
Closed 1 year ago.
Can someone please help convert the following XML file to Pandas dataframe:
<?xml version="1.0" encoding="UTF-8" ?>
<root>
  <bathrooms type="dict">
    <n35237 type="number">1.0</n35237>
    <n32238 type="number">3.0</n32238>
    <n44699 type="number">nan</n44699>
  </bathrooms>
  <price type="dict">
    <n35237 type="number">7020000.0</n35237>
    <n32238 type="number">10000000.0</n32238>
    <n44699 type="number">4128000.0</n44699>
  </price>
  <property_id type="dict">
    <n35237 type="number">35237.0</n35237>
    <n32238 type="number">32238.0</n32238>
    <n44699 type="number">44699.0</n44699>
  </property_id>
</root>
It should look like this (the expected output was posted as an image): one row per property, with columns property_id, price and bathrooms.
This is the code I have written:-
import pandas as pd
import xml.etree.ElementTree as ET

tree = ET.parse('real_state.xml')
root = tree.getroot()
dfcols = ['property_id', 'price', 'bathrooms']
df_xml = pd.DataFrame(columns=dfcols)

for node in root:
    property_id = node.attrib.get('property_id')
    price = node.attrib.get('price')
    bathrooms = node.attrib.get('bathrooms')
    df_xml = df_xml.append(
        pd.Series([property_id, price, bathrooms], index=dfcols),
        ignore_index=True)

print(df_xml)
I am getting None everywhere instead of the actual values. Can someone please tell me how it can be fixed? Thanks!
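As a side note (not part of the original answers): recent pandas versions also ship a built-in XML reader, which may be the shortest route for a file like this, although the nested "dict of numbered elements" layout will likely still need reshaping afterwards. A minimal sketch, assuming pandas >= 1.3 with lxml installed:
import pandas as pd

# Each of the <bathrooms>/<price>/<property_id> blocks becomes one row,
# with the nXXXXX child elements as columns; transpose/rename as needed.
raw = pd.read_xml('real_state.xml', xpath='./*')
print(raw)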
If the data is simple, like this, then you can do something like:
import pandas as pd
from lxml import objectify

xml = objectify.parse('Document1.xml')
root = xml.getroot()
bathrooms = [child.text for child in root['bathrooms'].getchildren()]
price = [child.text for child in root['price'].getchildren()]
property_id = [child.text for child in root['property_id'].getchildren()]

data = [bathrooms, price, property_id]
df = pd.DataFrame(data).T
df.columns = ['bathrooms', 'price', 'property_id']
bathrooms price property_id
0 1.0 7020000.0 35237.0
1 3.0 10000000.0 32238.0
2 nan 4128000.0 44699.0
If it is more complex, then a loop is better. You can do something like:
import pandas as pd
from lxml import objectify

xml = objectify.parse('Document1.xml')
root = xml.getroot()
data = []
for i in range(len(root.getchildren())):
    data.append([child.text for child in root.getchildren()[i].getchildren()])

df = pd.DataFrame(data).T
df.columns = ['bathrooms', 'price', 'property_id']
I have had success using this function from the xmltodict package:
import pandas as pd
import xmltodict

xmlDict = xmltodict.parse(xmlData)
df = pd.DataFrame.from_dict(xmlDict)
What I like about this, is I can easily do some dictionary manipulation in between parsing the xml and making my df. Also, it helps to explore the data as a dict if the structure is wily.
Hello all, I found another really easy way to solve this question.
reference: https://www.youtube.com/watch?v=WVrg5-cjr5k
import xml.etree.ElementTree as ET
import pandas as pd
import codecs

## open notebook and save your xml file to text.xml
with codecs.open('text.xml', 'r', encoding='utf8') as f:
    tt = f.read()

def xml2df(xml_data):
    root = ET.XML(xml_data)
    all_records = []
    for i, child in enumerate(root):
        record = {}
        for sub_child in child:
            record[sub_child.tag] = sub_child.text
        all_records.append(record)
    return pd.DataFrame(all_records)

df_xml1 = xml2df(tt)
print(df_xml1)
For a better understanding of ElementTree (ET), you can use the code underneath to see what is inside your XML:
import xml.etree.ElementTree as ET
import pandas as pd
import codecs

with codecs.open('text.xml', 'r', encoding='utf8') as f:
    tt = f.read()

root = ET.XML(tt)
print(type(root))
print(root[0])
for ele in root[0]:
    print(ele.tag + '////' + ele.text)
print(root[0][0].tag)
Once you finish running the program you can see the output underneath:
C:\Users\username\Documents\pycode\Scripts\python.exe C:/Users/username/PycharmProjects/DestinationLight/try.py
n35237 n32238 n44699
0 1.0 3.0 nan
1 7020000.0 10000000.0 4128000.0
2 35237.0 32238.0 44699.0
<class 'xml.etree.ElementTree.Element'>
<Element 'bathrooms' at 0x00000285006B6180>
n35237////1.0
n32238////3.0
n44699////nan
n35237
Process finished with exit code 0