TypeError: find_one_and_update() missing 1 required positional argument: 'update' - pymongo

I need help. I'm not sure what I'm doing wrong; I keep getting this error and I'm not sure why. Can anyone give me any advice?
TypeError: find_one_and_update() missing 1 required positional argument: 'update'
Here is my test code:
import pymongo
from pymongo import ReturnDocument
import datetime
from animalsCRUD import AnimalShelter
#username = "aacuser"
#password = "password"
insertRec = AnimalShelter("aacuser", "password")
locateRec = AnimalShelter("aacuser", "password")
updateRec = AnimalShelter("aacuser", "password")
deleteRec = AnimalShelter("aacuser", "password")
animal = ({"age_upon_outcome":"5 years", "animal_id":"A333333", "animal_type":"Dog",
"breed":"Derp", "color":"White",
"date_of_birth":"07/19/19", "datetime": datetime.datetime.now(), "name":"",
"outcome_subtype":"Foster",
"outcome_type":"Adoption", "sex_upon_outcome":"Intact Female", "location_lat":30.60784677,
"location_long":-97.35087807, "age_upon_outcome_in_weeks":64.24642857})
critter = {"animal_id":"A333333"}
changeCritter = ({"animal_id": "A333333"}, {'$set': {'animal_type': 'Cat'}})
print(insertRec.create(animal))
locateRec.locate(critter)
updateRec.update(changeCritter)
locateRec.locate(critter)
deleteRec.delete(critter)

Your update method takes two parameters, so unpack the tuple when you call it:
updateRec.update(changeCritter[0], changeCritter[1])
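For context, a minimal sketch of what such a wrapper might look like (the AnimalShelter internals here are an assumption, not your actual class), which makes it clear why the filter and the update document must arrive as two separate arguments:

# Hypothetical AnimalShelter.update, assuming it forwards to pymongo's
# find_one_and_update; the collection attribute name is a guess.
def update(self, query, new_values):
    return self.database.animals.find_one_and_update(
        query,       # filter document, e.g. {"animal_id": "A333333"}
        new_values,  # update document, e.g. {'$set': {'animal_type': 'Cat'}}
        return_document=ReturnDocument.AFTER)

Equivalently, updateRec.update(*changeCritter) unpacks the tuple in place.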

Related

Getting error in a python script when using QuickSight API calls to retrieve the value of user parameter selection

I am working on a python script which will use QS APIs to retrieve the user parameter selections but keep getting the below error:
parameters = response['Dashboard']['Parameters']
KeyError: 'Parameters'
If I try a different code to retrieve the datasets in my QS account, it works but the Parameters code doesn't. I think I am missing some configuration.
# Code to retrieve the parameters from a QS dashboard (which fails):
import boto3

quicksight = boto3.client('quicksight')
response = quicksight.describe_dashboard(
    AwsAccountId='99999999999',
    DashboardId='zzz-zzzz-zzzz'
)
parameters = response['Dashboard']['Parameters']
for parameter in parameters:
    print(parameter['Name'], ':', parameter['Value'])
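A defensive sketch (not a confirmed fix): inspect which keys the response actually contains before indexing into it, since 'Parameters' may simply not be a top-level key of response['Dashboard'] for this dashboard or API version:

dashboard = response.get('Dashboard', {})
print(dashboard.keys())  # see what describe_dashboard really returned
for parameter in dashboard.get('Parameters', []):
    print(parameter.get('Name'), ':', parameter.get('Value'))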
# Code to display the datasets in the QS account (which works):
import boto3
import json

account_id = '99999999999'
session = boto3.Session(profile_name='default')
qs_client = session.client('quicksight')
response = qs_client.list_data_sets(AwsAccountId=account_id, MaxResults=100)
results = response['DataSetSummaries']
while "NextToken" in response.keys():
    response = qs_client.list_data_sets(AwsAccountId=account_id, MaxResults=100,
                                        NextToken=response["NextToken"])
    results.extend(response["DataSetSummaries"])
for i in results:
    x = i['DataSetId']
    try:
        response = qs_client.describe_data_set(AwsAccountId=account_id, DataSetId=x)
        print("succeeded loading: {} for data set {}".format(x, response['DataSet']['Name']))
    except:
        print("failed loading: {}".format(x))

TypeError: super(type, obj): obj must be an instance or subtype of type - help me

from google.cloud import bigquery
import os

credentials_path = '....keyBigQuery.json'
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credentials_path
client = bigquery.Client()
table_id = '...VENTAS'
row_to_insert = [
    {u'ID_VENTA': 'E001-20',
     u'TIPO_DOCUMENTO': 'FACTURA',
     u'CLIENTE': 'LEOPOLDO',
     u'FECHA_EMISION': '12/10/2023',
     u'FORMA_COBRO': 'CONTADO',
     u'TOTAL_PRODUCTOS': 1,
     u'TOTAL_CUOTAS': 1,
     u'MONTO_TOTAL': 15520,
     u'ESTADO': 'CANCELADO'},
    {u'ID_VENTA': 'E001-21',
     u'TIPO_DOCUMENTO': 'FACTURA',
     u'CLIENTE': 'LEOPOLDO',
     u'FECHA_EMISION': '12/10/2023',
     u'FORMA_COBRO': 'CONTADO',
     u'TOTAL_PRODUCTOS': 1,
     u'TOTAL_CUOTAS': 1,
     u'MONTO_TOTAL': 15520,
     u'ESTADO': 'CANCELADO'}
]
client.insert_rows_json(table_id, row_to_insert)
Good evening,
I'm running some tests and I get this error:
TypeError: super(type, obj): obj must be an instance or subtype of type
I'd appreciate a possible solution.
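One hedged avenue (an assumption, not a confirmed diagnosis): this particular TypeError often appears after a module reload (for example, re-running cells in a notebook) or with mismatched google-cloud library versions. In a fresh interpreter, after reinstalling google-cloud-bigquery, constructing the client with explicit credentials sidesteps the environment-variable path:

# Hedged sketch, not a confirmed fix: build the client with explicit
# service-account credentials in a freshly started interpreter.
from google.cloud import bigquery
from google.oauth2 import service_account  # ships with google-auth

credentials = service_account.Credentials.from_service_account_file(
    '....keyBigQuery.json')  # truncated path kept from the question
client = bigquery.Client(credentials=credentials,
                         project=credentials.project_id)
errors = client.insert_rows_json('...VENTAS', row_to_insert)
if errors:
    print('insert_rows_json reported errors:', errors)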

How to add username and name columns to pandas dataframe with search_all_tweets lookup in python

I am trying to collect tweets from 2022 using the Twitter API. I can record the tweet_fields for the tweets, but I can't figure out how to add columns for the username and name (the user_fields) for each tweet.
I'm running the following code:
import requests
import os
import json
import tweepy
import pandas as pd
from datetime import timedelta
import datetime

bearer_token = "my_bearer_token_here"
keyword = "#WomeninSTEM"
start_time = "2022-01-01T12:01:00Z"
end_time = "2023-01-01T12:01:00Z"
client = tweepy.Client(bearer_token=bearer_token)
responses = client.search_all_tweets(query="#WomeninSTEM", max_results=500,
                                     start_time=start_time, end_time=end_time,
                                     user_fields=["username", "name"],
                                     tweet_fields=["in_reply_to_user_id", "author_id", "lang",
                                                   "public_metrics", "created_at", "conversation_id"])
# I can't get the username or name columns to work here.
column = []
for i in range(len(responses.data)):
    row = []
    Username = responses.data[i]["username"]
    row.append(Username)
    name = responses.data[i]["name"]
    row.append(name)
    text = responses.data[i].text
    row.append(text)
    favoriteCount = responses.data[i].public_metrics["like_count"]
    row.append(favoriteCount)
    retweet_count = responses.data[i].public_metrics["retweet_count"]
    row.append(retweet_count)
    reply_count = responses.data[i].public_metrics["reply_count"]
    row.append(reply_count)
    quote_count = responses.data[i].public_metrics["quote_count"]
    row.append(quote_count)
    created = responses.data[i].created_at
    row.append(created)
    ReplyTo = responses.data[i].text.split(" ")[0]
    row.append(ReplyTo)
    ReplyToUID = responses.data[i].in_reply_to_user_id
    row.append(ReplyToUID)
    ConversationID = responses.data[i]["conversation_id"]
    row.append(ConversationID)
    column.append(row)
data = pd.DataFrame(column)
Whenever I try to include username and name, I get this error: KeyError Traceback (most recent call last)
Assuming you're querying at https://api.twitter.com/2/tweets/[...], the response does not have a 'username' or a 'name' parameter; that's why you're getting a KeyError when trying to access them.
It does have an 'author_id' parameter, which you can use to perform an additional query at https://api.twitter.com/2/users/:id and retrieve 'username' and 'name'.
More info in the Twitter API v2 docs for those two endpoints.
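Alternatively, a minimal sketch using tweepy's expansions parameter, which asks the v2 endpoint to return the referenced User objects alongside the tweets so no second query is needed (columns trimmed here for brevity):

# Sketch: request the author_id expansion so responses.includes carries the
# User objects, then join users to tweets by author_id.
responses = client.search_all_tweets(
    query="#WomeninSTEM", max_results=500,
    start_time=start_time, end_time=end_time,
    expansions=["author_id"],
    user_fields=["username", "name"],
    tweet_fields=["author_id", "created_at", "public_metrics"])

users = {u.id: u for u in responses.includes["users"]}  # author_id -> User
rows = []
for tweet in responses.data:
    user = users[tweet.author_id]
    rows.append([user.username, user.name, tweet.text,
                 tweet.public_metrics["like_count"], tweet.created_at])
data = pd.DataFrame(rows, columns=["username", "name", "text",
                                   "like_count", "created_at"])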

How can I use pandas.read_sql on an async connection?

I am trying to do the asynchronous equivalent of
engine = create_engine('sqlite:///./test.db')
stmt = session.query(MyTable)
data = pd.read_sql(stmt, engine)
but it fails with the error AttributeError: 'AsyncConnection' object has no attribute 'cursor'.
What would be the right way to make this work?
asyn_engine = create_async_engine('sqlite+aiosqlite:///./test.db')
stmt = select(MyTable)
data = pd.read_sql(stmt, asyn_engine)
This code, in principle, works:
import asyncio

# Make the connection the first argument so pd.read_sql is compatible
# with conn.run_sync()
def _read_sql(con, stmt):
    return pd.read_sql(stmt, con)

async def get_df(stmt, engine):
    async with engine.begin() as conn:
        data = await conn.run_sync(_read_sql, stmt)
        return data

asyn_engine = create_async_engine('sqlite+aiosqlite:///./test.db')
stmt = select(MyTable)
data = asyncio.run(get_df(stmt, asyn_engine))
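Note: if this runs inside an already-running event loop (for example, in a Jupyter notebook), asyncio.run() will raise a RuntimeError; in that case, await get_df(stmt, asyn_engine) directly instead.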

403 Access Denied Error while creating a dataset in DOMO

I'm getting a 403 Access Denied error while trying to create a dataset in DOMO. Anyone who is good with DOMO, please help me.
import logging
from pydomo import Domo
from pydomo.datasets import DataSetRequest, Schema, Column, ColumnType, Policy
from pydomo.datasets import PolicyFilter, FilterOperator, PolicyType, Sorting

client_id = ''
client_secret_code = ''
data_set_id = ''
api_host = 'api.domo.com'
domo = Domo(client_id, client_secret_code, logger_name='foo', log_level=logging.INFO,
            api_host=api_host)
data_set_name = 'Testing'
data_set_description = 'Test_to_update_schema'
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
domo = Domo(client_id, client_secret_code, logger_name='foo', log_level=logging.INFO,
            api_host=api_host)
dsr = DataSetRequest()
dsr.name = data_set_name
dsr.description = data_set_description
dsr.schema = Schema([Column(ColumnType.STRING, 'Id'),
                     Column(ColumnType.STRING, 'Publisher_Name')])
dataset = domo.datasets.create(dsr)
print(dataset)
This is an authorization error: either the client_id or the client_secret_code is wrong, or maybe both.
I had the same error in .NET when using Domo to connect and get data from a dataset. I fixed it by creating a base64 string and passing it for auth, for example the Authorization header is Basic:
"Basic", Convert.ToBase64String(Encoding.UTF8.GetBytes($"{clientId}:{clientSecret}"))
I am not sure how this goes in Python, but create a base64 string and pass it for auth.
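In Python, a hedged sketch of that same credential check (the token endpoint and scope are taken from Domo's public OAuth docs; verify them for your account):

# Sketch: request an OAuth token from Domo with Basic auth to verify the
# client_id/client_secret pair; a 401/403 here points at bad credentials.
import base64
import requests

client_id = ''      # left blank, as in the question
client_secret = ''

basic = base64.b64encode(f"{client_id}:{client_secret}".encode()).decode()
resp = requests.get(
    "https://api.domo.com/oauth/token",
    params={"grant_type": "client_credentials", "scope": "data"},
    headers={"Authorization": f"Basic {basic}"},
)
print(resp.status_code, resp.text)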