So I wrote some OOP code that lets you deposit money into a bank and also withdraw it. I got the deposit part working, but I'm not able to withdraw money from the bank. I would appreciate a fixed answer. Thanks!
class Bank:
def __init__(self,name,accountNumber,totalBalance):
self.name = name
self.accountNumber = {}
self.totalBalance = {}
def deposit(self):
print("What is your account number?")
x = input()
print("What balance are you depositing?")
y = input()
self.accountNumber[x] = y
print(self.accountNumber)
def withdraw(self):
print("What is your account number?")
s = int(input())
print("What balance are you withdrawing?")
f = int(input())
self.accountNumber[s] = self.accountNumber[s] - f
print(self.accountNumber)
Bank1 = Bank("MONEYYYYY",1234,53)
Bank1.deposit()
Bank1.withdraw()
If you know why it's not working, I would appreciate it if you could tell me why and fix it, thanks!
There are other problems with the design of your solution, but to address this particular one: you are subtracting the withdrawal amount from self.accountNumber[s] instead of from self.totalBalance, and you are storing the deposit amount in that same incorrect field. Note also that deposit() keeps the account number and amount as strings (input() returns a string) while withdraw() converts them to int, so the dictionary keys and the arithmetic don't match; the corrected version below converts both inputs to int and uses in instead of the Python 2-only has_key().
class Bank:
def __init__(self,name,accountNumber,totalBalance):
self.name = name
self.accountNumber = {}
self.totalBalance = {}
def deposit(self):
print("What is your account number?")
x = int(input())
print("What balance are you depositing?")
y = int(input())
if x not in self.totalBalance:  # has_key() is Python 2 only
self.totalBalance[x] = 0
self.totalBalance[x] += y
print(self.totalBalance)
def withdraw(self):
print("What is your account number?")
s = int(input())
print("What balance are you withdrawing?")
f = int(input())
if s not in self.totalBalance:  # has_key() is Python 2 only
self.totalBalance[s] = 0
self.totalBalance[s] = self.totalBalance[s] - f
print(self.totalBalance)
Bank1 = Bank("MONEYYYYY",1234,53)
Bank1.deposit()
Bank1.withdraw()
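As a side note on the design problems mentioned above, one common refactor (just a sketch, not the original answer's code) is to pass the account number and amount as arguments instead of calling input() inside the methods. That keeps all balances in a single dict and makes the class easy to test:
import unittest  # optional, only to show the class is now testable

class Bank:
    def __init__(self, name):
        self.name = name
        self.totalBalance = {}  # maps account number -> current balance

    def deposit(self, account, amount):
        self.totalBalance[account] = self.totalBalance.get(account, 0) + amount
        return self.totalBalance[account]

    def withdraw(self, account, amount):
        self.totalBalance[account] = self.totalBalance.get(account, 0) - amount
        return self.totalBalance[account]

bank = Bank("MONEYYYYY")
bank.deposit(1234, 53)
print(bank.withdraw(1234, 20))  # prints 33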
Hi everyone, so I have a DataFrame of Pokemon data:
data = pd.read_csv('pokemon.csv')
I'm only interested in two columns, 'type1' and 'type2' (type2 can be null, just like in the original video game). What I need is a single DataFrame with the count of Pokemon per type, one column for type1 and one for type2 (the screenshots of the expected result and of data.type1 / data.type2 are omitted here). So basically I need to build that single DataFrame from those two columns.
I've coded this, trying to get two DataFrames that I can turn into the final one I'm asked to produce:
tabla = {}
def contar(tipo):
buscando=tipo
if tipo == np.NaN:
pass
else:
if tipo in tabla:
tabla[tipo] += 1
else:
tabla[tipo] = 1
tabla2 = {}
def contar2(tipo):
buscando=tipo
if tipo == np.NaN:
pass
else:
if tipo in tabla2:
tabla2[tipo] += 1
else:
tabla2[tipo] = 1
def reset_tabla():
tabla = {}
tabla2 = {}
data['type1'].apply(contar)
df_type1 = pd.DataFrame.from_dict(tabla, orient='index')
reset_tabla()
data['type2'].apply(contar2)
df_type2 = pd.DataFrame.from_dict(tabla2, orient='index')
df_types = pd.concat([df_type1, df_type2])
df_type1
So with the above code I get the data I want, but not in the shape I need.
I expected a single table with one row per type and two count columns. Instead, the output stacks the two counts one after the other, so each type shows up twice because of the two type columns (screenshots omitted). I think what I am doing wrong is the concat, because type1 and type2 each look fine separately (screenshots omitted).
Finally, if you know how to combine these two DataFrames, or you think you can solve this problem in a better way, let me know.
Thank you all :).
I've solved this issue, so if it's useful for somebody the solution is here:
tabla = {}
def contar(tipo):
buscando=tipo
if tipo in tabla:
tabla[tipo] += 1
else:
tabla[tipo] = 1
tabla2 = {}
def contar2(tipo):
buscando=tipo
if tipo == np.NaN:
pass
else:
if tipo in tabla2:
tabla2[tipo] += 1
else:
tabla2[tipo] = 1
def reset_tabla():
global tabla, tabla2  # without global, this would only create local names
tabla = {}
tabla2 = {}
reset_tabla()
data['type1'].apply(contar)
data['type2'].apply(contar2)
for x in list(tabla2.keys()):  # copy the keys so we can delete while iterating
if type(x) == float:  # NaN keys coming from empty type2 values
del tabla2[x]
types = {"type1": tabla,
"type2": tabla2}
df_types = pd.DataFrame(types)
df_types
So I get the combined counts DataFrame I was after (screenshot omitted).
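For what it's worth, a shorter way to build the same count table (just a sketch, assuming the same 'type1' / 'type2' column names; value_counts() skips NaN by default, so the float-key cleanup isn't needed) would be:
import pandas as pd

data = pd.read_csv('pokemon.csv')
# count occurrences per type in each column and align them on the type index
df_types = pd.concat([data['type1'].value_counts(),
                      data['type2'].value_counts()], axis=1)
df_types.columns = ['type1', 'type2']
df_types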
The piece of code below works fine for a smaller DataFrame, since it brings all the data into driver memory, but I want to leverage Spark's distributed computing. Can someone help me with how to leverage PySpark's capabilities?
def topNSuggestion(row, noSuggestionLabel, n):
#create output suggestion list
suggestion = [noSuggestionLabel] * n
#Create data frame for row received as an input to this method
suggestedNameDataFrame = pd.DataFrame({'name':row.index})
suggestedCountDataFrame = pd.DataFrame([eachRow for eachRow in row.values], columns=['countOrder','countQuantity'])
suggestedNameCountDataFrame = pd.concat([suggestedNameDataFrame,suggestedCountDataFrame], axis=1)
#Sort suggestion data frame on number of orders and quantity
sortSuggestedNameCountDataFrame = suggestedNameCountDataFrame.sort_values(by=['countOrder','countQuantity'], ascending=False)
sortSuggestedNameCountDataFrame = sortSuggestedNameCountDataFrame.reset_index(drop=True)
countProduct = sortSuggestedNameCountDataFrame.shape[0]
#Update suggestion list only if number of orders are more than zero else NO suggestions
countSuggestion = 0
while(countSuggestion < countProduct and countSuggestion < n):
if sortSuggestedNameCountDataFrame.countOrder[countSuggestion] > 0:
suggestion[countSuggestion] = str(sortSuggestedNameCountDataFrame.name[countSuggestion])
countSuggestion += 1
return suggestion
Here is my main code, which processes each row of the DataFrame. I am using the DataFrame.collect() method, which defeats the benefits of distributed computing.
noSuggestionLabel = 'No Suggestion'
numberOfSuggestions = 5
#create product suggest data frame
customerProductSuggestDataFrame = aggregateQuantityFilterDataFrame.drop_duplicates(['CustomerId', 'ProductId', 'ProductName'])
customerProductSuggestDataFrame = customerProductSuggestDataFrame.drop('STORE_NUMBER', 'OrderId', 'AmountPurchased', 'ProductCode')
customerProductSuggestDataFrame = customerProductSuggestDataFrame.sort('CustomerId','ProductId')
for i in range(numberOfSuggestions):
suggestion = 'Suggestion' + str(i+1) + 'ItemDescription'
customerProductSuggestDataFrame = customerProductSuggestDataFrame.withColumn(suggestion, F.lit(noSuggestionLabel))
previousCustomerId = 0
#Process each product suggestion
customerProductSuggestDataFrameCollect = customerProductSuggestDataFrame.collect()
for eachCustomerProductSuggest in customerProductSuggestDataFrameCollect:
if previousCustomerId != eachCustomerProductSuggest.CustomerId:
currentCustomerId = eachCustomerProductSuggest.CustomerId
#Filter customer level purchase history info
eachCustomerAggregateQuantityDataFrame = aggregateQuantityFilterDataFrame.filter(aggregateQuantityFilterDataFrame.CustomerId == eachCustomerProductSuggest.CustomerId)
#Create product matrix for each customer
#Get all unique product codes from store purchase history info for this customer
uniqueProducts = list(eachCustomerAggregateQuantityDataFrame.select('ProductId').distinct().toPandas()['ProductId'])
uniqueProducts.sort()
#create product ranking data frame with rows and columns as unique product code only
productDataFrame = pd.DataFrame(index=uniqueProducts,columns=uniqueProducts)
for index,eachRow in productDataFrame.iterrows():
for col in productDataFrame.columns:
productDataFrame[index][col] = dict({'countOrder': 0, 'countQuantity':0})
previousOrderId = 0
eachCustomerAggregateQuantityDataFrameCollect = eachCustomerAggregateQuantityDataFrame.collect()
for eachCustomerOrderAggr in eachCustomerAggregateQuantityDataFrameCollect:
if previousOrderId != eachCustomerOrderAggr.OrderId:
productQuantityDict = {}
previousOrderId = eachCustomerOrderAggr.OrderId
#Iterate through all products to process previous products in the same order
for previousProductOrder in productQuantityDict:
#Only if not same product
if previousProductOrder != eachCustomerOrderAggr.ProductId:
#Update number of orders for each product in product matrix
productDataFrame.loc[eachCustomerOrderAggr.ProductId,previousProductOrder]['countOrder'] = productDataFrame.loc[eachCustomerOrderAggr.ProductId,previousProductOrder]['countOrder'] + 1
productDataFrame.loc[previousProductOrder,eachCustomerOrderAggr.ProductId]['countOrder'] = productDataFrame.loc[previousProductOrder,eachCustomerOrderAggr.ProductId]['countOrder'] + 1
#Update quantity for each product in product matrix
productDataFrame.loc[eachCustomerOrderAggr.ProductId,previousProductOrder]['countQuantity'] = productDataFrame.loc[eachCustomerOrderAggr.ProductId,previousProductOrder]['countQuantity'] + productQuantityDict[previousProductOrder]
productDataFrame.loc[previousProductOrder,eachCustomerOrderAggr.ProductId]['countQuantity'] = productDataFrame.loc[previousProductOrder,eachCustomerOrderAggr.ProductId]['countQuantity'] + eachCustomerOrderAggr.AmountPurchased
#Add product that is processed to product list
productQuantityDict[eachCustomerOrderAggr.ProductId] = eachCustomerOrderAggr.AmountPurchased
#Create dataframe with top n suggestions for each product
productSuggestion = productDataFrame.apply(lambda row:topNSuggestion(row, noSuggestionLabel, n=numberOfSuggestions), axis=1)
#Add Suggestions created for each ProductId to current customer
if currentCustomerId == eachCustomerProductSuggest.CustomerId:
eachSuggestionList = productSuggestion.loc[eachCustomerProductSuggest.ProductId]
#Append suggestions to product suggestion data frame
count = 1
for eachSuggestion in eachSuggestionList:
suggestion = 'Suggestion' + str(count) + 'ItemDescription'
if eachSuggestion != noSuggestionLabel:
eachSuggestionProductName = productInfoDataFrame.filter(productInfoDataFrame.ProductId == eachSuggestion).collect()[0]['ProductName']
else:
eachSuggestionProductName = str(eachSuggestion)
customerProductSuggestDataFrame = customerProductSuggestDataFrame.withColumn(suggestion, F.when((F.col("CustomerId") == eachCustomerProductSuggest.CustomerId)&(F.col("ProductId") == eachCustomerProductSuggest.ProductId), F.lit(eachSuggestionProductName)).otherwise(F.col(suggestion)))
count += 1
previousCustomerId = eachCustomerProductSuggest.CustomerId
Any help to point me in the right direction is appreciated.
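One way to keep the per-customer work on the executors instead of collecting everything to the driver is groupBy().applyInPandas() (Spark 3.0+, requires pyarrow): Spark hands each customer's rows to a pandas function in parallel. The sketch below is only an outline of the idea, not a drop-in port of the logic above; the schema types and the body of suggest_for_customer are assumptions you would need to adapt.
import pandas as pd
from pyspark.sql import types as T

# Result schema for the per-customer suggestions (types are assumed here).
schema = T.StructType([
    T.StructField("CustomerId", T.StringType()),
    T.StructField("ProductId", T.StringType()),
    T.StructField("Suggestions", T.ArrayType(T.StringType())),
])

def suggest_for_customer(pdf: pd.DataFrame) -> pd.DataFrame:
    # pdf contains all rows of aggregateQuantityFilterDataFrame for one customer,
    # so the existing pandas-based co-occurrence logic (productDataFrame,
    # topNSuggestion, ...) can run here on an executor instead of on the driver.
    rows = []
    for product_id in pdf["ProductId"].unique():
        rows.append({"CustomerId": str(pdf["CustomerId"].iloc[0]),
                     "ProductId": str(product_id),
                     "Suggestions": []})  # placeholder for the topNSuggestion output
    return pd.DataFrame(rows)

suggestions = (aggregateQuantityFilterDataFrame
               .groupBy("CustomerId")
               .applyInPandas(suggest_for_customer, schema=schema))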
I'm using the Oanda API to automate trading strategies. I have a 'price' error that only occurs when selecting some instruments, such as XAG (silver); my guess is that there is a classification difference, but Oanda has yet to answer on the matter.
The error does not occur when selecting Forex pairs.
If anyone has had such issues in the past and managed to solve them, I'll be happy to hear from them.
PS: I'm UK-based and have access to most products, including CFDs.
class SMABollTrader(tpqoa.tpqoa):
def __init__(self, conf_file, instrument, bar_length, SMA, dev, SMA_S, SMA_L, units):
super().__init__(conf_file)
self.instrument = instrument
self.bar_length = pd.to_timedelta(bar_length)
self.tick_data = pd.DataFrame()
self.raw_data = None
self.data = None
self.last_bar = None
self.units = units
self.position = 0
self.profits = []
self.price = []
#*****************add strategy-specific attributes here******************
self.SMA = SMA
self.dev = dev
self.SMA_S = SMA_S
self.SMA_L = SMA_L
#************************************************************************
def get_most_recent(self, days = 5):
while True:
time.sleep(2)
now = datetime.utcnow()
now = now - timedelta(microseconds = now.microsecond)
past = now - timedelta(days = days)
df = self.get_history(instrument = self.instrument, start = past, end = now,
granularity = "S5", price = "M", localize = False).c.dropna().to_frame()
df.rename(columns = {"c":self.instrument}, inplace = True)
df = df.resample(self.bar_length, label = "right").last().dropna().iloc[:-1]
self.raw_data = df.copy()
self.last_bar = self.raw_data.index[-1]
if pd.to_datetime(datetime.utcnow()).tz_localize("UTC") - self.last_bar < self.bar_length:
break
def on_success(self, time, bid, ask):
print(self.ticks, end = " ")
recent_tick = pd.to_datetime(time)
df = pd.DataFrame({self.instrument:(ask + bid)/2},
index = [recent_tick])
self.tick_data = self.tick_data.append(df)
if recent_tick - self.last_bar > self.bar_length:
self.resample_and_join()
self.define_strategy()
self.execute_trades()
def resample_and_join(self):
self.raw_data = self.raw_data.append(self.tick_data.resample(self.bar_length,
label="right").last().ffill().iloc[:-1])
self.tick_data = self.tick_data.iloc[-1:]
self.last_bar = self.raw_data.index[-1]
def define_strategy(self): # "strategy-specific"
df = self.raw_data.copy()
#******************** define your strategy here ************************
df["SMA"] = df[self.instrument].rolling(self.SMA).mean()
df["Lower"] = df["SMA"] - df[self.instrument].rolling(self.SMA).std() * self.dev
df["Upper"] = df["SMA"] + df[self.instrument].rolling(self.SMA).std() * self.dev
df["distance"] = df[self.instrument] - df.SMA
df["SMA_S"] = df[self.instrument].rolling(self.SMA_S).mean()
df["SMA_L"] = df[self.instrument].rolling(self.SMA_L).mean()
df["position"] = np.where(df[self.instrument] < df.Lower) and np.where(df["SMA_S"] > df["SMA_L"] ,1,np.nan)
df["position"] = np.where(df[self.instrument] > df.Upper) and np.where(df["SMA_S"] < df["SMA_L"], -1, df["position"])
df["position"] = np.where(df.distance * df.distance.shift(1) < 0, 0, df["position"])
df["position"] = df.position.ffill().fillna(0)
self.data = df.copy()
#***********************************************************************
def execute_trades(self):
if self.data["position"].iloc[-1] == 1:
if self.position == 0 or self.position is None:
order = self.create_order(self.instrument, self.units, suppress = True, ret = True)
self.report_trade(order, "GOING LONG")
elif self.position == -1:
order = self.create_order(self.instrument, self.units * 2, suppress = True, ret = True)
self.report_trade(order, "GOING LONG")
self.position = 1
elif self.data["position"].iloc[-1] == -1:
if self.position == 0:
order = self.create_order(self.instrument, -self.units, suppress = True, ret = True)
self.report_trade(order, "GOING SHORT")
elif self.position == 1:
order = self.create_order(self.instrument, -self.units * 2, suppress = True, ret = True)
self.report_trade(order, "GOING SHORT")
self.position = -1
elif self.data["position"].iloc[-1] == 0:
if self.position == -1:
order = self.create_order(self.instrument, self.units, suppress = True, ret = True)
self.report_trade(order, "GOING NEUTRAL")
elif self.position == 1:
order = self.create_order(self.instrument, -self.units, suppress = True, ret = True)
self.report_trade(order, "GOING NEUTRAL")
self.position = 0
def report_trade(self, order, going):
time = order["time"]
units = order["units"]
price = order["price"]
pl = float(order["pl"])
self.profits.append(pl)
cumpl = sum(self.profits)
print("\n" + 100* "-")
print("{} | {}".format(time, going))
print("{} | units = {} | price = {} | P&L = {} | Cum P&L = {}".format(time, units, price, pl, cumpl))
print(100 * "-" + "\n")
trader = SMABollTrader("oanda.cfg", "EUR_GBP", "15m", SMA = 82, dev = 4, SMA_S = 38, SMA_L = 135, units = 100000)
trader.get_most_recent()
trader.stream_data(trader.instrument, stop = None )
if trader.position != 0: # if we have a final open position
close_order = trader.create_order(trader.instrument, units = -trader.position * trader.units,
suppress = True, ret = True)
trader.report_trade(close_order, "GOING NEUTRAL")
trader.signal = 0
I have done the Hagmann course as well and I recognised your code immediately.
Firstly, the way you define your positions is not the best. Look at the section on combining two strategies; there are two ways to do it.
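For reference, one common pattern (just a sketch, reusing the column names from your define_strategy) is to combine both entry conditions inside a single np.where with &; chaining two np.where calls with and, as in your code, silently ignores the first condition:
df["position"] = np.where((df[self.instrument] < df.Lower) & (df["SMA_S"] > df["SMA_L"]), 1, np.nan)
df["position"] = np.where((df[self.instrument] > df.Upper) & (df["SMA_S"] < df["SMA_L"]), -1, df["position"])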
Now, regarding your price problem: I had a similar situation with BTC. You can download its historical data, but when I plugged it into the strategy code and started to stream, I got exactly the same error, indicating that tick data was never streamed.
I am guessing that simply not all instruments are tradeable via the API, or, in your case, maybe you tried to stream outside trading hours?
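For example, a quick check with tpqoa (a sketch, assuming the same oanda.cfg as above; get_instruments() is part of the tpqoa wrapper, and "XAG_USD" is my guess at how silver is named for your account) would be:
import tpqoa

api = tpqoa.tpqoa("oanda.cfg")
instruments = api.get_instruments()   # list of (display name, instrument name) tuples
names = [name for _, name in instruments]
print("XAG_USD" in names)             # False would explain why no ticks ever arrive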
In jDE, each individual has its own F and CR values. How do I assign these values to each individual programmatically, and how do I update them? Pseudo-code would help.
If you want each individual to have its own F and CR values, you can simply store them inside the list that represents each solution. (Pseudo-code, Python:)
ID_POS = 0
ID_FIT = 1
ID_F = 2
ID_CR = 3
def create_solution(problem_size):
pos = np.random.uniform(lower_bound, upper_bound, problem_size)
fit = fitness_function(pos)
F = your_values
CR = your_values
return [pos, fit, F, CR]
def training(problem_size, pop_size, max_iteration):
# Initialization
pop = [create_solution(problem_size) for _ in range(0, pop_size)]
# Evolution process
for iteration in range(0, max_iteration):
for i in range(0, pop_size):
# Do your stuff here
pos_new = ....
fit_new = ....
F_new = ...
CR_new = ...
if pop[i][ID_FIT] < fit_new: # meaning the new solution has better fitness than the old one.
pop[i][ID_F] = F_new
pop[i][ID_CR] = CR_new # This is how you update F and CR for every individual.
...
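To fill in the update step itself: in the original jDE scheme (Brest et al., 2006), F and CR are re-sampled with small probabilities tau1 and tau2 before building each trial vector, and the new values are kept only if the trial vector is accepted. A minimal sketch (the constants are the commonly used defaults, so treat them as assumptions):
import random

TAU1, TAU2 = 0.1, 0.1   # probabilities of re-sampling F and CR
F_L, F_U = 0.1, 0.9     # a new F is drawn uniformly from [F_L, F_L + F_U]

def adapt_F_CR(F_old, CR_old):
    # with probability TAU1 draw a new F, otherwise keep the old one
    F_new = F_L + random.random() * F_U if random.random() < TAU1 else F_old
    # with probability TAU2 draw a new CR in [0, 1], otherwise keep the old one
    CR_new = random.random() if random.random() < TAU2 else CR_old
    return F_new, CR_new
F_new and CR_new are used to create the trial vector, and they overwrite pop[i][ID_F] and pop[i][ID_CR] only when the trial vector wins the selection, exactly as in the update shown above.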
You can check out my repo, which contains most of the state-of-the-art meta-heuristics, here:
https://github.com/thieunguyen5991/metaheuristics
class ResPartnerInherit(models.Model):
_inherit = 'res.partner'
is_specialist = fields.Boolean(string='Is Specialist')
specialized_in = fields.Many2one('specialization',string='Specialization')
hospital = fields.Char(string='Hospital')
@api.depends('is_specialist')
@api.multi
def name_get(self):
res = []
self.browse(self.ids).read(['name', 'hospital'])
for rec in self:
res.append((rec.id, '%s - %s' % (rec.name, rec.hospital)))
return res
What I'm trying to do here, using the name_get function: when selecting a specialist, his hospital needs to be shown. So I want to apply the condition only for a specialist; there is a boolean field named is_specialist, and I only want the hospital appended when that boolean is true.
You just need to check if the partner is a specialist when building his name and, if yes, also show the hospital.
@api.multi
def name_get(self):
res = []
for rec in self:
res.append((rec.id, '%s - %s' % (rec.name, rec.hospital) if rec.is_specialist else rec.name))
return res
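One small refinement to consider (a sketch, not part of the original answer): if is_specialist is set but hospital is left empty, the record would display as "Name - False", so you may want to guard on both fields:
@api.multi
def name_get(self):
    res = []
    for rec in self:
        # append the hospital only when the partner is a specialist AND a hospital is set
        if rec.is_specialist and rec.hospital:
            res.append((rec.id, '%s - %s' % (rec.name, rec.hospital)))
        else:
            res.append((rec.id, rec.name))
    return res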