I'm attempting to determine the sum of a column over a running period where there is a negative gain, i.e. I want to determine the total for a losing streak.
I've set up a column that gives the number of consecutive days the ticker has been losing (Consecutive Losses), but I also want a running total of the loss over the streak. What I have (Aggregate Consecutive Loss) 1) doesn't work, because it just cumsum()s without resetting to zero at each streak, and 2) is incorrect anyway, since I should really take the Open value at the start of the streak and the Close value at the end.
How can I correctly set up this Aggregate Consecutive Loss value in pandas?
import pandas as pd
import numpy as np
import yfinance as yf
def get( symbols, group_by_ticker=False, **kwargs ):
    if not isinstance(symbols, list):
        symbols = [ symbols, ]
    kwargs['auto_adjust'] = True
    kwargs['prepost'] = True
    kwargs['threads'] = True
    df = None
    if group_by_ticker:
        kwargs['group_by'] = 'ticker'
    df = yf.download( symbols, **kwargs)
    for t in symbols:
        df["Change Percent",t] = df["Close",t].pct_change() * 100
        df["Gain",t] = np.where( df['Change Percent',t] > 0, True, False ).astype('bool')
        a = df['Gain',t] != True
        df['Consecutive Losses',t] = a.cumsum() - a.cumsum().where(~a).ffill().fillna(0).astype(int)
        x = df['Change Percent',t].where( df['Consecutive Losses',t] > 0 )
        df['Aggregate Consecutive Loss',t] = x.cumsum() - x.cumsum().where(~a).ffill().fillna(0).astype(float)
    return df
data = get( ["DOW", "IDX"], period="6mo")
data[['Change Percent','Gain','Consecutive Losses','Aggregate Consecutive Loss']].head(50)
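Not part of the original post, but one way I would sketch the reset-per-streak behaviour is to build a streak id with cumsum() and group on it; the Open at the start of each streak then comes from a groupby transform('first'). A minimal sketch for a single-ticker OHLC frame (column names assumed to match yfinance's Open/Close):
import numpy as np
import pandas as pd

def losing_streak_stats(ohlc: pd.DataFrame) -> pd.DataFrame:
    # sketch: per-streak loss bookkeeping for a single-ticker OHLC frame
    out = ohlc.copy()
    out['Change Percent'] = out['Close'].pct_change() * 100
    loss = out['Change Percent'] <= 0          # True on losing days
    streak_id = (~loss).cumsum()               # constant within each losing streak
    # days elapsed in the current losing streak (0 on winning days)
    out['Consecutive Losses'] = loss.astype(int).groupby(streak_id).cumsum()
    # percent move from the Open at the start of the streak to today's Close
    streak_open = out['Open'].where(loss).groupby(streak_id).transform('first')
    out['Aggregate Consecutive Loss'] = (
        (out['Close'] / streak_open - 1) * 100
    ).where(loss, 0.0)
    return out
Applied per ticker, e.g. losing_streak_stats(df.xs(t, axis=1, level=1)) for the (field, ticker) column layout that get() above produces, this restarts the aggregate at zero on every new streak and measures the loss from the streak's opening price rather than summing daily percent changes.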
In my code, I am trying to extract data from a CSV file to use in the function, but it doesn't output anything and gives no error. The code works when I pass plain numpy arrays as inputs; I'm not sure why it doesn't work with pandas.
import numpy as np
import pandas as pd
import os
# change the current directory to the directory where the running script file is
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# finding best fit line for y=mx+b by iteration
def gradient_descent(x,y):
    m_iter = b_iter = 1 #starting point
    iteration = 10000
    n = len(x)
    learning_rate = 0.05
    last_mse = 10000
    #take baby steps to reach global minima
    for i in range(iteration):
        y_predicted = m_iter*x + b_iter
        #mse = 1/n*sum([value**2 for value in (y-y_predicted)]) # cost function to minimize
        mse = 1/n*sum((y-y_predicted)**2) # cost function to minimize
        if (last_mse - mse)/mse < 0.001:
            break
        # recall MSE formula is 1/n*sum((yi-y_predicted)^2), where y_predicted = m*x+b
        # using partial deriv of MSE formula, d/dm and d/db
        dm = -(2/n)*sum(x*(y-y_predicted))
        db = -(2/n)*sum((y-y_predicted))
        # use current predicted value to get the next value for prediction
        # by using learning rate
        m_iter = m_iter - learning_rate*dm
        b_iter = b_iter - learning_rate*db
        print('m is {}, b is {}, cost is {}, iteration {}'.format(m_iter,b_iter,mse,i))
        last_mse = mse
#x = np.array([1,2,3,4,5])
#y = np.array([5,7,8,10,13])
#gradient_descent(x,y)
df = pd.read_csv('Linear_Data.csv')
x = df['Area']
y = df['Price']
gradient_descent(x,y)
My code works because I tried it with just numpy arrays as inputs; not sure why it doesn't work with pandas.
Well no, your code also works with pandas dataframes:
df = pd.DataFrame({'Area': [1,2,3,4,5], 'Price': [5,7,8,10,13]})
x = df['Area']
y = df['Price']
gradient_descent(x,y)
Above will give you the same output as with numpy arrays.
Try checking what's in Linear_Data.csv and/or add some print statements in the gradient_descent function just to check your assumptions. I would suggest first adding a print statement right before the condition with the break statement:
print(last_mse, mse)
if (last_mse - mse)/mse < 0.001:
    break
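One thing such a print is likely to reveal (my own guess, not part of the original answer): if Linear_Data.csv holds raw values much larger than the toy example, the very first mse can already exceed the initial last_mse = 10000, so (last_mse - mse)/mse is negative, the loop breaks at iteration 0 and nothing is ever printed. Standardizing the inputs first is one hypothetical way around that:
# hypothetical fix, assuming Linear_Data.csv holds large raw values:
# standardize both columns so the first MSE stays small and the gradients don't explode
x = (df['Area'] - df['Area'].mean()) / df['Area'].std()
y = (df['Price'] - df['Price'].mean()) / df['Price'].std()
gradient_descent(x, y)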
I have some discrete data in an array, such that:
arr = np.array([[1,1,1],[2,2,2],[3,3,3],[2,2,2],[1,1,1]])
whose plot looks like:
I also have an index array, such that each unique value in arr is associated with a unique index value, like:
ind = np.array([[1,1,1],[2,2,2],[3,3,3],[4,4,4],[5,5,5]])
What is the most pythonic way of converting arr from discrete values to continuous values, so that the array would look like this when plotted?:
That is, I want to interpolate between the discrete points to make continuous data.
I found a solution to this, in case anyone has a similar issue. It is maybe not the most elegant, so modifications are welcome:
def ref_linear_interp(x, y):
    arr = []
    ux = np.unique(x)  # unique x values
    for u in ux:
        idx = y[x==u]
        try:
            min = y[x==u-1][0]
            max = y[x==u][0]
        except IndexError:
            min = y[x==u][0]
            max = y[x==u][0]
        try:
            min = y[x==u][0]
            max = y[x==u+1][0]
        except IndexError:
            min = y[x==u][0]
            max = y[x==u][0]
        if min==max:
            sub = np.full((len(idx)), min)
            arr.append(sub)
        else:
            sub = np.linspace(min, max, len(idx))
            arr.append(sub)
    return np.concatenate(arr, axis=None).ravel()
y = np.array([[1,1,1],[2,2,2],[3,3,3],[2,2,2],[1,1,1]])
x = np.array([[1,1,1],[2,2,2],[3,3,3],[4,4,4],[5,5,5]])
z = np.arange(1, 16, 1)
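For what it's worth (my addition; the plotting call is an assumption about the intended usage), the function above can then be called with those arrays and plotted against z:
import matplotlib.pyplot as plt

smooth = ref_linear_interp(x, y)   # 15 interpolated samples, one per element of y
plt.plot(z, smooth, marker='*')
plt.show()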
Here is an answer for the symmetric solution that I would expect when reading the question:
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# create the data as described
numbers = [1,2,3,2,1]
nblock = 3
df = pd.DataFrame({
"x": np.arange(nblock*len(numbers)),
"y": np.repeat(numbers, nblock),
"label": np.repeat(np.arange(len(numbers)), nblock)
})
Expecting a constant block size of 3, we could use a rolling window:
df['y-smooth'] = df['y'].rolling(nblock, center=True).mean()
# fill NaNs
df['y-smooth'] = df['y-smooth'].bfill().ffill()
plt.plot(df['x'], df['y-smooth'], marker='*')
If the block size is allowed to vary, we could determine the block centers and interpolate piecewise.
centers = df[['x', 'y', 'label']].groupby('label').mean()
df['y-interp'] = np.interp(df['x'], centers['x'], centers['y'])
plt.plot(df['x'], df['y-interp'], marker='*')
Note: you may also try centers = df[['x', 'y', 'label']].groupby('label').min() to select the left corner of the labelled blocks.
Overview
The code below contains a numpy array clusters whose values are compared against each row of a pandas DataFrame using np.where. The SoFunc function takes an element of the clusters array as input and returns the rows where all conditions are True.
Question
I can loop through this array and compare each element against the respective np.where conditions. How do I remove the need for the loop but still get the same output?
I appreciate that looping through numpy arrays is inefficient and want to improve this code. The actual dataset will be much larger.
Prepare the reproducible mock data
def genMockDataFrame(days,startPrice,colName,startDate,seed=None):
    periods = days*24
    np.random.seed(seed)
    steps = np.random.normal(loc=0, scale=0.0018, size=periods)
    steps[0]=0
    P = startPrice+np.cumsum(steps)
    P = [round(i,4) for i in P]
    fxDF = pd.DataFrame({
        'ticker':np.repeat( [colName], periods ),
        'date':np.tile( pd.date_range(startDate, periods=periods, freq='H'), 1 ),
        'price':(P)})
    fxDF.index = pd.to_datetime(fxDF.date)
    fxDF = fxDF.price.resample('D').ohlc()
    fxDF.columns = [i.title() for i in fxDF.columns]
    return fxDF
def SoFunc(clust):
    #generate mock data
    df = genMockDataFrame(10,1.1904,'eurusd','19/3/2020',seed=157)
    df["Upper_Band"] = 1.1928
    df.loc["2020-03-27", "Upper_Band"] = 1.2118
    df.loc["2020-03-26", "Upper_Band"] = 1.2200
    df["Level"] = np.where((df["High"] >= clust)
                           & (df["Low"] <= clust)
                           & (df["High"] >= df["Upper_Band"] ), 1, np.NaN
                           )
    return df.dropna()
Loop through the clusters array
clusters = np.array([1.1929 , 1.2118 ])
l = []
for i in range(len(clusters)):
    l.append(SoFunc(clusters[i]))
pd.concat(l)
Output
Open High Low Close Upper_Band Level
date
2020-03-19 1.1904 1.1937 1.1832 1.1832 1.1928 1.0
2020-03-25 1.1939 1.1939 1.1864 1.1936 1.1928 1.0
2020-03-27 1.2118 1.2144 1.2039 1.2089 1.2118 1.0
(Edited based on #tdy's comment below)
pandas.merge allows you to make len(clusters) copies of your dataframe and then pare them down according to the conditions in your SoFunc function.
The cross merge creates a dataframe with a copy of df for each record in clusters_df. The overall result ought to be faster for large dataframes than the loop-based approach, provided you have enough memory to temporarily accommodate the merged dataframe (if not, the operation may spill over into page/swap space and slow down drastically).
import numpy as np
import pandas as pd
def genMockDataFrame(days,startPrice,colName,startDate,seed=None):
    ''' identical to the example provided '''
    periods = days*24
    np.random.seed(seed)
    steps = np.random.normal(loc=0, scale=0.0018, size=periods)
    steps[0]=0
    P = startPrice+np.cumsum(steps)
    P = [round(i,4) for i in P]
    fxDF = pd.DataFrame({
        'ticker':np.repeat( [colName], periods ),
        'date':np.tile( pd.date_range(startDate, periods=periods, freq='H'), 1 ),
        'price':(P)})
    fxDF.index = pd.to_datetime(fxDF.date)
    fxDF = fxDF.price.resample('D').ohlc()
    fxDF.columns = [i.title() for i in fxDF.columns]
    return fxDF
# create the base dataframe according to the former SoFunc
df = genMockDataFrame(10,1.1904,'eurusd','19/3/2020',seed=157)
df["Upper_Band"] = 1.1928
df.loc["2020-03-27"]["Upper_Band"] = 1.2118
df.loc["2020-03-26"]["Upper_Band"] = 1.2200
# create a df out of the cluster array
clusters = np.array([1.1929 , 1.2118 ])
clusters_df = pd.DataFrame({"clust": clusters})
# perform the merge, then filter and finally clean up
result_df = (
    pd
    .merge(df.reset_index(), clusters_df, how="cross")  # for each entry in cluster, make a copy of df
    .loc[lambda z: (z.Low <= z.clust) & (z.High >= z.clust) & (z.High >= z.Upper_Band), :]  # filter the copies down
    .drop(columns=["clust"])  # not needed in result
    .assign(Level=1.0)  # to match your result; not really needed
    .set_index("date")  # bring back the old index
)
print(result_df)
I recommend inspecting just the result of pd.merge(df.reset_index(), clusters_df, how="cross") to see how it works.
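For intuition (my own toy example, not from the original answer): a cross merge simply pairs every row of the left frame with every row of the right one, so an n-row frame merged with a k-row clusters_df yields n*k rows before filtering:
import pandas as pd

left = pd.DataFrame({"High": [1.20, 1.22], "Low": [1.18, 1.19]})
right = pd.DataFrame({"clust": [1.19, 1.21]})

# 2 rows x 2 cluster values -> 4 rows; each left row is repeated once per cluster value
print(pd.merge(left, right, how="cross"))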
I am trying to pass "buy_list" in the code below to df. This is a small section of the code; when the full code is executed I get the back test results shown in the linked image.
initial results
replacement_stocks = portfolio_size - len(kept_positions)
buy_list = ranking_table.loc[
~ranking_table.index.isin(kept_positions)][:replacement_stocks]
new_portfolio = pd.concat(
(buy_list,
ranking_table.loc[ranking_table.index.isin(kept_positions)])
)
When I define df as below, I get a "df not defined" error.
replacement_stocks = portfolio_size - len(kept_positions)
buy_list = ranking_table.loc[
~ranking_table.index.isin(kept_positions)][:replacement_stocks]
new_portfolio = pd.concat(
(buy_list,
ranking_table.loc[ranking_table.index.isin(kept_positions)])
)
df1 = buy_list  # create df1 from buy_list
df2 = ranking_table.loc[
    ~ranking_table.index.isin(kept_positions)][:replacement_stocks]  # create df2 the same way buy_list is built
I tried the solution in the link below
Similar error with suggested fix
Following this I still get the "df not defined" error, and the output of my back test changes to 0% in all the months which previously had actual positive and negative % changes.
replacement_stocks = portfolio_size - len(kept_positions)
buy_list = ranking_table.loc[
~ranking_table.index.isin(kept_positions)][:replacement_stocks]
new_portfolio = pd.concat(
(buy_list,
ranking_table.loc[ranking_table.index.isin(kept_positions)])
)
return buy_list
df2 = ranking_table.loc[
~ranking_table.index.isin(kept_positions)][:replacement_stocks]
print(df2)
This is what I now end up with
Error message
I'd appreciate any suggestions on how I can fix this.
Thanks,
Last1
Below is the full code as requested; it's from a book I am working through, Trading Evolved by Andreas Clenow.
Thanks again.
%matplotlib inline
import zipline
from zipline.api import order_target_percent, symbol, \
set_commission, set_slippage, schedule_function, \
date_rules, time_rules, attach_pipeline, pipeline_output
from pandas import Timestamp
import matplotlib.pyplot as plt
import pyfolio as pf
import pandas as pd
import numpy as np
from scipy import stats
from zipline.finance.commission import PerDollar
from zipline.finance.slippage import VolumeShareSlippage, FixedSlippage
from zipline_norgatedata.pipelines import NorgateDataIndexConstituent
from zipline.pipeline import Pipeline
"""
Model Settings
"""
intial_portfolio = 100000
momentum_window1 = 125
momentum_window2 = 125
minimum_momentum = 40
portfolio_size = 30
vola_window = 20
# Trend filter settings
enable_trend_filter = True
trend_filter_symbol = '$SPXTR'
trend_filter_window = 200
"""
Commission and Slippage Settings
"""
enable_commission = True
commission_pct = 0.001
enable_slippage = True
slippage_volume_limit = 0.025
slippage_impact = 0.05
"""
Helper functions.
"""
def momentum_score(ts):
    """
    Input: Price time series.
    Output: Annualized exponential regression slope,
    multiplied by the R2
    """
    # Make a list of consecutive numbers
    x = np.arange(len(ts))
    # Get logs
    log_ts = np.log(ts)
    # Calculate regression values
    slope, intercept, r_value, p_value, std_err = stats.linregress(x, log_ts)
    # Annualize percent
    annualized_slope = (np.power(np.exp(slope), 252) - 1) * 100
    # Adjust for fitness
    score = annualized_slope * (r_value ** 2)
    return score
def volatility(ts):
    return ts.pct_change().rolling(vola_window).std().iloc[-1]
"""
Initialization and trading logic
"""
def make_pipeline():
    indexconstituent = NorgateDataIndexConstituent('$SPX')
    return Pipeline(
        columns={
            'NorgateDataIndexConstituent':indexconstituent},
        screen = indexconstituent)
def initialize(context):
    attach_pipeline(make_pipeline(), 'norgatedata_pipeline', chunks=9999,eager=True)
    # Set commission and slippage.
    if enable_commission:
        comm_model = PerDollar(cost=commission_pct)
    else:
        comm_model = PerDollar(cost=0.0)
    set_commission(comm_model)
    if enable_slippage:
        slippage_model=VolumeShareSlippage(volume_limit=slippage_volume_limit, price_impact=slippage_impact)
        set_slippage(slippage_model)
    else:
        slippage_model=FixedSlippage(spread=0.0)
    # Used only for progress output.
    context.last_month = intial_portfolio
    # Store index membership
    #context.index_members = pd.read_csv('../data/index_members/sp500.csv', index_col=0, parse_dates=[0])
    #Schedule rebalance monthly.
    schedule_function(
        func=rebalance,
        date_rule=date_rules.month_start(),
        time_rule=time_rules.market_open()
    )
def output_progress(context):
    """
    Output some performance numbers during backtest run
    """
    # Get today's date
    today = zipline.api.get_datetime().date()
    # Calculate percent difference since last month
    perf_pct = (context.portfolio.portfolio_value / context.last_month) - 1
    # Print performance, format as percent with two decimals.
    print("{} - Last Month Result: {:.2%}".format(today, perf_pct))
    # Remember today's portfolio value for next month's calculation
    context.last_month = context.portfolio.portfolio_value
def rebalance(context, data):
    # Write some progress output during the backtest
    output_progress(context)
    context.pipeline_data = pipeline_output('norgatedata_pipeline')
    todays_universe = context.pipeline_data.index
    # Check how long history window we need.
    hist_window = max(momentum_window1,
                      momentum_window2)
    # Get historical data
    hist = data.history(todays_universe, "close", hist_window, "1d")
    # Slice the history to match the two chosen time frames.
    momentum_hist1 = hist[(-1 * momentum_window1):]
    momentum_hist2 = hist[(-1 * momentum_window2):]
    # Calculate momentum values for the two time frames.
    momentum_list1 = momentum_hist1.apply(momentum_score)
    momentum_list2 = momentum_hist2.apply(momentum_score)
    # Now let's put the two momentum values together, and calculate mean.
    momentum_concat = pd.concat((momentum_list1, momentum_list2))
    mom_by_row = momentum_concat.groupby(momentum_concat.index)
    mom_means = mom_by_row.mean()
    # Sort by momentum value.
    ranking_table = mom_means.sort_values(ascending=False)
    """
    Sell Logic
    First we check if any existing position should be sold.
    * Sell if stock is no longer part of index.
    * Sell if stock has too low momentum value.
    """
    kept_positions = list(context.portfolio.positions.keys())
    for security in context.portfolio.positions:
        if (security not in todays_universe):
            order_target_percent(security, 0.0)
            kept_positions.remove(security)
        elif ranking_table[security] < minimum_momentum:
            order_target_percent(security, 0.0)
            kept_positions.remove(security)
    """
    Trend Filter Section
    """
    if enable_trend_filter:
        ind_hist = data.history(
            symbol(trend_filter_symbol),
            'close',
            trend_filter_window,
            '1d'
        )
        trend_filter = ind_hist.iloc[-1] > ind_hist.mean()
        if trend_filter == False:
            return
    """
    Stock Selection Logic
    Check how many stocks we are keeping from last month.
    Fill from top of ranking list, until we reach the
    desired total number of portfolio holdings.
    """
    replacement_stocks = portfolio_size - len(kept_positions)
    buy_list = ranking_table.loc[
        ~ranking_table.index.isin(kept_positions)][:replacement_stocks]
    new_portfolio = pd.concat(
        (buy_list,
         ranking_table.loc[ranking_table.index.isin(kept_positions)])
    )
    """
    Calculate inverse volatility for stocks,
    and make target position weights.
    """
    vola_table = hist[new_portfolio.index].apply(volatility)
    inv_vola_table = 1 / vola_table
    sum_inv_vola = np.sum(inv_vola_table)
    vola_target_weights = inv_vola_table / sum_inv_vola
    for security, rank in new_portfolio.iteritems():
        weight = vola_target_weights[security]
        if security in kept_positions:
            order_target_percent(security, weight)
        else:
            if ranking_table[security] > minimum_momentum:
                order_target_percent(security, weight)
def analyze(context, perf):
    perf['max'] = perf.portfolio_value.cummax()
    perf['dd'] = (perf.portfolio_value / perf['max']) - 1
    maxdd = perf['dd'].min()
    ann_ret = (np.power((perf.portfolio_value.iloc[-1] / perf.portfolio_value.iloc[0]),(252 / len(perf)))) - 1
    print("Annualized Return: {:.2%} Max Drawdown: {:.2%}".format(ann_ret, maxdd))
    return
start_date = Timestamp('2015-01-01',tz='UTC')
end_date = Timestamp('2020-03-14',tz='UTC')
perf = zipline.run_algorithm(
start=start_date, end=end_date,
initialize=initialize,
analyze=analyze,
capital_base=intial_portfolio,
data_frequency = 'daily',
bundle='norgatedata-sp500' )
I have a timeseries
ts = pd.Series(data=[0,1,2,3,4],index=[pd.Timestamp('1991-01-01'),pd.Timestamp('1995-01-01'),pd.Timestamp('1996-01-01'),pd.Timestamp('2010-01-01'),pd.Timestamp('2011-01-01')])
What's the fastest, most readable way to get the total duration during which the value is below 2, assuming each value is valid until the next time step indicates otherwise (no linear interpolation)? I imagine there is probably a pandas function for this.
This seems to be working quite well; however, I am still baffled that there does not seem to be a pandas function for this!
import pandas as pd
import numpy as np
ts = pd.Series(data=[0,1,2,3,4],index=[pd.Timestamp('1991-01-01'),pd.Timestamp('1995-01-01'),pd.Timestamp('1996-01-01'),pd.Timestamp('2010-01-01'),pd.Timestamp('2011-01-01')])
# making the timeseries binary. 1 = meets condition, 0 = does not
ts = ts.where(ts>=2,other=1)
ts = ts.where(ts<2,other=0)
delta_time = ts.index.to_pydatetime()[1:]-ts.index.to_pydatetime()[:-1]
time_below_2 = np.sum(delta_time[np.invert(ts.values[:-1])]).total_seconds()
time_above_2 = np.sum(delta_time[(ts.values[:-1])]).total_seconds()
The above approach seems to break for certain timeframes. The option below is slower, but did not break in any of my tests:
def get_total_duration_above_and_below_value(value, ts):
    # make the timeseries binary: 1 = below value, 0 = at/above value
    ts = ts.where(ts >= value, other=1)
    ts = ts.where(ts < value, other=0)
    time_above_value = 0
    time_below_value = 0
    for i in range(ts.size - 1):
        # each value is in force until the next timestamp
        if ts.iloc[i] == 1:
            time_below_value += abs(pd.Timedelta(
                ts.index[i] - ts.index[i + 1]).total_seconds()) / 3600
        else:
            time_above_value += abs(pd.Timedelta(
                ts.index[i] - ts.index[i + 1]).total_seconds()) / 3600
    return time_above_value, time_below_value
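As a footnote (my own addition, not from the original answers): the same bookkeeping can be done without an explicit loop by giving each sample the duration until the next timestamp. This is only a sketch; like the loop above, it ignores the open-ended last interval:
import pandas as pd

ts = pd.Series(data=[0, 1, 2, 3, 4],
               index=[pd.Timestamp('1991-01-01'), pd.Timestamp('1995-01-01'),
                      pd.Timestamp('1996-01-01'), pd.Timestamp('2010-01-01'),
                      pd.Timestamp('2011-01-01')])

below = ts < 2
# time each value is "in force", i.e. until the next timestamp (NaT for the last one)
durations = ts.index.to_series().diff().shift(-1)

time_below = durations[below].sum()    # Timedelta; the trailing NaT is skipped
time_above = durations[~below].sum()
print(time_below, time_above)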