Build a decision column by ANDing multiple columns in pandas

I have a pandas data frame which is shown below:
>>> x = [[1,2,3,4,5],[1,2,4,4,3],[2,4,5,6,7]]
>>> columns = ['a','b','c','d','e']
>>> df = pd.DataFrame(data = x, columns = columns)
>>> df
   a  b  c  d  e
0  1  2  3  4  5
1  1  2  4  4  3
2  2  4  5  6  7
I have an array of objects (conditions) as shown below:
[
    {
        'header': 'a',
        'condition': '==',
        'values': [1]
    },
    {
        'header': 'b',
        'condition': '==',
        'values': [2]
    },
    ...
]
and an assignHeader which is:
assignHeader = 'decision'
Now I want to build up all the conditions from the conditions array by looping through it, for example something like this:
pConditions = []
for eachCondition in conditions:
    header = eachCondition['header']
    values = eachCondition['values']
    if eachCondition['condition'] == "==":
        pConditions.append(df[header].isin(values))
    else:
        pConditions.append(~df[header].isin(values))

df[assignHeader] = and(pConditions)  # pseudocode: AND all the conditions together
I was thinking of using the all operator in pandas but am unable to crack the right syntax to do so. The conditions list can grow large and is built dynamically, so I want to use this nested approach and check for equality. Does anyone know a way to do this?
Final Output:
conditions = [df['a']==1, df['b']==2]
>>> df['decision'] = (df['a']==1) & (df['b']==2)
>>> df
   a  b  c  d  e  decision
0  1  2  3  4  5      True
1  1  2  4  4  3      True
2  2  4  5  6  7     False
Here the conditions array is variable, and I want a function which takes df, newheadername and conditions as input and returns the output as shown below:
>>> df
   a  b  c  d  e  decision
0  1  2  3  4  5      True
1  1  2  4  4  3      True
2  2  4  5  6  7     False
where newheadername = 'decision'

I was able to solve the problem using the code shown below. I am not sure whether this is a particularly fast way of getting things done, but I would love your input in case you have anything specific to point out.
def andMerging(conditions, mergeHeader, df):
    if len(conditions) != 0:
        df[mergeHeader] = pd.concat(conditions, axis=1).all(axis=1)
    return df
where conditions is a list of boolean pd.Series.
The conditions are built as shown below:
def prepareForConditionMerging(conditionsArray, df):
    conditions = []
    for prop in conditionsArray:
        condition = prop['condition']
        values = prop['values']
        header = prop['header']
        if isinstance(values, str):
            values = [values]
        if condition == "==":
            conditions.append(df[header].isin(values))
        else:
            conditions.append(~df[header].isin(values))
        # More conditions such as greater-than or less-than can be added here.
    return conditions
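Put together, a minimal usage sketch (the sample data comes from the question; the variable names are mine):
import pandas as pd

df = pd.DataFrame([[1, 2, 3, 4, 5], [1, 2, 4, 4, 3], [2, 4, 5, 6, 7]],
                  columns=['a', 'b', 'c', 'd', 'e'])
conditionsArray = [
    {'header': 'a', 'condition': '==', 'values': [1]},
    {'header': 'b', 'condition': '==', 'values': [2]},
]

conditions = prepareForConditionMerging(conditionsArray, df)
df = andMerging(conditions, 'decision', df)
print(df)
#    a  b  c  d  e  decision
# 0  1  2  3  4  5      True
# 1  1  2  4  4  3      True
# 2  2  4  5  6  7     False
Equivalently, numpy's np.logical_and.reduce(conditions) would AND the series in one call, though pd.concat(...).all(axis=1) makes the index alignment explicit.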

Related

Pandas xs where level in list of options

If I have a pd.DataFrame that looks like:
new_df = []
for i in range(10):
    df_example = pd.DataFrame(np.random.normal(size=[10, 1]))
    cols = [round(np.random.uniform(low=0, high=10)), round(np.random.uniform(low=0, high=10)),
            round(np.random.uniform(low=0, high=10)), round(np.random.uniform(low=0, high=10))]
    keys = ['A', 'B', 'C', 'D']
    new_ix = pd.MultiIndex.from_tuples([cols], names=keys)
    df_example.columns = new_ix
    new_df.append(df_example)
new_df = pd.concat(new_df, axis=1)
Which could yield something like the example dataframe shown further below.
Now, if I want where C=4 and A=1 I can do:
df.xs(axis=1, level=['A', 'C'], key=(1, 4))
How do I express if I want:
C in [4,2] and A in [5,2]
C in [4,2] or A in [5,2]
To the best of my knowledge, you can't use anything but tuples for the key parameter in xs, so such queries are not possible.
The next best thing is to define helper functions for that purpose, such as the following:
def xs_or(df: pd.DataFrame, params: dict[str, list[int]]) -> pd.DataFrame:
    """Helper function.

    Args:
        df: input dataframe.
        params: columns/values to query.

    Returns:
        Filtered dataframe.
    """
    # Take an xs slice per requested key on each requested level,
    # then glue the slices side by side (union of the selections).
    df = pd.concat(
        [
            df.xs(axis=1, level=[level], key=(key,))
            for level, keys in params.items()
            for key in keys
        ],
        axis=1,
    )
    # Drop the levels that were used for selection, if still present.
    for level in params.keys():
        try:
            df = df.droplevel([level], axis=1)
        except KeyError:
            pass
    return df
def xs_and(df: pd.DataFrame, params: dict[str, list[int]]) -> pd.DataFrame:
    """Helper function.

    Args:
        df: input dataframe.
        params: columns/values to query.

    Returns:
        Filtered dataframe.
    """
    for level, keys in params.items():
        df = xs_or(df, {level: keys})
    return df
And so, with the following dataframe named df:
A 4 7 3 1 7 9 4 0 3 9
B 6 7 4 6 7 5 8 0 8 0
C 2 10 5 2 9 9 4 3 4 5
D 0 1 7 3 8 3 6 7 9 10
0 -0.199458 1.155345 1.298027 0.575606 0.785291 -1.126484 0.019082 1.765094 0.034631 -0.243635
1 1.173873 0.523277 -0.709546 1.378983 0.266661 1.626118 1.647584 -0.228162 -1.708271 0.111583
2 0.321156 0.049470 -0.611111 -1.238887 1.092369 0.019503 -0.473618 1.804474 -0.850320 -0.217921
3 0.339307 -0.758909 0.072159 1.636119 -0.541920 -0.160791 -1.131100 1.081766 -0.530082 -0.546489
4 -1.523110 -0.662232 -0.434115 1.698073 0.568690 0.836359 -0.833581 0.230585 0.166119 1.085600
5 0.020645 -1.379587 -0.608083 -1.455928 1.855402 1.714663 -0.739409 1.270043 1.650138 -0.718430
6 1.280583 -1.317288 0.899278 -0.032213 -0.347234 2.543415 0.272228 -0.664116 -1.404851 -0.517939
7 -1.201619 0.724669 -0.705984 0.533725 0.820124 0.651339 0.363214 0.727381 -0.282170 0.651201
8 1.829209 0.049628 0.655277 -0.237327 -0.007662 1.849530 0.095479 0.295623 -0.856162 -0.350407
9 -0.690613 1.419008 -0.791556 0.180751 -0.648182 0.240589 -0.247574 -1.947492 -1.010009 1.549234
You can filter like this:
# C in [10, 2] or A in [1, 0]
print(xs_or(df, {"C": [10, 2], "A": [1, 0]}))
# Output
B 7 6 6 6 0
D 1 0 3 3 7
0 1.155345 -0.199458 0.575606 0.575606 1.765094
1 0.523277 1.173873 1.378983 1.378983 -0.228162
2 0.049470 0.321156 -1.238887 -1.238887 1.804474
3 -0.758909 0.339307 1.636119 1.636119 1.081766
4 -0.662232 -1.523110 1.698073 1.698073 0.230585
5 -1.379587 0.020645 -1.455928 -1.455928 1.270043
6 -1.317288 1.280583 -0.032213 -0.032213 -0.664116
7 0.724669 -1.201619 0.533725 0.533725 0.727381
8 0.049628 1.829209 -0.237327 -0.237327 0.295623
9 1.419008 -0.690613 0.180751 0.180751 -1.947492
# C in [10, 2] and A in [1, 7]
print(xs_and(df, {"C": [10, 2], "A": [1, 7]}))
# Output
B 6 7
D 3 1
0 0.575606 1.155345
1 1.378983 0.523277
2 -1.238887 0.049470
3 1.636119 -0.758909
4 1.698073 -0.662232
5 -1.455928 -1.379587
6 -0.032213 -1.317288
7 0.533725 0.724669
8 -0.237327 0.049628
9 0.180751 1.419008
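As an aside, a simpler alternative sketch (my own addition, not part of the original answer): build boolean masks over the column MultiIndex with get_level_values and combine them with & or |. Note that, unlike the xs helpers above, this keeps all column levels in the result.
# masks over the column index; level names 'A' and 'C' as in the example df
a_mask = df.columns.get_level_values("A").isin([1, 0])
c_mask = df.columns.get_level_values("C").isin([10, 2])
df_or = df.loc[:, c_mask | a_mask]   # C in [10, 2] or A in [1, 0]
df_and = df.loc[:, c_mask & a_mask]  # C in [10, 2] and A in [1, 0]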

average on dataframe segments

I have a DataFrame (shown as a picture in the original post) that drops back to zero after each cycle of operation; the cycles have random lengths. I want to calculate the average (or perform other operations) for each patch: for example, the average of [0.762, 0.766] alone, the average of [0.66, 1.37, 2.11, 2.29] alone, and so forth until the end of the DataFrame.
So I worked with this data:
   random_value
0             0
1             0
2             1
3             2
4             3
5             0
6             4
7             4
8             0
9             1
There is probably a way better solution, but here is what I came up with:
def avg_function(df):
    avg_list = []
    value_list = list(df["random_value"])
    temp_list = []
    for i in range(len(value_list)):
        if value_list[i] == 0:
            if temp_list:
                avg_list.append(sum(temp_list) / len(temp_list))
                temp_list = []
        else:
            temp_list.append(value_list[i])
    if temp_list:  # for the last values
        avg_list.append(sum(temp_list) / len(temp_list))
    return avg_list
test_list = avg_function(df=df)
test_list
[Out] : [2.0, 4.0, 1.0]
Edit: since it was requested in the comments, here is a way to add the means to the dataframe. I don't know if there is a way to do that with pandas (and there might be!), but I came up with this:
def add_mean(df, mean_list):
    temp_mean_list = []
    list_index = 0  # will be the index for the value of mean_list
    df["random_value_shifted"] = df["random_value"].shift(1).fillna(0)
    random_value = list(df["random_value"])
    random_value_shifted = list(df["random_value_shifted"])
    for i in range(df.shape[0]):
        if random_value[i] == 0 and random_value_shifted[i] == 0:
            temp_mean_list.append(0)
        elif random_value[i] == 0 and random_value_shifted[i] != 0:
            temp_mean_list.append(0)
            list_index += 1
        else:
            temp_mean_list.append(mean_list[list_index])
    df = df.drop(["random_value_shifted"], axis=1)
    df["mean"] = temp_mean_list
    return df
df = add_mean(df=df, mean_list=test_list)
Which gave me:
df
[Out]:
   random_value  mean
0             0     0
1             0     0
2             1     2
3             2     2
4             3     2
5             0     0
6             4     4
7             4     4
8             0     0
9             1     1
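A more idiomatic pandas sketch (my own variant, not the original answer's code): since zeros delimit the segments, a cumulative sum over the zero positions labels each segment, and groupby/transform broadcasts each segment's mean back onto its rows.
import pandas as pd

df = pd.DataFrame({"random_value": [0, 0, 1, 2, 3, 0, 4, 4, 0, 1]})

is_zero = df["random_value"].eq(0)
segment = is_zero.cumsum()  # each zero starts a new segment label

# mean of the non-zero values per segment, broadcast back to every row
means = df["random_value"].mask(is_zero).groupby(segment).transform("mean")
df["mean"] = means.where(~is_zero, 0)  # zero rows get 0, as in the output above
This produces the same values as above, though as floats (e.g. 2.0) rather than ints.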

Multiply String in Dataframe?

My desired output is the following:
   count  tally
1      2     //
2      3    ///
3      5  /////
4      3    ///
5      2     //
My code:
my_list = [1,1,2,2,2,3,3,3,3,3,4,4,4,5,5]
my_series = pd.Series(my_list)
values_counted = pd.Series(my_series.value_counts(),name='count')
# other calculated columns left out for SO simplicity
df = pd.concat([values_counted], axis=1).sort_index()
df['tally'] = values_counted * '/'
With the code above I get the following error:
masked_arith_op
result[mask] = op(xrav[mask], y)
numpy.core._exceptions.UFuncTypeError: ufunc 'multiply' did not contain a loop with signature matching types (dtype('<U21'), dtype('<U21')) -> dtype('<U21')
In searching for solutions I found one on SO that said to try:
values_counted * float('/')
But that did not work.
In 'normal' Python outside of Dataframes the following code works:
10 * '/'
and returns
///////////
How can I achieve the same functionality in a Dataframe?
Use a lambda function to repeat the values; your solution is then simplified:
my_list = [1,1,2,2,2,3,3,3,3,3,4,4,4,5,5]
df1 = pd.Series(my_list).value_counts().to_frame('count').sort_index()
df1['tally'] = df1['count'].apply(lambda x: x * '/')
print (df1)
   count  tally
1      2     //
2      3    ///
3      5  /////
4      3    ///
5      2     //
You can group the series by itself and then aggregate:
new_df = my_series.groupby(my_series).agg(**{"count": "size",
                                             "tally": lambda s: "/" * s.size})
to get
>>> new_df
   count  tally
1      2     //
2      3    ///
3      5  /////
4      3    ///
5      2     //
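For completeness, a vectorized sketch using Series.str.repeat (my addition, not from the answers above): build a series holding the base character, then repeat it element-wise by the counts.
# repeat '/' per row according to the 'count' column
df1['tally'] = pd.Series('/', index=df1.index).str.repeat(df1['count'])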

How to replace pd.NamedAgg with code compliant with pandas 0.24.2?

Hello, I am obliged to downgrade my pandas version to 0.24.2.
As a result, the function pd.NamedAgg is not recognized anymore.
import pandas as pd
import numpy as np
agg_cols = ['A', 'B', 'C']
agg_df = df.groupby(agg_cols).agg(
    max_foo=pd.NamedAgg(column='Foo', aggfunc=np.max),
    min_foo=pd.NamedAgg(column='Foo', aggfunc=np.min)
).reset_index()
Can you please help me change my code to make it compliant with version 0.24.2?
Thanks a lot.
agg_df = df.groupby(agg_cols)['Foo'].agg(
    [('max_foo', np.max), ('min_foo', np.min)]
).reset_index()
Sample:
df = pd.DataFrame({
    'A': list('a') * 6,
    'B': [4, 5, 4, 5, 5, 4],
    'C': [7] * 6,
    'Foo': [1, 3, 5, 7, 1, 0],
    'E': [5, 3, 6, 9, 2, 4],
    'F': list('aaabbb')
})
agg_cols = ['A', 'B', 'C']
agg_df = df.groupby(agg_cols).agg(
    max_foo=pd.NamedAgg(column='Foo', aggfunc=np.max),
    min_foo=pd.NamedAgg(column='Foo', aggfunc=np.min)
).reset_index()
print (agg_df)
   A  B  C  max_foo  min_foo
0  a  4  7        5        0
1  a  5  7        7        1
Because there is only one column Foo to process, select the Foo column after groupby and pass tuples of new column names and aggregate functions:
agg_df = df.groupby(agg_cols)['Foo'].agg(
    [('max_foo', np.max), ('min_foo', np.min)]
).reset_index()
print (agg_df)
   A  B  C  max_foo  min_foo
0  a  4  7        5        0
1  a  5  7        7        1
Another idea is to pass a dictionary of lists of aggregate functions:
agg_df = df.groupby(agg_cols).agg({'Foo':['max', 'min']})
agg_df.columns = [f'{b}_{a}' for a, b in agg_df.columns]
agg_df = agg_df.reset_index()
print (agg_df)
   A  B  C  max_foo  min_foo
0  a  4  7        5        0
1  a  5  7        7        1
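For reference (my addition): on pandas >= 0.25, the same named aggregation can also be written with plain tuples, without pd.NamedAgg:
agg_df = df.groupby(agg_cols).agg(
    max_foo=('Foo', 'max'),
    min_foo=('Foo', 'min')
).reset_index()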

How to select pandas rows whose attributes column contains any one of the values from a list

data = {
    "name": ["abc", "xyz", "pqr"],
    "attributes": [["attr2", "attr3"], ["attr2", "attr4"], ["attr3", "attr1"]]
}
df = pd.DataFrame.from_dict(data)
How do I filter rows which satisfy this condition:
select a row if its attributes column contains any of "attr1" or "attr3"
expected output is:
  name      attributes
0  abc  [attr2, attr3]
1  pqr  [attr3, attr1]
Using
df[pd.DataFrame(df.attributes.tolist()).isin(['attr1', 'attr3']).any(axis=1)]
Out[295]:
attributes name
0 [attr2, attr3] abc
2 [attr3, attr1] pqr
To get a boolean indexer,
>>> idx = df['attributes'].map(lambda l: any(s in l for s in ['attr1', 'attr3']))
>>> idx
0 True
1 False
2 True
Name: attributes, dtype: bool
Then
>>> df.loc[idx]
name attributes
0 abc [attr2, attr3]
2 pqr [attr3, attr1]
Whether you want to reset the index afterward is up to you.
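Another sketch (my addition; requires pandas >= 0.25): explode the lists, test membership, then collapse back to one boolean per original row.
# one row per list element, shared original index -> reduce with any()
mask = df['attributes'].explode().isin(['attr1', 'attr3']).groupby(level=0).any()
print(df[mask])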