I have a dataframe like this:

name        cluster
'sock'      1
'graceful'  2
'disgrace'  2
'fixture'   3
'winnow'    4
'window'    4
and a function common_substring that takes in two strings and returns the longest common substring.
I want to return

name       cluster
'sock'     1
'grace'    2
'fixture'  3
'win'      4
I know I'm supposed to group by cluster, but I'm struggling with how to aggregate and apply a function that takes two values.
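For context, the question leaves the implementation of common_substring open; one way such a pairwise helper might look, using only the standard library (purely illustrative, not code from the original post):

from difflib import SequenceMatcher

def common_substring(a: str, b: str) -> str:
    # Longest common contiguous substring of a and b.
    match = SequenceMatcher(None, a, b).find_longest_match(0, len(a), 0, len(b))
    return a[match.a: match.a + match.size]

print(common_substring('winnow', 'window'))  # win

The answers below take a different route: they intersect n-gram sets, which generalizes from two strings to a whole group.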
Based on @jezrael's answer and the link:
from functools import partial, reduce
from itertools import chain
from typing import Iterator

def ngram(seq: str, n: int) -> Iterator[str]:
    return (seq[i: i+n] for i in range(0, len(seq)-n+1))

def allngram(seq: str) -> set:
    lengths = range(len(seq))
    ngrams = map(partial(ngram, seq), lengths)
    return set(chain.from_iterable(ngrams))

def func(a, b):
    # Longest common substring of a and b via the intersection of their n-gram sets.
    seqs_ngrams = map(allngram, [a, b])
    intersection = reduce(set.intersection, seqs_ngrams)
    longest = max(intersection, key=len)
    return longest

def f(x):
    try:
        return func(x.iat[0], x.iat[1])
    except IndexError:
        # The group has a single member, so there is nothing to intersect.
        return 'Unique value'

df = df.groupby('cluster')['name'].agg(f).reset_index()
print(df)
Demonstration:

df = pd.DataFrame({'name':['winnow', 'window'], 'cluster':[4,4]})
df = df.groupby('cluster')['name'].agg(f).reset_index()

Output:

   cluster name
0        4  win
For input of any length (groups of any size):
from functools import partial, reduce
from itertools import chain
from typing import Iterator

def ngram(seq: str, n: int) -> Iterator[str]:
    return (seq[i: i+n] for i in range(0, len(seq)-n+1))

def allngram(seq: str) -> set:
    lengths = range(len(seq)+1)
    ngrams = map(partial(ngram, seq), lengths)
    return set(chain.from_iterable(ngrams))

def func(seq):
    # Works for a group of any size: intersect the n-gram sets of all names.
    seq = seq.values
    seqs_ngrams = map(allngram, seq)
    intersection = reduce(set.intersection, seqs_ngrams)
    longest = max(intersection, key=len)
    return longest

def f(x):
    return func(x)

df = pd.DataFrame({'name':['winnow', 'window', 'win', 'sock', 'graceful', 'disgrace','fixture'], 'cluster':[4,4,4,1,2,2,3]})
df = df.groupby('cluster')['name'].agg(f).reset_index()
Output:

   cluster     name
0        1     sock
1        2    grace
2        3  fixture
3        4      win
Use GroupBy.agg with a custom aggregation function: all values of each group are passed to the function, which converts them to a list and intersects their n-gram sets:
#https://stackoverflow.com/questions/40556491
from functools import partial, reduce
from itertools import chain
from typing import Iterator

def ngram(seq: str, n: int) -> Iterator[str]:
    return (seq[i: i+n] for i in range(0, len(seq)-n+1))

def allngram(seq: str) -> set:
    lengths = range(len(seq)+1)
    ngrams = map(partial(ngram, seq), lengths)
    return set(chain.from_iterable(ngrams))

def func(x):
    # Intersect the n-gram sets of all names in the group and keep the longest.
    seqs_ngrams = map(allngram, x.tolist())
    intersection = reduce(set.intersection, seqs_ngrams)
    longest = max(intersection, key=len)
    return longest

# Strip the literal quotes around the names before comparing.
df['name'] = df['name'].str.strip("'")
df = df.groupby('cluster')['name'].agg(func).reset_index()
print(df)
   cluster     name
0        1     sock
1        2    grace
2        3  fixture
3        4      win
Given a large numpy matrix/multi-dimensional array, what is the best and fastest way to get the indices of the x smallest elements?
from typing import Tuple
import numpy as np

def get_indices_of_k_smallest_as_array(arr: np.ndarray, k: int) -> np.ndarray:
    idx = np.argpartition(arr.ravel(), k)
    return np.array(np.unravel_index(idx, arr.shape))[:, range(k)].transpose().tolist()

def get_indices_of_k_smallest_as_tuple(arr: np.ndarray, k: int) -> Tuple:
    idx = np.argpartition(arr.ravel(), k)
    return tuple(np.array(np.unravel_index(idx, arr.shape))[:, range(min(k, 0), max(k, 0))])
This answer gives the correct indices, but those indices aren't sorted by the size of the elements. That's just how the introselect algorithm used under the hood by np.argpartition works: https://en.wikipedia.org/wiki/Introselect.
It would be nice if the return value were also sorted by the size of the elements, e.g. index 0 of the return points to the smallest element, index 1 points to the 2nd smallest element, etc.
Here's how to do it with sorting. Keep in mind that sorting the results after np.argpartition is going to be much faster than sorting the entire multi-dimensional array.
def get_indices_of_k_smallest_as_array(arr: np.ndarray, k: int) -> np.ndarray:
    ravel_array = arr.ravel()
    indices_on_ravel = np.argpartition(ravel_array, k)
    sorted_indices_on_ravel = sorted(indices_on_ravel, key=lambda x: ravel_array[x])
    sorted_indices_on_original = np.array(np.unravel_index(sorted_indices_on_ravel, arr.shape))[:, range(k)].transpose().tolist()
    # for the fun of numpy indexing, you can do it this way too
    # indices_on_original = np.array(np.unravel_index(indices_on_ravel, arr.shape))[:, range(k)].transpose().tolist()
    # sorted_indices_on_original = sorted(indices_on_original, key=lambda x: arr[tuple(np.array(x).T)])
    return sorted_indices_on_original

def get_indices_of_k_smallest_as_tuple(arr: np.ndarray, k: int) -> Tuple:
    ravel_array = arr.ravel()
    indices_on_ravel = np.argpartition(ravel_array, k)
    sorted_indices_on_ravel = sorted(indices_on_ravel, key=lambda x: ravel_array[x])
    sorted_indices_on_original = tuple(
        np.array(np.unravel_index(sorted_indices_on_ravel, arr.shape))[:, range(min(k, 0), max(k, 0))]
    )
    return sorted_indices_on_original
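A quick sanity check (toy array, values chosen arbitrarily) using the function defined above shows that the indices now come back ordered by element size:

import numpy as np

arr = np.array([[5.0, 1.0, 9.0],
                [3.0, 7.0, 0.5]])

# The three smallest values are 0.5, 1.0 and 3.0, so the indices are returned
# in that order: (1, 2), (0, 1), (1, 0).
print(get_indices_of_k_smallest_as_array(arr, 3))  # [[1, 2], [0, 1], [1, 0]]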
import numpy as np
import pandas as pd

d = {'ABSTRACT_ID': [14145090, 1900667, 8157202, 6784974],
     'TEXT': [
         "velvet antlers vas are commonly used in tradit",
         "we have taken a basic biologic RPA to elucidat4",
         "ceftobiprole bpr is an investigational cephalo",
         "lipoperoxidationderived aldehydes for example",],
     'LOCATION': [1, 4, 2, 1]}
df = pd.DataFrame(data=d)
df
def word_at_pos(x, y):
    pos = x
    string = y
    count = 0
    res = ""
    for word in string:
        if word == ' ':
            count = count + 1
            if count == pos:
                break
            res = ""
        else:
            res = res + word
    print(res)

word_at_pos(df.iloc[0,2], df.iloc[0,1])
For this df I want to create a new column WORD that contains the word from TEXT at the position indicated by LOCATION, e.g. the first line would be "velvet".
I can do this for a single line with the isolated function word_at_pos(x, y), but I can't work out how to apply it to the whole column. I have created new columns with lambda functions before, but can't work out how to fit this function into a lambda.
Looping over TEXT and LOCATION is probably the best approach here, because splitting produces a jagged array, so filtering with NumPy advanced indexing isn't possible.
df["WORDS"] = [txt.split()[loc] for txt, loc in zip(df["TEXT"], df["LOCATION"]-1)]
print(df)
   ABSTRACT_ID  ...                    WORDS
0     14145090  ...                   velvet
1      1900667  ...                        a
2      8157202  ...                      bpr
3      6784974  ...  lipoperoxidationderived

[4 rows x 4 columns]
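For comparison, the same thing can be written with a row-wise apply, which is usually slower than the list comprehension but reads similarly (a sketch, not from the original answer):

# Row-wise alternative; LOCATION is 1-based, hence the -1.
df["WORDS"] = df.apply(lambda row: row["TEXT"].split()[row["LOCATION"] - 1], axis=1)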
I am trying to implement a time fold function to be 'map'ed to various partitions of a dask dataframe, which in turn changes the shape of the dataframe in question (or alternatively produces a new dataframe with the altered shape). This is how far I have gotten. The result 'res' returned on compute is a list of 3 delayed objects. When I try to compute each of them in a loop (last two lines of code), this results in a "TypeError: 'DataFrame' object is not callable". After going through the examples for map_partitions, I also tried altering the input DF (in place) in the function with no return value, which causes a similar TypeError with NoneType. What am I missing?
Also, looking at the visualization (attached) I feel like there is a need for reducing the individually computed (folded) partitions into a single DF. How do I do this?
#! /usr/bin/env python
# Start dask scheduler and workers
# dask-scheduler &
# dask-worker --nthreads 1 --nprocs 6 --memory-limit 3GB localhost:8786 --local-directory /dev/shm &

from dask.distributed import Client
from dask.delayed import delayed
import pandas as pd
import numpy as np
import dask.dataframe as dd
import math

foldbucketsecs = 30
periodicitysecs = 15
secsinday = 24 * 60 * 60
chunksizesecs = 60  # 1 minute
numts = 5
start = 1525132800  # 01/05
end = 1525132800 + (3 * 60)  # 3 minute

c = Client('127.0.0.1:8786')

def fold(df, start, bucket):
    return df

def reduce_folds(df):
    return df

def load(epoch):
    idx = []
    for ts in range(0, chunksizesecs, periodicitysecs):
        idx.append(epoch + ts)
    d = np.random.rand(chunksizesecs/periodicitysecs, numts)
    ts = []
    for i in range(0, numts):
        tsname = "ts_%s" % (i)
        ts.append(tsname)
        gts.append(tsname)
    res = pd.DataFrame(index=idx, data=d, columns=ts, dtype=np.float64)
    res.index = pd.to_datetime(arg=res.index, unit='s')
    return res

gts = []
load(start)
cols = len(gts)
idx1 = pd.DatetimeIndex(start=start, freq=('%sS' % periodicitysecs), end=start+periodicitysecs, dtype='datetime64[s]')
meta = pd.DataFrame(index=idx1[:0], data=[], columns=gts, dtype=np.float64)
dfs = [delayed(load)(fn) for fn in range(start, end, chunksizesecs)]
from_delayed = dd.from_delayed(dfs, meta, 'sorted')
nfolds = int(math.ceil((end - start)/foldbucketsecs))
cprime = nfolds * cols
gtsnew = []
for i in range(0, cprime):
    gtsnew.append("ts_%s,fold=%s" % (i%cols, i/cols))
idx2 = pd.DatetimeIndex(start=start, freq=('%sS' % periodicitysecs), end=start+foldbucketsecs, dtype='datetime64[s]')
meta = pd.DataFrame(index=idx2[:0], data=[], columns=gtsnew, dtype=np.float64)
folded_df = from_delayed.map_partitions(delayed(fold)(from_delayed, start, foldbucketsecs), meta=meta)
result = c.submit(reduce_folds, folded_df)
c.gather(result).visualize(filename='/usr/share/nginx/html/svg/df4.svg')
res = c.gather(result).compute()
for f in res:
    f.compute()
Never mind! It was my fault: instead of wrapping my function in delayed, I simply passed it to the map_partitions call like so, and it worked.
folded_df = from_delayed.map_partitions(fold, start, foldbucketsecs, nfolds, meta=meta)
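To make the difference concrete, here is a minimal, self-contained sketch (toy data, not the original cluster setup): map_partitions takes the function itself plus its extra arguments, and dask calls it once per partition, even when the function changes the partition's shape.

import pandas as pd
import numpy as np
import dask.dataframe as dd

pdf = pd.DataFrame({'a': np.arange(8), 'b': np.arange(8) * 2.0})
ddf = dd.from_pandas(pdf, npartitions=2)

def halve(df, step):
    # Returns a reshaped DataFrame (every `step`-th row of the partition).
    return df.iloc[::step]

# Pass the plain function, not delayed(halve)(...); extra args follow the function.
result = ddf.map_partitions(halve, 2).compute()
print(result)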
I am using NLTK on a dataset stored as a pandas dataframe. All the raw text processing procedures worked fine until I tried to convert the Treebank POS tags to WordNet POS tags. This is the code that worked fine for me.
import pandas as pd
import string
import nltk
from nltk import WordPunctTokenizer, pos_tag
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet as wn, stopwords

# Example dataframe
df = pd.DataFrame([[2, "I am new at programming."],
                   [7, "Leaves are falling from the tree."],
                   [4, "Sophia has been studying since this morning."]], columns = ['ID', 'Text'])

# Tokenize text
tokenizer = nltk.WordPunctTokenizer()
df["Tokens"] = df["Text"].str.lower().apply(tokenizer.tokenize)

# Remove punctuation
pattern = string.punctuation
print(pattern)

def remove_punctuation(tokens):
    filtered = [word for word in tokens if word not in pattern]
    return filtered

df["Tokens"] = df["Tokens"].apply(remove_punctuation)

# Remove stopwords
stopwords = stopwords.words('english')

def remove_stopwords(tokens):
    filtered_words = [word for word in tokens if word not in stopwords]
    return filtered_words

df["Tokens"] = df["Tokens"].apply(remove_stopwords)
The following lines of code did not work, and I got this error:

ValueError: too many values to unpack (expected 2)
def wordnet_pos(pos_tag):
    if pos_tag.startswith('J'):
        return wn.ADJ
    elif pos_tag.startswith('V'):
        return wn.VERB
    elif pos_tag.startswith('N'):
        return wn.NOUN
    elif pos_tag.startswith('R'):
        return wn.ADV
    else:
        return None

def wordnet(tokens):
    pos_tokens = [nltk.pos_tag(token) for token in tokens]
    pos_tokens = [(word, wordnet_pos(pos_tag)) for (word, pos_tag) in pos_tokens]
    return pos_tokens

df["Wordnet"] = df["Tokens"].apply(wordnet)
This is what I had hoped to achieve: to create df["Wordnet"] with the WordNet POS tags.
print(df["Wordnet"])
0 [(new, a), (programming, n)]
1 [(leaves, n), (falling, v), (tree, n)]
2 [(sophia, n), (studying, v), (since, n), (...
Name: Wordnet, dtype: object
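The likely culprit (an observation, not part of the original post) is that pos_tag expects a list of tokens; calling it on each token separately tags individual characters, so the later (word, pos_tag) unpacking sees more than two items. A hedged sketch of one possible fix, reusing wordnet_pos from above and tagging the whole token list at once:

def wordnet_tokens(tokens):
    # pos_tag over the full list yields pairs like ('new', 'JJ'), ('programming', 'NN').
    tagged = pos_tag(tokens)
    converted = [(word, wordnet_pos(tag)) for word, tag in tagged]
    # Drop tokens whose Treebank tag has no WordNet equivalent.
    return [(word, tag) for word, tag in converted if tag is not None]

df["Wordnet"] = df["Tokens"].apply(wordnet_tokens)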
In the following, func represents a function that uses multiple columns (with coupling across the group) and cannot operate directly on pandas.Series. The 0*d['x'] syntax was the lightest I could think of to force the conversion, but I think it's awkward.
Additionally, the resulting pandas.Series (s) still includes the group index, which must be removed before adding as a column to the pandas.DataFrame. The s.reset_index(...) index manipulation seems fragile and error-prone, so I'm curious if it can be avoided. Is there an idiom for doing this?
import pandas
import numpy

df = pandas.DataFrame(dict(i=[1]*8, j=[1]*4+[2]*4, x=list(range(4))*2))
df['y'] = numpy.sin(df['x']) + 1000*df['j']
df = df.set_index(['i','j'])
print('# df\n', df)

def func(d):
    x = numpy.array(d['x'])
    y = numpy.array(d['y'])
    # I want to do math with x,y that cannot be applied to
    # pandas.Series, so explicitly convert to numpy arrays.
    #
    # We have to return an appropriately-indexed pandas.Series
    # in order for it to be admissible as a column in the
    # pandas.DataFrame. Instead of simply "return x + y", we
    # have to make the conversion.
    return 0*d['x'] + x + y

s = df.groupby(df.index).apply(func)

# The Series is still adorned with the (unnamed) group index,
# which will prevent adding as a column of df due to
# Exception: cannot handle a non-unique multi-index!
s = s.reset_index(level=0, drop=True)
print('# s\n', s)

df['z'] = s
print('# df\n', df)
Instead of
0*d['x'] + x + y
you could use
pd.Series(x+y, index=d.index)
When using groupby-apply, instead of dropping the group key index using:
s = df.groupby(df.index).apply(func)
s = s.reset_index(level=0, drop=True)
df['z'] = s
you can tell groupby to drop the keys using the keyword parameter group_keys=False:
df['z'] = df.groupby(df.index, group_keys=False).apply(func)
import pandas as pd
import numpy as np

df = pd.DataFrame(dict(i=[1]*8, j=[1]*4+[2]*4, x=list(range(4))*2))
df['y'] = np.sin(df['x']) + 1000*df['j']
df = df.set_index(['i','j'])

def func(d):
    x = np.array(d['x'])
    y = np.array(d['y'])
    return pd.Series(x+y, index=d.index)

df['z'] = df.groupby(df.index, group_keys=False).apply(func)
print(df)
yields
     x            y            z
i j
1 1  0  1000.000000  1000.000000
  1  1  1000.841471  1001.841471
  1  2  1000.909297  1002.909297
  1  3  1000.141120  1003.141120
  2  0  2000.000000  2000.000000
  2  1  2000.841471  2001.841471
  2  2  2000.909297  2002.909297
  2  3  2000.141120  2003.141120