I need an array version of a function similar to pandas.fillna. From answers collected on the forum I put together the following function, but it is still about 3 times slower than pandas.fillna. I would like to know whether there is a better way to optimize it, thank you.
import numpy as np

def fillna(self, axis=None, mask=None, value=None, method='pad'):
""" array fillna
Parameters
----------
    self : 1d/2d array
    axis : axis to fill along (0 or 1)
    mask : custom mask; defaults to np.isfinite(x)
    value : scalar fill value
    method : 'back', 'pad', 'mean'
--------
"""
x = np.asarray(self)
if mask is None: mask = np.isfinite(x)
    if (value is not None) or (method == 'mean'):
        out = x.copy()
        if method == 'mean':
            if x.ndim == 1:
                out[~mask] = np.nanmean(x)
            else:
                # broadcast the row/column means over the full shape, then fill the gaps
                fill = np.nanmean(x, 1)[:, None] if axis == 1 else np.nanmean(x, 0)
                out[~mask] = np.broadcast_to(fill, x.shape)[~mask]
        else:
            out[~mask] = value
else:
if axis is None: axis = 0
if x.ndim==1:
if method=='pad':
idx = np.where(mask,np.arange(mask.shape[0]),0)
np.maximum.accumulate(idx,axis=0,out=idx)
return x[idx]
elif method=='back':
idx = np.where(mask[::-1],np.arange(mask.shape[0]),0)
np.maximum.accumulate(idx,axis=0,out=idx)
return x[mask.shape[0]-idx[::-1]-1]
else: return x
if axis==1:
if method=='back': mask = mask[:, ::-1]
idx = np.where(mask,np.arange(mask.shape[1]),0)
else:
if method=='back': mask = mask[::-1,:]
idx = np.where(mask,np.arange(mask.shape[0])[:,None],0)
np.maximum.accumulate(idx,axis=axis,out=idx)
if axis==1:
if method=='back': idx = idx.shape[1]-idx[:, ::-1] - 1
out = x[np.arange(idx.shape[0])[:,None], idx]
else:
if method=='back': idx = idx.shape[0]-idx[::-1, :] - 1
out = x[idx,np.arange(idx.shape[1])]
return out
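For reference, a quick sanity check I run against pandas before timing anything (assuming the function above is in scope; ffill is the pandas equivalent of method='pad'):

import numpy as np
import pandas as pd

a = np.array([[1.0, np.nan, 3.0],
              [np.nan, 5.0, np.nan]])

out = fillna(a, axis=1, method='pad')            # forward-fill along each row
ref = pd.DataFrame(a).ffill(axis=1).to_numpy()   # pandas equivalent
print(np.allclose(out, ref, equal_nan=True))     # True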
I am building a recommendation engine from a Netflix dataset taken from Kaggle.
import pandas as pd
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics import precision_recall_fscore_support

df = pd.read_csv("netflix.csv")
df = df.drop(["ratingdescription"], axis=1)
df = pd.get_dummies(df, columns=["rating_level"])
df = df.dropna()
df = df[['title', 'rating', 'release_year', 'user_rating_score', 'user_rating_size']]
df['title'] = df['title'].astype('category')
df['title'] = df['title'].cat.codes
model_knn = NearestNeighbors(metric = 'cosine', algorithm = 'brute')
model_knn.fit(df.drop(['title'], axis=1))
def recommend(title, df, model_knn):
    query_index = df.loc[df['title'] == title].index.values[0]
    distances, indices = model_knn.kneighbors(df.loc[df['title'] == title].drop(['title'], axis=1), n_neighbors=6)
    for i in range(0, len(indices.flatten())):
        if indices.flatten()[i] == query_index:
            continue
        else:
            recommended_title = df.loc[df.index == indices.flatten()[i], 'title'].values[0]
            recommended_title = df.loc[df['title'] == recommended_title]['title'].cat.categories[recommended_title]
            print('Recommendation:', recommended_title)

def evaluate(title, df, model_knn):
    query_index = df.loc[df['title'] == title].index.values[0]
    distances, indices = model_knn.kneighbors(df.loc[df['title'] == title].drop(['title'], axis=1), n_neighbors=6)
    recommended_titles = []
    for i in range(0, len(indices.flatten())):
        if indices.flatten()[i] == query_index:
            continue
        else:
            recommended_title = df.loc[df.index == indices.flatten()[i], 'title'].values[0]
            recommended_titles.append(recommended_title)
    actual_titles = df.loc[df['rating'] == df.loc[df['title'] == title]['rating'].values[0], 'title']
    actual_titles = actual_titles.drop(query_index)
    actual_titles = [df.loc[df['title'] == title]['title'].cat.categories[title] for title in actual_titles]
    recommended_titles = [df.loc[df['title'] == title]['title'].cat.categories[title] for title in recommended_titles]
    precision, recall, _, _ = precision_recall_fscore_support(actual_titles, recommended_titles, average='macro')
    print('Precision:', precision)
    print('Recall:', recall)

recommend("The Shawshank Redemption", df, model_knn)
evaluate("The Shawshank Redemption", df, model_knn)
I have tried altering the code many times, but it's either this or a "KeyError: 'rating_level'", which indicates that the column "rating_level" is not found in the dataframe **df**.
The error I currently receive is:
ValueError: could not convert string to float: 'PG-13'
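For what it's worth, that ValueError is raised because the frame passed to NearestNeighbors still contains the string column 'rating' (values such as 'PG-13'), and scikit-learn tries to cast every feature to float. A minimal sketch of one way to make the features numeric before fitting, assuming the column names used above:

import pandas as pd
from sklearn.neighbors import NearestNeighbors

df = pd.read_csv("netflix.csv")
# hypothetical fix: encode the string 'rating' column as category codes,
# so that every column handed to NearestNeighbors is numeric
df['rating'] = df['rating'].astype('category').cat.codes

features = df[['rating', 'release_year', 'user_rating_score', 'user_rating_size']].dropna()
model_knn = NearestNeighbors(metric='cosine', algorithm='brute')
model_knn.fit(features)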
I got a TypeError while training my model (the traceback is shown in the attached screenshot).
Here is my data preprocessing code:
import math
import shutil
import struct
from collections import defaultdict
from functools import lru_cache
from pathlib import Path

import lmdb
import numpy as np
import torch.utils.data
from tqdm import tqdm


class CriteoDatasetOtherSplit(torch.utils.data.Dataset):
"""
Criteo Display Advertising Challenge Dataset
    Data preparation:
* Remove the infrequent features (appearing in less than threshold instances) and treat them as a single feature
* Discretize numerical values by log2 transformation which is proposed by the winner of Criteo Competition
:param dataset_path: criteo train.txt path.
:param cache_path: lmdb cache path.
:param rebuild_cache: If True, lmdb cache is refreshed.
:param min_threshold: infrequent feature threshold.
Reference:
https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset
https://www.csie.ntu.edu.tw/~r01922136/kaggle-2014-criteo.pdf
"""
def __init__(self, dataset_path=None, cache_path='./criteo', rebuild_cache=False, min_threshold=8):
self.NUM_FEATS = 39
self.NUM_INT_FEATS = 13
self.min_threshold = min_threshold
if rebuild_cache or not Path(cache_path).exists():
shutil.rmtree(cache_path, ignore_errors=True)
if dataset_path is None:
raise ValueError('create cache: failed: dataset_path is None')
self.__build_cache(dataset_path, cache_path)
self.env = lmdb.open(cache_path, create=False, lock=False, readonly=True)
with self.env.begin(write=False) as txn:
self.length = txn.stat()['entries'] - 1
self.field_dims = np.frombuffer(txn.get(b'field_dims'), dtype=np.uint32)
self.other_dims = np.frombuffer(txn.get(b'other_dims'), dtype=np.uint32)
def __getitem__(self, index):
with self.env.begin(write=False) as txn:
np_array = np.frombuffer(
txn.get(struct.pack('>I', index)), dtype=np.uint32).astype(dtype=np.long)
return np_array[1:], np_array[0]
def __len__(self):
return self.length
def __build_cache(self, path, cache_path):
feat_mapper, other_feat_mapper, defaults = self.__get_feat_mapper(path)
with lmdb.open(cache_path, map_size=int(1e11)) as env:
field_dims = np.zeros(self.NUM_FEATS, dtype=np.uint32)
other_dims = np.zeros(self.NUM_FEATS, dtype=np.uint32)
for i, fm in other_feat_mapper.items():
other_dims[i - 1] = len(fm)
for i, fm in feat_mapper.items():
field_dims[i - 1] = len(fm) + other_dims[i - 1]
with env.begin(write=True) as txn:
txn.put(b'field_dims', field_dims.tobytes())
txn.put(b'other_dims', other_dims.tobytes())
for buffer in self.__yield_buffer(path, feat_mapper, other_feat_mapper, defaults):
with env.begin(write=True) as txn:
for key, value in buffer:
txn.put(key, value)
def __get_feat_mapper(self, path):
feat_cnts = defaultdict(lambda: defaultdict(int))
with open(path) as f:
pbar = tqdm(f, mininterval=1, smoothing=0.1)
pbar.set_description('Create criteo dataset cache: counting features')
for line in pbar:
values = line.rstrip('\n').split('\t')
if len(values) != self.NUM_FEATS + 1:
continue
for i in range(1, self.NUM_INT_FEATS + 1):
feat_cnts[i][convert_numeric_feature(values[i])] += 1
for i in range(self.NUM_INT_FEATS + 1, self.NUM_FEATS + 1):
feat_cnts[i][values[i]] += 1
feat_mapper = {i: {feat for feat, c in cnt.items() if c >= self.min_threshold} for i, cnt in feat_cnts.items()}
other_feat_mapper = {i: {feat for feat, c in cnt.items() if c < self.min_threshold} for i, cnt in feat_cnts.items()}
feat_mapper = {i: {feat: idx for idx, feat in enumerate(cnt)} for i, cnt in feat_mapper.items()}
other_feat_mapper = {i: {feat: idx for idx, feat in enumerate(cnt)} for i, cnt in other_feat_mapper.items()}
defaults = {i: len(cnt) for i, cnt in feat_mapper.items()}
return feat_mapper, other_feat_mapper, defaults
def __yield_buffer(self, path, feat_mapper, other_feat_mapper, defaults, buffer_size=int(1e5)):
item_idx = 0
buffer = list()
with open(path) as f:
pbar = tqdm(f, mininterval=1, smoothing=0.1)
pbar.set_description('Create criteo dataset cache: setup lmdb')
for line in pbar:
values = line.rstrip('\n').split('\t')
if len(values) != self.NUM_FEATS + 1:
continue
np_array = np.zeros(self.NUM_FEATS + 1, dtype=np.uint32)
np_array[0] = int(values[0])
for i in range(1, self.NUM_INT_FEATS + 1):
other_feat_mapper[i].setdefault(convert_numeric_feature(values[i]), 0)
np_array[i] = feat_mapper[i].get(convert_numeric_feature(values[i]),
other_feat_mapper[i][convert_numeric_feature(values[i])]+defaults[i])
for i in range(self.NUM_INT_FEATS + 1, self.NUM_FEATS + 1):
other_feat_mapper[i].setdefault(values[i], 0)
np_array[i] = feat_mapper[i].get(values[i], other_feat_mapper[i][values[i]]+defaults[i])
buffer.append((struct.pack('>I', item_idx), np_array.tobytes()))
item_idx += 1
if item_idx % buffer_size == 0:
yield buffer
buffer.clear()
yield buffer
@lru_cache(maxsize=None)
def convert_numeric_feature(val: str):
if val == '':
return 'NULL'
v = int(val)
if v > 2:
return str(int(math.log(v) ** 2))
else:
return str(v - 2)
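For clarity on the discretization step, convert_numeric_feature maps raw integer counts to a small set of string buckets; a few example calls (values picked arbitrarily, assuming the function above is in scope):

print(convert_numeric_feature(''))      # 'NULL'
print(convert_numeric_feature('1'))     # '-1'   (v <= 2 maps to str(v - 2))
print(convert_numeric_feature('1000'))  # '47'   (int(math.log(1000) ** 2))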
I want to return a dataframe from this function so that it can be used elsewhere (for a Plotly graph, to be exact).
My idea is to take the dataframe created by points_sum(), save it under the team name, and then use that dataframe in my px.line(dataframe = team_name).
In essence, I want to use the men_points_df variable after I have created it.
def points_sum(team):
points = 0
men_points = []
for index, row in menscore_df.iterrows():
if row['hometeam'] == team:
if row['homegoals'] > row['awaygoals']:
points += 2
elif row['homegoals'] == row['awaygoals']:
points += 1
            elif row['homegoals'] < row['awaygoals']:
                pass  # a loss adds no points
date = str(row['date'])
men_points.append([date, points])
if row['awayteam'] == team:
if row['homegoals'] < row['awaygoals']:
points += 2
elif row['homegoals'] == row['awaygoals']:
points += 1
            elif row['homegoals'] > row['awaygoals']:
                pass  # a loss adds no points
date = str(row['date'])
men_points.append([date, points])
men_points_df = pd.DataFrame(men_points, columns = ["Date", 'Points'])
return men_points_df
In Plotly, I am trying to use my new dataframe (men_points_df) as below, but I get an undefined-name error, even though I can print it: for example, test = points_sum("FIF") (FIF is one of the team names) shows the correct dataframe in the console when I type test:
elif pathname == "/page-3":
return [html.H1('Seasonal performance',
style={'textAlign':'center'}),
html.Div(
children=[
html.H2('Select team',style={'textAlign':'center'}),
html.Br(),
html.Br(),
dcc.Dropdown(
id='team_dd',
options=[{'label': v, 'value': k} for k,v in teams_all.items()],
)]),
dcc.Graph(id="performance_graph")
]
@app.callback(
    Output(component_id="performance_graph", component_property="figure"),
    Input(component_id="team_dd", component_property="value")
)
def update_graph(option_selected):
title = "none selected"
if option_selected:
title = option_selected
line_fig = px.line(
test, # <------------ THIS IS THE ISSUE
title = f"{title}",
x = "Date", y = "Points")
return line_fig
Just call points_sum in the update_graph function, before you use test:
def update_graph(option_selected):
title = "none selected"
if option_selected:
title = option_selected
# vvv Here vvv
test = points_sum("FIF")
line_fig = px.line(
test, #THIS IS THE ISSUE
title = f"{title}",
x = "Date", y = "Points")
return line_fig
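If the dropdown is meant to drive the graph, the selected value (rather than the hard-coded "FIF") could be passed straight to points_sum; a small variation of the callback above, with a fallback team of my own choosing for when nothing is selected yet:

def update_graph(option_selected):
    title = option_selected if option_selected else "none selected"
    # hypothetical fallback team while the dropdown is still empty
    team = option_selected if option_selected else "FIF"
    test = points_sum(team)
    line_fig = px.line(test, title=f"{title}", x="Date", y="Points")
    return line_fig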
I am trying to merge k-means into a SOM for finding the best matching unit. While clustering points to return the cluster number for each point, I encounter this error at line 159:
"ValueError: all the input arrays must have same number of dimensions"
distances_from_center = np.concatenate((distances_from_center, [dist(teacher,nodes)]))
I am trying to optimize the SOM using the fast kmeans approach.
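For reference, np.concatenate raises that exact ValueError whenever its inputs have different numbers of dimensions; a tiny standalone reproduction (not the SOM code itself), plus the usual fix of keeping both operands 1-D:

import numpy as np

a = np.array([1.0, 2.0])        # 1-D, shape (2,)
b = np.array([[3.0, 4.0]])      # 2-D, shape (1, 2), e.g. [dist(...)] wrapping an array result

# np.concatenate((a, b))        # ValueError: all the input arrays must have same number of dimensions
print(np.concatenate((a, b.ravel())))   # [1. 2. 3. 4.] -- both pieces flattened to 1-D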
import numpy as np
import matplotlib.pyplot as plt

N = 8 # linear size of 2D map
M = 8
n_teacher = 10000 # # of teacher signal
np.random.seed(100)# test seed for random number
def main():
# initialize node vectors
nodes = np.random.rand(N,M,3)# node array. each node has 3-dim weight vector
#nodes = centers_initiation(n_teacher, 4)
#initial out put
#TODO; make out put function to simplify here
plt.imshow(nodes, interpolation='none')
plt.savefig("init.png")
""""""
""" Learning """
""""""
# teacher signal
teachers = np.random.rand(n_teacher,3)
for i in range(n_teacher):
train(nodes, teachers, i)
# intermediate out put
if i%200 ==0 or i< 100: #out put for i<100 or each 1000 iteration
plt.imshow(nodes, interpolation='none')
plt.savefig(str(i)+".png")
#output
plt.imshow(nodes, interpolation='none')
plt.savefig("final.png")
def train(nodes, teachers, i):
bmu = best_matching_unit(nodes, teachers[i])
#print bmu
for x in range(N):
for y in range(M):
c = np.array([x,y])# coordinate of unit
d = np.linalg.norm(c-bmu)
L = learning_ratio(i)
S = learning_radius(i,d)
for z in range(3): #TODO clear up using numpy function
nodes[x,y,z] += L*S*(teachers[i,z] - nodes[x,y,z])
def dist(x, y):
# euclidean distance
if len(x.shape) == 1:
d = np.sqrt(np.sum((x - y) ** 2))
else:
d = np.sqrt(np.sum((x - y) ** 2, axis=1))
return d
def centers_initiation(teacher, number_of_centers):
# initialization of clusters centers as most distant points. return cluster centers (point)
dist_per_point = np.empty((0, 0), int)
dist_for_point = 0
index_of_deleted_point = 0
for point in teacher:
for other_point in np.delete(teacher, index_of_deleted_point, axis=0):
dist_for_point += dist(point, other_point)
dist_per_point = np.append(dist_per_point, dist_for_point)
dist_for_point = 0
index_of_deleted_point += 1
ordered_points_by_min = np.array(
[key for key, value in sorted(enumerate(dist_per_point), key=lambda p: p[1], reverse=True)])
return teacher[ordered_points_by_min[0:number_of_centers]]
def get_cluster_number(teacher, nodes):
# clustering points. return numbers of clusters for each point
distances_from_centers = np.zeros((0, nodes.shape[0]), int)
for point in teacher:
distances_from_center = np.array([])
for center in nodes:
distances_from_center = np.concatenate((distances_from_center, [dist(teacher,nodes)]))
distances_from_centers = np.concatenate((distances_from_centers, [distances_from_center]), axis=0)
nearest_center_number = np.argmin(distances_from_centers, axis=1)
return nearest_center_number
def best_matching_unit(teacher, nodes):
clusters = get_cluster_number(teacher, nodes)
clusters_centers_shift = 1
new_centers = np.zeros(nodes.shape)
counter = 0
while np.sum(clusters_centers_shift) != 0:
counter += 1
        for i in range(nodes.shape[0]):  # range, not the Python 2 xrange
new_centers[i] = np.mean(teacher[:][clusters == i], axis=0)
clusters_centers_shift = dist(new_centers, nodes)
clusters = get_cluster_number(teacher, new_centers)
nodes = np.copy(new_centers)
return clusters
def neighbourhood(t):#neighbourhood radious
halflife = float(n_teacher/4) #for testing
initial = float(N/2)
return initial*np.exp(-t/halflife)
def learning_ratio(t):
halflife = float(n_teacher/4) #for testing
initial = 0.1
return initial*np.exp(-t/halflife)
def learning_radius(t, d):
# d is distance from BMU
s = neighbourhood(t)
return np.exp(-d**2/(2*s**2))
main()
Here is an autoencoder.
My problem is that I don't know how to set "mode" so that it behaves differently for training and for testing.
Can I use a global variable or a placeholder to switch "mode"?
I would be glad if you could show me an example :D
#batch normalization
def Batch_norm_en(Wx_plus_b, i):
if mode == 1:
fc_mean_en, fc_var_en = tf.nn.moments(Wx_plus_b, axes=[0, 1])
else:
fc_mean_en, fc_var_en = fc_mean_en, fc_var_en
Wx_plus_b = tf.nn.batch_normalization(Wx_plus_b, fc_mean_en, fc_var_en, shift_en[i], scale_en[i], 10**(-3))
return Wx_plus_b
def Batch_norm_de(Wx_plus_b, i):
if mode == 1:
fc_mean_de, fc_var_de = tf.nn.moments(Wx_plus_b, axes=[0, 1])
else:
fc_mean_de, fc_var_de = fc_mean_de, fc_var_de
Wx_plus_b = tf.nn.batch_normalization(Wx_plus_b, fc_mean_de, fc_var_de, shift_de[i], scale_de[i], 10**(-3))
return Wx_plus_b
#encoder data
def encoder_model(x):
res = x
for i in range(0, len(en_n_neurons)-1):
Wx_plus_b = tf.matmul(res,W_en[i]) + b_en[i]
Wx_plus_b = Batch_norm_en(Wx_plus_b, i)
res = tf.nn.sigmoid(Wx_plus_b)
return res
#decoder data
def decoder_model(x):
res = x
for i in range(0, len(de_n_neurons)-1):
Wx_plus_b = tf.matmul(res,W_de[i]) + b_de[i]
Wx_plus_b = Batch_norm_de(Wx_plus_b, i)
res = tf.nn.sigmoid(Wx_plus_b)
return res
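One common TF1-style pattern, shown here only as a minimal sketch under my own variable names (is_training, moving_mean, moving_var) rather than a drop-in replacement for the functions above: feed a boolean placeholder and let tf.cond pick between the batch moments and stored moving averages.

import tensorflow as tf

# boolean fed at session.run time: True while training, False when testing
is_training = tf.placeholder(tf.bool, name='is_training')

def batch_norm(x, scale, shift, moving_mean, moving_var, decay=0.99, eps=1e-3):
    # statistics of the current mini-batch
    batch_mean, batch_var = tf.nn.moments(x, axes=[0])

    def train_stats():
        # update the running averages, then normalize with the batch statistics
        update_mean = tf.assign(moving_mean, decay * moving_mean + (1 - decay) * batch_mean)
        update_var = tf.assign(moving_var, decay * moving_var + (1 - decay) * batch_var)
        with tf.control_dependencies([update_mean, update_var]):
            return tf.identity(batch_mean), tf.identity(batch_var)

    def test_stats():
        # at test time, reuse the statistics accumulated during training
        return moving_mean, moving_var

    mean, var = tf.cond(is_training, train_stats, test_stats)
    return tf.nn.batch_normalization(x, mean, var, shift, scale, eps)

During training you would pass feed_dict={is_training: True, ...} and at evaluation time feed_dict={is_training: False, ...}; tf.layers.batch_normalization(..., training=is_training) wraps the same idea if you prefer a built-in.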