Dimension problems in scipy.optimize.minimize - dimensions

I'm having some dimensions problems but I've checked all the constraints dimensions and there doesn't seem to be an error, if someone could help me I would appreciate it.
# Librerias necesarias
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
# Constantes de eficiencia de los conversores del EH
# CHP
n_W = 0.3 # Electricidad
n_Q = 0.45 # Termal
# AB
n_T = 0.8 # Termal
# CERG
COP_CERG = 3
# WARG
COP_WARG = 0.7
# Limites superiores
v_p_CHP = np.array([0,300,450,0])
v_p_AB = np.array([0,0,900,0])
v_p_CERG = np.array([0,0,0,400])
v_p_WARG = np.array([0,0,0,400])
# Limites inferiores
v_d_CHP = np.array([0,300/2,450/2,0])
v_d_AB = np.array([0,0,0,0])
v_d_CERG = np.array([0,0,0,0])
v_d_WARG = np.array([0,0,0,0])
# Precio del gas
P_g = 0.02 #kW/h
# Precio electricidad
P_e = np.array([0,40,20,35,40,60,110,120,115,75,80,120,90,70,60,40])
# Demanda de electricidad, calefacción y refrigeración
D_h = np.array([0,200,320,400,580,550,520,500,470,420,320,200])
D_e = np.array([0,150,210,180,175,180,210,160])
D_c = np.array([0,0,25,50,75,125,150,125,75,50,25,0])
# Matrices de espeficicacion por componente
M = 4 # Numero de vectores de energia (Electricidad,gas,calefaccion y regrigeracion)
zero_vect_v = np.zeros((1,4))
zero_vect_h = np.zeros((4,1))
# H_CHP
H_CHP = np.array([[0,0,0,0],
[n_W,0,0,0],
[n_Q,0,0,0],
[0,0,0,0]])
# H_AB
H_AB = np.array([[0,0,0,0],
[0,0,0,0],
[n_T,0,0,0],
[0,0,0,0]])
# H_WARG
H_WARG = np.array([[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,0,COP_WARG,0]])
# H_CERG
H_CERG = np.array([[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,COP_CERG,0,0]])
# Matrices de interconección
def T_o_AB(X):
return np.diag([0,0,X[5],0])
def T_o_CHP(X):
return np.diag([0,X[2],X[3],0])
T_o_CERG = np.diag([0,0,0,1])
T_o_WARG = np.diag([0,0,0,1])
def T_o_EDS(X):
return np.diag([0,X[0],0,0])
def T_AB_i(X):
return np.diag([1-X[1],0,0,0])
def T_CHP_i(X):
return np.diag([X[1],0,0,0])
def T_CERG_CHP(X):
return np.diag([0,1-X[2],0,0])
def T_CERG_i(X):
return np.diag([0,1,1-X[0],0])
def T_WARG_AB(X):
return np.diag([0,0,1-X[5],0])
def T_WARG_CHP(X):
return np.diag([0,0,X[4],0])
T_zero = np.zeros((M,M))
I = np.identity(M)
# Costo de la electricidad por horas
# Dominio del tiempo (rangos de tiempo)
P_tiempo = np.arange(1,25,1)
# Definicion del vector de demanda eléctrica
P_e_vect_1 = np.zeros((P_tiempo.shape[0]))
P_e_vect_1[(P_tiempo>=0) & (P_tiempo<=2)] = P_e[1]
P_e_vect_1[(P_tiempo>2) & (P_tiempo<=6)] = P_e[2]
P_e_vect_1[(P_tiempo>6) & (P_tiempo<=7)] = P_e[3]
P_e_vect_1[(P_tiempo>7) & (P_tiempo<=8)] = P_e[4]
P_e_vect_1[(P_tiempo>8) & (P_tiempo<=9)] = P_e[5]
P_e_vect_1[(P_tiempo>9) & (P_tiempo<=10)] = P_e[6]
P_e_vect_1[(P_tiempo>10) & (P_tiempo<=12)] = P_e[7]
P_e_vect_1[(P_tiempo>12) & (P_tiempo<=13)] = P_e[8]
P_e_vect_1[(P_tiempo>13) & (P_tiempo<=15)] = P_e[9]
P_e_vect_1[(P_tiempo>15) & (P_tiempo<=19)] = P_e[10]
P_e_vect_1[(P_tiempo>19) & (P_tiempo<=20)] = P_e[11]
P_e_vect_1[(P_tiempo>20) & (P_tiempo<=21)] = P_e[12]
P_e_vect_1[(P_tiempo>21) & (P_tiempo<=22)] = P_e[13]
P_e_vect_1[(P_tiempo>22) & (P_tiempo<=23)] = P_e[14]
P_e_vect_1[(P_tiempo>23) & (P_tiempo<=24)] = P_e[15]
P_e_vect_1 = P_e_vect_1/1000
# Demanda
# Definicion del vector para demanda electricidad
P_e_vect_d_1 = np.zeros((P_tiempo.shape[0]))
P_e_vect_d_1[(P_tiempo>=0) & (P_tiempo<=8)] = D_e[1]
P_e_vect_d_1[(P_tiempo>8) & (P_tiempo<=12)] = D_e[2]
P_e_vect_d_1[(P_tiempo>12) & (P_tiempo<=15)] = D_e[3]
P_e_vect_d_1[(P_tiempo>15) & (P_tiempo<=18)] = D_e[4]
P_e_vect_d_1[(P_tiempo>18) & (P_tiempo<=19)] = D_e[5]
P_e_vect_d_1[(P_tiempo>19) & (P_tiempo<=20)] = D_e[6]
P_e_vect_d_1[(P_tiempo>20) & (P_tiempo<=24)] = D_e[7]
# Definicion del vector para demanda refrigeracion
P_c_vect_d_1 = np.zeros((P_tiempo.shape[0]))
P_c_vect_d_1[(P_tiempo>=0) & (P_tiempo<=9)] = D_c[1]
P_c_vect_d_1[(P_tiempo>=9) & (P_tiempo<=10)] = D_c[2]
P_c_vect_d_1[(P_tiempo>=10) & (P_tiempo<=12)] = D_c[3]
P_c_vect_d_1[(P_tiempo>=12) & (P_tiempo<=13)] = D_c[4]
P_c_vect_d_1[(P_tiempo>=13) & (P_tiempo<=14)] = D_c[5]
P_c_vect_d_1[(P_tiempo>=14) & (P_tiempo<=15)] = D_c[6]
P_c_vect_d_1[(P_tiempo>=15) & (P_tiempo<=16)] = D_c[7]
P_c_vect_d_1[(P_tiempo>=16) & (P_tiempo<=17)] = D_c[8]
P_c_vect_d_1[(P_tiempo>=17) & (P_tiempo<=18)] = D_c[9]
P_c_vect_d_1[(P_tiempo>=18) & (P_tiempo<=19)] = D_c[10]
P_c_vect_d_1[(P_tiempo>=19) & (P_tiempo<=24)] = D_c[11]
# Definicion del vector para demanda calefaccion
P_h_vect_d_1 = np.zeros((P_tiempo.shape[0]))
P_h_vect_d_1[(P_tiempo>=0) & (P_tiempo<=6)] = D_h[1]
P_h_vect_d_1[(P_tiempo>=6) & (P_tiempo<=7)] = D_h[2]
P_h_vect_d_1[(P_tiempo>=7) & (P_tiempo<=8)] = D_h[3]
P_h_vect_d_1[(P_tiempo>=8) & (P_tiempo<=12)] = D_h[4]
P_h_vect_d_1[(P_tiempo>=12) & (P_tiempo<=13)] = D_h[5]
P_h_vect_d_1[(P_tiempo>=13) & (P_tiempo<=14)] = D_h[6]
P_h_vect_d_1[(P_tiempo>=14) & (P_tiempo<=16)] = D_h[7]
P_h_vect_d_1[(P_tiempo>=16) & (P_tiempo<=17)] = D_h[8]
P_h_vect_d_1[(P_tiempo>=17) & (P_tiempo<=21)] = D_h[9]
P_h_vect_d_1[(P_tiempo>=21) & (P_tiempo<=23)] = D_h[10]
P_h_vect_d_1[(P_tiempo>=23) & (P_tiempo<=24)] = D_h[11]
# Arreglo de dimensiones
P_e_vect_d_1 = np.array([P_e_vect_d_1])
P_c_vect_d_1 = np.array([P_c_vect_d_1])
P_h_vect_d_1 = np.array([P_h_vect_d_1])
# Concatenar los vectores de demanda
Vout = np.concatenate((np.zeros((1,24)),P_e_vect_d_1,P_h_vect_d_1,P_c_vect_d_1),axis=0)
Vout = Vout.T
# Construccion de matriz H
def H_matrix(X):
return T_o_EDS(X)*I + T_o_AB(X)*H_AB*T_AB_i(X)*I + T_o_CHP(X)*H_CHP*T_CHP_i(X)*I + T_o_CERG#H_CERG*(T_CERG_i(X)*I + T_CERG_CHP(X)*H_CHP*T_CHP_i(X)*I) + T_o_WARG#H_WARG*(T_WARG_AB(X)*H_AB*T_AB_i(X)*I + T_WARG_CHP(X)*H_CHP*T_CHP_i(X)*I)
save_results = [] # Lista para guardar los resultados
# Problema de optimizacion
for t in P_tiempo:
# Definicion de la funcion de costo
def objective_function(X):
return P_e_vect_1[t-1]*X[7] + P_g*X[6]
# Restricciones
cons = ({'type': 'eq',
'fun': lambda X: H_matrix(X)#np.array([[X[6]],[X[7]],[0],[0]]) - Vout[t-1,:].T },
{'type': 'eq',
'fun': lambda X: H_CHP#np.array([[X[1]*X[6]],[0],[0],[0]]) - np.array([[0],[X[8]],[X[9]],[0]])},
{'type': 'eq',
'fun': lambda X: H_AB#np.array([[(1-X[1])*X[6]],[0],[0],[0]]) - np.array([[0],[0],[X[10]],[0]])},
{'type': 'eq',
'fun': lambda X: H_WARG#np.array([[0],[0],[X[4]*X[9]+(1-X[5])*X[10]],[0]]) - np.array([[0],[0],[0],[X[12]]])},
{'type': 'eq',
'fun': lambda X: H_CERG#np.array([[0],[(1-X[0])*X[7]+(1-X[2])*X[8]],[0],[0]]) - np.array([[0],[0],[0],[X[11]]])},
{'type': 'ineq',
'fun': lambda X: -(X[0]-1)*X[7]},
{'type': 'ineq',
'fun': lambda X: -(H_CHP#np.array([[X[1]*X[6]],[0],[0],[0]]) - v_p_CHP.T)},
{'type': 'ineq',
'fun': lambda X: -(H_AB#np.array([[(1-X[1])*X[6]],[0],[0],[0]]) - v_p_AB.T)},
{'type': 'ineq',
'fun': lambda X: -(H_WARG#np.array([[0],[0],[X[4]*X[9]+(1-X[5])*X[10]],[0]]) - v_p_WARG.T)},
{'type': 'ineq',
'fun': lambda X: -(H_CERG#np.array([[0],[(1-X[0])*X[7]+(1-X[2])*X[8]],[0],[0]]) - v_p_CERG.T)},
{'type': 'ineq',
'fun': lambda X: -(v_d_CHP.T - H_CHP#np.array([[X[1]*X[6]],[0],[0],[0]]))},
{'type': 'ineq',
'fun': lambda X: (H_AB#np.array([[(1-X[1])*X[6]],[0],[0],[0]]))},
{'type': 'ineq',
'fun': lambda X: (H_WARG#np.array([[0],[0],[X[4]*X[9]+(1-X[5])*X[10]],[0]]))},
{'type': 'ineq',
'fun': lambda X: (H_CERG#np.array([[0],[(1-X[0])*X[7]+(1-X[2])*X[8]],[0],[0]]))})
# Condicion inicial
if t == 1:
x0 = np.zeros((13,1))
else:
x0 = save_results[t-2]
# Bounds
bnds = ((0, 1),(0, 1),(0, 1),(0, 1),(0, 1),(0, 1),(0, None),(0, None),(0, None),(0, None),(0, None),(0, None),(0, None))
# Problema de optimizacion
sol = minimize(objective_function,x0,constraints=cons,bounds=bnds)
solution = sol.x
save_results.append(solution)
It shows this error: ValueError: all the input array dimensions for the concatenation axis must match exactly, but along dimension 1, the array at index 0 has size 4 and the array at index 1 has size 1.
I look at the eq constraints because apparently the error's there but all the dimensions of the matrices and vector match.

Related

Process finished with exit code 137 (interrupted by signal 9: SIGKILL)

I have added a large-scale key values pairs in python file which is about 20000 and i got below error after running a code.
Trace
(236167, 3)
Process finished with exit code 137 (interrupted by signal 9: SIGKILL)
File.py
import pandas as pd
import cupy.cuda as np
import itertools
from sklearn import metrics
from sklearn.metrics import confusion_matrix, accuracy_score, roc_curve, auc
import matplotlib.pyplot as plt
from tqdm import tqdm
np.Device(0).use()
# --------------------------
# Data set
# Ref: https://github.com/serengil/deepface/tree/master/tests/dataset
idendities = {
"AnneBancroft": [
"13859_AnneBancroft_25_f.jpg",
"13872_AnneBancroft_73_f.jpg",
"13864_AnneBancroft_31_f.jpg",
"13870_AnneBancroft_61_f.jpg",
"13844_AnneBancroft_67_f.jpg",
"13863_AnneBancroft_22_f.jpg",
"13869_AnneBancroft_72_f.jpg",
"13843_AnneBancroft_60_f.jpg",
"13868_AnneBancroft_71_f.jpg",
"13860_AnneBancroft_66_f.jpg",
"13853_AnneBancroft_49_f.jpg",
"13842_AnneBancroft_51_f.jpg",
"13874_AnneBancroft_73_f.jpg",
"13846_AnneBancroft_44_f.jpg",
"13871_AnneBancroft_35_f.jpg",
"13857_AnneBancroft_24_f.jpg",
"13850_AnneBancroft_53_f.jpg",
"13865_AnneBancroft_41_f.jpg",
"13862_AnneBancroft_46_f.jpg",
"13852_AnneBancroft_69_f.jpg",
"13866_AnneBancroft_68_f.jpg",
"13873_AnneBancroft_25_f.jpg",
"13861_AnneBancroft_23_f.jpg",
"13848_AnneBancroft_52_f.jpg",
"13847_AnneBancroft_33_f.jpg",
"13851_AnneBancroft_28_f.jpg",
"13856_AnneBancroft_25_f.jpg",
"13845_AnneBancroft_31_f.jpg",
"13867_AnneBancroft_70_f.jpg",
"13854_AnneBancroft_70_f.jpg",
"13849_AnneBancroft_61_f.jpg",
"13855_AnneBancroft_28_f.jpg",
"13858_AnneBancroft_22_f.jpg"
],
"RoseMarie": [
"9907_RoseMarie_82_f.jpg",
"9893_RoseMarie_35_f.jpg",
"9911_RoseMarie_88_f.jpg",
"9906_RoseMarie_80_f.jpg",
"9895_RoseMarie_40_f.jpg",
"9901_RoseMarie_57_f.jpg",
"9903_RoseMarie_77_f.jpg",
"9892_RoseMarie_30_f.jpg",
"9909_RoseMarie_85_f.jpg",
"9900_RoseMarie_52_f.jpg",
"9897_RoseMarie_44_f.jpg",
"9904_RoseMarie_78_f.jpg",
"9905_RoseMarie_79_f.jpg",
"9898_RoseMarie_46_f.jpg",
"9908_RoseMarie_83_f.jpg",
"9902_RoseMarie_70_f.jpg",
"9896_RoseMarie_42_f.jpg",
"9899_RoseMarie_50_f.jpg",
"9910_RoseMarie_87_f.jpg",
"9894_RoseMarie_37_f.jpg"
],
"BobDylan": [
"1665_BobDylan_35_m.jpg",
"1651_BobDylan_23_m.jpg",
"1663_BobDylan_33_m.jpg",
"1682_BobDylan_64_m.jpg",
"1678_BobDylan_56_m.jpg",
"1684_BobDylan_68_m.jpg",
"1686_BobDylan_72_m.jpg",
"1645_BobDylan_16_m.jpg",
"1664_BobDylan_34_m.jpg",
"1680_BobDylan_61_m.jpg",
"1674_BobDylan_47_m.jpg",
"1656_BobDylan_26_m.jpg",
"1658_BobDylan_28_m.jpg",
"1667_BobDylan_40_m.jpg",
"1673_BobDylan_46_m.jpg",
"1668_BobDylan_41_m.jpg",
"1657_BobDylan_27_m.jpg",
"1685_BobDylan_71_m.jpg",
"1647_BobDylan_19_m.jpg",
"1660_BobDylan_30_m.jpg",
"1679_BobDylan_57_m.jpg",
"1672_BobDylan_45_m.jpg",
"1666_BobDylan_37_m.jpg",
"1650_BobDylan_22_m.jpg",
"1683_BobDylan_66_m.jpg",
"1652_BobDylan_23_m.jpg",
"1654_BobDylan_24_m.jpg",
"1687_BobDylan_74_m.jpg",
"1649_BobDylan_21_m.jpg",
"1677_BobDylan_54_m.jpg",
"1659_BobDylan_29_m.jpg",
"1675_BobDylan_48_m.jpg",
"1662_BobDylan_32_m.jpg",
"1671_BobDylan_44_m.jpg",
"1669_BobDylan_42_m.jpg",
"1653_BobDylan_24_m.jpg",
"1648_BobDylan_20_m.jpg",
"1681_BobDylan_62_m.jpg",
"1661_BobDylan_31_m.jpg",
"1670_BobDylan_43_m.jpg",
"1655_BobDylan_25_m.jpg",
"1676_BobDylan_49_m.jpg",
"1646_BobDylan_18_m.jpg"
],
"LorneGreene": [
"8137_LorneGreene_25_m.jpg",
"8145_LorneGreene_48_m.jpg",
"8140_LorneGreene_38_m.jpg",
"8138_LorneGreene_28_m.jpg",
"8139_LorneGreene_33_m.jpg",
"8149_LorneGreene_52_m.jpg",
"8154_LorneGreene_58_m.jpg",
"8142_LorneGreene_44_m.jpg",
"8162_LorneGreene_68_m.jpg",
"8155_LorneGreene_61_m.jpg",
"8164_LorneGreene_71_m.jpg",
"8147_LorneGreene_50_m.jpg",
"8151_LorneGreene_54_m.jpg",
"8163_LorneGreene_70_m.jpg",
"8150_LorneGreene_53_m.jpg",
"8156_LorneGreene_62_m.jpg",
"8160_LorneGreene_66_m.jpg",
"8146_LorneGreene_49_m.jpg",
"8144_LorneGreene_46_m.jpg",
"8158_LorneGreene_64_m.jpg",
"8152_LorneGreene_55_m.jpg",
"8159_LorneGreene_65_m.jpg",
"8161_LorneGreene_67_m.jpg",
"8157_LorneGreene_63_m.jpg",
"8141_LorneGreene_43_m.jpg",
"8143_LorneGreene_45_m.jpg",
"8136_LorneGreene_18_m.jpg",
"8153_LorneGreene_57_m.jpg",
"8148_LorneGreene_51_m.jpg"
],
"LaurenBacall": [
"11540_LaurenBacall_26_f.jpg",
"11539_LaurenBacall_25_f.jpg",
"11547_LaurenBacall_45_f.jpg",
"11549_LaurenBacall_72_f.jpg",
"11534_LaurenBacall_20_f.jpg",
"11559_LaurenBacall_31_f.jpg",
"11545_LaurenBacall_35_f.jpg",
"11546_LaurenBacall_40_f.jpg",
"11563_LaurenBacall_64_f.jpg",
"11555_LaurenBacall_82_f.jpg",
"11541_LaurenBacall_31_f.jpg",
"11564_LaurenBacall_27_f.jpg",
"11561_LaurenBacall_57_f.jpg",
"11552_LaurenBacall_75_f.jpg",
"11556_LaurenBacall_83_f.jpg",
"11543_LaurenBacall_31_f.jpg",
"11533_LaurenBacall_19_f.jpg",
"11557_LaurenBacall_85_f.jpg",
"11544_LaurenBacall_34_f.jpg",
"11535_LaurenBacall_21_f.jpg",
"11565_LaurenBacall_26_f.jpg",
"11558_LaurenBacall_42_f.jpg",
"11531_LaurenBacall_28_f.jpg",
"11536_LaurenBacall_22_f.jpg",
"11562_LaurenBacall_46_f.jpg",
"11554_LaurenBacall_81_f.jpg",
"11542_LaurenBacall_31_f.jpg",
"11537_LaurenBacall_22_f.jpg",
"11560_LaurenBacall_56_f.jpg",
"11548_LaurenBacall_65_f.jpg",
"11550_LaurenBacall_73_f.jpg",
"11530_LaurenBacall_17_f.jpg",
"11532_LaurenBacall_18_f.jpg",
"11566_LaurenBacall_20_f.jpg",
"11551_LaurenBacall_77_f.jpg",
"11538_LaurenBacall_23_f.jpg",
"11553_LaurenBacall_80_f.jpg"
],
"SerenaWilliams": [
"16468_SerenaWilliams_32_f.jpg",
"16486_SerenaWilliams_32_f.jpg",
"16479_SerenaWilliams_25_f.jpg",
"16474_SerenaWilliams_18_f.jpg",
"16472_SerenaWilliams_21_f.jpg",
"16008_SerenaWilliams_36_f.jpg",
"16484_SerenaWilliams_31_f.jpg",
"16469_SerenaWilliams_31_f.jpg",
"16478_SerenaWilliams_24_f.jpg",
"16485_SerenaWilliams_32_f.jpg",
"16480_SerenaWilliams_26_f.jpg",
"16481_SerenaWilliams_27_f.jpg",
"16487_SerenaWilliams_33_f.jpg",
"16477_SerenaWilliams_23_f.jpg",
"16010_SerenaWilliams_34_f.jpg",
"16483_SerenaWilliams_30_f.jpg",
"16471_SerenaWilliams_29_f.jpg",
"16009_SerenaWilliams_35_f.jpg",
"16476_SerenaWilliams_20_f.jpg",
"16475_SerenaWilliams_19_f.jpg",
"16482_SerenaWilliams_28_f.jpg",
"16007_SerenaWilliams_36_f.jpg",
"16470_SerenaWilliams_35_f.jpg",
"16473_SerenaWilliams_24_f.jpg"
],
"JohnVernon": [
"6459_JohnVernon_49_m.jpg",
"6447_JohnVernon_33_m.jpg",
"6446_JohnVernon_32_m.jpg",
"6448_JohnVernon_34_m.jpg",
"6454_JohnVernon_40_m.jpg",
"6452_JohnVernon_38_m.jpg",
"6471_JohnVernon_71_m.jpg",
"6468_JohnVernon_60_m.jpg",
"6469_JohnVernon_63_m.jpg",
"6458_JohnVernon_47_m.jpg",
"6463_JohnVernon_53_m.jpg",
"6444_JohnVernon_30_m.jpg",
"6457_JohnVernon_46_m.jpg",
"6456_JohnVernon_42_m.jpg",
"6462_JohnVernon_52_m.jpg",
"6464_JohnVernon_54_m.jpg",
"6451_JohnVernon_37_m.jpg",
"6449_JohnVernon_35_m.jpg",
"6470_JohnVernon_67_m.jpg",
"6445_JohnVernon_31_m.jpg",
"6461_JohnVernon_51_m.jpg",
"6450_JohnVernon_36_m.jpg",
"6460_JohnVernon_50_m.jpg",
"6455_JohnVernon_41_m.jpg",
"6466_JohnVernon_57_m.jpg",
"6465_JohnVernon_56_m.jpg",
"6453_JohnVernon_39_m.jpg",
"6467_JohnVernon_58_m.jpg"
],
"JamesStewart": [
"8647_JamesStewart_45_m.jpg",
"8657_JamesStewart_29_m.jpg",
"8644_JamesStewart_32_m.jpg",
"8639_JamesStewart_28_m.jpg",
"8645_JamesStewart_38_m.jpg",
"8642_JamesStewart_31_m.jpg",
"8643_JamesStewart_32_m.jpg",
"8652_JamesStewart_69_m.jpg",
"8655_JamesStewart_32_m.jpg",
"8638_JamesStewart_26_m.jpg",
"8658_JamesStewart_41_m.jpg",
"8646_JamesStewart_40_m.jpg",
"8641_JamesStewart_31_m.jpg",
"8650_JamesStewart_65_m.jpg",
"8656_JamesStewart_32_m.jpg",
"8651_JamesStewart_68_m.jpg",
"8654_JamesStewart_34_m.jpg",
"8637_JamesStewart_86_m.jpg",
"8640_JamesStewart_30_m.jpg",
"8649_JamesStewart_52_m.jpg",
"8653_JamesStewart_41_m.jpg",
"8648_JamesStewart_51_m.jpg"
],
"JoanLeslie": [
"10177_JoanLeslie_35_f.jpg",
"10181_JoanLeslie_50_f.jpg",
"10182_JoanLeslie_59_f.jpg",
"10167_JoanLeslie_21_f.jpg",
"10184_JoanLeslie_77_f.jpg",
"10175_JoanLeslie_32_f.jpg",
"10170_JoanLeslie_25_f.jpg",
"10166_JoanLeslie_19_f.jpg",
"10188_JoanLeslie_83_f.jpg",
"10168_JoanLeslie_22_f.jpg",
"10174_JoanLeslie_30_f.jpg",
"10173_JoanLeslie_29_f.jpg",
"10165_JoanLeslie_17_f.jpg",
"10190_JoanLeslie_87_f.jpg",
"10171_JoanLeslie_26_f.jpg",
"10183_JoanLeslie_74_f.jpg",
"10163_JoanLeslie_13_f.jpg",
"10189_JoanLeslie_84_f.jpg",
"10172_JoanLeslie_28_f.jpg",
"10185_JoanLeslie_78_f.jpg",
"10187_JoanLeslie_81_f.jpg",
"10169_JoanLeslie_23_f.jpg",
"10164_JoanLeslie_16_f.jpg",
"10179_JoanLeslie_38_f.jpg",
"10180_JoanLeslie_45_f.jpg",
"10178_JoanLeslie_36_f.jpg",
"10176_JoanLeslie_33_f.jpg",
"10186_JoanLeslie_80_f.jpg"
],
"MelindaDillion": [
"12321_MelindaDillion_57_f.jpg",
"12310_MelindaDillion_41_f.jpg",
"12307_MelindaDillion_38_f.jpg",
"12304_MelindaDillion_30_f.jpg",
"12323_MelindaDillion_63_f.jpg",
"12314_MelindaDillion_45_f.jpg",
"12324_MelindaDillion_64_f.jpg",
"12327_MelindaDillion_70_f.jpg",
"12312_MelindaDillion_43_f.jpg",
"12306_MelindaDillion_37_f.jpg",
"12316_MelindaDillion_47_f.jpg",
"12319_MelindaDillion_54_f.jpg",
"12305_MelindaDillion_34_f.jpg",
"12325_MelindaDillion_66_f.jpg",
"12309_MelindaDillion_40_f.jpg",
"12313_MelindaDillion_44_f.jpg",
"12311_MelindaDillion_42_f.jpg",
"12326_MelindaDillion_68_f.jpg",
"12303_MelindaDillion_29_f.jpg",
"12320_MelindaDillion_55_f.jpg",
"12317_MelindaDillion_48_f.jpg",
"12315_MelindaDillion_46_f.jpg",
"12322_MelindaDillion_59_f.jpg",
"12308_MelindaDillion_39_f.jpg",
"12328_MelindaDillion_73_f.jpg",
"12318_MelindaDillion_50_f.jpg"
],
"StephenHawking": [
"1020_StephenHawking_65_m.jpg",
"1004_StephenHawking_43_m.jpg",
"1017_StephenHawking_65_m.jpg",
"1014_StephenHawking_67_m.jpg",
"1006_StephenHawking_36_m.jpg",
"1000_StephenHawking_1_m.jpg",
"1018_StephenHawking_66_m.jpg",
"1005_StephenHawking_23_m.jpg",
"1007_StephenHawking_43_m.jpg",
"1012_StephenHawking_67_m.jpg",
"1024_StephenHawking_54_m.jpg",
"1002_StephenHawking_15_m.jpg",
"1019_StephenHawking_53_m.jpg",
"1022_StephenHawking_48_m.jpg",
"1003_StephenHawking_21_m.jpg",
"1010_StephenHawking_62_m.jpg",
"1009_StephenHawking_46_m.jpg",
"1008_StephenHawking_43_m.jpg",
"1016_StephenHawking_53_m.jpg",
"1001_StephenHawking_3_m.jpg",
"1011_StephenHawking_64_m.jpg",
"1015_StephenHawking_40_m.jpg",
"1021_StephenHawking_64_m.jpg",
"1013_StephenHawking_67_m.jpg",
"1023_StephenHawking_45_m.jpg"
]
}
# --------------------------
# Positives
positives = []
for key, values in idendities.items():
# print(key)
for i in range(0, len(values) - 1):
for j in range(i + 1, len(values)):
# print(values[i], " and ", values[j])
positive = []
positive.append(values[i])
positive.append(values[j])
positives.append(positive)
positives = pd.DataFrame(positives, columns=["file_x", "file_y"])
positives["decision"] = "Yes"
print(positives.shape)
# --------------------------
# Negatives
samples_list = list(idendities.values())
negatives = []
for i in range(0, len(idendities) - 1):
for j in range(i + 1, len(idendities)):
# print(samples_list[i], " vs ",samples_list[j])
cross_product = itertools.product(samples_list[i], samples_list[j])
cross_product = list(cross_product)
# print(cross_product)
for cross_sample in cross_product:
# print(cross_sample[0], " vs ", cross_sample[1])
negative = []
negative.append(cross_sample[0])
negative.append(cross_sample[1])
negatives.append(negative)
negatives = pd.DataFrame(negatives, columns=["file_x", "file_y"])
negatives["decision"] = "No"
negatives = negatives.sample(positives.shape[0])
print(negatives.shape)
# --------------------------
# Merge positive and negative ones
df = pd.concat([positives, negatives]).reset_index(drop=True)
print(df.decision.value_counts())
df.file_x = "deepface/tests/dataset/" + df.file_x
df.file_y = "deepface/tests/dataset/" + df.file_y
# --------------------------
# DeepFace
from deepface import DeepFace
from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
pretrained_models = {}
pretrained_models["VGG-Face"] = VGGFace.loadModel()
print("VGG-Face loaded")
pretrained_models["Facenet"] = Facenet.loadModel()
print("Facenet loaded")
pretrained_models["OpenFace"] = OpenFace.loadModel()
print("OpenFace loaded")
pretrained_models["DeepFace"] = FbDeepFace.loadModel()
print("FbDeepFace loaded")
instances = df[["file_x", "file_y"]].values.tolist()
models = ['VGG-Face']
metrics = ['cosine']
if True:
for model in models:
for metric in metrics:
resp_obj = DeepFace.verify(instances
, model_name=model
, model=pretrained_models[model]
, distance_metric=metric)
distances = []
for i in range(0, len(instances)):
distance = round(resp_obj["pair_%s" % (i + 1)]["distance"], 4)
distances.append(distance)
df['%s_%s' % (model, metric)] = distances
df.to_csv("face-recognition-pivot.csv", index=False)
else:
df = pd.read_csv("face-recognition-pivot.csv")
df_raw = df.copy()
# --------------------------
# Distribution
fig = plt.figure(figsize=(15, 15))
figure_idx = 1
for model in models:
for metric in metrics:
feature = '%s_%s' % (model, metric)
ax1 = fig.add_subplot(4, 2, figure_idx)
df[df.decision == "Yes"][feature].plot(kind='kde', title=feature, label='Yes', legend=True)
df[df.decision == "No"][feature].plot(kind='kde', title=feature, label='No', legend=True)
figure_idx = figure_idx + 1
# plt.show()
# --------------------------
# Pre-processing for modelling
columns = []
for model in models:
for metric in metrics:
feature = '%s_%s' % (model, metric)
columns.append(feature)
columns.append("decision")
df = df[columns]
df.loc[df[df.decision == 'Yes'].index, 'decision'] = 1
df.loc[df[df.decision == 'No'].index, 'decision'] = 0
print(df.head())
# --------------------------
# Train test split
from sklearn.model_selection import train_test_split
df_train, df_test = train_test_split(df, test_size=0.30, random_state=17)
target_name = "decision"
y_train = df_train[target_name].values
x_train = df_train.drop(columns=[target_name]).values
y_test = df_test[target_name].values
x_test = df_test.drop(columns=[target_name]).values
# --------------------------
# LightGBM
import lightgbm as lgb
features = df.drop(columns=[target_name]).columns.tolist()
lgb_train = lgb.Dataset(x_train, y_train, feature_name=features)
lgb_test = lgb.Dataset(x_test, y_test, feature_name=features)
params = {
'task': 'train'
, 'boosting_type': 'gbdt'
, 'objective': 'multiclass'
, 'num_class': 2
, 'metric': 'multi_logloss'
}
gbm = lgb.train(params, lgb_train, num_boost_round=250, early_stopping_rounds=15, valid_sets=lgb_test)
gbm.save_model("face-recognition-ensemble-model.txt")
# --------------------------
# Evaluation
predictions = gbm.predict(x_test)
predictions_classes = []
for i in predictions:
prediction_class = np.argmax(i)
predictions_classes.append(prediction_class)
cm = confusion_matrix(y_test, predictions_classes)
print(cm)
tn, fp, fn, tp = cm.ravel()
recall = tp / (tp + fn)
precision = tp / (tp + fp)
accuracy = (tp + tn) / (tn + fp + fn + tp)
f1 = 2 * (precision * recall) / (precision + recall)
print("Precision: ", 100 * precision, "%")
print("Recall: ", 100 * recall, "%")
print("F1 score ", 100 * f1, "%")
print("Accuracy: ", 100 * accuracy, "%")
# --------------------------
# Interpretability
ax = lgb.plot_importance(gbm, max_num_features=20)
# plt.show()
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin'
plt.rcParams["figure.figsize"] = [20, 20]
for i in range(0, gbm.num_trees()):
ax = lgb.plot_tree(gbm, tree_index=i)
# plt.show()
if i == 2:
break
# --------------------------
# ROC Curve
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, roc_curve
y_pred_proba = predictions[::, 1]
fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
auc = roc_auc_score(y_test, y_pred_proba)
plt.figure(figsize=(7, 3))
lw = 2
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
fig.savefig('/home/khawar/deepface/tests/VGG-FACE_Cosine_ROC.png', dpi=fig.dpi)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('VGG Face')
plt.plot(fpr, tpr, label="ROC with Cosine auc=" + str(auc))
fig.savefig('/home/khawar/deepface/tests/VGG-FACE_Cosine_ROC_T_F.png', dpi=fig.dpi)
plt.legend(loc=4)
fig.savefig('/home/khawar/deepface/tests/VGG-FACE_Cosine.png', dpi=fig.dpi)
plt.show()
# --------------------------

Scipy minimization failing with inequality constraints or bounds

I am trying to use scipy.optimize to solve a minimizaiton problem but getting failures on using an inequality constraint or a bound. Looking for any suggestions regarding proper usage of constraints vs bounds, and if any other algorithm would be suitable in this case.
The problem is:
Here is a reproducible code:
import pandas as pd
from scipy.optimize import minimize as scimin
def obj(wc, M, wb, S):
return (wc.dot(M.T) - wb).dot(S).dot(wc.dot(M.T) - wb)
n=278
k= 16
c_labels = ['c'+ str(i) for i in range(k)]
r_labels_1 = ['r' + str(i) +s for i in range(k) for s in ['a', 'b']]
r_labels_2 = ['r' + str(i) for i in range(k, n-k)]
r_labels = r_labels_1 + r_labels_2
wb = pd.Series(index=r_labels, data=np.random.triangular(0, 1.0/n, 0.3, n))
wb = wb/wb.sum()
cc = pd.Series(index=c_labels, data=4 + 2*np.random.random(k))
cb = pd.Series(index=r_labels, data=3 + 10*np.random.random(n))
s_pre = np.random.rand(n, n)
S = pd.DataFrame(index=r_labels, columns= r_labels, data=s_pre.dot(s_pre.T))
M = pd.DataFrame(data=np.eye(k), index= ['r'+ str(i) +'a' for i in range(k)], columns = c_labels)
for i in range(k):
M.loc['r' + str(i)+ 'b'] = M.loc['r' + str(i) + 'a']
M = M.loc[r_labels_1].applymap(lambda x: x* np.random.rand())
M = M / M.sum()
for i in r_labels_2:
M.loc[i] = 0
one_k = pd.DataFrame(index=M.columns, data=np.ones(len(M.columns)))
con1 = {'type': 'eq', 'fun': lambda x: x.sum() - 1}
con2 = {'type': 'eq', 'fun': lambda x: cc.dot(x) - cb.dot(wb)}
# try 1 with inequality constraint
con3 = {'type': 'ineq', 'fun': lambda x: min(x)}
consts = [con1, con2, con3]
res = scimin(obj, x0=one_k, args=(M, wb, S), constraints=consts, method='SLSQP')
wc = res['x']
oj = obj(wc, M, wb, S)
# try 2 with bounds instead of inequality constraint
bounds = [(0, 1000)] *len(M.columns)
consts = [con1, con2]
res1 = scimin(obj, x0=one_k, args=(M, wb, S), constraints=consts, bounds= bounds, method='SLSQP')
wc1 = res1['x']
oj1 = obj(wc1, M, wb, S)

AttributeError: 'numpy.float32' object has no attribute 'to_cpu'

Good day,
I'm developing a deep learning model for wireless signal detection. Below is the snippet of the function that computes the model accuracy and bit error rate (BER):
from chainer.datasets import TupleDataset
import numpy as np
from chainer import cuda
from chainer import function
def get_idp_acc(model, dataset_tuple, comp_ratio, profile = None, batchsize = 128, gpu = -1):
chainer.config.train = True
xp = np if gpu < 0 else cuda.cupy
x, indices, x_zf, HtH, Hty = dataset_tuple._datasets[0], dataset_tuple._datasets[1], dataset_tuple._datasets[2], dataset_tuple._datasets[3], dataset_tuple._datasets[4]
accs = 0
BERs = 0
model.train = False
for j in range(0, len(x), batchsize):
x_batch = xp.array(x[j:j + batchsize])
indices_batch = xp.array(indices[j:j + batchsize])
x_zf_batch = xp.array(x_zf[j:j + batchsize])
HtH_batch = xp.array(HtH[j:j + batchsize])
Hty_batch = xp.array(Hty[j:j + batchsize])
if profile == None:
acc_data = model(x_batch, indices_batch, x_zf_batch, HtH_batch, Hty_batch, comp_ratio = comp_ratio,
ret_param = 'acc')
else:
acc_data = model(x_batch, indices_batch, x_zf_batch, HtH_batch, Hty_batch, comp_ratio = comp_ratio,
ret_param = 'acc', profile = profile)
acc_data.to_cpu()
acc = acc_data.data
BER = 1.0 - acc
accs += acc * len(x_batch)
BERs += BER * len(x_batch)
return (accs / len(x)) * 100.
When the code is run, I get the following error below despite having imported all the required chainer modules. I really need your help on this issue as I'm stuck for nearly two months without making any headways in my project.
Traceback (most recent call last):
File "/Users/mac/Documents/idp_detnet/examples/run_mlp.py", line 14, in <module>
mlp.run(args)
File "/Users/mac/Documents/idp_detnet/examples/mlp.py", line 39, in run
acc_dict[name], BER_dict[name] = util.sweep_idp(model, test, comp_ratios, args)
File "/Users/mac/Documents/idp_detnet/examples/util.py", line 107, in sweep_idp
batchsize=args.batchsize, profile=profile))
File "/Users/mac/Documents/idp_detnet/examples/util.py", line 83, in get_idp_acc
acc_data.to_cpu()
AttributeError: 'numpy.float32' object has no attribute 'to_cpu'
Below is the additional information providing codes for model definition:
K = 10
num_layers = 3*K
def lin_soft_sign(x, t):
'''Linear soft sign activation function from the original paper Eq. (11)'''
y = -1 + F.relu(x + t)/ F.absolute(t) - F.relu(- t)/ F.absolute(t)
return y
def accuracy(x, y):
'''Computes the fraction of elements for which x and y are equal'''
return np.mean(np.equal(x, y)).astype(np.float32)
class MLP(chainer.Chain):
def __init__(self, K, coeff_generator, profiles = None, z_dims = 8*K, v_dims = 2*K):
super(MLP, self).__init__()
if profiles == None:
profiles = [(0, 10)]
self.coeff_generator = coeff_generator
self.z_dims = z_dims
self.v_dims = v_dims
self.K = K
self.profiles = profiles
self.profile = 0
with self.init_scope():
self.p0_l1 = IncompleteLinear(None, self.z_dims)
self.p1_l1 = IncompleteLinear(None, self.z_dims)
self.p2_l1 = IncompleteLinear(None, self.z_dims)
self.p0_lv = IncompleteLinear(None, self.v_dims)
self.p1_lv = IncompleteLinear(None, self.v_dims)
self.p2_lv = IncompleteLinear(None, self.v_dims)
self.p0_l3 = IncompleteLinear(None, self.K)
self.p1_l3 = IncompleteLinear(None, self.K)
self.p2_l3 = IncompleteLinear(None, self.K)
def __call__(self, x, indices, x_zf, HtH, Hty, ret_param = 'loss', profile = None, comp_ratio = None):
if profile == None:
profile = self.profile
# Form Zero-forcing detection
err_rel = F.sum((x - x_zf)**2, axis = 1)
params = layer_profile(self.coeff_generator,
*self.profiles[profile], self.z_dims,
self.v_dims, comp_ratio)
def detnet_layer(x_d, x_logit, v, z_dims, v_dims):
HtH_x = np.matmul(HtH, np.expand_dims(x_d.data, axis = 2).astype(np.float32))
HtH_x = F.squeeze(HtH_x, axis = -1)
#x_concat = np.concatenate([Hty, x, HtH_x, v], axis=1)
x_concat = F.concat([Hty, x_d, HtH_x, v], axis = 1)
if profile == 0:
z = F.relu(self.p0_l1(x_concat))
v += self.p0_lv(z, *params)
x_logit += self.p0_l3(z, *params)
x = lin_soft_sign(x_logit, F.broadcast_to(np.ones(1).astype(np.float32), x_logit.shape))
elif profile == 1:
z = F.relu(self.p1_l1(x_concat))
v += self.p1_lv(z, *params)
x_logit += self.p1_l3(z, *params)
x = lin_soft_sign(x_logit, F.broadcast_to(np.ones(1).astype(np.float32), x_logit.shape))
elif profile == 2:
z = F.relu(self.p2_l1(x_concat))
v += self.p2_lv(z, *params)
x_logit += self.p2_l3(z, *params)
x = lin_soft_sign(x_logit, F.broadcast_to(np.ones(1).astype(np.float32), x_logit.shape))
return x, x_logit, v
x_k = np.zeros((Hty.shape[0], self.K), dtype = np.float32)
x_k_logit = np.zeros((Hty.shape[0], self.K), dtype = np.float32)
v = np.zeros((Hty.shape[0], self.v_dims), dtype = np.float32)
loss = 0
mod = sg.Modulator('BPSK', K)
for k in range(1, num_layers + 1):
x_k, x_k_logit, v = detnet_layer(x_k, x_k_logit, v, self.z_dims, self.v_dims)
err = F.sum((x - x_k)**2, 1)
loss += (np.log(k)).astype(np.float32) * F.mean(err/err_rel)
report = {'loss': loss, 'acc': accuracy(mod.demodulate(x_k.data), indices)}
reporter.report(report, self)
return report[ret_param]
def report_params(self):
return ['validation/main/acc']
def param_names(self):
if len(self.profiles) > 1:
return 'IDPDETNET_{}_{}_{}_p{}'.format(self.z_dims, self.v_dims, self.coeff_generator.__name__, len(self.profiles))
return 'IDPDETNET_{}_{}_{}'.format(self.z_dims, self.v_dims, self.coeff_generator.__name__)
import os
import sys
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), '..')))
import numpy as np
import visualize as vz
import idp.coeffs_generator as cg
from net import MLP
import util
K = 10
N = 4
v_dims = 2*K
z_dims = 8*K
SNR_dB_tmin = -4
SNR_dB_tmax = 24
SNR_dB_test = np.linspace(SNR_dB_tmin, SNR_dB_tmax, 8)
num_snr_test = len(SNR_dB_test)
def run(args):
train, test = util.get_dataset(args.modeltype)
names = ['all-one (standard)', 'linear']
colors = [vz.colors.all_one_lg, vz.colors.linear_lg]
models = [
MLP.MLP(K, cg.uniform, z_dims = 8*K, v_dims = 2*K),
MLP.MLP(K, cg.linear, z_dims = 8*K, v_dims = 2*K)
]
comp_ratios = np.linspace(0.1, 1.0, 20)
acc_dict = {}
BER_dict = {}
ratios_dict = {}
for i in range(num_snr_test):
for name, model in zip(names, models):
util.load_or_train_model(model, train, test, args)
acc_dict[name], BER_dict[name] = util.sweep_idp(model, test, comp_ratios, args)
ratios_dict[name] = [100. * cr for cr in comp_ratios]
filename = "IDPDETNET1_{}".format(args.modeltype)
vz.plot(ratios_dict, acc_dict, names, filename, colors = colors,
folder = args.figure_path, ext=args.ext,
title = 'IDPDETNET (BPSK)',
xlabel = 'IDP (%)',
ylabel = 'Test Accuracy (%)', ylim = (0, 100))
filename = "IDPDETNET2_{}".format(args.modeltype)
vz.plot(ratios_dict, BER_dict, names, filename, colors = colors,
folder=args.figure_path, ext=args.ext,
title='IDPDETNET (BPSK)',
xlabel='IDP (%)',
ylabel='BER (bits/sec)')
filename = "IDPDETNET3_{}".format(args.modeltype)
vz.plot(num_snr_test, BER_dict, names, filename, colors = colors,
folder = args.figure_path, ext = args.ext,
title = 'IDPDETNET (BPSK)',
xlabel = 'SNR (dB)',
ylabel = ' BER (bits/sec)')
if __name__ == '__main__':
args = util.default_parser('IDPDETNET Example').parse_args()
run(args)
Hi Seiya Tokui. Thank you for your kind input. Here is the model definition based on the above code:
model = MLP.MLP(K, cg.uniform, z_dims = 8*K, v_dims = 2*K)
OR
model = MLP.MLP(K, cg.linear, z_dims = 8*K, v_dims = 2*K)
Hi #BloodyD. Thank for your brilliant contributions. The model started training, but then later returned the following error:
1 nan nan 0.50108 5.85448
Traceback (most recent call last):
File "run_mlp.py", line 14, in <module>
mlp.run(args)
File "/Users/mac/Documents/idp_detnet/examples/mlp.py", line 38, in run
util.load_or_train_model(model, train, test, args)
File "/Users/mac/Documents/idp_detnet/examples/util.py", line 204, in load_or_train_model
train_model(model, train, test, args)
File "/Users/mac/Documents/idp_detnet/examples/util.py", line 184, in train_model
return eval(fp.read().replace('\n', ''))
File "<string>", line 1, in <module>
NameError: name 'NaN' is not defined
The error occurs in the last line of this snippet code below:
name = model.param_names()
save_model(model, os.path.join(args.model_path, name))
chainer.config.train = False
with open(os.path.join(args.out, 'log'), 'r') as fp:
return eval(fp.read().replace('\n', ''))

how to make copy paste to follow the same pattern

I am creating a design with my QGraphicsitems . I have selected all the items in the scene and pasted it.But it is not following the same pattern.can we make the items paste in the same pattern like the one which we have created initially? –
I have tried with the following code
from PyQt5.QtCore import (QByteArray,QDataStream, QIODevice,pyqtSlot, QMimeData, QPointF, QPoint, Qt, QRect,QTimer,QLineF, QEvent,QRectF)
from PyQt5.QtGui import QColor,QDrag, QPainter, QPixmap,QFont,QFontMetrics,QBrush, QLinearGradient, QIcon, QPen, QPainterPath, QTransform,QCursor,QMouseEvent,QClipboard
from PyQt5.QtWidgets import QApplication,QGraphicsTextItem,QGraphicsItemGroup, QSizePolicy,QShortcut, QScrollArea, QPushButton,QLineEdit, QMainWindow,QInputDialog, QGraphicsPathItem,QDialog, QVBoxLayout,QGraphicsItem,QStatusBar,QTextEdit, QAction,QMenu, qApp,QSplitter, QButtonGroup, QToolButton, QFrame, QHBoxLayout, QGraphicsView, QGraphicsItem, QGraphicsPixmapItem, QLabel, QGraphicsScene, QWidget
import importlib
import SketchBook as sketchBook
import Blocks as blocks
import random
custom_mimeType = "application/x-qgraphicsitems"
pos1 = QPointF()
def item_to_ds(it, ds):
if not isinstance(it, QGraphicsItem):
return
ds.writeQString(it.__class__.__module__)
ds.writeQString(it.__class__.__name__)
ds.writeInt(it.flags())
ds << it.pos()
posdiff = it.pos().x() -pos1().x()
pos1 = QPointF(it.pos().x(),it.pos().y())
# ds.writeInt(it.UserType)
ds.writeFloat(it.opacity())
ds.writeFloat(it.rotation())
ds.writeFloat(it.scale())
# ds.writeString(it.type())
# ds.writeQString(it.type1())
# if isinstance(it, QGraphicsItem):
# ds << it.brush() << it.pen()
if isinstance(it, QGraphicsPixmapItem):
ds << it.pixmap()
if isinstance(it, QGraphicsPathItem):
ds << it.path()
def ds_to_item(ds):
module_name = ds.readQString()
class_name = ds.readQString()
if class_name == 'QGraphicsPixmapItem':
mod = importlib.import_module(module_name)
it = getattr(mod, class_name)()
# flags = QGraphicsItem.GraphicsItemFlag(ds.readInt())
# pos = QPointF()
# ds >> pos
# it.setFlags(flags)
# it.setPos(pos)
# it.setOpacity(ds.readFloat())
# it.setRotation(ds.readFloat())
# it.setScale(ds.readFloat())
else:
mod = importlib.import_module(module_name)
it = getattr(mod, class_name)(blocks.selectedObjType)
flags = QGraphicsItem.GraphicsItemFlag(ds.readInt())
pos = QPointF()
ds >> pos
it.setFlags(flags)
it.setPos(pos)
it.setOpacity(ds.readFloat())
it.setRotation(ds.readFloat())
it.setScale(ds.readFloat())
# if isinstance(it, QGraphicsItem):
# pen, brush = QPen(), QBrush()
# ds >> brush
# ds >> pen
# it.setPen(pen)
# it.setBrush(brush)
if isinstance(it, QGraphicsPathItem):
path = QPainterPath()
ds >> path
it.setPath(path)
if isinstance(it, QGraphicsPixmapItem):
pixmap = QPixmap()
# pen, brush = QPen(), QBrush()
# ds >> brush
# ds >> pen
ds >> pixmap
it.setPixmap(pixmap)
return it
class GraphicsSceneClass(QGraphicsScene):
global selectedObjType
def __init__(self, parent=None):
super(GraphicsSceneClass, self).__init__(parent)
self.gridOn = 0
self.setSceneRect(0, 0, 1920, 1080)
self.setItemIndexMethod(QGraphicsScene.NoIndex)
self.setBackgroundBrush(QBrush(Qt.black))
def mousePressEvent(self, event):
sampleTransform = QTransform()
objectAtMouse = self.itemAt(event.scenePos(), sampleTransform)
if objectAtMouse and event.button()== Qt.LeftButton:
objectAtMouse.setSelected(True)
elif objectAtMouse==None and event.button()==Qt.RightButton:
# pass
self.grid = self.TargPosForLine(event.scenePos(), "ForLine")
self.grid = self.TargPosForLine(event.scenePos(), "ForLine")
print(self.grid)
# else:
# self.DeselectItems()
# objectAtMouse.QShortcut
def TargPosForLine(self, position, mode):
clicked_column = int((position.y() // 16)) * 16
clicked_row = int((position.x() // 16)) * 16
if clicked_column < 0:
clicked_column = 0
if clicked_row < 0:
clicked_row = 0
if(mode == "ForRect"):
return QRect(clicked_row, clicked_column,16,16)
elif(mode == "ForLine"):
return QPointF(clicked_row,clicked_column)
def DeselectItems(self):
selectedObjects = self.selectedItems()
for object in selectedObjects:
object.setSelected(False)
def mouseReleaseEvent(self, event):
# self.DeselectItems()
pass
class MainWindow(QMainWindow):
global selectedObjType
# global item
def __init__(self,):
super(MainWindow, self).__init__()
self.createActions()
self.createMenus()
self.createToolbars()
self.scene = GraphicsSceneClass()
MainWindow.obj = self.scene
self.view = QGraphicsView(self.scene)
# self.view.setDragMode(QGraphicsView.RubberBandDrag)
self.view.setMouseTracking(True)
self.view.setRenderHint(QPainter.HighQualityAntialiasing)
self.widg = QWidget()
self.horizontalLayout = QHBoxLayout()
self.horizontalLayout.addWidget(self.view)
self.widg.setMouseTracking(True)
self.widget = QWidget()
self.widget.setLayout(self.horizontalLayout)
self.setCentralWidget(self.widget)
self.obj=None
def createMenus(self):
menuBar = self.menuBar()
fileMenu = menuBar.addMenu('&File')
fileMenu.addAction(self.exitAction)
fileMenu = menuBar.addMenu('&Edit')
fileMenu.addAction(self.copyAction)
fileMenu.addAction(self.pasteAction)
fileMenu.addAction(self.selectAction)
def createActions(self):
self.exitAction = QAction("E&xit", self, shortcut="Ctrl+X", statusTip="Quit Scenediagram example",
triggered=self.deleteItem)
self.copyAction = QAction("C&opy", self, shortcut="Ctrl+C", triggered=self.copy)
self.pasteAction = QAction("P&aste", self, shortcut="Ctrl+V", triggered=self.paste)
self.selectAction = QAction("S&electAll", self, shortcut="Ctrl+A", triggered=self.selectAll)
def createToolbars(self):
GridButton = QToolButton()
GridButton.setCheckable(True)
GridButton.setIcon(QIcon('images/GridButton.png'))
GridButton.clicked.connect(self.GridOnOffControl)
GridButton.setToolTip("Grid Control")
self.pointerToolbar = self.addToolBar("Pointer type")
self.pointerToolbar.addWidget(GridButton)
def deleteItem(self):
for item in self.scene.selectedItems():
self.scene.removeItem(item)
def selectAll(self):
for item in self.scene.items():
item.setSelected(True)
def GridOnOffControl(self):
if self.scene.gridOn == 0:
self.scene.gridOn = 1
else:
self.scene.gridOn = 0
if self.scene.gridOn == 1:
self.scene.setBackgroundBrush(QBrush(QPixmap('images/Grid.png')))
else:
self.scene.setBackgroundBrush(QBrush(Qt.black))
def contextMenuEvent(self, event):
contextMenu = QMenu(self)
Cutaction = contextMenu.addAction("Cut")
Coaction = contextMenu.addAction("Copy")
Paaction = contextMenu.addAction("Paste")
Propaction = contextMenu.addAction("draw1")
Propaction1=contextMenu.addAction("draw2")
quitAct = contextMenu.addAction("quit")
action = contextMenu.exec_(self.mapToGlobal(event.pos()))
if action == quitAct:
self.close()
elif action == Propaction:
objectDrop = None
# painterPath = QPainterPath()
#
# painterPath.moveTo(10, 50.0)
# painterPath.lineTo(50,50)
# painterPath.lineTo(50,55)
# painterPath.lineTo(10,55)
# gradient = QLinearGradient(1, 1, 1, 5)
# gradient.setColorAt(0, QColor(Qt.gray))
# gradient.setColorAt(0.5, QColor(192, 192, 192, 255))
# gradient.setColorAt(1, QColor(Qt.darkGray))
# painterPath.closeSubpath()
#
# objectDrop = QGraphicsPathItem()
# objectDrop.setPath(painterPath)
# objectDrop.setBrush(QBrush(gradient))
objectDrop = QGraphicsPixmapItem(QPixmap("2AS_HG_RG.png"))
objectDrop.setPos(self.scene.grid)
print("sig",self.scene.grid)
# objectDrop._position = QPointF(gridPos.x() + 2, gridPos.y() + 5.9)
# objectDrop._type = "2AS_HG_RG"
objectDrop._type1 = "2AS_HG_RG"
self.scene.addItem(objectDrop)
objectDrop.setFlag(QGraphicsItem.ItemIsSelectable)
objectDrop.setFlag(QGraphicsItem.ItemIsMovable)
objectDrop._type1="2AS_HG_RG"
# self.scene.addPath(painterPath)
elif action==Propaction1:
objectDrop = None
selectedObjType = "line"
objectDrop = sketchBook.SketchBook(selectedObjType)
print("line",self.scene.grid)
objectDrop.setFlag(QGraphicsItem.ItemIsSelectable)
objectDrop.setFlag(QGraphicsItem.ItemIsMovable)
objectDrop._type1 = "line"
objectDrop.setPos(self.scene.grid.x(),self.scene.grid.y()-48+5)
self.scene.addItem(objectDrop)
elif action == Coaction:
self.copy()
elif action == Paaction:
self.paste()
#pyqtSlot()
def copy(self):
mimedata = QMimeData()
ba = QByteArray()
ds = QDataStream(ba, QIODevice.WriteOnly)
for it in self.scene.selectedItems():
self.posdiff=item_to_ds(it, ds)
mimedata.setData(custom_mimeType, ba)
clipboard = QApplication.clipboard()
clipboard.setMimeData(mimedata)
#pyqtSlot()
def paste(self):
pos2=self.scene.grid
clipboard = QApplication.clipboard()
mimedata = clipboard.mimeData()
if mimedata.hasFormat(custom_mimeType):
ba = mimedata.data(custom_mimeType)
# STR = str(ba)
# QW = ba.capacity()
ds = QDataStream(ba)
while not ds.atEnd():
# for it in ds:
it = ds_to_item(ds)
if isinstance(it, QGraphicsPixmapItem):
self.scene.addItem(it)
it.setPos(pos2)
it._position = QPointF(pos2.x() + 2, pos2.y() + 5.9)
print("sig",it._position)
it._type1 = "2AS_HG_RG"
else:
gradient = QLinearGradient(1, 1, 1, 5)
gradient.setColorAt(0, QColor(Qt.gray))
gradient.setColorAt(0.5, QColor(192, 192, 192, 255))
gradient.setColorAt(1, QColor(Qt.darkGray))
self.scene.addItem(it)
it.setBrush(QBrush(gradient))
it.setPos(pos2.x()+self.posdiff().x(),pos2.y()-48)
it._position = QPointF(pos2.x() + 2, pos2.y() + 5.9)
print(it._position)
# it.setFlags(QGraphicsItem.ItemIsSelectable)
# it._type1 = "line"
def selectedItem(self):
items = self.scene.selectedItems()
if len(items) == 1:
return items[0]
return None
if __name__=="__main__":
import sys
app=QApplication(sys.argv)
mainWindow = MainWindow()
mainWindow.show()
sys.exit(app.exec_())
1) select all the items or the items to be pasted
2) copy it
3) paste it
if we have design pattern have item1 followed by item2 followed by item3 with respective distance. When we copy and paste it it should follow the same pattern.
QGraphicsItem.setPos() is absolute to the scene (or relative to its parent), the alternative solution is to use moveBy(x, y) (which is the same as setPos(self.pos() + deltaPos), but you have to take into account the relative position of the click according to the reference point.
I'd suggest you to not set the position until all items have been added, and then set their position according to a specific item that will be used as an "anchor" point.
#pyqtSlot()
def paste(self):
pos2=self.scene.grid
clipboard = QApplication.clipboard()
mimedata = clipboard.mimeData()
items = []
topLeft = None
if mimedata.hasFormat(custom_mimeType):
ba = mimedata.data(custom_mimeType)
ds = QDataStream(ba)
while not ds.atEnd():
it = ds_to_item(ds)
items.append(it)
if not topLeft:
topLeft = it
elif it.y() < topLeft.y() or it.x() < topLeft.x():
# find a possible topmost/leftmost item
topLeft = it
# add items, but do not set their position here
# ...
delta = self.scene.grid - topLeft.pos()
[i.moveBy(delta.x(), delta.y()) for i in items]
An alternative is to find the "anchor" in the copy procedure, and set the position of each item relative to that point in the datastream, so that you'll be able to use moveBy(pos2.x(), pos2.y()) directly after adding the items.

How can the edge colors of individual matplotlib histograms be set?

I've got a rough and ready function that can be used to compare two sets of values using histograms:
I want to set the individual edge colors of each of the histograms in the top plot (much as how I set the individual sets of values used for each histogram). How could this be done?
import os
import datavision
import matplotlib.pyplot
import numpy
import shijian
def main():
a = numpy.random.normal(2, 2, size = 120)
b = numpy.random.normal(2, 2, size = 120)
save_histogram_comparison_matplotlib(
values_1 = a,
values_2 = b,
label_1 = "a",
label_2 = "b",
normalize = True,
label_ratio_x = "measurement",
label_y = "",
title = "comparison of a and b",
filename = "histogram_comparison_1.png"
)
def save_histogram_comparison_matplotlib(
values_1 = None,
values_2 = None,
filename = None,
directory = ".",
number_of_bins = None,
normalize = True,
label_x = "",
label_y = None,
label_ratio_x = None,
label_ratio_y = "ratio",
title = "comparison",
label_1 = "1",
label_2 = "2",
overwrite = True,
LaTeX = False,
#aspect = None,
font_size = 20,
color_1 = "#3861AA",
color_2 = "#00FF00",
color_3 = "#7FDADC",
color_edge_1 = "#3861AA", # |<---------- insert magic for these
color_edge_2 = "#00FF00", # |
alpha = 0.5,
width_line = 1
):
matplotlib.pyplot.ioff()
if LaTeX is True:
matplotlib.pyplot.rc("text", usetex = True)
matplotlib.pyplot.rc("font", family = "serif")
if number_of_bins is None:
number_of_bins_1 = datavision.propose_number_of_bins(values_1)
number_of_bins_2 = datavision.propose_number_of_bins(values_2)
number_of_bins = int((number_of_bins_1 + number_of_bins_2) / 2)
if filename is None:
if title is None:
filename = "histogram_comparison.png"
else:
filename = shijian.propose_filename(
filename = title + ".png",
overwrite = overwrite
)
else:
filename = shijian.propose_filename(
filename = filename,
overwrite = overwrite
)
values = []
values.append(values_1)
values.append(values_2)
bar_width = 0.8
figure, (axis_1, axis_2) = matplotlib.pyplot.subplots(
nrows = 2,
gridspec_kw = {"height_ratios": (2, 1)}
)
ns, bins, patches = axis_1.hist(
values,
color = [
color_1,
color_2
],
normed = normalize,
histtype = "stepfilled",
bins = number_of_bins,
alpha = alpha,
label = [label_1, label_2],
rwidth = bar_width,
linewidth = width_line,
#edgecolor = [color_edge_1, color_edge_2] <---------- magic here? dunno
)
axis_1.legend(
loc = "best"
)
bars = axis_2.bar(
bins[:-1],
ns[0] / ns[1],
alpha = 1,
linewidth = 0, #width_line
width = bins[1] - bins[0]
)
for bar in bars:
bar.set_color(color_3)
axis_1.set_xlabel(label_x, fontsize = font_size)
axis_1.set_ylabel(label_y, fontsize = font_size)
axis_2.set_xlabel(label_ratio_x, fontsize = font_size)
axis_2.set_ylabel(label_ratio_y, fontsize = font_size)
#axis_1.xticks(fontsize = font_size)
#axis_1.yticks(fontsize = font_size)
#axis_2.xticks(fontsize = font_size)
#axis_2.yticks(fontsize = font_size)
matplotlib.pyplot.suptitle(title, fontsize = font_size)
if not os.path.exists(directory):
os.makedirs(directory)
#if aspect is None:
# matplotlib.pyplot.axes().set_aspect(
# 1 / matplotlib.pyplot.axes().get_data_ratio()
# )
#else:
# matplotlib.pyplot.axes().set_aspect(aspect)
figure.tight_layout()
matplotlib.pyplot.subplots_adjust(top = 0.9)
matplotlib.pyplot.savefig(
directory + "/" + filename,
dpi = 700
)
matplotlib.pyplot.close()
if __name__ == "__main__":
main()
You may simply plot two different histograms but share the bins.
import numpy as np; np.random.seed(3)
import matplotlib.pyplot as plt
a = np.random.normal(size=(89,2))
kws = dict(histtype= "stepfilled",alpha= 0.5, linewidth = 2)
hist, edges,_ = plt.hist(a[:,0], bins = 6,color="lightseagreen", label = "A", edgecolor="k", **kws)
plt.hist(a[:,1], bins = edges,color="gold", label = "B", edgecolor="crimson", **kws)
plt.show()
Use the lists of Patches objects returned by the hist() function.
In your case, you have two datasets, so your variable patches will be a list containing two lists, each with the Patches objects used to draw the bars on your plot.
You can easily set the properties on all of these objects using the setp() function. For example:
a = np.random.normal(size=(100,))
b = np.random.normal(size=(100,))
c,d,e = plt.hist([a,b], color=['r','g'])
plt.setp(e[0], edgecolor='k', lw=2)
plt.setp(e[1], edgecolor='b', lw=3)