Importing a matrix from Python to Pyomo - numpy

I have a matrix defined in a Python file named matrix.py:
import numpy as np

N = 4
l = N
k = N
D = np.zeros((l, k))
for i in range(0, l):
    for j in range(0, k):
        if i == j:
            D[i, j] = 2
        else:
            D[i, j] = 0
D[0, 0] = (2*N**2 + 1)/6
D[-1, -1] = -(2*N**2 + 1)/6
print(D)
I want to use it in Pyomo, so I did:
import matrix
.
.
.
m.f_x1 = Var(m.N)

def f_x1_definition(model, i):
    for j in m.N:
        return m.f_x1[j] == sum(D[i, j]*m.x1[j] for j in range(value(m.n)))

m.f_x1_const = Constraint(m.N, rule=f_x1_definition)
But I get the following error:
NameError: global name 'D' is not defined
How can I do it?

When you import a module in Python using the syntax
import foo
everything defined in the foo module will be available within the foo namespace. That is, if foo.py contains:
import numpy as np
a = 5
D = np.zeros((1,5))
then when you import the module with import foo, you can access a and D with:
import foo
print(foo.a)
print(foo.D)
If you want to pull the symbols from foo directly into your local namespace, you would instead use the from ... import ... syntax:
from foo import a,D
print(a)
print(D)
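
Applied to your example, the matrix has to be referenced through the module name (or pulled in with from matrix import D). Here is a minimal sketch under that assumption; it also drops the stray for loop, since a Pyomo constraint rule should just return one expression for each index i:

import matrix  # matrix.py defines D

# ... model setup elided, as in the question ...

def f_x1_definition(model, i):
    # matrix.D resolves because D lives in the matrix module's namespace
    return model.f_x1[i] == sum(matrix.D[i, j] * model.x1[j]
                                for j in range(value(model.n)))

m.f_x1_const = Constraint(m.N, rule=f_x1_definition)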

Related

Getting "cannot unpack non-iterable NoneType object" error when trying to run GA optimization

I'm running through the ASE tutorial and am trying to run a GA optimization (https://wiki.fysik.dtu.dk/ase/tutorials/ga/ga_optimize.html). However, when I run the code, I get an unpacking error. I'm not too sure about how to approach the issue.
Code is as follows:
from random import random
from ase.io import write
from ase.optimize import BFGS
from ase.calculators.emt import EMT
from ase.ga.data import DataConnection
from ase.ga.population import Population
from ase.ga.standard_comparators import InteratomicDistanceComparator
from ase.ga.cutandsplicepairing import CutAndSplicePairing
from ase.ga.utilities import closest_distances_generator
from ase.ga.utilities import get_all_atom_types
from ase.ga.offspring_creator import OperationSelector
from ase.ga.standardmutations import MirrorMutation
from ase.ga.standardmutations import RattleMutation
from ase.ga.standardmutations import PermutationMutation

# Change the following three parameters to suit your needs
population_size = 20
mutation_probability = 0.3
n_to_test = 20

# Initialize the different components of the GA
da = DataConnection('gadb.db')
atom_numbers_to_optimize = da.get_atom_numbers_to_optimize()
n_to_optimize = len(atom_numbers_to_optimize)
slab = da.get_slab()
all_atom_types = get_all_atom_types(slab, atom_numbers_to_optimize)
blmin = closest_distances_generator(all_atom_types,
                                    ratio_of_covalent_radii=0.7)

comp = InteratomicDistanceComparator(n_top=n_to_optimize,
                                     pair_cor_cum_diff=0.015,
                                     pair_cor_max=0.7,
                                     dE=0.02,
                                     mic=False)

pairing = CutAndSplicePairing(slab, n_to_optimize, blmin)
mutations = OperationSelector([1., 1., 1.],
                              [MirrorMutation(blmin, n_to_optimize),
                               RattleMutation(blmin, n_to_optimize),
                               PermutationMutation(n_to_optimize)])

# Relax all unrelaxed structures (e.g. the starting population)
while da.get_number_of_unrelaxed_candidates() > 0:
    a = da.get_an_unrelaxed_candidate()
    a.calc = EMT()
    print('Relaxing starting candidate {0}'.format(a.info['confid']))
    dyn = BFGS(a, trajectory=None, logfile=None)
    dyn.run(fmax=0.05, steps=100)
    a.info['key_value_pairs']['raw_score'] = -a.get_potential_energy()
    da.add_relaxed_step(a)

# create the population
population = Population(data_connection=da,
                        population_size=population_size,
                        comparator=comp)

# test n_to_test new candidates
for i in range(n_to_test):
    print('Now starting configuration number {0}'.format(i))
    a1, a2 = population.get_two_candidates()
    a3, desc = pairing.get_new_individual([a1, a2])
    if a3 is None:
        continue
    da.add_unrelaxed_candidate(a3, description=desc)

    # Check if we want to do a mutation
    if random() < mutation_probability:
        a3_mut, desc = mutations.get_new_individual([a3])
        if a3_mut is not None:
            da.add_unrelaxed_step(a3_mut, desc)
            a3 = a3_mut

    # Relax the new candidate
    a3.calc = EMT()
    dyn = BFGS(a3, trajectory=None, logfile=None)
    dyn.run(fmax=0.05, steps=100)
    a3.info['key_value_pairs']['raw_score'] = -a3.get_potential_energy()
    da.add_relaxed_step(a3)
    population.update()

write('all_candidates.traj', da.get_all_relaxed_candidates())
I'm really confused about what I should try to fix the issue. I can't return the a1 and a2 values either.
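
No traceback is included, but if the unpacking error comes from a1, a2 = population.get_two_candidates(), one possible cause (an assumption on my part, not something the question confirms) is that the database holds too few relaxed candidates, so get_two_candidates returns None instead of a pair. A defensive sketch of a guard inside the loop:

# Hypothetical guard: skip the iteration when no candidate pair is available.
pair = population.get_two_candidates()
if pair is None:
    print('Fewer than two relaxed candidates available; skipping iteration {0}'.format(i))
    continue
a1, a2 = pair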

Cannot see the output on Google colab when I use pygraphviz

I have the following code using pygraphviz on Google Colab. How do I see the output graph? This is similar to [this question][1], except that the solution there only works for graphviz, not pygraphviz.
import pygraphviz as pgz

def tocolor(u):
    assert u >= 0
    assert u <= 1
    z = int((1-u)*256)
    return '#%2x%2x%2x' % (z, z, z)
    # return '#%2x%2x%2x' % (int(u*256), 100, int((1-u)*256))

def drawG(filename, nodes, c, directed=False):
    n = len(nodes)
    G = pgz.AGraph(strict=True, directed=directed)
    G.node_attr['shape'] = 'circle'
    # G.node_attr['style'] = 'filled'
    for x in range(n):
        # G.add_node(nodes[x], fillcolor=".6 .1 .1")
        G.add_node(nodes[x])
    for i in range(n):
        for j in range(i+1, n):
            if c[i, j] < 1e-3:
                continue
            else:
                u = c[i, j]/100
                G.add_edge(nodes[i], nodes[j], color=tocolor(u), style="setlinewidth(4)")
    G.draw(filename, prog='circo', args='-Gsize="200,200"')

l = 'Cervix,Vagina,Uterus,Iliac LN,Mandibular LN,Mesenteric LN,Axillary LN,Bronchial LN,Colonic LN,Colon,Spleen,Rectum,Inguinal,Liver,Ovary,Bone Marrow'
l = l.split(',')
y = pylab.loadtxt('Matrix.csv', dtype=float, usecols=list(range(1, 17)), delimiter=',')

def main():
    drawG('example-graph.pdf', l, y)

main()
[1]: https://stackoverflow.com/questions/59560168/graphviz-not-printing-output-graph-on-colab
This import seems to be missing:
import pylab
But other than that, what error are you getting?
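
If the goal is just to see the graph inline in Colab (rather than only writing example-graph.pdf to disk), one option is to render to an image file and display it with IPython. A minimal sketch, assuming a PNG rendering is acceptable:

import pygraphviz as pgz
from IPython.display import Image, display

G = pgz.AGraph(strict=True, directed=False)
G.add_edge('a', 'b')

# Render to a PNG file with the circo layout, then show it inline in the notebook.
G.draw('example-graph.png', prog='circo')
display(Image('example-graph.png'))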

Can I use OR-tools for TSP with a partial distance matrix (for a huge set of nodes)?

I'm trying to solve the TSP with OR-tools for a problem of roughly 80,000 nodes. The problem is that I need a huge distance matrix that takes too much memory, so it's infeasible and I don't get a solution.
So:
Is there an option to work with a partial distance matrix in OR-tools?
If not, is there a way to improve my code?
Is there another external solver in Python that can handle this task?
import math
from collections import namedtuple
import random
import time
from sklearn.metrics.pairwise import euclidean_distances
import numpy as np
import numba
from scipy.spatial import distance_matrix
from math import sqrt

Point = namedtuple("Point", ['x', 'y'])


def solve_it(input_data):
    # Modify this code to run your optimization algorithm
    global POINTS

    # parse the input
    lines = input_data.split('\n')
    nodeCount = int(lines[0])
    points = []
    for i in range(1, nodeCount+1):
        line = lines[i]
        parts = line.split()
        points.append(Point(float(parts[0]), float(parts[1])))

    # 2. routing with or-tools
    def dist_matrix(nodeCount, points):
        data = []
        for k in range(len(points)):
            data.append([int(points[k].x), int(points[k].y)])
        D = euclidean_distances(data, data)
        return D

    def create_data_model(D):
        """Stores the data for the problem."""
        data = {}
        data['distance_matrix'] = D  # yapf: disable
        data['num_vehicles'] = 1
        data['depot'] = 0
        return data

    def print_solution(manager, routing, solution):
        index = routing.Start(0)
        plan_output = []  # 'Route for vehicle 0:\n'
        route_distance = 0
        while not routing.IsEnd(index):
            plan_output.append(manager.IndexToNode(index))
            index = solution.Value(routing.NextVar(index))
        return plan_output

    def or_main(nodeCount, points):
        from ortools.constraint_solver import routing_enums_pb2
        from ortools.constraint_solver import pywrapcp
        """Entry point of the program."""
        # Instantiate the data problem.
        global sol
        D = dist_matrix(nodeCount, points)
        data = create_data_model(D)

        # Create the routing index manager.
        manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),
                                               data['num_vehicles'], data['depot'])

        # Create Routing Model.
        routing = pywrapcp.RoutingModel(manager)

        def distance_callback(from_index, to_index):
            """Returns the distance between the two nodes."""
            # Convert from routing variable Index to distance matrix NodeIndex.
            from_node = manager.IndexToNode(from_index)
            to_node = manager.IndexToNode(to_index)
            return data['distance_matrix'][from_node][to_node]

        transit_callback_index = routing.RegisterTransitCallback(distance_callback)

        # Define cost of each arc.
        routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)

        # Setting first solution heuristic.
        search_parameters = pywrapcp.DefaultRoutingSearchParameters()
        search_parameters.local_search_metaheuristic = (
            routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)
        k = 100
        if nodeCount <= 100:
            k = 30
        elif 100 <= nodeCount <= 1000:
            k = 300
        elif nodeCount > 1000:
            k = 17000
        search_parameters.time_limit.seconds = k
        search_parameters.log_search = True

        # Solve the problem.
        solution = routing.SolveWithParameters(search_parameters)

        # print solution on console.
        if solution:
            sol = print_solution(manager, routing, solution)
        return sol

    ######################################################################
    solution = or_main(nodeCount, points)

    # calculate the length of the tour
    obj = length(points[solution[-1]], points[solution[0]])
    for index in range(0, nodeCount-1):
        obj += length(points[solution[index]], points[solution[index+1]])

    # prepare the solution in the specified output format
    output_data = '%.2f' % obj + ' ' + str(0) + '\n'
    output_data += ' '.join(map(str, solution))
    return output_data


if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1:
        file_location = sys.argv[1].strip()
        with open(file_location, 'r') as input_data_file:
            input_data = input_data_file.read()
        # print(solve_it(input_data))
    else:
        print('This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/tsp_51_1)')
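
As for the first question: the transit callback does not have to index into a precomputed matrix at all. One way to avoid materializing the full 80,000 x 80,000 matrix is to compute each distance from the coordinates on demand inside the callback, since OR-tools only asks for the pairs it actually evaluates. Below is a sketch of that idea (or_main_on_the_fly is a hypothetical replacement for or_main, taking the same list of Point namedtuples); whether the solver itself copes with 80,000 nodes in reasonable time is a separate question, but the matrix memory is no longer the bottleneck:

import math

def or_main_on_the_fly(points):
    from ortools.constraint_solver import routing_enums_pb2
    from ortools.constraint_solver import pywrapcp

    manager = pywrapcp.RoutingIndexManager(len(points), 1, 0)
    routing = pywrapcp.RoutingModel(manager)

    def distance_callback(from_index, to_index):
        # Compute the Euclidean distance on demand instead of looking it up
        # in a precomputed matrix; nothing of size n*n is ever stored.
        a = points[manager.IndexToNode(from_index)]
        b = points[manager.IndexToNode(to_index)]
        # The routing solver works with integer costs, so round (or scale) the distance.
        return int(round(math.hypot(a.x - b.x, a.y - b.y)))

    transit_callback_index = routing.RegisterTransitCallback(distance_callback)
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)

    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)

    solution = routing.SolveWithParameters(search_parameters)
    if solution is None:
        return None
    route, index = [], routing.Start(0)
    while not routing.IsEnd(index):
        route.append(manager.IndexToNode(index))
        index = solution.Value(routing.NextVar(index))
    return route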

Could not convert string to float in Python

I'm trying to do principal component analysis on a CSV file, but when I run the code I get this error:
C:\Users\Lenovo\Desktop>python pca.py
ValueError: could not convert string to float: Annee;NET;INT;SUB;LMT;DCT;IMM;EXP;VRD
This is my CSV file. I tried removing spaces and the like, but it didn't help.
This is my Python script; I don't know what I'm missing.
Note: I run this code under Python 2.7.
from sklearn.externals import joblib
import numpy as np
import glob
import os
import time
import numpy

my_matrix = numpy.loadtxt(open("pca.csv", "rb"), delimiter=",", skiprows=0)


def pca(dataMat, r, autoset_r=False, autoset_rate=0.9):
    """
    purpose: principal components analysis
    """
    print("Start to do PCA...")
    t1 = time.time()
    meanVal = np.mean(dataMat, axis=0)
    meanRemoved = dataMat - meanVal
    # normData = meanRemoved / np.std(dataMat)
    covMat = np.cov(meanRemoved, rowvar=0)
    eigVals, eigVects = np.linalg.eig(np.mat(covMat))
    eigValIndex = np.argsort(-eigVals)
    if autoset_r:
        r = autoset_eigNum(eigVals, autoset_rate)
        print("autoset: take top {} of {} features".format(r, meanRemoved.shape[1]))
    r_eigValIndex = eigValIndex[:r]
    r_eigVect = eigVects[:, r_eigValIndex]
    lowDDataMat = meanRemoved * r_eigVect
    reconMat = (lowDDataMat * r_eigVect.T) + meanVal
    t2 = time.time()
    print("PCA takes %f seconds" % (t2-t1))
    joblib.dump(r_eigVect, './pca_args_save/r_eigVect.eig')
    joblib.dump(meanVal, './pca_args_save/meanVal.mean')
    return lowDDataMat, reconMat


def autoset_eigNum(eigValues, rate=0.99):
    eigValues_sorted = sorted(eigValues, reverse=True)
    eigVals_total = eigValues.sum()
    for i in range(1, len(eigValues_sorted)+1):
        eigVals_sum = sum(eigValues_sorted[:i])
        if eigVals_sum / eigVals_total >= rate:
            break
    return i
It seems that NumPy has a problem parsing your header row as floats.
Try setting skiprows=1 in your np.loadtxt call in order to skip the table header.
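
A minimal sketch of that change, with the extra assumption (based on the semicolons in the error message) that the file is actually semicolon-delimited rather than comma-delimited:

import numpy

# Skip the header row ("Annee;NET;INT;...") and split on ';',
# which is what the error message suggests the file uses.
my_matrix = numpy.loadtxt(open("pca.csv", "rb"), delimiter=";", skiprows=1)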

Error when writing pypandoc output to a file

I'm trying to write to a file the string that I got after running pypandoc.
When writing to the file I'm getting a bunch of UnicodeEncodeErrors.
My question: how can I write to the file without any encoding errors, i.e. write the string as is?
Thank you.
from tools.general import *
from definitions.url_Parse import *
from data import *
import spacy
import re
import pypandoc

url = 'https://groupprops.subwiki.org/w/index.php?title=1-automorphism-invariant_subgroup&action=edit'
strng = url_Parse(url)
str_ = strng.string
output = pypandoc.convert_text(str_, 'plain', format='mediawiki')

file1 = open('output.txt', 'w')
file1.write(output)
file1.close()
My string is:
A subgroup H of a group G is termed a 1-AUTOMORPHISM-INVARIANT subgroup
if any 1-automorphism of G sends H to itself. In other words, for every
1-automorphism φ of G, φ(H) ⊆ H.
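
Since open('output.txt', 'w') uses the platform's default codec, one fix (a sketch, assuming the UnicodeEncodeError comes from the write call and that UTF-8 output is acceptable) is to open the file with an explicit encoding; io.open behaves the same way on Python 2 and 3:

# -*- coding: utf-8 -*-
import io

output = u'for every 1-automorphism φ of G, φ(H) ⊆ H.'

# Open the file with an explicit encoding instead of the platform default,
# so characters such as φ and ⊆ are written without a UnicodeEncodeError.
with io.open('output.txt', 'w', encoding='utf-8') as f:
    f.write(output)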