Leetcode 126: Word Ladder 2 in Python - code optimization

I have a solution for Word Ladder 2 (Leetcode problem 126) in Python 3.6, and I notice that one of the very last test cases times out on the platform. Funnily enough, the test passes when run in PyCharm or as an individual test case on the site, but it takes about 5 seconds to complete. My solution uses BFS with some optimizations; can someone tell me if there is a way to make it faster? Thank you! (P.S.: Apologies for the additional test cases included in the commented-out section!)
import math
import queue
from typing import List


class WordLadder2(object):
    @staticmethod
    def is_one_hop_away(s1: str, s2: str) -> bool:
        """
        Uses the edit distance between strings to return True if string s2 is one character away from s1
        :param s1: Base string
        :param s2: Comparison string
        :return: True if the difference between the strings is one character
        """
        matrix = [[0] * (len(s1) + 1) for i in range(len(s1) + 1)]
        for r, row in enumerate(matrix):
            for c, entry in enumerate(row):
                if not r:
                    matrix[r][c] = c
                elif not c:
                    matrix[r][c] = r
                else:
                    if s1[r - 1] == s2[c - 1]:
                        matrix[r][c] = matrix[r - 1][c - 1]
                    else:
                        matrix[r][c] = 1 + min(matrix[r - 1][c - 1], matrix[r - 1][c], matrix[r][c - 1])
        if matrix[-1][-1] == 1:
            return True
        else:
            return False

    def get_next_words(self, s1: str, wordList: List[str]) -> List[str]:
        """
        For a given string in the list, return a set of strings that are one hop away
        :param s1: String whose neighbors one hop away are needed
        :param wordList: Array of words to choose from
        :return: List of words that are one character away from given string s1
        """
        words = []
        for word in wordList:
            if self.is_one_hop_away(s1, word):
                words.append(word)
        return words

    def find_ladders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
        """
        Main method to determine shortest paths between a beginning word and an ending word, in a given list of words
        :param beginWord: Word to begin the ladder
        :param endWord: Word to end the ladder
        :param wordList: List of words to choose from
        :return: List of list of word ladders, if they are found. Empty list, if endWord not in wordList or path not
            found from beginWord to endWord
        """
        q = queue.Queue()
        paths = list()
        current = [beginWord]
        q.put((beginWord, current))
        # Set to track words we have already processed
        visited = set()
        # Dictionary to keep track of the shortest path lengths to each word from beginWord
        shortest_paths = {beginWord: 1}
        min_length = math.inf
        # Use BFS to find the shortest path in the graph
        while q.qsize():
            word, path = q.get()
            # If endWord is found, add the current path to the list of paths and compute minimum path
            # length found so far
            if word == endWord:
                paths.append(path)
                min_length = min(min_length, len(path))
                continue
            for hop in self.get_next_words(word, wordList):
                # If the hop is already processed or in the queue for processing, skip
                if hop in visited or hop in q.queue:
                    continue
                # If the shortest path to the hop has not been determined or the current path length is lesser
                # than or equal to the known shortest path to the hop, add it to the queue and update the shortest
                # path to the hop.
                if (hop not in shortest_paths) or (hop in shortest_paths and len(path + [hop]) <= shortest_paths[hop]):
                    q.put((hop, path + [hop]))
                    shortest_paths[hop] = len(path + [hop])
            visited.add(word)
        return [s for s in paths if len(s) == min_length]


if __name__ == "__main__":
    # beginword = 'qa'
    # endword = 'sq'
    # wordlist = ["si","go","se","cm","so","ph","mt","db","mb","sb","kr","ln","tm","le","av","sm","ar","ci","ca","br","ti","ba","to","ra","fa","yo","ow","sn","ya","cr","po","fe","ho","ma","re","or","rn","au","ur","rh","sr","tc","lt","lo","as","fr","nb","yb","if","pb","ge","th","pm","rb","sh","co","ga","li","ha","hz","no","bi","di","hi","qa","pi","os","uh","wm","an","me","mo","na","la","st","er","sc","ne","mn","mi","am","ex","pt","io","be","fm","ta","tb","ni","mr","pa","he","lr","sq","ye"]
    # beginword = 'hit'
    # endword = 'cog'
    # wordlist = ['hot', 'dot', 'dog', 'lot', 'log', 'cog']
    # beginword = 'red'
    # endword = 'tax'
    # wordlist = ['ted', 'tex', 'red', 'tax', 'tad', 'den', 'rex', 'pee']
    beginword = 'cet'
    endword = 'ism'
    wordlist = ["kid","tag","pup","ail","tun","woo","erg","luz","brr","gay","sip","kay","per","val","mes","ohs","now","boa","cet","pal","bar","die","war","hay","eco","pub","lob","rue","fry","lit","rex","jan","cot","bid","ali","pay","col","gum","ger","row","won","dan","rum","fad","tut","sag","yip","sui","ark","has","zip","fez","own","ump","dis","ads","max","jaw","out","btu","ana","gap","cry","led","abe","box","ore","pig","fie","toy","fat","cal","lie","noh","sew","ono","tam","flu","mgm","ply","awe","pry","tit","tie","yet","too","tax","jim","san","pan","map","ski","ova","wed","non","wac","nut","why","bye","lye","oct","old","fin","feb","chi","sap","owl","log","tod","dot","bow","fob","for","joe","ivy","fan","age","fax","hip","jib","mel","hus","sob","ifs","tab","ara","dab","jag","jar","arm","lot","tom","sax","tex","yum","pei","wen","wry","ire","irk","far","mew","wit","doe","gas","rte","ian","pot","ask","wag","hag","amy","nag","ron","soy","gin","don","tug","fay","vic","boo","nam","ave","buy","sop","but","orb","fen","paw","his","sub","bob","yea","oft","inn","rod","yam","pew","web","hod","hun","gyp","wei","wis","rob","gad","pie","mon","dog","bib","rub","ere","dig","era","cat","fox","bee","mod","day","apr","vie","nev","jam","pam","new","aye","ani","and","ibm","yap","can","pyx","tar","kin","fog","hum","pip","cup","dye","lyx","jog","nun","par","wan","fey","bus","oak","bad","ats","set","qom","vat","eat","pus","rev","axe","ion","six","ila","lao","mom","mas","pro","few","opt","poe","art","ash","oar","cap","lop","may","shy","rid","bat","sum","rim","fee","bmw","sky","maj","hue","thy","ava","rap","den","fla","auk","cox","ibo","hey","saw","vim","sec","ltd","you","its","tat","dew","eva","tog","ram","let","see","zit","maw","nix","ate","gig","rep","owe","ind","hog","eve","sam","zoo","any","dow","cod","bed","vet","ham","sis","hex","via","fir","nod","mao","aug","mum","hoe","bah","hal","keg","hew","zed","tow","gog","ass","dem","who","bet","gos","son","ear","spy","kit","boy","due","sen","oaf","mix","hep","fur","ada","bin","nil","mia","ewe","hit","fix","sad","rib","eye","hop","haw","wax","mid","tad","ken","wad","rye","pap","bog","gut","ito","woe","our","ado","sin","mad","ray","hon","roy","dip","hen","iva","lug","asp","hui","yak","bay","poi","yep","bun","try","lad","elm","nat","wyo","gym","dug","toe","dee","wig","sly","rip","geo","cog","pas","zen","odd","nan","lay","pod","fit","hem","joy","bum","rio","yon","dec","leg","put","sue","dim","pet","yaw","nub","bit","bur","sid","sun","oil","red","doc","moe","caw","eel","dix","cub","end","gem","off","yew","hug","pop","tub","sgt","lid","pun","ton","sol","din","yup","jab","pea","bug","gag","mil","jig","hub","low","did","tin","get","gte","sox","lei","mig","fig","lon","use","ban","flo","nov","jut","bag","mir","sty","lap","two","ins","con","ant","net","tux","ode","stu","mug","cad","nap","gun","fop","tot","sow","sal","sic","ted","wot","del","imp","cob","way","ann","tan","mci","job","wet","ism","err","him","all","pad","hah","hie","aim"]
    wl = WordLadder2()
    # beginword = 'hot'
    # endword = 'dog'
    # wordlist = ['hot', 'dog', 'dot']
    print(wl.find_ladders(beginword, endword, wordlist))

The part that slows down your solution is is_one_hop_away, which is a costly function: it builds a full edit-distance matrix for every pair of words, and it is called repeatedly during the actual BFS. Instead you should aim to first create a graph structure -- an adjacency list -- so that the cost of calculating which words are neighbors is dealt with before actually performing the BFS.
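As an aside, even the neighbor test itself can be much cheaper: the words in this problem all have equal length, so the full dynamic-programming edit distance is overkill, and a character-by-character count answers the same question in O(L) time. A sketch, assuming equal-length inputs:

def is_one_hop_away(s1: str, s2: str) -> bool:
    # For equal-length words, "one hop" means exactly one differing position
    return sum(a != b for a, b in zip(s1, s2)) == 1

Still, the bigger win is to stop calling any per-pair test inside the BFS at all, which is what the adjacency-list approach below does.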
Here is one way to do it:
from collections import defaultdict
from typing import List


class Solution:
    def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
        def createAdjacencyList(wordList):
            adj = defaultdict(set)
            d = defaultdict(set)
            for word in wordList:
                for i in range(len(word)):
                    derived = word[:i] + "*" + word[i+1:]
                    for neighbor in d[derived]:
                        adj[word].add(neighbor)
                        adj[neighbor].add(word)
                    d[derived].add(word)
            return adj

        def edgesOnShortestPaths(adj, beginWord, endWord):
            frontier = [beginWord]
            edges = defaultdict(list)
            edges[beginWord] = []
            while endWord not in frontier:
                nextfrontier = set(neighbor
                                   for word in frontier
                                   for neighbor in adj[word]
                                   if neighbor not in edges)
                if not nextfrontier:  # endWord is not reachable
                    return
                for word in frontier:
                    for neighbor in adj[word]:
                        if neighbor in nextfrontier:
                            edges[neighbor].append(word)
                frontier = nextfrontier
            return edges

        def generatePaths(edges, word):
            if not edges[word]:
                yield [word]
            else:
                for neighbor in edges[word]:
                    for path in generatePaths(edges, neighbor):
                        yield path + [word]

        if endWord not in wordList:  # shortcut exit
            return []
        adj = createAdjacencyList([beginWord] + wordList)
        edges = edgesOnShortestPaths(adj, beginWord, endWord)
        if not edges:  # endWord is not reachable
            return []
        return list(generatePaths(edges, endWord))
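As a quick sanity check, this version can be run against one of the question's test cases (a small driver of my own; the two ladders shown are the expected shortest ones, though their order may vary):

if __name__ == "__main__":
    sol = Solution()
    print(sol.findLadders('hit', 'cog', ['hot', 'dot', 'dog', 'lot', 'log', 'cog']))
    # [['hit', 'hot', 'dot', 'dog', 'cog'], ['hit', 'hot', 'lot', 'log', 'cog']]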

Related

PYQGIS: How to use QgsRasterFileWriter.writeRaster to create raster from numpy array

I am trying to use the method writeRaster from qgis.core.QgsRasterFileWriter to create a single-band raster of floats and NaNs, but according to the documentation I need to provide these inputs:
writeRaster(
    self,                                    # OK
    pipe: QgsRasterPipe,                     # Q1
    nCols: int,                              # OK
    nRows: int,                              # OK
    outputExtent: QgsRectangle,              # Q2
    crs: QgsCoordinateReferenceSystem,       # OK
    feedback: QgsRasterBlockFeedback = None  # OK
) → QgsRasterFileWriter.WriterError
I have 2 questions here:
Q1: What is a QgsRasterPipe, how do I use it, and what is its purpose?
The documentation says: "Constructor for QgsRasterPipe. Base class for processing modules."
The few examples of writeRaster I found online just initialize this object, so what do I need to provide in the pipe argument?
Q2: The argument outputExtent of type QgsRectangle seems to be the bounding area of my raster: QgsRectangle(x_min, y_min, x_max, y_max). But here is my question: where do I declare the values of the pixels?
Here is the script (not working) I have for the moment:
import os
import numpy
from qgis.core import (
    QgsMapLayer,
    QgsRasterFileWriter,
    QgsCoordinateReferenceSystem,
    QgsRasterPipe,
)


def write_to_geotiff(data: list, filename: str, epsg: str, layer: str = None) -> None:
    x_data = data[0]
    y_data = data[1]
    z_data = data[2]
    nx, ny = len(x_data), len(y_data)
    QgsRasterFileWriter.writeRaster(
        QgsRasterPipe(),
        nCols=nx,
        nRows=ny,
        QgsRectangle(
            min(x_data),
            min(y_data),
            max(x_data),
            max(y_data)
        ),
        crs=QgsCoordinateReferenceSystem(f"epsg:{epsg}"),
    )


if __name__ == "__main__":
    filename = r"C:\Users\vince\Downloads\test.gpkg"
    x_data = numpy.asarray([0, 1, 2])
    y_data = numpy.asarray([0, 1])
    z_data = numpy.asarray([
        [0.1, numpy.nan],
        [0.5, 139.5],
        [150.98, numpy.nan],
    ])
    epsg = "4326"
    write_to_geotiff(
        [x_data, y_data, z_data],
        filename,
        epsg
    )
if __name__ == "__main__":
filename = r"C:\Users\vince\Downloads\test.gpkg"
x_data = numpy.asarray([0, 1, 2])
y_data = numpy.asarray([0, 1])
z_data = numpy.asarray([
[0.1, numpy.nan],
[0.5, 139.5],
[150.98, numpy.nan],
])
epsg = "4326"
write_to_geotiff(
[x_data, y_data, z_data],
filename,
epsg
)
I saw this answer for Q1: the data is in the pipe variable. But I don't know how to create a QgsRasterBlock from my numpy array...
I got it working using the method QgsRasterFileWriter.createOneBandRaster, which creates a provider.
You can get the block of the provider (of type QgsRasterBlock) and use its setValue method to assign values.
import numpy
from qgis.core import (
    Qgis,
    QgsRectangle,
    QgsRasterFileWriter,
    QgsCoordinateReferenceSystem,
)

writer = QgsRasterFileWriter(filename)
provider = QgsRasterFileWriter.createOneBandRaster(
    writer,
    dataType=Qgis.Float32,
    width=nx,
    height=ny,
    extent=QgsRectangle(
        min(x_data),
        min(y_data),
        max(x_data),
        max(y_data)
    ),
    crs=QgsCoordinateReferenceSystem(f"epsg:{epsg}"),
)
provider.setNoDataValue(1, -1)
provider.setEditable(True)
block = provider.block(
    bandNo=1,
    boundingBox=provider.extent(),
    width=provider.xSize(),
    height=provider.ySize()
)
for ix in range(nx):
    for iy in range(ny):
        value = z_data[ix][iy]
        if numpy.isnan(value):  # note: `value == numpy.nan` is always False, since NaN != NaN
            continue
        block.setValue(iy, ix, value)
provider.writeBlock(
    block=block,
    band=1,
    xOffset=0,
    yOffset=0
)
provider.setEditable(False)
This will create a TIFF file.
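A quick way to verify the result from the QGIS Python console (a sketch; it assumes filename still points at the written file) is to load it back as a raster layer:

from qgis.core import QgsRasterLayer

layer = QgsRasterLayer(filename, "written raster")
print(layer.isValid())  # True if the raster was written correctly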

Numpy: Construct Slice A La Carte

Suppose I have the following:
# in pseudo code
# function input 1
chord = [0,1,17,35,47,0]
dims = [0,1,2,4,5,6]
x_axis = 3
t_axis = 7
# what I'd like to return
np.squeeze(arr[0,1,17,:,35,47,0,:])
# function input 2
chord = [0,3,4,5,6,7]
dims = [0,2,3,4,5,6]
x_axis = 1
t_axis = 7
# desired return
np.squeeze(arr[0,:,3,4,5,6,7,:])
How do I construct these numpy slices, given inputs where I can arbitrarily specify a pair of axes and a chord coordinate?
I implemented a reflection-based solution:
def reflection_window(arr: np.ndarray, chord: list, dim0, dim1):
    var = "arr"
    bra = "["
    ket = "]"
    coord = [str(int(i)) for i in chord]
    coord.insert(dim0, ':')
    coord.insert(dim1, ':')
    chordstr = ','.join(coord)
    slicer = var + bra + chordstr + ket
    return eval(slicer)
Staying native to numpy is probably better, but since Python is a shell-scripting language, it probably makes sense to treat it that way if necessary.
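For reference, the same windowing can be done natively, without eval, by building an index tuple that mixes integers with slice(None), the programmatic spelling of ':'. A minimal sketch, with a function name of my choosing:

import numpy as np

def tuple_window(arr: np.ndarray, chord: list, dim0: int, dim1: int):
    # Integer coordinates for the fixed axes, slice(None) for the two free ones.
    # Assumes dim0 < dim1, as in the examples above.
    index = [int(i) for i in chord]
    index.insert(dim0, slice(None))
    index.insert(dim1, slice(None))
    return arr[tuple(index)]

For function input 1 above, tuple_window(arr, [0, 1, 17, 35, 47, 0], 3, 7) indexes arr[0, 1, 17, :, 35, 47, 0, :] directly; the integer axes are dropped automatically, so the squeeze is not needed.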

how to use Apache Commons Math Optimization in Jython?

I want to port some Matlab code to Jython, and found that Matlab's fminsearch might be replaced by the Apache Commons Math optimization package.
I'm coding in the Mango medical image script manager, which uses Jython 2.5.3 as its scripting language, and the Commons Math version is 3.6.1.
Here is my code:
import sys


def f(x, y):
    return x^2 + y^2


sys.path.append('/home/shujian/APPs/Mango/lib/commons-math3-3.6.1.jar')
sys.add_package('org.apache.commons.math3.analysis')
from org.apache.commons.math3.analysis import MultivariateFunction
sys.add_package('org.apache.commons.math3.optim.nonlinear.scalar.noderiv')
from org.apache.commons.math3.optim.nonlinear.scalar.noderiv import NelderMeadSimplex, SimplexOptimizer
sys.add_package('org.apache.commons.math3.optim.nonlinear.scalar')
from org.apache.commons.math3.optim.nonlinear.scalar import ObjectiveFunction
sys.add_package('org.apache.commons.math3.optim')
from org.apache.commons.math3.optim import MaxEval, InitialGuess
sys.add_package('org.apache.commons.math3.optimization')
from org.apache.commons.math3.optimization import GoalType

initialSolution = [2.0, 2.0]
simplex = NelderMeadSimplex([2.0, 2.0])
opt = SimplexOptimizer(2**(-6), 2**(-10))
solution = opt.optimize(MaxEval(300), ObjectiveFunction(f), simplex, GoalType.MINIMIZE, InitialGuess([2.0, 2.0]))
skewParameters2 = solution.getPointRef()
print skewParameters2
And I got the error below:
TypeError: optimize(): 1st arg can't be coerced to
I'm quite confused about how to use the optimization in Jython, and the examples available are all in Java.
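A note on the likely cause, hedged as a guess: GoalType here is imported from the deprecated org.apache.commons.math3.optimization package, while optimize() expects OptimizationData from the newer optim packages; and a plain two-argument Python function cannot be coerced to MultivariateFunction, whose value method takes a single double[] array. A sketch of a wrapper along those lines (untested in Mango):

from org.apache.commons.math3.analysis import MultivariateFunction
from org.apache.commons.math3.optim.nonlinear.scalar import GoalType

class F(MultivariateFunction):
    # Implement the Java interface explicitly; `point` arrives as a double[]
    def value(self, point):
        return point[0]**2 + point[1]**2

solution = opt.optimize(MaxEval(300), ObjectiveFunction(F()),
                        simplex, GoalType.MINIMIZE, InitialGuess([2.0, 2.0]))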
I've given up on this plan and found another way to perform the fminsearch in Jython. Below is the Jython version of the code:
import sys
sys.path.append('.../jnumeric-2.5.1_ra0.1.jar')  # add the jnumeric path
import Numeric as np


def nelder_mead(f, x_start,
                step=0.1, no_improve_thr=10e-6,
                no_improv_break=10, max_iter=0,
                alpha=1., gamma=2., rho=-0.5, sigma=0.5):
    '''
    @param f (function): function to optimize, must return a scalar score
        and operate over a numpy array of the same dimensions as x_start
    @param x_start (float list): initial position
    @param step (float): look-around radius in initial step
    @no_improv_thr, no_improv_break (float, int): break after no_improv_break iterations with
        an improvement lower than no_improv_thr
    @max_iter (int): always break after this number of iterations.
        Set it to 0 to loop indefinitely.
    @alpha, gamma, rho, sigma (floats): parameters of the algorithm
        (see Wikipedia page for reference)
    return: tuple (best parameter array, best score)
    '''
    # init
    dim = len(x_start)
    prev_best = f(x_start)
    no_improv = 0
    res = [[np.array(x_start), prev_best]]
    for i in range(dim):
        x = np.array(x_start)
        x[i] = x[i] + step
        score = f(x)
        res.append([x, score])

    # simplex iter
    iters = 0
    while 1:
        # order
        res.sort(key=lambda x: x[1])
        best = res[0][1]

        # break after max_iter
        if max_iter and iters >= max_iter:
            return res[0]
        iters += 1

        # break after no_improv_break iterations with no improvement
        print '...best so far:', best
        if best < prev_best - no_improve_thr:
            no_improv = 0
            prev_best = best
        else:
            no_improv += 1
        if no_improv >= no_improv_break:
            return res[0]

        # centroid
        x0 = [0.] * dim
        for tup in res[:-1]:
            for i, c in enumerate(tup[0]):
                x0[i] += c / (len(res)-1)

        # reflection
        xr = x0 + alpha*(x0 - res[-1][0])
        rscore = f(xr)
        if res[0][1] <= rscore < res[-2][1]:
            del res[-1]
            res.append([xr, rscore])
            continue

        # expansion
        if rscore < res[0][1]:
            xe = x0 + gamma*(x0 - res[-1][0])
            escore = f(xe)
            if escore < rscore:
                del res[-1]
                res.append([xe, escore])
                continue
            else:
                del res[-1]
                res.append([xr, rscore])
                continue

        # contraction
        xc = x0 + rho*(x0 - res[-1][0])
        cscore = f(xc)
        if cscore < res[-1][1]:
            del res[-1]
            res.append([xc, cscore])
            continue

        # reduction
        x1 = res[0][0]
        nres = []
        for tup in res:
            redx = x1 + sigma*(tup[0] - x1)
            score = f(redx)
            nres.append([redx, score])
        res = nres
And the test example is below:

def f(x):
    return x[0]**2 + x[1]**2 + x[2]**2

print nelder_mead(f, [3.4, 2.3, 2.2])
Actually, the original version is for Python (with numpy), and the link below is the source:
https://github.com/fchollet/nelder-mead

Polygons from network of connected points

Given an array of 2D points (#pts x 2) and an array of which points are connected to which (#bonds x 2 int array with indices of pts), how can I efficiently return an array of polygons formed from the bonds?
There can be 'dangling' bonds (like in the top left of the image below) that don't close a polygon, and these should be ignored.
Here's an example:
import numpy as np
xy = np.array([[2.72,-2.976], [2.182,-3.40207],
[-3.923,-3.463], [2.1130,4.5460], [2.3024,3.4900], [.96979,-.368],
[-2.632,3.7555], [-.5086,.06170], [.23409,-.6588], [.20225,-.9540],
[-.5267,-1.981], [-2.190,1.4710], [-4.341,3.2331], [-3.318,3.2654],
[.58510,4.1406], [.74331,2.9556], [.39622,3.6160], [-.8943,1.0643],
[-1.624,1.5259], [-1.414,3.5908], [-1.321,3.6770], [1.6148,1.0070],
[.76172,2.4627], [.76935,2.4838], [3.0322,-2.124], [1.9273,-.5527],
[-2.350,-.8412], [-3.053,-2.697], [-1.945,-2.795], [-1.905,-2.767],
[-1.904,-2.765], [-3.546,1.3208], [-2.513,1.3117], [-2.953,-.5855],
[-4.368,-.9650]])
BL= np.array([[22,23], [28,29], [8,9],
[12,31], [18,19], [31,32], [3,14],
[32,33], [24,25], [10,30], [15,23],
[5,25], [12,13], [0,24], [27,28],
[15,16], [5,8], [0,1], [11,18],
[2,27], [11,13], [33,34], [26,33],
[29,30], [7,17], [9,10], [26,30],
[17,22], [5,21], [19,20], [17,18],
[14,16], [7,26], [21,22], [3,4],
[4,15], [11,32], [6,19], [6,13],
[16,20], [27,34], [7,8], [1,9]])
I can't tell you how to implement it with numpy, but here's an outline of a possible algorithm:
1. Add a list of attached bonds to each point.
2. Remove the points that have only one bond attached, and remove that bond as well (these are the dangling bonds).
3. Attach two boolean markers to each bond, indicating whether the bond has already been added to a polygon in each of the two possible directions. Each bond can only be used in two polygons. Initially set all markers to false.
4. Select any initial point and repeat the following step until all bonds have been used in both directions: select a bond that has not been used (in the respective direction). This is the first edge of the polygon. Of the bonds attached to the end point of the selected one, choose the one with minimal angle in e.g. counter-clockwise direction. Add this to the polygon and continue until you return to the initial point.
This algorithm will also produce a large polygon containing all the outer bonds of the network. I guess you will find a way to recognize this one and remove it.
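The angle bookkeeping in step 4 is the only delicate part. Here is a minimal sketch of that selection (the helper name and signature are mine; xy is the N x 2 point array; whether min or max is the right choice depends on the angle convention -- the implementation below uses max for interior faces):

import numpy as np

def next_point(xy, prev_pt, cur_pt, neighbors):
    # Among cur_pt's neighbors (excluding the point we came from), pick the
    # one whose outgoing edge makes the largest counterclockwise angle with
    # the incoming edge prev_pt -> cur_pt.
    cand = np.array([n for n in neighbors if n != prev_pt])
    angles = np.mod(
        np.arctan2(xy[cand, 1] - xy[cur_pt, 1], xy[cand, 0] - xy[cur_pt, 0])
        - np.arctan2(xy[prev_pt, 1] - xy[cur_pt, 1], xy[prev_pt, 0] - xy[cur_pt, 0]),
        2 * np.pi)
    return int(cand[np.argmax(angles)])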
For future readers, the bulk of the implementation of Frank's suggestion in numpy is below. The extraction of the boundary follows essentially the same algorithm as walking around a polygon, except using the minimum angle bond, rather than the max.
import numpy as np
from matplotlib import patches
from matplotlib.path import Path

# Note: setdiff2d, BL2NLandKL, is_cyclic_permutation and extract_boundary_from_NL
# are helpers from the author's codebase; sketches of two of them follow after
# this listing.


def extract_polygons_lattice(xy, BL, NL, KL):
    ''' Extract polygons from a lattice of points.
    Parameters
    ----------
    xy : NP x 2 float array
        points living on vertices of dual to triangulation
    BL : Nbonds x 2 int array
        Each row is a bond and contains indices of connected points
    NL : NP x NN int array
        Neighbor list. The ith row has neighbors of the ith particle, padded with zeros
    KL : NP x NN int array
        Connectivity list. The ith row has ones where ith particle is connected to NL[i,j]
    Returns
    ----------
    polygons : list
        list of lists of indices of each polygon
    PPC : list
        list of patches for patch collection
    '''
    NP = len(xy)
    NN = np.shape(KL)[1]

    # Remove dangling bonds
    # dangling bonds have one particle with only one neighbor
    finished_dangles = False
    while not finished_dangles:
        dangles = np.where([np.count_nonzero(row) == 1 for row in KL])[0]
        if len(dangles) > 0:
            # Make sorted bond list of dangling bonds
            dpair = np.sort(np.array([[d0, NL[d0, np.where(KL[d0] != 0)[0]]] for d0 in dangles]), axis=1)
            # Remove those bonds from BL
            BL = setdiff2d(BL, dpair.astype(BL.dtype))
            print 'dpair = ', dpair
            print 'ending BL = ', BL
            NL, KL = BL2NLandKL(BL, NP=NP, NN=NN)
        else:
            finished_dangles = True

    # bond markers for counterclockwise, clockwise
    used = np.zeros((len(BL), 2), dtype=bool)
    polygons = []
    finished = False
    while (not finished) and len(polygons) < 20:
        # Check if all bond markers are used in order A-->B
        todoAB = np.where(~used[:, 0])[0]
        if len(todoAB) > 0:
            bond = BL[todoAB[0]]
            # bb will be list of polygon indices
            # Start with orientation going from bond[0] to bond[1]
            nxt = bond[1]
            bb = [bond[0], nxt]
            dmyi = 1
            # as long as we haven't completed the full outer polygon, add next index
            while nxt != bond[0]:
                n_tmp = NL[nxt, np.argwhere(KL[nxt]).ravel()]
                # Exclude previous boundary particle from the neighbors array, unless it's the only one
                # (It cannot be the only one, if we removed dangling bonds)
                if len(n_tmp) == 1:
                    '''The bond is a lone bond, not part of a triangle.'''
                    neighbors = n_tmp
                else:
                    neighbors = np.delete(n_tmp, np.where(n_tmp == bb[dmyi - 1])[0])
                angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[nxt, 1], xy[neighbors, 0] - xy[nxt, 0]).ravel()
                                - np.arctan2(xy[bb[dmyi - 1], 1] - xy[nxt, 1], xy[bb[dmyi - 1], 0] - xy[nxt, 0]).ravel(),
                                2 * np.pi)
                nxt = neighbors[angles == max(angles)][0]
                bb.append(nxt)
                # Now mark the current bond as used
                thisbond = [bb[dmyi - 1], bb[dmyi]]
                # Get index of used matching thisbond
                mark_used = np.where((BL == thisbond).all(axis=1))
                if len(mark_used) > 0:
                    # print 'marking bond [', thisbond, '] as used'
                    used[mark_used, 0] = True
                else:
                    # Used this bond in reverse order
                    used[mark_used, 1] = True
                dmyi += 1
            polygons.append(bb)
        else:
            # Check for remaining bonds unused in reverse order (B-->A)
            todoBA = np.where(~used[:, 1])[0]
            if len(todoBA) > 0:
                bond = BL[todoBA[0]]
                # bb will be list of polygon indices
                # Start with orientation going from bond[0] to bond[1]
                nxt = bond[0]
                bb = [bond[1], nxt]
                dmyi = 1
                # as long as we haven't completed the full outer polygon, add nextIND
                while nxt != bond[1]:
                    n_tmp = NL[nxt, np.argwhere(KL[nxt]).ravel()]
                    # Exclude previous boundary particle from the neighbors array, unless it's the only one
                    # (It cannot be the only one, if we removed dangling bonds)
                    if len(n_tmp) == 1:
                        '''The bond is a lone bond, not part of a triangle.'''
                        neighbors = n_tmp
                    else:
                        neighbors = np.delete(n_tmp, np.where(n_tmp == bb[dmyi - 1])[0])
                    angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[nxt, 1], xy[neighbors, 0] - xy[nxt, 0]).ravel()
                                    - np.arctan2(xy[bb[dmyi - 1], 1] - xy[nxt, 1], xy[bb[dmyi - 1], 0] - xy[nxt, 0]).ravel(),
                                    2 * np.pi)
                    nxt = neighbors[angles == max(angles)][0]
                    bb.append(nxt)
                    # Now mark the current bond as used --> note the inversion of the bond order to match BL
                    thisbond = [bb[dmyi], bb[dmyi - 1]]
                    # Get index of used matching [bb[dmyi-1], nxt]
                    mark_used = np.where((BL == thisbond).all(axis=1))
                    if len(mark_used) > 0:
                        used[mark_used, 1] = True
                    dmyi += 1
                polygons.append(bb)
            else:
                # All bonds have been accounted for
                finished = True

    # Check for duplicates (up to cyclic permutations) in polygons
    # Note that we need to ignore the last element of each polygon (which is also starting pt)
    keep = np.ones(len(polygons), dtype=bool)
    for ii in range(len(polygons)):
        polyg = polygons[ii]
        for p2 in polygons[ii + 1:]:
            if is_cyclic_permutation(polyg[:-1], p2[:-1]):
                keep[ii] = False
    polygons = [polygons[i] for i in np.where(keep)[0]]

    # Remove the polygon which is the entire lattice boundary, except dangling bonds
    boundary = extract_boundary_from_NL(xy, NL, KL)
    print 'boundary = ', boundary
    keep = np.ones(len(polygons), dtype=bool)
    for ii in range(len(polygons)):
        polyg = polygons[ii]
        if is_cyclic_permutation(polyg[:-1], boundary.tolist()):
            keep[ii] = False
        elif is_cyclic_permutation(polyg[:-1], boundary[::-1].tolist()):
            keep[ii] = False
    polygons = [polygons[i] for i in np.where(keep)[0]]

    # Prepare a polygon patch collection
    PPC = []
    for polyINDs in polygons:
        pp = Path(xy[polyINDs], closed=True)
        ppp = patches.PathPatch(pp, lw=2)
        PPC.append(ppp)
    return polygons, PPC
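The listing above calls four helpers from the author's own codebase: setdiff2d, BL2NLandKL, is_cyclic_permutation and extract_boundary_from_NL. Their intended behavior can be inferred from the call sites; for completeness, here are minimal sketches of two of them. These are my guesses at the semantics, not the author's originals:

import numpy as np

def setdiff2d(BL, dpair):
    # Rows of BL that do not appear as rows of dpair (both Nx2 int arrays)
    keep = set(map(tuple, BL)) - set(map(tuple, dpair))
    return np.array(sorted(keep), dtype=BL.dtype)

def is_cyclic_permutation(p1, p2):
    # True if list p2 is a cyclic rotation of list p1
    if len(p1) != len(p2):
        return False
    doubled = list(p1) + list(p1)
    return any(doubled[i:i + len(p2)] == list(p2) for i in range(len(p1)))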

how to set key for deterministic variable in pymc

I'm trying to plot the difference between two variables. I'm following the example set here (search for true_p_A and it will be in the right section)
Here is my code
import numpy as np
import pymc as pm


def cool(test):
    n_data_points = len(test)
    alpha = 1.0 / np.mean(test)
    lambda_1 = pm.Exponential("lambda_1", alpha)  # prior on first behaviour
    lambda_2 = pm.Exponential("lambda_2", alpha)  # prior on second behaviour
    tau = pm.DiscreteUniform("tau", lower=0, upper=len(test))  # prior on behaviour change
    """
    The below deterministic functions map an assignment, in this case 0 or 1,
    to a set of parameters, located in the (1,2) arrays `taus` and `centers`.
    """
    @pm.deterministic
    def lambda_(tau=tau, lambda_1=lambda_1, lambda_2=lambda_2):
        out = np.zeros(n_data_points)
        out[:tau] = lambda_1  # lambda before tau is lambda1
        out[tau:] = lambda_2  # lambda after tau is lambda2
        return out

    def delta(p_A=lambda_1, p_B=lambda_2):
        return p_A - p_B

    obs = pm.Poisson("obs", lambda_, value=test, observed=True)
    model = pm.Model([obs, lambda_, lambda_1, lambda_2, tau, delta])
    mcmc = pm.MCMC(model)
    mcmc.sample(5000, 1000, 1)
    return mcmc, 5000, 1


def main_plotter(stats, test):
    mcmc, N, bin = stats
    n_count_data = len(test)
    lambda_1_samples = mcmc.trace('lambda_1')[:]
    lambda_2_samples = mcmc.trace('lambda_2')[:]
    tau_samples = mcmc.trace('tau')[:]
    delta_samples = mcmc.trace('delta')
    print(delta_samples)


data = [1, 2, 1, 2.2, 5, 5.5, 6, 5.4]
main_plotter(cool(data), data)
In the example, no variable is created for delta, so no key value is inserted. Whenever I run this code it tells me it can't find the key. My question is: what do I need to do to access the delta posterior data?
You are missing the deterministic decorator before the delta function definition. It works if you make the following change, starting at line 21:
@pm.deterministic
def delta(p_A=lambda_1, p_B=lambda_2):
    return p_A - p_B
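With the decorator in place, delta gets a trace like the other variables, and the posterior samples can be pulled out with [:] (a brief sketch):

delta_samples = mcmc.trace('delta')[:]
print(delta_samples.mean())  # posterior mean of lambda_1 - lambda_2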