Matplotlib for multi-panel plots of subgraphs

I would like to plot multi-panel graphs using the following data:
CHROM BIN_START BIN_END N_VARIANTS PI
NODE_10_length_497_cov_170.299805 1 10000 39 0.000703581
NODE_13_length_1438_cov_357.659943 1 10000 61 0.00132786
NODE_15_length_733_cov_44.686222 1 10000 25 5.73165e-05
NODE_16_length_8151_cov_58.001228 1 10000 525 0.0129994
NODE_18_length_98839_cov_51.306377 1 10000 753 0.0158054
NODE_18_length_98839_cov_51.306377 10001 20000 644 0.0147527
NODE_18_length_98839_cov_51.306377 20001 30000 783 0.0180735
NODE_18_length_98839_cov_51.306377 30001 40000 433 0.00950069
NODE_18_length_98839_cov_51.306377 40001 50000 568 0.0134851
NODE_18_length_98839_cov_51.306377 50001 60000 634 0.0107662
NODE_18_length_98839_cov_51.306377 60001 70000 501 0.0104874
NODE_18_length_98839_cov_51.306377 70001 80000 736 0.0142129
NODE_18_length_98839_cov_51.306377 80001 90000 803 0.0177756
NODE_18_length_98839_cov_51.306377 90001 100000 683 0.015499
NODE_20_length_219211_cov_54.980137 1 10000 803 0.0188128
NODE_20_length_219211_cov_54.980137 10001 20000 577 0.0152466
NODE_20_length_219211_cov_54.980137 20001 30000 596 0.0149394
NODE_20_length_219211_cov_54.980137 30001 40000 982 0.0230532
NODE_20_length_219211_cov_54.980137 40001 50000 991 0.0211885
NODE_20_length_219211_cov_54.980137 50001 60000 659 0.0154533
NODE_20_length_219211_cov_54.980137 60001 70000 752 0.0171805
NODE_20_length_219211_cov_54.980137 70001 80000 725 0.0168676
NODE_20_length_219211_cov_54.980137 80001 90000 836 0.0199372
NODE_20_length_219211_cov_54.980137 90001 100000 652 0.0116497
NODE_20_length_219211_cov_54.980137 100001 110000 788 0.0176188
NODE_20_length_219211_cov_54.980137 110001 120000 278 0.00549361
NODE_20_length_219211_cov_54.980137 120001 130000 499 0.0120812
NODE_20_length_219211_cov_54.980137 130001 140000 738 0.0161006
NODE_20_length_219211_cov_54.980137 140001 150000 729 0.0148927
NODE_20_length_219211_cov_54.980137 150001 160000 611 0.015047
NODE_20_length_219211_cov_54.980137 160001 170000 737 0.0175226
NODE_20_length_219211_cov_54.980137 170001 180000 709 0.0153575
NODE_20_length_219211_cov_54.980137 180001 190000 688 0.0133387
NODE_20_length_219211_cov_54.980137 190001 200000 693 0.0145321
NODE_20_length_219211_cov_54.980137 200001 210000 564 0.0122298
NODE_20_length_219211_cov_54.980137 210001 220000 687 0.0140385
NODE_22_length_248_cov_293.927429 1 10000 23 0.000482139
NODE_24_length_104_cov_29.778847 1 10000 6 2.73246e-05
NODE_29_length_319744_cov_53.127953 1 10000 759 0.0155234
NODE_29_length_319744_cov_53.127953 10001 20000 741 0.0182896
NODE_29_length_319744_cov_53.127953 20001 30000 709 0.0147025
NODE_29_length_319744_cov_53.127953 30001 40000 755 0.0159811
...
I have used the following python code.
import matplotlib.pyplot as plt

chrs = {}
with open('diversity_level.windowed.pi', 'r') as f:
    info = f.readlines()
for i in range(2, len(info)):  # skip the first line
    linfo = info[i].split("\t")
    if (linfo[0] in chrs):
        chrs[linfo[0]][0].append(int(linfo[1]))
        chrs[linfo[0]][1].append(float(linfo[4]))
    else:
        chrs[linfo[0]] = [[] for i in range(2)]
fig, axs = plt.subplots(len(chrs), 1)
fig.subplots_adjust(wspace=50, hspace=0.1)
i = 0
for chr, data in chrs.items():
    axs[i].plot(data[0], data[1])
    axs[i].set_xlabel(chr)
    axs[i].set_ylabel('pi')
    axs[i].grid(True)
    i = i + 1
plt.show()
The obtained result is shown below. [figure: multi-panel plot in which the x-axis labels of the subplots overlap and several panels are empty]
May I know how to adjust the parameters in the code so that the X-axis label of each subgraph can be seen?
How can I choose not to output empty subgraphs?
Thanks in advance.
To gboffi,
Since all the data is inside the file 'diversity_level.windowed.pi', how can I read the data in the file (without the header) and put it in the dictionary for plotting the subgraphs?
from matplotlib.pyplot import show, subplots

data_dict = {}
with open('diversity_level.windowed.pi', 'r') as f:
    data = f.readlines()
for line in data[1:]:  # skip the first line, the header
    record = line.split()
    key, start, pi = record[0], int(record[1]), float(record[4])
    start_list, pi_list = data_dict.get(key, [[], []])
    start_list.append(start), pi_list.append(pi)
    data_dict[key] = [start_list, pi_list]
for key in list(data_dict.keys()):
    if len(data_dict[key][0]) == 1: data_dict.pop(key)
fig, axs = subplots(len(data_dict), constrained_layout=1,
                    sharex=True, sharey=True)
for ax, (key, (start_list, pi_list)) in zip(axs, data_dict.items()):
    ax.plot(start_list, pi_list)
    ax.set_xlabel(key, size='x-small')
    ax.set_ylabel('pi', size='x-small')
    ax.tick_params(axis='both', labelsize='x-small')
    ax.grid(True)
    ax.set_ylim(bottom=0.0)
show()

Here is my attempt; note that I have reorganized your code a little because I was feeling overwhelmed by the number of indices you used.
To remove the "empty" sequences I loop over the keys, and if the length of a sequence is exactly 1 I remove (that is, pop) the corresponding dictionary entry.
To solve the issue with the cramped plots, it is usually a good idea to ask the figure to respect constrained_layout (a relatively new feature that, imho, was really missing!).
from matplotlib.pyplot import show, subplots

# iterate over the lines of a file opened for reading
# each line is a record
data = [record.split() for record in open('your_file_name', 'r')]

data_dict = {}
# we iterate on all the records, except the first one (Python counts from 0)
for record in data[1:]:
    # unpack the "interesting" stuff
    key, start, pi = record[0], int(record[1]), float(record[4])
    # get what is inside the dictionary item, using unpacking
    # if item is "new" we get the default value, that is two empty lists
    start_list, pi_list = data_dict.get(key, [[], []])
    # append the values to the respective lists
    start_list.append(start), pi_list.append(pi)
    # update the dictionary item with the augmented lists
    data_dict[key] = [start_list, pi_list]

# we remove the "non interesting" entries from the dictionary
# using the `.pop()` method
# note the use of `list(...)` to have a static reference to the keys
for key in list(data_dict.keys()):
    if len(data_dict[key][0]) == 1: data_dict.pop(key)

# instantiate the figure and the axes; using `constrained_layout`
# usually leads to a better arrangement of the figure elements
fig, axs = subplots(len(data_dict), constrained_layout=1)

# except for a moderate abuse of the unpacking syntax,
# everything should be clear …
for ax, (key, (start_list, pi_list)) in zip(axs, data_dict.items()):
    ax.plot(start_list, pi_list)
    ax.set_xlabel(key)
    ax.set_ylabel('pi')
    ax.grid(True)
show()
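One edge case worth noting: if the filtering leaves exactly one entry, subplots returns a bare Axes rather than an array, and the zip above raises a TypeError. A minimal guard, assuming the same data_dict as above:
from numpy import atleast_1d

fig, axs = subplots(len(data_dict), constrained_layout=1)
# atleast_1d wraps a lone Axes in an array, so the zip works for any panel count
for ax, (key, (start_list, pi_list)) in zip(atleast_1d(axs), data_dict.items()):
    ax.plot(start_list, pi_list)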
A twist on the subject: if the data instances must be compared, I prefer to have the same axes in each subplot (hence sharex and sharey in subplots) and also, in this particular case, to set the lower y limit to zero; because the figure is rather busy, here I also show how to reduce the size of the labels and of the tick labels.
...
fig, axs = subplots(len(data_dict), constrained_layout=1,
                    sharex=True, sharey=True)
for ax, (key, (start_list, pi_list)) in zip(axs, data_dict.items()):
    ax.plot(start_list, pi_list)
    ax.set_xlabel(key, size='x-small')
    ax.set_ylabel('pi', size='x-small')
    ax.tick_params(axis='both', labelsize='x-small')
    ax.grid(True)
    ax.set_ylim(bottom=0.0)
show()
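As a further tweak (assuming Matplotlib 3.4 or later), the per-panel 'pi' labels can be dropped from the loop and replaced by a single figure-level label:
# one shared y label for the whole column of panels
fig.supylabel('pi', size='x-small')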

Since it is an assignment, the following is the plot of the complete data. [figure: multi-panel plot of the complete dataset]
Here is the whole code from above, adapted accordingly:
from matplotlib.pyplot import show, subplots

# iterate over the lines of a file opened for reading
# each line is a record
data = [record.split() for record in open('diversity_level.windowed.pi', 'r')]

data_dict = {}
# we iterate on all the records, except the first one (Python counts from 0)
for record in data[1:]:
    # unpack the "interesting" stuff
    key, start, pi = record[0], int(record[1]), float(record[4])
    # get what is inside the dictionary item, using unpacking
    # if item is "new" we get the default value, that is two empty lists
    start_list, pi_list = data_dict.get(key, [[], []])
    # append the values to the respective lists
    start_list.append(start), pi_list.append(pi)
    # update the dictionary item with the augmented lists
    data_dict[key] = [start_list, pi_list]

# we remove the "non interesting" entries from the dictionary
# using the `.pop()` method
# note the use of `list(...)` to have a static reference to the keys
for key in list(data_dict.keys()):
    if len(data_dict[key][0]) == 1: data_dict.pop(key)

# instantiate the figure and the axes; using `constrained_layout`
# usually leads to a better arrangement of the figure elements
fig, axs = subplots(len(data_dict), constrained_layout=1)

# except for a moderate abuse of the unpacking syntax,
# everything should be clear
for ax, (key, (start_list, pi_list)) in zip(axs, data_dict.items()):
    ax.plot(start_list, pi_list)
    ax.set_xlabel(key)
    ax.set_ylabel('pi')
    ax.grid(True)
    ax.set_ylim(bottom=0.0)
show()

Related

Filtering out a chemical dataset according to information in columns

I'm working with a chemical dataset and I was wondering about the smartest way to do the following thing. My dataset looks something like this:
formula Temperature (Kelvin) (Physical) Property Value
CO2 298 5
CO2 298 7.6
CO2 300 3.2
NaCl 300 3.4
NaCl 296 1.4
H2O 298 7.2
H2O 298 8.3
H2O 293 6.4
ZnO 300 3.10
ZnO 290 1.2
FeO 295 4.6
FeO 290 3.6
Given that Room Temperature := 298K,
what I would like to accomplish is to filter the original dataset in order to have only values reported with Room Temperature when it is available, and if there's no value reported at room temperature, I would like to keep the closest value to the room temperature that is available. According to what I would like to achieve, the sample initial dataset above would become something like
formula Temperature (Kelvin) (Physical) Property Value
CO2 298 5
CO2 298 7.6
NaCl 300 3.4
H2O 298 7.2
H2O 298 8.3
ZnO 300 3.10
FeO 295 4.6
Maybe I should use a lambda expression?
Any suggestions on how to achieve something like this?
Many thanks,
James
We can first filter the "good" ones, i.e., those that are at 298 K. Then we sort the remaining rows by their distance to 298 K and drop the duplicates, keeping only the closest. Lastly, we merge the good ones with these:
import pandas as pd

# room temp in K
rt = 298

# taking those that have `rt` K temp
good_ones = df[df["Temperature (Kelvin)"].eq(rt)]
good_names = good_ones.formula.unique()

# getting the others
others = df[~df.formula.isin(good_names)]

# filtering the others according to their distance to `rt`
sorter = lambda s: s.sub(rt).abs()
others_filtered = (others
                   .sort_values("Temperature (Kelvin)", key=sorter)
                   .drop_duplicates("formula", keep="first"))

# merging them all
result = pd.concat([good_ones, others_filtered]).sort_index(ignore_index=True)
to get
>>> result
formula Temperature (Kelvin) (Physical) Property Value
0 CO2 298 5.0
1 CO2 298 7.6
2 NaCl 300 3.4
3 H2O 298 7.2
4 H2O 298 8.3
5 ZnO 300 3.1
6 FeO 295 4.6
There's also the apply way:
def filter_temp(gr):
    # get the temp column and a bool Series where it equals `rt`
    temps = gr["Temperature (Kelvin)"]
    rt_temps = temps.eq(rt)
    # does any temp match `rt`?
    if rt_temps.any():
        # then return the rows where it matches
        return gr[rt_temps]
    else:
        # otherwise return the closest one
        return gr.loc[[temps.sub(rt).abs().idxmin()]]

result = (df.groupby("formula", as_index=False, group_keys=False)
          .apply(filter_temp).sort_index(ignore_index=True))
The idea here is to group the rows by formula. Each group is then filtered to keep all rows at the required room temperature if there are any, or else the single row with the closest temperature. Let's define this function:
def temperature_filter(df, room_temp, temp_col="Temperature (Kelvin)"):
    if room_temp in df[temp_col].values:
        return df[df[temp_col] == room_temp]
    else:
        return df.loc[[abs(df[temp_col] - room_temp).idxmin()]]
It only remains to apply this function to each group:
ROOM_TEMP = 298
df.groupby("formula", sort=False).apply(temperature_filter, ROOM_TEMP).droplevel("formula")
Note that temperature_filter has been written with clarity in mind, but it can also be inlined as a lambda function to reach a one-line solution!
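For instance, a sketch of that one-liner (the same logic as temperature_filter, just inlined):
ROOM_TEMP = 298
(df.groupby("formula", sort=False)
   .apply(lambda g: g[g["Temperature (Kelvin)"] == ROOM_TEMP]
          if (g["Temperature (Kelvin)"] == ROOM_TEMP).any()
          else g.loc[[(g["Temperature (Kelvin)"] - ROOM_TEMP).abs().idxmin()]])
   .droplevel("formula"))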
def filter_closest_to_rt(df, rt=298):
    df['tmrt'] = df['Temperature (Kelvin)'].sub(rt).abs()
    return df[df['tmrt'] == df.groupby('formula')['tmrt'].transform('min')].drop(columns='tmrt')

filter_closest_to_rt(df)
formula Temperature (Kelvin) (Physical) Property Value
0 CO2 298 5.0
1 CO2 298 7.6
3 NaCl 300 3.4
4 NaCl 296 1.4
5 H2O 298 7.2
6 H2O 298 8.3
8 ZnO 300 3.1
10 FeO 295 4.6
How about this:
import numpy as np

df['val'] = np.abs(df['Temperature (Kelvin)'] - 298)
df = df.sort_values(['formula', 'val'], ascending=[True, True])
df = df.drop_duplicates(subset='formula', keep="first")
To make sure you don't lose any 298 duplicates, another solution is:
df['val'] = np.abs(df['Temperature (Kelvin)'] - 298)
the_298s = df[df['Temperature (Kelvin)'] == 298]
others = df[df['Temperature (Kelvin)'] != 298]
others = others.sort_values(['formula', 'val'], ascending=[True, True])
others = others.drop_duplicates(subset='formula', keep="first")
the_298s_formulas = the_298s.formula.unique()
others = others[~others.formula.isin(the_298s_formulas)]
final_df = pd.concat([the_298s, others])  # DataFrame.append was removed in pandas 2.0

Can I use a CSV in Spark MLLib?

I'm new to using Spark's MLLib Python API. I have my data in CSV format like so:
Label 0 1 2 3 4 5 6 7 8 9 ... 758 759 760 761 762 763 764 765 766 767
0 -0.168307 -0.277797 -0.248202 -0.069546 0.176131 -0.152401 0.12664 -0.401460 0.125926 0.279061 ... -0.289871 0.207264 -0.140448 -0.426980 -0.328994 0.328007 0.486793 0.222587 0.650064 -0.513640
3 -0.313138 -0.045043 0.279587 -0.402598 -0.165238 -0.464669 0.09019 0.008703 0.074541 0.142638 ... -0.094025 0.036567 -0.059926 -0.492336 -0.006370 0.108954 0.350182 -0.144818 0.306949 -0.216190
2 -0.379293 -0.340999 0.319142 0.024552 0.142129 0.042989 -0.60938 0.052103 -0.293400 0.162741 ... 0.108854 -0.025618 0.149078 -0.917385 0.110629 0.146427
Can I use this as is by loading it using df = spark.read.format("csv").option("header", "true").load("file.csv")? I'm attempting to train a Random Forest model. I've tried researching it, but it doesn't seem to be a big topic. I don't want to just attempt it without being fully sure it would work because the cluster I use has long queue times.
Yes! You'll want to infer the schema too:
df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load("file.csv")
If you have many files with the same column names and data types, save the schema so you can reuse it:
schema = df.schema
Then the next time you read a CSV file with the same columns, pass the saved schema to the reader (note that it goes through the schema() method rather than an option):
df = spark.read.format("csv").option("header", "true").schema(schema).load("file.csv")
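Since the goal is training a Random Forest, here is a minimal sketch of the remaining steps, assuming the label column is named "Label" and every other column is a numeric feature (the column names and the numTrees value are illustrative, not from the question):
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.classification import RandomForestClassifier

# pack the individual feature columns into the single vector column Spark ML expects
feature_cols = [c for c in df.columns if c != "Label"]
assembler = VectorAssembler(inputCols=feature_cols, outputCol="features")
train_df = assembler.transform(df)

# fit the model; numTrees is just an illustrative choice
rf = RandomForestClassifier(labelCol="Label", featuresCol="features", numTrees=100)
model = rf.fit(train_df)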

How to debug a "IndexError: invalid index to scalar variable" error in Python?

This is my code:
import matplotlib.pyplot as plt
import matplotlib.patches as pat

oval = pat.Ellipse(v1_mean,v2_mean,v1_std*2,v2_std*2)
fig,graph = plt.subplots()
graph.scatter(v1,v2)
graph.scatter(v1_mean,v2_mean, s=100)
graph.text(v1_mean,v2_mean, 'Mean')
graph.add_patch(oval)
And this is the error that comes:
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-40-2278a0e6f4cf> in <module>()
7 graph.scatter(v1_mean,v2_mean, s=100)
8 graph.text(v1_mean,v2_mean, 'Mean')
----> 9 graph.add_patch(oval)
10
11 graph.xlabel('V1')
/opt/conda/lib/python3.6/site-packages/matplotlib/axes/_base.py in add_patch(self, p)
2033 if p.get_clip_path() is None:
2034 p.set_clip_path(self.patch)
-> 2035 self._update_patch_limits(p)
2036 self.patches.append(p)
2037 p._remove_method = lambda h: self.patches.remove(h)
/opt/conda/lib/python3.6/site-packages/matplotlib/axes/_base.py in _update_patch_limits(self, patch)
2053 vertices = patch.get_path().vertices
2054 if vertices.size > 0:
-> 2055 xys = patch.get_patch_transform().transform(vertices)
2056 if patch.get_data_transform() != self.transData:
2057 patch_to_data = (patch.get_data_transform() -
/opt/conda/lib/python3.6/site-packages/matplotlib/patches.py in get_patch_transform(self)
1492
1493 def get_patch_transform(self):
-> 1494 self._recompute_transform()
1495 return self._patch_transform
1496
/opt/conda/lib/python3.6/site-packages/matplotlib/patches.py in _recompute_transform(self)
1476 not directly access the transformation member variable.
1477 """
-> 1478 center = (self.convert_xunits(self.center[0]),
1479 self.convert_yunits(self.center[1]))
1480 width = self.convert_xunits(self.width)
IndexError: invalid index to scalar variable.
Basically, what I am trying to do is plot an oval shape and some data on the same graph. It seems like the error has to do with the center of the oval, but I don't know what exactly is wrong. It's strange that I followed exactly what the teacher did, but mine raises an error while his is OK.
It's strange that I followed exactly what the teacher has done, but mine came with an error while his is ok.
Probably you didn't follow exactly. According to the documentation of matplotlib.patches.Ellipse, the xy coordinates of the ellipse centre must be given as a tuple rather than as individual arguments, so it's not
oval = pat.Ellipse(v1_mean,v2_mean,v1_std*2,v2_std*2)
but
oval = pat.Ellipse((v1_mean, v2_mean), v1_std*2, v2_std*2)
instead. Unfortunately, Ellipse doesn't warn about this and silently stores a single number as the ellipse center.
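For completeness, a minimal self-contained version with made-up data (the variable names mirror the question; the random values are only illustrative):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as pat

v1, v2 = np.random.randn(100), np.random.randn(100)
v1_mean, v2_mean = v1.mean(), v2.mean()
v1_std, v2_std = v1.std(), v2.std()

# the centre is passed as a single (x, y) tuple
oval = pat.Ellipse((v1_mean, v2_mean), v1_std*2, v2_std*2, fill=False)
fig, graph = plt.subplots()
graph.scatter(v1, v2)
graph.scatter(v1_mean, v2_mean, s=100)
graph.text(v1_mean, v2_mean, 'Mean')
graph.add_patch(oval)
plt.show()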

Conditional operation on Pandas dataframe

I have a dataset where one of the columns holds a total sq. ft. value:
1151
1025
2100 - 2850
1075
1760
I would like to split entries such as "2100 - 2850" when the value contains '-' and take the average (mean) as the new value. I am trying to achieve this using the apply method, but I run into an error when the statement containing contains executes. Please suggest how to handle this situation.
def convert_totSqft(s):
    if s.str.contains('-', regex=False) == True:
        << some statements >>
    else:
        << some statements >>

X['new_col'] = X['total_sqft'].apply(convert_totSqft)
Error message:
File "<ipython-input-6-af39b196879b>", line 2, in convert_totSqft
if s.str.contains('-', regex=False) == True:
AttributeError: 'str' object has no attribute 'str'
IIUC
df.col.str.split('-',expand=True).apply(pd.to_numeric).mean(1)
Out[630]:
0 1151.0
1 1025.0
2 2475.0
3 1075.0
4 1760.0
dtype: float64
IIUC, you can split by '-' anyway and just transform using np.mean, since the mean of a single number is just the number itself:
import numpy as np

df.col.str.split('-').transform(lambda s: np.mean([int(x.strip()) for x in s]))
0 1151.0
1 1025.0
2 2475.0
3 1075.0
4 1760.0
Alternatively, you can sum and divide by len (the same thing):
df.col.str.split('-').transform(lambda s: sum([int(x.strip()) for x in s])/len(s))
If you necessarily want the results back as int, just wrap it with int():
df.col.str.split('-').transform(lambda s: int(np.mean([int(x.strip()) for x in s])))
0 1151
1 1025
2 2475
3 1075
4 1760
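As for the error in the original convert_totSqft: apply passes each cell to the function as a plain str, not a Series, so there is no .str accessor; a plain 'in' test works instead. A minimal sketch of a fixed function (the body is an assumption, since the original statements are elided):
def convert_totSqft(s):
    if '-' in s:
        low, high = s.split('-')
        return (float(low) + float(high)) / 2
    else:
        return float(s)

X['new_col'] = X['total_sqft'].apply(convert_totSqft)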

Error matplotlib.lines.Line2D object at 0x025B8350

>>> import numpy as np
>>> import pylab as pl
>>> x = np.linspace(0,4*np.pi, 100)
>>> pl.plot(x, np.sin(x))
[<matplotlib.lines.Line2D object at 0x025B8350>]
After installing numpy, scipy, sympy, matplotlib, and ipython, starting IPython crashes with:
---------------------------------------------------------------------------
TypeError Python 2.7.3: C:\Python27\python.exe
Fri Sep 28 09:59:01 2012
A problem occured executing Python code. Here is the sequence of function
calls leading up to the error, with the most recent (innermost) call last.
C:\Python27\scripts\ipython.py in <module>()
13
14 [or simply IPython.Shell.IPShell().mainloop(1) ]
15
16 and IPython will be your working environment when you start python. The final
17 sys.exit() call will make python exit transparently when IPython finishes, so
18 you don't have an extra prompt to get out of.
19
20 This is probably useful to developers who manage multiple Python versions and
21 don't want to have correspondingly multiple IPython versions. Note that in
22 this mode, there is no way to pass IPython any command-line options, as those
23 are trapped first by Python itself.
24 """
25
26 import IPython.Shell
27
---> 28 IPython.Shell.start().mainloop()
global IPython.Shell.start.mainloop = undefined
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
C:\Python27\lib\site-packages\IPython\Shell.pyc in start(user_ns=None)
1244
1245 # New versions of pygtk don't need the brittle threaded support.
1246 th_mode = check_gtk(th_mode)
1247 return th_shell[th_mode]
1248
1249
1250 # This is the one which should be called by external code.
1251 def start(user_ns = None):
1252 """Return a running shell instance, dealing with threading options.
1253
1254 This is a factory function which will instantiate the proper IPython shell
1255 based on the user's threading choice. Such a selector is needed because
1256 different GUI toolkits require different thread handling details."""
1257
1258 shell = _select_shell(sys.argv)
-> 1259 return shell(user_ns = user_ns)
1260
1261 # Some aliases for backwards compatibility
1262 IPythonShell = IPShell
1263 IPythonShellEmbed = IPShellEmbed
1264 #************************ End of file <Shell.py> ***************************
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
C:\Python27\lib\site-packages\IPython\Shell.pyc in __init__(self=<IPython.Shell.IPShell instance>, argv=None, user_ns=None, user_global_ns=None, debug=1, shell_class=<class 'IPython.iplib.InteractiveShell'>)
58 # Default timeout for waiting for multithreaded shells (in seconds)
59 GUI_TIMEOUT = 10
60
61 #-----------------------------------------------------------------------------
62 # This class is trivial now, but I want to have it in to publish a clean
63 # interface. Later when the internals are reorganized, code that uses this
64 # shouldn't have to change.
65
66 class IPShell:
67 """Create an IPython instance."""
68
69 def __init__(self,argv=None,user_ns=None,user_global_ns=None,
70 debug=1,shell_class=InteractiveShell):
71 self.IP = make_IPython(argv,user_ns=user_ns,
72 user_global_ns=user_global_ns,
---> 73 debug=debug,shell_class=shell_class)
global For = undefined
global more = undefined
global details = undefined
global see = undefined
global the = undefined
global __call__ = undefined
global method = undefined
global below. = undefined
74
75 def mainloop(self,sys_exit=0,banner=None):
76 self.IP.mainloop(banner)
77 if sys_exit:
78 sys.exit()
79
80 #-----------------------------------------------------------------------------
81 def kill_embedded(self,parameter_s=''):
82 """%kill_embedded : deactivate for good the current embedded IPython.
83
84 This function (after asking for confirmation) sets an internal flag so that
85 an embedded IPython will never activate again. This is useful to
86 permanently disable a shell that is being called inside a loop: once you've
87 figured out what you needed from it, you may then kill it and the program
88 will then continue to run without the interactive shell interfering again.
C:\Python27\lib\site-packages\IPython\ipmaker.pyc in make_IPython(argv=[r'C:\Python27\scripts\ipython.py'], user_ns=None, user_global_ns=None, debug=1, rc_override=None, shell_class=<class 'IPython.iplib.InteractiveShell'>, embedded=False, **kw={})
506 # tweaks. Basically options which affect other options. I guess this
507 # should just be written so that options are fully orthogonal and we
508 # wouldn't worry about this stuff!
509
510 if IP_rc.classic:
511 IP_rc.quick = 1
512 IP_rc.cache_size = 0
513 IP_rc.pprint = 0
514 IP_rc.prompt_in1 = '>>> '
515 IP_rc.prompt_in2 = '... '
516 IP_rc.prompt_out = ''
517 IP_rc.separate_in = IP_rc.separate_out = IP_rc.separate_out2 = '0'
518 IP_rc.colors = 'NoColor'
519 IP_rc.xmode = 'Plain'
520
--> 521 IP.pre_config_initialization()
522 # configure readline
523
524 # update exception handlers with rc file status
525 otrap.trap_out() # I don't want these messages ever.
526 IP.magic_xmode(IP_rc.xmode)
527 otrap.release_out()
528
529 # activate logging if requested and not reloading a log
530 if IP_rc.logplay:
531 IP.magic_logstart(IP_rc.logplay + ' append')
532 elif IP_rc.logfile:
533 IP.magic_logstart(IP_rc.logfile)
534 elif IP_rc.log:
535 IP.magic_logstart()
536
C:\Python27\lib\site-packages\IPython\iplib.pyc in pre_config_initialization(self=<IPython.iplib.InteractiveShell object>)
820 self.user_ns, # globals
821 # Skip our own frame in searching for locals:
822 sys._getframe(depth+1).f_locals # locals
823 ))
824
825 def pre_config_initialization(self):
826 """Pre-configuration init method
827
828 This is called before the configuration files are processed to
829 prepare the services the config files might need.
830
831 self.rc already has reasonable default values at this point.
832 """
833 rc = self.rc
834 try:
--> 835 self.db = pickleshare.PickleShareDB(rc.ipythondir + "/db")
global Optional = undefined
global inputs = undefined
836 except exceptions.UnicodeDecodeError:
837 print "Your ipythondir can't be decoded to unicode!"
838 print "Please set HOME environment variable to something that"
839 print r"only has ASCII characters, e.g. c:\home"
840 print "Now it is",rc.ipythondir
841 sys.exit()
842 self.shadowhist = IPython.history.ShadowHist(self.db)
843
844 def post_config_initialization(self):
845 """Post configuration init method
846
847 This is called after the configuration files have been processed to
848 'finalize' the initialization."""
849
850 rc = self.rc
C:\Python27\lib\site-packages\IPython\Extensions\pickleshare.pyc in __init__(self=PickleShareDB('C:\Documents and Settings\martinhylee\_ipython\db'), root=u'C:\\Documents and Settings\\martinhylee\\_ipython/db')
38 import cPickle as pickle
39 import UserDict
40 import warnings
41 import glob
42
43 def gethashfile(key):
44 return ("%02x" % abs(hash(key) % 256))[-2:]
45
46 _sentinel = object()
47
48 class PickleShareDB(UserDict.DictMixin):
49 """ The main 'connection' object for PickleShare database """
50 def __init__(self,root):
51 """ Return a db object that will manage the specied directory"""
52 self.root = Path(root).expanduser().abspath()
---> 53 if not self.root.isdir():
54 self.root.makedirs()
55 # cache has { 'key' : (obj, orig_mod_time) }
56 self.cache = {}
57
58
59 def __getitem__(self,key):
60 """ db['key'] reading """
61 fil = self.root / key
62 try:
63 mtime = (fil.stat()[stat.ST_MTIME])
64 except OSError:
65 raise KeyError(key)
66
67 if fil in self.cache and mtime == self.cache[fil][1]:
68 return self.cache[fil][0]
TypeError: _isdir() takes exactly 1 argument (0 given)
**********************************************************************
Oops, IPython crashed. We do our best to make it stable, but...
A crash report was automatically generated with the following information:
- A verbatim copy of the crash traceback.
- A copy of your input history during this session.
- Data on your current IPython configuration.
It was left in the file named:
'C:\Documents and Settings\martinhylee\_ipython\IPython_crash_report.txt'
If you can email this file to the developers, the information in it will help
them in understanding and correcting the problem.
You can mail it to: Fernando Perez at fperez.net@gmail.com
with the subject 'IPython Crash Report'.
If you want to do it now, the following command will work (under Unix):
mail -s 'IPython Crash Report' fperez.net@gmail.com < C:\Documents and Settings\martinhylee\_ipython\IPython_crash_report.txt
To ensure accurate tracking of this issue, please file a report about it at:
https://bugs.launchpad.net/ipython/+filebug
Error in sys.excepthook:
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\IPython\CrashHandler.py", line 157, in __call__
report.write(self.make_report(traceback))
File "C:\Python27\lib\site-packages\IPython\CrashHandler.py", line 215, in make_report
rpt_add('BZR revision : %s \n\n' % Release.revision)
AttributeError: 'module' object has no attribute 'revision'
Original exception was:
Traceback (most recent call last):
File "C:\Python27\scripts\ipython.py", line 28, in <module>
IPython.Shell.start().mainloop()
File "C:\Python27\lib\site-packages\IPython\Shell.py", line 1259, in start
return shell(user_ns = user_ns)
File "C:\Python27\lib\site-packages\IPython\Shell.py", line 73, in __init__
debug=debug,shell_class=shell_class)
File "C:\Python27\lib\site-packages\IPython\ipmaker.py", line 521, in make_IPython
IP.pre_config_initialization()
File "C:\Python27\lib\site-packages\IPython\iplib.py", line 835, in pre_config_initialization
self.db = pickleshare.PickleShareDB(rc.ipythondir + "/db")
File "C:\Python27\lib\site-packages\IPython\Extensions\pickleshare.py", line 53, in __init__
if not self.root.isdir():
TypeError: _isdir() takes exactly 1 argument (0 given)
The [<matplotlib.lines.Line2D object at 0x025B8350>] line is not an error; it is just the repr of the object returned by plot, and the figure only appears once show() is called. Try this:
import numpy as np
from pylab import plot, show

x = np.linspace(0, 4 * np.pi, 100)
plot(x, np.sin(x))
show()