ImportError: No module named 'svmutil' - libsvm

I'm stuck right now; here is my code:
import sys
import os
import itertools
import random
from PIL import Image
from svmutil import *
DIMENSION = 200
ROOT_DIR = "../train/"
NEGATIVE = "negative"
POSITIVE = "positive"
CLASSES = [NEGATIVE, POSITIVE]
....
and it says:
ImportError: No module named 'svmutil'
I am using Python 3.5.
What should I do now?
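For context, a hedged sketch (the package name and path below are assumptions, not a verified fix): svmutil comes from libsvm's own Python bindings, so it has to be made importable either by putting libsvm's python/ directory on sys.path or by installing a packaged build such as libsvm-official, whose layout nests svmutil under the libsvm package:
import sys

# Option 1: point Python at a source checkout of libsvm (example path)
sys.path.append("/path/to/libsvm/python")

try:
    from svmutil import *            # classic layout: svmutil.py found on sys.path
except ImportError:
    from libsvm.svmutil import *     # layout used by the pip-installed package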

Related

Cannot import name 'QQ' from 'sympy.polys.domains.rationalfield'

I am using conda with Python 3.8. I installed sympy and I am not sure about the following error:
ImportError: cannot import name 'QQ' from 'sympy.polys.domains.rationalfield'
Here is the import command:
from sympy.polys.domains.rationalfield import QQ
And here is the rationalfield.py file:
"""Implementation of :class:`RationalField` class. """
from __future__ import print_function, division
from sympy.polys.domains.characteristiczero import CharacteristicZero
from sympy.polys.domains.field import Field
from sympy.polys.domains.simpledomain import SimpleDomain
from sympy.utilities import public
#public
class RationalField(Field, CharacteristicZero, SimpleDomain):
"""General class for rational fields. """
rep = 'QQ'
is_RationalField = is_QQ = True
is_Numerical = True
has_assoc_Ring = True
has_assoc_Field = True
def algebraic_field(self, *extension):
r"""Returns an algebraic field, i.e. `\mathbb{Q}(\alpha, \ldots)`. """
from sympy.polys.domains import AlgebraicField
return AlgebraicField(self, *extension)
def from_AlgebraicField(K1, a, K0):
"""Convert a ``ANP`` object to ``dtype``. """
if a.is_ground:
return K1.convert(a.LC(), K0.dom)
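For what it's worth, a hedged sketch: in this sympy version the QQ singleton is not created inside rationalfield.py (the file above only defines the RationalField class), so importing it from the public namespace is the usual workaround:
# Minimal sketch: import the ready-made QQ domain instead of reaching into
# sympy.polys.domains.rationalfield, which only defines the class here.
from sympy import QQ                    # or: from sympy.polys.domains import QQ

print(QQ)        # QQ
print(QQ(1, 3))  # 1/3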

Accessing methods within a class from bokeh FileInput widget

I am working on a Bokeh serve UI and am running into trouble interfacing a class (and its methods) with the FileInput widget. I am using a class (in this example, called "EIS_data") which, when instantiated, loads a file using pd.read_csv. The EIS_data class also has a method to plot the data in a particular way, and I'd like to be able to load the pandas dataframe and call and manipulate the data using the methods already in place in the class.
So far, I have been able to load the data successfully using the FileInput widget, but I can't figure out how to access the dataframe again once it's loaded. In a standalone Jupyter notebook, I could run d = EIS_data("filename") and then d.plot() to load the data into a pandas dataframe and plot it according to the method defined in the EIS_data class, but I can't figure out how to replicate this in the UI code once the data are loaded through the FileInput widget.
Is there a way I can interface this with Bokeh widgets, such that I could simply add d.plot() to curdoc()? I have found a workaround using ColumnDataSource, but it seems a shame to redefine plotting methods and data handling when they are already defined in the class. Below are minimal working examples of the UI code and the class definition.
UI Code:
import numpy as np
import pandas as pd
from eis_analysis_trimmed import EIS_data
import bokeh
from bokeh.io import curdoc
from bokeh import layouts
from bokeh.layouts import column, row, gridplot
from bokeh.plotting import figure
from bokeh.models import *
import base64
import io

## Instantiate the EIS_data class for loading data
def load_data(f):
    return EIS_data(f)

## updater function called to load data with FileInput widget
## Must be decoded using base64
def load_file(attr, old, new):
    decoded = base64.b64decode(new)
    d = io.BytesIO(decoded)
    dat = load_data(d)
    print(dat.df)
    print(dat)
    print("EIS Data Uploaded Successfully")
    return dat

f_load = Paragraph(text="""Load Data""", height=15)
f = FileInput()
f.on_change('value', load_file)

curdoc().add_root(column(f))
and here is the EIS_data class:
import numpy as np
import pandas as pd  # used by pd.read_csv in __init__
from scipy.optimize import curve_fit
from sklearn.metrics import r2_score
from bokeh.plotting import figure, show
from bokeh.models import LinearAxis, Range1d
from bokeh.resources import INLINE
import bokeh.io

#locally include javascript dependencies in html
bokeh.io.output_notebook(INLINE)

class EIS_data:
    def __init__(self, file_name, delimiter='\t',
                 header=0, f_low=None, f_high=None):
        #load eis data into a pandas dataframe
        eis_data = pd.read_csv(file_name, delimiter=delimiter, header=header)
        #iterate through all of the columns and check to see
        #if all of the values in that column are null
        #if they are, then remove that column
        for c in eis_data.columns:
            if eis_data[c].isnull().all():
                eis_data = eis_data.drop([c], axis=1)
        #make sure that the data are imported as floats and not strings
        eis_data = eis_data[['freq/Hz', 'Re(Z)/Ohm', '-Im(Z)/Ohm']]
        eis_data['freq/Hz'] = pd.to_numeric(eis_data['freq/Hz'])
        eis_data['Re(Z)/Ohm'] = pd.to_numeric(eis_data['Re(Z)/Ohm'])
        eis_data['-Im(Z)/Ohm'] = pd.to_numeric(eis_data['-Im(Z)/Ohm'])
        self.df = eis_data.sort_values(by='freq/Hz')

    def plot(self, fit_vals=None):
        plot = figure(title="Nyquist Plot",
                      x_axis_label='Re(Z) Ohm',
                      y_axis_label='-Im(Z) Ohm',
                      plot_width=600,
                      plot_height=600)
        plot.circle(self.df['Re(Z)/Ohm'], self.df['-Im(Z)/Ohm'],
                    size=7, color='navy', name='Data')
        return plot
EDIT: Adding the workaround using ColumnDataSource
from bokeh.io import curdoc  # needed for curdoc() at the bottom
from bokeh.layouts import column
from bokeh.plotting import figure
from bokeh.models import *
from bokeh.models.widgets import FileInput
import base64
import io
from eis_analysis2 import EIS_data

# Instantiate the EIS_data class for loading data
def load_data(data):
    return EIS_data(data)

# updater function called to load data with FileInput widget
# Must be decoded using base64
def load_file(attr, old, new):
    decoded = base64.b64decode(new)
    d = io.BytesIO(decoded)
    dat = load_data(d)
    dat_df = dat.df
    # Replace plot data with data from newly-loaded file
    source.data = dict(freq=dat_df[dat_df.columns[0]],
                       reZ=dat_df[dat_df.columns[1]],
                       imZ=dat_df[dat_df.columns[2]])
    #phase,mag = bode_calc(reZ,imZ)
    print(dat_df)
    print("EIS Data Uploaded Successfully")

# Create Column Data Source that will be used by the plot
source = ColumnDataSource(data=dict(freq=[], reZ=[], imZ=[]))

##Make the nyquist plot
nyq_plot = figure(title="Nyquist Plot",
                  x_axis_label='Re(Z) Ohm',
                  y_axis_label='-Im(Z) Ohm',
                  plot_width=600,
                  plot_height=600)
nyq_plot.circle(x="reZ", y="imZ", source=source, size=7, color='navy', name='Data')

f = FileInput()
f.on_change('value', load_file)

layout = column(f, nyq_plot)
curdoc().add_root(layout)
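A minimal sketch (untested, assuming a Bokeh server context) of the approach asked about above: keep a column layout and swap its children with the figure returned by EIS_data.plot() whenever a file is uploaded, so the class's own plotting method is reused instead of a separate ColumnDataSource:
import base64
import io

from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models import FileInput

from eis_analysis_trimmed import EIS_data  # module name taken from the question

def load_file(attr, old, new):
    decoded = base64.b64decode(new)
    dat = EIS_data(io.BytesIO(decoded))
    # Rebuild the layout so the figure produced by the class method is shown
    layout.children = [f, dat.plot()]

f = FileInput()
f.on_change('value', load_file)

layout = column(f)  # starts with just the widget; the plot is added on upload
curdoc().add_root(layout)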

ModuleNotFoundError for .vision and .utils, download_url, VisionDataset

I am trying to create a custom dataset in Google Colab, but the imports give me errors.
from PIL import Image
from six.moves import zip
import os
from .vision import VisionDataset                 # ------------------------ (1)
from .utils import download_url, check_integrity  # ------------------------ (2)

class datasetName(VisionDataset):
    ...
(1) error:
ModuleNotFoundError: No module named '__main__.vision'; '__main__' is not a package
(2) error:
ModuleNotFoundError: No module named '__main__.utils'; '__main__' is not a package
I have tried to add from torchvision import utils but it does not solve the error.
If I change to from torch.utils import download_url, check_integrity
then the error becomes:
ImportError: cannot import name 'download_url'
Please try the following import lines.
from torchvision.datasets.vision import VisionDataset
from torchvision.datasets.utils import download_url, check_integrity
Hope it helps!
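If it helps, a hedged sketch of those imports in context (the class name and arguments below are placeholders, not part of torchvision):
import os

from torchvision.datasets.vision import VisionDataset
from torchvision.datasets.utils import download_url, check_integrity

class MyDataset(VisionDataset):  # placeholder name for the custom dataset
    def __init__(self, root, url=None, filename=None, md5=None):
        super().__init__(root)
        if url is not None:
            # both helpers come from torchvision.datasets.utils
            download_url(url, root, filename, md5)
            assert check_integrity(os.path.join(root, filename), md5)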

Zipline - How to pass bundle DataPortal to TradeAlgorithm.run()?

I am trying to run a Zipline back test by calling the run() method of zipline.algorithm.TradingAlgorithm:
algo = TradingAlgorithm(initialize=CandlestickStrategy.initialize,
                        handle_data=CandlestickStrategy.handle_data,
                        analyze=CandlestickStrategy.analyze,
                        data=None,
                        bundle='quandl')

results = algo.run()
But I'm not sure what to pass as the data parameter, or how. I have already ingested the data bundle, which is called 'quandl'. According to the docs, that parameter should receive a DataPortal instance, but I don't know how to create one based on the data I have ingested. What is the best way of doing this, and is it necessary?
Essentially my goal is to create a top-level 'dashboard'-style class that can run multiple back tests using different strategies that live in separate modules.
Full code (dashboard.py):
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from mpl_finance import candlestick_ohlc
from datetime import datetime, date, tzinfo, timedelta
from dateutil import parser
import pytz
import numpy as np
import talib
import warnings
import logbook
from logbook import Logger
log = Logger('Algorithm')
from zipline.algorithm import TradingAlgorithm
from zipline.api import order_target_percent, order_target, cancel_order, get_open_orders, get_order, get_datetime, record, symbol
from zipline.data import bundles
from zipline.finance import execution
from CandlestickStrategy import CandlestickStrategy
warnings.filterwarnings("ignore")
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
# Choosing a security and a time horizon
logbook.StderrHandler().push_application()
start = datetime(2014, 9, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2016, 1, 1, 0, 0, 0, 0, pytz.utc)
#dataPortal = data_portal.DataPortal(asset_finder, trading_calendar, first_trading_day, e
#bundle = bundles.load('quandl',None,start)
algo = TradingAlgorithm(initialize=CandlestickStrategy.initialize,
                        handle_data=CandlestickStrategy.handle_data,
                        analyze=CandlestickStrategy.analyze,
                        data=None,
                        bundle='quandl')

results = algo.run()
CandlestickStrategy.py:
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from mpl_finance import candlestick_ohlc
from zipline.api import order_target_percent, order_target, cancel_order, get_open_orders, get_order, get_datetime, record, symbol
from zipline.finance import execution
from datetime import datetime, date, tzinfo, timedelta
from dateutil import parser
import pytz
import numpy as np
import talib
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
class CandlestickStrategy:
    def initialize(context):
        print("initializing algorithm...")
        context.i = 0
        context.asset = symbol('AAL')

    def handle_data(context, data):
        try:
            trailing_window = data.history(context.asset, ['open', 'high', 'low', 'close'], 28, '1d')
        except:
            return

    def analyze(context=None, results=None):
        print("Analyze")
Hopefully someone can point me in the right direction.
Thanks
I faced the same issue. When running the trading algorithm manually this way, the bundle argument is not evaluated, so you need to create the data portal yourself. I manually registered the bundle and created a data_portal to run it:
bundles.register('yahoo-xetra',
                 csvdir_equities(get_calendar("XETRA"), ["daily"], '/data/yahoo'),
                 calendar_name='XETRA')

bundle_data = bundles.load('yahoo-xetra')

first_trading_day = bundle_data.equity_daily_bar_reader.first_trading_day

data = DataPortal(
    bundle_data.asset_finder,
    trading_calendar=get_calendar("XETRA"),
    first_trading_day=first_trading_day,
    equity_minute_reader=bundle_data.equity_minute_bar_reader,
    equity_daily_reader=bundle_data.equity_daily_bar_reader,
    adjustment_reader=bundle_data.adjustment_reader,
)

Strategy = SimpleAlgorithm(trading_calendar=get_calendar("XETRA"), data_frequency='daily',
                           start=pd.Timestamp('2017-1-1 08:00:00+0200', tz='Europe/Berlin'),
                           end=pd.Timestamp('2018-12-27 08:00:00+0200', tz='Europe/Berlin'),
                           capital_base=10000000,
                           data_portal=data)
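Presumably (an assumption, not part of the original answer) the back test is then started by handing the same portal to run(); in zipline's classic API, TradingAlgorithm.run() also accepts a DataPortal:
results = Strategy.run(data)  # assumed usage: run() with the DataPortal built above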

Creating executable with Py2exe and matplotlib errors

I have been searching this forum and many others and cannot seem to get a good method of creating an executable. I have tried several different methods (py2exe, pyinstaller and cx_freeze) and all seem to give me some kind of error.
When I tried pyinstaller, I received the error that "no _imaging C module is installed". Everything I search for says that it has to do with PIL, but my code is not using PIL.
When I try py2exe, I keep receiving the following error:
File "Scout_Tool.py", line 18, in <module>
File "matplotlib\pyplot.pyc", line 95, in <module>
File "matplotlib\backends\__init__.pyc", line 25, in pylab_setup
ImportError: No module named backend_qt4agg
I am at a loss of what to do. My code contains the following imports:
import os
import csv
import wx
import time
import math
from matplotlib.backends.backend_wx import FigureCanvasWx as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.pyplot import figure,show
from mpl_toolkits.basemap import Basemap
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from numpy.random import rand
from datetime import datetime
import wx.calendar as cal
import numpy as npy
from pylab import *
import numpy as np
import matplotlib
import adodbapi
import sqlparse
import pylab
import annote_new
import cPickle as pickle
Does anyone have any suggestions on how to build the executable using py2exe? What I have tried:
from distutils.core import setup
import py2exe
import matplotlib

setup(
    windows=[{'script': r'Scout_Tool.py'}],
    data_files=matplotlib.get_py2exe_datafiles(),
    options={
        r'py2exe': {
            r'includes': r'ElementConfig',
            r'includes': r'ColorConv',
            r'includes': r'Tkinter',
            r'includes': r're',
            r'includes': r'math',
            r'includes': r'sys',
            r'includes': r'matplotlib',
            r'includes': r'mpl_toolkits',
            r'includes': r'matplotlib.backends.backend_wx',
            r'dll_excludes': [r'MSVCP90.dll'],
        }
    },
)
Thanks for any help!
I am not fully sure this will fix your problem, but you should start by correcting that faulty options dictionary entry. In Python, when you define a dictionary with the same key over and over, you only get the last value. A key can only exist once:
options={
    r'py2exe': {
        r'includes': r'ElementConfig',
        ...
        r'includes': r'mpl_toolkits',
        r'includes': r'matplotlib.backends.backend_wx',
        ...
    }
}

print options
#{'py2exe': {'includes': 'matplotlib.backends.backend_wx'}}
I suspect the result of this usage is py2exe not really finding any of your intended includes. includes should be a list:
options={
    'py2exe': {
        'includes': [
            'ElementConfig',
            'ColorConv',
            'Tkinter',
            're',
            'math',
            'sys',
            'matplotlib',
            'mpl_toolkits',
            'matplotlib.backends.backend_wx'
        ],
        'dll_excludes': ['MSVCP90.dll'],
    }
},
If after this, it still complains about the backend missing, you can add another explicit entry:
'includes': [
    ...
    'matplotlib.backends.backend_qt4agg'
],
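For completeness, the build itself is then triggered from the command line with py2exe's standard invocation (not specific to this setup script):
python setup.py py2exe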