How to use Python to share an Outlook calendar - win32com

I want to share my Outlook calendar with other Exchange users by using win32com.
According to the Office VBA Reference on sharing calendars, I tried to create a SharingItem using the CreateSharingItem method, but it failed.
Below is my code:
import win32com.client

ol = win32com.client.gencache.EnsureDispatch('Outlook.Application')
session = ol.GetNamespace("MAPI")
sharefolder = session.GetDefaultFolder(9)  # 9 == olFolderCalendar
shareitem = session.CreateSharingItem(sharefolder)
and the error messages:
Traceback (most recent call last):
File "D:\wah2hi\comment\test\test\test.py", line 22, in <module>
shareitem = session.CreateSharingItem(sharefolder)
File "C:\Users\wah2hi\AppData\Local\Temp\gen_py\2.7\00062FFF-0000-0000-C000-000000000046x0x9x5\_NameSpace.py", line 62, in CreateSharingItem
ret = self._oleobj_.InvokeTypes(64484, LCID, 1, (13, 0), ((12, 1), (12, 17)), Context, Provider)
pywintypes.com_error: (-2147352567, 'Exception occurred.', (4096, u'Microsoft Outlook', u'The operation failed.', None, 0, -2147467259), None)
So, am I using the CreateSharingItem method in the wrong way, or is there some other way to share a calendar using win32com?
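If CreateSharingItem keeps failing, one alternative is to send the calendar contents as an iCalendar attachment through the CalendarSharing object. This is only a sketch of an alternative approach, not a confirmed fix for the error above; the recipient address is a placeholder and the export format constant is an assumption:

import win32com.client
from win32com.client import constants

ol = win32com.client.gencache.EnsureDispatch('Outlook.Application')
session = ol.GetNamespace("MAPI")
calendar = session.GetDefaultFolder(constants.olFolderCalendar)  # same folder as 9 above
exporter = calendar.GetCalendarExporter()  # returns a CalendarSharing object
# Forward the calendar as an iCal attachment (mail format constant is an assumption)
mail = exporter.ForwardAsICal(constants.olCalendarMailFormatDailySchedule)
mail.Recipients.Add("someone@example.com")  # placeholder recipient
mail.Send()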

Related

Running into uvloop issues with Database queries from Rasa-X?

I'm trying to make a simple query to my Amazon Neptune database from Rasa-X.
Here is the code from my actions.py:
class ActionQueryDietary(Action):
    def name(self) -> Text:
        return "action_query_dietary"

    def run(self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        available = False
        restaurant = "XXXX"
        dietaryQuestion = tracker.get_slot('dietaryQuestion')
        g, remoteConn = kb.openConnection()
        dietary = kb.getDietary(g, restaurant, dietaryQuestion)
        if dietary == "Yes":
            available = True
        if available:
            # .format() belongs on the string itself, not on utter_message()'s return value
            dispatcher.utter_message(text="According to our knowledge base, {} is on the menu".format(dietaryQuestion))
        else:
            dispatcher.utter_message(text="Sorry, according to our knowledge base, we don't have this option on the menu")
        kb.closeConnection(remoteConn)
        return []
and here is the code from knowledgebase.py:
def getDietary(g, restaurant, dietary):
    properties = queryRestaurantProperties(g, restaurant)
    result = properties[dietary]
    print(result)
    return result
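For reference, the traceback below shows the Gremlin traversal that queryRestaurantProperties runs; reconstructed here as a sketch from that line:

def queryRestaurantProperties(g, restaurant):
    # Taken from the traceback (knowledgebase.py line 86)
    result = g.V().has('name', restaurant).valueMap().next()
    return result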
but any query to the knowledgebase results in this error:
2020-10-22T18:01:22.347345241Z File "/opt/venv/lib/python3.7/site-packages/sanic/app.py", line 973, in handle_request
2020-10-22T18:01:22.347351643Z response = await response
2020-10-22T18:01:22.347357446Z File "/app/rasa_sdk/endpoint.py", line 102, in webhook
2020-10-22T18:01:22.347363522Z result = await executor.run(action_call)
2020-10-22T18:01:22.347369398Z File "/app/rasa_sdk/executor.py", line 392, in run
2020-10-22T18:01:22.347375473Z events = action(dispatcher, tracker, domain)
2020-10-22T18:01:22.347381348Z File "/app/actions/actions.py", line 33, in run
2020-10-22T18:01:22.347387063Z dietary = kb.getDietary(g, restaurant, dietaryQuestion)
2020-10-22T18:01:22.347392835Z File "/app/actions/knowledgebase.py", line 117, in getDietary
2020-10-22T18:01:22.347399112Z properties = queryRestaurantProperties(g, restaurant)
2020-10-22T18:01:22.347405111Z File "/app/actions/knowledgebase.py", line 86, in queryRestaurantProperties
2020-10-22T18:01:22.347411869Z result = g.V().has('name', restaurant).valueMap().next()
2020-10-22T18:01:22.347418048Z File "/opt/venv/lib/python3.7/site-packages/gremlin_python/process/traversal.py", line 89, in next
2020-10-22T18:01:22.347424293Z return self.__next__()
2020-10-22T18:01:22.347430069Z File "/opt/venv/lib/python3.7/site-packages/gremlin_python/process/traversal.py", line 48, in __next__
2020-10-22T18:01:22.347436333Z self.traversal_strategies.apply_strategies(self)
2020-10-22T18:01:22.347441940Z File "/opt/venv/lib/python3.7/site-packages/gremlin_python/process/traversal.py", line 573, in apply_strategies
2020-10-22T18:01:22.347447667Z traversal_strategy.apply(traversal)
2020-10-22T18:01:22.347453031Z File "/opt/venv/lib/python3.7/site-packages/gremlin_python/driver/remote_connection.py", line 149, in apply
2020-10-22T18:01:22.347459352Z remote_traversal = self.remote_connection.submit(traversal.bytecode)
2020-10-22T18:01:22.347465069Z File "/opt/venv/lib/python3.7/site-packages/gremlin_python/driver/driver_remote_connection.py", line 55, in submit
2020-10-22T18:01:22.347486749Z result_set = self._client.submit(bytecode)
2020-10-22T18:01:22.347493788Z File "/opt/venv/lib/python3.7/site-packages/gremlin_python/driver/client.py", line 127, in submit
2020-10-22T18:01:22.347499424Z return self.submitAsync(message, bindings=bindings).result()
2020-10-22T18:01:22.347505093Z File "/opt/venv/lib/python3.7/site-packages/gremlin_python/driver/client.py", line 146, in submitAsync
2020-10-22T18:01:22.347511092Z return conn.write(message)
2020-10-22T18:01:22.347516610Z File "/opt/venv/lib/python3.7/site-packages/gremlin_python/driver/connection.py", line 55, in write
2020-10-22T18:01:22.347522673Z self.connect()
2020-10-22T18:01:22.347529987Z File "/opt/venv/lib/python3.7/site-packages/gremlin_python/driver/connection.py", line 45, in connect
2020-10-22T18:01:22.347536097Z self._transport.connect(self._url, self._headers)
2020-10-22T18:01:22.347542222Z File "/opt/venv/lib/python3.7/site-packages/gremlin_python/driver/tornado/transport.py", line 36, in connect
2020-10-22T18:01:22.347547822Z lambda: websocket.websocket_connect(url))
2020-10-22T18:01:22.347553477Z File "/opt/venv/lib/python3.7/site-packages/tornado/ioloop.py", line 571, in run_sync
2020-10-22T18:01:22.347559295Z self.start()
2020-10-22T18:01:22.347564864Z File "/opt/venv/lib/python3.7/site-packages/tornado/platform/asyncio.py", line 132, in start
2020-10-22T18:01:22.347570978Z self.asyncio_loop.run_forever()
2020-10-22T18:01:22.347576693Z File "uvloop/loop.pyx", line 1351, in uvloop.loop.Loop.run_forever
2020-10-22T18:01:22.347582342Z File "uvloop/loop.pyx", line 484, in uvloop.loop.Loop._run
2020-10-22T18:01:22.347588222Z RuntimeError: Cannot run the event loop while another loop is running
I tried using nest_asyncio.apply, but that resulted in this error:
ValueError: Can't patch loop of type <class 'uvloop.Loop'>
which, according to the nest_asyncio docs, is by design (it cannot patch uvloop's event loop).
So I don't really know how to proceed.
Adding my comment above as an answer. In some cases it is necessary to downgrade the version of Tornado being used. There are issues you can run into if the event loop is already running when something else tries to start one. For now, downgrading to Tornado 4.5.1 alongside Gremlin Python should resolve the issue.
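If it helps, the downgrade is just a version pin in the environment that runs the Rasa action server (the exact requirements file and image are yours to adapt):

# requirements.txt for the action server environment
tornado==4.5.1
gremlinpython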

tensorflow 2.0 keras save model to hdfs: Can't decrement id ref count

I have mounted an HDFS drive via HDFS FUSE, so I can access HDFS under the path /hdfs/xxx.
After training a model with Keras, I want to save it to /hdfs/model.h5 via model.save("/hdfs/model.h5").
I get the following error:
2020-02-26T10:06:51.83869705Z File "h5py/_objects.pyx", line 193, in h5py._objects.ObjectID.__dealloc__
2020-02-26T10:06:51.838791107Z RuntimeError: Can't decrement id ref count (file write failed: time = Wed Feb 26 10:06:51 2020
2020-02-26T10:06:51.838796288Z , filename = '/hdfs/model.h5', file descriptor = 3, errno = 95, error message = 'Operation not supported', buf = 0x7f20d000ddc8, total write size = 512, bytes this sub-write = 512, bytes actually written = 18446744073709551615, offset = 298264)
2020-02-26T10:06:51.838802442Z Exception ignored in: 'h5py._objects.ObjectID.__dealloc__'
2020-02-26T10:06:51.838807122Z Traceback (most recent call last):
2020-02-26T10:06:51.838811833Z File "h5py/_objects.pyx", line 193, in h5py._objects.ObjectID.__dealloc__
2020-02-26T10:06:51.838816793Z RuntimeError: Can't decrement id ref count (file write failed: time = Wed Feb 26 10:06:51 2020
2020-02-26T10:06:51.838821942Z , filename = '/hdfs/model.h5', file descriptor = 3, errno = 95, error message = 'Operation not supported', buf = 0x7f20d000ddc8, total write size = 512, bytes this sub-write = 512, bytes actually written = 18446744073709551615, offset = 298264)
2020-02-26T10:06:51.838827917Z Traceback (most recent call last):
2020-02-26T10:06:51.838832755Z File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/saving/hdf5_format.py", line 117, in save_model_to_hdf5
2020-02-26T10:06:51.838838098Z f.flush()
2020-02-26T10:06:51.83885453Z File "/usr/local/lib/python3.6/dist-packages/h5py/_hl/files.py", line 452, in flush
2020-02-26T10:06:51.838859816Z h5f.flush(self.id)
2020-02-26T10:06:51.838864401Z File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
2020-02-26T10:06:51.838869302Z File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
2020-02-26T10:06:51.838874126Z File "h5py/h5f.pyx", line 146, in h5py.h5f.flush
2020-02-26T10:06:51.838879016Z RuntimeError: Can't flush cache (file write failed: time = Wed Feb 26 10:06:51 2020
2020-02-26T10:06:51.838885827Z , filename = '/hdfs/model.h5', file descriptor = 3, errno = 95, error message = 'Operation not supported', buf = 0x4e5b018, total write size = 4, bytes this sub-write = 4, bytes actually written = 18446744073709551615, offset = 34552)
But I can write a file directly to the same path with:
with open("/hdfs/a.txt", "w") as f:
    f.write("1")
Also, I've figured out a workaround that works:
from shutil import move

model.save("temp.h5")
move("temp.h5", "/hdfs/model.h5")
So maybe the problem is with the Keras API? It can save the model locally but cannot save it directly to an HDFS path.
Any idea how to fix the problem?
I don't think TensorFlow makes any promises about being able to save to hdfs-fuse. Your (final) error is "Can't flush cache", not "Can't decrement id ref count", which basically means "can't save straight to hdfs-fuse". But, to be honest, this already seems solved to me: your workaround is fine.
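A small sketch of that workaround as a reusable helper (save_model_to_fuse is a name introduced here, not a Keras API; it just writes locally first and then moves the file onto the mount):

import os
import shutil
import tempfile

def save_model_to_fuse(model, dest_path):
    # Save to a local temporary file first, then move it onto the FUSE mount.
    fd, tmp_path = tempfile.mkstemp(suffix=".h5")
    os.close(fd)
    try:
        model.save(tmp_path)
        shutil.move(tmp_path, dest_path)
    finally:
        if os.path.exists(tmp_path):
            os.remove(tmp_path)

# usage: save_model_to_fuse(model, "/hdfs/model.h5")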

send file to serial port in python

I am trying to send the file '105.8k' below to my energy meter.
I am using the xmodem example from PyPI, but I get the following error:
Traceback (most recent call last):
File "C:\Py\mainpy.py", line 68, in <module>
status = modem.send(f, retry=3)
File "C:\Users\admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\xmodem\__init__.py", line 270, in send
char = self.getc(1)
File "C:\py\mainpy.py", line 62, in getc
return ser.read(size) or None
AttributeError: 'str' object has no attribute 'read'
The code I use:
### send file to port ###
ser = serialPortCombobox.get().split(" ")[0]

def getc(size, timeout=1):
    return ser.read(size) or None

def putc(data, timeout=1):
    return ser.write(data)

modem = XMODEM(getc, putc)
f = open('105.8k', 'rb')
status = modem.send(f, retry=3)
ser.close()
stream.close()
Thank you for your help.
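The traceback suggests that ser here is just the port-name string from the combobox, not an open serial port, so read/write fail on it. A minimal sketch of the same flow with pyserial (the baud rate and timeout are assumptions for this meter):

import serial
from xmodem import XMODEM

port_name = serialPortCombobox.get().split(" ")[0]  # e.g. 'COM3'
ser = serial.Serial(port_name, baudrate=9600, timeout=1)  # baud rate is an assumption

def getc(size, timeout=1):
    return ser.read(size) or None

def putc(data, timeout=1):
    return ser.write(data)

modem = XMODEM(getc, putc)
with open('105.8k', 'rb') as f:
    status = modem.send(f, retry=3)
ser.close()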

APScheduler Look up object error

APScheduler 3.3.1, Python 2.7
I use the code below to run my jobs. With the memory job store it works well, but I have too many jobs and the memory on my server is limited, so I switched to SQLAlchemyJobStore as the job store and now I get the LookupError below. How can I solve it?
Code:
def script(indicator, strategy_name, real_time=False):
    # Fixes "No handlers could be found for logger apscheduler.scheduler"
    import logging
    logging.basicConfig(level=logging.ERROR,
                        format='%(name)-12s %(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%a, %d %b %Y %H:%M:%S')
    try:
        job_defaults = {
            'coalesce': False,
            'max_instances': 1,
            "misfire_grace_time": config.real_time_script_interval + 5,
        }
        executors = {
            'default': ThreadPoolExecutor(60),
        }
        jobstores = {
            "default": SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
        }
        scheduler = BlockingScheduler(daemonic=True, jobstores=jobstores, job_defaults=job_defaults,
                                      executors=executors)
        module = __import__("%s.%s" % (indicator, strategy_name), fromlist=[strategy_name])
        if real_time:
            for st in module.strategy:
                scheduler.add_job(st.run, "interval", seconds=config.real_time_script_interval)
        else:
            for st in module.strategy:
                # Compute the nearest upcoming on-the-hour start time
                start_time = _recent_time(st.run_period)
                scheduler.add_job(st.run, "interval", **start_time)
        scheduler.start()
    except Exception as e:
        logger.get("run-log").error(error_msg())
Error:
apscheduler.jobstores.default Thu, 06 Apr 2017 10:50:32 ERROR Unable to restore job "d100a4b24e2d49c3ad51305fd846e5f5" -- removing it
Traceback (most recent call last):
File "/Users/wyx/bitcoin_workspace/fibo-strategy/.env/lib/python2.7/site-packages/apscheduler/jobstores/sqlalchemy.py", line 135, in _get_jobs
jobs.append(self._reconstitute_job(row.job_state))
File "/Users/wyx/bitcoin_workspace/fibo-strategy/.env/lib/python2.7/site-packages/apscheduler/jobstores/sqlalchemy.py", line 122, in _reconstitute_job
job.__setstate__(job_state)
File "/Users/wyx/bitcoin_workspace/fibo-strategy/.env/lib/python2.7/site-packages/apscheduler/job.py", line 260, in __setstate__
self.func = ref_to_obj(self.func_ref)
File "/Users/wyx/bitcoin_workspace/fibo-strategy/.env/lib/python2.7/site-packages/apscheduler/util.py", line 277, in ref_to_obj
raise LookupError('Error resolving reference %s: error looking up object' % ref)
LookupError: Error resolving reference base.strategy:Strategy.run: error looking up object
apscheduler.jobstores.default Thu, 06 Apr 2017 10:50:32 ERROR Unable to restore job "2602167cd3c745c2b0764a2b63da1a3a" -- removing it
Traceback (most recent call last):
File "/Users/wyx/bitcoin_workspace/fibo-strategy/.env/lib/python2.7/site-packages/apscheduler/jobstores/sqlalchemy.py", line 135, in _get_jobs
jobs.append(self._reconstitute_job(row.job_state))
File "/Users/wyx/bitcoin_workspace/fibo-strategy/.env/lib/python2.7/site-packages/apscheduler/jobstores/sqlalchemy.py", line 122, in _reconstitute_job
job.__setstate__(job_state)
File "/Users/wyx/bitcoin_workspace/fibo-strategy/.env/lib/python2.7/site-packages/apscheduler/job.py", line 260, in __setstate__
self.func = ref_to_obj(self.func_ref)
File "/Users/wyx/bitcoin_workspace/fibo-strategy/.env/lib/python2.7/site-packages/apscheduler/util.py", line 277, in ref_to_obj
raise LookupError('Error resolving reference %s: error looking up object' % ref)
LookupError: Error resolving reference base.strategy:Strategy.run: error looking up object
apscheduler.jobstores.default Thu, 06 Apr 2017 10:50:32 ERROR Unable to restore job "3eb917670e7642b8848a165268df8913" -- removing it
Traceback (most recent call last):
File "/Users/wyx/bitcoin_workspace/fibo-strategy/.env/lib/python2.7/site-packages/apscheduler/jobstores/sqlalchemy.py", line 135, in _get_jobs
jobs.append(self._reconstitute_job(row.job_state))
File "/Users/wyx/bitcoin_workspace/fibo-strategy/.env/lib/python2.7/site-packages/apscheduler/jobstores/sqlalchemy.py", line 122, in _reconstitute_job
job.__setstate__(job_state)
File "/Users/wyx/bitcoin_workspace/fibo-strategy/.env/lib/python2.7/site-packages/apscheduler/job.py", line 260, in __setstate__
self.func = ref_to_obj(self.func_ref)
File "/Users/wyx/bitcoin_workspace/fibo-strategy/.env/lib/python2.7/site-packages/apscheduler/util.py", line 277, in ref_to_obj
raise LookupError('Error resolving reference %s: error looking up object' % ref)
LookupError: Error resolving reference base.strategy:Strategy.run: error looking up object
Supplementary information for Alex Grönholm's question here, because it is hard to explain in a comment.
In base/strategy_util.py:
base_strategy is one of several classes that inherit from the BaseStrategy class in base/strategy.py. BaseStrategy has a run method.
def _strategy(base_strategy, minute, ticker_table_format):
    class Strategy(base_strategy):
        run_period = minute

        def _init_params(self):
            self.ticker_table_format = ticker_table_format
    return Strategy()

def _create_strategy(base_strategy, minute_list=ALL_MINUTE):
    strategy_list = []
    for minute in minute_list:
        for ticker_table_format in const.TICKER_TABLE_FORMAT.ALL:
            st = _strategy(base_strategy, minute, ticker_table_format)
            strategy_list.append(st)
    return strategy_list

def ma_strategy(base_strategy):
    return _create_strategy(base_strategy)
In MA/touch_avg.py:
MA_TOUCH_AVG inherits from the BaseStrategy class.
from base.strategy import MA_TOUCH_AVG
from base.strategy_util import ma_strategy
strategy = ma_strategy(MA_TOUCH_AVG)
Then I use click to run the strategy, e.g. python run_strategy.py run MA touch_avg.
In run_strategy.py:
@cli.command()
@click.argument('indicator')
@click.argument('strategy_name')
def run(indicator, strategy_name):
    """ run indicator strategy_name """
    real_time_strategy_name = ["touch_avg", "limit"]
    util.script(indicator, strategy_name,
                real_time=strategy_name in real_time_strategy_name)
The reason you're having this problem is that you create the class dynamically, inside a function. APScheduler stores the reference to the scheduled function as module:varname. How is the scheduler expected to find a class that you're making on the fly in a function?
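One way around this, sketched below under assumed names (mypackage/jobs.py and the index-based lookup are illustrations, not part of the original code), is to schedule a module-level function that APScheduler can store as a textual reference, passing only plain, serializable arguments:

# mypackage/jobs.py -- hypothetical module; the scheduled callable lives at
# module level so APScheduler can store it as "mypackage.jobs:run_strategy"
def run_strategy(indicator, strategy_name, index):
    module = __import__("%s.%s" % (indicator, strategy_name), fromlist=[strategy_name])
    module.strategy[index].run()

# in script(): store only picklable arguments in the SQLAlchemy job store
for index, st in enumerate(module.strategy):
    scheduler.add_job("mypackage.jobs:run_strategy", "interval",
                      seconds=config.real_time_script_interval,
                      args=[indicator, strategy_name, index])

This assumes module.strategy is built deterministically, so the stored index refers to the same strategy each time the job store is loaded.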

AndroidViewClient serialno error

I'm testing various smartphones using AndroidViewClient.
To prevent connection errors, I used the connection options (kwargs1, kwargs2) as follows:
from com.dtmilano.android.viewclient import *
from com.dtmilano.android.adb.adbclient import *
kwargs1 = {'ignoresecuredevice': True}
kwargs2 = {'startviewserver': False, 'autodump': False}
vc = ViewClient(*ViewClient.connectToDeviceOrExit(**kwargs1), **kwargs2)
device, serialno = vc.device, vc.serialno
adb = AdbClient(serialno=serialno)
MODEL = adb.getProperty('ro.product.model')
print 'MODEL :', MODEL
With this, the connection errors disappeared.
But some phones with unusual serial numbers (such as 'LG-F160S-e0a852', 'EF47S01111100117300', ...) raised the following serialno error:
Traceback (most recent call last):
File "D:\$Project\Eclipse\_Python\AutoTest\01_get_property4.py", line 43, in <module>
adb = AdbClient(serialno=serialno)
File "D:\$Project\Eclipse\AndroidViewClient-master\src\com\dtmilano\android\adb\adbclient.py", line 108, in __init__
self.__setTransport()
File "D:\$Project\Eclipse\AndroidViewClient-master\src\com\dtmilano\android\adb\adbclient.py", line 251, in __setTransport
raise RuntimeError("ERROR: couldn't find device that matches '%s'" % self.serialno)
RuntimeError: ERROR: couldn't find device that matches '8b1ac56e'
How can I get the correct serialno or prevent this error?
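One thing that might sidestep the second lookup entirely is to reuse the already-connected device instead of building a new AdbClient from vc.serialno. This is only a sketch, under the assumption that the device object returned by connectToDeviceOrExit is an AdbClient and therefore already exposes getProperty:

from com.dtmilano.android.viewclient import ViewClient

kwargs1 = {'ignoresecuredevice': True}
kwargs2 = {'startviewserver': False, 'autodump': False}
device, serialno = ViewClient.connectToDeviceOrExit(**kwargs1)
vc = ViewClient(device, serialno, **kwargs2)
# Reuse the connected device; vc.serialno may not match what adb reports
# for some phones, which is what triggers the "couldn't find device" error.
MODEL = device.getProperty('ro.product.model')
print 'MODEL :', MODEL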