Access local folder in colab - google-colaboratory

I want to create a function that reads the files in a folder one by one in Colab.
I have tried the following: I uploaded my folder to My Drive and then tried to access it from there.
But it is still showing the error.
import os
from google.colab import drive
drive.mount('/content/gdrive')
mypath = "/gdrive/My Drive/resume pdf"  # enter your path here where you saved the resumes
onlyfiles = [os.path.join(mypath, f) for f in os.listdir(mypath) if os.path.isfile(os.path.join(mypath, f))]
Error-->
FileNotFoundError Traceback (most recent call last)
<ipython-input-15-a7a8abc74cc6> in <module>()
1 mypath = "/gdrive/My Drive/resume pdf" #enter your path here where you saved the resumes
----> 2 onlyfiles = [os.path.join(mypath, f) for f in os.listdir(mypath) if os.path.isfile(os.path.join(mypath, f))]
FileNotFoundError: [Errno 2] No such file or directory: '/gdrive/My Drive/resume pdf'
I have cross-checked that there is a folder named "resume pdf" in my Drive.
So how can I upload a local folder and access each of its files one by one in Colab?

You mounted it at /content/gdrive, but you are trying to access it at /gdrive; that's the error.
Just change it to
mypath = "/content/gdrive/My Drive/resume pdf"

Related

How to use drive with external account in colab

So, I have all my important files in my Drive account and I need to import those files into Colab's local machine. Sharing the files can be dangerous, and for some reason the files copy themselves into the Drive, filling it up. I tried to use PyDrive, but I can't automate the process of logging in with a user account. I need a way to automate the login process in Colab. Any ideas?
from pydrive2.auth import GoogleAuth
from pydrive2.drive import GoogleDrive
from google_drive_downloader import GoogleDriveDownloader as gdd

gauth = GoogleAuth()
drive = GoogleDrive(gauth)

file = open("client_secrets.json", "w")
file.write('{"web": __')
file.close()

file = open("credentials.json", "w")
file.write('{"access_token": __"}')
file.close()

file = open("settings.json", "w")
file.write('')
file.close()

if gauth.credentials is None:
    gauth.CommandLineAuth()
elif gauth.access_token_expired:
    gauth.Refresh()
else:
    gauth.Authorize()
( __ = private information)
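If the goal is to avoid the interactive login on every run, one possible approach (a sketch only; the credentials file name saved_creds.txt is an arbitrary choice, not something from the question) is to persist the credentials with PyDrive2's LoadCredentialsFile/SaveCredentialsFile and reuse them:
from pydrive2.auth import GoogleAuth
from pydrive2.drive import GoogleDrive

gauth = GoogleAuth()
gauth.LoadCredentialsFile("saved_creds.txt")  # leaves credentials unset if the file does not exist yet

if gauth.credentials is None:
    gauth.CommandLineAuth()       # interactive, but only on the very first run
elif gauth.access_token_expired:
    gauth.Refresh()
else:
    gauth.Authorize()

gauth.SaveCredentialsFile("saved_creds.txt")  # reused on later runs
drive = GoogleDrive(gauth)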

Allennlp: How to use CPU instead of GPU?

I'm running some code that works when there is a GPU, but I'm trying to figure out how to run it locally with the CPU. Here's the error:
2022-07-06 17:58:39,042 - INFO - allennlp.common.plugins - Plugin allennlp_models available
Traceback (most recent call last):
File "/Users/xiaoqingwan/opt/miniconda3/envs/absa/bin/allennlp", line 8, in <module>
sys.exit(run())
File "/Users/xiaoqingwan/opt/miniconda3/envs/absa/lib/python3.7/site-packages/allennlp/__main__.py", line 34, in run
main(prog="allennlp")
File "/Users/xiaoqingwan/opt/miniconda3/envs/absa/lib/python3.7/site-packages/allennlp/commands/__init__.py", line 118, in main
args.func(args)
File "/Users/xiaoqingwan/opt/miniconda3/envs/absa/lib/python3.7/site-packages/allennlp/commands/predict.py", line 205, in _predict
predictor = _get_predictor(args)
File "/Users/xiaoqingwan/opt/miniconda3/envs/absa/lib/python3.7/site-packages/allennlp/commands/predict.py", line 105, in _get_predictor
check_for_gpu(args.cuda_device)
File "/Users/xiaoqingwan/opt/miniconda3/envs/absa/lib/python3.7/site-packages/allennlp/common/checks.py", line 131, in check_for_gpu
" 'trainer.cuda_device=-1' in the json config file." + torch_gpu_error
allennlp.common.checks.ConfigurationError: Experiment specified a GPU but none is available; if you want to run on CPU use the override 'trainer.cuda_device=-1' in the json config file.
module 'torch.cuda' has no attribute '_check_driver'
Could you give me some guidance on what to do? Where is the config file and what is it called?
Here's the code (originally from: https://colab.research.google.com/drive/1F9zW_nVkwfwIVXTOA_juFDrlPz5TLjpK?usp=sharing):
# Use pretrained SpanModel weights for prediction
import sys
sys.path.append("aste")
from pathlib import Path
from data_utils import Data, Sentence, SplitEnum
from wrapper import SpanModel
def predict_sentence(text: str, model: SpanModel) -> Sentence:
    path_in = "temp_in.txt"
    path_out = "temp_out.txt"
    sent = Sentence(tokens=text.split(), triples=[], pos=[], is_labeled=False, weight=1, id=1)
    data = Data(root=Path(), data_split=SplitEnum.test, sentences=[sent])
    data.save_to_path(path_in)
    model.predict(path_in, path_out)
    data = Data.load_from_full_path(path_out)
    return data.sentences[0]

text = "Did not enjoy the new Windows 8 and touchscreen functions ."
model = SpanModel(save_dir="pretrained_14lap", random_seed=0)
sent = predict_sentence(text, model)
Try using something like:
import torch

device = torch.device("cpu")
model = SpanModel(save_dir="pretrained_14lap", random_seed=0)
model.to(device)
The config file is inside the model.tar.gz in the pretrained_14lap directory (it is always named config.json). It also contains the parameter "cuda_device": 0, which may be causing your problem.
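If you would rather fix the archive itself, here is a rough standard-library sketch; the location pretrained_14lap/model.tar.gz and the nesting of cuda_device under "trainer" are assumptions based on the error message and the answer above:
import json
import os
import shutil
import tarfile

# Unpack the model archive (path is an assumption, see above)
shutil.unpack_archive("pretrained_14lap/model.tar.gz", "model_unpacked")

cfg_path = os.path.join("model_unpacked", "config.json")
with open(cfg_path) as f:
    cfg = json.load(f)

# Force CPU, mirroring the override suggested by the error message
cfg.setdefault("trainer", {})["cuda_device"] = -1

with open(cfg_path, "w") as f:
    json.dump(cfg, f, indent=2)

# Repack everything back into model.tar.gz
with tarfile.open("pretrained_14lap/model.tar.gz", "w:gz") as tar:
    for name in os.listdir("model_unpacked"):
        tar.add(os.path.join("model_unpacked", name), arcname=name)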

Unknown report type: xlsx: - Odoo 10

I am trying to create an xlsx report. I tried the code below but I am getting an error:
Traceback (most recent call last):
File "/home/shar/Projects/git/odoo/addons/web/controllers/main.py", line 72, in wrap
return f(*args, **kwargs)
File "/home/shar/Projects/git/odoo/addons/web/controllers/main.py", line 1485, in index
request.session.db, request.session.uid, request.session.password, report_id])
File "/home/shar/Projects/git/odoo/odoo/http.py", line 118, in dispatch_rpc
result = dispatch(method, params)
File "/home/shar/Projects/git/odoo/odoo/service/report.py", line 35, in dispatch
res = fn(db, uid, *params)
File "/home/shar/Projects/git/odoo/odoo/service/report.py", line 142, in exp_report_get
return _check_report(report_id)
File "/home/shar/Projects/git/odoo/odoo/service/report.py", line 120, in _check_report
raise UserError('%s: %s' % (exc.message, exc.traceback))
UserError: (u"Unknown report type: xlsx: (, NotImplementedError(u'Unknown report type: xlsx',), )", '')
Here is my code:
*.py
# -*- coding: utf-8 -*-
from odoo.addons.report_xlsx.report.report_xlsx import ReportXlsx


class PartnerXlsx(ReportXlsx):

    def generate_xlsx_report(self, workbook, data, partners):
        for obj in partners:
            report_name = obj.name
            # One sheet by partner
            sheet = workbook.add_worksheet(report_name[:31])
            bold = workbook.add_format({'bold': True})
            sheet.write(0, 0, obj.name, bold)


PartnerXlsx('report.module_name.res.partner.xlsx',
            'res.partner')
*.xml
<report
    id="partner_xlsx"
    model="res.partner"
    string="Print to XLSX"
    report_type="xlsx"
    name="res.partner.xlsx"
    file="res.partner.xlsx"
    attachment_use="False"
/>
Your code seems right, but remember that all other Odoo rules still apply, so don't forget to:
Add 'report_xlsx' as a dependency in the __openerp__.py manifest (see the manifest sketch below).
Add your .xml file to the data list inside the __openerp__.py manifest ('data': ['report/file.xml']).
Add an __init__.py file with from . import <report_file_name> inside your report folder (where your .py file lives and, desirably, also your .xml file, as declared in the manifest).
Add from . import report inside your addon's __init__.py file.
Update your addon inside the Odoo apps list.
It should work after that.
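A minimal manifest sketch matching the checklist above (the module name, report file name, and paths are placeholders, not taken from the question):
# __openerp__.py  (all names below are placeholders)
{
    'name': 'Partner XLSX Report',
    'version': '1.0',
    'depends': ['base', 'report_xlsx'],      # report_xlsx must be listed here
    'data': ['report/partner_report.xml'],   # the XML file declaring the report
    'installable': True,
}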
We don't have xlsx as a report_type format.
We have only qweb-pdf, qweb-html, and controller.
report_type will accept any one of these (qweb-pdf, qweb-html, controller).
It does not have xlsx as such.
Please refer to the 'ir.actions.report.xml' class for further reference.

Sagemaker ImportError: Import by filename is not supported

I have a custom algorithm for text prediction. I want to deploy it in SageMaker. I am following this tutorial:
https://docs.aws.amazon.com/sagemaker/latest/dg/tf-example1.html
The only change from the tutorial is:
from sagemaker.tensorflow import TensorFlow

iris_estimator = TensorFlow(entry_point='/home/ec2-user/SageMaker/sagemaker.py',
                            role=role,
                            output_path=model_artifacts_location,
                            code_location=custom_code_upload_location,
                            train_instance_count=1,
                            train_instance_type='ml.c4.xlarge',
                            training_steps=1000,
                            evaluation_steps=100,
                            source_dir="./",
                            requirements_file="requirements.txt")
and then:
%%time
import boto3
train_data_location = 's3://sagemaker-<my bucket>'
iris_estimator.fit(train_data_location)
INFO: the dataset is at the root of the bucket.
error log
ValueError: Error training sagemaker-tensorflow-2018-06-19-07-11-13-634: Failed Reason: AlgorithmError: uncaught exception during training: Import by filename is not supported.
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/container_support/training.py", line 36, in start
fw.train()
File "/usr/local/lib/python2.7/dist-packages/tf_container/train_entry_point.py", line 143, in train
customer_script = env.import_user_module()
File "/usr/local/lib/python2.7/dist-packages/container_support/environment.py", line 101, in import_user_module
user_module = importlib.import_module(script)
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
ImportError: Import by filename is not supported.
I solved this issue. The problem was using an absolute path for entry_point.
When you use the source_dir parameter, the path to the entry_point should be relative to source_dir.
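Applied to the snippet from the question, the estimator call would then look roughly like this (only entry_point changes; everything else keeps the original placeholders):
from sagemaker.tensorflow import TensorFlow

iris_estimator = TensorFlow(entry_point='sagemaker.py',  # relative to source_dir
                            role=role,
                            output_path=model_artifacts_location,
                            code_location=custom_code_upload_location,
                            train_instance_count=1,
                            train_instance_type='ml.c4.xlarge',
                            training_steps=1000,
                            evaluation_steps=100,
                            source_dir="./",
                            requirements_file="requirements.txt")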
I solved it with:
region = boto3.Session().region_name
train_data_location = 's3://sagemaker-<my bucket>'.format(region)

openpyxl+load_workbook+AttributeError: 'NoneType' object has no attribute 'date1904'

When I use openpyxl to load an Excel file (.xlsx), this error is displayed (the link at the end is the sample Excel file):
from openpyxl import *
wb = load_workbook("D:/develop/workspace/exman/test sample/510001653.xlsx")
Traceback (most recent call last):
File "", line 1, in
File "C:\Python34\lib\site-packages\openpyxl-2.5.0-py3.4.egg\openpyxl\reader\
xcel.py", line 161, in load_workbook
parser.parse()
File "C:\Python34\lib\site-packages\openpyxl-2.5.0-py3.4.egg\openpyxl\packagi
g\workbook.py", line 42, in parse
if package.properties.date1904:
AttributeError: 'NoneType' object has no attribute 'date1904'
sample excel file download
I debugged the Python file and found that workbookPr is None, which causes package.properties to be None (properties = Alias(workbookPr)). So I changed the code of WorkbookParser.parse() as follows, and the error is solved.
class WorkbookParser:

    def __init__(self, archive):
        self.archive = archive
        self.wb = Workbook()
        self.sheets = []
        self.rels = get_dependents(self.archive, ARC_WORKBOOK_RELS)

    def parse(self):
        src = self.archive.read(ARC_WORKBOOK)
        node = fromstring(src)
        package = WorkbookPackage.from_tree(node)
        if package.properties is not None:  # add this line
            if package.properties.date1904:
                self.wb.excel_base_date = CALENDAR_MAC_1904
        self.wb.code_name = package.properties.codeName
        self.wb.active = package.active
        ..........
This bug was fixed in newer versions (I checked 2.4.8 and it's fixed; 2.4.0 still had it):
pip install --upgrade openpyxl
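After upgrading, a quick sanity check that the fix is in place (using only standard openpyxl attributes, with the file path from the question):
import openpyxl

print(openpyxl.__version__)  # should report 2.4.8 or newer

wb = openpyxl.load_workbook("D:/develop/workspace/exman/test sample/510001653.xlsx")
print(wb.sheetnames)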