import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import cv2
import random
from google.colab.patches import cv2_imshow
import matplotlib.pyplot as plt
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
AttributeError Traceback (most recent call last)
<ipython-input-3-fa7489d52c2b> in <module>()
12 # import some common detectron2 utilities
13
---> 14 from detectron2.engine import DefaultPredictor
15 from detectron2.config import get_cfg
16 from detectron2.utils.visualizer import Visualizer
/usr/local/lib/python3.6/dist-packages/torchvision/ops/boxes.py in <module>()
43
44
---> 45 @torch.jit._script_if_tracing
46 def batched_nms(
47 boxes: Tensor,
AttributeError: module 'torch.jit' has no attribute '_script_if_tracing'
In my case, using pip install torchvision==0.7.0 instead of torchvision==0.7.0+cpu solved the error.
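For reference, a minimal sketch of that reinstall in a Colab cell; pairing torch 1.6.0 with torchvision 0.7.0 is an assumption about the matching versions, and the runtime should be restarted before re-importing:
!pip uninstall -y torchvision
!pip install torch==1.6.0 torchvision==0.7.0
# restart the Colab runtime, then re-run the detectron2 imports above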
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from google.colab import files
uploaded = files.upload()
df = pd.read_csv("quotation_test_data.csv")
print(df)
import seaborn as sns
df.describe()  # describe the data: shows count, mean, etc.
df.isnull().sum()  # check whether any column has missing values
df["Net profit"].value_counts()
sns.countplot(x=df["Net profit"])
# split the data into x and y, then make train and test sets
x = df.iloc[:, :-1]
y = df.iloc[:, -1]
# [:, :-1] means every row (:) and every column except the last one (:-1)
# now I will split the data into train and test sets using sklearn
from sklearn.model_selection import train_test_split
x_train , x_test,y_train,y_test = train_test_split(x,y,random_state=100)
x_train.shape
y_train.shape
from sklearn.tree import DecisionTreeClassifier  # initialize decision tree
clf = DecisionTreeClassifier(criterion="gini", max_depth=7, min_samples_split=10, random_state=10)  # criterion sets the split-quality measure
import pandas as pd
data=pd.read_csv('quotation_test_data.csv')
dataconver = data.replace('[^\d.]','', regex=True).astype(float)
I was trying to build a decision tree, and then this was shown to me:
ValueError Traceback (most recent call last)
<ipython-input-75-df55a55b03a4> in <module>
----> 1 dataconver = data.replace('[^\d.]', regex=True).astype(float)
2
/usr/local/lib/python3.8/dist-packages/pandas/core/dtypes/cast.py in astype_nansafe(arr, dtype, copy, skipna)
1199 if copy or is_object_dtype(arr.dtype) or is_object_dtype(dtype):
1200 # Explicit copy, or required since NumPy can't view from / to object.
-> 1201 return arr.astype(dtype, copy=True)
1202
1203 return arr.astype(dtype, copy=copy)
ValueError: could not convert string to float: 'Kellie Scott'
I was trying to build a decision tree.
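As an illustrative sketch (not the asker's code): the error means a text value such as a person's name sits in a column that is being cast to float. One hedged way to handle it, assuming the text columns should simply be excluded, is to coerce non-numeric values and drop the columns that were pure text:
numeric_data = data.apply(pd.to_numeric, errors='coerce')  # non-numeric strings become NaN
numeric_data = numeric_data.dropna(axis=1, how='all')       # drop columns that contained only text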
I am using Google Colab and am unable to install basemap in Colab.
!pip install geemap
import google.colab
import geemap.eefolium as geemap
import os
import geemap
import ee
ee.Authenticate()
ee.Initialize()
!apt-get install -q libgeos-3.5.0
!apt-get install -q libgeos-dev
!pip install -q https://github.com/matplotlib/basemap/archive/master.zip
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
%matplotlib inline
import geemap.basemaps
from geemap.basemaps import basemaps
ImportError Traceback (most recent call last)
in ()
----> 1 from geemap.basemaps import basemaps
ImportError: cannot import name 'basemaps' from 'geemap.basemaps' (/usr/local/lib/python3.7/dist-packages/geemap/basemaps.py)
I am trying to generate overlapping patches from an image of size (112, 112), but I am unable to do so. I have already tried a lot, but it didn't work out.
**Code**
import torch
import numpy as np
import torch.nn as nn
from torch import nn
from PIL import Image
import cv2
import os
import math
import torch.nn.functional as F
import torchvision.transforms as T
from timm import create_model
from typing import List
import matplotlib.pyplot as plt
from torchvision import io, transforms
from utils_torch import Image, ImageDraw
from torchvision.transforms.functional import to_pil_image
IMG_SIZE = 112
# PATCH_SIZE = 64
resize = transforms.Resize((IMG_SIZE, IMG_SIZE))
img = resize(io.read_image("Adam_Brody_233.png"))
img = img.to(torch.float32)
image_size = 112
patch_size = 28
ac_patch_size = 12
pad = 4
img = img.unsqueeze(0)
soft_split = nn.Unfold(kernel_size=(ac_patch_size, ac_patch_size), stride=(patch_size, patch_size), padding=(pad, pad))
patches = soft_split(img).transpose(1, 2)
fig, ax = plt.subplots(16, 16)
for i in range(16):
    for j in range(16):
        sub_img = patches[:, i, j]
        ax[i][j].imshow(to_pil_image(sub_img))
        ax[i][j].axis('off')
plt.show()
Traceback
Traceback (most recent call last):
File "/home/cvpr/Documents/OPVT/unfold_ours.py", line 32, in <module>
patches = soft_split(img).transpose(1, 2)
File "/home/cvpr/anaconda3/envs/OPVT/lib/python3.7/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/home/cvpr/anaconda3/envs/OPVT/lib/python3.7/site-packages/torch/nn/modules/fold.py", line 295, in forward
self.padding, self.stride)
File "/home/cvpr/anaconda3/envs/OPVT/lib/python3.7/site-packages/torch/nn/functional.py", line 3831, in unfold
_pair(dilation), _pair(padding), _pair(stride))
RuntimeError: "im2col_out_cpu" not implemented for 'Byte'
Yes, this is an open issue in PyTorch. A simple fix is to convert your image tensor from ints to floats; you can do it like this:
img = img.to(torch.float32)
This should solve your problem.
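For context, a minimal sketch of that fix applied to the question's pipeline, re-using the resize and soft_split objects defined in the code above (the file name and settings are taken from the question; everything else is standard torch/torchvision):
img = io.read_image("Adam_Brody_233.png")           # read_image returns a uint8 (Byte) tensor
img = resize(img).to(torch.float32).unsqueeze(0)    # cast to float before nn.Unfold, add batch dim
patches = soft_split(img).transpose(1, 2)           # im2col now runs, since float is supported on CPU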
These are the pip installs that I did:
!pip install ohmeow-blurr==0.0.6
!pip install datasets==1.3.0 -qq
!pip install fsspec==2021.7.0
These are the library imports that I did:
import ast
import torch
from fastai.text.all import *
from transformers import *
from blurr.data.all import *
from blurr.modeling.all import *
I tried using fastai2, but this is the error that I get:
RuntimeError Traceback (most recent call last)
Input In [16], in <cell line: 3>()
1 import ast
2 import torch
----> 3 from fastai2.text.all import *
4 from transformers import *
8 from blurr.data.all import *
File ~\anaconda3\lib\site-packages\fastai2\text\all.py:1, in <module>
----> 1 from ..basics import *
2 from ..callback.all import *
3 from .core import *
File ~\anaconda3\lib\site-packages\fastai2\basics.py:1, in <module>
----> 1 from .data.all import *
2 from .optimizer import *
3 from .callback.core import *
File ~\anaconda3\lib\site-packages\fastai2\data\all.py:1, in <module>
----> 1 from ..torch_basics import *
2 from .core import *
3 from .load import *
File ~\anaconda3\lib\site-packages\fastai2\torch_basics.py:4, in <module>
2 from .imports import *
3 from .torch_imports import *
----> 4 from .torch_core import *
5 from .layers import *
File ~\anaconda3\lib\site-packages\fastai2\torch_core.py:312, in <module>
309 if isinstance(f, (MethodWrapperType, BuiltinFunctionType, BuiltinMethodType, MethodType, FunctionType)):
310 setattr(TensorBase, fn, get_f(fn))
--> 312 _patch_tb()
314 # Cell
315 class TensorCategory(TensorBase): pass
File ~\anaconda3\lib\site-packages\fastai2\torch_core.py:308, in _patch_tb()
306 for fn in dir(t):
307 if fn in skips: continue
--> 308 f = getattr(t, fn)
309 if isinstance(f, (MethodWrapperType, BuiltinFunctionType, BuiltinMethodType, MethodType, FunctionType)):
310 setattr(TensorBase, fn, get_f(fn))
RuntimeError: tensor.H is only supported on matrices (2-D tensors). Got 1-D tensor.
This is the task that I wanted to add but somehow am not able to:
task = HF_TASKS_AUTO.TokenClassification
This is the error that I get from the code above.
The API had to be updated to work on Kaggle ... you should be able to simply use get_hf_objects from the utils module ...
from blurr.text.utils import get_hf_objects
hf_arch, hf_config, hf_tokenizer, hf_model = get_hf_objects(
    "microsoft/deberta-v3-small",
    model_cls=AutoModelForSequenceClassification
)
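As a small usage note (an assumption on my part, not part of the original answer): model_cls here is the standard transformers auto class, so the call above also needs its import if you are not relying on the star imports from earlier:
from transformers import AutoModelForSequenceClassification
print(hf_arch)  # e.g. the architecture name resolved for "microsoft/deberta-v3-small"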
This is my code for detecting smiling faces using OpenCV, but I am getting the error that's described in the title.
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
import numpy as np
import cv2 as cv
import imutils
detector = cv.CascadeClassifier(r"D:\haarcascade_frontalface_default.xml")  # raw string so backslashes are not treated as escapes
model = load_model("model.h5")
camera = cv.VideoCapture(0)
while True:
    (grabbed, frame) = camera.read()
    frame = imutils.resize(frame, width=300)
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)  # VideoCapture frames are BGR
    frameClone = frame.copy()
    rects = detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30), flags=cv.CASCADE_SCALE_IMAGE)
    for (fx, fy, fw, fh) in rects:
        roi = gray[fy:fy+fh, fx:fx+fw]
        roi = cv.resize(roi, (28, 28))
        roi = roi.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)
        (not_smiling, smiling) = model.predict(roi)[0]
        label = "smiling" if smiling > not_smiling else "NotSmiling"
        cv.putText(frameClone, label, (fx, fy-10), cv.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)
        cv.rectangle(frame, (fx, fy), (fx+fw, fy+fh), (0, 255, 255), 2)
    cv.imshow("face", frameClone)
    if cv.waitKey(1) & 0xFF == ord("q"):
        break
camera.release()
cv.destroyAllWindows()
My error is: raise e.ag_error_metadata.to_exception(e)
What causes this, and how can I fix it?
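A hedged debugging sketch, not a confirmed fix: the ag_error_metadata exception is raised from inside model.predict, so a first step is to confirm the ROI shape matches what the saved model expects (the 28x28 grayscale assumption comes from the code above):
print(model.input_shape)  # e.g. (None, 28, 28, 1) if the model was trained on 28x28 grayscale crops
print(roi.shape)          # should match the model's expected input, plus the batch dimension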