How to correctly pass a continuous video stream to PyQt5 using QThread for face recognition? - pyqt5

I want to pass a continuous video stream to a PyQt5 QLabel named label_cam so the video shows up in the UI, using QThread, but it keeps failing to show the stream on the QLabel. The stream will later be used to recognize people.
I have been trying to connect the "change_Pixmap" signal from the VideoThread class to the "set_image" function of the "label_cam" object, but I guess the flow of the code or the variable assignment is wrong. Below is my code.
class FaceRecogScreen(QDialog):
    def __init__(self):
        super(FaceRecogScreen, self).__init__()
        uic.loadUi("face_recog.ui", self)
        self.update_date_time()
        self.pushButton_back.clicked.connect(self.back_to_main3)
        self.load_model()

    def load_model(self):
        self.prototxt = "deploy.prototxt.txt"
        self.model = "res10_300x300_ssd_iter_140000.caffemodel"
        print("[INFORMATION] Loading model....")
        self.net = cv2.dnn.readNetFromCaffe(self.prototxt, self.model)
        weight = "facenet_keras_weights.h5"
        self.model2 = load_model('FaceNetModel.h5')
        self.model2.load_weights(weight)
        self.collected_encodings = pickle.loads(open('face_encoding.pickle', "rb").read())
        infile = open('face_encoding', 'rb')
        data = pickle.load(infile)
        self.knownEncodings, self.knownNames = data['encodings'], data['names']
        self.knownEncodings = np.array(self.knownEncodings)
        self.knownNames = np.array(self.knownNames)
        self.clf = svm.SVC(gamma="scale", probability=True, tol=0.01)
        self.clf.fit(self.knownEncodings, self.knownNames)

        # self.label_cam = VideoLabel()
        self.thread = VideoThread(self)
        self.thread.change_Pixmap.connect(self.set_image)
        # call the run() function in VideoThread class
        self.thread.start()

        # self.thread.change_Pixmap.connect(self.label_cam.set_image)
        # # call the run() function in VideoThread class
        # self.thread.start()
        # layout = self.layout()
        # layout.addWidget(self.label_cam)
        # self.thread.run.start()

    def update_date_time(self):
        # Get the current date and time
        date_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.label_datetime.setText(date_time)
        # Update the date and time in the table

    def back_to_main3(self):
        pushButton_back = WelcomeScreen()
        widget.addWidget(pushButton_back)
        widget.setCurrentIndex(widget.currentIndex() + 1)

    def set_image(self, frame):
        self.setPixmap(QPixmap.fromImage(frame))


class VideoThread(QtCore.QThread):
    change_Pixmap = QtCore.pyqtSignal(QtGui.QImage)

    def run(self):
        cap = cv2.VideoCapture(1)
        while True:
            ret, frame = cap.read()
            if ret:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = frame.shape
                bytesPerLine = ch * w
                convertToQtFormat = QtGui.QImage(frame.data, w, h, bytesPerLine, QtGui.QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, QtCore.Qt.KeepAspectRatio)
                self.change_Pixmap.emit(p)
                (h, w) = frame.shape[:2]
                blob = cv2.dnn.blobFromImage(cv2.resize(
                    frame, (160, 160)), 1.0, (300, 300), (104, 177, 123))
                self.net.setInput(blob)
                detections = self.net.forward()
                self.frame = frame
                # self.frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                cv2.normalize(frame, None, 0, 1.0, cv2.NORM_MINMAX, dtype=cv2.CV_32F)
                pixels = np.expand_dims(frame, axis=0)
                encode = self.model2.predict(pixels)
                face_name = []
                for encoding in encode:
                    name = self.clf.predict([encoding])
                    face_name.extend(name)
                for i in range(0, detections.shape[2]):
                    confidence = detections[0, 0, i, 2]
                    if confidence < 0.5:
                        continue
                    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                    (startX, startY, endX, endY) = box.astype("int")
                    text = "{:.2f}%".format(confidence * 100)
                    y = startY - 10 if startY - 10 > 10 else startY * 10
                    if name == 'unknown':
                        cv2.rectangle(frame, (startX, y), (endX, endY), (0, 0, 255), 2)
                        cv2.putText(frame, name, (startX, startY),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
                    else:
                        cv2.rectangle(frame, (startX, y), (endX, endY), (0, 255, 0), 2)
                        cv2.putText(frame, name[0], (startX, startY),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
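
One thing that stands out: set_image is a method on the QDialog and calls self.setPixmap(), but QDialog has no setPixmap(); the pixmap has to go to the QLabel itself, e.g. self.label_cam.setPixmap(...). Note also that VideoThread.run references self.net, self.model2 and self.clf, which are attributes of the dialog, not of the thread. Below is a minimal, self-contained sketch of just the display path; it assumes your .ui file provides a QLabel named label_cam (a plain QLabel stands in for it here), and the recognition code is left out:

import sys
import cv2
from PyQt5 import QtCore, QtGui
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QApplication, QDialog, QLabel, QVBoxLayout

class VideoThread(QtCore.QThread):
    change_Pixmap = QtCore.pyqtSignal(QtGui.QImage)

    def run(self):
        cap = cv2.VideoCapture(0)  # use the index that matches your camera
        while True:
            ret, frame = cap.read()
            if not ret:
                continue
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            h, w, ch = rgb.shape
            image = QtGui.QImage(rgb.data, w, h, ch * w, QtGui.QImage.Format_RGB888)
            # .copy() detaches the QImage from the numpy buffer run() reuses
            self.change_Pixmap.emit(image.scaled(640, 480, QtCore.Qt.KeepAspectRatio).copy())

class FaceRecogScreen(QDialog):
    def __init__(self):
        super().__init__()
        self.label_cam = QLabel(self)   # stands in for the label from face_recog.ui
        layout = QVBoxLayout(self)
        layout.addWidget(self.label_cam)
        self.thread = VideoThread(self)
        self.thread.change_Pixmap.connect(self.set_image)
        self.thread.start()

    def set_image(self, frame):
        # set the pixmap on the QLabel, not on the dialog itself
        self.label_cam.setPixmap(QPixmap.fromImage(frame))

if __name__ == "__main__":
    app = QApplication(sys.argv)
    screen = FaceRecogScreen()
    screen.show()
    sys.exit(app.exec_())

The cross-thread connect is safe here: PyQt queues the signal to the GUI thread, so the QLabel is only touched from the main thread.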

Related

Set timer on detected object

I'm using YOLO to detect objects, but I want to set a timer for the detected objects. Can anyone help me?
I want the object detection to run for a limited time for my project.
I've tried my best, but I don't have any idea how to do it.
Here is my code:
import cv2 as cv
import numpy as np

cap = cv.VideoCapture(0)
whT = 320
confThreshold = 0.1
nmsThreshold = 0.4

classesFile = "coco.names"
classNames = []
with open(classesFile, 'rt') as f:
    classNames = [line.strip() for line in f.readlines()]

modelConfiguration = "yolov4.cfg"
modelWeights = "yolov4.weights"
net = cv.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)

def findObjects(outputs, img):
    hT, wT, cT = img.shape
    bbox = []
    classIds = []
    confs = []
    for output in outputs:
        for det in output:
            scores = det[5:]
            classId = np.argmax(scores)
            confidence = scores[classId]
            if confidence > confThreshold:
                w, h = int(det[2] * wT), int(det[3] * hT)
                x, y = int((det[0] * wT) - w / 2), int((det[1] * hT) - h / 2)
                bbox.append([x, y, w, h])
                classIds.append(classId)
                confs.append(float(confidence))
    indices = cv.dnn.NMSBoxes(bbox, confs, confThreshold, nmsThreshold)
    font = cv.FONT_HERSHEY_PLAIN
    for i in indices:
        label = str(classNames[classIds[i]])
        x, y, w, h = bbox[i]
        # print(x, y, w, h)
        cv.rectangle(img, (x, y), (x + w, y + h), (255, 0, 255), 2)
        cv.putText(img, label, (x, y + 30), font, 3, (0, 0, 0), 3)
        print("Jenis Mobil: " + label)  # "Jenis Mobil" is Indonesian for "car type"
        # cv.putText(img, f'{classNames[classIds[i]].upper()} {int(confs[i]*100)}%', (x, y - 10), cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 2)

while True:
    success, img = cap.read()
    blob = cv.dnn.blobFromImage(img, 1 / 255, (whT, whT), [0, 0, 0], 1, crop=False)
    net.setInput(blob)
    layersNames = net.getLayerNames()
    outputNames = [(layersNames[i - 1]) for i in net.getUnconnectedOutLayers()]
    outputs = net.forward(outputNames)
    findObjects(outputs, img)
    cv.imshow('Image', img)
    key = cv.waitKey(1)
    if key == 27:
        break

cap.release()
cv.destroyAllWindows()
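
If "set a timer" means limiting how long detection runs, one common approach is to compare time.time() against a start timestamp inside the loop. A minimal sketch, assuming a hypothetical DETECTION_SECONDS parameter and the cap, net, whT, confThreshold and findObjects defined above:

import time

DETECTION_SECONDS = 10  # hypothetical: run detection for 10 seconds only
start = time.time()

while True:
    success, img = cap.read()
    if not success:
        break
    if time.time() - start < DETECTION_SECONDS:
        # detection is only performed inside the time window
        blob = cv.dnn.blobFromImage(img, 1 / 255, (whT, whT), [0, 0, 0], 1, crop=False)
        net.setInput(blob)
        layersNames = net.getLayerNames()
        outputNames = [layersNames[i - 1] for i in net.getUnconnectedOutLayers()]
        findObjects(net.forward(outputNames), img)
    cv.imshow('Image', img)
    if cv.waitKey(1) == 27:
        break

If you instead want a per-object timer (how long each object has been visible), you would need to track detections across frames, which is a tracking problem rather than a detection one.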

How to use trained yolov7 best.pt file as "vehicle plate detection" application in python?

I have trained a model using the yolov7 colab notebook https://colab.research.google.com/drive/1X9A8odmK4k6l26NDviiT6dd6TgR-piOa#scrollTo=nD-uPyQ_2jiN. After training I got a file named best.pt, and I want to use it as a service/application in Python. Below is a YOLOv3 example that I created; it uses .weights files, so how can I use a .pt file?
import cv2
import numpy as np
import time
import os

def getPhoto():
    ROOT_DIR = os.path.dirname(__file__)
    URL = "http://192.168.1.3:4747/video"
    PC_CAM = 0
    net = cv2.dnn.readNet(
        f"{ROOT_DIR}\\yolov3_custom_final.weights",
        f"{ROOT_DIR}\\yolov3_custom.cfg",
    )
    classes = []
    with open(f"{ROOT_DIR}\\classes.txt", "r") as f:
        classes = f.read().splitlines()
    timeElapsed = 0
    wCam, hCam = 640, 360
    font = cv2.FONT_HERSHEY_PLAIN
    colors = np.random.uniform(0, 255, size=(2, 3))
    cap = cv2.VideoCapture(URL)
    cap.set(3, wCam)
    cap.set(4, hCam)
    while True:
        _, img = cap.read()
        cv2.imshow("Detection Screen", img)
        if cv2.waitKey(1) == ord("c"):
            break
    height, width, _ = img.shape
    blob = cv2.dnn.blobFromImage(
        img, 1 / 255, (416, 416), (0, 0, 0), swapRB=True, crop=False
    )
    net.setInput(blob)
    output_layers_names = net.getUnconnectedOutLayersNames()
    layerOutputs = net.forward(output_layers_names)
    boxes = []
    confidences = []
    class_ids = []
    for output in layerOutputs:
        for detection in output:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.6:
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                class_ids.append(class_id)
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.2, 0.4)
    if len(indexes) > 0:
        for i in indexes.flatten():
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            confidence = str(round(confidences[i], 2))
            color = colors[i]
            cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
            cv2.putText(
                img,
                label + " " + confidence,
                (x, y + 20),
                font,
                2,
                (255, 255, 0),
                2,
            )
    cv2.imwrite(f"{ROOT_DIR}\\DetectedPhoto.jpg", img)
    print("Image Saved")

getPhoto()
I want to detect vehicle plates using the .pt file, crop the plates, and save them as .jpeg images.
You can do this with detect.py inside the yolov7 folder. Run this:
python detect.py --weights best.pt --source image.jpg
You can download yolov7 like this:
git clone https://github.com/WongKinYiu/yolov7.git
Inside yolov7 you will find detect.py.
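
Alternatively, if you want the crops programmatically rather than via detect.py, the yolov7 repo ships a hubconf.py, so best.pt can usually be loaded through torch.hub. A hedged sketch (the 'car.jpg' input file and the 0.5 confidence cut-off are assumptions, not part of the question):

import cv2
import torch

# loads WongKinYiu/yolov7 via its hubconf.py; downloads the repo on first run
model = torch.hub.load('WongKinYiu/yolov7', 'custom', 'best.pt')

img = cv2.imread('car.jpg')
results = model(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))  # model expects RGB

# results.xyxy[0] holds one row per detection: x1, y1, x2, y2, conf, cls
for n, (x1, y1, x2, y2, conf, cls) in enumerate(results.xyxy[0].tolist()):
    if conf < 0.5:  # hypothetical confidence threshold
        continue
    plate = img[int(y1):int(y2), int(x1):int(x2)]
    cv2.imwrite(f'plate_{n}.jpeg', plate)  # save each plate crop as .jpeg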

ValueError: cannot reshape array of size 692224 into shape (1,3,416,416)-yolov5 cpu error

I was trying to run my YOLOv5 custom model on CPU and I got this error.
This is the GitHub page I have used: https://github.com/Amelia0911/onnxruntime-for-yolov5
import onnxruntime
from models.utils import *
import time

IMAGE_SIZE = (416, 416)
CONF_TH = 0.3
NMS_TH = 0.45
CLASSES = 80

model = onnxruntime.InferenceSession("models_train/bestnone.onnx")
anchor_list = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]
stride = [8, 16, 32]

def draw(img, boxinfo, dst, id):
    for *xyxy, conf, cls in boxinfo:
        label = '{}|{}'.format(int(cls), '%.2f' % conf)
        plot_one_box(xyxy, img, label=label, color=[0, 0, 255])
    cv2.imencode('.jpg', img)[1].tofile(dst)

def detect(image):
    img = cv2.resize(image, IMAGE_SIZE)
    img = img.transpose(2, 0, 1)
    dataset = (img, image)
    img = dataset[0].astype('float32')
    img_size = [dataset[0].shape[1], dataset[0].shape[2]]
    img /= 255.0
    img = img.reshape(1, 3, img_size[0], img_size[1])
    inputs = {model.get_inputs()[0].name: img}
    pred = torch.tensor(model.run(None, inputs)[0])
    anchor = torch.tensor(anchor_list).float().view(3, -1, 2)
    area = img_size[0] * img_size[1]
    size = [int(area / stride[0] ** 2), int(area / stride[1] ** 2), int(area / stride[2] ** 2)]
    feature = [[int(j / stride[i]) for j in img_size] for i in range(3)]
    y = []
    y.append(pred[:, :size[0] * 3, :])
    y.append(pred[:, size[0] * 3:size[0] * 3 + size[1] * 3, :])
    y.append(pred[:, size[0] * 3 + size[1] * 3:, :])
    grid = []
    for k, f in enumerate(feature):
        grid.append([[i, j] for j in range(f[0]) for i in range(f[1])])
    z = []
    for i in range(3):
        src = y[i]
        xy = src[..., 0:2] * 2. - 0.5
        wh = (src[..., 2:4] * 2) ** 2
        dst_xy = []
        dst_wh = []
        for j in range(3):
            dst_xy.append((xy[:, j * size[i]:(j + 1) * size[i], :] + torch.tensor(grid[i])) * stride[i])
            dst_wh.append(wh[:, j * size[i]:(j + 1) * size[i], :] * anchor[i][j])
        src[..., 0:2] = torch.from_numpy(np.concatenate((dst_xy[0], dst_xy[1], dst_xy[2]), axis=1))
        src[..., 2:4] = torch.from_numpy(np.concatenate((dst_wh[0], dst_wh[1], dst_wh[2]), axis=1))
        z.append(src.view(1, -1, CLASSES + 5))  # 85
    pred = torch.cat(z, 1)
    pred = nms(pred, CONF_TH, NMS_TH)
    for det in pred:
        if det is not None and len(det):
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], dataset[1].shape).round()
    if det is None:
        return np.array([])
    return det

if __name__ == '__main__':
    import time
    src = 'Temp-640x640.jpg'
    t1 = time.time()
    img = cv2.imdecode(np.fromfile(src, dtype=np.uint8), -1)
    print(IMAGE_SIZE)
    results = detect(img)
    t2 = time.time()
    print(results)
    print("onnxruntime time = ", t2 - t1)
    if results is not None and len(results):
        draw(img, results, 'dst3.jpg', str(id))
    print('Down!')
When I run this code I get the following error:
File "C:\Users\acer\.spyder-py3\metallic surface defect detection\3_onnx_cpu_detec.py", line 85, in <module>
results = detect(img)
File "C:\Users\acer\.spyder-py3\metallic surface defect detection\3_onnx_cpu_detec.py", line 30, in detect
img = img.reshape(1, 3, img_size[0], img_size[1])
ValueError: cannot reshape array of size 692224 into shape (1,3,416,416)
I think it is a color channel issue. I have tried to fix it, but it doesn't work. If someone knows how to fix it, please let me know. Thanks in advance.
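
The numbers in the error message point at the cause: 416 x 416 x 3 = 519168, but the array has 692224 = 416 x 416 x 4 elements, so the decoded image carries four channels. cv2.imdecode(..., -1) means IMREAD_UNCHANGED, which keeps an alpha channel if the file has one. A hedged fix is to force three channels before the transpose/reshape:

import cv2
import numpy as np

src = 'Temp-640x640.jpg'
# decode as 3-channel BGR regardless of what the file contains
img = cv2.imdecode(np.fromfile(src, dtype=np.uint8), cv2.IMREAD_COLOR)

# or, if IMREAD_UNCHANGED is required, drop the alpha channel afterwards:
if img.ndim == 3 and img.shape[2] == 4:
    img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)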

How can I increase the resolution of my input video for my real time Object Detection API?

I am having trouble increasing the resolution of the input video for real-time object detection. I have tried increasing the input resolution of the video being read, but the output gives me an error.
I am streaming the video using a webcam program.
The only dimensions that work are 640 x 480. Anything bigger gives me an output error. The resolution only stays 1920 x 1080 until just before the frame window; then it switches back to 640 x 480 and gives me an error when the video saves.
import numpy as np
import tensorflow as tf
from object_detection.utils import visualization_utils as vis_util
import cv2 as cv
from time import time
import serial

refPoints = []

# draw ROI
def image_crop(event, x, y, flags, param):
    global refPoints
    if event == cv.EVENT_LBUTTONDOWN:
        refPoints = [(x, y)]
    elif event == cv.EVENT_LBUTTONUP:
        refPoints.append((x, y))

# run inference on single image
def run_inference_for_single_image(image, graph, sess):
    with graph.as_default():
        boxes = tf.get_default_graph().get_tensor_by_name('detection_boxes:0')
        scores = tf.get_default_graph().get_tensor_by_name('detection_scores:0')
        classes = tf.get_default_graph().get_tensor_by_name('detection_classes:0')
        image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
        rboxes, rscores, rclasses = sess.run([boxes, scores, classes], feed_dict={image_tensor: np.expand_dims(image, 0)})
    return rboxes[0], rscores[0], rclasses[0]

def main():
    # 0 - load model
    PATH_TO_MODEL = 'C:\\frozen_inference_graph.pb'
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_MODEL, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    sess = tf.Session(graph=detection_graph)

    # 1 - define video streamer & serial port
    streamer = 0
    out = cv.VideoWriter('output.avi', cv.VideoWriter_fourcc(*'XVID'), 20, (1920, 1080))  # previously 25.0, (640, 480)
    port = 'COM4'
    ser = serial.Serial(port, 9600)

    # 2 - draw region of interest (ROI)
    cap = cv.VideoCapture(cv.CAP_DSHOW)
    ret, frame = cap.read()  # grab a single frame to select the ROI on
    clone = frame.copy()
    cv.namedWindow('frame')
    cv.resizeWindow('frame', (1920, 1080))
    cv.setMouseCallback('frame', image_crop)
    print(frame.shape)
    print('ROI selecting...')
    while True:
        cv.imshow('frame', frame)
        if cv.waitKey(1) & 0xFF == ord('r'):
            print('ROI selection reset.')
            frame = clone.copy()
        elif cv.waitKey(1) & 0xff == ord('c'):
            print('ROI selected.')
            cap.release()
            break
    if len(refPoints) == 2:
        cv.rectangle(frame, refPoints[0], refPoints[1], (0, 255, 0), 2)
        cv.imshow('frame', frame)
        if cv.waitKey(0) & 0xFF == ord('q'):
            cap.release()
            cv.destroyAllWindows()
    else:
        print('only one ROI allowed.')
        cap.release()
        cv.destroyAllWindows()
        return -1

    # 3 - run inferences on ROI
    t1 = 0
    t2 = 0
    t3 = 0
    i = 0
    cap = cv.VideoCapture(streamer)
    cv.namedWindow('frame')
    cv.resizeWindow('frame', (1920, 1080))
    while True:
        # image reading & cropping
        t0 = time()
        ret, frame = cap.read()
        if frame is None:
            break
        cropped = frame[refPoints[0][1]:refPoints[1][1], refPoints[0][0]:refPoints[1][0]]
        dt = time() - t0
        t1 += dt
        # image inference
        t0 = time()
        image = cropped.copy()
        boxes, scores, classes = run_inference_for_single_image(image, detection_graph, sess)
        boxes = boxes[scores > 0.95]
        classes = classes[scores > 0.95]
        boxes1 = boxes[classes == 1]
        boxes2 = boxes[classes == 2]
        dt = time() - t0
        t2 += dt
        t0 = time()
        cv.rectangle(frame, refPoints[0], refPoints[1], (0, 255, 0), 2)
        if len(boxes1):
            # vis_util.draw_bounding_boxes_on_image_array(image, boxes)
            cv.rectangle(frame, refPoints[0], refPoints[1], (0, 0, 255), 4)
            ser.write(b'1')
        else:
            ser.write(b'0')
        # if len(boxes2):
        #     cv.rectangle(frame, refPoints[0], refPoints[1], (255, 0, 0), 4)
        # print(frame.shape)
        cv.imshow('frame', frame)
        out.write(frame)
        dt = time() - t0
        t3 += dt
        i += 1
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    print("image reading: average %f sec/frame" % (t1 / i))
    print("image processing: average %f sec/frame" % (t2 / i))
    print("image showing/saving: average %f sec/frame" % (t3 / i))
    ser.close()
    sess.close()
    cap.release()
    out.release()
    cv.destroyAllWindows()

if __name__ == '__main__':
    main()
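
Two things are worth checking here: cv.resizeWindow only resizes the display window, not the captured frames, so the camera has to be asked for 1920 x 1080 via cap.set; and cv.VideoWriter silently produces a broken file whenever the size it was constructed with differs from the frames passed to write(). A minimal sketch, assuming camera index 0 and the DirectShow backend:

import cv2 as cv

cap = cv.VideoCapture(0, cv.CAP_DSHOW)
cap.set(cv.CAP_PROP_FRAME_WIDTH, 1920)   # request the resolution from the driver
cap.set(cv.CAP_PROP_FRAME_HEIGHT, 1080)  # the driver may fall back to a supported size

ret, frame = cap.read()
h, w = frame.shape[:2]                   # the size the camera actually delivered
print('capture size:', w, 'x', h)

# size the writer from the real frames so they always match
out = cv.VideoWriter('output.avi', cv.VideoWriter_fourcc(*'XVID'), 20, (w, h))

If the camera does not support 1920 x 1080, cap.set is ignored and frames stay at the default size, which would explain both the 640 x 480 output and the error when saving.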

how to differentiate background from foreground in pyqt5

I am trying to differentiate bits of background from the foreground image in PyQt5. I am splitting a QGraphicsItem into bits and saving each as a PNG image based on a 16x16 pixel grid.
I have tried the following code:
from PyQt5.QtCore import (QByteArray, QDataStream, QIODevice, QObject, QMimeData, QPointF, QPoint, Qt, pyqtProperty, QRect, QTimer, QLineF, QEvent, QRectF)
from PyQt5.QtGui import QColor, QDrag, QPainter, QPixmap, QFont, QFontMetrics, QBrush, QLinearGradient, QIcon, QPen, QPainterPath, QKeySequence, QTransform, QCursor, QMouseEvent, QClipboard
from PyQt5.QtWidgets import QSplitterHandle, QApplication, QGraphicsScene, QGraphicsView, QWidget, QFontDialog, QHBoxLayout, QLabel, QMenu, QGraphicsTextItem, QColorDialog, QGraphicsItemGroup, QGraphicsPixmapItem, QGraphicsRectItem, QMessageBox, QFormLayout, QSizePolicy, QScrollArea, QShortcut, QPushButton, QDialogButtonBox, QGroupBox, QLineEdit, QMainWindow, QInputDialog, QGraphicsPathItem, QDialog, QVBoxLayout, QGraphicsItem, QStatusBar

class GraphicsSceneClass(QGraphicsScene):
    global selectedObjType

    def __init__(self, parent=None):
        super(GraphicsSceneClass, self).__init__(parent)
        self.gridOn = 0
        self.setSceneRect(0, 0, 1920, 1080)
        self.setItemIndexMethod(QGraphicsScene.NoIndex)
        self.setBackgroundBrush(QBrush(Qt.black))

    def mousePressEvent(self, event):
        sampleTransform = QTransform()
        objectAtMouse = self.itemAt(event.scenePos(), sampleTransform)
        if objectAtMouse and event.button() == Qt.LeftButton:
            objectAtMouse.setSelected(True)
        elif objectAtMouse == None and event.button() == Qt.RightButton:
            # pass
            self.grid = self.TargPosForLine(event.scenePos(), "ForLine")
            self.grid = self.TargPosForLine(event.scenePos(), "ForLine")

    def TargPosForLine(self, position, mode):
        clicked_column = int((position.y() // 16)) * 16
        clicked_row = int((position.x() // 16)) * 16
        if clicked_column < 0:
            clicked_column = 0
        if clicked_row < 0:
            clicked_row = 0
        if mode == "ForRect":
            return QRect(clicked_row, clicked_column, 16, 16)
        elif mode == "ForLine":
            return QPointF(clicked_row, clicked_column)

    def DeselectItems(self):
        selectedObjects = self.selectedItems()
        for object in selectedObjects:
            object.setSelected(False)

    def mouseReleaseEvent(self, event):
        # self.DeselectItems()
        pass

    def textassign(self):
        self.dialog = QDialog()
        self.Gearname = QLabel("Name")
        self.Gearnameedit = QLineEdit()
        buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        buttonBox.accepted.connect(self.accept)
        # buttonBox.rejected.connect(self.reject)
        hbox = QFormLayout()
        hbox.addRow(self.Gearname, self.Gearnameedit)
        hbox.addRow(buttonBox)
        self.dialog.setLayout(hbox)
        self.dialog.exec()

    def accept(self):
        text = self.Gearnameedit.text()
        # self.objectdrop._text
        self.font = QFont()
        self.font.setPixelSize(20)
        self.font.setBold(False)
        self.font.setFamily("Arial Unicode MS")
        self.text = QGraphicsTextItem()
        self.text.setPlainText(str(text))
        self.text.setFont(self.font)
        self.text.setDefaultTextColor(Qt.darkGreen)
        self.addItem(self.text)
        self.text.setPos(self.grid.x() + 20, self.grid.y() - 35)
        # print("y", y)
        self.text.setFlag(QGraphicsItem.ItemIsSelectable)
        self.text.setFlag(QGraphicsItem.ItemIsMovable)
        self.text.setFlag(QGraphicsItem.ItemIgnoresTransformations)
        # self.objectdrop.setFlag(QGraphicsItem.ItemIsSelectable)
        # self.text._position = QPointF(y.x() + 20, y.y() - 35)
        self.text._type1 = "Text"
        self.text._txtvalue = text
        print(self.text._txtvalue)
        self.dialog.close()

    def createbitmap(self, objectDrop, gridPos):
        w = 0
        h = 0
        self.objectdrop = objectDrop
        pixmap = QPixmap(objectDrop._width, objectDrop._height)
        pixmap.fill(Qt.transparent)
        painter1 = QPainter(pixmap)
        painter1.setPen(Qt.NoPen)
        painter1.setBrush(objectDrop.brush())
        # painter1.setClipPath(objectDrop.path())
        painter1.setRenderHint(QPainter.HighQualityAntialiasing)
        # painter1.setOpacity(0.5)
        painter1.setBackgroundMode(0)
        painter1.drawPath(objectDrop.path())
        count = objectDrop._width // 16
        pixmap.save("point.png")
        # mid = self.TargPosForLine(objectDrop._midPos, "ForRect")
        rect = QRect(16, 0, 16, 16)
        cropped = QPixmap(pixmap.copy(rect))
        cropped.save("bit.png")

class MainWindow(QMainWindow):
    global selectedObjType
    # global item

    def __init__(self):
        super(MainWindow, self).__init__()
        self.scene = GraphicsSceneClass()
        MainWindow.obj = self.scene
        self.view = QGraphicsView(self.scene)
        # self.view.setDragMode(QGraphicsView.RubberBandDrag)
        self.view.setMouseTracking(True)
        self.view.setRenderHint(QPainter.HighQualityAntialiasing)
        self.widg = QWidget()
        self.horizontalLayout = QHBoxLayout()
        self.horizontalLayout.addWidget(self.view)
        self.widg.setMouseTracking(True)
        self.widget = QWidget()
        self.widget.setLayout(self.horizontalLayout)
        self.setCentralWidget(self.widget)
        self.obj = None

    def deleteItem(self):
        for item in self.scene.selectedItems():
            self.scene.removeItem(item)

    def selectAll(self):
        for item in self.scene.items():
            item.setSelected(True)

    def GridOnOffControl(self):
        if self.scene.gridOn == 0:
            self.scene.gridOn = 1
        else:
            self.scene.gridOn = 0
        if self.scene.gridOn == 1:
            self.scene.setBackgroundBrush(QBrush(QPixmap('images/Grid.png')))
        else:
            self.scene.setBackgroundBrush(QBrush(Qt.black))

    def contextMenuEvent(self, event):
        contextMenu = QMenu(self)
        Cutaction = contextMenu.addAction("Cut")
        Coaction = contextMenu.addAction("Copy")
        Paaction = contextMenu.addAction("Paste")
        Propaction = contextMenu.addAction("draw1")
        quitAct = contextMenu.addAction("quit")
        action = contextMenu.exec_(self.mapToGlobal(event.pos()))
        if action == quitAct:
            self.close()
        elif action == Propaction:
            objectDrop = None
            painterPath = QPainterPath()
            painterPath.moveTo(10.0, 0.0)
            painterPath.arcTo(0.0, 0.0, 4.0, 4.0, 90.0, 90.0)
            painterPath.lineTo(0.0, 4.0)
            painterPath.arcTo(0.0, 2.0, 4.0, 4.0, 180.0, 90.0)
            painterPath.lineTo(112 - 6, 6.0)
            painterPath.arcTo(112 - 8, 2.0, 4.0, 4.0, 270.0, 90.0)
            painterPath.lineTo(112 - 4, 2.0)
            painterPath.arcTo(112 - 8, 0.0, 4.0, 4.0, 0.0, 90.0)
            gradient = QLinearGradient(1, 1, 1, 5)
            gradient.setColorAt(0, QColor(Qt.gray))
            gradient.setColorAt(0.5, QColor(192, 192, 192, 255))
            gradient.setColorAt(1, QColor(Qt.darkGray))
            painterPath.closeSubpath()
            objectDrop = QGraphicsPathItem()
            objectDrop.setPath(painterPath)
            objectDrop.setBrush(QBrush(gradient))
            objectDrop.setPos(self.scene.grid)
            objectDrop._positon = QPointF(self.scene.grid.x(), self.scene.grid.y())
            objectDrop._endPos = QPointF(objectDrop._positon.x() + 112, objectDrop._positon.y())
            objectDrop._width = objectDrop._endPos.x() - objectDrop._positon.x()
            objectDrop._height = 16
            self.scene.addItem(objectDrop)
            objectDrop.setFlag(QGraphicsItem.ItemIsSelectable)
            objectDrop.setFlag(QGraphicsItem.ItemIsMovable)
            self.scene.textassign()
            self.scene.createbitmap(objectDrop, self.scene.grid)

    def selectedItem(self):
        items = self.scene.selectedItems()
        if len(items) == 1:
            return items[0]
        return None

if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    mainWindow = MainWindow()
    mainWindow.show()
    sys.exit(app.exec_())
If I give rect = QRect(16, 0, 16, 16) it will give image 1.
If rect = QRect(0, 16, 16, 16) it will give image 2.
I want to neglect image-2 kind of images, i.e. images that contain only background.
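
Since createbitmap fills the pixmap with Qt.transparent before drawing the item's path, a tile that is pure background is one whose pixels all still have zero alpha. A hedged sketch, using a hypothetical helper tile_has_foreground:

from PyQt5.QtCore import QRect

def tile_has_foreground(pixmap, rect):
    # scan the 16x16 tile; any pixel with non-zero alpha was drawn by the item
    image = pixmap.copy(rect).toImage()
    for y in range(image.height()):
        for x in range(image.width()):
            if image.pixelColor(x, y).alpha() > 0:
                return True
    return False

# inside createbitmap, save only tiles that contain part of the item:
# for n in range(count):
#     rect = QRect(n * 16, 0, 16, 16)
#     if tile_has_foreground(pixmap, rect):
#         pixmap.copy(rect).save("bit%d.png" % n)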