Capture USB camera with remote shutter by Arduino - video-capture

I am using an Arduino UNO to make a remote shutter for a USB camera,
but I can't use waitKey to take a picture via the Arduino.
Could you please help?
import cv2
import os
import serial
from datetime import datetime

now = datetime.now()
path = 'C:\\Users\\Minh\\Documents\\TEST'

# Build a YEAR\MONTH\DAY folder tree for today's photos
pYear = path + '\\' + str(now.year)
if not os.path.exists(pYear):
    os.makedirs(pYear)
pMonth = pYear + '\\' + str(now.month)
if not os.path.exists(pMonth):
    os.makedirs(pMonth)
pDay = pMonth + '\\' + str(now.day)
if not os.path.exists(pDay):
    os.makedirs(pDay)

file_name = str(now.strftime('%Y-%m-%d_%H%M%S'))
print(file_name)
os.chdir(pDay)

# Turn on the camera, capture and save the image
cap = cv2.VideoCapture(1, cv2.CAP_DSHOW)
ArdunioSerial = serial.Serial('COM4', 9600)

while True:
    ret, frame = cap.read()
    if not ret:
        print('Failed to grab frame!')
        break
    cv2.imshow('CAM', frame)
    k = cv2.waitKey(1)
    data = ArdunioSerial.readline()
    databutton = data.decode('utf8')
    print(databutton)
    if k == 27:  # press ESC to exit
        print('Closing...')
        break
    if databutton == 1:  # take a photo when the Arduino button is pressed
        now = datetime.now()
        img_name = str(now.strftime('%Y-%m-%d_%H%M%S')) + '.jpg'
        cv2.imwrite(img_name, frame)
        print(img_name)

cap.release()
cv2.destroyAllWindows()
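A likely cause: ArdunioSerial.readline() blocks until a full line arrives, so the display loop (and cv2.waitKey with it) stalls between button presses, and databutton == 1 compares the decoded string against an integer, which is never true. Below is a minimal sketch of a fix, assuming the Arduino prints the line 1 on each button press; the file naming is simplified relative to the question's timestamped scheme:

import cv2
import serial
from datetime import datetime

cap = cv2.VideoCapture(1, cv2.CAP_DSHOW)
ArdunioSerial = serial.Serial('COM4', 9600, timeout=0)  # timeout=0: never block

while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('CAM', frame)
    if cv2.waitKey(1) == 27:  # ESC closes the window
        break
    # Only read when bytes are waiting, so the loop keeps running smoothly.
    if ArdunioSerial.in_waiting > 0:
        databutton = ArdunioSerial.readline().decode('utf8').strip()
        if databutton == '1':  # compare strings: b'1\r\n' decodes to '1'
            img_name = datetime.now().strftime('%Y-%m-%d_%H%M%S') + '.jpg'
            cv2.imwrite(img_name, frame)

cap.release()
cv2.destroyAllWindows()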

Related

Using a webcam with a trained Roboflow model

I'm trying to run a trained Roboflow model using my webcam in Visual Studio Code. The webcam does load up alongside the popup, but it's just a tiny rectangle in the corner and you can't see anything else. If I change "image", image to "image", 1 or something else in the cv2.imshow line, the webcam lights up for a second and then returns the error:
cv2.error: OpenCV(4.5.4) D:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
Here is my code, as obtained from a GitHub repository that Roboflow provides:
# load config
import json

with open('roboflow_config.json') as f:
    config = json.load(f)

ROBOFLOW_API_KEY = "********"
ROBOFLOW_MODEL = "penguins-ojf2k"
ROBOFLOW_SIZE = "416"
FRAMERATE = config["FRAMERATE"]
BUFFER = config["BUFFER"]

import asyncio
import cv2
import base64
import numpy as np
import httpx
import time

# Construct the Roboflow Infer URL
# (if running locally replace https://detect.roboflow.com/ with eg http://127.0.0.1:9001/)
upload_url = "".join([
    "https://detect.roboflow.com/",
    ROBOFLOW_MODEL,
    "?api_key=",
    ROBOFLOW_API_KEY,
    "&format=image",  # Change to json if you want the prediction boxes, not the visualization
    "&stroke=5"
])

# Get webcam interface via opencv-python
video = cv2.VideoCapture(0, cv2.CAP_DSHOW)

# Infer via the Roboflow Infer API and return the result
# Takes an httpx.AsyncClient as a parameter
async def infer(requests):
    # Get the current image from the webcam
    ret, img = video.read()

    # Resize to a fixed size to improve speed and save bandwidth
    img = cv2.resize(img, (2000, 1500))

    # Encode image to base64 string
    retval, buffer = cv2.imencode('.jpg', img)
    img_str = base64.b64encode(buffer)

    # Get prediction from Roboflow Infer API
    resp = await requests.post(upload_url, data=img_str, headers={
        "Content-Type": "application/x-www-form-urlencoded"
    })

    # Parse result image
    image = np.asarray(bytearray(resp.content), dtype="uint8")
    image = cv2.imdecode(image, cv2.IMREAD_COLOR)
    return image

# Main loop; infers at FRAMERATE frames per second until you press "q"
async def main():
    # Initialize
    last_frame = time.time()

    # Initialize a buffer of images
    futures = []

    async with httpx.AsyncClient() as requests:
        while True:
            # On "q" keypress, exit
            if cv2.waitKey(1) == ord('q'):
                break

            # Throttle to FRAMERATE fps and print actual frames per second achieved
            elapsed = time.time() - last_frame
            await asyncio.sleep(max(0, 1/FRAMERATE - elapsed))
            print((1/(time.time()-last_frame)), " fps")
            last_frame = time.time()

            # Enqueue the inference request and save it to our buffer
            task = asyncio.create_task(infer(requests))
            futures.append(task)

            # Wait until our buffer is big enough before we start displaying results
            if len(futures) < BUFFER * FRAMERATE:
                continue

            # Remove the first image from our buffer and
            # wait for it to finish loading (if necessary)
            image = await futures.pop(0)

            # And display the inference results
            cv2.imshow('image', image)

# Run our main loop
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
asyncio.run(main())

# Release resources when finished
video.release()
cv2.destroyAllWindows()
It looks like you're missing your model's version number, so the API is probably returning a 404 error, which OpenCV then tries to decode as an image.
I found your project on Roboflow Universe based on the ROBOFLOW_MODEL in your code; it looks like you're looking for version 3.
So try changing the line
ROBOFLOW_MODEL = "penguins-ojf2k"
to
ROBOFLOW_MODEL = "penguins-ojf2k/3"
It also looks like your model was trained at 640x640 (not 416x416) so you should change ROBOFLOW_SIZE to 640 as well for best results.
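Putting both fixes together, the top of the script would then read (a sketch; only these two assignments change, the rest of the script stays the same):

ROBOFLOW_MODEL = "penguins-ojf2k/3"  # model id plus version number
ROBOFLOW_SIZE = "640"                # matches the 640x640 training resolution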

Cannot open direct import of RAW from camera with crtwo2fits

I open a BULB image with gphoto2, but I cannot find a way to separate the bands of the RAW image. The crtwo2fits code could convert the images into FITS format, but cr2.py flags the file as closed:
from crtwo2fits import cr2

camera_file = cr2.CR2Image(self.camera.file_get(self.folder, self.file, gp.GP_FILE_TYPE_NORMAL))
data_type = 9  # for int32 numpy array
img = cr2.CR2Image.load(camera_file)
band = cr2._getExifValue(img, data_type)
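For reference, a minimal alternative sketch that sidesteps crtwo2fits entirely, assuming the CR2 has already been saved to disk and that the rawpy and astropy packages are acceptable substitutes (file names are illustrative):

import rawpy
import numpy as np
from astropy.io import fits

with rawpy.imread('capture.cr2') as raw:
    bayer = raw.raw_image.copy()   # full Bayer mosaic as a uint16 array
    colors = raw.raw_colors        # per-pixel color index: 0=R, 1=G, 2=B, 3=G2

# Write each Bayer band to its own FITS file as int32
for band in range(4):
    plane = np.where(colors == band, bayer, 0).astype(np.int32)
    fits.PrimaryHDU(plane).writeto('band_%d.fits' % band, overwrite=True)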

Any ideas about how to save the sound produced by runAndWait()?

I'm using pyttsx3 to convert text to speech.
import pyttsx3

def tts(text):
    engine = pyttsx3.init()
    engine.say(text)
    engine.runAndWait()
but the problem is that I can't save the sound to a file (which is so weird for a library like this).
I've tried some alternatives like espeak (the driver pyttsx3 uses), but the sound was bad even after tweaking some options.
If you have any suggestions for how I can save the sound, or names of other offline libraries offering good speech quality (even in other programming languages), that would be so helpful.
Thank you.
This may be a late answer, but I hope it will be useful :)
# pip install comtypes
import pyttsx3

engine = pyttsx3.init()
voices = engine.getProperty('voices')
voiceList = []
for voice in voices:
    voiceList.append(voice.name)
print("Voice List: ", voiceList)

def playItNow(textf, filename, useFile=True, rate=2, voice=voiceList[0]):
    # SAPI via comtypes is Windows-only
    from comtypes.client import CreateObject
    engine = CreateObject("SAPI.SpVoice")
    engine.rate = rate  # can be -10 to 10
    for v in engine.GetVoices():
        if v.GetDescription().find(voice) >= 0:
            engine.Voice = v
            break
    else:
        print("Voice not found")
    if useFile:
        # treat textf as a path and read the text from that file
        datei = open(textf, 'r', encoding="utf8")
        text = datei.read()
        datei.close()
    else:
        text = textf
    # route the voice output into a WAV file instead of the speakers
    stream = CreateObject("SAPI.SpFileStream")
    from comtypes.gen import SpeechLib
    stream.Open(filename, SpeechLib.SSFMCreateForWrite)
    engine.AudioOutputStream = stream
    engine.speak(text)
    stream.Close()
    # play the freshly written file back
    import winsound
    winsound.PlaySound(filename, winsound.SND_FILENAME)

playItNow("TextFile.txt", "test_2.wav", useFile=False, rate=-1)
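Note that the SAPI/comtypes approach above is Windows-only (as is winsound). As a cross-platform alternative, newer pyttsx3 releases expose a save_to_file method that covers exactly this; a minimal sketch (the output name is illustrative):

import pyttsx3

engine = pyttsx3.init()
engine.save_to_file('Hello world', 'output.wav')  # queue text for file output
engine.runAndWait()                               # drain the queue and write the file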

RGB-D video capturing using Kinect v1

I am trying to capture video using a Kinect v1 and am looking for the easiest way to do it. What would be an easy solution? I looked through Stack Overflow and found the following code.
import freenect
import cv2
import numpy as np

def nothing(x):
    pass

kernel = np.ones((5, 5), np.uint8)

def pretty_depth(depth):
    # clamp to the 10-bit range the Kinect delivers, then scale to 8 bits
    np.clip(depth, 0, 2**10 - 1, depth)
    depth >>= 2
    depth = depth.astype(np.uint8)
    return depth

while 1:
    dst = pretty_depth(freenect.sync_get_depth()[0])  # input from Kinect
    cv2.imshow('Video', dst)
    if cv2.waitKey(1) & 0xFF == ord('b'):
        break
How can I modify the above code so that I get RGB-D video, or RGB and depth frames with timestamps?
Solved using the following function:
freenect.runloop(depth=display_depth,
                 video=display_rgb,
                 body=body)
I saved images with cv2.imwrite inside the display_depth and display_rgb callbacks, using the data argument each callback receives. In the body callback I just shut freenect down when I was done.
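A minimal sketch of that approach, assuming the libfreenect Python wrapper's (dev, data, timestamp) callback signature; the output file naming is illustrative:

import freenect
import cv2
import numpy as np

def display_depth(dev, data, timestamp):
    depth8 = (data >> 2).astype(np.uint8)  # scale 10-bit depth into 8 bits
    cv2.imwrite('depth_%d.png' % timestamp, depth8)

def display_rgb(dev, data, timestamp):
    # freenect delivers RGB; OpenCV writes BGR, so convert before saving
    cv2.imwrite('rgb_%d.png' % timestamp, cv2.cvtColor(data, cv2.COLOR_RGB2BGR))

def body(dev, ctx):
    pass  # raise freenect.Kill here to stop the loop on some condition

freenect.runloop(depth=display_depth, video=display_rgb, body=body)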

PyAudio recording/capturing and stop/terminate in Python on Raspi

I am not an expert in Python and am trying to capture/record audio from a USB audio device.
It works fine from the command terminal.
But I want to make a program that just records audio and stops when I want.
I have heard about the PyAudio library, which has certain APIs to perform this job (like pyaudio.PyAudio(), pyaudio.PyAudio.open(), pyaudio.Stream, pyaudio.Stream.close, pyaudio.PyAudio.terminate(), ...).
Can somebody help me make a simple program for audio recording in Python?
Thank you.
I've just added relevant comments next to the commands; let me know if you want more clarification about anything.
import pyaudio, wave, sys

CHUNK = 8192
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
RECORD_SECONDS = 10
WAVE_OUTPUT_FILENAME = 'Audio_.wav'

p = pyaudio.PyAudio()  # initialize the audio system
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                input_device_index=0,
                frames_per_buffer=CHUNK)  # open the input stream

print("* recording")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
    data = stream.read(CHUNK)
    frames.append(data)
print("* done recording")

stream.stop_stream()  # stop audio recording
stream.close()        # close audio recording
p.terminate()         # shut the audio system down

# write the captured frames out as a WAV file
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
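Since the question asks for recording that stops on demand rather than after a fixed RECORD_SECONDS, here is a minimal sketch that records until you press Ctrl+C (the output name is illustrative):

import pyaudio
import wave

CHUNK = 8192
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100

p = pyaudio.PyAudio()
stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE,
                input=True, frames_per_buffer=CHUNK)

frames = []
print("* recording - press Ctrl+C to stop")
try:
    while True:  # keep reading until interrupted
        frames.append(stream.read(CHUNK))
except KeyboardInterrupt:
    print("* done recording")

stream.stop_stream()
stream.close()

wf = wave.open('on_demand.wav', 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
p.terminate()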