Hello,
I have trained a dataset with Roboflow Train, I have a code snippet, and I am trying to do Python webcam inference. I need to export certain results from the inference to a locally saved CSV or TXT file. I believe the trouble is somewhere inside my "async with httpx.AsyncClient() as requests:" block.
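For context, once inference works, this is roughly how I plan to save each frame's predictions to a local CSV (a minimal sketch of my own; the save_predictions helper, file name, and column choices are mine, assuming the usual Roboflow prediction fields like class, confidence, x, y, width, height):

import csv
import json

def save_predictions(resp_content, csv_path="predictions.csv"):
    # Parse the raw JSON bytes returned by the Infer API
    result = json.loads(resp_content)
    # Append one row per predicted box (column order is my own choice)
    with open(csv_path, "a", newline="") as f:
        writer = csv.writer(f)
        for p in result.get("predictions", []):
            writer.writerow([
                result.get("time"),
                p.get("class"), p.get("confidence"),
                p.get("x"), p.get("y"), p.get("width"), p.get("height"),
            ])

I would call this right after the requests.post in infer(), but first I need the main loop to work.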
I am following the infer-async.py file from the roboflow-api-snippets Python webcam demo on GitHub (linked from blog.roboflow.com).
I have debugged many issues with the code on my end, but I am stuck around line 75 of my file, where futures = [] is defined. image has values, and so does resp.content. I simply cannot get the cv2.imshow call to display the webcam feed; I am not sure whether this is because another variable is returning a NoneType. Some of the output and the traceback are below.
b'{"time":0.062274398999761615,"image":{"width":1280,"height":720},"predictions":[]}'
[123 34 116 105 109 101 34 58 48 46 48 54 50 50 55 52 51 57
56 57 57 57 55 54 49 54 49 53 44 34 105 109 97 103 101 34
58 123 34 119 105 100 116 104 34 58 49 50 56 48 44 34 104 101
105 103 104 116 34 58 55 50 48 125 44 34 112 114 101 100 105 99
116 105 111 110 115 34 58 91 93 125]
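Decoding those bytes myself, they appear to be just the UTF-8 bytes of the JSON string above (123 is '{', 34 is '"', and so on), so the response at least parses fine as JSON:

import json

result = json.loads(resp.content)
print(result["image"])        # {'width': 1280, 'height': 720}
print(result["predictions"])  # [] -- no detections on this frame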
File "C:\temp\thesis\Tensorflow\roboflow_webcam_demo\roboflow-api-snippets\Python\webcam\infer-async_B.py", line 109, in <module>
asyncio.run(main())
File "C:\temp\thesis\Tensorflow\roboflow_webcam_demo\roboflow-api-snippets\Python\webcam\infer-async_B.py", line 105, in main
cv2.imshow('image', image)
I have browsed the discussion boards but cannot find a solution. Problems relating to this demo snippet have been posted, but none related to this issue. My code is pasted below.
# load config
import json

with open('roboflow_config.json') as f:
    config = json.load(f)

ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
ROBOFLOW_MODEL = config["ROBOFLOW_MODEL"]
ROBOFLOW_SIZE = config["ROBOFLOW_SIZE"]
FRAMERATE = config["FRAMERATE"]
BUFFER = config["BUFFER"]

import asyncio
import base64
import time

import cv2
import httpx
import numpy as np

# Construct the Roboflow Infer URL
# (if running locally, replace https://detect.roboflow.com/ with e.g. http://127.0.0.1:9001/)
upload_url = "https://detect.roboflow.com/BLOCKED/3?api_key=BLOCKEDFORPRIVACY"
"""upload_url = "".join([
    "https://detect.roboflow.com/",
    ROBOFLOW_MODEL,
    "?api_key=",
    ROBOFLOW_API_KEY,
    "&format=image",  # Change to json if you want the prediction boxes, not the visualization
    "&stroke=5"
])"""

# Get webcam interface via opencv-python
video = cv2.VideoCapture(0)
# video.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
# video.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

# Infer via the Roboflow Infer API and return the result
# Takes an httpx.AsyncClient as a parameter
async def infer(requests):
    # Get the current image from the webcam
    ret, img = video.read()

    # Resize (while maintaining the aspect ratio) to improve speed and save bandwidth
    height, width, channels = img.shape
    scale = ROBOFLOW_SIZE / max(height, width)
    img = cv2.resize(img, (1280, 720))
    # img = cv2.resize(img, (round(scale * width), round(scale * height)))

    # Encode image to base64 string
    retval, buffer = cv2.imencode('.jpg', img)
    img_str = base64.b64encode(buffer)

    # Get prediction from Roboflow Infer API
    resp = await requests.post(upload_url, data=img_str, headers={
        "Content-Type": "application/x-www-form-urlencoded"
    })
    # print(resp)  # test the connection to the Roboflow hosted model
    print(resp.content)

    # Parse result image
    image = np.asarray(bytearray(resp.content), dtype="uint8")
    print(image)
    # image = cv2.imdecode(image, cv2.IMREAD_COLOR)
    print(image)
    return image

# Main loop; infers at FRAMERATE frames per second until you press "q"
async def main():
    # Initialize
    last_frame = time.time()

    # Initialize a buffer of images
    futures = []

    async with httpx.AsyncClient() as requests:
        while 1:
            # On "q" keypress, exit
            if cv2.waitKey(1) == ord('q'):
                break

            # Throttle to FRAMERATE fps and print the actual frames per second achieved
            elapsed = time.time() - last_frame
            await asyncio.sleep(max(0, 1/FRAMERATE - elapsed))
            print((1/(time.time() - last_frame)), " fps")
            last_frame = time.time()

            # Enqueue the inference request and save it to our buffer
            task = asyncio.create_task(infer(requests))
            futures.append(task)

            # Wait until our buffer is big enough before we start displaying results
            if len(futures) < BUFFER * FRAMERATE:
                continue

            # Remove the first image from our buffer
            # and wait for it to finish loading (if necessary)
            image = await futures.pop(0)
            print(image)

            # And display the inference results
            cv2.imshow('image', image)

# Run our main loop
asyncio.run(main())

# Release resources when finished
video.release()
cv2.destroyAllWindows()
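If it helps, here is a stripped-down single-frame sketch I put together while debugging (my own repro attempt, not part of the demo; same upload_url, headers, and encoding as above):

import base64
import cv2
import httpx
import numpy as np

upload_url = "https://detect.roboflow.com/BLOCKED/3?api_key=BLOCKEDFORPRIVACY"

# Grab a single frame and encode it the same way as the demo
video = cv2.VideoCapture(0)
ret, img = video.read()
video.release()

retval, buffer = cv2.imencode('.jpg', img)
img_str = base64.b64encode(buffer)

resp = httpx.post(upload_url, data=img_str, headers={
    "Content-Type": "application/x-www-form-urlencoded"
})
print(resp.content)  # JSON body, same shape as the output pasted above

# Trying to decode the response bytes as an image
arr = np.asarray(bytearray(resp.content), dtype="uint8")
print(cv2.imdecode(arr, cv2.IMREAD_COLOR))  # prints None -- the JSON bytes are not a decodable image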
Thank you for any assistance!!! It is greatly appreciated!