I am trying to deploy the Roboflow Inference Server on an NVIDIA Jetson.
I am using a reComputer Industrial J3011 (Orin Nano 8GB).
Here are my project details:
Workspace: ARIES14
Project ID: pcba-5ohq1
The Inference Server on my NVIDIA Jetson is up and running in Docker.
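To confirm the container is reachable before streaming frames, I run a quick check like the one below (a sketch; whether the server answers on the root path is my assumption, but any HTTP response at least proves the port is open):

import requests

# Quick reachability check for the local container. The "/" route is an
# assumption; even a 404 here would show the server is listening on 9001.
resp = requests.get("http://localhost:9001/", timeout=5)
print(resp.status_code, resp.text[:200])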
An OAK-D Pro is connected to the J3011 via USB.
Here is my Python code:
import depthai as dai
import cv2
import requests
import numpy as np

ROBOFLOW_API_KEY = "tDJFy0d7a2nvfhwu8tdT"
ROBOFLOW_MODEL_URL = "http://localhost:9001/pcba-5ohq1/1"

def process_frame_with_roboflow(frame):
    # Encode the frame as JPEG and POST it to the inference server.
    retval, buffer = cv2.imencode(".jpg", frame)
    if not retval:
        print("Failed to encode frame")
        return None
    response = requests.post(
        ROBOFLOW_MODEL_URL,
        params={"api_key": ROBOFLOW_API_KEY},
        files={"file": buffer.tobytes()},
        headers={"accept": "application/json"}
    )
    return response.json()

def draw_predictions(frame, predictions):
    # Draw a labeled bounding box for each prediction.
    if not predictions or 'predictions' not in predictions:
        return frame
    for pred in predictions['predictions']:
        x, y, width, height = int(pred['x']), int(pred['y']), int(pred['width']), int(pred['height'])
        confidence = pred['confidence']
        label = pred['class']
        # Roboflow returns the box center, so convert to corner points.
        start_point = (x - width // 2, y - height // 2)
        end_point = (x + width // 2, y + height // 2)
        color = (0, 255, 0)
        cv2.rectangle(frame, start_point, end_point, color, 2)
        cv2.putText(frame, f"{label}: {confidence:.2f}", (x - width // 2, y - height // 2 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
    return frame

# Build the DepthAI pipeline: RGB camera preview streamed out over XLink.
pipeline = dai.Pipeline()
cam_rgb = pipeline.create(dai.node.ColorCamera)
cam_rgb.setPreviewSize(640, 480)
cam_rgb.setInterleaved(False)
cam_rgb.setFps(30)
xout_rgb = pipeline.create(dai.node.XLinkOut)
xout_rgb.setStreamName("rgb")
cam_rgb.preview.link(xout_rgb.input)

with dai.Device(pipeline) as device:
    print("starting stream... press 'q' to quit")
    queue = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    while True:
        frame_data = queue.get()
        frame = frame_data.getCvFrame()
        predictions = process_frame_with_roboflow(frame)
        frame = draw_predictions(frame, predictions)
        cv2.imshow("OAK-D Stream with Roboflow Inference", frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

cv2.destroyAllWindows()
It works fine when ROBOFLOW_MODEL_URL points at "detect.roboflow.com", but it errors when it points at "localhost:9001". The client-side JSONDecodeError below is only a symptom: the local server returns a non-JSON error body, so response.json() has nothing to parse.
[Error on client]
starting stream... press 'q' to quit
Traceback (most recent call last):
  File "oakd.py", line 64, in <module>
    predictions = process_frame_with_roboflow(frame)
  File "oakd.py", line 23, in process_frame_with_roboflow
    return response.json()
  File "/usr/lib/python3/dist-packages/requests/models.py", line 897, in json
    return complexjson.loads(self.text, **kwargs)
  File "/usr/lib/python3/dist-packages/simplejson/__init__.py", line 518, in loads
    return _default_decoder.decode(s)
  File "/usr/lib/python3/dist-packages/simplejson/decoder.py", line 370, in decode
    obj, end = self.raw_decode(s)
  File "/usr/lib/python3/dist-packages/simplejson/decoder.py", line 400, in raw_decode
    return self.scan_once(s, idx=_w(s, idx).end())
simplejson.errors.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
[Error from Inference Server]
TypeError: req.body.replace is not a function
at transformImageBody (/inference-server/server/index.js:323:26)
at Layer.handle [as handle_request] (/inference-server/server/node_modules/express/lib/router/layer.js:95:5)
at next (/inference-server/server/node_modules/express/lib/router/route.js:144:13)
at Route.dispatch (/inference-server/server/node_modules/express/lib/router/route.js:114:3)
at Layer.handle [as handle_request] (/inference-server/server/node_modules/express/lib/router/layer.js:95:5)
at /inference-server/server/node_modules/express/lib/router/index.js:284:15
at param (/inference-server/server/node_modules/express/lib/router/index.js:365:14)
at param (/inference-server/server/node_modules/express/lib/router/index.js:376:14)
at param (/inference-server/server/node_modules/express/lib/router/index.js:376:14)
at Function.process_params (/inference-server/server/node_modules/express/lib/router/index.js:421:3)
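Based on the req.body.replace error, my guess is that the local server reads the request body as a string, i.e. it expects a base64-encoded image rather than a multipart file upload (the base64 style Roboflow documents for its API). Below is a sketch of the variant I plan to try; process_frame_with_roboflow_b64 is just my working name and I have not confirmed it resolves the error:

import base64

def process_frame_with_roboflow_b64(frame):
    # Same as process_frame_with_roboflow above, but send the JPEG as a
    # base64 string in the raw POST body instead of a multipart "file" field.
    retval, buffer = cv2.imencode(".jpg", frame)
    if not retval:
        print("Failed to encode frame")
        return None
    img_str = base64.b64encode(buffer.tobytes()).decode("ascii")
    response = requests.post(
        ROBOFLOW_MODEL_URL,
        params={"api_key": ROBOFLOW_API_KEY},
        data=img_str,
        headers={"Content-Type": "application/x-www-form-urlencoded"}
    )
    if response.status_code != 200:
        # Surface non-JSON error bodies instead of crashing in .json().
        print("Inference server error:", response.status_code, response.text)
        return None
    return response.json()

Is the multipart upload simply not supported by the local inference server, or is something else wrong with my setup?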