video stream success with pi camera

video
Cole Deck 5 years ago
parent 32ae8d4559
commit bfa501b6db

Binary file not shown.

detect.c

File diff suppressed because it is too large.

@@ -75,38 +75,45 @@ ap.add_argument("-n", "--number", type=int, required=False,
     help="show on the screen")
 args = vars(ap.parse_args())
 args2 = ap.parse_args()"""
-def detect(calibration_width, img_file, show):
+def detect(calibration_width, img_file, show, quick):
     selected = 2
     #if type(args["number"]) == type(selected):
     # selected = args["number"]
     # load the image, convert it to grayscale, and blur it slightly
-    image = cv2.imread(img_file)
+    image = None
+    print(str(type(img_file)))
+    if str(type(img_file)) == "<class 'numpy.ndarray'>":
+        image = img_file.copy()
+    else:
+        image = cv2.imread(img_file)
+    #image = img_file.copy()
     #image = cv2.resize(image, (int(image.shape[1]*1), int(image.shape[0]*1)))
     image = cv2.resize(image, (1000, int(
         image.shape[0]/image.shape[1] * 1000)), interpolation=cv2.INTER_NEAREST)
-    if show:
+    if show and not quick:
         cv2.namedWindow("Item Sorter")
         cv2.imshow("Item Sorter", image)
         cv2.waitKey(0)
     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     gray = cv2.GaussianBlur(gray, (5, 5), 0)
-    if show:
+    if show and not quick:
         cv2.imshow("Item Sorter", gray)
         cv2.waitKey(0)
     # perform edge detection, then perform a dilation + erosion to
     # close gaps in between object edges
     edged = cv2.Canny(gray, 50, 100)
-    if show:
+    if show and not quick:
         cv2.imshow("Item Sorter", edged)
         cv2.waitKey(0)
     edged = cv2.dilate(edged, None, iterations=1)
     edged = cv2.erode(edged, None, iterations=1)
-    if show:
+    if show and not quick:
         cv2.imshow("Item Sorter", edged)
         cv2.waitKey(0)
     # find contours in the edge map
@@ -171,7 +178,7 @@ def detect(calibration_width, img_file, show):
             pixelsPerMetric = smaller(dA, dB) / calibration_width
             continue
+        pixelsPerMetric = 25
         orig = image.copy()
         objtype = "Unknown"
         objname = ""
@@ -180,7 +187,7 @@ def detect(calibration_width, img_file, show):
         #orig = image.copy()
         num += 1
         # if the contour is not sufficiently large, ignore it
-        if cv2.contourArea(c) < 100 or pixelsPerMetric is None:
+        if cv2.contourArea(c) < 100: # or pixelsPerMetric is None:
             continue
         # compute the rotated bounding box of the contour
@@ -357,5 +364,7 @@ def detect(calibration_width, img_file, show):
     if show:
         cv2.imshow("Item Sorter", orig)
         #cv2.waitKey(1)
-        cv2.waitKey(0)
+        if quick:
+            return orig
+        else:
+            cv2.waitKey(0)
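
The detect() hunks above add a fourth parameter, quick: it skips the intermediate cv2.waitKey(0) pauses and makes the function return the annotated frame instead of blocking, and img_file may now be a numpy.ndarray (a decoded video frame) rather than a filename. A minimal sketch of calling the updated function on a single frame, assuming the repository's detect module is importable and img7.jpg is present:

import cv2
import detect

calibration_width = 0.75              # same value the main script uses
frame = cv2.imread("img7.jpg")        # a numpy.ndarray, i.e. the same type as a camera frame

# show=True, quick=True: detect() returns the annotated image instead of blocking on waitKey(0)
annotated = detect.detect(calibration_width, frame, True, True)
cv2.imshow("Item Sorter", annotated)
cv2.waitKey(0)
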

@@ -5,7 +5,7 @@ calibration_width = 0.75
 image = "img7.jpg"
 images = ("img.jpg", "img2.jpg", "img3.jpg", "img4.jpg", "img5.jpg", "img6.jpg", "img7.jpg", "img8.jpg")
 show = False
-video = False
+video = True
 def go():
     #for file in images:
     detect.detect(calibration_width, "img7.jpg", show)
@@ -14,14 +14,19 @@ if not video:
     elapsed_time = timeit.timeit(go, number=3)/3
     print(elapsed_time)
 else :
-    capture = cv2.VideoCapture('tcpclientsrc host=192.168.43.152 port=5000 ! queue ! decodebin ! appsink', cv2.CAP_GSTREAMER)
+    capture = cv2.VideoCapture('tcpclientsrc host=192.168.86.108 port=5001 ! gdpdepay ! rtph264depay ! avdec_h264 ! videoconvert ! appsink', cv2.CAP_GSTREAMER)
+    # server command for pi camera /opt/vc/bin/raspivid -t 0 -w 1920 -h 1080 -hf -fps 5 -o - | gst-launch-1.0 -v fdsrc ! h264parse ! rtph264pay config-interval=1 pt=96 ! gdppay ! tcpserversink host=192.168.86.108 port=5001
+    ret,frame = capture.read()
+    detect.detect(calibration_width, "img7.jpg", True, False)
+    detect.detect(calibration_width, frame, True, True)
     while True:
         ret,frame = capture.read()
         if not ret:
             print('empty frame')
             break
-        print('frame')
-        cv2.imshow('receive', frame)
+        #print('frame')
+        cv2.imshow('Item Sorter', detect.detect(calibration_width, frame, True, True))
         if cv2.waitKey(1)&0xFF == ord('q'):
             break
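
Opening the stream with cv2.CAP_GSTREAMER only works when OpenCV itself was built with the GStreamer backend; the stock opencv-python wheels typically are not, and the capture then simply fails to open. A small sketch of the check (it reuses the pipeline string from this commit; the host and port must match the Pi-side tcpserversink):

import cv2

pipeline = ('tcpclientsrc host=192.168.86.108 port=5001 ! gdpdepay ! '
            'rtph264depay ! avdec_h264 ! videoconvert ! appsink')

# Print the GStreamer line from the build info; it reads "YES (...)" when the backend is compiled in
print([line.strip() for line in cv2.getBuildInformation().splitlines() if 'GStreamer' in line])

capture = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
if not capture.isOpened():
    raise RuntimeError('could not open the GStreamer pipeline; '
                       'check the OpenCV build and that the Pi-side sender is running')
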
