Update detection to be more reliable

Branch: video
Cole Deck 5 years ago
parent bcfd61a7cf
commit 0feabe96e5

Binary file not shown.

Binary file not shown.

@@ -98,21 +98,16 @@ def detect(calibration_width, img_file, show, quick):
cv2.imshow("Item Sorter", image)
cv2.waitKey(0)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
if show and not quick:
cv2.imshow("Item Sorter", gray)
cv2.waitKey(0)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
# perform edge detection, then perform a dilation + erosion to
# close gaps in between object edges
edged = cv2.Canny(gray, 50, 100)
if show and not quick:
cv2.imshow("Item Sorter", edged)
cv2.waitKey(0)
edged = cv2.dilate(edged, None, iterations=2)
edged = cv2.erode(edged, None, iterations=2)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)
edged = cv2.dilate(edged, None, iterations=1)
if show and not quick:
cv2.imshow("Item Sorter", edged)
cv2.waitKey(0)
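
For context, a minimal stand-alone sketch of the preprocessing stage this hunk retunes (heavier 7x7 blur, fewer dilate/erode passes). The input filename and window name here are illustrative, not taken from the repository:

    import cv2

    image = cv2.imread("sample.jpg")                  # hypothetical input image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (7, 7), 0)          # blur before edge detection
    edged = cv2.Canny(gray, 50, 100)
    # dilate + erode with the default 3x3 kernel (None) to close gaps between edges
    edged = cv2.dilate(edged, None, iterations=1)
    edged = cv2.erode(edged, None, iterations=1)
    edged = cv2.dilate(edged, None, iterations=1)
    cv2.imshow("edges", edged)
    cv2.waitKey(0)
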
@@ -127,7 +122,6 @@ def detect(calibration_width, img_file, show, quick):
pixelsPerMetric = None
num = 0
# Calibration loop
for c in cnts:
# if the contour is not sufficiently large, ignore it
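
Only the top of the calibration loop is visible in this hunk. A hedged sketch of what such a loop typically computes, assuming the first sufficiently large contour is the reference object of known real-world width (the min_area threshold and the max(w, h) choice are illustrative, not the repository's code):

    import cv2

    def calibrate(cnts, calibration_width, min_area=100):
        # Derive pixels-per-unit from the reference object's width in pixels.
        for c in cnts:
            if cv2.contourArea(c) < min_area:
                continue  # contour not sufficiently large, ignore it
            (_, (w, h), _) = cv2.minAreaRect(c)
            return max(w, h) / calibration_width
        return None
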
@@ -262,7 +256,8 @@ def detect(calibration_width, img_file, show, quick):
objtype = "Penny"
iteml = 0
else:
if circular and near(radius * 2 / pixelsPerMetric, 0.38, 0.03):
if circular and near(radius * 2 / pixelsPerMetric, 0.4, 0.03):
# Keps nut or spacer
objtype = "Spacer"
mask = np.zeros(gray.shape, np.uint8)
@@ -270,11 +265,13 @@ def detect(calibration_width, img_file, show, quick):
#pixelpoints = np.transpose(np.nonzero(mask))
hsv = cv2.cvtColor(orig, cv2.COLOR_BGR2HSV)
mean_val = cv2.mean(hsv, mask=mask)
#print(str(mean_val[0]))
if near(mean_val[0], 47, 5) and near(mean_val[1], 70, 5) and near(mean_val[2], 78, 5):
mean_rgb = cv2.mean(orig, mask=mask)
if near(mean_rgb[2], 59, 3) and near(mean_val[1], 85, 5): #and near(mean_val[2], 78, 5):
objtype = "Keps Nut"
if circular and near(radius / pixelsPerMetric, 0.23, 0.02):
print(str(mean_rgb[2]) + objtype + str(mean_val[1]))
elif circular and near(radius / pixelsPerMetric, 0.23, 0.02):
objtype = "Washer"
#print(str(radius * 2 / pixelsPerMetric) + objtype)
epsilon = 3 # 0.02*cv2.arcLength(c,True)
# print(str(epsilon))
approx = cv2.approxPolyDP(c, epsilon, True)
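
This hunk widens the spacer diameter target to 0.4 in and switches the Keps nut test to the mean red channel plus HSV saturation inside a contour mask. A hedged sketch of the two helpers that logic relies on; near() and the mask construction are assumptions about code not shown in the diff:

    import cv2
    import numpy as np

    def near(value, target, tolerance):
        # Simple tolerance check; the repository defines its own version.
        return abs(value - target) <= tolerance

    def masked_means(orig, gray_shape, contour):
        # Average HSV and BGR only over pixels inside the contour.
        mask = np.zeros(gray_shape, np.uint8)
        cv2.drawContours(mask, [contour], -1, 255, -1)   # filled contour as mask
        hsv = cv2.cvtColor(orig, cv2.COLOR_BGR2HSV)
        mean_hsv = cv2.mean(hsv, mask=mask)   # (H, S, V, 0)
        mean_bgr = cv2.mean(orig, mask=mask)  # (B, G, R, 0); index 2 is the red mean
        return mean_hsv, mean_bgr
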
@@ -359,7 +356,7 @@ def detect(calibration_width, img_file, show, quick):
if show and not quick:
cv2.imshow("Item Sorter", orig)
#cv2.waitKey(1)
if quick:
return (list, orig)
else:
if not quick:
cv2.waitKey(0)
return (list, orig)
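
After this change both paths return the (items, annotated image) pair; quick mode only skips the blocking waitKey. A short usage sketch based on the calls in the test harness below (argument values are illustrative):

    import detect

    # Still-image mode: detect() blocks on cv2.waitKey(0) when show=True.
    items, annotated = detect.detect(0.75, "img7.jpg", True, False)

    # Quick mode: no blocking wait, suited to per-frame use on video.
    items, annotated = detect.detect(0.75, "img7.jpg", True, True)
    print(items)
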

@@ -5,12 +5,12 @@ import cv2
from imutils.video import FPS
calibration_width = 0.75
image = "img7.jpg"
images = ("img.jpg", "img2.jpg", "img3.jpg", "img4.jpg", "img5.jpg", "img6.jpg", "img7.jpg", "img8.jpg")
images = ("img2.jpg", "img3.jpg", "img4.jpg", "img5.jpg", "img6.jpg", "img7.jpg", "img8.jpg")
#images = ("img.jpg", "img2.jpg")
video = False
def go():
for file in images:
items,output = detect.detect(calibration_width, file, True, True)
items,output = detect.detect(calibration_width, file, True, False)
print(str(items))
if "Penny" in items:
items.remove("Penny")
@@ -30,7 +30,7 @@ if not video:
print(elapsed_time)
else :
#tcp capture = cv2.VideoCapture('udpsrc port=5001 ! gdpdepay ! rtph264depay ! avdec_h264 ! videoconvert ! videorate ! video/x-raw,framerate=5/1 ! appsink', cv2.CAP_GSTREAMER)
#capture = cv2.VideoCapture('tcpclientsrc port=5001 host=192.168.1.129 ! gdpdepay ! rtph264depay ! avdec_h264 ! videoconvert ! videorate ! video/x-raw,framerate=5/1 ! appsink', cv2.CAP_GSTREAMER)
capture = cv2.VideoCapture('udpsrc port=9000 caps="application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264" ! rtph264depay ! avdec_h264 ! videoconvert ! appsink sync=false', cv2.CAP_GSTREAMER)
# server command for imx135 camera ./video2stdout | gst-launch-1.0 -v fdsrc ! h264parse ! rtph264pay config-interval=1 pt=96 ! gdppay ! tcpserversink host=192.168.43.152 port=5001
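
The pipeline above receives H.264 over RTP/UDP and hands decoded frames to OpenCV. A minimal read loop, assuming OpenCV was built with GStreamer support; how each frame is then passed to detect() is outside this hunk:

    import cv2

    pipeline = ('udpsrc port=9000 caps="application/x-rtp, media=(string)video, '
                'clock-rate=(int)90000, encoding-name=(string)H264" ! rtph264depay '
                '! avdec_h264 ! videoconvert ! appsink sync=false')
    capture = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
    if not capture.isOpened():
        raise RuntimeError("GStreamer pipeline failed to open")
    while True:
        ok, frame = capture.read()
        if not ok:
            break
        cv2.imshow("Item Sorter", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    capture.release()
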

@@ -28,4 +28,4 @@ def sort(input):
def clear():
write(["Unknown"])
write(["Unknown"])
#write(["Unknown"])