@@ -75,38 +75,45 @@ ap.add_argument("-n", "--number", type=int, required=False,
                 help="show on the screen")
 args = vars(ap.parse_args())
 args2 = ap.parse_args()"""
-def detect(calibration_width, img_file, show):
+def detect(calibration_width, img_file, show, quick):
     selected = 2
     #if type(args["number"]) == type(selected):
     # selected = args["number"]
 
     # load the image, convert it to grayscale, and blur it slightly
-    image = cv2.imread(img_file)
+    image = None
+    print(str(type(img_file)))
+    if str(type(img_file)) == "<class 'numpy.ndarray'>":
+        image = img_file.copy()
+    else:
+        image = cv2.imread(img_file)
+
+    #image = img_file.copy()
     #image = cv2.resize(image, (int(image.shape[1]*1), int(image.shape[0]*1)))
     image = cv2.resize(image, (1000, int(
         image.shape[0]/image.shape[1] * 1000)), interpolation=cv2.INTER_NEAREST)
 
-    if show:
+    if show and not quick:
         cv2.namedWindow("Item Sorter")
         cv2.imshow("Item Sorter", image)
         cv2.waitKey(0)
     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     gray = cv2.GaussianBlur(gray, (5, 5), 0)
-    if show:
+    if show and not quick:
         cv2.imshow("Item Sorter", gray)
         cv2.waitKey(0)
 
     # perform edge detection, then perform a dilation + erosion to
     # close gaps in between object edges
     edged = cv2.Canny(gray, 50, 100)
-    if show:
+    if show and not quick:
         cv2.imshow("Item Sorter", edged)
         cv2.waitKey(0)
 
     edged = cv2.dilate(edged, None, iterations=1)
     edged = cv2.erode(edged, None, iterations=1)
 
-    if show:
+    if show and not quick:
         cv2.imshow("Item Sorter", edged)
         cv2.waitKey(0)
     # find contours in the edge map
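The added branch above distinguishes an in-memory frame from a file path by comparing the string form of its type. A minimal sketch of the same dispatch using isinstance, assuming numpy is available as np and a hypothetical load_image helper, would be:

    import cv2
    import numpy as np

    def load_image(img_file):
        # Accept either an already-decoded frame (ndarray) or a path on disk.
        if isinstance(img_file, np.ndarray):
            return img_file.copy()
        return cv2.imread(img_file)
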
@@ -171,7 +178,7 @@ def detect(calibration_width, img_file, show):
             pixelsPerMetric = smaller(dA, dB) / calibration_width
             continue
 
-
+    pixelsPerMetric = 25
     orig = image.copy()
     objtype = "Unknown"
     objname = ""
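For context, pixelsPerMetric is just a scale factor: the reference object's pixel width divided by its known real width, after which every later pixel distance is divided by it. A rough worked example with assumed values (the hardcoded 25 above would correspond to a 10-unit-wide reference measuring 250 px):

    calibration_width = 10.0     # known real width of the reference object (assumed units)
    reference_pixels = 250.0     # its measured pixel width (assumed)
    pixelsPerMetric = reference_pixels / calibration_width   # 25.0 px per unit

    object_pixels = 75.0                                     # some other object's pixel width
    object_width = object_pixels / pixelsPerMetric           # 3.0 units
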
@@ -180,7 +187,7 @@ def detect(calibration_width, img_file, show):
         #orig = image.copy()
         num += 1
         # if the contour is not sufficiently large, ignore it
-        if cv2.contourArea(c) < 100 or pixelsPerMetric is None:
+        if cv2.contourArea(c) < 100: # or pixelsPerMetric is None:
             continue
         # compute the rotated bounding box of the contour
 
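The loop this hunk touches walks contours from the edge map and now skips only those below the area threshold, whether or not calibration has happened yet. A standalone sketch of that filter, assuming OpenCV 4's two-value findContours return and the edged map computed earlier in detect():

    import cv2

    cnts, _ = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Drop specks: anything under 100 px^2, matching the threshold in the diff.
    cnts = [c for c in cnts if cv2.contourArea(c) >= 100]
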
@@ -357,5 +364,7 @@ def detect(calibration_width, img_file, show):
     if show:
         cv2.imshow("Item Sorter", orig)
         #cv2.waitKey(1)
-
-        cv2.waitKey(0)
+    if quick:
+        return orig
+    else:
+        cv2.waitKey(0)
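With the new parameter, a caller that already holds a frame (for example from a camera) can run detection headlessly and get the annotated image back instead of blocking on a window. A hypothetical call, with assumed paths and an assumed calibration value:

    import cv2

    frame = cv2.imread("sample.jpg")        # assumed input; any BGR ndarray works
    # detect() is the function patched above; calibration_width=10 is an assumed value.
    annotated = detect(calibration_width=10, img_file=frame, show=False, quick=True)
    cv2.imwrite("annotated.jpg", annotated)  # assumed output path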