From 4788300a8787c901f7d1bdec46d3c2553357736e Mon Sep 17 00:00:00 2001
From: knolax <1339802534.kk@gmail.com>
Date: Tue, 17 Jan 2017 15:19:51 +0000
Subject: initial commit, see doc for information on the process.

---
 all.png            | Bin 0 -> 884169 bytes
 ballramp.jpg       | Bin 0 -> 225221 bytes
 capball.jpg        | Bin 0 -> 685208 bytes
 doc                |  47 ++++++++++++++++++++++++++++++++++++
 particle.png       | Bin 0 -> 384860 bytes
 proc.py            |  68 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 stallman.jpg       | Bin 0 -> 39530 bytes
 stallmask.jpg      | Bin 0 -> 130024 bytes
 stallmasked.jpg    | Bin 0 -> 129759 bytes
 stallmaskedcon.jpg | Bin 0 -> 230073 bytes
 stallp.jpg         | Bin 0 -> 152540 bytes
 11 files changed, 115 insertions(+)
 create mode 100644 all.png
 create mode 100644 ballramp.jpg
 create mode 100644 capball.jpg
 create mode 100644 doc
 create mode 100644 particle.png
 create mode 100644 proc.py
 create mode 100644 stallman.jpg
 create mode 100644 stallmask.jpg
 create mode 100644 stallmasked.jpg
 create mode 100644 stallmaskedcon.jpg
 create mode 100644 stallp.jpg

diff --git a/all.png b/all.png
new file mode 100644
index 0000000..ee8f763
Binary files /dev/null and b/all.png differ
diff --git a/ballramp.jpg b/ballramp.jpg
new file mode 100644
index 0000000..40efdf4
Binary files /dev/null and b/ballramp.jpg differ
diff --git a/capball.jpg b/capball.jpg
new file mode 100644
index 0000000..833d03a
Binary files /dev/null and b/capball.jpg differ
diff --git a/doc b/doc
new file mode 100644
index 0000000..e61cd8c
--- /dev/null
+++ b/doc
@@ -0,0 +1,47 @@
+opencv documentation :
+	http://docs.opencv.org/3.2.0/
+variable stuff (all the shared data types are in core):
+	http://docs.opencv.org/3.2.0/d1/dfb/intro.html
+Image processing module :
+	http://docs.opencv.org/3.2.0/d7/dbd/group__imgproc.html
+	Hough transform finds lines in an image, and arbitrary shapes now?
+	Hesse lines:
+		r = x cos(theta) + y sin(theta), where r is the distance from the origin and theta is the angle of the line's normal
+method the FTC lib used:
+	http://stackoverflow.com/questions/10262600/how-to-detect-region-of-large-of-white-pixels-using-opencv
+	http://docs.opencv.org/3.2.0/db/d8e/tutorial_threshold.html
+	http://docs.opencv.org/3.1.0/d4/d73/tutorial_py_contours_begin.html
+	http://docs.opencv.org/trunk/df/d9d/tutorial_py_colorspaces.html
+	http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.html
+	http://docs.opencv.org/2.4/modules/core/doc/operations_on_arrays.html#bitwise-or
+	http://docs.opencv.org/trunk/dd/d49/tutorial_py_contour_features.html
+	http://docs.opencv.org/2.4/doc/tutorials/imgproc/gausian_median_blur_bilateral_filter/gausian_median_blur_bilateral_filter.html
+1/13/2017:
+	python lost all packages in the transition from python 3.5 to 3.6, pip reinstalled.
+img = cv2.imread("stallman.jpg")
+>>> thresh = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
+>>> ret, thresh = cv2.threshold(thresh,120,255,cv2.THRESH_BINARY)
+>>> cv2.imwrite("stalltel.jpg",thresh)
+works, use findContours() for the blobs and we'll be set.
+1/14/2017
+	I have found the contours, but the image has many places where the h value is not red because the s and v values are too low to tell. I tried gaussian blur in both the BGR and HSV stages of the image. Gaussian blur in the HSV stage caused many non red regions to be put into the layer mask. Gaussian blur in BGR removed some abnormalities but not all.
+	gaussian blur does not work, so I am instead going to fill in all the contours in the mask I can find that are white when blurred but black before blurring.
+	Instead I have tried blurring the mask only and that seems to produce the best results.
+here's the flowchart I'll use:
+-contour larger than x square pixels, where x is some small number smaller than a particle at max range.
+-blue or red
+-Contour bounding circle matches contour by x%, average distance to closest point on the circle?
+- we can do this quickly with the minimal bounding circle: since the minimal bounding circle can only be >= the contour, the closer the contour is to that circle (%?) the closer it is to a circle.
+-this could be the capball, a particle or the light
+-particles would have to have internal contours that match x% of an oval and make up y% of the particle area.
+-The lights would then have to be near a white shape around a bounding box.
+1/14/2017 continued
+	the minimum circle method of detecting roundness isn't as clear cut as thought, as the percent difference for the capball, which one would assume to be an easy case, was 40%, while the others were at 85 and 96 percent.
+	after testing with another image, we found many false positives with non ball shaped objects, and adjacent red objects being detected as one contour.
+	I am going to use a contour similarity test against one generated from the minimum bounding circle. Also I am going to AND together thresholds for hue and saturation, ignoring value. A minimum s level of 60/255 seems to eliminate most false positives, but it failed to eliminate all non red zones. In addition it improved the circle accuracy for the cap ball to 22%. The particle and ramp remained connected.
+	The gaussian blur was removed as the contour only needed to fit in a circle, not be close to a circle. We achieved 20% for the small particle, which was now being detected separately from the ramp. The capball is also at 13.6% accuracy now.
+	given how blurry the camera is at times, the child contour method is unlikely to work. We should instead use the fact that, for the area the robot is in, the capball will likely always appear larger than any size the particle can ever reach.
+	it seems that though there are no holes in the threshold mask, there are however differences in regularity that produce differences in the circle percentages.
+	after testing with some other images, I have decided to move the circle closeness requirement higher, to 35%, as I got one very clear picture of the capball that scored a 32%.
+	the difference in regularity is not good enough for distinguishing capball from particle.
+	some more testing with the blue ball and one of them was a false negative. 3 more false positives, also one of the particles was below the cut-off area. Another false negative red ball at 36%, but it was really covered up by a lot of stuff.
diff --git a/particle.png b/particle.png
new file mode 100644
index 0000000..615bfec
Binary files /dev/null and b/particle.png differ
diff --git a/proc.py b/proc.py
new file mode 100644
index 0000000..7bdef13
--- /dev/null
+++ b/proc.py
@@ -0,0 +1,68 @@
+#!/bin/python
+#python 3.6.0
+import numpy
+import cv2
+import math
+import sys
+def thresh (img, minv, maxv) :
+	ret, thresh = cv2.threshold(img, minv, 255, cv2.THRESH_BINARY)
+	ret, ithresh = cv2.threshold(img, maxv, 255, cv2.THRESH_BINARY_INV)
+	return cv2.bitwise_and(thresh,ithresh)
+img = cv2.imread(sys.argv[1])
+#blur, kernel size is the distance in pixels over which surrounding pixels are averaged
+#29 seems to be the upper limit on effectiveness.
+himg = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
+h,s,v = cv2.split(himg)
+# i think the range for red is 0-10, 170-179
+#blue is 95-135, 40? really that much?
+#hue only testing, not very effective.
+# better with saturation
+thresh = cv2.bitwise_and(cv2.bitwise_or(thresh(h,0,10),thresh(h,169,180)), thresh(s,60,255))
+#thresh = cv2.bitwise_and(thresh(h,95,135), thresh(s,60,255))
+#thresh = cv2.GaussianBlur(thresh,(29,29),0,0,cv2.BORDER_REFLECT_101)
+cv2.imwrite("stallmask.jpg",thresh);
+mask = cv2.merge((thresh,thresh,thresh))
+masked = cv2.bitwise_and(himg, mask);
+cv2.imwrite("stallmasked.jpg",cv2.cvtColor(masked,cv2.COLOR_HSV2BGR));
+#only takes 1-channel arrays?
+im2, con, heir = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
+v = 0
+for i in range(0, len(con)) :
+	if (cv2.contourArea(con[i]) > 2000) :
+		a = cv2.contourArea(con[i])
+		cv2.drawContours(masked, con, i, (120,255,255), 5)
+		(cx, cy), r = cv2.minEnclosingCircle(con[i])
+		p = 1 - (a / (math.pi * math.pow(r, 2)))
+		c = (int(cx),int(cy))
+		cv2.circle(masked, c, int(r), (150,255,255), 5)
+		if (p < .35) :
+			print("yee")
+			#find the first child contour
+			#heir in list mode returns the array nested inside another array
+			if ( heir[0][i][2] != -1 ):
+				cp = 0
+				j = heir[0][i][2]
+				while (j != -1) :
+					cv2.drawContours(masked, con, j, (160,255,255), 3)
+					cp = cp + cv2.contourArea(con[j])
+					j = heir[0][j][0]
+				cp = cp/a
+			else :
+				cp = 0
+			print("child area perc")
+			print(cp)
+			if (cp > 0.0005) :
+				cv2.putText(masked, "p", c, cv2.FONT_HERSHEY_PLAIN, 10, (0,255,255), cv2.LINE_AA)
+			else :
+				cv2.putText(masked, "c", c, cv2.FONT_HERSHEY_PLAIN, 10, (0,255,255), cv2.LINE_AA)
+
+		print(p)
+
+		v = v + 1
+	else :
+		cv2.drawContours(masked, con, i, (80,255,255), 1)
+cv2.imwrite("stallmaskedcon.jpg",cv2.cvtColor(masked,cv2.COLOR_HSV2BGR))
+print("properly sized contours")
+print(v)
+print("all contours")
+print(len(con))
diff --git a/stallman.jpg b/stallman.jpg
new file mode 100644
index 0000000..d9bded5
Binary files /dev/null and b/stallman.jpg differ
diff --git a/stallmask.jpg b/stallmask.jpg
new file mode 100644
index 0000000..78cde04
Binary files /dev/null and b/stallmask.jpg differ
diff --git a/stallmasked.jpg b/stallmasked.jpg
new file mode 100644
index 0000000..055b940
Binary files /dev/null and b/stallmasked.jpg differ
diff --git a/stallmaskedcon.jpg b/stallmaskedcon.jpg
new file mode 100644
index 0000000..687323d
Binary files /dev/null and b/stallmaskedcon.jpg differ
diff --git a/stallp.jpg b/stallp.jpg
new file mode 100644
index 0000000..d019560
Binary files /dev/null and b/stallp.jpg differ
--
cgit v1.1
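
For reference, the detection rule the doc above settles on (red hue mask ANDed with a saturation floor of 60, a 2000 square pixel area cut-off, and a 35% minimum-enclosing-circle closeness requirement) boils down to the minimal sketch below. It assumes the OpenCV 3.x Python bindings (three return values from findContours); the helper name "band" and the command-line image argument are illustrative and not part of proc.py.

#!/bin/python
# minimal sketch of the roundness test described in doc; assumes OpenCV 3.x
import math
import sys
import cv2

def band(channel, lo, hi):
	# keep pixels with lo < value <= hi as 255, everything else as 0
	ret, above = cv2.threshold(channel, lo, 255, cv2.THRESH_BINARY)
	ret, below = cv2.threshold(channel, hi, 255, cv2.THRESH_BINARY_INV)
	return cv2.bitwise_and(above, below)

img = cv2.imread(sys.argv[1])
h, s, v = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
# red hue wraps around 0/180 in OpenCV, so OR both ends of the hue range,
# then AND with a saturation floor to drop washed-out pixels
mask = cv2.bitwise_and(cv2.bitwise_or(band(h, 0, 10), band(h, 169, 180)), band(s, 60, 255))
im2, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
	area = cv2.contourArea(cnt)
	if area < 2000:
		continue  # smaller than the cut-off area from the log
	(cx, cy), r = cv2.minEnclosingCircle(cnt)
	gap = 1 - area / (math.pi * r * r)  # fraction of the enclosing circle the contour misses
	if gap < 0.35:  # the 35% closeness requirement from the log
		print("ball-like contour at (%d, %d), gap %.2f" % (cx, cy, gap))

On clean ball images the log reports gaps around 13-22%, with one clear capball shot at 32%, which is why the acceptance threshold sits at 35% rather than anything tighter.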
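
The child-contour check in proc.py leans on the hierarchy array returned by findContours with RETR_TREE, whose per-contour entries are [next, previous, first_child, parent]. The sketch below isolates just that traversal; the function name child_area_fraction is illustrative, not something defined in proc.py.

import cv2

def child_area_fraction(contours, hierarchy, i):
	# fraction of contour i's area taken up by its immediate children,
	# walking the [next, previous, first_child, parent] links from RETR_TREE;
	# caller should only pass contours with non-zero area
	total = 0.0
	j = hierarchy[0][i][2]      # first child of contour i, -1 if it has none
	while j != -1:
		total += cv2.contourArea(contours[j])
		j = hierarchy[0][j][0]  # next sibling, -1 at the end of the list
	return total / cv2.contourArea(contours[i])

proc.py labels a contour as a particle ("p") when this fraction exceeds 0.0005 and as the capball ("c") otherwise, on the idea that a particle shows internal contours while the capball does not, though the log notes this is unreliable when the camera is blurry.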