[英]How to find the object on the noisy background?
我一直在進行一個項目,在該項目中,我必須使用觸發模式下的全局快門相機來找到對象的x和y坐標。 到現在為止,一切工作都很好,我得到了預期的結果,但是問題是,當我以觸發模式連續處理圖像時,偶爾會得到一些帶有噪點的額外圖像。
源圖像:
誰能建議我以一種好的方法來解決這個問題。
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2
def midpoint(ptA, ptB):
    """Return the point halfway between ptA and ptB as an (x, y) tuple."""
    mid_x = (ptA[0] + ptB[0]) / 2.0
    mid_y = (ptA[1] + ptB[1]) / 2.0
    return (mid_x, mid_y)
# Detect objects in '022.bmp', take the first large contour as the
# reference object, then draw and connect the rotated bounding boxes of
# every other large contour against the reference.
image = cv2.imread('022.bmp')
if image is None:
    raise IOError("could not read image '022.bmp'")

# NOTE(review): despite the name, this converts to HSV, not grayscale —
# Canny then runs on a 3-channel image; confirm this is intentional.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
gray = cv2.GaussianBlur(gray, (7, 7), 0)

edged = cv2.Canny(gray, 50, 100)
# Dilate then erode (morphological closing) to bridge small edge gaps.
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)

# grab_contours handles the differing return signatures of
# cv2.findContours across OpenCV 2/3/4 — the original
# `cnts[0] if imutils.is_cv2() else cnts[1]` returns the *hierarchy*,
# not the contours, on OpenCV 4.
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
(cnts, _) = contours.sort_contours(cnts)

colors = ((0, 0, 255), (240, 0, 159), (0, 165, 255), (255, 255, 0), (255, 0, 255))
refObj = None

for c in cnts:
    # Skip tiny contours that are likely noise.
    if cv2.contourArea(c) < 250:
        continue

    # Rotated bounding box with corners ordered tl, tr, br, bl.
    box = cv2.minAreaRect(c)
    box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
    box = np.array(box, dtype="int")
    box = perspective.order_points(box)

    # Centre of the box.
    cX = np.average(box[:, 0])
    cY = np.average(box[:, 1])

    if refObj is None:
        # First sufficiently large contour becomes the reference object;
        # D is the distance between the midpoints of its left and right
        # edges (its width).
        (tl, tr, br, bl) = box
        (tlblX, tlblY) = midpoint(tl, bl)
        (trbrX, trbrY) = midpoint(tr, br)
        D = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
        refObj = (box, (cX, cY), D)
        print(refObj[0])
        print('cx-1', cX)
        print('cy-1', cY)
        continue

    # Draw current and reference boxes on a fresh copy of the image.
    orig = image.copy()
    cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
    cv2.drawContours(orig, [refObj[0].astype("int")], -1, (0, 255, 0), 2)

    # Stack corners + centre for both objects so they can be paired.
    refCoords = np.vstack([refObj[0], refObj[1]])
    objCoords = np.vstack([box, (cX, cY)])
    print(box)
    print('cx', cX)
    print('cy', cY)

    # Connect corresponding points with per-pair colors.
    for ((xA, yA), (xB, yB), color) in zip(refCoords, objCoords, colors):
        cv2.circle(orig, (int(xA), int(yA)), 5, color, -1)
        cv2.circle(orig, (int(xB), int(yB)), 5, color, -1)
        cv2.line(orig, (int(xA), int(yA)), (int(xB), int(yB)), color, 2)

    cv2.imshow('img', orig)
    cv2.waitKey(0)
您能對此提出建議嗎? 謝謝!
我備份了源圖像,避免了替換或刪除它。
我分析了不同色彩空間(ColorSpace)中的圖像,發現 B(BGR 中的藍色)通道非常適合此任務。
然后執行以下步驟:
代碼和結果。
#!/usr/bin/python3
# 2017.12.09 00:25:47 CST
# 2018.01.10 21:10:07 CST
import cv2
import numpy as np
## (1) read and extract the blue channel
## (1) read the source image and extract the blue channel (index 0 in
## BGR order), which separates the object from the noisy background.
img = cv2.imread("img01.png")
if img is None:
    # Fail loudly instead of crashing later with an opaque cv2 error.
    raise IOError("could not read image 'img01.png'")
blue = img[..., 0]

## (2) binarize the blue channel; with THRESH_OTSU set the threshold is
## chosen automatically (the 200 is ignored), and THRESH_BINARY_INV
## makes the dark object white on a black background.
th, threshed = cv2.threshold(blue, 200, 255,
                             cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)

## (3) find contours; [-2] selects the contour list on both OpenCV 3
## (3-tuple return) and OpenCV 2/4 (2-tuple return).
cnts = cv2.findContours(threshed, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]

## (4) keep only contours whose area is between 1/100 and 1/20 of the
## image area, then draw their rotated bounding boxes.
canvas = img.copy()
H, W = img.shape[:2]
AREA = H * W
for cnt in cnts:
    area = cv2.contourArea(cnt)
    if not AREA / 100 < area < AREA / 20:
        continue
    box = cv2.minAreaRect(cnt)
    box = cv2.boxPoints(box)
    box = np.array(box, dtype="int")
    cv2.drawContours(canvas, [box], -1, (255, 0, 255), 2, cv2.LINE_AA)

## save
cv2.imwrite("result.png", canvas)
類似問題:
(1) 如何在嘈雜的背景下找到物體?
我建議進行更好的初始預處理,在應用邊緣檢測之前消除噪聲。 一種可能的方法是應用雙邊濾波,該濾波可以平滑圖像,但同時保留邊緣。
這里有個簡單的例子:
import numpy as np
import cv2
# Denoise with an edge-preserving bilateral filter before edge
# detection, then draw rotated bounding boxes of mid-sized contours.
img = cv2.imread('img01.png')
if img is None:
    # Fail loudly instead of crashing later with an opaque cv2 error.
    raise IOError("could not read image 'img01.png'")
canvas = img.copy()

# Bilateral filtering smooths noise while keeping edges sharp.
filtered = cv2.bilateralFilter(img, 30, 80, 80)

# NOTE(review): this converts to HSV, not grayscale (the original named
# it `gray`); Canny then runs on the 3-channel HSV image — confirm this
# is intentional.
hsv = cv2.cvtColor(filtered, cv2.COLOR_BGR2HSV)
edged = cv2.Canny(hsv, 50, 100)

# [-2] selects the contour list on both OpenCV 3 (3-tuple return) and
# OpenCV 2/4 (2-tuple return); the original `_, cnts, _ = ...`
# three-way unpacking raises ValueError on OpenCV 4.
cnts = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]

# Keep only contours whose area is between 1/100 and 1/20 of the image
# area, then draw their rotated bounding boxes.
H, W = img.shape[:2]
AREA = H * W
for cnt in cnts:
    area = cv2.contourArea(cnt)
    if not AREA / 100 < area < AREA / 20:
        continue
    box = cv2.minAreaRect(cnt)
    box = cv2.boxPoints(box)
    box = np.array(box, dtype="int")
    cv2.drawContours(canvas, [box], -1, (255, 0, 255), 2, cv2.LINE_AA)

# Show input, filtered, edge map, and annotated result side by side.
result = np.hstack((img, filtered, cv2.cvtColor(edged, cv2.COLOR_GRAY2BGR), canvas))
cv2.imwrite("result.png", result)
cv2.imshow('Results', result)
cv2.waitKey(0)
此外,您可以通過應用均值漂移過濾來采取更嚴厲的措施,從而減少圖像中的顏色數量。 只需將雙邊過濾器替換為:
filtered = cv2.pyrMeanShiftFiltering(img, 20, 50)
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.