
KLT tracker in OpenCV not working properly with Python

I am using the KLT (Kanade-Lucas-Tomasi) tracking algorithm to track the motion of traffic in India. I am tracking the flow on one side of the road properly, but the traffic on the other side, which is also moving in the frame, is not detected at all.

The algorithm uses cv2.goodFeaturesToTrack and cv2.calcOpticalFlowPyrLK to achieve the result.
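For context, here is a minimal sketch of how these two calls fit together on two consecutive frames (the variable names prev_gray/curr_gray and the file name 'traffic.avi' are illustrative placeholders, not taken from the code below):

import cv2
import numpy as np

cap = cv2.VideoCapture('traffic.avi')  # placeholder file name
ret, prev = cap.read()
ret, curr = cap.read()
prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
curr_gray = cv2.cvtColor(curr, cv2.COLOR_BGR2GRAY)

# 1. Detect Shi-Tomasi corners in the previous frame
p0 = cv2.goodFeaturesToTrack(prev_gray, maxCorners=500, qualityLevel=0.1,
                             minDistance=7, blockSize=3)

# 2. Track those corners into the current frame with pyramidal Lucas-Kanade
p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, curr_gray, p0, None,
                                       winSize=(15, 15), maxLevel=2)

# Keep only the points whose status flag is 1 (successfully tracked)
good_new = p1[st == 1]
good_old = p0[st == 1]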

[Screenshot: a frame from the traffic video with the tracked feature points drawn on it]

In the image you can see that the red and silver cars have no tracked features on them. The yellow auto on the left is also not tracked. Is there any reason for this? The corners are still there.

Feature parameters for cv2.goodFeaturesToTrack:

feature_params = dict( maxCorners = 500,   # how many points to locate
                       qualityLevel = 0.1,  # between 0 and 1; minimum quality below which corners are rejected
                       minDistance = 7,   # minimum Euclidean distance between detected corners
                       blockSize = 3 ) # size of the averaging block for computing the derivative covariation matrix over each pixel neighborhood

Parameters for cv2.calcOpticalFlowPyrLK:

lk_params = dict( winSize  = (15,15),  # size of the search window at each pyramid level
                  maxLevel = 2,   # if 0, pyramids are not used (single level); if 1, two levels are used, and so on
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

The video I have to use is 60 minutes long, and KLT stops tracking after 5 minutes. Any suggestions or help would be great. Thanks.

Basically you are doing everything right; you just need to reinitialize the good points for tracking, like this:

p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)

after, say, every 5th frame, or whatever interval you like. Hope it helps! The following is my code:

import cv2
import numpy as np

cap = cv2.VideoCapture('side.avi')
# params for ShiTomasi corner detection
feature_params = dict( maxCorners = 100,
                       qualityLevel = 0.3,
                       minDistance = 7,
                       blockSize = 7 )
# Parameters for lucas kanade optical flow
lk_params = dict( winSize  = (15,15),
                  maxLevel = 2,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# Create some random colors
color = np.random.randint(0,255,(100,3))
# Take first frame and find corners in it
ret, old_frame = cap.read()
# Skip the first 60 frames before picking the reference frame
for i in range(60):
    ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
print(p0)
# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)
while(1):
    ret,frame = cap.read()
    if not ret:  # stop cleanly when the video ends instead of crashing on a None frame
        break
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame_no = cap.get(cv2.CAP_PROP_POS_FRAMES)
    if int(frame_no)%5 == 0:
        # Reinitialize the feature points every 5th frame
        p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
    # Select good points
    good_new = p1[st==1]
    good_old = p0[st==1]
    # draw the tracks
    for i,(new,old) in enumerate(zip(good_new,good_old)):
        a,b = new.ravel()
        c,d = old.ravel()
        # cv2.line and cv2.circle expect integer pixel coordinates
        mask = cv2.line(mask, (int(a),int(b)),(int(c),int(d)), color[i].tolist(), 2)
        frame = cv2.circle(frame,(int(a),int(b)),5,color[i].tolist(),-1)
    img = cv2.add(frame,mask)
    cv2.imshow('frame',img)
    k = cv2.waitKey(2000) & 0xff
    if k == 27:
        break
    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1,1,2)
cv2.destroyAllWindows()
cap.release()
import numpy as np
import cv2

video_path = ''
output_file = ""     
cap = cv2.VideoCapture(video_path)

fourcc = cv2.VideoWriter_fourcc(*'DIVX')

# params for ShiTomasi corner detection
feature_params = dict( maxCorners = 500,   # How many pts. to locate
                       qualityLevel = 0.1,  # b/w 0 & 1, min. quality below which everyone is rejected
                       minDistance = 7,   # Min eucledian distance b/w corners detected
                       blockSize = 3 ) # Size of an average block for computing a derivative covariation matrix over each pixel neighborhood

# Parameters for lucas kanade optical flow
lk_params = dict( winSize  = (15,15),  # size of the search window at each pyramid level
                  maxLevel = 2,   #  0, pyramids are not used (single level), if set to 1, two levels are used, and so on
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

''' Criteria : Termination criteria for iterative search algorithm.
    after maxcount { Criteria_Count } : no. of max iterations.
    or after { Criteria Epsilon } : search window moves by less than this epsilon '''


# Take first frame and find corners in it
ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)  # use goodFeaturesToTrack to find the locations of the good corners

# Create a mask image, filled with zeros, for drawing purposes
mask = np.zeros_like(old_frame)

y = 0
is_begin = True # To save the output video
count = 1  # for the frame count
n = 50  # Frames refresh rate for feature generation

while True:
    ret,frame = cap.read()
    if frame is None:
        break
    processed = frame

    #Saving the Video
    if is_begin:
        h, w, _ = processed.shape
        out = cv2.VideoWriter(output_file, fourcc, 30, (w, h), True)
        is_begin = False

    # Convert to Grey Frame
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    if count%n == 0:  # Refresh the tracking features after every 50 frames
        cv2.imwrite('img/r{0:05d}.jpg'.format(y), img)
        y += 1
        # Re-detect features on the current frame; reading another frame from the
        # capture here would skip a frame and desynchronize old_gray and frame_gray
        old_gray = frame_gray.copy()
        p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
        mask = np.zeros_like(frame)

    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)

    # Select good points
    good_new = p1[st==1]
    good_old = p0[st==1]

    # draw the tracks
    for i,(new,old) in enumerate(zip(good_new,good_old)):
        a,b = new.ravel() # tmp new value
        c,d = old.ravel() # tmp old value
        # cv2.line and cv2.circle expect integer pixel coordinates
        # draw a line connecting the old point with the new point
        mask = cv2.line(mask, (int(a),int(b)),(int(c),int(d)), (0,255,0), 1)
        # draw the new point
        frame = cv2.circle(frame,(int(a),int(b)),2,(0,0,255), -1)
    img = cv2.add(frame,mask)

    out.write(img)
    cv2.imshow('frame',img)
    k = cv2.waitKey(30) & 0xff

    #Show the Output
    if k == 27:
        cv2.imshow('', img)
        break

    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1,1,2)

    count += 1

# release and destroy all windows
cv2.destroyAllWindows()
cap.release()

I added the refresh rate for goodFeaturesToTrack and it's working, but we will not get the full trajectory. Working on it now.
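One possible way to keep the refresh without losing the existing trajectories (a sketch only, not tested on the original video; the helper name refresh_features and the 10-pixel blocking radius are illustrative assumptions) is to detect new corners only in regions that currently have no tracked points, using the mask argument of cv2.goodFeaturesToTrack, and append them to p0 instead of replacing it:

import cv2
import numpy as np

def refresh_features(gray, p0, feature_params):
    """Add new corners to p0 without dropping the points that are already tracked."""
    detect_mask = np.full(gray.shape, 255, dtype=np.uint8)
    if p0 is not None:
        for x, y in p0.reshape(-1, 2):
            # block out a small neighbourhood around every existing point
            cv2.circle(detect_mask, (int(x), int(y)), 10, 0, -1)
    new_pts = cv2.goodFeaturesToTrack(gray, mask=detect_mask, **feature_params)
    if new_pts is None:
        return p0
    if p0 is None or len(p0) == 0:
        return new_pts
    return np.vstack((p0, new_pts))

In the refresh branch above, calling p0 = refresh_features(frame_gray, p0, feature_params) instead of reassigning p0 from scratch would keep the already-tracked points alive, so their trajectory lines on the mask image are not cut off at every refresh.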
