
OpenCV Error: Expected Ptr<cv::UMat> for argument '%s'

I am new to OpenCV, and I am trying to use OpenCV for Python 3.7 to stitch five images together into a single image. The source code I started from only stitched two images together, so I modified it to stitch five webcam images. However, I am presented with this error:

Traceback (most recent call last):
  File "C:\Users\colby\OneDrive\Desktop\New folder\photomosaic.py", line 80, in <module>
    img3 = cv2.drawMatches(kpsA,secondImg,kpsB,thirdImg, kpsC, forthImg, kpsD, fifthImg, kpsE, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
TypeError: Expected Ptr<cv::UMat> for argument '%s'
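
For context: cv2.drawMatches is strictly pairwise. Its documented signature takes a first image and its keypoints, a second image and its keypoints, the list of DMatch objects between them, and an output image. The call in the traceback instead interleaves keypoints from five images and passes no match list, so OpenCV cannot convert the arguments it expects to be images (cv::UMat), which is what the TypeError reports. A minimal sketch of a valid two-image call, where matchesAB is a hypothetical name for the DMatch list of that pair:

img3 = cv2.drawMatches(firstImg, kpsA,     # first image and its keypoints
                       secondImg, kpsB,    # second image and its keypoints
                       matchesAB,          # matches between the pair (hypothetical name)
                       None,               # let OpenCV allocate the output image
                       flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)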

My Code:

import cv2
import numpy as np
import matplotlib.pyplot as plt
import imageio
import imutils
import cameracapture  # custom module that grabs the five webcam captures (cap1.jpg ... cap5.jpg)

cv2.ocl.setUseOpenCL(False)

feature_extractor = 'orb'  # one of 'sift', 'surf', 'brisk', 'orb'
feature_matching = 'bf'

# load the five captures and convert each to grayscale
firstImg = imageio.imread('cap1.jpg')
firstImg_gray = cv2.cvtColor(firstImg, cv2.COLOR_RGB2GRAY)
secondImg = imageio.imread('cap2.jpg')
secondImg_gray = cv2.cvtColor(secondImg, cv2.COLOR_RGB2GRAY)
thirdImg = imageio.imread('cap3.jpg')
thirdImg_gray = cv2.cvtColor(thirdImg, cv2.COLOR_RGB2GRAY)
forthImg = imageio.imread('cap4.jpg')
forthImg_gray = cv2.cvtColor(forthImg, cv2.COLOR_RGB2GRAY)
fifthImg = imageio.imread('cap5.jpg')
fifthImg_gray = cv2.cvtColor(fifthImg, cv2.COLOR_RGB2GRAY)

plt.show()
def detectAndDescribe(image, method=None):
    assert method is not None, "You need to define a feature detection method. Values are: 'sift', 'surf'"

    if method == 'sift':
        descriptor = cv2.xfeatures2d.SIFT_create()
    elif method == 'surf':
        descriptor = cv2.xfeatures2d.SURF_create()
    elif method == 'brisk':
        descriptor = cv2.BRISK_create()
    elif method == 'orb':
        descriptor = cv2.ORB_create()

    # get keypoints and descriptors
    (kps, features) = descriptor.detectAndCompute(image, None)
    return (kps, features)
kpsA, featuresA = detectAndDescribe(firstImg_gray, method=feature_extractor)
kpsB, featuresB = detectAndDescribe(secondImg_gray, method=feature_extractor)
kpsC, featuresC = detectAndDescribe(thirdImg_gray, method=feature_extractor)
kpsD, featuresD = detectAndDescribe(forthImg_gray, method=feature_extractor)
kpsE, featuresE = detectAndDescribe(fifthImg_gray, method=feature_extractor) 
def createMatcher(method, crossCheck):
    if method == 'sift' or method == 'surf':
        bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=crossCheck)
    elif method == 'orb' or method == 'brisk':
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=crossCheck)
    return bf
def matchKeyPointsBF(featuresA, featuresB, featuresC, featuresD, featuresE, method):
    bf = createMatcher(method, crossCheck=True)

    # Match descriptors.
    best_matches = bf.match(featuresA, featuresB)
    best_matchs = bf.match(featuresA, featuresB + featuresC + featuresD + featuresE)

    # Sort the features in order of distance.
    # The points with small distance (more similarity) are ordered first in the vector
    rawMatches = sorted(best_matches, key=lambda x: x.distance)
    print("Raw matches (Brute force):", len(rawMatches))
    return rawMatches
def matchKeyPointsKNN(featuresA, featuresB, featuresC, featuresD, featuresE, ratio, method):
    bf = createMatcher(method, crossCheck=False)
    # compute the raw matches and initialize the list of actual matches
    rawMatches = bf.knnMatch(featuresA, featuresB, featuresC, featuresD, featuresE, 2)
    print("Raw matches (knn):", len(rawMatches))
    matches = []

    # loop over the raw matches
    for m,n in rawMatches:
        # ensure the distance is within a certain ratio of each
        # other (i.e. Lowe's ratio test)
        if m.distance < n.distance * ratio:
            matches.append(m)
    return matches
print("Using: {} feature matcher".format(feature_matching))

fig = plt.figure(figsize=(20,8))

if feature_matching == 'bf':
    matches = matchKeyPointsBF(featuresA, featuresB, featuresC, featuresD,
                               featuresE, method=feature_extractor)
    # this is line 80 from the traceback above
    img3 = cv2.drawMatches(kpsA, secondImg, kpsB, thirdImg, kpsC, forthImg,
                           kpsD, fifthImg, kpsE,
                           flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
elif feature_matching == 'knn':
    matches = matchKeyPointsKNN(featuresA, featuresB, featuresC, featuresD,
                                featuresE, ratio=0.75, method=feature_extractor)
    img3 = cv2.drawMatches(firstImg, kpsA, secondImg, kpsB, thirdImg, kpsC,
                           forthImg, kpsD, fifthImg, kpsE,
                           np.random.choice(matches, 100), None,
                           flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)


plt.imshow(img3)
plt.show()
def getHomography(kpsA, kpsB, kpsC, kpsD, kpsE, featuresA, featuresB,
                  featuresC, featuresD, featuresE, matches, reprojThresh):
    # convert the keypoints to numpy arrays
    kpsA = np.float32([kp.pt for kp in kpsA])
    kpsB = np.float32([kp.pt for kp in kpsB])
    kpsC = np.float32([kp.pt for kp in kpsC])
    kpsD = np.float32([kp.pt for kp in kpsD])
    kpsE = np.float32([kp.pt for kp in kpsE])

    if len(matches) > 4:
        # construct the sets of points
        ptsA = np.float32([kpsA[m.firstIdx] for m in matches])
        ptsB = np.float32([kpsB[m.secondIdx] for m in matches])
        ptsC = np.float32([kpsC[m.thirdIdx] for m in matches])
        ptsD = np.float32([kpsD[m.forthIdx] for m in matches])
        ptsE = np.float32([kpsE[m.fifthIdx] for m in matches])

        # estimate the homography between the sets of points
        (H, status) = cv2.findHomography(ptsA, ptsB, ptsC, ptsD, ptsE, cv2.RANSAC, reprojThresh)

        return (matches, H, status)
    else:
        return None
M = getHomography(kpsA, kpsB, kpsC, kpsD, kpsE, featuresA, featuresB,
                  featuresC, featuresD, featuresE, matches, reprojThresh=4)
if M is None:
    print("Error!")
(matches, H, status) = M
print(H)
# Apply panorama correction
width = trainImg.shape[1] + queryImg.shape[1]
height = trainImg.shape[0] + queryImg.shape[0]

result = cv2.warpPerspective(trainImg, H, (width, height))
result[0:queryImg.shape[0], 0:queryImg.shape[1]] = queryImg

plt.figure(figsize=(20,10))
plt.imshow(result)

plt.axis('off')
plt.show()
# transform the panorama image to grayscale and threshold it 
gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]

# Finds contours from the binary image
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)

# get the maximum contour area
c = max(cnts, key=cv2.contourArea)

# get a bbox from the contour area
(x, y, w, h) = cv2.boundingRect(c)

# crop the image to the bbox coordinates
result = result[y:y + h, x:x + w]

# show the cropped image
plt.figure(figsize=(20,10))
plt.imshow(result)

The expected output:

I expect the program to take five pictures with my webcam (which is imported through cameracapture) and then stitch them together into one image. Any advice or fixes for this error would be greatly appreciated.
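
Since every OpenCV call involved here (BFMatcher.match, knnMatch, drawMatches, findHomography, warpPerspective) operates on exactly two images or two point sets at a time, one way to reach five images is to fold them in one pair at a time. The sketch below is only an illustration under that assumption, reusing the variable names from the code above; note also that DMatch objects expose queryIdx and trainIdx, not firstIdx/secondIdx as the getHomography function above assumes:

def stitch_pair(base, new):
    # pairwise version of the pipeline above: ORB features, Hamming
    # brute-force matching, RANSAC homography, perspective warp
    orb = cv2.ORB_create()
    kps1, feat1 = orb.detectAndCompute(cv2.cvtColor(base, cv2.COLOR_RGB2GRAY), None)
    kps2, feat2 = orb.detectAndCompute(cv2.cvtColor(new, cv2.COLOR_RGB2GRAY), None)

    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(bf.match(feat2, feat1), key=lambda m: m.distance)
    if len(matches) <= 4:
        raise RuntimeError("not enough matches to estimate a homography")

    # DMatch indexing: queryIdx -> first argument (feat2), trainIdx -> second (feat1)
    ptsNew = np.float32([kps2[m.queryIdx].pt for m in matches])
    ptsBase = np.float32([kps1[m.trainIdx].pt for m in matches])

    # findHomography takes exactly two point sets: source and destination
    (H, status) = cv2.findHomography(ptsNew, ptsBase, cv2.RANSAC, 4.0)

    width = base.shape[1] + new.shape[1]
    height = base.shape[0] + new.shape[0]
    result = cv2.warpPerspective(new, H, (width, height))
    result[0:base.shape[0], 0:base.shape[1]] = base
    return result

panorama = firstImg
for nextImg in (secondImg, thirdImg, forthImg, fifthImg):
    panorama = stitch_pair(panorama, nextImg)

Each pass assumes the next capture overlaps the running panorama; cropping the black border between passes (as the contour code above does once at the end) would keep the canvas from growing unnecessarily.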

One thing to check first is what type imageio.imread actually returns:

firstImg = imageio.imread('cap1.jpg')
print(type(firstImg))

If the output is

<class 'PIL.JpegImagePlugin.JpegImageFile'>

then convert it to a NumPy array with:

firstImg = np.asarray(firstImg)

Then pass this firstImg to the grayscale conversion.
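
Putting that suggestion together, a minimal sketch of the check-and-convert step before the grayscale conversion (np.asarray is a no-op if imread already returned a NumPy array):

import numpy as np
import imageio
import cv2

firstImg = np.asarray(imageio.imread('cap1.jpg'))  # force a plain NumPy array
print(type(firstImg))                              # should now be <class 'numpy.ndarray'>
firstImg_gray = cv2.cvtColor(firstImg, cv2.COLOR_RGB2GRAY)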
