I am trying to make an augmented reality program in OpenCV, but every time I call solvePnP it seems to throw an error.

What I am trying to do is build an augmented reality program in OpenCV by computing the homography points from a selected cropped image to the whole image, and then feeding those points into solvePnP to get a pose estimate.

I managed to get the homography points working, but for some reason I cannot get solvePnP to behave. I suspect my inputs are formatted incorrectly, but I am not sure.

If you want to run the code yourself, git clone this repo (it contains files needed to run the code):
https://github.com/vanstorm9/SLAM-experiments.git

and run the file /augmented-reality/sample-scripts/test.py

Can anyone see what is going wrong?

Error:

Traceback (most recent call last):
  File "/augmented-reality/sample-scripts/test.py", line 315, in <module>
    (ret, rvecs, tvecs) = cv2.solvePnP(objp, corners2, mtx, dist)
error: /opencv/modules/calib3d/src/solvepnp.cpp:61: error: (-215) npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) in function solvePnP

Inputs to solvePnP and their shapes:
(42, 3)     # objp
(143, 2, 1) # corners2
(3, 3)      # mtx
(1, 5)      # dist
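
(For reference, the (-215) assertion above is exactly about these shapes: solvePnP requires imagePoints to check out as an N x 2 array of CV_32F or CV_64F, with N equal to the number of object points. (143, 2, 1) fails the layout check, and 143 does not match objp's 42 points either. A minimal sketch of the massaging that would be needed, assuming -- a big assumption -- that the first 42 matches really correspond to the 42 object points in order:

# reshape (143, 2, 1) -> (143, 1, 2) and force float32 so the array
# passes checkVector(2, CV_32F)
corners2 = corners2.reshape(-1, 1, 2).astype(np.float32)

# the point counts must also agree; truncation here is purely
# illustrative -- the i-th image point must actually correspond to the
# i-th object point for the recovered pose to mean anything
corners2 = corners2[:len(objp)]

(ret, rvecs, tvecs) = cv2.solvePnP(objp, corners2, mtx, dist)
)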

# objp
[[ 0.  0.  0.]
 [ 1.  0.  0.]
 [ 2.  0.  0.]
 [ 3.  0.  0.]
 [ 4.  0.  0.]
 [ 5.  0.  0.]
 [ 6.  0.  0.]
 [ 0.  1.  0.]
 [ 1.  1.  0.]
 [ 2.  1.  0.]
 [ 3.  1.  0.]
 [ 4.  1.  0.]
 [ 5.  1.  0.]
 [ 6.  1.  0.]
 [ 0.  2.  0.]
 [ 1.  2.  0.]
 [ 2.  2.  0.]
 [ 3.  2.  0.]
 [ 4.  2.  0.]
 [ 5.  2.  0.]
 [ 6.  2.  0.]
 [ 0.  3.  0.]
 [ 1.  3.  0.]
 [ 2.  3.  0.]
 [ 3.  3.  0.]
 [ 4.  3.  0.]
 [ 5.  3.  0.]
 [ 6.  3.  0.]
 [ 0.  4.  0.]
 [ 1.  4.  0.]
 [ 2.  4.  0.]
 [ 3.  4.  0.]
 [ 4.  4.  0.]
 [ 5.  4.  0.]
 [ 6.  4.  0.]
 [ 0.  5.  0.]
 [ 1.  5.  0.]
 [ 2.  5.  0.]
 [ 3.  5.  0.]
 [ 4.  5.  0.]
 [ 5.  5.  0.]
 [ 6.  5.  0.]]

# corners2
 [[[   0.]
  [   0.]]

 [[   1.]
  [   1.]]

 [[   2.]
  [   2.]]

 [[   3.]
  [   3.]]

 [[   4.]
  [   4.]]

 [[   5.]
  [   5.]]

 [[   6.]
  [   6.]]

 [[   7.]
  [   7.]]

 [[   8.]
  [   8.]]

 [[   9.]
  [   9.]]

 [[  10.]
  [  10.]]

 [[  11.]
  [  11.]]

 [[  12.]
  [  12.]]

 [[  13.]
  [  13.]]

 [[  14.]
  [  14.]]

 [[  15.]
  [  18.]]

 [[  16.]
  [  19.]]

 [[  17.]
  [  20.]]

 [[  18.]
  [  28.]]

 [[  19.]
  [  29.]]

 [[  20.]
  [  30.]]

 [[  21.]
  [  31.]]

 [[  22.]
  [  32.]]

 [[  23.]
  [  33.]]

 [[  24.]
  [  35.]]

 [[  25.]
  [  36.]]

 [[  26.]
  [  39.]]

 [[  27.]
  [  40.]]

 [[  28.]
  [  41.]]

 [[  29.]
  [  42.]]

 [[  31.]
  [  52.]]

 [[  32.]
  [  53.]]

 [[  33.]
  [  54.]]

 [[  34.]
  [  56.]]

 [[  35.]
  [  57.]]

 [[  36.]
  [  59.]]

 [[  37.]
  [  60.]]

 [[  38.]
  [  61.]]

 [[  40.]
  [  69.]]

 [[  41.]
  [  70.]]

 [[  42.]
  [  71.]]

 [[  43.]
  [  72.]]

 [[  44.]
  [  75.]]

 [[  45.]
  [  76.]]

 [[  47.]
  [  78.]]

 [[  49.]
  [  79.]]

 [[  50.]
  [  86.]]

 [[  51.]
  [  87.]]

 [[  52.]
  [  88.]]

 [[  53.]
  [  89.]]

 [[  54.]
  [  90.]]

 [[  55.]
  [  94.]]

 [[  48.]
  [  95.]]

 [[  56.]
  [ 101.]]

 [[  42.]
  [ 105.]]

 [[  61.]
  [ 109.]]

 [[  62.]
  [ 110.]]

 [[  57.]
  [ 111.]]

 [[  58.]
  [ 112.]]

 [[  61.]
  [ 113.]]

 [[  59.]
  [ 115.]]

 [[  58.]
  [ 116.]]

 [[  63.]
  [ 117.]]

 [[  60.]
  [ 118.]]

 [[  70.]
  [ 119.]]

 [[  71.]
  [ 120.]]

 [[  74.]
  [ 125.]]

 [[  75.]
  [ 126.]]

 [[  76.]
  [ 128.]]

 [[  77.]
  [ 129.]]

 [[  78.]
  [ 131.]]

 [[  66.]
  [ 133.]]

 [[  67.]
  [ 134.]]

 [[  69.]
  [ 135.]]

 [[  79.]
  [ 136.]]

 [[  72.]
  [ 137.]]

 [[  80.]
  [ 139.]]

 [[  73.]
  [ 140.]]

 [[  83.]
  [ 141.]]

 [[  82.]
  [ 142.]]

 [[  91.]
  [ 143.]]

 [[  92.]
  [ 144.]]

 [[  93.]
  [ 145.]]

 [[  94.]
  [ 146.]]

 [[  85.]
  [ 147.]]

 [[  86.]
  [ 148.]]

 [[  87.]
  [ 149.]]

 [[  95.]
  [ 150.]]

 [[ 101.]
  [ 153.]]

 [[ 102.]
  [ 154.]]

 [[ 103.]
  [ 155.]]

 [[ 104.]
  [ 156.]]

 [[ 105.]
  [ 157.]]

 [[ 106.]
  [ 158.]]

 [[ 107.]
  [ 159.]]

 [[ 108.]
  [ 160.]]

 [[ 109.]
  [ 161.]]

 [[ 110.]
  [ 163.]]

 [[ 111.]
  [ 164.]]

 [[ 112.]
  [ 165.]]

 [[ 113.]
  [ 166.]]

 [[ 114.]
  [ 167.]]

 [[  99.]
  [ 168.]]

 [[ 100.]
  [ 169.]]

 [[ 118.]
  [ 171.]]

 [[ 119.]
  [ 172.]]

 [[ 120.]
  [ 173.]]

 [[ 121.]
  [ 174.]]

 [[ 122.]
  [ 175.]]

 [[ 123.]
  [ 176.]]

 [[ 102.]
  [ 177.]]

 [[ 103.]
  [ 178.]]

 [[ 106.]
  [ 180.]]

 [[ 107.]
  [ 181.]]

 [[ 124.]
  [ 182.]]

 [[ 115.]
  [ 183.]]

 [[ 116.]
  [ 184.]]

 [[ 117.]
  [ 187.]]

 [[ 150.]
  [ 188.]]

 [[ 128.]
  [ 192.]]

 [[ 127.]
  [ 194.]]

 [[ 129.]
  [ 197.]]

 [[ 130.]
  [ 198.]]

 [[ 131.]
  [ 199.]]

 [[ 135.]
  [ 200.]]

 [[ 136.]
  [ 201.]]

 [[ 137.]
  [ 202.]]

 [[ 138.]
  [ 203.]]

 [[ 134.]
  [ 204.]]

 [[ 113.]
  [ 205.]]

 [[ 141.]
  [ 206.]]

 [[ 142.]
  [ 207.]]

 [[ 145.]
  [ 208.]]

 [[ 143.]
  [ 212.]]

 [[ 144.]
  [ 213.]]

 [[ 149.]
  [ 214.]]

 [[ 157.]
  [ 216.]]

 [[ 159.]
  [ 218.]]

 [[ 131.]
  [ 220.]]

 [[ 112.]
  [ 221.]]

 [[ 163.]
  [ 223.]]

 [[ 164.]
  [ 224.]]

 [[ 157.]
  [ 228.]]]

Code:
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import glob

# Load previously saved data

mtx = np.load('calib-matrix/mtx.npy')
dist = np.load('calib-matrix/dist.npy')

rect = (0, 0, 0, 0)
startPoint = False
endPoint = False

selectedPoint = False


def on_mouse(event, x, y, flags, params):

    global rect, startPoint, endPoint, selectedPoint

    # get mouse click

    if event == cv2.EVENT_LBUTTONDOWN:

        if startPoint == True and endPoint == True:

            # Resets and deletes the box once you are done

            startPoint = False
            endPoint = False
            rect = (0, 0, 0, 0)

        if startPoint == False:

            # First click, waits for final click to create box

            rect = (x, y, 0, 0)
            startPoint = True
        elif endPoint == False:

            # creates the box (I think)

            rect = (rect[0], rect[1], x, y)
            print '________________'
            print 'Rectangle location: ', rect[0], ' ', rect[1], ' ', x, ' ', y
            endPoint = True

    return


def drawCube(img, corners, imgpts):
    imgpts = np.int32(imgpts).reshape(-1, 2)

    # draw ground floor in green

    img = cv2.drawContours(img, [imgpts[:4]], -1, (0, 255, 0), -3)

    # draw pillars in blue color

    for (i, j) in zip(range(4), range(4, 8)):
        img = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]), 255, 3)

    # draw top layer in red color

    img = cv2.drawContours(img, [imgpts[4:]], -1, (0, 0, 255), 3)

    return img


def draw(img, corners, imgpts):
    corner = tuple(corners[0].ravel())
    img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255, 0, 0), 5)
    img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0, 255, 0), 5)
    img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0, 0, 255), 5)
    return img


def detectAndDescribe(image):

    # convert the image to grayscale
    # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # detect and extract features from the image

    descriptor = cv2.xfeatures2d.SIFT_create()
    (kps, features) = descriptor.detectAndCompute(image, None)

    # convert the keypoints from KeyPoint objects to NumPy
    # arrays

    kps = np.float32([kp.pt for kp in kps])

    # return a tuple of keypoints and features

    return (kps, features)


def matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh):

    # compute the raw matches and initialize the list of actual
    # matches

    matcher = cv2.DescriptorMatcher_create('BruteForce')
    rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
    matches = []
    match_ar = []

    i = 0

    # loop over the raw matches

    for m in rawMatches:

        # ensure the distance is within a certain ratio of each
        # other (i.e. Lowe's ratio test)

        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            matches.append((m[0].trainIdx, m[0].queryIdx))
            if i == 0:
                match_ar = np.array([[[m[0].trainIdx],
                                    [m[0].queryIdx]]], dtype=np.float)
                match_ar = match_ar.transpose()
                match_ar = list(match_ar)
                print type(match_ar)
                i = i + 1
            else:
                m_add = np.array([[m[0].trainIdx],
                                 [m[0].queryIdx]])[None, :]
                m_add = m_add.transpose()
                match_ar = np.concatenate([match_ar, m_add])
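
    # note: match_ar above collects the (trainIdx, queryIdx) match indices,
    # not pixel coordinates; the actual point coordinates are gathered into
    # ptsA / ptsB below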


    # computing a homography requires at least 4 matches

    if len(matches) > 4:

        # construct the two sets of points

        ptsA = np.float32([kpsA[i] for (_, i) in matches])
        ptsB = np.float32([kpsB[i] for (i, _) in matches])

        # compute the homography between the two sets of points

        (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                reprojThresh)

        # return the matches along with the homography matrix
        # and status of each matched point
        # return (matches, H, status)

        return (matches, match_ar, H, status)

    # otherwise, no homography could be computed

    return None


def drawMatches(imageA, imageB, kpsA, kpsB, matches, status):

    # initialize the output visualization image

    (hA, wA) = imageA.shape[:2]
    (hB, wB) = imageB.shape[:2]
    vis = np.zeros((max(hA, hB), wA + wB, 3), dtype='uint8')
    print imageA.shape
    print imageB.shape
    vis[0:hA, 0:wA] = imageA
    vis[0:hB, wA:] = imageB

    # loop over the matches

    for ((trainIdx, queryIdx), s) in zip(matches, status):

        # only process the match if the keypoint was successfully
        # matched

        if s == 1:

            # draw the match

            ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
            ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
            cv2.line(vis, ptA, ptB, (0, 255, 0), 1)

    # return the visualization

    return vis


criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
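# objp: the 42 (7 x 6) chessboard corner coordinates on the z = 0 plane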
objp = np.zeros((6 * 7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)



axis = np.float32([[3, 0, 0], [0, 3, 0], [0, 0, -3]]).reshape(-1, 3)
cubeAxis = np.float32([
    [0, 0, 0],
    [0, 3, 0],
    [3, 3, 0],
    [3, 0, 0],
    [0, 0, -3],
    [0, 3, -3],
    [3, 3, -3],
    [3, 0, -3],
    ])

# Here we are going to try to define our corners

#fname = 'images/left01.jpg'
fname = 'images/checkerboard4.jpg'

img = cv2.imread(fname)

cv2.namedWindow('Label')
cv2.setMouseCallback('Label', on_mouse)

'''
while 1:
    if selectedPoint == True:
        break

    if startPoint == True and endPoint == True:
        cv2.rectangle(img, (rect[0], rect[1]), (rect[2], rect[3]),
                      (255, 0, 255), 2)
    cv2.imshow('Label', img)
    if cv2.waitKey(20) & 255 == 27:
        break
cv2.destroyAllWindows()
'''


'''
reference_color = img[rect[1]:rect[3], rect[0]:rect[2]]

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
reference_img = gray[rect[1]:rect[3], rect[0]:rect[2]]
'''
reference_color = img[101:400, 92:574]

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
reference_img = gray[101:400, 92:574]



####### Attempting to perform homography #######

sift = cv2.xfeatures2d.SIFT_create()

(kp1, des1) = detectAndDescribe(gray)
(kp2, des2) = detectAndDescribe(reference_img)


(M, M_ar, H, status) = matchKeypoints(kp1, kp2, des1, des2, 0.75, 4.0)

if M is None:
    print 'No matches found'
    exit()

print 'Matches found'

# vis = drawMatches(gray, reference_img, kp1, kp2, M, status)

vis = drawMatches(img, reference_color, kp1, kp2, M, status)
cv2.imshow('vis', vis)
cv2.waitKey(0)

corners2 = M_ar



if 1 == 1:

    # Find the rotation and translation vectors.
    print 'here'
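    # the call below is what triggers the (-215) assertion quoted above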
    (ret, rvecs, tvecs) = cv2.solvePnP(objp, corners2, mtx, dist)

    # project 3D points to image plane

    (imgpts, jac) = cv2.projectPoints(cubeAxis, rvecs, tvecs, mtx, dist)

    img = drawCube(img, corners2, imgpts)
    cv2.imshow('img', img)
    k = cv2.waitKey(0) & 255
else:
    print 'No corners were detected'

Best answer:

I don't know whether you ever fixed this. You may be able to fix it with _, rvecs, tvecs, inliers = cv2.solvePnPRansac(objp, corners2, mtx, dist), because solvePnP changed at some point but the change was never documented in the official tutorials.
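
A minimal sketch of that suggestion, assuming (as in the note above, and these are assumptions on my part) that corners2 has first been reshaped to (N, 1, 2) float32 and trimmed so the point counts match -- the corners2 printed in the question satisfies neither:

# make the image points pass solvePnP's layout check and match the
# object-point count
corners2 = corners2.reshape(-1, 1, 2).astype(np.float32)[:len(objp)]

# solvePnPRansac returns an extra inliers array and tolerates outlier
# correspondences better than plain solvePnP
(_, rvecs, tvecs, inliers) = cv2.solvePnPRansac(objp, corners2, mtx, dist)

RANSAC only helps with outliers, though; each image point still has to correspond to the object point at the same index for the recovered pose to be meaningful.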

A similar question on Stack Overflow, python - Error when running solvePnP in OpenCV Python: https://stackoverflow.com/questions/39051015/
