我实现了这款Harris Corner Detector,它在功能方面完全符合预期,但是在性能方面却非常慢。我几乎可以确定,这与我正在逐个访问图像的每个像素有关,但也可能是我的实现存在某些错误。我一直在思考如何优化np数组访问以应用过滤器,但是由于这些过滤器的性质,我仍然想不出一个好主意。
该方法本身并不慢,因为对于同一图像,OpenCV基本上是即时的。

import numpy as np
import cv2 as cv
import matplotlib.pyplot as pl
import matplotlib.cm as cm
import math


def hor_edge_strength(x, y, img_in=None, filter=None):
    """Return the horizontal edge strength at pixel (x, y).

    Correlates the 3x3 neighbourhood centred on (x, y) of ``img_in``
    with the 3x3 ``filter`` (plain correlation, no kernel flip).

    Note: ``img_in`` and ``filter`` are effectively required; the
    original signature used mutable ``[]`` defaults, which is a Python
    anti-pattern, so they are replaced with ``None`` sentinels.
    Calling this once per pixel from Python is what makes the detector
    slow -- prefer a whole-image vectorized filter for performance.
    """
    strength = 0
    for i in range(3):
        # Hoist the row lookups out of the inner loop.
        img_row = img_in[x + i - 1]
        k_row = filter[i]
        for j in range(3):
            strength += img_row[y + j - 1] * k_row[j]
    return strength


def ver_edge_strength(x, y, img_in=None, filter=None):
    """Return the vertical edge strength at pixel (x, y).

    Correlates the 3x3 neighbourhood centred on (x, y) of ``img_in``
    with the 3x3 ``filter`` (plain correlation, no kernel flip).

    Note: ``img_in`` and ``filter`` are effectively required; the
    original signature used mutable ``[]`` defaults, which is a Python
    anti-pattern, so they are replaced with ``None`` sentinels.
    Calling this once per pixel from Python is what makes the detector
    slow -- prefer a whole-image vectorized filter for performance.
    """
    strength = 0
    for i in range(3):
        # Hoist the row lookups out of the inner loop.
        img_row = img_in[x + i - 1]
        k_row = filter[i]
        for j in range(3):
            strength += img_row[y + j - 1] * k_row[j]
    return strength

def gauss_kernels(size, sigma=1):
    """Return a normalized 2-D Gaussian kernel of shape (size, size).

    size  : kernel width; values below 3 are clamped to 3.
    sigma : standard deviation of the Gaussian.
    """
    if size < 3:
        size = 3
    # Integer half-width.  The original used size/2, which is a float on
    # Python 3 and silently relied on integer division on Python 2.
    m = size // 2
    x, y = np.mgrid[-m:m + 1, -m:m + 1]
    # 2.0 forces float division; with integer sigma the original
    # expression truncated to 0/-1 exponents under Python 2 division.
    kernel = np.exp(-(x * x + y * y) / (2.0 * sigma * sigma))
    kernel_sum = kernel.sum()
    # Bug fix: the original wrote `if not sum==0`, which compares the
    # *builtin* sum function with 0 (always True).  Guard on the value
    # actually computed above.
    if kernel_sum != 0:
        kernel = kernel / kernel_sum
    return kernel

sobel_h = [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]
sobel_v = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]]


def _filter3x3(img_in, kernel):
    """Apply a 3x3 correlation to a whole 2-D float image at once.

    Vectorized replacement for calling hor/ver_edge_strength once per
    pixel: the nine kernel taps become nine shifted array slices that
    NumPy accumulates in C, instead of a Python double loop over every
    pixel (this per-pixel loop was the performance problem).  The
    one-pixel border is left at zero, exactly like the original loops
    that only ran over range(1, n - 1).
    """
    k = np.asarray(kernel, dtype=float)
    h, w = img_in.shape
    out = np.zeros_like(img_in)
    acc = np.zeros((h - 2, w - 2))
    for i in range(3):
        for j in range(3):
            acc += k[i, j] * img_in[i:h - 2 + i, j:w - 2 + j]
    out[1:-1, 1:-1] = acc
    return out


img_arr = ['checker.jpg']  # Test image
for img_name in img_arr:
    img = cv.imread(img_name, 0)  # 0 -> load as grayscale
    sep = '.'
    img_name = img_name.split(sep, 1)[0]  # strip the extension
    print(img_name)
    gray_img = img.astype(float)

    print('Getting strengths')
    # Sobel gradients, vectorized over the full image.
    gx = _filter3x3(gray_img, sobel_h)
    gy = _filter3x3(gray_img, sobel_v)
    I_xx = gx * gx
    I_xy = gx * gy
    I_yy = gy * gy

    gaussKernel = gauss_kernels(3, 1)

    print('Convoluting')
    # Gaussian-weighted sums of the structure-tensor entries.
    W_xx = _filter3x3(I_xx, gaussKernel)
    W_xy = _filter3x3(I_xy, gaussKernel)
    W_yy = _filter3x3(I_yy, gaussKernel)

    print('Calculating Harris Corner')
    k = 0.06
    # Harris response det(W) - k*trace(W)^2 for every pixel in one
    # expression.  The original built a 2x2 np.matrix per pixel purely
    # "for lab purposes" and never used it -- dropped here.
    detW = W_xx * W_yy - W_xy * W_xy
    traceW = W_xx + W_yy
    HCResponse = detW - k * traceW * traceW

    threshold = 0.1
    # .max() runs in C; max(arr.ravel()) iterated element by element in
    # Python.
    imageThreshold = HCResponse.max() * threshold

    HCResponseThreshold = (HCResponse >= imageThreshold) * 1
    candidates = np.transpose(HCResponseThreshold.nonzero())

    print('Drawing')

    h, w = gray_img.shape
    image = np.empty((h, w, 3), dtype=np.uint8)
    image[:, :, 0] = gray_img
    image[:, :, 1] = gray_img
    image[:, :, 2] = gray_img
    # Mark corner candidates in red (candidates holds (row, col) pairs).
    for x, y in candidates:
        image[x, y] = [255, 0, 0]
    pl.imshow(image)
    # Save BEFORE show(): show() blocks until the window closes, after
    # which the figure is empty, so the original savefig-after-show
    # wrote a blank file.
    pl.savefig(img_name + '_edge.jpg')
    pl.show()

是否有任何可能的解决方案可以显着提高此边缘检测器的性能?

最佳答案

import cv2
import numpy as np

filename = 'chess1.jpg'
image = cv2.imread(filename)

# cornerHarris expects a single-channel float32 input.
gray = np.float32(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))

# Harris response: neighbourhood blockSize=2, Sobel aperture=3, k=0.04.
response = cv2.cornerHarris(gray, 2, 3, 0.04)

# Dilating only fattens the marks for display; it does not change
# which corners are detected.
response = cv2.dilate(response, None)

# Paint every pixel above 1% of the strongest response red (BGR order).
# The threshold may need tuning per image.
image[response > 0.01 * response.max()] = [0, 0, 255]

cv2.imshow('dst', image)
# Wait for a key; tear the window down on ESC (27).
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()

有关更多信息,请参见link

关于python - 提高哈里斯角点检测器(Harris Corner Detector)的效率,我们在Stack Overflow上找到一个类似的问题:https://stackoverflow.com/questions/33617982/

10-11 22:08