Problem Description
I have converted a sudoku image into a sudoku grid using OpenCV.
Now I want to extract each box from the image. What is the best way to do this?
My current idea is to find the intersection points of the lines to locate the corners of each box (see the rough sketch after my code below).
import cv2
import numpy as np


class SudokuSolverPlay:
    def __init__(self, image):
        self.__originalImg = image

    def __preProcess(self, img):
        """return grayscale image"""

    def __maskSudoku(self, img):
        """return masked image"""

    def __dectactEdge(self, img):
        """return sudoku grid"""
        def drawLines(src, dest, iteration=1):
            minLineLength = 100
            src = cv2.convertScaleAbs(src)
            for _ in range(iteration):
                lines = cv2.HoughLinesP(image=src, rho=1, theta=np.pi / 180,
                                        threshold=100, lines=np.array([]),
                                        minLineLength=minLineLength, maxLineGap=100)
                a, b, c = lines.shape
                for i in range(a):
                    x1, y1, x2, y2 = lines[i][0][0], lines[i][0][1], lines[i][0][2], lines[i][0][3]
                    cv2.line(dest, (x1, y1), (x2, y2), 255, 1, cv2.LINE_AA)
                src = cv2.convertScaleAbs(dest)

        def findVerticalLines(img):
            imgX = cv2.GaussianBlur(img, (5, 5), 0)
            kernelx = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 10))
            imgY = cv2.Sobel(img, cv2.CV_64F, 1, 0)
            imgY = cv2.convertScaleAbs(imgY)
            cv2.normalize(imgY, imgY, 0, 255, cv2.NORM_MINMAX)
            imgY = cv2.morphologyEx(imgY, cv2.MORPH_CLOSE, kernelx, iterations=1)
            return imgY

        def findHorizontalLines(img):
            """same as above only args different"""

        img1 = np.zeros(img.shape)
        edges = cv2.Canny(img, 50, 150, apertureSize=3)
        laplacian = cv2.Laplacian(edges, cv2.CV_64F)
        drawLines(laplacian, img1, iteration=1)
        sby = findVerticalLines(img1)
        sbx = findHorizontalLines(img1)
        return img1

    def solveSudoku(self):
        gray = self.__preProcess(self.__originalImg)
        masked = self.__maskSudoku(gray)
        grid = self.__dectactEdge(masked)


if __name__ == '__main__':
    colorImg = cv2.imread('sudoku1.jpg')
    solver = SudokuSolverPlay(colorImg)
    solver.solveSudoku()
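Just to make the idea concrete, this is a rough sketch of the corner-finding step I have in mind (the helper name and its inputs horizontal_mask / vertical_mask are hypothetical; it assumes I already have clean binary masks of the horizontal and vertical grid lines):

import cv2

def find_grid_corners(horizontal_mask, vertical_mask):
    # hypothetical helper: both masks are uint8 binary images of the grid lines
    # grid corners are where a horizontal line crosses a vertical line
    intersections = cv2.bitwise_and(horizontal_mask, vertical_mask)
    # each small blob in `intersections` sits on a corner; take its centroid
    n, labels, stats, centroids = cv2.connectedComponentsWithStats(intersections, connectivity=8)
    return [(int(cx), int(cy)) for cx, cy in centroids[1:]]  # row 0 is the background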
Here, findVerticalLines() and findHorizontalLines() are not able to detect the horizontal and vertical lines properly.

Solution

One way to solve this is to do a morphological operation to find the vertical and horizontal lines from the Canny edge image, then do a connected component analysis to find the boxes. I have done a sample version below; you can fine-tune it further to make it better. I started with the masked image as input.
import cv2
import numpy as np

### reading input image
gray_scale = cv2.imread('masked_image.jpg', 0)
Performing Canny edge detection and adding a dilation layer
img_bin = cv2.Canny(gray_scale, 50, 110)
dil_kernel = np.ones((3, 3), np.uint8)
img_bin = cv2.dilate(img_bin, dil_kernel, iterations=1)
Now, the dilated binary image looks like this.
assuming minimum box size would be 20*20
line_min_width = 20
finding horizontal lines
kernal_h = np.ones((1, line_min_width), np.uint8)
img_bin_h = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernal_h)
finding vertical lines
kernal_v = np.ones((line_min_width, 1), np.uint8)
img_bin_v = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernal_v)
merging and adding a dilation layer to close small gaps
img_bin_final = img_bin_h | img_bin_v
final_kernel = np.ones((3, 3), np.uint8)
img_bin_final = cv2.dilate(img_bin_final, final_kernel, iterations=1)
applying connected component analysis
ret, labels, stats, centroids = cv2.connectedComponentsWithStats(~img_bin_final, connectivity=8, ltype=cv2.CV_32S)
visualising the connected component image
As you can see, we have also detected some text as boxes. We can easily remove it with a simple filter condition; here I'm filtering on the condition that the area should be at least 1000 pixels.
drawing rectangles on the detected boxes.
### labels 1 and 0 are the background and residue connected components, which we do not require
for x, y, w, h, area in stats[2:]:
    # cv2.putText(image, 'box', (x-10, y-10), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
    if area > 1000:
        # `image` is the colour image you want to draw the boxes on
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
final output image
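If you then want to actually crop each detected box out of the image, a minimal sketch could look like the one below; it reuses stats and image from above, keeps the same area filter, and the 50-pixel row tolerance used for sorting is just an assumption you may need to tune:

### rough sketch: collect the filtered boxes and crop them out
boxes = [(x, y, w, h) for x, y, w, h, area in stats[2:] if area > 1000]

### sort roughly top-to-bottom, then left-to-right, so the cells come out in reading order
boxes.sort(key=lambda b: (round(b[1] / 50), b[0]))

cell_images = [image[y:y + h, x:x + w] for x, y, w, h in boxes]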
This answer is based on my solution to find checkboxes/tables in an image using OpenCV. You can find a detailed explanation in my blog at Towards Data Science. Hope this will take you closer to a solution.
Happy coding :)
-- edit 1
code to do connected component visualisation
def imshow_components(labels):
    ### creating an hsv image, with a unique hue value for each label
    label_hue = np.uint8(179 * labels / np.max(labels))
    ### making saturation and value 255
    empty_channel = 255 * np.ones_like(label_hue)
    labeled_img = cv2.merge([label_hue, empty_channel, empty_channel])
    ### converting the hsv image to a BGR image
    labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)
    labeled_img[label_hue == 0] = 0
    ### returning the colour image for visualising the connected components
    return labeled_img
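For example, reusing labels from the connectedComponentsWithStats call above, you could display the result with something like:

cv2.imshow('connected components', imshow_components(labels))
cv2.waitKey(0)
cv2.destroyAllWindows()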