python - 使用OpenCV识别前色和背景色-LMLPHP
我对Python和OpenCV很陌生。我有一些屏幕截图(附有一个示例供参考),我想识别其中文本的前景色和背景色,并用这两种颜色来计算文本的颜色对比度。使用pytesseract,我已经能够识别单词并为文本绘制边界矩形。谁能指导我如何检测文本的前景色和背景色?下面是我到目前为止所做的代码。

import cv2
import pytesseract
import numpy as np


# Path to the Tesseract OCR executable (Windows install location).
pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'

imgOriginal = cv2.imread('3.png')
if imgOriginal is None:
    # cv2.imread returns None (no exception) for a missing/unreadable file;
    # fail fast instead of crashing later inside cvtColor.
    raise FileNotFoundError('Could not read input image: 3.png')

# Binarize with Otsu (threshold picked automatically); INV makes text white.
gray = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Light blur to smooth jagged glyph edges before OCR.
img = cv2.GaussianBlur(thresh, (3, 3), 0)
cv2.imshow("Filtered", img)

### Detecting words
# image_to_data returns TSV text: a header row, then one row per element.
# Rows that describe a recognized word have 12 fields (the 12th is the word).
boxes = pytesseract.image_to_data(img, config='--psm 6')
for i, row in enumerate(boxes.splitlines()):
    if i == 0:
        continue  # skip the TSV header row
    fields = row.split()
    if len(fields) == 12:  # only rows that actually contain a word
        x, y, w, h = int(fields[6]), int(fields[7]), int(fields[8]), int(fields[9])
        cv2.rectangle(imgOriginal, (x, y), (x + w, y + h), (0, 0, 255), 1)

cv2.imshow('Image', imgOriginal)

# Close the windows when 'q' is pressed.
k = cv2.waitKey(0)
if k == ord('q'):
    cv2.destroyAllWindows()

最佳答案

如果您仍在寻找答案。

imgOriginal = cv2.imread('windows.png')
if imgOriginal is None:
    # cv2.imread returns None (no exception) for a missing/unreadable file.
    raise FileNotFoundError('Could not read input image: windows.png')
image = imgOriginal.copy()    # working copy: lines painted out, boxes drawn on it
image_1 = imgOriginal.copy()  # pristine copy: pixel colors are sampled from here

gray = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]


def _paint_out_lines(binary, kernel_size):
    """Find long straight lines in `binary` via morphological opening with
    `kernel_size` and paint them white on the module-level `image`."""
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
    detected_lines = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel, iterations=2)
    cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]  # OpenCV 3 vs 4 return shapes
    for c in cnts:
        cv2.drawContours(image, [c], -1, (255, 255, 255), 2)


# Remove horizontal then vertical ruling lines so they don't confuse the OCR.
_paint_out_lines(thresh, (7, 1))
_paint_out_lines(thresh, (1, 7))

gray_no_lines = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Non-inverted Otsu: text pixels become black (0), background white (255).
otsu = cv2.threshold(gray_no_lines, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

### Detecting words
# TSV output: header row, then one row per element; rows describing a
# recognized word have 12 fields (field 11 is the confidence, field 12 the word).
boxes = pytesseract.image_to_data(otsu, config='--psm 6')

word_boxes = []  # (x, y, w, h) for every recognized word
words = []
for i, row in enumerate(boxes.splitlines()):
    if i == 0:
        continue  # skip the TSV header row
    fields = row.split()
    # BUG FIX: the original tested `b[11] != -1`, comparing a str to an int,
    # which is always True. The confidence is the (string) 11th field.
    if len(fields) == 12 and fields[10] != '-1':
        x, y, w, h = map(int, fields[6:10])
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 1)
        word_boxes.append((x, y, w, h))
        words.append(fields[11])

text_colors = []  # mean BGR color of the text pixels, one tuple per word
bg_colors = []    # mean BGR color of the background pixels, one tuple per word
for x, y, w, h in word_boxes:
    roi_otsu = otsu[y:y + h, x:x + w]
    roi_image = image_1[y:y + h, x:x + w]

    # Boolean masks select text vs. background pixels directly; a per-channel
    # mean over the masked pixels replaces the original per-pixel Python loops.
    text_px = roi_image[roi_otsu == 0]     # pixels that are text in the binary ROI
    bg_px = roi_image[roi_otsu == 255]     # pixels that are background

    # Guard empty masks: np.mean of an empty selection is NaN and int(NaN)
    # raises. Fall back to black text / white background in that case.
    if text_px.size:
        color_t = tuple(int(c) for c in text_px.mean(axis=0))  # (B, G, R)
    else:
        color_t = (0, 0, 0)
    if bg_px.size:
        color_bg = tuple(int(c) for c in bg_px.mean(axis=0))   # (B, G, R)
    else:
        color_bg = (255, 255, 255)

    text_colors.append(color_t)
    bg_colors.append(color_bg)

print(text_colors)
print(bg_colors)

# print(len(text_colors), len(bg_colors))
我删除了水平和垂直线以获得更好的效果。对图像进行二值化并收集每个文本区域的坐标。对感兴趣区域进行切片,从二值化后的切片区域中分别收集文本像素和背景像素的坐标,再从彩色切片区域中读取这些坐标处的像素值。对每个颜色通道取平均值,然后将得到的颜色追加到最终列表中。
希望这能解决您的问题。如果我错了,请纠正我。

关于python - 使用OpenCV识别前色和背景色,我们在Stack Overflow上找到一个类似的问题:https://stackoverflow.com/questions/63543846/

10-10 18:47