Problem 1: Based on the image dataset of harvestable apples provided in Attachment 1, extract image features and build a mathematical model to count the apples in each image, then plot a histogram of the apple-count distribution over all images in Attachment 1.

For an automatic picking robot, the first essential capability is recognizing apples, so accurately identifying apple objects in the camera image has a major impact on the robot's performance. Attachment 1 provides 200 images containing apples; the task is to count the apples in each image and analyze the distribution of apple counts across Attachment 1. The approach is to work in a color space such as HSV (Hue, Saturation, Value), separate apples from the surrounding scene by thresholding on hue, saturation, and value, and then count the apples with contour detection.
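For reference, before the full pipeline below, here is a minimal, hedged sketch of what a purely HSV-based red-apple mask could look like. The threshold values are illustrative assumptions only; the code that follows actually thresholds in BGR space, and an HSV variant is kept in a commented-out block. Note that red wraps around OpenCV's 0-179 hue axis, so two hue bands are combined.

import cv2
import numpy as np

def red_apple_mask(img_bgr):
    # Convert BGR to HSV so that "red" can be selected mainly by hue.
    hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
    # Red wraps around the hue axis; combine two bands (illustrative values).
    mask_low = cv2.inRange(hsv, np.array([0, 70, 60]), np.array([10, 255, 255]))
    mask_high = cv2.inRange(hsv, np.array([170, 70, 60]), np.array([179, 255, 255]))
    return cv2.bitwise_or(mask_low, mask_high)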

import os
import cv2
import numpy as np
# Path to the image folder
folder_path = 'D:/math_model/2023yatai/Attachment/Attachment 1'
image_files = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]
count = []
for file in image_files:
    # Read the image
    image_path = os.path.join(folder_path, file)
    img = cv2.imread(image_path)
    # Convert the image to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Erosion followed by dilation (a morphological opening); note that the
    # result is kept for inspection but is not used by the color mask below
    kernel = np.ones((2, 2), np.uint8)
    erosion = cv2.erode(gray, kernel, iterations=5)  # erosion
    dilation = cv2.dilate(erosion, kernel, iterations=5)  # dilation

    # Threshold the BGR image to keep reddish pixels
    # (B in 20-80, G in 0-100, R in 100-255)
    lower_red = np.array([20, 0, 100])
    upper_red = np.array([80, 100, 255])
    mask = cv2.inRange(img, lower_red, upper_red)
    '''
    # Alternative: threshold in HSV (convert first, otherwise the ranges below are meaningless)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # HSV range for red apples
    lower_red = np.array([0, 50, 50])
    upper_red = np.array([10, 255, 255])
    mask_red = cv2.inRange(hsv, lower_red, upper_red)

    # HSV range for green apples
    lower_green = np.array([35, 50, 50])
    upper_green = np.array([85, 255, 255])
    mask_green = cv2.inRange(hsv, lower_green, upper_green)

    # Combine the red and green apple masks
    mask = cv2.bitwise_or(mask_red, mask_green)
    '''
    # Find the contours of the red regions
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Contours that survive the minimum-area filter
    contours_filtered = []

    # Collect the contour areas to derive an area threshold
    mianji = []
    for contour in contours:
        area = cv2.contourArea(contour)
        mianji.append(area)
    '''
    mianji = [x for i, x in enumerate(mianji) if x not in mianji[:i]]  # remove duplicates
    #mianji = list(filter(lambda x: x != 0, mianji))  # drop zero-area contours
    #mianji = [x for x in mianji if x >= 30]
    mianji = sorted(mianji)
    min_area = np.median(mianji)
    '''
    # Area threshold: 1/80 of the largest contour area in this image
    min_area = np.max(mianji) / 80
    # Discard connected components whose area is below the threshold
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > min_area:
            contours_filtered.append(contour)

    # Draw the remaining contours and count them as apples
    cv2.drawContours(img, contours_filtered, -1, (0, 0, 255), 1)
    apple_count = len(contours_filtered)
    # Empirical correction: very large counts are usually over-segmented, so scale them down
    if apple_count > 100:
        apple_count = apple_count * 0.7
    count.append(apple_count)
count_all = np.sum(count)
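A side note on the opening computed inside the loop: erosion and dilation are applied to the grayscale image, but their result never feeds into mask, so the intended noise suppression is lost. A minimal sketch, assuming the same 2x2 kernel, of how an opening could instead be applied to the color mask before contour extraction (the helper name is illustrative):

import cv2
import numpy as np

def clean_mask(mask, kernel_size=2, iterations=5):
    # Opening (erosion then dilation) on the binary mask removes small speckles
    # before cv2.findContours is called.
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    return cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=iterations)

# Possible use inside the loop above:
# mask = clean_mask(mask)
# contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)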


import matplotlib.pyplot as plt

# Histogram of the per-image apple counts
n, bins, patches = plt.hist(count, bins=30, color='skyblue', edgecolor='black', alpha=0.7)
plt.title('Histogram of apple count distribution')
plt.xlabel('Number of apples')
plt.ylabel('Frequency')
# Annotate each non-empty bin with its count
for i in range(len(patches)):
    if n[i] != 0:
        plt.text(patches[i].get_x() + patches[i].get_width() / 2, patches[i].get_height(),
                 str(int(n[i])), ha='center', va='bottom')
#plt.savefig('D:/math_model/2023yatai/图/Histogram of apple count distribution.png', dpi = 600)  # save the figure
plt.show()

[Figure: histogram of the apple-count distribution across Attachment 1]

#%%  Stitch a few example images into one collage for display
def imge_single(i):
    ii = str(i)
    img = cv2.imread(r'D:/math_model/2023yatai/Attachment/Attachment 1/' + ii + '.jpg', 1)  # read the image

    # Convert the image to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Erosion followed by dilation (opening); as above, the result is not used by the mask
    kernel = np.ones((2, 2), np.uint8)
    erosion = cv2.erode(gray, kernel, iterations=5)  # erosion
    dilation = cv2.dilate(erosion, kernel, iterations=5)  # dilation
    # Threshold the BGR image to keep reddish pixels
    lower_red = np.array([20, 0, 100])
    upper_red = np.array([80, 100, 255])
    mask = cv2.inRange(img, lower_red, upper_red)
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours_filtered = []
    mianji = []
    for contour in contours:
        area = cv2.contourArea(contour)
        mianji.append(area)
    # Area threshold: 1/80 of the largest contour area
    min_area = np.max(mianji) / 80
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > min_area:
            contours_filtered.append(contour)
    # Draw the contours of the detected apples and count them
    cv2.drawContours(img, contours_filtered, -1, (0, 0, 255), 1)
    apple_count = len(contours_filtered)
    # Overlay the apple count on the image
    cv2.putText(img, f"Apple Count: {apple_count}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (225, 25, 25), 2)
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))  # convert BGR to RGB for matplotlib
    plt.axis('off')  # hide the axes


plt.figure()
plt.tight_layout()
plt.subplots_adjust(left=None, bottom=None, right=None, top=None,
                    wspace=0.0005, hspace=0.1)
plt.subplot(2,2,1)
imge_single(55)
plt.subplot(2,2,2)
imge_single(2)
plt.subplot(2,2,3)
imge_single(7)
plt.subplot(2,2,4)
imge_single(11)
#plt.savefig('D:/math_model/2023yatai/图/苹果拼图', dpi=500, bbox_inches='tight')  # save the collage; dpi and bbox_inches are set explicitly
plt.show()

[Figure: 2x2 collage of sample images with detected apple contours and counts overlaid]

Problem 2: Based on the image dataset of harvestable apples provided in Attachment 1, determine the position of every apple in each image, taking the lower-left corner of the image as the coordinate origin, and draw a two-dimensional scatter plot of the geometric coordinates of all apples in Attachment 1.

A human can reach for an apple in front of them by feel, but the robot has no such senses; it can only locate an apple through numerical positioning. It is therefore necessary to identify the position of every apple in each image and report its coordinates with the lower-left corner of the image as the origin. Building on Problem 1, we output the center coordinates of each apple already detected there. The spatial pattern of the apple positions is not obvious in the scatter plot: one can only see that fewer apples lie near the image borders, while the region of densest concentration is unclear. A heat map is therefore used to show how the apple positions are distributed within the image.
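Each apple's position is taken as the centroid of its contour, computed from image moments, and the y value is flipped because image coordinates put the origin at the top-left corner while the problem requires the bottom-left. A minimal sketch of this conversion (the helper name is illustrative):

import cv2

def contour_center_bottom_left(contour, image_height):
    # Centroid from image moments: (m10/m00, m01/m00) in top-left-origin pixels.
    M = cv2.moments(contour)
    if M["m00"] == 0:
        return None  # degenerate contour with no well-defined centroid
    cx = int(M["m10"] / M["m00"])
    cy = int(M["m01"] / M["m00"])
    # Flip y so the origin sits at the bottom-left corner of the image.
    return cx, image_height - cy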

import os
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Path to the image folder
folder_path = 'D:/math_model/2023yatai/Attachment/Attachment 1'
image_files = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]
all_apple_positions = []

for file in image_files:
    # Read the image
    image_path = os.path.join(folder_path, file)
    img = cv2.imread(image_path)
    # Convert the image to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Erosion followed by dilation (opening); as in Problem 1, the result is not used by the mask
    kernel = np.ones((2, 2), np.uint8)
    erosion = cv2.erode(gray, kernel, iterations=5)  # erosion
    dilation = cv2.dilate(erosion, kernel, iterations=5)  # dilation

    # Threshold the BGR image to keep reddish pixels
    lower_red = np.array([20, 0, 100])
    upper_red = np.array([80, 100, 255])
    mask = cv2.inRange(img, lower_red, upper_red)

    # Find the contours of the red regions
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Contours that survive the minimum-area filter
    contours_filtered = []

    # Collect the contour areas to derive an area threshold
    mianji = []
    for contour in contours:
        area = cv2.contourArea(contour)
        mianji.append(area)
    '''
    mianji = [x for i, x in enumerate(mianji) if x not in mianji[:i]]  # remove duplicates
    mianji = list(filter(lambda x: x != 0, mianji))  # drop zero-area contours
    mianji = sorted(mianji)
    min_area = np.median(mianji)
    '''
    # Area threshold: 1/80 of the largest contour area in this image
    min_area = np.max(mianji) / 80
    # Keep sufficiently large connected components and record the center of each
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > min_area:
            contours_filtered.append(contour)
            # Centroid from image moments
            M = cv2.moments(contour)
            if M["m00"] != 0:
                center_x = int(M["m10"] / M["m00"])
                # Flip y so the origin is the bottom-left corner of the image, as required
                center_y = img.shape[0] - int(M["m01"] / M["m00"])
                all_apple_positions.append((center_x, center_y))

# Two-dimensional scatter plot of all apple positions
x_coords, y_coords = zip(*all_apple_positions)
plt.scatter(x_coords, y_coords)
plt.xlabel('Horizontal position')
plt.ylabel('Vertical position')
plt.title('Apple location scatterplot')
#plt.savefig('D:/math_model/2023yatai/图/散点图(不建模-备用).png', dpi = 600)
plt.show()

import seaborn as sns
# Kernel-density heat map of the apple positions
plt.figure(figsize=(10, 6))
sns.kdeplot(x=x_coords, y=y_coords, cmap="Reds", fill=True, bw_adjust=0.5)
plt.title('Heat map of the geometric coordinates of all apples', fontsize = 16)
plt.xlabel('Horizontal position', fontsize = 14)
plt.ylabel('Vertical position', fontsize = 14)
# y already increases upward (origin at the bottom-left corner), so no axis inversion is needed
#plt.savefig('D:/math_model/2023yatai/图/散点热力图.png', dpi = 600)
plt.show()

[Figures: scatter plot and KDE heat map of the apple center coordinates across Attachment 1]
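As an alternative to the KDE heat map, a binned 2D histogram of the same coordinates shows where apples concentrate without any smoothing. This sketch is not part of the original solution; the bin count of 30 is an arbitrary choice, and it reuses x_coords and y_coords from above.

import numpy as np
import matplotlib.pyplot as plt

# Bin the apple centers into a 30x30 grid and display the counts as an image.
heat, xedges, yedges = np.histogram2d(x_coords, y_coords, bins=30)
plt.imshow(heat.T, origin='lower', cmap='Reds', aspect='auto',
           extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
plt.colorbar(label='Apples per bin')
plt.xlabel('Horizontal position')
plt.ylabel('Vertical position')
plt.title('2D histogram of apple positions')
plt.show()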

 

 
