I am getting this error on np.save. Please let me know what causes it and how I can fix it.
Below is my code:

import cv2
import numpy as np
import os
from random import shuffle
from tqdm import tqdm
import tflearn
import tensorflow as tf
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression


TRAIN_DIR = "/Users/preetisingh/Documents/PlantsDataset/train"
TEST_DIR = "/Users/preetisingh/Documents/PlantsDataset/test"
IMG_SIZE = 50
LR = 1e-3

MODEL_NAME = "plants-{}-{}.model".format(LR, "2conv-basic")

label_dict = {
    "Abiotic Sunburn": 0,
    "Alternaria Brown Spot": 1,
    "Anthracnose of Banana": 2,
    "Anthracnose of Cotton": 3,
    "Bacterial Blight of Rice": 4,
    "Bacterial Blight of Cotton": 5,
    "Anthracnose of Grape": 6,
    "Bacterial Canker": 7,
    "Banana Bract Mosaic": 8,
}


def label_img(img):
    label = label_dict[img]
    return label


def create_train_data():
    training_data = []
    for direc in tqdm(os.listdir(TRAIN_DIR)):
        j = 0
        label = direc
        disease = TRAIN_DIR + "/" + label
        if label == ".DS_Store" or label == ".git":
            continue
        for img in os.listdir(disease):
            labeldata = label_img(label)
            labeldata = tf.one_hot(labeldata, 9)
            if img == ".DS_Store":
                continue
            path = os.path.join(disease, img)
            img = cv2.resize(
                cv2.imread(path, cv2.IMREAD_GRAYSCALE), (IMG_SIZE, IMG_SIZE)
            )
            training_data.append([np.array(img), np.array(labeldata)])
            j += 1
        shuffle(training_data)
    np.save("plants_data.npy", training_data)
    return training_data

def create_test_data():
    testing_data = []
    for img in tqdm(os.listdir(TEST_DIR)):
        path = os.path.join(TEST_DIR, img)
        img_num = img.split(".")[0]
        img = cv2.resize(cv2.imread(path, cv2.IMREAD_GRAYSCALE), (IMG_SIZE, IMG_SIZE))
        testing_data.append([np.array(img), img_num])
    return testing_data


train_data = create_train_data()
test_data = create_test_data()

train = train_data[:-11]
test = train_data[-11:]
X = (
    np.array([i[0] for i in train])
    .reshape(-1, IMG_SIZE, IMG_SIZE, 1)
    .astype(np.float32)
)
Y = [i[1] for i in train]
test_x = (
    np.array([i[0] for i in test]).reshape(-1, IMG_SIZE, IMG_SIZE, 1).astype(np.float32)
)
test_y = [i[1] for i in test]

convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name="input")
input_layer = tf.reshape(X, [-1, IMG_SIZE, IMG_SIZE, 1])
convnet = conv_2d(input_layer, 32, 2, activation="relu")
convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 64, 2, activation="relu")
convnet = max_pool_2d(convnet, 2)
convnet = fully_connected(convnet, 32, activation="relu")
convnet = dropout(convnet, 0.4)
logits = tf.layers.dense(inputs=convnet, units=9)

convnet = fully_connected(logits, 9, activation="softmax")
convnet = regression(
    logits,
    optimizer="adam",
    learning_rate=LR,
    loss="categorical_crossentropy",
    name="targets",
)
model = tflearn.DNN(logits, tensorboard_dir="log")

if os.path.exists("{}.meta".format(MODEL_NAME)):
    model.load(MODEL_NAME)
    print("model loaded")


try:
    model.fit(
        {"input": X},
        {"targets": Y},
        n_epoch=3,
        validation_set=({"input": test_x}, {"targets": test_y}),
        snapshot_step=500,
        show_metric=False,
        run_id=MODEL_NAME,
    )

except ValueError as e:
    print(e)


I am new to machine learning and am trying to do image classification of 9 plant diseases.
I have the image tensors in X and the target label values from the label_dict dictionary in Y.


This is the error I get:


Traceback (most recent call last):
  File "hello.py", line 72, in <module>
    train_data = create_train_data()
  File "hello.py", line 58, in create_train_data
    np.save("plants_data.npy", training_data)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/numpy/lib/npyio.py", line 521, in save
    pickle_kwargs=pickle_kwargs)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/numpy/lib/format.py", line 593, in write_array
    pickle.dump(array, fp, protocol=2, **pickle_kwargs)
TypeError: can't pickle _thread.lock objects


Can anyone help me resolve this error?

Best answer

I solved the error.
The problem was the one_hot encoding of my labeldata variable: tf.one_hot returns a TensorFlow Tensor bound to the graph, which np.save cannot pickle (hence the _thread.lock error), so I corrected the encoding.
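As a minimal sketch of that kind of correction (the one_hot_label helper below is my own name, not from the original code), the label can be one-hot encoded with plain NumPy so that training_data holds only ordinary arrays that np.save can serialize without pickling any TensorFlow objects:

import numpy as np

NUM_CLASSES = 9  # matches the nine entries in label_dict

def one_hot_label(index, num_classes=NUM_CLASSES):
    # Build the one-hot vector as a plain NumPy array instead of calling
    # tf.one_hot, which returns a graph-bound Tensor that cannot be pickled.
    vec = np.zeros(num_classes, dtype=np.float32)
    vec[index] = 1.0
    return vec

# Inside create_train_data(), replace
#     labeldata = tf.one_hot(labeldata, 9)
# with
#     labeldata = one_hot_label(labeldata)

# Quick check that the saved file round-trips (object arrays need allow_pickle):
# data = np.load("plants_data.npy", allow_pickle=True)

Alternatively, the tf.one_hot tensor could be evaluated to a NumPy array inside a session before appending, but keeping the label preprocessing in NumPy avoids touching the TensorFlow graph during data preparation.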

Related Stack Overflow question (numpy - TypeError: can't pickle _thread.lock objects when saving numpy file): https://stackoverflow.com/questions/51852590/
