import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import numpy as np
# Report and select the compute device (GPU when available, CPU otherwise).
use_cuda = torch.cuda.is_available()
if use_cuda:
    print('use GPU')
else:
    print('use CPU')
device = torch.device('cuda' if use_cuda else 'cpu')
class data_re(Dataset):
    """Dataset parsed from a text file, one sample per line.

    For each line: the first character is discarded, the next character is
    the label, and every remaining character is one feature value.
    """

    def __init__(self):
        super(data_re, self).__init__()
        self.root = 'result.txt'
        self.index_t, self.label_t = self.load_lgtxt(self.root)
        self.len = len(self.label_t)

    def load_lgtxt(self, root):
        """Read *root* and return parallel lists of (feature, label) tensors."""
        indexs_tensor_list = []
        labels_tensor_list = []
        with open(root, 'r', encoding='utf-8') as f:
            lines = [line.strip() for line in f]
        for line in lines:
            chars = list(line[1:])  # the very first character of the line is dropped
            label_np = np.array(float(chars[0])).astype(np.float32)
            feats_np = np.array([float(c) for c in chars[1:]]).astype(np.float32)
            indexs_tensor_list.append(torch.from_numpy(feats_np))
            labels_tensor_list.append(torch.from_numpy(label_np))
        print('数据总级:{}'.format(len(indexs_tensor_list)))
        return indexs_tensor_list, labels_tensor_list

    def __len__(self):
        return self.len

    def __getitem__(self, idx):
        return self.index_t[idx], self.label_t[idx]
class model(nn.Module):
    """Three-layer MLP: 319-dim feature vector -> 3 outputs."""

    def __init__(self):
        super(model, self).__init__()
        layers = [
            nn.Linear(319, 160),
            nn.ReLU(),
            nn.Linear(160, 80),
            nn.ReLU(),
            nn.Linear(80, 3),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, input):
        return self.net(input)
# Interactive training script for the first (319-feature) model version.
train_data = data_re()
train_dataloader = DataLoader(dataset=train_data)  # batch_size defaults to 1
epoch = int(input('请输入训练轮数:'))
net = model().to(device)
optimizer = torch.optim.SGD(net.parameters(), lr=0.005)
loss_func = nn.MSELoss().to(device)
list_pre = []
for i in range(epoch):
    print('-------第{}轮训练开始-------'.format(i + 1))
    for indexs, labels in train_dataloader:
        labels = labels.to(device)
        indexs = indexs.to(device)
        # NOTE(review): the DataLoader already yields a leading batch
        # dimension, so unsqueeze(0) makes the input (1, 1, 319) — confirm
        # the extra dimension is intended.
        output = net(indexs.unsqueeze(0))
        loss = loss_func(output.squeeze(), labels)
        optimizer.zero_grad()  # clear stale gradients before the new backward pass
        loss.backward()
        optimizer.step()
        if i % 100 == 0:
            # Detach before storing: keeping graph-attached outputs would
            # retain the whole autograd graph and grow memory every epoch.
            list_pre.append(output.detach())
            print('epoch:{} \n loss:{}'.format(i, round(loss.item(), 10)))
# ---- Improved version of the script above ----
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import numpy as np
# Select the training device: first CUDA GPU when available, otherwise CPU.
DEVICE = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
class Model(nn.Module):
    """Three-layer MLP: 320-dim feature vector -> 3 outputs."""

    def __init__(self):
        super(Model, self).__init__()
        layers = [
            nn.Linear(320, 160),
            nn.ReLU(),
            nn.Linear(160, 80),
            nn.ReLU(),
            nn.Linear(80, 3),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, input):
        return self.net(input)
class DataRemake(Dataset):
    """Dataset parsed from a text file, one sample per line.

    The first character of each line is the label; the remaining characters
    are the features, zero-padded or truncated to exactly 320 values.
    """

    def __init__(self, path):
        self.data, self.label = self.transform(path)
        self.len = len(self.label)

    def __getitem__(self, index):
        # Returns (label, data) — in that order.
        return self.label[index], self.data[index]

    def __len__(self):
        return self.len

    def transform(self, path):
        """Read *path* and build parallel lists of feature/label tensors."""
        data_tensor_list = []
        label_list = []
        with open(path, mode='r', encoding='utf-8') as fp:
            lines = [raw.strip() for raw in fp]
        for line in lines:
            chars = list(line)
            label_chars = chars[0]
            feature_chars = chars[1:]
            # Normalize every sample to exactly 320 features: zero-pad the
            # short lines, truncate the long ones.
            if len(feature_chars) < 320:
                feature_chars = feature_chars + [0] * (320 - len(feature_chars))
            elif len(feature_chars) > 320:
                feature_chars = feature_chars[:320]
            feats = np.array([float(c) for c in feature_chars]).astype(np.float32)
            lab = np.array([float(c) for c in label_chars]).astype(np.float32)
            data_tensor_list.append(torch.from_numpy(feats))
            label_list.append(torch.from_numpy(lab))
        return data_tensor_list, label_list
# Training loop for the second (320-feature) model version.
# NOTE(review): the dataset path is hard-coded; adjust for the actual deployment.
train_data = DataRemake('result.txt')
train_dataloader = DataLoader(dataset=train_data, batch_size=10)
net = Model().to(DEVICE)
optimizer = torch.optim.SGD(net.parameters(), lr=0.005)
loss_func = nn.MSELoss().to(DEVICE)
list_pre = []
for epoch in range(1000):
    for labels, datas in train_dataloader:
        labels = labels.to(DEVICE)
        datas = datas.to(DEVICE)
        output = net(datas)
        # NOTE(review): output is (batch, 3) while labels are (batch, 1);
        # MSELoss will broadcast here — confirm this is the intended target shape.
        loss = loss_func(output, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if epoch % 100 == 0:
            # Detach before storing: keeping graph-attached outputs would
            # retain the whole autograd graph and grow memory unboundedly.
            list_pre.append(output.detach())
            print('epoch:{} \n loss:{}'.format(epoch, round(loss.item(), 10)))