线性回归

# 导入 torch、torch.autograd的Variable模块
import torch
from torch.autograd import Variable

# 生成需要回归需要的tensor
x_data = Variable(torch.Tensor([[1.0], [2.0], [3.0]]))
y_data = Variable(torch.Tensor([[2.0], [4.0], [6.0]])) #定义模型类,继承自nn包的Module模块
class Model(torch.nn.Module):
# 初始化方法
def __init__(self):
"""
In the constructor we instantiate two nn.Linear module
"""
# 子类转换为父类对象并调用初始化方法
super(Model, self).__init__()
# 线性回归,一个输入,一个输出
self.linear = torch.nn.Linear(1, 1) # One in and one out def forward(self, x):
"""
In the forward function we accept a Variable of input data and we must return
a Variable of output data. We can use Modules defined in the constructor as
well as arbitrary operators on Variables.
"""
# y=K *x 返回结果
y_pred = self.linear(x)
return y_pred # our model # 调用模型类生成模型的对象
model = Model() # Construct our loss function and an Optimizer. The call to model.parameters()
# in the SGD constructor will contain the learnable parameters of the two
# nn.Linear modules which are members of the model. ##使用均方误差函数,在多个样本情况下,不计算平均值
criterion = torch.nn.MSELoss(size_average=False)
##使用随机梯度下降,学习率为0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01) # Training loop
# 训练 500轮
for epoch in range(500):
# Forward pass: Compute predicted y by passing x to the model
# 前向推理模型
y_pred = model(x_data) # Compute and print loss
# 计算损失
loss = criterion(y_pred, y_data)
print(epoch, loss.data[0]) # Zero gradients, perform a backward pass, and update the weights.
# 由于使用多次训练,每轮结束之后要清零数据
optimizer.zero_grad()
# 反向传播求梯度值
loss.backward()
# 更新所有的参数
optimizer.step() # After training
hour_var = Variable(torch.Tensor([[4.0]]))
y_pred = model(hour_var)
print("predict (after training)", 4, model(hour_var).data[0][0])
05-11 17:20