注意:本文无理论和推导
相关视频:
[双语字幕]吴恩达深度学习deeplearning.ai
PyTorch 动态神经网络 (莫烦 Python 教学)
一、导入库、创造数据
二、基本格式
三、编写网络
1.设计网络
实现左边那个三层的简单网络
input layer的神经元个数:n_input
hidden layer的神经元个数:n_hidden
output layer的神经元个数:n_output
有些人又将n_input写为n_features
2.init(初始化)
3.forward
4.创建网络
5.优化器
6.训练网络
7.画出loss的图像
8.预测
四、完整代码
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
# 自己创建的网络
class Net(torch.nn.Module):
    """A minimal two-layer feed-forward network: Linear -> ReLU -> Linear."""

    def __init__(self, n_input, n_hidden, n_output):
        """Build the two linear layers.

        n_input:  number of input features
        n_hidden: number of hidden units
        n_output: number of output features
        """
        super(Net, self).__init__()
        self.hidden_layer = torch.nn.Linear(n_input, n_hidden)
        self.output_layer = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        """Run the input through the hidden layer (ReLU) and the output layer."""
        hidden = self.hidden_layer(x)
        activated = F.relu(hidden)
        return self.output_layer(activated)
# Create toy data: 100 points of y = x^2 plus uniform noise in [0, 0.2).
# unsqueeze turns the 1-D linspace into shape (100, 1) as nn.Linear expects.
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
print(x.shape)
y = torch.pow(x, 2) + 0.2 * torch.rand(x.size())
print(y.shape)
# .numpy() works directly on tensors; the legacy .data access is discouraged.
plt.scatter(x.numpy(), y.numpy())
plt.show()
# NOTE: torch.autograd.Variable is deprecated since PyTorch 0.4 -- plain
# tensors participate in autograd directly, so the old
# `x, y = Variable(x), Variable(y)` wrapping is removed as a no-op.
# Create the network: 1 input feature, 10 hidden units, 1 output value.
net = Net(1, 10, 1)
print(net)
# Optimizer: plain SGD over all network parameters.
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
# Loss function
loss_func = torch.nn.MSELoss()  # Mean Square Error Loss
# Training loop
losses = []
for i in range(100):
    # Forward pass and loss computation.
    predict = net(x)
    loss = loss_func(predict, y)
    # Backward pass and parameter update.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # Print/record the loss once every 5 iterations.
    if i % 5 == 0:
        # loss.item() replaces the deprecated loss.data.item() pattern,
        # and is computed once instead of twice.
        current_loss = loss.item()
        print(current_loss)
        losses.append(current_loss)
# Plot the recorded training losses.
plt.plot(range(len(losses)), losses)
plt.xlabel('epoch')
plt.ylabel('loss')
# Derive tick positions from the data instead of hard-coding 20
# (= 100 iterations / record-every-5); identical ticks when len(losses) == 20,
# but stays correct if the training length or logging interval changes.
plt.xticks(range(0, len(losses), 2))
plt.show()
# Prediction on inputs outside the training range [-1, 1].
test = torch.tensor([[1.1], [1.2], [1.3], [1.4], [1.5]])
print(test)
# Disable gradient tracking for inference: no autograd graph is needed,
# which saves memory and avoids accidental backward calls.
with torch.no_grad():
    result = net(test)
print(result)
# Gap between ground truth (x^2) and the prediction; expect it to grow,
# since these inputs lie outside the interval the network was trained on.
real = torch.pow(test, 2)
print(real)
print(real - result)