- Training on data with injected noise is equivalent to Tikhonov regularization
- Dropout controls model complexity by randomly setting some output elements to zero
- It is usually applied to the outputs of the hidden layers of a multilayer perceptron
- The dropout probability is the hyperparameter that controls model complexity (see the worked expectation after this list)
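Why the surviving activations are rescaled: under the inverted-dropout convention (which the `dropout_layer` implementation below follows), each hidden activation $h$ is transformed as

$$
h' =
\begin{cases}
0 & \text{with probability } p,\\[4pt]
\dfrac{h}{1-p} & \text{otherwise},
\end{cases}
$$

so the expectation is unchanged: $E[h'] = p \cdot 0 + (1-p) \cdot \dfrac{h}{1-p} = h$.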
We implement the `dropout_layer` function, which drops out elements of the input tensor `X` with probability `dropout`:

```python
import torch
from torch import nn
from d2l import torch as d2l

def dropout_layer(X, dropout):
    assert 0 <= dropout <= 1
    # When dropout == 1, every element is dropped
    if dropout == 1:
        return torch.zeros_like(X)
    # When dropout == 0, all elements are kept
    if dropout == 0:
        return X
    # Keep each element with probability 1 - dropout, then rescale the
    # survivors so the expected value of each element is unchanged
    mask = (torch.rand(X.shape) > dropout).float()
    return mask * X / (1.0 - dropout)
```
```python
X = torch.arange(16, dtype=torch.float32).reshape((2, 8))
print(X)
print(dropout_layer(X, 0.))   # unchanged: nothing is dropped
print(dropout_layer(X, 0.5))  # about half the entries zeroed, survivors doubled
print(dropout_layer(X, 1.))   # all zeros
```
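As a quick sanity check of the rescaling (a minimal sketch added here, not part of the original; the tensor size and seed are arbitrary choices), the sample mean after dropout should stay close to the input mean, matching the expectation calculation above:

```python
# Sanity check: inverted-dropout scaling preserves the mean in expectation.
torch.manual_seed(0)                     # arbitrary seed for reproducibility
X_big = torch.ones(1000, 1000)
print(dropout_layer(X_big, 0.5).mean())  # expected: close to 1.0
```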
We define a multilayer perceptron with two hidden layers, each containing 256 units:

```python
num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256
dropout1, dropout2 = 0.2, 0.5

class Net(nn.Module):
    def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,
                 is_training=True):
        super(Net, self).__init__()
        self.num_inputs = num_inputs
        self.training = is_training
        self.lin1 = nn.Linear(num_inputs, num_hiddens1)
        self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)
        self.lin3 = nn.Linear(num_hiddens2, num_outputs)
        self.relu = nn.ReLU()

    def forward(self, X):
        H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))
        # Apply dropout only during training
        if self.training:
            H1 = dropout_layer(H1, dropout1)
        H2 = self.relu(self.lin2(H1))
        if self.training:
            H2 = dropout_layer(H2, dropout2)
        out = self.lin3(H2)
        return out

net = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)
```
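One subtlety worth noting (a sketch added here, not from the original): `training` is also `nn.Module`'s built-in mode flag, so the usual `net.eval()` / `net.train()` switches toggle it and thereby enable or disable the hand-written dropout:

```python
# `training` doubles as nn.Module's built-in mode flag, so the standard
# train/eval switches control the custom dropout too.
net.eval()                             # sets net.training = False
with torch.no_grad():
    logits = net(torch.randn(2, 784))  # dropout is skipped here
net.train()                            # re-enable dropout for training
```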
Next, we train and test the model:

```python
num_epochs, lr, batch_size = 10, 0.5, 256
loss = nn.CrossEntropyLoss()
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
trainer = torch.optim.SGD(net.parameters(), lr=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
```
For the concise implementation, we only need to insert an `nn.Dropout` layer after each fully connected layer's activation:

```python
net = nn.Sequential(nn.Flatten(),
                    nn.Linear(784, 256),
                    nn.ReLU(),
                    # Dropout after the first fully connected layer
                    nn.Dropout(dropout1),
                    nn.Linear(256, 256),
                    nn.ReLU(),
                    # Dropout after the second fully connected layer
                    nn.Dropout(dropout2),
                    nn.Linear(256, 10))

def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)

net.apply(init_weights);
```
```python
trainer = torch.optim.SGD(net.parameters(), lr=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
```
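`nn.Dropout` respects the same train/eval switch automatically: in evaluation mode it simply passes inputs through unchanged. A minimal inference sketch (my addition; the batch taken from `test_iter` is an arbitrary choice):

```python
# nn.Dropout is the identity in eval mode, so no manual flag is needed.
net.eval()
with torch.no_grad():
    X_test, y_test = next(iter(test_iter))
    preds = net(X_test).argmax(dim=1)  # predicted class indices
print(preds[:10], y_test[:10])
```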