如何在 MNIST 训练数据和测试数据之外,输出验证集(validation)的性能?
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import torch.nn.init
import torch.nn.functional as F
# from sklearn.linear_model import SGDClassifier # Test i did
# from sklearn.model_selection import cross_val_score
# Select the compute device: prefer the GPU when CUDA is available.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(device)

# Hyper-parameters for training.
learning_rate = 0.001
training_epochs = 15
batch_size = 100

# MNIST training split, converted from PIL images to [0, 1] float tensors.
mnist_train = dsets.MNIST(root='MNIST_data/',               # download path
                          train=True,                       # training split
                          transform=transforms.ToTensor(),  # PIL -> tensor
                          download=True)

# MNIST test split, same preprocessing.
mnist_test = dsets.MNIST(root='MNIST_data/',                # download path
                         train=False,                       # test split
                         transform=transforms.ToTensor(),   # PIL -> tensor
                         download=True)

# Shuffled mini-batch loader over the training data; an incomplete
# final batch is dropped so every batch has exactly `batch_size` samples.
data_loader = torch.utils.data.DataLoader(dataset=mnist_train,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          drop_last=True)
class CNN(torch.nn.Module):
    """Two conv blocks followed by two fully connected layers for MNIST.

    Input:  (N, 1, 28, 28) float tensor.
    Output: (N, 10) raw class scores (logits); softmax is applied inside
    CrossEntropyLoss during training, not here.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Layer 1: (N, 1, 28, 28) -> conv -> (N, 32, 28, 28) -> pool -> (N, 32, 14, 14)
        self.layer1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))
        # Layer 2: (N, 32, 14, 14) -> conv -> (N, 64, 14, 14) -> pool -> (N, 64, 7, 7)
        # BUG FIX: the original Conv2d(32, 64, stride=2) omitted the required
        # kernel_size argument (TypeError at construction) and had no
        # ReLU/MaxPool, so its output could not match the 7*7*64 expected
        # by fc1 below.
        self.layer2 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))
        # 7x7x64 inputs -> 100 hidden units -> 10 class scores.
        self.fc1 = torch.nn.Linear(7 * 7 * 64, 100, bias=True)
        self.fc2 = torch.nn.Linear(100, 10, bias=True)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)  # flatten for the fully connected layers
        out = F.relu(self.fc1(out))
        out = self.fc2(out)
        return out
# Build the CNN and place it on the selected device.
model = CNN().to(device)

# CrossEntropyLoss applies log-softmax internally, so the model emits raw logits.
criterion = torch.nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Number of mini-batches the loader yields per epoch.
total_batch = len(data_loader)
print('Number of batches : {}'.format(total_batch))
def epoch_acc():
    """Return classification accuracy (float in [0, 1]) on the MNIST test set.

    BUG FIX: the original reshaped the test images to (N, 1, 28), which does
    not match the (N, 1, 28, 28) input the CNN expects, and ran the forward
    pass with gradient tracking enabled.
    """
    model.eval()  # inference mode (matters once dropout/batch-norm are added)
    with torch.no_grad():  # evaluation needs no gradient bookkeeping
        # .data / .targets replace the deprecated .test_data / .test_labels.
        # Raw pixels are 0-255; divide by 255 so the range matches the
        # [0, 1] tensors ToTensor() produced for the training batches.
        X_test = (mnist_test.data.view(len(mnist_test), 1, 28, 28)
                  .float().to(device) / 255.0)
        Y_test = mnist_test.targets.to(device)
        prediction = model(X_test)
        correct_prediction = torch.argmax(prediction, 1) == Y_test
        accuracy = correct_prediction.float().mean()
    model.train()  # restore training mode for the caller's loop
    return accuracy.item()
# Train for `training_epochs` epochs, reporting average cost and test
# accuracy after each epoch.
for epoch in range(training_epochs):
    avg_cost = 0
    for X, Y in data_loader:  # one mini-batch at a time
        # Images are already (batch, 1, 28, 28) tensors — no reshape needed.
        # Labels are class indices (not one-hot), as CrossEntropyLoss expects.
        X = X.to(device)
        Y = Y.to(device)
        optimizer.zero_grad()
        hypothesis = model(X)
        cost = criterion(hypothesis, Y)
        cost.backward()
        optimizer.step()
        avg_cost += cost / total_batch
    print('[Epoch: {:>4}] cost = {:>.9} ACC = {:.4f}'.format(
        epoch + 1, avg_cost, epoch_acc()))

# Final evaluation on the test set — no gradients needed.
# BUG FIX: the original final-evaluation block was garbled (it compared a
# half-reshaped tensor against an undefined Y_test and reused a
# correct_prediction that was never computed); reconstructed below.
with torch.no_grad():
    X_test = (mnist_test.data.view(len(mnist_test), 1, 28, 28)
              .float().to(device) / 255.0)
    Y_test = mnist_test.targets.to(device)
    prediction = model(X_test)
    correct_prediction = torch.argmax(prediction, 1) == Y_test
    accuracy = correct_prediction.float().mean()
    print('Accuracy:', accuracy.item())
这段代码是使用 CNN 对 MNIST 数据集进行分类。
如果你运行这个 在 15 个 epoch 期间,显示了成本和学习性能 (acc)。
但是,我想像图中那样输出验证集性能(Validation accuracy)。
图中的结果也是用与此类似的代码得到的。
目前,在这段代码中,训练数据设置为 60000,测试数据设置为 10000。这里如何输出验证性能?
我可以在代码本身中计算出验证性能(Val ACC)吗?
或者我应该使用 sklearn 创建一个新的验证集并使用与交叉验证相同的方法吗?
2021-04-26 此代码已通过添加您上传的代码进行了修改。
# --- Revised script (2021-04-26): adds a train/validation split. ---
# NOTE(review): this section of the page was badly garbled by the scrape
# (truncated argument lists, a missing fc2, an unclosed Sequential, no
# epoch loop, and several undefined names such as train_loss, valid_loss,
# valid_loss_min, scheduler and dsets). It is reconstructed below to match
# the first script plus the validation logic described in the answer.
import torch
import torch.nn.functional as F
import torch.nn.init
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import numpy as np
from torch.utils.data import DataLoader, SubsetRandomSampler

device = "cuda" if torch.cuda.is_available() else "cpu"
print(device)

learning_rate = 0.001
training_epochs = 15
batch_size = 100

mnist_train = dsets.MNIST(
    root="MNIST_data/",               # download path
    train=True,                       # training split
    transform=transforms.ToTensor(),  # PIL -> [0, 1] float tensor
    download=True,
)
mnist_test = dsets.MNIST(
    root="MNIST_data/",
    train=False,                      # test split
    transform=transforms.ToTensor(),
    download=True,
)


class CNN(torch.nn.Module):
    """Two conv blocks + two fully connected layers; outputs (N, 10) logits."""

    def __init__(self):
        super(CNN, self).__init__()
        # (N, 1, 28, 28) -> (N, 32, 14, 14)
        self.layer1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # (N, 32, 14, 14) -> (N, 64, 7, 7)
        self.layer2 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # 7x7x64 inputs -> 100 hidden -> 10 class scores
        self.fc1 = torch.nn.Linear(7 * 7 * 64, 100, bias=True)
        self.fc2 = torch.nn.Linear(100, 10, bias=True)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)  # flatten for the FC layers
        out = F.relu(self.fc1(out))
        out = self.fc2(out)
        return out


model = CNN().to(device)
criterion = torch.nn.CrossEntropyLoss().to(device)  # softmax included
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# The original called scheduler.step() without ever defining a scheduler;
# StepLR halving the LR every 5 epochs is a reasonable default — tune as needed.
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)

# Hold out 20% of the training set for validation.
valid_size = 0.2
num_train = len(mnist_train)
indices = list(range(num_train))
np.random.shuffle(indices)
split = int(np.floor(valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]

# Samplers restrict each loader to its own (disjoint) subset of indices.
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)

train_loader = DataLoader(mnist_train, batch_size=64,
                          sampler=train_sampler, num_workers=2)
valid_loader = DataLoader(mnist_train, batch_size=30,
                          sampler=valid_sampler, num_workers=2)
print("Number of batches : {}".format(len(train_loader)))

valid_loss_min = np.inf  # best validation loss seen so far

for epoch in range(1, training_epochs + 1):
    train_loss = 0.0
    valid_loss = 0.0

    # ---- training pass ----
    model.train()
    for data, target in train_loader:
        # move tensors to the GPU if CUDA is available
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()              # clear accumulated gradients
        output = model(data)               # forward pass: compute predictions
        loss = criterion(output, target)   # batch loss
        loss.backward()                    # gradients w.r.t. parameters
        optimizer.step()                   # single optimisation step
        train_loss += loss.item() * data.size(0)

    # ---- validation pass ----
    model.eval()
    with torch.no_grad():                  # no gradients needed for validation
        for data, target in valid_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            loss = criterion(output, target)
            valid_loss += loss.item() * data.size(0)

    # average the losses over the number of samples actually seen
    train_losses = train_loss / len(train_loader.sampler)
    valid_losses = valid_loss / len(valid_loader.sampler)
    scheduler.step()

    # print the train and validation loss statistics
    print("Epoch: {} \t Training Loss: {:.3f} \t Validation Loss: {:.3f}".format(
        epoch, train_losses, valid_losses))

    # save the model only when the validation loss improves
    if valid_losses <= valid_loss_min:
        print("Validation loss decreased {:.4f}--->{:.4f} Saving model...".format(
            valid_loss_min, valid_losses))
        torch.save(model.state_dict(), "model_cifer.pt")
        valid_loss_min = valid_losses

print("Learning rate: {:.5f}".format(optimizer.state_dict()["param_groups"][0]["lr"]))
解决方法
- 将训练数据分成两部分进行验证和训练。
获取将用于验证的训练索引
# Hold out 20% of the training samples as a validation set.
valid_size = 0.2
num_train = len(train_data)
# Shuffle all sample indices, then split off the first 20% for validation.
indices = list(range(num_train))
np.random.shuffle(indices)
split = int(np.floor(valid_size * num_train))
train_idx,valid_idx = indices[split:],indices[:split]
定义用于获取训练和验证批次的采样器
# Each sampler restricts a DataLoader to its own (disjoint) subset of indices.
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
结合数据集和采样器来准备数据加载器。
# Both loaders read from the same dataset; the samplers keep them disjoint.
train_loader = torch.utils.data.DataLoader(train_data,batch_size=64,sampler=train_sampler,num_workers=2)
valid_loader = torch.utils.data.DataLoader(train_data,batch_size=30,sampler=valid_sampler,num_workers=2)
2.使用验证数据对模型进行验证。每个epoch后会打印训练损失和验证损失,只保存验证损失最小的参数。
# NOTE: this snippet runs once per epoch; `epoch`, `model`, `criterion`,
# `optimizer`, `scheduler` and the loaders come from the surrounding script,
# and `valid_loss_min` must be initialised (e.g. to float('inf')) before
# the first epoch.
train_loss = 0.0
valid_loss = 0.0

# ---- training pass ----
model.train()  # enable training behaviour (dropout/batch-norm, if any)
for i, (data, target) in enumerate(train_loader):
    # move tensors to the GPU if CUDA is available
    data, target = data.to(device), target.to(device)
    # clear the gradients of all optimised variables
    optimizer.zero_grad()
    # forward pass: compute predictions by passing inputs
    output = model(data)
    # calculate batch loss
    loss = criterion(output, target)
    # backward pass: gradients of the loss w.r.t. the parameters
    loss.backward()
    # update parameters with a single optimisation step
    optimizer.step()
    # accumulate training loss, weighted by batch size
    train_loss += loss.item() * data.size(0)

# ---- validation pass ----
model.eval()
with torch.no_grad():  # no gradient bookkeeping needed for validation
    # BUG FIX: the original loop header was garbled
    # ("for batch_idx,target) in ...") and the device move was never
    # assigned back ("data,target.to(device)" discards its result).
    for batch_idx, (data, target) in enumerate(valid_loader):
        data, target = data.to(device), target.to(device)
        # forward pass: compute the validation predictions
        output = model(data)
        loss = criterion(output, target)
        # accumulate validation loss, weighted by batch size
        valid_loss += loss.item() * data.size(0)

# average the losses over the number of samples actually seen
train_losses = train_loss / len(train_loader.sampler)
valid_losses = valid_loss / len(valid_loader.sampler)
scheduler.step()

# print the train and validation loss statistics
print('Epoch: {} \t Training Loss: {:.3f} \t Validation Loss: {:.3f}'.format(epoch, train_losses, valid_losses))

# save the model only when the validation loss improves
if valid_losses <= valid_loss_min:
    print("Validation loss decreased {:.4f}--->{:.4f} Saving model...".format(valid_loss_min, valid_losses))
    torch.save(model.state_dict(), 'model_cifer.pt')
    valid_loss_min = valid_losses
print('Learning rate: {:.5f}'.format(optimizer.state_dict()['param_groups'][0]['lr']))
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。