如何解决CNN带来同样的损失和准确性
我正在尝试使用PyTorch创建CNN。当我使用3个或更少的连续卷积层时,它似乎可以正常工作:损失在下降,准确率在提高。但是,当我连续使用4个或更多的Conv2d层时,损失和准确率保持不变(准确率 = 0.0961)。有时,4层以上的模型可以训练成功,但如果我重新训练它们,又会遇到上述问题。我使用MNIST数据集进行测试。
我的代码如下:
from pathlib import Path
import requests
import pickle
import gzip
from matplotlib import pyplot
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader,TensorDataset
DATA_PATH = Path("data")
PATH = DATA_PATH / "mnist"
PATH.mkdir(parents=True, exist_ok=True)

# deeplearning.net is no longer online; the PyTorch tutorials repository
# hosts the same pickled MNIST archive.
URL = "https://github.com/pytorch/tutorials/raw/main/_static/"
FILENAME = "mnist.pkl.gz"

# Download once and cache on disk.
if not (PATH / FILENAME).exists():
    content = requests.get(URL + FILENAME).content
    # write_bytes opens and closes the file; the original
    # `.open("wb").write(content)` leaked the file handle.
    (PATH / FILENAME).write_bytes(content)

# The pickle holds ((x_train, y_train), (x_valid, y_valid), test); the test
# split is discarded here, matching the original code.
with gzip.open((PATH / FILENAME).as_posix(), "rb") as f:
    ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding="latin-1")
# Convert the NumPy arrays to tensors. Targets stay 1-D: F.cross_entropy
# expects class-index targets of shape (N,), and the original .unsqueeze(1)
# produced (N, 1) targets — cross_entropy rejects that shape, and the
# accuracy check `(label == yb)` would broadcast (N,) vs (N, 1) into an
# (N, N) matrix, silently corrupting the count.
x_train = torch.tensor(x_train)
y_train = torch.tensor(y_train)
x_valid = torch.tensor(x_valid)
y_valid = torch.tensor(y_valid)

# Mini-batch iterators; only the training set is shuffled.
train_ds = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_ds, batch_size=64, shuffle=True)
valid_ds = TensorDataset(x_valid, y_valid)
valid_dl = DataLoader(valid_ds, batch_size=64)
class Lambda(nn.Module):
    """An ``nn.Module`` wrapper around an arbitrary callable.

    Lets plain functions (e.g. reshaping helpers) participate in an
    ``nn.Sequential`` pipeline alongside regular layers.
    """

    def __init__(self, func):
        """Store *func*, the callable that ``forward`` will apply."""
        super().__init__()
        self.func = func

    def forward(self, x):
        """Apply the wrapped callable to *x* and return its result."""
        return self.func(x)
def preprocess(x):
    """Reshape a flat batch of MNIST pixels to NCHW image format.

    Accepts any tensor whose elements flatten to a multiple of 784 and
    returns shape (batch, 1, 28, 28). ``reshape`` is used instead of
    ``view`` so non-contiguous inputs (e.g. transposed slices) also work;
    for contiguous inputs it is equivalent to ``view``.
    """
    return x.reshape(-1, 1, 28, 28)
# The original pipeline had two fatal defects:
#   * nn.Conv2d(1, kernel_size=...) omits the required out_channels
#     positional argument, so construction raises a TypeError;
#   * a trailing nn.Softmax fed into F.cross_entropy, which applies
#     log-softmax itself — softmaxing twice flattens the gradients and
#     leaves accuracy stuck near chance (0.0961 ≈ 1/10 classes), the exact
#     symptom described in the question.
# The model below maps a flat (N, 784) batch to raw 10-class logits, as
# F.cross_entropy expects.
model = nn.Sequential(
    nn.Unflatten(1, (1, 28, 28)),                 # (N, 784) -> (N, 1, 28, 28)
    nn.Conv2d(1, 10, kernel_size=3, padding=1),   # one feature map per class
    nn.ReLU(),
    nn.AdaptiveAvgPool2d(1),                      # (N, 10, 1, 1)
    nn.Flatten(),                                 # (N, 10) raw logits
)
def loss_batch(model, loss_func, xb, yb, opt=None):
    """Run one batch through *model* and compute the loss.

    In training mode (*opt* given) a backward/step/zero_grad cycle is
    performed and ``(loss, batch_size)`` is returned. In evaluation mode
    (*opt* is None) no gradients are touched and
    ``(loss, batch_size, accuracy)`` is returned.
    """
    pred = model(xb)
    loss = loss_func(pred, yb)
    if opt is not None:
        loss.backward()
        opt.step()
        # Clear gradients so they do not accumulate into the next batch.
        opt.zero_grad()
        return loss.item(), len(xb)
    # Evaluation path: also report top-1 accuracy.
    label = torch.argmax(pred, dim=1)
    # yb.view(-1) guards against (N, 1)-shaped targets: comparing (N,) with
    # (N, 1) would broadcast to an (N, N) matrix and inflate the count.
    correct = (label == yb.view(-1)).float().sum()
    # Return a plain float rather than a 0-d tensor.
    accuracy = (correct / len(xb)).item()
    return loss.item(), len(xb), accuracy
def fit(epochs, model, opt, train_dl, valid_dl, loss_func=F.cross_entropy):
    """Train *model* for *epochs* epochs and report validation metrics.

    The original body called ``loss_batch(model, opt)`` and
    ``loss_batch(model, yb)``, dropping the loss function and the batch —
    both calls are corrected here. *loss_func* is a new keyword argument
    (default ``F.cross_entropy``) so existing 5-argument calls still work.
    """
    for epoch in range(epochs):
        # --- training pass ---
        model.train()
        losses = []
        nums = []
        for xb, yb in train_dl:
            loss, num = loss_batch(model, loss_func, xb, yb, opt)
            losses.append(loss)
            nums.append(num)
        # Weighted mean: the last batch may be smaller than batch_size.
        train_loss = np.sum(np.multiply(losses, nums)) / np.sum(nums)

        # --- validation pass (no gradients) ---
        model.eval()
        with torch.no_grad():
            losses = []
            nums = []
            accuracies = []
            for xb, yb in valid_dl:
                loss, num, accuracy = loss_batch(model, loss_func, xb, yb)
                losses.append(loss)
                nums.append(num)
                accuracies.append(accuracy)
        val_loss = np.sum(np.multiply(losses, nums)) / np.sum(nums)
        val_acc = np.sum(np.multiply(accuracies, nums)) / np.sum(nums)
        print("Epoch ", epoch + 1, ": ", train_loss, val_loss, val_acc)
opt = torch.optim.Adam(model.parameters(), lr=0.01)
# The original call fit(20, F.cross_entropy, valid_dl) passed the wrong
# arguments: fit's signature is (epochs, model, opt, train_dl, valid_dl),
# so it raised a TypeError before training ever started.
fit(20, model, opt, train_dl, valid_dl)
解决方法
问题的根本原因有两个:(1) nn.Conv2d(1, kernel_size=...) 缺少必需的 out_channels 参数;(2) 模型末尾的 nn.Softmax 与 F.cross_entropy 搭配使用——cross_entropy 内部已经做了 log-softmax,双重 softmax 会使梯度变得极小,训练停滞,准确率停在约 0.0961(约等于十分类的随机猜测 1/10)。去掉末尾的 Softmax、让模型直接输出 logits,并补上 out_channels,即可正常训练。
如果你已经找到好的解决方法,欢迎将解决方案带上本链接一起发送给小编。
小编邮箱:dio#foxmail.com(将#修改为@)