PyTorch cannot build a multi-scale kernel nested model


I am trying to create a modified MNIST model that takes a 1x28x28 MNIST tensor image as input, branches it into sub-models with different kernel sizes, and accumulates the results at the end, so as to give a multi-scale kernel response in the spatial domain of the image. I am stuck because I cannot get this model to build.


import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as Data
from torchvision import datasets,transforms
import torch.nn.functional as F
import timeit
import unittest


torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(0)

# check availability of GPU and set the device accordingly

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


# define a transform for preparing the dataset
transform = transforms.Compose([
        transforms.ToTensor(),# convert the image to a pytorch tensor
        transforms.Normalize((0.1307,),(0.3081,)) # normalise the images with mean and std of the dataset
        ])

# Load the MNIST training and test datasets using `torchvision.datasets.MNIST` with the transform defined above

train_dataset = datasets.MNIST('./data',train=True,transform=transform,download=True)
test_dataset = datasets.MNIST('./data',train=False,transform=transform,download=True)


# create dataloaders for training and test datasets
# use a batch size of 32 and set shuffle=True for the training set

train_dataloader = Data.DataLoader(dataset=train_dataset,batch_size=32,shuffle=True)
test_dataloader = Data.DataLoader(dataset=test_dataset,batch_size=32,shuffle=True)


# My Net

class Net(nn.Module):
    def __init__(self):
        super(Net,self).__init__()
        
        # define conv layers with 16 output channels,kernel sizes of 3,5 and 7,and stride of 1
        self.conv11 = nn.Conv2d(1,16,3,1) # Input = 1x28x28  Output = 16x26x26
        self.conv12 = nn.Conv2d(1,16,5,1) # Input = 1x28x28  Output = 16x24x24
        self.conv13 = nn.Conv2d(1,16,7,1) # Input = 1x28x28  Output = 16x22x22

        # define conv layers with 32 output channels,kernel sizes of 3,5 and 7,and stride of 1
        self.conv21 = nn.Conv2d(16,32,3,1) # Input = 16x26x26 Output = 32x24x24
        self.conv22 = nn.Conv2d(16,32,5,1) # Input = 16x24x24 Output = 32x20x20
        self.conv23 = nn.Conv2d(16,32,7,1) # Input = 16x22x22 Output = 32x16x16

        # define conv layers with 64 output channels,kernel sizes of 3,5 and 7,and stride of 1
        self.conv31 = nn.Conv2d(32,64,3,1) # Input = 32x24x24 Output = 64x22x22
        self.conv32 = nn.Conv2d(32,64,5,1) # Input = 32x20x20 Output = 64x16x16
        self.conv33 = nn.Conv2d(32,64,7,1) # Input = 32x16x16 Output = 64x10x10

        # define a max pooling layer with kernel size 2
        self.maxpool = nn.MaxPool2d(2),# Output = 64x11x11
        # define dropout layer with a probability of 0.25
        self.dropout1 = nn.Dropout(0.25)
        # define dropout layer with a probability of 0.5
        self.dropout2 = nn.Dropout(0.5)
        # define a linear(dense) layer with 128 output features
        self.fc11 = nn.Linear(64*11*11,128)
        self.fc12 = nn.Linear(64*8*8,128)      # after maxpooling 2x2
        self.fc13 = nn.Linear(64*5*5,128)

        # define a linear(dense) layer with output features corresponding to the number of classes in the dataset
        self.fc21 = nn.Linear(128,10)
        self.fc22 = nn.Linear(128,10)
        self.fc23 = nn.Linear(128,10)

        self.fc33 = nn.Linear(30,10)
        

    def forward(self,x1):
        # Use the layers defined above in a sequential way (follow the same order as the layer definitions above) and
        # write the forward pass; after each of conv1,conv2,conv3 and fc1 use a relu activation.
        

        x = F.relu(self.conv11(x1))
        x = F.relu(self.conv21(x))
        x = F.relu(self.maxpool(self.conv31(x)))
        #x = torch.flatten(x,1)
        x = x.view(-1,64*11*11)
        x = self.dropout1(x)
        x = F.relu(self.fc11(x))
        x = self.dropout2(x)
        x = self.fc21(x)

        y = F.relu(self.conv12(x1))
        y = F.relu(self.conv22(y))
        y = F.relu(self.maxpool(self.conv32(y)))
        #x = torch.flatten(x,1)
        y = y.view(-1,64*8*8)
        y = self.dropout1(y)
        y = F.relu(self.fc12(y))
        y = self.dropout2(y)
        y = self.fc22(y)

        z = F.relu(self.conv13(x1))
        z = F.relu(self.conv23(z))
        z = F.relu(self.maxpool(self.conv33(z)))
        #x = torch.flatten(x,1)
        z = z.view(-1,64*5*5)
        z = self.dropout1(z)
        z = F.relu(self.fc13(z))
        z = self.dropout2(z)
        z = self.fc23(z)

        out = self.fc33(torch.cat((x,y,z),0))
        
        output = F.log_softmax(out,dim=1)
        return output

import unittest

class TestImplementations(unittest.TestCase):
    
    # Dataloading tests
    def test_dataset(self):
        self.dataset_classes = ['0 - zero','1 - one','2 - two','3 - three','4 - four','5 - five','6 - six','7 - seven','8 - eight','9 - nine']
        self.assertTrue(train_dataset.classes == self.dataset_classes)
        self.assertTrue(train_dataset.train == True)
    
    def test_dataloader(self):        
        self.assertTrue(train_dataloader.batch_size == 32)
        self.assertTrue(test_dataloader.batch_size == 32)      
         
    def test_total_parameters(self):
        model = Net().to(device)
        #self.assertTrue(sum(p.numel() for p in model.parameters()) == 1015946)

suite = unittest.TestLoader().loadTestsFromModule(TestImplementations())
unittest.TextTestRunner().run(suite)

def train(model,device,train_loader,optimizer,epoch):
    model.train()
    for batch_idx,(data,target) in enumerate(train_loader):
        # send the image,target to the device
        data,target = data.to(device),target.to(device)
        # flush out the gradients stored in optimizer
        optimizer.zero_grad()
        # pass the image to the model and assign the output to variable named output
        output = model(data)
        # calculate the loss (use nll_loss in pytorch)
        loss = F.nll_loss(output,target)
        # do a backward pass
        loss.backward()
        # update the weights
        optimizer.step()
      
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch,batch_idx * len(data),len(train_loader.dataset),100. * batch_idx / len(train_loader),loss.item()))

def test(model,test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data,target in test_loader:
          
            # send the image,target to the device
            data,target = data.to(device),target.to(device)
            # pass the image to the model and assign the output to variable named output
            output = model(data)
            test_loss += F.nll_loss(output,target,reduction='sum').item() # sum up batch loss
          
            pred = output.argmax(dim=1,keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)

    print('\nTest set: Average loss: {:.4f},Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss,correct,len(test_loader.dataset),100. * correct / len(test_loader.dataset)))

model = Net().to(device)

## Define Adam Optimiser with a learning rate of 0.01
optimizer =  torch.optim.Adam(model.parameters(),lr=0.01)

start = timeit.default_timer()
for epoch in range(1,11):
  train(model,device,train_dataloader,optimizer,epoch)
  test(model,test_dataloader)
stop = timeit.default_timer()
print('Total time taken: {} seconds'.format(int(stop - start)) )




This is my complete code. I cannot figure out what could be going wrong...

It gives the following error:

<ipython-input-72-194680537dcc> in forward(self,x1)
     46         x = F.relu(self.conv11(x1))
     47         x = F.relu(self.conv21(x))
---> 48         x = F.relu(self.maxpool(self.conv31(x)))
     49         #x = torch.flatten(x,1)
     50         x = x.view(-1,64*11*11)

TypeError: 'tuple' object is not callable


P.S.: PyTorch noob here.

Solutions

I see that you have not provided a stride argument in your definition of self.maxpool = nn.MaxPool2d(2). Choose one: for example, self.maxpool = nn.MaxPool2d(2,stride=2).
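
(Note: in PyTorch, nn.MaxPool2d falls back to stride = kernel_size when no stride is given, so nn.MaxPool2d(2) already pools with stride 2. A quick check:)

import torch.nn as nn

pool = nn.MaxPool2d(2)   # no explicit stride given
print(pool.stride)       # prints 2: stride defaults to kernel_size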


You have mistakenly placed a comma at the end of the line that defines self.maxpool: self.maxpool = nn.MaxPool2d(2),# Output = 64x11x11 See it? That comma makes self.maxpool a tuple instead of a torch.nn.modules.pooling.MaxPool2d, which is why calling it fails with TypeError: 'tuple' object is not callable. Drop the trailing comma and this error is resolved.
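
(A minimal runnable sketch of the pitfall and the fix; the dim=1 note for the final torch.cat is an additional observation based on the fc33 = nn.Linear(30,10) definition in the question, not part of the answer above:)

import torch
import torch.nn as nn

# A trailing comma builds a 1-element tuple, not a module:
maxpool = nn.MaxPool2d(2),
print(type(maxpool))        # <class 'tuple'>; calling it raises the TypeError

# Without the comma it is a real MaxPool2d module:
maxpool = nn.MaxPool2d(2)
print(maxpool(torch.randn(1, 64, 22, 22)).shape)  # torch.Size([1, 64, 11, 11])

# Once forward() runs, the branch outputs x, y, z are each of shape (N, 10),
# so they must be concatenated along dim=1 to give the (N, 30) input that
# nn.Linear(30, 10) expects; dim=0 would stack them into (3N, 10) instead.
x = y = z = torch.randn(32, 10)
out = torch.cat((x, y, z), 1)   # shape (32, 30), ready for self.fc33
print(out.shape)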

