How to resolve issues when setting up Hyperledger Fabric 2.0 on different container ports for testing and development

I have been working on a Hyperledger Fabric 2.0 multi-org network running on the default ports. The setup is as follows:

Org1 (Peer0: 7051, Peer1: 8051, CA: 7054, couchdb0: 5984, couchdb1: 6984:5984)
Org2 (Peer0: 9051, Peer1: 10051, CA: 8054, couchdb2: 7984:5984, couchdb3: 8984:5984)
Orderers (Orderer1: 7050, Orderer2: 8050, Orderer3: 9050), using the Raft consensus mechanism

The requirement is to remap all of the container ports above so that I can run the same Fabric application as two environments, one for testing (stable) and one for development. I have tried changing the ports of the peers, orderers, and CAs (via the environment variables that set the ports in docker-compose), but I found no such option for CouchDB, which always uses its default port (5984).

Is there any way to achieve this? It would also help with running two different Fabric applications in the same VM.

EDIT 1: my docker-compose.yaml file (I have only included Org1 (peer0, peer1), Orderer1, ca-org1, couchdb0, and couchdb1):

version: "2"

networks:
  test2:

services:
  ca-org1:
    image: hyperledger/fabric-ca
    environment:
      - FABRIC_CA_HOME=/etc/hyperledger/fabric-ca-server
      - FABRIC_CA_SERVER_CA_NAME=ca.org1.test.com
      - FABRIC_CA_SERVER_CA_CERTFILE=/etc/hyperledger/fabric-ca-server-config/ca.org1.test.com-cert.pem
      - FABRIC_CA_SERVER_CA_KEYFILE=/etc/hyperledger/fabric-ca-server-config/priv_sk
      - FABRIC_CA_SERVER_TLS_ENABLED=true
      - FABRIC_CA_SERVER_TLS_CERTFILE=/etc/hyperledger/fabric-ca-server-tls/tlsca.org1.test.com-cert.pem
      - FABRIC_CA_SERVER_TLS_KEYFILE=/etc/hyperledger/fabric-ca-server-tls/priv_sk
    ports:
      - "3054:3054"
    command: sh -c 'fabric-ca-server start -b admin:adminpw -d'
    volumes:
      - ./channel/crypto-config/peerOrganizations/org1.test.com/ca/:/etc/hyperledger/fabric-ca-server-config
      - ./channel/crypto-config/peerOrganizations/org1.test.com/tlsca/:/etc/hyperledger/fabric-ca-server-tls
    container_name: ca.org1.test.com
    hostname: ca.org1.test.com
    networks:
      - test2

  orderer.test.com:
    container_name: orderer.test.com
    image: hyperledger/fabric-orderer:2.1
    dns_search: .
    environment:
      - ORDERER_GENERAL_LOGLEVEL=info
      - FABRIC_LOGGING_SPEC=INFO
      - ORDERER_GENERAL_LISTENADDRESS=0.0.0.0
      - ORDERER_GENERAL_GENESISMETHOD=file
      - ORDERER_GENERAL_GENESISFILE=/var/hyperledger/orderer/genesis.block
      - ORDERER_GENERAL_LOCALMSPID=OrdererMSP
      - ORDERER_GENERAL_LOCALMSPDIR=/var/hyperledger/orderer/msp
      - ORDERER_GENERAL_TLS_ENABLED=true
      - ORDERER_GENERAL_TLS_PRIVATEKEY=/var/hyperledger/orderer/tls/server.key
      - ORDERER_GENERAL_TLS_CERTIFICATE=/var/hyperledger/orderer/tls/server.crt
      - ORDERER_GENERAL_TLS_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
      - ORDERER_KAFKA_VERBOSE=true
      - ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE=/var/hyperledger/orderer/tls/server.crt
      - ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY=/var/hyperledger/orderer/tls/server.key
      - ORDERER_GENERAL_CLUSTER_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
      - ORDERER_METRICS_PROVIDER=prometheus
      - ORDERER_OPERATIONS_LISTENADDRESS=0.0.0.0:3443
      - ORDERER_GENERAL_LISTENPORT=3050
    working_dir: /opt/gopath/src/github.com/hyperledger/fabric/orderers
    command: orderer
    ports:
      - 3050:3050
      - 3443:3443
    networks:
      - test2
    volumes:
      - ./channel/genesis.block:/var/hyperledger/orderer/genesis.block
      - ./channel/crypto-config/ordererOrganizations/test.com/orderers/orderer.test.com/msp:/var/hyperledger/orderer/msp
      - ./channel/crypto-config/ordererOrganizations/test.com/orderers/orderer.test.com/tls:/var/hyperledger/orderer/tls

  couchdb0:
    container_name: couchdb0-test
    image: hyperledger/fabric-couchdb
    environment:
      - COUCHDB_USER=
      - COUCHDB_PASSWORD=
    ports:
      - 1984:1984
    networks:
      - test2

  couchdb1:
    container_name: couchdb1-test
    image: hyperledger/fabric-couchdb
    environment:
      - COUCHDB_USER=
      - COUCHDB_PASSWORD=
    ports:
      - 2984:1984
    networks:
      - test2

  
  peer0.org1.test.com:
    container_name: peer0.org1.test.com
    extends:
      file: base.yaml
      service: peer-base
    environment:
      - FABRIC_LOGGING_SPEC=DEBUG
      - ORDERER_GENERAL_LOGLEVEL=DEBUG
      - CORE_PEER_LOCALMSPID=Org1MSP

      - CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=artifacts_test2

      - CORE_PEER_ID=peer0.org1.test.com
      - CORE_PEER_ADDRESS=peer0.org1.test.com:3051
      - CORE_PEER_LISTENADDRESS=0.0.0.0:3051
      - CORE_PEER_CHAINCODEADDRESS=peer0.org1.test.com:3052
      - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:3052
      - CORE_PEER_GOSSIP_BOOTSTRAP=peer1.org1.test.com:4051
      - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org1.test.com:3051

      # - CORE_OPERATIONS_LISTENADDRESS=0.0.0.0:9440

      - CORE_LEDGER_STATE_STATEDATABASE=CouchDB
      - CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb0-test:1984
      - CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME=
      - CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD=
      - CORE_METRICS_PROVIDER=prometheus
      - CORE_PEER_TLS_ENABLED=true
      - CORE_PEER_TLS_CERT_FILE=/etc/hyperledger/crypto/peer/tls/server.crt
      - CORE_PEER_TLS_KEY_FILE=/etc/hyperledger/crypto/peer/tls/server.key
      - CORE_PEER_TLS_ROOTCERT_FILE=/etc/hyperledger/crypto/peer/tls/ca.crt
      - CORE_PEER_MSPCONFIGPATH=/etc/hyperledger/crypto/peer/msp
    depends_on:
      - couchdb0
    ports:
      - 3051:3051
    volumes:
      - ./channel/crypto-config/peerOrganizations/org1.test.com/peers/peer0.org1.test.com/msp:/etc/hyperledger/crypto/peer/msp
      - ./channel/crypto-config/peerOrganizations/org1.test.com/peers/peer0.org1.test.com/tls:/etc/hyperledger/crypto/peer/tls
      - /var/run/:/host/var/run/
      - ./channel/:/etc/hyperledger/channel/
    networks:
      - test2

  peer1.org1.test.com:
    container_name: peer1.org1.test.com
    extends:
      file: base.yaml
      service: peer-base
    environment:
      - FABRIC_LOGGING_SPEC=DEBUG
      - ORDERER_GENERAL_LOGLEVEL=debug
      - CORE_PEER_LOCALMSPID=Org1MSP

      - CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=artifacts_test2

      - CORE_PEER_ID=peer1.org1.test.com
      - CORE_PEER_ADDRESS=peer1.org1.test.com:4051
      - CORE_PEER_LISTENADDRESS=0.0.0.0:4051
      - CORE_PEER_CHAINCODEADDRESS=peer1.org1.test.com:4052
      - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:4052
      - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer1.org1.test.com:4051
      - CORE_PEER_GOSSIP_BOOTSTRAP=peer0.org1.test.com:3051

      - CORE_LEDGER_STATE_STATEDATABASE=CouchDB
      - CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb1-test:1984
      - CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME=
      - CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD=
      - CORE_METRICS_PROVIDER=prometheus
      # - CORE_OPERATIONS_LISTENADDRESS=0.0.0.0:9440
      - CORE_PEER_TLS_ENABLED=true
      - CORE_PEER_TLS_CERT_FILE=/etc/hyperledger/crypto/peer/tls/server.crt
      - CORE_PEER_TLS_KEY_FILE=/etc/hyperledger/crypto/peer/tls/server.key
      - CORE_PEER_TLS_ROOTCERT_FILE=/etc/hyperledger/crypto/peer/tls/ca.crt
      - CORE_PEER_MSPCONFIGPATH=/etc/hyperledger/crypto/peer/msp
    ports:
      - 4051:4051
    volumes:
      - ./channel/crypto-config/peerOrganizations/org1.test.com/peers/peer1.org1.test.com/msp:/etc/hyperledger/crypto/peer/msp
      - ./channel/crypto-config/peerOrganizations/org1.test.com/peers/peer1.org1.test.com/tls:/etc/hyperledger/crypto/peer/tls
      - /var/run/:/host/var/run/
      - ./channel/:/etc/hyperledger/channel/
    networks:
      - test2


Thanks for the suggestions regarding CouchDB. I think we should just keep the default CouchDB port for each instance. In any case, I had missed the step of changing the container names first (from the default peer0.org1.example.com to peer0.org1.test.com); after doing that I was able to start the Docker containers under the new names without stopping (recreating) the existing containers already running on the original ports. The problem I am facing now is that the peers cannot communicate with the couchdb-test URLs:

U 04c Entering VerifyCouchConfig()
2020-08-12 11:22:45.010 UTC [couchdb] handleRequest -> DEBU 04d Entering handleRequest()  method=GET  url=http://couchdb1-test:1984/  dbName=
2020-08-12 11:22:45.010 UTC [couchdb] handleRequest -> DEBU 04e Request URL: http://couchdb1-test:1984/
2020-08-12 11:22:45.011 UTC [couchdb] handleRequest -> WARN 04f Retrying couchdb request in 125ms. Attempt:1  Error:Get "http://couchdb1-test:1984/": dial tcp 172.27.0.11:1984: connect: connection refused
2020-08-12 11:22:45.137 UTC [couchdb] handleRequest -> WARN 050 Retrying couchdb request in 250ms. Attempt:2  Error:Get "http://couchdb1-test:1984/": dial tcp 172.27.0.11:1984: connect: connection refused
2020-08-12 11:22:45.389 UTC [couchdb] handleRequest -> WARN 051 Retrying couchdb request in 500ms. Attempt:3  Error:Get "http://couchdb1-test:1984/": dial tcp 172.27.0.11:1984: connect: connection refused

As a result, if I try to create a channel, the peer container exits; even though it had been running until now, it cannot join the channel:

2020-08-12 10:58:29.264 UTC [channelCmd] InitCmdFactory -> INFO 001 Endorser and orderer connections initialized
2020-08-12 10:58:29.301 UTC [cli.common] readBlock -> INFO 002 Expect block,but got status: &{NOT_FOUND}
2020-08-12 10:58:29.305 UTC [channelCmd] InitCmdFactory -> INFO 003 Endorser and orderer connections initialized
2020-08-12 10:58:29.506 UTC [cli.common] readBlock -> INFO 004 Expect block,but got status: &{SERVICE_UNAVAILABLE}
2020-08-12 10:58:29.509 UTC [channelCmd] InitCmdFactory -> INFO 005 Endorser and orderer connections initialized
2020-08-12 10:58:29.710 UTC [cli.common] readBlock -> INFO 006 Expect block,but got status: &{SERVICE_UNAVAILABLE}
2020-08-12 10:58:29.713 UTC [channelCmd] InitCmdFactory -> INFO 007 Endorser and orderer connections initialized
2020-08-12 10:58:29.916 UTC [cli.common] readBlock -> INFO 008 Expect block,but got status: &{SERVICE_UNAVAILABLE}
2020-08-12 10:58:29.922 UTC [channelCmd] InitCmdFactory -> INFO 009 Endorser and orderer connections initialized
2020-08-12 10:58:30.123 UTC [cli.common] readBlock -> INFO 00a Expect block,but got status: &{SERVICE_UNAVAILABLE}
2020-08-12 10:58:30.126 UTC [channelCmd] InitCmdFactory -> INFO 00b Endorser and orderer connections initialized
2020-08-12 10:58:30.327 UTC [cli.common] readBlock -> INFO 00c Expect block,but got status: &{SERVICE_UNAVAILABLE}
2020-08-12 10:58:30.331 UTC [channelCmd] InitCmdFactory -> INFO 00d Endorser and orderer connections initialized
2020-08-12 10:58:30.534 UTC [cli.common] readBlock -> INFO 00e Received block: 0
Error: error getting endorser client for channel: endorser client failed to connect to localhost:3051: failed to create new connection: connection error: desc = "transport: authentication handshake failed: read tcp 127.0.0.1:53668->127.0.0.1:3051: read: connection reset by peer"
Error: error getting endorser client for channel: endorser client failed to connect to localhost:4051: failed to create new connection: connection error: desc = "transport: authentication handshake failed: read tcp 127.0.0.1:60724->127.0.0.1:4051: read: connection reset by peer"
Error: error getting endorser client for channel: endorser client failed to connect to localhost:5051: failed to create new connection: connection error: desc = "transport: authentication handshake failed: read tcp 127.0.0.1:57948->127.0.0.1:5051: read: connection reset by peer"
Error: error getting endorser client for channel: endorser client failed to connect to localhost:6051: failed to create new connection: connection error: desc = "transport: authentication handshake failed: read tcp 127.0.0.1:58976->127.0.0.1:6051: read: connection reset by peer"
2020-08-12 10:58:37.518 UTC [channelCmd] InitCmdFactory -> INFO 001 Endorser and orderer connections initialized
2020-08-12 10:58:37.552 UTC [channelCmd] update -> INFO 002 Successfully submitted channel update
2020-08-12 10:58:37.685 UTC [channelCmd] InitCmdFactory -> INFO 001 Endorser and orderer connections initialized
2020-08-12 10:58:37.763 UTC [channelCmd] update -> INFO 002 Successfully submitted channel update

So here, even after changing the ports, only the orderers were successfully added to the channel; the peers were not.

Solution

You can change the CouchDB port in the docker-compose file. Here is a snippet from a docker-compose.yaml file:

  couchdb0:
    container_name: couchdb0
    image: couchdb:2.3
    # Populate the COUCHDB_USER and COUCHDB_PASSWORD to set an admin user and password
    # for CouchDB.  This will prevent CouchDB from operating in an "Admin Party" mode.
    environment:
      - COUCHDB_USER=
      - COUCHDB_PASSWORD=
    # Comment/Uncomment the port mapping if you want to hide/expose the CouchDB
    # service, for example map it to utilize the Fauxton user interface in dev
    # environments.
    ports:
      - "5984:5984"
    networks:
      - byfn

From here you can easily change the port.
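For the original requirement (a test and a dev environment on the same VM), note that only the host side of the port mapping needs to change between environments; the container side stays on CouchDB's fixed internal port 5984. Below is a minimal sketch of a second, dev-side CouchDB service; the service, network, and host-port names here are illustrative, not taken from the question:

  couchdb0-dev:
    container_name: couchdb0-dev
    image: hyperledger/fabric-couchdb
    environment:
      - COUCHDB_USER=
      - COUCHDB_PASSWORD=
    ports:
      - "15984:5984" # dev host port -> CouchDB's fixed container port
    networks:
      - dev

A peer in the dev environment would then use CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb0-dev:5984; the host-side port (15984 here) only matters for access from outside Docker, for example the Fauxton UI.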


This is not a problem; you can specify it just as you do for the others. Are you facing some specific issue when mapping the port?

ports:
  - 6984:5984 # Mapping Host Port to Container Port
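
Given the compose file in the question, that mapping is most likely the cause of the connection refused errors: 1984:1984 publishes container port 1984, but CouchDB inside the container still listens on 5984, so nothing answers on 1984. A sketch of the fix, keeping the question's container names and assuming no CouchDB configuration override:

  couchdb0:
    container_name: couchdb0-test
    image: hyperledger/fabric-couchdb
    ports:
      - 1984:5984 # host 1984 -> container 5984 (CouchDB's default listen port)

and, on the peer, address CouchDB by its container port:

      - CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb0-test:5984

The same applies to couchdb1-test. Since the peer and CouchDB share the test2 Docker network, the peer always connects to the container port, regardless of which host port is published.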

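To confirm where CouchDB is actually listening, a quick check (a sketch; the artifacts_test2 network name is taken from the question's CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE and may differ depending on your compose project name):

  # From the host, test the published (host-side) port:
  curl http://localhost:1984/

  # From inside the Docker network, test the container port, using a
  # throwaway container attached to the same network:
  docker run --rm --network artifacts_test2 curlimages/curl http://couchdb0-test:5984/

A healthy CouchDB answers with a JSON welcome document. In the setup from the question, couchdb0-test:1984 refuses connections while couchdb0-test:5984 answers, which matches the peer's retry errors above.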
