Errno 104-由Docker上的对等方重置连接

如何解决Errno 104-由Docker上的对等方重置连接

我有一个托管在服务器上的Web项目。前端是 Angular,后端是 Flask,数据库是 MongoDB,所有这些都是作为相互链接的 Docker 容器制成的。

当后端尝试从mongodb获取数据时,会出现问题。 它是->“我的服务器的IP地址”:27017:[Errno 104]对等重置连接

代码是-

# Vectorization of text using TF/IDF and calculating similarity with the asked-question
# vector, with increased weights for technical terms
import gensim
import numpy as np
import re
import json


def get_keywords(ques, bug_id):
    """Extract digit-bearing keywords from *ques* (useful for analytics).

    Keywords are tokens containing at least one digit, e.g. release versions
    ("16.6.1") or platform names ("CAT9k"). Slash-joined tokens such as
    "16.6.1/16.6.2" are split into their parts. The result is lower-cased and
    the bug ID itself (if present) is removed.

    :param ques: question text asked by the user
    :param bug_id: bug identifier to exclude from the keyword list
    :return: list of lower-cased keyword strings
    """
    num_words = []
    for word in ques.split():
        if re.search(r'\d', word) is not None:
            num_words.append(word.strip('? '))

    # Expand slash-separated tokens into their parts. The original code
    # removed items from num_words while iterating over it, which can skip
    # elements; build a new list instead.
    expanded = [w for w in num_words if '/' not in w]
    for word in num_words:
        if '/' in word:
            expanded.extend(part.strip(' ?') for part in word.split('/'))

    keywords = [w.lower() for w in expanded]
    if bug_id.lower() in keywords:
        keywords.remove(bug_id.lower())
    return keywords


def get_numeric_similarity(ques, s, bug_id):
    """Keyword similarity (release versions, platforms, etc.) between the
    asked question *ques* and one stored question *s* for the same bug.

    Each keyword (digit-bearing token) extracted from *ques* earns credit
    when it matches *s*: 1.0 for exact/wildcard matches, 0.5 for partial
    matches. The final score is total credit divided by the number of
    keywords in the asked question (0 keywords -> returns the raw count 0).

    :param ques: question asked by the user
    :param s: stored question text to compare against
    :param bug_id: bug identifier excluded from the keyword set
    :return: similarity score (float), normally in [0, 1]
    """
    s = s.lower()
    cnt = 0

    # --- keyword extraction (same rules as get_keywords) ----------------
    num_words = []
    for word in ques.split():
        if re.search(r'\d', word) is not None:
            num_words.append(word.strip('? '))
    # Expand slash-separated tokens without mutating the list being iterated.
    expanded = [w for w in num_words if '/' not in w]
    for word in num_words:
        if '/' in word:
            expanded.extend(part.strip(' ?') for part in word.split('/'))
    num_words = [w.lower() for w in expanded]
    if bug_id.lower() in num_words:
        num_words.remove(bug_id.lower())
    size = len(num_words)

    # --- 1) exact substring match (e.g. 16.6.1 vs 16.6.1) ---------------
    del_list = []
    for w in num_words:
        if w in s:
            cnt += 1
            del_list.append(w)
    for w in del_list:
        num_words.remove(w)

    # --- 2) stored question uses an 'x' wildcard (16.x matches 16.6.1) --
    del_list = []
    for tok in s.split():
        if re.search(r'[0-9]{1,2}\.x\.x|[0-9]{1,2}\.x', tok):
            for w in num_words:
                if tok.split('.')[0] in w.split('.')[0] and w not in del_list:
                    cnt += 1
                    del_list.append(w)
    for w in del_list:
        num_words.remove(w)

    # All full version numbers present in the stored question.
    versions = re.findall(r'[0-9]{1,2}\.[0-9]{1,2}\.{0,1}[0-9]{0,2}', s)

    # --- 3) asked question uses an 'x' wildcard: partial credit 0.5 -----
    # NOTE(review): this regex literal was garbled in the pasted source
    # (`r'[0-9]{1,i)`); reconstructed from the parallel pattern in step 2.
    del_list = []
    for w in num_words:
        if re.search(r'[0-9]{1,2}\.x\.x|[0-9]{1,2}\.x', w):
            for ver in versions:
                if w.split('.')[0] == ver.split('.')[0]:
                    cnt += 0.5
                    del_list.append(w)
                    break
    for w in del_list:
        num_words.remove(w)

    # --- 4) 'k' abbreviation match (cat9000 matches cat9k) --------------
    del_list = []
    if len(num_words) > 0:
        find1 = re.findall(r'[a-z]{1,3}\s{0,1}[0-9]000', s)
        find2 = re.findall(r'[a-z]{1,1}[0-9]k', s)
        found = ["".join(f.split()) for f in find1 + find2]
        found = [re.sub(r'000', 'k', f) for f in found]
        # Fix: the original `re.sub(r'000', i)` was missing the replacement
        # argument (TypeError); mirror the substitution applied to `found`.
        num_words = [re.sub(r'000', 'k', w) for w in num_words]
        for w in num_words:
            if w in found:
                cnt += 1
                del_list.append(w)
        for w in del_list:
            num_words.remove(w)

    # --- 5) digits-only match (Catalyst 9000 matches Cat9000): 0.5 ------
    del_list = []
    if len(num_words) > 0:
        for w in num_words:
            m = re.search(r'\d(.*)\d|\dk', w)
            if m and m.group(0) in s:
                cnt += 0.5
                del_list.append(w)
    for w in del_list:
        num_words.remove(w)

    if size > 0:
        # keyword similarity = matches found / keywords in asked question
        cnt = cnt / size
    return cnt


def recommend_Questions(ques, question, answer, sim_th, bug_id,
                        num_sim_wt, tfidf_sim_wt, location):
    """Recommend stored Q&A pairs similar to the asked question *ques*.

    Vectorizes text with a TF-IDF model built only from the "sure shot"
    questions of the given bug, and combines the TF-IDF cosine similarity
    with the technical-term keyword similarity (get_numeric_similarity) as a
    weighted average. Pairs whose score (in percent) reaches *sim_th* are
    returned, best first.

    :param ques: question asked by the user
    :param question: dict id -> list of (question_text, sure_shot_flag) pairs
    :param answer: dict id -> answer (indexable; answer[id][0] is shown)
    :param sim_th: similarity threshold in percent
    :param bug_id: bug identifier; only ids containing it are recommended
    :param num_sim_wt: weight (percent) of the keyword similarity
    :param tfidf_sim_wt: weight (percent) of the TF-IDF similarity
    :param location: path prefix where the gensim Similarity index is stored
    :return: list of [question_text, answer, score_percent, id], sorted by
        score descending
    """
    data = []
    ind = []

    # Keep only "sure shot" questions (flag == 1), one per id.
    for key, value in question.items():
        for ele in value:
            if ele[1] == 1:
                data.append(ele[0])
                ind.append(key)
                break

    # Tokenize the stored questions.
    gen_docs = []
    for ele in data:
        tok = []
        for element in ele.split():
            element = element.lower().strip("? .:\'\"")
            if element != '':
                tok.append(element)
        gen_docs.append(tok)

    # Build the TF-IDF model and the (disk-backed) similarity index.
    dictionary = gensim.corpora.Dictionary(gen_docs)
    corpus = [dictionary.doc2bow(gen_doc) for gen_doc in gen_docs]
    tf_idf = gensim.models.TfidfModel(corpus)
    sims = gensim.similarities.Similarity(location, tf_idf[corpus],
                                          num_features=len(dictionary))

    # Tokenize the asked question the same way.
    query_doc = []
    for element in ques.split():
        element = element.lower().strip("? .:\'\"")
        if element != '':
            query_doc.append(element)
    query_doc_bow = dictionary.doc2bow(query_doc)
    query_doc_tf_idf = tf_idf[query_doc_bow]

    # Technical-term keyword similarity for every stored question.
    num_score = np.array([get_numeric_similarity(ques, ele, bug_id)
                          for ele in data])

    # Weighted average of keyword and TF-IDF similarity.
    total_score = (((num_sim_wt / 100) * num_score)
                   + ((tfidf_sim_wt / 100) * sims[query_doc_tf_idf]))

    res = []
    for i, sim in enumerate(total_score):
        if sim * 100 >= sim_th and bug_id in ind[i]:
            res.append([data[i], answer[ind[i]], sim * 100, ind[i]])

    res.sort(key=lambda x: x[2], reverse=True)
    return res

 def recommend_Unanswered(ques,unanswered,location): 
#Retrieving most similar unanswered question (similar to Q&A recommendation)

data = []
ind = []

#putting sure shot questions into data list
for key,value in unanswered.items():
    data.append(value[0])
    ind.append(key)

#generate tokens from data
gen_docs = []
for ele in data:
    tokens = ele.split()
    tok = []
    for element in tokens:
        element = element.lower().strip("? .:\'\"")
        if element != '':
            tok.append(element)
    gen_docs.append(tok)

#generating corpora dict
dictionary = gensim.corpora.Dictionary(gen_docs)

corpus = [dictionary.doc2bow(gen_doc) for gen_doc in gen_docs]

tf_idf = gensim.models.TfidfModel(corpus)
sims = gensim.similarities.Similarity(location,bug_id))

num_score = np.array(num_score)

total_score = ((num_sim_wt/100)*num_score) + ((tfidf_sim_wt/100)*sims[query_doc_tf_idf]) #weighted average similarity

res = []   
for i,ind[i]])
        
res.sort(key = lambda x: x[1],reverse = True)
        
return res


def NCE_query_similarity(mydb, query_doc, query_bug_id, location, q_sim_th, uns_sim_th):
    """Top-level query handler: find related Q&A pairs or, failing that,
    similar stored unanswered questions for *query_doc*.

    :param mydb: MongoDB database handle with "questions" and "unanswered"
        collections
    :param query_doc: the question text asked by the user
    :param query_bug_id: bug identifier used to filter stored records by id
    :param location: path prefix for the gensim Similarity index
    :param q_sim_th: similarity threshold (percent) for Q&A recommendation
    :param uns_sim_th: similarity threshold (percent) for unanswered matching
    :return: ["Related Q&As", content], ["Related Unanswered Query", content],
        "No matching Questions Found", or None on error
    """
    try:
        unanswered_db = mydb["unanswered"]
        questions_db = mydb["questions"]

        # id -> stored payloads. Schema assumed from usage below:
        # question[id] is a list of (text, sure_shot_flag) pairs,
        # answer[id] is indexable (answer[id][0] is shown) — TODO confirm.
        question = {}
        answer = {}
        unanswered = {}

        for i in questions_db.find():
            question[i['_id']] = i['question']
            answer[i['_id']] = i['answer']
        for i in unanswered_db.find():
            unanswered[i['_id']] = i['unanswered']

        content_lis = []
        counter = 1

        # Restrict to records belonging to the queried bug id.
        question_bug = {k: v for k, v in question.items() if query_bug_id in k}

        res = []
        if len(question_bug) > 0:
            # Retrieve similar Q&A pairs for the asked query.
            # NOTE(review): this call does not match recommend_Questions'
            # signature (answer, sim_th, bug_id and the two weight arguments
            # are missing) — the argument list was lost in the pasted source;
            # restore it from the original project code.
            res = recommend_Questions(query_doc, question_bug, location)
        print(res)

        # Related Q&As found: return [type, [[counter, qn, ans, sim], ...]].
        if len(res) > 0:
            for i in res:
                content_lis.append([counter, i[0], i[1][0], i[2]])
                counter += 1
            return ["Related Q&As", content_lis]

        # No Q&A match: check similarity against stored unanswered questions.
        unanswered_bug = {k: v for k, v in unanswered.items()
                          if query_bug_id in k}

        if len(unanswered_bug) > 0:
            # Retrieve the most similar unanswered questions for the query.
            res = recommend_Unanswered(query_doc, unanswered_bug,
                                       uns_sim_th, location)
            if len(res) > 0:  # "gentle reminder" case
                # Persist the matched unanswered-question ids for later use.
                unans_id_list = [j[-1] for j in res]
                with open("temp_unans_id.json", 'w') as fp:
                    json.dump(unans_id_list, fp)

                for i in res:
                    content_lis.append([counter, i[1]])
                    counter += 1
                return ["Related Unanswered Query", content_lis]

        # Nothing similar among Q&As or unanswered questions: likely a
        # completely new question.
        if len(res) == 0:
            return "No matching Questions Found"
    except Exception as e:
        # Boundary handler: log and signal failure to the caller.
        print(e)
        return None

            

版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。

相关推荐


依赖报错 idea导入项目后依赖报错,解决方案:https://blog.csdn.net/weixin_42420249/article/details/81191861 依赖版本报错:更换其他版本 无法下载依赖可参考:https://blog.csdn.net/weixin_42628809/a
错误1:代码生成器依赖和mybatis依赖冲突 启动项目时报错如下 2021-12-03 13:33:33.927 ERROR 7228 [ main] o.s.b.d.LoggingFailureAnalysisReporter : *************************** APPL
错误1:gradle项目控制台输出为乱码 # 解决方案:https://blog.csdn.net/weixin_43501566/article/details/112482302 # 在gradle-wrapper.properties 添加以下内容 org.gradle.jvmargs=-Df
错误还原:在查询的过程中,传入的workType为0时,该条件不起作用 <select id="xxx"> SELECT di.id, di.name, di.work_type, di.updated... <where> <if test=&qu
报错如下,gcc版本太低 ^ server.c:5346:31: 错误:‘struct redisServer’没有名为‘server_cpulist’的成员 redisSetCpuAffinity(server.server_cpulist); ^ server.c: 在函数‘hasActiveC
解决方案1 1、改项目中.idea/workspace.xml配置文件,增加dynamic.classpath参数 2、搜索PropertiesComponent,添加如下 <property name="dynamic.classpath" value="tru
删除根组件app.vue中的默认代码后报错:Module Error (from ./node_modules/eslint-loader/index.js): 解决方案:关闭ESlint代码检测,在项目根目录创建vue.config.js,在文件中添加 module.exports = { lin
查看spark默认的python版本 [root@master day27]# pyspark /home/software/spark-2.3.4-bin-hadoop2.7/conf/spark-env.sh: line 2: /usr/local/hadoop/bin/hadoop: No s
使用本地python环境可以成功执行 import pandas as pd import matplotlib.pyplot as plt # 设置字体 plt.rcParams['font.sans-serif'] = ['SimHei'] # 能正确显示负号 p
错误1:Request method ‘DELETE‘ not supported 错误还原:controller层有一个接口,访问该接口时报错:Request method ‘DELETE‘ not supported 错误原因:没有接收到前端传入的参数,修改为如下 参考 错误2:cannot r
错误1:启动docker镜像时报错:Error response from daemon: driver failed programming external connectivity on endpoint quirky_allen 解决方法:重启docker -> systemctl r
错误1:private field ‘xxx‘ is never assigned 按Altʾnter快捷键,选择第2项 参考:https://blog.csdn.net/shi_hong_fei_hei/article/details/88814070 错误2:启动时报错,不能找到主启动类 #
报错如下,通过源不能下载,最后警告pip需升级版本 Requirement already satisfied: pip in c:\users\ychen\appdata\local\programs\python\python310\lib\site-packages (22.0.4) Coll
错误1:maven打包报错 错误还原:使用maven打包项目时报错如下 [ERROR] Failed to execute goal org.apache.maven.plugins:maven-resources-plugin:3.2.0:resources (default-resources)
错误1:服务调用时报错 服务消费者模块assess通过openFeign调用服务提供者模块hires 如下为服务提供者模块hires的控制层接口 @RestController @RequestMapping("/hires") public class FeignControl
错误1:运行项目后报如下错误 解决方案 报错2:Failed to execute goal org.apache.maven.plugins:maven-compiler-plugin:3.8.1:compile (default-compile) on project sb 解决方案:在pom.
参考 错误原因 过滤器或拦截器在生效时,redisTemplate还没有注入 解决方案:在注入容器时就生效 @Component //项目运行时就注入Spring容器 public class RedisBean { @Resource private RedisTemplate<String
使用vite构建项目报错 C:\Users\ychen\work>npm init @vitejs/app @vitejs/create-app is deprecated, use npm init vite instead C:\Users\ychen\AppData\Local\npm-