How to solve a Python Sina Weibo web scraping problem
I am trying to scrape Weibo and am running into a problem with account login. My goal is to run searches on s.weibo.com using a list of queries stored in a .txt file. The Python script is below. Every time I run the code, it opens Weibo, enters the query successfully, and shows the matching posts. However, after a few seconds a window pops up asking me to log in (which I have already done). Does anyone know how to solve this? Thanks in advance! Any help is appreciated!
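Each line of query.txt is expected to look like keyword;date;start;end;page (that is what the split(';') in scrap() below assumes). An illustrative example line, with made-up values, would be:

元旦;2016-12-31;2016-12-31;2016-12-31;2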
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.chrome.options import Options
import urllib
import urllib.parse
from selenium import webdriver
import datetime
import time as systime
from selenium.webdriver.firefox.webdriver import FirefoxProfile
import unicodecsv as csv
base_url = 'http://s.weibo.com/weibo/'
file = open(r'C:\Users\some.name\query.txt',encoding="utf8")
file_index = 6
def scrap():
    global file_index
    with open(r'C:\Users\some.name\query.txt', encoding="utf8") as f:
        each_query = f.readlines()
    each_query = [x.strip() for x in each_query]
    # print urllib.quote(urllib.quote(each_query[0]))
    for each in each_query:
        query = each
        s = each.split(';')
        keyword = s[0]  # urllib.quote(urllib.quote(s[0]))
        date = s[1]
        start = s[2]
        end = s[3]
        page = s[4]
        scrap_each_query(keyword, date, start, end, page, query)
        file_index = file_index + 1
def scrap_each_query(keyword, date, start, end, page, query):  # accept all six arguments passed from scrap()
    real_keyword = keyword
    keyword = urllib.parse.quote(urllib.parse.quote(keyword))
    # login_url = 'http://m.weibo.com/'
    # driver = webdriver.Chrome()
    # driver.get(login_url)
    # driver.implicitly_wait(2)
    # string = '登录'
    # driver.find_element_by_link_text ( string.decode('utf-8') ).click()
    # driver.implicitly_wait(2)
    # driver.find_element_by_link_text(string.decode('utf-8') ).click()
    # savedCookies = driver.get_cookies()
    # # login code
    # pickle.dump(driver.get_cookies(),open("chrome.pkl","wb"))
    # driver.close()
    all_content = []
    all_time = []
    # profile = FirefoxProfile(r"C:\Users\keith.yuen\AppData\Roaming\Mozilla\Firefox\Profiles\ciyiapug.default-release")
    # driver = webdriver.Firefox(profile)
    # co = webdriver.ChromeOptions()
    # co.add_argument('user-data-dir=/Users/xuzhouyin/Library/Application Support/Google/Chrome/')
    driver = webdriver.Chrome(executable_path=r"C:\Users\some.name\chromedriver_win32\chromedriver.exe")
    url = base_url + keyword + "&typeall=1&suball=1&timescope=custom:" + start + ":" + end + "&page=" + "1"
    driver.get(url)
    systime.sleep(5)
    for i in range(int(page)):
        url = base_url + keyword + "&typeall=1&suball=1&timescope=custom:" + start + ":" + end + "&page=" + str(i + 1)
        # url = "http://s.weibo.com/weibo/%25E5%2585%2583%25E6%2597%25A6&typeall=1&suball=1&timescope=custom:2016-12-31:2016-12-31&Refer=g"
        # chrome_options = Options()
        # chrome_options.add_argument("~/Library/Application Support/Google/Chrome/Default");
        # co = webdriver.ChromeOptions()
        # co.add_argument('/Users/xuzhouyin/Library/Application\ Support/Google/Chrome/Default')
        # for cookie in pickle.load(open("chrome.pkl","rb")):
        #     driver.add_cookie(cookie)
        driver.get(url)
        # driver.magage().add_cookie(savedCookies)
        page_source = driver.page_source
        soup = BeautifulSoup(page_source, "html.parser")
        content = soup.findAll("p", {"class": "comment_txt"})
        time = soup.findAll("a", {"class": "W_textb"})
        for each in content:
            all_content.append(each.get_text())  # keep the text as str; unicodecsv handles encoding on write
        for each in time:
            each = each.get_text()
            post_time = ""
            if "月" in each:
                # e.g. "12月31日 23:40" becomes "<current year>-12-31"
                post_time = str(datetime.datetime.now().year) + "-" + each[0:each.index("月")] + "-" + each[(each.index("月") + 1):each.index("日")]
            else:
                post_time = each[0:each.index(" ")]
            all_time.append(post_time)
    driver.close()
    # `file` opened above is a file object, not a string, so only the numeric index is used for the output filename
    save_to_csv(str(file_index), real_keyword, all_content, all_time, query)
def save_to_csv(filename, keyword, content, time, query):
    # unicodecsv writes encoded bytes, so the file must be opened in binary mode
    with open('./output/' + filename + '.csv', 'wb') as csvfile:
        spamwriter = csv.writer(csvfile, dialect='excel', encoding='utf-16')
        spamwriter.writerow(["query", "Post ID", "keyword", "event Date", "Post Content", "Post Time"])
        for i in range(len(content)):
            # "event Date" is not passed to this function, so that column is left blank
            spamwriter.writerow([query, i + 1, keyword, "", content[i], time[i]])


scrap()
Solution
I think the real problem you are facing is how to log in successfully, or how to avoid logging in from the script at all.
It is hard to tell from the code whether the login succeeded or why it failed.
So here is another way to solve the problem: use the browser with its default user data/profile to avoid logging in.
Note: this assumes you are already logged in to Weibo in the browser you use day to day.
For reference, a rough demo of the idea follows.
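The original demo is not included here; below is a minimal sketch of the idea, assuming Selenium 3 with chromedriver on Windows and that Chrome keeps its user data in the default location. Both paths are placeholders to adjust for your machine, and on older Selenium 3 releases the keyword argument is chrome_options= instead of options=. Close all running Chrome windows first, since the profile can only be used by one Chrome process at a time.

from selenium import webdriver

# Reuse the everyday Chrome profile so the existing Weibo login cookies are sent automatically.
# Both paths are assumptions -- point them at your own Chrome user data folder and chromedriver.
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument(r"--user-data-dir=C:\Users\some.name\AppData\Local\Google\Chrome\User Data")
# chrome_options.add_argument("--profile-directory=Default")  # pick a specific profile if you use several

driver = webdriver.Chrome(
    executable_path=r"C:\Users\some.name\chromedriver_win32\chromedriver.exe",
    options=chrome_options)

driver.get("http://s.weibo.com/")  # the search page should now load with your logged-in session

With this in place, the driver created in scrap_each_query() can be built with the same options, and the search pages should then load without the login pop-up.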