Python Sina Weibo web scraping problem

Stack Overflow user
Asked on 2020-08-19 17:56:58
1 answer · 159 views · 0 followers · 0 votes

I am trying to web-scrape Weibo, but I am running into a problem with my account login. My goal is to run searches on s.weibo.com using a list of queries stored in a .txt file. The Python script is below. Every time I run the code it opens Weibo, enters the query successfully, and shows the matching posts. However, after a few seconds a window pops up asking me to log in (even though I am already logged in). Does anyone know how to solve this? Thanks in advance, any help is appreciated!

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.chrome.options import Options
import urllib
import urllib.parse 
import datetime
import time as systime
from selenium.webdriver.firefox.webdriver import FirefoxProfile
import unicodecsv as csv
base_url = 'http://s.weibo.com/weibo/'
# Prefix for the output CSV file name (arbitrary; output files become e.g. query6.csv).
# query.txt itself is opened inside scrape(), so no file handle is needed here.
file = 'query'
file_index = 6
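# scrape() reads query.txt; each line must hold five semicolon-separated
# fields -- keyword;date;start;end;pages -- which drive one search run.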
def scrape():
    global file_index
    with open(r'C:\Users\some.name\query.txt', encoding="utf8") as f:
        each_query = f.readlines()
    each_query = [x.strip() for x in each_query]
    # print urllib.quote(urllib.quote(each_query[0]))
    for each in each_query:
        query = each
        s = each.split(';')
        keyword = s[0]# urllib.quote(urllib.quote(s[0]))
        date = s[1]
        start = s[2]
        end = s[3]
        page = s[4]
        scrape_each_query(keyword, date, start, end, page, query)
        file_index = file_index + 1

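# For one query, open s.weibo.com with the keyword and custom time range,
# step through the requested number of result pages, and collect the post
# text and timestamps with BeautifulSoup before writing them to CSV.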
def scrape_each_query(keyword, date, start, end, page, query):
    real_keyword = keyword
    keyword = urllib.parse.quote(urllib.parse.quote(keyword))
    # login_url = 'http://m.weibo.com/'
    # driver = webdriver.Chrome()
    # driver.get(login_url)
    # driver.implicitly_wait(2)
    # string = '登录'
    # driver.find_element_by_link_text ( string.decode('utf-8') ).click()
    # driver.implicitly_wait(2)
    # driver.find_element_by_link_text(string.decode('utf-8') ).click()
    # savedCookies = driver.get_cookies()
    # # login code
    # pickle.dump(driver.get_cookies() , open("chrome.pkl","wb"))
    # driver.close()
    all_content = []
    all_time = []
#   profile = FirefoxProfile(r"C:\Users\keith.yuen\AppData\Roaming\Mozilla\Firefox\Profiles\ciyiapug.default-release")
#   driver = webdriver.Firefox(profile)
#   co = webdriver.ChromeOptions()
#   co.add_argument('user-data-dir=/Users/xuzhouyin/Library/Application Support/Google/Chrome/')
    driver = webdriver.Chrome(executable_path=r"C:\Users\some.name\chromedriver_win32\chromedriver.exe")
    url = base_url + keyword + "&typeall=1&suball=1&timescope=custom:" + start + ":" + end + "&page=" + "1"
    driver.get(url)
    systime.sleep(5)
    for i in range(int(page)):
        url = base_url + keyword + "&typeall=1&suball=1&timescope=custom:" + start + ":" + end + "&page=" + str(i + 1)
        # url = "http://s.weibo.com/weibo/%25E5%2585%2583%25E6%2597%25A6&typeall=1&suball=1&timescope=custom:2016-12-31:2016-12-31&Refer=g"
        # chrome_options = Options()
        # chrome_options.add_argument("~/Library/Application Support/Google/Chrome/Default");
        # co = webdriver.ChromeOptions()
        # co.add_argument('/Users/xuzhouyin/Library/Application\ Support/Google/Chrome/Default')
        
        # for cookie in pickle.load(open("chrome.pkl", "rb")):
        #   driver.add_cookie(cookie)
        driver.get(url)
        
        # driver.magage().add_cookie(savedCookies)
        page_source = driver.page_source
        soup = BeautifulSoup(page_source, "html.parser")
        content = soup.findAll("p", { "class" : "comment_txt" })
        time = soup.findAll("a", { "class" : "W_textb" })
        
        for each in content:
            all_content.append(each.get_text())
        for each in time:
            each = each.get_text()
            post_time = ""
            # Posts from the current year are shown as "M月D日 HH:MM";
            # older posts already carry a full "YYYY-MM-DD" date.
            if "月" in each:
                post_time = str(datetime.datetime.now().year) + "-" + each[0:each.index("月")] + "-" + each[(each.index("月") + 1):each.index("日")]
            else:
                post_time = each[0:each.index(" ")]
            all_time.append(post_time)
    driver.close()
    save_to_csv(file + str(file_index), real_keyword, date, all_content, all_time, query)

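# Dump the collected posts into ./output/<filename>.csv as UTF-16
# (the ./output directory must already exist).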
def save_to_csv(filename, keyword, date, content, time, query):
    with open('./output/' + filename + '.csv', 'wb') as csvfile:
        spamwriter = csv.writer(csvfile, dialect='excel', encoding='utf-16')
        spamwriter.writerow(["query", "Post ID", "keyword", "event Date", "Post Content", "Post Time"])
        for i in range(len(content)):
            spamwriter.writerow([query, i + 1, keyword, date, content[i], time[i]])

scrape()

1 Answer

Stack Overflow user

Answered on 2020-08-20 16:21:04

I think the real problem you are facing is how to log in successfully, or alternatively how to avoid logging in inside the script at all.

From your code it is hard to tell whether the login succeeded, or why it failed.

So here is another idea for solving the problem: use the browser with its default user data/profile so that no login is needed.

Note: this presupposes that you have already logged in to Weibo manually in the browser you use every day.

For more information, see a demo.
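As a concrete illustration, here is a minimal sketch of that idea using Selenium's ChromeOptions. The user-data-dir path and profile name are assumptions: substitute the "Profile Path" shown on chrome://version in the browser you normally use, and close all other Chrome windows first, since two processes cannot share the same user data directory.

from selenium import webdriver
from selenium.webdriver.chrome.options import Options

options = Options()
# Reuse the everyday Chrome profile that is already logged in to Weibo.
# Hypothetical path -- replace with the "Profile Path" from chrome://version.
options.add_argument(r"user-data-dir=C:\Users\some.name\AppData\Local\Google\Chrome\User Data")
options.add_argument("profile-directory=Default")

driver = webdriver.Chrome(
    executable_path=r"C:\Users\some.name\chromedriver_win32\chromedriver.exe",
    options=options,
)
# Sample search using the same URL scheme as the question's commented-out example.
driver.get("https://s.weibo.com/weibo/%E5%85%83%E6%97%A6")

If the login pop-up no longer appears, the rest of the scraping loop can stay exactly as it is: the cookies stored in the profile are sent automatically with every driver.get().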

Votes: 0
The original content of this page was provided by Stack Overflow; the translation was supplied by Tencent Cloud's IT-domain translation engine.
Original link:

https://stackoverflow.com/questions/63484477
