有什么明确的原因可以解释这种情况吗:当目标用户不是唐纳德·特朗普时,我可以正常地把推文拉取到 CSV 文件中;但当我把 screen_name 换成唐纳德·特朗普时,却最多只返回 200 条推文?
你可以在这里找到代码。(例如,对于JLo,它可以完美地工作...)
def get_all_tweets(screen_name):
    """Download a user's recent tweets and save them to '<screen_name>_tweets.csv'.

    Twitter only allows access to a user's most recent ~3240 tweets with this
    method, so this pages backwards in batches of 200 until the API returns an
    empty batch.
    """
    # Authorize with Twitter and initialize tweepy. The credential names
    # (consumer_key, etc.) are expected to be defined elsewhere in this module.
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # Accumulates every tweet fetched across all pages.
    alltweets = []

    # Initial request for the most recent tweets (200 is the maximum allowed count).
    new_tweets = api.user_timeline(screen_name=screen_name, count=200, tweet_mode='extended')
    alltweets.extend(new_tweets)

    # Keep grabbing tweets until there are none left to grab. Guarding on the
    # last batch also avoids an IndexError on alltweets[-1] when the account
    # has no tweets at all.
    while new_tweets:
        # max_id is inclusive, so subtract one to avoid re-fetching duplicates.
        oldest = alltweets[-1].id - 1
        # BUGFIX: the original had `print` on its own line followed by a bare
        # string expression — valid Python 3, but it prints nothing.
        print("getting tweets before %s" % oldest)
        new_tweets = api.user_timeline(screen_name=screen_name, count=200,
                                       max_id=oldest, tweet_mode='extended')
        alltweets.extend(new_tweets)
        print("...%s tweets downloaded so far" % len(alltweets))

    # Transform the tweepy tweets into a 2D array that will populate the csv.
    outtweets = [[tweet.id_str, tweet.created_at, tweet.full_text.replace("\n", "")]
                 for tweet in alltweets]

    # newline='' is required by the csv module to avoid blank lines on Windows.
    with open('%s_tweets.csv' % screen_name, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "full_text"])
        writer.writerows(outtweets)
if __name__ == '__main__':
# pass in the username of the account you want to download
get_all_tweets("realDonaldTrump")发布于 2020-03-17 08:51:20
您的实现可以工作;但是,您的Twitter API应用程序可能会被限制。在Twitter的API中阅读有关rate limiting的信息。
通常,当您像这样从第三方API抓取数据时,您希望将结果持久化到某个可靠的地方(在您的情况下,保存在文件系统上的CSV文件中,这对于您的问题范围来说可能没什么问题),这样您就可以再次查询API,以检索在以前的查询中可能没有接收到的任何数据。
我将在下面提供一个简单的示例,说明您可以重新设计您的应用程序以使其以这种方式运行。简而言之,在每次执行时,下面的示例将修改任何预先存在的CSV,以便它预先考虑比CSV中最新的已知记录更新的结果,并附加比CSV中最旧的已知记录更旧的结果。
下面的例子还演示了如何使用Tweepy的cursors遍历分页的tweet数据。
import csv
import logging
import os
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import Optional

import tweepy
@dataclass
class TweetIdRange:
    """Summary of the tweet IDs present in a tweets CSV file.

    ``newest_id`` and ``oldest_id`` are ``None`` when the file contains no
    data rows (only the header), so their annotations are Optional — the
    original annotated them as plain ``int`` despite being filled with None.
    """
    count: int                 # number of data rows seen
    newest_id: Optional[int]   # largest tweet ID, or None if no rows
    oldest_id: Optional[int]   # smallest tweet ID, or None if no rows
def get_tweets_file_id_range(tweets_file_name: Path) -> TweetIdRange:
    """Scan a tweets CSV and report its row count and newest/oldest tweet IDs.

    The file must start with a header row; a RuntimeError is raised otherwise.
    Tweet IDs are read from the first column of each data row.
    """
    id_range = TweetIdRange(count=0, newest_id=None, oldest_id=None)
    # newline='' is the csv module's recommended mode for reading as well.
    with open(tweets_file_name, newline='') as tweets_file:
        reader = csv.reader(tweets_file)
        try:
            next(reader)  # consume (and require) the header row
        except StopIteration:
            raise RuntimeError(f'Tweets file ({tweets_file_name}) does not contain any rows; '
                               f'expected at least one header row')
        # A single loop with None guards replaces the original's separate
        # first-row special case — same result, less duplication.
        for row in reader:
            row_id = int(row[0])
            id_range.count += 1
            if id_range.oldest_id is None or row_id < id_range.oldest_id:
                id_range.oldest_id = row_id
            if id_range.newest_id is None or row_id > id_range.newest_id:
                id_range.newest_id = row_id
    return id_range
def write_tweets(tweets_file_name: Path, screen_name: str,
                 since_id: Optional[int] = None, max_id: Optional[int] = None):
    """Fetch a user's timeline page by page and write the rows to a CSV file.

    Relies on a module-level tweepy ``api`` object. ``since_id`` is an
    exclusive lower bound (id > since_id); ``max_id`` is an inclusive upper
    bound (id <= max_id). Returns the number of tweets written.
    """
    user_timeline_options = {
        'count': 100,
        'tweet_mode': 'extended',
        'screen_name': screen_name,
        'since_id': since_id,  # >
        'max_id': max_id       # <=
    }
    tweet_count = 0
    # BUGFIX: newline='' is required by the csv module; without it the output
    # gains blank lines on Windows.
    with open(tweets_file_name, 'w', newline='') as tweets_file:
        writer = csv.writer(tweets_file)
        for page in tweepy.Cursor(api.user_timeline, **user_timeline_options).pages():
            tweets = [[tweet.id_str, tweet.created_at, tweet.full_text.replace('\n', '')]
                      for tweet in page]
            tweet_count += len(tweets)
            writer.writerows(tweets)
    return tweet_count
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
screen_name = sys.argv[1] if len(sys.argv) > 1 else None
if not screen_name:
print('error: missing required screen name positional argument', file=sys.stderr)
sys.exit(1)
tweets_file_name = Path(f'{screen_name}_tweets.csv')
if not tweets_file_name.exists():
logging.info('%s: initializing new file', tweets_file_name)
with open(tweets_file_name, 'w') as existing_tweets_file:
writer = csv.writer(existing_tweets_file)
writer.writerow(['id', 'created_at', 'full_text'])
tweets_id_range = get_tweets_file_id_range(tweets_file_name)
logging.info('%s: tweet ID range (before): count: %s; newest: %s; oldest: %s',
tweets_file_name, tweets_id_range.count, tweets_id_range.newest_id, tweets_id_range.oldest_id)
newest_tweets_file_name = tweets_file_name.with_suffix('.newest' + ''.join(tweets_file_name.suffixes))
new_tweet_count = write_tweets(newest_tweets_file_name, screen_name, since_id=tweets_id_range.newest_id)
logging.info('%s: prepending %s new tweets', tweets_file_name, new_tweet_count)
oldest_tweets_file_name = tweets_file_name.with_suffix('.oldest' + ''.join(tweets_file_name.suffixes))
if tweets_id_range.oldest_id:
old_tweet_count = write_tweets(oldest_tweets_file_name, screen_name, max_id=tweets_id_range.oldest_id - 1)
logging.info('%s: appending %s old tweets', tweets_file_name, old_tweet_count)
swap_tweets_file_name = tweets_file_name.with_suffix('.swap' + ''.join(tweets_file_name.suffixes))
with open(swap_tweets_file_name, 'w') as swap_file, \
open(tweets_file_name, 'r') as existing_file:
swap_file.write(existing_file.readline())
with open(newest_tweets_file_name, 'r') as newest_tweets_file:
for line in newest_tweets_file:
swap_file.write(line)
for line in existing_file:
swap_file.write(line)
if oldest_tweets_file_name.exists():
with open(oldest_tweets_file_name, 'r') as oldest_tweets_file:
for line in oldest_tweets_file:
swap_file.write(line)
swap_tweets_file_name.rename(tweets_file_name)
os.remove(newest_tweets_file_name)
if oldest_tweets_file_name.exists():
os.remove(oldest_tweets_file_name)
tweets_id_range = get_tweets_file_id_range(tweets_file_name)
logging.info('%s: tweet ID range (after): count: %s; newest: %s; oldest: %s',
tweets_file_name, tweets_id_range.count, tweets_id_range.newest_id, tweets_id_range.oldest_id)您将注意到,连续运行此程序将返回更多数据,直到您达到Twitter的API所能提供的范围为止。
▶ python test.py realDonaldTrump
INFO:root:realDonaldTrump_tweets.csv: tweet ID range (before): count: 350; newest: 1239685852093169664; oldest: 1235005879226961924
INFO:root:realDonaldTrump_tweets.csv: prepending 0 new tweets
INFO:root:realDonaldTrump_tweets.csv: appending 1799 old tweets
INFO:root:realDonaldTrump_tweets.csv: tweet ID range (after): count: 2149; newest: 1239685852093169664; oldest: 1214517113437720576
▶ python test.py realDonaldTrump
INFO:root:realDonaldTrump_tweets.csv: tweet ID range (before): count: 2149; newest: 1239685852093169664; oldest: 1214517113437720576
INFO:root:realDonaldTrump_tweets.csv: prepending 0 new tweets
INFO:root:realDonaldTrump_tweets.csv: appending 1045 old tweets
INFO:root:realDonaldTrump_tweets.csv: tweet ID range (after): count: 3194; newest: 1239685852093169664; oldest: 1203103574781317121
▶ python test.py realDonaldTrump
INFO:root:realDonaldTrump_tweets.csv: tweet ID range (before): count: 3194; newest: 1239685852093169664; oldest: 1203103574781317121
INFO:root:realDonaldTrump_tweets.csv: prepending 0 new tweets
INFO:root:realDonaldTrump_tweets.csv: appending 0 old tweets
INFO:root:realDonaldTrump_tweets.csv: tweet ID range (after): count: 3194; newest: 1239685852093169664; oldest: 1203103574781317121
这个例子的目的并不是提供一个生产就绪的实现;但是,它应该能给你一些新的思路,帮助你解决你在问题中描述的那个问题。
https://stackoverflow.com/questions/60713850
复制相似问题