-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: TwitterScaper.py
35 lines (26 loc) · 1015 Bytes
/
TwitterScaper.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
"""Scrape recent tweets matching a keyword and save them to a CSV file.

Uses the Twitter/X v2 recent-search endpoint (via tweepy) and pandas for
the CSV export. Requires a valid API bearer token.
"""
import tweepy
import pandas as pd

# Twitter API credentials
# SECURITY NOTE: replace the placeholder before running; avoid committing a
# real token to source control -- read it from an environment variable instead.
bearer_token = "Your Token"

# Authenticate with the Twitter API using Bearer Token
client = tweepy.Client(bearer_token=bearer_token)

query = "UNH"     # search keyword
max_results = 50  # recent-search accepts 10-100 results per request

print(f"Scraping tweets containing the keyword '{query}'...")
response = client.search_recent_tweets(
    query=query,
    max_results=max_results,
    tweet_fields=["created_at", "text", "author_id"],
)

# response.data is None when nothing matched; fall back to an empty list so
# the comprehension (and the resulting DataFrame) handle that case cleanly.
# One row per tweet: timestamp, author id, text, and a canonical status URL.
data = [
    [
        tweet.created_at,
        tweet.author_id,
        tweet.text,
        f"https://twitter.com/i/web/status/{tweet.id}",
    ]
    for tweet in (response.data or [])
]

# Convert the scraped data to a DataFrame
df = pd.DataFrame(data, columns=["Date", "Author ID", "Content", "URL"])

# Save the data to a CSV file
output_file = "tweets_with_x.csv"
df.to_csv(output_file, index=False)
print(f"Scraping complete! {len(df)} tweets saved to '{output_file}'.")