-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.py
More file actions
134 lines (104 loc) · 4.08 KB
/
main.py
File metadata and controls
134 lines (104 loc) · 4.08 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
from fastapi import FastAPI, Response
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import re
import twint
import nest_asyncio
import numpy as np
from fastapi.middleware.cors import CORSMiddleware
from langdetect import detect
# Patch the current event loop so twint (which drives asyncio internally)
# can run inside FastAPI's already-running loop without "loop is running" errors.
nest_asyncio.apply()
app = FastAPI()
# NOTE(review): wildcard "*" origins combined with allow_credentials=True is
# rejected by browsers under the CORS spec — consider an explicit origin list.
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Multilingual 1-5 star sentiment model; loaded once at import time so every
# request reuses the same weights (first start downloads/caches the model).
tokenizer = AutoTokenizer.from_pretrained(
'nlptown/bert-base-multilingual-uncased-sentiment')
model = AutoModelForSequenceClassification.from_pretrained(
'nlptown/bert-base-multilingual-uncased-sentiment')
def get_tweets_by_keyword(keyword, fetch_rate=50):
    """Search recent English tweets matching *keyword*.

    Runs a twint search (up to *fetch_rate* results) and returns the
    DataFrame twint stores in its pandas backend.
    """
    config = twint.Config()
    config.Pandas = True
    config.Lang = 'en'
    config.Search = keyword
    config.Limit = fetch_rate
    twint.run.Search(config)
    # twint writes results into this module-level DataFrame as a side effect.
    return twint.storage.panda.Tweets_df
def get_users_tweets(username, fetch_rate=50):
    """Fetch up to *fetch_rate* recent English tweets posted by *username*.

    Uses twint's search with a "from:@user" query and returns the DataFrame
    twint stores in its pandas backend.
    """
    config = twint.Config()
    config.Pandas = True
    config.Lang = 'en'
    config.Search = 'from:@' + str(username)
    config.Limit = fetch_rate
    twint.run.Search(config)
    # twint writes results into this module-level DataFrame as a side effect.
    return twint.storage.panda.Tweets_df
def clean_tweets(tweet):
    """Normalize a raw tweet into lowercase alphanumeric words.

    Strips mentions, hashtags, hyperlinks, bracketed spans and common
    punctuation, replaces every remaining non-alphanumeric character with a
    space, and collapses runs of whitespace. May return '' when the tweet
    contains nothing but removed tokens.

    Args:
        tweet: raw tweet text.
    Returns:
        Cleaned, single-spaced lowercase string.
    """
    tweet = tweet.lower()
    tweet = re.sub(r"@[A-Za-z0-9_]+", "", tweet)  # remove mentions
    tweet = re.sub(r"#[A-Za-z0-9_]+", "", tweet)  # remove hashtags
    tweet = re.sub(r"http\S+", "", tweet)         # remove http(s) links
    # Dot escaped (was "www.\S+", where '.' matched ANY character).
    tweet = re.sub(r"www\.\S+", "", tweet)        # remove bare www links
    tweet = re.sub(r"[()!?]", "", tweet)          # remove common punctuation
    # Raw string fixes the invalid '\[' escape in the original literal.
    tweet = re.sub(r"\[.*?\]", "", tweet)         # remove bracketed spans
    tweet = re.sub(r"[^a-z0-9]", " ", tweet)      # non-alphanumerics -> space
    # join(split()) both collapses internal whitespace and trims the ends,
    # so the original lstrip().rstrip() is redundant.
    return ' '.join(tweet.split())
def get_sentiment_scores(tweet):
    """Score *tweet* with the 5-star sentiment model.

    Returns the argmax class of the model logits shifted to 1-based,
    i.e. an int in 1..5 (1 = most negative, 5 = most positive).
    """
    encoded = tokenizer.encode(tweet, return_tensors='pt')
    logits = model(encoded).logits
    return int(torch.argmax(logits)) + 1
def detect_lang(tweet):
    """Best-effort language detection for *tweet*.

    Returns the ISO language code from langdetect, or the sentinel 'nan'
    when detection fails (langdetect raises on empty/undetectable text).
    """
    try:
        return detect(tweet)
    # Narrowed from a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt.
    except Exception:
        return 'nan'
@app.get('/keyword/{keyword}/{fetchrate}')
def trend_sentiments(keyword: str, fetchrate: int):
    """Score sentiment for recent tweets matching *keyword*.

    Fetches up to *fetchrate* tweets, cleans them, keeps the English ones,
    and returns a JSON payload of cleaned text + 1-5 sentiment scores.
    Returns the string 'nan' when no tweets were found.
    """
    # Annotating fetchrate as int: FastAPI path params default to str, and
    # twint's Limit expects a number — this also gives free 422 validation.
    tweets_df = get_tweets_by_keyword(keyword, fetchrate)
    if len(tweets_df) == 0:
        return 'nan'
    tweets_df['cleaned_tweets'] = tweets_df['tweet'].apply(clean_tweets)
    # Tweets that were only mentions/links/punctuation clean to '' — drop them.
    tweets_df['cleaned_tweets'] = tweets_df['cleaned_tweets'].replace('', np.nan)
    tweets_df = tweets_df.dropna(axis=0, subset=['cleaned_tweets'])
    tweets_df['lang'] = tweets_df['cleaned_tweets'].apply(detect_lang)
    # .copy() prevents pandas SettingWithCopyWarning on the assignment below.
    input_df = tweets_df.loc[tweets_df['lang'] == 'en'].copy()
    input_df['sentiment_scores'] = input_df['cleaned_tweets'].apply(
        get_sentiment_scores)
    res_json = input_df[['cleaned_tweets', 'sentiment_scores']].to_json()
    print(len(tweets_df), len(input_df))
    return Response(content=res_json, media_type="application/json")
@app.get('/user/{keyword}/{fetchrate}')
def user_sentiments(keyword: str, fetchrate: int):
    """Score sentiment for a user's recent tweets.

    Same pipeline as the /keyword endpoint but fetches tweets posted by the
    given username. Returns JSON of cleaned text + 1-5 sentiment scores, or
    the string 'nan' when no tweets were found.

    Renamed from a duplicate `trend_sentiments` definition that shadowed the
    /keyword handler's function name (route path is unchanged).
    """
    # fetchrate annotated as int: FastAPI path params default to str, and
    # twint's Limit expects a number — this also gives free 422 validation.
    tweets_df = get_users_tweets(keyword, fetchrate)
    if len(tweets_df) == 0:
        return 'nan'
    tweets_df['cleaned_tweets'] = tweets_df['tweet'].apply(clean_tweets)
    # Tweets that were only mentions/links/punctuation clean to '' — drop them.
    tweets_df['cleaned_tweets'] = tweets_df['cleaned_tweets'].replace('', np.nan)
    tweets_df = tweets_df.dropna(axis=0, subset=['cleaned_tweets'])
    tweets_df['lang'] = tweets_df['cleaned_tweets'].apply(detect_lang)
    # .copy() prevents pandas SettingWithCopyWarning on the assignment below.
    input_df = tweets_df.loc[tweets_df['lang'] == 'en'].copy()
    input_df['sentiment_scores'] = input_df['cleaned_tweets'].apply(
        get_sentiment_scores)
    res_json = input_df[['cleaned_tweets', 'sentiment_scores']].to_json()
    return Response(content=res_json, media_type="application/json")