-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathutils.py
More file actions
executable file
·113 lines (75 loc) · 2.59 KB
/
utils.py
File metadata and controls
executable file
·113 lines (75 loc) · 2.59 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import sent_tokenize
wordnet_lemmatizer = WordNetLemmatizer()
from nltk.tokenize import word_tokenize
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
from stop_words import get_stop_words
from sklearn.datasets import load_files
import numpy as np
from nltk.tokenize import RegexpTokenizer
import csv
# ----------------------------------------------- Text Manipulation ----------------------------------------------------
def get_sw():
    """Return the combined English stop-word list (NLTK + stop_words package)."""
    nltk_words = stopwords.words('english')
    extra_words = get_stop_words("english")
    return nltk_words + extra_words
def get_sentences(text):
    """Split raw byte `text` into a list of sentences with NLTK.

    Non-ASCII bytes are silently dropped before tokenizing.
    """
    cleaned = text.decode('ascii', 'ignore')
    return sent_tokenize(cleaned)
def get_words(text):
    """Split raw byte `text` into a list of word tokens with NLTK.

    Non-ASCII bytes are silently dropped before tokenizing.
    """
    cleaned = text.decode('ascii', 'ignore')
    return word_tokenize(cleaned)
def lemmatize_text(text):
    """Normalize raw byte `text` for bag-of-words modeling.

    Decodes as UTF-8 (dropping bad bytes), keeps only \\w+ tokens, removes
    stop words, verb-lemmatizes each remaining token, and returns the
    space-joined result re-encoded as UTF-8 bytes.
    """
    decoded = text.decode('utf8', 'ignore')
    tokens = RegexpTokenizer(r'\w+').tokenize(decoded)
    # Build the stop-word set ONCE: the previous filter(lambda ...) re-ran
    # get_sw() — loading both stop-word lists — for every single token.
    sw = set(get_sw())
    # List comprehension instead of filter(): under Python 3, filter()
    # returns an iterator, so the old len()/index mutation loop would raise.
    lemmas = [wordnet_lemmatizer.lemmatize(tok, pos="v")
              for tok in tokens if tok not in sw]
    return " ".join(lemmas).encode('utf-8')
# -------------------------------------------------- Data Loading ------------------------------------------------------
def load_data(file = "./data/labeledTrainData.tsv", test_size = 0.3, seed = 7):
    """Read a labeled TSV (columns: id, label, text) and split it.

    file      -- path to a TSV file whose first row is a header
    test_size -- fraction of rows held out for the test split
    seed      -- random_state forwarded to train_test_split

    Returns (x_train, x_test, y_train, y_test) as produced by
    sklearn.model_selection.train_test_split.
    """
    x = []
    y = []
    # NOTE(review): 'rb' + csv.reader is the Python 2 convention; under
    # Python 3 the csv module needs a text-mode handle opened with
    # newline='' — confirm which interpreter this project targets.
    # `with` guarantees the file is closed even if a row fails to parse
    # (the old explicit f.close() leaked the handle on any exception).
    with open(file, 'rb') as f:
        reader = csv.reader(f, delimiter="\t")
        next(reader, None)  # skip the header row (replaces the `first` flag)
        for row in reader:
            x.append(row[2])       # review text
            y.append(int(row[1]))  # integer sentiment label
    return train_test_split(x, y, test_size=test_size, random_state=seed)
def load_data2():
    """Load the aclImdb train/test folders via sklearn's load_files.

    Returns (x_train, x_test, y_train, y_test): raw document bytes and
    integer category targets for each split.
    """
    train_bundle = load_files("./data/aclImdb/train/")
    test_bundle = load_files("./data/aclImdb/test/")
    return train_bundle.data, test_bundle.data, train_bundle.target, test_bundle.target
def extract_features(x, xt, y, yt, vectorizer):
    """Fit `vectorizer` on all texts, then transform both splits.

    x, xt -- train / test text collections
    y, yt -- train / test label collections
    vectorizer -- any object exposing fit() and transform() whose
                  transform result has a .toarray() method

    Returns (x, xt, y, yt) where the texts have become dense numpy
    feature matrices and the labels numpy arrays.

    NOTE(review): fitting on train+test together leaks test vocabulary
    into the training representation — confirm this is intentional.
    """
    vectorizer.fit(x + xt)
    train_features = np.array(vectorizer.transform(x).toarray())
    test_features = np.array(vectorizer.transform(xt).toarray())
    return train_features, test_features, np.array(y), np.array(yt)