-
Notifications
You must be signed in to change notification settings - Fork 6
Expand file tree
/
Copy pathpreprocess.py
More file actions
198 lines (141 loc) · 5.83 KB
/
preprocess.py
File metadata and controls
198 lines (141 loc) · 5.83 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
"""
The file preprocesses the files/train.txt and files/test.txt files.
I requires the dependency based embeddings by Levy et al.. Download them from his website and change
the embeddingsPath variable in the script to point to the unzipped deps.words file.
"""
from __future__ import print_function
import numpy as np
import gzip
import os
import sys
if (sys.version_info > (3, 0)):
import pickle as pkl
else: #Python 2.7 imports
import cPickle as pkl
import codecs
mode = 'ent_candidate'
print("mode: " + mode)
pkl_path = 'pkl/bioc_rel_%s.pkl.gz' % mode
root_dir = 'data/org_ent'
fns = ['training.txt', 'development.txt', 'test.txt']
files = [os.path.join(root_dir, fn) for fn in fns]
print(files)
#We download English word embeddings from here https://www.cs.york.ac.uk/nlp/extvec/
# embeddingsPath = 'embeddings/wiki_extvec.gz'
embeddingsPath = os.path.join(os.environ['WE_PATH'], 'glove.6B.300d.txt')
# embeddingsPath = os.path.join(os.environ['WE_PATH'], 'pmc.w2v.vector.wf=7.itr=1.layer=100')
#Mapping of the labels to integers
rel2id = {'NA': 0,
'CPR:3': 1,
'CPR:4': 2,
'CPR:5': 3,
'CPR:6': 4,
'CPR:9': 5,
}
words = {}
maxSentenceLen = [0] * len(files)
distanceMapping = {'PADDING': 0, 'LowerMin': 1, 'GreaterMax': 2}
minDistance = -30
maxDistance = 30
for dis in range(minDistance, maxDistance+1):
distanceMapping[dis] = len(distanceMapping)
def createMatrices(file, word2Idx, maxSentenceLen=100):
    """Create label, token and position matrices for one data file.

    Each line of *file* is expected to be tab-separated with at least six
    fields: label, _, _, pos1, pos2, sentence (space-tokenized).

    :param file: path to a tab-separated relation file
    :param word2Idx: mapping token -> embedding row index
    :param maxSentenceLen: sentences are zero-padded/truncated to this length
    :return: tuple ``(labels, tokenMatrix, positionMatrix1, positionMatrix2)``
             of int32 numpy arrays, one row per input line
    """
    labels = []
    positionMatrix1 = []
    positionMatrix2 = []
    tokenMatrix = []

    # Context manager so the handle is closed deterministically
    # (the original relied on the garbage collector to close it).
    with open(file) as fIn:
        for line in fIn:
            splits = line.strip().split('\t')
            label = splits[0]
            pos1 = splits[3]
            pos2 = splits[4]
            sentence = splits[5]
            tokens = sentence.split(" ")

            tokenIds = np.zeros(maxSentenceLen)
            positionValues1 = np.zeros(maxSentenceLen)
            positionValues2 = np.zeros(maxSentenceLen)

            for idx in range(0, min(maxSentenceLen, len(tokens))):
                tokenIds[idx] = getWordIdx(tokens[idx], word2Idx)

                # Offsets of this token relative to the two entity positions.
                distance1 = idx - int(pos1)
                distance2 = idx - int(pos2)

                if distance1 in distanceMapping:
                    positionValues1[idx] = distanceMapping[distance1]
                elif distance1 <= minDistance:
                    positionValues1[idx] = distanceMapping['LowerMin']
                else:
                    positionValues1[idx] = distanceMapping['GreaterMax']

                if distance2 in distanceMapping:
                    positionValues2[idx] = distanceMapping[distance2]
                elif distance2 <= minDistance:
                    positionValues2[idx] = distanceMapping['LowerMin']
                else:
                    positionValues2[idx] = distanceMapping['GreaterMax']

            tokenMatrix.append(tokenIds)
            positionMatrix1.append(positionValues1)
            positionMatrix2.append(positionValues2)
            labels.append(rel2id[label])

    return (np.array(labels, dtype='int32'),
            np.array(tokenMatrix, dtype='int32'),
            np.array(positionMatrix1, dtype='int32'),
            np.array(positionMatrix2, dtype='int32'))
def getWordIdx(token, word2Idx):
    """Return the embedding index for *token* from the *word2Idx* table.

    Tries the exact token first, then its lowercased form; falls back to
    the index of ``"UNKNOWN_TOKEN"`` when neither is present.
    """
    for candidate in (token, token.lower()):
        if candidate in word2Idx:
            return word2Idx[candidate]
    return word2Idx["UNKNOWN_TOKEN"]
# :: Scan all splits once to collect the vocabulary and the longest sentence ::
for fileIdx in range(len(files)):
    file = files[fileIdx]
    print(file)
    # Context manager closes the handle (the original leaked it);
    # the label column is not needed here, so it is no longer read.
    with open(file) as fIn:
        for line in fIn:
            splits = line.strip().split('\t')
            sentence = splits[5]
            tokens = sentence.split(" ")
            maxSentenceLen[fileIdx] = max(maxSentenceLen[fileIdx], len(tokens))
            for token in tokens:
                words[token.lower()] = True
print("Max Sentence Lengths: ", maxSentenceLen)
# :: Read in word embeddings ::
word2Idx = {}
wordEmbeddings = []

print("Load pre-trained embeddings file")
# Open with an explicit encoding so iteration yields unicode text on both
# Python 2 and 3.  The original opened without an encoding and then called
# line.decode('utf-8'), which raises AttributeError on Python 3 (str has no
# .decode).  The context manager also guarantees the handle is closed.
with codecs.open(embeddingsPath, 'r', encoding='utf-8') as fEmbeddings:
    for line in fEmbeddings:
        split = line.strip().split(" ")
        word = split[0]

        if len(word2Idx) == 0:  # first line: add padding + unknown entries
            word2Idx["PADDING_TOKEN"] = len(word2Idx)
            vector = np.zeros(len(split) - 1)  # zero vector for 'PADDING' word
            wordEmbeddings.append(vector)

            word2Idx["UNKNOWN_TOKEN"] = len(word2Idx)
            vector = np.random.uniform(-0.25, 0.25, len(split) - 1)
            wordEmbeddings.append(vector)

        # Keep only vectors for words that actually occur in the corpus.
        if word.lower() in words:
            vector = np.array([float(num) for num in split[1:]])
            wordEmbeddings.append(vector)
            word2Idx[word] = len(word2Idx)

wordEmbeddings = np.array(wordEmbeddings)

print("Embeddings shape: ", wordEmbeddings.shape)
print("Len words: ", len(words))
# :: Create token matrices and pickle everything ::
# All splits are padded to the same fixed length (170) so they can share one
# model; the per-split maxima printed above can be used to tune this value.
sent_length = 170
train_set = createMatrices(files[0], word2Idx, sent_length)
dev_set = createMatrices(files[1], word2Idx, sent_length)
test_set = createMatrices(files[2], word2Idx, sent_length)

print("training token matrix shape: " + str(train_set[1].shape))
print("dev token matrix shape: " + str(dev_set[1].shape))
print("testing token matrix shape: " + str(test_set[1].shape))

data = {'wordEmbeddings': wordEmbeddings, 'word2Idx': word2Idx,
        'train_set': train_set, 'dev_set': dev_set, 'test_set': test_set}

# Context manager guarantees the gzip stream is flushed and closed even if
# pickling raises (the original used an explicit open/close pair).
with gzip.open(pkl_path, 'wb') as f:
    pkl.dump(data, f)
print("Data stored in pkl folder")