-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtokenizer.go
More file actions
324 lines (287 loc) · 8.68 KB
/
tokenizer.go
File metadata and controls
324 lines (287 loc) · 8.68 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
package tanuki
import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
	"unicode/utf8"
)
// Options is a struct that allows you to change the parsing behavior.
//
// Default options have been provided under a variable named "DefaultOptions".
type Options struct {
	// DefaultOptions value: " _.&+,|"
	// Each character in this string will be evaluated as a delimiter during parsing.
	// The defaults are fairly sane, but in some cases you may want to change them.
	// For example in the following filename: DRAMAtical Murder Episode 1 - Data_01_Login
	// With the defaults, the "_" characters would be replaced with spaces, but this may
	// not be desired behavior.
	AllowedDelimiters string
	// DefaultOptions value: []string{}
	// These strings will be removed from the filename.
	IgnoredStrings []string
	// DefaultOptions value: true
	// Determines if the episode number will be parsed into the Elements struct.
	ParseEpisodeNumber bool
	// DefaultOptions value: true
	// Determines if the episode title will be parsed into the Elements struct.
	ParseEpisodeTitle bool
	// DefaultOptions value: true
	// Determines if the file extension will be parsed into the Elements struct.
	ParseFileExtension bool
	// DefaultOptions value: true
	// Determines if the release group will be parsed into the Elements struct.
	ParseReleaseGroup bool
}
// tokenizer walks a filename and splits it into categorized tokens
// (brackets, delimiters, identifiers, unknowns) stored in tokens.
type tokenizer struct {
	filename       string          // raw filename being tokenized
	options        Options         // parsing options (delimiters, toggles)
	tokens         *tokens         // shared token list, mutated in place
	keywordManager *keywordManager // pre-identifies known keyword spans (via peek)
	elements       *Elements       // parsed elements, passed to the keyword manager
}
// addToken appends a token with the given category, content and
// enclosed flag to the tokenizer's token list.
func (t *tokenizer) addToken(cat int, content string, enclosed bool) {
	tkn := token{
		Category: cat,
		Content:  content,
		Enclosed: enclosed,
	}
	t.tokens.appendToken(tkn)
}
// tokenize splits the filename into bracket tokens and the runs of text
// between them. Text outside brackets and text inside brackets are both
// handed to tokenizeByPreidentified, with the enclosed flag indicating
// whether the run was inside a bracket pair.
func (t *tokenizer) tokenize() {
	// Supported opening/closing bracket pairs, including full-width CJK
	// brackets, which are multi-byte in UTF-8.
	brackets := [][]rune{
		{'(', ')'},
		{'[', ']'},
		{'{', '}'},
		{'\u300C', '\u300D'}, // 「 」
		{'\u300E', '\u300F'}, // 『 』
		{'\u3010', '\u3011'}, // 【 】
		{'\uFF08', '\uFF09'}, // （ ）
	}
	text := t.filename
	isBracketOpen := false
	var matchingBracket rune
	for len(text) > 0 {
		var bracketIndex int
		if !isBracketOpen {
			bracketIndex, matchingBracket = findFirstBracket(text, brackets)
		} else {
			// Looking for the matching bracket allows us to better handle
			// some rare cases with nested brackets.
			bracketIndex = strings.IndexRune(text, matchingBracket)
		}
		// Found a token before the bracket (or no bracket at all).
		if bracketIndex != 0 {
			if bracketIndex != -1 {
				t.tokenizeByPreidentified(text[:bracketIndex], isBracketOpen)
			} else {
				t.tokenizeByPreidentified(text, isBracketOpen)
			}
		}
		// Found bracket
		if bracketIndex != -1 {
			// BUGFIX: decode the full rune at bracketIndex. Several of the
			// supported brackets are multi-byte, so text[bracketIndex]
			// would yield only the first byte of the rune, and advancing
			// by +1 would slice the remaining text mid-rune.
			bracketRune, size := utf8.DecodeRuneInString(text[bracketIndex:])
			t.addToken(tokenCategoryBracket, string(bracketRune), true)
			isBracketOpen = !isBracketOpen
			text = text[bracketIndex+size:]
		} else { // Reached the end
			text = ""
		}
	}
}
// tokenizeByPreidentified asks the keyword manager for known keyword
// spans inside filename, emits those spans as identifier tokens, and
// tokenizes the gaps between them (and any trailing text) by delimiters.
func (t *tokenizer) tokenizeByPreidentified(filename string, enclosed bool) {
	knownSpans := t.keywordManager.peek(filename, t.elements)
	prevEnd := 0
	for _, span := range knownSpans {
		begin, end := span.beginPos, span.endPos
		// Tokenize the text between the pre-identified tokens.
		if prevEnd != begin && begin <= len(filename) {
			t.tokenizeByDelimiters(filename[prevEnd:begin], enclosed)
		}
		if end <= len(filename) {
			t.addToken(tokenCategoryIdentifier, filename[begin:end], enclosed)
			prevEnd = end
		}
	}
	// Tokenize the text after the pre-identified tokens (or all the text
	// if there were none).
	if prevEnd != len(filename) {
		t.tokenizeByDelimiters(filename[prevEnd:], enclosed)
	}
}
// tokenizeByDelimiters splits filename on the configured delimiter
// characters, emitting delimiter tokens for the separators and unknown
// tokens for the text between them, then validates the delimiter tokens.
func (t *tokenizer) tokenizeByDelimiters(filename string, enclosed bool) {
	// BUGFIX: with no configured delimiters the character class below
	// would be "[]", which is invalid regexp syntax and would make
	// MustCompile panic. Emit the whole text as one unknown token instead.
	if t.options.AllowedDelimiters == "" {
		if filename != "" {
			t.addToken(tokenCategoryUnknown, filename, enclosed)
		}
		t.validateDelimitertokens()
		return
	}
	// Build a character class with every delimiter escaped.
	var delimiters strings.Builder
	for _, delimiter := range t.options.AllowedDelimiters {
		delimiters.WriteByte('\\')
		delimiters.WriteRune(delimiter)
	}
	pattern := fmt.Sprintf("([%s])", delimiters.String())
	re := regexp.MustCompile(pattern)
	for _, subtext := range splitWith(re, filename, -1) {
		if subtext == "" {
			continue
		}
		if strings.Contains(t.options.AllowedDelimiters, subtext) {
			t.addToken(tokenCategoryDelimiter, subtext, enclosed)
		} else {
			t.addToken(tokenCategoryUnknown, subtext, enclosed)
		}
	}
	t.validateDelimitertokens()
}
// validateDelimitertokens post-processes delimiter tokens: it merges
// tokens that were over-split by non-space delimiters (single characters,
// numeric pairs like "01+02"), reclassifies delimiters surrounded by other
// delimiters, and finally removes every token marked invalid by a merge.
// NOTE(review): this relies on tokens holding pointers so that category
// changes here are visible in the shared list — confirm against the tokens
// type declaration.
func (t *tokenizer) validateDelimitertokens() {
	for _, tkn := range *t.tokens {
		if tkn.Category != tokenCategoryDelimiter {
			continue
		}
		delimiter := tkn.Content
		prevToken, _ := t.findPreviousValidToken(tkn)
		nextToken, _ := t.findNextValidToken(tkn)
		// Check for single-character tokens to prevent splitting group
		// names, keywords, episode number, etc.
		if delimiter != " " && delimiter != "_" {
			if t.isSingleCharacterToken((*prevToken)) {
				// Merge prev + delimiter, then keep absorbing following
				// unknown tokens (and repeated occurrences of the same
				// delimiter) into the merged token.
				nestedNextToken := *nextToken
				prevToken = t.appendTokenTo(tkn, prevToken)
				for t.isUnknownToken(nestedNextToken) {
					prevToken = t.appendTokenTo(&nestedNextToken, prevToken)
					// If we just consumed the original next token, mark the
					// shared copy invalid so it is dropped in the sweep below.
					if nestedNextToken.Content == nextToken.Content {
						nextToken.Category = tokenCategoryInvalid
					}
					holder, _ := t.findNextValidToken(&nestedNextToken)
					nestedNextToken = *holder
					if t.isDelimiterToken(nestedNextToken) && nestedNextToken.Content == delimiter {
						prevToken = t.appendTokenTo(&nestedNextToken, prevToken)
						holder, _ = t.findNextValidToken(&nestedNextToken)
						nestedNextToken = *holder
					}
					continue
				}
			}
		}
		if t.isSingleCharacterToken((*nextToken)) {
			// Merge prev + delimiter + single-character next token.
			prevToken = t.appendTokenTo(tkn, prevToken)
			t.appendTokenTo(nextToken, prevToken)
			continue
		}
		// Check for adjacent delimiters
		if t.isUnknownToken((*prevToken)) && t.isDelimiterToken((*nextToken)) {
			nextDelimiter := nextToken.Content
			if delimiter != nextDelimiter && delimiter != "," {
				if nextDelimiter == " " || nextDelimiter == "_" {
					prevToken = t.appendTokenTo(tkn, prevToken)
				}
			}
		} else if t.isDelimiterToken((*prevToken)) && t.isDelimiterToken((*nextToken)) {
			prevDelimiter := prevToken.Content
			nextDelimiter := nextToken.Content
			if prevDelimiter == nextDelimiter && prevDelimiter != delimiter {
				tkn.Category = tokenCategoryUnknown // e.g. "&" in "_&_"
			}
		}
		// Check for other special cases
		if delimiter == "&" || delimiter == "+" {
			if t.isUnknownToken((*prevToken)) && t.isUnknownToken((*nextToken)) {
				if isNumeric(prevToken.Content) && isNumeric(nextToken.Content) {
					prevToken = t.appendTokenTo(tkn, prevToken)
					t.appendTokenTo(nextToken, prevToken) // e.g. "01+02"
				}
			}
		}
	}
	// Sweep: rebuild the token list without tokens marked invalid above.
	var newTkns tokens
	for _, tkn := range *t.tokens {
		if tkn.Category != tokenCategoryInvalid {
			newTkns = append(newTkns, tkn)
		}
	}
	t.tokens.update(newTkns)
}
// findPreviousValidToken returns the nearest token before tkn that
// matches tokenFlagsValid, and whether one was found.
func (t *tokenizer) findPreviousValidToken(tkn *token) (*token, bool) {
	return t.tokens.findPrevious(*tkn, tokenFlagsValid)
}

// findNextValidToken returns the nearest token after tkn that matches
// tokenFlagsValid, and whether one was found.
func (t *tokenizer) findNextValidToken(tkn *token) (*token, bool) {
	return t.tokens.findNext(*tkn, tokenFlagsValid)
}
// isDelimiterToken reports whether tkn is a non-empty delimiter token.
func (t *tokenizer) isDelimiterToken(tkn token) bool {
	return !tkn.empty() && tkn.Category == tokenCategoryDelimiter
}
// isUnknownToken reports whether tkn is a non-empty unknown token.
func (t *tokenizer) isUnknownToken(tkn token) bool {
	return !tkn.empty() && tkn.Category == tokenCategoryUnknown
}
// isSingleCharacterToken reports whether tkn is an unknown token whose
// content is exactly one byte and is not a dash.
func (t *tokenizer) isSingleCharacterToken(tkn token) bool {
	return t.isUnknownToken(tkn) && len(tkn.Content) == 1 && tkn.Content != "-"
}
// appendTokenTo concatenates tkn's content onto appendTo's content and
// marks tkn invalid so the sweep in validateDelimitertokens removes it.
// Both tokens are re-resolved through the shared token list so that the
// stored tokens (not local copies) are the ones mutated. Returns the
// token that received the content.
func (t *tokenizer) appendTokenTo(tkn, appendTo *token) *token {
	appendToIndex := t.tokens.getIndex(*appendTo, 0)
	appendToSrc, _ := t.tokens.get(appendToIndex)
	appendToSrc.Content += tkn.Content
	// Search for tkn starting at appendTo's index — presumably tkn
	// always follows appendTo in the list; verify against callers.
	srcTknIndex := t.tokens.getIndex(*tkn, appendToIndex)
	srcTkn, _ := t.tokens.get(srcTknIndex)
	srcTkn.Category = tokenCategoryInvalid
	return appendToSrc
}
func findFirstBracket(filename string, brackets [][]rune) (int, rune) {
var openBrackets []rune
for _, v := range brackets {
openBrackets = append(openBrackets, v[0])
}
index := -1
for idx, bracket := range filename {
var found bool
for _, v := range openBrackets {
if bracket == v {
found = true
break
}
}
if found {
index = idx
break
}
}
var matchingBracket rune
for _, v := range brackets {
if index != -1 {
if strings.IndexRune(filename, v[0]) == index {
matchingBracket = v[1]
}
}
}
return index, matchingBracket
}
// isNumeric reports whether s parses as a base-10 integer
// (signs accepted, as per strconv.Atoi).
func isNumeric(s string) bool {
	if _, err := strconv.Atoi(s); err != nil {
		return false
	}
	return true
}
func splitWith(re *regexp.Regexp, s string, n int) []string {
if n == 0 {
return nil
}
matches := re.FindAllStringIndex(s, n)
strings := make([]string, 0, len(matches))
beg := 0
end := 0
for _, match := range matches {
if n > 0 && len(strings) >= n-1 {
break
}
end = match[0]
if match[1] != 0 {
strings = append(strings, s[beg:end])
}
beg = match[1]
strings = append(strings, s[match[0]:match[1]])
}
if end != len(s) {
strings = append(strings, s[beg:])
}
return strings
}