dataset.py
import torch
from torch.utils.data import Dataset
from tokenizers import Tokenizer

from config import get_config


class LanguageData(Dataset):
    """English-to-French translation dataset that tokenizes, pads and masks
    each sentence pair to a fixed sequence length."""

    def __init__(self, ds, enc_tokenizer: Tokenizer, dec_tokenizer: Tokenizer) -> None:
        super().__init__()
        self.data = ds
        self.enc_tokenizer = enc_tokenizer
        self.dec_tokenizer = dec_tokenizer
        self.max_seq_len = get_config()['max_seq_len']

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        bos_token = self.dec_tokenizer.token_to_id('[SOS]')
        eos_token = self.dec_tokenizer.token_to_id('[EOS]')
        pad_token = self.dec_tokenizer.token_to_id('[PAD]')

        enc_lang_text = self.data[index]['en']
        dec_lang_text = self.data[index]['fr']

        # Truncate the token ids (rather than the raw characters) so the special
        # tokens and padding always fit inside max_seq_len and the padding
        # counts below can never go negative.
        enc_output = self.enc_tokenizer.encode(enc_lang_text).ids[:self.max_seq_len - 2]
        dec_output = self.dec_tokenizer.encode(dec_lang_text).ids[:self.max_seq_len - 1]

        enc_padding_tokens = self.max_seq_len - len(enc_output) - 2
        dec_padding_tokens = self.max_seq_len - len(dec_output) - 1

        # Encoder input: [SOS] tokens [EOS] [PAD]...
        enc_tokens = torch.cat([
            torch.tensor([bos_token], dtype=torch.int64),
            torch.tensor(enc_output, dtype=torch.int64),
            torch.tensor([eos_token], dtype=torch.int64),
            torch.tensor([pad_token] * enc_padding_tokens, dtype=torch.int64),
        ])

        # Decoder input: [SOS] tokens [PAD]...  (no [EOS]; the model must predict it)
        dec_tokens = torch.cat([
            torch.tensor([bos_token], dtype=torch.int64),
            torch.tensor(dec_output, dtype=torch.int64),
            torch.tensor([pad_token] * dec_padding_tokens, dtype=torch.int64),
        ])

        # Label: tokens [EOS] [PAD]...  (decoder input shifted left by one position)
        label = torch.cat([
            torch.tensor(dec_output, dtype=torch.int64),
            torch.tensor([eos_token], dtype=torch.int64),
            torch.tensor([pad_token] * dec_padding_tokens, dtype=torch.int64),
        ])

        return {
            'enc_lang_text': enc_lang_text,
            'enc_tokens': enc_tokens,
            # (1, 1, max_seq_len): hides padding positions in encoder self-attention
            'enc_padding_mask': (enc_tokens != pad_token).unsqueeze(0).unsqueeze(0).int(),
            'dec_lang_text': dec_lang_text,
            'dec_tokens': dec_tokens,
            # (1, max_seq_len, max_seq_len): hides both padding and future positions
            'dec_padding_mask': (dec_tokens != pad_token).unsqueeze(0).unsqueeze(0).int() & causal_mask(self.max_seq_len),
            'label': label,
        }


def causal_mask(size):
    # Ones strictly above the diagonal mark "future" positions; comparing with 0
    # flips the matrix so that allowed (current and past) positions are True.
    mask = torch.triu(torch.ones(1, size, size), diagonal=1).type(torch.int)
    return mask == 0
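

# --- Usage sketch (not part of the original file) ----------------------------
# A minimal example of wiring LanguageData into a DataLoader. The tokenizer
# file names and the toy sentence pair below are assumptions for illustration;
# the real project presumably builds or loads its own tokenizers elsewhere.
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    # Hypothetical tokenizer files; adjust paths to the actual project layout.
    enc_tok = Tokenizer.from_file('tokenizer_en.json')
    dec_tok = Tokenizer.from_file('tokenizer_fr.json')

    # LanguageData only needs index access to records with 'en'/'fr' keys,
    # so a plain list of dicts is enough for a quick smoke test.
    pairs = [{'en': 'Hello world.', 'fr': 'Bonjour le monde.'}]
    loader = DataLoader(LanguageData(pairs, enc_tok, dec_tok), batch_size=1)

    batch = next(iter(loader))
    print(batch['enc_tokens'].shape)        # (1, max_seq_len)
    print(batch['dec_padding_mask'].shape)  # (1, 1, max_seq_len, max_seq_len)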