data.py
from dataclasses import dataclass

import datasets
from torch.utils.data import Dataset
from transformers import PreTrainedTokenizer, DataCollatorWithPadding


class IndexingTrainDataset(Dataset):
    def __init__(
            self,
            path_to_data: str,
            max_length: int,
            cache_dir: str,
            tokenizer: PreTrainedTokenizer,
    ):
        # Load the JSON training file; each record holds a document's text and its docid.
        self.train_data = datasets.load_dataset(
            'json',
            data_files=path_to_data,
            ignore_verifications=False,
            cache_dir=cache_dir
        )['train']

        self.max_length = max_length
        self.tokenizer = tokenizer
        self.total_len = len(self.train_data)

    def __len__(self):
        return self.total_len

    def __getitem__(self, item):
        data = self.train_data[item]
        # Tokenize the document text, truncating to max_length, and return the
        # unbatched input_ids tensor together with the docid as a string.
        input_ids = self.tokenizer(data['text'],
                                   return_tensors="pt",
                                   truncation='only_first',
                                   max_length=self.max_length).input_ids[0]
        return input_ids, str(data['text_id'])
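

# The dataset above expects a JSON / JSON-lines file in which each record has at
# least a 'text' field (the document or query string) and a 'text_id' field (the
# docid the model should generate). The record below is purely illustrative and
# not taken from the repository's data:
#   {"text_id": "doc_17", "text": "Example document body ..."}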


@dataclass
class IndexingCollator(DataCollatorWithPadding):
    def __call__(self, features):
        # Pad the tokenized documents into a batch and tokenize the docids as labels.
        input_ids = [{'input_ids': x[0]} for x in features]
        docids = [x[1] for x in features]
        inputs = super().__call__(input_ids)

        labels = self.tokenizer(
            docids, padding="longest", return_tensors="pt"
        ).input_ids

        # Replace padding token ids in the labels with -100 so they are ignored by the
        # loss, per https://huggingface.co/docs/transformers/model_doc/t5#training
        labels[labels == self.tokenizer.pad_token_id] = -100
        inputs['labels'] = labels

        return inputs


@dataclass
class QueryEvalCollator(DataCollatorWithPadding):
    def __call__(self, features):
        # Pad the tokenized queries into a batch; keep the target docids as plain
        # strings so they can be compared against decoded model outputs.
        input_ids = [{'input_ids': x[0]} for x in features]
        labels = [x[1] for x in features]
        inputs = super().__call__(input_ids)

        return inputs, labels
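

# Usage sketch: a minimal example of wiring the dataset and collator into a
# PyTorch DataLoader, assuming a T5 tokenizer. The checkpoint name, file path,
# batch size, and max length below are placeholders, not values taken from this
# repository.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    from transformers import T5Tokenizer

    tokenizer = T5Tokenizer.from_pretrained('t5-small')  # assumed checkpoint

    train_dataset = IndexingTrainDataset(
        path_to_data='data/train.json',  # placeholder path
        max_length=32,
        cache_dir='cache',
        tokenizer=tokenizer,
    )
    train_loader = DataLoader(
        train_dataset,
        batch_size=8,
        collate_fn=IndexingCollator(tokenizer, padding='longest'),
    )

    # Each batch is a dict with 'input_ids', 'attention_mask', and 'labels',
    # ready to feed a seq2seq model such as T5ForConditionalGeneration.
    batch = next(iter(train_loader))
    print(batch['input_ids'].shape, batch['labels'].shape)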