trainer.py
from datasets import load_dataset
from transformers import (
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

from tokenizer import tokenizer_main
from encode import encode
# Build the tokenizer and get the paths of the training files
tokenizer, paths = tokenizer_main()
# Reload the trained tokenizer from the 'tokenizer' directory as a GPT2Tokenizer
tokenizer = GPT2Tokenizer.from_pretrained('tokenizer')
# Register the special tokens
tokenizer.add_special_tokens({
    "eos_token": "</s>",
    "bos_token": "<s>",
    "unk_token": "<unk>",
    "pad_token": "<pad>",
    "mask_token": "<mask>"
})
# Model configuration; len(tokenizer) accounts for the added special tokens,
# and the *_token_id keyword names are what GPT2Config expects
config = GPT2Config(
    vocab_size=len(tokenizer),
    bos_token_id=tokenizer.bos_token_id,
    eos_token_id=tokenizer.eos_token_id
)
# Initialise the model and load the training text files as a dataset
model = GPT2LMHeadModel(config)
dataset = load_dataset("text", data_files=paths)
# Tokenize examples on the fly with the encode transform
dataset.set_transform(encode)
dataset = dataset['train']
# Data collator for language modelling; GPT-2 is a causal LM, so mlm=False
# (the collator then simply copies the input ids into the labels)
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
# Training arguments
training_args = TrainingArguments(
    output_dir="code_generation",
    overwrite_output_dir=True,
    num_train_epochs=1,
    per_device_train_batch_size=32,
    save_steps=100,
    save_total_limit=2,
    prediction_loss_only=True
)
# Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    data_collator=data_collator,
    train_dataset=dataset
)
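
# A minimal sketch of the typical finishing steps (assumed here; the directory
# name is reused from output_dir above): run training, then save the model and
# tokenizer so they can be reloaded with from_pretrained later.
trainer.train()
trainer.save_model("code_generation")
tokenizer.save_pretrained("code_generation")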