fine-tune/ft.py

#! /usr/bin/env python3
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch
from dl import load_dataset
from tqdm import tqdm
import os
class FT:
    def __init__(self):
        torch.backends.cudnn.benchmark = True

        self.model_name = "facebook/mbart-large-50"
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # load tokenizer and model
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForSeq2SeqLM.from_pretrained(
            self.model_name, load_in_8bit=True
        )
        # set up optimizer and a plateau-based learning-rate scheduler
        self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=1e-5)
        self.scheduler = ReduceLROnPlateau(
            self.optimizer, mode="min", patience=1, factor=0.5
        )

        # mixed-precision setup: fall back to a no-op autocast when AMP is unavailable
        try:
            from torch.cuda.amp import GradScaler, autocast

            self.scaler = GradScaler()
        except ImportError:
            # no AMP available: provide a dummy autocast and skip the scaler
            class autocast:
                def __enter__(self):
                    pass

                def __exit__(self, *args):
                    pass

            self.scaler = None  # we won't use a scaler if we don't have AMP
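        # NOTE: the scaler and autocast set up above are not actually used in
        # train_model below. A minimal sketch of how they would typically be
        # wired into the training step (assuming AMP is available and the model
        # is not 8-bit quantized) looks like this:
        #
        #     with autocast():
        #         outputs = self.model(input_ids=..., attention_mask=..., labels=...)
        #         loss = outputs.loss
        #     self.scaler.scale(loss).backward()
        #     self.scaler.step(self.optimizer)
        #     self.scaler.update()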
    def train_model(self, dataloader):
        self.model.train()
        total_loss = 0
        print("Training model...")
        for batch in tqdm(dataloader):
            self.optimizer.zero_grad()

            # tokenize the source side (batch[1]) and the target side (batch[0]);
            # note that padding tokens in the labels are not masked to -100 here,
            # so padded positions still contribute to the loss
            inputs = self.tokenizer(
                batch[1],
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=512,
            )
            inputs = inputs.to(self.device)
            labels = self.tokenizer(
                batch[0],
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=512,
            )
            labels = labels.to(self.device)

            # forward pass; the model computes the seq2seq loss internally
            outputs = self.model(
                input_ids=inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                labels=labels["input_ids"],
            )
            loss = outputs.loss

            loss.backward()
            self.optimizer.step()

            total_loss += loss.item()

        avg_train_loss = total_loss / len(dataloader)
        return avg_train_loss
    def test_model(self, dataloader):
        self.model.eval()
        total_loss = 0
        print("Testing model...")
        for batch in tqdm(dataloader):
            with torch.no_grad():
                inputs = self.tokenizer(
                    batch[1],
                    return_tensors="pt",
                    padding=True,
                    truncation=True,
                    max_length=512,
                )
                inputs = inputs.to(self.device)
                labels = self.tokenizer(
                    batch[0],
                    return_tensors="pt",
                    padding=True,
                    truncation=True,
                    max_length=512,
                )
                labels = labels.to(self.device)
                outputs = self.model(
                    input_ids=inputs["input_ids"],
                    attention_mask=inputs["attention_mask"],
                    labels=labels["input_ids"],
                )
                loss = outputs.loss
                total_loss += loss.item()
        avg_test_loss = total_loss / len(dataloader)
        return avg_test_loss
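    # Inference is out of scope for this script; a minimal, hypothetical sketch of
    # generating with the fine-tuned model might look like the following (the
    # helper name "translate" and the target language code "en_XX" are assumptions,
    # not part of the original code):
    #
    #     def translate(self, text, tgt_lang="en_XX"):
    #         enc = self.tokenizer(text, return_tensors="pt").to(self.device)
    #         out = self.model.generate(
    #             **enc,
    #             forced_bos_token_id=self.tokenizer.lang_code_to_id[tgt_lang],
    #             max_length=512,
    #         )
    #         return self.tokenizer.batch_decode(out, skip_special_tokens=True)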
    def save_checkpoint(self, epoch):
        os.makedirs("./checkpoints/ft", exist_ok=True)
        torch.save(
            {
                "epoch": epoch,
                "model_state_dict": self.model.state_dict(),
                "optimizer_state_dict": self.optimizer.state_dict(),
            },
            f"./checkpoints/ft/ft_{epoch}.pt",
        )
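    # A matching load_checkpoint is not part of this script; a minimal sketch for
    # resuming from a file written by save_checkpoint (hypothetical helper, same
    # checkpoint layout as above) could be:
    #
    #     def load_checkpoint(self, path):
    #         ckpt = torch.load(path, map_location=self.device)
    #         self.model.load_state_dict(ckpt["model_state_dict"])
    #         self.optimizer.load_state_dict(ckpt["optimizer_state_dict"])
    #         return ckpt["epoch"]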
    def train(self):
        train_dataloader, test_dataloader = load_dataset(
            "./dataset/hason_out.json",
            200,
            200,
            1,
            test_ratio=0.2,
        )

        num_epochs = 100
        last_lr = None
        for epoch in range(num_epochs):
            avg_train_loss = self.train_model(train_dataloader)
            print(f"Train loss for epoch {epoch+1}: {avg_train_loss}")

            self.save_checkpoint(epoch)
            print("Checkpoint saved!")

            avg_test_loss = self.test_model(test_dataloader)
            print(f"Test loss for epoch {epoch+1}: {avg_test_loss}")

            # step the plateau scheduler on the test loss so it can actually
            # reduce the learning rate; without this call the LR never changes
            self.scheduler.step(avg_test_loss)

            # check if the learning rate has changed
            current_lr = self.optimizer.param_groups[0]["lr"]
            if last_lr and current_lr != last_lr:
                print(f"Learning rate reduced from {last_lr} to {current_lr}")
            last_lr = current_lr

if __name__ == "__main__":
    trainer = FT()
    trainer.train()