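"""Fine-tune a Donut (VisionEncoderDecoder) model with PyTorch Lightning.

The script loads a pretrained Donut checkpoint, fine-tunes it on a
CORD-style dataset, logs metrics to Weights & Biases, and pushes the
resulting model to the Hugging Face Hub.

Example invocation (script and config file names are illustrative):

    HUG_TOKEN=hf_xxx python train.py --config config/train_cord.yaml
"""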
from transformers import VisionEncoderDecoderConfig, DonutProcessor, VisionEncoderDecoderModel
import torch
from torch.utils.data import DataLoader
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import ModelCheckpoint
import pytorch_lightning as pl
import os
from huggingface_hub import login
import argparse
from sconf import Config
from utils.checkpoint import CustomCheckpointIO
from utils.donut_dataset import DonutDataset
from utils.donut_model_pl import DonutModelPLModule
from utils.callbacks import PushToHubCallback
import warnings
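
# Sketch of the config keys this script reads; the values below are
# illustrative assumptions, not defaults shipped with the script:
#
#   pretrained_model_path: naver-clova-ix/donut-base
#   start_model_path: naver-clova-ix/donut-base
#   dataset_path: naver-clova-ix/cord-v2
#   image_size: [1280, 960]        # (height, width)
#   max_length: 768
#   wandb_test_name: donut-cord-run
#   checkpoint_path: ./checkpoints
#   output_model_path: <user>/donut-finetuned-cord
#   train_config:
#     max_epochs: 30
#     val_check_interval: 0.2
#     check_val_every_n_epoch: 1
#     gradient_clip_val: 1.0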


def main(config, hug_token):
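    """Build the processor, model, and datasets from `config`, then train.

    Args:
        config: sconf Config holding model paths and training hyperparameters.
        hug_token: Hugging Face Hub token used to authenticate before pushing.
    """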
    # Load the pretrained config and override the encoder input resolution and
    # the decoder's maximum generation length for this fine-tuning run.
    config_vision = VisionEncoderDecoderConfig.from_pretrained(
        config.pretrained_model_path)
    config_vision.encoder.image_size = config.image_size
    config_vision.decoder.max_length = config.max_length

    processor = DonutProcessor.from_pretrained(config.start_model_path)
    model = VisionEncoderDecoderModel.from_pretrained(
        config.pretrained_model_path, config=config_vision)

    # The image processor expects size as (width, height), while
    # config.image_size is (height, width), hence the reversal.
    processor.image_processor.size = config.image_size[::-1]
    processor.image_processor.do_align_long_axis = False
    # Special tokens registered while building the datasets; shared across
    # both splits so the tokenizer is extended consistently.
    added_tokens = []

    train_dataset = DonutDataset(
        config.dataset_path,
        processor=processor,
        model=model,
        max_length=config.max_length,
        split="train",
        task_start_token="<s_cord-v2>",
        prompt_end_token="<s_cord-v2>",
        added_tokens=added_tokens,
        sort_json_key=False,  # the CORD dataset is already preprocessed
    )

    val_dataset = DonutDataset(
        config.dataset_path,
        processor=processor,
        model=model,
        max_length=config.max_length,
        split="validation",
        task_start_token="<s_cord-v2>",
        prompt_end_token="<s_cord-v2>",
        added_tokens=added_tokens,
        sort_json_key=False,  # the CORD dataset is already preprocessed
    )
    # Tell the model which token pads sequences and which token starts decoding.
    model.config.pad_token_id = processor.tokenizer.pad_token_id
    model.config.decoder_start_token_id = processor.tokenizer.convert_tokens_to_ids(['<s_cord-v2>'])[0]

    train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=4)
    val_dataloader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=4)
    # Authenticate with the Hugging Face Hub so the trained model can be pushed.
    login(hug_token, add_to_git_credential=True)

    model_module = DonutModelPLModule(
        config.train_config.toDict(),
        processor,
        model,
        max_length=config.max_length,
        train_dataloader=train_dataloader,
        val_dataloader=val_dataloader,
    )
    wandb_logger = WandbLogger(project="Donut", name=config.wandb_test_name)
    # Keep only the single best checkpoint, selected by the (lower-is-better)
    # validation metric.
    checkpoint_callback = ModelCheckpoint(
        monitor="val_metric",
        dirpath=config.checkpoint_path,
        filename="artifacts",
        save_top_k=1,
        save_last=False,
        mode="min",
    )
    custom_ckpt = CustomCheckpointIO()
    trainer = pl.Trainer(
        accelerator="gpu" if torch.cuda.is_available() else "cpu",
        devices=1,
        max_epochs=config.train_config.max_epochs,
        val_check_interval=config.train_config.val_check_interval,
        check_val_every_n_epoch=config.train_config.check_val_every_n_epoch,
        gradient_clip_val=config.train_config.gradient_clip_val,
        precision=16,  # mixed precision training
        plugins=custom_ckpt,
        num_sanity_val_steps=0,
        logger=wandb_logger,
        callbacks=[PushToHubCallback(output_model_path=config.output_model_path), checkpoint_callback],
    )

    trainer.fit(model_module)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str, required=True)
    args, left_argv = parser.parse_known_args()
    # Load the config file and apply any remaining CLI args as overrides.
    config = Config(args.config)
    config.argv_update(left_argv)

    hug_token = os.environ.get("HUG_TOKEN", None)

    if not torch.cuda.is_available():
        warnings.warn("CUDA is not available; training may be very slow or infeasible.")

    if not hug_token:
        raise Exception("HUG_TOKEN must be set in the environment to push the output model to the Hub.")

    main(config, hug_token)
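
# A minimal sketch of reloading the pushed model for inference, assuming
# PushToHubCallback uploads to the repo named by config.output_model_path:
#
#   processor = DonutProcessor.from_pretrained("<user>/donut-finetuned-cord")
#   model = VisionEncoderDecoderModel.from_pretrained("<user>/donut-finetuned-cord")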