Michał Kozłowski 2023-01-25 21:29:15 +01:00
parent 93a231a477
commit dd5febad65
2 changed files with 58 additions and 55 deletions

View File

@@ -34,68 +34,72 @@ def main(config, hug_token):
     added_tokens = []
-    train_dataset = DonutDataset(
-        config.dataset_path,
-        processor=processor,
-        model=model,
-        max_length=config.max_length,
-        split="train",
-        task_start_token="<s_cord-v2>",
-        prompt_end_token="<s_cord-v2>",
-        added_tokens=added_tokens,
-        sort_json_key=False, # cord dataset is preprocessed, so no need for this
-    )
-    val_dataset = DonutDataset(
-        config.dataset_path,
-        processor=processor,
-        model=model,
-        max_length=config.max_length,
-        split="validation",
-        task_start_token="<s_cord-v2>",
-        prompt_end_token="<s_cord-v2>",
-        added_tokens=added_tokens,
-        sort_json_key=False, # cord dataset is preprocessed, so no need for this
-    )
-    model.config.pad_token_id = processor.tokenizer.pad_token_id
-    model.config.decoder_start_token_id = processor.tokenizer.convert_tokens_to_ids(['<s_cord-v2>'])[0]
-
-    train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=1)
-    val_dataloader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=1)
-    login(hug_token, True)
-
-    model_module = DonutModelPLModule(config.train_config.toDict(), processor, model, max_length=config.max_length, train_dataloader=train_dataloader, val_dataloader=val_dataloader)
-
-    wandb_logger = WandbLogger(project="Donut", name=config.wandb_test_name)
-    checkpoint_callback = ModelCheckpoint(
-        monitor="val_metric",
-        dirpath=config.checkpoint_path,
-        filename="artifacts",
-        save_top_k=1,
-        save_last=False,
-        mode="min",
-    )
-    custom_ckpt = CustomCheckpointIO()
-    trainer = pl.Trainer(
-        accelerator="gpu" if torch.cuda.is_available() else 'cpu', # change to gpu
-        devices=1,
-        max_epochs=config.train_config.max_epochs,
-        val_check_interval=config.train_config.val_check_interval,
-        check_val_every_n_epoch=config.train_config.check_val_every_n_epoch,
-        gradient_clip_val=config.train_config.gradient_clip_val,
-        precision=16, # we'll use mixed precision
-        plugins=custom_ckpt,
-        num_sanity_val_steps=0,
-        logger=wandb_logger,
-        callbacks=[PushToHubCallback(output_model_path=config.output_model_path), checkpoint_callback],
-    )
-    trainer.fit(model_module)
+    dataset = load_dataset(config.dataset_path)
+    dataset.train_test_split(test_size=0.1)
+    print(dataset)
+    # train_dataset = DonutDataset(
+    #     dataset,
+    #     processor=processor,
+    #     model=model,
+    #     max_length=config.max_length,
+    #     split="train",
+    #     task_start_token="<s_cord-v2>",
+    #     prompt_end_token="<s_cord-v2>",
+    #     added_tokens=added_tokens,
+    #     sort_json_key=False, # cord dataset is preprocessed, so no need for this
+    # )
+    # val_dataset = DonutDataset(
+    #     dataset,
+    #     processor=processor,
+    #     model=model,
+    #     max_length=config.max_length,
+    #     split="validation",
+    #     task_start_token="<s_cord-v2>",
+    #     prompt_end_token="<s_cord-v2>",
+    #     added_tokens=added_tokens,
+    #     sort_json_key=False, # cord dataset is preprocessed, so no need for this
+    # )
+    # model.config.pad_token_id = processor.tokenizer.pad_token_id
+    # model.config.decoder_start_token_id = processor.tokenizer.convert_tokens_to_ids(['<s_cord-v2>'])[0]
+    # train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=1)
+    # val_dataloader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=1)
+    # login(hug_token, True)
+    # model_module = DonutModelPLModule(config.train_config.toDict(), processor, model, max_length=config.max_length, train_dataloader=train_dataloader, val_dataloader=val_dataloader)
+    # wandb_logger = WandbLogger(project="Donut", name=config.wandb_test_name)
+    # checkpoint_callback = ModelCheckpoint(
+    #     monitor="val_metric",
+    #     dirpath=config.checkpoint_path,
+    #     filename="artifacts",
+    #     save_top_k=1,
+    #     save_last=False,
+    #     mode="min",
+    # )
+    # custom_ckpt = CustomCheckpointIO()
+    # trainer = pl.Trainer(
+    #     accelerator="gpu" if torch.cuda.is_available() else 'cpu', # change to gpu
+    #     devices=1,
+    #     max_epochs=config.train_config.max_epochs,
+    #     val_check_interval=config.train_config.val_check_interval,
+    #     check_val_every_n_epoch=config.train_config.check_val_every_n_epoch,
+    #     gradient_clip_val=config.train_config.gradient_clip_val,
+    #     precision=16, # we'll use mixed precision
+    #     plugins=custom_ckpt,
+    #     num_sanity_val_steps=0,
+    #     logger=wandb_logger,
+    #     callbacks=[PushToHubCallback(output_model_path=config.output_model_path), checkpoint_callback],
+    # )
+    # trainer.fit(model_module)
 if __name__ == "__main__":
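
Note on the new loading path: `load_dataset(config.dataset_path)` with no `split` argument returns a `DatasetDict`, and `train_test_split` is a method of `Dataset` (not `DatasetDict`) that returns a new `DatasetDict` rather than splitting in place, so the bare `dataset.train_test_split(test_size=0.1)` call above discards its result. A minimal sketch of the presumably intended split step, using the public CORD-v2 dataset path as a stand-in for `config.dataset_path`:

    from datasets import load_dataset

    # Stand-in path; the repo uses config.dataset_path here.
    # load_dataset with no split argument returns a DatasetDict keyed by split name.
    dataset = load_dataset("naver-clova-ix/cord-v2")

    # train_test_split exists on Dataset and returns a NEW DatasetDict with
    # "train" and "test" keys; its result must be assigned to take effect.
    splits = dataset["train"].train_test_split(test_size=0.1)
    train_split, val_split = splits["train"], splits["test"]
    print(splits)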

View File

@@ -24,7 +24,7 @@ class DonutDataset(Dataset):
     def __init__(
         self,
-        dataset_name_or_path: str,
+        dataset: Dataset,
         max_length: int,
         processor: DonutProcessor,
         model: VisionEncoderDecoderModel,
@@ -47,8 +47,7 @@ class DonutDataset(Dataset):
         self.sort_json_key = sort_json_key
         self.added_tokens = added_tokens
-        self.dataset = load_dataset(dataset_name_or_path, split=self.split, streaming=True).with_format("torch")
-        print(self.dataset)
+        self.dataset = dataset
         self.dataset_length = len(self.dataset)
         self.gt_token_sequences = []
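
This hunk also drops the `streaming=True` load and the stray `print`, which matters because the very next line calls `len(self.dataset)`: a streaming load returns an `IterableDataset`, which has no length. A hedged sketch of how the class would be fed once the commented-out training path is restored, reusing `processor`, `model`, `config`, and `added_tokens` from `main` as in the diff, and the hypothetical `train_split` from the loading sketch above:

    train_dataset = DonutDataset(
        train_split,  # a sized datasets.Dataset, not a path string
        processor=processor,
        model=model,
        max_length=config.max_length,
        split="train",
        task_start_token="<s_cord-v2>",
        prompt_end_token="<s_cord-v2>",
        added_tokens=added_tokens,
        sort_json_key=False, # cord dataset is preprocessed, so no need for this
    )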