from datasets import load_dataset
from torch.utils.data import Dataset
import json
from typing import Any, List, Tuple
import random
import torch
from transformers import DonutProcessor, VisionEncoderDecoderModel


class DonutDatasetStream:
    """
    Prepares rows of a (streaming) document dataset for Donut fine-tuning:
    converts the ground-truth JSON into a token sequence, registers the
    needed special tokens, and returns pixel_values / labels for the model.
    """

    def __init__(
        self,
        processor: DonutProcessor,
        model: VisionEncoderDecoderModel,
        max_length: int,
        ignore_id: int = -100,
        split: str = "train",
        task_start_token: str = "<s>",
        prompt_end_token: str = None,
        sort_json_key: bool = True,
        added_tokens: list = [],
    ):
        self.split = split
        self.max_length = max_length
        self.processor = processor
        self.model = model
        self.ignore_id = ignore_id
        self.task_start_token = task_start_token
        self.prompt_end_token = prompt_end_token if prompt_end_token else task_start_token
        self.sort_json_key = sort_json_key
        self.added_tokens = added_tokens

    def process(self, row):
        ground_truth = json.loads(row["ground_truth"])
        if "gt_parses" in ground_truth:  # when multiple ground truths are available, e.g., DocVQA
            assert isinstance(ground_truth["gt_parses"], list)
            gt_jsons = ground_truth["gt_parses"]
        else:
            assert "gt_parse" in ground_truth and isinstance(ground_truth["gt_parse"], dict)
            gt_jsons = [ground_truth["gt_parse"]]

        self.gt_token_sequences = [
            self.json2token(
                gt_json,
                update_special_tokens_for_json_key=self.split == "train",
                sort_json_key=self.sort_json_key,
            )
            + self.processor.tokenizer.eos_token
            for gt_json in gt_jsons  # load json from list of json
        ]

        self.add_tokens([self.task_start_token, self.prompt_end_token])
        self.prompt_end_token_id = self.processor.tokenizer.convert_tokens_to_ids(self.prompt_end_token)

        # convert to RGB if the image does not have 3 channels
        if row["image"].mode != "RGB":
            row["image"] = row["image"].convert("RGB")

        # inputs
        pixel_values = self.processor(
            row["image"], random_padding=self.split == "train", return_tensors="pt"
        ).pixel_values
        pixel_values = pixel_values.squeeze()

        # targets
        target_sequence = random.choice(self.gt_token_sequences)  # can be more than one, e.g., DocVQA Task 1
        input_ids = self.processor.tokenizer(
            target_sequence,
            add_special_tokens=False,
            max_length=self.max_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )["input_ids"].squeeze(0)

        labels = input_ids.clone()
        labels[labels == self.processor.tokenizer.pad_token_id] = self.ignore_id  # model doesn't need to predict pad token
        return {"pixel_values": pixel_values, "labels": labels, "target_sequence": target_sequence}

    def json2token(self, obj: Any, update_special_tokens_for_json_key: bool = True, sort_json_key: bool = True):
        """
        Convert an ordered JSON object into a token sequence
        """
        if type(obj) == dict:
            if len(obj) == 1 and "text_sequence" in obj:
                return obj["text_sequence"]
            else:
                output = ""
                if sort_json_key:
                    keys = sorted(obj.keys(), reverse=True)
                else:
                    keys = obj.keys()
                for k in keys:
                    if update_special_tokens_for_json_key:
                        self.add_tokens([fr"<s_{k}>", fr"</s_{k}>"])
                    output += (
                        fr"<s_{k}>"
                        + self.json2token(obj[k], update_special_tokens_for_json_key, sort_json_key)
                        + fr"</s_{k}>"
                    )
                return output
        elif type(obj) == list:
            return r"<sep/>".join(
                [self.json2token(item, update_special_tokens_for_json_key, sort_json_key) for item in obj]
            )
        else:
            obj = str(obj)
            if f"<{obj}/>" in self.added_tokens:
                obj = f"<{obj}/>"  # for categorical special tokens
            return obj

    def add_tokens(self, list_of_tokens: List[str]):
        """
        Add special tokens to tokenizer and resize the token embeddings of the decoder
        """
        newly_added_num = self.processor.tokenizer.add_tokens(list_of_tokens)
        if newly_added_num > 0:
            self.model.decoder.resize_token_embeddings(len(self.processor.tokenizer))
        self.added_tokens.extend(list_of_tokens)
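

# --- Usage sketch (illustrative, not part of the original class) ---
# A minimal example of how DonutDatasetStream might be wired up with a
# streaming Hugging Face dataset: each raw row (image + ground_truth JSON)
# is mapped through `process` on the fly. The checkpoint name, dataset name,
# max_length, and task token below are assumptions for illustration only.

if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import DonutProcessor, VisionEncoderDecoderModel

    # Assumed base checkpoint; swap in whatever Donut checkpoint you fine-tune.
    processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
    model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base")

    donut_stream = DonutDatasetStream(
        processor=processor,
        model=model,
        max_length=512,                  # assumed target sequence length
        split="train",
        task_start_token="<s_cord-v2>",  # assumed task token for this example
        prompt_end_token="<s_cord-v2>",
    )

    # Stream the dataset and convert rows lazily; remove the raw columns so
    # only pixel_values / labels / target_sequence remain for the DataLoader.
    train_dataset = load_dataset("naver-clova-ix/cord-v2", split="train", streaming=True)
    train_dataset = train_dataset.map(
        donut_stream.process,
        remove_columns=["image", "ground_truth"],
    )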