from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
import requests


class MachineLearningNLG:
    def __init__(self):
        self.model_name = "./nlg_model"  # Path to the fine-tuned NLG model
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        self.generator = pipeline('text2text-generation', model=self.model, tokenizer=self.tokenizer)

    def translate_text(self, text, target_language='pl'):
        # Query the unofficial Google Translate endpoint; passing the text via
        # `params` lets requests URL-encode spaces and special characters safely.
        url = 'https://translate.googleapis.com/translate_a/single'
        params = {'client': 'gtx', 'sl': 'auto', 'tl': target_language, 'dt': 't', 'q': text}
        response = requests.get(url, params=params)
        if response.status_code == 200:
            translated_text = response.json()[0][0][0]
            return translated_text
        else:
            return text  # Return the original text if translation fails

    def nlg(self, system_act):
        # Turn a dialogue system act into natural language, then translate it to Polish.
        input_text = f"generate text: {system_act}"
        result = self.generator(input_text)
        response = result[0]['generated_text']
        translated_response = self.translate_text(response, target_language='pl')
        return translated_response


if __name__ == "__main__":
    nlg = MachineLearningNLG()
    system_act = "inform(date.from=15.07, date.to=22.07)"
    print(nlg.nlg(system_act))
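

# Note: the unofficial translate endpoint splits longer inputs into sentence
# segments, and translate_text above keeps only the first one. A minimal sketch
# (assuming the same nested-list response shape the class relies on, i.e.
# response.json()[0] is a list of segments with the translated chunk at index 0)
# that joins all segments for multi-sentence responses:
def translate_full(text, target_language='pl'):
    url = 'https://translate.googleapis.com/translate_a/single'
    params = {'client': 'gtx', 'sl': 'auto', 'tl': target_language, 'dt': 't', 'q': text}
    response = requests.get(url, params=params, timeout=10)
    response.raise_for_status()
    segments = response.json()[0]
    return ''.join(segment[0] for segment in segments)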