Merge remote-tracking branch 'origin/test_branch' into test_branch

Author: s464786
Date: 2024-06-04 11:44:58 +02:00
Commit: 99306f0532
4 changed files with 25 additions and 5 deletions

ConvLab-3 Submodule

@@ -0,0 +1 @@
Subproject commit 60f4e5641f93e99b8d61b49cf5fd6dc818a83c4c

@@ -1,15 +1,21 @@
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
import requests
import os
import time

class MachineLearningNLG:
    def __init__(self):
        self.model_name = "./nlg_model"  # Path to the trained model
        if not os.path.exists(self.model_name):
            raise ValueError(
                f"Path {self.model_name} does not exist. Make sure the model was saved correctly.")
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        self.generator = pipeline('text2text-generation', model=self.model, tokenizer=self.tokenizer)

    def translate_text(self, text, target_language='pl'):
-       url = 'https://translate.googleapis.com/translate_a/single?client=gtx&sl=auto&tl={}&dt=t&q={}'.format(target_language, text)
+       url = f'https://translate.googleapis.com/translate_a/single?client=gtx&sl=auto&tl={target_language}&dt=t&q={text}'
        response = requests.get(url)
        if response.status_code == 200:
            translated_text = response.json()[0][0][0]
@@ -19,13 +25,22 @@ class MachineLearningNLG:
    def nlg(self, system_act):
        input_text = f"generate text: {system_act}"
        start_time = time.time()
        result = self.generator(input_text)
        response_time = time.time() - start_time
        response = result[0]['generated_text']
        translated_response = self.translate_text(response, target_language='pl')
        return translated_response

    def generate(self, action):
        return self.nlg(action)

    def init_session(self):
        pass

# Usage example
if __name__ == "__main__":
    nlg = MachineLearningNLG()
-   system_act = "inform(date.from=15.07, date.to=22.07)"
+   system_act = "inform(people.kids.ages=[4,9])"
    print(nlg.nlg(system_act))
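
Note: translate_text interpolates the raw model output straight into the query string, so spaces and special characters are not URL-encoded. Below is a minimal sketch of the same request built with requests' params argument; the endpoint and response indexing come from the diff above, while the helper name and the fallback return value are illustrative assumptions, not part of the commit.

import requests

def translate_text_safe(text, target_language='pl'):
    # Same unofficial endpoint as in the diff above; params= lets requests URL-encode the query.
    url = 'https://translate.googleapis.com/translate_a/single'
    params = {'client': 'gtx', 'sl': 'auto', 'tl': target_language, 'dt': 't', 'q': text}
    response = requests.get(url, params=params)
    if response.status_code == 200:
        return response.json()[0][0][0]
    return text  # assumption: fall back to the untranslated text on error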

@@ -9,7 +9,7 @@ if __name__ == "__main__":
    nlu = NaturalLanguageAnalyzer()
    dst = DialogueStateTracker()
    policy = DialoguePolicy()
-   nlg = MachineLearningNLG()  # Use the new NLG component
+   nlg = MachineLearningNLG()
    agent = PipelineAgent(nlu=nlu, dst=dst, policy=policy, nlg=nlg, name='sys')
    response = agent.response(text)
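
For context, a minimal sketch of how the assembled pipeline might be driven turn by turn. The component instances (nlu, dst, policy, nlg) are the ones built above; the console loop and the init_session call are assumptions about how the surrounding script is used, not part of the commit.

agent = PipelineAgent(nlu=nlu, dst=dst, policy=policy, nlg=nlg, name='sys')
agent.init_session()  # assumed: reset dialogue state before a new conversation
while True:
    text = input("user> ")  # illustrative console input
    if not text:
        break
    print("sys>", agent.response(text))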

@@ -50,10 +50,10 @@ training_args = Seq2SeqTrainingArguments(
    per_device_eval_batch_size=16,
    predict_with_generate=True,
    learning_rate=5e-5,
-   num_train_epochs=3,
+   num_train_epochs=10,
    evaluation_strategy="epoch",
    save_strategy="epoch",
-   save_total_limit=1,
+   save_total_limit=None,  # Disable checkpoint rotation
    load_best_model_at_end=True,
)
@@ -68,3 +68,7 @@ trainer = Seq2SeqTrainer(
# Train the model
trainer.train()

# Save the trained model
trainer.save_model("./nlg_model")
tokenizer.save_pretrained("./nlg_model")
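
The saved artifacts line up with the model_name path expected by MachineLearningNLG above. A minimal sketch of reloading them for standalone inference, using the same "generate text: ..." prompt format that nlg() builds:

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline

# Reload what trainer.save_model / tokenizer.save_pretrained wrote above.
tokenizer = AutoTokenizer.from_pretrained("./nlg_model")
model = AutoModelForSeq2SeqLM.from_pretrained("./nlg_model")
generator = pipeline('text2text-generation', model=model, tokenizer=tokenizer)

# Same prompt format as MachineLearningNLG.nlg above.
result = generator("generate text: inform(people.kids.ages=[4,9])")
print(result[0]['generated_text'])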