diff --git a/application/functions/errors.py b/application/functions/errors.py
index 77f5ace..62e2548 100644
--- a/application/functions/errors.py
+++ b/application/functions/errors.py
@@ -1,10 +1,6 @@
-from transformers import AutoTokenizer
 from transformers import pipeline
-#from transformers import BartForConditionalGeneration
 
-tokenizer = AutoTokenizer.from_pretrained("szymonj/polish-simple-error-correction")
-#model = BartForConditionalGeneration.from_pretrained("szymonj/polish-simple-error-correction")
-pipe = pipeline("text2text-generation",model="szymonj/polish-simple-error-correction",tokenizer=tokenizer,max_length=2000)
+pipe = pipeline("text2text-generation",model="szymonj/polish-simple-error-correction", tokenizer="szymonj/polish-simple-error-correction", max_length=2000)
 
 def errors_correction(data):
     result = pipe(data)
diff --git a/application/functions/sentiment.py b/application/functions/sentiment.py
index dc1f42b..d2534d1 100644
--- a/application/functions/sentiment.py
+++ b/application/functions/sentiment.py
@@ -1,17 +1,12 @@
-from transformers import AutoTokenizer
-from transformers import pipeline, GPT2ForSequenceClassification
+from transformers import pipeline
 import re
 from facebook_scraper import get_posts
 
-# model = 'application/models/sentiment_model'
-# tokenizer = AutoTokenizer.from_pretrained('application/tokenizers/sentiment_tokenizer')
-
-#model = GPT2ForSequenceClassification.from_pretrained("Scigi/sentiment-analysis-model", num_labels=3)
-tokenizer = AutoTokenizer.from_pretrained("Scigi/sentiment-analysis-model")
-pipe = pipeline('text-classification', model="Scigi/sentiment-analysis-model", tokenizer = tokenizer)
+pipe = pipeline('text-classification', model="Scigi/sentiment-analysis-model", tokenizer = "Scigi/sentiment-analysis-model")
 
 def sentiment_prediction(data):
     result = pipe(data)
+    return result
 
 
 def clear_data(data):
@@ -19,6 +14,7 @@ def clear_data(data):
     data = [x for x in data if x != '']
     data = [i.strip() for i in data]
     data = [i.lower() for i in data]
+    return data
 
 
 def count_predictions(predictions):
@@ -50,4 +46,5 @@ def scrapp_comments(url):
             comments.append(comment['comment_text'])
     all['post'] = text_post
     all['sentences'] = comments
+    return all
 
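Note (not part of the diff): a minimal usage sketch of the consolidated pipeline setup, assuming the transformers package is installed and the two Hugging Face model ids used above are downloadable. Passing the model id (and optionally the tokenizer id) as a string lets pipeline() resolve the tokenizer from the model repository, which is what makes the explicit AutoTokenizer calls removable; the example inputs below are placeholders.

# Sketch only: mirrors the simplified construction from errors.py and sentiment.py.
from transformers import pipeline

# Error-correction pipeline; the tokenizer is resolved from the same model repo.
error_pipe = pipeline(
    "text2text-generation",
    model="szymonj/polish-simple-error-correction",
    max_length=2000,
)

# Sentiment pipeline; likewise, no separate AutoTokenizer call is needed.
sentiment_pipe = pipeline(
    "text-classification",
    model="Scigi/sentiment-analysis-model",
)

# Both calls return a list of dicts.
print(error_pipe("to jest przykladowe zdanie"))    # [{'generated_text': ...}]
print(sentiment_pipe(["przykladowy komentarz"]))   # [{'label': ..., 'score': ...}]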