Init
commit 8d954b42e8
1  .gitignore  vendored  Normal file
@@ -0,0 +1 @@
*~
38338  meetup.ipynb  Normal file
File diff suppressed because it is too large.
29  run2.py  Executable file
@@ -0,0 +1,29 @@
#!/usr/bin/python3

from transformers import GPT2LMHeadModel, GPT2Tokenizer

import torch

device = 'cuda:0'

# Initialize the tokenizer and the model
tokenizer = GPT2Tokenizer.from_pretrained("gpt2-medium")
model = GPT2LMHeadModel.from_pretrained("gpt2-medium").to(device)

model.half()

# The text you want to continue
input_text = "Yesterday morning, a flying saucer has landed in Poznan and rt"

# Encode the input text
input_ids = tokenizer.encode(input_text, return_tensors='pt').to(device)

# Run the model to get the next-token logits
output = model(input_ids)

distrib = torch.softmax(output[0][0][-1], dim=0)  # logits at the last position -> next-token distribution

values, indices = torch.topk(distrib, 11)  # 11 most probable next tokens

for val, idx in zip(values, indices):
    print(f'{tokenizer.decode([idx])} {idx} {val}')
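
run2.py only prints the 11 most probable next tokens for the prompt; it does not decode a continuation. A minimal sketch, not part of this commit, of how the same checkpoint could produce an actual continuation via the transformers generate API (greedy decoding, run on CPU here for simplicity):

#!/usr/bin/python3
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2-medium")
model = GPT2LMHeadModel.from_pretrained("gpt2-medium")

# Same prompt as run2.py
input_ids = tokenizer.encode("Yesterday morning, a flying saucer has landed in Poznan and rt",
                             return_tensors='pt')
with torch.no_grad():
    # Greedy decoding: append the most probable token, 30 times
    generated = model.generate(input_ids, max_new_tokens=30, do_sample=False)
print(tokenizer.decode(generated[0], skip_special_tokens=True))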
9  test2.py  Executable file
@@ -0,0 +1,9 @@
#!/usr/bin/python3
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import cos_sim

sentences = ["Bardzo lubię jeść słodycze i cukierki.", "Uwielbiam zajadać się słodkościami.", "Mój samochód się zepsuł."]
model = SentenceTransformer("sdadas/st-polish-paraphrase-from-mpnet")
results = model.encode(sentences, convert_to_tensor=True, show_progress_bar=False)

print(results.size())
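
test2.py imports cos_sim but never calls it; the script only prints the shape of the embedding tensor. A small sketch, not part of this commit, of how the imported cos_sim could compare the embeddings (the first two Polish sentences are paraphrases about loving sweets, the third says "My car broke down", so the (0, 1) pair should score highest):

from sentence_transformers import SentenceTransformer
from sentence_transformers.util import cos_sim

sentences = ["Bardzo lubię jeść słodycze i cukierki.",
             "Uwielbiam zajadać się słodkościami.",
             "Mój samochód się zepsuł."]
model = SentenceTransformer("sdadas/st-polish-paraphrase-from-mpnet")
embeddings = model.encode(sentences, convert_to_tensor=True, show_progress_bar=False)

# 3x3 matrix of pairwise cosine similarities between the sentence embeddings
print(cos_sim(embeddings, embeddings))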