#!/usr/bin/python3
"""Print the 11 most probable next tokens predicted by GPT-2 medium for a fixed prompt."""
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch

# Fall back to CPU when no CUDA device is available so the script still runs.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

# Initialize the tokenizer and the model
tokenizer = GPT2Tokenizer.from_pretrained("gpt2-medium")
model = GPT2LMHeadModel.from_pretrained("gpt2-medium").to(device)

# fp16 halves GPU memory use; keep fp32 on CPU, where half precision is
# unsupported/extremely slow for most ops.
if device != 'cpu':
    model.half()

# Switch to inference mode: disables dropout so the scores are deterministic.
model.eval()

# The text you want to continue
input_text = "Yesterday morning, a flying saucer has landed in Poznan and rt"

# Encode the input text as token ids on the target device
input_ids = tokenizer.encode(input_text, return_tensors='pt').to(device)

# Single forward pass; no_grad avoids building the autograd graph (saves
# memory and time — we never backpropagate here).
with torch.no_grad():
    output = model(input_ids)

# output[0] holds the logits (batch, seq_len, vocab); take the distribution
# over the vocabulary at the last input position.
distrib = torch.softmax(output[0][0][-1], dim=0)

# Top 11 candidate next tokens by probability
values, indices = torch.topk(distrib, 11)

for val, idx in zip(values, indices):
    print(f'{tokenizer.decode([idx])} {idx} {val}')