06
commit e0e02d388c (parent 9461ddc03b)
@@ -239,12 +239,12 @@ To create such a vocabulary, we will use the ready-made ~Vocab~ class from the torchtext package:
:end:

#+BEGIN_SRC python :session mysession :exports both :results raw drawer
vocab.lookup_tokens([0, 1, 2, 10, 12345])
#+END_SRC

#+RESULTS:
:results:
['<unk>', '</s>', '<s>', 'w', 'wierzyli']
:end:

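The code that builds ~vocab~ sits in the unchanged lines this diff omits. For context, a minimal sketch using torchtext's ~build_vocab_from_iterator~; the corpus file name is taken from the ~shuf~ command further below, while the whitespace tokenization and the ~max_tokens~ cap are only assumptions:

#+BEGIN_SRC python :session mysession :exports both :results raw drawer
from torchtext.vocab import build_vocab_from_iterator

def token_lists():
    # assumed tokenization: one sentence per line, plain whitespace split
    with open('opensubtitlesA.pl.txt') as fh:
        for line in fh:
            yield line.split()

vocab = build_vocab_from_iterator(
    token_lists(),
    max_tokens=20000,                    # illustrative cap on vocabulary size
    specials=['<unk>', '</s>', '<s>'])   # ids 0, 1, 2, as in the lookup above

# round trip: tokens -> ids -> tokens; out-of-vocabulary words fall back to '<unk>'
vocab.set_default_index(vocab['<unk>'])
ids = vocab.forward(['w', 'xyzzy123'])
(ids, vocab.lookup_tokens(ids))
#+END_SRC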
*** Network definition
@@ -272,15 +272,12 @@ We will implement our simple neural network using the PyTorch framework.
model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size)

vocab.set_default_index(vocab['<unk>'])
ixs = torch.tensor(vocab.forward(['pies']))
out = model(ixs)
out[0][vocab['jest']]
#+END_SRC

#+RESULTS:
:results:
:end:
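The definition of ~SimpleBigramNeuralLanguageModel~ lies in the unchanged lines this diff omits. For context, a minimal sketch consistent with how the model is used in these notes: ~model.model[0].weight~ is read as the embedding matrix further below, and the values returned by ~torch.topk~ read as probabilities, which suggests an ~nn.Sequential~ ending in a Softmax layer:

#+BEGIN_SRC python :session mysession :exports both :results raw drawer
import torch
from torch import nn

class SimpleBigramNeuralLanguageModel(nn.Module):
    def __init__(self, vocabulary_size, embedding_size):
        super().__init__()
        # word id -> embedding -> projection onto the vocabulary -> probabilities
        self.model = nn.Sequential(
            nn.Embedding(vocabulary_size, embedding_size),
            nn.Linear(embedding_size, vocabulary_size),
            nn.Softmax(dim=1))

    def forward(self, x):
        return self.model(x)
#+END_SRC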

Now let's train the model. First, let's just shuffle our file:
@@ -329,7 +326,7 @@ shuf < opensubtitlesA.pl.txt > opensubtitlesA.pl.shuf.txt

#+RESULTS:
:results:
(2, 5)
:end:

#+BEGIN_SRC python :session mysession :exports both :results raw drawer
@@ -340,13 +337,13 @@ shuf < opensubtitlesA.pl.txt > opensubtitlesA.pl.shuf.txt

#+RESULTS:
:results:
[tensor([ 2, 5, 51, 3481, 231]), tensor([ 5, 51, 3481, 231, 4])]
:end:
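~train_dataset~ itself is defined in the omitted lines; the batch above, a pair of tensors shifted by one position, indicates that each example is a (current word id, next word id) bigram. A minimal sketch of such a dataset, assuming the vocabulary built earlier and whitespace-tokenized lines (both assumptions):

#+BEGIN_SRC python :session mysession :exports both :results raw drawer
from torch.utils.data import IterableDataset

class Bigrams(IterableDataset):
    def __init__(self, text_file, vocab):
        self.vocab = vocab
        self.text_file = text_file

    def __iter__(self):
        with open(self.text_file) as fh:
            for line in fh:
                # sentence markers around each line, as in the vocabulary
                tokens = ['<s>'] + line.split() + ['</s>']
                ids = self.vocab.forward(tokens)
                # consecutive pairs: (current word, next word)
                yield from zip(ids, ids[1:])

train_dataset = Bigrams('opensubtitlesA.pl.shuf.txt', vocab)
#+END_SRC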

#+BEGIN_SRC python :session mysession :exports both :results raw drawer
device = 'cuda'
model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size).to(device)
data = DataLoader(train_dataset, batch_size=5000)
optimizer = torch.optim.Adam(model.parameters())
criterion = torch.nn.NLLLoss()

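The training loop itself falls in the lines omitted after this hunk. A minimal sketch of one pass over the data, assuming the model emits probabilities (hence the explicit ~torch.log~ before ~NLLLoss~) and that the checkpoint name matches the ~model1.bin~ loaded below:

#+BEGIN_SRC python :session mysession :exports both :results raw drawer
model.train()
for x, y in data:
    x = x.to(device)
    y = y.to(device)
    optimizer.zero_grad()
    ypredicted = model(x)
    # NLLLoss expects log-probabilities; the model returns probabilities
    loss = criterion(torch.log(ypredicted), y)
    loss.backward()
    optimizer.step()

torch.save(model.state_dict(), 'model1.bin')
#+END_SRC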
@@ -372,9 +369,15 @@ shuf < opensubtitlesA.pl.txt > opensubtitlesA.pl.shuf.txt
None
:end:

Let's compute the most probable continuations for a given word:

#+BEGIN_SRC python :session mysession :exports both :results raw drawer
device = 'cuda'
model = SimpleBigramNeuralLanguageModel(vocab_size, embed_size).to(device)
model.load_state_dict(torch.load('model1.bin'))
model.eval()

ixs = torch.tensor(vocab.forward(['dla'])).to(device)

out = model(ixs)
top = torch.topk(out[0], 10)
@@ -386,5 +389,46 @@

#+RESULTS:
:results:
[('ciebie', 73, 0.1580502986907959), ('mnie', 26, 0.15395283699035645), ('<unk>', 0, 0.12862136960029602), ('nas', 83, 0.0410110242664814), ('niego', 172, 0.03281523287296295), ('niej', 245, 0.02104802615940571), ('siebie', 181, 0.020788608118891716), ('którego', 365, 0.019379809498786926), ('was', 162, 0.013852755539119244), ('wszystkich', 235, 0.01381855271756649)]
:end:

Now let's examine the most similar embeddings for a given word. First, for comparison, the most probable continuations of ~kłopot~, then the nearest neighbours of ~poszedł~ in the embedding space:

#+BEGIN_SRC python :session mysession :exports both :results raw drawer
vocab = train_dataset.vocab
ixs = torch.tensor(vocab.forward(['kłopot'])).to(device)

out = model(ixs)
top = torch.topk(out[0], 10)
top_indices = top.indices.tolist()
top_probs = top.values.tolist()
top_words = vocab.lookup_tokens(top_indices)
list(zip(top_words, top_indices, top_probs))
#+END_SRC

#+RESULTS:
:results:
[('.', 3, 0.404473215341568), (',', 4, 0.14222915470600128), ('z', 14, 0.10945753753185272), ('?', 6, 0.09583134204149246), ('w', 10, 0.050338443368673325), ('na', 12, 0.020703863352537155), ('i', 11, 0.016762692481279373), ('<unk>', 0, 0.014571071602404118), ('...', 15, 0.01453721895813942), ('</s>', 1, 0.011769450269639492)]
:end:

#+BEGIN_SRC python :session mysession :exports both :results raw drawer
cos = nn.CosineSimilarity(dim=1, eps=1e-6)

embeddings = model.model[0].weight

vec = embeddings[vocab['poszedł']]

similarities = cos(vec, embeddings)

top = torch.topk(similarities, 10)

top_indices = top.indices.tolist()
top_probs = top.values.tolist()
top_words = vocab.lookup_tokens(top_indices)
list(zip(top_words, top_indices, top_probs))
#+END_SRC

#+RESULTS:
:results:
[('poszedł', 1087, 1.0), ('idziesz', 1050, 0.4907470941543579), ('przyjeżdża', 4920, 0.45242372155189514), ('pojechałam', 12784, 0.4342481195926666), ('wrócił', 1023, 0.431664377450943), ('dobrać', 10351, 0.4312002956867218), ('stałeś', 5738, 0.4258835017681122), ('poszła', 1563, 0.41979148983955383), ('trafiłam', 18857, 0.4109022617340088), ('jedzie', 1674, 0.4091658890247345)]
:end:
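The same nearest-neighbour lookup can be wrapped in a small helper for other words; ~most_similar~ is a name introduced here only for illustration, reusing ~cos~, ~embeddings~ and ~vocab~ from above:

#+BEGIN_SRC python :session mysession :exports both :results raw drawer
def most_similar(word, k=10):
    # cosine similarity between the word's embedding and all embedding rows
    vec = embeddings[vocab[word]]
    top = torch.topk(cos(vec, embeddings), k)
    return list(zip(vocab.lookup_tokens(top.indices.tolist()),
                    top.values.tolist()))

most_similar('poszedł')
#+END_SRC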