diff --git a/main.py b/main.py
index 0bddde9..217a07a 100644
--- a/main.py
+++ b/main.py
@@ -28,8 +28,8 @@ def words_to_vecs(list_of_words):
     return [nlp(x).vector for x in list_of_words]
 
 def softXEnt(input, target):
-    m = torch.nn.LogSoftmax()
-    logprobs = m(input, dim=1)
+    m = torch.nn.LogSoftmax(dim=1)
+    logprobs = m(input)
     return -(target * logprobs).sum() / input.shape[0]
 
 def compute_class_vector(mark, classes):
@@ -64,6 +64,11 @@ model.train()
 optimizer = torch.optim.AdamW(model.parameters(), lr=0.02)
 loss_function = softXEnt
 
+"""
+TODO
+1) evaluation method
+2) move to cuda !!!!
+"""
 
 if mode == "train":
     for epoch in range(epochs):
diff --git a/util.py b/util.py
index 6199b86..f1610b7 100644
--- a/util.py
+++ b/util.py
@@ -18,7 +18,7 @@ class Model(torch.nn.Module):
         """
         self.lstm = torch.nn.LSTM(150, 300, 2)
         self.dense2 = torch.nn.Linear(300, 7)
-        self.softmax = torch.nn.Softmax()
+        self.softmax = torch.nn.Softmax(dim=1)
 
     def forward(self, data, hidden_state, cell_state):
         data = self.dense1(data.T)
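
For context (not part of the patch): torch.nn.LogSoftmax and torch.nn.Softmax take dim in the constructor, not in the forward call, which is what both fixes address; without dim, recent PyTorch also emits an implicit-dimension deprecation warning. Below is a minimal standalone sketch of the corrected softXEnt, checked against PyTorch's built-in cross-entropy with soft targets (supported since PyTorch 1.10). The shapes are assumptions for illustration: a batch of 4 logit rows over the 7 classes of the Linear(300, 7) head.

import torch
import torch.nn.functional as F

def softXEnt(input, target):
    # dim=1 belongs to the LogSoftmax constructor;
    # calling m(input, dim=1) raises a TypeError
    m = torch.nn.LogSoftmax(dim=1)
    logprobs = m(input)
    # soft cross-entropy, averaged over the batch
    return -(target * logprobs).sum() / input.shape[0]

logits = torch.randn(4, 7)                          # assumed: batch of 4, 7 classes
soft_targets = torch.softmax(torch.randn(4, 7), 1)  # rows sum to 1

# F.cross_entropy accepts class probabilities as targets (PyTorch >= 1.10)
# and reduces with the same per-batch mean, so the two should agree
print(torch.allclose(softXEnt(logits, soft_targets),
                     F.cross_entropy(logits, soft_targets)))  # True

The equivalence holds because F.cross_entropy with probability targets computes -(target * log_softmax(input, 1)).sum(1).mean(), which is the same quantity softXEnt obtains by summing everything and dividing by the batch size.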