Merge branch 'master' of git.wmi.amu.edu.pl:filipg/aitech-eks
This commit is contained in: commit ce461797fb
@@ -507,11 +507,7 @@
         acc_score += torch.sum((Y_predictions > 0.5) == Y).item()
         items_total += Y.shape[0]
 
-        optimizer.zero_grad()
         loss = criterion(Y_predictions, Y)
-        loss.backward()
-        optimizer.step()
-
 
         loss_score += loss.item() * Y.shape[0]
     return (loss_score / items_total), (acc_score / items_total)
@@ -699,11 +699,7 @@
         acc_score += torch.sum((Y_predictions > 0.5) == Y).item()
         items_total += Y.shape[0]
 
-        optimizer.zero_grad()
         loss = criterion(Y_predictions, Y)
-        loss.backward()
-        optimizer.step()
-
 
         loss_score += loss.item() * Y.shape[0]
     return (loss_score / items_total), (acc_score / items_total)
12380 lines  cw/09_sequence_labeling.ipynb  Normal file
File diff suppressed because one or more lines are too long

931 lines  cw/09_sequence_labeling_ODPOWIEDZI.ipynb  Normal file
@@ -0,0 +1,931 @@

# Multiclass classification and sequence labelling

import numpy as np
import gensim
import torch
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split

from datasets import load_dataset
from torchtext.vocab import Vocab
from collections import Counter

from sklearn.datasets import fetch_20newsgroups
# https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score

## Classification
### Binary classification - 2 classes

CATEGORIES = ['soc.religion.christian', 'alt.atheism']

newsgroups = fetch_20newsgroups(categories=CATEGORIES)

X = newsgroups['data']

Y = newsgroups['target']

Y_names = newsgroups['target_names']

X[0:1]

Y

Y_names

del CATEGORIES, newsgroups, X, Y, Y_names
### Multiclass classification

newsgroups_train_dev = fetch_20newsgroups(subset = 'train')
newsgroups_test = fetch_20newsgroups(subset = 'test')

newsgroups_train_dev_text = newsgroups_train_dev['data']
newsgroups_test_text = newsgroups_test['data']

Y_train_dev = newsgroups_train_dev['target']
Y_test = newsgroups_test['target']

newsgroups_train_text, newsgroups_dev_text, Y_train, Y_dev = train_test_split(newsgroups_train_dev_text, Y_train_dev, random_state=42)

Y_names = newsgroups_train_dev['target_names']

Y_train_dev

Y_names
**What would a reasonable baseline be?**

pd.value_counts(Y_train)

accuracy_score(Y_test, np.ones_like(Y_test) * 10)
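The cell above scores a constant prediction of class 10. The same check for the majority-class baseline, using only names already defined in this notebook (a minimal sketch):

# baseline: always predict the most frequent class of the training split
most_common_class = Counter(Y_train).most_common(1)[0][0]
print(accuracy_score(Y_test, np.ones_like(Y_test) * most_common_class))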
**Question** - how could such a classifier be built using only what was covered in the previous classes?

#### Task - build a one-vs-rest logistic regression classifier on top of TF-IDF. The TF-IDF vectorizer should have a vocabulary of size 10,000.

https://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html
https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer

FEAUTERES = 10_000

vectorizer = TfidfVectorizer(max_features=FEAUTERES)
X_train = vectorizer.fit_transform(newsgroups_train_text)

X_dev = vectorizer.transform(newsgroups_dev_text)
X_test = vectorizer.transform(newsgroups_test_text)

clf = OneVsRestClassifier(LogisticRegression()).fit(X_train, Y_train)

clf.predict(X_train[0:1])

clf.predict_proba(X_train[0:1])

np.max(clf.predict_proba(X_train[0]))

accuracy_score(clf.predict(X_train), Y_train)

accuracy_score(clf.predict(X_dev), Y_dev)

accuracy_score(clf.predict(X_test), Y_test)
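For intuition, the OneVsRestClassifier used above is roughly equivalent to fitting one binary logistic regression per class and predicting the class with the highest score. A minimal sketch under that reading (variable names as defined above; decision_function is used for the per-class scores):

n_classes = len(Y_names)
binary_models = []
for c in range(n_classes):
    # one binary problem per class: "does this document belong to class c?"
    model = LogisticRegression(max_iter=1000).fit(X_train, (Y_train == c).astype(int))
    binary_models.append(model)

# stack the per-class scores and take the argmax for every document
scores = np.column_stack([m.decision_function(X_dev) for m in binary_models])
manual_predictions = scores.argmax(axis=1)
print(accuracy_score(manual_predictions, Y_dev))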
### Softmax approach on TF-IDF

**Task** Based on the previous classes, build a network in PyTorch with no hidden layers, with a single *output* layer followed by a softmax (without training or evaluating the network yet).

Use https://pytorch.org/docs/stable/generated/torch.nn.Softmax.html?highlight=softmax
X_train

class NeuralNetworkModel(torch.nn.Module):

    def __init__(self, FEAUTERES, output_size):
        super(NeuralNetworkModel, self).__init__()
        self.fc1 = torch.nn.Linear(FEAUTERES, output_size)
        # softmax over the class dimension (dim=1 for batched input); see the note on NLLLoss below
        self.softmax = torch.nn.Softmax(dim=1)

    def forward(self, x):
        x = self.fc1(x)
        x = self.softmax(x)
        return x

OUTPUT_SIZE = len(Y_names)

nn_model = NeuralNetworkModel(FEAUTERES, OUTPUT_SIZE)

nn_model(torch.Tensor(X_train[0:3].astype(np.float32).todense()))

BATCH_SIZE = 5

criterion = torch.nn.NLLLoss()

optimizer = torch.optim.SGD(nn_model.parameters(), lr = 0.2)
#optimizer = torch.optim.Adam(nn_model.parameters())
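A remark on the pairing above: torch.nn.NLLLoss expects log-probabilities, so feeding it the output of a plain Softmax layer does not give the usual negative log-likelihood, even though training still runs. The two standard pairings, for reference only (a sketch, not what this notebook does):

# either: end the model with LogSoftmax over the class dimension and keep NLLLoss
# self.softmax = torch.nn.LogSoftmax(dim=1)
criterion = torch.nn.NLLLoss()

# or: return raw logits from the model (no softmax layer at all) and use CrossEntropyLoss,
# which applies log-softmax internally - this is what the NER part below does
criterion = torch.nn.CrossEntropyLoss()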
def get_loss_acc(model, X_dataset, Y_dataset):
    loss_score = 0
    acc_score = 0
    items_total = 0
    model.eval()
    for i in range(0, Y_dataset.shape[0], BATCH_SIZE):
        X = X_dataset[i:i+BATCH_SIZE]
        X = torch.tensor(X.astype(np.float32).todense())
        Y = Y_dataset[i:i+BATCH_SIZE]
        Y = torch.tensor(Y)
        Y_predictions = model(X)
        acc_score += torch.sum(torch.argmax(Y_predictions, dim=1) == Y).item()
        items_total += Y.shape[0]

        loss = criterion(Y_predictions, Y)

        loss_score += loss.item() * Y.shape[0]
    return (loss_score / items_total), (acc_score / items_total)

for epoch in range(5):
    loss_score = 0
    acc_score = 0
    items_total = 0
    nn_model.train()
    for i in range(0, Y_train.shape[0], BATCH_SIZE):
        X = X_train[i:i+BATCH_SIZE]
        X = torch.tensor(X.astype(np.float32).todense())
        Y = Y_train[i:i+BATCH_SIZE]

        Y = torch.tensor(Y)
        Y_predictions = nn_model(X)
        acc_score += torch.sum(torch.argmax(Y_predictions, dim=1) == Y).item()
        items_total += Y.shape[0]

        optimizer.zero_grad()
        loss = criterion(Y_predictions, Y)
        loss.backward()
        optimizer.step()

        loss_score += loss.item() * Y.shape[0]

    display(epoch)
    display(get_loss_acc(nn_model, X_train, Y_train))
    display(get_loss_acc(nn_model, X_dev, Y_dev))

X.shape

newsgroups_train_text
### Softmax approach with embeddings, illustrated with NER

# !pip install torchtext
# !pip install datasets

https://www.aclweb.org/anthology/W03-0419.pdf
dataset = load_dataset("conll2003")

def build_vocab(dataset):
    counter = Counter()
    for document in dataset:
        counter.update(document)
    return Vocab(counter, specials=['<unk>', '<pad>', '<bos>', '<eos>'])

vocab = build_vocab(dataset['train']['tokens'])
dataset['train']['tokens']

len(vocab.itos)

vocab['on']

def data_process(dt):
    return [torch.tensor([vocab['<bos>']] + [vocab[token] for token in document] + [vocab['<eos>']], dtype=torch.long) for document in dt]

def labels_process(dt):
    return [torch.tensor([0] + document + [0], dtype=torch.long) for document in dt]
train_tokens_ids = data_process(dataset['train']['tokens'])

test_tokens_ids = data_process(dataset['test']['tokens'])

train_labels = labels_process(dataset['train']['ner_tags'])

test_labels = labels_process(dataset['test']['ner_tags'])

train_tokens_ids[0]

max([max(x) for x in dataset['train']['ner_tags']])
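The maximum tag id is 8, so there are 9 label classes. To see what the integer ids stand for, the label names can be read from the dataset schema (a sketch relying on the datasets library exposing ner_tags as a sequence of ClassLabel; for CoNLL-2003 this is the usual O plus B-/I- tags for PER, ORG, LOC and MISC):

print(dataset['train'].features['ner_tags'].feature.names)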
class NERModel(torch.nn.Module):

    def __init__(self):
        super(NERModel, self).__init__()
        self.emb = torch.nn.Embedding(23627, 200)  # vocabulary size x embedding dimension
        self.fc1 = torch.nn.Linear(600, 9)          # 3-token window * 200 dims -> 9 NER tag classes
        #self.softmax = torch.nn.Softmax(dim=0)
        # not needed, because we use https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html
        # as the criterion

    def forward(self, x):
        x = self.emb(x)
        x = x.reshape(600)
        x = self.fc1(x)
        #x = self.softmax(x)
        return x

train_tokens_ids[0][1:4]

ner_model = NERModel()

ner_model(train_tokens_ids[0][1:4])

criterion = torch.nn.CrossEntropyLoss()

optimizer = torch.optim.Adam(ner_model.parameters())

len(train_labels)
for epoch in range(2):
    loss_score = 0
    acc_score = 0
    prec_score = 0
    selected_items = 0
    recall_score = 0
    relevant_items = 0
    items_total = 0
    ner_model.train()
    #for i in range(len(train_labels)):
    for i in range(100):
        for j in range(1, len(train_labels[i]) - 1):

            X = train_tokens_ids[i][j-1: j+2]
            Y = train_labels[i][j: j+1]

            Y_predictions = ner_model(X)

            acc_score += int(torch.argmax(Y_predictions) == Y)

            if torch.argmax(Y_predictions) != 0:
                selected_items += 1
            if torch.argmax(Y_predictions) != 0 and torch.argmax(Y_predictions) == Y.item():
                prec_score += 1

            if Y.item() != 0:
                relevant_items += 1
            if Y.item() != 0 and torch.argmax(Y_predictions) == Y.item():
                recall_score += 1

            items_total += 1

            optimizer.zero_grad()
            loss = criterion(Y_predictions.unsqueeze(0), Y)
            loss.backward()
            optimizer.step()

            loss_score += loss.item()

    precision = prec_score / selected_items
    recall = recall_score / relevant_items
    f1_score = (2 * precision * recall) / (precision + recall)
    display('epoch: ', epoch)
    display('loss: ', loss_score / items_total)
    display('acc: ', acc_score / items_total)
    display('prec: ', precision)
    display('recall: ', recall)
    display('f1: ', f1_score)
loss_score = 0
acc_score = 0
prec_score = 0
selected_items = 0
recall_score = 0
relevant_items = 0
items_total = 0
ner_model.eval()
for i in range(100):
#for i in range(len(test_labels)):
    for j in range(1, len(test_labels[i]) - 1):

        X = test_tokens_ids[i][j-1: j+2]
        Y = test_labels[i][j: j+1]

        Y_predictions = ner_model(X)

        acc_score += int(torch.argmax(Y_predictions) == Y)

        if torch.argmax(Y_predictions) != 0:
            selected_items += 1
        if torch.argmax(Y_predictions) != 0 and torch.argmax(Y_predictions) == Y.item():
            prec_score += 1

        if Y.item() != 0:
            relevant_items += 1
        if Y.item() != 0 and torch.argmax(Y_predictions) == Y.item():
            recall_score += 1

        items_total += 1

        loss = criterion(Y_predictions.unsqueeze(0), Y)

        loss_score += loss.item()

precision = prec_score / selected_items
recall = recall_score / relevant_items
f1_score = (2 * precision * recall) / (precision + recall)
display('loss: ', loss_score / items_total)
display('acc: ', acc_score / items_total)
display('prec: ', precision)
display('recall: ', recall)
display('f1: ', f1_score)
### Homework

- clone the repository https://git.wmi.amu.edu.pl/kubapok/en-ner-conll-2003
- build a classifier based on a feed-forward neural network in PyTorch (it may, but does not have to, be based on this notebook)
- the classifier should include additional features (e.g. word length, whether the word starts with a capital letter, the word's stem, whether it contains a digit); a sketch of such features is given after this list
- write the predictions to the files dev-0/out.tsv and test-A/out.tsv
- the F-score checked with the geval tool (see the previous assignment) should be at least 0.60
- put the predictions and the generating scripts (as plain text files, not a Jupyter notebook) in the repo, and post a link to your repo on MS Teams

deadline: 08.06, 80 points

Notebook metadata: kernel "Python 3" (python3), language Python 3.8.5, nbformat 4.4.
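A minimal sketch of the kind of additional per-token features the homework asks for; the helper name and the exact feature set are illustrative, not part of the assignment:

def token_features(token):
    # simple hand-crafted features for a single token;
    # a stem feature (e.g. from an nltk stemmer) could be added in the same way
    return [
        len(token),                             # word length
        int(token[0].isupper()),                # starts with a capital letter
        int(any(c.isdigit() for c in token)),   # contains a digit
        int(token.isupper()),                   # written in all caps, e.g. acronyms
    ]

Such features can be concatenated with the token's embedding before the linear layer of the model.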