Compare commits


56 Commits

Author SHA1 Message Date
72db2aa687 Drobne poprawki w wykładzie 2021-10-05 21:44:46 +02:00
Jakub Pokrywka
f804311da1 reformat 2021-10-05 15:28:02 +02:00
Jakub Pokrywka
3c0223d434 reformat 2021-10-05 15:04:58 +02:00
Jakub Pokrywka
0f34dcdeb4 Merge git.wmi.amu.edu.pl:filipg/aitech-eks 2021-09-28 09:13:36 +02:00
Jakub Pokrywka
5acffc0265 add 03 2021-09-27 14:02:30 +02:00
Jakub Pokrywka
4437a7f71b add ipynb to md conversion 2021-09-27 13:29:40 +02:00
Jakub Pokrywka
ad34aaeae0 add metadata to cw 2021-09-27 12:34:44 +02:00
1836dc18c1 Remove too long text material 2021-09-27 08:10:10 +02:00
fedffd5456 Add metadata 2021-09-27 07:57:37 +02:00
9a61b2c06c Add helper script 2021-09-27 07:43:02 +02:00
22f1e74aef Fixes 2021-09-27 07:42:48 +02:00
72c6fbcbf6 Fixes 2021-09-27 07:36:37 +02:00
a45fd570e5 Merge branch 'master' of git.wmi.amu.edu.pl:filipg/aitech-eks 2021-09-27 06:42:33 +02:00
Jakub Pokrywka
cc675a8591 Merge git.wmi.amu.edu.pl:filipg/aitech-eks 2021-07-12 12:44:57 +02:00
Jakub Pokrywka
93ea351350 split to 14 and 15 2021-07-12 12:44:24 +02:00
kubapok
2cd5bef0a0 Merge git.wmi.amu.edu.pl:filipg/aitech-eks 2021-06-23 10:08:36 +02:00
kubapok
d3c996511c add bbc news train 2021-06-23 10:08:23 +02:00
kubapok
f3db74bfd3 add bbc news train 2021-06-23 10:07:07 +02:00
kubapok
691dd36092 Merge branch 'master' of git.wmi.amu.edu.pl:filipg/aitech-eks-pub 2021-06-23 10:02:16 +02:00
kubapok
2a031bc8d8 add similarity search 2021-06-23 10:01:55 +02:00
077c2b6f90 Merge branch 'master' of git.wmi.amu.edu.pl:filipg/aitech-eks 2021-06-22 21:16:11 +02:00
97d92d38e8 15 2021-06-22 21:15:58 +02:00
Jakub Pokrywka
9d96c9ec4f Merge branch 'master' of git.wmi.amu.edu.pl:filipg/aitech-eks 2021-06-16 15:14:53 +02:00
Jakub Pokrywka
f1bc633468 add transformery2 2021-06-16 15:14:42 +02:00
78fb510cba 14 2021-06-14 15:39:15 +02:00
edf3811cd7 new 2021-06-14 08:23:02 +02:00
89155edea0 Up 2021-06-12 15:47:37 +02:00
Jakub Pokrywka
0ea752f091 add transformers 2021-06-09 12:46:15 +02:00
0d10bc2fca Merge branch 'master' of git.wmi.amu.edu.pl:filipg/aitech-eks 2021-06-09 12:43:37 +02:00
429caef49c Atencja 2021-06-09 12:43:29 +02:00
aebba6c18b Merge branch 'master' of git.wmi.amu.edu.pl:filipg/aitech-eks 2021-06-08 21:14:04 +02:00
98012914c4 bpe 2021-06-08 21:13:39 +02:00
kubapok
af907e23af Merge branch 'master' of git.wmi.amu.edu.pl:filipg/aitech-eks 2021-06-02 13:27:45 +02:00
kubapok
15854bb61f rnn 2021-06-02 13:27:34 +02:00
a004360ec5 11 2021-06-02 13:16:24 +02:00
1071d5ba44 RNN 2021-06-02 13:09:09 +02:00
86a5fbe20c Update 2021-06-01 10:38:23 +02:00
87e0faf1a2 Merge branch 'master' of git.wmi.amu.edu.pl:filipg/aitech-eks 2021-05-26 15:56:33 +02:00
a6a4106844 up 2021-05-26 15:56:22 +02:00
kubapok
d76157a5a5 Merge branch 'master' of git.wmi.amu.edu.pl:filipg/aitech-eks 2021-05-26 14:36:53 +02:00
kubapok
6099566d29 crf 2021-05-26 14:36:47 +02:00
340197a94c fixes 2021-05-26 13:48:10 +02:00
54f52bbc6a Merge branch 'master' of git.wmi.amu.edu.pl:filipg/aitech-eks 2021-05-26 13:29:59 +02:00
10981fc2bc CRF cd. 2021-05-26 13:29:45 +02:00
kubapok
22c32c8a43 Merge branch 'master' of git.wmi.amu.edu.pl:filipg/aitech-eks 2021-05-26 12:44:01 +02:00
kubapok
da334a57fe add crf in progress 2021-05-26 12:43:49 +02:00
90b0947029 up 2021-05-19 16:04:43 +02:00
ebfd32b60d up 2021-05-19 16:04:21 +02:00
b40e011c66 Merge branch 'master' of git.wmi.amu.edu.pl:filipg/aitech-eks-pub 2021-05-19 13:34:11 +02:00
ce461797fb Merge branch 'master' of git.wmi.amu.edu.pl:filipg/aitech-eks 2021-05-19 13:33:40 +02:00
a50afdf750 Klasyfikacja wieloklasowa 2021-05-19 13:33:08 +02:00
65e28bbbb9 org 2021-05-19 13:21:13 +02:00
kubapok
db2fad735e Merge git.wmi.amu.edu.pl:filipg/aitech-eks 2021-05-19 10:32:50 +02:00
kubapok
d2bf465d4a add seq labeling; 2021-05-19 10:32:14 +02:00
kubapok
81e0329199 seq labelling work in progress 2021-05-17 15:24:29 +02:00
kubapok
a8fd576d16 add seq labeling template 2021-05-11 12:37:31 +02:00
88 changed files with 19100 additions and 6085 deletions

add-metadata.py Executable file (+49 lines)

@ -0,0 +1,49 @@
#!/usr/bin/env python3
# Rewrites an .ipynb file in place: injects the course header cell and fills in the notebook metadata.
import json
import sys
import re


def modjup(filen, numer, tytul, typ, author, email, lang, title, year):
    zerocell = ['![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n',
                '<div class="alert alert-block alert-info">\n',
                '<h1> %s </h1>\n' % (title),
                '<h2> %s. <i>%s</i> [%s]</h2> \n' % (numer, tytul, typ),
                '<h3> %s (%s)</h3>\n' % (author, year),
                '</div>\n',
                '\n',
                '![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)']
    zerodict = {'cell_type': 'markdown', 'metadata': {'collapsed': False}, 'source': zerocell}
    with open(filen, 'r+', encoding='utf-8') as f:
        ll = json.load(f)
        ll["metadata"]["author"] = author
        ll["metadata"]["email"] = email
        ll["metadata"]["lang"] = lang
        subtitle = "%s.%s[%s]" % (numer, tytul, typ)
        ll["metadata"]["subtitle"] = subtitle
        ll["metadata"]["title"] = title
        ll["metadata"]["year"] = year
        # Prepend the header cell if it is not there yet, otherwise refresh it.
        if not (ll['cells'][0]['source'][0] == zerocell[0]):
            ll['cells'].insert(0, zerodict)
        else:
            ll['cells'][0] = zerodict
        f.seek(0)
        json.dump(ll, f, indent=4)
        f.truncate()  # drop leftover bytes in case the new JSON is shorter than the old file


# Adjust these values as needed.
filen = sys.argv[1]
numer = re.match(r'^(?:\D+/)?0*(\d+)', filen).group(1)
tytul = sys.argv[2]
typ = "wykład"
author = "Filip Graliński"
email = "filipg@amu.edu.pl"
lang = "pl"
title = "Ekstrakcja informacji"
year = "2021"

# Run the procedure.
modjup(filen, numer, tytul, typ, author, email, lang, title, year)
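For context, the metadata written by add-metadata.py ends up under the notebook's top-level `metadata` key, and the injected header becomes the first cell. A quick, hypothetical check of a processed notebook (the path below is an example, not a file from this repository):

~~~
import json

# Hypothetical path; any notebook already processed by add-metadata.py will do.
with open("wyk/04_wyszukiwarki.ipynb", encoding="utf-8") as f:
    nb = json.load(f)

# Fields filled in by the script:
print(nb["metadata"]["title"], "|", nb["metadata"]["subtitle"], "|", nb["metadata"]["year"])
# The first cell should start with the "Logo 1" image line injected by the script.
print(nb["cells"][0]["source"][0])
~~~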

convert_ipynb_to_md.sh Normal file (+7 lines)

@ -0,0 +1,7 @@
set -ex
# Convert a notebook to Markdown: nbconvert emits a .py script, then the helper script turns it into .md.
FILEIPYNB=$1
jupyter nbconvert --to script "$FILEIPYNB"
FILEPY=$(echo "$FILEIPYNB" | sed 's/\.ipynb$/.py/')
FILEMD=$(echo "$FILEIPYNB" | sed 's/\.ipynb$/.md/')
python convert_python_to_markdown.py "$FILEPY" "$FILEMD"
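The wrapper above shells out to nbconvert and then to the helper script shown next. A rough Python-only alternative, assuming nbconvert's MarkdownExporter output is acceptable instead of the two-step .py-to-.md route, could look like this:

~~~
import sys

import nbformat
from nbconvert import MarkdownExporter

# Sketch only: export a notebook straight to Markdown (usage: python ipynb2md.py input.ipynb output.md).
nb = nbformat.read(sys.argv[1], as_version=4)
body, _resources = MarkdownExporter().from_notebook_node(nb)
with open(sys.argv[2], "w", encoding="utf-8") as f:
    f.write(body)
~~~

Note this keeps code cells as well, unlike convert_python_to_markdown.py, which keeps only the Markdown text.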

convert_python_to_markdown.py Normal file

@ -0,0 +1,13 @@
import sys

from bs4 import BeautifulSoup
from markdown import markdown

# Turn the .py script produced by `jupyter nbconvert --to script` into Markdown:
# skip the nbconvert header lines and keep only the commented-out Markdown cells,
# stripping their leading "# " and dropping the "# In[...]:" cell markers.
with open(sys.argv[1]) as f_in, open(sys.argv[2], 'w') as f_out:
    for i, line in enumerate(f_in):
        if i in (1, 2):
            continue
        if line[:2] == "# " and line[:5] != "# In[":
            text = line[:2]
            f_out.write(line[2:])
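To make the filtering rule concrete, here is a small self-contained sketch of what the loop above keeps and drops for typical nbconvert output (the sample lines are illustrative only):

~~~
import io

sample = io.StringIO(
    "#!/usr/bin/env python\n"
    "# coding: utf-8\n"
    "\n"
    "# In[1]:\n"
    "# # Nagłówek z komórki Markdown\n"
    "print('kod')\n"
)

kept = []
for i, line in enumerate(sample):
    if i in (1, 2):            # the nbconvert header lines are skipped
        continue
    if line[:2] == "# " and line[:5] != "# In[":
        kept.append(line[2:])  # Markdown text survives, everything else is dropped

print("".join(kept))           # -> "# Nagłówek z komórki Markdown"
~~~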

View File

@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 0. <i>Informacje na temat przedmiotu</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@ -57,21 +71,17 @@
"\n", "\n",
"**Żeby zaliczyć przedmiot należy pojawiać się na laboratoriach. Maksymalna liczba nieobecności to 3. Obecność będę sprawdzał poprzez panel MS TEAMS, czyli będę sprawdzał czy ktoś jest wdzwoniony na ćwiczenia. Jeżeli kogoś nie będzie więcej niż 3 razy, to nie będzie miał zaliczonego przedmiotu** \n" "**Żeby zaliczyć przedmiot należy pojawiać się na laboratoriach. Maksymalna liczba nieobecności to 3. Obecność będę sprawdzał poprzez panel MS TEAMS, czyli będę sprawdzał czy ktoś jest wdzwoniony na ćwiczenia. Jeżeli kogoś nie będzie więcej niż 3 razy, to nie będzie miał zaliczonego przedmiotu** \n"
] ]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
} }
], ],
"metadata": { "metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -83,7 +93,10 @@
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.3" "version": "3.8.3"
} },
"subtitle": "0.Informacje na temat przedmiotu[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4

View File

@ -1,181 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Opracować w języku Haskell wyspecjalizowanego robota pobierającego dane z konkretnego serwisu.\n",
"\n",
"Punkty: 80 (domyślnie - niektóre zadanie są trudniejsze, wówczas podaję osobno liczbę punktów)\n",
"\n",
"Ogólne zasady:\n",
"\n",
"* pobieramy informacje (metadane) o plikach PDF, DjVU, JPG itp, ale nie same pliki,\n",
"* nie pobierajmy całego serwisu, tylko tyle, ile trzeba, by pobrać metadane o interesujących nas zasobach,\n",
"* interesują nas tylko teksty polskie, jeśli nie jest to trudne, należy odfiltrować publikacje obcojęzyczne,\n",
"* staramy się ustalać datę z możliwie dużą dokładnością.\n",
"\n",
"Sposób pracy:\n",
"\n",
"0. Pobrać Haskell Stack\n",
"\n",
"~~~\n",
"curl -sSL https://get.haskellstack.org/ | sh -s - -d ~/bin\n",
"~~~\n",
"\n",
"Na fizycznych komputerach wydziałowych są błędnie ustawione prawa dostępu na dyskach sieciowych, Haskell Stack musi działać na fizycznym dysku:\n",
"\n",
"~~~\n",
"rm -rf /mnt/poligon/.stack\n",
"mkdir /mnt/poligon/.stack\n",
"mv ~/.stack ~/.stack-bak # gdyby już był... proszę się nie przejmować błędem\n",
"ln -s /mnt/poligon/.stack ~/.stack\n",
"~~~\n",
"\n",
"1. Pobrać repozytorium:\n",
"\n",
"~~~\n",
"git clone https://git.wmi.amu.edu.pl/filipg/twilight-library.git\n",
"~~~\n",
"\n",
"2. Wypchnąć na początek do swojego repozytorium (trzeba sobie najpierw założyć to repozytorium na <https://git.wmi.amu.edu.pl>)\n",
"\n",
"~~~\n",
"cd twilight-library\n",
"git remote set-url origin git@git.wmi.amu.edu.pl:YOURID/twilight-library\n",
"git push origin master\n",
"git remote add mother git://gonito.net/twilight-library\n",
"~~~\n",
"\n",
"3. Zobacz, czy przykładowy robot dla strony z „Alamanachem Muszyny” działa:\n",
"\n",
"~~~\n",
"~/bin/stack install # może trwać długo za pierwszym razem\n",
"~/bin/stack exec almanachmuszyny\n",
"~~~\n",
"\n",
"\n",
"W razie problemów z instalacją:\n",
"\n",
"~~~\n",
"sudo apt install libpcre3 libpcre3-dev\n",
"~~~\n",
"\n",
"3. Opracuj swojego robota wzorując się na pliku `almanachmuszyny.hs`.\n",
" (Ale dodaj swój plik, nie zmieniaj `almanachmuszyny.hs`!)\n",
"\n",
"4. Dopisz specyfikację swojego robota do `shadow-library.cabal`.\n",
"\n",
"5. Pracuj nad swoim robotem, uruchamiaj go w następujący sposób:\n",
"\n",
"~~~\n",
"~/bin/stack install\n",
"~/bin/stack exec mojrobot\n",
"~~~\n",
"\n",
"(Tzn. nie nazywaj go „mojrobot”, tylko użyj jakieś sensownej nazwy.)\n",
"\n",
"6. Jeśli publikacja (np. pojedynczy numer gazety) składa się z wielu plików, powinien zostać wygenerowany jeden\n",
"rekord, w `finalUrl` powinny znaleźć się URL do poszczególnych stron (np. plików JPR) oddzielone ` // `.\n",
"\n",
"7. Po zakończeniu prac prześlij mejla do prowadzącego zajęcia z URL-em do swojego repozytorium.\n",
"\n",
"Lista serwisów do wyboru (na każdy serwis 1 osoba):\n",
"\n",
"1. [Teksty Drugie](http://tekstydrugie.pl)\n",
"2. [Archiwum Inspektora Pracy](https://www.pip.gov.pl/pl/inspektor-pracy/66546,archiwum-inspektora-pracy-.html)\n",
"3. [Medycyna Weterynaryjna](http://www.medycynawet.edu.pl/archives) — również historyczne zasoby od 1945 roku, **120 punktów**\n",
"4. [Polskie Towarzystwo Botaniczne](https://pbsociety.org.pl/default/dzialalnosc-wydawnicza/) — wszystkie dostępne zdigitalizowane publikacje!, **130 punktow**\n",
"5. [Wieści Pepowa](http://archiwum2019.pepowo.pl/news/c-10/gazeta) — nie pominąć strony nr 2 z wynikami, **110 punktów**\n",
"6. [Czasopismo Kosmos](http://kosmos.icm.edu.pl/)\n",
"7. [Czasopismo Wszechświat](http://www.ptpk.org/archiwum.html)\n",
"8. [Czasopisma polonijne we Francji](https://argonnaute.parisnanterre.fr/ark:/14707/a011403267917yQQFAS) — najlepiej w postaci PDF-ów, jak np. [https://argonnaute.parisnanterre.fr/medias/customer_3/periodique/immi_pol_lotmz1_pdf/BDIC_GFP_2929_1945_039.pdf](), **220 punktów**\n",
"9. [Muzeum Sztuki — czasopisma](https://zasoby.msl.org.pl/mobjects/show), **220 punktów**, publikacje, teksty, czasopisma, wycinki\n",
"10. [Wiadomości Urzędu Patentowego](https://grab.uprp.pl/sites/Wydawnictwa/WydawnictwaArchiwum/WydawnictwaArchiwum/Forms/AllItems.aspx)\n",
"11. [Czas, czasopismo polonijne](https://digitalcollections.lib.umanitoba.ca/islandora/object/uofm:2222545), **140 punktów** S.G.\n",
"12. [Stenogramy Okrągłego Stołu](http://okragly-stol.pl/stenogramy/), **110 punktów**\n",
"13. [Nasze Popowice](https://smpopowice.pl/index.php/numery-archiwalne)\n",
"14. [Czasopisma entomologiczne](http://pte.au.poznan.pl/)\n",
"15. [Wiadomości matematyczne](https://wydawnictwa.ptm.org.pl/index.php/wiadomosci-matematyczne/issue/archive?issuesPage=2), **120 punktow**\n",
"16. [Alkoholizm i Narkomania](http://www.ain.ipin.edu.pl/archiwum-starsze.html)\n",
"17. [Czasopismo Etyka](https://etyka.uw.edu.pl/tag/etyka-562018/), O.K.\n",
"18. [Skup makulatury](https://chomikuj.pl/skup.makulatury.prl), **250 punktów**\n",
"19. [Hermes](https://chomikuj.pl/hermes50-1) i https://chomikuj.pl/hermes50-2, **250 punktów**\n",
"20. [E-dziennik Województwa Mazowieckiego](https://edziennik.mazowieckie.pl/actbymonths) **150 punktów**\n",
"21. [Czasopismo Węgiel Brunatny](http://www.ppwb.org.pl/wegiel_brunatny)\n",
"22. [Gazeta GUM](https://gazeta.gumed.edu.pl/61323.html)\n",
"23. [Nowiny Andrychowskie](https://radioandrychow.pl/nowiny/)\n",
"24. [Kawęczyniak](http://bip.kaweczyn.pl/kaweczyn/pl/dla-mieszkanca/publikacje/archiwalne-numery-kaweczyniaka-rok-1995-2005/kaweczyniaki-rok-1997.html)\n",
"25. [Zbór Chrześcijański w Bielawia](http://zborbielawa.pl/archiwum/)\n",
"26. [Gazeta Rytwiańska](http://www.rytwiany.com.pl/index.php?sid=5)\n",
"27. [Nasze Popowice](https://smpopowice.pl/gazeta/2005_12_nasze-popowice-nr_01.pdf)\n",
"28. [Echo Chełmka](http://moksir.chelmek.pl/o-nas/echo-chelmka)\n",
"29. [Głos Świdnika](http://s.bibliotekaswidnik.pl/index.php/archwium/116-glos-swidnika) **100 punktów**\n",
"30. [Aneks](https://aneks.kulturaliberalna.pl/archiwum-aneksu/) **90 punktów**\n",
"31. [Teatr Lalel](http://polunima.pl/teatr-lalek)\n",
"32. [Biuletyn Bezpieczna Chemia](https://www.pipc.org.pl/publikacje/biuletyn-bezpieczna-chemia)\n",
"33. [Głos Maszynisty](https://zzm.org.pl/glos-maszynisty/)\n",
"34. [Kultura Paryska](https://www.kulturaparyska.com/pl/index), całe archiwum z książkami i innymi czasopismami, **180 punktów**\n",
"35. [Gazeta Fabryczna - Kraśnik](https://80lat.flt.krasnik.pl/index.php/gazeta-fabryczna/) **120 punktów**\n",
"36. [Artykuły o Jujutsu](http://www.kobudo.pl/artykuly_jujutsu.html)\n",
"37. [Wycinki o Taekwon-Do](https://www2.pztkd.lublin.pl/archpras.html#z1996)\n",
"38. [Materiały o kolejnictwie](https://enkol.pl/Strona_g%C5%82%C3%B3wna) **180 punktów**\n",
"39. [Centralny Instytut Ochrony Pracy](http://archiwum.ciop.pl/), znaleźć wszystkie publikacje typu <http://archiwum.ciop.pl/44938>, wymaga trochę sprytu **130 punktów**\n",
"40. [Biblioteka Sejmowa - Zasoby Cyfrowe](https://biblioteka.sejm.gov.pl/zasoby_cyfrowe/), **200 punktów**\n",
"41. [Elektronika Praktyczna](https://ep.com.pl/archiwum), te numery, które dostępne w otwarty sposób, np. rok 1993\n",
"42. [Litewska Akademia Nauk](http://www.mab.lt/), tylko materiały w jęz. polskim, takie jak np.\n",
" <https://elibrary.mab.lt/handle/1/840>, **170 punktów**\n",
"43. [Litewska Biblioteka Cyfrowa](https://www.epaveldas.lt), wyłuskać tylko materiały w jęz. polskim, **190 punktów**\n",
"44. [Czasopisma Geologiczne](https://geojournals.pgi.gov.pl), **120 punktów**\n",
"45. [Czasopisma PTTK](https://www.czasopisma.centralnabibliotekapttk.pl/index.php?i3), **120 punktów**\n",
"46. [Czasopisma Polskiego Towarzystwa Dendrologicznego](https://www.ptd.pl/?page_id=7), **100 punktów**\n",
"47. [Kilka przedwojennych książek](https://dziemiela.com/documents.htm)\n",
"48. [Historia polskiej informatyki](http://klio.spit.iq.pl/a4-wyroby-polskiej-informatyki/a4-2-sprzet/) - wyjątkowo bez datowania\n",
"49. [Zeszyty Formacyjne Katolickiego Stowarzyszenia „Civitas Christania”](http://podkarpacki.civitaschristiana.pl/formacja/zeszyty-formacyjne/), tylko niektóre pliki można zdatować\n",
"50. [Józef Piłsudski Institute of America](https://archiwa.pilsudski.org/) - **220 punktów**\n",
"51. [Prasa podziemna — Częstochowa](http://www.podziemie.com.pl), również ulotki i inne materiały skanowane - **180 punktów**\n",
"52. [Tajemnica Atari](http://krap.pl/mirrorz/atari/horror.mirage.com.pl/pixel/), plik ZIP z DjVu\n",
"\n",
"\n",
"### F.A.Q.\n",
"\n",
"**P: Nie działają strony z protokołem https, co zrobić?**\n",
"\n",
"O: Trzeba użyć modułu opartego na bibliotece curl. Paczka Ubuntu została zainstalowana na komputerach wydziałowych. Na\n",
"swoim komputerze możemy zainstalować paczkę libcurl4-openssl-dev, a\n",
"następnie można sobie ściągnąć wersję twilight-library opartą na libcurl:\n",
"\n",
" git fetch git://gonito.net/twilight-library withcurl\n",
" git merge FETCH_HEAD\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 1. <i>Wyszukiwarki wprowadzenie</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@ -234,11 +248,14 @@
} }
], ],
"metadata": { "metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -249,8 +266,11 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.5" "version": "3.8.3"
} },
"subtitle": "1.Wyszukiwarki wprowadzenie[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4

View File

@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 2. <i>Wyszukiwarki roboty</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@ -262,21 +276,17 @@
"67. [Instytut Techniki Górniczej - wycinki](http://www.komag.gliwice.pl/archiwum/historia-komag)\n", "67. [Instytut Techniki Górniczej - wycinki](http://www.komag.gliwice.pl/archiwum/historia-komag)\n",
"\n" "\n"
] ]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
} }
], ],
"metadata": { "metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -287,8 +297,11 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.9.1" "version": "3.8.3"
} },
"subtitle": "2.Wyszukiwarki roboty[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4

View File

@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 3. <i>tfidf (1)</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@ -1065,21 +1079,17 @@
"$|D|$ - ilość dokumentów w korpusie\n", "$|D|$ - ilość dokumentów w korpusie\n",
"$|\\{d : t_i \\in d \\}|$ - ilość dokumentów w korpusie, gdzie dany term występuje chociaż jeden raz" "$|\\{d : t_i \\in d \\}|$ - ilość dokumentów w korpusie, gdzie dany term występuje chociaż jeden raz"
] ]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
} }
], ],
"metadata": { "metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -1091,7 +1101,10 @@
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.3" "version": "3.8.3"
} },
"subtitle": "3.tfidf (1)[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4

View File

@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 3. <i>tfidf (1)</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 1, "execution_count": 1,
@ -46,11 +60,14 @@
} }
], ],
"metadata": { "metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -62,7 +79,10 @@
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.3" "version": "3.8.3"
} },
"subtitle": "3.tfidf (1)[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4

View File

@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 3. <i>tfidf (2)</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@ -434,217 +448,11 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 21, "execution_count": null,
"metadata": { "metadata": {
"scrolled": false "scrolled": true
}, },
"outputs": [ "outputs": [],
{
"name": "stdout",
"output_type": "stream",
"text": [
"From: ray@netcom.com (Ray Fischer)\n",
"Subject: Re: x86 ~= 680x0 ?? (How do they compare?)\n",
"Organization: Netcom. San Jose, California\n",
"Distribution: usa\n",
"Lines: 36\n",
"\n",
"dhk@ubbpc.uucp (Dave Kitabjian) writes ...\n",
">I'm sure Intel and Motorola are competing neck-and-neck for \n",
">crunch-power, but for a given clock speed, how do we rank the\n",
">following (from 1st to 6th):\n",
"> 486\t\t68040\n",
"> 386\t\t68030\n",
"> 286\t\t68020\n",
"\n",
"040 486 030 386 020 286\n",
"\n",
">While you're at it, where will the following fit into the list:\n",
"> 68060\n",
"> Pentium\n",
"> PowerPC\n",
"\n",
"060 fastest, then Pentium, with the first versions of the PowerPC\n",
"somewhere in the vicinity.\n",
"\n",
">And about clock speed: Does doubling the clock speed double the\n",
">overall processor speed? And fill in the __'s below:\n",
"> 68030 @ __ MHz = 68040 @ __ MHz\n",
"\n",
"No. Computer speed is only partly dependent of processor/clock speed.\n",
"Memory system speed play a large role as does video system speed and\n",
"I/O speed. As processor clock rates go up, the speed of the memory\n",
"system becomes the greatest factor in the overall system speed. If\n",
"you have a 50MHz processor, it can be reading another word from memory\n",
"every 20ns. Sure, you can put all 20ns memory in your computer, but\n",
"it will cost 10 times as much as the slower 80ns SIMMs.\n",
"\n",
"And roughly, the 68040 is twice as fast at a given clock\n",
"speed as is the 68030.\n",
"\n",
"-- \n",
"Ray Fischer \"Convictions are more dangerous enemies of truth\n",
"ray@netcom.com than lies.\" -- Friedrich Nietzsche\n",
"\n",
"0.4778416465020907\n",
"----------------------------------------------------------------------------------------------------\n",
"----------------------------------------------------------------------------------------------------\n",
"----------------------------------------------------------------------------------------------------\n",
"From: rvenkate@ux4.cso.uiuc.edu (Ravikuma Venkateswar)\n",
"Subject: Re: x86 ~= 680x0 ?? (How do they compare?)\n",
"Distribution: usa\n",
"Organization: University of Illinois at Urbana\n",
"Lines: 59\n",
"\n",
"ray@netcom.com (Ray Fischer) writes:\n",
"\n",
">dhk@ubbpc.uucp (Dave Kitabjian) writes ...\n",
">>I'm sure Intel and Motorola are competing neck-and-neck for \n",
">>crunch-power, but for a given clock speed, how do we rank the\n",
">>following (from 1st to 6th):\n",
">> 486\t\t68040\n",
">> 386\t\t68030\n",
">> 286\t\t68020\n",
"\n",
">040 486 030 386 020 286\n",
"\n",
"How about some numbers here? Some kind of benchmark?\n",
"If you want, let me start it - 486DX2-66 - 32 SPECint92, 16 SPECfp92 .\n",
"\n",
">>While you're at it, where will the following fit into the list:\n",
">> 68060\n",
">> Pentium\n",
">> PowerPC\n",
"\n",
">060 fastest, then Pentium, with the first versions of the PowerPC\n",
">somewhere in the vicinity.\n",
"\n",
"Numbers? Pentium @66MHz - 65 SPECint92, 57 SPECfp92 .\n",
"\t PowerPC @66MHz - 50 SPECint92, 80 SPECfp92 . (Note this is the 601)\n",
" (Alpha @150MHz - 74 SPECint92,126 SPECfp92 - just for comparison)\n",
"\n",
">>And about clock speed: Does doubling the clock speed double the\n",
">>overall processor speed? And fill in the __'s below:\n",
">> 68030 @ __ MHz = 68040 @ __ MHz\n",
"\n",
">No. Computer speed is only partly dependent of processor/clock speed.\n",
">Memory system speed play a large role as does video system speed and\n",
">I/O speed. As processor clock rates go up, the speed of the memory\n",
">system becomes the greatest factor in the overall system speed. If\n",
">you have a 50MHz processor, it can be reading another word from memory\n",
">every 20ns. Sure, you can put all 20ns memory in your computer, but\n",
">it will cost 10 times as much as the slower 80ns SIMMs.\n",
"\n",
"Not in a clock-doubled system. There isn't a doubling in performance, but\n",
"it _is_ quite significant. Maybe about a 70% increase in performance.\n",
"\n",
"Besides, for 0 wait state performance, you'd need a cache anyway. I mean,\n",
"who uses a processor that runs at the speed of 80ns SIMMs? Note that this\n",
"memory speed corresponds to a clock speed of 12.5 MHz.\n",
"\n",
">And roughly, the 68040 is twice as fast at a given clock\n",
">speed as is the 68030.\n",
"\n",
"Numbers?\n",
"\n",
">-- \n",
">Ray Fischer \"Convictions are more dangerous enemies of truth\n",
">ray@netcom.com than lies.\" -- Friedrich Nietzsche\n",
"-- \n",
"Ravikumar Venkateswar\n",
"rvenkate@uiuc.edu\n",
"\n",
"A pun is a no' blessed form of whit.\n",
"\n",
"0.44292082969477664\n",
"----------------------------------------------------------------------------------------------------\n",
"----------------------------------------------------------------------------------------------------\n",
"----------------------------------------------------------------------------------------------------\n",
"From: ray@netcom.com (Ray Fischer)\n",
"Subject: Re: x86 ~= 680x0 ?? (How do they compare?)\n",
"Organization: Netcom. San Jose, California\n",
"Distribution: usa\n",
"Lines: 30\n",
"\n",
"rvenkate@ux4.cso.uiuc.edu (Ravikuma Venkateswar) writes ...\n",
">ray@netcom.com (Ray Fischer) writes:\n",
">>040 486 030 386 020 286\n",
">\n",
">How about some numbers here? Some kind of benchmark?\n",
"\n",
"Benchmarks are for marketing dweebs and CPU envy. OK, if it will make\n",
"you happy, the 486 is faster than the 040. BFD. Both architectures\n",
"are nearing then end of their lifetimes. And especially with the x86\n",
"architecture: good riddance.\n",
"\n",
">Besides, for 0 wait state performance, you'd need a cache anyway. I mean,\n",
">who uses a processor that runs at the speed of 80ns SIMMs? Note that this\n",
">memory speed corresponds to a clock speed of 12.5 MHz.\n",
"\n",
"The point being the processor speed is only one of many aspects of a\n",
"computers performance. Clock speed, processor, memory speed, CPU\n",
"architecture, I/O systems, even the application program all contribute \n",
"to the overall system performance.\n",
"\n",
">>And roughly, the 68040 is twice as fast at a given clock\n",
">>speed as is the 68030.\n",
">\n",
">Numbers?\n",
"\n",
"Look them up yourself.\n",
"\n",
"-- \n",
"Ray Fischer \"Convictions are more dangerous enemies of truth\n",
"ray@netcom.com than lies.\" -- Friedrich Nietzsche\n",
"\n",
"0.3491800997095306\n",
"----------------------------------------------------------------------------------------------------\n",
"----------------------------------------------------------------------------------------------------\n",
"----------------------------------------------------------------------------------------------------\n",
"From: mb4008@cehp11 (Morgan J Bullard)\n",
"Subject: Re: speeding up windows\n",
"Keywords: speed\n",
"Organization: University of Illinois at Urbana\n",
"Lines: 30\n",
"\n",
"djserian@flash.LakeheadU.Ca (Reincarnation of Elvis) writes:\n",
"\n",
">I have a 386/33 with 8 megs of memory\n",
"\n",
">I have noticed that lately when I use programs like WpfW or Corel Draw\n",
">my computer \"boggs\" down and becomes really sluggish!\n",
"\n",
">What can I do to increase performance? What should I turn on or off\n",
"\n",
">Will not loading wallpapers or stuff like that help when it comes to\n",
">the running speed of windows and the programs that run under it?\n",
"\n",
">Thanx in advance\n",
"\n",
">Derek\n",
"\n",
"1) make sure your hard drive is defragmented. This will speed up more than \n",
" just windows BTW. Use something like Norton's or PC Tools.\n",
"2) I _think_ that leaving the wall paper out will use less RAM and therefore\n",
" will speed up your machine but I could very will be wrong on this.\n",
"There's a good chance you've already done this but if not it may speed things\n",
"up. good luck\n",
"\t\t\t\tMorgan Bullard mb4008@coewl.cen.uiuc.edu\n",
"\t\t\t\t\t or mjbb@uxa.cso.uiuc.edu\n",
"\n",
">--\n",
">$_ /|$Derek J.P. Serianni $ E-Mail : djserian@flash.lakeheadu.ca $ \n",
">$\\'o.O' $Sociologist $ It's 106 miles to Chicago,we've got a full tank$\n",
">$=(___)=$Lakehead University $ of gas, half a pack of cigarettes,it's dark,and$\n",
">$ U $Thunder Bay, Ontario$ we're wearing sunglasses. -Elwood Blues $ \n",
"\n",
"0.26949927393886913\n",
"----------------------------------------------------------------------------------------------------\n",
"----------------------------------------------------------------------------------------------------\n",
"----------------------------------------------------------------------------------------------------\n"
]
}
],
"source": [ "source": [
"for i in range (1,5):\n", "for i in range (1,5):\n",
" print(newsgroups[similarities.argsort()[0][-i]])\n", " print(newsgroups[similarities.argsort()[0][-i]])\n",
@ -685,11 +493,14 @@
} }
], ],
"metadata": { "metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -701,7 +512,10 @@
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.3" "version": "3.8.3"
} },
"subtitle": "3.tfidf (2)[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4

View File

@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 4. <i>Wyszukiwarki</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@ -71,21 +85,17 @@
" * proszę zaznaczyć w MS TEAMS, że Państwo zrobili zadanie w assigments\n", " * proszę zaznaczyć w MS TEAMS, że Państwo zrobili zadanie w assigments\n",
" * zdawanie zadania będzie na zajęciach. Proszę przygotować prezentację do 5 minut" " * zdawanie zadania będzie na zajęciach. Proszę przygotować prezentację do 5 minut"
] ]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
} }
], ],
"metadata": { "metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -96,8 +106,11 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.9.1" "version": "3.8.3"
} },
"subtitle": "4.wyszukiwarki[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4

View File

@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 5. <i>Ekstrakcja informacji z dokumentów</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@ -213,11 +227,14 @@
} }
], ],
"metadata": { "metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -229,7 +246,10 @@
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.3" "version": "3.8.3"
} },
"subtitle": "5.ekEtrakcja informacji z dokumentCCow[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4

View File

@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 6. <i>Klasyfikacja</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@ -253,104 +267,11 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 14, "execution_count": null,
"metadata": { "metadata": {
"scrolled": true "scrolled": true
}, },
"outputs": [ "outputs": [],
{
"data": {
"text/plain": [
"['New_York',\n",
" 'New_York',\n",
" 'Delaware',\n",
" 'Massachusetts',\n",
" 'Delaware',\n",
" 'Washington',\n",
" 'Delaware',\n",
" 'New_Jersey',\n",
" 'New_York',\n",
" 'NONE',\n",
" 'NONE',\n",
" 'Delaware',\n",
" 'Delaware',\n",
" 'Delaware',\n",
" 'New_York',\n",
" 'Massachusetts',\n",
" 'Minnesota',\n",
" 'California',\n",
" 'New_York',\n",
" 'California',\n",
" 'Iowa',\n",
" 'California',\n",
" 'Virginia',\n",
" 'North_Carolina',\n",
" 'Arizona',\n",
" 'Indiana',\n",
" 'New_Jersey',\n",
" 'California',\n",
" 'Delaware',\n",
" 'Georgia',\n",
" 'New_York',\n",
" 'New_York',\n",
" 'California',\n",
" 'Minnesota',\n",
" 'California',\n",
" 'Kentucky',\n",
" 'Minnesota',\n",
" 'Ohio',\n",
" 'Michigan',\n",
" 'California',\n",
" 'Minnesota',\n",
" 'California',\n",
" 'Delaware',\n",
" 'Illinois',\n",
" 'Minnesota',\n",
" 'Texas',\n",
" 'New_Jersey',\n",
" 'Delaware',\n",
" 'Washington',\n",
" 'NONE',\n",
" 'Delaware',\n",
" 'Oregon',\n",
" 'Delaware',\n",
" 'Delaware',\n",
" 'Delaware',\n",
" 'Massachusetts',\n",
" 'California',\n",
" 'NONE',\n",
" 'Delaware',\n",
" 'Illinois',\n",
" 'Idaho',\n",
" 'Washington',\n",
" 'New_York',\n",
" 'New_York',\n",
" 'California',\n",
" 'Utah',\n",
" 'Delaware',\n",
" 'Washington',\n",
" 'Virginia',\n",
" 'New_York',\n",
" 'New_York',\n",
" 'Illinois',\n",
" 'California',\n",
" 'Delaware',\n",
" 'NONE',\n",
" 'Texas',\n",
" 'California',\n",
" 'Washington',\n",
" 'Delaware',\n",
" 'Washington',\n",
" 'New_York',\n",
" 'Washington',\n",
" 'Illinois']"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"dev_expected_jurisdiction" "dev_expected_jurisdiction"
] ]
@ -942,11 +863,14 @@
} }
], ],
"metadata": { "metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -958,7 +882,10 @@
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.3" "version": "3.8.3"
} },
"subtitle": "6.Klasyfikacja[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4

View File

@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 6. <i>Klasyfikacja</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@ -253,104 +267,11 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 14, "execution_count": null,
"metadata": { "metadata": {
"scrolled": true "scrolled": true
}, },
"outputs": [ "outputs": [],
{
"data": {
"text/plain": [
"['New_York',\n",
" 'New_York',\n",
" 'Delaware',\n",
" 'Massachusetts',\n",
" 'Delaware',\n",
" 'Washington',\n",
" 'Delaware',\n",
" 'New_Jersey',\n",
" 'New_York',\n",
" 'NONE',\n",
" 'NONE',\n",
" 'Delaware',\n",
" 'Delaware',\n",
" 'Delaware',\n",
" 'New_York',\n",
" 'Massachusetts',\n",
" 'Minnesota',\n",
" 'California',\n",
" 'New_York',\n",
" 'California',\n",
" 'Iowa',\n",
" 'California',\n",
" 'Virginia',\n",
" 'North_Carolina',\n",
" 'Arizona',\n",
" 'Indiana',\n",
" 'New_Jersey',\n",
" 'California',\n",
" 'Delaware',\n",
" 'Georgia',\n",
" 'New_York',\n",
" 'New_York',\n",
" 'California',\n",
" 'Minnesota',\n",
" 'California',\n",
" 'Kentucky',\n",
" 'Minnesota',\n",
" 'Ohio',\n",
" 'Michigan',\n",
" 'California',\n",
" 'Minnesota',\n",
" 'California',\n",
" 'Delaware',\n",
" 'Illinois',\n",
" 'Minnesota',\n",
" 'Texas',\n",
" 'New_Jersey',\n",
" 'Delaware',\n",
" 'Washington',\n",
" 'NONE',\n",
" 'Delaware',\n",
" 'Oregon',\n",
" 'Delaware',\n",
" 'Delaware',\n",
" 'Delaware',\n",
" 'Massachusetts',\n",
" 'California',\n",
" 'NONE',\n",
" 'Delaware',\n",
" 'Illinois',\n",
" 'Idaho',\n",
" 'Washington',\n",
" 'New_York',\n",
" 'New_York',\n",
" 'California',\n",
" 'Utah',\n",
" 'Delaware',\n",
" 'Washington',\n",
" 'Virginia',\n",
" 'New_York',\n",
" 'New_York',\n",
" 'Illinois',\n",
" 'California',\n",
" 'Delaware',\n",
" 'NONE',\n",
" 'Texas',\n",
" 'California',\n",
" 'Washington',\n",
" 'Delaware',\n",
" 'Washington',\n",
" 'New_York',\n",
" 'Washington',\n",
" 'Illinois']"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"dev_expected_jurisdiction" "dev_expected_jurisdiction"
] ]
@ -462,18 +383,9 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 17, "execution_count": null,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [],
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/kuba/anaconda3/lib/python3.8/site-packages/gensim/similarities/__init__.py:15: UserWarning: The gensim.similarities.levenshtein submodule is disabled, because the optional Levenshtein package <https://pypi.org/project/python-Levenshtein/> is unavailable. Install Levenhstein (e.g. `pip install python-Levenshtein`) to suppress this warning.\n",
" warnings.warn(msg)\n"
]
}
],
"source": [ "source": [
"from sklearn.datasets import fetch_20newsgroups\n", "from sklearn.datasets import fetch_20newsgroups\n",
"# https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html\n", "# https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html\n",
@ -1088,11 +1000,14 @@
} }
], ],
"metadata": { "metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -1104,7 +1019,10 @@
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.3" "version": "3.8.3"
} },
"subtitle": "6.Klasyfikacja[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4

View File

@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 7. <i>Regresja liniowa</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@ -443,28 +457,28 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"![a](obrazki/1.png)" "![regresja liniowa 1](obrazki/1.png)"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"![a](obrazki/2.png)" "![regresja liniowa 2](obrazki/2.png)"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"![a](obrazki/3.png)" "![regresja liniowa 3](obrazki/3.png)"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"![a](obrazki/4.png)" "![regresja liniowa 4](obrazki/4.png)"
] ]
}, },
{ {
@ -559,14 +573,14 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"![a](obrazki/6.png)" "![RMSE 2](obrazki/6.png)"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"![a](obrazki/5.png)" "![RMSE 2](obrazki/5.png)"
] ]
}, },
{ {
@ -1015,9 +1029,9 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"![a](obrazki/9.png)\n", "![RMSE 3](obrazki/9.png)\n",
"\n", "\n",
"![a](obrazki/10.png)" "![RMSE 4](obrazki/10.png)"
] ]
}, },
{ {
@ -1046,11 +1060,14 @@
} }
], ],
"metadata": { "metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -1061,8 +1078,11 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.5" "version": "3.8.3"
} },
"subtitle": "7.Regresja liniowa[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4

View File

@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 7. <i>Regresja liniowa</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@ -442,28 +456,28 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"![a](obrazki/1.png)" "![regresja liniowa 1](obrazki/1.png)"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"![a](obrazki/2.png)" "![regresja liniowa 2](obrazki/2.png)"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"![a](obrazki/3.png)" "![regresja liniowa 3](obrazki/3.png)"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"![a](obrazki/4.png)" "![regresja liniowa 4](obrazki/4.png)"
] ]
}, },
{ {
@ -602,7 +616,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"![a](obrazki/6.png)" "![RMSE 1](obrazki/6.png)"
] ]
}, },
{ {
@ -777,9 +791,9 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"![a](obrazki/7.png)\n", "![RMSE 2](obrazki/7.png)\n",
"\n", "\n",
"![a](obrazki/8.png)\n", "![RMSE 3](obrazki/8.png)\n",
"\n" "\n"
] ]
}, },
@ -1323,9 +1337,9 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"![a](obrazki/9.png)\n", "![RMSE 5](obrazki/9.png)\n",
"\n", "\n",
"![a](obrazki/10.png)" "![6](obrazki/10.png)"
] ]
}, },
{ {
@ -1354,11 +1368,14 @@
} }
], ],
"metadata": { "metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -1369,8 +1386,11 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.5" "version": "3.8.3"
} },
"subtitle": "7.Regresja liniowa[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4

View File

@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 8. <i>Regresja logistyczna</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@ -16,20 +30,11 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 1, "execution_count": null,
"metadata": { "metadata": {
"scrolled": true "scrolled": true
}, },
"outputs": [ "outputs": [],
{
"name": "stderr",
"output_type": "stream",
"text": [
"/media/kuba/ssdsam/anaconda3/lib/python3.8/site-packages/gensim/similarities/__init__.py:15: UserWarning: The gensim.similarities.levenshtein submodule is disabled, because the optional Levenshtein package <https://pypi.org/project/python-Levenshtein/> is unavailable. Install Levenhstein (e.g. `pip install python-Levenshtein`) to suppress this warning.\n",
" warnings.warn(msg)\n"
]
}
],
"source": [ "source": [
"import numpy as np\n", "import numpy as np\n",
"import gensim\n", "import gensim\n",
@ -1024,11 +1029,14 @@
} }
], ],
"metadata": { "metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -1039,8 +1047,11 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.5" "version": "3.8.3"
} },
"subtitle": "8.Regresja logistyczna[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4

View File

@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 8. <i>Regresja logistyczna</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@ -16,20 +30,11 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 1, "execution_count": null,
"metadata": { "metadata": {
"scrolled": true "scrolled": true
}, },
"outputs": [ "outputs": [],
{
"name": "stderr",
"output_type": "stream",
"text": [
"/media/kuba/ssdsam/anaconda3/lib/python3.8/site-packages/gensim/similarities/__init__.py:15: UserWarning: The gensim.similarities.levenshtein submodule is disabled, because the optional Levenshtein package <https://pypi.org/project/python-Levenshtein/> is unavailable. Install Levenhstein (e.g. `pip install python-Levenshtein`) to suppress this warning.\n",
" warnings.warn(msg)\n"
]
}
],
"source": [ "source": [
"import numpy as np\n", "import numpy as np\n",
"import gensim\n", "import gensim\n",
@ -1216,11 +1221,14 @@
} }
], ],
"metadata": { "metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -1231,8 +1239,11 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.5" "version": "3.8.3"
} },
"subtitle": "8.Regresja logistyczna[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4

View File

@ -0,0 +1,850 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 9. <i>Sequence labeling</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Klasyfikacja wieloklasowa i sequence labelling"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import gensim\n",
"import torch\n",
"import pandas as pd\n",
"import seaborn as sns\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"from datasets import load_dataset\n",
"from torchtext.vocab import Vocab\n",
"from collections import Counter\n",
"\n",
"from sklearn.datasets import fetch_20newsgroups\n",
"# https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html\n",
"\n",
"from sklearn.feature_extraction.text import TfidfVectorizer\n",
"from sklearn.metrics import accuracy_score"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Klasyfikacja"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Klasfikacja binarna- 2 klasy"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"CATEGORIES = ['soc.religion.christian', 'alt.atheism']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"newsgroups = fetch_20newsgroups(categories=CATEGORIES)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X = newsgroups['data']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y = newsgroups['target']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y_names = newsgroups['target_names']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"X[0:1]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"Y_names"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"del CATEGORIES, newsgroups, X, Y, Y_names"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### klasyfikacja wieloklasowa"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"newsgroups_train_dev = fetch_20newsgroups(subset = 'train')\n",
"newsgroups_test = fetch_20newsgroups(subset = 'test')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"newsgroups_train_dev_text = newsgroups_train_dev['data']\n",
"newsgroups_test_text = newsgroups_test['data']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y_train_dev = newsgroups_train_dev['target']\n",
"Y_test = newsgroups_test['target']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"newsgroups_train_text, newsgroups_dev_text, Y_train, Y_dev = train_test_split(newsgroups_train_dev_text, Y_train_dev, random_state=42)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y_names = newsgroups_train_dev['target_names']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y_train_dev"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y_names"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Jaki baseline?**"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"pd.value_counts(Y_train)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"accuracy_score(Y_test, np.ones_like(Y_test) * 10)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"\n",
"**Pytanie** - w jaki sposób stworzyć taki klasyfikator na podstawie tylko wiedzy z poprzednich ćwiczeń?"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Zadanie - stworzyć klasyfikator regresji logistycznej one vs rest na podstawie tfdif. TFIDF powinien mieć słownik o wielkości 10000\n",
"\n",
"https://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html\n",
"https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html\n",
"https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.multiclass import OneVsRestClassifier\n",
"from sklearn.linear_model import LogisticRegression\n",
"from sklearn.feature_extraction.text import TfidfVectorizer"
]
},
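{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of one possible solution, shown only as an illustration (the names `sketch_vectorizer` and `sketch_clf` are made up here): a TF-IDF vectorizer limited to a 10000-word vocabulary fitted on the training texts, with a one-vs-rest logistic regression on top, evaluated by accuracy on the dev split."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# a sketch, not the reference solution; assumes the texts and labels defined in the cells above\n",
"sketch_vectorizer = TfidfVectorizer(max_features=10_000)  # vocabulary of size 10000\n",
"X_train_sketch = sketch_vectorizer.fit_transform(newsgroups_train_text)\n",
"X_dev_sketch = sketch_vectorizer.transform(newsgroups_dev_text)\n",
"\n",
"# one binary logistic regression per class (one vs rest)\n",
"sketch_clf = OneVsRestClassifier(LogisticRegression()).fit(X_train_sketch, Y_train)\n",
"accuracy_score(sketch_clf.predict(X_dev_sketch), Y_dev)"
]
},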
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Podejście softmax na tfidif"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Zadanie** Na podstawie poprzednich zajęć stworzyć sieć w pytorch bez warstw ukrytych, z jedną warstwą *output* z funkcją softmax (bez trenowania i ewaluacji sieci)\n",
"\n",
"Użyć https://pytorch.org/docs/stable/generated/torch.nn.Softmax.html?highlight=softmax"
]
},
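{
"cell_type": "markdown",
"metadata": {},
"source": [
"One possible sketch of such a network, for illustration only (`SoftmaxSketch` is a made-up name): a single linear layer mapping the TF-IDF features to one score per class, followed by a softmax over the class dimension."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# a sketch of a network with no hidden layers and a softmax output layer\n",
"class SoftmaxSketch(torch.nn.Module):\n",
"\n",
"    def __init__(self, input_size, output_size):\n",
"        super(SoftmaxSketch, self).__init__()\n",
"        self.fc1 = torch.nn.Linear(input_size, output_size)\n",
"        self.softmax = torch.nn.Softmax(dim=1)  # normalize over the class dimension\n",
"\n",
"    def forward(self, x):\n",
"        x = self.fc1(x)\n",
"        x = self.softmax(x)\n",
"        return x\n",
"\n",
"# 3 random documents with 10000 features -> 3 rows of 20 class probabilities\n",
"SoftmaxSketch(10_000, 20)(torch.rand(3, 10_000)).shape"
]
},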
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_train"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class NeuralNetworkModel(torch.nn.Module):\n",
"\n",
" pass"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"OUTPUT_SIZE = len(Y_names)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"nn_model = NeuralNetworkModel(FEAUTERES, OUTPUT_SIZE)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"nn_model(torch.Tensor(X_train[0:3].astype(np.float32).todense()))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"BATCH_SIZE = 5"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"criterion = torch.nn.NLLLoss()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"optimizer = torch.optim.SGD(nn_model.parameters(), lr = 0.2)\n",
"#optimizer = torch.optim.Adam(nn_model.parameters())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_loss_acc(model, X_dataset, Y_dataset):\n",
" loss_score = 0\n",
" acc_score = 0\n",
" items_total = 0\n",
" model.eval()\n",
" for i in range(0, Y_dataset.shape[0], BATCH_SIZE):\n",
" X = X_dataset[i:i+BATCH_SIZE]\n",
" X = torch.tensor(X.astype(np.float32).todense())\n",
" Y = Y_dataset[i:i+BATCH_SIZE]\n",
" Y = torch.tensor(Y)\n",
" Y_predictions = model(X)\n",
" acc_score += torch.sum(torch.argmax(Y_predictions,dim=1) == Y).item()\n",
" items_total += Y.shape[0] \n",
"\n",
" loss = criterion(Y_predictions, Y)\n",
"\n",
" loss_score += loss.item() * Y.shape[0] \n",
" return (loss_score / items_total), (acc_score / items_total)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"for epoch in range(5):\n",
" loss_score = 0\n",
" acc_score = 0\n",
" items_total = 0\n",
" nn_model.train()\n",
" for i in range(0, Y_train.shape[0], BATCH_SIZE):\n",
" X = X_train[i:i+BATCH_SIZE]\n",
" X = torch.tensor(X.astype(np.float32).todense())\n",
" Y = Y_train[i:i+BATCH_SIZE]\n",
"\n",
" Y = torch.tensor(Y)\n",
" Y_predictions = nn_model(X)\n",
" acc_score += torch.sum(torch.argmax(Y_predictions,dim=1) == Y).item()\n",
" items_total += Y.shape[0] \n",
"\n",
" optimizer.zero_grad()\n",
" loss = criterion(Y_predictions, Y)\n",
" loss.backward()\n",
" optimizer.step()\n",
"\n",
"\n",
" loss_score += loss.item() * Y.shape[0]\n",
"\n",
" \n",
" display(epoch)\n",
" display(get_loss_acc(nn_model, X_train, Y_train))\n",
" display(get_loss_acc(nn_model, X_dev, Y_dev))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X.shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"newsgroups_train_text"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Podejście softmax z embeddingami na przykładzie NER"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# !pip install torchtext\n",
"# !pip install datasets"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://www.aclweb.org/anthology/W03-0419.pdf"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"dataset = load_dataset(\"conll2003\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def build_vocab(dataset):\n",
" counter = Counter()\n",
" for document in dataset:\n",
" counter.update(document)\n",
" return Vocab(counter, specials=['<unk>', '<pad>', '<bos>', '<eos>'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vocab = build_vocab(dataset['train']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dataset['train']['tokens']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(vocab.itos)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vocab['on']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def data_process(dt):\n",
" return [ torch.tensor([vocab['<bos>']] +[vocab[token] for token in document ] + [vocab['<eos>']], dtype = torch.long) for document in dt]\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def labels_process(dt):\n",
" return [ torch.tensor([0] + document + [0], dtype = torch.long) for document in dt]\n"
]
},
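{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick illustration of what these two helpers do (a sketch; `demo_doc` and `demo_tags` are made up): each document is wrapped in `<bos>`/`<eos>` ids and each label sequence is padded with 0 on both sides, so the lengths still match."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch: how a single document and its label sequence are converted\n",
"demo_doc = ['EU', 'rejects', 'German', 'call']\n",
"demo_tags = [3, 0, 7, 0]\n",
"print(data_process([demo_doc])[0])     # 6 ids: <bos>, the 4 tokens, <eos>\n",
"print(labels_process([demo_tags])[0])  # tensor([0, 3, 0, 7, 0, 0])"
]
},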
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_tokens_ids = data_process(dataset['train']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_tokens_ids = data_process(dataset['test']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"train_labels = labels_process(dataset['train']['ner_tags'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_labels = labels_process(dataset['test']['ner_tags'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_tokens_ids[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"max([max(x) for x in dataset['train']['ner_tags'] ])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class NERModel(torch.nn.Module):\n",
"\n",
" def __init__(self,):\n",
" super(NERModel, self).__init__()\n",
" self.emb = torch.nn.Embedding(23627,200)\n",
" self.fc1 = torch.nn.Linear(600,9)\n",
" #self.softmax = torch.nn.Softmax(dim=0)\n",
" # nie trzeba, bo używamy https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html\n",
" # jako kryterium\n",
" \n",
"\n",
" def forward(self, x):\n",
" x = self.emb(x)\n",
" x = x.reshape(600) \n",
" x = self.fc1(x)\n",
" #x = self.softmax(x)\n",
" return x"
]
},
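{
"cell_type": "markdown",
"metadata": {},
"source": [
"The input size of 600 in `fc1` comes from flattening the embeddings of a 3-token window (3 × 200). A quick sanity check of the shapes (a sketch; `demo_emb` is a throwaway embedding created just for this check):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 3 consecutive token ids -> 3 embeddings of size 200 -> one 600-dimensional vector\n",
"demo_emb = torch.nn.Embedding(23627, 200)\n",
"window = train_tokens_ids[0][1:4]\n",
"print(demo_emb(window).shape)               # torch.Size([3, 200])\n",
"print(demo_emb(window).reshape(600).shape)  # torch.Size([600]), what fc1 receives"
]
},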
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_tokens_ids[0][1:4]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ner_model = NERModel()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ner_model(train_tokens_ids[0][1:4])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"criterion = torch.nn.CrossEntropyLoss()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"optimizer = torch.optim.Adam(ner_model.parameters())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(train_labels)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for epoch in range(2):\n",
" loss_score = 0\n",
" acc_score = 0\n",
" prec_score = 0\n",
" selected_items = 0\n",
" recall_score = 0\n",
" relevant_items = 0\n",
" items_total = 0\n",
" nn_model.train()\n",
" #for i in range(len(train_labels)):\n",
" for i in range(100):\n",
" for j in range(1, len(train_labels[i]) - 1):\n",
" \n",
" X = train_tokens_ids[i][j-1: j+2]\n",
" Y = train_labels[i][j: j+1]\n",
"\n",
" Y_predictions = ner_model(X)\n",
" \n",
" \n",
" acc_score += int(torch.argmax(Y_predictions) == Y)\n",
" \n",
" if torch.argmax(Y_predictions) != 0:\n",
" selected_items +=1\n",
" if torch.argmax(Y_predictions) != 0 and torch.argmax(Y_predictions) == Y.item():\n",
" prec_score += 1\n",
" \n",
" if Y.item() != 0:\n",
" relevant_items +=1\n",
" if Y.item() != 0 and torch.argmax(Y_predictions) == Y.item():\n",
" recall_score += 1\n",
" \n",
" items_total += 1\n",
"\n",
" \n",
" optimizer.zero_grad()\n",
" loss = criterion(Y_predictions.unsqueeze(0), Y)\n",
" loss.backward()\n",
" optimizer.step()\n",
"\n",
"\n",
" loss_score += loss.item() \n",
" \n",
" precision = prec_score / selected_items\n",
" recall = recall_score / relevant_items\n",
" f1_score = (2*precision * recall) / (precision + recall)\n",
" display('epoch: ', epoch)\n",
" display('loss: ', loss_score / items_total)\n",
" display('acc: ', acc_score / items_total)\n",
" display('prec: ', precision)\n",
" display('recall: : ', recall)\n",
" display('f1: ', f1_score)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"loss_score = 0\n",
"acc_score = 0\n",
"prec_score = 0\n",
"selected_items = 0\n",
"recall_score = 0\n",
"relevant_items = 0\n",
"items_total = 0\n",
"nn_model.eval()\n",
"for i in range(100):\n",
"#for i in range(len(test_labels)):\n",
" for j in range(1, len(test_labels[i]) - 1):\n",
"\n",
" X = test_tokens_ids[i][j-1: j+2]\n",
" Y = test_labels[i][j: j+1]\n",
"\n",
" Y_predictions = ner_model(X)\n",
"\n",
"\n",
" acc_score += int(torch.argmax(Y_predictions) == Y)\n",
"\n",
" if torch.argmax(Y_predictions) != 0:\n",
" selected_items +=1\n",
" if torch.argmax(Y_predictions) != 0 and torch.argmax(Y_predictions) == Y.item():\n",
" prec_score += 1\n",
"\n",
" if Y.item() != 0:\n",
" relevant_items +=1\n",
" if Y.item() != 0 and torch.argmax(Y_predictions) == Y.item():\n",
" recall_score += 1\n",
"\n",
" items_total += 1\n",
"\n",
"\n",
" loss = criterion(Y_predictions.unsqueeze(0), Y)\n",
"\n",
"\n",
"\n",
" loss_score += loss.item() \n",
"\n",
"precision = prec_score / selected_items\n",
"recall = recall_score / relevant_items\n",
"f1_score = (2*precision * recall) / (precision + recall)\n",
"display('loss: ', loss_score / items_total)\n",
"display('acc: ', acc_score / items_total)\n",
"display('prec: ', precision)\n",
"display('recall: : ', recall)\n",
"display('f1: ', f1_score)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Zadanie domowe\n",
"\n",
"- sklonować repozytorium https://git.wmi.amu.edu.pl/kubapok/en-ner-conll-2003\n",
"- stworzyć klasyfikator bazujący na sieci neuronowej feed forward w pytorchu (można bazować na tym jupyterze lub nie).\n",
"- klasyfikator powinien obejmować dodatkowe cechy (np. długość wyrazu, czy wyraz zaczyna się od wielkiej litery, stemmming słowa, czy zawiera cyfrę)\n",
"- stworzyć predykcje w plikach dev-0/out.tsv oraz test-A/out.tsv\n",
"- wynik fscore sprawdzony za pomocą narzędzia geval (patrz poprzednie zadanie) powinien wynosić conajmniej 0.60\n",
"- proszę umieścić predykcję oraz skrypty generujące (w postaci tekstowej a nie jupyter) w repo, a w MS TEAMS umieścić link do swojego repo\n",
"termin 08.06, 80 punktów\n"
]
}
],
"metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"lang": "pl",
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.3"
},
"subtitle": "9.Sequence labeling[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
},
"nbformat": 4,
"nbformat_minor": 4
}


@ -0,0 +1,951 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 9. <i>Sequence labeling</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Klasyfikacja wieloklasowa i sequence labelling"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import gensim\n",
"import torch\n",
"import pandas as pd\n",
"import seaborn as sns\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"from datasets import load_dataset\n",
"from torchtext.vocab import Vocab\n",
"from collections import Counter\n",
"\n",
"from sklearn.datasets import fetch_20newsgroups\n",
"# https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html\n",
"\n",
"from sklearn.feature_extraction.text import TfidfVectorizer\n",
"from sklearn.metrics import accuracy_score"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Klasyfikacja"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Klasfikacja binarna- 2 klasy"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"CATEGORIES = ['soc.religion.christian', 'alt.atheism']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"newsgroups = fetch_20newsgroups(categories=CATEGORIES)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X = newsgroups['data']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y = newsgroups['target']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y_names = newsgroups['target_names']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"X[0:1]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"Y_names"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"del CATEGORIES, newsgroups, X, Y, Y_names"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### klasyfikacja wieloklasowa"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"newsgroups_train_dev = fetch_20newsgroups(subset = 'train')\n",
"newsgroups_test = fetch_20newsgroups(subset = 'test')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"newsgroups_train_dev_text = newsgroups_train_dev['data']\n",
"newsgroups_test_text = newsgroups_test['data']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y_train_dev = newsgroups_train_dev['target']\n",
"Y_test = newsgroups_test['target']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"newsgroups_train_text, newsgroups_dev_text, Y_train, Y_dev = train_test_split(newsgroups_train_dev_text, Y_train_dev, random_state=42)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y_names = newsgroups_train_dev['target_names']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y_train_dev"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y_names"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Jaki baseline?**"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"pd.value_counts(Y_train)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"accuracy_score(Y_test, np.ones_like(Y_test) * 10)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"\n",
"**Pytanie** - w jaki sposób stworzyć taki klasyfikator na podstawie tylko wiedzy z poprzednich ćwiczeń?"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Zadanie - stworzyć klasyfikator regresji logistycznej one vs rest na podstawie tfdif. TFIDF powinien mieć słownik o wielkości 10000\n",
"\n",
"https://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html\n",
"https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html\n",
"https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.multiclass import OneVsRestClassifier\n",
"from sklearn.linear_model import LogisticRegression\n",
"from sklearn.feature_extraction.text import TfidfVectorizer"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"FEAUTERES = 10_000"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vectorizer = TfidfVectorizer(max_features=FEAUTERES)\n",
"X_train = vectorizer.fit_transform(newsgroups_train_text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_dev = vectorizer.transform(newsgroups_dev_text)\n",
"X_test = vectorizer.transform(newsgroups_test_text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"clf = OneVsRestClassifier(LogisticRegression()).fit(X_train, Y_train)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"clf.predict(X_train[0:1])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"clf.predict_proba(X_train[0:1])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"np.max(clf.predict_proba(X_train[0]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"accuracy_score(clf.predict(X_train), Y_train)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"accuracy_score(clf.predict(X_dev), Y_dev)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"accuracy_score(clf.predict(X_test), Y_test)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Podejście softmax na tfidif"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Zadanie** Na podstawie poprzednich zajęć stworzyć sieć w pytorch bez warstw ukrytych, z jedną warstwą *output* z funkcją softmax (bez trenowania i ewaluacji sieci)\n",
"\n",
"Użyć https://pytorch.org/docs/stable/generated/torch.nn.Softmax.html?highlight=softmax"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_train"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class NeuralNetworkModel(torch.nn.Module):\n",
"\n",
" def __init__(self,FEAUTERES, output_size):\n",
" super(NeuralNetworkModel, self).__init__()\n",
" self.fc1 = torch.nn.Linear(FEAUTERES,OUTPUT_SIZE)\n",
" self.softmax = torch.nn.Softmax(dim=0)\n",
" \n",
"\n",
" def forward(self, x):\n",
" x = self.fc1(x)\n",
" x = self.softmax(x)\n",
" return x"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"OUTPUT_SIZE = len(Y_names)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"nn_model = NeuralNetworkModel(FEAUTERES, OUTPUT_SIZE)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"nn_model(torch.Tensor(X_train[0:3].astype(np.float32).todense()))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"BATCH_SIZE = 5"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"criterion = torch.nn.NLLLoss()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"optimizer = torch.optim.SGD(nn_model.parameters(), lr = 0.2)\n",
"#optimizer = torch.optim.Adam(nn_model.parameters())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_loss_acc(model, X_dataset, Y_dataset):\n",
" loss_score = 0\n",
" acc_score = 0\n",
" items_total = 0\n",
" model.eval()\n",
" for i in range(0, Y_dataset.shape[0], BATCH_SIZE):\n",
" X = X_dataset[i:i+BATCH_SIZE]\n",
" X = torch.tensor(X.astype(np.float32).todense())\n",
" Y = Y_dataset[i:i+BATCH_SIZE]\n",
" Y = torch.tensor(Y)\n",
" Y_predictions = model(X)\n",
" acc_score += torch.sum(torch.argmax(Y_predictions,dim=1) == Y).item()\n",
" items_total += Y.shape[0] \n",
"\n",
" loss = criterion(Y_predictions, Y)\n",
"\n",
" loss_score += loss.item() * Y.shape[0] \n",
" return (loss_score / items_total), (acc_score / items_total)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"for epoch in range(5):\n",
" loss_score = 0\n",
" acc_score = 0\n",
" items_total = 0\n",
" nn_model.train()\n",
" for i in range(0, Y_train.shape[0], BATCH_SIZE):\n",
" X = X_train[i:i+BATCH_SIZE]\n",
" X = torch.tensor(X.astype(np.float32).todense())\n",
" Y = Y_train[i:i+BATCH_SIZE]\n",
"\n",
" Y = torch.tensor(Y)\n",
" Y_predictions = nn_model(X)\n",
" acc_score += torch.sum(torch.argmax(Y_predictions,dim=1) == Y).item()\n",
" items_total += Y.shape[0] \n",
"\n",
" optimizer.zero_grad()\n",
" loss = criterion(Y_predictions, Y)\n",
" loss.backward()\n",
" optimizer.step()\n",
"\n",
"\n",
" loss_score += loss.item() * Y.shape[0]\n",
"\n",
" \n",
" display(epoch)\n",
" display(get_loss_acc(nn_model, X_train, Y_train))\n",
" display(get_loss_acc(nn_model, X_dev, Y_dev))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X.shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"newsgroups_train_text"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Podejście softmax z embeddingami na przykładzie NER"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# !pip install torchtext\n",
"# !pip install datasets"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://www.aclweb.org/anthology/W03-0419.pdf"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"dataset = load_dataset(\"conll2003\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def build_vocab(dataset):\n",
" counter = Counter()\n",
" for document in dataset:\n",
" counter.update(document)\n",
" return Vocab(counter, specials=['<unk>', '<pad>', '<bos>', '<eos>'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vocab = build_vocab(dataset['train']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dataset['train']['tokens']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(vocab.itos)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vocab['on']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def data_process(dt):\n",
" return [ torch.tensor([vocab['<bos>']] +[vocab[token] for token in document ] + [vocab['<eos>']], dtype = torch.long) for document in dt]\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def labels_process(dt):\n",
" return [ torch.tensor([0] + document + [0], dtype = torch.long) for document in dt]\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_tokens_ids = data_process(dataset['train']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_tokens_ids = data_process(dataset['test']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"train_labels = labels_process(dataset['train']['ner_tags'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_labels = labels_process(dataset['test']['ner_tags'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_tokens_ids[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"max([max(x) for x in dataset['train']['ner_tags'] ])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class NERModel(torch.nn.Module):\n",
"\n",
" def __init__(self,):\n",
" super(NERModel, self).__init__()\n",
" self.emb = torch.nn.Embedding(23627,200)\n",
" self.fc1 = torch.nn.Linear(600,9)\n",
" #self.softmax = torch.nn.Softmax(dim=0)\n",
" # nie trzeba, bo używamy https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html\n",
" # jako kryterium\n",
" \n",
"\n",
" def forward(self, x):\n",
" x = self.emb(x)\n",
" x = x.reshape(600) \n",
" x = self.fc1(x)\n",
" #x = self.softmax(x)\n",
" return x"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_tokens_ids[0][1:4]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ner_model = NERModel()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ner_model(train_tokens_ids[0][1:4])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"criterion = torch.nn.CrossEntropyLoss()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"optimizer = torch.optim.Adam(ner_model.parameters())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(train_labels)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for epoch in range(2):\n",
" loss_score = 0\n",
" acc_score = 0\n",
" prec_score = 0\n",
" selected_items = 0\n",
" recall_score = 0\n",
" relevant_items = 0\n",
" items_total = 0\n",
" nn_model.train()\n",
" #for i in range(len(train_labels)):\n",
" for i in range(100):\n",
" for j in range(1, len(train_labels[i]) - 1):\n",
" \n",
" X = train_tokens_ids[i][j-1: j+2]\n",
" Y = train_labels[i][j: j+1]\n",
"\n",
" Y_predictions = ner_model(X)\n",
" \n",
" \n",
" acc_score += int(torch.argmax(Y_predictions) == Y)\n",
" \n",
" if torch.argmax(Y_predictions) != 0:\n",
" selected_items +=1\n",
" if torch.argmax(Y_predictions) != 0 and torch.argmax(Y_predictions) == Y.item():\n",
" prec_score += 1\n",
" \n",
" if Y.item() != 0:\n",
" relevant_items +=1\n",
" if Y.item() != 0 and torch.argmax(Y_predictions) == Y.item():\n",
" recall_score += 1\n",
" \n",
" items_total += 1\n",
"\n",
" \n",
" optimizer.zero_grad()\n",
" loss = criterion(Y_predictions.unsqueeze(0), Y)\n",
" loss.backward()\n",
" optimizer.step()\n",
"\n",
"\n",
" loss_score += loss.item() \n",
" \n",
" precision = prec_score / selected_items\n",
" recall = recall_score / relevant_items\n",
" f1_score = (2*precision * recall) / (precision + recall)\n",
" display('epoch: ', epoch)\n",
" display('loss: ', loss_score / items_total)\n",
" display('acc: ', acc_score / items_total)\n",
" display('prec: ', precision)\n",
" display('recall: : ', recall)\n",
" display('f1: ', f1_score)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"loss_score = 0\n",
"acc_score = 0\n",
"prec_score = 0\n",
"selected_items = 0\n",
"recall_score = 0\n",
"relevant_items = 0\n",
"items_total = 0\n",
"nn_model.eval()\n",
"for i in range(100):\n",
"#for i in range(len(test_labels)):\n",
" for j in range(1, len(test_labels[i]) - 1):\n",
"\n",
" X = test_tokens_ids[i][j-1: j+2]\n",
" Y = test_labels[i][j: j+1]\n",
"\n",
" Y_predictions = ner_model(X)\n",
"\n",
"\n",
" acc_score += int(torch.argmax(Y_predictions) == Y)\n",
"\n",
" if torch.argmax(Y_predictions) != 0:\n",
" selected_items +=1\n",
" if torch.argmax(Y_predictions) != 0 and torch.argmax(Y_predictions) == Y.item():\n",
" prec_score += 1\n",
"\n",
" if Y.item() != 0:\n",
" relevant_items +=1\n",
" if Y.item() != 0 and torch.argmax(Y_predictions) == Y.item():\n",
" recall_score += 1\n",
"\n",
" items_total += 1\n",
"\n",
"\n",
" loss = criterion(Y_predictions.unsqueeze(0), Y)\n",
"\n",
"\n",
"\n",
" loss_score += loss.item() \n",
"\n",
"precision = prec_score / selected_items\n",
"recall = recall_score / relevant_items\n",
"f1_score = (2*precision * recall) / (precision + recall)\n",
"display('loss: ', loss_score / items_total)\n",
"display('acc: ', acc_score / items_total)\n",
"display('prec: ', precision)\n",
"display('recall: : ', recall)\n",
"display('f1: ', f1_score)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Zadanie domowe\n",
"\n",
"- sklonować repozytorium https://git.wmi.amu.edu.pl/kubapok/en-ner-conll-2003\n",
"- stworzyć klasyfikator bazujący na sieci neuronowej feed forward w pytorchu (można bazować na tym jupyterze lub nie).\n",
"- klasyfikator powinien obejmować dodatkowe cechy (np. długość wyrazu, czy wyraz zaczyna się od wielkiej litery, stemmming słowa, czy zawiera cyfrę)\n",
"- stworzyć predykcje w plikach dev-0/out.tsv oraz test-A/out.tsv\n",
"- wynik fscore sprawdzony za pomocą narzędzia geval (patrz poprzednie zadanie) powinien wynosić conajmniej 0.60\n",
"- proszę umieścić predykcję oraz skrypty generujące (w postaci tekstowej a nie jupyter) w repo, a w MS TEAMS umieścić link do swojego repo\n",
"termin 08.06, 80 punktów\n"
]
}
],
"metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"lang": "pl",
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.3"
},
"subtitle": "9.Sequence labeling[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
},
"nbformat": 4,
"nbformat_minor": 4
}

cw/10_CRF.ipynb Normal file

@ -0,0 +1,447 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 10. <i>CRF</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Podejście softmax z embeddingami na przykładzie NER"
]
},
{
"cell_type": "markdown",
"metadata": {
"scrolled": true
},
"source": [
"https://pytorch-crf.readthedocs.io/en/stable/"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://www.aclweb.org/anthology/W03-0419.pdf"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import gensim\n",
"import torch\n",
"import pandas as pd\n",
"import seaborn as sns\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"from datasets import load_dataset\n",
"from torchtext.vocab import Vocab\n",
"from collections import Counter\n",
"\n",
"from sklearn.datasets import fetch_20newsgroups\n",
"# https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html\n",
"\n",
"from sklearn.feature_extraction.text import TfidfVectorizer\n",
"from sklearn.metrics import accuracy_score\n",
"\n",
"from tqdm.notebook import tqdm\n",
"\n",
"import torch\n",
"from torchcrf import CRF"
]
},
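{
"cell_type": "markdown",
"metadata": {},
"source": [
"A tiny self-contained illustration of the `torchcrf.CRF` interface used below (random numbers only, the `demo_` names are made up): calling the CRF returns the log-likelihood of a tag sequence given per-token emission scores, so its negation serves as a loss, and `decode` returns the most likely tag sequence."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# standalone sketch of the CRF API: 5 tags, one sequence of length 7, batch size 1\n",
"demo_crf = CRF(num_tags=5)\n",
"demo_emissions = torch.randn(7, 1, 5)             # (seq_len, batch, num_tags)\n",
"demo_tags = torch.randint(0, 5, (7, 1))           # (seq_len, batch)\n",
"demo_loss = -demo_crf(demo_emissions, demo_tags)  # negative log-likelihood, a scalar\n",
"demo_crf.decode(demo_emissions)                   # a list with one list of 7 tag indices"
]
},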
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"dataset = load_dataset(\"conll2003\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def build_vocab(dataset):\n",
" counter = Counter()\n",
" for document in dataset:\n",
" counter.update(document)\n",
" return Vocab(counter, specials=['<unk>', '<pad>', '<bos>', '<eos>'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vocab = build_vocab(dataset['train']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(vocab.itos)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vocab['on']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def data_process(dt):\n",
" return [ torch.tensor([vocab['<bos>']] +[vocab[token] for token in document ] + [vocab['<eos>']], dtype = torch.long) for document in dt]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def labels_process(dt):\n",
" return [ torch.tensor([0] + document + [0], dtype = torch.long) for document in dt]\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_tokens_ids = data_process(dataset['train']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_tokens_ids = data_process(dataset['test']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"validation_tokens_ids = data_process(dataset['validation']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"train_labels = labels_process(dataset['train']['ner_tags'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"validation_labels = labels_process(dataset['validation']['ner_tags'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_labels = labels_process(dataset['test']['ner_tags'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_tokens_ids[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_scores(y_true, y_pred):\n",
" acc_score = 0\n",
" tp = 0\n",
" fp = 0\n",
" selected_items = 0\n",
" relevant_items = 0 \n",
"\n",
" for p,t in zip(y_pred, y_true):\n",
" if p == t:\n",
" acc_score +=1\n",
"\n",
" if p > 0 and p == t:\n",
" tp +=1\n",
"\n",
" if p > 0:\n",
" selected_items += 1\n",
"\n",
" if t > 0 :\n",
" relevant_items +=1\n",
"\n",
" \n",
" \n",
" if selected_items == 0:\n",
" precision = 1.0\n",
" else:\n",
" precision = tp / selected_items\n",
" \n",
" \n",
" if relevant_items == 0:\n",
" recall = 1.0\n",
" else:\n",
" recall = tp / relevant_items\n",
" \n",
" \n",
" if precision + recall == 0.0 :\n",
" f1 = 0.0\n",
" else:\n",
" f1 = 2* precision * recall / (precision + recall)\n",
"\n",
" return precision, recall, f1"
]
},
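{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small worked example of `get_scores` on made-up labels (0 standing for the outside tag): one of the two non-zero predictions matches the gold tag and one of the two non-zero gold tags is recovered, so precision, recall and F1 all equal 0.5."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# toy check: y_true has two entity tags (1 and 2), the prediction gets one of them right\n",
"get_scores(y_true=[0, 1, 2, 0], y_pred=[0, 1, 3, 0])  # (0.5, 0.5, 0.5)"
]
},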
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"num_tags = max([max(x) for x in dataset['train']['ner_tags'] ]) + 1 "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class FF(torch.nn.Module):\n",
"\n",
" def __init__(self,):\n",
" super(FF, self).__init__()\n",
" self.emb = torch.nn.Embedding(23627,200)\n",
" self.fc1 = torch.nn.Linear(200,num_tags)\n",
" \n",
"\n",
" def forward(self, x):\n",
" x = self.emb(x)\n",
" x = self.fc1(x)\n",
" return x"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ff = FF()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"crf = CRF(num_tags)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"params = list(ff.parameters()) + list(crf.parameters())\n",
"\n",
"optimizer = torch.optim.Adam(params)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def eval_model(dataset_tokens, dataset_labels):\n",
" Y_true = []\n",
" Y_pred = []\n",
" ff.eval()\n",
" crf.eval()\n",
" for i in tqdm(range(len(dataset_labels))):\n",
" batch_tokens = dataset_tokens[i]\n",
" tags = list(dataset_labels[i].numpy())\n",
" emissions = ff(batch_tokens).unsqueeze(1)\n",
" Y_pred += crf.decode(emissions)[0]\n",
" Y_true += tags\n",
"\n",
" return get_scores(Y_true, Y_pred)\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"NUM_EPOCHS = 4"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"for i in range(NUM_EPOCHS):\n",
" ff.train()\n",
" crf.train()\n",
" for i in tqdm(range(len(train_labels))):\n",
" batch_tokens = train_tokens_ids[i]\n",
" tags = train_labels[i].unsqueeze(1)\n",
" emissions = ff(batch_tokens).unsqueeze(1)\n",
"\n",
" optimizer.zero_grad()\n",
" loss = -crf(emissions,tags)\n",
" loss.backward()\n",
" optimizer.step()\n",
" \n",
" ff.eval()\n",
" crf.eval()\n",
" print(eval_model(validation_tokens_ids, validation_labels))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"eval_model(validation_tokens_ids, validation_labels)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"eval_model(test_tokens_ids, test_labels)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(train_tokens_ids)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Zadanie domowe\n",
"\n",
"- sklonować repozytorium https://git.wmi.amu.edu.pl/kubapok/en-ner-conll-2003\n",
"- stworzyć klasyfikator bazujący na sieci neuronowej feed forward w pytorchu + CRF (można bazować na tym jupyterze lub nie).\n",
"- sieć feedforward powinna obejmować aktualne słowo, poprzednie i następne + dodatkowe cechy (np. długość wyrazu, czy wyraz zaczyna się od wielkiej litery, stemmming słowa, czy zawiera cyfrę)\n",
"- stworzyć predykcje w plikach dev-0/out.tsv oraz test-A/out.tsv\n",
"- wynik fscore sprawdzony za pomocą narzędzia geval (patrz poprzednie zadanie) powinien wynosić conajmniej 0.65\n",
"- proszę umieścić predykcję oraz skrypty generujące (w postaci tekstowej a nie jupyter) w repo, a w MS TEAMS umieścić link do swojego repo\n",
"termin 15.06, 60 punktów, za najlepszy wynik- 100 punktów\n"
]
},
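{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of the kind of extra per-token features mentioned above (an illustration only, not part of the required solution; `token_features` is a made-up helper, and stemming is left out):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch: hand-crafted features for a single token, to be combined with its embedding\n",
"def token_features(token):\n",
"    return [\n",
"        len(token),                            # word length\n",
"        int(token[:1].isupper()),              # starts with a capital letter\n",
"        int(any(c.isdigit() for c in token)),  # contains a digit\n",
"    ]\n",
"\n",
"token_features('Warsaw'), token_features('2021')"
]
},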
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"lang": "pl",
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.3"
},
"subtitle": "10.CRF[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
},
"nbformat": 4,
"nbformat_minor": 4
}

cw/11_NER_RNN.ipynb Normal file

@ -0,0 +1,615 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 11. <i>NER RNN</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Podejście softmax z embeddingami na przykładzie NER"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import gensim\n",
"import torch\n",
"import pandas as pd\n",
"import seaborn as sns\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"from datasets import load_dataset\n",
"from torchtext.vocab import Vocab\n",
"from collections import Counter\n",
"\n",
"from sklearn.datasets import fetch_20newsgroups\n",
"# https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html\n",
"\n",
"from sklearn.feature_extraction.text import TfidfVectorizer\n",
"from sklearn.metrics import accuracy_score\n",
"\n",
"from tqdm.notebook import tqdm\n",
"\n",
"import torch"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"dataset = load_dataset(\"conll2003\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def build_vocab(dataset):\n",
" counter = Counter()\n",
" for document in dataset:\n",
" counter.update(document)\n",
" return Vocab(counter, specials=['<unk>', '<pad>', '<bos>', '<eos>'])"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"vocab = build_vocab(dataset['train']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"23627"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"len(vocab.itos)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"15"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"vocab['on']"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"def data_process(dt):\n",
" return [ torch.tensor([vocab['<bos>']] +[vocab[token] for token in document ] + [vocab['<eos>']], dtype = torch.long) for document in dt]"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"def labels_process(dt):\n",
" return [ torch.tensor([0] + document + [0], dtype = torch.long) for document in dt]\n"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"train_tokens_ids = data_process(dataset['train']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"test_tokens_ids = data_process(dataset['test']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"validation_tokens_ids = data_process(dataset['validation']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"train_labels = labels_process(dataset['train']['ner_tags'])"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"validation_labels = labels_process(dataset['validation']['ner_tags'])"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"test_labels = labels_process(dataset['test']['ner_tags'])"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([ 2, 966, 22409, 238, 773, 9, 4588, 212, 7686, 4,\n",
" 3])"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train_tokens_ids[0]"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'chunk_tags': [11, 21, 11, 12, 21, 22, 11, 12, 0],\n",
" 'id': '0',\n",
" 'ner_tags': [3, 0, 7, 0, 0, 0, 7, 0, 0],\n",
" 'pos_tags': [22, 42, 16, 21, 35, 37, 16, 21, 7],\n",
" 'tokens': ['EU',\n",
" 'rejects',\n",
" 'German',\n",
" 'call',\n",
" 'to',\n",
" 'boycott',\n",
" 'British',\n",
" 'lamb',\n",
" '.']}"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"dataset['train'][0]"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"text/plain": [
"tensor([0, 3, 0, 7, 0, 0, 0, 7, 0, 0, 0])"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train_labels[0]"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"def get_scores(y_true, y_pred):\n",
" acc_score = 0\n",
" tp = 0\n",
" fp = 0\n",
" selected_items = 0\n",
" relevant_items = 0 \n",
"\n",
" for p,t in zip(y_pred, y_true):\n",
" if p == t:\n",
" acc_score +=1\n",
"\n",
" if p > 0 and p == t:\n",
" tp +=1\n",
"\n",
" if p > 0:\n",
" selected_items += 1\n",
"\n",
" if t > 0 :\n",
" relevant_items +=1\n",
"\n",
" \n",
" \n",
" if selected_items == 0:\n",
" precision = 1.0\n",
" else:\n",
" precision = tp / selected_items\n",
" \n",
" \n",
" if relevant_items == 0:\n",
" recall = 1.0\n",
" else:\n",
" recall = tp / relevant_items\n",
" \n",
" \n",
" if precision + recall == 0.0 :\n",
" f1 = 0.0\n",
" else:\n",
" f1 = 2* precision * recall / (precision + recall)\n",
"\n",
" return precision, recall, f1"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"num_tags = max([max(x) for x in dataset['train']['ner_tags'] ]) + 1 "
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [],
"source": [
"class LSTM(torch.nn.Module):\n",
"\n",
" def __init__(self):\n",
" super(LSTM, self).__init__()\n",
" self.emb = torch.nn.Embedding(len(vocab.itos),100)\n",
" self.rec = torch.nn.LSTM(100, 256, 1, batch_first = True)\n",
" self.fc1 = torch.nn.Linear( 256 , 9)\n",
"\n",
" def forward(self, x):\n",
" emb = torch.relu(self.emb(x))\n",
" \n",
" lstm_output, (h_n, c_n) = self.rec(emb)\n",
" \n",
" out_weights = self.fc1(lstm_output)\n",
"\n",
" return out_weights"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [],
"source": [
"lstm = LSTM()"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [],
"source": [
"criterion = torch.nn.CrossEntropyLoss()"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [],
"source": [
"optimizer = torch.optim.Adam(lstm.parameters())"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {},
"outputs": [],
"source": [
"def eval_model(dataset_tokens, dataset_labels, model):\n",
" Y_true = []\n",
" Y_pred = []\n",
" for i in tqdm(range(len(dataset_labels))):\n",
" batch_tokens = dataset_tokens[i].unsqueeze(0)\n",
" tags = list(dataset_labels[i].numpy())\n",
" Y_true += tags\n",
" \n",
" Y_batch_pred_weights = model(batch_tokens).squeeze(0)\n",
" Y_batch_pred = torch.argmax(Y_batch_pred_weights,1)\n",
" Y_pred += list(Y_batch_pred.numpy())\n",
" \n",
"\n",
" return get_scores(Y_true, Y_pred)\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {},
"outputs": [],
"source": [
"NUM_EPOCHS = 5"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"for i in range(NUM_EPOCHS):\n",
" lstm.train()\n",
" #for i in tqdm(range(500)):\n",
" for i in tqdm(range(len(train_labels))):\n",
" batch_tokens = train_tokens_ids[i].unsqueeze(0)\n",
" tags = train_labels[i].unsqueeze(1)\n",
" \n",
" \n",
" predicted_tags = lstm(batch_tokens)\n",
"\n",
" \n",
" optimizer.zero_grad()\n",
" loss = criterion(predicted_tags.squeeze(0),tags.squeeze(1))\n",
" \n",
" loss.backward()\n",
" optimizer.step()\n",
" \n",
" lstm.eval()\n",
" print(eval_model(validation_tokens_ids, validation_labels, lstm))"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "5159f7a61c3a439bab45573f15ea55b2",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, max=3250.0), HTML(value='')))"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n"
]
},
{
"data": {
"text/plain": [
"(0.7963248522230789, 0.7203301174009067, 0.7564235581324383)"
]
},
"execution_count": 27,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"eval_model(validation_tokens_ids, validation_labels, lstm)"
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "4b604bbb796f4d4cb99528fad98cfdff",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, max=3453.0), HTML(value='')))"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n"
]
},
{
"data": {
"text/plain": [
"(0.7450810185185185, 0.6348619329388561, 0.685569755058573)"
]
},
"execution_count": 28,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"eval_model(test_tokens_ids, test_labels, lstm)"
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"text/plain": [
"14041"
]
},
"execution_count": 29,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"len(train_tokens_ids)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## pytania\n",
"\n",
"- co zrobić z trenowaniem na batchach > 1 ?\n",
"- co zrobić, żeby sieć uwzględniała następne tokeny, a nie tylko poprzednie?\n",
"- w jaki sposób wykorzystać taką sieć do zadania zwykłej klasyfikacji?"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Zadanie na zajęcia ( 20 minut)\n",
"\n",
"zmodyfikować sieć tak, żeby była używała dwuwarstwowej, dwukierunkowej warstwy GRU oraz dropoutu. Dropout ma nałożony na embeddingi.\n"
]
},
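{
"cell_type": "markdown",
"metadata": {},
"source": [
"One possible sketch of such a modification, shown only as an illustration (`GRUSketch` is a made-up name): dropout on the embeddings, a two-layer bidirectional GRU, and a final linear layer over the 2 × 256 features produced per token."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# a sketch: two-layer, bidirectional GRU with dropout applied to the embeddings\n",
"class GRUSketch(torch.nn.Module):\n",
"\n",
"    def __init__(self):\n",
"        super(GRUSketch, self).__init__()\n",
"        self.emb = torch.nn.Embedding(len(vocab.itos), 100)\n",
"        self.dropout = torch.nn.Dropout(0.2)\n",
"        self.rec = torch.nn.GRU(100, 256, num_layers=2, batch_first=True, bidirectional=True)\n",
"        self.fc1 = torch.nn.Linear(2 * 256, 9)  # both directions are concatenated\n",
"\n",
"    def forward(self, x):\n",
"        emb = torch.relu(self.emb(x))\n",
"        emb = self.dropout(emb)\n",
"        gru_output, h_n = self.rec(emb)\n",
"        out_weights = self.fc1(gru_output)\n",
"        return out_weights"
]
},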
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Zadanie domowe\n",
"\n",
"\n",
"- sklonować repozytorium https://git.wmi.amu.edu.pl/kubapok/en-ner-conll-2003\n",
"- stworzyć model seq labelling bazujący na sieci neuronowej opisanej w punkcie niżej (można bazować na tym jupyterze lub nie).\n",
"- model sieci to GRU (o dowolnych parametrach) + CRF w pytorchu korzystając z modułu CRF z poprzednich zajęć- - stworzyć predykcje w plikach dev-0/out.tsv oraz test-A/out.tsv\n",
"- wynik fscore sprawdzony za pomocą narzędzia geval (patrz poprzednie zadanie) powinien wynosić conajmniej 0.65\n",
"- proszę umieścić predykcję oraz skrypty generujące (w postaci tekstowej a nie jupyter) w repo, a w MS TEAMS umieścić link do swojego repo\n",
"termin 22.06, 60 punktów, za najlepszy wynik- 100 punktów\n",
" "
]
}
],
"metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"lang": "pl",
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.3"
},
"subtitle": "11.NER RNN[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
},
"nbformat": 4,
"nbformat_minor": 4
}


@ -0,0 +1,565 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 11. <i>NER RNN</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Podejście softmax z embeddingami na przykładzie NER"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import gensim\n",
"import torch\n",
"import pandas as pd\n",
"import seaborn as sns\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"from datasets import load_dataset\n",
"from torchtext.vocab import Vocab\n",
"from collections import Counter\n",
"\n",
"from sklearn.datasets import fetch_20newsgroups\n",
"# https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html\n",
"\n",
"from sklearn.feature_extraction.text import TfidfVectorizer\n",
"from sklearn.metrics import accuracy_score\n",
"\n",
"from tqdm.notebook import tqdm\n",
"\n",
"import torch"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"dataset = load_dataset(\"conll2003\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def build_vocab(dataset):\n",
" counter = Counter()\n",
" for document in dataset:\n",
" counter.update(document)\n",
" return Vocab(counter, specials=['<unk>', '<pad>', '<bos>', '<eos>'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vocab = build_vocab(dataset['train']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(vocab.itos)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vocab['on']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def data_process(dt):\n",
" return [ torch.tensor([vocab['<bos>']] +[vocab[token] for token in document ] + [vocab['<eos>']], dtype = torch.long) for document in dt]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def labels_process(dt):\n",
" return [ torch.tensor([0] + document + [0], dtype = torch.long) for document in dt]\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_tokens_ids = data_process(dataset['train']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_tokens_ids = data_process(dataset['test']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"validation_tokens_ids = data_process(dataset['validation']['tokens'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"train_labels = labels_process(dataset['train']['ner_tags'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"validation_labels = labels_process(dataset['validation']['ner_tags'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_labels = labels_process(dataset['test']['ner_tags'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_tokens_ids[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dataset['train'][0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"train_labels[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_scores(y_true, y_pred):\n",
" acc_score = 0\n",
" tp = 0\n",
" fp = 0\n",
" selected_items = 0\n",
" relevant_items = 0 \n",
"\n",
" for p,t in zip(y_pred, y_true):\n",
" if p == t:\n",
" acc_score +=1\n",
"\n",
" if p > 0 and p == t:\n",
" tp +=1\n",
"\n",
" if p > 0:\n",
" selected_items += 1\n",
"\n",
" if t > 0 :\n",
" relevant_items +=1\n",
"\n",
" \n",
" \n",
" if selected_items == 0:\n",
" precision = 1.0\n",
" else:\n",
" precision = tp / selected_items\n",
" \n",
" \n",
" if relevant_items == 0:\n",
" recall = 1.0\n",
" else:\n",
" recall = tp / relevant_items\n",
" \n",
" \n",
" if precision + recall == 0.0 :\n",
" f1 = 0.0\n",
" else:\n",
" f1 = 2* precision * recall / (precision + recall)\n",
"\n",
" return precision, recall, f1"
]
},
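{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check of `get_scores` on a toy example (illustrative values, not part of the original data): two non-zero predictions, one of them correct, and two non-zero gold tags, so precision = recall = F1 = 0.5."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# toy example: tp = 1, selected = 2, relevant = 2 -> precision = 0.5, recall = 0.5, F1 = 0.5\n",
"get_scores(y_true=[0, 1, 2, 0], y_pred=[0, 1, 0, 3])"
]
},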
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"num_tags = max([max(x) for x in dataset['train']['ner_tags'] ]) + 1 "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class LSTM(torch.nn.Module):\n",
"\n",
" def __init__(self):\n",
" super(LSTM, self).__init__()\n",
" self.emb = torch.nn.Embedding(len(vocab.itos),100)\n",
" self.rec = torch.nn.LSTM(100, 256, 1, batch_first = True)\n",
" self.fc1 = torch.nn.Linear( 256 , 9)\n",
"\n",
" def forward(self, x):\n",
" emb = torch.relu(self.emb(x))\n",
" \n",
" lstm_output, (h_n, c_n) = self.rec(emb)\n",
" \n",
" out_weights = self.fc1(lstm_output)\n",
"\n",
" return out_weights"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"lstm = LSTM()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"criterion = torch.nn.CrossEntropyLoss()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"optimizer = torch.optim.Adam(lstm.parameters())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def eval_model(dataset_tokens, dataset_labels, model):\n",
" Y_true = []\n",
" Y_pred = []\n",
" for i in tqdm(range(len(dataset_labels))):\n",
" batch_tokens = dataset_tokens[i].unsqueeze(0)\n",
" tags = list(dataset_labels[i].numpy())\n",
" Y_true += tags\n",
" \n",
" Y_batch_pred_weights = model(batch_tokens).squeeze(0)\n",
" Y_batch_pred = torch.argmax(Y_batch_pred_weights,1)\n",
" Y_pred += list(Y_batch_pred.numpy())\n",
" \n",
"\n",
" return get_scores(Y_true, Y_pred)\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"NUM_EPOCHS = 5"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"for i in range(NUM_EPOCHS):\n",
" lstm.train()\n",
" #for i in tqdm(range(500)):\n",
" for i in tqdm(range(len(train_labels))):\n",
" batch_tokens = train_tokens_ids[i].unsqueeze(0)\n",
" tags = train_labels[i].unsqueeze(1)\n",
" \n",
" \n",
" predicted_tags = lstm(batch_tokens)\n",
"\n",
" \n",
" optimizer.zero_grad()\n",
" loss = criterion(predicted_tags.squeeze(0),tags.squeeze(1))\n",
" \n",
" loss.backward()\n",
" optimizer.step()\n",
" \n",
" lstm.eval()\n",
" print(eval_model(validation_tokens_ids, validation_labels, lstm))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"eval_model(validation_tokens_ids, validation_labels, lstm)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"eval_model(test_tokens_ids, test_labels, lstm)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"len(train_tokens_ids)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## pytania\n",
"\n",
"- co zrobić z trenowaniem na batchach > 1 ?\n",
"- co zrobić, żeby sieć uwzględniała następne tokeny, a nie tylko poprzednie?\n",
"- w jaki sposób wykorzystać taką sieć do zadania zwykłej klasyfikacji?"
]
},
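{
"cell_type": "markdown",
"metadata": {},
"source": [
"One possible answer to the first question, as a minimal sketch (not the lecturer's solution): pad the variable-length sequences with `pad_sequence` and exclude the padded positions from the loss via `ignore_index`. It reuses `vocab`, `lstm`, `train_tokens_ids` and `train_labels` defined above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# minimal padding/batching sketch (assumes vocab, lstm, train_tokens_ids and train_labels from above)\n",
"from torch.nn.utils.rnn import pad_sequence\n",
"\n",
"PAD_TOKEN = vocab['<pad>']   # padding index for tokens\n",
"PAD_LABEL = -100             # ignored by CrossEntropyLoss below\n",
"batch_criterion = torch.nn.CrossEntropyLoss(ignore_index=PAD_LABEL)\n",
"\n",
"def make_batch(tokens_list, labels_list):\n",
"    batch_tokens = pad_sequence(tokens_list, batch_first=True, padding_value=PAD_TOKEN)\n",
"    batch_labels = pad_sequence(labels_list, batch_first=True, padding_value=PAD_LABEL)\n",
"    return batch_tokens, batch_labels\n",
"\n",
"batch_tokens, batch_labels = make_batch(train_tokens_ids[:8], train_labels[:8])\n",
"output = lstm(batch_tokens)   # shape: (batch, seq_len, num_tags)\n",
"batch_criterion(output.flatten(0, 1), batch_labels.flatten())"
]
},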
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Zadanie na zajęcia ( 20 minut)\n",
"\n",
"zmodyfikować sieć tak, żeby była używała dwuwarstwowej, dwukierunkowej warstwy GRU oraz dropoutu. Dropout ma nałożony na embeddingi.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"class GRU(torch.nn.Module):\n",
"\n",
" def __init__(self):\n",
" super(GRU, self).__init__()\n",
" self.emb = torch.nn.Embedding(len(vocab.itos),100)\n",
" self.dropout = torch.nn.Dropout(0.2)\n",
" self.rec = torch.nn.GRU(100, 256, 2, batch_first = True, bidirectional = True)\n",
" self.fc1 = torch.nn.Linear(2* 256 , 9)\n",
" \n",
" def forward(self, x):\n",
" emb = torch.relu(self.emb(x))\n",
" emb = self.dropout(emb)\n",
" \n",
" gru_output, h_n = self.rec(emb)\n",
" \n",
" out_weights = self.fc1(gru_output)\n",
"\n",
" return out_weights"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"gru = GRU()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"criterion = torch.nn.CrossEntropyLoss()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"optimizer = torch.optim.Adam(gru.parameters())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"NUM_EPOCHS = 5"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"for i in range(NUM_EPOCHS):\n",
" gru.train()\n",
" #for i in tqdm(range(50)):\n",
" for i in tqdm(range(len(train_labels))):\n",
" batch_tokens = train_tokens_ids[i].unsqueeze(0)\n",
" tags = train_labels[i].unsqueeze(1)\n",
" \n",
" \n",
" predicted_tags = gru(batch_tokens)\n",
"\n",
" \n",
" optimizer.zero_grad()\n",
" loss = criterion(predicted_tags.squeeze(0),tags.squeeze(1))\n",
" \n",
" loss.backward()\n",
" optimizer.step()\n",
" \n",
" \n",
" gru.eval()\n",
" print(eval_model(validation_tokens_ids, validation_labels, gru))"
]
},
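{
"cell_type": "markdown",
"metadata": {},
"source": [
"Related to the homework below: a minimal sketch of how GRU emissions can be fed into a CRF layer. It assumes the pytorch-crf package (`pip install pytorch-crf`) used in the previous class; the hyperparameters are illustrative, not a reference solution."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch only (assumes the pytorch-crf package is installed); hyperparameters are illustrative\n",
"from torchcrf import CRF\n",
"\n",
"class GRU_CRF(torch.nn.Module):\n",
"\n",
"    def __init__(self, vocab_size, num_tags):\n",
"        super(GRU_CRF, self).__init__()\n",
"        self.emb = torch.nn.Embedding(vocab_size, 100)\n",
"        self.rec = torch.nn.GRU(100, 256, 1, batch_first=True, bidirectional=True)\n",
"        self.fc1 = torch.nn.Linear(2 * 256, num_tags)\n",
"        self.crf = CRF(num_tags, batch_first=True)\n",
"\n",
"    def emissions(self, x):\n",
"        gru_output, h_n = self.rec(self.emb(x))\n",
"        return self.fc1(gru_output)\n",
"\n",
"    def loss(self, x, tags):\n",
"        # negative log-likelihood of the tag sequence under the CRF\n",
"        return -self.crf(self.emissions(x), tags)\n",
"\n",
"    def decode(self, x):\n",
"        # best tag sequences (Viterbi decoding)\n",
"        return self.crf.decode(self.emissions(x))"
]
},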
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Zadanie domowe\n",
"\n",
"\n",
"- sklonować repozytorium https://git.wmi.amu.edu.pl/kubapok/en-ner-conll-2003\n",
"- stworzyć model seq labelling bazujący na sieci neuronowej opisanej w punkcie niżej (można bazować na tym jupyterze lub nie).\n",
"- model sieci to GRU (o dowolnych parametrach) + CRF w pytorchu korzystając z modułu CRF z poprzednich zajęć- - stworzyć predykcje w plikach dev-0/out.tsv oraz test-A/out.tsv\n",
"- wynik fscore sprawdzony za pomocą narzędzia geval (patrz poprzednie zadanie) powinien wynosić conajmniej 0.65\n",
"- proszę umieścić predykcję oraz skrypty generujące (w postaci tekstowej a nie jupyter) w repo, a w MS TEAMS umieścić link do swojego repo\n",
"termin 22.06, 60 punktów, za najlepszy wynik- 100 punktów\n",
" "
]
}
],
"metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"lang": "pl",
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.3"
},
"subtitle": "11.NER RNN[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
},
"nbformat": 4,
"nbformat_minor": 4
}

cw/12_transformery.ipynb Normal file

@ -0,0 +1,690 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 12. <i>Transformery</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# bpe"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"pip install tokenizers"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://github.com/huggingface/tokenizers/tree/master/bindings/python"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tokenizers import Tokenizer, models, trainers"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tokenizers.trainers import BpeTrainer"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenizer = Tokenizer(models.BPE())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"trainer = trainers.BpeTrainer(vocab_size=20000, min_frequency=2)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://wolnelektury.pl/media/book/txt/pan-tadeusz.txt"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenizer.train(files = ['/home/kuba/Syncthing/przedmioty/2020-02/ISI/zajecia9_ngramowy_model_jDDezykowy/pan-tadeusz-train.txt'], trainer = trainer)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output = tokenizer.encode(\"Nie śpiewają piosenek: pracują leniwo,\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output.ids"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output.tokens"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenizer.save(\"./my-bpe.tokenizer.json\", pretty=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## ZADANIE\n",
"stworzyć BPE tokenizer na podstawie https://git.wmi.amu.edu.pl/kubapok/lalka-lm/src/branch/master/train/train.tsv\n",
"i stworzyć stokenizowaną listę: \n",
"https://git.wmi.amu.edu.pl/kubapok/lalka-lm/src/branch/master/test-A/in.tsv\n",
"\n",
"wybrać vocab_size = 8k, uwzględnić dodatkowe tokeny: BOS oraz EOS i wpleść je do zbioru testowego"
]
},
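{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch for the exercise above (not a reference solution): the trainer accepts a `special_tokens` list, and the BOS/EOS markers can then be woven into every test line before encoding. The file paths below are placeholders for the cloned lalka-lm repository."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch only; 'train.tsv' and 'in.tsv' are placeholder paths for the lalka-lm train and test-A files\n",
"tokenizer = Tokenizer(models.BPE())\n",
"trainer = trainers.BpeTrainer(vocab_size=8000, special_tokens=['[BOS]', '[EOS]'])\n",
"tokenizer.train(files=['train.tsv'], trainer=trainer)\n",
"\n",
"with open('in.tsv') as test_in:\n",
"    test_lines = ['[BOS] ' + line.rstrip('\\n') + ' [EOS]' for line in test_in]\n",
"\n",
"tokenized = [tokenizer.encode(line) for line in test_lines]\n",
"tokenized[0].tokens"
]
},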
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# transformery"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# pip install transformers"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"przykłady pochodzą częściowo z: https://huggingface.co/"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from transformers import pipeline, set_seed"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from transformers import RobertaTokenizer, RobertaModel"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = RobertaModel.from_pretrained('roberta-base')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"text = \"Replace me by any text you'd like. Bla Bla\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"encoded_input = tokenizer(text, return_tensors='pt')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"encoded_input['input_ids']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"encoded_input['input_ids']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenizer.decode([162])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output = model(**encoded_input)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"output"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://huggingface.co/transformers/main_classes/output.html#basemodeloutputwithpoolingandcrossattentionsM"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://arxiv.org/pdf/1907.11692.pdf"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(output)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output[0].shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"\n",
"output[1].shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output = model(**encoded_input, output_hidden_states=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(output)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(output[2])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"output[2][0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output[2][0].shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output[2][1].shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output[2][12].shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output = model(**encoded_input, output_attentions=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(output)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(output[2])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output[2][0].shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"output[2][2]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## gotowe api"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### generowanie tekstu"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = pipeline('text-generation', model='gpt2')"
]
},
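{
"cell_type": "markdown",
"metadata": {},
"source": [
"`set_seed` was imported above but not used; calling it before generation makes the sampled outputs reproducible (a small illustrative addition, the seed value is arbitrary)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"set_seed(42)  # arbitrary seed; makes the generations below reproducible"
]
},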
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model(\"Hello, I'm a computer science student\", max_length=30, num_return_sequences=5)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"model(\"I want to contribute to Google's Computer Vision Program, which is doing extensive work on big\", max_length=30, num_return_sequences=5)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### sentiment analysis"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from transformers import pipeline\n",
"\n",
"model = pipeline(\"sentiment-analysis\", model='distilbert-base-uncased-finetuned-sst-2-english')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model(\"I'm very happy. Today is the beatifull weather\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model(\"It's raining. What a terrible day...\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## NER"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = pipeline(\"sentiment-analysis\", model='distilbert-base-uncased-finetuned-sst-2-english')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from transformers import pipeline\n",
"model = pipeline(\"ner\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"text = \"George Washington went to Washington\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model(text)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### masked language modelling"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### ZADANIE (10 minut)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"przewidzieć <mask> token w \"The world <MASK> II started in 1939\"\" wg dowolnego anglojęzycznego modelu"
]
},
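{
"cell_type": "markdown",
"metadata": {},
"source": [
"One possible approach (a sketch, not a reference solution): the fill-mask pipeline with its default English model; the mask token is taken from the pipeline's own tokenizer."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch: the default fill-mask pipeline (downloads a default English model)\n",
"mask_model = pipeline('fill-mask')\n",
"mask_model(f\"The world {mask_model.tokenizer.mask_token} II started in 1939\")"
]
},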
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### text summarization"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"summarizer = pipeline(\"summarization\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ARTICLE = \"\"\" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York.\n",
"A year later, she got married again in Westchester County, but to a different man and without divorcing her first husband.\n",
"Only 18 days after that marriage, she got hitched yet again. Then, Barrientos declared \"I do\" five more times, sometimes only within two weeks of each other.\n",
"In 2010, she married once more, this time in the Bronx. In an application for a marriage license, she stated it was her \"first and only\" marriage.\n",
"Barrientos, now 39, is facing two criminal counts of \"offering a false instrument for filing in the first degree,\" referring to her false statements on the\n",
"2010 marriage license application, according to court documents.\n",
"Prosecutors said the marriages were part of an immigration scam.\n",
"On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to her attorney, Christopher Wright, who declined to comment further.\n",
"After leaving court, Barrientos was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New York subway through an emergency exit, said Detective\n",
"Annette Markowski, a police spokeswoman. In total, Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002.\n",
"All occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be married to four men, and at one time, she was married to eight men at once, prosecutors say.\n",
"Prosecutors said the immigration scam involved some of her husbands, who filed for permanent residence status shortly after the marriages.\n",
"Any divorces happened only after such filings were approved. It was unclear whether any of the men will be prosecuted.\n",
"The case was referred to the Bronx District Attorney\\'s Office by Immigration and Customs Enforcement and the Department of Homeland Security\\'s\n",
"Investigation Division. Seven of the men are from so-called \"red-flagged\" countries, including Egypt, Turkey, Georgia, Pakistan and Mali.\n",
"Her eighth husband, Rashid Rajput, was deported in 2006 to his native Pakistan after an investigation by the Joint Terrorism Task Force.\n",
"If convicted, Barrientos faces up to four years in prison. Her next court appearance is scheduled for May 18.\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(summarizer(ARTICLE, max_length=130, min_length=30, do_sample=False))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### ZADANIE DOMOWE"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"- sforkować repozytorium: https://git.wmi.amu.edu.pl/kubapok/paranormal-or-skeptic-ISI-public\n",
"- finetunować klasyfikator bazujący na jakieś pretrenowanej sieć typu transformer (np BERT, Roberta). Można użyć dowolnej biblioteki\n",
" (np hugging face, fairseq)\n",
"- stworzyć predykcje w plikach dev-0/out.tsv oraz test-A/out.tsv\n",
"- wynik accuracy sprawdzony za pomocą narzędzia geval (patrz poprzednie zadanie) powinien wynosić conajmniej 0.67\n",
"- proszę umieścić predykcję oraz skrypty generujące (w postaci tekstowej a nie jupyter) w repo, a w MS TEAMS umieścić link do swojego repo\n",
"termin 22.06, 60 punktów\n"
]
}
],
"metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"lang": "pl",
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.3"
},
"subtitle": "12.Transformery[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
},
"nbformat": 4,
"nbformat_minor": 4
}


@ -0,0 +1,802 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 12. <i>Transformery</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# bpe"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"pip install tokenizers"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://github.com/huggingface/tokenizers/tree/master/bindings/python"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tokenizers import Tokenizer, models, trainers"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tokenizers.trainers import BpeTrainer"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenizer = Tokenizer(models.BPE())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"trainer = trainers.BpeTrainer(vocab_size=20000, min_frequency=2)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://wolnelektury.pl/media/book/txt/pan-tadeusz.txt"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenizer.train(files = ['/home/kuba/Syncthing/przedmioty/2020-02/ISI/zajecia9_ngramowy_model_jDDezykowy/pan-tadeusz-train.txt'], trainer = trainer)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output = tokenizer.encode(\"Nie śpiewają piosenek: pracują leniwo,\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output.ids"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output.tokens"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenizer.save(\"./my-bpe.tokenizer.json\", pretty=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## ZADANIE\n",
"stworzyć BPE tokenizer na podstawie https://git.wmi.amu.edu.pl/kubapok/lalka-lm/src/branch/master/train/train.tsv\n",
"i stworzyć stokenizowaną listę: \n",
"https://git.wmi.amu.edu.pl/kubapok/lalka-lm/src/branch/master/test-A/in.tsv\n",
"\n",
"wybrać vocab_size = 8k, uwzględnić dodatkowe tokeny: BOS oraz EOS i wpleść je do zbioru testowego"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenizer = Tokenizer(models.BPE())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"trainer = trainers.BpeTrainer(vocab_size=8000, special_tokens=[\"[BOS]\", \"[EOS]\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenizer.train(files = ['/home/kuba/Syncthing/przedmioty/2020-02/ISI/zajecia12_transformers/lalka-lm/train/train_with_special_tokens'], trainer = trainer)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_path = '/home/kuba/Syncthing/przedmioty/2020-02/ISI/zajecia12_transformers/lalka-lm/train/train.tsv'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"with open(test_path, 'r') as test_in:\n",
" test_lines = ['[BOS] ' + x.rstrip('\\n') + ' [EOS]' for x in test_in.readlines()]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_lines"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenized = [tokenizer.encode(x) for x in test_lines]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenized"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"tokenized[0].tokens"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenized[0].tokens"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# transformery"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# pip install transformers"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"przykłady pochodzą częściowo z: https://huggingface.co/"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from transformers import pipeline, set_seed"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from transformers import RobertaTokenizer, RobertaModel"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = RobertaModel.from_pretrained('roberta-base')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"text = \"Replace me by any text you'd like. Bla Bla\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"encoded_input = tokenizer(text, return_tensors='pt')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"encoded_input['input_ids']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"encoded_input['input_ids']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenizer.decode([162])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output = model(**encoded_input)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"output"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://huggingface.co/transformers/main_classes/output.html#basemodeloutputwithpoolingandcrossattentionsM"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://arxiv.org/pdf/1907.11692.pdf"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(output)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output[0].shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"\n",
"output[1].shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output = model(**encoded_input, output_hidden_states=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(output)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(output[2])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"output[2][0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output[2][0].shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output[2][1].shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output[2][12].shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output = model(**encoded_input, output_attentions=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(output)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"len(output[2])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"output[2][0].shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"output[2][2]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## gotowe api"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### generowanie tekstu"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = pipeline('text-generation', model='gpt2')"
]
},
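{
"cell_type": "markdown",
"metadata": {},
"source": [
"`set_seed` was imported above but not used; calling it before generation makes the sampled outputs reproducible (a small illustrative addition, the seed value is arbitrary)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"set_seed(42)  # arbitrary seed; makes the generations below reproducible"
]
},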
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model(\"Hello, I'm a computer science student\", max_length=30, num_return_sequences=5)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"model(\"I want to contribute to Google's Computer Vision Program, which is doing extensive work on big\", max_length=30, num_return_sequences=5)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### sentiment analysis"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from transformers import pipeline\n",
"\n",
"model = pipeline(\"sentiment-analysis\", model='distilbert-base-uncased-finetuned-sst-2-english')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model(\"I'm very happy. Today is the beatifull weather\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model(\"It's raining. What a terrible day...\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## NER"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = pipeline(\"sentiment-analysis\", model='distilbert-base-uncased-finetuned-sst-2-english')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from transformers import pipeline\n",
"model = pipeline(\"ner\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"text = \"George Washington went to Washington\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model(text)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### masked language modelling"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"### ZADANIE (10 minut)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"przewidziać <mask> token w \"The world <MASK> II started in 1939\"\" wg dowolnego anglojęzycznego modelu"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = pipeline(\"fill-mask\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model(f\"The world {model.tokenizer.mask_token} II started in 1939\")\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### text summarization"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"summarizer = pipeline(\"summarization\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ARTICLE = \"\"\" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York.\n",
"A year later, she got married again in Westchester County, but to a different man and without divorcing her first husband.\n",
"Only 18 days after that marriage, she got hitched yet again. Then, Barrientos declared \"I do\" five more times, sometimes only within two weeks of each other.\n",
"In 2010, she married once more, this time in the Bronx. In an application for a marriage license, she stated it was her \"first and only\" marriage.\n",
"Barrientos, now 39, is facing two criminal counts of \"offering a false instrument for filing in the first degree,\" referring to her false statements on the\n",
"2010 marriage license application, according to court documents.\n",
"Prosecutors said the marriages were part of an immigration scam.\n",
"On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to her attorney, Christopher Wright, who declined to comment further.\n",
"After leaving court, Barrientos was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New York subway through an emergency exit, said Detective\n",
"Annette Markowski, a police spokeswoman. In total, Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002.\n",
"All occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be married to four men, and at one time, she was married to eight men at once, prosecutors say.\n",
"Prosecutors said the immigration scam involved some of her husbands, who filed for permanent residence status shortly after the marriages.\n",
"Any divorces happened only after such filings were approved. It was unclear whether any of the men will be prosecuted.\n",
"The case was referred to the Bronx District Attorney\\'s Office by Immigration and Customs Enforcement and the Department of Homeland Security\\'s\n",
"Investigation Division. Seven of the men are from so-called \"red-flagged\" countries, including Egypt, Turkey, Georgia, Pakistan and Mali.\n",
"Her eighth husband, Rashid Rajput, was deported in 2006 to his native Pakistan after an investigation by the Joint Terrorism Task Force.\n",
"If convicted, Barrientos faces up to four years in prison. Her next court appearance is scheduled for May 18.\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(summarizer(ARTICLE, max_length=130, min_length=30, do_sample=False))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### ZADANIE DOMOWE"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"- sforkować repozytorium: https://git.wmi.amu.edu.pl/kubapok/paranormal-or-skeptic-ISI-public\n",
"- finetunować klasyfikator bazujący na jakieś pretrenowanej sieć typu transformer (np BERT, Roberta). Można użyć dowolnej biblioteki\n",
" (np hugging face, fairseq)\n",
"- stworzyć predykcje w plikach dev-0/out.tsv oraz test-A/out.tsv\n",
"- wynik accuracy sprawdzony za pomocą narzędzia geval (patrz poprzednie zadanie) powinien wynosić conajmniej 0.67\n",
"- proszę umieścić predykcję oraz skrypty generujące (w postaci tekstowej a nie jupyter) w repo, a w MS TEAMS umieścić link do swojego repo\n",
"termin 22.06, 60 punktów\n"
]
}
],
"metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"lang": "pl",
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.3"
},
"subtitle": "12.Transformery[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
},
"nbformat": 4,
"nbformat_minor": 4
}

cw/13_transformery2.ipynb Normal file

@ -0,0 +1,243 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 13. <i>Transformery 2</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Wizualizacja atencji\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://github.com/jessevig/bertviz"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"!pip install bertviz"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from transformers import AutoTokenizer, AutoModel\n",
"from bertviz import model_view, head_view"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"TEXT = \"This is a sample input sentence for a transformer model\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"MODEL = \"distilbert-base-uncased\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenizer = AutoTokenizer.from_pretrained(MODEL)\n",
"model = AutoModel.from_pretrained(MODEL, output_attentions=True)\n",
"inputs = tokenizer.encode(TEXT, return_tensors='pt')\n",
"outputs = model(inputs)\n",
"attention = outputs[-1]\n",
"tokens = tokenizer.convert_ids_to_tokens(inputs[0]) \n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### SELF ATTENTION MODELS"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"head_view(attention, tokens)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_view(attention, tokens)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### ENCODER-DECODER MODELS"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"MODEL = \"Helsinki-NLP/opus-mt-en-de\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"TEXT_ENCODER = \"She sees the small elephant.\"\n",
"TEXT_DECODER = \"Sie sieht den kleinen Elefanten.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenizer = AutoTokenizer.from_pretrained(MODEL)\n",
"model = AutoModel.from_pretrained(MODEL, output_attentions=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"encoder_input_ids = tokenizer(TEXT_ENCODER, return_tensors=\"pt\", add_special_tokens=True).input_ids\n",
"decoder_input_ids = tokenizer(TEXT_DECODER, return_tensors=\"pt\", add_special_tokens=True).input_ids\n",
"\n",
"outputs = model(input_ids=encoder_input_ids, decoder_input_ids=decoder_input_ids)\n",
"\n",
"encoder_text = tokenizer.convert_ids_to_tokens(encoder_input_ids[0])\n",
"decoder_text = tokenizer.convert_ids_to_tokens(decoder_input_ids[0])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"head_view(\n",
" encoder_attention=outputs.encoder_attentions,\n",
" decoder_attention=outputs.decoder_attentions,\n",
" cross_attention=outputs.cross_attentions,\n",
" encoder_tokens= encoder_text,\n",
" decoder_tokens = decoder_text\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"model_view(\n",
" encoder_attention=outputs.encoder_attentions,\n",
" decoder_attention=outputs.decoder_attentions,\n",
" cross_attention=outputs.cross_attentions,\n",
" encoder_tokens= encoder_text,\n",
" decoder_tokens = decoder_text\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Zadanie (10 minut)\n",
"\n",
"Za pomocą modelu en-fr przetłumacz dowolne zdanie z angielskiego na język francuski i sprawdź wagi atencji dla tego tłumaczenia"
]
},
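{
"cell_type": "markdown",
"metadata": {},
"source": [
"A sketch of one way to do the exercise (not a reference solution): reuse the encoder-decoder attention code above with an en-fr model from the Hugging Face hub; the sentence pair below is made up, and any correct translation pair works."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch: same procedure as for en-de above, with an en-fr model; sentences are illustrative\n",
"MODEL = 'Helsinki-NLP/opus-mt-en-fr'\n",
"TEXT_ENCODER = 'She sees the small elephant.'\n",
"TEXT_DECODER = 'Elle voit le petit éléphant.'\n",
"\n",
"tokenizer = AutoTokenizer.from_pretrained(MODEL)\n",
"model = AutoModel.from_pretrained(MODEL, output_attentions=True)\n",
"\n",
"encoder_input_ids = tokenizer(TEXT_ENCODER, return_tensors='pt', add_special_tokens=True).input_ids\n",
"decoder_input_ids = tokenizer(TEXT_DECODER, return_tensors='pt', add_special_tokens=True).input_ids\n",
"outputs = model(input_ids=encoder_input_ids, decoder_input_ids=decoder_input_ids)\n",
"\n",
"head_view(\n",
"    encoder_attention=outputs.encoder_attentions,\n",
"    decoder_attention=outputs.decoder_attentions,\n",
"    cross_attention=outputs.cross_attentions,\n",
"    encoder_tokens=tokenizer.convert_ids_to_tokens(encoder_input_ids[0]),\n",
"    decoder_tokens=tokenizer.convert_ids_to_tokens(decoder_input_ids[0])\n",
")"
]
},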
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### PRZYKŁAD: GPT3"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### ZADANIE DOMOWE - POLEVAL"
]
}
],
"metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"lang": "pl",
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.3"
},
"subtitle": "13.Transformery 2[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
},
"nbformat": 4,
"nbformat_minor": 4
}


@ -0,0 +1,336 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 13. <i>Transformery 2</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Wizualizacja atencji\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://github.com/jessevig/bertviz"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"!pip install bertviz"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from transformers import AutoTokenizer, AutoModel\n",
"from bertviz import model_view, head_view"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"TEXT = \"This is a sample input sentence for a transformer model\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"MODEL = \"distilbert-base-uncased\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenizer = AutoTokenizer.from_pretrained(MODEL)\n",
"model = AutoModel.from_pretrained(MODEL, output_attentions=True)\n",
"inputs = tokenizer.encode(TEXT, return_tensors='pt')\n",
"outputs = model(inputs)\n",
"attention = outputs[-1]\n",
"tokens = tokenizer.convert_ids_to_tokens(inputs[0]) \n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### SELF ATTENTION MODELS"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"head_view(attention, tokens)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_view(attention, tokens)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### ENCODER-DECODER MODELS"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"MODEL = \"Helsinki-NLP/opus-mt-en-de\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"TEXT_ENCODER = \"She sees the small elephant.\"\n",
"TEXT_DECODER = \"Sie sieht den kleinen Elefanten.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenizer = AutoTokenizer.from_pretrained(MODEL)\n",
"model = AutoModel.from_pretrained(MODEL, output_attentions=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"encoder_input_ids = tokenizer(TEXT_ENCODER, return_tensors=\"pt\", add_special_tokens=True).input_ids\n",
"decoder_input_ids = tokenizer(TEXT_DECODER, return_tensors=\"pt\", add_special_tokens=True).input_ids\n",
"\n",
"outputs = model(input_ids=encoder_input_ids, decoder_input_ids=decoder_input_ids)\n",
"\n",
"encoder_text = tokenizer.convert_ids_to_tokens(encoder_input_ids[0])\n",
"decoder_text = tokenizer.convert_ids_to_tokens(decoder_input_ids[0])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"head_view(\n",
" encoder_attention=outputs.encoder_attentions,\n",
" decoder_attention=outputs.decoder_attentions,\n",
" cross_attention=outputs.cross_attentions,\n",
" encoder_tokens= encoder_text,\n",
" decoder_tokens = decoder_text\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"model_view(\n",
" encoder_attention=outputs.encoder_attentions,\n",
" decoder_attention=outputs.decoder_attentions,\n",
" cross_attention=outputs.cross_attentions,\n",
" encoder_tokens= encoder_text,\n",
" decoder_tokens = decoder_text\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Zadanie (10 minut)\n",
"\n",
"Za pomocą modelu en-fr przetłumacz dowolne zdanie z angielskiego na język francuski i sprawdź wagi atencji dla tego tłumaczenia"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"MODEL = \"Helsinki-NLP/opus-mt-en-fr\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"TEXT_ENCODER = \"Although I still have fresh memories of my brother the elder Hamlets death, and though it was proper to mourn him throughout our kingdom, life still goes on—I think its wise to mourn him while also thinking about my own well being.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from transformers import AutoModelWithLMHead, AutoTokenizer\n",
"\n",
"model = AutoModelWithLMHead.from_pretrained(MODEL)\n",
"tokenizer = AutoTokenizer.from_pretrained(MODEL)\n",
"\n",
"inputs = tokenizer.encode(TEXT_ENCODER, return_tensors=\"pt\")\n",
"outputs = model.generate(inputs, max_length=40, num_beams=4, early_stopping=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"TEXT_DECODER = tokenizer.decode(outputs[0])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"TEXT_DECODER"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tokenizer = AutoTokenizer.from_pretrained(MODEL)\n",
"model = AutoModel.from_pretrained(MODEL, output_attentions=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"encoder_input_ids = tokenizer(TEXT_ENCODER, return_tensors=\"pt\", add_special_tokens=True).input_ids\n",
"decoder_input_ids = tokenizer(TEXT_DECODER, return_tensors=\"pt\", add_special_tokens=True).input_ids\n",
"\n",
"outputs = model(input_ids=encoder_input_ids, decoder_input_ids=decoder_input_ids)\n",
"\n",
"encoder_text = tokenizer.convert_ids_to_tokens(encoder_input_ids[0])\n",
"decoder_text = tokenizer.convert_ids_to_tokens(decoder_input_ids[0])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"head_view(\n",
" encoder_attention=outputs.encoder_attentions,\n",
" decoder_attention=outputs.decoder_attentions,\n",
" cross_attention=outputs.cross_attentions,\n",
" encoder_tokens= encoder_text,\n",
" decoder_tokens = decoder_text\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### PRZYKŁAD: GPT3"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### ZADANIE DOMOWE - POLEVAL"
]
}
],
"metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"lang": "pl",
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.3"
},
"subtitle": "13.Transformery 2[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
},
"nbformat": 4,
"nbformat_minor": 4
}


@ -0,0 +1,212 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 14. <i>Ekstrakcja informacji seq2seq</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### SIMILARITY SEARCH\n",
"1. zainstaluj faiss i zrób tutorial: https://github.com/facebookresearch/faiss\n",
"2. wczytaj treści artykułów z BBC News Train.csv\n",
"3. Użyj któregoś z transformerów (możesz użyć biblioteki sentence-transformers) do stworzenia embeddingów dokumentów\n",
"4. wczytaj embeddingi do bazy danych faiss\n",
"5. wyszukaj query 'consumer electronics market'"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://www.kaggle.com/avishi/bbc-news-train-data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import pickle\n",
"import numpy as np\n",
"import faiss\n",
"from sklearn.metrics import ndcg_score, dcg_score, average_precision_score"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"!pip install sentence-transformers"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sentence_transformers import SentenceTransformer\n",
"sentences = [\"Hello World\", \"Hallo Welt\"]\n",
"\n",
"model = SentenceTransformer('LaBSE')\n",
"embeddings = model.encode(sentences)\n",
"print(embeddings)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"r = pd.read_csv('BBC News Train.csv')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"DOCUMENTS = list(r.Text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"embeddings = model.encode(DOCUMENTS)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"embeddings = model.encode(list(r.Text))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"QUERY_STR = 'consumer electronics market'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"query = model.encode([QUERY_STR])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"index = faiss.IndexFlatL2(embeddings.shape[1]) "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"index.add(np.ascontiguousarray(embeddings))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"D, I = index.search(query, 5) "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"I"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"D"
]
},
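{
"cell_type": "markdown",
"metadata": {},
"source": [
"To see what the five nearest neighbours actually are, the indices in `I` can be mapped back to the dataframe (this assumes the `Category` column of the Kaggle CSV; the distances in `D` come from the L2 index above)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# map the search results back to the articles (Category column assumed from the Kaggle CSV)\n",
"for dist, idx in zip(D[0], I[0]):\n",
"    print(round(float(dist), 2), r.Category[idx], DOCUMENTS[idx][:60])"
]
},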
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"DOCUMENTS[1363]"
]
}
],
"metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"lang": "pl",
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.3"
},
"subtitle": "14.Ekstrakcja informacji seq2seq[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
},
"nbformat": 4,
"nbformat_minor": 4
}


@ -0,0 +1,369 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 15. <i>Similarity search</i> [ćwiczenia]</h2> \n",
"<h3> Jakub Pokrywka (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://arxiv.org/pdf/1910.10683.pdf"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://github.com/applicaai/kleister-nda"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from transformers import T5Tokenizer, T5ForConditionalGeneration"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"text = \"translate English to French: My name is Azeem and I live in India\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"text = \"summarize: Machine learning involves computers discovering how they can perform tasks without being explicitly programmed to do so. It involves computers learning from data provided so that they carry out certain tasks. For simple tasks assigned to computers, it is possible to program algorithms telling the machine how to execute all steps required to solve the problem at hand; on the computer's part, no learning is needed. For more advanced tasks, it can be challenging for a human to manually create the needed algorithms. In practice, it can turn out to be more effective to help the machine develop its own algorithm, rather than having human programmers specify every needed step.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"from transformers import T5Tokenizer, T5ForConditionalGeneration\n",
"\n",
"tokenizer = T5Tokenizer.from_pretrained('t5-small')\n",
"\n",
"model = T5ForConditionalGeneration.from_pretrained('t5-small', return_dict=True,).to('cuda')\n",
"\n",
"\n",
"# You can also use \"translate English to French\" and \"translate English to Romanian\"\n",
"input_ids = tokenizer(text, return_tensors=\"pt\").input_ids.to('cuda') # Batch size 1\n",
"\n",
"outputs = model.generate(input_ids)\n",
"\n",
"decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\n",
"\n",
"print(decoded)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"KLEISTER_PATH = '/media/kuba/ssdsam/Syncthing/Syncthing/przedmioty/2020-02/IE/applica/kleister-nda/'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_exp_f = open(KLEISTER_PATH + 'train/expected.tsv')\n",
"train_exp = []\n",
"for line in train_exp_f:\n",
" line_splitted = line.strip('\\n').split(' ')\n",
" found = False\n",
" for elem in line_splitted:\n",
" if 'jurisdiction=' in elem:\n",
" train_exp.append('jurisdiction: ' + elem.split('=')[1])\n",
" found = True\n",
" break\n",
" if not found:\n",
" train_exp.append('jurisdiction: NONE')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dev_exp_f = open(KLEISTER_PATH + 'dev-0/expected.tsv')\n",
"dev_exp = []\n",
"for line in dev_exp_f:\n",
" line_splitted = line.strip('\\n').split(' ')\n",
" found = False\n",
" for elem in line_splitted:\n",
" if 'jurisdiction=' in elem:\n",
" dev_exp.append('jurisdiction: ' + elem.split('=')[1])\n",
" found = True\n",
" break\n",
" if not found:\n",
" dev_exp.append('jurisdiction: NONE')"
]
},
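{
"cell_type": "markdown",
"metadata": {},
"source": [
"Ten sam kod parsujący powtarza się dla zbiorów train i dev-0 — poniżej szkic (dodany wyłącznie dla ilustracji, nie jest to część oryginalnego rozwiązania) pokazujący, jak można wydzielić go do funkcji pomocniczej."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# szkic funkcji pomocniczej: wyciąga pole jurisdiction z pliku expected.tsv\n",
"def load_jurisdiction(path):\n",
"    expected = []\n",
"    with open(path) as f:\n",
"        for line in f:\n",
"            found = False\n",
"            for elem in line.strip('\\n').split(' '):\n",
"                if 'jurisdiction=' in elem:\n",
"                    expected.append('jurisdiction: ' + elem.split('=')[1])\n",
"                    found = True\n",
"                    break\n",
"            if not found:\n",
"                expected.append('jurisdiction: NONE')\n",
"    return expected\n",
"\n",
"# użycie (równoważne komórkom powyżej):\n",
"# train_exp = load_jurisdiction(KLEISTER_PATH + 'train/expected.tsv')\n",
"# dev_exp = load_jurisdiction(KLEISTER_PATH + 'dev-0/expected.tsv')"
]
},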
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_exp"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_in_f = open(KLEISTER_PATH + 'train/in.tsv')\n",
"train_in = []\n",
"for line in train_in_f:\n",
" line = line.rstrip('\\n')\n",
" train_in.append(line)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dev_in_f = open(KLEISTER_PATH + 'dev-0/in.tsv')\n",
"dev_in = []\n",
"for line in dev_in_f:\n",
" line = line.rstrip('\\n')\n",
" dev_in.append(line)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_in[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.device"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"input = train_in[0]\n",
"\n",
"# You can also use \"translate English to French\" and \"translate English to Romanian\"\n",
"input_ids = tokenizer(input, return_tensors=\"pt\").input_ids[:,:512].to('cuda') # Batch size 1\n",
"\n",
"outputs = model.generate(input_ids)\n",
"\n",
"decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\n",
"\n",
"print(decoded)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"input_ids = tokenizer('translate English to German: The house is wonderful.', return_tensors='pt').input_ids.to('cuda')\n",
"labels = tokenizer('Das Haus ist wunderbar.', return_tensors='pt').input_ids.to('cuda')\n",
"# the forward function automatically creates the correct decoder_input_ids\n",
"loss = model(input_ids=input_ids, labels=labels).loss"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"loss"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from transformers import AdamW\n",
"\n",
"optimizer = AdamW(model.parameters(), lr=5e-5)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.train()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for line_in, line_exp in zip(train_in, train_exp):\n",
" input_ids = tokenizer(line_in, return_tensors='pt').input_ids[:,:512].to('cuda')\n",
" labels = tokenizer(line_exp, return_tensors='pt').input_ids.to('cuda')\n",
" # the forward function automatically creates the correct decoder_input_ids\n",
" loss = model(input_ids=input_ids, labels=labels).loss\n",
" loss.backward()\n",
" optimizer.step()\n",
" optimizer.zero_grad()\n",
" print(loss.item())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.eval()\n"
]
},
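{
"cell_type": "markdown",
"metadata": {},
"source": [
"Poniżej szkic prostej ewaluacji na całym zbiorze dev-0 (dodany dla ilustracji, nie jest to część oryginalnego rozwiązania): liczymy odsetek dokładnych dopasowań między wygenerowaną odpowiedzią a oczekiwaną."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# szkic ewaluacji (założenie: model i tokenizer jak wyżej, dane w dev_in / dev_exp)\n",
"import torch\n",
"\n",
"correct = 0\n",
"with torch.no_grad():\n",
"    for line_in, line_exp in zip(dev_in, dev_exp):\n",
"        input_ids = tokenizer(line_in, return_tensors='pt').input_ids[:,:512].to('cuda')\n",
"        outputs = model.generate(input_ids)\n",
"        decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\n",
"        if decoded == line_exp:\n",
"            correct += 1\n",
"\n",
"print(correct / len(dev_exp))"
]
},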
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"input = dev_in[0]\n",
"\n",
"input_ids = tokenizer(input, return_tensors=\"pt\").input_ids[:,:512].to('cuda') # Batch size 1\n",
"\n",
"outputs = model.generate(input_ids)\n",
"\n",
"decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\n",
"\n",
"print(decoded)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"dev_exp[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"input = dev_in[2]\n",
"\n",
"input_ids = tokenizer(input, return_tensors=\"pt\").input_ids[:,:512].to('cuda') # Batch size 1\n",
"\n",
"outputs = model.generate(input_ids)\n",
"\n",
"decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\n",
"\n",
"print(decoded)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dev_exp[2]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## pytanie:\n",
"- co można poprawić w istniejącym rozwiązaniu?"
]
}
],
"metadata": {
"author": "Jakub Pokrywka",
"email": "kubapok@wmi.amu.edu.pl",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"lang": "pl",
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.3"
},
"subtitle": "15.Similarity search[ćwiczenia]",
"title": "Ekstrakcja informacji",
"year": "2021"
},
"nbformat": 4,
"nbformat_minor": 4
}

1491
cw/BBC News Train.csv Normal file

File diff suppressed because one or more lines are too long

5
run_conversion.sh Normal file

@ -0,0 +1,5 @@
for i in {cw,wyk}/*.ipynb;
do
bash convert_ipynb_to_md.sh "$i"
echo "$i" done
done


@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 1. <i>Wyszukiwarki — wprowadzenie</i> [wykład]</h2> \n",
"<h3> Filip Graliński (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@ -8,16 +22,19 @@
"\n", "\n",
"## Systemy wyszukiwania informacji (information retrieval systems)\n", "## Systemy wyszukiwania informacji (information retrieval systems)\n",
"\n", "\n",
"![System wyszukiwania informacji](system-wyszukiwania-informacji.png)" "![Schemat systemu wyszukiwania informacji](system-wyszukiwania-informacji.png)"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {
"jp-MarkdownHeadingCollapsed": true,
"tags": []
},
"source": [ "source": [
"## Wyszukiwarki\n", "## Wyszukiwarki\n",
"\n", "\n",
"![Wyszukiwarki](wyszukiwarka-internetowa.png)" "![Schemat wyszukiwarki internetowej](wyszukiwarka-internetowa.png)"
] ]
}, },
{ {
@ -89,7 +106,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"Dostępne są też \"ekstrakty\" czystego tekstu - zob. http://data.statmt.org/ngrams/raw/, np. 59 GB czystego tekstu po polsku z 2012 roku." "Dostępne są też „ekstrakty” czystego tekstu — zob. http://data.statmt.org/ngrams/raw/, np. 59 GB czystego tekstu po polsku z 2012 roku."
] ]
}, },
{ {
@ -284,7 +301,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"### Odpytywać \"pasożytniczo\" inną wyszukiwarkę" "### Odpytywać „pasożytniczo” inną wyszukiwarkę"
] ]
}, },
{ {
@ -293,7 +310,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"# see https://hackernoon.com/how-to-scrape-google-with-python-bo7d2tal\n", "# zob. https://hackernoon.com/how-to-scrape-google-with-python-bo7d2tal\n",
"\n", "\n",
"import urllib\n", "import urllib\n",
"import requests\n", "import requests\n",
@ -817,823 +834,7 @@
"User-agent: *\n", "User-agent: *\n",
"Disallow: /*/wyszukaj/\n", "Disallow: /*/wyszukaj/\n",
"Disallow: /*servlet\n", "Disallow: /*servlet\n",
"Disallow: /reloadwww?\n", "...\n",
"Disallow: /dfptools/adview/\n",
"Disallow: /pub/ips/*\n",
"Disallow: /ods?\n",
"Disallow: /getFile.servlet*\n",
"Disallow: /aliasy/blad.jsp\n",
"Disallow: /znajdz.do\n",
"Disallow: /portalSearch.do\n",
"Disallow: /im/ab/b4/10/z17515435Q.jpg\n",
"Disallow: /75224259/\n",
"\n",
"User-agent: Googlebot-News\n",
"Disallow: /nowy/\n",
"Disallow: /mapa_strony\n",
"Disallow: /*/wyszukaj/\n",
"Disallow: /*/51,\n",
"Disallow: /*/55,\n",
"Disallow: /*/2,\n",
"Disallow: /*order=\n",
"Disallow: /*obxx=\n",
"Disallow: /*tag=\n",
"Disallow: /reloadwww?\n",
"Disallow: /ods?\n",
"Disallow: /*servlet\n",
"Disallow: /dfptools/adview/\n",
"\n",
"User-agent: Yandex\n",
"Disallow: /\n",
"\n",
"User-Agent: bingbot\n",
"Disallow: /\n",
"\n",
"User-agent: 008\n",
"Disallow: /\n",
"\n",
"User-agent: 010\n",
"Disallow: /\n",
"\n",
"User-agent: 360Spider\n",
"Disallow: /\n",
"\n",
"User-agent: 80legs\n",
"Disallow: /\n",
"\n",
"User-agent: Aboundex\n",
"Disallow: /\n",
"\n",
"User-agent: accelobot\n",
"Disallow: /\n",
"\n",
"User-agent: Add\\ Catalog\n",
"Disallow: /\n",
"\n",
"User-agent: AhrefsBot\n",
"Disallow: /\n",
"\n",
"User-agent: aiHitBot\n",
"Disallow: /\n",
"\n",
"User-agent: Alexibot\n",
"Disallow: /\n",
"\n",
"User-agent: Aqua_Products\n",
"Disallow: /\n",
"\n",
"User-agent: AskJeeves\n",
"Disallow: /\n",
"\n",
"User-agent: asterias\n",
"Disallow: /\n",
"\n",
"User-agent: awcheckBot\n",
"Disallow: /\n",
"\n",
"User-agent: b2w/0.1\n",
"Disallow: /\n",
"\n",
"User-agent: BackDoorBot/1.0\n",
"Disallow: /\n",
"\n",
"User-agent: BacklinkCrawler\n",
"Disallow: /\n",
"\n",
"User-agent: Baiduspider\n",
"Disallow: /\n",
"\n",
"User-agent: BecomeBot\n",
"Disallow: /\n",
"\n",
"User-agent: BLEXBot\n",
"Disallow: /\n",
"\n",
"User-agent: BlowFish/1.0\n",
"Disallow: /\n",
"\n",
"User-agent: Bookmark search tool\n",
"Disallow: /\n",
"\n",
"User-agent: BotALot\n",
"Disallow: /\n",
"\n",
"User-agent: brandwatch.net\n",
"Disallow: /\n",
"\n",
"User-agent: BuiltBotTough\n",
"Disallow: /\n",
"\n",
"User-agent: Bullseye/1.0\n",
"Disallow: /\n",
"\n",
"User-agent: BunnySlippers\n",
"Disallow: /\n",
"\n",
"User-agent: Butterfly\n",
"Disallow: /\n",
"\n",
"User-agent: CatchBot\n",
"Disallow: /\n",
"\n",
"User-agent: Charlotte\n",
"Disallow: /\n",
"\n",
"User-agent: CheeseBot\n",
"Disallow: /\n",
"\n",
"User-agent: CherryPicker\n",
"Disallow: /\n",
"\n",
"User-agent: CherryPickerElite/1.0\n",
"Disallow: /\n",
"\n",
"User-agent: CherryPickerSE/1.0\n",
"Disallow: /\n",
"\n",
"User-agent: CLIPish\n",
"Disallow: /\n",
"\n",
"User-agent: Cliqzbot\n",
"Disallow: /\n",
"\n",
"User-agent: COMODO\n",
"Disallow: /\n",
"\n",
"User-agent: Comodo-Certificates-Spider\n",
"Disallow: /\n",
"\n",
"User-agent: CompSpyBot\n",
"Disallow: /\n",
"\n",
"User-agent: Copernic\n",
"Disallow: /\n",
"\n",
"User-agent: CopyRightCheck\n",
"Disallow: /\n",
"\n",
"User-agent: cosmos\n",
"Disallow: /\n",
"\n",
"User-agent: crawler\n",
"Disallow: /\n",
"\n",
"User-agent: Crescent\n",
"Disallow: /\n",
"\n",
"User-agent: Crescent Internet ToolPak HTTP OLE Control v.1.0\n",
"Disallow: /\n",
"\n",
"User-agent: Curious\n",
"Disallow: /\n",
"\n",
"User-agent: curl\n",
"Disallow: /\n",
"\n",
"User-agent: dataprovider\\.com\n",
"Disallow: /\n",
"\n",
"User-agent: DinoPing\n",
"Disallow: /\n",
"\n",
"User-agent: discoverybot\n",
"Disallow: /\n",
"\n",
"User-agent: DittoSpyder\n",
"Disallow: /\n",
"\n",
"User-agent: DomainCrawler\n",
"Disallow: /\n",
"\n",
"User-agent: DomainCrawler\n",
"Disallow: /\n",
"\n",
"User-agent: dotbot\n",
"Disallow: /\n",
"\n",
"User-agent: dotnetdotcom\n",
"Disallow: /\n",
"\n",
"User-agent: Dow\\ Jones\\ Searchbot\n",
"Disallow: /\n",
"\n",
"User-agent: dumbot\n",
"Disallow: /\n",
"\n",
"User-agent: EasouSpider\n",
"Disallow: /\n",
"\n",
"User-agent: EmailCollector\n",
"Disallow: /\n",
"\n",
"User-agent: EmailSiphon\n",
"Disallow: /\n",
"\n",
"User-agent: EmailWolf\n",
"Disallow: /\n",
"\n",
"User-agent: Enterprise_Search\n",
"Disallow: /\n",
"\n",
"User-agent: Enterprise_Search/1.0\n",
"Disallow: /\n",
"\n",
"User-agent: EroCrawler\n",
"Disallow: /\n",
"\n",
"User-agent: es\n",
"Disallow: /\n",
"\n",
"User-agent: Exabot\n",
"Disallow: /\n",
"\n",
"User-agent: ExtractorPro\n",
"Disallow: /\n",
"\n",
"User-agent: EzineArticlesLinkScanner\n",
"Disallow: /\n",
"\n",
"User-agent: Ezooms\n",
"Disallow: /\n",
"\n",
"User-agent: FairAd Client\n",
"Disallow: /\n",
"\n",
"User-agent: Flaming AttackBot\n",
"Disallow: /\n",
"\n",
"User-agent: Foobot\n",
"Disallow: /\n",
"\n",
"User-agent: FreeFind\n",
"Disallow: /\n",
"\n",
"User-agent: FTRF\\:\\ Friendly\n",
"Disallow: /\n",
"\n",
"User-agent: Gaisbot\n",
"Disallow: /\n",
"\n",
"User-agent: GetRight/4.2\n",
"Disallow: /\n",
"\n",
"User-agent: gigabot\n",
"Disallow: /\n",
"\n",
"User-agent: grub\n",
"Disallow: /\n",
"\n",
"User-agent: grub-client\n",
"Disallow: /\n",
"\n",
"User-agent: Harvest/1.5\n",
"Disallow: /\n",
"\n",
"User-agent: Hatena Antenna\n",
"Disallow: /\n",
"\n",
"User-agent: hloader\n",
"Disallow: /\n",
"\n",
"User-agent: http://www.SearchEngineWorld.com bot\n",
"Disallow: /\n",
"\n",
"User-agent: http://www.WebmasterWorld.com bot\n",
"Disallow: /\n",
"\n",
"User-agent: HTTP_Request\n",
"Disallow: /\n",
"\n",
"User-agent: HTTP_Request2\n",
"Disallow: /\n",
"\n",
"User-agent: httplib\n",
"Disallow: /\n",
"\n",
"User-agent: humanlinks\n",
"Disallow: /\n",
"\n",
"User-agent: ia_archiver\n",
"Disallow: /\n",
"\n",
"User-agent: ia_archiver\n",
"Disallow: /\n",
"\n",
"User-agent: ia_archiver/1.6\n",
"Disallow: /\n",
"\n",
"User-agent: Indy\\ Library\n",
"Disallow: /\n",
"\n",
"User-agent: InfoNaviRobot\n",
"Disallow: /\n",
"\n",
"User-agent: ip\\-web\\-crawler\\.com\n",
"Disallow: /\n",
"\n",
"User-agent: Iron33/1.0.2\n",
"Disallow: /\n",
"\n",
"User-agent: Jakarta\\ Commons-HttpClient\n",
"Disallow: /\n",
"\n",
"User-agent: Jeeves\n",
"Disallow: /\n",
"\n",
"User-agent: JennyBot\n",
"Disallow: /\n",
"\n",
"User-agent: Jetbot\n",
"Disallow: /\n",
"\n",
"User-agent: Jetbot/1.0\n",
"Disallow: /\n",
"\n",
"User-agent: JikeSpider\n",
"Disallow: /\n",
"\n",
"User-agent: Kenjin Spider\n",
"Disallow: /\n",
"\n",
"User-agent: Keyword Density/0.9\n",
"Disallow: /\n",
"\n",
"User-agent: larbin\n",
"Disallow: /\n",
"\n",
"User-agent: LexiBot\n",
"Disallow: /\n",
"\n",
"User-agent: libWeb/clsHTTP\n",
"Disallow: /\n",
"\n",
"User-agent: libwww-perl\n",
"Disallow: /\n",
"\n",
"User-agent: lindex\\.com\n",
"Disallow: /\n",
"\n",
"User-agent: linkdex\\.com\n",
"Disallow: /\n",
"\n",
"User-agent: linkdexbot\n",
"Disallow: /\n",
"\n",
"User-agent: LinkextractorPro\n",
"Disallow: /\n",
"\n",
"User-agent: LinkScan/8.1a Unix\n",
"Disallow: /\n",
"\n",
"User-agent: LinkWalker\n",
"Disallow: /\n",
"\n",
"User-agent: lipperhey\n",
"Disallow: /\n",
"\n",
"User-agent: LNSpiderguy\n",
"Disallow: /\n",
"\n",
"User-agent: looksmart\n",
"Disallow: /\n",
"\n",
"User-agent: ltbot\n",
"Disallow: /\n",
"\n",
"User-agent: lwp-trivial\n",
"Disallow: /\n",
"\n",
"User-agent: lwp-trivial/1.34\n",
"Disallow: /\n",
"\n",
"User-agent: Lynx\n",
"Disallow: /\n",
"\n",
"User-agent: magpie\\-crawler\n",
"Disallow: /\n",
"\n",
"User-agent: Mata Hari\n",
"Disallow: /\n",
"\n",
"User-agent: Microsoft URL Control\n",
"Disallow: /\n",
"\n",
"User-agent: Microsoft URL Control - 5.01.4511\n",
"Disallow: /\n",
"\n",
"User-agent: Microsoft URL Control - 6.00.8169\n",
"Disallow: /\n",
"\n",
"User-agent: MIIxpc\n",
"Disallow: /\n",
"\n",
"User-agent: MIIxpc/4.2\n",
"Disallow: /\n",
"\n",
"User-agent: Mister PiX\n",
"Disallow: /\n",
"\n",
"User-agent: MJ12bot\n",
"Disallow: /\n",
"\n",
"User-agent: moget\n",
"Disallow: /\n",
"\n",
"User-agent: moget/2.1\n",
"Disallow: /\n",
"\n",
"User-agent: Mozilla/4.0 (compatible; BullsEye; Windows 95)\n",
"Disallow: /\n",
"\n",
"User-agent: MSIE\\ or\\ Firefox\\ mutant\n",
"Disallow: /\n",
"\n",
"User-agent: MSIECrawler\n",
"Disallow: /\n",
"\n",
"User-agent: naver\n",
"Disallow: /\n",
"\n",
"User-agent: NCBot\n",
"Disallow: /\n",
"\n",
"User-agent: NetAnts\n",
"Disallow: /\n",
"\n",
"User-agent: NetcraftSurveyAgent\n",
"Disallow: /\n",
"\n",
"User-agent: netEstate\\ NE\\ Crawler\n",
"Disallow: /\n",
"\n",
"User-agent: NetMechanic\n",
"Disallow: /\n",
"\n",
"User-agent: Netseer\n",
"Disallow: /\n",
"\n",
"User-agent: NextGenSearchBot\n",
"Disallow: /\n",
"\n",
"User-agent: NICErsPRO\n",
"Disallow: /\n",
"\n",
"User-agent: Nutch\n",
"Disallow: /\n",
"\n",
"User-agent: Nutch\n",
"Disallow: /\n",
"\n",
"User-agent: Ocelli\n",
"Disallow: /\n",
"\n",
"User-agent: Offline Explorer\n",
"Disallow: /\n",
"\n",
"User-agent: OmniExplorer_Bot\n",
"Disallow: /\n",
"\n",
"User-agent: Openbot\n",
"Disallow: /\n",
"\n",
"User-agent: Openfind\n",
"Disallow: /\n",
"\n",
"User-agent: Openfind\n",
"Disallow: /\n",
"\n",
"User-agent: Openfind data gathere\n",
"Disallow: /\n",
"\n",
"User-agent: OpenWebIndex\n",
"Disallow: /\n",
"\n",
"User-agent: Oracle Ultra Search\n",
"Disallow: /\n",
"\n",
"User-agent: PagesInventory\n",
"Disallow: /\n",
"\n",
"User-agent: PEAR\n",
"Disallow: /\n",
"\n",
"User-agent: PeoplePal\n",
"Disallow: /\n",
"\n",
"User-agent: PerMan\n",
"Disallow: /\n",
"\n",
"User-agent: ProCogSEOBot\n",
"Disallow: /\n",
"\n",
"User-agent: ProPowerBot/2.14\n",
"Disallow: /\n",
"\n",
"User-agent: ProWebWalker\n",
"Disallow: /\n",
"\n",
"User-agent: proximic\n",
"Disallow: /\n",
"\n",
"User-agent: psbot\n",
"Disallow: /\n",
"\n",
"User-agent: purebot\n",
"Disallow: /\n",
"\n",
"User-agent: QueryN Metasearch\n",
"Disallow: /\n",
"\n",
"User-agent: QuerySeekerSpider\n",
"Disallow: /\n",
"\n",
"User-agent: Radiation Retriever 1.1\n",
"Disallow: /\n",
"\n",
"User-agent: RepoMonkey\n",
"Disallow: /\n",
"\n",
"User-agent: RepoMonkey Bait & Tackle/v1.01\n",
"Disallow: /\n",
"\n",
"User-agent: Riddler\n",
"Disallow: /\n",
"\n",
"User-agent: RMA\n",
"Disallow: /\n",
"\n",
"User-agent: rojerbot\n",
"Disallow: /\n",
"\n",
"User-agent: RyteBot\n",
"Disallow: /\n",
"\n",
"User-agent: scooter\n",
"Disallow: /\n",
"\n",
"User-agent: ScoutJet\n",
"Disallow: /\n",
"\n",
"User-agent: Scrapy\n",
"Disallow: /\n",
"\n",
"User-agent: ScreenerBot\n",
"Disallow: /\n",
"\n",
"User-agent: searchmetrics\n",
"Disallow: /\n",
"\n",
"User-agent: searchpreview\n",
"Disallow: /\n",
"\n",
"User-agent: SemrushBot\n",
"Disallow: /\n",
"\n",
"User-agent: sentibot\n",
"Disallow: /\n",
"\n",
"User-agent: SEO-CRAWLING\n",
"Disallow: /\n",
"\n",
"User-agent: SEOENGWorldBot\n",
"Disallow: /\n",
"\n",
"User-agent: SEOkicks-Robot\n",
"Disallow: /\n",
"\n",
"User-agent: ShopWiki\n",
"Disallow: /\n",
"\n",
"User-agent: sistrix\n",
"Disallow: /\n",
"\n",
"User-agent: sitebot\n",
"Disallow: /\n",
"\n",
"User-agent: SiteSnagger\n",
"Disallow: /\n",
"\n",
"User-agent: Snoopy\n",
"Disallow: /\n",
"\n",
"User-agent: SocialSearcher\n",
"Disallow: /\n",
"\n",
"User-agent: Sogou\n",
"Disallow: /\n",
"\n",
"User-agent: SolomonoBot\n",
"Disallow: /\n",
"\n",
"User-agent: sootle\n",
"Disallow: /\n",
"\n",
"User-agent: Sosospider\n",
"Disallow: /\n",
"\n",
"User-agent: SpankBot\n",
"Disallow: /\n",
"\n",
"User-agent: spanner\n",
"Disallow: /\n",
"\n",
"User-agent: spbot\n",
"Disallow: /\n",
"\n",
"User-agent: Speedy\n",
"Disallow: /\n",
"\n",
"User-agent: Stanford\n",
"Disallow: /\n",
"\n",
"User-agent: Stanford Comp Sci\n",
"Disallow: /\n",
"\n",
"User-agent: SurveyBot\n",
"Disallow: /\n",
"\n",
"User-agent: suzuran\n",
"Disallow: /\n",
"\n",
"User-agent: Szukacz/1.4\n",
"Disallow: /\n",
"\n",
"User-agent: Szukacz/1.4\n",
"Disallow: /\n",
"\n",
"User-agent: Teleport\n",
"Disallow: /\n",
"\n",
"User-agent: TeleportPro\n",
"Disallow: /\n",
"\n",
"User-agent: Telesoft\n",
"Disallow: /\n",
"\n",
"User-agent: Teoma\n",
"Disallow: /\n",
"\n",
"User-agent: The Intraformant\n",
"Disallow: /\n",
"\n",
"User-agent: The\\ Incutio\\ XML-RPC\\ PHP\\ Library\n",
"Disallow: /\n",
"\n",
"User-agent: TheNomad\n",
"Disallow: /\n",
"\n",
"User-agent: toCrawl/UrlDispatcher\n",
"Disallow: /\n",
"\n",
"User-agent: True_Robot\n",
"Disallow: /\n",
"\n",
"User-agent: True_Robot/1.0\n",
"Disallow: /\n",
"\n",
"User-agent: turingos\n",
"Disallow: /\n",
"\n",
"User-agent: TurnitinBot\n",
"Disallow: /\n",
"\n",
"User-agent: uCrawler\n",
"Disallow: /\n",
"\n",
"User-agent: URL Control\n",
"Disallow: /\n",
"\n",
"User-agent: URL_Spider_Pro\n",
"Disallow: /\n",
"\n",
"User-agent: URLy Warning\n",
"Disallow: /\n",
"\n",
"User-agent: VCI\n",
"Disallow: /\n",
"\n",
"User-agent: VCI WebViewer VCI WebViewer Win32\n",
"Disallow: /\n",
"\n",
"User-agent: visaduhoc\\.info\n",
"Disallow: /\n",
"\n",
"User-agent: WBSearchBot\n",
"Disallow: /\n",
"\n",
"User-agent: Web Image Collector\n",
"Disallow: /\n",
"\n",
"User-agent: WebAuto\n",
"Disallow: /\n",
"\n",
"User-agent: WebBandit\n",
"Disallow: /\n",
"\n",
"User-agent: WebBandit/3.50\n",
"Disallow: /\n",
"\n",
"User-agent: WebCapture\n",
"Disallow: /\n",
"\n",
"User-agent: WebCopier\n",
"Disallow: /\n",
"\n",
"User-agent: WebEnhancer\n",
"Disallow: /\n",
"\n",
"User-agent: WebInDetail\\.com\n",
"Disallow: /\n",
"\n",
"User-agent: WebmasterWorld Extractor\n",
"Disallow: /\n",
"\n",
"User-agent: WebmasterWorldForumBot\n",
"Disallow: /\n",
"\n",
"User-agent: WebSauger\n",
"Disallow: /\n",
"\n",
"User-agent: Website Quester\n",
"Disallow: /\n",
"\n",
"User-agent: WEBSITEtheWEB\\.COM\n",
"Disallow: /\n",
"\n",
"User-agent: Webster Pro\n",
"Disallow: /\n",
"\n",
"User-agent: WebStripper\n",
"Disallow: /\n",
"\n",
"User-agent: WebVac\n",
"Disallow: /\n",
"\n",
"User-agent: WebZip\n",
"Disallow: /\n",
"\n",
"User-agent: WebZip/4.0\n",
"Disallow: /\n",
"\n",
"User-agent: Wget\n",
"Disallow: /\n",
"\n",
"User-agent: Wget/1.5.3\n",
"Disallow: /\n",
"\n",
"User-agent: Wget/1.6\n",
"Disallow: /\n",
"\n",
"User-agent: Wotbot\n",
"Disallow: /\n",
"\n",
"User-agent: www\\.integromedb\\.org\n",
"Disallow: /\n",
"\n",
"User-agent: WWW-Collector-E\n",
"Disallow: /\n",
"\n",
"User-agent: Xenu's\n",
"Disallow: /\n",
"\n",
"User-agent: Xenu's Link Sleuth 1.1c\n",
"Disallow: /\n",
"\n",
"User-agent: xpymep\\.exe\n",
"Disallow: /\n",
"\n",
"User-agent: YamanaLab-Robot\n",
"Disallow: /\n",
"\n",
"User-agent: YisouSpider\n",
"Disallow: /\n",
"\n",
"User-agent: YodaoBot\n",
"Disallow: /\n",
"\n",
"User-agent: YoudaoBot\n",
"Disallow: /\n",
"\n",
"User-agent: Zend_Http_Client\n",
"Disallow: /\n",
"\n",
"User-agent: Zeus\n",
"Disallow: /\n",
"\n",
"User-agent: Zeus 32297 Webster Pro V2.9 Win32\n",
"Disallow: /\n",
"\n",
"User-agent: Zeus Link Scout\n",
"Disallow: /\n",
"\n",
"User-agent: ZmEu\n",
"Disallow: /\n",
"\n",
"User-agent: ZumBot\n",
"Disallow: /\n",
"\n",
"User-agent: Linguee\n",
"Disallow: /\n",
"\n", "\n",
"User-agent: sogou\n", "User-agent: sogou\n",
"Disallow: /\n" "Disallow: /\n"
@ -1675,11 +876,14 @@
} }
], ],
"metadata": { "metadata": {
"author": "Filip Graliński",
"email": "filipg@amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3 (ipykernel)",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -1690,8 +894,11 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.9.1" "version": "3.9.6"
} },
"subtitle": "2.Wyszukiwarki — wprowadzenie[wykład]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4


@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 2. <i>Wyszukiwarki — roboty</i> [wykład]</h2> \n",
"<h3> Filip Graliński (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@ -314,7 +328,7 @@
"\n", "\n",
"* urllib\n", "* urllib\n",
"* request\n", "* request\n",
"* Beautiful Soup (do parsowania HTML-a)" "* Beautiful Soup (do parsowania dokumentów HTML)"
] ]
}, },
{ {
@ -494,11 +508,14 @@
} }
], ],
"metadata": { "metadata": {
"author": "Filip Graliński",
"email": "filipg@amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3 (ipykernel)",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -509,8 +526,11 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.9.1" "version": "3.9.6"
} },
"subtitle": "2.Wyszukiwarki — roboty[wykład]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4

File diff suppressed because one or more lines are too long


@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 5. <i>Gęste reprezentacje wektorowe</i> [wykład]</h2> \n",
"<h3> Filip Graliński (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@ -129,7 +143,7 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"Musimy tylko sparametryzować naszą funkcję rozmiarem \"odcisku\" (parametr $b$)." "Musimy tylko sparametryzować naszą funkcję rozmiarem „odcisku” (parametr $b$)."
] ]
}, },
{ {
@ -1604,11 +1618,14 @@
} }
], ],
"metadata": { "metadata": {
"author": "Filip Graliński",
"email": "filipg@amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Haskell", "display_name": "Haskell",
"language": "haskell", "language": "haskell",
"name": "haskell" "name": "haskell"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": "ihaskell", "codemirror_mode": "ihaskell",
"file_extension": ".hs", "file_extension": ".hs",
@ -1616,7 +1633,10 @@
"name": "haskell", "name": "haskell",
"pygments_lexer": "Haskell", "pygments_lexer": "Haskell",
"version": "8.10.4" "version": "8.10.4"
} },
"subtitle": "5.Gęste reprezentacje wektorowe[wykład]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4


@ -1,5 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 6. <i>Wyzwania uczenia maszynowego</i> [wykład]</h2> \n",
"<h3> Filip Graliński (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
@ -367,11 +381,14 @@
} }
], ],
"metadata": { "metadata": {
"author": "Filip Graliński",
"email": "filipg@amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3 (ipykernel)",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -382,8 +399,11 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.9.2" "version": "3.9.6"
} },
"subtitle": "6.Wyzwania uczenia maszynowego[wykład]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 4


@ -1,5 +1,20 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"id": "45264aad",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 7. <i>Naiwny klasyfikator bayesowski w ekstrakcji informacji</i> [wykład]</h2> \n",
"<h3> Filip Graliński (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "moderate-array", "id": "moderate-array",
@ -9,7 +24,7 @@
"\n", "\n",
"Zakładamy, że mamy dwie klasy: $c$ i jej dopełnienie ($\\bar{c}$).\n", "Zakładamy, że mamy dwie klasy: $c$ i jej dopełnienie ($\\bar{c}$).\n",
"\n", "\n",
"Typowym przykładem jest zadanie klasyfikacji mejla, czy należy do spamu, czy nie (_spam_ vs _ham_), czyli innymi słowy filtr antyspamowy." "Typowym przykładem jest zadanie klasyfikacji mejla, czy należy do spamu, czy nie (_spam_ vs _ham_), czyli, innymi słowy, filtr antyspamowy."
] ]
}, },
{ {
@ -63,9 +78,9 @@
"\n", "\n",
"W klasyfikacji (i w ogóle w uczeniu nadzorowanym) można wskazać dwa podejścia:\n", "W klasyfikacji (i w ogóle w uczeniu nadzorowanym) można wskazać dwa podejścia:\n",
"\n", "\n",
"* generatywne - wymyślamy pewną \"historyjkę\", w jaki sposób powstaje tekst, \"historyjka\" powinna mieć miejsca do wypełnienia (parametry), np. częstości wyrazów, na podstawie zbioru uczącego dobieramy wartości parametrów (przez rachunki wprost); \"historyjka\" nie musi być prawdziwa, wystarczy, że jakoś przybliża rzeczywistość\n", "* generatywne — wymyślamy pewną „historyjkę”, w jaki sposób powstaje tekst, „historyjka” powinna mieć miejsca do wypełnienia (parametry), np. częstości wyrazów, na podstawie zbioru uczącego dobieramy wartości parametrów (przez rachunki wprost); „historyjka” nie musi być prawdziwa, wystarczy, że jakoś przybliża rzeczywistość\n",
"\n", "\n",
"* dyskryminatywne - nie zastanawiamy się, w jaki sposób powstają teksty, po prostu \"na siłę\" dobieramy wartości parametrów (wag) modelu, tak aby uzyskać jak najmniejszą wartość funkcji kosztu na zbiorze uczącym; zwykle odbywa się to w iteracyjnym procesie (tak jak przedstawiono na schemacie na poprzednim wykładzie).\n", "* dyskryminatywne — nie zastanawiamy się, w jaki sposób powstają teksty, po prostu „na siłę” dobieramy wartości parametrów (wag) modelu, tak aby uzyskać jak najmniejszą wartość funkcji kosztu na zbiorze uczącym; zwykle odbywa się to w iteracyjnym procesie (tak jak przedstawiono na schemacie na poprzednim wykładzie).\n",
"\n", "\n",
"**Pytanie**: Jakie są wady i zalety obu podejść?" "**Pytanie**: Jakie są wady i zalety obu podejść?"
] ]
@ -131,11 +146,11 @@
"source": [ "source": [
"## Naiwny klasyfikator bayesowski\n", "## Naiwny klasyfikator bayesowski\n",
"\n", "\n",
"* _naiwny_ - niekoniecznie oznacza, że to \"głupi\", bezużyteczny klasyfikator\n", "* _naiwny_— niekoniecznie oznacza, że to „głupi”, bezużyteczny klasyfikator\n",
"* _klasyfikator_ \n", "* _klasyfikator_ \n",
"* _bayesowski_ - będzie odwoływać się do wzoru Bayesa.\n", "* _bayesowski_ będzie odwoływać się do wzoru Bayesa.\n",
"\n", "\n",
"Naiwny klasyfikator bayesowski raczej nie powinien być stosowany \"produkcyjnie\" (są lepsze metody). Natomiast jest to metoda bardzo prosta w implementacji dająca przyzwoity _baseline_.\n", "Naiwny klasyfikator bayesowski raczej nie powinien być stosowany „produkcyjnie” (są lepsze metody). Natomiast jest to metoda bardzo prosta w implementacji dająca przyzwoity _baseline_.\n",
"\n", "\n",
"Naiwny klasyfikator bayesowski ma dwie odmiany:\n", "Naiwny klasyfikator bayesowski ma dwie odmiany:\n",
"\n", "\n",
@ -206,14 +221,14 @@
"source": [ "source": [
"#### Prawdopodobieństwo _a priori_\n", "#### Prawdopodobieństwo _a priori_\n",
"\n", "\n",
"$P(c)$ - prawdopodobieństwo a priori klasy $c$\n", "$P(c)$ prawdopodobieństwo a priori klasy $c$\n",
"\n", "\n",
"$\\hat{P}(c) = \\frac{N_c}{N}$\n", "$\\hat{P}(c) = \\frac{N_c}{N}$\n",
"\n", "\n",
"gdzie\n", "gdzie\n",
"\n", "\n",
"* N - liczba wszystkich dokumentów w zbiorze uczącym\n", "* N liczba wszystkich dokumentów w zbiorze uczącym\n",
"* N_c - liczba dokumentow w zbiorze uczącym z klasą $c$\n", "* N_c liczba dokumentow w zbiorze uczącym z klasą $c$\n",
"\n", "\n",
"$\\hat{P}(c) = 0,75$\n", "$\\hat{P}(c) = 0,75$\n",
"\n", "\n",
@ -241,11 +256,11 @@
"source": [ "source": [
"$P(d|c) = P(t_1\\dots t_n|c)$\n", "$P(d|c) = P(t_1\\dots t_n|c)$\n",
"\n", "\n",
"Żeby pójść dalej musimy doszczegółowić nasz model generatywny. Przyjmijmy bardzo naiwny i niezgodny z rzeczywistością model spamera (i nie-spamera): spamer wyciąga wyrazy z worka i wrzuca je z powrotem (losowanie ze zwracaniem). Jedyne co odróżnia spamera i nie-spamera, to **prawdopodobieństwo wylosowania wyrazu** (np. spamer wylosuje słowo _Viagra_ z dość dużym prawdopodobieństwem, nie-spamer - z bardzo niskim).\n", "Aby pójść dalej, musimy doszczegółowić nasz model generatywny. Przyjmijmy bardzo naiwny i niezgodny z rzeczywistością model spamera (i nie-spamera): spamer wyciąga wyrazy z worka i wrzuca je z powrotem (losowanie ze zwracaniem). Jedyne co odróżnia spamera i nie-spamera, to **prawdopodobieństwo wylosowania wyrazu** (np. spamer wylosuje słowo _Viagra_ z dość dużym prawdopodobieństwem, nie-spamer z bardzo niskim).\n",
"\n", "\n",
"**Pytanie:** Ile może wynosić $P(\\mathit{Viagra}|c)$?\n", "**Pytanie:** Ile może wynosić $P(\\mathit{Viagra}|c)$?\n",
"\n", "\n",
"Po przyjęciu takich \"naiwnych założeń\":\n", "Po przyjęciu takich „naiwnych założeń”:\n",
"\n", "\n",
"$$P(d|c) = P(t_1\\dots t_n|c) \\approx P(t_1|c)\\dots P(t_n|c) = \\prod_i^n P(t_i|c)$$" "$$P(d|c) = P(t_1\\dots t_n|c) \\approx P(t_1|c)\\dots P(t_n|c) = \\prod_i^n P(t_i|c)$$"
] ]
@ -291,7 +306,7 @@
"\n", "\n",
"$$f(m, k, T) = \\frac{k+1}{T+m}$$\n", "$$f(m, k, T) = \\frac{k+1}{T+m}$$\n",
"\n", "\n",
"Jest to wygładzanie +1, albo wygładzanie Laplace'a.\n", "Jest to wygładzanie +1, inaczej wygładzanie Laplace'a.\n",
"\n", "\n",
"**Pytanie:** Wymyślić jakiś inny przykład funkcji, która będzie spełniała aksjomaty.\n", "**Pytanie:** Wymyślić jakiś inny przykład funkcji, która będzie spełniała aksjomaty.\n",
"\n", "\n",
@ -347,11 +362,14 @@
} }
], ],
"metadata": { "metadata": {
"author": "Filip Graliński",
"email": "filipg@amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3 (ipykernel)",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -362,8 +380,11 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.9.2" "version": "3.9.6"
} },
"subtitle": "7.Naiwny klasyfikator bayesowski w ekstrakcji informacji[wykład]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 5 "nbformat_minor": 5


@ -1,5 +1,20 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"id": "35c19016",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 8. <i>Regresja liniowa</i> [wykład]</h2> \n",
"<h3> Filip Graliński (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "cathedral-newark", "id": "cathedral-newark",
@ -129,7 +144,10 @@
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "freelance-controversy", "id": "freelance-controversy",
"metadata": {}, "metadata": {
"jp-MarkdownHeadingCollapsed": true,
"tags": []
},
"source": [ "source": [
"## Uczenie\n", "## Uczenie\n",
"\n", "\n",
@ -139,11 +157,11 @@
"\n", "\n",
"### Metoda gradientu prostego\n", "### Metoda gradientu prostego\n",
"\n", "\n",
"![Morskie Oko - Krzysztof Dudzik](08_files/morskieoko.jpg)\n", "![Morskie oko; Autor:Krzysztof Dudzik; Źródło: [https://pl.wikipedia.org/wiki/Morskie_Oko#/media/Plik:Morskie_Oko_ze_szlaku_przez_%C5%9Awist%C3%B3wk%C4%99.jpg](https://pl.wikipedia.org/wiki/Morskie_Oko#/media/Plik:Morskie_Oko_ze_szlaku_przez_%C5%9Awist%C3%B3wk%C4%99.jpg); Licencja: CC-BY 3.0](08_files/morskieoko.jpg)\n",
"\n", "\n",
"Schodź wzdłuż lokalnego spadku funkcji błędu.\n", "Schodź wzdłuż lokalnego spadku funkcji błędu.\n",
"\n", "\n",
"Tak więc w praktyce zamiast podstawiać do wzoru lepiej się uczyć iteracyjnie -\n", "Tak więc w praktyce zamiast podstawiać do wzoru lepiej się uczyć iteracyjnie \n",
" metodą **gradientu prostego** (ang. _gradient descent_).\n", " metodą **gradientu prostego** (ang. _gradient descent_).\n",
"\n", "\n",
"1. Zacznij od byle jakich wag $w_i$ (np. wylosuj)\n", "1. Zacznij od byle jakich wag $w_i$ (np. wylosuj)\n",
@ -192,7 +210,7 @@
"\n", "\n",
"Czym jest wektor $\\vec{x} = (x_1,\\dots,x_n)$? Wiemy, np. reprezentacja tf-idf (być z trikiem z haszowaniem, Word2vec etc.).\n", "Czym jest wektor $\\vec{x} = (x_1,\\dots,x_n)$? Wiemy, np. reprezentacja tf-idf (być z trikiem z haszowaniem, Word2vec etc.).\n",
"\n", "\n",
"![schemat regresji liniowej](08_files/regresja-liniowa-tekst.png)\n" "![Schemat regresji liniowej tekstu](08_files/regresja-liniowa-tekst.png)\n"
] ]
}, },
{ {
@ -279,11 +297,14 @@
} }
], ],
"metadata": { "metadata": {
"author": "Filip Graliński",
"email": "filipg@amu.edu.pl",
"kernelspec": { "kernelspec": {
"display_name": "Python 3", "display_name": "Python 3 (ipykernel)",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"lang": "pl",
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -294,8 +315,11 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.9.2" "version": "3.9.6"
} },
"subtitle": "8.Regresja liniowa[wykład]",
"title": "Ekstrakcja informacji",
"year": "2021"
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 5 "nbformat_minor": 5

2048
wyk/09_neurozoo.ipynb Normal file

File diff suppressed because one or more lines are too long

1057
wyk/09_neurozoo.org Normal file

File diff suppressed because it is too large Load Diff

543
wyk/11_rnn.ipynb Normal file

@ -0,0 +1,543 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 11. <i>Sieci rekurencyjne</i> [wykład]</h2> \n",
"<h3> Filip Graliński (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Rekurencyjne sieci neuronowe\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Inne spojrzenie na sieci przedstawione do tej pory\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Regresja liniowa/logistyczna lub klasyfikacja wieloklasowa na całym tekście\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"W regresji liniowej czy logistycznej bądź w klasyfikacji wieloklasowej\n",
"(z funkcją Softmax) stosowaliśmy następujący schemat:\n",
"\n",
"Do tej pory patrzyliśmy na to tak, że po prostu cały tekst jest od\n",
"razu przetwarzany przez (prostą) sieć neuronową, popatrzmy na ten\n",
"przypadek, jak na sytuację przetwarzania sekwencyjnego. Będzie to\n",
"trochę sztuczne, ale uogólnimy to potem w sensowny sposób.\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Wektoryzacja\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Po pierwsze, zauważmy, że w wielu schematach wektoryzacji (np. tf), wektor\n",
"dokumentów jest po prostu sumą wektorów poszczególnych składowych:\n",
"\n",
"$$\\vec{v}(d) = \\vec{v}(t^1,\\ldots,t^K) = \\vec{v}(t^1) + \\ldots + \\vec{v}(t^K) = \\sum_{k=1}^K \\vec{v}(t^i),$$\n",
"\n",
"gdzie w schemacie tf \\vec{v}(t<sup>i</sup>) to po prostu wektor *one-hot* dla słowa.\n",
"\n",
"**Pytanie** Jak postać przyjmie w \\vec{v}(t<sup>i</sup>) dla wektoryzacji tf-idf?\n",
"\n",
"Wektory $\\vec{v}(t^k)$ mogą być również gęstymi wektorami\n",
"($\\vec{v}(t^k) \\in \\mathcal{R}^n$, gdzie $n$ jest rzędu 10-1000), np.\n",
"w modelu Word2vec albo mogą to być **wyuczalne** wektory (zanurzenia\n",
"słów, *embeddings*), tzn. wektory, które są parametrami uczonej sieci!\n",
"\n",
"**Pytanie** Ile wag (parametrów) wnoszą wyuczalne wektory do sieci?\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Prosta wektoryzacja wyrażona w modelu sekwencyjnym\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Jak zapisać równoważnie powyższą wektoryzację w modelu **sekwencyjnym**, tj. przy założeniu, że\n",
"przetwarzamy wejście token po tokenie (a nie „naraz”)? Ogólnie wprowadzimy bardzo\n",
"ogólny model sieci **rekurencyjnej**.\n",
"\n",
"Po pierwsze zakładamy, że sieć ma pewien stan $\\vec{s^k} \\in\n",
"\\mathcal{R}^m$ (stan jest wektorem o długości $m$), który może\n",
"zmieniać się z każdym krokiem (przetwarzanym tokenem). Zmiana stanu\n",
"jest określona przez pewną funkcję $R : \\mathcal{R}^m \\times\n",
"\\mathcal{R}^n \\rightarrow \\mathcal{R}^m$ ($n$ to rozmiar wektorów\n",
"$\\vec{v}(t^k)$):\n",
"\n",
"$$\\vec{s^k} = R(\\vec{s^{k-1}}, \\vec{v}(t^k)).$$\n",
"\n",
"W przypadku wektoryzacji tf-idf mamy do czynienia z prostym\n",
"sumowaniem, więc $R$ przyjmuje bardzo prostą postać:\n",
"\n",
"$$\\vec{s^0} = [0,\\dots,0],$$\n",
"\n",
"$$R(\\vec{s}, \\vec{x}) = \\vec{s} + \\vec{x}.$$\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Wyjście z modelu\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Dla regresji liniowej/logistycznej, oprócz funkcji $R$, która określa\n",
"zmianę stanu, potrzebujemy funkcji $O$, która określa wyjście systemu w każdym kroku.\n",
"\n",
"$$y^k = O(\\vec{s^k})$$\n",
"\n",
"W zadaniach klasyfikacji czy regresji, kiedy patrzymy na cały tekst w\n",
"zasadzie wystarczy wziąć *ostatnią* wartość (tj. $y^K$). Można sobie\n",
"wyobrazić sytuację, kiedy wartości $y^k$ dla $k < k$ również mogą być jakoś przydatne\n",
"(np. klasyfikujemy na bieżąco tekst wpisywany przez użytkownika).\n",
"\n",
"W każdym razie dla regresji liniowej funkcja $O$ przyjmie postać:\n",
"\n",
"$$O(\\vec{s}) = \\vec{w}\\vec{s}$$,\n",
"\n",
"gdzie $\\vec{w}$ jest wektorem wyuczylnych wag, dla regresji zaś logistycznej:\n",
"\n",
"$$O(\\vec{s}) = \\operatorname{softmax}(\\vec{w}\\vec{s})$$\n",
"\n",
"**Pytanie**: jaką postać przyjmie $O$ dla klasyfikacji wieloklasowej\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prosta sieć rekurencyjna\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"W najprostszej sieci rekurencyjnej (*Vanilla RNN*, sieć Elmana,\n",
"czasami po prostu RNN) w każdym kroku oprócz właściwego wejścia\n",
"($\\vec{v}(t^k)$) będziemy również podawać na wejściu poprzedni stan\n",
"sieci ($\\vec{s^{k-1}}$).\n",
"\n",
"Innymi słowy, funkcje $R$ przyjmie następującą postać:\n",
"\n",
"$$s^k = \\sigma(W\\langle\\vec{v}(t^k), \\vec{s^{k-1}}\\rangle + \\vec{b}),$$\n",
"\n",
"gdzie:\n",
"\n",
"- $\\langle\\vec{x},\\vec{y}\\rangle$ to konkatenacja dwóch wektorów,\n",
"- $W \\in \\mathcal{R}^m \\times \\mathcal{R}^{n+m}$ — macierz wag,\n",
"- $b \\in \\mathcal{R}^m$ — wektor obciążeń (*biases*).\n",
"\n",
"Taką sieć RNN można przedstawić schematycznie w następujący sposób:\n",
"\n",
"![Pojedynczy krok sieci rekurencyjnej](./img-rnn.png)\n",
"\n",
"Zauważmy, że zamiast macierzy $W$ działającej na konkatenacji wektorów można wprowadzić dwie\n",
"macierze $U$ i $V$ i tak zapisać wzór:\n",
"\n",
"$$s^k = \\sigma(U\\vec{v}(t^k) + V\\vec{s^{k-1}} + \\vec{b}).$$\n",
"\n",
"Jeszcze inne spojrzenie na sieć RNN:\n",
"\n",
"![Pojedynczy krok sieci rekurencyjnej II](./rnn.png)\n",
"\n",
"Powyższy rysunek przedstawia pojedynczy krok sieci RNN. Dla całego\n",
"wejścia (powiedzmy, 3-wyrazowego) możemy sieć rozwinąć (*unroll*):\n",
"\n",
"![Rozwinięta sieć rekurencyjna](./rnn-seq.png)\n",
"\n"
]
},
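{
"cell_type": "markdown",
"metadata": {},
"source": [
"Poniżej szkic (dodany dla ilustracji) pojedynczego kroku takiej prostej sieci RNN, napisany wprost według powyższego wzoru; rozmiary $n$ i $m$ oraz wagi są tu przykładowe (losowe)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"\n",
"n, m = 4, 3                # przykładowe rozmiary wejścia i stanu\n",
"\n",
"W = torch.randn(m, n + m)  # macierz wag działająca na konkatenacji\n",
"b = torch.randn(m)         # wektor obciążeń\n",
"\n",
"v_t = torch.randn(n)       # wektor bieżącego tokenu\n",
"s_prev = torch.zeros(m)    # poprzedni stan\n",
"\n",
"# s^k = sigma(W<v(t^k), s^{k-1}> + b)\n",
"s = torch.sigmoid(W @ torch.cat([v_t, s_prev]) + b)\n",
"s"
]
},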
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Zastosowanie sieci RNN do etykietowania sekwencji\n",
"\n",
"Sieć RNN może w prosty sposób być użyta do etykietowania sekwencji (w każdym kroku zwracamy etykietę)."
]
},
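{
"cell_type": "markdown",
"metadata": {},
"source": [
"Szkic (dodany dla ilustracji, rozmiary przykładowe): gotowa warstwa `torch.nn.RNN` zwraca wyjście w każdym kroku, więc do etykietowania sekwencji wystarczy dołożyć warstwę liniową zwracającą etykietę dla każdego tokenu."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"import torch.nn as nn\n",
"\n",
"vocab_size, emb_dim, hidden_dim, num_labels = 1000, 50, 64, 5  # wartości przykładowe\n",
"\n",
"embedding = nn.Embedding(vocab_size, emb_dim)\n",
"rnn = nn.RNN(emb_dim, hidden_dim, batch_first=True)\n",
"classifier = nn.Linear(hidden_dim, num_labels)\n",
"\n",
"tokens = torch.randint(0, vocab_size, (1, 7))  # batch 1, sekwencja 7 tokenów\n",
"outputs, _ = rnn(embedding(tokens))            # wyjście dla każdego kroku: (1, 7, hidden_dim)\n",
"classifier(outputs).argmax(dim=-1)             # etykieta dla każdego tokenu: (1, 7)"
]
},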
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Problemy z prostymi sieciami RNN\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"W praktyce proste sieci RNN są bardzo trudne w uczenia, zazwyczaj\n",
"pojawia się problem **zanikających** (rzadziej: **eksplodujących**)\n",
"gradientów: w propagacji wstecznej błąd szybko zanika i nie jest w\n",
"stanie dotrzeć do początkowych wejść.\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Sieci RNN z bramkami\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"W prostych sieciach RNN podstawowa trudność polega na tym, że mamy\n",
"niewielką kontrolę nad tym jak pamięć (stan) jest aktualizowana. Aby\n",
"zwiększyć tę kontrolę, potrzebujemy **bramek**.\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Bramki\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Zazwyczaj do tej pory rozpatrywaliśmy iloczyn skalarny wektorów, w\n",
"wyniku którego otrzymujemy liczbę (w PyTorchu wyrażany za pomocą operatora `@`), np.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"# Out[2]:\n",
"tensor(-5)"
]
}
],
"source": [
"import torch\n",
"\n",
"a = torch.tensor([-1, 0, 3])\n",
"b = torch.tensor([2, 5, -1])\n",
"a @ b"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Czasami przydatny jest **iloczyn Hadamarda**, czyli przemnożenie\n",
"wektorów (albo macierzy) po współrzędnych. W PyTorchu taki iloczyn\n",
"wyrażany jest za pomocą operatora `*`, w notacji matematycznej będziemy używali\n",
"znaku $\\odot$.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"# Out[3]:\n",
"tensor([-2, 0, -3])"
]
}
],
"source": [
"import torch\n",
"\n",
"a = torch.tensor([-1, 0, 3])\n",
"b = torch.tensor([2, 5, -1])\n",
"a * b"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Zauważmy, że iloczyn Hadamarda przez wektor złożony z zer i jedynek daje nam *filtr*, możemy\n",
"selektywnie wygaszać pozycje wektora, np. tutaj wyzerowaliśmy 2. i 5. pozycję wektora:\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"# Out[4]:\n",
"tensor([1., 0., 3., 4., 0.])"
]
}
],
"source": [
"import torch\n",
"\n",
"a = torch.tensor([1., 2., 3., 4., 5.])\n",
"b = torch.tensor([1., 0., 1., 1., 0.])\n",
"a * b"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Co więcej, za pomocą bramki możemy selektywnie kontrolować, co\n",
"zapamiętujemy, a co zapominamy. Rozpatrzmy mianowicie wektor zer i\n",
"jedynek $\\vec{g} \\in \\{0,1\\}^m$, dla stanu (pamięci) $\\vec{s}$ i nowej informacji\n",
"$\\vec{x}$ możemy dokonywać aktualizacji w następujący sposób:\n",
"\n",
"$$\\vec{s} \\leftarrow \\vec{g} \\odot \\vec{x} + (1 - \\vec{g}) \\odot \\vec{s}$$\n",
"\n",
"Na przykład, za pomocą bramki można wpisać nową wartość na 2. i 5. pozycję wektora.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"# Out[8]:\n",
"tensor([ 1., 7., 3., 4., -8.])"
]
}
],
"source": [
"import torch\n",
"\n",
"s = torch.tensor([1., 2., 3., 4., 5.])\n",
"x = torch.tensor([8., 7., 15., -3., -8.])\n",
"\n",
"g = torch.tensor([0., 1., 0., 0., 1.])\n",
"\n",
"s = g * x + (1 - g) * s\n",
"s"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Wektor bramki nie musi być z góry określony, może być wyuczalny. Wtedy\n",
"jednak lepiej założyć, że bramka jest „miękka”, np. jej wartości\n",
"pochodzi z sigmoidy zastosowanej do jakiejś wcześniejszej warstwy.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"# Out[14]:\n",
"tensor([ 1.5310, 6.9998, 5.7777, 4.0000, -5.2159])"
]
}
],
"source": [
"import torch\n",
"\n",
"s = torch.tensor([1., 2., 3., 4., 5.])\n",
"x = torch.tensor([8., 7., 15., -3., -8.])\n",
"\n",
"pre_g = torch.tensor([-2.5, 10.0, -1.2, -101., 1.3])\n",
"g = torch.sigmoid(pre_g)\n",
"\n",
"s = g * x + (1 - g) * s\n",
"s"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Pytanie:** dlaczego sigmoida zamiast tanh?\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Sieć LSTM\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Architektura LSTM (*Long Short-Term Memory*) pozwala rozwiązać problem\n",
"znikających gradientów — za cenę komplikacji obliczeń.\n",
"\n",
"W sieci LSTM stan $\\vec{s^k}$ ma dwie połówki, tj. $\\vec{s^k} =\n",
"\\langle\\vec{c^k},\\vec{h^k}\\rangle$, gdzie\n",
"\n",
"- $\\vec{c^k}$ to **komórka pamięci**, która nie zmienia swojej, chyba że celowo zmodyfikujemy jej wartość\n",
" za pomocą bramek,\n",
"- $\\vec{h^k}$ to ukryty stan (przypominający $\\vec{s^k}$ ze zwykłej sieci RNN).\n",
"\n",
"Sieć LSTM zawiera 3 bramki:\n",
"\n",
"- bramkę zapominania (*forget gate*), która steruje wymazywaniem informacji z komórki\n",
" pamięci $\\vec{c^k}$,\n",
"- bramkę wejścia (*input gate*), która steruje tym, na ile nowe informacje aktualizują\n",
" komórkę pamięci $\\vec{c^k}$,\n",
"- bramkę wyjścia (*output gate*), która steruje tym, co z komórki\n",
" pamięci przekazywane jest na wyjście.\n",
"\n",
"Wszystkie trzy bramki definiowane są za pomocą bardzo podobnego wzoru — warstwy liniowej na\n",
"poprzedniej wartości warstwy ukrytej i bieżącego wejścia.\n",
"\n",
"$$\\vec{i} = \\sigma(W_i\\langle\\vec{v}(t^k),\\vec{h^{k-1}}\\rangle)$$\n",
"\n",
"$$\\vec{f} = \\sigma(W_f\\langle\\vec{v}(t^k),\\vec{h^{k-1}}\\rangle)$$\n",
"\n",
"$$\\vec{o} = \\sigma(W_o\\langle\\vec{v}(t^k),\\vec{h^{k-1}}\\rangle)$$\n",
"\n",
"Jak widać, wzory różnią się tylko macierzami wag $W_*$.\n",
"\n",
"Zmiana komórki pamięci jest zdefiniowana jak następuje:\n",
"\n",
"$$\\vec{c^k} = \\vec{f} \\odot \\vec{c^{k-1}} + \\vec{i} \\vec{z^k}$$,\n",
"\n",
"gdzie\n",
"\n",
"$$\\vec{z^k} = \\operatorname{tanh}(W_z\\langle\\vec{v}(t^k),\\vec{h^{k-1}}\\rangle)$$\n",
"\n",
"Stan ukryty zmienia się w następujący sposób:\n",
"\n",
"$$\\vec{h^K} = \\vec{o} \\odot \\operatorname{tanh}(\\vec{c^k})$$.\n",
"\n",
"Ostateczne wyjście może być wyliczane na podstawie wektora $\\vec{h^k}$:\n",
"\n",
"$$O(\\vec{s}) = O(\\langle\\vec{c},\\vec{h}\\rangle) = \\vec{h}$$\n",
"\n",
"**Pytanie**: Ile wag/parametrów ma sieć RNN o rozmiarze wejścia $n$ i rozmiarze warstwy ukrytej $m$?\n",
"\n"
]
},
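{
"cell_type": "markdown",
"metadata": {},
"source": [
"Poniżej szkic jednego kroku LSTM napisany wprost według powyższych wzorów (dodany dla ilustracji; rozmiary i wagi przykładowe, dla czytelności pominięto wektory obciążeń). W praktyce użylibyśmy gotowej warstwy `torch.nn.LSTM`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"\n",
"n, m = 4, 3  # przykładowe rozmiary wejścia i stanu\n",
"\n",
"# wagi trzech bramek i kandydata z^k\n",
"W_i = torch.randn(m, n + m)\n",
"W_f = torch.randn(m, n + m)\n",
"W_o = torch.randn(m, n + m)\n",
"W_z = torch.randn(m, n + m)\n",
"\n",
"v_t = torch.randn(n)         # wejście w bieżącym kroku\n",
"h_prev = torch.zeros(m)      # poprzedni stan ukryty\n",
"c_prev = torch.zeros(m)      # poprzednia komórka pamięci\n",
"\n",
"x = torch.cat([v_t, h_prev])\n",
"\n",
"i = torch.sigmoid(W_i @ x)   # bramka wejścia\n",
"f = torch.sigmoid(W_f @ x)   # bramka zapominania\n",
"o = torch.sigmoid(W_o @ x)   # bramka wyjścia\n",
"z = torch.tanh(W_z @ x)\n",
"\n",
"c = f * c_prev + i * z       # nowa komórka pamięci\n",
"h = o * torch.tanh(c)        # nowy stan ukryty\n",
"h"
]
},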
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Literatura\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Yoav Goldberg, *Neural Network Methods for Natural Language Processing*,\n",
"Morgan & Claypool Publishers, 2017\n",
"\n"
]
}
],
"metadata": {
"author": "Filip Graliński",
"email": "filipg@amu.edu.pl",
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"lang": "pl",
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
},
"org": null,
"subtitle": "11.Sieci rekurencyjne[wykład]",
"title": "Ekstrakcja informacji",
"year": "2021"
},
"nbformat": 4,
"nbformat_minor": 4
}

288
wyk/11_rnn.org Normal file

@ -0,0 +1,288 @@
* Rekurencyjne sieci neuronowe
** Inne spojrzenie na sieci przedstawione do tej pory
*** Regresja liniowa/logistyczna lub klasyfikacja wieloklasowa na całym tekście
W regresji liniowej czy logistycznej bądź w klasyfikacji wieloklasowej
(z funkcją Softmax) stosowaliśmy następujący schemat:
Do tej pory patrzyliśmy na to tak, że po prostu cały tekst jest od
razu przetwarzany przez (prostą) sieć neuronową, popatrzmy na ten
przypadek, jak na sytuację przetwarzania sekwencyjnego. Będzie to
trochę sztuczne, ale uogólnimy to potem w sensowny sposób.
**** Wektoryzacja
Po pierwsze, zauważmy, że w wielu schematach wektoryzacji (np. tf), wektor
dokumentów jest po prostu sumą wektorów poszczególnych składowych:
$$\vec{v}(d) = \vec{v}(t^1,\ldots,t^K) = \vec{v}(t^1) + \ldots + \vec{v}(t^K) = \sum_{k=1}^K \vec{v}(t^k),$$
gdzie w schemacie tf \vec{v}(t^k) to po prostu wektor /one-hot/ dla słowa.
*Pytanie* Jaką postać przyjmie \vec{v}(t^k) dla wektoryzacji tf-idf?
Wektory $\vec{v}(t^k)$ mogą być również gęstymi wektorami
($\vec{v}(t^k) \in \mathcal{R}^n$, gdzie $n$ jest rzędu 10-1000), np.
w modelu Word2vec albo mogą to być *wyuczalne* wektory (zanurzenia
słów, /embeddings/), tzn. wektory, które są parametrami uczonej sieci!
*Pytanie* Ile wag (parametrów) wnoszą wyuczalne wektory do sieci?
**** Prosta wektoryzacja wyrażona w modelu sekwencyjnym
Jak zapisać równoważnie powyższą wektoryzację w modelu *sekwencyjnym*, tj. przy założeniu, że
przetwarzamy wejście token po tokenie (a nie „naraz”)? Ogólnie wprowadzimy bardzo
ogólny model sieci *rekurencyjnej*.
Po pierwsze zakładamy, że sieć ma pewien stan $\vec{s^k} \in
\mathcal{R}^m$ (stan jest wektorem o długości $m$), który może
zmieniać się z każdym krokiem (przetwarzanym tokenem). Zmiana stanu
jest określona przez pewną funkcję $R : \mathcal{R}^m \times
\mathcal{R}^n \rightarrow \mathcal{R}^m$ ($n$ to rozmiar wektorów
$\vec{v}(t^k)$):
$$\vec{s^k} = R(\vec{s^{k-1}}, \vec{v}(t^k)).$$
W przypadku wektoryzacji tf-idf mamy do czynienia z prostym
sumowaniem, więc $R$ przyjmuje bardzo prostą postać:
$$\vec{s^0} = [0,\dots,0],$$
$$R(\vec{s}, \vec{x}) = \vec{s} + \vec{x}.$$
**** Wyjście z modelu
Dla regresji liniowej/logistycznej, oprócz funkcji $R$, która określa
zmianę stanu, potrzebujemy funkcji $O$, która określa wyjście systemu w każdym kroku.
$$y^k = O(\vec{s^k})$$
W zadaniach klasyfikacji czy regresji, kiedy patrzymy na cały tekst w
zasadzie wystarczy wziąć /ostatnią/ wartość (tj. $y^K$). Można sobie
wyobrazić sytuację, kiedy wartości $y^k$ dla $k < K$ również mogą być jakoś przydatne
(np. klasyfikujemy na bieżąco tekst wpisywany przez użytkownika).
W każdym razie dla regresji liniowej funkcja $O$ przyjmie postać:
$$O(\vec{s}) = \vec{w}\vec{s}$$,
gdzie $\vec{w}$ jest wektorem wyuczalnych wag, dla regresji zaś logistycznej:
$$O(\vec{s}) = \sigma(\vec{w}\vec{s})$$
*Pytanie*: jaką postać przyjmie $O$ dla klasyfikacji wieloklasowej
** Prosta sieć rekurencyjna
W najprostszej sieci rekurencyjnej (/Vanilla RNN/, sieć Elmana,
czasami po prostu RNN) w każdym kroku oprócz właściwego wejścia
($\vec{v}(t^k)$) będziemy również podawać na wejściu poprzedni stan
sieci ($\vec{s^{k-1}}$).
Innymi słowy, funkcja $R$ przyjmie następującą postać:
$$s^k = \sigma(W\langle\vec{v}(t^k), \vec{s^{k-1}}\rangle + \vec{b}),$$
gdzie:
- $\langle\vec{x},\vec{y}\rangle$ to konkatenacja dwóch wektorów,
- $W \in \mathcal{R}^m \times \mathcal{R}^{n+m}$ — macierz wag,
- $b \in \mathcal{R}^m$ — wektor obciążeń (/biases/).
Taką sieć RNN można przedstawić schematycznie w następujący sposób:
[[./img-rnn.png]]
Zauważmy, że zamiast macierzy $W$ działającej na konkatenacji wektorów można wprowadzić dwie
macierze $U$ i $V$ i tak zapisać wzór:
$$\vec{s^k} = \sigma(U\vec{v}(t^k) + V\vec{s^{k-1}} + \vec{b}).$$
Jeszcze inne spojrzenie na sieć RNN:
[[./rnn.png]]
Powyższy rysunek przedstawia pojedynczy krok sieci RNN. Dla całego
wejścia (powiedzmy, 3-wyrazowego) możemy sieć rozwinąć (/unroll/):
[[./rnn-seq.png]]
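Poniżej minimalny szkic pojedynczego kroku takiej sieci i jej „rozwinięcia” w PyTorchu (rozmiary $n$, $m$ oraz wagi są tu przykładowe i losowe, nie uczymy ich):
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
import torch

n, m = 4, 3   # przykładowy rozmiar wejścia i stanu

# wagi sieci Elmana: s^k = sigma(U v(t^k) + V s^{k-1} + b)
U = torch.randn(m, n)
V = torch.randn(m, m)
b = torch.randn(m)

def rnn_step(v, s_prev):
    return torch.sigmoid(U @ v + V @ s_prev + b)

# „rozwinięcie” sieci dla 3-wyrazowego wejścia
tokens = [torch.randn(n) for _ in range(3)]
s = torch.zeros(m)
for v in tokens:
    s = rnn_step(v, s)

s
#+END_SRC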
*** Zastosowanie sieci RNN do etykietowania sekwencji
*** Problemy z prostymi sieciami RNN
W praktyce proste sieci RNN są bardzo trudne w uczeniu, zazwyczaj
pojawia się problem *zanikających* (rzadziej: *eksplodujących*)
gradientów: w propagacji wstecznej błąd szybko zanika i nie jest w
stanie dotrzeć do początkowych wejść.
** Sieci RNN z bramkami
W prostych sieciach RNN podstawowa trudność polega na tym, że mamy
niewielką kontrolę nad tym jak pamięć (stan) jest aktualizowana. Aby
zwiększyć tę kontrolę, potrzebujemy *bramek*.
*** Bramki
Do tej pory rozpatrywaliśmy zazwyczaj iloczyn skalarny wektorów, w
wyniku którego otrzymujemy liczbę (w PyTorchu wyrażany za pomocą operatora ~@~), np.
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
import torch
a = torch.tensor([-1, 0, 3])
b = torch.tensor([2, 5, -1])
a @ b
#+END_SRC
#+RESULTS:
:results:
# Out[2]:
: tensor(-5)
:end:
Czasami przydatny jest *iloczyn Hadamarda*, czyli przemnożenie
wektorów (albo macierzy) po współrzędnych. W PyTorchu taki iloczyn
wyrażany jest za pomocą operatora ~*~, w notacji matematycznej będziemy używali
znaku $\odot$.
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
import torch
a = torch.tensor([-1, 0, 3])
b = torch.tensor([2, 5, -1])
a * b
#+END_SRC
#+RESULTS:
:results:
# Out[3]:
: tensor([-2, 0, -3])
:end:
Zauważmy, że iloczyn Hadamarda z wektorem złożonym z zer i jedynek daje nam /filtr/: możemy
selektywnie wygaszać pozycje wektora, np. tutaj wyzerowaliśmy 2. i 5. pozycję wektora:
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
import torch
a = torch.tensor([1., 2., 3., 4., 5.])
b = torch.tensor([1., 0., 1., 1., 0.])
a * b
#+END_SRC
#+RESULTS:
:results:
# Out[4]:
: tensor([1., 0., 3., 4., 0.])
:end:
Co więcej, za pomocą bramki możemy selektywnie kontrolować, co
zapamiętujemy, a co zapominamy. Rozpatrzmy mianowicie wektor zer i
jedynek $\vec{g} \in \{0,1\}^m$; dla stanu (pamięci) $\vec{s}$ i nowej informacji
$\vec{x}$ możemy dokonywać aktualizacji w następujący sposób:
$$\vec{s} \leftarrow \vec{g} \odot \vec{x} + (1 - \vec{g}) \odot \vec{s}$$
Na przykład, za pomocą bramki można wpisać nową wartość na 2. i 5. pozycję wektora.
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
import torch
s = torch.tensor([1., 2., 3., 4., 5.])
x = torch.tensor([8., 7., 15., -3., -8.])
g = torch.tensor([0., 1., 0., 0., 1.])
s = g * x + (1 - g) * s
s
#+END_SRC
#+RESULTS:
:results:
# Out[8]:
: tensor([ 1., 7., 3., 4., -8.])
:end:
Wektor bramki nie musi być z góry określony, może być wyuczalny. Wtedy
jednak lepiej założyć, że bramka jest „miękka”, np. jej wartości
pochodzą z sigmoidy zastosowanej do jakiejś wcześniejszej warstwy.
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
import torch
s = torch.tensor([1., 2., 3., 4., 5.])
x = torch.tensor([8., 7., 15., -3., -8.])
pre_g = torch.tensor([-2.5, 10.0, -1.2, -101., 1.3])
g = torch.sigmoid(pre_g)
s = g * x + (1 - g) * s
s
#+END_SRC
#+RESULTS:
:results:
# Out[14]:
: tensor([ 1.5310, 6.9998, 5.7777, 4.0000, -5.2159])
:end:
*Pytanie:* dlaczego sigmoida zamiast tanh?
*** Sieć LSTM
Architektura LSTM (/Long Short-Term Memory/) pozwala rozwiązać problem
znikających gradientów — za cenę komplikacji obliczeń.
W sieci LSTM stan $\vec{s^k}$ ma dwie połówki, tj. $\vec{s^k} =
\langle\vec{c^k},\vec{h^k}\rangle$, gdzie
- $\vec{c^k}$ to *komórka pamięci*, która nie zmienia swojej wartości, chyba że celowo ją zmodyfikujemy
za pomocą bramek,
- $\vec{h^k}$ to ukryty stan (przypominający $\vec{s^k}$ ze zwykłej sieci RNN).
Sieć LSTM zawiera 3 bramki:
- bramkę zapominania (/forget gate/), która steruje wymazywaniem informacji z komórki
pamięci $\vec{c^k}$,
- bramkę wejścia (/input gate/), która steruje tym, na ile nowe informacje aktualizują
komórkę pamięci $\vec{c^k}$,
- bramkę wyjścia (/output gate/), która steruje tym, co z komórki
pamięci przekazywane jest na wyjście.
Wszystkie trzy bramki definiowane są za pomocą bardzo podobnego wzoru — warstwy liniowej na
poprzedniej wartości warstwy ukrytej i bieżącego wejścia.
$$\vec{i} = \sigma(W_i\langle\vec{v}(t^k),\vec{h^{k-1}}\rangle)$$
$$\vec{f} = \sigma(W_f\langle\vec{v}(t^k),\vec{h^{k-1}}\rangle)$$
$$\vec{o} = \sigma(W_o\langle\vec{v}(t^k),\vec{h^{k-1}}\rangle)$$
Jak widać, wzory różnią się tylko macierzami wag $W_*$.
Zmiana komórki pamięci jest zdefiniowana jak następuje:
$$\vec{c^k} = \vec{f} \odot \vec{c^{k-1}} + \vec{i} \odot \vec{z^k},$$
gdzie
$$\vec{z^k} = \operatorname{tanh}(W_z\langle\vec{v}(t^k),\vec{h^{k-1}}\rangle)$$
Stan ukryty zmienia się w następujący sposób:
$$\vec{h^k} = \vec{o} \odot \operatorname{tanh}(\vec{c^k}).$$
Ostateczne wyjście może być wyliczane na podstawie wektora $\vec{h^k}$:
$$O(\vec{s}) = O(\langle\vec{c},\vec{h}\rangle) = \vec{h}$$
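Poniżej minimalny szkic jednego kroku komórki LSTM według powyższych wzorów (wagi losowe, rozmiary przykładowe; w praktyce użylibyśmy gotowej klasy ~torch.nn.LSTM~):
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
import torch

n, m = 4, 3   # przykładowy rozmiar wejścia i stanu

# po jednej macierzy wag dla bramek i, f, o oraz dla kandydata z
W_i, W_f, W_o, W_z = (torch.randn(m, n + m) for _ in range(4))

def lstm_step(v, c_prev, h_prev):
    x = torch.cat([v, h_prev])     # konkatenacja <v(t^k), h^{k-1}>
    i = torch.sigmoid(W_i @ x)     # bramka wejścia
    f = torch.sigmoid(W_f @ x)     # bramka zapominania
    o = torch.sigmoid(W_o @ x)     # bramka wyjścia
    z = torch.tanh(W_z @ x)        # kandydat na nową zawartość pamięci
    c = f * c_prev + i * z         # aktualizacja komórki pamięci
    h = o * torch.tanh(c)          # nowy stan ukryty
    return c, h

c, h = torch.zeros(m), torch.zeros(m)
for v in [torch.randn(n) for _ in range(3)]:
    c, h = lstm_step(v, c, h)

h
#+END_SRC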
*Pytanie*: Ile wag/parametrów ma sieć RNN o rozmiarze wejścia $n$ i rozmiarze warstwy ukrytej $m$?
** Literatura
Yoav Goldberg, /Neural Network Methods for Natural Language Processing/,
Morgan & Claypool Publishers, 2017

859
wyk/12_bpe.ipynb Normal file

File diff suppressed because one or more lines are too long

396
wyk/12_bpe.org Normal file
View File

@ -0,0 +1,396 @@
* Podział na jednostki podwyrazowe
** Słownik nie może być za duży…
Jeśli używamy wyuczalnych zanurzeń słów (embeddingów), wówczas musimy
je dopisać do listy parametrów całego modelu — jest to $|V|n$ wag,
gdzie $n$ to rozmiar embeddingów; w wypadku uczenia dodatkowo musimy
jeszcze pamiętać związane z embeddingami gradienty. Pamięć RAM karty
graficznej jest rzecz jasna ograniczona, słownik więc nie może być
dowolnie duży. Dla danego modelu karty graficznej dość łatwo ustalić
maksymalny rozmiar słownika — jest to „twarde” ograniczenie, które musimy
spełnić.
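Dla zobrazowania skali: proste oszacowanie (na przykładowych, hipotetycznych wartościach), ile pamięci zajmą same embeddingi dla danego rozmiaru słownika:
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
# wartości przykładowe: 2 mln form, embeddingi o rozmiarze 300, liczby 32-bitowe
vocab_size = 2_000_000
embedding_dim = 300
bytes_per_weight = 4

weights = vocab_size * embedding_dim
gigabytes = weights * bytes_per_weight / 1024**3
f'{weights} wag, ok. {gigabytes:.1f} GiB (bez gradientów i stanu optymalizatora)'
#+END_SRC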
*** Czy rzeczywiście słownik może być taki duży?
Ile jest różnych form fleksyjnych w języku polskim? Zobaczmy w słowniku PoliMorf…
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
! wget -q 'http://zil.ipipan.waw.pl/PoliMorf?action=AttachFile&do=get&target=PoliMorf-0.6.7.tab.gz' -O - | zcat | cut -f 1 | uniq | head -n 20
#+END_SRC
#+RESULTS:
:results:
# Out[2]:
:end:
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
! wget -q 'http://zil.ipipan.waw.pl/PoliMorf?action=AttachFile&do=get&target=PoliMorf-0.6.7.tab.gz' -O - | zcat | cut -f 1 | sort -u | wc -l
#+END_SRC
#+RESULTS:
:results:
# Out[3]:
:end:
*Pytanie* W którym języku europejskim wyrazów będzie jeszcze więcej niż w języku polskim?
Tak naprawdę form jest jeszcze więcej, oczywiście PoliMorf nie wyczerpuje zbioru…
*Pytanie* Podaj przykłady „oczywistych” wyrazów, których nie ma w PoliMorfie. Jak w sposób systematyczny szukać takich wyrazów?
Z drugiej strony, w PoliMorfie jest dużo dziwnych, „sztucznych” wyrazów.
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
! wget -q 'http://zil.ipipan.waw.pl/PoliMorf?action=AttachFile&do=get&target=PoliMorf-0.6.7.tab.gz' -O - | zcat | cut -f 1 | shuf -n 20
#+END_SRC
#+RESULTS:
:results:
# Out[4]:
:end:
Spójrzmy na to inaczej: zobaczmy, ile różnych wyrazów jest w jakimś rzeczywistym zbiorze tekstów; rozpatrzmy
teksty zebrane na potrzeby identyfikacji płci autora tekstu:
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
! git clone --single-branch --depth 1 git://gonito.net/petite-difference-challenge2
#+END_SRC
#+RESULTS:
:results:
# Out[7]:
:end:
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
! xzcat petite-difference-challenge2/train/in.tsv.xz | perl -C -ne 'print "$&\n" while/\p{L}+/g;' | sort -u > vocab.txt
#+END_SRC
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
! head -n 50 vocab.txt
#+END_SRC
#+RESULTS:
:results:
# Out[11]:
:end:
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
! wc -l vocab.txt
#+END_SRC
#+RESULTS:
:results:
# Out[9]:
:end:
Co gorsza, nawet jeśli weźmiemy cały taki słownik bez ograniczeń, i tak
nie pokryje on sporej części tekstów przetwarzanych w czasie inferencji.
Zobaczmy, ilu wyrazów ze zbioru deweloperskiego nie będzie w słowniku.
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
! cat petite-difference-challenge2/dev-0/in.tsv | perl -C -ne 'print "$&\n" while/\p{L}+/g;' | sort -u | comm vocab.txt - -13 | wc -l
#+END_SRC
Takie wyrazy nazywamy wyrazami *OOV* (/out-of-vocabulary/).
** Obcięcie słownika
Najprostszy sposób ograniczenia słownika to po prostu obcięcie do $N$ najczęstszych słów.
Spróbujmy zastosować tę metodę do korpusu „płci”:
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
! xzcat petite-difference-challenge2/train/in.tsv.xz | perl -C -ne 'print "$&\n" while/\p{L}+/g;' | sort | uniq -c | sort -k 1rn | head -n 50000 | sort -k 2 > vocab50000.txt
#+END_SRC
#+RESULTS:
:results:
# Out[8]:
:end:
Daje to lepszy efekt niż można się spodziewać. Odrzucamy w ten sposób
tylko bardzo rzadkie słowa (albo takie, które wystąpiły tylko raz w
korpusie — tzw. /hapax legomena/), choć tych słów jest bardzo dużo.
*Zagadka*: 50000 najczęstszych słów (1,9% *typów*) pokrywa jaki odsetek *wystąpień*?
Rozkład normalny w języku nie jest… normalny — nie spotkamy się z nim
badając języki. W tekstach dominują „skrzywione” rozkłady z długimi,
„chudymi” ogonami.
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
! xzcat petite-difference-challenge2/train/in.tsv.xz | perl -C -ne 'print "$&\n" while/\p{L}+/g;' | sort | uniq -c | sort -k 1rn | cut -f 1 > freqs.txt
#+END_SRC
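Na marginesie, odsetek z wcześniejszej zagadki można policzyć samodzielnie na podstawie pliku ~freqs.txt~ (szkic, zakładamy, że plik został już wygenerowany powyższym poleceniem):
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
import re

counts = []
with open('freqs.txt', 'r') as fh:
    for line in fh:
        m = re.match(r'\s*(\d+)', line)
        if m:
            counts.append(int(m.group(1)))

# plik jest posortowany malejąco po częstości
top = sum(counts[:50000])
total = sum(counts)
f'{top / total:.1%} wystąpień przypada na 50 000 najczęstszych typów'
#+END_SRC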
#+BEGIN_SRC ipython :session mysession :results file
%matplotlib inline
import matplotlib.pyplot as plt
import re
from math import log
freqs = []
with open('freqs.txt', 'r') as fh:
    for line in fh:
        m = re.match(r'\s*(\d+)', line)
        if m:
            freqs.append(int(m.group(1)))
plt.plot(range(len(freqs)), freqs)
fname = 'word-distribution.png'
plt.savefig(fname)
fname
#+END_SRC
#+RESULTS:
[[file:word-distribution.png]]
** Lematyzacja
Lematyzacja wydaje się dobrym pomysłem, zwłaszcza dla języków o bogatej fleksji:
- znacznie redukujemy słownik,
- formy fleksyjne tego samego wyrazu są traktowane tak samo (co wydaje się słuszne).
W praktyce współcześnie *nie* stosuje się lematyzacji (w połączeniu z
metodami opartymi na sieciach neuronowych):
- lematyzacja wymaga wiedzy językowej (reguł lub słownika),
wytworzenie takiej wiedzy może być kosztowne, obecnie preferowane
są metody niezależne od języka;
- tracimy pewną informację niesioną przez formę fleksyjną (co w szczególnych
przypadkach może być niefortunne, np. /aspiracja/ i /aspiracje/);
- lematyzacja nie jest trywialnym problemem ze względu na niejednoznaczności
(/Lekarzu, lecz się sam/);
- niektóre niejednoznaczności są seryjne, wybór lematu może być arbitralny,
np. czy /posiadanie/, /gotowanie/, /skakanie/ to rzeczowniki czy czasowniki?
a /urządzenie/, /mieszkanie/?
- zazwyczaj sieci neuronowe (czy nawet prostsze modele typu Word2vec)
są w stanie nauczyć się rekonstruowania zależności między formami fleksyjnymi
(i więcej: błędnych form, błędów ortograficznych, form archaicznych itd.)
** Zejście na poziom znaków
Skoro słownik wyrazów jest zbyt duży, to może zejść na poziom znaków?
- pojedynczy znak alfabetu wprawdzie nic nie znaczy (co znaczy /h/?)
- … ale rozmiar wejścia przy kodowaniu gorącą jedynką
dramatycznie się zmniejsza
- może działać, jeśli dodać wielowarstwową sieć
neuronową
- … ale może być bardzo kosztowne obliczeniowo
A może coś pośredniego między znakami a wyrazami?
** BPE
Ani znaki, ani wyrazy — coś pomiędzy: jednostki podwyrazowe (/subword
units/). Moglibyśmy np. dzielić wyraz /superkomputera/ na dwie
jednostki /super/+/komputera/, a może nawet trzy: /super/+/komputer/+/a/?
Najpopularniejszy algorytm podziału na jednostki podwyrazowe to BPE
(/byte-pair encoding/), zainspirowany algorytmami kompresji danych.
Lista jednostek jest automatycznie indukowana na podstawie tekstu (nie
potrzeba żadnej wiedzy o języku!). Ich liczba musi być natomiast z góry
określona.
W kroku początkowym zaznaczamy końce wyrazów (tokenów), robimy to po
to, żeby jednostki podwyrazowe nie przekraczały granic wyrazów.
Następnie wykonujemy tyle kroków iteracji, ile wynosi rozmiar zadanego
słownika. W każdym kroku szukamy najczęstszego bigramu, od tego
momentu traktujemy go jako całostkę (wkładamy go do „pudełka”).
[[./bpe.png]]
*** Implementacja w Pythonie
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
from collections import Counter
def replace_bigram(l, b, r):
    i = 0
    while i < len(l) - 1:
        if (l[i], l[i+1]) == b:
            l[i:i+2] = [r]
        i += 1
    return l

def learn_bpe_vocab(d, max_vocab_size):
    d = list(d.replace(' ', '$') + '$')   # '$' oznacza koniec wyrazu
    vocab = []
    for ix in range(0, max_vocab_size):
        # bigramy nieprzekraczające granicy wyrazu
        bigrams = [(d[i], d[i+1]) for i in range(0, len(d) - 1) if d[i][-1] != '$']
        selected_bigram = Counter(bigrams).most_common(1)[0][0]
        new_subword = selected_bigram[0] + selected_bigram[1]
        d = replace_bigram(d, selected_bigram, new_subword)
        vocab.append(new_subword)
    return vocab

vocab1 = learn_bpe_vocab('to be or not to be that is the question', 10)
vocab1
#+END_SRC
#+RESULTS:
:results:
# Out[1]:
: ['e$', 'to', 'to$', 'be$', 't$', 'th', 'or', 'or$', 'no', 'not$']
:end:
Słownik jednostek podwyrazowych możemy zastosować do dowolnego tekstu, np. do tekstu,
na którym słownik był wyuczony:
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
def apply_bpe_vocab(vocab, d):
    d = list(d.replace(' ', '$') + '$')
    vocab_set = set(vocab)
    modified = True
    while modified:
        ix = 0
        modified = False
        while ix < len(d) - 1:
            bigram = d[ix] + d[ix+1]
            if bigram in vocab_set:
                d[ix:ix+2] = [bigram]
                modified = True
            else:
                ix += 1
    return d

' '.join(apply_bpe_vocab(vocab1, 'to be or not to be that is the question'))
#+END_SRC
#+RESULTS:
:results:
# Out[5]:
: 'to$ be$ or$ not$ to$ be$ th a t$ i s $ th e$ q u e s t i o n $'
:end:
Zauważmy, że oprócz jednostek podwyrazowych pozostały izolowane litery,
zazwyczaj dodajemy je do słownika. (I zazwyczaj słownik jest trochę
większy niż wartość podana jako parametr przy uczeniu BPE — jest
większy o znaki i specjalne tokeny typu ~UNK~, ~BOS~, ~EOS~, ~PAD~.)
*Pytanie*: Jaki problem może się pojawić przy zastosowaniu BPE dla tekstu,
gdzie pojawiają się chińskie znaki? Jak można sobie z nim poradzić?
Słownik jednostek podwyrazowych można stosować dla dowolnego tekstu:
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
' '.join(apply_bpe_vocab(vocab1, 'tom will be the best'))
#+END_SRC
#+RESULTS:
:results:
# Out[6]:
: 'to m $ w i l l $ be$ th e$ b e s t$'
:end:
Jak można zauważyć, algorytm BPE daje dwa rodzaje jednostek podwyrazowych:
- jednostki, które mogą być doklejane na początku wyrazu;
- jednostki, które stanowią koniec wyrazu, w szczególności są całym wyrazem.
*** Gotowa implementacja
Po raz pierwszy BPE użyto do neuronowego tłumaczenia maszynowego.
Użyjmy modułu autorstwa Rica Sennricha (https://github.com/rsennrich/subword-nmt).
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
! pip install subword-nmt
#+END_SRC
Wyindukujmy słownik dla zbioru uczącego zadania identyfikacji płci
autora tekstu:
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
! xzcat petite-difference-challenge2/train/in.tsv.xz | perl -C -ne 'print "$&\n" while/\p{L}+/g;' | python -m subword_nmt.learn_bpe -s 50000 -v > bpe_vocab.txt
#+END_SRC
Procedura trwa kilka minut, trzeba uzbroić się w cierpliwość (ale wypisywanie bigramów przyspieszy!).
#+BEGIN_SRC
pair 0: n i -> ni (frequency 17625075)
pair 1: i e -> ie (frequency 11471590)
pair 2: c z -> cz (frequency 9143490)
pair 3: ni e</w> -> nie</w> (frequency 7901783)
pair 4: p o -> po (frequency 7790826)
pair 5: r z -> rz (frequency 7542046)
pair 6: s t -> st (frequency 7269069)
pair 7: e m</w> -> em</w> (frequency 7207280)
pair 8: d z -> dz (frequency 6860931)
pair 9: s z -> sz (frequency 6609907)
pair 10: r a -> ra (frequency 6601618)
pair 11: o w -> ow (frequency 6395963)
pair 12: i e</w> -> ie</w> (frequency 5906869)
pair 13: n a -> na (frequency 5300380)
pair 14: r o -> ro (frequency 5181363)
pair 15: n a</w> -> na</w> (frequency 5125807)
pair 16: a ł -> ał (frequency 4786696)
pair 17: j e -> je (frequency 4599579)
pair 18: s i -> si (frequency 4300984)
pair 19: a l -> al (frequency 4276823)
pair 20: t e -> te (frequency 4033344)
pair 21: w i -> wi (frequency 3939063)
pair 22: c h</w> -> ch</w> (frequency 3919410)
pair 23: c h -> ch (frequency 3661410)
pair 24: k o -> ko (frequency 3629840)
pair 25: z a -> za (frequency 3625424)
pair 26: t a -> ta (frequency 3570094)
pair 27: p rz -> prz (frequency 3494551)
pair 28: g o</w> -> go</w> (frequency 3279997)
pair 29: a r -> ar (frequency 3081492)
pair 30: si ę</w> -> się</w> (frequency 2973681)
...
pair 49970: brz mieniu</w> -> brzmieniu</w> (frequency 483)
pair 49971: bieżą cych</w> -> bieżących</w> (frequency 483)
pair 49972: biegu nkę</w> -> biegunkę</w> (frequency 483)
pair 49973: ban kowości</w> -> bankowości</w> (frequency 483)
pair 49974: ba ku</w> -> baku</w> (frequency 483)
pair 49975: ba cznie</w> -> bacznie</w> (frequency 483)
pair 49976: Przypad kowo</w> -> Przypadkowo</w> (frequency 483)
pair 49977: MA Ł -> MAŁ (frequency 483)
pair 49978: Lep pera</w> -> Leppera</w> (frequency 483)
pair 49979: Ko za -> Koza (frequency 483)
pair 49980: Jak byś</w> -> Jakbyś</w> (frequency 483)
pair 49981: Geni alne</w> -> Genialne</w> (frequency 483)
pair 49982: Że nada</w> -> Żenada</w> (frequency 482)
pair 49983: ń czykiem</w> -> ńczykiem</w> (frequency 482)
pair 49984: zwie ń -> zwień (frequency 482)
pair 49985: zost ałaś</w> -> zostałaś</w> (frequency 482)
pair 49986: zni szczona</w> -> zniszczona</w> (frequency 482)
pair 49987: ze stawi -> zestawi (frequency 482)
pair 49988: za sób</w> -> zasób</w> (frequency 482)
pair 49989: węd rówkę</w> -> wędrówkę</w> (frequency 482)
pair 49990: wysko czyła</w> -> wyskoczyła</w> (frequency 482)
pair 49991: wyle czenia</w> -> wyleczenia</w> (frequency 482)
pair 49992: wychowaw cze</w> -> wychowawcze</w> (frequency 482)
pair 49993: w t -> wt (frequency 482)
pair 49994: un da -> unda (frequency 482)
pair 49995: udzie lałem</w> -> udzielałem</w> (frequency 482)
pair 49996: tę czy</w> -> tęczy</w> (frequency 482)
pair 49997: tro sce</w> -> trosce</w> (frequency 482)
pair 49998: słusz ności</w> -> słuszności</w> (frequency 482)
pair 49999: su me</w> -> sume</w> (frequency 482)
#+END_SRC
Zastosujmy teraz wyindukowany słownik BPE dla jakiegoś rzeczywistego tekstu.
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
! echo 'Cierpiałem na straszne lagi kilkanaście sekund lub dłużej czarnego ekranu przy próbie przełączenia się / uruchomienia prawie każdej aplikacji. Dodatkowo telefon mi się wyłączał czasem bez powodu sam z siebie, albo resetował. Ostatnio nawet przeglądarka zaczęła się często zawieszać i Android proponował wymuszone zamknięcie. Do tego te problemy z połączeniem do komputera przez USB.' | perl -C -ne 'print "$& " while/\p{L}+/g;' | python -m subword_nmt.apply_bpe -c bpe_vocab.txt
#+END_SRC
Ta konkretna implementacja dokleja sekwencję ~@@ ~ do jednostek podwyrazowych, które nie kończą wyrazu (tzn. po których wyraz jest kontynuowany).
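Scalenie jednostek z powrotem w wyrazy sprowadza się więc do usunięcia tych znaczników, np. (szkic na wymyślonym, przykładowym podziale):
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
import re

segmented = 'Cier@@ piałem na straszne la@@ gi kilkanaście sekund'
re.sub(r'@@ ', '', segmented)
#+END_SRC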

View File

@ -0,0 +1,133 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 13. <i>Podejście generatywne w ekstrakcji informacji</i> [wykład]</h2> \n",
"<h3> Filip Graliński (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Ekstrakcja informacji a podejście generatywne\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Podejście generatywne\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Do tej pory zadanie ekstrakcji informacji traktowaliśmy jako zadanie etykietowania sekwencji, tzn. uczyliśmy system zaznaczać tokeny składające się na ekstrahowane informacje.\n",
"\n",
"![Ekstrakcja informacji jako etykietowanie sekwencji, schemat](./ie-seqlab.png)\n",
"\n",
"Możliwe jest inne podeście, **generatywne**, w którym podchodzimy do problemu ekstrakcji informacji jak do swego rodzaju **tłumaczenia maszynowego** — „tłumaczymy” tekst (wraz z pytaniem lub etykietą) na informację.\n",
"\n",
"![Ekstrakcja informacji w podejściu generatywnym](./ie-gener.png)\n",
"\n",
"To podejście może się wydawać trudniejsze niż etykietowanie sekwencji, ale wystarczająco zaawansowanej architekturze sieci, jest wykonalne.\n",
"\n",
"Zalety:\n",
"\n",
"- informacja nie musi być dosłownie zapisana w tekście, ekstraktor może nauczyć się również normalizacji czy parafrazowania,\n",
"- nie wprowadzamy wielu kroków przetwarzania (gdzie błędy mogą się\n",
" namnażać), system działa na zasadzie *end-to-end*.\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Atencja\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Pierwsze systemu neuronowego tłumaczenia maszynowego używały siecie LSTM. Dopiero jednak dodanie tzw. atencji (*attention*) umożliwiło duży przeskok jakościowy. Najpierw atencję dodano do sieci rekurencyjnych, później powstały sieci oparte *wyłącznie* na atencji — modele Transformer.\n",
"\n",
"Idea atencji polega na tym, że sieć może kierować selektywnie „snop” uwagi na wyrazy na wejściu lub do tej pory wygenerowane wyrazy.\n",
"\n",
"Mechanizm atencji korzysta z:\n",
"\n",
"- z poprzedniego stanu sieci $\\vec{s^{k-1}}$ (to jest „miejsce”, z którego „kierujemy” atencję),\n",
"- z wektora reprezentującego słowo $\\vec{v}(t_i)$ (to jest „miejsce”, na które kierujemy atencję), gdzie\n",
" $\\vec{v}(t_i)$ to reprezentacja wektorowa wyrazu $t_i$ (statyczny embedding lub reprezentacja wektorowa\n",
" z poprzedniej warstwy dla sieci wielowarstwowej),\n",
"\n",
"aby wytworzyć wektor kontekstu $\\vec{\\xi^k}$ (który z kolei będzie w jakiś sposób wnosił wkład do wyliczenia nowej wartości stanu $\\vec{s^k}$ lub wyjścia $y^k$.\n",
"\n",
"Najpierw wyliczymy skalarne wartości atencji, tzn. liczby, które będą sygnalizowały, jak bardzo wektor $\\vec{v}(t_i)$ „pasuje” do $\\vec{s^{k-1}}$, w najprostszej wersji można po prostu skorzystać z iloczynu skalarnego (o ile $n=m$),\n",
"\n",
"$$a(\\vec{s^{k-1}}, \\vec{v}(t_i)) = \\vec{s^{k-1}}\\vec{v}(t_i).$$\n",
"\n",
"**Pytanie**: co jeśli $n$ nie jest równe $m$, tzn. rozmiar embeddingu nie jest równy rozmiarowi wektora stanu?\n",
"\n",
"W przypadku sieci LSTM korzysta się częściej z bardziej skomplikowanego wzoru zawierającego dodatkowe wyuczalne wagi:\n",
"\n",
"$$a(\\vec{s^{k-1}}, \\vec{v}(t_i)) = \\vec{w_a}\\operatorname{tanh}(W_a\\vec{s^{k-1}} + U_a\\vec{v}(t_i))$$\n",
"\n",
"**Pytanie**: jakie rozmiary mają macierze $W_a$, $U_a$ i wektor $w_a$?\n",
"\n",
"Powtórzmy, że wartości $a$ są wartościami skalarnymi, natomiast nie są one znormalizowane (nie sumują się do jedynki), normalizujemy je używając schematu podobnego do softmaxa:\n",
"\n",
"$$\\alpha_{i} = \\frac{e^{a(\\vec{s^{k-1}}, \\vec{v}(t_i))}}{\\sum_j e^{a(\\vec{s^{k-1}}, \\vec{v}(t_j))}}$$\n",
"\n",
"Wektor kontekstu $\\vec{\\xi^k}$ będzie po prostu średnią ważoną wektorowych reprezentacji słów:\n",
"\n",
"$$\\vec{\\xi^k} = \\sum_i \\alpha_i\\vec{v}(t_i)$$\n",
"\n",
"**Pytanie**: zasadniczo atencja jest środkiem do celu (żeby sieć się sprawniej uczyła), czy można atencja sama w sobie może być do czegoś przydatna?\n",
"\n"
]
}
],
"metadata": {
"author": "Filip Graliński",
"email": "filipg@amu.edu.pl",
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"lang": "pl",
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
},
"org": null,
"subtitle": "13.Podejście generatywne w ekstrakcji informacji[wykład]",
"title": "Ekstrakcja informacji",
"year": "2021"
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@ -0,0 +1,55 @@
* Ekstrakcja informacji a podejście generatywne
** Podejście generatywne
Do tej pory zadanie ekstrakcji informacji traktowaliśmy jako zadanie etykietowania sekwencji, tzn. uczyliśmy system zaznaczać tokeny składające się na ekstrahowane informacje.
[[./ie-seqlab.png]]
Możliwe jest inne podejście, *generatywne*, w którym podchodzimy do problemu ekstrakcji informacji jak do swego rodzaju *tłumaczenia maszynowego* — „tłumaczymy” tekst (wraz z pytaniem lub etykietą) na informację.
[[./ie-gener.png]]
To podejście może się wydawać trudniejsze niż etykietowanie sekwencji, ale przy wystarczająco zaawansowanej architekturze sieci jest wykonalne.
Zalety:
- informacja nie musi być dosłownie zapisana w tekście, ekstraktor może nauczyć się również normalizacji czy parafrazowania,
- nie wprowadzamy wielu kroków przetwarzania (gdzie błędy mogą się
namnażać), system działa na zasadzie /end-to-end/.
** Atencja
Pierwsze systemy neuronowego tłumaczenia maszynowego używały sieci LSTM. Dopiero jednak dodanie tzw. atencji (/attention/) umożliwiło duży przeskok jakościowy. Najpierw atencję dodano do sieci rekurencyjnych, później powstały sieci oparte /wyłącznie/ na atencji — modele Transformer.
Idea atencji polega na tym, że sieć może kierować selektywnie „snop” uwagi na wyrazy na wejściu lub do tej pory wygenerowane wyrazy.
Mechanizm atencji korzysta z:
- z poprzedniego stanu sieci $\vec{s^{k-1}}$ (to jest „miejsce”, z którego „kierujemy” atencję),
- z wektora reprezentującego słowo $\vec{v}(t_i)$ (to jest „miejsce”, na które kierujemy atencję), gdzie
$\vec{v}(t_i)$ to reprezentacja wektorowa wyrazu $t_i$ (statyczny embedding lub reprezentacja wektorowa
z poprzedniej warstwy dla sieci wielowarstwowej),
aby wytworzyć wektor kontekstu $\vec{\xi^k}$ (który z kolei będzie w jakiś sposób wnosił wkład do wyliczenia nowej wartości stanu $\vec{s^k}$ lub wyjścia $y^k$).
Najpierw wyliczymy skalarne wartości atencji, tzn. liczby, które będą sygnalizowały, jak bardzo wektor $\vec{v}(t_i)$ „pasuje” do $\vec{s^{k-1}}$, w najprostszej wersji można po prostu skorzystać z iloczynu skalarnego (o ile $n=m$),
$$a(\vec{s^{k-1}}, \vec{v}(t_i)) = \vec{s^{k-1}}\vec{v}(t_i).$$
*Pytanie*: co jeśli $n$ nie jest równe $m$, tzn. rozmiar embeddingu nie jest równy rozmiarowi wektora stanu?
W przypadku sieci LSTM korzysta się częściej z bardziej skomplikowanego wzoru zawierającego dodatkowe wyuczalne wagi:
$$a(\vec{s^{k-1}}, \vec{v}(t_i)) = \vec{w_a}\operatorname{tanh}(W_a\vec{s^{k-1}} + U_a\vec{v}(t_i))$$
*Pytanie*: jakie rozmiary mają macierze $W_a$, $U_a$ i wektor $w_a$?
Powtórzmy, że wartości $a$ są wartościami skalarnymi, natomiast nie są one znormalizowane (nie sumują się do jedynki), normalizujemy je używając schematu podobnego do softmaxa:
$$\alpha_{i} = \frac{e^{a(\vec{s^{k-1}}, \vec{v}(t_i))}}{\sum_j e^{a(\vec{s^{k-1}}, \vec{v}(t_j))}}$$
Wektor kontekstu $\vec{\xi^k}$ będzie po prostu średnią ważoną wektorowych reprezentacji słów:
$$\vec{\xi^k} = \sum_i \alpha_i\vec{v}(t_i)$$
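Poniżej minimalny szkic liczenia wag atencji i wektora kontekstu dla wariantu z iloczynem skalarnym (wartości wektorów są przykładowe, przyjmujemy $n = m$):
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
import torch

s_prev = torch.tensor([0.5, -1.0, 2.0])   # poprzedni stan s^{k-1}
V = torch.tensor([[ 1.0, 0.0, 1.0],       # reprezentacje v(t_1), v(t_2), v(t_3)
                  [-1.0, 2.0, 0.0],
                  [ 0.5, 0.5, 0.5]])

a = V @ s_prev                    # skalarne wartości atencji a(s^{k-1}, v(t_i))
alpha = torch.softmax(a, dim=0)   # znormalizowane wagi atencji
xi = alpha @ V                    # wektor kontekstu: średnia ważona reprezentacji
alpha, xi
#+END_SRC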
*Pytanie*: zasadniczo atencja jest środkiem do celu (żeby sieć się sprawniej uczyła), ale czy atencja sama w sobie może być do czegoś przydatna?

389
wyk/14_pretrenowanie.ipynb Normal file
View File

@ -0,0 +1,389 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 14. <i>Pretrenowane modele języka</i> [wykład]</h2> \n",
"<h3> Filip Graliński (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Pretrenowanie modeli\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"System AlphaZero uczy się grając sam ze sobą — wystarczy 24 godziny,\n",
"by system nauczył się grać w szachy lub go na nadludzkim poziomie.\n",
"\n",
"**Pytanie**: Dlaczego granie samemu ze sobą nie jest dobrym sposobem\n",
" nauczenia się grania w szachy dla człowieka, a dla maszyny jest?\n",
"\n",
"Co jest odpowiednikiem grania samemu ze sobą w świecie przetwarzania tekstu?\n",
"Tzn. **pretrenowanie** (*pretraining*) na dużym korpusie tekstu. (Tekst jest tani!)\n",
"\n",
"Jest kilka sposobów na pretrenowanie modelu, w każdym razie sprowadza\n",
"się do odgadywania następnego bądź zamaskowanego słowa.\n",
"W każdym razie zawsze stosujemy softmax (być może ze „sztuczkami” takimi jak\n",
"negatywne próbkowanie albo hierarchiczny softmax) na pewnej **reprezentacji kontekstowej**:\n",
"\n",
"$$\\vec{p} = \\operatorname{softmax}(f(\\vec{c})).$$\n",
"\n",
"Model jest karany przy użyciu funkcji log loss:\n",
"\n",
"$$-\\log(p_j),$$\n",
"\n",
"gdzie $w_j$ jest wyrazem, który pojawił się rzeczywiście w korpusie.\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Przewidywanie słowa (GPT-2)\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Jeden ze sposobów pretrenowania modelu to po prostu przewidywanie\n",
"następnego słowa.\n",
"\n",
"Zainstalujmy najpierw bibliotekę transformers.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"! pip install transformers"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"50257\n"
]
},
{
"data": {
"text/plain": [
"[('Âł', 0.6182783842086792),\n",
" ('È', 0.1154019758105278),\n",
" ('Ñģ', 0.026960616931319237),\n",
" ('_____', 0.024418892338871956),\n",
" ('________', 0.014962316490709782),\n",
" ('ÃĤ', 0.010653386823832989),\n",
" ('ä¸Ń', 0.008340531960129738),\n",
" ('Ñ', 0.007557711564004421),\n",
" ('Ê', 0.007046067621558905),\n",
" ('ãĢ', 0.006875576451420784),\n",
" ('ile', 0.006685272324830294),\n",
" ('____', 0.006307446397840977),\n",
" ('âĢĭ', 0.006306538358330727),\n",
" ('ÑĢ', 0.006197483278810978),\n",
" ('ĠBelarus', 0.006108700763434172),\n",
" ('Æ', 0.005720408633351326),\n",
" ('ĠPoland', 0.0053678699769079685),\n",
" ('á¹', 0.004606408067047596),\n",
" ('îĢ', 0.004161055199801922),\n",
" ('????', 0.004056799225509167),\n",
" ('_______', 0.0038176667876541615),\n",
" ('ä¸', 0.0036082742735743523),\n",
" ('Ì', 0.003221835708245635),\n",
" ('urs', 0.003080119378864765),\n",
" ('________________', 0.0027312245219945908),\n",
" ('ĠLithuania', 0.0023860156070441008),\n",
" ('ich', 0.0021211160346865654),\n",
" ('iz', 0.002069818088784814),\n",
" ('vern', 0.002001357264816761),\n",
" ('ÅĤ', 0.001717406208626926)]"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import torch\n",
"from transformers import GPT2Tokenizer, GPT2LMHeadModel\n",
"tokenizer = GPT2Tokenizer.from_pretrained('gpt2-large')\n",
"model = GPT2LMHeadModel.from_pretrained('gpt2-large')\n",
"text = 'Warsaw is the capital city of'\n",
"encoded_input = tokenizer(text, return_tensors='pt')\n",
"output = model(**encoded_input)\n",
"next_token_probs = torch.softmax(output[0][:, -1, :][0], dim=0)\n",
"\n",
"nb_of_tokens = next_token_probs.size()[0]\n",
"print(nb_of_tokens)\n",
"\n",
"_, top_k_indices = torch.topk(next_token_probs, 30, sorted=True)\n",
"\n",
"words = tokenizer.convert_ids_to_tokens(top_k_indices)\n",
"\n",
"top_probs = []\n",
"\n",
"for ix in range(len(top_k_indices)):\n",
" top_probs.append((words[ix], next_token_probs[top_k_indices[ix]].item()))\n",
"\n",
"top_probs"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Zalety tego podejścia:\n",
"\n",
"- prostota,\n",
"- dobra podstawa do strojenia systemów generowania tekstu zwłaszcza\n",
" „otwartego” (systemy dialogowe, generowanie (fake) newsów, streszczanie tekstu),\n",
" ale niekoniecznie tłumaczenia maszynowego,\n",
"- zaskakująca skuteczność przy uczeniu *few-shot* i *zero-shot*.\n",
"\n",
"Wady:\n",
"\n",
"- asymetryczność, przetwarzanie tylko z lewej do prawej, preferencja\n",
" dla lewego kontekstu,\n",
"- mniejsza skuteczność przy dostrajaniu do zadań klasyfikacji i innych zadań\n",
" niepolegających na prostym generowaniu.\n",
"\n",
"Przykłady modeli: GPT, GPT-2, GPT-3, DialoGPT.\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Maskowanie słów (BERT)\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Inną metodą jest maskowanie słów (*Masked Language Modeling*, *MLM*).\n",
"\n",
"W tym podejściu losowe wybrane zastępujemy losowe słowa specjalnym\n",
"tokenem (`[MASK]`) i każemy modelowi odgadywać w ten sposób\n",
"zamaskowane słowa (z uwzględnieniem również prawego kontekstu!).\n",
"\n",
"Móciąc ściśle, w jednym z pierwszych modeli tego typu (BERT)\n",
"zastosowano schemat, w którym również niezamaskowane słowa są odgadywane (!):\n",
"\n",
"- wybieramy losowe 15% wyrazów do odgadnięcia\n",
"- 80% z nich zastępujemy tokenem `[MASK]`,\n",
"- 10% zastępujemy innym losowym wyrazem,\n",
"- 10% pozostawiamy bez zmian.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/filipg/.local/lib/python3.9/site-packages/transformers/models/auto/modeling_auto.py:806: FutureWarning: The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use `AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and `AutoModelForSeq2SeqLM` for encoder-decoder models.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"W którym państwie leży Bombaj? W USA. (score: 0.16715531051158905)\n",
"W którym państwie leży Bombaj? W India. (score: 0.09912960231304169)\n",
"W którym państwie leży Bombaj? W Indian. (score: 0.039642028510570526)\n",
"W którym państwie leży Bombaj? W Nepal. (score: 0.027137665078043938)\n",
"W którym państwie leży Bombaj? W Pakistan. (score: 0.027065709233283997)\n",
"W którym państwie leży Bombaj? W Polsce. (score: 0.023737527430057526)\n",
"W którym państwie leży Bombaj? W .... (score: 0.02306722290813923)\n",
"W którym państwie leży Bombaj? W Bangladesh. (score: 0.022106658667325974)\n",
"W którym państwie leży Bombaj? W .... (score: 0.01628892682492733)\n",
"W którym państwie leży Bombaj? W Niemczech. (score: 0.014501162804663181)\n"
]
}
],
"source": [
"from transformers import AutoModelWithLMHead, AutoTokenizer\n",
"import torch\n",
"\n",
"tokenizer = AutoTokenizer.from_pretrained(\"xlm-roberta-large\")\n",
"model = AutoModelWithLMHead.from_pretrained(\"xlm-roberta-large\")\n",
"\n",
"sequence = f'W którym państwie leży Bombaj? W {tokenizer.mask_token}.'\n",
"\n",
"input_ids = tokenizer.encode(sequence, return_tensors=\"pt\")\n",
"mask_token_index = torch.where(input_ids == tokenizer.mask_token_id)[1]\n",
"\n",
"token_logits = model(input_ids)[0]\n",
"mask_token_logits = token_logits[0, mask_token_index, :]\n",
"mask_token_logits = torch.softmax(mask_token_logits, dim=1)\n",
"\n",
"top_10 = torch.topk(mask_token_logits, 10, dim=1)\n",
"top_10_tokens = zip(top_10.indices[0].tolist(), top_10.values[0].tolist())\n",
"\n",
"for token, score in top_10_tokens:\n",
" print(sequence.replace(tokenizer.mask_token, tokenizer.decode([token])), f\"(score: {score})\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Przykłady: BERT, RoBERTa (również Polish RoBERTa).\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Podejście generatywne (koder-dekoder).\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"System ma wygenerować odpowiedź na różne pytania (również\n",
"odpowiadające zadaniu MLM), np.:\n",
"\n",
"- \"translate English to German: That is good.\" => \"Das ist gut.\"\n",
"- \"cola sentence: The course is jumping well.\" => \"not acceptable\"\n",
"- \"summarize: state authorities dispatched emergency crews tuesday to survey the damage after an onslaught of severe weather in mississippi&#x2026;\"\n",
" => \"six people hospitalized after a storm in attala county\"\n",
"- \"Thank you for <X> me to your party <Y> week.\" => <X> for inviting <Y> last <Z>\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['World War II ended in World War II.',\n",
" 'World War II ended in 1945..',\n",
" 'World War II ended in 1945.',\n",
" 'World War II ended in 1945.',\n",
" 'World War II ended in 1945.']"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from transformers import T5Tokenizer, T5Config, T5ForConditionalGeneration\n",
"\n",
"T5_PATH = 't5-base'\n",
"\n",
"t5_tokenizer = T5Tokenizer.from_pretrained(T5_PATH)\n",
"t5_config = T5Config.from_pretrained(T5_PATH)\n",
"t5_mlm = T5ForConditionalGeneration.from_pretrained(T5_PATH, config=t5_config)\n",
"\n",
"slot = '<extra_id_0>'\n",
"\n",
"text = f'World War II ended in {slot}.'\n",
"\n",
"encoded = t5_tokenizer.encode_plus(text, add_special_tokens=True, return_tensors='pt')\n",
"input_ids = encoded['input_ids']\n",
"\n",
"outputs = t5_mlm.generate(input_ids=input_ids,\n",
" num_beams=200, num_return_sequences=5,\n",
" max_length=5)\n",
"\n",
"_0_index = text.index(slot)\n",
"_result_prefix = text[:_0_index]\n",
"_result_suffix = text[_0_index+len(slot):]\n",
"\n",
"def _filter(output, end_token='<extra_id_1>'):\n",
" _txt = t5_tokenizer.decode(output[2:], skip_special_tokens=False, clean_up_tokenization_spaces=False)\n",
" if end_token in _txt:\n",
" _end_token_index = _txt.index(end_token)\n",
" return _result_prefix + _txt[:_end_token_index] + _result_suffix\n",
" else:\n",
" return _result_prefix + _txt + _result_suffix\n",
"\n",
"\n",
"results = [_filter(out) for out in outputs]\n",
"results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"(Zob. [https://arxiv.org/pdf/1910.10683.pdf](https://arxiv.org/pdf/1910.10683.pdf))\n",
"\n",
"Przykład: T5, mT5\n",
"\n"
]
}
],
"metadata": {
"author": "Filip Graliński",
"email": "filipg@amu.edu.pl",
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"lang": "pl",
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
},
"org": null,
"subtitle": "14.Pretrenowane modele języka[wykład]",
"title": "Ekstrakcja informacji",
"year": "2021"
},
"nbformat": 4,
"nbformat_minor": 4
}

212
wyk/14_pretrenowanie.org Normal file
View File

@ -0,0 +1,212 @@
* Pretrenowanie modeli
System AlphaZero uczy się grając sam ze sobą — wystarczy 24 godziny,
by system nauczył się grać w szachy lub go na nadludzkim poziomie.
*Pytanie*: Dlaczego granie samemu ze sobą nie jest dobrym sposobem
nauczenia się grania w szachy dla człowieka, a dla maszyny jest?
Co jest odpowiednikiem grania samemu ze sobą w świecie przetwarzania tekstu?
Tzn. *pretrenowanie* (/pretraining/) na dużym korpusie tekstu. (Tekst jest tani!)
Jest kilka sposobów na pretrenowanie modelu, ale wszystkie sprowadzają
się do odgadywania następnego bądź zamaskowanego słowa.
W każdym razie zawsze stosujemy softmax (być może ze „sztuczkami” takimi jak
negatywne próbkowanie albo hierarchiczny softmax) na pewnej *reprezentacji kontekstowej*:
$$\vec{p} = \operatorname{softmax}(f(\vec{c})).$$
Model jest karany przy użyciu funkcji log loss:
$$-\log(p_j),$$
gdzie $p_j$ to prawdopodobieństwo przypisane wyrazowi $w_j$, który rzeczywiście pojawił się w korpusie.
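Dla ilustracji (na przykładowych wartościach) policzmy log loss dla rozkładu otrzymanego z softmaksa:
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
import torch

logits = torch.tensor([2.0, -1.0, 0.5])   # f(c) dla 3-wyrazowego słownika (przykład)
p = torch.softmax(logits, dim=0)
j = 2                                      # indeks wyrazu, który rzeczywiście wystąpił
-torch.log(p[j])                           # kara (log loss)
#+END_SRC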
** Przewidywanie słowa (GPT-2)
Jeden ze sposobów pretrenowania modelu to po prostu przewidywanie
następnego słowa.
Zainstalujmy najpierw bibliotekę transformers.
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
! pip install transformers
#+END_SRC
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
tokenizer = GPT2Tokenizer.from_pretrained('gpt2-large')
model = GPT2LMHeadModel.from_pretrained('gpt2-large')
text = "Warsaw is the capital city of"
encoded_input = tokenizer(text, return_tensors='pt')
output = model(**encoded_input)
next_token_probs = torch.softmax(output[0][:, -1, :][0], dim=0)
nb_of_tokens = next_token_probs.size()[0]
_, top_k_indices = torch.topk(next_token_probs, 30, sorted=True)
top_k_indices
# words = tokenizer.convert_ids_to_tokens(top)
# top_probs = []
# for ix in range(len(top)):
# top_probs.append((words[ix], next_token_probs[top[ix]].item()))
# top_probs
#+END_SRC
#+RESULTS:
:results:
# Out[8]:
#+BEGIN_EXAMPLE
[('Ġthe', 0.4415026307106018),
('ĠPoland', 0.236798495054245),
('ĠBelarus', 0.10114768147468567),
('ĠUkraine', 0.058283545076847076),
('Ġeastern', 0.020564062520861626),
('ĠEastern', 0.011137397028505802),
('ĠPolish', 0.010205904021859169),
('ĠWestern', 0.00833223108202219),
('Ġwestern', 0.006872199941426516),
('Ġa', 0.004939113277941942),
('ĠSlovakia', 0.003553805174306035),
('ĠLithuania', 0.003335304092615843),
('ĠRussia', 0.002872465644031763),
('Ġcentral', 0.002493523992598057),
('Ġmodern', 0.0022767107002437115),
('ĠCzech', 0.0022264323197305202),
('ĠPr', 0.002146221464499831),
('Ġformer', 0.0021054286044090986),
('Ġwhat', 0.0017435317859053612),
('ĠSlov', 0.0014634730760008097),
('ĠUkrainian', 0.0014347084797918797),
('ĠCentral', 0.0013676199596375227),
('ĠSouth', 0.0013484350638464093),
('Ġone', 0.001204205909743905),
('ĠNorthern', 0.0011802552035078406),
('ĠWest', 0.001175572513602674),
('ĠEast', 0.0011596156982704997),
('Ġsouthern', 0.0011580033460631967),
('Ġnorthern', 0.001110077602788806),
('Ġ"', 0.0010494199814274907)]
#+END_EXAMPLE
:end:
Zalety tego podejścia:
- prostota,
- dobra podstawa do strojenia systemów generowania tekstu zwłaszcza
„otwartego” (systemy dialogowe, generowanie (fake) newsów, streszczanie tekstu),
ale niekoniecznie tłumaczenia maszynowego,
- zaskakująca skuteczność przy uczeniu /few-shot/ i /zero-shot/.
Wady:
- asymetryczność, przetwarzanie tylko z lewej do prawej, preferencja
dla lewego kontekstu,
- mniejsza skuteczność przy dostrajaniu do zadań klasyfikacji i innych zadań
niepolegających na prostym generowaniu.
Przykłady modeli: GPT, GPT-2, GPT-3, DialoGPT.
** Maskowanie słów (BERT)
Inną metodą jest maskowanie słów (/Masked Language Modeling/, /MLM/).
W tym podejściu losowo wybrane słowa zastępujemy specjalnym
tokenem (~[MASK]~) i każemy modelowi odgadywać w ten sposób
zamaskowane słowa (z uwzględnieniem również prawego kontekstu!).
Mówiąc ściśle, w jednym z pierwszych modeli tego typu (BERT)
zastosowano schemat, w którym również niezamaskowane słowa są odgadywane (!):
- wybieramy losowe 15% wyrazów do odgadnięcia
- 80% z nich zastępujemy tokenem ~[MASK]~,
- 10% zastępujemy innym losowym wyrazem,
- 10% pozostawiamy bez zmian.
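Minimalny szkic takiego maskowania na poziomie listy tokenów (nazwa tokenu ~[MASK]~, rozmiar słownika i sposób losowania „innego wyrazu” są tu czysto przykładowe):
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
import random

MASK, VOCAB_SIZE = '[MASK]', 50000   # wartości przykładowe

def mask_tokens(tokens, p=0.15):
    inputs, targets = [], []
    for t in tokens:
        if random.random() < p:          # wybieramy ok. 15% wyrazów do odgadnięcia
            targets.append(t)
            r = random.random()
            if r < 0.8:                  # 80%: zastępujemy tokenem [MASK]
                inputs.append(MASK)
            elif r < 0.9:                # 10%: inny losowy wyraz (tu: sztuczny identyfikator)
                inputs.append('w' + str(random.randrange(VOCAB_SIZE)))
            else:                        # 10%: pozostawiamy bez zmian
                inputs.append(t)
        else:
            targets.append(None)         # ten token nie jest odgadywany
            inputs.append(t)
    return inputs, targets

mask_tokens('ala ma kota a kot ma ale'.split())
#+END_SRC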
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
from transformers import AutoModelWithLMHead, AutoTokenizer
import torch
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large")
model = AutoModelWithLMHead.from_pretrained("xlm-roberta-large")
sequence = f'II wojna światowa zakończyła się w {tokenizer.mask_token} roku.'
input_ids = tokenizer.encode(sequence, return_tensors="pt")
mask_token_index = torch.where(input_ids == tokenizer.mask_token_id)[1]
token_logits = model(input_ids)[0]
mask_token_logits = token_logits[0, mask_token_index, :]
mask_token_logits = torch.softmax(mask_token_logits, dim=1)
top_10 = torch.topk(mask_token_logits, 10, dim=1)
top_10_tokens = zip(top_10.indices[0].tolist(), top_10.values[0].tolist())
for token, score in top_10_tokens:
print(sequence.replace(tokenizer.mask_token, tokenizer.decode([token])), f"(score: {score})")
#+END_SRC
#+RESULTS:
:results:
# Out[3]:
:end:
Przykłady: BERT, RoBERTa (również Polish RoBERTa).
** Podejście generatywne (koder-dekoder).
System ma wygenerować odpowiedź na różne pytania (również
odpowiadające zadaniu MLM), np.:
- "translate English to German: That is good." => "Das ist gut."
- "cola sentence: The course is jumping well." => "not acceptable"
- "summarize: state authorities dispatched emergency crews tuesday to survey the damage after an onslaught of severe weather in mississippi..."
=> "six people hospitalized after a storm in attala county"
- "Thank you for <X> me to your party <Y> week." => <X> for inviting <Y> last <Z>
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
from transformers import T5Tokenizer, T5Config, T5ForConditionalGeneration
T5_PATH = 't5-base'
t5_tokenizer = T5Tokenizer.from_pretrained(T5_PATH)
t5_config = T5Config.from_pretrained(T5_PATH)
t5_mlm = T5ForConditionalGeneration.from_pretrained(T5_PATH, config=t5_config)
slot = '<extra_id_0>'
text = f'Warsaw is the {slot} of Poland.'
encoded = t5_tokenizer.encode_plus(text, add_special_tokens=True, return_tensors='pt')
input_ids = encoded['input_ids']
outputs = t5_mlm.generate(input_ids=input_ids,
num_beams=200, num_return_sequences=5,
max_length=5)
_0_index = text.index(slot)
_result_prefix = text[:_0_index]
_result_suffix = text[_0_index+len(slot):]
def _filter(output, end_token='<extra_id_1>'):
_txt = t5_tokenizer.decode(output[2:], skip_special_tokens=False, clean_up_tokenization_spaces=False)
if end_token in _txt:
_end_token_index = _txt.index(end_token)
return _result_prefix + _txt[:_end_token_index] + _result_suffix
else:
return _result_prefix + _txt + _result_suffix
results = [_filter(out) for out in outputs]
results
#+END_SRC
(Zob. https://arxiv.org/pdf/1910.10683.pdf)
Przykład: T5, mT5

273
wyk/15_transformer.ipynb Normal file
View File

@ -0,0 +1,273 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"![Logo 1](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech1.jpg)\n",
"<div class=\"alert alert-block alert-info\">\n",
"<h1> Ekstrakcja informacji </h1>\n",
"<h2> 15. <i>Sieci Transformer i ich zastosowanie w ekstrakcji informacji</i> [wykład]</h2> \n",
"<h3> Filip Graliński (2021)</h3>\n",
"</div>\n",
"\n",
"![Logo 2](https://git.wmi.amu.edu.pl/AITech/Szablon/raw/branch/master/Logotyp_AITech2.jpg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Modele Transformer\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Atencja\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Atencję w modelach Transformer można interpretować jako rodzaj\n",
"„miękkiego” odpytywania swego rodzaju bazy danych, w której\n",
"przechowywane są pary klucz-wartość. Mamy trzy rodzaje wektorów (a\n",
"właściwie macierzy, bo wektory są od razu upakowane w macierze):\n",
"\n",
"- $Q$ - macierz zapytań,\n",
"- $K$ - macierz kluczy,\n",
"- $V$ - macierz wartości odpowiadających kluczom $K$.\n",
"\n",
"W atencji modeli Transformer patrzymy jak bardzo zapytania $Q$ pasują\n",
"do kluczy $K$ i na tej podstawie zwracamy wartości $V$ (im bardziej\n",
"**klucz** pasuje do **zapytania**, tym większy wkład wnosi odpowiednia **wartość**).\n",
"Ten rodzaj odpytywania można zrealizować z pomocą mnożenia macierzy i funkcji softmax:\n",
"\n",
"$$\\operatorname{Atention}(Q,K,V) = \\operatorname{softmax}(QK^T)V$$\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Uproszczony przykład\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Załóżmy, że rozmiar embeddingu wynosi 4, w macierzach rozpatrywać\n",
"będziemy po 3 wektory naraz (możemy sobie wyobrazić, że zdanie zawiera 3 wyrazy).\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[20.5700, 36.2400, 31.1000],\n",
" [15.1100, 13.9100, 7.9500],\n",
" [ 2.2100, 7.1800, 7.4000]])"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import torch\n",
"\n",
"Q = torch.tensor([\n",
" [0.3, -2.0, 0.4, 6.0],\n",
" [-1.0, 1.5, 0.2, 3.0],\n",
" [0.3, -1.0, 0.2, 1.0]])\n",
"\n",
"K = torch.tensor([\n",
" [-0.5, 1.7, 0.3, 4.0],\n",
" [0.4, -1.5, 0.3, 5.5],\n",
" [-1.0, -3.5, 1.0, 4.0]])\n",
"\n",
"M = Q @ torch.transpose(K, 0, 1)\n",
"M"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Jak widać, najbardziej pierwszy wektor $Q$ pasuje do drugiego wektora $K$.\n",
"Znormalizujmy te wartości używać funkcji softmax.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[1.5562e-07, 9.9418e-01, 5.8236e-03],\n",
" [7.6807e-01, 2.3134e-01, 5.9683e-04],\n",
" [3.0817e-03, 4.4385e-01, 5.5307e-01]])"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import torch\n",
"\n",
"Mn = torch.softmax(M, 1)\n",
"Mn"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Drugi wektor zapytania najbardziej pasuje do pierwszego klucza, trochę\n",
"mniej do drugiego klucza, o wiele mniej do trzeciego klucza. Te\n",
"wektory to oczywiście wektory atencji (drugie słowo najbardziej\n",
"„patrzy” na pierwsze słowo).\n",
"\n",
"Teraz będziemy przemnażać przez wektory wartości:\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[ 3.9750e+00, 9.9419e-02, 1.0116e-01, 1.5765e-01, 5.8255e-04],\n",
" [ 9.2517e-01, 6.9357e+00, 2.3313e-02, -3.8112e+00, 9.2174e-01],\n",
" [ 1.6095e+00, 7.2120e-02, 2.1031e-01, 5.5597e+00, 5.9005e-02]])"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import torch\n",
"\n",
"V = torch.tensor([\n",
" [0.0, 9.0, 0.0, -5.0, 1.2],\n",
" [4.0, 0.1, 0.1, 0.1, 0.0],\n",
" [-0.3, 0.0, 0.3, 10.0, 0.1]])\n",
"\n",
"Mn @ V"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Dodatkowa normalizacja\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"W praktyce dobrze jest znormalizować pierwszy iloczyn przez\n",
"$\\sqrt{d_k}$, gdzie $d_k$ to rozmiar wektora klucza.\n",
"\n",
"$$\\operatorname{Atention}(Q,K,V) = \\operatorname{softmax}(\\frac{QK^T}{\\sqrt{d^k}})V$$\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Skąd się biorą Q, K i V?\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Wektory (macierze) $Q$, $K$ i $V$ w pierwszej warstwie pochodzą z\n",
"embeddingów tokenów $E$ (właściwie jednostek BPE).\n",
"\n",
"- $Q$ = $EW^Q$\n",
"- $K$ = $EW^K$\n",
"- $V$ = $EW^V$\n",
"\n",
"W kolejnych warstwach zamiast $E$ wykorzystywane jest wyjście z poprzedniej warstwy.\n",
"\n",
"## Zastosowanie w ekstrakcji informacji\n",
"\n",
"W prosty sposób możemy do sieci Transformer dołączyć głowicę realizującą etykietowanie sekwencji."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Literatura\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"[https://arxiv.org/pdf/1706.03762.pdf](https://arxiv.org/pdf/1706.03762.pdf)\n",
"\n"
]
}
],
"metadata": {
"author": "Filip Graliński",
"email": "filipg@amu.edu.pl",
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"lang": "pl",
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
},
"org": null,
"subtitle": "15.Sieci Transformer i ich zastosowanie w ekstrakcji informacji[wykład]",
"title": "Ekstrakcja informacji",
"year": "2021"
},
"nbformat": 4,
"nbformat_minor": 4
}

124
wyk/15_transformer.org Normal file
View File

@ -0,0 +1,124 @@
* Modele Transformer
** Atencja
Atencję w modelach Transformer można interpretować jako rodzaj
„miękkiego” odpytywania swego rodzaju bazy danych, w której
przechowywane są pary klucz-wartość. Mamy trzy rodzaje wektorów (a
właściwie macierzy, bo wektory są od razu upakowane w macierze):
- $Q$ - macierz zapytań,
- $K$ - macierz kluczy,
- $V$ - macierz wartości odpowiadających kluczom $K$.
W atencji modeli Transformer patrzymy jak bardzo zapytania $Q$ pasują
do kluczy $K$ i na tej podstawie zwracamy wartości $V$ (im bardziej
*klucz* pasuje do *zapytania*, tym większy wkład wnosi odpowiednia *wartość*).
Ten rodzaj odpytywania można zrealizować z pomocą mnożenia macierzy i funkcji softmax:
$$\operatorname{Attention}(Q,K,V) = \operatorname{softmax}(QK^T)V$$
*** Uproszczony przykład
Załóżmy, że rozmiar embeddingu wynosi 4, w macierzach rozpatrywać
będziemy po 3 wektory naraz (możemy sobie wyobrazić, że zdanie zawiera 3 wyrazy).
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
import torch
Q = torch.tensor([
[0.3, -2.0, 0.4, 6.0],
[-1.0, 1.5, 0.2, 3.0],
[0.3, -1.0, 0.2, 1.0]])
K = torch.tensor([
[-0.5, 1.7, 0.3, 4.0],
[0.4, -1.5, 0.3, 5.5],
[-1.0, -3.5, 1.0, 4.0]])
M = Q @ torch.transpose(K, 0, 1)
M
#+END_SRC
#+RESULTS:
:results:
# Out[11]:
#+BEGIN_EXAMPLE
tensor([[20.5700, 36.2400, 31.1000],
[15.1100, 13.9100, 7.9500],
[ 2.2100, 7.1800, 7.4000]])
#+END_EXAMPLE
:end:
Jak widać, pierwszy wektor $Q$ najbardziej pasuje do drugiego wektora $K$.
Znormalizujmy te wartości, używając funkcji softmax.
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
import torch
Mn = torch.softmax(M, 1)
Mn
#+END_SRC
#+RESULTS:
:results:
# Out[12]:
#+BEGIN_EXAMPLE
tensor([[1.5562e-07, 9.9418e-01, 5.8236e-03],
[7.6807e-01, 2.3134e-01, 5.9683e-04],
[3.0817e-03, 4.4385e-01, 5.5307e-01]])
#+END_EXAMPLE
:end:
Drugi wektor zapytania najbardziej pasuje do pierwszego klucza, trochę
mniej do drugiego klucza, o wiele mniej do trzeciego klucza. Te
wektory to oczywiście wektory atencji (drugie słowo najbardziej
„patrzy” na pierwsze słowo).
Teraz będziemy przemnażać przez wektory wartości:
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
import torch
V = torch.tensor([
[0.0, 9.0, 0.0, -5.0],
[4.0, 0.1, 0.1, 0.1],
[-0.3, 0.0, 0.3, 10.0]])
Mn @ V
#+END_SRC
#+RESULTS:
:results:
# Out[13]:
#+BEGIN_EXAMPLE
tensor([[ 3.9750, 0.0994, 0.1012, 0.1577],
[ 0.9252, 6.9357, 0.0233, -3.8112],
[ 1.6095, 0.0721, 0.2103, 5.5597]])
#+END_EXAMPLE
:end:
*** Dodatkowa normalizacja
W praktyce dobrze jest znormalizować pierwszy iloczyn przez
$\sqrt{d_k}$, gdzie $d_k$ to rozmiar wektora klucza.
$$\operatorname{Attention}(Q,K,V) = \operatorname{softmax}(\frac{QK^T}{\sqrt{d_k}})V$$
*** Skąd się biorą Q, K i V?
Wektory (macierze) $Q$, $K$ i $V$ w pierwszej warstwie pochodzą z
embeddingów tokenów $E$ (właściwie jednostek BPE).
- $Q$ = $EW^Q$
- $K$ = $EW^K$
- $V$ = $EW^V$
W kolejnych warstwach zamiast $E$ wykorzystywane jest wyjście z poprzedniej warstwy.
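Szkic (z losowymi, przykładowymi wagami $W^Q$, $W^K$, $W^V$) pokazujący, jak z embeddingów $E$ otrzymujemy $Q$, $K$ i $V$ i liczymy atencję ze skalowaniem:
#+BEGIN_SRC ipython :session mysession :exports both :results raw drawer
import torch

seq_len, d_model, d_k = 3, 4, 4   # wartości przykładowe

E = torch.randn(seq_len, d_model)              # embeddingi trzech tokenów
W_Q, W_K, W_V = (torch.randn(d_model, d_k) for _ in range(3))

Q, K, V = E @ W_Q, E @ W_K, E @ W_V
torch.softmax(Q @ K.T / d_k**0.5, dim=1) @ V   # Attention(Q, K, V)
#+END_SRC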
** Literatura
https://arxiv.org/pdf/1706.03762.pdf

BIN
wyk/bpe.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 124 KiB

1
wyk/crf-viterbi.drawio Normal file
View File

@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2021-05-26T13:43:59.377Z" agent="5.0 (X11)" etag="4emVx1cQBqc02lQgOp_X" version="14.6.13" type="device"><diagram id="7pFB8Xg2-vPC_YrQG171" name="Page-1">5ZpNc5swEIZ/Dcd2jAQ2PsbYTQ/pTKaZTt3eFJCNWowYWY5xf31FEAa0dr4DDLl40IIk9Lyr1UrYwv4muxQkjb7xkMYWGoWZhecWQlPHVr+54VAYXIwLw1qwsDDZleGG/aPaONLWHQvptvGg5DyWLG0aA54kNJANGxGC75uPrXjc7DUlawoMNwGJofUnC2VUWD00qexfKVtHZc/2eFrc2ZDyYT2SbURCvq+Z8MLCvuBcFlebzKdxzq7kUtT7cubu8cUETeRTKiwvJ+j3gvFb5s3Dq/2PdDlzPulW7ki80wO+iIl+X3koIUiaqS5mkdzEymCry60U/C/1ecyFsiQ8UU/OViyODROJ2TpRxUC9JFX22R0Vkim8F/rGhoVh3s1sHzFJb1IS5H3ulS8pm+C7JKT5+49USb+qaoBmZxnYR7LKIynfUCkO6pGywliLob3RdnR5X2lbmqKarKWGRHvT+thyBVxdaObP4I8A/82A8SO3Z/gxwJ/yPaOW71rTmeU7ljcZrhq4b2o4QA0f0FfNqNB/jlFNFLJNi/VgxbKc2tuED7dJzIPE8Ali+L2IjQGx634TQyd8rFViE0Dse7+J4VHHxJAHAYUqR9JFLmTE1zwh8aKyGsGqeuaK81TD+0OlPOiEj+wkb6KlGZPL2vWvvKnPri7NM93yfeFQFhI13mW9UKuVF6tq96WyXjG+fFAPa6YY8J0I6AOwNCtJxJrKh/KuM04gaEwku2u+yJtL6vU90CIzT+s60E7fNdDWcwUL4ZUX0CAAiYW6c+u5jvtWyZjBuPPQXG75+hubTWSdx2Yb7t96NpOx27OUyYZbrp7lTCay7icm3Cb1bGKayLqfmHAvs7XcmQKp2hvZljsHAIezr/SMKT+GYrS6r7Rh0m8PFz844zoxF9rFD9NNNFz83uNZVbv0YeqKh0sfO0bs6dr5EUxqBxx7DOe3y5WgM/ow2Rtw6AHH6507P0wcBxx7DOfHo66d3wX08xwUFTkoGnQOCpZh1PVUgEf1NTGGvSEAy8K465kBNwQ1MfCHEgN1vUZjeIYF6NMkvMj/zJFjjMl2y4KmFi89QH38zB9yrHFyT3AqbU8+ydc9XHOmOq5kmhoyuQb/4lOErlVJABrC5jGvbTRUfKsADd1reRz2K+SFOcCz5X3dx6Dq+8+vso12PgaVpF/8NagdVzselpUeMn6pq03OLLptuRo8dLv9KIdujiGiPe044cGn11g1Cn/YSthTM9t5NyVUsfr/YDGLqj9h4sV/</diagram></mxfile>

BIN
wyk/crf-viterbi.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 13 KiB

wyk/ie-gener.drawio  (new file, 1 line)
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2021-06-09T07:54:35.721Z" agent="5.0 (X11)" etag="NciLNBJF1axAiSJ0r0sv" version="14.7.3" type="device"><diagram id="HvCQlNLg7fWOxGx64C6g" name="Page-1">3Vjbbts4EP0aAe2DF9bNiR9jO9lisVekQLF9Y8SxRJsSVYqKrHz9Di+SJctF0m6TtvGDRB2RM+QcnhnKXrjOD79KUmZ/CArcC+b04IUbLwiWkY9XDbQWiMPQAqlk1EL+EbhlD+DAuUNrRqEadVRCcMXKMZiIooBEjTAipWjG3baCj72WJIUJcJsQPkU/MKoyi14GF0f8HbA06zz7i6V9k5Ous1tJlREqmgEUXnvhWgqhbCs/rIHr2HVxseNuPvO2n5iEQj1lgPgrjt7/+du/G76THw8f5Wyxv545K/eE127BXrDgaG+V4atFqlsdUpWk0OtQrQvO4lOtJ7+6I8k+laIu6CwRXEgvvMJ+Mr17E8Sxp6e1xutJ++1xfOfoHRDKirTzh0uxLsfTQHg6t/LFJ/u7kJDr8JZVre/UGEOPTOmtl4Oyo3FjVrgxQdX6LS6wZBVLzDLnwJnrVQE1NjTI6ioX+lFBXhqbrEgYZbQutOlaXzi5Q/+6t+p8g9l0aUG0G84+1eSXxyM5iFswCleg4GC2gco5Aj42t4zztQ3ZphAF2CibpYSb+Pj0XpQIzDB84arJmIJbxLXVBjMEYuIe5JYbKWSMUigQM4SA3ql6VKWk2EPnywvCufn100QDOL3P6sDv1YVZCQRSIVvs0g24dIJ0GclfuufmqO8gclg20rYDicspaW/7KDtsOOV9gQqDiQoV7CvNbAM7bx17y1XCRNM+gaRx6BxNZ5jDHZJi4DcJRg0QX+mYMsx7V+5FjtRoN2cpHNP1LUjxHyflHCfBc1ESnkmMJ8HHhF7q5pbD4UqXGgwFFNQ1NwknFSp9TI81AnRSdB4N1CAQ8ZlAdJgEThS7H5s/Fx3n4W/BTFZxPERRPOIBpTc2UYlaJuBGDavNiaHYf8SQIjIFNTFkyOqX/fX8RRP+tKIk2Sc7nSBNlnwgFaEPDEymtymRdfkQiZwpMcPbIFuyL8iWP6kQo8WJEC++sxCXEyK7SquLXV19XeH6llF/1moVh2M++kAP+Fi+JB9doh4Q0rR9kYJOLXey08qbprXSy0RDCjDHma2QOUl28Pb1Cik+PWYspsT1Ve9lmDtX0yxblN13dP1Tm7MnVP3B0gtvpqwOsuJg8P8TI4et+oGlODk4Rk88OD4fo9MqV7aKFFqGN6DaPQNFXq/E/PkJIeF3rlXdmWeUGwfJrzUfdfZkb0ia53XF9A0f1pF3eWE84PLb5KHAoz9GQ1QKv6rsV+KO7PXqrJVByn2tBMfLk+J3+Xw5FB+P/8XYU+jxD63w+j8=</diagram></mxfile>

BIN  wyk/ie-gener.png  (new file; binary not shown, 30 KiB)

wyk/ie-seqlab.drawio  (new file, 1 line)
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2021-06-09T07:51:04.277Z" agent="5.0 (X11)" etag="tdTX7mJTGI1dKBKJk9w0" version="14.7.3" type="device"><diagram id="HvCQlNLg7fWOxGx64C6g" name="Page-1">7Vhtk5MwEP41zOiHOhQKd/14bc+3Uet4zjj6LSVbyDUQDOEo/no3EEp5uempd+qc9kMbHpLd7PNkl6WWu4z3LyRJo7eCArccm+4td2U5znw2xW8NlDXguW4NhJLRGpq2wBX7Bga0DZozCllnohKCK5Z2wUAkCQSqgxEpRdGdthW86zUlIQyAq4DwIfqJURXV6Llz1uIvgYVR43nqz+s7MWkmm0iyiFBRHEHupeUupRCqHsX7JXDNXcNLve75LXcPG5OQqLssEGtv9vHd688rfi2/7L/Iib+7nBgrN4TnJmDL8TnaW0R4yw/1qEGylCQ6DlUacvyvud78YkOCXShFntBJILiQlnuB82S4eeJ4nqW3tcTv3vhpu75x9BIIZUnY+MNQapfdbSA83Fv62zf7RkiINb1plutfWhlDj0zpoxeDqlfjwczwYILK9V0MMGUZC6owbeDMzMqAVjY0yPIsFvpSQZxWNlkSMMponmjTuf7iZIP+9WzV+Ibq0IUJ0W44+5qTZ6eZPOLN6dDlKNhXx0DFHIEpDreM82VN2SoRCdQsV6G4K6+9+ihSBCZIn7soIqbgCnFttcAKgZi4AbnlVSpEjFJIEKsEAX1S9apMSbGDxpfluHb1OWwTDeD2bs2D6SG7sCqBQClkiVOaBecmIU1Fmvrmumjz25kZLOrktgGJqSnhwXabdjgwmfcDWegMslDBLtPKFnBtLT1rvgiYKMo7iNSlzsg0ohyekBCJXwXIGiC+0JwyrHsX5kaM0mg3oxJ25boPUaY9Uc6Gooxp4jyUJO5IYeyRjwU91cMth/2FftQgFZBQM1wFnGSY6V15aiNABw+dk0QdEeGNENFgEjhR7KZrfowd4+G9YFVVMTrMvK4OA4IzkcsAzKrjp03PkOecMKSIDEENDFViHcL+ef1mA/1AlTsGShQkYfB4M2nm/2WZ5P1vMR5Bi/EAQXvTkZj7rcwtOvU7nI3sq7MVVSjbOttaR0ssYAx0yO+gGPrHXxLrxEw2WTp6ba9Pz7nLmtswezFZrd+sP+Do1WG0PmKijqwb7TD+/z3fUVH0vDv0fP5Yzzd7qLLo/5sNxrl7Tw3G3P6zDcbZQL9Uikzp0lWUVGzKQHcZdtXI54+33Tif/WXtxnygS/+R8jNF7z5Zf9BKN+/nl+0N9Jj/Tj2aF7sjQYry8FILwwfYk6LUOSNJpFt1qHqTrZAxCa7h6eNNpPnZ6UfU4S35F5XDy/Z/x7ogtn/eupffAQ==</diagram></mxfile>

BIN  wyk/ie-seqlab.png  (new file; binary not shown, 25 KiB)

BIN  wyk/img-feed-forward.png  (new file; binary not shown, 21 KiB)

BIN  (four more new binary image files, filenames not shown in this view; 15 KiB each)

wyk/rnn-seq.drawio  (new file, 1 line)
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2021-06-01T16:04:31.943Z" agent="5.0 (X11)" etag="_GtJ7cj7F7SQ6oQHAF32" version="14.6.13" type="device"><diagram id="Q_5Aon-lI3fA6Ftl2Xdk" name="Page-1">7Vxtc5s4EP41zFw/JIPAGPyxtnMvM+mlc0mn1/umGMVWAsiD5Tj4159kBDZvBtcFiTgzTUdahED77K6WfcCaOfHf/gjhcvGFuMjTDN1908ypZhijAWD/c0EUC8DQNmLJPMSukO0F93iLhFAX0jV20SozkBLiUbzMCmckCNCMZmQwDMkmO+yJeNmrLuEcFQT3M+gVpd+xSxex1DHsvfxPhOeL5MpgOIqP+DAZLKZYLaBLNrFotzjzRjMnISE0bvlvE+Rx5SV6iTXwe8XR9MZCFNAmJ3yjk/V/Fn4ZRI59E03tLw+3z1emGU/zCr21WLG4WxolKpiHZL0Uw1BI0VuZ4uFjMlwv3hhIl8vsBBEf0TBiQ8RE6SnCRmzR3ez1bVpCtjjQdSqEAuN5OvVeDawhNHGCVkC9UphOAhfxSXTNHG8WmKL7JZzxoxvmCUy2oD676BSwZqXyDpV0BJ9K1YGc6oZF1aVDDlUH7LZUZ5Sobuixy44xa8x547MHExm7RCouqJgpi2b1uKIheUET4pGQSQISsJHjJ+x5ORH08Dxg3RlTLmLyMVc9Zk79WRzwsevyy5QCl4W2PeyGWexMowjeoMzs24KuGAqQ/4hcFwdzJtasyW/s7xXNNHv8qtlT3uNBDlMm4Jja009M9OmIqwCJrjIC14aVUXhZoAG2fW11qPNhQVvIZfuP6JKQLsicBNC72Utz9rkfc0vIUmjxGVEaic0UrinJ6hi9YfovP50tNe79EJPx9vTtsBMlnYAt9+Ak3v2RzMc7+9N2veS8eH18UT+DJtMMWYczVB9wKAzn6OiE5dYRIg9S/Jq9u18Os10SFQfs30qzbvS4eTHhL8kEbatRuDNa2+StI6CACwNFTGOC7J4kH6RhAaQNDFd0k6YQj2GSQHg4wIQfUHL/AaaVU21x9xl1ud1bfdl6Wt9CzIZbSGyMsvaQxMfkAAYO4UrBq8sVDjOFg8RBVq5g9wJoMOqLZyoLNGiMtCUV6aq8kD1HrfDchxVZiKI7mtlsR2stWUgrbT1yHMWeppJ8q95xbKl7YdEnFEW6dcTi2uQZQIhTvxLMbmjv3E6uOpVz2thAxEk5NNO7OAPgsjoij4V3vQqJDXP89iKiWVCUon6ibkRsmkqc7YjnIV1VzYgut5oBSuiQTqsXQ6cElBwGbbNrlnLsWsKQKkCvxQD1iF5LJj7Gr/mXQa/VYacavWYXWeXm/JovnV6rUbea9JpdjCwf+U9ma6qvmekN8x+7Yh/qiGEbVOQ/nM0x3mn+U+OTyrE5djFJVZXNqdtdVGNz7N68SdB+xEreE6yNWFKLv7YtEzGV+ZwE6fYBLK99DXJh07GyM8QW2Frty3ZkGobK/E/zEDBqakEDqSGg7ElYHf7n1D1QNv+TlKz75DiqZftWU8cZyXQcpzdMX+uIOeeWnZXkfxyjIjJK5n9ODImy+R/ng//pLJU42xHPQ7qq/hFdbv1DOv8D9LIwlgOhbQLIUY4AAnqDr846YoAERD2igIBe5uo5DuiF0MtggWrxU40GAnqxCNqcB9rhKpkJqlO5mlQQ0HtTIe06F0q3qfqXYZLYU/9+oC73zVr92CdX5jvNh+o8UzlCCOjFN2RUZYRqdxrVKCGg9+ZDgi4iV9PKlrBJaZELSK1iqswLpWh3gKGSzBAAval7SrON+kgAmluR1I8c0htVlB46eUOUzQ8B8FEO/QUbaVN2XtivPPd59x8JnRL02nlVQjJNBCp/xEEyT3RqbJRNFAHwUR3pMrOQ/N1x5YfH0QVXR+TTRaDse6FcZb9YFymv88eyZV6QjnhAAV8qeqEMKUPfkhWFAUZckSTcrmckYAti69BjIU829W0AZ1sU4ApqIZUtW6AbVswoeIXcnFr73sMuzvCfxig3HcLs5Mnb/erigpkYW/JPmdNxgo0lGsNMMDesknJ3ko1kSKIWTan4atl3jiB0Gcpr1vr75p8Uf76YnfNzr9d9MtcmA40/2+qP0a5p89BEti/axNIcg83hY66iEG5csiQueWQ2wg8NVnSzsyAeaWn0ghGFswXr3MW2NL76yi/Lm3/tm+Or27tJIt01dUzd66Kl3zEbpWi2FSZJ+DXHE+Tz+99ES7yC6d0GcCd75kNG49luyeKO+GiyG/K8ieIpVtvIr1zQ+rrPofC47ZpWNg8peUQbOp3GQLO4NQmrebcY5HLoq4YUZYsYFPehu/er/2Fe/yVO0LH+i8H7Het/1J3+WXf/g8vxQ+j+Z6vNm/8B</diagram></mxfile>

BIN  wyk/rnn-seq.png  (new file; binary not shown, 41 KiB)

wyk/rnn.drawio  (new file, 1 line)
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2021-06-01T15:52:54.501Z" agent="5.0 (X11)" etag="o_4DnBy9wLtBbjAQzQYJ" version="14.6.13" type="device"><diagram id="Q_5Aon-lI3fA6Ftl2Xdk" name="Page-1">5Vltj5s4EP41SO2HXcUYwuZjQ3J3H1pdpb3TtffNBRfcAI6M88L++hsH8w45qk2W7FbarDyPX7Bn5pkZjIHd+Pi7INvwE/dpZJgz/2jglWGaCwvBfwVkOYDmjpkjgWC+xirgkT1RDc40umM+TRsDJeeRZNsm6PEkoZ5sYEQIfmgO+86j5lO3JKAd4NEjURf9h/kyzNEH06nwPygLwuLJaL7Ie2JSDNZLpCHx+SGHTofDawO7gnOZt+KjSyOlvEIvuQZ+G+gtNyZoIsdM+Fu6u39ttrGyB2edrZxPf338cadX2ZNopw+sNyuzQgOC7xKfqkVmBl4eQibp45Z4qvcANgcslHEEEoKmXo4KSY+D+0Tl6cFtKI+pFBkM0ROwpU1fuAzW8qHSP9JnnYV13TsaJNrmQbl2pRZoaM38hJbmHaVQH7xEi1zIkAc8IdG6QpdNtVVjPnK+1cr6QaXMtMuTneRNVdIjk1/U9HtbS1/1Yqq9OtaFrBASOG5tkhK/FuspoZp2kop5+fnUoc4bDXTAd8KjZ3SlHUgSEVB5ZhzudwJBIyLZvrmPixvU7HF7C/6kYa83ebNjcvBo2bRQKgXfUJdHXACS8ETZ/TuLohZEIhYkIHqgTAr4UvGDQYz5oDti5vsnp+ljV9ORLkGwhybB8KJLMKuHX/ha9LJfC70uSBM8kiZoPiVPcIcnNP5GfZ8lAcCG7b6D3556hrPcG87q3Yk/7wF7fyaNoJdJIwjdm3bT0a2eTOI49/YL+nqRuKZxdlR39dLx/y+X1DNJLbFcP5c4r4IkzkAySYEMQIwNFFnO6q0nlaL6LLhmWvaopGJei2jIPmOWN5/jbXxj5ig5f/NJ/gZqaDQfG/icKQNfsc0axQ5EpPJADHMeKSJ9E9AKVCtiCeOqY+qywG7WBGZPTbB4yeIXLV4fMSYrCNDoisCekhgmfn02nS7YjbVpfjc1WbAbKvPgbSdlQUwGSorJA9xsXIC7Xubv6uRGyXBBp362r+qpnzmDR5fGtWatu9FZy2o5ifSsluHKbTzDlkMXZ3/eqPejken9et4/9BqS/QqvIe2rRmfcTePVrIFHfO/wdmJfeixN/A/qK5JSaUTSlHnPDzM+ScPyAUr4TCQYKzkh5gxdOL0OVEI1E9g9JiiwZwYsu/Uxp2PafP+dgNVZyGwvZI2LfGA9ktWGbdWA9MyGF/0bHtqX3YrId/b58e1z3KHGeGjkO75o2MbdsJ1uxZPhWsZiYbi2sXRpwiiMeIJfQlKZd22TLAZgA7GHUdXynkjKDwp8sxEL99uncWd8oZgFYvX5N7d19REdr/8D</diagram></mxfile>

BIN  wyk/rnn.png  (new file; binary not shown, 20 KiB)

wyk/softmax.drawio  (new file, 1 line)
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2021-05-19T13:46:19.321Z" agent="5.0 (X11)" etag="BY0RhjNpZv5bOkoRQgjF" version="14.6.13" type="device"><diagram id="g29DhglkffejKOdKbHrJ" name="Page-1">7VrbctowEP0aHuPRxTb2Y8ilmXYyzQztNOmbByug1liMEAHy9ZVr+SKJBOIYTGnykLHW8lo651ja1dLDF9PVJx7NJrcsJkkPgXjVw5c9hEIXyv+ZYZ0bPIxzw5jTODfByjCkz0QZgbIuaEzmWkfBWCLoTDeOWJqSkdBsEedsqXd7ZIn+1lk0JpZhOIoS2/qDxmKSWwPUr+w3hI4nxZuhH+Z3plHRWc1kPolitqyZ8FUPX3DGRH41XV2QJMOuwCV/7vqFu+XAOEnFLg+g65+f+VW8GoQPXyi5dAffxc2Z8vIUJQs1YTVYsS4Q4GyRxiRzAnp4sJxQQYazaJTdXUrKpW0ipolsQXmp3BEuyOrFccJy9lI1hE2J4GvZRT2AgAJMKQZ5qr2s8MeFbVLHvkA6UpyPS98VLPJCIfMGlPoWSsjxLKDklIWOxlxw9ptcsIRxaUlZKnsOHmmSGKYooeNUNkcSIiLtgwxAKkV4rm5MaRxnr9kIv05QGwwgnQG4gQF3AwF7wz+w8D+DjnvCBIRHRkBoEYBPF33cPzL0i72wBj9wgtMlwHWPjQC0fZskaXyexRsZjEk0n9ORzoWcO1/fZyA50p1qP8j2GXCA3y8slyuFY95a11t3hFM5oYyh3LiiIvcI+ki1H4rXyevKV9ZY1xqmp3w2JLZiIYMyOWO24CPyClb5ymBzW+Nu0+Zd2DhJIkGf9HFsIlS94Y5ROcJq5cShJh3s9x1geMmnoB6sB0ymryJ2Vb5cjJww1H2JiI+JsHz9VVk5+XcID7cgvFIlGPTrKnH+Y51gYOgkAM1EgqEpEnBYhbhHuTTtKpTO+A/MdaIp/6HBP/KcMDisBLwPCTSQgOu1JAHX714CviUB4Hh9SwYnEyKaMbrbeYhop0hyw/2PDgk6ZwBtypIAOOGPwDwm6J4C+zgx2z5OlwEzU+2eATtTJauZRcDBz3SNxQL5Nk6lmutAmdF0e0DZmVXK+FTq6zka/Yp65an98YCGceegtZBs9NpLIbvNIIzFF4Om4aOxhpQDPFDsiOzY8c2k1tIH4Pv1/EEaMN5n+oBAjs/WRfFYjqRg2FAoqGuh2GWwf0koHR41FcXCQgFB2PhQMvPmu6D801NPZOaw+5aEXZn7kMTb9453CSIEDjweQdhp8IcgdjiL8tsThPR1RILAdko4/Hr97fb83tLFyWSFEG7Pdg6aFeI2igS1YqOvl5H8oqy0eyGp9o172he+5eve9UPepQClMNgePnZaqYLmEUPTPAOaCcuBw0fcRpmiqmUiqIuw01pmtwmGh+S+GFZ/vs4z8JoHF74eueIDVzdxG7lpqRkvxJpmwMfCtU9Z+q/LEr5DloEhy0MvZW2kPaUsA5lFvXc/PZnFzEyNXNhwu9vf0iWb1a+28+7VT9/x1R8=</diagram></mxfile>

BIN  wyk/softmax.png  (new file; binary not shown, 16 KiB)

wyk/transformer.drawio  (new file, 1 line)
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2021-06-21T18:26:41.003Z" agent="5.0 (X11)" etag="PRyyghb9x3NmC1nfczsT" version="14.7.6" type="device"><diagram id="RKMbFXdSo0HOZQEC31J_" name="Page-1">7Ztbb5swFMc/DdL2sCpgLs0jkHbXvjSTqj5NKLgJKuCMOLd9+tnBkGK7TTqtsmVXiiJzMBf/fz7mHGMckFa7z022XNygHJaON8p3Dpg4njf2XfJPDfvWEADQGuZNkbcm92iYFn8gM46YdV3kcDWoiBEqcbEcGmeoruEMD2xZ06DtsNoDKodXXWZzKBims6wUrXdFjhet9dKLjvYvsJgvuiu74bjdU2VdZXaK1SLL0bY1HRoHrhyQNgjhtlTtUlhS7TpdWgWun9nb31gDa3zOAd8m9+PsKs7r9f5r7F9H6Y/q9lPATrPJyjVrMbtbvO8kaNC6ziE9y8gByXZRYDhdZjO6d0uYE9sCVyXZckmRnQ42GO6evVG3bz7pNhBVEDd7UqXrMwz9vusJbHt71D9kbR0tnmgPQmbMGPN5f+qjLKTAlHmFSqJIcZkJOpEW46EYK9ygR5iiEjXEUqOa1EweirLkTFlZzGuyOSMKQWJPqH4F6YQx21EVeU4vI1V/yOc/AHAvhwRAIBLwJQC8t9LfF/SvDJbfA5rJHwjyPyJsMgDd+n8kGaRTc/UHujnApah/kNzE0+9OMDEYg25uMBYwJNmqMHgg8n3NCHRRuSVP4mCkm/6ScN2CkSgIdePgCRwuzJW/z7a0kR9IAiKf/oJ0A2dOlGycaPKBbFEGBSYGmrBFk4+smrGohNTNVY1KzN1OoaqsICVkecpJiWneKVKHNNAGVtp5VfhqVjRjtAAV4J9VylHJkvcTqI4hnQ3EtHMuSbr/TuylcFw5MXFm4D3IkCawqkl5kgT2BKl2jscCWH6gGywxy+Vg/SZcTMficpNwvd7KsJxMqR5twBJxWJR7y1n5k/FYxrp5iyz+TgwGwIXT/XIKZQBk4bTBAPiJHfUAZNGxyQB084CuQ9gCgJ+DUQ9AlnWYDEA7D5BlEgYD4FM59QBkb8wMBsBPfKgHIEvaTAYQ6gZAlp6FJVU7LzakOMeHlg8ytiWdjHL7mae2Orn84AhTEQqZRKQYoS8bxM5C6FmKUMhFlCOUDYNnIQS2ItTOC/95IB1bijB0dUMoWz5gEwDVwYj/+kUBFza8Agt1WxTlS5f+eyPajpR4UXoo0KVT6YiWaJifeuTf5OW4HudOvuQLvR7J4Au9N8N0cgHA7Jdrw9sXLlhwgWL/6eZBXgIztgAM/wRSD0ackbuLb6c/72JzIbg8hLd7upDN45fdh31PPo8HV38B</diagram></mxfile>

BIN  wyk/word-distribution.png  (new file; binary not shown, 6.3 KiB)