forked from kubapok/kleister-nda-clone
467 lines · 14 KiB · Plaintext
|
{
|
||
|
"cells": [
|
||
|
{
|
||
|
"cell_type": "markdown",
|
||
|
"metadata": {},
|
||
|
"source": [
|
||
|
"# Extract key information from Edgar NDA documents"
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "code",
|
||
|
"execution_count": 1,
|
||
|
"metadata": {},
|
||
|
"outputs": [],
|
||
|
"source": [
|
||
|
"import pathlib\n",
|
||
|
"from collections import Counter\n",
|
||
|
"from sklearn.metrics import *"
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "code",
|
||
|
"execution_count": 2,
|
||
|
"metadata": {},
|
||
|
"outputs": [],
|
||
|
"source": [
|
||
|
import os

# Root of the kleister-nda dataset checkout.  Overridable via the
# KLEISTER_PATH environment variable so the notebook is not tied to one
# machine's filesystem; the previous hard-coded Windows path is the default.
KLEISTER_PATH = pathlib.Path(os.environ.get('KLEISTER_PATH',
                                            'C:/Users/Fijka/Documents/kleister-nda-clone'))
# Which split to process; 'test-A' is treated specially below (it has no labels).
file_name = 'train'
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "markdown",
|
||
|
"metadata": {},
|
||
|
"source": [
|
||
|
"## Read expected train data"
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "code",
|
||
|
"execution_count": 3,
|
||
|
"metadata": {},
|
||
|
"outputs": [],
|
||
|
"source": [
|
||
|
def get_expected_data(filepath, data_key):
    """Extract the value of `data_key` from every line of an expected.tsv file.

    Each line is a sequence of space-separated key=value tokens.  Returns a
    list with one entry per line: the (last) value found for `data_key`, or
    the string 'NONE' when the key is absent from that line.
    """
    dataset_expected_key = []
    with open(filepath, 'r') as train_expected_file:
        for line in train_expected_file:
            data_value = 'NONE'
            for key_value in line.rstrip('\n').split(' '):
                # partition() splits on the FIRST '=' only, so values that
                # themselves contain '=' survive; tokens with no '=' at all
                # are skipped (the original `split('=')` unpack crashed on them).
                key, sep, value = key_value.partition('=')
                if sep and key == data_key:
                    data_value = value
            dataset_expected_key.append(data_value)
    return dataset_expected_key
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "code",
|
||
|
"execution_count": 4,
|
||
|
"metadata": {},
|
||
|
"outputs": [],
|
||
|
"source": [
|
||
|
"KEYS = ['effective_date', 'jurisdiction', 'party', 'term']"
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "code",
|
||
|
"execution_count": 5,
|
||
|
"metadata": {},
|
||
|
"outputs": [],
|
||
|
"source": [
|
||
|
def read_expected_data(filepath):
    """Read the expected values for every key in KEYS from `filepath`.

    Returns a list of parallel per-document value lists, ordered like KEYS.
    """
    return [get_expected_data(filepath, key) for key in KEYS]

# The test-A split ships without labels, so skip reading them there.
if file_name != 'test-A':
    train_expected_data = read_expected_data(KLEISTER_PATH/file_name/'expected.tsv')
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "code",
|
||
|
"execution_count": 6,
|
||
|
"metadata": {},
|
||
|
"outputs": [],
|
||
|
"source": [
|
||
|
# NOTE(review): the comprehension below is a bare expression nested inside an
# `if`, so Jupyter does not display it and its value is discarded — this cell
# currently has no observable effect (it looks like leftover inspection code).
if file_name != 'test-A':
    [i[:1] for i in train_expected_data]
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "markdown",
|
||
|
"metadata": {},
|
||
|
"source": [
|
||
|
"## Read train dataset"
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "code",
|
||
|
"execution_count": 7,
|
||
|
"metadata": {},
|
||
|
"outputs": [],
|
||
|
"source": [
|
||
|
import lzma
import csv

def read_data(filename):
    """Read an xz-compressed TSV file into a list of per-line column lists.

    The final element produced by split('\n') (the empty string after the
    trailing newline) is dropped via [:-1].
    NOTE(review): if the file does NOT end with a newline this drops a real
    row — assumed every in.tsv.xz is newline-terminated.
    """
    # Context manager ensures the LZMA handle is closed (the original
    # lzma.open(...).read() leaked the file handle).
    with lzma.open(filename) as compressed:
        all_data = compressed.read().decode('UTF-8').split('\n')
    return [line.split('\t') for line in all_data][:-1]
|
||
|
"\n",
|
||
|
"train_data = read_data(KLEISTER_PATH/file_name/'in.tsv.xz')"
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "markdown",
|
||
|
"metadata": {},
|
||
|
"source": [
|
||
|
"## JURISDICTION"
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "code",
|
||
|
"execution_count": 8,
|
||
|
"metadata": {},
|
||
|
"outputs": [],
|
||
|
"source": [
|
||
|
# The 50 U.S. state names used for jurisdiction matching; multi-word names
# (e.g. 'New York') are later converted to underscore form for the output.
STATES = ['Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California', 'Colorado', 'Connecticut', 'Delaware','Florida',
          'Georgia', 'Hawaii', 'Idaho', 'Illinois', 'Indiana', 'Iowa', 'Kansas', 'Kentucky', 'Louisiana', 'Maine',
          'Maryland', 'Massachusetts', 'Michigan', 'Minnesota', 'Mississippi', 'Missouri', 'Montana', 'Nebraska', 'Nevada',
          'New Hampshire', 'New Jersey', 'New Mexico', 'New York', 'North Carolina', 'North Dakota', 'Ohio', 'Oklahoma',
          'Oregon', 'Pennsylvania', 'Rhode Island', 'South Carolina', 'South Dakota', 'Tennessee', 'Texas', 'Utah',
          'Vermont', 'Virginia', 'Washington', 'West Virginia', 'Wisconsin', 'Wyoming']
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "code",
|
||
|
"execution_count": 9,
|
||
|
"metadata": {},
|
||
|
"outputs": [
|
||
|
{
|
||
|
"name": "stdout",
|
||
|
"output_type": "stream",
|
||
|
"text": [
|
||
|
"false jurisdiction: 22\n"
|
||
|
]
|
||
|
}
|
||
|
],
|
||
|
"source": [
|
||
|
import spacy
# NOTE(review): `nlp` is loaded but never used below (it is only referenced in
# a comment inside normalize()); loading en_core_web_sm is slow — candidate
# for removal.
nlp = spacy.load("en_core_web_sm")
from operator import itemgetter

# Per-document jurisdiction predictions, filled in by check_jurisdiction().
jurisdiction = []
|
||
|
def normalize(text):
    """Lower-case `text` after flattening literal '\\n' sequences to spaces.

    The corpus stores newlines as the two-character sequence backslash + 'n',
    so a plain str.replace handles them.  (Tokenizing with nlp(text) was a
    considered alternative.)
    """
    flattened = text.replace('\\n', ' ')
    return flattened.lower()
|
||
|
"\n",
|
||
|
def check_jurisdiction(document):
    """Pick the most frequently mentioned U.S. state in a document.

    Scans the text columns (document[2:]), counts case-insensitive substring
    occurrences of each STATES entry, appends the winner (spaces replaced by
    underscores) to the global `jurisdiction` list, and returns
    (state_name, ranked_state_list); appends/returns None when no state occurs.
    """
    # state -> [occurrence count, offset of first occurrence within a chunk]
    states = {}
    for text in document[2:]:
        text = normalize(text)
        for state in STATES:
            if state.lower() in text:
                if state in states:
                    # NOTE(review): only the count accumulates across chunks;
                    # the stored offset stays from the first matching chunk.
                    states[state][0] += text.count(state.lower())
                else:
                    states[state] = [text.count(state.lower()), text.index(state.lower())]
    # NOTE(review): substring matching double-counts overlapping names,
    # e.g. 'Virginia' also matches inside 'West Virginia'.
    if states != {}:
        # Sort by [count, offset] descending: highest count wins; ties are
        # broken by the LARGER first-occurrence offset — confirm intended.
        states = sorted(states.items(), key=itemgetter(1), reverse=True)
        jurisdiction.append(states[0][0].replace(' ', '_'))
        return states[0][0], states
    else:
        jurisdiction.append(None)
        return None
|
||
|
" \n",
|
||
|
# Evaluate the jurisdiction heuristic against the labels (skipped for the
# unlabeled 'test-A' split) and count mismatches.
tmp = 0
for i in range(len(train_data)):
    tt = check_jurisdiction(train_data[i])
    if file_name != 'test-A':
        if tt == None:
            # NOTE(review): get_expected_data() never yields None (it uses the
            # string 'NONE'), so this inner condition is always true and every
            # no-prediction document is counted as an error.
            if train_expected_data[1][i] != None:
                # print(i, train_expected_data[1][i], tt)
                tmp += 1
        else:
            # Labels use underscores for spaces; undo that before comparing.
            if tt[0] != train_expected_data[1][i].replace('_', ' '):
                # print(i, train_expected_data[1][i], tt[0])
                tmp += 1
print('false jurisdiction:', tmp)
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "markdown",
|
||
|
"metadata": {},
|
||
|
"source": [
|
||
|
"## EFFECTIVE DATE"
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "code",
|
||
|
"execution_count": 10,
|
||
|
"metadata": {},
|
||
|
"outputs": [
|
||
|
{
|
||
|
"name": "stdout",
|
||
|
"output_type": "stream",
|
||
|
"text": [
|
||
|
"false effective date 42\n"
|
||
|
]
|
||
|
}
|
||
|
],
|
||
|
"source": [
|
||
|
import re
import datetime
# NOTE(review): the `date` name imported here is shadowed by parse_date's
# parameter below and is otherwise unused.
from datetime import date

# Per-document effective-date predictions (None when no date was found).
effective_date = []
|
||
|
def parse_date(date):
    """Format a datetime.date as 'YYYY-MM-DD' with zero-padded month and day.

    The year is emitted as-is (no padding), matching the original output.
    """
    month = str(date.month).zfill(2)
    day = str(date.day).zfill(2)
    return str(date.year) + '-' + month + '-' + day

def find_dates(text):
    """Return every date found in `text`, normalized to 'YYYY-MM-DD'.

    Tries each regex in `dic` once (first match per pattern), parses the
    match with the paired strptime format, and collects the formatted
    results; matches that strptime rejects are skipped.
    """
    all_dates = []

    # The corpus encodes newlines as the literal two characters '\n'.
    text = text.replace('\\n', ' ')

    # regex pattern -> strptime format for the date spellings seen in the corpus
    dic = {'\d{1,2}\/\d{1,2}\/\d{2}' : '%m/%d/%y',
           '[01]*[0-9]\/[01]*[0-9]\/\d{4}' : '%m/%d/%Y',
           '\w{3,9}?\s\d{1,2}?,\s\d{4}?' : '%B %d, %Y',
           '\w{3,9}?\s\d{1,2}?,\d{4}?' : '%B %d,%Y',
           '\d{1,2}?th\sday\sof\s\w{3,9}?\s\d{4}?' : '%dth day of %B %Y',
           '\d{1,2}?th\sday\sof\s\w{3,9}?,\s\d{4}?' : '%dth day of %B, %Y',
           '\d{1,2}?ND\sday\sof\s\w{3,9}?\s\d{4}?' : '%dND day of %B %Y',
           '\w{3,9}?\s\d{1,2}?th\s,\s\d{4}?' : '%B %dth , %Y',
           '\w{3,9}?\s\d{1,2}?th,\s\d{4}?' : '%B %dth, %Y',
           '\d{1,2}?\sday\sof\s\w{3,9}?,\s\d{4}?' : '%d day of %B, %Y',
           '\w{3,9}?\.\s\d{1,2}?,\s\d{4}?' : '%b. %d, %Y',
           '\d{1,2}?\s\w{3,9}?,\s\d{4}?' : '%d %B, %Y',
           '\d{1,2}?st\sday\sof\s\w{3,9}?\s,\s\d{4}?' : '%dst day of %B , %Y',
           '\d{1,2}?st\sday\sof\s\w{3,9}?,\s\d{4}?' : '%dst day of %B, %Y',
           '\d{1,2}?nd\sday\sof\s\w{3,9}?,\s\d{4}?' : '%dnd day of %B, %Y',
           '\d{1,2}\.\d{1,2}\.\d{2,4}' : '%m.%d.%y'
           }

    for d in dic:
        match = re.search(d, text)
        if match is not None:
            try:
                parsed = datetime.datetime.strptime(match.group(), dic[d]).date()
                all_dates.append(parse_date(parsed))
            except ValueError:
                # The pattern matched text that strptime rejects (e.g. a word
                # that is not a month name) — skip it.  The original bare
                # `except:` silently hid ALL errors; ValueError is what
                # strptime raises on a format mismatch.
                continue

    return all_dates
|
||
|
"\n",
|
||
|
def check_effective_date(text):
    """Wrap the dates found in `text` in a one-element list ([] when none)."""
    found = find_dates(text)
    return [found] if found else []
|
||
|
"\n",
|
||
|
# Score the date heuristic (when labels exist) and build the per-document
# `effective_date` prediction list.
test = 0
for i in range(len(train_data)):
    # Only the first text column (index 2) is searched for dates.
    xx = check_effective_date(train_data[i][2])
    if file_name != 'test-A':
        if train_expected_data[0][i] == 'NONE':
            if xx != []:
                # print(i, train_expected_data[0][i], xx[-1][0])
                test += 1
        else:
            if xx != []:
                # NOTE(review): scoring compares the LAST found date
                # (xx[0][-1]) but the output below keeps the FIRST one
                # (xx[-1][0]) — confirm this asymmetry is intended.
                if xx[0][-1] != train_expected_data[0][i]:
                    # print(i, train_expected_data[0][i], xx[-1][0])
                    test +=1
            else:
                # print(i, train_expected_data[0][i], xx)
                test += 1
    if xx != []:
        effective_date.append(xx[-1][0])
    else:
        effective_date.append(None)
print('false effective date', test)
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "markdown",
|
||
|
"metadata": {},
|
||
|
"source": [
|
||
|
"## PARTY"
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "code",
|
||
|
"execution_count": 11,
|
||
|
"metadata": {},
|
||
|
"outputs": [
|
||
|
{
|
||
|
"name": "stdout",
|
||
|
"output_type": "stream",
|
||
|
"text": [
|
||
|
"false party: 202\n"
|
||
|
]
|
||
|
}
|
||
|
],
|
||
|
"source": [
|
||
|
party = []

def check_party(document):
    """Guess the party name from the first 'X Y Z, Inc.'-style match.

    The matched span is title-cased, commas are stripped, spaces become
    underscores, and everything up to (and including) certain lead-in words
    such as 'Between' or 'And_' is trimmed off.  Returns None when the first
    text chunk has no match.
    Note: the function returns after examining only document[2] — the
    `return` sits inside the loop body, so later chunks are never reached.
    """
    # lead-in word -> number of characters to skip past it (its length plus
    # the underscore that follows, where applicable)
    lead_ins = {'And_' : 4,
                'From_' : 5,
                'For' : 4,
                'Between' : 8,
                'With' : 5,
                'Ceo' : 4,
                'To' : 3,
                }

    for chunk in document[2:]:
        chunk = chunk.replace('\\n', ' ')

        name = None
        found = re.search(r'\w*\s\w*\s\w*,\sInc\.', chunk)
        if found is None:
            found = re.search(r'\w*\s\w*\s\w*,\sINC\.', chunk)
        if found is not None:
            name = found.group().title().replace(',', '').replace(' ', '_')
            for word in lead_ins:
                if word in name:
                    name = name[name.index(word) + lead_ins[word]:]
            if name.startswith('_'):
                name = name[1:]
        return name
|
||
|
" \n",
|
||
|
# Score the party heuristic and collect per-document predictions.
tmp = 0
for i in range(len(train_data)):
    tt = check_party(train_data[i])
    party.append(tt)
    if file_name != 'test-A':
        # NOTE(review): labels use the string 'NONE' while check_party returns
        # None, so documents with no labeled party and no prediction still
        # count as mismatches here.
        if train_expected_data[2][i] != tt:
            tmp += 1
            # print(i, train_expected_data[2][i], tt)
print('false party:', tmp)
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "markdown",
|
||
|
"metadata": {},
|
||
|
"source": [
|
||
|
"## TERM"
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "code",
|
||
|
"execution_count": 12,
|
||
|
"metadata": {},
|
||
|
"outputs": [
|
||
|
{
|
||
|
"name": "stdout",
|
||
|
"output_type": "stream",
|
||
|
"text": [
|
||
|
"false term: 144\n"
|
||
|
]
|
||
|
}
|
||
|
],
|
||
|
"source": [
|
||
|
term = []

def check_term(document):
    """Find the first '(N) years' — or, failing that, '(N) months' — span.

    Scans the text columns (document[2:]) in order and returns the first hit
    with parentheses stripped and spaces replaced by underscores, e.g.
    '2_years' or '12_months'.  Returns None when no chunk matches.
    """
    for chunk in document[2:]:
        chunk = chunk.replace('\\n', ' ')

        found = re.search(r'\(\d*\)\syears', chunk)
        if found is None:
            found = re.search(r'\(\d*\)\smonths', chunk)
        if found is not None:
            return found.group().replace('(', '').replace(')', '').replace(' ', '_')
    return None
|
||
|
" \n",
|
||
|
# Score the term heuristic; unlike the party loop, a 'NONE' label paired
# with a None prediction is explicitly treated as correct here.
tmp = 0
for i in range(len(train_data)):
    tt = check_term(train_data[i])
    term.append(tt)
    if file_name != 'test-A':
        if train_expected_data[3][i] != tt:
            if train_expected_data[3][i] == 'NONE' and tt == None:
                pass
            else:
                # print(i, train_expected_data[3][i], tt)
                tmp += 1
print('false term:', tmp)
|
||
|
]
|
||
|
},
|
||
|
{
|
||
|
"cell_type": "code",
|
||
|
"execution_count": 13,
|
||
|
"metadata": {},
|
||
|
"outputs": [],
|
||
|
"source": [
|
||
|
import os

def write_output(effective_date, jurisdiction, party, term, out_path=None):
    """Write the four parallel prediction lists as a kleister-style out.tsv.

    Each output line holds tab-separated key=value fields for one document;
    fields whose prediction is None are omitted, so a document with no
    predictions yields an empty line.

    out_path: destination file; defaults to KLEISTER_PATH/file_name/'out.tsv'
    (backward compatible with the original no-argument call).
    """
    if out_path is None:
        out_path = KLEISTER_PATH/file_name/'out.tsv'
    # open(..., 'w') truncates, so the previous exists()/remove() dance was
    # unnecessary; the context manager guarantees the handle is closed even
    # if a write raises (the original leaked it on error).
    with open(out_path, 'w') as out_file:
        for doc in range(len(effective_date)):
            fields = []
            if effective_date[doc] is not None:
                fields.append('effective_date=' + effective_date[doc])
            if jurisdiction[doc] is not None:
                fields.append('jurisdiction=' + jurisdiction[doc])
            if party[doc] is not None:
                fields.append('party=' + party[doc])
            if term[doc] is not None:
                fields.append('term=' + term[doc])
            out_file.write('\t'.join(fields) + '\n')
|
||
|
" \n",
|
||
|
"write_output(effective_date, jurisdiction, party, term)"
|
||
|
]
|
||
|
}
|
||
|
],
|
||
|
"metadata": {
|
||
|
"kernelspec": {
|
||
|
"display_name": "Python 3",
|
||
|
"language": "python",
|
||
|
"name": "python3"
|
||
|
},
|
||
|
"language_info": {
|
||
|
"codemirror_mode": {
|
||
|
"name": "ipython",
|
||
|
"version": 3
|
||
|
},
|
||
|
"file_extension": ".py",
|
||
|
"mimetype": "text/x-python",
|
||
|
"name": "python",
|
||
|
"nbconvert_exporter": "python",
|
||
|
"pygments_lexer": "ipython3",
|
||
|
"version": "3.8.3"
|
||
|
}
|
||
|
},
|
||
|
"nbformat": 4,
|
||
|
"nbformat_minor": 4
|
||
|
}
|