Fingers crossed for a good result
parent 9abd651db6
commit b647907ce4
dev-0/out.tsv — 15022 lines changed (file diff suppressed because it is too large)
predict.py — 86 lines changed:
@@ -8,46 +8,77 @@ from itertools import islice
 import json
 import pdb
 
-model_v = "4000"
+model_v = "1"
 PREFIX_VALID = 'test-A'
 
-probabilities = {}
-with open(f'model_{model_v}.tsv', 'r') as f:
+prob_4gram = {}
+with open(f'4_gram_model_{model_v}.tsv', 'r') as f:
     for line in f:
         line = line.rstrip()
         splitted_line = line.split('\t')
-        probabilities[tuple(splitted_line[:4])] = (float(splitted_line[4]), float(splitted_line[5]))
+        prob_4gram[tuple(splitted_line[:3])] = json.loads(splitted_line[-1])
+
+prob_3gram = {}
+# with open(f'3_gram_model_{model_v}.tsv', 'r') as f:
+#     for line in f:
+#         line = line.rstrip()
+#         splitted_line = line.split('\t')
+#         prob_3gram[tuple(splitted_line[:2])] = json.loads(splitted_line[-1])
+
+prob_2gram = {}
+# with open(f'2_gram_model_{model_v}.tsv', 'r') as f:
+#     for line in f:
+#         line = line.rstrip()
+#         splitted_line = line.split('\t')
+#         prob_2gram[tuple(splitted_line[0])] = json.loads(splitted_line[-1])
 
 vocab = set()
 with open(f"vocab_{model_v}.txt", 'r') as f:
     for l in f:
         vocab.add(l.rstrip())
 
-def count_probabilities(_probabilities, _chunk_left, _chunk_right):
-    for index, (l, r) in enumerate(zip( _chunk_left, _chunk_right)):
+# probabilities_bi = {}
+# with open(f'bigram_big_unk_20', 'r') as f:
+#     for line in f:
+#         line = line.rstrip()
+#         splitted_line = line.split('\t')
+#         probabilities_bi[tuple(splitted_line[:2])] = (float(splitted_line[2]), float(splitted_line[3]))
+
+def count_probabilities(prob_4gram_x, prob_3gram_x, prob_2gram_x, _chunk_left, _chunk_right):
+    for index, (l, r) in enumerate(zip(_chunk_left, _chunk_right)):
         if l not in vocab:
             _chunk_left[index] = "<UNK>"
         if r not in vocab:
             _chunk_right[index] = "<UNK>"
-
-    results_left = {}
-    best_ = {}
-    for tetragram, probses in _probabilities.items():
-        if tetragram[-1] == "<UNK>":
-            return best_
-        if len(results_left) > 2:
-            break
-        if list(tetragram[:3]) == _chunk_left:
-            # for tetragram_2, probses_2 in _probabilities.items():
-            #     if list(tetragram_2[1:]) == _chunk_right:
-            #         best_[tetragram[-1]] = probses[0] * probses_2[1]
-            if tetragram[-1] not in best_:
-                best_[tetragram[-1]] = probses[0] * 0.7
-    items = best_.items()
+    _chunk_left = tuple(_chunk_left)
+    _chunk_right = tuple(_chunk_right)
+
+    hyps_4 = prob_4gram_x.get(_chunk_left)
+    # if _chunk_left not in prob_3gram_x:
+    #     return {}
+    # hyps_3 = prob_3gram_x.get(_chunk_left)
+    # if _chunk_left not in prob_2gram_x:
+    #     return {}
+    # hyps_2 = prob_2gram_x.get(_chunk_left)
+
+    if hyps_4 is None:
+        return {}
+
+    items = hyps_4.items()
     return OrderedDict(sorted(items, key=lambda t:t[1], reverse=True))
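Aside on the new model format: train.py (further down in this commit) now writes each 4-gram entry as three tab-separated context words followed by a JSON object mapping candidate next words to probabilities, which is exactly what the loader above parses. A minimal sketch of that round trip, with an invented row (the words and probabilities are made up for illustration):

import json

# One invented row in the 4_gram_model_*.tsv format written by train.py:
# three context words, then a JSON dict of {next_word: probability}.
row = "in\tthe\tmiddle\t" + json.dumps({"of": 0.62, "east": 0.11})

prob_4gram = {}
splitted_line = row.rstrip().split('\t')
prob_4gram[tuple(splitted_line[:3])] = json.loads(splitted_line[-1])

# Lookup is now a single dict access on the 3-word left context,
# instead of the old scan over every stored 4-gram.
assert prob_4gram[("in", "the", "middle")] == {"of": 0.62, "east": 0.11}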
@@ -60,6 +91,8 @@ with lzma.open(f'{PREFIX_VALID}/in.tsv.xz', 'r') as train:
     t_line = t_line.lower()
 
+    t_line = t_line.replace("\\\\n", ' ')
+
     t_line_splitted_by_tab = t_line.split('\t')
 
     words_before = t_line_splitted_by_tab[-2]
@@ -72,7 +105,7 @@ with lzma.open(f'{PREFIX_VALID}/in.tsv.xz', 'r') as train:
     chunk_left = words_before[-3:]
     chunk_right = words_after[0:3]
 
-    probs_ordered = count_probabilities(probabilities, chunk_left, chunk_right)
+    probs_ordered = count_probabilities(prob_4gram, prob_3gram, prob_2gram, chunk_left, chunk_right)
 
     # if len(probs_ordered) !=0:
     #     print(probs_ordered)
@@ -82,12 +115,18 @@ with lzma.open(f'{PREFIX_VALID}/in.tsv.xz', 'r') as train:
         continue
 
     result_string = ''
     counter_ = 0
+    p_sum = 0
 
     for word_, p in probs_ordered.items():
-        if counter_>4:
+        if counter_>30:
             break
         re_ = re.search(r'\p{L}+', word_)
         if re_:
             word_cleared = re_.group(0)
+            p = p*0.9
+            p_sum += p
             result_string += f"{word_cleared}:{str(p)} "
         else:
             if result_string == '':
@@ -95,6 +134,7 @@ with lzma.open(f'{PREFIX_VALID}/in.tsv.xz', 'r') as train:
                 continue
 
         counter_+=1
-    result_string += ':0.2'
+    res = 1 - p_sum
+    result_string += f':{res}'
     print(result_string)
     a=1
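The change in that last hunk replaces the fixed ':0.2' tail with the leftover probability mass: each emitted word's probability is damped by 0.9, the damped values accumulate in p_sum, and 1 - p_sum is written as the bare ':mass' entry covering all unlisted words. A toy illustration of the output line format, with invented hypotheses:

hyps = {"of": 0.62, "east": 0.11, "west": 0.05}   # invented probabilities

result_string = ''
p_sum = 0
for word_, p in hyps.items():
    p = p * 0.9        # same damping factor the new predict.py applies
    p_sum += p
    result_string += f"{word_}:{p} "
result_string += f":{1 - p_sum}"   # remaining mass for unseen words
print(result_string)
# of:0.558 east:0.099 west:0.045 :0.298  (up to float rounding)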
test-A/out.tsv — 14828 lines changed (file diff suppressed because it is too large)
train.py — 108 lines changed:
@@ -7,8 +7,8 @@ import regex as re
 from itertools import islice
 import json
 import tqdm
-ignore_rare = 4000
-model_v = '4000'
+ignore_rare = 15000  # 7500 -> perplexity 511.51, 9000 -> 505, 15000 -> 503
+model_v = '1'
 
 
 def freq_list(g, top=None):
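The comment on ignore_rare records the author's measured perplexity at each rarity cutoff (511.51 at 7500, 505 at 9000, 503 at 15000). For reference, perplexity here is the exponentiated negative mean log-probability the model assigns to the gold words; a self-contained sketch with invented probabilities:

import math

p_true = [0.05, 0.002, 0.0001, 0.01]   # invented per-word probabilities

# perplexity = exp(-(1/N) * sum(log p))
perplexity = math.exp(-sum(math.log(p) for p in p_true) / len(p_true))
print(perplexity)   # about 316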
@@ -56,6 +56,7 @@ with lzma.open(f'{PREFIX_TRAIN}/in.tsv.xz', 'r') as train, open(f'{PREFIX_TRAIN}
     t_line_cleared = t_line_cleared.lower()
 
+    t_line_cleared = t_line_cleared.replace("\\\\n", ' ')
 
     words += re.findall(r'\p{L}+', t_line_cleared)
@@ -67,7 +68,7 @@ with lzma.open(f'{PREFIX_TRAIN}/in.tsv.xz', 'r') as train, open(f'{PREFIX_TRAIN}
     print(counter_lines)
 
     counter_lines+=1
-    if counter_lines > 70000: # 50000 12gb ram
+    if counter_lines > 130000: # 50000 12gb ram
         break
 
 words_c = Counter(words)
@@ -78,34 +79,97 @@ with open(f'vocab_{model_v}.txt', 'w') as f:
         continue
     f.write(word + '\n')
 
+with open(f'vocab_{model_v}.txt', 'w') as f:
+    for word, amount in words_c.items():
+        if amount < ignore_rare:
+            continue
+        f.write(word + '\n')
+
+
+def create_model(grams4, trigrams):
+    model = {}
+    for gram4, amount4 in grams4.items():
+        trigram = gram4[:-1]
+        last_word = gram4[-1]
+        if last_word == "<UNK>":
+            continue
+
+        probibility = amount4 / trigrams[trigram]
+
+        if trigram in model:
+            model[trigram][last_word] = probibility
+            continue
+        model[trigram] = {last_word: probibility}
+    return model
+
+
+def create_bigram_model(bigram_x, word_c_x):
+    model = {}
+    for gram4, amount4 in bigram_x.items():
+        word_key = gram4[0]
+        last_word = gram4[1]
+        if last_word == "<UNK>" or word_key=="<UNK>":
+            continue
+        try:
+            probibility = amount4 / word_c_x[word_key]
+        except:
+            print(gram4)
+            print(word_key)
+            print(last_word)
+            raise Exception
+        if word_key in model:
+            model[word_key][last_word] = probibility
+            continue
+        model[word_key] = {last_word: probibility}
+    return model
+
+
 trigrams_ = ngrams(words, 3, words_c)
 tetragrams_ = ngrams(words, 4, words_c)
 
+trigram_c = Counter(trigrams_)
+trigrams_ = ''
+tetragrams_c = Counter(tetragrams_)
+tetragrams_ = ''
+model = create_model(tetragrams_c, trigram_c)
 
-def create_probabilities_bigrams(trigrams, tetragrams):
-    probabilities_grams = {}
-    for tetragram, gram_amount in tetragrams.items():
-        # if bigram_amount <=2:
-        #     continue
-        p_word_right = gram_amount / trigrams[tetragram[:-1]]
-        p_word_left = gram_amount / trigrams[tetragram[1:]]
-        probabilities_grams[tetragram] = (str(p_word_right), str(p_word_left))
-    return probabilities_grams
+with open(f'4_gram_model_{model_v}.tsv', 'w') as f:
+    for trigram, hyps in model.items():
+        f.write("\t".join(trigram) + "\t" + json.dumps(hyps) + '\n')
+
+# ========= Trigram
+model=""
+
+trigrams_ = ngrams(words, 3, words_c)
+bigrams_ = ngrams(words, 2, words_c)
 
 trigram_c = Counter(trigrams_)
-word_=''
-tetragrams_ = Counter(tetragrams_)
-probabilities = create_probabilities_bigrams(trigram_c, tetragrams_)
+trigrams_ = ''
+bigram_c = Counter(bigrams_)
+bigrams_ = ''
+
+model = create_model(trigram_c, bigram_c)
+trigram_c = ""
+
+with open(f'3_gram_model_{model_v}.tsv', 'w') as f:
+    for trigram, hyps in model.items():
+        f.write("\t".join(trigram) + "\t" + json.dumps(hyps) + '\n')
+model = ""
+
+# ========= Bigram
+model=""
 
-items = probabilities.items()
-probabilities = OrderedDict(sorted(items, key=lambda t:t[1], reverse=True))
-items=''
+bigrams_ = ngrams(words, 2, words_c)
+bigram_c = Counter(bigrams_)
+bigrams_ = ''
+model = create_bigram_model(bigram_c, words_c)
 
-with open(f'model_{model_v}.tsv', 'w') as f:
-    for tetragram, left_right_p in probabilities.items():
-        f.write("\t".join(tetragram) + "\t" + "\t".join(left_right_p) + '\n')
+with open(f'2_gram_model_{model_v}.tsv', 'w') as f:
+    for trigram, hyps in model.items():
+        f.write(trigram + "\t" + json.dumps(hyps) + '\n')
+model = ""
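The new create_model is a maximum-likelihood estimate: each 4-gram count divided by the count of its 3-gram prefix gives P(last word | three preceding words), and create_bigram_model does the same with bigram and unigram counts. A condensed sketch of the same arithmetic with invented counts (not the committed code):

from collections import Counter

# Invented counts: the trigram ("in", "the", "middle") occurred 10 times,
# followed 6 times by "of" and 4 times by "east".
tetragrams_c = Counter({("in", "the", "middle", "of"): 6,
                        ("in", "the", "middle", "east"): 4})
trigram_c = Counter({("in", "the", "middle"): 10})

model = {}
for gram4, amount4 in tetragrams_c.items():
    trigram, last_word = gram4[:-1], gram4[-1]
    # MLE: count(w1 w2 w3 w4) / count(w1 w2 w3)
    model.setdefault(trigram, {})[last_word] = amount4 / trigram_c[trigram]

print(model)   # {('in', 'the', 'middle'): {'of': 0.6, 'east': 0.4}}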