Commit 68537ae8d2 by s430705 (parent 206774da84)
dev-0/out.tsv: 14179 changes (file diff suppressed because it is too large)
run.py: 46 changes
@@ -13,12 +13,6 @@ DEFAULT_PREDICTION = 'the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1'
 def preprocess_text(text):
-    # normalize text
-    text = (
-        unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode(
-            'utf-8', 'ignore'))
-    # replace html chars with ' '
-    text = re.sub('<.*?>', ' ', text)
     # remove punctuation
     text = text.translate(str.maketrans(' ', ' ', string.punctuation))
     # only alphabets and numerics
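With the normalization and HTML-stripping lines removed, the context lines suggest preprocess_text now only strips punctuation and filters to alphanumeric characters. A minimal sketch of the simplified function, assuming the alphanumeric step is a plain regex and that the function returns the cleaned string (neither detail is visible in the diff):

    import re
    import string

    def preprocess_text(text):
        # remove punctuation
        text = text.translate(str.maketrans(' ', ' ', string.punctuation))
        # only alphabets and numerics (assumed regex; the real line is outside the shown context)
        text = re.sub(r'[^a-zA-Z0-9 ]', ' ', text)
        return text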
@@ -56,18 +50,6 @@ def predict_probs(word1, word2):
     return str_prediction
-
-
-def prepare_output(file_path):
-    with open(file_path, 'w') as file:
-        for index, row in test_data.iterrows():
-            text = preprocess_text(str(row[7]))
-            words = word_tokenize(text)
-            if len(words) < 4:
-                prediction = DEFAULT_PREDICTION
-            else:
-                prediction = predict_probs(words[0], words[1])
-            file.write(prediction + '\n')
-
-
 def train_model(training_data):
     for index, row in training_data.iterrows():
         text = preprocess_text(str(row["final"]))
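predict_probs appears here only through its signature and its str_prediction return value, and train_model only through its row loop; their bodies lie outside the shown context. A self-contained sketch of one way such a two-word-context predictor could work, producing the same word:prob format as DEFAULT_PREDICTION (the counting structure and the fallback are assumptions, not the repository's code):

    from collections import Counter, defaultdict

    # hypothetical store: how often each word follows a (word1, word2) context
    context_counts = defaultdict(Counter)

    def train_counts_sketch(tokenized_texts):
        for words in tokenized_texts:
            for w1, w2, w3 in zip(words, words[1:], words[2:]):
                context_counts[(w1, w2)][w3] += 1

    def predict_probs_sketch(word1, word2):
        followers = context_counts.get((word1, word2))
        if not followers:
            # unseen context: fall back to the fixed distribution used as DEFAULT_PREDICTION
            return 'the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1'
        total = sum(followers.values())
        top = followers.most_common(5)
        parts = [f'{word}:{count / total:.2f}' for word, count in top]
        remaining = 1.0 - sum(count for _, count in top) / total
        parts.append(f':{max(remaining, 0.01):.2f}')  # leftover mass for unseen words
        return ' '.join(parts)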
@@ -90,15 +72,16 @@ data = pd.read_csv(
     warn_bad_lines=False,
     header=None,
     quoting=csv.QUOTE_NONE,
-    nrows=200000,
+    nrows=100000,
 )
 
 train_labels = pd.read_csv(
     "train/expected.tsv",
     sep="\t",
     error_bad_lines=False,
     header=None,
     quoting=csv.QUOTE_NONE,
-    nrows=200000,
+    nrows=100000,
 )
 
 train_data = data[[6, 7]]
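Note that error_bad_lines and warn_bad_lines are deprecated since pandas 1.3 and were removed in pandas 2.0. If this loader ever needs to run on a newer pandas, the equivalent call would use on_bad_lines; a sketch under that assumption (the train/in.tsv.xz path is a guess by analogy with test-A/in.tsv.xz, since the original path sits above the shown context):

    import csv
    import pandas as pd

    data = pd.read_csv(
        "train/in.tsv.xz",      # assumed path; not visible in this hunk
        sep="\t",
        on_bad_lines="skip",    # replaces error_bad_lines=False / warn_bad_lines=False
        header=None,
        quoting=csv.QUOTE_NONE,
        nrows=100000,
    )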
@@ -113,5 +96,24 @@ test_data = pd.read_csv('test-A/in.tsv.xz', sep='\t', error_bad_lines=False, war
 
 
 train_model(train_data)
-prepare_output("dev-0/out.tsv")
-prepare_output("test-A/out.tsv")
+with open("dev-0/out.tsv", "w") as file:
+    for _, row in dev_data.iterrows():
+        text = preprocess_text(str(row[7]))
+        words = word_tokenize(text)
+        if len(words) < 3:
+            prediction = "the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1"
+        else:
+            prediction = predict_probs(words[0], words[1])
+        file.write(prediction + "\n")
+
+with open("test-A/out.tsv", "w") as file:
+    for _, row in test_data.iterrows():
+        text = preprocess_text(str(row[7]))
+        words = word_tokenize(text)
+        if len(words) < 3:
+            prediction = "the:0.2 be:0.2 to:0.2 of:0.1 and:0.1 a:0.1 :0.1"
+        else:
+            prediction = predict_probs(words[0], words[1])
+        file.write(prediction + "\n")
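The two new output blocks are identical apart from the dataframe and the target path, and the removed prepare_output helper had test_data hard-coded inside it, so its "dev-0/out.tsv" call was actually iterating the test set. A sketch of how the same logic could be kept in a single parameterized helper (write_predictions is an illustrative name, not part of the commit):

    def write_predictions(dataframe, out_path):
        # one prediction line per row, falling back to the default for very short texts
        with open(out_path, "w") as file:
            for _, row in dataframe.iterrows():
                text = preprocess_text(str(row[7]))
                words = word_tokenize(text)
                if len(words) < 3:
                    prediction = DEFAULT_PREDICTION
                else:
                    prediction = predict_probs(words[0], words[1])
                file.write(prediction + "\n")

    write_predictions(dev_data, "dev-0/out.tsv")
    write_predictions(test_data, "test-A/out.tsv")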
test-A/out.tsv: 9640 changes (file diff suppressed because it is too large)