small fix

s464967 2024-06-11 11:38:31 +02:00
parent 134b585d49
commit f491aaac21
1 changed file with 3 additions and 21 deletions

main.py

@@ -3,27 +3,23 @@ from datasets import load_dataset, load_metric
 from transformers import AutoTokenizer, AutoModelForTokenClassification, TrainingArguments, Trainer, DataCollatorForTokenClassification
 import numpy as np
 
-# Load the CoNLL-2003 dataset with trust_remote_code
 dataset = load_dataset("conll2003", trust_remote_code=True)
 
-# Load the tokenizer
 tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
 
-# Define label list and map labels to IDs
 label_list = dataset['train'].features['ner_tags'].feature.names
 
-# Tokenize and align labels function
 def tokenize_and_align_labels(examples):
     tokenized_inputs = tokenizer(examples['tokens'], truncation=True, padding='max_length', is_split_into_words=True)
     labels = []
     for i, label in enumerate(examples['ner_tags']):
-        word_ids = tokenized_inputs.word_ids(batch_index=i) # Map tokens to their respective word.
+        word_ids = tokenized_inputs.word_ids(batch_index=i)
         previous_word_idx = None
         label_ids = []
-        for word_idx in word_ids: # Set the special tokens to -100.
+        for word_idx in word_ids:
            if word_idx is None:
                label_ids.append(-100)
-            elif word_idx != previous_word_idx: # Only label the first token of a given word.
+            elif word_idx != previous_word_idx:
                label_ids.append(label[word_idx])
            else:
                label_ids.append(-100)
@@ -32,20 +28,15 @@ def tokenize_and_align_labels(examples):
     tokenized_inputs["labels"] = labels
     return tokenized_inputs
 
-# Tokenize the datasets
 tokenized_datasets = dataset.map(tokenize_and_align_labels, batched=True)
 
-# Split the dataset into training and evaluation sets
 train_dataset = tokenized_datasets["train"]
 eval_dataset = tokenized_datasets["validation"]
 
-# Load the model
 model = AutoModelForTokenClassification.from_pretrained("bert-base-cased", num_labels=len(label_list))
 
-# Data collator for token classification
 data_collator = DataCollatorForTokenClassification(tokenizer)
 
-# Training arguments
 training_args = TrainingArguments(
     output_dir='./results',
     evaluation_strategy="epoch",
@@ -56,7 +47,6 @@ training_args = TrainingArguments(
     weight_decay=0.01,
 )
 
-# Define the trainer
 trainer = Trainer(
     model=model,
     args=training_args,
@@ -69,34 +59,26 @@ trainer = Trainer(
     },
 )
 
-# Train the model
 trainer.train()
 
-# Evaluate the model
 results = trainer.evaluate()
 
-# Print the results
 print("Evaluation results:", results)
 
-# Predict on the evaluation set
 predictions, labels, _ = trainer.predict(eval_dataset)
 predictions = np.argmax(predictions, axis=2)
 
-# Convert the predictions and labels to the original tags
 true_labels = [[label_list[l] for l in label if l != -100] for label in labels]
 true_predictions = [
     [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
     for prediction, label in zip(predictions, labels)
 ]
 
-# Create a DataFrame for the results
 results_df = pd.DataFrame({
     'tokens': eval_dataset['tokens'],
     'true_labels': true_labels,
     'predicted_labels': true_predictions
 })
 
-# Save the results to a CSV file
 results_df.to_csv('mnt/data/ner_results.csv', index=False)
 print("NER analysis results have been saved to 'mnt/data/ner_results.csv'.")