DL_Project/project.ipynb


import pandas as pd
import numpy as np
import re
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

# Load the data
data = pd.read_csv('spam.csv')

def preprocess_text(text):
    # Replace non-word characters with spaces, lowercase,
    # and collapse runs of whitespace into single spaces
    text = re.sub(r'\W', ' ', text)
    text = text.lower()
    text = ' '.join(text.split())
    return text

data['Message'] = data['Message'].apply(preprocess_text)
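As a quick sanity check, the cleaning step can be exercised on a made-up message (the string below is illustrative, not taken from the dataset):

sample = "WINNER!! Claim your £1000 prize now!!!"
print(preprocess_text(sample))  # -> winner claim your 1000 prize now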

TF-IDF + Naive Bayes

# Split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(data['Message'], data['Category'], test_size=0.2, random_state=0)

# TF-IDF vectorization
tfidf = TfidfVectorizer(max_features=3000)
X_train_tfidf = tfidf.fit_transform(X_train)
X_test_tfidf = tfidf.transform(X_test)
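A quick look at the vectorizer output helps confirm the setup (a minimal sketch; get_feature_names_out assumes scikit-learn 1.0+):

print(X_train_tfidf.shape)                 # (n_train_messages, 3000)
print(tfidf.get_feature_names_out()[:10])  # a sample of the learned vocabulary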

# Naive Bayes model
nb_model = MultinomialNB()
nb_model.fit(X_train_tfidf, y_train)
y_pred = nb_model.predict(X_test_tfidf)
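MultinomialNB exposes per-class log-probabilities of features, which can be used to peek at the highest-probability terms under the spam class (a sketch reusing the fitted tfidf and nb_model above):

spam_idx = list(nb_model.classes_).index('spam')
top10 = np.argsort(nb_model.feature_log_prob_[spam_idx])[-10:]
print(tfidf.get_feature_names_out()[top10])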

# Evaluation
print('Naive Bayes TF-IDF:')
print(f'Accuracy: {accuracy_score(y_test, y_pred)}')
print(f'Precision: {precision_score(y_test, y_pred, pos_label="spam")}')
print(f'Recall: {recall_score(y_test, y_pred, pos_label="spam")}')
print(f'F1-score: {f1_score(y_test, y_pred, pos_label="spam")}')
Naive Bayes TF-IDF:
Accuracy: 0.97847533632287
Precision: 1.0
Recall: 0.85
F1-score: 0.918918918918919
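A confusion matrix makes the precision/recall trade-off above easier to read: with precision 1.0 and recall 0.85, every error is a spam message classified as ham. A minimal sketch using the same predictions:

from sklearn.metrics import confusion_matrix
# Rows are true labels, columns are predictions, in the order ['ham', 'spam']
print(confusion_matrix(y_test, y_pred, labels=['ham', 'spam']))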

RNN (LSTM)

from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

# Reload the raw data; the Keras Tokenizer lowercases and strips punctuation
# itself, so the manual preprocessing step is not repeated here
data = pd.read_csv('spam.csv')

# Tokenization and padding
tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(data['Message'])
X = tokenizer.texts_to_sequences(data['Message'])
X = pad_sequences(X, maxlen=100)
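A shape check confirms what the tokenizer and padding produce (note that with the default Tokenizer settings there is no OOV token, so words outside the learned vocabulary are silently dropped):

print(X.shape)                    # (n_messages, 100)
print(len(tokenizer.word_index))  # full vocabulary; texts_to_sequences keeps only the 5000 most frequent words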

X_train, X_test, y_train, y_test = train_test_split(X, data['Category'], test_size=0.2, random_state=0)

# Map the 'ham' and 'spam' labels to numeric values
label_mapping = {'ham': 0, 'spam': 1}
y_train = y_train.map(label_mapping)
y_test = y_test.map(label_mapping)
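SMS spam corpora like this one are typically heavily skewed toward 'ham', which is why the spam-class precision and recall reported above are more informative than raw accuracy. A quick check of the balance:

print(data['Category'].value_counts())  # overall class counts
print(y_train.mean())                   # fraction of spam in the training split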

# LSTM model
model = Sequential()
model.add(Embedding(input_dim=5000, output_dim=128))  # input_length is deprecated in recent Keras and can simply be omitted
model.add(LSTM(128))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=5, batch_size=32, validation_split=0.1)

# Threshold the sigmoid probabilities at 0.5 to obtain hard 0/1 labels
y_pred = (model.predict(X_test) > 0.5).astype("int32")

# Evaluation
print('LSTM:')
print(f'Accuracy: {accuracy_score(y_test, y_pred)}')
print(f'Precision: {precision_score(y_test, y_pred)}')
print(f'Recall: {recall_score(y_test, y_pred)}')
print(f'F1-score: {f1_score(y_test, y_pred)}')

Epoch 1/5
126/126 ━━━━━━━━━━━━━━━━━━━━ 9s 54ms/step - accuracy: 0.9047 - loss: 0.3002 - val_accuracy: 0.9843 - val_loss: 0.0670
Epoch 2/5
126/126 ━━━━━━━━━━━━━━━━━━━━ 7s 53ms/step - accuracy: 0.9902 - loss: 0.0401 - val_accuracy: 0.9865 - val_loss: 0.0522
Epoch 3/5
126/126 ━━━━━━━━━━━━━━━━━━━━ 7s 52ms/step - accuracy: 0.9972 - loss: 0.0149 - val_accuracy: 0.9843 - val_loss: 0.0582
Epoch 4/5
126/126 ━━━━━━━━━━━━━━━━━━━━ 6s 47ms/step - accuracy: 0.9983 - loss: 0.0078 - val_accuracy: 0.9865 - val_loss: 0.0601
Epoch 5/5
126/126 ━━━━━━━━━━━━━━━━━━━━ 6s 49ms/step - accuracy: 0.9974 - loss: 0.0071 - val_accuracy: 0.9865 - val_loss: 0.0628
35/35 ━━━━━━━━━━━━━━━━━━━━ 1s 19ms/step
LSTM:
Accuracy: 0.9856502242152466
Precision: 0.9615384615384616
Recall: 0.9375
F1-score: 0.9493670886075949
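To classify new text with the trained LSTM, the same tokenizer and padding must be applied at inference time. A minimal sketch (predict_message is a hypothetical helper, not part of the original notebook):

def predict_message(msg):
    seq = tokenizer.texts_to_sequences([msg])
    padded = pad_sequences(seq, maxlen=100)
    prob = float(model.predict(padded, verbose=0)[0][0])
    return 'spam' if prob > 0.5 else 'ham'

print(predict_message('Congratulations, you won a free prize! Call now'))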