import numpy as np
from numpy import loadtxt
import pandas as pd
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout

products = ['glass', 'mixed', 'metal', 'paper']
n_products = len(products)

data = pd.read_csv("data.csv")
# Drop the leftover 'Unnamed: 0' index column from the CSV.
dff = data.loc[:, data.columns != 'Unnamed: 0']

"""#podział danych na treningowe i testowe"""
|
|
|
|
#jest osiem zmiennych wejściowych i jedna zmienna wyjściowa (ostatnia kolumna).
|
|
|
|
X = dff.drop(labels=['label'], axis=1)
y = dff.label

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Keep a copy of the held-out test set on disk for later use.
X_test.to_csv("X_test.csv")
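
# A minimal sketch (assumption: the 'label' column might store product names
# rather than integer class indices). sparse_categorical_crossentropy, used
# below, expects integer indices, so map names onto their positions in
# `products`; if the labels are already integers, this block is skipped.
if y.dtype == object:
    label_to_index = {name: i for i, name in enumerate(products)}
    y_train = y_train.map(label_to_index)
    y_test = y_test.map(label_to_index)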
"""## Budowa sieci neuronowej"""
|
|
|
|
|
|
|
|
|
|
model = tf.keras.models.Sequential([
    # Hidden layer: 10 units over the 8 input features.
    Dense(10, input_shape=[8], activation='relu'),
    # Dropout(0.2),
    # Output layer: one probability per product class.
    Dense(n_products, activation='softmax')
])

model.summary()

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

# Stop training once validation accuracy has not improved for 4 consecutive epochs.
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=4)

history = model.fit(x=X_train, y=y_train, validation_split=0.1, epochs=100, batch_size=32, callbacks=[early_stopping])

# Epoch: one full pass through all rows of the training dataset.
# Batch_size: the number of rows processed before the model weights are updated within an epoch.
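
# A minimal sketch (assumption: matplotlib is installed) that plots the training
# curves stored in the History object returned by model.fit above.
import matplotlib.pyplot as plt

plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.savefig('training_history.png')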

# model.predict(X_test)
# model.evaluate(X_test, y_test)

model.save('./saved_model')
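
# A minimal sketch (assumption: the variable names below are illustrative) showing
# how the saved model and the exported test set could be reloaded for inference,
# e.g. in a separate script or session.
reloaded = tf.keras.models.load_model('./saved_model')
X_test_df = pd.read_csv("X_test.csv", index_col=0)
predictions = reloaded.predict(X_test_df)
predicted_products = [products[i] for i in np.argmax(predictions, axis=1)]
print(predicted_products[:10])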