Upload files to "my_runs/_sources"

parent cf82853fd3
commit 24e4693aba
@@ -0,0 +1,120 @@
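# Sacred experiment: trains a small Keras classifier on the lettuce
# growth dataset and records the run under `my_runs/`.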
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import regularizers
import numpy as np
import pandas as pd
from datetime import datetime
from importlib.metadata import version

from sacred import Experiment
from sacred.observers import FileStorageObserver

ex = Experiment("464903", interactive=True)
ex.observers.append(FileStorageObserver('my_runs'))

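# Hyperparameters exposed through Sacred's config scope.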
@ex.config
def my_config():
    num_epochs = 100
    dropout_layer_value = 0.4

@ex.capture
def prepare_info(num_epochs, dropout_layer_value, _run):
    _run.info["num_epochs"] = num_epochs
    _run.info["dropout_layer_value"] = dropout_layer_value
    _run.info["training_ts"] = datetime.now()

@ex.main
def train_and_evaluate(num_epochs, dropout_layer_value, _run):
    prepare_info()

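    # Register the dataset as a Sacred resource, then load it with pandas.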
    ex.open_resource('./lettuce_dataset_updated.csv', "r")
    dataset = pd.read_csv('./lettuce_dataset_updated.csv', encoding="ISO-8859-1")

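    # Print library versions for reproducibility.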
    print(version('tensorflow'))
    print(version('scikit-learn'))
    print(version('keras'))
    print(version('numpy'))
    print(version('pandas'))

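    # Pull the raw columns out as Python lists.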
    ph_level = dataset['pH Level'].values.tolist()
    temp_F = dataset['Temperature (F)'].values.tolist()
    humid = dataset['Humidity'].values.tolist()
    days = dataset['Growth Days'].values.tolist()
    plant_id = dataset['Plant_ID'].values.tolist()

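    # Collapse the per-day rows into one averaged feature row per plant;
    # the label is the plant's final 'Growth Days' value.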
    X = []
    Y = []

    current_id = plant_id[0]
    temp_sum = 0
    humid_sum = 0
    ph_level_sum = 0
    day = 1

    for i in range(len(plant_id)):
        if plant_id[i] == current_id:
            temp_sum += temp_F[i]
            humid_sum += humid[i]
            ph_level_sum += ph_level[i]
            day = days[i]
        else:
            X.append([temp_sum/day, humid_sum/day, ph_level_sum/day])
            Y.append(day)
            # start the new plant's sums from the current row so its
            # first reading is not dropped
            temp_sum = temp_F[i]
            humid_sum = humid[i]
            ph_level_sum = ph_level[i]
            day = days[i]
            current_id = plant_id[i]

    # flush the last plant's group, which the loop otherwise never emits
    X.append([temp_sum/day, humid_sum/day, ph_level_sum/day])
    Y.append(day)

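    # Scale features to [0, 1] and one-hot encode the growth-day labels.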
    scaler = MinMaxScaler()
    X = scaler.fit_transform(X)

    X = np.array(X)
    Y = np.array(Y)

    # scikit-learn 1.2 renamed OneHotEncoder's `sparse` argument to `sparse_output`
    encoder = OneHotEncoder(sparse_output=False)
    y_onehot = encoder.fit_transform(Y.reshape(-1, 1))

    X_train, X_test, y_train, y_test = train_test_split(X, y_onehot, test_size=0.4, random_state=42)

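    # Small MLP: two hidden ReLU layers with dropout and L2 regularization.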
    model = Sequential([
        Dense(8, activation='relu', input_dim=3, kernel_regularizer=regularizers.l2(0.04)),
        Dropout(dropout_layer_value),
        Dense(8, activation='relu', kernel_regularizer=regularizers.l2(0.04)),
        Dropout(dropout_layer_value),
        Dense(4, activation='softmax', kernel_regularizer=regularizers.l2(0.04)),
    ])

    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    history = model.fit(X_train, y_train, epochs=num_epochs, validation_data=(X_test, y_test), verbose=2)

    test_loss, test_accuracy = model.evaluate(X_test, y_test, verbose=2)

    print(history.history['val_accuracy'])

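    # log_scalar records one value per call, so the per-epoch series are
    # logged step by step.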
_run.log_scalar("train loss", history.history['loss'])
|
||||||
|
_run.log_scalar("train accuracy", history.history['accuracy'])
|
||||||
|
_run.log_scalar("test loss", test_loss)
|
||||||
|
_run.log_scalar("test accuracy", test_accuracy)
|
||||||
|
|
||||||
|
print(f"Dokładność testowa: {test_accuracy:.2%}")
|
||||||
|
|
||||||
|
model.evaluate(X_test, y_test)[1]
|
||||||
|
|
||||||
|
    model.save('./model.keras')
    ex.add_artifact("./model.keras")

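# Run the experiment and print the resolved config.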
ex.run()
print(my_config())
@@ -0,0 +1,120 @@
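# Second captured source revision of the same script; only the config
# values differ (num_epochs = 200, dropout_layer_value = 0.3).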
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import regularizers
import numpy as np
import pandas as pd
from datetime import datetime
from importlib.metadata import version

from sacred import Experiment
from sacred.observers import FileStorageObserver

ex = Experiment("464903", interactive=True)
ex.observers.append(FileStorageObserver('my_runs'))

@ex.config
def my_config():
    num_epochs = 200
    dropout_layer_value = 0.3

@ex.capture
def prepare_info(num_epochs, dropout_layer_value, _run):
    _run.info["num_epochs"] = num_epochs
    _run.info["dropout_layer_value"] = dropout_layer_value
    _run.info["training_ts"] = datetime.now()

@ex.main
def train_and_evaluate(num_epochs, dropout_layer_value, _run):
    prepare_info()

    ex.open_resource('./lettuce_dataset_updated.csv', "r")
    dataset = pd.read_csv('./lettuce_dataset_updated.csv', encoding="ISO-8859-1")

    print(version('tensorflow'))
    print(version('scikit-learn'))
    print(version('keras'))
    print(version('numpy'))
    print(version('pandas'))

    ph_level = dataset['pH Level'].values.tolist()
    temp_F = dataset['Temperature (F)'].values.tolist()
    humid = dataset['Humidity'].values.tolist()
    days = dataset['Growth Days'].values.tolist()
    plant_id = dataset['Plant_ID'].values.tolist()

    X = []
    Y = []

    current_id = plant_id[0]
    temp_sum = 0
    humid_sum = 0
    ph_level_sum = 0
    day = 1

    for i in range(len(plant_id)):
        if plant_id[i] == current_id:
            temp_sum += temp_F[i]
            humid_sum += humid[i]
            ph_level_sum += ph_level[i]
            day = days[i]
        else:
            X.append([temp_sum/day, humid_sum/day, ph_level_sum/day])
            Y.append(day)
            # start the new plant's sums from the current row so its
            # first reading is not dropped
            temp_sum = temp_F[i]
            humid_sum = humid[i]
            ph_level_sum = ph_level[i]
            day = days[i]
            current_id = plant_id[i]

    # flush the last plant's group, which the loop otherwise never emits
    X.append([temp_sum/day, humid_sum/day, ph_level_sum/day])
    Y.append(day)

    scaler = MinMaxScaler()
    X = scaler.fit_transform(X)

    X = np.array(X)
    Y = np.array(Y)

    # scikit-learn 1.2 renamed OneHotEncoder's `sparse` argument to `sparse_output`
    encoder = OneHotEncoder(sparse_output=False)
    y_onehot = encoder.fit_transform(Y.reshape(-1, 1))

    X_train, X_test, y_train, y_test = train_test_split(X, y_onehot, test_size=0.4, random_state=42)

    model = Sequential([
        Dense(8, activation='relu', input_dim=3, kernel_regularizer=regularizers.l2(0.04)),
        Dropout(dropout_layer_value),
        Dense(8, activation='relu', kernel_regularizer=regularizers.l2(0.04)),
        Dropout(dropout_layer_value),
        Dense(4, activation='softmax', kernel_regularizer=regularizers.l2(0.04)),
    ])

    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    history = model.fit(X_train, y_train, epochs=num_epochs, validation_data=(X_test, y_test), verbose=2)

    test_loss, test_accuracy = model.evaluate(X_test, y_test, verbose=2)

    print(history.history['val_accuracy'])

_run.log_scalar("train loss", history.history['loss'])
|
||||||
|
_run.log_scalar("train accuracy", history.history['accuracy'])
|
||||||
|
_run.log_scalar("test loss", test_loss)
|
||||||
|
_run.log_scalar("test accuracy", test_accuracy)
|
||||||
|
|
||||||
|
print(f"Dokładność testowa: {test_accuracy:.2%}")
|
||||||
|
|
||||||
|
model.evaluate(X_test, y_test)[1]
|
||||||
|
|
||||||
|
    model.save('./model.keras')
    ex.add_artifact("./model.keras")

ex.run()
print(my_config())