Upload files to ''

Michał Dudziak 2023-05-05 17:11:39 +02:00
parent 4e512639d3
commit f013b7f9c6
3 changed files with 4675 additions and 0 deletions

predict.ipynb (new file, 70 lines)

@@ -0,0 +1,70 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"id": "47153112-da26-4dbd-a32a-1abdd8bda4fa",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"2949/2949 [==============================] - 1s 462us/step\n"
]
}
],
"source": [
"import tensorflow as tf\n",
"import pandas as pd\n",
"import numpy as np\n",
"import sklearn\n",
"import sklearn.model_selection\n",
"from tensorflow.keras.models import load_model\n",
"\n",
"feature_cols = ['year', 'mileage', 'vol_engine']\n",
"\n",
"model = load_model('model.h5')\n",
"test_data = pd.read_csv('test.csv')\n",
"\n",
"predictions = model.predict(test_data[feature_cols])\n",
"predicted_prices = [p[0] for p in predictions]\n",
"\n",
"\n",
"results = pd.DataFrame({'id': test_data['id'], 'year': test_data['year'], 'mileage': test_data['mileage'], 'vol_engine': test_data['vol_engine'], 'predicted_price': predicted_prices})\n",
"results.to_csv('predictions.csv', index=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bca76252-c90d-4343-8ff8-a665cd32cf26",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
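
The notebook above writes the predicted prices next to the input features. A minimal sanity-check sketch (not part of the commit; it assumes test.csv still carries the ground-truth price column, as it does when produced by sacred.py below) could compare the exported predictions with the actual prices:

import pandas as pd

# Read back the test set and the notebook's output; both preserve row order.
test_data = pd.read_csv('test.csv')
results = pd.read_csv('predictions.csv')

# Mean absolute error between predicted and actual prices
# (the presence of a 'price' column in test.csv is an assumption).
mae = (results['predicted_price'] - test_data['price']).abs().mean()
print(f'MAE on the exported set: {mae:.2f}')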

sacred.py (new file, 66 lines)

@@ -0,0 +1,66 @@
import tensorflow as tf
from sacred import Experiment
from sacred.observers import FileStorageObserver
import pandas as pd
import sklearn
import sklearn.model_selection
import numpy as np

ex = Experiment('452662')
ex.observers.append(FileStorageObserver.create('my_runs'))
# ex.observers.append(MongoObserver(url='mongodb://admin:IUM_2021@172.17.0.1:27017', db_name='sacred'))


def normalize(df, feature_name):
    result = df.copy()
    max_value = df[feature_name].max()
    min_value = df[feature_name].min()
    result[feature_name] = (df[feature_name] - min_value) / (max_value - min_value)
    return result
@ex.automain
def run_experiment():
    cars = pd.read_csv('zbior_ium/Car_Prices_Poland_Kaggle.csv')
    cars = cars.drop(73436)  # row with invalid data
    cars_normalized = normalize(cars, 'vol_engine')

    # Split off a held-out set, then split it in half into dev and test parts
    cars_train, cars_test = sklearn.model_selection.train_test_split(cars_normalized, test_size=23586, random_state=1)
    cars_dev, cars_test = sklearn.model_selection.train_test_split(cars_test, test_size=11793, random_state=1)

    # Rename the first column to 'id' and export the split; predict.ipynb reads this file back as test.csv
    cars_train.rename(columns={list(cars_train)[0]: 'id'}, inplace=True)
    cars_train.to_csv('test.csv')

    feature_cols = ['year', 'mileage', 'vol_engine']
    inputs = tf.keras.Input(shape=(len(feature_cols),))

    # Neural network layers
    x = tf.keras.layers.Dense(10, activation='relu')(inputs)
    x = tf.keras.layers.Dense(10, activation='relu')(x)
    outputs = tf.keras.layers.Dense(1, activation='linear')(x)

    # Create the model
    model = tf.keras.Model(inputs=inputs, outputs=outputs)

    # Compile the model
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                  loss='mse', metrics=['mae'])

    # Train the model
    model.fit(cars_train[feature_cols], cars_train['price'], epochs=100)

    # Record the input files used by the run
    ex.add_resource('zbior_ium/Car_Prices_Poland_Kaggle.csv')
    ex.add_resource('test.csv')

    # Record the source code
    ex.add_artifact(__file__)

    # Save the model to a file
    model.save('model.h5')
    ex.add_artifact('model.h5')

    # Evaluate the model and log the resulting metrics
    metrics = model.evaluate(cars_train[feature_cols], cars_train['price'])
    ex.log_scalar('mse', metrics[0])
    ex.log_scalar('mae', metrics[1])
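
A usage note with a minimal sketch (not part of the commit): the file name sacred.py shadows the sacred package itself, so running the script directly would fail on the import; the sketch assumes it is saved under a hypothetical non-conflicting name such as train_experiment.py.

# Launch the experiment; FileStorageObserver writes each run to my_runs/<run_id>/:
#   python train_experiment.py
#
# The scalars logged via ex.log_scalar can then be read back from metrics.json:
import json

with open('my_runs/1/metrics.json') as f:  # run id 1 is an assumption
    metrics = json.load(f)

print(metrics['mse']['values'], metrics['mae']['values'])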

train.ipynb (new file, 4539 lines)

File diff suppressed because it is too large.