zad7.1
parent 2f71b4340e
commit 69db80f48a
5  Zad7/Zajęcia7/my_runs/1/config.json  Normal file
@@ -0,0 +1,5 @@
{
  "seed": 770691781,
  "test_size_param": 0.2,
  "train_size_param": 0.8
}
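
config.json records the configuration Sacred captured for run 1: the two values declared in the @ex.config function of zadanie1.py below, plus the seed Sacred draws automatically. A minimal standalone sketch of the same mechanism, showing how the recorded values could be overridden for another run (the override values 0.7/0.3 are illustrative, not taken from this commit):

from sacred import Experiment

ex = Experiment("file_observer", interactive=True)

@ex.config
def my_config():
    # same defaults as in zadanie1.py; Sacred writes these, plus "seed", to config.json
    train_size_param = 0.8
    test_size_param = 0.2

@ex.main
def my_main(train_size_param, test_size_param):
    print(train_size_param, test_size_param)

# without config_updates this reproduces the recorded defaults (with a fresh seed);
# with config_updates the overrides are stored in that run's config.json instead
r = ex.run(config_updates={"train_size_param": 0.7, "test_size_param": 0.3})
print(r.config["seed"], r.config["train_size_param"])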
1225  Zad7/Zajęcia7/my_runs/1/cout.txt  Normal file
File diff suppressed because one or more lines are too long
3  Zad7/Zajęcia7/my_runs/1/info.json  Normal file
@@ -0,0 +1,3 @@
{
  "prepare_model_ts": "2021-05-10 01:14:47.306858"
}
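
info.json holds whatever the run wrote into _run.info; here it is only the prepare_model_ts timestamp set at the start of prepare_model(). A minimal sketch of reading it back, assuming the run directory created by this commit:

import json

with open("Zad7/Zajęcia7/my_runs/1/info.json") as f:
    info = json.load(f)
print(info["prepare_model_ts"])  # "2021-05-10 01:14:47.306858"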
1  Zad7/Zajęcia7/my_runs/1/metrics.json  Normal file
@@ -0,0 +1 @@
{}
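
metrics.json is empty because zadanie1.py never logs any metrics. A hypothetical standalone sketch of how _run.log_scalar would populate it (the metric name "train_loss" and the values are illustrative, not taken from the script):

from sacred import Experiment
from sacred.observers import FileStorageObserver

ex = Experiment("file_observer", interactive=True)
ex.observers.append(FileStorageObserver('Zajęcia7/my_runs'))

@ex.main
def my_main(_run):
    # each call appends a step/value/timestamp entry under the metric name
    for epoch, loss in enumerate([0.9, 0.5, 0.3]):
        _run.log_scalar("train_loss", loss, epoch)

r = ex.run()

With the observer attached, that run's metrics.json would then contain a "train_loss" entry (roughly: its steps, values and timestamps) instead of an empty object.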
71  Zad7/Zajęcia7/my_runs/1/run.json  Normal file
@@ -0,0 +1,71 @@
{
  "artifacts": [],
  "command": "my_main",
  "experiment": {
    "base_dir": "/home/tomasz/IUM/ium_434695/Zad7",
    "dependencies": [
      "numpy==1.19.5",
      "pandas==1.2.3",
      "requests==2.22.0",
      "sacred==0.8.2",
      "scikit-learn==0.24.1",
      "tensorflow==2.4.1",
      "wget==3.2"
    ],
    "mainfile": "zadanie1.py",
    "name": "file_observer",
    "repositories": [
      {
        "commit": "2f71b4340e1a7092c2640a0446109758bcd02dad",
        "dirty": false,
        "url": "https://git.wmi.amu.edu.pl/s434695/ium_434695.git"
      }
    ],
    "sources": [
      [
        "zadanie1.py",
        "_sources/zadanie1_4b3c4820a644952451146e0b432e8947.py"
      ]
    ]
  },
  "heartbeat": "2021-05-09T23:30:03.895239",
  "host": {
    "ENV": {},
    "cpu": "Intel(R) Core(TM) i7-3520M CPU @ 2.90GHz",
    "hostname": "tomasz-HP-EliteBook-8570p",
    "os": [
      "Linux",
      "Linux-5.8.0-50-generic-x86_64-with-glibc2.29"
    ],
    "python_version": "3.8.5"
  },
  "meta": {
    "command": "my_main",
    "options": {
      "--beat-interval": null,
      "--capture": null,
      "--comment": null,
      "--debug": false,
      "--enforce_clean": false,
      "--file_storage": null,
      "--force": false,
      "--help": false,
      "--loglevel": null,
      "--mongo_db": null,
      "--name": null,
      "--pdb": false,
      "--print-config": false,
      "--priority": null,
      "--queue": false,
      "--s3": null,
      "--sql": null,
      "--tiny_db": null,
      "--unobserved": false
    }
  },
  "resources": [],
  "result": null,
  "start_time": "2021-05-09T23:14:47.279713",
  "status": "COMPLETED",
  "stop_time": "2021-05-09T23:30:03.883313"
}
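
run.json is the run's metadata: the command that was executed, the detected dependencies and git repository state, host information, the CLI option defaults, and the start/stop times with the final status. A minimal sketch of listing recorded runs and their outcome by reading these files (directory layout assumed from this commit):

import glob
import json

for path in sorted(glob.glob("Zad7/Zajęcia7/my_runs/*/run.json")):
    with open(path) as f:
        run = json.load(f)
    print(path, run["status"], run["start_time"], run["stop_time"])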
76  Zad7/Zajęcia7/my_runs/_sources/zadanie1_4b3c4820a644952451146e0b432e8947.py  Normal file
@@ -0,0 +1,76 @@
#! /usr/bin/python3
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense
from sklearn.metrics import accuracy_score, classification_report
import pandas as pd
from sklearn.model_selection import train_test_split
import wget
import numpy as np
import requests
from sacred.observers import FileStorageObserver
from sacred import Experiment
from datetime import datetime
import os

ex = Experiment("file_observer", interactive=True)

ex.observers.append(FileStorageObserver('Zajęcia7/my_runs'))


@ex.config
def my_config():
    train_size_param = 0.8
    test_size_param = 0.2


@ex.capture
def prepare_model(train_size_param, test_size_param, _run):
    _run.info["prepare_model_ts"] = str(datetime.now())

    # download the dataset and load it into a DataFrame
    url = 'https://git.wmi.amu.edu.pl/s434695/ium_434695/raw/commit/2301fb86e434734376f73503307a8f3255a75cc6/vgsales.csv'
    r = requests.get(url, allow_redirects=True)

    open('vgsales.csv', 'wb').write(r.content)
    df = pd.read_csv('vgsales.csv')

    # helper that builds the Keras network (uses x_train from the enclosing scope)
    def regression_model():
        model = Sequential()
        model.add(Dense(32, activation="relu", input_shape=(x_train.shape[1],)))
        model.add(Dense(64, activation="relu"))
        model.add(Dense(1, activation="relu"))

        model.compile(optimizer="adam", loss="mean_squared_error")
        return model

    # binary target: 1 if the game was published by Nintendo, 0 otherwise
    df['Nintendo'] = df['Publisher'].apply(lambda x: 1 if x == 'Nintendo' else 0)
    df = df.drop(['Rank', 'Name', 'Platform', 'Year', 'Genre', 'Publisher'], axis=1)

    y = df.Nintendo

    # min-max normalisation of the remaining numeric columns
    df = (df - df.min()) / (df.max() - df.min())

    x = df.drop(['Nintendo'], axis=1)

    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, train_size=0.8, random_state=21)

    model = regression_model()
    model.fit(x_train, y_train, epochs=600, verbose=1)

    y_pred = model.predict(x_test)
    y_pred = np.around(y_pred, decimals=0)

    return classification_report(y_test, y_pred)


@ex.main
def my_main(train_size_param, test_size_param):
    print(prepare_model())  # no need to pass the values; Sacred injects them from the config


r = ex.run()
ex.add_artifact("Zajęcia7/saved_model/saved_model.pb")
16599  Zad7/vgsales.csv  Normal file
File diff suppressed because it is too large
76  Zad7/zadanie1.py  Normal file
@@ -0,0 +1,76 @@
#! /usr/bin/python3
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense
from sklearn.metrics import accuracy_score, classification_report
import pandas as pd
from sklearn.model_selection import train_test_split
import wget
import numpy as np
import requests
from sacred.observers import FileStorageObserver
from sacred import Experiment
from datetime import datetime
import os

ex = Experiment("file_observer", interactive=True)

ex.observers.append(FileStorageObserver('Zajęcia7/my_runs'))


@ex.config
def my_config():
    train_size_param = 0.8
    test_size_param = 0.2


@ex.capture
def prepare_model(train_size_param, test_size_param, _run):
    _run.info["prepare_model_ts"] = str(datetime.now())

    # download the dataset and load it into a DataFrame
    url = 'https://git.wmi.amu.edu.pl/s434695/ium_434695/raw/commit/2301fb86e434734376f73503307a8f3255a75cc6/vgsales.csv'
    r = requests.get(url, allow_redirects=True)

    open('vgsales.csv', 'wb').write(r.content)
    df = pd.read_csv('vgsales.csv')

    # helper that builds the Keras network (uses x_train from the enclosing scope)
    def regression_model():
        model = Sequential()
        model.add(Dense(32, activation="relu", input_shape=(x_train.shape[1],)))
        model.add(Dense(64, activation="relu"))
        model.add(Dense(1, activation="relu"))

        model.compile(optimizer="adam", loss="mean_squared_error")
        return model

    # binary target: 1 if the game was published by Nintendo, 0 otherwise
    df['Nintendo'] = df['Publisher'].apply(lambda x: 1 if x == 'Nintendo' else 0)
    df = df.drop(['Rank', 'Name', 'Platform', 'Year', 'Genre', 'Publisher'], axis=1)

    y = df.Nintendo

    # min-max normalisation of the remaining numeric columns
    df = (df - df.min()) / (df.max() - df.min())

    x = df.drop(['Nintendo'], axis=1)

    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, train_size=0.8, random_state=21)

    model = regression_model()
    model.fit(x_train, y_train, epochs=600, verbose=1)

    y_pred = model.predict(x_test)
    y_pred = np.around(y_pred, decimals=0)

    return classification_report(y_test, y_pred)


@ex.main
def my_main(train_size_param, test_size_param):
    print(prepare_model())  # no need to pass the values; Sacred injects them from the config


r = ex.run()
ex.add_artifact("Zajęcia7/saved_model/saved_model.pb")
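
Note that the script registers Zajęcia7/saved_model/saved_model.pb as an artifact, but nothing in zadanie1.py writes that file, and run.json above records "artifacts": []. A hypothetical one-line addition inside prepare_model, after model.fit, would create the file; this call is not part of the committed script:

    # hypothetical: export the trained Keras model in SavedModel format, which
    # writes Zajęcia7/saved_model/saved_model.pb for ex.add_artifact to pick up
    model.save("Zajęcia7/saved_model")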