Compare commits

..

5 Commits

4 changed files with 40 additions and 17 deletions

View File

@ -0,0 +1,19 @@
This repository contains several scripts integral to the development, training, and deployment of a machine learning model.
**classifiers_test.py**
*Purpose:* This script is used for testing various classifiers.
*Functionality:* It conducts a series of tests on different classifiers to evaluate their performance and suitability for our model.
**model_creation.py**
*Purpose:* Responsible for creating and training the machine learning model.
*Functionality:* Reads the input data, trains the model, and saves it to trained_model.pkl.
**model_inference.py**
*Purpose:* Utilizes the trained model to make predictions on new data.
*Functionality:* Loads the model from trained_model.pkl, makes predictions on new data and outputs a binary table indicating collapse predictions.

View File

@ -8,12 +8,9 @@ TEST_DATA_DIR = "datasets_test"
test_df_list = []
for file in os.listdir(TEST_DATA_DIR):
file_path = os.path.join(TEST_DATA_DIR, file)
df = pd.read_csv(file_path, delim_whitespace=True, skiprows=1,
data_test = pd.read_csv(file_path, delim_whitespace=True, skiprows=1,
names=["tbid", "tphys", "r", "vr", "vt", "ik1", "ik2", "sm1", "sm2", "a", "e",
"collapsed"])
test_df_list.append(df)
data_test = pd.concat(test_df_list, ignore_index=True).sample(frac=1, random_state=42)
X_test = data_test.iloc[:, 1:-1].values
imputer = SimpleImputer(strategy='mean')
@ -23,5 +20,7 @@ model_filename = 'trained_model.pkl'
model = joblib.load(model_filename)
predictions = model.predict(X_test)
print(predictions)
data_test['prediction'] = predictions
save_path = os.path.join(TEST_DATA_DIR, file + '.pred')
data_test.to_csv(save_path, index=False, sep=' ')

5
pig_scripts/README.md Normal file
View File

@ -0,0 +1,5 @@
# Testing
The "Testing" directory contains scripts used to perform basic tests of the system. If any of the testing scripts don't work, there might be problems with privileges or access to the datasets. These scripts should be run at least once every major update, before writing any longer scripts, to ensure that everything works correctly.
# System_Table
The "System_Table" directory contains full scripts that were used to extract data from MOCCA simulations.

View File

@ -12,22 +12,22 @@ function App() {
const [results, setResults] = useState('');
const [isVisible, setIsVisible] = useState(true);
const fetchDataFromAPI = async (dataset, table, column) => {
const fetchDataFromAPI = async (ldataset, ltable, lcolumn, tdataset, ttable, tcolumn) => {
try {
const response = await axios.get(`https://cat-fact.herokuapp.com/facts/?dataset=${dataset}&table=${table}&column=${column}`);
const data = response.data;
postML(entryId, ldataset, ltable, lcolumn, tdataset, ttable, tcolumn);
setResults(data);
} catch (error) {
console.error('Error fetching data:', error);
console.error('ERROR fetching data:', error);
setResults('Error fetching data');
}
};
const handleButtonClick = () => {
fetchDataFromAPI(datasetQuarry, tableQuarry, columns);
fetchDataFromAPI(datasetQuarry2, tableQuarry2, columns2, datasetQuarry, tableQuarry, columns);
};
const toggleVisibility = () => {
setIsVisible(!isVisible);
};