Commit 55acef4b authored by Feliks Kiszkurno
Browse files

log instead of norm

parent 71070676
......@@ -22,10 +22,7 @@ def init():
settings['sen'] = True # True - include sensitivity, False - ignore sensitivity
# Include depth
settings['depth'] = True # True - include depth, False - ignore depth
# Clip data to max and min values from the input model
settings['clip'] = True # True - clip data, False - use unclipped data
settings['depth'] = True # True - include depth, False - ignore depth
# Paths
settings['results_folder'] = 'results'
......@@ -34,3 +31,8 @@ def init():
# Plots
settings['plot_formats'] = ['png'] # list of formats to save plots as, supported formats: eps, jpeg, jpg, pdf, pgf, png, ps, raw, rgba, svg, svgz, tif, tiff
## LEGACY STUFF - SET TO FALSE UNLESS THERE IS A VERY GOOD REASON!!!!
# Clip data to max and min values from the input model
settings['clip'] = False # True - clip data, False - use unclipped data
......@@ -12,6 +12,7 @@ import numpy as np
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import make_pipeline
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from matplotlib import ticker
import slopestabilitytools
......@@ -19,10 +20,10 @@ import slopestabilitytools
def run_classification(test_training, test_prediction, test_results, clf, clf_name):
accuracy_score = []
accuracy_result = []
accuracy_labels = []
accuracy_score_training = []
accuracy_result_training = []
accuracy_labels_training = []
num_feat = []
......@@ -71,13 +72,16 @@ def run_classification(test_training, test_prediction, test_results, clf, clf_na
# Prepare data
print(test_name)
x_train, y_train = slopestabilityML.preprocess_data(test_results[test_name])
#x_train = test_results[test_name]
#y_train = test_results[test_name]
# Train classifier
clf_pipeline.fit(x_train, y_train)
score_training = clf_pipeline.score(x_train, y_train)
accuracy_score_training.append(score_training * 100)
y_pred = clf_pipeline.predict(x_train)
score_training1 = clf_pipeline.score(x_train, y_train)
score_training = accuracy_score(y_train, y_pred)
if score_training1 == score_training:
print('MATCH!')
else:
print('MISMATCH!')
accuracy_result_training.append(score_training * 100)
accuracy_labels_training.append(test_name)
result_class = {}
......@@ -91,10 +95,14 @@ def run_classification(test_training, test_prediction, test_results, clf, clf_na
y_pred = clf_pipeline.predict(x_question)
result_class[test_name_pred] = y_pred
# print(y_pred)
score = clf_pipeline.score(x_question, y_answer)
score1 = clf_pipeline.score(x_question, y_answer)
score = accuracy_score(y_answer, y_pred)
if score1 == score:
print('MATCH!')
else:
print('MISMATCH!')
print('score: '+str(score))
if settings.settings['norm_class'] is True:
class_in = test_results[test_name]['CLASSN']
elif settings.settings['norm_class'] is False:
......@@ -106,9 +114,9 @@ def run_classification(test_training, test_prediction, test_results, clf, clf_na
slopestabilityML.plot_class_res(test_results, test_name_pred, class_in, y_pred, clf_name)
# Evaluate result
#accuracy_score.append(len(np.where(y_pred == y_answer.to_numpy())) / len(y_answer.to_numpy()) * 100)
accuracy_score.append(score*100)
#accuracy_.append(len(np.where(y_pred == y_answer.to_numpy())) / len(y_answer.to_numpy()) * 100)
accuracy_result.append(score*100)
accuracy_labels.append(test_name_pred)
return result_class, accuracy_labels, accuracy_score, accuracy_labels_training, accuracy_score_training
return result_class, accuracy_labels, accuracy_result, accuracy_labels_training, accuracy_result_training
......@@ -62,12 +62,12 @@ def run_all_tests(test_results):
# ml_results['RVM'] = {'score': rvm_accuracy_score, 'labels': rvm_accuracy_labels,
# 'score_training': rvm_accuracy_score_training, 'labels_training': rvm_accuracy_labels_training}
print('Running MGC')
mgc_result_class, mgc_accuracy_score, mgc_accuracy_labels, mgc_accuracy_score_training, mgc_accuracy_labels_training \
= slopestabilityML.MGC.mgc_run(test_results, random_seed)
ml_results['MGC'] = {'score': mgc_accuracy_score, 'labels': mgc_accuracy_labels,
'score_training': mgc_accuracy_score_training, 'labels_training': mgc_accuracy_labels_training}
ml_results_class['mgc'] = mgc_result_class
# print('Running MGC')
# mgc_result_class, mgc_accuracy_score, mgc_accuracy_labels, mgc_accuracy_score_training, mgc_accuracy_labels_training \
# = slopestabilityML.MGC.mgc_run(test_results, random_seed)
# ml_results['MGC'] = {'score': mgc_accuracy_score, 'labels': mgc_accuracy_labels,
# 'score_training': mgc_accuracy_score_training, 'labels_training': mgc_accuracy_labels_training}
# ml_results_class['mgc'] = mgc_result_class
print('Asking committee for verdict')
committee_accuracy_score, committee_accuracy_labels, committee_accuracy_score_training, committee_accuracy_labels_training \
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment