Commit 0abc6c68 authored by Feliks Kiszkurno

Diverse changes

parent 98daf623
......@@ -19,10 +19,10 @@ import test_definitions
settings.init()
# Config
create_new_data = True # set to True if you need to reassign the classes
create_new_data_only = True # set to False in order to run ML classifications
create_new_data = False # set to True if you need to reassign the classes
create_new_data_only = False # set to False in order to run ML classifications
reassign_classes = False; class_type = 'norm'
param_path = os.path.abspath(os.path.join(os.getcwd()) + '/' + 'TestDefinitions/hor_1layer_varying_depth_lambda_zweight.csv')
param_path = os.path.abspath(os.path.join(os.getcwd()) + '/' + 'TestDefinitions/hor_1layer_varying_depth_lambda_zweight_ergänzung.csv')
test_definitions.init(path=param_path)
test_definitions.init(path=param_path)
......
......@@ -17,15 +17,15 @@ from sklearn.tree import DecisionTreeClassifier
def adaboost_run(test_results, random_seed):
test_training, test_prediction = slopestabilityML.split_dataset(test_results.keys(), random_seed)
clf = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=3),
n_estimators=20, random_state=0)
clf = AdaBoostClassifier()#base_estimator=DecisionTreeClassifier(max_depth=3),
#n_estimators=20, random_state=0)
# Train classifier
result_class, accuracy_labels, accuracy_score, accuracy_labels_training, accuracy_score_training = \
result_class, accuracy_labels, accuracy_score, accuracy_labels_training, accuracy_score_training, depth_estim, depth_estim_accuracy, depth_estim_labels, depth_estim_training, depth_estim_accuracy_training, depth_estim_labels_training = \
slopestabilityML.run_classification(test_training, test_prediction, test_results, clf, 'ADA')
# Plot
# slopestabilityML.plot_results(accuracy_labels, accuracy_score, 'ADA_prediction')
# slopestabilityML.plot_results(accuracy_labels_training, accuracy_score_training, 'ADA_training')
return result_class, accuracy_score, accuracy_labels, accuracy_score_training, accuracy_labels_training
return result_class, accuracy_score, accuracy_labels, accuracy_score_training, accuracy_labels_training, depth_estim, depth_estim_accuracy, depth_estim_labels, depth_estim_training, depth_estim_accuracy_training, depth_estim_labels_training
......@@ -22,11 +22,11 @@ def gbc_run(test_results, random_seed):
clf = ensemble.GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0)
# Train classifier
result_class, accuracy_labels, accuracy_score, accuracy_labels_training, accuracy_score_training = \
result_class, accuracy_labels, accuracy_score, accuracy_labels_training, accuracy_score_training, depth_estim, depth_estim_accuracy, depth_estim_labels, depth_estim_training, depth_estim_accuracy_training, depth_estim_labels_training = \
slopestabilityML.run_classification(test_training, test_prediction, test_results, clf, 'GBC')
# Plot
# slopestabilityML.plot_results(accuracy_labels, accuracy_score, 'GBC_prediction')
# slopestabilityML.plot_results(accuracy_labels_training, accuracy_score_training, 'GBC_training')
return result_class, accuracy_score, accuracy_labels, accuracy_score_training, accuracy_labels_training
return result_class, accuracy_score, accuracy_labels, accuracy_score_training, accuracy_labels_training, depth_estim, depth_estim_accuracy, depth_estim_labels, depth_estim_training, depth_estim_accuracy_training, depth_estim_labels_training
......@@ -17,14 +17,14 @@ def knn_run(test_results, random_seed):
test_training, test_prediction = slopestabilityML.split_dataset(test_results.keys(), random_seed)
# Create classifier
clf = KNeighborsClassifier(n_neighbors=2)
clf = KNeighborsClassifier(n_neighbors=2, n_jobs=-1)
# Train classifier
result_class, accuracy_labels, accuracy_score, accuracy_labels_training, accuracy_score_training = \
result_class, accuracy_labels, accuracy_score, accuracy_labels_training, accuracy_score_training, depth_estim, depth_estim_accuracy, depth_estim_labels, depth_estim_training, depth_estim_accuracy_training, depth_estim_labels_training = \
slopestabilityML.run_classification(test_training, test_prediction, test_results, clf, 'KNN')
# Plot
# slopestabilityML.plot_results(accuracy_labels, accuracy_score, 'KNN_prediction')
# slopestabilityML.plot_results(accuracy_labels_training, accuracy_score_training, 'KNN_training')
return result_class, accuracy_score, accuracy_labels, accuracy_score_training, accuracy_labels_training
\ No newline at end of file
return result_class, accuracy_score, accuracy_labels, accuracy_score_training, accuracy_labels_training, depth_estim, depth_estim_accuracy, depth_estim_labels, depth_estim_training, depth_estim_accuracy_training, depth_estim_labels_training
\ No newline at end of file
......@@ -18,14 +18,14 @@ def sgd_run(test_results, random_seed):
test_training, test_prediction = slopestabilityML.split_dataset(test_results.keys(), random_seed)
# Create classifier
clf = SGDClassifier(loss="hinge", penalty="l2", max_iter=5)
clf = SGDClassifier(loss="hinge", penalty="l2", max_iter=5, n_jobs=-1)
# Train classifier
result_class, accuracy_labels, accuracy_score, accuracy_labels_training, accuracy_score_training = \
result_class, accuracy_labels, accuracy_score, accuracy_labels_training, accuracy_score_training, depth_estim, depth_estim_accuracy, depth_estim_labels, depth_estim_training, depth_estim_accuracy_training, depth_estim_labels_training = \
slopestabilityML.run_classification(test_training, test_prediction, test_results, clf, 'SGD')
# Plot
# slopestabilityML.plot_results(accuracy_labels, accuracy_score, 'SGD_prediction')
# slopestabilityML.plot_results(accuracy_labels_training, accuracy_score_training, 'SGD_training')
return result_class, accuracy_score, accuracy_labels, accuracy_score_training, accuracy_labels_training
return result_class, accuracy_score, accuracy_labels, accuracy_score_training, accuracy_labels_training, depth_estim, depth_estim_accuracy, depth_estim_labels, depth_estim_training, depth_estim_accuracy_training, depth_estim_labels_training
......@@ -29,7 +29,7 @@ def svm_run(test_results, random_seed):
clf = svm.SVC(gamma=0.001, C=100, kernel='linear')
# Train classifier
result_class, accuracy_labels, accuracy_score, accuracy_labels_training, accuracy_score_training = \
result_class, accuracy_labels, accuracy_score, accuracy_labels_training, accuracy_score_training, depth_estim, depth_estim_accuracy, depth_estim_labels, depth_estim_training, depth_estim_accuracy_training, depth_estim_labels_training = \
slopestabilityML.run_classification(test_training, test_prediction, test_results, clf, 'SVM')
......@@ -37,4 +37,4 @@ def svm_run(test_results, random_seed):
# slopestabilityML.plot_results(accuracy_labels, accuracy_score, 'SVM_prediction')
# slopestabilityML.plot_results(accuracy_labels_training, accuracy_score_training, 'SVM_training')
return result_class, accuracy_score, accuracy_labels, accuracy_score_training, accuracy_labels_training
return result_class, accuracy_score, accuracy_labels, accuracy_score_training, accuracy_labels_training, depth_estim, depth_estim_accuracy, depth_estim_labels, depth_estim_training, depth_estim_accuracy_training, depth_estim_labels_training
......@@ -43,6 +43,8 @@ def combine_results(ml_results):
slopestabilitytools.save_plot(fig, '', 'ML_summary_prediction', skip_fileformat=True)
del fig
# Training
fig = plt.figure()
ax = fig.subplots(1)
......@@ -70,3 +72,75 @@ def combine_results(ml_results):
#fig.tight_layout()
slopestabilitytools.save_plot(fig, '', 'ML_summary_training', skip_fileformat=True)
del fig
'''
Plot the accuracy of interface depth detection
'''
# Predictions
fig = plt.figure()
ax = fig.subplots(1)
fig.suptitle('Accuracy of interface depth detection by different ML methods: predictions')
prediction_depth_estim_sum = 0
prediction_depth_estim_num = 0
for method_name in sorted(ml_results.keys()):
if method_name == 'com':
print('Skipping com')
else:
plt.plot(ml_results[method_name]['depth_labels'], ml_results[method_name]['depth_accuracy'], marker='x',
label=method_name)
prediction_depth_estim_sum = prediction_depth_estim_sum + np.sum(np.array(ml_results[method_name]['depth_accuracy']))
prediction_depth_estim_num = prediction_depth_estim_num + len(ml_results[method_name]['depth_accuracy'])
prediction_depth_estim_avg = prediction_depth_estim_sum / prediction_depth_estim_num
print('Prediction depth accuracy: {result:.2f}%'.format(result=prediction_depth_estim_avg))
plt.gca().invert_yaxis()
# axhline's xmin/xmax are axes-fraction coordinates; the default already spans the full width
ax.axhline(y=prediction_depth_estim_avg)
plt.xlabel('Test name')
plt.setp(ax.get_xticklabels(), rotation=45)
plt.ylabel('Interface depth detection error [%]')
plt.legend(loc='lower right')
slopestabilitytools.save_plot(fig, '', 'ML_summary_prediction_depth_estim', skip_fileformat=True)
del fig
# Training
fig = plt.figure()
ax = fig.subplots(1)
fig.suptitle('Accuracy of interface depth detection by different ML methods: training')
training_depth_estim_sum = 0
training_depth_estim_num = 0
for method_name in sorted(ml_results.keys()):
if method_name == 'com':
print('Skipping com')
else:
plt.plot(ml_results[method_name]['depth_labels_training'], ml_results[method_name]['depth_accuracy_training'], marker='x',
label=method_name)
training_depth_estim_sum = training_depth_estim_sum + np.sum(np.array(ml_results[method_name]['depth_accuracy_training']))
training_depth_estim_num = training_depth_estim_num + len(ml_results[method_name]['depth_accuracy_training'])
training_depth_estim_avg = training_depth_estim_sum / training_depth_estim_num
print('Training depth accuracy: {result:.2f}%'.format(result=training_depth_estim_avg))
plt.gca().invert_yaxis()
# axhline's xmin/xmax are axes-fraction coordinates; the default already spans the full width
plt.axhline(y=training_depth_estim_avg)
plt.xlabel('Test name')
plt.setp(ax.get_xticklabels(), rotation=90)
plt.ylabel('Interface depth detection error [%]')
plt.legend(loc='lower right')
# fig.tight_layout()
slopestabilitytools.save_plot(fig, '', 'ML_summary_training_depth_estim', skip_fileformat=True)
\ No newline at end of file
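For reference, a minimal self-contained sketch (not part of this commit) of the depth-accuracy summary plot built in combine_results above: one curve of per-test depth errors per classifier, plus a horizontal line at the mean error. The ml_results contents below are made-up placeholder values and only the dictionary structure mirrors the real one; note that axhline spans the full axis width by default, so no data-coordinate x-limits are needed.

import matplotlib.pyplot as plt
import numpy as np

# Hypothetical results: per-test depth-estimation errors [%] for two methods.
ml_results = {
    'svm': {'depth_labels': ['t1_-3.0', 't2_-5.0'], 'depth_accuracy': [12.0, 8.5]},
    'knn': {'depth_labels': ['t1_-3.0', 't2_-5.0'], 'depth_accuracy': [20.0, 15.0]},
}

fig, ax = plt.subplots()
fig.suptitle('Accuracy of interface depth detection by different ML methods: predictions')
errors = []
for method_name in sorted(ml_results):
    ax.plot(ml_results[method_name]['depth_labels'],
            ml_results[method_name]['depth_accuracy'], marker='x', label=method_name)
    errors.extend(ml_results[method_name]['depth_accuracy'])

ax.invert_yaxis()              # smaller error plotted higher up
ax.axhline(y=np.mean(errors))  # xmin/xmax default to the full axis width
ax.set_xlabel('Test name')
ax.set_ylabel('Interface depth detection error [%]')
ax.legend(loc='lower right')
fig.savefig('ML_summary_prediction_depth_estim_sketch.png')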
......@@ -15,7 +15,7 @@ import test_definitions
import settings
def plot_class_overview(test_results, test_name, class_in, y_pred, clf_name, *, training=False):
def plot_class_overview(test_results, test_name, class_in, y_pred, clf_name, *, training=False, depth_estimate='x', depth_accuracy='x'):
x = test_results['X'].to_numpy()
y = test_results['Y'].to_numpy()
......@@ -28,7 +28,9 @@ def plot_class_overview(test_results, test_name, class_in, y_pred, clf_name, *,
ax = _ax.flatten()
cb = []
fig.suptitle('Classification overview: ' + test_name + ', ' + clf_name)
#fig.suptitle('Classification overview: ' + test_name + ', ' + clf_name + ', depth estimate accuracy: ' + str(depth_accuracy) + '%, depth (est/true): ' + str(depth_estimate) + '/' + str(test_definitions.test_parameters[test_name]['layers_pos'][0]))
depth_true = test_definitions.test_parameters[test_name]['layers_pos'][0]
fig.suptitle('Classification overview: {}, {}, depth estimate accuracy: {:.2f}%, depth (est/true): {:.2f}/{:.2f}'.format(test_name, clf_name, depth_accuracy, depth_estimate, depth_true))
fig.subplots_adjust(hspace=0.8)
# Convert labels to numerical for plotting
......@@ -37,7 +39,7 @@ def plot_class_overview(test_results, test_name, class_in, y_pred, clf_name, *,
# Plot input classes
im0 = ax[0].scatter(x, y, c=class_in)
for depth in test_definitions.test_definitions[test_name]['layers_pos']:
for depth in test_definitions.test_parameters[test_name]['layers_pos']:
ax[0].hlines(y=depth, xmin=x.min(), xmax=x.max(), linestyle='-', color='r')
ax[0].set_title('Input classes')
ax[0] = slopestabilitytools.set_labels(ax[0])
......@@ -52,8 +54,9 @@ def plot_class_overview(test_results, test_name, class_in, y_pred, clf_name, *,
# Plot prediction
im1 = ax[1].scatter(x, y, c=y_pred)
for depth in test_definitions.test_definitions[test_name]['layers_pos']:
for depth in test_definitions.test_parameters[test_name]['layers_pos']:
ax[1].hlines(y=depth, xmin=x.min(), xmax=x.max(), linestyle='-', color='r')
ax[1].hlines(y=depth_estimate, xmin=x.min(), xmax=x.max(), linestyle='-', color='g')
ax[1].set_title('Predicted classes')
ax[1] = slopestabilitytools.set_labels(ax[1])
cb.append(plt.colorbar(im1, ax=ax[1], label='Class')) # , shrink=0.9)
......@@ -63,7 +66,7 @@ def plot_class_overview(test_results, test_name, class_in, y_pred, clf_name, *,
# Plot input model
im2 = ax[2].scatter(x, y, c=test_results['INMN'].to_numpy())
for depth in test_definitions.test_definitions[test_name]['layers_pos']:
for depth in test_definitions.test_parameters[test_name]['layers_pos']:
ax[2].hlines(y=depth, xmin=x.min(), xmax=x.max(), linestyle='-', color='r')
ax[2].set_title('Input model')
ax[2] = slopestabilitytools.set_labels(ax[2])
......@@ -77,7 +80,7 @@ def plot_class_overview(test_results, test_name, class_in, y_pred, clf_name, *,
# Plot difference between correct and predicted classes
im3 = ax[3].scatter(x, y, c=class_diff)
for depth in test_definitions.test_definitions[test_name]['layers_pos']:
for depth in test_definitions.test_parameters[test_name]['layers_pos']:
ax[3].hlines(y=depth, xmin=x.min(), xmax=x.max(), linestyle='-', color='r')
ax[3].set_title('Difference')
ax[3] = slopestabilitytools.set_labels(ax[3])
......@@ -88,7 +91,7 @@ def plot_class_overview(test_results, test_name, class_in, y_pred, clf_name, *,
# Plot sensitivity
im4 = ax[4].scatter(x, y, c=test_results['SEN'].to_numpy())
for depth in test_definitions.test_definitions[test_name]['layers_pos']:
for depth in test_definitions.test_parameters[test_name]['layers_pos']:
ax[4].hlines(y=depth, xmin=x.min(), xmax=x.max(), linestyle='-', color='r')
ax[4].set_title('Sensitivity')
ax[4] = slopestabilitytools.set_labels(ax[4])
......
......@@ -8,23 +8,31 @@ Created on 19.01.2021
import settings
import slopestabilityML
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import make_pipeline
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from matplotlib import ticker
from sklearn.metrics import mean_absolute_error
import slopestabilitytools
import test_definitions
def run_classification(test_training, test_prediction, test_results, clf, clf_name):
accuracy_result = []
accuracy_labels = []
accuracy_result_training = []
accuracy_labels_training = []
depth_estim = []
depth_estim_accuracy = []
depth_estim_labels = []
depth_estim_training = []
depth_estim_accuracy_training = []
depth_estim_labels_training = []
num_feat = []
if settings.settings['norm'] is True:
......@@ -45,7 +53,7 @@ def run_classification(test_training, test_prediction, test_results, clf, clf_na
cat_lab = ['Very Low', 'Low', 'Medium', 'High', 'Very High']
cat_trans = OneHotEncoder(categories=[cat_lab])
preprocessor = ColumnTransformer(transformers=[('num', num_trans, num_feat)])
#('cat', cat_trans, cat_feat)])
# ('cat', cat_trans, cat_feat)])
else:
preprocessor = ColumnTransformer(transformers=[('num', num_trans, num_feat)])
......@@ -58,6 +66,7 @@ def run_classification(test_training, test_prediction, test_results, clf, clf_na
test_results_combined = test_results_combined.reset_index()
test_results_combined = test_results_combined.drop(['index'], axis='columns')
x_train, y_train = slopestabilityML.preprocess_data(test_results_combined)
x_position = test_results_combined['X']
clf_pipeline.fit(x_train, y_train)
......@@ -68,12 +77,38 @@ def run_classification(test_training, test_prediction, test_results, clf, clf_na
class_correct = test_results_combined['CLASSN'].loc[index]
else:
class_correct = test_results_combined['CLASS'].loc[index]
y_pred = clf_pipeline.predict(x_train.loc[index])
x_train_temp = x_train.loc[index]
y_pred = clf_pipeline.predict(x_train_temp)
score_training = accuracy_score(class_correct, y_pred)
accuracy_result_training.append(score_training * 100)
accuracy_labels_training.append(name)
#print(y_train.loc[index])
slopestabilityML.plot_class_overview(test_results_combined.loc[index], name, y_train.loc[index], y_pred, clf_name, training=True)
# Evaluate the accuracy of interface depth detection
x = x_position.loc[index].to_numpy()
y = x_train_temp['Y'].to_numpy()
xi, yi, gridded_data = slopestabilitytools.grid_data(x, y, {'class': y_pred})
y_pred_grid = gridded_data['class']
depth_all = np.zeros(y_pred_grid.shape[0])
depth_all_correct = np.ones(y_pred_grid.shape[0]) * test_definitions.test_parameters[name]['layers_pos'][0]
for column_id in range(y_pred_grid.shape[0]):
# if len(np.unique(y_pred_grid[:,column_id])) is not 2:
depth_id = np.array(np.where(y_pred_grid[:, column_id] == 4))
if np.size(depth_id) == 0:
depth = yi[-1]
else:
depth_id = depth_id.min()
depth = yi[depth_id]
depth_all[column_id] = depth
depth_interface_estimate = np.mean(depth_all)
depth_interface_accuracy = (mean_absolute_error(depth_all_correct, depth_all) / abs(test_definitions.test_parameters[name]['layers_pos'][0]))*100
print('Depth estimation error (training): {:.2f}%'.format(depth_interface_accuracy))
depth_estim_training.append(depth_interface_estimate)
depth_estim_accuracy_training.append(depth_interface_accuracy)
depth_estim_labels_training.append(name + '_' + str(test_definitions.test_parameters[name]['layers_pos'][0]))
# print(y_train.loc[index])
slopestabilityML.plot_class_overview(test_results_combined.loc[index], name, y_train.loc[index], y_pred,
clf_name, training=True, depth_estimate=depth_interface_estimate,
depth_accuracy=depth_interface_accuracy)
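# The block above is the core of the new depth-estimation logic: grid the predicted
# classes, scan each grid column top-down for the first cell of the target class
# (4 in the code above, presumably the lower layer), average those depths, and report
# the mean absolute error relative to the true interface depth as a percentage.
# A self-contained sketch of the same idea (an illustration only, with made-up values
# and without slopestabilitytools.grid_data; it assumes grid rows are depths and
# grid columns are horizontal positions):
import numpy as np
from sklearn.metrics import mean_absolute_error

def estimate_interface_depth(y_pred_grid, yi, depth_true, bottom_class=4):
    """y_pred_grid: (n_depths, n_columns) class grid; yi: depth of each grid row."""
    n_columns = y_pred_grid.shape[1]
    depth_all = np.zeros(n_columns)
    for column_id in range(n_columns):
        hits = np.where(y_pred_grid[:, column_id] == bottom_class)[0]
        # If the target class never appears in a column, fall back to the deepest level.
        depth_all[column_id] = yi[-1] if hits.size == 0 else yi[hits.min()]
    depth_estimate = depth_all.mean()
    error_pct = mean_absolute_error(np.full(n_columns, depth_true), depth_all) / abs(depth_true) * 100
    return depth_estimate, error_pct

# Example with made-up values: true interface at -3.0 m, grid rows at 0, -1, ..., -5 m.
yi = np.linspace(0, -5, 6)
grid = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 4], [4, 4, 4], [4, 4, 4], [4, 4, 4]])
print(estimate_interface_depth(grid, yi, depth_true=-3.0))  # approx. (-2.67, 11.1)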
result_class = {}
......@@ -86,7 +121,7 @@ def run_classification(test_training, test_prediction, test_results, clf, clf_na
result_class[test_name_pred] = y_pred
# print(y_pred)
score = accuracy_score(y_answer, y_pred)
print('score: {score:.2f} %'.format(score=score*100))
print('score: {score:.2f} %'.format(score=score * 100))
if settings.settings['norm_class'] is True:
class_in = test_results[test_name_pred]['CLASSN']
......@@ -98,11 +133,38 @@ def run_classification(test_training, test_prediction, test_results, clf, clf_na
print('I don\'t know which class to use! Exiting...')
exit(0)
slopestabilityML.plot_class_overview(test_results[test_name_pred], test_name_pred, class_in, y_pred, clf_name)
# Evaluate the accuracy of interface depth detection
x = test_results[test_name_pred]['X'].to_numpy()
y = test_results[test_name_pred]['Y'].to_numpy()
xi, yi, gridded_data = slopestabilitytools.grid_data(x, y, {'class': y_pred})
y_pred_grid = gridded_data['class']
depth_all = np.zeros(y_pred_grid.shape[0])
depth_all_correct = np.ones(y_pred_grid.shape[0]) * test_definitions.test_parameters[test_name_pred]['layers_pos'][0]
for column_id in range(y_pred_grid.shape[0]):
# if len(np.unique(y_pred_grid[:,column_id])) is not 2:
depth_id = np.array(np.where(y_pred_grid[:, column_id] == 4))
if np.size(depth_id) == 0:
depth = yi[-1]
else:
depth_id = depth_id.min()
depth = yi[depth_id]
depth_all[column_id] = depth
depth_interface_estimate = np.mean(depth_all)
depth_interface_accuracy = (mean_absolute_error(depth_all_correct, depth_all) / abs(test_definitions.test_parameters[test_name_pred]['layers_pos'][0])) * 100
print('Depth estimation error (prediction): {:.2f}%'.format(depth_interface_accuracy))
depth_estim.append(depth_interface_estimate)
depth_estim_accuracy.append(depth_interface_accuracy)
depth_estim_labels.append(test_name_pred + '_' + str(test_definitions.test_parameters[test_name_pred]['layers_pos'][0]))
slopestabilityML.plot_class_overview(test_results[test_name_pred], test_name_pred, class_in, y_pred, clf_name, depth_estimate=depth_interface_estimate,
depth_accuracy=depth_interface_accuracy)
# Evaluate result
#accuracy_.append(len(np.where(y_pred == y_answer.to_numpy())) / len(y_answer.to_numpy()) * 100)
accuracy_result.append(score*100)
# accuracy_.append(len(np.where(y_pred == y_answer.to_numpy())) / len(y_answer.to_numpy()) * 100)
accuracy_result.append(score * 100)
accuracy_labels.append(test_name_pred)
return result_class, accuracy_labels, accuracy_result, accuracy_labels_training, accuracy_result_training
# Evaluate
return result_class, accuracy_labels, accuracy_result, accuracy_labels_training, accuracy_result_training, depth_estim, depth_estim_accuracy, depth_estim_labels, depth_estim_training, depth_estim_accuracy_training, depth_estim_labels_training
......@@ -24,43 +24,61 @@ def run_all_tests(test_results):
ml_results_class = {}
print('Running SVM...')
svm_result_class, svm_accuracy_score, svm_accuracy_labels, svm_accuracy_score_training, svm_accuracy_labels_training = \
svm_result_class, svm_accuracy_score, svm_accuracy_labels, svm_accuracy_score_training, svm_accuracy_labels_training, svm_depth_estim, svm_depth_estim_accuracy, svm_depth_estim_labels, svm_depth_estim_training, svm_depth_estim_accuracy_training, svm_depth_estim_labels_training = \
slopestabilityML.SVM.svm_run(test_results, random_seed)
ml_results['svm'] = {'score': svm_accuracy_score, 'labels': svm_accuracy_labels,
'score_training': svm_accuracy_score_training, 'labels_training': svm_accuracy_labels_training}
'score_training': svm_accuracy_score_training, 'labels_training': svm_accuracy_labels_training,
'depth_estim': svm_depth_estim, 'depth_accuracy': svm_depth_estim_accuracy, 'depth_labels': svm_depth_estim_labels,
'depth_estim_training': svm_depth_estim_training, 'depth_accuracy_training': svm_depth_estim_accuracy_training,'depth_labels_training': svm_depth_estim_labels_training
}
ml_results_class['svm'] = svm_result_class
gc.collect()
print('Running GBC...')
gbc_result_class, gbc_accuracy_score, gbc_accuracy_labels, gbc_accuracy_score_training, gbc_accuracy_labels_training = \
gbc_result_class, gbc_accuracy_score, gbc_accuracy_labels, gbc_accuracy_score_training, gbc_accuracy_labels_training, gbc_depth_estim, gbc_depth_estim_accuracy, gbc_depth_estim_labels, gbc_depth_estim_training, gbc_depth_estim_accuracy_training, gbc_depth_estim_labels_training= \
slopestabilityML.GBC.gbc_run(test_results, random_seed)
ml_results['gbc'] = {'score': gbc_accuracy_score, 'labels': gbc_accuracy_labels,
'score_training': gbc_accuracy_score_training, 'labels_training': gbc_accuracy_labels_training}
'score_training': gbc_accuracy_score_training, 'labels_training': gbc_accuracy_labels_training,
'depth_estim': gbc_depth_estim, 'depth_accuracy': gbc_depth_estim_accuracy, 'depth_labels': gbc_depth_estim_labels,
'depth_estim_training': gbc_depth_estim_training, 'depth_accuracy_training': gbc_depth_estim_accuracy_training, 'depth_labels_training': gbc_depth_estim_labels_training
}
ml_results_class['gbc'] = gbc_result_class
gc.collect()
print('Running SGD...')
sgd_result_class, sgd_accuracy_score, sgd_accuracy_labels, sgd_accuracy_score_training, sgd_accuracy_labels_training = \
sgd_result_class, sgd_accuracy_score, sgd_accuracy_labels, sgd_accuracy_score_training, sgd_accuracy_labels_training, sgd_depth_estim, sgd_depth_estim_accuracy, sgd_depth_estim_labels, sgd_depth_estim_training, sgd_depth_estim_accuracy_training, sgd_depth_estim_labels_training= \
slopestabilityML.SGD.sgd_run(test_results, random_seed)
ml_results['sgd'] = {'score': sgd_accuracy_score, 'labels': sgd_accuracy_labels,
'score_training': sgd_accuracy_score_training, 'labels_training': sgd_accuracy_labels_training}
'score_training': sgd_accuracy_score_training, 'labels_training': sgd_accuracy_labels_training,
'depth_estim': sgd_depth_estim, 'depth_accuracy': sgd_depth_estim_accuracy, 'depth_labels': sgd_depth_estim_labels,
'depth_estim_training': sgd_depth_estim_training, 'depth_accuracy_training': sgd_depth_estim_accuracy_training, 'depth_labels_training': sgd_depth_estim_labels_training
}
ml_results_class['sgd'] = sgd_result_class
gc.collect()
print('Running KNN...')
knn_result_class, knn_accuracy_score, knn_accuracy_labels, knn_accuracy_score_training, knn_accuracy_labels_training = \
knn_result_class, knn_accuracy_score, knn_accuracy_labels, knn_accuracy_score_training, knn_accuracy_labels_training, knn_depth_estim, knn_depth_estim_accuracy, knn_depth_estim_labels, knn_depth_estim_training, knn_depth_estim_accuracy_training, knn_depth_estim_labels_training = \
slopestabilityML.KNN.knn_run(test_results, random_seed)
ml_results['KNN'] = {'score': knn_accuracy_score, 'labels': knn_accuracy_labels,
'score_training': knn_accuracy_score_training, 'labels_training': knn_accuracy_labels_training}
'score_training': knn_accuracy_score_training, 'labels_training': knn_accuracy_labels_training,
'depth_estim': knn_depth_estim, 'depth_accuracy': knn_depth_estim_accuracy, 'depth_labels': knn_depth_estim_labels,
'depth_estim_training': knn_depth_estim_training, 'depth_accuracy_training': knn_depth_estim_accuracy_training, 'depth_labels_training': knn_depth_estim_labels_training
}
ml_results_class['knn'] = knn_result_class
gc.collect()
# print('Running ADABOOST...')
# ada_result_class, ada_accuracy_score, ada_accuracy_labels, ada_accuracy_score_training, ada_accuracy_labels_training = \
# ada_result_class, ada_accuracy_score, ada_accuracy_labels, ada_accuracy_score_training, ada_accuracy_labels_training, ada_depth_estim, ada_depth_estim_accuracy, ada_depth_estim_labels, ada_depth_estim_training, ada_depth_estim_accuracy_training, ada_depth_estim_labels_training = \
# slopestabilityML.ADABOOST.adaboost_run(test_results, random_seed)
# ml_results['ADA'] = {'score': ada_accuracy_score, 'labels': ada_accuracy_labels,
# 'score_training': ada_accuracy_score_training, 'labels_training': ada_accuracy_labels_training}
# ml_results_class['ada'] = ada_result_class
# ml_results['ADABOOST'] = {'score': ada_accuracy_score, 'labels': ada_accuracy_labels,
# 'score_training': ada_accuracy_score_training, 'labels_training': ada_accuracy_labels_training,
# 'depth_estim': ada_depth_estim, 'depth_accuracy': ada_depth_estim_accuracy,
# 'depth_labels': ada_depth_estim_labels,
# 'depth_estim_training': ada_depth_estim_training,
# 'depth_accuracy_training': ada_depth_estim_accuracy_training,
# 'depth_labels_training': ada_depth_estim_labels_training
# }
# ml_results_class['ADABOOST'] = ada_result_class
# gc.collect()
# print('Running RVM...')
......
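The four classifier blocks in run_all_tests above all repeat the same 11-value unpacking and dictionary assembly. A purely illustrative condensation (a sketch, not code from this commit, assuming the same names and imports already available inside run_all_tests) that builds the same ml_results entries from the returned tuple:

runners = {
    'svm': slopestabilityML.SVM.svm_run,
    'gbc': slopestabilityML.GBC.gbc_run,
    'sgd': slopestabilityML.SGD.sgd_run,
    'KNN': slopestabilityML.KNN.knn_run,
}
keys = ('score', 'labels', 'score_training', 'labels_training',
        'depth_estim', 'depth_accuracy', 'depth_labels',
        'depth_estim_training', 'depth_accuracy_training', 'depth_labels_training')

for short_name, runner in runners.items():
    print('Running {}...'.format(short_name.upper()))
    result_class, *rest = runner(test_results, random_seed)
    ml_results[short_name] = dict(zip(keys, rest))       # same key/value layout as the blocks above
    ml_results_class[short_name.lower()] = result_class
    gc.collect()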