Commit c1f97839 authored by Feliks Kiszkurno's avatar Feliks Kiszkurno
Browse files

Fixed a bug where a single test could be selected multiple times for prediction.

Added split_proportion parameter to settings.
Added average line to summary plots.
parent 10421584
......@@ -17,8 +17,8 @@ import test_definitions
settings.init()
test_definitions.init()
# Config
create_new_data = False # set to True if you need to reassign the classes
create_new_data_only = False # set to False in order to run ML classifications
create_new_data = False # set to True if you need to reassign the classes
create_new_data_only = False # set to False in order to run ML classifications
reassign_classes = False; class_type = 'norm'
# Load existing data instead of creating new one.
......
......@@ -13,11 +13,13 @@ def init():
settings = {}
settings['split_proportion'] = 0.75
# Normalization and classes
settings['norm_class'] = False # True to use normalized classes, False to use class_ids
settings['norm_class'] = True # True to use normalized classes, False to use class_ids
settings['norm_class_num'] = 5 # Number of classes for normalized data
settings['norm'] = False # True to use normalized data, False to use raw data
settings['use_labels'] = True # True to use labels instead of classes
settings['use_labels'] = False # True to use labels instead of classes
# Include sensitivity
settings['sen'] = True # True - include sensitivity, False - ignore sensitivity
......
......@@ -7,6 +7,7 @@ Created on 19.01.2021
"""
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import slopestabilitytools
......@@ -14,15 +15,27 @@ import slopestabilitytools
def combine_results(ml_results):
print('Plotting the summary...')
# Predictions
fig = plt.figure()
ax = fig.subplots(1)
fig.suptitle('Accuracy of different ML methods: predictions')
prediction_score_sum = 0
prediction_score_num = 0
for method_name in sorted(ml_results.keys()):
plt.plot(ml_results[method_name]['labels'], ml_results[method_name]['score'], marker='x',
label=method_name)
prediction_score_sum = prediction_score_sum + np.sum(np.array(ml_results[method_name]['score']))
prediction_score_num = prediction_score_num + len(ml_results[method_name]['score'])
prediction_score_avg = prediction_score_sum / prediction_score_num
print('Prediction accuracy: {result:.2f}%'.format(result=prediction_score_avg))
x_limits = ax.get_xlim()
ax.axhline(y=prediction_score_avg, xmin=x_limits[0], xmax=x_limits[1])
plt.xlabel('Test name')
plt.setp(ax.get_xticklabels(), rotation=45)
plt.ylabel('Correct points [%]')
......@@ -35,9 +48,20 @@ def combine_results(ml_results):
ax = fig.subplots(1)
fig.suptitle('Accuracy of different ML methods - training')
training_score_sum = 0
training_score_num = 0
for method_name in sorted(ml_results.keys()):
plt.plot(ml_results[method_name]['labels_training'], ml_results[method_name]['score_training'], marker='x',
label=method_name)
training_score_sum = training_score_sum + np.sum(np.array(ml_results[method_name]['score_training']))
training_score_num = training_score_num + len(ml_results[method_name]['score_training'])
training_score_avg = training_score_sum / training_score_num
print('Training accuracy: {result:.2f}%'.format(result=training_score_avg))
x_limits = ax.get_xlim()
plt.axhline(y=training_score_avg, xmin=x_limits[0], xmax=x_limits[1])
plt.xlabel('Test name')
plt.setp(ax.get_xticklabels(), rotation=90)
......
......@@ -9,14 +9,18 @@ Created on 19.01.2021
import slopestabilitytools
import random
import math
import settings
def split_dataset(test_names, random_seed, *, proportion=0.25):
def split_dataset(test_names, random_seed, *, proportion=False):
if proportion is False:
proportion = settings.settings['split_proportion']
random.seed(random_seed)
test_number = len(test_names)
test_prediction = random.choices(list(test_names),
test_prediction = random.sample(list(test_names),
k=math.ceil(test_number * proportion))
test_training = slopestabilitytools.set_diff(list(test_names), set(test_prediction))
......
......@@ -75,7 +75,7 @@ def create_data(test_name, test_config, max_depth):
# RUN INVERSION #
k0 = pg.physics.ert.createGeometricFactors(data)
model_inverted = ert_manager.invert(data=data, lam=100, paraDX=0.25, paraMaxCellSize=2, paraDepth=2 * max_depth,
model_inverted = ert_manager.invert(data=data, lam=100, paraDX=0.25, paraMaxCellSize=2, # paraDepth=2 * max_depth,
quality=34, zPower=0.4)
result_full = ert_manager.inv.model
......
......@@ -12,25 +12,25 @@ import numpy as np
def init():
global test_definitions
test_definitions = {'hor1_01': {'layer_n': 1, 'rho_values': [[1, 5], [2, 15]], 'layers_pos': np.array([-5])},
'hor1_02': {'layer_n': 1, 'rho_values': [[1, 5], [2, 50]], 'layers_pos': np.array([-5])},
'hor1_03': {'layer_n': 1, 'rho_values': [[1, 15], [2, 20]], 'layers_pos': np.array([-8])},
'hor1_04': {'layer_n': 1, 'rho_values': [[1, 5], [2, 10]], 'layers_pos': np.array([-3])},
'hor1_05': {'layer_n': 1, 'rho_values': [[1, 5], [2, 25]], 'layers_pos': np.array([-3])},
'hor1_06': {'layer_n': 1, 'rho_values': [[1, 2], [2, 10]], 'layers_pos': np.array([-4])},
'hor1_07': {'layer_n': 1, 'rho_values': [[1, 10], [2, 20]], 'layers_pos': np.array([-6])},
'hor1_08': {'layer_n': 1, 'rho_values': [[1, 5], [2, 25]], 'layers_pos': np.array([-3])},
'hor1_09': {'layer_n': 1, 'rho_values': [[1, 3], [2, 25]], 'layers_pos': np.array([-3])},
'hor1_10': {'layer_n': 1, 'rho_values': [[1, 5], [2, 25]], 'layers_pos': np.array([-7])},
'hor1_11': {'layer_n': 1, 'rho_values': [[1, 10], [2, 12]], 'layers_pos': np.array([-4])},
'hor1_12': {'layer_n': 1, 'rho_values': [[1, 15], [2, 50]], 'layers_pos': np.array([-5])},
'hor1_14': {'layer_n': 1, 'rho_values': [[1, 5], [2, 75]], 'layers_pos': np.array([-5])},
'hor1_15': {'layer_n': 1, 'rho_values': [[1, 15], [2, 50]], 'layers_pos': np.array([-8])},
'hor1_16': {'layer_n': 1, 'rho_values': [[1, 25], [2, 50]], 'layers_pos': np.array([-5])},
'hor1_17': {'layer_n': 1, 'rho_values': [[1, 25], [2, 75]], 'layers_pos': np.array([-5])},
'hor1_18': {'layer_n': 1, 'rho_values': [[1, 5], [2, 75]], 'layers_pos': np.array([-8])},
'hor1_19': {'layer_n': 1, 'rho_values': [[1, 50], [2, 60]], 'layers_pos': np.array([-7])},
'hor1_20': {'layer_n': 1, 'rho_values': [[1, 2], [2, 10]], 'layers_pos': np.array([-15])},
test_definitions = {#'hor1_01': {'layer_n': 1, 'rho_values': [[1, 5], [2, 15]], 'layers_pos': np.array([-5])},
# 'hor1_02': {'layer_n': 1, 'rho_values': [[1, 5], [2, 50]], 'layers_pos': np.array([-5])},
# 'hor1_03': {'layer_n': 1, 'rho_values': [[1, 15], [2, 20]], 'layers_pos': np.array([-8])},
# 'hor1_04': {'layer_n': 1, 'rho_values': [[1, 5], [2, 10]], 'layers_pos': np.array([-3])},
# 'hor1_05': {'layer_n': 1, 'rho_values': [[1, 5], [2, 25]], 'layers_pos': np.array([-3])},
# 'hor1_06': {'layer_n': 1, 'rho_values': [[1, 2], [2, 10]], 'layers_pos': np.array([-4])},
# 'hor1_07': {'layer_n': 1, 'rho_values': [[1, 10], [2, 20]], 'layers_pos': np.array([-6])},
# 'hor1_08': {'layer_n': 1, 'rho_values': [[1, 5], [2, 25]], 'layers_pos': np.array([-3])},
# 'hor1_09': {'layer_n': 1, 'rho_values': [[1, 3], [2, 25]], 'layers_pos': np.array([-3])},
# 'hor1_10': {'layer_n': 1, 'rho_values': [[1, 5], [2, 25]], 'layers_pos': np.array([-7])},
# 'hor1_11': {'layer_n': 1, 'rho_values': [[1, 10], [2, 12]], 'layers_pos': np.array([-4])},
# 'hor1_12': {'layer_n': 1, 'rho_values': [[1, 15], [2, 50]], 'layers_pos': np.array([-5])},
# 'hor1_14': {'layer_n': 1, 'rho_values': [[1, 5], [2, 75]], 'layers_pos': np.array([-5])},
# 'hor1_15': {'layer_n': 1, 'rho_values': [[1, 15], [2, 50]], 'layers_pos': np.array([-8])},
# 'hor1_16': {'layer_n': 1, 'rho_values': [[1, 25], [2, 50]], 'layers_pos': np.array([-5])},
# 'hor1_17': {'layer_n': 1, 'rho_values': [[1, 25], [2, 75]], 'layers_pos': np.array([-5])},
# 'hor1_18': {'layer_n': 1, 'rho_values': [[1, 5], [2, 75]], 'layers_pos': np.array([-8])},
# 'hor1_19': {'layer_n': 1, 'rho_values': [[1, 50], [2, 60]], 'layers_pos': np.array([-7])},
# 'hor1_20': {'layer_n': 1, 'rho_values': [[1, 2], [2, 10]], 'layers_pos': np.array([-15])},
# Tests with two layers
'hor2_01': {'layer_n': 2, 'rho_values': [[1, 3], [2, 5], [3, 15]],
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment