Commit eb920bfa authored by Feliks Kiszkurno

tried DNN

parent a75b6bd1
NAME,LAYER_N,RHO,POS
Hor1_01,1,5,0
Hor1_01,1,15,-1
Hor1_01,1,15,-4
Hor1_02,1,5,0
Hor1_02,1,15,-2
Hor1_02,1,15,-5
Hor1_03,1,5,0
Hor1_03,1,15,-3
Hor1_03,1,15,-6
Hor1_04,1,5,0
Hor1_04,1,15,-4
Hor1_04,1,15,-7
Hor1_05,1,5,0
Hor1_05,1,15,-5
Hor1_05,1,15,-8
Hor1_06,1,5,0
Hor1_06,1,15,-6
Hor1_06,1,15,-9
Hor1_07,1,5,0
Hor1_07,1,15,-7
Hor1_07,1,15,-10
Hor1_08,1,5,0
Hor1_08,1,15,-8
Hor1_08,1,15,-11
Hor1_09,1,5,0
Hor1_09,1,15,-9
Hor1_09,1,15,-12
Hor1_10,1,5,0
Hor1_10,1,15,-10
Hor1_10,1,15,-13
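The CSV above is the format consumed by slostabcreatedata.read_test_parameters later in this commit. A minimal sketch of how such a file could be turned into the dict layout shown in the commented examples further down (e.g. {'hor1_01': {'layer_n': 1, 'rho_values': [[1, 5], [2, 15]], 'layers_pos': np.array([-5])}}). The column semantics assumed here (RHO values paired with a layer index, negative POS entries used as interface depths) are one possible interpretation, not the project's actual reader:

import numpy as np
import pandas as pd


def read_test_parameters_sketch(csv_path):
    # Group the rows of the test-definition CSV by test name.
    frame = pd.read_csv(csv_path)
    tests = {}
    for name, rows in frame.groupby('NAME', sort=False):
        # Pair each resistivity value with a 1-based layer index.
        rho_values = [[i + 1, rho] for i, rho in enumerate(rows['RHO'])]
        # Treat negative POS entries as layer interface depths.
        layers_pos = np.array([pos for pos in rows['POS'] if pos < 0])
        tests[name] = {'layer_n': int(rows['LAYER_N'].iloc[0]),
                       'rho_values': rho_values,
                       'layers_pos': layers_pos}
    return tests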
......@@ -19,8 +19,8 @@ import test_definitions
settings.init()
#test_definitions.init()
# Config
create_new_data = True # set to True if you need to reassign the classes
create_new_data_only = True # set to False in order to run ML classifications
create_new_data = False # set to True if you need to reassign the classes
create_new_data_only = False # set to False in order to run ML classifications
reassign_classes = False; class_type = 'norm'
# Load existing data instead of creating a new dataset.
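For orientation, a rough sketch of the flow these flags control, assembled from calls visible in the other hunks of this commit; create_data_for_all_tests is a hypothetical placeholder for the data-creation step, the other names appear elsewhere in this diff:

if create_new_data:
    test_results = create_data_for_all_tests(tests_parameters)  # hypothetical helper
else:
    # Load existing data instead of creating a new dataset.
    test_results = slopestabilitytools.datamanagement.import_tests()

if not create_new_data_only:
    print('Running ML stuff...')
    ml_results = slopestabilityML.run_all_tests(test_results)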
......@@ -58,7 +58,7 @@ else:
# min_depth, max_depth)
#tests_parameters = test_definitions.test_definitions
tests_parameters = slostabcreatedata.read_test_parameters(os.path.abspath(os.path.join(os.getcwd()) + '/' + 'TestDefinitions/hor_1layer_varying_depth.csv'))
tests_parameters = slostabcreatedata.read_test_parameters(os.path.abspath(os.path.join(os.getcwd()) + '/' + 'TestDefinitions/big_list_overnight_part2.csv'))
test_definitions.init(tests_parameters)
# tests_parameters = {'hor_11': {'layer_n': 1, 'rho_values': [[1, 10], [2, 12]], 'layers_pos': np.array([-4])}}
......@@ -83,7 +83,7 @@ if not create_new_data_only:
print('Running ML stuff...')
tests_parameters = slostabcreatedata.read_test_parameters(
os.path.abspath(os.path.join(os.getcwd()) + '/' + 'TestDefinitions/hor_1layer_varying_depth.csv'))
os.path.abspath(os.path.join(os.getcwd()) + '/' + 'TestDefinitions/hor_1layer_constant_depth_varying_contrast.csv'))
test_definitions.init(tests_parameters)
ml_results = slopestabilityML.run_all_tests(test_results)
......
This diff is collapsed.
......@@ -3,4 +3,5 @@ matplotlib~=3.3.2
pandas~=1.1.5
pygimli~=1.1.0
scikit-learn~=0.23.2
scipy~=1.5.2
\ No newline at end of file
scipy~=1.5.2
keras~=2.3.1
\ No newline at end of file
......@@ -13,7 +13,7 @@ def init():
settings = {}
settings['split_proportion'] = 0.25
settings['split_proportion'] = 0.25  # Fraction of the available profiles that will be used for prediction
# Normalization and classes
settings['norm_class'] = True # True to use normalized classes, False to use class_ids
......
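The split_proportion setting feeds the train/prediction split of profiles. A minimal sketch of such a split, assuming an sklearn-based implementation; the real slopestabilityML.split_dataset (called as split_dataset(test_results.keys(), 999) in the DNN script below) may work differently, and the 0.25 default here stands in for settings['split_proportion']:

from sklearn.model_selection import train_test_split


def split_dataset_sketch(test_names, random_seed, split_proportion=0.25):
    # Hold out `split_proportion` of the profiles for prediction,
    # keep the rest for training.
    test_training, test_prediction = train_test_split(
        sorted(test_names), test_size=split_proportion, random_state=random_seed)
    return test_training, test_prediction


# Usage matching the call in the DNN script:
# test_training, test_prediction = split_dataset_sketch(test_results.keys(), 999)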
......@@ -68,5 +68,5 @@ def combine_results(ml_results):
plt.ylabel('Correct points [%]')
plt.legend(loc='lower right')
fig.tight_layout()
#fig.tight_layout()
slopestabilitytools.save_plot(fig, '', 'ML_summary_training', skip_fileformat=True)
......@@ -24,7 +24,8 @@ def plot_results(accuracy_labels, accuracy_score, clf_name):
plt.ylabel('Correct points [%]')
plt.title(clf_name_title+' accuracy score')
print('plot script is executed')
fig.tight_layout()
plt.tick_params(axis='x', which='major', labelsize=4)
#fig.tight_layout()
slopestabilitytools.save_plot(fig, clf_name, '_accuracy', subfolder='ML/')
return
......@@ -55,13 +55,13 @@ def run_all_tests(test_results):
ml_results_class['knn'] = knn_result_class
gc.collect()
print('Running ADABOOST...')
ada_result_class, ada_accuracy_score, ada_accuracy_labels, ada_accuracy_score_training, ada_accuracy_labels_training = \
slopestabilityML.ADABOOST.adaboost_run(test_results, random_seed)
ml_results['ADA'] = {'score': ada_accuracy_score, 'labels': ada_accuracy_labels,
'score_training': ada_accuracy_score_training, 'labels_training': ada_accuracy_labels_training}
ml_results_class['ada'] = ada_result_class
gc.collect()
# print('Running ADABOOST...')
# ada_result_class, ada_accuracy_score, ada_accuracy_labels, ada_accuracy_score_training, ada_accuracy_labels_training = \
# slopestabilityML.ADABOOST.adaboost_run(test_results, random_seed)
# ml_results['ADA'] = {'score': ada_accuracy_score, 'labels': ada_accuracy_labels,
# 'score_training': ada_accuracy_score_training, 'labels_training': ada_accuracy_labels_training}
# ml_results_class['ada'] = ada_result_class
# gc.collect()
# print('Running RVM...')
# rvm_accuracy_score, rvm_accuracy_labels, rvm_accuracy_score_training, rvm_accuracy_labels_training = \
......
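Since the commit message says a DNN was tried, one natural follow-up (purely a sketch, not code from this commit) would be to register a Keras-based runner in run_all_tests with the same five return values the other classifiers use; slopestabilityML.DNN.dnn_run is a hypothetical name for such a wrapper, shown commented out in the style of the neighbouring blocks:

    # print('Running DNN...')
    # dnn_result_class, dnn_accuracy_score, dnn_accuracy_labels, dnn_accuracy_score_training, dnn_accuracy_labels_training = \
    #     slopestabilityML.DNN.dnn_run(test_results, random_seed)
    # ml_results['DNN'] = {'score': dnn_accuracy_score, 'labels': dnn_accuracy_labels,
    #                      'score_training': dnn_accuracy_score_training, 'labels_training': dnn_accuracy_labels_training}
    # ml_results_class['dnn'] = dnn_result_class
    # gc.collect()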
......@@ -75,7 +75,7 @@ def create_data(test_name, test_config, max_depth):
# RUN INVERSION #
k0 = pg.physics.ert.createGeometricFactors(data)
model_inverted = ert_manager.invert(data=data, lam=20, paraDX=0.25, paraMaxCellSize=2, # paraDepth=2 * max_depth,
model_inverted = ert_manager.invert(data=data, lam=20, paraDX=0.25, paraMaxCellSize=2, zWeight=0.9, # paraDepth=2 * max_depth,
quality=34, zPower=0.4)
result_full = ert_manager.inv.model
......
......@@ -13,7 +13,7 @@ import os.path
def init(test_def):
global test_definitions
test_definitions = slostabcreatedata.read_test_parameters(os.path.abspath(os.path.join(os.getcwd()) + '/' + 'TestDefinitions/hor_1layer_varying_depth.csv'))
test_definitions = slostabcreatedata.read_test_parameters(os.path.abspath(os.path.join(os.getcwd()) + '/' + 'TestDefinitions/big_list_overnight_part2.csv'))
# test_definitions = {'hor1_01': {'layer_n': 1, 'rho_values': [[1, 5], [2, 15]], 'layers_pos': np.array([-5])},
# 'hor1_02': {'layer_n': 1, 'rho_values': [[1, 5], [2, 50]], 'layers_pos': np.array([-5])},
......
This diff is collapsed.
import slopestabilitytools
import slopestabilityML
import pandas as pd

import settings
settings.init()

# Import necessary modules
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from math import sqrt

# Keras specific
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical

# Load the modelled profiles and make sure the output folders exist
test_results = slopestabilitytools.datamanagement.import_tests()
is_success = slopestabilitytools.folder_structure.create_folder_structure()

# Combine all training profiles into one data frame
test_results_combined = pd.DataFrame()
test_training, test_prediction = slopestabilityML.split_dataset(test_results.keys(), 999)
for name in test_training:
    test_results_combined = test_results_combined.append(test_results[name])
test_results_combined = test_results_combined.reset_index()
test_results_combined = test_results_combined.drop(['index'], axis='columns')

x_train, y_train = slopestabilityML.preprocess_data(test_results_combined)
# One-hot encode the targets to match the two-unit softmax output and the
# categorical_crossentropy loss (assumes preprocess_data returns integer class
# labels; drop this line if it already returns one-hot targets).
y_train = to_categorical(y_train)

# Fully connected network: 8 input features, two output classes
model = Sequential()
model.add(Dense(500, activation='relu', input_dim=8))
model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(2, activation='softmax'))

# Compile the model
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=20)
\ No newline at end of file
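The script above only trains the network. A hypothetical follow-up (not part of this commit) that scores the trained model on the held-out prediction profiles, reporting the percentage of correctly classified points in the spirit of the other classifiers; it assumes preprocess_data returns features and integer class labels for a single profile the same way it does for the combined training frame:

import numpy as np

accuracy_labels = []
accuracy_score = []
for name in test_prediction:
    # Features and reference labels for one held-out profile.
    x_question, y_answer = slopestabilityML.preprocess_data(test_results[name])
    # Pick the class with the highest softmax probability.
    y_pred = np.argmax(model.predict(x_question), axis=1)
    score = 100 * np.mean(y_pred == np.ravel(y_answer))
    accuracy_labels.append(name)
    accuracy_score.append(score)
    print('{} accuracy: {:.1f}%'.format(name, score))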