
Commit 1d056330 authored by felikskiszkurno

SVM test should run.

Added documentation files.
Started work on reading the ERT data from CSV files.
parent 0f71a3ca
@@ -23,7 +23,7 @@ import pygimli.physics.ert as ert
is_success = slopestabilitytools.folder_structure.create_folder_structure()
# Settings
number_of_tests = 2
number_of_tests = 10
rho_spread_factor = 1.5
rho_max = 150
layers_min = 1
@@ -50,4 +50,4 @@ for test_name in tests_horizontal.keys():
    slopestabilitytools.plot_and_save(test_name, test_results[test_name], 'Test: '+test_name)
slopestabilityML.svm_run(test_results)
svm_accuracy_score, svm_accuracy_labels = slopestabilityML.svm_run(test_results)
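For orientation: svm_run (further down in this commit) appears to expect test_results to be a dict mapping test names to pandas DataFrames that contain at least the 'Z', 'INM' and 'CLASS' columns it later drops or reads. A minimal sketch of that assumed structure, with synthetic values and hypothetical test names and feature columns:

```python
# Sketch of the assumed input; 'X' and 'Y' feature columns and the test names are
# hypothetical, 'Z', 'INM' and 'CLASS' are the column names used by svm_run below.
import numpy as np
import pandas as pd

rng = np.random.default_rng(seed=0)

def fake_test(n=50):
    return pd.DataFrame({'X': rng.uniform(0, 100, n),
                         'Y': rng.uniform(0, 100, n),
                         'Z': rng.uniform(-30, 0, n),
                         'INM': rng.uniform(10, 150, n),
                         'CLASS': rng.integers(0, 2, n)})

test_results = {'hor_01': fake_test(), 'hor_02': fake_test(), 'hor_03': fake_test()}
# svm_accuracy_score, svm_accuracy_labels = slopestabilityML.svm_run(test_results)
```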
numpy~=1.18.5
matplotlib~=3.3.2
pandas~=1.1.5
pygimli~=1.1.0
\ No newline at end of file
pygimli~=1.1.0
scikit-learn~=0.23.2
\ No newline at end of file
@@ -10,11 +10,17 @@ from sklearn import svm
import slopestabilitytools
import random
import math
import numpy as np
import slopestabilityML.plot_results
# TODO: for comparability with other ML methods, add option to define externally which test should be used for training
def svm_run(test_results):
    # https://stackabuse.com/implementing-svm-and-kernel-svm-with-pythons-scikit-learn/
    accuracy_score = []
    accuracy_labels = []
    test_number = len(test_results.keys())
    # Roughly 10% of the tests are reserved for prediction; random.sample draws
    # without replacement, so the same test cannot be selected twice.
    test_prediction = random.sample(list(test_results.keys()),
                                    k=math.ceil(test_number * 0.1))
@@ -22,17 +28,33 @@ def svm_run(test_results):
    test_training = slopestabilitytools.set_diff(list(test_results.keys()), set(test_prediction))
    # Create classifier
    clf = svm.SVC(gamma=0.001, C=100)
    clf = svm.SVC(gamma=0.001, C=100, kernel='linear')
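    # A linear kernel keeps the decision boundary a hyperplane and is cheaper to
    # train than the default RBF kernel; the gamma parameter has no effect when
    # kernel='linear'.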
    # Train classifier
    for test_name in test_training:
        # Prepare data
        data_set = test_results[test_name]
        X = data_set.drop('Z', '')
        x_train = data_set.drop(columns=['Z', 'INM', 'CLASS'])
        y_train = data_set['CLASS']
        # Train classifier
        # Note: fit() retrains from scratch on every call, so only the last test in
        # test_training effectively determines the final model; the training data
        # would need to be concatenated to learn from all tests at once.
        clf.fit(x_train, y_train)
    # Predict with classifier
    for test_name_pred in test_prediction:
        # Prepare data
        data_set_pred = test_results[test_name_pred]
        x_question = data_set_pred.drop(columns=['Z', 'INM', 'CLASS'])
        y_answer = data_set_pred['CLASS']
        y_pred = clf.predict(x_question)
        # Evaluate result: percentage of correctly classified points
        accuracy_score.append(np.count_nonzero(y_pred == y_answer) / len(y_answer) * 100)
        accuracy_labels.append(test_name_pred)
    # Plot
    slopestabilityML.plot_results(accuracy_labels, accuracy_score)
    return
    return accuracy_score, accuracy_labels
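A side note on the accuracy computation: scikit-learn (added to requirements.txt in this commit) provides sklearn.metrics.accuracy_score, which computes the same fraction as the manual count in the loop above. A self-contained sketch with made-up labels; the alias avoids shadowing the local accuracy_score list:

```python
# Sketch only: sklearn's accuracy_score matches the manual np.count_nonzero ratio.
import numpy as np
from sklearn.metrics import accuracy_score as sk_accuracy_score

y_answer = np.array([0, 1, 1, 0, 1])   # hypothetical true CLASS labels
y_pred = np.array([0, 1, 0, 0, 1])     # hypothetical clf.predict() output
print(sk_accuracy_score(y_answer, y_pred) * 100)                   # 80.0
print(np.count_nonzero(y_pred == y_answer) / len(y_answer) * 100)  # 80.0
```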
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on
Created on 17.01.2021
@author: Feliks Kiszkurno
"""
import matplotlib.pyplot as plt
def plot_results():
def plot_results(accuracy_labels, accuracy_score):
    plt.figure()
    plt.scatter(accuracy_labels, accuracy_score)
    # Test names sit on the x-axis, the accuracy percentage on the y-axis
    plt.ylabel('Correct points [%]')
    plt.xlabel('Test name')
    plt.title('SVM classification accuracy')
    plt.savefig('results/figures/SVM.eps')
    plt.savefig('results/figures/SVM.pdf')
    plt.savefig('results/figures/SVM.png')
    return
@@ -5,3 +5,8 @@ Created on
@author:
"""
from .test_list import test_list
from .read_to_pandas import read_to_pandas
from .write import write
from .import_tests import import_tests
\ No newline at end of file
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 17.01.2021
@author: Feliks Kiszkurno
"""
def import_tests():
    return
@@ -6,4 +6,6 @@ Created on
@author:
"""
def
\ No newline at end of file
def read_to_pandas():
    return
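read_to_pandas is still a stub here; a minimal sketch of what it might grow into, assuming each test was written to a single CSV with a header row under results/results (the path test_list below also uses). The test_name parameter and the file layout are assumptions, not part of this commit:

```python
# Hypothetical sketch, not the committed implementation.
import pandas as pd

def read_to_pandas(test_name, path='results/results'):
    # Assumes a matching write() stored each test as <path>/<test_name>.csv
    data_set = pd.read_csv('{}/{}.csv'.format(path, test_name))
    return data_set
```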
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 17.01.2021
@author: Feliks Kiszkurno
"""
import os
def test_list(extension):
    path = 'results/results'
    file_list = os.listdir(path)
    test_names = []
    for file in file_list:
        # Only keep files that actually carry the requested extension;
        # str.find() would silently return -1 (and clip the name) otherwise.
        if extension in file:
            test_names.append(file[:file.find(extension)])
    # list.sort() sorts in place and returns None, so the result must not be
    # reassigned to test_names.
    test_names.sort()
    return test_names
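Taken together, test_list and a filled-in read_to_pandas could presumably rebuild a test_results dict from disk, perhaps inside import_tests above; a rough sketch assuming CSV results and the hypothetical read_to_pandas signature from the previous note:

```python
# Assumed workflow sketch, not committed code.
test_results = {}
for test_name in test_list('.csv'):
    test_results[test_name] = read_to_pandas(test_name)
```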
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on
Created on 17.01.2021
@author:
@author: Feliks Kiszkurno
"""
def read_to_pandas():
    return