Commit ed983480 authored by Alexander Henkel

experiment stuff

parent df6e38a8
This diff is collapsed.
%% Cell type:code id:dc2c77ba tags:
 
``` python
%load_ext autoreload
%autoreload 2
 
%matplotlib notebook
```
 
%% Cell type:code id:f21ac9e3 tags:
 
``` python
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display, Markdown
import copy
import dbm
import yaml  # used by yaml.safe_load in the setup cell below
```
 
%% Cell type:code id:4f6cc421 tags:
 
``` python
module_path = os.path.abspath(os.path.join('..'))
os.chdir(module_path)
if module_path not in sys.path:
    sys.path.append(module_path)
```
 
%% Cell type:code id:a8b45dae tags:
 
``` python
from personalization_tools.load_data_sets import *
from personalization_tools.helpers import *
from personalization_tools.learner_pipeline import LearnerPipeline, Evaluation
from personalization_tools.dataset_builder import *
from personalization_tools.model_evaluation import ModelEvaluation
from personalization_tools.sensor_recorder_data_reader import SensorRecorderDataReader
from personalization_tools.dataset_manager import DatasetManager
from personalization_tools.trainings_manager import TrainingsManager
from personalization_tools.pseudo_model_settings import pseudo_model_settings, pseudo_label_generators_to_int
from personalization_tools.pseudo_label_evaluation import *
```
 
%% Cell type:code id:12ee6c24 tags:
 
``` python
predictions_db = './data/cluster/personalization_evolution/evolution_predictions_db'
training_db = './data/cluster/personalization_evolution/evolution_training_db'
models_directory = './data/cluster/personalization_evolution/evolution_models/'
synthetic_dataset_db = './data/synthetic_dataset_db'
recorded_dataset_db = './data/recorded_dataset_db'
configuration_file = './data/cluster/personalization_evolution/personalization_evolution_config.yaml'
base_model_prefix = './data/'
```
 
%% Cell type:code id:aeefd6a9 tags:
 
``` python
predictions_db = './data/cluster/personalization_evolution_2/evolution_predictions_db'
training_db = './data/cluster/personalization_evolution_2/evolution_training_db'
models_directory = './data/cluster/personalization_evolution_2/evolution_models/'
synthetic_dataset_db = './data/synthetic_dataset_db'
recorded_dataset_db = './data/recorded_dataset_db_2'
configuration_file = './data/cluster/personalization_evolution_2/personalization_evolution_config.yaml'
base_model_prefix = './data/'
```
 
%% Cell type:code id:42e67d18 tags:
 
``` python
model_evaluation = ModelEvaluation()
model_evaluation.load_predictions(predictions_db)
training_manager = TrainingsManager(training_db)
 
synthetic_dataset_manager = DatasetManager(synthetic_dataset_db)
recorded_dataset_manager = DatasetManager(recorded_dataset_db)
training_config = dict()
with open(configuration_file, 'r') as config_file:
    training_config = yaml.safe_load(config_file)
 
all_models = [pred[0] for pred in model_evaluation.predictions.keys()]
```
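For orientation, here is a minimal sketch of the structure that the plotting helpers defined further down appear to expect from the YAML configuration. Only the nesting and key names are taken from the code in this notebook; every concrete value is a made-up placeholder, not content from the repository.

``` python
# Hypothetical illustration of training_config (placeholder values, assumed structure)
example_training_config = {
    'collection_configs': {
        'synthetic_collections': {
            '01_regularized': {
                'name': 'synthetic_01_regularized',            # matched against each trained model's collection_name
                'test_collection_name': 'synthetic_01_test',   # category looked up in the DatasetManager
                'base_model': 'DeepConvLSTMA_statedict',
                'base_inc_model': 'synthetic_inc_01',
            },
        },
        'recorded_collections': {
            2: {
                'name': 'recorded_02',
                'test_collection_name': '02_test',
                'base_model': 'DeepConvLSTMA_statedict',
                'base_inc_model': 'recorded_inc_02',
            },
        },
    },
}
```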
 
%% Cell type:code id:e6269abd tags:
 
``` python
model_evaluation.get_evaluations()
```
 
%% Output
 
<pandas.io.formats.style.Styler at 0x7f88203f1250>
 
%% Cell type:code id:ab4ce094 tags:
 
``` python
def get_test_collection_of_config(collection_config):
    if training_config['collection_configs']['synthetic_collections'] is not None:
        for col_conf in training_config['collection_configs']['synthetic_collections'].values():
            if col_conf['name'] == collection_config['name']:
                return list(synthetic_dataset_manager.filter_by_category(collection_config['test_collection_name']).values())
    return list(recorded_dataset_manager.filter_by_category(collection_config['test_collection_name']).values())


def get_evaluation_models_of_config(collection_config, generators, epochs):
    return get_models_with_infos({'collection_name': collection_config['name'], 'generators': generators, 'epochs': epochs}, training_manager.get_all_information(), all_models, allow_missing=False)


def plot_evolution(collection_conf, target_value, pseudo_label_conf, epochs, ax=None):
    test_collection = get_test_collection_of_config(collection_conf)
    # print(test_collection)
    collection_models = get_evaluation_models_of_config(collection_conf, pseudo_label_generators_to_int(pseudo_model_settings[pseudo_label_conf]), epochs)
    # print(collection_models)
    averages = calc_average_pseudo_model_evaluation(model_evaluation.predictions, target_value, collection_models, [str(ds) for ds in test_collection])
    iterative_averages = dict()
    single_averages = dict()
    for key, value in averages.items():
        if training_manager.get_all_information()[key]['iteration'] == 'single':
            single_averages[key] = value
        else:
            iterative_averages[key] = value

    # print(averages)

    iterative_averages_keys = list(iterative_averages.keys())
    iterative_averages_keys = sorted(iterative_averages_keys, key=lambda model_name: training_manager.get_all_information()[model_name]['iteration'])
    # print(iterative_averages_keys)

    single_averages_keys = list(single_averages.keys())
    single_averages_keys = sorted(single_averages_keys, key=lambda model_name: training_manager.get_all_information()[model_name]['based_on_iteration'])
    # print(single_averages_keys)

    learning_rate = np.array([iterative_averages[key] for key in iterative_averages_keys])
    learning_rate -= np.array([single_averages[key] for key in single_averages_keys])

    base_averages = calc_average_pseudo_model_evaluation(model_evaluation.predictions, target_value, [collection_conf['base_model'], collection_conf['base_inc_model']], [str(ds) for ds in test_collection])
    # print('base averages:', base_averages)

    if ax is None:
        fig, ax = plt.subplots(figsize=(9, 6))
    ax.set_title(collection_conf['name'] + ' epochs: ' + str(epochs))
    ax.set_xlabel('iteration step')
    ax.set_ylabel(target_value)

    ax.axhline(base_averages[collection_conf['base_model']], linestyle=':', label='base', color='red')
    ax.axhline(base_averages[collection_conf['base_inc_model']], linestyle=':', label='inc', color='green')
    ax.plot(np.arange(len(iterative_averages_keys)), [iterative_averages[key] for key in iterative_averages_keys], label='iterative')
    ax.plot(np.arange(len(single_averages_keys)), [single_averages[key] for key in single_averages_keys], label='single')
    # ax.plot(np.arange(learning_rate.shape[0]), learning_rate, label='learning rate')

    # ax.tick_params(labelrotation=75)
    plt.subplots_adjust(hspace=0.3)
    plt.subplots_adjust(bottom=0.1)
    if 'fig' in locals():
        fig.legend()
        fig.show()
    else:
        ax.legend()


def plot_evolution_over_epochs(collection_conf, target_value, pseudo_label_conf):
    epochs = (25, 50, 100, 150)
    fig, axes = plt.subplots(len(epochs), figsize=(9, 6 * len(epochs)))
    fig.suptitle(pseudo_label_conf, fontsize=16)
    for i, epoch in enumerate(epochs):
        plot_evolution(collection_conf, target_value, pseudo_label_conf, epoch, axes[i])
    # fig.legend()
    fig.show()


def filter_group_plot(config, collection_group, collection_name, target_value, filters):
    for filter_config in filters:
        plot_evolution_over_epochs(config['collection_configs'][collection_group][collection_name], target_value, filter_config)


def compare_collections(config, collection_1, collection_2, target_value, pseudo_label_conf):
    epochs = (25, 50, 100)
    fig, axes = plt.subplots(len(epochs) * 2, figsize=(9, 6 * len(epochs) * 2))
    fig.suptitle(pseudo_label_conf, fontsize=16)
    for i, epoch in enumerate(epochs):
        plot_evolution(config['collection_configs'][collection_1[0]][collection_1[1]], target_value, pseudo_label_conf, epoch, axes[i * 2])
        plot_evolution(config['collection_configs'][collection_2[0]][collection_2[1]], target_value, pseudo_label_conf, epoch, axes[i * 2 + 1])
```
 
%% Cell type:code id:4d4844f4 tags:
 
``` python
#filter_group_plot(training_config, 'synthetic_collections', 1, 's1', ['allnoise_correctedscore', 'allnoise_correctbyconvlstm3filter', 'alldeepconv_correctbyconvlstm3filter6'])
#filter_group_plot(training_config, 'synthetic_collections', '01_regularized', 's1', ['allnoise_correctedscore', 'allnoise_correctbyconvlstm3filter', 'alldeepconv_correctbyconvlstm3filter6'])
#plot_evolution_over_epochs(training_config['collection_configs']['synthetic_collections']['01_regularized'], 's1', 'allnoise_correctedscore')
#plot_evolution_over_epochs(training_config['collection_configs']['synthetic_collections']['01_regularized'], 's1', 'allnoise_correctbyconvlstm3filter')
#plot_evolution_over_epochs(training_config['collection_configs']['synthetic_collections']['01_regularized'], 's1', 'alldeepconv_correctbyconvlstm3filter6')
 
#plot_evolution_over_epochs(training_config['collection_configs']['synthetic_collections'][10], 'f1', 'allnoise_correctedscore')
#plot_evolution_over_epochs(training_config['collection_configs']['synthetic_collections']['10_regularized_regularized'], 'f1', 'allnoise_correctedscore')
#plot_evolution_over_epochs(training_config['collection_configs']['synthetic_collections'][10], 'f1', 'alldeepconv_correctbyconvlstm3filter6')
#plot_evolution_over_epochs(training_config['collection_configs']['synthetic_collections']['10_swapped'], 'f1', 'allnoise_correctedscore')
#plot_evolution_over_epochs(training_config['collection_configs']['synthetic_collections']['10_swapped'], 'f1', 'alldeepconv_correctbyconvlstm3filter6')
 
#plot_evolution_over_epochs(training_config['collection_configs']['recorded_collections'][1], 'f1', 'allnoise_correctedscore')
#plot_evolution_over_epochs(training_config['collection_configs']['recorded_collections'][1], 'f1', 'allnoise_correctbyconvlstm3filter')
#plot_evolution_over_epochs(training_config['collection_configs']['recorded_collections'][1], 'f1', 'alldeepconv_correctbyconvlstm3filter6')
 
#plot_evolution(training_config['collection_configs']['synthetic_collections'][1], 'f1', 'allnoise_correctedscore', 50)
#plot_evolution(training_config['collection_configs']['synthetic_collections']['01_regularized'], 'f1', 'allnoise_correctedhwgt', 100)
 
 
#compare_collections(training_config, ['synthetic_collections', 1], ['synthetic_collections', '01_regularized'], 'f1', 'alldeepconv_correctbyconvlstm3filter6')
# mpare_collections(training_config, ['synthetic_collections', 2], ['synthetic_collections', '02_regularized'], 'f1', 'alldeepconv_correctbyconvlstm3filter6')
 
# mpare_collections(training_config, ['recorded_collections', 1], ['recorded_collections', '01_regularized'], 'f1', 'alldeepconv_correctbyconvlstm3filter6')
compare_collections(training_config, ['recorded_collections', 2], ['recorded_collections', '02_regularized'], 'f1', 'alldeepconv_correctbyconvlstm3filter6')
 
#compare_collections(training_config, ['recorded_collections', 1], ['recorded_collections', '01_regularized'], 'specificity', 'alldeepconv_correctbyconvlstm3filter6')
 
```
 
%% Output
 
 
 
%% Cell type:code id:c4d792ed tags:
 
``` python
plot_evolution(training_config['collection_configs']['synthetic_collections'][11], 'f1', 'allnoise_correctedscore', 50)
plot_evolution(training_config['collection_configs']['synthetic_collections']['11_regularized'], 'f1', 'allnoise_correctedscore', 50)
```
 
%% Output
 
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-10-255874a4d5f2> in <module>
----> 1 plot_evolution(training_config['collection_configs']['synthetic_collections'][11], 'f1', 'allnoise_correctedscore', 50)
2 plot_evolution(training_config['collection_configs']['synthetic_collections']['11_regularized'], 'f1', 'allnoise_correctedscore', 50)
KeyError: 11
 
%% Cell type:code id:c1430ef2 tags:
 
``` python
plot_evolution(training_config['collection_configs']['synthetic_collections'][11], 'f1', 'allnoise_correctedscore', 50)
plot_evolution(training_config['collection_configs']['synthetic_collections']['11_regularized'], 'f1', 'allnoise_correctedscore', 50)
```
 
%% Cell type:code id:f06a47d1 tags:
 
``` python
plot_evolution(training_config['collection_configs']['synthetic_collections'][11], 'f1', 'allnoise_correctedscore', 50)
plot_evolution(training_config['collection_configs']['synthetic_collections']['11_regularized'], 'f1', 'allnoise_correctedscore', 50)
```
 
%% Cell type:code id:babebc45 tags:
 
``` python
```
This diff is collapsed.
%% Cell type:code id:b9f62c58 tags:
``` python
%load_ext autoreload
%autoreload 2
%matplotlib notebook
```
%% Cell type:code id:621e3e7f tags:
``` python
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display, Markdown
```
%% Cell type:code id:0dab0b49 tags:
``` python
module_path = os.path.abspath(os.path.join('..'))
os.chdir(module_path)
if module_path not in sys.path:
    sys.path.append(module_path)
```
%% Cell type:code id:16e7ff01 tags:
``` python
from personalization_tools.load_data_sets import *
from personalization_tools.helpers import *
from personalization_tools.learner_pipeline import LearnerPipeline
from personalization_tools.dataset_builder import *
from personalization_tools.personalizer import Personalizer
from personalization_tools.dataset_manager import DatasetManager
from personalization_tools.trainings_manager import TrainingsManager
from personalization_tools.sensor_recorder_data_reader import SensorRecorderDataReader
from personalization_tools.pseudo_model_settings import pseudo_model_settings
```
%% Cell type:code id:6f7b32bb tags:
``` python
WINDOW_LENGTH = 150
WINDOW_SHIFT = 75
inc_model_name = 'recorded_inc_01.pt'
training_run_name = '01_training'
inc_model_name = 'recorded_inc_02.pt'
training_run_name = '02_training'
base_model_name = './data/DeepConvLSTMA_statedict.pt'
```
%% Cell type:code id:3f93660a tags:
``` python
personalizer = Personalizer()
personalizer.initialize(base_model_name)
```
%% Cell type:code id:7e0d7f13 tags:
``` python
trainings_manager = TrainingsManager('./data/cluster/recorded_pseudo_training_db_2')
```
%% Cell type:code id:f0a6fd1b tags:
``` python
dataset_manager = DatasetManager('./data/recorded_dataset_db')
```
%% Cell type:code id:608252a0 tags:
``` python
record_reader = SensorRecorderDataReader('/home/alex/gitrepos/uni/MasterProject/Webserver/uploads/recordings')
```
%% Cell type:code id:7090d170 tags:
``` python
series = dataset_manager.filter_by_category(training_run_name).values()
```
%% Cell type:code id:cf454d2f tags:
``` python
series = record_reader.get_collection(['e5fbd0a6-11c9-409b-a2ba-0d392fdb0af6', '2252f3d0-cd90-42b4-9b4e-4172ed5fa847', '88134a7c-00f9-4b82-b579-95ef47cb8a20', '9c1913f3-e44d-40a0-a89a-feb2d3692494'])
```
%% Output
Load: e5fbd0a6-11c9-409b-a2ba-0d392fdb0af6
Load: 2252f3d0-cd90-42b4-9b4e-4172ed5fa847
Load: 88134a7c-00f9-4b82-b579-95ef47cb8a20
Load: 9c1913f3-e44d-40a0-a89a-feb2d3692494
%% Cell type:code id:ecbb38ae tags:
``` python
personalizer.plot_series(series)
```
%% Output
%% Cell type:code id:1f6f9f91 tags:
``` python
personalizer.incremental_learn_series_gt(series, save_model_as='./data/' + inc_model_name, epochs=100)
#trainings_manager.database['training_runs'][training_run_name] = []
#trainings_manager.database['training_runs'][training_run_name].append(inc_model_name)
# trainings_manager.db_update()
```
%% Output
Load base model: ./data/DeepConvLSTMA_statedict.pt
Train on: e5fbd0a6-11c9-409b-a2ba-0d392fdb0af6
tensor([ 0.5024, 102.6791])
Starting Training cuda
loss before training: 0.09575944393873215
F1-Score Val: 0.35294117647058826
spec Val 0.9641693811074918
F1-Score Val: 0.2857142857142857
spec Val 0.9838187702265372
sens Val 1.0
Train on: 2252f3d0-cd90-42b4-9b4e-4172ed5fa847
tensor([ 0.5021, 121.3232])
Train on: daf902bc-01a6-43c5-a04e-f784c8f13c49
tensor([ 0.5029, 87.6667])
Starting Training cuda
loss before training: 0.6541967988014221
F1-Score Val: 0.21052631578947367
spec Val 0.9663677130044843
sens Val 1.0
loss before training: 0.05737384781241417
stopped early! 55 0.06454429057107043
F1-Score Val: 0.0
spec Val 0
sens Val 0
Train on: 88134a7c-00f9-4b82-b579-95ef47cb8a20
tensor([ 0.5031, 81.8682])
Train on: 28fd37cc-c752-45ec-8adb-f6c008f72754
tensor([ 0.5012, 204.2273])
Starting Training cuda
loss before training: 0.1365356147289276
F1-Score Val: 0.2666666666666667
spec Val 0.959409594095941
sens Val 1.0
loss before training: 0.08396054059267044
F1-Score Val: 0.0
spec Val 0
sens Val 0
Train on: 9c1913f3-e44d-40a0-a89a-feb2d3692494
tensor([0.5336, 7.9348])
Train on: 8af26275-c037-4291-ab79-dcb050a4686d
tensor([ 0.5015, 161.8913])
Starting Training cuda
loss before training: 1.8791489601135254
loss before training: 0.04854266345500946
F1-Score Val: 0.0
spec Val 0.9375
sens Val 0.0
spec Val 0
sens Val 0
Train on: 2252f3d0-cd90-42b4-9b4e-4172ed5fa847
tensor([ 0.5021, 121.3232])
Starting Training cuda
loss before training: 0.06260821223258972
F1-Score Val: 0.2857142857142857
spec Val 0.9888143176733781
sens Val 1.0
%% Cell type:code id:56893cf5 tags:
``` python
del trainings_manager.database['training_runs'][training_run_name]
trainings_manager.db_update()
```
%% Cell type:code id:d7464036 tags:
``` python
personalizer.compare_model_evaluation_of_recordings(inc_model_name)
display(Markdown(personalizer.render_evaluation()))
```
%% Output
| name | specificity | sensitivity | f1 |
|:---- | ----------- | ----------- | ---------- |
| data: 34a0c627-45a9-4b96-9900-497907da94b1 model: DeepConvLSTMA_statedict | 0.913863 | 0.866667 | 0.063363 |
| data: 34a0c627-45a9-4b96-9900-497907da94b1 model: recorded_inc_alex_tmp | 0.978447 | 1.000000 | 0.238727 |
| data: b7584e6d-e21d-45cd-b1f7-3bac7b4c2612 model: DeepConvLSTMA_statedict | 0.890805 | 0.933333 | 0.486957 |
| data: b7584e6d-e21d-45cd-b1f7-3bac7b4c2612 model: recorded_inc_alex_tmp | 0.936782 | 0.966667 | 0.630435 |
| data: d729fbf5-48ed-4851-a5ea-eb41d2b1f688 model: DeepConvLSTMA_statedict | 0.947441 | 0.578947 | 0.037898 |
| data: d729fbf5-48ed-4851-a5ea-eb41d2b1f688 model: recorded_inc_alex_tmp | 0.989736 | 0.868421 | 0.230769 |
%% Cell type:code id:608d6b70 tags:
``` python
ax = personalizer.learner_pipeline.plot_dataset()
personalizer.learner_pipeline.plot_prediction(axes=ax)
```
%% Output
[[9.8303926e-01 1.6960735e-02]
[6.2543350e-01 3.7456647e-01]
[9.9613547e-01 3.8645437e-03]
...
[9.9962878e-01 3.7124348e-04]
[9.9982846e-01 1.7155796e-04]
[9.9764913e-01 2.3508898e-03]]
<AxesSubplot:>
%% Cell type:code id:9193d12e tags:
``` python
predictions = generate_predictions(series, base_model_name)
```
%% Output
run prediction on [cuda]: e5fbd0a6-11c9-409b-a2ba-0d392fdb0af6...0.6196446418762207 seconds
run prediction on [cuda]: 2252f3d0-cd90-42b4-9b4e-4172ed5fa847...0.8462226390838623 seconds
run prediction on [cuda]: 88134a7c-00f9-4b82-b579-95ef47cb8a20...1.0194389820098877 seconds
run prediction on [cuda]: 9c1913f3-e44d-40a0-a89a-feb2d3692494...0.030718564987182617 seconds
%% Cell type:code id:f7096db7 tags:
``` python
pseudo_filter = 'allnoise_correctedhwgt'
for dataset in series:
    dataset.generate_feedback_areas(prediction=predictions[dataset.name])
    dataset.apply_pseudo_label_generators(pseudo_model_settings[pseudo_filter])
```
%% Cell type:code id:d2d4574e tags:
``` python
inc_model_name = 'gt_test3.pt'
base_model_name = './data/DeepConvLSTMA_statedict.pt'
```
%% Cell type:code id:387d9e22 tags:
``` python
personalizer = Personalizer()
personalizer.initialize(base_model_name)
personalizer.incremental_learn_series_pseudo(series, save_model_as='./data/' + inc_model_name, epochs=100, use_regularization=False)
```
%% Output
Train on: e5fbd0a6-11c9-409b-a2ba-0d392fdb0af6
Starting Training cuda
loss before training: 0.09575944393873215
F1-Score Val: 0.5333333333333333
spec Val 0.9771241830065359
sens Val 1.0
Train on: 2252f3d0-cd90-42b4-9b4e-4172ed5fa847
Starting Training cuda
loss before training: 0.07606717944145203
F1-Score Val: 0.15384615384615385
spec Val 0.9753914988814317
sens Val 1.0
Train on: 88134a7c-00f9-4b82-b579-95ef47cb8a20
Starting Training cuda
loss before training: 0.025048060342669487
stopped early! 50 0.05347004958285835
F1-Score Val: 0.4
spec Val 0.961038961038961
sens Val 1.0
Train on: 9c1913f3-e44d-40a0-a89a-feb2d3692494
Starting Training cuda
loss before training: 0.5482482314109802
F1-Score Val: 0.0
spec Val 0
sens Val 0
%% Cell type:code id:6981b0c1 tags:
``` python
```
@@ -15,7 +15,7 @@ def filter_possible_handwashes(dataset, prediction):
         min_val = np.min(dataset.x_data[ts - 75:ts], axis=0)
         max_val = np.max(dataset.x_data[ts - 75:ts], axis=0)
         diff = np.absolute(max_val - min_val)
-        if np.any(diff > 10):
+        if np.any(diff > 13):
             possible_spots.append(int(ts / 75))
     for possible_spot in possible_spots:
         for i in range(0, 5):
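For context, the condition changed above screens candidate timestamps by how much the raw signal moved in the 75 samples before the candidate; the commit raises that per-axis min/max spread threshold from 10 to 13. A small self-contained sketch of the idea (array shapes and the helper name are assumptions, not the repository's API):

``` python
import numpy as np

def spread_exceeds_threshold(x_data, ts, threshold=13.0):
    """Hypothetical helper: True if any axis moved by more than `threshold`
    within the 75 samples before index `ts`."""
    window = x_data[ts - 75:ts]                       # shape (75, n_axes)
    diff = np.abs(window.max(axis=0) - window.min(axis=0))
    return bool(np.any(diff > threshold))

flat = np.zeros((200, 6))                             # no motion -> rejected
burst = flat.copy()
burst[150:160, 2] = 20.0                              # strong motion on one axis -> accepted
print(spread_exceeds_threshold(flat, 190), spread_exceeds_threshold(burst, 190))  # False True
```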
@@ -41,7 +41,8 @@ def build_simulated_running_mean(dataset, model_name, kernel_size=20, kernel_thr
     kernel = np.ones(kernel_size) / kernel_size
     mean = np.convolve(prediction[:, 1], kernel, mode='same')
     r_mean = np.empty_like(mean)
     # r_mean[kernel_size:] = mean[:-kernel_size]
+    r_mean[kernel_size:] = mean[:-kernel_size]
     #r_mean = mean
     return r_mean, prediction
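The newly enabled assignment delays the smoothed prediction by `kernel_size` steps, so the simulated running mean at step t only reflects predictions up to t − kernel_size, presumably to mimic a causal on-device mean. A short sketch of that shift (the random input is just a stand-in for model output):

``` python
import numpy as np

kernel_size = 20
prediction = np.random.rand(100, 2)                 # stand-in for per-window class probabilities
kernel = np.ones(kernel_size) / kernel_size
mean = np.convolve(prediction[:, 1], kernel, mode='same')
r_mean = np.empty_like(mean)
r_mean[kernel_size:] = mean[:-kernel_size]          # the line this commit activates: shift by kernel_size
# Note: as in the original code, the first kernel_size entries stay uninitialised.
print(np.allclose(r_mean[kernel_size:], mean[:-kernel_size]))  # True
```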
@@ -58,15 +59,18 @@ def get_triggers_on_running_mean(dataset, r_mean, kernel_threshold=0.59):
     correct_triggers = 0
     correct_trigger_spots = []
     detected_regions = []
+    prev_pos_trigger_spot = 0
     for trigger_spot in trigger_spots[:]:
         for region in dataset.feedback_areas.labeled_regions_hw:
-            if region[0] <= trigger_spot <= region[1] + 10:
-                if region in detected_regions:
+            if region[0] <= trigger_spot <= region[1] + 14:
+                if trigger_spot - prev_pos_trigger_spot < 18:
                     trigger_spots.remove(trigger_spot)
+                    prev_pos_trigger_spot = trigger_spot
                 else:
                     correct_triggers += 1
                     correct_trigger_spots.append(trigger_spot)
                     detected_regions.append(region)
+                    prev_pos_trigger_spot = trigger_spot
                 break
     return trigger_spots, correct_triggers, correct_trigger_spots
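The reworked loop above replaces the per-region de-duplication with a time-based debounce: a trigger inside a labelled handwash region only counts if it comes at least 18 steps after the previous in-region trigger, and the accepted region tail grows from +10 to +14. A toy illustration of that debounce (function and variable names are invented for the example, not the repository's API):

``` python
def count_debounced_triggers(trigger_spots, regions, min_gap=18, tail=14):
    """Hypothetical re-implementation of the counting rule for illustration."""
    spots = list(trigger_spots)
    correct, prev = 0, 0
    for spot in list(spots):
        for start, end in regions:
            if start <= spot <= end + tail:
                if spot - prev < min_gap:
                    spots.remove(spot)      # too close to the previous hit -> dropped
                else:
                    correct += 1            # counted as a new correct trigger
                prev = spot
                break
    return spots, correct

# Two hits only 10 steps apart inside one region: the second one is debounced.
print(count_debounced_triggers([100, 110, 140], [(95, 130)]))  # ([100, 140], 2)
```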
@@ -93,7 +97,7 @@ def calc_scores(results, allow_neutral_correct_triggers=False):
     for (target_kernel_size, target_kernel_threshold), (false_diff_relative, correct_diff_relative) in results.items():
         score = calc_settings_score(false_diff_relative, correct_diff_relative,
                                     allow_neutral_correct_triggers=allow_neutral_correct_triggers)
-        print('ft:', false_diff_relative, 'ct:', correct_diff_relative, 'score:', score)
+        # print('ft:', false_diff_relative, 'ct:', correct_diff_relative, 'score:', score)
         scores.append(
             (score, (target_kernel_size, target_kernel_threshold, false_diff_relative, correct_diff_relative)))
         if score > best_score:
@@ -101,8 +105,9 @@ def calc_scores(results, allow_neutral_correct_triggers=False):
     return scores, best_score

 def calc_best_running_mean_settings(all_datasets: List[Dataset], base_model_path: str, new_model_path: str,
-                                    base_kernel_with, base_kernel_threshold, prediction_buffer=None):
-    print(all_datasets)
+                                    base_kernel_with, base_kernel_threshold, prediction_buffer=None, do_print=True):
+    if do_print:
+        print(all_datasets)
     if prediction_buffer is None:
         prediction_buffer = dict()
     base_false_triggers = 0
@@ -117,7 +122,8 @@ def calc_best_running_mean_settings(all_datasets: List[Dataset], base_model_path
         base_false_triggers = 1
     if base_correct_triggers == 0:
         base_correct_triggers = 1
-    print('base ft:', base_false_triggers, 'base ct:', base_correct_triggers)
+    if do_print:
+        print('base ft:', base_false_triggers, 'base ct:', base_correct_triggers)

     target_kernel_size = 20
     results = dict()
@@ -149,7 +155,8 @@ def calc_best_running_mean_settings(all_datasets: List[Dataset], base_model_path
     scores, best_score = calc_scores(results)
     if best_score == 0:
-        print('include same positive triggers')
+        if do_print:
+            print('include same positive triggers')
         scores, best_score = calc_scores(results, allow_neutral_correct_triggers=True)

     scores = sorted(scores, key=lambda scores: scores[0], reverse=True)
@@ -241,12 +248,13 @@ def plot_hw_feedback_areas(dataset):
     return fig


-def calc_quality_difference(dataset: Dataset, base_model: str, inc_model: str, kernel_size=20, kernel_threshold=0.59):
+def calc_quality_difference(dataset: Dataset, base_model: str, inc_model: str, kernel_size=20, kernel_threshold=0.59,
+                            prediction_buffer=None):
     r_mean, prediction = build_simulated_running_mean(dataset, base_model, kernel_size=20,
-                                                      kernel_threshold=0.59, prediction_buffer=None)
+                                                      kernel_threshold=0.59, prediction_buffer=prediction_buffer)
     base_trigger_spots, base_correct_triggers, _ = get_triggers_on_running_mean(dataset, r_mean, 0.59)
     r_mean, prediction = build_simulated_running_mean(dataset, inc_model, kernel_size=kernel_size,
-                                                      kernel_threshold=kernel_threshold, prediction_buffer=None)
+                                                      kernel_threshold=kernel_threshold, prediction_buffer=prediction_buffer)
     trigger_spots, correct_triggers, _ = get_triggers_on_running_mean(dataset, r_mean, kernel_threshold=kernel_threshold)
     return len(trigger_spots) - len(base_trigger_spots), correct_triggers - base_correct_triggers
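The last hunk threads an optional `prediction_buffer` through `calc_quality_difference`, so repeated quality comparisons can reuse cached model predictions instead of re-running inference for every kernel setting. The pattern is plain memoization, sketched here with invented names rather than the repository's functions:

``` python
def predict_cached(dataset_name, model_path, prediction_buffer, run_inference):
    """Hypothetical cache wrapper: run inference once per (dataset, model) pair."""
    key = (dataset_name, model_path)
    if key not in prediction_buffer:
        prediction_buffer[key] = run_inference(dataset_name, model_path)
    return prediction_buffer[key]

calls = []
fake_inference = lambda d, m: calls.append((d, m)) or [0.1, 0.9]
buffer = {}
predict_cached('rec-a', 'base.pt', buffer, fake_inference)
predict_cached('rec-a', 'base.pt', buffer, fake_inference)   # served from the buffer
print(len(calls))  # 1 -> the second call reused the cached prediction
```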