Commit 660c9d2a authored by Alexander Henkel

bugfixing

parent a4241dd1
# Created by .ignore support plugin (hsz.mobi)
### Python template
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# IPython Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# dotenv
.env
# virtualenv
venv/
ENV/
# Spyder project settings
.spyderproject
# Rope project settings
.ropeproject
### VirtualEnv template
# Virtualenv
# http://iamzed.com/2009/05/07/a-primer-on-virtualenv/
[Bb]in
[Ii]nclude
[Ll]ib
[Ll]ib64
[Ll]ocal
[Ss]cripts
pyvenv.cfg
.venv
pip-selfcheck.json
### JetBrains template
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf
# AWS User-specific
.idea/**/aws.xml
# Generated files
.idea/**/contentModel.xml
# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml
# Gradle
.idea/**/gradle.xml
.idea/**/libraries
# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr
# CMake
cmake-build-*/
# Mongo Explorer plugin
.idea/**/mongoSettings.xml
# File-based project format
*.iws
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# SonarLint plugin
.idea/sonarlint/
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
# Editor-based Rest Client
.idea/httpRequests
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser
# idea folder, uncomment if you don't need it
# .idea
*.bundle.*
lib/
node_modules/
*.egg-info/
.ipynb_checkpoints
*.tsbuildinfo
# Created by https://www.gitignore.io/api/python
# Edit at https://www.gitignore.io/?templates=python
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# OS X stuff
*.DS_Store
# End of https://www.gitignore.io/api/python
_temp_extension
junit.xml
[uU]ntitled*
notebook/static/*
!notebook/static/favicons
notebook/labextension
notebook/schemas
docs/source/changelog.md
docs/source/contributing.md
# playwright
ui-tests/test-results
ui-tests/playwright-report
# VSCode
.vscode
(Four additional source diffs are too large to display; view the blobs instead.)
@@ -424,6 +424,9 @@ class SyntheticDataset(Dataset):
return ax
def randomize_evaluation(self, evaluation_reliability_no=1, evaluation_reliability_yes=1):
super().randomize_evaluation(evaluation_reliability_no, evaluation_reliability_yes)
# self.set_indicators(self.indicators)
class ManualDataset(Dataset):
@@ -79,7 +79,6 @@ class SensorRecorderDataReader:
evaluation_indices[i, 0] = np.argmax(processor[RecordingEntry.ACCELERATION][:, 0] >= ts)
evaluation_indices[i, 1] = evaluations[i, 1]
evaluation_indices[:, 0] = np.floor(evaluation_indices[:, 0] / self.window_shift)
indicators = (manual_hw_indices, evaluation_indices)
return indicators
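For context, a minimal self-contained sketch of the timestamp-to-window mapping this hunk touches: each evaluation timestamp is resolved to the first acceleration sample at or after it, and that sample index is then divided by the window shift to obtain a window index. The column layout (timestamps in column 0) and the helper name are illustrative assumptions, not part of the repository.

```python
import numpy as np

# Hypothetical helper mirroring the mapping above; window_shift is assumed to be
# the number of samples between consecutive analysis windows.
def timestamps_to_window_indices(sample_timestamps, evaluation_timestamps, window_shift):
    indices = np.empty(len(evaluation_timestamps), dtype=int)
    for i, ts in enumerate(evaluation_timestamps):
        # np.argmax on a boolean array returns the first True, i.e. the first
        # sample whose timestamp is >= ts.
        indices[i] = np.argmax(sample_timestamps >= ts)
    # Convert sample indices to window indices, as in the diff.
    return np.floor(indices / window_shift).astype(int)

# Example: 100 Hz samples, windows shifted by 50 samples.
sample_ts = np.arange(0.0, 10.0, 0.01)
print(timestamps_to_window_indices(sample_ts, np.array([1.23, 4.56]), 50))  # -> [2 9]
```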
@@ -3,6 +3,7 @@ import copy
from typing import List
import numpy as np
from personalization_tools.globals import Indicators
from personalization_tools.personalizer import Personalizer
from personalization_tools.dataset_manager import DatasetManager
from personalization_tools.helpers import Evaluation, generate_predictions
@@ -96,7 +97,6 @@ observed_models = []
model_predictions = dict()
global target_pseudo_model_settings
target_pseudo_model_settings = list(pseudo_model_settings.items())
def gen_name(base_name, additional_info):
@@ -130,6 +130,9 @@ def calc_relative_training_size():
overall_size /= len(collection)
null_size /= len(collection)
hw_size /= len(collection)
# print({'hw_regions': hw_regions, 'null_regions': null_regions, 'neut_regions': neut_regions})
return {'overall_size': overall_size, 'null_size': null_size, 'hw_size': hw_size, 'hw_regions': hw_regions,
'null_regions': null_regions, 'neut_regions': neut_regions}
@@ -175,8 +178,8 @@ def train_model(model_name):
if not skip_model and (not skip_existing or model_name not in trainings_manager.database['training_runs'][
training_run_name] or force_model):
print('train:', model_name)
# personalizer.incremental_learn_series_pseudo(collection,
# save_model_as=models_directory + model_name, epochs=100)
personalizer.incremental_learn_series_pseudo(collection,
save_model_as=models_directory + model_name, epochs=100)
if model_name not in trainings_manager.database['training_runs'][training_run_name]:
trainings_manager.database['training_runs'][training_run_name].append(model_name)
else:
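For readability, a hedged sketch of the guard this hunk re-enables, assuming trainings_manager.database is a dict of lists keyed by the training run name (as the surrounding lines suggest); the incremental_learn_series_pseudo call is copied from the diff, the wrapper function itself is illustrative.

```python
# Illustrative wrapper around the training guard shown in the hunk; the
# personalizer and trainings_manager objects are assumed to behave as in the diff.
def maybe_train(model_name, collection, personalizer, trainings_manager,
                training_run_name, models_directory,
                skip_model=False, skip_existing=True, force_model=False):
    trained = trainings_manager.database['training_runs'][training_run_name]
    if skip_model or (skip_existing and model_name in trained and not force_model):
        return  # nothing to do: model already exists and no retraining was forced
    print('train:', model_name)
    personalizer.incremental_learn_series_pseudo(
        collection, save_model_as=models_directory + model_name, epochs=100)
    if model_name not in trained:
        trained.append(model_name)
```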
@@ -236,13 +239,13 @@ def start_training():
if 'random' in training_run_name:
target_randoms = list()
global target_pseudo_model_settings
# target_pseudo_model_selection = ['allnoise_correctedhwgt', 'allnoise_correctedscore',
# 'allnoise_correctbydeepconvfilter', 'allnoise_correctbyfcndaefilter',
# 'allnoise_correctbyconvlstmfilter', 'allnoise_correctbyconvlstm2filter',
# 'allnoise_correctbyconvlstm3filter', 'alldeepconv_correctbyconvlstm3filter',
# 'alldeepconv_correctbyconvlstm2filter6', 'alldeepconv_correctbyconvlstm3filter6']
target_pseudo_model_selection = ['allnoise_correctedhwgt', 'allnoise_correctedscore',
'allnoise_correctbydeepconvfilter', 'allnoise_correctbyfcndaefilter',
'allnoise_correctbyconvlstmfilter', 'allnoise_correctbyconvlstm2filter',
'allnoise_correctbyconvlstm3filter', 'alldeepconv_correctbyconvlstm3filter',
'alldeepconv_correctbyconvlstm2filter6', 'alldeepconv_correctbyconvlstm3filter6']
target_pseudo_model_selection = ['alldeepconv_correctbyconvlstm3filter6']
# target_pseudo_model_selection = ['allnoise_correctbyconvlstm2filter', 'alldeepconv_correctbyconvlstm3filter6']
target_pseudo_model_settings = [(key, value) for key, value in pseudo_model_settings.items() if
key in target_pseudo_model_selection]
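The selection step above is a plain dict filter; a minimal sketch under the assumption that pseudo_model_settings is an ordinary dict mapping model names to their settings (as the .items() call implies):

```python
# Keep only the (name, settings) pairs whose name appears in the chosen selection,
# mirroring the list comprehension in the diff.
def select_pseudo_models(pseudo_model_settings, selection):
    return [(name, settings) for name, settings in pseudo_model_settings.items()
            if name in selection]

settings = {'allnoise_correctedhwgt': {}, 'alldeepconv_correctbyconvlstm3filter6': {}}
print(select_pseudo_models(settings, ['alldeepconv_correctbyconvlstm3filter6']))
```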
@@ -257,7 +260,7 @@ def start_training():
for target_random in target_randoms:
info = {'random_no': target_random[0], 'random_yes': target_random[1]}
print('random target:', target_random)
#print('random target:', target_random)
for dataset in collection:
if isinstance(dataset, SyntheticDataset):
@@ -281,11 +284,12 @@ def start_training():
# evaluation_reliability_no=target_random[0],
# evaluation_reliability_yes=target_random[1],
# clear_just_covered=False)
print(dataset.indicators[1])
hw_indicators_hw = dataset.get_indicators()[1][dataset.get_indicators()[1][:, 1] == Indicators.HAND_WASH]
#print(hw_indicators_hw.shape)
if dataset.name not in indicator_backups:
indicator_backups[dataset.name] = (dataset.indicators[0].copy(), dataset.indicators[1].copy())
indicator_backups[dataset.name] = (dataset.get_indicators()[0].copy(), dataset.get_indicators()[1].copy())
else:
dataset.indicators = (indicator_backups[dataset.name][0].copy(), indicator_backups[dataset.name][1].copy())
dataset.set_indicators((indicator_backups[dataset.name][0].copy(), indicator_backups[dataset.name][1].copy()))
dataset.randomize_evaluation(evaluation_reliability_no=target_random[0],
evaluation_reliability_yes=target_random[1])
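To summarize the fix in this hunk, a hedged sketch of the backup/restore pattern: on the first pass a dataset's indicator tuple is copied away, on later passes the copy is restored through set_indicators before randomize_evaluation is applied again, so successive randomizations do not compound. get_indicators, set_indicators and randomize_evaluation are taken from the diff; the surrounding function is illustrative.

```python
# Illustrative re-randomization step; indicator_backups maps dataset names to the
# pristine (manual_hw_indices, evaluation_indices) tuple of numpy arrays.
indicator_backups = {}

def rerandomize(dataset, reliability_no, reliability_yes):
    if dataset.name not in indicator_backups:
        # First pass: remember the original indicators before they are randomized.
        indicator_backups[dataset.name] = (dataset.get_indicators()[0].copy(),
                                           dataset.get_indicators()[1].copy())
    else:
        # Later passes: restore the originals so each randomization starts clean.
        dataset.set_indicators((indicator_backups[dataset.name][0].copy(),
                                indicator_backups[dataset.name][1].copy()))
    dataset.randomize_evaluation(evaluation_reliability_no=reliability_no,
                                 evaluation_reliability_yes=reliability_yes)
```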