@article{wagner_kavagait_2018,
  title = {{KAVAGait}: {Knowledge}-{Assisted} {Visual} {Analytics} for {Clinical} {Gait} {Analysis}},
  volume = {25},
  number = {3},
  pages = {1528--1542},
  journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG)},
  author = {Wagner, Markus and Slijepcevic, Djordje and Horsak, Brian and Rind, Alexander and Zeppelzauer, Matthias and Aigner, Wolfgang},
  year = {2018},
  url = {https://doi.org/10.1109/TVCG.2017.2785271},
  doi = {10/ghppzn},
  abstract = {In 2014, more than 10 million people in the US were affected by an ambulatory disability. Thus, gait rehabilitation is a crucial part of health care systems. The quantification of human locomotion enables clinicians to describe and analyze a patient’s gait performance in detail and allows them to base clinical decisions on objective data. These assessments generate a vast amount of complex data which need to be interpreted in a short time period. We conducted a design study in cooperation with gait analysis experts to develop a novel Knowledge-Assisted Visual Analytics solution for clinical Gait analysis (KAVAGait). KAVAGait allows the clinician to store and inspect complex data derived during clinical gait analysis. The system incorporates innovative and interactive visual interface concepts, which were developed based on the needs of clinicians. Additionally, an explicit knowledge store (EKS) allows externalization and storage of implicit knowledge from clinicians. It makes this information available for others, supporting the process of data inspection and clinical decision making. We validated our system by conducting expert reviews, a user study, and a case study. Results suggest that KAVAGait is able to support a clinician during clinical practice by visualizing complex gait data and providing knowledge of other clinicians.},
  note = {Projekt: KAVA-Time; Projekt: IntelliGait; Projekt: CARMA; Projekt: DHLab},
  keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, DHLab, Design Study, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Healthcare, Human Gait Analysis, Human-Computer Interaction, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Visual analytics, Wiss. Beitrag, best, best-bhorsak, best-lbaigner, best-lbwagnerm, best-mzeppelzauer, information visualization, knowledge generation, peer-reviewed},
}
@article{slijepcevic_automatic_2018,
  title = {Automatic {Classification} of {Functional} {Gait} {Disorders}},
  volume = {22},
  number = {5},
  pages = {1653--1661},
  issn = {2168-2194},
  url = {https://arxiv.org/abs/1712.06405},
  doi = {10/ghz24w},
  urldate = {2017-12-21},
  journal = {IEEE Journal of Biomedical and Health Informatics},
  author = {Slijepcevic, Djordje and Zeppelzauer, Matthias and Raberger, Anna-Maria and Schwab, Caterine and Schuller, Michael and Baca, Arnold and Breiteneder, Christian and Horsak, Brian},
  year = {2018},
  note = {Projekt: IntelliGait; Projekt: CARMA; Projekt: DHLab},
  keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, DHLab, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Wiss. Beitrag, best, best-bhorsak, best-mzeppelzauer, peer-reviewed},
}

@inproceedings{slijepcevic_ground_2017,
  address = {Trondheim, Norway},
  title = {Ground reaction force measurements for gait classification tasks: {Effects} of different {PCA}-based representations},
  volume = {57},
  pages = {4--5},
  booktitle = {Gait \& {Posture} {Supplement}},
  author = {Slijepcevic, Djordje and Horsak, Brian and Schwab, Caterine and Raberger, Anna-Maria and Schüller, Michael and Baca, Arnold and Breiteneder, Christian and Zeppelzauer, Matthias},
  year = {2017},
  url = {http://www.gaitposture.com/article/S0966-6362(17)30712-9/pdf},
  doi = {10.1016/j.gaitpost.2017},
  note = {Projekt: IntelliGait; Projekt: DHLab},
  keywords = {2017, Biofeedback, Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, Creative Industries, DHLab, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, Pattern recognition, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Wiss. Beitrag, best, best-bhorsak, peer-reviewed, project\_carma, project\_intelligait, ⚠️ Invalid DOI},
}
@article{bernard_vial_2018,
  title = {{VIAL} – {A} {Unified} {Process} for {Visual}-{Interactive} {Labeling}},
  volume = {34},
  number = {1189},
  pages = {16},
  journal = {The Visual Computer},
  author = {Bernard, Jürgen and Zeppelzauer, Matthias and Sedlmair, Michael and Aigner, Wolfgang},
  year = {2018},
  copyright = {Springer, Berlin, Heidelberg},
  issn = {1432-2315},
  url = {https://bit.ly/2My1Yrt},
  doi = {10/gd5hr3},
  abstract = {The assignment of labels to data instances is a fundamental prerequisite for many machine learning tasks. Moreover, labeling is a frequently applied process in visual-interactive analysis approaches and visual analytics. However, the strategies for creating labels usually differ between these two fields. This raises the question whether synergies between the different approaches can be attained. In this paper, we study the process of labeling data instances with the user in the loop, from both the machine learning and visual-interactive perspective. Based on a review of differences and commonalities, we propose the ‘Visual-Interactive Labeling’ (VIAL) process that unifies both approaches. We describe the six major steps of the process and discuss their specific challenges. Additionally, we present two heterogeneous usage scenarios from the novel VIAL perspective, one on metric distance learning and one on object detection in videos. Finally, we discuss general challenges to VIAL and point out necessary work for the realization of future VIAL approaches.},
  note = {Projekt: KAVA-Time; Projekt: IntelliGait; Projekt: CARMA},
  keywords = {Active Learning, Candidate Selection, Center for Artificial Intelligence, Creative Industries, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Interactive Labeling, Labeling Strategies, Machine Learning, Media Computing Group, Visual Interactive Labeling, best, best-mzeppelzauer, information visualization},
}