% Encoding: utf-8
@InProceedings{beetz07assistive,
author = {Michael Beetz and Jan Bandouch and Alexandra Kirsch and Alexis Maldonado and Armin M{\"u}ller and Radu Bogdan Rusu},
title = {The Assistive Kitchen --- A Demonstration Scenario for Cognitive Technical Systems},
booktitle = {Proceedings of the 4th COE Workshop on Human Adaptive Mechatronics (HAM)},
year = {2007},
bib2html_pubtype = {Workshop Paper},
bib2html_rescat = {Planning, Learning, Action, Perception, Models, Reasoning},
bib2html_groups = {Cogito, Memoman, Cogman},
abstract = { This paper introduces the Assistive
Kitchen as a comprehensive demonstration and challenge scenario
for technical cognitive systems. We describe its hardware and
software infrastructure. Within the Assistive Kitchen
application, we select particular domain activities as research
subjects and identify the cognitive capabilities needed for
perceiving, interpreting, analyzing, and executing these activities
as research foci. We conclude by outlining open research issues
that need to be solved to realize the scenarios successfully.}
}
@InProceedings{beetz08assistive,
author = {Michael Beetz and Freek Stulp and Bernd Radig and Jan Bandouch and Nico Blodow and Mihai Dolha and Andreas Fedrizzi and Dominik Jain and Uli Klank and Ingo Kresse and Alexis Maldonado and Zoltan Marton and Lorenz M{\"o}senlechner and Federico Ruiz and Radu Bogdan Rusu and Moritz Tenorth},
title = {{The Assistive Kitchen -- A Demonstration Scenario for Cognitive Technical Systems}},
booktitle = {IEEE 17th International Symposium on Robot and Human Interactive Communication (RO-MAN), M{\"u}nchen, Germany},
pages = {1--8},
year = {2008},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Planning, Learning},
bib2html_groups = {Memoman, Cogito, EnvMod, Cogman, K4C, ProbCog},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
note = {Invited paper.},
}
@InProceedings{bandouch08bmvc,
author = {Jan Bandouch and Florian Engstler and Michael Beetz},
title = {{Evaluation of Hierarchical Sampling Strategies in 3D Human Pose Estimation}},
year = {2008},
booktitle = {Proceedings of the 19th British Machine Vision Conference (BMVC)},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception},
bib2html_groups = {Memoman},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
abstract = {A common approach to the problem of 3D human pose estimation from video
is to recursively estimate the most likely pose via particle filtering.
However, standard particle filtering methods fail the task due to the high
dimensionality of the 3D articulated human pose space.
In this paper we present a thorough evaluation of two variants of particle
filtering, namely Annealed Particle Filtering and Partitioned Sampling
Particle Filtering, that have been proposed to make the problem feasible by
exploiting the hierarchical structures inside the pose space. We evaluate
both methods in the context of markerless model-based 3D motion capture
using silhouette shapes from multiple cameras. For that we created a
simulation from ground truth sequences of human motions, which enables
us to focus our evaluation on the sampling capabilities of the approaches,
i.e. on how efficiently particles are spread towards the modes of the
distribution. We show the behaviour with respect to the number of cameras
used, the number of particles used, as well as the dimensionality of the
search space. Of particular interest in this work is the performance when
using more complex human models (40 DOF and above), which are able to
capture human movements with higher precision than previous approaches.
In summary, we show that both methods have complementary strengths, and
propose a combined method that is able to perform the tracking task with
higher robustness despite reduced computational effort.}
}
@InProceedings{bandouch08amdo,
author = {Jan Bandouch and Florian Engstler and Michael Beetz},
title = {Accurate Human Motion Capture Using an Ergonomics-Based Anthropometric Human Model},
year = {2008},
booktitle = {Proceedings of the Fifth International Conference on Articulated Motion and Deformable Objects (AMDO)},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception},
bib2html_groups = {Memoman},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
abstract = {In this paper we present our work on markerless model-based 3D
human motion capture using multiple cameras. We use an industry
proven anthropometric human model that was modeled taking ergonomic
considerations into account. The outer surface consists of a precise
yet compact 3D surface mesh that is mostly rigid on body part level
apart from some small but important torsion deformations. The benefits
are the ability to capture a wide range of possible human
appearances with high accuracy while still having a simple-to-use
and computationally efficient model. We have introduced special
optimizations such as caching into the model to improve its
performance in tracking applications. Available force and comfort
measures within the model provide further opportunities for future
research.
3D articulated pose estimation is performed in a Bayesian framework,
using a set of hierarchically coupled local particle filters for
tracking. This makes it possible to sample efficiently from the
high-dimensional space of articulated human poses without constraining
the allowed movements. Sequences of tracked upper-body as well as
full-body motions captured by three cameras show promising results.
Despite the high dimensionality of our model (51 DOF), we succeed
at tracking using only silhouette overlap as the weighting function,
owing to the precise outer appearance of our model and the
hierarchical decomposition.}
}
@InProceedings{Rusu08ROMAN,
author = {Radu Bogdan Rusu and Jan Bandouch and Zoltan Csaba Marton and Nico Blodow and Michael Beetz},
title = {{Action Recognition in Intelligent Environments using Point Cloud Features Extracted from Silhouette Sequences}},
booktitle = {IEEE 17th International Symposium on Robot and Human Interactive Communication (RO-MAN), M{\"u}nchen, Germany},
year = {2008},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception},
bib2html_groups = {Memoman, EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
abstract = {
In this paper we present our work on human action recognition in intelligent
environments. We classify actions by looking at a time-sequence of
silhouettes extracted from various camera images. By treating time as the
third spatial dimension we generate so-called space-time shapes that contain
rich information about the actions. We propose a novel approach for
recognizing actions, by representing the shapes as 3D point clouds and
estimating feature histograms for them. Preliminary results show that our
method robustly derives different classes of actions, even in the presence
of large variability in the data coming from different persons at different
time intervals.
}
}
@InProceedings{bandouch09hci,
author = {Jan Bandouch and Michael Beetz},
title = {Tracking Humans Interacting with the Environment Using Efficient Hierarchical Sampling and Layered Observation Models},
booktitle = {IEEE International Workshop on Human-Computer Interaction (HCI), in conjunction with ICCV2009},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception},
bib2html_groups = {Memoman},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
abstract = { We present a markerless tracking system for unconstrained human
motions typical of everyday manipulation tasks. Our
system is capable of tracking a high-dimensional human model
(51 DOF) without constraining the type of motion and without the need
for training sequences. The system reliably tracks humans that
frequently interact with the environment, that manipulate objects,
and that can be partially occluded by the environment.
We describe and discuss two key components that substantially
contribute to the accuracy and reliability of the system. First, a
sophisticated hierarchical sampling strategy for recursive Bayesian
estimation that combines partitioning with annealing strategies to enable
efficient search in the presence of many local maxima. Second, a simple yet
effective appearance model that allows for the combination of shape and
appearance masks to implicitly deal with two cases of environmental occlusions
by (1) subtracting dynamic non-human objects from the region of
interest and (2) modeling objects (e.g. tables) that both occlude and
can be occluded by human subjects. The appearance model is based on
bit representations, which makes our algorithm well suited for
implementation on highly parallel hardware such as commodity GPUs.
Extensive evaluations on the HumanEva2 benchmarks show the potential
of our method when compared to state-of-the-art Bayesian techniques.
Besides the HumanEva2 benchmarks, we present results on more
challenging sequences, including table-setting tasks in a kitchen
environment and persons getting into and out of a car mock-up.}
}
@InProceedings{tenorth09dataset,
author = {Moritz Tenorth and Jan Bandouch and Michael Beetz},
title = {{The {TUM} Kitchen Data Set of Everyday Manipulation Activities for Motion Tracking and Action Recognition}},
booktitle = {IEEE International Workshop on Tracking Humans for the Evaluation of their Motion in Image Sequences (THEMIS), in conjunction with ICCV2009},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception},
bib2html_groups = {Memoman, K4C},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
abstract = {We introduce the publicly available TUM Kitchen Data Set as a comprehensive
collection of activity sequences recorded in a kitchen environment equipped
with multiple complementary sensors. The recorded data consists of observations
of naturally performed manipulation tasks as encountered in everyday activities
of human life. Several instances of a table-setting task were performed by
different subjects, involving the manipulation of objects and the environment.
We provide the original video sequences, full-body motion capture data recorded
by a markerless motion tracker, RFID tag readings and magnetic sensor readings
from objects and the environment, as well as corresponding action labels. In
this paper, we both describe how the data was computed, in particular the
motion tracker and the labeling, and give examples of what it can be used for.
We present first results of an automatic method for segmenting the observed
motions into semantic classes, and describe how the data can be integrated into
a knowledge-based framework for reasoning about the observations.}
}
@InProceedings{beetz09qlts,
author = {Michael Beetz and Jan Bandouch and Dominik Jain and Moritz Tenorth},
title = {{Towards Automated Models of Activities of Daily Life}},
booktitle = {First International Symposium on Quality of Life Technology -- Intelligent Systems for Better Living},
year = {2009},
address = {Pittsburgh, Pennsylvania, USA},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Learning, Models, Planning, Perception, Knowledge, Reasoning, Representation},
bib2html_groups = {Memoman, K4C, ProbCog},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
abstract = {We propose automated probabilistic models of everyday
activities (AM-EvA) as a novel technical means for the
perception, interpretation, and analysis of everyday manipulation
tasks and activities of daily life. AM-EvAs are based on
action-related concepts in everyday activities such as
action-related places (the place where cups are taken from the
cupboard), capabilities (the objects that can be picked up
single-handedly), etc. These concepts are probabilistically derived
from a set of previous activities that are fully and automatically
observed by computer vision and additional sensor systems. AM-EvA
models enable robots and technical systems to analyze activities in
the complete situation and activity context. They render the
classification and the assessment of actions and situations objective
and can justify the probabilistic interpretation with respect to the
activities the concepts have been learned from.
In this paper, we describe the current state of implementation of the
system that realizes this idea of automated models of
everyday activities and show example results from the observation
and analysis of table-setting episodes.}
}
@Article{Rusu09RSJ-AR,
author = {Radu Bogdan Rusu and Jan Bandouch and Franziska Meier and Irfan Essa and Michael Beetz},
title = {{Human Action Recognition using Global Point Feature Histograms and Action Shapes}},
journal = {Advanced Robotics},
year = {2009},
bib2html_pubtype = {Journal},
bib2html_rescat = {Perception},
bib2html_groups = {Memoman, EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
abstract = { This article investigates the recognition of human actions from 3D point clouds
that encode the motions of people acting in sensor-distributed indoor environments.
Data streams are time-sequences of silhouettes extracted from cameras in the environment.
From the 2D silhouette contours we generate space-time streams by continuously aligning and
stacking the contours along the time axis as the third spatial dimension.
The space-time stream of an observation sequence is segmented into parts
corresponding to subactions using a pattern matching technique based
on suffix trees and interval scheduling. Then, the segmented space-time shapes
are processed by treating the shapes as 3D point clouds and estimating global
point feature histograms for them. The resultant models are clustered using
statistical analysis, and our experimental results indicate that the presented
methods robustly derive different action classes. This holds despite large
intra-class variance in the recorded datasets due to performances from different
persons at different time intervals.
}
}
@InProceedings{engstler09iea,
author = {Florian Engstler and Jan Bandouch and Heiner Bubb},
title = {MeMoMan -- Model-Based Markerless Capturing of Human Motion},
booktitle = {The 17th World Congress on Ergonomics (International Ergonomics Association, IEA)},
year = {2009},
address = {Beijing, China},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception},
bib2html_groups = {Memoman},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household}
}
@Article{beetz10ameva,
title = {{Towards Automated Models of Activities of Daily Life}},
author = {Michael Beetz and Moritz Tenorth and Dominik Jain and Jan Bandouch},
journal = {Technology and Disability},
volume = {22},
number = {1-2},
pages = {27--40},
year = {2010},
publisher = {IOS Press},
bib2html_pubtype = {Journal},
bib2html_groups = {K4C, Memoman, ProbCog},
bib2html_funding = {CoTeSys, MeMoMan},
bib2html_rescat = {Perception, Models, Representation, Learning, Reasoning},
bib2html_domain = {Assistive Household}
}
@Article{Bandouch12memoman,
author = {Jan Bandouch and Odest Chadwicke Jenkins and Michael Beetz},
title = {A Self-Training Approach for Visual Tracking and Recognition of Complex Human Activity Patterns},
journal = {International Journal of Computer Vision},
volume = {99},
number = {2},
year = {2012},
pages = {166--189}
}