research:cop
% Encoding: utf-8

@Article{Han04CCD, author = {Robert Hanek and Michael Beetz}, title = {The Contracting Curve Density Algorithm: Fitting Parametric Curve Models to Images Using Local Self-adapting Separation Criteria}, journal = {International Journal of Computer Vision}, year = 2004, volume = {59}, number = {3}, pages = {233-258}, bib2html_pubtype = {Journal}, bib2html_rescat = {RoboCup, Vision}, bib2html_groups = {IAS, IU}, bib2html_funding = {BV}, bib2html_keywords = {Robot, Vision}, abstract = {The task of fitting parametric curve models to the boundaries of perceptually meaningful image regions is a key problem in computer vision with numerous applications, such as image segmentation, pose estimation, object tracking, and 3-D reconstruction. In this article, we propose the Contracting Curve Density (CCD) algorithm as a solution to the curve-fitting problem. The CCD algorithm extends the state-of-the-art in two important ways. First, it applies a novel likelihood function for the assessment of a fit between the curve model and the image data. This likelihood function can cope with highly inhomogeneous image regions, because it is formulated in terms of local image statistics. The local image statistics are learned on the fly from the vicinity of the expected curve. They therefore provide locally adapted criteria for separating the adjacent image regions. These local criteria replace often-used predefined fixed criteria that rely on homogeneous image regions or specific edge properties. The second contribution is the use of blurred curve models as efficient means for iteratively optimizing the posterior density over possible model parameters. These blurred curve models enable the algorithm to trade off two conflicting objectives, namely having a large area of convergence and achieving high accuracy. We apply the CCD algorithm to several challenging image segmentation and 3-D pose estimation problems. Our experiments with RGB images show that the CCD algorithm achieves a high level of robustness and subpixel accuracy even in the presence of severe texture, shading, clutter, partial occlusion, and strong changes of illumination.}}

@InProceedings{mueller07towards, author = {Armin M{\"u}ller and Michael Beetz}, title = {Towards a Plan Library for Household Robots}, booktitle = {Proceedings of the ICAPS'07 Workshop on Planning and Plan Execution for Real-World Systems: Principles and Practices for Planning in Execution}, year = {2007}, month = {September}, address = {Providence, USA}, bib2html_pubtype = {Workshop Paper}, bib2html_rescat = {Planning}, bib2html_groups = {Cogito}, abstract = {This paper describes the structure for a plan library of a service robot intended to perform household chores. The plans in the library are particularly designed to enable reliable, flexible, and efficient robot control, to learn control heuristics, and to generalize the plans to cope with new objects and situations. We believe that plans with these characteristics are required for competent autonomous robots performing skilled manipulation tasks in human environments.}}

@InProceedings{rusu07towards, author = {Radu Bogdan Rusu and Nico Blodow and Zoltan-Csaba Marton and Alina Soos and Michael Beetz}, title = {Towards 3D Object Maps for Autonomous Household Robots}, booktitle = {Proceedings of the 20th IEEE International Conference on Intelligent Robots and Systems (IROS)}, year = {2007}, address = {San Diego, CA, USA}, abstract = {This paper describes a mapping system that acquires 3D object models of man-made indoor environments such as kitchens. The system segments and geometrically reconstructs cabinets with doors, tables, drawers, and shelves, objects that are important for robots retrieving and manipulating objects in these environments. The system also acquires models of objects of daily use such as glasses, plates, and ingredients. The models enable the recognition of the objects in cluttered scenes and the classification of newly encountered objects. Key technical contributions include (1)~a robust, accurate, and efficient algorithm for constructing complete object models from 3D point clouds constituting partial object views, (2)~feature-based recognition procedures for cabinets, tables, and other task-relevant furniture objects, and (3)~automatic inference of object instance and class signatures for objects of daily use that enable robots to reliably recognize the objects in cluttered and real task contexts. We present results from the sensor-based mapping of a real kitchen.}, bib2html_pubtype = {Conference Paper}, bib2html_rescat = {Perception, Models}, bib2html_groups = {Cop, EnvMod}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}}

@InProceedings{Rusu08IAS, author = {Radu Bogdan Rusu and Zoltan Csaba Marton and Nico Blodow and Michael Beetz}, title = {{Persistent Point Feature Histograms for 3D Point Clouds}}, booktitle = {Proceedings of the 10th International Conference on Intelligent Autonomous Systems (IAS-10), Baden-Baden, Germany}, year = {2008}, abstract = {This paper proposes a novel way of characterizing the local geometry of 3D points, using persistent feature histograms. The relationships between the neighbors of a point are analyzed and the resulting values are stored in a 16-bin histogram. The histograms are pose and point cloud density invariant and cope well with noisy datasets. We show that geometric primitives have unique signatures in this feature space, preserved even in the presence of additive noise. To extract a compact subset of points which characterizes a point cloud dataset, we perform an in-depth analysis of all point feature histograms using different distance metrics. Preliminary results show that point clouds can be roughly segmented based on the uniqueness of geometric primitives' feature histograms. We validate our approach on datasets acquired from laser sensors in indoor (kitchen) environments.}, bib2html_pubtype = {Conference Paper}, bib2html_rescat = {Perception}, bib2html_groups = {EnvMod}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}}

@InProceedings{Pangercic08ETFA, author = {Dejan Pangercic and Radu Bogdan Rusu and Michael Beetz}, title = {{3D-Based Monocular SLAM for Mobile Agents Navigating in Indoor Environments}}, booktitle = {Proceedings of the 13th IEEE International Conference on Emerging Technologies and Factory Automation (ETFA), Hamburg, Germany, September 15-18}, year = {2008}, bib2html_rescat = {Perception, Action}, bib2html_groups = {Cop, Cogmash}, bib2html_funding = {CoTeSys}, bib2html_pubtype = {Conference Paper}, bib2html_domain = {Assistive Household, Cognitive Factory}}

@phdthesis{mueller08transformational, author = {Armin M{\"u}ller}, title = {Transformational Planning for Autonomous Household Robots using Libraries of Robust and Flexible Plans}, school = {Technische Universit{\"a}t M{\"u}nchen}, year = {2008}, bib2html_pubtype = {PhD Thesis}, bib2html_rescat = {Planning, Reasoning}, bib2html_groups = {Cogito}, pdf = {http://mediatum2.ub.tum.de/doc/645588/645588.pdf}, url = {http://mediatum2.ub.tum.de/node?id=645588}, abstract = {One of the oldest dreams of Artificial Intelligence is the realization of autonomous robots that achieve a level of problem-solving competency comparable to humans. Human problem-solving capabilities are particularly impressive in the context of everyday activities such as performing household chores: people are able to deal with ambiguous and incomplete information, they adapt their plans to different environments and specific situations achieving intuitively almost optimal behavior, they cope with interruptions and failures and manage multiple interfering jobs. The investigations presented in this work make substantial progress in the direction of building robots that show similar behavior. This thesis addresses the problem of competently accomplishing everyday manipulation activities, such as setting the table and preparing meals, as a plan-based control problem. In plan-based control, robots do not only execute their programs but also reason about and modify them. We propose TRANER (Transformational Planner) as a suitable planning system for the optimization of everyday manipulation activities. TRANER realizes planning through a generate-test cycle in which plan revision rules propose alternative plans and new plans are simulated in order to test and evaluate them. The unique features of TRANER are that it can realize very general and abstract plan revisions such as "stack objects before carrying them instead of handling them one by one" and that it successfully operates on plans in a way that they generate reliable, flexible, and efficient robot behavior in realistic simulations. The key contributions of this dissertation are threefold. First, it extends the plan representation to support the specification of robust and transformable plans. Second, it proposes a library of general and flexible plans for a household robot, using the extended plan representation. Third, it establishes a powerful, yet intuitive syntax for transformation rules together with a set of general transformation rules for optimizing pick-and-place tasks in an everyday setting using the rule language. The viability and strength of the approach are empirically demonstrated in comprehensive and extensive experiments in a simulation environment with realistically simulated action and sensing mechanisms.
The experiments show that transformational planning is necessary to tailor the robot's activities and that it is capable of substantially improving the robot's performance.}}

@article{klank2008afg, title = {{Automatic feature generation in endoscopic images}}, bib2html_pubtype = {Journal}, bib2html_groups = {Other}, bib2html_funding = {Other}, author = {Ulrich Klank and N. Padoy and H. Feussner and N. Navab}, journal = {International Journal of Computer Assisted Radiology and Surgery}, volume = {3}, number = {3}, pages = {331--339}, year = {2008}, publisher = {Springer}}

@InProceedings{klank09searchspace, author = {Ulrich Klank and Dejan Pangercic and Radu Bogdan Rusu and Michael Beetz}, title = {{Real-time CAD Model Matching for Mobile Manipulation and Grasping}}, booktitle = {9th IEEE-RAS International Conference on Humanoid Robots}, month = {December 7-10}, year = {2009}, address = {Paris, France}, pages = {290--296}, bib2html_groups = {Cop, EnvMod}, bib2html_rescat = {Perception, Models}, bib2html_pubtype = {Conference Paper}, bib2html_domain = {Assistive Household}}

@InProceedings{09SceneDetection, author = {Dejan Pangercic and Rok Tavcar and Moritz Tenorth and Michael Beetz}, title = {Visual Scene Detection and Interpretation using Encyclopedic Knowledge and Formal Description Logic}, booktitle = {Proceedings of the International Conference on Advanced Robotics (ICAR)}, year = {2009}, month = {June 22 - 26}, address = {Munich, Germany}, bib2html_pubtype = {Conference Paper}, bib2html_rescat = {Perception, Reasoning}, bib2html_groups = {Cop, K4C}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}}

@InProceedings{klank09icra, author = {Ulrich Klank and Muhammad Zeeshan Zia and Michael Beetz}, title = {{3D Model Selection from an Internet Database for Robotic Vision}}, booktitle = {International Conference on Robotics and Automation (ICRA)}, year = {2009}, pages = {2406--2411}, bib2html_pubtype = {Conference Paper}, bib2html_rescat = {Models}, bib2html_groups = {Cop}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}, abstract = {We propose a new method for automatically accessing an internet database of 3D models that are searchable only by their user-annotated labels, in order to use them for vision and robotic manipulation purposes. Instead of having only a local database containing already seen objects, we want to use shared databases available over the internet. This approach, while having the potential to dramatically increase the visual recognition capability of robots, also poses certain problems, like wrong annotations due to the open nature of the database, overwhelming amounts of data (many 3D models), or the lack of relevant data (no models matching a specified label). To solve those problems we propose the following: First, we present an outlier/inlier classification method for reducing the number of results and discarding invalid 3D models that do not match our query. Second, we adapt an approach from computer graphics, so-called 'morphing', to this application in order to specialize the models and describe more objects. Third, we search for 3D models using a restricted search space, as obtained from our knowledge of the environment. We show our classification and matching results and finally show how we can recover the correct scaling with the stereo setup of our robot.}}

@InProceedings{zia09icar, author = {Muhammad Zeeshan Zia and Ulrich Klank and Michael Beetz}, title = {{Acquisition of a Dense 3D Model Database for Robotic Vision}}, booktitle = {International Conference on Advanced Robotics (ICAR)}, year = {2009}, bib2html_pubtype = {Conference Paper}, bib2html_rescat = {Models}, bib2html_groups = {Cop}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}, abstract = {Service robots in real-world environments need to have computer vision capability for detecting a large class of objects. We discuss how freely available 3D model databases can be used to enable robots to know the appearance of a wide variety of objects in human environments, with special application to our Assistive Kitchen. However, the open and free nature of such databases poses problems, for example the presence of incorrectly annotated 3D models, or objects for which very few models exist online. We have previously proposed techniques to automatically select the useful models from the search result, and to utilize such models to perform simple manipulation tasks. Here, we build upon that work to describe a technique based on morphing to form new 3D models if we only have a few models corresponding to a label. However, morphing in computer graphics requires a human operator and is computationally burdensome, which is why we present our own automatic morphing technique. We also present a simple technique to speed up the matching of 3D models against real scenes using visibility culling. This technique can potentially speed up the matching process by 2-3 times while using less memory, if we have some prior information about model and world pose.}}

@InProceedings{sunli-2009-cvprws, title = {EYEWATCHME - 3D Hand and object tracking for inside out activity analysis}, author = {Li Sun and Ulrich Klank and Michael Beetz}, booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR 2009)}, year = {2009}, month = {June}, pages = {9-16}, abstract = {This paper investigates the inside-out recognition of everyday manipulation tasks using a gaze-directed camera, i.e., a camera that is actively directed at the visual attention focus of the person wearing it. We present EYEWATCHME, an integrated vision and state estimation system that simultaneously tracks the positions and the poses of the acting hands, the pose of the manipulated object, and the pose of the observing camera. Taken together, EYEWATCHME provides comprehensive data for learning predictive models of vision-guided manipulation that include the objects people are attending to, the interaction of attention and reaching/grasping, and the segmentation of reaching and grasping using visual attention as evidence. Key technical contributions of this paper include an ego-view hand tracking system that estimates 27-DOF hand poses. The hand tracking system is capable of detecting hands and estimating their poses despite substantial self-occlusion caused by the hand and occlusions caused by the manipulated object. EYEWATCHME can also cope with blurred images that are caused by rapid eye movements. The second key contribution is the integrated activity recognition system that simultaneously tracks the attention of the person, the hand poses, and the poses of the manipulated objects in terms of global scene coordinates.
We demonstrate the operation of EYEWATCHME in the context of kitchen tasks including filling a cup with water.}, keywords = {computer graphics, human computer interaction, image restoration, image segmentation, image sensors, object recognition, tracking, 3D hand tracking, 3D object tracking, EYEWATCHME, blurred images, gaze-directed camera, grasping segmentation, inside out activity analysis, integrated activity recognition system, reaching segmentation, state estimation system, substantial self-occlusion, vision-guided manipulation}, doi = {10.1109/CVPR.2009.5204358}, ISSN = {1063-6919}, bib2html_pubtype = {Workshop Paper}, bib2html_groups = {Cop}, bib2html_rescat = {Perception}, bib2html_domain = {Assistive Household}}

@InProceedings{Marton09ISRR, author = {Zoltan Csaba Marton and Lucian Cosmin Goron and Radu Bogdan Rusu and Michael Beetz}, title = {{Reconstruction and Verification of 3D Object Models for Grasping}}, booktitle = {Proceedings of the 14th International Symposium on Robotics Research (ISRR09)}, address = {Lucerne, Switzerland}, month = {August 31 -- September 3}, year = {2009}, bib2html_pubtype = {Workshop Paper}, bib2html_rescat = {Perception, Models}, bib2html_groups = {Cop, EnvMod}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}}

@InProceedings{Rusu09ICCV-WS, author = {Radu Bogdan Rusu and Andreas Holzbach and Gary Bradski and Michael Beetz}, title = {Detecting and Segmenting Objects for Mobile Manipulation}, booktitle = {Proceedings of IEEE Workshop on Search in 3D and Video (S3DV), held in conjunction with the 12th IEEE International Conference on Computer Vision (ICCV)}, month = {September 27}, year = {2009}, address = {Kyoto, Japan}, bib2html_pubtype = {Workshop Paper}, bib2html_rescat = {Perception, Models}, bib2html_groups = {Cop, EnvMod}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}}

@InProceedings{Blodow09Humanoids, author = {Nico Blodow and Radu Bogdan Rusu and Zoltan Csaba Marton and Michael Beetz}, title = {{Partial View Modeling and Validation in 3D Laser Scans for Grasping}}, booktitle = {9th IEEE-RAS International Conference on Humanoid Robots (Humanoids)}, month = {December 7-10}, year = {2009}, address = {Paris, France}, bib2html_pubtype = {Conference Paper}, bib2html_rescat = {Perception, Models}, bib2html_groups = {Cop, EnvMod}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}}

@InProceedings{Rusu09Humanoids, author = {Radu Bogdan Rusu and Andreas Holzbach and Rosen Diankov and Gary Bradski and Michael Beetz}, title = {Perception for Mobile Manipulation and Grasping using Active Stereo}, booktitle = {9th IEEE-RAS International Conference on Humanoid Robots (Humanoids)}, month = {December 7-10}, year = {2009}, address = {Paris, France}, bib2html_pubtype = {Conference Paper}, bib2html_rescat = {Perception, Models}, bib2html_groups = {Cop, EnvMod}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}}

@InProceedings{IAS09CoPMan, author = {Michael Beetz and Nico Blodow and Ulrich Klank and Zoltan Csaba Marton and Dejan Pangercic and Radu Bogdan Rusu}, title = {{CoP-Man -- Perception for Mobile Pick-and-Place in Human Living Environments}}, booktitle = {Proceedings of the 22nd IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) Workshop on Semantic Perception for Mobile Manipulation}, month = {October 11-15}, year = {2009}, address = {St. Louis, MO, USA}, note = {Invited paper.}, bib2html_pubtype = {Workshop Paper}, bib2html_rescat = {Perception, Models}, bib2html_groups = {Cop, EnvMod}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}}

@InCollection{hoyninge10ccis, author = {Nicolai v. Hoyningen-Huene and Michael Beetz}, editor = {AlpeshKumar Ranchordas and Helder Araujo}, booktitle = {VISIGRAPP 2009}, title = {Importance Sampling as One Solution to the Data Association Problem in Multi-target Tracking}, publisher = {Springer-Verlag Berlin Heidelberg}, year = {2010}, number = {68}, series = {Communications in Computer and Information Science (CCIS)}, pages = {309--325}, bib2html_pubtype = {Selected Paper}, bib2html_rescat = {Person Tracking}, bib2html_groups = {Aspogamo}, bib2html_funding = {ASpoGAMo}, bib2html_domain = {Soccer Analysis}, abstract = {Tracking multiple targets with similar appearance is a common task in many computer vision applications such as surveillance or sports analysis. We propose a Rao-Blackwellized Resampling Particle Filter (RBRPF) as a real-time multi-target tracking method that solves the data association problem by a Monte Carlo approach. Each particle containing the whole target configuration is predicted by using a process model and resampled by sampling associations and fusing the predicted state with the assigned measurement(s) instead of the common dispersion. As each target state is modeled as a Gaussian, Rao-Blackwellization can be used to solve some of these steps analytically. The sampling of associations splits the multi-target tracking problem into multiple single-target tracking problems, which can be handled by Kalman filters in an optimal way. The method is independent of the order of measurements, which is mostly predetermined by the measuring process, in contrast to other state-of-the-art approaches. Smart resampling and memoization are introduced to equip the tracking method with real-time capabilities in the first place, exploiting the discreteness of the associations. The probabilistic framework allows for consideration of appearance models and the fusion of different sensors. A way to constrain the multiplicity of measurements associated with a single target is proposed and -- along with the ability to cope with a high number of targets in clutter -- evaluated in a simulation experiment.
We demonstrate the applicability of the proposed method to real world applications by tracking soccer players captured by multiple cameras through occlusions in real-time.}}

@InProceedings{Marton10IROS, author = {Zoltan-Csaba Marton and Dejan Pangercic and Nico Blodow and Jonathan Kleinehellefort and Michael Beetz}, title = {{General 3D Modelling of Novel Objects from a Single View}}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, month = {October 18-22}, year = {2010}, address = {Taipei, Taiwan}, bib2html_pubtype = {Conference Paper}, bib2html_rescat = {Perception, Models}, bib2html_groups = {Cop, EnvMod}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}}

@InProceedings{iros10kcopman, author = {Dejan Pangercic and Moritz Tenorth and Dominik Jain and Michael Beetz}, title = {{Combining Perception and Knowledge Processing for Everyday Manipulation}}, booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, pages = {1065-1071}, year = {2010}, month = {October 18-22}, address = {Taipei, Taiwan}, bib2html_groups = {K4C,ProbCog,Cop}, bib2html_pubtype = {Conference Paper}, bib2html_rescat = {Perception, Models}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}}

@InProceedings{GRSD10Humanoids, author = {Zoltan-Csaba Marton and Dejan Pangercic and Radu Bogdan Rusu and Andreas Holzbach and Michael Beetz}, title = {Hierarchical Object Geometric Categorization and Appearance Classification for Mobile Manipulation}, booktitle = {Proceedings of the IEEE-RAS International Conference on Humanoid Robots}, month = {December 6-8}, year = {2010}, address = {Nashville, TN, USA}, bib2html_pubtype = {Conference Paper}, bib2html_rescat = {Perception, Models}, bib2html_groups = {Cop, EnvMod}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}}

@InProceedings{Blodow10Humanoids, author = {Nico Blodow and Dominik Jain and Zoltan-Csaba Marton and Michael Beetz}, title = {{Perception and Probabilistic Anchoring for Dynamic World State Logging}}, booktitle = {10th IEEE-RAS International Conference on Humanoid Robots}, pages = {160-166}, month = {December 6-8}, year = {2010}, address = {Nashville, TN, USA}, bib2html_pubtype = {Conference Paper}, bib2html_rescat = {Perception, Models}, bib2html_groups = {Cop, EnvMod, ProbCog}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}}

@INPROCEEDINGS{goron10isr-robotik, AUTHOR="Lucian Cosmin Goron and Zoltan Csaba Marton and Gheorghe Lazea and Michael Beetz", TITLE="Automatic Layered 3D Reconstruction of Simplified Object Models for Grasping", BOOKTITLE="Joint 41st International Symposium on Robotics (ISR) and 6th German Conference on Robotics (ROBOTIK)", ADDRESS="Munich, Germany", YEAR=2010, bib2html_pubtype = {Conference Paper}, bib2html_rescat = {Perception, Models}, bib2html_groups = {Cop, EnvMod}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}}

@inproceedings{klank11transparent, author = {Ulrich Klank and Daniel Carton and Michael Beetz}, title = {Transparent Object Detection and Reconstruction on a Mobile Platform}, booktitle = {IEEE International Conference on Robotics and Automation (ICRA)}, month = {May, 9--13}, year = {2011}, address = {Shanghai, China}, bib2html_pubtype = {Conference Paper}, bib2html_groups = {CoP}, bib2html_funding = {CoTeSys}, bib2html_rescat = {Object Recognition}, bib2html_domain = {Assistive Household}}

@article{mozos11furniture, title = {{Furniture Models Learned from the WWW -- Using Web Catalogs to Locate and Categorize Unknown Furniture Pieces in 3D Laser Scans}}, author = {Oscar Martinez Mozos and Zoltan Csaba Marton and Michael Beetz}, journal = {Robotics \& Automation Magazine}, volume = {18}, number = {2}, pages = {22--32}, month = {June}, year = {2011}, publisher = {IEEE}, bib2html_pubtype = {Journal}, bib2html_groups = {EnvMod, CoP}, bib2html_funding = {CoTeSys}, bib2html_rescat = {Perception, Models, Reasoning}, bib2html_domain = {Assistive Household}}

@inproceedings{icra11perception-manipulation, author = {Michael Beetz and Ulrich Klank and Alexis Maldonado and Dejan Pangercic and Thomas R\"uhr}, title = {Robotic Roommates Making Pancakes - Look Into Perception-Manipulation Loop}, booktitle = {IEEE International Conference on Robotics and Automation (ICRA), Workshop on Mobile Manipulation: Integrating Perception and Manipulation}, month = {May, 9--13}, year = {2011}, pages = {529--536}, bib2html_pubtype = {Workshop Paper}, bib2html_groups = {EnvMod, Cogman, CoP}, bib2html_funding = {CoTeSys}, bib2html_rescat = {Action, Perception}, bib2html_domain = {Assistive Household}}

@article{marton11ijrr, title = {{Combined 2D-3D Categorization and Classification for Multimodal Perception Systems}}, author = {Zoltan Csaba Marton and Dejan Pangercic and Nico Blodow and Michael Beetz}, journal = {The International Journal of Robotics Research}, volume = {30}, number = {11}, pages = {1378--1402}, month = {September}, year = {2011}, publisher = {Sage Publications}, bib2html_pubtype = {Journal}, bib2html_groups = {EnvMod, CoP}, bib2html_funding = {CoTeSys}, bib2html_rescat = {Perception, Models, Reasoning}, bib2html_domain = {Assistive Household}}

@inproceedings{ccd11humanoids, author = {Shulei Zhu and Dejan Pangercic and Michael Beetz}, title = {Contracting Curve Density Algorithm for Applications in Personal Robotics}, booktitle = {11th IEEE-RAS International Conference on Humanoid Robots}, year = {2011}, month = {October, 26--28}, address = {Bled, Slovenia}, bib2html_pubtype = {Conference Paper}, bib2html_groups = {EnvMod, CoP}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}, bib2html_rescat = {Perception}}

@inproceedings{pancakes11humanoids, author = {Michael Beetz and Ulrich Klank and Ingo Kresse and Alexis Maldonado and Lorenz M\"osenlechner and Dejan Pangercic and Thomas R\"uhr and Moritz Tenorth}, title = {{Robotic Roommates Making Pancakes}}, booktitle = {11th IEEE-RAS International Conference on Humanoid Robots}, year = {2011}, month = {October, 26--28}, address = {Bled, Slovenia}, bib2html_pubtype = {Conference Paper}, bib2html_groups = {EnvMod, CoP, Cogito, K4C, Cogman}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}, bib2html_rescat = {Action, Perception, Reasoning, Planning, Models, Representation}}

@misc{shopping_demo11aaai, author = {Dejan Pangercic and Koppany Mathe and Zoltan-Csaba Marton and Lucian Cosmin Goron and Monica-Simona Opris and Martin Schuster and Moritz Tenorth and Dominik Jain and Thomas Ruehr and Michael Beetz}, title = {A Robot that Shops for and Stores Groceries}, howpublished = {AAAI Video Competition (AIVC 2011)}, address = {San Francisco, CA, USA}, year = {2011}, month = {August 7--11}, url = {http://youtu.be/x0Ybod_6ADA}, bib2html_pubtype = {Video}, bib2html_groups = {EnvMod, CoP, K4C}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}, bib2html_rescat = {Action, Perception, Reasoning, Models, Representation}}
@inproceedings{toolrep11humanoids, author = {Ingo Kresse and Ulrich Klank and Michael Beetz}, title = {Multimodal Autonomous Tool Analyses and Appropriate Application}, booktitle = {11th IEEE-RAS International Conference on Humanoid Robots}, year = {2011}, month = {October, 26--28}, address = {Bled, Slovenia}, bib2html_pubtype = {Conference Paper}, bib2html_groups = {CoP, Cogman}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}, bib2html_rescat = {Perception, Action, Representation}}

@inproceedings{irosws11germandeli, author = {Dejan Pangercic and Vladimir Haltakov and Michael Beetz}, title = {Fast and Robust Object Detection in Household Environments Using Vocabulary Trees with SIFT Descriptors}, booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), Workshop on Active Semantic Perception and Object Search in the Real World}, month = {September, 25--30}, year = {2011}, address = {San Francisco, CA, USA}, bib2html_pubtype = {Workshop Paper}, bib2html_groups = {CoP}, bib2html_funding = {CoTeSys}, bib2html_rescat = {Perception}, bib2html_domain = {Assistive Household}}

@inproceedings{irosws11vosch, author = {Asako Kanezaki and Zoltan-Csaba Marton and Dejan Pangercic and Tatsuya Harada and Yasuo Kuniyoshi and Michael Beetz}, title = {{Voxelized Shape and Color Histograms for RGB-D}}, booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), Workshop on Active Semantic Perception and Object Search in the Real World}, month = {September, 25--30}, year = {2011}, address = {San Francisco, CA, USA}, bib2html_pubtype = {Workshop Paper}, bib2html_groups = {CoP,EnvMod}, bib2html_funding = {CoTeSys}, bib2html_rescat = {Perception}, bib2html_domain = {Assistive Household}}

@inproceedings{marton11rgbd, author = {Zoltan-Csaba Marton and Dejan Pangercic and Michael Beetz}, title = {{Efficient Surface and Feature Estimation in RGBD}}, booktitle = {RGB-D Workshop on 3D Perception in Robotics at the European Robotics (euRobotics) Forum}, month = {April 8}, year = {2011}, address = {V\"aster\aa{}s, Sweden}, bib2html_pubtype = {Workshop Paper}, bib2html_groups = {CoP,EnvMod}, bib2html_funding = {CoTeSys}, bib2html_rescat = {Perception}, bib2html_domain = {Assistive Household}}

@inproceedings{murray11frames, author = {William R. Murray and Dominik Jain}, title = {{Modeling Cognitive Frames for Situations with Markov Logic Networks}}, booktitle = {Proceedings of the 8th International NLPCS Workshop: Human-Machine Interaction in Translation, Copenhagen Studies in Language 41}, pages = {167--178}, publisher = {Samfundslitteratur}, month = {August 11}, year = {2011}, bib2html_groups = {ProbCog}}

@inproceedings{klank12validation, author = {Ulrich Klank and Lorenz M\"osenlechner and Alexis Maldonado and Michael Beetz}, title = {Robots that Validate Learned Perceptual Models}, booktitle = {IEEE International Conference on Robotics and Automation (ICRA)}, year = {2012}, month = {May 14--18}, address = {St. Paul, MN, USA}, bib2html_pubtype = {Conference Paper}, bib2html_groups = {Cop, Cogito}, bib2html_funding = {CoTeSys}, bib2html_rescat = {Representation, Reasoning}, bib2html_domain = {Assistive Household}}

@misc{roboearth-video-icra12, author = {Daniel {Di~Marco} and Andreas Koch and Oliver Zweigle and Kai H\"aussermann and Bj\"orn Schie{\ss}le and Paul Levi and Dorian Galvez Lopez and Luis Riazuelo and Javier Civera and J.M.M. Montiel and Moritz Tenorth and Alexander Clifford Perzylo and Markus Waibel and Marinus Jacobus Gerardus van de Molengraft}, title = {Creating and using RoboEarth object models}, howpublished = {IEEE International Conference on Robotics and Automation (ICRA)}, year = {2012}, month = {May 14--18}, address = {St. Paul, MN, USA}, bib2html_pubtype = {Video}, bib2html_groups = {EnvMod, CoP, K4C}, bib2html_funding = {RoboEarth}, bib2html_domain = {Assistive Household}, bib2html_rescat = {Perception, Models, Representation}}

@inproceedings{cogsys12parts, author = {Ferenc Balint-Benczedi and Zoltan-Csaba Marton and Michael Beetz}, title = {Efficient Part-Graph Hashes for Object Categorization}, booktitle = {5th International Conference on Cognitive Systems (CogSys)}, year = {2012}, bib2html_pubtype = {Poster}, bib2html_groups = {EnvMod, Cop}, bib2html_funding = {CoTeSys}, bib2html_rescat = {Perception}, bib2html_domain = {Assistive Household}}

@InProceedings{marton12SC, author = {Zoltan-Csaba Marton and Ferenc Balint-Benczedi and Florian Seidel and Lucian Cosmin Goron and Michael Beetz}, title = {{Object Categorization in Clutter using Additive Features and Hashing of Part-graph Descriptors}}, booktitle = {Proceedings of Spatial Cognition (SC)}, address = {Abbey Kloster Seeon, Germany}, year = {2012}, bib2html_pubtype = {Conference Paper}, bib2html_groups = {EnvMod, Cop}, bib2html_funding = {CoTeSys}, bib2html_rescat = {Perception}, bib2html_domain = {Assistive Household}, keywords = {openease_kbe_perception_everyday}, url = {http://link.springer.com/chapter/10.1007%2F978-3-642-32732-2_2}}

@INPROCEEDINGS{goron12robotik, AUTHOR="Lucian Cosmin Goron and Zoltan Csaba Marton and Gheorghe Lazea and Michael Beetz", TITLE="Segmenting Cylindrical and Box-like Objects in Cluttered {3D} Scenes", BOOKTITLE="7th German Conference on Robotics (ROBOTIK)", ADDRESS="Munich, Germany", MONTH=may, YEAR=2012, bib2html_pubtype = {Conference Paper}, bib2html_rescat = {Perception, Models}, bib2html_groups = {Cop, EnvMod}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}}

@InProceedings{marton12robotdoc, author = {Zoltan-Csaba Marton and Florian Seidel and Michael Beetz}, title = {{Towards Modular Spatio-temporal Perception for Task-adapting Robots}}, booktitle = {Postgraduate Conference on Robotics and Development of Cognition (RobotDoC-PhD), a satellite event of the 22nd International Conference on Artificial Neural Networks (ICANN)}, address = {Lausanne, Switzerland}, year = {2012}, bib2html_pubtype = {Conference Paper}, bib2html_groups = {EnvMod, Cop}, bib2html_funding = {CoTeSys}, bib2html_rescat = {Perception}, bib2html_domain = {Assistive Household}}

@phdthesis{klank2012PhD, author = {Ulrich Klank}, title = {Everyday Perception for Mobile Manipulation in Human Environments}, year = {2012}, school = {Technische Universit\"at M\"unchen}, bib2html_pubtype = {PhD Thesis}, bib2html_rescat = {Perception, Representation, Learning}, bib2html_groups = {Cop}, bib2html_funding = {CoTeSys}, bib2html_domain = {Assistive Household}, pdf = {http://mediatum.ub.tum.de/download/1080039/1080039.pdf}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:91-diss-20120412-1080039-1-7}}

@inproceedings{wimmer08anasmfittingmethod, author = {Matthias Wimmer and Shinya Fujie and Freek Stulp and Tetsunori Kobayashi and Bernd Radig}, title = {An {ASM} Fitting Method Based on Machine Learning that Provides a Robust Parameter Initialization for {AAM} Fitting}, booktitle = {Proc. of the International Conference on Automatic Face and Gesture Recognition (FGR08)}, year = {2008}, month = {September}, address = {Amsterdam, Netherlands}, bib2html_pubtype = {Refereed Conference Paper}, bib2html_rescat = {Image Understanding}, bib2html_groups = {IU}, abstract = {Due to their use of information contained in texture, Active Appearance Models~(AAM) generally outperform Active Shape Models~(ASM) in terms of fitting accuracy. Although many extensions and improvements over the original AAM have been proposed, one of the main drawbacks of AAMs remains their dependence on good initial model parameters to achieve accurate fitting results. In this paper, we determine the initial model parameters for AAM fitting with ASM fitting, and use machine learning techniques to improve the scope and accuracy of ASM fitting. Combining the precision of AAM fitting with the large radius of convergence of learned ASM fitting improves the results by an order of magnitude, as our empirical evaluation on a database of publicly available benchmark images demonstrates.}}