RAVG: Robotics Active Vision Research
PUBLICATIONS
This is the list of publications of this laboratory
Perez-Alcocer, R. R.; Torres-Mendez, Luz Abril; Olguin-Diaz, Ernesto; Maldonado-Ramirez, Alejandro Vision-based Autonomous Underwater Vehicle Navigation in Poor Visibility Conditions using a Model-free Robust Control Journal Article In: 2016. Hernandez-Rodriguez, Felipe; Castelan, Mario A photometric sampling method for facial shape recovery Journal Article In: Machine Vision and Applications, vol. 27, no. 4, pp. 483-497, 2016. Martinez-Gonzalez, Pablo; Castelan, Mario; Arechavaleta, Gustavo Vision based persistent localization of a humanoid robot for locomotion Tasks Journal Article In: International Journal of Applied Mathematics and Computer Science, vol. 26, no. 3, 2016. Delfin, Josafat; Becerra, Hector M; Arechavaleta, Gustavo Visual Servo Walking Control for Humanoids with Finite-time Convergence and Smooth Robot Velocities Journal Article In: International Journal of Control, vol. 89, no. 7, pp. 1342-1358, 2016, ISSN: 1366-5820. Rios-Cabrera, Reyes; Morales-Diaz, America B.; Aviles-Viñas, Jaime F; Lopez-Juarez, Ismael Robotic GMAW online learning: issues and experiments Journal Article In: The International Journal of Advanced Manufacturing Technology, vol. 87, no. 5, pp. 2113–2134, 2016, ISSN: 1433-3015. Aviles-Viñas, Jaime F; Rios-Cabrera, Reyes; Lopez-Juarez, Ismael On-line learning of welding bead geometry in industrial robots Journal Article In: The International Journal of Advanced Manufacturing Technology, vol. 83, no. 1, pp. 217–231, 2016, ISSN: 1433-3015. Maldonado-Ramirez, Alejandro; Torres-Mendez, Luz Abril Robotic Visual Tracking of Relevant Cues in Underwater Environments with Poor Visibility Conditions Journal Article In: Journal of Sensors, vol. 2016, 2016. Delfin, Josafat; Becerra, Héctor M; Arechavaleta, Gustavo Humanoid Localization and Navigation using a Visual Memory Conference IEEE-RAS 16th International Conference on Humanoid Robots, IEEE, 2016, ISSN: 2164-0580. 
Maldonado-Ramirez, Alejandro; Torres-Mendez, Luz Abril A Bag of Relevant Regions Model for Place Recognition in Coral Reefs Conference OCEANS 2016, IEEE 2016. Aviles-Viñas, Jaime F; Lopez-Juarez, Ismael; Rios-Cabrera, Reyes Acquisition of welding skills in industrial robots Journal Article In: Industrial Robot: An International Journal, vol. 42, no. 2, pp. 156-166, 2015. Navarro-Gonzalez, Jose Luis; Lopez-Juarez, Ismael; Ordaz-Hernandez, Keny; Rios-Cabrera, Reyes On-line incremental learning for unknown conditions during assembly operations with industrial robots Journal Article In: Evolving Systems, vol. 6, no. 2, pp. 101–114, 2015, ISSN: 1868-6486. Castelan, Mario; Cruz-Perez, Elier; Torres-Mendez, Luz Abril A Photometric Sampling Strategy for Reflectance Characterization and Transference Journal Article In: Computación y Sistemas, vol. 19, no. 2, pp. 255-272, 2015. Martinez-Garcia, Edgar A.; Torres-Mendez, Luz Abril; Elara Mohan, Rajesh Multi-legged robot dynamics navigation model with optical flow Journal Article In: International Journal of Intelligent Unmanned Systems, vol. 2, no. 2, pp. 121-139, 2014. Rodriguez-Telles, Francisco G; Perez-Alcocer, Ricardo; Maldonado-Ramirez, Alejandro; Torres-Mendez, Luz Abril; Bikram Dey, Bir; Martinez-Garcia, Edgar A. Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat Conference 2014 IEEE International Conference on Robotics and Automation (ICRA), IEEE 2014. Maldonado-Ramirez, Alejandro; Torres-Mendez, Luz Abril; Martinez-Garcia, Edgar A. Robust detection and tracking of regions of interest for autonomous underwater robotic exploration Conference Proc. 6th Int. Conf. on Advanced Cognitive Technologies and Applications, 2014. Rios-Cabrera, Reyes; Tuytelaars, Tinne; Van Gool, Luc J. 
Efficient multi-camera vehicle detection, tracking, and identification in a tunnel surveillance application Journal Article In: Computer Vision and Image Understanding, vol. 116, no. 6, pp. 742 - 753, 2012, ISSN: 1077-3142.
2016
Journal Articles
@article{Perez-Alcocer2016,
title = {Vision-based Autonomous Underwater Vehicle Navigation in Poor Visibility Conditions using a Model-free Robust Control},
author = {Perez-Alcocer, R. R. and Torres-Mendez, Luz Abril and Olguin-Diaz, Ernesto and Maldonado-Ramirez, Alejandro},
year = {2016},
date = {2016-06-06},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
@article{Hernandez-Rodriguez2016,
title = {A photometric sampling method for facial shape recovery},
author = {Hernandez-Rodriguez, Felipe and Castelan, Mario},
url = {http://link.springer.com/article/10.1007%2Fs00138-016-0755-9},
doi = {10.1007/s00138-016-0755-9},
year = {2016},
date = {2016-04-01},
journal = {Machine Vision and Applications},
volume = {27},
number = {4},
pages = {483--497},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
@article{Martinez-Gonzalez2016,
title = {Vision based persistent localization of a humanoid robot for locomotion Tasks},
author = {Martinez-Gonzalez, Pablo and Castelan, Mario and Arechavaleta, Gustavo},
url = {https://drive.google.com/file/d/0B-7dVUdTjeJUNGdXd0N6UWRvdk0/view},
year = {2016},
date = {2016-03-26},
journal = {International Journal of Applied Mathematics and Computer Science},
volume = {26},
number = {3},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
@article{Delfin2016,
title = {Visual Servo Walking Control for Humanoids with Finite-time Convergence and Smooth Robot Velocities},
author = {Delfin, Josafat and Becerra, Hector M and Arechavaleta, Gustavo},
url = {http://www.tandfonline.com/doi/abs/10.1080/00207179.2015.1129558},
doi = {10.1080/00207179.2015.1129558},
issn = {1366-5820},
year = {2016},
date = {2016-01-10},
journal = {International Journal of Control},
volume = {89},
number = {7},
pages = {1342--1358},
abstract = {In this paper, we address the problem of humanoid locomotion guided from information of a monocular camera. The goal of the robot is to reach a desired location defined in terms of a target image, i.e., a positioning task. The proposed approach allows us to introduce a desired time to complete the positioning task, which is advantageous in contrast to the classical exponential convergence. In particular, finite-time convergence is achieved while generating smooth robot velocities and considering the omnidirectional waking capability of the robot. In addition, we propose a hierarchical task-based control scheme, which can simultaneously handle the visual positioning and the obstacle avoidance tasks without affecting the desired time of convergence. The controller is able to activate or inactivate the obstacle avoidance task without generating discontinuous velocity references while the humanoid is walking. Stability of the closed loop for the two task-based control is demonstrated theoretically even during the transitions between the tasks. The proposed approach is generic in the sense that different visual control schemes are supported. We evaluate a homography-based visual servoing for position-based and image-based modalities, as well as for eye-in-hand and eye-to-hand configurations. The experimental evaluation is performed with the humanoid robot NAO.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
@article{Rios-Cabrera2016,
title = {Robotic {GMAW} online learning: issues and experiments},
author = {Rios-Cabrera, Reyes and Morales-Diaz, America B. and Aviles-Vi\~{n}as, Jaime F and Lopez-Juarez, Ismael},
url = {http://dx.doi.org/10.1007/s00170-016-8618-0},
doi = {10.1007/s00170-016-8618-0},
issn = {1433-3015},
year = {2016},
date = {2016-01-01},
journal = {The International Journal of Advanced Manufacturing Technology},
volume = {87},
number = {5},
pages = {2113--2134},
abstract = {This paper presents three main contributions: (i) an experimental analysis of variables, using well-defined statistical patterns applied to the main parameters of the welding process. (ii) An on-line/off-line learning and testing method, showing that robots can acquire a useful knowledge base without human intervention to learn and reproduce bead geometries. And finally, (iii) an on-line testing analysis including penetration of the bead, that is used to train an artificial neural network (ANN). For the experiments, an optic camera was used in order to measure bead geometry (width and height). Also real-time computer vision algorithms were implemented to extract training patterns. The proposal was carried out using an industrial KUKA robot and a GMAW type machine inside a manufacturing cell. We present expermental analysis that show different issues and solutions to build an industrial adaptive system for the robotics welding process.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
@article{Aviles-Vinas2016b,
title = {On-line learning of welding bead geometry in industrial robots},
author = {Aviles-Vi\~{n}as, Jaime F and Rios-Cabrera, Reyes and Lopez-Juarez, Ismael},
url = {http://dx.doi.org/10.1007/s00170-015-7422-6},
doi = {10.1007/s00170-015-7422-6},
issn = {1433-3015},
year = {2016},
date = {2016-01-01},
journal = {The International Journal of Advanced Manufacturing Technology},
volume = {83},
number = {1},
pages = {217--231},
abstract = {In this paper, we propose an architecture based on an artificial neural network (ANN), to learn welding skills automatically in industrial robots. With the aid of an optic camera and a laser-based sensor, the bead geometry (width and height) is measured. We propose a real-time computer vision algorithm to extract training patterns in order to acquire knowledge to later predict specific geometries. The proposal is implemented and tested in an industrial KUKA KR16 robot and a GMAW type machine within a manufacturing cell. Several data analysis are described as well as off-line and on-line training, learning strategies, and testing experimentation. It is demonstrated during our experiments that, after learning the skill, the robot is able to produce the requested bead geometry even without any knowledge about the welding parameters such as arc voltage and current. We implemented an on-line learning test, where the whole experiments and learning process take only about 4 min. Using this knowledge later, we obtained up to 95 % accuracy in prediction.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
@article{maldonado2016robotic,
  author    = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril},
  title     = {Robotic Visual Tracking of Relevant Cues in Underwater Environments with Poor Visibility Conditions},
  journal   = {Journal of Sensors},
  volume    = {2016},
  publisher = {Hindawi Publishing Corporation},
  year      = {2016},
  date      = {2016-01-01},
  url       = {https://www.hindawi.com/journals/js/2016/4265042/},
  abstract  = {Using visual sensors for detecting regions of interest in underwater environments is fundamental for many robotic applications. Particularly, for an autonomous exploration task, an underwater vehicle must be guided towards features that are of interest. If the relevant features can be seen from the distance, then smooth control movements of the vehicle are feasible in order to position itself close enough with the final goal of gathering visual quality images. However, it is a challenging task for a robotic system to achieve stable tracking of the same regions since marine environments are unstructured and highly dynamic and usually have poor visibility. In this paper, a framework that robustly detects and tracks regions of interest in real time is presented. We use the chromatic channels of a perceptual uniform color space to detect relevant regions and adapt a visual attention scheme to underwater scenes. For the tracking, we associate with each relevant point superpixel descriptors which are invariant to changes in illumination and shape. The field experiment results have demonstrated that our approach is robust when tested on different visibility conditions and depths in underwater explorations.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Conferences
@conference{conf:Delfin2016,
title = {Humanoid Localization and Navigation using a Visual Memory},
author = {Delfin, Josafat and Becerra, H\'{e}ctor M and Arechavaleta, Gustavo},
doi = {10.1109/HUMANOIDS.2016.7803354},
issn = {2164-0580},
year = {2016},
date = {2016-11-15},
booktitle = {IEEE-RAS 16th International Conference on Humanoid Robots},
pages = {725--731},
publisher = {IEEE},
abstract = {A visual memory (VM) is a topological map in which a set of key images organized in form of a graph represents an environment. In this paper, a navigation strategy for humanoid robots addressing the problems of localization, visual path planning and path following based on a VM is proposed. Assuming that the VM is given, the main contributions of the paper are: 1) A novel pure vision-based localization method. 2) The introduction of the estimated rotation between key images in the path planning stage to benefit paths with enough visual information and with less effort of robot rotation. 3) The integration of the complete navigation strategy and its experimental evaluation with a Nao robot in an unstructured environment. The humanoid robot is modeled as a holonomic system and the strategy might be used in different scenarios like corridors, uncluttered or cluttered environments.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
@conference{maldonado2016bag,
title = {A Bag of Relevant Regions Model for Place Recognition in Coral Reefs},
author = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril},
year = {2016},
date = {2016-01-01},
booktitle = {OCEANS 2016},
pages = {1--5},
organization = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
2015
Journal Articles
@article{doi:10.1108/IR-09-2014-0395,
title = {Acquisition of welding skills in industrial robots},
author = {Aviles-Vi\~{n}as, Jaime F and Lopez-Juarez, Ismael and Rios-Cabrera, Reyes},
url = {http://dx.doi.org/10.1108/IR-09-2014-0395},
doi = {10.1108/IR-09-2014-0395},
year = {2015},
date = {2015-01-01},
journal = {Industrial Robot: An International Journal},
volume = {42},
number = {2},
pages = {156--166},
abstract = {Purpose \textendash The purpose of this paper was to propose a method based on an Artificial Neural Network and a real-time vision algorithm, to learn welding skills in industrial robotics. Design/methodology/approach \textendash By using an optic camera to measure the bead geometry (width and height), the authors propose a real-time computer vision algorithm to extract training patterns and to enable an industrial robot to acquire and learn autonomously the welding skill. To test the approach, an industrial KUKA robot and a welding gas metal arc welding machine were used in a manufacturing cell. Findings \textendash Several data analyses are described, showing empirically that industrial robots can acquire the skill even if the specific welding parameters are unknown. Research limitations/implications \textendash The approach considers only stringer beads. Weave bead and bead penetration are not considered. Practical implications \textendash With the proposed approach, it is possible to learn specific welding parameters despite of the material, type of robot or welding machine. This is due to the fact that the feedback system produces automatic measurements that are labelled prior to the learning process. Originality/value \textendash The main contribution is that the complex learning process is reduced into an input-process-output system, where the process part is learnt automatically without human supervision, by registering the patterns with an automatically calibrated vision system.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
@article{Navarro-Gonzalez2015,
title = {On-line incremental learning for unknown conditions during assembly operations with industrial robots},
author = {Navarro-Gonzalez, Jose Luis and Lopez-Juarez, Ismael and Ordaz-Hernandez, Keny and Rios-Cabrera, Reyes},
url = {http://dx.doi.org/10.1007/s12530-014-9125-x},
doi = {10.1007/s12530-014-9125-x},
issn = {1868-6486},
year = {2015},
date = {2015-01-01},
journal = {Evolving Systems},
volume = {6},
number = {2},
pages = {101--114},
abstract = {The assembly operation using industrial robots can be accomplished successfully in well-structured environments where the mating pair location is known in advance. However, in real-world scenarios there are uncertainties associated to sensing, control and modelling errors that make the assembly task very complex. In addition, there are also unmodeled uncertainties that have to be taken into account for an effective control algorithm to succeed. Among these uncertainties, it can be mentioned disturbances, backlash and aging of mechanisms. In this paper, a method to overcome the effect of those uncertainties based on the Fuzzy ARTMAP artificial neural network (ANN) to successfully accomplish the assembly task is proposed. Experimental work is reported using an industrial 6 DOF robot arm in conjunction with a vision system for part location and wrist force/torque sensing data for assembly. Force data is fed into an ANN evolving controller during a typical peg in hole (PIH) assembly operation. The controller uses an incremental learning mechanism that is solely guided by the sensed forces. In this article, two approaches are presented in order to compare the incremental learning capability of the manipulator. The first approach uses a primitive knowledge base (PKB) containing 16 primitive movements to learn online the first insertion. During assembly, the manipulator learns new patterns according to the learning criteria which turn the PKB into an enhanced knowledge base (EKB). During a second insertion the controller uses effectively the EKB and operation improves. The second approach employs minimum information (it contains only the assembly direction) and the process starts from scratch. After several operations, that knowledge base increases by including only the needed patterns to perform the insertion. Experimental results showed that the evolving controller is able to assemble the matting pairs enhancing its knowledge whenever it is needed depending on the part geometry and level of expertise. Our approach is demonstrated through several PIH operations with different tolerances and part geometry. As the robot's expertise evolves, the PIH operation is carried out faster with shorter assembly trajectories.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
@article{Castelan2015,
title = {A Photometric Sampling Strategy for Reflectance Characterization and Transference},
author = {Castelan, Mario and Cruz-Perez, Elier and Torres-Mendez, Luz Abril},
url = {http://www.cys.cic.ipn.mx/ojs/index.php/CyS/article/view/1944},
year = {2015},
date = {2015-01-01},
journal = {Computaci\'{o}n y Sistemas},
volume = {19},
number = {2},
pages = {255--272},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2014
Journal Articles
@article{doi:10.1108/IJIUS-04-2014-0003,
title = {Multi-legged robot dynamics navigation model with optical flow},
author = {Martinez-Garcia, Edgar A. and Torres-Mendez, Luz Abril and Elara Mohan, Rajesh},
url = {http://dx.doi.org/10.1108/IJIUS-04-2014-0003},
doi = {10.1108/IJIUS-04-2014-0003},
year = {2014},
date = {2014-01-01},
journal = {International Journal of Intelligent Unmanned Systems},
volume = {2},
number = {2},
pages = {121--139},
abstract = {Purpose \textendash The purpose of this paper is to establish analytical and numerical solutions of a navigational law to estimate displacements of hyper-static multi-legged mobile robots, which combines: monocular vision (optical flow of regional invariants) and legs dynamics. Design/methodology/approach \textendash In this study the authors propose a Euler-Lagrange equation that control legs' joints to control robot's displacements. Robot's rotation and translational velocities are feedback by motion features of visual invariant descriptors. A general analytical solution of a derivative navigation law is proposed for hyper-static robots. The feedback is formulated with the local speed rate obtained from optical flow of visual regional invariants. The proposed formulation includes a data association algorithm aimed to correlate visual invariant descriptors detected in sequential images through monocular vision. The navigation law is constrained by a set of three kinematic equilibrium conditions for navigational scenarios: constant acceleration, constant velocity, and instantaneous acceleration. Findings \textendash The proposed data association method concerns local motions of multiple invariants (enhanced MSER) by minimizing the norm of multidimensional optical flow feature vectors. Kinematic measurements are used as observable arguments in the general dynamic control equation; while the legs joints dynamics model is used to formulate the controllable arguments. Originality/value \textendash The given analysis does not combine sensor data of any kind, but only monocular passive vision. The approach automatically detects environmental invariant descriptors with an enhanced version of the MSER method. Only optical flow vectors and robot's multi-leg dynamics are used to formulate descriptive rotational and translational motions for self-positioning.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Conferences
@conference{rodriguez2014vision,
  author       = {Rodriguez-Telles, Francisco G and Perez-Alcocer, Ricardo and Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Bikram Dey, Bir and Martinez-Garcia, Edgar A.},
  title        = {Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat},
  booktitle    = {2014 IEEE International Conference on Robotics and Automation (ICRA)},
  pages        = {3813--3818},
  organization = {IEEE},
  year         = {2014},
  date         = {2014-01-01},
  keywords     = {},
  pubstate     = {published},
  tppubtype    = {conference}
}
@conference{maldonado2014robust,
  author    = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Martinez-Garcia, Edgar A.},
  title     = {Robust detection and tracking of regions of interest for autonomous underwater robotic exploration},
  booktitle = {Proc. 6th Int. Conf. on Advanced Cognitive Technologies and Applications},
  pages     = {165--171},
  year      = {2014},
  date      = {2014-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
2012
Journal Articles
@article{RIOSCABRERA2012742,
title = {Efficient multi-camera vehicle detection, tracking, and identification in a tunnel surveillance application},
author = {Rios-Cabrera, Reyes and Tuytelaars, Tinne and Van Gool, Luc J.},
url = {http://www.sciencedirect.com/science/article/pii/S1077314212000380},
doi = {10.1016/j.cviu.2012.02.006},
issn = {1077-3142},
year = {2012},
date = {2012-01-01},
journal = {Computer Vision and Image Understanding},
volume = {116},
number = {6},
pages = {742--753},
keywords = {},
pubstate = {published},
tppubtype = {article}
}