{"id":76,"date":"2017-09-10T05:46:36","date_gmt":"2017-09-10T05:46:36","guid":{"rendered":"https:\/\/ryma.cinvestav.mx\/atorres\/?page_id=76"},"modified":"2017-11-11T17:35:18","modified_gmt":"2017-11-11T17:35:18","slug":"publicaciones","status":"publish","type":"page","link":"https:\/\/ryma.cinvestav.mx\/atorres\/publicaciones\/","title":{"rendered":"Publicaciones"},"content":{"rendered":"<p>[et_pb_section bb_built=\u00bb1&#8243; fullwidth=\u00bbon\u00bb specialty=\u00bboff\u00bb background_image=\u00bbhttps:\/\/ryma.cinvestav.mx\/atorres\/wp-content\/uploads\/sites\/8\/2017\/09\/IMAGEN-CAMPA\u00d1A1.jpg\u00bb _builder_version=\u00bb3.0.72&#8243; background_color_gradient_start=\u00bbrgba(0,0,0,0.56)\u00bb background_color_gradient_end=\u00bb#ffffff\u00bb background_color_gradient_direction=\u00bb122deg\u00bb background_color_gradient_direction_radial=\u00bbleft\u00bb background_color_gradient_start_position=\u00bb9%\u00bb locked=\u00bbon\u00bb global_module=\u00bb98&#8243;][et_pb_fullwidth_header admin_label=\u00bbNombre del Investigador\u00bb global_parent=\u00bb98&#8243; title=\u00bbDRA. 
LUZ ABRIL TORRES M\u00c9NDEZ\u00bb subhead=\u00bbProfesor Investigador\u00bb background_layout=\u00bbdark\u00bb text_orientation=\u00bbleft\u00bb header_fullscreen=\u00bboff\u00bb header_scroll_down=\u00bboff\u00bb content_orientation=\u00bbcenter\u00bb image_orientation=\u00bbcenter\u00bb title_font=\u00bb|on|||\u00bb subhead_font=\u00bb|on|||\u00bb custom_button_one=\u00bboff\u00bb button_one_letter_spacing=\u00bb0&#8243; button_one_icon_placement=\u00bbright\u00bb button_one_letter_spacing_hover=\u00bb0&#8243; custom_button_two=\u00bboff\u00bb button_two_letter_spacing=\u00bb0&#8243; button_two_icon_placement=\u00bbright\u00bb button_two_letter_spacing_hover=\u00bb0&#8243; subhead_font_size=\u00bb20px\u00bb _builder_version=\u00bb3.0.72&#8243; background_overlay_color=\u00bbrgba(0,0,0,0.3)\u00bb parent_locked=\u00bbon\u00bb \/][et_pb_fullwidth_menu global_parent=\u00bb98&#8243; menu_id=\u00bb6&#8243; background_layout=\u00bblight\u00bb text_orientation=\u00bbleft\u00bb submenu_direction=\u00bbdownwards\u00bb fullwidth_menu=\u00bboff\u00bb active_link_color=\u00bbrgba(131,0,233,0.81)\u00bb menu_font=\u00bb|on|||\u00bb _builder_version=\u00bb3.0.72&#8243; background_color_gradient_start=\u00bb#009f93&#8243; background_color_gradient_end=\u00bbrgba(0,0,0,0.27)\u00bb background_color_gradient_start_position=\u00bb20%\u00bb background_color=\u00bb#d3d3d3&#8243; parent_locked=\u00bbon\u00bb \/][\/et_pb_section][et_pb_section bb_built=\u00bb1&#8243; _builder_version=\u00bb3.0.72&#8243; custom_css_main_element=\u00bbbox-shadow: inset 0px 3px 2px rgba(50, 50, 50, 0.75);\u00bb locked=\u00bboff\u00bb][et_pb_row parent_locked=\u00bboff\u00bb background_position=\u00bbtop_left\u00bb background_repeat=\u00bbrepeat\u00bb background_size=\u00bbinitial\u00bb][et_pb_column type=\u00bb4_4&#8243;][et_pb_text _builder_version=\u00bb3.0.72&#8243; background_layout=\u00bblight\u00bb text_orientation=\u00bbleft\u00bb border_style=\u00bbsolid\u00bb 
parent_locked=\u00bboff\u00bb]<\/p>\n<hr \/>\n<div class='et-box et-shadow'>\n\t\t\t\t\t<div class='et-box-content'><h2><strong>PUBLICACIONES<\/strong><\/h2><\/div><\/div>\n<p>Para ver las publicaciones de todo Rob\u00f3tica y Manufactura Avanzada, ver:\u00a0 <strong><a href=\"https:\/\/ryma.cinvestav.mx\/investigacion\/publicaciones\/\">Publicaciones RYMA<\/a><\/strong><\/p>\n<p><strong><div class=\"teachpress_pub_list\"><form name=\"tppublistform\" method=\"get\"><a name=\"tppubs\" id=\"tppubs\"><\/a><div class=\"teachpress_filter\"><select class=\"default\" name=\"yr\" id=\"yr\" tabindex=\"2\" onchange=\"teachpress_jumpMenu('parent',this, 'https:\/\/ryma.cinvestav.mx\/atorres\/publicaciones\/?')\">\r\n                   <option value=\"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=#tppubs\">Todos los a\u00f1os<\/option>\r\n                   <option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2020#tppubs\" >2020<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2019#tppubs\" >2019<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2018#tppubs\" >2018<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2017#tppubs\" >2017<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2016#tppubs\" >2016<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2015#tppubs\" >2015<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2014#tppubs\" >2014<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2013#tppubs\" >2013<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2012#tppubs\" >2012<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2011#tppubs\" >2011<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2010#tppubs\" >2010<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2009#tppubs\" >2009<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2008#tppubs\" 
>2008<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2007#tppubs\" >2007<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2006#tppubs\" >2006<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2005#tppubs\" >2005<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2004#tppubs\" >2004<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2003#tppubs\" >2003<\/option>\r\n                <\/select><select class=\"default\" name=\"type\" id=\"type\" tabindex=\"3\" onchange=\"teachpress_jumpMenu('parent',this, 'https:\/\/ryma.cinvestav.mx\/atorres\/publicaciones\/?')\">\r\n                   <option value=\"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=#tppubs\">Todas las tipolog\u00edas<\/option>\r\n                   <option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=article#tppubs\" >Art\u00edculos de revista<\/option><option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=conference#tppubs\" >Conferencias<\/option><option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=inbook#tppubs\" >Cap\u00edtulos de libros<\/option><option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=inproceedings#tppubs\" >Proceedings Articles<\/option><option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=proceedings#tppubs\" >Actas de congresos<\/option>\r\n                <\/select><select class=\"default\" name=\"usr\" id=\"usr\" tabindex=\"6\" onchange=\"teachpress_jumpMenu('parent',this, 'https:\/\/ryma.cinvestav.mx\/atorres\/publicaciones\/?')\">\r\n                   <option value=\"tgid=&amp;yr=&amp;type=&amp;auth=&amp;usr=#tppubs\">Todos los usuarios<\/option>\r\n                   <option value = \"tgid=&amp;yr=&amp;type=&amp;auth=&amp;usr=12#tppubs\" >mcastelan<\/option>\r\n                <\/select><\/div><\/form><div class=\"teachpress_publication_list\"><h3 class=\"tp_h3\" id=\"tp_h3_2016\">2016<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Art\u00edculos de 
revista<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Perez-Alcocer, R. R.;  Torres-Mendez, Luz Abril;  Olguin-Diaz, Ernesto;  Maldonado-Ramirez, Alejandro<\/p><p class=\"tp_pub_title\">Vision-based Autonomous Underwater Vehicle Navigation in Poor Visibility Conditions using a Model-free Robust Control <span class=\"tp_pub_type tp_  article\">Art\u00edculo de revista<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_154\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('154','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_154\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{P\\'{e}rez-Alcocer2016,<br \/>\r\ntitle = {Vision-based Autonomous Underwater Vehicle Navigation in Poor Visibility Conditions using a Model-free Robust Control},<br \/>\r\nauthor = {Perez-Alcocer, R. R. 
and Torres-Mendez, Luz Abril and Olguin-Diaz, Ernesto and Maldonado-Ramirez, Alejandro },<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-06-06},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('154','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('8','tp_links')\" style=\"cursor:pointer;\">Robotic Visual Tracking of Relevant Cues in Underwater Environments with Poor Visibility Conditions<\/a> <span class=\"tp_pub_type tp_  article\">Art\u00edculo de revista<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_journal\">Journal of Sensors, <\/span><span class=\"tp_pub_additional_volume\">vol. 
2016, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_8\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('8','tp_abstract')\" title=\"Mostrar resumen\" style=\"cursor:pointer;\">Resumen<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_8\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('8','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_8\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('8','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_8\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{maldonado2016robotic,<br \/>\r\ntitle = {Robotic Visual Tracking of Relevant Cues in Underwater Environments with Poor Visibility Conditions},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril},<br \/>\r\nurl = {https:\/\/www.hindawi.com\/journals\/js\/2016\/4265042\/},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {Journal of Sensors},<br \/>\r\nvolume = {2016},<br \/>\r\npublisher = {Hindawi Publishing Corporation},<br \/>\r\nabstract = {Using visual sensors for detecting regions of interest in underwater environments is fundamental for many robotic applications. Particularly, for an autonomous exploration task, an underwater vehicle must be guided towards features that are of interest. If the relevant features can be seen from the distance, then smooth control movements of the vehicle are feasible in order to position itself close enough with the final goal of gathering visual quality images. 
However, it is a challenging task for a robotic system to achieve stable tracking of the same regions since marine environments are unstructured and highly dynamic and usually have poor visibility. In this paper, a framework that robustly detects and tracks regions of interest in real time is presented. We use the chromatic channels of a perceptual uniform color space to detect relevant regions and adapt a visual attention scheme to underwater scenes. For the tracking, we associate with each relevant point superpixel descriptors which are invariant to changes in illumination and shape. The field experiment results have demonstrated that our approach is robust when tested on different visibility conditions and depths in underwater explorations.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('8','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_8\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Using visual sensors for detecting regions of interest in underwater environments is fundamental for many robotic applications. Particularly, for an autonomous exploration task, an underwater vehicle must be guided towards features that are of interest. If the relevant features can be seen from the distance, then smooth control movements of the vehicle are feasible in order to position itself close enough with the final goal of gathering visual quality images. However, it is a challenging task for a robotic system to achieve stable tracking of the same regions since marine environments are unstructured and highly dynamic and usually have poor visibility. In this paper, a framework that robustly detects and tracks regions of interest in real time is presented. 
We use the chromatic channels of a perceptual uniform color space to detect relevant regions and adapt a visual attention scheme to underwater scenes. For the tracking, we associate with each relevant point superpixel descriptors which are invariant to changes in illumination and shape. The field experiment results have demonstrated that our approach is robust when tested on different visibility conditions and depths in underwater explorations.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('8','tp_abstract')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_8\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/www.hindawi.com\/journals\/js\/2016\/4265042\/\" title=\"https:\/\/www.hindawi.com\/journals\/js\/2016\/4265042\/\" target=\"_blank\">https:\/\/www.hindawi.com\/journals\/js\/2016\/4265042\/<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('8','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Cortes-Perez, Noel;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\">A Low-Cost Mirror-Based Active Perception System for Effective Collision Free Underwater Robotic Navigation <span class=\"tp_pub_type tp_  article\">Art\u00edculo de revista<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_journal\">2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), <\/span><span class=\"tp_pub_additional_pages\">pp. 
61-68, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_168\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('168','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_168\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Cortx00E9sPx00E9rez2016ALM,<br \/>\r\ntitle = {A Low-Cost Mirror-Based Active Perception System for Effective Collision Free Underwater Robotic Navigation},<br \/>\r\nauthor = {Cortes-Perez, Noel and Torres-Mendez, Luz Abril},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)},<br \/>\r\npages = {61-68},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('168','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_conference\">Conferencias<\/h3><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Castelan, Mario<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('166','tp_links')\" style=\"cursor:pointer;\">A bag of relevant regions for visual place recognition in challenging environments<\/a> <span class=\"tp_pub_type tp_  conference\">Conferencia<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">2016 23rd International Conference on Pattern Recognition (ICPR), <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_166\" class=\"tp_show\" 
onclick=\"teachpress_pub_showhide('166','tp_abstract')\" title=\"Mostrar resumen\" style=\"cursor:pointer;\">Resumen<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_166\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('166','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_166\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('166','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_166\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{7899826,<br \/>\r\ntitle = {A bag of relevant regions for visual place recognition in challenging environments},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Castelan, Mario},<br \/>\r\ndoi = {10.1109\/ICPR.2016.7899826},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-12-01},<br \/>\r\nbooktitle = {2016 23rd International Conference on Pattern Recognition (ICPR)},<br \/>\r\npages = {1358-1363},<br \/>\r\nabstract = {In this paper, we present a method for vision-based place recognition in environments with a high content of similar features and that are prone to variations in illumination. The high similarity of features makes difficult the disambiguation between two different places. The novelty of our method relies on using the Bag of Words (BoW) approach to derive an image descriptor from a set of relevant regions, which are extracted using a visual attention algorithm. We name our approach Bag of Relevant Regions (BoRR). The descriptor of each relevant region is built by using a 2D histogram of the chromatic channels of the CIE-Lab color space. 
We have compared our results with those using state of the art descriptors that include the BoW and demonstrate that our approach performs better in most of the cases.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('166','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_166\" style=\"display:none;\"><div class=\"tp_abstract_entry\">In this paper, we present a method for vision-based place recognition in environments with a high content of similar features and that are prone to variations in illumination. The high similarity of features makes difficult the disambiguation between two different places. The novelty of our method relies on using the Bag of Words (BoW) approach to derive an image descriptor from a set of relevant regions, which are extracted using a visual attention algorithm. We name our approach Bag of Relevant Regions (BoRR). The descriptor of each relevant region is built by using a 2D histogram of the chromatic channels of the CIE-Lab color space. 
We have compared our results with those using state of the art descriptors that include the BoW and demonstrate that our approach performs better in most of the cases.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('166','tp_abstract')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_166\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/ICPR.2016.7899826\" title=\"DOI de seguimiento:10.1109\/ICPR.2016.7899826\" target=\"_blank\">doi:10.1109\/ICPR.2016.7899826<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('166','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\">A Bag of Relevant Regions Model for Place Recognition in Coral Reefs <span class=\"tp_pub_type tp_  conference\">Conferencia<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">OCEANS 2016, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_9\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('9','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_9\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{maldonado2016bag,<br \/>\r\ntitle = {A Bag of Relevant Regions Model for Place Recognition in Coral Reefs},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril },<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\nbooktitle = {OCEANS 2016},<br 
\/>\r\npages = {1--5},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('9','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_inproceedings\">Proceedings Articles<\/h3><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-M\u00e9ndez, Luz Abril<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('165','tp_links')\" style=\"cursor:pointer;\">A bag of relevant regions model for visual place recognition in coral reefs<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2016 MTS\/IEEE Monterey, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1-5, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_165\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('165','tp_abstract')\" title=\"Mostrar resumen\" style=\"cursor:pointer;\">Resumen<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_165\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('165','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_165\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('165','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_165\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{7761188,<br \/>\r\ntitle = {A bag of relevant regions model for visual place recognition in coral reefs},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-M\\'{e}ndez, Luz Abril},<br \/>\r\ndoi = {10.1109\/OCEANS.2016.7761188},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-09-01},<br \/>\r\nbooktitle = {OCEANS 2016 MTS\/IEEE Monterey},<br \/>\r\npages = {1-5},<br \/>\r\nabstract = {Vision-based place recognition in underwater environments is a key component for autonomous robotic exploration. However, this task can be very challenging due to the inherent properties of this kind of places such as: color distortion, poor visibility, perceptual aliasing and dynamic illumination. In this paper, we present a method for vision-based place recognition in coral reefs. Our method relies on using the Bag-of-Words (BoW) approach to derive a descriptor, for the whole image, from a set of relevant regions, which are extracted by utilizing a visual attention algorithm. The descriptor for each relevant region is built by using an histogram of the chromatic channels of the CIE-Lab color space. 
We present results of our method for a place recognition task in real life videos as well as comparisons of our method against other popular techniques. It can be seen that our approach performs better in most of the cases.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('165','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_165\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Vision-based place recognition in underwater environments is a key component for autonomous robotic exploration. However, this task can be very challenging due to the inherent properties of this kind of places such as: color distortion, poor visibility, perceptual aliasing and dynamic illumination. In this paper, we present a method for vision-based place recognition in coral reefs. Our method relies on using the Bag-of-Words (BoW) approach to derive a descriptor, for the whole image, from a set of relevant regions, which are extracted by utilizing a visual attention algorithm. The descriptor for each relevant region is built by using an histogram of the chromatic channels of the CIE-Lab color space. We present results of our method for a place recognition task in real life videos as well as comparisons of our method against other popular techniques. 
It can be seen that our approach performs better in most of the cases.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('165','tp_abstract')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_165\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/OCEANS.2016.7761188\" title=\"DOI de seguimiento:10.1109\/OCEANS.2016.7761188\" target=\"_blank\">doi:10.1109\/OCEANS.2016.7761188<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('165','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Ponce-Hinestroza, A. N.;  Torres-Mendez, Luz Abril;  Drews, Paulo<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('167','tp_links')\" style=\"cursor:pointer;\">A statistical learning approach for underwater color restoration with adaptive training based on visual attention<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2016 MTS\/IEEE Monterey, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1-6, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_167\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('167','tp_abstract')\" title=\"Mostrar resumen\" style=\"cursor:pointer;\">Resumen<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_167\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('167','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_167\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('167','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_167\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{7761187,<br \/>\r\ntitle = {A statistical learning approach for underwater color restoration with adaptive training based on visual attention},<br \/>\r\nauthor = {Ponce-Hinestroza, A. N. and Torres-Mendez, Luz Abril and Drews, Paulo},<br \/>\r\ndoi = {10.1109\/OCEANS.2016.7761187},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-09-01},<br \/>\r\nbooktitle = {OCEANS 2016 MTS\/IEEE Monterey},<br \/>\r\npages = {1-6},<br \/>\r\nabstract = {In most artificial vision systems the quality of acquired images is directly related with the amount of information that can be obtained from them, and, particularly in underwater robotics applications involving monitoring and inspection tasks this is crucial. Statistical learning methods like Markov Random Fields with Belief Propagation (MRF-BP) provide a solution by using existing essential correlations in training sets. However, as in any restoration\/correction method for real applications, it is not possible to have color ground truth available on-line. 
In this paper, we present a MRF-BP model formulated in the chromatic domain of underwater scenes such that we synthesize the ground truth color to train the model and maximize the capabilities of our method. The generated ground truth introduces some improvements to existing color correction methods and visual attention considerations which also helps to choose a small size training set for the MRF-BP model. Feasibility of our approach is shown from the results in which a good color discrimination is observed even in poor visibility conditions.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('167','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_167\" style=\"display:none;\"><div class=\"tp_abstract_entry\">In most artificial vision systems the quality of acquired images is directly related with the amount of information that can be obtained from them, and, particularly in underwater robotics applications involving monitoring and inspection tasks this is crucial. Statistical learning methods like Markov Random Fields with Belief Propagation (MRF-BP) provide a solution by using existing essential correlations in training sets. However, as in any restoration\/correction method for real applications, it is not possible to have color ground truth available on-line. In this paper, we present a MRF-BP model formulated in the chromatic domain of underwater scenes such that we synthesize the ground truth color to train the model and maximize the capabilities of our method. The generated ground truth introduces some improvements to existing color correction methods and visual attention considerations which also helps to choose a small size training set for the MRF-BP model. 
Feasibility of our approach is shown from the results in which a good color discrimination is observed even in poor visibility conditions.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('167','tp_abstract')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_167\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/OCEANS.2016.7761187\" title=\"DOI de seguimiento:10.1109\/OCEANS.2016.7761187\" target=\"_blank\">doi:10.1109\/OCEANS.2016.7761187<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('167','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Ponce-Hinestroza, A-N;  Torres-Mendez, Luz Abril;  Drews, Paulo<\/p><p class=\"tp_pub_title\">A statistical learning approach for underwater color restoration with adaptive training based on visual attention <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2016 MTS\/IEEE Monterey, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1\u20136, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_11\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('11','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_11\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{ponce2016oceansb,<br \/>\r\ntitle = {A statistical learning approach for underwater color restoration with adaptive training based on visual attention},<br \/>\r\nauthor = {Ponce-Hinestroza, A-N and Torres-Mendez, Luz Abril and Drews, Paulo },<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\nbooktitle = {OCEANS 2016 MTS\/IEEE Monterey},<br \/>\r\npages = {1--6},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('11','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Ponce-Hinestroza, A-N;  Torres-Mendez, Luz Abril;  Drews, Paulo<\/p><p class=\"tp_pub_title\">Using a MRF-BP Model with Color Adaptive Training for Underwater Color Restoration <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_booktitle\">ICPR 2016 IEEE Cancun, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1\u20136, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_12\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('12','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_12\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{ponce2016icpr,<br \/>\r\ntitle = {Using a MRF-BP Model with Color Adaptive Training for Underwater Color Restoration},<br \/>\r\nauthor = {Ponce-Hinestroza, A-N and Torres-Mendez, Luz Abril and Drews, Paulo},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\nbooktitle = {ICPR 2016 IEEE Cancun},<br \/>\r\npages = {1--6},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('12','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2015\">2015<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Art\u00edculos de revista<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Castelan, Mario;  Cruz-Perez, Elier;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('16','tp_links')\" style=\"cursor:pointer;\">A Photometric Sampling Strategy for Reflectance Characterization and Transference<\/a> <span class=\"tp_pub_type tp_  article\">Art\u00edculo de revista<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_journal\">Computaci\u00f3n y Sistemas, <\/span><span class=\"tp_pub_additional_volume\">vol. 
19, <\/span><span class=\"tp_pub_additional_number\">no 2, <\/span><span class=\"tp_pub_additional_pages\">pp. 255-272, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_16\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('16','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_16\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('16','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_16\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Castelan2015,<br \/>\r\ntitle = {A Photometric Sampling Strategy for Reflectance Characterization and Transference},<br \/>\r\nauthor = {Castelan, Mario and Cruz-Perez, Elier and Torres-Mendez, Luz Abril},<br \/>\r\nurl = {http:\/\/www.cys.cic.ipn.mx\/ojs\/index.php\/CyS\/article\/view\/1944},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\njournal = {Computaci\\'{o}n y Sistemas},<br \/>\r\nvolume = {19},<br \/>\r\nnumber = {2},<br \/>\r\npages = {255-272},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('16','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_16\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.cys.cic.ipn.mx\/ojs\/index.php\/CyS\/article\/view\/1944\" title=\"http:\/\/www.cys.cic.ipn.mx\/ojs\/index.php\/CyS\/article\/view\/1944\" target=\"_blank\">http:\/\/www.cys.cic.ipn.mx\/ojs\/index.php\/CyS\/article\/view\/1944<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" 
onclick=\"teachpress_pub_showhide('16','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_conference\">Conferencias<\/h3><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Rodriguez-Telles, Francisco G<\/p><p class=\"tp_pub_title\">Ethologically inspired reactive exploration of coral reefs with collision avoidance: Bridging the gap between human and robot spatial understanding of unstructured environments <span class=\"tp_pub_type tp_  conference\">Conferencia<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">Intelligent Robots and Systems (IROS), 2015 IEEE\/RSJ International Conference on, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_17\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('17','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_17\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{maldonado2015ethologically,<br \/>\r\ntitle = {Ethologically inspired reactive exploration of coral reefs with collision avoidance: Bridging the gap between human and robot spatial understanding of unstructured environments},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Rodriguez-Telles, Francisco G},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\nbooktitle = {Intelligent Robots and Systems (IROS), 2015 IEEE\/RSJ International Conference on},<br \/>\r\npages = {4872--4879},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a 
class=\"tp_close\" onclick=\"teachpress_pub_showhide('17','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\">Autonomous robotic exploration of coral reefs using a visual attention-driven strategy for detecting and tracking regions of interest <span class=\"tp_pub_type tp_  conference\">Conferencia<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">OCEANS 2015-Genova, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_18\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('18','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_18\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{maldonado2015autonomous,<br \/>\r\ntitle = {Autonomous robotic exploration of coral reefs using a visual attention-driven strategy for detecting and tracking regions of interest},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril <br \/>\r\n},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\nbooktitle = {OCEANS 2015-Genova},<br \/>\r\npages = {1--5},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('18','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_inproceedings\">Proceedings Articles<\/h3><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Romero-Mart\u00ednez, C. 
E.;  Torres-Mendez, Luz Abril;  Martinez-Garcia, Edgar A.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('108','tp_links')\" style=\"cursor:pointer;\">Modeling motor-perceptual behaviors to enable intuitive paths in an aquatic robot<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2015 - MTS\/IEEE Washington, <\/span><span class=\"tp_pub_additional_pages\">pp. 1-5, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_108\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('108','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_108\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('108','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_108\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{7404424,<br \/>\r\ntitle = {Modeling motor-perceptual behaviors to enable intuitive paths in an aquatic robot},<br \/>\r\nauthor = {Romero-Mart\\'{i}nez, C. E. 
and Torres-Mendez, Luz Abril and Martinez-Garcia, Edgar A.},<br \/>\r\nurl = {http:\/\/ieeexplore.ieee.org\/document\/7404424\/},<br \/>\r\ndoi = {10.23919\/OCEANS.2015.7404424},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-10-01},<br \/>\r\nbooktitle = {OCEANS 2015 - MTS\/IEEE Washington},<br \/>\r\npages = {1-5},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('108','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_108\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/ieeexplore.ieee.org\/document\/7404424\/\" title=\"http:\/\/ieeexplore.ieee.org\/document\/7404424\/\" target=\"_blank\">http:\/\/ieeexplore.ieee.org\/document\/7404424\/<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.23919\/OCEANS.2015.7404424\" title=\"DOI de seguimiento:10.23919\/OCEANS.2015.7404424\" target=\"_blank\">doi:10.23919\/OCEANS.2015.7404424<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('108','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Labastida-Vald\u00e9s, L.;  Torres-Mendez, Luz Abril;  Hutchinson, S. 
A.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('106','tp_links')\" style=\"cursor:pointer;\">Using the motion perceptibility measure to classify points of interest for visual-based AUV guidance in a reef ecosystem<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2015 - MTS\/IEEE Washington, <\/span><span class=\"tp_pub_additional_pages\">pp. 1-6, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_106\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('106','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_106\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('106','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_106\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{7404605,<br \/>\r\ntitle = {Using the motion perceptibility measure to classify points of interest for visual-based AUV guidance in a reef ecosystem},<br \/>\r\nauthor = {Labastida-Vald\\'{e}s, L. and Torres-Mendez, Luz Abril and Hutchinson, S. 
A.},<br \/>\r\nurl = {http:\/\/ieeexplore.ieee.org\/document\/7404605\/},<br \/>\r\ndoi = {10.23919\/OCEANS.2015.7404605},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-10-01},<br \/>\r\nbooktitle = {OCEANS 2015 - MTS\/IEEE Washington},<br \/>\r\npages = {1-6},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('106','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_106\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/ieeexplore.ieee.org\/document\/7404605\/\" title=\"http:\/\/ieeexplore.ieee.org\/document\/7404605\/\" target=\"_blank\">http:\/\/ieeexplore.ieee.org\/document\/7404605\/<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.23919\/OCEANS.2015.7404605\" title=\"DOI de seguimiento:10.23919\/OCEANS.2015.7404605\" target=\"_blank\">doi:10.23919\/OCEANS.2015.7404605<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('106','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Gonz\u00e1lez-Garc\u00eda, Luis C.;  Torres-Mendez, Luz Abril;  Mart\u00ednez, Julieta;  Sattar, Junaed;  Little, James<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('109','tp_links')\" style=\"cursor:pointer;\">Are You Talking to Me? Detecting Attention in First-Person Interactions<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_pages\">pp.  
137-142, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 2308-4197<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_109\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('109','tp_abstract')\" title=\"Mostrar resumen\" style=\"cursor:pointer;\">Resumen<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_109\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('109','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_109\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('109','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_109\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{Gonz\\'{a}lez-Garc\\'{i}a2015,<br \/>\r\ntitle = {Are You Talking to Me? Detecting Attention in First-Person Interactions},<br \/>\r\nauthor = {Gonz\\'{a}lez-Garc\\'{i}a, Luis C. and Torres-Mendez, Luz Abril and Mart\\'{i}nez, Julieta and Sattar, Junaed and Little, James},<br \/>\r\nurl = {https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecting_Attention_in_First-Person_Interactions},<br \/>\r\nissn = {2308-4197},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-00-00},<br \/>\r\npages = { 137-142},<br \/>\r\nabstract = {This paper presents an approach for a mobile robot to detect the level of attention of a human in first-person interactions. Determining the degree of attention is an essential task in day-today interactions. In particular, we are interested in natural Human-Robot Interactions (HRI's) during which a robot needs to estimate the focus and the degree of the user's attention to determine the most appropriate moment to initiate, continue and terminate an interaction. 
Our approach is novel in that it uses a linear regression technique to classify raw depth-image data according to three levels of user attention on the robot (null, partial and total). This is achieved by measuring the linear independence of the input range data with respect to a dataset of user poses. We overcome the problem of time overhead that a large database can add to real-time Linear Regression Classification (LRC) methods by including only the feature vectors with the most relevant information. We demonstrate the approach by presenting experimental data from human-interaction studies with a PR2 robot. Results demonstrate our attention classifier to be accurate and robust in detecting the attention levels of human participants. I. INTRODUCTION Determining the attention of people is an essential component of day-today interactions. We are constantly monitoring other people's gaze, head and body poses while engaged in a conversation [1][2][3]. We also perform attention estimation in order to perform natural interactions [4][5]. In short, attention estimation is a fundamental component of effective social interaction; therefore, for robots to be efficient social agents it is necessary to provide them with reliable mechanisms to estimate human attention. We believe that human attention estimation, particularly in the context of interactions, is highly subjective. However, attempts to model it have been relatively successful, e.g., allowing a robot to ask for directions when it finds a human, as in the work of Weiss et al. [6]. Nonetheless, the state-of-the-art is still far from reaching a point where a robot can successfully interact with humans without relying on mechanisms not common to natural language. Recently, the use of range images to make more natural human-machine interfaces has been in the agenda of researchers, like in the case of the Microsoft Kinect TM , which delivers a skeleton of <br \/>\r\n<br \/>\r\nAre You Talking to Me? 
Detecting Attention in First-Person Interactions (PDF Download Available). Available from: https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecting_Attention_in_First-Person_Interactions [accessed Jun 17, 2017].},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('109','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_109\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This paper presents an approach for a mobile robot to detect the level of attention of a human in first-person interactions. Determining the degree of attention is an essential task in day-today interactions. In particular, we are interested in natural Human-Robot Interactions (HRI's) during which a robot needs to estimate the focus and the degree of the user's attention to determine the most appropriate moment to initiate, continue and terminate an interaction. Our approach is novel in that it uses a linear regression technique to classify raw depth-image data according to three levels of user attention on the robot (null, partial and total). This is achieved by measuring the linear independence of the input range data with respect to a dataset of user poses. We overcome the problem of time overhead that a large database can add to real-time Linear Regression Classification (LRC) methods by including only the feature vectors with the most relevant information. We demonstrate the approach by presenting experimental data from human-interaction studies with a PR2 robot. Results demonstrate our attention classifier to be accurate and robust in detecting the attention levels of human participants. I. INTRODUCTION Determining the attention of people is an essential component of day-today interactions. 
We are constantly monitoring other people's gaze, head and body poses while engaged in a conversation [1][2][3]. We also perform attention estimation in order to perform natural interactions [4][5]. In short, attention estimation is a fundamental component of effective social interaction; therefore, for robots to be efficient social agents it is necessary to provide them with reliable mechanisms to estimate human attention. We believe that human attention estimation, particularly in the context of interactions, is highly subjective. However, attempts to model it have been relatively successful, e.g., allowing a robot to ask for directions when it finds a human, as in the work of Weiss et al. [6]. Nonetheless, the state-of-the-art is still far from reaching a point where a robot can successfully interact with humans without relying on mechanisms not common to natural language. Recently, the use of range images to make more natural human-machine interfaces has been in the agenda of researchers, like in the case of the Microsoft Kinect TM , which delivers a skeleton of <br \/>\r\n<br \/>\r\nAre You Talking to Me? Detecting Attention in First-Person Interactions (PDF Download Available). 
Available from: https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecting_Attention_in_First-Person_Interactions [accessed Jun 17, 2017].<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('109','tp_abstract')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_109\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecting_Attention_in_First-Person_Interactions\" title=\"https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecti[...]\" target=\"_blank\">https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecti[...]<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('109','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\">using supercolor-pixels descriptors for tracking relevant cues in underwater environments with poor visibility conditions <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_publisher\">ICRA 2015 Workshop on Visual Place Recognition in Changing Environments, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_107\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('107','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_107\" style=\"display:none;\"><div 
class=\"tp_bibtex_entry\"><pre>@inproceedings{Maldonado-Ramirez2015,<br \/>\r\ntitle = {using supercolor-pixels descriptors for tracking relevant cues in underwater environments with poor visibility conditions},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-00-00},<br \/>\r\npublisher = {ICRA 2015 Workshop on Visual Place Recognition in Changing Environments},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('107','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2014\">2014<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Art\u00edculos de revista<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Martinez-Garcia, Edgar A.;  Torres-Mendez, Luz Abril;  Elara Mohan, Rajesh<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('20','tp_links')\" style=\"cursor:pointer;\">Multi-legged robot dynamics navigation model with optical flow<\/a> <span class=\"tp_pub_type tp_  article\">Art\u00edculo de revista<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_journal\">International Journal of Intelligent Unmanned Systems, <\/span><span class=\"tp_pub_additional_volume\">vol. 2, <\/span><span class=\"tp_pub_additional_number\">no 2, <\/span><span class=\"tp_pub_additional_pages\">pp. 
121-139, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_20\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('20','tp_abstract')\" title=\"Mostrar resumen\" style=\"cursor:pointer;\">Resumen<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_20\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('20','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_20\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('20','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_20\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{doi:10.1108\/IJIUS-04-2014-0003,<br \/>\r\ntitle = {Multi-legged robot dynamics navigation model with optical flow},<br \/>\r\nauthor = {Martinez-Garcia, Edgar A. and Torres-Mendez, Luz Abril and Elara Mohan, Rajesh },<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003},<br \/>\r\ndoi = {10.1108\/IJIUS-04-2014-0003},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-01-01},<br \/>\r\njournal = {International Journal of Intelligent Unmanned Systems},<br \/>\r\nvolume = {2},<br \/>\r\nnumber = {2},<br \/>\r\npages = {121-139},<br \/>\r\nabstract = {Purpose \\textendash The purpose of this paper is to establish analytical and numerical solutions of a navigational law to estimate displacements of hyper-static multi-legged mobile robots, which combines: monocular vision (optical flow of regional invariants) and legs dynamics. Design\/methodology\/approach \\textendash In this study the authors propose a Euler-Lagrange equation that control legs\u2019 joints to control robot's displacements. Robot's rotation and translational velocities are feedback by motion features of visual invariant descriptors. 
A general analytical solution of a derivative navigation law is proposed for hyper-static robots. The feedback is formulated with the local speed rate obtained from optical flow of visual regional invariants. The proposed formulation includes a data association algorithm aimed to correlate visual invariant descriptors detected in sequential images through monocular vision. The navigation law is constrained by a set of three kinematic equilibrium conditions for navigational scenarios: constant acceleration, constant velocity, and instantaneous acceleration. Findings \\textendash The proposed data association method concerns local motions of multiple invariants (enhanced MSER) by minimizing the norm of multidimensional optical flow feature vectors. Kinematic measurements are used as observable arguments in the general dynamic control equation; while the legs joints dynamics model is used to formulate the controllable arguments. Originality\/value \\textendash The given analysis does not combine sensor data of any kind, but only monocular passive vision. The approach automatically detects environmental invariant descriptors with an enhanced version of the MSER method. Only optical flow vectors and robot's multi-leg dynamics are used to formulate descriptive rotational and translational motions for self-positioning.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('20','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_20\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Purpose \u2013 The purpose of this paper is to establish analytical and numerical solutions of a navigational law to estimate displacements of hyper-static multi-legged mobile robots, which combines: monocular vision (optical flow of regional invariants) and legs dynamics. 
Design\/methodology\/approach \u2013 In this study the authors propose a Euler-Lagrange equation that control legs\u2019 joints to control robot's displacements. Robot's rotation and translational velocities are feedback by motion features of visual invariant descriptors. A general analytical solution of a derivative navigation law is proposed for hyper-static robots. The feedback is formulated with the local speed rate obtained from optical flow of visual regional invariants. The proposed formulation includes a data association algorithm aimed to correlate visual invariant descriptors detected in sequential images through monocular vision. The navigation law is constrained by a set of three kinematic equilibrium conditions for navigational scenarios: constant acceleration, constant velocity, and instantaneous acceleration. Findings \u2013 The proposed data association method concerns local motions of multiple invariants (enhanced MSER) by minimizing the norm of multidimensional optical flow feature vectors. Kinematic measurements are used as observable arguments in the general dynamic control equation; while the legs joints dynamics model is used to formulate the controllable arguments. Originality\/value \u2013 The given analysis does not combine sensor data of any kind, but only monocular passive vision. The approach automatically detects environmental invariant descriptors with an enhanced version of the MSER method. 
Only optical flow vectors and robot's multi-leg dynamics are used to formulate descriptive rotational and translational motions for self-positioning.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('20','tp_abstract')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_20\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003\" title=\"http:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003\" target=\"_blank\">http:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003\" title=\"DOI de seguimiento:10.1108\/IJIUS-04-2014-0003\" target=\"_blank\">doi:10.1108\/IJIUS-04-2014-0003<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('20','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_conference\">Conferencias<\/h3><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Martinez-Garcia, Edgar A.<\/p><p class=\"tp_pub_title\">Robust detection and tracking of regions of interest for autonomous underwater robotic exploration <span class=\"tp_pub_type tp_  conference\">Conferencia<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">Proc. 6th Int. Conf. 
on Advanced Cognitive Technologies and Applications, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_22\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('22','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_22\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{maldonado2014robust,<br \/>\r\ntitle = {Robust detection and tracking of regions of interest for autonomous underwater robotic exploration},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Martinez-Garcia, Edgar A.},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-01-01},<br \/>\r\nbooktitle = {Proc. 6th Int. Conf. on Advanced Cognitive Technologies and Applications},<br \/>\r\npages = {165--171},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('22','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rodriguez-Telles, Francisco G;  Perez-Alcocer, Ricardo;  Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Bikram Dey, Bir;  Martinez-Garcia, Edgar A.<\/p><p class=\"tp_pub_title\">Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat <span class=\"tp_pub_type tp_  conference\">Conferencia<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">2014 IEEE International Conference on Robotics and Automation (ICRA), <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2014<\/span>.<\/p><p class=\"tp_pub_menu\"><span 
class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_21\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('21','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_21\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{rodriguez2014vision,<br \/>\r\ntitle = {Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat},<br \/>\r\nauthor = {Rodriguez-Telles, Francisco G and Perez-Alcocer, Ricardo and Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Bikram Dey, Bir and Martinez-Garcia, Edgar A.},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-01-01},<br \/>\r\nbooktitle = {2014 IEEE International Conference on Robotics and Automation (ICRA)},<br \/>\r\npages = {3813--3818},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('21','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_inproceedings\">Proceedings Articles<\/h3><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rodr\u00edguez-Teiles, F. G.;  Perez-Alcocer, Ricardo;  Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Dey, B. 
B.;  Martinez-Garcia, Edgar A.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('68','tp_links')\" style=\"cursor:pointer;\">Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_booktitle\">2014 IEEE International Conference on Robotics and Automation (ICRA), <\/span><span class=\"tp_pub_additional_pages\">pp. 3813-3818, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1050-4729<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_68\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('68','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_68\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('68','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_68\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{6907412,<br \/>\r\ntitle = {Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat},<br \/>\r\nauthor = {Rodr\\'{i}guez-Teiles, F. G. and Perez-Alcocer, Ricardo and Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Dey, B. B. 
and Martinez-Garcia, Edgar A.},<br \/>\r\nurl = {http:\/\/ieeexplore.ieee.org\/document\/6907412\/},<br \/>\r\ndoi = {10.1109\/ICRA.2014.6907412},<br \/>\r\nissn = {1050-4729},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-05-01},<br \/>\r\nbooktitle = {2014 IEEE International Conference on Robotics and Automation (ICRA)},<br \/>\r\npages = {3813-3818},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('68','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_68\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/ieeexplore.ieee.org\/document\/6907412\/\" title=\"http:\/\/ieeexplore.ieee.org\/document\/6907412\/\" target=\"_blank\">http:\/\/ieeexplore.ieee.org\/document\/6907412\/<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/ICRA.2014.6907412\" title=\"DOI de seguimiento:10.1109\/ICRA.2014.6907412\" target=\"_blank\">doi:10.1109\/ICRA.2014.6907412<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('68','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2013\">2013<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Art\u00edculos de revista<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rivero-Juarez, Joaquin;  Martinez-Garcia, Edgar A.;  Torres-Mendez, Luz Abril;  Elara Mohan, Rajesh<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('25','tp_links')\" style=\"cursor:pointer;\">3D Heterogeneous Multi-sensor Global Registration<\/a> <span class=\"tp_pub_type tp_  article\">Art\u00edculo de revista<\/span> <\/p><p 
class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_journal\">Procedia Engineering, <\/span><span class=\"tp_pub_additional_volume\">vol. 64, <\/span><span class=\"tp_pub_additional_pages\">pp. 1552 - 1561, <\/span><span class=\"tp_pub_additional_year\">2013<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1877-7058<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_25\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('25','tp_abstract')\" title=\"Mostrar resumen\" style=\"cursor:pointer;\">Resumen<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_25\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('25','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_25\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('25','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_25\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{RIVEROJUAREZ20131552,<br \/>\r\ntitle = {3D Heterogeneous Multi-sensor Global Registration},<br \/>\r\nauthor = {Rivero-Juarez, Joaquin and Martinez-Garcia, Edgar A. and Torres-Mendez, Luz Abril and Elara Mohan, Rajesh },<br \/>\r\nurl = {http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1877705813017517},<br \/>\r\ndoi = {http:\/\/dx.doi.org\/10.1016\/j.proeng.2013.09.237},<br \/>\r\nissn = {1877-7058},<br \/>\r\nyear  = {2013},<br \/>\r\ndate = {2013-01-01},<br \/>\r\njournal = {Procedia Engineering},<br \/>\r\nvolume = {64},<br \/>\r\npages = {1552 - 1561},<br \/>\r\nabstract = {This manuscript presents a deterministic model to register heterogeneous 3D data arising from a ring of eight ultrasonic sonar, one high data density LiDAR (light detection and ranging), and a semi-ring of three visual sensors. 
The three visual sensors are arranged in a cylindrical ring, and although they provide 2D colour images, a radial multi-stereo geometric model is proposed to yield 3D data. All deployed sensors are geometrically placed on-board a wheeled mobile robot platform, and data registration is carried out navigating indoors. The sensor devices in discussion are coordinated and synchronized by a home-made distributed sensor suite system. Mathematical deterministic formulation for data registration is used to obtain experimental and numerical results on global mapping. Data registration relies on a geometric model to compute depth information from a semi- circular trinocular stereo sensor that is proposed to rectify and calibrate three image frames with different orientations and positions, but with same projection point.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('25','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_25\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This manuscript presents a deterministic model to register heterogeneous 3D data arising from a ring of eight ultrasonic sonar, one high data density LiDAR (light detection and ranging), and a semi-ring of three visual sensors. The three visual sensors are arranged in a cylindrical ring, and although they provide 2D colour images, a radial multi-stereo geometric model is proposed to yield 3D data. All deployed sensors are geometrically placed on-board a wheeled mobile robot platform, and data registration is carried out navigating indoors. The sensor devices in discussion are coordinated and synchronized by a home-made distributed sensor suite system. Mathematical deterministic formulation for data registration is used to obtain experimental and numerical results on global mapping. 
Data registration relies on a geometric model to compute depth information from a semi- circular trinocular stereo sensor that is proposed to rectify and calibrate three image frames with different orientations and positions, but with same projection point.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('25','tp_abstract')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_25\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1877705813017517\" title=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1877705813017517\" target=\"_blank\">http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1877705813017517<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/http:\/\/dx.doi.org\/10.1016\/j.proeng.2013.09.237\" title=\"DOI de seguimiento:http:\/\/dx.doi.org\/10.1016\/j.proeng.2013.09.237\" target=\"_blank\">doi:http:\/\/dx.doi.org\/10.1016\/j.proeng.2013.09.237<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('25','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2012\">2012<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Art\u00edculos de revista<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Miranda-Hernandez, Jocelyn;  Castelan, Mario;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('28','tp_links')\" style=\"cursor:pointer;\">Face colour synthesis using partial least squares and the luminance-\u03b1-\u03b2 colour transform<\/a> <span class=\"tp_pub_type tp_  article\">Art\u00edculo de revista<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span 
class=\"tp_pub_additional_journal\">IET Computer Vision, <\/span><span class=\"tp_pub_additional_volume\">vol. 6, <\/span><span class=\"tp_pub_additional_number\">no 4, <\/span><span class=\"tp_pub_additional_pages\">pp. 263-272, <\/span><span class=\"tp_pub_additional_year\">2012<\/span>, <span class=\"tp_pub_additional_isbn\">ISBN: 1751-9632<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_28\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('28','tp_abstract')\" title=\"Mostrar resumen\" style=\"cursor:pointer;\">Resumen<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_28\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('28','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_28\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('28','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_28\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Miranda2012,<br \/>\r\ntitle = {Face colour synthesis using partial least squares and the luminance-\u03b1-\u03b2 colour transform},<br \/>\r\nauthor = {Miranda-Hernandez, Jocelyn and Castelan, Mario and Torres-Mendez, Luz Abril },<br \/>\r\nurl = {http:\/\/digital-library.theiet.org\/content\/journals\/10.1049\/iet-cvi.2011.0168},<br \/>\r\ndoi = {10.1049\/iet-cvi.2011.0168},<br \/>\r\nisbn = {1751-9632},<br \/>\r\nyear  = {2012},<br \/>\r\ndate = {2012-07-01},<br \/>\r\njournal = {IET Computer Vision},<br \/>\r\nvolume = {6},<br \/>\r\nnumber = {4},<br \/>\r\npages = {263-272},<br \/>\r\nabstract = {For many tasks, it is necessary to synthesise realistic colour in faces from greyscale values. This is the problem the authors address in this study. 
Rather than propagating colour information in some regions of the image or transferring colour from an image source to a greyscale using some corresponding criterion, as many colouring systems attempt to do, they seek to synthesise facial colour information using a database of examples. This methodology is divided into two main stages. In the first stage the facial skin tone is predicted through the multiple linear regression method known as partial least squares. This regression allows to define a linear transformation between facial greyscale and colour subspaces. The second stage involves the luminance-\u03b1-\u03b2 (L\u03b1\u03b2) colour transform which is responsible for the recovery of the fine facial detail. The core of the proposed methodology is the combination of statistical subspace analysis with the appropriate colour transform so as to produce realistic facial colourisation results in a direct manner.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('28','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_28\" style=\"display:none;\"><div class=\"tp_abstract_entry\">For many tasks, it is necessary to synthesise realistic colour in faces from greyscale values. This is the problem the authors address in this study. Rather than propagating colour information in some regions of the image or transferring colour from an image source to a greyscale using some corresponding criterion, as many colouring systems attempt to do, they seek to synthesise facial colour information using a database of examples. This methodology is divided into two main stages. In the first stage the facial skin tone is predicted through the multiple linear regression method known as partial least squares. 
This regression allows to define a linear transformation between facial greyscale and colour subspaces. The second stage involves the luminance-\u03b1-\u03b2 (L\u03b1\u03b2) colour transform which is responsible for the recovery of the fine facial detail. The core of the proposed methodology is the combination of statistical subspace analysis with the appropriate colour transform so as to produce realistic facial colourisation results in a direct manner.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('28','tp_abstract')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_28\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/digital-library.theiet.org\/content\/journals\/10.1049\/iet-cvi.2011.0168\" title=\"http:\/\/digital-library.theiet.org\/content\/journals\/10.1049\/iet-cvi.2011.0168\" target=\"_blank\">http:\/\/digital-library.theiet.org\/content\/journals\/10.1049\/iet-cvi.2011.0168<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1049\/iet-cvi.2011.0168\" title=\"DOI de seguimiento:10.1049\/iet-cvi.2011.0168\" target=\"_blank\">doi:10.1049\/iet-cvi.2011.0168<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('28','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2007\">2007<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_conference\">Conferencias<\/h3><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Torres-Mendez, Luz Abril;  Ramirez-Sosa Moran, Marco I;  Castelan, Mario<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('41','tp_links')\" style=\"cursor:pointer;\">A Single-Frame Super-Resolution Innovative Approach<\/a> <span class=\"tp_pub_type tp_  
conference\">Conferencia<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">MICAI 2007: Advances in Artificial Intelligence: 6th Mexican International Conference on Artificial Intelligence, Aguascalientes, Mexico, November 4-10, 2007. Proceedings, <\/span><span class=\"tp_pub_additional_publisher\">Springer Berlin Heidelberg, <\/span><span class=\"tp_pub_additional_address\">Berlin, Heidelberg, <\/span><span class=\"tp_pub_additional_year\">2007<\/span>, <span class=\"tp_pub_additional_isbn\">ISBN: 978-3-540-76631-5<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_41\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('41','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_41\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('41','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_41\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{Torres-M\\'{e}ndez2007,<br \/>\r\ntitle = {A Single-Frame Super-Resolution Innovative Approach},<br \/>\r\nauthor = {Torres-Mendez, Luz Abril and Ramirez-Sosa Moran, Marco I and Castelan, Mario },<br \/>\r\neditor = {Gelbukh, Alexander <br \/>\r\nand Kuri Morales, Angel Fernando},<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_61},<br \/>\r\ndoi = {10.1007\/978-3-540-76631-5_61},<br \/>\r\nisbn = {978-3-540-76631-5},<br \/>\r\nyear  = {2007},<br \/>\r\ndate = {2007-01-01},<br \/>\r\nbooktitle = {MICAI 2007: Advances in Artificial Intelligence: 6th Mexican International Conference on Artificial Intelligence, Aguascalientes, Mexico, November 4-10, 2007. 
Proceedings},<br \/>\r\npages = {640--649},<br \/>\r\npublisher = {Springer Berlin Heidelberg},<br \/>\r\naddress = {Berlin, Heidelberg},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('41','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_41\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_61\" title=\"http:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_61\" target=\"_blank\">http:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_61<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_61\" title=\"DOI de seguimiento:10.1007\/978-3-540-76631-5_61\" target=\"_blank\">doi:10.1007\/978-3-540-76631-5_61<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('41','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Castelan, Mario;  Almazan-Delfin, Ana Judith;  Ramirez-Sosa Moran, Marco I;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('42','tp_links')\" style=\"cursor:pointer;\">Example-Based Face Shape Recovery Using the Zenith Angle of the Surface Normal<\/a> <span class=\"tp_pub_type tp_  conference\">Conferencia<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">MICAI 2007: Advances in Artificial Intelligence: 6th Mexican International Conference on Artificial Intelligence, Aguascalientes, Mexico, November 4-10, 2007. 
Proceedings, <\/span><span class=\"tp_pub_additional_publisher\">Springer Berlin Heidelberg, <\/span><span class=\"tp_pub_additional_address\">Berlin, Heidelberg, <\/span><span class=\"tp_pub_additional_year\">2007<\/span>, <span class=\"tp_pub_additional_isbn\">ISBN: 978-3-540-76631-5<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_42\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('42','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_42\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('42','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_42\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{Castel\\'{a}n2007b,<br \/>\r\ntitle = {Example-Based Face Shape Recovery Using the Zenith Angle of the Surface Normal},<br \/>\r\nauthor = {Castelan, Mario and Almazan-Delfin, Ana Judith and Ramirez-Sosa Moran, Marco I and Torres-Mendez, Luz Abril },<br \/>\r\neditor = {Gelbukh, Alexander <br \/>\r\nand Kuri Morales, Angel Fernando},<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_72},<br \/>\r\ndoi = {10.1007\/978-3-540-76631-5_72},<br \/>\r\nisbn = {978-3-540-76631-5},<br \/>\r\nyear  = {2007},<br \/>\r\ndate = {2007-01-01},<br \/>\r\nbooktitle = {MICAI 2007: Advances in Artificial Intelligence: 6th Mexican International Conference on Artificial Intelligence, Aguascalientes, Mexico, November 4-10, 2007. 
Proceedings},<br \/>\r\npages = {758--768},<br \/>\r\npublisher = {Springer Berlin Heidelberg},<br \/>\r\naddress = {Berlin, Heidelberg},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('42','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_42\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_72\" title=\"http:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_72\" target=\"_blank\">http:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_72<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_72\" title=\"DOI de seguimiento:10.1007\/978-3-540-76631-5_72\" target=\"_blank\">doi:10.1007\/978-3-540-76631-5_72<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('42','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><\/div><\/div><\/strong><\/p>\n<p>[\/et_pb_text][\/et_pb_column][\/et_pb_row][\/et_pb_section][et_pb_section bb_built=\u00bb1&#8243; fullwidth=\u00bboff\u00bb specialty=\u00bboff\u00bb background_color=\u00bbrgba(0,0,0,0.32)\u00bb inner_shadow=\u00bbon\u00bb custom_css_main_element=\u00bbbox-shadow: inset 0px 3px 2px rgba(150, 150, 150, 0.85);\u00bb _builder_version=\u00bb3.0.72&#8243; locked=\u00bbon\u00bb global_module=\u00bb321&#8243;][et_pb_row global_parent=\u00bb321&#8243; make_fullwidth=\u00bboff\u00bb use_custom_width=\u00bboff\u00bb width_unit=\u00bbon\u00bb use_custom_gutter=\u00bboff\u00bb allow_player_pause=\u00bboff\u00bb parallax=\u00bboff\u00bb parallax_method=\u00bbon\u00bb make_equal=\u00bboff\u00bb parallax_1=\u00bboff\u00bb parallax_method_1=\u00bboff\u00bb custom_margin=\u00bb-40px|||\u00bb 
background_position=\u00bbtop_left\u00bb background_repeat=\u00bbrepeat\u00bb background_size=\u00bbinitial\u00bb parent_locked=\u00bbon\u00bb][et_pb_column type=\u00bb4_4&#8243;][et_pb_image admin_label=\u00bbLogoCINVESTAV del Pie de p\u00e1gina\u00bb global_parent=\u00bb321&#8243; src=\u00bbhttps:\/\/ryma.cinvestav.mx\/wp-content\/uploads\/2014\/08\/roboticaCinvestavOK_transparencia_white.png\u00bb alt=\u00bbRob\u00f3tica y Manufactura Avanzada, Cinvestav\u00bb show_in_lightbox=\u00bboff\u00bb url_new_window=\u00bboff\u00bb use_overlay=\u00bboff\u00bb animation=\u00bboff\u00bb sticky=\u00bbon\u00bb align=\u00bbcenter\u00bb max_width=\u00bb95px\u00bb max_width_last_edited=\u00bbon|desktop\u00bb force_fullwidth=\u00bboff\u00bb always_center_on_mobile=\u00bbon\u00bb border_style=\u00bbsolid\u00bb custom_margin=\u00bb||15px|\u00bb _builder_version=\u00bb3.0.72&#8243; parent_locked=\u00bbon\u00bb \/][et_pb_text global_parent=\u00bb321&#8243; _builder_version=\u00bb3.0.72&#8243; background_layout=\u00bbdark\u00bb text_orientation=\u00bbcenter\u00bb border_style=\u00bbsolid\u00bb custom_margin=\u00bb||-50px|\u00bb parent_locked=\u00bbon\u00bb]<\/p>\n<hr \/>\n<p style=\"text-align: center;\">Av. Industrial\u00a0Metalurgia\u00a0#1062,\u00a0Parque Ind. Ramos Arizpe,\u00a0Ramos Arizpe, Coah.\u00a0C.P. 25900, M\u00e9xico. \u00a0Tel. 
+52 (844) 438-9600<\/p>\n<p>[\/et_pb_text][\/et_pb_column][\/et_pb_row][\/et_pb_section]<\/p>\n","protected":false},"excerpt":{"rendered":"<p><div class='et-box et-shadow'>\n\t\t\t\t\t<div class='et-box-content'>PUBLICACIONES<\/div><\/div> Para ver las publicaciones de todo Rob\u00f3tica y Manufactura Avanzada, ver:\u00a0 Publicaciones RYMA <div class=\"teachpress_pub_list\"><form name=\"tppublistform\" method=\"get\"><a name=\"tppubs\" id=\"tppubs\"><\/a><div class=\"teachpress_filter\"><select class=\"default\" name=\"yr\" id=\"yr\" tabindex=\"2\" onchange=\"teachpress_jumpMenu('parent',this, 'https:\/\/ryma.cinvestav.mx\/atorres\/publicaciones\/?')\">\r\n                   <option value=\"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=#tppubs\">Todos los a\u00f1os<\/option>\r\n                   <option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2020#tppubs\" >2020<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2019#tppubs\" >2019<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2018#tppubs\" >2018<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2017#tppubs\" >2017<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2016#tppubs\" >2016<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2015#tppubs\" >2015<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2014#tppubs\" >2014<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2013#tppubs\" >2013<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2012#tppubs\" >2012<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2011#tppubs\" >2011<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2010#tppubs\" >2010<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2009#tppubs\" >2009<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2008#tppubs\" >2008<\/option><option value = 
\"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2007#tppubs\" >2007<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2006#tppubs\" >2006<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2005#tppubs\" >2005<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2004#tppubs\" >2004<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2003#tppubs\" >2003<\/option>\r\n                <\/select><select class=\"default\" name=\"type\" id=\"type\" tabindex=\"3\" onchange=\"teachpress_jumpMenu('parent',this, 'https:\/\/ryma.cinvestav.mx\/atorres\/publicaciones\/?')\">\r\n                   <option value=\"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=#tppubs\">Todas las tipolog\u00edas<\/option>\r\n                   <option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=article#tppubs\" >Art\u00edculos de revista<\/option><option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=conference#tppubs\" >Conferencias<\/option><option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=inbook#tppubs\" >Cap\u00edtulos de libros<\/option><option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=inproceedings#tppubs\" >Proceedings Articles<\/option><option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=proceedings#tppubs\" >Actas de congresos<\/option>\r\n                <\/select><select class=\"default\" name=\"usr\" id=\"usr\" tabindex=\"6\" onchange=\"teachpress_jumpMenu('parent',this, 'https:\/\/ryma.cinvestav.mx\/atorres\/publicaciones\/?')\">\r\n                   <option value=\"tgid=&amp;yr=&amp;type=&amp;auth=&amp;usr=#tppubs\">Todos los usuarios<\/option>\r\n                   <option value = \"tgid=&amp;yr=&amp;type=&amp;auth=&amp;usr=12#tppubs\" >mcastelan<\/option>\r\n                <\/select><\/div><\/form><div class=\"teachpress_publication_list\"><h3 class=\"tp_h3\" id=\"tp_h3_2016\">2016<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Art\u00edculos de revista<\/h3><div 
class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Perez-Alcocer, R. R.;  Torres-Mendez, Luz Abril;  Olguin-Diaz, Ernesto;  Maldonado-Ramirez, Alejandro<\/p><p class=\"tp_pub_title\">Vision-based Autonomous Underwater Vehicle Navigation in Poor Visibility Conditions using a Model-free Robust Control <span class=\"tp_pub_type tp_  article\">Art\u00edculo de revista<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_154\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('154','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_154\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{P\\'{e}rez-Alcocer2016,<br \/>\r\ntitle = {Vision-based Autonomous Underwater Vehicle Navigation in Poor Visibility Conditions using a Model-free Robust Control},<br \/>\r\nauthor = {Perez-Alcocer, R. R. 
and Torres-Mendez, Luz Abril and Olguin-Diaz, Ernesto and Maldonado-Ramirez, Alejandro },<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-06-06},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('154','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('8','tp_links')\" style=\"cursor:pointer;\">Robotic Visual Tracking of Relevant Cues in Underwater Environments with Poor Visibility Conditions<\/a> <span class=\"tp_pub_type tp_  article\">Art\u00edculo de revista<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_journal\">Journal of Sensors, <\/span><span class=\"tp_pub_additional_volume\">vol. 
2016, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_8\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('8','tp_abstract')\" title=\"Mostrar resumen\" style=\"cursor:pointer;\">Resumen<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_8\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('8','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_8\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('8','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_8\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{maldonado2016robotic,<br \/>\r\ntitle = {Robotic Visual Tracking of Relevant Cues in Underwater Environments with Poor Visibility Conditions},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril},<br \/>\r\nurl = {https:\/\/www.hindawi.com\/journals\/js\/2016\/4265042\/},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {Journal of Sensors},<br \/>\r\nvolume = {2016},<br \/>\r\npublisher = {Hindawi Publishing Corporation},<br \/>\r\nabstract = {Using visual sensors for detecting regions of interest in underwater environments is fundamental for many robotic applications. Particularly, for an autonomous exploration task, an underwater vehicle must be guided towards features that are of interest. If the relevant features can be seen from the distance, then smooth control movements of the vehicle are feasible in order to position itself close enough with the final goal of gathering visual quality images. 
However, it is a challenging task for a robotic system to achieve stable tracking of the same regions since marine environments are unstructured and highly dynamic and usually have poor visibility. In this paper, a framework that robustly detects and tracks regions of interest in real time is presented. We use the chromatic channels of a perceptual uniform color space to detect relevant regions and adapt a visual attention scheme to underwater scenes. For the tracking, we associate with each relevant point superpixel descriptors which are invariant to changes in illumination and shape. The field experiment results have demonstrated that our approach is robust when tested on different visibility conditions and depths in underwater explorations.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('8','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_8\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Using visual sensors for detecting regions of interest in underwater environments is fundamental for many robotic applications. Particularly, for an autonomous exploration task, an underwater vehicle must be guided towards features that are of interest. If the relevant features can be seen from the distance, then smooth control movements of the vehicle are feasible in order to position itself close enough with the final goal of gathering visual quality images. However, it is a challenging task for a robotic system to achieve stable tracking of the same regions since marine environments are unstructured and highly dynamic and usually have poor visibility. In this paper, a framework that robustly detects and tracks regions of interest in real time is presented. 
We use the chromatic channels of a perceptual uniform color space to detect relevant regions and adapt a visual attention scheme to underwater scenes. For the tracking, we associate with each relevant point superpixel descriptors which are invariant to changes in illumination and shape. The field experiment results have demonstrated that our approach is robust when tested on different visibility conditions and depths in underwater explorations.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('8','tp_abstract')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_8\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/www.hindawi.com\/journals\/js\/2016\/4265042\/\" title=\"https:\/\/www.hindawi.com\/journals\/js\/2016\/4265042\/\" target=\"_blank\">https:\/\/www.hindawi.com\/journals\/js\/2016\/4265042\/<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('8','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Cortes-Perez, Noel;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\">A Low-Cost Mirror-Based Active Perception System for Effective Collision Free Underwater Robotic Navigation <span class=\"tp_pub_type tp_  article\">Art\u00edculo de revista<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_journal\">2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), <\/span><span class=\"tp_pub_additional_pages\">pp. 
61-68, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_168\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('168','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_168\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Cortx00E9sPx00E9rez2016ALM,<br \/>\r\ntitle = {A Low-Cost Mirror-Based Active Perception System for Effective Collision Free Underwater Robotic Navigation},<br \/>\r\nauthor = {Cortes-Perez, Noel and Torres-Mendez, Luz Abril},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)},<br \/>\r\npages = {61-68},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('168','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_conference\">Conferencias<\/h3><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Castelan, Mario<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('166','tp_links')\" style=\"cursor:pointer;\">A bag of relevant regions for visual place recognition in challenging environments<\/a> <span class=\"tp_pub_type tp_  conference\">Conferencia<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">2016 23rd International Conference on Pattern Recognition (ICPR), <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_166\" class=\"tp_show\" 
onclick=\"teachpress_pub_showhide('166','tp_abstract')\" title=\"Mostrar resumen\" style=\"cursor:pointer;\">Resumen<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_166\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('166','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_166\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('166','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_166\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{7899826,<br \/>\r\ntitle = {A bag of relevant regions for visual place recognition in challenging environments},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Castelan, Mario},<br \/>\r\ndoi = {10.1109\/ICPR.2016.7899826},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-12-01},<br \/>\r\nbooktitle = {2016 23rd International Conference on Pattern Recognition (ICPR)},<br \/>\r\npages = {1358-1363},<br \/>\r\nabstract = {In this paper, we present a method for vision-based place recognition in environments with a high content of similar features and that are prone to variations in illumination. The high similarity of features makes difficult the disambiguation between two different places. The novelty of our method relies on using the Bag of Words (BoW) approach to derive an image descriptor from a set of relevant regions, which are extracted using a visual attention algorithm. We name our approach Bag of Relevant Regions (BoRR). The descriptor of each relevant region is built by using a 2D histogram of the chromatic channels of the CIE-Lab color space. 
We have compared our results with those using state of the art descriptors that include the BoW and demonstrate that our approach performs better in most of the cases.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('166','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_166\" style=\"display:none;\"><div class=\"tp_abstract_entry\">In this paper, we present a method for vision-based place recognition in environments with a high content of similar features and that are prone to variations in illumination. The high similarity of features makes difficult the disambiguation between two different places. The novelty of our method relies on using the Bag of Words (BoW) approach to derive an image descriptor from a set of relevant regions, which are extracted using a visual attention algorithm. We name our approach Bag of Relevant Regions (BoRR). The descriptor of each relevant region is built by using a 2D histogram of the chromatic channels of the CIE-Lab color space. 
We have compared our results with those using state of the art descriptors that include the BoW and demonstrate that our approach performs better in most of the cases.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('166','tp_abstract')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_166\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/ICPR.2016.7899826\" title=\"DOI de seguimiento:10.1109\/ICPR.2016.7899826\" target=\"_blank\">doi:10.1109\/ICPR.2016.7899826<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('166','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\">A Bag of Relevant Regions Model for Place Recognition in Coral Reefs <span class=\"tp_pub_type tp_  conference\">Conferencia<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">OCEANS 2016, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_9\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('9','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_9\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{maldonado2016bag,<br \/>\r\ntitle = {A Bag of Relevant Regions Model for Place Recognition in Coral Reefs},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril },<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\nbooktitle = {OCEANS 2016},<br 
\/>\r\npages = {1--5},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('9','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_inproceedings\">Proceedings Articles<\/h3><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-M\u00e9ndez, Luz Abril<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('165','tp_links')\" style=\"cursor:pointer;\">A bag of relevant regions model for visual place recognition in coral reefs<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2016 MTS\/IEEE Monterey, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1-5, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_165\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('165','tp_abstract')\" title=\"Mostrar resumen\" style=\"cursor:pointer;\">Resumen<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_165\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('165','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_165\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('165','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_165\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{7761188,<br \/>\r\ntitle = {A bag of relevant regions model for visual place recognition in coral reefs},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-M\\'{e}ndez, Luz Abril},<br \/>\r\ndoi = {10.1109\/OCEANS.2016.7761188},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-09-01},<br \/>\r\nbooktitle = {OCEANS 2016 MTS\/IEEE Monterey},<br \/>\r\npages = {1-5},<br \/>\r\nabstract = {Vision-based place recognition in underwater environments is a key component for autonomous robotic exploration. However, this task can be very challenging due to the inherent properties of this kind of places such as: color distortion, poor visibility, perceptual aliasing and dynamic illumination. In this paper, we present a method for vision-based place recognition in coral reefs. Our method relies on using the Bag-of-Words (BoW) approach to derive a descriptor, for the whole image, from a set of relevant regions, which are extracted by utilizing a visual attention algorithm. The descriptor for each relevant region is built by using an histogram of the chromatic channels of the CIE-Lab color space. 
We present results of our method for a place recognition task in real life videos as well as comparisons of our method against other popular techniques. It can be seen that our approach performs better in most of the cases.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('165','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_165\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Vision-based place recognition in underwater environments is a key component for autonomous robotic exploration. However, this task can be very challenging due to the inherent properties of this kind of places such as: color distortion, poor visibility, perceptual aliasing and dynamic illumination. In this paper, we present a method for vision-based place recognition in coral reefs. Our method relies on using the Bag-of-Words (BoW) approach to derive a descriptor, for the whole image, from a set of relevant regions, which are extracted by utilizing a visual attention algorithm. The descriptor for each relevant region is built by using an histogram of the chromatic channels of the CIE-Lab color space. We present results of our method for a place recognition task in real life videos as well as comparisons of our method against other popular techniques. 
It can be seen that our approach performs better in most of the cases.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('165','tp_abstract')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_165\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/OCEANS.2016.7761188\" title=\"DOI de seguimiento:10.1109\/OCEANS.2016.7761188\" target=\"_blank\">doi:10.1109\/OCEANS.2016.7761188<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('165','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Ponce-Hinestroza, A. N.;  Torres-Mendez, Luz Abril;  Drews, Paulo<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('167','tp_links')\" style=\"cursor:pointer;\">A statistical learning approach for underwater color restoration with adaptive training based on visual attention<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2016 MTS\/IEEE Monterey, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1-6, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_167\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('167','tp_abstract')\" title=\"Mostrar resumen\" style=\"cursor:pointer;\">Resumen<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_167\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('167','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_167\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('167','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_167\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{7761187,<br \/>\r\ntitle = {A statistical learning approach for underwater color restoration with adaptive training based on visual attention},<br \/>\r\nauthor = {Ponce-Hinestroza, A. N. and Torres-Mendez, Luz Abril and Drews, Paulo},<br \/>\r\ndoi = {10.1109\/OCEANS.2016.7761187},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-09-01},<br \/>\r\nbooktitle = {OCEANS 2016 MTS\/IEEE Monterey},<br \/>\r\npages = {1-6},<br \/>\r\nabstract = {In most artificial vision systems the quality of acquired images is directly related with the amount of information that can be obtained from them, and, particularly in underwater robotics applications involving monitoring and inspection tasks this is crucial. Statistical learning methods like Markov Random Fields with Belief Propagation (MRF-BP) provide a solution by using existing essential correlations in training sets. However, as in any restoration\/correction method for real applications, it is not possible to have color ground truth available on-line. 
In this paper, we present a MRF-BP model formulated in the chromatic domain of underwater scenes such that we synthesize the ground truth color to train the model and maximize the capabilities of our method. The generated ground truth introduces some improvements to existing color correction methods and visual attention considerations which also helps to choose a small size training set for the MRF-BP model. Feasibility of our approach is shown from the results in which a good color discrimination is observed even in poor visibility conditions.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('167','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_167\" style=\"display:none;\"><div class=\"tp_abstract_entry\">In most artificial vision systems the quality of acquired images is directly related with the amount of information that can be obtained from them, and, particularly in underwater robotics applications involving monitoring and inspection tasks this is crucial. Statistical learning methods like Markov Random Fields with Belief Propagation (MRF-BP) provide a solution by using existing essential correlations in training sets. However, as in any restoration\/correction method for real applications, it is not possible to have color ground truth available on-line. In this paper, we present a MRF-BP model formulated in the chromatic domain of underwater scenes such that we synthesize the ground truth color to train the model and maximize the capabilities of our method. The generated ground truth introduces some improvements to existing color correction methods and visual attention considerations which also helps to choose a small size training set for the MRF-BP model. 
Feasibility of our approach is shown from the results in which a good color discrimination is observed even in poor visibility conditions.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('167','tp_abstract')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_167\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/OCEANS.2016.7761187\" title=\"DOI de seguimiento:10.1109\/OCEANS.2016.7761187\" target=\"_blank\">doi:10.1109\/OCEANS.2016.7761187<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('167','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Ponce-Hinestroza, A-N;  Torres-Mendez, Luz Abril;  Drews, Paulo<\/p><p class=\"tp_pub_title\">A statistical learning approach for underwater color restoration with adaptive training based on visual attention <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2016 MTS\/IEEE Monterey, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1\u20136, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_11\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('11','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_11\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{ponce2016oceansb,<br \/>\r\ntitle = {A statistical learning approach for underwater color restoration with adaptive training based on visual attention},<br \/>\r\nauthor = {Ponce-Hinestroza, A-N and Torres-Mendez, Luz Abril and Drews, Paulo },<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\nbooktitle = {OCEANS 2016 MTS\/IEEE Monterey},<br \/>\r\npages = {1--6},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('11','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Ponce-Hinestroza, A-N;  Torres-Mendez, Luz Abril;  Drews, Paulo<\/p><p class=\"tp_pub_title\">Using a MRF-BP Model with Color Adaptive Training for Underwater Color Restoration <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_booktitle\">ICPR 2016 IEEE Cancun, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1\u20136, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_12\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('12','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_12\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{ponce2016icpr,<br \/>\r\ntitle = {Using a MRF-BP Model with Color Adaptive Training for Underwater Color Restoration},<br \/>\r\nauthor = {Ponce-Hinestroza, A-N and Torres-Mendez, Luz Abril and Drews, Paulo},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\nbooktitle = {ICPR 2016 IEEE Cancun},<br \/>\r\npages = {1--6},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('12','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2015\">2015<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Art\u00edculos de revista<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Castelan, Mario;  Cruz-Perez, Elier;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('16','tp_links')\" style=\"cursor:pointer;\">A Photometric Sampling Strategy for Reflectance Characterization and Transference<\/a> <span class=\"tp_pub_type tp_  article\">Art\u00edculo de revista<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_journal\">Computaci\u00f3n y Sistemas, <\/span><span class=\"tp_pub_additional_volume\">vol. 
19, <\/span><span class=\"tp_pub_additional_number\">no 2, <\/span><span class=\"tp_pub_additional_pages\">pp. 255-272, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_16\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('16','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_16\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('16','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_16\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Castelan2015,<br \/>\r\ntitle = {A Photometric Sampling Strategy for Reflectance Characterization and Transference},<br \/>\r\nauthor = {Castelan, Mario and Cruz-Perez, Elier and Torres-Mendez, Luz Abril},<br \/>\r\nurl = {http:\/\/www.cys.cic.ipn.mx\/ojs\/index.php\/CyS\/article\/view\/1944},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\njournal = {Computaci\\'{o}n y Sistemas},<br \/>\r\nvolume = {19},<br \/>\r\nnumber = {2},<br \/>\r\npages = {255-272},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('16','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_16\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.cys.cic.ipn.mx\/ojs\/index.php\/CyS\/article\/view\/1944\" title=\"http:\/\/www.cys.cic.ipn.mx\/ojs\/index.php\/CyS\/article\/view\/1944\" target=\"_blank\">http:\/\/www.cys.cic.ipn.mx\/ojs\/index.php\/CyS\/article\/view\/1944<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" 
onclick=\"teachpress_pub_showhide('16','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_conference\">Conferencias<\/h3><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Rodriguez-Telles, Francisco G<\/p><p class=\"tp_pub_title\">Ethologically inspired reactive exploration of coral reefs with collision avoidance: Bridging the gap between human and robot spatial understanding of unstructured environments <span class=\"tp_pub_type tp_  conference\">Conferencia<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">Intelligent Robots and Systems (IROS), 2015 IEEE\/RSJ International Conference on, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_17\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('17','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_17\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{maldonado2015ethologically,<br \/>\r\ntitle = {Ethologically inspired reactive exploration of coral reefs with collision avoidance: Bridging the gap between human and robot spatial understanding of unstructured environments},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Rodriguez-Telles, Francisco G},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\nbooktitle = {Intelligent Robots and Systems (IROS), 2015 IEEE\/RSJ International Conference on},<br \/>\r\npages = {4872--4879},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a 
class=\"tp_close\" onclick=\"teachpress_pub_showhide('17','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\">Autonomous robotic exploration of coral reefs using a visual attention-driven strategy for detecting and tracking regions of interest <span class=\"tp_pub_type tp_  conference\">Conferencia<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">OCEANS 2015-Genova, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_18\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('18','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_18\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{maldonado2015autonomous,<br \/>\r\ntitle = {Autonomous robotic exploration of coral reefs using a visual attention-driven strategy for detecting and tracking regions of interest},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril <br \/>\r\n},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\nbooktitle = {OCEANS 2015-Genova},<br \/>\r\npages = {1--5},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('18','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_inproceedings\">Proceedings Articles<\/h3><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Romero-Mart\u00ednez, C. 
E.;  Torres-Mendez, Luz Abril;  Martinez-Garcia, Edgar A.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('108','tp_links')\" style=\"cursor:pointer;\">Modeling motor-perceptual behaviors to enable intuitive paths in an aquatic robot<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2015 - MTS\/IEEE Washington, <\/span><span class=\"tp_pub_additional_pages\">pp. 1-5, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_108\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('108','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_108\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('108','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_108\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{7404424,<br \/>\r\ntitle = {Modeling motor-perceptual behaviors to enable intuitive paths in an aquatic robot},<br \/>\r\nauthor = {Romero-Mart\\'{i}nez, C. E. 
and Torres-Mendez, Luz Abril and Martinez-Garcia, Edgar A.},<br \/>\r\nurl = {http:\/\/ieeexplore.ieee.org\/document\/7404424\/},<br \/>\r\ndoi = {10.23919\/OCEANS.2015.7404424},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-10-01},<br \/>\r\nbooktitle = {OCEANS 2015 - MTS\/IEEE Washington},<br \/>\r\npages = {1-5},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('108','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_108\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/ieeexplore.ieee.org\/document\/7404424\/\" title=\"http:\/\/ieeexplore.ieee.org\/document\/7404424\/\" target=\"_blank\">http:\/\/ieeexplore.ieee.org\/document\/7404424\/<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.23919\/OCEANS.2015.7404424\" title=\"DOI de seguimiento:10.23919\/OCEANS.2015.7404424\" target=\"_blank\">doi:10.23919\/OCEANS.2015.7404424<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('108','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Labastida-Vald\u00e9s, L.;  Torres-Mendez, Luz Abril;  Hutchinson, S. 
A.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('106','tp_links')\" style=\"cursor:pointer;\">Using the motion perceptibility measure to classify points of interest for visual-based AUV guidance in a reef ecosystem<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2015 - MTS\/IEEE Washington, <\/span><span class=\"tp_pub_additional_pages\">pp. 1-6, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_106\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('106','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_106\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('106','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_106\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{7404605,<br \/>\r\ntitle = {Using the motion perceptibility measure to classify points of interest for visual-based AUV guidance in a reef ecosystem},<br \/>\r\nauthor = {Labastida-Vald\\'{e}s, L. and Torres-Mendez, Luz Abril and Hutchinson, S. 
A.},<br \/>\r\nurl = {http:\/\/ieeexplore.ieee.org\/document\/7404605\/},<br \/>\r\ndoi = {10.23919\/OCEANS.2015.7404605},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-10-01},<br \/>\r\nbooktitle = {OCEANS 2015 - MTS\/IEEE Washington},<br \/>\r\npages = {1-6},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('106','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_106\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/ieeexplore.ieee.org\/document\/7404605\/\" title=\"http:\/\/ieeexplore.ieee.org\/document\/7404605\/\" target=\"_blank\">http:\/\/ieeexplore.ieee.org\/document\/7404605\/<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.23919\/OCEANS.2015.7404605\" title=\"DOI de seguimiento:10.23919\/OCEANS.2015.7404605\" target=\"_blank\">doi:10.23919\/OCEANS.2015.7404605<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('106','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Gonz\u00e1lez-Garc\u00eda, Luis C.;  Torres-Mendez, Luz Abril;  Mart\u00ednez, Julieta;  Sattar, Junaed;  Little, James<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('109','tp_links')\" style=\"cursor:pointer;\">Are You Talking to Me? Detecting Attention in First-Person Interactions<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_pages\">pp.  
137-142, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 2308-4197<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_109\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('109','tp_abstract')\" title=\"Mostrar resumen\" style=\"cursor:pointer;\">Resumen<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_109\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('109','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_109\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('109','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_109\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{Gonz\\'{a}lez-Garc\\'{i}a2015,<br \/>\r\ntitle = {Are You Talking to Me? Detecting Attention in First-Person Interactions},<br \/>\r\nauthor = {Gonz\\'{a}lez-Garc\\'{i}a, Luis C. and Torres-Mendez, Luz Abril and Mart\\'{i}nez, Julieta and Sattar, Junaed and Little, James},<br \/>\r\nurl = {https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecting_Attention_in_First-Person_Interactions},<br \/>\r\nissn = {2308-4197},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-00-00},<br \/>\r\npages = { 137-142},<br \/>\r\nabstract = {This paper presents an approach for a mobile robot to detect the level of attention of a human in first-person interactions. Determining the degree of attention is an essential task in day-today interactions. In particular, we are interested in natural Human-Robot Interactions (HRI's) during which a robot needs to estimate the focus and the degree of the user's attention to determine the most appropriate moment to initiate, continue and terminate an interaction. 
Our approach is novel in that it uses a linear regression technique to classify raw depth-image data according to three levels of user attention on the robot (null, partial and total). This is achieved by measuring the linear independence of the input range data with respect to a dataset of user poses. We overcome the problem of time overhead that a large database can add to real-time Linear Regression Classification (LRC) methods by including only the feature vectors with the most relevant information. We demonstrate the approach by presenting experimental data from human-interaction studies with a PR2 robot. Results demonstrate our attention classifier to be accurate and robust in detecting the attention levels of human participants. I. INTRODUCTION Determining the attention of people is an essential component of day-today interactions. We are constantly monitoring other people's gaze, head and body poses while engaged in a conversation [1][2][3]. We also perform attention estimation in order to perform natural interactions [4][5]. In short, attention estimation is a fundamental component of effective social interaction; therefore, for robots to be efficient social agents it is necessary to provide them with reliable mechanisms to estimate human attention. We believe that human attention estimation, particularly in the context of interactions, is highly subjective. However, attempts to model it have been relatively successful, e.g., allowing a robot to ask for directions when it finds a human, as in the work of Weiss et al. [6]. Nonetheless, the state-of-the-art is still far from reaching a point where a robot can successfully interact with humans without relying on mechanisms not common to natural language. Recently, the use of range images to make more natural human-machine interfaces has been in the agenda of researchers, like in the case of the Microsoft Kinect TM , which delivers a skeleton of <br \/>\r\n<br \/>\r\nAre You Talking to Me? 
Detecting Attention in First-Person Interactions (PDF Download Available). Available from: https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecting_Attention_in_First-Person_Interactions [accessed Jun 17, 2017].},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('109','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_109\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This paper presents an approach for a mobile robot to detect the level of attention of a human in first-person interactions. Determining the degree of attention is an essential task in day-today interactions. In particular, we are interested in natural Human-Robot Interactions (HRI's) during which a robot needs to estimate the focus and the degree of the user's attention to determine the most appropriate moment to initiate, continue and terminate an interaction. Our approach is novel in that it uses a linear regression technique to classify raw depth-image data according to three levels of user attention on the robot (null, partial and total). This is achieved by measuring the linear independence of the input range data with respect to a dataset of user poses. We overcome the problem of time overhead that a large database can add to real-time Linear Regression Classification (LRC) methods by including only the feature vectors with the most relevant information. We demonstrate the approach by presenting experimental data from human-interaction studies with a PR2 robot. Results demonstrate our attention classifier to be accurate and robust in detecting the attention levels of human participants. I. INTRODUCTION Determining the attention of people is an essential component of day-today interactions. 
We are constantly monitoring other people's gaze, head and body poses while engaged in a conversation [1][2][3]. We also perform attention estimation in order to perform natural interactions [4][5]. In short, attention estimation is a fundamental component of effective social interaction; therefore, for robots to be efficient social agents it is necessary to provide them with reliable mechanisms to estimate human attention. We believe that human attention estimation, particularly in the context of interactions, is highly subjective. However, attempts to model it have been relatively successful, e.g., allowing a robot to ask for directions when it finds a human, as in the work of Weiss et al. [6]. Nonetheless, the state-of-the-art is still far from reaching a point where a robot can successfully interact with humans without relying on mechanisms not common to natural language. Recently, the use of range images to make more natural human-machine interfaces has been in the agenda of researchers, like in the case of the Microsoft Kinect TM , which delivers a skeleton of <br \/>\r\n<br \/>\r\nAre You Talking to Me? Detecting Attention in First-Person Interactions (PDF Download Available). 
Available from: https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecting_Attention_in_First-Person_Interactions [accessed Jun 17, 2017].<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('109','tp_abstract')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_109\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecting_Attention_in_First-Person_Interactions\" title=\"https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecti[...]\" target=\"_blank\">https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecti[...]<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('109','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\">using supercolor-pixels descriptors for tracking relevant cues in underwater environments with poor visibility conditions <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_publisher\">ICRA 2015 Workshop on Visual Place Recognition in Changing Environmen ts, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_107\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('107','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_107\" style=\"display:none;\"><div 
class=\"tp_bibtex_entry\"><pre>@inproceedings{Maldonao-Ramirez2015,<br \/>\r\ntitle = {using supercolor-pixels descriptors for tracking relevant cues in underwater environments with poor visibility conditions},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-00-00},<br \/>\r\npublisher = {ICRA 2015 Workshop on Visual Place Recognition in Changing Environmen ts},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('107','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2014\">2014<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Art\u00edculos de revista<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Martinez-Garcia, Edgar A.;  Torres-Mendez, Luz Abril;  Elara Mohan, Rajesh<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('20','tp_links')\" style=\"cursor:pointer;\">Multi-legged robot dynamics navigation model with optical flow<\/a> <span class=\"tp_pub_type tp_  article\">Art\u00edculo de revista<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_journal\">International Journal of Intelligent Unmanned Systems, <\/span><span class=\"tp_pub_additional_volume\">vol. 2, <\/span><span class=\"tp_pub_additional_number\">no 2, <\/span><span class=\"tp_pub_additional_pages\">pp. 
121-139, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_20\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('20','tp_abstract')\" title=\"Mostrar resumen\" style=\"cursor:pointer;\">Resumen<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_20\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('20','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_20\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('20','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_20\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{doi:10.1108\/IJIUS-04-2014-0003,<br \/>\r\ntitle = {Multi-legged robot dynamics navigation model with optical flow},<br \/>\r\nauthor = {Martinez-Garcia, Edgar A. and Torres-Mendez, Luz Abril and Elara Mohan, Rajesh },<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003},<br \/>\r\ndoi = {10.1108\/IJIUS-04-2014-0003},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-01-01},<br \/>\r\njournal = {International Journal of Intelligent Unmanned Systems},<br \/>\r\nvolume = {2},<br \/>\r\nnumber = {2},<br \/>\r\npages = {121-139},<br \/>\r\nabstract = {Purpose \\textendash The purpose of this paper is to establish analytical and numerical solutions of a navigational law to estimate displacements of hyper-static multi-legged mobile robots, which combines: monocular vision (optical flow of regional invariants) and legs dynamics. Design\/methodology\/approach \\textendash In this study the authors propose a Euler-Lagrange equation that control legs\u2019 joints to control robot's displacements. Robot's rotation and translational velocities are feedback by motion features of visual invariant descriptors. 
A general analytical solution of a derivative navigation law is proposed for hyper-static robots. The feedback is formulated with the local speed rate obtained from optical flow of visual regional invariants. The proposed formulation includes a data association algorithm aimed to correlate visual invariant descriptors detected in sequential images through monocular vision. The navigation law is constrained by a set of three kinematic equilibrium conditions for navigational scenarios: constant acceleration, constant velocity, and instantaneous acceleration. Findings \\textendash The proposed data association method concerns local motions of multiple invariants (enhanced MSER) by minimizing the norm of multidimensional optical flow feature vectors. Kinematic measurements are used as observable arguments in the general dynamic control equation; while the legs joints dynamics model is used to formulate the controllable arguments. Originality\/value \\textendash The given analysis does not combine sensor data of any kind, but only monocular passive vision. The approach automatically detects environmental invariant descriptors with an enhanced version of the MSER method. Only optical flow vectors and robot's multi-leg dynamics are used to formulate descriptive rotational and translational motions for self-positioning.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('20','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_20\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Purpose \u2013 The purpose of this paper is to establish analytical and numerical solutions of a navigational law to estimate displacements of hyper-static multi-legged mobile robots, which combines: monocular vision (optical flow of regional invariants) and legs dynamics. 
Design\/methodology\/approach \u2013 In this study the authors propose a Euler-Lagrange equation that control legs\u2019 joints to control robot's displacements. Robot's rotation and translational velocities are feedback by motion features of visual invariant descriptors. A general analytical solution of a derivative navigation law is proposed for hyper-static robots. The feedback is formulated with the local speed rate obtained from optical flow of visual regional invariants. The proposed formulation includes a data association algorithm aimed to correlate visual invariant descriptors detected in sequential images through monocular vision. The navigation law is constrained by a set of three kinematic equilibrium conditions for navigational scenarios: constant acceleration, constant velocity, and instantaneous acceleration. Findings \u2013 The proposed data association method concerns local motions of multiple invariants (enhanced MSER) by minimizing the norm of multidimensional optical flow feature vectors. Kinematic measurements are used as observable arguments in the general dynamic control equation; while the legs joints dynamics model is used to formulate the controllable arguments. Originality\/value \u2013 The given analysis does not combine sensor data of any kind, but only monocular passive vision. The approach automatically detects environmental invariant descriptors with an enhanced version of the MSER method. 
Only optical flow vectors and robot's multi-leg dynamics are used to formulate descriptive rotational and translational motions for self-positioning.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('20','tp_abstract')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_20\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003\" title=\"http:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003\" target=\"_blank\">http:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003\" title=\"DOI de seguimiento:10.1108\/IJIUS-04-2014-0003\" target=\"_blank\">doi:10.1108\/IJIUS-04-2014-0003<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('20','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_conference\">Conferencias<\/h3><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Martinez-Garcia, Edgar A.<\/p><p class=\"tp_pub_title\">Robust detection and tracking of regions of interest for autonomous underwater robotic exploration <span class=\"tp_pub_type tp_  conference\">Conferencia<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">Proc. 6th Int. Conf. 
on Advanced Cognitive Technologies and Applications, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_22\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('22','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_22\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{maldonado2014robust,<br \/>\r\ntitle = {Robust detection and tracking of regions of interest for autonomous underwater robotic exploration},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Martinez-Garcia, Edgar A.},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-01-01},<br \/>\r\nbooktitle = {Proc. 6th Int. Conf. on Advanced Cognitive Technologies and Applications},<br \/>\r\npages = {165--171},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('22','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rodriguez-Telles, Francisco G;  Perez-Alcocer, Ricardo;  Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Bikram Dey, Bir;  Martinez-Garcia, Edgar A.<\/p><p class=\"tp_pub_title\">Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat <span class=\"tp_pub_type tp_  conference\">Conferencia<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">2014 IEEE International Conference on Robotics and Automation (ICRA), <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2014<\/span>.<\/p><p class=\"tp_pub_menu\"><span 
class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_21\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('21','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_21\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{rodriguez2014vision,<br \/>\r\ntitle = {Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat},<br \/>\r\nauthor = {Rodriguez-Telles, Francisco G and Perez-Alcocer, Ricardo and Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Bikram Dey, Bir and Martinez-Garcia, Edgar A.},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-01-01},<br \/>\r\nbooktitle = {2014 IEEE International Conference on Robotics and Automation (ICRA)},<br \/>\r\npages = {3813--3818},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('21','tp_bibtex')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_inproceedings\">Proceedings Articles<\/h3><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rodr\u00edguez-Teiles, F. G.;  Perez-Alcocer, Ricardo;  Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Dey, B. 
B.;  Martinez-Garcia, Edgar A.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('68','tp_links')\" style=\"cursor:pointer;\">Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_booktitle\">2014 IEEE International Conference on Robotics and Automation (ICRA), <\/span><span class=\"tp_pub_additional_pages\">pp. 3813-3818, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1050-4729<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_68\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('68','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_68\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('68','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_68\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{6907412,<br \/>\r\ntitle = {Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat},<br \/>\r\nauthor = {Rodr\\'{i}guez-Teiles, F. G. and Perez-Alcocer, Ricardo and Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Dey, B. B. 
and Martinez-Garcia, Edgar A.},<br \/>\r\nurl = {http:\/\/ieeexplore.ieee.org\/document\/6907412\/},<br \/>\r\ndoi = {10.1109\/ICRA.2014.6907412},<br \/>\r\nissn = {1050-4729},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-05-01},<br \/>\r\nbooktitle = {2014 IEEE International Conference on Robotics and Automation (ICRA)},<br \/>\r\npages = {3813-3818},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('68','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_68\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/ieeexplore.ieee.org\/document\/6907412\/\" title=\"http:\/\/ieeexplore.ieee.org\/document\/6907412\/\" target=\"_blank\">http:\/\/ieeexplore.ieee.org\/document\/6907412\/<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/ICRA.2014.6907412\" title=\"DOI de seguimiento:10.1109\/ICRA.2014.6907412\" target=\"_blank\">doi:10.1109\/ICRA.2014.6907412<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('68','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2013\">2013<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Art\u00edculos de revista<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rivero-Juarez, Joaquin;  Martinez-Garcia, Edgar A.;  Torres-Mendez, Luz Abril;  Elara Mohan, Rajesh<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('25','tp_links')\" style=\"cursor:pointer;\">3D Heterogeneous Multi-sensor Global Registration<\/a> <span class=\"tp_pub_type tp_  article\">Art\u00edculo de revista<\/span> <\/p><p 
class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span class=\"tp_pub_additional_journal\">Procedia Engineering, <\/span><span class=\"tp_pub_additional_volume\">vol. 64, <\/span><span class=\"tp_pub_additional_pages\">pp. 1552 - 1561, <\/span><span class=\"tp_pub_additional_year\">2013<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1877-7058<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_25\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('25','tp_abstract')\" title=\"Mostrar resumen\" style=\"cursor:pointer;\">Resumen<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_25\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('25','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_25\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('25','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_25\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{RIVEROJUAREZ20131552,<br \/>\r\ntitle = {3D Heterogeneous Multi-sensor Global Registration},<br \/>\r\nauthor = {Rivero-Juarez, Joaquin and Martinez-Garcia, Edgar A. and Torres-Mendez, Luz Abril and Elara Mohan, Rajesh },<br \/>\r\nurl = {http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1877705813017517},<br \/>\r\ndoi = {http:\/\/dx.doi.org\/10.1016\/j.proeng.2013.09.237},<br \/>\r\nissn = {1877-7058},<br \/>\r\nyear  = {2013},<br \/>\r\ndate = {2013-01-01},<br \/>\r\njournal = {Procedia Engineering},<br \/>\r\nvolume = {64},<br \/>\r\npages = {1552 - 1561},<br \/>\r\nabstract = {This manuscript presents a deterministic model to register heterogeneous 3D data arising from a ring of eight ultrasonic sonar, one high data density LiDAR (light detection and ranging), and a semi-ring of three visual sensors. 
The three visual sensors are arranged in a cylindrical ring, and although they provide 2D colour images, a radial multi-stereo geometric model is proposed to yield 3D data. All deployed sensors are geometrically placed on-board a wheeled mobile robot platform, and data registration is carried out navigating indoors. The sensor devices in discussion are coordinated and synchronized by a home-made distributed sensor suite system. Mathematical deterministic formulation for data registration is used to obtain experimental and numerical results on global mapping. Data registration relies on a geometric model to compute depth information from a semi- circular trinocular stereo sensor that is proposed to rectify and calibrate three image frames with different orientations and positions, but with same projection point.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('25','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_25\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This manuscript presents a deterministic model to register heterogeneous 3D data arising from a ring of eight ultrasonic sonar, one high data density LiDAR (light detection and ranging), and a semi-ring of three visual sensors. The three visual sensors are arranged in a cylindrical ring, and although they provide 2D colour images, a radial multi-stereo geometric model is proposed to yield 3D data. All deployed sensors are geometrically placed on-board a wheeled mobile robot platform, and data registration is carried out navigating indoors. The sensor devices in discussion are coordinated and synchronized by a home-made distributed sensor suite system. Mathematical deterministic formulation for data registration is used to obtain experimental and numerical results on global mapping. 
Data registration relies on a geometric model to compute depth information from a semi- circular trinocular stereo sensor that is proposed to rectify and calibrate three image frames with different orientations and positions, but with same projection point.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('25','tp_abstract')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_25\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1877705813017517\" title=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1877705813017517\" target=\"_blank\">http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1877705813017517<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/http:\/\/dx.doi.org\/10.1016\/j.proeng.2013.09.237\" title=\"DOI de seguimiento:http:\/\/dx.doi.org\/10.1016\/j.proeng.2013.09.237\" target=\"_blank\">doi:http:\/\/dx.doi.org\/10.1016\/j.proeng.2013.09.237<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('25','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2012\">2012<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Art\u00edculos de revista<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Miranda-Hernandez, Jocelyn;  Castelan, Mario;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('28','tp_links')\" style=\"cursor:pointer;\">Face colour synthesis using partial least squares and the luminance-\u03b1-\u03b2 colour transform<\/a> <span class=\"tp_pub_type tp_  article\">Art\u00edculo de revista<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">En: <\/span><span 
class=\"tp_pub_additional_journal\">IET Computer Vision, <\/span><span class=\"tp_pub_additional_volume\">vol. 6, <\/span><span class=\"tp_pub_additional_number\">no 4, <\/span><span class=\"tp_pub_additional_pages\">pp. 263-272, <\/span><span class=\"tp_pub_additional_year\">2012<\/span>, <span class=\"tp_pub_additional_isbn\">ISBN: 1751-9632<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_28\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('28','tp_abstract')\" title=\"Mostrar resumen\" style=\"cursor:pointer;\">Resumen<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_28\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('28','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_28\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('28','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_28\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Miranda2012,<br \/>\r\ntitle = {Face colour synthesis using partial least squares and the luminance-\u03b1-\u03b2 colour transform},<br \/>\r\nauthor = {Miranda-Hernandez, Jocelyn and Castelan, Mario and Torres-Mendez, Luz Abril },<br \/>\r\nurl = {http:\/\/digital-library.theiet.org\/content\/journals\/10.1049\/iet-cvi.2011.0168},<br \/>\r\ndoi = {10.1049\/iet-cvi.2011.0168},<br \/>\r\nisbn = {1751-9632},<br \/>\r\nyear  = {2012},<br \/>\r\ndate = {2012-07-01},<br \/>\r\njournal = {IET Computer Vision},<br \/>\r\nvolume = {6},<br \/>\r\nnumber = {4},<br \/>\r\npages = {263-272},<br \/>\r\nabstract = {For many tasks, it is necessary to synthesise realistic colour in faces from greyscale values. This is the problem the authors address in this study. 
Rather than propagating colour information in some regions of the image or transferring colour from an image source to a greyscale using some corresponding criterion, as many colouring systems attempt to do, they seek to synthesise facial colour information using a database of examples. This methodology is divided into two main stages. In the first stage the facial skin tone is predicted through the multiple linear regression method known as partial least squares. This regression allows to define a linear transformation between facial greyscale and colour subspaces. The second stage involves the luminance-\u03b1-\u03b2 (L\u03b1\u03b2) colour transform which is responsible for the recovery of the fine facial detail. The core of the proposed methodology is the combination of statistical subspace analysis with the appropriate colour transform so as to produce realistic facial colourisation results in a direct manner.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('28','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_28\" style=\"display:none;\"><div class=\"tp_abstract_entry\">For many tasks, it is necessary to synthesise realistic colour in faces from greyscale values. This is the problem the authors address in this study. Rather than propagating colour information in some regions of the image or transferring colour from an image source to a greyscale using some corresponding criterion, as many colouring systems attempt to do, they seek to synthesise facial colour information using a database of examples. This methodology is divided into two main stages. In the first stage the facial skin tone is predicted through the multiple linear regression method known as partial least squares. 
This regression allows to define a linear transformation between facial greyscale and colour subspaces. The second stage involves the luminance-\u03b1-\u03b2 (L\u03b1\u03b2) colour transform which is responsible for the recovery of the fine facial detail. The core of the proposed methodology is the combination of statistical subspace analysis with the appropriate colour transform so as to produce realistic facial colourisation results in a direct manner.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('28','tp_abstract')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_28\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/digital-library.theiet.org\/content\/journals\/10.1049\/iet-cvi.2011.0168\" title=\"http:\/\/digital-library.theiet.org\/content\/journals\/10.1049\/iet-cvi.2011.0168\" target=\"_blank\">http:\/\/digital-library.theiet.org\/content\/journals\/10.1049\/iet-cvi.2011.0168<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1049\/iet-cvi.2011.0168\" title=\"DOI de seguimiento:10.1049\/iet-cvi.2011.0168\" target=\"_blank\">doi:10.1049\/iet-cvi.2011.0168<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('28','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2007\">2007<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_conference\">Conferencias<\/h3><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Torres-Mendez, Luz Abril;  Ramirez-Sosa Moran, Marco I;  Castelan, Mario<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('41','tp_links')\" style=\"cursor:pointer;\">A Single-Frame Super-Resolution Innovative Approach<\/a> <span class=\"tp_pub_type tp_  
conference\">Conferencia<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">MICAI 2007: Advances in Artificial Intelligence: 6th Mexican International Conference on Artificial Intelligence, Aguascalientes, Mexico, November 4-10, 2007. Proceedings, <\/span><span class=\"tp_pub_additional_publisher\">Springer Berlin Heidelberg, <\/span><span class=\"tp_pub_additional_address\">Berlin, Heidelberg, <\/span><span class=\"tp_pub_additional_year\">2007<\/span>, <span class=\"tp_pub_additional_isbn\">ISBN: 978-3-540-76631-5<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_41\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('41','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_41\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('41','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_41\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{Torres-M\\'{e}ndez2007,<br \/>\r\ntitle = {A Single-Frame Super-Resolution Innovative Approach},<br \/>\r\nauthor = {Torres-Mendez, Luz Abril and Ramirez-Sosa Moran, Marco I and Castelan, Mario },<br \/>\r\neditor = {Gelbukh, Alexander <br \/>\r\nand Kuri Morales, Angel Fernando},<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_61},<br \/>\r\ndoi = {10.1007\/978-3-540-76631-5_61},<br \/>\r\nisbn = {978-3-540-76631-5},<br \/>\r\nyear  = {2007},<br \/>\r\ndate = {2007-01-01},<br \/>\r\nbooktitle = {MICAI 2007: Advances in Artificial Intelligence: 6th Mexican International Conference on Artificial Intelligence, Aguascalientes, Mexico, November 4-10, 2007. 
Proceedings},<br \/>\r\npages = {640--649},<br \/>\r\npublisher = {Springer Berlin Heidelberg},<br \/>\r\naddress = {Berlin, Heidelberg},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('41','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_41\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_61\" title=\"http:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_61\" target=\"_blank\">http:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_61<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_61\" title=\"DOI de seguimiento:10.1007\/978-3-540-76631-5_61\" target=\"_blank\">doi:10.1007\/978-3-540-76631-5_61<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('41','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Castelan, Mario;  Almazan-Delfin, Ana Judith;  Ramirez-Sosa Moran, Marco I;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('42','tp_links')\" style=\"cursor:pointer;\">Example-Based Face Shape Recovery Using the Zenith Angle of the Surface Normal<\/a> <span class=\"tp_pub_type tp_  conference\">Conferencia<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">MICAI 2007: Advances in Artificial Intelligence: 6th Mexican International Conference on Artificial Intelligence, Aguascalientes, Mexico, November 4-10, 2007. 
Proceedings, <\/span><span class=\"tp_pub_additional_publisher\">Springer Berlin Heidelberg, <\/span><span class=\"tp_pub_additional_address\">Berlin, Heidelberg, <\/span><span class=\"tp_pub_additional_year\">2007<\/span>, <span class=\"tp_pub_additional_isbn\">ISBN: 978-3-540-76631-5<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_42\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('42','tp_links')\" title=\"Mostrar enlaces y recursos\" style=\"cursor:pointer;\">Enlaces<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_42\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('42','tp_bibtex')\" title=\"Mostrar entrada BibTeX \" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_42\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{Castel\\'{a}n2007b,<br \/>\r\ntitle = {Example-Based Face Shape Recovery Using the Zenith Angle of the Surface Normal},<br \/>\r\nauthor = {Castelan, Mario and Almazan-Delfin, Ana Judith and Ramirez-Sosa Moran, Marco I and Torres-Mendez, Luz Abril },<br \/>\r\neditor = {Gelbukh, Alexander <br \/>\r\nand Kuri Morales, Angel Fernando},<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_72},<br \/>\r\ndoi = {10.1007\/978-3-540-76631-5_72},<br \/>\r\nisbn = {978-3-540-76631-5},<br \/>\r\nyear  = {2007},<br \/>\r\ndate = {2007-01-01},<br \/>\r\nbooktitle = {MICAI 2007: Advances in Artificial Intelligence: 6th Mexican International Conference on Artificial Intelligence, Aguascalientes, Mexico, November 4-10, 2007. 
Proceedings},<br \/>\r\npages = {758--768},<br \/>\r\npublisher = {Springer Berlin Heidelberg},<br \/>\r\naddress = {Berlin, Heidelberg},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('42','tp_bibtex')\">Cerrar<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_42\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_72\" title=\"http:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_72\" target=\"_blank\">http:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_72<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1007\/978-3-540-76631-5_72\" title=\"DOI de seguimiento:10.1007\/978-3-540-76631-5_72\" target=\"_blank\">doi:10.1007\/978-3-540-76631-5_72<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('42','tp_links')\">Cerrar<\/a><\/p><\/div><\/div><\/div><\/div><\/div> Av. Industrial\u00a0Metalurgia\u00a0#1062,\u00a0Parque Ind. Ramos Arizpe,\u00a0Ramos Arizpe, Coah.\u00a0C.P. 25900, M\u00e9xico. \u00a0Tel. +52 (844) 438-9600<\/p>\n","protected":false},"author":15,"featured_media":0,"parent":0,"menu_order":0,"comment_status":"closed","ping_status":"closed","template":"","meta":{"_et_pb_use_builder":"on","_et_pb_old_content":"","_et_gb_content_width":"","footnotes":""},"class_list":["post-76","page","type-page","status-publish","hentry"],"yoast_head":"<!-- This site is optimized with the Yoast SEO plugin v27.3 - https:\/\/yoast.com\/product\/yoast-seo-wordpress\/ -->\n<title>Publicaciones - Prof. Dra. 
Luz Abril Torres Mendez<\/title>\n<meta name=\"robots\" content=\"index, follow, max-snippet:-1, max-image-preview:large, max-video-preview:-1\" \/>\n<link rel=\"canonical\" href=\"https:\/\/ryma.cinvestav.mx\/atorres\/publicaciones\/\" \/>\n<meta property=\"og:locale\" content=\"es_ES\" \/>\n<meta property=\"og:type\" content=\"article\" \/>\n<meta property=\"og:title\" content=\"Publicaciones - Prof. Dra. Luz Abril Torres Mendez\" \/>\n<meta property=\"og:description\" content=\"Para ver las publicaciones de todo Rob\u00f3tica y Manufactura Avanzada, ver:\u00a0 Publicaciones RYMA Av. Industrial\u00a0Metalurgia\u00a0#1062,\u00a0Parque Ind. Ramos Arizpe,\u00a0Ramos Arizpe, Coah.\u00a0C.P. 25900, M\u00e9xico. \u00a0Tel. +52 (844) 438-9600\" \/>\n<meta property=\"og:url\" content=\"https:\/\/ryma.cinvestav.mx\/atorres\/publicaciones\/\" \/>\n<meta property=\"og:site_name\" content=\"Prof. Dra. Luz Abril Torres Mendez\" \/>\n<meta property=\"article:modified_time\" content=\"2017-11-11T17:35:18+00:00\" \/>\n<meta name=\"twitter:card\" content=\"summary_large_image\" \/>\n<meta name=\"twitter:label1\" content=\"Tiempo de lectura\" \/>\n\t<meta name=\"twitter:data1\" content=\"3 minutos\" \/>\n<script type=\"application\/ld+json\" class=\"yoast-schema-graph\">{\"@context\":\"https:\\\/\\\/schema.org\",\"@graph\":[{\"@type\":\"WebPage\",\"@id\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/atorres\\\/publicaciones\\\/\",\"url\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/atorres\\\/publicaciones\\\/\",\"name\":\"Publicaciones - Prof. Dra. 
Luz Abril Torres Mendez\",\"isPartOf\":{\"@id\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/atorres\\\/#website\"},\"datePublished\":\"2017-09-10T05:46:36+00:00\",\"dateModified\":\"2017-11-11T17:35:18+00:00\",\"breadcrumb\":{\"@id\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/atorres\\\/publicaciones\\\/#breadcrumb\"},\"inLanguage\":\"es\",\"potentialAction\":[{\"@type\":\"ReadAction\",\"target\":[\"https:\\\/\\\/ryma.cinvestav.mx\\\/atorres\\\/publicaciones\\\/\"]}]},{\"@type\":\"BreadcrumbList\",\"@id\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/atorres\\\/publicaciones\\\/#breadcrumb\",\"itemListElement\":[{\"@type\":\"ListItem\",\"position\":1,\"name\":\"Portada\",\"item\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/atorres\\\/\"},{\"@type\":\"ListItem\",\"position\":2,\"name\":\"Publicaciones\"}]},{\"@type\":\"WebSite\",\"@id\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/atorres\\\/#website\",\"url\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/atorres\\\/\",\"name\":\"Prof. Dra. Luz Abril Torres Mendez\",\"description\":\"Miembro de Rob\u00f3tica y Manufactura Avanzada - Cinvestav\",\"potentialAction\":[{\"@type\":\"SearchAction\",\"target\":{\"@type\":\"EntryPoint\",\"urlTemplate\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/atorres\\\/?s={search_term_string}\"},\"query-input\":{\"@type\":\"PropertyValueSpecification\",\"valueRequired\":true,\"valueName\":\"search_term_string\"}}],\"inLanguage\":\"es\"}]}<\/script>\n<!-- \/ Yoast SEO plugin. -->","yoast_head_json":{"title":"Publicaciones - Prof. Dra. Luz Abril Torres Mendez","robots":{"index":"index","follow":"follow","max-snippet":"max-snippet:-1","max-image-preview":"max-image-preview:large","max-video-preview":"max-video-preview:-1"},"canonical":"https:\/\/ryma.cinvestav.mx\/atorres\/publicaciones\/","og_locale":"es_ES","og_type":"article","og_title":"Publicaciones - Prof. Dra. Luz Abril Torres Mendez","og_description":"Para ver las publicaciones de todo Rob\u00f3tica y Manufactura Avanzada, ver:\u00a0 Publicaciones RYMA Av. 
Industrial\u00a0Metalurgia\u00a0#1062,\u00a0Parque Ind. Ramos Arizpe,\u00a0Ramos Arizpe, Coah.\u00a0C.P. 25900, M\u00e9xico. \u00a0Tel. +52 (844) 438-9600","og_url":"https:\/\/ryma.cinvestav.mx\/atorres\/publicaciones\/","og_site_name":"Prof. Dra. Luz Abril Torres Mendez","article_modified_time":"2017-11-11T17:35:18+00:00","twitter_card":"summary_large_image","twitter_misc":{"Tiempo de lectura":"3 minutos"},"schema":{"@context":"https:\/\/schema.org","@graph":[{"@type":"WebPage","@id":"https:\/\/ryma.cinvestav.mx\/atorres\/publicaciones\/","url":"https:\/\/ryma.cinvestav.mx\/atorres\/publicaciones\/","name":"Publicaciones - Prof. Dra. Luz Abril Torres Mendez","isPartOf":{"@id":"https:\/\/ryma.cinvestav.mx\/atorres\/#website"},"datePublished":"2017-09-10T05:46:36+00:00","dateModified":"2017-11-11T17:35:18+00:00","breadcrumb":{"@id":"https:\/\/ryma.cinvestav.mx\/atorres\/publicaciones\/#breadcrumb"},"inLanguage":"es","potentialAction":[{"@type":"ReadAction","target":["https:\/\/ryma.cinvestav.mx\/atorres\/publicaciones\/"]}]},{"@type":"BreadcrumbList","@id":"https:\/\/ryma.cinvestav.mx\/atorres\/publicaciones\/#breadcrumb","itemListElement":[{"@type":"ListItem","position":1,"name":"Portada","item":"https:\/\/ryma.cinvestav.mx\/atorres\/"},{"@type":"ListItem","position":2,"name":"Publicaciones"}]},{"@type":"WebSite","@id":"https:\/\/ryma.cinvestav.mx\/atorres\/#website","url":"https:\/\/ryma.cinvestav.mx\/atorres\/","name":"Prof. Dra. 
Luz Abril Torres Mendez","description":"Miembro de Rob\u00f3tica y Manufactura Avanzada - Cinvestav","potentialAction":[{"@type":"SearchAction","target":{"@type":"EntryPoint","urlTemplate":"https:\/\/ryma.cinvestav.mx\/atorres\/?s={search_term_string}"},"query-input":{"@type":"PropertyValueSpecification","valueRequired":true,"valueName":"search_term_string"}}],"inLanguage":"es"}]}},"_links":{"self":[{"href":"https:\/\/ryma.cinvestav.mx\/atorres\/wp-json\/wp\/v2\/pages\/76","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/ryma.cinvestav.mx\/atorres\/wp-json\/wp\/v2\/pages"}],"about":[{"href":"https:\/\/ryma.cinvestav.mx\/atorres\/wp-json\/wp\/v2\/types\/page"}],"author":[{"embeddable":true,"href":"https:\/\/ryma.cinvestav.mx\/atorres\/wp-json\/wp\/v2\/users\/15"}],"replies":[{"embeddable":true,"href":"https:\/\/ryma.cinvestav.mx\/atorres\/wp-json\/wp\/v2\/comments?post=76"}],"version-history":[{"count":30,"href":"https:\/\/ryma.cinvestav.mx\/atorres\/wp-json\/wp\/v2\/pages\/76\/revisions"}],"predecessor-version":[{"id":497,"href":"https:\/\/ryma.cinvestav.mx\/atorres\/wp-json\/wp\/v2\/pages\/76\/revisions\/497"}],"wp:attachment":[{"href":"https:\/\/ryma.cinvestav.mx\/atorres\/wp-json\/wp\/v2\/media?parent=76"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}