{"id":76,"date":"2017-09-10T05:46:36","date_gmt":"2017-09-10T05:46:36","guid":{"rendered":"https:\/\/ryma.cinvestav.mx\/ravg\/?page_id=76"},"modified":"2017-11-11T17:48:08","modified_gmt":"2017-11-11T17:48:08","slug":"publications","status":"publish","type":"page","link":"https:\/\/ryma.cinvestav.mx\/ravg\/publications\/","title":{"rendered":"Publications"},"content":{"rendered":"<p>[et_pb_section bb_built=&#8221;1&#8243; fullwidth=&#8221;on&#8221; specialty=&#8221;off&#8221; _builder_version=&#8221;3.0.86&#8243; background_image=&#8221;https:\/\/ryma.cinvestav.mx\/ravg\/wp-content\/uploads\/sites\/19\/2017\/09\/IMAGEN-CAMPA\u00d1A1.jpg&#8221; global_module=&#8221;714&#8243;][et_pb_fullwidth_header admin_label=&#8221;RESEARCH_UpperTitle&#8221; global_parent=&#8221;714&#8243; _builder_version=&#8221;3.0.86&#8243; title=&#8221;RAVG: Robotics Active Vision&#8221; subhead=&#8221;Research&#8221; text_orientation=&#8221;left&#8221; header_fullscreen=&#8221;off&#8221; header_scroll_down=&#8221;off&#8221; image_orientation=&#8221;center&#8221; background_layout=&#8221;dark&#8221; content_orientation=&#8221;center&#8221; custom_button_two=&#8221;off&#8221; button_two_icon_placement=&#8221;right&#8221; custom_button_one=&#8221;off&#8221; button_one_icon_placement=&#8221;right&#8221; background_overlay_color=&#8221;rgba(0,0,0,0.3)&#8221; text_shadow_style=&#8221;preset3&#8243; text_shadow_blur_strength=&#8221;1em&#8221; text_shadow_color=&#8221;rgba(0,0,0,0.8)&#8221; title_font=&#8221;|700|||||||&#8221; subhead_font=&#8221;|700|||||||&#8221; subhead_font_size=&#8221;20&#8243; \/][et_pb_fullwidth_menu global_parent=&#8221;714&#8243; _builder_version=&#8221;3.0.86&#8243; menu_id=&#8221;10&#8243; background_color=&#8221;#d3d3d3&#8243; submenu_direction=&#8221;downwards&#8221; fullwidth_menu=&#8221;off&#8221; background_layout=&#8221;light&#8221; active_link_color=&#8221;#8300e9&#8243; menu_font=&#8221;|700|||||||&#8221; \/][\/et_pb_section][et_pb_section bb_built=&#8221;1&#8243; 
_builder_version=&#8221;3.0.72&#8243; custom_css_main_element=&#8221;box-shadow: inset 0px 3px 2px rgba(50, 50, 50, 0.75);&#8221; locked=&#8221;off&#8221;][et_pb_row parent_locked=&#8221;off&#8221; background_position=&#8221;top_left&#8221; background_repeat=&#8221;repeat&#8221; background_size=&#8221;initial&#8221;][et_pb_column type=&#8221;4_4&#8243;][et_pb_text admin_label=&#8221;Publications code&#8221; _builder_version=&#8221;3.0.86&#8243; background_layout=&#8221;light&#8221; parent_locked=&#8221;off&#8221; module_alignment=&#8221;left&#8221;]<\/p>\n<hr \/>\n<div class='et-box et-shadow'>\n\t\t\t\t\t<div class='et-box-content'><h2 style=\"text-align: center;\"><strong>PUBLICATIONS<\/strong><\/h2><\/div><\/div>\n<p>This is the list of publications of this laboratory<\/p>\n<p><strong><div class=\"teachpress_pub_list\"><form name=\"tppublistform\" method=\"get\"><a name=\"tppubs\" id=\"tppubs\"><\/a><div class=\"teachpress_filter\"><select class=\"default\" name=\"yr\" id=\"yr\" tabindex=\"2\" onchange=\"teachpress_jumpMenu('parent',this, 'https:\/\/ryma.cinvestav.mx\/ravg\/publications\/?')\">\r\n                   <option value=\"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=#tppubs\">All years<\/option>\r\n                   <option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2020#tppubs\" >2020<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2019#tppubs\" >2019<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2018#tppubs\" >2018<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2017#tppubs\" >2017<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2016#tppubs\" >2016<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2015#tppubs\" >2015<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2014#tppubs\" >2014<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2013#tppubs\" >2013<\/option><option value = 
\"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2012#tppubs\" >2012<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2011#tppubs\" >2011<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2010#tppubs\" >2010<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2009#tppubs\" >2009<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2008#tppubs\" >2008<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2007#tppubs\" >2007<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2006#tppubs\" >2006<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2005#tppubs\" >2005<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2004#tppubs\" >2004<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2003#tppubs\" >2003<\/option>\r\n                <\/select><select class=\"default\" name=\"type\" id=\"type\" tabindex=\"3\" onchange=\"teachpress_jumpMenu('parent',this, 'https:\/\/ryma.cinvestav.mx\/ravg\/publications\/?')\">\r\n                   <option value=\"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=#tppubs\">All types<\/option>\r\n                   <option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=article#tppubs\" >Journal Articles<\/option><option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=conference#tppubs\" >Conferences<\/option><option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=inbook#tppubs\" >Book Chapters<\/option><option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=inproceedings#tppubs\" >Proceedings Articles<\/option><option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=proceedings#tppubs\" >Proceedings<\/option>\r\n                <\/select><select class=\"default\" name=\"auth\" id=\"auth\" tabindex=\"5\" onchange=\"teachpress_jumpMenu('parent',this, 'https:\/\/ryma.cinvestav.mx\/ravg\/publications\/?')\">\r\n                   <option 
value=\"tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=#tppubs\">All authors<\/option>\r\n                   <option value = \"tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=74#tppubs\" > Arechavaleta, Gustavo<\/option><option value = \"tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=61#tppubs\" > Castelan, Mario<\/option><option value = \"tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=77#tppubs\" > Rios-Cabrera, Reyes<\/option><option value = \"tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=58#tppubs\" > Torres-Mendez, Luz Abril<\/option>\r\n                <\/select><select class=\"default\" name=\"usr\" id=\"usr\" tabindex=\"6\" onchange=\"teachpress_jumpMenu('parent',this, 'https:\/\/ryma.cinvestav.mx\/ravg\/publications\/?')\">\r\n                   <option value=\"tgid=&amp;yr=&amp;type=&amp;auth=&amp;usr=#tppubs\">All users<\/option>\r\n                   <option value = \"tgid=&amp;yr=&amp;type=&amp;auth=&amp;usr=12#tppubs\" >mcastelan<\/option>\r\n                <\/select><\/div><\/form><div class=\"tablenav\"><div class=\"tablenav-pages\"><span class=\"displaying-num\">82 entries<\/span> <a class=\"page-numbers button disabled\">&laquo;<\/a> <a class=\"page-numbers button disabled\">&lsaquo;<\/a> 1 of 2 <a href=\"https:\/\/ryma.cinvestav.mx\/ravg\/publications\/?limit=2&amp;tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=&amp;tsr=#tppubs\" title=\"next page\" class=\"page-numbers button\">&rsaquo;<\/a> <a href=\"https:\/\/ryma.cinvestav.mx\/ravg\/publications\/?limit=2&amp;tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=&amp;tsr=#tppubs\" title=\"last page\" class=\"page-numbers button\">&raquo;<\/a> <\/div><\/div><div class=\"teachpress_publication_list\"><h3 class=\"tp_h3\" id=\"tp_h3_2019\">2019<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Journal Articles<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Ramos-Oliveira, Jorge;  Baltazar, Arturo;  Castelan, Mario<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" 
onclick=\"teachpress_pub_showhide('225','tp_links')\" style=\"cursor:pointer;\">On ray tracing for sharp changing media<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Journal of the Acoustic Society of America, <\/span><span class=\"tp_pub_additional_volume\">vol. 146, <\/span><span class=\"tp_pub_additional_number\">no. 3, <\/span><span class=\"tp_pub_additional_pages\">pp. 1595-1604, <\/span><span class=\"tp_pub_additional_year\">2019<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_225\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('225','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_225\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('225','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_225\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Ramos-Oliveira2019,<br \/>\r\ntitle = {On ray tracing for sharp changing media},<br \/>\r\nauthor = {Ramos-Oliveira, Jorge and Baltazar, Arturo and Castelan, Mario},<br \/>\r\nurl = {https:\/\/doi.org\/10.1121\/1.5125133},<br \/>\r\ndoi = {10.1121\/1.5125133},<br \/>\r\nyear  = {2019},<br \/>\r\ndate = {2019-07-10},<br \/>\r\njournal = {Journal of the Acoustic Society of America},<br \/>\r\nvolume = {146},<br \/>\r\nnumber = {3},<br \/>\r\npages = {1595-1604},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('225','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_225\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul 
class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/doi.org\/10.1121\/1.5125133\" title=\"https:\/\/doi.org\/10.1121\/1.5125133\" target=\"_blank\">https:\/\/doi.org\/10.1121\/1.5125133<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1121\/1.5125133\" title=\"Follow DOI:10.1121\/1.5125133\" target=\"_blank\">doi:10.1121\/1.5125133<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('225','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Luna-Aguilar, Christian;  Morales-Diaz, America;  Castelan, Mario;  Nadeu, Climent<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('223','tp_links')\" style=\"cursor:pointer;\">Incorporation of acoustic sensors in the regulation of a mobile robot<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Advanced Robotics, <\/span><span class=\"tp_pub_additional_volume\">vol. 33, <\/span><span class=\"tp_pub_additional_number\">no. 2, <\/span><span class=\"tp_pub_additional_pages\">pp. 
61-73, <\/span><span class=\"tp_pub_additional_year\">2019<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 0169-1864<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_223\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('223','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_223\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('223','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_223\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('223','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_223\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{{Luna-Aguilar}2019,<br \/>\r\ntitle = {Incorporation of acoustic sensors in the regulation of a mobile robot},<br \/>\r\nauthor = {Luna-Aguilar, Christian and Morales-Diaz, America and Castelan, Mario and Nadeu, Climent},<br \/>\r\neditor = {Taylor and Francis},<br \/>\r\nurl = {https:\/\/doi.org\/10.1080\/01691864.2019.1573703},<br \/>\r\ndoi = {10.1080\/01691864.2019.1573703},<br \/>\r\nissn = {0169-1864},<br \/>\r\nyear  = {2019},<br \/>\r\ndate = {2019-01-01},<br \/>\r\njournal = {Advanced Robotics},<br \/>\r\nvolume = {33},<br \/>\r\nnumber = {2},<br \/>\r\npages = {61-73},<br \/>\r\nabstract = {This article introduces the incorporation of acoustic sensors for the localization of a mobile robot. The robot is considered as a sound source and its position is located applying a Time Delay of Arrival (TDOA) method. Since the accuracy of this method varies with the microphone array, a naviga- tion acoustic map that indicates the location errors is built. 
This map also provides the robot with navigation trajectories point-to-point and the control is capable to drive the robot through these trajectories to a desired configuration. The proposed localization method is thoroughly tested using both a 900 Hz square signal and the natural sound of the robot, which is driven near the desired point with an average error of 0.067 m.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('223','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_223\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This article introduces the incorporation of acoustic sensors for the localization of a mobile robot. The robot is considered as a sound source and its position is located applying a Time Delay of Arrival (TDOA) method. Since the accuracy of this method varies with the microphone array, a naviga- tion acoustic map that indicates the location errors is built. This map also provides the robot with navigation trajectories point-to-point and the control is capable to drive the robot through these trajectories to a desired configuration. 
The proposed localization method is thoroughly tested using both a 900 Hz square signal and the natural sound of the robot, which is driven near the desired point with an average error of 0.067 m.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('223','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_223\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/doi.org\/10.1080\/01691864.2019.1573703\" title=\"https:\/\/doi.org\/10.1080\/01691864.2019.1573703\" target=\"_blank\">https:\/\/doi.org\/10.1080\/01691864.2019.1573703<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1080\/01691864.2019.1573703\" title=\"Follow DOI:10.1080\/01691864.2019.1573703\" target=\"_blank\">doi:10.1080\/01691864.2019.1573703<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('223','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rico-Fernandez, Maria;  Rios-Cabrera, Reyes;  Castelan, Mario;  Guerrero-Reyes, Hector;  Juarez-Maldonado, Antonio<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('224','tp_links')\" style=\"cursor:pointer;\">A contextualized approach for segmentation of foliage in different crop species<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Computers and Electronics in Agriculture, <\/span><span class=\"tp_pub_additional_volume\">vol. 156, <\/span><span class=\"tp_pub_additional_pages\">pp. 
378-386, <\/span><span class=\"tp_pub_additional_year\">2019<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 0168-1699<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_224\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('224','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_224\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('224','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_224\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Rico-Fernandez2019,<br \/>\r\ntitle = {A contextualized approach for segmentation of foliage in different crop species},<br \/>\r\nauthor = {Rico-Fernandez, Maria and Rios-Cabrera, Reyes and Castelan, Mario and Guerrero-Reyes, Hector and Juarez-Maldonado, Antonio},<br \/>\r\neditor = {Elsevier},<br \/>\r\nurl = {https:\/\/doi.org\/10.1016\/j.compag.2018.11.033},<br \/>\r\nissn = {0168-1699},<br \/>\r\nyear  = {2019},<br \/>\r\ndate = {2019-01-01},<br \/>\r\njournal = {Computers and Electronics in Agriculture},<br \/>\r\nvolume = {156},<br \/>\r\npages = {378-386},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('224','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_224\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/doi.org\/10.1016\/j.compag.2018.11.033\" title=\"https:\/\/doi.org\/10.1016\/j.compag.2018.11.033\" target=\"_blank\">https:\/\/doi.org\/10.1016\/j.compag.2018.11.033<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" 
onclick=\"teachpress_pub_showhide('224','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2017\">2017<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Journal Articles<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Lopez-Juarez, Ismael;  Rios-Cabrera, Reyes;  Hsieh, S J;  Howarth, M.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('205','tp_links')\" style=\"cursor:pointer;\">A hybrid non-invasive method for internal\/external quality assessment of potatoes<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">European Food Research and Technology, <\/span><span class=\"tp_pub_additional_year\">2017<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1438-2385<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_205\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('205','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_205\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('205','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_205\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('205','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_205\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Lopez-Juarez2017,<br \/>\r\ntitle = {A hybrid non-invasive method for internal\/external quality assessment of potatoes},<br \/>\r\nauthor = {Lopez-Juarez, Ismael and Rios-Cabrera, Reyes and Hsieh,S J and Howarth, M .},<br \/>\r\nurl = 
{https:\/\/doi.org\/10.1007\/s00217-017-2936-9},<br \/>\r\ndoi = {10.1007\/s00217-017-2936-9},<br \/>\r\nissn = {1438-2385},<br \/>\r\nyear  = {2017},<br \/>\r\ndate = {2017-07-11},<br \/>\r\njournal = {European Food Research and Technology},<br \/>\r\nabstract = {Consumers purchase fruits and vegetables based on its quality, which can be defined as a degree of excellence which is the result of a combination of characteristics, attributes and properties that have significance for market acceptability. In this paper, a novel hybrid active imaging methodology for potato quality inspection that uses an optical colour camera and an infrared thermal camera is presented. The methodology employs an artificial neural network (ANN) that uses quality data composed by two descriptors as input. The ANN works as a feature classifier so that its output is the potato quality grade. The input vector contains information related to external characteristics, such as shape, weight, length and width. Internal characteristics are also accounted for in the input vector in the form of excessive sugar content. The extra sugar content of the potato is an important problem for potato growers and potato chip manufacturers. Extra sugar content could result in diseases or wounds in the potato tuber. In general, potato tubers with low sugar content are considered as having a higher quality. The validation of the methodology was made through experimentation which consisted in fusing both, external and internal characteristics in the input vector to the ANN for an overall quality classification. 
Results using internal data as obtained from an infrared camera and fused with optical external parameters demonstrated the feasibility of the method since the prediction accuracy increased during potato grading.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('205','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_205\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Consumers purchase fruits and vegetables based on its quality, which can be defined as a degree of excellence which is the result of a combination of characteristics, attributes and properties that have significance for market acceptability. In this paper, a novel hybrid active imaging methodology for potato quality inspection that uses an optical colour camera and an infrared thermal camera is presented. The methodology employs an artificial neural network (ANN) that uses quality data composed by two descriptors as input. The ANN works as a feature classifier so that its output is the potato quality grade. The input vector contains information related to external characteristics, such as shape, weight, length and width. Internal characteristics are also accounted for in the input vector in the form of excessive sugar content. The extra sugar content of the potato is an important problem for potato growers and potato chip manufacturers. Extra sugar content could result in diseases or wounds in the potato tuber. In general, potato tubers with low sugar content are considered as having a higher quality. The validation of the methodology was made through experimentation which consisted in fusing both, external and internal characteristics in the input vector to the ANN for an overall quality classification. 
Results using internal data as obtained from an infrared camera and fused with optical external parameters demonstrated the feasibility of the method since the prediction accuracy increased during potato grading.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('205','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_205\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/doi.org\/10.1007\/s00217-017-2936-9\" title=\"https:\/\/doi.org\/10.1007\/s00217-017-2936-9\" target=\"_blank\">https:\/\/doi.org\/10.1007\/s00217-017-2936-9<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1007\/s00217-017-2936-9\" title=\"Follow DOI:10.1007\/s00217-017-2936-9\" target=\"_blank\">doi:10.1007\/s00217-017-2936-9<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('205','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Arechavaleta, Gustavo;  Morales-Diaz, America B.;  Perez-Villeda, Hector Manuel;  Castelan, Mario<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('2','tp_links')\" style=\"cursor:pointer;\">Hierarchical Task-Based Control of Multirobot Systems With Terminal Attractors<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">IEEE Transactions on Control Systems Technology, <\/span><span class=\"tp_pub_additional_volume\">vol. 25, <\/span><span class=\"tp_pub_additional_number\">no. 1, <\/span><span class=\"tp_pub_additional_pages\">pp. 
334 - 341, <\/span><span class=\"tp_pub_additional_year\">2017<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1063-6536<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_2\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('2','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_2\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('2','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_2\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('2','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_2\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{7454708,<br \/>\r\ntitle = {Hierarchical Task-Based Control of Multirobot Systems With Terminal Attractors},<br \/>\r\nauthor = {Arechavaleta, Gustavo and Morales-Diaz, America B. and Perez-Villeda, Hector Manuel and Castelan, Mario },<br \/>\r\nurl = {http:\/\/ieeexplore.ieee.org\/abstract\/document\/7454708\/},<br \/>\r\ndoi = {10.1109\/TCST.2016.2549279},<br \/>\r\nissn = {1063-6536},<br \/>\r\nyear  = {2017},<br \/>\r\ndate = {2017-01-01},<br \/>\r\njournal = {IEEE Transactions on Control Systems Technology},<br \/>\r\nvolume = {25},<br \/>\r\nnumber = {1},<br \/>\r\npages = {334 - 341},<br \/>\r\nabstract = {This brief proposes a hierarchical control scheme based on the definition of a set of multirobot task functions. To deal with the inherent conflicts between tasks, a strict hierarchy is imposed on them. 
We present a novel scheme that copes with two main difficulties shared in standard task-based controllers: 1) to impose a desired time convergence of tasks and 2) to avoid discontinuous task transitions occurred when a task is inserted or removed in the hierarchical structure. As a result, continuous input references are generated for the low-level control of the group. The validation is achieved in simulation and by performing an experiment with wheeled mobile robots.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('2','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_2\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This brief proposes a hierarchical control scheme based on the definition of a set of multirobot task functions. To deal with the inherent conflicts between tasks, a strict hierarchy is imposed on them. We present a novel scheme that copes with two main difficulties shared in standard task-based controllers: 1) to impose a desired time convergence of tasks and 2) to avoid discontinuous task transitions occurred when a task is inserted or removed in the hierarchical structure. As a result, continuous input references are generated for the low-level control of the group. 
The validation is achieved in simulation and by performing an experiment with wheeled mobile robots.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('2','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_2\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/ieeexplore.ieee.org\/abstract\/document\/7454708\/\" title=\"http:\/\/ieeexplore.ieee.org\/abstract\/document\/7454708\/\" target=\"_blank\">http:\/\/ieeexplore.ieee.org\/abstract\/document\/7454708\/<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/TCST.2016.2549279\" title=\"Follow DOI:10.1109\/TCST.2016.2549279\" target=\"_blank\">doi:10.1109\/TCST.2016.2549279<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('2','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2016\">2016<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Journal Articles<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Perez-Alcocer, R. 
R.;  Torres-Mendez, Luz Abril;  Olguin-Diaz, Ernesto;  Maldonado-Ramirez, Alejandro<\/p><p class=\"tp_pub_title\">Vision-based Autonomous Underwater Vehicle Navigation in Poor Visibility Conditions using a Model-free Robust Control <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_154\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('154','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_154\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{P\\'{e}rez-Alcocer2016,<br \/>\r\ntitle = {Vision-based Autonomous Underwater Vehicle Navigation in Poor Visibility Conditions using a Model-free Robust Control},<br \/>\r\nauthor = {Perez-Alcocer, R. R. and Torres-Mendez, Luz Abril and Olguin-Diaz, Ernesto and Maldonado-Ramirez, Alejandro },<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-06-06},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('154','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Mart\u00ednez-Gonz\u00e1lez, Pablo Arturo;  Castelan, Mario;  Arechavaleta, Gustavo<\/p><p class=\"tp_pub_title\">Vision Based Persistent Localization of a Humanoid Robot for Locomotion Tasks <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_159\" 
class=\"tp_show\" onclick=\"teachpress_pub_showhide('159','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_159\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Mart\\'{i}nez-Gonz\\'{a}lez2016b,<br \/>\r\ntitle = {Vision Based Persistent Localization of a Humanoid Robot for Locomotion Tasks},<br \/>\r\nauthor = {Mart\\'{i}nez-Gonz\\'{a}lez, Pablo Arturo and Castelan, Mario and Arechavaleta, Gustavo},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-06-06},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('159','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Hernandez-Rodriguez, Felipe;  Castelan, Mario<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('4','tp_links')\" style=\"cursor:pointer;\">A photometric sampling method for facial shape recovery<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Machine Vision and Applications, <\/span><span class=\"tp_pub_additional_volume\">vol. 27, <\/span><span class=\"tp_pub_additional_number\">no. 4, <\/span><span class=\"tp_pub_additional_pages\">pp. 
483-497, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_4\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('4','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_4\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('4','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_4\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Hernandez-Rodriguez2016,<br \/>\r\ntitle = {A photometric sampling method for facial shape recovery},<br \/>\r\nauthor = {Hernandez-Rodriguez, Felipe and Castelan, Mario },<br \/>\r\nurl = {http:\/\/link.springer.com\/article\/10.1007%2Fs00138-016-0755-9},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-04-01},<br \/>\r\njournal = {Machine Vision and Applications},<br \/>\r\nvolume = {27},<br \/>\r\nnumber = {4},<br \/>\r\npages = {483-497},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('4','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_4\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/link.springer.com\/article\/10.1007%2Fs00138-016-0755-9\" title=\"http:\/\/link.springer.com\/article\/10.1007%2Fs00138-016-0755-9\" target=\"_blank\">http:\/\/link.springer.com\/article\/10.1007%2Fs00138-016-0755-9<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('4','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p 
class=\"tp_pub_author\"> Martinez-Gonzalez, Pablo;  Castelan, Mario;  Arechavaleta, Gustavo<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('1','tp_links')\" style=\"cursor:pointer;\">Vision based persistent localization of a humanoid robot for locomotion Tasks<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">International Journal of Applied Mathematics and Computer Science, <\/span><span class=\"tp_pub_additional_volume\">vol. 26, <\/span><span class=\"tp_pub_additional_number\">no. 3, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_1\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('1','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_1\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('1','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_1\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Mart\\`{i}nez-Gonz\\'{a}lez2016,<br \/>\r\ntitle = {Vision based persistent localization of a humanoid robot for locomotion Tasks},<br \/>\r\nauthor = {Martinez-Gonzalez, Pablo and Castelan, Mario and Arechavaleta, Gustavo },<br \/>\r\nurl = {https:\/\/drive.google.com\/file\/d\/0B-7dVUdTjeJUNGdXd0N6UWRvdk0\/view},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-03-26},<br \/>\r\njournal = {International Journal of Applied Mathematics and Computer Science},<br \/>\r\nvolume = {26},<br \/>\r\nnumber = {3},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" 
onclick=\"teachpress_pub_showhide('1','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_1\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/drive.google.com\/file\/d\/0B-7dVUdTjeJUNGdXd0N6UWRvdk0\/view\" title=\"https:\/\/drive.google.com\/file\/d\/0B-7dVUdTjeJUNGdXd0N6UWRvdk0\/view\" target=\"_blank\">https:\/\/drive.google.com\/file\/d\/0B-7dVUdTjeJUNGdXd0N6UWRvdk0\/view<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('1','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Delfin, Josafat;  Becerra, Hector M;  Arechavaleta, Gustavo<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('5','tp_links')\" style=\"cursor:pointer;\">Visual Servo Walking Control for Humanoids with Finite-time Convergence and Smooth Robot Velocities<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">International Journal of Control, <\/span><span class=\"tp_pub_additional_volume\">vol. 89, <\/span><span class=\"tp_pub_additional_number\">no. 7, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1342-1358, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1366-5820<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_5\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('5','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_5\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('5','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_5\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('5','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_5\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Delfin2016,<br \/>\r\ntitle = {Visual Servo Walking Control for Humanoids with Finite-time Convergence and Smooth Robot Velocities},<br \/>\r\nauthor = {Delfin, Josafat and Becerra, Hector M and Arechavaleta, Gustavo },<br \/>\r\nurl = {http:\/\/www.tandfonline.com\/doi\/abs\/10.1080\/00207179.2015.1129558},<br \/>\r\ndoi = {http:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558},<br \/>\r\nissn = {1366-5820},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-10},<br \/>\r\njournal = {International Journal of Control},<br \/>\r\nvolume = {89},<br \/>\r\nnumber = {7},<br \/>\r\npages = {1342-1358},<br \/>\r\nabstract = {In this paper, we address the problem of humanoid locomotion guided from information of a monocular camera. The goal of the robot is to reach a desired location defined in terms of a target image, i.e., a positioning task. The proposed approach allows us to introduce a desired time to complete the positioning task, which is advantageous in contrast to the classical exponential convergence. 
In particular, finite-time convergence is achieved while generating smooth robot velocities and considering the omnidirectional walking capability of the robot. In addition, we propose a hierarchical task-based control scheme, which can simultaneously handle the visual positioning and the obstacle avoidance tasks without affecting the desired time of convergence. The controller is able to activate or inactivate the obstacle avoidance task without generating discontinuous velocity references while the humanoid is walking. Stability of the closed loop for the two task-based control is demonstrated theoretically even during the transitions between the tasks. The proposed approach is generic in the sense that different visual control schemes are supported. We evaluate a homography-based visual servoing for position-based and image-based modalities, as well as for eye-in-hand and eye-to-hand configurations. The experimental evaluation is performed with the humanoid robot NAO.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('5','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_5\" style=\"display:none;\"><div class=\"tp_abstract_entry\">In this paper, we address the problem of humanoid locomotion guided from information of a monocular camera. The goal of the robot is to reach a desired location defined in terms of a target image, i.e., a positioning task. The proposed approach allows us to introduce a desired time to complete the positioning task, which is advantageous in contrast to the classical exponential convergence. In particular, finite-time convergence is achieved while generating smooth robot velocities and considering the omnidirectional walking capability of the robot. 
In addition, we propose a hierarchical task-based control scheme, which can simultaneously handle the visual positioning and the obstacle avoidance tasks without affecting the desired time of convergence. The controller is able to activate or inactivate the obstacle avoidance task without generating discontinuous velocity references while the humanoid is walking. Stability of the closed loop for the two task-based control is demonstrated theoretically even during the transitions between the tasks. The proposed approach is generic in the sense that different visual control schemes are supported. We evaluate a homography-based visual servoing for position-based and image-based modalities, as well as for eye-in-hand and eye-to-hand configurations. The experimental evaluation is performed with the humanoid robot NAO.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('5','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_5\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.tandfonline.com\/doi\/abs\/10.1080\/00207179.2015.1129558\" title=\"http:\/\/www.tandfonline.com\/doi\/abs\/10.1080\/00207179.2015.1129558\" target=\"_blank\">http:\/\/www.tandfonline.com\/doi\/abs\/10.1080\/00207179.2015.1129558<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/http:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558\" title=\"Follow DOI:http:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558\" target=\"_blank\">doi:http:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('5','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rios-Cabrera, Reyes;  Morales-Diaz, America 
B.;  Aviles-Vi\u00f1as, Jaime F;  Lopez-Juarez, Ismael<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('6','tp_links')\" style=\"cursor:pointer;\">Robotic GMAW online learning: issues and experiments<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">The International Journal of Advanced Manufacturing Technology, <\/span><span class=\"tp_pub_additional_volume\">vol. 87, <\/span><span class=\"tp_pub_additional_number\">no. 5, <\/span><span class=\"tp_pub_additional_pages\">pp. 2113\u20132134, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1433-3015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_6\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('6','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_6\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('6','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_6\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('6','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_6\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Rios-Cabrera2016,<br \/>\r\ntitle = {Robotic GMAW online learning: issues and experiments},<br \/>\r\nauthor = {Rios-Cabrera, Reyes and Morales-Diaz, America B. 
and Aviles-Vi\\~{n}as, Jaime F and Lopez-Juarez, Ismael },<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1007\/s00170-016-8618-0},<br \/>\r\ndoi = {10.1007\/s00170-016-8618-0},<br \/>\r\nissn = {1433-3015},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {The International Journal of Advanced Manufacturing Technology},<br \/>\r\nvolume = {87},<br \/>\r\nnumber = {5},<br \/>\r\npages = {2113--2134},<br \/>\r\nabstract = {This paper presents three main contributions: (i) an experimental analysis of variables, using well-defined statistical patterns applied to the main parameters of the welding process. (ii) An on-line\/off-line learning and testing method, showing that robots can acquire a useful knowledge base without human intervention to learn and reproduce bead geometries. And finally, (iii) an on-line testing analysis including penetration of the bead, that is used to train an artificial neural network (ANN). For the experiments, an optic camera was used in order to measure bead geometry (width and height). Also real-time computer vision algorithms were implemented to extract training patterns. The proposal was carried out using an industrial KUKA robot and a GMAW type machine inside a manufacturing cell. We present expermental analysis that show different issues and solutions to build an industrial adaptive system for the robotics welding process.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('6','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_6\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This paper presents three main contributions: (i) an experimental analysis of variables, using well-defined statistical patterns applied to the main parameters of the welding process. 
(ii) An on-line\/off-line learning and testing method, showing that robots can acquire a useful knowledge base without human intervention to learn and reproduce bead geometries. And finally, (iii) an on-line testing analysis including penetration of the bead, that is used to train an artificial neural network (ANN). For the experiments, an optic camera was used in order to measure bead geometry (width and height). Also real-time computer vision algorithms were implemented to extract training patterns. The proposal was carried out using an industrial KUKA robot and a GMAW type machine inside a manufacturing cell. We present experimental analysis that show different issues and solutions to build an industrial adaptive system for the robotics welding process.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('6','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_6\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1007\/s00170-016-8618-0\" title=\"http:\/\/dx.doi.org\/10.1007\/s00170-016-8618-0\" target=\"_blank\">http:\/\/dx.doi.org\/10.1007\/s00170-016-8618-0<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1007\/s00170-016-8618-0\" title=\"Follow DOI:10.1007\/s00170-016-8618-0\" target=\"_blank\">doi:10.1007\/s00170-016-8618-0<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('6','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Aviles-Vi\u00f1as, Jaime F;  Rios-Cabrera, Reyes;  Lopez-Juarez, Ismael<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('7','tp_links')\" style=\"cursor:pointer;\">On-line learning of welding bead geometry 
in industrial robots<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">The International Journal of Advanced Manufacturing Technology, <\/span><span class=\"tp_pub_additional_volume\">vol. 83, <\/span><span class=\"tp_pub_additional_number\">no. 1, <\/span><span class=\"tp_pub_additional_pages\">pp. 217\u2013231, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1433-3015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_7\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('7','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_7\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('7','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_7\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('7','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_7\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Aviles-Vi\\~{n}as2016b,<br \/>\r\ntitle = {On-line learning of welding bead geometry in industrial robots},<br \/>\r\nauthor = {Aviles-Vi\\~{n}as, Jaime F and Rios-Cabrera, Reyes and Lopez-Juarez, Ismael },<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1007\/s00170-015-7422-6},<br \/>\r\ndoi = {10.1007\/s00170-015-7422-6},<br \/>\r\nissn = {1433-3015},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {The International Journal of Advanced Manufacturing Technology},<br \/>\r\nvolume = {83},<br \/>\r\nnumber = {1},<br \/>\r\npages = {217--231},<br \/>\r\nabstract = {In this paper, we propose an architecture based on an 
artificial neural network (ANN), to learn welding skills automatically in industrial robots. With the aid of an optic camera and a laser-based sensor, the bead geometry (width and height) is measured. We propose a real-time computer vision algorithm to extract training patterns in order to acquire knowledge to later predict specific geometries. The proposal is implemented and tested in an industrial KUKA KR16 robot and a GMAW type machine within a manufacturing cell. Several data analysis are described as well as off-line and on-line training, learning strategies, and testing experimentation. It is demonstrated during our experiments that, after learning the skill, the robot is able to produce the requested bead geometry even without any knowledge about the welding parameters such as arc voltage and current. We implemented an on-line learning test, where the whole experiments and learning process take only about 4 min. Using this knowledge later, we obtained up to 95 % accuracy in prediction.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('7','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_7\" style=\"display:none;\"><div class=\"tp_abstract_entry\">In this paper, we propose an architecture based on an artificial neural network (ANN), to learn welding skills automatically in industrial robots. With the aid of an optic camera and a laser-based sensor, the bead geometry (width and height) is measured. We propose a real-time computer vision algorithm to extract training patterns in order to acquire knowledge to later predict specific geometries. The proposal is implemented and tested in an industrial KUKA KR16 robot and a GMAW type machine within a manufacturing cell. 
Several data analysis are described as well as off-line and on-line training, learning strategies, and testing experimentation. It is demonstrated during our experiments that, after learning the skill, the robot is able to produce the requested bead geometry even without any knowledge about the welding parameters such as arc voltage and current. We implemented an on-line learning test, where the whole experiments and learning process take only about 4 min. Using this knowledge later, we obtained up to 95 % accuracy in prediction.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('7','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_7\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1007\/s00170-015-7422-6\" title=\"http:\/\/dx.doi.org\/10.1007\/s00170-015-7422-6\" target=\"_blank\">http:\/\/dx.doi.org\/10.1007\/s00170-015-7422-6<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1007\/s00170-015-7422-6\" title=\"Follow DOI:10.1007\/s00170-015-7422-6\" target=\"_blank\">doi:10.1007\/s00170-015-7422-6<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('7','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('8','tp_links')\" style=\"cursor:pointer;\">Robotic Visual Tracking of Relevant Cues in Underwater Environments with Poor Visibility Conditions<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span 
class=\"tp_pub_additional_journal\">Journal of Sensors, <\/span><span class=\"tp_pub_additional_volume\">vol. 2016, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_8\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('8','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_8\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('8','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_8\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('8','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_8\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{maldonado2016robotic,<br \/>\r\ntitle = {Robotic Visual Tracking of Relevant Cues in Underwater Environments with Poor Visibility Conditions},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril},<br \/>\r\nurl = {https:\/\/www.hindawi.com\/journals\/js\/2016\/4265042\/},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {Journal of Sensors},<br \/>\r\nvolume = {2016},<br \/>\r\npublisher = {Hindawi Publishing Corporation},<br \/>\r\nabstract = {Using visual sensors for detecting regions of interest in underwater environments is fundamental for many robotic applications. Particularly, for an autonomous exploration task, an underwater vehicle must be guided towards features that are of interest. If the relevant features can be seen from the distance, then smooth control movements of the vehicle are feasible in order to position itself close enough with the final goal of gathering visual quality images. 
However, it is a challenging task for a robotic system to achieve stable tracking of the same regions since marine environments are unstructured and highly dynamic and usually have poor visibility. In this paper, a framework that robustly detects and tracks regions of interest in real time is presented. We use the chromatic channels of a perceptual uniform color space to detect relevant regions and adapt a visual attention scheme to underwater scenes. For the tracking, we associate with each relevant point superpixel descriptors which are invariant to changes in illumination and shape. The field experiment results have demonstrated that our approach is robust when tested on different visibility conditions and depths in underwater explorations.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('8','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_8\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Using visual sensors for detecting regions of interest in underwater environments is fundamental for many robotic applications. Particularly, for an autonomous exploration task, an underwater vehicle must be guided towards features that are of interest. If the relevant features can be seen from the distance, then smooth control movements of the vehicle are feasible in order to position itself close enough with the final goal of gathering visual quality images. However, it is a challenging task for a robotic system to achieve stable tracking of the same regions since marine environments are unstructured and highly dynamic and usually have poor visibility. In this paper, a framework that robustly detects and tracks regions of interest in real time is presented. 
We use the chromatic channels of a perceptual uniform color space to detect relevant regions and adapt a visual attention scheme to underwater scenes. For the tracking, we associate with each relevant point superpixel descriptors which are invariant to changes in illumination and shape. The field experiment results have demonstrated that our approach is robust when tested on different visibility conditions and depths in underwater explorations.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('8','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_8\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/www.hindawi.com\/journals\/js\/2016\/4265042\/\" title=\"https:\/\/www.hindawi.com\/journals\/js\/2016\/4265042\/\" target=\"_blank\">https:\/\/www.hindawi.com\/journals\/js\/2016\/4265042\/<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('8','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> S\u00e1nchez-Escobedo, Dalila;  Castelan, Mario;  Smith, William A P<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('10','tp_links')\" style=\"cursor:pointer;\">Statistical 3D face shape estimation from occluding contours<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Computer Vision and Image Understanding, <\/span><span class=\"tp_pub_additional_volume\">vol. 142, <\/span><span class=\"tp_pub_additional_pages\">pp. 
111 - 124, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1077-3142<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_10\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('10','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_10\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('10','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_10\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('10','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_10\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{S\\'{a}nchezEscobedo2016111,<br \/>\r\ntitle = {Statistical 3D face shape estimation from occluding contours},<br \/>\r\nauthor = {S\\'{a}nchez-Escobedo, Dalila and Castelan, Mario and Smith, William A P},<br \/>\r\nurl = {http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1077314215001885},<br \/>\r\ndoi = {http:\/\/dx.doi.org\/10.1016\/j.cviu.2015.08.012},<br \/>\r\nissn = {1077-3142},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {Computer Vision and Image Understanding},<br \/>\r\nvolume = {142},<br \/>\r\npages = {111 - 124},<br \/>\r\nabstract = {Abstract This paper addresses the problem of 3D face shape approximation from occluding contours, i.e., the boundaries between the facial region and the background. To this end, a linear regression process that models the relationship between a set of 2D occluding contours and a set of 3D vertices is applied onto the corresponding training sets using Partial Least Squares. 
The result of this step is a regression matrix which is capable of estimating new 3D face point clouds from the out-of-training 2D Cartesian pixel positions of the selected contours. Our approach benefits from the highly correlated spaces spanned by the 3D vertices around the occluding boundaries of a face and their corresponding 2D pixel projections. As a result, the proposed method resembles dense surface shape recovery from missing data. Our technique is evaluated over four scenarios designed to investigate both the influence of the contours included in the training set and the considered number of contours. Qualitative and quantitative experiments demonstrate that using contours outperform the state of the art on the database used in this article. Even using a limited number of contours provides a useful approximation to the 3D face surface.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('10','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_10\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Abstract This paper addresses the problem of 3D face shape approximation from occluding contours, i.e., the boundaries between the facial region and the background. To this end, a linear regression process that models the relationship between a set of 2D occluding contours and a set of 3D vertices is applied onto the corresponding training sets using Partial Least Squares. The result of this step is a regression matrix which is capable of estimating new 3D face point clouds from the out-of-training 2D Cartesian pixel positions of the selected contours. Our approach benefits from the highly correlated spaces spanned by the 3D vertices around the occluding boundaries of a face and their corresponding 2D pixel projections. 
As a result, the proposed method resembles dense surface shape recovery from missing data. Our technique is evaluated over four scenarios designed to investigate both the influence of the contours included in the training set and the considered number of contours. Qualitative and quantitative experiments demonstrate that using contours outperform the state of the art on the database used in this article. Even using a limited number of contours provides a useful approximation to the 3D face surface.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('10','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_10\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1077314215001885\" title=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1077314215001885\" target=\"_blank\">http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1077314215001885<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/http:\/\/dx.doi.org\/10.1016\/j.cviu.2015.08.012\" title=\"Follow DOI:http:\/\/dx.doi.org\/10.1016\/j.cviu.2015.08.012\" target=\"_blank\">doi:http:\/\/dx.doi.org\/10.1016\/j.cviu.2015.08.012<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('10','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Delfin, Josafat;  Becerra, Hector M;  Arechavaleta, Gustavo<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('160','tp_links')\" style=\"cursor:pointer;\">Visual servo walking control for humanoids with finite-time convergence and smooth robot velocities<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> 
<\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">International Journal of Control, <\/span><span class=\"tp_pub_additional_volume\">vol. 89, <\/span><span class=\"tp_pub_additional_number\">no. 7, <\/span><span class=\"tp_pub_additional_pages\">pp. 1342-1358, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_160\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('160','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_160\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('160','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_160\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('160','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_160\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{doi:10.1080\/00207179.2015.1129558,<br \/>\r\ntitle = {Visual servo walking control for humanoids with finite-time convergence and smooth robot velocities},<br \/>\r\nauthor = {Delfin, Josafat and Becerra, Hector M and Arechavaleta, Gustavo},<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558},<br \/>\r\ndoi = {10.1080\/00207179.2015.1129558},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {International Journal of Control},<br \/>\r\nvolume = {89},<br \/>\r\nnumber = {7},<br \/>\r\npages = {1342-1358},<br \/>\r\nabstract = {ABSTRACTIn this paper, we address the problem of humanoid locomotion guided from information of a monocular camera. The goal of the robot is to reach a desired location defined in terms of a target image, i.e., a positioning task. 
The proposed approach allows us to introduce a desired time to complete the positioning task, which is advantageous in contrast to the classical exponential convergence. In particular, finite-time convergence is achieved while generating smooth robot velocities and considering the omnidirectional walking capability of the robot. In addition, we propose a hierarchical task-based control scheme, which can simultaneously handle the visual positioning and the obstacle avoidance tasks without affecting the desired time of convergence. The controller is able to activate or inactivate the obstacle avoidance task without generating discontinuous velocity references while the humanoid is walking. Stability of the closed loop for the two task-based control is demonstrated theoretically even during the transitions between the tasks. The proposed approach is generic in the sense that different visual control schemes are supported. We evaluate a homography-based visual servoing for position-based and image-based modalities, as well as for eye-in-hand and eye-to-hand configurations. The experimental evaluation is performed with the humanoid robot NAO.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('160','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_160\" style=\"display:none;\"><div class=\"tp_abstract_entry\">ABSTRACTIn this paper, we address the problem of humanoid locomotion guided from information of a monocular camera. The goal of the robot is to reach a desired location defined in terms of a target image, i.e., a positioning task. The proposed approach allows us to introduce a desired time to complete the positioning task, which is advantageous in contrast to the classical exponential convergence. 
In particular, finite-time convergence is achieved while generating smooth robot velocities and considering the omnidirectional walking capability of the robot. In addition, we propose a hierarchical task-based control scheme, which can simultaneously handle the visual positioning and the obstacle avoidance tasks without affecting the desired time of convergence. The controller is able to activate or inactivate the obstacle avoidance task without generating discontinuous velocity references while the humanoid is walking. Stability of the closed loop for the two task-based control is demonstrated theoretically even during the transitions between the tasks. The proposed approach is generic in the sense that different visual control schemes are supported. We evaluate a homography-based visual servoing for position-based and image-based modalities, as well as for eye-in-hand and eye-to-hand configurations. The experimental evaluation is performed with the humanoid robot NAO.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('160','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_160\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558\" title=\"http:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558\" target=\"_blank\">http:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558\" title=\"Follow DOI:10.1080\/00207179.2015.1129558\" target=\"_blank\">doi:10.1080\/00207179.2015.1129558<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('160','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> 
Benitez Perez, H.;  Lopez-Juarez, Ismael;  Garza-Alanis, P. C.;  Rios-Cabrera, Reyes;  Duran Chavesti, A.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('163','tp_links')\" style=\"cursor:pointer;\">Reconfiguration Distributed Objects in an Intelligent Manufacturing Cell<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">IEEE Latin America Transactions, <\/span><span class=\"tp_pub_additional_volume\">vol. 14, <\/span><span class=\"tp_pub_additional_number\">no. 1, <\/span><span class=\"tp_pub_additional_pages\">pp. 136-146, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1548-0992<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_163\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('163','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_163\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('163','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_163\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('163','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_163\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{7430073,<br \/>\r\ntitle = {Reconfiguration Distributed Objects in an Intelligent Manufacturing Cell},<br \/>\r\nauthor = {Benitez Perez, H. and Lopez-Juarez, Ismael and Garza-Alanis, P. C. 
and Rios-Cabrera, Reyes and Duran Chavesti, A.},<br \/>\r\ndoi = {10.1109\/TLA.2016.7430073},<br \/>\r\nissn = {1548-0992},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {IEEE Latin America Transactions},<br \/>\r\nvolume = {14},<br \/>\r\nnumber = {1},<br \/>\r\npages = {136-146},<br \/>\r\nabstract = {A manufacture system with the abilities of easy reconfiguration and highly scalability becomes flexible, dynamic and open to the use of software technologies. To give these abilities to a manufacture cell formed of three industrial robots and two conveyors, a middleware based on the programming standard Common Object Request Broker Architecture (CORBA) was developed, thus creating a distributed manufacture cell, allowing us to have a real production with different final products. In order to optimize the production times of the different products to be manufactured, a product scheduler was developed using the algorithm Earliest Deadline First (EDF) and the support algorithm Deferrable Server (DS). Given that failures may occur on any of the specialized modules of the manufacture system, the self reconfiguration of the manufacture system is something very desirable. This article proposes an algorithm to solve this problem, the algorithm identifies the failures in relation to the time it takes the system to make a product, then makes a modification on the working speed of the plant elements of the specialized modules.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('163','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_163\" style=\"display:none;\"><div class=\"tp_abstract_entry\">A manufacture system with the abilities of easy reconfiguration and highly scalability becomes flexible, dynamic and open to the use of software technologies. 
To give these abilities to a manufacture cell formed of three industrial robots and two conveyors, a middleware based on the programming standard Common Object Request Broker Architecture (CORBA) was developed, thus creating a distributed manufacture cell, allowing us to have a real production with different final products. In order to optimize the production times of the different products to be manufactured, a product scheduler was developed using the algorithm Earliest Deadline First (EDF) and the support algorithm Deferrable Server (DS). Given that failures may occur on any of the specialized modules of the manufacture system, the self reconfiguration of the manufacture system is something very desirable. This article proposes an algorithm to solve this problem, the algorithm identifies the failures in relation to the time it takes the system to make a product, then makes a modification on the working speed of the plant elements of the specialized modules.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('163','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_163\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/TLA.2016.7430073\" title=\"Follow DOI:10.1109\/TLA.2016.7430073\" target=\"_blank\">doi:10.1109\/TLA.2016.7430073<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('163','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Cortes-Perez, Noel;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\">A Low-Cost Mirror-Based Active Perception System for Effective Collision Free Underwater Robotic Navigation <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span 
class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), <\/span><span class=\"tp_pub_additional_pages\">pp. 61-68, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_168\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('168','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_168\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Cort\u00E9sP\u00E9rez2016ALM,<br \/>\r\ntitle = {A Low-Cost Mirror-Based Active Perception System for Effective Collision Free Underwater Robotic Navigation},<br \/>\r\nauthor = {Cortes-Perez, Noel and Torres-Mendez, Luz Abril},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)},<br \/>\r\npages = {61-68},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('168','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_conference\">Conferences<\/h3><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Castelan, Mario<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('166','tp_links')\" style=\"cursor:pointer;\">A bag of relevant regions for visual place recognition in challenging environments<\/a> <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">2016 23rd International Conference on Pattern Recognition 
(ICPR), <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_166\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('166','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_166\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('166','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_166\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('166','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_166\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{7899826,<br \/>\r\ntitle = {A bag of relevant regions for visual place recognition in challenging environments},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Castelan, Mario},<br \/>\r\ndoi = {10.1109\/ICPR.2016.7899826},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-12-01},<br \/>\r\nbooktitle = {2016 23rd International Conference on Pattern Recognition (ICPR)},<br \/>\r\npages = {1358-1363},<br \/>\r\nabstract = {In this paper, we present a method for vision-based place recognition in environments with a high content of similar features and that are prone to variations in illumination. The high similarity of features makes difficult the disambiguation between two different places. The novelty of our method relies on using the Bag of Words (BoW) approach to derive an image descriptor from a set of relevant regions, which are extracted using a visual attention algorithm. We name our approach Bag of Relevant Regions (BoRR). The descriptor of each relevant region is built by using a 2D histogram of the chromatic channels of the CIE-Lab color space. 
We have compared our results with those using state of the art descriptors that include the BoW and demonstrate that our approach performs better in most of the cases.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('166','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_166\" style=\"display:none;\"><div class=\"tp_abstract_entry\">In this paper, we present a method for vision-based place recognition in environments with a high content of similar features and that are prone to variations in illumination. The high similarity of features makes difficult the disambiguation between two different places. The novelty of our method relies on using the Bag of Words (BoW) approach to derive an image descriptor from a set of relevant regions, which are extracted using a visual attention algorithm. We name our approach Bag of Relevant Regions (BoRR). The descriptor of each relevant region is built by using a 2D histogram of the chromatic channels of the CIE-Lab color space. 
We have compared our results with those using state of the art descriptors that include the BoW and demonstrate that our approach performs better in most of the cases.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('166','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_166\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/ICPR.2016.7899826\" title=\"Follow DOI:10.1109\/ICPR.2016.7899826\" target=\"_blank\">doi:10.1109\/ICPR.2016.7899826<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('166','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Delfin, Josafat;  Becerra, H\u00e9ctor M;  Arechavaleta, Gustavo<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('3','tp_links')\" style=\"cursor:pointer;\">Humanoid Localization and Navigation using a Visual Memory<\/a> <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">IEEE-RAS 16th International Conference on Humanoid Robots, <\/span><span class=\"tp_pub_additional_publisher\">IEEE, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 2164-0580<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_3\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('3','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_3\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('3','tp_links')\" title=\"Show links and resources\" 
style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_3\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('3','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_3\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{conf:Delfin2016,<br \/>\r\ntitle = {Humanoid Localization and Navigation using a Visual Memory},<br \/>\r\nauthor = {Delfin, Josafat and Becerra, H\\'{e}ctor M and Arechavaleta, Gustavo },<br \/>\r\ndoi = {10.1109\/HUMANOIDS.2016.7803354},<br \/>\r\nissn = {2164-0580},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-11-15},<br \/>\r\nbooktitle = {IEEE-RAS 16th International Conference on Humanoid Robots},<br \/>\r\npages = {725-731},<br \/>\r\npublisher = {IEEE},<br \/>\r\nabstract = {A visual memory (VM) is a topological map in which a set of key images organized in form of a graph represents an environment. In this paper, a navigation strategy for humanoid robots addressing the problems of localization, visual path planning and path following based on a VM is proposed. Assuming that the VM is given, the main contributions of the paper are: 1) A novel pure vision-based localization method. 2) The introduction of the estimated rotation between key images in the path planning stage to benefit paths with enough visual information and with less effort of robot rotation. 3) The integration of the complete navigation strategy and its experimental evaluation with a Nao robot in an unstructured environment. 
The humanoid robot is modeled as a holonomic system and the strategy might be used in different scenarios like corridors, uncluttered or cluttered environments.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('3','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_3\" style=\"display:none;\"><div class=\"tp_abstract_entry\">A visual memory (VM) is a topological map in which a set of key images organized in form of a graph represents an environment. In this paper, a navigation strategy for humanoid robots addressing the problems of localization, visual path planning and path following based on a VM is proposed. Assuming that the VM is given, the main contributions of the paper are: 1) A novel pure vision-based localization method. 2) The introduction of the estimated rotation between key images in the path planning stage to benefit paths with enough visual information and with less effort of robot rotation. 3) The integration of the complete navigation strategy and its experimental evaluation with a Nao robot in an unstructured environment. 
The humanoid robot is modeled as a holonomic system and the strategy might be used in different scenarios like corridors, uncluttered or cluttered environments.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('3','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_3\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/HUMANOIDS.2016.7803354\" title=\"Follow DOI:10.1109\/HUMANOIDS.2016.7803354\" target=\"_blank\">doi:10.1109\/HUMANOIDS.2016.7803354<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('3','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\">A Bag of Relevant Regions Model for Place Recognition in Coral Reefs <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">OCEANS 2016, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_9\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('9','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_9\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{maldonado2016bag,<br \/>\r\ntitle = {A Bag of Relevant Regions Model for Place Recognition in Coral Reefs},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril },<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\nbooktitle = {OCEANS 2016},<br \/>\r\npages = 
{1--5},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('9','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_inproceedings\">Proceedings Articles<\/h3><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-M\u00e9ndez, Luz Abril<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('165','tp_links')\" style=\"cursor:pointer;\">A bag of relevant regions model for visual place recognition in coral reefs<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2016 MTS\/IEEE Monterey, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1-5, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_165\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('165','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_165\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('165','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_165\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('165','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_165\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{7761188,<br \/>\r\ntitle = {A bag of relevant regions model for visual place recognition in coral reefs},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-M\\'{e}ndez, Luz Abril},<br \/>\r\ndoi = {10.1109\/OCEANS.2016.7761188},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-09-01},<br \/>\r\nbooktitle = {OCEANS 2016 MTS\/IEEE Monterey},<br \/>\r\npages = {1-5},<br \/>\r\nabstract = {Vision-based place recognition in underwater environments is a key component for autonomous robotic exploration. However, this task can be very challenging due to the inherent properties of this kind of places such as: color distortion, poor visibility, perceptual aliasing and dynamic illumination. In this paper, we present a method for vision-based place recognition in coral reefs. Our method relies on using the Bag-of-Words (BoW) approach to derive a descriptor, for the whole image, from a set of relevant regions, which are extracted by utilizing a visual attention algorithm. The descriptor for each relevant region is built by using an histogram of the chromatic channels of the CIE-Lab color space. 
We present results of our method for a place recognition task in real life videos as well as comparisons of our method against other popular techniques. It can be seen that our approach performs better in most of the cases.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('165','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_165\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Vision-based place recognition in underwater environments is a key component for autonomous robotic exploration. However, this task can be very challenging due to the inherent properties of this kind of places such as: color distortion, poor visibility, perceptual aliasing and dynamic illumination. In this paper, we present a method for vision-based place recognition in coral reefs. Our method relies on using the Bag-of-Words (BoW) approach to derive a descriptor, for the whole image, from a set of relevant regions, which are extracted by utilizing a visual attention algorithm. The descriptor for each relevant region is built by using an histogram of the chromatic channels of the CIE-Lab color space. We present results of our method for a place recognition task in real life videos as well as comparisons of our method against other popular techniques. 
It can be seen that our approach performs better in most of the cases.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('165','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_165\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/OCEANS.2016.7761188\" title=\"Follow DOI:10.1109\/OCEANS.2016.7761188\" target=\"_blank\">doi:10.1109\/OCEANS.2016.7761188<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('165','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Ponce-Hinestroza, A. N.;  Torres-Mendez, Luz Abril;  Drews, Paulo<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('167','tp_links')\" style=\"cursor:pointer;\">A statistical learning approach for underwater color restoration with adaptive training based on visual attention<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2016 MTS\/IEEE Monterey, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1-6, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_167\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('167','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_167\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('167','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_167\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('167','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_167\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{7761187,<br \/>\r\ntitle = {A statistical learning approach for underwater color restoration with adaptive training based on visual attention},<br \/>\r\nauthor = {Ponce-Hinestroza, A. N. and Torres-Mendez, Luz Abril and Drews, Paulo},<br \/>\r\ndoi = {10.1109\/OCEANS.2016.7761187},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-09-01},<br \/>\r\nbooktitle = {OCEANS 2016 MTS\/IEEE Monterey},<br \/>\r\npages = {1-6},<br \/>\r\nabstract = {In most artificial vision systems the quality of acquired images is directly related with the amount of information that can be obtained from them, and, particularly in underwater robotics applications involving monitoring and inspection tasks this is crucial. Statistical learning methods like Markov Random Fields with Belief Propagation (MRF-BP) provide a solution by using existing essential correlations in training sets. However, as in any restoration\/correction method for real applications, it is not possible to have color ground truth available on-line. 
In this paper, we present a MRF-BP model formulated in the chromatic domain of underwater scenes such that we synthesize the ground truth color to train the model and maximize the capabilities of our method. The generated ground truth introduces some improvements to existing color correction methods and visual attention considerations which also helps to choose a small size training set for the MRF-BP model. Feasibility of our approach is shown from the results in which a good color discrimination is observed even in poor visibility conditions.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('167','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_167\" style=\"display:none;\"><div class=\"tp_abstract_entry\">In most artificial vision systems the quality of acquired images is directly related with the amount of information that can be obtained from them, and, particularly in underwater robotics applications involving monitoring and inspection tasks this is crucial. Statistical learning methods like Markov Random Fields with Belief Propagation (MRF-BP) provide a solution by using existing essential correlations in training sets. However, as in any restoration\/correction method for real applications, it is not possible to have color ground truth available on-line. In this paper, we present a MRF-BP model formulated in the chromatic domain of underwater scenes such that we synthesize the ground truth color to train the model and maximize the capabilities of our method. The generated ground truth introduces some improvements to existing color correction methods and visual attention considerations which also helps to choose a small size training set for the MRF-BP model. 
Feasibility of our approach is shown from the results in which a good color discrimination is observed even in poor visibility conditions.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('167','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_167\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/OCEANS.2016.7761187\" title=\"Follow DOI:10.1109\/OCEANS.2016.7761187\" target=\"_blank\">doi:10.1109\/OCEANS.2016.7761187<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('167','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Ponce-Hinestroza, A-N;  Torres-Mendez, Luz Abril;  Drews, Paulo<\/p><p class=\"tp_pub_title\">A statistical learning approach for underwater color restoration with adaptive training based on visual attention <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2016 MTS\/IEEE Monterey, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1\u20136, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_11\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('11','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_11\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{ponce2016oceansb,<br \/>\r\ntitle = {A statistical learning approach for underwater color restoration with adaptive training based on visual attention},<br \/>\r\nauthor = {Ponce-Hinestroza, A-N and Torres-Mendez, Luz Abril and Drews, Paulo },<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\nbooktitle = {OCEANS 2016 MTS\/IEEE Monterey},<br \/>\r\npages = {1--6},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('11','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Ponce-Hinestroza, A-N;  Torres-Mendez, Luz Abril;  Drews, Paulo<\/p><p class=\"tp_pub_title\">Using a MRF-BP Model with Color Adaptive Training for Underwater Color Restoration <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">ICPR 2016 IEEE Cancun, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1\u20136, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_12\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('12','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_12\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{ponce2016icpr,<br \/>\r\ntitle = {Using a MRF-BP Model with Color Adaptive Training for Underwater Color Restoration},<br \/>\r\nauthor = {Ponce-Hinestroza, A-N and Torres-Mendez, Luz Abril and Drews, Paulo},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\nbooktitle = {ICPR 2016 IEEE Cancun},<br \/>\r\npages = {1--6},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('12','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_proceedings\">Proceedings<\/h3><div class=\"tp_publication tp_publication_proceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Mirelez-Delgado, Flabio;  Morales-Diaz, America B.;  Rios-Cabrera, Reyes;  Gutierrez-Flores, Hugo<\/p><p class=\"tp_pub_title\">Towards intelligent robotic agents for cooperative tasks <span class=\"tp_pub_type tp_  proceedings\">Proceedings<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_139\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('139','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_139\" style=\"display:none;\"><div 
class=\"tp_bibtex_entry\"><pre>@proceedings{Mirelez-Delgado2016,<br \/>\r\ntitle = {Towards intelligent robotic agents for cooperative tasks},<br \/>\r\nauthor = {Mirelez-Delgado, Flabio and Morales-Diaz, America B. and Rios-Cabrera, Reyes and Gutierrez-Flores, Hugo},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-06-06},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {proceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('139','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_proceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Mirelez-Delgado, Flabio;  Morales-Diaz, America B.;  Rios-Cabrera, Reyes<\/p><p class=\"tp_pub_title\">Kinematic control for an omnidirectional mobile manipulator <span class=\"tp_pub_type tp_  proceedings\">Proceedings<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_157\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('157','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_157\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@proceedings{Mirelez-Delgado2016b,<br \/>\r\ntitle = {Kinematic control for an omnidirectional mobile manipulator},<br \/>\r\nauthor = {Mirelez-Delgado, Flabio and Morales-Diaz, America B. 
and Rios-Cabrera, Reyes},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-06-06},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {proceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('157','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2015\">2015<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Journal Articles<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Luna-Aguilar, C. A.;  Castelan, Mario;  Morales-Diaz, America B.;  Nadeu, C.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('136','tp_links')\" style=\"cursor:pointer;\">Incorporaci\u00f3n de sensores ac\u00fasticos en el control de regulaci\u00f3n a un punto de un robot m\u00f3vil<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_pages\">pp. 582-587, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_136\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('136','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_136\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('136','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_136\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Luna-Aguilar2015,<br \/>\r\ntitle = {Incorporaci\\'{o}n de sensores ac\\'{u}sticos en el control de regulaci\\'{o}n a un punto de un robot m\\'{o}vil},<br \/>\r\nauthor = {Luna-Aguilar, C. A. and Castelan, Mario and Morales-Diaz, America B. 
and Nadeu, C.},<br \/>\r\nurl = {https:\/\/upcommons.upc.edu\/handle\/2117\/102668},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-06-06},<br \/>\r\npages = {582-587},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('136','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_136\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/upcommons.upc.edu\/handle\/2117\/102668\" title=\"https:\/\/upcommons.upc.edu\/handle\/2117\/102668\" target=\"_blank\">https:\/\/upcommons.upc.edu\/handle\/2117\/102668<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('136','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Aviles-Vi\u00f1as, Jaime F;  Lopez-Juarez, Ismael;  Rios-Cabrera, Reyes<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('13','tp_links')\" style=\"cursor:pointer;\">Acquisition of welding skills in industrial robots<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Industrial Robot: An International Journal, <\/span><span class=\"tp_pub_additional_volume\">vol. 42, <\/span><span class=\"tp_pub_additional_number\">no. 2, <\/span><span class=\"tp_pub_additional_pages\">pp. 
156-166, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_13\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('13','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_13\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('13','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_13\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('13','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_13\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{doi:10.1108\/IR-09-2014-0395,<br \/>\r\ntitle = {Acquisition of welding skills in industrial robots},<br \/>\r\nauthor = {Aviles-Vi\\~{n}as, Jaime F and Lopez-Juarez, Ismael and Rios-Cabrera, Reyes },<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1108\/IR-09-2014-0395},<br \/>\r\ndoi = {10.1108\/IR-09-2014-0395},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\njournal = {Industrial Robot: An International Journal},<br \/>\r\nvolume = {42},<br \/>\r\nnumber = {2},<br \/>\r\npages = {156-166},<br \/>\r\nabstract = {Purpose \\textendash The purpose of this paper was to propose a method based on an Artificial Neural Network and a real-time vision algorithm, to learn welding skills in industrial robotics. Design\/methodology\/approach \\textendash By using an optic camera to measure the bead geometry (width and height), the authors propose a real-time computer vision algorithm to extract training patterns and to enable an industrial robot to acquire and learn autonomously the welding skill. To test the approach, an industrial KUKA robot and a welding gas metal arc welding machine were used in a manufacturing cell. 
Findings \\textendash Several data analyses are described, showing empirically that industrial robots can acquire the skill even if the specific welding parameters are unknown. Research limitations\/implications \\textendash The approach considers only stringer beads. Weave bead and bead penetration are not considered. Practical implications \\textendash With the proposed approach, it is possible to learn specific welding parameters despite of the material, type of robot or welding machine. This is due to the fact that the feedback system produces automatic measurements that are labelled prior to the learning process. Originality\/value \\textendash The main contribution is that the complex learning process is reduced into an input-process-output system, where the process part is learnt automatically without human supervision, by registering the patterns with an automatically calibrated vision system.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('13','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_13\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Purpose \u2013 The purpose of this paper was to propose a method based on an Artificial Neural Network and a real-time vision algorithm, to learn welding skills in industrial robotics. Design\/methodology\/approach \u2013 By using an optic camera to measure the bead geometry (width and height), the authors propose a real-time computer vision algorithm to extract training patterns and to enable an industrial robot to acquire and learn autonomously the welding skill. To test the approach, an industrial KUKA robot and a welding gas metal arc welding machine were used in a manufacturing cell. 
Findings \u2013 Several data analyses are described, showing empirically that industrial robots can acquire the skill even if the specific welding parameters are unknown. Research limitations\/implications \u2013 The approach considers only stringer beads. Weave bead and bead penetration are not considered. Practical implications \u2013 With the proposed approach, it is possible to learn specific welding parameters despite of the material, type of robot or welding machine. This is due to the fact that the feedback system produces automatic measurements that are labelled prior to the learning process. Originality\/value \u2013 The main contribution is that the complex learning process is reduced into an input-process-output system, where the process part is learnt automatically without human supervision, by registering the patterns with an automatically calibrated vision system.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('13','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_13\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1108\/IR-09-2014-0395\" title=\"http:\/\/dx.doi.org\/10.1108\/IR-09-2014-0395\" target=\"_blank\">http:\/\/dx.doi.org\/10.1108\/IR-09-2014-0395<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1108\/IR-09-2014-0395\" title=\"Follow DOI:10.1108\/IR-09-2014-0395\" target=\"_blank\">doi:10.1108\/IR-09-2014-0395<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('13','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Navarro-Gonzalez, Jose Luis;  Lopez-Juarez, Ismael;  Ordaz-Hernandez, Keny;  Rios-Cabrera, Reyes<\/p><p class=\"tp_pub_title\"><a 
class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('14','tp_links')\" style=\"cursor:pointer;\">On-line incremental learning for unknown conditions during assembly operations with industrial robots<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Evolving Systems, <\/span><span class=\"tp_pub_additional_volume\">vol. 6, <\/span><span class=\"tp_pub_additional_number\">no. 2, <\/span><span class=\"tp_pub_additional_pages\">pp. 101\u2013114, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1868-6486<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_14\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('14','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_14\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('14','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_14\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('14','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_14\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Navarro-Gonzalez2015,<br \/>\r\ntitle = {On-line incremental learning for unknown conditions during assembly operations with industrial robots},<br \/>\r\nauthor = {Navarro-Gonzalez, Jose Luis and Lopez-Juarez, Ismael and Ordaz-Hernandez, Keny and Rios-Cabrera, Reyes },<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1007\/s12530-014-9125-x},<br \/>\r\ndoi = {10.1007\/s12530-014-9125-x},<br \/>\r\nissn = {1868-6486},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\njournal = {Evolving 
Systems},<br \/>\r\nvolume = {6},<br \/>\r\nnumber = {2},<br \/>\r\npages = {101--114},<br \/>\r\nabstract = {The assembly operation using industrial robots can be accomplished successfully in well-structured environments where the mating pair location is known in advance. However, in real-world scenarios there are uncertainties associated to sensing, control and modelling errors that make the assembly task very complex. In addition, there are also unmodeled uncertainties that have to be taken into account for an effective control algorithm to succeed. Among these uncertainties, it can be mentioned disturbances, backlash and aging of mechanisms. In this paper, a method to overcome the effect of those uncertainties based on the Fuzzy ARTMAP artificial neural network (ANN) to successfully accomplish the assembly task is proposed. Experimental work is reported using an industrial 6 DOF robot arm in conjunction with a vision system for part location and wrist force\/torque sensing data for assembly. Force data is fed into an ANN evolving controller during a typical peg in hole (PIH) assembly operation. The controller uses an incremental learning mechanism that is solely guided by the sensed forces. In this article, two approaches are presented in order to compare the incremental learning capability of the manipulator. The first approach uses a primitive knowledge base (PKB) containing 16 primitive movements to learn online the first insertion. During assembly, the manipulator learns new patterns according to the learning criteria which turn the PKB into an enhanced knowledge base (EKB). During a second insertion the controller uses effectively the EKB and operation improves. The second approach employs minimum information (it contains only the assembly direction) and the process starts from scratch. After several operations, that knowledge base increases by including only the needed patterns to perform the insertion. 
Experimental results showed that the evolving controller is able to assemble the matting pairs enhancing its knowledge whenever it is needed depending on the part geometry and level of expertise. Our approach is demonstrated through several PIH operations with different tolerances and part geometry. As the robot's expertise evolves, the PIH operation is carried out faster with shorter assembly trajectories.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('14','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_14\" style=\"display:none;\"><div class=\"tp_abstract_entry\">The assembly operation using industrial robots can be accomplished successfully in well-structured environments where the mating pair location is known in advance. However, in real-world scenarios there are uncertainties associated to sensing, control and modelling errors that make the assembly task very complex. In addition, there are also unmodeled uncertainties that have to be taken into account for an effective control algorithm to succeed. Among these uncertainties, it can be mentioned disturbances, backlash and aging of mechanisms. In this paper, a method to overcome the effect of those uncertainties based on the Fuzzy ARTMAP artificial neural network (ANN) to successfully accomplish the assembly task is proposed. Experimental work is reported using an industrial 6 DOF robot arm in conjunction with a vision system for part location and wrist force\/torque sensing data for assembly. Force data is fed into an ANN evolving controller during a typical peg in hole (PIH) assembly operation. The controller uses an incremental learning mechanism that is solely guided by the sensed forces. In this article, two approaches are presented in order to compare the incremental learning capability of the manipulator. 
The first approach uses a primitive knowledge base (PKB) containing 16 primitive movements to learn online the first insertion. During assembly, the manipulator learns new patterns according to the learning criteria which turn the PKB into an enhanced knowledge base (EKB). During a second insertion the controller uses effectively the EKB and operation improves. The second approach employs minimum information (it contains only the assembly direction) and the process starts from scratch. After several operations, that knowledge base increases by including only the needed patterns to perform the insertion. Experimental results showed that the evolving controller is able to assemble the matting pairs enhancing its knowledge whenever it is needed depending on the part geometry and level of expertise. Our approach is demonstrated through several PIH operations with different tolerances and part geometry. As the robot's expertise evolves, the PIH operation is carried out faster with shorter assembly trajectories.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('14','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_14\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1007\/s12530-014-9125-x\" title=\"http:\/\/dx.doi.org\/10.1007\/s12530-014-9125-x\" target=\"_blank\">http:\/\/dx.doi.org\/10.1007\/s12530-014-9125-x<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1007\/s12530-014-9125-x\" title=\"Follow DOI:10.1007\/s12530-014-9125-x\" target=\"_blank\">doi:10.1007\/s12530-014-9125-x<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('14','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p 
class=\"tp_pub_author\"> Navarro-Gonzalez, Jose Luis;  Lopez-Juarez, Ismael;  Rios-Cabrera, Reyes;  Ordaz-Hernandez, Keny<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('15','tp_links')\" style=\"cursor:pointer;\">On-line knowledge acquisition and enhancement in robotic assembly tasks<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Robotics and Computer-Integrated Manufacturing, <\/span><span class=\"tp_pub_additional_volume\">vol. 33, <\/span><span class=\"tp_pub_additional_pages\">pp. 78 - 89, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 0736-5845<\/span><span class=\"tp_pub_additional_note\">, (Special Issue on Knowledge Driven Robotics and Manufacturing)<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_15\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('15','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_15\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('15','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_15\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('15','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_15\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{NavarroGonzalez201578b,<br \/>\r\ntitle = {On-line knowledge acquisition and enhancement in robotic assembly tasks},<br \/>\r\nauthor = {Navarro-Gonzalez, Jose Luis and Lopez-Juarez, Ismael and Rios-Cabrera, Reyes and Ordaz-Hernandez, Keny},<br \/>\r\nurl = 
{http:\/\/www.sciencedirect.com\/science\/article\/pii\/S073658451400074X},<br \/>\r\ndoi = {10.1016\/j.rcim.2014.08.013},<br \/>\r\nissn = {0736-5845},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\njournal = {Robotics and Computer-Integrated Manufacturing},<br \/>\r\nvolume = {33},<br \/>\r\npages = {78 - 89},<br \/>\r\nabstract = {Abstract Industrial robots are reliable machines for manufacturing tasks such as welding, panting, assembly, palletizing or kitting operations. They are traditionally programmed by an operator using a teach pendant in a point-to-point scheme with limited sensing capabilities such as industrial vision systems and force\/torque sensing. The use of these sensing capabilities is associated to the particular robot controller, operative systems and programming language. Today, robots can react to environment changes specific to their task domain but are still unable to learn skills to effectively use their current knowledge. The need for such a skill in unstructured environments where knowledge can be acquired and enhanced is desirable so that robots can effectively interact in multimodal real-world scenarios. In this article we present a multimodal assembly controller (MAC) approach to embed and effectively enhance knowledge into industrial robots working in multimodal manufacturing scenarios such as assembly during kitting operations with varying shapes and tolerances. During learning, the robot uses its vision and force capabilities resembling a human operator carrying out the same operation. The approach consists of using a MAC based on the Fuzzy ARTMAP artificial neural network in conjunction with a knowledge base. The robot starts the operation having limited initial knowledge about what task it has to accomplish. During the operation, the robot learns the skill for recognising assembly parts and how to assemble them. 
The skill acquisition is evaluated by counting the steps to complete the assembly, length of the followed assembly path and compliant behaviour. The performance improves with time so that the robot becomes an expert demonstrated by the assembly of a kit with different part geometries. The kit is unknown by the robot at the beginning of the operation; therefore, the kit type, location and orientation are unknown as well as the parts to be assembled since they are randomly fed by a conveyor belt.},<br \/>\r\nnote = {Special Issue on Knowledge Driven Robotics and Manufacturing},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('15','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_15\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Abstract Industrial robots are reliable machines for manufacturing tasks such as welding, panting, assembly, palletizing or kitting operations. They are traditionally programmed by an operator using a teach pendant in a point-to-point scheme with limited sensing capabilities such as industrial vision systems and force\/torque sensing. The use of these sensing capabilities is associated to the particular robot controller, operative systems and programming language. Today, robots can react to environment changes specific to their task domain but are still unable to learn skills to effectively use their current knowledge. The need for such a skill in unstructured environments where knowledge can be acquired and enhanced is desirable so that robots can effectively interact in multimodal real-world scenarios. 
In this article we present a multimodal assembly controller (MAC) approach to embed and effectively enhance knowledge into industrial robots working in multimodal manufacturing scenarios such as assembly during kitting operations with varying shapes and tolerances. During learning, the robot uses its vision and force capabilities resembling a human operator carrying out the same operation. The approach consists of using a MAC based on the Fuzzy ARTMAP artificial neural network in conjunction with a knowledge base. The robot starts the operation having limited initial knowledge about what task it has to accomplish. During the operation, the robot learns the skill for recognising assembly parts and how to assemble them. The skill acquisition is evaluated by counting the steps to complete the assembly, length of the followed assembly path and compliant behaviour. The performance improves with time so that the robot becomes an expert demonstrated by the assembly of a kit with different part geometries. 
The kit is unknown by the robot at the beginning of the operation; therefore, the kit type, location and orientation are unknown as well as the parts to be assembled since they are randomly fed by a conveyor belt.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('15','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_15\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S073658451400074X\" title=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S073658451400074X\" target=\"_blank\">http:\/\/www.sciencedirect.com\/science\/article\/pii\/S073658451400074X<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/http:\/\/dx.doi.org\/10.1016\/j.rcim.2014.08.013\" title=\"Follow DOI:http:\/\/dx.doi.org\/10.1016\/j.rcim.2014.08.013\" target=\"_blank\">doi:http:\/\/dx.doi.org\/10.1016\/j.rcim.2014.08.013<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('15','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Castelan, Mario;  Cruz-Perez, Elier;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('16','tp_links')\" style=\"cursor:pointer;\">A Photometric Sampling Strategy for Reflectance Characterization and Transference<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Computaci\u00f3n y Sistemas, <\/span><span class=\"tp_pub_additional_volume\">vol. 19, <\/span><span class=\"tp_pub_additional_number\">no. 
2, <\/span><span class=\"tp_pub_additional_pages\">pp. 255-272, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_16\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('16','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_16\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('16','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_16\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Castelan2015,<br \/>\r\ntitle = {A Photometric Sampling Strategy for Reflectance Characterization and Transference},<br \/>\r\nauthor = {Castelan, Mario and Cruz-Perez, Elier and Torres-Mendez, Luz Abril},<br \/>\r\nurl = {http:\/\/www.cys.cic.ipn.mx\/ojs\/index.php\/CyS\/article\/view\/1944},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\njournal = {Computaci\\'{o}n y Sistemas},<br \/>\r\nvolume = {19},<br \/>\r\nnumber = {2},<br \/>\r\npages = {255-272},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('16','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_16\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.cys.cic.ipn.mx\/ojs\/index.php\/CyS\/article\/view\/1944\" title=\"http:\/\/www.cys.cic.ipn.mx\/ojs\/index.php\/CyS\/article\/view\/1944\" target=\"_blank\">http:\/\/www.cys.cic.ipn.mx\/ojs\/index.php\/CyS\/article\/view\/1944<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" 
onclick=\"teachpress_pub_showhide('16','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_conference\">Conferences<\/h3><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Luna-Aguilar, C. A.;  Castelan, Mario;  Morales-Diaz, America B.;  Nadeu, C.<\/p><p class=\"tp_pub_title\">Incorporaci\u00f3n de sensores ac\u00fasticos en el control de regulaci\u00f3n a un punto de un robot m\u00f3vil <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_137\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('137','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_137\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{Luna-Aguilar2015b,<br \/>\r\ntitle = {Incorporaci\\'{o}n de sensores ac\\'{u}sticos en el control de regulaci\\'{o}n a un punto de un robot m\\'{o}vil},<br \/>\r\nauthor = {Luna-Aguilar, C. A. and Castelan, Mario and Morales-Diaz, America B. 
and Nadeu, C.},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-06-06},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('137','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Rodriguez-Telles, Francisco G<\/p><p class=\"tp_pub_title\">Ethologically inspired reactive exploration of coral reefs with collision avoidance: Bridging the gap between human and robot spatial understanding of unstructured environments <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">Intelligent Robots and Systems (IROS), 2015 IEEE\/RSJ International Conference on, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_17\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('17','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_17\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{maldonado2015ethologically,<br \/>\r\ntitle = {Ethologically inspired reactive exploration of coral reefs with collision avoidance: Bridging the gap between human and robot spatial understanding of unstructured environments},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Rodriguez-Telles, Francisco G},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\nbooktitle = {Intelligent Robots and Systems (IROS), 2015 IEEE\/RSJ International Conference on},<br \/>\r\npages = {4872--4879},<br 
\/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('17','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\">Autonomous robotic exploration of coral reefs using a visual attention-driven strategy for detecting and tracking regions of interest <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">OCEANS 2015-Genova, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_18\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('18','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_18\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{maldonado2015autonomous,<br \/>\r\ntitle = {Autonomous robotic exploration of coral reefs using a visual attention-driven strategy for detecting and tracking regions of interest},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril <br \/>\r\n},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\nbooktitle = {OCEANS 2015-Genova},<br \/>\r\npages = {1--5},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('18','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" 
id=\"tp_h3_inproceedings\">Proceedings Articles<\/h3><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Labastida-Vald\u00e9s, L.;  Torres-Mendez, Luz Abril;  Hutchinson, S. A.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('106','tp_links')\" style=\"cursor:pointer;\">Using the motion perceptibility measure to classify points of interest for visual-based AUV guidance in a reef ecosystem<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2015 - MTS\/IEEE Washington, <\/span><span class=\"tp_pub_additional_pages\">pp. 1-6, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_106\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('106','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_106\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('106','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_106\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{7404605,<br \/>\r\ntitle = {Using the motion perceptibility measure to classify points of interest for visual-based AUV guidance in a reef ecosystem},<br \/>\r\nauthor = {Labastida-Vald\\'{e}s, L. and Torres-Mendez, Luz Abril and Hutchinson, S. 
A.},<br \/>\r\nurl = {http:\/\/ieeexplore.ieee.org\/document\/7404605\/},<br \/>\r\ndoi = {10.23919\/OCEANS.2015.7404605},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-10-01},<br \/>\r\nbooktitle = {OCEANS 2015 - MTS\/IEEE Washington},<br \/>\r\npages = {1-6},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('106','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_106\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/ieeexplore.ieee.org\/document\/7404605\/\" title=\"http:\/\/ieeexplore.ieee.org\/document\/7404605\/\" target=\"_blank\">http:\/\/ieeexplore.ieee.org\/document\/7404605\/<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.23919\/OCEANS.2015.7404605\" title=\"Follow DOI:10.23919\/OCEANS.2015.7404605\" target=\"_blank\">doi:10.23919\/OCEANS.2015.7404605<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('106','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Romero-Mart\u00ednez, C. 
E.;  Torres-Mendez, Luz Abril;  Martinez-Garcia, Edgar A.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('108','tp_links')\" style=\"cursor:pointer;\">Modeling motor-perceptual behaviors to enable intuitive paths in an aquatic robot<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2015 - MTS\/IEEE Washington, <\/span><span class=\"tp_pub_additional_pages\">pp. 1-5, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_108\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('108','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_108\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('108','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_108\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{7404424,<br \/>\r\ntitle = {Modeling motor-perceptual behaviors to enable intuitive paths in an aquatic robot},<br \/>\r\nauthor = {Romero-Mart\\'{i}nez, C. E. 
and Torres-Mendez, Luz Abril and Martinez-Garcia, Edgar A.},<br \/>\r\nurl = {http:\/\/ieeexplore.ieee.org\/document\/7404424\/},<br \/>\r\ndoi = {10.23919\/OCEANS.2015.7404424},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-10-01},<br \/>\r\nbooktitle = {OCEANS 2015 - MTS\/IEEE Washington},<br \/>\r\npages = {1-5},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('108','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_108\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/ieeexplore.ieee.org\/document\/7404424\/\" title=\"http:\/\/ieeexplore.ieee.org\/document\/7404424\/\" target=\"_blank\">http:\/\/ieeexplore.ieee.org\/document\/7404424\/<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.23919\/OCEANS.2015.7404424\" title=\"Follow DOI:10.23919\/OCEANS.2015.7404424\" target=\"_blank\">doi:10.23919\/OCEANS.2015.7404424<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('108','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Mirelez-Delgado, Flabio;  Morales-Diaz, America B.;  Rios-Cabrera, Reyes;  Perez-Villeda, Hector Manuel<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('105','tp_links')\" style=\"cursor:pointer;\">Control Servovisual de un Kuka youBot para la manipulacion y traslado de objetos<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span 
class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_105\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('105','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_105\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('105','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_105\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('105','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_105\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{Mireles-Delgado2015,<br \/>\r\ntitle = {Control Servovisual de un Kuka youBot para la manipulacion y traslado de objetos},<br \/>\r\nauthor = {Mirelez-Delgado, Flabio and Morales-Diaz, America B. and Rios-Cabrera, Reyes and Perez-Villeda, Hector Manuel},<br \/>\r\nurl = {http:\/\/amca.mx\/memorias\/amca2015\/articulos\/0044_MiCT3-04.pdf},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\nabstract = {Este trabajo presenta la implementaci\u00f3n de un Control Servovisual Basado en<br \/>\r\nImagen en un robot manipulador m\u00f3vil omnidireccional Kuka youBot. El sistema de visi\u00f3n<br \/>\r\nest\u00e1 compuesto por un sensor RGB-D Asus Xtion Pror. La ley de control implementada tiene<br \/>\r\nla estructura de un PD cl\u00e1sico para la plataforma m\u00f3vil. El manipulador m\u00f3vil se desplaza a<br \/>\r\npuntos 3D conocidos mediante el c\u00e1lculo de cinem\u00e1tica inversa. 
En este art\u00edculo se demuestra<br \/>\r\nla efectividad del algoritmo en la localizaci\u00f3n del objeto de inter\u00e9s as\u00ed como en la manipulaci\u00f3n<br \/>\r\ndel mismo para llevarlo de su lugar original a otro espacio deseado.<br \/>\r\n},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('105','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_105\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Este trabajo presenta la implementaci\u00f3n de un Control Servovisual Basado en<br \/>\r\nImagen en un robot manipulador m\u00f3vil omnidireccional Kuka youBot. El sistema de visi\u00f3n<br \/>\r\nest\u00e1 compuesto por un sensor RGB-D Asus Xtion Pror. La ley de control implementada tiene<br \/>\r\nla estructura de un PD cl\u00e1sico para la plataforma m\u00f3vil. El manipulador m\u00f3vil se desplaza a<br \/>\r\npuntos 3D conocidos mediante el c\u00e1lculo de cinem\u00e1tica inversa. 
En este art\u00edculo se demuestra<br \/>\r\nla efectividad del algoritmo en la localizaci\u00f3n del objeto de inter\u00e9s as\u00ed como en la manipulaci\u00f3n<br \/>\r\ndel mismo para llevarlo de su lugar original a otro espacio deseado.<br \/>\r\n<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('105','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_105\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-file-pdf\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/amca.mx\/memorias\/amca2015\/articulos\/0044_MiCT3-04.pdf\" title=\"http:\/\/amca.mx\/memorias\/amca2015\/articulos\/0044_MiCT3-04.pdf\" target=\"_blank\">http:\/\/amca.mx\/memorias\/amca2015\/articulos\/0044_MiCT3-04.pdf<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('105','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\">using supercolor-pixels descriptors for tracking relevant cues in underwater environments with poor visibility conditions <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_publisher\">ICRA 2015 Workshop on Visual Place Recognition in Changing Environments, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_107\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('107','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_107\" style=\"display:none;\"><div 
class=\"tp_bibtex_entry\"><pre>@inproceedings{Maldonao-Ramirez2015,<br \/>\r\ntitle = {using supercolor-pixels descriptors for tracking relevant cues in underwater environments with poor visibility conditions},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-00-00},<br \/>\r\npublisher = {ICRA 2015 Workshop on Visual Place Recognition in Changing Environments},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('107','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Gonz\u00e1lez-Garc\u00eda, Luis C.;  Torres-Mendez, Luz Abril;  Mart\u00ednez, Julieta;  Sattar, Junaed;  Little, James<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('109','tp_links')\" style=\"cursor:pointer;\">Are You Talking to Me? Detecting Attention in First-Person Interactions<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_pages\">pp.  
137-142, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 2308-4197<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_109\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('109','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_109\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('109','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_109\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('109','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_109\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{Gonz\\'{a}lez-Garc\\'{i}a2015,<br \/>\r\ntitle = {Are You Talking to Me? Detecting Attention in First-Person Interactions},<br \/>\r\nauthor = {Gonz\\'{a}lez-Garc\\'{i}a, Luis C. and Torres-Mendez, Luz Abril and Mart\\'{i}nez, Julieta and Sattar, Junaed and Little, James},<br \/>\r\nurl = {https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecting_Attention_in_First-Person_Interactions},<br \/>\r\nissn = {2308-4197},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-00-00},<br \/>\r\npages = { 137-142},<br \/>\r\nabstract = {This paper presents an approach for a mobile robot to detect the level of attention of a human in first-person interactions. Determining the degree of attention is an essential task in day-today interactions. In particular, we are interested in natural Human-Robot Interactions (HRI's) during which a robot needs to estimate the focus and the degree of the user's attention to determine the most appropriate moment to initiate, continue and terminate an interaction. 
Our approach is novel in that it uses a linear regression technique to classify raw depth-image data according to three levels of user attention on the robot (null, partial and total). This is achieved by measuring the linear independence of the input range data with respect to a dataset of user poses. We overcome the problem of time overhead that a large database can add to real-time Linear Regression Classification (LRC) methods by including only the feature vectors with the most relevant information. We demonstrate the approach by presenting experimental data from human-interaction studies with a PR2 robot. Results demonstrate our attention classifier to be accurate and robust in detecting the attention levels of human participants. I. INTRODUCTION Determining the attention of people is an essential component of day-today interactions. We are constantly monitoring other people's gaze, head and body poses while engaged in a conversation [1][2][3]. We also perform attention estimation in order to perform natural interactions [4][5]. In short, attention estimation is a fundamental component of effective social interaction; therefore, for robots to be efficient social agents it is necessary to provide them with reliable mechanisms to estimate human attention. We believe that human attention estimation, particularly in the context of interactions, is highly subjective. However, attempts to model it have been relatively successful, e.g., allowing a robot to ask for directions when it finds a human, as in the work of Weiss et al. [6]. Nonetheless, the state-of-the-art is still far from reaching a point where a robot can successfully interact with humans without relying on mechanisms not common to natural language. Recently, the use of range images to make more natural human-machine interfaces has been in the agenda of researchers, like in the case of the Microsoft Kinect TM , which delivers a skeleton of <br \/>\r\n<br \/>\r\nAre You Talking to Me? 
Detecting Attention in First-Person Interactions (PDF Download Available). Available from: https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecting_Attention_in_First-Person_Interactions [accessed Jun 17, 2017].},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('109','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_109\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This paper presents an approach for a mobile robot to detect the level of attention of a human in first-person interactions. Determining the degree of attention is an essential task in day-today interactions. In particular, we are interested in natural Human-Robot Interactions (HRI's) during which a robot needs to estimate the focus and the degree of the user's attention to determine the most appropriate moment to initiate, continue and terminate an interaction. Our approach is novel in that it uses a linear regression technique to classify raw depth-image data according to three levels of user attention on the robot (null, partial and total). This is achieved by measuring the linear independence of the input range data with respect to a dataset of user poses. We overcome the problem of time overhead that a large database can add to real-time Linear Regression Classification (LRC) methods by including only the feature vectors with the most relevant information. We demonstrate the approach by presenting experimental data from human-interaction studies with a PR2 robot. Results demonstrate our attention classifier to be accurate and robust in detecting the attention levels of human participants. I. INTRODUCTION Determining the attention of people is an essential component of day-today interactions. 
We are constantly monitoring other people's gaze, head and body poses while engaged in a conversation [1][2][3]. We also perform attention estimation in order to perform natural interactions [4][5]. In short, attention estimation is a fundamental component of effective social interaction; therefore, for robots to be efficient social agents it is necessary to provide them with reliable mechanisms to estimate human attention. We believe that human attention estimation, particularly in the context of interactions, is highly subjective. However, attempts to model it have been relatively successful, e.g., allowing a robot to ask for directions when it finds a human, as in the work of Weiss et al. [6]. Nonetheless, the state-of-the-art is still far from reaching a point where a robot can successfully interact with humans without relying on mechanisms not common to natural language. Recently, the use of range images to make more natural human-machine interfaces has been in the agenda of researchers, like in the case of the Microsoft Kinect TM , which delivers a skeleton of <br \/>\r\n<br \/>\r\nAre You Talking to Me? Detecting Attention in First-Person Interactions (PDF Download Available). 
Available from: https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecting_Attention_in_First-Person_Interactions [accessed Jun 17, 2017].<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('109','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_109\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecting_Attention_in_First-Person_Interactions\" title=\"https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecti[...]\" target=\"_blank\">https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecti[...]<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('109','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2014\">2014<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Journal Articles<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Martinez-Garcia, Edgar A.;  Torres-Mendez, Luz Abril;  Elara Mohan, Rajesh<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('20','tp_links')\" style=\"cursor:pointer;\">Multi-legged robot dynamics navigation model with optical flow<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">International Journal of Intelligent Unmanned Systems, <\/span><span class=\"tp_pub_additional_volume\">vol. 2, <\/span><span class=\"tp_pub_additional_number\">no. 2, <\/span><span class=\"tp_pub_additional_pages\">pp. 
121-139, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_20\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('20','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_20\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('20','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_20\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('20','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_20\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{doi:10.1108\/IJIUS-04-2014-0003,<br \/>\r\ntitle = {Multi-legged robot dynamics navigation model with optical flow},<br \/>\r\nauthor = {Martinez-Garcia, Edgar A. and Torres-Mendez, Luz Abril and Elara Mohan, Rajesh },<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003},<br \/>\r\ndoi = {10.1108\/IJIUS-04-2014-0003},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-01-01},<br \/>\r\njournal = {International Journal of Intelligent Unmanned Systems},<br \/>\r\nvolume = {2},<br \/>\r\nnumber = {2},<br \/>\r\npages = {121-139},<br \/>\r\nabstract = {Purpose \\textendash The purpose of this paper is to establish analytical and numerical solutions of a navigational law to estimate displacements of hyper-static multi-legged mobile robots, which combines: monocular vision (optical flow of regional invariants) and legs dynamics. Design\/methodology\/approach \\textendash In this study the authors propose a Euler-Lagrange equation that control legs\u2019 joints to control robot's displacements. Robot's rotation and translational velocities are feedback by motion features of visual invariant descriptors. 
A general analytical solution of a derivative navigation law is proposed for hyper-static robots. The feedback is formulated with the local speed rate obtained from optical flow of visual regional invariants. The proposed formulation includes a data association algorithm aimed to correlate visual invariant descriptors detected in sequential images through monocular vision. The navigation law is constrained by a set of three kinematic equilibrium conditions for navigational scenarios: constant acceleration, constant velocity, and instantaneous acceleration. Findings \\textendash The proposed data association method concerns local motions of multiple invariants (enhanced MSER) by minimizing the norm of multidimensional optical flow feature vectors. Kinematic measurements are used as observable arguments in the general dynamic control equation; while the legs joints dynamics model is used to formulate the controllable arguments. Originality\/value \\textendash The given analysis does not combine sensor data of any kind, but only monocular passive vision. The approach automatically detects environmental invariant descriptors with an enhanced version of the MSER method. Only optical flow vectors and robot's multi-leg dynamics are used to formulate descriptive rotational and translational motions for self-positioning.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('20','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_20\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Purpose \u2013 The purpose of this paper is to establish analytical and numerical solutions of a navigational law to estimate displacements of hyper-static multi-legged mobile robots, which combines: monocular vision (optical flow of regional invariants) and legs dynamics. 
Design\/methodology\/approach \u2013 In this study the authors propose a Euler-Lagrange equation that control legs\u2019 joints to control robot's displacements. Robot's rotation and translational velocities are feedback by motion features of visual invariant descriptors. A general analytical solution of a derivative navigation law is proposed for hyper-static robots. The feedback is formulated with the local speed rate obtained from optical flow of visual regional invariants. The proposed formulation includes a data association algorithm aimed to correlate visual invariant descriptors detected in sequential images through monocular vision. The navigation law is constrained by a set of three kinematic equilibrium conditions for navigational scenarios: constant acceleration, constant velocity, and instantaneous acceleration. Findings \u2013 The proposed data association method concerns local motions of multiple invariants (enhanced MSER) by minimizing the norm of multidimensional optical flow feature vectors. Kinematic measurements are used as observable arguments in the general dynamic control equation; while the legs joints dynamics model is used to formulate the controllable arguments. Originality\/value \u2013 The given analysis does not combine sensor data of any kind, but only monocular passive vision. The approach automatically detects environmental invariant descriptors with an enhanced version of the MSER method. 
Only optical flow vectors and robot's multi-leg dynamics are used to formulate descriptive rotational and translational motions for self-positioning.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('20','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_20\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003\" title=\"http:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003\" target=\"_blank\">http:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003\" title=\"Follow DOI:10.1108\/IJIUS-04-2014-0003\" target=\"_blank\">doi:10.1108\/IJIUS-04-2014-0003<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('20','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rios-Cabrera, Reyes;  Tuytelaars, Tinne<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('64','tp_links')\" style=\"cursor:pointer;\">Boosting Masked Dominant Orientation Templates for Efficient Object Detection<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Comput. Vis. Image Underst., <\/span><span class=\"tp_pub_additional_volume\">vol. 120, <\/span><span class=\"tp_pub_additional_pages\">pp. 
103\u2013116, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1077-3142<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_64\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('64','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_64\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('64','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_64\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Rios-Cabrera:2014:BMD:2583127.2583285,<br \/>\r\ntitle = {Boosting Masked Dominant Orientation Templates for Efficient Object Detection},<br \/>\r\nauthor = {Rios-Cabrera, Reyes and Tuytelaars, Tinne},<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1016\/j.cviu.2013.12.008},<br \/>\r\ndoi = {10.1016\/j.cviu.2013.12.008},<br \/>\r\nissn = {1077-3142},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-01-01},<br \/>\r\njournal = {Comput. Vis. 
Image Underst.},<br \/>\r\nvolume = {120},<br \/>\r\npages = {103--116},<br \/>\r\npublisher = {Elsevier Science Inc.},<br \/>\r\naddress = {New York, NY, USA},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('64','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_64\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1016\/j.cviu.2013.12.008\" title=\"http:\/\/dx.doi.org\/10.1016\/j.cviu.2013.12.008\" target=\"_blank\">http:\/\/dx.doi.org\/10.1016\/j.cviu.2013.12.008<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1016\/j.cviu.2013.12.008\" title=\"Follow DOI:10.1016\/j.cviu.2013.12.008\" target=\"_blank\">doi:10.1016\/j.cviu.2013.12.008<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('64','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_conference\">Conferences<\/h3><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Martinez-Gonzalez, Pablo;  Varas, David;  Castelan, Mario;  Camacho, Margarita;  Marques, Ferran;  Arechavaleta, Gustavo<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('19','tp_links')\" style=\"cursor:pointer;\">3D shape reconstruction from a humanoid generated video sequence<\/a> <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">2014 IEEE-RAS International Conference on Humanoid Robots, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 2164-0572<\/span>.<\/p><p 
class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_19\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('19','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_19\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('19','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_19\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('19','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_19\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{7041439,<br \/>\r\ntitle = {3D shape reconstruction from a humanoid generated video sequence},<br \/>\r\nauthor = {Martinez-Gonzalez, Pablo and Varas, David and Castelan, Mario and Camacho, Margarita and Marques, Ferran and Arechavaleta, Gustavo },<br \/>\r\nurl = {http:\/\/ieeexplore.ieee.org\/document\/7041439\/?arnumber=7041439\\&amp;tag=1},<br \/>\r\ndoi = {10.1109\/HUMANOIDS.2014.7041439},<br \/>\r\nissn = {2164-0572},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-11-01},<br \/>\r\nbooktitle = {2014 IEEE-RAS International Conference on Humanoid Robots},<br \/>\r\npages = {699-706},<br \/>\r\nabstract = {This paper presents a strategy for estimating the geometry of an interest object from a monocular video sequence acquired by a walking humanoid robot. The problem is solved using a space carving algorithm, which relies on both the accurate extraction of the occluding boundaries of the object as well as the precise estimation of the camera pose for each video frame. For data acquisition, a monocular visual-based control has been developed that drives the trajectory of the robot around an object placed on a small table. 
Due to the stepping of the humanoid, the recorded sequence is contaminated with artefacts that affect the correct extraction of contours along the video frames. To overcome this issue, a method that assigns a fitness score for each frame is proposed, delivering a subset of camera poses and video frames that produce consistent 3D shape estimations of the objects used for experimental evaluation.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('19','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_19\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This paper presents a strategy for estimating the geometry of an interest object from a monocular video sequence acquired by a walking humanoid robot. The problem is solved using a space carving algorithm, which relies on both the accurate extraction of the occluding boundaries of the object as well as the precise estimation of the camera pose for each video frame. For data acquisition, a monocular visual-based control has been developed that drives the trajectory of the robot around an object placed on a small table. Due to the stepping of the humanoid, the recorded sequence is contaminated with artefacts that affect the correct extraction of contours along the video frames. 
To overcome this issue, a method that assigns a fitness score for each frame is proposed, delivering a subset of camera poses and video frames that produce consistent 3D shape estimations of the objects used for experimental evaluation.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('19','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_19\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/ieeexplore.ieee.org\/document\/7041439\/?arnumber=7041439&amp;tag=1\" title=\"http:\/\/ieeexplore.ieee.org\/document\/7041439\/?arnumber=7041439&amp;tag=1\" target=\"_blank\">http:\/\/ieeexplore.ieee.org\/document\/7041439\/?arnumber=7041439&amp;tag=1<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/HUMANOIDS.2014.7041439\" title=\"Follow DOI:10.1109\/HUMANOIDS.2014.7041439\" target=\"_blank\">doi:10.1109\/HUMANOIDS.2014.7041439<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('19','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rodriguez-Telles, Francisco G;  Perez-Alcocer, Ricardo;  Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Bikram Dey, Bir;  Martinez-Garcia, Edgar A.<\/p><p class=\"tp_pub_title\">Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">2014 IEEE International Conference on Robotics and Automation (ICRA), <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span 
class=\"tp_pub_additional_year\">2014<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_21\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('21','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_21\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{rodriguez2014vision,<br \/>\r\ntitle = {Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat},<br \/>\r\nauthor = {Rodriguez-Telles, Francisco G and Perez-Alcocer, Ricardo and Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Bikram Dey, Bir and Martinez-Garcia, Edgar A.},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-01-01},<br \/>\r\nbooktitle = {2014 IEEE International Conference on Robotics and Automation (ICRA)},<br \/>\r\npages = {3813--3818},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('21','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Martinez-Garcia, Edgar A.<\/p><p class=\"tp_pub_title\">Robust detection and tracking of regions of interest for autonomous underwater robotic exploration <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">Proc. 6th Int. Conf. 
on Advanced Cognitive Technologies and Applications, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_22\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('22','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_22\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{maldonado2014robust,<br \/>\r\ntitle = {Robust detection and tracking of regions of interest for autonomous underwater robotic exploration},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Martinez-Garcia, Edgar A.},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-01-01},<br \/>\r\nbooktitle = {Proc. 6th Int. Conf. on Advanced Cognitive Technologies and Applications},<br \/>\r\npages = {165--171},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('22','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_inproceedings\">Proceedings Articles<\/h3><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rodr\u00edguez-Teiles, F. G.;  Perez-Alcocer, Ricardo;  Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Dey, B. 
B.;  Martinez-Garcia, Edgar A.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('68','tp_links')\" style=\"cursor:pointer;\">Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">2014 IEEE International Conference on Robotics and Automation (ICRA), <\/span><span class=\"tp_pub_additional_pages\">pp. 3813-3818, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1050-4729<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_68\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('68','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_68\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('68','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_68\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{6907412,<br \/>\r\ntitle = {Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat},<br \/>\r\nauthor = {Rodr\\'{i}guez-Teiles, F. G. and Perez-Alcocer, Ricardo and Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Dey, B. B. 
and Martinez-Garcia, Edgar A.},<br \/>\r\nurl = {http:\/\/ieeexplore.ieee.org\/document\/6907412\/},<br \/>\r\ndoi = {10.1109\/ICRA.2014.6907412},<br \/>\r\nissn = {1050-4729},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-05-01},<br \/>\r\nbooktitle = {2014 IEEE International Conference on Robotics and Automation (ICRA)},<br \/>\r\npages = {3813-3818},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('68','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_68\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/ieeexplore.ieee.org\/document\/6907412\/\" title=\"http:\/\/ieeexplore.ieee.org\/document\/6907412\/\" target=\"_blank\">http:\/\/ieeexplore.ieee.org\/document\/6907412\/<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/ICRA.2014.6907412\" title=\"Follow DOI:10.1109\/ICRA.2014.6907412\" target=\"_blank\">doi:10.1109\/ICRA.2014.6907412<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('68','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Estopier-Castillo, Vicente;  Arechavaleta, Gustavo;  Olgu\u00edn-D\u00edaz, Ernesto<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('78','tp_links')\" style=\"cursor:pointer;\">Generacion de Movimientos Humanoides con Dinamica Inversa Jerarquica<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">Generacion de 
Movimientos Humanoides con Dinamica Inversa Jerarquica, <\/span><span class=\"tp_pub_additional_publisher\">Congreso Latinoamericano de Control Autom\u00e1tico CLCA 2014, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_78\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('78','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_78\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('78','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_78\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{Castillo2014,<br \/>\r\ntitle = {Generacion de Movimientos Humanoides con Dinamica Inversa Jerarquica},<br \/>\r\nauthor = {Estopier-Castillo, Vicente and Arechavaleta, Gustavo and Olgu\\'{i}n-D\\'{i}az, Ernesto},<br \/>\r\nurl = {http:\/\/amca.mx\/memorias\/amca2014\/articulos\/0112.pdf},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-00-00},<br \/>\r\nbooktitle = {Generacion de Movimientos Humanoides con Dinamica Inversa Jerarquica},<br \/>\r\npublisher = {Congreso Latinoamericano de Control Autom\\'{a}tico CLCA 2014},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('78','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_78\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-file-pdf\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/amca.mx\/memorias\/amca2014\/articulos\/0112.pdf\" title=\"http:\/\/amca.mx\/memorias\/amca2014\/articulos\/0112.pdf\" 
target=\"_blank\">http:\/\/amca.mx\/memorias\/amca2014\/articulos\/0112.pdf<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('78','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2013\">2013<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Journal Articles<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Sanchez-Escobedo, Dalila;  Castelan, Mario<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('23','tp_links')\" style=\"cursor:pointer;\">3D face shape prediction from a frontal image using cylindrical coordinates and partial least squares<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Pattern Recognition Letters, <\/span><span class=\"tp_pub_additional_volume\">vol. 34, <\/span><span class=\"tp_pub_additional_number\">no. 4, <\/span><span class=\"tp_pub_additional_pages\">pp. 
389 - 399, <\/span><span class=\"tp_pub_additional_year\">2013<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 0167-8655<\/span><span class=\"tp_pub_additional_note\">, (Advances in Pattern Recognition Methodology and Applications)<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_23\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('23','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_23\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('23','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_23\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('23','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_23\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{SanchezEscobedo2013389,<br \/>\r\ntitle = {3D face shape prediction from a frontal image using cylindrical coordinates and partial least squares},<br \/>\r\nauthor = {Sanchez-Escobedo, Dalila and Castelan, Mario},<br \/>\r\nurl = {http:\/\/www.sciencedirect.com\/science\/article\/pii\/S0167865512002929},<br \/>\r\ndoi = {http:\/\/dx.doi.org\/10.1016\/j.patrec.2012.09.007},<br \/>\r\nissn = {0167-8655},<br \/>\r\nyear  = {2013},<br \/>\r\ndate = {2013-01-01},<br \/>\r\njournal = {Pattern Recognition Letters},<br \/>\r\nvolume = {34},<br \/>\r\nnumber = {4},<br \/>\r\npages = {389 - 399},<br \/>\r\nabstract = {This paper addresses the problem of linearly approximating 3D shape from intensities in the context of facial analysis. In other words, given a frontal pose grayscale input face, the direct estimation of its 3D structure is sought through a regression matrix. 
Approaches falling into this category generally assume that both 2D and 3D features are defined under Cartesian schemes, which is not optimal for the task of novel view synthesis. The current article aims to overcome this issue by exploiting the 3D structure of faces through cylindrical coordinates, aided by the partial least squares regression. In the context of facial shape analysis, partial least squares builds a set of basis faces, for both grayscale and 3D shape spaces, seeking for maximizing shared covariance between projections of the data along the basis faces. Experimental tests show how the cylindrical representations are suitable for the purposes of linear regression, resulting in a benefit for the generation of novel facial views, showing a potential use in model based face identification.},<br \/>\r\nnote = {Advances in Pattern Recognition Methodology and Applications},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('23','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_23\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This paper addresses the problem of linearly approximating 3D shape from intensities in the context of facial analysis. In other words, given a frontal pose grayscale input face, the direct estimation of its 3D structure is sought through a regression matrix. Approaches falling into this category generally assume that both 2D and 3D features are defined under Cartesian schemes, which is not optimal for the task of novel view synthesis. The current article aims to overcome this issue by exploiting the 3D structure of faces through cylindrical coordinates, aided by the partial least squares regression. 
In the context of facial shape analysis, partial least squares builds a set of basis faces, for both grayscale and 3D shape spaces, seeking for maximizing shared covariance between projections of the data along the basis faces. Experimental tests show how the cylindrical representations are suitable for the purposes of linear regression, resulting in a benefit for the generation of novel facial views, showing a potential use in model based face identification.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('23','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_23\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S0167865512002929\" title=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S0167865512002929\" target=\"_blank\">http:\/\/www.sciencedirect.com\/science\/article\/pii\/S0167865512002929<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1016\/j.patrec.2012.09.007\" title=\"Follow DOI:10.1016\/j.patrec.2012.09.007\" target=\"_blank\">doi:10.1016\/j.patrec.2012.09.007<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('23','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Lopez-Juarez, Ismael;  Castelan, Mario;  Castro-Mart\u00eenez, Francisco Javier;  Pe\u00f1a-Cabrera, Mario;  Osorio-Comparan, Roman<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('24','tp_links')\" style=\"cursor:pointer;\">Using Object\u2019s Contour, Form and Depth to Embed Recognition Capability into Industrial Robots<\/a> <span class=\"tp_pub_type 
tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Journal of Applied Research and Technology, <\/span><span class=\"tp_pub_additional_volume\">vol. 11, <\/span><span class=\"tp_pub_additional_number\">no. 1, <\/span><span class=\"tp_pub_additional_pages\">pp. 5 - 17, <\/span><span class=\"tp_pub_additional_year\">2013<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1665-6423<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_24\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('24','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_24\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('24','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_24\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('24','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_24\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{LopezJuarez20135,<br \/>\r\ntitle = {Using Object\u2019s Contour, Form and Depth to Embed Recognition Capability into Industrial Robots},<br \/>\r\nauthor = {Lopez-Juarez, Ismael and Castelan, Mario and Castro-Mart\\^{i}nez, Francisco Javier and Pe\\~{n}a-Cabrera, Mario and Osorio-Comparan, Roman},<br \/>\r\nurl = {http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1665642313715116},<br \/>\r\ndoi = {http:\/\/dx.doi.org\/10.1016\/S1665-6423(13)71511-6},<br \/>\r\nissn = {1665-6423},<br \/>\r\nyear  = {2013},<br \/>\r\ndate = {2013-01-01},<br \/>\r\njournal = {Journal of Applied Research and Technology},<br \/>\r\nvolume = {11},<br \/>\r\nnumber = {1},<br \/>\r\npages = {5 - 17},<br \/>\r\nabstract = 
{Abstract Robot vision systems can differentiate parts by pattern matching irrespective of part orientation and location. Some manufacturers offer 3D guidance systems using robust vision and laser systems so that a 3D programmed point can be repeated even if the part is moved varying its location, rotation and orientation within the working space. Despite these developments, current industrial robots are still unable to recognize objects in a robust manner; that is, to distinguish an object among equally shaped objects taking into account not only the object\u2019s contour but also its form and depth information, which is precisely the major contribution of this research. Our hypothesis establishes that it is possible to integrate a robust invariant object recognition capability into industrial robots by using image features from the object\u2019s contour (boundary object information), its form (i.e., type of curvature or topographical surface information) and depth information (from stereo disparity maps). These features can be concatenated in order to form an invariant vector descriptor which is the input to an artificial neural network (ANN) for learning and recognition purposes. In this paper we present the recognition results under different working conditions using a KUKA KR16 industrial robot, which validated our approach.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('24','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_24\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Abstract Robot vision systems can differentiate parts by pattern matching irrespective of part orientation and location. 
Some manufacturers offer 3D guidance systems using robust vision and laser systems so that a 3D programmed point can be repeated even if the part is moved varying its location, rotation and orientation within the working space. Despite these developments, current industrial robots are still unable to recognize objects in a robust manner; that is, to distinguish an object among equally shaped objects taking into account not only the object\u2019s contour but also its form and depth information, which is precisely the major contribution of this research. Our hypothesis establishes that it is possible to integrate a robust invariant object recognition capability into industrial robots by using image features from the object\u2019s contour (boundary object information), its form (i.e., type of curvature or topographical surface information) and depth information (from stereo disparity maps). These features can be concatenated in order to form an invariant vector descriptor which is the input to an artificial neural network (ANN) for learning and recognition purposes. 
In this paper we present the recognition results under different working conditions using a KUKA KR16 industrial robot, which validated our approach.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('24','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_24\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1665642313715116\" title=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1665642313715116\" target=\"_blank\">http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1665642313715116<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/http:\/\/dx.doi.org\/10.1016\/S1665-6423(13)71511-6\" title=\"Follow DOI:http:\/\/dx.doi.org\/10.1016\/S1665-6423(13)71511-6\" target=\"_blank\">doi:http:\/\/dx.doi.org\/10.1016\/S1665-6423(13)71511-6<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('24','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rivero-Juarez, Joaquin;  Martinez-Garcia, Edgar A.;  Torres-Mendez, Luz Abril;  Elara Mohan, Rajesh<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('25','tp_links')\" style=\"cursor:pointer;\">3D Heterogeneous Multi-sensor Global Registration<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Procedia Engineering, <\/span><span class=\"tp_pub_additional_volume\">vol. 64, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1552 - 1561, <\/span><span class=\"tp_pub_additional_year\">2013<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1877-7058<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_25\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('25','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_25\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('25','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_25\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('25','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_25\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{RIVEROJUAREZ20131552,<br \/>\r\ntitle = {3D Heterogeneous Multi-sensor Global Registration},<br \/>\r\nauthor = {Rivero-Juarez, Joaquin and Martinez-Garcia, Edgar A. and Torres-Mendez, Luz Abril and Elara Mohan, Rajesh },<br \/>\r\nurl = {http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1877705813017517},<br \/>\r\ndoi = {http:\/\/dx.doi.org\/10.1016\/j.proeng.2013.09.237},<br \/>\r\nissn = {1877-7058},<br \/>\r\nyear  = {2013},<br \/>\r\ndate = {2013-01-01},<br \/>\r\njournal = {Procedia Engineering},<br \/>\r\nvolume = {64},<br \/>\r\npages = {1552 - 1561},<br \/>\r\nabstract = {This manuscript presents a deterministic model to register heterogeneous 3D data arising from a ring of eight ultrasonic sonar, one high data density LiDAR (light detection and ranging), and a semi-ring of three visual sensors. The three visual sensors are arranged in a cylindrical ring, and although they provide 2D colour images, a radial multi-stereo geometric model is proposed to yield 3D data. 
All deployed sensors are geometrically placed on-board a wheeled mobile robot platform, and data registration is carried out navigating indoors. The sensor devices in discussion are coordinated and synchronized by a home-made distributed sensor suite system. Mathematical deterministic formulation for data registration is used to obtain experimental and numerical results on global mapping. Data registration relies on a geometric model to compute depth information from a semi-circular trinocular stereo sensor that is proposed to rectify and calibrate three image frames with different orientations and positions, but with same projection point.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('25','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_25\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This manuscript presents a deterministic model to register heterogeneous 3D data arising from a ring of eight ultrasonic sonar, one high data density LiDAR (light detection and ranging), and a semi-ring of three visual sensors. The three visual sensors are arranged in a cylindrical ring, and although they provide 2D colour images, a radial multi-stereo geometric model is proposed to yield 3D data. All deployed sensors are geometrically placed on-board a wheeled mobile robot platform, and data registration is carried out navigating indoors. The sensor devices in discussion are coordinated and synchronized by a home-made distributed sensor suite system. Mathematical deterministic formulation for data registration is used to obtain experimental and numerical results on global mapping. 
Data registration relies on a geometric model to compute depth information from a semi-circular trinocular stereo sensor that is proposed to rectify and calibrate three image frames with different orientations and positions, but with same projection point.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('25','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_25\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1877705813017517\" title=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1877705813017517\" target=\"_blank\">http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1877705813017517<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/http:\/\/dx.doi.org\/10.1016\/j.proeng.2013.09.237\" title=\"Follow DOI:http:\/\/dx.doi.org\/10.1016\/j.proeng.2013.09.237\" target=\"_blank\">doi:http:\/\/dx.doi.org\/10.1016\/j.proeng.2013.09.237<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('25','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_inproceedings\">Proceedings Articles<\/h3><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rios-Cabrera, Reyes;  Tuytelaars, Tinne<\/p><p class=\"tp_pub_title\">Discriminatively Trained Templates for 3D Object Detection: A Real Time Scalable Approach <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">The IEEE International Conference on Computer Vision (ICCV), <\/span><span class=\"tp_pub_additional_year\">2013<\/span>.<\/p><p class=\"tp_pub_menu\"><span 
class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_215\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('215','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_215\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{Rios-Cabrera_2013_ICCV__B,<br \/>\r\ntitle = {Discriminatively Trained Templates for 3D Object Detection: A Real Time Scalable Approach},<br \/>\r\nauthor = {Rios-Cabrera, Reyes and Tuytelaars, Tinne},<br \/>\r\nyear  = {2013},<br \/>\r\ndate = {2013-12-01},<br \/>\r\nbooktitle = {The IEEE International Conference on Computer Vision (ICCV)},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('215','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><\/div><div class=\"tablenav\"><div class=\"tablenav-pages\"><span class=\"displaying-num\">82 entries<\/span> <a class=\"page-numbers button disabled\">&laquo;<\/a> <a class=\"page-numbers button disabled\">&lsaquo;<\/a> 1 of 2 <a href=\"https:\/\/ryma.cinvestav.mx\/ravg\/publications\/?limit=2&amp;tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=&amp;tsr=#tppubs\" title=\"next page\" class=\"page-numbers button\">&rsaquo;<\/a> <a href=\"https:\/\/ryma.cinvestav.mx\/ravg\/publications\/?limit=2&amp;tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=&amp;tsr=#tppubs\" title=\"last page\" class=\"page-numbers button\">&raquo;<\/a> <\/div><\/div><\/div><\/strong><\/p>\n<p>[\/et_pb_text][\/et_pb_column][\/et_pb_row][\/et_pb_section][et_pb_section bb_built=&#8221;1&#8243; fullwidth=&#8221;off&#8221; specialty=&#8221;off&#8221; background_color=&#8221;rgba(0,0,0,0.32)&#8221; inner_shadow=&#8221;on&#8221; custom_css_main_element=&#8221;box-shadow: inset 0px 3px 2px rgba(150, 150, 150, 0.85);&#8221; _builder_version=&#8221;3.0.72&#8243; locked=&#8221;off&#8221; 
global_module=&#8221;321&#8243;][et_pb_row global_parent=&#8221;321&#8243; make_fullwidth=&#8221;off&#8221; use_custom_width=&#8221;off&#8221; width_unit=&#8221;on&#8221; use_custom_gutter=&#8221;off&#8221; allow_player_pause=&#8221;off&#8221; parallax=&#8221;off&#8221; parallax_method=&#8221;on&#8221; make_equal=&#8221;off&#8221; parallax_1=&#8221;off&#8221; parallax_method_1=&#8221;off&#8221; custom_margin=&#8221;-40px|||&#8221; background_position=&#8221;top_left&#8221; background_repeat=&#8221;repeat&#8221; background_size=&#8221;initial&#8221; parent_locked=&#8221;off&#8221;][et_pb_column type=&#8221;4_4&#8243;][et_pb_image admin_label=&#8221;LogoCINVESTAV del Pie de p\u00e1gina&#8221; global_parent=&#8221;321&#8243; src=&#8221;https:\/\/ryma.cinvestav.mx\/wp-content\/uploads\/2014\/08\/roboticaCinvestavOK_transparencia_white.png&#8221; alt=&#8221;Rob\u00f3tica y Manufactura Avanzada, Cinvestav&#8221; show_in_lightbox=&#8221;off&#8221; url_new_window=&#8221;off&#8221; use_overlay=&#8221;off&#8221; animation=&#8221;off&#8221; sticky=&#8221;on&#8221; align=&#8221;center&#8221; max_width=&#8221;95px&#8221; max_width_last_edited=&#8221;on|desktop&#8221; force_fullwidth=&#8221;off&#8221; always_center_on_mobile=&#8221;on&#8221; custom_margin=&#8221;||15px|&#8221; _builder_version=&#8221;3.0.85&#8243; parent_locked=&#8221;off&#8221; url=&#8221;https:\/\/ryma.cinvestav.mx\/&#8221; show_bottom_space=&#8221;on&#8221; \/][et_pb_text global_parent=&#8221;321&#8243; _builder_version=&#8221;3.0.72&#8243; background_layout=&#8221;dark&#8221; text_orientation=&#8221;center&#8221; border_style=&#8221;solid&#8221; custom_margin=&#8221;||-50px|&#8221; parent_locked=&#8221;off&#8221;]<\/p>\n<hr \/>\n<p style=\"text-align: center;\">Av. Industrial\u00a0Metalurgia\u00a0#1062,\u00a0Parque Ind. Ramos Arizpe,\u00a0Ramos Arizpe, Coah.\u00a0C.P. 25900, M\u00e9xico. \u00a0Tel. 
+52 (844) 438-9600<\/p>\n<p>[\/et_pb_text][\/et_pb_column][\/et_pb_row][\/et_pb_section]<\/p>\n","protected":false},"excerpt":{"rendered":"<p><div class='et-box et-shadow'>\n\t\t\t\t\t<div class='et-box-content'>PUBLICATIONS<\/div><\/div> This is the list of publications of this laboratory <div class=\"teachpress_pub_list\"><form name=\"tppublistform\" method=\"get\"><a name=\"tppubs\" id=\"tppubs\"><\/a><div class=\"teachpress_filter\"><select class=\"default\" name=\"yr\" id=\"yr\" tabindex=\"2\" onchange=\"teachpress_jumpMenu('parent',this, 'https:\/\/ryma.cinvestav.mx\/ravg\/publications\/?')\">\r\n                   <option value=\"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=#tppubs\">All years<\/option>\r\n                   <option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2020#tppubs\" >2020<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2019#tppubs\" >2019<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2018#tppubs\" >2018<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2017#tppubs\" >2017<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2016#tppubs\" >2016<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2015#tppubs\" >2015<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2014#tppubs\" >2014<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2013#tppubs\" >2013<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2012#tppubs\" >2012<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2011#tppubs\" >2011<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2010#tppubs\" >2010<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2009#tppubs\" >2009<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2008#tppubs\" >2008<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2007#tppubs\" >2007<\/option><option value 
= \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2006#tppubs\" >2006<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2005#tppubs\" >2005<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2004#tppubs\" >2004<\/option><option value = \"tgid=&amp;type=&amp;auth=&amp;usr=&amp;yr=2003#tppubs\" >2003<\/option>\r\n                <\/select><select class=\"default\" name=\"type\" id=\"type\" tabindex=\"3\" onchange=\"teachpress_jumpMenu('parent',this, 'https:\/\/ryma.cinvestav.mx\/ravg\/publications\/?')\">\r\n                   <option value=\"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=#tppubs\">All types<\/option>\r\n                   <option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=article#tppubs\" >Journal Articles<\/option><option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=conference#tppubs\" >Conferences<\/option><option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=inbook#tppubs\" >Book Chapters<\/option><option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=inproceedings#tppubs\" >Proceedings Articles<\/option><option value = \"tgid=&amp;yr=&amp;auth=&amp;usr=&amp;type=proceedings#tppubs\" >Proceedings<\/option>\r\n                <\/select><select class=\"default\" name=\"auth\" id=\"auth\" tabindex=\"5\" onchange=\"teachpress_jumpMenu('parent',this, 'https:\/\/ryma.cinvestav.mx\/ravg\/publications\/?')\">\r\n                   <option value=\"tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=#tppubs\">All authors<\/option>\r\n                   <option value = \"tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=74#tppubs\" > Arechavaleta, Gustavo<\/option><option value = \"tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=61#tppubs\" > Castelan, Mario<\/option><option value = \"tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=77#tppubs\" > Rios-Cabrera, Reyes<\/option><option value = \"tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=58#tppubs\" > Torres-Mendez, Luz Abril<\/option>\r\n                <\/select><select 
class=\"default\" name=\"usr\" id=\"usr\" tabindex=\"6\" onchange=\"teachpress_jumpMenu('parent',this, 'https:\/\/ryma.cinvestav.mx\/ravg\/publications\/?')\">\r\n                   <option value=\"tgid=&amp;yr=&amp;type=&amp;auth=&amp;usr=#tppubs\">All users<\/option>\r\n                   <option value = \"tgid=&amp;yr=&amp;type=&amp;auth=&amp;usr=12#tppubs\" >mcastelan<\/option>\r\n                <\/select><\/div><\/form><div class=\"tablenav\"><div class=\"tablenav-pages\"><span class=\"displaying-num\">82 entries<\/span> <a class=\"page-numbers button disabled\">&laquo;<\/a> <a class=\"page-numbers button disabled\">&lsaquo;<\/a> 1 of 2 <a href=\"https:\/\/ryma.cinvestav.mx\/ravg\/publications\/?limit=2&amp;tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=&amp;tsr=#tppubs\" title=\"next page\" class=\"page-numbers button\">&rsaquo;<\/a> <a href=\"https:\/\/ryma.cinvestav.mx\/ravg\/publications\/?limit=2&amp;tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=&amp;tsr=#tppubs\" title=\"last page\" class=\"page-numbers button\">&raquo;<\/a> <\/div><\/div><div class=\"teachpress_publication_list\"><h3 class=\"tp_h3\" id=\"tp_h3_2019\">2019<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Journal Articles<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Ramos-Oliveira, Jorge;  Baltazar, Arturo;  Castelan, Mario<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('225','tp_links')\" style=\"cursor:pointer;\">On ray tracing for sharp changing media<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Journal of the Acoustic Society of America, <\/span><span class=\"tp_pub_additional_volume\">vol. 146, <\/span><span class=\"tp_pub_additional_number\">no. 3, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1595-1604, <\/span><span class=\"tp_pub_additional_year\">2019<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_225\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('225','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_225\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('225','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_225\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Ramos-Oliveira2019,<br \/>\r\ntitle = {On ray tracing for sharp changing media},<br \/>\r\nauthor = {Ramos-Oliveira, Jorge and Baltazar, Arturo and Castelan, Mario},<br \/>\r\nurl = {https:\/\/doi.org\/10.1121\/1.5125133},<br \/>\r\ndoi = {10.1121\/1.5125133},<br \/>\r\nyear  = {2019},<br \/>\r\ndate = {2019-07-10},<br \/>\r\njournal = {Journal of the Acoustic Society of America},<br \/>\r\nvolume = {146},<br \/>\r\nnumber = {3},<br \/>\r\npages = {1595-1604},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('225','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_225\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/doi.org\/10.1121\/1.5125133\" title=\"https:\/\/doi.org\/10.1121\/1.5125133\" target=\"_blank\">https:\/\/doi.org\/10.1121\/1.5125133<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1121\/1.5125133\" title=\"Follow DOI:10.1121\/1.5125133\" target=\"_blank\">doi:10.1121\/1.5125133<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" 
onclick=\"teachpress_pub_showhide('225','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Luna-Aguilar, Christian;  Morales-Diaz, America;  Castelan, Mario;  Nadeu, Climent<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('223','tp_links')\" style=\"cursor:pointer;\">Incorporation of acoustic sensors in the regulation of a mobile robot<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Advanced Robotics, <\/span><span class=\"tp_pub_additional_volume\">vol. 33, <\/span><span class=\"tp_pub_additional_number\">no. 2, <\/span><span class=\"tp_pub_additional_pages\">pp. 61-73, <\/span><span class=\"tp_pub_additional_year\">2019<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 0169-1864<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_223\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('223','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_223\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('223','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_223\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('223','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_223\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{{Luna-Aguilar}2019,<br \/>\r\ntitle = {Incorporation of acoustic sensors in the regulation of a mobile robot},<br \/>\r\nauthor = {Luna-Aguilar, Christian and Morales-Diaz, America and Castelan, Mario and Nadeu, 
Climent},<br \/>\r\neditor = {Taylor and Francis},<br \/>\r\nurl = {https:\/\/doi.org\/10.1080\/01691864.2019.1573703},<br \/>\r\ndoi = {10.1080\/01691864.2019.1573703},<br \/>\r\nissn = {0169-1864},<br \/>\r\nyear  = {2019},<br \/>\r\ndate = {2019-01-01},<br \/>\r\njournal = {Advanced Robotics},<br \/>\r\nvolume = {33},<br \/>\r\nnumber = {2},<br \/>\r\npages = {61-73},<br \/>\r\nabstract = {This article introduces the incorporation of acoustic sensors for the localization of a mobile robot. The robot is considered as a sound source and its position is located applying a Time Delay of Arrival (TDOA) method. Since the accuracy of this method varies with the microphone array, a navigation acoustic map that indicates the location errors is built. This map also provides the robot with navigation trajectories point-to-point and the control is capable to drive the robot through these trajectories to a desired configuration. The proposed localization method is thoroughly tested using both a 900 Hz square signal and the natural sound of the robot, which is driven near the desired point with an average error of 0.067 m.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('223','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_223\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This article introduces the incorporation of acoustic sensors for the localization of a mobile robot. The robot is considered as a sound source and its position is located applying a Time Delay of Arrival (TDOA) method. Since the accuracy of this method varies with the microphone array, a navigation acoustic map that indicates the location errors is built. 
This map also provides the robot with navigation trajectories point-to-point and the control is capable to drive the robot through these trajectories to a desired configuration. The proposed localization method is thoroughly tested using both a 900 Hz square signal and the natural sound of the robot, which is driven near the desired point with an average error of 0.067 m.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('223','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_223\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/doi.org\/10.1080\/01691864.2019.1573703\" title=\"https:\/\/doi.org\/10.1080\/01691864.2019.1573703\" target=\"_blank\">https:\/\/doi.org\/10.1080\/01691864.2019.1573703<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1080\/01691864.2019.1573703\" title=\"Follow DOI:10.1080\/01691864.2019.1573703\" target=\"_blank\">doi:10.1080\/01691864.2019.1573703<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('223','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rico-Fernandez, Maria;  Rios-Cabrera, Reyes;  Castelan, Mario;  Guerrero-Reyes, Hector;  Juarez-Maldonado, Antonio<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('224','tp_links')\" style=\"cursor:pointer;\">A contextualized approach for segmentation of foliage in different crop species<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Computers and Electronics in Agriculture, <\/span><span 
class=\"tp_pub_additional_volume\">vol. 156, <\/span><span class=\"tp_pub_additional_pages\">pp. 378-386, <\/span><span class=\"tp_pub_additional_year\">2019<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 0168-1699<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_224\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('224','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_224\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('224','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_224\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Rico-Fernandez2019,<br \/>\r\ntitle = {A contextualized approach for segmentation of foliage in different crop species},<br \/>\r\nauthor = {Rico-Fernandez, Maria and Rios-Cabrera, Reyes and Castelan, Mario and Guerrero-Reyes, Hector and Juarez-Maldonado, Antonio},<br \/>\r\neditor = {Elsevier},<br \/>\r\nurl = {https:\/\/doi.org\/10.1016\/j.compag.2018.11.033},<br \/>\r\nissn = {0168-1699},<br \/>\r\nyear  = {2019},<br \/>\r\ndate = {2019-01-01},<br \/>\r\njournal = {Computers and Electronics in Agriculture},<br \/>\r\nvolume = {156},<br \/>\r\npages = {378-386},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('224','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_224\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/doi.org\/10.1016\/j.compag.2018.11.033\" title=\"https:\/\/doi.org\/10.1016\/j.compag.2018.11.033\" 
target=\"_blank\">https:\/\/doi.org\/10.1016\/j.compag.2018.11.033<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('224','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2017\">2017<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Journal Articles<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Lopez-Juarez, Ismael;  Rios-Cabrera, Reyes;  Hsieh, S J;  Howarth, M.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('205','tp_links')\" style=\"cursor:pointer;\">A hybrid non-invasive method for internal\/external quality assessment of potatoes<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">European Food Research and Technology, <\/span><span class=\"tp_pub_additional_year\">2017<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1438-2385<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_205\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('205','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_205\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('205','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_205\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('205','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_205\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Lopez-Juarez2017,<br \/>\r\ntitle = {A hybrid non-invasive method for internal\/external quality assessment of 
potatoes},<br \/>\r\nauthor = {Lopez-Juarez, Ismael and Rios-Cabrera, Reyes and Hsieh, S J and Howarth, M.},<br \/>\r\nurl = {https:\/\/doi.org\/10.1007\/s00217-017-2936-9},<br \/>\r\ndoi = {10.1007\/s00217-017-2936-9},<br \/>\r\nissn = {1438-2385},<br \/>\r\nyear  = {2017},<br \/>\r\ndate = {2017-07-11},<br \/>\r\njournal = {European Food Research and Technology},<br \/>\r\nabstract = {Consumers purchase fruits and vegetables based on its quality, which can be defined as a degree of excellence which is the result of a combination of characteristics, attributes and properties that have significance for market acceptability. In this paper, a novel hybrid active imaging methodology for potato quality inspection that uses an optical colour camera and an infrared thermal camera is presented. The methodology employs an artificial neural network (ANN) that uses quality data composed by two descriptors as input. The ANN works as a feature classifier so that its output is the potato quality grade. The input vector contains information related to external characteristics, such as shape, weight, length and width. Internal characteristics are also accounted for in the input vector in the form of excessive sugar content. The extra sugar content of the potato is an important problem for potato growers and potato chip manufacturers. Extra sugar content could result in diseases or wounds in the potato tuber. In general, potato tubers with low sugar content are considered as having a higher quality. The validation of the methodology was made through experimentation which consisted in fusing both, external and internal characteristics in the input vector to the ANN for an overall quality classification. 
Results using internal data as obtained from an infrared camera and fused with optical external parameters demonstrated the feasibility of the method since the prediction accuracy increased during potato grading.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('205','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_205\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Consumers purchase fruits and vegetables based on its quality, which can be defined as a degree of excellence which is the result of a combination of characteristics, attributes and properties that have significance for market acceptability. In this paper, a novel hybrid active imaging methodology for potato quality inspection that uses an optical colour camera and an infrared thermal camera is presented. The methodology employs an artificial neural network (ANN) that uses quality data composed by two descriptors as input. The ANN works as a feature classifier so that its output is the potato quality grade. The input vector contains information related to external characteristics, such as shape, weight, length and width. Internal characteristics are also accounted for in the input vector in the form of excessive sugar content. The extra sugar content of the potato is an important problem for potato growers and potato chip manufacturers. Extra sugar content could result in diseases or wounds in the potato tuber. In general, potato tubers with low sugar content are considered as having a higher quality. The validation of the methodology was made through experimentation which consisted in fusing both, external and internal characteristics in the input vector to the ANN for an overall quality classification. 
Results using internal data as obtained from an infrared camera and fused with optical external parameters demonstrated the feasibility of the method since the prediction accuracy increased during potato grading.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('205','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_205\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/doi.org\/10.1007\/s00217-017-2936-9\" title=\"https:\/\/doi.org\/10.1007\/s00217-017-2936-9\" target=\"_blank\">https:\/\/doi.org\/10.1007\/s00217-017-2936-9<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1007\/s00217-017-2936-9\" title=\"Follow DOI:10.1007\/s00217-017-2936-9\" target=\"_blank\">doi:10.1007\/s00217-017-2936-9<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('205','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Arechavaleta, Gustavo;  Morales-Diaz, America B.;  Perez-Villeda, Hector Manuel;  Castelan, Mario<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('2','tp_links')\" style=\"cursor:pointer;\">Hierarchical Task-Based Control of Multirobot Systems With Terminal Attractors<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">IEEE Transactions on Control Systems Technology, <\/span><span class=\"tp_pub_additional_volume\">vol. 25, <\/span><span class=\"tp_pub_additional_number\">no. 1, <\/span><span class=\"tp_pub_additional_pages\">pp. 
334 - 341, <\/span><span class=\"tp_pub_additional_year\">2017<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1063-6536<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_2\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('2','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_2\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('2','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_2\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('2','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_2\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{7454708,<br \/>\r\ntitle = {Hierarchical Task-Based Control of Multirobot Systems With Terminal Attractors},<br \/>\r\nauthor = {Arechavaleta, Gustavo and Morales-Diaz, America B. and Perez-Villeda, Hector Manuel and Castelan, Mario },<br \/>\r\nurl = {http:\/\/ieeexplore.ieee.org\/abstract\/document\/7454708\/},<br \/>\r\ndoi = {10.1109\/TCST.2016.2549279},<br \/>\r\nissn = {1063-6536},<br \/>\r\nyear  = {2017},<br \/>\r\ndate = {2017-01-01},<br \/>\r\njournal = {IEEE Transactions on Control Systems Technology},<br \/>\r\nvolume = {25},<br \/>\r\nnumber = {1},<br \/>\r\npages = {334 - 341},<br \/>\r\nabstract = {This brief proposes a hierarchical control scheme based on the definition of a set of multirobot task functions. To deal with the inherent conflicts between tasks, a strict hierarchy is imposed on them. 
We present a novel scheme that copes with two main difficulties shared in standard task-based controllers: 1) to impose a desired time convergence of tasks and 2) to avoid discontinuous task transitions occurred when a task is inserted or removed in the hierarchical structure. As a result, continuous input references are generated for the low-level control of the group. The validation is achieved in simulation and by performing an experiment with wheeled mobile robots.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('2','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_2\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This brief proposes a hierarchical control scheme based on the definition of a set of multirobot task functions. To deal with the inherent conflicts between tasks, a strict hierarchy is imposed on them. We present a novel scheme that copes with two main difficulties shared in standard task-based controllers: 1) to impose a desired time convergence of tasks and 2) to avoid discontinuous task transitions occurred when a task is inserted or removed in the hierarchical structure. As a result, continuous input references are generated for the low-level control of the group. 
The validation is achieved in simulation and by performing an experiment with wheeled mobile robots.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('2','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_2\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/ieeexplore.ieee.org\/abstract\/document\/7454708\/\" title=\"http:\/\/ieeexplore.ieee.org\/abstract\/document\/7454708\/\" target=\"_blank\">http:\/\/ieeexplore.ieee.org\/abstract\/document\/7454708\/<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/TCST.2016.2549279\" title=\"Follow DOI:10.1109\/TCST.2016.2549279\" target=\"_blank\">doi:10.1109\/TCST.2016.2549279<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('2','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2016\">2016<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Journal Articles<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Perez-Alcocer, R. 
R.;  Torres-Mendez, Luz Abril;  Olguin-Diaz, Ernesto;  Maldonado-Ramirez, Alejandro<\/p><p class=\"tp_pub_title\">Vision-based Autonomous Underwater Vehicle Navigation in Poor Visibility Conditions using a Model-free Robust Control <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_154\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('154','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_154\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{P\\'{e}rez-Alcocer2016,<br \/>\r\ntitle = {Vision-based Autonomous Underwater Vehicle Navigation in Poor Visibility Conditions using a Model-free Robust Control},<br \/>\r\nauthor = {Perez-Alcocer, R. R. and Torres-Mendez, Luz Abril and Olguin-Diaz, Ernesto and Maldonado-Ramirez, Alejandro },<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-06-06},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('154','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Mart\u00ednez-Gonz\u00e1lez, Pablo Arturo;  Castelan, Mario;  Arechavaleta, Gustavo<\/p><p class=\"tp_pub_title\">Vision Based Persistent Localization of a Humanoid Robot for Locomotion Tasks <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_159\" 
class=\"tp_show\" onclick=\"teachpress_pub_showhide('159','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_159\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Mart\\'{i}nez-Gonz\\'{a}lez2016b,<br \/>\r\ntitle = {Vision Based Persistent Localization of a Humanoid Robot for Locomotion Tasks},<br \/>\r\nauthor = {Mart\\'{i}nez-Gonz\\'{a}lez, Pablo Arturo and Castelan, Mario and Arechavaleta, Gustavo},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-06-06},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('159','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Hernandez-Rodriguez, Felipe;  Castelan, Mario<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('4','tp_links')\" style=\"cursor:pointer;\">A photometric sampling method for facial shape recovery<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Machine Vision and Applications, <\/span><span class=\"tp_pub_additional_volume\">vol. 27, <\/span><span class=\"tp_pub_additional_number\">no. 4, <\/span><span class=\"tp_pub_additional_pages\">pp. 
483-497, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_4\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('4','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_4\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('4','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_4\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Hernandez-Rodriguez2016,<br \/>\r\ntitle = {A photometric sampling method for facial shape recovery},<br \/>\r\nauthor = {Hernandez-Rodriguez, Felipe and Castelan, Mario },<br \/>\r\nurl = {http:\/\/link.springer.com\/article\/10.1007%2Fs00138-016-0755-9},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-04-01},<br \/>\r\njournal = {Machine Vision and Applications},<br \/>\r\nvolume = {27},<br \/>\r\nnumber = {4},<br \/>\r\npages = {483-497},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('4','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_4\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/link.springer.com\/article\/10.1007%2Fs00138-016-0755-9\" title=\"http:\/\/link.springer.com\/article\/10.1007%2Fs00138-016-0755-9\" target=\"_blank\">http:\/\/link.springer.com\/article\/10.1007%2Fs00138-016-0755-9<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('4','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p 
class=\"tp_pub_author\"> Martinez-Gonzalez, Pablo;  Castelan, Mario;  Arechavaleta, Gustavo<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('1','tp_links')\" style=\"cursor:pointer;\">Vision based persistent localization of a humanoid robot for locomotion Tasks<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">International Journal of Applied Mathematics and Computer Science, <\/span><span class=\"tp_pub_additional_volume\">vol. 26, <\/span><span class=\"tp_pub_additional_number\">no. 3, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_1\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('1','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_1\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('1','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_1\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Mart\\`{i}nez-Gonz\\'{a}lez2016,<br \/>\r\ntitle = {Vision based persistent localization of a humanoid robot for locomotion Tasks},<br \/>\r\nauthor = {Martinez-Gonzalez, Pablo and Castelan, Mario and Arechavaleta, Gustavo },<br \/>\r\nurl = {https:\/\/drive.google.com\/file\/d\/0B-7dVUdTjeJUNGdXd0N6UWRvdk0\/view},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-03-26},<br \/>\r\njournal = {International Journal of Applied Mathematics and Computer Science},<br \/>\r\nvolume = {26},<br \/>\r\nnumber = {3},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" 
onclick=\"teachpress_pub_showhide('1','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_1\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/drive.google.com\/file\/d\/0B-7dVUdTjeJUNGdXd0N6UWRvdk0\/view\" title=\"https:\/\/drive.google.com\/file\/d\/0B-7dVUdTjeJUNGdXd0N6UWRvdk0\/view\" target=\"_blank\">https:\/\/drive.google.com\/file\/d\/0B-7dVUdTjeJUNGdXd0N6UWRvdk0\/view<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('1','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Delfin, Josafat;  Becerra, Hector M;  Arechavaleta, Gustavo<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('5','tp_links')\" style=\"cursor:pointer;\">Visual Servo Walking Control for Humanoids with Finite-time Convergence and Smooth Robot Velocities<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">International Journal of Control, <\/span><span class=\"tp_pub_additional_volume\">vol. 89, <\/span><span class=\"tp_pub_additional_number\">no. 7, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1342-1358, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1366-5820<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_5\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('5','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_5\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('5','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_5\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('5','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_5\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Delfin2016,<br \/>\r\ntitle = {Visual Servo Walking Control for Humanoids with Finite-time Convergence and Smooth Robot Velocities},<br \/>\r\nauthor = {Delfin, Josafat and Becerra, Hector M and Arechavaleta, Gustavo },<br \/>\r\nurl = {http:\/\/www.tandfonline.com\/doi\/abs\/10.1080\/00207179.2015.1129558},<br \/>\r\ndoi = {http:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558},<br \/>\r\nissn = {1366-5820},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-10},<br \/>\r\njournal = {International Journal of Control},<br \/>\r\nvolume = {89},<br \/>\r\nnumber = {7},<br \/>\r\npages = {1342-1358},<br \/>\r\nabstract = {In this paper, we address the problem of humanoid locomotion guided from information of a monocular camera. The goal of the robot is to reach a desired location defined in terms of a target image, i.e., a positioning task. The proposed approach allows us to introduce a desired time to complete the positioning task, which is advantageous in contrast to the classical exponential convergence. 
In particular, finite-time convergence is achieved while generating smooth robot velocities and considering the omnidirectional walking capability of the robot. In addition, we propose a hierarchical task-based control scheme, which can simultaneously handle the visual positioning and the obstacle avoidance tasks without affecting the desired time of convergence. The controller is able to activate or inactivate the obstacle avoidance task without generating discontinuous velocity references while the humanoid is walking. Stability of the closed loop for the two task-based control is demonstrated theoretically even during the transitions between the tasks. The proposed approach is generic in the sense that different visual control schemes are supported. We evaluate a homography-based visual servoing for position-based and image-based modalities, as well as for eye-in-hand and eye-to-hand configurations. The experimental evaluation is performed with the humanoid robot NAO.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('5','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_5\" style=\"display:none;\"><div class=\"tp_abstract_entry\">In this paper, we address the problem of humanoid locomotion guided from information of a monocular camera. The goal of the robot is to reach a desired location defined in terms of a target image, i.e., a positioning task. The proposed approach allows us to introduce a desired time to complete the positioning task, which is advantageous in contrast to the classical exponential convergence. In particular, finite-time convergence is achieved while generating smooth robot velocities and considering the omnidirectional walking capability of the robot. 
In addition, we propose a hierarchical task-based control scheme, which can simultaneously handle the visual positioning and the obstacle avoidance tasks without affecting the desired time of convergence. The controller is able to activate or inactivate the obstacle avoidance task without generating discontinuous velocity references while the humanoid is walking. Stability of the closed loop for the two task-based control is demonstrated theoretically even during the transitions between the tasks. The proposed approach is generic in the sense that different visual control schemes are supported. We evaluate a homography-based visual servoing for position-based and image-based modalities, as well as for eye-in-hand and eye-to-hand configurations. The experimental evaluation is performed with the humanoid robot NAO.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('5','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_5\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.tandfonline.com\/doi\/abs\/10.1080\/00207179.2015.1129558\" title=\"http:\/\/www.tandfonline.com\/doi\/abs\/10.1080\/00207179.2015.1129558\" target=\"_blank\">http:\/\/www.tandfonline.com\/doi\/abs\/10.1080\/00207179.2015.1129558<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/http:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558\" title=\"Follow DOI:http:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558\" target=\"_blank\">doi:http:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('5','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rios-Cabrera, Reyes;  Morales-Diaz, America 
B.;  Aviles-Vi\u00f1as, Jaime F;  Lopez-Juarez, Ismael<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('6','tp_links')\" style=\"cursor:pointer;\">Robotic GMAW online learning: issues and experiments<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">The International Journal of Advanced Manufacturing Technology, <\/span><span class=\"tp_pub_additional_volume\">vol. 87, <\/span><span class=\"tp_pub_additional_number\">no. 5, <\/span><span class=\"tp_pub_additional_pages\">pp. 2113\u20132134, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1433-3015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_6\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('6','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_6\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('6','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_6\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('6','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_6\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Rios-Cabrera2016,<br \/>\r\ntitle = {Robotic GMAW online learning: issues and experiments},<br \/>\r\nauthor = {Rios-Cabrera, Reyes and Morales-Diaz, America B. 
and Aviles-Vi\\~{n}as, Jaime F and Lopez-Juarez, Ismael },<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1007\/s00170-016-8618-0},<br \/>\r\ndoi = {10.1007\/s00170-016-8618-0},<br \/>\r\nissn = {1433-3015},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {The International Journal of Advanced Manufacturing Technology},<br \/>\r\nvolume = {87},<br \/>\r\nnumber = {5},<br \/>\r\npages = {2113--2134},<br \/>\r\nabstract = {This paper presents three main contributions: (i) an experimental analysis of variables, using well-defined statistical patterns applied to the main parameters of the welding process. (ii) An on-line\/off-line learning and testing method, showing that robots can acquire a useful knowledge base without human intervention to learn and reproduce bead geometries. And finally, (iii) an on-line testing analysis including penetration of the bead, that is used to train an artificial neural network (ANN). For the experiments, an optic camera was used in order to measure bead geometry (width and height). Also real-time computer vision algorithms were implemented to extract training patterns. The proposal was carried out using an industrial KUKA robot and a GMAW type machine inside a manufacturing cell. We present expermental analysis that show different issues and solutions to build an industrial adaptive system for the robotics welding process.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('6','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_6\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This paper presents three main contributions: (i) an experimental analysis of variables, using well-defined statistical patterns applied to the main parameters of the welding process. 
(ii) An on-line\/off-line learning and testing method, showing that robots can acquire a useful knowledge base without human intervention to learn and reproduce bead geometries. And finally, (iii) an on-line testing analysis including penetration of the bead, that is used to train an artificial neural network (ANN). For the experiments, an optic camera was used in order to measure bead geometry (width and height). Also real-time computer vision algorithms were implemented to extract training patterns. The proposal was carried out using an industrial KUKA robot and a GMAW type machine inside a manufacturing cell. We present experimental analysis that show different issues and solutions to build an industrial adaptive system for the robotics welding process.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('6','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_6\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1007\/s00170-016-8618-0\" title=\"http:\/\/dx.doi.org\/10.1007\/s00170-016-8618-0\" target=\"_blank\">http:\/\/dx.doi.org\/10.1007\/s00170-016-8618-0<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1007\/s00170-016-8618-0\" title=\"Follow DOI:10.1007\/s00170-016-8618-0\" target=\"_blank\">doi:10.1007\/s00170-016-8618-0<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('6','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Aviles-Vi\u00f1as, Jaime F;  Rios-Cabrera, Reyes;  Lopez-Juarez, Ismael<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('7','tp_links')\" style=\"cursor:pointer;\">On-line learning of welding bead geometry 
in industrial robots<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">The International Journal of Advanced Manufacturing Technology, <\/span><span class=\"tp_pub_additional_volume\">vol. 83, <\/span><span class=\"tp_pub_additional_number\">no. 1, <\/span><span class=\"tp_pub_additional_pages\">pp. 217\u2013231, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1433-3015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_7\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('7','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_7\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('7','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_7\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('7','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_7\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Aviles-Vi\\~{n}as2016b,<br \/>\r\ntitle = {On-line learning of welding bead geometry in industrial robots},<br \/>\r\nauthor = {Aviles-Vi\\~{n}as, Jaime F and Rios-Cabrera, Reyes and Lopez-Juarez, Ismael },<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1007\/s00170-015-7422-6},<br \/>\r\ndoi = {10.1007\/s00170-015-7422-6},<br \/>\r\nissn = {1433-3015},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {The International Journal of Advanced Manufacturing Technology},<br \/>\r\nvolume = {83},<br \/>\r\nnumber = {1},<br \/>\r\npages = {217--231},<br \/>\r\nabstract = {In this paper, we propose an architecture based on an 
artificial neural network (ANN), to learn welding skills automatically in industrial robots. With the aid of an optic camera and a laser-based sensor, the bead geometry (width and height) is measured. We propose a real-time computer vision algorithm to extract training patterns in order to acquire knowledge to later predict specific geometries. The proposal is implemented and tested in an industrial KUKA KR16 robot and a GMAW type machine within a manufacturing cell. Several data analysis are described as well as off-line and on-line training, learning strategies, and testing experimentation. It is demonstrated during our experiments that, after learning the skill, the robot is able to produce the requested bead geometry even without any knowledge about the welding parameters such as arc voltage and current. We implemented an on-line learning test, where the whole experiments and learning process take only about 4 min. Using this knowledge later, we obtained up to 95 % accuracy in prediction.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('7','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_7\" style=\"display:none;\"><div class=\"tp_abstract_entry\">In this paper, we propose an architecture based on an artificial neural network (ANN), to learn welding skills automatically in industrial robots. With the aid of an optic camera and a laser-based sensor, the bead geometry (width and height) is measured. We propose a real-time computer vision algorithm to extract training patterns in order to acquire knowledge to later predict specific geometries. The proposal is implemented and tested in an industrial KUKA KR16 robot and a GMAW type machine within a manufacturing cell. 
Several data analysis are described as well as off-line and on-line training, learning strategies, and testing experimentation. It is demonstrated during our experiments that, after learning the skill, the robot is able to produce the requested bead geometry even without any knowledge about the welding parameters such as arc voltage and current. We implemented an on-line learning test, where the whole experiments and learning process take only about 4 min. Using this knowledge later, we obtained up to 95 % accuracy in prediction.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('7','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_7\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1007\/s00170-015-7422-6\" title=\"http:\/\/dx.doi.org\/10.1007\/s00170-015-7422-6\" target=\"_blank\">http:\/\/dx.doi.org\/10.1007\/s00170-015-7422-6<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1007\/s00170-015-7422-6\" title=\"Follow DOI:10.1007\/s00170-015-7422-6\" target=\"_blank\">doi:10.1007\/s00170-015-7422-6<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('7','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('8','tp_links')\" style=\"cursor:pointer;\">Robotic Visual Tracking of Relevant Cues in Underwater Environments with Poor Visibility Conditions<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span 
class=\"tp_pub_additional_journal\">Journal of Sensors, <\/span><span class=\"tp_pub_additional_volume\">vol. 2016, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_8\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('8','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_8\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('8','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_8\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('8','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_8\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{maldonado2016robotic,<br \/>\r\ntitle = {Robotic Visual Tracking of Relevant Cues in Underwater Environments with Poor Visibility Conditions},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril},<br \/>\r\nurl = {https:\/\/www.hindawi.com\/journals\/js\/2016\/4265042\/},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {Journal of Sensors},<br \/>\r\nvolume = {2016},<br \/>\r\npublisher = {Hindawi Publishing Corporation},<br \/>\r\nabstract = {Using visual sensors for detecting regions of interest in underwater environments is fundamental for many robotic applications. Particularly, for an autonomous exploration task, an underwater vehicle must be guided towards features that are of interest. If the relevant features can be seen from the distance, then smooth control movements of the vehicle are feasible in order to position itself close enough with the final goal of gathering visual quality images. 
However, it is a challenging task for a robotic system to achieve stable tracking of the same regions since marine environments are unstructured and highly dynamic and usually have poor visibility. In this paper, a framework that robustly detects and tracks regions of interest in real time is presented. We use the chromatic channels of a perceptual uniform color space to detect relevant regions and adapt a visual attention scheme to underwater scenes. For the tracking, we associate with each relevant point superpixel descriptors which are invariant to changes in illumination and shape. The field experiment results have demonstrated that our approach is robust when tested on different visibility conditions and depths in underwater explorations.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('8','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_8\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Using visual sensors for detecting regions of interest in underwater environments is fundamental for many robotic applications. Particularly, for an autonomous exploration task, an underwater vehicle must be guided towards features that are of interest. If the relevant features can be seen from the distance, then smooth control movements of the vehicle are feasible in order to position itself close enough with the final goal of gathering visual quality images. However, it is a challenging task for a robotic system to achieve stable tracking of the same regions since marine environments are unstructured and highly dynamic and usually have poor visibility. In this paper, a framework that robustly detects and tracks regions of interest in real time is presented. 
We use the chromatic channels of a perceptual uniform color space to detect relevant regions and adapt a visual attention scheme to underwater scenes. For the tracking, we associate with each relevant point superpixel descriptors which are invariant to changes in illumination and shape. The field experiment results have demonstrated that our approach is robust when tested on different visibility conditions and depths in underwater explorations.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('8','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_8\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/www.hindawi.com\/journals\/js\/2016\/4265042\/\" title=\"https:\/\/www.hindawi.com\/journals\/js\/2016\/4265042\/\" target=\"_blank\">https:\/\/www.hindawi.com\/journals\/js\/2016\/4265042\/<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('8','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> S\u00e1nchez-Escobedo, Dalila;  Castelan, Mario;  Smith, William A P<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('10','tp_links')\" style=\"cursor:pointer;\">Statistical 3D face shape estimation from occluding contours<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Computer Vision and Image Understanding, <\/span><span class=\"tp_pub_additional_volume\">vol. 142, <\/span><span class=\"tp_pub_additional_pages\">pp. 
111 - 124, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1077-3142<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_10\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('10','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_10\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('10','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_10\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('10','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_10\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{S\\'{a}nchezEscobedo2016111,<br \/>\r\ntitle = {Statistical 3D face shape estimation from occluding contours},<br \/>\r\nauthor = {S\\'{a}nchez-Escobedo, Dalila and Castelan, Mario and Smith, William A P},<br \/>\r\nurl = {http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1077314215001885},<br \/>\r\ndoi = {http:\/\/dx.doi.org\/10.1016\/j.cviu.2015.08.012},<br \/>\r\nissn = {1077-3142},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {Computer Vision and Image Understanding},<br \/>\r\nvolume = {142},<br \/>\r\npages = {111 - 124},<br \/>\r\nabstract = {Abstract This paper addresses the problem of 3D face shape approximation from occluding contours, i.e., the boundaries between the facial region and the background. To this end, a linear regression process that models the relationship between a set of 2D occluding contours and a set of 3D vertices is applied onto the corresponding training sets using Partial Least Squares. 
The result of this step is a regression matrix which is capable of estimating new 3D face point clouds from the out-of-training 2D Cartesian pixel positions of the selected contours. Our approach benefits from the highly correlated spaces spanned by the 3D vertices around the occluding boundaries of a face and their corresponding 2D pixel projections. As a result, the proposed method resembles dense surface shape recovery from missing data. Our technique is evaluated over four scenarios designed to investigate both the influence of the contours included in the training set and the considered number of contours. Qualitative and quantitative experiments demonstrate that using contours outperform the state of the art on the database used in this article. Even using a limited number of contours provides a useful approximation to the 3D face surface.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('10','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_10\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Abstract This paper addresses the problem of 3D face shape approximation from occluding contours, i.e., the boundaries between the facial region and the background. To this end, a linear regression process that models the relationship between a set of 2D occluding contours and a set of 3D vertices is applied onto the corresponding training sets using Partial Least Squares. The result of this step is a regression matrix which is capable of estimating new 3D face point clouds from the out-of-training 2D Cartesian pixel positions of the selected contours. Our approach benefits from the highly correlated spaces spanned by the 3D vertices around the occluding boundaries of a face and their corresponding 2D pixel projections. 
As a result, the proposed method resembles dense surface shape recovery from missing data. Our technique is evaluated over four scenarios designed to investigate both the influence of the contours included in the training set and the considered number of contours. Qualitative and quantitative experiments demonstrate that using contours outperform the state of the art on the database used in this article. Even using a limited number of contours provides a useful approximation to the 3D face surface.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('10','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_10\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1077314215001885\" title=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1077314215001885\" target=\"_blank\">http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1077314215001885<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/http:\/\/dx.doi.org\/10.1016\/j.cviu.2015.08.012\" title=\"Follow DOI:http:\/\/dx.doi.org\/10.1016\/j.cviu.2015.08.012\" target=\"_blank\">doi:http:\/\/dx.doi.org\/10.1016\/j.cviu.2015.08.012<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('10','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Delfin, Josafat;  Becerra, Hector M;  Arechavaleta, Gustavo<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('160','tp_links')\" style=\"cursor:pointer;\">Visual servo walking control for humanoids with finite-time convergence and smooth robot velocities<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> 
<\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">International Journal of Control, <\/span><span class=\"tp_pub_additional_volume\">vol. 89, <\/span><span class=\"tp_pub_additional_number\">no. 7, <\/span><span class=\"tp_pub_additional_pages\">pp. 1342-1358, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_160\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('160','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_160\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('160','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_160\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('160','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_160\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{doi:10.1080\/00207179.2015.1129558,<br \/>\r\ntitle = {Visual servo walking control for humanoids with finite-time convergence and smooth robot velocities},<br \/>\r\nauthor = {Delfin, Josafat and Becerra, Hector M and Arechavaleta, Gustavo},<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558},<br \/>\r\ndoi = {10.1080\/00207179.2015.1129558},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {International Journal of Control},<br \/>\r\nvolume = {89},<br \/>\r\nnumber = {7},<br \/>\r\npages = {1342-1358},<br \/>\r\nabstract = {ABSTRACTIn this paper, we address the problem of humanoid locomotion guided from information of a monocular camera. The goal of the robot is to reach a desired location defined in terms of a target image, i.e., a positioning task. 
The proposed approach allows us to introduce a desired time to complete the positioning task, which is advantageous in contrast to the classical exponential convergence. In particular, finite-time convergence is achieved while generating smooth robot velocities and considering the omnidirectional walking capability of the robot. In addition, we propose a hierarchical task-based control scheme, which can simultaneously handle the visual positioning and the obstacle avoidance tasks without affecting the desired time of convergence. The controller is able to activate or inactivate the obstacle avoidance task without generating discontinuous velocity references while the humanoid is walking. Stability of the closed loop for the two task-based control is demonstrated theoretically even during the transitions between the tasks. The proposed approach is generic in the sense that different visual control schemes are supported. We evaluate a homography-based visual servoing for position-based and image-based modalities, as well as for eye-in-hand and eye-to-hand configurations. The experimental evaluation is performed with the humanoid robot NAO.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('160','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_160\" style=\"display:none;\"><div class=\"tp_abstract_entry\">In this paper, we address the problem of humanoid locomotion guided from information of a monocular camera. The goal of the robot is to reach a desired location defined in terms of a target image, i.e., a positioning task. The proposed approach allows us to introduce a desired time to complete the positioning task, which is advantageous in contrast to the classical exponential convergence. 
In particular, finite-time convergence is achieved while generating smooth robot velocities and considering the omnidirectional walking capability of the robot. In addition, we propose a hierarchical task-based control scheme, which can simultaneously handle the visual positioning and the obstacle avoidance tasks without affecting the desired time of convergence. The controller is able to activate or inactivate the obstacle avoidance task without generating discontinuous velocity references while the humanoid is walking. Stability of the closed loop for the two task-based control is demonstrated theoretically even during the transitions between the tasks. The proposed approach is generic in the sense that different visual control schemes are supported. We evaluate a homography-based visual servoing for position-based and image-based modalities, as well as for eye-in-hand and eye-to-hand configurations. The experimental evaluation is performed with the humanoid robot NAO.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('160','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_160\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558\" title=\"http:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558\" target=\"_blank\">http:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1080\/00207179.2015.1129558\" title=\"Follow DOI:10.1080\/00207179.2015.1129558\" target=\"_blank\">doi:10.1080\/00207179.2015.1129558<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('160','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> 
Benitez Perez, H.;  Lopez-Juarez, Ismael;  Garza-Alanis, P. C.;  Rios-Cabrera, Reyes;  Duran Chavesti, A.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('163','tp_links')\" style=\"cursor:pointer;\">Reconfiguration Distributed Objects in an Intelligent Manufacturing Cell<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">IEEE Latin America Transactions, <\/span><span class=\"tp_pub_additional_volume\">vol. 14, <\/span><span class=\"tp_pub_additional_number\">no. 1, <\/span><span class=\"tp_pub_additional_pages\">pp. 136-146, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1548-0992<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_163\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('163','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_163\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('163','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_163\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('163','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_163\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{7430073,<br \/>\r\ntitle = {Reconfiguration Distributed Objects in an Intelligent Manufacturing Cell},<br \/>\r\nauthor = {Benitez Perez, H. and Lopez-Juarez, Ismael and Garza-Alanis, P. C. 
and Rios-Cabrera, Reyes and Duran Chavesti, A.},<br \/>\r\ndoi = {10.1109\/TLA.2016.7430073},<br \/>\r\nissn = {1548-0992},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {IEEE Latin America Transactions},<br \/>\r\nvolume = {14},<br \/>\r\nnumber = {1},<br \/>\r\npages = {136-146},<br \/>\r\nabstract = {A manufacture system with the abilities of easy reconfiguration and high scalability becomes flexible, dynamic and open to the use of software technologies. To give these abilities to a manufacture cell formed of three industrial robots and two conveyors, a middleware based on the programming standard Common Object Request Broker Architecture (CORBA) was developed, thus creating a distributed manufacture cell, allowing us to have a real production with different final products. In order to optimize the production times of the different products to be manufactured, a product scheduler was developed using the algorithm Earliest Deadline First (EDF) and the support algorithm Deferrable Server (DS). Given that failures may occur on any of the specialized modules of the manufacture system, the self reconfiguration of the manufacture system is something very desirable. This article proposes an algorithm to solve this problem, the algorithm identifies the failures in relation to the time it takes the system to make a product, then makes a modification on the working speed of the plant elements of the specialized modules.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('163','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_163\" style=\"display:none;\"><div class=\"tp_abstract_entry\">A manufacture system with the abilities of easy reconfiguration and high scalability becomes flexible, dynamic and open to the use of software technologies. 
To give these abilities to a manufacture cell formed of three industrial robots and two conveyors, a middleware based on the programming standard Common Object Request Broker Architecture (CORBA) was developed, thus creating a distributed manufacture cell, allowing us to have a real production with different final products. In order to optimize the production times of the different products to be manufactured, a product scheduler was developed using the algorithm Earliest Deadline First (EDF) and the support algorithm Deferrable Server (DS). Given that failures may occur on any of the specialized modules of the manufacture system, the self reconfiguration of the manufacture system is something very desirable. This article proposes an algorithm to solve this problem, the algorithm identifies the failures in relation to the time it takes the system to make a product, then makes a modification on the working speed of the plant elements of the specialized modules.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('163','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_163\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/TLA.2016.7430073\" title=\"Follow DOI:10.1109\/TLA.2016.7430073\" target=\"_blank\">doi:10.1109\/TLA.2016.7430073<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('163','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Cortes-Perez, Noel;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\">A Low-Cost Mirror-Based Active Perception System for Effective Collision Free Underwater Robotic Navigation <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span 
class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), <\/span><span class=\"tp_pub_additional_pages\">pp. 61-68, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_168\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('168','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_168\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Cortx00E9sPx00E9rez2016ALM,<br \/>\r\ntitle = {A Low-Cost Mirror-Based Active Perception System for Effective Collision Free Underwater Robotic Navigation},<br \/>\r\nauthor = {Cortes-Perez, Noel and Torres-Mendez, Luz Abril},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\njournal = {2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)},<br \/>\r\npages = {61-68},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('168','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_conference\">Conferences<\/h3><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Castelan, Mario<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('166','tp_links')\" style=\"cursor:pointer;\">A bag of relevant regions for visual place recognition in challenging environments<\/a> <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">2016 23rd International Conference on Pattern Recognition 
(ICPR), <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_166\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('166','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_166\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('166','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_166\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('166','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_166\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{7899826,<br \/>\r\ntitle = {A bag of relevant regions for visual place recognition in challenging environments},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Castelan, Mario},<br \/>\r\ndoi = {10.1109\/ICPR.2016.7899826},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-12-01},<br \/>\r\nbooktitle = {2016 23rd International Conference on Pattern Recognition (ICPR)},<br \/>\r\npages = {1358-1363},<br \/>\r\nabstract = {In this paper, we present a method for vision-based place recognition in environments with a high content of similar features and that are prone to variations in illumination. The high similarity of features makes difficult the disambiguation between two different places. The novelty of our method relies on using the Bag of Words (BoW) approach to derive an image descriptor from a set of relevant regions, which are extracted using a visual attention algorithm. We name our approach Bag of Relevant Regions (BoRR). The descriptor of each relevant region is built by using a 2D histogram of the chromatic channels of the CIE-Lab color space. 
We have compared our results with those using state of the art descriptors that include the BoW and demonstrate that our approach performs better in most of the cases.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('166','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_166\" style=\"display:none;\"><div class=\"tp_abstract_entry\">In this paper, we present a method for vision-based place recognition in environments with a high content of similar features and that are prone to variations in illumination. The high similarity of features makes difficult the disambiguation between two different places. The novelty of our method relies on using the Bag of Words (BoW) approach to derive an image descriptor from a set of relevant regions, which are extracted using a visual attention algorithm. We name our approach Bag of Relevant Regions (BoRR). The descriptor of each relevant region is built by using a 2D histogram of the chromatic channels of the CIE-Lab color space. 
We have compared our results with those using state of the art descriptors that include the BoW and demonstrate that our approach performs better in most of the cases.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('166','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_166\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/ICPR.2016.7899826\" title=\"Follow DOI:10.1109\/ICPR.2016.7899826\" target=\"_blank\">doi:10.1109\/ICPR.2016.7899826<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('166','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Delfin, Josafat;  Becerra, H\u00e9ctor M;  Arechavaleta, Gustavo<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('3','tp_links')\" style=\"cursor:pointer;\">Humanoid Localization and Navigation using a Visual Memory<\/a> <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">IEEE-RAS 16th International Conference on Humanoid Robots, <\/span><span class=\"tp_pub_additional_publisher\">IEEE, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 2164-0580<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_3\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('3','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_3\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('3','tp_links')\" title=\"Show links and resources\" 
style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_3\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('3','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_3\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{conf:Delfin2016,<br \/>\r\ntitle = {Humanoid Localization and Navigation using a Visual Memory},<br \/>\r\nauthor = {Delfin, Josafat and Becerra, H\\'{e}ctor M and Arechavaleta, Gustavo },<br \/>\r\ndoi = {10.1109\/HUMANOIDS.2016.7803354},<br \/>\r\nissn = {2164-0580},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-11-15},<br \/>\r\nbooktitle = {IEEE-RAS 16th International Conference on Humanoid Robots},<br \/>\r\npages = {725-731},<br \/>\r\npublisher = {IEEE},<br \/>\r\nabstract = {A visual memory (VM) is a topological map in which a set of key images organized in form of a graph represents an environment. In this paper, a navigation strategy for humanoid robots addressing the problems of localization, visual path planning and path following based on a VM is proposed. Assuming that the VM is given, the main contributions of the paper are: 1) A novel pure vision-based localization method. 2) The introduction of the estimated rotation between key images in the path planning stage to benefit paths with enough visual information and with less effort of robot rotation. 3) The integration of the complete navigation strategy and its experimental evaluation with a Nao robot in an unstructured environment. 
The humanoid robot is modeled as a holonomic system and the strategy might be used in different scenarios like corridors, uncluttered or cluttered environments.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('3','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_3\" style=\"display:none;\"><div class=\"tp_abstract_entry\">A visual memory (VM) is a topological map in which a set of key images organized in form of a graph represents an environment. In this paper, a navigation strategy for humanoid robots addressing the problems of localization, visual path planning and path following based on a VM is proposed. Assuming that the VM is given, the main contributions of the paper are: 1) A novel pure vision-based localization method. 2) The introduction of the estimated rotation between key images in the path planning stage to benefit paths with enough visual information and with less effort of robot rotation. 3) The integration of the complete navigation strategy and its experimental evaluation with a Nao robot in an unstructured environment. 
The humanoid robot is modeled as a holonomic system and the strategy might be used in different scenarios like corridors, uncluttered or cluttered environments.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('3','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_3\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/HUMANOIDS.2016.7803354\" title=\"Follow DOI:10.1109\/HUMANOIDS.2016.7803354\" target=\"_blank\">doi:10.1109\/HUMANOIDS.2016.7803354<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('3','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\">A Bag of Relevant Regions Model for Place Recognition in Coral Reefs <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">OCEANS 2016, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_9\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('9','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_9\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{maldonado2016bag,<br \/>\r\ntitle = {A Bag of Relevant Regions Model for Place Recognition in Coral Reefs},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril },<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\nbooktitle = {OCEANS 2016},<br \/>\r\npages = 
{1--5},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('9','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_inproceedings\">Proceedings Articles<\/h3><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-M\u00e9ndez, Luz Abril<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('165','tp_links')\" style=\"cursor:pointer;\">A bag of relevant regions model for visual place recognition in coral reefs<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2016 MTS\/IEEE Monterey, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1-5, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_165\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('165','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_165\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('165','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_165\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('165','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_165\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{7761188,<br \/>\r\ntitle = {A bag of relevant regions model for visual place recognition in coral reefs},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-M\\'{e}ndez, Luz Abril},<br \/>\r\ndoi = {10.1109\/OCEANS.2016.7761188},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-09-01},<br \/>\r\nbooktitle = {OCEANS 2016 MTS\/IEEE Monterey},<br \/>\r\npages = {1-5},<br \/>\r\nabstract = {Vision-based place recognition in underwater environments is a key component for autonomous robotic exploration. However, this task can be very challenging due to the inherent properties of this kind of places such as: color distortion, poor visibility, perceptual aliasing and dynamic illumination. In this paper, we present a method for vision-based place recognition in coral reefs. Our method relies on using the Bag-of-Words (BoW) approach to derive a descriptor, for the whole image, from a set of relevant regions, which are extracted by utilizing a visual attention algorithm. The descriptor for each relevant region is built by using an histogram of the chromatic channels of the CIE-Lab color space. 
We present results of our method for a place recognition task in real life videos as well as comparisons of our method against other popular techniques. It can be seen that our approach performs better in most of the cases.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('165','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_165\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Vision-based place recognition in underwater environments is a key component for autonomous robotic exploration. However, this task can be very challenging due to the inherent properties of this kind of places such as: color distortion, poor visibility, perceptual aliasing and dynamic illumination. In this paper, we present a method for vision-based place recognition in coral reefs. Our method relies on using the Bag-of-Words (BoW) approach to derive a descriptor, for the whole image, from a set of relevant regions, which are extracted by utilizing a visual attention algorithm. The descriptor for each relevant region is built by using an histogram of the chromatic channels of the CIE-Lab color space. We present results of our method for a place recognition task in real life videos as well as comparisons of our method against other popular techniques. 
It can be seen that our approach performs better in most of the cases.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('165','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_165\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/OCEANS.2016.7761188\" title=\"Follow DOI:10.1109\/OCEANS.2016.7761188\" target=\"_blank\">doi:10.1109\/OCEANS.2016.7761188<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('165','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Ponce-Hinestroza, A. N.;  Torres-Mendez, Luz Abril;  Drews, Paulo<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('167','tp_links')\" style=\"cursor:pointer;\">A statistical learning approach for underwater color restoration with adaptive training based on visual attention<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2016 MTS\/IEEE Monterey, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1-6, <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_167\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('167','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_167\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('167','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_167\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('167','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_167\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{7761187,<br \/>\r\ntitle = {A statistical learning approach for underwater color restoration with adaptive training based on visual attention},<br \/>\r\nauthor = {Ponce-Hinestroza, A. N. and Torres-Mendez, Luz Abril and Drews, Paulo},<br \/>\r\ndoi = {10.1109\/OCEANS.2016.7761187},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-09-01},<br \/>\r\nbooktitle = {OCEANS 2016 MTS\/IEEE Monterey},<br \/>\r\npages = {1-6},<br \/>\r\nabstract = {In most artificial vision systems the quality of acquired images is directly related with the amount of information that can be obtained from them, and, particularly in underwater robotics applications involving monitoring and inspection tasks this is crucial. Statistical learning methods like Markov Random Fields with Belief Propagation (MRF-BP) provide a solution by using existing essential correlations in training sets. However, as in any restoration\/correction method for real applications, it is not possible to have color ground truth available on-line. 
In this paper, we present a MRF-BP model formulated in the chromatic domain of underwater scenes such that we synthesize the ground truth color to train the model and maximize the capabilities of our method. The generated ground truth introduces some improvements to existing color correction methods and visual attention considerations which also helps to choose a small size training set for the MRF-BP model. Feasibility of our approach is shown from the results in which a good color discrimination is observed even in poor visibility conditions.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('167','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_167\" style=\"display:none;\"><div class=\"tp_abstract_entry\">In most artificial vision systems the quality of acquired images is directly related with the amount of information that can be obtained from them, and, particularly in underwater robotics applications involving monitoring and inspection tasks this is crucial. Statistical learning methods like Markov Random Fields with Belief Propagation (MRF-BP) provide a solution by using existing essential correlations in training sets. However, as in any restoration\/correction method for real applications, it is not possible to have color ground truth available on-line. In this paper, we present a MRF-BP model formulated in the chromatic domain of underwater scenes such that we synthesize the ground truth color to train the model and maximize the capabilities of our method. The generated ground truth introduces some improvements to existing color correction methods and visual attention considerations which also helps to choose a small size training set for the MRF-BP model. 
Feasibility of our approach is shown from the results in which a good color discrimination is observed even in poor visibility conditions.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('167','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_167\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/OCEANS.2016.7761187\" title=\"Follow DOI:10.1109\/OCEANS.2016.7761187\" target=\"_blank\">doi:10.1109\/OCEANS.2016.7761187<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('167','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Ponce-Hinestroza, A-N;  Torres-Mendez, Luz Abril;  Drews, Paulo<\/p><p class=\"tp_pub_title\">A statistical learning approach for underwater color restoration with adaptive training based on visual attention <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2016 MTS\/IEEE Monterey, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1\u20136, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_11\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('11','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_11\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{ponce2016oceansb,<br \/>\r\ntitle = {A statistical learning approach for underwater color restoration with adaptive training based on visual attention},<br \/>\r\nauthor = {Ponce-Hinestroza, A-N and Torres-Mendez, Luz Abril and Drews, Paulo },<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\nbooktitle = {OCEANS 2016 MTS\/IEEE Monterey},<br \/>\r\npages = {1--6},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('11','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Ponce-Hinestroza, A-N;  Torres-Mendez, Luz Abril;  Drews, Paulo<\/p><p class=\"tp_pub_title\">Using a MRF-BP Model with Color Adaptive Training for Underwater Color Restoration <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">ICPR 2016 IEEE Cancun, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1\u20136, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_12\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('12','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_12\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{ponce2016icpr,<br \/>\r\ntitle = {Using a MRF-BP Model with Color Adaptive Training for Underwater Color Restoration},<br \/>\r\nauthor = {Ponce-Hinestroza, A-N and Torres-Mendez, Luz Abril and Drews, Paulo},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-01-01},<br \/>\r\nbooktitle = {ICPR 2016 IEEE Cancun},<br \/>\r\npages = {1--6},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('12','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_proceedings\">Proceedings<\/h3><div class=\"tp_publication tp_publication_proceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Mirelez-Delgado, Flabio;  Morales-Diaz, America B.;  Rios-Cabrera, Reyes;  Gutierrez-Flores, Hugo<\/p><p class=\"tp_pub_title\">Towards intelligent robotic agents for cooperative tasks <span class=\"tp_pub_type tp_  proceedings\">Proceedings<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_139\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('139','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_139\" style=\"display:none;\"><div 
class=\"tp_bibtex_entry\"><pre>@proceedings{Mirelez-Delgado2016,<br \/>\r\ntitle = {Towards intelligent robotic agents for cooperative tasks},<br \/>\r\nauthor = {Mirelez-Delgado, Flabio and Morales-Diaz, America B. and Rios-Cabrera, Reyes and Gutierrez-Flores, Hugo},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-06-06},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {proceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('139','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_proceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Mirelez-Delgado, Flabio;  Morales-Diaz, America B.;  Rios-Cabrera, Reyes<\/p><p class=\"tp_pub_title\">Kinematic control for an omnidirectional mobile manipulator <span class=\"tp_pub_type tp_  proceedings\">Proceedings<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_year\">2016<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_157\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('157','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_157\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@proceedings{Mirelez-Delgado2016,<br \/>\r\ntitle = {Kinematic control for an omnidirectional mobile manipulator},<br \/>\r\nauthor = {Mirelez-Delgado, Flabio and Morales-Diaz, America B. 
and Rios-Cabrera, Reyes},<br \/>\r\nyear  = {2016},<br \/>\r\ndate = {2016-06-06},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {proceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('157','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2015\">2015<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Journal Articles<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Luna-Aguilar, C. A.;  Castelan, Mario;  Morales-Diaz, America B.;  Nadeu, C.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('136','tp_links')\" style=\"cursor:pointer;\">Incorporaci\u00f3n de sensores ac\u00fasticos en el control de regulaci\u00f3n a un punto de un robot m\u00f3vil<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_pages\">pp. 582-587, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_136\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('136','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_136\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('136','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_136\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Luna-Aguilar2015,<br \/>\r\ntitle = {Incorporaci\\'{o}n de sensores ac\\'{u}sticos en el control de regulaci\\'{o}n a un punto de un robot m\\'{o}vil},<br \/>\r\nauthor = {Luna-Aguilar, C. A. and Castelan, Mario and Morales-Diaz, America B. 
and Nadeu, C.},<br \/>\r\nurl = {https:\/\/upcommons.upc.edu\/handle\/2117\/102668},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-06-06},<br \/>\r\npages = {582-587},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('136','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_136\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/upcommons.upc.edu\/handle\/2117\/102668\" title=\"https:\/\/upcommons.upc.edu\/handle\/2117\/102668\" target=\"_blank\">https:\/\/upcommons.upc.edu\/handle\/2117\/102668<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('136','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Aviles-Vi\u00f1as, Jaime F;  Lopez-Juarez, Ismael;  Rios-Cabrera, Reyes<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('13','tp_links')\" style=\"cursor:pointer;\">Acquisition of welding skills in industrial robots<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Industrial Robot: An International Journal, <\/span><span class=\"tp_pub_additional_volume\">vol. 42, <\/span><span class=\"tp_pub_additional_number\">no. 2, <\/span><span class=\"tp_pub_additional_pages\">pp. 
156-166, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_13\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('13','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_13\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('13','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_13\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('13','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_13\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{doi:10.1108\/IR-09-2014-0395,<br \/>\r\ntitle = {Acquisition of welding skills in industrial robots},<br \/>\r\nauthor = {Aviles-Vi\\~{n}as, Jaime F and Lopez-Juarez, Ismael and Rios-Cabrera, Reyes },<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1108\/IR-09-2014-0395},<br \/>\r\ndoi = {10.1108\/IR-09-2014-0395},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\njournal = {Industrial Robot: An International Journal},<br \/>\r\nvolume = {42},<br \/>\r\nnumber = {2},<br \/>\r\npages = {156-166},<br \/>\r\nabstract = {Purpose \\textendash The purpose of this paper was to propose a method based on an Artificial Neural Network and a real-time vision algorithm, to learn welding skills in industrial robotics. Design\/methodology\/approach \\textendash By using an optic camera to measure the bead geometry (width and height), the authors propose a real-time computer vision algorithm to extract training patterns and to enable an industrial robot to acquire and learn autonomously the welding skill. To test the approach, an industrial KUKA robot and a welding gas metal arc welding machine were used in a manufacturing cell. 
Findings \\textendash Several data analyses are described, showing empirically that industrial robots can acquire the skill even if the specific welding parameters are unknown. Research limitations\/implications \\textendash The approach considers only stringer beads. Weave bead and bead penetration are not considered. Practical implications \\textendash With the proposed approach, it is possible to learn specific welding parameters despite of the material, type of robot or welding machine. This is due to the fact that the feedback system produces automatic measurements that are labelled prior to the learning process. Originality\/value \\textendash The main contribution is that the complex learning process is reduced into an input-process-output system, where the process part is learnt automatically without human supervision, by registering the patterns with an automatically calibrated vision system.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('13','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_13\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Purpose \u2013 The purpose of this paper was to propose a method based on an Artificial Neural Network and a real-time vision algorithm, to learn welding skills in industrial robotics. Design\/methodology\/approach \u2013 By using an optic camera to measure the bead geometry (width and height), the authors propose a real-time computer vision algorithm to extract training patterns and to enable an industrial robot to acquire and learn autonomously the welding skill. To test the approach, an industrial KUKA robot and a welding gas metal arc welding machine were used in a manufacturing cell. 
Findings \u2013 Several data analyses are described, showing empirically that industrial robots can acquire the skill even if the specific welding parameters are unknown. Research limitations\/implications \u2013 The approach considers only stringer beads. Weave bead and bead penetration are not considered. Practical implications \u2013 With the proposed approach, it is possible to learn specific welding parameters despite of the material, type of robot or welding machine. This is due to the fact that the feedback system produces automatic measurements that are labelled prior to the learning process. Originality\/value \u2013 The main contribution is that the complex learning process is reduced into an input-process-output system, where the process part is learnt automatically without human supervision, by registering the patterns with an automatically calibrated vision system.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('13','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_13\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1108\/IR-09-2014-0395\" title=\"http:\/\/dx.doi.org\/10.1108\/IR-09-2014-0395\" target=\"_blank\">http:\/\/dx.doi.org\/10.1108\/IR-09-2014-0395<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1108\/IR-09-2014-0395\" title=\"Follow DOI:10.1108\/IR-09-2014-0395\" target=\"_blank\">doi:10.1108\/IR-09-2014-0395<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('13','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Navarro-Gonzalez, Jose Luis;  Lopez-Juarez, Ismael;  Ordaz-Hernandez, Keny;  Rios-Cabrera, Reyes<\/p><p class=\"tp_pub_title\"><a 
class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('14','tp_links')\" style=\"cursor:pointer;\">On-line incremental learning for unknown conditions during assembly operations with industrial robots<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Evolving Systems, <\/span><span class=\"tp_pub_additional_volume\">vol. 6, <\/span><span class=\"tp_pub_additional_number\">no. 2, <\/span><span class=\"tp_pub_additional_pages\">pp. 101\u2013114, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1868-6486<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_14\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('14','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_14\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('14','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_14\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('14','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_14\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Navarro-Gonzalez2015,<br \/>\r\ntitle = {On-line incremental learning for unknown conditions during assembly operations with industrial robots},<br \/>\r\nauthor = {Navarro-Gonzalez, Jose Luis and Lopez-Juarez, Ismael and Ordaz-Hernandez, Keny and Rios-Cabrera, Reyes },<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1007\/s12530-014-9125-x},<br \/>\r\ndoi = {10.1007\/s12530-014-9125-x},<br \/>\r\nissn = {1868-6486},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\njournal = {Evolving 
Systems},<br \/>\r\nvolume = {6},<br \/>\r\nnumber = {2},<br \/>\r\npages = {101--114},<br \/>\r\nabstract = {The assembly operation using industrial robots can be accomplished successfully in well-structured environments where the mating pair location is known in advance. However, in real-world scenarios there are uncertainties associated to sensing, control and modelling errors that make the assembly task very complex. In addition, there are also unmodeled uncertainties that have to be taken into account for an effective control algorithm to succeed. Among these uncertainties, it can be mentioned disturbances, backlash and aging of mechanisms. In this paper, a method to overcome the effect of those uncertainties based on the Fuzzy ARTMAP artificial neural network (ANN) to successfully accomplish the assembly task is proposed. Experimental work is reported using an industrial 6 DOF robot arm in conjunction with a vision system for part location and wrist force\/torque sensing data for assembly. Force data is fed into an ANN evolving controller during a typical peg in hole (PIH) assembly operation. The controller uses an incremental learning mechanism that is solely guided by the sensed forces. In this article, two approaches are presented in order to compare the incremental learning capability of the manipulator. The first approach uses a primitive knowledge base (PKB) containing 16 primitive movements to learn online the first insertion. During assembly, the manipulator learns new patterns according to the learning criteria which turn the PKB into an enhanced knowledge base (EKB). During a second insertion the controller uses effectively the EKB and operation improves. The second approach employs minimum information (it contains only the assembly direction) and the process starts from scratch. After several operations, that knowledge base increases by including only the needed patterns to perform the insertion. 
Experimental results showed that the evolving controller is able to assemble the matting pairs enhancing its knowledge whenever it is needed depending on the part geometry and level of expertise. Our approach is demonstrated through several PIH operations with different tolerances and part geometry. As the robot's expertise evolves, the PIH operation is carried out faster with shorter assembly trajectories.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('14','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_14\" style=\"display:none;\"><div class=\"tp_abstract_entry\">The assembly operation using industrial robots can be accomplished successfully in well-structured environments where the mating pair location is known in advance. However, in real-world scenarios there are uncertainties associated to sensing, control and modelling errors that make the assembly task very complex. In addition, there are also unmodeled uncertainties that have to be taken into account for an effective control algorithm to succeed. Among these uncertainties, it can be mentioned disturbances, backlash and aging of mechanisms. In this paper, a method to overcome the effect of those uncertainties based on the Fuzzy ARTMAP artificial neural network (ANN) to successfully accomplish the assembly task is proposed. Experimental work is reported using an industrial 6 DOF robot arm in conjunction with a vision system for part location and wrist force\/torque sensing data for assembly. Force data is fed into an ANN evolving controller during a typical peg in hole (PIH) assembly operation. The controller uses an incremental learning mechanism that is solely guided by the sensed forces. In this article, two approaches are presented in order to compare the incremental learning capability of the manipulator. 
The first approach uses a primitive knowledge base (PKB) containing 16 primitive movements to learn online the first insertion. During assembly, the manipulator learns new patterns according to the learning criteria which turn the PKB into an enhanced knowledge base (EKB). During a second insertion the controller uses effectively the EKB and operation improves. The second approach employs minimum information (it contains only the assembly direction) and the process starts from scratch. After several operations, that knowledge base increases by including only the needed patterns to perform the insertion. Experimental results showed that the evolving controller is able to assemble the mating pairs enhancing its knowledge whenever it is needed depending on the part geometry and level of expertise. Our approach is demonstrated through several PIH operations with different tolerances and part geometry. As the robot's expertise evolves, the PIH operation is carried out faster with shorter assembly trajectories.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('14','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_14\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1007\/s12530-014-9125-x\" title=\"http:\/\/dx.doi.org\/10.1007\/s12530-014-9125-x\" target=\"_blank\">http:\/\/dx.doi.org\/10.1007\/s12530-014-9125-x<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1007\/s12530-014-9125-x\" title=\"Follow DOI:10.1007\/s12530-014-9125-x\" target=\"_blank\">doi:10.1007\/s12530-014-9125-x<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('14','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p 
class=\"tp_pub_author\"> Navarro-Gonzalez, Jose Luis;  Lopez-Juarez, Ismael;  Rios-Cabrera, Reyes;  Ordaz-Hernandez, Keny<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('15','tp_links')\" style=\"cursor:pointer;\">On-line knowledge acquisition and enhancement in robotic assembly tasks<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Robotics and Computer-Integrated Manufacturing, <\/span><span class=\"tp_pub_additional_volume\">vol. 33, <\/span><span class=\"tp_pub_additional_pages\">pp. 78 - 89, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 0736-5845<\/span><span class=\"tp_pub_additional_note\">, (Special Issue on Knowledge Driven Robotics and Manufacturing)<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_15\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('15','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_15\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('15','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_15\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('15','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_15\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{NavarroGonzalez201578b,<br \/>\r\ntitle = {On-line knowledge acquisition and enhancement in robotic assembly tasks},<br \/>\r\nauthor = {Navarro-Gonzalez, Jose Luis and Lopez-Juarez, Ismael and Rios-Cabrera, Reyes and Ordaz-Hernandez, Keny},<br \/>\r\nurl = 
{http:\/\/www.sciencedirect.com\/science\/article\/pii\/S073658451400074X},<br \/>\r\ndoi = {http:\/\/dx.doi.org\/10.1016\/j.rcim.2014.08.013},<br \/>\r\nissn = {0736-5845},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\njournal = {Robotics and Computer-Integrated Manufacturing},<br \/>\r\nvolume = {33},<br \/>\r\npages = {78 - 89},<br \/>\r\nabstract = {Abstract Industrial robots are reliable machines for manufacturing tasks such as welding, painting, assembly, palletizing or kitting operations. They are traditionally programmed by an operator using a teach pendant in a point-to-point scheme with limited sensing capabilities such as industrial vision systems and force\/torque sensing. The use of these sensing capabilities is associated to the particular robot controller, operative systems and programming language. Today, robots can react to environment changes specific to their task domain but are still unable to learn skills to effectively use their current knowledge. The need for such a skill in unstructured environments where knowledge can be acquired and enhanced is desirable so that robots can effectively interact in multimodal real-world scenarios. In this article we present a multimodal assembly controller (MAC) approach to embed and effectively enhance knowledge into industrial robots working in multimodal manufacturing scenarios such as assembly during kitting operations with varying shapes and tolerances. During learning, the robot uses its vision and force capabilities resembling a human operator carrying out the same operation. The approach consists of using a MAC based on the Fuzzy ARTMAP artificial neural network in conjunction with a knowledge base. The robot starts the operation having limited initial knowledge about what task it has to accomplish. During the operation, the robot learns the skill for recognising assembly parts and how to assemble them. 
The skill acquisition is evaluated by counting the steps to complete the assembly, length of the followed assembly path and compliant behaviour. The performance improves with time so that the robot becomes an expert demonstrated by the assembly of a kit with different part geometries. The kit is unknown by the robot at the beginning of the operation; therefore, the kit type, location and orientation are unknown as well as the parts to be assembled since they are randomly fed by a conveyor belt.},<br \/>\r\nnote = {Special Issue on Knowledge Driven Robotics and Manufacturing},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('15','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_15\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Abstract Industrial robots are reliable machines for manufacturing tasks such as welding, painting, assembly, palletizing or kitting operations. They are traditionally programmed by an operator using a teach pendant in a point-to-point scheme with limited sensing capabilities such as industrial vision systems and force\/torque sensing. The use of these sensing capabilities is associated to the particular robot controller, operative systems and programming language. Today, robots can react to environment changes specific to their task domain but are still unable to learn skills to effectively use their current knowledge. The need for such a skill in unstructured environments where knowledge can be acquired and enhanced is desirable so that robots can effectively interact in multimodal real-world scenarios. 
In this article we present a multimodal assembly controller (MAC) approach to embed and effectively enhance knowledge into industrial robots working in multimodal manufacturing scenarios such as assembly during kitting operations with varying shapes and tolerances. During learning, the robot uses its vision and force capabilities resembling a human operator carrying out the same operation. The approach consists of using a MAC based on the Fuzzy ARTMAP artificial neural network in conjunction with a knowledge base. The robot starts the operation having limited initial knowledge about what task it has to accomplish. During the operation, the robot learns the skill for recognising assembly parts and how to assemble them. The skill acquisition is evaluated by counting the steps to complete the assembly, length of the followed assembly path and compliant behaviour. The performance improves with time so that the robot becomes an expert demonstrated by the assembly of a kit with different part geometries. 
The kit is unknown by the robot at the beginning of the operation; therefore, the kit type, location and orientation are unknown as well as the parts to be assembled since they are randomly fed by a conveyor belt.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('15','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_15\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S073658451400074X\" title=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S073658451400074X\" target=\"_blank\">http:\/\/www.sciencedirect.com\/science\/article\/pii\/S073658451400074X<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/http:\/\/dx.doi.org\/10.1016\/j.rcim.2014.08.013\" title=\"Follow DOI:http:\/\/dx.doi.org\/10.1016\/j.rcim.2014.08.013\" target=\"_blank\">doi:http:\/\/dx.doi.org\/10.1016\/j.rcim.2014.08.013<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('15','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Castelan, Mario;  Cruz-Perez, Elier;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('16','tp_links')\" style=\"cursor:pointer;\">A Photometric Sampling Strategy for Reflectance Characterization and Transference<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Computaci\u00f3n y Sistemas, <\/span><span class=\"tp_pub_additional_volume\">vol. 19, <\/span><span class=\"tp_pub_additional_number\">no. 
2, <\/span><span class=\"tp_pub_additional_pages\">pp. 255-272, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_16\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('16','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_16\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('16','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_16\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Castelan2015,<br \/>\r\ntitle = {A Photometric Sampling Strategy for Reflectance Characterization and Transference},<br \/>\r\nauthor = {Castelan, Mario and Cruz-Perez, Elier and Torres-Mendez, Luz Abril},<br \/>\r\nurl = {http:\/\/www.cys.cic.ipn.mx\/ojs\/index.php\/CyS\/article\/view\/1944},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\njournal = {Computaci\\'{o}n y Sistemas},<br \/>\r\nvolume = {19},<br \/>\r\nnumber = {2},<br \/>\r\npages = {255-272},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('16','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_16\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.cys.cic.ipn.mx\/ojs\/index.php\/CyS\/article\/view\/1944\" title=\"http:\/\/www.cys.cic.ipn.mx\/ojs\/index.php\/CyS\/article\/view\/1944\" target=\"_blank\">http:\/\/www.cys.cic.ipn.mx\/ojs\/index.php\/CyS\/article\/view\/1944<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" 
onclick=\"teachpress_pub_showhide('16','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_conference\">Conferences<\/h3><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Luna-Aguilar, C. A.;  Castelan, Mario;  Morales-Diaz, America B.;  Nadeu, C.<\/p><p class=\"tp_pub_title\">Incorporaci\u00f3n de sensores ac\u00fasticos en el control de regulaci\u00f3n a un punto de un robot m\u00f3vil <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_137\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('137','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_137\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{Luna-Aguilar2015b,<br \/>\r\ntitle = {Incorporaci\\'{o}n de sensores ac\\'{u}sticos en el control de regulaci\\'{o}n a un punto de un robot m\\'{o}vil},<br \/>\r\nauthor = {Luna-Aguilar, C. A. and Castelan, Mario and Morales-Diaz, America B. 
and Nadeu, C.},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-06-06},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('137','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Rodriguez-Telles, Francisco G<\/p><p class=\"tp_pub_title\">Ethologically inspired reactive exploration of coral reefs with collision avoidance: Bridging the gap between human and robot spatial understanding of unstructured environments <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">Intelligent Robots and Systems (IROS), 2015 IEEE\/RSJ International Conference on, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_17\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('17','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_17\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{maldonado2015ethologically,<br \/>\r\ntitle = {Ethologically inspired reactive exploration of coral reefs with collision avoidance: Bridging the gap between human and robot spatial understanding of unstructured environments},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Rodriguez-Telles, Francisco G},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\nbooktitle = {Intelligent Robots and Systems (IROS), 2015 IEEE\/RSJ International Conference on},<br \/>\r\npages = {4872--4879},<br 
\/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('17','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\">Autonomous robotic exploration of coral reefs using a visual attention-driven strategy for detecting and tracking regions of interest <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">OCEANS 2015-Genova, <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_18\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('18','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_18\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{maldonado2015autonomous,<br \/>\r\ntitle = {Autonomous robotic exploration of coral reefs using a visual attention-driven strategy for detecting and tracking regions of interest},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril <br \/>\r\n},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\nbooktitle = {OCEANS 2015-Genova},<br \/>\r\npages = {1--5},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('18','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" 
id=\"tp_h3_inproceedings\">Proceedings Articles<\/h3><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Labastida-Vald\u00e9s, L.;  Torres-Mendez, Luz Abril;  Hutchinson, S. A.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('106','tp_links')\" style=\"cursor:pointer;\">Using the motion perceptibility measure to classify points of interest for visual-based AUV guidance in a reef ecosystem<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2015 - MTS\/IEEE Washington, <\/span><span class=\"tp_pub_additional_pages\">pp. 1-6, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_106\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('106','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_106\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('106','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_106\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{7404605,<br \/>\r\ntitle = {Using the motion perceptibility measure to classify points of interest for visual-based AUV guidance in a reef ecosystem},<br \/>\r\nauthor = {Labastida-Vald\\'{e}s, L. and Torres-Mendez, Luz Abril and Hutchinson, S. 
A.},<br \/>\r\nurl = {http:\/\/ieeexplore.ieee.org\/document\/7404605\/},<br \/>\r\ndoi = {10.23919\/OCEANS.2015.7404605},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-10-01},<br \/>\r\nbooktitle = {OCEANS 2015 - MTS\/IEEE Washington},<br \/>\r\npages = {1-6},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('106','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_106\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/ieeexplore.ieee.org\/document\/7404605\/\" title=\"http:\/\/ieeexplore.ieee.org\/document\/7404605\/\" target=\"_blank\">http:\/\/ieeexplore.ieee.org\/document\/7404605\/<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.23919\/OCEANS.2015.7404605\" title=\"Follow DOI:10.23919\/OCEANS.2015.7404605\" target=\"_blank\">doi:10.23919\/OCEANS.2015.7404605<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('106','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Romero-Mart\u00ednez, C. 
E.;  Torres-Mendez, Luz Abril;  Martinez-Garcia, Edgar A.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('108','tp_links')\" style=\"cursor:pointer;\">Modeling motor-perceptual behaviors to enable intuitive paths in an aquatic robot<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">OCEANS 2015 - MTS\/IEEE Washington, <\/span><span class=\"tp_pub_additional_pages\">pp. 1-5, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_108\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('108','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_108\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('108','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_108\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{7404424,<br \/>\r\ntitle = {Modeling motor-perceptual behaviors to enable intuitive paths in an aquatic robot},<br \/>\r\nauthor = {Romero-Mart\\'{i}nez, C. E. 
and Torres-Mendez, Luz Abril and Martinez-Garcia, Edgar A.},<br \/>\r\nurl = {http:\/\/ieeexplore.ieee.org\/document\/7404424\/},<br \/>\r\ndoi = {10.23919\/OCEANS.2015.7404424},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-10-01},<br \/>\r\nbooktitle = {OCEANS 2015 - MTS\/IEEE Washington},<br \/>\r\npages = {1-5},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('108','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_108\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/ieeexplore.ieee.org\/document\/7404424\/\" title=\"http:\/\/ieeexplore.ieee.org\/document\/7404424\/\" target=\"_blank\">http:\/\/ieeexplore.ieee.org\/document\/7404424\/<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.23919\/OCEANS.2015.7404424\" title=\"Follow DOI:10.23919\/OCEANS.2015.7404424\" target=\"_blank\">doi:10.23919\/OCEANS.2015.7404424<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('108','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Mirelez-Delgado, Flabio;  Morales-Diaz, America B.;  Rios-Cabrera, Reyes;  Perez-Villeda, Hector Manuel<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('105','tp_links')\" style=\"cursor:pointer;\">Control Servovisual de un Kuka youBot para la manipulacion y traslado de objetos<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span 
class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_105\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('105','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_105\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('105','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_105\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('105','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_105\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{Mireles-Delgado2015,<br \/>\r\ntitle = {Control Servovisual de un Kuka youBot para la manipulacion y traslado de objetos},<br \/>\r\nauthor = {Mirelez-Delgado, Flabio and Morales-Diaz, America B. and Rios-Cabrera, Reyes and Perez-Villeda, Hector Manuel},<br \/>\r\nurl = {http:\/\/amca.mx\/memorias\/amca2015\/articulos\/0044_MiCT3-04.pdf},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-01-01},<br \/>\r\nabstract = {Este trabajo presenta la implementaci\u00f3n de un Control Servovisual Basado en<br \/>\r\nImagen en un robot manipulador m\u00f3vil omnidireccional Kuka youBot. El sistema de visi\u00f3n<br \/>\r\nest\u00e1 compuesto por un sensor RGB-D Asus Xtion Pror. La ley de control implementada tiene<br \/>\r\nla estructura de un PD cl\u00e1sico para la plataforma m\u00f3vil. El manipulador m\u00f3vil se desplaza a<br \/>\r\npuntos 3D conocidos mediante el c\u00e1lculo de cinem\u00e1tica inversa. 
En este art\u00edculo se demuestra<br \/>\r\nla efectividad del algoritmo en la localizaci\u00f3n del objeto de inter\u00e9s as\u00ed como en la manipulaci\u00f3n<br \/>\r\ndel mismo para llevarlo de su lugar original a otro espacio deseado.<br \/>\r\n},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('105','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_105\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Este trabajo presenta la implementaci\u00f3n de un Control Servovisual Basado en<br \/>\r\nImagen en un robot manipulador m\u00f3vil omnidireccional Kuka youBot. El sistema de visi\u00f3n<br \/>\r\nest\u00e1 compuesto por un sensor RGB-D Asus Xtion Pror. La ley de control implementada tiene<br \/>\r\nla estructura de un PD cl\u00e1sico para la plataforma m\u00f3vil. El manipulador m\u00f3vil se desplaza a<br \/>\r\npuntos 3D conocidos mediante el c\u00e1lculo de cinem\u00e1tica inversa. 
En este art\u00edculo se demuestra<br \/>\r\nla efectividad del algoritmo en la localizaci\u00f3n del objeto de inter\u00e9s as\u00ed como en la manipulaci\u00f3n<br \/>\r\ndel mismo para llevarlo de su lugar original a otro espacio deseado.<br \/>\r\n<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('105','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_105\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-file-pdf\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/amca.mx\/memorias\/amca2015\/articulos\/0044_MiCT3-04.pdf\" title=\"http:\/\/amca.mx\/memorias\/amca2015\/articulos\/0044_MiCT3-04.pdf\" target=\"_blank\">http:\/\/amca.mx\/memorias\/amca2015\/articulos\/0044_MiCT3-04.pdf<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('105','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril<\/p><p class=\"tp_pub_title\">using supercolor-pixels descriptors for tracking relevant cues in underwater environments with poor visibility conditions <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_publisher\">ICRA 2015 Workshop on Visual Place Recognition in Changing Environments, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_107\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('107','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_107\" style=\"display:none;\"><div 
class=\"tp_bibtex_entry\"><pre>@inproceedings{Maldonao-Ramirez2015,<br \/>\r\ntitle = {using supercolor-pixels descriptors for tracking relevant cues in underwater environments with poor visibility conditions},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-00-00},<br \/>\r\npublisher = {ICRA 2015 Workshop on Visual Place Recognition in Changing Environments},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('107','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Gonz\u00e1lez-Garc\u00eda, Luis C.;  Torres-Mendez, Luz Abril;  Mart\u00ednez, Julieta;  Sattar, Junaed;  Little, James<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('109','tp_links')\" style=\"cursor:pointer;\">Are You Talking to Me? Detecting Attention in First-Person Interactions<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_pages\">pp.  
137-142, <\/span><span class=\"tp_pub_additional_year\">2015<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 2308-4197<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_109\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('109','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_109\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('109','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_109\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('109','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_109\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{Gonz\\'{a}lez-Garc\\'{i}a2015,<br \/>\r\ntitle = {Are You Talking to Me? Detecting Attention in First-Person Interactions},<br \/>\r\nauthor = {Gonz\\'{a}lez-Garc\\'{i}a, Luis C. and Torres-Mendez, Luz Abril and Mart\\'{i}nez, Julieta and Sattar, Junaed and Little, James},<br \/>\r\nurl = {https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecting_Attention_in_First-Person_Interactions},<br \/>\r\nissn = {2308-4197},<br \/>\r\nyear  = {2015},<br \/>\r\ndate = {2015-00-00},<br \/>\r\npages = { 137-142},<br \/>\r\nabstract = {This paper presents an approach for a mobile robot to detect the level of attention of a human in first-person interactions. Determining the degree of attention is an essential task in day-today interactions. In particular, we are interested in natural Human-Robot Interactions (HRI's) during which a robot needs to estimate the focus and the degree of the user's attention to determine the most appropriate moment to initiate, continue and terminate an interaction. 
Our approach is novel in that it uses a linear regression technique to classify raw depth-image data according to three levels of user attention on the robot (null, partial and total). This is achieved by measuring the linear independence of the input range data with respect to a dataset of user poses. We overcome the problem of time overhead that a large database can add to real-time Linear Regression Classification (LRC) methods by including only the feature vectors with the most relevant information. We demonstrate the approach by presenting experimental data from human-interaction studies with a PR2 robot. Results demonstrate our attention classifier to be accurate and robust in detecting the attention levels of human participants. I. INTRODUCTION Determining the attention of people is an essential component of day-today interactions. We are constantly monitoring other people's gaze, head and body poses while engaged in a conversation [1][2][3]. We also perform attention estimation in order to perform natural interactions [4][5]. In short, attention estimation is a fundamental component of effective social interaction; therefore, for robots to be efficient social agents it is necessary to provide them with reliable mechanisms to estimate human attention. We believe that human attention estimation, particularly in the context of interactions, is highly subjective. However, attempts to model it have been relatively successful, e.g., allowing a robot to ask for directions when it finds a human, as in the work of Weiss et al. [6]. Nonetheless, the state-of-the-art is still far from reaching a point where a robot can successfully interact with humans without relying on mechanisms not common to natural language. Recently, the use of range images to make more natural human-machine interfaces has been in the agenda of researchers, like in the case of the Microsoft Kinect TM , which delivers a skeleton of <br \/>\r\n<br \/>\r\nAre You Talking to Me? 
Detecting Attention in First-Person Interactions (PDF Download Available). Available from: https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecting_Attention_in_First-Person_Interactions [accessed Jun 17, 2017].},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('109','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_109\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This paper presents an approach for a mobile robot to detect the level of attention of a human in first-person interactions. Determining the degree of attention is an essential task in day-today interactions. In particular, we are interested in natural Human-Robot Interactions (HRI's) during which a robot needs to estimate the focus and the degree of the user's attention to determine the most appropriate moment to initiate, continue and terminate an interaction. Our approach is novel in that it uses a linear regression technique to classify raw depth-image data according to three levels of user attention on the robot (null, partial and total). This is achieved by measuring the linear independence of the input range data with respect to a dataset of user poses. We overcome the problem of time overhead that a large database can add to real-time Linear Regression Classification (LRC) methods by including only the feature vectors with the most relevant information. We demonstrate the approach by presenting experimental data from human-interaction studies with a PR2 robot. Results demonstrate our attention classifier to be accurate and robust in detecting the attention levels of human participants. I. INTRODUCTION Determining the attention of people is an essential component of day-today interactions. 
We are constantly monitoring other people's gaze, head and body poses while engaged in a conversation [1][2][3]. We also perform attention estimation in order to perform natural interactions [4][5]. In short, attention estimation is a fundamental component of effective social interaction; therefore, for robots to be efficient social agents it is necessary to provide them with reliable mechanisms to estimate human attention. We believe that human attention estimation, particularly in the context of interactions, is highly subjective. However, attempts to model it have been relatively successful, e.g., allowing a robot to ask for directions when it finds a human, as in the work of Weiss et al. [6]. Nonetheless, the state-of-the-art is still far from reaching a point where a robot can successfully interact with humans without relying on mechanisms not common to natural language. Recently, the use of range images to make more natural human-machine interfaces has been in the agenda of researchers, like in the case of the Microsoft Kinect TM , which delivers a skeleton of <br \/>\r\n<br \/>\r\nAre You Talking to Me? Detecting Attention in First-Person Interactions (PDF Download Available). 
Available from: https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecting_Attention_in_First-Person_Interactions [accessed Jun 17, 2017].<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('109','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_109\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecting_Attention_in_First-Person_Interactions\" title=\"https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecti[...]\" target=\"_blank\">https:\/\/www.researchgate.net\/publication\/274065286_Are_You_Talking_to_Me_Detecti[...]<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('109','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2014\">2014<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Journal Articles<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Martinez-Garcia, Edgar A.;  Torres-Mendez, Luz Abril;  Elara Mohan, Rajesh<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('20','tp_links')\" style=\"cursor:pointer;\">Multi-legged robot dynamics navigation model with optical flow<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">International Journal of Intelligent Unmanned Systems, <\/span><span class=\"tp_pub_additional_volume\">vol. 2, <\/span><span class=\"tp_pub_additional_number\">no. 2, <\/span><span class=\"tp_pub_additional_pages\">pp. 
121-139, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_20\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('20','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_20\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('20','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_20\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('20','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_20\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{doi:10.1108\/IJIUS-04-2014-0003,<br \/>\r\ntitle = {Multi-legged robot dynamics navigation model with optical flow},<br \/>\r\nauthor = {Martinez-Garcia, Edgar A. and Torres-Mendez, Luz Abril and Elara Mohan, Rajesh },<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003},<br \/>\r\ndoi = {10.1108\/IJIUS-04-2014-0003},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-01-01},<br \/>\r\njournal = {International Journal of Intelligent Unmanned Systems},<br \/>\r\nvolume = {2},<br \/>\r\nnumber = {2},<br \/>\r\npages = {121-139},<br \/>\r\nabstract = {Purpose \\textendash The purpose of this paper is to establish analytical and numerical solutions of a navigational law to estimate displacements of hyper-static multi-legged mobile robots, which combines: monocular vision (optical flow of regional invariants) and legs dynamics. Design\/methodology\/approach \\textendash In this study the authors propose a Euler-Lagrange equation that control legs\u2019 joints to control robot's displacements. Robot's rotation and translational velocities are feedback by motion features of visual invariant descriptors. 
A general analytical solution of a derivative navigation law is proposed for hyper-static robots. The feedback is formulated with the local speed rate obtained from optical flow of visual regional invariants. The proposed formulation includes a data association algorithm aimed to correlate visual invariant descriptors detected in sequential images through monocular vision. The navigation law is constrained by a set of three kinematic equilibrium conditions for navigational scenarios: constant acceleration, constant velocity, and instantaneous acceleration. Findings \\textendash The proposed data association method concerns local motions of multiple invariants (enhanced MSER) by minimizing the norm of multidimensional optical flow feature vectors. Kinematic measurements are used as observable arguments in the general dynamic control equation; while the legs joints dynamics model is used to formulate the controllable arguments. Originality\/value \\textendash The given analysis does not combine sensor data of any kind, but only monocular passive vision. The approach automatically detects environmental invariant descriptors with an enhanced version of the MSER method. Only optical flow vectors and robot's multi-leg dynamics are used to formulate descriptive rotational and translational motions for self-positioning.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('20','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_20\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Purpose \u2013 The purpose of this paper is to establish analytical and numerical solutions of a navigational law to estimate displacements of hyper-static multi-legged mobile robots, which combines: monocular vision (optical flow of regional invariants) and legs dynamics. 
Design\/methodology\/approach \u2013 In this study the authors propose a Euler-Lagrange equation that control legs\u2019 joints to control robot's displacements. Robot's rotation and translational velocities are feedback by motion features of visual invariant descriptors. A general analytical solution of a derivative navigation law is proposed for hyper-static robots. The feedback is formulated with the local speed rate obtained from optical flow of visual regional invariants. The proposed formulation includes a data association algorithm aimed to correlate visual invariant descriptors detected in sequential images through monocular vision. The navigation law is constrained by a set of three kinematic equilibrium conditions for navigational scenarios: constant acceleration, constant velocity, and instantaneous acceleration. Findings \u2013 The proposed data association method concerns local motions of multiple invariants (enhanced MSER) by minimizing the norm of multidimensional optical flow feature vectors. Kinematic measurements are used as observable arguments in the general dynamic control equation; while the legs joints dynamics model is used to formulate the controllable arguments. Originality\/value \u2013 The given analysis does not combine sensor data of any kind, but only monocular passive vision. The approach automatically detects environmental invariant descriptors with an enhanced version of the MSER method. 
Only optical flow vectors and robot's multi-leg dynamics are used to formulate descriptive rotational and translational motions for self-positioning.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('20','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_20\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003\" title=\"http:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003\" target=\"_blank\">http:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1108\/IJIUS-04-2014-0003\" title=\"Follow DOI:10.1108\/IJIUS-04-2014-0003\" target=\"_blank\">doi:10.1108\/IJIUS-04-2014-0003<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('20','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rios-Cabrera, Reyes;  Tuytelaars, Tinne<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('64','tp_links')\" style=\"cursor:pointer;\">Boosting Masked Dominant Orientation Templates for Efficient Object Detection<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Comput. Vis. Image Underst., <\/span><span class=\"tp_pub_additional_volume\">vol. 120, <\/span><span class=\"tp_pub_additional_pages\">pp. 
103\u2013116, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1077-3142<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_64\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('64','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_64\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('64','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_64\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{Rios-Cabrera:2014:BMD:2583127.2583285,<br \/>\r\ntitle = {Boosting Masked Dominant Orientation Templates for Efficient Object Detection},<br \/>\r\nauthor = {Rios-Cabrera, Reyes and Tuytelaars, Tinne},<br \/>\r\nurl = {http:\/\/dx.doi.org\/10.1016\/j.cviu.2013.12.008},<br \/>\r\ndoi = {10.1016\/j.cviu.2013.12.008},<br \/>\r\nissn = {1077-3142},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-01-01},<br \/>\r\njournal = {Comput. Vis. 
Image Underst.},<br \/>\r\nvolume = {120},<br \/>\r\npages = {103--116},<br \/>\r\npublisher = {Elsevier Science Inc.},<br \/>\r\naddress = {New York, NY, USA},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('64','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_64\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/dx.doi.org\/10.1016\/j.cviu.2013.12.008\" title=\"http:\/\/dx.doi.org\/10.1016\/j.cviu.2013.12.008\" target=\"_blank\">http:\/\/dx.doi.org\/10.1016\/j.cviu.2013.12.008<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1016\/j.cviu.2013.12.008\" title=\"Follow DOI:10.1016\/j.cviu.2013.12.008\" target=\"_blank\">doi:10.1016\/j.cviu.2013.12.008<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('64','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_conference\">Conferences<\/h3><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Martinez-Gonzalez, Pablo;  Varas, David;  Castelan, Mario;  Camacho, Margarita;  Marques, Ferran;  Arechavaleta, Gustavo<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('19','tp_links')\" style=\"cursor:pointer;\">3D shape reconstruction from a humanoid generated video sequence<\/a> <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">2014 IEEE-RAS International Conference on Humanoid Robots, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 2164-0572<\/span>.<\/p><p 
class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_19\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('19','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_19\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('19','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_19\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('19','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_19\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{7041439,<br \/>\r\ntitle = {3D shape reconstruction from a humanoid generated video sequence},<br \/>\r\nauthor = {Martinez-Gonzalez, Pablo and Varas, David and Castelan, Mario and Camacho, Margarita and Marques, Ferran and Arechavaleta, Gustavo },<br \/>\r\nurl = {http:\/\/ieeexplore.ieee.org\/document\/7041439\/?arnumber=7041439\\&amp;tag=1},<br \/>\r\ndoi = {10.1109\/HUMANOIDS.2014.7041439},<br \/>\r\nissn = {2164-0572},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-11-01},<br \/>\r\nbooktitle = {2014 IEEE-RAS International Conference on Humanoid Robots},<br \/>\r\npages = {699-706},<br \/>\r\nabstract = {This paper presents a strategy for estimating the geometry of an interest object from a monocular video sequence acquired by a walking humanoid robot. The problem is solved using a space carving algorithm, which relies on both the accurate extraction of the occluding boundaries of the object as well as the precise estimation of the camera pose for each video frame. For data acquisition, a monocular visual-based control has been developed that drives the trajectory of the robot around an object placed on a small table. 
Due to the stepping of the humanoid, the recorded sequence is contaminated with artefacts that affect the correct extraction of contours along the video frames. To overcome this issue, a method that assigns a fitness score for each frame is proposed, delivering a subset of camera poses and video frames that produce consistent 3D shape estimations of the objects used for experimental evaluation.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('19','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_19\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This paper presents a strategy for estimating the geometry of an interest object from a monocular video sequence acquired by a walking humanoid robot. The problem is solved using a space carving algorithm, which relies on both the accurate extraction of the occluding boundaries of the object as well as the precise estimation of the camera pose for each video frame. For data acquisition, a monocular visual-based control has been developed that drives the trajectory of the robot around an object placed on a small table. Due to the stepping of the humanoid, the recorded sequence is contaminated with artefacts that affect the correct extraction of contours along the video frames. 
To overcome this issue, a method that assigns a fitness score for each frame is proposed, delivering a subset of camera poses and video frames that produce consistent 3D shape estimations of the objects used for experimental evaluation.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('19','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_19\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/ieeexplore.ieee.org\/document\/7041439\/?arnumber=7041439&amp;amp;tag=1\" title=\"http:\/\/ieeexplore.ieee.org\/document\/7041439\/?arnumber=7041439&amp;amp;tag=1\" target=\"_blank\">http:\/\/ieeexplore.ieee.org\/document\/7041439\/?arnumber=7041439&amp;amp;tag=1<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/HUMANOIDS.2014.7041439\" title=\"Follow DOI:10.1109\/HUMANOIDS.2014.7041439\" target=\"_blank\">doi:10.1109\/HUMANOIDS.2014.7041439<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('19','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rodriguez-Telles, Francisco G;  Perez-Alcocer, Ricardo;  Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Bikram Dey, Bir;  Martinez-Garcia, Edgar A.<\/p><p class=\"tp_pub_title\">Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">2014 IEEE International Conference on Robotics and Automation (ICRA), <\/span><span class=\"tp_pub_additional_organization\">IEEE <\/span><span 
class=\"tp_pub_additional_year\">2014<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_21\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('21','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_21\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{rodriguez2014vision,<br \/>\r\ntitle = {Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat},<br \/>\r\nauthor = {Rodriguez-Telles, Francisco G and Perez-Alcocer, Ricardo and Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Bikram Dey, Bir and Martinez-Garcia, Edgar A.},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-01-01},<br \/>\r\nbooktitle = {2014 IEEE International Conference on Robotics and Automation (ICRA)},<br \/>\r\npages = {3813--3818},<br \/>\r\norganization = {IEEE},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('21','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_conference\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Martinez-Garcia, Edgar A.<\/p><p class=\"tp_pub_title\">Robust detection and tracking of regions of interest for autonomous underwater robotic exploration <span class=\"tp_pub_type tp_  conference\">Conference<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_booktitle\">Proc. 6th Int. Conf. 
on Advanced Cognitive Technologies and Applications, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_22\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('22','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_22\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@conference{maldonado2014robust,<br \/>\r\ntitle = {Robust detection and tracking of regions of interest for autonomous underwater robotic exploration},<br \/>\r\nauthor = {Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Martinez-Garcia, Edgar A.},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-01-01},<br \/>\r\nbooktitle = {Proc. 6th Int. Conf. on Advanced Cognitive Technologies and Applications},<br \/>\r\npages = {165--171},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {conference}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('22','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_inproceedings\">Proceedings Articles<\/h3><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rodr\u00edguez-Teiles, F. G.;  Perez-Alcocer, Ricardo;  Maldonado-Ramirez, Alejandro;  Torres-Mendez, Luz Abril;  Dey, B. 
B.;  Martinez-Garcia, Edgar A.<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('68','tp_links')\" style=\"cursor:pointer;\">Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">2014 IEEE International Conference on Robotics and Automation (ICRA), <\/span><span class=\"tp_pub_additional_pages\">pp. 3813-3818, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1050-4729<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_68\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('68','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_68\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('68','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_68\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{6907412,<br \/>\r\ntitle = {Vision-based reactive autonomous navigation with obstacle avoidance: Towards a non-invasive and cautious exploration of marine habitat},<br \/>\r\nauthor = {Rodr\\'{i}guez-Teiles, F. G. and Perez-Alcocer, Ricardo and Maldonado-Ramirez, Alejandro and Torres-Mendez, Luz Abril and Dey, B. B. 
and Martinez-Garcia, Edgar A.},<br \/>\r\nurl = {http:\/\/ieeexplore.ieee.org\/document\/6907412\/},<br \/>\r\ndoi = {10.1109\/ICRA.2014.6907412},<br \/>\r\nissn = {1050-4729},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-05-01},<br \/>\r\nbooktitle = {2014 IEEE International Conference on Robotics and Automation (ICRA)},<br \/>\r\npages = {3813-3818},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('68','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_68\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/ieeexplore.ieee.org\/document\/6907412\/\" title=\"http:\/\/ieeexplore.ieee.org\/document\/6907412\/\" target=\"_blank\">http:\/\/ieeexplore.ieee.org\/document\/6907412\/<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/10.1109\/ICRA.2014.6907412\" title=\"Follow DOI:10.1109\/ICRA.2014.6907412\" target=\"_blank\">doi:10.1109\/ICRA.2014.6907412<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('68','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Estopier-Castillo, Vicente;  Arechavaleta, Gustavo;  Olgu\u00edn-D\u00edaz, Ernesto<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('78','tp_links')\" style=\"cursor:pointer;\">Generacion de Movimientos Humanoides con Dinamica Inversa Jerarquica<\/a> <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">Generacion de 
Movimientos Humanoides con Dinamica Inversa Jerarquica, <\/span><span class=\"tp_pub_additional_publisher\">Congreso Latinoamericano de Control Autom\u00e1tico CLCA 2014, <\/span><span class=\"tp_pub_additional_year\">2014<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_resource_link\"><a id=\"tp_links_sh_78\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('78','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_78\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('78','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_78\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{Castillo2014,<br \/>\r\ntitle = {Generacion de Movimientos Humanoides con Dinamica Inversa Jerarquica},<br \/>\r\nauthor = {Estopier-Castillo, Vicente and Arechavaleta, Gustavo and Olgu\\'{i}n-D\\'{i}az, Ernesto},<br \/>\r\nurl = {http:\/\/amca.mx\/memorias\/amca2014\/articulos\/0112.pdf},<br \/>\r\nyear  = {2014},<br \/>\r\ndate = {2014-00-00},<br \/>\r\nbooktitle = {Generacion de Movimientos Humanoides con Dinamica Inversa Jerarquica},<br \/>\r\npublisher = {Congreso Latinoamericano de Control Autom\\'{a}tico CLCA 2014},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('78','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_78\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-file-pdf\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/amca.mx\/memorias\/amca2014\/articulos\/0112.pdf\" title=\"http:\/\/amca.mx\/memorias\/amca2014\/articulos\/0112.pdf\" 
target=\"_blank\">http:\/\/amca.mx\/memorias\/amca2014\/articulos\/0112.pdf<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('78','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_2013\">2013<\/h3><h3 class=\"tp_h3\" id=\"tp_h3_article\">Journal Articles<\/h3><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Sanchez-Escobedo, Dalila;  Castelan, Mario<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('23','tp_links')\" style=\"cursor:pointer;\">3D face shape prediction from a frontal image using cylindrical coordinates and partial least squares<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Pattern Recognition Letters, <\/span><span class=\"tp_pub_additional_volume\">vol. 34, <\/span><span class=\"tp_pub_additional_number\">no. 4, <\/span><span class=\"tp_pub_additional_pages\">pp. 
389 - 399, <\/span><span class=\"tp_pub_additional_year\">2013<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 0167-8655<\/span><span class=\"tp_pub_additional_note\">, (Advances in Pattern Recognition Methodology and Applications)<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_23\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('23','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_23\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('23','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_23\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('23','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_23\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{SanchezEscobedo2013389,<br \/>\r\ntitle = {3D face shape prediction from a frontal image using cylindrical coordinates and partial least squares},<br \/>\r\nauthor = {Sanchez-Escobedo, Dalila and Castelan, Mario},<br \/>\r\nurl = {http:\/\/www.sciencedirect.com\/science\/article\/pii\/S0167865512002929},<br \/>\r\ndoi = {http:\/\/dx.doi.org\/10.1016\/j.patrec.2012.09.007},<br \/>\r\nissn = {0167-8655},<br \/>\r\nyear  = {2013},<br \/>\r\ndate = {2013-01-01},<br \/>\r\njournal = {Pattern Recognition Letters},<br \/>\r\nvolume = {34},<br \/>\r\nnumber = {4},<br \/>\r\npages = {389 - 399},<br \/>\r\nabstract = {This paper addresses the problem of linearly approximating 3D shape from intensities in the context of facial analysis. In other words, given a frontal pose grayscale input face, the direct estimation of its 3D structure is sought through a regression matrix. 
Approaches falling into this category generally assume that both 2D and 3D features are defined under Cartesian schemes, which is not optimal for the task of novel view synthesis. The current article aims to overcome this issue by exploiting the 3D structure of faces through cylindrical coordinates, aided by the partial least squares regression. In the context of facial shape analysis, partial least squares builds a set of basis faces, for both grayscale and 3D shape spaces, seeking for maximizing shared covariance between projections of the data along the basis faces. Experimental tests show how the cylindrical representations are suitable for the purposes of linear regression, resulting in a benefit for the generation of novel facial views, showing a potential use in model based face identification.},<br \/>\r\nnote = {Advances in Pattern Recognition Methodology and Applications},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('23','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_23\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This paper addresses the problem of linearly approximating 3D shape from intensities in the context of facial analysis. In other words, given a frontal pose grayscale input face, the direct estimation of its 3D structure is sought through a regression matrix. Approaches falling into this category generally assume that both 2D and 3D features are defined under Cartesian schemes, which is not optimal for the task of novel view synthesis. The current article aims to overcome this issue by exploiting the 3D structure of faces through cylindrical coordinates, aided by the partial least squares regression. 
In the context of facial shape analysis, partial least squares builds a set of basis faces, for both grayscale and 3D shape spaces, seeking for maximizing shared covariance between projections of the data along the basis faces. Experimental tests show how the cylindrical representations are suitable for the purposes of linear regression, resulting in a benefit for the generation of novel facial views, showing a potential use in model based face identification.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('23','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_23\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S0167865512002929\" title=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S0167865512002929\" target=\"_blank\">http:\/\/www.sciencedirect.com\/science\/article\/pii\/S0167865512002929<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/http:\/\/dx.doi.org\/10.1016\/j.patrec.2012.09.007\" title=\"Follow DOI:http:\/\/dx.doi.org\/10.1016\/j.patrec.2012.09.007\" target=\"_blank\">doi:http:\/\/dx.doi.org\/10.1016\/j.patrec.2012.09.007<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('23','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Lopez-Juarez, Ismael;  Castelan, Mario;  Castro-Mart\u00eenez, Francisco Javier;  Pe\u00f1a-Cabrera, Mario;  Osorio-Comparan, Roman<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('24','tp_links')\" style=\"cursor:pointer;\">Using Object\u2019s Contour, Form and Depth to Embed Recognition Capability into Industrial Robots<\/a> <span class=\"tp_pub_type 
tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Journal of Applied Research and Technology, <\/span><span class=\"tp_pub_additional_volume\">vol. 11, <\/span><span class=\"tp_pub_additional_number\">no. 1, <\/span><span class=\"tp_pub_additional_pages\">pp. 5 - 17, <\/span><span class=\"tp_pub_additional_year\">2013<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1665-6423<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_24\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('24','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_24\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('24','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_24\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('24','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_24\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{LopezJuarez20135,<br \/>\r\ntitle = {Using Object\u2019s Contour, Form and Depth to Embed Recognition Capability into Industrial Robots},<br \/>\r\nauthor = {Lopez-Juarez, Ismael and Castelan, Mario and Castro-Mart\\^{i}nez, Francisco Javier and Pe\\~{n}a-Cabrera, Mario and Osorio-Comparan, Roman},<br \/>\r\nurl = {http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1665642313715116},<br \/>\r\ndoi = {http:\/\/dx.doi.org\/10.1016\/S1665-6423(13)71511-6},<br \/>\r\nissn = {1665-6423},<br \/>\r\nyear  = {2013},<br \/>\r\ndate = {2013-01-01},<br \/>\r\njournal = {Journal of Applied Research and Technology},<br \/>\r\nvolume = {11},<br \/>\r\nnumber = {1},<br \/>\r\npages = {5 - 17},<br \/>\r\nabstract = 
{Abstract Robot vision systems can differentiate parts by pattern matching irrespective of part orientation and location. Some manufacturers offer 3D guidance systems using robust vision and laser systems so that a 3D programmed point can be repeated even if the part is moved varying its location, rotation and orientation within the working space. Despite these developments, current industrial robots are still unable to recognize objects in a robust manner; that is, to distinguish an object among equally shaped objects taking into account not only the object\u2019s contour but also its form and depth information, which is precisely the major contribution of this research. Our hypothesis establishes that it is possible to integrate a robust invariant object recognition capability into industrial robots by using image features from the object\u2019s contour (boundary object information), its form (i.e., type of curvature or topographical surface information) and depth information (from stereo disparity maps). These features can be concatenated in order to form an invariant vector descriptor which is the input to an artificial neural network (ANN) for learning and recognition purposes. In this paper we present the recognition results under different working conditions using a KUKA KR16 industrial robot, which validated our approach.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('24','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_24\" style=\"display:none;\"><div class=\"tp_abstract_entry\">Abstract Robot vision systems can differentiate parts by pattern matching irrespective of part orientation and location. 
Some manufacturers offer 3D guidance systems using robust vision and laser systems so that a 3D programmed point can be repeated even if the part is moved varying its location, rotation and orientation within the working space. Despite these developments, current industrial robots are still unable to recognize objects in a robust manner; that is, to distinguish an object among equally shaped objects taking into account not only the object\u2019s contour but also its form and depth information, which is precisely the major contribution of this research. Our hypothesis establishes that it is possible to integrate a robust invariant object recognition capability into industrial robots by using image features from the object\u2019s contour (boundary object information), its form (i.e., type of curvature or topographical surface information) and depth information (from stereo disparity maps). These features can be concatenated in order to form an invariant vector descriptor which is the input to an artificial neural network (ANN) for learning and recognition purposes. 
In this paper we present the recognition results under different working conditions using a KUKA KR16 industrial robot, which validated our approach.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('24','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_24\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1665642313715116\" title=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1665642313715116\" target=\"_blank\">http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1665642313715116<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/http:\/\/dx.doi.org\/10.1016\/S1665-6423(13)71511-6\" title=\"Follow DOI:http:\/\/dx.doi.org\/10.1016\/S1665-6423(13)71511-6\" target=\"_blank\">doi:http:\/\/dx.doi.org\/10.1016\/S1665-6423(13)71511-6<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('24','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><div class=\"tp_publication tp_publication_article\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rivero-Juarez, Joaquin;  Martinez-Garcia, Edgar A.;  Torres-Mendez, Luz Abril;  Elara Mohan, Rajesh<\/p><p class=\"tp_pub_title\"><a class=\"tp_title_link\" onclick=\"teachpress_pub_showhide('25','tp_links')\" style=\"cursor:pointer;\">3D Heterogeneous Multi-sensor Global Registration<\/a> <span class=\"tp_pub_type tp_  article\">Journal Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_journal\">Procedia Engineering, <\/span><span class=\"tp_pub_additional_volume\">vol. 64, <\/span><span class=\"tp_pub_additional_pages\">pp. 
1552 - 1561, <\/span><span class=\"tp_pub_additional_year\">2013<\/span>, <span class=\"tp_pub_additional_issn\">ISSN: 1877-7058<\/span>.<\/p><p class=\"tp_pub_menu\"><span class=\"tp_abstract_link\"><a id=\"tp_abstract_sh_25\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('25','tp_abstract')\" title=\"Show abstract\" style=\"cursor:pointer;\">Abstract<\/a><\/span> | <span class=\"tp_resource_link\"><a id=\"tp_links_sh_25\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('25','tp_links')\" title=\"Show links and resources\" style=\"cursor:pointer;\">Links<\/a><\/span> | <span class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_25\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('25','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_25\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@article{RIVEROJUAREZ20131552,<br \/>\r\ntitle = {3D Heterogeneous Multi-sensor Global Registration},<br \/>\r\nauthor = {Rivero-Juarez, Joaquin and Martinez-Garcia, Edgar A. and Torres-Mendez, Luz Abril and Elara Mohan, Rajesh },<br \/>\r\nurl = {http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1877705813017517},<br \/>\r\ndoi = {http:\/\/dx.doi.org\/10.1016\/j.proeng.2013.09.237},<br \/>\r\nissn = {1877-7058},<br \/>\r\nyear  = {2013},<br \/>\r\ndate = {2013-01-01},<br \/>\r\njournal = {Procedia Engineering},<br \/>\r\nvolume = {64},<br \/>\r\npages = {1552 - 1561},<br \/>\r\nabstract = {This manuscript presents a deterministic model to register heterogeneous 3D data arising from a ring of eight ultrasonic sonar, one high data density LiDAR (light detection and ranging), and a semi-ring of three visual sensors. The three visual sensors are arranged in a cylindrical ring, and although they provide 2D colour images, a radial multi-stereo geometric model is proposed to yield 3D data. 
All deployed sensors are geometrically placed on-board a wheeled mobile robot platform, and data registration is carried out navigating indoors. The sensor devices in discussion are coordinated and synchronized by a home-made distributed sensor suite system. Mathematical deterministic formulation for data registration is used to obtain experimental and numerical results on global mapping. Data registration relies on a geometric model to compute depth information from a semi- circular trinocular stereo sensor that is proposed to rectify and calibrate three image frames with different orientations and positions, but with same projection point.},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {article}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('25','tp_bibtex')\">Close<\/a><\/p><\/div><div class=\"tp_abstract\" id=\"tp_abstract_25\" style=\"display:none;\"><div class=\"tp_abstract_entry\">This manuscript presents a deterministic model to register heterogeneous 3D data arising from a ring of eight ultrasonic sonar, one high data density LiDAR (light detection and ranging), and a semi-ring of three visual sensors. The three visual sensors are arranged in a cylindrical ring, and although they provide 2D colour images, a radial multi-stereo geometric model is proposed to yield 3D data. All deployed sensors are geometrically placed on-board a wheeled mobile robot platform, and data registration is carried out navigating indoors. The sensor devices in discussion are coordinated and synchronized by a home-made distributed sensor suite system. Mathematical deterministic formulation for data registration is used to obtain experimental and numerical results on global mapping. 
Data registration relies on a geometric model to compute depth information from a semi- circular trinocular stereo sensor that is proposed to rectify and calibrate three image frames with different orientations and positions, but with same projection point.<\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('25','tp_abstract')\">Close<\/a><\/p><\/div><div class=\"tp_links\" id=\"tp_links_25\" style=\"display:none;\"><div class=\"tp_links_entry\"><ul class=\"tp_pub_list\"><li><i class=\"fas fa-globe\"><\/i><a class=\"tp_pub_list\" href=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1877705813017517\" title=\"http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1877705813017517\" target=\"_blank\">http:\/\/www.sciencedirect.com\/science\/article\/pii\/S1877705813017517<\/a><\/li><li><i class=\"ai ai-doi\"><\/i><a class=\"tp_pub_list\" href=\"https:\/\/dx.doi.org\/http:\/\/dx.doi.org\/10.1016\/j.proeng.2013.09.237\" title=\"Follow DOI:http:\/\/dx.doi.org\/10.1016\/j.proeng.2013.09.237\" target=\"_blank\">doi:http:\/\/dx.doi.org\/10.1016\/j.proeng.2013.09.237<\/a><\/li><\/ul><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('25','tp_links')\">Close<\/a><\/p><\/div><\/div><\/div><h3 class=\"tp_h3\" id=\"tp_h3_inproceedings\">Proceedings Articles<\/h3><div class=\"tp_publication tp_publication_inproceedings\"><div class=\"tp_pub_info\"><p class=\"tp_pub_author\"> Rios-Cabrera, Reyes;  Tuytelaars, Tinne<\/p><p class=\"tp_pub_title\">Discriminatively Trained Templates for 3D Object Detection: A Real Time Scalable Approach <span class=\"tp_pub_type tp_  inproceedings\">Proceedings Article<\/span> <\/p><p class=\"tp_pub_additional\"><span class=\"tp_pub_additional_in\">In: <\/span><span class=\"tp_pub_additional_booktitle\">The IEEE International Conference on Computer Vision (ICCV), <\/span><span class=\"tp_pub_additional_year\">2013<\/span>.<\/p><p class=\"tp_pub_menu\"><span 
class=\"tp_bibtex_link\"><a id=\"tp_bibtex_sh_215\" class=\"tp_show\" onclick=\"teachpress_pub_showhide('215','tp_bibtex')\" title=\"Show BibTeX entry\" style=\"cursor:pointer;\">BibTeX<\/a><\/span><\/p><div class=\"tp_bibtex\" id=\"tp_bibtex_215\" style=\"display:none;\"><div class=\"tp_bibtex_entry\"><pre>@inproceedings{Rios-Cabrera_2013_ICCV__B,<br \/>\r\ntitle = {Discriminatively Trained Templates for 3D Object Detection: A Real Time Scalable Approach},<br \/>\r\nauthor = {Rios-Cabrera, Reyes and Tuytelaars, Tinne},<br \/>\r\nyear  = {2013},<br \/>\r\ndate = {2013-12-01},<br \/>\r\nbooktitle = {The IEEE International Conference on Computer Vision (ICCV)},<br \/>\r\nkeywords = {},<br \/>\r\npubstate = {published},<br \/>\r\ntppubtype = {inproceedings}<br \/>\r\n}<br \/>\r\n<\/pre><\/div><p class=\"tp_close_menu\"><a class=\"tp_close\" onclick=\"teachpress_pub_showhide('215','tp_bibtex')\">Close<\/a><\/p><\/div><\/div><\/div><\/div><div class=\"tablenav\"><div class=\"tablenav-pages\"><span class=\"displaying-num\">82 entries<\/span> <a class=\"page-numbers button disabled\">&laquo;<\/a> <a class=\"page-numbers button disabled\">&lsaquo;<\/a> 1 of 2 <a href=\"https:\/\/ryma.cinvestav.mx\/ravg\/publications\/?limit=2&amp;tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=&amp;tsr=#tppubs\" title=\"next page\" class=\"page-numbers button\">&rsaquo;<\/a> <a href=\"https:\/\/ryma.cinvestav.mx\/ravg\/publications\/?limit=2&amp;tgid=&amp;yr=&amp;type=&amp;usr=&amp;auth=&amp;tsr=#tppubs\" title=\"last page\" class=\"page-numbers button\">&raquo;<\/a> <\/div><\/div><\/div> Av. Industrial\u00a0Metalurgia\u00a0#1062,\u00a0Parque Ind. Ramos Arizpe,\u00a0Ramos Arizpe, Coah.\u00a0C.P. 25900, M\u00e9xico. \u00a0Tel. 
+52 (844) 438-9600<\/p>\n","protected":false},"author":1,"featured_media":0,"parent":0,"menu_order":1,"comment_status":"closed","ping_status":"closed","template":"","meta":{"_et_pb_use_builder":"on","_et_pb_old_content":"","_et_gb_content_width":"","footnotes":""},"class_list":["post-76","page","type-page","status-publish","hentry"],"yoast_head":"<!-- This site is optimized with the Yoast SEO plugin v27.3 - https:\/\/yoast.com\/product\/yoast-seo-wordpress\/ -->\n<title>Publications - Robotics Active Vision Group<\/title>\n<meta name=\"robots\" content=\"index, follow, max-snippet:-1, max-image-preview:large, max-video-preview:-1\" \/>\n<link rel=\"canonical\" href=\"https:\/\/ryma.cinvestav.mx\/ravg\/publications\/\" \/>\n<meta property=\"og:locale\" content=\"en_US\" \/>\n<meta property=\"og:type\" content=\"article\" \/>\n<meta property=\"og:title\" content=\"Publications - Robotics Active Vision Group\" \/>\n<meta property=\"og:description\" content=\"This is the list of publications of this laboratory Av. Industrial\u00a0Metalurgia\u00a0#1062,\u00a0Parque Ind. Ramos Arizpe,\u00a0Ramos Arizpe, Coah.\u00a0C.P. 25900, M\u00e9xico. \u00a0Tel. +52 (844) 438-9600\" \/>\n<meta property=\"og:url\" content=\"https:\/\/ryma.cinvestav.mx\/ravg\/publications\/\" \/>\n<meta property=\"og:site_name\" content=\"Robotics Active Vision Group\" \/>\n<meta property=\"article:modified_time\" content=\"2017-11-11T17:48:08+00:00\" \/>\n<meta name=\"twitter:card\" content=\"summary_large_image\" \/>\n<meta name=\"twitter:label1\" content=\"Est. 
reading time\" \/>\n\t<meta name=\"twitter:data1\" content=\"2 minutes\" \/>\n<script type=\"application\/ld+json\" class=\"yoast-schema-graph\">{\"@context\":\"https:\\\/\\\/schema.org\",\"@graph\":[{\"@type\":\"WebPage\",\"@id\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/ravg\\\/publications\\\/\",\"url\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/ravg\\\/publications\\\/\",\"name\":\"Publications - Robotics Active Vision Group\",\"isPartOf\":{\"@id\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/ravg\\\/#website\"},\"datePublished\":\"2017-09-10T05:46:36+00:00\",\"dateModified\":\"2017-11-11T17:48:08+00:00\",\"breadcrumb\":{\"@id\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/ravg\\\/publications\\\/#breadcrumb\"},\"inLanguage\":\"en-US\",\"potentialAction\":[{\"@type\":\"ReadAction\",\"target\":[\"https:\\\/\\\/ryma.cinvestav.mx\\\/ravg\\\/publications\\\/\"]}]},{\"@type\":\"BreadcrumbList\",\"@id\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/ravg\\\/publications\\\/#breadcrumb\",\"itemListElement\":[{\"@type\":\"ListItem\",\"position\":1,\"name\":\"Home\",\"item\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/ravg\\\/\"},{\"@type\":\"ListItem\",\"position\":2,\"name\":\"Publications\"}]},{\"@type\":\"WebSite\",\"@id\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/ravg\\\/#website\",\"url\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/ravg\\\/\",\"name\":\"Robotics Active Vision Group\",\"description\":\"Miembro de Rob\u00f3tica y Manufactura Avanzada - Cinvestav\",\"potentialAction\":[{\"@type\":\"SearchAction\",\"target\":{\"@type\":\"EntryPoint\",\"urlTemplate\":\"https:\\\/\\\/ryma.cinvestav.mx\\\/ravg\\\/?s={search_term_string}\"},\"query-input\":{\"@type\":\"PropertyValueSpecification\",\"valueRequired\":true,\"valueName\":\"search_term_string\"}}],\"inLanguage\":\"en-US\"}]}<\/script>\n<!-- \/ Yoast SEO plugin. 
-->","yoast_head_json":{"title":"Publications - Robotics Active Vision Group","robots":{"index":"index","follow":"follow","max-snippet":"max-snippet:-1","max-image-preview":"max-image-preview:large","max-video-preview":"max-video-preview:-1"},"canonical":"https:\/\/ryma.cinvestav.mx\/ravg\/publications\/","og_locale":"en_US","og_type":"article","og_title":"Publications - Robotics Active Vision Group","og_description":"This is the list of publications of this laboratory Av. Industrial\u00a0Metalurgia\u00a0#1062,\u00a0Parque Ind. Ramos Arizpe,\u00a0Ramos Arizpe, Coah.\u00a0C.P. 25900, M\u00e9xico. \u00a0Tel. +52 (844) 438-9600","og_url":"https:\/\/ryma.cinvestav.mx\/ravg\/publications\/","og_site_name":"Robotics Active Vision Group","article_modified_time":"2017-11-11T17:48:08+00:00","twitter_card":"summary_large_image","twitter_misc":{"Est. reading time":"2 minutes"},"schema":{"@context":"https:\/\/schema.org","@graph":[{"@type":"WebPage","@id":"https:\/\/ryma.cinvestav.mx\/ravg\/publications\/","url":"https:\/\/ryma.cinvestav.mx\/ravg\/publications\/","name":"Publications - Robotics Active Vision Group","isPartOf":{"@id":"https:\/\/ryma.cinvestav.mx\/ravg\/#website"},"datePublished":"2017-09-10T05:46:36+00:00","dateModified":"2017-11-11T17:48:08+00:00","breadcrumb":{"@id":"https:\/\/ryma.cinvestav.mx\/ravg\/publications\/#breadcrumb"},"inLanguage":"en-US","potentialAction":[{"@type":"ReadAction","target":["https:\/\/ryma.cinvestav.mx\/ravg\/publications\/"]}]},{"@type":"BreadcrumbList","@id":"https:\/\/ryma.cinvestav.mx\/ravg\/publications\/#breadcrumb","itemListElement":[{"@type":"ListItem","position":1,"name":"Home","item":"https:\/\/ryma.cinvestav.mx\/ravg\/"},{"@type":"ListItem","position":2,"name":"Publications"}]},{"@type":"WebSite","@id":"https:\/\/ryma.cinvestav.mx\/ravg\/#website","url":"https:\/\/ryma.cinvestav.mx\/ravg\/","name":"Robotics Active Vision Group","description":"Miembro de Rob\u00f3tica y Manufactura Avanzada - 
Cinvestav","potentialAction":[{"@type":"SearchAction","target":{"@type":"EntryPoint","urlTemplate":"https:\/\/ryma.cinvestav.mx\/ravg\/?s={search_term_string}"},"query-input":{"@type":"PropertyValueSpecification","valueRequired":true,"valueName":"search_term_string"}}],"inLanguage":"en-US"}]}},"_links":{"self":[{"href":"https:\/\/ryma.cinvestav.mx\/ravg\/wp-json\/wp\/v2\/pages\/76","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/ryma.cinvestav.mx\/ravg\/wp-json\/wp\/v2\/pages"}],"about":[{"href":"https:\/\/ryma.cinvestav.mx\/ravg\/wp-json\/wp\/v2\/types\/page"}],"author":[{"embeddable":true,"href":"https:\/\/ryma.cinvestav.mx\/ravg\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/ryma.cinvestav.mx\/ravg\/wp-json\/wp\/v2\/comments?post=76"}],"version-history":[{"count":38,"href":"https:\/\/ryma.cinvestav.mx\/ravg\/wp-json\/wp\/v2\/pages\/76\/revisions"}],"predecessor-version":[{"id":733,"href":"https:\/\/ryma.cinvestav.mx\/ravg\/wp-json\/wp\/v2\/pages\/76\/revisions\/733"}],"wp:attachment":[{"href":"https:\/\/ryma.cinvestav.mx\/ravg\/wp-json\/wp\/v2\/media?parent=76"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}