<?xml version="1.0" encoding="UTF-8"?><xml><records><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><secondary-authors><author><style face="normal" font="default" size="100%">L. Cañamero</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author><author><style face="normal" font="default" size="100%">M. Wilson</style></author><author><style face="normal" font="default" size="100%">Sofiane Boucenna</style></author><author><style face="normal" font="default" size="100%">N. Cuperlier</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">From Animals to Animats 16. Proceedings 16th International Conference on Simulation of Adaptive Behavior, SAB 2022</style></title><secondary-title><style face="normal" font="default" size="100%">From Animals to Animats 16. Proceedings 16th International Conference on Simulation of Adaptive Behavior, SAB 2022</style></secondary-title></titles><dates><year><style face="normal" font="default" size="100%">2022</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://doi.org/10.1007/978-3-031-16770-6</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">CY Cergy Paris University, Cergy-Pontoise, France, September 20–23, 2022</style></pub-location><volume><style face="normal" font="default" size="100%">LNAI/LNCS 13499</style></volume><isbn><style face="normal" font="default" size="100%">978-3-031-16769-0</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">L. Cañamero</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author><author><style face="normal" font="default" size="100%">M. Wilson</style></author><author><style face="normal" font="default" size="100%">Sofiane Boucenna</style></author><author><style face="normal" font="default" size="100%">N. Cuperlier</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Preface</style></title><secondary-title><style face="normal" font="default" size="100%">From Animals to Animats 16. 
Proceedings 16th International Conference on Simulation of Adaptive Behavior, SAB 2022</style></secondary-title></titles><dates><year><style face="normal" font="default" size="100%">2022</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://doi.org/10.1007/978-3-031-16770-6</style></url></web-urls></urls><number><style face="normal" font="default" size="100%">LNAI/LNCS 13499</style></number><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pages><style face="normal" font="default" size="100%">v–x</style></pages><isbn><style face="normal" font="default" size="100%">978-3-031-16769-0</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author><author><style face="normal" font="default" size="100%">C. Hasson</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Catherine Pelachaud</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Emotion et cognition: les robots comme outils et modèles</style></title><secondary-title><style face="normal" font="default" size="100%">Systèmes d'interaction émotionnelle</style></secondary-title></titles><dates><year><style face="normal" font="default" size="100%">2010</style></year></dates><publisher><style face="normal" font="default" size="100%">Lavoisier Hermes Science</style></publisher><pub-location><style face="normal" font="default" size="100%">Paris, France</style></pub-location><isbn><style face="normal" font="default" size="100%">978-2-7462-2115-4</style></isbn><language><style face="normal" font="default" size="100%">fre</style></language><section><style face="normal" font="default" size="100%">9</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Pierre Andry</style></author><author><style face="normal" font="default" size="100%">Arnaud J. Blanchard</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Shuzhi Sam Ge</style></author><author><style face="normal" font="default" size="100%">Haizhou Li</style></author><author><style face="normal" font="default" size="100%">John-John Cabibihan</style></author><author><style face="normal" font="default" size="100%">Yeow Kee Tan</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Using the Interaction Rhythm as a Natural Reinforcement Signal for Social Robots: A Matter of Belief</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
International Conference on Social Robotics, ICSR 2010</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title></titles><dates><year><style face="normal" font="default" size="100%">2010</style></year></dates><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Singapore</style></pub-location><volume><style face="normal" font="default" size="100%">6414</style></volume><pages><style face="normal" font="default" size="100%">81–89</style></pages><isbn><style face="normal" font="default" size="100%">978-3-642-17247-2</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper, we present the results of a pilot study of a human–robot interaction experiment in which the rhythm of the interaction is used as a reinforcement signal to learn sensorimotor associations. The algorithm uses breaks and variations in the rhythm at which the human produces actions. The concept is based on the hypothesis that a constant rhythm is an intrinsic property of a positive interaction, whereas a break reflects a negative event. Subjects from various backgrounds interacted with a NAO robot, teaching it to mirror their actions by learning the correct sensorimotor associations. The results show that for the rhythm to be a useful reinforcement signal, the subjects have to be convinced that the robot is an agent with which they can act naturally, using their voice and facial expressions as cues to help it understand the correct behaviour to learn. When the subjects do behave naturally, the rhythm and its variations truly reflect how well the interaction is going and help the robot learn efficiently. 
These results suggest that non-expert users can interact naturally and fruitfully with an autonomous robot, provided they believe the interaction to be natural, even without any technical knowledge of the robot's cognitive capacities.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>19</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">M. Simon</style></author><author><style face="normal" font="default" size="100%">P. Canet</style></author><author><style face="normal" font="default" size="100%">R. Soussignan</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Jacqueline Nadel</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Reconnaissance et résonance émotionnelle face à un humain et à un robot chez des enfants typiques et des enfants avec autisme de haut niveau</style></title><secondary-title><style face="normal" font="default" size="100%">Bulletin scientifique de l’Arapi</style></secondary-title></titles><dates><year><style face="normal" font="default" size="100%">2008</style></year></dates><language><style face="normal" font="default" size="100%">fre</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Jacqueline Nadel</style></author><author><style face="normal" font="default" size="100%">M. Simon</style></author><author><style face="normal" font="default" size="100%">P. Canet</style></author><author><style face="normal" font="default" size="100%">R. Soussignan</style></author><author><style face="normal" font="default" size="100%">P. Blancard</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Human Responses to an Expressive Robot</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the Sixth International Workshop on Epigenetic Robotics</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lund University Cognitive Studies</style></tertiary-title></titles><dates><year><style face="normal" font="default" size="100%">2006</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.lucs.lu.se/LUCS/128/Nadeletal.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Lund University</style></publisher><pub-location><style face="normal" font="default" size="100%">Paris, France</style></pub-location><volume><style face="normal" font="default" size="100%">128</style></volume><pages><style face="normal" font="default" size="100%">79–86</style></pages><isbn><style face="normal" font="default" size="100%">91-974741-6-9</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This paper reports the results of the first study comparing subjects' responses to robotic emotional facial displays and human emotional facial displays.
It describes, step by step, the construction of believable emotional expressions in a robotic head, the problems raised by a comparative approach to robotic and human expressions, and the solutions adopted to ensure a valid comparison. Twenty adults and fifteen 3-year-old children were shown static (photos) and dynamic (2-D video clips or 3-D live) displays of emotional expressions produced by a robot or a person.
The study compares two dependent variables, emotional resonance (automatic facial feedback during an emotional display) and emotion recognition (emotion labeling), according to partner (robot or person) and to the nature of the display (static or dynamic). Results for emotional resonance were similar for young children and adults: both groups resonated significantly more to dynamic displays than to static displays, whether the expressions were robotic or human. In both groups, emotion recognition was easier for human expressions than for robotic ones.
Unlike children, who recognized dynamically displayed emotional expressions more easily, adults scored higher with static displays, reflecting a cognitive strategy independent of emotional resonance. Results are discussed with regard to the therapeutic use of this comparative approach with children with autism, who are described as impaired in emotion sharing and communication.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Jacqueline Nadel</style></author><author><style face="normal" font="default" size="100%">Darwin Muir</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Emotion Understanding: Robots as Tools and Models</style></title><secondary-title><style face="normal" font="default" size="100%">Emotional Development: Recent Research Advances</style></secondary-title></titles><dates><year><style face="normal" font="default" size="100%">2005</style></year></dates><publisher><style face="normal" font="default" size="100%">Oxford University Press</style></publisher><pages><style face="normal" font="default" size="100%">235–258</style></pages><isbn><style face="normal" font="default" size="100%">0-19-852883-3 (Hbk), 0-19-852884-1 (Pbk)</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><section><style face="normal" font="default" size="100%">9</style></section></record></records></xml>