<?xml version="1.0" encoding="UTF-8"?><xml><records><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Markelius, A.</style></author><author><style face="normal" font="default" size="100%">Sjöberg, S.</style></author><author><style face="normal" font="default" size="100%">Lemhaouri, Z.</style></author><author><style face="normal" font="default" size="100%">Cohen, L.</style></author><author><style face="normal" font="default" size="100%">Lowe, R.</style></author><author><style face="normal" font="default" size="100%">Cañamero, L.</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Abdulaziz Al Ali</style></author><author><style face="normal" font="default" size="100%">Nader Meskin</style></author><author><style face="normal" font="default" size="100%">Wanyue Jiang</style></author><author><style face="normal" font="default" size="100%">Shuzhi Sam Ge</style></author><author><style face="normal" font="default" size="100%">John-John Cabibihan</style></author><author><style face="normal" font="default" size="100%">Silvia Rossi</style></author><author><style face="normal" font="default" size="100%">Hongsheng He</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">A Human-Robot Mutual Learning System with Affect-Grounded Language Acquisition and Differential Outcomes Training</style></title><secondary-title><style face="normal" font="default" size="100%">Social Robotics. 
15th International Conference, ICSR 2023, Proceedings Part II</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2024</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://doi.org/10.1007/978-981-99-8718-4</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Doha, Qatar, December 3–7, 2023</style></pub-location><volume><style face="normal" font="default" size="100%">LNAI 14454</style></volume><pages><style face="normal" font="default" size="100%">108–122</style></pages><isbn><style face="normal" font="default" size="100%">978-981-99-8717-7</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cañamero, L.</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Misselhorn, C.</style></author><author><style face="normal" font="default" size="100%">Poljanšek, T.</style></author><author><style face="normal" font="default" size="100%">Störzinger, T.</style></author><author><style face="normal" font="default" size="100%">M. Klein</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">When Emotional Machines are Intelligent Machines: The Tangled Knot of Affective Cognition</style></title><secondary-title><style face="normal" font="default" size="100%">Emotional Machines. 
Perspectives from Affective Computing and Emotional Human-Machine Interaction</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2023</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://doi.org/10.1007/978-3-658-37641-3_6</style></url></web-urls></urls><number><style face="normal" font="default" size="100%">Technikzukünfte, Wissenschaft und Gesellschaft / Futures of Technology, Science and Society</style></number><publisher><style face="normal" font="default" size="100%">Springer VS</style></publisher><pub-location><style face="normal" font="default" size="100%">Wiesbaden</style></pub-location><isbn><style face="normal" font="default" size="100%">978-3-658-37640-6</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Research in neurobiology has provided evidence that emotions pervade human intelligence at many levels. However, “emotion” and “cognition” are still largely conceptualized as separate notions that “interact”, and untangling and modeling those interactions remains a challenge, both in biological and artificial systems. My research focuses on modeling in autonomous robots how “cognition”, “motivation” and “emotion” interact in what we could term embodied affective cognition, and particularly investigating how affect lies at the root of and drives how agents apprehend and interact with the world, making them “intelligent” in the sense of being able to adapt to their environments in flexible and beneficial ways. 
In this chapter, I discuss this issue as I illustrate how my embodied model of affect has been used in my group to ground a broad range of affective, cognitive and social skills such as adaptive action selection, different types of learning, development, and social interaction.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Scarinzi, A.</style></author><author><style face="normal" font="default" size="100%">Cañamero, L.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Toward Affective Interactions: E-Motions and Embodied Artificial Cognitive Systems</style></title><secondary-title><style face="normal" font="default" size="100%">Frontiers in Psychology</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2022</style></year><pub-dates><date><style  face="normal" font="default" size="100%">04/2022</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://doi.org/10.3389/fpsyg.2022.768416</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Frontiers in Psychology</style></publisher><volume><style face="normal" font="default" size="100%">13</style></volume><pages><style face="normal" font="default" size="100%">1 - 2</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><issue><style face="normal" font="default" size="100%">article 768416</style></issue><work-type><style face="normal" font="default" size="100%">Opinion article</style></work-type><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://doi.org/10.3389/fpsyg.2022.768416&quot;&gt;Download&lt;/a&gt; (Open Access)
</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Imran Khan</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Josh Bongard</style></author><author><style face="normal" font="default" size="100%">Juniper Lovato</style></author><author><style face="normal" font="default" size="100%">Laurent Hébert-Dufresne</style></author><author><style face="normal" font="default" size="100%">Radhakrishna Dasari</style></author><author><style face="normal" font="default" size="100%">Lisa Soros</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Modelling the Social Buffering Hypothesis in an Artificial Life Environment</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the Artificial Life Conference 2020 (ALIFE 2020)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2020</style></year><pub-dates><date><style  face="normal" font="default" size="100%">07/2020</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.mitpressjournals.org/doi/abs/10.1162/isal_a_00302</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Montreal, Canada</style></pub-location><pages><style face="normal" font="default" size="100%">393–401</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In social species, 
individuals who form social bonds have been found to live longer, healthier lives. One hypothesised reason for this effect is that social support, mediated by oxytocin, &quot;buffers&quot; responses to stress in a number of ways, and is considered an important process of adaptation that facilitates long-term wellbeing in changing, stressful conditions. Using an artificial life model, we have investigated the role of one hypothesised stress-reducing effect of social support on the survival and social interactions of agents in a small society. We have investigated this effect using different types of social bonds and bond partner combinations across environmentally-challenging conditions. Our results have found that stress reduction through social support benefits the survival of agents with social bonds, and that this effect often extends to the wider society. We have also found that this effect is significantly affected by environmental and social contexts. Our findings suggest that these &quot;social buffering&quot; effects may not be universal, but dependent upon the degree of environmental challenges, the quality of affective relationships and the wider social context.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.mitpressjournals.org/doi/abs/10.1162/isal_a_00302&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Ana Tanevska</style></author><author><style face="normal" font="default" size="100%">Francesco Rea</style></author><author><style face="normal" font="default" size="100%">Giulio Sandini</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Alessandra 
Sciutti</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Socially Adaptable Framework for Human-Robot Interaction</style></title><secondary-title><style face="normal" font="default" size="100%">Frontiers in Robotics and AI</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2020</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.frontiersin.org/article/10.3389/frobt.2020.00121</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">7</style></volume><pages><style face="normal" font="default" size="100%">121</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In our everyday lives we regularly engage in complex, personalized, and adaptive interactions with our peers. To recreate the same kind of rich, human-like interactions, a social robot should be aware of our needs and affective states and continuously adapt its behavior to them. Our proposed solution is to have the robot learn how to select the behaviors that would maximize the pleasantness of the interaction for its peers. To make the robot autonomous in its decision making, this process could be guided by an internal motivation system. We wish to investigate how an adaptive robotic framework of this kind would function and personalize to different users. We also wish to explore whether the adaptability and personalization would bring any additional richness to the human-robot interaction (HRI), or whether it would instead bring uncertainty and unpredictability that would not be accepted by the robot's human peers. To this end, we designed a socially adaptive framework for the humanoid robot iCub. 
As a result, the robot perceives and reuses the affective and interactive signals from the person as input for the adaptation based on internal social motivation. We strive to investigate the value of the generated adaptation in our framework in the context of HRI. In particular, we compare how users will experience interaction with an adaptive versus a non-adaptive social robot. To address these questions, we propose a comparative interaction study with iCub whereby users act as the robot's caretaker, and iCub's social adaptation is guided by an internal comfort level that varies with the stimuli that iCub receives from its caretaker. We investigate and compare how iCub's internal dynamics would be perceived by people, both in a condition when iCub does not personalize its behavior to the person, and in a condition where it is instead adaptive. Finally, we establish the potential benefits that an adaptive framework could bring to the context of repeated interactions with a humanoid robot.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.frontiersin.org/article/10.3389/frobt.2020.00121&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Ana Tanevska</style></author><author><style face="normal" font="default" size="100%">Francesco Rea</style></author><author><style face="normal" font="default" size="100%">Giulio Sandini</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Alessandra Sciutti</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Cognitive Architecture for Socially Adaptable Robots</style></title><secondary-title><style face="normal" font="default" 
size="100%">Proc. 2019 Joint IEEE 9th International Conference on Development and Learning and Epigenetic Robotics (ICDL-EpiRob)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2019</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2019</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://ieeexplore.ieee.org/document/8850688</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Oslo, Norway</style></pub-location><pages><style face="normal" font="default" size="100%">195–200</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://ieeexplore.ieee.org/document/8850688&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Ana Tanevska</style></author><author><style face="normal" font="default" size="100%">Francesco Rea</style></author><author><style face="normal" font="default" size="100%">Giulio Sandini</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Alessandra Sciutti</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Eager to Learn vs. Quick to Complain? How a socially adaptive robot architecture performs with different robot personalities</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
2019 IEEE International Conference on Systems, Man, and Cybernetics (IEEE SMC 2019)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2019</style></year><pub-dates><date><style  face="normal" font="default" size="100%">10/2019</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://ieeexplore.ieee.org/document/8913903</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Bari, Italy</style></pub-location><pages><style face="normal" font="default" size="100%">365–371</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">A social robot that is aware of our needs and continuously adapts its behaviour to them has the potential of creating a complex, personalized, human-like interaction of the kind we are used to have with our peers in our everyday lives. We are interested in exploring how would an adaptive architecture function and personalize to different users when given different initial values of its variables, i.e. when implementing the same adaptive framework with different robot personalities. Would an architecture that learns very quickly outperform a slower but steadier learning profile? 
To further explore this, we propose a cognitive architecture for the humanoid robot iCub supporting adaptability and we attempt to validate its functionality and test different robot profiles.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://ieeexplore.ieee.org/document/8913903&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Coninx, Alexandre</style></author><author><style face="normal" font="default" size="100%">Paul E. Baxter</style></author><author><style face="normal" font="default" size="100%">Oleari, Elettra</style></author><author><style face="normal" font="default" size="100%">Bellini, Sara</style></author><author><style face="normal" font="default" size="100%">Bierman, Bert</style></author><author><style face="normal" font="default" size="100%">Henkemans, Olivier Blanson</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Cosi, Piero</style></author><author><style face="normal" font="default" size="100%">Valentin Enescu</style></author><author><style face="normal" font="default" size="100%">Espinoza, Raquel Ros</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Remi Humbert</style></author><author><style face="normal" font="default" size="100%">Kiefer, Bernd</style></author><author><style face="normal" font="default" size="100%">Kruijff-Korbayová, Ivana</style></author><author><style face="normal" font="default" size="100%">Looije, Rosmarijn</style></author><author><style face="normal" font="default" size="100%">Mosconi, Marco</style></author><author><style face="normal" font="default" size="100%">Mark A. 
Neerincx</style></author><author><style face="normal" font="default" size="100%">Giulio Paci</style></author><author><style face="normal" font="default" size="100%">Patsis, Georgios</style></author><author><style face="normal" font="default" size="100%">Pozzi, Clara</style></author><author><style face="normal" font="default" size="100%">Sacchitelli, Francesca</style></author><author><style face="normal" font="default" size="100%">Hichem Sahli</style></author><author><style face="normal" font="default" size="100%">Alberto Sanna</style></author><author><style face="normal" font="default" size="100%">Sommavilla, Giacomo</style></author><author><style face="normal" font="default" size="100%">Tesser, Fabio</style></author><author><style face="normal" font="default" size="100%">Yiannis Demiris</style></author><author><style face="normal" font="default" size="100%">Tony Belpaeme</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Towards Long-Term Social Child-Robot Interaction: Using Multi-Activity Switching to Engage Young Users</style></title><secondary-title><style face="normal" font="default" size="100%">Journal of Human-Robot Interaction</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2016</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://dl.acm.org/doi/abs/10.5898/JHRI.5.1.Coninx</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">5</style></volume><pages><style face="normal" font="default" size="100%">32–67</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Social robots have the potential to provide support in a number of practical domains, such as learning and behaviour change. 
This potential is particularly relevant for children, who have proven receptive to interactions with social robots. To reach learning and therapeutic goals, a number of issues need to be investigated, notably the design of an effective child-robot interaction (cHRI) to ensure the child remains engaged in the relationship and that educational goals are met. Typically, current cHRI research experiments focus on a single type of interaction activity (e.g. a game). However, these can suffer from a lack of adaptation to the child, or from an increasingly repetitive nature of the activity and interaction. In this paper, we motivate and propose a practicable solution to this issue: an adaptive robot able to switch between multiple activities within single interactions. We describe a system that embodies this idea, and present a case study in which diabetic children collaboratively learn with the robot about various aspects of managing their condition. We demonstrate the ability of our system to induce a varied interaction and show the potential of this approach both as an educational tool and as a research method for long-term cHRI.</style></abstract><issue><style face="normal" font="default" size="100%">1</style></issue><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://dl.acm.org/doi/abs/10.5898/JHRI.5.1.Coninx&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Kruijff-Korbayová, Ivana</style></author><author><style face="normal" font="default" size="100%">Oleari, Elettra</style></author><author><style face="normal" font="default" size="100%">Pozzi, Clara</style></author><author><style face="normal" font="default" size="100%">Sacchitelli, Francesca</style></author><author><style face="normal" font="default" size="100%">Bagherzadhalimi, 
Anahita</style></author><author><style face="normal" font="default" size="100%">Bellini, Sara</style></author><author><style face="normal" font="default" size="100%">Kiefer, Bernd</style></author><author><style face="normal" font="default" size="100%">Racioppa, Stefania</style></author><author><style face="normal" font="default" size="100%">Coninx, Alexandre</style></author><author><style face="normal" font="default" size="100%">Paul E. Baxter</style></author><author><style face="normal" font="default" size="100%">Bierman, Bert</style></author><author><style face="normal" font="default" size="100%">Henkemans, Olivier Blanson</style></author><author><style face="normal" font="default" size="100%">Mark A. Neerincx</style></author><author><style face="normal" font="default" size="100%">Rosemarijn Looije</style></author><author><style face="normal" font="default" size="100%">Yiannis Demiris</style></author><author><style face="normal" font="default" size="100%">Espinoza, Raquel Ros</style></author><author><style face="normal" font="default" size="100%">Mosconi, Marco</style></author><author><style face="normal" font="default" size="100%">Cosi, Piero</style></author><author><style face="normal" font="default" size="100%">Remi Humbert</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Hichem Sahli</style></author><author><style face="normal" font="default" size="100%">Joachim de Greeff</style></author><author><style face="normal" font="default" size="100%">James Kennedy</style></author><author><style face="normal" font="default" size="100%">Robin Read</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Giulio Paci</style></author><author><style face="normal" font="default" 
size="100%">Sommavilla, Giacomo</style></author><author><style face="normal" font="default" size="100%">Tesser, Fabio</style></author><author><style face="normal" font="default" size="100%">Athanasopoulos, Georgios</style></author><author><style face="normal" font="default" size="100%">Patsis, Georgios</style></author><author><style face="normal" font="default" size="100%">Verhelst, Werner</style></author><author><style face="normal" font="default" size="100%">Alberto Sanna</style></author><author><style face="normal" font="default" size="100%">Tony Belpaeme</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Let’s Be Friends: Perception of a Social Robotic Companion for children with T1DM</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. New Friends 2015</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2015</style></year><pub-dates><date><style  face="normal" font="default" size="100%">10/2015</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://mheerink.home.xs4all.nl/pdf/ProceedingsNF2015-3.pdf</style></url></web-urls></urls><pub-location><style face="normal" font="default" size="100%">Almere, The Netherlands</style></pub-location><pages><style face="normal" font="default" size="100%">32–33</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We describe the social characteristics of a robot developed to support children with Type 1 Diabetes Mellitus (T1DM) in the process of education and care. We evaluated the perception of the robot at a summer camp where diabetic children aged 10-14 experienced the robot in group interactions. 
Children in the intervention condition additionally interacted with it also individually, in one-to-one sessions featuring several game-like activities. These children perceived the robot significantly more as a friend than those in the control group. They also readily engaged with it in dialogues about their habits related to healthy lifestyle as well as personal experiences concerning diabetes. This indicates that the one-on-one interactions added a special quality to the relationship of the children with the robot.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://mheerink.home.xs4all.nl/pdf/ProceedingsNF2015-3.pdf&quot;&gt;Download full proceedings&lt;/a&gt; (PDF)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Wang, Weiyi</style></author><author><style face="normal" font="default" size="100%">Athanasopoulos, Georgios</style></author><author><style face="normal" font="default" size="100%">Yilmazyildiz, Selma</style></author><author><style face="normal" font="default" size="100%">Patsis, Georgios</style></author><author><style face="normal" font="default" size="100%">Valentin Enescu</style></author><author><style face="normal" font="default" size="100%">Hichem Sahli</style></author><author><style face="normal" font="default" size="100%">Verhelst, Werner</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Natural Emotion Elicitation for Emotion Modeling in Child-Robot Interactions</style></title><secondary-title><style face="normal" font="default" 
size="100%">Proc. 4th Workshop on Child Computer Interaction (WOCCI 2014)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.isca-speech.org/archive/wocci_2014/wc14_051.html</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">ISCA</style></publisher><pub-location><style face="normal" font="default" size="100%">Singapore</style></pub-location><pages><style face="normal" font="default" size="100%">51–56</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Obtaining spontaneous emotional expressions is the very first and vital step in affective computing studies, for both psychologists and computer scientists. However, it is quite challenging to record them in real life, especially when certain modalities are required (e.g.  3D representation of the body).  Traditional elicitation and capturing protocols either introduce the awareness of the recording, which may impair the naturalness of the behaviors, or cause too much information loss.  In this paper, we  present  natural  emotion  elicitation  and  recording  experiments, which were set in child-robot interaction scenarios. Several state-of-the-art technologies were employed to acquire the multi-modal expressive data that will be further used for emotion modeling and recognition studies. 
The obtained recordings exhibit the expected emotional expressions.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.isca-speech.org/archive/wocci_2014/wc14_051.html&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Aryel Beck</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Luisa Damiano</style></author><author><style face="normal" font="default" size="100%">Cosi, Piero</style></author><author><style face="normal" font="default" size="100%">Tesser, Fabio</style></author><author><style face="normal" font="default" size="100%">Sommavilla, Giacomo</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Interpretation of Emotional Body Language Displayed by a Humanoid Robot: A Case Study with Children</style></title><secondary-title><style face="normal" font="default" size="100%">International Journal of Social Robotics</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">emotion</style></keyword><keyword><style  face="normal" font="default" size="100%">emotional body language</style></keyword><keyword><style  face="normal" font="default" size="100%">perception</style></keyword><keyword><style  face="normal" font="default" size="100%">Social robotics</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2013</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/article/10.1007/s12369-013-0193-z</style></url></web-urls></urls><volume><style 
face="normal" font="default" size="100%">5</style></volume><pages><style face="normal" font="default" size="100%">325–334</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">The work reported in this paper focuses on giving humanoid robots the capacity to express emotions with their body. Previous results show that adults are able to interpret different key poses displayed by a humanoid robot and also that changing the head position affects the expressiveness of the key poses in a consistent way. Moving the head down leads to decreased arousal (the level of energy) and valence (positive or negative emotion) whereas moving the head up produces an increase along these dimensions. Hence, changing the head position during an interaction should send intuitive signals. The study reported in this paper tested children’s ability to recognize the emotional body language displayed by a humanoid robot. 
The results suggest that body postures and head position can be used to convey emotions during child-robot interaction.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://link.springer.com/article/10.1007/s12369-013-0193-z&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Nalin, Marco</style></author><author><style face="normal" font="default" size="100%">Baroni, Ilaria</style></author><author><style face="normal" font="default" size="100%">Kruijff-Korbayová, Ivana</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Aryel Beck</style></author><author><style face="normal" font="default" size="100%">Cuayáhuitl, Heriberto</style></author><author><style face="normal" font="default" size="100%">Alberto Sanna</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Children's Adaptation in Multi-session Interaction with a Humanoid Robot</style></title><secondary-title><style face="normal" font="default" size="100%">2012 IEEE RO-MAN: The 21st IEEE International Symposium on Robot and Human Interactive Communication</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ieeexplore.ieee.org/document/6343778/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pages><style face="normal" font="default" size="100%">351–357</style></pages><language><style face="normal" font="default" 
size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This work presents preliminary observations from a study of children (N=19, age 5–12) interacting in multiple sessions with a humanoid robot in a scenario involving game activities. The main purpose of the study was to see how their perception of the robot, their engagement, and their enjoyment of the robot as a companion evolve across multiple interactions, separated by one-two weeks. However, an interesting phenomenon was observed during the experiment: most of the children soon adapted to the behaviors of the robot, in terms of speech timing, speed and tone, verbal input formulation, nodding, gestures, etc. We describe the experimental setup and the system, and our observations and preliminary analysis results, which open interesting questions for further research.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://ieeexplore.ieee.org/document/6343778&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Aryel Beck</style></author><author><style face="normal" font="default" size="100%">Stevens, Brett</style></author><author><style face="normal" font="default" size="100%">Kim A. 
Bard</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Emotional Body Language Displayed by Artificial Agents</style></title><secondary-title><style face="normal" font="default" size="100%">ACM Transactions on Interactive Intelligent Systems</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://dl.acm.org/doi/10.1145/2133366.2133368</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">ACM</style></publisher><pub-location><style face="normal" font="default" size="100%">New York, NY</style></pub-location><volume><style face="normal" font="default" size="100%">2</style></volume><pages><style face="normal" font="default" size="100%">2:1–2:29</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Complex and natural social interaction between artificial agents (computer-generated or robotic) and humans necessitates the display of rich emotions in order to be believable, socially relevant, and accepted, and to generate the natural emotional responses that humans show in the context of social interaction, such as engagement or empathy. Whereas some robots use faces to display (simplified) emotional expressions, for other robots such as Nao, body language is the best medium available given their inability to convey facial expressions. Displaying emotional body language that can be interpreted whilst interacting with the robot should significantly improve naturalness. This research investigates the creation of an affect space for the generation of emotional body language to be displayed by humanoid robots. 
To do so, three experiments investigating how emotional body language displayed by agents is interpreted were conducted. The first experiment compared the interpretation of emotional body language displayed by humans and agents. The results showed that emotional body language displayed by an agent or a human is interpreted in a similar way in terms of recognition. Following these results, emotional key poses were extracted from an actor's performances and implemented in a Nao robot. The interpretation of these key poses was validated in a second study where it was found that participants were better than chance at interpreting the key poses displayed. Finally, an affect space was generated by blending key poses and validated in a third study. Overall, these experiments confirmed that body language is an appropriate medium for robots to display emotions and suggest that an affect space for body expressions can be used to improve the expressiveness of humanoid robots.</style></abstract><issue><style face="normal" font="default" size="100%">1</style></issue><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://dl.acm.org/doi/10.1145/2133366.2133368&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tony Belpaeme</style></author><author><style face="normal" font="default" size="100%">Paul E. 
Baxter</style></author><author><style face="normal" font="default" size="100%">Robin Read</style></author><author><style face="normal" font="default" size="100%">Rachel Wood</style></author><author><style face="normal" font="default" size="100%">Cuayáhuitl, Heriberto</style></author><author><style face="normal" font="default" size="100%">Kiefer, Bernd</style></author><author><style face="normal" font="default" size="100%">Racioppa, Stefania</style></author><author><style face="normal" font="default" size="100%">Kruijff-Korbayová, Ivana</style></author><author><style face="normal" font="default" size="100%">Athanasopoulos, Georgios</style></author><author><style face="normal" font="default" size="100%">Valentin Enescu</style></author><author><style face="normal" font="default" size="100%">Rosemarijn Looije</style></author><author><style face="normal" font="default" size="100%">Mark A. Neerincx</style></author><author><style face="normal" font="default" size="100%">Yiannis Demiris</style></author><author><style face="normal" font="default" size="100%">Raquel Ros-Espinoza</style></author><author><style face="normal" font="default" size="100%">Aryel Beck</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Baroni, Ilaria</style></author><author><style face="normal" font="default" size="100%">Nalin, Marco</style></author><author><style face="normal" font="default" size="100%">Cosi, Piero</style></author><author><style face="normal" font="default" size="100%">Giulio Paci</style></author><author><style face="normal" font="default" size="100%">Tesser, Fabio</style></author><author><style face="normal" font="default" size="100%">Sommavilla, Giacomo</style></author><author><style face="normal" font="default" size="100%">Remi 
Humbert</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Multimodal Child-Robot Interaction: Building Social Bonds</style></title><secondary-title><style face="normal" font="default" size="100%">Journal of Human-Robot Interaction</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://dl.acm.org/doi/10.5555/3109688.3109691</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">1</style></volume><pages><style face="normal" font="default" size="100%">33–53</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">For robots to interact effectively with human users they must be capable of coordinated, timely behavior in response to social context. The Adaptive Strategies for Sustainable Long-Term Social Interaction (ALIZ-E) project focuses on the design of long-term, adaptive social interaction between robots and child users in real-world settings. In this paper, we report on the iterative approach taken to scientific and technical developments toward this goal: advancing individual technical competencies and integrating them to form an autonomous robotic system for evaluation “in the wild.” The first evaluation iterations have shown the potential of this methodology in terms of adaptation of the robot to the interactant and the resulting influences on engagement. 
This sets the foundation for an ongoing research program that seeks to develop technologies for social robot companions.</style></abstract><issue><style face="normal" font="default" size="100%">2</style></issue><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://dl.acm.org/doi/10.5555/3109688.3109691&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Aryel Beck</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Luisa Damiano</style></author><author><style face="normal" font="default" size="100%">Sommavilla, Giacomo</style></author><author><style face="normal" font="default" size="100%">Tesser, Fabio</style></author><author><style face="normal" font="default" size="100%">Cosi, Piero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Children Interpretation of Emotional Body Language Displayed by a Robot</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
3rd International Conference on Social Robotics (ICSR 2011)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/chapter/10.1007%2F978-3-642-25504-5_7</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Amsterdam, The Netherlands</style></pub-location><pages><style face="normal" font="default" size="100%">62–70</style></pages><isbn><style face="normal" font="default" size="100%">978-3-642-25504-5</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Previous results show that adults are able to interpret different key poses displayed by the robot and also that changing the head position affects the expressiveness of the key poses in a consistent way. Moving the head down leads to decreased arousal (the level of energy), valence (positive or negative) and stance (approaching or avoiding) whereas moving the head up produces an increase along these dimensions [1]. Hence, changing the head position during an interaction should send intuitive signals which could be used during an interaction. The ALIZ-E target group are children between the age of 8 and 11. Existing results suggest that they would be able to interpret human emotional body language [2, 3].

Based on these results, an experiment was conducted to test whether the results of [1] can be applied to children. If yes body postures and head position could be used to convey emotions during an interaction.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://link.springer.com/chapter/10.1007%2F978-3-642-25504-5_7&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Paul E. Baxter</style></author><author><style face="normal" font="default" size="100%">Tony Belpaeme</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Cosi, Piero</style></author><author><style face="normal" font="default" size="100%">Yiannis Demiris</style></author><author><style face="normal" font="default" size="100%">Valentin Enescu</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Kruijff-Korbayová, Ivana</style></author><author><style face="normal" font="default" size="100%">Rosemarijn Looije</style></author><author><style face="normal" font="default" size="100%">Nalin, Marco</style></author><author><style face="normal" font="default" size="100%">Mark A. 
Neerincx</style></author><author><style face="normal" font="default" size="100%">Hichem Sahli</style></author><author><style face="normal" font="default" size="100%">Giacomo Sommavilla</style></author><author><style face="normal" font="default" size="100%">Tesser, Fabio</style></author><author><style face="normal" font="default" size="100%">Rachel Wood</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Long-Term Human-Robot Interaction with Young Users</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. ACM/IEEE Human-Robot Interaction conference (HRI-2011) (Robots with Children Workshop)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.researchgate.net/publication/228470784_Long-term_human-robot_interaction_with_young_users</style></url></web-urls></urls><pub-location><style face="normal" font="default" size="100%">Lausanne, Switzerland</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Artificial companion agents have the potential to combine novel means for effective health communication with young patients support and entertainment. However, the theory and practice of long-term child-robot interaction is currently an underdeveloped area of research. This paper introduces an approach that integrates multiple functional aspects necessary to implement temporally extended human-robot interaction in the setting of a paediatric ward. We present our methodology for the implementation of a companion robot which will be used to support young patients in hospital as they learn to manage a lifelong metabolic disorder (diabetes). The robot will interact with patients over an extended period of time. 
The necessary functional aspects are identified and introduced, and a review of the technical challenges involved is presented.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.researchgate.net/publication/228470784_Long-term_human-robot_interaction_with_young_users&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Luisa Damiano</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Jackie Chappell</style></author><author><style face="normal" font="default" size="100%">Susannah Thorpe</style></author><author><style face="normal" font="default" size="100%">Nick Hawes</style></author><author><style face="normal" font="default" size="100%">Aaron Sloman</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Constructing Emotions: Epistemological Groundings and Applications in Robotics for a Synthetic Approach to Emotions</style></title><secondary-title><style face="normal" font="default" size="100%">International Symposium on AI-Inspired Biology</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.cs.bham.ac.uk/research/projects/cogaff/aiib/Symposium_6/Papers/Damiano.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">The Society for the Study of Artificial Intelligence and the Simulation of Behaviour</style></publisher><pub-location><style face="normal" font="default" size="100%">De Montfort University, Leicester, UK</style></pub-location><pages><style face="normal" 
font="default" size="100%">20–28</style></pages><isbn><style face="normal" font="default" size="100%">1902956923</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Can the sciences of the artificial positively contribute to the scientific exploration of life and cognition? Can they actually improve the scientific knowledge of natural living and cognitive processes, from biological metabolism to reproduction, from conceptual mapping of the environment to logic reasoning, language, or even emotional expression? To these kinds of questions our article aims to answer in the affirmative. Its main object is the scientific emergent methodology often called the “synthetic approach”, which promotes the programmatic production of embodied and situated models of living and cognitive systems in order to explore aspects of life and cognition not accessible in natural systems and scenarios. The first part of this article presents and discusses the synthetic approach, and proposes an epistemological framework which promises to warrant genuine transmission of knowledge from the sciences of the artificial to the sciences of the natural. The second part of this article looks at the research applying the synthetic approach to the psychological study of emotional development. 
It shows how robotics, through the synthetic methodology, can develop a particular perspective on emotions, coherent with current psychological theories of emotional development and fitting well with the recent “cognitive extension” approach proposed by cognitive sciences and philosophy of mind.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.cs.bham.ac.uk/research/projects/cogaff/aiib/Symposium_6/Papers/Damiano.pdf&quot;&gt;Download&lt;/a&gt; (PDF)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">O'Bryne, Claire</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Harold Fellermann</style></author><author><style face="normal" font="default" size="100%">Mark Dörr</style></author><author><style face="normal" font="default" size="100%">Martin M Hanczy</style></author><author><style face="normal" font="default" size="100%">Lone Ladegaard Laursen</style></author><author><style face="normal" font="default" size="100%">Sarah Maurer</style></author><author><style face="normal" font="default" size="100%">Daniel Merkle</style></author><author><style face="normal" font="default" size="100%">Pierre-Alain Monnard</style></author><author><style face="normal" font="default" size="100%">Kasper Støy</style></author><author><style face="normal" font="default" size="100%">Steen Rasmussen</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Emotion in Decisions of Life and Death – Its Role in Brain-Body-Environment Interactions for Predator and Prey</style></title><secondary-title><style face="normal" font="default" size="100%">Artificial Life XII: Proc. 
of the 12th International Conference on the Synthesis and Simulation of Living Systems</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2010</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://mitpress-request.mit.edu/sites/default/files/titles/alife/0262290758chap141.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Odense, Denmark</style></pub-location><pages><style face="normal" font="default" size="100%">812–822</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Taking inspiration from the biological world, in our work we are attempting to create and examine artificial predator-prey relationships using two LEGO robots. We do so to explore the possible adaptive value of emotion-like states for action selection in this context. However, we also aim to study and consider these concepts together at different levels of abstraction. For example, in terms of individual agents’ brain-body-environment interactions, as well as the (emergent) predator-prey relationships resulting from these. Here, we discuss some of the background concepts and motivations driving the design of our implementation and experiments. First, we explain why we think the predator-prey relationship is so interesting. Narrowing our focus to emotion-based architectures, this is followed by a review of existing literature, comparing different types and highlighting the novel aspects of our own. 
We conclude with our proposed contributions to the literature and thus, ultimately, the design and creation of artificial life.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://mitpress-request.mit.edu/sites/default/files/titles/alife/0262290758chap141.pdf&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Oros, Nicolas</style></author><author><style face="normal" font="default" size="100%">Volker Steuber</style></author><author><style face="normal" font="default" size="100%">Davey, Neil</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Roderick G Adams</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Evolution of Bistable Dynamics in Spiking Neural Controllers for Agents Performing Olfactory Attraction and Aversion</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
19th Annual Computational Neuroscience Meeting (CNS*2010)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year><pub-dates><date><style  face="normal" font="default" size="100%">07/2010</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://bmcneurosci.biomedcentral.com/articles/10.1186/1471-2202-11-S1-P92</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">BioMed Central Ltd.</style></publisher><pub-location><style face="normal" font="default" size="100%">San Antonio, TX</style></pub-location><volume><style face="normal" font="default" size="100%">11(Suppl 1)</style></volume><pages><style face="normal" font="default" size="100%">92</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Oros, Nicolas</style></author><author><style face="normal" font="default" size="100%">Volker Steuber</style></author><author><style face="normal" font="default" size="100%">Davey, Neil</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Roderick G Adams</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Evolution of Bilateral Symmetry in Agents Controlled by Spiking Neural Networks</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
2009 IEEE Symposium on Artificial Life (ALIFE 2009)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">03/2009</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ieeexplore.ieee.org/document/4937702/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Nashville, TN</style></pub-location><pages><style face="normal" font="default" size="100%">116–123</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4244-2763-5</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We present in this paper three novel developmental models allowing information to be encoded in space and time, using spiking neurons placed on a 2D substrate. In two of these models, we introduce neural development that can use bilateral symmetry. We show that these models can create neural controllers for agents evolved to perform chemotaxis. Neural bilateral symmetry can be evolved and be beneficial for an agent. This work is the first, as far as we know, to present developmental models where spiking neurons are generated in space and where bilateral symmetry can be evolved and proved to be beneficial in this context.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">John C Murray</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Kim A. 
Bard</style></author><author><style face="normal" font="default" size="100%">Ross, Marina Davila</style></author><author><style face="normal" font="default" size="100%">Thorsteinsson, Kate</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Kim, Jong-Hwan</style></author><author><style face="normal" font="default" size="100%">Ge, Shuzhi Sam</style></author><author><style face="normal" font="default" size="100%">Vadakkepat, Prahlad</style></author><author><style face="normal" font="default" size="100%">Jesse, Norbert</style></author><author><style face="normal" font="default" size="100%">Al Manum, Abdullah</style></author><author><style face="normal" font="default" size="100%">Puthusserypady K, Sadasivan</style></author><author><style face="normal" font="default" size="100%">Rückert, Ulrich</style></author><author><style face="normal" font="default" size="100%">Sitte, Joaquin</style></author><author><style face="normal" font="default" size="100%">Witkowski, Ulf</style></author><author><style face="normal" font="default" size="100%">Nakatsu, Ryohei</style></author><author><style face="normal" font="default" size="100%">Braunl, Thomas</style></author><author><style face="normal" font="default" size="100%">Baltes, Jacky</style></author><author><style face="normal" font="default" size="100%">Anderson, John</style></author><author><style face="normal" font="default" size="100%">Wong, Ching-Chang</style></author><author><style face="normal" font="default" size="100%">Verner, Igor</style></author><author><style face="normal" font="default" size="100%">Ahlgren, David</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">The Influence of Social Interaction on the Perception of Emotional Expression: A Case Study with a Robot Head</style></title><secondary-title><style face="normal" font="default" size="100%">Advances in Robotics: Proc. 
FIRA RoboWorld Congress 2009</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2009</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/chapter/10.1007%2F978-3-642-03983-6_10</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer Berlin Heidelberg</style></publisher><pub-location><style face="normal" font="default" size="100%">Incheon, Korea</style></pub-location><volume><style face="normal" font="default" size="100%">5744</style></volume><pages><style face="normal" font="default" size="100%">63–72</style></pages><isbn><style face="normal" font="default" size="100%">978-3-642-03983-6</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we focus primarily on the influence that socio-emotional interaction has on the perception of emotional expression by a robot. We also investigate and discuss the importance of emotion expression in socially interactive situations involving human robot interaction (HRI), and show the importance of utilising emotion expression when dealing with interactive robots, that are to learn and develop in socially situated environments. We discuss early expressional development and the function of emotion in communication in humans and how this can improve HRI communications. 
Finally we provide experimental results showing how emotion-rich interaction via emotion expression can affect the HRI process by providing additional information.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">David Bowes</style></author><author><style face="normal" font="default" size="100%">Roderick G Adams</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Volker Steuber</style></author><author><style face="normal" font="default" size="100%">Davey, Neil</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">The role of lateral inhibition in the sensory processing in a simulated spiking neural controller for a robot</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
2009 IEEE Symposium on Artificial Life (ALIFE 2009)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">03/2009</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ieeexplore.ieee.org/document/4937710/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Nashville, TN</style></pub-location><pages><style face="normal" font="default" size="100%">179–183</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4244-2763-5</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Visual adaptation is the process that allows animals to be able to see over a wide range of light levels. This is achieved partially by lateral inhibition in the retina which compensates for low/high light levels. Neural controllers which cause robots to turn away from or towards light tend to work in a limited range of light conditions. In real environments, the light conditions can vary greatly reducing the effectiveness of the robot. Our solution for a simple Braitenberg vehicle is to add a single inhibitory neuron which laterally inhibits the output to the robot motors. 
This solution has additionally reduced the computational complexity of our simple neuron allowing for a greater number of neurons to be simulated with a fixed set of resources.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">David Bowes</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Roderick G Adams</style></author><author><style face="normal" font="default" size="100%">Volker Steuber</style></author><author><style face="normal" font="default" size="100%">Davey, Neil</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Pierre-Yves Oudeyer</style></author><author><style face="normal" font="default" size="100%">Christian Balkenius</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Should I worry about my stressed pregnant robot?</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
9th International Conference on Epigenetic Robotics: Modeling Cognitive Development in Robotic Systems (EpiRob 2009)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lund University Cognitive Studies</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">11/2009</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.lucs.lu.se/LUCS/146/epirob09.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Lund University</style></publisher><pub-location><style face="normal" font="default" size="100%">Venice, Italy</style></pub-location><volume><style face="normal" font="default" size="100%">146</style></volume><pages><style face="normal" font="default" size="100%">203–204</style></pages><isbn><style face="normal" font="default" size="100%">978-91-977-380-7-1</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Oros, Nicolas</style></author><author><style face="normal" font="default" size="100%">Volker Steuber</style></author><author><style face="normal" font="default" size="100%">Davey, Neil</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Roderick G Adams</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Asada, Minoru</style></author><author><style face="normal" font="default" size="100%">Hallam, John C T</style></author><author><style face="normal" font="default" size="100%">Jean-Arcady Meyer</style></author><author><style 
face="normal" font="default" size="100%">Tani, Jun</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Adaptive Olfactory Encoding in Agents Controlled by Spiking Neural Networks</style></title><secondary-title><style face="normal" font="default" size="100%">From Animals to Animats 10: Proc. 10th International Conference on Simulation of Adaptive Behavior (SAB 2008)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science (LNCS)</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">07/2008</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://link.springer.com/chapter/10.1007/978-3-540-69134-1_15</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer, Berlin, Heidelberg</style></publisher><pub-location><style face="normal" font="default" size="100%">Osaka, Japan</style></pub-location><volume><style face="normal" font="default" size="100%"> 5040</style></volume><pages><style face="normal" font="default" size="100%">148–158</style></pages><isbn><style face="normal" font="default" size="100%">978-3-540-69134-1</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We created a neural architecture that can use two different types of information encoding strategies depending on the environment. The goal of this research was to create a simulated agent that could react to two different overlapping chemicals having varying concentrations. The neural network controls the agent by encoding its sensory information as temporal coincidences in a low concentration environment, and as firing rates at high concentration. 
With such an architecture, we could study synchronization of firing in a simple manner and see its effect on the agent’s behaviour.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Matthew Schlesinger</style></author><author><style face="normal" font="default" size="100%">Luc Berthouze</style></author><author><style face="normal" font="default" size="100%">Christian Balkenius</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Conscientious Caretaking for Autonomous Robots: An Arousal-Based Model of Exploratory Behavior</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
8th International Conference on Epigenetic Robotics: Modeling Cognitive Development in Robotic Systems (EpiRob 2008)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lund University Cognitive Studies</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.lucs.lu.se/LUCS/139/hiolle.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Lund University</style></publisher><pub-location><style face="normal" font="default" size="100%">Brighton, UK</style></pub-location><volume><style face="normal" font="default" size="100%">139</style></volume><pages><style face="normal" font="default" size="100%">45–52</style></pages><isbn><style face="normal" font="default" size="100%">978-91-977-380-1-9</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">The question of how autonomous robots could be part of our everyday life is gaining increasing interest. We present here an experiment in which an autonomous robot explores its environment and tries to familiarize itself with its novel elements using a neural-network-based architecture. When confronted with novelty, the lack of stability of its learning structures increases the arousal level of the robot, pushing it to look for comfort from its caretaker in order to reduce this arousal. In this paper, we studied how the behavior of the caretaker—and in particular the amount of comfort it provides to the robot during its exploration of the environment—influences the course of the robot’s exploration and learning experience. This work takes inspiration from early mother-infant interactions and the impact that the primary caretaker has on the development of children—at least in mainstream Western culture. 
The underlying hypothesis is that the behavior of a caregiver, and particularly his/her role in modulating arousal, will influence the development of an autonomous robot, and that arousal regulation will also depend on how accurately the robot signals its internal state and how the caretaker (or human user) responds to these signals.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Oros, Nicolas</style></author><author><style face="normal" font="default" size="100%">Volker Steuber</style></author><author><style face="normal" font="default" size="100%">Davey, Neil</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Roderick G Adams</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Seth Bullock</style></author><author><style face="normal" font="default" size="100%">Jason Noble</style></author><author><style face="normal" font="default" size="100%">Richard A. 
Watson</style></author><author><style face="normal" font="default" size="100%">Mark A Bedau</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Optimal Noise in Spiking Neural Networks for the Detection of Chemicals by Simulated Agents</style></title><secondary-title><style face="normal" font="default" size="100%">Artificial Life XI: Proceedings of the Eleventh International Conference on the Simulation and Synthesis of Living Systems</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2008</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://mitpress-request.mit.edu/sites/default/files/titles/alife/0262287196chap58.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Winchester, UK</style></pub-location><pages><style face="normal" font="default" size="100%">443–449</style></pages><isbn><style face="normal" font="default" size="100%">978-0-262-75017-2</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We created a spiking neural controller for an agent that could use two different types of information encoding strategies depending on the level of chemical concentration present in the environment. The first goal of this research was to create a simulated agent that could react and stay within a region where there were two different overlapping chemicals having uniform concentrations. 
The agent was controlled by a spiking neural network that encoded sensory information using temporal coincidence of incoming spikes when the level of chemical concentration was low, and as firing rates at high level of concentration. With this architecture, we could study synchronization of firing in a simple manner and see its effect on the agent’s behaviour. The next experiment we did was to use a more realistic model by having an environment composed of concentration gradients and by adding input current noise to all neurons. We used a realistic model of diffusive noise and showed that it could improve the agent’s behaviour if used within a certain range. Therefore, an agent with neuronal noise was better able to stay within the chemical concentration than an agent without.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Oros, Nicolas</style></author><author><style face="normal" font="default" size="100%">Volker Steuber</style></author><author><style face="normal" font="default" size="100%">Davey, Neil</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Roderick G Adams</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Trappl, R</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Optimal Receptor Response Functions for the Detection of Pheromones by Agents Driven by Spiking Neural Networks</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 9th European Meeting on Cybernetics and Systems Research, Vol. 
II</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">03/2008</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.cogsci.uci.edu/~noros/mypapers/OROS_2008_EMCSR.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Austrian Society for Cybernetic Studies</style></publisher><pub-location><style face="normal" font="default" size="100%">Vienna, Austria</style></pub-location><pages><style face="normal" font="default" size="100%">427–432</style></pages><isbn><style face="normal" font="default" size="100%">978-3-85206-175-7</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">The goal of the work presented here is to find a model of a spiking sensory neuron that could cope with small variations in the concentration of simulated chemicals and also the whole range of concentrations. By using a biologically plausible sigmoid function in our model to map chemical concentration to current, we could produce agents able to detect the whole range of concentration of chemicals (pheromones) present in the environment as well as small variations of them. 
The sensory neurons used in our model are able to encode the stimulus intensity into appropriate firing rates.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">David Bowes</style></author><author><style face="normal" font="default" size="100%">Roderick G Adams</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Volker Steuber</style></author><author><style face="normal" font="default" size="100%">Davey, Neil</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Madani, K</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Receptor Response and Soma Leakiness in a Simulated Spiking Neural Controller for a Robot</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 4th International Workshop on Artificial Neural Networks and Intelligent Information Processing (ANNIIP 2008)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">05/2008</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://uhra.herts.ac.uk/handle/2299/6832</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">INSTICC (Inst. Syst. 
Technologies Information Control and Communication)</style></publisher><pub-location><style face="normal" font="default" size="100%">Funchal, Madeira, Portugal</style></pub-location><pages><style face="normal" font="default" size="100%">100–106</style></pages><isbn><style face="normal" font="default" size="100%">978-989-8111-33-3</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This paper investigates different models of leakiness for the soma of a simulated spiking neural controller for a robot exhibiting negative photo-taxis. It also investigates two models of receptor response to stimulus levels. The results show that exponential decay of ions across the soma and of a receptor response function where intensity is proportional to intensity is the best combination for dark seeking behaviour.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>19</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">M Simon</style></author><author><style face="normal" font="default" size="100%">P Canet</style></author><author><style face="normal" font="default" size="100%">R Soussignan</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Jacqueline Nadel</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Reconnaissance et résonance émotionnelle face à un humain et à un robot chez des enfants typiques et des enfants avec autisme de haut niveau</style></title><secondary-title><style face="normal" font="default" size="100%">Bulletin scientifique de l’Arapi</style></secondary-title></titles><dates><year><style  face="normal" font="default" 
size="100%">2008</style></year></dates><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Arnaud J Blanchard</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Martin V Butz</style></author><author><style face="normal" font="default" size="100%">Olivier Sigaud</style></author><author><style face="normal" font="default" size="100%">Giovanni Pezzulo</style></author><author><style face="normal" font="default" size="100%">Gianluca Baldassarre</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Anticipating Rewards in Continuous Time and Space: A Case Study in Developmental Robotics</style></title><secondary-title><style face="normal" font="default" size="100%">Anticipatory Behavior in Adaptive Learning Systems: From Brains to Individual and Social Behavior</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Artificial Intelligence</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2007</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.springer.com/gp/book/9783540742616</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Berlin, Heidelberg</style></pub-location><volume><style face="normal" font="default" size="100%">4520</style></volume><pages><style face="normal" font="default" size="100%">267–284</style></pages><isbn><style face="normal" font="default" 
size="100%">978-3-540-74261-6</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This paper presents the first basic principles, implementation and experimental results of what could be regarded as a new approach to reinforcement learning, where agents—physical robots interacting with objects and other agents in the real world—can learn to anticipate rewards using their sensory inputs. Our approach does not need discretization, notion of events, or classification, and instead of learning rewards for the different possible actions of an agent in all the situations, we propose to make agents learn only the main situations worth avoiding and reaching. However, the main focus of our work is not reinforcement learning as such, but modeling cognitive development on a small autonomous robot interacting with an “adult” caretaker, typically a human, in the real world; the control architecture follows a Perception-Action approach incorporating a basic homeostatic principle. This interaction occurs in very close proximity, uses very coarse and limited sensory-motor capabilities, and affects the “well-being” and affective state of the robot. The type of anticipatory behavior we are concerned with in this context relates to both sensory and reward anticipation. 
We have applied and tested our model on a real robot.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Jacqueline Nadel</style></author><author><style face="normal" font="default" size="100%">M Simon</style></author><author><style face="normal" font="default" size="100%">P Canet</style></author><author><style face="normal" font="default" size="100%">R Soussignan</style></author><author><style face="normal" font="default" size="100%">P Blancard</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Human Responses to an Expressive Robot</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the Sixth International Workshop on Epigenetic Robotics</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lund University Cognitive Studies</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2006</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.lucs.lu.se/LUCS/128/Nadeletal.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Lund University</style></publisher><pub-location><style face="normal" font="default" size="100%">Paris, France</style></pub-location><volume><style face="normal" font="default" size="100%">128</style></volume><pages><style face="normal" font="default" size="100%">79–86</style></pages><isbn><style face="normal" font="default" size="100%">91-974741-6-9</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" 
font="default" size="100%">This paper reports the results of the first study comparing subjects' responses to robotic emotional facial displays and human emotional facial displays.
It describes step by step the building of believable emotional expressions in a robotic head, the problems raised by a comparative approach of robotic and human expressions, and the solutions found in order to ensure a valid comparison. Twenty adults and 15 children aged 3 were presented with static (photos) and dynamic (2-D videoclips, or 3-D live) displays of emotional expressions presented by a robot or a person.
The study compares two dependent variables: emotional resonance (automatic facial feedback during an emotional display) and emotion recognition (emotion labeling) according to partners (robot or person) and to the nature of the display (static or dynamic). Results for emotional resonance were similar with young children and with adults. Both groups resonated significantly more to dynamic displays than to static displays, be they robotic expressions or human expressions. In both groups, emotion recognition was easier for human expressions than for robotic ones.
Unlike children, who more easily recognized dynamically displayed emotional expressions, adults scored higher with static displays, thus reflecting a cognitive strategy independent of emotional resonance. Results are discussed in the perspective of the therapeutic use of this comparative approach with children with autism, who are described as impaired in emotion sharing and communication.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cos-Aguilera, Ignasi</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Gillian M Hayes</style></author><author><style face="normal" font="default" size="100%">Gillies, Andrew</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Joanna J Bryson</style></author><author><style face="normal" font="default" size="100%">Tony J Prescott</style></author><author><style face="normal" font="default" size="100%">Anil K Seth</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Ecological Integration of Affordances and Drives for Behaviour Selection</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
IJCAI 2005 Workshop on Modeling Natural Action Selection</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year></dates><pub-location><style face="normal" font="default" size="100%">Edinburgh, Scotland</style></pub-location><pages><style face="normal" font="default" size="100%">225–228</style></pages><isbn><style face="normal" font="default" size="100%">1-902956-40-9</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This paper shows a study of the integration of physiology and perception in a biologically inspired robotic architecture that learns behavioural patterns by interaction with the environment. This implements a hierarchical view of learning and behaviour selection which bases adaptation on a relationship between reinforcement and the agent’s inner motivations. This view ingrains together the basic principles necessary to explain the underlying processes of learning behavioural patterns and the way these change via interaction with the environment. 
These principles have been experimentally tested and the results are presented and discussed throughout the paper.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Arnaud J Blanchard</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Luc Berthouze</style></author><author><style face="normal" font="default" size="100%">Frédéric Kaplan</style></author><author><style face="normal" font="default" size="100%">Hideki Kozima</style></author><author><style face="normal" font="default" size="100%">Hiroyuki Yano</style></author><author><style face="normal" font="default" size="100%">Jürgen Konczak</style></author><author><style face="normal" font="default" size="100%">Giorgio Metta</style></author><author><style face="normal" font="default" size="100%">Jacqueline Nadel</style></author><author><style face="normal" font="default" size="100%">Giulio Sandini</style></author><author><style face="normal" font="default" size="100%">Georgi Stojanov</style></author><author><style face="normal" font="default" size="100%">Christian Balkenius</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">From Imprinting to Adaptation: Building a History of Affective Interaction</style></title><secondary-title><style face="normal" font="default" size="100%">Fifth International Workshop on Epigenetic Robotics: Modeling Cognitive Development in Robotic Systems (EpiRob2005)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year></dates><publisher><style face="normal" font="default" size="100%">Lund University Cognitive Studies</style></publisher><pages><style face="normal" font="default" 
size="100%">23–30</style></pages><isbn><style face="normal" font="default" size="100%">91-974741-4-2</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We present a Perception-Action architecture and experiments to simulate imprinting—the establishment of strong attachment links with a &quot;caregiver&quot;—in a robot. Following recent theories, we do not consider imprinting as rigidly timed and irreversible, but as a more flexible phenomenon that allows for further adaptation as a result of reward-based learning through experience. Our architecture reconciles these two types of perceptual learning traditionally considered as different and even incompatible. After the initial imprinting, adaptation is achieved in the context of a history of &quot;affective&quot; interactions between the robot and a human, driven by &quot;distress&quot; and &quot;comfort&quot; responses in the robot.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cortés, Ulises</style></author><author><style face="normal" font="default" size="100%">Annicchiarico, Roberta</style></author><author><style face="normal" font="default" size="100%">Campana, Fabio</style></author><author><style face="normal" font="default" size="100%">Vázquez-Salceda, Javier</style></author><author><style face="normal" font="default" size="100%">Urdiales, Cristina</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Maite López</style></author><author><style face="normal" font="default" size="100%">Miquel Sànchez-Marrè</style></author><author><style face="normal" font="default" size="100%">Di Vincenzo, Sarah</style></author><author><style face="normal" font="default" 
size="100%">Carlo Caltagirone</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Intelligenza artificiale in medicina: progetto di una piattaforma mobile inserita in un ambiente intelligente per l'assistenza ai disabili e agli anziani</style></title><secondary-title><style face="normal" font="default" size="100%">Recenti Progressi in Medicina</style></secondary-title><translated-title><style face="normal" font="default" size="100%">Artificial intelligence in medicine: project of a mobile platform in an intelligent environment for the care of disabled and elderly people</style></translated-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year></dates><publisher><style face="normal" font="default" size="100%">Pensiero scientifico</style></publisher><volume><style face="normal" font="default" size="100%">95</style></volume><pages><style face="normal" font="default" size="100%">190–195</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Viene presentato un progetto basato sull'integrazione di nuove tecnologie e di Intelligenza artificiale per sviluppare uno strumento – e-tool – indirizzato alle persone disabili ed agli anziani. Una piattaforma mobile inserita all'interno di ambienti intelligenti (strutture di assistenza o abitazioni), controllata e gestita attraverso un'architettura multilivello, viene proposta come supporto sia per i pazienti che per i caregiver al fine di aumentare l'autonomia nella vita quotidiana.

A project based on the integration of new technologies and artificial intelligence to develop a device – e-tool – for disabled patients and elderly people is presented. A mobile platform in intelligent environments (skilled-care facilities and home-care), controlled and managed by a multi-level architecture, is proposed to support patients and caregivers to increase self-dependency in activities of daily living.</style></abstract><issue><style face="normal" font="default" size="100%">4</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Robert Lowe</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Nehaniv, Chrystopher L</style></author><author><style face="normal" font="default" size="100%">Daniel Polani</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Harald Schaub</style></author><author><style face="normal" font="default" size="100%">Frank Detje</style></author><author><style face="normal" font="default" size="100%">Ulrike Brüggermann</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Strategies in the Evolution of Affect Related Displays and Recognition</style></title><secondary-title><style face="normal" font="default" size="100%">The Logic Of Artificial Life: Abstracting and Synthesizing the Principles of Living Systems; Proc. 
6th German Workshop on Artificial Life 2004</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year></dates><publisher><style face="normal" font="default" size="100%">IOS Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Bamberg, Germany</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">A more realistic alternative to the game theoretic approach to measuring the behavioural success of animal display can be represented by affect related expression and perception. The current paper investigates the ways in which agents can use evolved affect related displays to manipulate the behaviour of affect perceiving rival agents to their survival advantage. 
</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Avila-García, Orlando</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Stefan Schaal</style></author><author><style face="normal" font="default" size="100%">Auke Jan Ijspeert</style></author><author><style face="normal" font="default" size="100%">Aude Billard</style></author><author><style face="normal" font="default" size="100%">Sethu Vijayakumar</style></author><author><style face="normal" font="default" size="100%">John Hallam</style></author><author><style face="normal" font="default" size="100%">Jean-Arcady Meyer</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Using Hormonal Feedback to Modulate Action Selection in a Competitive Scenario</style></title><secondary-title><style face="normal" font="default" size="100%">From Animals to Animats 8: Proc. 8th Intl. Conf. 
on Simulation of Adaptive Behavior (SAB'04)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.researchgate.net/profile/Orlando_Avila-Garcia/publication/228958663_Using_Hormonal_Feedback_to_Modulate_Action_Selection_in_a_Competitive_Scenario/links/0deec533c8411ebe0c000000.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Los Angeles, USA</style></pub-location><pages><style face="normal" font="default" size="100%">243–252</style></pages><isbn><style face="normal" font="default" size="100%">9780262693417</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we investigate the use of hormonal feedback as a mechanism to modulate a &quot;motivation-based,&quot; homeostatic action selection mechanism (ASM) in a robot. We have framed our study in the context of a dynamic, multirobot, competitive &quot;two-resource&quot; action selection problem. The introduction of competitors has important consequences for action selection. We first show how the interaction between robots introduces new forms of environmental complexity that affect their viability. 
Secondly, we propose a &quot;hormone-like&quot; mechanism that, modulating the input of the ASM, tackles these new sources of complexity.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cortés, Ulises</style></author><author><style face="normal" font="default" size="100%">Annicchiarico, Roberta</style></author><author><style face="normal" font="default" size="100%">Vázquez-Salceda, Javier</style></author><author><style face="normal" font="default" size="100%">Urdiales, Cristina</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Maite López</style></author><author><style face="normal" font="default" size="100%">Miquel Sànchez-Marrè</style></author><author><style face="normal" font="default" size="100%">Carlo Caltagirone</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Assistive technologies for the disabled and for the new generation of senior citizens: The e-Tools architecture</style></title><secondary-title><style face="normal" font="default" size="100%">AI Communications</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2003</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://content.iospress.com/articles/ai-communications/aic288</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IOS Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Amsterdam, The Netherlands</style></pub-location><volume><style face="normal" font="default" size="100%">16</style></volume><pages><style face="normal" font="default" size="100%">193–207</style></pages><language><style face="normal" font="default" 
size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we present our exploratory ideas about the integration of agent technology with other technologies to build specific e-tools for the disabled and for the new generation of senior citizens. &quot;e-Tools&quot; stands for Embedded Tools, as we aim to embed intelligent assistive devices in homes and other facilities, creating ambient intelligence environments to give support to patients and caregivers. In particular, we aim to explore the benefits of the concept of situated intelligence to build artefacts that will enhance the autonomy of the target user group in their daily life.</style></abstract><issue><style face="normal" font="default" size="100%">3</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cortés, Ulises</style></author><author><style face="normal" font="default" size="100%">Annicchiarico, Roberta</style></author><author><style face="normal" font="default" size="100%">Vázquez-Salceda, Javier</style></author><author><style face="normal" font="default" size="100%">Urdiales, Cristina</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Maite López</style></author><author><style face="normal" font="default" size="100%">Miquel Sànchez-Marrè</style></author><author><style face="normal" font="default" size="100%">Carlo Caltagirone</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">I Rudomín</style></author><author><style face="normal" font="default" size="100%">J Vázquez-Salceda</style></author><author><style face="normal" font="default" size="100%">J L Díaz de León Santiago</style></author></secondary-authors></contributors><titles><title><style 
face="normal" font="default" size="100%">e-Tools: The use of Assistive Technologies to enhance disabled and senior citizens’ autonomy</style></title><secondary-title><style face="normal" font="default" size="100%">e-Health: Application of Computing Science in Medicine and Health Care</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2003</style></year></dates><publisher><style face="normal" font="default" size="100%">Instituto Politécnico National Press</style></publisher><pages><style face="normal" font="default" size="100%">119–132</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we present our preliminary ideas about the integration of several technologies to build specific e-tools for the disabled and for the new generation of senior citizens. ‘e-Tools’ stands for Embedded Tools, as we aim to embed intelligent assistive devices in homes and other facilities, creating ambient intelligence environments to give support to patients and caregivers. In particular, we aim to explore the benefits of the concept of situated intelligence to build intelligent artefacts that will enhance the autonomy of the target group during their daily life. 
We present here a multi-level architecture and our preliminary research on navigation schemes for a robotic wheelchair.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cos-Aguilera, Ignasi</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Gillian M Hayes</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Detjer, Frank</style></author><author><style face="normal" font="default" size="100%">Dörner, Dietrich</style></author><author><style face="normal" font="default" size="100%">Harald Schaub</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Motivation-driven learning of object affordances: First experiments using a simulated khepera robot</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
5th International Conference in Cognitive Modelling (ICCM'03)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2003</style></year></dates><pub-location><style face="normal" font="default" size="100%">Bamberg, Germany</style></pub-location><pages><style face="normal" font="default" size="100%">57–62</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Nehaniv, Chrystopher L</style></author><author><style face="normal" font="default" size="100%">Daniel Polani</style></author><author><style face="normal" font="default" size="100%">Kerstin Dautenhahn</style></author><author><style face="normal" font="default" size="100%">René te Boekhorst</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Russell Standish</style></author><author><style face="normal" font="default" size="100%">Mark A Bedau</style></author><author><style face="normal" font="default" size="100%">Hussein A Abbass</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Meaningful Information, Sensor Evolution, and the Temporal Horizon of Embodied Organisms</style></title><secondary-title><style face="normal" font="default" size="100%">Artificial Life VIII: Proceedings of the Eighth International Conference on Artificial Life</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2002</style></year></dates><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Sydney, Australia</style></pub-location><pages><style 
face="normal" font="default" size="100%">345–349</style></pages><isbn><style face="normal" font="default" size="100%">9780262692816</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We survey and outline how an agent-centered, information-theoretic approach to meaningful information extending classical Shannon information theory by means of utility measures relevant for the goals of particular agents can be applied to sensor evolution for real and constructed organisms. Furthermore, we discuss the relationship of this approach to the programme of freeing artificial life and robotic systems from reactivity, by describing useful types of information with broader temporal horizon, for signaling, communication, affective grounding, two-process learning, individual learning, imitation and social learning, and episodic experiential information (memories, narrative, and culturally transmitted information).</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Bisson, Gilles</style></author><author><style face="normal" font="default" size="100%">Nédellec, Claire</style></author><author><style face="normal" font="default" size="100%">Cañamero, Lola D</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Staab, S</style></author><author><style face="normal" font="default" size="100%">Maedche, A</style></author><author><style face="normal" font="default" size="100%">Nédellec, Claire</style></author><author><style face="normal" font="default" size="100%">Wiemer-Hastins, P</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Designing Clustering Methods for Ontology Building: The Mo'K 
Workbench</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. First Workshop on Ontology Learning. Workshop of the 14th European Conference on Artificial Intelligence (ECAI 2000)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2000</style></year></dates><pages><style face="normal" font="default" size="100%">13–18</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This paper describes Mo'K, a configurable workbench that supports the development of conceptual clustering methods for ontology building. Mo'K is intended to assist ontology developers in the exploratory process of defining the most suitable learning methods for a given task. To do so, it provides facilities for evaluation, comparison, characterization and elaboration of conceptual clustering methods. Also, the model underlying Mo'K permits a fine-grained definition of similarity measures and class construction operators, easing the tasks of method instantiation and configuration. 
This paper presents some experimental results that illustrate the suitability of the model to help characterize and assess the performance of different methods that learn semantic classes from parsed corpora.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">D Cañamero</style></author><author><style face="normal" font="default" size="100%">Vincent Corruble</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Joan Bliss</style></author><author><style face="normal" font="default" size="100%">Roger Säljö</style></author><author><style face="normal" font="default" size="100%">Paul Light</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Situated Cognition: A Challenge to Artificial Intelligence?</style></title><secondary-title><style face="normal" font="default" size="100%">Learning Sites: Social and Technological Contexts for Learning</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1999</style></year></dates><publisher><style face="normal" font="default" size="100%">Elsevier</style></publisher><pages><style face="normal" font="default" size="100%">223–235</style></pages><isbn><style face="normal" font="default" size="100%">978-0080433509</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><section><style face="normal" font="default" size="100%">17</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Bart de Boer</style></author><author><style face="normal" font="default" size="100%">D Cañamero</style></author></authors><secondary-authors><author><style face="normal" 
font="default" size="100%">Joan Bliss</style></author><author><style face="normal" font="default" size="100%">Roger Säljö</style></author><author><style face="normal" font="default" size="100%">Paul Light</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Situated Learning in Autonomous Agents</style></title><secondary-title><style face="normal" font="default" size="100%">Learning Sites: Social and Technological Contexts for Learning</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1999</style></year></dates><publisher><style face="normal" font="default" size="100%">Elsevier</style></publisher><pages><style face="normal" font="default" size="100%">236–248</style></pages><isbn><style face="normal" font="default" size="100%">978-0080433509</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><section><style face="normal" font="default" size="100%">18</style></section></record></records></xml>