<?xml version="1.0" encoding="UTF-8"?><xml><records><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Imran Khan</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Adaptation-By-Proxy: Contagion Effect of Social Buffering in an Artificial Society</style></title><secondary-title><style face="normal" font="default" size="100%">ALIFE 2021: The 2021 Conference on Artificial Life</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2021</style></year><pub-dates><date><style  face="normal" font="default" size="100%">07/2021</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://direct.mit.edu/isal/proceedings/isal/90/102917</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">The MIT Press</style></publisher><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">The “social buffering” phenomenon proposes that social support facilitates wellbeing by reducing stress in a number of different ways. While this phenomenon may benefit agents with social support from others, its potential effects on the wider social group are less clear. Using a biologically-inspired artificial life model, we have investigated how some of the hypothesised hormonal mechanisms that underpin the “social buffering” phenomenon affect the wellbeing and interactions of agents without social support across numerous social and physical contexts. We tested these effects in a small, rank-based society, with half of the agents endowed with numerous hormonal mechanisms associated with “social buffering”, and half without. 
Surprisingly, our results found that these “social buffering” mechanisms provided survival-related advantages to agents without social support across numerous conditions. We found that agents with socially-adaptive mechanisms themselves become a proxy for adaptation, and suggest that, in some (artificial) societies, “social buffering” may be a contagious phenomenon.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://direct.mit.edu/isal/proceedings/isal/90/102917&quot;&gt;Download&lt;/a&gt; (Open Access)
</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Zakaria Lemhaouri</style></author><author><style face="normal" font="default" size="100%">Laura Cohen</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Affect-grounded Language Learning in a Robot</style></title><secondary-title><style face="normal" font="default" size="100%">FEEL-COG: The Role of Affect in the Development of Cognition, ICDL 2021 Workshop</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2021</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2021</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://whisperproject.eu/workshop-feel-cog</style></url></web-urls></urls><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://whisperproject.eu/images/FEEL-COG/submissions/Affect_grounded_Language_Learning.pdf&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Laura Cohen</style></author><author><style face="normal" font="default" size="100%">Ann Nowé</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A functionalist approach to language learning in robots: the development of meaning potentials in social and emotional 
contexts</style></title><secondary-title><style face="normal" font="default" size="100%">HBCR 2021: IROS 2021 workshop on Human-Like Behavior and Cognition in Robots</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2021</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2021</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://sites.google.com/view/hbcr-workshop-2021/speakers</style></url></web-urls></urls><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Mickaëlla Grondin-Verdon</style></author><author><style face="normal" font="default" size="100%">Nezih Younsi</style></author><author><style face="normal" font="default" size="100%">Michele Grimaldi</style></author><author><style face="normal" font="default" size="100%">Catherine Pelachaud</style></author><author><style face="normal" font="default" size="100%">Laurence Chaby</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Induction of the being-seen-feeling by an embodied conversational agent in a socially interactive context</style></title><secondary-title><style face="normal" font="default" size="100%">21st ACM International Conference on Intelligent Virtual Agents</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2021</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2021</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" 
size="100%">https://hal.archives-ouvertes.fr/hal-03342893/document</style></url></web-urls></urls><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://hal.archives-ouvertes.fr/hal-03342893/document&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Stavros Anagnou</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Towards an Affective Model of Norm Emergence and Adaptation</style></title><secondary-title><style face="normal" font="default" size="100%">TSAR 2021: RO-MAN 2021 Workshop on Robot Behavior Adaptation to Human Social Norms</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2021</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2021</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://tsar2021.ai.vub.ac.be/uploads/papers/TSAR_2021_paper_10.pdf</style></url></web-urls></urls><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://tsar2021.ai.vub.ac.be/uploads/papers/TSAR_2021_paper_10.pdf&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Hickton, Luke</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" 
size="100%">Kheng Lee Koay</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Does Expression of Grounded Affect in a Hexapod Robot Elicit More Prosocial Responses?</style></title><secondary-title><style face="normal" font="default" size="100%">UKRAS20 Conference: &quot;Robots into the real world&quot; Proceedings</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2020</style></year><pub-dates><date><style  face="normal" font="default" size="100%">04/2020</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://uhra.herts.ac.uk/bitstream/handle/2299/22817/UKRAS20_paper_09.pdf</style></url></web-urls></urls><pub-location><style face="normal" font="default" size="100%">Lincoln, UK</style></pub-location><pages><style face="normal" font="default" size="100%">40–42</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We consider how non-humanoid robots can communicate their affective state via bodily forms of communication, and the extent to which this can influence human response. We propose a simple model of grounded affect and kinesic expression and outline two experiments (N=9 and N=180) in which participants were asked to watch expressive and non-expressive hexapod robots perform different ‘scenes’. Our preliminary findings suggest the expressive robot stimulated greater desire for interaction, and was more likely to be attributed with emotion. 
It also elicited more desire for prosocial behaviour.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://uhra.herts.ac.uk/bitstream/handle/2299/22817/UKRAS20_paper_09.pdf&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Embodied Affect for Real-World Human-Robot Interaction</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the 2020 ACM/IEEE International Conference on Human-Robot Interaction</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">HRI '20</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2020</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://dl.acm.org/doi/abs/10.1145/3319502.3374843</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Association for Computing Machinery</style></publisher><pub-location><style face="normal" font="default" size="100%">New York, NY, USA</style></pub-location><pages><style face="normal" font="default" size="100%">459–460</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">The potential that robots offer to support humans in multiple aspects of our daily lives is increasingly acknowledged. Despite the clear progress in social robotics and human-robot interaction, the actual realization of this potential still faces numerous scientific and technical challenges, many of them linked to difficulties in dealing with the complexity of the real world. 
Achieving real-world human-robot interaction requires, on the one hand, taking into account and addressing real-world (e.g., stakeholder's) needs and application areas and, on the other hand, making our robots operational in the real world. In this talk, I will address some of the contributions that Embodied Artificial Intelligence can make towards this goal, illustrating my arguments with examples of my and my group's research on HRI using embodied autonomous affective robots in areas such as developmental robotics, healthcare, and computational psychiatry. So far little explored in HRI, Embodied AI, which started as an alternative to &quot;symbolic AI&quot; (a &quot;paradigm change&quot;) in the way to conceive and model the notion of &quot;intelligence&quot; and the interactions of embodied agents with the real world, is highly relevant towards achieving &quot;real-world HRI&quot;, with its emphasis on notions such as autonomy, adaptation, interaction with dynamic environments, sensorimotor loops and coordination, learning from interactions, and more generally, as Rodney Brooks put it, using and exploiting the real world as &quot;its own best model&quot;.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://dl.acm.org/doi/10.1145/3319502.3374843&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Hickton, Luke</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Abdelkhilick Mohammad</style></author><author><style face="normal" font="default" size="100%">Xin Dong</style></author><author><style face="normal" font="default" 
size="100%">Matteo Russo</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Expression of Grounded Affect: How Much Emotion Can Arousal Convey?</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 21st Towards Autonomous Robotic Systems Conference  (TAROS2020)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2020</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2020</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/chapter/10.1007/978-3-030-63486-5_26</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Nottingham, UK</style></pub-location><volume><style face="normal" font="default" size="100%">12228</style></volume><pages><style face="normal" font="default" size="100%">234–248</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we consider how non-humanoid robots can communicate their affective state via bodily forms of communication (kinesics), and the extent to which this influences how humans respond to them. We propose a simple model of grounded affect and kinesic expression before presenting the qualitative findings of an exploratory study (N=9), during which participants were interviewed after watching expressive and non-expressive hexapod robots perform different ‘scenes’. A summary of these interviews is presented and a number of emerging themes are identified and discussed. 
Whilst our findings suggest that the expressive robot did not evoke significantly greater empathy or altruistic intent in humans than the control robot, the expressive robot stimulated greater desire for interaction and was also more likely to be attributed with emotion.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.nottingham.ac.uk/conference/fac-eng/taros/proceedings/proceedings.aspx&quot;&gt;Download&lt;/a&gt; (the complete proceedings are available from the link on this page)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Imran Khan</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Josh Bongard</style></author><author><style face="normal" font="default" size="100%">Juniper Lovato</style></author><author><style face="normal" font="default" size="100%">Laurent Hebert-Dufrésne</style></author><author><style face="normal" font="default" size="100%">Radhakrishna Dasari</style></author><author><style face="normal" font="default" size="100%">Lisa Soros</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Modelling the Social Buffering Hypothesis in an Artificial Life Environment</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the Artificial Life Conference 2020 (ALIFE 2020)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2020</style></year><pub-dates><date><style  face="normal" font="default" size="100%">07/2020</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" 
font="default" size="100%">https://www.mitpressjournals.org/doi/abs/10.1162/isal_a_00302</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Montreal, Canada</style></pub-location><pages><style face="normal" font="default" size="100%">393–401</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In social species, individuals who form social bonds have been found to live longer, healthier lives. One hypothesised reason for this effect is that social support, mediated by oxytocin, &quot;buffers&quot; responses to stress in a number of ways, and is considered an important process of adaptation that facilitates long-term wellbeing in changing, stressful conditions. Using an artificial life model, we have investigated the role of one hypothesised stress-reducing effect of social support on the survival and social interactions of agents in a small society. We have investigated this effect using different types of social bonds and bond partner combinations across environmentally-challenging conditions. Our results have found that stress reduction through social support benefits the survival of agents with social bonds, and that this effect often extends to the wider society. We have also found that this effect is significantly affected by environmental and social contexts. 
Our findings suggest that these &quot;social buffering&quot; effects may not be universal, but dependent upon the degree of environmental challenges, the quality of affective relationships and the wider social context.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.mitpressjournals.org/doi/abs/10.1162/isal_a_00302&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Ana Tanevska</style></author><author><style face="normal" font="default" size="100%">Francesco Rea</style></author><author><style face="normal" font="default" size="100%">Giulio Sandini</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Alessandra Sciutti</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Socially Adaptable Framework for Human-Robot Interaction</style></title><secondary-title><style face="normal" font="default" size="100%">Frontiers in Robotics and AI</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2020</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.frontiersin.org/article/10.3389/frobt.2020.00121</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">7</style></volume><pages><style face="normal" font="default" size="100%">121</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In our everyday lives we regularly engage in complex, personalized, and adaptive interactions with our peers. 
To recreate the same kind of rich, human-like interactions, a social robot should be aware of our needs and affective states and continuously adapt its behavior to them. Our proposed solution is to have the robot learn how to select the behaviors that would maximize the pleasantness of the interaction for its peers. To make the robot autonomous in its decision making, this process could be guided by an internal motivation system. We wish to investigate how an adaptive robotic framework of this kind would function and personalize to different users. We also wish to explore whether the adaptability and personalization would bring any additional richness to the human-robot interaction (HRI), or whether it would instead bring uncertainty and unpredictability that would not be accepted by the robot's human peers. To this end, we designed a socially adaptive framework for the humanoid robot iCub. As a result, the robot perceives and reuses the affective and interactive signals from the person as input for the adaptation based on internal social motivation. We strive to investigate the value of the generated adaptation in our framework in the context of HRI. In particular, we compare how users will experience interaction with an adaptive versus a non-adaptive social robot. To address these questions, we propose a comparative interaction study with iCub whereby users act as the robot's caretaker, and iCub's social adaptation is guided by an internal comfort level that varies with the stimuli that iCub receives from its caretaker. We investigate and compare how iCub's internal dynamics would be perceived by people, both in a condition when iCub does not personalize its behavior to the person, and in a condition where it is instead adaptive. 
Finally, we establish the potential benefits that an adaptive framework could bring to the context of repeated interactions with a humanoid robot.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.frontiersin.org/article/10.3389/frobt.2020.00121&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Ana Tanevska</style></author><author><style face="normal" font="default" size="100%">Francesco Rea</style></author><author><style face="normal" font="default" size="100%">Giulio Sandini</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Alessandra Sciutti</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Cognitive Architecture for Socially Adaptable Robots</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
2019 Joint IEEE 9th International Conference on Development and Learning and Epigenetic Robotics (ICDL-EpiRob)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2019</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2019</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://ieeexplore.ieee.org/document/8850688</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Oslo, Norway</style></pub-location><pages><style face="normal" font="default" size="100%">195–200</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://ieeexplore.ieee.org/document/8850688&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Ana Tanevska</style></author><author><style face="normal" font="default" size="100%">Francesco Rea</style></author><author><style face="normal" font="default" size="100%">Giulio Sandini</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Alessandra Sciutti</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Eager to Learn vs. Quick to Complain? How a socially adaptive robot architecture performs with different robot personalities</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
2019 IEEE International Conference on Systems, Man, and Cybernetics (IEEE SMC 2019)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2019</style></year><pub-dates><date><style  face="normal" font="default" size="100%">10/2019</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://ieeexplore.ieee.org/document/8913903</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Bari, Italy</style></pub-location><pages><style face="normal" font="default" size="100%">365–371</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">A social robot that is aware of our needs and continuously adapts its behaviour to them has the potential of creating a complex, personalized, human-like interaction of the kind we are used to have with our peers in our everyday lives. We are interested in exploring how would an adaptive architecture function and personalize to different users when given different initial values of its variables, i.e. when implementing the same adaptive framework with different robot personalities. Would an architecture that learns very quickly outperform a slower but steadier learning profile? 
To further explore this, we propose a cognitive architecture for the humanoid robot iCub supporting adaptability and we attempt to validate its functionality and test different robot profiles.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://ieeexplore.ieee.org/document/8913903&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Imran Khan</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">The Effects of Affective Social Bonds on the Interactions and Survival of Simulated Agents</style></title><secondary-title><style face="normal" font="default" size="100%">ACII2019 Workshop on Social Emotions, Theories and Models (SE-THEMO)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2019</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2019</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://ieeexplore.ieee.org/abstract/document/8925031</style></url></web-urls></urls><pub-location><style face="normal" font="default" size="100%">Cambridge, UK</style></pub-location><pages><style face="normal" font="default" size="100%">374–380</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">The formation and maintenance of affective social bonds plays a key role in the well-being of social agents. 
Oxytocin has been correlated with social partner preference, and it is hypothesised to influence prosocial behaviours. In this paper, we investigate the effects of modulating the preference of affective social bond partners through oxytocin during decisions related to food-sharing and grooming, in a society of simulated agents with different dominance ranks. Our results show survival benefits for agents with affective social bonds across a number of groups with different bond combinations. We observe a number of emergent social behaviours and suggest that our results bear some similarity with behaviors observed in biological agents.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://ieeexplore.ieee.org/abstract/document/8925031&quot;&gt;Download&lt;/a&gt; (or &lt;a href=&quot;http://www.emotion-modeling.info/sites/default/files/Khan_et_al_Affective_Social_Bonds_ACII2019_AcceptedVersion.pdf&quot;&gt;Download accepted version&lt;/a&gt;)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Embodied Robot Models for Interdisciplinary Emotion Research</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE Transactions on Affective Computing</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2019</style></year><pub-dates><date><style  face="normal" font="default" size="100%">Early Access</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://ieeexplore.ieee.org/document/8700489/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><language><style face="normal" font="default" 
size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Due to their complex nature, emotions cannot be properly understood from the perspective of a single discipline. In this paper, I discuss how the use of robots as models is beneficial for interdisciplinary emotion research. Addressing this issue through the lens of my own research, I focus on a critical analysis of embodied robots models of different aspects of emotion, relate them to theories in psychology and neuroscience, and provide representative examples. I discuss concrete ways in which embodied robot models can be used to carry out interdisciplinary emotion research, assessing their contributions: as hypothetical models, and as operational models of specific emotional phenomena, of general emotion principles, and of specific emotion &quot;dimensions&quot;. I conclude by discussing the advantages of using embodied robot models over other models.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://ieeexplore.ieee.org/document/8700489&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Naomi Fineberg</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Robot Model of OC-Spectrum Disorders: Design Framework, Implementation and First Experiments</style></title><secondary-title><style face="normal" font="default" size="100%">Computational Psychiatry</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2019</style></year></dates><urls><web-urls><url><style face="normal" 
font="default" size="100%">https://cpsyjournal.org/article/10.1162/CPSY_a_00025/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><volume><style face="normal" font="default" size="100%">3</style></volume><pages><style face="normal" font="default" size="100%">40–75</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Computational psychiatry is increasingly establishing itself as a valuable discipline for understanding human mental disorders. However, robot models and their potential for investigating embodied and contextual aspects of mental health have been, to date, largely unexplored. In this paper, we present an initial robot model of obsessive-compulsive (OC) spectrum disorders based on an embodied motivation-based control architecture for decision making in autonomous robots. The OC family of conditions is chiefly characterized by obsessions (recurrent, invasive thoughts) and/or compulsions (an urge to carry out certain repetitive or ritualized behaviors). The design of our robot model follows and illustrates a general design framework that we have proposed to ground research in robot models of mental disorders, and to link it with existing methodologies in psychiatry, and notably in the design of animal models. To test and validate our model, we present and discuss initial experiments, results, and quantitative and qualitative analysis regarding the compulsive and obsessive elements of OC-spectrum disorders. 
While this initial stage of development only models basic elements of such disorders, our results already shed light on aspects of the underlying theoretical model that are not obvious simply from consideration of the model.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://cpsyjournal.org/article/10.1162/CPSY_a_00025/&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Robot Model of Stress-Induced Compulsive Behavior</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 8th International Conference on Affective Computing &amp; Intelligent Interaction (ACII 2019)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2019</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2019</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://ieeexplore.ieee.org/document/8925511</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Cambridge, United Kingdom</style></pub-location><pages><style face="normal" font="default" size="100%">559–565</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Stress is one of the potential mechanisms underlying compulsive behavior in obsessive-compulsive spectrum disorders. 
In this paper, we present a robot model and experiments investigating the interactions between internally- and externally-induced stress and compulsive behavior. Our results show properties of the model with potential implications for understanding how stress can result in the generation and maintenance of compulsive behaviors, and how response-prevention interventions can affect compulsive responses under different conditions.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://ieeexplore.ieee.org/document/8925511&quot;&gt;Download&lt;/a&gt; (or &lt;a href=&quot;http://www.emotion-modeling.info/sites/default/files/ACII_Lewis_Canamero_2019_draft.pdf&quot;&gt;Download accepted version&lt;/a&gt;)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Imran Khan</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Adaptation and the Social Salience Hypothesis of Oxytocin: Early Experiments in a Simulated Agent Environment</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 2nd Symposium on Social Interactions in Complex Intelligent Systems (SICIS)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Proc. 
2018 Convention of the Society for the Study of Artificial Intelligence and Simulation of Behaviour (AISB 2018)</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2018</style></year><pub-dates><date><style  face="normal" font="default" size="100%">04/2018</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://aisb2018.csc.liv.ac.uk/PROCEEDINGS%20AISB2018/Social%20Interactions%20in%20Complex%20Intelligent%20Systems%20(SICIS)%20-%20AISB2018.pdf</style></url></web-urls></urls><pub-location><style face="normal" font="default" size="100%">Liverpool, UK</style></pub-location><pages><style face="normal" font="default" size="100%">2–9</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Allostasis is a mechanism that permits adaptation of an organism as a response to changing (physical or social) environmental conditions. Allostasis is driven by a number of factors, including regulation through hormonal mechanisms. Oxytocin (OT) is a hormone that has been found to play a role in regulating social behaviours and adaptation. However, the concrete effects that OT promotes remain unclear and controversial. One of these effects is on the attention paid to social cues (social salience). Two opposing hypotheses have been proposed. One hypothesis is that adaptation is achieved by increasing attention to social cues (increasing social salience), the other that adaptation is achieved by decreasing attention to social cues (decreasing social salience). In this paper, we present agent simulation experiments that test these two contrasting hypotheses under different environmental conditions related to food availability: a comfortable environment, a challenging environment, and a very challenging environment. 
Our results show that, for the particular conditions modelled, increased social salience through the release of simulated oxytocin presents significant advantages in the challenging conditions.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;http://aisb2018.csc.liv.ac.uk/PROCEEDINGS%20AISB2018/Social%20Interactions%20in%20Complex%20Intelligent%20Systems%20(SICIS)%20-%20AISB2018.pdf&quot;&gt;Download full proceedings&lt;/a&gt; (PDF)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lones, John</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Hormone-Driven Epigenetic Mechanism for Adaptation in Autonomous Robots</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE Transactions on Cognitive and Developmental Systems</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2018</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://ieeexplore.ieee.org/document/8115310/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><volume><style face="normal" font="default" size="100%">10</style></volume><pages><style face="normal" font="default" size="100%">445–454</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Different epigenetic mechanisms provide biological organisms with the ability to adjust their physiology and/or morphology and adapt to a wide range of challenges posed by their 
environments. In particular, one type of epigenetic process, in which hormone concentrations are linked to the regulation of hormone receptors, has been shown to have implications for behavioral development. In this paper, taking inspiration from these biological processes, we investigate whether an epigenetic model based on the concept of hormonal regulation of receptors can provide a similarly robust and general adaptive mechanism for autonomous robots. We have implemented our model using a Koala robot, and tested it in a series of experiments in six different environments with varying challenges to negotiate. Our results, including the emergence of varied behaviors that permit the robot to exploit its current environment, demonstrate the potential of our epigenetic model as a general mechanism for adaptation in autonomous robots.</style></abstract><issue><style face="normal" font="default" size="100%">2</style></issue><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://ieeexplore.ieee.org/document/8115310&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Imran Khan</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Modelling Adaptation through Social Allostasis: Modulating the Effects of Social Touch with Oxytocin in Embodied Agents</style></title><secondary-title><style face="normal" font="default" size="100%">Multimodal Technologies and Interaction</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2018</style></year></dates><urls><web-urls><url><style face="normal" font="default" 
size="100%">https://www.mdpi.com/2414-4088/2/4/67</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MDPI</style></publisher><pub-location><style face="normal" font="default" size="100%">Basel, Switzerland</style></pub-location><volume><style face="normal" font="default" size="100%">2</style></volume><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Social allostasis is a mechanism of adaptation that permits individuals to dynamically adapt their physiology to changing physical and social conditions. Oxytocin (OT) is widely considered to be one of the hormones that drives and adapts social behaviours. While its precise effects remain unclear, two areas where OT may promote adaptation are by affecting social salience, and affecting internal responses of performing social behaviours. Working towards a model of dynamic adaptation through social allostasis in simulated embodied agents, and extending our previous work studying OT-inspired modulation of social salience, we present a model and experiments that investigate the effects and adaptive value of allostatic processes based on hormonal (OT) modulation of affective elements of a social behaviour. In particular, we investigate and test the effects and adaptive value of modulating the degree of satisfaction of tactile contact in a social motivation context in a small simulated agent society across different environmental challenges (related to availability of food) and effects of OT modulation of social salience as a motivational incentive. Our results show that the effects of these modulatory mechanisms have different (positive or negative) adaptive value across different groups and under different environmental circumstance in a way that supports the context-dependent nature of OT, put forward by the interactionist approach to OT modulation in biological agents. 
In terms of simulation models, this means that OT modulation of the mechanisms that we have described should be context-dependent in order to maximise viability of our socially adaptive agents, illustrating the relevance of social allostasis mechanisms.</style></abstract><issue><style face="normal" font="default" size="100%">4</style></issue><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.mdpi.com/2414-4088/2/4/67&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes><section><style face="normal" font="default" size="100%">67</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Robin: An Autonomous Robot for Diabetic Children</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. UK-RAS Conference: 'Robots Working For &amp; Among Us', 2017</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2018</style></year></dates><pub-location><style face="normal" font="default" size="100%">Bristol, UK</style></pub-location><pages><style face="normal" font="default" size="100%">13–15</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We describe the cognitively and motivationally autonomous robot toddler Robin, designed as a tool to help children learn about diabetes management. The design of Robin follows an Embodied Artificial Intelligence approach to robotics, to create a robust social interaction agent, friendly but independent. 
We have used Robin in autonomous interactions with diabetic children in a scenario designed to give them mastery experiences of diabetes management in order to increase their self-efficacy.</style></abstract><notes><style face="normal" font="default" size="100%">Winner: 1st Prize, Best Paper
&lt;a href=&quot;http://www.emotion-modeling.info/sites/default/files/UK-RAS_2017_Robin_proceedings.pdf&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Using Robots to Model Mental Disorders</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. UK-RAS Conference: 'Robots Working For &amp; Among Us', 2017</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2018</style></year></dates><pub-location><style face="normal" font="default" size="100%">Bristol, UK</style></pub-location><pages><style face="normal" font="default" size="100%">121–123</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We are currently at a point where the use of robots to model human mental disorders is possible, and this capability will only increase. 
By considering the lessons learned from animal models, we argue that robot models of human mental disorders can complement existing approaches in mental health research.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;http://www.emotion-modeling.info/sites/default/files/UK-RAS_2017_Robot_Models_proceedings.pdf&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Hickton, Luke</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Gao, Yang</style></author><author><style face="normal" font="default" size="100%">Fallah, Saber</style></author><author><style face="normal" font="default" size="100%">Jin, Yaochu</style></author><author><style face="normal" font="default" size="100%">Lekakou, Constantina</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">A Flexible Component-Based Robot Control Architecture for Hormonal Modulation of Behaviour and Affect</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
Towards Autonomous Robotic Systems 18th Annual Conference, TAROS 2017</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">LNCS</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2017</style></year><pub-dates><date><style  face="normal" font="default" size="100%">07/2017</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/chapter/10.1007/978-3-319-64107-2_36</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer International</style></publisher><pub-location><style face="normal" font="default" size="100%">Guildford, UK</style></pub-location><volume><style face="normal" font="default" size="100%">10454</style></volume><pages><style face="normal" font="default" size="100%">464–474</style></pages><isbn><style face="normal" font="default" size="100%">978-3-319-64106-5</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we present the foundations of an architecture that will support the wider context of our work, which is to explore the link between affect, perception and behaviour from an embodied perspective and assess their relevance to Human Robot Interaction (HRI). 
Our approach builds upon existing affect-based architectures by combining artificial hormones with discrete abstract components that are designed with the explicit consideration of influencing, and being receptive to, the wider affective state of the robot.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://link.springer.com/chapter/10.1007/978-3-319-64107-2_36&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Robot Models of Mental Disorders</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 7th International Conference on Affective Computing and Intelligent Interaction, Workshops and Demos (ACIIW 2017)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2017</style></year><pub-dates><date><style  face="normal" font="default" size="100%">10/2017</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ieeexplore.ieee.org/document/8272613/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">San Antonio, TX</style></pub-location><pages><style face="normal" font="default" size="100%">193–200</style></pages><isbn><style face="normal" font="default" size="100%">978-1-5386-0680-3</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Alongside technological tools to support wellbeing and treatment of 
mental disorders, models of these disorders can also be invaluable tools to understand, support and improve these conditions. Robots can provide ecologically valid models that take into account embodiment-, interaction-, and context-related elements. Focusing on Obsessive-Compulsive spectrum disorders, in this paper we discuss some of the potential contributions of robot models and relate them to other models used in psychology and psychiatry, particularly animal models. We also present some initial recommendations for their meaningful design and rigorous use.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;http://ieeexplore.ieee.org/document/8272613/&quot;&gt;Download&lt;/a&gt; (or &lt;a href=&quot;http://www.emotion-modeling.info/sites/default/files/ACII_Lewis_Canamero_2017_Robot_Models_of_Mental_Disorders_draft.pdf&quot;&gt;Download authors' draft&lt;/a&gt;)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lones, John</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">From Sensorimotor Experiences to Cognitive Development: Investigating the Influence of Experiential Diversity on the Development of an Epigenetic Robot</style></title><secondary-title><style face="normal" font="default" size="100%">Frontiers in Robotics and AI</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2016</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2016</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" 
size="100%">http://journal.frontiersin.org/article/10.3389/frobt.2016.00044</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Frontiers</style></publisher><volume><style face="normal" font="default" size="100%">3</style></volume><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Using an epigenetic model, in this paper we investigate the importance of sensorimotor experiences and environmental conditions in the emergence of more advanced cognitive abilities in an autonomous robot. We let the robot develop in three environments affording very different (physical and social) sensorimotor experiences: a &quot;normal&quot;, standard environment, with reasonable opportunities for stimulation, a &quot;novel&quot; environment that offers many novel experiences, and a &quot;sensory deprived&quot; environment where the robot has very few and over-simplistic chances to interact. We then: (a) assess how these different experiences influence and change the robot's ongoing development and behavior; (b) compare the said development to the different sensorimotor stages that infants go through and (c) finally after each &quot;baby&quot; robot has had time to develop in its environment, we recreate and assess its cognitive abilities using different well-known tests used with human infants such as violation of expectation (VOE) paradigm. 
Although our model was not explicitly designed following Piaget's, or any other sensorimotor developmental theory, we observed, and discuss in the paper, that relevant sensorimotor experiences, or the lack of, result in the robot going through unplanned development &quot;stages&quot; bearing some similarities to infant development, and could be interpreted in terms of Piaget's theory.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;http://journal.frontiersin.org/article/10.3389/frobt.2016.00044&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Hedonic quality or reward? 
A study of basic pleasure in homeostasis and decision making of a motivated autonomous robot</style></title><secondary-title><style face="normal" font="default" size="100%">Adaptive Behavior</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2016</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://journals.sagepub.com/doi/full/10.1177/1059712316666331</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">SAGE</style></publisher><volume><style face="normal" font="default" size="100%">24</style></volume><pages><style face="normal" font="default" size="100%">267–291</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We present a robot architecture and experiments to investigate some of the roles that pleasure plays in the decision making (action selection) process of an autonomous robot that must survive in its environment. We have conducted three sets of experiments to assess the effect of different types of pleasure—related versus unrelated to the satisfaction of physiological needs—under different environmental circumstances. 
Our results indicate that pleasure, including pleasure unrelated to need satisfaction, has value for homeostatic management in terms of improved viability and increased flexibility in adaptive behavior.</style></abstract><issue><style face="normal" font="default" size="100%">5</style></issue><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://journals.sagepub.com/doi/full/10.1177/1059712316666331&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Making New &quot;New AI&quot; Friends: Designing a Social Robot for Diabetic Children from an Embodied AI Perspective</style></title><secondary-title><style face="normal" font="default" size="100%">International Journal of Social Robotics</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2016</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/article/10.1007%2Fs12369-016-0364-9</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><volume><style face="normal" font="default" size="100%">8</style></volume><pages><style face="normal" font="default" size="100%">523–537</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Robin is a cognitively and motivationally autonomous affective robot toddler with &quot;robot diabetes&quot; that we have developed to support perceived self-efficacy and emotional wellbeing in children with diabetes. 
Robin provides children with positive mastery experiences of diabetes management in a playful but realistic and natural interaction context. Underlying the design of Robin is an &quot;Embodied&quot; (formerly also known as &quot;New&quot;) Artificial Intelligence (AI) approach to robotics. In this paper we discuss the rationale behind the design of Robin to meet the needs of our intended end users (both children and medical staff), and how &quot;New AI&quot; provides a suitable approach to developing a friendly companion that fulfills the therapeutic and affective requirements of our end users beyond other approaches commonly used in assistive robotics and child–robot interaction. Finally, we discuss how our approach permitted our robot to interact with and provide suitable experiences of diabetes management to children with very different social interaction styles.</style></abstract><issue><style face="normal" font="default" size="100%">4</style></issue><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://link.springer.com/article/10.1007%2Fs12369-016-0364-9&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Christian Balkenius</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Philip Pärnamets</style></author><author><style face="normal" font="default" size="100%">Birger Johansson</style></author><author><style face="normal" font="default" size="100%">Martin V Butz</style></author><author><style face="normal" font="default" size="100%">Andreas Olsson</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Outline of a sensory-motor perspective on intrinsically moral 
agents</style></title><secondary-title><style face="normal" font="default" size="100%">Adaptive Behavior</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2016</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://journals.sagepub.com/doi/10.1177/1059712316667203</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">SAGE</style></publisher><volume><style face="normal" font="default" size="100%">24</style></volume><pages><style face="normal" font="default" size="100%">306–319 </style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We propose that moral behaviour of artificial agents could (and should) be intrinsically grounded in their own sensory-motor experiences. Such an ability depends critically on seven types of competencies. First, intrinsic morality should be grounded in the internal values of the robot arising from its physiology and embodiment. Second, the moral principles of robots should develop through their interactions with the environment and with other agents. Third, we claim that the dynamics of moral (or social) emotions closely follows that of other non-social emotions used in valuation and decision making. Fourth, we explain how moral emotions can be learned from the observation of others. Fifth, we argue that to assess social interaction, a robot should be able to learn about and understand responsibility and causation. Sixth, we explain how mechanisms that can learn the consequences of actions are necessary for a robot to make moral decisions. Seventh, we describe how the moral evaluation mechanisms outlined can be extended to situations where a robot should understand the goals of others. 
Finally, we argue that these competencies lay the foundation for robots that can feel guilt, shame and pride, that have compassion and that know how to assign responsibility and blame.</style></abstract><issue><style face="normal" font="default" size="100%">5</style></issue><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://journals.sagepub.com/doi/10.1177/1059712316667203&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Coninx, Alexandre</style></author><author><style face="normal" font="default" size="100%">Paul E. Baxter</style></author><author><style face="normal" font="default" size="100%">Oleari, Elettra</style></author><author><style face="normal" font="default" size="100%">Bellini, Sara</style></author><author><style face="normal" font="default" size="100%">Bierman, Bert</style></author><author><style face="normal" font="default" size="100%">Henkemans, Olivier Blanson</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Cosi, Piero</style></author><author><style face="normal" font="default" size="100%">Valentin Enescu</style></author><author><style face="normal" font="default" size="100%">Espinoza, Raquel Ros</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Remi Humbert</style></author><author><style face="normal" font="default" size="100%">Kiefer, Bernd</style></author><author><style face="normal" font="default" size="100%">Kruijff-Korbayová, Ivana</style></author><author><style face="normal" font="default" size="100%">Looije, Rosmarijn</style></author><author><style face="normal" font="default" size="100%">Mosconi, 
Marco</style></author><author><style face="normal" font="default" size="100%">Mark A. Neerincx</style></author><author><style face="normal" font="default" size="100%">Giulio Paci</style></author><author><style face="normal" font="default" size="100%">Patsis, Georgios</style></author><author><style face="normal" font="default" size="100%">Pozzi, Clara</style></author><author><style face="normal" font="default" size="100%">Sacchitelli, Francesca</style></author><author><style face="normal" font="default" size="100%">Hichem Sahli</style></author><author><style face="normal" font="default" size="100%">Alberto Sanna</style></author><author><style face="normal" font="default" size="100%">Sommavilla, Giacomo</style></author><author><style face="normal" font="default" size="100%">Tesser, Fabio</style></author><author><style face="normal" font="default" size="100%">Yiannis Demiris</style></author><author><style face="normal" font="default" size="100%">Tony Belpaeme</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Towards Long-Term Social Child-Robot Interaction: Using Multi-Activity Switching to Engage Young Users</style></title><secondary-title><style face="normal" font="default" size="100%">Journal of Human-Robot Interaction</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2016</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://dl.acm.org/doi/abs/10.5898/JHRI.5.1.Coninx</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">5</style></volume><pages><style face="normal" font="default" size="100%">32–67</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Social robots have the potential to provide support in a number of practical domains, such as learning and behaviour change. 
This potential is particularly relevant for children, who have proven receptive to interactions with social robots. To reach learning and therapeutic goals, a number of issues need to be investigated, notably the design of an effective child-robot interaction (cHRI) to ensure the child remains engaged in the relationship and that educational goals are met. Typically, current cHRI research experiments focus on a single type of interaction activity (e.g. a game). However, these can suffer from a lack of adaptation to the child, or from an increasingly repetitive nature of the activity and interaction. In this paper, we motivate and propose a practicable solution to this issue: an adaptive robot able to switch between multiple activities within single interactions. We describe a system that embodies this idea, and present a case study in which diabetic children collaboratively learn with the robot about various aspects of managing their condition. We demonstrate the ability of our system to induce a varied interaction and show the potential of this approach both as an educational tool and as a research method for long-term cHRI.</style></abstract><issue><style face="normal" font="default" size="100%">1</style></issue><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://dl.acm.org/doi/abs/10.5898/JHRI.5.1.Coninx&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Oleari, Elettra</style></author><author><style face="normal" font="default" size="100%">Pozzi, Clara</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Tapus, 
Adriana</style></author><author><style face="normal" font="default" size="100%">André, Elisabeth</style></author><author><style face="normal" font="default" size="100%">Martin, Jean-Claude</style></author><author><style face="normal" font="default" size="100%">Ferland, François</style></author><author><style face="normal" font="default" size="100%">Ammi, Mehdi</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">An Embodied AI Approach to Individual Differences: Supporting Self-Efficacy in Diabetic Children with an Autonomous Robot</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 7th International Conference on Social Robotics (ICSR-2015)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2015</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/chapter/10.1007%2F978-3-319-25554-5_40</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer International Publishing</style></publisher><pub-location><style face="normal" font="default" size="100%">Paris</style></pub-location><pages><style face="normal" font="default" size="100%">401–410</style></pages><isbn><style face="normal" font="default" size="100%">978-3-319-25553-8</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we discuss how a motivationally autonomous robot, designed using the principles of embodied AI, provides a suitable approach to address individual differences of children interacting with a robot, without having to explicitly modify the system. 
We do this in the context of two pilot studies using Robin, a robot to support self-confidence in diabetic children.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://link.springer.com/chapter/10.1007%2F978-3-319-25554-5_40&quot;&gt;Download&lt;/a&gt; (or &lt;a href=&quot;http://www.emotion-modeling.info/sites/default/files/2015_Lewis_Canamero_ICSR.pdf&quot;&gt;Download authors' draft&lt;/a&gt;)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Kruijff-Korbayová, Ivana</style></author><author><style face="normal" font="default" size="100%">Oleari, Elettra</style></author><author><style face="normal" font="default" size="100%">Pozzi, Clara</style></author><author><style face="normal" font="default" size="100%">Sacchitelli, Francesca</style></author><author><style face="normal" font="default" size="100%">Bagherzadhalimi, Anahita</style></author><author><style face="normal" font="default" size="100%">Bellini, Sara</style></author><author><style face="normal" font="default" size="100%">Kiefer, Bernd</style></author><author><style face="normal" font="default" size="100%">Racioppa, Stefania</style></author><author><style face="normal" font="default" size="100%">Coninx, Alexandre</style></author><author><style face="normal" font="default" size="100%">Paul E. Baxter</style></author><author><style face="normal" font="default" size="100%">Bierman, Bert</style></author><author><style face="normal" font="default" size="100%">Henkemans, Olivier Blanson</style></author><author><style face="normal" font="default" size="100%">Mark A. 
Neerincx</style></author><author><style face="normal" font="default" size="100%">Rosemarijn Looije</style></author><author><style face="normal" font="default" size="100%">Yiannis Demiris</style></author><author><style face="normal" font="default" size="100%">Espinoza, Raquel Ros</style></author><author><style face="normal" font="default" size="100%">Mosconi, Marco</style></author><author><style face="normal" font="default" size="100%">Cosi, Piero</style></author><author><style face="normal" font="default" size="100%">Remi Humbert</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Hichem Sahli</style></author><author><style face="normal" font="default" size="100%">Joachim de Greeff</style></author><author><style face="normal" font="default" size="100%">James Kennedy</style></author><author><style face="normal" font="default" size="100%">Robin Read</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Giulio Paci</style></author><author><style face="normal" font="default" size="100%">Sommavilla, Giacomo</style></author><author><style face="normal" font="default" size="100%">Tesser, Fabio</style></author><author><style face="normal" font="default" size="100%">Athanasopoulos, Georgios</style></author><author><style face="normal" font="default" size="100%">Patsis, Georgios</style></author><author><style face="normal" font="default" size="100%">Verhelst, Werner</style></author><author><style face="normal" font="default" size="100%">Alberto Sanna</style></author><author><style face="normal" font="default" size="100%">Tony Belpaeme</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Let’s Be Friends: Perception of a Social 
Robotic Companion for children with T1DM</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. New Friends 2015</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2015</style></year><pub-dates><date><style  face="normal" font="default" size="100%">10/2015</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://mheerink.home.xs4all.nl/pdf/ProceedingsNF2015-3.pdf</style></url></web-urls></urls><pub-location><style face="normal" font="default" size="100%">Almere, The Netherlands</style></pub-location><pages><style face="normal" font="default" size="100%">32–33</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We describe the social characteristics of a robot developed to support children with Type 1 Diabetes Mellitus (T1DM) in the process of education and care. We evaluated the perception of the robot at a summer camp where diabetic children aged 10-14 experienced the robot in group interactions. Children in the intervention condition additionally interacted with it also individually, in one-to-one sessions featuring several game-like activities. These children perceived the robot significantly more as a friend than those in the control group. They also readily engaged with it in dialogues about their habits related to healthy lifestyle as well as personal experiences concerning diabetes. 
This indicates that the one-on-one interactions added a special quality to the relationship of the children with the robot.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://mheerink.home.xs4all.nl/pdf/ProceedingsNF2015-3.pdf&quot;&gt;Download full proceedings&lt;/a&gt; (PDF)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Angel Fernandez, Julian M.</style></author><author><style face="normal" font="default" size="100%">Bonarini, Andrea</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Tapus, Adriana</style></author><author><style face="normal" font="default" size="100%">André, Elisabeth</style></author><author><style face="normal" font="default" size="100%">Martin, Jean-Claude</style></author><author><style face="normal" font="default" size="100%">Ferland, François</style></author><author><style face="normal" font="default" size="100%">Ammi, Mehdi</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">A Reactive Competitive Emotion Selection System</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
7th International Conference on Social Robotics (ICSR-2015)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Emotion production</style></keyword><keyword><style  face="normal" font="default" size="100%">Emotional models</style></keyword><keyword><style  face="normal" font="default" size="100%">Human Robot Interaction</style></keyword><keyword><style  face="normal" font="default" size="100%">Social robotics</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2015</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/chapter/10.1007%2F978-3-319-25554-5_4</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer International Publishing</style></publisher><pub-location><style face="normal" font="default" size="100%">Paris</style></pub-location><pages><style face="normal" font="default" size="100%">31–40</style></pages><isbn><style face="normal" font="default" size="100%">978-3-319-25553-8</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We present a reactive emotion selection system designed to be used in a robot that needs to respond autonomously to relevant events. A variety of emotion selection models based on &quot;cognitive appraisal&quot; theories exist, but the complexity of the concepts used by most of these models limits their use in robotics. Robots have physical constrains that condition their understanding of the world and limit their capacity to built the complex concepts needed for such models. 
The system presented in this paper was conceived to respond to &quot;disturbances&quot; detected in the environment through a stream of images, and use this low-level information to update emotion intensities. They are increased when specific patterns, based on Tomkins’ affect theory, are detected, or reduced when they are not. This system could also be used as part of (or as a first step in the incremental design of) a more cognitively complex emotional system for autonomous robots.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://link.springer.com/chapter/10.1007%2F978-3-319-25554-5_4&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">An Affective Autonomous Robot Toddler to Support the Development of Self-Efficacy in Diabetic Children</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
23rd Annual IEEE International Symposium on Robot and Human Interactive Communication (IEEE RO-MAN 2014)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2014</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ieeexplore.ieee.org/document/6926279/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Edinburgh</style></pub-location><pages><style face="normal" font="default" size="100%">359–364</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4799-6763-6</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We present a software architecture and an interaction scenario for an autonomous robot toddler designed to support the development of self-efficacy in diabetic children, and discuss its potential medical benefits. 
We pay particular attention to the affective and social aspects of the interaction, as well as the importance of autonomy in the robot, examining their relationships to our scientific and therapeutic goals.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://ieeexplore.ieee.org/document/6926279&quot;&gt;Download&lt;/a&gt; (or &lt;a href=&quot;http://www.emotion-modeling.info/sites/default/files/Lewis%2C_Canamero%2C_Autonomous_Robot_Toddler_Diabetic_Children%2C_ROMAN_2014_ACCEPTED.pdf&quot;&gt;Download authors' draft&lt;/a&gt;)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Arousal Regulation and Affective Adaptation to Human Responsiveness by a Robot that Explores and Learns a Novel Environment</style></title><secondary-title><style face="normal" font="default" size="100%">Frontiers in Neurorobotics</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://journal.frontiersin.org/article/10.3389/fnbot.2014.00017</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">8</style></volume><pages><style face="normal" font="default" size="100%">17</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In the context of our work in developmental robotics regarding robot-human caregiver interactions, in this paper we 
investigate how a &quot;baby&quot; robot that explores and learns novel environments can adapt its affective regulatory behavior of soliciting help from a &quot;caregiver&quot; to the preferences shown by the caregiver in terms of varying responsiveness. We build on two strands of previous work that assessed independently (a) the differences between two &quot;idealized&quot; robot profiles – a &quot;needy&quot; and an &quot;independent&quot; robot – in terms of their use of a caregiver as a means to regulate the &quot;stress&quot; (arousal) produced by the exploration and learning of a novel environment, and (b) the effects on the robot behaviors of two caregiving profiles varying in their responsiveness – &quot;responsive&quot; and &quot;non-responsive&quot; – to the regulatory requests of the robot. Going beyond previous work, in this paper we (a) assess the effects that the varying regulatory behavior of the two robot profiles has on the exploratory and learning patterns of the robots; (b) bring together the two strands previously investigated in isolation and take a step further by endowing the robot with the capability to adapt its regulatory behavior along the &quot;needy&quot; and &quot;independent&quot; axis as a function of the varying responsiveness of the caregiver; and (c) analyze the effects that the varying regulatory behavior has on the exploratory and learning patterns of the adaptive robot.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.frontiersin.org/articles/10.3389/fnbot.2014.00017/full&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Cognitive Architectures to Bridge Interdisciplinary 
Gaps in Emotion Research?</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 5th Annual International Conference on Biologically-Inspired Cognitive Architectures (BICA 2014)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year></dates><pub-location><style face="normal" font="default" size="100%">Cambridge, MA</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Emotions are a fundamental aspect of cognition and interaction, and their importance has been broadly acknowledged by both the &quot;sciences of the natural&quot; (e.g., neuroscience, psychology, biology) and those of &quot;the artificial&quot; (e.g., artificial intelligence, cognitive science / robotics, artificial life). Emotions provide an ideal framework for inter- and cross-disciplinary research since, due to their complex multi-faceted nature, they cannot be properly understood from the perspective of a single discipline. In this abstract/presentation, I would argue that the use of robots as both ...</style></abstract><notes><style face="normal" font="default" size="100%">&lt;br&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Junpei Zhong</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">From Continuous Affective Space to Continuous Expression Space: Non-Verbal Behaviour Recognition and Generation</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
4th Joint IEEE International Conference on Development and Learning and on Epigenetic Robotics (ICDL-Epirob 2014)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year><pub-dates><date><style  face="normal" font="default" size="100%">10/2014</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ieeexplore.ieee.org/document/6982957/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Genoa, Italy</style></pub-location><pages><style face="normal" font="default" size="100%">75–80</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this research, a recurrent neural network with parametric bias (RNNPB) was adopted to construct a continuous expression space from emotion caused human behaviours. It made use of the short-term memory ability of the recurrent weights to store spatio-temporal sequences features, while the attached parametric bias units were trained in a self-organizing way and represented as a low-dimensional expression space to capture these non-linear features of the sequences. 
Three demonstrations were given: training and recognition performances were examined in computer simulations, while the movements generated by the network, both trained and novel, were shown in three-dimensional avatar demonstrations.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://ieeexplore.ieee.org/document/6982957&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Egbert, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Habit-based Regulation of Essential Variables</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 14th Conference on the Synthesis and Simulation of Living Systems (ALIFE 2014)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.mitpressjournals.org/doi/abs/10.1162/978-0-262-32621-6-ch029</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">New York, NY</style></pub-location><pages><style face="normal" font="default" size="100%">168–175</style></pages><isbn><style face="normal" font="default" size="100%">978-0-262-32621-6</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">A variety of models have been developed to investigate &quot;homeostatic adaptation,&quot; a mechanism inspired by Ashby’s homeostat, where a plastic control medium is reorganized until one or more essential 
variables are maintained within predefined limits. In these models, &quot;habits&quot; emerge, defined as behavior-generating mechanisms that rely upon their own influence to maintain the conditions necessary for their own persistence. In this paper, we present a recently developed sensorimotor-habit-based controller that is coupled to a simulated two-wheeled robot with a simulated metabolism. The simulation is used to demonstrate how habits can have the same essential variable(s) as the metabolic or &quot;biological&quot; organism that is performing the behavior, and that in certain conditions when this is the case, the emergent habits will tend to stabilize essential variables within viability limits. The model also demonstrates that an explicit pre-specification of (A) which variables should induce plasticity and (B) which values of those variables should induce plasticity is not always necessary for homeostatic adaptation of behavior.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.mitpressjournals.org/doi/abs/10.1162/978-0-262-32621-6-ch029&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lones, John</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Hormonal Modulation of Development and Behaviour Permits a Robot to Adapt to Novel Interactions</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
14th Conference on the Synthesis and Simulation of Living Systems (ALIFE 2014)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.mitpressjournals.org/doi/abs/10.1162/978-0-262-32621-6-ch031</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">New York, NY</style></pub-location><pages><style face="normal" font="default" size="100%">184–191</style></pages><isbn><style face="normal" font="default" size="100%">978-0-262-32621-6</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Hormones are known to play a critical role in modulating the behaviour and development of organisms when confronted with different environment challenges. In this paper we present a biologically plausible hormonal mechanism that allows an autonomous robot to interact appropriately with novel objects and interactions depending upon both its current internal state and its past experiences. In our experiments, robots that had been exposed to negative experiences during their initial developmental phase displayed withdrawn behaviour and were less likely to explore new objects and environments, or to engage with a human caregiver. 
In contrast, robots with a positive upbringing showed much greater levels of outgoing behaviour such as exploration and social interaction.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.mitpressjournals.org/doi/abs/10.1162/978-0-262-32621-6-ch031&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lones, John</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Hormonal Modulation of Interaction Between Autonomous Agents</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 4th Joint IEEE International Conference on Development and Learning and on Epigenetic Robotics (ICDL-Epirob 2014)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year><pub-dates><date><style  face="normal" font="default" size="100%">10/2014</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ieeexplore.ieee.org/document/6983015/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Genoa, Italy</style></pub-location><pages><style face="normal" font="default" size="100%">402–407</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Epigenetic-like mechanisms potentially have a critical role to play in the long-term modulation of behaviour, interaction and adaptation. 
In this study we implement a model of these mechanisms, the upward and downward regulation of hormone receptors located in a simple hormone driven autonomous agent. We frame this study in a multi agent setup using competition as a way to change the dynamics of the environment and the challenges the agents face. We found that this simple epigenetic mechanism has the potential to significantly alter the behaviour of the agents, allowing them to develop not only unique individual behaviour but also group like structures that permit them to better adapt to the different challenges that the environment presents.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://ieeexplore.ieee.org/document/6983015/&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Modulating Perception with Pleasure for Action Selection</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 5th Annual International Conference on Biologically-Inspired Cognitive Architectures (BICA 2014)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year><pub-dates><date><style  face="normal" font="default" size="100%">11/2014</style></date></pub-dates></dates><pub-location><style face="normal" font="default" size="100%">Cambridge, MA</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Persistence and opportunism are two key features of cognitive action selection architectures. 
For an autonomous robot that has to satisfy multiple conflicting survival-related needs, it is crucial to persist in the execution of behaviors for long enough to get sufficient benefit. Persistence is important to avoid what is known as the &quot;dithering&quot; problem, which occurs when a robot keeps switching between trying to satisfy two needs without satisfying either of them enough to guarantee survival. Opportunism concerns the initiation of actions, and occurs when an agent chooses to consume a resource that might not satisfy its most pressing need, but which is available now and might not be available later. The degree to which a robot should show persistence and opportunism depends on multiple factors; we could generally say that persistence leads to a more &quot;conservative&quot; action selection behavior and opportunism to a more &quot;risky&quot; one.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;br&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Wang, Weiyi</style></author><author><style face="normal" font="default" size="100%">Athanasopoulos, Georgios</style></author><author><style face="normal" font="default" size="100%">Yilmazyildiz, Selma</style></author><author><style face="normal" font="default" size="100%">Patsis, Georgios</style></author><author><style face="normal" font="default" size="100%">Valentin Enescu</style></author><author><style face="normal" font="default" size="100%">Hichem Sahli</style></author><author><style face="normal" font="default" size="100%">Verhelst, Werner</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola 
Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Natural Emotion Elicitation for Emotion Modeling in Child-Robot Interactions</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 4th Workshop on Child Computer Interaction (WOCCI 2014)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.isca-speech.org/archive/wocci_2014/wc14_051.html</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">ICSA</style></publisher><pub-location><style face="normal" font="default" size="100%">Singapore</style></pub-location><pages><style face="normal" font="default" size="100%">51–56</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Obtaining spontaneous emotional expressions is the very first and vital step in affective computing studies, for both psychologists and computer scientists. However, it is quite challenging to record them in real life, especially when certain modalities are required (e.g.  3D representation of the body).  Traditional elicitation and capturing protocols either introduce the awareness of the recording, which may impair the naturalness of the behaviors, or cause too much information loss.  In this paper, we  present  natural  emotion  elicitation  and  recording  experiments, which were set in child-robot interaction scenarios. Several state-of-the-art technologies were employed to acquire the multi-modal expressive data that will be further used for emotion modeling and recognition studies. 
The obtained recordings exhibit the expected emotional expressions.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.isca-speech.org/archive/wocci_2014/wc14_051.html&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Pleasure, Persistence and Opportunism in Action Selection</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 14th Conference on the Synthesis and Simulation of Living Systems (ALIFE 2014)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.mitpressjournals.org/doi/abs/10.1162/978-0-262-32621-6-ch151</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">New York, NY</style></pub-location><pages><style face="normal" font="default" size="100%">932–933</style></pages><isbn><style face="normal" font="default" size="100%">978-0-262-32621-6</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">An autonomous robot must show appropriate levels of persistence and opportunism to survive.  We address this problem by using a mechanism akin to pleasure that modulates exteroception as a function of need satisfaction, rather than based on
internal deficits and external threats as in previous work. The different context in which the modulating hormone is released has important consequences on persistence and opportunism.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.mitpressjournals.org/doi/abs/10.1162/978-0-262-32621-6-ch151&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Robot that Uses Arousal to Detect Learning Challenges and Seek Help</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
14th Conference on the Synthesis and Simulation of Living Systems (ALIFE 2014)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2014</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.mitpressjournals.org/doi/abs/10.1162/978-0-262-32621-6-ch142</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">New York, NY</style></pub-location><pages><style face="normal" font="default" size="100%">864–871</style></pages><isbn><style face="normal" font="default" size="100%">978-0-262-32621-6</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In the context of our work on dyadic robot-human (caregiver) interaction from a developmental robotics perspective, in this paper we investigate how an autonomous robot that explores and learns novel environments can make use of its arousal system to detect situations that constitute learning challenges, and request help from a human at points where this help is most needed and can be most beneficial. In a set of experiments, our robot learns to classify and recognize the perceptual properties of various objects placed on a table. We show that the arousal system of the robot permits it to identify and react to incongruent and novel features in the environment. More specifically, our results show that the robot identifies perceived outliers and episodic perceptual anomalies. As in the case of young infants, arousal variations trigger regulatory behaviours that engage caregivers in helping behaviors. 
We conclude that this attachment-based architecture provides a generic process that permits a robot to request interventions from a human caregiver during relevant events.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.mitpressjournals.org/doi/abs/10.1162/978-0-262-32621-6-ch142&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Are Discrete Emotions Useful in Human-Robot Interaction? Feedback from Motion Capture Analysis</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. Affective Computing and Intelligent Interaction (ACII 2013)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2013</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2013</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://ieeexplore.ieee.org/document/6681414</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Geneva, Switzerland</style></pub-location><pages><style face="normal" font="default" size="100%">97–102</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We have conducted a study analyzing motion capture data of bodily expressions of human emotions towards the goal of building a social expressive robot that interacts with and supports hospitalized children. 
Although modeling emotional expression (and recognition) in (by) robots in terms of discrete categories presents advantages such as ease and clarity of interpretation, our results show that this approach also poses a number of problems. The main issues relate to the loss of subtle expressions and feelings, individual features, context, and social interaction elements that are present in real life.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://ieeexplore.ieee.org/document/6681414&quot;&gt;Download&lt;/a&gt; (or &lt;a href=&quot;http://www.emotion-modeling.info/sites/default/files/ACII_2013_Lewis_Canamero%2C_Discrete_Emotions_Motion_Capture-draft.pdf&quot;&gt;Download authors' draft&lt;/a&gt;)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lones, John</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Epigenetic Adaptation in Action Selection Environments with Temporal Dynamics</style></title><secondary-title><style face="normal" font="default" size="100%">Advances in Artificial Life, ECAL 2013</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2013</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.mitpressjournals.org/doi/abs/10.1162/978-0-262-31709-2-ch073</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pages><style face="normal" font="default" size="100%">505–512</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style 
face="normal" font="default" size="100%">To operate in dynamic environments robots must be able to adapt their behaviour to meet the challenges that these pose while being constrained by their physical and computational limitation. In this paper we continue our study into using biologically inspired epigenetic adaptation through hormone modulation as a way to accommodate the needed flexibility in robots’ behaviour, focusing on problems of temporal dynamics. We have specifically framed our study in three variants of dynamic three-resource action selection environment. The challenges posed by these environments include: moving resources, temporal and increasing unavailability of resources, and cyclic changes in type and availability of resources related to cyclic environmental changes.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.mitpressjournals.org/doi/abs/10.1162/978-0-262-31709-2-ch073&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lones, John</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Epigenetic Adaptation through Hormone Modulation in Autonomous Robots</style></title><secondary-title><style face="normal" font="default" size="100%">2013 IEEE 3rd Joint International Conference on Development and Learning and Epigenetic Robotics (ICDL-Epirob 2013)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2013</style></year></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Osaka</style></pub-location><pages><style face="normal" 
font="default" size="100%">1–6</style></pages><isbn><style face="normal" font="default" size="100%">9781479910366</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Epigenetic adaptation provides biological organisms with the ability to adjust their physiology and/or morphology in order to meet some of the challenges posed by their environment. Recent research has suggested that this process may be controlled by hormones. In this paper, we present a model that allows an autonomous robot to develop its systems in accordance with the environment it is currently situated in. Experiments have been undertaken in multiple environments with different challenges and niches to negotiate. We have so far seen encouraging results and the emergence of unique behaviours tailored to exploiting its current environment.</style></abstract><notes><style face="normal" font="default" size="100%">Winner: Best Student Paper
&lt;a href=&quot;https://ieeexplore.ieee.org/document/6652561&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Ignasi Cos</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Gillian M Hayes</style></author><author><style face="normal" font="default" size="100%">Gillies, Andrew</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Hedonic Value: Enhancing Adaptation for Motivated Agents</style></title><secondary-title><style face="normal" font="default" size="100%">Adaptive Behavior</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Actor-Critic</style></keyword><keyword><style  face="normal" font="default" size="100%">Grounding</style></keyword><keyword><style  face="normal" font="default" size="100%">Hedonic Value</style></keyword><keyword><style  face="normal" font="default" size="100%">Motivation</style></keyword><keyword><style  face="normal" font="default" size="100%">Reinforcement Learning</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2013</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://journals.sagepub.com/doi/10.1177/1059712313486817</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">SAGE</style></publisher><volume><style face="normal" font="default" size="100%">21</style></volume><pages><style face="normal" font="default" size="100%">465–483</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Reinforcement learning (RL) in the context of 
artificial agents is typically used to produce behavioural responses as a function of the reward obtained by interaction with the environment. When the problem consists of learning the shortest path to a goal, it is common to use reward functions yielding a fixed value after each decision, for example a positive value if the target location has been attained and a negative one at each intermediate step. However, this fixed strategy may be overly simplistic for agents to adapt to dynamic environments, in which resources may vary from time to time. By contrast, there is significant evidence that most living beings internally modulate reward value as a function of their context to expand their range of adaptivity. Inspired by the potential of this operation, we present a review of its underlying processes and we introduce a simplified formalisation for artificial agents. The performance of this formalism is tested by monitoring the adaptation of an agent endowed with a model of motivated actor-critic, embedded with our formalisation of value and constrained by physiological stability, to environments with different resource distribution. 
Our main result shows that the manner in which reward is internally processed as a function of the agent’s motivational state, strongly influences adaptivity of the behavioural cycles generated and the agent’s physiological stability.</style></abstract><issue><style face="normal" font="default" size="100%">6</style></issue><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://journals.sagepub.com/doi/10.1177/1059712313486817&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Aryel Beck</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Luisa Damiano</style></author><author><style face="normal" font="default" size="100%">Cosi, Piero</style></author><author><style face="normal" font="default" size="100%">Tesser, Fabio</style></author><author><style face="normal" font="default" size="100%">Sommavilla, Giacomo</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Interpretation of Emotional Body Language Displayed by a Humanoid Robot: A Case Study with Children</style></title><secondary-title><style face="normal" font="default" size="100%">International Journal of Social Robotics</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">emotion</style></keyword><keyword><style  face="normal" font="default" size="100%">emotional body language</style></keyword><keyword><style  face="normal" font="default" size="100%">perception</style></keyword><keyword><style  face="normal" font="default" size="100%">Social robotics</style></keyword></keywords><dates><year><style  face="normal" 
font="default" size="100%">2013</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/article/10.1007/s12369-013-0193-z</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">5</style></volume><pages><style face="normal" font="default" size="100%">325–334</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">The work reported in this paper focuses on giving humanoid robots the capacity to express emotions with their body. Previous results show that adults are able to interpret different key poses displayed by a humanoid robot and also that changing the head position affects the expressiveness of the key poses in a consistent way. Moving the head down leads to decreased arousal (the level of energy) and valence (positive or negative emotion) whereas moving the head up produces an increase along these dimensions. Hence, changing the head position during an interaction should send intuitive signals. The study reported in this paper tested children’s ability to recognize the emotional body language displayed by a humanoid robot. 
The results suggest that body postures and head position can be used to convey emotions during child-robot interaction.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://link.springer.com/article/10.1007/s12369-013-0193-z&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Sue Attwood</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">René te Boekhorst</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Pietro Liò</style></author><author><style face="normal" font="default" size="100%">Orazio Miglino</style></author><author><style face="normal" font="default" size="100%">Giuseppe Nicosia</style></author><author><style face="normal" font="default" size="100%">Stefano Nolfi</style></author><author><style face="normal" font="default" size="100%">Mario Pavone</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">SimianWorld – A Study of Social Organisation Using an Artificial Life Model</style></title><secondary-title><style face="normal" font="default" size="100%">Advances in Artificial Life, ECAL 2013</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2013</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.mitpressjournals.org/doi/abs/10.1162/978-0-262-31709-2-ch090</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Taormina, Italy</style></pub-location><pages><style face="normal" font="default" 
size="100%">633–640</style></pages><isbn><style face="normal" font="default" size="100%">9780262317092</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In studies of social behaviour it is commonly assumed that individual complexity is the origin of intricate social interactions. In primates for example, social complexity is attributed to their intelligence and it is argued by many that the cognitive capacities of primates are especially manifest in the way they regulate their social relationships. Whereas the complex societies of non-human primates are considered to be a direct result of their cognitive abilities this assumption is not made about social insects. In the absence of certain cognitive abilities their complex societies and structurally sophisticated nests are thought to arise from self-organisation. Since it is unlikely that cognitive capacities are all-or-nothing, usually integrating a range of mechanisms, it is possible that different species use similar cognitive mechanisms resulting in different behavioural outcomes.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.mitpressjournals.org/doi/abs/10.1162/978-0-262-31709-2-ch090&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Aryel Beck</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Using Perlin Noise to Generate Emotional Expressions in a Robot</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
Annual Meeting of the Cognitive Science Society (CogSci 2013)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2013</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://mindmodeling.org/cogsci2013/papers/0343/index.html</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Cognitive Science Society</style></publisher><pub-location><style face="normal" font="default" size="100%">Berlin, Germany</style></pub-location><pages><style face="normal" font="default" size="100%">1845–1850</style></pages><isbn><style face="normal" font="default" size="100%">978-0-9768318-9-1</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">The development of social robots that convey emotion with their bodies---instead of or in conjunction with their faces---is an increasingly active research topic in the field of human-robot interaction (HRI). Rather than focusing either on postural or on dynamics aspects of bodily expression in isolation, we present a model and an empirical study where we combine both elements and produce expressive behaviors by adding dynamic elements (in the form of Perlin noise) to a subset of static postures prototypical of basic emotions, with the aim of creating expressions easily understandable by children and at the same time lively and flexible enough to be believable and engaging. 
Results show that the noise increases the recognition rate of the emotions portrayed by the robot.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://mindmodeling.org/cogsci2013/papers/0343/index.html&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Nalin, Marco</style></author><author><style face="normal" font="default" size="100%">Baroni, Ilaria</style></author><author><style face="normal" font="default" size="100%">Kruijff-Korbayová, Ivana</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Aryel Beck</style></author><author><style face="normal" font="default" size="100%">Cuayáhuitl, Heriberto</style></author><author><style face="normal" font="default" size="100%">Alberto Sanna</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Children's Adaptation in Multi-session Interaction with a Humanoid Robot</style></title><secondary-title><style face="normal" font="default" size="100%">2012 IEEE RO-MAN: The 21st IEEE International Symposium on Robot and Human Interactive Communication</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ieeexplore.ieee.org/document/6343778/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pages><style face="normal" font="default" size="100%">351–357</style></pages><language><style face="normal" font="default" 
size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This work presents preliminary observations from a study of children (N=19, age 5–12) interacting in multiple sessions with a humanoid robot in a scenario involving game activities. The main purpose of the study was to see how their perception of the robot, their engagement, and their enjoyment of the robot as a companion evolve across multiple interactions, separated by one-two weeks. However, an interesting phenomenon was observed during the experiment: most of the children soon adapted to the behaviors of the robot, in terms of speech timing, speed and tone, verbal input formulation, nodding, gestures, etc. We describe the experimental setup and the system, and our observations and preliminary analysis results, which open interesting questions for further research.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://ieeexplore.ieee.org/document/6343778&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Davila-Ross, Marina</style></author><author><style face="normal" font="default" size="100%">Kim A. 
Bard</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Eliciting Caregiving Behavior in Dyadic Human-robot Attachment-like Interactions</style></title><secondary-title><style face="normal" font="default" size="100%">ACM Transactions on Interactive Intelligent Systems</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://dl.acm.org/doi/10.1145/2133366.2133369</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">ACM</style></publisher><pub-location><style face="normal" font="default" size="100%">New York, NY</style></pub-location><volume><style face="normal" font="default" size="100%">2</style></volume><pages><style face="normal" font="default" size="100%">3:1–3:24</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We present here the design and applications of an arousal-based model controlling the behavior of a Sony AIBO robot during the exploration of a novel environment: a children's play mat. When the robot experiences too many new perceptions, the increase of arousal triggers calls for attention towards its human caregiver. The caregiver can choose to either calm the robot down by providing it with comfort, or to leave the robot coping with the situation on its own. When the arousal of the robot has decreased, the robot moves on to further explore the play mat. We gathered results from two experiments using this arousal-driven control architecture. In the first setting, we show that such a robotic architecture allows the human caregiver to influence greatly the learning outcomes of the exploration episode, with some similarities to a primary caregiver during early childhood. 
In a second experiment, we tested how human adults behaved in a similar setup with two different robots: one “needy”, often demanding attention, and one more independent, requesting far less care or assistance. Our results show that human adults recognise each profile of the robot for what they have been designed, and behave accordingly to what would be expected, caring more for the needy robot than for the other. Additionally, the subjects exhibited a preference and more positive affect whilst interacting and rating the robot we designed as needy. This experiment leads us to the conclusion that our architecture and setup succeeded in eliciting positive and caregiving behavior from adults of different age groups and technological background. Finally, the consistency and reactivity of the robot during this dyadic interaction appeared crucial for the enjoyment and engagement of the human partner.</style></abstract><issue><style face="normal" font="default" size="100%">1</style></issue><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://dl.acm.org/doi/10.1145/2133366.2133369&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Aryel Beck</style></author><author><style face="normal" font="default" size="100%">Stevens, Brett</style></author><author><style face="normal" font="default" size="100%">Kim A. 
Bard</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Emotional Body Language Displayed by Artificial Agents</style></title><secondary-title><style face="normal" font="default" size="100%">ACM Transactions on Interactive Intelligent Systems</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://dl.acm.org/doi/10.1145/2133366.2133368</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">ACM</style></publisher><pub-location><style face="normal" font="default" size="100%">New York, NY</style></pub-location><volume><style face="normal" font="default" size="100%">2</style></volume><pages><style face="normal" font="default" size="100%">2:1–2:29</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Complex and natural social interaction between artificial agents (computer-generated or robotic) and humans necessitates the display of rich emotions in order to be believable, socially relevant, and accepted, and to generate the natural emotional responses that humans show in the context of social interaction, such as engagement or empathy. Whereas some robots use faces to display (simplified) emotional expressions, for other robots such as Nao, body language is the best medium available given their inability to convey facial expressions. Displaying emotional body language that can be interpreted whilst interacting with the robot should significantly improve naturalness. This research investigates the creation of an affect space for the generation of emotional body language to be displayed by humanoid robots. 
To do so, three experiments investigating how emotional body language displayed by agents is interpreted were conducted. The first experiment compared the interpretation of emotional body language displayed by humans and agents. The results showed that emotional body language displayed by an agent or a human is interpreted in a similar way in terms of recognition. Following these results, emotional key poses were extracted from an actor's performances and implemented in a Nao robot. The interpretation of these key poses was validated in a second study where it was found that participants were better than chance at interpreting the key poses displayed. Finally, an affect space was generated by blending key poses and validated in a third study. Overall, these experiments confirmed that body language is an appropriate medium for robots to display emotions and suggest that an affect space for body expressions can be used to improve the expressiveness of humanoid robots.</style></abstract><issue><style face="normal" font="default" size="100%">1</style></issue><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://dl.acm.org/doi/10.1145/2133366.2133368&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Luisa Damiano</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">The Frontier of Synthetic Knowledge: Toward a Constructivist Science</style></title><secondary-title><style face="normal" font="default" size="100%">World Futures</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year></dates><urls><web-urls><url><style face="normal" font="default" 
size="100%">https://www.tandfonline.com/doi/abs/10.1080/02604027.2012.668409</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Taylor &amp; Francis</style></publisher><volume><style face="normal" font="default" size="100%">68</style></volume><pages><style face="normal" font="default" size="100%">171–177</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This article focuses on the frontier between the technological domain of production of artefacts and the naturalistic domain of the sciences of life and cognition. It shows that, since the 1940s, this frontier has become the place of production of an innovative kind of scientific knowledge—“synthetic knowledge.” The article describes the methodology and the main characteristics of synthetic knowledge, and formulates a hypothesis on its epistemological genealogy. Accordingly, it characterizes synthetic knowledge as one of the most advanced expressions of a heterodox tradition of research which, since the 1930s, has been promoting the development of a “non-representationalist”—“constructivist”—science.</style></abstract><issue><style face="normal" font="default" size="100%">3</style></issue><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.tandfonline.com/doi/abs/10.1080/02604027.2012.668409&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Tony Belpaeme</style></author><author><style face="normal" font="default" size="100%">Paul E. 
Baxter</style></author><author><style face="normal" font="default" size="100%">Robin Read</style></author><author><style face="normal" font="default" size="100%">Rachel Wood</style></author><author><style face="normal" font="default" size="100%">Cuayáhuitl, Heriberto</style></author><author><style face="normal" font="default" size="100%">Kiefer, Bernd</style></author><author><style face="normal" font="default" size="100%">Racioppa, Stefania</style></author><author><style face="normal" font="default" size="100%">Kruijff-Korbayová, Ivana</style></author><author><style face="normal" font="default" size="100%">Athanasopoulos, Georgios</style></author><author><style face="normal" font="default" size="100%">Valentin Enescu</style></author><author><style face="normal" font="default" size="100%">Rosemarijn Looije</style></author><author><style face="normal" font="default" size="100%">Mark A. Neerincx</style></author><author><style face="normal" font="default" size="100%">Yiannis Demiris</style></author><author><style face="normal" font="default" size="100%">Raquel Ros-Espinoza</style></author><author><style face="normal" font="default" size="100%">Aryel Beck</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Lewis, Matthew</style></author><author><style face="normal" font="default" size="100%">Baroni, Ilaria</style></author><author><style face="normal" font="default" size="100%">Nalin, Marco</style></author><author><style face="normal" font="default" size="100%">Cosi, Piero</style></author><author><style face="normal" font="default" size="100%">Giulio Paci</style></author><author><style face="normal" font="default" size="100%">Tesser, Fabio</style></author><author><style face="normal" font="default" size="100%">Sommavilla, Giacomo</style></author><author><style face="normal" font="default" size="100%">Remi 
Humbert</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Multimodal Child-Robot Interaction: Building Social Bonds</style></title><secondary-title><style face="normal" font="default" size="100%">Journal of Human-Robot Interaction</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2012</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://dl.acm.org/doi/10.5555/3109688.3109691</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">1</style></volume><pages><style face="normal" font="default" size="100%">33–53</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">For robots to interact effectively with human users they must be capable of coordinated, timely behavior in response to social context. The Adaptive Strategies for Sustainable Long-Term Social Interaction (ALIZ-E) project focuses on the design of long-term, adaptive social interaction between robots and child users in real-world settings. In this paper, we report on the iterative approach taken to scientific and technical developments toward this goal: advancing individual technical competencies and integrating them to form an autonomous robotic system for evaluation “in the wild.” The first evaluation iterations have shown the potential of this methodology in terms of adaptation of the robot to the interactant and the resulting influences on engagement. 
This sets the foundation for an ongoing research program that seeks to develop technologies for social robot companions.</style></abstract><issue><style face="normal" font="default" size="100%">2</style></issue><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://dl.acm.org/doi/10.5555/3109688.3109691&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Aryel Beck</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Luisa Damiano</style></author><author><style face="normal" font="default" size="100%">Sommavilla, Giacomo</style></author><author><style face="normal" font="default" size="100%">Tesser, Fabio</style></author><author><style face="normal" font="default" size="100%">Cosi, Piero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Children Interpretation of Emotional Body Language Displayed by a Robot</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
3rd International Conference on Social Robotics (ICSR 2011)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/chapter/10.1007%2F978-3-642-25504-5_7</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Amsterdam, The Netherlands</style></pub-location><pages><style face="normal" font="default" size="100%">62–70</style></pages><isbn><style face="normal" font="default" size="100%">978-3-642-25504-5</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Previous results show that adults are able to interpret different key poses displayed by the robot and also that changing the head position affects the expressiveness of the key poses in a consistent way. Moving the head down leads to decreased arousal (the level of energy), valence (positive or negative) and stance (approaching or avoiding) whereas moving the head up produces an increase along these dimensions [1]. Hence, changing the head position during an interaction should send intuitive signals which could be used during an interaction. The ALIZ-E target group are children between the age of 8 and 11. Existing results suggest that they would be able to interpret human emotional body language [2, 3].

Based on these results, an experiment was conducted to test whether the results of [1] can be applied to children. If yes body postures and head position could be used to convey emotions during an interaction.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://link.springer.com/chapter/10.1007%2F978-3-642-25504-5_7&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Luisa Damiano</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Tom Lenaerts</style></author><author><style face="normal" font="default" size="100%">Mario Giacobini</style></author><author><style face="normal" font="default" size="100%">Hugues Bersini</style></author><author><style face="normal" font="default" size="100%">Paul Bourgine</style></author><author><style face="normal" font="default" size="100%">Marco Dorigo</style></author><author><style face="normal" font="default" size="100%">René Doursat</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Grounding Synthetic Knowledge: An Epistemological Framework and Criteria of Relevance for the Scientific Exploration of Life, Affect and Social Cognition</style></title><secondary-title><style face="normal" font="default" size="100%">Advances In Artificial Life, ECAL 2011 (Proc. 
11th European Conference on Artificial Life)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://mitpress-request.mit.edu/sites/default/files/titles/alife/0262297140chap33.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Paris, France</style></pub-location><pages><style face="normal" font="default" size="100%">200–207</style></pages><isbn><style face="normal" font="default" size="100%">978-0-262-29714-1</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In what ways can artificial life contribute to the scientific exploration of cognitive, affective and social processes? In what sense can synthetic models be relevant for the advancement of behavioral and cognitive sciences? This article addresses these questions by way of a case study — an interdisciplinary cooperation between developmental robotics and developmental psychology in the exploration of attachment bonds. Its main aim is to show how the synthetic study of cognition, as well as the synthetic study of life, can find in autopoietic cognitive biology more than a theory useful to inspire the synthetic modelling of the processes under inquiry. We argue that autopoiesis offers, not only to artificial life, but also to the behavioural and social sciences, an epistemological framework able to generate general criteria of relevance for synthetic models of living and cognitive processes. 
By “criteria of relevance” we mean criteria (a) valuable for the three main branches of artificial life (soft, hard, and wet) and (b) useful for determining the significance of the models each branch produces for the scientific exploration of life and cognition. On the basis of these criteria and their application to the case study presented, this article defines a range of different ways that synthetic, and particularly autopoiesis-based models, can be relevant to the inquiries of biological, behavioural and cognitive sciences.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://mitpress-request.mit.edu/sites/default/files/titles/alife/0262297140chap33.pdf&quot;&gt;Download&lt;/a&gt; (PDF)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Paul E. Baxter</style></author><author><style face="normal" font="default" size="100%">Tony Belpaeme</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Cosi, Piero</style></author><author><style face="normal" font="default" size="100%">Yiannis Demiris</style></author><author><style face="normal" font="default" size="100%">Valentin Enescu</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Kruijff-Korbayová, Ivana</style></author><author><style face="normal" font="default" size="100%">Rosemarijn Looije</style></author><author><style face="normal" font="default" size="100%">Nalin, Marco</style></author><author><style face="normal" font="default" size="100%">Mark A. 
Neerincx</style></author><author><style face="normal" font="default" size="100%">Hichem Sahli</style></author><author><style face="normal" font="default" size="100%">Giacomo Sommavilla</style></author><author><style face="normal" font="default" size="100%">Tesser, Fabio</style></author><author><style face="normal" font="default" size="100%">Rachel Wood</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Long-Term Human-Robot Interaction with Young Users</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. ACM/IEEE Human-Robot Interaction conference (HRI-2011) (Robots with Children Workshop)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2011</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.researchgate.net/publication/228470784_Long-term_human-robot_interaction_with_young_users</style></url></web-urls></urls><pub-location><style face="normal" font="default" size="100%">Lausanne, Switzerland</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Artificial companion agents have the potential to combine novel means for effective health communication with young patients support and entertainment. However, the theory and practice of long-term child-robot interaction is currently an underdeveloped area of research. This paper introduces an approach that integrates multiple functional aspects necessary to implement temporally extended human-robot interaction in the setting of a paediatric ward. We present our methodology for the implementation of a companion robot which will be used to support young patients in hospital as they learn to manage a lifelong metabolic disorder (diabetes). The robot will interact with patients over an extended period of time. 
The necessary functional aspects are identified and introduced, and a review of the technical challenges involved is presented.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.researchgate.net/publication/228470784_Long-term_human-robot_interaction_with_young_users&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Luisa Damiano</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Jackie Chappell</style></author><author><style face="normal" font="default" size="100%">Susannah Thorpe</style></author><author><style face="normal" font="default" size="100%">Nick Hawes</style></author><author><style face="normal" font="default" size="100%">Aaron Sloman</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Constructing Emotions: Epistemological Groundings and Applications in Robotics for a Synthetic Approach to Emotions</style></title><secondary-title><style face="normal" font="default" size="100%">International Symposium on AI-Inspired Biology</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.cs.bham.ac.uk/research/projects/cogaff/aiib/Symposium_6/Papers/Damiano.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">The Society for the Study of Artificial Intelligence and the Simulation of Behaviour</style></publisher><pub-location><style face="normal" font="default" size="100%">De Montfort University, Leicester, UK</style></pub-location><pages><style face="normal" 
font="default" size="100%">20–28</style></pages><isbn><style face="normal" font="default" size="100%">1902956923</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Can the sciences of the artificial positively contribute to the scientific exploration of life and cognition? Can they actually improve the scientific knowledge of natural living and cognitive processes, from biological metabolism to reproduction, from conceptual mapping of the environment to logic reasoning, language, or even emotional expression? To these kinds of questions our article aims to answer in the affirmative. Its main object is the scientific emergent methodology often called the “synthetic approach”, which promotes the programmatic production of embodied and situated models of living and cognitive systems in order to explore aspects of life and cognition not accessible in natural systems and scenarios. The first part of this article presents and discusses the synthetic approach, and proposes an epistemological framework which promises to warrant genuine transmission of knowledge from the sciences of the artificial to the sciences of the natural. The second part of this article looks at the research applying the synthetic approach to the psychological study of emotional development. 
It shows how robotics, through the synthetic methodology, can develop a particular perspective on emotions, coherent with current psychological theories of emotional development and fitting well with the recent “cognitive extension” approach proposed by cognitive sciences and philosophy of mind.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://www.cs.bham.ac.uk/research/projects/cogaff/aiib/Symposium_6/Papers/Damiano.pdf&quot;&gt;Download&lt;/a&gt; (PDF)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author><author><style face="normal" font="default" size="100%">C Hasson</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Catherine Pelachaud</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Emotion et cognition: les robots comme outils et modèles</style></title><secondary-title><style face="normal" font="default" size="100%">Systèmes d'interaction émotionnelle</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year></dates><publisher><style face="normal" font="default" size="100%">Lavoisier Hermes Science</style></publisher><pub-location><style face="normal" font="default" size="100%">Paris, France</style></pub-location><isbn><style face="normal" font="default" size="100%">978-2-7462-2115-4</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><section><style face="normal" font="default" size="100%">9</style></section></record><record><source-app name="Biblio" 
version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">O'Bryne, Claire</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Harold Fellermann</style></author><author><style face="normal" font="default" size="100%">Mark Dörr</style></author><author><style face="normal" font="default" size="100%">Martin M. Hanczyc</style></author><author><style face="normal" font="default" size="100%">Lone Ladegaard Laursen</style></author><author><style face="normal" font="default" size="100%">Sarah Maurer</style></author><author><style face="normal" font="default" size="100%">Daniel Merkle</style></author><author><style face="normal" font="default" size="100%">Pierre-Alain Monnard</style></author><author><style face="normal" font="default" size="100%">Kasper Støy</style></author><author><style face="normal" font="default" size="100%">Steen Rasmussen</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Emotion in Decisions of Life and Death – Its Role in Brain-Body-Environment Interactions for Predator and Prey</style></title><secondary-title><style face="normal" font="default" size="100%">Artificial Life XII: Proc. 
of the 12th International Conference on the Synthesis and Simulation of Living Systems</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2010</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://mitpress-request.mit.edu/sites/default/files/titles/alife/0262290758chap141.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Odense, Denmark</style></pub-location><pages><style face="normal" font="default" size="100%">812–822</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Taking inspiration from the biological world, in our work we are attempting to create and examine artificial predator-prey relationships using two LEGO robots. We do so to explore the possible adaptive value of emotion-like states for action selection in this context. However, we also aim to study and consider these concepts together at different levels of abstraction. For example, in terms of individual agents’ brain-body-environment interactions, as well as the (emergent) predator-prey relationships resulting from these. Here, we discuss some of the background concepts and motivations driving the design of our implementation and experiments. First, we explain why we think the predator-prey relationship is so interesting. Narrowing our focus to emotion-based architectures, this is followed by a review of existing literature, comparing different types and highlighting the novel aspects of our own. 
We conclude with our proposed contributions to the literature and thus, ultimately, the design and creation of artificial life.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://mitpress-request.mit.edu/sites/default/files/titles/alife/0262290758chap141.pdf&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Oros, Nicolas</style></author><author><style face="normal" font="default" size="100%">Volker Steuber</style></author><author><style face="normal" font="default" size="100%">Davey, Neil</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Roderick G Adams</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Evolution of Bistable Dynamics in Spiking Neural Controllers for Agents Performing Olfactory Attraction and Aversion</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
19th Annual Computational Neuroscience Meeting (CNS*2010)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year><pub-dates><date><style  face="normal" font="default" size="100%">07/2010</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://bmcneurosci.biomedcentral.com/articles/10.1186/1471-2202-11-S1-P92</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">BioMed Central Ltd.</style></publisher><pub-location><style face="normal" font="default" size="100%">San Antonio, TX</style></pub-location><volume><style face="normal" font="default" size="100%">11(Suppl 1)</style></volume><pages><style face="normal" font="default" size="100%">92</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Aryel Beck</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Alexandre Mazel</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Interpretation of Emotional Body Language Displayed by Robots</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
3rd International Workshop on Affective Interaction in Natural Environments, AFFINE'10</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year></dates><publisher><style face="normal" font="default" size="100%">ACM</style></publisher><pub-location><style face="normal" font="default" size="100%">Firenze, Italy</style></pub-location><pages><style face="normal" font="default" size="100%">37–42</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4503-0170-1</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In order for robots to be socially accepted and generate empathy they must display emotions. For robots such as Nao, body language is the best medium available, as they do not have the ability to display facial expressions. Displaying emotional body language that can be interpreted whilst interacting with the robot should greatly improve its acceptance. This research investigates the creation of an &quot;Affect Space&quot; for the generation of emotional body language that could be displayed by robots. An Affect Space is generated by &quot;blending&quot; (i.e. interpolating between) different emotional expressions to create new ones. An Affect Space for body language based on the Circumplex Model of emotions has been created. The experiment reported in this paper investigated the perception of specific key poses from the Affect Space. The results suggest that this Affect Space for body expressions can be used to improve the expressiveness of humanoid robots. In addition, early results of a pilot study are described. 
It revealed that the context helps human subjects improve their recognition rate during a human-robot imitation game, and in turn this recognition leads to better outcome of the interactions.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Ignasi Cos</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Gillian M Hayes</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Learning Affordances of Consummatory Behaviors: Motivation-Driven Adaptive Perception</style></title><secondary-title><style face="normal" font="default" size="100%">Adaptive Behavior</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://journals.sagepub.com/doi/10.1177/1059712310375471</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">SAGE</style></publisher><volume><style face="normal" font="default" size="100%">18</style></volume><pages><style face="normal" font="default" size="100%">285–314</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This article introduces a formalization of the dynamics between sensorimotor interaction and homeostasis, integrated in a single architecture to learn object affordances of consummatory behaviors. We also describe the principles necessary to learn grounded knowledge in the context of an agent and its surrounding environment, which we use to investigate the constraints imposed by the agent’s internal dynamics and the environment. 
This is tested with an embodied, situated robot, in a simulated environment, yielding results that support this formalization. Furthermore, we show that this methodology allows learned affordances to be dynamically redefined, depending on object similarity, resource availability, and the rhythms of the agent’s internal physiology. For example, if a resource becomes increasingly scarce, the value assigned by the agent to its related effect increases accordingly, encouraging a more active behavioral strategy to maintain physiological stability. Experimental results also suggest that a combination of motivation-driven and affordance learning in a single architecture should simplify its overall complexity while increasing its adaptivity.</style></abstract><issue><style face="normal" font="default" size="100%">3-4</style></issue><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://journals.sagepub.com/doi/10.1177/1059712310375471&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Aryel Beck</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Kim A. Bard</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Towards an Affect Space for Robots to Display Emotional Body Language</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
19th Annual IEEE International Symposium on Robot and Human Interactive Communication (IEEE RO-MAN 2010)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Viareggio, Italy</style></pub-location><pages><style face="normal" font="default" size="100%">464–469</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4244-7991-7</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In order for robots to be socially accepted and generate empathy it is necessary that they display rich emotions. For robots such as Nao, body language is the best medium available given their inability to convey facial expressions. Displaying emotional body language that can be interpreted whilst interacting with the robot should significantly improve its sociability. This research investigates the creation of an Affect Space for the generation of emotional body language to be displayed by robots. To create an Affect Space for body language, one has to establish the contribution of the different positions of the joints to the emotional expression. The experiment reported in this paper investigated the effect of varying a robot's head position on the interpretation, Valence, Arousal and Stance of emotional key poses. It was found that participants were better than chance level in interpreting the key poses. This finding confirms that body language is an appropriate medium for robot to express emotions. Moreover, the results of this study support the conclusion that Head Position is an important body posture variable. 
Head Position up increased correct identification for some emotion displays (pride, happiness, and excitement), whereas Head Position down increased correct identification for other displays (anger, sadness). Fear, however, was identified well regardless of Head Position. Head up was always evaluated as more highly Aroused than Head straight or down. Evaluations of Valence (degree of negativity to positivity) and Stance (degree to which the robot was aversive to approaching), however, depended on both Head Position and the emotion displayed. The effects of varying this single body posture variable were complex.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Pierre Andry</style></author><author><style face="normal" font="default" size="100%">Arnaud J Blanchard</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Shuzhi Sam Ge</style></author><author><style face="normal" font="default" size="100%">Haizhou Li</style></author><author><style face="normal" font="default" size="100%">John-John Cabibihan</style></author><author><style face="normal" font="default" size="100%">Yeow Kee Tan</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Using the Interaction Rhythm as a Natural Reinforcement Signal for Social Robots: A Matter of Belief</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
International Conference on Social Robotics, ICSR 2010</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2010</style></year></dates><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Singapore</style></pub-location><volume><style face="normal" font="default" size="100%">6414</style></volume><pages><style face="normal" font="default" size="100%">81–89</style></pages><isbn><style face="normal" font="default" size="100%">978-3-642-17247-2</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper, we present the results of a pilot study of a human robot interaction experiment where the rhythm of the interaction is used as a reinforcement signal to learn sensorimotor associations. The algorithm uses breaks and variations in the rhythm at which the human is producing actions. The concept is based on the hypothesis that a constant rhythm is an intrinsic property of a positive interaction whereas a break reflects a negative event. Subjects from various backgrounds interacted with a NAO robot where they had to teach the robot to mirror their actions by learning the correct sensorimotor associations. The results show that in order for the rhythm to be a useful reinforcement signal, the subjects have to be convinced that the robot is an agent with which they can act naturally, using their voice and facial expressions as cues to help it understand the correct behaviour to learn. When the subjects do behave naturally, the rhythm and its variations truly reflects how well the interaction is going and helps the robot learn efficiently. 
These results mean that non-expert users can interact naturally and fruitfully with an autonomous robot if the interaction is believed to be natural, without any technical knowledge of the cognitive capacities of the robot.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Kim A. Bard</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Assessing Human Responses to Different Robot Attachment Profiles</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 18th Annual IEEE International Symposium on Robot and Human Interactive Communication (IEEE RO-MAN 2009)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2009</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ieeexplore.ieee.org/document/5326216/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Toyama, Japan</style></pub-location><pages><style face="normal" font="default" size="100%">251–256</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4244-5081-7</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Emotional regulation is believed to be crucial for a balanced emotional and cognitive development in infants. 
Furthermore, during the first year of a child's life, the mother is playing a central role in shaping the development, through the attachment bond she shares with her child. Based on previous work on our model of arousal modulation for an autonomous robot, we present an experiment where human adults were interacting visually and via tactile contact with a SONY Aibo robot exploring a children playmat. The robots had two different attachment profiles: one requiring less attention than the other. The subjects answered one questionnaire per robot, describing how they would rate their experience with each robot. The analysis of the subjects' responses allows us to conclude that this setting was sufficient to elicit positive and active caretaking-like behaviours from the subjects, according to the profile of the robot they interacted with.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">John C Murray</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Developing Preferential Attention to a Speaker: A Robot Learning to Recognise its Carer</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
2009 IEEE Symposium on Artificial Life (ALIFE 2009)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">03/2009</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ieeexplore.ieee.org/document/4937697/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Nashville, TN</style></pub-location><pages><style face="normal" font="default" size="100%">77–84</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4244-2763-5</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we present a socially interactive multi-modal robotic head, ERWIN - Emotional Robot With Intelligent Networks, capable of emotion expression and interaction via speech and vision. The model presented shows how a robot can learn to attend to the voice of a specific speaker, providing a relevant emotional expressive response based on previous interactions. We show three aspects of the system; first, the learning phase, allowing the robot to learn faces and voices from interaction. Second, recognition of the learnt faces and voices, and third, the emotion expression aspect of the system. We show this from the perspective of an adult and child interacting and playing a small game, much like an infant and caregiver situation. 
We also discuss the importance of speaker recognition in terms of Human-Robot-Interaction and emotion, showing how the interaction process between a participant and ERWIN can allow the robot to prefer to attend to that person.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lori Malatesta</style></author><author><style face="normal" font="default" size="100%">John C Murray</style></author><author><style face="normal" font="default" size="100%">Amaryllis Raouzaiou</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Kostas Karpouzis</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Mario I. Chacon-M.</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Emotion Modelling and Facial Affect Recognition in Human-Computer and Human-Robot Interaction</style></title><secondary-title><style face="normal" font="default" size="100%">Affective Computing, Emotion Modelling, Synthesis and Recognition</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.intechopen.com/books/state_of_the_art_in_face_recognition/emotion_modelling_and_facial_affect_recognition_in_human-computer_and_human-robot_interaction</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">InTechOpen Publishers</style></publisher><isbn><style face="normal" font="default" size="100%">978-3-902613-42-4</style></isbn><language><style face="normal" font="default" 
size="100%">eng</style></language><section><style face="normal" font="default" size="100%">12</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Oros, Nicolas</style></author><author><style face="normal" font="default" size="100%">Volker Steuber</style></author><author><style face="normal" font="default" size="100%">Davey, Neil</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Roderick G Adams</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Evolution of Bilateral Symmetry in Agents Controlled by Spiking Neural Networks</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 2009 IEEE Symposium on Artificial Life (ALIFE 2009)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">03/2009</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ieeexplore.ieee.org/document/4937702/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Nashville, TN</style></pub-location><pages><style face="normal" font="default" size="100%">116–123</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4244-2763-5</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We present in this paper three novel developmental models allowing information to be encoded in space and time, using spiking neurons placed on a 2D substrate. 
In two of these models, we introduce neural development that can use bilateral symmetry. We show that these models can create neural controllers for agents evolved to perform chemotaxis. Neural bilateral symmetry can be evolved and be beneficial for an agent. This work is the first, as far as we know, to present developmental models where spiking neurons are generated in space and where bilateral symmetry can be evolved and proved to be beneficial in this context.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">O'Bryne, Claire</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">John C Murray</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">The Importance of the Body in Affect-Modulated Action Selection: A Case Study Comparing Proximal Versus Distal Perception in a Prey-Predator Scenario</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 3rd Intl. 
Conference on Affective Computing and Intelligent Interaction (ACII 2009)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2009</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Amsterdam, The Netherlands</style></pub-location><pages><style face="normal" font="default" size="100%">1–6</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In the context of the animat approach, we investigate the effect of an emotion-like hormonal mechanism, as a modulator of perception - and second order controller to an underlying motivation-based action selection architecture - on brain-body-environment interactions within a prey-predator scenario. We are particularly interested in the effects that affective modulation of different perceptual capabilities has on the dynamics of interactions between predator and prey, as part of a broader study of the adaptive value of emotional states such as &quot;fear&quot; and &quot;aggression&quot; in the context of action selection. In this paper we present experiments where we modulated the architecture of a prey robot using two different types of sensory capabilities, proximal and distal, effectively creating combinations of different prey &quot;brains&quot; and &quot;bodies&quot;.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">John C Murray</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Kim A. 
Bard</style></author><author><style face="normal" font="default" size="100%">Ross, Marina Davila</style></author><author><style face="normal" font="default" size="100%">Thorsteinsson, Kate</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Kim, Jong-Hwan</style></author><author><style face="normal" font="default" size="100%">Ge, Shuzhi Sam</style></author><author><style face="normal" font="default" size="100%">Vadakkepat, Prahlad</style></author><author><style face="normal" font="default" size="100%">Jesse, Norbert</style></author><author><style face="normal" font="default" size="100%">Al Mamun, Abdullah</style></author><author><style face="normal" font="default" size="100%">Puthusserypady K, Sadasivan</style></author><author><style face="normal" font="default" size="100%">Rückert, Ulrich</style></author><author><style face="normal" font="default" size="100%">Sitte, Joaquin</style></author><author><style face="normal" font="default" size="100%">Witkowski, Ulf</style></author><author><style face="normal" font="default" size="100%">Nakatsu, Ryohei</style></author><author><style face="normal" font="default" size="100%">Braunl, Thomas</style></author><author><style face="normal" font="default" size="100%">Baltes, Jacky</style></author><author><style face="normal" font="default" size="100%">Anderson, John</style></author><author><style face="normal" font="default" size="100%">Wong, Ching-Chang</style></author><author><style face="normal" font="default" size="100%">Verner, Igor</style></author><author><style face="normal" font="default" size="100%">Ahlgren, David</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">The Influence of Social Interaction on the Perception of Emotional Expression: A Case Study with a Robot Head</style></title><secondary-title><style face="normal" font="default" size="100%">Advances in Robotics: Proc. 
FIRA RoboWorld Congress 2009</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2009</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/chapter/10.1007%2F978-3-642-03983-6_10</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer Berlin Heidelberg</style></publisher><pub-location><style face="normal" font="default" size="100%">Incheon, Korea</style></pub-location><volume><style face="normal" font="default" size="100%">5744</style></volume><pages><style face="normal" font="default" size="100%">63–72</style></pages><isbn><style face="normal" font="default" size="100%">978-3-642-03983-6</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we focus primarily on the influence that socio-emotional interaction has on the perception of emotional expression by a robot. We also investigate and discuss the importance of emotion expression in socially interactive situations involving human robot interaction (HRI), and show the importance of utilising emotion expression when dealing with interactive robots, that are to learn and develop in socially situated environments. We discuss early expressional development and the function of emotion in communication in humans and how this can improve HRI communications. 
Finally we provide experimental results showing how emotion-rich interaction via emotion expression can affect the HRI process by providing additional information.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Pierre-Yves Oudeyer</style></author><author><style face="normal" font="default" size="100%">Christian Balkenius</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Learning Affective Landmarks</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 9th International Conference on Epigenetic Robotics: Modeling Cognitive Development in Robotic Systems (EpiRob 2009)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lund University Cognitive Studies</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">11/2009</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.lucs.lu.se/LUCS/146/epirob09.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Lund University</style></publisher><pub-location><style face="normal" font="default" size="100%">Venice, Italy</style></pub-location><volume><style face="normal" font="default" size="100%">146</style></volume><pages><style face="normal" font="default" size="100%">211–212</style></pages><isbn><style face="normal" font="default" 
size="100%">978-91-977-380-7-1</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This poster presents early work on the effects of arousal and its regulation on learning about the environment, particularly affective memories associated with places that can be used to safely guide exploration.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><secondary-authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Pierre-Yves Oudeyer</style></author><author><style face="normal" font="default" size="100%">Christian Balkenius</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Proceedings of the Ninth International Conference on Epigenetic Robotics: Modeling Cognitive Development in Robotic Systems</style></title><tertiary-title><style face="normal" font="default" size="100%">Lund University Cognitive Studies</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.lucs.lu.se/LUCS/146/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Lund University</style></publisher><pub-location><style face="normal" font="default" size="100%">Venice, Italy</style></pub-location><volume><style face="normal" font="default" size="100%">146</style></volume><isbn><style face="normal" font="default" size="100%">978-91-977-380-7-1</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style 
face="normal" font="default" size="100%">David Bowes</style></author><author><style face="normal" font="default" size="100%">Roderick G Adams</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Volker Steuber</style></author><author><style face="normal" font="default" size="100%">Davey, Neil</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">The role of lateral inhibition in the sensory processing in a simulated spiking neural controller for a robot</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 2009 IEEE Symposium on Artificial Life (ALIFE 2009)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">03/2009</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ieeexplore.ieee.org/document/4937710/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Nashville, TN</style></pub-location><pages><style face="normal" font="default" size="100%">179–183</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4244-2763-5</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Visual adaptation is the process that allows animals to be able to see over a wide range of light levels. This is achieved partially by lateral inhibition in the retina which compensates for low/high light levels. Neural controllers which cause robots to turn away from or towards light tend to work in a limited range of light conditions. 
In real environments, the light conditions can vary greatly reducing the effectiveness of the robot. Our solution for a simple Braitenberg vehicle is to add a single inhibitory neuron which laterally inhibits the output to the robot motors. This solution has additionally reduced the computational complexity of our simple neuron allowing for a greater number of neurons to be simulated with a fixed set of resources.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">David Bowes</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Roderick G Adams</style></author><author><style face="normal" font="default" size="100%">Volker Steuber</style></author><author><style face="normal" font="default" size="100%">Davey, Neil</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Pierre-Yves Oudeyer</style></author><author><style face="normal" font="default" size="100%">Christian Balkenius</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Should I worry about my stressed pregnant robot?</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
9th International Conference on Epigenetic Robotics: Modeling Cognitive Development in Robotic Systems (EpiRob 2009)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lund University Cognitive Studies</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">11/2009</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.lucs.lu.se/LUCS/146/epirob09.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Lund University</style></publisher><pub-location><style face="normal" font="default" size="100%">Venice, Italy</style></pub-location><volume><style face="normal" font="default" size="100%">146</style></volume><pages><style face="normal" font="default" size="100%">203–204</style></pages><isbn><style face="normal" font="default" size="100%">978-91-977-380-7-1</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">John C Murray</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Towards a Model of Emotion Expression in an Interactive Robot Head</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
18th IEEE International Symposium on Robot and Human Interactive Communication (IEEE RO-MAN 2009)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2009</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">IEEE Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Toyama, Japan</style></pub-location><pages><style face="normal" font="default" size="100%">627–632</style></pages><isbn><style face="normal" font="default" size="100%">978-1-4244-5081-7</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we present a robotic head designed for interaction with humans, endowed with mechanisms to make the robot respond to social interaction with emotional expressions, allowing the emotional expression of the robot to be directly influenced by the social interaction process. We look into how emotionally expressive visual feedback from the robot can enrich the interaction process and provide the participant with additional information regarding the interaction, allowing the user to better understand the intentions of the robot. We discuss some of the interactions that are possible with ERWIN and how this can affect the response of the system. We show experimental scenarios where the interaction process influences the emotional expressions and how the participants interpret this. 
We draw our conclusions from the feedback from experiments, showing that indeed emotional expression can have an influence on the social interaction between a robot and human.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Oros, Nicolas</style></author><author><style face="normal" font="default" size="100%">Volker Steuber</style></author><author><style face="normal" font="default" size="100%">Davey, Neil</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Roderick G Adams</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Asada, Minoru</style></author><author><style face="normal" font="default" size="100%">Hallam, John C T</style></author><author><style face="normal" font="default" size="100%">Jean-Arcady Meyer</style></author><author><style face="normal" font="default" size="100%">Tani, Jun</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Adaptive Olfactory Encoding in Agents Controlled by Spiking Neural Networks</style></title><secondary-title><style face="normal" font="default" size="100%">From Animals to Animats 10: Proc. 
10th International Conference on Simulation of Adaptive Behavior (SAB 2008)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science (LNCS)</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">07/2008</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://link.springer.com/chapter/10.1007/978-3-540-69134-1_15</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer, Berlin, Heidelberg</style></publisher><pub-location><style face="normal" font="default" size="100%">Osaka, Japan</style></pub-location><volume><style face="normal" font="default" size="100%"> 5040</style></volume><pages><style face="normal" font="default" size="100%">148–158</style></pages><isbn><style face="normal" font="default" size="100%">978-3-540-69134-1</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We created a neural architecture that can use two different types of information encoding strategies depending on the environment. The goal of this research was to create a simulated agent that could react to two different overlapping chemicals having varying concentrations. The neural network controls the agent by encoding its sensory information as temporal coincidences in a low concentration environment, and as firing rates at high concentration. 
With such an architecture, we could study synchronization of firing in a simple manner and see its effect on the agent’s behaviour.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Aylett, Ruth</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Animating Affective Robots for Social Interaction</style></title><secondary-title><style face="normal" font="default" size="100%">Animating Expressive Characters for Social Interaction</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Advances in Consciousness Research</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year></dates><number><style face="normal" font="default" size="100%">74</style></number><publisher><style face="normal" font="default" size="100%">John Benjamins Publishing Co.</style></publisher><pages><style face="normal" font="default" size="100%">103–121</style></pages><isbn><style face="normal" font="default" size="100%">978-90-272-5210-4</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>6</ref-type><contributors><secondary-authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Aylett, Ruth</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Animating Expressive Characters for Social 
Interaction</style></title><secondary-title><style face="normal" font="default" size="100%">Advances in Consciousness Research</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year></dates><number><style face="normal" font="default" size="100%">74</style></number><publisher><style face="normal" font="default" size="100%">John Benjamins Publishing Co.</style></publisher><isbn><style face="normal" font="default" size="100%">9789027289834</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Matthew Schlesinger</style></author><author><style face="normal" font="default" size="100%">Luc Berthouze</style></author><author><style face="normal" font="default" size="100%">Christian Balkenius</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Conscientious Caretaking for Autonomous Robots: An Arousal-Based Model of Exploratory Behavior</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
8th International Conference on Epigenetic Robotics: Modeling Cognitive Development in Robotic Systems (EpiRob 2008)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lund University Cognitive Studies</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.lucs.lu.se/LUCS/139/hiolle.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Lund University</style></publisher><pub-location><style face="normal" font="default" size="100%">Brighton, UK</style></pub-location><volume><style face="normal" font="default" size="100%">139</style></volume><pages><style face="normal" font="default" size="100%">45–52</style></pages><isbn><style face="normal" font="default" size="100%">978-91-977-380-1-9</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">The question of how autonomous robots could be part of our everyday life is gaining increasing interest. We present here an experiment in which an autonomous robot explores its environment and tries to familiarize itself with its novel elements using a neural-network-based architecture. When confronted with novelty, the lack of stability of its learning structures increases the arousal level of the robot, pushing it to look for comfort from its caretaker in order to reduce this arousal. In this paper, we studied how the behavior of the caretaker—and in particular the amount of comfort it provides to the robot during its exploration of the environment—influences the course of the robot’s exploration and learning experience. This work takes inspiration from early mother-infant interactions and the impact that the primary caretaker has on the development of children—at least in mainstream Western culture. 
The underlying hypothesis is that the behavior of a caregiver, and particularly his/her role in modulating arousal, will influence the development of an autonomous robot, and that arousal regulation will also depend on how accurately the robot signals its internal state and how the caretaker (or human user) responds to these signals.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Pichler, Peter-Paul</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Seth Bullock</style></author><author><style face="normal" font="default" size="100%">Jason Noble</style></author><author><style face="normal" font="default" size="100%">Richard A. Watson</style></author><author><style face="normal" font="default" size="100%">Mark A Bedau</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Evolving Morphological and Behavioral Diversity Without Predefined Behavior Primitives</style></title><secondary-title><style face="normal" font="default" size="100%">Artificial Life XI: Proceedings of the Eleventh International Conference on the Simulation and Synthesis of Living Systems</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2008</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://mitpress-request.mit.edu/sites/default/files/titles/alife/0262287196chap62.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Winchester, 
UK</style></pub-location><pages><style face="normal" font="default" size="100%">474–481</style></pages><isbn><style face="normal" font="default" size="100%">978-0-262-75017-2</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Virtual ecosystems, where natural selection is used to evolve complex agent behavior, are often preferred to traditional genetic algorithms because the absence of an explicitly defined fitness allows for a less constrained evolutionary process. However, these model ecosystems typically pre-specify a discrete set of possible action primitives the agents can perform. We think that this also constrains the evolutionary process with the modellers preconceptions of what possible solutions could be. Therefore, we propose an ecosystem model to evolve complete agents where all higher-level behavior results strictly from the interplay between extremely simple components and where no ‘behavior primitives’ are defined. 
On the basis of four distinct survival strategies we show that such primitives are not necessary to evolve behavioral diversity even in a simple and homogeneous environment.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Oros, Nicolas</style></author><author><style face="normal" font="default" size="100%">Volker Steuber</style></author><author><style face="normal" font="default" size="100%">Davey, Neil</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Roderick G Adams</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Seth Bullock</style></author><author><style face="normal" font="default" size="100%">Jason Noble</style></author><author><style face="normal" font="default" size="100%">Richard A. 
Watson</style></author><author><style face="normal" font="default" size="100%">Mark A Bedau</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Optimal Noise in Spiking Neural Networks for the Detection of Chemicals by Simulated Agents</style></title><secondary-title><style face="normal" font="default" size="100%">Artificial Life XI: Proceedings of the Eleventh International Conference on the Simulation and Synthesis of Living Systems</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2008</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://mitpress-request.mit.edu/sites/default/files/titles/alife/0262287196chap58.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Winchester, UK</style></pub-location><pages><style face="normal" font="default" size="100%">443–449</style></pages><isbn><style face="normal" font="default" size="100%">978-0-262-75017-2</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We created a spiking neural controller for an agent that could use two different types of information encoding strategies depending on the level of chemical concentration present in the environment. The first goal of this research was to create a simulated agent that could react and stay within a region where there were two different overlapping chemicals having uniform concentrations. 
The agent was controlled by a spiking neural network that encoded sensory information using temporal coincidence of incoming spikes when the level of chemical concentration was low, and as firing rates at high level of concentration. With this architecture, we could study synchronization of firing in a simple manner and see its effect on the agent’s behaviour. The next experiment we did was to use a more realistic model by having an environment composed of concentration gradients and by adding input current noise to all neurons. We used a realistic model of diffusive noise and showed that it could improve the agent’s behaviour if used within a certain range. Therefore, an agent with neuronal noise was better able to stay within the chemical concentration than an agent without.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Oros, Nicolas</style></author><author><style face="normal" font="default" size="100%">Volker Steuber</style></author><author><style face="normal" font="default" size="100%">Davey, Neil</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Roderick G Adams</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Trappl, R</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Optimal Receptor Response Functions for the Detection of Pheromones by Agents Driven by Spiking Neural Networks</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 9th European Meeting on Cybernetics and Systems Research, Vol. 
II</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">03/2008</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.cogsci.uci.edu/~noros/mypapers/OROS_2008_EMCSR.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Austrian Society for Cybernetic Studies</style></publisher><pub-location><style face="normal" font="default" size="100%">Vienna, Austria</style></pub-location><pages><style face="normal" font="default" size="100%">427–432</style></pages><isbn><style face="normal" font="default" size="100%">978-3-85206-175-7</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">The goal of the work presented here is to find a model of a spiking sensory neuron that could cope with small variations in the concentration of simulated chemicals and also the whole range of concentrations. By using a biologically plausible sigmoid function in our model to map chemical concentration to current, we could produce agents able to detect the whole range of concentration of chemicals (pheromones) present in the environment as well as small variations of them. 
The sensory neurons used in our model are able to encode the stimulus intensity into appropriate firing rates.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">David Bowes</style></author><author><style face="normal" font="default" size="100%">Roderick G Adams</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Volker Steuber</style></author><author><style face="normal" font="default" size="100%">Davey, Neil</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Madani, K</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Receptor Response and Soma Leakiness in a Simulated Spiking Neural Controller for a Robot</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 4th International Workshop on Artificial Neural Networks and Intelligent Information Processing (ANNIIP 2008)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">05/2008</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://uhra.herts.ac.uk/handle/2299/6832</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">INSTICC (Inst. Syst. 
Technologies Information Control and Communication)</style></publisher><pub-location><style face="normal" font="default" size="100%">Funchal, Madeira, Portugal</style></pub-location><pages><style face="normal" font="default" size="100%">100–106</style></pages><isbn><style face="normal" font="default" size="100%">978-989-8111-33-3</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This paper investigates different models of leakiness for the soma of a simulated spiking neural controller for a robot exhibiting negative photo-taxis. It also investigates two models of receptor response to stimulus levels. The results show that exponential decay of ions across the soma and of a receptor response function where intensity is proportional to intensity is the best combination for dark seeking behaviour.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>19</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">M Simon</style></author><author><style face="normal" font="default" size="100%">P Canet</style></author><author><style face="normal" font="default" size="100%">R Soussignan</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Jacqueline Nadel</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Reconnaissance et résonance émotionnelle face à un humain et à un robot chez des enfants typiques et des enfants avec autisme de haut niveau</style></title><secondary-title><style face="normal" font="default" size="100%">Bulletin scientifique de l’Arapi</style></secondary-title></titles><dates><year><style  face="normal" font="default" 
size="100%">2008</style></year></dates><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">John C Murray</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Robert Lowe</style></author><author><style face="normal" font="default" size="100%">Morse, A</style></author><author><style face="normal" font="default" size="100%">Ziemke, T</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Towards a Hormone-Modulated Model for Emotion Expression in a Socially Interactive Robot Head</style></title><secondary-title><style face="normal" font="default" size="100%">Workshop &quot;The role of Emotion in Adaptive Behavior and Cognitive Robotics&quot; held in conjunction with 10th International Conference on Simulation of Adaptive Behavior (SAB 2008)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">07/2008</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://image.ece.ntua.gr/projects/feelix/system/files/Murray_SAB_final-1.pdf</style></url></web-urls></urls><pub-location><style face="normal" font="default" size="100%">Osaka, Japan</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we present a robot head ERWIN capable of human-robot interaction, endowed with interactive mechanisms for allowing the emotional state and expression of the robot to be directly influenced by the 
social interaction process. Allowing the interaction process to influence the expression of the robot head can in turn influence the way the user interacts with the robot, in addition to allowing the user to better understand the intentions of the robot during this process. We discuss some of the interactions that are possible with ERWIN and how this can affect the response of the system. We show an example scenario where the interaction process makes the robot go through several different emotions.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Seth Bullock</style></author><author><style face="normal" font="default" size="100%">Jason Noble</style></author><author><style face="normal" font="default" size="100%">Richard A. Watson</style></author><author><style face="normal" font="default" size="100%">Mark A Bedau</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Why Should You Care? 
An Arousal-Based Model of Exploratory Behavior for Autonomous Robots</style></title><secondary-title><style face="normal" font="default" size="100%">Artificial Life XI: Proceedings of the Eleventh International Conference on the Simulation and Synthesis of Living Systems</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">08/2008</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://mitpress-request.mit.edu/sites/default/files/titles/alife/0262287196chap32.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Winchester, UK</style></pub-location><pages><style face="normal" font="default" size="100%">242–248</style></pages><isbn><style face="normal" font="default" size="100%">978-0-262-75017-2</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">The question of how autonomous robots could be part of our everyday life is of a growing interest. We present here an experiment in which an autonomous robot explores its environment and tries to familiarize itself with the features available using a neural-network-based architecture. The lack of stability of its learning structures increases the arousal level of the robot, pushing the robot to look for comfort from its caretaker to reduce the arousal. In this paper, we studied how the behavior of the caretaker influences the course of the robot exploration and learning experience by providing certain amount of comfort during this exploration. 
We then draw some conclusions on how to use this architecture together with related work, to enhance the adaptability of autonomous robots development.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Arnaud J Blanchard</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Martin V Butz</style></author><author><style face="normal" font="default" size="100%">Olivier Sigaud</style></author><author><style face="normal" font="default" size="100%">Giovanni Pezzulo</style></author><author><style face="normal" font="default" size="100%">Gianluca Baldassarre</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Anticipating Rewards in Continuous Time and Space: A Case Study in Developmental Robotics</style></title><secondary-title><style face="normal" font="default" size="100%">Anticipatory Behavior in Adaptive Learning Systems: From Brains to Individual and Social Behavior</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Artificial Intelligence</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2007</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.springer.com/gp/book/9783540742616</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Berlin, Heidelberg</style></pub-location><volume><style face="normal" font="default" size="100%">4520</style></volume><pages><style face="normal" font="default" size="100%">267–284</style></pages><isbn><style face="normal" 
font="default" size="100%">978-3-540-74261-6</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This paper presents the first basic principles, implementation and experimental results of what could be regarded as a new approach to reinforcement learning, where agents—physical robots interacting with objects and other agents in the real world—can learn to anticipate rewards using their sensory inputs. Our approach does not need discretization, notion of events, or classification, and instead of learning rewards for the different possible actions of an agent in all the situations, we propose to make agents learn only the main situations worth avoiding and reaching. However, the main focus of our work is not reinforcement learning as such, but modeling cognitive development on a small autonomous robot interacting with an “adult” caretaker, typically a human, in the real world; the control architecture follows a Perception-Action approach incorporating a basic homeostatic principle. This interaction occurs in very close proximity, uses very coarse and limited sensory-motor capabilities, and affects the “well-being” and affective state of the robot. The type of anticipatory behavior we are concerned with in this context relates to both sensory and reward anticipation. 
We have applied and tested our model on a real robot.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Parussel, Karla</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">de Sá, Joaquim Marques</style></author><author><style face="normal" font="default" size="100%">Alexandre, Luís A.</style></author><author><style face="normal" font="default" size="100%">Duch, Włodzisław</style></author><author><style face="normal" font="default" size="100%">Mandic, Danilo</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Biasing Neural Networks Towards Exploration or Exploitation Using Neuromodulation</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
17th International Conference on Artificial Neural Networks (ICANN 2007), Part II</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">LNCS</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2007</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2007</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/chapter/10.1007/978-3-540-74695-9_91</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Springer Berlin Heidelberg</style></publisher><pub-location><style face="normal" font="default" size="100%">Porto, Portugal</style></pub-location><volume><style face="normal" font="default" size="100%">4669</style></volume><pages><style face="normal" font="default" size="100%">889–898</style></pages><isbn><style face="normal" font="default" size="100%">978-3-540-74695-9</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Taking neuromodulation as a mechanism underlying emotions, this paper investigates how such a mechanism can bias an artificial neural network towards exploration of new courses of action, as seems to be the case in positive emotions, or exploitation of known possibilities, as in negative emotions such as predatory fear. We use neural networks of spiking leaky integrate-and-fire neurons acting as minimal disturbance systems, and test them with continuous actions. The networks have to balance the activations of all their output neurons concurrently. We have found that having the middle layer modulate the output layer helps balance the activations of the output neurons. A second discovery is that when the network is modulated in this way, it performs better at tasks requiring the exploitation of actions that are found to be rewarding. 
This is complementary to previous findings where having the input layer modulate the middle layer biases the network towards exploration of alternative actions. We conclude that a network can be biased towards either exploration or exploitation depending on which layers are being modulated.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Avila-García, Orlando</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Ana C R Paiva</style></author><author><style face="normal" font="default" size="100%">Rui Prada</style></author><author><style face="normal" font="default" size="100%">Rosalind W Picard</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">A Bottom-Up Investigation of Emotional Modulation in Competitive Scenarios</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
Second International Conference on Affective Computing and Intelligent Interaction (ACII 2007)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2007</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2007</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Springer Berlin Heidelberg</style></publisher><pub-location><style face="normal" font="default" size="100%">Lisbon, Portugal</style></pub-location><volume><style face="normal" font="default" size="100%">4738</style></volume><pages><style face="normal" font="default" size="100%">398–409</style></pages><isbn><style face="normal" font="default" size="100%">978-3-540-74888-5</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper, we take an incremental, bottom-up approach to investigate plausible mechanisms underlying emotional modulation of behavior selection and their adaptive value in autonomous robots. We focus in particular on achieving adaptive behavior selection in competitive robotic scenarios through modulation of perception, drawing on the notion of biological hormones. 
We discuss results from testing our architectures in two different competitive robotic scenarios.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Luc Berthouze</style></author><author><style face="normal" font="default" size="100%">C G Prince</style></author><author><style face="normal" font="default" size="100%">M Littman</style></author><author><style face="normal" font="default" size="100%">Hideki Kozima</style></author><author><style face="normal" font="default" size="100%">Christian Balkenius</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Developing Sensorimotor Associations Through Attachment Bonds</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
7th International Conference on Epigenetic Robotics (EpiRob 2007)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lund University Cognitive Studies</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2007</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.lucs.lu.se/LUCS/135/Hiolle.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Lund University</style></publisher><pub-location><style face="normal" font="default" size="100%">Piscataway, NJ, USA</style></pub-location><volume><style face="normal" font="default" size="100%">134</style></volume><pages><style face="normal" font="default" size="100%">45–52</style></pages><isbn><style face="normal" font="default" size="100%">91-974741-8-5</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Attachment bonds and positive affect help cognitive development and social interactions in infants and animals. In this paper we present a neural architecture to enable a robot to develop an attachment bond with a person or an object, and to discover the correct sensorimotor associations to maintain a desired affective state of well-being using a minimum amount of prior knowledge about the possible interactions with this object. 
We also discuss how our research on attachment bonds could further developmental robotics in the near future.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Arnaud J Blanchard</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Développement de Liens Affectifs Basés sur le Phénomène d'Empreinte pour Moduler l'Exploration et l'Imitation d'un Robot</style></title><secondary-title><style face="normal" font="default" size="100%">Enfance</style></secondary-title><translated-title><style face="normal" font="default" size="100%">Development of Affective Bonds Based on the Imprinting Phenomenon in Order to Modulate Exploration and Imitation in a Robot</style></translated-title></titles><dates><year><style  face="normal" font="default" size="100%">2007</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.cairn.info/revue-enfance-2007-1-page-35.htm</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">59</style></volume><pages><style face="normal" font="default" size="100%">35–45</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Les comportements des enfants varient en fonction du contexte, notamment en fonction des liens affectifs qu'ils développent avec d'autres personnes en présence. Cela influence par exemple leurs facultés à explorer ou imiter. Pour mieux comprendre ces phénomènes, nous proposons un modèle basé sur le phénomène de l'empreinte de liens affectifs et de leurs effets. 
Après avoir proposé des solutions pour simuler ces liens, nous montrerons comment nous pouvons les utiliser, où ils peuvent être utilisés afin de moduler les comportements d'exploration et d'imitation d'un robot réel. Finalement, nous discuterons du nouveau regard que peut apporter cette modélisation sur le comportement et le développement affectif des enfants.

An infant's behavior varies (depending on the context) to a large degree as a function of the affective bonds that they have with the people that are also present. This influences their ability to explore or imitate, for example. In order to better understand these phenomena, we propose a model of affective bonds and their effects based on the imprinting phenomenon. After proposing solutions for simulating these bonds, we show how we can use them to modulate exploratory and imitative behaviors in a real robot. Finally, we discuss the new light that this model sheds on the affective behavior and development of children.</style></abstract><issue><style face="normal" font="default" size="100%">1</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Pichler, Peter-Paul</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">An Evolving Ecosystems Approach to Generating Complex Agent Behaviour</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
IEEE Symposium on Artificial Life 2007, ALIFE'07</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2007</style></year><pub-dates><date><style  face="normal" font="default" size="100%">04/2007</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ieeexplore.ieee.org/document/4218900/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%"> Honolulu, HI</style></pub-location><pages><style face="normal" font="default" size="100%">303–310</style></pages><isbn><style face="normal" font="default" size="100%">1-4244-0701-X</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We propose an evolving ecosystem approach to evolving complex agent behaviour based on the principle of natural selection. The agents start with very limited functional design and morphology and neural controllers are concurrently evolved as functional wholes. The agents are ‘grounded’ in an increasingly complex environment by a complex model metabolism and interaction dynamics. Furthermore, we introduce a novel criterion for evaluating differential reproductive success aimed at maximising evolutionary freedom. 
We also present first experimental results suggesting that this approach may be conducive to widening the scope of artificial evolution for the generation of agents exhibiting non-trivial behaviours in a complex ecosystem.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Antoine Hiolle</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Arnaud J Blanchard</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Ana C R Paiva</style></author><author><style face="normal" font="default" size="100%">Rui Prada</style></author><author><style face="normal" font="default" size="100%">Rosalind W Picard</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Learning to Interact with the Caretaker: A Developmental Approach</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
Second International Conference on Affective Computing and Intelligent Interaction (ACII 2007)</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2007</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2007</style></date></pub-dates></dates><publisher><style face="normal" font="default" size="100%">Springer Berlin Heidelberg</style></publisher><pub-location><style face="normal" font="default" size="100%">Lisbon, Portugal</style></pub-location><volume><style face="normal" font="default" size="100%">4738</style></volume><pages><style face="normal" font="default" size="100%">422–433</style></pages><isbn><style face="normal" font="default" size="100%">978-3-540-74888-5</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">To build autonomous robots able to live and interact with humans in a real-world dynamic and uncertain environment, the design of architectures permitting robots to develop attachment bonds to humans and use them to build their own model of the world is a promising avenue, not only to improve human-robot interaction and adaptation to the environment, but also as a way to develop further cognitive and emotional capabilities. 
In this paper we present a neural architecture to enable a robot to develop an attachment bond with a person or an object, and to discover the correct sensorimotor associations to maintain a desired affective state of well-being using a minimum amount of prior knowledge about the possible interactions with this object.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>13</ref-type><contributors><secondary-authors><author><style face="normal" font="default" size="100%">Catherine Pelachaud</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Achieving Human-Like Qualities in Interactive Virtual and Physical Humanoids, Special issue of the International Journal of Humanoid Robotics</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2006</style></year></dates><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Arnaud J Blanchard</style></author><author><style face="normal" font="default" size="100%">Jacqueline Nadel</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Attachment Bonds for Human-Like Robots</style></title><secondary-title><style face="normal" font="default" size="100%">International Journal of Humanoid Robotics</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2006</style></year></dates><urls><web-urls><url><style face="normal" font="default" 
size="100%">http://www.worldscientific.com/doi/abs/10.1142/S0219843606000771</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">3</style></volume><pages><style face="normal" font="default" size="100%">301–320</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">If robots are to be truly integrated in humans' everyday environment, they cannot be simply (pre-)designed and directly taken &quot;off the shelf&quot; and embedded into a real-life setting. Also, technical excellence and human-like appearance and &quot;superficial&quot; traits of their behavior are not enough to make social robots trusted, believable, and accepted. Fuller and deeper integration into human environments would require that, like children, robots develop embedded in the social environment in which they will fulfill their roles. An important element to bootstrap and guide this integration is the establishment of affective bonds between the &quot;infant&quot; robot and the adults among whom it develops, from whom it learns, and who it will later have to look after. In this paper, we present a Perception–Action architecture and experiments to simulate imprinting — the establishment of strong attachment links with a &quot;caregiver&quot; — in a robot. Following recent theories, we do not consider imprinting as rigidly timed and irreversible, but as a more flexible phenomenon that allows for further adaptation as a result of reward-based learning through experience. 
After the initial imprinting, adaptation is achieved in the context of a history of &quot;affective&quot; interactions between the robot and a human, driven by &quot;distress&quot; and &quot;comfort&quot; responses in the robot.</style></abstract><issue><style face="normal" font="default" size="100%">3</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Arnaud J Blanchard</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Developing Affect-Modulated Behaviors: Stability, Exploration, Exploitation or Imitation?</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the Sixth International Workshop on Epigenetic Robotics</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lund University Cognitive Studies</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2006</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.lucs.lu.se/LUCS/128/BlanchardCanamero.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Lund University</style></publisher><pub-location><style face="normal" font="default" size="100%">Paris, France</style></pub-location><volume><style face="normal" font="default" size="100%">128</style></volume><pages><style face="normal" font="default" size="100%">17–24</style></pages><isbn><style face="normal" font="default" size="100%">91-974741-6-9</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Exploring the environment is essential for autonomous agents to learn new things 
and to consolidate past experiences and apply them to improve behavior. However, exploration is also risky as it exposes the agent to unknown, potentially overwhelming or dangerous situations. A trade-off must hence exist between activities such as seeking stability, autonomous exploration of the environment, imitation of novel actions performed by other agents, and taking advantage of opportunities offered by new situations and events. In this paper, we present a Perception-Action robotic architecture that achieves this tradeoff on the grounds of modulatory mechanisms based on notions of “well-being” and “affect”. We have implemented and tested this architecture using a Koala robot, and we present and discuss behavior of the robot in different contexts.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Did Garbo Care about the Uncanny Valley? Commentary to K.F. MacDorman and H. 
Ishiguro, “The uncanny advantage of using androids in cognitive and social science research”</style></title><secondary-title><style face="normal" font="default" size="100%">Interaction Studies</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2006</style></year><pub-dates><date><style  face="normal" font="default" size="100%">01/2006</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.ingentaconnect.com/content/jbp/is/2006/00000007/00000003/art00006</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">John Benjamins Publishing Company</style></publisher><volume><style face="normal" font="default" size="100%">7</style></volume><pages><style face="normal" font="default" size="100%">355–359</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><issue><style face="normal" font="default" size="100%">3</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Jacqueline Nadel</style></author><author><style face="normal" font="default" size="100%">M Simon</style></author><author><style face="normal" font="default" size="100%">P Canet</style></author><author><style face="normal" font="default" size="100%">R Soussignan</style></author><author><style face="normal" font="default" size="100%">P Blancard</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Human Responses to an Expressive Robot</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the Sixth International Workshop on Epigenetic 
Robotics</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lund University Cognitive Studies</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2006</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.lucs.lu.se/LUCS/128/Nadeletal.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Lund University</style></publisher><pub-location><style face="normal" font="default" size="100%">Paris, France</style></pub-location><volume><style face="normal" font="default" size="100%">128</style></volume><pages><style face="normal" font="default" size="100%">79–86</style></pages><isbn><style face="normal" font="default" size="100%">91-974741-6-9</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This paper reports the results of the first study comparing subjects' responses to robotic emotional facial displays and human emotional facial displays.
It describes step by step the building of believable emotional expressions in a robotic head, the problems raised by a comparative approach of robotic and human expressions, and the solutions found in order to ensure a valid comparison. Twenty adults and 15 children aged 3 were presented static (photos) and dynamic (2-D videoclips, or 3-D live) displays of emotional expressions presented by a robot or a person.
The study compares two dependent variables: emotional resonance (automatic facial feed-back during an emotional display) and emotion recognition (emotion labeling) according to partners (robot or person) and to the nature of the display (static or dynamic). Results for emotional resonance were similar with young children and with adults. Both groups resonated significantly more to dynamic displays than to static displays, be they robotic expressions or human expressions. In both groups, emotion recognition was easier for human expressions than for robotic ones.
Unlike children that recognized more easily emotional expressions dynamically displayed, adults scored higher with static displays thus reflecting a cognitive strategy independent from emotional resonance. Results are discussed in the perspective of the therapeutic use of this comparative approach with children with autism that are described as impaired in emotion sharing and communication.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Arnaud J Blanchard</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">J Burn</style></author><author><style face="normal" font="default" size="100%">M Wilson</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Modulation of Exploratory Behavior for Adaptation to the Context</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
AISB 2006 Symposium on Biologically Inspired Robotics (Biro-net)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2006</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://uhra.herts.ac.uk/handle/2299/9888</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">AISB Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Bristol, UK</style></pub-location><pages><style face="normal" font="default" size="100%">131–137</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">For autonomous agents (children, animals or robots), exploratory learning is essential as it allows them to take advantage of their past experiences in order to improve their reactions in any situation similar to a situation already experimented. We have already exposed in Blanchard and Canamero (2005) how a robot can learn which situations it should memorize and try to reach, but we expose here architectures allowing the robot to take initiatives and explore new situations by itself. However, exploring is a risky behavior and we propose to moderate this behavior using novelty and context based on observations of animals behaviors. After having implemented and tested these architectures, we present a very interesting emergent behavior which is low-level imitation modulated by context.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><secondary-authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Agents that Want and Like: Motivational and Emotional Roots of Cognition and Action. 
Papers from the AISB'05 Symposium</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.aisb.org.uk/publications/proceedings/aisb2005/2_Agents_Final.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">AISB</style></publisher><pub-location><style face="normal" font="default" size="100%">Hatfield, UK</style></pub-location><isbn><style face="normal" font="default" size="100%">1-902956-41-7</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Robert Lowe</style></author><author><style face="normal" font="default" size="100%">Nehaniv, Chrystopher L</style></author><author><style face="normal" font="default" size="100%">Daniel Polani</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">The Degree of Potential Damage in Agonistic Contests and its Effects on Social Aggression, Territoriality and Display Evolution</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
2005 IEEE Congress on Evolutionary Computation (CEC 2005)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year></dates><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><pub-location><style face="normal" font="default" size="100%">Edinburgh, Scotland</style></pub-location><pages><style face="normal" font="default" size="100%">351–358</style></pages><isbn><style face="normal" font="default" size="100%">0-7803-9363-5</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cos-Aguilera, Ignasi</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Gillian M Hayes</style></author><author><style face="normal" font="default" size="100%">Gillies, Andrew</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Joanna J Bryson</style></author><author><style face="normal" font="default" size="100%">Tony J Prescott</style></author><author><style face="normal" font="default" size="100%">Anil K Seth</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Ecological Integration of Affordances and Drives for Behaviour Selection</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
IJCAI 2005 Workshop on Modeling Natural Action Selection</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year></dates><pub-location><style face="normal" font="default" size="100%">Edinburgh, Scotland</style></pub-location><pages><style face="normal" font="default" size="100%">225–228</style></pages><isbn><style face="normal" font="default" size="100%">1-902956-40-9</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This paper shows a study of the integration of physiology and perception in a biologically inspired robotic architecture that learns behavioural patterns by interaction with the environment. This implements a hierarchical view of learning and behaviour selection which bases adaptation on a relationship between reinforcement and the agent’s inner motivations. This view ingrains together the basic principles necessary to explain the underlying processes of learning behavioural patterns and the way these change via interaction with the environment. 
These principles have been experimentally tested and the results are presented and discussed throughout the paper.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Emotion Understanding from the Perspective of Autonomous Robots Research</style></title><secondary-title><style face="normal" font="default" size="100%">Neural Networks</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year><pub-dates><date><style  face="normal" font="default" size="100%">05/2005</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.sciencedirect.com/science/article/pii/S0893608005000365</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">18</style></volume><pages><style face="normal" font="default" size="100%">445–455</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper, I discuss some of the contributions that modeling emotions in autonomous robots can make towards understanding human emotions-'as sited in the brain' and as used in our interactions with the environment-and emotions in general. Such contributions are linked, on the one hand, to the potential use of such robotic models as tools and 'virtual laboratories' to test and explore systematically theories and models of human emotions, and on the other hand to a modeling approach that fosters conceptual clarification and operationalization of the relevant aspects of theoretical notions and models. As illustrated by an overview of recent advances in the field, this area is still in its infancy. 
However, the work carried out already shows that we share many conceptual problems and interests with other disciplines in the affective sciences and that sound progress necessitates multidisciplinary efforts.</style></abstract><issue><style face="normal" font="default" size="100%">4</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Philippe Gaussier</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Jacqueline Nadel</style></author><author><style face="normal" font="default" size="100%">Darwin Muir</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Emotion Understanding: Robots as Tools and Models</style></title><secondary-title><style face="normal" font="default" size="100%">Emotional Development: Recent Research Advances</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year></dates><publisher><style face="normal" font="default" size="100%">Oxford University Press</style></publisher><pages><style face="normal" font="default" size="100%">235–258</style></pages><isbn><style face="normal" font="default" size="100%">0-19-85-2883-3 (Hbk) 0-19-85-2884-1 (Pbk)</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><section><style face="normal" font="default" size="100%">9</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Arnaud J Blanchard</style></author><author><style face="normal" font="default" size="100%">Lola 
Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Luc Berthouze</style></author><author><style face="normal" font="default" size="100%">Frédéric Kaplan</style></author><author><style face="normal" font="default" size="100%">Hideki Kozima</style></author><author><style face="normal" font="default" size="100%">Hiroyuki Yano</style></author><author><style face="normal" font="default" size="100%">Jürgen Konczak</style></author><author><style face="normal" font="default" size="100%">Giorgio Metta</style></author><author><style face="normal" font="default" size="100%">Jacqueline Nadel</style></author><author><style face="normal" font="default" size="100%">Giulio Sandini</style></author><author><style face="normal" font="default" size="100%">Georgi Stojanov</style></author><author><style face="normal" font="default" size="100%">Christian Balkenius</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">From Imprinting to Adaptation: Building a History of Affective Interaction</style></title><secondary-title><style face="normal" font="default" size="100%">Fifth International Workshop on Epigenetic Robotics: Modeling Cognitive Development in Robotic Systems (EpiRob2005)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year></dates><publisher><style face="normal" font="default" size="100%">Lund University Cognitive Studies</style></publisher><pages><style face="normal" font="default" size="100%">23–30</style></pages><isbn><style face="normal" font="default" size="100%">91-974741-4-2</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We present a Perception-Action architecture and experiments to simulate imprinting—the establishment of strong attachment links with a &quot;caregiver&quot;—in a robot. 
Following recent theories, we do not consider imprinting as rigidly timed and irreversible, but as a more flexible phenomenon that allows for further adaptation as a result of reward-based learning through experience. Our architecture reconciles these two types of perceptual learning traditionally considered as different and even incompatible. After the initial imprinting, adaptation is achieved in the context of a history of &quot;affective&quot; interactions between the robot and a human, driven by &quot;distress&quot; and &quot;comfort&quot; responses in the robot.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Avila-García, Orlando</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Hormonal Modulation of Perception in Motivation-Based Action Selection Architectures</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the Symposium on Agents that Want and Like: Motivational and Emotional Roots of Cognition and Action (SSAISB'05)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.aisb.org.uk/publications/proceedings/aisb2005/2_Agents_Final.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">AISB</style></publisher><pub-location><style face="normal" font="default" size="100%">University of Hertfordshire, Hatfield, UK</style></pub-location><pages><style face="normal" font="default" 
size="100%">9–16</style></pages><isbn><style face="normal" font="default" size="100%">1-902956-41-7</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">The animat approach to artificial intelligence proposes biologically-inspired control mechanisms for autonomous robots. One of the related subproblems is action selection or what to do next. Many action selection architectures have been proposed. Motivation-based architectures implement a combination between internal and external stimuli to choose the appropriate behavior. Recent studies have pointed out that a second order mechanism to control motivation-based architectures would dramatically improve their performance. Drawing on the notion of biological hormones we have modeled two of the functionalities ascribed to them in order to improve the adaptivity of motivation-based architectures. We have tested our hormone-like mechanisms in dynamic and unpredictable robotic scenarios. We analyze the results in terms of interesting behavioral phenomena that emerge from the interaction of these artificial hormones with the rest of architectural elements.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">French, Richard L B</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Introducing Neuromodulation to a Braitenberg Vehicle</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 2005 IEEE Int. Conf. 
on Robotics and Automation: Robots get Closer to Humans (ICRA'05)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year><pub-dates><date><style  face="normal" font="default" size="100%">04/2005</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ieeexplore.ieee.org/abstract/document/1570763/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Barcelona, Spain</style></pub-location><pages><style face="normal" font="default" size="100%">4199–4204</style></pages><isbn><style face="normal" font="default" size="100%">0-7803-8914-X</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Artificial neural networks are often used as the control systems for mobile robots. However, although these models usually claim inspiration from biology, they often lack an analogue of the biological phenomenon called neuromodulation. 
In this paper, we describe our initial work exploring a simple model of neuromodulation, used to provide a mobile robot with foraging behaviour.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cos-Aguilera, Ignasi</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Gillian M Hayes</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Motivation Driven Learning of Action Affordances</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the Symposium on Agents that Want and Like: Motivational and Emotional Roots of Cognition and Action (SSAISB'05)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://aisb.org.uk/wp-content/uploads/2019/12/2_Agents_Final.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">AISB</style></publisher><pub-location><style face="normal" font="default" size="100%">Hatfield, UK</style></pub-location><pages><style face="normal" font="default" size="100%">33–36</style></pages><isbn><style face="normal" font="default" size="100%">1-902956-41-7</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Survival in the animal realm often depends on the ability to elucidate the potentialities for action offered by every situation. 
This paper argues that affordance learning is a powerful ability for adaptive, embodied, situated agents, and presents a motivation-driven method for their learning. The method proposed considers the agent and its environment as a single unit, thus intrinsically relating agent's interactions to fluctuations of the agent's internal motivation. Being that the motivational state is an expression of the agent's physiology, the existing causality of interactions and their effect on the motivational state is exploited as a principle to learn object affordances. The hypothesis is tested in a Webots 4.0 simulator with a Khepera robot.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://aisb.org.uk/wp-content/uploads/2019/12/2_Agents_Final.pdf&quot;&gt;Download symposium proceedings&lt;/a&gt; (pdf)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Arnaud J Blanchard</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Demiris, Y</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Using Visual Velocity Detection to Achieve Synchronization in Imitation</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 3rd Int. 
Symposium on Imitation in Animals and Artifacts</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2005</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.aisb.org.uk/publications/proceedings/aisb2005/3_Imitation_Final.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">AISB</style></publisher><pub-location><style face="normal" font="default" size="100%">Hatfield, UK</style></pub-location><pages><style face="normal" font="default" size="100%">26–29</style></pages><isbn><style face="normal" font="default" size="100%">1-902956-42-5</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Synchronization and coordination are important mechanisms involved in imitation and social interaction. In this paper, we study different methods to improve the reactivity of agents to changes in their environment in different coordination tasks. In a robot synchronization task, we compare the differences between using only position detection or velocity detection. We first test an existing position detection approach, and then we compare the results with those obtained using a novel method that takes advantage of visual detection of velocity. 
We test and discuss the applicability of these two methods in several coordination scenarios, to conclude by seeing how to combine the advantages of both methods.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><secondary-authors><author><style face="normal" font="default" size="100%">Eva Hudlicka</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Architectures for Modeling Emotion: Cross-Disciplinary Foundations</style></title><secondary-title><style face="normal" font="default" size="100%">Papers from the 2004 AAAI Spring Symposium</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year></dates><publisher><style face="normal" font="default" size="100%">AAAI Press</style></publisher><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Robert Lowe</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Nehaniv, Chrystopher L</style></author><author><style face="normal" font="default" size="100%">Daniel Polani</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Jordan Pollack</style></author><author><style face="normal" font="default" size="100%">Mark A Bedau</style></author><author><style face="normal" font="default" size="100%">Phil Husbands</style></author><author><style face="normal" font="default" size="100%">Takashi Ikegami</style></author><author><style face="normal" font="default" size="100%">Richard A. 
Watson</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">The Evolution of Affect-Related Displays, Recognition and Related Strategies</style></title><secondary-title><style face="normal" font="default" size="100%">ALIFE IX: Proceeding of the 9th international conference on the simulation and synthesis of living systems</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year></dates><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pages><style face="normal" font="default" size="100%">176–181</style></pages><isbn><style face="normal" font="default" size="100%">9780262661836</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This paper presents an ecologically motivated, bottom-up approach to investigating the evolution of expression, perception and related behaviour of affective internal states that complements game-theoretic studies of the evolutionary success of animal display. Our results show that the perception of displays related to affect greatly influences both the types of display produced and also the survival prospects of agents. Relative to agents that do not perceive rival agent internal state, affect perceivers prosper if the initial environment in which they reside provides numerous opportunities for interaction with other agents and resources. Conversely, where the initial environment with sparse resources does not allow for regular interaction, ability to perceive affect is not as facilitatory to survival. 
Furthermore, the agents evolve particular display strategies distorting the expression of affect and greatly influencing the proportion of affect perceiving to nonaffect perceiving agents over evolutionary time.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cortés, Ulises</style></author><author><style face="normal" font="default" size="100%">Annicchiarico, Roberta</style></author><author><style face="normal" font="default" size="100%">Campana, Fabio</style></author><author><style face="normal" font="default" size="100%">Vázquez-Salceda, Javier</style></author><author><style face="normal" font="default" size="100%">Urdiales, Cristina</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Maite López</style></author><author><style face="normal" font="default" size="100%">Miquel Sànchez-Marrè</style></author><author><style face="normal" font="default" size="100%">Di Vincenzo, Sarah</style></author><author><style face="normal" font="default" size="100%">Carlo Caltagirone</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Intelligenza artificiale in medicina: progetto di una piattaforma mobile inserita in un ambiente intelligente per l'assistenza ai disabili e agli anziani</style></title><secondary-title><style face="normal" font="default" size="100%">Recenti Progressi in Medicina</style></secondary-title><translated-title><style face="normal" font="default" size="100%">Artificial intelligence in medicine: project of a mobile platform in an intelligent environment for the care of disabled and elderly people</style></translated-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year></dates><publisher><style face="normal" 
font="default" size="100%">Pensiero scientifico</style></publisher><volume><style face="normal" font="default" size="100%">95</style></volume><pages><style face="normal" font="default" size="100%">190–195</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Viene presentato un progetto basato sull'integrazione di nuove tecnologie e di Intelligenza artificiale per sviluppare uno strumento – e-tool – indirizzato alle persone disabili ed agli anziani. Una piattaforma mobile inserita all'interno di ambienti intelligenti (strutture di assistenza o abitazioni), controllata e gestita attraverso un'architettura multilivello, viene proposta come supporto sia per i pazienti che per i caregiver al fine di aumentare l'autonomia nella vita quotidiana.

A project based on the integration of new technologies and artificial intelligence to develop a device – e-tool – for disabled patients and elderly people is presented. A mobile platform in intelligent environments (skilled-care facilities and home-care), controlled and managed by a multi-level architecture, is proposed to support patients and caregivers to increase self-dependency in activities of daily living.</style></abstract><issue><style face="normal" font="default" size="100%">4</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Robert Lowe</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Nehaniv, Chrystopher L</style></author><author><style face="normal" font="default" size="100%">Daniel Polani</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Harald Schaub</style></author><author><style face="normal" font="default" size="100%">Frank Detje</style></author><author><style face="normal" font="default" size="100%">Ulrike Brüggermann</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Strategies in the Evolution of Affect Related Displays and Recognition</style></title><secondary-title><style face="normal" font="default" size="100%">The Logic Of Artificial Life: Abstracting and Synthesizing the Principles of Living Systems; Proc. 
6th German Workshop on Artificial Life 2004</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year></dates><publisher><style face="normal" font="default" size="100%">IOS Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Bamberg, Germany</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">A more realistic alternative to the game theoretic approach to measuring the behavioural success of animal display can be represented by affect related expression and perception. The current paper investigates the ways in which agents can use evolved affect related displays to manipulate the behaviour of affect perceiving rival agents to their survival advantage. 
</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cos-Aguilera, Ignasi</style></author><author><style face="normal" font="default" size="100%">Gillian M Hayes</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Using a SOFM to learn Object Affordances</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 5th Workshop of Physical Agents (WAF'04)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://uhra.herts.ac.uk/handle/2299/9905</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">University of Edinburgh</style></publisher><pub-location><style face="normal" font="default" size="100%">Girona, Spain</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Learning affordances can be defined as learning action potentials, i.e., learning that an object exhibiting certain regularities offers the possibility of performing a particular action. We propose a method to endow an agent with the capability of acquiring this knowledge by relating the object invariants with the potentiality of performing an action via interaction episodes with each object. We introduce a biologically inspired model to test this learning hypothesis and a set of experiments to check its validity in a Webots simulator with a Khepera robot in a simple environment. 
The experiment set aims to show the use of a GWR network to cluster the sensory input of the agent; furthermore, that the aforementioned algorithm for neural clustering can be used as a starting point to build agents that learn the relevant functional bindings between the cues in the environment and the internal needs of an agent.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;http://hdl.handle.net/2299/9905&quot;&gt;Download&lt;/a&gt; (Open Access)</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Avila-García, Orlando</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Stefan Schaal</style></author><author><style face="normal" font="default" size="100%">Auke Jan Ijspeert</style></author><author><style face="normal" font="default" size="100%">Aude Billard</style></author><author><style face="normal" font="default" size="100%">Sethu Vijayakumar</style></author><author><style face="normal" font="default" size="100%">John Hallam</style></author><author><style face="normal" font="default" size="100%">Jean-Arcady Meyer</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Using Hormonal Feedback to Modulate Action Selection in a Competitive Scenario</style></title><secondary-title><style face="normal" font="default" size="100%">From Animals to Animats 8: Proc. 8th Intl. Conf. 
on Simulation of Adaptive Behavior (SAB'04)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2004</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.researchgate.net/profile/Orlando_Avila-Garcia/publication/228958663_Using_Hormonal_Feedback_to_Modulate_Action_Selection_in_a_Competitive_Scenario/links/0deec533c8411ebe0c000000.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Los Angeles, USA</style></pub-location><pages><style face="normal" font="default" size="100%">243–252</style></pages><isbn><style face="normal" font="default" size="100%">9780262693417</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we investigate the use of hormonal feedback as a mechanism to modulate a &quot;motivation-based,&quot; homeostatic action selection mechanism (ASM) in a robot. We have framed our study in the context of a dynamic, multirobot, competitive &quot;two-resource&quot; action selection problem. The introduction of competitors has important consequences for action selection. We first show how the interaction between robots introduces new forms of environmental complexity that affect their viability. 
Secondly, we propose a &quot;hormone-like&quot; mechanism that, modulating the input of the ASM, tackles these new sources of complexity.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Avila-García, Orlando</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">René te Boekhorst</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Banzhaf, Wolfgang</style></author><author><style face="normal" font="default" size="100%">Christaller, Thomas</style></author><author><style face="normal" font="default" size="100%">Dittrich, Peter</style></author><author><style face="normal" font="default" size="100%">Kim, Jan T</style></author><author><style face="normal" font="default" size="100%">Ziegler, Jens</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Analyzing the Performance of &quot;Winner-Take-All&quot; and &quot;Voting-Based&quot; Action Selection Policies within the Two-Resource Problem</style></title><secondary-title><style face="normal" font="default" size="100%">Advances in Artificial Life: 7th European Conference, ECAL 2003</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Artificial Intelligence</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2003</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2003</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/chapter/10.1007%2F978-3-540-39432-7_79</style></url></web-urls></urls><publisher><style face="normal" font="default" 
size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Dortmund, Germany</style></pub-location><volume><style face="normal" font="default" size="100%">2801</style></volume><pages><style face="normal" font="default" size="100%">733–742</style></pages><isbn><style face="normal" font="default" size="100%">978-3-540-20057-4</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">The problem of action selection for an autonomous creature implies resolving conflicts between competing behavioral alternatives. These conflicts can be resolved either via competition, following a “winner-take-all” approach, or via cooperation in a “voting-based” approach. In this paper we present two robotic architectures implementing these approaches, and report on experiments we have performed to compare their underlying optimization policies. We have framed this study within the context of the “two-resource problem,” as it provides a widely used standard that favors systematic experimentation, analysis, and comparison of results.</style></abstract><notes><style face="normal" font="default" size="100%">&lt;a href=&quot;https://link.springer.com/chapter/10.1007%2F978-3-540-39432-7_79&quot;&gt;Download&lt;/a&gt;</style></notes></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cortés, Ulises</style></author><author><style face="normal" font="default" size="100%">Annicchiarico, Roberta</style></author><author><style face="normal" font="default" size="100%">Vázquez-Salceda, Javier</style></author><author><style face="normal" font="default" size="100%">Urdiales, Cristina</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" 
size="100%">Maite López</style></author><author><style face="normal" font="default" size="100%">Miquel Sànchez-Marrè</style></author><author><style face="normal" font="default" size="100%">Carlo Caltagirone</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Assistive technologies for the disabled and for the new generation of senior citizens: The e-Tools architecture</style></title><secondary-title><style face="normal" font="default" size="100%">AI Communications</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2003</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://content.iospress.com/articles/ai-communications/aic288</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IOS Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Amsterdam, The Netherlands</style></pub-location><volume><style face="normal" font="default" size="100%">16</style></volume><pages><style face="normal" font="default" size="100%">193–207</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we present our exploratory ideas about the integration of agent technology with other technologies to build specific e-tools for the disabled and for the new generation of senior citizens. &quot;e-Tools&quot; stands for Embedded Tools, as we aim to embed intelligent assistive devices in homes and other facilities, creating ambient intelligence environments to give support to patients and caregivers. 
In particular, we aim to explore the benefits of the concept of situated intelligence to build artefacts that will enhance the autonomy of the target user group in their daily life.</style></abstract><issue><style face="normal" font="default" size="100%">3</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cañamero, Lola D</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Robert Trappl</style></author><author><style face="normal" font="default" size="100%">Paolo Petta</style></author><author><style face="normal" font="default" size="100%">Sabine Payr</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Designing emotions for activity selection in autonomous agents</style></title><secondary-title><style face="normal" font="default" size="100%">Emotions in Humans and Artifacts</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2003</style></year></dates><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pages><style face="normal" font="default" size="100%">115–148</style></pages><isbn><style face="normal" font="default" size="100%">9780262201421</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This chapter advocates a &quot;bottom-up&quot; philosophy for the design of emotional systems for autonomous agents that is guided by functional concerns and considers the particular case of designing emotions as mechanisms for action selection. 
The concrete realization of these ideas implies that the design process must start with an analysis of the requirements that the features of the environment, the characteristics of the action-selection task, and the agent architecture impose on the emotional system. This is particularly important if we see emotions as mechanisms that aim at modifying or maintaining the relation of the agent with its (external and internal) environment (rather than modifying the environment itself) in order to preserve the agent's goals. Emotions can then be selected and designed according to the roles they play with respect to this relation. 
</style></abstract><section><style face="normal" font="default" size="100%">4</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cortés, Ulises</style></author><author><style face="normal" font="default" size="100%">Annicchiarico, Roberta</style></author><author><style face="normal" font="default" size="100%">Vázquez-Salceda, Javier</style></author><author><style face="normal" font="default" size="100%">Urdiales, Cristina</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Maite López</style></author><author><style face="normal" font="default" size="100%">Miquel Sànchez-Marrè</style></author><author><style face="normal" font="default" size="100%">Carlo Caltagirone</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">I Rudomín</style></author><author><style face="normal" font="default" size="100%">J Vázquez-Salceda</style></author><author><style face="normal" font="default" size="100%">J L Díaz de León Santiago</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">e-Tools: The use of Assistive Technologies to enhance disabled and senior citizens’ autonomy</style></title><secondary-title><style face="normal" font="default" size="100%">e-Health: Application of Computing Science in Medicine and Health Care</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2003</style></year></dates><publisher><style face="normal" font="default" size="100%">Instituto Politécnico National Press</style></publisher><pages><style face="normal" font="default" size="100%">119–132</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style 
face="normal" font="default" size="100%">In this paper we present our preliminary ideas about the integration of several technologies to build specific e-tools for the disabled and for the new generation of senior citizens. ‘e-Tools’ stands for Embedded Tools, as we aim to embed intelligent assistive devices in homes and other facilities, creating ambient intelligence environments to give support to patients and caregivers. In particular, we aim to explore the benefits of the concept of situated intelligence to build intelligent artefacts that will enhance the autonomy of the target group during their daily life. We present here a multi-level architecture and our preliminary research on navigation schemes for a robotic wheelchair.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cos-Aguilera, Ignasi</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Gillian M Hayes</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">U Nehmzow</style></author><author><style face="normal" font="default" size="100%">C Melhuish</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Learning Object Functionalities in the Context of Action Selection</style></title><secondary-title><style face="normal" font="default" size="100%">Towards Intelligent Mobile Robots, TIMR'03: 4th British Conference on Mobile Robotics</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2003</style></year></dates><pub-location><style face="normal" font="default" size="100%">University of the West of England, Bristol</style></pub-location><language><style face="normal" font="default" 
size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cos-Aguilera, Ignasi</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Gillian M Hayes</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Detjer, Frank</style></author><author><style face="normal" font="default" size="100%">Dörner, Dietrich</style></author><author><style face="normal" font="default" size="100%">Harald Schaub</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Motivation-driven learning of object affordances: First experiments using a simulated khepera robot</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 
5th International Conference in Cognitive Modelling (ICCM'03)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2003</style></year></dates><pub-location><style face="normal" font="default" size="100%">Bamberg, Germany</style></pub-location><pages><style face="normal" font="default" size="100%">57–62</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Avila-García, Orlando</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">René te Boekhorst</style></author><author><style face="normal" font="default" size="100%">Davey, Neil</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">U Nehmzow</style></author><author><style face="normal" font="default" size="100%">C Melhuish</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Optimization Criteria Underlying &quot;Winner-Take-All&quot; and &quot;Voting-Based&quot; Action Selection Policies</style></title><secondary-title><style face="normal" font="default" size="100%">Towards Intelligent Mobile Robots, TIMR'03: 4th British Conference on Mobile Robotics</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2003</style></year></dates><pub-location><style face="normal" font="default" size="100%">University of the West of England, Bristol</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" 
font="default" size="100%">Avila-García, Orlando</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Garijo, Francisco J</style></author><author><style face="normal" font="default" size="100%">Riquelme, José C</style></author><author><style face="normal" font="default" size="100%">Toro, Miguel</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Comparing a Voting-Based Policy with Winner-Takes-All to Perform Action Selection in Motivational Agents</style></title><secondary-title><style face="normal" font="default" size="100%">Advances in Artificial Intelligence – IBERAMIA 2002; Proc. 8th Ibero-American Conference on AI</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Computer Science</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2002</style></year></dates><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><pub-location><style face="normal" font="default" size="100%">Seville, Spain</style></pub-location><volume><style face="normal" font="default" size="100%">2527</style></volume><pages><style face="normal" font="default" size="100%">855–864</style></pages><isbn><style face="normal" font="default" size="100%">978-3-540-00131-7</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Embodied autonomous agents are systems that inhabit dynamic, unpredictable environments in which they try to satisfy a set of time-dependent goals or motivations in order to survive. One of the problems that this implies is action selection, the task of resolving conflicts between competing behavioral alternatives. 
We present an experimental comparison of two action selection mechanisms (ASM), implementing &quot;winner-takes-all&quot; (WTA) and &quot;voting-based&quot; (VB) policies respectively, modeled using a motivational behavior-based approach. This research shows the adequacy of these two ASM with respect to different sources of environmental complexity and the tendency of each of them to show different behavioral phenomena.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Avila-García, Orlando</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Comparison of Behavior Selection Architectures Using Viability Indicators</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. EPSRC/BBSRC International Workshop Biologically-Inspired Robotics: The Legacy of W. 
Grey Walter</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2002</style></year></dates><pub-location><style face="normal" font="default" size="100%">HP Labs Bristol, UK</style></pub-location><pages><style face="normal" font="default" size="100%">86–93</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Avila-García, Orlando</style></author><author><style face="normal" font="default" size="100%">Hafner, Elena</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">First Experiments Relating Behavior Selection Architectures to Environmental Complexity</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 2002 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS 2002)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2002</style></year></dates><publisher><style face="normal" font="default" size="100%">IEEE Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Lausanne, Switzerland</style></pub-location><pages><style face="normal" font="default" size="100%">3024–3029</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Assessing the performance of behavior selection architectures for autonomous robots is a complex task that depends on many factors. 
This paper reports a study comparing four motivated behavior-based architectures in different worlds with varying degrees and types of complexity, and analyzes performance results (in terms of viability, life span, and global life quality) relating architectural features to environmental complexity.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Nehaniv, Chrystopher L</style></author><author><style face="normal" font="default" size="100%">Daniel Polani</style></author><author><style face="normal" font="default" size="100%">Kerstin Dautenhahn</style></author><author><style face="normal" font="default" size="100%">René te Boekhorst</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Russell Standish</style></author><author><style face="normal" font="default" size="100%">Mark A Bedau</style></author><author><style face="normal" font="default" size="100%">Hussein A Abbass</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Meaningful Information, Sensor Evolution, and the Temporal Horizon of Embodied Organisms</style></title><secondary-title><style face="normal" font="default" size="100%">Artificial Life VIII: Proceedings of the Eighth International Conference on Artificial Life</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2002</style></year></dates><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Sydney, Australia</style></pub-location><pages><style face="normal" font="default" size="100%">345–349</style></pages><isbn><style face="normal" font="default" 
size="100%">9780262692816</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We survey and outline how an agent-centered, information-theoretic approach to meaningful information extending classical Shannon information theory by means of utility measures relevant for the goals of particular agents can be applied to sensor evolution for real and constructed organisms. Furthermore, we discuss the relationship of this approach to the programme of freeing artificial life and robotic systems from reactivity, by describing useful types of information with broader temporal horizon, for signaling, communication, affective grounding, two-process learning, individual learning, imitation and social learning, and episodic experiential information (memories, narrative, and culturally transmitted information).</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cañamero, Lola D</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Kerstin Dautenhahn</style></author><author><style face="normal" font="default" size="100%">Alan H Bond</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Bruce Edmonds</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Playing the emotion game with Feelix: What can a LEGO robot tell us about emotion?</style></title><secondary-title><style face="normal" font="default" size="100%">Socially Intelligent Agents: Creating Relationships with Computers and Robots</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2002</style></year></dates><publisher><style 
face="normal" font="default" size="100%">Kluwer Academic Publishers</style></publisher><pages><style face="normal" font="default" size="100%">69–76</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This chapter reports the motivations and choices underlying the design of Feelix, a simple humanoid LEGO robot that displays different emotions through facial expression in response to physical contact. It concludes by discussing what this simple technology can tell us about emotional expression and interaction.</style></abstract><section><style face="normal" font="default" size="100%">8</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><secondary-authors><author><style face="normal" font="default" size="100%">Aylett, Ruth</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Proceedings of the AISB'02 Symposium Animating Expressive Characters for Social Interactions</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2002</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.aisb.org.uk/publications/proceedings/aisb2002/AISB02_ExpressiveCharacters.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">AISB</style></publisher><pub-location><style face="normal" font="default" size="100%">Imperial College, London, UK</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Avila-García, 
Orlando</style></author><author><style face="normal" font="default" size="100%">Hafner, Elena</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Bridget Hallam</style></author><author><style face="normal" font="default" size="100%">Dario Floreano</style></author><author><style face="normal" font="default" size="100%">John Hallam</style></author><author><style face="normal" font="default" size="100%">Gillian M Hayes</style></author><author><style face="normal" font="default" size="100%">Jean-Arcady Meyer</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Relating Behavior Selection Architectures to Environmental Complexity</style></title><secondary-title><style face="normal" font="default" size="100%">From Animals to Animats: Proc. 7th International Conference on Simulation of Adaptive Behavior</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2002</style></year></dates><publisher><style face="normal" font="default" size="100%">MIT Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Edinburgh, Scotland</style></pub-location><pages><style face="normal" font="default" size="100%">127–128</style></pages><isbn><style face="normal" font="default" size="100%">978-0-262-58217-9</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Kerstin Dautenhahn</style></author><author><style face="normal" font="default" size="100%">Alan H Bond</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" 
size="100%">Bruce Edmonds</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Kerstin Dautenhahn</style></author><author><style face="normal" font="default" size="100%">Alan H Bond</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Bruce Edmonds</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Socially Intelligent Agents: Creating Relationships with Computers and Robots</style></title><secondary-title><style face="normal" font="default" size="100%">Socially Intelligent Agents: Creating Relationships with Computers and Robots</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2002</style></year></dates><publisher><style face="normal" font="default" size="100%">Kluwer Academic Publishers</style></publisher><pages><style face="normal" font="default" size="100%">1–20</style></pages><isbn><style face="normal" font="default" size="100%">978-0-306-47373-9</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This introduction explains the motivation to edit this book and provides an overview of the chapters included in this book. Main themes and common threads that can be found across different chapters are identified that might help the reader in navigating the book. 
</style></abstract><section><style face="normal" font="default" size="100%">1</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>6</ref-type><contributors><secondary-authors><author><style face="normal" font="default" size="100%">Kerstin Dautenhahn</style></author><author><style face="normal" font="default" size="100%">Alan H Bond</style></author><author><style face="normal" font="default" size="100%">Lola Cañamero</style></author><author><style face="normal" font="default" size="100%">Bruce Edmonds</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Socially Intelligent Agents: Creating Relationships with Computers and Robots</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2002</style></year></dates><publisher><style face="normal" font="default" size="100%">Kluwer Academic Publishers</style></publisher><isbn><style face="normal" font="default" size="100%">978-0-306-47373-9</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cañamero, Lola D</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Cañamero, Lola D</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Building Emotional Artifacts in Social Worlds: Challenges and Perspectives</style></title><secondary-title><style face="normal" font="default" size="100%">Emotional and Intelligent II: The Tangled Knot of Social Cognition; Papers from the 2001 AAAI Fall Symposium</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2001</style></year></dates><publisher><style face="normal" 
font="default" size="100%">AAAI Press</style></publisher><pub-location><style face="normal" font="default" size="100%">North Falmouth, Massachusetts</style></pub-location><pages><style face="normal" font="default" size="100%">22–30</style></pages><isbn><style face="normal" font="default" size="100%">978-1-57735-136-8</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This paper discusses ideas relative to the construction of emotional artifacts that have to interact in a social world, and in particular with humans. It first examines some of the ways in which emotions can enhance social interactions with artifacts, and some of the challenges posed to the designer. After considering the debate that opposes &quot;shallow&quot; versus &quot;deep&quot; modeling, it sketches some ways in which we can anchor emotions in the architecture of artifacts in order to make emotional interactions meaningful not only to the human, but also to the artifact itself. It finally outlines some of the cognitive capabilities that artifacts should incorporate for their emotions to be properly grounded and to give rise to rich social exchanges with humans.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><secondary-authors><author><style face="normal" font="default" size="100%">Cañamero, Lola D</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Emotional and Intelligent II: The Tangled Knot of Social Cognition. 
Papers from the 2001 AAAI Fall Symposium</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2001</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.aaai.org/Press/Reports/Symposia/Fall/fs-01-02.php</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">AAAI Press</style></publisher><pub-location><style face="normal" font="default" size="100%">North Falmouth, Massachusetts</style></pub-location><isbn><style face="normal" font="default" size="100%">978-1-57735-136-8</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cañamero, Lola D</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Cañamero, Lola D</style></author><author><style face="normal" font="default" size="100%">Paolo Petta</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Emotions and Adaptation in Autonomous Agents: A Design Perspective</style></title><secondary-title><style face="normal" font="default" size="100%">Cybernetics and Systems: An International Journal</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2001</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.tandfonline.com/doi/abs/10.1080/01969720120250</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Taylor &amp; Francis</style></publisher><volume><style face="normal" font="default" size="100%">32</style></volume><pages><style face="normal" font="default" size="100%">507–529</style></pages><language><style face="normal" font="default" 
size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Why would we want to endow artificial autonomous agents with emotions? The main answer to this question seems to rely on what has been called the functional view of emotions, arising from (analytic) studies of natural systems. In this paper, I examine to what extent this hypothesis can be applied to the (synthetic) investigation of artificial emotions and what are its implications for the design of emotional agents, the main approaches that can be appropriately used to model emotions in autonomous agents, and why situated autonomous agents provide a good framework to study the relation between emotion and adaptation.</style></abstract><issue><style face="normal" font="default" size="100%">5</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>13</ref-type><contributors><secondary-authors><author><style face="normal" font="default" size="100%">Cañamero, Lola D</style></author><author><style face="normal" font="default" size="100%">Paolo Petta</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Grounding Emotions in Adaptive Systems. 
Volume I</style></title><secondary-title><style face="normal" font="default" size="100%">Special Issue of Cybernetics and Systems: An International Journal</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2001</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.tandfonline.com/toc/ucbs20/32/5</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Taylor &amp; Francis</style></publisher><volume><style face="normal" font="default" size="100%">32</style></volume><language><style face="normal" font="default" size="100%">eng</style></language><issue><style face="normal" font="default" size="100%">5</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>13</ref-type><contributors><secondary-authors><author><style face="normal" font="default" size="100%">Cañamero, Lola D</style></author><author><style face="normal" font="default" size="100%">Paolo Petta</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Grounding Emotions in Adaptive Systems. 
Volume II</style></title><secondary-title><style face="normal" font="default" size="100%">Special Issue of Cybernetics and Systems: An International Journal</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2001</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.tandfonline.com/toc/ucbs20/32/6</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Taylor &amp; Francis</style></publisher><volume><style face="normal" font="default" size="100%">32</style></volume><language><style face="normal" font="default" size="100%">eng</style></language><issue><style face="normal" font="default" size="100%">6</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cañamero, Lola D</style></author><author><style face="normal" font="default" size="100%">Fredslund, Jakob</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">I Show You How I Like You—Can You Read it in My Face?</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE Transactions on Systems, Man and Cybernetics, Part A: Systems and Humans</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2001</style></year><pub-dates><date><style  face="normal" font="default" size="100%">09/2001</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ieeexplore.ieee.org/document/952719/</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">IEEE</style></publisher><volume><style face="normal" font="default" size="100%">31</style></volume><pages><style face="normal" font="default" size="100%">454–459</style></pages><language><style face="normal" 
font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We report work on a LEGO robot that displays different emotional expressions in response to physical stimulation, for the purpose of social interaction with humans. This is a first step toward our longer-term goal of exploring believable emotional exchanges to achieve plausible interaction with a simple robot. Drawing inspiration from theories of human basic emotions, we have implemented several prototypical expressions in the robot’s caricaturized face and conducted experiments to assess the recognizability of these expressions.</style></abstract><issue><style face="normal" font="default" size="100%">5</style></issue><accession-num><style face="normal" font="default" size="100%">7064042</style></accession-num></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Bisson, Gilles</style></author><author><style face="normal" font="default" size="100%">Nédellec, Claire</style></author><author><style face="normal" font="default" size="100%">Cañamero, Lola D</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Staab, S</style></author><author><style face="normal" font="default" size="100%">Maedche, A</style></author><author><style face="normal" font="default" size="100%">Nédellec, Claire</style></author><author><style face="normal" font="default" size="100%">Wiemer-Hastins, P</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Designing Clustering Methods for Ontology Building: The Mo'K Workbench</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. First Workshop on Ontology Learning. 
Workshop of the 14th European Conference on Artificial Intelligence (ECAI 2000)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2000</style></year></dates><pages><style face="normal" font="default" size="100%">13–18</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This paper describes Mo'K, a configurable workbench that supports the development of conceptual clustering methods for ontology building. Mo'K is intended to assist ontology developers in the exploratory process of defining the most suitable learning methods for a given task. To do so, it provides facilities for evaluation, comparison, characterization and elaboration of conceptual clustering methods. Also, the model underlying Mo'K permits a fine- grained definition of similarity measures and class construction operators, easing the tasks of method instantiation and configuration. This paper presents some experimental results that illustrate the suitability of the model to help characterize and assess the performance of different methods that learn semantic classes from parsed corpora.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>27</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cañamero, Lola D</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Designing Emotions for Activity Selection</style></title><secondary-title><style face="normal" font="default" size="100%">Dept. 
of Computer Science Technical Report DAIMI PB</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2000</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.daimi.au.dk/PB/545/PB-545.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">University of Aarhus, Denmark</style></publisher><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">This paper advocates a &quot;bottom-up&quot; philosophy for the design of emotional systems for autonomous agents that is guided by functional concerns, and considers the particular case of designing emotions as mechanisms for action selection. The concrete realization of these ideas implies that the design process must start with an analysis of the requirements that the features of the environment, the characteristics of the action-selection task, and the agent architecture impose on the emotional system. This is particularly important if we see emotions as mechanisms that aim at modifying or maintaining the relation of the agent with its (external and internal) environment (rather than modifying the environment itself) in order to preserve the agent's goals. 
Emotions can then be selected and designed according to the roles they play with respect to this relation.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">D Cañamero</style></author><author><style face="normal" font="default" size="100%">Walter Van de Velde</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Kerstin Dautenhahn</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Emotionally Grounded Social Interaction</style></title><secondary-title><style face="normal" font="default" size="100%">Human Cognition and Social Agent Technology</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Advances in Consciousness Research</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">2000</style></year></dates><number><style face="normal" font="default" size="100%">19</style></number><publisher><style face="normal" font="default" size="100%">John Benjamins Publishing Co.</style></publisher><pages><style face="normal" font="default" size="100%">137–162</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><section><style face="normal" font="default" size="100%">6</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cañamero, Lola D</style></author><author><style face="normal" font="default" size="100%">Fredslund, Jakob</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Kerstin Dautenhahn</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" 
size="100%">How Does It Feel? Emotional Interaction with a Humanoid LEGO Robot</style></title><secondary-title><style face="normal" font="default" size="100%">Socially Intelligent Agents: The Human in the Loop. Papers from the AAAI 2000 Fall Symposium</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2000</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.aaai.org/Papers/Symposia/Fall/2000/FS-00-04/FS00-04-006.pdf</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">AAAI Press</style></publisher><pub-location><style face="normal" font="default" size="100%">North Falmouth, Massachusetts</style></pub-location><pages><style face="normal" font="default" size="100%">23–28</style></pages><isbn><style face="normal" font="default" size="100%">978-1-57735-127-6</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We report work on a LEGO robot capable of displaying several emotional expressions in response to physical contact. Our motivation has been to explore believable emotional exchanges to achieve plausible interaction with a simple robot. We have worked toward this goal in two ways. First, acknowledging the importance of physical manipulation in children's interactions, interaction with the robot is through tactile stimulation; the various kinds of stimulation that can elicit the robot's emotions are grounded in a model of emotion activation based on different stimulation patterns. Second, emotional states need to be clearly conveyed. We have drawn inspiration from theories of human basic emotions with associated universal facial expressions, which we have implemented in a caricaturized face. 
We have conducted experiments on children and adults to assess the recognizability of these expressions, and observed how people spontaneously interacting with Feelix respond to its emotional displays.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>27</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cañamero, Lola D</style></author><author><style face="normal" font="default" size="100%">Fredslund, Jakob</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">I Show You How I Like You: Human-Robot Interaction through Emotional Expression and Tactile Stimulation</style></title><secondary-title><style face="normal" font="default" size="100%">Dept. of Computer Science Technical Report DAIMI PB 544</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2000</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://ojs.statsbiblioteket.dk/index.php/daimipb/article/view/7078</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">University of Aarhus, Denmark</style></publisher><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We report work on a LEGO robot capable of displaying several emotional expressions in response to physical contact. Our motivation has been to explore believable emotional exchanges to achieve plausible interaction with a simple robot. We have worked toward this goal in two ways. First, acknowledging the importance of physical manipulation in children's interactions, interaction with the robot is through tactile stimulation; the various kinds of stimulation that can elicit the robot's emotions are grounded in a model of emotion activation based on different stimulation patterns. 
Second, emotional states need to be clearly conveyed. We have drawn inspiration from theories of human basic emotions with associated universal facial expressions, which we have implemented in a caricaturized face. We have conducted experiments on both children and adults to assess the recognizability of these expressions.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Josep Lluís Arcos</style></author><author><style face="normal" font="default" size="100%">D Cañamero</style></author><author><style face="normal" font="default" size="100%">Ramon López de Mántaras</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Althoff, Klaus-Dieter</style></author><author><style face="normal" font="default" size="100%">Bergmann, Ralph</style></author><author><style face="normal" font="default" size="100%">L Karl Branting</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Affect-Driven CBR to Generate Expressive Music</style></title><secondary-title><style face="normal" font="default" size="100%">Case-Based Reasoning Research and Development. 
Third International Conference on Case-Based Reasoning, ICCBR'99</style></secondary-title><tertiary-title><style face="normal" font="default" size="100%">Lecture Notes in Artificial Intelligence</style></tertiary-title></titles><dates><year><style  face="normal" font="default" size="100%">1999</style></year></dates><publisher><style face="normal" font="default" size="100%">Springer Berlin Heidelberg</style></publisher><volume><style face="normal" font="default" size="100%">1650</style></volume><pages><style face="normal" font="default" size="100%">1–13</style></pages><isbn><style face="normal" font="default" size="100%">978-3-540-66237-2</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We present an extension of an existing system, called SaxEx, capable of generating expressive musical performances based on Case-Based Reasoning (CBR) techniques. The previous version of SaxEx did not take into account the possibility of using affective labels to guide the CBR task. This paper discusses the introduction of such affective knowledge to improve the retrieval capabilities of the system. 
Three affective dimensions are considered—tender-aggressive, sad-joyful, and calm-restless that allow the user to declaratively instruct the system to perform according to any combination of five qualitative values along these three dimensions.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">D Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Alexis Drogoul</style></author><author><style face="normal" font="default" size="100%">Jean-Arcady Meyer</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Emotions pour les agents situés</style></title><secondary-title><style face="normal" font="default" size="100%">Intelligence Artificielle Située</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1999</style></year></dates><publisher><style face="normal" font="default" size="100%">Hermès science publications</style></publisher><pub-location><style face="normal" font="default" size="100%">Paris</style></pub-location><isbn><style face="normal" font="default" size="100%">978-274620076-0</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Contrairement à l'intelligence artificielle (IA) symbolique, l'IA située, qui adopte une vision plus large de l'intelligence &quot;complète&quot; qui ne la détache pas de sa réalisation corporelle et qui s'intéresse à son rôle adaptatif, ouvre naturellement la porte à l'étude des rôles des émotions d'un point de vue évolutif et à leur intégration dans les agents autonomes ou animats comme des mécanismes favorisant l'adaptation. 
Cet article examine les raisons pour lesquelles il semble intéressant de doter d'émotions les agents situés, en établissant un lien avec les émotions naturelles, ainsi que les différentes approches envisageables permettant de modéliser les émotions dans le cadre de l'IA située, et les différents problèmes qui en découlent. 

The notion of intelligence underlying symbolic Artificial Intelligence (AI) is tightly coupled to the idea of rationality. On the contrary, situated AI, with a wider view of intelligence that focuses on its embodiment and its adaptive value, allows to study emotional phenomena in animats from the point of view of evolution, and to investigate their adaptive roles. This paper examines the main reasons why it seems interesting to endow animats with emotions, establishing a parallel with natural emotions. It also considers the main approaches that can be used to model emotions within situated AI, and the problems they pose. 
</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Eduard Giménez</style></author><author><style face="normal" font="default" size="100%">D Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">First Proposal for an Agent Architecture for Team and Multiple Task Coordination: A Case Study in Robotic Soccer</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. 2nd Catalan Conference on Artificial Intelligence (CCIA'99)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1999</style></year><pub-dates><date><style  face="normal" font="default" size="100%">10/1999</style></date></pub-dates></dates><pub-location><style face="normal" font="default" size="100%">Girona, Spain</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">In this paper we propose a general agent architecture from which heterogeneous agents can be easily derived. Based on roles and ploy patterns, each agent is capable of performing its own individual duty while cooperating with other agents. 
We also describe a mechanism for real-time coordination of multiple behaviors.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">D Cañamero</style></author><author><style face="normal" font="default" size="100%">Josep Lluís Arcos</style></author><author><style face="normal" font="default" size="100%">Ramon López de Mántaras</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Imitating Human Performances to Automatically Generate Expressive Jazz Ballads</style></title><secondary-title><style face="normal" font="default" size="100%">Proc. AISB'99 Symposium on Imitation in Animals and Artifacts</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1999</style></year></dates><publisher><style face="normal" font="default" size="100%">AISB</style></publisher><pub-location><style face="normal" font="default" size="100%">Edinburgh, Scotland</style></pub-location><pages><style face="normal" font="default" size="100%">115–20</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">One of the main problems with the automatic generation of expressive musical performances is to grasp the way in which human performers use musical knowledge that is not explicitly noted in musical scores. Moreover, this knowledge is tacit, difficult to verbalize, and therefore it must be acquired through a process of observation, imitation, and experimentation. For this reason, AI approaches based on declarative knowledge representations have serious limitations. An alternative approach is that of directly using the implicit knowledge that is in examples from recordings of human performances. 
In this paper, we describe a case-based reasoning system that generates expressive musical performances imitating examples of expressive human performances.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">D Cañamero</style></author><author><style face="normal" font="default" size="100%">Vincent Corruble</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Joan Bliss</style></author><author><style face="normal" font="default" size="100%">Roger Säljö</style></author><author><style face="normal" font="default" size="100%">Paul Light</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Situated Cognition: A Challenge to Artificial Intelligence?</style></title><secondary-title><style face="normal" font="default" size="100%">Learning Sites: Social and Technological Contexts for Learning</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1999</style></year></dates><publisher><style face="normal" font="default" size="100%">Elsevier</style></publisher><pages><style face="normal" font="default" size="100%">223–235</style></pages><isbn><style face="normal" font="default" size="100%">978-0080433509</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><section><style face="normal" font="default" size="100%">17</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Bart de Boer</style></author><author><style face="normal" font="default" size="100%">D Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Joan 
Bliss</style></author><author><style face="normal" font="default" size="100%">Roger Säljö</style></author><author><style face="normal" font="default" size="100%">Paul Light</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Situated Learning in Autonomous Agents</style></title><secondary-title><style face="normal" font="default" size="100%">Learning Sites: Social and Technological Contexts for Learning</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1999</style></year></dates><publisher><style face="normal" font="default" size="100%">Elsevier</style></publisher><pages><style face="normal" font="default" size="100%">236–248</style></pages><isbn><style face="normal" font="default" size="100%">978-0080433509</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language><section><style face="normal" font="default" size="100%">18</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">D Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Hans-Jorg Bullinger</style></author><author><style face="normal" font="default" size="100%">Jurgen Ziegler</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">What Emotions are Necessary for HCI?</style></title><secondary-title><style face="normal" font="default" size="100%">Human-Computer Interaction: Ergonomics and User Interfaces Vol. 
1</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1999</style></year></dates><publisher><style face="normal" font="default" size="100%">Taylor &amp; Francis</style></publisher><pub-location><style face="normal" font="default" size="100%">Munich, Germany</style></pub-location><pages><style face="normal" font="default" size="100%">838–842</style></pages><isbn><style face="normal" font="default" size="100%">978-080583391-1</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Josep Lluís Arcos</style></author><author><style face="normal" font="default" size="100%">D Cañamero</style></author><author><style face="normal" font="default" size="100%">Ramon López de Mántaras</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Affect-Driven Generation of Expressive Musical Performances</style></title><secondary-title><style face="normal" font="default" size="100%">Emotional and Intelligent: The Tangled Knot of Cognition. 
Papers from the 1998 AAAI Fall Symposium</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1998</style></year></dates><publisher><style face="normal" font="default" size="100%">AAAI Press</style></publisher><pages><style face="normal" font="default" size="100%">1–6</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><secondary-authors><author><style face="normal" font="default" size="100%">D Cañamero</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Emotional and Intelligent: The Tangled Knot of Cognition. Papers from the 1998 AAAI Fall Symposium</style></title></titles><dates><year><style  face="normal" font="default" size="100%">1998</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.aaai.org/Press/Reports/Symposia/Fall/fs-98-03.php</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">AAAI Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Orlando, Florida</style></pub-location><isbn><style face="normal" font="default" size="100%">978-1-57735-077-4</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><secondary-authors><author><style face="normal" font="default" size="100%">D Cañamero</style></author><author><style face="normal" font="default" size="100%">Chisato Numaoka</style></author><author><style face="normal" font="default" size="100%">Paolo Petta</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Grounding Emotions in Adaptive Systems. 
Papers of the workshop held during the Fifth International Conference of The Society for Adaptive Behavior (SAB'98)</style></title></titles><dates><year><style  face="normal" font="default" size="100%">1998</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.ofai.at/~paolo.petta/conf/sab98/sab98ws.html</style></url></web-urls></urls><pub-location><style face="normal" font="default" size="100%">University of Zurich, Switzerland</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">D Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">D Cañamero</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Issues in the Design of Emotional Agents</style></title><secondary-title><style face="normal" font="default" size="100%">Emotional and Intelligent: The Tangled Knot of Cognition. 
Papers from the 1998 AAAI Fall Symposium</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1998</style></year></dates><publisher><style face="normal" font="default" size="100%">AAAI Press</style></publisher><pages><style face="normal" font="default" size="100%">49–54</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><secondary-authors><author><style face="normal" font="default" size="100%">D Cañamero</style></author><author><style face="normal" font="default" size="100%">M Van Someren</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Learning in Humans and Machines. Notes of the ECML'98 Workshop Human Learning Meets Machine Learning</style></title></titles><dates><year><style  face="normal" font="default" size="100%">1998</style></year></dates><pub-location><style face="normal" font="default" size="100%">Technische Universität Chemnitz, Germany</style></pub-location><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>27</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">D Cañamero</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Hormonal Model of Emotions for Behavior Control</style></title><secondary-title><style face="normal" font="default" size="100%">VUB AI-Lab Memo</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1997</style></year></dates><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" 
version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">D Cañamero</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">W Lewis Johnson</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Modeling Motivations and Emotions as a Basis for Intelligent Behavior</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the First International Conference on Autonomous Agents (Agents'97)</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1997</style></year></dates><publisher><style face="normal" font="default" size="100%">The ACM Press</style></publisher><pub-location><style face="normal" font="default" size="100%">Marina del Rey, CA, USA</style></pub-location><pages><style face="normal" font="default" size="100%">148–155</style></pages><isbn><style face="normal" font="default" size="100%">0-89791-877-0</style></isbn><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">D Cañamero</style></author><author><style face="normal" font="default" size="100%">Walter Van de Velde</style></author></authors><secondary-authors><author><style face="normal" font="default" size="100%">Kerstin Dautenhahn</style></author></secondary-authors></contributors><titles><title><style face="normal" font="default" size="100%">Socially Emotional: Using Emotions to Ground Social Interaction</style></title><secondary-title><style face="normal" font="default" size="100%">Socially Intelligent Agents. 
Papers from the 1997 AAAI Fall Symposium</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">1997</style></year></dates><publisher><style face="normal" font="default" size="100%">The AAAI Press</style></publisher><pages><style face="normal" font="default" size="100%">10–15</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record></records></xml>