@article{Beck2013Interpretation,
  title    = {Interpretation of Emotional Body Language Displayed by a Humanoid Robot: A Case Study with Children},
  journal  = {International Journal of Social Robotics},
  volume   = {5},
  year     = {2013},
  pages    = {325{\textendash}334},
  abstract = {The work reported in this paper focuses on giving humanoid robots the capacity to express emotions with their body. Previous results show that adults are able to interpret different key poses displayed by a humanoid robot and also that changing the head position affects the expressiveness of the key poses in a consistent way. Moving the head down leads to decreased arousal (the level of energy) and valence (positive or negative emotion), whereas moving the head up produces an increase along these dimensions. Hence, changing the head position during an interaction should send intuitive signals. The study reported in this paper tested children{\textquoteright}s ability to recognize the emotional body language displayed by a humanoid robot. The results suggest that body postures and head position can be used to convey emotions during child-robot interaction.},
  keywords = {emotion, emotional body language, perception, social robotics},
  issn     = {1875-4791},
  doi      = {10.1007/s12369-013-0193-z},
  url      = {https://link.springer.com/article/10.1007/s12369-013-0193-z},
  author   = {Beck, Aryel and Ca{\~n}amero, Lola and Hiolle, Antoine and Damiano, Luisa and Cosi, Piero and Tesser, Fabio and Sommavilla, Giacomo}
}

@inproceedings{Beck2013Perlin,
  title     = {Using Perlin Noise to Generate Emotional Expressions in a Robot},
  booktitle = {Proc. Annual Meeting of the Cognitive Science Society (CogSci 2013)},
  year      = {2013},
  pages     = {1845{\textendash}1850},
  publisher = {Cognitive Science Society},
  address   = {Berlin, Germany},
  abstract  = {The development of social robots that convey emotion with their bodies---instead of or in conjunction with their faces---is an increasingly active research topic in the field of human-robot interaction (HRI). Rather than focusing either on postural or on dynamics aspects of bodily expression in isolation, we present a model and an empirical study where we combine both elements and produce expressive behaviors by adding dynamic elements (in the form of Perlin noise) to a subset of static postures prototypical of basic emotions, with the aim of creating expressions easily understandable by children and at the same time lively and flexible enough to be believable and engaging. Results show that the noise increases the recognition rate of the emotions portrayed by the robot.},
  isbn      = {978-0-9768318-9-1},
  url       = {https://mindmodeling.org/cogsci2013/papers/0343/index.html},
  author    = {Beck, Aryel and Hiolle, Antoine and Ca{\~n}amero, Lola}
}
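Note: the technique in the CogSci 2013 paper above, overlaying Perlin noise on a static emotional key pose so the posture stays recognizable but no longer looks frozen, can be illustrated with a short sketch. The Python below is a minimal illustration of the general idea, not the authors' implementation: the joint names follow Nao's naming, but the pose values, amplitude, and frequency are invented for the example, and the noise function is textbook 1D Perlin gradient noise.

import math
import random

# Permutation table for the gradient hash (classic Perlin setup).
random.seed(7)
_base = list(range(256))
random.shuffle(_base)
PERM = _base + _base  # doubled so PERM[i + 1] never overruns

def _fade(t):
    # Perlin's quintic fade: smooth first and second derivatives at 0 and 1.
    return t * t * t * (t * (t * 6 - 15) + 10)

def _grad(h, x):
    # 1D "gradient": slope of +1 or -1 chosen by the hashed lattice point.
    return x if h % 2 == 0 else -x

def perlin1d(x):
    """Classic 1D Perlin (gradient) noise, roughly in [-1, 1]."""
    i = int(math.floor(x)) & 255
    xf = x - math.floor(x)
    u = _fade(xf)
    a = _grad(PERM[i], xf)
    b = _grad(PERM[i + 1], xf - 1.0)
    return a + u * (b - a)

# Hypothetical static key pose (joint name -> angle in radians).
SAD_POSE = {"HeadPitch": 0.4, "LShoulderPitch": 1.4, "RShoulderPitch": 1.4}

def noisy_pose(pose, t, amplitude=0.05, frequency=0.5):
    """Overlay low-amplitude Perlin noise on a static key pose at time t.
    The per-joint offset 17.3 * k decorrelates the joints' movements."""
    return {joint: angle + amplitude * perlin1d(frequency * t + 17.3 * k)
            for k, (joint, angle) in enumerate(pose.items())}

# Usage: stream noisy_pose(SAD_POSE, t) to the motor controller at, say, 20 Hz.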
@inproceedings{Nalin2012Adaptation,
  title     = {Children{\textquoteright}s Adaptation in Multi-session Interaction with a Humanoid Robot},
  booktitle = {2012 IEEE RO-MAN: The 21st IEEE International Symposium on Robot and Human Interactive Communication},
  year      = {2012},
  pages     = {351{\textendash}357},
  publisher = {IEEE},
  abstract  = {This work presents preliminary observations from a study of children (N=19, age 5{\textendash}12) interacting in multiple sessions with a humanoid robot in a scenario involving game activities. The main purpose of the study was to see how their perception of the robot, their engagement, and their enjoyment of the robot as a companion evolve across multiple interactions, separated by one to two weeks. However, an interesting phenomenon was observed during the experiment: most of the children soon adapted to the behaviors of the robot in terms of speech timing, speed and tone, verbal input formulation, nodding, gestures, etc. We describe the experimental setup and the system, our observations, and preliminary analysis results, which open interesting questions for further research.},
  issn      = {1944-9445},
  doi       = {10.1109/ROMAN.2012.6343778},
  url       = {http://ieeexplore.ieee.org/document/6343778/},
  author    = {Nalin, Marco and Baroni, Ilaria and Kruijff-Korbayov{\'a}, Ivana and Ca{\~n}amero, Lola and Lewis, Matthew and Beck, Aryel and Cuay{\'a}huitl, Heriberto and Sanna, Alberto}
}

@article{Beck2012EmotionalBody,
  title     = {Emotional Body Language Displayed by Artificial Agents},
  journal   = {ACM Transactions on Interactive Intelligent Systems},
  volume    = {2},
  year      = {2012},
  pages     = {2:1{\textendash}2:29},
  publisher = {ACM},
  address   = {New York, NY},
  abstract  = {Complex and natural social interaction between artificial agents (computer-generated or robotic) and humans necessitates the display of rich emotions in order to be believable, socially relevant, and accepted, and to generate the natural emotional responses that humans show in the context of social interaction, such as engagement or empathy. Whereas some robots use faces to display (simplified) emotional expressions, for other robots such as Nao, body language is the best medium available given their inability to convey facial expressions. Displaying emotional body language that can be interpreted whilst interacting with the robot should significantly improve naturalness. This research investigates the creation of an affect space for the generation of emotional body language to be displayed by humanoid robots. To do so, three experiments investigating how emotional body language displayed by agents is interpreted were conducted. The first experiment compared the interpretation of emotional body language displayed by humans and agents. The results showed that emotional body language displayed by an agent or a human is interpreted in a similar way in terms of recognition. Following these results, emotional key poses were extracted from an actor{\textquoteright}s performances and implemented in a Nao robot. The interpretation of these key poses was validated in a second study, where it was found that participants were better than chance at interpreting the key poses displayed. Finally, an affect space was generated by blending key poses and validated in a third study. Overall, these experiments confirmed that body language is an appropriate medium for robots to display emotions and suggest that an affect space for body expressions can be used to improve the expressiveness of humanoid robots.},
  issn      = {2160-6455},
  doi       = {10.1145/2133366.2133368},
  url       = {https://dl.acm.org/doi/10.1145/2133366.2133368},
  author    = {Beck, Aryel and Stevens, Brett and Bard, Kim A. and Ca{\~n}amero, Lola}
}

@article{Belpaeme2012Multimodal,
  title    = {Multimodal Child-Robot Interaction: Building Social Bonds},
  journal  = {Journal of Human-Robot Interaction},
  volume   = {1},
  year     = {2012},
  pages    = {33{\textendash}53},
  abstract = {For robots to interact effectively with human users they must be capable of coordinated, timely behavior in response to social context. The Adaptive Strategies for Sustainable Long-Term Social Interaction (ALIZ-E) project focuses on the design of long-term, adaptive social interaction between robots and child users in real-world settings. In this paper, we report on the iterative approach taken to scientific and technical developments toward this goal: advancing individual technical competencies and integrating them to form an autonomous robotic system for evaluation {\textquotedblleft}in the wild.{\textquotedblright} The first evaluation iterations have shown the potential of this methodology in terms of adaptation of the robot to the interactant and the resulting influences on engagement. This sets the foundation for an ongoing research program that seeks to develop technologies for social robot companions.},
  doi      = {10.5898/JHRI.1.2.Belpaeme},
  url      = {https://dl.acm.org/doi/10.5555/3109688.3109691},
  author   = {Belpaeme, Tony and Baxter, Paul E. and Read, Robin and Wood, Rachel and Cuay{\'a}huitl, Heriberto and Kiefer, Bernd and Racioppa, Stefania and Kruijff-Korbayov{\'a}, Ivana and Athanasopoulos, Georgios and Enescu, Valentin and Looije, Rosemarijn and Neerincx, Mark A. and Demiris, Yiannis and Ros-Espinoza, Raquel and Beck, Aryel and Ca{\~n}amero, Lola and Lewis, Matthew and Baroni, Ilaria and Nalin, Marco and Cosi, Piero and Paci, Giulio and Tesser, Fabio and Sommavilla, Giacomo and Humbert, Remi}
}

@inproceedings{Beck2011Children,
  title     = {Children Interpretation of Emotional Body Language Displayed by a Robot},
  booktitle = {Proc. 3rd International Conference on Social Robotics (ICSR 2011)},
  year      = {2011},
  pages     = {62{\textendash}70},
  publisher = {Springer},
  address   = {Amsterdam, The Netherlands},
  abstract  = {Previous results show that adults are able to interpret different key poses displayed by the robot and also that changing the head position affects the expressiveness of the key poses in a consistent way. Moving the head down leads to decreased arousal (the level of energy), valence (positive or negative) and stance (approaching or avoiding), whereas moving the head up produces an increase along these dimensions [1]. Hence, changing the head position during an interaction should send intuitive signals which could be used during an interaction. The ALIZ-E target group are children between the ages of 8 and 11. Existing results suggest that they would be able to interpret human emotional body language [2, 3]. Based on these results, an experiment was conducted to test whether the results of [1] also apply to children. If so, body postures and head position could be used to convey emotions during an interaction.},
  isbn      = {978-3-642-25504-5},
  doi       = {10.1007/978-3-642-25504-5_7},
  url       = {https://link.springer.com/chapter/10.1007/978-3-642-25504-5_7},
  author    = {Beck, Aryel and Ca{\~n}amero, Lola and Damiano, Luisa and Sommavilla, Giacomo and Tesser, Fabio and Cosi, Piero}
}
@inproceedings{Beck2010Interpretation,
  title     = {Interpretation of Emotional Body Language Displayed by Robots},
  booktitle = {Proc. 3rd International Workshop on Affective Interaction in Natural Environments (AFFINE{\textquoteright}10)},
  year      = {2010},
  pages     = {37{\textendash}42},
  publisher = {ACM},
  address   = {Firenze, Italy},
  abstract  = {In order for robots to be socially accepted and generate empathy, they must display emotions. For robots such as Nao, body language is the best medium available, as they do not have the ability to display facial expressions. Displaying emotional body language that can be interpreted whilst interacting with the robot should greatly improve its acceptance. This research investigates the creation of an "Affect Space" for the generation of emotional body language that could be displayed by robots. An Affect Space is generated by "blending" (i.e., interpolating between) different emotional expressions to create new ones. An Affect Space for body language based on the Circumplex Model of emotions has been created. The experiment reported in this paper investigated the perception of specific key poses from the Affect Space. The results suggest that this Affect Space for body expressions can be used to improve the expressiveness of humanoid robots. In addition, early results of a pilot study are described. The pilot revealed that context helps human subjects improve their recognition rate during a human-robot imitation game, and that this recognition in turn leads to a better outcome of the interactions.},
  isbn      = {978-1-4503-0170-1},
  doi       = {10.1145/1877826.1877837},
  author    = {Beck, Aryel and Hiolle, Antoine and Mazel, Alexandre and Ca{\~n}amero, Lola}
}
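Note: the "blending" operation described in the AFFINE{\textquoteright}10 abstract above amounts to joint-wise interpolation between emotional key poses. The following Python is a minimal sketch of that idea; the pose dictionaries and the blend weight are illustrative assumptions, not values from the paper.

# Hypothetical key poses (joint name -> angle in radians); Nao-style joint
# names, with values invented for the example.
SAD_POSE   = {"HeadPitch": 0.4,  "LShoulderPitch": 1.4, "RShoulderPitch": 1.4}
HAPPY_POSE = {"HeadPitch": -0.3, "LShoulderPitch": 1.0, "RShoulderPitch": 1.0}

def blend_poses(pose_a, pose_b, w):
    """Joint-wise linear interpolation between two emotional key poses.
    w = 0 reproduces pose_a, w = 1 reproduces pose_b; intermediate weights
    populate the region of the Affect Space between the two prototypes."""
    return {j: (1.0 - w) * pose_a[j] + w * pose_b[j] for j in pose_a}

# A posture halfway between prototypical sadness and happiness:
midway = blend_poses(SAD_POSE, HAPPY_POSE, 0.5)

In the general scheme the abstract describes, each prototype pose sits at its valence/arousal coordinates on the Circumplex Model, so a target point in that space can be expressed by blending the nearest prototypes.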
@inproceedings{Beck2010Towards,
  title     = {Towards an Affect Space for Robots to Display Emotional Body Language},
  booktitle = {Proc. 19th Annual IEEE International Symposium on Robot and Human Interactive Communication (IEEE RO-MAN 2010)},
  year      = {2010},
  pages     = {464{\textendash}469},
  publisher = {IEEE},
  address   = {Viareggio, Italy},
  abstract  = {In order for robots to be socially accepted and generate empathy, it is necessary that they display rich emotions. For robots such as Nao, body language is the best medium available given their inability to convey facial expressions. Displaying emotional body language that can be interpreted whilst interacting with the robot should significantly improve its sociability. This research investigates the creation of an Affect Space for the generation of emotional body language to be displayed by robots. To create an Affect Space for body language, one has to establish the contribution of the different joint positions to the emotional expression. The experiment reported in this paper investigated the effect of varying a robot{\textquoteright}s head position on the interpretation, Valence, Arousal and Stance of emotional key poses. It was found that participants were better than chance level at interpreting the key poses. This finding confirms that body language is an appropriate medium for robots to express emotions. Moreover, the results of this study support the conclusion that Head Position is an important body posture variable. Head Position up increased correct identification for some emotion displays (pride, happiness, and excitement), whereas Head Position down increased correct identification for others (anger, sadness). Fear, however, was identified well regardless of Head Position. Head up was always evaluated as more highly Aroused than Head straight or down. Evaluations of Valence (degree of negativity to positivity) and Stance (degree to which the robot was aversive or approaching), however, depended on both Head Position and the emotion displayed. The effects of varying this single body posture variable were complex.},
  isbn      = {978-1-4244-7991-7},
  issn      = {1944-9445},
  doi       = {10.1109/ROMAN.2010.5598649},
  author    = {Beck, Aryel and Ca{\~n}amero, Lola and Bard, Kim A.}
}
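Note: the head-position finding reported in the RO-MAN 2010 abstract above suggests a simple control rule: tilt the head up to raise perceived arousal and down to lower it. The sketch below illustrates that rule only; the sign convention (negative HeadPitch tilts the head up, as on Nao) and the pitch range are assumptions for the example, not parameters from the paper.

def head_pitch_for_arousal(arousal, max_pitch=0.5):
    """Map a target arousal in [-1, 1] to a head pitch angle (radians).
    Assumes Nao's convention that negative HeadPitch tilts the head up,
    so high arousal yields head up and low arousal yields head down."""
    arousal = max(-1.0, min(1.0, arousal))  # clamp to the valid range
    return -arousal * max_pitch

# Apply on top of an emotional key pose (joint name -> angle, illustrative):
PRIDE_POSE = {"HeadPitch": 0.0, "LShoulderPitch": 0.9, "RShoulderPitch": 0.9}

def with_arousal_head(pose, arousal):
    adjusted = dict(pose)
    adjusted["HeadPitch"] = head_pitch_for_arousal(arousal)
    return adjusted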