% klinger.bib

@proceedings{onion-2020-lrec2020,
  title = {Proceedings of LREC2020 Workshop ``People in language, vision and the mind'' (ONION2020)},
  editor = {Paggio, Patrizia  and
      Gatt, Albert  and
      Klinger, Roman},
  month = may,
  year = {2020},
  address = {Marseille, France},
  publisher = {European Language Resources Association (ELRA)},
  url = {https://www.aclweb.org/anthology/2020.onion-1.0},
  isbn = {979-10-95546-70-2}
}
@incollection{Klinger2020,
  author = {Klinger, Roman and Kim, Evgeny and Pad\'o, Sebastian},
  title = {Emotion Analysis for Literary Studies},
  booktitle = {Reflektierte algorithmische Textanalyse},
  year = {2020},
  publisher = {De Gruyter},
  address = {Berlin, Boston},
  doi = {10.1515/9783110693973-011},
  pages = {237--268},
  url = {https://www.degruyter.com/view/book/9783110693973/10.1515/9783110693973-011.xml}
}
@inproceedings{Helbig2020,
  title = {Challenges in Emotion Style Transfer: An Exploration with a Lexical Substitution Pipeline},
  author = {Helbig, David  and
      Troiano, Enrica  and
      Klinger, Roman},
  booktitle = {Proceedings of the Eighth International Workshop on Natural Language Processing for Social Media},
  month = jul,
  year = {2020},
  address = {Online},
  publisher = {Association for Computational Linguistics},
  url = {https://www.aclweb.org/anthology/2020.socialnlp-1.6},
  pdf = {http://www.romanklinger.de/publications/HelbigTroianoKlingerSocialNLP2020.pdf},
  pages = {41--50},
  abstract = {We propose the task of emotion style transfer, which is particularly challenging, as emotions (here: anger, disgust, fear, joy, sadness, surprise) are on the fence between content and style. To understand the particular difficulties of this task, we design a transparent emotion style transfer pipeline based on three steps: (1) select the words that are promising to be substituted to change the emotion (with a brute-force approach and selection based on the attention mechanism of an emotion classifier), (2) find sets of words as candidates for substituting the words (based on lexical and distributional semantics), and (3) select the most promising combination of substitutions with an objective function which consists of components for content (based on BERT sentence embeddings), emotion (based on an emotion classifier), and fluency (based on a neural language model). This comparably straight-forward setup enables us to explore the task and understand in what cases lexical substitution can vary the emotional load of texts, how changes in content and style interact and if they are at odds. We further evaluate our pipeline quantitatively in an automated and an annotation study based on Tweets and find, indeed, that simultaneous adjustments of content and emotion are conflicting objectives: as we show in a qualitative analysis motivated by Scherer{'}s emotion component model, this is particularly the case for implicit emotion expressions based on cognitive appraisal or descriptions of bodily reactions.}
}
@misc{Hofmann2020,
  title = {Appraisal Theories for Emotion Classification in Text},
  author = {Hofmann, Jan and Troiano, Enrica and Sassenberg, Kai and Klinger, Roman},
  year = {2020},
  eprint = {2003.14155},
  archiveprefix = {arXiv},
  primaryclass = {cs.CL},
  url = {https://arxiv.org/abs/2003.14155}
}
@inproceedings{Haider2020,
  title = {{PO}-{EMO}: Conceptualization, Annotation, and Modeling of Aesthetic Emotions in {G}erman and {E}nglish Poetry},
  author = {Haider, Thomas  and
      Eger, Steffen  and
      Kim, Evgeny  and
      Klinger, Roman  and
      Menninghaus, Winfried},
  booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference},
  month = may,
  year = {2020},
  address = {Marseille, France},
  publisher = {European Language Resources Association},
  url = {https://www.aclweb.org/anthology/2020.lrec-1.205},
  pages = {1652--1663},
  abstract = {Most approaches to emotion analysis of social media, literature, news, and other domains focus exclusively on basic emotion categories as defined by Ekman or Plutchik. However, art (such as literature) enables engagement in a broader range of more complex and subtle emotions. These have been shown to also include mixed emotional responses. We consider emotions in poetry as they are elicited in the reader, rather than what is expressed in the text or intended by the author. Thus, we conceptualize a set of aesthetic emotions that are predictive of aesthetic appreciation in the reader, and allow the annotation of multiple labels per line to capture mixed emotions within their context. We evaluate this novel setting in an annotation experiment both with carefully trained experts and via crowdsourcing. Our annotation with experts leads to an acceptable agreement of k = .70, resulting in a consistent dataset for future large scale analysis. Finally, we conduct first emotion classification experiments based on BERT, showing that identifying aesthetic emotions is challenging in our data, with up to .52 F1-micro on the German subset. Data and resources are available at https://github.com/tnhaider/poetry-emotion.},
  language = {English},
  isbn = {979-10-95546-34-4},
  pdf = {http://www.romanklinger.de/publications/HaiderEgerKimKlingerMenninghaus2020LREC_PO-EMO.pdf}
}
@inproceedings{Bostan2020,
  title = {{G}ood{N}ews{E}veryone: A Corpus of News Headlines Annotated with Emotions, Semantic Roles, and Reader Perception},
  author = {Bostan, Laura Ana Maria  and
      Kim, Evgeny  and
      Klinger, Roman},
  booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference},
  month = may,
  year = {2020},
  address = {Marseille, France},
  publisher = {European Language Resources Association},
  url = {https://www.aclweb.org/anthology/2020.lrec-1.194},
  pages = {1554--1566},
  abstract = {Most research on emotion analysis from text focuses on the task of emotion classification or emotion intensity regression. Fewer works address emotions as a phenomenon to be tackled with structured learning, which can be explained by the lack of relevant datasets. We fill this gap by releasing a dataset of 5000 English news headlines annotated via crowdsourcing with their associated emotions, the corresponding emotion experiencers and textual cues, related emotion causes and targets, as well as the reader{'}s perception of the emotion of the headline. This annotation task is comparably challenging, given the large number of classes and roles to be identified. We therefore propose a multiphase annotation procedure in which we first find relevant instances with emotional content and then annotate the more fine-grained aspects. Finally, we develop a baseline for the task of automatic prediction of semantic role structures and discuss the results. The corpus we release enables further research on emotion classification, emotion intensity prediction, emotion cause detection, and supports further qualitative studies.},
  language = {English},
  isbn = {979-10-95546-34-4},
  pdf = {http://www.romanklinger.de/publications/BostanKimKlinger2020LREC.pdf}
}
@inproceedings{Sabbatino2020,
  title = {Automatic Section Recognition in Obituaries},
  author = {Sabbatino, Valentino  and
      Bostan, Laura Ana Maria  and
      Klinger, Roman},
  booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference},
  month = may,
  year = {2020},
  address = {Marseille, France},
  publisher = {European Language Resources Association},
  url = {https://www.aclweb.org/anthology/2020.lrec-1.102},
  pages = {817--825},
  abstract = {Obituaries contain information about people{'}s values across times and cultures, which makes them a useful resource for exploring cultural history. They are typically structured similarly, with sections corresponding to Personal Information, Biographical Sketch, Characteristics, Family, Gratitude, Tribute, Funeral Information and Other aspects of the person. To make this information available for further studies, we propose a statistical model which recognizes these sections. To achieve that, we collect a corpus of 20058 English obituaries from The Daily Item, Remembering.CA and The London Free Press. The evaluation of our annotation guidelines with three annotators on 1008 obituaries shows a substantial agreement of Fleiss κ = 0.87. Formulated as an automatic segmentation task, a convolutional neural network outperforms bag-of-words and embedding-based BiLSTMs and BiLSTM-CRFs with a micro F1 = 0.81.},
  language = {English},
  isbn = {979-10-95546-34-4},
  pdf = {http://www.romanklinger.de/publications/valentino2020.pdf}
}