AHCI RESEARCH GROUP
Publications
Papers published in international journals, conference and workshop proceedings, and books.
OUR RESEARCH
Scientific Publications
2025
Casas, L.; Hannah, S.; Mitchell, K.
HoloJig: Interactive Spoken Prompt Specified Generative AI Environments Journal Article
In: IEEE Computer Graphics and Applications, vol. 45, no. 2, pp. 69–77, 2025, ISSN: 0272-1716.
@article{casas_holojig_2025,
title = {HoloJig: Interactive Spoken Prompt Specified Generative AI Environments},
author = {L. Casas and S. Hannah and K. Mitchell},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001182100&doi=10.1109%2fMCG.2025.3553780&partnerID=40&md5=ec5dc44023314b6f9221169357d81dcd},
doi = {10.1109/MCG.2025.3553780},
issn = {0272-1716},
year = {2025},
date = {2025-01-01},
journal = {IEEE Computer Graphics and Applications},
volume = {45},
number = {2},
pages = {69–77},
abstract = {HoloJig offers an interactive speech-to-virtual reality (VR) experience that generates diverse environments in real time based on live spoken descriptions. Unlike traditional VR systems that rely on prebuilt assets, HoloJig dynamically creates personalized and immersive virtual spaces with depth-based parallax 3-D rendering, allowing users to define the characteristics of their immersive environment through verbal prompts. This generative approach opens up new possibilities for interactive experiences, including simulations, training, collaborative workspaces, and entertainment. In addition to speech-to-VR environment generation, a key innovation of HoloJig is its progressive visual transition mechanism, which smoothly dissolves between previously generated and newly requested environments, mitigating the delay caused by neural computations. This feature ensures a seamless and continuous user experience, even as new scenes are being rendered on remote servers. © 1981-2012 IEEE.},
keywords = {3-D rendering, Article, Collaborative workspace, customer experience, Economic and social effects, generative artificial intelligence, human, Immersive, Immersive environment, parallax, Real- time, simulation, Simulation training, speech, Time based, Virtual environments, Virtual Reality, Virtual reality experiences, Virtual spaces, VR systems},
pubstate = {published},
tppubtype = {article}
}
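The latency-masking dissolve described in the abstract above can be illustrated with a minimal sketch. All names (Environment, request_environment, the blend loop) are hypothetical placeholders, not the authors' implementation: the idea is simply to keep showing the current panorama while the next one is generated remotely, then cross-dissolve once it arrives.

```python
# Minimal sketch of a latency-masking cross-dissolve between generated VR
# environments (hypothetical names; not the HoloJig implementation).
import threading
import time

class Environment:
    """Stands in for a renderable panorama/skybox generated from a prompt."""
    def __init__(self, prompt: str):
        self.prompt = prompt

def request_environment(prompt: str) -> Environment:
    """Placeholder for a remote generative-AI call; assumed to be slow."""
    time.sleep(5)                      # simulated server-side generation time
    return Environment(prompt)

class EnvironmentBlender:
    def __init__(self, initial: Environment):
        self.current = initial
        self.incoming = None
        self.blend = 0.0               # 0.0 = current only, 1.0 = incoming only

    def speak_prompt(self, prompt: str):
        """Kick off generation in the background; rendering never blocks."""
        def worker():
            self.incoming = request_environment(prompt)
        threading.Thread(target=worker, daemon=True).start()

    def update(self, dt: float, dissolve_seconds: float = 2.0):
        """Called every frame: advance the dissolve once the new scene exists."""
        if self.incoming is not None:
            self.blend = min(1.0, self.blend + dt / dissolve_seconds)
            if self.blend >= 1.0:      # dissolve finished: swap scenes
                self.current, self.incoming, self.blend = self.incoming, None, 0.0

    def frame_weights(self):
        """Mixing weights a renderer would apply to the two panoramas."""
        return (1.0 - self.blend, self.blend)
```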
Nygren, T.; Samuelsson, M.; Hansson, P. -O.; Efimova, E.; Bachelder, S.
AI Versus Human Feedback in Mixed Reality Simulations: Comparing LLM and Expert Mentoring in Preservice Teacher Education on Controversial Issues Journal Article
In: International Journal of Artificial Intelligence in Education, 2025, ISSN: 1560-4292.
@article{nygren_ai_2025,
title = {AI Versus Human Feedback in Mixed Reality Simulations: Comparing LLM and Expert Mentoring in Preservice Teacher Education on Controversial Issues},
author = {T. Nygren and M. Samuelsson and P. -O. Hansson and E. Efimova and S. Bachelder},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007244772&doi=10.1007%2fs40593-025-00484-8&partnerID=40&md5=d3cb14a8117045505cbbeb174b32b88d},
doi = {10.1007/s40593-025-00484-8},
issn = {1560-4292},
year = {2025},
date = {2025-01-01},
journal = {International Journal of Artificial Intelligence in Education},
abstract = {This study explores the potential role of AI-generated mentoring within simulated environments designed for teacher education, specifically focused on the challenges of teaching controversial issues. Using a mixed-methods approach, we empirically investigate the potential and challenges of AI-generated feedback compared to that provided by human experts when mentoring preservice teachers in the context of mixed reality simulations. Findings reveal that human experts offered more mixed and nuanced feedback than ChatGPT-4o and Perplexity, especially when identifying missed teaching opportunities and balancing classroom discussions. The AI models evaluated were publicly available pro versions of LLMs and were tested using detailed prompts and coding schemes aligned with educational theories. AI systems were not very good at identifying aspects of general, pedagogical or content knowledge based on Shulman’s theories but were still quite effective in generating feedback in line with human experts. The study highlights the promise of AI to enhance teacher training but underscores the importance of combining AI feedback with expert insights to address the complexities of real-world teaching. This research contributes to a growing understanding of AI's potential role and limitations in education. It suggests that, while AI can be valuable to scale mixed reality simulations, it should be carefully evaluated and balanced by human expertise in teacher education. © The Author(s) 2025.},
keywords = {AI-generated feedback, Controversial issue in social study education, Controversial issues in social studies education, Curricula, Domain knowledge, Economic and social effects, Expert systems, Generative AI, Human engineering, Knowledge engineering, Language Model, Large language model, large language models (LLMs), Mixed reality, Mixed reality simulation, Mixed reality simulation (MRS), Pedagogical content knowledge, Pedagogical content knowledge (PCK), Personnel training, Preservice teachers, Social studies education, Teacher training, Teacher training simulation, Teacher training simulations, Teaching, Training simulation},
pubstate = {published},
tppubtype = {article}
}
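For readers interested in how LLM feedback can be anchored to a coding scheme of the kind the abstract mentions, the sketch below shows one hypothetical way to prompt a model with rubric categories (loosely modelled on content/pedagogical knowledge and missed opportunities, following the abstract's reference to Shulman) and to compare its codes with an expert's. The call_llm hook, rubric wording and agreement metric are illustrative stand-ins, not the study's instruments.

```python
# Hypothetical sketch: rubric-guided LLM feedback on a teaching transcript,
# compared against expert codes. Not the prompts or coding scheme of the study.
from typing import Callable, Dict

RUBRIC = {
    "content_knowledge": "Does the teacher present the controversial issue accurately?",
    "pedagogical_knowledge": "Does the teacher balance the classroom discussion?",
    "missed_opportunities": "Which teaching opportunities were not taken up?",
}

def build_prompt(transcript: str) -> str:
    criteria = "\n".join(f"- {name}: {question}" for name, question in RUBRIC.items())
    return (
        "You are mentoring a preservice teacher after a mixed reality simulation.\n"
        f"Transcript:\n{transcript}\n\n"
        "Give feedback under each criterion, citing concrete moments:\n"
        f"{criteria}"
    )

def mentor(transcript: str, call_llm: Callable[[str], str]) -> str:
    """call_llm is any text-in/text-out model endpoint supplied by the caller."""
    return call_llm(build_prompt(transcript))

def agreement(llm_codes: Dict[str, bool], expert_codes: Dict[str, bool]) -> float:
    """Fraction of rubric categories where LLM and expert codes agree (toy metric)."""
    keys = expert_codes.keys()
    return sum(llm_codes.get(k) == expert_codes[k] for k in keys) / len(keys)
```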
Shibuya, K.
Transforming phenomenological sociology for virtual personalities and virtual worlds Journal Article
In: AI and Society, vol. 40, no. 5, pp. 3317–3331, 2025, ISSN: 0951-5666.
@article{shibuya_transforming_2025,
title = {Transforming phenomenological sociology for virtual personalities and virtual worlds},
author = {K. Shibuya},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85217199972&doi=10.1007%2fs00146-025-02189-x&partnerID=40&md5=aa9db1cb1f99419b605f1091469eb77c},
doi = {10.1007/s00146-025-02189-x},
issn = {0951-5666},
year = {2025},
date = {2025-01-01},
journal = {AI and Society},
volume = {40},
number = {5},
pages = {3317–3331},
abstract = {Are there opportunities to use the plural to express the first person (“I”) of “the same person” in English? It means that the self is an entity that guarantees uniqueness and is at the core of identity. Recently, radical and rapid innovations in AI technologies have made it possible to alter our existential fundamentals. Principally, we are now interacting with “virtual personalities” generated by generative AI. Thus, there is an inevitability to explore the relationship between AI and society, and the problem domain of phenomenological sociology related to the “virtuality” of personalities and the world. Encountering and interacting with “others without subject” artificially generated by generative AI based on individual big data and attribute data is a situation that mankind has never experienced before from the perspective of sociology and phenomenological sociology related to the ego. The virtual personalities can be perceived as if it were interacting with existing humans in the form of video and audio, and it is also possible to arbitrarily change their attributes (e.g., gender, race, age, physical characteristics) and other settings, as well as to virtually create deceased persons or great figures from the past. Such technological innovation is, so to speak, a virtualization of human existential identity, and advanced technologies such as AI will transform not only the boundary between self and others but also the aspect of human existence itself (Shibuya in Digital transformation of identity in the age of artificial intelligence. Springer, Berlin, 2020). In addition, from a phenomenological viewpoint, the boundary between reality and virtuality is blurring due to technological innovation in the living world itself, and there is a concern that this will lead to an artificial state of detachment. Actually, the use of advanced technologies such as AI, VR in virtual worlds and cyberspace will not only cause people to lose their reality and actuality but will also endanger the very foundations of their existential identity. Therefore, we must ask what it means for us as existences to interact with virtual personalities in a virtually generated world, and what is the nature of the intersubjectivity formation and semantic understanding as well as the modes of existence, facts, and worlds, and what are their evidential natures. In line with what Husserl, the founder of phenomenology, once declared at the beginning of his “Cartesianische Meditationen” (Husserl in Cartesianische Meditationen, e-artnow, 2018), that “we need to begin philosophy radically anew”, as also phenomenological sociology, it can now state that “we need to begin phenomenological sociology radically anew”. Then, this paper reviews and discusses the following issues based on technological trends. Is there an intersubjectivity between the virtual personalities generated by the AI and the human being? How does the virtualization of identity, as well as the difference between self and others, transform the nature of existence? How is a mutual semantic understanding possible between a human being and the virtual personality that is generated by a generative AI and a generative AI? How can we verify discourses and propositions of fact and worldliness in our interactions with generative AIs, and how can we overcome the illusion (i.e., hallucination) that generative AIs create? What does the transformation of the world and its aspect as existence mean?
How is it possible to collaborate between a human being and the virtual personality that is generated by a generative AI and a generative AI? © The Author(s), under exclusive licence to Springer-Verlag London Ltd., part of Springer Nature 2025.},
keywords = {Advanced technology, Economic and social effects, Generative adversarial networks, Generative AI, Human being, Identity, Intersubjectivity, Metadata, Phenomenological Sociology, Sociology, Technological innovation, Virtual environments, Virtual Personality, Virtual Reality, Virtual worlds, Virtualization, Virtualizations},
pubstate = {published},
tppubtype = {article}
}
2024
Omirgaliyev, R.; Kenzhe, D.; Mirambekov, S.
Simulating life: the application of generative agents in virtual environments Proceedings Article
In: IEEE AITU: Digit. Gener., Conf. Proc. - AITU, pp. 181–187, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835036437-8.
@inproceedings{omirgaliyev_simulating_2024,
title = {Simulating life: the application of generative agents in virtual environments},
author = {R. Omirgaliyev and D. Kenzhe and S. Mirambekov},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199876250&doi=10.1109%2fIEEECONF61558.2024.10585387&partnerID=40&md5=70f8b598d10bec13c39d3506a15534a1},
doi = {10.1109/IEEECONF61558.2024.10585387},
isbn = {979-835036437-8},
year = {2024},
date = {2024-01-01},
booktitle = {IEEE AITU: Digit. Gener., Conf. Proc. - AITU},
pages = {181–187},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {This research explores the innovative integration of Large Language Models (LLMs) in game development, focusing on the autonomous creation, development, and governance of a virtual village by AI agents within a 2D game environment. The core of this study lies in observing and analyzing the interactions and societal development among AI agents, utilizing advanced algorithms for generative behavior modeling and dynamic skill tree learning. These AI agents are endowed with human-like decision-making capabilities, enabled by LLMs, allowing them to engage in complex social interactions and contribute to emergent societal structures within the game. The uniqueness of this project stems from its approach to simulating lifelike social dynamics in a virtual setting, thus addressing a gap in existing research and marking a significant contribution to the interdisciplinary fields of artificial intelligence and game development. By comparing AI-generated societal behaviors with human social interactions, the study delves into the potential of AI to mirror or enhance human social structures, offering a fresh perspective on the capabilities of AI in game development. This research not only aims to push the boundaries of AI applications in game development but also seeks to provide valuable insights into the potential for AI-driven simulations in studying complex social and behavioral dynamics. ©2024 IEEE.},
keywords = {Artificial intelligence, Artificial intelligence agent, Artificial Intelligence Agents, Autonomous agents, Behavioral Research, Behaviour models, Computational Linguistics, Decision making, Dynamics, Dynamics simulation, Economic and social effects, Game Development, Game environment, Language Model, Large language model, large language models, Modeling languages, Social dynamic simulation, Social dynamics, Social Dynamics Simulation, Software design, Virtual Reality, Virtual Societies},
pubstate = {published},
tppubtype = {inproceedings}
}
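A minimal generative-agent loop of the kind the abstract describes might look like the sketch below: each agent turns its observations and memory into a prompt, asks an LLM for its next action, and records the outcome. All names (Agent, call_llm, the action set) are illustrative assumptions, not the paper's architecture.

```python
# Illustrative generative-agent loop for a 2D village simulation.
# The LLM endpoint, memory format and action set are assumptions.
from dataclasses import dataclass, field
from typing import Callable, List

ACTIONS = ["gather_food", "build", "trade", "talk", "rest"]

@dataclass
class Agent:
    name: str
    memory: List[str] = field(default_factory=list)

    def decide(self, observation: str, call_llm: Callable[[str], str]) -> str:
        prompt = (
            f"You are {self.name}, a villager in a simulated society.\n"
            f"Recent memories: {self.memory[-5:]}\n"
            f"You currently observe: {observation}\n"
            f"Choose exactly one action from {ACTIONS} and reply with its name."
        )
        choice = call_llm(prompt).strip()
        return choice if choice in ACTIONS else "rest"   # fall back on invalid output

    def remember(self, event: str) -> None:
        self.memory.append(event)

def tick(agents: List[Agent], observe, call_llm) -> None:
    """One simulation step: every agent observes, decides and acts."""
    for agent in agents:
        action = agent.decide(observe(agent), call_llm)
        agent.remember(f"I chose to {action}.")
```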
Cigliano, A.; Fallucchi, F.; Gerardi, M.
An Analysis of the State of Art of the Metaverse and Its Disruptive Impact on Services Proceedings Article
In: Fazzolari, R.; Jaber, A.A.; Randieri, C. (Eds.): CEUR Workshop Proc., vol. 3869, pp. 38–46, CEUR-WS, 2024, ISSN: 1613-0073.
@inproceedings{cigliano_analysis_2024,
title = {An Analysis of the State of Art of the Metaverse and Its Disruptive Impact on Services},
author = {A. Cigliano and F. Fallucchi and M. Gerardi},
editor = {Fazzolari R. and Jaber A.A. and Randieri C.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85213681173&partnerID=40&md5=cf59e812c4d5bdc51c9daae3eb7c0406},
issn = {1613-0073},
year = {2024},
date = {2024-01-01},
booktitle = {CEUR Workshop Proc.},
volume = {3869},
pages = {38–46},
publisher = {CEUR-WS},
abstract = {The metaverse is a virtual environment where individuals may connect with one another, partake in a variety of activities, and access digital content. It coexists with the real world and it has the potential to significantly affect daily life as well as a variety of services and applications as it develops. To understand the importance of the impacts of the metaverse in today’s society, just think of the disruptive technologies that enable it: Artificial Intelligence (AI), Extended Reality (ER), IoT, Digital Twin (DT) and Blockchain/NFT, all supported by the availability of large, rich, structured/unstructured datasets and advanced computational models. The metaverse is a candidate (and not only) to redefine everyday life, reshaping activities and service/product provisioning towards increased efficiency, cost savings and quality performance, with a big impact on everyone, and helping the design and testing of the next generation of the internet. However, there still remain not only technical and interoperability issues but above all ethical, human, social, and cultural concerns as to the metaverse’s influence upon its prospective scope in reconstructing the quality of urban life. This paper undertakes an upper-level scientific literature review of the area of the metaverse from a broader perspective. Further, it maps some services and the related requirements in order to delineate the enabling technologies of the metaverse, and explores their contributions. © 2024 Copyright for this paper by its authors. Use permitted under Creative Commons License Attribution 4.0 International (CC BY 4.0).},
keywords = {AI: Artificial Intelligence, Artificial intelligence: artificial intelligence, Block-chain, Blockchain, DH: Digital Humanity, Digital humanities, Digital twin: digital twin, DT: Digital Twin, Economic and social effects, ER: Extended Reality, Extended reality: extended reality, GenI: Generative Artificial Intelligence, IoT, Language Model, LLM: Large Language Model, Metaverse, Metaverses, Natural language model, Nft, NLP: Natural Language Model, Quantum Computing, Virtual environments, VR: Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Min, Y.; Jeong, J. -W.
Public Speaking Q&A Practice with LLM-Generated Personas in Virtual Reality Proceedings Article
In: Eck, U.; Sra, M.; Stefanucci, J.; Sugimoto, M.; Tatzgern, M.; Williams, I. (Eds.): Proc. - IEEE Int. Symp. Mixed Augment. Real. Adjunct, ISMAR-Adjunct, pp. 493–496, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-833150691-9.
@inproceedings{min_public_2024,
title = {Public Speaking Q&A Practice with LLM-Generated Personas in Virtual Reality},
author = {Y. Min and J. -W. Jeong},
editor = {Eck U. and Sra M. and Stefanucci J. and Sugimoto M. and Tatzgern M. and Williams I.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85214393734&doi=10.1109%2fISMAR-Adjunct64951.2024.00143&partnerID=40&md5=992d9599bde26f9d57d549639869d124},
doi = {10.1109/ISMAR-Adjunct64951.2024.00143},
isbn = {979-833150691-9},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. - IEEE Int. Symp. Mixed Augment. Real. Adjunct, ISMAR-Adjunct},
pages = {493–496},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {This paper introduces a novel VR-based Q&A practice system that harnesses the power of Large Language Models (LLMs). We support Q&A practice for upcoming public speaking by providing an immersive VR training environment populated with LLM-generated audiences, each capable of posing diverse and realistic questions based on different personas. We conducted a pilot user study involving 20 participants who engaged in VR-based Q&A practice sessions. The sessions featured a variety of questions regarding presentation material provided by the participants, all of which were generated by LLM-based personas. Through post-surveys and interviews, we evaluated the effectiveness of the proposed method. The participants valued the system for engagement and focus while also identifying several areas for improvement. Our study demonstrated the potential of integrating VR and LLMs to create a powerful, immersive tool for Q&A practice. © 2024 IEEE.},
keywords = {Digital elevation model, Economic and social effects, Language Model, Large language model-based persona generation, LLM-based Persona Generation, Model-based OPC, Personnel training, Power, Practice systems, Presentation Anxiety, Public speaking, Q&A practice, user experience, Users' experiences, Virtual environments, Virtual Reality, VR training},
pubstate = {published},
tppubtype = {inproceedings}
}
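The persona-conditioned question generation described above could be sketched roughly as follows; the persona list, prompt wording and call_llm hook are hypothetical stand-ins rather than the authors' system.

```python
# Hypothetical sketch: generating audience questions from slide text,
# one set per persona, for VR Q&A rehearsal.
from typing import Callable, Dict, List

PERSONAS = [
    "a skeptical domain expert who probes methodology",
    "a newcomer who asks for clarification of basic terms",
    "a practitioner who asks about real-world applicability",
]

def questions_for_persona(slides: str, persona: str,
                          call_llm: Callable[[str], str], n: int = 3) -> List[str]:
    prompt = (
        f"You are {persona} in the audience of the following presentation:\n"
        f"{slides}\n"
        f"Ask {n} distinct questions you would realistically raise, one per line."
    )
    # Keep at most n non-empty lines from the model's reply.
    return [q.strip() for q in call_llm(prompt).splitlines() if q.strip()][:n]

def build_audience(slides: str, call_llm: Callable[[str], str]) -> Dict[str, List[str]]:
    """Map each persona to the questions its virtual avatar will ask in VR."""
    return {p: questions_for_persona(slides, p, call_llm) for p in PERSONAS}
```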
Martini, M.; Valentini, V.; Ciprian, A.; Bottino, A.; Iacoviello, R.; Montagnuolo, M.; Messina, A.; Strada, F.; Zappia, D.
Semi-Automated Digital Human Production for Enhanced Media Broadcasting Proceedings Article
In: IEEE Gaming, Entertain., Media Conf., GEM, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835037453-7.
@inproceedings{martini_semi_2024,
title = {Semi-Automated Digital Human Production for Enhanced Media Broadcasting},
author = {M. Martini and V. Valentini and A. Ciprian and A. Bottino and R. Iacoviello and M. Montagnuolo and A. Messina and F. Strada and D. Zappia},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199536742&doi=10.1109%2fGEM61861.2024.10585601&partnerID=40&md5=3703fba931b02f9615316db8ebbca70c},
doi = {10.1109/GEM61861.2024.10585601},
isbn = {979-835037453-7},
year = {2024},
date = {2024-01-01},
booktitle = {IEEE Gaming, Entertain., Media Conf., GEM},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {In recent years, the application of synthetic humans in various fields has attracted considerable attention, leading to extensive exploration of their integration into the Metaverse and virtual production environments. This work presents a semi-automated approach that aims to find a fair trade-off between high-quality outputs and efficient production times. The project focuses on the Rai photo and video archives to find images of target characters for texturing and 3D reconstruction, with the goal of reviving Rai's 2D footage and enhancing the media experience. A key aspect of this study is to minimize human intervention, ensuring an efficient, flexible, and scalable creation process. In this work, the improvements have been distributed among different stages of the digital human creation process, starting with the generation of 3D head meshes from 2D images of the reference character and then moving on to the generation, using a Diffusion model, of suitable images for texture development. These assets are then integrated into the Unreal Engine, where a custom widget facilitates posing, rendering, and texturing of Synthetic Humans models. Finally, an in-depth quantitative comparison and subjective tests were carried out between the original character images and the rendered synthetic humans, confirming the validity of the approach. © 2024 IEEE.},
keywords = {AI automation, Automation, Creation process, Digital humans, Economic and social effects, Extensive explorations, Face reconstruction, Generative AI, Image enhancement, media archive, Media archives, Metaverses, Rendering (computer graphics), Synthetic human, Synthetic Humans, Textures, Three dimensional computer graphics, Virtual production, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
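At a very high level, the semi-automated pipeline in the abstract can be read as a fixed sequence of stages. The sketch below only orders those stages with placeholder functions (archive retrieval, mesh reconstruction, diffusion-based texture generation, engine import); every function name is an assumption, and the real work happens in external tools such as a diffusion model and Unreal Engine.

```python
# Stage-ordering sketch of a digital-human production pipeline.
# Every function is a named placeholder; none of this is the Rai toolchain.
from typing import List

def find_reference_images(archive: str, character: str) -> List[str]:
    """Search a media archive for usable photos of the target character."""
    raise NotImplementedError("archive retrieval stage")

def reconstruct_head_mesh(images: List[str]) -> str:
    """Build a 3D head mesh from 2D reference images; returns a mesh file path."""
    raise NotImplementedError("3D reconstruction stage")

def generate_texture_images(images: List[str]) -> List[str]:
    """Use a diffusion model to produce images suitable for texture development."""
    raise NotImplementedError("texture generation stage")

def import_into_engine(mesh_path: str, textures: List[str]) -> None:
    """Hand the assets to the game engine for posing, rendering and texturing."""
    raise NotImplementedError("engine integration stage")

def produce_digital_human(archive: str, character: str) -> None:
    refs = find_reference_images(archive, character)
    mesh = reconstruct_head_mesh(refs)
    textures = generate_texture_images(refs)
    import_into_engine(mesh, textures)
```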
Christiansen, F. R.; Hollensberg, L. Nø.; Jensen, N. B.; Julsgaard, K.; Jespersen, K. N.; Nikolov, I.
Exploring Presence in Interactions with LLM-Driven NPCs: A Comparative Study of Speech Recognition and Dialogue Options Proceedings Article
In: Spencer, S.N. (Ed.): Proc. ACM Symp. Virtual Reality Softw. Technol. VRST, Association for Computing Machinery, 2024, ISBN: 979-840070535-9.
@inproceedings{christiansen_exploring_2024,
title = {Exploring Presence in Interactions with LLM-Driven NPCs: A Comparative Study of Speech Recognition and Dialogue Options},
author = {F. R. Christiansen and L. Nø. Hollensberg and N. B. Jensen and K. Julsgaard and K. N. Jespersen and I. Nikolov},
editor = {Spencer S.N.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85212512351&doi=10.1145%2f3641825.3687716&partnerID=40&md5=56ec6982b399fd97196ea73e7c659c31},
doi = {10.1145/3641825.3687716},
isbn = {979-840070535-9},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. ACM Symp. Virtual Reality Softw. Technol. VRST},
publisher = {Association for Computing Machinery},
abstract = {Combining modern technologies like large-language models (LLMs), speech-to-text, and text-to-speech can enhance immersion in virtual reality (VR) environments. However, challenges exist in effectively implementing LLMs and educating users. This paper explores implementing LLM-powered virtual social actors and facilitating user communication. We developed a murder mystery game where users interact with LLM-based non-playable characters (NPCs) through interrogation, clue-gathering, and exploration. Two versions were tested: one using speech recognition and another with traditional dialog boxes. While both provided similar social presence, users felt more immersed with speech recognition but found it overwhelming, while the dialog version was more challenging. Slow NPC response times were a source of frustration, highlighting the need for faster generation or better masking for a seamless experience. © 2024 Owner/Author.},
keywords = {Comparatives studies, Computer simulation languages, Economic and social effects, Immersive System, Immersive systems, Language Model, Large language model, Large language models (LLM), Model-driven, Modern technologies, Non-playable character, NPC, Presence, Social Actors, Speech enhancement, Speech recognition, Text to speech, Virtual environments, Virtual Reality, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
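The two interaction conditions compared in the study above (free speech vs. dialogue options) can both be framed as front-ends to the same NPC loop. The sketch below is a hypothetical illustration of that loop, with transcribe, call_llm and synthesize_speech as stand-ins for whatever speech-to-text, language-model and text-to-speech services a project actually uses.

```python
# Hypothetical LLM-driven NPC interaction loop for a VR mystery game.
# transcribe / call_llm / synthesize_speech are caller-supplied services.
from dataclasses import dataclass, field
from typing import Callable, List

@dataclass
class NPC:
    name: str
    persona: str                      # e.g. "the suspicious butler"
    history: List[str] = field(default_factory=list)

    def reply(self, player_utterance: str, call_llm: Callable[[str], str]) -> str:
        prompt = (
            f"You are {self.name}, {self.persona}, in a murder mystery.\n"
            f"Conversation so far: {self.history[-6:]}\n"
            f"Player says: {player_utterance}\n"
            "Answer in character, in at most two sentences."
        )
        answer = call_llm(prompt)
        self.history += [f"Player: {player_utterance}", f"{self.name}: {answer}"]
        return answer

def speech_turn(npc: NPC, audio: bytes, transcribe, call_llm, synthesize_speech) -> bytes:
    """Free-speech condition: player audio in, spoken NPC answer out."""
    return synthesize_speech(npc.reply(transcribe(audio), call_llm))

def dialogue_turn(npc: NPC, chosen_option: str, call_llm, synthesize_speech) -> bytes:
    """Dialogue-box condition: a preset option replaces the player's free speech."""
    return synthesize_speech(npc.reply(chosen_option, call_llm))
```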