AHCI RESEARCH GROUP
Publications
Papers published in international journals, conference proceedings, workshops, and books.
2025
Zhang, Z.; Wang, J.; Chen, J.; Fu, H.; Tong, Z.; Jiang, C.
Diffusion-Based Reinforcement Learning for Cooperative Offloading and Resource Allocation in Multi-UAV Assisted Edge-Enabled Metaverse Journal Article
In: IEEE Transactions on Vehicular Technology, 2025, ISSN: 0018-9545.
Abstract | Links | BibTeX | Tags: Aerial vehicle, Content creation, Content services, Contrastive Learning, Decision making, Deep learning, Deep reinforcement learning, Diffusion Model, Global industry, Helicopter services, Markov processes, Metaverse, Reinforcement Learning, Resource allocation, Typical application, Unmanned aerial vehicles (UAV)
@article{zhang_diffusion-based_2025,
title = {Diffusion-Based Reinforcement Learning for Cooperative Offloading and Resource Allocation in Multi-UAV Assisted Edge-Enabled Metaverse},
author = {Z. Zhang and J. Wang and J. Chen and H. Fu and Z. Tong and C. Jiang},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85219108203&doi=10.1109%2fTVT.2025.3544879&partnerID=40&md5=fdbe1554f6cf7d47d4bbbb73b4b0d487},
doi = {10.1109/TVT.2025.3544879},
issn = {0018-9545},
year = {2025},
date = {2025-01-01},
journal = {IEEE Transactions on Vehicular Technology},
abstract = {As one of the typical applications of 6G, the metaverse, with its superior immersion and diversified services, has garnered widespread attention from both global industry and academia. Simultaneously, the emergence of AI-generated content (AIGC), exemplified by ChatGPT, has revolutionized the means of content creation in the metaverse. Providing metaverse users with diversified AIGC services anytime and anywhere, to meet the demand for immersive and blended virtual-real experiences in the physical world, has become a major challenge in the development of the metaverse. Considering the flexibility and mobility of unmanned aerial vehicles (UAVs), we innovatively incorporate multiple UAVs as AIGC service providers and construct a multi-UAV assisted edge-enabled metaverse system in the context of an AIGC-as-a-Service (AaaS) scenario. To solve the complex resource management and allocation problem in this system, we formulate it as a Markov decision process (MDP) and propose combining the generative capabilities of the diffusion model with the robust decision-making abilities of reinforcement learning to tackle these issues. To substantiate the efficacy of the proposed diffusion-based reinforcement learning framework, we propose a novel diffusion-based soft actor-critic algorithm for the metaverse (Meta-DSAC). Subsequently, a series of experiments is executed, and the simulation results empirically validate the proposed algorithm's comparative advantages: the ability to provide stable and substantial long-term rewards, and an enhanced capacity to model complex environments. © 2025 IEEE.},
keywords = {Aerial vehicle, Content creation, Content services, Contrastive Learning, Decision making, Deep learning, Deep reinforcement learning, Diffusion Model, Global industry, Helicopter services, Markov processes, Metaverse, Reinforcement Learning, Resource allocation, Typical application, Unmanned aerial vehicles (UAV)},
pubstate = {published},
tppubtype = {article}
}
Linares-Pellicer, J.; Izquierdo-Domenech, J.; Ferri-Molla, I.; Aliaga-Torro, C.
Breaking the Bottleneck: Generative AI as the Solution for XR Content Creation in Education Book Section
In: Lecture Notes in Networks and Systems, vol. 1140, pp. 9–30, Springer Science and Business Media Deutschland GmbH, 2025, ISSN: 2367-3370.
Abstract | Links | BibTeX | Tags: Adversarial machine learning, Augmented Reality, Breakings, Content creation, Contrastive Learning, Development process, Educational context, Federated learning, Generative adversarial networks, Immersive learning, Intelligence models, Learning experiences, Mixed reality, Resource intensity, Technical skills, Virtual environments
@incollection{linares-pellicer_breaking_2025,
title = {Breaking the Bottleneck: Generative AI as the Solution for XR Content Creation in Education},
author = {J. Linares-Pellicer and J. Izquierdo-Domenech and I. Ferri-Molla and C. Aliaga-Torro},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85212478399&doi=10.1007%2f978-3-031-71530-3_2&partnerID=40&md5=aefee938cd5b8a74ee811a463d7409ae},
doi = {10.1007/978-3-031-71530-3_2},
issn = {2367-3370},
year = {2025},
date = {2025-01-01},
booktitle = {Lecture Notes in Networks and Systems},
volume = {1140},
pages = {9–30},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {The integration of Extended Reality (XR) technologies, namely Virtual Reality (VR), Augmented Reality (AR), and Mixed Reality (MR), promises to revolutionize education by offering immersive learning experiences. However, the complexity and resource intensity of content creation hinder the adoption of XR in educational contexts. This chapter explores Generative Artificial Intelligence (GenAI) as a solution, highlighting how GenAI models can facilitate the creation of educational XR content. GenAI enables educators to produce engaging XR experiences without needing advanced technical skills by automating aspects of the development process from ideation to deployment. Practical examples demonstrate GenAI’s current capability to generate assets and program applications, significantly lowering the barrier to creating personalized and interactive learning environments. The chapter also addresses challenges related to GenAI’s application in education, including technical limitations and ethical considerations. Ultimately, GenAI’s integration into XR content creation makes immersive educational experiences more accessible and practical, driven only by natural interactions, promising a future where technology-enhanced learning is universally attainable. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
keywords = {Adversarial machine learning, Augmented Reality, Breakings, Content creation, Contrastive Learning, Development process, Educational context, Federated learning, Generative adversarial networks, Immersive learning, Intelligence models, Learning experiences, Mixed reality, Resource intensity, Technical skills, Virtual environments},
pubstate = {published},
tppubtype = {incollection}
}
Mao, H.; Xu, Z.; Wei, S.; Quan, Y.; Deng, N.; Yang, X.
LLM-powered Gaussian Splatting in VR interactions Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 1654–1655, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-833151484-6.
Abstract | Links | BibTeX | Tags: 3D Gaussian Splatting, 3D reconstruction, Content creation, Digital elevation model, Gaussians, High quality, Language Model, Materials analysis, Physical simulation, Quality rendering, Rendering (computer graphics), Splatting, Virtual Reality, Volume Rendering, VR systems
@inproceedings{mao_llm-powered_2025,
title = {LLM-powered Gaussian Splatting in VR interactions},
author = {H. Mao and Z. Xu and S. Wei and Y. Quan and N. Deng and X. Yang},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005148017&doi=10.1109%2fVRW66409.2025.00472&partnerID=40&md5=ee725f655a37251ff335ad2098d15f22},
doi = {10.1109/VRW66409.2025.00472},
isbn = {979-833151484-6},
year = {2025},
date = {2025-01-01},
booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
pages = {1654–1655},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Recent advances in radiance field rendering, particularly 3D Gaussian Splatting (3DGS), have demonstrated significant potential for VR content creation, offering both high-quality rendering and an efficient production pipeline. However, current physics-based interaction systems for 3DGS are limited to either simplistic, unrealistic simulations or require substantial user input for complex scenes, largely due to the lack of scene comprehension. In this demonstration, we present a highly realistic interactive VR system powered by large language models (LLMs). After object-aware GS reconstruction, we prompt GPT-4o to analyze the physical properties of objects in the scene, which then guide physical simulations that adhere to real-world phenomena. Additionally, we design a GPT-assisted GS inpainting module to complete the areas occluded by manipulated objects. To facilitate rich interaction, we introduce a computationally efficient physical simulation framework through a PBD-based unified interpolation method, which supports various forms of physical interactions. In our research demonstrations, we reconstruct a variety of scenes enhanced by the LLM's understanding, showcasing how our VR system can support complex, realistic interactions without additional manual design or annotation. © 2025 IEEE.},
keywords = {3D Gaussian Splatting, 3D reconstruction, Content creation, Digital elevation model, Gaussians, High quality, Language Model, Materials analysis, Physical simulation, Quality rendering, Rendering (computer graphics), Splatting, Virtual Reality, Volume Rendering, VR systems},
pubstate = {published},
tppubtype = {inproceedings}
}
Suzuki, R.; Gonzalez-Franco, M.; Sra, M.; Lindlbauer, D.
Everyday AR through AI-in-the-Loop Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 979-840071395-8.
Abstract | Links | BibTeX | Tags: Augmented Reality, Augmented reality content, Augmented reality hardware, Computer vision, Content creation, Context-Aware, Generative AI, Human-AI Interaction, Language Model, Large language models, Machine learning, Mixed reality, Virtual Reality, Virtualization
@inproceedings{suzuki_everyday_2025,
title = {Everyday AR through AI-in-the-Loop},
author = {R. Suzuki and M. Gonzalez-Franco and M. Sra and D. Lindlbauer},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005752990&doi=10.1145%2f3706599.3706741&partnerID=40&md5=56b5e447819dde7aa4a29f8e3899e535},
doi = {10.1145/3706599.3706741},
isbn = {979-840071395-8 (ISBN)},
year = {2025},
date = {2025-01-01},
booktitle = {Conf Hum Fact Comput Syst Proc},
publisher = {Association for Computing Machinery},
abstract = {This workshop brings together experts and practitioners from augmented reality (AR) and artificial intelligence (AI) to shape the future of AI-in-the-loop everyday AR experiences. With recent advancements in both AR hardware and AI capabilities, we envision that everyday AR—always-available and seamlessly integrated into users’ daily environments—is becoming increasingly feasible. This workshop will explore how AI can drive such everyday AR experiences. We discuss a range of topics, including adaptive and context-aware AR, generative AR content creation, always-on AI assistants, AI-driven accessible design, and real-world-oriented AI agents. Our goal is to identify the opportunities and challenges in AI-enabled AR, focusing on creating novel AR experiences that seamlessly blend the digital and physical worlds. Through the workshop, we aim to foster collaboration, inspire future research, and build a community to advance the research field of AI-enhanced AR. © 2025 Copyright held by the owner/author(s).},
keywords = {Augmented Reality, Augmented reality content, Augmented reality hardware, Computer vision, Content creation, Context-Aware, Generative AI, Human-AI Interaction, Language Model, Large language models, Machine learning, Mixed reality, Virtual Reality, Virtualization},
pubstate = {published},
tppubtype = {inproceedings}
}
Shawash, J.; Thibault, M.; Hamari, J.
Who Killed Helene Pumpulivaara?: AI-Assisted Content Creation and XR Implementation for Interactive Built Heritage Storytelling Proceedings Article
In: IMX - Proc. ACM Int. Conf. Interact. Media Experiences, pp. 377–379, Association for Computing Machinery, Inc, 2025, ISBN: 979-840071391-0.
Abstract | Links | BibTeX | Tags: Artificial intelligence, Augmented Reality, Built heritage, Content creation, Digital heritage, Digital Interpretation, Extended reality, Human computer interaction, Human engineering, Industrial Heritage, Interactive computer graphics, Interactive computer systems, Mobile photography, Narrative Design, Production pipelines, Uncanny valley, Virtual Reality
@inproceedings{shawash_who_2025,
title = {Who Killed Helene Pumpulivaara?: AI-Assisted Content Creation and XR Implementation for Interactive Built Heritage Storytelling},
author = {J. Shawash and M. Thibault and J. Hamari},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105008003446&doi=10.1145%2f3706370.3731703&partnerID=40&md5=bc8a8d221abcf6c560446979fbd06cbc},
doi = {10.1145/3706370.3731703},
isbn = {979-840071391-0 (ISBN)},
year = {2025},
date = {2025-01-01},
booktitle = {IMX - Proc. ACM Int. Conf. Interact. Media Experiences},
pages = {377–379},
publisher = {Association for Computing Machinery, Inc},
abstract = {This demo presents "Who Killed Helene Pumpulivaara?", an innovative interactive heritage experience that combines a crime mystery narrative with XR technology to address key challenges in digital heritage interpretation. Our work makes six significant contributions: (1) the discovery of a "Historical Uncanny Valley" effect, where varying fidelity levels between AI-generated and authentic content serve as implicit markers distinguishing fact from interpretation; (2) an accessible production pipeline combining mobile photography with AI tools that democratizes XR heritage creation for resource-limited institutions; (3) a spatial storytelling approach that effectively counters decontextualization in digital heritage; (4) a multi-platform implementation strategy across web and VR environments; (5) a practical model for AI-assisted heritage content creation balancing authenticity with engagement; and (6) a pathway toward spatial augmented reality for future heritage interpretation. Using the historic Finlayson Factory in Tampere, Finland as a case study, our implementation demonstrates how emerging technologies can enrich the authenticity of heritage experiences, fostering deeper emotional connections between visitors and the histories embedded in place. © 2025 Copyright held by the owner/author(s).},
keywords = {Artificial intelligence, Augmented Reality, Built heritage, Content creation, Digital heritage, Digital Interpretation, Extended reality, Human computer interaction, Human engineering, Industrial Heritage, Interactive computer graphics, Interactive computer systems, Mobile photography, Narrative Design, Production pipelines, Uncanny valley, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
2024
Behravan, M.; Gracanin, D.
Generative Multi-Modal Artificial Intelligence for Dynamic Real-Time Context-Aware Content Creation in Augmented Reality Proceedings Article
In: Spencer, S.N. (Ed.): Proc. ACM Symp. Virtual Reality Softw. Technol. VRST, Association for Computing Machinery, 2024, ISBN: 979-840070535-9.
Abstract | Links | BibTeX | Tags: 3D object, 3D Object Generation, Augmented Reality, Content creation, Context-Aware, Generative adversarial networks, Generative AI, Language Model, Multi-modal, Real-time, Time contexts, Vision language models, Visual languages
@inproceedings{behravan_generative_2024,
title = {Generative Multi-Modal Artificial Intelligence for Dynamic Real-Time Context-Aware Content Creation in Augmented Reality},
author = {M. Behravan and D. Gracanin},
editor = {S.N. Spencer},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85212524068&doi=10.1145%2f3641825.3689685&partnerID=40&md5=daf8aa8960d9dd4dbdbf67ccb1e7fb83},
doi = {10.1145/3641825.3689685},
isbn = {979-840070535-9 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. ACM Symp. Virtual Reality Softw. Technol. VRST},
publisher = {Association for Computing Machinery},
abstract = {We introduce a framework that uses generative Artificial Intelligence (AI) for dynamic and context-aware content creation in Augmented Reality (AR). By integrating Vision Language Models (VLMs), our system detects and understands the physical space around the user, recommending contextually relevant objects. These objects are transformed into 3D models using text-to-3D generative AI techniques, allowing for real-time content inclusion within the AR space. This approach enhances user experience by enabling intuitive customization through spoken commands, while reducing costs and improving accessibility to advanced AR interactions. The framework's vision and language capabilities support the generation of comprehensive and context-specific 3D objects. © 2024 Owner/Author.},
keywords = {3D object, 3D Object Generation, Augmented Reality, Content creation, Context-Aware, Generative adversarial networks, Generative AI, Language Model, Multi-modal, Real-time, Time contexts, Vision language models, Visual languages},
pubstate = {published},
tppubtype = {inproceedings}
}
He, K.; Yao, K.; Zhang, Q.; Yu, J.; Liu, L.; Xu, L.
DressCode: Autoregressively Sewing and Generating Garments from Text Guidance Journal Article
In: ACM Transactions on Graphics, vol. 43, no. 4, 2024, ISSN: 0730-0301.
Abstract | Links | BibTeX | Tags: 3D content, 3D garments, Autoregressive models, Content creation, Digital humans, Embeddings, Fashion design, Garment generation, Interactive computer graphics, Sewing patterns, Textures, Virtual Reality, Virtual Try-On
@article{he_dresscode_2024,
title = {DressCode: Autoregressively Sewing and Generating Garments from Text Guidance},
author = {K. He and K. Yao and Q. Zhang and J. Yu and L. Liu and L. Xu},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199257820&doi=10.1145%2f3658147&partnerID=40&md5=8996e62e4d9dabb5a7034f8bf4df5a43},
doi = {10.1145/3658147},
issn = {0730-0301},
year = {2024},
date = {2024-01-01},
journal = {ACM Transactions on Graphics},
volume = {43},
number = {4},
abstract = {Apparel's significant role in human appearance underscores the importance of garment digitalization for digital human creation. Recent advances in 3D content creation are pivotal for digital human creation. Nonetheless, garment generation from text guidance is still nascent. We introduce a text-driven 3D garment generation framework, DressCode, which aims to democratize design for novices and offer immense potential in fashion design, virtual try-on, and digital human creation. We first introduce SewingGPT, a GPT-based architecture integrating cross-attention with text-conditioned embedding to generate sewing patterns with text guidance. We then tailor a pre-trained Stable Diffusion to generate tile-based Physically-based Rendering (PBR) textures for the garments. By leveraging a large language model, our framework generates CG-friendly garments through natural language interaction. It also facilitates pattern completion and texture editing, streamlining the design process through user-friendly interaction. This framework fosters innovation by allowing creators to freely experiment with designs and incorporate unique elements into their work. With comprehensive evaluations and comparisons with other state-of-the-art methods, our method showcases superior quality and alignment with input prompts. User studies further validate our high-quality rendering results, highlighting its practical utility and potential in production settings. Copyright © 2024 held by the owner/author(s).},
keywords = {3D content, 3D garments, Autoregressive models, Content creation, Digital humans, Embeddings, Fashion design, Garment generation, Interactive computer graphics, Sewing patterns, Textures, Virtual Reality, Virtual Try-On},
pubstate = {published},
tppubtype = {article}
}
Yang, S.; Tsui, Y. H.; Wang, X.; Alhilal, A.; Mogavi, R. H.; Wang, X.; Hui, P.
From Prompt to Metaverse: User Perceptions of Personalized Spaces Crafted by Generative AI Proceedings Article
In: Bernstein, M.; Bruckman, A.; Gadiraju, U.; Halfaker, A.; Ma, X.; Pinatti, F.; Redi, M.; Ribes, D.; Savage, S.; Zhang, A. (Eds.): Proc. ACM Conf. Comput. Support. Coop. Work CSCW, pp. 497–504, Association for Computing Machinery, 2024, ISBN: 979-840071114-5.
Abstract | Links | BibTeX | Tags: AI-generated content, Content creation, Generation tools, Generative adversarial networks, Generative artificial intelligence, HCI, Metaverse, Personalization, Space generation, User perceptions, Virtual environments, Virtual Reality, Virtual spaces
@inproceedings{yang_prompt_2024,
title = {From Prompt to Metaverse: User Perceptions of Personalized Spaces Crafted by Generative AI},
author = {S. Yang and Y. H. Tsui and X. Wang and A. Alhilal and R. H. Mogavi and X. Wang and P. Hui},
editor = {M. Bernstein and A. Bruckman and U. Gadiraju and A. Halfaker and X. Ma and F. Pinatti and M. Redi and D. Ribes and S. Savage and A. Zhang},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85214583858&doi=10.1145%2f3678884.3681897&partnerID=40&md5=c914b32a0cee1520712062e6ec35eb3a},
doi = {10.1145/3678884.3681897},
isbn = {979-840071114-5 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. ACM Conf. Comput. Support. Coop. Work CSCW},
pages = {497–504},
publisher = {Association for Computing Machinery},
abstract = {Generative artificial intelligence (AI) has revolutionized content creation. In parallel, the Metaverse has emerged to transcend the constraints of our physical reality. While generative AI has a multitude of exciting applications in the fields of writing, coding, and graphic design, its use to personalize our virtual spaces has not yet been explored. In this paper, we investigate the application of Artificial Intelligence Generated Content (AIGC) to personalize our virtual spaces and enhance the metaverse experience. To this end, we present a pipeline that enables users to customize their virtual spaces. Moreover, we explore the hardware resources and latency required for personalized spaces, as well as user acceptance of the AI-generated spaces. Comprehensive user studies follow extensive system experiments. Our research evaluates users' perceptions of two kinds of generated spaces: panoramic images and 3D virtual spaces. According to our findings, users have shown great interest in 3D personalized spaces, and the practicality and immersion of 3D space generation tools surpass those of panoramic space generation tools. © 2024 ACM.},
keywords = {AI-generated content, Content creation, Generation tools, Generative adversarial networks, Generative artificial intelligence, HCI, Metaverse, Personalization, Space generation, User perceptions, Virtual environments, Virtual Reality, Virtual spaces},
pubstate = {published},
tppubtype = {inproceedings}
}
2023
Basyoni, L.; Qadir, J.
AI Generated Content in the Metaverse: Risks and Mitigation Strategies Proceedings Article
In: Int. Symp. Networks, Comput. Commun., ISNCC, Institute of Electrical and Electronics Engineers Inc., 2023, ISBN: 979-835033559-0.
Abstract | Links | BibTeX | Tags: AI-generated content, Content creation, Content integrity, Detection methods, Metaverse, Mitigation strategy, Privacy threats, Risk strategies, Security and privacy, Security threats
@inproceedings{basyoni_ai_2023,
title = {AI Generated Content in the Metaverse: Risks and Mitigation Strategies},
author = {L. Basyoni and J. Qadir},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85179838633&doi=10.1109%2fISNCC58260.2023.10323860&partnerID=40&md5=8cd3943a54210ed3027f856436c687b8},
doi = {10.1109/ISNCC58260.2023.10323860},
isbn = {979-835033559-0 (ISBN)},
year = {2023},
date = {2023-01-01},
booktitle = {Int. Symp. Networks, Comput. Commun., ISNCC},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {The Metaverse has introduced a vast virtual environment where Artificial Intelligence Generated Content (AIGC) plays a crucial role in content creation. However, the increasing popularity of AIGC has raised concerns regarding its potential misuse and the need for effective detection methods to ensure content integrity. Security and privacy threats in the Metaverse have garnered significant attention, but the specific risks of AIGC in this context remain understudied. This paper aims to analyze the potential risks of AIGC in the Metaverse and explore mitigation techniques. By addressing these challenges, the paper contributes to a comprehensive understanding of AIGC's implications in the Metaverse. © 2023 IEEE.},
keywords = {AI-generated content, Content creation, Content integrity, Detection methods, Metaverse, Mitigation strategy, Privacy threats, Risk strategies, Security and privacy, Security threats},
pubstate = {published},
tppubtype = {inproceedings}
}