AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTex record for each paper.
2024
Martini, M.; Valentini, V.; Ciprian, A.; Bottino, A.; Iacoviello, R.; Montagnuolo, M.; Messina, A.; Strada, F.; Zappia, D.
Semi-Automated Digital Human Production for Enhanced Media Broadcasting Proceedings Article
In: IEEE Gaming, Entertain., Media Conf., GEM, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835037453-7 (ISBN).
Abstract | Links | BibTeX | Tags: AI automation, Automation, Creation process, Digital humans, Economic and social effects, Extensive explorations, Face reconstruction, Generative AI, Image enhancement, media archive, Media archives, Metaverses, Rendering (computer graphics), Synthetic human, Synthetic Humans, Textures, Three dimensional computer graphics, Virtual production, Virtual Reality
@inproceedings{martini_semi_2024,
  title     = {Semi-Automated Digital Human Production for Enhanced Media Broadcasting},
  author    = {M. Martini and V. Valentini and A. Ciprian and A. Bottino and R. Iacoviello and M. Montagnuolo and A. Messina and F. Strada and D. Zappia},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199536742&doi=10.1109%2fGEM61861.2024.10585601&partnerID=40&md5=3703fba931b02f9615316db8ebbca70c},
  doi       = {10.1109/GEM61861.2024.10585601},
  isbn      = {979-835037453-7},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {IEEE Gaming, Entertain., Media Conf., GEM},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {In recent years, the application of synthetic humans in various fields has attracted considerable attention, leading to extensive exploration of their integration into the Metaverse and virtual production environments. This work presents a semi-automated approach that aims to find a fair trade-off between high-quality outputs and efficient production times. The project focuses on the Rai photo and video archives to find images of target characters for texturing and 3D reconstruction with the goal of reviving Rai's 2D footage and enhance the media experience. A key aspect of this study is to minimize the human intervention, ensuring an efficient, flexible, and scalable creation process. In this work, the improvements have been distributed among different stages of the digital human creation process, starting with the generation of 3D head meshes from 2D images of the reference character and then moving on to the generation, using a Diffusion model, of suitable images for texture development. These assets are then integrated into the Unreal Engine, where a custom widget facilitates posing, rendering, and texturing of Synthetic Humans models. Finally, an in-depth quantitative comparison and subjective tests were carried out between the original character images and the rendered synthetic humans, confirming the validity of the approach. © 2024 IEEE.},
  keywords  = {AI automation, Automation, Creation process, Digital humans, Economic and social effects, Extensive explorations, Face reconstruction, Generative AI, Image enhancement, media archive, Media archives, Metaverses, Rendering (computer graphics), Synthetic human, Synthetic Humans, Textures, Three dimensional computer graphics, Virtual production, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Upadhyay, A.; Dubey, A.; Bhardwaj, N.; Kuriakose, S. M.; Mohan, R.
CIGMA: Automated 3D House Layout Generation through Generative Models Proceedings Article
In: ACM Int. Conf. Proc. Ser., pp. 542–546, Association for Computing Machinery, 2024, ISBN: 979-840071634-8 (ISBN).
Abstract | Links | BibTeX | Tags: 3d house, 3D House Layout, 3D modeling, Floor Plan, Floorplans, Floors, Generative AI, Generative model, Houses, Large datasets, Layout designs, Layout generations, Metaverses, Textures, User constraints, Wall design
@inproceedings{upadhyay_cigma_2024,
  title     = {{CIGMA}: Automated {3D} House Layout Generation through Generative Models},
  author    = {A. Upadhyay and A. Dubey and N. Bhardwaj and S. M. Kuriakose and R. Mohan},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85183577885&doi=10.1145%2f3632410.3632490&partnerID=40&md5=cf0c249faf0ce03590010426e0f6c1e0},
  doi       = {10.1145/3632410.3632490},
  isbn      = {979-840071634-8},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {ACM Int. Conf. Proc. Ser.},
  pages     = {542--546},
  publisher = {Association for Computing Machinery},
  abstract  = {In this work, we introduce CIGMA, a metaverse platform that empowers designers to generate multiple house layout designs using generative models. We propose a generative adversarial network that synthesizes 2D layouts guided by user constraints. Our platform generates 3D views of house layouts and provides users with the ability to customize the 3D house model by generating furniture items and applying various textures for personalized floor and wall designs. We evaluate our approach on a large-scale dataset, RPLAN, consisting of 80,000 real floor plans from residential buildings. The qualitative and quantitative evaluations demonstrate the effectiveness of our approach over the existing baselines. The demo is accessible at https://youtu.be/lgb_V-yZ5lw. © 2024 Owner/Author.},
  keywords  = {3d house, 3D House Layout, 3D modeling, Floor Plan, Floorplans, Floors, Generative AI, Generative model, Houses, Large datasets, Layout designs, Layout generations, Metaverses, Textures, User constraints, Wall design},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
He, K.; Yao, K.; Zhang, Q.; Yu, J.; Liu, L.; Xu, L.
DressCode: Autoregressively Sewing and Generating Garments from Text Guidance Journal Article
In: ACM Transactions on Graphics, vol. 43, no. 4, 2024, ISSN: 07300301 (ISSN).
Abstract | Links | BibTeX | Tags: 3D content, 3d garments, autoregressive model, Autoregressive modelling, Content creation, Digital humans, Embeddings, Fashion design, Garment generation, Interactive computer graphics, Sewing pattern, sewing patterns, Textures, Virtual Reality, Virtual Try-On
@article{he_dresscode_2024,
  title     = {{DressCode}: Autoregressively Sewing and Generating Garments from Text Guidance},
  author    = {K. He and K. Yao and Q. Zhang and J. Yu and L. Liu and L. Xu},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199257820&doi=10.1145%2f3658147&partnerID=40&md5=8996e62e4d9dabb5a7034f8bf4df5a43},
  doi       = {10.1145/3658147},
  issn      = {0730-0301},
  year      = {2024},
  date      = {2024-01-01},
  journal   = {ACM Transactions on Graphics},
  volume    = {43},
  number    = {4},
  abstract  = {Apparel's significant role in human appearance underscores the importance of garment digitalization for digital human creation. Recent advances in 3D content creation are pivotal for digital human creation. Nonetheless, garment generation from text guidance is still nascent. We introduce a text-driven 3D garment generation framework, DressCode, which aims to democratize design for novices and offer immense potential in fashion design, virtual try-on, and digital human creation. We first introduce SewingGPT, a GPT-based architecture integrating cross-attention with text-conditioned embedding to generate sewing patterns with text guidance. We then tailor a pre-trained Stable Diffusion to generate tile-based Physically-based Rendering (PBR) textures for the garments. By leveraging a large language model, our framework generates CG-friendly garments through natural language interaction. It also facilitates pattern completion and texture editing, streamlining the design process through user-friendly interaction. This framework fosters innovation by allowing creators to freely experiment with designs and incorporate unique elements into their work. With comprehensive evaluations and comparisons with other state-of-the-art methods, our method showcases superior quality and alignment with input prompts. User studies further validate our high-quality rendering results, highlighting its practical utility and potential in production settings. Copyright © 2024 held by the owner/author(s).},
  keywords  = {3D content, 3d garments, autoregressive model, Autoregressive modelling, Content creation, Digital humans, Embeddings, Fashion design, Garment generation, Interactive computer graphics, Sewing pattern, sewing patterns, Textures, Virtual Reality, Virtual Try-On},
  pubstate  = {published},
  tppubtype = {article}
}