AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Xi, Z.; Yao, Z.; Huang, J.; Lu, Z. -Q.; Yan, H.; Mu, T. -J.; Wang, Z.; Xu, Q. -C.
TerraCraft: City-scale generative procedural modeling with natural languages Journal Article
In: Graphical Models, vol. 141, 2025, ISSN: 15240703 (ISSN), (Publisher: Elsevier Inc.).
Abstract | Links | BibTeX | Tags: 3D scene generation, 3D scenes, algorithm, Automation, City layout, City scale, data set, Diffusion Model, Game design, Geometry, High quality, Language, Language Model, Large datasets, Large language model, LLMs, Modeling languages, Natural language processing systems, Procedural modeling, Procedural models, Scene Generation, Three dimensional computer graphics, three-dimensional modeling, urban area, Virtual Reality
@comment{Scopus auto-export, cleaned: ISSN normalized to hyphenated form without the
"(ISSN)" annotation; authors converted to unambiguous "Last, First" form (the original
"Z. -Q. Lu" spacing mis-parses); title entered in Title Case with the proper noun
TerraCraft brace-protected against style downcasing. Abstract, keywords, URL, and the
site-specific fields (date, pubstate, tppubtype) are preserved verbatim.}
@article{xi_terracraft_2025,
  title     = {{TerraCraft}: City-Scale Generative Procedural Modeling with Natural Languages},
  author    = {Xi, Z. and Yao, Z. and Huang, J. and Lu, Z.-Q. and Yan, H. and Mu, T.-J. and Wang, Z. and Xu, Q.-C.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105012397682&doi=10.1016%2Fj.gmod.2025.101285&partnerID=40&md5=15a84050280e5015b1f7b1ef40c62100},
  doi       = {10.1016/j.gmod.2025.101285},
  issn      = {1524-0703},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {Graphical Models},
  volume    = {141},
  abstract  = {Automated generation of large-scale 3D scenes presents a significant challenge due to the resource-intensive training and datasets required. This is in sharp contrast to the 2D counterparts that have become readily available due to their superior speed and quality. However, prior work in 3D procedural modeling has demonstrated promise in generating high-quality assets using the combination of algorithms and user-defined rules. To leverage the best of both 2D generative models and procedural modeling tools, we present TerraCraft, a novel framework for generating geometrically high-quality 3D city-scale scenes. By utilizing Large Language Models (LLMs), TerraCraft can generate city-scale 3D scenes from natural text descriptions. With its intuitive operation and powerful capabilities, TerraCraft enables users to easily create geometrically high-quality scenes readily for various applications, such as virtual reality and game design. We validate TerraCraft's effectiveness through extensive experiments and user studies, showing its superior performance compared to existing baselines. © 2025 Elsevier B.V., All rights reserved.},
  note      = {Publisher: Elsevier Inc.},
  keywords  = {3D scene generation, 3D scenes, algorithm, Automation, City layout, City scale, data set, Diffusion Model, Game design, Geometry, High quality, Language, Language Model, Large datasets, Large language model, LLMs, Modeling languages, Natural language processing systems, Procedural modeling, Procedural models, Scene Generation, Three dimensional computer graphics, three-dimensional modeling, urban area, Virtual Reality},
  pubstate  = {published},
  tppubtype = {article}
}
Wei, Q.; Huang, J.; Gao, Y.; Dong, W.
One Model to Fit Them All: Universal IMU-based Human Activity Recognition with LLM-assisted Cross-dataset Representation Journal Article
In: Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies, vol. 9, no. 3, 2025, ISSN: 24749567 (ISSN), (Publisher: Association for Computing Machinery).
Abstract | Links | BibTeX | Tags: Broad application, Contrastive Learning, Cross-dataset, Data collection, Human activity recognition, Human activity recognition systems, Human computer interaction, Intelligent interactions, Language Model, Large datasets, Large language model, large language models, Learning systems, Neural-networks, Pattern recognition, Spatial relationships, Ubiquitous computing, Virtual Reality
@comment{Scopus auto-export, cleaned: ISSN normalized to hyphenated form without the
"(ISSN)" annotation; authors converted to unambiguous "Last, First" form; title entered
in Title Case with the acronyms IMU and LLM brace-protected against style downcasing.
Abstract, keywords, URL, and the site-specific fields (date, pubstate, tppubtype) are
preserved verbatim. NOTE(review): the abstract's trailing "© 2025 Elsevier B.V." looks
like mispasted boilerplate for an ACM venue — confirm against the publisher record.}
@article{wei_one_2025,
  title     = {One Model to Fit Them All: Universal {IMU}-Based Human Activity Recognition with {LLM}-Assisted Cross-Dataset Representation},
  author    = {Wei, Q. and Huang, J. and Gao, Y. and Dong, W.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105015431117&doi=10.1145%2F3749509&partnerID=40&md5=2a6f26a05856c48ba3aaaf356b375dc0},
  doi       = {10.1145/3749509},
  issn      = {2474-9567},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies},
  volume    = {9},
  number    = {3},
  abstract  = {Human Activity Recognition (HAR) is essential for pervasive computing and intelligent interaction, with broad applications across various fields. However, there is still no one model capable of fitting various HAR datasets, severely limiting its applicability in practical scenarios. To address this, we propose oneHAR, an LLM-assisted universal IMU-based HAR system designed to achieve "one model to fit them all" — just one model that can adapt to diverse HAR datasets without any dataset-specific operation. In particular, we propose Cross-Dataset neural network (CDNet) for the "one model," which models both the temporal context and spatial relationships of IMU data to capture cross-dataset representations, encompassing differences in device, participant, data collection position, and environment, etc. Additionally, we introduce LLM-driven data synthesis, which enhances the training process by generating virtual IMU data through three carefully designed strategies. Furthermore, LLM-assisted adaptive position processing optimizes the inference process by flexibly handling a variable combination of positional inputs. Our model demonstrates strong generalization across five public IMU-based HAR datasets, outperforming the best baselines by up to 46.9% in the unseen-dataset scenario, and 6.5% in the cross-dataset scenario. © 2025 Elsevier B.V., All rights reserved.},
  note      = {Publisher: Association for Computing Machinery},
  keywords  = {Broad application, Contrastive Learning, Cross-dataset, Data collection, Human activity recognition, Human activity recognition systems, Human computer interaction, Intelligent interactions, Language Model, Large datasets, Large language model, large language models, Learning systems, Neural-networks, Pattern recognition, Spatial relationships, Ubiquitous computing, Virtual Reality},
  pubstate  = {published},
  tppubtype = {article}
}
2024
Upadhyay, A.; Dubey, A.; Bhardwaj, N.; Kuriakose, S. M.; Mohan, R.
CIGMA: Automated 3D House Layout Generation through Generative Models Proceedings Article
In: ACM Int. Conf. Proc. Ser., pp. 542–546, Association for Computing Machinery, 2024, ISBN: 9798400716348 (ISBN).
Abstract | Links | BibTeX | Tags: 3d house, 3D House Layout, 3D modeling, Floor Plan, Floorplans, Floors, Generative AI, Generative model, Houses, Large datasets, Layout designs, Layout generations, Metaverses, Textures, User constraints, Wall design
@comment{Scopus auto-export, cleaned: the pages range used a literal Unicode en-dash
(breaks classic 8-bit BibTeX) and now uses the standard double hyphen; ISBN stripped of
the "(ISBN)" annotation; authors converted to unambiguous "Last, First" form; CIGMA and
3D brace-protected in the title. NOTE(review): booktitle "ACM Int. Conf. Proc. Ser." is
the Scopus series placeholder, not the actual proceedings title — resolve via the DOI
and replace with the full conference name when confirmed.}
@inproceedings{upadhyay_cigma_2024,
  title     = {{CIGMA}: Automated {3D} House Layout Generation through Generative Models},
  author    = {Upadhyay, A. and Dubey, A. and Bhardwaj, N. and Kuriakose, S. M. and Mohan, R.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85183577885&doi=10.1145%2F3632410.3632490&partnerID=40&md5=1de9026f0986501d81613867321df1e5},
  doi       = {10.1145/3632410.3632490},
  isbn      = {9798400716348},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {ACM Int. Conf. Proc. Ser.},
  pages     = {542--546},
  publisher = {Association for Computing Machinery},
  abstract  = {In this work, we introduce CIGMA, a metaverse platform that empowers designers to generate multiple house layout designs using generative models. We propose a generative adversarial network that synthesizes 2D layouts guided by user constraints. Our platform generates 3D views of house layouts and provides users with the ability to customize the 3D house model by generating furniture items and applying various textures for personalized floor and wall designs. We evaluate our approach on a large-scale dataset, RPLAN, consisting of 80,000 real floor plans from residential buildings. The qualitative and quantitative evaluations demonstrate the effectiveness of our approach over the existing baselines. The demo is accessible at https://youtu.be/lgb_V-yZ5lw. © 2024 Elsevier B.V., All rights reserved.},
  keywords  = {3d house, 3D House Layout, 3D modeling, Floor Plan, Floorplans, Floors, Generative AI, Generative model, Houses, Large datasets, Layout designs, Layout generations, Metaverses, Textures, User constraints, Wall design},
  pubstate  = {published},
  tppubtype = {inproceedings}
}