AHCI RESEARCH GROUP
Publications
Papers published in international journals, conference and workshop proceedings, and books.
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Arai, K.
Digital Twin Model from Freehanded Sketch to Facade Design, 2D-3D Conversion for Volume Design Journal Article
In: International Journal of Advanced Computer Science and Applications, vol. 16, no. 1, pp. 88–95, 2025, ISSN: 2158-107X.
Abstract | Links | BibTeX | Tags: 2D/3D conversion, AI, Architectural design, BIM, Digital Twins, Facade design, Facades, GauGAN, Generative AI, GeoTiff, GIS, IFC format, Metaverse, Metaverses, SketchUp, TriPo, Volume design, Volume Rendering
@article{arai_digital_2025,
title = {Digital Twin Model from Freehanded Sketch to Facade Design, 2D-3D Conversion for Volume Design},
author = {K. Arai},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85216872163&doi=10.14569%2fIJACSA.2025.0160109&partnerID=40&md5=fd4e69f9b20d86e3b5d07b4cdcb00b2d},
doi = {10.14569/IJACSA.2025.0160109},
issn = {2158-107X},
year = {2025},
date = {2025-01-01},
journal = {International Journal of Advanced Computer Science and Applications},
volume = {16},
number = {1},
pages = {88–95},
abstract = {The article proposes a method for creating digital twins from freehand sketches for facade design, converting 2D designs to 3D volumes, and integrating these designs into real-world GIS systems. It outlines a process that involves generating 2D exterior images from sketches using generative AI (Gemini 1.5 Pro), converting these 2D images into 3D models with TriPo, and creating design drawings with SketchUp. Additionally, it describes a method for creating 3D exterior images using GauGAN, all for the purpose of construction exterior evaluation. The paper also discusses generating BIM data using generative AI, converting BIM data (in IFC file format) to GeoTiff, and displaying this information in GIS using QGIS software. Moreover, it suggests a method for generating digital twins with SketchUp to facilitate digital design information sharing and simulation within a virtual space. Lastly, it advocates for a cost-effective AI system designed for small and medium-sized construction companies, which often struggle to adopt BIM, to harness the advantages of digital twins. © (2025), (Science and Information Organization). All rights reserved.},
keywords = {2D/3D conversion, AI, Architectural design, BIM, Digital Twins, Facade design, Facades, GauGAN, Generative AI, GeoTiff, GIS, IFC format, Metaverse, Metaverses, SketchUp, TriPo, Volume design, Volume Rendering},
pubstate = {published},
tppubtype = {article}
}
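As an aside on the workflow this abstract outlines, the IFC-to-GeoTiff step can be illustrated in a few lines of Python. The sketch below is not the paper's code; it rasterizes the wall footprint of an IFC model into a GeoTiff that QGIS can overlay, and the file names, ground resolution and coordinate reference system are all assumptions.

# Hedged sketch (not from the paper): rasterize an IFC building footprint to GeoTiff
# so it can be overlaid in QGIS, roughly matching the IFC -> GeoTiff -> GIS step
# described in the abstract. File names, resolution and CRS are assumptions.
import numpy as np
import ifcopenshell
import ifcopenshell.geom
import rasterio
from rasterio.transform import from_origin

model = ifcopenshell.open("building.ifc")          # assumed input file
settings = ifcopenshell.geom.settings()
settings.set(settings.USE_WORLD_COORDS, True)

# Collect XY vertices of every wall to approximate the footprint extent.
xs, ys = [], []
for wall in model.by_type("IfcWall"):
    shape = ifcopenshell.geom.create_shape(settings, wall)
    verts = shape.geometry.verts                   # flat list [x0, y0, z0, x1, ...]
    xs.extend(verts[0::3])
    ys.extend(verts[1::3])

res = 0.1                                          # assumed 10 cm ground resolution
width = int((max(xs) - min(xs)) / res) + 1
height = int((max(ys) - min(ys)) / res) + 1
grid = np.zeros((height, width), dtype="uint8")
for x, y in zip(xs, ys):
    col = int((x - min(xs)) / res)
    row = int((max(ys) - y) / res)
    grid[row, col] = 1                             # mark cells touched by wall vertices

transform = from_origin(min(xs), max(ys), res, res)
with rasterio.open("footprint.tif", "w", driver="GTiff", height=height,
                   width=width, count=1, dtype="uint8",
                   crs="EPSG:32654", transform=transform) as dst:  # assumed CRS
    dst.write(grid, 1)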
2024
Kim, S. J.; Cao, D. D.; Spinola, F.; Lee, S. J.; Cho, K. S.
RoomRecon: High-Quality Textured Room Layout Reconstruction on Mobile Devices Proceedings Article
In: Eck, U.; Sra, M.; Stefanucci, J.; Sugimoto, M.; Tatzgern, M.; Williams, I. (Ed.): Proc. - IEEE Int. Symp. Mixed Augment. Real., ISMAR, pp. 544–553, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-833151647-5.
Abstract | Links | BibTeX | Tags: 3D modeling, 3D models, 3D reconstruction, 3d-modeling, AR-assisted image capturing, Architectural design, Augmented Reality, Augmented reality-assisted image capturing, Image capturing, Indoor 3D reconstruction, Indoor space, Mobile application, Mobile Applications, Mortar, Room layout, Texturing, Texturing quality
@inproceedings{kim_roomrecon_2024,
title = {RoomRecon: High-Quality Textured Room Layout Reconstruction on Mobile Devices},
author = {S. J. Kim and D. D. Cao and F. Spinola and S. J. Lee and K. S. Cho},
editor = {Eck, U. and Sra, M. and Stefanucci, J. and Sugimoto, M. and Tatzgern, M. and Williams, I.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85213494599&doi=10.1109%2fISMAR62088.2024.00069&partnerID=40&md5=0f6b9d4c44d9c55cafba7ad76651ea07},
doi = {10.1109/ISMAR62088.2024.00069},
isbn = {979-833151647-5},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. - IEEE Int. Symp. Mixed Augment. Real., ISMAR},
pages = {544–553},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Widespread RGB-Depth (RGB-D) sensors and advanced 3D reconstruction technologies facilitate the capture of indoor spaces, improving the fields of augmented reality (AR), virtual reality (VR), and extended reality (XR). Nevertheless, current technologies still face limitations, such as the inability to reflect minor scene changes without a complete recapture, the lack of semantic scene understanding, and various texturing challenges that affect the 3D model's visual quality. These issues affect the realism required for VR experiences and other applications such as in interior design and real estate. To address these challenges, we introduce RoomRecon, an interactive, real-time scanning and texturing pipeline for 3D room models. We propose a two-phase texturing pipeline that integrates AR-guided image capturing for texturing and generative AI models to improve texturing quality and provide better replicas of indoor spaces. Moreover, we suggest to focus only on permanent room elements such as walls, floors, and ceilings, to allow for easily customizable 3D models. We conduct experiments in a variety of indoor spaces to assess the texturing quality and speed of our method. The quantitative results and user study demonstrate that RoomRecon surpasses state-of-the-art methods in terms of texturing quality and on-device computation time. © 2024 IEEE.},
keywords = {3D modeling, 3D models, 3D reconstruction, 3d-modeling, AR-assisted image capturing, Architectural design, Augmented Reality, Augmented reality-assisted image capturing, Image capturing, Indoor 3D reconstruction, Indoor space, Mobile application, Mobile Applications, Mortar, Room layout, Texturing, Texturing quality},
pubstate = {published},
tppubtype = {inproceedings}
}
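For readers curious about the texturing step described above, the following Python sketch shows the basic operation of rectifying a captured photo of a flat room element into a texture using a four-point homography. It is a minimal illustration, not RoomRecon's pipeline; the corner coordinates, image files and texture size are assumptions.

# Hedged sketch (not RoomRecon's code): warp a captured photo of a wall onto a
# rectangular texture via a homography, the basic operation behind texturing flat
# room elements such as walls, floors and ceilings.
import cv2
import numpy as np

photo = cv2.imread("wall_capture.jpg")             # assumed AR-captured image
# Pixel coordinates of the wall's four corners in the photo (assumed / user-picked).
src = np.float32([[120, 80], [980, 60], [1010, 690], [100, 710]])
# Target texture size in pixels, e.g. 1 px per mm of a 4.0 m x 2.7 m wall.
tex_w, tex_h = 4000, 2700
dst = np.float32([[0, 0], [tex_w, 0], [tex_w, tex_h], [0, tex_h]])

H = cv2.getPerspectiveTransform(src, dst)          # 4-point homography
texture = cv2.warpPerspective(photo, H, (tex_w, tex_h))
cv2.imwrite("wall_texture.png", texture)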
Chen, X.; Gao, W.; Chu, Y.; Song, Y.
Enhancing interaction in virtual-real architectural environments: A comparative analysis of generative AI-driven reality approaches Journal Article
In: Building and Environment, vol. 266, 2024, ISSN: 0360-1323.
Abstract | Links | BibTeX | Tags: Architectural design, Architectural environment, Architectural environments, Artificial intelligence, cluster analysis, Comparative analyzes, comparative study, Computational design, Generative adversarial networks, Generative AI, generative artificial intelligence, Mixed reality, Real time interactions, Real-space, Unity3d, Virtual addresses, Virtual environments, Virtual Reality, Virtual spaces, Work-flows
@article{chen_enhancing_2024,
title = {Enhancing interaction in virtual-real architectural environments: A comparative analysis of generative AI-driven reality approaches},
author = {X. Chen and W. Gao and Y. Chu and Y. Song},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85205298350&doi=10.1016%2fj.buildenv.2024.112113&partnerID=40&md5=8c7d4f5477e25b021dfc5e013a851620},
doi = {10.1016/j.buildenv.2024.112113},
issn = {0360-1323},
year = {2024},
date = {2024-01-01},
journal = {Building and Environment},
volume = {266},
abstract = {The architectural environment is expanding into digital, virtual, and informational dimensions, introducing challenges in virtual-real space interaction. Traditional design methods struggle with real-time interaction, integration with existing workflows, and rapid space modification. To address these issues, we present a generative design method that enables symbiotic interaction between virtual and real spaces using Mixed Reality (MR) and Generative Artificial Intelligence (AI) technologies. We developed two approaches: one using the Rhino modeling platform and the other based on the Unity3D game engine, tailored to different application needs. User experience testing in exhibition, leisure, and residential spaces evaluated our method's effectiveness. Results showed significant improvements in design flexibility, interactive efficiency, and user satisfaction. In the exhibition scenario, the Unity3D-based method excelled in rapid design modifications and immersive experiences. Questionnaire data indicated that MR offers good visual comfort and higher immersion than VR, effectively supporting architects in interface and scale design. Clustering analysis of participants' position and gaze data revealed diverse behavioral patterns in the virtual-physical exhibition space, providing insights for optimizing spatial layouts and interaction methods. Our findings suggest that the generative AI-driven MR method simplifies traditional design processes by enabling real-time modification and interaction with spatial interfaces through simple verbal and motion interactions. This approach streamlines workflows by reducing steps like measuring, modeling, and rendering, while enhancing user engagement and creativity. Overall, this method offers new possibilities for experiential exhibition and architectural design, contributing to future environments where virtual and real spaces coexist seamlessly. © 2024},
keywords = {Architectural design, Architectural environment, Architectural environments, Artificial intelligence, cluster analysis, Comparative analyzes, comparative study, Computational design, Generative adversarial networks, Generative AI, generative artificial intelligence, Mixed reality, Real time interactions, Real-space, Unity3d, Virtual addresses, Virtual environments, Virtual Reality, Virtual spaces, Work-flows},
pubstate = {published},
tppubtype = {article}
}
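The clustering of participants' position and gaze data mentioned in the abstract can be illustrated with a short Python sketch. This is not the authors' analysis code; the CSV layout and the number of clusters are assumptions.

# Hedged sketch: k-means clustering of logged position/gaze samples, the kind of
# clustering the abstract uses to find behavioural patterns in the exhibition space.
import numpy as np
from sklearn.cluster import KMeans

# Assumed log format: one row per sample with columns x, y, gaze_x, gaze_y.
samples = np.loadtxt("tracking_log.csv", delimiter=",", skiprows=1)

kmeans = KMeans(n_clusters=4, n_init=10, random_state=0).fit(samples)
for label in range(4):
    cluster = samples[kmeans.labels_ == label]
    print(f"cluster {label}: {len(cluster)} samples, centroid {kmeans.cluster_centers_[label]}")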
2023
Yeo, J. Q.; Wang, Y.; Tanary, S.; Cheng, J.; Lau, M.; Ng, A. B.; Guan, F.
AICRID: AI-Empowered CR For Interior Design Proceedings Article
In: Bruder, G.; Olivier, A.H.; Cunningham, A.; Peng, E.Y.; Grubert, J.; Williams, I. (Ed.): Proc. - IEEE Int. Symp. Mixed Augment. Real. Adjunct, ISMAR-Adjunct, pp. 837–841, Institute of Electrical and Electronics Engineers Inc., 2023, ISBN: 979-835032891-2.
Abstract | Links | BibTeX | Tags: 3D modeling, 3D models, 3d-modeling, Architectural design, Artificial intelligence, Artificial intelligence technologies, Augmented Reality, Augmented reality technology, Interior Design, Interior designs, machine learning, Machine-learning, Model generation, Novel design, Text images, User need, Visualization
@inproceedings{yeo_aicrid_2023,
title = {AICRID: AI-Empowered CR For Interior Design},
author = {J. Q. Yeo and Y. Wang and S. Tanary and J. Cheng and M. Lau and A. B. Ng and F. Guan},
editor = {Bruder, G. and Olivier, A.H. and Cunningham, A. and Peng, E.Y. and Grubert, J. and Williams, I.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85180375829&doi=10.1109%2fISMAR-Adjunct60411.2023.00184&partnerID=40&md5=b14d89dbd38a4dfe3f85b90800d42e78},
doi = {10.1109/ISMAR-Adjunct60411.2023.00184},
isbn = {979-835032891-2},
year = {2023},
date = {2023-01-01},
booktitle = {Proc. - IEEE Int. Symp. Mixed Augment. Real. Adjunct, ISMAR-Adjunct},
pages = {837–841},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Augmented Reality (AR) technologies have been utilized for interior design for years. Normally 3D furniture models need to be created manually or by scanning with specialized devices and this is usually a costly process. Additionally, users need controllers or hands for manipulating the virtual furniture which may lead to fatigue for long-time usage. Artificial Intelligence (AI) technologies have made it possible to generate 3D models from texts, images or both and show potential to automate interactions through the user's voice. We propose a novel design, AICRID in short, which aims to automate the 3D model generation and to facilitate the interactions for interior design AR by leveraging on AI technologies. Specifically, our design will allow the users to directly generate 3D furniture models with generative AI, enabling them to directly interact with the virtual objects through their voices. © 2023 IEEE.},
keywords = {3D modeling, 3D models, 3d-modeling, Architectural design, Artificial intelligence, Artificial intelligence technologies, Augmented Reality, Augmented reality technology, Interior Design, Interior designs, machine learning, Machine-learning, Model generation, Novel design, Text images, User need, Visualization},
pubstate = {published},
tppubtype = {inproceedings}
}
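A minimal sketch of the voice-driven generation loop described above might look as follows. The speech-to-text call uses the real SpeechRecognition library, while generate_furniture_model() is a hypothetical stand-in for whatever text-to-3D backend such a system would use.

# Hedged sketch (not the AICRID implementation): capture a spoken furniture request
# and hand the transcript to a text-to-3D generator.
import speech_recognition as sr

def generate_furniture_model(prompt: str) -> str:
    """Hypothetical stand-in: a real system would call a text-to-3D model here."""
    print("would generate a 3D model for:", prompt)
    return "furniture_placeholder.obj"             # placeholder mesh path

recognizer = sr.Recognizer()
with sr.Microphone() as source:                    # assumes a microphone is available
    print("Describe the furniture you want...")
    audio = recognizer.listen(source)

prompt = recognizer.recognize_google(audio)        # speech-to-text
mesh_path = generate_furniture_model(prompt)       # e.g. "a low oak coffee table"
print("generated mesh:", mesh_path)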
Vaidhyanathan, V.; Radhakrishnan, T. R.; López, J. L. G.
Spacify: A Generative Framework for Spatial Comprehension, Articulation and Visualization using Large Language Models (LLMs) and eXtended Reality (XR) Proceedings Article
In: Crawford, A.; Diniz, N.M.; Beckett, R.; Vanucchi, J.; Swackhamer, M. (Ed.): Habits Anthropocene: Scarcity Abundance Post-Mater. Econ. - Proc. Annu. Conf. Assoc. Comput. Aided Des. Archit., ACADIA, pp. 430–443, Association for Computer Aided Design in Architecture, 2023, ISBN: 979-898608059-8.
Abstract | Links | BibTeX | Tags: 3D data processing, 3D spaces, Architectural design, Built environment, C (programming language), Computational Linguistics, Computer aided design, Computer architecture, Data handling, Data users, Data visualization, Immersive media, Interior designers, Language Model, Natural languages, Spatial design, Three dimensional computer graphics, Urban designers, User interfaces, Visualization
@inproceedings{vaidhyanathan_spacify_2023,
title = {Spacify: A Generative Framework for Spatial Comprehension, Articulation and Visualization using Large Language Models (LLMs) and eXtended Reality (XR)},
author = {V. Vaidhyanathan and T. R. Radhakrishnan and J. L. G. López},
editor = {Crawford, A. and Diniz, N.M. and Beckett, R. and Vanucchi, J. and Swackhamer, M.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85192831586&partnerID=40&md5=996906de0f5ef1e6c88b10bb65caabc0},
isbn = {979-898608059-8},
year = {2023},
date = {2023-01-01},
booktitle = {Habits Anthropocene: Scarcity Abundance Post-Mater. Econ. - Proc. Annu. Conf. Assoc. Comput. Aided Des. Archit., ACADIA},
volume = {2},
pages = {430–443},
publisher = {Association for Computer Aided Design in Architecture},
abstract = {Spatial design, the thoughtful planning and creation of built environments, typically requires advanced technical knowledge and visuospatial skills, making it largely exclusive to professionals like architects, interior designers, and urban designers. This exclusivity limits non-experts' access to spatial design, despite their ability to describe requirements and suggestions in natural language. Recent advancements in generative artificial intelligence (AI), particularly large language models (LLMs), and extended reality, (XR) offer the potential to address this limitation. This paper introduces Spacify (Figure 1), a framework that utilizes the generalizing capabilities of LLMs, 3D data-processing, and XR interfaces to create an immersive medium for language-driven spatial understanding, design, and visualization for non-experts. This paper describes the five components of Spacify: External Data, User Input, Spatial Interface, Large Language Model, and Current Spatial Design; which enable the use of generative AI models in a) question/ answering about 3D spaces with reasoning, b) (re)generating 3D spatial designs with natural language prompts, and c) visualizing designed 3D spaces with natural language descriptions. An implementation of Spacify is demonstrated via an XR smartphone application, allowing for an end-to-end, language-driven interior design process. User survey results from non-experts redesigning their spaces in 3D using this application suggest that Spacify can make spatial design accessible using natural language prompts, thereby pioneering a new realm of spatial design that is naturally language-driven. © ACADIA 2023. All rights reserved.},
keywords = {3D data processing, 3D spaces, Architectural design, Built environment, C (programming language), Computational Linguistics, Computer aided design, Computer architecture, Data handling, Data users, Data visualization, Immersive media, Interior designers, Language Model, Natural languages, Spatial design, Three dimensional computer graphics, Urban designers, User interfaces, Visualization},
pubstate = {published},
tppubtype = {inproceedings}
}
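The question/answering-about-3D-spaces component can be approximated with a few lines of Python that serialize a toy room as JSON and pass it to an LLM. This is a sketch under assumptions (model name, room schema), not the Spacify implementation, and it requires an OpenAI API key.

# Hedged sketch: answer a natural-language question about a 3D space by letting an
# LLM reason over a JSON description of the room.
import json
from openai import OpenAI

room = {
    "dimensions_m": {"width": 4.2, "depth": 3.5, "height": 2.6},
    "objects": [
        {"name": "sofa", "position_m": [0.5, 0.2], "size_m": [2.0, 0.9]},
        {"name": "desk", "position_m": [3.0, 2.5], "size_m": [1.2, 0.6]},
    ],
}

client = OpenAI()
response = client.chat.completions.create(
    model="gpt-4o-mini",                           # assumed model choice
    messages=[
        {"role": "system",
         "content": "You answer questions about this room: " + json.dumps(room)},
        {"role": "user",
         "content": "Is there enough free floor space to add a 1.6 m wide bookshelf?"},
    ],
)
print(response.choices[0].message.content)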
Le, M. -H.; Chu, C. -B.; Le, K. -D.; Nguyen, T. V.; Tran, M. -T.; Le, T. -N.
VIDES: Virtual Interior Design via Natural Language and Visual Guidance Proceedings Article
In: Bruder, G.; Olivier, A.H.; Cunningham, A.; Peng, E.Y.; Grubert, J.; Williams, I. (Ed.): Proc. - IEEE Int. Symp. Mixed Augment. Real. Adjunct, ISMAR-Adjunct, pp. 689–694, Institute of Electrical and Electronics Engineers Inc., 2023, ISBN: 979-835032891-2.
Abstract | Links | BibTeX | Tags: Architectural design, Customisation, Cutting edge technology, Design concept, Design systems, Image editing, Image generation, Image generations, Indoor space, Interior Design, Interior designs, Interiors (building), Natural languages, Virtual Reality, Visual guidance, Visual languages
@inproceedings{le_vides_2023,
title = {VIDES: Virtual Interior Design via Natural Language and Visual Guidance},
author = {M. -H. Le and C. -B. Chu and K. -D. Le and T. V. Nguyen and M. -T. Tran and T. -N. Le},
editor = {Bruder, G. and Olivier, A.H. and Cunningham, A. and Peng, E.Y. and Grubert, J. and Williams, I.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85180376943&doi=10.1109%2fISMAR-Adjunct60411.2023.00148&partnerID=40&md5=5ce45d9e97fc5a9fdc31eb7514b3def3},
doi = {10.1109/ISMAR-Adjunct60411.2023.00148},
isbn = {979-835032891-2},
year = {2023},
date = {2023-01-01},
booktitle = {Proc. - IEEE Int. Symp. Mixed Augment. Real. Adjunct, ISMAR-Adjunct},
pages = {689–694},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Interior design is crucial in creating aesthetically pleasing and functional indoor spaces. However, developing and editing interior design concepts requires significant time and expertise. We propose Virtual Interior DESign (VIDES) system in response to this challenge. Leveraging cutting-edge technology in generative AI, our system can assist users in generating and editing indoor scene concepts quickly, given user text description and visual guidance. Using both visual guidance and language as the conditional inputs significantly enhances the accuracy and coherence of the generated scenes, resulting in visually appealing designs. Through extensive experimentation, we demonstrate the effectiveness of VIDES in developing new indoor concepts, changing indoor styles, and replacing and removing interior objects. The system successfully captures the essence of users' descriptions while providing flexibility for customization. Consequently, this system can potentially reduce the entry barrier for indoor design, making it more accessible to users with limited technical skills and reducing the time required to create high-quality images. Individuals who have a background in design can now easily communicate their ideas visually and effectively present their design concepts. © 2023 IEEE.},
keywords = {Architectural design, Customisation, Cutting edge technology, Design concept, Design systems, Image editing, Image generation, Image generations, Indoor space, Interior Design, Interior designs, Interiors (building), Natural languages, Virtual Reality, Visual guidance, Visual languages},
pubstate = {published},
tppubtype = {inproceedings}
}
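The replace-and-edit use case described above resembles text-guided inpainting. Below is a minimal Python sketch using a Stable Diffusion inpainting pipeline from the diffusers library; it is not the VIDES system, and the model id, file names and prompt are assumptions.

# Hedged sketch: text-plus-mask editing of an indoor photo, analogous to the
# "replace and remove interior objects" use case in the abstract.
import torch
from PIL import Image
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
).to("cuda")

room = Image.open("living_room.png").convert("RGB").resize((512, 512))
mask = Image.open("sofa_mask.png").convert("RGB").resize((512, 512))  # white = edit region

result = pipe(
    prompt="a minimalist light-grey fabric armchair, Scandinavian style",
    image=room,
    mask_image=mask,
).images[0]
result.save("living_room_edited.png")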
2018
Scianna, Andrea; Gaglio, Giuseppe Fulvio; Guardia, Marcello La
BIM Modelling of Ancient Buildings Journal Article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 11196 LNCS, pp. 344–355, 2018, (Publisher: Springer Verlag).
Abstract | Links | BibTeX | Tags: 3D Modelling, Archaeological Site, Archaeology, Architectural design, Building components, Computer aided design, Cultural heritage, Cultural heritages, Electronic data interchange, Geo-spatial, HBIM, Historic Preservation, Information Management, Information Systems, Semantics, Structural design, Surveying, Three dimensional computer graphics
@article{scianna_bim_2018,
title = {BIM Modelling of Ancient Buildings},
author = {Andrea Scianna and Giuseppe Fulvio Gaglio and Marcello La Guardia},
editor = {Wallace, M. and Brumana, R. and Fink, E.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85055423142&doi=10.1007%2f978-3-030-01762-0_29&partnerID=40&md5=4803fe8c1e8d844de2a786296d5e530a},
doi = {10.1007/978-3-030-01762-0_29},
year = {2018},
date = {2018-01-01},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
volume = {11196 LNCS},
pages = {344–355},
abstract = {In the last years, new procedures on design and management of constructions, based on 3D standardised models of building elements, have been proposed. It’s the case of Building Information Modelling (BIM) applications, that, differently from CAD ones, allow to work with libraries of 3D parametrical objects (smart objects) describing geometric, structural and material properties of building elements. This methodology is based on the Industry Foundation Classes (IFC) model, that represents a global standard for the building data exchange. Initially used for the design of new architectures, BIM methodology has been even more considered also for the management and the conservation of historical buildings, thanks to the possibilities of implementation of semantic information of 3D objects, guaranteed by the connection with the external database. At the same time, the lack of regular surfaces and standardised objects are relevant problems that nowadays strongly limit the use of BIM for Cultural Heritage (CH). Anyway, in recent times, the study of parameterised objects has opened new perspectives for BIM applications on historical buildings (HBIM). The present work shows the last achievements on this topic, focusing the problems derived from the application of BIM methodology to CH. In fact, the irregular shape of ancient architectural components, the wide variety of architectural languages that characterise historical buildings, the presence, sometimes, of different stratifications, are clear examples of the difficulties of implementing HBIM methodology for CH. © 2018, Springer Nature Switzerland AG.},
note = {Publisher: Springer Verlag},
keywords = {3D Modelling, Archaeological Site, Archaeology, Architectural design, Building components, Computer aided design, Cultural heritage, Cultural heritages, Electronic data interchange, Geo-spatial, HBIM, Historic Preservation, Information Management, Information Systems, Semantics, Structural design, Surveying, Three dimensional computer graphics},
pubstate = {published},
tppubtype = {article}
}
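The semantic information the abstract refers to is typically carried in IFC property sets. A minimal Python sketch for reading it with ifcopenshell (not taken from the paper; the file name is assumed) follows.

# Hedged sketch: read the semantic property sets attached to IFC building elements,
# the kind of data an HBIM model of a historical building would carry.
import ifcopenshell
import ifcopenshell.util.element

model = ifcopenshell.open("historic_building.ifc")
for element in model.by_type("IfcBuildingElement"):
    psets = ifcopenshell.util.element.get_psets(element)   # {pset name: {prop: value}}
    print(element.is_a(), element.Name, psets)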
2014
Scianna, Andrea; Gristina, Susanna; Paliaga, Silvia
Experimental BIM applications in archaeology: A work-flow Journal Article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 8740, pp. 490–498, 2014, (Publisher: Springer Verlag).
Abstract | Links | BibTeX | Tags: Archaeological Site, Archaeology, Architectural design, BIM, Data export, Database systems, History, Semantic Model, Semantic Web, Semantics, Social networking (online), Web documentation
@article{scianna_experimental_2014,
title = {Experimental BIM applications in archaeology: A work-flow},
author = {Andrea Scianna and Susanna Gristina and Silvia Paliaga},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84911945081&doi=10.1007%2f978-3-319-13695-0&partnerID=40&md5=afa2444b3a602b178283e29c86786bd5},
doi = {10.1007/978-3-319-13695-0},
year = {2014},
date = {2014-01-01},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
volume = {8740},
pages = {490–498},
abstract = {In the last few decades various conceptual models, methods and techniques have been studied to allow 3D digital access to Cultural Heritage (CH). Among these is BIM (Building Information Modeling): originally built up for construction projects, it has been already experimented in the CH domain, but not enough in the archaeological field. This paper illustrates a framework to create 3D archaeological models integrated with databases using BIM. The models implemented are queryable by the connection with a Relational Database Management System and sharable on the web. Parametric solid and semantic models are integrated with 3D standardized database models that are finally manageable in the public cloud. The BIM application’s work-flow here described has been experimented on the Roman structures inside the Crypt of St. Sergius and Bacchus Church (Rome). The experiment has highlighted capabilities and limitations of BIM applications in the archaeological domain. © Springer International Publishing Switzerland 2014.},
note = {Publisher: Springer Verlag},
keywords = {Archaeological Site, Archaeology, Architectural design, BIM, Data export, Database systems, History, Semantic Model, Semantic Web, Semantics, Social networking (online), Web documentation},
pubstate = {published},
tppubtype = {article}
}
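The paper's pairing of a BIM model with a relational database can be illustrated with a short Python sketch that dumps IFC element identifiers into SQLite and queries them with SQL. This is not the authors' workflow; SQLite, the file names and the table schema are assumptions.

# Hedged sketch: store IFC element identifiers and types in a small relational
# database so the model can be queried with SQL, echoing the BIM-plus-RDBMS idea.
import sqlite3
import ifcopenshell

model = ifcopenshell.open("crypt_survey.ifc")      # assumed input file
conn = sqlite3.connect("bim_archive.db")
conn.execute("CREATE TABLE IF NOT EXISTS elements (guid TEXT PRIMARY KEY, ifc_type TEXT, name TEXT)")

for element in model.by_type("IfcProduct"):
    conn.execute("INSERT OR REPLACE INTO elements VALUES (?, ?, ?)",
                 (element.GlobalId, element.is_a(), element.Name))
conn.commit()

# Example query: how many elements of each IFC type does the model contain?
for ifc_type, count in conn.execute("SELECT ifc_type, COUNT(*) FROM elements GROUP BY ifc_type"):
    print(ifc_type, count)
conn.close()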