AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2024
Ding, P.; Liu, J.; Sun, M.; Li, L.; Liu, H.
Enhancing Computational Processing Performance for Generative AI Large Models with Autonomous Decision-Making in Metaverse Applications Proceedings Article
In: Proc. - IEEE Int. Conf. Metaverse Comput., Netw., Appl., MetaCom, pp. 253–258, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-833151599-7 (ISBN).
Abstract | Links | BibTeX | Tags: Adversarial machine learning, AGI (Artificial General Intelligence), Artificial general intelligence, Artificial general intelligences, Autonomous decision, Autonomous Decision-Making, Data assimilation, Data integration, Decisions makings, Digital Twin Technology, Emotion Recognition, Generative adversarial networks, Generative AI large model, Generative AI Large Models, Large models, Metaverse, Metaverses, Model Acceleration, Model Compression, Multi agent systems, Multi-agent systems, Multi-modal data, Multi-Modal Data Integration, Multiagent systems (MASs), Reinforcement Learning, Reinforcement learnings, Spatio-temporal data
@inproceedings{ding_enhancing_2024,
  title     = {Enhancing Computational Processing Performance for Generative {AI} Large Models with Autonomous Decision-Making in {Metaverse} Applications},
  author    = {Ding, P. and Liu, J. and Sun, M. and Li, L. and Liu, H.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85211489063&doi=10.1109%2fMetaCom62920.2024.00048&partnerID=40&md5=ae085a7d90b12c9090f5bf7a274bc7ce},
  doi       = {10.1109/MetaCom62920.2024.00048},
  isbn      = {979-833151599-7},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Proc. - IEEE Int. Conf. Metaverse Comput., Netw., Appl., MetaCom},
  pages     = {253--258},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {We explore how to enhance the computational processing performance for generative AI large models with autonomous decision-making in metaverse applications. We first introduce the relationship between AI large models and the Metaverse. We elaborate on the application scenarios of generative AI large models in Metaverse, including real-time weather simulation, embodied intelligence of agents, dynamic environment interaction, and user emotion recognition. We then propose the method of Multi-Dimensional Optimization Generation Framework (MDOGF) to improve computational processing performance. The experiment results show great improvement in computational processing performance. © 2024 IEEE.},
  keywords  = {Adversarial machine learning, AGI (Artificial General Intelligence), Artificial general intelligence, Artificial general intelligences, Autonomous decision, Autonomous Decision-Making, Data assimilation, Data integration, Decisions makings, Digital Twin Technology, Emotion Recognition, Generative adversarial networks, Generative AI large model, Generative AI Large Models, Large models, Metaverse, Metaverses, Model Acceleration, Model Compression, Multi agent systems, Multi-agent systems, Multi-modal data, Multi-Modal Data Integration, Multiagent systems (MASs), Reinforcement Learning, Reinforcement learnings, Spatio-temporal data},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chen, M.; Liu, M.; Wang, C.; Song, X.; Zhang, Z.; Xie, Y.; Wang, L.
Cross-Modal Graph Semantic Communication Assisted by Generative AI in the Metaverse for 6G Journal Article
In: Research, vol. 7, 2024, ISSN: 20965168 (ISSN).
Abstract | Links | BibTeX | Tags: 3-dimensional, 3Dimensional models, Cross-modal, Graph neural networks, Graph semantics, Metaverses, Multi-modal data, Point-clouds, Semantic communication, Semantic features, Semantics, Three dimensional computer graphics, Virtual scenario
@article{chen_cross-modal_2024,
  title     = {Cross-Modal Graph Semantic Communication Assisted by Generative {AI} in the {Metaverse} for {6G}},
  author    = {Chen, M. and Liu, M. and Wang, C. and Song, X. and Zhang, Z. and Xie, Y. and Wang, L.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85192245049&doi=10.34133%2fresearch.0342&partnerID=40&md5=4a1c3e0a3ac877fcdf04937a96da32a1},
  doi       = {10.34133/research.0342},
  issn      = {2096-5168},
  year      = {2024},
  date      = {2024-01-01},
  journal   = {Research},
  volume    = {7},
  abstract  = {Recently, the development of the Metaverse has become a frontier spotlight, which is an important demonstration of the integration innovation of advanced technologies in the Internet. Moreover, artificial intelligence (AI) and 6G communications will be widely used in our daily lives. However, the effective interactions with the representations of multimodal data among users via 6G communications is the main challenge in the Metaverse. In this work, we introduce an intelligent cross-modal graph semantic communication approach based on generative AI and 3-dimensional (3D) point clouds to improve the diversity of multimodal representations in the Metaverse. Using a graph neural network, multimodal data can be recorded by key semantic features related to the real scenarios. Then, we compress the semantic features using a graph transformer encoder at the transmitter, which can extract the semantic representations through the cross-modal attention mechanisms. Next, we leverage a graph semantic validation mechanism to guarantee the exactness of the overall data at the receiver. Furthermore, we adopt generative AI to regenerate multimodal data in virtual scenarios. Simultaneously, a novel 3D generative reconstruction network is constructed from the 3D point clouds, which can transfer the data from images to 3D models, and we infer the multimodal data into the 3D models to increase realism in virtual scenarios. Finally, the experiment results demonstrate that cross-modal graph semantic communication, assisted by generative AI, has substantial potential for enhancing user interactions in the 6G communications and Metaverse. Copyright © 2024 Mingkai Chen et al.},
  keywords  = {3-dimensional, 3Dimensional models, Cross-modal, Graph neural networks, Graph semantics, Metaverses, Multi-modal data, Point-clouds, Semantic communication, Semantic features, Semantics, Three dimensional computer graphics, Virtual scenario},
  pubstate  = {published},
  tppubtype = {article}
}
2023
Park, J.; Choi, J.; Kim, S. -L.; Bennis, M.
Enabling the Wireless Metaverse via Semantic Multiverse Communication Proceedings Article
In: Annu. IEEE Commun.Soc. Conf. Sens., Mesh Ad Hoc Commun. Netw. workshops, pp. 85–90, IEEE Computer Society, 2023, ISBN: 21555486 (ISSN); 979-835030052-9 (ISBN).
Abstract | Links | BibTeX | Tags: Deep learning, Extended reality (XR), Federated learning, Fertilizers, Learn+, Learning systems, Metaverse, Metaverses, Modal analysis, Multi agent systems, Multi-agent reinforcement learning, Multi-modal data, Reinforcement Learning, Semantic communication, Semantics, Signal encoding, Signaling game, Split learning, Symbolic artificial intelligence
@inproceedings{park_enabling_2023,
  title     = {Enabling the Wireless {Metaverse} via Semantic Multiverse Communication},
  author    = {Park, J. and Choi, J. and Kim, S.-L. and Bennis, M.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85177465286&doi=10.1109%2fSECON58729.2023.10287438&partnerID=40&md5=b052572fb2f78ce0694c7ae5726c8daf},
  doi       = {10.1109/SECON58729.2023.10287438},
  issn      = {2155-5486},
  isbn      = {979-835030052-9},
  year      = {2023},
  date      = {2023-01-01},
  booktitle = {Annu. IEEE Commun.Soc. Conf. Sens., Mesh Ad Hoc Commun. Netw. workshops},
  volume    = {2023-September},
  pages     = {85--90},
  publisher = {IEEE Computer Society},
  abstract  = {Metaverse over wireless networks is an emerging use case of the sixth generation (6G) wireless systems, posing unprecedented challenges in terms of its multi-modal data transmissions with stringent latency and reliability requirements. Towards enabling this wireless metaverse, in this article we propose a novel semantic communication (SC) framework by decomposing the metaverse into human/machine agent-specific semantic multiverses (SMs). An SM stored at each agent comprises a semantic encoder and a generator, leveraging recent advances in generative artificial intelligence (AI). To improve communication efficiency, the encoder learns the semantic representations (SRs) of multi-modal data, while the generator learns how to manipulate them for locally rendering scenes and interactions in the metaverse. Since these learned SMs are biased towards local environments, their success hinges on synchronizing heterogeneous SMs in the background while communicating SRs in the foreground, turning the wireless metaverse problem into the problem of semantic multiverse communication (SMC). Based on this SMC architecture, we propose several promising algorithmic and analytic tools for modeling and designing SMC, ranging from distributed learning and multi-agent reinforcement learning (MARL) to signaling games and symbolic AI. © 2023 IEEE.},
  keywords  = {Deep learning, Extended reality (XR), Federated learning, Fertilizers, Learn+, Learning systems, Metaverse, Metaverses, Modal analysis, Multi agent systems, Multi-agent reinforcement learning, Multi-modal data, Reinforcement Learning, Semantic communication, Semantics, Signal encoding, Signaling game, Split learning, Symbolic artificial intelligence},
  pubstate  = {published},
  tppubtype = {inproceedings}
}