AHCI RESEARCH GROUP
Publications
Papers published in international journals, conference and workshop proceedings, and books.
2025
Lu, J.; Gao, J.; Feng, F.; He, Z.; Zheng, M.; Liu, K.; He, J.; Liao, B.; Xu, S.; Sun, K.; Mo, Y.; Peng, Q.; Luo, J.; Li, Q.; Lu, G.; Wang, Z.; Dong, J.; He, K.; Cheng, S.; Cao, J.; Jiao, H.; Zhang, P.; Ma, S.; Zhu, L.; Shi, C.; Zhang, Y.; Chen, Y.; Wang, W.; Zhu, S.; Li, X.; Wang, Q.; Liu, J.; Wang, C.; Lin, W.; Zhai, E.; Wu, J.; Liu, Q.; Fu, B.; Cai, D.
Alibaba Stellar: A New Generation RDMA Network for Cloud AI Proceedings Article
In: pp. 453–466, Association for Computing Machinery, Inc, 2025, ISBN: 9798400715242.
Tags: Access network, Cloud computing, Congestion control (communication), Containers, data center networking, Data center networkings, Language Model, Learning systems, Machine learning applications, Memory architecture, Network support, Network support for AI and machine learning application, network support for AI and machine learning applications, Performance, Program processors, Remote direct memory access, Stellars, Transport and congestion control, Virtual Reality, Virtualization
@inproceedings{lu_alibaba_2025,
title = {Alibaba Stellar: A New Generation RDMA Network for Cloud AI},
author = {J. Lu and J. Gao and F. Feng and Z. He and M. Zheng and K. Liu and J. He and B. Liao and S. Xu and K. Sun and Y. Mo and Q. Peng and J. Luo and Q. Li and G. Lu and Z. Wang and J. Dong and K. He and S. Cheng and J. Cao and H. Jiao and P. Zhang and S. Ma and L. Zhu and C. Shi and Y. Zhang and Y. Chen and W. Wang and S. Zhu and X. Li and Q. Wang and J. Liu and C. Wang and W. Lin and E. Zhai and J. Wu and Q. Liu and B. Fu and D. Cai},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105016208536&doi=10.1145%2F3718958.3750539&partnerID=40&md5=901fdd27c510072633f0390a0abfb653},
doi = {10.1145/3718958.3750539},
isbn = {9798400715242},
year = {2025},
date = {2025-01-01},
pages = {453--466},
publisher = {Association for Computing Machinery, Inc},
abstract = {The rapid adoption of Large Language Models (LLMs) in cloud environments has intensified the demand for high-performance AI training and inference, where Remote Direct Memory Access (RDMA) plays a critical role. However, existing RDMA virtualization solutions, such as Single-Root Input/Output Virtualization (SR-IOV), face significant limitations in scalability, performance, and stability. These issues include lengthy container initialization times, hardware resource constraints, and inefficient traffic steering. To address these challenges, we propose Stellar, a new generation RDMA network for cloud AI. Stellar introduces three key innovations: Para-Virtualized Direct Memory Access (PVDMA) for on-demand memory pinning, extended Memory Translation Table (eMTT) for optimized GPU Direct RDMA (GDR) performance, and RDMA Packet Spray for efficient multi-path utilization. Deployed in our large-scale AI clusters, Stellar spins up virtual devices in seconds, reduces container initialization time by 15 times, and improves LLM training speed by up to 14%. Our evaluations demonstrate that Stellar significantly outperforms existing solutions, offering a scalable, stable, and high-performance RDMA network for cloud AI.},
keywords = {Access network, Cloud computing, Congestion control (communication), Containers, data center networking, Data center networkings, Language Model, Learning systems, Machine learning applications, Memory architecture, Network support, Network support for AI and machine learning application, network support for AI and machine learning applications, Performance, Program processors, Remote direct memory access, Stellars, Transport and congestion control, Virtual Reality, Virtualization},
pubstate = {published},
tppubtype = {inproceedings}
}
2024
Patel, P.; Goiri, Í.; Choukse, E.; Warrier, B.; Bianchini, R.; Zhang, C.; Mahalingam, N.
Characterizing Power Management Opportunities for LLMs in the Cloud Proceedings Article
In: Proceedings of the International Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS), pp. 207–222, Association for Computing Machinery, 2024, ISBN: 979-840070386-7.
Tags: Cloud, Cloud providers, Computational Linguistics, Computing power, Consumption patterns, Datacenter, datacenters, Electric power utilization, GPUs, Language Model, Large language model, large language models, Model inference, Power, Power management, Power oversubscription, Power usage, Profiling, Program processors, Virtual Reality
@inproceedings{patel_characterizing_2024,
title = {Characterizing Power Management Opportunities for LLMs in the Cloud},
author = {P. Patel and Í. Goiri and E. Choukse and B. Warrier and R. Bianchini and C. Zhang and N. Mahalingam},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85192199791&doi=10.1145%2f3620666.3651329&partnerID=40&md5=6102cbb096a789e297711420d4b8427a},
doi = {10.1145/3620666.3651329},
isbn = {979-840070386-7},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of the International Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS)},
volume = {3},
pages = {207--222},
publisher = {Association for Computing Machinery},
abstract = {Recent innovation in large language models (LLMs) and their myriad use cases have rapidly driven up the compute demand for datacenter GPUs. Several cloud providers and other enterprises plan to substantially grow their datacenter capacity to support these new workloads. A key bottleneck resource in datacenters is power, which LLMs are quickly saturating due to their rapidly increasing model sizes. We extensively characterize the power consumption patterns of a variety of LLMs and their configurations. We identify the differences between the training and inference power consumption patterns. Based on our analysis, we claim that the average and peak power utilization in LLM inference clusters should not be very high. Our deductions align with data from production LLM clusters, revealing that inference workloads offer substantial headroom for power oversubscription. However, the stringent set of telemetry and controls that GPUs offer in a virtualized environment makes it challenging to build a reliable and robust power management framework. We leverage the insights from our characterization to identify opportunities for better power management. As a detailed use case, we propose a new framework called POLCA, which enables power oversubscription in LLM inference clouds. POLCA is robust, reliable, and readily deployable. Using open-source models to replicate the power patterns observed in production, we simulate POLCA and demonstrate that we can deploy 30% more servers in existing clusters with minimal performance loss.},
keywords = {Cloud, Cloud providers, Computational Linguistics, Computing power, Consumption patterns, Datacenter, datacenters, Electric power utilization, GPUs, Language Model, Large language model, large language models, Model inference, Power, Power management, Power oversubscription, Power usage, Profiling, Program processors, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}