AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Zeng, S. -Y.; Liang, T. -Y.
PartConverter: A Part-Oriented Transformation Framework for Point Clouds Journal Article
In: IET Image Processing, vol. 19, no. 1, 2025, ISSN: 1751-9659; 1751-9667, (Publisher: John Wiley and Sons Inc).
Abstract | Links | BibTeX | Tags: 3D modeling, 3D models, 3d-modeling, Adversarial networks, attention mechanism, Attention mechanisms, Auto encoders, Cloud transformations, Generative Adversarial Network, Part assembler, Part-oriented, Point cloud transformation, Point-clouds
@article{zeng_partconverter_2025,
  title     = {{PartConverter}: A Part-Oriented Transformation Framework for Point Clouds},
  author    = {Zeng, S.-Y. and Liang, T.-Y.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005775417&doi=10.1049%2Fipr2.70104&partnerID=40&md5=d1eccf7d6b58a93978c55e8f404be38b},
  doi       = {10.1049/ipr2.70104},
  issn      = {1751-9659, 1751-9667},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {IET Image Processing},
  volume    = {19},
  number    = {1},
  abstract  = {With generative AI technologies advancing rapidly, the capabilities for 3D model generation and transformation are expanding across industries like manufacturing, healthcare, and virtual reality. However, existing methods based on generative adversarial networks (GANs), autoencoders, or transformers still have notable limitations. They primarily generate entire objects without providing flexibility for independent part transformation or precise control over model components. These constraints pose challenges for applications requiring complex object manipulation and fine-grained adjustments. To overcome these limitations, we propose PartConverter, a novel part-oriented point cloud transformation framework emphasizing flexibility and precision in 3D model transformations. PartConverter leverages attention mechanisms and autoencoders to capture crucial details within each part while modeling the relationships between components, thereby enabling highly customizable, part-wise transformations that maintain overall consistency. Additionally, our part assembler ensures that transformed parts align coherently, resulting in a consistent and realistic final 3D shape. This framework significantly enhances control over detailed part modeling, increasing the flexibility and efficiency of 3D model transformation workflows. © 2025 Elsevier B.V., All rights reserved.},
  note      = {Publisher: John Wiley and Sons Inc},
  keywords  = {3D modeling, 3D models, 3d-modeling, Adversarial networks, attention mechanism, Attention mechanisms, Auto encoders, Cloud transformations, Generative Adversarial Network, Part assembler, Part-oriented, Point cloud transformation, Point-clouds},
  pubstate  = {published},
  tppubtype = {article}
}
Salinas, C. S.; Magudia, K.; Sangal, A.; Ren, L.; Segars, W. P.
In-silico CT simulations of deep learning generated heterogeneous phantoms Journal Article
In: Biomedical Physics and Engineering Express, vol. 11, no. 4, 2025, ISSN: 2057-1976, (Publisher: Institute of Physics).
Abstract | Links | BibTeX | Tags: adult, algorithm, Algorithms, anatomical concepts, anatomical location, anatomical variation, Article, Biological organs, bladder, Bone, bone marrow, CGAN, colon, comparative study, computer assisted tomography, Computer graphics, computer model, Computer Simulation, Computer-Assisted, Computerized tomography, CT organ texture, CT organ textures, CT scanners, CT synthesis, CT-scan, Deep learning, fluorodeoxyglucose f 18, Generative Adversarial Network, Generative AI, histogram, human, human tissue, Humans, III-V semiconductors, image analysis, Image processing, Image segmentation, Image texture, Imaging, imaging phantom, intra-abdominal fat, kidney blood vessel, Learning systems, liver, lung, major clinical study, male, mean absolute error, Medical Imaging, neoplasm, Phantoms, procedures, prostate muscle, radiological parameters, signal noise ratio, Signal to noise ratio, Signal-To-Noise Ratio, simulation, Simulation platform, small intestine, Statistical tests, stomach, structural similarity index, subcutaneous fat, Textures, three dimensional double u net conditional generative adversarial network, Three-Dimensional, three-dimensional imaging, Tomography, Virtual CT scanner, Virtual Reality, Virtual trial, virtual trials, whole body CT, X-Ray Computed, x-ray computed tomography
@article{salinas_-silico_2025,
  title     = {In-Silico {CT} Simulations of Deep Learning Generated Heterogeneous Phantoms},
  author    = {Salinas, C. S. and Magudia, K. and Sangal, A. and Ren, L. and Segars, W. P.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105010297226&doi=10.1088%2F2057-1976%2Fade9c9&partnerID=40&md5=47f211fd93f80e407dcd7e4c490976c2},
  doi       = {10.1088/2057-1976/ade9c9},
  issn      = {2057-1976},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {Biomedical Physics and Engineering Express},
  volume    = {11},
  number    = {4},
  abstract  = {Current virtual imaging phantoms primarily emphasize geometric accuracy of anatomical structures. However, to enhance realism, it is also important to incorporate intra-organ detail. Because biological tissues are heterogeneous in composition, virtual phantoms should reflect this by including realistic intra-organ texture and material variation. We propose training two 3D Double U-Net conditional generative adversarial networks (3D DUC-GAN) to generate sixteen unique textures that encompass organs found within the torso. The model was trained on 378 CT image-segmentation pairs taken from a publicly available dataset with 18 additional pairs reserved for testing. Textured phantoms were generated and imaged using DukeSim, a virtual CT simulation platform. Results showed that the deep learning model was able to synthesize realistic heterogeneous phantoms from a set of homogeneous phantoms. These phantoms were compared with original CT scans and had a mean absolute difference of 46.15 ± 1.06 HU. The structural similarity index (SSIM) and peak signal-to-noise ratio (PSNR) were 0.86 ± 0.004 and 28.62 ± 0.14, respectively. The maximum mean discrepancy between the generated and actual distribution was 0.0016. These metrics marked an improvement of 27%, 5.9%, 6.2%, and 28% respectively, compared to current homogeneous texture methods. The generated phantoms that underwent a virtual CT scan had a closer visual resemblance to the true CT scan compared to the previous method. The resulting heterogeneous phantoms offer a significant step toward more realistic in silico trials, enabling enhanced simulation of imaging procedures with greater fidelity to true anatomical variation. © 2025 Elsevier B.V., All rights reserved.},
  note      = {Publisher: Institute of Physics},
  keywords  = {adult, algorithm, Algorithms, anatomical concepts, anatomical location, anatomical variation, Article, Biological organs, bladder, Bone, bone marrow, CGAN, colon, comparative study, computer assisted tomography, Computer graphics, computer model, Computer Simulation, Computer-Assisted, Computerized tomography, CT organ texture, CT organ textures, CT scanners, CT synthesis, CT-scan, Deep learning, fluorodeoxyglucose f 18, Generative Adversarial Network, Generative AI, histogram, human, human tissue, Humans, III-V semiconductors, image analysis, Image processing, Image segmentation, Image texture, Imaging, imaging phantom, intra-abdominal fat, kidney blood vessel, Learning systems, liver, lung, major clinical study, male, mean absolute error, Medical Imaging, neoplasm, Phantoms, procedures, prostate muscle, radiological parameters, signal noise ratio, Signal to noise ratio, Signal-To-Noise Ratio, simulation, Simulation platform, small intestine, Statistical tests, stomach, structural similarity index, subcutaneous fat, Textures, three dimensional double u net conditional generative adversarial network, Three-Dimensional, three-dimensional imaging, Tomography, Virtual CT scanner, Virtual Reality, Virtual trial, virtual trials, whole body CT, X-Ray Computed, x-ray computed tomography},
  pubstate  = {published},
  tppubtype = {article}
}