2025

Leveraging large-scale pretrained vision foundation models for label-efficient 3d point cloud segmentation.
Dong, S.; Liu, F.; Yao, R.; and Lin, G.
In International Conference on Image and Graphics, pages 304–315, 2025. Springer Nature Singapore.

@inproceedings{dong2025leveraging,
  title={Leveraging large-scale pretrained vision foundation models for label-efficient 3d point cloud segmentation},
  author={Dong, Shichao and Liu, Fayao and Yao, Rui and Lin, Guosheng},
  booktitle={International Conference on Image and Graphics},
  pages={304--315},
  year={2025},
  organization={Springer Nature Singapore}
}

Weakly Supervised Segmentation on Outdoor 4D Point Clouds With Progressive 4D Grouping.
Shi, H.; Liu, F.; Wu, Z.; Xu, Y.; and Lin, G.
IEEE Transactions on Pattern Analysis and Machine Intelligence. 2025.

@article{shi2025weakly,
  title={Weakly Supervised Segmentation on Outdoor 4D Point Clouds With Progressive 4D Grouping},
  author={Shi, Hanyu and Liu, Fayao and Wu, Zhonghua and Xu, Yi and Lin, Guosheng},
  journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
  year={2025},
  publisher={IEEE}
}

Robust-PIFu: Robust Pixel-aligned Implicit Function for 3D Human Digitalization from a Single Image.
Chan, K.; Liu, F.; Lin, G.; Foo, C.; and Lin, W.
In The Thirteenth International Conference on Learning Representations, 2025.

@inproceedings{chan2025robust,
  title={Robust-PIFu: Robust Pixel-aligned Implicit Function for 3D Human Digitalization from a Single Image},
  author={Chan, Kennard and Liu, Fayao and Lin, Guosheng and Foo, Chuan-Sheng and Lin, Weisi},
  booktitle={The Thirteenth International Conference on Learning Representations},
  year={2025}
}

Text-to-image rectified flow as plug-and-play priors.
Yang, X.; Chen, C.; Yang, X.; Liu, F.; and Lin, G.
In The Thirteenth International Conference on Learning Representations, 2025.

@inproceedings{yang2025text,
  title={Text-to-image rectified flow as plug-and-play priors},
  author={Yang, Xiaofeng and Chen, Cheng and Yang, Xulei and Liu, Fayao and Lin, Guosheng},
  booktitle={The Thirteenth International Conference on Learning Representations},
  year={2025}
}

Cadcrafter: Generating computer-aided design models from unconstrained images.
Chen, C.; Wei, J.; Chen, T.; Zhang, C.; Yang, X.; Zhang, S.; Yang, B.; Foo, C.; Lin, G.; Huang, Q.; and others.
In Proceedings of the Computer Vision and Pattern Recognition Conference, pages 11073–11082, 2025.

@inproceedings{chen2025cadcrafter,
  title={Cadcrafter: Generating computer-aided design models from unconstrained images},
  author={Chen, Cheng and Wei, Jiacheng and Chen, Tianrun and Zhang, Chi and Yang, Xiaofeng and Zhang, Shangzhan and Yang, Bingchen and Foo, Chuan-Sheng and Lin, Guosheng and Huang, Qixing and others},
  booktitle={Proceedings of the Computer Vision and Pattern Recognition Conference},
  pages={11073--11082},
  year={2025}
}

OccLE: Label-Efficient 3D Semantic Occupancy Prediction.
Fang, N.; Zhou, Z.; Liu, F.; Yang, X.; Wei, J.; Qiu, L.; and Lin, G.
arXiv preprint arXiv:2505.20617. 2025.

@article{fang2025occle,
  title={OccLE: Label-Efficient 3D Semantic Occupancy Prediction},
  author={Fang, Naiyu and Zhou, Zheyuan and Liu, Fayao and Yang, Xulei and Wei, Jiacheng and Qiu, Lemiao and Lin, Guosheng},
  journal={arXiv preprint arXiv:2505.20617},
  year={2025}
}

CCFL: Customized Client Federated Learning for Unsupervised Person Re-identification.
Zheng, Y.; Zhou, Y.; Liu, F.; Zhao, J.; Zhu, H.; and Du, W.
ACM Transactions on Multimedia Computing, Communications and Applications. 2025.

@article{zheng2025ccfl,
  title={CCFL: Customized Client Federated Learning for Unsupervised Person Re-identification},
  author={Zheng, Yi and Zhou, Yong and Liu, Fayao and Zhao, Jiaqi and Zhu, Hancheng and Du, Wenliang},
  journal={ACM Transactions on Multimedia Computing, Communications and Applications},
  year={2025},
  publisher={ACM}
}

Magicarticulate: Make your 3d models articulation-ready.
Song, C.; Zhang, J.; Li, X.; Yang, F.; Chen, Y.; Xu, Z.; Liew, J. H.; Guo, X.; Liu, F.; Feng, J.; and others.
In Proceedings of the Computer Vision and Pattern Recognition Conference, pages 15998–16007, 2025.

@inproceedings{song2025magicarticulate,
  title={Magicarticulate: Make your 3d models articulation-ready},
  author={Song, Chaoyue and Zhang, Jianfeng and Li, Xiu and Yang, Fan and Chen, Yiwen and Xu, Zhongcong and Liew, Jun Hao and Guo, Xiaoyang and Liu, Fayao and Feng, Jiashi and others},
  booktitle={Proceedings of the Computer Vision and Pattern Recognition Conference},
  pages={15998--16007},
  year={2025}
}

Exploring Active Learning for Label-Efficient Training of Semantic Neural Radiance Field.
Zhu, Y.; Cai, L.; Lu, K.; Liu, F.; and Yang, X.
arXiv preprint arXiv:2507.17351. 2025.

@article{zhu2025exploring,
  title={Exploring Active Learning for Label-Efficient Training of Semantic Neural Radiance Field},
  author={Zhu, Yuzhe and Cai, Lile and Lu, Kangkang and Liu, Fayao and Yang, Xulei},
  journal={arXiv preprint arXiv:2507.17351},
  year={2025}
}

Puppeteer: Rig and animate your 3d models.
Song, C.; Li, X.; Yang, F.; Xu, Z.; Wei, J.; Liu, F.; Feng, J.; Lin, G.; and Zhang, J.
arXiv preprint arXiv:2508.10898. 2025.

@article{song2025puppeteer,
  title={Puppeteer: Rig and animate your 3d models},
  author={Song, Chaoyue and Li, Xiu and Yang, Fan and Xu, Zhongcong and Wei, Jiacheng and Liu, Fayao and Feng, Jiashi and Lin, Guosheng and Zhang, Jianfeng},
  journal={arXiv preprint arXiv:2508.10898},
  year={2025}
}

Temporal and heterogeneous graph neural network for remaining useful life prediction.
Wen, Z.; Fang, Y.; Wei, P.; Liu, F.; Chen, Z.; and Wu, M.
IEEE Transactions on Neural Networks and Learning Systems. 2025.

@article{wen2025temporal,
  title={Temporal and heterogeneous graph neural network for remaining useful life prediction},
  author={Wen, Zhihao and Fang, Yuan and Wei, Pengcheng and Liu, Fayao and Chen, Zhenghua and Wu, Min},
  journal={IEEE Transactions on Neural Networks and Learning Systems},
  year={2025},
  publisher={IEEE}
}

FIND: Few-Shot Anomaly Inspection with Normal-Only Multi-Modal Data.
Li, Y.; Liu, F.; Liao, J.; Tian, S.; Foo, C.; and Yang, X.
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23290–23299, 2025.

@inproceedings{li2025find,
  title={FIND: Few-Shot Anomaly Inspection with Normal-Only Multi-Modal Data},
  author={Li, Yiting and Liu, Fayao and Liao, Jingyi and Tian, Sichao and Foo, Chuan-Sheng and Yang, Xulei},
  booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
  pages={23290--23299},
  year={2025}
}

2024

LCReg: Long-tailed image classification with latent categories based recognition.
Liu, W.; Wu, Z.; Wang, Y.; Ding, H.; Liu, F.; Lin, J.; and Lin, G.
Pattern Recognition, 145: 109971. 2024.

@article{liu2024lcreg,
  title={LCReg: Long-tailed image classification with latent categories based recognition},
  author={Liu, Weide and Wu, Zhonghua and Wang, Yiming and Ding, Henghui and Liu, Fayao and Lin, Jie and Lin, Guosheng},
  journal={Pattern Recognition},
  volume={145},
  pages={109971},
  year={2024},
  publisher={Pergamon}
}

Promptad: Zero-shot anomaly detection using text prompts.
Li, Y.; Goodge, A.; Liu, F.; and Foo, C.
In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1093–1102, 2024.

@inproceedings{li2024promptad,
  title={Promptad: Zero-shot anomaly detection using text prompts},
  author={Li, Yiting and Goodge, Adam and Liu, Fayao and Foo, Chuan-Sheng},
  booktitle={Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision},
  pages={1093--1102},
  year={2024}
}

Fine Structure-Aware Sampling: A New Sampling Training Scheme for Pixel-Aligned Implicit Models in Single-View Human Reconstruction.
Chan, K. Y.; Liu, F.; Lin, G.; Foo, C. S.; and Lin, W.
In AAAI 2024.

@inproceedings{yanting2024fine,
  title={Fine Structure-Aware Sampling: A New Sampling Training Scheme for Pixel-Aligned Implicit Models in Single-View Human Reconstruction},
  author={Chan, Kennard Yanting and Liu, Fayao and Lin, Guosheng and Foo, Chuan Sheng and Lin, Weisi},
  booktitle={AAAI 2024},
  year={2024}
}

Rethinking Few-shot 3D Point Cloud Semantic Segmentation.
An, Z.; Sun, G.; Liu, Y.; Liu, F.; Wu, Z.; Wang, D.; Van Gool, L.; and Belongie, S.
In CVPR 2024.

@inproceedings{an2024rethinking,
  title={Rethinking Few-shot 3D Point Cloud Semantic Segmentation},
  author={An, Zhaochong and Sun, Guolei and Liu, Yun and Liu, Fayao and Wu, Zongwei and Wang, Dan and Van Gool, Luc and Belongie, Serge},
  booktitle={CVPR 2024},
  year={2024}
}

Sculpt3D: Multi-View Consistent Text-to-3D Generation with Sparse 3D Prior.
Chen, C.; Yang, X.; Yang, F.; Feng, C.; Fu, Z.; Foo, C.; Lin, G.; and Liu, F.
In CVPR 2024.

@inproceedings{chen2024sculpt3d,
  title={Sculpt3D: Multi-View Consistent Text-to-3D Generation with Sparse 3D Prior},
  author={Chen, Cheng and Yang, Xiaofeng and Yang, Fan and Feng, Chengzeng and Fu, Zhoujie and Foo, Chuan-Sheng and Lin, Guosheng and Liu, Fayao},
  booktitle={CVPR 2024},
  year={2024}
}

Training neural networks with classification rules for incorporating domain knowledge.
Zhang, W.; Liu, F.; Nguyen, C. M.; Yang, Z. L. O.; Ramasamy, S.; and Foo, C.
Knowledge-Based Systems, 294: 111716. 2024.

@article{zhang2024training,
  title={Training neural networks with classification rules for incorporating domain knowledge},
  author={Zhang, Wenyu and Liu, Fayao and Nguyen, Cuong Manh and Yang, Zhong Liang Ou and Ramasamy, Savitha and Foo, Chuan-Sheng},
  journal={Knowledge-Based Systems},
  volume={294},
  pages={111716},
  year={2024},
  publisher={Elsevier}
}

Mining Semantic Correlations Between Mispredictions and Corrections for Interactive Semantic Segmentation.
Gao, Y.; Lang, C.; Liu, F.; Foo, C.; Cao, Y.; Sun, L.; and Wei, Y.
IEEE Transactions on Neural Networks and Learning Systems, 36(3): 5230–5243. 2024.

@article{gao2024mining,
  title={Mining Semantic Correlations Between Mispredictions and Corrections for Interactive Semantic Segmentation},
  author={Gao, Yutong and Lang, Congyan and Liu, Fayao and Foo, Chuan-Sheng and Cao, Yuanzhouhan and Sun, Lijuan and Wei, Yunchao},
  journal={IEEE Transactions on Neural Networks and Learning Systems},
  volume={36},
  number={3},
  pages={5230--5243},
  year={2024},
  publisher={IEEE}
}

REACTO: Reconstructing Articulated Objects from a Single Video.
Song, C.; Wei, J.; Foo, C.; Lin, G.; and Liu, F.
In CVPR 2024.

@inproceedings{song2024reacto,
  title={REACTO: Reconstructing Articulated Objects from a Single Video},
  author={Song, Chaoyue and Wei, Jiacheng and Foo, Chuan-Sheng and Lin, Guosheng and Liu, Fayao},
  booktitle={CVPR 2024},
  year={2024}
}

Multi-level self attention for unsupervised learning person re-identification.
Zheng, Y.; Zhao, J.; Zhou, Y.; Liu, F.; Yao, R.; Zhu, H.; and El Saddik, A.
Multimedia Tools and Applications, 83(26): 68855–68874. 2024.

@article{zheng2024multi,
  title={Multi-level self attention for unsupervised learning person re-identification},
  author={Zheng, Yi and Zhao, Jiaqi and Zhou, Yong and Liu, Fayao and Yao, Rui and Zhu, Hancheng and El Saddik, Abdulmotaleb},
  journal={Multimedia Tools and Applications},
  volume={83},
  number={26},
  pages={68855--68874},
  year={2024},
  publisher={Springer}
}

Sync4d: Video guided controllable dynamics for physics-based 4d generation.
Fu, Z.; Wei, J.; Shen, W.; Song, C.; Yang, X.; Liu, F.; Yang, X.; and Lin, G.
arXiv preprint arXiv:2405.16849. 2024.

@article{fu2024sync4d,
  title={Sync4d: Video guided controllable dynamics for physics-based 4d generation},
  author={Fu, Zhoujie and Wei, Jiacheng and Shen, Wenhao and Song, Chaoyue and Yang, Xiaofeng and Liu, Fayao and Yang, Xulei and Lin, Guosheng},
  journal={arXiv preprint arXiv:2405.16849},
  year={2024}
}

R-cyclic diffuser: Reductive and cyclic latent diffusion for 3d clothed human digitalization.
Chan, K. Y.; Liu, F.; Lin, G.; Foo, C. S.; and Lin, W.
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10304–10313, 2024.

@inproceedings{chan2024r,
  title={R-cyclic diffuser: Reductive and cyclic latent diffusion for 3d clothed human digitalization},
  author={Chan, Kennard Yanting and Liu, Fayao and Lin, Guosheng and Foo, Chuan Sheng and Lin, Weisi},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages={10304--10313},
  year={2024}
}

Learning temporal variations for 4D point cloud segmentation.
Shi, H.; Wei, J.; Wang, H.; Liu, F.; and Lin, G.
International Journal of Computer Vision, 132(12): 5603–5617. 2024.

@article{shi2024learning,
  title={Learning temporal variations for 4D point cloud segmentation},
  author={Shi, Hanyu and Wei, Jiacheng and Wang, Hao and Liu, Fayao and Lin, Guosheng},
  journal={International Journal of Computer Vision},
  volume={132},
  number={12},
  pages={5603--5617},
  year={2024},
  publisher={Springer}
}

Gaussian Mixture based Evidential Learning for Stereo Matching.
Liu, W.; Wang, X.; Wang, L.; Cheng, J.; Liu, F.; and Yang, X.
arXiv preprint arXiv:2408.02796. 2024.

@article{liu2024gaussian,
  title={Gaussian Mixture based Evidential Learning for Stereo Matching},
  author={Liu, Weide and Wang, Xingxing and Wang, Lu and Cheng, Jun and Liu, Fayao and Yang, Xulei},
  journal={arXiv preprint arXiv:2408.02796},
  year={2024}
}

Diverse and stable 2D diffusion guided text to 3D generation with noise recalibration.
Yang, X.; Liu, F.; Xu, Y.; Su, H.; Wu, Q.; and Lin, G.
In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pages 6549–6557, 2024.

@inproceedings{yang2024diverse,
  title={Diverse and stable 2D diffusion guided text to 3D generation with noise recalibration},
  author={Yang, Xiaofeng and Liu, Fayao and Xu, Yi and Su, Hanjing and Wu, Qingyao and Lin, Guosheng},
  booktitle={Proceedings of the AAAI Conference on Artificial Intelligence},
  volume={38},
  number={7},
  pages={6549--6557},
  year={2024}
}

3DFG-PIFu: 3D Feature Grids for Human Digitization from Sparse Views.
Chan, K. Y.; Liu, F.; Lin, G.; Foo, C. S.; and Lin, W.
In European Conference on Computer Vision, pages 38–54, 2024. Springer Nature Switzerland.

@inproceedings{chan20243dfg,
  title={3DFG-PIFu: 3D Feature Grids for Human Digitization from Sparse Views},
  author={Chan, Kennard Yanting and Liu, Fayao and Lin, Guosheng and Foo, Chuan Sheng and Lin, Weisi},
  booktitle={European Conference on Computer Vision},
  pages={38--54},
  year={2024},
  organization={Springer Nature Switzerland}
}

Few-shot image generation via style adaptation and content preservation.
He, X.; Yang, F.; Liu, F.; and Lin, G.
IEEE Transactions on Neural Networks and Learning Systems. 2024.

@article{he2024few,
  title={Few-shot image generation via style adaptation and content preservation},
  author={He, Xiaosheng and Yang, Fan and Liu, Fayao and Lin, Guosheng},
  journal={IEEE Transactions on Neural Networks and Learning Systems},
  year={2024},
  publisher={IEEE}
}

Neural Radiance Selector: Find the best 2D representations of 3D data for CLIP based 3D tasks.
Yang, X.; Liu, F.; and Lin, G.
Knowledge-Based Systems, 299: 112002. 2024.

@article{yang2024neural,
  title={Neural Radiance Selector: Find the best 2D representations of 3D data for CLIP based 3D tasks},
  author={Yang, Xiaofeng and Liu, Fayao and Lin, Guosheng},
  journal={Knowledge-Based Systems},
  volume={299},
  pages={112002},
  year={2024},
  publisher={Elsevier}
}

Learn to Optimize Denoising Scores: A Unified and Improved Diffusion Prior for 3D Generation.
Yang, X.; Chen, Y.; Chen, C.; Zhang, C.; Xu, Y.; Yang, X.; Liu, F.; and Lin, G.
In European Conference on Computer Vision, pages 136–152, 2024. Springer Nature Switzerland.

@inproceedings{yang2024learn,
  title={Learn to Optimize Denoising Scores: A Unified and Improved Diffusion Prior for 3D Generation},
  author={Yang, Xiaofeng and Chen, Yiwen and Chen, Cheng and Zhang, Chi and Xu, Yi and Yang, Xulei and Liu, Fayao and Lin, Guosheng},
  booktitle={European Conference on Computer Vision},
  pages={136--152},
  year={2024},
  organization={Springer Nature Switzerland}
}

Prim2room: Layout-controllable room mesh generation from primitives.
Feng, C.; Wei, J.; Chen, C.; Li, Y.; Ji, P.; Liu, F.; Li, H.; and Lin, G.
arXiv preprint arXiv:2409.05380. 2024.

@article{feng2024prim2room,
  title={Prim2room: Layout-controllable room mesh generation from primitives},
  author={Feng, Chengzeng and Wei, Jiacheng and Chen, Cheng and Li, Yang and Ji, Pan and Liu, Fayao and Li, Hongdong and Lin, Guosheng},
  journal={arXiv preprint arXiv:2409.05380},
  year={2024}
}

2023

Dense supervision propagation for weakly supervised semantic segmentation on 3d point clouds.
Wei, J.; Lin, G.; Yap, K.; Liu, F.; and Hung, T.
IEEE Transactions on Circuits and Systems for Video Technology, 34(6): 4367–4377. 2023.

@article{wei2023dense,
  title={Dense supervision propagation for weakly supervised semantic segmentation on 3d point clouds},
  author={Wei, Jiacheng and Lin, Guosheng and Yap, Kim-Hui and Liu, Fayao and Hung, Tzu-Yi},
  journal={IEEE Transactions on Circuits and Systems for Video Technology},
  volume={34},
  number={6},
  pages={4367--4377},
  year={2023},
  publisher={IEEE}
}

Self-training vision language BERTs with a unified conditional model.
Yang, X.; Lv, F.; Liu, F.; and Lin, G.
IEEE Transactions on Circuits and Systems for Video Technology, 33(8): 3560–3569. 2023.

@article{yang2023self,
  title={Self-training vision language BERTs with a unified conditional model},
  author={Yang, Xiaofeng and Lv, Fengmao and Liu, Fayao and Lin, Guosheng},
  journal={IEEE Transactions on Circuits and Systems for Video Technology},
  volume={33},
  number={8},
  pages={3560--3569},
  year={2023},
  publisher={IEEE}
}

Unsupervised 3d pose transfer with cross consistency and dual reconstruction.
Song, C.; Wei, J.; Li, R.; Liu, F.; and Lin, G.
IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(8): 10488–10499. 2023.

@article{song2023unsupervised,
  title={Unsupervised 3d pose transfer with cross consistency and dual reconstruction},
  author={Song, Chaoyue and Wei, Jiacheng and Li, Ruibo and Liu, Fayao and Lin, Guosheng},
  journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
  volume={45},
  number={8},
  pages={10488--10499},
  year={2023},
  publisher={IEEE}
}

Effective end-to-end vision language pretraining with semantic visual loss.
Yang, X.; Liu, F.; and Lin, G.
IEEE Transactions on Multimedia, 25: 8408–8417. 2023.

@article{yang2023effective,
  title={Effective end-to-end vision language pretraining with semantic visual loss},
  author={Yang, Xiaofeng and Liu, Fayao and Lin, Guosheng},
  journal={IEEE Transactions on Multimedia},
  volume={25},
  pages={8408--8417},
  year={2023},
  publisher={IEEE}
}

Temporal feature matching and propagation for semantic segmentation on 3D point cloud sequences.
Shi, H.; Li, R.; Liu, F.; and Lin, G.
IEEE Transactions on Circuits and Systems for Video Technology, 33(12): 7491–7502. 2023.

@article{shi2023temporal,
  title={Temporal feature matching and propagation for semantic segmentation on 3D point cloud sequences},
  author={Shi, Hanyu and Li, Ruibo and Liu, Fayao and Lin, Guosheng},
  journal={IEEE Transactions on Circuits and Systems for Video Technology},
  volume={33},
  number={12},
  pages={7491--7502},
  year={2023},
  publisher={IEEE}
}

Integrating topology beyond descriptions for zero-shot learning.
Chen, Z.; Gao, Y.; Lang, C.; Wei, L.; Li, Y.; Liu, H.; and Liu, F.
Pattern Recognition, 143: 109738. 2023.

@article{chen2023integrating,
  title={Integrating topology beyond descriptions for zero-shot learning},
  author={Chen, Ziyi and Gao, Yutong and Lang, Congyan and Wei, Lili and Li, Yidong and Liu, Hongzhe and Liu, Fayao},
  journal={Pattern Recognition},
  volume={143},
  pages={109738},
  year={2023},
  publisher={Pergamon}
}

Dynamic interaction dilation for interactive human parsing.
Gao, Y.; Lang, C.; Liu, F.; Cao, Y.; Sun, L.; and Wei, Y.
IEEE Transactions on Multimedia, 26: 178–189. 2023.

@article{gao2023dynamic,
  title={Dynamic interaction dilation for interactive human parsing},
  author={Gao, Yutong and Lang, Congyan and Liu, Fayao and Cao, Yuanzhouhan and Sun, Lijuan and Wei, Yunchao},
  journal={IEEE Transactions on Multimedia},
  volume={26},
  pages={178--189},
  year={2023},
  publisher={IEEE}
}

Neural logic vision language explainer.
Yang, X.; Liu, F.; and Lin, G.
IEEE Transactions on Multimedia, 26: 3331–3340. 2023.

@article{yang2023neural,
  title={Neural logic vision language explainer},
  author={Yang, Xiaofeng and Liu, Fayao and Lin, Guosheng},
  journal={IEEE Transactions on Multimedia},
  volume={26},
  pages={3331--3340},
  year={2023},
  publisher={IEEE}
}

Collaborative propagation on multiple instance graphs for 3d instance segmentation with single-point supervision.
Dong, S.; Li, R.; Wei, J.; Liu, F.; and Lin, G.
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16665–16674, 2023.

@inproceedings{dong2023collaborative,
  title={Collaborative propagation on multiple instance graphs for 3d instance segmentation with single-point supervision},
  author={Dong, Shichao and Li, Ruibo and Wei, Jiacheng and Liu, Fayao and Lin, Guosheng},
  booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
  pages={16665--16674},
  year={2023}
}

Elfnet: Evidential local-global fusion for stereo matching.
Lou, J.; Liu, W.; Chen, Z.; Liu, F.; and Cheng, J.
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17784–17793, 2023.

@inproceedings{lou2023elfnet,
  title={Elfnet: Evidential local-global fusion for stereo matching},
  author={Lou, Jieming and Liu, Weide and Chen, Zhuo and Liu, Fayao and Cheng, Jun},
  booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
  pages={17784--17793},
  year={2023}
}

2022

Feature flow: In-network feature flow estimation for video object detection.
Jin, R.; Lin, G.; Wen, C.; Wang, J.; and Liu, F.
Pattern Recognition, 122: 108323. 2022.

@article{jin2022feature,
  title={Feature flow: In-network feature flow estimation for video object detection},
  author={Jin, Ruibing and Lin, Guosheng and Wen, Changyun and Wang, Jianliang and Liu, Fayao},
  journal={Pattern Recognition},
  volume={122},
  pages={108323},
  year={2022},
  publisher={Pergamon}
}

On representation knowledge distillation for graph neural networks.
Joshi, C. K.; Liu, F.; Xun, X.; Lin, J.; and Foo, C. S.
IEEE Transactions on Neural Networks and Learning Systems, 35(4): 4656–4667. 2022.

@article{joshi2022representation,
  title={On representation knowledge distillation for graph neural networks},
  author={Joshi, Chaitanya K and Liu, Fayao and Xun, Xu and Lin, Jie and Foo, Chuan Sheng},
  journal={IEEE Transactions on Neural Networks and Learning Systems},
  volume={35},
  number={4},
  pages={4656--4667},
  year={2022},
  publisher={IEEE}
}

Long-tailed recognition by learning from latent categories.
Liu, W.; Wu, Z.; Wang, Y.; Ding, H.; Liu, F.; Lin, J.; and Lin, G.
arXiv preprint arXiv:2206.01010. 2022.

@article{liu2022long,
  title={Long-tailed recognition by learning from latent categories},
  author={Liu, Weide and Wu, Zhonghua and Wang, Yiming and Ding, Henghui and Liu, Fayao and Lin, Jie and Lin, Guosheng},
  journal={arXiv preprint arXiv:2206.01010},
  year={2022}
}

Depth and video segmentation based visual attention for embodied question answering.
Luo, H.; Lin, G.; Yao, Y.; Liu, F.; Liu, Z.; and Tang, Z.
IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(6): 6807–6819. 2022.

@article{luo2022depth,
  title={Depth and video segmentation based visual attention for embodied question answering},
  author={Luo, Haonan and Lin, Guosheng and Yao, Yazhou and Liu, Fayao and Liu, Zichuan and Tang, Zhenmin},
  journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
  volume={45},
  number={6},
  pages={6807--6819},
  year={2022},
  publisher={IEEE}
}

Weakly supervised segmentation on outdoor 4d point clouds with temporal matching and spatial graph propagation.
Shi, H.; Wei, J.; Li, R.; Liu, F.; and Lin, G.
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11840–11849, 2022.

@inproceedings{shi2022weakly,
  title={Weakly supervised segmentation on outdoor 4d point clouds with temporal matching and spatial graph propagation},
  author={Shi, Hanyu and Wei, Jiacheng and Li, Ruibo and Liu, Fayao and Lin, Guosheng},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages={11840--11849},
  year={2022}
}

Rwseg: Cross-graph competing random walks for weakly supervised 3D instance segmentation.
Dong, S.; Li, R.; Wei, J.; Liu, F.; and Lin, G.
arXiv preprint arXiv:2208.05110. 2022.

@article{dong2022rwseg,
  title={Rwseg: Cross-graph competing random walks for weakly supervised 3D instance segmentation},
  author={Dong, Shichao and Li, Ruibo and Wei, Jiacheng and Liu, Fayao and Lin, Guosheng},
  journal={arXiv preprint arXiv:2208.05110},
  year={2022}
}

Crcnet: Few-shot segmentation with cross-reference and region–global conditional networks.
Liu, W.; Zhang, C.; Lin, G.; and Liu, F.
International Journal of Computer Vision, 130(12): 3140–3157. 2022.

@article{liu2022crcnet,
  title={Crcnet: Few-shot segmentation with cross-reference and region--global conditional networks},
  author={Liu, Weide and Zhang, Chi and Lin, Guosheng and Liu, Fayao},
  journal={International Journal of Computer Vision},
  volume={130},
  number={12},
  pages={3140--3157},
  year={2022},
  publisher={Springer}
}

Point discriminative learning for data-efficient 3d point cloud analysis.
Liu, F.; Lin, G.; Foo, C.; Joshi, C. K.; and Lin, J.
In 2022 International Conference on 3D Vision (3DV), pages 42–51, 2022. IEEE.

@inproceedings{liu2022point,
  title={Point discriminative learning for data-efficient 3d point cloud analysis},
  author={Liu, Fayao and Lin, Guosheng and Foo, Chuan-Sheng and Joshi, Chaitanya K and Lin, Jie},
  booktitle={2022 International Conference on 3D Vision (3DV)},
  pages={42--51},
  year={2022},
  organization={IEEE}
}

2021

Hcrf-flow: Scene flow from point clouds with continuous high-order crfs and position-aware flow embedding.
Li, R.; Lin, G.; He, T.; Liu, F.; and Shen, C.
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 364–373, 2021.

@inproceedings{li2021hcrf,
  title={Hcrf-flow: Scene flow from point clouds with continuous high-order crfs and position-aware flow embedding},
  author={Li, Ruibo and Lin, Guosheng and He, Tong and Liu, Fayao and Shen, Chunhua},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages={364--373},
  year={2021}
}

Few-shot segmentation with global and local contrastive learning.
Liu, W.; Wu, Z.; Ding, H.; Liu, F.; Lin, J.; Lin, G.; and Zhou, W.
arXiv preprint arXiv:2108.05293. 2021.

@article{liu2021few,
  title={Few-shot segmentation with global and local contrastive learning},
  author={Liu, Weide and Wu, Zhonghua and Ding, Henghui and Liu, Fayao and Lin, Jie and Lin, Guosheng and Zhou, Wei},
  journal={arXiv preprint arXiv:2108.05293},
  year={2021}
}

3d pose transfer with correspondence learning and mesh refinement.
Song, C.; Wei, J.; Li, R.; Liu, F.; and Lin, G.
Advances in Neural Information Processing Systems, 34: 3108–3120. 2021.

@article{song20213d,
  title={3d pose transfer with correspondence learning and mesh refinement},
  author={Song, Chaoyue and Wei, Jiacheng and Li, Ruibo and Liu, Fayao and Lin, Guosheng},
  journal={Advances in Neural Information Processing Systems},
  volume={34},
  pages={3108--3120},
  year={2021}
}

On Automatic Data Augmentation for 3D Point Cloud Classification.
Zhang, W.; Xu, X.; Liu, F.; Zhang, L.; and Foo, C.
In BMVC 2021.

@inproceedings{zhang2021automatic,
  title={On Automatic Data Augmentation for 3D Point Cloud Classification},
  author={Zhang, Wanyue and Xu, Xun and Liu, Fayao and Zhang, Le and Foo, Chuan-Sheng},
  booktitle={BMVC 2021},
  year={2021}
}

2020

Crnet: Cross-reference networks for few-shot segmentation.
Liu, W.; Zhang, C.; Lin, G.; and Liu, F.
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4165–4173, 2020.

@inproceedings{liu2020crnet,
  title={Crnet: Cross-reference networks for few-shot segmentation},
  author={Liu, Weide and Zhang, Chi and Lin, Guosheng and Liu, Fayao},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages={4165--4173},
  year={2020}
}

TRRNet: Tiered Relation Reasoning for Compositional Visual Question Answering.
Yang, X.; Lin, G.; Lv, F.; and Liu, F.
In ECCV 2020.

@inproceedings{yang2020trrnet,
  title={TRRNet: Tiered Relation Reasoning for Compositional Visual Question Answering},
  author={Yang, Xiaofeng and Lin, Guosheng and Lv, Fengmao and Liu, Fayao},
  booktitle={ECCV 2020},
  year={2020}
}

2019

Local fusion networks with chained residual pooling for video action recognition.
He, F.; Liu, F.; Yao, R.; and Lin, G.
Image and Vision Computing, 81: 34–41. 2019.

@article{he2019local,
  title={Local fusion networks with chained residual pooling for video action recognition},
  author={He, Feixiang and Liu, Fayao and Yao, Rui and Lin, Guosheng},
  journal={Image and Vision Computing},
  volume={81},
  pages={34--41},
  year={2019},
  publisher={Elsevier}
}

Canet: Class-agnostic segmentation networks with iterative refinement and attentive few-shot learning.
Zhang, C.; Lin, G.; Liu, F.; Yao, R.; and Shen, C.
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5217–5226, 2019.

@inproceedings{zhang2019canet,
  title={Canet: Class-agnostic segmentation networks with iterative refinement and attentive few-shot learning},
  author={Zhang, Chi and Lin, Guosheng and Liu, Fayao and Yao, Rui and Shen, Chunhua},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages={5217--5226},
  year={2019}
}

Towards robust curve text detection with conditional spatial expansion.
Liu, Z.; Lin, G.; Yang, S.; Liu, F.; Lin, W.; and Goh, W. L.
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7269–7278, 2019.

@inproceedings{liu2019towards,
  title={Towards robust curve text detection with conditional spatial expansion},
  author={Liu, Zichuan and Lin, Guosheng and Yang, Sheng and Liu, Fayao and Lin, Weisi and Goh, Wang Ling},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages={7269--7278},
  year={2019}
}

Segeqa: Video segmentation based visual attention for embodied question answering.
Luo, H.; Lin, G.; Liu, Z.; Liu, F.; Tang, Z.; and Yao, Y.
In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 9666–9675, 2019. IEEE.

@inproceedings{luo2019segeqa,
  title={Segeqa: Video segmentation based visual attention for embodied question answering},
  author={Luo, Haonan and Lin, Guosheng and Liu, Zichuan and Liu, Fayao and Tang, Zhenmin and Yao, Yazhou},
  booktitle={2019 IEEE/CVF International Conference on Computer Vision (ICCV)},
  pages={9666--9675},
  year={2019},
  organization={IEEE}
}

Pyramid graph networks with connection attentions for region-based one-shot semantic segmentation.
Zhang, C.; Lin, G.; Liu, F.; Guo, J.; Wu, Q.; and Yao, R.
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9587–9595, 2019.

@inproceedings{zhang2019pyramid,
  title={Pyramid graph networks with connection attentions for region-based one-shot semantic segmentation},
  author={Zhang, Chi and Lin, Guosheng and Liu, Fayao and Guo, Jiushuang and Wu, Qingyao and Yao, Rui},
  booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
  pages={9587--9595},
  year={2019}
}

RefineNet: Multi-path refinement networks for dense prediction.
Lin, G.; Liu, F.; Milan, A.; Shen, C.; and Reid, I.
IEEE Transactions on Pattern Analysis & Machine Intelligence. 2019.

@article{lin2019refinenet,
  title={RefineNet: Multi-path refinement networks for dense prediction},
  author={Lin, Guosheng and Liu, Fayao and Milan, Anton and Shen, Chunhua and Reid, Ian},
  journal={IEEE Transactions on Pattern Analysis \& Machine Intelligence},
  year={2019},
  publisher={IEEE}
}

2018

Correlation Propagation Networks for Scene Text Detection.
Liu, Z.; Lin, G.; Goh, W. L.; Liu, F.; Shen, C.; and Yang, X.
arXiv preprint arXiv:1810.00304. 2018.

@article{liu2018correlation,
  title={Correlation Propagation Networks for Scene Text Detection},
  author={Liu, Zichuan and Lin, Guosheng and Goh, Wang Ling and Liu, Fayao and Shen, Chunhua and Yang, Xiaokang},
  journal={arXiv preprint arXiv:1810.00304},
  year={2018}
}

2017

Discriminative training of deep fully connected continuous CRFs with task-specific loss.
Liu, F.; Lin, G.; and Shen, C.
IEEE Transactions on Image Processing, 26(5): 2127–2136. 2017.

@article{liu2017discriminative,
  title={Discriminative training of deep fully connected continuous CRFs with task-specific loss},
  author={Liu, Fayao and Lin, Guosheng and Shen, Chunhua},
  journal={IEEE Transactions on Image Processing},
  volume={26},
  number={5},
  pages={2127--2136},
  year={2017},
  publisher={IEEE}
}

Structured learning of binary codes with column generation for optimizing ranking measures.
Lin, G.; Liu, F.; Shen, C.; Wu, J.; and Shen, H. T.
International Journal of Computer Vision, 123(2): 287–308. 2017.

@article{lin2017structured,
  title={Structured learning of binary codes with column generation for optimizing ranking measures},
  author={Lin, Guosheng and Liu, Fayao and Shen, Chunhua and Wu, Jianxin and Shen, Heng Tao},
  journal={International Journal of Computer Vision},
  volume={123},
  number={2},
  pages={287--308},
  year={2017},
  publisher={Springer}
}

Structured learning of tree potentials in CRF for image segmentation.
Liu, F.; Lin, G.; Qiao, R.; and Shen, C.
IEEE Transactions on Neural Networks and Learning Systems, 29(6): 2631–2637. 2017.

@article{liu2017structured,
  title={Structured learning of tree potentials in CRF for image segmentation},
  author={Liu, Fayao and Lin, Guosheng and Qiao, Ruizhi and Shen, Chunhua},
  journal={IEEE Transactions on Neural Networks and Learning Systems},
  volume={29},
  number={6},
  pages={2631--2637},
  year={2017},
  publisher={IEEE}
}

Designing ensemble learning algorithms using kernel methods.
Liu, F.; Qiao, R.; Shen, C.; and Luo, L.
International Journal of Machine Intelligence and Sensory Signal Processing, 2(1): 1–31. 2017.

@article{liu2017designing,
  title={Designing ensemble learning algorithms using kernel methods},
  author={Liu, Fayao and Qiao, Ruizhi and Shen, Chunhua and Luo, Lei},
  journal={International Journal of Machine Intelligence and Sensory Signal Processing},
  volume={2},
  number={1},
  pages={1--31},
  year={2017},
  publisher={Inderscience Publishers}
}

2016

Online unsupervised feature learning for visual tracking.
Liu, F.; Shen, C.; Reid, I.; and van den Hengel, A.
Image and Vision Computing, 51: 84–94. 2016.

@article{liu2016online,
  title={Online unsupervised feature learning for visual tracking},
  author={Liu, Fayao and Shen, Chunhua and Reid, Ian and van den Hengel, Anton},
  journal={Image and Vision Computing},
  volume={51},
  pages={84--94},
  year={2016},
  publisher={Elsevier}
}

2015

Learning depth from single monocular images using deep convolutional neural fields.
Liu, F.; Shen, C.; Lin, G.; and Reid, I.
IEEE Transactions on Pattern Analysis and Machine Intelligence. 2015.

@article{liu2015learning,
  title={Learning depth from single monocular images using deep convolutional neural fields},
  author={Liu, Fayao and Shen, Chunhua and Lin, Guosheng and Reid, Ian},
  journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
  year={2015}
}

Deep convolutional neural fields for depth estimation from a single image.
Liu, F.; Shen, C.; and Lin, G.
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5162–5170, 2015.

@inproceedings{liu2015deep,
  title={Deep convolutional neural fields for depth estimation from a single image},
  author={Liu, Fayao and Shen, Chunhua and Lin, Guosheng},
  booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
  pages={5162--5170},
  year={2015}
}

CRF learning with CNN features for image segmentation.
Liu, F.; Lin, G.; and Shen, C.
Pattern Recognition, 48(10): 2983–2992. 2015.

@article{liu2015crf,
  title={CRF learning with CNN features for image segmentation},
  author={Liu, Fayao and Lin, Guosheng and Shen, Chunhua},
  journal={Pattern Recognition},
  volume={48},
  number={10},
  pages={2983--2992},
  year={2015},
  publisher={Pergamon}
}

Sequence searching with deep-learnt depth for condition- and viewpoint-invariant route-based place recognition.
Milford, M.; Shen, C.; Lowry, S.; Suenderhauf, N.; Shirazi, S.; Lin, G.; Liu, F.; Pepperell, E.; Lerma, C.; Upcroft, B.; and others.
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 18–25, 2015.

@inproceedings{milford2015sequence,
  title={Sequence searching with deep-learnt depth for condition- and viewpoint-invariant route-based place recognition},
  author={Milford, Michael and Shen, Chunhua and Lowry, Stephanie and Suenderhauf, Niko and Shirazi, Sareh and Lin, Guosheng and Liu, Fayao and Pepperell, Edward and Lerma, Cesar and Upcroft, Ben and others},
  booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops},
  pages={18--25},
  year={2015}
}

Learning Structured Prediction Models in Computer Vision.
Liu, F.
Ph.D. Thesis, University of Adelaide, School of Computer Science, 2015.

@phdthesis{liu2015learning,
  title={Learning Structured Prediction Models in Computer Vision},
  author={Liu, Fayao},
  year={2015},
  school={University of Adelaide, School of Computer Science}
}

2014

From Kernel Machines to Ensemble Learning.
Shen, C.; and Liu, F.
arXiv preprint arXiv:1401.0767. 2014.

@article{shen2014kernel,
  title={From Kernel Machines to Ensemble Learning},
  author={Shen, Chunhua and Liu, Fayao},
  journal={arXiv preprint arXiv:1401.0767},
  year={2014}
}

2013

Efficient dual approach to distance metric learning.
Shen, C.; Kim, J.; Liu, F.; Wang, L.; and Van Den Hengel, A.
IEEE Transactions on Neural Networks and Learning Systems, 25(2): 394–406. 2013.

@article{shen2013efficient,
  title={Efficient dual approach to distance metric learning},
  author={Shen, Chunhua and Kim, Junae and Liu, Fayao and Wang, Lei and Van Den Hengel, Anton},
  journal={IEEE Transactions on Neural Networks and Learning Systems},
  volume={25},
  number={2},
  pages={394--406},
  year={2013},
  publisher={IEEE}
}

2012

Local statistical analysis of gabor coefficients and adaptive feature extraction for face description and recognition.
Kuan, L.; Jianping, Y.; Yong, L.; and Fayao, L.
J Comput Res Dev, 49(4): 777–784. 2012.

@article{kuan2012local,
  title={Local statistical analysis of gabor coefficients and adaptive feature extraction for face description and recognition},
  author={Kuan, Li and Jianping, Yin and Yong, Li and Fayao, L},
  journal={J Comput Res Dev},
  volume={49},
  number={4},
  pages={777--784},
  year={2012}
}

2011

PSAEC: an improved algorithm for short read error correction using partial suffix arrays.
Zhao, Z.; Yin, J.; Zhan, Y.; Xiong, W.; Li, Y.; and Liu, F.
In Frontiers in Algorithmics and Algorithmic Aspects in Information and Management: Joint International Conference, FAW-AAIM 2011, Jinhua, China, May 28-31, 2011. Proceedings, pages 220–232, 2011. Springer Berlin Heidelberg.

@inproceedings{zhao2011psaec,
  title={PSAEC: an improved algorithm for short read error correction using partial suffix arrays},
  author={Zhao, Zhiheng and Yin, Jianping and Zhan, Yubin and Xiong, Wei and Li, Yong and Liu, Fayao},
  booktitle={Frontiers in Algorithmics and Algorithmic Aspects in Information and Management: Joint International Conference, FAW-AAIM 2011, Jinhua, China, May 28-31, 2011. Proceedings},
  pages={220--232},
  year={2011},
  organization={Springer Berlin Heidelberg}
}

2010

An improved recognition method of PDF417 barcode.
Liu, F.; Yin, J.; Li, K.; and Liu, Q.
In 2010 Chinese Conference on Pattern Recognition (CCPR), pages 1–5, 2010. IEEE.

@inproceedings{liu2010improved,
  title={An improved recognition method of PDF417 barcode},
  author={Liu, Fayao and Yin, Jianping and Li, Kuan and Liu, Qiang},
  booktitle={2010 Chinese Conference on Pattern Recognition (CCPR)},
  pages={1--5},
  year={2010},
  organization={IEEE}
}

Research on the location of PDF417 under the complicated background.
Liu, F.; and others.
Computer Engineering & Science, 32(6): 55. 2010.

@article{liu2010research,
  title={Research on the location of PDF417 under the complicated background},
  author={Liu, Fa and others},
  journal={Computer Engineering \& Science},
  volume={32},
  number={6},
  pages={55},
  year={2010}
}