@article{li2024infibench,title={InfiBench: Evaluating the Question-Answering Capabilities of Code Large Language Models},author={Li, Linyi and Geng, Shijie and Li, Zhenwen and He, Yibo and Yu, Hao and Hua, Ziyue and Ning, Guanghan and Wang, Siwei and Xie, Tao and Yang, Hongxia},journal={NeurIPS},year={2024},url={https://arxiv.org/abs/2404.07940},}
@article{ai2024dreamclear,title={DreamClear: High-Capacity Real-World Image Restoration with Privacy-Safe Dataset Curation},author={Ai, Yuang and Zhou, Xiaoqiang and Huang, Huaibo and Han, Xiaotian and Chen, Zhengyu and You, Quanzeng and Yang, Hongxia},journal={NeurIPS},year={2024},url={https://neurips.cc/virtual/2024/poster/96507},}
@article{liu2024visual,title={Visual Anchors Are Strong Information Aggregators For Multimodal Large Language Model},author={Liu, Haogeng and You, Quanzeng and Han, Xiaotian and Liu, Yongfei and Huang, Huaibo and He, Ran and Yang, Hongxia},journal={NeurIPS},year={2024},url={https://arxiv.org/abs/2405.17815},}
@article{zhao2024empowering,title={Empowering Large Language Model Agents through Action Learning},author={Zhao, Haiteng and Ma, Chang and Wang, Guoyin and Su, Jing and Kong, Lingpeng and Xu, Jingjing and Deng, Zhi-Hong and Yang, Hongxia},journal={COLM},year={2024},url={https://arxiv.org/abs/2402.15809},}
@article{chai2024expert,title={An Expert is Worth One Token: Synergizing Multiple Expert LLMs as Generalist via Expert Token Routing},author={Chai, Ziwei and Wang, Guoyin and Su, Jing and Zhang, Tianjie and Huang, Xuanwen and Wang, Xuwu and Xu, Jingjing and Yuan, Jianbo and Yang, Hongxia and Wu, Fei and Yang, Yang},journal={ACL},year={2024},url={https://arxiv.org/abs/2403.16854},}
@article{liu2024devan,title={DeVAn: Dense Video Annotation for Video-Language Models},author={Liu, Tingkai and Tao, Yunzhe and Liu, Haogeng and Fang, Qihang and Zhou, Ding and Huang, Huaibo and He, Ran and Yang, Hongxia},journal={ACL},year={2024},url={https://arxiv.org/abs/2310.05060},}
@article{jian2024expedited,title={Expedited Training of Visual Conditioned Language Generation via Redundancy Reduction},author={Jian, Yiren and Liu, Tingkai and Tao, Yunzhe and Zhang, Chunhui and Vosoughi, Soroush and Yang, Hongxia},journal={ACL},year={2024},url={https://arxiv.org/abs/2310.03291},}
@article{liu2024infimm,title={InfiMM: Advancing Multimodal Understanding with an Open-Sourced Visual Language Model},author={Liu, Haogeng and You, Quanzeng and Wang, Yiqi and Han, Xiaotian and Zhai, Bohan and Liu, Yongfei and Chen, Wentao and Jian, Yiren and Tao, Yunzhe and Yuan, Jianbo and He, Ran and Yang, Hongxia},journal={ACL},year={2024},url={https://aclanthology.org/2024.findings-acl.27/},}
@article{zhao2024loraretriever,title={LoraRetriever: Input-Aware LoRA Retrieval and Composition for Mixed Tasks in the Wild},author={Zhao, Ziyu and Gan, Leilei and Wang, Guoyin and Zhou, Wangchunshu and Yang, Hongxia and Kuang, Kun and Wu, Fei},journal={ACL},year={2024},url={https://arxiv.org/abs/2402.09997},}
@article{he2024two,title={Two Stones Hit One Bird: Bilevel Positional Encoding for Better Length Extrapolation},author={He, Zhenyu and Feng, Guhao and Luo, Shengjie and Yang, Kai and Wang, Liwei and Xu, Jingjing and Zhang, Zhi and Yang, Hongxia and He, Di},journal={ICML},year={2024},url={https://arxiv.org/abs/2401.16421},}
@article{zheng2024self,title={Self-Infilling Code Generation},author={Zheng, Lin and Yuan, Jianbo and Zhang, Zhi and Yang, Hongxia and Kong, Lingpeng},journal={ICML},year={2024},url={https://arxiv.org/abs/2311.17972},}
@article{hu2024infiagent,title={InfiAgent-DABench: Evaluating Agents on Data Analysis Tasks},author={Hu, Xueyu and Zhao, Ziyu and Wei, Shuang and Chai, Ziwei and Ma, Qianli and Wang, Guoyin and Wang, Xuwu and Su, Jing and Xu, Jingjing and Zhu, Ming and Cheng, Yao and Yuan, Jianbo and Li, Jiwei and Kuang, Kun and Yang, Yang and Yang, Hongxia and Wu, Fei},journal={ICML},year={2024},url={https://arxiv.org/abs/2401.05507},}
@article{yu2024beta,title={β-Coder: Value-Based Deep Reinforcement Learning for Program Synthesis},author={Yu, Zishun and Tao, Yunzhe and Chen, Liyu and Sun, Tao and Yang, Hongxia},journal={ICLR},year={2024},url={https://arxiv.org/abs/2310.03173},}
@article{wang2024lemon,title={LEMON: Lossless model expansion},author={Wang, Yite and Su, Jiahao and Lu, Hanlin and Xie, Cong and Liu, Tianyi and Yuan, Jianbo and Lin, Haibin and Sun, Ruoyu and Yang, Hongxia},journal={ICLR},year={2024},url={https://openreview.net/pdf?id=3Vw7DQqq7U},}
@article{zheng2024learning,title={Learning Stackable and Skippable LEGO Bricks for Efficient, Reconfigurable, and Variable-Resolution Diffusion Modeling},author={Zheng, Huangjie and Wang, Zhendong and Yuan, Jianbo and Ning, Guanghan and He, Pengcheng and You, Quanzeng and Yang, Hongxia and Zhou, Mingyuan},journal={ICLR},year={2024},url={https://openreview.net/pdf?id=qmXedvwrT1},}
@article{pham2024let,title={Let Models Speak Ciphers: Multiagent Debate through Embeddings},author={Pham, Chau and Liu, Boyi and Yang, Yingxiang and Chen, Zhengyu and Liu, Tianyi and Yuan, Jianbo and Plummer, Bryan A. and Wang, Zhaoran and Yang, Hongxia},journal={ICLR},year={2024},url={https://openreview.net/pdf?id=sehRvaIPQQ},}