@inproceedings{bf8bc29e0d934c189f3b2b64287a1f9b,
  title     = {Exploring Brain Hemodynamic Response Patterns via Deep Recurrent Autoencoder},
  author    = {Zhao, Shijie and Cui, Yan and Chen, Yaowu and Zhang, Xin and Zhang, Wei and Liu, Huan and Han, Junwei and Guo, Lei and Xie, Li and Liu, Tianming},
  editor    = {Zhu, Dajiang and Yan, Jingwen and Huang, Heng and Shen, Li and Thompson, Paul M. and Westin, Carl-Fredrik and Pennec, Xavier and Joshi, Sarang and Nielsen, Mads and Sommer, Stefan and Fletcher, Tom and Durrleman, Stanley},
  booktitle = {Multimodal Brain Image Analysis and Mathematical Foundations of Computational Anatomy - 4th International Workshop, {MBIA} 2019, and 7th International Workshop, {MFCA} 2019, Held in Conjunction with {MICCAI} 2019, Proceedings},
  series    = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  publisher = {Springer},
  year      = {2019},
  pages     = {66--74},
  doi       = {10.1007/978-3-030-33226-6_8},
  isbn      = {9783030332259},
  language  = {English (US)},
  keywords  = {Autoencoder, Brain network, Deep learning, Hemodynamic response pattern, RNN, Task fMRI},
  abstract  = {For decades, task-based functional MRI (tfMRI) has been widely used in exploring functional brain networks and modeling brain activities. A variety of brain activity analysis methods for tfMRI data have been developed. However, these methods are mainly shallow models and are limited in faithfully modeling the complex spatial-temporal diverse and concurrent functional brain activities. Recently, recurrent neural networks (RNNs) demonstrate great superiority in modeling temporal dependency signals and autoencoder models have been proven to be effective in automatically estimating the optimal representations of the original data. These characteristics meet the requirement of modeling hemodynamic response patterns in tfMRI data. In order to take the advantages of both models, we proposed a novel unsupervised framework of deep recurrent autoencoder (DRAE) for modeling tfMRI data in this work. The basic idea of the DRAE model is to combine the deep recurrent neural network and autoencoder to automatically characterize the meaningful functional brain networks and corresponding diverse and complex hemodynamic response patterns underlying tfMRI data simultaneously. The proposed DRAE model has been tested on the motor tfMRI dataset of HCP 900 subjects release and all seven tfMRI datasets of HCP Q1 release. Extensive experimental results demonstrated the great superiority of the proposed method.},
  note      = {Funding Information: This work was supported by the National Science Foundation of China (61806167, 61603399, 31627802 and U1801265), the Fundamental Research Funds for the Central Universities (3102019PJ005), Natural Science Basic Research Plan in Shaanxi Province of China (2019JQ-630) and the China Postdoctoral Science Foundation (2019T120945). Publisher Copyright: {\textcopyright} Springer Nature Switzerland AG 2019.; 4th International Workshop on Multimodal Brain Image Analysis, MBIA 2019, and the 7th International Workshop on Mathematical Foundations of Computational Anatomy, MFCA 2019, held in conjunction with the 22nd International Conference on Medical Imaging and Computer Assisted Intervention, MICCAI 2019 ; Conference date: 17-10-2019 Through 17-10-2019},
}