@article{csse.2021.016633,
  author   = {Zhou, Jingjun and Liu, Jing and Li, Jingbing and Huang, Mengxing
              and Cheng, Jieren and Chen, Yen-Wei and Xu, Yingying
              and Nawaz, Saqib Ali},
  title    = {Mixed Attention Densely Residual Network for Single Image Super-Resolution},
  journal  = {Computer Systems Science and Engineering},
  year     = {2021},
  volume   = {39},
  number   = {1},
  pages    = {133--146},
  doi      = {10.32604/csse.2021.016633},
  url      = {http://www.techscience.com/csse/v39n1/42879},
  abstract = {Recent applications of convolutional neural networks (CNNs) in single image super-resolution (SISR) have achieved unprecedented performance. However, existing CNN-based SISR network structure design consider mostly only channel or spatial information, and cannot make full use of both channel and spatial information to improve SISR performance further. The present work addresses this problem by proposing a mixed attention densely residual network architecture that can make full and simultaneous use of both channel and spatial information. Specifically, we propose a residual in dense network structure composed of dense connections between multiple dense residual groups to form a very deep network. This structure allows each dense residual group to apply a local residual skip connection and enables the cascading of multiple residual blocks to reuse previous features. A mixed attention module is inserted into each dense residual group, to enable the algorithm to fuse channel attention with laplacian spatial attention effectively, and thereby more adaptively focus on valuable feature learning. The qualitative and quantitative results of extensive experiments have demonstrate that the proposed method has a comparable performance with other state-of-the-art methods.},
}