@Article{cmc.2023.027379,
  AUTHOR   = {Lin Zhou and Xijin Chen and Chaoyan Wu and Qiuyue Zhong and Xu Cheng and Yibin Tang},
  TITLE    = {Speech Enhancement via Mask-Mapping Based Residual Dense Network},
  JOURNAL  = {Computers, Materials \& Continua},
  VOLUME   = {74},
  NUMBER   = {1},
  YEAR     = {2023},
  PAGES    = {1259--1277},
  URL      = {http://www.techscience.com/cmc/v74n1/49769},
  ISSN     = {1546-2226},
  ABSTRACT = {Masking-based and spectrum-mapping-based methods are the two main deep-neural-network (DNN) approaches to speech enhancement. However, mapping-based methods reuse only the phase of the noisy speech, which limits the upper bound of enhancement performance, while masking-based methods hinge on accurately estimating the mask, which remains the key problem. Combining the advantages of both, this paper proposes MM-RDN, a speech enhancement algorithm based on masking-mapping (MM) and a residual dense network (RDN). From the logarithmic power spectrogram (LPS) of consecutive frames, MM estimates the ideal ratio mask (IRM) matrix of those frames. The RDN makes full use of the feature maps of all layers and, by applying global residual learning to combine shallow and deep features, extracts globally dense features from the LPS, thereby improving the estimation accuracy of the IRM matrix. Simulations show that the proposed method achieves attractive speech enhancement performance in various acoustic environments. In particular, in untrained acoustic tests with limited priors, e.g., unmatched signal-to-noise ratio (SNR) and unmatched noise category, MM-RDN still outperforms the existing convolutional recurrent network (CRN) method on perceptual evaluation of speech quality (PESQ) and other evaluation indexes, indicating that the proposed algorithm generalizes better to untrained conditions.},
  DOI      = {10.32604/cmc.2023.027379}
}