@Article{cmc.2021.017800,
  AUTHOR = {Sadia Kiran and Muhammad Attique Khan and Muhammad Younus Javed and Majed Alhaisoni and Usman Tariq and Yunyoung Nam and Robertas Damaševičius and Muhammad Sharif},
  TITLE = {Multi-Layered Deep Learning Features Fusion for Human Action Recognition},
  JOURNAL = {Computers, Materials \& Continua},
  VOLUME = {69},
  YEAR = {2021},
  NUMBER = {3},
  PAGES = {4061--4075},
  URL = {http://www.techscience.com/cmc/v69n3/44139},
  ISSN = {1546-2226},
  ABSTRACT = {Human Action Recognition (HAR) has been an active research topic in machine learning for the last few decades. Visual surveillance, robotics, and pedestrian detection are the main applications of action recognition. Computer vision researchers have introduced many HAR techniques, but these still face challenges such as redundant features and high computational cost. In this article, we propose a new deep learning-based method for HAR. In the proposed method, video frames are first pre-processed using a global contrast approach and then used to train a deep learning model via domain transfer learning. A pre-trained ResNet-50 model is used as the deep learning model in this work. Features are extracted from two layers: Global Average Pool (GAP) and Fully Connected (FC). The features of both layers are fused using Canonical Correlation Analysis (CCA). Features are then selected using a Shannon entropy-based threshold function. The selected features are finally passed to multiple classifiers for final classification. Experiments are conducted on five publicly available datasets: IXMAS, UCF Sports, YouTube, UT-Interaction, and KTH. The accuracies on these datasets were 89.6%, 99.7%, 100%, 96.7%, and 96.6%, respectively. Comparison with existing techniques shows that the proposed method provides improved accuracy for HAR. The proposed method is also computationally efficient in terms of execution time.},
  DOI = {10.32604/cmc.2021.017800}
}
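
Note: the abstract above outlines a feature-extraction and fusion pipeline. Below is a minimal Python sketch of how such a pipeline might be assembled, assuming PyTorch/torchvision for the pre-trained ResNet-50, scikit-learn for CCA, and a histogram-based reading of the Shannon entropy threshold. The layer names (avgpool, fc), the number of CCA components, and the keep ratio are illustrative assumptions, not values taken from the paper. BibTeX ignores text outside entries, so this block does not affect parsing.

import numpy as np
import torch
import torchvision.models as models
import torchvision.transforms as T
from sklearn.cross_decomposition import CCA

# Pre-trained ResNet-50 backbone (domain transfer learning).
resnet = models.resnet50(weights=models.ResNet50_Weights.DEFAULT)
resnet.eval()

# Capture outputs of the Global Average Pool and Fully Connected layers
# via forward hooks (avgpool -> 2048-d, fc -> 1000-d per frame).
features = {}
def save_output(name):
    def hook(module, inp, out):
        features[name] = out.flatten(1).detach()
    return hook
resnet.avgpool.register_forward_hook(save_output("gap"))
resnet.fc.register_forward_hook(save_output("fc"))

preprocess = T.Compose([
    T.Resize((224, 224)),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

def extract(frames):
    """Return (GAP, FC) feature matrices for a batch of PIL frames."""
    batch = torch.stack([preprocess(f) for f in frames])
    with torch.no_grad():
        resnet(batch)
    return features["gap"].numpy(), features["fc"].numpy()

def cca_fuse(gap_feats, fc_feats, n_components=256):
    """Project both feature sets into a shared CCA space and concatenate.
    n_components must not exceed the sample count or either feature width."""
    cca = CCA(n_components=n_components, max_iter=1000)
    gap_c, fc_c = cca.fit_transform(gap_feats, fc_feats)
    return np.hstack([gap_c, fc_c])

def entropy_select(X, keep_ratio=0.5):
    """Keep the features with the highest Shannon entropy across samples;
    one plausible interpretation of the entropy-based threshold function."""
    ent = []
    for j in range(X.shape[1]):
        hist, _ = np.histogram(X[:, j], bins=32)
        p = hist / max(hist.sum(), 1)
        p = p[p > 0]
        ent.append(-(p * np.log2(p)).sum())
    idx = np.argsort(ent)[::-1][: int(keep_ratio * X.shape[1])]
    return X[:, idx], idx

# Usage sketch: fuse, select, then feed any standard classifier, e.g.
#   gap, fc = extract(frames)
#   X_sel, idx = entropy_select(cca_fuse(gap, fc))
#   clf = sklearn.svm.SVC(kernel="linear").fit(X_sel, labels)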