@Article{iasc.2023.033577,
  AUTHOR   = {Mrim M. Alnfiai},
  TITLE    = {Deep Learning-Based Sign Language Recognition for Hearing and Speaking Impaired People},
  JOURNAL  = {Intelligent Automation \& Soft Computing},
  VOLUME   = {36},
  NUMBER   = {2},
  YEAR     = {2023},
  PAGES    = {1653--1669},
  URL      = {http://www.techscience.com/iasc/v36n2/51151},
  ISSN     = {2326-005X},
  DOI      = {10.32604/iasc.2023.033577},
  ABSTRACT = {Sign language is mainly used to communicate with people who have hearing disabilities, as well as with people who have developmental impairments and limited or no verbal interaction skills. Interaction via sign language is therefore a fruitful means of communication for hearing- and speech-impaired persons. A hand gesture recognition system is helpful for such users, employing a human-computer interface (HCI) and convolutional neural networks (CNNs) to identify the static signs of Indian Sign Language (ISL). This study introduces a shark smell optimization with deep learning-based automated sign language recognition (SSODL-ASLR) model for hearing and speaking impaired people. The presented SSODL-ASLR technique mainly concentrates on the recognition and classification of sign language produced by hearing- and speech-impaired people. The SSODL-ASLR model encompasses a two-stage process, namely sign language detection and sign language classification. In the first stage, the Mask Region-based Convolutional Neural Network (Mask RCNN) model is exploited for sign language detection. In the second stage, the SSO algorithm with a soft-margin support vector machine (SM-SVM) model is utilized for sign language classification. To verify the enhanced classification performance of the SSODL-ASLR model, a brief set of simulations was carried out. The extensive results demonstrated the superiority of the SSODL-ASLR model over other techniques.}
}