@article{cmc.2022.019521,
  author   = {Ji, HongGeun and Oh, Soyoung and Kim, Jina and Choi, Seong and Park, Eunil},
  title    = {Integrating Deep Learning and Machine Translation for Understanding Unrefined Languages},
  journal  = {Computers, Materials \& Continua},
  year     = {2022},
  volume   = {70},
  number   = {1},
  pages    = {669--678},
  issn     = {1546-2226},
  doi      = {10.32604/cmc.2022.019521},
  url      = {http://www.techscience.com/cmc/v70n1/44419},
  abstract = {In the field of natural language processing (NLP), the advancement of neural machine translation has paved the way for cross-lingual research. Yet, most studies in NLP have evaluated the proposed language models on well-refined datasets. We investigate whether a machine translation approach is suitable for multilingual analysis of unrefined datasets, particularly, chat messages in Twitch. In order to address it, we collected the dataset, which included 7,066,854 and 3,365,569 chat messages from English and Korean streams, respectively. We employed several machine learning classifiers and neural networks with two different types of embedding: word-sequence embedding and the final layer of a pre-trained language model. The results of the employed models indicate that the accuracy difference between English, and English to Korean was relatively high, ranging from 3% to 12%. For Korean data (Korean, and Korean to English), it ranged from 0% to 2%. Therefore, the results imply that translation from a low-resource language (e.g., Korean) into a high-resource language (e.g., English) shows higher performance, in contrast to vice versa. Several implications and limitations of the presented results are also discussed. For instance, we suggest the feasibility of translation from resource-poor languages for using the tools of resource-rich languages in further analysis.},
}