@Article{cmc.2022.022236,
  AUTHOR = {Muhammad Usman Younus and Rabia Shafi and Ammar Rafiq and Muhammad Rizwan Anjum and Sharjeel Afridi and Abdul Aleem Jamali and Zulfiqar Ali Arain},
  TITLE = {Encoder-Decoder Based LSTM Model to Advance User QoE in 360-Degree Video},
  JOURNAL = {Computers, Materials \& Continua},
  VOLUME = {71},
  YEAR = {2022},
  NUMBER = {2},
  PAGES = {2617--2631},
  URL = {http://www.techscience.com/cmc/v71n2/45811},
  ISSN = {1546-2226},
  ABSTRACT = {The growth of multimedia content has caused a massive increase in network traffic for video streaming, demanding solutions that preserve the user's Quality of Experience (QoE). 360-degree videos have taken user viewing behavior by storm; however, users only watch a portion of a 360-degree video, known as the viewport. Despite the immense hype, 360-degree streaming suffers from a troublesome side effect: the user's viewport must be pre-fetched in advance, and inaccurate viewport prediction makes viewers uncomfortable. Ideally, bandwidth consumption can be minimized if the user's motion is known in advance. To address this problem, we propose an encoder-decoder based Long Short-Term Memory (LSTM) model that more accurately captures the non-linear relationship between past and future viewport positions. The model operates on transformed data rather than the raw input to predict future user movement. This prediction model is then combined with a rate adaptation approach that assigns bitrates to the tiles of 360-degree video frames under a given network capacity. Hence, the proposed work aims to improve system performance by jointly optimizing QoE parameters. Experiments were carried out and compared with existing work to evaluate the proposed model, showing that it provides higher user QoE than its competitors.},
  DOI = {10.32604/cmc.2022.022236}
}