@article{iasc.2022.026385,
  author   = {Kavitha, M. and Sankara Babu, B. and Sumathy, B. and Jackulin, T. and Ramkumar, N. and Manimaran, A. and Walia, Ranjan and Neelakandan, S.},
  title    = {Convolutional Neural Networks Based Video Reconstruction and Computation in Digital Twins},
  journal  = {Intelligent Automation \& Soft Computing},
  year     = {2022},
  volume   = {34},
  number   = {3},
  pages    = {1571--1586},
  issn     = {2326-005X},
  doi      = {10.32604/iasc.2022.026385},
  url      = {http://www.techscience.com/iasc/v34n3/47935},
  abstract = {With the advancement of communication and computing technologies, multimedia technologies involving video and image applications have become an important part of the information society and have become inextricably linked to people's daily productivity and lives. Simultaneously, there is a growing interest in super-resolution (SR) video reconstruction techniques. At the moment, the design of digital twins in video computing and video reconstruction is based on a number of difficult issues. Although there are several SR reconstruction techniques available in the literature, most of the works have not considered the spatio-temporal relationship between the video frames. With this motivation in mind, this paper presents VDCNN-SS, a novel very deep convolutional neural networks (VDCNN) with spatiotemporal similarity (SS) model for video reconstruction in digital twins. The VDCNN-SS technique proposed here maps the relationship between interconnected low resolution (LR) and high resolution (HR) image blocks. It also considers the spatiotemporal non-local complementary and repetitive data among nearby low-resolution video frames. Furthermore, the VDCNN technique is used to learn the LR--HR correlation mapping learning process. A series of simulations were run to examine the improved performance of the VDCNN-SS model, and the experimental results demonstrated the superiority of the VDCNN-SS technique over recent techniques.},
}