@article{cmc.2023.029914,
  author   = {Alkaabi, Sultan and Yussof, Salman and Al-Mulla, Sameera},
  title    = {Enhancing {CNN} for Forensics Age Estimation Using {CGAN} and Pseudo-Labelling},
  journal  = {Computers, Materials \& Continua},
  year     = {2023},
  volume   = {74},
  number   = {2},
  pages    = {2499--2516},
  issn     = {1546-2226},
  doi      = {10.32604/cmc.2023.029914},
  url      = {http://www.techscience.com/cmc/v74n2/50209},
  abstract = {Age estimation using forensics odontology is an important process in identifying victims in criminal or mass disaster cases. Traditionally, this process is done manually by human expert. However, the speed and accuracy may vary depending on the expertise level of the human expert and other human factors such as level of fatigue and attentiveness. To improve the recognition speed and consistency, researchers have proposed automated age estimation using deep learning techniques such as Convolutional Neural Network (CNN). CNN requires many training images to obtain high percentage of recognition accuracy. Unfortunately, it is very difficult to get large number of samples of dental images for training the CNN due to the need to comply to privacy acts. A promising solution to this problem is a technique called Generative Adversarial Network (GAN). GAN is a technique that can generate synthetic images that has similar statistics as the training set. A variation of GAN called Conditional GAN (CGAN) enables the generation of the synthetic images to be controlled more precisely such that only the specified type of images will be generated. This paper proposes a CGAN for generating new dental images to increase the number of images available for training a CNN model to perform age estimation. We also propose a pseudo-labelling technique to label the generated images with proper age and gender. We used the combination of real and generated images to train Dental Age and Sex Net (DASNET), which is a CNN model for dental age estimation. Based on the experiment conducted, the accuracy, coefficient of determination (R2) and Absolute Error (AE) of DASNET have improved to 87%, 0.85 and 1.18 years respectively as opposed to 74%, 0.72 and 3.45 years when DASNET is trained using real, but smaller number of images.},
}