@Article{cmes.2023.025923,
  AUTHOR = {Honglu He and Zhiying Zhu and Xinpeng Zhang},
  TITLE = {Adaptive Backdoor Attack against Deep Neural Networks},
  JOURNAL = {Computer Modeling in Engineering \& Sciences},
  VOLUME = {136},
  NUMBER = {3},
  PAGES = {2617--2633},
  YEAR = {2023},
  URL = {http://www.techscience.com/CMES/v136n3/51803},
  ISSN = {1526-1506},
  ABSTRACT = {In recent years, the number of parameters of deep neural networks (DNNs) has been increasing rapidly. Training DNNs is typically computation-intensive, so many users leverage cloud computing and outsource their training procedures. Outsourced computation introduces a potential risk called a backdoor attack, in which a well-trained DNN performs abnormally on inputs carrying a certain trigger. Backdoor attacks can also be classified as attacks that exploit fake images. However, most backdoor attacks design a uniform trigger for all images, which can be easily detected and removed. In this paper, we propose a novel adaptive backdoor attack that overcomes this defect: a generator assigns a unique trigger to each image depending on its texture. To achieve this goal, we use a texture complexity metric to create a special mask for each image, which forces the trigger to be embedded into rich-texture regions. Because the trigger is distributed across texture regions, it is invisible to humans. Beyond the stealthiness of the triggers, we also limit the range of modification of the backdoor model to evade detection. Experiments show that our method is effective on multiple datasets and that traditional detectors cannot reveal the existence of the backdoor.},
  DOI = {10.32604/cmes.2023.025923}
}