@Article{cmc.2021.014984,
  AUTHOR = {Kang-Cheng Chen and Chia-Mu Yu and Tooska Dargahi},
  TITLE = {Evaluating the Risk of Disclosure and Utility in a Synthetic Dataset},
  JOURNAL = {Computers, Materials \& Continua},
  VOLUME = {68},
  YEAR = {2021},
  NUMBER = {1},
  PAGES = {761--787},
  URL = {http://www.techscience.com/cmc/v68n1/41805},
  ISSN = {1546-2226},
  ABSTRACT = {The advancement of information technology has improved the delivery of financial services through the introduction of Financial Technology (FinTech). To enhance customer satisfaction, FinTech companies leverage artificial intelligence (AI) to collect fine-grained data about individuals, which enables them to provide more intelligent and customized services. However, although such services promise to make customers’ lives easier, they also raise major security and privacy concerns for their users. Differential privacy (DP) is a common privacy-preserving data publishing technique that is proven to ensure a high level of privacy preservation. However, an important concern arises from the trade-off between the data utility and the risk of data disclosure (RoD), which has not been well investigated. In this paper, to address this challenge, we propose data-dependent approaches for evaluating whether sufficient privacy is guaranteed in a differentially private data release. At the same time, taking into account the utility of the differentially private synthetic dataset, we present a data-dependent algorithm that, through a curve-fitting technique, measures the error imposed on the statistical results of the original dataset by the injection of random noise. Moreover, we propose a method for choosing a proper privacy budget that maintains the trade-off between privacy and utility. Our comprehensive experimental analysis demonstrates both the efficiency and the estimation accuracy of the proposed algorithms.},
  DOI = {10.32604/cmc.2021.014984}
}