Wang, Yixiang; Liu, Jiqiang; Chang, Xiaolin; Wang, Jianhua; Rodríguez, Ricardo J.
AB-FGSM: AdaBelief Optimizer and FGSM-Based Approach to Generate Adversarial Examples Journal Article
In: Journal of Information Security and Applications, vol. 68, article no. 103227, 2022, ISSN: 2214-2126.
@article{WLCWR-JISA-22,
title = {AB-FGSM: AdaBelief Optimizer and FGSM-Based Approach to Generate Adversarial Examples},
author = {Yixiang Wang and Jiqiang Liu and Xiaolin Chang and Jianhua Wang and Ricardo J. Rodríguez},
url = {http://webdiis.unizar.es/~ricardo/files/papers/WLCWR-JISA-22.pdf},
doi = {10.1016/j.jisa.2022.103227},
issn = {2214-2126},
year = {2022},
date = {2022-08-01},
journal = {Journal of Information Security and Applications},
volume = {68},
pages = {103227},
abstract = {Deep neural networks (DNNs) can be fooled by adversarial examples, which are legitimate inputs combined with imperceptible perturbations at the testing stage. Extensive research has made progress on white-box adversarial attacks that craft adversarial examples with a high success rate. However, these crafted examples have a low success rate in misleading black-box models with defensive mechanisms. To tackle this problem, we design an AdaBelief-based iterative Fast Gradient Sign Method (AB-FGSM) to generate more generalizable adversarial examples. By integrating the AdaBelief optimizer into the iterative FGSM (I-FGSM), the generalization of adversarial examples is boosted, considering that the AdaBelief method can find the transferable adversarial point in the ε-ball around the legitimate input on different optimization surfaces. We carry out white-box and black-box attacks on various adversarially trained models and ensemble models to verify the effectiveness and transferability of the adversarial examples crafted by AB-FGSM. Our experimental results indicate that the proposed AB-FGSM can efficiently and effectively craft adversarial examples in the white-box setting compared with state-of-the-art attacks. In addition, the transfer rate of adversarial examples is 4% to 21% higher than that of state-of-the-art attacks in the black-box setting.},
keywords = {adversarial examples, deep learning, generalization, optimization, Security, Transferability},
pubstate = {published},
tppubtype = {article}
}
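The abstract above describes the core idea of AB-FGSM: replace the raw gradient in I-FGSM with an AdaBelief-style update direction before taking the sign step. The following is a minimal, hypothetical sketch of that idea, assuming a PyTorch classifier in [0, 1] input space with a cross-entropy loss; the function name ab_fgsm, the hyperparameter defaults, and the exact moment updates are illustrative, not the paper's precise formulation.

import torch
import torch.nn.functional as F

def ab_fgsm(model, x, y, eps=8/255, steps=10, beta1=0.9, beta2=0.999, tol=1e-8):
    # Hypothetical sketch of an AdaBelief-flavoured iterative FGSM (AB-FGSM).
    # x: clean inputs in [0, 1]; y: true labels. Returns inputs perturbed
    # within the L-infinity eps-ball around x.
    alpha = eps / steps                   # per-step budget
    x_adv = x.clone().detach()
    m = torch.zeros_like(x)               # first moment: running gradient mean
    s = torch.zeros_like(x)               # "belief": variance of (g - m)
    for t in range(1, steps + 1):
        x_adv.requires_grad_(True)
        loss = F.cross_entropy(model(x_adv), y)
        g, = torch.autograd.grad(loss, x_adv)
        # AdaBelief moments: s tracks how far the gradient deviates from m
        m = beta1 * m + (1 - beta1) * g
        s = beta2 * s + (1 - beta2) * (g - m) ** 2
        m_hat = m / (1 - beta1 ** t)      # bias correction
        s_hat = s / (1 - beta2 ** t)
        step = m_hat / (s_hat.sqrt() + tol)
        # sign step, then project back into the eps-ball and valid pixel range
        x_adv = x_adv.detach() + alpha * step.sign()
        x_adv = x + (x_adv - x).clamp(-eps, eps)
        x_adv = x_adv.clamp(0.0, 1.0).detach()
    return x_adv

Taking the sign of the AdaBelief direction keeps the fixed per-step L-infinity budget of I-FGSM, while the moment estimates smooth the optimization trajectory; this is the intuition behind the improved transferability reported in the abstract.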
Wang, Jianhua; Chang, Xiaolin; Wang, Yixiang; Rodríguez, Ricardo J.; Zhang, Jianan
LSGAN-AT: Enhancing Malware Detector Robustness against Adversarial Examples Journal Article
In: Cybersecurity, vol. 4, no. 1, article no. 38, 15 pp., 2021, ISSN: 2523-3246.
@article{WCWRZ-CYSE-21,
title = {LSGAN-AT: Enhancing Malware Detector Robustness against Adversarial Examples},
author = {Jianhua Wang and Xiaolin Chang and Yixiang Wang and Ricardo J. Rodríguez and Jianan Zhang},
url = {http://webdiis.unizar.es/~ricardo/files/papers/WCWRZ-CYSE-21.pdf},
doi = {10.1186/s42400-021-00102-9},
issn = {2523-3246},
year = {2021},
date = {2021-01-01},
journal = {Cybersecurity},
volume = {4:38},
number = {1},
pages = {15},
abstract = {Adversarial Malware Example (AME)-based adversarial training can effectively enhance the robustness of Machine Learning (ML)-based malware detectors against AME. AME quality is a key factor in the robustness enhancement. The Generative Adversarial Network (GAN) is one kind of AME generation method, but existing GAN-based AME generation methods suffer from inadequate optimization, mode collapse, and training instability. In this paper, we propose a novel approach (denoted as LSGAN-AT) to enhance ML-based malware detector robustness against adversarial examples, which includes an LSGAN module and an AT module. The LSGAN module can generate more effective and smoother AME by utilizing brand-new network structures and a Least Square (LS) loss to optimize boundary samples. The AT module performs adversarial training using the AME generated by LSGAN to produce an ML-based Robust Malware Detector (RMD). Extensive experimental results validate the better transferability of the AME in terms of attacking 6 ML detectors, and the transferability of the RMD in terms of resisting the MalGAN black-box attack. The results also verify the performance of the generated RMD in the recognition rate of AME.},
keywords = {Adversarial malware example, Generative adversarial network, Machine learning, Malware detector, Transferability},
pubstate = {published},
tppubtype = {article}
}
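As a companion to the abstract above, here is a minimal, hypothetical sketch of the Least Square (LS) loss at the heart of the LSGAN module, in the same PyTorch style as the previous sketch. The function names and target-label values are illustrative assumptions; the paper's actual generator and discriminator architectures for malware features are not reproduced here.

import torch
import torch.nn.functional as F

def lsgan_d_loss(d_real, d_fake):
    # Least-squares discriminator loss: push D(real) toward 1, D(fake) toward 0.
    # Unlike the log-loss, the squared penalty also punishes samples that are
    # correctly classified but far from the decision boundary, which is what
    # yields smoother gradients for boundary samples.
    real_target = torch.ones_like(d_real)
    fake_target = torch.zeros_like(d_fake)
    return 0.5 * (F.mse_loss(d_real, real_target) + F.mse_loss(d_fake, fake_target))

def lsgan_g_loss(d_fake):
    # Least-squares generator loss: push D(fake) toward the "real" label 1.
    return 0.5 * F.mse_loss(d_fake, torch.ones_like(d_fake))

Per the abstract, the AT module then retrains the detector on the adversarial malware examples produced under this loss, i.e., standard adversarial training with the LSGAN generator acting as the attacker, to obtain the Robust Malware Detector (RMD).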