Wang, Yixiang; Liu, Jiqiang; Chang, Xiaolin; Wang, Jianhua; Rodríguez, Ricardo J.
AB-FGSM: AdaBelief Optimizer and FGSM-Based Approach to Generate Adversarial Examples (Journal Article)
In: Journal of Information Security and Applications, vol. 68, pp. 103227, 2022, ISSN: 2214-2126.
@article{WLCWR-JISA-22,
title = {AB-FGSM: AdaBelief Optimizer and FGSM-Based Approach to Generate Adversarial Examples},
author = {Yixiang Wang and Jiqiang Liu and Xiaolin Chang and Jianhua Wang and Ricardo J. Rodríguez},
url = {http://webdiis.unizar.es/~ricardo/files/papers/WLCWR-JISA-22.pdf},
doi = {10.1016/j.jisa.2022.103227},
issn = {2214-2126},
year = {2022},
date = {2022-08-01},
journal = {Journal of Information Security and Applications},
volume = {68},
pages = {103227},
abstract = {Deep neural networks (DNNs) can be fooled by adversarial examples, which are legitimate inputs combined with imperceptible perturbations at the testing stage. Extensive research has made progress on white-box adversarial attacks that craft adversarial examples with a high success rate. However, these crafted examples have a low success rate in misleading black-box models equipped with defensive mechanisms. To tackle this problem, we design an AdaBelief-based iterative Fast Gradient Sign Method (AB-FGSM) to craft adversarial examples that generalize better. By integrating the AdaBelief optimizer into the iterative FGSM (I-FGSM), the generalization of adversarial examples is boosted, since AdaBelief can find a transferable adversarial point in the ε-ball around the legitimate input on different optimization surfaces. We carry out white-box and black-box attacks on various adversarially trained models and ensemble models to verify the effectiveness and transferability of the adversarial examples crafted by AB-FGSM. Our experimental results indicate that AB-FGSM can craft adversarial examples more efficiently and effectively than state-of-the-art attacks in the white-box setting, and that the transfer rate of its adversarial examples is 4% to 21% higher than that of state-of-the-art attacks in the black-box setting.},
keywords = {adversarial examples, deep learning, generalization, optimization, Security, Transferability},
pubstate = {published},
tppubtype = {article}
}
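
The abstract above describes combining I-FGSM's sign-based steps inside an ε-ball with AdaBelief's belief-adapted moment estimates. The following is a minimal PyTorch sketch of that idea, reconstructed from the abstract alone: the cross-entropy loss, the per-step budget eps/steps, the beta values, and the name ab_fgsm are illustrative assumptions, not the paper's reported design.

import torch
import torch.nn.functional as F

def ab_fgsm(model, x, y, eps=8/255, steps=10, beta1=0.9, beta2=0.999, tiny=1e-8):
    # Craft L-infinity adversarial examples for inputs x with labels y.
    alpha = eps / steps                 # per-step budget (assumed schedule)
    x_adv = x.clone().detach()
    m = torch.zeros_like(x)             # first moment: EMA of gradients
    s = torch.zeros_like(x)             # "belief": EMA of (g - m)^2, per AdaBelief
    for t in range(1, steps + 1):
        x_adv.requires_grad_(True)
        loss = F.cross_entropy(model(x_adv), y)
        (g,) = torch.autograd.grad(loss, x_adv)
        m = beta1 * m + (1 - beta1) * g
        s = beta2 * s + (1 - beta2) * (g - m) ** 2
        m_hat = m / (1 - beta1 ** t)    # bias corrections, as in AdaBelief
        s_hat = s / (1 - beta2 ** t)
        update = m_hat / (s_hat.sqrt() + tiny)
        # FGSM-style sign step, then projection back into the eps-ball.
        x_adv = x_adv.detach() + alpha * update.sign()
        x_adv = torch.clamp(x_adv, x - eps, x + eps).clamp(0.0, 1.0)
    return x_adv.detach()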
Wang, Yixiang; Liu, Jiqiang; Chang, Xiaolin; Rodríguez, Ricardo J.; Wang, Jianhua
DI-AA: An Interpretable White-box Attack for Fooling Deep Neural Networks (Journal Article)
In: Information Sciences, vol. 610, pp. 14–32, 2022, ISSN: 0020-0255.
@article{WLCRW-INS-22,
title = {DI-AA: An Interpretable White-box Attack for Fooling Deep Neural Networks},
author = {Yixiang Wang and Jiqiang Liu and Xiaolin Chang and Ricardo J. Rodríguez and Jianhua Wang},
url = {http://webdiis.unizar.es/~ricardo/files/papers/WLCRW-INS-22.pdf},
doi = {10.1016/j.ins.2022.07.157},
issn = {0020-0255},
year = {2022},
date = {2022-09-01},
journal = {Information Sciences},
volume = {610},
pages = {14--32},
abstract = {White-box adversarial example (AE) attacks on deep neural networks (DNNs) have a more powerful destructive capacity than black-box AE attacks. However, few studies have examined the generation of low-perturbation adversarial examples from the interpretability perspective: existing attacks offer no interpretation from the DNN's point of view, and the size of the perturbation is not considered further. To address this, we propose an interpretable white-box AE attack approach, DI-AA, which not only applies the interpretable method of deep Taylor decomposition to select the most contributing features but also adopts a Lagrangian relaxation optimization of the logit output and the perturbation norm to make the perturbation less noticeable. We compare DI-AA with eight baseline attacks on four representative datasets. Experimental results reveal that our approach can (1) attack nonrobust models with low perturbation, close to or lower than that of state-of-the-art white-box AE attacks; (2) evade detection by adversarially trained robust models with the highest success rate; and (3) flexibly adjust the degree of AE generation saturation. Additionally, the AEs generated by DI-AA reduce the accuracy of robust black-box models by 16% to 31% in the black-box setting.},
keywords = {adversarial example, deep learning, interpretability, robustness, white-box attack},
pubstate = {published},
tppubtype = {article}
}
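
The two ingredients the abstract names, feature selection by an attribution method and a Lagrangian relaxation trading the logit output against the perturbation norm, can be sketched as below. This PyTorch sketch is an approximation, not the paper's method: gradient-times-input stands in for deep Taylor decomposition, the squared L2 norm stands in for whichever norm the paper optimizes, and lam, lr, steps, and topk_frac are illustrative values. It assumes a single input x of shape (1, ...) with an integer label y.

import torch
import torch.nn.functional as F

def di_aa_sketch(model, x, y, lam=10.0, lr=0.01, steps=100, topk_frac=0.1):
    # Step 1: attribution. Gradient-times-input is a crude stand-in for
    # deep Taylor decomposition; it scores how much each input feature
    # contributes to the logit of the true class y.
    x_req = x.clone().requires_grad_(True)
    model(x_req)[0, y].backward()
    relevance = (x_req.grad * x).abs().flatten()
    k = max(1, int(topk_frac * relevance.numel()))
    mask = torch.zeros_like(relevance)
    mask[relevance.topk(k).indices] = 1.0   # perturb only the top-k features
    mask = mask.view_as(x)

    # Step 2: Lagrangian relaxation. Jointly minimize the perturbation norm
    # and a hinge on the logit margin of the true class.
    delta = torch.zeros_like(x, requires_grad=True)
    opt = torch.optim.Adam([delta], lr=lr)
    for _ in range(steps):
        adv = (x + delta * mask).clamp(0.0, 1.0)
        out = model(adv)
        others = out[0].clone()
        others[y] = float("-inf")           # keep only competing logits
        margin = out[0, y] - others.max()   # > 0 while still classified as y
        loss = delta.pow(2).sum() + lam * F.relu(margin)
        opt.zero_grad()
        loss.backward()
        opt.step()
    return (x + delta.detach() * mask).clamp(0.0, 1.0)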
Selvi, Jose; Rodríguez, Ricardo J.; Soria-Olivas, Emilio
Towards Optimal LSTM Neural Networks for Detecting Algorithmically Generated Domain Names (Journal Article)
In: IEEE Access, vol. 9, pp. 126446–126456, 2021.
@article{SRS-ACCESS-21,
title = {Towards Optimal LSTM Neural Networks for Detecting Algorithmically Generated Domain Names},
author = {Jose Selvi and Ricardo J. Rodríguez and Emilio Soria-Olivas},
url = {http://webdiis.unizar.es/~ricardo/files/papers/SRS-ACCESS-21.pdf},
doi = {10.1109/ACCESS.2021.3111307},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
journal = {IEEE Access},
volume = {9},
pages = {126446--126456},
abstract = {Malware detection is a problem that has become particularly challenging over the last decade. A common strategy for detecting malware is to scan network traffic for malicious connections between infected devices and their command and control (C&C) servers. However, malware developers are aware of this detection method and have begun to incorporate new strategies to go unnoticed. In particular, they generate domain names instead of using static Internet Protocol addresses or regular domain names pointing to their C&C servers. By using a domain generation algorithm, the effectiveness of domain blacklisting is reduced, as the large number of domain names that must be blocked greatly increases the size of the blacklist. In this paper, we study different Long Short-Term Memory (LSTM) neural network hyperparameters to find the best network configuration for detecting algorithmically generated domain names. In particular, we focus on determining whether the (complex) feature engineering efforts required by other machine learning techniques, such as Random Forest, can be avoided. In this regard, we have conducted a comparative analysis to study the effect of different network sizes and configurations on network performance metrics. Our results show an accuracy of 97.62% and an area under the receiver operating characteristic curve of 0.9956 on the test dataset, indicating that it is possible to obtain good classification results while avoiding the feature engineering process and the additional readjustments required by other machine learning techniques.},
keywords = {deep learning, domain generation algorithms, LSTM, malware},
pubstate = {published},
tppubtype = {article}
}
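
For readers unfamiliar with the setup the paper tunes, a character-level LSTM classifier for domain names looks roughly like the following. This is a generic PyTorch sketch, not the configuration the study converges on: the alphabet, maximum length, embedding width, and hidden size are placeholder defaults.

import torch
import torch.nn as nn

CHARS = "abcdefghijklmnopqrstuvwxyz0123456789-."
IDX = {c: i + 1 for i, c in enumerate(CHARS)}       # index 0 is padding

def encode(domain, max_len=64):
    # Map characters to integer ids; unknown characters fall back to padding.
    ids = [IDX.get(c, 0) for c in domain.lower()[:max_len]]
    return torch.tensor(ids + [0] * (max_len - len(ids)))

class DGADetector(nn.Module):
    def __init__(self, vocab=len(CHARS) + 1, emb=32, hidden=128):
        super().__init__()
        self.emb = nn.Embedding(vocab, emb, padding_idx=0)
        self.lstm = nn.LSTM(emb, hidden, batch_first=True)
        self.head = nn.Linear(hidden, 1)            # benign vs. generated

    def forward(self, x):
        _, (h, _) = self.lstm(self.emb(x))          # final hidden state
        return self.head(h[-1]).squeeze(-1)         # raw logit per domain

model = DGADetector()
logit = model(encode("xkqzjwvp.com").unsqueeze(0))  # batch of one domain
prob_dga = torch.sigmoid(logit)                     # P(domain is generated)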