Huici, Daniel; Rodríguez, Ricardo J.
A Dataset of Windows System Binaries and Similarity Digests for Enhanced Forensic Analysis Journal Article
In: Data in Brief, vol. PP, no. PP, pp. PP, 2025, ISSN: 2352-3409, (Accepted for publication. To appear.).
Abstract | Links | BibTeX | Tags: Approximate matching, forensic artifacts, Malware Detection, operating system Windows, Similarity digest algorithm, Static Analysis, system binaries
@article{HuiciR-DIB-25b,
title = {A Dataset of Windows System Binaries and Similarity Digests for Enhanced Forensic Analysis},
author = {Daniel Huici and Ricardo J. Rodríguez},
url = {https://webdiis.unizar.es/~ricardo/files/papers/HuiciR-DIB-25.pdf},
issn = {2352-3409},
year = {2025},
date = {2025-01-01},
journal = {Data in Brief},
volume = {PP},
number = {PP},
pages = {PP},
abstract = {Similarity digest algorithms, such as TLSH, ssdeep, or sdhash, to name a few, generate intermediate representations (i.e., digests) of digital artifacts to efficiently identify similar objects and measure their degree of similarity. This dataset provides the results of a static analysis performed on system binary files extracted from multiple versions of the Windows operating system, accompanied by their similarity digests. An automated static analysis process was applied to all extracted binaries to decompose them into individual functions and capture detailed metadata for each of them. Specifically, similarity hashes (in particular, TLSH, ssdeep, and LZJD) were computed to enable forensic analysts to effectively assess artifact similarities. The dataset serves as an “allow list” of legitimate Windows artifacts, enabling forensic analysts to detect deviations from trusted binaries, verify system integrity, perform software audits, and improve malware detection efforts. This paper describes the structure of the dataset, the methodology and tools used in its creation, and its value for forensic analysis and cybersecurity investigation.},
note = {Accepted for publication. To appear.},
keywords = {Approximate matching, forensic artifacts, Malware Detection, operating system Windows, Similarity digest algorithm, Static Analysis, system binaries},
pubstate = {published},
tppubtype = {article}
}
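As a minimal illustration only (not code from the paper), the following Python sketch shows how similarity digests like those in the dataset might be computed and compared using the publicly available py-tlsh and ssdeep bindings; the file names are hypothetical placeholders.

import tlsh     # py-tlsh binding; tlsh.hash() needs roughly 50+ bytes of input
import ssdeep   # ssdeep binding exposing hash() and compare()

def digests(path):
    # Read the raw bytes of a binary and compute both similarity digests.
    with open(path, "rb") as f:
        data = f.read()
    return tlsh.hash(data), ssdeep.hash(data)

# Hypothetical file names, used only for illustration.
tlsh_a, ssdeep_a = digests("kernel32_win10.dll")
tlsh_b, ssdeep_b = digests("kernel32_win11.dll")

# TLSH reports a distance (0 = identical, larger = more different);
# ssdeep reports a match score from 0 (no match) to 100 (identical).
print("TLSH distance:", tlsh.diff(tlsh_a, tlsh_b))
print("ssdeep score: ", ssdeep.compare(ssdeep_a, ssdeep_b))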
Martín-Pérez, Miguel; Rodríguez, Ricardo J; Breitinger, Frank
Bringing Order to Approximate Matching: Classification and Attacks on Similarity Digest Algorithms Journal Article
In: Forensic Science International: Digital Investigation, vol. 36, pp. 301120, 2021, ISSN: 2666-2817.
Abstract | Links | BibTeX | Tags: Approximate matching, Bytewise, Classification scheme, Fuzzy hashing, Similarity digest algorithm, Similarity hashing
@article{MRB-FSIDI-21,
title = {Bringing Order to Approximate Matching: Classification and Attacks on Similarity Digest Algorithms},
author = {Miguel Martín-Pérez and Ricardo J Rodríguez and Frank Breitinger},
url = {http://webdiis.unizar.es/~ricardo/files/papers/MRB-FSIDI-21.pdf},
doi = {10.1016/j.fsidi.2021.301120},
issn = {2666-2817},
year = {2021},
date = {2021-01-01},
journal = {Forensic Science International: Digital Investigation},
volume = {36},
pages = {301120},
abstract = {Bytewise approximate matching algorithms (a.k.a. fuzzy hashing or similarity hashing) convert digital artifacts into an intermediate representation that allows faster comparison between them. They have gained a lot of popularity over the past decade, with new algorithms being developed and released to the digital forensics community. When algorithms are released (e.g., as part of a scientific article), they are frequently compared with other algorithms to outline the benefits, and sometimes also the weaknesses, of the proposed approach. However, given the wide variety of algorithms and approaches, it is impossible to provide direct comparisons with all existing algorithms.
In this paper, we present the first classification of approximate matching algorithms, which allows for easier description and comparison.
To this end, we first reviewed the existing literature to understand the techniques various algorithms use and to familiarize ourselves with the common terminology. Our findings allowed us to develop a categorization relying heavily on the terminology proposed by NIST SP 800-168. In addition to the categorization, this article also presents an abstract set of attacks against these algorithms and explains why they are feasible. Lastly, we detail the characteristics needed to build robust algorithms that resist such attacks. We believe that this article helps newcomers, practitioners, and experts alike to better compare algorithms, understand their potential, as well as the characteristics and implications they may have for forensic investigations.},
keywords = {Approximate matching, Bytewise, Classification scheme, Fuzzy hashing, Similarity digest algorithm, Similarity hashing},
pubstate = {published},
tppubtype = {article}
}