import datasets
import pandas as pd

_CITATION = """\
@misc{deepurlbench2025,
  author       = {Deep Instinct Research Team},
  title        = {DeepURLBench: A large-scale benchmark for URL classification},
  year         = {2025},
  howpublished = {\\url{https://huggingface.co/datasets/DeepInstinct/DeepURLBench}}
}
"""

_DESCRIPTION = """\
DeepURLBench is a large-scale benchmark for real-world URL classification.
It provides two subsets: with_dns (URLs plus DNS metadata such as TTL and resolved
IP addresses) and without_dns (URLs, first-seen timestamps, and labels only).
"""

_HOMEPAGE = "https://huggingface.co/datasets/DeepInstinct/DeepURLBench"

_LICENSE = "cc-by-nc-4.0"

# The parquet files are hosted at the root of the dataset repository.
_URLS = {
    "with_dns": "https://huggingface.co/datasets/DeepInstinct/DeepURLBench/resolve/main/urls_with_dns.parquet",
    "without_dns": "https://huggingface.co/datasets/DeepInstinct/DeepURLBench/resolve/main/urls_without_dns.parquet",
}


class DeepURLBench(datasets.GeneratorBasedBuilder):
    """Loading script for DeepURLBench, exposing the with_dns and without_dns configurations."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="with_dns", version=VERSION, description="URLs with DNS info"),
        datasets.BuilderConfig(name="without_dns", version=VERSION, description="URLs without DNS info"),
    ]

    def _info(self):
        # The with_dns config adds TTL and resolved IP addresses to the base url/first_seen/label fields.
        if self.config.name == "with_dns":
            features = datasets.Features({
                "url": datasets.Value("string"),
                "first_seen": datasets.Value("string"),
                "TTL": datasets.Value("int32"),
                "label": datasets.Value("string"),
                "ip_address": datasets.Sequence(datasets.Value("string")),
            })
        else:  # without_dns
            features = datasets.Features({
                "url": datasets.Value("string"),
                "first_seen": datasets.Value("string"),
                "label": datasets.Value("string"),
            })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Each config maps to a single parquet file; the benchmark ships a single "train" split.
        data_file = _URLS[self.config.name]
        downloaded_file = dl_manager.download_and_extract(data_file)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_file},
            )
        ]

    def _generate_examples(self, filepath):
        # Read the parquet file with pandas and yield one example per row, keyed by the row index.
        df = pd.read_parquet(filepath)
        for idx, row in df.iterrows():
            yield idx, row.to_dict()
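

# Usage sketch (an illustrative addition, not part of the published loading script):
# the two configs defined above are selected by name via datasets.load_dataset.
# trust_remote_code=True is assumed to be required since this is a script-based dataset.
if __name__ == "__main__":
    from datasets import load_dataset

    # Subset with DNS metadata (TTL and resolved IP addresses per URL).
    with_dns = load_dataset("DeepInstinct/DeepURLBench", "with_dns", trust_remote_code=True)
    print(with_dns["train"].features)

    # Subset without DNS metadata (url, first_seen, label only).
    without_dns = load_dataset("DeepInstinct/DeepURLBench", "without_dns", trust_remote_code=True)
    print(without_dns["train"][0])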