Update dga-detection.py
Browse files- dga-detection.py +15 -24
dga-detection.py
CHANGED
|
@@ -10,12 +10,11 @@ of DGA domains of Andrey Abakumov and John Bambenek. The total amount of DGA dom
|
|
| 10 |
were generated by 51 different malware families. About 55% of the DGA portion of the dataset is composed of samples from the Banjori, Post, Timba, Cryptolocker,
|
| 11 |
Ramdo and Conficker malware.
|
| 12 |
"""
|
| 13 |
-
|
| 14 |
_HOMEPAGE = "https://huggingface.co/datasets/harpomaxx/dga-detection"
|
| 15 |
|
| 16 |
class MyDataset(datasets.GeneratorBasedBuilder):
|
| 17 |
def _info(self):
|
| 18 |
-
|
| 19 |
return datasets.DatasetInfo(
|
| 20 |
description=_DESCRIPTION,
|
| 21 |
features=datasets.Features(
|
|
@@ -24,15 +23,15 @@ class MyDataset(datasets.GeneratorBasedBuilder):
|
|
| 24 |
"class": datasets.Value("int")
|
| 25 |
}
|
| 26 |
),
|
| 27 |
-
supervised_keys=("domain", "
|
| 28 |
homepage="_HOMEPAGE",
|
| 29 |
)
|
| 30 |
|
| 31 |
-
|
| 32 |
def _split_generators(self, dl_manager: datasets.DownloadConfig):
|
| 33 |
-
# Load your
|
| 34 |
csv_path = "https://huggingface.co/datasets/harpomaxx/dga-detection/resolve/main/argencon.csv.gz"
|
| 35 |
|
|
|
|
| 36 |
return [
|
| 37 |
datasets.SplitGenerator(
|
| 38 |
name=split,
|
|
@@ -44,24 +43,14 @@ class MyDataset(datasets.GeneratorBasedBuilder):
|
|
| 44 |
for split in ["train", "test", "validation"]
|
| 45 |
]
|
| 46 |
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
)
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
# You can filter or split your dataset based on the 'split' argument if necessary
|
| 56 |
-
dataset = dataset[dataset["split"] == split]
|
| 57 |
-
# Generate examples
|
| 58 |
-
for index, row in dataset.iterrows():
|
| 59 |
-
yield index, {
|
| 60 |
-
"domain": row["domain"],
|
| 61 |
-
"label": row["label"],
|
| 62 |
-
}
|
| 63 |
-
|
| 64 |
-
|
| 65 |
def _generate_examples(
|
| 66 |
self,
|
| 67 |
filepath: str,
|
|
@@ -69,7 +58,10 @@ class MyDataset(datasets.GeneratorBasedBuilder):
|
|
| 69 |
):
|
| 70 |
# Read your CSV dataset
|
| 71 |
dataset = pd.read_csv(filepath,compression='gzip')
|
|
|
|
|
|
|
| 72 |
dataset['class'] = dataset['label'].apply(lambda x: 0 if 'normal' in x else 1)
|
|
|
|
| 73 |
# Get the total number of rows
|
| 74 |
total_rows = len(dataset)
|
| 75 |
|
|
@@ -96,4 +88,3 @@ class MyDataset(datasets.GeneratorBasedBuilder):
|
|
| 96 |
"label": row["label"],
|
| 97 |
"class": row["class"],
|
| 98 |
}
|
| 99 |
-
|
|
|
|
| 10 |
were generated by 51 different malware families. About 55% of the DGA portion of the dataset is composed of samples from the Banjori, Post, Timba, Cryptolocker,
|
| 11 |
Ramdo and Conficker malware.
|
| 12 |
"""
|
|
|
|
| 13 |
_HOMEPAGE = "https://huggingface.co/datasets/harpomaxx/dga-detection"
|
| 14 |
|
| 15 |
class MyDataset(datasets.GeneratorBasedBuilder):
|
| 16 |
def _info(self):
|
| 17 |
+
# Provide metadata for the dataset
|
| 18 |
return datasets.DatasetInfo(
|
| 19 |
description=_DESCRIPTION,
|
| 20 |
features=datasets.Features(
|
|
|
|
| 23 |
"class": datasets.Value("int")
|
| 24 |
}
|
| 25 |
),
|
| 26 |
+
supervised_keys=("domain", "class"),
|
| 27 |
homepage="_HOMEPAGE",
|
| 28 |
)
|
| 29 |
|
|
|
|
| 30 |
def _split_generators(self, dl_manager: datasets.DownloadConfig):
|
| 31 |
+
# Load your dataset file
|
| 32 |
csv_path = "https://huggingface.co/datasets/harpomaxx/dga-detection/resolve/main/argencon.csv.gz"
|
| 33 |
|
| 34 |
+
# Create SplitGenerators for each dataset split (train, test, validation)
|
| 35 |
return [
|
| 36 |
datasets.SplitGenerator(
|
| 37 |
name=split,
|
|
|
|
| 43 |
for split in ["train", "test", "validation"]
|
| 44 |
]
|
| 45 |
|
| 46 |
+
"""""
|
| 47 |
+
The data variable in the _generate_examples() method is a temporary variable that holds the portion of the dataset based on the current split.
|
| 48 |
+
The datasets.SplitGenerator in the _split_generators() method is responsible for creating the three different keys ('train', 'test', 'validation').When you load your
|
| 49 |
+
dataset using load_dataset(), the Hugging Face Datasets library will automatically call the _split_generators() method to create the three different dataset splits.
|
| 50 |
+
Then, it will call the _generate_examples() method for each split separately, passing the corresponding split name as the split argument.
|
| 51 |
+
This is how the different keys are created. To clarify, the _generate_examples() method processes one split at a time, and the Datasets library combines the results
|
| 52 |
+
to create a final dataset with keys for 'train', 'test', and 'validation'.
|
| 53 |
+
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 54 |
def _generate_examples(
|
| 55 |
self,
|
| 56 |
filepath: str,
|
|
|
|
| 58 |
):
|
| 59 |
# Read your CSV dataset
|
| 60 |
dataset = pd.read_csv(filepath,compression='gzip')
|
| 61 |
+
|
| 62 |
+
# Create the 'class' column based on the 'label' column
|
| 63 |
dataset['class'] = dataset['label'].apply(lambda x: 0 if 'normal' in x else 1)
|
| 64 |
+
|
| 65 |
# Get the total number of rows
|
| 66 |
total_rows = len(dataset)
|
| 67 |
|
|
|
|
| 88 |
"label": row["label"],
|
| 89 |
"class": row["class"],
|
| 90 |
}
|
|
|