@@ -32,11 +32,11 @@ class SamplePoisoningAttack(DatasetAttack):
        engine (object): The training engine object, including the associated
            datamodule.
        attack_params (dict): Attack parameters including:
-            - poisoned_percent (float): The percentage of data points to be poisoned.
-            - poisoned_ratio (float): The ratio of poisoned data relative to the total dataset.
+            - poisoned_sample_percent (float): The percentage of data points to be poisoned (0-100).
+            - poisoned_noise_percent (float): The percentage of noise to be added to poisoned data (0-100).
            - targeted (bool): Whether the attack is targeted at a specific label.
-            - target_label (int): The target label for the attack (used if targeted is True).
-            - noise_type (str): The type of noise to introduce during the attack.
+            - target_label/targetLabel (int): The target label for the attack (used if targeted is True).
+            - noise_type/noiseType (str): The type of noise to introduce during the attack.
    """

    def __init__(self, engine, attack_params):
@@ -58,15 +58,17 @@ def __init__(self, engine, attack_params):

        super().__init__(engine, round_start, round_stop, attack_interval)
        self.datamodule = engine._trainer.datamodule
-        self.poisoned_percent = float(attack_params["poisoned_percent"])
-        self.poisoned_ratio = float(attack_params["poisoned_ratio"])
+        self.poisoned_percent = float(attack_params["poisoned_sample_percent"])
+        self.poisoned_noise_percent = float(attack_params["poisoned_noise_percent"])
        self.targeted = attack_params["targeted"]
-        self.target_label = int(attack_params["target_label"])
-        self.noise_type = attack_params["noise_type"].lower()
+
+        # Handle both camelCase and snake_case parameter names
+        self.target_label = int(attack_params.get("target_label") or attack_params.get("targetLabel", 4))
+        self.noise_type = (attack_params.get("noise_type") or attack_params.get("noiseType", "Gaussian")).lower()

-    def apply_noise(self, t, noise_type, poisoned_ratio):
+    def apply_noise(self, t, noise_type, poisoned_noise_percent):
        """
-        Applies noise to a tensor based on the specified noise type and poisoning ratio.
+        Applies noise to a tensor based on the specified noise type and poisoning percentage.

        Args:
            t (torch.Tensor): The input tensor to which noise will be applied.
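Taken together, the constructor hunk above reads the two percentage parameters under their new names and falls back between snake_case and camelCase keys for the label and noise type. Below is a minimal sketch of an attack_params dict that this constructor would accept; the values are illustrative rather than taken from the patch, and any round/interval keys handled earlier in __init__ are omitted.

    attack_params = {
        "poisoned_sample_percent": 30,   # poison 30% of the node's training indices
        "poisoned_noise_percent": 70,    # noise intensity, later divided by 100
        "targeted": False,
        "target_label": 4,               # "targetLabel" would be picked up as well
        "noise_type": "salt",            # "noiseType" would be picked up as well
    }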
@@ -75,7 +77,7 @@ def apply_noise(self, t, noise_type, poisoned_ratio):
                - "gaussian": Gaussian noise with mean 0 and specified variance.
                - "s&p": Salt-and-pepper noise.
                - "nlp_rawdata": Applies a custom NLP raw data poisoning function.
-            poisoned_ratio (float): The ratio or variance of noise to be applied, depending on the noise type.
+            poisoned_noise_percent (float): The percentage of noise to be applied (0-100).

        Returns:
            torch.Tensor: The tensor with noise applied. If the noise type is not supported,
@@ -90,6 +92,9 @@ def apply_noise(self, t, noise_type, poisoned_ratio):
            the `skimage.util` package, and returned as a `torch.Tensor`.
        """
        arr = t.detach().cpu().numpy() if isinstance(t, torch.Tensor) else np.array(t)
+
+        # Convert percentage to ratio for noise application
+        poisoned_ratio = poisoned_noise_percent / 100.0

        if noise_type == "salt":
            return torch.tensor(random_noise(arr, mode=noise_type, amount=poisoned_ratio))
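The three added lines convert the 0-100 percentage into the 0-1 value that `skimage.util.random_noise` expects for arguments such as `amount` (salt/pepper modes) or `var` (gaussian mode). A standalone sketch of the same conversion on an illustrative array; the image shape and values are assumptions, not part of the patch:

    import numpy as np
    import torch
    from skimage.util import random_noise

    poisoned_noise_percent = 70.0
    poisoned_ratio = poisoned_noise_percent / 100.0   # 0.7, usable as random_noise's `amount`

    img = np.random.rand(28, 28)                      # hypothetical image in [0, 1]
    noisy = torch.tensor(random_noise(img, mode="salt", amount=poisoned_ratio))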
@@ -108,7 +113,7 @@ def datapoison(
        dataset,
        indices,
        poisoned_percent,
-        poisoned_ratio,
+        poisoned_noise_percent,
        targeted=False,
        target_label=3,
        noise_type="salt",
@@ -118,14 +123,14 @@ def datapoison(

        This function applies noise to randomly selected samples within a dataset.
        Noise can be targeted or non-targeted. In non-targeted poisoning, random samples
-        are chosen and altered using the specified noise type and ratio. In targeted poisoning,
+        are chosen and altered using the specified noise type and percentage. In targeted poisoning,
        only samples with a specified label are altered by adding an 'X' pattern.

        Args:
            dataset (Dataset): The dataset to poison, expected to have `.data` and `.targets` attributes.
            indices (list of int): The list of indices in the dataset to consider for poisoning.
-            poisoned_percent (float): The percentage of `indices` to poison, as a fraction (0 <= poisoned_percent <= 1).
-            poisoned_ratio (float): The intensity or probability parameter for the noise, depending on the noise type.
+            poisoned_percent (float): The percentage of `indices` to poison (0-100).
+            poisoned_noise_percent (float): The percentage of noise to apply to poisoned samples (0-100).
            targeted (bool, optional): If True, applies targeted poisoning by adding an 'X' only to samples with `target_label`.
                Default is False.
            target_label (int, optional): The label to target when `targeted` is True. Default is 3.
@@ -139,11 +144,11 @@ def datapoison(
            Dataset: A deep copy of the original dataset with poisoned data in `.data`.

        Raises:
-            ValueError: If `poisoned_percent` is not between 0 and 1, or if `noise_type` is unsupported.
+            ValueError: If `poisoned_percent` or `poisoned_noise_percent` is not between 0 and 100, or if `noise_type` is unsupported.

        Notes:
            - Non-targeted poisoning randomly selects samples from `indices` based on `poisoned_percent`.
-            - Targeted poisoning modifies only samples with `target_label` by adding an 'X' pattern, regardless of `poisoned_ratio`.
+            - Targeted poisoning modifies only samples with `target_label` by adding an 'X' pattern, regardless of `poisoned_noise_percent`.
        """
        new_dataset = copy.deepcopy(dataset)
        if not isinstance(new_dataset.targets, np.ndarray):
@@ -156,7 +161,7 @@ def datapoison(
            noise_type = noise_type[0]

        if not targeted:
-            num_poisoned = int(poisoned_percent * num_indices)
+            num_poisoned = int(poisoned_percent * num_indices / 100.0)  # Convert percentage to count
            if num_indices == 0:
                return new_dataset
            if num_poisoned > num_indices:
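Because `poisoned_percent` is now expressed on a 0-100 scale, the sample count is scaled by 1/100 before any indices are drawn. A quick worked example with illustrative numbers:

    poisoned_percent = 30                                         # percent, not a fraction
    num_indices = 200
    num_poisoned = int(poisoned_percent * num_indices / 100.0)    # -> 60 samples selected for poisoning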
@@ -168,7 +173,7 @@ def datapoison(
                t = new_dataset.data[i]
                if isinstance(t, tuple):
                    t = t[0]
-                poisoned = self.apply_noise(t, noise_type, poisoned_ratio)
+                poisoned = self.apply_noise(t, noise_type, poisoned_noise_percent)
                if isinstance(t, tuple):
                    poisoned = (poisoned, t[1])
                if isinstance(poisoned, torch.Tensor):
@@ -267,7 +272,7 @@ def get_malicious_dataset(self):
            self.datamodule.train_set,
            self.datamodule.train_set_indices,
            self.poisoned_percent,
-            self.poisoned_ratio,
+            self.poisoned_noise_percent,
            self.targeted,
            self.target_label,
            self.noise_type,
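For reference, `get_malicious_dataset` simply forwards the renamed attribute into `datapoison`. A hedged sketch of the equivalent direct call, assuming an already constructed attack instance `attack` and its datamodule `dm` (both hypothetical names used only for illustration):

    poisoned_train_set = attack.datapoison(
        dm.train_set,
        dm.train_set_indices,
        attack.poisoned_percent,        # 0-100, read from "poisoned_sample_percent"
        attack.poisoned_noise_percent,  # 0-100, read from "poisoned_noise_percent"
        targeted=attack.targeted,
        target_label=attack.target_label,
        noise_type=attack.noise_type,
    )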