Skip to content

Feature/new attacks #35

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Mar 5, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions nebula/addons/attacks/attacks.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,7 @@ def create_attack(engine) -> Attack:
AttackException: If the specified attack name is not found in the `ATTACK_MAP`.
"""
from nebula.addons.attacks.communications.delayerattack import DelayerAttack
from nebula.addons.attacks.communications.floodingattack import FloodingAttack
from nebula.addons.attacks.dataset.datapoison import SamplePoisoningAttack
from nebula.addons.attacks.dataset.labelflipping import LabelFlippingAttack
from nebula.addons.attacks.model.gllneuroninversion import GLLNeuronInversionAttack
Expand All @@ -123,6 +124,7 @@ def create_attack(engine) -> Attack:
"Noise Injection": NoiseInjectionAttack,
"Swapping Weights": SwappingWeightsAttack,
"Delayer": DelayerAttack,
"Flooding": FloodingAttack,
"Label Flipping": LabelFlippingAttack,
"Sample Poisoning": SamplePoisoningAttack,
"Model Poisoning": ModelPoisonAttack,
Expand Down
53 changes: 45 additions & 8 deletions nebula/addons/attacks/communications/communicationattack.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,35 @@
import logging
import types
from abc import abstractmethod
import random

from nebula.addons.attacks.attacks import Attack


class CommunicationAttack(Attack):
def __init__(self, engine, target_class, target_method, round_start_attack, round_stop_attack, decorator_args=None):
def __init__(self, engine,
target_class,
target_method,
round_start_attack,
round_stop_attack,
attack_interval,
decorator_args=None,
selectivity_percentage: int = 100,
selection_interval: int = None
):
super().__init__()
self.engine = engine
self.target_class = target_class
self.target_method = target_method
self.decorator_args = decorator_args
self.round_start_attack = round_start_attack
self.round_stop_attack = round_stop_attack
self.attack_interval = attack_interval
self.original_method = getattr(target_class, target_method, None)
self.selectivity_percentage = selectivity_percentage
self.selection_interval = selection_interval
self.last_selection_round = 0
self.targets = set()

if not self.original_method:
raise AttributeError(f"Method {target_method} not found in class {target_class}")
Expand All @@ -24,10 +39,28 @@ def decorator(self, *args):
"""Decorator that adds malicious behavior to the execution of the original method."""
pass

async def select_targets(self):
    """Choose which neighbor nodes the attack will act upon this round.

    Selection rules:
      * ``selectivity_percentage == 100``: every directly connected neighbor
        is a target, refreshed on every call.
      * otherwise, a random sample of ``selectivity_percentage`` percent of
        the neighbors (at least one) is drawn — on the first call, and, when
        ``selection_interval`` is set, again every ``selection_interval``
        calls.

    Side effects:
        Updates ``self.targets`` and increments ``self.last_selection_round``.
    """
    if self.selectivity_percentage != 100:
        # Recompute on the first call (empty target set) and, when an
        # interval is configured, on every `selection_interval`-th call.
        if self.selection_interval:
            needs_selection = self.last_selection_round % self.selection_interval == 0
            log_msg = "Recalculating targets..."
        else:
            needs_selection = not self.targets
            log_msg = "Calculating targets..."
        if needs_selection:
            logging.info(log_msg)
            all_nodes = await self.engine.cm.get_addrs_current_connections(only_direct=True)
            # At least one target, even for very small percentages/neighborhoods.
            num_targets = max(1, int(len(all_nodes) * (self.selectivity_percentage / 100)))
            self.targets = set(random.sample(list(all_nodes), num_targets))
    else:
        logging.info("All neighbors selected as targets")
        self.targets = await self.engine.cm.get_addrs_current_connections(only_direct=True)

    logging.info(f"Selected {self.selectivity_percentage}% targets from neighbors: {self.targets}")
    self.last_selection_round += 1

async def _inject_malicious_behaviour(self):
"""Inject malicious behavior into the target method."""
logging.info("Injecting malicious behavior")

decorated_method = self.decorator(self.decorator_args)(self.original_method)

setattr(
Expand All @@ -38,14 +71,18 @@ async def _inject_malicious_behaviour(self):

async def _restore_original_behaviour(self):
    """Restore the original behavior of the target method.

    Re-binds the unpatched method (captured in ``__init__`` as
    ``self.original_method``) onto ``self.target_class``, undoing any
    decorator installed by ``_inject_malicious_behaviour``.
    """
    logging.info(f"Restoring original behavior of {self.target_class}.{self.target_method}")
    setattr(self.target_class, self.target_method, self.original_method)

async def attack(self):
    """Drive the attack lifecycle based on the engine's current round.

    Per-round behavior:
      * outside ``[round_start_attack, round_stop_attack]``: do nothing;
      * at ``round_stop_attack``: restore the original method;
      * at ``round_start_attack`` and every ``attack_interval`` rounds
        after it: (re)select targets and inject the malicious behavior;
      * on any other round inside the window: restore the original method.
    """
    current_round = self.engine.round
    # Guard clause: outside the attack window there is nothing to do.
    if current_round not in range(self.round_start_attack, self.round_stop_attack + 1):
        return
    if current_round == self.round_stop_attack:
        # Fixed typo in log message ("Stoping" -> "Stopping").
        logging.info(f"[{self.__class__.__name__}] Stopping attack")
        await self._restore_original_behaviour()
    elif (current_round - self.round_start_attack) % self.attack_interval == 0:
        # Note: the start round itself satisfies this condition (0 % n == 0),
        # so the former explicit `round == round_start_attack` test was redundant.
        await self.select_targets()
        logging.info(f"[{self.__class__.__name__}] Performing attack")
        await self._inject_malicious_behaviour()
    else:
        # In-window round that is not an attack round: make sure the
        # target method is back to its original behavior.
        await self._restore_original_behaviour()
17 changes: 13 additions & 4 deletions nebula/addons/attacks/communications/delayerattack.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,18 +22,24 @@ def __init__(self, engine, attack_params: dict):
self.delay = int(attack_params["delay"])
round_start = int(attack_params["round_start_attack"])
round_stop = int(attack_params["round_stop_attack"])
attack_interval = int(attack_params["attack_interval"])
self.target_percentage = int(attack_params["target_percentage"])
self.selection_interval = int(attack_params["selection_interval"])
except KeyError as e:
raise ValueError(f"Missing required attack parameter: {e}")
except ValueError:
raise ValueError("Invalid value in attack_params. Ensure all values are integers.")

super().__init__(
engine,
engine._cm._propagator,
"propagate",
engine._cm,
"send_model",
round_start,
round_stop,
attack_interval,
self.delay,
self.target_percentage,
self.selection_interval,
)

def decorator(self, delay: int):
Expand All @@ -50,8 +56,11 @@ def decorator(self, delay: int):
def decorator(func):
@wraps(func)
async def wrapper(*args, **kwargs):
logging.info(f"[DelayerAttack] Adding delay of {delay} seconds to {func.__name__}")
await asyncio.sleep(delay)
if len(args) > 1:
dest_addr = args[1]
if dest_addr in self.targets:
logging.info(f"[DelayerAttack] Delaying model propagation to {dest_addr} by {delay} seconds")
await asyncio.sleep(delay)
_, *new_args = args # Exclude self argument
return await func(*new_args)

Expand Down
74 changes: 74 additions & 0 deletions nebula/addons/attacks/communications/floodingattack.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
import logging
from functools import wraps

from nebula.addons.attacks.communications.communicationattack import CommunicationAttack


class FloodingAttack(CommunicationAttack):
    """
    Implements an attack that floods selected target nodes by repeating
    message sends a configurable number of times.
    """

    def __init__(self, engine, attack_params: dict):
        """
        Initializes the FloodingAttack with the engine and attack parameters.

        Args:
            engine: The engine managing the attack context.
            attack_params (dict): Parameters for the attack, including the
                flooding factor, the attack window/interval, and target
                selection settings.

        Raises:
            ValueError: If a required parameter is missing or cannot be
                converted to an integer.
        """
        try:
            round_start = int(attack_params["round_start_attack"])
            round_stop = int(attack_params["round_stop_attack"])
            attack_interval = int(attack_params["attack_interval"])
            self.flooding_factor = int(attack_params["flooding_factor"])
            self.target_percentage = int(attack_params["target_percentage"])
            self.selection_interval = int(attack_params["selection_interval"])
        except KeyError as e:
            raise ValueError(f"Missing required attack parameter: {e}")
        except ValueError:
            raise ValueError("Invalid value in attack_params. Ensure all values are integers.")

        # When True, log every duplicated send (noisy; off by default).
        self.verbose = False

        super().__init__(
            engine,
            engine._cm,
            "send_message",
            round_start,
            round_stop,
            attack_interval,
            self.flooding_factor,
            self.target_percentage,
            self.selection_interval,
        )

    def decorator(self, flooding_factor: int):
        """
        Decorator that repeats the wrapped send for targeted destinations.

        Args:
            flooding_factor (int): The number of extra duplicate sends to
                perform before the original send.

        Returns:
            function: A decorator function that wraps the target method with
            the flooding logic.
        """

        def decorator(func):
            @wraps(func)
            async def wrapper(*args, **kwargs):
                _, *new_args = args  # Exclude self argument
                if len(args) > 1:
                    dest_addr = args[1]
                    if dest_addr in self.targets:
                        logging.info(f"[FloodingAttack] Flooding message to {dest_addr} by {flooding_factor} times")
                        for i in range(flooding_factor):
                            if self.verbose:
                                logging.info(f"[FloodingAttack] Sending duplicate {i+1}/{flooding_factor} to {dest_addr}")
                            await func(*new_args, **kwargs)
                # Bug fix: forward **kwargs on the final (original) send too,
                # consistently with the duplicated sends above.
                return await func(*new_args, **kwargs)

            return wrapper

        return decorator
13 changes: 10 additions & 3 deletions nebula/addons/attacks/dataset/datapoison.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,15 +46,22 @@ def __init__(self, engine, attack_params):
engine (object): The training engine object.
attack_params (dict): Dictionary of attack parameters.
"""
super().__init__(engine)
try:
round_start = int(attack_params["round_start_attack"])
round_stop = int(attack_params["round_stop_attack"])
attack_interval = int(attack_params["attack_interval"])
except KeyError as e:
raise ValueError(f"Missing required attack parameter: {e}")
except ValueError:
raise ValueError("Invalid value in attack_params. Ensure all values are integers.")

super().__init__(engine, round_start, round_stop, attack_interval)
self.datamodule = engine._trainer.datamodule
self.poisoned_percent = float(attack_params["poisoned_percent"])
self.poisoned_ratio = float(attack_params["poisoned_ratio"])
self.targeted = attack_params["targeted"]
self.target_label = int(attack_params["target_label"])
self.noise_type = attack_params["noise_type"]
self.round_start_attack = int(attack_params["round_start_attack"])
self.round_stop_attack = int(attack_params["round_stop_attack"])

def apply_noise(self, t, noise_type, poisoned_ratio):
"""
Expand Down
17 changes: 10 additions & 7 deletions nebula/addons/attacks/dataset/datasetattack.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,16 +13,17 @@ class DatasetAttack(Attack):
data, potentially impacting the model's training process.
"""

def __init__(self, engine, round_start_attack, round_stop_attack, attack_interval):
    """
    Initializes the DatasetAttack with the given engine and attack window.

    Args:
        engine: The engine managing the attack context.
        round_start_attack (int): Round at which the attack begins.
        round_stop_attack (int): Round at which the attack stops.
        attack_interval (int): Number of rounds between attack executions
            inside the attack window.
    """
    self.engine = engine
    self.round_start_attack = round_start_attack
    self.round_stop_attack = round_stop_attack
    self.attack_interval = attack_interval

async def attack(self):
"""
Expand All @@ -32,11 +33,13 @@ async def attack(self):
with a malicious dataset. The attack is stopped when the engine reaches the
designated stop round.
"""
if self.engine.round in range(self.round_start_attack, self.round_stop_attack):
logging.info("[DatasetAttack] Performing attack")
if self.engine.round not in range(self.round_start_attack, self.round_stop_attack + 1):
pass
elif self.engine.round == self.round_stop_attack:
logging.info(f"[{self.__class__.__name__}] Stopping attack")
elif self.engine.round >= self.round_start_attack and ((self.engine.round - self.round_start_attack) % self.attack_interval == 0):
logging.info(f"[{self.__class__.__name__}] Performing attack")
self.engine.trainer.datamodule.train_set = self.get_malicious_dataset()
elif self.engine.round == self.round_stop_attack + 1:
logging.info("[DatasetAttack] Stopping attack")

async def _inject_malicious_behaviour(self, target_function, *args, **kwargs):
"""
Expand Down
13 changes: 10 additions & 3 deletions nebula/addons/attacks/dataset/labelflipping.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,14 +32,21 @@ def __init__(self, engine, attack_params):
attack_params (dict): Parameters for the attack, including the percentage of
poisoned data, targeting options, and label specifications.
"""
super().__init__(engine)
try:
round_start = int(attack_params["round_start_attack"])
round_stop = int(attack_params["round_stop_attack"])
attack_interval = int(attack_params["attack_interval"])
except KeyError as e:
raise ValueError(f"Missing required attack parameter: {e}")
except ValueError:
raise ValueError("Invalid value in attack_params. Ensure all values are integers.")

super().__init__(engine, round_start, round_stop, attack_interval)
self.datamodule = engine._trainer.datamodule
self.poisoned_percent = float(attack_params["poisoned_percent"])
self.targeted = attack_params["targeted"]
self.target_label = int(attack_params["target_label"])
self.target_changed_label = int(attack_params["target_changed_label"])
self.round_start_attack = int(attack_params["round_start_attack"])
self.round_stop_attack = int(attack_params["round_stop_attack"])

def labelFlipping(
self,
Expand Down
13 changes: 10 additions & 3 deletions nebula/addons/attacks/model/gllneuroninversion.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,16 @@ def __init__(self, engine, attack_params):
engine (object): The training engine object.
_ (any): A placeholder argument (not used in this class).
"""
super().__init__(engine)
self.round_start_attack = int(attack_params["round_start_attack"])
self.round_stop_attack = int(attack_params["round_stop_attack"])
try:
round_start = int(attack_params["round_start_attack"])
round_stop = int(attack_params["round_stop_attack"])
attack_interval = int(attack_params["attack_interval"])
except KeyError as e:
raise ValueError(f"Missing required attack parameter: {e}")
except ValueError:
raise ValueError("Invalid value in attack_params. Ensure all values are integers.")

super().__init__(engine, round_start, round_stop, attack_interval)

def model_attack(self, received_weights):
"""
Expand Down
21 changes: 12 additions & 9 deletions nebula/addons/attacks/model/modelattack.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ class ModelAttack(Attack):
model aggregation.
"""

def __init__(self, engine):
def __init__(self, engine, round_start_attack, round_stop_attack, attack_interval):
"""
Initializes the ModelAttack with the specified engine.

Expand All @@ -31,8 +31,9 @@ def __init__(self, engine):
self.engine = engine
self.aggregator = engine._aggregator
self.original_aggregation = engine.aggregator.run_aggregation
self.round_start_attack = 0
self.round_stop_attack = 10
self.round_start_attack = round_start_attack
self.round_stop_attack = round_stop_attack
self.attack_interval = attack_interval

def aggregator_decorator(self):
"""
Expand Down Expand Up @@ -104,11 +105,13 @@ async def attack(self):

This method logs the attack and calls the method to modify the aggregator.
"""
if self.engine.round == self.round_start_attack:
logging.info("[ModelAttack] Injecting malicious behaviour")
if self.engine.round not in range(self.round_start_attack, self.round_stop_attack + 1):
pass
elif self.engine.round == self.round_stop_attack:
logging.info(f"[{self.__class__.__name__}] Stopping attack")
await self._restore_original_behaviour()
elif (self.engine.round == self.round_start_attack) or ((self.engine.round - self.round_start_attack) % self.attack_interval == 0):
logging.info(f"[{self.__class__.__name__}] Performing attack")
await self._inject_malicious_behaviour()
elif self.engine.round == self.round_stop_attack + 1:
logging.info("[ModelAttack] Stopping attack")
else:
await self._restore_original_behaviour()
elif self.engine.round in range(self.round_start_attack, self.round_stop_attack):
logging.info("[ModelAttack] Performing attack")
14 changes: 11 additions & 3 deletions nebula/addons/attacks/model/modelpoison.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,11 +41,19 @@ def __init__(self, engine, attack_params):
engine (object): The training engine object.
attack_params (dict): Dictionary of attack parameters.
"""
super().__init__(engine)
try:
round_start = int(attack_params["round_start_attack"])
round_stop = int(attack_params["round_stop_attack"])
attack_interval = int(attack_params["attack_interval"])
except KeyError as e:
raise ValueError(f"Missing required attack parameter: {e}")
except ValueError:
raise ValueError("Invalid value in attack_params. Ensure all values are integers.")

super().__init__(engine, round_start, round_stop, attack_interval)

self.poisoned_ratio = float(attack_params["poisoned_ratio"])
self.noise_type = attack_params["noise_type"].lower()
self.round_start_attack = int(attack_params["round_start_attack"])
self.round_stop_attack = int(attack_params["round_stop_attack"])

def modelPoison(self, model: OrderedDict, poisoned_ratio, noise_type="gaussian"):
"""
Expand Down
Loading
Loading