import torch
import time
from typing import Dict, List, Tuple, Any

from ..server_class import Server
from .dp_mechanisms import DPMechanism, DPAccountant
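# NOTE: DPMechanism and DPAccountant live in dp_mechanisms.py. The sketch
# below is the interface this file relies on, inferred from the call sites
# here rather than from that module's actual definition:
#
#   DPMechanism(epsilon, delta, sensitivity, mechanism)
#       .clip_gradients(tensor, clip_norm) -> torch.Tensor   # L2-norm clipping
#       .add_noise(tensor) -> torch.Tensor                   # calibrated DP noise
#   DPAccountant()
#       .add_step(epsilon, delta) -> None
#       .get_total_privacy_spent() -> Tuple[float, float]    # (eps, delta) totals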

class Server_DP(Server):
    """
    Enhanced server class with differential privacy (DP) support for FedGCN.
    Extends the original Server class to support DP in the pre-training
    aggregation step.
    """

    def __init__(self, feature_dim: int, args_hidden: int, class_num: int,
                 device: torch.device, trainers: list, args: Any):
        super().__init__(feature_dim, args_hidden, class_num, device, trainers, args)

        # DP configuration (the DP attributes below exist only when use_dp is True)
        self.use_dp = getattr(args, 'use_dp', False)

        if self.use_dp:
            self.dp_epsilon = getattr(args, 'dp_epsilon', 1.0)
            self.dp_delta = getattr(args, 'dp_delta', 1e-5)
            self.dp_sensitivity = getattr(args, 'dp_sensitivity', 1.0)
            self.dp_mechanism = getattr(args, 'dp_mechanism', 'gaussian')
            self.dp_clip_norm = getattr(args, 'dp_clip_norm', 1.0)

            # Initialize the DP mechanism
            self.dp_mechanism_obj = DPMechanism(
                epsilon=self.dp_epsilon,
                delta=self.dp_delta,
                sensitivity=self.dp_sensitivity,
                mechanism=self.dp_mechanism
            )

            # Privacy accountant: tracks cumulative (epsilon, delta) across releases
            self.privacy_accountant = DPAccountant()

            print("Server initialized with Differential Privacy:")
            print(f"  Mechanism: {self.dp_mechanism}")
            print(f"  Privacy parameters: ε={self.dp_epsilon}, δ={self.dp_delta}")
            print(f"  Sensitivity: {self.dp_sensitivity}")
            print(f"  Clipping norm: {self.dp_clip_norm}")

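    # Example of the args fields this class reads (names as used above via
    # getattr; how they are populated -- argparse, a config object, etc. --
    # is up to the caller):
    #
    #   args.use_dp = True
    #   args.dp_epsilon = 1.0        # per-release privacy budget ε
    #   args.dp_delta = 1e-5         # per-release δ
    #   args.dp_sensitivity = 1.0    # L2 sensitivity used to scale the noise
    #   args.dp_mechanism = 'gaussian'
    #   args.dp_clip_norm = 1.0      # per-trainer L2 clipping threshold
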
    def aggregate_dp_feature_sums(self, local_feature_sums: List[torch.Tensor]) -> Tuple[torch.Tensor, Dict]:
        """
        Aggregate feature sums from all trainers with differential privacy.
        Assumes the server was constructed with use_dp=True.

        Parameters
        ----------
        local_feature_sums : List[torch.Tensor]
            List of local feature sums from trainers.

        Returns
        -------
        Tuple[torch.Tensor, Dict]
            The noisy aggregated feature sum and a dict of DP statistics.
        """
        aggregation_start = time.time()

        # Step 1: Clip individual contributions so each trainer's influence
        # on the aggregate is bounded by dp_clip_norm (this is what keeps
        # the sensitivity of the sum finite).
        clipped_sums = []
        clipping_stats = []

        for i, local_sum in enumerate(local_feature_sums):
            original_norm = torch.norm(local_sum).item()
            clipped_sum = self.dp_mechanism_obj.clip_gradients(local_sum, self.dp_clip_norm)
            clipped_norm = torch.norm(clipped_sum).item()

            clipped_sums.append(clipped_sum)
            clipping_stats.append({
                'trainer_id': i,
                'original_norm': original_norm,
                'clipped_norm': clipped_norm,
                'was_clipped': original_norm > self.dp_clip_norm
            })

        # Step 2: Aggregate the clipped sums
        aggregated_sum = torch.stack(clipped_sums).sum(dim=0)

        # Step 3: Add calibrated DP noise to the aggregate
        noisy_aggregated_sum = self.dp_mechanism_obj.add_noise(aggregated_sum)

        aggregation_time = time.time() - aggregation_start

        # Step 4: Record this release with the privacy accountant
        self.privacy_accountant.add_step(self.dp_epsilon, self.dp_delta)

        # Statistics
        dp_stats = {
            'aggregation_time': aggregation_time,
            'clipping_stats': clipping_stats,
            'num_clipped': sum(1 for stat in clipping_stats if stat['was_clipped']),
            'pre_noise_norm': torch.norm(aggregated_sum).item(),
            'post_noise_norm': torch.norm(noisy_aggregated_sum).item(),
            'noise_magnitude': torch.norm(noisy_aggregated_sum - aggregated_sum).item(),
            'privacy_spent': self.privacy_accountant.get_total_privacy_spent()
        }

        return noisy_aggregated_sum, dp_stats

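    # Accounting note: if DPAccountant composes releases with basic
    # (sequential) composition -- an assumption, since it is defined in
    # dp_mechanisms.py -- the totals add linearly: e.g. 10 calls to
    # aggregate_dp_feature_sums at (ε=1.0, δ=1e-5) each cost
    # (ε=10.0, δ=1e-4) in total.
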
    def print_dp_stats(self, dp_stats: Dict):
        """Print differential privacy statistics."""
        print("\n=== Differential Privacy Statistics ===")
        print(f"Aggregation time: {dp_stats['aggregation_time']:.4f}s")
        print(f"Trainers clipped: {dp_stats['num_clipped']}/{len(dp_stats['clipping_stats'])}")
        print(f"Pre-noise norm: {dp_stats['pre_noise_norm']:.4f}")
        print(f"Post-noise norm: {dp_stats['post_noise_norm']:.4f}")
        print(f"Noise magnitude: {dp_stats['noise_magnitude']:.4f}")

        total_eps, total_delta = dp_stats['privacy_spent']
        print(f"Total privacy spent: ε={total_eps:.4f}, δ={total_delta:.8f}")

        # Per-trainer clipping details
        clipped_trainers = [stat for stat in dp_stats['clipping_stats'] if stat['was_clipped']]
        if clipped_trainers:
            print("Clipped trainers:")
            for stat in clipped_trainers:
                print(f"  Trainer {stat['trainer_id']}: {stat['original_norm']:.4f} -> {stat['clipped_norm']:.4f}")
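
# ---------------------------------------------------------------------------
# Minimal, self-contained sketch of the clip -> aggregate -> noise pipeline
# above, using plain torch. Illustrative only: it bypasses DPMechanism and
# hardcodes the Gaussian mechanism's usual calibration,
#     sigma = sensitivity * sqrt(2 * ln(1.25 / delta)) / epsilon,
# with hypothetical shapes and values. Since this module uses relative
# imports, copy the block into a scratch file to actually run it.
if __name__ == "__main__":
    import math

    torch.manual_seed(0)
    clip_norm, epsilon, delta = 1.0, 1.0, 1e-5
    sensitivity = clip_norm  # each clipped contribution has L2 norm <= clip_norm

    # Three hypothetical trainers' local feature sums
    local_sums = [torch.randn(4, 8) for _ in range(3)]

    # Step 1: clip each contribution to bound its L2 norm
    clipped = [s * min(1.0, clip_norm / (torch.norm(s).item() + 1e-12))
               for s in local_sums]

    # Step 2: aggregate the clipped sums
    aggregated = torch.stack(clipped).sum(dim=0)

    # Step 3: add Gaussian noise calibrated to (epsilon, delta)
    sigma = sensitivity * math.sqrt(2 * math.log(1.25 / delta)) / epsilon
    noisy = aggregated + torch.randn_like(aggregated) * sigma

    print(f"noise std: {sigma:.4f}")
    print(f"noise magnitude: {torch.norm(noisy - aggregated).item():.4f}")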