@@ -167,40 +167,40 @@ async def _generate_factsheet(self, trust_config, experiment_name):
        factsheet.populate_factsheet_post_train(experiment_name, self._start_time, self._end_time, class_counter)
        logging.info("[FER] factsheet post train done")

-        # data_file_path = os.path.join(os.environ.get('NEBULA_CONFIG_DIR'), experiment_name, "scenario.json")
-        # with open(data_file_path, 'r') as data_file:
-        # data = json.load(data_file)
+        data_file_path = os.path.join(os.environ.get('NEBULA_CONFIG_DIR'), experiment_name, "scenario.json")
+        with open(data_file_path, 'r') as data_file:
+            data = json.load(data_file)

-        # weights = {
-        # "robustness": float(data["robustness_pillar"]),
-        # "resilience_to_attacks": float(data["resilience_to_attacks"]),
-        # "algorithm_robustness": float(data["algorithm_robustness"]),
-        # "client_reliability": float(data["client_reliability"]),
-        # "privacy": float(data["privacy_pillar"]),
-        # "technique": float(data["technique"]),
-        # "uncertainty": float(data["uncertainty"]),
-        # "indistinguishability": float(data["indistinguishability"]),
-        # "fairness": float(data["fairness_pillar"]),
-        # "selection_fairness": float(data["selection_fairness"]),
-        # "performance_fairness": float(data["performance_fairness"]),
-        # "class_distribution": float(data["class_distribution"]),
-        # "explainability": float(data["explainability_pillar"]),
-        # "interpretability": float(data["interpretability"]),
-        # "post_hoc_methods": float(data["post_hoc_methods"]),
-        # "accountability": float(data["accountability_pillar"]),
-        # "factsheet_completeness": float(data["factsheet_completeness"]),
-        # "architectural_soundness": float(data["architectural_soundness_pillar"]),
-        # "client_management": float(data["client_management"]),
-        # "optimization": float(data["optimization"]),
-        # "sustainability": float(data["sustainability_pillar"]),
-        # "energy_source": float(data["energy_source"]),
-        # "hardware_efficiency": float(data["hardware_efficiency"]),
-        # "federation_complexity": float(data["federation_complexity"])
-        # }
+        weights = {
+            "robustness": float(data["robustness_pillar"]),
+            "resilience_to_attacks": float(data["resilience_to_attacks"]),
+            "algorithm_robustness": float(data["algorithm_robustness"]),
+            "client_reliability": float(data["client_reliability"]),
+            "privacy": float(data["privacy_pillar"]),
+            "technique": float(data["technique"]),
+            "uncertainty": float(data["uncertainty"]),
+            "indistinguishability": float(data["indistinguishability"]),
+            "fairness": float(data["fairness_pillar"]),
+            "selection_fairness": float(data["selection_fairness"]),
+            "performance_fairness": float(data["performance_fairness"]),
+            "class_distribution": float(data["class_distribution"]),
+            "explainability": float(data["explainability_pillar"]),
+            "interpretability": float(data["interpretability"]),
+            "post_hoc_methods": float(data["post_hoc_methods"]),
+            "accountability": float(data["accountability_pillar"]),
+            "factsheet_completeness": float(data["factsheet_completeness"]),
+            "architectural_soundness": float(data["architectural_soundness_pillar"]),
+            "client_management": float(data["client_management"]),
+            "optimization": float(data["optimization"]),
+            "sustainability": float(data["sustainability_pillar"]),
+            "energy_source": float(data["energy_source"]),
+            "hardware_efficiency": float(data["hardware_efficiency"]),
+            "federation_complexity": float(data["federation_complexity"])
+        }

-        # trust_metric_manager = TrustMetricManager(self._start_time)
-        # trust_metric_manager.evaluate(experiment_name, weights, use_weights=True)
-        # logging.info("[FER] evaluation done")
+        trust_metric_manager = TrustMetricManager(self._start_time)
+        # trust_metric_manager.evaluate(experiment_name, weights, use_weights=True)
+        logging.info("[FER] evaluation done")

    async def _process_test_metrics_event(self, tme: TestMetricsEvent):
        cur_loss, cur_acc = await tme.get_event_data()
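For context on the hunk above: it re-enables reading the per-pillar weights from the scenario's scenario.json and re-instantiates TrustMetricManager, while the evaluate() call itself remains commented out. The snippet below is a minimal, self-contained sketch of that loading step only, assuming NEBULA_CONFIG_DIR points at a directory containing <experiment_name>/scenario.json with the keys used in the diff; the helper name load_trust_weights is invented for illustration and is not part of the codebase.

    # Minimal sketch (assumptions noted above); shows only the pillar weights
    # to keep it short -- the diff builds the full pillar + notion mapping.
    import json
    import os

    def load_trust_weights(experiment_name: str) -> dict:
        # Same lookup as in the diff: <NEBULA_CONFIG_DIR>/<experiment_name>/scenario.json
        data_file_path = os.path.join(
            os.environ.get("NEBULA_CONFIG_DIR", "."), experiment_name, "scenario.json"
        )
        with open(data_file_path, "r") as data_file:
            data = json.load(data_file)
        # float() tolerates string-encoded numbers in scenario.json.
        return {
            "robustness": float(data["robustness_pillar"]),
            "privacy": float(data["privacy_pillar"]),
            "fairness": float(data["fairness_pillar"]),
            "explainability": float(data["explainability_pillar"]),
            "accountability": float(data["accountability_pillar"]),
            "architectural_soundness": float(data["architectural_soundness_pillar"]),
            "sustainability": float(data["sustainability_pillar"]),
        }

If the commented-out trust_metric_manager.evaluate(experiment_name, weights, use_weights=True) line is later re-enabled, a dict built this way is what would be passed as weights.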