Commit c8cf61c ("trust fixes")

Parent: fd0ee78

2 files changed: 33 additions, 33 deletions


nebula/addons/trustworthiness/trustworthiness.py

Lines changed: 32 additions & 32 deletions
@@ -167,40 +167,40 @@ async def _generate_factsheet(self, trust_config, experiment_name):
         factsheet.populate_factsheet_post_train(experiment_name, self._start_time, self._end_time, class_counter)
         logging.info("[FER] factsheet post train done")

-        # data_file_path = os.path.join(os.environ.get('NEBULA_CONFIG_DIR'), experiment_name, "scenario.json")
-        # with open(data_file_path, 'r') as data_file:
-        #     data = json.load(data_file)
+        data_file_path = os.path.join(os.environ.get('NEBULA_CONFIG_DIR'), experiment_name, "scenario.json")
+        with open(data_file_path, 'r') as data_file:
+            data = json.load(data_file)

-        # weights = {
-        #     "robustness": float(data["robustness_pillar"]),
-        #     "resilience_to_attacks": float(data["resilience_to_attacks"]),
-        #     "algorithm_robustness": float(data["algorithm_robustness"]),
-        #     "client_reliability": float(data["client_reliability"]),
-        #     "privacy": float(data["privacy_pillar"]),
-        #     "technique": float(data["technique"]),
-        #     "uncertainty": float(data["uncertainty"]),
-        #     "indistinguishability": float(data["indistinguishability"]),
-        #     "fairness": float(data["fairness_pillar"]),
-        #     "selection_fairness": float(data["selection_fairness"]),
-        #     "performance_fairness": float(data["performance_fairness"]),
-        #     "class_distribution": float(data["class_distribution"]),
-        #     "explainability": float(data["explainability_pillar"]),
-        #     "interpretability": float(data["interpretability"]),
-        #     "post_hoc_methods": float(data["post_hoc_methods"]),
-        #     "accountability": float(data["accountability_pillar"]),
-        #     "factsheet_completeness": float(data["factsheet_completeness"]),
-        #     "architectural_soundness": float(data["architectural_soundness_pillar"]),
-        #     "client_management": float(data["client_management"]),
-        #     "optimization": float(data["optimization"]),
-        #     "sustainability": float(data["sustainability_pillar"]),
-        #     "energy_source": float(data["energy_source"]),
-        #     "hardware_efficiency": float(data["hardware_efficiency"]),
-        #     "federation_complexity": float(data["federation_complexity"])
-        # }
+        weights = {
+            "robustness": float(data["robustness_pillar"]),
+            "resilience_to_attacks": float(data["resilience_to_attacks"]),
+            "algorithm_robustness": float(data["algorithm_robustness"]),
+            "client_reliability": float(data["client_reliability"]),
+            "privacy": float(data["privacy_pillar"]),
+            "technique": float(data["technique"]),
+            "uncertainty": float(data["uncertainty"]),
+            "indistinguishability": float(data["indistinguishability"]),
+            "fairness": float(data["fairness_pillar"]),
+            "selection_fairness": float(data["selection_fairness"]),
+            "performance_fairness": float(data["performance_fairness"]),
+            "class_distribution": float(data["class_distribution"]),
+            "explainability": float(data["explainability_pillar"]),
+            "interpretability": float(data["interpretability"]),
+            "post_hoc_methods": float(data["post_hoc_methods"]),
+            "accountability": float(data["accountability_pillar"]),
+            "factsheet_completeness": float(data["factsheet_completeness"]),
+            "architectural_soundness": float(data["architectural_soundness_pillar"]),
+            "client_management": float(data["client_management"]),
+            "optimization": float(data["optimization"]),
+            "sustainability": float(data["sustainability_pillar"]),
+            "energy_source": float(data["energy_source"]),
+            "hardware_efficiency": float(data["hardware_efficiency"]),
+            "federation_complexity": float(data["federation_complexity"])
+        }

-        # trust_metric_manager = TrustMetricManager(self._start_time)
-        # trust_metric_manager.evaluate(experiment_name, weights, use_weights=True)
-        # logging.info("[FER] evaluation done")
+        trust_metric_manager = TrustMetricManager(self._start_time)
+        # trust_metric_manager.evaluate(experiment_name, weights, use_weights=True)
+        logging.info("[FER] evaluation done")

     async def _process_test_metrics_event(self, tme: TestMetricsEvent):
         cur_loss, cur_acc = await tme.get_event_data()
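
The net effect in trustworthiness.py: the scenario weights are now actually read from scenario.json instead of staying commented out, while the weighted evaluate() call itself remains disabled. As a rough sketch of what the uncommented block expects, assuming a made-up NEBULA_CONFIG_DIR and experiment name and showing only two of the 24 weight keys (the float() wrapping suggests scenario.json may store the numbers as strings, though that is a guess):

    import json
    import os

    # Hypothetical values; the real config dir and experiment name come from
    # the NEBULA deployment.
    os.environ.setdefault("NEBULA_CONFIG_DIR", "/tmp/nebula_config")
    experiment_name = "demo_experiment"

    experiment_dir = os.path.join(os.environ["NEBULA_CONFIG_DIR"], experiment_name)
    os.makedirs(experiment_dir, exist_ok=True)

    # Write a minimal scenario.json with two of the weight keys the diff reads,
    # using string-typed values (an assumption motivated by the float() calls).
    with open(os.path.join(experiment_dir, "scenario.json"), "w") as f:
        json.dump({"robustness_pillar": "0.25", "resilience_to_attacks": "0.5"}, f)

    # Mirror the uncommented block: load the file and coerce each weight to float.
    with open(os.path.join(experiment_dir, "scenario.json"), "r") as data_file:
        data = json.load(data_file)

    weights = {
        "robustness": float(data["robustness_pillar"]),
        "resilience_to_attacks": float(data["resilience_to_attacks"]),
    }
    print(weights)  # {'robustness': 0.25, 'resilience_to_attacks': 0.5}

Note that trust_metric_manager.evaluate(experiment_name, weights, use_weights=True) stays commented out after this commit, so the TrustMetricManager is instantiated and the weights are built, but the weighted evaluation is not yet triggered.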

nebula/addons/trustworthiness/utils.py

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ def count_class_samples(scenario_name, dataloaders_files, class_counter: Counter
     dataloaders = []

     if class_counter:
-        result = dict(class_counter)
+        result = {hashids.encode(int(class_id)): count for class_id, count in class_counter.items()}
     else:
         for file in dataloaders_files:
             with open(file, "rb") as f:
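
The one-line change in count_class_samples replaces raw class IDs with Hashids-encoded keys in the result dict. A minimal sketch of the before/after behavior, assuming the module-level hashids object is an instance of Hashids from the hashids package with a project-specific salt (both assumptions; the diff only shows the encode() call):

    from collections import Counter

    from hashids import Hashids  # pip install hashids

    # Assumed stand-in for the module-level object in utils.py; the salt is made up.
    hashids = Hashids(salt="nebula")

    class_counter = Counter({0: 120, 1: 95, 7: 33})

    # Old behavior: keys are the raw integer class IDs.
    old_result = dict(class_counter)  # {0: 120, 1: 95, 7: 33}

    # New behavior: each class ID becomes an opaque Hashids string, which also
    # gives the counts JSON-friendly string keys.
    new_result = {hashids.encode(int(class_id)): count
                  for class_id, count in class_counter.items()}
    print(new_result)  # exact strings depend on the salt, e.g. {'EG': 120, ...}

One plausible reason for the int(class_id) cast: Hashids.encode() accepts only non-negative Python ints, so keys that arrive as strings or numpy integers must be converted first.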
