Skip to content

Commit 7448eed

Browse files
committed
formatted
1 parent 720128e commit 7448eed

File tree

1 file changed

+7
-7
lines changed

src/llm_change_agent/evaluations/evaluator.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
import logging
55
import os
66
import random
7+
import secrets
78
import time
89
from pathlib import Path
910
from typing import Any, List, Union
@@ -111,7 +112,7 @@ def run_llm_change_agent(prompt, provider, model, docs: List[Any] = None) -> Lis
111112
from llm_change_agent.cli import execute
112113

113114
# Sleep for a random time between 1 and 5 seconds before running the LLM Change Agent
114-
sleep_time = random.randint(1, 5)
115+
sleep_time = secrets.randbelow(5) + 1
115116
logger.info(f"Sleeping for {sleep_time} seconds before running the LLM Change Agent.")
116117
time.sleep(sleep_time)
117118

@@ -175,7 +176,6 @@ def generate_changes_via_llm(eval_dir, output_dir, provider, model):
175176
except Exception as e:
176177
logger.error(f"Error while generating changes for {doc.name} and PR {pr_id}: {e}")
177178
predicted_changes = []
178-
179179

180180
with open(output_sub_dir / doc.name, mode) as out:
181181
yaml.dump({pr_id: predicted_changes}, out, sort_keys=False)
@@ -193,7 +193,7 @@ def compare_changes(expected_dir: Path, output_dir: Path):
193193
output_files_list_of_dicts = [{f"{file.parts[-3]}_{file.parts[-2]}": {file.name: file}} for file in output_files]
194194

195195
for model_output in output_files_list_of_dicts:
196-
for provider_model, file_info in model_output.items():
196+
for _provider_model, file_info in model_output.items():
197197
for filename, filepath in file_info.items():
198198
filename = filepath.name
199199
expected_file = expected_dir / filename
@@ -206,15 +206,15 @@ def compare_changes(expected_dir: Path, output_dir: Path):
206206
expected_change = expected_yaml_subset.get(pr_id)
207207
if len(output_changes) > 0:
208208
compare_output_vs_expected(expected_change, output_changes)
209-
logger.info(f"Finished comparing changes for {provider_model}")
209+
logger.info(f"Finished comparing changes for {_provider_model}")
210210

211211

212212
def compare_output_vs_expected(expected_changes, output_changes: List):
213213
"""Compare the expected changes with the output changes."""
214214
output_changes = normalize_to_curies_in_changes(output_changes)
215-
accuracy = 0.0
216-
total = len(expected_changes)
217-
correct = 0
215+
# accuracy = 0.0
216+
# total = len(expected_changes)
217+
# correct = 0
218218
# import pdb
219219

220220
# pdb.set_trace()

0 commit comments

Comments (0)