Commit 6dd92f3

Add return type annotation. Remove logic related to actual eval run name.
1 parent: 8acef9e


judgeval/run_evaluation.py

Lines changed: 2 additions & 4 deletions
@@ -167,7 +167,7 @@ def check_eval_run_name_exists(eval_name: str, project_name: str, judgment_api_k
         error(f"Failed to check if eval run name exists: {str(e)}")
         raise JudgmentAPIError(f"Failed to check if eval run name exists: {str(e)}")
 
-def run_eval(evaluation_run: EvaluationRun, override: bool = False):
+def run_eval(evaluation_run: EvaluationRun, override: bool = False) -> List[ScoringResult]:
     """
     Executes an evaluation of `Example`s using one or more `Scorer`s
 
@@ -310,7 +310,6 @@ def run_eval(evaluation_run: EvaluationRun, override: bool = False):
 
     info(f"Successfully merged {len(merged_results)} results")
 
-    actual_eval_run_name = evaluation_run.eval_name
     if evaluation_run.log_results:
         try:
             res = requests.post(
@@ -328,7 +327,6 @@ def run_eval(evaluation_run: EvaluationRun, override: bool = False):
                 error(f"Error {res.status_code}: {error_message}")
                 raise Exception(f"Error {res.status_code}: {error_message}")
             else:
-                actual_eval_run_name = res.json()["eval_results_name"]
                 if "ui_results_url" in res.json():
                     rprint(f"\n🔍 You can view your evaluation results here: [rgb(106,0,255)]{res.json()['ui_results_url']}[/]\n")
 
@@ -342,7 +340,7 @@ def run_eval(evaluation_run: EvaluationRun, override: bool = False):
     for i, result in enumerate(merged_results):
         if not result.scorers_data:  # none of the scorers could be executed on this example
             info(f"None of the scorers could be executed on example {i}. This is usually because the Example is missing the fields needed by the scorers. Try checking that the Example has the necessary fields for your scorers.")
-    return actual_eval_run_name, merged_results
+    return merged_results
 
 
 if __name__ == "__main__":
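
After this commit, run_eval returns only the merged List[ScoringResult] instead of the previous (actual_eval_run_name, merged_results) tuple, so callers that unpacked two values need a small update. A minimal caller sketch, assuming the import path shown in this commit and an EvaluationRun constructed elsewhere (its construction and any fields beyond eval_name and log_results are not shown in this diff):

from judgeval.run_evaluation import run_eval  # module changed in this commit

def report_unscored(evaluation_run) -> None:
    # evaluation_run is an EvaluationRun built elsewhere; its construction is
    # not part of this diff, so it stays a parameter here.
    results = run_eval(evaluation_run, override=True)  # now returns List[ScoringResult]

    # Callers that previously unpacked two values, e.g.
    #   eval_run_name, results = run_eval(evaluation_run)
    # should drop the first element of the unpacking.
    for i, result in enumerate(results):
        if not result.scorers_data:  # same condition run_eval itself logs about
            print(f"Example {i} produced no scorer data")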
