Skip to content

Commit

Permalink
Remove unused variables and error handling in validation runner and submission worker
Browse files Browse the repository at this point in the history
  • Loading branch information
MitchellAV committed May 17, 2024
1 parent a9761fb commit 56c7507
Show file tree
Hide file tree
Showing 2 changed files with 3 additions and 8 deletions.
5 changes: 0 additions & 5 deletions workers/src/pvinsight-validation-runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -768,7 +768,6 @@ def generate_performance_metrics_for_submission(
associated_metadata: dict[str, Any],
config_data: dict[str, Any],
function_parameters: list[str],
number_of_errors: int,
performance_metrics: list[str],
):
# Get the ground truth scalars that we will compare to
Expand All @@ -792,7 +791,6 @@ def generate_performance_metrics_for_submission(
f"{file_name} submission result length {file_submission_result_length} does not match ground truth file length {ground_truth_file_length}"
)

number_of_errors += 1
raise RunnerException(
100,
f"submission result length {file_submission_result_length} does not match ground truth file length {ground_truth_file_length}",
Expand Down Expand Up @@ -842,7 +840,6 @@ def run_submission_and_generate_performance_metrics(
submission_function: Callable[..., pd.Series],
function_parameters: list[str],
file_metadata_row: pd.Series,
number_of_errors: int,
function_name: str,
performance_metrics: list[str],
file_number: int,
Expand Down Expand Up @@ -874,14 +871,12 @@ def run_submission_and_generate_performance_metrics(
associated_system_metadata,
config_data,
function_parameters,
number_of_errors,
performance_metrics,
)

return results_dictionary, error
except Exception as e:
logger.error(f"error running function {function_name}: {e}")
number_of_errors += 1
error = True
return None, error

Expand Down
6 changes: 3 additions & 3 deletions workers/src/submission_worker.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,9 +64,6 @@ def update_submission_status(
CURRENT_EVALUATION_DIR = os.path.abspath(
os.path.join(FILE_DIR, "..", "current_evaluation")
)
logger.info(f"FILE_DIR: {FILE_DIR}")
logger.info(f"LOG_FILE_DIR: {LOG_FILE_DIR}")
logger.info(f"CURRENT_EVALUATION_DIR: {CURRENT_EVALUATION_DIR}")


def push_to_s3(local_file_path, s3_file_path, analysis_id, submission_id):
Expand Down Expand Up @@ -771,6 +768,9 @@ def main():

# base
BASE_TEMP_DIR = tempfile.mkdtemp()
logger.info(f"FILE_DIR: {FILE_DIR}")
logger.info(f"LOG_FILE_DIR: {LOG_FILE_DIR}")
logger.info(f"CURRENT_EVALUATION_DIR: {CURRENT_EVALUATION_DIR}")
# Set to folder where the evaluation scripts are stored
logger.info(f"BASE_TEMP_DIR: {BASE_TEMP_DIR}")
_, execution_time = main()
Expand Down

0 comments on commit 56c7507

Please sign in to comment.