
Commit 2543874: fix existing tests

TODO: new bundle tests for serverless
TODO: deploy classic and serverless workflows
TODO: validate backwards compatibility
Maxim Mityutko committed Oct 8, 2024
1 parent 4a1d375 commit 2543874
Showing 2 changed files with 28 additions and 12 deletions.
.pre-commit-config.yaml (8 additions & 8 deletions)
@@ -9,11 +9,11 @@ repos:
         pass_filenames: false
         always_run: true
         stages: [commit]
-#      - id: make-cov
-#        name: Running Lint Checks & Test Suite
-#        entry: make cov
-#        language: system
-#        files: '\.py$'
-#        pass_filenames: false
-#        always_run: true
-#        stages: [push]
+      - id: make-cov
+        name: Running Lint Checks & Test Suite
+        entry: make cov
+        language: system
+        files: '\.py$'
+        pass_filenames: false
+        always_run: true
+        stages: [push]
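This re-enables the previously commented-out make-cov hook, so lint checks and the coverage-collecting test suite (make cov) run again at the push stage; assuming the cov target still exists in the repository Makefile, the hook can presumably also be run on demand with pre-commit run make-cov --hook-stage push.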
tests/engine/test_workflow.py (20 additions & 4 deletions)

@@ -40,9 +40,16 @@ def test_add_task(self):
         assert t.trigger_rule == BrickflowTriggerRule.ALL_SUCCESS
         assert t.custom_execute_callback is None

-    def test_create_workflow_no_compute(self):
-        with pytest.raises(NoWorkflowComputeError):
+    def test_create_workflow_no_compute(self, caplog):
+        try:
+            # if cluster details are not provided, the job must be treated as serverless
             Workflow("test")
+            assert (
+                "Default cluster details are not provided, switching to serverless compute."
+                in caplog.text
+            )
+        except NoWorkflowComputeError:
+            pytest.fail("NoWorkflowComputeError was raised")

     def test_create_workflow_with_duplicate_compute(self):
         with pytest.raises(DuplicateClustersDefinitionError):
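For context, test_create_workflow_no_compute now assumes that constructing Workflow("test") without any cluster details no longer raises NoWorkflowComputeError, but instead logs the quoted message and switches the job to serverless compute. A minimal sketch of that fallback, assuming a plain logging-based implementation (the real Workflow code is not part of this diff), might look like:

import logging

logger = logging.getLogger(__name__)


class Workflow:
    """Sketch only: illustrates the serverless fallback the updated test expects."""

    def __init__(self, name, clusters=None, default_cluster=None):
        self.name = name
        self.clusters = clusters or []
        self.default_cluster = default_cluster
        if not self.clusters:
            # No cluster details supplied: instead of raising NoWorkflowComputeError,
            # fall back to serverless compute and log the switch.
            logger.warning(
                "Default cluster details are not provided, switching to serverless compute."
            )
        elif self.default_cluster is None:
            self.default_cluster = self.clusters[0]

The second hunk below exercises a similar fallback when default_cluster is cleared after construction, so the real implementation presumably performs the same check when tasks are added.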
@@ -110,15 +117,24 @@ def test_create_workflow_with_default_cluster(self):
         for field in ["spark_version", "autoscale", "node_type_id"]:
             assert field in new_cluster

-    def test_default_cluster_isnt_empty(self):
-        with pytest.raises(RuntimeError):
+    def test_default_cluster_isnt_empty(self, caplog):
+        try:
             compute = [
                 Cluster("name", "spark", "vmnode"),
             ]
             this_wf = Workflow("test", clusters=compute)
             this_wf.default_cluster = None
             this_wf._add_task(f=lambda: 123, task_id="taskid")
+
+            assert (
+                "Default cluster details are not provided, switching to serverless compute."
+                in caplog.text
+            )
+        except RuntimeError:
+            # The error should not be raised: when the default cluster is not configured,
+            # the job is treated as a serverless compute job.
+            pytest.fail("RuntimeError was raised")

     def test_max_tasks_reached_error(self):
         with pytest.raises(ValueError):
             compute = [
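One caveat with these caplog assertions: pytest's caplog fixture only records messages that clear the emitting logger's effective level, which defaults to WARNING for unconfigured loggers. If the serverless switch is logged at INFO rather than WARNING, the tests would also need to lower the capture level, along these lines (hypothetical adjustment, not part of this commit):

import logging


def test_create_workflow_no_compute(self, caplog):
    caplog.set_level(logging.INFO)  # also capture INFO-level records
    ...  # rest of the test as shown in the diff above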
