6 changes: 6 additions & 0 deletions .github/workflows/test.yaml
@@ -26,5 +26,11 @@ jobs:
python-version: ${{ matrix.python-version }}
enable-cache: true

- name: Install dependencies
run: python3 -m pip install .

- name: Run tests
run: uv run pytest -Werror

- name: Run shell-based deployment tests
run: bash cacts/tests/test_deploy.sh
57 changes: 57 additions & 0 deletions cacts/tests/test_build_type.py
@@ -0,0 +1,57 @@
import pytest
from cacts.build_type import BuildType
from cacts.utils import expand_variables, evaluate_commands, str_to_bool

class MockProject:
def __init__(self):
self.name = "MockProject"

class MockMachine:
def __init__(self):
self.env_setup = ["echo 'Setting up environment'"]

@pytest.fixture
def build_type():
project = MockProject()
machine = MockMachine()
builds_specs = {
'default': {
Review comment (Collaborator):
We should add some ${} and $() syntax in the specs, to make sure that evaluate_commands and expand_variables work correctly.

Review comment (@bartgol, Apr 25, 2025):
Also, given that ${} allow to execute py code, we should prob call it evaluate_py_expressions, and evaluate_commands should be evaluate_sh_expressions...or something. But that can wait.

Review comment (Collaborator):
I mean, we do have unit tests for expand/evaluate, but I wonder if that's enough, or if we should verify they are used correctly in the BT init...

'longname': 'default_longname',
'description': 'default_description',
'uses_baselines': 'True',
'on_by_default': 'True',
'cmake_args': {'arg1': 'value1'}
},
'test_build': {
'longname': 'test_longname',
'description': 'test_description',
'uses_baselines': 'False',
'on_by_default': 'False',
'cmake_args': {'arg2': 'value2'}
}
}
return BuildType('test_build', project, machine, builds_specs)

def test_initialization(build_type):
assert build_type.name == 'test_build'
assert build_type.longname == 'test_longname'
assert build_type.description == 'test_description'
assert build_type.uses_baselines is False
assert build_type.on_by_default is False
assert build_type.cmake_args == {'arg1': 'value1', 'arg2': 'value2'}

def test_expand_variables(build_type):
build_type.longname = "${project.name}_longname"
expand_variables(build_type, {'project': build_type.project, 'machine': build_type.machine, 'build': build_type})
assert build_type.longname == "MockProject_longname"

def test_evaluate_commands(build_type):
build_type.description = "$(echo 'test_description')"
evaluate_commands(build_type, "echo 'Setting up environment'")
assert build_type.description == "test_description"

def test_str_to_bool():
assert str_to_bool("True", "test_var") is True
assert str_to_bool("False", "test_var") is False
with pytest.raises(ValueError):
str_to_bool("Invalid", "test_var")
77 changes: 77 additions & 0 deletions cacts/tests/test_cacts.py
@@ -0,0 +1,77 @@
import pytest
from cacts.cacts import Driver, parse_command_line

class TestDriver:
def test_initialization(self):
driver = Driver(config_file="config.yaml", machine_name="test_machine", local=False, build_types=["debug"],
work_dir="work_dir", root_dir="root_dir", baseline_dir="baseline_dir", cmake_args=["arg1"],
test_regex="test_*", test_labels=["label1"], config_only=False, build_only=False,
skip_config=False, skip_build=False, generate=False, submit=False, parallel=False, verbose=False)
assert driver._config_file == "config.yaml"
assert driver._machine.name == "test_machine"
assert driver._local == False
assert driver._builds[0].name == "debug"
assert driver._work_dir == "work_dir"
assert driver._root_dir == "root_dir"
assert driver._baselines_dir == "baseline_dir"
assert driver._cmake_args == ["arg1"]
assert driver._test_regex == "test_*"
assert driver._test_labels == ["label1"]
assert driver._config_only == False
assert driver._build_only == False
assert driver._skip_config == False
assert driver._skip_build == False
assert driver._generate == False
assert driver._submit == False
assert driver._parallel == False
assert driver._verbose == False

def test_run(self):
driver = Driver(config_file="config.yaml", machine_name="test_machine", local=False, build_types=["debug"],
work_dir="work_dir", root_dir="root_dir", baseline_dir="baseline_dir", cmake_args=["arg1"],
test_regex="test_*", test_labels=["label1"], config_only=False, build_only=False,
skip_config=False, skip_build=False, generate=False, submit=False, parallel=False, verbose=False)
success = driver.run()
assert success == True

def test_generate_cmake_config(self):
driver = Driver(config_file="config.yaml", machine_name="test_machine", local=False, build_types=["debug"],
work_dir="work_dir", root_dir="root_dir", baseline_dir="baseline_dir", cmake_args=["arg1"],
test_regex="test_*", test_labels=["label1"], config_only=False, build_only=False,
skip_config=False, skip_build=False, generate=False, submit=False, parallel=False, verbose=False)
build = driver._builds[0]
cmake_config = driver.generate_cmake_config(build)
assert "CMAKE_BUILD_TYPE" in cmake_config

def test_generate_ctest_cmd(self):
driver = Driver(config_file="config.yaml", machine_name="test_machine", local=False, build_types=["debug"],
work_dir="work_dir", root_dir="root_dir", baseline_dir="baseline_dir", cmake_args=["arg1"],
test_regex="test_*", test_labels=["label1"], config_only=False, build_only=False,
skip_config=False, skip_build=False, generate=False, submit=False, parallel=False, verbose=False)
build = driver._builds[0]
cmake_config = driver.generate_cmake_config(build)
ctest_cmd = driver.generate_ctest_cmd(build, cmake_config)
assert "ctest" in ctest_cmd

def test_parse_command_line():
args = ["cacts.py", "-f", "config.yaml", "-m", "test_machine", "-t", "debug", "-w", "work_dir", "-r", "root_dir",
"-b", "baseline_dir", "-c", "arg1", "--test-regex", "test_*", "--test-labels", "label1", "--config-only",
"--build-only", "--skip-config", "--skip-build", "-g", "--submit", "-p", "-v"]
parsed_args = parse_command_line(args, "description", "version")
assert parsed_args.config_file == "config.yaml"
assert parsed_args.machine_name == "test_machine"
assert parsed_args.build_types == ["debug"]
assert parsed_args.work_dir == "work_dir"
assert parsed_args.root_dir == "root_dir"
assert parsed_args.baseline_dir == "baseline_dir"
assert parsed_args.cmake_args == ["arg1"]
assert parsed_args.test_regex == "test_*"
assert parsed_args.test_labels == ["label1"]
assert parsed_args.config_only == True
assert parsed_args.build_only == True
assert parsed_args.skip_config == True
assert parsed_args.skip_build == True
assert parsed_args.generate == True
assert parsed_args.submit == True
assert parsed_args.parallel == True
assert parsed_args.verbose == True
31 changes: 31 additions & 0 deletions cacts/tests/test_deploy.sh
@@ -0,0 +1,31 @@
#!/bin/bash

# Test script for deployment scenarios

# Test installing dependencies
echo "Testing: Installing dependencies"
pip install -r requirements.txt
if [ $? -ne 0 ]; then
echo "Failed to install dependencies"
exit 1
fi

# Test running the application
echo "Testing: Running the application"
python -m cacts
if [ $? -ne 0 ]; then
echo "Failed to run the application"
exit 1
fi

# Test verifying the application's output and behavior
echo "Testing: Verifying the application's output and behavior"
output=$(python -m cacts)
expected_output="CACTS: Cmake Application Configurable Testing System"
if [[ "$output" != *"$expected_output"* ]]; then
echo "Application output verification failed"
exit 1
fi

echo "All deployment tests passed"
exit 0
34 changes: 34 additions & 0 deletions cacts/tests/test_machine.py
@@ -0,0 +1,34 @@
import pytest
from cacts.machine import Machine

class MockProject:
def __init__(self):
self.name = "MockProject"

@pytest.fixture
def machine():
project = MockProject()
machines_specs = {
'default': {
Review comment (Collaborator):
maybe we can add some ${} and $() here too, and check that Machine init does the expansion correctly..

'num_bld_res': 4,
'num_run_res': 8,
'env_setup': ['echo "Setting up environment"']
},
'test_machine': {
'num_bld_res': 2,
'num_run_res': 4,
'env_setup': ['echo "Setting up test environment"']
}
}
return Machine('test_machine', project, machines_specs)

def test_initialization(machine):
assert machine.name == 'test_machine'
assert machine.num_bld_res == 2
assert machine.num_run_res == 4
assert machine.env_setup == ['echo "Setting up test environment"']

def test_uses_gpu(machine):
assert machine.uses_gpu() is False
machine.gpu_arch = 'test_gpu_arch'
assert machine.uses_gpu() is True
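
One way the comment in this file's diff could be exercised, assuming Machine runs the same expansion over its spec values during init (not shown in this diff); the test name, the 'expand_machine' entry, and the expected result are illustrative only:

    def test_init_expands_specs():
        project = MockProject()
        machines_specs = {
            'default': {'num_bld_res': 1, 'num_run_res': 1, 'env_setup': []},
            'expand_machine': {
                'num_bld_res': 2,
                'num_run_res': 4,
                # ${project.name} would be expanded using the project object passed to Machine
                'env_setup': ['echo "Setting up ${project.name}"']
            }
        }
        machine = Machine('expand_machine', project, machines_specs)
        assert machine.env_setup == ['echo "Setting up MockProject"']
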
28 changes: 28 additions & 0 deletions cacts/tests/test_project.py
@@ -0,0 +1,28 @@
import pytest
from cacts.project import Project

@pytest.fixture
def project():
project_specs = {
'name': 'TestProject',
'baseline_gen_label': 'gen_label',
'baseline_cmp_label': 'cmp_label',
'baseline_summary_file': 'summary_file',
'cmake_vars_names': {'var1': 'value1'},
'cdash': {'key1': 'value1'}
}
root_dir = '/path/to/root'
return Project(project_specs, root_dir)

def test_initialization(project):
assert project.name == 'TestProject'
assert project.baselines_gen_label == 'gen_label'
assert project.baselines_cmp_label == 'cmp_label'
assert project.baselines_summary_file == 'summary_file'
assert project.cmake_vars_names == {'var1': 'value1'}
assert project.cdash == {'key1': 'value1'}

def test_post_init(project):
project.baselines_gen_label = '$(echo gen_label)'
project.__post_init__()
assert project.baselines_gen_label == 'gen_label'
43 changes: 43 additions & 0 deletions cacts/tests/test_utils.py
@@ -0,0 +1,43 @@
import pytest
from cacts.utils import expect, run_cmd, run_cmd_no_fail, expand_variables, evaluate_commands, str_to_bool, is_git_repo

def test_expect():
with pytest.raises(RuntimeError):
expect(False, "This is an error message")

def test_run_cmd():
stat, output, errput = run_cmd("echo Hello, World!")
assert stat == 0
assert output == "Hello, World!"

def test_run_cmd_no_fail():
output = run_cmd_no_fail("echo Hello, World!")
assert output == "Hello, World!"

Review comment (Collaborator):
I would also test that

    with pytest.raises(RuntimeError):
        run_cmd_no_fail("false")

to check that this throws.

def test_expand_variables():
class MockObject:
def __init__(self):
self.name = "MockObject"
self.value = "${project.name}_value"

mock_obj = MockObject()
expand_variables(mock_obj, {'project': mock_obj})
assert mock_obj.value == "MockObject_value"

def test_evaluate_commands():
class MockObject:
def __init__(self):
self.command = "$(echo 'Hello, World!')"

mock_obj = MockObject()
evaluate_commands(mock_obj)
assert mock_obj.command == "Hello, World!"

def test_str_to_bool():
assert str_to_bool("True", "test_var") is True
assert str_to_bool("False", "test_var") is False
with pytest.raises(ValueError):
str_to_bool("Invalid", "test_var")

def test_is_git_repo():
assert is_git_repo() is True
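
Folding the reviewer's suggestion above into standalone tests could look like the sketch below; it assumes run_cmd_no_fail raises RuntimeError on a non-zero exit status (as the suggestion implies) and that run_cmd reports failures through its status code rather than raising, consistent with the earlier test in this file. The test names are illustrative only:

    def test_run_cmd_no_fail_raises_on_error():
        # 'false' exits with a non-zero status, so run_cmd_no_fail should raise
        with pytest.raises(RuntimeError):
            run_cmd_no_fail("false")

    def test_run_cmd_nonzero_status():
        # run_cmd should surface the failure via its returned status instead of raising
        stat, output, errput = run_cmd("false")
        assert stat != 0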