Cache removed and filelist unit test #32

Merged · 20 commits · Dec 17, 2024

Changes from 1 commit
3 changes: 3 additions & 0 deletions .gitignore

@@ -1,3 +1,6 @@
+modernmetric.db
+
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
23 changes: 12 additions & 11 deletions modernmetric/__main__.py

@@ -3,8 +3,8 @@
 import os
 import textwrap
 import multiprocessing as mp
-from pathlib import Path
-from cachehash.main import Cache
+# from pathlib import Path
+# from cachehash.main import Cache

 from modernmetric.cls.importer.pick import importer_pick
 from modernmetric.cls.modules import get_additional_parser_args
@@ -87,16 +87,16 @@ def ArgParser(custom_args=None):
         help="Ignore unparseable files")

     parser.add_argument(
-        '--file',
-        type=str,
+        '--file',
+        type=str,
         help='Path to the JSON file list of file paths'
     )

     parser.add_argument(
-        'files',
-        metavar='file',
-        type=str,
-        nargs='*',
+        'files',
+        metavar='file',
+        type=str,
+        nargs='*',
         help='List of file paths'
     )

(The removed and re-added argument lines appear identical; this hunk looks like a whitespace-only cleanup, most likely trailing spaces.)

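For context, the new --file option points at a JSON file list. This diff does not show the schema of testfiles/samplefilelist.json; the suggested tests later in this review assume a {"files": [...]} object, so a purely hypothetical generator sketch using that same shape:

# Hypothetical sketch only: the real samplefilelist.json schema is not shown
# in this PR. The {"files": [...]} shape is borrowed from the suggested tests
# further down in this review.
import json

file_list = {"files": ["testfiles/test.c"]}  # paths to scan
with open("samplefilelist.json", "w") as f:
    json.dump(file_list, f, indent=2)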
@@ -158,7 +158,8 @@ def main(custom_args=None, license_identifier: str | int = None):
     else:
         _args = ArgParser()
     _result = {"files": {}, "overall": {}}
-    cache = (None if _args.no_cache else Cache(Path(_args.cache_db), "modernmetric"))
+    # cache = (None if _args.no_cache else Cache(Path(_args.cache_db),
+    #          "modernmetric"))

@@ -179,7 +180,7 @@

     with mp.Pool(processes=_args.jobs) as pool:
         results = [pool.apply(file_process, args=(
-            f, _args, _importer, cache)) for f in _args.files]
+            f, _args, _importer)) for f in _args.files]

     for x in results:
         oldpath = _args.oldfiles[x[1]]
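For reference, the caching this PR comments out was wired roughly like this, reconstructed from the deleted lines above (cachehash is the third-party dependency being dropped; the modernmetric.db file it wrote is what the .gitignore change above now ignores):

# Reconstructed from the deleted lines in this diff -- context only, not new code.
from pathlib import Path
from cachehash.main import Cache  # dependency this PR drops

def make_cache(args):
    # --no-cache disabled caching entirely; otherwise a Cache was opened on
    # the args.cache_db file and passed to each file_process worker.
    return None if args.no_cache else Cache(Path(args.cache_db), "modernmetric")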
24 changes: 24 additions & 0 deletions test/test_self_scan.py

@@ -1,10 +1,12 @@
 import os
+import json
 from modernmetric.fp import file_process
 from modernmetric.cls.modules import (
     get_modules_metrics,
     get_modules_calculated
 )
 from pathlib import Path
+from modernmetric.__main__ import main as modernmetric


 class MockArgs:

@@ -124,6 +126,28 @@ def test_scan_self():
             f"Aggregate {metric} should equal sum of individual values"


+def test_filelist_scan():
+    curr_dir = os.path.dirname(os.path.abspath(__file__))
+    project_root = os.path.dirname(curr_dir)
+    stats_input_file = os.path.join(
+        project_root, 'testfiles', 'samplefilelist.json'
+    )
+    stats_output_file = os.path.join(curr_dir, "test.stats.json")
+    custom_args = [
+        f"--file={stats_input_file}",
+        f"--output={stats_output_file}"
Comment on lines +129 to +138
suggestion (testing): Test should verify file list processing functionality more thoroughly

While the test checks basic file metrics, it should also verify: 1) handling of invalid file paths in the JSON, 2) empty file lists, 3) proper error handling for missing or malformed JSON files, and 4) relative/absolute path handling.

Suggested implementation:

def create_temp_json_file(content, tmp_path):
    temp_file = tmp_path / "test_filelist.json"
    with open(temp_file, 'w') as f:
        json.dump(content, f)
    return str(temp_file)

def run_modernmetric_with_files(input_file, tmp_path):
    output_file = str(tmp_path / "output.stats.json")
    custom_args = [
        f"--file={input_file}",
        f"--output={output_file}"
    ]
    return modernmetric(custom_args=custom_args, license_identifier='unit_test'), output_file

def test_filelist_scan_valid_files(tmp_path):
    """Test processing of valid file list"""
    curr_dir = os.path.dirname(os.path.abspath(__file__))
    project_root = os.path.dirname(curr_dir)
    stats_input_file = os.path.join(
        project_root, 'testfiles', 'samplefilelist.json'
    )
    _, output_file = run_modernmetric_with_files(stats_input_file, tmp_path)

    with open(output_file, 'r') as f:
        stats = json.load(f)

    files = stats['files']
    assert files is not None
    assert files["../testfiles/test.c"]["loc"] == 25
    assert files["../testfiles/test.c"]["cyclomatic_complexity"] == 0
    assert stats["overall"]["loc"] == 178

def test_filelist_scan_invalid_paths(tmp_path):
    """Test handling of invalid file paths in JSON"""
    invalid_files = {
        "files": [
            "nonexistent_file.c",
            "/invalid/path/file.cpp",
            "../../../outside/scope.py"
        ]
    }
    input_file = create_temp_json_file(invalid_files, tmp_path)
    _, output_file = run_modernmetric_with_files(input_file, tmp_path)

    with open(output_file, 'r') as f:
        stats = json.load(f)

    assert stats['files'] == {}
    assert stats["overall"]["loc"] == 0

def test_filelist_scan_empty_list(tmp_path):
    """Test handling of empty file list"""
    empty_files = {"files": []}
    input_file = create_temp_json_file(empty_files, tmp_path)
    _, output_file = run_modernmetric_with_files(input_file, tmp_path)

    with open(output_file, 'r') as f:
        stats = json.load(f)

    assert stats['files'] == {}
    assert stats["overall"]["loc"] == 0

def test_filelist_scan_malformed_json(tmp_path):
    """Test handling of malformed JSON file"""
    malformed_file = tmp_path / "malformed.json"
    with open(malformed_file, 'w') as f:
        f.write("{invalid json content")

    with pytest.raises(json.JSONDecodeError):
        run_modernmetric_with_files(str(malformed_file), tmp_path)

def test_filelist_scan_missing_file():
    """Test handling of missing input file"""
    with pytest.raises(FileNotFoundError):
        run_modernmetric_with_files("/nonexistent/input.json", Path("/tmp"))

def test_filelist_scan_relative_paths(tmp_path):
    """Test handling of relative and absolute paths"""
    curr_dir = os.path.dirname(os.path.abspath(__file__))
    files = {
        "files": [
            "./relative/path/file.c",
            "../parent/path/file.cpp",
            str(Path(curr_dir) / "existing_file.py")
        ]
    }
    input_file = create_temp_json_file(files, tmp_path)
    _, output_file = run_modernmetric_with_files(input_file, tmp_path)

    with open(output_file, 'r') as f:
        stats = json.load(f)

    assert stats['files'] == {}  # No files should be processed as they don't exist
    assert stats["overall"]["loc"] == 0

You'll need to:

  1. Add import pytest at the top of the file if not already present (see the sketch after this list)
  2. Ensure the test framework supports the tmp_path fixture (most recent pytest versions do)
  3. Create appropriate test files in the testfiles directory if they don't exist
  4. Update any existing error handling in the main modernmetric code if it doesn't properly handle these error cases
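Concretely, the top of test/test_self_scan.py would then start something like this (a sketch assembled from the imports already shown in this diff, with only the pytest import new):

import os
import json
import pytest  # new: needed for pytest.raises in the suggested tests
from pathlib import Path
from modernmetric.fp import file_process
from modernmetric.cls.modules import (
    get_modules_metrics,
    get_modules_calculated
)
from modernmetric.__main__ import main as modernmetric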

+    ]
+    modernmetric(custom_args=custom_args, license_identifier='unit_test')
+    with open(stats_output_file, 'r') as f:
+        stats = json.load(f)
+    files = stats['files']
+    assert files is not None
+    assert files["../testfiles/test.c"]["loc"] == 25
+    assert files["../testfiles/test.c"]["cyclomatic_complexity"] == 0

suggestion (testing): Test assertions could be more specific and descriptive

Consider using more specific assertions with meaningful error messages. For example: assert len(files) > 0, 'Files list should not be empty' and verify the exact number of expected files. Also, the cyclomatic complexity of 0 seems suspicious - is this the expected value?

Suggested implementation:

    files = stats['files']
    assert files is not None, "Files dictionary should not be None"
    assert len(files) == 1, "Expected exactly 1 file in analysis"

    test_file = files["../testfiles/test.c"]
    assert test_file is not None, "Test file '../testfiles/test.c' should be present in results"
    assert test_file["loc"] == 25, "Test file should have exactly 25 lines of code"
    # TODO: Verify if cyclomatic complexity of 0 is correct
    assert test_file["cyclomatic_complexity"] >= 1, "Cyclomatic complexity should be at least 1 for any non-empty file"
    assert stats["overall"]["loc"] == 178, "Overall LOC count should be 178"

Note: I've added a comment about cyclomatic complexity and changed the assertion to expect at least 1, since a value of 0 is suspicious for any non-empty file. The developer should:

  1. Verify the expected cyclomatic complexity value for test.c (one way to check is sketched after this list)
  2. Update the assertion with the correct expected value once verified
  3. Consider adding more assertions for other metrics if they are available in the stats output
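One way to verify the expected values before hard-coding them in assertions. This is a sketch with assumptions: it presumes the package runs as python -m modernmetric (the repo has an __main__.py), that positional file arguments and --output work as shown in the ArgParser diff above, and that the CLI runs without a license flag:

# Sketch: scan the single test file and inspect the reported metrics,
# rather than trusting the hard-coded 25/0/178 values.
import json
import subprocess

subprocess.run(
    ["python", "-m", "modernmetric", "testfiles/test.c",
     "--output=check.stats.json"],
    check=True,
)
with open("check.stats.json") as f:
    stats = json.load(f)
print(stats["files"])  # compare loc and cyclomatic_complexity with the asserts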

assert stats["overall"]["loc"] == 178
os.remove(stats_output_file)


 def main():
     """Run the self-scan test"""
     test_scan_self()