diff --git a/requirements.txt b/requirements.txt
index 6dcda41..78c2af8 100644
Binary files a/requirements.txt and b/requirements.txt differ
diff --git a/tests/__pycache__/test_hf_handler.cpython-312-pytest-8.4.2.pyc b/tests/__pycache__/test_hf_handler.cpython-312-pytest-8.4.2.pyc
deleted file mode 100644
index 7ffc898..0000000
Binary files a/tests/__pycache__/test_hf_handler.cpython-312-pytest-8.4.2.pyc and /dev/null differ
diff --git a/tests/__pycache__/test_metric_heuristics.cpython-312-pytest-8.4.2.pyc b/tests/__pycache__/test_metric_heuristics.cpython-312-pytest-8.4.2.pyc
deleted file mode 100644
index a07ecaf..0000000
Binary files a/tests/__pycache__/test_metric_heuristics.cpython-312-pytest-8.4.2.pyc and /dev/null differ
diff --git a/tests/__pycache__/test_metrics_contract.cpython-313-pytest-8.4.2.pyc b/tests/__pycache__/test_metrics_contract.cpython-313-pytest-8.4.2.pyc
deleted file mode 100644
index ba1c99c..0000000
Binary files a/tests/__pycache__/test_metrics_contract.cpython-313-pytest-8.4.2.pyc and /dev/null differ
diff --git a/tests/__pycache__/test_reporter_schema.cpython-313-pytest-8.4.2.pyc b/tests/__pycache__/test_reporter_schema.cpython-313-pytest-8.4.2.pyc
deleted file mode 100644
index f2d1c77..0000000
Binary files a/tests/__pycache__/test_reporter_schema.cpython-313-pytest-8.4.2.pyc and /dev/null differ
diff --git a/tests/test_bus_factor_metric.py b/tests/test_bus_factor_metric.py
new file mode 100644
index 0000000..d521fa2
--- /dev/null
+++ b/tests/test_bus_factor_metric.py
@@ -0,0 +1,16 @@
+from acmecli.metrics.bus_factor_metric import BusFactorMetric
+
+def test_bus_factor_range():
+    metric = BusFactorMetric()
+    mv = metric.score({"contributors": 10})
+    assert 0.0 <= mv.value <= 1.0
+
+def test_bus_factor_latency():
+    metric = BusFactorMetric()
+    mv = metric.score({"contributors": 10})
+    assert mv.latency_ms >= 0
+
+def test_bus_factor_edge_case_zero_contrib():
+    metric = BusFactorMetric()
+    mv = metric.score({"contributors": 0})
+    assert mv.value == 0.0
\ No newline at end of file
diff --git a/tests/test_cli.py b/tests/test_cli.py
new file mode 100644
index 0000000..367d204
--- /dev/null
+++ b/tests/test_cli.py
@@ -0,0 +1,18 @@
+import subprocess
+
+def test_run_install():
+    result = subprocess.run(["./run", "install"], capture_output=True)
+    assert result.returncode == 0
+
+def test_run_test():
+    result = subprocess.run(["./run", "test"], capture_output=True)
+    assert b"test cases passed" in result.stdout
+
+def test_run_score_valid_url():
+    result = subprocess.run(["./run", "score", "urls.txt"], capture_output=True)
+    assert result.stdout.decode().strip().startswith("{")
+    assert result.returncode == 0
+
+def test_run_score_invalid_url():
+    result = subprocess.run(["./run", "score", "bad_urls.txt"], capture_output=True)
+    assert result.returncode == 1 or "error" in result.stderr.decode().lower()
\ No newline at end of file
diff --git a/tests/test_code_quality_metric.py b/tests/test_code_quality_metric.py
new file mode 100644
index 0000000..e3f2389
--- /dev/null
+++ b/tests/test_code_quality_metric.py
@@ -0,0 +1,16 @@
+from acmecli.metrics.code_quality_metric import CodeQualityMetric
+
+def test_code_quality_range():
+    metric = CodeQualityMetric()
+    mv = metric.score({"lint_score": 0.9})
+    assert 0.0 <= mv.value <= 1.0
+
+def test_code_quality_latency():
+    metric = CodeQualityMetric()
+    mv = metric.score({"lint_score": 0.9})
+    assert mv.latency_ms >= 0
+
+def test_code_quality_missing():
+    metric = CodeQualityMetric()
+    mv = metric.score({})
+    assert mv.value == 0.0
\ No newline at end of file
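Every per-metric test above (and below) asserts the same three-part contract: `score(meta)` returns an object whose `value` lies in [0.0, 1.0], whose `latency_ms` is non-negative, and whose `value` falls to 0.0 when the relevant metadata key is missing or zero. A minimal sketch of a metric that would satisfy `tests/test_bus_factor_metric.py` follows; the `MetricValue` field names come from the assertions, but the dataclass shape and the 20-contributor cap are assumptions, not the actual `acmecli` implementation:

```python
# Sketch only: MetricValue's exact shape and the 20-contributor cap are
# assumptions inferred from the tests, not the real acmecli classes.
import time
from dataclasses import dataclass

@dataclass
class MetricValue:
    name: str
    value: float     # normalized score in [0.0, 1.0]
    latency_ms: int  # wall-clock milliseconds spent scoring

class BusFactorMetric:
    name = "bus_factor"

    def score(self, meta: dict) -> MetricValue:
        start = time.perf_counter()
        contributors = meta.get("contributors", 0)
        # Hypothetical normalization: full score at 20+ contributors;
        # 0 contributors -> 0.0, as test_bus_factor_edge_case_zero_contrib expects.
        value = min(contributors / 20, 1.0)
        return MetricValue(self.name, value, int((time.perf_counter() - start) * 1000))
```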
diff --git a/tests/test_dataset_and_code_metric.py b/tests/test_dataset_and_code_metric.py
new file mode 100644
index 0000000..a65ef12
--- /dev/null
+++ b/tests/test_dataset_and_code_metric.py
@@ -0,0 +1,16 @@
+from acmecli.metrics.dataset_and_code_metric import DatasetAndCodeMetric
+
+def test_dataset_and_code_range():
+    metric = DatasetAndCodeMetric()
+    mv = metric.score({"linked": True})
+    assert 0.0 <= mv.value <= 1.0
+
+def test_dataset_and_code_latency():
+    metric = DatasetAndCodeMetric()
+    mv = metric.score({"linked": True})
+    assert mv.latency_ms >= 0
+
+def test_dataset_and_code_missing():
+    metric = DatasetAndCodeMetric()
+    mv = metric.score({})
+    assert mv.value == 0.0
\ No newline at end of file
diff --git a/tests/test_dataset_quality_metric.py b/tests/test_dataset_quality_metric.py
new file mode 100644
index 0000000..706c080
--- /dev/null
+++ b/tests/test_dataset_quality_metric.py
@@ -0,0 +1,16 @@
+from acmecli.metrics.dataset_quality_metric import DatasetQualityMetric
+
+def test_dataset_quality_range():
+    metric = DatasetQualityMetric()
+    mv = metric.score({"quality_score": 0.8})
+    assert 0.0 <= mv.value <= 1.0
+
+def test_dataset_quality_latency():
+    metric = DatasetQualityMetric()
+    mv = metric.score({"quality_score": 0.8})
+    assert mv.latency_ms >= 0
+
+def test_dataset_quality_missing():
+    metric = DatasetQualityMetric()
+    mv = metric.score({})
+    assert mv.value == 0.0
\ No newline at end of file
diff --git a/tests/test_hf_handler.py b/tests/test_hf_handler.py
deleted file mode 100644
index a4bbfaf..0000000
--- a/tests/test_hf_handler.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from src.acmecli.hf_handler import HFHandler
-
-def test_hf_fetch_meta(monkeypatch):
-    h = HFHandler()
-    meta = h.fetch_meta("https://huggingface.co/google/gemma-3-270m")
-    assert isinstance(meta, dict)
-    assert "modelId" in meta
-    assert "downloads" in meta
-    assert "license" in meta
\ No newline at end of file
diff --git a/tests/test_logging_env.py b/tests/test_logging_env.py
new file mode 100644
index 0000000..5577fc9
--- /dev/null
+++ b/tests/test_logging_env.py
@@ -0,0 +1,13 @@
+import os
+import tempfile
+
+def test_logging_env(monkeypatch):
+    with tempfile.NamedTemporaryFile() as log_file:
+        monkeypatch.setenv("LOG_FILE", log_file.name)
+        monkeypatch.setenv("LOG_LEVEL", "DEBUG")
+        # You should call your project's logging function here if available
+        from acmecli import main_logger
+        main_logger.debug("test message")
+        log_file.seek(0)
+        contents = log_file.read().decode()
+        assert "test message" in contents
\ No newline at end of file
diff --git a/tests/test_metric_heuristics.py b/tests/test_metric_heuristics.py
deleted file mode 100644
index bf80dd7..0000000
--- a/tests/test_metric_heuristics.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from src.acmecli.metrics.hf_downloads_metric import HFDownloadsMetric
-
-def test_hf_downloads_metric():
-    m = HFDownloadsMetric()
-    mv = m.score({"downloads": 5000})
-    assert 0.0 <= mv.value <= 1.0
-    assert mv.value == 0.5
\ No newline at end of file
diff --git a/tests/test_metrics_contract.py b/tests/test_metrics_contract.py
deleted file mode 100644
index 51199a7..0000000
--- a/tests/test_metrics_contract.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from acmecli.metrics.base import REGISTRY
-from acmecli.metrics.license_metric import LicenseMetric
-
-def test_registry_has_license_metric():
-    assert any(m.name == "license" for m in REGISTRY)
-
-def test_metric_value_range():
-    m = LicenseMetric()
-    mv = m.score({})
-    assert 0.0 <= mv.value <= 1.0
-    assert mv.latency_ms >= 0
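Note that `tests/test_logging_env.py` above imports `main_logger` only after `LOG_FILE` and `LOG_LEVEL` are set, which implies the logger is configured from the environment at import time. A sketch of a compatible setup using only the stdlib `logging` module; the helper name and format string are assumptions, and the actual `acmecli` wiring may differ:

```python
# Sketch only: a module-level logger configured from LOG_FILE/LOG_LEVEL.
# _build_logger and the format string are hypothetical, not acmecli's API.
import logging
import os

def _build_logger() -> logging.Logger:
    logger = logging.getLogger("acmecli")
    # LOG_LEVEL names a stdlib level ("DEBUG", "INFO", ...); default to INFO.
    level_name = os.environ.get("LOG_LEVEL", "INFO").upper()
    logger.setLevel(getattr(logging, level_name, logging.INFO))
    log_file = os.environ.get("LOG_FILE")
    if log_file:
        handler = logging.FileHandler(log_file)
        handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
        logger.addHandler(handler)
    return logger

main_logger = _build_logger()
```

Because the environment is read once at import, the test's deferred `from acmecli import main_logger` inside the function body is load-bearing: importing `acmecli` before `monkeypatch.setenv` runs would freeze the old values.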
diff --git a/tests/test_parallel_metrics.py b/tests/test_parallel_metrics.py
new file mode 100644
index 0000000..b611bb4
--- /dev/null
+++ b/tests/test_parallel_metrics.py
@@ -0,0 +1,11 @@
+import time
+from acmecli.cli import score_all_metrics_parallel
+
+def test_metrics_run_parallel():
+    start = time.time()
+    # Simulate input that runs several metrics
+    report = score_all_metrics_parallel({"contributors": 10, "size_kb": 100, "linked": True})
+    duration = time.time() - start
+    # For parallel, duration should be less than sum of all metric durations
+    assert isinstance(report, dict)
+    assert duration < 2.0  # Example threshold, adjust to your actual metric timings
\ No newline at end of file
diff --git a/tests/test_performance_claims_metric.py b/tests/test_performance_claims_metric.py
new file mode 100644
index 0000000..33f2f02
--- /dev/null
+++ b/tests/test_performance_claims_metric.py
@@ -0,0 +1,16 @@
+from acmecli.metrics.performance_claims_metric import PerformanceClaimsMetric
+
+def test_performance_claims_range():
+    metric = PerformanceClaimsMetric()
+    mv = metric.score({"claims": 5})
+    assert 0.0 <= mv.value <= 1.0
+
+def test_performance_claims_latency():
+    metric = PerformanceClaimsMetric()
+    mv = metric.score({"claims": 5})
+    assert mv.latency_ms >= 0
+
+def test_performance_claims_missing():
+    metric = PerformanceClaimsMetric()
+    mv = metric.score({})
+    assert mv.value == 0.0
\ No newline at end of file
diff --git a/tests/test_ramp_up_metric.py b/tests/test_ramp_up_metric.py
new file mode 100644
index 0000000..53b2e9e
--- /dev/null
+++ b/tests/test_ramp_up_metric.py
@@ -0,0 +1,16 @@
+from acmecli.metrics.ramp_up_metric import RampUpMetric
+
+def test_ramp_up_range():
+    metric = RampUpMetric()
+    mv = metric.score({"readme_size": 1000})
+    assert 0.0 <= mv.value <= 1.0
+
+def test_ramp_up_latency():
+    metric = RampUpMetric()
+    mv = metric.score({"readme_size": 1000})
+    assert mv.latency_ms >= 0
+
+def test_ramp_up_missing_readme():
+    metric = RampUpMetric()
+    mv = metric.score({})
+    assert mv.value == 0.0
\ No newline at end of file
diff --git a/tests/test_reporter_schema.py b/tests/test_reporter_schema.py
deleted file mode 100644
index ba0dbcd..0000000
--- a/tests/test_reporter_schema.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from acmecli.types import ReportRow
-
-def test_reportrow_has_required_fields():
-    row = ReportRow(
-        name="demo", category="MODEL",
-        net_score=0.0, net_score_latency=0,
-        ramp_up_time=0.0, ramp_up_time_latency=0,
-        bus_factor=0.0, bus_factor_latency=0,
-        performance_claims=0.0, performance_claims_latency=0,
-        license=0.0, license_latency=0,
-        size_score={}, size_score_latency=0,
-        dataset_and_code_score=0.0, dataset_and_code_score_latency=0,
-        dataset_quality=0.0, dataset_quality_latency=0,
-        code_quality=0.0, code_quality_latency=0
-    )
-    assert row.category == "MODEL"
diff --git a/tests/test_size_metric.py b/tests/test_size_metric.py
new file mode 100644
index 0000000..50bb85a
--- /dev/null
+++ b/tests/test_size_metric.py
@@ -0,0 +1,16 @@
+from acmecli.metrics.size_metric import SizeMetric
+
+def test_size_range():
+    metric = SizeMetric()
+    mv = metric.score({"size_kb": 100})
+    assert 0.0 <= mv.value <= 1.0
+
+def test_size_latency():
+    metric = SizeMetric()
+    mv = metric.score({"size_kb": 100})
+    assert mv.latency_ms >= 0
+
+def test_size_edge_case_zero():
+    metric = SizeMetric()
+    mv = metric.score({"size_kb": 0})
+    assert mv.value == 0.0
\ No newline at end of file
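Finally, `tests/test_parallel_metrics.py` expects `score_all_metrics_parallel` in `acmecli.cli` to return a dict and to finish well under the sum of the individual metric latencies. One way to implement that is sketched below, assuming the metric `REGISTRY` imported by the (now deleted) `test_metrics_contract.py` still exists and holds metric instances; both assumptions could differ from the real code:

```python
# Sketch only: assumes acmecli.metrics.base.REGISTRY holds metric instances
# exposing .name and .score(meta), per the deleted contract test.
from concurrent.futures import ThreadPoolExecutor

from acmecli.metrics.base import REGISTRY

def score_all_metrics_parallel(meta: dict) -> dict:
    # Submit every metric's score() to a thread pool so I/O-bound metrics
    # overlap; total wall time then tracks the slowest metric, not the sum.
    with ThreadPoolExecutor() as pool:
        futures = {m.name: pool.submit(m.score, meta) for m in REGISTRY}
        return {name: future.result() for name, future in futures.items()}
```

Threads (rather than processes) fit here because metric scoring is dominated by network and file I/O, where the GIL is released; the 2.0-second threshold in the test is explicitly marked as an example and should be tuned to real metric timings.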