diff --git a/src/acmecli.egg-info/SOURCES.txt b/src/acmecli.egg-info/SOURCES.txt
index fbd80e1..ee8a4d8 100644
--- a/src/acmecli.egg-info/SOURCES.txt
+++ b/src/acmecli.egg-info/SOURCES.txt
@@ -13,22 +13,24 @@ src/acmecli.egg-info/dependency_links.txt
 src/acmecli.egg-info/top_level.txt
 src/acmecli/metrics/__init__.py
 src/acmecli/metrics/base.py
-src/acmecli/metrics/busfactor_metric.py
+src/acmecli/metrics/bus_factor_metric.py
+src/acmecli/metrics/cli_metric.py
 src/acmecli/metrics/code_quality_metric.py
-src/acmecli/metrics/dataset_code_metric.py
+src/acmecli/metrics/dataset_and_code_metric.py
 src/acmecli/metrics/dataset_quality_metric.py
 src/acmecli/metrics/hf_downloads_metric.py
 src/acmecli/metrics/license_metric.py
-src/acmecli/metrics/performance_metric.py
-src/acmecli/metrics/rampup_metric.py
+src/acmecli/metrics/logging_env_metric.py
+src/acmecli/metrics/performance_claims_metric.py
+src/acmecli/metrics/ramp_up_metric.py
 src/acmecli/metrics/size_metric.py
 tests/test_bus_factor_metric.py
-tests/test_cli.py
+tests/test_cli_metric.py
 tests/test_code_quality_metric.py
 tests/test_dataset_and_code_metric.py
 tests/test_dataset_quality_metric.py
+tests/test_license_metric.py
 tests/test_logging_env.py
-tests/test_parallel_metrics.py
 tests/test_performance_claims_metric.py
 tests/test_ramp_up_metric.py
 tests/test_size_metric.py
\ No newline at end of file
diff --git a/tests/test_bus_factor_metric.py b/tests/test_bus_factor_metric.py
index e755746..4bdb5a7 100644
--- a/tests/test_bus_factor_metric.py
+++ b/tests/test_bus_factor_metric.py
@@ -1,12 +1,31 @@
 from acmecli.metrics.bus_factor_metric import BusFactorMetric
 
-def test_bus_factor_dict_input():
+def test_bus_factor_many_contributors_even():
     metric = BusFactorMetric()
-    # contributors dict: name -> count
-    mv = metric.score({"contributors": {"alice": 10, "bob": 5}})
-    assert 0.0 <= mv.value <= 1.0
+    mv = metric.score({"contributors": {"a": 5, "b": 5, "c": 5, "d": 5, "e": 5, "f": 5}})
+    assert mv.value > 0.5
+
+def test_bus_factor_one_contributor():
+    metric = BusFactorMetric()
+    mv = metric.score({"contributors": {"alice": 50}})
+    assert mv.value < 0.5
 
 def test_bus_factor_zero_contrib():
     metric = BusFactorMetric()
     mv = metric.score({"contributors": {}})
-    assert mv.value == 0.0
\ No newline at end of file
+    assert mv.value == 0.0
+
+def test_bus_factor_org_name():
+    metric = BusFactorMetric()
+    mv = metric.score({"contributors": {"a": 10}, "full_name": "ACMECorp/repo"})
+    assert mv.value >= 0.1
+
+def test_bus_factor_forks():
+    metric = BusFactorMetric()
+    mv = metric.score({"contributors": {"a": 5, "b": 5}, "forks": 100})
+    assert mv.value > 0.1
+
+def test_bus_factor_latency():
+    metric = BusFactorMetric()
+    mv = metric.score({"contributors": {"a": 1}})
+    assert mv.latency_ms >= 0
\ No newline at end of file
diff --git a/tests/test_cache.py b/tests/test_cache.py
new file mode 100644
index 0000000..6b20f52
--- /dev/null
+++ b/tests/test_cache.py
@@ -0,0 +1,10 @@
+from acmecli.cache import SimpleCache
+
+def test_cache_set_get():
+    cache = SimpleCache()
+    cache.set("foo", "bar")
+    assert cache.get("foo") == "bar"
+
+def test_cache_miss():
+    cache = SimpleCache()
+    assert cache.get("missing") is None
\ No newline at end of file
diff --git a/tests/test_cli_integration.py b/tests/test_cli_integration.py
new file mode 100644
index 0000000..d1b74e8
--- /dev/null
+++ b/tests/test_cli_integration.py
@@ -0,0 +1,10 @@
+import subprocess
+
+def test_run_test_success():
+    result = subprocess.run(["python", "run", "test"], capture_output=True, text=True)
+    assert "test cases passed" in result.stdout
+
+def test_run_score_failure():
+    result = subprocess.run(["python", "run", "score", "NON_EXISTENT.txt"], capture_output=True, text=True)
+    assert result.returncode != 0
+    assert "Usage" in result.stdout or "error" in result.stderr.lower()
\ No newline at end of file
diff --git a/tests/test_cli_metric.py b/tests/test_cli_metric.py
index 3242d2b..a48a0ee 100644
--- a/tests/test_cli_metric.py
+++ b/tests/test_cli_metric.py
@@ -8,4 +8,14 @@ def test_cli_metric_documentation():
 def test_cli_metric_no_cli():
     metric = CLIMetric()
     mv = metric.score({"readme_text": "This project is for data analysis"})
-    assert mv.value < 0.5
\ No newline at end of file
+    assert mv.value < 0.5
+
+def test_cli_metric_automation():
+    metric = CLIMetric()
+    mv = metric.score({"readme_text": "Provides automation"})
+    assert mv.value >= 0.3
+
+def test_cli_metric_latency():
+    metric = CLIMetric()
+    mv = metric.score({"readme_text": "CLI"})
+    assert mv.latency_ms >= 0
\ No newline at end of file
diff --git a/tests/test_code_quality_metric.py b/tests/test_code_quality_metric.py
index 641ce2d..6218e5b 100644
--- a/tests/test_code_quality_metric.py
+++ b/tests/test_code_quality_metric.py
@@ -2,10 +2,25 @@
 
 def test_code_quality_range():
     metric = CodeQualityMetric()
-    mv = metric.score({"lint_score": 0.9})
+    mv = metric.score({"readme_text": "testing with pytest", "language": "python", "pushed_at": "2025-09-01T00:00:00Z"})
     assert 0.0 <= mv.value <= 1.0
 
 def test_code_quality_missing():
     metric = CodeQualityMetric()
     mv = metric.score({})
-    assert mv.value == 0.0
\ No newline at end of file
+    assert mv.value == 0.0
+
+def test_code_quality_old_push():
+    metric = CodeQualityMetric()
+    mv = metric.score({"pushed_at": "2020-01-01T00:00:00Z"})
+    assert mv.value <= 0.1
+
+def test_code_quality_issues_vs_stars():
+    metric = CodeQualityMetric()
+    mv = metric.score({"open_issues_count": 1, "stars": 50})
+    assert mv.value >= 0.0
+
+def test_code_quality_latency():
+    metric = CodeQualityMetric()
+    mv = metric.score({"readme_text": "docs"})
+    assert mv.latency_ms >= 0
\ No newline at end of file
diff --git a/tests/test_dataset_and_code_metric.py b/tests/test_dataset_and_code_metric.py
index 6af1145..544dac1 100644
--- a/tests/test_dataset_and_code_metric.py
+++ b/tests/test_dataset_and_code_metric.py
@@ -5,7 +5,22 @@ def test_dataset_and_code_range():
     mv = metric.score({"readme_text": "data and code available"})
     assert 0.0 <= mv.value <= 1.0
 
+def test_dataset_and_code_known_dataset():
+    metric = DatasetAndCodeMetric()
+    mv = metric.score({"readme_text": "Uses ImageNet and example code"})
+    assert mv.value > 0.3
+
 def test_dataset_and_code_missing():
     metric = DatasetAndCodeMetric()
     mv = metric.score({})
-    assert mv.value == 0.0
\ No newline at end of file
+    assert mv.value == 0.0
+
+def test_dataset_and_code_large_size():
+    metric = DatasetAndCodeMetric()
+    mv = metric.score({"size": 20000})
+    assert mv.value > 0.0
+
+def test_dataset_and_code_latency():
+    metric = DatasetAndCodeMetric()
+    mv = metric.score({"readme_text": "code"})
+    assert mv.latency_ms >= 0
\ No newline at end of file
diff --git a/tests/test_dataset_quality_metric.py b/tests/test_dataset_quality_metric.py
index 769f9f5..3b05385 100644
--- a/tests/test_dataset_quality_metric.py
+++ b/tests/test_dataset_quality_metric.py
@@ -8,4 +8,19 @@ def test_dataset_quality_range():
 def test_dataset_quality_missing():
     metric = DatasetQualityMetric()
     mv = metric.score({})
-    assert mv.value == 0.0
\ No newline at end of file
+    assert mv.value == 0.0
+
+def test_dataset_quality_stars_high():
+    metric = DatasetQualityMetric()
+    mv = metric.score({"stars": 1000})
+    assert mv.value > 0.1
+
+def test_dataset_quality_stars_low():
+    metric = DatasetQualityMetric()
+    mv = metric.score({"stars": 10})
+    assert mv.value >= 0.0
+
+def test_dataset_quality_latency():
+    metric = DatasetQualityMetric()
+    mv = metric.score({"readme_text": "benchmark"})
+    assert mv.latency_ms >= 0
\ No newline at end of file
diff --git a/tests/test_github_handler.py b/tests/test_github_handler.py
new file mode 100644
index 0000000..5e3e322
--- /dev/null
+++ b/tests/test_github_handler.py
@@ -0,0 +1,12 @@
+from acmecli.github_handler import fetch_github_metadata
+
+def test_fetch_github_metadata_valid():
+    # Use a public repo; adjust as needed for your logic
+    url = "https://github.com/AF-Warsame/test"
+    meta = fetch_github_metadata(url)
+    assert isinstance(meta, dict) or meta is not None
+
+def test_fetch_github_metadata_invalid():
+    url = "https://github.com/invalid/repo"
+    meta = fetch_github_metadata(url)
+    assert meta is None or meta == {}
\ No newline at end of file
diff --git a/tests/test_hf_downloads_metric b/tests/test_hf_downloads_metric
deleted file mode 100644
index 021d45f..0000000
--- a/tests/test_hf_downloads_metric
+++ /dev/null
@@ -1,11 +0,0 @@
-from acmecli.metrics.hf_downloads_metric import HFDownloadsMetric
-
-def test_downloads_zero():
-    metric = HFDownloadsMetric()
-    mv = metric.score({"downloads": 0})
-    assert mv.value == 0.0
-
-def test_downloads_high():
-    metric = HFDownloadsMetric()
-    mv = metric.score({"downloads": 20000})
-    assert mv.value == 1.0
\ No newline at end of file
diff --git a/tests/test_hf_downloads_metric.py b/tests/test_hf_downloads_metric.py
new file mode 100644
index 0000000..bf26a68
--- /dev/null
+++ b/tests/test_hf_downloads_metric.py
@@ -0,0 +1,26 @@
+from acmecli.metrics.hf_downloads_metric import HFDownloadsMetric
+
+def test_downloads_zero():
+    metric = HFDownloadsMetric()
+    mv = metric.score({"downloads": 0})
+    assert mv.value == 0.0
+
+def test_downloads_high():
+    metric = HFDownloadsMetric()
+    mv = metric.score({"downloads": 20000})
+    assert mv.value == 1.0
+
+def test_downloads_medium():
+    metric = HFDownloadsMetric()
+    mv = metric.score({"downloads": 5000})
+    assert 0.0 < mv.value < 1.0
+
+def test_downloads_none():
+    metric = HFDownloadsMetric()
+    mv = metric.score({})
+    assert mv.value == 0.0
+
+def test_downloads_latency():
+    metric = HFDownloadsMetric()
+    mv = metric.score({"downloads": 100})
+    assert mv.latency_ms >= 0
\ No newline at end of file
diff --git a/tests/test_hf_handler.py b/tests/test_hf_handler.py
new file mode 100644
index 0000000..c3ff334
--- /dev/null
+++ b/tests/test_hf_handler.py
@@ -0,0 +1,11 @@
+from acmecli.hf_handler import fetch_hf_metadata
+
+def test_fetch_hf_metadata_valid():
+    url = "https://huggingface.co/bert-base-uncased"
+    meta = fetch_hf_metadata(url)
+    assert isinstance(meta, dict) or meta is not None
+
+def test_fetch_hf_metadata_invalid():
+    url = "https://huggingface.co/invalid"
+    meta = fetch_hf_metadata(url)
+    assert meta is None or meta == {}
\ No newline at end of file
diff --git a/tests/test_license_metric.py b/tests/test_license_metric.py
index 6af9af8..043c1c1 100644
--- a/tests/test_license_metric.py
+++ b/tests/test_license_metric.py
@@ -8,4 +8,29 @@ def test_license_metric_high_score():
 def test_license_metric_no_license():
     metric = LicenseMetric()
     mv = metric.score({"license": "", "readme_text": ""})
-    assert mv.value == 0.0 or mv.value < 0.2
\ No newline at end of file
+    assert mv.value == 0.0 or mv.value < 0.2
+
+def test_license_metric_medium_score():
+    metric = LicenseMetric()
+    mv = metric.score({"license": "GPL-2", "readme_text": "GPL-2 license"})
+    assert 0.6 <= mv.value < 0.8
+
+def test_license_metric_low_score():
+    metric = LicenseMetric()
+    mv = metric.score({"license": "GPL-3", "readme_text": ""})
+    assert 0.3 <= mv.value < 0.6
+
+def test_license_metric_readme_license_only():
+    metric = LicenseMetric()
+    mv = metric.score({"license": "", "readme_text": "licensed under the Apache license"})
+    assert mv.value > 0.0
+
+def test_license_metric_latency():
+    metric = LicenseMetric()
+    mv = metric.score({"license": "MIT"})
+    assert mv.latency_ms >= 0
+
+def test_license_metric_weird_license():
+    metric = LicenseMetric()
+    mv = metric.score({"license": "unknown", "readme_text": "no info"})
+    assert mv.value < 0.5
\ No newline at end of file
diff --git a/tests/test_logging_env.py b/tests/test_logging_env.py
index d09952c..d20255a 100644
--- a/tests/test_logging_env.py
+++ b/tests/test_logging_env.py
@@ -13,4 +13,9 @@ def test_logging_env_metric_readme():
 def test_logging_env_metric_missing():
     metric = LoggingEnvMetric()
     mv = metric.score({})
-    assert mv.value == 0.0
\ No newline at end of file
+    assert mv.value == 0.0
+
+def test_logging_env_metric_latency():
+    metric = LoggingEnvMetric()
+    mv = metric.score({"env_vars": {"LOG_FILE": "file"}})
+    assert mv.latency_ms >= 0
\ No newline at end of file
diff --git a/tests/test_performance_claims_metric.py b/tests/test_performance_claims_metric.py
index 4d6b0dc..6772aa3 100644
--- a/tests/test_performance_claims_metric.py
+++ b/tests/test_performance_claims_metric.py
@@ -8,4 +8,14 @@ def test_performance_metric_range():
 def test_performance_metric_missing():
     metric = PerformanceClaimsMetric()
     mv = metric.score({})
-    assert mv.value == 0.0
\ No newline at end of file
+    assert mv.value == 0.0
+
+def test_performance_metric_numbers():
+    metric = PerformanceClaimsMetric()
+    mv = metric.score({"readme_text": "score 99"})
+    assert mv.value > 0.0
+
+def test_performance_metric_latency():
+    metric = PerformanceClaimsMetric()
+    mv = metric.score({"readme_text": "benchmarks"})
+    assert mv.latency_ms >= 0
\ No newline at end of file
diff --git a/tests/test_ramp_up_metric.py b/tests/test_ramp_up_metric.py
index bc3b596..500a420 100644
--- a/tests/test_ramp_up_metric.py
+++ b/tests/test_ramp_up_metric.py
@@ -8,4 +8,24 @@ def test_rampup_metric_range():
 def test_rampup_metric_missing():
     metric = RampUpMetric()
     mv = metric.score({})
-    assert mv.value == 0.0
\ No newline at end of file
+    assert mv.value == 0.0
+
+def test_rampup_metric_has_wiki():
+    metric = RampUpMetric()
+    mv = metric.score({"has_wiki": True})
+    assert mv.value > 0.0
+
+def test_rampup_metric_old_push():
+    metric = RampUpMetric()
+    mv = metric.score({"pushed_at": "2020-01-01T00:00:00Z"})
+    assert mv.value < 0.2
+
+def test_rampup_metric_stars_high():
+    metric = RampUpMetric()
+    mv = metric.score({"stars": 200})
+    assert mv.value > 0.0
+
+def test_rampup_metric_latency():
+    metric = RampUpMetric()
+    mv = metric.score({"readme_text": "docs"})
+    assert mv.latency_ms >= 0
\ No newline at end of file
diff --git a/tests/test_reporter.py b/tests/test_reporter.py
new file mode 100644
index 0000000..96434ec
--- /dev/null
+++ b/tests/test_reporter.py
@@ -0,0 +1,7 @@
+from acmecli.reporter import Reporter
+
+def test_reporter_format():
+    reporter = Reporter()
+    data = {"foo": "bar"}
+    out = reporter.format(data)
+    assert '"foo": "bar"' in out or "'foo': 'bar'" in out
\ No newline at end of file
diff --git a/tests/test_scoring.py b/tests/test_scoring.py
new file mode 100644
index 0000000..e67a275
--- /dev/null
+++ b/tests/test_scoring.py
@@ -0,0 +1,13 @@
+from acmecli.scoring import compute_netscore
+
+def test_compute_netscore_typical():
+    scores = [0.9, 0.8, 0.7]
+    weights = [0.5, 0.3, 0.2]
+    net = compute_netscore(scores, weights)
+    assert 0.0 <= net <= 1.0
+
+def test_compute_netscore_zero():
+    scores = [0, 0, 0]
+    weights = [0.5, 0.3, 0.2]
+    net = compute_netscore(scores, weights)
+    assert net == 0.0
\ No newline at end of file
diff --git a/tests/test_size_metric.py b/tests/test_size_metric.py
index f79df26..a3c5c6e 100644
--- a/tests/test_size_metric.py
+++ b/tests/test_size_metric.py
@@ -8,4 +8,25 @@ def test_size_metric_range():
 def test_size_metric_zero():
     metric = SizeMetric()
     mv = metric.score({"size": 0})
-    assert all(v == 0.5 for v in mv.value.values())
\ No newline at end of file
+    assert all(v == 0.5 for v in mv.value.values())
+
+def test_size_metric_large():
+    metric = SizeMetric()
+    mv = metric.score({"size": 100_000_000})
+    assert all(0.0 < v < 0.6 for v in mv.value.values())
+
+def test_size_metric_lightweight():
+    metric = SizeMetric()
+    mv = metric.score({"size": 1000, "readme_text": "lightweight"})
+    assert all(v >= 0.6 for v in mv.value.values())
+
+def test_size_metric_resource_intensive():
+    metric = SizeMetric()
+    mv = metric.score({"size": 100_000, "readme_text": "resource-intensive"})
+    assert all(v < 0.6 for v in mv.value.values())
+
+def test_size_metric_latency():
+    metric = SizeMetric()
+    mv = metric.score({"size": 1000})
+    assert isinstance(mv.latency_ms, int)
+    assert mv.latency_ms >= 0
\ No newline at end of file