From b5ea680d788a7071b42d6734cc9166cc1a2278a4 Mon Sep 17 00:00:00 2001
From: Warsame
Date: Sat, 27 Sep 2025 18:06:13 -0400
Subject: [PATCH] Test and URL

---
 requirements.txt                              | Bin 106 -> 47 bytes
 src/acmecli.egg-info/SOURCES.txt              |  14 ++++---
 src/acmecli/metrics/__init__.py               |  12 ++++----
 .../__pycache__/__init__.cpython-312.pyc      | Bin 692 -> 810 bytes
 ...sfactor_metric.py => bus_factor_metric.py} |   0
 src/acmecli/metrics/cli_metric.py             |  24 ++++++++++++++++
 ...e_metric.py => dataset_and_code_metric.py} |   0
 src/acmecli/metrics/logging_env_metric.py     |  23 +++++++++++++++
 ...metric.py => performance_claims_metric.py} |   0
 .../{rampup_metric.py => ramp_up_metric.py}   |   0
 tests/__init__.py                             |   0
 tests/test_bus_factor_metric.py               |  14 ++++-----
 tests/test_cli.py                             |  18 ------------
 tests/test_cli_metric.py                      |  11 +++++++
 tests/test_code_quality_metric.py             |   5 ----
 tests/test_dataset_and_code_metric.py         |   7 +----
 tests/test_dataset_quality_metric.py          |   7 +----
 tests/test_hf_downloads_metric.py             |  11 +++++++
 tests/test_license_metric.py                  |  11 +++++++
 tests/test_logging_env.py                     |  27 ++++++++++--------
 tests/test_parallel_metrics.py                |  11 -------
 tests/test_performance_claims_metric.py       |  11 ++-----
 tests/test_ramp_up_metric.py                  |  11 ++-----
 tests/test_size_metric.py                     |  17 ++++-------
 24 files changed, 131 insertions(+), 103 deletions(-)
 rename src/acmecli/metrics/{busfactor_metric.py => bus_factor_metric.py} (100%)
 create mode 100644 src/acmecli/metrics/cli_metric.py
 rename src/acmecli/metrics/{dataset_code_metric.py => dataset_and_code_metric.py} (100%)
 create mode 100644 src/acmecli/metrics/logging_env_metric.py
 rename src/acmecli/metrics/{performance_metric.py => performance_claims_metric.py} (100%)
 rename src/acmecli/metrics/{rampup_metric.py => ramp_up_metric.py} (100%)
 create mode 100644 tests/__init__.py
 delete mode 100644 tests/test_cli.py
 create mode 100644 tests/test_cli_metric.py
 create mode 100644 tests/test_hf_downloads_metric.py
 create mode 100644 tests/test_license_metric.py
 delete mode 100644 tests/test_parallel_metrics.py

diff --git a/requirements.txt b/requirements.txt
index 78c2af88257cd7a61985587dd833ec3c2cb33869..6dcda410bb422255737d3a5fae8e40301cf499a8 100644
GIT binary patch
literal 47
ucmXR)Ei6qfE-B_Js4M}}P)avBzlTg)2uWS2#+Tkpal&&JoEKjS^*K
sN~V)oj7Mty(Z%=PG=ua-_(+#%w!-(z$ZUFJu@%eHLnaJ=BFuki!UcLIW@01H9i-t
tcV$C&n>>9#N2}T(gK(u3rJ8fskAsgEit(yzX&GI3KEwrNG(dsFUn2KOHPeX&PmM7
Ee6X^Udt%OBc762l31Kt5}%ltGWk4XlKL&K_;#2d=U=qBO^_V=J
Z*kb<=BJeAq}mn9097&qaj_bZ_`uA_$at4Q^DcwNT?W0o40d-JlWlEa7kjc&V7l%!5eoARhs$G!;PzNIr7b^gX56p~=jCUC{?=q<0
dWzfCLV1AcD`7VR$T?UK03`X}DjEcm7iU1ZiJ}v+N

diff --git a/src/acmecli/metrics/busfactor_metric.py b/src/acmecli/metrics/bus_factor_metric.py
similarity index 100%
rename from src/acmecli/metrics/busfactor_metric.py
rename to src/acmecli/metrics/bus_factor_metric.py
diff --git a/src/acmecli/metrics/cli_metric.py b/src/acmecli/metrics/cli_metric.py
new file mode 100644
index 0000000..670cced
--- /dev/null
+++ b/src/acmecli/metrics/cli_metric.py
@@ -0,0 +1,24 @@
+import time
+from ..types import MetricValue
+from .base import register
+
+class CLIMetric:
+    """Metric to assess CLI usability and script-based automation."""
+    name = "cli"
+
+    def score(self, meta: dict) -> MetricValue:
+        t0 = time.perf_counter()
+        # Heuristic: score higher if CLI commands or install/test/score are documented
+        score = 0.0
+        readme_text = meta.get("readme_text", "").lower()
+        if "cli" in readme_text or "command line" in readme_text:
+            score += 0.5
+        if any(cmd in readme_text for cmd in ["install", "test", "score"]):
+            score += 0.2
+        if "automation" in readme_text or "script" in readme_text:
+            score += 0.3
+        value = min(1.0, score)
+        latency_ms = int((time.perf_counter() - t0) * 1000)
+        return MetricValue(self.name, value, latency_ms)
+
+register(CLIMetric())
\ No newline at end of file
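
The new metric relies on MetricValue and register() from acmecli's types and base modules, which this patch does not touch. For reviewers reading the hunk in isolation, here is a minimal sketch of the assumed contract (names and shapes are assumptions inferred from the call sites above, not the actual acmecli source):

    from typing import NamedTuple

    class MetricValue(NamedTuple):
        name: str        # metric identifier, e.g. "cli"
        value: object    # a float in [0.0, 1.0] here; the size tests below imply a dict
        latency_ms: int  # wall-clock scoring time in milliseconds

    REGISTRY: list = []

    def register(metric) -> None:
        # Collects metric singletons at import time so the CLI can iterate them.
        REGISTRY.append(metric)
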
"score"]): + score += 0.2 + if "automation" in readme_text or "script" in readme_text: + score += 0.3 + value = min(1.0, score) + latency_ms = int((time.perf_counter() - t0) * 1000) + return MetricValue(self.name, value, latency_ms) + +register(CLIMetric()) \ No newline at end of file diff --git a/src/acmecli/metrics/dataset_code_metric.py b/src/acmecli/metrics/dataset_and_code_metric.py similarity index 100% rename from src/acmecli/metrics/dataset_code_metric.py rename to src/acmecli/metrics/dataset_and_code_metric.py diff --git a/src/acmecli/metrics/logging_env_metric.py b/src/acmecli/metrics/logging_env_metric.py new file mode 100644 index 0000000..99eb9da --- /dev/null +++ b/src/acmecli/metrics/logging_env_metric.py @@ -0,0 +1,23 @@ +import time +from ..types import MetricValue +from .base import register + +class LoggingEnvMetric: + """Metric to assess logging configuration via environment variables.""" + name = "logging_env" + + def score(self, meta: dict) -> MetricValue: + t0 = time.perf_counter() + # Heuristic: score higher if LOG_FILE or LOG_LEVEL are mentioned/configured + score = 0.0 + env_vars = meta.get("env_vars", {}) + readme_text = meta.get("readme_text", "").lower() + if "log_file" in env_vars or "log_level" in env_vars: + score += 0.5 + if "debug" in readme_text or "logging" in readme_text: + score += 0.3 + value = min(1.0, score) + latency_ms = int((time.perf_counter() - t0) * 1000) + return MetricValue(self.name, value, latency_ms) + +register(LoggingEnvMetric()) \ No newline at end of file diff --git a/src/acmecli/metrics/performance_metric.py b/src/acmecli/metrics/performance_claims_metric.py similarity index 100% rename from src/acmecli/metrics/performance_metric.py rename to src/acmecli/metrics/performance_claims_metric.py diff --git a/src/acmecli/metrics/rampup_metric.py b/src/acmecli/metrics/ramp_up_metric.py similarity index 100% rename from src/acmecli/metrics/rampup_metric.py rename to src/acmecli/metrics/ramp_up_metric.py diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_bus_factor_metric.py b/tests/test_bus_factor_metric.py index d521fa2..e755746 100644 --- a/tests/test_bus_factor_metric.py +++ b/tests/test_bus_factor_metric.py @@ -1,16 +1,12 @@ from acmecli.metrics.bus_factor_metric import BusFactorMetric -def test_bus_factor_range(): +def test_bus_factor_dict_input(): metric = BusFactorMetric() - mv = metric.score({"contributors": 10}) + # contributors dict: name -> count + mv = metric.score({"contributors": {"alice": 10, "bob": 5}}) assert 0.0 <= mv.value <= 1.0 -def test_bus_factor_latency(): +def test_bus_factor_zero_contrib(): metric = BusFactorMetric() - mv = metric.score({"contributors": 10}) - assert mv.latency_ms >= 0 - -def test_bus_factor_edge_case_zero_contrib(): - metric = BusFactorMetric() - mv = metric.score({"contributors": 0}) + mv = metric.score({"contributors": {}}) assert mv.value == 0.0 \ No newline at end of file diff --git a/tests/test_cli.py b/tests/test_cli.py deleted file mode 100644 index 367d204..0000000 --- a/tests/test_cli.py +++ /dev/null @@ -1,18 +0,0 @@ -import subprocess - -def test_run_install(): - result = subprocess.run(["./run", "install"], capture_output=True) - assert result.returncode == 0 - -def test_run_test(): - result = subprocess.run(["./run", "test"], capture_output=True) - assert b"test cases passed" in result.stdout - -def test_run_score_valid_url(): - result = subprocess.run(["./run", "score", "urls.txt"], capture_output=True) - assert 
diff --git a/src/acmecli/metrics/performance_metric.py b/src/acmecli/metrics/performance_claims_metric.py
similarity index 100%
rename from src/acmecli/metrics/performance_metric.py
rename to src/acmecli/metrics/performance_claims_metric.py
diff --git a/src/acmecli/metrics/rampup_metric.py b/src/acmecli/metrics/ramp_up_metric.py
similarity index 100%
rename from src/acmecli/metrics/rampup_metric.py
rename to src/acmecli/metrics/ramp_up_metric.py
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_bus_factor_metric.py b/tests/test_bus_factor_metric.py
index d521fa2..e755746 100644
--- a/tests/test_bus_factor_metric.py
+++ b/tests/test_bus_factor_metric.py
@@ -1,16 +1,12 @@
 from acmecli.metrics.bus_factor_metric import BusFactorMetric
 
-def test_bus_factor_range():
+def test_bus_factor_dict_input():
     metric = BusFactorMetric()
-    mv = metric.score({"contributors": 10})
+    # contributors dict: name -> count
+    mv = metric.score({"contributors": {"alice": 10, "bob": 5}})
     assert 0.0 <= mv.value <= 1.0
 
-def test_bus_factor_latency():
+def test_bus_factor_zero_contrib():
     metric = BusFactorMetric()
-    mv = metric.score({"contributors": 10})
-    assert mv.latency_ms >= 0
-
-def test_bus_factor_edge_case_zero_contrib():
-    metric = BusFactorMetric()
-    mv = metric.score({"contributors": 0})
+    mv = metric.score({"contributors": {}})
     assert mv.value == 0.0
\ No newline at end of file
diff --git a/tests/test_cli.py b/tests/test_cli.py
deleted file mode 100644
index 367d204..0000000
--- a/tests/test_cli.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import subprocess
-
-def test_run_install():
-    result = subprocess.run(["./run", "install"], capture_output=True)
-    assert result.returncode == 0
-
-def test_run_test():
-    result = subprocess.run(["./run", "test"], capture_output=True)
-    assert b"test cases passed" in result.stdout
-
-def test_run_score_valid_url():
-    result = subprocess.run(["./run", "score", "urls.txt"], capture_output=True)
-    assert result.stdout.decode().strip().startswith("{")
-    assert result.returncode == 0
-
-def test_run_score_invalid_url():
-    result = subprocess.run(["./run", "score", "bad_urls.txt"], capture_output=True)
-    assert result.returncode == 1 or "error" in result.stderr.decode().lower()
\ No newline at end of file
diff --git a/tests/test_cli_metric.py b/tests/test_cli_metric.py
new file mode 100644
index 0000000..3242d2b
--- /dev/null
+++ b/tests/test_cli_metric.py
@@ -0,0 +1,11 @@
+from acmecli.metrics.cli_metric import CLIMetric
+
+def test_cli_metric_documentation():
+    metric = CLIMetric()
+    mv = metric.score({"readme_text": "Supports install, test, score via CLI"})
+    assert 0.0 <= mv.value <= 1.0
+
+def test_cli_metric_no_cli():
+    metric = CLIMetric()
+    mv = metric.score({"readme_text": "This project is for data analysis"})
+    assert mv.value < 0.5
\ No newline at end of file
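
The deleted subprocess-based test_cli.py exercised the ./run wrapper end to end; the replacement tests pin down the metric heuristics directly. The scoring rules in cli_metric.py compose additively, which the following check (a sketch against the patched code, not part of the patch) makes explicit:

    from acmecli.metrics.cli_metric import CLIMetric

    # "CLI" mention (+0.5) plus documented install/test/score commands (+0.2)
    mv = CLIMetric().score({"readme_text": "Run the CLI: install, test, score"})
    assert abs(mv.value - 0.7) < 1e-9

    # None of the keywords present -> 0.0
    assert CLIMetric().score({"readme_text": ""}).value == 0.0
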
+    metric = LicenseMetric()
+    mv = metric.score({"license": "MIT", "readme_text": "MIT license"})
+    assert 0.8 <= mv.value <= 1.0
+
+def test_license_metric_no_license():
+    metric = LicenseMetric()
+    mv = metric.score({"license": "", "readme_text": ""})
+    assert mv.value < 0.2
\ No newline at end of file
diff --git a/tests/test_logging_env.py b/tests/test_logging_env.py
index 5577fc9..d09952c 100644
--- a/tests/test_logging_env.py
+++ b/tests/test_logging_env.py
@@ -1,13 +1,16 @@
-import os
-import tempfile
+from acmecli.metrics.logging_env_metric import LoggingEnvMetric
 
-def test_logging_env(monkeypatch):
-    with tempfile.NamedTemporaryFile() as log_file:
-        monkeypatch.setenv("LOG_FILE", log_file.name)
-        monkeypatch.setenv("LOG_LEVEL", "DEBUG")
-        # You should call your project's logging function here if available
-        from acmecli import main_logger
-        main_logger.debug("test message")
-        log_file.seek(0)
-        contents = log_file.read().decode()
-        assert "test message" in contents
\ No newline at end of file
+def test_logging_env_metric_env_vars():
+    metric = LoggingEnvMetric()
+    mv = metric.score({"env_vars": {"LOG_FILE": "log.txt", "LOG_LEVEL": "DEBUG"}})
+    assert 0.0 <= mv.value <= 1.0
+
+def test_logging_env_metric_readme():
+    metric = LoggingEnvMetric()
+    mv = metric.score({"readme_text": "This project uses logging and debug level"})
+    assert 0.0 <= mv.value <= 1.0
+
+def test_logging_env_metric_missing():
+    metric = LoggingEnvMetric()
+    mv = metric.score({})
+    assert mv.value == 0.0
\ No newline at end of file
diff --git a/tests/test_parallel_metrics.py b/tests/test_parallel_metrics.py
deleted file mode 100644
index b611bb4..0000000
--- a/tests/test_parallel_metrics.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import time
-from acmecli.cli import score_all_metrics_parallel
-
-def test_metrics_run_parallel():
-    start = time.time()
-    # Simulate input that runs several metrics
-    report = score_all_metrics_parallel({"contributors": 10, "size_kb": 100, "linked": True})
-    duration = time.time() - start
-    # For parallel, duration should be less than sum of all metric durations
-    assert isinstance(report, dict)
-    assert duration < 2.0  # Example threshold, adjust to your actual metric timings
\ No newline at end of file
diff --git a/tests/test_performance_claims_metric.py b/tests/test_performance_claims_metric.py
index 33f2f02..4d6b0dc 100644
--- a/tests/test_performance_claims_metric.py
+++ b/tests/test_performance_claims_metric.py
@@ -1,16 +1,11 @@
 from acmecli.metrics.performance_claims_metric import PerformanceClaimsMetric
 
-def test_performance_claims_range():
+def test_performance_metric_range():
     metric = PerformanceClaimsMetric()
-    mv = metric.score({"claims": 5})
+    mv = metric.score({"readme_text": "benchmark performance 99%"})
     assert 0.0 <= mv.value <= 1.0
 
-def test_performance_claims_latency():
-    metric = PerformanceClaimsMetric()
-    mv = metric.score({"claims": 5})
-    assert mv.latency_ms >= 0
-
-def test_performance_claims_missing():
+def test_performance_metric_missing():
     metric = PerformanceClaimsMetric()
     mv = metric.score({})
     assert mv.value == 0.0
\ No newline at end of file
diff --git a/tests/test_ramp_up_metric.py b/tests/test_ramp_up_metric.py
index 53b2e9e..bc3b596 100644
--- a/tests/test_ramp_up_metric.py
+++ b/tests/test_ramp_up_metric.py
@@ -1,16 +1,11 @@
 from acmecli.metrics.ramp_up_metric import RampUpMetric
 
-def test_ramp_up_range():
+def test_rampup_metric_range():
     metric = RampUpMetric()
-    mv = metric.score({"readme_size": 1000})
+    mv = metric.score({"readme_text": "Install and usage quickstart", "pushed_at": "2025-09-01T00:00:00Z"})
     assert 0.0 <= mv.value <= 1.0
 
-def test_ramp_up_latency():
-    metric = RampUpMetric()
-    mv = metric.score({"readme_size": 1000})
-    assert mv.latency_ms >= 0
-
-def test_ramp_up_missing_readme():
+def test_rampup_metric_missing():
     metric = RampUpMetric()
     mv = metric.score({})
     assert mv.value == 0.0
\ No newline at end of file
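
The reworked ramp-up test feeds the metric a pushed_at timestamp alongside the README text, suggesting RampUpMetric folds repository recency into its score. Since ramp_up_metric.py is only renamed in this patch, here is an illustration of how such a recency term could be computed (an assumption about the approach, not the patched implementation):

    from datetime import datetime, timezone

    def recency_bonus(pushed_at: str, max_age_days: int = 365) -> float:
        """Scale from 1.0 (pushed today) down to 0.0 (max_age_days or older)."""
        pushed = datetime.fromisoformat(pushed_at.replace("Z", "+00:00"))
        age_days = (datetime.now(timezone.utc) - pushed).days
        return max(0.0, 1.0 - age_days / max_age_days)

    recency_bonus("2025-09-01T00:00:00Z")  # ~1.0 shortly after the push date
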
"Install and usage quickstart", "pushed_at": "2025-09-01T00:00:00Z"}) assert 0.0 <= mv.value <= 1.0 -def test_ramp_up_latency(): - metric = RampUpMetric() - mv = metric.score({"readme_size": 1000}) - assert mv.latency_ms >= 0 - -def test_ramp_up_missing_readme(): +def test_rampup_metric_missing(): metric = RampUpMetric() mv = metric.score({}) assert mv.value == 0.0 \ No newline at end of file diff --git a/tests/test_size_metric.py b/tests/test_size_metric.py index 50bb85a..f79df26 100644 --- a/tests/test_size_metric.py +++ b/tests/test_size_metric.py @@ -1,16 +1,11 @@ from acmecli.metrics.size_metric import SizeMetric -def test_size_range(): +def test_size_metric_range(): metric = SizeMetric() - mv = metric.score({"size_kb": 100}) - assert 0.0 <= mv.value <= 1.0 + mv = metric.score({"size": 1000}) + assert all(0.0 <= v <= 1.0 for v in mv.value.values()) -def test_size_latency(): +def test_size_metric_zero(): metric = SizeMetric() - mv = metric.score({"size_kb": 100}) - assert mv.latency_ms >= 0 - -def test_size_edge_case_zero(): - metric = SizeMetric() - mv = metric.score({"size_kb": 0}) - assert mv.value == 0.0 \ No newline at end of file + mv = metric.score({"size": 0}) + assert all(v == 0.5 for v in mv.value.values()) \ No newline at end of file