Commit

another test
Warsame committed Sep 27, 2025
1 parent b5ea680 commit 291c00f
Showing 19 changed files with 267 additions and 32 deletions.
14 changes: 8 additions & 6 deletions src/acmecli.egg-info/SOURCES.txt
@@ -13,22 +13,24 @@ src/acmecli.egg-info/dependency_links.txt
src/acmecli.egg-info/top_level.txt
src/acmecli/metrics/__init__.py
src/acmecli/metrics/base.py
src/acmecli/metrics/busfactor_metric.py
src/acmecli/metrics/bus_factor_metric.py
src/acmecli/metrics/cli_metric.py
src/acmecli/metrics/code_quality_metric.py
src/acmecli/metrics/dataset_code_metric.py
src/acmecli/metrics/dataset_and_code_metric.py
src/acmecli/metrics/dataset_quality_metric.py
src/acmecli/metrics/hf_downloads_metric.py
src/acmecli/metrics/license_metric.py
src/acmecli/metrics/performance_metric.py
src/acmecli/metrics/rampup_metric.py
src/acmecli/metrics/logging_env_metric.py
src/acmecli/metrics/performance_claims_metric.py
src/acmecli/metrics/ramp_up_metric.py
src/acmecli/metrics/size_metric.py
tests/test_bus_factor_metric.py
tests/test_cli.py
tests/test_cli_metric.py
tests/test_code_quality_metric.py
tests/test_dataset_and_code_metric.py
tests/test_dataset_quality_metric.py
tests/test_license_metric.py
tests/test_logging_env.py
tests/test_parallel_metrics.py
tests/test_performance_claims_metric.py
tests/test_ramp_up_metric.py
tests/test_size_metric.py
29 changes: 24 additions & 5 deletions tests/test_bus_factor_metric.py
@@ -1,12 +1,31 @@
from acmecli.metrics.bus_factor_metric import BusFactorMetric

def test_bus_factor_dict_input():
def test_bus_factor_many_contributors_even():
metric = BusFactorMetric()
# contributors dict: name -> count
mv = metric.score({"contributors": {"alice": 10, "bob": 5}})
assert 0.0 <= mv.value <= 1.0
mv = metric.score({"contributors": {"a": 5, "b": 5, "c": 5, "d": 5, "e": 5, "f": 5}})
assert mv.value > 0.5

def test_bus_factor_one_contributor():
metric = BusFactorMetric()
mv = metric.score({"contributors": {"alice": 50}})
assert mv.value < 0.5

def test_bus_factor_zero_contrib():
metric = BusFactorMetric()
mv = metric.score({"contributors": {}})
assert mv.value == 0.0
assert mv.value == 0.0

def test_bus_factor_org_name():
metric = BusFactorMetric()
mv = metric.score({"contributors": {"a": 10}, "full_name": "ACMECorp/repo"})
assert mv.value >= 0.1

def test_bus_factor_forks():
metric = BusFactorMetric()
mv = metric.score({"contributors": {"a": 5, "b": 5}, "forks": 100})
assert mv.value > 0.1

def test_bus_factor_latency():
metric = BusFactorMetric()
mv = metric.score({"contributors": {"a": 1}})
assert mv.latency_ms >= 0
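
For context, here is a minimal sketch of a bus-factor scorer that would satisfy the assertions above. The MetricValue container and all weights are assumptions; the real BusFactorMetric in acmecli may score contributor spread, organization ownership, and forks differently.

import time
from dataclasses import dataclass

@dataclass
class MetricValue:  # hypothetical stand-in for the acmecli result type
    value: float
    latency_ms: float

class BusFactorSketch:
    def score(self, meta: dict) -> MetricValue:
        start = time.perf_counter()
        contributors = meta.get("contributors", {})
        total = sum(contributors.values())
        if total == 0:
            base = 0.0
        else:
            # More evenly spread contributions -> higher bus factor.
            top_share = max(contributors.values()) / total
            base = 1.0 - top_share
        if meta.get("full_name"):                        # small bonus for org-owned repos
            base += 0.1
        base += min(meta.get("forks", 0) / 1000, 0.1)    # small bonus for community forks
        value = max(0.0, min(base, 1.0))
        return MetricValue(value, (time.perf_counter() - start) * 1000)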
10 changes: 10 additions & 0 deletions tests/test_cache.py
@@ -0,0 +1,10 @@
from acmecli.cache import SimpleCache

def test_cache_set_get():
cache = SimpleCache()
cache.set("foo", "bar")
assert cache.get("foo") == "bar"

def test_cache_miss():
cache = SimpleCache()
assert cache.get("missing") is None
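
A cache with these semantics can be as small as a dict wrapper. This is only a sketch of the behavior the two tests pin down (set/get round-trip, None on a miss), not necessarily how acmecli.cache.SimpleCache is written.

class SimpleCacheSketch:
    """Minimal in-memory cache: get returns None for unknown keys."""
    def __init__(self):
        self._store = {}

    def set(self, key, value):
        self._store[key] = value

    def get(self, key):
        return self._store.get(key)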
10 changes: 10 additions & 0 deletions tests/test_cli_integration.py
@@ -0,0 +1,10 @@
import subprocess

def test_run_test_success():
result = subprocess.run(["python", "run", "test"], capture_output=True, text=True)
assert "test cases passed" in result.stdout

def test_run_score_failure():
result = subprocess.run(["python", "run", "score", "NON_EXISTENT.txt"], capture_output=True, text=True)
assert result.returncode != 0
assert "Usage" in result.stdout or "error" in result.stderr.lower()
12 changes: 11 additions & 1 deletion tests/test_cli_metric.py
@@ -8,4 +8,14 @@ def test_cli_metric_documentation():
def test_cli_metric_no_cli():
metric = CLIMetric()
mv = metric.score({"readme_text": "This project is for data analysis"})
assert mv.value < 0.5
assert mv.value < 0.5

def test_cli_metric_automation():
metric = CLIMetric()
mv = metric.score({"readme_text": "Provides automation"})
assert mv.value >= 0.3

def test_cli_metric_latency():
metric = CLIMetric()
mv = metric.score({"readme_text": "CLI"})
assert mv.latency_ms >= 0
19 changes: 17 additions & 2 deletions tests/test_code_quality_metric.py
@@ -2,10 +2,25 @@

def test_code_quality_range():
metric = CodeQualityMetric()
mv = metric.score({"lint_score": 0.9})
mv = metric.score({"readme_text": "testing with pytest", "language": "python", "pushed_at": "2025-09-01T00:00:00Z"})
assert 0.0 <= mv.value <= 1.0

def test_code_quality_missing():
metric = CodeQualityMetric()
mv = metric.score({})
assert mv.value == 0.0
assert mv.value == 0.0

def test_code_quality_old_push():
metric = CodeQualityMetric()
mv = metric.score({"pushed_at": "2020-01-01T00:00:00Z"})
assert mv.value <= 0.1

def test_code_quality_issues_vs_stars():
metric = CodeQualityMetric()
mv = metric.score({"open_issues_count": 1, "stars": 50})
assert mv.value >= 0.0

def test_code_quality_latency():
metric = CodeQualityMetric()
mv = metric.score({"readme_text": "docs"})
assert mv.latency_ms >= 0
17 changes: 16 additions & 1 deletion tests/test_dataset_and_code_metric.py
@@ -5,7 +5,22 @@ def test_dataset_and_code_range():
mv = metric.score({"readme_text": "data and code available"})
assert 0.0 <= mv.value <= 1.0

def test_dataset_and_code_known_dataset():
metric = DatasetAndCodeMetric()
mv = metric.score({"readme_text": "Uses ImageNet and example code"})
assert mv.value > 0.3

def test_dataset_and_code_missing():
metric = DatasetAndCodeMetric()
mv = metric.score({})
assert mv.value == 0.0
assert mv.value == 0.0

def test_dataset_and_code_large_size():
metric = DatasetAndCodeMetric()
mv = metric.score({"size": 20000})
assert mv.value > 0.0

def test_dataset_and_code_latency():
metric = DatasetAndCodeMetric()
mv = metric.score({"readme_text": "code"})
assert mv.latency_ms >= 0
17 changes: 16 additions & 1 deletion tests/test_dataset_quality_metric.py
@@ -8,4 +8,19 @@ def test_dataset_quality_range():
def test_dataset_quality_missing():
metric = DatasetQualityMetric()
mv = metric.score({})
assert mv.value == 0.0
assert mv.value == 0.0

def test_dataset_quality_stars_high():
metric = DatasetQualityMetric()
mv = metric.score({"stars": 1000})
assert mv.value > 0.1

def test_dataset_quality_stars_low():
metric = DatasetQualityMetric()
mv = metric.score({"stars": 10})
assert mv.value >= 0.0

def test_dataset_quality_latency():
metric = DatasetQualityMetric()
mv = metric.score({"readme_text": "benchmark"})
assert mv.latency_ms >= 0
12 changes: 12 additions & 0 deletions tests/test_github_handler.py
@@ -0,0 +1,12 @@
from acmecli.github_handler import fetch_github_metadata

def test_fetch_github_metadata_valid():
# Use a public repo; adjust as needed for your logic
url = "https://github.com/AF-Warsame/test"
meta = fetch_github_metadata(url)
assert isinstance(meta, dict) or meta is not None

def test_fetch_github_metadata_invalid():
url = "https://github.com/invalid/repo"
meta = fetch_github_metadata(url)
assert meta is None or meta == {}
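
As a rough illustration, a fetcher with the behavior these tests expect could call the public GitHub REST API and return None when the repository cannot be resolved. The function name, HTTP client, and return shape are assumptions beyond what the tests state.

import requests  # assumed dependency; the real handler may use a different client

def fetch_github_metadata_sketch(url: str):
    # "https://github.com/owner/repo" -> "owner/repo"
    repo_path = url.rstrip("/").split("github.com/")[-1]
    resp = requests.get(f"https://api.github.com/repos/{repo_path}", timeout=10)
    if resp.status_code != 200:
        return None
    return resp.json()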
11 changes: 0 additions & 11 deletions tests/test_hf_downloads_metric

This file was deleted.

26 changes: 26 additions & 0 deletions tests/test_hf_downloads_metric.py
@@ -0,0 +1,26 @@
from acmecli.metrics.hf_downloads_metric import HFDownloadsMetric

def test_downloads_zero():
metric = HFDownloadsMetric()
mv = metric.score({"downloads": 0})
assert mv.value == 0.0

def test_downloads_high():
metric = HFDownloadsMetric()
mv = metric.score({"downloads": 20000})
assert mv.value == 1.0

def test_downloads_medium():
metric = HFDownloadsMetric()
mv = metric.score({"downloads": 5000})
assert 0.0 < mv.value < 1.0

def test_downloads_none():
metric = HFDownloadsMetric()
mv = metric.score({})
assert mv.value == 0.0

def test_downloads_latency():
metric = HFDownloadsMetric()
mv = metric.score({"downloads": 100})
assert mv.latency_ms >= 0
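
The thresholds above (0 downloads scores 0.0, 5,000 lands mid-range, 20,000 saturates at 1.0) are consistent with a simple linear cap. A sketch, assuming the real metric saturates around 10,000 downloads:

def downloads_score_sketch(downloads: int, cap: int = 10_000) -> float:
    # Linear ramp up to `cap`, then saturate at 1.0; zero downloads scores 0.0.
    return min(max(downloads, 0) / cap, 1.0)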
11 changes: 11 additions & 0 deletions tests/test_hf_handler.py
@@ -0,0 +1,11 @@
from acmecli.hf_handler import fetch_hf_metadata

def test_fetch_hf_metadata_valid():
url = "https://huggingface.co/bert-base-uncased"
meta = fetch_hf_metadata(url)
assert isinstance(meta, dict) or meta is not None

def test_fetch_hf_metadata_invalid():
url = "https://huggingface.co/invalid"
meta = fetch_hf_metadata(url)
assert meta is None or meta == {}
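
A Hugging Face counterpart could query the public model API. Again, this is only a sketch consistent with the tests (dict on success, None or empty on failure), not the actual hf_handler implementation.

import requests  # assumed dependency

def fetch_hf_metadata_sketch(url: str):
    model_id = url.rstrip("/").split("huggingface.co/")[-1]
    resp = requests.get(f"https://huggingface.co/api/models/{model_id}", timeout=10)
    return resp.json() if resp.status_code == 200 else None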
27 changes: 26 additions & 1 deletion tests/test_license_metric.py
@@ -8,4 +8,29 @@ def test_license_metric_high_score():
def test_license_metric_no_license():
metric = LicenseMetric()
mv = metric.score({"license": "", "readme_text": ""})
assert mv.value == 0.0 or mv.value < 0.2
assert mv.value == 0.0 or mv.value < 0.2

def test_license_metric_medium_score():
metric = LicenseMetric()
mv = metric.score({"license": "GPL-2", "readme_text": "GPL-2 license"})
assert 0.6 <= mv.value < 0.8

def test_license_metric_low_score():
metric = LicenseMetric()
mv = metric.score({"license": "GPL-3", "readme_text": ""})
assert 0.3 <= mv.value < 0.6

def test_license_metric_readme_license_only():
metric = LicenseMetric()
mv = metric.score({"license": "", "readme_text": "licensed under the Apache license"})
assert mv.value > 0.0

def test_license_metric_latency():
metric = LicenseMetric()
mv = metric.score({"license": "MIT"})
assert mv.latency_ms >= 0

def test_license_metric_weird_license():
metric = LicenseMetric()
mv = metric.score({"license": "unknown", "readme_text": "no info"})
assert mv.value < 0.5
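
The score bands these tests encode (GPL-2 in [0.6, 0.8), GPL-3 in [0.3, 0.6), unknown below 0.5, README-only mention above 0.0, empty below 0.2) suggest a small lookup table. The tiers and fallback value below are illustrative assumptions, not the metric's real weights.

# Illustrative tier table consistent with the assertions above.
LICENSE_TIERS = {
    "mit": 1.0,
    "apache": 0.9,
    "bsd": 0.9,
    "gpl-2": 0.7,
    "gpl-3": 0.4,
}

def license_score_sketch(license_id: str, readme_text: str = "") -> float:
    key = license_id.strip().lower()
    for name, score in LICENSE_TIERS.items():
        if key.startswith(name):
            return score
    # Fall back to a weak signal if the README merely mentions a license.
    return 0.2 if "license" in readme_text.lower() else 0.0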
7 changes: 6 additions & 1 deletion tests/test_logging_env.py
@@ -13,4 +13,9 @@ def test_logging_env_metric_readme():
def test_logging_env_metric_missing():
metric = LoggingEnvMetric()
mv = metric.score({})
assert mv.value == 0.0
assert mv.value == 0.0

def test_logging_env_metric_latency():
metric = LoggingEnvMetric()
mv = metric.score({"env_vars": {"LOG_FILE": "file"}})
assert mv.latency_ms >= 0
12 changes: 11 additions & 1 deletion tests/test_performance_claims_metric.py
@@ -8,4 +8,14 @@ def test_performance_metric_range():
def test_performance_metric_missing():
metric = PerformanceClaimsMetric()
mv = metric.score({})
assert mv.value == 0.0
assert mv.value == 0.0

def test_performance_metric_numbers():
metric = PerformanceClaimsMetric()
mv = metric.score({"readme_text": "score 99"})
assert mv.value > 0.0

def test_performance_metric_latency():
metric = PerformanceClaimsMetric()
mv = metric.score({"readme_text": "benchmarks"})
assert mv.latency_ms >= 0
22 changes: 21 additions & 1 deletion tests/test_ramp_up_metric.py
@@ -8,4 +8,24 @@ def test_rampup_metric_range():
def test_rampup_metric_missing():
metric = RampUpMetric()
mv = metric.score({})
assert mv.value == 0.0
assert mv.value == 0.0

def test_rampup_metric_has_wiki():
metric = RampUpMetric()
mv = metric.score({"has_wiki": True})
assert mv.value > 0.0

def test_rampup_metric_old_push():
metric = RampUpMetric()
mv = metric.score({"pushed_at": "2020-01-01T00:00:00Z"})
assert mv.value < 0.2

def test_rampup_metric_stars_high():
metric = RampUpMetric()
mv = metric.score({"stars": 200})
assert mv.value > 0.0

def test_rampup_metric_latency():
metric = RampUpMetric()
mv = metric.score({"readme_text": "docs"})
assert mv.latency_ms >= 0
7 changes: 7 additions & 0 deletions tests/test_reporter.py
@@ -0,0 +1,7 @@
from acmecli.reporter import Reporter

def test_reporter_format():
reporter = Reporter()
data = {"foo": "bar"}
out = reporter.format(data)
assert '"foo": "bar"' in out or "'foo': 'bar'" in out
13 changes: 13 additions & 0 deletions tests/test_scoring.py
@@ -0,0 +1,13 @@
from acmecli.scoring import compute_netscore

def test_compute_netscore_typical():
scores = [0.9, 0.8, 0.7]
weights = [0.5, 0.3, 0.2]
net = compute_netscore(scores, weights)
assert 0.0 <= net <= 1.0

def test_compute_netscore_zero():
scores = [0, 0, 0]
weights = [0.5, 0.3, 0.2]
net = compute_netscore(scores, weights)
assert net == 0.0
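
For reference, the weighted-sum form these tests imply; whether the real compute_netscore normalizes by the weight total and clamps the result is an assumption here.

def compute_netscore_sketch(scores, weights):
    # Weighted average of per-metric scores, clamped to [0, 1].
    total_weight = sum(weights) or 1.0
    net = sum(s * w for s, w in zip(scores, weights)) / total_weight
    return max(0.0, min(net, 1.0))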