Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .codespell-ignore-words.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
parm
1 change: 1 addition & 0 deletions .github/cppcheck-baseline.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@

5 changes: 5 additions & 0 deletions .github/instructions/instructions.md
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,9 @@ GNU autotools.
bounds.
- String buffers: declare length constants; do not use magic numbers for
buffer sizes.
- Public APIs: prefer `const char *` for input-only string parameters.
Document ownership expectations in function comments when transfer is not
obvious.

## SNMP

Expand Down Expand Up @@ -62,6 +65,8 @@ GNU autotools.
- Before opening a PR, run `cppcheck --enable=all --std=c11 *.c *.h`
locally and fix all errors (warnings are informational).
- flawfinder level-5 hits fail CI; lower levels are informational.
- CI has a guardrail for newly introduced unsafe C APIs (`sprintf`, `strcpy`,
`strcat`, `gets`, `vsprintf`) and fails closed on additions.

## Commits and PRs

Expand Down
12 changes: 12 additions & 0 deletions .github/nightly-leak-baseline.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
{
"valgrind": {
"max_definitely_lost_bytes": 0,
"max_indirectly_lost_bytes": 0,
"max_possibly_lost_bytes": 0,
"max_error_summary": 0
},
"asan": {
"max_asan_error_events": 0,
"max_ubsan_error_events": 0
}
}
20 changes: 20 additions & 0 deletions .github/perf-baseline.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
{
"sample_size": 20,
"commands": {
"./spine --version": {
"median_seconds": 0.35,
"allowed_regression_factor": 1.5,
"max_rss_kb": 32768
},
"./spine --help": {
"median_seconds": 0.45,
"allowed_regression_factor": 1.5,
"max_rss_kb": 40960
},
"snmpget -v2c -c public -On 127.0.0.1:1161 1.3.6.1.2.1.1.3.0": {
"median_seconds": 0.25,
"allowed_regression_factor": 2.0,
"max_rss_kb": 32768
}
}
}
92 changes: 92 additions & 0 deletions .github/scripts/check-leak-trend.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
#!/usr/bin/env python3
"""Parse sanitizer/valgrind logs and enforce nightly leak thresholds."""

from __future__ import annotations

import argparse
import glob
import json
import re
from pathlib import Path


# Patterns for the summary lines of a valgrind memcheck report. Byte counts
# may contain thousands separators ("1,024"), hence the [0-9,] class.
DEF_RE = re.compile(r"definitely lost:\s*([0-9,]+)\s+bytes")
IND_RE = re.compile(r"indirectly lost:\s*([0-9,]+)\s+bytes")
POS_RE = re.compile(r"possibly lost:\s*([0-9,]+)\s+bytes")
ERR_RE = re.compile(r"ERROR SUMMARY:\s*([0-9,]+)\s+errors")


def as_int(value: str) -> int:
    """Convert a valgrind-formatted number such as '1,024' to an int."""
    return int(value.replace(",", ""))


def parse_valgrind(log_text: str) -> dict[str, int]:
    """Aggregate leak bytes and error counts across every report in *log_text*.

    Multiple valgrind runs may be concatenated in the input; each metric is
    summed over all matching summary lines.
    """

    def total(pattern: re.Pattern[str]) -> int:
        # Sum every occurrence so multi-process logs are accounted for.
        return sum(as_int(number) for number in pattern.findall(log_text))

    return {
        "definitely_lost_bytes": total(DEF_RE),
        "indirectly_lost_bytes": total(IND_RE),
        "possibly_lost_bytes": total(POS_RE),
        "error_summary": total(ERR_RE),
    }


def parse_asan(log_text: str) -> dict[str, int]:
    """Count ASan/UBSan error indicators in the combined sanitizer logs.

    Counts non-overlapping occurrences of each marker string, which is
    equivalent to the regex-findall approach for plain literals.
    """
    return {
        "asan_error_events": log_text.count("AddressSanitizer"),
        "ubsan_error_events": log_text.count("runtime error:"),
    }


def collect_text(patterns: list[str]) -> str:
    """Concatenate the contents of every file matched by the glob *patterns*.

    Files are visited in sorted order per pattern; unreadable files are
    skipped silently (best-effort log collection).
    """
    chunks: list[str] = []
    for pattern in patterns:
        for file_path in sorted(glob.glob(pattern)):
            try:
                text = Path(file_path).read_text(encoding="utf-8", errors="replace")
            except OSError:
                # Missing/unreadable log files must not break the gate.
                continue
            chunks.append(text)
    return "\n".join(chunks)


def enforce(summary: dict[str, int], baseline: dict[str, int]) -> list[str]:
    """Return a failure message for each metric exceeding its baseline cap.

    Each metric ``foo`` in *summary* is compared against ``max_foo`` in
    *baseline*; a missing baseline entry defaults to a cap of zero.
    """
    problems: list[str] = []
    for metric, observed in summary.items():
        cap = int(baseline.get(f"max_{metric}", 0))
        if observed > cap:
            problems.append(f"{metric}={observed} exceeded max_{metric}={cap}")
    return problems


def main() -> int:
    """CLI entry point: parse logs, emit a summary JSON, gate on the baseline.

    Exit status 0 when every metric is within its baseline cap, 1 otherwise.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--mode", choices=("valgrind", "asan"), required=True)
    parser.add_argument("--baseline", required=True)
    parser.add_argument("--output", required=True)
    parser.add_argument("--logs", nargs="+", required=True)
    opts = parser.parse_args()

    thresholds = json.loads(Path(opts.baseline).read_text(encoding="utf-8")).get(opts.mode, {})
    combined_logs = collect_text(opts.logs)

    # Pick the parser for the requested mode.
    parse = parse_valgrind if opts.mode == "valgrind" else parse_asan
    summary = parse(combined_logs)

    # Always publish the measured summary, even when the gate fails.
    Path(opts.output).write_text(json.dumps(summary, indent=2) + "\n", encoding="utf-8")

    problems = enforce(summary, thresholds)
    if problems:
        print("Leak trend gate failed:")
        for message in problems:
            print(f"- {message}")
        return 1

    print(f"{opts.mode} leak trend gate passed.")
    print(json.dumps(summary, indent=2))
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
30 changes: 30 additions & 0 deletions .github/scripts/check-unsafe-api-additions.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
#!/usr/bin/env bash
# CI guardrail: fail when this change ADDS calls to unsafe C string APIs
# (sprintf, vsprintf, strcpy, strcat, gets). Pre-existing occurrences are
# tolerated; only newly added diff lines are examined.
set -euo pipefail

base_commit=""

# On pull requests, diff against the PR base branch. Actions checkouts are
# usually shallow, so try to unshallow first and fall back to a plain fetch
# when the clone is already complete.
if [[ -n "${GITHUB_BASE_REF:-}" ]]; then
git fetch --no-tags --unshallow origin "${GITHUB_BASE_REF}" 2>/dev/null || \
git fetch --no-tags origin "${GITHUB_BASE_REF}"
base_commit="$(git merge-base HEAD "origin/${GITHUB_BASE_REF}" 2>/dev/null || true)"
fi

# Outside a PR (e.g. push builds), compare against the previous commit, or
# the root commit when HEAD has no parent.
if [[ -z "${base_commit}" ]]; then
base_commit="$(git rev-parse HEAD~1 2>/dev/null || git rev-list --max-parents=0 HEAD)"
fi

# NOTE(review): \b and \s in ERE are GNU grep extensions — fine on the
# ubuntu runners, but not portable to BSD grep. \b keeps snprintf/strncpy
# from matching.
banned_regex='\b(sprintf|vsprintf|strcpy|strcat|gets)\s*\('

# Three-dot diff: only changes introduced on HEAD since the merge-base.
# '^\+[^+]' keeps added lines ("+code") but drops the "+++ b/file" header.
new_hits="$(
git diff --unified=0 "${base_commit}"...HEAD -- '*.c' '*.h' \
| grep -E '^\+[^+]' \
| grep -E "${banned_regex}" || true
)"

if [[ -n "${new_hits}" ]]; then
echo "Unsafe C APIs were newly added in this change:"
echo "${new_hits}"
exit 1
fi

echo "No newly added banned C APIs detected."
101 changes: 101 additions & 0 deletions .github/scripts/check-workflow-policy.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
#!/usr/bin/env python3
"""Enforce workflow hygiene policy on GitHub Actions files."""

from __future__ import annotations

import re
import sys
from pathlib import Path

import yaml


# A trusted action ref is a full 40-hex commit SHA (tags/branches are mutable).
PINNED_REF_RE = re.compile(r"^[0-9a-f]{40}$")
# Detects piping a curl download straight into a shell.
CURL_PIPE_RE = re.compile(r"curl\b[^\n|]*\|\s*(?:sh|bash)\b")
STRICT_LINE = "set -euo pipefail"
WORKFLOW_GLOB = ".github/workflows/*"
# Per-workflow allowlist: relative path -> substrings that justify curl|sh.
ALLOWLIST_CURL_PIPE: dict[str, list[str]] = {}


def normalize_steps(job: dict) -> list[dict]:
    """Return the job's steps list, or an empty list when absent/malformed."""
    steps = job.get("steps")
    return steps if isinstance(steps, list) else []


def check_uses(path: str, step_name: str, uses_value: str, violations: list[str]) -> None:
    """Append to *violations* unless *uses_value* is local, docker, or SHA-pinned.

    Local composite actions ("./...") and docker references cannot be pinned
    to a commit SHA, so they are exempt.
    """
    if uses_value.startswith(("./", "docker://")):
        return

    if "@" not in uses_value:
        violations.append(f"{path}:{step_name}: uses reference is missing @ref: {uses_value}")
        return

    ref = uses_value.split("@", 1)[1]
    if not PINNED_REF_RE.fullmatch(ref):
        violations.append(f"{path}:{step_name}: action ref must be a pinned SHA: {uses_value}")


def check_run(path: str, step_name: str, run_value: str, violations: list[str]) -> None:
    """Check a run script for a strict-mode preamble and unapproved curl|sh.

    Appends human-readable messages to *violations*; emits at most one
    curl|sh violation per step (the previous version appended a duplicate
    for every pipe occurrence in the same script).
    """
    lines = [ln.strip() for ln in run_value.splitlines() if ln.strip()]
    if not lines:
        return

    # Only multiline scripts must fail fast; one-liners are exempt.
    if len(run_value.splitlines()) > 1 and lines[0] != STRICT_LINE:
        violations.append(f"{path}:{step_name}: multiline run must start with '{STRICT_LINE}'")

    if CURL_PIPE_RE.search(run_value):
        allow_tokens = ALLOWLIST_CURL_PIPE.get(path, [])
        if not any(token in run_value for token in allow_tokens):
            violations.append(f"{path}:{step_name}: curl|sh is not allowlisted")


def main() -> int:
    """Scan every workflow file under the repo root and report policy issues.

    Returns 0 when all workflows pass, 1 when any violation is found.
    """
    # This script lives at .github/scripts/, so the repo root is two levels up.
    repo_root = Path(__file__).resolve().parents[2]
    workflow_files = sorted(
        candidate
        for candidate in repo_root.glob(WORKFLOW_GLOB)
        if candidate.suffix in (".yml", ".yaml")
    )
    problems: list[str] = []

    for workflow_path in workflow_files:
        rel = str(workflow_path.relative_to(repo_root))
        try:
            parsed = yaml.safe_load(workflow_path.read_text(encoding="utf-8"))
        except Exception as exc:  # pragma: no cover
            problems.append(f"{rel}: failed to parse YAML: {exc}")
            continue

        # An empty or non-mapping document has no jobs to check.
        job_map = parsed.get("jobs", {}) if isinstance(parsed, dict) else {}
        if not isinstance(job_map, dict):
            continue

        for job_name, job in job_map.items():
            if not isinstance(job, dict):
                continue

            for idx, step in enumerate(normalize_steps(job), start=1):
                if not isinstance(step, dict):
                    continue
                # Unnamed steps get a synthetic "job.stepN" label.
                step_label = str(step.get("name", f"{job_name}.step{idx}"))

                uses_value = step.get("uses")
                if isinstance(uses_value, str):
                    check_uses(rel, step_label, uses_value.strip(), problems)

                run_value = step.get("run")
                if isinstance(run_value, str):
                    check_run(rel, step_label, run_value, problems)

    if problems:
        print("Workflow policy violations:")
        for issue in problems:
            print(f"- {issue}")
        return 1

    print("Workflow policy checks passed.")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
105 changes: 105 additions & 0 deletions .github/scripts/clang_tidy_to_sarif.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
#!/usr/bin/env python3
"""Convert clang-tidy text output to SARIF 2.1.0."""

from __future__ import annotations

import json
import re
import sys
from pathlib import Path


# Matches one clang-tidy diagnostic line, e.g.
#   src/foo.c:12:5: warning: unused variable 'x' [clang-diagnostic-unused-variable]
# The trailing "[check-name]" group is optional (plain notes carry none).
LINE_RE = re.compile(
    r"^(?P<file>[^:\n]+):(?P<line>\d+):(?P<col>\d+):\s+"
    r"(?P<severity>warning|error|note):\s+"
    r"(?P<message>.*?)(?:\s+\[(?P<rule>[^\]]+)\])?\s*$"
)


def level_from_severity(severity: str) -> str:
    """Map a clang-tidy severity word onto a SARIF result level.

    Anything other than "error" or "warning" (i.e. clang-tidy notes)
    becomes a SARIF "note".
    """
    return {"error": "error", "warning": "warning"}.get(severity, "note")


def build_sarif(results: list[dict], rules: dict[str, dict]) -> dict:
    """Assemble a minimal single-run SARIF 2.1.0 document.

    Rules are emitted sorted by id so the output is deterministic.
    """
    driver = {
        "name": "clang-tidy",
        "informationUri": "https://clang.llvm.org/extra/clang-tidy/",
        "rules": sorted(rules.values(), key=lambda rule: rule["id"]),
    }
    return {
        "$schema": "https://json.schemastore.org/sarif-2.1.0.json",
        "version": "2.1.0",
        "runs": [{"tool": {"driver": driver}, "results": results}],
    }


def main() -> int:
    """Convert clang-tidy text output (argv[1]) into a SARIF file (argv[2]).

    A missing input file is treated as empty output (no diagnostics).
    Returns 2 on usage error, 0 otherwise.
    """
    if len(sys.argv) != 3:
        print("usage: clang_tidy_to_sarif.py <input.txt> <output.sarif>", file=sys.stderr)
        return 2

    src = Path(sys.argv[1])
    dst = Path(sys.argv[2])
    diagnostics = src.read_text(encoding="utf-8", errors="replace") if src.exists() else ""

    findings: list[dict] = []
    emitted: set[tuple] = set()
    rule_index: dict[str, dict] = {}

    for line_text in diagnostics.splitlines():
        parsed = LINE_RE.match(line_text)
        if parsed is None:
            continue

        # Notes without a [check-name] fall back to a generic rule id.
        rule_id = parsed.group("rule") or "clang-tidy"
        artifact = parsed.group("file")
        row = int(parsed.group("line"))
        column = int(parsed.group("col"))
        text = parsed.group("message").strip()
        sarif_level = level_from_severity(parsed.group("severity"))

        # Deduplicate identical diagnostics repeated across translation units.
        fingerprint = (artifact, row, column, rule_id, text, sarif_level)
        if fingerprint in emitted:
            continue
        emitted.add(fingerprint)

        rule_index.setdefault(rule_id, {"id": rule_id, "shortDescription": {"text": rule_id}})

        location = {
            "physicalLocation": {
                "artifactLocation": {"uri": artifact},
                "region": {"startLine": row, "startColumn": column},
            }
        }
        findings.append(
            {
                "ruleId": rule_id,
                "level": sarif_level,
                "message": {"text": text},
                "locations": [location],
            }
        )

    document = build_sarif(findings, rule_index)
    dst.write_text(json.dumps(document, indent=2) + "\n", encoding="utf-8")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
Loading