wire neuropose analyze to run_analysis
Replaces the placeholder stub that returned EXIT_PENDING with a real analyze --config <yaml> [--output <json>] command. Loads the YAML, validates through AnalysisConfig (so typos fail with a clear ValidationError before any predictions load), runs the pipeline, and writes the AnalysisReport atomically. Surfaces YAML parse errors and schema violations as EXIT_USAGE=2 with messages pointing at the offending file. Missing predictions files during execution also surface as EXIT_USAGE rather than a bare traceback. Prints a one-line summary after the run: segmentation counts, the analysis kind, and — for DTW — the per-segment distance count and mean. --output / -o overrides the report path declared in the config, useful when sweeping a single config over multiple input pairs from a shell loop.
This commit is contained in:
parent
dc48988450
commit
01b374451f
16
CHANGELOG.md
16
CHANGELOG.md
|
|
@@ -292,8 +292,16 @@ be split into per-release sections once tagging begins.
|
|||
`"left_heel_strikes[i]"` / `"right_heel_strikes[i]"` labels.
|
||||
`load_config(path)` parses YAML, `save_report(path, report)`
|
||||
writes atomically, and `load_report(path)` rehydrates via the
|
||||
migration chain. CLI wiring and example configs land in
|
||||
follow-up commits.
|
||||
migration chain. Wired to the CLI as `neuropose analyze --config
|
||||
<yaml> [--output <json>]` — replaces the placeholder stub that
|
||||
previously returned `EXIT_PENDING`. The CLI surfaces schema
|
||||
violations and YAML parse errors as `EXIT_USAGE=2` with a clear
|
||||
message pointing at the offending file, prints a one-line summary
|
||||
of the run (segmentation counts, analysis kind, per-segment
|
||||
distance count + mean for DTW), and supports `--output`/`-o` to
|
||||
override the report path declared in the config (useful for
|
||||
sweeping a single config over multiple input pairs from a shell
|
||||
loop). Example configs land in a follow-up commit.
|
||||
- **`neuropose.analyzer.segment.segment_gait_cycles`** and
|
||||
**`segment_gait_cycles_bilateral`** — clinical convenience
|
||||
wrappers over `segment_predictions` that pre-fill a `joint_axis`
|
||||
|
|
@@ -435,7 +443,9 @@ be split into per-release sections once tagging begins.
|
|||
the resulting `poses3d` arrays, and reports throughput speedup
|
||||
and max divergence in mm — the missing Apple Silicon numerical
|
||||
verification answer from `RESEARCH.md`), and
|
||||
`analyze <results>` (stub). The `segment` subcommand accepts
|
||||
`analyze --config <yaml>` (run the declarative analysis
|
||||
pipeline — see the dedicated entry above for scope). The
|
||||
`segment` subcommand accepts
|
||||
joint specifiers as either berkeley_mhad_43 names (`lwri`,
|
||||
`rwri`, …) or integer indices, and refuses to overwrite an
|
||||
existing segmentation of the same name without `--force`.
|
||||
|
|
|
|||
|
|
@@ -25,8 +25,11 @@ Eight subcommands:
|
|||
vs CPU numerical-divergence checks. Prints a human report to stdout
|
||||
and (optionally) writes a structured :class:`~neuropose.io.BenchmarkResult`
|
||||
JSON to ``--output``.
|
||||
- ``neuropose analyze <results>`` — stubbed placeholder pending the
|
||||
analyzer rewrite in commit 10.
|
||||
- ``neuropose analyze --config <yaml>`` — run the declarative analysis
|
||||
pipeline described in a YAML config. Loads the named predictions
|
||||
files, applies segmentation + analysis, writes an
|
||||
:class:`~neuropose.analyzer.pipeline.AnalysisReport` JSON. See
|
||||
``examples/analysis/*.yaml`` for runnable references.
|
||||
|
||||
User-facing error handling
|
||||
--------------------------
|
||||
|
|
@@ -57,6 +60,7 @@ from pathlib import Path
|
|||
from typing import Annotated
|
||||
|
||||
import typer
|
||||
import yaml
|
||||
from pydantic import ValidationError
|
||||
|
||||
from neuropose import __version__
|
||||
|
|
@@ -1196,26 +1200,94 @@ def benchmark(
|
|||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# analyze (stub)
|
||||
# analyze
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@app.command()
def analyze(
    ctx: typer.Context,
    config: Annotated[
        Path,
        typer.Option(
            "--config",
            "-c",
            help=(
                "Path to a YAML AnalysisConfig file. See examples/analysis/ "
                "for runnable references."
            ),
        ),
    ],
    output: Annotated[
        Path | None,
        typer.Option(
            "--output",
            "-o",
            help=(
                "Override the report path declared in the config's "
                "output.report field. Useful when running the same config "
                "against multiple input pairs from a shell loop."
            ),
        ),
    ] = None,
) -> None:
    """Run the declarative analysis pipeline described by a YAML config.

    Loads the config, parses it through
    :class:`~neuropose.analyzer.pipeline.AnalysisConfig` (so typos fail
    immediately with a clear error), executes the pipeline via
    :func:`~neuropose.analyzer.pipeline.run_analysis`, and writes the
    resulting :class:`~neuropose.analyzer.pipeline.AnalysisReport` to
    ``--output`` (or to ``output.report`` declared in the config).

    Cross-field invariants (for example,
    ``method='dtw_relation'`` requires ``joint_i`` / ``joint_j``) are
    enforced at parse time, so a typo fails before any predictions
    are loaded.
    """
    del ctx
    # Deferred import keeps the CLI module's top-level imports free of
    # pipeline dependencies so ``watch`` / ``process`` startup stays
    # cheap.
    from neuropose.analyzer.pipeline import load_config, run_analysis, save_report

    # Fail fast with a targeted message rather than letting load_config
    # raise a bare FileNotFoundError traceback.
    if not config.exists():
        typer.echo(f"error: config file not found: {config}", err=True)
        raise typer.Exit(code=EXIT_USAGE)

    # Both schema violations (pydantic) and malformed YAML surface as
    # EXIT_USAGE with a message pointing at the offending file.
    try:
        analysis_config = load_config(config)
    except ValidationError as exc:
        typer.echo(f"error: invalid config {config}:\n{exc}", err=True)
        raise typer.Exit(code=EXIT_USAGE) from exc
    except yaml.YAMLError as exc:
        typer.echo(f"error: could not parse YAML {config}: {exc}", err=True)
        raise typer.Exit(code=EXIT_USAGE) from exc

    # --output / -o takes precedence over the path declared in the config.
    report_path = output if output is not None else analysis_config.output.report

    # Missing predictions files (FileNotFoundError) or bad pipeline inputs
    # (ValueError) during execution are usage errors, not crashes.
    try:
        report = run_analysis(analysis_config)
    except (FileNotFoundError, ValueError) as exc:
        typer.echo(f"error: analysis failed: {exc}", err=True)
        raise typer.Exit(code=EXIT_USAGE) from exc

    save_report(report_path, report)

    typer.echo(f"wrote analysis report to {report_path}")
    if report.segmentations:
        seg_summary = ", ".join(
            f"{name}={len(seg.segments)}" for name, seg in report.segmentations.items()
        )
        typer.echo(f"segmentations: {seg_summary}")
    # Emit a one-line summary of the results regardless of kind.
    typer.echo(f"analysis kind: {report.results.kind}")
    if report.results.kind == "dtw":
        n = len(report.results.distances)
        # summary may omit "mean" (e.g. zero segments); nan renders safely
        # under the :.4f format spec.
        mean = report.results.summary.get("mean", float("nan"))
        typer.echo(f"distances computed: {n} (mean={mean:.4f})")
    elif report.results.kind == "stats":
        typer.echo(f"statistic blocks computed: {len(report.results.statistics)}")
def run() -> None:
|
||||
|
|
|
|||
|
|
@@ -776,17 +776,142 @@ class TestBenchmarkSubcommand:
|
|||
|
||||
|
||||
class TestAnalyze:
    """Covers the ``neuropose analyze --config <yaml>`` subcommand.

    Execution happy path is exercised in detail in
    :mod:`tests.unit.test_analyzer_pipeline` — this file focuses on
    the CLI wiring: argument parsing, config-loading error modes, and
    end-to-end smoke.
    """

    def _make_predictions_file(self, tmp_path: Path, name: str, num_frames: int = 30) -> Path:
        """Write a trivial VideoPredictions file to disk for the CLI to load."""
        import math

        from neuropose.io import VideoPredictions, save_video_predictions

        num_joints = 43
        frames = {}
        for i in range(num_frames):
            poses = [[[0.0, 0.0, 0.0] for _ in range(num_joints)]]
            poses[0][41][1] = float(math.sin(i * 0.3)) * 100.0  # rhee Y
            frames[f"frame_{i:06d}"] = {
                "boxes": [[0.0, 0.0, 1.0, 1.0, 0.9]],
                "poses3d": poses,
                "poses2d": [[[0.0, 0.0]] * num_joints],
            }
        preds = VideoPredictions.model_validate(
            {
                "metadata": {
                    "frame_count": num_frames,
                    "fps": 30.0,
                    "width": 640,
                    "height": 480,
                },
                "frames": frames,
            }
        )
        path = tmp_path / name
        save_video_predictions(path, preds)
        return path

    def _write_dtw_config(
        self,
        tmp_path: Path,
        *,
        primary: Path,
        reference: Path,
        report: Path,
    ) -> Path:
        """Write a minimal dtw_all AnalysisConfig YAML and return its path."""
        import yaml as _yaml

        config_path = tmp_path / "config.yaml"
        config_path.write_text(
            _yaml.safe_dump(
                {
                    "inputs": {"primary": str(primary), "reference": str(reference)},
                    "analysis": {"kind": "dtw", "method": "dtw_all"},
                    "output": {"report": str(report)},
                }
            )
        )
        return config_path

    def test_missing_config_is_usage_error(self, runner: CliRunner, tmp_path: Path) -> None:
        result = runner.invoke(app, ["analyze", "--config", str(tmp_path / "nope.yaml")])
        assert result.exit_code == EXIT_USAGE
        assert "config file not found" in result.output

    def test_missing_config_flag_is_usage_error(self, runner: CliRunner) -> None:
        result = runner.invoke(app, ["analyze"])
        assert result.exit_code == EXIT_USAGE

    def test_invalid_yaml_is_usage_error(self, runner: CliRunner, tmp_path: Path) -> None:
        bad = tmp_path / "bad.yaml"
        bad.write_text("inputs: {primary: foo\n")  # unclosed flow mapping
        result = runner.invoke(app, ["analyze", "--config", str(bad)])
        assert result.exit_code == EXIT_USAGE
        assert "could not parse YAML" in result.output

    def test_schema_violation_is_usage_error(self, runner: CliRunner, tmp_path: Path) -> None:
        import yaml as _yaml

        bad = tmp_path / "schema.yaml"
        bad.write_text(
            _yaml.safe_dump(
                {
                    "inputs": {"primary": str(tmp_path / "a.json")},
                    # dtw without reference — violates cross-field invariant.
                    "analysis": {"kind": "dtw", "method": "dtw_all"},
                    "output": {"report": str(tmp_path / "r.json")},
                }
            )
        )
        result = runner.invoke(app, ["analyze", "--config", str(bad)])
        assert result.exit_code == EXIT_USAGE
        assert "invalid config" in result.output

    def test_happy_path_writes_report(self, runner: CliRunner, tmp_path: Path) -> None:
        primary = self._make_predictions_file(tmp_path, "a.json")
        reference = self._make_predictions_file(tmp_path, "b.json")
        report_path = tmp_path / "report.json"
        config = self._write_dtw_config(
            tmp_path, primary=primary, reference=reference, report=report_path
        )
        result = runner.invoke(app, ["analyze", "--config", str(config)])
        assert result.exit_code == EXIT_OK, result.output
        assert report_path.exists()
        assert "wrote analysis report" in result.output
        assert "analysis kind: dtw" in result.output

    def test_output_option_overrides_config_path(self, runner: CliRunner, tmp_path: Path) -> None:
        primary = self._make_predictions_file(tmp_path, "a.json")
        reference = self._make_predictions_file(tmp_path, "b.json")
        # Config points at one report path ...
        config = self._write_dtw_config(
            tmp_path,
            primary=primary,
            reference=reference,
            report=tmp_path / "declared.json",
        )
        # ... but --output overrides.
        override = tmp_path / "override.json"
        result = runner.invoke(app, ["analyze", "--config", str(config), "--output", str(override)])
        assert result.exit_code == EXIT_OK, result.output
        assert override.exists()
        assert not (tmp_path / "declared.json").exists()

    def test_missing_predictions_file_is_usage_error(
        self, runner: CliRunner, tmp_path: Path
    ) -> None:
        # Config points at a primary that does not exist.
        config = self._write_dtw_config(
            tmp_path,
            primary=tmp_path / "missing_primary.json",
            reference=tmp_path / "missing_reference.json",
            report=tmp_path / "report.json",
        )
        result = runner.invoke(app, ["analyze", "--config", str(config)])
        assert result.exit_code == EXIT_USAGE
||||
|
||||
|
|
|
|||
Loading…
Reference in New Issue