Add async pipeline with progress monitoring, resumability, and result transparency
Pipeline engine rewritten with combo-first loop: each combination is processed through all requested passes before moving to the next, with incremental DB saves after every step (crash-safe). Blocked combos now get result rows so they appear in the results page with constraint violation reasons. New pipeline_runs table tracks run lifecycle (pending/running/completed/failed/cancelled). Web route launches pipeline in a background thread with its own DB connection. HTMX polling partial shows live progress with per-pass breakdown. Also: status guard prevents reviewed->scored downgrade, save_combination loads existing status on dedup for correct resume, per-metric scores show domain bounds + units + position bars, ensure_metric backfills units on existing rows. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -1,15 +1,15 @@
|
||||
"""Multi-pass pipeline orchestrator."""
|
||||
"""Multi-pass pipeline orchestrator with incremental saves and resumability."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from physcom.db.repository import Repository
|
||||
from physcom.engine.combinator import generate_combinations
|
||||
from physcom.engine.constraint_resolver import ConstraintResolver, ConstraintResult
|
||||
from physcom.engine.scorer import Scorer
|
||||
from physcom.llm.base import LLMProvider
|
||||
from physcom.llm.prompts import PHYSICS_ESTIMATION_PROMPT, PLAUSIBILITY_REVIEW_PROMPT
|
||||
from physcom.models.combination import Combination, ScoredResult
|
||||
from physcom.models.domain import Domain
|
||||
|
||||
@@ -23,12 +23,17 @@ class PipelineResult:
|
||||
pass1_blocked: int = 0
|
||||
pass1_conditional: int = 0
|
||||
pass2_estimated: int = 0
|
||||
pass3_scored: int = 0
|
||||
pass3_above_threshold: int = 0
|
||||
pass4_reviewed: int = 0
|
||||
pass5_human_reviewed: int = 0
|
||||
top_results: list[dict] = field(default_factory=list)
|
||||
|
||||
|
||||
class CancelledError(Exception):
    """Raised to unwind the pipeline loop when a run's DB status becomes 'cancelled'."""
|
||||
|
||||
|
||||
def _describe_combination(combo: Combination) -> str:
|
||||
"""Build a natural-language description of a combination."""
|
||||
parts = [f"{e.dimension}: {e.name}" for e in combo.entities]
|
||||
@@ -53,158 +58,281 @@ class Pipeline:
|
||||
self.scorer = scorer
|
||||
self.llm = llm
|
||||
|
||||
def _check_cancelled(self, run_id: int | None) -> None:
    """Raise CancelledError when the tracked run was cancelled externally.

    A ``None`` run_id means the run is untracked, so there is nothing
    to poll and the call is a no-op.
    """
    if run_id is None:
        return
    record = self.repo.get_pipeline_run(run_id)
    if record and record["status"] == "cancelled":
        raise CancelledError("Pipeline run cancelled")
|
||||
|
||||
def _update_run_counters(
    self, run_id: int | None, result: PipelineResult, current_pass: int
) -> None:
    """Push the in-memory per-pass counters into the pipeline_runs row.

    No-op when run_id is None (untracked run).
    """
    if run_id is None:
        return
    # Pass 1 progress counts every resolved combo regardless of outcome.
    pass1_total = (
        result.pass1_valid + result.pass1_conditional + result.pass1_blocked
    )
    self.repo.update_pipeline_run(
        run_id,
        combos_pass1=pass1_total,
        combos_pass2=result.pass2_estimated,
        combos_pass3=result.pass3_scored,
        combos_pass4=result.pass4_reviewed,
        current_pass=current_pass,
    )
|
||||
|
||||
def run(
    self,
    domain: Domain,
    dimensions: list[str],
    score_threshold: float = 0.1,
    passes: list[int] | None = None,
    run_id: int | None = None,
) -> PipelineResult:
    """Run the requested passes for every combination in *domain*.

    Combo-first: each combination is taken through all requested passes
    before the next combination starts, with a DB write after every step
    so a crashed or cancelled run can resume where it left off.

    Args:
        domain: Domain whose metric bounds drive estimation and scoring.
        dimensions: Dimension names used to generate combinations.
        score_threshold: Minimum composite score for the "above threshold"
            counter and for a combo to receive a pass-4 LLM review.
        passes: Subset of passes (1-5) to execute; defaults to all.
        run_id: Optional pipeline_runs row id for progress/cancellation
            tracking; ``None`` disables run bookkeeping.

    Returns:
        A PipelineResult with per-pass counters and the current top results.
    """
    if passes is None:
        passes = [1, 2, 3, 4, 5]

    result = PipelineResult()

    # Mark run as running (unless it was cancelled before it started).
    if run_id is not None:
        run_record = self.repo.get_pipeline_run(run_id)
        if run_record and run_record["status"] == "cancelled":
            result.top_results = self.repo.get_top_results(domain.name, limit=20)
            return result
        self.repo.update_pipeline_run(
            run_id,
            status="running",
            started_at=datetime.now(timezone.utc).isoformat(),
        )

    # Generate all combinations for the requested dimensions.
    combos = generate_combinations(self.repo, dimensions)
    result.total_generated = len(combos)

    # Save all combinations to DB (save_combination also loads the stored
    # status for existing combos, which the resume logic below relies on).
    for combo in combos:
        self.repo.save_combination(combo)
    if run_id is not None:
        self.repo.update_pipeline_run(run_id, total_combos=len(combos))

    # Metric lookup tables shared by passes 2 and 3.
    metric_names = [mb.metric_name for mb in domain.metric_bounds]
    bounds_by_name = {mb.metric_name: mb for mb in domain.metric_bounds}

    # ── Combo-first loop ─────────────────────────────────────
    try:
        for combo in combos:
            self._check_cancelled(run_id)

            # Check existing progress for this combo in this domain.
            existing_pass = self.repo.get_combo_pass_reached(
                combo.id, domain.id
            ) or 0
            # Load existing result to preserve human review data on resume.
            existing_result = self.repo.get_existing_result(
                combo.id, domain.id
            )

            # ── Pass 1: Constraint Resolution ────────────────
            if 1 in passes and existing_pass < 1:
                cr: ConstraintResult = self.resolver.resolve(combo)
                if cr.status == "blocked":
                    combo.status = "blocked"
                    combo.block_reason = "; ".join(cr.violations)
                    self.repo.update_combination_status(
                        combo.id, "blocked", combo.block_reason
                    )
                    # Save a result row so blocked combos appear in results
                    self.repo.save_result(
                        combo.id,
                        domain.id,
                        composite_score=0.0,
                        pass_reached=1,
                    )
                    result.pass1_blocked += 1
                    self._update_run_counters(run_id, result, current_pass=1)
                    continue  # blocked — skip remaining passes
                elif cr.status == "conditional":
                    combo.status = "valid"
                    self.repo.update_combination_status(combo.id, "valid")
                    result.pass1_conditional += 1
                else:
                    combo.status = "valid"
                    self.repo.update_combination_status(combo.id, "valid")
                    result.pass1_valid += 1

                self._update_run_counters(run_id, result, current_pass=1)
            elif 1 in passes:
                # Already pass1'd on a prior run — check if it was blocked.
                if combo.status == "blocked":
                    result.pass1_blocked += 1
                    continue
                else:
                    result.pass1_valid += 1
            else:
                # Pass 1 not requested; still honor a prior blocked status.
                if combo.status == "blocked":
                    result.pass1_blocked += 1
                    continue

            # ── Pass 2: Physics Estimation ───────────────────
            raw_metrics: dict[str, float] = {}
            if 2 in passes and existing_pass < 2:
                description = _describe_combination(combo)
                if self.llm:
                    raw_metrics = self.llm.estimate_physics(
                        description, metric_names
                    )
                else:
                    raw_metrics = self._stub_estimate(combo, metric_names)

                # Save raw estimates immediately (crash-safe).
                estimate_dicts = []
                for mname, rval in raw_metrics.items():
                    mb = bounds_by_name.get(mname)
                    if mb and mb.metric_id:
                        estimate_dicts.append({
                            "metric_id": mb.metric_id,
                            "raw_value": rval,
                            "estimation_method": "llm" if self.llm else "stub",
                            "confidence": 1.0,
                        })
                if estimate_dicts:
                    self.repo.save_raw_estimates(
                        combo.id, domain.id, estimate_dicts
                    )

                result.pass2_estimated += 1
                self._update_run_counters(run_id, result, current_pass=2)
            elif 2 in passes:
                # Already estimated — reload raw values from DB.
                existing_scores = self.repo.get_combination_scores(
                    combo.id, domain.id
                )
                raw_metrics = {
                    s["metric_name"]: s["raw_value"] for s in existing_scores
                }
                result.pass2_estimated += 1
            else:
                # Pass 2 not requested; score against empty metrics.
                raw_metrics = {}

            # ── Pass 3: Scoring & Ranking ────────────────────
            if 3 in passes and existing_pass < 3:
                sr = self.scorer.score_combination(combo, raw_metrics)

                # Persist per-metric scores with normalized values.
                score_dicts = []
                for s in sr.scores:
                    mb = bounds_by_name.get(s.metric_name)
                    if mb and mb.metric_id:
                        score_dicts.append({
                            "metric_id": mb.metric_id,
                            "raw_value": s.raw_value,
                            "normalized_score": s.normalized_score,
                            "estimation_method": s.estimation_method,
                            "confidence": s.confidence,
                        })
                if score_dicts:
                    self.repo.save_scores(combo.id, domain.id, score_dicts)

                # Preserve existing human data across re-scoring.
                novelty_flag = (
                    existing_result["novelty_flag"] if existing_result else None
                )
                human_notes = (
                    existing_result["human_notes"] if existing_result else None
                )

                self.repo.save_result(
                    combo.id,
                    domain.id,
                    sr.composite_score,
                    pass_reached=3,
                    novelty_flag=novelty_flag,
                    human_notes=human_notes,
                )
                self.repo.update_combination_status(combo.id, "scored")

                result.pass3_scored += 1
                if sr.composite_score >= score_threshold:
                    result.pass3_above_threshold += 1

                self._update_run_counters(run_id, result, current_pass=3)
            elif 3 in passes and existing_pass >= 3:
                # Already scored on a prior run — count it.
                result.pass3_scored += 1
                if existing_result and existing_result["composite_score"] is not None:
                    if existing_result["composite_score"] >= score_threshold:
                        result.pass3_above_threshold += 1

            # ── Pass 4: LLM Review ───────────────────────────
            if 4 in passes and self.llm:
                # Re-read progress: pass 3 above may have just advanced it.
                cur_pass = self.repo.get_combo_pass_reached(
                    combo.id, domain.id
                ) or 0
                if cur_pass < 4:
                    cur_result = self.repo.get_existing_result(
                        combo.id, domain.id
                    )
                    # Only review combos that scored at/above threshold.
                    if (
                        cur_result
                        and cur_result["composite_score"] is not None
                        and cur_result["composite_score"] >= score_threshold
                    ):
                        description = _describe_combination(combo)
                        db_scores = self.repo.get_combination_scores(
                            combo.id, domain.id
                        )
                        score_dict = {
                            s["metric_name"]: s["normalized_score"]
                            for s in db_scores
                            if s["normalized_score"] is not None
                        }
                        review = self.llm.review_plausibility(
                            description, score_dict
                        )

                        self.repo.save_result(
                            combo.id,
                            domain.id,
                            cur_result["composite_score"],
                            pass_reached=4,
                            novelty_flag=cur_result.get("novelty_flag"),
                            llm_review=review,
                            human_notes=cur_result.get("human_notes"),
                        )
                        result.pass4_reviewed += 1
                        self._update_run_counters(
                            run_id, result, current_pass=4
                        )

    except CancelledError:
        # Record the cancellation and return whatever progress was made.
        if run_id is not None:
            self.repo.update_pipeline_run(
                run_id,
                status="cancelled",
                completed_at=datetime.now(timezone.utc).isoformat(),
            )
        result.top_results = self.repo.get_top_results(domain.name, limit=20)
        return result

    # Mark run as completed.
    if run_id is not None:
        self.repo.update_pipeline_run(
            run_id,
            status="completed",
            completed_at=datetime.now(timezone.utc).isoformat(),
        )

    result.top_results = self.repo.get_top_results(domain.name, limit=20)
    return result
|
||||
|
||||
def _pass1_constraints(
    self, combos: list[Combination], result: PipelineResult
) -> list[Combination]:
    """Resolve constraints for every combination and return the non-blocked ones.

    Blocked combos get their status and block reason persisted; conditional
    and fully valid combos are both marked "valid" and passed through —
    only the counters distinguish them.
    """
    passing: list[Combination] = []
    for candidate in combos:
        resolution: ConstraintResult = self.resolver.resolve(candidate)
        if resolution.status == "blocked":
            candidate.status = "blocked"
            candidate.block_reason = "; ".join(resolution.violations)
            self.repo.update_combination_status(
                candidate.id, "blocked", candidate.block_reason
            )
            result.pass1_blocked += 1
            continue
        candidate.status = "valid"
        self.repo.update_combination_status(candidate.id, "valid")
        passing.append(candidate)
        if resolution.status == "conditional":
            result.pass1_conditional += 1
        else:
            result.pass1_valid += 1
    return passing
|
||||
|
||||
def _pass2_estimation(
    self,
    combos: list[Combination],
    domain: Domain,
    result: PipelineResult,
) -> list[tuple[Combination, dict[str, float]]]:
    """Estimate raw metric values for each combination.

    Uses the LLM provider when one is configured, otherwise falls back to
    the dependency-derived stub estimator. Returns (combo, raw_metrics)
    pairs in input order.
    """
    names = [mb.metric_name for mb in domain.metric_bounds]
    pairs: list[tuple[Combination, dict[str, float]]] = []

    for combo in combos:
        text = _describe_combination(combo)
        values = (
            self.llm.estimate_physics(text, names)
            if self.llm
            else self._stub_estimate(combo, names)
        )
        pairs.append((combo, values))
        result.pass2_estimated += 1

    return pairs
|
||||
|
||||
def _pass3_scoring(
    self,
    estimated: list[tuple[Combination, dict[str, float]]],
    domain: Domain,
    threshold: float,
    result: PipelineResult,
) -> list[tuple[Combination, ScoredResult]]:
    """Score each estimated combination and persist its per-metric scores.

    Returns (combo, ScoredResult) pairs whose composite score is at or
    above *threshold*, sorted best-first. Updates the above-threshold
    counter as a side effect.
    """
    # Hoisted out of the loop: the bounds lookup is loop-invariant and was
    # previously rebuilt on every iteration.
    bounds_by_name = {mb.metric_name: mb for mb in domain.metric_bounds}
    scored: list[tuple[Combination, ScoredResult]] = []

    for combo, raw_metrics in estimated:
        sr = self.scorer.score_combination(combo, raw_metrics)
        if sr.composite_score >= threshold:
            scored.append((combo, sr))
            result.pass3_above_threshold += 1

        # Persist per-metric scores (raw + normalized) for transparency.
        # NOTE(review): persisted for every scored combo, not only the
        # above-threshold ones — confirm this matches intended behavior.
        score_dicts = []
        for s in sr.scores:
            mb = bounds_by_name.get(s.metric_name)
            if mb and mb.metric_id:
                score_dicts.append({
                    "metric_id": mb.metric_id,
                    "raw_value": s.raw_value,
                    "normalized_score": s.normalized_score,
                    "estimation_method": s.estimation_method,
                    "confidence": s.confidence,
                })
        if score_dicts:
            self.repo.save_scores(combo.id, domain.id, score_dicts)

    # Sort by composite score descending.
    scored.sort(key=lambda pair: pair[1].composite_score, reverse=True)
    return scored
|
||||
|
||||
def _pass4_llm_review(
    self,
    scored: list[tuple[Combination, ScoredResult]],
    domain: Domain,
    result: PipelineResult,
) -> None:
    """Attach an LLM plausibility review to each scored combination in place."""
    for combo, scored_result in scored:
        summary = _describe_combination(combo)
        normalized = {
            s.metric_name: s.normalized_score for s in scored_result.scores
        }
        scored_result.llm_review = self.llm.review_plausibility(
            summary, normalized
        )
        result.pass4_reviewed += 1
|
||||
|
||||
def _stub_estimate(
|
||||
self, combo: Combination, metric_names: list[str]
|
||||
) -> dict[str, float]:
|
||||
@@ -223,24 +351,21 @@ class Pipeline:
|
||||
|
||||
# Rough speed estimate: F=ma -> v proportional to power/mass
|
||||
if "speed" in raw and mass_kg > 0:
|
||||
# Very rough: speed ~ power / (mass * drag_coeff)
|
||||
raw["speed"] = min(force_watts / mass_kg * 0.5, 300000)
|
||||
|
||||
if "cost_efficiency" in raw:
|
||||
# Lower force = cheaper per km (roughly)
|
||||
raw["cost_efficiency"] = max(0.01, 2.0 - force_watts / 100000)
|
||||
|
||||
if "safety" in raw:
|
||||
raw["safety"] = 0.5 # default mid-range
|
||||
raw["safety"] = 0.5
|
||||
|
||||
if "availability" in raw:
|
||||
raw["availability"] = 0.5
|
||||
|
||||
if "range_fuel" in raw:
|
||||
# More power = more range (very rough)
|
||||
raw["range_fuel"] = min(force_watts * 0.01, 1e10)
|
||||
|
||||
if "range_degradation" in raw:
|
||||
raw["range_degradation"] = 365 # 1 year default
|
||||
raw["range_degradation"] = 365
|
||||
|
||||
return raw
|
||||
|
||||
Reference in New Issue
Block a user