Handle LLM rate limits gracefully — stop and resume rather than fail

- Add LLMRateLimitError to llm/base.py (provider-agnostic)
- GeminiLLMProvider raises it on 429/RESOURCE_EXHAUSTED responses
- Pipeline catches it, marks the run completed (not failed), and returns
  partial results — already-reviewed combos are saved, and re-running
  pass 4 resumes from where it left off

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -9,7 +9,7 @@ from physcom.db.repository import Repository
 from physcom.engine.combinator import generate_combinations
 from physcom.engine.constraint_resolver import ConstraintResolver, ConstraintResult
 from physcom.engine.scorer import Scorer
-from physcom.llm.base import LLMProvider
+from physcom.llm.base import LLMProvider, LLMRateLimitError
 from physcom.models.combination import Combination, ScoredResult
 from physcom.models.domain import Domain
 
@@ -184,12 +184,9 @@ class Pipeline:
         if 2 in passes and existing_pass < 2:
             description = _describe_combination(combo)
             if self.llm:
-                try:
-                    raw_metrics = self.llm.estimate_physics(
-                        description, metric_names
-                    )
-                except Exception:
-                    raw_metrics = self._stub_estimate(combo, metric_names)
+                raw_metrics = self.llm.estimate_physics(
+                    description, metric_names
+                )
             else:
                 raw_metrics = self._stub_estimate(combo, metric_names)
 
@@ -287,34 +284,31 @@ class Pipeline:
                 and cur_result["composite_score"] is not None
                 and cur_result["composite_score"] >= score_threshold
             ):
-                try:
-                    description = _describe_combination(combo)
-                    db_scores = self.repo.get_combination_scores(
-                        combo.id, domain.id
-                    )
-                    score_dict = {
-                        s["metric_name"]: s["normalized_score"]
-                        for s in db_scores
-                        if s["normalized_score"] is not None
-                    }
-                    review = self.llm.review_plausibility(
-                        description, score_dict
-                    )
-                    self.repo.save_result(
-                        combo.id,
-                        domain.id,
-                        cur_result["composite_score"],
-                        pass_reached=4,
-                        novelty_flag=cur_result.get("novelty_flag"),
-                        llm_review=review,
-                        human_notes=cur_result.get("human_notes"),
-                    )
-                    result.pass4_reviewed += 1
-                    self._update_run_counters(
-                        run_id, result, current_pass=4
-                    )
-                except Exception:
-                    pass  # skip this combo; don't abort the run
+                description = _describe_combination(combo)
+                db_scores = self.repo.get_combination_scores(
+                    combo.id, domain.id
+                )
+                score_dict = {
+                    s["metric_name"]: s["normalized_score"]
+                    for s in db_scores
+                    if s["normalized_score"] is not None
+                }
+                review = self.llm.review_plausibility(
+                    description, score_dict
+                )
+                self.repo.save_result(
+                    combo.id,
+                    domain.id,
+                    cur_result["composite_score"],
+                    pass_reached=4,
+                    novelty_flag=cur_result.get("novelty_flag"),
+                    llm_review=review,
+                    human_notes=cur_result.get("human_notes"),
+                )
+                result.pass4_reviewed += 1
+                self._update_run_counters(
+                    run_id, result, current_pass=4
+                )
 
         except CancelledError:
             if run_id is not None:
@@ -325,6 +319,17 @@ class Pipeline:
                 )
             result.top_results = self.repo.get_top_results(domain.name, limit=20)
             return result
+        except LLMRateLimitError:
+            # Rate limit hit — save progress and let the user re-run to continue.
+            # Already-reviewed combos are persisted; resumability skips them next time.
+            if run_id is not None:
+                self.repo.update_pipeline_run(
+                    run_id,
+                    status="completed",
+                    completed_at=datetime.now(timezone.utc).isoformat(),
+                )
+            result.top_results = self.repo.get_top_results(domain.name, limit=20)
+            return result
 
         # Mark run as completed
         if run_id is not None:
Reference in New Issue
Block a user