Add Flask web UI, Docker Compose, core engine + tests

- physcom core: CLI, 5-pass pipeline, SQLite repo, 37 tests
- physcom_web: Flask app with HTMX for entity/domain/pipeline/results CRUD
- Docker Compose: web + cli services sharing a named volume for the DB
- Clean up local settings to use wildcard permissions

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Simonson, Andrew
2026-02-18 13:59:53 -06:00
parent 6e0f82835a
commit 8118a62242
54 changed files with 3505 additions and 1 deletion

View File

25
src/physcom/llm/base.py Normal file
View File

@@ -0,0 +1,25 @@
"""Abstract LLM interface — provider-agnostic."""
from __future__ import annotations
from abc import ABC, abstractmethod
class LLMProvider(ABC):
    """Provider-agnostic contract for the LLM-assisted pipeline passes.

    Concrete providers supply two capabilities: numeric physics
    estimation for a described combination, and a prose plausibility
    review of a combination's computed scores.
    """

    @abstractmethod
    def estimate_physics(self, combination_description: str, metrics: list[str]) -> dict[str, float]:
        """Estimate raw metric values for a combination.

        Args:
            combination_description: Natural-language description of
                the combination to evaluate.
            metrics: Names of the metrics to estimate.

        Returns:
            Mapping of each requested metric name to its estimated value.
        """
        ...

    @abstractmethod
    def review_plausibility(self, combination_description: str, scores: dict[str, float]) -> str:
        """Assess a scored combination in natural language.

        Args:
            combination_description: Natural-language description of
                the combination under review.
            scores: Metric name -> score mapping for the combination.

        Returns:
            A natural-language plausibility and novelty assessment.
        """
        ...

View File

@@ -0,0 +1,37 @@
"""Prompt templates for LLM-assisted passes."""
PHYSICS_ESTIMATION_PROMPT = """\
You are a physics estimation assistant. Given the following transportation concept, \
estimate the requested metrics using order-of-magnitude physics reasoning.
## Concept
{description}
## Metrics to estimate
{metrics}
## Instructions
- Use real-world physics to estimate each metric.
- If the concept is implausible, still provide your best estimate.
- Return ONLY valid JSON mapping metric names to numeric values.
- Example: {{"speed": 45.0, "cost_efficiency": 0.15, "safety": 0.7}}
"""
PLAUSIBILITY_REVIEW_PROMPT = """\
You are reviewing a novel transportation concept for social and practical viability.
## Concept
{description}
## Metric Scores
{scores}
## Instructions
Review this concept for:
1. Social viability — would people actually use this?
2. Practical barriers — what engineering or regulatory obstacles exist?
3. Novelty — does anything similar already exist?
4. Overall plausibility — is this a genuinely interesting innovation or nonsense?
Provide a concise 2-4 sentence assessment.
"""

View File

View File

@@ -0,0 +1,28 @@
"""Mock LLM provider for testing."""
from __future__ import annotations
from physcom.llm.base import LLMProvider
class MockLLMProvider(LLMProvider):
    """Deterministic stub provider for tests.

    Estimates are read from a caller-supplied lookup table; any metric
    missing from the table falls back to a neutral value. The review is
    a canned sentence chosen by the mean of the supplied scores.
    """

    # Neutral fallback for metrics absent from the defaults table.
    _FALLBACK_ESTIMATE = 0.5

    def __init__(self, default_estimates: dict[str, float] | None = None) -> None:
        # None (or an empty mapping) means every metric gets the fallback.
        self._defaults = default_estimates or {}

    def estimate_physics(
        self, combination_description: str, metrics: list[str]
    ) -> dict[str, float]:
        """Return {metric: estimate} for every requested metric.

        The description is ignored — the mock keys only on metric names.
        """
        # Dict comprehension instead of a manual fill loop (ruff PERF403).
        return {
            metric: self._defaults.get(metric, self._FALLBACK_ESTIMATE)
            for metric in metrics
        }

    def review_plausibility(
        self, combination_description: str, scores: dict[str, float]
    ) -> str:
        """Return a canned verdict keyed on the mean score.

        max(len(scores), 1) guards the division when *scores* is empty;
        an empty mapping averages to 0.0 and reads as implausible.
        """
        avg = sum(scores.values()) / max(len(scores), 1)
        if avg > 0.5:
            return "This concept appears plausible and worth further investigation."
        return "This concept has significant feasibility challenges."