first commit

2026-02-27 21:28:09 +03:00
parent 43c404f958
commit 1bc57a7c25
171 changed files with 6400 additions and 556 deletions


@@ -0,0 +1,61 @@
from __future__ import annotations
from app.modules.agent.engine.orchestrator.actions.edit_actions import EditActions
from app.modules.agent.engine.orchestrator.execution_context import ExecutionContext
from app.modules.agent.engine.orchestrator.models import (
ExecutionPlan,
OutputContract,
RoutingMeta,
Scenario,
TaskConstraints,
TaskSpec,
)
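# Builds a minimal ExecutionContext for a targeted-edit task: the user message
# references "readme.md" in lower case while files_map only contains "README.md".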
def _ctx() -> ExecutionContext:
task = TaskSpec(
task_id="task-1",
dialog_session_id="dialog-1",
rag_session_id="rag-1",
mode="auto",
user_message="Добавь в readme.md в конце строку про автора",
scenario=Scenario.TARGETED_EDIT,
routing=RoutingMeta(domain_id="project", process_id="edits", confidence=0.95, reason="test"),
constraints=TaskConstraints(allow_writes=True),
output_contract=OutputContract(result_type="changeset"),
metadata={
"files_map": {
"README.md": {
"path": "README.md",
"content": "# Title\n",
"content_hash": "hash123",
}
}
},
)
plan = ExecutionPlan(
plan_id="plan-1",
task_id="task-1",
scenario=Scenario.TARGETED_EDIT,
template_id="targeted_edit_v1",
template_version="1.0",
steps=[],
)
return ExecutionContext(task=task, plan=plan, graph_resolver=lambda *_: None, graph_invoker=lambda *_: {})
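# Verifies that the edit pipeline resolves the target path case-insensitively
# (readme.md -> README.md) and emits an "update" operation for the existing file
# rather than creating a new one.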
def test_edit_actions_resolve_path_case_insensitive_and_keep_update() -> None:
actions = EditActions()
ctx = _ctx()
actions.resolve_target(ctx)
actions.load_target_context(ctx)
actions.plan_minimal_patch(ctx)
actions.generate_patch(ctx)
target = ctx.artifacts.get_content("target_context", {})
changeset = ctx.artifacts.get_content("raw_changeset", [])
assert target["path"] == "README.md"
assert changeset[0]["path"] == "README.md"
assert changeset[0]["op"] == "update"


@@ -0,0 +1,56 @@
import asyncio
import pytest
from app.modules.agent.engine.orchestrator.models import OutputContract, RoutingMeta, Scenario, TaskConstraints, TaskSpec
from app.modules.agent.engine.orchestrator.service import OrchestratorService
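# Each scenario is run end-to-end through the orchestrator with stubbed graph
# callbacks; write-enabled scenarios are expected to produce a changeset, while
# read-only scenarios must return a textual answer instead.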
@pytest.mark.parametrize(
"scenario,expect_changeset",
[
(Scenario.EXPLAIN_PART, False),
(Scenario.ANALYTICS_REVIEW, False),
(Scenario.DOCS_FROM_ANALYTICS, True),
(Scenario.TARGETED_EDIT, True),
(Scenario.GHERKIN_MODEL, True),
],
)
def test_eval_suite_scenarios_run(scenario: Scenario, expect_changeset: bool) -> None:
service = OrchestratorService()
task = TaskSpec(
task_id=f"task-{scenario.value}",
dialog_session_id="dialog-1",
rag_session_id="rag-1",
mode="auto",
user_message="Please process this scenario using project docs and requirements.",
scenario=scenario,
routing=RoutingMeta(domain_id="project", process_id="qa", confidence=0.95, reason="eval"),
constraints=TaskConstraints(
allow_writes=scenario in {Scenario.DOCS_FROM_ANALYTICS, Scenario.TARGETED_EDIT, Scenario.GHERKIN_MODEL},
max_steps=20,
max_retries_per_step=2,
step_timeout_sec=90,
),
output_contract=OutputContract(result_type="answer"),
attachments=[{"type": "http_url", "value": "https://example.com/doc"}],
metadata={
"rag_context": "Requirements context is available.",
"confluence_context": "",
"files_map": {"docs/api/increment.md": {"content": "old", "content_hash": "h1"}},
},
)
result = asyncio.run(
service.run(
task=task,
graph_resolver=lambda _domain, _process: object(),
graph_invoker=lambda _graph, _state, _dialog: {"answer": "fallback", "changeset": []},
)
)
assert result.meta["plan"]["status"] in {"completed", "partial"}
assert bool(result.changeset) is expect_changeset
if not expect_changeset:
assert result.answer


@@ -0,0 +1,72 @@
import asyncio
from app.modules.agent.engine.orchestrator.models import (
OutputContract,
RoutingMeta,
Scenario,
TaskConstraints,
TaskSpec,
)
from app.modules.agent.engine.orchestrator.service import OrchestratorService
class DummyGraph:
pass
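# Builds a TaskSpec for the given scenario; writes are only allowed for the
# scenarios that are expected to produce a changeset.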
def _task(scenario: Scenario) -> TaskSpec:
allow_writes = scenario in {Scenario.DOCS_FROM_ANALYTICS, Scenario.TARGETED_EDIT, Scenario.GHERKIN_MODEL}
return TaskSpec(
task_id="task-1",
dialog_session_id="dialog-1",
rag_session_id="rag-1",
mode="auto",
user_message="Explain this module",
scenario=scenario,
routing=RoutingMeta(domain_id="project", process_id="qa", confidence=0.95, reason="unit-test"),
constraints=TaskConstraints(allow_writes=allow_writes, max_steps=16, max_retries_per_step=2, step_timeout_sec=90),
output_contract=OutputContract(result_type="answer"),
metadata={
"rag_context": "RAG",
"confluence_context": "",
"files_map": {},
},
)
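# The resolver and invoker callbacks assert that the orchestrator forwards the
# routing (domain/process), the user message, and the dialog session id unchanged.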
def test_orchestrator_service_returns_answer() -> None:
service = OrchestratorService()
def graph_resolver(domain_id: str, process_id: str):
assert domain_id == "project"
assert process_id == "qa"
return DummyGraph()
def graph_invoker(_graph, state: dict, dialog_session_id: str):
assert state["message"] == "Explain this module"
assert dialog_session_id == "dialog-1"
return {"answer": "It works.", "changeset": []}
result = asyncio.run(service.run(task=_task(Scenario.GENERAL_QA), graph_resolver=graph_resolver, graph_invoker=graph_invoker))
assert result.answer == "It works."
assert result.meta["plan"]["status"] == "completed"
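# The stub invoker returns an empty changeset, so any items in result.changeset
# must have been produced by the orchestrator itself rather than by the sub-graph.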
def test_orchestrator_service_generates_changeset_for_docs_scenario() -> None:
service = OrchestratorService()
def graph_resolver(_domain_id: str, _process_id: str):
return DummyGraph()
def graph_invoker(_graph, _state: dict, _dialog_session_id: str):
return {"answer": "unused", "changeset": []}
result = asyncio.run(
service.run(
task=_task(Scenario.DOCS_FROM_ANALYTICS),
graph_resolver=graph_resolver,
graph_invoker=graph_invoker,
)
)
assert result.meta["plan"]["status"] == "completed"
assert len(result.changeset) > 0


@@ -0,0 +1,49 @@
from app.modules.agent.engine.orchestrator.models import (
ExecutionPlan,
OutputContract,
PlanStep,
RetryPolicy,
RoutingMeta,
Scenario,
TaskConstraints,
TaskSpec,
)
from app.modules.agent.engine.orchestrator.plan_validator import PlanValidator
def _task(*, allow_writes: bool) -> TaskSpec:
return TaskSpec(
task_id="t1",
dialog_session_id="d1",
rag_session_id="r1",
mode="auto",
user_message="hello",
scenario=Scenario.GENERAL_QA,
routing=RoutingMeta(domain_id="default", process_id="general", confidence=0.9, reason="test"),
constraints=TaskConstraints(allow_writes=allow_writes, max_steps=10, max_retries_per_step=2, step_timeout_sec=60),
output_contract=OutputContract(result_type="answer"),
)
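# A plan containing a step with side_effect="write" must be rejected when the
# task's constraints disallow writes; the validator reports the offending step id.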
def test_plan_validator_rejects_write_step_when_not_allowed() -> None:
plan = ExecutionPlan(
plan_id="p1",
task_id="t1",
scenario=Scenario.GENERAL_QA,
template_id="tmp",
template_version="1.0",
steps=[
PlanStep(
step_id="s1",
title="write",
action_id="collect_state",
executor="function",
side_effect="write",
retry=RetryPolicy(max_attempts=1),
)
],
)
errors = PlanValidator().validate(plan, _task(allow_writes=False))
assert "write_step_not_allowed:s1" in errors


@@ -0,0 +1,42 @@
import asyncio
from app.modules.agent.engine.orchestrator.models import OutputContract, OutputSection, RoutingMeta, Scenario, TaskConstraints, TaskSpec
from app.modules.agent.engine.orchestrator.service import OrchestratorService
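# Runs a read-only explain task whose output contract requires three sections and
# checks that the quality block in result.meta carries scored faithfulness and
# coverage metrics plus an overall status.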
def test_quality_metrics_present_and_scored() -> None:
service = OrchestratorService()
task = TaskSpec(
task_id="quality-1",
dialog_session_id="dialog-1",
rag_session_id="rag-1",
mode="auto",
user_message="Explain architecture",
scenario=Scenario.EXPLAIN_PART,
routing=RoutingMeta(domain_id="project", process_id="qa", confidence=0.9, reason="test"),
constraints=TaskConstraints(allow_writes=False),
output_contract=OutputContract(
result_type="answer",
sections=[
OutputSection(name="sequence_diagram", format="mermaid"),
OutputSection(name="use_cases", format="markdown"),
OutputSection(name="summary", format="markdown"),
],
),
metadata={"rag_context": "A\nB", "confluence_context": "", "files_map": {}},
)
result = asyncio.run(
service.run(
task=task,
graph_resolver=lambda _d, _p: object(),
graph_invoker=lambda _g, _s, _id: {"answer": "unused", "changeset": []},
)
)
quality = result.meta.get("quality", {})
assert quality
assert quality.get("faithfulness", {}).get("score") is not None
assert quality.get("coverage", {}).get("score") is not None
assert quality.get("status") in {"ok", "needs_review", "fail"}
assert quality.get("coverage", {}).get("covered_count", 0) >= 1


@@ -0,0 +1,50 @@
from app.modules.agent.engine.orchestrator.execution_context import ExecutionContext
from app.modules.agent.engine.orchestrator.models import (
    ArtifactType,
    OutputContract,
    OutputSection,
    PlanStatus,
    RoutingMeta,
    Scenario,
    TaskConstraints,
    TaskSpec,
)
from app.modules.agent.engine.orchestrator.quality_metrics import QualityMetricsCalculator
from app.modules.agent.engine.orchestrator.template_registry import ScenarioTemplateRegistry
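# The final answer artifact only provides summary text, so coverage must drop
# below 1.0 and report "sequence_diagram" among the missing required sections.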
def test_quality_metrics_coverage_reflects_missing_required_sections() -> None:
task = TaskSpec(
task_id="quality-2",
dialog_session_id="dialog-1",
rag_session_id="rag-1",
mode="auto",
user_message="Explain architecture",
scenario=Scenario.EXPLAIN_PART,
routing=RoutingMeta(domain_id="project", process_id="qa", confidence=0.9, reason="test"),
constraints=TaskConstraints(allow_writes=False),
output_contract=OutputContract(
result_type="answer",
sections=[
OutputSection(name="sequence_diagram", format="mermaid"),
OutputSection(name="use_cases", format="markdown"),
OutputSection(name="summary", format="markdown"),
],
),
metadata={"rag_context": "A", "confluence_context": "", "files_map": {}},
)
plan = ScenarioTemplateRegistry().build(task)
plan.status = PlanStatus.COMPLETED
ctx = ExecutionContext(
task=task,
plan=plan,
graph_resolver=lambda _d, _p: object(),
graph_invoker=lambda _g, _s, _id: {},
)
ctx.artifacts.put(key="final_answer", artifact_type=ArtifactType.TEXT, content="Only summary text")
metrics = QualityMetricsCalculator().build(ctx, step_results=[])
assert metrics["coverage"]["score"] < 1.0
assert "sequence_diagram" in metrics["coverage"]["missing_items"]


@@ -0,0 +1,38 @@
from app.modules.agent.engine.orchestrator.models import OutputContract, RoutingMeta, Scenario, TaskConstraints, TaskSpec
from app.modules.agent.engine.orchestrator.template_registry import ScenarioTemplateRegistry
def _task(scenario: Scenario) -> TaskSpec:
return TaskSpec(
task_id="t1",
dialog_session_id="d1",
rag_session_id="r1",
mode="auto",
user_message="run scenario",
scenario=scenario,
routing=RoutingMeta(domain_id="project", process_id="qa", confidence=0.9, reason="test"),
constraints=TaskConstraints(
allow_writes=scenario in {Scenario.DOCS_FROM_ANALYTICS, Scenario.TARGETED_EDIT, Scenario.GHERKIN_MODEL}
),
output_contract=OutputContract(result_type="answer"),
metadata={"rag_context": "ctx", "confluence_context": "", "files_map": {}},
)
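# Each multi-step scenario template is expected to expose its characteristic
# step ids and to contain at least the stated number of steps.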
def test_template_registry_has_multi_step_review_docs_edit_gherkin() -> None:
registry = ScenarioTemplateRegistry()
review_steps = [step.step_id for step in registry.build(_task(Scenario.ANALYTICS_REVIEW)).steps]
docs_steps = [step.step_id for step in registry.build(_task(Scenario.DOCS_FROM_ANALYTICS)).steps]
edit_steps = [step.step_id for step in registry.build(_task(Scenario.TARGETED_EDIT)).steps]
gherkin_steps = [step.step_id for step in registry.build(_task(Scenario.GHERKIN_MODEL)).steps]
assert "structural_check" in review_steps and "compose_review_report" in review_steps
assert "extract_change_intents" in docs_steps and "build_changeset" in docs_steps
assert "resolve_target" in edit_steps and "finalize_changeset" in edit_steps
assert "generate_gherkin_bundle" in gherkin_steps and "validate_coverage" in gherkin_steps
assert len(review_steps) >= 7
assert len(docs_steps) >= 9
assert len(edit_steps) >= 7
assert len(gherkin_steps) >= 8


@@ -0,0 +1,98 @@
from __future__ import annotations
from app.modules.rag_repo.webhook_service import RepoWebhookService
class FakeStoryWriter:
def __init__(self) -> None:
self.calls: list[dict] = []
def record_story_commit(self, **kwargs) -> None:
self.calls.append(kwargs)
class FakeCacheWriter:
def __init__(self) -> None:
self.calls: list[dict] = []
def record_repo_cache(self, **kwargs) -> None:
self.calls.append(kwargs)
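# The fakes simply record the keyword arguments they receive so the tests can
# assert on what the webhook service wrote.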
def test_gitea_webhook_binds_story() -> None:
writer = FakeStoryWriter()
cache = FakeCacheWriter()
service = RepoWebhookService(writer, cache)
result = service.process(
provider="gitea",
payload={
"repository": {"full_name": "acme/proj"},
"ref": "refs/heads/feature/AAAA-1234",
"pusher": {"username": "alice"},
"commits": [
{
"id": "abc123",
"message": "FEAT-1 update docs",
"added": ["docs/new.md"],
"modified": ["docs/api.md"],
"removed": [],
}
],
},
)
assert result["accepted"] is True
assert result["story_bound"] is True
assert result["story_id"] == "FEAT-1"
assert result["cache_recorded"] is True
assert len(writer.calls) == 1
assert len(cache.calls) == 1
assert writer.calls[0]["project_id"] == "acme/proj"
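# A Bitbucket-style push to a branch without a story id is still accepted and
# cached, but no story commit is recorded.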
def test_webhook_without_story_id_is_non_fatal() -> None:
writer = FakeStoryWriter()
cache = FakeCacheWriter()
service = RepoWebhookService(writer, cache)
result = service.process(
provider="bitbucket",
payload={
"repository": {"full_name": "acme/proj"},
"push": {
"changes": [
{
"new": {
"name": "feature/no-story",
"target": {"hash": "abc123", "message": "update docs"},
}
}
]
},
},
)
assert result["accepted"] is True
assert result["story_bound"] is False
assert result["cache_recorded"] is True
assert len(cache.calls) == 1
assert writer.calls == []
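# When no provider is passed explicitly, the service is expected to detect Gitea
# from the X-Gitea-Event header and bind the story id found in the branch name
# and commit message.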
def test_provider_autodetect_by_headers() -> None:
writer = FakeStoryWriter()
service = RepoWebhookService(writer)
result = service.process(
headers={"X-Gitea-Event": "push"},
payload={
"repository": {"full_name": "acme/proj"},
"ref": "refs/heads/feature/AAAA-1234",
"commits": [{"id": "abc123", "message": "AAAA-1234 update"}],
},
)
assert result["accepted"] is True
assert result["story_bound"] is True
assert result["story_id"] == "AAAA-1234"


@@ -0,0 +1,48 @@
from __future__ import annotations
from app.modules.agent.story_session_recorder import StorySessionRecorder
from app.schemas.changeset import ChangeItem, ChangeOp
class FakeStoryRepo:
def __init__(self) -> None:
self.calls: list[dict] = []
def add_session_artifact(self, **kwargs) -> None:
self.calls.append(kwargs)
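# Three artifacts are expected per run: an analysis entry for the confluence
# attachment, a doc_change entry carrying the answer summary, and a doc_change
# entry for the updated file from the changeset.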
def test_record_run_stores_attachment_and_changeset_artifacts() -> None:
repo = FakeStoryRepo()
recorder = StorySessionRecorder(repo)
recorder.record_run(
dialog_session_id="dialog-1",
rag_session_id="rag-1",
scenario="docs_from_analytics",
attachments=[
{"type": "confluence_url", "value": "https://example.org/doc"},
{"type": "file_ref", "value": "local.md"},
],
answer="Generated docs update summary",
changeset=[
ChangeItem(
op=ChangeOp.UPDATE,
path="docs/api.md",
base_hash="abc",
proposed_content="new",
reason="sync endpoint section",
)
],
)
assert len(repo.calls) == 3
assert repo.calls[0]["artifact_role"] == "analysis"
assert repo.calls[0]["source_ref"] == "https://example.org/doc"
assert repo.calls[1]["artifact_role"] == "doc_change"
assert repo.calls[1]["summary"] == "Generated docs update summary"
assert repo.calls[2]["artifact_role"] == "doc_change"
assert repo.calls[2]["path"] == "docs/api.md"
assert repo.calls[2]["change_type"] == "updated"

tests/conftest.py Normal file (+8)

@@ -0,0 +1,8 @@
from __future__ import annotations
import sys
from pathlib import Path
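# Ensure the repository root is importable so tests can resolve the `app` package.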
ROOT = Path(__file__).resolve().parents[1]
if str(ROOT) not in sys.path:
sys.path.insert(0, str(ROOT))