Удаление легаси
This commit is contained in:
@@ -1,181 +0,0 @@
|
||||
import sys
import types

# Stub out SQLAlchemy before the router imports run, so the tests do not
# require the real dependency. Only the attributes touched at import time
# are provided; setdefault keeps a real installation intact if present.
sqlalchemy = types.ModuleType("sqlalchemy")
sqlalchemy.text = lambda value: value
sqlalchemy.create_engine = lambda *args, **kwargs: object()
sys.modules.setdefault("sqlalchemy", sqlalchemy)

sqlalchemy_engine = types.ModuleType("sqlalchemy.engine")
sqlalchemy_engine.Engine = object
sys.modules.setdefault("sqlalchemy.engine", sqlalchemy_engine)

sqlalchemy_orm = types.ModuleType("sqlalchemy.orm")
sqlalchemy_orm.sessionmaker = lambda *args, **kwargs: object()
sys.modules.setdefault("sqlalchemy.orm", sqlalchemy_orm)

sqlalchemy_pool = types.ModuleType("sqlalchemy.pool")
sqlalchemy_pool.NullPool = object
sys.modules.setdefault("sqlalchemy.pool", sqlalchemy_pool)
|
||||
|
||||
from app.modules.agent.engine.router.router_service import RouterService
|
||||
from app.modules.agent.engine.router.schemas import RouteDecision, RouterContext
|
||||
|
||||
|
||||
class _FakeRegistry:
|
||||
def is_valid(self, domain_id: str, process_id: str) -> bool:
|
||||
return (domain_id, process_id) in {
|
||||
("default", "general"),
|
||||
("project", "qa"),
|
||||
("project", "edits"),
|
||||
("docs", "generation"),
|
||||
}
|
||||
|
||||
def get_factory(self, domain_id: str, process_id: str):
|
||||
return object()
|
||||
|
||||
|
||||
class _FakeClassifier:
    """Classifier stub returning canned routing decisions and counting calls."""

    def __init__(self, decision: RouteDecision | None = None, forced: RouteDecision | None = None) -> None:
        # Default decision mimics a confident "new intent" classification.
        if decision is None:
            decision = RouteDecision(domain_id="project", process_id="qa", confidence=0.95, reason="new_intent")
        self._decision = decision
        self._forced = forced
        # Number of classify_new_intent() invocations, asserted on by tests.
        self.calls = 0

    def from_mode(self, mode: str) -> RouteDecision | None:
        """Return the forced decision for any non-auto mode, else None."""
        if mode == "auto":
            return None
        return self._forced

    def classify_new_intent(self, user_message: str, context: RouterContext) -> RouteDecision:
        """Record the call and return the preconfigured decision."""
        self.calls += 1
        return self._decision
|
||||
|
||||
|
||||
class _FakeContextStore:
    """Context-store stub serving a fixed context and recording every update."""

    def __init__(self, context: RouterContext) -> None:
        self._context = context
        # Each update() payload is captured here for assertions.
        self.updated: list[dict] = []

    def get(self, conversation_key: str) -> RouterContext:
        """Always return the preconfigured context, ignoring the key."""
        return self._context

    def update(self, conversation_key: str, **kwargs) -> None:
        """Record the update payload together with its conversation key."""
        entry = {"conversation_key": conversation_key}
        entry.update(kwargs)
        self.updated.append(entry)
|
||||
|
||||
|
||||
class _FakeSwitchDetector:
    """Switch-detector stub that always returns a preconfigured verdict."""

    def __init__(self, should_switch: bool) -> None:
        self._should_switch = should_switch

    def should_switch(self, user_message: str, context: RouterContext) -> bool:
        """Return the fixed verdict regardless of the message or context."""
        return self._should_switch
|
||||
|
||||
|
||||
def test_router_service_classifies_first_message() -> None:
    """The very first message triggers classification and starts an intent."""
    service = RouterService(
        registry=_FakeRegistry(),
        classifier=_FakeClassifier(),
        context_store=_FakeContextStore(RouterContext()),
        switch_detector=_FakeSwitchDetector(False),
    )

    route = service.resolve("Объясни как работает endpoint", "dialog-1")

    assert route.domain_id == "project"
    assert route.process_id == "qa"
    assert route.decision_type == "start"
|
||||
|
||||
|
||||
def test_router_service_keeps_current_intent_for_follow_up() -> None:
    """A follow-up message keeps the active intent without re-classifying."""
    context = RouterContext(
        active_intent={"domain_id": "project", "process_id": "qa"},
        last_routing={"domain_id": "project", "process_id": "qa"},
        dialog_started=True,
        turn_index=1,
    )
    # This decision must never be used: the classifier should not run.
    classifier = _FakeClassifier(
        decision=RouteDecision(domain_id="docs", process_id="generation", confidence=0.99, reason="should_not_run")
    )
    service = RouterService(
        registry=_FakeRegistry(),
        classifier=classifier,
        context_store=_FakeContextStore(context),
        switch_detector=_FakeSwitchDetector(False),
    )

    route = service.resolve("Покажи подробнее", "dialog-1")

    assert route.domain_id == "project"
    assert route.process_id == "qa"
    assert route.decision_type == "continue"
    assert classifier.calls == 0
|
||||
|
||||
|
||||
def test_router_service_switches_only_on_explicit_new_intent() -> None:
    """An explicit switch request re-classifies and changes the route."""
    context = RouterContext(
        active_intent={"domain_id": "project", "process_id": "qa"},
        last_routing={"domain_id": "project", "process_id": "qa"},
        dialog_started=True,
        turn_index=2,
    )
    classifier = _FakeClassifier(
        decision=RouteDecision(domain_id="project", process_id="edits", confidence=0.96, reason="explicit_edit")
    )
    service = RouterService(
        registry=_FakeRegistry(),
        classifier=classifier,
        context_store=_FakeContextStore(context),
        switch_detector=_FakeSwitchDetector(True),
    )

    route = service.resolve("Теперь измени файл README.md", "dialog-1")

    assert route.domain_id == "project"
    assert route.process_id == "edits"
    assert route.decision_type == "switch"
    assert route.explicit_switch is True
    assert classifier.calls == 1
|
||||
|
||||
|
||||
def test_router_service_keeps_current_when_explicit_switch_is_unresolved() -> None:
    """A low-confidence switch attempt falls back to the current intent."""
    context = RouterContext(
        active_intent={"domain_id": "project", "process_id": "qa"},
        last_routing={"domain_id": "project", "process_id": "qa"},
        dialog_started=True,
        turn_index=2,
    )
    classifier = _FakeClassifier(
        decision=RouteDecision(domain_id="docs", process_id="generation", confidence=0.2, reason="low_confidence")
    )
    service = RouterService(
        registry=_FakeRegistry(),
        classifier=classifier,
        context_store=_FakeContextStore(context),
        switch_detector=_FakeSwitchDetector(True),
    )

    route = service.resolve("Теперь сделай что-то другое", "dialog-1")

    assert route.domain_id == "project"
    assert route.process_id == "qa"
    assert route.decision_type == "continue"
    assert route.reason == "explicit_switch_unresolved_keep_current"
|
||||
|
||||
|
||||
def test_router_service_persists_decision_type() -> None:
    """persist_context forwards the decision_type into the context store."""
    store = _FakeContextStore(RouterContext())
    service = RouterService(
        registry=_FakeRegistry(),
        classifier=_FakeClassifier(),
        context_store=store,
        switch_detector=_FakeSwitchDetector(False),
    )

    service.persist_context(
        "dialog-1",
        domain_id="project",
        process_id="qa",
        user_message="Объясни",
        assistant_message="Ответ",
        decision_type="continue",
    )

    assert store.updated[0]["decision_type"] == "continue"
|
||||
@@ -1,59 +0,0 @@
|
||||
from app.modules.agent.engine.orchestrator.actions.code_explain_actions import CodeExplainActions
|
||||
from app.modules.agent.engine.orchestrator.execution_context import ExecutionContext
|
||||
from app.modules.agent.engine.orchestrator.models import (
|
||||
ArtifactType,
|
||||
ExecutionPlan,
|
||||
OutputContract,
|
||||
RoutingMeta,
|
||||
Scenario,
|
||||
TaskConstraints,
|
||||
TaskSpec,
|
||||
)
|
||||
from app.modules.rag.explain.models import ExplainIntent, ExplainPack
|
||||
|
||||
|
||||
class _FakeRetriever:
    """Retriever stub that validates its inputs and returns a minimal pack."""

    def build_pack(self, rag_session_id: str, user_query: str, *, file_candidates: list[dict] | None = None) -> ExplainPack:
        # Sanity-check that the action forwards exactly what the context holds.
        assert rag_session_id == "rag-1"
        assert "endpoint" in user_query
        assert file_candidates == [{"path": "app/api/users.py", "content": "..."}]
        return ExplainPack(intent=ExplainIntent(raw_query=user_query, normalized_query=user_query))
|
||||
|
||||
|
||||
def _ctx() -> ExecutionContext:
    """Build an explain-part execution context seeded with a source_bundle artifact."""
    task = TaskSpec(
        task_id="task-1",
        dialog_session_id="dialog-1",
        rag_session_id="rag-1",
        user_message="Explain endpoint get_user",
        scenario=Scenario.EXPLAIN_PART,
        routing=RoutingMeta(domain_id="project", process_id="qa", confidence=0.9, reason="test"),
        constraints=TaskConstraints(),
        output_contract=OutputContract(result_type="answer"),
        metadata={"rag_context": "", "confluence_context": "", "files_map": {}},
    )
    plan = ExecutionPlan(
        plan_id="plan-1",
        task_id="task-1",
        scenario=Scenario.EXPLAIN_PART,
        template_id="tpl",
        template_version="1",
        steps=[],
    )
    ctx = ExecutionContext(task=task, plan=plan, graph_resolver=lambda *_: None, graph_invoker=lambda *_: {})
    # Pre-seed the artifact the explain action consumes.
    ctx.artifacts.put(
        key="source_bundle",
        artifact_type=ArtifactType.STRUCTURED_JSON,
        content={"file_candidates": [{"path": "app/api/users.py", "content": "..."}]},
    )
    return ctx
|
||||
|
||||
|
||||
def test_code_explain_actions_store_explain_pack() -> None:
    """build_code_explain_pack stores the retriever's pack as an artifact."""
    ctx = _ctx()
    actions = CodeExplainActions(_FakeRetriever())

    actions.build_code_explain_pack(ctx)

    stored = ctx.artifacts.get_content("explain_pack", {})
    assert stored["intent"]["raw_query"] == "Explain endpoint get_user"
|
||||
@@ -1,61 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from app.modules.agent.engine.orchestrator.actions.edit_actions import EditActions
|
||||
from app.modules.agent.engine.orchestrator.execution_context import ExecutionContext
|
||||
from app.modules.agent.engine.orchestrator.models import (
|
||||
ExecutionPlan,
|
||||
OutputContract,
|
||||
RoutingMeta,
|
||||
Scenario,
|
||||
TaskConstraints,
|
||||
TaskSpec,
|
||||
)
|
||||
|
||||
|
||||
def _ctx() -> ExecutionContext:
    """Build a targeted-edit execution context with a single README file."""
    task = TaskSpec(
        task_id="task-1",
        dialog_session_id="dialog-1",
        rag_session_id="rag-1",
        mode="auto",
        user_message="Добавь в readme.md в конце строку про автора",
        scenario=Scenario.TARGETED_EDIT,
        routing=RoutingMeta(domain_id="project", process_id="edits", confidence=0.95, reason="test"),
        constraints=TaskConstraints(allow_writes=True),
        output_contract=OutputContract(result_type="changeset"),
        metadata={
            "files_map": {
                "README.md": {
                    "path": "README.md",
                    "content": "# Title\n",
                    "content_hash": "hash123",
                }
            }
        },
    )
    plan = ExecutionPlan(
        plan_id="plan-1",
        task_id="task-1",
        scenario=Scenario.TARGETED_EDIT,
        template_id="targeted_edit_v1",
        template_version="1.0",
        steps=[],
    )
    return ExecutionContext(task=task, plan=plan, graph_resolver=lambda *_: None, graph_invoker=lambda *_: {})
|
||||
|
||||
|
||||
def test_edit_actions_resolve_path_case_insensitive_and_keep_update() -> None:
    """Path resolution is case-insensitive and the patch stays an update op."""
    actions = EditActions()
    ctx = _ctx()

    # Run the full edit pipeline: resolve -> load -> plan -> generate.
    actions.resolve_target(ctx)
    actions.load_target_context(ctx)
    actions.plan_minimal_patch(ctx)
    actions.generate_patch(ctx)

    target = ctx.artifacts.get_content("target_context", {})
    changeset = ctx.artifacts.get_content("raw_changeset", [])

    assert target["path"] == "README.md"
    assert changeset[0]["path"] == "README.md"
    assert changeset[0]["op"] == "update"
|
||||
@@ -1,56 +0,0 @@
|
||||
import asyncio
|
||||
|
||||
import pytest
|
||||
|
||||
from app.modules.agent.engine.orchestrator.models import OutputContract, RoutingMeta, Scenario, TaskConstraints, TaskSpec
|
||||
from app.modules.agent.engine.orchestrator.service import OrchestratorService
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "scenario,expect_changeset",
    [
        (Scenario.EXPLAIN_PART, False),
        (Scenario.ANALYTICS_REVIEW, False),
        (Scenario.DOCS_FROM_ANALYTICS, True),
        (Scenario.TARGETED_EDIT, True),
        (Scenario.GHERKIN_MODEL, True),
    ],
)
def test_eval_suite_scenarios_run(scenario: Scenario, expect_changeset: bool) -> None:
    """Each supported scenario completes and yields the expected result kind."""
    service = OrchestratorService()

    task = TaskSpec(
        task_id=f"task-{scenario.value}",
        dialog_session_id="dialog-1",
        rag_session_id="rag-1",
        mode="auto",
        user_message="Please process this scenario using project docs and requirements.",
        scenario=scenario,
        routing=RoutingMeta(domain_id="project", process_id="qa", confidence=0.95, reason="eval"),
        constraints=TaskConstraints(
            # Write access only for the scenarios expected to emit a changeset.
            allow_writes=scenario in {Scenario.DOCS_FROM_ANALYTICS, Scenario.TARGETED_EDIT, Scenario.GHERKIN_MODEL},
            max_steps=20,
            max_retries_per_step=2,
            step_timeout_sec=90,
        ),
        output_contract=OutputContract(result_type="answer"),
        attachments=[{"type": "http_url", "value": "https://example.com/doc"}],
        metadata={
            "rag_context": "Requirements context is available.",
            "confluence_context": "",
            "files_map": {"docs/api/increment.md": {"content": "old", "content_hash": "h1"}},
        },
    )

    result = asyncio.run(
        service.run(
            task=task,
            graph_resolver=lambda _domain, _process: object(),
            graph_invoker=lambda _graph, _state, _dialog: {"answer": "fallback", "changeset": []},
        )
    )

    assert result.meta["plan"]["status"] in {"completed", "partial"}
    assert bool(result.changeset) is expect_changeset
    if not expect_changeset:
        assert result.answer
|
||||
@@ -1,131 +0,0 @@
|
||||
from app.modules.agent.engine.orchestrator.actions.explain_actions import ExplainActions
|
||||
from app.modules.agent.engine.orchestrator.execution_context import ExecutionContext
|
||||
from app.modules.agent.engine.orchestrator.models import (
|
||||
ExecutionPlan,
|
||||
OutputContract,
|
||||
RoutingMeta,
|
||||
Scenario,
|
||||
TaskConstraints,
|
||||
TaskSpec,
|
||||
)
|
||||
|
||||
|
||||
def _ctx(rag_items: list[dict]) -> ExecutionContext:
    """Build an explain-part context seeded with the given RAG items."""
    task = TaskSpec(
        task_id="task-1",
        dialog_session_id="dialog-1",
        rag_session_id="rag-1",
        user_message="Объясни по коду как работает task_processor",
        scenario=Scenario.EXPLAIN_PART,
        routing=RoutingMeta(domain_id="project", process_id="qa", confidence=0.9, reason="test"),
        constraints=TaskConstraints(),
        output_contract=OutputContract(result_type="answer"),
        metadata={
            "rag_items": rag_items,
            "rag_context": "",
            "confluence_context": "",
            "files_map": {},
        },
    )
    plan = ExecutionPlan(
        plan_id="plan-1",
        task_id="task-1",
        scenario=Scenario.EXPLAIN_PART,
        template_id="tpl",
        template_version="1",
        steps=[],
    )
    return ExecutionContext(task=task, plan=plan, graph_resolver=lambda *_: None, graph_invoker=lambda *_: {})
|
||||
|
||||
|
||||
def test_explain_actions_switch_to_code_profile_when_code_layers_present() -> None:
    """Code-layer RAG items flip the source profile to 'code'."""
    ctx = _ctx(
        [
            {
                "source": "app/task_processor.py",
                "layer": "C1_SYMBOL_CATALOG",
                "title": "task_processor.process_task",
                "content": "function task_processor.process_task(task)",
                "metadata": {"qname": "task_processor.process_task", "kind": "function"},
            },
            {
                "source": "app/task_processor.py",
                "layer": "C2_DEPENDENCY_GRAPH",
                "title": "task_processor.process_task:calls",
                "content": "task_processor.process_task calls queue.publish",
                "metadata": {"edge_type": "calls"},
            },
        ]
    )
    actions = ExplainActions()

    actions.collect_sources(ctx)
    actions.extract_logic(ctx)
    actions.summarize(ctx)

    sources = ctx.artifacts.get_content("sources", {})
    assert sources["source_profile"] == "code"
    answer = str(ctx.artifacts.get_content("final_answer", ""))
    # The answer must not leak internal retrieval terminology.
    assert "кодовых слоев индекса" not in answer
    assert "CodeRAG" not in answer
    assert "app/task_processor.py" in answer
    assert "requirements/docs context" not in answer
|
||||
|
||||
|
||||
def test_explain_actions_add_code_details_block() -> None:
    """The final answer includes a code-details section built from symbols."""
    ctx = _ctx(
        [
            {
                "source": "src/config_manager/__init__.py",
                "layer": "C1_SYMBOL_CATALOG",
                "title": "ConfigManager",
                "content": "const ConfigManager\nConfigManager = config_manager.v2.ConfigManagerV2",
                "metadata": {
                    "qname": "ConfigManager",
                    "kind": "const",
                    "lang_payload": {"imported_from": "v2.ConfigManagerV2", "import_alias": True},
                },
            },
            {
                "source": "src/config_manager/v2/control/base.py",
                "layer": "C1_SYMBOL_CATALOG",
                "title": "ControlChannel",
                "content": "class ControlChannel\nControlChannel(ABC)",
                "metadata": {"qname": "ControlChannel", "kind": "class"},
            },
            {
                "source": "src/config_manager/v2/core/control_bridge.py",
                "layer": "C1_SYMBOL_CATALOG",
                "title": "ControlChannelBridge",
                "content": "class ControlChannelBridge\nПредоставляет halt и status как обработчики start/stop/status",
                "metadata": {"qname": "ControlChannelBridge", "kind": "class"},
            },
            {
                "source": "src/config_manager/v2/core/control_bridge.py",
                "layer": "C2_DEPENDENCY_GRAPH",
                "title": "ControlChannelBridge.on_start:calls",
                "content": "ControlChannelBridge.on_start calls self._start_runtime",
                "metadata": {"src_qname": "ControlChannelBridge.on_start", "dst_ref": "self._start_runtime"},
            },
            {
                "source": "src/config_manager/v2/__init__.py",
                "layer": "C0_SOURCE_CHUNKS",
                "title": "src/config_manager/v2/__init__.py:1-6",
                "content": '"""Контракт: управление через API (config.yaml, секция management)."""',
                "metadata": {},
            },
        ]
    )
    actions = ExplainActions()

    actions.collect_sources(ctx)
    actions.extract_logic(ctx)
    actions.summarize(ctx)

    answer = str(ctx.artifacts.get_content("final_answer", ""))
    assert "### Что видно по коду" in answer
    assert "ConfigManager` в проекте доступен как alias" in answer
    assert "ControlChannelBridge.on_start" in answer
    assert "### Где смотреть в проекте" in answer
    assert "В индексе нет точного символа" not in answer
    assert "отдельный интерфейс управления" in answer
|
||||
@@ -1,175 +0,0 @@
|
||||
import asyncio
|
||||
|
||||
from app.modules.agent.engine.orchestrator.models import (
|
||||
OutputContract,
|
||||
RoutingMeta,
|
||||
Scenario,
|
||||
TaskConstraints,
|
||||
TaskSpec,
|
||||
)
|
||||
from app.modules.agent.engine.orchestrator.service import OrchestratorService
|
||||
|
||||
|
||||
class DummyGraph:
    """Opaque placeholder returned by the fake graph resolvers in these tests."""
|
||||
|
||||
|
||||
def _task(scenario: Scenario, *, domain_id: str = "project", process_id: str = "qa") -> TaskSpec:
    """Build a TaskSpec for the given scenario; write scenarios get allow_writes."""
    allow_writes = scenario in {Scenario.DOCS_FROM_ANALYTICS, Scenario.TARGETED_EDIT, Scenario.GHERKIN_MODEL}
    return TaskSpec(
        task_id="task-1",
        dialog_session_id="dialog-1",
        rag_session_id="rag-1",
        mode="auto",
        user_message="Explain this module",
        scenario=scenario,
        routing=RoutingMeta(domain_id=domain_id, process_id=process_id, confidence=0.95, reason="unit-test"),
        constraints=TaskConstraints(allow_writes=allow_writes, max_steps=16, max_retries_per_step=2, step_timeout_sec=90),
        output_contract=OutputContract(result_type="answer"),
        metadata={
            "rag_context": "RAG",
            "confluence_context": "",
            "files_map": {},
        },
    )
|
||||
|
||||
|
||||
def test_orchestrator_service_returns_answer() -> None:
    """General QA resolves the default graph and returns its answer."""
    service = OrchestratorService()

    def graph_resolver(domain_id: str, process_id: str):
        assert domain_id == "default"
        assert process_id == "general"
        return DummyGraph()

    def graph_invoker(_graph, state: dict, dialog_session_id: str):
        assert state["message"] == "Explain this module"
        assert dialog_session_id == "dialog-1"
        return {"answer": "It works.", "changeset": []}

    result = asyncio.run(
        service.run(
            task=_task(Scenario.GENERAL_QA, domain_id="default", process_id="general"),
            graph_resolver=graph_resolver,
            graph_invoker=graph_invoker,
        )
    )
    assert result.answer == "It works."
    assert result.meta["plan"]["status"] == "completed"
|
||||
|
||||
|
||||
def test_orchestrator_service_generates_changeset_for_docs_scenario() -> None:
    """Docs-from-analytics yields a changeset even when the graph returns none."""
    service = OrchestratorService()

    def graph_resolver(_domain_id: str, _process_id: str):
        return DummyGraph()

    def graph_invoker(_graph, _state: dict, _dialog_session_id: str):
        return {"answer": "unused", "changeset": []}

    result = asyncio.run(
        service.run(
            task=_task(Scenario.DOCS_FROM_ANALYTICS),
            graph_resolver=graph_resolver,
            graph_invoker=graph_invoker,
        )
    )
    assert result.meta["plan"]["status"] == "completed"
    assert len(result.changeset) > 0
|
||||
|
||||
|
||||
def test_orchestrator_service_uses_project_qa_reasoning_without_graph() -> None:
    """Project QA walks the full reasoning step-graph chain in order."""
    service = OrchestratorService()
    requested_graphs: list[tuple[str, str]] = []

    def graph_resolver(domain_id: str, process_id: str):
        requested_graphs.append((domain_id, process_id))
        return DummyGraph()

    def graph_invoker(_graph, state: dict, _dialog_session_id: str):
        # Emulate each step graph in turn, keyed off what is already in state.
        if "resolved_request" not in state:
            return {
                "resolved_request": {
                    "original_message": state["message"],
                    "normalized_message": state["message"],
                    "subject_hint": "",
                    "source_hint": "code",
                    "russian": True,
                }
            }
        if "question_profile" not in state:
            return {
                "question_profile": {
                    "domain": "code",
                    "intent": "inventory",
                    "terms": ["control", "channel"],
                    "entities": [],
                    "russian": True,
                }
            }
        if "source_bundle" not in state:
            return {
                "source_bundle": {
                    "profile": state["question_profile"],
                    "rag_items": [],
                    "file_candidates": [
                        {"path": "src/config_manager/v2/control/base.py", "content": "class ControlChannel: pass"},
                        {"path": "src/config_manager/v2/control/http_channel.py", "content": "class HttpControlChannel(ControlChannel): pass # http api"},
                    ],
                    "rag_total": 0,
                    "files_total": 2,
                }
            }
        if "analysis_brief" not in state:
            return {
                "analysis_brief": {
                    "subject": "management channels",
                    "findings": ["В коде найдены конкретные реализации каналов управления: http channel (`src/config_manager/v2/control/http_channel.py`)."],
                    "evidence": ["src/config_manager/v2/control/http_channel.py"],
                    "gaps": [],
                    "answer_mode": "inventory",
                }
            }
        return {
            "answer_brief": {
                "question_profile": state["question_profile"],
                "resolved_subject": "management channels",
                "key_findings": ["В коде найдены конкретные реализации каналов управления: http channel (`src/config_manager/v2/control/http_channel.py`)."],
                "supporting_evidence": ["src/config_manager/v2/control/http_channel.py"],
                "missing_evidence": [],
                "answer_mode": "inventory",
            },
            "final_answer": "## Кратко\n### Что реализовано\n- В коде найдены конкретные реализации каналов управления: http channel (`src/config_manager/v2/control/http_channel.py`).",
        }

    task = _task(Scenario.GENERAL_QA).model_copy(
        update={
            "user_message": "Какие каналы управления уже реализованы?",
            "metadata": {
                "rag_context": "",
                "confluence_context": "",
                "files_map": {
                    "src/config_manager/v2/control/base.py": {
                        "content": "class ControlChannel:\n async def start(self):\n ..."
                    },
                    "src/config_manager/v2/control/http_channel.py": {
                        "content": "class HttpControlChannel(ControlChannel):\n async def start(self):\n ...\n# http api"
                    },
                },
                "rag_items": [],
            },
        }
    )

    result = asyncio.run(service.run(task=task, graph_resolver=graph_resolver, graph_invoker=graph_invoker))

    assert "Что реализовано" in result.answer
    assert "http channel" in result.answer.lower()
    assert result.meta["plan"]["status"] == "completed"
    # The five project-QA step graphs must be requested in this exact order.
    assert requested_graphs == [
        ("project_qa", "conversation_understanding"),
        ("project_qa", "question_classification"),
        ("project_qa", "context_retrieval"),
        ("project_qa", "context_analysis"),
        ("project_qa", "answer_composition"),
    ]
|
||||
@@ -1,49 +0,0 @@
|
||||
from app.modules.agent.engine.orchestrator.models import (
|
||||
ExecutionPlan,
|
||||
OutputContract,
|
||||
PlanStep,
|
||||
RetryPolicy,
|
||||
RoutingMeta,
|
||||
Scenario,
|
||||
TaskConstraints,
|
||||
TaskSpec,
|
||||
)
|
||||
from app.modules.agent.engine.orchestrator.plan_validator import PlanValidator
|
||||
|
||||
|
||||
def _task(*, allow_writes: bool) -> TaskSpec:
    """Build a minimal general-QA TaskSpec with the given write permission."""
    return TaskSpec(
        task_id="t1",
        dialog_session_id="d1",
        rag_session_id="r1",
        mode="auto",
        user_message="hello",
        scenario=Scenario.GENERAL_QA,
        routing=RoutingMeta(domain_id="default", process_id="general", confidence=0.9, reason="test"),
        constraints=TaskConstraints(allow_writes=allow_writes, max_steps=10, max_retries_per_step=2, step_timeout_sec=60),
        output_contract=OutputContract(result_type="answer"),
    )
|
||||
|
||||
|
||||
def test_plan_validator_rejects_write_step_when_not_allowed() -> None:
    """A write step in a read-only task is reported as a validation error."""
    plan = ExecutionPlan(
        plan_id="p1",
        task_id="t1",
        scenario=Scenario.GENERAL_QA,
        template_id="tmp",
        template_version="1.0",
        steps=[
            PlanStep(
                step_id="s1",
                title="write",
                action_id="collect_state",
                executor="function",
                side_effect="write",
                retry=RetryPolicy(max_attempts=1),
            )
        ],
    )

    errors = PlanValidator().validate(plan, _task(allow_writes=False))

    assert "write_step_not_allowed:s1" in errors
|
||||
@@ -1,71 +0,0 @@
|
||||
from app.modules.agent.engine.orchestrator.actions.project_qa_actions import ProjectQaActions
|
||||
from app.modules.agent.engine.orchestrator.execution_context import ExecutionContext
|
||||
from app.modules.agent.engine.orchestrator.models import (
|
||||
ExecutionPlan,
|
||||
OutputContract,
|
||||
RoutingMeta,
|
||||
Scenario,
|
||||
TaskConstraints,
|
||||
TaskSpec,
|
||||
)
|
||||
|
||||
|
||||
def _ctx(message: str, rag_items: list[dict], files_map: dict[str, dict]) -> ExecutionContext:
    """Build a general-QA execution context from a message and source maps."""
    task = TaskSpec(
        task_id="task-1",
        dialog_session_id="dialog-1",
        rag_session_id="rag-1",
        user_message=message,
        scenario=Scenario.GENERAL_QA,
        routing=RoutingMeta(domain_id="project", process_id="qa", confidence=0.9, reason="test"),
        constraints=TaskConstraints(),
        output_contract=OutputContract(result_type="answer"),
        metadata={
            "rag_items": rag_items,
            "rag_context": "",
            "confluence_context": "",
            "files_map": files_map,
        },
    )
    plan = ExecutionPlan(
        plan_id="plan-1",
        task_id="task-1",
        scenario=Scenario.GENERAL_QA,
        template_id="tpl",
        template_version="1",
        steps=[],
    )
    return ExecutionContext(task=task, plan=plan, graph_resolver=lambda *_: None, graph_invoker=lambda *_: {})
|
||||
|
||||
|
||||
def test_project_qa_actions_build_inventory_answer_from_code_sources() -> None:
    """An inventory question over code files yields a structured answer."""
    ctx = _ctx(
        "Какие каналы управления уже реализованы?",
        [],
        {
            "src/config_manager/v2/control/base.py": {"content": "class ControlChannel:\n async def start(self):\n ..."},
            "src/config_manager/v2/core/control_bridge.py": {
                "content": "class ControlChannelBridge:\n async def on_start(self):\n ...\n async def on_status(self):\n ..."
            },
            "src/config_manager/v2/control/http_channel.py": {
                "content": "class HttpControlChannel(ControlChannel):\n async def start(self):\n ...\n# http api"
            },
            "src/config_manager/v2/control/telegram_channel.py": {
                "content": "class TelegramControlChannel(ControlChannel):\n async def start(self):\n ...\n# telegram bot"
            },
        },
    )
    actions = ProjectQaActions()

    # Run the full project-QA reasoning chain.
    actions.classify_project_question(ctx)
    actions.collect_project_sources(ctx)
    actions.analyze_project_sources(ctx)
    actions.build_project_answer_brief(ctx)
    actions.compose_project_answer(ctx)

    answer = str(ctx.artifacts.get_content("final_answer", ""))
    assert "### Что реализовано" in answer
    assert "http channel" in answer.lower()
    assert "telegram channel" in answer.lower()
    assert "### Где смотреть в проекте" in answer
|
||||
|
||||
@@ -1,74 +0,0 @@
|
||||
import sys
|
||||
import types
|
||||
|
||||
# Stub out langgraph before importing the graph factory; only the names
# referenced at import time are provided, and setdefault keeps a real
# installation intact if one exists.
langgraph = types.ModuleType("langgraph")
langgraph_graph = types.ModuleType("langgraph.graph")
langgraph_graph.END = "END"
langgraph_graph.START = "START"
langgraph_graph.StateGraph = object
sys.modules.setdefault("langgraph", langgraph)
sys.modules.setdefault("langgraph.graph", langgraph_graph)
|
||||
|
||||
from app.modules.agent.engine.graphs.project_qa_step_graphs import ProjectQaAnswerGraphFactory
|
||||
|
||||
|
||||
class _FakeLlm:
|
||||
def __init__(self) -> None:
|
||||
self.calls: list[tuple[str, str, str | None]] = []
|
||||
|
||||
def generate(self, prompt_name: str, user_input: str, *, log_context: str | None = None) -> str:
|
||||
self.calls.append((prompt_name, user_input, log_context))
|
||||
return "## Summary\n[entrypoint_1] [excerpt_1]"
|
||||
|
||||
|
||||
def test_project_qa_answer_graph_uses_v2_prompt_when_explain_pack_present() -> None:
    """An explain_pack in state routes composition to the v2 prompt."""
    llm = _FakeLlm()
    factory = ProjectQaAnswerGraphFactory(llm)

    result = factory._compose_answer(
        {
            "message": "Explain endpoint get_user",
            "question_profile": {"russian": False},
            "analysis_brief": {"findings": [], "evidence": [], "gaps": [], "answer_mode": "summary"},
            "explain_pack": {
                "intent": {
                    "raw_query": "Explain endpoint get_user",
                    "normalized_query": "Explain endpoint get_user",
                    "keywords": ["get_user"],
                    "hints": {"paths": [], "symbols": [], "endpoints": [], "commands": []},
                    "expected_entry_types": ["http"],
                    "depth": "medium",
                },
                "selected_entrypoints": [],
                "seed_symbols": [],
                "trace_paths": [],
                "evidence_index": {
                    "entrypoint_1": {
                        "evidence_id": "entrypoint_1",
                        "kind": "entrypoint",
                        "summary": "/users/{id}",
                        "location": {"path": "app/api/users.py", "start_line": 10, "end_line": 10},
                        "supports": ["handler-1"],
                    }
                },
                "code_excerpts": [
                    {
                        "evidence_id": "excerpt_1",
                        "symbol_id": "handler-1",
                        "title": "get_user",
                        "path": "app/api/users.py",
                        "start_line": 10,
                        "end_line": 18,
                        "content": "async def get_user():\n return 1",
                        "focus": "overview",
                    }
                ],
                "missing": [],
                "conflicts": [],
            },
        }
    )

    assert result["final_answer"].startswith("## Summary")
    assert llm.calls[0][0] == "code_explain_answer_v2"
    # The serialized pack passed to the LLM must carry the excerpt evidence id.
    assert '"evidence_id": "excerpt_1"' in llm.calls[0][1]
|
||||
@@ -1,49 +0,0 @@
|
||||
import sys
|
||||
import types
|
||||
|
||||
# Stub out langgraph before importing the retrieval graph factory; only the
# names referenced at import time are provided, and setdefault keeps a real
# installation intact if one exists.
langgraph = types.ModuleType("langgraph")
langgraph_graph = types.ModuleType("langgraph.graph")
langgraph_graph.END = "END"
langgraph_graph.START = "START"
langgraph_graph.StateGraph = object
sys.modules.setdefault("langgraph", langgraph)
sys.modules.setdefault("langgraph.graph", langgraph_graph)
|
||||
|
||||
from app.modules.agent.engine.graphs.project_qa_step_graphs import ProjectQaRetrievalGraphFactory
|
||||
|
||||
|
||||
class _FailingRag:
|
||||
async def retrieve(self, rag_session_id: str, query: str):
|
||||
raise AssertionError("legacy rag should not be called for explain_part")
|
||||
|
||||
|
||||
def test_project_qa_retrieval_skips_legacy_rag_for_explain_part() -> None:
    """explain_part retrieval must bypass the legacy RAG client entirely."""
    state = {
        "scenario": "explain_part",
        "project_id": "rag-1",
        "resolved_request": {
            "original_message": "Explain how ConfigManager works",
            "normalized_message": "Explain how ConfigManager works",
        },
        "question_profile": {
            "domain": "code",
            "intent": "explain",
            "terms": ["configmanager"],
            "entities": ["ConfigManager"],
            "russian": False,
        },
        "files_map": {
            "src/config_manager/__init__.py": {
                "content": "from .v2 import ConfigManagerV2 as ConfigManager",
                "content_hash": "hash-1",
            }
        },
    }

    # _FailingRag raises on any retrieve() call, so reaching the asserts
    # below proves the legacy RAG path was never taken.
    factory = ProjectQaRetrievalGraphFactory(_FailingRag())
    bundle = factory._retrieve_context(state)["source_bundle"]

    assert bundle["rag_items"] == []
    assert bundle["files_total"] >= 1
|
||||
@@ -1,42 +0,0 @@
|
||||
import asyncio
|
||||
|
||||
from app.modules.agent.engine.orchestrator.models import OutputContract, OutputSection, RoutingMeta, Scenario, TaskConstraints, TaskSpec
|
||||
from app.modules.agent.engine.orchestrator.service import OrchestratorService
|
||||
|
||||
|
||||
def test_quality_metrics_present_and_scored() -> None:
    """A full orchestrator run must attach scored quality metrics to result.meta."""
    contract = OutputContract(
        result_type="answer",
        sections=[
            OutputSection(name="sequence_diagram", format="mermaid"),
            OutputSection(name="use_cases", format="markdown"),
            OutputSection(name="summary", format="markdown"),
        ],
    )
    task = TaskSpec(
        task_id="quality-1",
        dialog_session_id="dialog-1",
        rag_session_id="rag-1",
        mode="auto",
        user_message="Explain architecture",
        scenario=Scenario.EXPLAIN_PART,
        routing=RoutingMeta(domain_id="project", process_id="qa", confidence=0.9, reason="test"),
        constraints=TaskConstraints(allow_writes=False),
        output_contract=contract,
        metadata={"rag_context": "A\nB", "confluence_context": "", "files_map": {}},
    )

    # The resolver/invoker stubs keep the run offline; the invoker result is
    # intentionally unused by the quality pipeline under test.
    result = asyncio.run(
        OrchestratorService().run(
            task=task,
            graph_resolver=lambda _d, _p: object(),
            graph_invoker=lambda _g, _s, _id: {"answer": "unused", "changeset": []},
        )
    )

    quality = result.meta.get("quality", {})
    assert quality
    for metric in ("faithfulness", "coverage"):
        assert quality.get(metric, {}).get("score") is not None
    assert quality.get("status") in {"ok", "needs_review", "fail"}
    assert quality.get("coverage", {}).get("covered_count", 0) >= 1
|
||||
@@ -1,50 +0,0 @@
|
||||
from app.modules.agent.engine.orchestrator.models import (
|
||||
ArtifactType,
|
||||
OutputContract,
|
||||
OutputSection,
|
||||
RoutingMeta,
|
||||
Scenario,
|
||||
TaskConstraints,
|
||||
TaskSpec,
|
||||
)
|
||||
from app.modules.agent.engine.orchestrator.quality_metrics import QualityMetricsCalculator
|
||||
from app.modules.agent.engine.orchestrator.template_registry import ScenarioTemplateRegistry
|
||||
from app.modules.agent.engine.orchestrator.execution_context import ExecutionContext
|
||||
from app.modules.agent.engine.orchestrator.models import PlanStatus
|
||||
|
||||
|
||||
def test_quality_metrics_coverage_reflects_missing_required_sections() -> None:
    """Coverage score must drop below 1.0 when required sections are missing."""
    required_sections = [
        OutputSection(name="sequence_diagram", format="mermaid"),
        OutputSection(name="use_cases", format="markdown"),
        OutputSection(name="summary", format="markdown"),
    ]
    task = TaskSpec(
        task_id="quality-2",
        dialog_session_id="dialog-1",
        rag_session_id="rag-1",
        mode="auto",
        user_message="Explain architecture",
        scenario=Scenario.EXPLAIN_PART,
        routing=RoutingMeta(domain_id="project", process_id="qa", confidence=0.9, reason="test"),
        constraints=TaskConstraints(allow_writes=False),
        output_contract=OutputContract(result_type="answer", sections=required_sections),
        metadata={"rag_context": "A", "confluence_context": "", "files_map": {}},
    )

    plan = ScenarioTemplateRegistry().build(task)
    plan.status = PlanStatus.COMPLETED
    ctx = ExecutionContext(
        task=task,
        plan=plan,
        graph_resolver=lambda _d, _p: object(),
        graph_invoker=lambda _g, _s, _id: {},
    )
    # The final answer deliberately covers only one of the three sections.
    ctx.artifacts.put(key="final_answer", artifact_type=ArtifactType.TEXT, content="Only summary text")

    metrics = QualityMetricsCalculator().build(ctx, step_results=[])
    coverage = metrics["coverage"]

    assert coverage["score"] < 1.0
    assert "sequence_diagram" in coverage["missing_items"]
|
||||
@@ -1,48 +0,0 @@
|
||||
from app.modules.agent.engine.orchestrator.models import OutputContract, RoutingMeta, Scenario, TaskConstraints, TaskSpec
|
||||
from app.modules.agent.engine.orchestrator.template_registry import ScenarioTemplateRegistry
|
||||
|
||||
|
||||
def _task(scenario: Scenario) -> TaskSpec:
    """Build a minimal TaskSpec for *scenario*.

    Write-capable scenarios (docs generation, targeted edits, gherkin
    modelling) get ``allow_writes=True``; everything else is read-only.
    """
    writable = {Scenario.DOCS_FROM_ANALYTICS, Scenario.TARGETED_EDIT, Scenario.GHERKIN_MODEL}
    return TaskSpec(
        task_id="t1",
        dialog_session_id="d1",
        rag_session_id="r1",
        mode="auto",
        user_message="run scenario",
        scenario=scenario,
        routing=RoutingMeta(domain_id="project", process_id="qa", confidence=0.9, reason="test"),
        constraints=TaskConstraints(allow_writes=scenario in writable),
        output_contract=OutputContract(result_type="answer"),
        metadata={"rag_context": "ctx", "confluence_context": "", "files_map": {}},
    )
|
||||
|
||||
|
||||
def test_template_registry_has_multi_step_review_docs_edit_gherkin() -> None:
    """Each multi-step scenario template contains its key steps and is long enough."""
    registry = ScenarioTemplateRegistry()

    def step_ids(scenario: Scenario) -> list[str]:
        # Flatten the built plan into just its step identifiers.
        return [step.step_id for step in registry.build(_task(scenario)).steps]

    review_steps = step_ids(Scenario.ANALYTICS_REVIEW)
    docs_steps = step_ids(Scenario.DOCS_FROM_ANALYTICS)
    edit_steps = step_ids(Scenario.TARGETED_EDIT)
    gherkin_steps = step_ids(Scenario.GHERKIN_MODEL)

    assert "structural_check" in review_steps
    assert "compose_review_report" in review_steps
    assert "extract_change_intents" in docs_steps
    assert "build_changeset" in docs_steps
    assert "resolve_target" in edit_steps
    assert "finalize_changeset" in edit_steps
    assert "generate_gherkin_bundle" in gherkin_steps
    assert "validate_coverage" in gherkin_steps

    assert len(review_steps) >= 7
    assert len(docs_steps) >= 9
    assert len(edit_steps) >= 7
    assert len(gherkin_steps) >= 8
|
||||
|
||||
|
||||
def test_template_registry_adds_code_explain_pack_step_for_project_explain() -> None:
    """EXPLAIN_PART must schedule the explain-pack step between retrieval and analysis."""
    plan = ScenarioTemplateRegistry().build(_task(Scenario.EXPLAIN_PART))
    steps = [step.step_id for step in plan.steps]

    assert "code_explain_pack_step" in steps
    pack_index = steps.index("code_explain_pack_step")
    assert steps.index("context_retrieval") < pack_index
    assert pack_index < steps.index("context_analysis")
|
||||
@@ -1,48 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from app.modules.agent.story_session_recorder import StorySessionRecorder
|
||||
from app.schemas.changeset import ChangeItem, ChangeOp
|
||||
|
||||
|
||||
class FakeStoryRepo:
    """In-memory stand-in for the story repository.

    Records every ``add_session_artifact`` invocation so tests can assert
    on what the recorder tried to persist, without touching a database.
    """

    def __init__(self) -> None:
        # One kwargs dict per recorded add_session_artifact() call, in order.
        self.calls: list[dict] = []

    def add_session_artifact(self, **kwargs) -> None:
        """Capture the call's keyword arguments instead of persisting them."""
        self.calls.append(kwargs)
|
||||
|
||||
|
||||
def test_record_run_stores_attachment_and_changeset_artifacts() -> None:
    """One run should persist analysis, answer, and changeset artifacts in order."""
    repo = FakeStoryRepo()
    attachments = [
        {"type": "confluence_url", "value": "https://example.org/doc"},
        {"type": "file_ref", "value": "local.md"},
    ]
    changeset = [
        ChangeItem(
            op=ChangeOp.UPDATE,
            path="docs/api.md",
            base_hash="abc",
            proposed_content="new",
            reason="sync endpoint section",
        )
    ]

    StorySessionRecorder(repo).record_run(
        dialog_session_id="dialog-1",
        rag_session_id="rag-1",
        scenario="docs_from_analytics",
        attachments=attachments,
        answer="Generated docs update summary",
        changeset=changeset,
    )

    # Expected order: confluence attachment, answer, then the change item.
    # NOTE(review): the file_ref attachment apparently produces no artifact —
    # only three calls are expected; confirm against the recorder if it changes.
    assert len(repo.calls) == 3
    analysis_call, answer_call, change_call = repo.calls

    assert analysis_call["artifact_role"] == "analysis"
    assert analysis_call["source_ref"] == "https://example.org/doc"

    assert answer_call["artifact_role"] == "doc_change"
    assert answer_call["summary"] == "Generated docs update summary"

    assert change_call["artifact_role"] == "doc_change"
    assert change_call["path"] == "docs/api.md"
    assert change_call["change_type"] == "updated"
|
||||
Reference in New Issue
Block a user