From 1bc57a7c257d84f118d890a4f6fb068042ce1cc8 Mon Sep 17 00:00:00 2001 From: zosimovaa Date: Fri, 27 Feb 2026 21:28:09 +0300 Subject: [PATCH] =?UTF-8?q?=D0=BF=D0=B5=D1=80=D0=B2=D1=8B=D0=B9=20=D0=BA?= =?UTF-8?q?=D0=BE=D0=BC=D0=BC=D0=B8=D1=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.ARCH.md | 155 ++++ README.DB.STORY_PLAN.md | 271 +++++++ README.md | 4 +- app/__pycache__/main.cpython-312.pyc | Bin 1990 -> 2123 bytes app/main.py | 5 +- .../__pycache__/application.cpython-312.pyc | Bin 2224 -> 2821 bytes .../__pycache__/contracts.cpython-312.pyc | Bin 2655 -> 2675 bytes app/modules/agent/README.md | 60 ++ .../agent/__pycache__/module.cpython-312.pyc | Bin 2920 -> 3243 bytes .../repo_webhook_service.cpython-312.pyc | Bin 0 -> 7828 bytes .../__pycache__/repository.cpython-312.pyc | Bin 4919 -> 11646 bytes .../agent/__pycache__/service.cpython-312.pyc | Bin 16481 -> 21670 bytes .../story_context_repository.cpython-312.pyc | Bin 0 -> 30273 bytes .../story_session_recorder.cpython-312.pyc | Bin 0 -> 4272 bytes app/modules/agent/engine/graphs/__init__.py | 25 +- .../__pycache__/__init__.cpython-312.pyc | Bin 552 -> 945 bytes .../__pycache__/base_graph.cpython-312.pyc | Bin 3155 -> 3687 bytes .../project_edits_contract.cpython-312.pyc | Bin 0 -> 8575 bytes .../project_edits_graph.cpython-312.pyc | Bin 4941 -> 6074 bytes .../project_edits_logic.cpython-312.pyc | Bin 18555 -> 13770 bytes .../project_edits_patcher.cpython-312.pyc | Bin 0 -> 9223 bytes .../project_edits_support.cpython-312.pyc | Bin 0 -> 8954 bytes .../project_qa_graph.cpython-312.pyc | Bin 2349 -> 2645 bytes .../graphs/__pycache__/state.cpython-312.pyc | Bin 1382 -> 1468 bytes app/modules/agent/engine/graphs/base_graph.py | 19 +- .../engine/graphs/project_edits_contract.py | 171 ++++ .../engine/graphs/project_edits_graph.py | 33 +- .../engine/graphs/project_edits_logic.py | 305 +++---- .../engine/graphs/project_edits_patcher.py | 142 ++++ 
.../engine/graphs/project_edits_support.py | 116 +++ .../agent/engine/graphs/project_qa_graph.py | 11 +- app/modules/agent/engine/graphs/state.py | 2 + .../agent/engine/orchestrator/__init__.py | 21 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 659 bytes .../artifact_store.cpython-312.pyc | Bin 0 -> 2857 bytes .../evidence_store.cpython-312.pyc | Bin 0 -> 1138 bytes .../execution_context.cpython-312.pyc | Bin 0 -> 1777 bytes .../execution_engine.cpython-312.pyc | Bin 0 -> 7288 bytes .../metrics_persister.cpython-312.pyc | Bin 0 -> 1607 bytes .../__pycache__/plan_compiler.cpython-312.pyc | Bin 0 -> 2376 bytes .../plan_validator.cpython-312.pyc | Bin 0 -> 5108 bytes .../__pycache__/quality_gates.cpython-312.pyc | Bin 0 -> 11005 bytes .../quality_metrics.cpython-312.pyc | Bin 0 -> 7195 bytes .../result_assembler.cpython-312.pyc | Bin 0 -> 3831 bytes .../__pycache__/service.cpython-312.pyc | Bin 0 -> 5560 bytes .../__pycache__/step_registry.cpython-312.pyc | Bin 0 -> 8461 bytes .../task_spec_builder.cpython-312.pyc | Bin 0 -> 8278 bytes .../template_registry.cpython-312.pyc | Bin 0 -> 16379 bytes .../engine/orchestrator/actions/__init__.py | 13 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 757 bytes .../__pycache__/common.cpython-312.pyc | Bin 0 -> 2131 bytes .../__pycache__/docs_actions.cpython-312.pyc | Bin 0 -> 6642 bytes .../__pycache__/edit_actions.cpython-312.pyc | Bin 0 -> 7472 bytes .../explain_actions.cpython-312.pyc | Bin 0 -> 4535 bytes .../gherkin_actions.cpython-312.pyc | Bin 0 -> 5353 bytes .../review_actions.cpython-312.pyc | Bin 0 -> 7826 bytes .../engine/orchestrator/actions/common.py | 26 + .../orchestrator/actions/docs_actions.py | 95 +++ .../orchestrator/actions/edit_actions.py | 101 +++ .../orchestrator/actions/explain_actions.py | 87 ++ .../orchestrator/actions/gherkin_actions.py | 76 ++ .../orchestrator/actions/review_actions.py | 102 +++ .../engine/orchestrator/artifact_store.py | 50 ++ .../engine/orchestrator/evidence_store.py | 
14 + .../engine/orchestrator/execution_context.py | 30 + .../engine/orchestrator/execution_engine.py | 115 +++ .../engine/orchestrator/metrics_persister.py | 36 + .../engine/orchestrator/models/__init__.py | 51 ++ .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 1049 bytes .../models/__pycache__/plan.cpython-312.pyc | Bin 0 -> 4107 bytes .../models/__pycache__/result.cpython-312.pyc | Bin 0 -> 3137 bytes .../__pycache__/task_spec.cpython-312.pyc | Bin 0 -> 4791 bytes .../agent/engine/orchestrator/models/plan.py | 88 +++ .../engine/orchestrator/models/result.py | 62 ++ .../engine/orchestrator/models/task_spec.py | 93 +++ .../engine/orchestrator/plan_compiler.py | 30 + .../engine/orchestrator/plan_validator.py | 79 ++ .../engine/orchestrator/quality_gates.py | 116 +++ .../engine/orchestrator/quality_metrics.py | 116 +++ .../engine/orchestrator/result_assembler.py | 55 ++ .../agent/engine/orchestrator/service.py | 84 ++ .../engine/orchestrator/step_registry.py | 123 +++ .../engine/orchestrator/task_spec_builder.py | 150 ++++ .../engine/orchestrator/template_registry.py | 150 ++++ app/modules/agent/engine/router/__init__.py | 31 +- .../__pycache__/__init__.cpython-312.pyc | Bin 1991 -> 2038 bytes app/modules/agent/module.py | 16 +- .../agent/prompts/project_edits_apply.txt | 10 - .../agent/prompts/project_edits_hunks.txt | 32 + .../agent/prompts/project_edits_plan.txt | 33 +- .../prompts/project_edits_self_check.txt | 3 +- app/modules/agent/repository.py | 140 ++++ app/modules/agent/service.py | 211 ++++- app/modules/agent/story_context_repository.py | 745 ++++++++++++++++++ app/modules/agent/story_session_recorder.py | 106 +++ app/modules/application.py | 29 +- app/modules/chat/README.md | 98 +++ .../chat/__pycache__/module.cpython-312.pyc | Bin 6513 -> 6521 bytes .../chat/__pycache__/service.cpython-312.pyc | Bin 15649 -> 16558 bytes app/modules/chat/module.py | 2 +- app/modules/chat/service.py | 17 + app/modules/contracts.py | 4 +- 
.../rag/__pycache__/job_store.cpython-312.pyc | Bin 3589 -> 0 bytes .../__pycache__/repository.cpython-312.pyc | Bin 13283 -> 0 bytes .../rag/__pycache__/service.cpython-312.pyc | Bin 7405 -> 0 bytes app/modules/rag/repository.py | 261 ------ app/modules/rag_repo/README.md | 56 ++ app/modules/rag_repo/__init__.py | 1 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 265 bytes .../__pycache__/module.cpython-312.pyc | Bin 0 -> 1806 bytes .../webhook_service.cpython-312.pyc | Bin 0 -> 9854 bytes app/modules/rag_repo/module.py | 24 + app/modules/rag_repo/webhook_service.py | 217 +++++ app/modules/rag_session/README.md | 218 +++++ app/modules/{rag => rag_session}/__init__.py | 0 .../__pycache__/__init__.cpython-312.pyc | Bin .../indexing_service.cpython-312.pyc | Bin 7550 -> 7716 bytes .../__pycache__/job_store.cpython-312.pyc | Bin 0 -> 3863 bytes .../__pycache__/module.cpython-312.pyc | Bin 15646 -> 16053 bytes .../__pycache__/repository.cpython-312.pyc | Bin 0 -> 31339 bytes .../__pycache__/service.cpython-312.pyc | Bin 0 -> 11562 bytes .../__pycache__/session_store.cpython-312.pyc | Bin 2067 -> 2083 bytes .../embedding/__init__.py | 0 .../__pycache__/__init__.cpython-312.pyc | Bin .../gigachat_embedder.cpython-312.pyc | Bin .../embedding/gigachat_embedder.py | 0 .../{rag => rag_session}/indexing_service.py | 10 +- app/modules/{rag => rag_session}/job_store.py | 8 +- app/modules/{rag => rag_session}/module.py | 38 +- app/modules/rag_session/repository.py | 660 ++++++++++++++++ .../retrieval/__init__.py | 0 .../__pycache__/__init__.cpython-312.pyc | Bin .../__pycache__/chunker.cpython-312.pyc | Bin .../__pycache__/scoring.cpython-312.pyc | Bin .../{rag => rag_session}/retrieval/chunker.py | 0 .../{rag => rag_session}/retrieval/scoring.py | 0 app/modules/{rag => rag_session}/service.py | 105 ++- .../{rag => rag_session}/session_store.py | 2 +- app/modules/shared/README.md | 40 + .../__pycache__/bootstrap.cpython-312.pyc | Bin 933 -> 1005 bytes 
app/modules/shared/bootstrap.py | 3 +- .../__pycache__/changeset.cpython-312.pyc | Bin 2202 -> 3786 bytes .../__pycache__/indexing.cpython-312.pyc | Bin 2698 -> 2792 bytes .../__pycache__/rag_sessions.cpython-312.pyc | Bin 1530 -> 1624 bytes app/schemas/changeset.py | 28 + app/schemas/indexing.py | 2 + app/schemas/rag_sessions.py | 2 + .../conftest.cpython-312-pytest-9.0.2.pyc | Bin 0 -> 698 bytes ...bhook_service.cpython-312-pytest-9.0.2.pyc | Bin 0 -> 10659 bytes ...sion_recorder.cpython-312-pytest-9.0.2.pyc | Bin 0 -> 5853 bytes ...ensitive_path.cpython-312-pytest-9.0.2.pyc | Bin 0 -> 4115 bytes ...st_eval_suite.cpython-312-pytest-9.0.2.pyc | Bin 0 -> 5126 bytes .../test_eval_suite.cpython-312.pyc | Bin 0 -> 3000 bytes ...rator_service.cpython-312-pytest-9.0.2.pyc | Bin 0 -> 8437 bytes .../test_orchestrator_service.cpython-312.pyc | Bin 0 -> 4010 bytes ...lan_validator.cpython-312-pytest-9.0.2.pyc | Bin 0 -> 2749 bytes .../test_plan_validator.cpython-312.pyc | Bin 0 -> 1947 bytes ...ality_metrics.cpython-312-pytest-9.0.2.pyc | Bin 0 -> 7250 bytes ..._expectations.cpython-312-pytest-9.0.2.pyc | Bin 0 -> 3834 bytes ...late_registry.cpython-312-pytest-9.0.2.pyc | Bin 0 -> 10032 bytes .../test_template_registry.cpython-312.pyc | Bin 0 -> 2997 bytes ...test_edit_actions_case_insensitive_path.py | 61 ++ tests/agent/orchestrator/test_eval_suite.py | 56 ++ .../orchestrator/test_orchestrator_service.py | 72 ++ .../agent/orchestrator/test_plan_validator.py | 49 ++ .../orchestrator/test_quality_metrics.py | 42 + .../test_quality_metrics_gate_expectations.py | 50 ++ .../orchestrator/test_template_registry.py | 38 + tests/agent/test_repo_webhook_service.py | 98 +++ tests/agent/test_story_session_recorder.py | 48 ++ tests/conftest.py | 8 + 171 files changed, 6400 insertions(+), 556 deletions(-) create mode 100644 README.ARCH.md create mode 100644 README.DB.STORY_PLAN.md create mode 100644 app/modules/agent/README.md create mode 100644 
app/modules/agent/__pycache__/repo_webhook_service.cpython-312.pyc create mode 100644 app/modules/agent/__pycache__/story_context_repository.cpython-312.pyc create mode 100644 app/modules/agent/__pycache__/story_session_recorder.cpython-312.pyc create mode 100644 app/modules/agent/engine/graphs/__pycache__/project_edits_contract.cpython-312.pyc create mode 100644 app/modules/agent/engine/graphs/__pycache__/project_edits_patcher.cpython-312.pyc create mode 100644 app/modules/agent/engine/graphs/__pycache__/project_edits_support.cpython-312.pyc create mode 100644 app/modules/agent/engine/graphs/project_edits_contract.py create mode 100644 app/modules/agent/engine/graphs/project_edits_patcher.py create mode 100644 app/modules/agent/engine/graphs/project_edits_support.py create mode 100644 app/modules/agent/engine/orchestrator/__init__.py create mode 100644 app/modules/agent/engine/orchestrator/__pycache__/__init__.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/__pycache__/artifact_store.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/__pycache__/evidence_store.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/__pycache__/execution_context.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/__pycache__/execution_engine.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/__pycache__/metrics_persister.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/__pycache__/plan_compiler.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/__pycache__/plan_validator.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/__pycache__/quality_gates.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/__pycache__/quality_metrics.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/__pycache__/result_assembler.cpython-312.pyc create mode 100644 
app/modules/agent/engine/orchestrator/__pycache__/service.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/__pycache__/step_registry.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/__pycache__/task_spec_builder.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/__pycache__/template_registry.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/actions/__init__.py create mode 100644 app/modules/agent/engine/orchestrator/actions/__pycache__/__init__.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/actions/__pycache__/common.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/actions/__pycache__/docs_actions.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/actions/__pycache__/edit_actions.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/actions/__pycache__/explain_actions.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/actions/__pycache__/gherkin_actions.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/actions/__pycache__/review_actions.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/actions/common.py create mode 100644 app/modules/agent/engine/orchestrator/actions/docs_actions.py create mode 100644 app/modules/agent/engine/orchestrator/actions/edit_actions.py create mode 100644 app/modules/agent/engine/orchestrator/actions/explain_actions.py create mode 100644 app/modules/agent/engine/orchestrator/actions/gherkin_actions.py create mode 100644 app/modules/agent/engine/orchestrator/actions/review_actions.py create mode 100644 app/modules/agent/engine/orchestrator/artifact_store.py create mode 100644 app/modules/agent/engine/orchestrator/evidence_store.py create mode 100644 app/modules/agent/engine/orchestrator/execution_context.py create mode 100644 app/modules/agent/engine/orchestrator/execution_engine.py create 
mode 100644 app/modules/agent/engine/orchestrator/metrics_persister.py create mode 100644 app/modules/agent/engine/orchestrator/models/__init__.py create mode 100644 app/modules/agent/engine/orchestrator/models/__pycache__/__init__.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/models/__pycache__/plan.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/models/__pycache__/result.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/models/__pycache__/task_spec.cpython-312.pyc create mode 100644 app/modules/agent/engine/orchestrator/models/plan.py create mode 100644 app/modules/agent/engine/orchestrator/models/result.py create mode 100644 app/modules/agent/engine/orchestrator/models/task_spec.py create mode 100644 app/modules/agent/engine/orchestrator/plan_compiler.py create mode 100644 app/modules/agent/engine/orchestrator/plan_validator.py create mode 100644 app/modules/agent/engine/orchestrator/quality_gates.py create mode 100644 app/modules/agent/engine/orchestrator/quality_metrics.py create mode 100644 app/modules/agent/engine/orchestrator/result_assembler.py create mode 100644 app/modules/agent/engine/orchestrator/service.py create mode 100644 app/modules/agent/engine/orchestrator/step_registry.py create mode 100644 app/modules/agent/engine/orchestrator/task_spec_builder.py create mode 100644 app/modules/agent/engine/orchestrator/template_registry.py delete mode 100644 app/modules/agent/prompts/project_edits_apply.txt create mode 100644 app/modules/agent/prompts/project_edits_hunks.txt create mode 100644 app/modules/agent/story_context_repository.py create mode 100644 app/modules/agent/story_session_recorder.py create mode 100644 app/modules/chat/README.md delete mode 100644 app/modules/rag/__pycache__/job_store.cpython-312.pyc delete mode 100644 app/modules/rag/__pycache__/repository.cpython-312.pyc delete mode 100644 app/modules/rag/__pycache__/service.cpython-312.pyc delete mode 100644 
app/modules/rag/repository.py create mode 100644 app/modules/rag_repo/README.md create mode 100644 app/modules/rag_repo/__init__.py create mode 100644 app/modules/rag_repo/__pycache__/__init__.cpython-312.pyc create mode 100644 app/modules/rag_repo/__pycache__/module.cpython-312.pyc create mode 100644 app/modules/rag_repo/__pycache__/webhook_service.cpython-312.pyc create mode 100644 app/modules/rag_repo/module.py create mode 100644 app/modules/rag_repo/webhook_service.py create mode 100644 app/modules/rag_session/README.md rename app/modules/{rag => rag_session}/__init__.py (100%) rename app/modules/{rag => rag_session}/__pycache__/__init__.cpython-312.pyc (100%) rename app/modules/{rag => rag_session}/__pycache__/indexing_service.cpython-312.pyc (54%) create mode 100644 app/modules/rag_session/__pycache__/job_store.cpython-312.pyc rename app/modules/{rag => rag_session}/__pycache__/module.cpython-312.pyc (58%) create mode 100644 app/modules/rag_session/__pycache__/repository.cpython-312.pyc create mode 100644 app/modules/rag_session/__pycache__/service.cpython-312.pyc rename app/modules/{rag => rag_session}/__pycache__/session_store.cpython-312.pyc (76%) rename app/modules/{rag => rag_session}/embedding/__init__.py (100%) rename app/modules/{rag => rag_session}/embedding/__pycache__/__init__.cpython-312.pyc (100%) rename app/modules/{rag => rag_session}/embedding/__pycache__/gigachat_embedder.cpython-312.pyc (100%) rename app/modules/{rag => rag_session}/embedding/gigachat_embedder.py (100%) rename app/modules/{rag => rag_session}/indexing_service.py (91%) rename app/modules/{rag => rag_session}/job_store.py (86%) rename app/modules/{rag => rag_session}/module.py (88%) create mode 100644 app/modules/rag_session/repository.py rename app/modules/{rag => rag_session}/retrieval/__init__.py (100%) rename app/modules/{rag => rag_session}/retrieval/__pycache__/__init__.cpython-312.pyc (100%) rename app/modules/{rag => 
rag_session}/retrieval/__pycache__/chunker.cpython-312.pyc (100%) rename app/modules/{rag => rag_session}/retrieval/__pycache__/scoring.cpython-312.pyc (100%) rename app/modules/{rag => rag_session}/retrieval/chunker.py (100%) rename app/modules/{rag => rag_session}/retrieval/scoring.py (100%) rename app/modules/{rag => rag_session}/service.py (52%) rename app/modules/{rag => rag_session}/session_store.py (94%) create mode 100644 app/modules/shared/README.md create mode 100644 tests/__pycache__/conftest.cpython-312-pytest-9.0.2.pyc create mode 100644 tests/agent/__pycache__/test_repo_webhook_service.cpython-312-pytest-9.0.2.pyc create mode 100644 tests/agent/__pycache__/test_story_session_recorder.cpython-312-pytest-9.0.2.pyc create mode 100644 tests/agent/orchestrator/__pycache__/test_edit_actions_case_insensitive_path.cpython-312-pytest-9.0.2.pyc create mode 100644 tests/agent/orchestrator/__pycache__/test_eval_suite.cpython-312-pytest-9.0.2.pyc create mode 100644 tests/agent/orchestrator/__pycache__/test_eval_suite.cpython-312.pyc create mode 100644 tests/agent/orchestrator/__pycache__/test_orchestrator_service.cpython-312-pytest-9.0.2.pyc create mode 100644 tests/agent/orchestrator/__pycache__/test_orchestrator_service.cpython-312.pyc create mode 100644 tests/agent/orchestrator/__pycache__/test_plan_validator.cpython-312-pytest-9.0.2.pyc create mode 100644 tests/agent/orchestrator/__pycache__/test_plan_validator.cpython-312.pyc create mode 100644 tests/agent/orchestrator/__pycache__/test_quality_metrics.cpython-312-pytest-9.0.2.pyc create mode 100644 tests/agent/orchestrator/__pycache__/test_quality_metrics_gate_expectations.cpython-312-pytest-9.0.2.pyc create mode 100644 tests/agent/orchestrator/__pycache__/test_template_registry.cpython-312-pytest-9.0.2.pyc create mode 100644 tests/agent/orchestrator/__pycache__/test_template_registry.cpython-312.pyc create mode 100644 tests/agent/orchestrator/test_edit_actions_case_insensitive_path.py create mode 100644 
tests/agent/orchestrator/test_eval_suite.py create mode 100644 tests/agent/orchestrator/test_orchestrator_service.py create mode 100644 tests/agent/orchestrator/test_plan_validator.py create mode 100644 tests/agent/orchestrator/test_quality_metrics.py create mode 100644 tests/agent/orchestrator/test_quality_metrics_gate_expectations.py create mode 100644 tests/agent/orchestrator/test_template_registry.py create mode 100644 tests/agent/test_repo_webhook_service.py create mode 100644 tests/agent/test_story_session_recorder.py create mode 100644 tests/conftest.py diff --git a/README.ARCH.md b/README.ARCH.md new file mode 100644 index 0000000..4453d5b --- /dev/null +++ b/README.ARCH.md @@ -0,0 +1,155 @@ +# Архитектура приложения + +Документ описывает модульную архитектуру backend-приложения, связи между модулями и контрактные границы. + +## 1. Диаграмма модулей и взаимосвязей + +```mermaid +flowchart LR + UI["Клиент / Frontend"] --> API["FastAPI (app/main.py)"] + + API --> CHAT["chat модуль"] + API --> RAG["rag модуль"] + API --> AGENT_INTERNAL["internal tools API"] + + CHAT -->|AgentRunner| AGENT["agent модуль"] + AGENT -->|RagRetriever| RAG + AGENT --> DB[(PostgreSQL + pgvector)] + CHAT --> DB + RAG --> DB + + AGENT --> GIGA["GigaChat API"] + RAG --> GIGA + + AGENT_INTERNAL --> AGENT + + APP["ModularApplication\n(app/modules/application.py)"] --> CHAT + APP --> AGENT + APP --> RAG +``` + +### Внутренние слои `agent` + +```mermaid +flowchart TB + ROUTER["Router\n(intent + context)"] --> ORCH["Orchestrator\n(task spec + plan + execution + quality)"] + ORCH --> GRAPHS["Action Graphs\nLangGraph"] + + ORCH --> METRICS["Quality Metrics\n(faithfulness/coverage)"] + METRICS --> DB[(agent_quality_metrics)] +``` + +## 2. Описание модулей + +### Модуль `app/modules/chat` + +- Цель: принять пользовательский запрос, управлять задачей обработки, отдать результат и прогресс. +- Кратко о реализации: + - `ChatModule` публикует HTTP API (`/api/chat/*`, `/api/tasks/*`, `/api/events`). 
+ - `ChatOrchestrator` запускает обработку асинхронной задачи, публикует SSE-события, обрабатывает retry и ошибки. + - `TaskStore`, `DialogSessionStore`, `IdempotencyStore` держат состояние задач/диалогов. +- С кем взаимодействует: + - с `agent` через контракт `AgentRunner`. + - с `rag` для проверки `rag_session_id`. + - с `shared/event_bus` для стриминга прогресса. + - с БД через `ChatRepository` (`dialog_sessions`, `chat_messages`). +- Контракты: + - потребляет `AgentRunner.run(...)` из `app/modules/contracts.py`. + +### Модуль `app/modules/agent` + +- Цель: интеллектуальная обработка запроса, маршрутизация, оркестрация сценария, генерация ответа/changeset. +- Кратко о реализации: + - `GraphAgentRuntime` выполняет pipeline: route -> task spec -> orchestrator -> post-processing. + - Router (`engine/router/*`) выбирает `domain/process` и хранит routing-context. + - Orchestrator (`engine/orchestrator/*`) строит и валидирует plan, исполняет шаги, запускает графовые/функциональные actions. + - Graphs (`engine/graphs/*`) выполняют целевые действия (QA, edits, docs). + - Рассчитывает quality-метрики (`faithfulness`, `coverage`) и сохраняет их в БД. + - Поддерживает внутренний инструмент получения страниц Confluence (`/internal/tools/confluence/fetch`). +- С кем взаимодействует: + - с `rag` через контракт `RagRetriever`. + - с `shared/checkpointer` (LangGraph checkpoints в PostgreSQL). + - с GigaChat (LLM-запросы, промпты). + - с БД через `AgentRepository` (`router_context`, `agent_quality_metrics`). +- Контракты: + - реализует `AgentRunner` (используется `chat`). + - потребляет `RagRetriever` (реализуется `rag`). + +### Модуль `app/modules/rag` + +- Цель: индексация проектных файлов и retrieval релевантного контекста. +- Кратко о реализации: + - API для snapshot/changes индексации и retrieval. + - Индексация хранит чанки, эмбеддинги и состояние job. + - Retrieval ищет релевантные куски в `rag_chunks` (pgvector). 
+- С кем взаимодействует: + - с `agent` (выдача контекста через `RagRetriever`). + - с БД (`rag_sessions`, `rag_chunks`, `rag_index_jobs`). + - с GigaChat Embeddings. +- Контракты: + - реализует `RagRetriever` и `RagIndexer` из `app/modules/contracts.py`. + +### Модуль `app/modules/shared` + +- Цель: общие инфраструктурные компоненты, переиспользуемые всеми модулями. +- Кратко о реализации: + - `db.py`: engine/session factory. + - `event_bus.py`: pub/sub для SSE. + - `retry_executor.py`: общий retry. + - `checkpointer.py`: PostgresSaver для LangGraph. + - `bootstrap.py`: инициализация схем БД на старте. +- С кем взаимодействует: + - со всеми бизнес-модулями (`chat`, `agent`, `rag`). +- Контракты: + - внутренние инфраструктурные API без отдельного публичного контракта уровня `contracts.py`. + +### Модуль `app/modules/contracts.py` + +- Цель: зафиксировать межмодульные интерфейсы и отделить реализацию от потребителей. +- Кратко о реализации: + - `AgentRunner`, `RagRetriever`, `RagIndexer` определены как `Protocol`. +- С кем взаимодействует: + - используется `chat` (как потребитель `AgentRunner`), `agent` (как потребитель `RagRetriever`), `rag` (как реализация `RagRetriever`/`RagIndexer`). +- Контракты: + - это и есть контрактный слой. + +### Модуль композиции `app/modules/application.py` + +- Цель: централизованный wiring зависимостей. +- Кратко о реализации: + - `ModularApplication` создаёт `EventBus`, `RetryExecutor`, репозитории и модули. + - На `startup()` выполняет bootstrap БД. +- С кем взаимодействует: + - со всеми модулями, но не содержит бизнес-логики. +- Контракты: + - использует `contracts.py` для сборки зависимостей без жёсткого сцепления по реализациям. + +## 3. Ключевые контрактные границы + +- `chat -> agent`: только через `AgentRunner`. +- `agent -> rag`: только через `RagRetriever`. +- `rag`: не зависит от `agent` internals. +- `application.py`: единственная точка связывания реализаций. + +## 4. 
Схема данных (кратко) + +- `chat`: `dialog_sessions`, `chat_messages`. +- `rag`: `rag_sessions`, `rag_chunks`, `rag_index_jobs`. +- `agent`: + - `router_context` — контекст маршрутизации по диалогу. + - `agent_quality_metrics` — пер-сценарные quality-метрики для отчетности: + - `faithfulness_score`, `coverage_score`, `quality_status`, `metrics_json`, `created_at`. + - `story_records` — карточка Story (статус, владелец, метаданные). + - `story_artifacts` — артефакты по Story (analytics/doc_increment/test_model и версии). + - `story_links` — внешние связи Story (тикеты, документы, URL). + +## 5. Поток обработки запроса + +1. Пользователь отправляет сообщение в `chat`. +2. `chat` создаёт task и вызывает `AgentRunner.run(...)`. +3. `agent/router` выбирает маршрут (domain/process). +4. `agent/orchestrator` строит `TaskSpec` и `ExecutionPlan`. +5. Выполняются шаги плана (function/actions/graph steps). +6. Формируется `answer` или `changeset`. +7. Считаются `faithfulness/coverage`, сохраняются в `agent_quality_metrics`. +8. `chat` возвращает результат и стримит прогресс через SSE. diff --git a/README.DB.STORY_PLAN.md b/README.DB.STORY_PLAN.md new file mode 100644 index 0000000..2af8111 --- /dev/null +++ b/README.DB.STORY_PLAN.md @@ -0,0 +1,271 @@ +# План доработки БД для хранения контекста Story и метаданных RAG + +## Цель +Зафиксировать проект миграции, который: +- добавляет в таблицу чанков признаки артефакта (тип, источник, контекст), +- вводит отдельный контур хранения инкремента по `story_id`, +- не зависит от выбранного режима RAG (общий/сессионный/гибридный). + +## Границы +- Документ описывает план и целевую схему. +- Реализация SQL-миграций и backfill выполняется отдельным шагом после согласования. + +## 1) Метаданные чанков (RAG-слой) + +### 1.1. 
Что добавить +Для таблицы `rag_chunks` (или эквивалента таблицы чанков) добавить поля: +- `artifact_type` (`REQ|ARCH|API|DB|UI|CODE|OTHER`) +- `path` (нормализованный относительный путь файла) +- `section` (заголовок/логический раздел документа) +- `doc_id` (стабильный идентификатор документа) +- `doc_version` (версия документа/ревизия) +- `owner` (ответственная команда/человек) +- `system_component` (система/подсистема/компонент) +- `last_modified` (время последнего изменения источника) +- `staleness_score` (0..1, в первую очередь для `CODE`) + +### 1.2. Ограничения и индексы +- `CHECK` для `artifact_type` и диапазона `staleness_score`. +- Индексы: + - `(artifact_type)` + - `(doc_id, doc_version)` + - `(system_component)` + - `(path)` + - GIN/BTREE по потребности для фильтрации в retrieval. + +## 2) Контур Story (отдельно от чанков) + +### 2.1. Таблица `story_records` +Карточка Story: +- `story_id` (PK, строковый уникальный идентификатор) +- `project_id` (идентификатор проекта/репозитория) +- `title` +- `status` (`draft|in_progress|review|done|archived`) +- `baseline_commit_sha` (базовый снимок) +- `snapshot_id` (опционально для session-RAG) +- `created_at`, `updated_at` +- `created_by`, `updated_by` + +Индексы: +- `(project_id)` +- `(status)` +- `(updated_at)` + +### 2.2. 
Таблица `story_artifacts` +Связь Story с артефактами изменений: +- `id` (PK) +- `story_id` (FK -> `story_records.story_id`) +- `artifact_role` (`requirement|analysis|doc_change|test_model|note|decision|risk`) +- `doc_id` +- `doc_version` +- `path` +- `section` +- `chunk_id` (nullable; ссылка на chunk если стабильно поддерживается) +- `change_type` (`added|updated|removed|linked`) +- `summary` (краткое описание изменения) +- `source_ref` (ссылка/внешний id) +- `created_at` +- `created_by` + +Уникальность (черновик): +- `UNIQUE(story_id, artifact_role, COALESCE(doc_id,''), COALESCE(path,''), COALESCE(section,''), COALESCE(change_type,''))` + +Индексы: +- `(story_id, artifact_role)` +- `(story_id, change_type)` +- `(doc_id, doc_version)` +- `(path)` + +### 2.3. Таблица `story_links` +Связи Story с внешними сущностями и Story-to-Story: +- `id` (PK) +- `story_id` (FK) +- `link_type` (`story|adr|ticket|pr|commit|doc|external`) +- `target_ref` (идентификатор/ссылка) +- `description` +- `created_at` + +Индексы: +- `(story_id, link_type)` +- `(target_ref)` + +## 3) Почему `story_id` не в чанках +- Один чанк может относиться к нескольким Story. +- Чанки нестабильны при переиндексации. +- Разделение слоев упрощает поддержку и не привязывает модель к типу RAG. + +Итог: связь Story и чанков/документов хранить в `story_artifacts`, а не в `rag_chunks`. + +## 4) Целевая модель RAG: Hybrid-Lite +Выбранный вектор на текущем этапе: `Session-first + Shared Cache + Story Ledger`. + +### 4.1. Принципы +- Рабочий retrieval выполняется из сессионного индекса (видит незакоммиченные изменения). +- Общий кэш чанков/эмбеддингов используется только для ускорения индексации. +- Источник правды по инкременту Story находится в Story-таблицах, а не в RAG-индексе. + +### 4.2. Что хранить дополнительно +- `rag_blob_cache`: кэш файловых blob по `repo_id + blob_sha`. +- `rag_chunk_cache`: кэш чанков/эмбеддингов, привязанный к `blob_sha`. 
+- `rag_session_chunk_map`: привязка сессии к используемым chunk (чтобы retrieval был изолированным). +- `session_artifacts`: временные артефакты сессии до появления `story_id` (late binding). + +### 4.3. Алгоритм индексации (delta-only) +1. На старте сессии сканировать рабочее дерево и считать `blob_sha` для файлов индексации. +2. Для каждого файла: + - `cache hit`: взять chunk/embedding из кэша и связать с текущей сессией. + - `cache miss`: выполнить chunk+embed и записать результат в кэш. +3. Для retrieval использовать `rag_session_chunk_map` как первичный источник. +4. При необходимости делать fallback к cache-scoped данным по `repo_id` (опционально, под флагом). + +### 4.4. Почему это подходит +- Нет необходимости в сложном ACL общего RAG на уровне приложения. +- Нет обязательной зависимости от ручного commit, индекс отражает локальные изменения. +- Снижается время загрузки сессии за счет переиспользования эмбеддингов. +- История Story не теряется и не зависит от режима RAG. + +### 4.5. Late binding `story_id` (целевой процесс) +1. Аналитик запускает работу только со ссылкой на документ (без `story_id`). +2. Агент обрабатывает задачу в `session-RAG` и сохраняет все изменения в `session_artifacts`. +3. Аналитик вручную делает commit и указывает `story_id`. +4. Вебхук на commit: + - извлекает `story_id` из commit metadata/message, + - обновляет репозиторный RAG, + - выполняет `bind session -> story`: переносит/привязывает `session_artifacts` к `story_artifacts`, + - фиксирует связь `story_id <-> commit_sha <-> changed_files`. +5. Исходный документ аналитики тоже попадает в контекст Story ретроспективно, даже если изначально был без `story_id`. + +## 5) Черновик DDL (PostgreSQL) +```sql +-- 0. 
Enum-like checks можно заменить на справочники при необходимости + +-- A) Session artifacts (временный слой до появления story_id) +CREATE TABLE IF NOT EXISTS session_artifacts ( + id BIGSERIAL PRIMARY KEY, + session_id TEXT NOT NULL, + project_id TEXT NOT NULL, + artifact_role TEXT NOT NULL, + source_ref TEXT, + doc_id TEXT, + doc_version TEXT, + path TEXT, + section TEXT, + chunk_id TEXT, + change_type TEXT, + summary TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + created_by TEXT, + CONSTRAINT chk_session_artifact_role CHECK (artifact_role IN ( + 'analysis','doc_change','note','decision','risk','test_model' + )), + CONSTRAINT chk_session_change_type CHECK (change_type IS NULL OR change_type IN ( + 'added','updated','removed','linked' + )) +); + +CREATE INDEX IF NOT EXISTS idx_session_artifacts_session ON session_artifacts(session_id); +CREATE INDEX IF NOT EXISTS idx_session_artifacts_project ON session_artifacts(project_id); +CREATE INDEX IF NOT EXISTS idx_session_artifacts_role ON session_artifacts(artifact_role); + +-- 1) Story records +CREATE TABLE IF NOT EXISTS story_records ( + story_id TEXT PRIMARY KEY, + project_id TEXT NOT NULL, + title TEXT, + status TEXT NOT NULL DEFAULT 'draft', + baseline_commit_sha TEXT, + snapshot_id TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + created_by TEXT, + updated_by TEXT, + CONSTRAINT chk_story_status CHECK (status IN ( + 'draft','in_progress','review','done','archived' + )) +); + +CREATE INDEX IF NOT EXISTS idx_story_records_project ON story_records(project_id); +CREATE INDEX IF NOT EXISTS idx_story_records_status ON story_records(status); +CREATE INDEX IF NOT EXISTS idx_story_records_updated_at ON story_records(updated_at DESC); + +-- 2) Story artifacts +CREATE TABLE IF NOT EXISTS story_artifacts ( + id BIGSERIAL PRIMARY KEY, + story_id TEXT NOT NULL REFERENCES story_records(story_id) ON DELETE CASCADE, + artifact_role TEXT NOT NULL, + doc_id TEXT, + 
doc_version TEXT, + path TEXT, + section TEXT, + chunk_id TEXT, + change_type TEXT, + summary TEXT, + source_ref TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + created_by TEXT, + CONSTRAINT chk_story_artifact_role CHECK (artifact_role IN ( + 'requirement','analysis','doc_change','test_model','note','decision','risk' + )), + CONSTRAINT chk_story_change_type CHECK (change_type IS NULL OR change_type IN ( + 'added','updated','removed','linked' + )) +); + +CREATE INDEX IF NOT EXISTS idx_story_artifacts_story_role ON story_artifacts(story_id, artifact_role); +CREATE INDEX IF NOT EXISTS idx_story_artifacts_story_change ON story_artifacts(story_id, change_type); +CREATE INDEX IF NOT EXISTS idx_story_artifacts_doc ON story_artifacts(doc_id, doc_version); +CREATE INDEX IF NOT EXISTS idx_story_artifacts_path ON story_artifacts(path); + +-- Вариант уникальности можно уточнить после согласования процессов + +-- 3) Story links +CREATE TABLE IF NOT EXISTS story_links ( + id BIGSERIAL PRIMARY KEY, + story_id TEXT NOT NULL REFERENCES story_records(story_id) ON DELETE CASCADE, + link_type TEXT NOT NULL, + target_ref TEXT NOT NULL, + description TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT chk_story_link_type CHECK (link_type IN ( + 'story','adr','ticket','pr','commit','doc','external' + )) +); + +CREATE INDEX IF NOT EXISTS idx_story_links_story_type ON story_links(story_id, link_type); +CREATE INDEX IF NOT EXISTS idx_story_links_target_ref ON story_links(target_ref); +``` + +## 6) План внедрения (после согласования) +1. Подтвердить перечень полей и enum-значений. +2. Подготовить SQL-миграцию `Vxxx__story_context.sql`. +3. Обновить bootstrap/инициализацию схемы. +4. Обновить репозитории для `story_records/story_artifacts/story_links`. +5. Добавить таблицу и репозиторий `session_artifacts` (session-scoped артефакты без `story_id`). +6. Добавить запись session-артефактов в оркестраторе во время работы аналитика. +7. 
Добавить webhook-обработчик `bind session -> story` при появлении commit со `story_id`. +8. Добавить API/сервисный метод `get_story_context(story_id)` для повторного входа в Story. +9. Добавить тесты: + - unit на репозитории, + - интеграционные на happy-path записи/чтения, + - регресс на отсутствие зависимости от типа RAG. +10. Добавить миграцию для `rag_blob_cache/rag_chunk_cache/rag_session_chunk_map`. +11. Внедрить `delta-only` индексацию для session-RAG с переиспользованием кэша. + +## 7) Открытые вопросы +- Нужен ли отдельный справочник для `artifact_type`, `artifact_role`, `link_type`. +- Что считать `doc_version`: semver, дата, commit, hash файла. +- Нужна ли soft-delete политика для Story. +- Требуется ли аудит (кто/когда менял `summary` и связи). +- Какой уровень обязательности `chunk_id` (опционален по умолчанию). +- Нужна ли TTL/очистка для `rag_blob_cache/rag_chunk_cache`. +- Делать ли fallback к репозиторному кэшу по умолчанию или только при explicit-флаге. +- Как определять соответствие `session_id` и commit в webhook (1:1, последний активный, explicit token). +- Как долго хранить `session_artifacts` до bind/cleanup. + +## 8) Критерии готовности +- По `story_id` можно восстановить инкремент без исходной сессии. +- История изменений не теряется при переиндексации RAG. +- Аналитик и тестировщик используют один `story_id` как общий ключ контекста. +- Схема работает при любом выбранном режиме RAG. +- Session-RAG поднимается быстрее за счет cache hit по неизмененным файлам. +- Артефакты аналитика, созданные до появления `story_id`, корректно попадают в Story после commit/webhook bind. 
diff --git a/README.md b/README.md index 304b0f4..62b827b 100644 --- a/README.md +++ b/README.md @@ -89,14 +89,14 @@ Notes: - Route selection: - `default/general` -> answer flow - `project/qa` -> answer flow - - `project/edits` -> conservative changeset flow for non-code file updates + - `project/edits` -> conservative changeset flow for non-code file updates (hybrid output: `proposed_content` + `hunks`) - `docs/generation` -> answer and/or changeset flow - LLM provider: GigaChat (`chat/completions`) - Prompts for graph LLM nodes: `app/modules/agent/prompts/*.txt` - `general_answer.txt` - `project_answer.txt` - `project_edits_plan.txt` - - `project_edits_apply.txt` + - `project_edits_hunks.txt` - `project_edits_self_check.txt` - `docs_generation.txt` - `docs_execution_summary.txt` diff --git a/app/__pycache__/main.cpython-312.pyc b/app/__pycache__/main.cpython-312.pyc index 009e082b3c382b4407087739bb1bea5e27c02df6..2ec3f68da712494ca7c252a216fdbefdf1dbae71 100644 GIT binary patch delta 307 zcmX@ce_DX=G%qg~0}#mXUzn-OGLi2taLYfgU8TJCdQ zT(qPW#%ahoeFwkVS77sb`Di<@5* zH@_@yd4mkYVJX;QpBbNPV9u8X=E;v04f3i D5jawm delta 273 zcmX>taEzbtG%qg~0}x0~os;>9c_QCk#)OS;ZZb-hNPJBZS2WWM@fFHe@!Q zY``osxq_L8QEqc9vmqm+^yKX?G&Kks}1~iNVh>JNU2eXxHPp_U>eO=t(qPW3jaic3N#y42lud|30~kXThSoxMXDe|97ITf2w@=ci^C>2KczG$)vibz$OSpM7^L FastAPI: ) app.include_router(modules.chat.public_router()) - app.include_router(modules.rag.public_router()) - app.include_router(modules.rag.internal_router()) + app.include_router(modules.rag_session.public_router()) + app.include_router(modules.rag_session.internal_router()) + app.include_router(modules.rag_repo.internal_router()) app.include_router(modules.agent.internal_router()) register_error_handlers(app) diff --git a/app/modules/__pycache__/application.cpython-312.pyc b/app/modules/__pycache__/application.cpython-312.pyc index 1f2b3b09802d86ceae1f6ffd16fde8faf015f870..ad16c08597f8a562d5e514444c82301f53bceaac 100644 GIT binary patch literal 2821 
zcmb7G&u<$=6rQ!cj=f$dY1%lkoy1AiKWTMecf4!j9#|rS^TASuLZ5^{`v>A>S>$rVF zJHc_rnzeIUP9ib#4ymgTNjcrfq~$a5Z`Pwh_^X9R+uWM`f=Ww?+)ouyI5> z+?cu9YPpU}jkaDlT;rzUV7#d-A42$-Iu79MHPfX#SMHj%4n$n@v-QUyb#kx(>s?|J zO{x-Iu^pb~E(+-b zvMsJoY{3k>$;dK*u*|gbDQIGsS01=3|=S0l|auE zdqEw+UCoQ#9a5IJ%5j!9v2FC59f!#jyM`$=+Vk@c_Pk#6n_drPY?2zcB5UxK^>5q8%QV-6^fMzeWeD1PP1uJiq}t%!@yLucTC5DMsp}$a5+!07Fm)<=}=6M>ryOl zn!^FxfKAnaP@+d62~*6LVu7;KXc`VDsi;0)5s!~z>9R~D6pGElQX*Z7Rm|eBWHL{L zrf_3|cA9R(Hd#t!z!DhLGl?Y}(|X@QB3OP=Fxu^Po0F`w4pSU(kQl+X9) zS9`1F?&>9P^-`}`>K3bBv3e-S^9r1qJxr0@O0TfgEmXWhrCX?ag=%kcrMtN4Ep86& z4kr>hR96laGF$597e6n2TIlB2y!=`(SJ=Jt_)c#ow_AB!0Z}(!_VVR!e$&fu_U4M+ zxixQY?J$#^1qYv`4ekKRKOUba`3r79_~EO*NS*g1 zzCh1|04d2qjX&a&Bn`;MfV}xLxicW=2BbP5=Lh8V0eNFU-WrgzF#er9DJg#v7`VrO E0a){uK>z>% delta 889 zcmZuu-Ahwp7=PcL&z*g_P3NX2YYg03vMf!5LafB@2D_->*pTHJrtNIO6XG0;o9s%|2viw4GEc<81-ATkKL>V40WBziA?{GR9eJ)iHL(6-f1-IeRJT{jLUc;yP zZ0;sx>7^BYtz9RA*2N=Zq>8$F#-K}3~ph-0q{GX!@3j!7dH%V9l>xF z&4pU(!#Ed$9WDXOoCm%*LU4r(!ZWTN#`xnM|5Hby#;Xx-Q)tzwqACADwF**sc*n;f zBt)~S&~)c!^2PELStu6sg!rh8v{7ifNp5D!%$w$Hsb~_Gt_BD@SX0d3q!}i09cYQf z++LPI^7AEgwp=0=BESQot0%}rGK+IGPB_K{yCo69CmN!Mdd!z_TO1kv8ti#eezEj) zX(#?V`6~G-bE%Qh>luAFn7_y0m-jlN>*cM*jm53%M)kekNKe<()4LryjVXKWy>L$I zRUU`e!i`9>9!bItDXy^nS<8J5)}+kIO~&cNh+YAwRh~GhMLHRT8Q9bIQf6WbQ4)m_ zIdRN(M5$vL?a5uMLyhiKy*stdHwMP*1LJS>#>8ZOV)C(%5yL_jx0KACR)gSmUVyvKBU!dUvokk8pE64NLLqOarmyG9 ze+(kBXxHH=y>aT$AJ`ug`d82@MPwLMV%Q0nDJoD=`f~P<+HzuMG%qg~0}%9IT$stjx{=SCjf)q=2ZGNVCI_?mvZb=8vZt|3Uc}}DV}4*e z$fT;Vc{4jRqsB3y!WM=PTnvGXp^P6nSy?q6NJxKR1=1hb7z9KnxO7&1UL@9}C(kod~L L%cxN#26Phu?l?U% delta 201 zcmew?a$kh+G%qg~0}!Nbn3ws3Wh0+88y7c-4+NiAOb%xAWlCe3yo}8UMEzhp$RsVd zc{e*VqsA_vf)<7kTnvGXp^P6nSy|;CNJxKR1=1hb7z9KnxO7&1Uf{PQYD5}r-aN5mx&0S!l*jGfOJmo;Jn8xH$mbn123aokr>b&0EBEd AZ~y=R diff --git a/app/modules/agent/README.md b/app/modules/agent/README.md new file mode 100644 index 0000000..ad8574c --- /dev/null +++ b/app/modules/agent/README.md @@ -0,0 +1,60 @@ +# Модуль agent + +## 1. 
Функции модуля +- Оркестрация выполнения пользовательского запроса поверх роутера интентов и графов. +- Формирование `TaskSpec`, запуск оркестратора шагов и сборка финального результата. +- Реализация необходимых для агента tools и их интеграция с остальной логикой выполнения. +- Сохранение quality-метрик и session-артефактов для последующей привязки к Story. + +## 2. Диаграмма классов и взаимосвязей +```mermaid +classDiagram + class AgentModule + class GraphAgentRuntime + class OrchestratorService + class TaskSpecBuilder + class StorySessionRecorder + class StoryContextRepository + class ConfluenceService + class AgentRepository + + AgentModule --> GraphAgentRuntime + AgentModule --> ConfluenceService + AgentModule --> StorySessionRecorder + StorySessionRecorder --> StoryContextRepository + GraphAgentRuntime --> OrchestratorService + GraphAgentRuntime --> TaskSpecBuilder + GraphAgentRuntime --> AgentRepository + GraphAgentRuntime --> ConfluenceService +``` + +## 3. Описание классов +- `AgentModule`: собирает runtime и публикует внутренние tools-роуты. + Методы: `__init__` — связывает зависимости модуля; `internal_router` — регистрирует internal API tools. +- `GraphAgentRuntime`: основной исполнитель агентного запроса. + Методы: `run` — выполняет цикл route -> retrieval -> orchestration -> ответ/changeset. +- `OrchestratorService`: управляет планом шагов и выполнением quality gates. + Методы: `run` — строит, валидирует и исполняет execution plan. +- `TaskSpecBuilder`: формирует спецификацию задачи для оркестратора. + Методы: `build` — собирает `TaskSpec` из route, контекстов и ограничений. +- `StorySessionRecorder`: пишет session-scoped артефакты для последующего bind к Story. + Методы: `record_run` — сохраняет входные источники и выходные артефакты сессии. +- `StoryContextRepository`: репозиторий Story-контекста и его связей. 
+ Методы: `record_story_commit` — фиксирует commit-контекст Story; `upsert_story` — создает/обновляет карточку Story; `add_session_artifact` — добавляет session-артефакт; `bind_session_to_story` — переносит артефакты сессии в Story; `add_artifact` — добавляет версионный Story-артефакт; `get_story_context` — возвращает агрегированный контекст Story. +- `ConfluenceService`: tool для загрузки страницы по URL. + Методы: `fetch_page` — валидирует URL и возвращает нормализованный payload страницы. +- `AgentRepository`: хранение router-контекста и quality-метрик. + Методы: `ensure_tables` — создает таблицы модуля; `get_router_context` — читает контекст маршрутизации; `update_router_context` — обновляет историю диалога и last-route; `save_quality_metrics` — сохраняет метрики качества; `get_quality_metrics` — читает историю метрик. + +## 4. Сиквенс-диаграммы API + +### POST /internal/tools/confluence/fetch +Назначение: загружает страницу Confluence по URL и возвращает ее контент для дальнейшего использования в сценариях агента. 
+```mermaid +sequenceDiagram + participant Router as AgentModule.APIRouter + participant Confluence as ConfluenceService + + Router->>Confluence: fetch_page(url) + Confluence-->>Router: page(content_markdown, metadata) +``` diff --git a/app/modules/agent/__pycache__/module.cpython-312.pyc b/app/modules/agent/__pycache__/module.cpython-312.pyc index ce87d5a65f6c0d83ac3e2283c50ce5a31eec8e38..8912070a687c30b1f59df4d566202f9d1871f13e 100644 GIT binary patch delta 1457 zcmZ`(O>7%Q6rTP6ads0oi4)st;?&T3gVTf(T0$a3Rgs9&1Syp`_+l*YI2dfNnH{5~ za+DkbG6IC6IUvLVIJ-UdjC$dK#Klr_h*m}FfdeOGksJ^w=B;B_LSm%-=G*t)ynXM@ zd;3SM9k+kAEEDlH{^ZB$jHwJPyyB9{;l=XNDooytuKNJQ(%9X>?v z2$tFs97HI0Ih01Z^io^AkWKH7I%7wF?q*~6sYiD2cr}W5=oYSfei;Og5R8SI``Gu( zUTq5(J@U99%nSRY+yw!+#2;Vd@F_YqMh@?hL|;?nWU%xLcDBgLHwdGg;G&%93RrSQ zEV~j`T-jAhx~m?_In|Z1RuWx}bA4BF^dNG38`tQD=Qb)hun04xndM0zg3K3tVdu#p z?m#7&Hb`x=C=UWa2tt5roUO8T>Gpn8YNGr=Pa}QH4~P2f`QE;udomsFOPg|A=Kjh3s}F$3Ahl@gf-6?`&u7U({cmC5UAu(j&kXPxFNUV(=|GGOFz z@S%fKgGG|;j*^~(g#9l#9H8?dl*}F{=3DZY!%x$%9;a`$E?K0ZD@3Ci_lX5^k`Lwv~Yv2xPZhq>dC?^6vB+JVtU8jzLGhpGxt1 z+|^&+UjWfv4u2n@r_n^`$}63fcRH`%JBe7%Q6rS0i*|pc}*iGZu-o#zvLbA}{HUcFFR8*-HA<-V-f?TKx-f=K;?C^Gt zNaZLw1j&dZhz2PFaj1mE#g!=H$^miZk}8$3Ry}awz|kU=K!^kLHcl%>+V8#j=FRLk zZ{8ae{>Y_&vF#+0E}ch%jlH9D(%C1RRH;v@ich)f zD_rwcUu!U5-_@$RuX5H#B2x0S~*-zE&UW zj(>qtaDJ2%->H{UVIpQFyq6S7&8exOeZutA0X_D&v~c1v+tW^Ny`_hE$9N3Jc8i6R zW0&;w9=k&yMH}Ii_)1&O8a?HXGSH8sdZcEF-?b;GE|`9*STj#TEJTkn8Br4G0;o%E(X{e@{EnJ*6OVK z9N~aN^#SB9C%;q2l=i{+Dj?b8(G#a*`dda=@MhaB3gtxDMGw1P^#({8l7 zyj|xvCr_3ZACsOEu?^sj=7*6MuC4_)S*5|l`n&Ri{QWKuLMX{|Pm6DiPiR?G%&F2T zDb`cqyfmajZA9Vj5NB;GNfqYGl+d~E?=Ez;mP zY!0e7j&M41jhvplU3f?gJ$HNzZocL@;zIIAan*8YQZ%hOYKdEx!%#&uCunNMthbu7 zhrky1tfy&O{A-nFA&$A59--)!Vv#cMmUE!!Yy&d8!Hk?I}F zj^t61hhV`J212(FqOmEgAOS+dLD&1VX@N!k(1)UZ5jhG(4YYuP?aRK&mVp9!Y0sG< zDVn04ZPC31&)m5`=bn4!+;hHr?|(QPRst#U`>zuJ93bS6_)xDKBBY5fd4&N4(t1l^R%t-?Ti^6X$J|nzQU?1ztr6#r1 z1f}N3x^kI$UKg?a311?NVv8k{sdOxzNF~KFcsHM;sdOrynqKf+yK(jS;P~~sZ!ZpB 
z95aN6gqG60s<(C`?Z%W)XWw6TD)brKu?)p2oZ(ok&j$iZ!ls zjf#^o#dw3plJQB!5uc1DZwh>LA~7w9ibg8&yPo zNW4gl6@vCD#(;cAHgz_Gchg9`{!J6+%)}Siv|!E(^V_sx&Q5&0HXWF2BJR+p6LT&S z=-6~)&OVNwI52q7{nBDjZgoi60@6Or)dHh0sm`hANdo=piDfPf4jQ zl3{-^2UV3+7Y?#S<549v!g2x$$y8-oK}98j+`8|WifdTZOM213>tKDdU+OMkAK*}* zBt36vfx?RGS!5+vH1S60r{hh$`Ab&qiR-m4O`(z3C5Z)o7UaiUCHB|kA9V0;!*_Vv z%TGu7*;IZy@%f$WaoItD)5Ot@+q`OcsC{|&m!=CO;u(wR6f-!A+X-HvifJ}BKb?y4 zXO#B2q&PP_o1$s(3@Hj9;dX2~!AHvtuvaV|7iOUv7fwt}B^C3GAd27=#EXij;)SB% zOrsO2xg;N9X&(rwm=Z}|_yoGyv^F=el189PUDOLk)kTCUu4Zio+MtWL4g_r1*RmA7AI(obHD#l+ z?`Ik7y0fJe?q1pZ%Yyg(iv9dEZ}XZrQt(Fd{ZGaV{qM=%;~De1v-w$TxD*PPc6F74 zL(eS+u4$7P98KUyERHpEuwV|ZnTHDIp^~qy)ZDQ=P-q@5alzcdT>pxvyISIH%S{%% zy`^B+aPm{7wrnYAuC!l>%XGf2|PQSADki z=ADJMgU_92mvxhvE!Le&5f_oV6myV4XVGXWK889>jh0-c4i_lT+ycX*ZdjaU6ruIg zsHCebOIU<@URQ0~XzA6oohK3fm}0mI@i(SeZY0t-=Hj=6G?!RxfYzeLjUjkjXlr07+x-3tnuuqCfUVfMFUCF}lkKf*_RfO6 zQ?_?yn3Bz&HT~_;5}HY0!P}Q_mc4sd?0d1?R6N=_|yyyD8y ziKt)aAA!E^>+noPub(iYjv)!MsB8Qk`Kng6mN&_wK8+TjCP~-m+w=Mw?mmXRrd1W~ zX`FdOrPm@`GeSe_;*FKI<-VrM`=UWIG_oW>jOBnqJc+0hGKH|KE>-2QOLnT0?2Fc)51bB5xa#T1% zcyxd!J^)QLNeyQwHC5Vbn!QP&{V=q5*2wLww0YxdlA@}yO z?BO(Apb}YI|QDGIBo`PoR~;J&P0Wo)NO(P(|<$yvAv8I!l zh^OfRV51mew<08YMGt~1Mo|Sbu!}&TsKsvhA)ug9tIu+miNdl zL&cVhPr82B_uIbj56JsYz}wTL?E5HV-Dq-Wd)2Oj?CC0c`tv%uX-`I9+JE48$9{Y4 z>EP=Ab6M|F=zb`7J%3`=J-qVqmEy;-mFuyLal_@!URt_(|LSr;_VpKiZ{=^SaU%t8 zA{-EP<-`}Y+6 zd-GKG>|663EO-vep2J#0S76C`-?^+`b#;}R-+4h;$APTA)Y|!Q^ug$|`0Edgt9U7oSA95`!(?dt<)ZFq&?y?tx~fU0r0tcKP%2B^BA3c-klzad=(X4ODogn(p%zZw3P z*Nj;WDU~rHY*fQ8I70*~YN`Y-I#Qz_Ys0Wr4dTEP_0zaj)wyaT?+CmoGkc0;b=SMp zaJns(ZKqV={(X(Z3||*S0YuqrW@z8Zi~eiP4DAhPSpRLSpkb;S^ecLUJ!?6;h=Efl zITp=QW3F}K7$j|~k+i9Sq^TN7n;J@54aoIcexsyL$qAA=7fnCOzb=Sxf5kWQhky+b z!Z3w<%eqt>u3z&-tDr!e69R`Ua~NTS&Jgb2`SUIl~nB(^<{<@%&>M==7z z0=%WLlQD6Up2kWQ9i744RV1TG&La6akcfr;0`uxo>N(8)63KZa2*KzDBo~o<1SI0$ z24D1JEXBPC#e+f(_*KEyHVRWL>Opjr#y~u6CW;AU1>icN*tA1zZ7w%}jlxXQE6DB= 
zl5s3yVGBP)ui-nYQAAl^1!a{t4V>Ze{X*}X!C7@*59Ue4e}~ZC5TN~eXzZDjV@2=jr~5Ld(heNe){!xmTpU0wF0^#^{@G=d%=HyH1c8JH`u3B~tZ^FwZUjQPGb%c}Ew>I8 zTcH}nT?-s31dhmoceBUWn**ipo}6_f2%z!d)Pt#fi`>4y*na2%P6oe!LO{c2#~Sup&t|3Uxq6*)Lu430e6w?$lz!!R@)gY|sZwTcH}t*9j9 zL+b;+=sI#OW~extzTL5`@eP;!yv{BV)Px(u^XZ8W&XQU ztD(Ick%9ch-&}rl`N@Pl_-=9V24%@k>G(N#ko>+c@&jYf0wlj%a9;L5xagywsef4f6Ev)Shr3wMgzCy z+6~!J=7_=ia)B`zG##kD;8qgu&L-yIUJdTiDA*8DbX0&aGe4V1-lS+)>KVl>+&$7; z@TRFu9TxXU+Qk+gocw4$s@=gIqawWGxtDkd$fllQm_LxtKN8CyNC>XjT)_ie;bS)Z~~#31QIZ%kdTiC^3f7(2njJMm?V&9UDq=ZJN_UuCJ<`0 zm9{@$OK&SJ*;LwWb<#$<{gL*1r`6v5Fqk{&i0Z1_+aLXNa+>NQ=VW?v{brM4Vx}V$sZmag2%knCMtA$^=9~;;0sH z#M3DN<`IKN2pYlK3^ehMW<*UJX%*Zx22}cwHqmOJAxl$;7}`v0fKE%3w3gO=q@pdf z{vGXz?zYxh#+P@FG12${GaloD@fdqMK;*1a3otB{cCld%rE8Vxiqn2o;`xbaK60LL>cns3ovv+C7f(n6} zNh(92+o%p5LL4!vBG51*;O{n?QV*j^!by7kpTXXZl)}|D;O_LgDR1YQJ~!3dO?mpg zl>18Wpm&gB1^w~`Lq<8_aF$Qs+R|+0KE+V@I6S#ev=nodd(v zdG~O0NeN*;7x&Szh(8$h1?fC~&ro0ACXC~3EWmKw_BbQJ;U8ms*Ml5ZMqh}FMJccQ zidQ%S{8Y;2?(Q7w^HL30uQilj!o)c3k2AE-AE&&%7u(7=G(i8>5KX;I+Y~qH*8N34b^kzwL`KaW)v>Hh6n*-e-Dy2HgX_oqbz-TX8>k zqv)_fZ#I5abkHA;jrlk@cQ9q!=V$#tf|(02Q9m1u<*U}_YgSgtP5DY`LCk*MU=nI4sWVm?DdJ{S6Fnqjihp=dZ6?_AXiWrYv=7bj(n_5MMJ}-|u_3Z{4~ht)TqCxAnCJ>_;H_OIYUd_c>-WcS9QT+S>!)wB@5?(_G!NRK{ z@8-2&fSD-mG_m+2<*kJ|5sSZAUd_fPd6SfKu>MJ2m-zt_AqV0ufiUBbN=XT?!I4gO zuN+=}j!v-USf~x>{rSn-^u9v-UQFW#UhXQqj$$KkH0%N+Z>ySbo@*A2yatwG`Lx1dFMDP~<&Jl+|2&koBa8K!8W^AKL{|H{o%*X+lIDHiRPDOAcW(3- z(FM({DXmf)cCA-c&xhth3pJ^#mu7p?dSu-(t64LZ&E9z2yvzC(mz*IL!i_j=N5fSz;Mu6)?+YEyq+SLr&9zHB31Cv;z) zu5?$a|FM#QQTAC_3-Rj}iXeXruVo%3P$*-aVP3uxav=jOvRo7a8-@dng6lzse@36s zusag^kX#PQ64FFML#uM_T!Jh~t4@#wv|3to!v!CeHla^wP2k(AvF(?A;DgMALU?Zl zmxP1zjUizO$=)qX7RL%MTr?@|B|#)q33a?e!h~!Rl)WePwDwcEo<{3G(nxk13M+f{F7V2BW2D4|_e3W_xks(qCY3H^|QmElI}I%l7lf zn0_auGJQktL8=kk@Mfw*lnOw`CIgf)P|EQ+gw0+3p6O7S2H{nP$J|D;G30)gD0V0as)cObiC)#hBXIg?EtDcgxz-MX#n?&PYY zWy#T!vbE0YzA`!1%=Y&^?|K%xA10P+Tq|bxSEk+4-1+3m3rjVg6?6ZZsp?+Ed#|ij 
z)!uvSUie-lS=Bt-^PdHYPGw=DW57Pp1U|4D?QULsp|`VqY5(!2>{HvbFBxnfpPH_;?jLWeb%wtQM9oeEw4kqmOTa>ujt2eUAs_!*+vCkDv2oy#s;;h zbY9x;}rMeEpxrgdyp)jBrUwM^UzO`*~!AcdqG z6Xm^Uq5ZLeMEUJ*MwlmcYYNw)#IpsB>yQ_P%sgjO!5iwW%B+5v56=yjo_{pG;f5T-E13(ga0bf zWt5t)DAHXJk=HZi^^zv$M7_4W_arQoFO%Pa_&AF%Vw6OKCYxV#h8t_nu2ZflQCr6=_1+}RlMCcSII)(=!`ZtTwe>a}! z!NXZR^x`3#4CpfJU{B%s(|C9V54ema6yC8xkuEXR-@*BBF@dSEBj=OZ^!H zsc)n2tI!EI@%Zw;&?_B-cPHoHntN;E^ux9l$Fbz_)mig;+sQ|lzob{%&fk4`{>0pg zd((?;EB2P;YlE};^$u5t?*seY%kx*~t}g6cY)@4lU8w-(H)pk*HZv$3rJf>&8r7rT{`&cW5*xsQU^WD2ZxdaL#d`q>jz$3Jp0?e z`+bibkLyy+-OJ4vfZ%{<{m_wzudg0;FCBG1jwUZ%P943neDqrK^=qj^KUqI|{L${! z_THuT-sHL0lRxpL+TU1iXOc9NIy$=E+V=4H?@#{jWb)OE$$`PtVej(c;pCO!RO`rk z%b|yjzi<9s^P`_8FTR#)8(40;j77IxSv%0YUf=lZb044kLsRA;?oYz0B}1{5`tc{V z<&d8A5=S6Efx42MpcJ)LaE9_lnakSUp#G-@viqR!Tl}ecpwLQ&gV4MZR7K8Oa8`md z7a?mIfmaCs2}MAkLzRv2Xmw7VO0Hf>ab1v@(Iix?5jql@X?XUG8tOY5nmh?Yapyvu zi_3AZ6yqt?vLqZ-l*k0B)QQQd-^>4~1D%shZl#n_NHRlmEeaNvxg^|-wy3Y8b)om6 zBQ=Hss%v;xx}5Am&M4+ocgi(DF{4xHAM6wR2Lb5+V2VlP)Oq~J3u{YaJD|wtmvr?- zR2`6j*?pKJ1qvDv-Dr zS5nz3%(GPDX1@+>vX*7y6Ks^%c%YXB6+@w@CxiuW0E^Jva+c-H3NBv2pmQfgwK(PAe+}4Fb0iIS7ytJ_wKyLkN)4Q@mKt0X)2c2SK7%)Jfqv zn0wj?oi-{#;mrtS(?;BByNVla*PMD@@AF0d5yt1^jXt0F{|*3~eS#W@7#8m7(2x(&ZSkqN-i#3JlqhvFxcGLter~z)g#6m1&&->M^}R z0F7wZ?zBk&&B$Jrwg_Mueq6Z#TJiHL1ki?TRcX5b?!eM31+Y(r4j=ua_rK9@@rCXf*a&Ta@3STIuOlsj0fB!8!T&_uBK8zp zI85y4#6D?dE;V=E4*_`#{igWk3unsUCke~I{_?s0q$v6{djoz#cz`$x2565F#Mfxw cpONV=sOB%I^J~=nOnXC3y!ai$qafk`01p6cpa1{> delta 342 zcmewtwOx(xG%qg~0}ymfnUlGmZzA7zbtVv(fr0U}1CX4~kjfCnn8Fanl){?A*uoOU zoWhpE1f*Fe3oy#?q_C$j139dfESemXeHhi5HZe>-#%(%z6{E%EIwlWxCcnf@j0_Bu z6FEdD&t#F_{FkYTak3GY4Ws!Ss7(F-xZk1%BZ;~VpV4aaDQ!94 zaG*d7!)GRbR;kZQlfP+uvVRfQV3jIzn!HJ;QOXgd#|cF61BqW8Ho5sJr8%i~MedV* Rb(Q$z85yNMF@Q+0A^4_?x_k8Nlic54;M#s>u^0(F&HwoP$Xz=K`8O=o9^E{0b@+7K zX01AH7&8u>e&|aDfe7|=(^&IBGe-b-q@!ak11$`9rd!9_2HH5n6Hzz%+1f~pZ!va} zQ%VCJLcLHY#s$?BOeDC(PQfk4hqZ#|G0i}i$nj(c5xkEP!FQ2YdR$aq^<*Hy8vW21 zs5K6BpHRhu^Z?ge;45{zrJ*BYHYcTW8EKy=Jv=fbR&4uI*`xa=#G$Q|BWXdDVqC?3 
zkC>B2hO(a(rR+#H$LdUbGn2WI@x#!R0ut9Dq)tjuDN4z#G8WX^wj376b9bl5=7&ep zeK6aJeO57>9m$OE6^AksOhbRfJ=0%N=fvZ=ieW5uJUMhEC1opGNz6@3<3sanV@p#5 zRKf4+R{@+Mc|y;0x6_wxF8Z8RADiZL8b$^n&)3A5=Z*#lk@!5n4{8OH=K}QPDmV5R z6x35dyN|>)yDH=p&N#(?ZhXp`NF?shNt5G4shlW05K~ulk~oo0frK<5v5|?2DlLu! zf25Mb0zBqO2zPEJ^vduMPmQD6kfm5-f zDOff&mrTvGrj}y+nv!WvQN89Wj7WwMC=VUye|x|Mc*T}XrpCuJxl|5hpG_vEcC1yw zA|w=RMV$r(OC5kpb*wCm4CSOapcjY~#p)&mtu@d{|5>$F%Nkm#RqZcKfTqq6kqmGG z8Q={>SSj$LN>GVvK`m+oji?p0qE66>dVym*8s5WJ!2rA2AaH{58BQ>XM!^g}3;e9` zv%$|Unnbf7W)&O@m{SA__GebcI;<88R)J$=Z17=z5f}8LZP+Q)J*FM7!!a^L**vTk z+>faT9LuKkz;s@02gb~T589mz>|om>D}E>`iwFQybvRdnb+Zcf(9I5c;Qx@ z{%J-VU|3vp7=968WKm9Lp%K`33r*0{EE<9RD9qy#T7U{92IWF4RCt&0sIQ3&8rz`F zC$!gUKmyn$2DN^n9uIHY*Z^UY*chFLW)f6%wKm)?ecoY*aT81=)m5 zI5&_ZYpfn9R{&+`Q&IK+WrNTQ5>VLhgL3!;7h6?v?2uBni6hM7?42CXjf{!A=mzb^ zxHXFwCn?RF8VMCOR5ROv6WsdDqWqYi)9xwcOjwD}ye$PAlhi6Tgyd{BY)PxWR+qzL z!=Aim!T69Q`+Q(^UJGL_j1Pv0B+UvvAt%TMZdc4g|ETRKAW20xZ%`#1zT!;gz%(bb zV4IW7+=`NftEgC#nI(o?GAP7jiqTF^)T}lf{^f#zj$r?Xj!tAo#?h+lD(bA59+p-U zLPLRoHH)Kz3ByNfqFG(cf|emW2;d~CI_U*n>*~3tp4q0p8TC8*@LXucY^WE}z+6*r zv2WXK(`~Fl4tL=@D2Ezn)bHv8Qk4FS?ojX!7-wkyJ4bMdR``7pepdw>J*a<)CiP*R zF3;upyqcbNbg2xZR{Fff&Dj}r(&r6zdR#3$+*iy2m0mCaU8fq%2v1+shw15Fmq}Ms z!?cldX1~fLm}!rxUPVSdw4|@&{Pg>VI=a~yvHB3S3!>*@2WgY+-$cJ;J57or8j zN_E~as!YxpX?7};FNK$E7w5k})0R9LZD4aW(v;Qh;Pa}yIk% zO$#jn%h^KB)7kYjUK?Yfrt$tHCVw4p(%G?tub$|4J0(>oq4y={EYTe^dcHe zc)*_`qXSGY4*+*zi|kk4iM~#r<(G(v+>+-9 zk!PJZ@2hd0xHZ>|=hkO5m)b6(p@(PM|9_RGyT;ivSu)kNgX+3f{?S7ygT}4O3`^mX z1^P!*;`_BpR76z?$`f57*bYd;sQmp;tlKub92JaXG!wGi9bqpZGJjV#MsGk~U&<2ouD*q3vV_Yo0YwC7zR3?tlSDasKz6Ul+ zjAITqys$JirUX$DK^z*v2#LPwjID~W%Fnc9yIZmmNlaxk<4}kUWyXg`1aW*w#L{po zoj#NrI+~o!iUL%;oT^*T4QQmjFsy{9zG7VLfT0x*Q?V@T7BfkABKchi?nZ!2Nf@J4 zGzw>Eq$CVl>Gxe-1!oE)yu?xuWO5TU7N0MDp>(qNYU#_7;+078rP7y*-z&aa{7&(^ zrKhp(dw^apem4RXCyU=MzEXOW(O)h-Uc6Ggl7N6qjqXmxdKiTT&RaHW~hy6Z&boToCasD=pLF(rAKln_5%^t(k^e`SjD_l z{uSra5;%S0L>3~{HXMWT8wYI42od)9VKFP_*2OezC`mvH5y4>uM*zgsOZu_6FixNM 
z{+8>dU-C7r+X56A7%RG2*k{tc*d4zv(gCEfEB6_?LFiqv*3_P4N(X;cQM*!9)}ru)UEE1X9!RO7TykDSNpOKv7_P(m zE5@;mAf}VTM*#_Ox2B0WcsF(zq=qc zV5@E3u};F7!n6eo=qtXOzrB(2V1V}YV zVJsT9?cB0!$M${O_cQ6o0@4nsp5GKqh0BkovKR48N_ZX=Z4S`g0l#2mhgK1VVIhF1 z)Gga0pux#>Zc(5W$FgQ7NAohNsz-9-7~3HgE=5lU8aptTfsw+kA0=;)1)3>0z3g=B z(80@=bo5^X`P=98)~BscSY`7gvbjz+hh(!|Hani){lso~mCeDDIf&)EpSXKzxoi%X z%;9TwMuV~HA+|c2s$avq{q$S)Ckk`!i0r99lR2G{y}`1#t>kT6MCyEJHk{rd2g2n* zS1HhS&7}1hXN*-Var?`zc*zwnyLw8l9@$kdH+0MOG1=ip+!>U8&9c)g`+{X(d&$@S zf_K)}BZr&I;nk(^>dRBJ;jOYKT=f!nq4z2=yNxr3Y7iJJTN_K(#`E!F_qHk_w{tuA ztAyBg@*fjo*vUf`Ksnl8j;=06SC^yfO3`()BOup@%k_y;eL@a2lmnfmKqpLJHISC| zSBa*@_%YFhjYWNZ)x2zmoiN;8T;X0e#Q`?O0XD?}UUo#wj(EutpV7*$Hf4=(yZSHh zoiWJ4?s9NtDY)|O;JPb(QSX<%^<{5Q$=g%x-8SpJ4bn|J2Fg1gEbVyky-?~(?yT@! z=3M6T>a&@58`c#=siHmrP}ZBv`p%NRQyyS)n{)e${*JOgQSv8Vn7XpIy#Aij`g_Xj z_m$S~oAvLXb>Ay**k0bSr?g?ud%nGwA3E!JE_g2Z!s@fZcm2I@`}WFpew3$$7`mc%}NU^cE+_<{b2-YS=Vn6> z$ia4)-jRUm9nNcZ;%lCZB+8LhrN}C|o_-Xn_sZ>S<;3O>ExLf`n$F~NR*4Db^U;Qe z#|j^Wn#!TBQmAV-)GbG%b8Yb#?woDwm!qrXjtw8eu5(^9nB3Nx?NuMKz&?+a%rV&) zmYez@V215G=MNS`iE^m76zaXa?#hYsrah%id&-**mNp%n4gJNe|329l1^z4@z@Nnm z{8{Rr-u1*T*?v^Edu2M?(A~SF@^-AWyU*B8+u&8UM@sg{oW1dU%incA-#KgV22ok8 zRV!78j}$)eH(zk9V7y+1kvH?#W#_i*Rh}5`0n~ll^=c=cIjDg%bRABR;Q;@K zkBp?@5O*D3{(W54ND3Al#7G<>yaebgaev5O1FQ9gzQGmTE8HNj{^r&Qpg;6*gBtY@ zy>Y-_=ea?>`gK(^;;r1EN&R|T0Pr_BZqTZJgZCjG;07J)H|kmEAFbyGUFsigSO*P1 zUd;`9)j#gX4nJ`exWRz>C(bQ4DE+k2KNwQ~blnyc6n?Jp4>nnU-hgww3FEe@-{iOW zq3~8cH`t+mD|n|H3UUuO*rk?xw>zNlOAUum&!CAxD}xROT?~2|1m=J;WE#8|ULCB^ zg;1J-rQbRaKLIUgNN(PrhB%JPK^9nfY7umnAHh(Oqo?&Q`dX{80D1bw4*bm>lw3V_ zit)RoAs8*Dsu;l|9?#}d;P6VAp^z|h53UXSIwFINu$FpPzBvy=^;8>kxtF8{3u5Ay z?t%xH%3>npB$>1Mi-Gv1{qNX%})GJ+g>otI(uhybAjmjgO`MN0(~>b ziu(0eoB)&@IrABi<%R4PW*X4h!Jzr-*8rH|;zFC{PGIy*Fmj}$`Z_1eb*b{2#VZV2 z?D4wi1+`e4SC8s3G_1uidF>_cq7H_Ihl4yKh%BbN(T8MQE&6Coq#Efq=mL4a6!_LF z09_0gV1Ok~2R1REW5E+eQM<_U93fJDo?2G=3N`0wiPL~yP!Pq76r5d&aNug$4m9`S#Kv$Rc*x3@_V8;7!{#2 zw+OvK6#5!?6M|CL!;eY*?*YNvqEUF?_?!eq7?!CbF|P{D*P(D1Qbi|7Gs&rI14QA@ 
zha)*H{e?bAjlD)t$bBIHW8}E%J_6C6YM1mN&`!0n%xh%c-;5}p=1>koIlTUYdo&2% zz&RGc6W6{FISL@(H|{rHeLffqK?2(}r-TDhj8#b7PYl)NM|fetsT=mfIVLyJ*& z0jIyE8w_4>FDh#oXd&6_B4&Hp6fT*J2`OFlnbk}IbJfy=f7P3MmY%X zR!9ysZ2p$0*jc@wTHk;It$&TI88QwMK%)aI0crAF#Wa#l z9ZQYmQisxFO7y;vE!d-? zVWE!n*GP?e&Ej4|pIWhIleDol$_pIb(b~gBUp~>A;`XBvtY}jc6Cwy^6BdlaVs7Y2 zass8Zz!M8CYK-9)sc4WOD?JJL4bx<3cYA^#=yM%m1Ij>p5di(9 zefPpl8Tw@0*~Dg(zKImy0}8!QqWU*C^b47MdzrxFesK%rU96v9g%gLk-WXc#Qr{qK!Y! z(YF%MvOOXV;E+F{4|o6ieq>JiA%fQd05>>iMU&1zVnISt*LJzm2OEv!8EFi{%PCQS zOY6+Bit4C%LTZGjEV86`7o*Zy>=9drzx~0oi10s^~?qX>bQ6NKevgpI12}kH80{n?J?T zjVoUPMy{=F<=t;#7t{tOS;%j}Fp;V4bkCYPy0w2*xQGpmOE&8wtBPvqSNl7IH?U${ z$_o8ue@u&~KzfH-*0d>XA#2x55zGJ-w?jALfH zR$&oA41U?$;hXvx0q?qh{f9P(rs)HVzp8mBtOjIo(wqL-iyWt{NB$K6o>R;0Bt)J{Bsv(eK7RR=3NiQRSTwkI@G)w+W!`tJJf0;~2Aqx-6tp^{du z*>E{mCGdJpeHC9-vrA)~X8{Ypyz2Ss9<98(#Koc$RZA zB{9`-BXh7LVk_vYpXsxG9TZj3!T&Wm3=W3$0WECwhQ)Cxh|AKo>=IeIV(zCaHu_tZ z=6(`4T_Y>^%biFWSWbxvwk*r(Rl?Z3PstZcCgj5n-=_rR;3gpjn1Ig?4$em3+t>tm!Phs|a}Mg>)WSLG`c3sd-1!wd4w(hh zGM36FAk&$XTy%Vszu?A3=d#Ak*ciB0%$6A-<99d63v4Mnp3G?Av8$-yikxNml*7gj z!}0_2iGG8!yp7U-LJu@i*{=cs=Z52cMcRKwx_?3Z)?X0#XZQ^{@IKl9KDq6EviW_| z^FFzQ8aH?C{7|dr)}GyVK6>s>*ooZQ%YE$iS~F}&ZY}IK-t;lA=DM)9>#7QhRV;pN OJ)q^*enb$m6Z^mB1=}(J delta 5632 zcmb7I3viUzb-wrCf8Xzyw9@Li(n_o(Bm~C1gp5Gqp<-c>sBHzav*`aNR!Ca;?#h6& zM2;b`Qscz-_rbbM(`vY2)A8?G^&R(;uCQpIS!9KVhTz(-(*vO(z;R zSaf6NL9;>>dF;gabAu95O{3jK%{yI?n#yyVQMlNW$H*gnPP_{RC9KXs1vE}W8?iQdn!Jp@}1Ht zen!5z*#(9*XEYj{m`J2zsd!={8I7_M39$P&V@=1b33CWRNOkP*SL>QzTVZbgEvFRfD2ejfz1r4%-yd zF?rCW7!~s=s#sLBVui;Bj~yNdJWkc3T5({-wMe;D&=CKuigDUV$cU`k6{FA+?co{e zPD3%Mj$x1DIc6AiDqa}McS=(JxQV+h_zTXIz`Il}3|a^v0CrtU8A!@i^RTQ0At3jR zQUPiZ3ot5y;khdgLop7JRe{W_R2MnmD)y)Wu1~4Gj~fE6|E??;RIsmFhlQiis}AHP zO10RtjKdG(?;x+eIb*$A3t|0m5QpjoaO9V(=oYh zFnHJ)Y1Z6ZSZs2?SctC1S5d?s9;qNzTQ+)&|Q$W$mo|zNwsrc*44`VkO9i3Y(RfpkamQ zC_FcIS@`d)KjJ&OOZB*hP5eZ8i=>$Ov-Vo*BbGE7b@MK}m%nMZ zP-~%&MX~ajqmp}^UTM@X#7i6%)K(DN6g!G(sgV51HZy<4;kMh;G_6bP0m*0?O6<1c 
zSpu9Oa@_Vsj#Y7-G9GahEIJhzSPVK%-z-U0LI~nFT7`XrG;J!LFqD`{3KNr;hfuB~ zqxJZ1v7{Yo={!ys{EiZZd>y9kFLWn$-`4BN5oh{t+dSv=JJPi14cbLdc*a?ybB;zh zwRxT1qV$L>UEr1Zio8QKDN!;)j=0nA7byEpAr8M%A~Mq~qIIXOMbANxj4nlP<-J^Y z^3}8}Z%rv3ZR4K@%K2v=qb_j7BRtyU3GuxFA8p6Je9*hB&hvWkd|?4nl3J0Emypy0 zMQZdx)TJK&yFhhwMQUA&0zR|7x$c}5U2^fD)5-V z?mF@ZD}2@6EBqiiNDk>9CkLs}ZwHi)=LEhCNROL;s5?j_;lAn3{AXc59}oVSXAMkf zgo+CmUf8;HsA3%r8`*eNQG!}A*a^D1xaTbYa4%3RF6m3=1jM)qh8G%sgzDrxwCwPrbKQ1yUh5msy@Lz{Q>o5VD5o)5u*i$OY4~{Gclt_XX6^YVZ z(S1|#F(o=2A5)Xj@z^A5<*juMT&)Xr<4LcX@+B4AGsal*@WfC&p_x;OXlg%GV+!0o z@rkDr2h@T!%hbf8x^6^GX#xIOPk?vS`)4#g)Kl?E%{i&EB-B>Xp#)r4hf?_xO4MYA zuDRZKrQ28sGVnGNN!9cs2doj)kzaCEO791Xa zreSrJ+duEB%ev~$_WWSS_jg>_J-@OiyRv7_wRu**;0y4DhQl+TlvXX2FIfPrs9OkD z&j*{c!RFVt%mp7<2sO-y)?`C#t~AYsdKSt;pF2%%%d9@NDgdgXy@1CM1M zADSCbvTpUJQN|43wD1pFHqPW+#O0s08+dza&-HvQVn>q};#T4H7*qg^{%7*vVU0YqVxW;; zru_{w^51m1fqutG`y=u@Cee7ejP|$4@0Qnr;NA7KzfFF(vmEHF0ouP@zFHPQx}El~ zl&>!DlA!VP2WbCl`R6MufPT*t?(Zb;t)l(wjlsO(9gSsDLg#ft+=fdnOE zd?GmshrecxCu0X=@lMzWx3}2M4Ex}{Q0R@+0e~b{%K2J1zeiagkF5$g)$w?0 z(Tz)lODn%tIG`^Lt$dYQu^46!gm!LURl8z4k`DvG=_?MEI|r(9I5w0@u)|3#2XQEI z7O+QPoUrr`;j^p0Z5E$}qaf#HtB))Z21d{#ve+ts`#L`ktm$gr)#Y{EeZ2lX;h(N< zr=d&UH8DE)HBizFvB^mlGKeRRW*JsfL;Jx5tJSXUw9{0;~frPyx(V7gKa(YoFueL(oy_0{|7y zdwuVIGcTfl(UKLlmo zrN~1Es13)vP50cUZR~k|c7xCQGMafF;WU4HL(A^P!0~3C9E%O9Y%fahOSV2gCSEq0 z0lv{Dr&4Sd_0V1t3u;^^g@v+{cXTyMpAxQiowkaFg4YpyncFvh+J+vnR}fACKrL_v zUYdR^aZqIpQ(N2xg+7&=%00utzIWa8ZJXMq&j?R%N#{w(wmrb#+*~T%BK-Z$m%+d1wlqlAAD|_X`5yk}wq4cdur1sb!{6~a zKP7x=Z%f5}K3VSeiBI)L3}Oy`$p5mp(SY|gdxB?so2dEHm%Scp!PILqRwTT1`*dp$ zG$T4qVo+@)r}l|?E-t1YgY2F)eUZPty_q)hf7{+XgLWbntQ=k}h){uGN5HRUWaQ@Ni(bQN>nbyOG+7Ad)iUL~;+p69^X(@H>`? 
zwI$*XV-ukUp5!+qLXMG~WYdQ}v6toa@Xi^CzaeKt%0#M`<;+M~h@&!RMao8kO*uPK z4q|cKa3Zyg?4|qY7yd^#N_s!^_xN*0L7GTKbFak7XOV>K@P~5p5Jkr=C=(76lvrgJ6HHp zK(a>xG~-xeWF$T@!rtIDyTZHiI?&qT%xr}ca|$Zn)_ldUPJoh{|8_{W7VoFUza}zK z3H$%B-`LnVGxHO>n&E`}>8>XDtHy74h2W1D*8U*1^Tz%#b?}G#g8`fr&4vAwuqno4 z$yTWFQ_RJm?k}6c9jAHj5hljR6BGGeXofO+I|Kn6UaTpx%Eh`6(|GUH-ZpjQ1M6ge z2UaOONxU0#I!fusWXZ>*?E~T{{x|;%+3+EecYH|NdHcXpDQDo{7$|dYmFb!<2%=as F{|$ftgoywE diff --git a/app/modules/agent/__pycache__/story_context_repository.cpython-312.pyc b/app/modules/agent/__pycache__/story_context_repository.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9652ab6b8ab18a2a7cd57f27a8aaa2375456230c GIT binary patch literal 30273 zcmeG_TWlNGm6z|gL`jrHN|rQw*`lJzv7I=v;v|+O*-;`(i6l3+lT@WOv_)ADXGkR$ zmu|9)ERdi9k{~VOpf0MQA64Ldcpo4BSS*_DVt28-EhNimxP)62DEiSqI(8v!(2qUm z-nlbFa)ulU0lEw9IJt9p@44sRxv%q_(SHO2UILf6@oA!eg^=Ij4}RGhV7V^>@G%jI zXjveOmU9*h!qx@rIV;?43${i3IXlg>FE|#R=bRLFEVvfk=iC-zB`1mKe4B`_8&*c~ zoQLMQAfIfo{C?=yZ2%| zH4|UDm{^L>GSV!S+p*ja!S9broSd_W1bS-|EpOY;*`OtRZqHO|Sy~-hUP4+^vvcvq z*kt_jax#J0vlb0GC%`sBEcgE%ppQvPDZ3#f%|mJN#mgQeS1lRKyra0Bkd%w(iIy8O zO@6nKigm0Um)}j)v2k2}H&MsVarxau9S6tdcN2A-9GBls)NyfKem7Ca&2jnNL>&*u z<#!Wxyd0O`P1NymTz)rE$Io&3-9()L$K`hub*eZnzniF2&2jnNM4ceV<#!WxYB(;x zo2XOEarxauojQ)o?r$K`hub=o*CztcJ%&NtLM=Qfn{0(a(I z)k0QDCpkcpmN%>xa+X-&-zvFcJ4@cMM1}F#AaE|PpmPmPjtowZ2-Abd$3}$F6Tazk;Cxv7cq<4RxE zX8+>qzJUS6IB(qa`B*Z(0J6`_?DFDbA~lnoizywUgI&HO$)(ukFq^10E5&1} zxHxluRcXw`Q?FcxXWnrw-c~FnOpl%#nVKFv^}_TyWt4_aPfm`EPtPc2#$(0XH%m!( zZANu-W7k2t(=DmEi(*nLaUnLFDz}ed6C58sIW;mlIyh!-`{?hy%KL%$W+t_IIj&5u zc3T*auM~ehkpyvF7#*J;IXN<^ENSB^v&$fRFTu>3+pRMC%v>xvr*zK5dX-`qXOi(G zc20Ys(8nsfQTgKA*+S>F%q}$J@lmEjT81-?pQ4rfEiTXEgR)^Q`-F)3dR(IOW&-_k zEH!6_Y!c3HTnZ*g&d#kYT{79**}2%##dzW3JcL}K>@y};78hgEs+rCtmsh0OIIy-0 zrm#!bl!?_iG%-FkJvj)wM7VHihV4{7(Mk4O_bA~!`ozTK$mq#&L;XCxvU@+`&xiy&hXwF)bFQViMX_Ol|`) zo}85um(6bYZ%+E*2koEhc-Aa-Whc|+@_%C&>uy7X{qvLE>>v4fx)23S0C>ZKf(M(N4_?xPV(e!*Js>PGSO=XmNabb_T5BmhZq?B>n 
zd0X1~DQQbPN5yS@=8(`UpL2mx5PbisoGX4MKD&~NOXz6NIhjE`YR%cfLVh71L(iJnhV62k}DFJonq&qLb`XV zOxQqw0j?j~Oa3FY`WCqxX#CzQUk2JY1MORZ&O8~mdYaZ!S%2{Tv3JMr219vkb$$50 zt)`~-fvql3mG2>e8l`9t7PVrL07b_w+Oi;))nZu)%6hd$by!r7MU7DOs9bbCw9z`h z6}fcBzaTe%J(MS;ZPF>y@p@>K%VtlXH11^4cB0q{&&{id??q zf2}mB{D5f0C$tkUs*ow^G@n6#5$h9-6E{zM03`9(+vI_D z#Oi30!n|k}0OgWBpO*XIhjqHk&b12l@*X@;Siy_eiwVZM-?{!=GI%2)(1ALd*Ao?NpsbW9HMlk?* zo^n*huvi5+UNIt8!`&w~i9xvgS8dVeT!Z$2)QPCVX|96*!K7rlZ->)y7ZFvS44K!7 zsLFUUmW(w6QC>L92}{PFaer>Vfi1!XzH__O1T~eoE0&Kb))XBXyXX{M87tIuf9}2^ z<3T+~*?OLgL-dM1(O*)}IZR&NL&#NcnN|ZCuUI8kXY9qb-)prKsHfxYXTAMAcmu`| zudBX{XCC)Z(Ul7D3{i=Yq0i~dxFB+`wuxM^Bq1&Up4<(qF;tRkhjNJK%Q$4bda>cU z60ryQp80Ctf_mRoEJEXW&X-t1f$gFMl0eSMl!u%jneogU@$+-b%a=ZL<=l)_C&Zjr zl@D?b>h_09)S;hqbAdWnr(-P0dAY=QepSK|N>{za=nG72klG>hGp8iLE$2~$shn3; z8!#YIF?&m0i0I`q%K6ot8$t@>(o$?eC4F9sEzQp5?3WTtV$Lc5vVne^ZWy9|Em63T z05@M^N$v#@RJ{71Sy^WST&$$hUF>}$v+U?NxQnOJ6+0mdzPw^ND=Z&cWB(4P!hA8h zmMJvYaUYE_A=#H(5rH~;jj;+{yT*9x)EMI^&xY1rV>FEu(ilm$Se8LhJdL`87zJDaH038~z|;&4RGgv#)qJ5p zN4mL*H8>G-UbI`We&n2ZfW*a|OFqP+9=eyMgLr7T`Os5M2BRUQhcI8===iWXpXQlz zF1hPDuiC??o9aUnGJ#y}%*y3tT!MHY8mWc459B45GQD4k zjQS;H)2y?2B1#>YJc3C-B)I_VlsJRuR#FU zvklGJ#;$Br$J+Qt@TR1uO*SaUqNfN zAJ|JOc4pf;v%(X_P5lv@%G;n$K19NMzHIE>Z0ucs^j6Qt$~O(!I`qW9{& zpglen1?_Q~DxFM4NLX=fw+W{AXez4+L!qI=wx1t%3_at@IcV72c+@3z(se<-43tqy z-I(Fi*xKomm-a%o5_XR-CEy~WLgJqQ*oBbj%lPz!M7^+kCN_e&=twEz);xVN0-Dxb z87C8j67c%N=QbW>nQ>*@Ad-1S`*knR1L4RwuZUkNiCqatj`3J0m^*Y2oKjZLJwC>~ zqcZDIWh-O0L1ixEVkV0jm(vwtT+Br{8YO9rrX$}OBWu>2F_IZxqzaI_bE5A>|3+B4 znTZ*wFf$-JVBD+m5DZk98;Ap%?inbUAz^Teg|bG=kloQ_4VaoObE3np<_m1O5aMxS zY;XGg z)>No8z0BjQsZe3M3nyArub{yAiFBqN24x8RsbFp&=}|^U2d}TtZr`tiQf9$vR*-Ik zn)2d?G(}|^P}h|%eYLolyjo&{IvChtxQauab(h|W&n!8=Y=n*_XA_C2w^)Efek=_` zk_!~fALYx^2$rG-fK&zSBLGedy_IXFuX}^xdoXfmk$QIRtXioJKCXWPv8O2_*M0k%gkc z;NU^q7Y7}KPr5)zKZ2wABqTW}st91l@ZnvK!IcKF#3lQfb8hY=rW2ybMkuDEDV54b zzXV_xvQZ!t*r9CXis0 zNL>1psh{fybA>X6)WRw~0cRII45d>Djbnl*pfrKW3z*RJ5-kgiC!tCftzveGNzA2o 
z2}*||DaWfZMatVywM?RQ1NXgpF|$M{CPD~b(czMQ$bkRoAs+>sL^1>9fe#7AeCUy4 zKE&O-`C}H+`7rXB?t_>DD*~3YM*s~S)pvja0QM6J0Q(uV^aK0(wub%0EI@giKynY0 zuaiFLz$Zrrn{8h-I|jR4QM+opk)DAv=@=vmS3xHUau&)}a**zVs~Edwz$pXH6>!UB zEa2t$#XNf02NbrlqWt#wz+jyAObsz#&#$w?gkVblz(NH;qS+`=xLO=p`yILbE5}e6yT| zpqy(396n?oWq_g!tPoaC`>J0IUJYb?@N_wNx~fziN6u7;uvVxG7TuX>&o_d$c`<(w2-HT6KV(rAlm(>(x+7t&r0y=d=ObCc^^2f(*9<+%Cf% z0C&i6C%~ODT$c$#J6({|CFj8E`^dt_m%6_cYY}u@zBIGm%||$6P1W;qptl$4@iKAW zb!EAGs<_0+p6Re`h90yTQB!)$>=ui(YsDdQJ^qvH~IB}pzQ2XLw9 zavNkbpIF^-3}qHBNXv_Yd>9H6J#+UVP9G%cz!Y_32+W%$J&Xlt8kY8BX?RHM*P?`Y zSPmnErsUX3ScJM9V+!6m0?ixvz4{6emAV8CV-{roOYfY`3Y42dujyN85U9j?BS5M& z!^6teK&JDneHy)8w^Ju3Crh(@pGe7 z^i?)pY7>*g5ZHbEEQ|-3T84CSBn)t(T=fsQvzbMnq-{(4W3m4Pce!pQzFLxqKIuXD+WPAjwKH zB*)?F+i)HcvAiUS7<^xpC8O+_3RvoXDvMZAs-zuLoTBB2R^3I0ZJN+1utgJ2REnld zW_d>{#WlBrD?3z?Sb4W8Yp&q!=JErl5#1{wDi2(F^;T4YXg6_t)CxAC z5>aAWo?y71ShFlHzQWLHxKjmk3NtXo(|06bXH*|D^{`5h!-1cxDm=)Tc0?7P@(LCj zq|*Ol*bCX*&9@z28~SVyI|qjGV%~<#k8&2|=Zx>|z^sWx89!xwG$9&xHfK$UQ5G&E zorUL$+SpK`=#r~lOk9-cXDF1105`X4;zWs8dI}T87Y@Nh5HFlqx|qzlFT_)@S{4>^ z)iVprF;VfHCv&!BO3K+1;JydhnM7+fDyo3w+A%5gpQ}0;HG(qRpIushBboE?GX&b? 
zJE)GuN#q`+v^AKvJD6*+#=4Vg$87Q*4*xv;~B3T34pq`8U zS*u61&?sgDAA8^W{6^$RiI2U((8u2K*vtUR&PZ zK9KD@{ImJZmKV9BUgK|XAffQbM?XAD{pt?18N0w{93-`^*U}ro-urdlP}O#bR5!jm z_r3Wahd&Abuqj^;!PfaOiL`tf?%NFatxw;4Vk>;~ozZM#82t2&&0mo*OQ7`w+ug>d zYlp7Ax!JsbtML(x28dki`lx#Cz-AC`cbi(S&8|g1S-RbN@TPUE>CgwRY*YK~rrx#F z00l!I`#<#G4Tba8y^%flZ9Vl3`AJJneM2_fvG#1<2GDQA`_@Bw2S9lziFB@wu8W^e zD~xr*4J*=Ir0KZibFMsPO=Hh}1-`4SpE-(49YP#p%eu zV#OGF8sq>5ms{SAbLThfX18Co(}liG=7zPcgDRF%e@-UkWoJVhj9)WwC7vFCSxrK=ydCak*fM; zv5r~h<5FP7|0lbF&apdvL*XBWXFosGE9e>gbTww=74-}4L>rSSWXHyMs(IWPM|rmL z5}7P$>kg3#p;%?ox)hMjsP^;n!tgf|RmjEU3QF97*^xFS0jp0~eyn*6q3%%Wi*C9x z1sVef9a2RJHXCdpuST#iD8BR^tzeys6$ZR;ytVZUON`gEy5_aD)m-D61v^1oxuC$f zzPv@WHOuZAO}wTmYfBg4_*Rd2=F31IMAWH~Clx7Dnn83b8+tThl?O2wBP6LbS4@+7 zDbT{GNOw%I^P|s!Kdhl+T#FO}%TV$Spaf!EL$O1mzt|zsfvIxN;OCR_m&S`uRuUdfUHi6_GCbBQ$nZb737wwM0Ue_#I zs`!O|0Ll^1K<+L$qBUbR%MqFXfm?9(Z1$YjmE(g~Z5dnA>!J46!cQ$$?1{pIchD1D zHANTin8FsXX1A_F?kAbJlv1%vxWC{$F*fSQ+I*zaLuP ztF{BiRp*$Xz+_IoGThb?E}l@_G>2BaK{I>%$R6->>FfT6Jh_X#Zg3M`#8 zW=>$SQbIxK3~02aUt)xiANBi+g+^vI`bW)=rL=D%>#`JPe5P8o9OJ2rCL5k*Z5xg` zg{kYJQ}sCZlL}+Qj~gahsTN(zAr*!NkHfy{2M;eBOH3N$_3Ze%-IdQmjg>`XV^`W| z?5o(YtGu#|mVVA^VkzBrG=t5bUEkob!{Js8`J5^pQMBo+-!`LfgzNSy_M-~(q`a+U z%>Q5nmDoG=jix?bi()jVrbxFd@`w1)UTCALAGPVVsMv6kxDR{i^`@PAh#m0i1a3nn#;u(FK7H*20$( z;DrR`Z!X|>pQG?1K_k#F*;b_rx$+W$DW+Jsx%IiB%JfQABOV&BaH>8*8_Olg5$(vy z7#A)dfi4W_^5cPWUC`Cc%qU=L%$ZiBP;>PUv7JPD?<~T*U@o*>Dk#wJmCmpM1%+EE z+NBO&afGuDib1HeIxD(usl~-js3jFBoAflCkWRvuf|zFkZW@z)C;lA8q#KjHkbGv> z9N^Dk?(>kqt9@wRWA=Ojp>|9>4C9l7(Yqm~<@z!bCi@OK@YwIoAJ*8U*wu*8g!>3#4HvsH}*@K=AAD~cd6w3Bd zD3cp9sP`cYUUt={TK-cQHcPJ?vcLg$0~^EzLbTUahZ?U2(Tz53WLn0qen0}s?AkIs zr`~Ia2sQf@iWTYqDp?eA{1z92WyMk9C8q|@^l(Fbv`=`1>Ku!)D~eOKgrF&nYzNxd z$;pY+FJMSjK^IG!rQ9Ir*LID*yA6CoHSeJePbGKBju5r>eH$RK1{N*o??5ho)q;Hs zdj_1#(kx^p@wjE{nXTqm;dgqk5hEycKmP)kavCbUMY7d(?`Pi4P#)kw9^e8V5CoIo zweW_&1DusXFE}fGb?^3kulM6KAD;Q>95^dI0k8`0ANYsPt^TLq@n`EAzakdTgyo%~ zyN8e5>b$kIIdE$0@c27p*XyoLt+j2o9N3B+yzM`fw;|SVtD866&4s)J`a|%6=muB= 
zU?&>@rCo4KDM68X#y!5w%WeTXeD9@3-3H+7Dp=m49fxd+i1HFH4Sr zhu%wmaOi{YZr1JHs@}KWzFGayhWp4Z3nX-O-Q)Mn2<%G4kZs`OTYF)RECfd z@Do)mE_D^lpMfL|pLI#5UWW2lSu_Q$ehiB#4#rhTb&y0GDm2uIn6&n=Qo9SFuYIV( z^VX@%M=6geSjfg$Yba3`x7);GWQ!i~3B;s%=xbom5v4Z9@i0246lh$51{G*Rf#^#X zv|nm()Mu~N^g+NnMZGB;Zq{OxGZ7h&o2-QHs$` ztnmv#%?qc=@ens%tHLMc-DhC)=x zp^!>ZZ>Wp=o37XV#Y0@Deeh+V)T_Wx1nd;0sE!nULb{~`68_0U38T%W2274)GKR?% zCUH#WFu9D08t{btMtT~PMohw(1Tksi$@c+BJ_|?s+az!Oj>XXgQ6+T^c^3uUq&}SY zP|!<)b$K5J{iMDvAE00rscFtvQ!q%v?fDuCg1M(YUq?aw@;#GtC{A4tEzJ!cBQ2q`_$Z)ZSBA>cRc(^FY zdPYGn2{-3`6!hbh2PjyD4Odezh^^I7uoefbj)JUb6a<4v=ZnhbQZ>Nk8rPk+UgK3Fn`!d2D@=CJq-3@zK_9voXr4(tB`gz zgM-+;8V1*rFbpDt*)%ek%_D=;cGA_8?T+FQ)g4h54?=Dn6%RsQ?1T>?HarMbk;e9X zH9`<@)S0hAs1`dysf6$u>ml#{>y`r!Z~lU%3sUwl2(d>&h&>2GuoNML2_aYrjrkgc z*g8NcgtY4sV%Gq`kw8t{=6~UBt|9=3DRT;kk literal 0 HcmV?d00001 diff --git a/app/modules/agent/__pycache__/story_session_recorder.cpython-312.pyc b/app/modules/agent/__pycache__/story_session_recorder.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..edf89cf2315b9852f022d298773b905d332df250 GIT binary patch literal 4272 zcmb7HU2Gf25#A&3j>msWqAW_LEL#3O$CeT|b)7bFVc2RDqflGhO%t{5;mTV{CzCv8 z?dfAerdZlQ zm(p-{c5Ze#^UdtsUz5ogf$`O!|7iauLC8OF(jJP>NLwEOvqmgpNhMO2<|GOEP$^Un z&xHjaF3DwOPARK%szgHM$HbDa6HB4t^{{VaE+V8VNF(c^K&I6-J^F8)=n{{cm5Sq; zp6ygzm});`j^`Aek{;r*GmBe4G$zOBQD8x-xy1E*n+mEYYIp8q3G0O}k(*dzn@WX0=*?l_KmujSCC3 z;!T5ay6jlB5{2i5#Pu9jH(dXW2BSrXS(Hsx>pW{(R%gKsPQQAj@6Va)ryGUyibMa7 zYynGV;TNfldNo$z@~l&#;2q5{DrT7)29Ftre=^_`hVgODEcq)e4bL+04`CT3DI{8( zAmbupu9K(oJH7_WcEl+_gS6Fvm998N{Ww_=vCpL_dELi~0k+gR*@{~V#8!E&Zo*PQ zN41hx1im9y%F^Il13aX8_5yl5@Z$Nv-^Bn)a6AG)CIn6_Z4CmmMjBFs7$_H`P8enm zKvjWgRUJLVAb$vt;l#Dkb^sLND0Hx0u|30B9q+gCRC_rIMqG^8HPRfOxFP>OCVV-I z8v$yamxzsk5_(u1@z=n>5y?_7ojaQ%isYrdBUEZZzO3!SupGo+Z=J!gAy z{{oUpB>Rxy(OmRdAsBvaAZD0u2CG$8C;CR`r2<0VJqP3(Y3(A>%baI{pfq;kFG+ zUY3}2>qh{d02&=rz$_tZdjDE7!tW<%LuFJELWDzQjK!LWv^bP zr!ecdw&;0PqsB@+Rdg!zuwE%rG1ttBS*p9XyPA6+p;8cY&n~(J%PH0nf?O@xmCMx9 
zBV8a&wkxN>oDu~j3xol@=6S@emCGip^SBR71JA)@MMh1JTE=hdy28V*$9Q;wdRzg{ zuCh@$rl^Rp3nu={t$ffS)VK^+Oqm@4*2N#*4LY_T1W?8M6rrg$=y&9Tuu_MIpfF@7 zo|9BGb>r;Q?8sgDzIIQ0FtVA`zs^lRJinQJ^GWu^M)t&J_T-J3Z-#QsOs<(8y1jC9 z<*vG!o_Ks{W+Od=!bIlwxtr&nB;MFaywTi!0RQ$JZtgBLbH|(c{lK)+Bzg2(q9s$$ zlCYg9L>-(W0`M{rIQO?S6(d6v(HaLq2K$_g>l(mXDs%(3UV`dj{CXV%{@A7cb79(atb9XSgdz zE{GhFXYB`Y_v%U)#@+%0>^n%l3#1$A441fEHN8b&;q8u=#s>xZjd_O|?V`+1;(7

<9QbvXmF|>2tMrBPWHYzte&Sx@52**Whw|gx zF+_A65dDIv$y6&thW9)tQOu%OJU%?S_R*b>nz{V__`P^DUwD!~wvj*f(E8KDqlL}< ziJb`&21|Z|U;%3n1g%IAbTYKi5`3^NmEog&u3il_Lj7-SOK!Kx7vM6T_f0p)b^I$FY33)JV})nQ$WZJ?1cH?rx&6t_Pwt+4@WE#K z=wt2Z|N0vJE_{6v7;M)(`1Z$AcT57=<--7bsV?o%>r;3Fy-+=@%WzpigH0_SVYKWl zBU7tdcuRm+upn~v%=wwq7iYM7cIGED7iWU5ko^$Wy4yi^3IwZr`@5(s;hVRxmnDwQ^GJdfd00)1v`%f zug2bIQhlF!c|cKM#6IW^h9T~2NA~+Zv9~x8a^MsUfQXQPP9cr5A(Up33?tc%1YkkL z<;<{7F~qWnR_~k72reMO+u!{P$aS(6l9WO#N5=O)8_FrMmP+yytq5`&8O^t%$i>Le zu2vkm1ld(+C6P;!f!(b%av74{-x@$}kYvVM!b+CJQd`3y+j={#q*{~XKvXiEYekUL zkc%ScTShK{_L9h@&|VrjNTIRT0CIz9ZwR=pQ&HvEi(QH`DzGDd&8!~9SHSriydJ{q zAj8DwDs)7U oEL&kolKw^}|4DZLi=23&O49f5zW0K_*MGF%NYdzYf>a#wzhhOvQvd(} literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/graphs/__init__.py b/app/modules/agent/engine/graphs/__init__.py index caf5035..2821b8a 100644 --- a/app/modules/agent/engine/graphs/__init__.py +++ b/app/modules/agent/engine/graphs/__init__.py @@ -1,11 +1,26 @@ -from app.modules.agent.engine.graphs.base_graph import BaseGraphFactory -from app.modules.agent.engine.graphs.docs_graph import DocsGraphFactory -from app.modules.agent.engine.graphs.project_edits_graph import ProjectEditsGraphFactory -from app.modules.agent.engine.graphs.project_qa_graph import ProjectQaGraphFactory - __all__ = [ "BaseGraphFactory", "DocsGraphFactory", "ProjectEditsGraphFactory", "ProjectQaGraphFactory", ] + + +def __getattr__(name: str): + if name == "BaseGraphFactory": + from app.modules.agent.engine.graphs.base_graph import BaseGraphFactory + + return BaseGraphFactory + if name == "DocsGraphFactory": + from app.modules.agent.engine.graphs.docs_graph import DocsGraphFactory + + return DocsGraphFactory + if name == "ProjectEditsGraphFactory": + from app.modules.agent.engine.graphs.project_edits_graph import ProjectEditsGraphFactory + + return ProjectEditsGraphFactory + if name == "ProjectQaGraphFactory": + from app.modules.agent.engine.graphs.project_qa_graph import ProjectQaGraphFactory + + return 
ProjectQaGraphFactory + raise AttributeError(name) diff --git a/app/modules/agent/engine/graphs/__pycache__/__init__.cpython-312.pyc b/app/modules/agent/engine/graphs/__pycache__/__init__.cpython-312.pyc index a7cc9ef71f0b3dbfa68650415c3662d25023e5dc..55518beb101f1a0e8c4b374e2fe1bdc9961d7bfc 100644 GIT binary patch literal 945 zcma)5&ubGw6rS1LWH$+IQmBwqlR{}-4_VQx6qHtPRVnq5%Q)Sg#kB+k zJ1>yE)4SJo=w^>Wn0$@AQZh|%|8M0hnwQB-rK|Uog!{*53ESs9=5~P>A^lerrG}D0 z$o)-emnY7VU|cvfV6UA$)+0G9L3(!_>Nh>^_gK^fMomCmCWIpaDVPIECzmkKWD+`= zgbpEo*>vm-^@JuEeJM2p4t0T{)zxTRPcK&LlzTBW1q5r8`g{a>-CJmbkQs17Dw5>#^awk>VtmMWQ zqPk;gdrI?mwYF<&m*kJg5w@Hm90jfdPr`Y)(eIJEj*5ytaMna5V`Uq$GRsRAWonTo zx{*6!&2@|Q0xB0a0yVBbTsNT}++e2c7^T{=Klrcmo*0>O)%zF|&M|(0g$!Z2FWbb% zVyrF3gT;8T7-O85d5dvWcgxXaczGnYrB;4AG-C;0HbQ6)Q+==JaP{rDTh^m50$Kb4 D%{Qo* diff --git a/app/modules/agent/engine/graphs/__pycache__/base_graph.cpython-312.pyc b/app/modules/agent/engine/graphs/__pycache__/base_graph.cpython-312.pyc index 5d55c828aca0fca007b5b40b28626f3143f34670..b8ac2260d417a13da80066377c3e922f9ad47cd2 100644 GIT binary patch delta 1088 zcma)5Jxml)5Pom}ckc%-DB_{vK!_e7Dq0}%L=qLFc=*$hZODav99(wy$lJZ}69}46 zU?@bkfXdjJNQ0rVx3o|~p;;lZF~rzhfP`q}yuBj0$m0W*v@MN879z!yx}QgkKoMN_pj zUBie&1m&t(=c7WxE4YN{5f1erLz+c~yuy{S=!A-s$`2N$-##s199i4PRUEGLLc&5w z0hotpcL#Z`@~{^b8)v2kS77B#a&pNY)`sS)2k{g)jvWqOR6KZzLplEM+zJ~7g3*-l z7$rmL$>Uj?869pj$WW$Wx^07b+g-aS#_-JK-5`^2Tr!%V#DQ#QB7jom;vkh`C&;*) zpqcD|N$gaUlK^PQED(E;l#!%JC79~-S>pOqZ(mQ(tw+8*mZA2Do%MM*1{IZ1LF^a30>7v( z(D^iTg+*imsGHD_LR_ znsEs|2O&)|)eI`B946Cpvb3d<0A)*a)BA6L}z22}t$cXgMj&j^6O%p}MP8V)d)8A_BAO z?lMzst_A2q5rJ7X_LvDSf-X_;SaF R+W17;mr}TT2Z0Kj{sDY~`lbK? 
delta 516 zcmXX@JxClu6n<}aXLoMzPZ7m{sArDTZTvYb3`#CyVPz5$gRm(?+~qvkJ!ST;uob~Z z&@hEoVrLPMOAf@w%1Ufb(g~6(gmgy?1Z_N%^Fsrim2I>- Xq4*8v?_uH|W`CPS9Qp~=;`4t21EFug diff --git a/app/modules/agent/engine/graphs/__pycache__/project_edits_contract.cpython-312.pyc b/app/modules/agent/engine/graphs/__pycache__/project_edits_contract.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eeca60adf8f64efa0584469042a05ad8208c08be GIT binary patch literal 8575 zcmcgyYfKzjcCPBKs(!zqp#cMKgR$LUgN^ZXJRZL=*xq1|u`_GdcDvnGyxep%Rn6G2 z&6v$hlAejBfkYcXnXE}RN^ndxXq70F>}KVR@?)Y%Q8mSGw<|~)DT);TYa2^DV<}S3 zxz*J)J$SqmMY*QWeV=>l);Zrj=am1&;jmJWxc~goz<*az)PG<_Pi(HR8iB$z#Zx>T zp?WYkMCc(yk73BzW27m;V4ylE-uOPnGY<{goQJuoJxs2bgOoG8Rc z#Auk#JqBjT5g%*y9U#+Gf*ORpI6-UfpbLATl_{)iWoSj7MG0EXMNLrU>5tiNDq!e( zz=|fQixwm{Bz7bYByJ!9T4f|5(ktQC{VxuMhKE~*V*E%%kXk~0LNwkYMEl?*TKdG$ zaKF?tEXD?fa6BmR192%B&K+X&@Th7ENkM)f93LxNcfQTJW?WI)3uJ=&!do$YX6j64 zOGA3gfz*}*nQDK!dRMA?SEj1=e#>l2rlR`3W7hH1#<{IoinCZ(VHyaLT{Q=T(a?|( z464>(P}?Sy?ZM#NBcVvH$0)@`)fI%76ph70@qt)W3I@sZtHuF%=j#taWpK}8DJ<}S z61|X#B}nk32|?YFP>z0^`oijX?@XqqR%Xg3C55TYR8&oTXR=;lsxsvj6K5xl3R98s zmQQph!wOTLWlXGf#`~OtOk<2ZH3-P_LLcsyaiF zNbEMK)J;@8Np!;Cb*h5MMO5Wqq458$Dxg3FIbrF7bc*_j4jcNYu;CiW!#JIw2T_8H zl8%m3hI?KH#*WjVcw9G>Xi~dPRFD%l!#vTCqz=heBt#>(Ln&bT=6)ia0fXp)u_~*b zA%ceAuZb$k?pQ_f3)J<_553ZxK-ToGylVQ^)U8Y|tE{Qm9VS#qlg0YgGds2UOE{zuMtyw2GbaEWD56U&kKkiZ>8 zi70agk6eXS^4th-Diaw1UBx5N)NK=%@M*+qBx}l8i&Z4ZSvm(qS4O+*y>pq8s)^Id zlc>#UZt60z`q$92 zrkni$tv{xFjJyGFB7x-@_>p@FpS9P_bD&cu-Xxfz)xw*{0DXHIa{E|$D`~amTWv7b zUcf~k&k@aZ@D9S8h51g@RNj@ZyP@uax|#O~ZoUM5rSM|~cdwQALZ4^U94J$rx;9@3 ziIN~5MIBSE0P%Qzcro$(g_!4MNH93|6m<;*Yn%h^Mu96z2~NkxaZ|!1(f51_b`URyqLeV{<7nz$ zfT0qsv<*0JfCLFc!nk12cQI~Gm_^S^c$(LECRo53t^jAa1UKj^+5}X;hBIpz!vV$^ zyU?bBD;rRv6^4s@fdtH|bwC=3g832+3nD6)YU~r@ViT6U@Zvxwf;cUIjjNm{8)^w) zcO-H%6uy=3(@@AlP)OW^i{w`l_aR4)FyvKcATA84X5BPU%{nUyUxZ&z1iMt0Od9j9 z7u06b+6=>`9|4)5R(9@Ry!}ti^3L{2)3j~MHq$Y8aoN4&^VTGr2{a|Sm74AI>_U9G 
zrfsVI)4FGr(Xu0XD&yOf_U%sjb}xDr-~ME$>hfjW<Xjxy#AG z=9I5_@%HEM%D!gBcRktpxM1|Md;1qndw%I^ookz9rtMSqA3B#@t+KiGE64~8e&+F= zXktXXTf4$}0f!o1j6@NZq4iaYAkm{#Jg*3#6aXH@iKqFznCXJFk%;rgH6wl_(bh$3 z+>kK*TI;eKth<5A=zQ^Kk(mcL5&`V9fNA)jeyFH*PL~I&&PYb&_# z=`S_gR8F(fRI3=eowIsXBb3EPm?oiY>o5qpX2l(Z#K;6ephbXCr5 z{WE6S)sQLmt++}i+olgr9h^BmcS0%MuDEujUAt2*fWJjaalMvy9Z$KAE3T7C=CP|3 zCwRP*m!`iv_1&5Nd4p2cq3q;B@9R`{o{_sR$u~ms(D2N8rTT5zT$$On1NfFz z;x6Ud=0(`%-lTP9OVj+>pE1i@UY|Prs1?z^A$bzkOM4qq-iEob;%!W}t1hpO_QEgR zTM_LWo|+Nu@u)UuJY|{EO8hI|oT+WeY})cXZ{KPbA==FApd1}%0_6+8CX|Ch=D{51 z=2AQl>Hq;7*d`kS{QnXTbqGR`huxs#Rg?fCLCvAhQ`7;~m+a4|bLAPH<+%k@9w3eJ zJOCkFyz)uS-|k9zB|ppmF+c0KaM1BiBZX+a=_XbmrkzfM5*0=BgyK^ZHG4v8(;wi`&mxrU}{ zSuh$CheDBoG4LGFrw?KbO>_AWWW{qxz5_%p4@x7$!!a?gyZ>aUB>o1Dz)M8i+4{8u ztOl~Ef+}-sAj)gICg_PrOwd!q%`T{pZQ2+&$=-TkfaJ^LW&OwTW!F{!fsCs(?Fyt^ zfra|{@F#r_`WAaXy{R-GR~k>G8#_~tol4``bmN6o;{~PhqT=dKG8vm|GM@Ierr^)k ziY#&vcJh;s2OV--hqAL%zSu1XZ^)4-LRbtSY{gxJSX0}wQdvFIegCc5x8|-dUQ}uh zDwT)Qm2adf-%u)#ulTBF_TE1*dtmO!Vu`Y;P4VqZ`(8`=UQ>Lpuas5Hl-~Ev`sS+U zFDjM00fp0L2U2ARl(Iw5ZJfuNWV24n?)m8O2Z!a##wA3>u+ANH7}0*?YlpDmmGmNX(wox569qroCmrJIJMtpAs}NIgi>*Om}?j}CX7VX z=$y?*4zbF4)LFbr5a~l2em>*&aW1d_!6!*vb1=7@foP->9#iEf>zZOaK4@7$fQ3T_# zSBwp57Gf@_(5%^k^{}ScfCL>u@NDtk)NlaJyRr7}foNYu*oBb_f6k~w>#~UR+aoc+ z9={~?R zsY^TSQ_gzD*^o4@*qn;&g7xEdLUhmJ%(|(P^0cQhgm#XBJ~46RLehp`OPEC7 zPX^D81<$7%0y25yOP>qhO7aHHH=-PbW`M`h1cYm7s+$7+1zpiFAi#`SZtVEp^&8ik z0yoC&SL>jBwGJv*>&7hGp|Tyy+eZ*rKYIw(&wdK^&weVRhzA%VWvW4h=(;3?#BjgL z_KC5PVTcP8ak>ekR95qQ%n)t^M=45za-^iPL!mg#v4lfWegK2?5F~yS8go` z8tct{(jy3^I`sG_F&)<8qY`}cf-gyH{UUxORPFk31R>Q{5Htqwjz4{h57p4d4jM<{jEupN83D2a--8`BTT!MyVmE-VcRo%CatKo4+{=Ztryl zK12rebJz23s-9X4_HLy_mIg~=oV`NbtG`X%FFini2d%4Ki~*E_M??qH6aEI{um^jHaxJ7lL>L2?_=|7|mF& z#wz+0+QqR8OGuEdBm;S$Qfn^DOjVX~uzP1NWhuz!C6YZlNwUvR$!}i%=d-KWldavv zTIb8N6l8MSsYmft+qqS&WDl3HWpiAXf^7cvPuo&WZ>(ZDyQ6~j&Ua@i$R5>`Og`Hq zUkjzqhE}mL+vZ|R=1a2_WQ(P%m}Tp1Y+!zGmV#_??>*o(Z9#^+gG|2k 
zmX==)KF7NDu=t38ggj)bE&qQA1W80$70K<*VF`mXDs0jxyMOWpL8)c?-y@5vo~Its@<7>;RJMfYPtF9W&4eo>552Hvb1c Cm^-8Z literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/graphs/__pycache__/project_edits_graph.cpython-312.pyc b/app/modules/agent/engine/graphs/__pycache__/project_edits_graph.cpython-312.pyc index dcd3bccb6bcdc7457bd212dee23dddd7ee5c8487..bcc97b59b6da38dd557ffbc86503b7902d116abd 100644 GIT binary patch delta 2033 zcmbu9O-vg{6vtz>*U6G9UtU_g$b1 z8w}CBPo**%|Lxe7X zp9x;RgEY-i{I_r-T{*QYGoDxbmiU4oZwn`L!pX-i-PwjMVX)w>U#h{U z<>vphkbB~CJ8B{m9#{w!P!Bb|l*NI?=cWFL(>zVIJ77l$6lAsF?=RHNKX-GYRFps& zH79h|I{wCYrO)Q* zOF?1s=Sh0a%0(d++TMFc%qp$vw;TK2<%D91Rq>v3z2bEy z2FmYz9-0gUq`jYvDr2o0X9Ud?WLgiN=UNs^(K=U-e&P2CAy^;FryU#_21X-Ild^nUDEp1A|US7Y^&oR_5VSe zC0L3`7Um|q8Y5?f+x@whY%yk77l>dW^bTFeThur!dKdrYZyFmXZF)D&L1Kz5gzza) wegWYe2>%QsyWsLJII|0Se-Z;rjoV@%CkECA^J42A{!jiogwpSTT4(-$08`$;)Bpeg delta 965 zcmZvZO=uHA7>4KD>~41Rx7#$)v@r;38WNkv{Iw884Fz)u6rxpd5sH{vq`L$*Ra$yb zL=aCk4C+mD@Q~uAkV`2Zyh|Y=qM;;sQs^!CLqQOn*;4H;?&1Bud3X1F-kI4-Y>#=~ zyWI{%p6k7rcP_eXp1{HlvU-8as0=F7KmmYoL4aBO*nO3~f}$+03eQhOZ8}e8=tESF5?N<-FYQkgsZtm&ic{&6 zbQL4=0QH2?u4}04o$C53KkcmO7&wsZTU&jy`bmnlrC1}|k&-PTi5K0$!?Xp*TF^ub zka7AMI@rJ*b7s;8)Qrh=ealu%q%B1nTt}K{2@_UK#$~~bm@&JqTslpmW=yW@)2*0z zTZ%X4J5r`4WULs$^UA!NQ8Q-SlbSqBF>xaQ4lOlckiW=SN#V8>t`pLDOBlD3zDoDZ zq@bDfP|onvloZ2n0})A6eTHBA!}obERj&*XDVwLh(=w2GE>XWgt;EI>wG!0IHjYy(yE#j( zEVbfw!-{VhR{Xy|vRnxoa|1-{yWt;3-A_cr%tXPB7mu{B?MKVge1GHq63%Ir@?CZQ pyj~zLfMyO88Q1{eGfK5l>I*9LQK^seeU#keF9DR`8#0P9<_|Hs;Hdxr diff --git a/app/modules/agent/engine/graphs/__pycache__/project_edits_logic.cpython-312.pyc b/app/modules/agent/engine/graphs/__pycache__/project_edits_logic.cpython-312.pyc index 91ab4d34218a15c68cded1a879744634a03d96f6..13de4c0cbb9aa827a0025abaf34d5010f891df5b 100644 GIT binary patch literal 13770 zcmbVTdr%uknjgI%2qg4C2n-$`#t4iY1AbxS2L{L9*x2zaHa1p-W&nYZl$pU8?QqG} z?bgb>zKYq~JD@f>%U8KFx#TLO?yf?rwl1V9RiW& 
zlLAv>&truCM;KvvM=7<`Q#h`08^1&^-EM_YbSfNTD4(CX=%Z+of*f-;hw%@JS+V^L2&_j*C`c8dBvk!sLj5BcGoc|hgp$<4yB6McIqy1n*XO+J z;oXq)Zh&`V&btxb3&i&VQcIXf6VNRri%6{tdH`>#s1C#WmFTTsPZUCFGf^a#GC}KR zc(;tJ+!moYmmTNA0|CFF3I<06Ek!b;RH)mZX0rYQT zO(CN2J>rvQTq&q>2X{7HHHR9RK!!FA~{15++82B)!*%CVnp&8VH2Qy#r_g(0e1& z8u5~7k$8iMEuP3YQ1u2v0mkc%mTh92NB(9)4ef^LH7sR!#?E2WKmX%nOhSIX-7 zvIa<*S4wyDrS6#ePYc(eC?;rVwCdJz8;xJ&_EQCh{X~QKJp`$Zflv@ic|%MnwU$dMcy=AcH2n(LGahb}HO|F}L%4->D6)UjqILN9&IO@tULPO}^P+i8{hHgPOLeexWPyrJR{Q+35 z-VtBKpSuXqWVOKm+Bt~+T=4>?KpPY5Eyu741q)ILNq~FGyu-lc&JWjP*k3576bkHq zg&G@I{HxK)DE^PHf#D-==WE2AXMkObOfgEU&BIQ|-iIPk6E zC~PP4ZiG4(ZQZ;_{NZ2_WUshe$-9i_XfW*e1?gj+Of4r6q3a;}!|$*^yoN39>f

dq&2^?gI5-To(4wF(NYWNHUQU!n~Ny|qi5cYS9C zo0N;h1$4kGGF7nM@dX2fj|qfBULOO(J;Km#BUOP&qO~Iw1jFIs(TG@FuzH~;I%dS- zdj|s{MzBQ!{$Vdg2FW|VkW5zP3yllfKnTtvMlktkuiqCUP&=fcq8SS9c`6WbYbh&g zs02l&C@Mou8Z(7y3Wp3Di31#k6C$St^9E){tG4AgYKQ(UM8MgU)j8#!bWfj+Q@phy zrc2pfQ)eg7P7lvE^LEdYy>+fSW=Odz9#+g$Bwfu3SM%KAc_Ml2YU0>c?piN@>^AT6 z#)?u7*HrIhZ_;rf;W#iiwqW2LKZqHh6u921;bHDx@-6S`TdwX)nM#tTiiD|R+84J! zu6R_DtU8jYIx^qQSG}-oI`{9jyJstx-0kzt$@-2&eaFIq#b&;~o3Fi)ti773y~@|V zv{d^;?xr_+lS+d6=T z3~g*Eb1uTT(qSl%bj`}rEvV*^VuVg^2V{_PQhEQJ8u~V&Bn*U+C?HI4sEEQh)I`y{ zX1Sbrisb829{CIsut@H4_BoW1y4gI!l06w@AvK|f`e+4j2t#PXHw0FlGx}|3RBmD0 zIyrMJ*Lk=29kjL}OlVk5&U^v__)hbgf5xrZ87FMwOt{t9gcc}jSS?|HLwOnIMIp}3 zSWe3A=$1qXlKNB)ifU1W%65KTV=XbpjH`8+wPdPdvLadRP87RmYu|If z?M^lvO*9&_SSRXktbf~~VPO3#D9w?gZ1BFOSp!l46z*5Myc5RfvePV9lQ z_a`7V65Phc`1C4}j%Qaz<%*rp930%;gCq^fuqwrC_zJQ9e~}0cM#fm@u&>s`MHCDqu~& z#NJfNoK#>B6!&dFAA!mZVF;ZK8v<*DHVf9ZSvXO|7H!jJ(WW-fvxTpfJfokcT>e2H z$qeTQ*+SMR&cUCZ4sk zr1eEuCd{mX8e|Q5cXP4aU+N{$k&#BWAuvd(5d)#m>0y3qne`;K4Lc@{owu!7ym@Re zzh<`JUF$n(v?$YpwW_=y*tWMeR+pw=e_`Di>8 zW^h0#cLD@Sc&L4-sq-xTHl6`4Fa~HBR)Ub6b=Q)<-!4}3u%1j5%Ev7?2@dg;|Pl2 zK4J*490FJ#!6f6pK_5Nn)(XmSL;{3{J|CbRlq*=E3z0AlcrRU8Br9k{#3yL|0J#7n zYN$&YHeNv~guSS%hy^N~#Q}vG(rAWc1eb)r(g+Ia+x;X-^!fb5Zi|S(1Y>{(jhXR< z{G^~J0)9q9VE`fl`1nbx2?z@s1VbrNH>c`Qh87r1h6rJYR|aQkT-qO|GPXeqX&QnC z&BeH2L+Z0H0%OgxlmcLk0V)g!LeQP8R*n>M3KTSqdI=TK%Y7E~GMtlE(M9xCE3FOC z&qpZ=c%5M$Vu_%EZigvBFSAxC5WC|Qvqjd?2vw#bVdf!WLu+Pm8IDG4FXq^@MFXy9 zbE%_1lKwk5N{~%5pPnyea+P}l_t{&Q>}~T((EjV{A*#e<`Y%eW*02_XHFjpjRxv&J zu6o(ll(Lkr*h(JUonj~1`03dW-no~zJ(si{O4ts~+vjQCb}DH*o3NebZ7;++pV*wJ zf}?c0^x=+~9r3|AJ?}cqJKBW z-ftQ|3u?yft3O)!*Xq-et>e)JSDj{J>uwoxPN-ZcS9T&fQp0 z^3|vK%Fft@RQb+i`O!rA(fO+jXBNYJ`R#k>VqFjRKQY^Lh|MZz_s{O;@aAPp%M(j! 
zs^dsVa8n!SEAv+4`zSW}7(t(6Z$)s%my}=AAj; zx%x{SetFq)Mf@lYC1oq0es#&_N!fQG%o}Io{oJ1OTy6KV{lXLbj+D(g)jrvtw6!K| zt#j9T+p&e(Pn$k&N_O}Y9e$3W`3`2u#;mJ-g{!@~Y`+Gzb2+Z$T`jz$HR)(eINEr} z(Iv+%?!>Lw=_kO`x`z!j4e{H{WvwZTD`~+K7JS8CnJTYYslekUkINpF&7S6~T9&Gg z%rSh$(Ut05@&3m{kA~(d0ejbV^3`1__rBE5U8%~NuM2gKlHaQg#n#mlthg+fSgN8b zTdY8fis_-)OA1(9t8k!I?q0b1Y0t+!+{NpB*Nud^htv0LHs~N@hX}ZFI7)I2I$(G~ z`5Qv6@g{wUWHGuj<$k@6e8%+C-e3bq(uKG2<8&$LCod_Xq0ULmWcltfWTL6`xsqqGIm#Q+iu*rH+R z6;*~-31Q98OHgTSYyn~aB|xV)#g&B2O0b$tta-TeJJc>=O?hfVKLFg@*+N2zsL<%G ze4Bna-&CQMEVU0{F8?meS({swD9tL*tO{Hzt}!wwN%y zTPd%}%rgz%C#;NHuE1Iu@Zw8BE=QFy2xN(YwdUPXJ7swwKVi)oCT!c#b?5BV{B-fn zJM~$T5L(rNK}5;gSWSB8R!Pz@Z5*|XVeaZ~3NcG`5PgYQAc8na0l1D{(bkMsJfAk| z5qJh$;ee_|jP5z@R*6J};v7#=#76@*UgtcLfuUrCMH+;t3k*O4!tag`{L8OV`0wKb z{bKk}Iq})?sOB_MCZcue_6eK`hjHJ)0O$vF;0_5`zn%_^1cE*)z>K#?Ri5VlsM76; z)~u`0M}>U^Dl3h`3wFvtxINkk;GX~;fgHnWaKHiZrt$D7LxaAOquJ2@aD?=TYSEWS zIKrs2*dD?9=nJPL!z7NZ!Z^V6aU9S(q9P+DIez&Qr$0+DIo z<5o-d+bpnFQ{a&oN&y?A8yacvJM!LxJ(TnhQ%9f+$SXH^@)2Eas86H9w7ZD{p2I}hi=}{33%G(g72tP z2OhS~w8aCURF${#&i&8{XL-`OC*j<)nK)4aJ z?BFUrOJ+}=dM|Un5iax!_cFy%4A(fCt+!^x?5^qCv%ckuy?H7Qa*cs(;nn@X&D9PC zR$77PKJF6L#+0k_p=-v))wIs_fl`?2fnE-9>CXP3AlJQGUB7g;ZxQ?g6gF2fyizwsjy_+%IC() z{r*0x-U`uH=81~Bx!Mn!-fv1apHDQOUnIG!xA|so!u&F)e_7NIDL>SvNQnGuh~$uQ z%{&FJZta@MI$OUN7ga>&`VwH#G`flFV#>f~H(kjK4kJGRa3z!eCMTpt{9zXp9#FV2<&%@#P7;O;9J`b%=ILj*3K1`#m= zToDQ=-++7vEf51QiC_iz+7EaStj_WWxR)hfFUUw1VTX7}B0Nfav!F9$zox(;i%~H| z-nJgVistguB6yX|;ZPV2)U!%mD~fYD!ig zOjIA7*L-C7(7<(El@+e9=Hml`3}RT(YR)i#>o|8u!oj|B6^{JoP|3Wt=olAB=Y_TXrv6W11Df zi@V;kFPrP$CQ~*C=WOR}?VPDSRaFx+0uXi6_->qYx< zeJ5Yrg-puCu0MjexDV5k-xW+6}MyJXf1<)~-NF8VPNHhQ*~nubA#gT6QNauzF_wyrns5Ih3#* z;w^2UtU1b_Vj4qDtSePgJ~cc!%vCq@B`vY%Q)cUffqQ=xubFi|@}!)VKQBs|OOPh_ zvsaU5cf#zRt^bX=<%zR;wE(Jv7Gbf9>%?7*F8qglBc#_0zcBEE6Bf)9kT!X}NXklI zC>?Z;Lhyk?Xi3!Psl6z54n+YJ{V|Guj3OlQfH|X)AW8ih%0R+MWXIc>v^e23!YHF0 zApu5n5d8>SRjM@&(-+{9tEOR24VRoW4XdUCjWO|gU4LW z*XSV)7LC(gB$`A7^rH-q#ADhc{wTvE{V2mD{V2n;{#2FiP0|lQ_J6<1=^uwkzZLnO 
zb>$zPpd~Hr+OA#LCU8*@tqj4jxh(oU3tB(YHyW&5BR<+AS!HOF5&hB9V)$z)m0cj; z>W@mCI!3(Hhlre*N{=_RJ& z_lV7lz9##y@+CxWP+n?4MpFZFx|WuC6|@|F3R*d&rd2Pi29&hgtD-et@V)9Gc@31H z6<&D88068utQ^qNII}3Slg8WuUE2Ha3i3;eGG6el5Wz}>g^sKy~t}4iDUQ@mr(kumO3aGON z(yY@;N4;Qqf|(rkdICMPFTnN(r=}*EfKWc}zU=Z$P6WJ{1Fj($b5=~6V!Ri9-b*ge zh?`*rHRBBgnTbA+B0G*4;QZqAiXFTgR9F_KTo4moSPoAz>1mp}BkOa>1+(k^=ESsB z?Ku>JlJ)wB7$jlF03M4C+3ohPcevH)fd;ym$R9|EmoYGq(wq*wIMJm(0J)&IHHj?mTZQ}~0Qi5jO z%d*g}pd*6?-!rvpP>|5bpmf9V;GkTVvACRMqq)JrdPMhTHXJP+)@&Lrrxnt28C7Ky z3j(QZH+4ra_l^Om+Y!Jtaw#^wL(eFC}$MI)Ql=D zAJr99nvLe`9(;kK&cm0?sAn_*oCxB=n$Z#|o)2jiEt7gIwLGJx)ib)V`kTIfMjzIG z<9kC`*-Z^rKrf78V_1KYVpL%rtqDONpl)N>a1p<)9MZI6+SMv06;_R!@_mzHVO^LS zEtj71VOS&ect|eAWYpCcNj)&GYaI>j?C1J?&ajH6pl0jZd=ynn!3>pLEA_y5!yLiv zknt>;DGirimtWN>DUV{Nbf%;q-pb%Nqqro-Na`iZVdxX6uVq`mmf1f?zAb3TYMlr$ zxc)vz%K03p*;7bgn2Nn#X#fwX5aZ6zR;dx%V)~x8FMjSzxSehBFjdcd;OChx1Zey5wR#Xz+$s6 zQn3Ru_pR>Oj(Ai2z@66k#XApi&AVQt7#u%nTQTOHb{GVefAW%-5j2c<%J24g1r3Q?|Xy z{`nT(b~tT&JY{>FxAlEe{%lmXc1NwpTg;a*^R*p(<&MaajI|=tlc}n^5xO4wVK{O$ zQ`>a2eXjk-TO-FaHI4TuWl7_#Dr2dR9!*(VGuE2ulh=>U_F%&C1GtkadpFnGlQtcmtBRIKyI)oEro)`}@K^V2l-VWwYNehkJ0fForr)-7S1CWz zDZ&4Z#@Jo2W&j4k(gII%T8I#4{TqnfpaQui6qXfb$ue5LmcbQc&4?wWd=IQsb$^`? 
zETg`^46S)x0gGANM<|%7h7bB$cAmlICQ`96OR%mHH#_1`h}n?7nMc8cge@3vLvJG{ z$i?M?W5(i`*+ovkv_m{(FGjY73$F=_odWMg>YuGuQO!-moFR5Nevq%;%3B{!TX(0d zyOY)=%UcgERnhy$3z@kJDDChv60N!K0fmKo%Wg3~-2AERde{xnf4Ub-8AVu5Q?vnj{a`mpC1qGqlz@z3<)~6>H6M!%B)a6_eC?WB?>n))(9l)o?dcFLDX)m)x(oq@8=8 zd)uD+Zt7+314w<3OM?Fayi0KJr+(0B&keer`!MxA$oG;nxEbqsHT47TL&*7StN?zH z8!F=d9v)@DJ z`W7_w7U}f+cIfp7(ArDT(66u|>=+JB63GQ8JA3hMf z`s>s+u`~|G2i&im!N$Vg{u1iM?|I*j2(=xyE1t1VyI{HK_M^!0O-{Imyk7c(+cOr# z)d#J-&AlUj%zND1sqaH;@58sg8?@uwD10R(uNp6@M=;c%qtFb=+wDv(iFDe(mgyZD zah*N{NrHTGN>D-LL9Zyg0IIv$$q7L@I_aBmsKiyg7jiOPm|f)utjsdIAkHGHByvg= zP$d_FK0obZgX80FW;)bVY>ltVf>X}k0`EoYQ&V-ua{4pl&P-`n()*$BUEf_D*MElJ zcb4mViZhVMORaIhl(W2r zc@lCI!R!S&9)lb?q@J%xLf(MmbGSUeT4@F1c&3?Fj^?m>7@Gm zfhWS+*Ypud5mG$&+;fZ#s$veHheY@J{{iPvBco+}Q-U7&EfZib`2hcD0cVLZJwaq5 zNg*{bJ>WT%BGDbighS{Z29HI)htwt#TgAjAGY(58QFFSiIaSuom$gPz8ADmxP?s{)MSB(v&9UI^7jC_f@GfrJ3s@v=ZB1EQ zdF#eV&yvYJyWxPhr>nN7sIc7g|^ZnC6Jj+Ds}Z{bbOh;mu3&{Zy3s%~^%?~KyCrG+!L{Bg-t zjqA8FW2tzBEX~TVS=?j)(7Kt58+Iz*mhDss+DXO-TX?r$&;+Ltw$rPGISv3BVC;Dz zN`oy|ZhIY;OB?Msvx8e~iRQ`QAs0y>a1Q1dR1ETNX`s$R67v*zYX*w-jr;s=#uu0_ zK2Z7hgyeCD@glWkYKR76=YINZ!kl>OmxJ?i-gGE!>P?w?dDC%Ddt9U|9np-Swq@1F zW9P`n{|F+8WZ*N6GNdwiUh4pdWYzO_GK?r!XkMCU6$1>lyyW;r)?c6TL>u@cFaU%Pna;_Oh=6YKuzvG~Ds%Z^mb4!&g3w6V=_i_==8QM^6b>5e)AL?W8fCuZevfR?QswmMT*Cu|cVx=&3tw;UNu<&wE6)*9C*dU^A{h$dsU zfY-7m-Lf;)vXgJweUDP;+Ed2H$e~&D?5U_ZdMX2wefX6ZGG)~nqvbU=x-BN3+Z8() zH{UwKS3Q(6K6K^CvIYu$sin%TuN{i&e|Yj|{qdT=c{cGRU$zUFhN&`IapgOlw&8wK z&oO|BKY`Q>)U?}x{1nAEh!UX|BjP-G1Z8K-H}*E71Qy|1AW;@)QlxB= ztV`WA?G^LEp%WWwE=JR<^SGf3_8NFV&|7!C@j`N2+S!|O_VUi-pIH4d`flJKL%$3C zKAb)|lsY-Y4Uc|&a%}eWje+X}e|es>`Xf5pYHRf`n=qb@8iDFe8Dj+IEMIjPjotE+02_9oYwKxJrz|kF8fNbj}`i<~hI<0V&2<3%~>2W-l15LhvoIzUbTZOO? 
z(V(1M!3B<3qVQlbFaITG32$@AD=(7^-b$lDO=sy{B`96kG`;|Whn=l>%uEOl)&eUY zj!H1=dys9F4ipzbsP?|^$(g2tJ@>WKQ9rd z3^g;WK0YU1z6P5Az8XKj7&Jm)DM1oYNEFYb@a15O2(x@Bd8lGQwV1@oZ$trrDVh3p zO5$o?;|MUzPRazFw7;T;vf4G7;IXJ0Lk?P4)B_n6z}Y75idq`QqBD{!3%S&=MN?*>ZEib1if2!2 zob(lVfJkd$+ElR z7YVEWTDKRD;k}oqn0=v+`_}|+G{ERUvN0$)7L-b{&>q_Y-XH&k`s0h#g8kfGS*$#^ z^HxpV_lxm6T6a#e5%IMzJR-+!ND+ z+Bd;^;iQqsz%BUhIC!9_C3+WJ$0j^||v(miYl>WpPD`xa2Vrn3&d#;%enA zVW!SCS;n!VKY#~AnlV@1a9nppdt(f5-U8U!T63fKdT(?rzKyp!7pxtL=7=s+W4qZn z*O;!^o2uEHe2TB>iIjfYcYe0}#>wj^)7Gsi>(+!Wxo^>WjJq%d*(+*p3|=2hSL{qx z>`Yvm*YOp{BKl7a*4O)an8ATX`?*CM=zS~GMqA2gi@Iah+qPS_bkn1$rbm;#eAD4Y z_ zWH6g*_-P92QU?$budYiKRs+{9sim+4Cf6 zlr~A+52zeU{^Fx=76R%~HdHlv{{!Wy5faL?zc>|urFY@(uO@a1)iXygX2@ID168! ziijHAUorn4-)m8(f2DuN!r z3#{efJVsCx)j!SrHNMbbeeOS%vTa z9aO;n3YIp=t7_X#-JA|=f|6#wdhbH@!FhS4WC=LeAH1&opnk z?YQOO9y&OGkZ(S+Y^0i;|7uV-o59Yc#^Ce>9)sG zZIAuNGJpEF=RP`j*T+5m4BvB}KQPF*J-eb(R8-zmKn-7L^d+TWi&ES8Wlc$?KB8Z) zr_7Z%Y}ak+a!0D%5pVs4`b|i-0qZiu_&8lzc zuj`|rdRi>obk_`ur3QOMv((fU@A>HOjCr4yEOhW&iQfrSP)*W>;|r6@Q5Z-olJu>_j_ zqM9nJ%G5XA?49f7HttPc;Oh^h>yM=BkMQ-q3-u%1iIK>OjJ=)P^GvGhJXbjgWfRn0F#=pZG9D;BQf*u%luaHoiL{VZ?%>bs})7?>sq?(=jBp6Aef5UagJ8q-+$U1 z&RGL}(@7avxEAky5l|4$S(U=sgn^aM%C3%tHOymh4xuII6qxe-I`tI+5n+QwA%8*{ ztyCu0;u{6D`9La>N`KRsWI=`nYeAI6_ozM16~QmPDi53K(O^|cDt8kVnqQz`a;Rr%+@ZmTA& zL_p-I?GuevV~|e-KLiWqK?!0Zac_yRgV_(BU|`(C(uUg?frTfS0R#A=|!mf z#3bz-nzm0)_^0g%J$$gW2S@^D9(FLGwF966AaK!Z4^Dt-FP`YyvsepKvWG`bhnAVk zbq#v}fGh|)0sv&$%tAduW|KkIRm4V$c>_x$0~HJiSeT#(Efx+lKgJ}>YHS83zW(V@ ztB5?O2QMf*Vdm(e6Up!R^( z6x=CM=1VcZ#PVcJyCCK$6JZzuTzMFU+5+Y}dWg@MTj&wh06}Mn$(Wy_NA%zxj3MwM z3UcYhjwlCkZ^Ha7de|7jVPfAk+0kTvj_E%`56@`@ea1EJRY zwKVtVx(ThYeX##S9cD3gK~|^mc)Xc6w=bBtC(NK&*ti)q1J=3~YP0Tnu-*F80Ze7> zjmhhi>B>h^m5(GG$33A7fqbG4r} zw%(kcn~v8c4)Tq=GWGUML-Wm%xsljqzsD8~Wp}D&ce3Kcns;mFySP)Q`8{X2r=HVA%)@zo{Fc#7i|>naUAcMLN;R~6S!Jv(jT~F9rN9IRlr3pb<}#ZdagRQ87ypTw(=DZi)tCZV)sJD zX>RZ7$YH2EYi0YwV%<)#kw}->Q)TugYeS~4WyxlbRo$+;RTn?YH+3vD?M*6qTh~(a 
zhS>1!v0Gya=X^82v4?LyoN;W=G;a9PpsuL=SA~{*d3mi^)6Z(-myI_HEH+Jk zQZ}{xqztKG`IL+*ua7>>nKp6SP2zECR;>UeMp1ZZL9pR0!G;eselAdaXJtrklS(T> zP#IB&=a3XOAv#)F3^M`-DVA4N1dE~)qzF39pE;~k7g!TZ=tAQ2QV2J26s#JohrzCx z)}!K=;8)q86TSn`WKpF#Znf4p+As!tH?U!7fIb@6yoAT1cVU!Je-TV2^>s!A+kM&9 zBL&~5DLPih`@auV7V1&+{r$+!nC4GGSz&D+Wf3HmQwrtlFQ#5uRn7q8QmkAt#8lXx zF|1fp0@m0Nn3vcw&JhDT`d{d zOo8+-9RuO=z|s|ALy^+ZCqRMLFr2bsEd(XjJ)j?zKT+h{2USjSx-XzruqUPHDsq3x z9dkID+M@x3N5jN#@evS+UzU?3cA~uJ$NQJr%i6dYi2gRVBm|ICj39fcuob&-| z&4Fq#x9|=5z+@6|FNQM4 z?f^bA=v_t+znmC)5JjX2dh?L@Z}2UuAqC}y$w|L>IA+aj9|f&+43{S_p=@m*MfIeb zoeVOd)jW&UX(c>Q3`+!K;baP>Xyt>3JT79hBb1K&CR};a5X!P9w>cv=a?VMvi822H zYX|KoA=3BZfyK#2)#h3!syg-gSy_FT+_~^jIZik zsM?=&BhY&!W$BJ+i{tL1V7#s|UAHGyx97c<)weJB&ZqdgbDS2iWp$=)``r!8VE7|D zD*u9_ti5u0DCw1dX0@$Q4LUPiwSZd_m#$p8Hgjbrrd=>?MvWpUU0v5*qLp5vCfWUA z@4LNx#bH2{mKs!1!tJe{H#_G#$$i1P4zQPp`+-$;X|S@ZdT60)TS5-c66Ad(Rr5$< zbMh!2E*{I&HQwx+>q^&krs_JAyXVjGb$wuqYiqhWI5(KK?Md18Bsb3=0iGXv1AVa_|u4UBwJ1Vy;Yld z@}0A9ot=OFx0gS<%-24#P~*#3`=LUMKB9pxudj7}YVqm8^wXZy(@^&q7y8bZ6#TQn z0r?6=wH-v?d`J%J==NPn^>37m?LDtqXNPW#ULTEB@|Bwx%N=)jz}tOC?^Z58d1i6n z+1IwuUc@_|u`TfneAOeoWn0>^J7w9uXz7I0SRlwvTjz&3@Ay6V9|Rd89|i)TrLtp4 zOS)t7ukMvlwWDDF095|lk{!nt>Q#DRM9+w}SN!0uuLmxI)8WnyNO)K_Yh5u!4i-Z> z;}}~0I^J{v8+GKUa705GtJ*!m!rfwEw#W~~E&CCCpS>wTHszRe^#uTf6`P0$}G zcA!!C9&9owwgc)|ZwGDLm}q^c{jK)&wiBssC+>Q=GtctdTq)CYoc1}fSgqFp2$%r&bv z#kMX}5G3do43>||+Ett5rez9(WcLaN%g@RlRvnYaAQ8g^q++;C%j7Dz49jDP6=1lm zHK=x@F((8`>z5daixRu&C>0N6H2~d){JsmsG@o(98^H-$N@w;TN}S{$N}S?9lsLtI zC~@N1a)HeuE@@%Ig9?%B&cwP_sJK5b8Z+Wdi0@bchvRP6nM2~DK`ts<$oQ@qc=&S% zvAg@2G-Lt#pjcc{aFbFd`zOlq2dd?tDBb^{j{bo<@CWLVKTr?-oBC;)Y|9rE`u}!W OJ|dTa2|oH{fc`Jx!O}Va diff --git a/app/modules/agent/engine/graphs/__pycache__/project_edits_patcher.cpython-312.pyc b/app/modules/agent/engine/graphs/__pycache__/project_edits_patcher.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a3773c4e31e252e063b061a10f91368e5eae48b GIT binary patch literal 9223 
zcmd5>UrZakaI6L1~;&%X@)StCLGJATkFvUs!I4mS@7 zj^GSoV$LvSFc3!KEWsH+Avn{wMontUxJ3G>AMk_EpfH#D4;J`HkiQU!1#k00P!rNh zy7;htz<~Ag#FT*}ri`3{H*rRuRNO)kdOdB#J=0ds{0TW_;VjT@ zg?DVb-{Gw#c~eo|4)V6AWJyi?w3%~2zrCc72A%5Y6*M_P((zo9RUk<}mm~v{POche zT?0Rox5A8Sp{;6(^05kYIvR-yfne-y&72AoTo43467vV418T*JZ?J?y-Cl^?5f!t* z#})r;>W$QY%I zOQlwcyfa1+f(v#`#s1$D93_!Zn!lo^jhBgMjBA-8^xc)9Vz39aOQK>{{a)`Ej1mQF zu}X%7nX@Gu-xk!_xxx1Oaa;0Z%}{+eKiYzX6%IGVfexy6Hgddp6&e) zS3}DBkY1tF@2>8b-A6av16hOY8cf=@997AM&j(Ys4afdG+kl$7|JJlR{x|lFlpR>R zzV^oY2gq{~u@@12@q5qyhs`U^=%v1_QT7~9R&6n!RM&UykD2a2e3WNLzOV5n&H0vA zWNq5=97w;mYIyAF`H`S(M?O85JiYA7cOKsC9LjYLWv{MHu8+u_Q#sm~JiB}&J&tGD z(3Dz8Urvu_npUr@o<;kHP{Yth^-#X9Ie8{ex2A`4^x+)cT5y2EpQ?y`-n8`_`Voy@ z9bMZmd&f4s=hh8!^Lb?T)k1+X zxvC0;$yrsf64mv~v7`j^-Uh=FilqpnLJK^p`=ByxVyAwv%e7<0zdxb`RU#!BI0I*# zHeDvlz|lBc0x&$Jc5wjpItb{_KWo!_cj|A8x3eW!d%7<{O7Lg)p+Rt}>d$#+$-gyCscHa*PV-e7zl?r6}`Okfu(RX04PVsmgaed7{QpW*uf+C2L|mAkj|zZNnr5tT&>OdO@N@IAC7*yj zIo_!v8W-h7ZzLK66a{KiLPZ@0i}2Vb_}~@c5EMSUP><(Dav}J8Hi)IGN zzve;xZ>%3dtrrk`0nr!0xi_nQxoTggeYHxiK9QvIj_TxaaYS9n!7LALI9dVPHd$Ye z^<`dOJ({hP*%OF9fyZInHyrJ1Z&!}(%G}M8Szczx5IvS>SZ#8}MrI6I$AorRVz8VR z;Xe%gygdr#E;KMyq)&hax_6a)yhNHnPw^`GcmdjtYt$L%DV@yaF{KPrreWb>Xo|N# zg@qLWyB}+~uJW+wg}%DBZKB5JNse!^`%?VZlaJX$`D*u;r#W@y;q{g4=?^o4yst<0 z^lo|va-IR;VVj*@DbFUigQ zvU_0DeIn;RA-hj5Q(N_(RQtn@m5%h#YOUOORIYz*v;KIl{ zWTvvUa@&yHI=tC>I@fwyZXMrX$FVBb{n?v^HZW14ov3!Hc&vWJT@($2rV7e8pQ32b z3w)SB&He%~tHcfnTaidrfN9PsmFOHqtUK^*syGLVGwWboq-tjU4+5e|N;0bvAPscR z#91T?(>Tctr46EHOy&%g=pbh=>9AB@PniU`d2v+^Rij0)s<9@m#<=#c$Ou`yGeWcs zQ87fFFtX1%p;#mQU4M*UjA@>W>0^A~4o&gu-QJMsRY_Er8Wa71c}9GHnIQDXASMk5 zgc+b*dW|||+<1JWaWwmZTz@j@$UEGr z;~S2biuCH#+VEO8x;Ta2okmkLXy!I*4Iws!=un=i$D^eEy2JdE^MgD8n#KwY0o<5WQ&Tvi z0(UREZ3XB6;3(=(SR@Na-2uA@+FKUQHOt_nBpZh%CLrlmDYgUvvFa&pJ{v6yB6L~e>AbJCk z6cZrW3ki@Di?@~&3^kOS22|j1Mr%40=DXq@JDj7eJ1`yN1#kJg8l(v$FpKz0d&mAY zT=QA>&=-ZoghHspYZ}D`Mk!xaMRZb4P+GlW(`Gdv6%~3e#BpKXueK-_yb|2(z^qB+ zXXYT|q{5nT3J)<3MTIn#hG0F=8Gq>+2)1XrH?X2}u!RA}8X8kaANH^GrzhYqzv-CV 
z(7)L*l4}@|8%C4#R-HTLdgxwpr#n|6snjdi9o?)O%+(Fbbwf#eMF@PtG(8$DLeCk* zoAUKNHQACgC8@yllrX{)UXs+RClFrw99(3OC zOvN)pa&3Rol6TZDU;ONNQSCXzoR5=7BpPmhGocTLT3E<3SP*!3*W5gGhFRVe+3=uTmXfEdHEdXxTO3=hvb!~9g z0HFf}yG@eizVTiQNhBcupRks}l}(rBY-$iv3t$Xstx6IAN{(WxjQZtqIjagy7&NAJahjIWVxhfSv8GH%>GuA9`pE2OG3r;;d(`(L# z=h+0ObrlnBCHw0Di$k-Z2~I~Wck$_h8WU+0Ph~_Z&iZucFUv7`^{@;Rz7p0>!g+x8 z4$tF+oG6@tnF#pwFMhO=27Yl~Jt%7qh z0@K?aFe-Kta`-Xz&_QtH$(_W-ZUk1y*=Q)@b5_J*95JeYK#<16#~H#tXuwAh5ni#V zxjY`Qw>Zz;*el}y(;dII?K(y8p`KtJl#~K)!klT?wKfhgx~W z@`&d1^(_yg_oHb}u74%N^$Y7K&{Pm{Q4~}Y|E&UI z1w;$^u7S*rB$b?54tzSBI+i}NGMF)}i~u{$Gc{k_{q*kg!sj2Q5*eVZJ;>Vg-`gy~ zzH1QiZv9hd^H`_(?`s?+HjH;mGlqh%B6dwgI|AoQqKsuoBKTfpBWcI>S8%Qeh=} z3K5?*4zUejj~&-KI;p3e=6eU>lygVcymOQR5h69Q6Bojn7|ye+p6??C9PtQv1&RX> z@chyFV3Y$dF~QlOkJh|w5{qE_DmEQfM8$?fO4ZAB*An31MmsoEOtHX>VhkODUyov( zg?hF}{dnabra9Os=#95MT?s@86I`d5UOb2)$;aDGHBa5N`!u(3ZO3dSa?Ieoc6K(#uiR%@;`?oW;CEqeP3 z0swgLX71`ubc;u~!?{~wbTyoNdu|)QQ5bYl6wCy}7A)$mmGJCKO|IAr zHucd?*qNlbd^*LZVk?b^Y0KH#w;gI%r;BoBTm=HEY;0}pk8$;OokLfy|Kpu)++65% zQm;|?`m6tfGRtwR_pC*g)U!1*WW|$2T*Vth30a> zIdpw4cR8|+MGFTURBPsRfq*L8t5$2hKjA0s17O=v_)Ln03r$alL$_2X(!R(v{w(dd qz+dTS>yWk)Mugk&h|x#HS<-^ZU@-hE(e_{F^F~AKj|48&xBeHzEQ+!K literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/graphs/__pycache__/project_edits_support.cpython-312.pyc b/app/modules/agent/engine/graphs/__pycache__/project_edits_support.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54377ce0362cc332b44636de242e6cb470764732 GIT binary patch literal 8954 zcmc&ZTW}LsmbYJe+p;A;co~FkLIhZ3c$ol!0Ok#5f*F!=Ac|18jci#~x+TCWDIp29 zBNBEoBr}N3?jmNVYQ$u>)|*`_B2TiFkEHT3t%yXL9#S<`yHmCNDL7S|@Uwf)ZApHh z$z;B^tNK3AJ@?*o&+DH1SzVolz_;a({}3=a3HeW~R6f33n0yKfmx)M3CP@1Aoatwv z&IZ{LuAgJDjSKQ4Lcc)Ee9$mr>^IV~5HyXL`^^kt$u1%q&Jxl1CaYuaw~R4v(_O6e zx*5&hCp{OHLVoFhFXBHT$5(o_<%+^6E6xk*F!YBG7zK8@(mhC5e zp+RX+L>lqyc=gm+u1r46;PNGcOYUdIHj$Ayk%gZV`2m9{oE7?c(I5$;QG!%546t)y zsK`lB#}dGp&hq^x(JUFD)iA(|7O0y=D~#bJ0qQoWTf{mjvtm7zt>Qx3Ylm6*7_5ZC 
z8N>#d%QkIXBf!>)4#4B2xawi9CTO$IYHNnJhG}gaz_mbIqu44r#5u51XL-#6^vs2x zCb}a|YKGO#gL=!X-OZ=-8T8e`m<0gSI>x&fY0e+ZqeGHEvO^3+l)h**va}#|#e}>yw)yxlH0roOq$6 ze{mLSa6bAM5h|rO%LVAI;u<5@nO--qIlK=pMsJ(38X5Ulvv?y__Te;Mk%9xV89HR- zdj)a3T(0};ec|xRkx?-U8(Zl^7F#KW1_L2!kPsmZ& zNy3mYMvzMMiyMdPD&?tM6hx-7;>yeklW0hobwa6)w#>is}(I3qY6O3%7PeZ2l6d)~|wKbNRa z)b&9x1Al@$rI%>yDdM*FYUDn%@;>9bOGU;=_0Totcx@SE%jX zXNZjSgSok3Y0Pcagy85YN!E&f0+NGIbHC`l-X2nwZ6f}V%&={}A(9Xxo%)+m!9Q)sb)8S!mmzYulf1>;1CvnG|#T&`;DO zPv#Fjr5-(&KXmNUVzu#^qydtmdwJ4OYI*YUHSG+ z>b%Fkux&0i!%9jm#B9G{J!j3$=}ua2o7NXQm#NEpGKULYJ-MzPwR1p)pw*93Q~69#pvwAV zC|o9ystCoInktzQ+1U!NmTE@Nr2_Y7aS9LO{Ig>W4~`Lyf5(9^n|dh+%X45u_nD(I zikr^Gwlc$dPxzD*ZcYb7{+2gF0u9Tsyd0B4LWy zhaH0C4Ee97)|Bz8^@=sUGqWu}XJx+WkwVk@T+{k&Q&!10ZNJkxFV%f@CTk;DY|JukmSW1L9Td0!Icyx7U946XGdYJd1@Ix(c zf&~kRqh@_T;4(AFPXI&>`#TNdae$~=-6=#BD+)HbYGbF_WDaez+%ED%Rl7PhHqL=H zHOwQGReYa)>?3j1M`!wXP=eZ0M&iQLY4a6|Ao(O06i9w17XdgfXfw8P6a@d znLuOeB@5N9LGY;*`C%BW@k675kek=JU=y|oCw;*vXkiMZ@X&-xPFHTtp;39n7YxKC z>VA4hqmghl5_8w$lbWD7rxJh!6kKETrFkhwp`kt3(4KGTObSJ7L%}*fXPuwgaogIS zj{f@jYtK(ex0h}L)lz8c%r$l9n;uH;xMO#WuYEO^dg7HAP+5a{K+pUXJX^Yg7; zh1S)%*46pewQ9rKqP;0;)A@*#eWzxy5$=n!kqiYiowZ0GXDSDiV9g{_WuPG(J?o2C zS*ee7kcgqu-3e|CLjs_YeoFetDRP?oDLKW+=vV>ScR>bcpp1g4LsO(^<{hW~QaBPA z4S}#yQ??smWIURRqpY8WB@E8K57{ znOp8STQ6_8xFIFxogJ#JK4 z|Hm8049^&eL<#!~C;(wu@QiH?5*X^UzXUhUbFPwi3aTnw{M4l+&Q}g4&JE84(y(zJ zWP=CKjGITkPeMgD2B<-XxUvk{Egp~O>4+Q+`N4}2kHLYUU6&ungo=kjrqbjVrUXRp zggjaWI8H_PWmeajekR9P;=X%MQN+?>YB61Z*A8F1A%!Fg&$+wQ(V)ORx1^b?yeNW!LPc`k+S!24Gp*f?h;Q=%FgN-RSMT1&ev$FRD zALkS<&MV9qJEc2OOhJUpt4KiXCSia}Tn*D(!2_4V4!BMMM8cRbCCmv+!WuV<{4hwU z%$&E!4bv{e=C~;aL4~+AZo$0bpV14W;rtSwOu^PN*VEdnYDN^QtF^@qaa+tvm54z< z*Fhj5{pj^d6Cir#3sQp&N@F|p=*Mn_%Zqn3>>m{&Xv2Z(r255Or|Xp}8)z0}Q7^T< znmGh99ldBA(W7yi5n?nZxrLjYh53RymN(3%rXk4g5swBF_Xhd8X zBmx9p%7(EewN%stU_F3Oxef^^tCoeALh1HG^CP+DNAk_9l9pogf=kEJY@x}MYx02C zmo)#*KKGit=xn*;Se)+6SSI%59h;NJqQePE$Ff4lx?IP)e8>8Ggfn;LY>SfH#~tH` zQjXN2B3SmpSDr66%qiNOuPdolY4%EYdRxYEZCAc^Mb5V3+^&)l27O~9jZLp_Pg!0& 
z_^ZCm+<$y#;)#4iH)u|KORD+Y3#w`1gREW^?$BSr>eHewtAPEuIQMsV>Ulb!2VxWC(MXF32~l^S=ozzAi6>092I+epXmS#OJ(A5U?v$=mo5f| z*x&LDS&yT82!K4|I z;>vZuNo3m#p4~am?!0I3mrcVkQ2g!Cr$e6w3%ydVS5gN8U-S-54IIXS!|1<0ni|VF zAI{s|s>%J`J%KcjFyCoS!wN<$E_ZJ0;@@*X^3g+`+g9=aVRb-?em#ad%)0z4Qe=kh z!m9`B)X9Z9d$XW*!IX>OVAaymA`e=vZ2sRMf6s~r(fB7IA>uv*QV=bJ1`q`no*S$f z_BsaY0d;*X(GYEMzLvK5E z<2%_tY$;A3)!P`hM=!4X!HD zUFwIqmjUi2PZS+Q^-}It^+SMs7ZJb@(11w&cc|P%FhG&bUDyy^3BX-0I^_q56#jSB ztoru!iCj>f0Q@j>?!^r*J%)u>{Ag4keXZ8=T(JHRVBVMLZr;hggiF*n_JR5i?8!xI zZ$o+`m#i)lw&+s-6&3+NFe|O%dfuZDxRtDW(-U1Zecf+CAMWgZ7e?^A;CaYDyuqb8 z;l_w@IJmQT2P8>6?(+{vF(d)_Z>VqU+XB3~pV6HI-lz+EQ!}R1DF-{8P;aX50@3ec zBr(Qzxu{}AZV_YUqNz){LVh7=sf%f+qz=?V_E6Lz|FuXX0`7p z`OQbv$DUMePnB3w*TWX;yE4+h1g;0v^#|00LwV1z+7(o7BSl+%(dH@D85=E0q0~T{ z7pD9vdAt{IUwfHkPtn$RVgI@PsZ}ZQ>fn{Zt0PxNQr^tgjEJ~0PiNWenh!VL+?efB z=kET(Mz3mnnLpT_7sk$wrJD2hMXG7hgBJoGR!(&VI-Wn{eyHE~3cS}Prg?HnZh#TF zv!*!PLn^*nMHcC$K)(tYqYW?(2+0lPK*+Peh1sY>Z`36R_#OE7eC}_e7{kr|6~Li% zAY;E51$T=87XypG4194RW*dkGgDzP*1@5sFvz7}kUoaS5il8_P3h}>{x$SKjcN0Yt zI30?V!E-M?5J+DQV<#ViGR4&oIQXIS3nN|4v9r^ktNv_12 zoR*~V8wY7{<{jOcQ@0&(Me9~g-IPzM9M3RpRc6Kb1s+PRI6^Jc15cdQFm4~w11_wL zyoizg(cx&A#;__kG5S{Eg5DW7G5KX^i7l$N|7kcB9Hu{cKz!xG3+G-a*q7z(%QBBn zhqju!PQyd;}6r7=V~kM1C3@kZf`o6V!e(+F$uJBpQEw zbTlZxh;`g7Woi8^P()=k$239fs~iFE;w<@^<%DIKjuL_FmO?Z2NdF{OOBTDZi^;4m z5zMn|ZgIB`-!k9aqSrr(sE42WG&V^)OYMSSN%xcpWSPU0n3d`cLNg+UY@%Zlvr>Z~ zw50sl-STz4AZ$!+#}OMd+b24&@1@0C00}LndPaCO!<7hR6WkI7FBwgXuo#(m9;YuJ?CPP1D*pRD<4-#6HzlN_n- zw#s{v=oqfijACG5AQ(8V-y%Km`sFFVkbCG0xkrB?_vkO=9(=8ydTZAo-qZ280xFpEMJ5UU=E^mKmsvChWS0Q{+^h>A{~Du YyT2k^zalIC2Ofx-P4^(y%IN$3KL*y!M*si- literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/graphs/__pycache__/project_qa_graph.cpython-312.pyc b/app/modules/agent/engine/graphs/__pycache__/project_qa_graph.cpython-312.pyc index 8d231ffa080d023daa500fb1bd9d9fade81048d2..5166f16364ebea2628cb38bbfdefa49689f6e1a1 100644 GIT binary patch delta 624 
zcmY*W&ubGw6n?WmW_QExCNXVVBE?{9F-VZcOA#aFpgC9+>unE);0`fiH;Jy#rk{|8NyBbDtT}tH>ew;HNJ@GE`1h$PbnKW@n(35UJE;-E0tEhCZSfkbU?X2gM zc9s&a z`99uJ%_rt#^Sgd!M~^?n&-KB{ZGCQE7PZ(&g+#h<1g{l>^gvb@7H8%3o3kTe<-NVn z3r#Ug%AYA04;80XtFhgJvbwPrYBl1oFo95-=S`<4opgz#=wgzgL-m5&vO5mBNS7Jq z-{zfTX(S?qet`KK3R_V41?62R?ZVPeEz_U*re(IY%wTz2TX={M@H|TI0ZWnTKhUd` AzW@LL delta 319 zcmcaAvQ~)iG%qg~0}!;{oR_(Vbt0ccbO4YyogsxGg)xUAmobVlmnn)VmpO`=5hTWx z!;;Gy#R_IK=dk6nN3nz1EIAyxoKc*N3``8}3@NNF3@L1@nLy?;Fhp@xvTJg@1Ti%Lo0qT#F*0egPBv$kXVPSuoX0NBTFe5} zHF-L_5t}fOQM_pKdG^^{f|4`TF7PW};82`Al_QmrWAbkf6**y6r3LyQ7=YAe>#uw>=2t6S2i^C>2KczG$ a)vm~BvJI=GpbVq(gvUpaoglfmTjz8 zSfq-yC!4UkxM%@26lsG99U$?G!zMRBr8FniuE+q$Wd!14Ng(lonURt4E`!Wd2AwZ# RQjF3QG8dG7V*oP1S^?^FAxr=O diff --git a/app/modules/agent/engine/graphs/base_graph.py b/app/modules/agent/engine/graphs/base_graph.py index 28b4c81..b153ec9 100644 --- a/app/modules/agent/engine/graphs/base_graph.py +++ b/app/modules/agent/engine/graphs/base_graph.py @@ -1,9 +1,13 @@ +import logging + from langgraph.graph import END, START, StateGraph from app.modules.agent.engine.graphs.progress import emit_progress_sync from app.modules.agent.llm import AgentLlmService from app.modules.agent.engine.graphs.state import AgentGraphState +LOGGER = logging.getLogger(__name__) + class BaseGraphFactory: def __init__(self, llm: AgentLlmService) -> None: @@ -31,7 +35,13 @@ class BaseGraphFactory: stage="graph.default.context.done", message="Контекст собран, перехожу к формированию ответа.", ) - return {"rag_context": rag, "confluence_context": conf} + result = {"rag_context": rag, "confluence_context": conf} + LOGGER.warning( + "graph step result: graph=default step=context rag_len=%s confluence_len=%s", + len(rag or ""), + len(conf or ""), + ) + return result def _answer_node(self, state: AgentGraphState) -> dict: emit_progress_sync( @@ -55,4 +65,9 @@ class BaseGraphFactory: stage="graph.default.answer.done", message="Черновик ответа подготовлен.", ) - return {"answer": answer} + result = 
{"answer": answer} + LOGGER.warning( + "graph step result: graph=default step=answer answer_len=%s", + len(answer or ""), + ) + return result diff --git a/app/modules/agent/engine/graphs/project_edits_contract.py b/app/modules/agent/engine/graphs/project_edits_contract.py new file mode 100644 index 0000000..1c9e72a --- /dev/null +++ b/app/modules/agent/engine/graphs/project_edits_contract.py @@ -0,0 +1,171 @@ +import re +from dataclasses import dataclass, field + + +@dataclass +class BlockContract: + type: str + max_changed_lines: int = 6 + start_anchor: str = "" + end_anchor: str = "" + old_line: str = "" + + def as_dict(self) -> dict: + return { + "type": self.type, + "max_changed_lines": self.max_changed_lines, + "start_anchor": self.start_anchor, + "end_anchor": self.end_anchor, + "old_line": self.old_line, + } + + +@dataclass +class FileEditContract: + path: str + reason: str + intent: str = "update" + max_hunks: int = 1 + max_changed_lines: int = 8 + allowed_blocks: list[BlockContract] = field(default_factory=list) + + def as_dict(self) -> dict: + return { + "path": self.path, + "reason": self.reason, + "intent": self.intent, + "max_hunks": self.max_hunks, + "max_changed_lines": self.max_changed_lines, + "allowed_blocks": [block.as_dict() for block in self.allowed_blocks], + } + + +class ContractParser: + _supported_block_types = {"append_end", "replace_between", "replace_line_equals"} + + def parse(self, payload: dict, *, request: str, requested_path: str) -> list[dict]: + files = payload.get("files", []) if isinstance(payload, dict) else [] + parsed: list[FileEditContract] = [] + for item in files if isinstance(files, list) else []: + contract = self._parse_file_contract(item) + if contract: + parsed.append(contract) + + if not parsed: + fallback = self._fallback_contract(request=request, requested_path=requested_path) + if fallback: + parsed.append(fallback) + + return [item.as_dict() for item in parsed] + + def _parse_file_contract(self, item: object) -> 
FileEditContract | None: + if not isinstance(item, dict): + return None + path = str(item.get("path", "")).replace("\\", "/").strip() + if not path: + return None + reason = str(item.get("reason", "")).strip() or "Requested user adjustment." + intent = str(item.get("intent", "update")).strip().lower() or "update" + if intent not in {"update", "create"}: + intent = "update" + max_hunks = self._clamp_int(item.get("max_hunks"), default=1, min_value=1, max_value=5) + max_changed_lines = self._clamp_int(item.get("max_changed_lines"), default=8, min_value=1, max_value=120) + blocks: list[BlockContract] = [] + raw_blocks = item.get("allowed_blocks", []) + for raw in raw_blocks if isinstance(raw_blocks, list) else []: + block = self._parse_block(raw) + if block: + blocks.append(block) + if not blocks: + return None + return FileEditContract( + path=path, + reason=reason, + intent=intent, + max_hunks=max_hunks, + max_changed_lines=max_changed_lines, + allowed_blocks=blocks, + ) + + def _parse_block(self, raw: object) -> BlockContract | None: + if not isinstance(raw, dict): + return None + kind = self._normalize_block_type(str(raw.get("type", "")).strip().lower()) + if kind not in self._supported_block_types: + return None + max_changed_lines = self._clamp_int(raw.get("max_changed_lines"), default=6, min_value=1, max_value=80) + block = BlockContract( + type=kind, + max_changed_lines=max_changed_lines, + start_anchor=str(raw.get("start_anchor", "")).strip(), + end_anchor=str(raw.get("end_anchor", "")).strip(), + old_line=str(raw.get("old_line", "")).strip(), + ) + if block.type == "replace_between" and (not block.start_anchor or not block.end_anchor): + return None + if block.type == "replace_line_equals" and not block.old_line: + return None + return block + + def _fallback_contract(self, *, request: str, requested_path: str) -> FileEditContract | None: + path = requested_path.strip() + if not path: + return None + low = (request or "").lower() + if any(marker in low for 
marker in ("в конец", "в самый конец", "append to end", "append at the end")): + return FileEditContract( + path=path, + reason="Append-only update inferred from user request.", + intent="update", + max_hunks=1, + max_changed_lines=8, + allowed_blocks=[BlockContract(type="append_end", max_changed_lines=8)], + ) + quoted = self._extract_quoted_line(request) + if quoted: + return FileEditContract( + path=path, + reason="Single-line replacement inferred from quoted segment in user request.", + intent="update", + max_hunks=1, + max_changed_lines=4, + allowed_blocks=[BlockContract(type="replace_line_equals", old_line=quoted, max_changed_lines=4)], + ) + return None + + def _extract_quoted_line(self, text: str) -> str: + value = (text or "").strip() + patterns = [ + r"`([^`]+)`", + r"\"([^\"]+)\"", + r"'([^']+)'", + r"«([^»]+)»", + ] + for pattern in patterns: + match = re.search(pattern, value) + if not match: + continue + candidate = match.group(1).strip() + if candidate: + return candidate + return "" + + def _normalize_block_type(self, value: str) -> str: + mapping = { + "append": "append_end", + "append_eof": "append_end", + "end_append": "append_end", + "replace_block": "replace_between", + "replace_section": "replace_between", + "replace_range": "replace_between", + "replace_line": "replace_line_equals", + "line_equals": "replace_line_equals", + } + return mapping.get(value, value) + + def _clamp_int(self, value: object, *, default: int, min_value: int, max_value: int) -> int: + try: + numeric = int(value) # type: ignore[arg-type] + except Exception: + numeric = default + return max(min_value, min(max_value, numeric)) diff --git a/app/modules/agent/engine/graphs/project_edits_graph.py b/app/modules/agent/engine/graphs/project_edits_graph.py index c390847..47291d4 100644 --- a/app/modules/agent/engine/graphs/project_edits_graph.py +++ b/app/modules/agent/engine/graphs/project_edits_graph.py @@ -1,3 +1,5 @@ +import logging + from langgraph.graph import END, START, 
StateGraph from app.modules.agent.engine.graphs.progress import emit_progress_sync @@ -5,6 +7,8 @@ from app.modules.agent.engine.graphs.project_edits_logic import ProjectEditsLogi from app.modules.agent.engine.graphs.state import AgentGraphState from app.modules.agent.llm import AgentLlmService +LOGGER = logging.getLogger(__name__) + class ProjectEditsGraphFactory: _max_validation_attempts = 2 @@ -38,7 +42,9 @@ class ProjectEditsGraphFactory: stage="graph.project_edits.collect_context", message="Собираю контекст и релевантные файлы для правок.", ) - return self._logic.collect_context(state) + result = self._logic.collect_context(state) + self._log_step_result("collect_context", result) + return result def _plan_changes(self, state: AgentGraphState) -> dict: emit_progress_sync( @@ -46,7 +52,9 @@ class ProjectEditsGraphFactory: stage="graph.project_edits.plan_changes", message="Определяю, что именно нужно изменить и в каких файлах.", ) - return self._logic.plan_changes(state) + result = self._logic.plan_changes(state) + self._log_step_result("plan_changes", result) + return result def _generate_changeset(self, state: AgentGraphState) -> dict: emit_progress_sync( @@ -54,7 +62,9 @@ class ProjectEditsGraphFactory: stage="graph.project_edits.generate_changeset", message="Формирую предлагаемые правки по выбранным файлам.", ) - return self._logic.generate_changeset(state) + result = self._logic.generate_changeset(state) + self._log_step_result("generate_changeset", result) + return result def _self_check(self, state: AgentGraphState) -> dict: emit_progress_sync( @@ -62,7 +72,9 @@ class ProjectEditsGraphFactory: stage="graph.project_edits.self_check", message="Проверяю, что правки соответствуют запросу и не трогают лишнее.", ) - return self._logic.self_check(state) + result = self._logic.self_check(state) + self._log_step_result("self_check", result) + return result def _build_result(self, state: AgentGraphState) -> dict: emit_progress_sync( @@ -70,10 +82,21 @@ class 
ProjectEditsGraphFactory: stage="graph.project_edits.build_result", message="Формирую итоговый changeset и краткий обзор.", ) - return self._logic.build_result(state) + result = self._logic.build_result(state) + self._log_step_result("build_result", result) + return result def _route_after_self_check(self, state: AgentGraphState) -> str: if state.get("validation_passed"): return "ready" attempts = int(state.get("validation_attempts", 0) or 0) return "ready" if attempts >= self._max_validation_attempts else "retry" + + def _log_step_result(self, step: str, result: dict) -> None: + LOGGER.warning( + "graph step result: graph=project_edits step=%s keys=%s changeset_items=%s answer_len=%s", + step, + sorted(result.keys()), + len(result.get("changeset", []) or []), + len(str(result.get("answer", "") or "")), + ) diff --git a/app/modules/agent/engine/graphs/project_edits_logic.py b/app/modules/agent/engine/graphs/project_edits_logic.py index 47bce45..b48245f 100644 --- a/app/modules/agent/engine/graphs/project_edits_logic.py +++ b/app/modules/agent/engine/graphs/project_edits_logic.py @@ -1,127 +1,34 @@ import json -from difflib import SequenceMatcher -import re -from app.modules.agent.engine.graphs.file_targeting import FileTargeting +from app.modules.agent.engine.graphs.project_edits_contract import ContractParser +from app.modules.agent.engine.graphs.project_edits_patcher import ContractPatcher +from app.modules.agent.engine.graphs.project_edits_support import ProjectEditsSupport from app.modules.agent.engine.graphs.state import AgentGraphState from app.modules.agent.llm import AgentLlmService from app.schemas.changeset import ChangeItem -class ProjectEditsSupport: - def __init__(self, max_context_files: int = 12, max_preview_chars: int = 2500) -> None: - self._max_context_files = max_context_files - self._max_preview_chars = max_preview_chars - - def pick_relevant_files(self, message: str, files_map: dict[str, dict]) -> list[dict]: - tokens = {x for x in (message or 
"").lower().replace("/", " ").split() if len(x) >= 4} - scored: list[tuple[int, dict]] = [] - for path, payload in files_map.items(): - content = str(payload.get("content", "")) - score = 0 - low_path = path.lower() - low_content = content.lower() - for token in tokens: - if token in low_path: - score += 3 - if token in low_content: - score += 1 - scored.append((score, self.as_candidate(payload))) - scored.sort(key=lambda x: (-x[0], x[1]["path"])) - return [item for _, item in scored[: self._max_context_files]] - - def as_candidate(self, payload: dict) -> dict: - return { - "path": str(payload.get("path", "")).replace("\\", "/"), - "content": str(payload.get("content", "")), - "content_hash": str(payload.get("content_hash", "")), - } - - def build_summary(self, state: AgentGraphState, changeset: list[ChangeItem]) -> str: - if not changeset: - return "Правки не сформированы: changeset пуст." - lines = [ - "Выполненные действия:", - f"- Проанализирован запрос: {state.get('message', '')}", - "- Собран контекст проекта и выбран набор файлов для правок.", - f"- Проведен self-check: {state.get('validation_feedback', 'без замечаний')}", - "", - "Измененные файлы:", - ] - for item in changeset[:30]: - lines.append(f"- {item.op.value} {item.path}: {item.reason}") - return "\n".join(lines) - - def normalize_file_output(self, text: str) -> str: - value = (text or "").strip() - if value.startswith("```") and value.endswith("```"): - lines = value.splitlines() - if len(lines) >= 3: - return "\n".join(lines[1:-1]).strip() - return value - - def parse_json(self, raw: str): - text = self.normalize_file_output(raw) - try: - return json.loads(text) - except Exception: - return {} - - def similarity(self, original: str, updated: str) -> float: - return SequenceMatcher(None, original or "", updated or "").ratio() - - def shorten(self, text: str, max_chars: int | None = None) -> str: - limit = max_chars or self._max_preview_chars - value = (text or "").strip() - if len(value) <= limit: 
- return value - return value[:limit].rstrip() + "\n...[truncated]" - - def collapse_whitespace(self, text: str) -> str: - return re.sub(r"\s+", " ", (text or "").strip()) - - def line_change_ratio(self, original: str, updated: str) -> float: - orig_lines = (original or "").splitlines() - new_lines = (updated or "").splitlines() - if not orig_lines and not new_lines: - return 0.0 - matcher = SequenceMatcher(None, orig_lines, new_lines) - changed = 0 - for tag, i1, i2, j1, j2 in matcher.get_opcodes(): - if tag == "equal": - continue - changed += max(i2 - i1, j2 - j1) - total = max(len(orig_lines), len(new_lines), 1) - return changed / total - - def added_headings(self, original: str, updated: str) -> int: - old_heads = {line.strip() for line in (original or "").splitlines() if line.strip().startswith("#")} - new_heads = {line.strip() for line in (updated or "").splitlines() if line.strip().startswith("#")} - return len(new_heads - old_heads) - - class ProjectEditsLogic: def __init__(self, llm: AgentLlmService) -> None: self._llm = llm - self._targeting = FileTargeting() self._support = ProjectEditsSupport() + self._contracts = ContractParser() + self._patcher = ContractPatcher() def collect_context(self, state: AgentGraphState) -> dict: message = state.get("message", "") files_map = state.get("files_map", {}) or {} - requested_path = self._targeting.extract_target_path(message) - preferred = self._targeting.lookup_file(files_map, requested_path) if requested_path else None + requested_path = self._support.lookup_file(files_map, self._extract_path_hint(message)) candidates = self._support.pick_relevant_files(message, files_map) - if preferred and not any(x["path"] == preferred.get("path") for x in candidates): - candidates.insert(0, self._support.as_candidate(preferred)) + if requested_path and not any(x["path"] == requested_path.get("path") for x in candidates): + candidates.insert(0, self._support.as_candidate(requested_path)) return { - "edits_requested_path": 
str((preferred or {}).get("path") or (requested_path or "")).strip(), + "edits_requested_path": str((requested_path or {}).get("path", "")).strip() or self._extract_path_hint(message), "edits_context_files": candidates[:12], "validation_attempts": 0, } def plan_changes(self, state: AgentGraphState) -> dict: - context_files = state.get("edits_context_files", []) or [] user_input = json.dumps( { "request": state.get("message", ""), @@ -129,101 +36,110 @@ class ProjectEditsLogic: "context_files": [ { "path": item.get("path", ""), - "content_preview": self._support.shorten(str(item.get("content", ""))), + "content_preview": self._support.shorten(str(item.get("content", "")), 2200), } - for item in context_files + for item in (state.get("edits_context_files", []) or []) ], + "contract_requirements": { + "must_define_allowed_blocks": True, + "max_hunks_per_file": 5, + "default_intent": "update", + }, }, ensure_ascii=False, ) parsed = self._support.parse_json(self._llm.generate("project_edits_plan", user_input)) - files = parsed.get("files", []) if isinstance(parsed, dict) else [] - planned: list[dict] = [] - for item in files[:8] if isinstance(files, list) else []: - if not isinstance(item, dict): - continue - path = str(item.get("path", "")).replace("\\", "/").strip() - if not path: - continue - planned.append( - { - "path": path, - "reason": str(item.get("reason", "")).strip() or "Requested user adjustment.", - } - ) - if not planned: - fallback_path = state.get("edits_requested_path", "").strip() or "docs/REQUESTED_UPDATES.md" - planned = [{"path": fallback_path, "reason": "Fallback path from user request."}] - return {"edits_plan": planned} + contracts = self._contracts.parse( + parsed, + request=str(state.get("message", "")), + requested_path=str(state.get("edits_requested_path", "")), + ) + plan = [{"path": item.get("path", ""), "reason": item.get("reason", "")} for item in contracts] + return {"edits_contracts": contracts, "edits_plan": plan} def 
generate_changeset(self, state: AgentGraphState) -> dict: files_map = state.get("files_map", {}) or {} - planned = state.get("edits_plan", []) or [] + contracts = state.get("edits_contracts", []) or [] changeset: list[ChangeItem] = [] - for item in planned: - path = str(item.get("path", "")).replace("\\", "/").strip() + feedback: list[str] = [] + + for contract in contracts: + if not isinstance(contract, dict): + continue + path = str(contract.get("path", "")).replace("\\", "/").strip() if not path: continue - current = self._targeting.lookup_file(files_map, path) - current_content = str((current or {}).get("content", "")) - user_input = json.dumps( - { - "request": state.get("message", ""), - "path": path, - "reason": item.get("reason", ""), - "current_content": current_content, - "previous_validation_feedback": state.get("validation_feedback", ""), - "rag_context": self._support.shorten(state.get("rag_context", ""), 5000), - "confluence_context": self._support.shorten(state.get("confluence_context", ""), 5000), - "instruction": "Modify only required parts and preserve unrelated content unchanged.", - }, - ensure_ascii=False, - ) - raw = self._llm.generate("project_edits_apply", user_input).strip() - normalized = self._support.normalize_file_output(raw) - if not normalized: + intent = str(contract.get("intent", "update")).strip().lower() or "update" + source = self._support.lookup_file(files_map, path) + if intent == "update" and source is None: + feedback.append(f"{path}: update requested but source file was not provided.") continue - if current: - if normalized == current_content: + current_content = str((source or {}).get("content", "")) + hunks, error = self._generate_hunks_for_contract(state, contract, current_content) + if error: + feedback.append(f"{path}: {error}") + continue + proposed, apply_error = self._patcher.apply(current_content, contract, hunks) + if apply_error: + feedback.append(f"{path}: {apply_error}") + continue + if proposed is None: + 
feedback.append(f"{path}: patch application returned empty result.") + continue + if intent == "update": + if proposed == current_content: + feedback.append(f"{path}: no-op update produced by model.") continue - if self._support.collapse_whitespace(normalized) == self._support.collapse_whitespace(current_content): + if self._support.collapse_whitespace(proposed) == self._support.collapse_whitespace(current_content): + feedback.append(f"{path}: whitespace-only update is not allowed.") continue - reason = str(item.get("reason", "")).strip() or "User-requested update." - if current and current.get("content_hash"): + reason = str(contract.get("reason", "")).strip() or "Requested user adjustment." + if source and source.get("content_hash"): changeset.append( ChangeItem( op="update", - path=str(current.get("path") or path), - base_hash=str(current.get("content_hash", "")), - proposed_content=normalized, + path=str(source.get("path") or path), + base_hash=str(source.get("content_hash", "")), + proposed_content=proposed, reason=reason, + hunks=hunks, ) ) else: - changeset.append(ChangeItem(op="create", path=path, proposed_content=normalized, reason=reason)) - return {"changeset": changeset} + changeset.append( + ChangeItem( + op="create", + path=path, + proposed_content=proposed, + reason=reason, + hunks=hunks, + ) + ) + + return {"changeset": changeset, "edits_generation_feedback": " | ".join(feedback)} def self_check(self, state: AgentGraphState) -> dict: attempts = int(state.get("validation_attempts", 0) or 0) + 1 changeset = state.get("changeset", []) or [] files_map = state.get("files_map", {}) or {} - is_broad_rewrite = self._is_broad_rewrite_request(str(state.get("message", ""))) if not changeset: - return {"validation_attempts": attempts, "validation_passed": False, "validation_feedback": "Generated changeset is empty."} + feedback = str(state.get("edits_generation_feedback", "")).strip() or "Generated changeset is empty." 
+ return {"validation_attempts": attempts, "validation_passed": False, "validation_feedback": feedback} + broad = self._support.is_broad_rewrite_request(str(state.get("message", ""))) for item in changeset: if item.op.value != "update": continue - source = self._targeting.lookup_file(files_map, item.path) + source = self._support.lookup_file(files_map, item.path) if not source: continue original = str(source.get("content", "")) proposed = item.proposed_content or "" similarity = self._support.similarity(original, proposed) change_ratio = self._support.line_change_ratio(original, proposed) - headings_added = self._support.added_headings(original, proposed) - min_similarity = 0.75 if is_broad_rewrite else 0.9 - max_change_ratio = 0.7 if is_broad_rewrite else 0.35 + added_headings = self._support.added_headings(original, proposed) + min_similarity = 0.75 if broad else 0.9 + max_change_ratio = 0.7 if broad else 0.35 if similarity < min_similarity: return { "validation_attempts": attempts, @@ -236,7 +152,7 @@ class ProjectEditsLogic: "validation_passed": False, "validation_feedback": f"File {item.path} changed too broadly (change_ratio={change_ratio:.2f}).", } - if not is_broad_rewrite and headings_added > 0: + if not broad and added_headings > 0: return { "validation_attempts": attempts, "validation_passed": False, @@ -245,27 +161,68 @@ class ProjectEditsLogic: payload = { "request": state.get("message", ""), + "contracts": state.get("edits_contracts", []), "changeset": [{"op": x.op.value, "path": x.path, "reason": x.reason} for x in changeset[:20]], - "rule": "Changes must match request and avoid unrelated modifications.", + "rule": "Changes must stay inside contract blocks and not affect unrelated sections.", } parsed = self._support.parse_json(self._llm.generate("project_edits_self_check", json.dumps(payload, ensure_ascii=False))) passed = bool(parsed.get("pass")) if isinstance(parsed, dict) else False feedback = str(parsed.get("feedback", "")).strip() if 
isinstance(parsed, dict) else "" - return {"validation_attempts": attempts, "validation_passed": passed, "validation_feedback": feedback or "No feedback provided."} + return { + "validation_attempts": attempts, + "validation_passed": passed, + "validation_feedback": feedback or "No validation feedback provided.", + } def build_result(self, state: AgentGraphState) -> dict: changeset = state.get("changeset", []) or [] return {"changeset": changeset, "answer": self._support.build_summary(state, changeset)} - def _is_broad_rewrite_request(self, message: str) -> bool: - low = (message or "").lower() - markers = ( - "перепиши", - "полностью", - "целиком", - "с нуля", - "full rewrite", - "rewrite all", - "реорганизуй документ", - ) - return any(marker in low for marker in markers) + def _generate_hunks_for_contract( + self, + state: AgentGraphState, + contract: dict, + current_content: str, + ) -> tuple[list[dict], str | None]: + prompt_payload = { + "request": state.get("message", ""), + "contract": contract, + "current_content": self._support.shorten(current_content, 18000), + "previous_validation_feedback": state.get("validation_feedback", ""), + "rag_context": self._support.shorten(state.get("rag_context", ""), 5000), + "confluence_context": self._support.shorten(state.get("confluence_context", ""), 5000), + } + raw = self._llm.generate("project_edits_hunks", json.dumps(prompt_payload, ensure_ascii=False)) + parsed = self._support.parse_json(raw) + hunks = parsed.get("hunks", []) if isinstance(parsed, dict) else [] + if not isinstance(hunks, list) or not hunks: + return [], "Model did not return contract hunks." 
+ normalized: list[dict] = [] + for hunk in hunks: + if not isinstance(hunk, dict): + continue + kind = str(hunk.get("type", "")).strip().lower() + if kind not in {"append_end", "replace_between", "replace_line_equals"}: + continue + normalized.append( + { + "type": kind, + "start_anchor": str(hunk.get("start_anchor", "")), + "end_anchor": str(hunk.get("end_anchor", "")), + "old_line": str(hunk.get("old_line", "")), + "new_text": str(hunk.get("new_text", "")), + } + ) + if not normalized: + return [], "Model hunks are empty or invalid." + return normalized, None + + def _extract_path_hint(self, message: str) -> str: + words = (message or "").replace("\\", "/").split() + for token in words: + cleaned = token.strip("`'\".,:;()[]{}") + if "/" in cleaned and "." in cleaned: + return cleaned + if cleaned.lower().startswith("readme"): + return "README.md" + return "" diff --git a/app/modules/agent/engine/graphs/project_edits_patcher.py b/app/modules/agent/engine/graphs/project_edits_patcher.py new file mode 100644 index 0000000..71eecc1 --- /dev/null +++ b/app/modules/agent/engine/graphs/project_edits_patcher.py @@ -0,0 +1,142 @@ +from difflib import SequenceMatcher + + +class ContractPatcher: + def apply(self, current_content: str, contract: dict, hunks: list[dict]) -> tuple[str | None, str | None]: + if not hunks: + return None, "No hunks were generated." + + max_hunks = int(contract.get("max_hunks", 1) or 1) + if len(hunks) > max_hunks: + return None, f"Too many hunks: got={len(hunks)} allowed={max_hunks}." + + allowed_blocks = contract.get("allowed_blocks", []) + if not isinstance(allowed_blocks, list) or not allowed_blocks: + return None, "No allowed blocks in edit contract." 
+ + result = current_content + total_changed_lines = 0 + for idx, hunk in enumerate(hunks, start=1): + applied, changed_lines, error = self._apply_hunk(result, hunk, allowed_blocks) + if error: + return None, f"Hunk {idx} rejected: {error}" + result = applied + total_changed_lines += changed_lines + + max_changed_lines = int(contract.get("max_changed_lines", 8) or 8) + if total_changed_lines > max_changed_lines: + return ( + None, + f"Changed lines exceed contract limit: changed={total_changed_lines} allowed={max_changed_lines}.", + ) + return result, None + + def _apply_hunk( + self, + content: str, + hunk: dict, + allowed_blocks: list[dict], + ) -> tuple[str, int, str | None]: + if not isinstance(hunk, dict): + return content, 0, "Invalid hunk payload." + kind = str(hunk.get("type", "")).strip().lower() + if kind not in {"append_end", "replace_between", "replace_line_equals"}: + return content, 0, f"Unsupported hunk type: {kind or '(empty)'}." + + block = self._find_matching_block(hunk, allowed_blocks) + if block is None: + return content, 0, "Hunk does not match allowed contract blocks." 
+ + if kind == "append_end": + return self._apply_append_end(content, hunk, block) + if kind == "replace_between": + return self._apply_replace_between(content, hunk, block) + return self._apply_replace_line_equals(content, hunk, block) + + def _find_matching_block(self, hunk: dict, allowed_blocks: list[dict]) -> dict | None: + kind = str(hunk.get("type", "")).strip().lower() + for block in allowed_blocks: + if not isinstance(block, dict): + continue + block_type = str(block.get("type", "")).strip().lower() + if block_type != kind: + continue + if kind == "replace_between": + start = str(hunk.get("start_anchor", "")).strip() + end = str(hunk.get("end_anchor", "")).strip() + if start != str(block.get("start_anchor", "")).strip(): + continue + if end != str(block.get("end_anchor", "")).strip(): + continue + if kind == "replace_line_equals": + old_line = str(hunk.get("old_line", "")).strip() + if old_line != str(block.get("old_line", "")).strip(): + continue + return block + return None + + def _apply_append_end(self, content: str, hunk: dict, block: dict) -> tuple[str, int, str | None]: + new_text = str(hunk.get("new_text", "")) + if not new_text.strip(): + return content, 0, "append_end new_text is empty." + changed_lines = self._changed_line_count("", new_text) + block_limit = int(block.get("max_changed_lines", 6) or 6) + if changed_lines > block_limit: + return content, 0, f"append_end is too large: changed={changed_lines} allowed={block_limit}." + base = content.rstrip("\n") + suffix = new_text.strip("\n") + if not suffix: + return content, 0, "append_end resolved to empty suffix." 
+ merged = f"{base}\n\n{suffix}\n" if base else f"{suffix}\n" + return merged, changed_lines, None + + def _apply_replace_between(self, content: str, hunk: dict, block: dict) -> tuple[str, int, str | None]: + start_anchor = str(hunk.get("start_anchor", "")).strip() + end_anchor = str(hunk.get("end_anchor", "")).strip() + new_text = str(hunk.get("new_text", "")) + if not start_anchor or not end_anchor: + return content, 0, "replace_between anchors are required." + start_pos = content.find(start_anchor) + if start_pos < 0: + return content, 0, "start_anchor not found in file." + middle_start = start_pos + len(start_anchor) + end_pos = content.find(end_anchor, middle_start) + if end_pos < 0: + return content, 0, "end_anchor not found after start_anchor." + old_segment = content[middle_start:end_pos] + changed_lines = self._changed_line_count(old_segment, new_text) + block_limit = int(block.get("max_changed_lines", 6) or 6) + if changed_lines > block_limit: + return content, 0, f"replace_between is too large: changed={changed_lines} allowed={block_limit}." + merged = content[:middle_start] + new_text + content[end_pos:] + return merged, changed_lines, None + + def _apply_replace_line_equals(self, content: str, hunk: dict, block: dict) -> tuple[str, int, str | None]: + old_line = str(hunk.get("old_line", "")).strip() + new_text = str(hunk.get("new_text", "")) + if not old_line: + return content, 0, "replace_line_equals old_line is required." + lines = content.splitlines(keepends=True) + matches = [idx for idx, line in enumerate(lines) if line.rstrip("\n") == old_line] + if len(matches) != 1: + return content, 0, f"replace_line_equals expected exactly one match, got={len(matches)}." 
+ replacement = new_text.rstrip("\n") + "\n" + changed_lines = self._changed_line_count(old_line + "\n", replacement) + block_limit = int(block.get("max_changed_lines", 6) or 6) + if changed_lines > block_limit: + return content, 0, f"replace_line_equals is too large: changed={changed_lines} allowed={block_limit}." + lines[matches[0] : matches[0] + 1] = [replacement] + return "".join(lines), changed_lines, None + + def _changed_line_count(self, old_text: str, new_text: str) -> int: + old_lines = (old_text or "").splitlines() + new_lines = (new_text or "").splitlines() + if not old_lines and not new_lines: + return 0 + matcher = SequenceMatcher(None, old_lines, new_lines) + changed = 0 + for tag, i1, i2, j1, j2 in matcher.get_opcodes(): + if tag == "equal": + continue + changed += max(i2 - i1, j2 - j1) + return max(changed, 1) diff --git a/app/modules/agent/engine/graphs/project_edits_support.py b/app/modules/agent/engine/graphs/project_edits_support.py new file mode 100644 index 0000000..95cf245 --- /dev/null +++ b/app/modules/agent/engine/graphs/project_edits_support.py @@ -0,0 +1,116 @@ +import json +import re +from difflib import SequenceMatcher + +from app.modules.agent.engine.graphs.file_targeting import FileTargeting +from app.modules.agent.engine.graphs.state import AgentGraphState +from app.schemas.changeset import ChangeItem + + +class ProjectEditsSupport: + def __init__(self, max_context_files: int = 12, max_preview_chars: int = 2500) -> None: + self._max_context_files = max_context_files + self._max_preview_chars = max_preview_chars + self._targeting = FileTargeting() + + def pick_relevant_files(self, message: str, files_map: dict[str, dict]) -> list[dict]: + tokens = {x for x in (message or "").lower().replace("/", " ").split() if len(x) >= 4} + scored: list[tuple[int, dict]] = [] + for path, payload in files_map.items(): + content = str(payload.get("content", "")) + score = 0 + low_path = path.lower() + low_content = content.lower() + for token in 
tokens: + if token in low_path: + score += 3 + if token in low_content: + score += 1 + scored.append((score, self.as_candidate(payload))) + scored.sort(key=lambda x: (-x[0], x[1]["path"])) + return [item for _, item in scored[: self._max_context_files]] + + def as_candidate(self, payload: dict) -> dict: + return { + "path": str(payload.get("path", "")).replace("\\", "/"), + "content": str(payload.get("content", "")), + "content_hash": str(payload.get("content_hash", "")), + } + + def normalize_file_output(self, text: str) -> str: + value = (text or "").strip() + if value.startswith("```") and value.endswith("```"): + lines = value.splitlines() + if len(lines) >= 3: + return "\n".join(lines[1:-1]).strip() + return value + + def parse_json(self, raw: str): + text = self.normalize_file_output(raw) + try: + return json.loads(text) + except Exception: + return {} + + def shorten(self, text: str, max_chars: int | None = None) -> str: + limit = max_chars or self._max_preview_chars + value = (text or "").strip() + if len(value) <= limit: + return value + return value[:limit].rstrip() + "\n...[truncated]" + + def collapse_whitespace(self, text: str) -> str: + return re.sub(r"\s+", " ", (text or "").strip()) + + def similarity(self, original: str, updated: str) -> float: + return SequenceMatcher(None, original or "", updated or "").ratio() + + def line_change_ratio(self, original: str, updated: str) -> float: + orig_lines = (original or "").splitlines() + new_lines = (updated or "").splitlines() + if not orig_lines and not new_lines: + return 0.0 + matcher = SequenceMatcher(None, orig_lines, new_lines) + changed = 0 + for tag, i1, i2, j1, j2 in matcher.get_opcodes(): + if tag == "equal": + continue + changed += max(i2 - i1, j2 - j1) + total = max(len(orig_lines), len(new_lines), 1) + return changed / total + + def added_headings(self, original: str, updated: str) -> int: + old_heads = {line.strip() for line in (original or "").splitlines() if line.strip().startswith("#")} + 
new_heads = {line.strip() for line in (updated or "").splitlines() if line.strip().startswith("#")} + return len(new_heads - old_heads) + + def build_summary(self, state: AgentGraphState, changeset: list[ChangeItem]) -> str: + if not changeset: + return "Правки не сформированы: changeset пуст." + lines = [ + "Выполненные действия:", + f"- Проанализирован запрос: {state.get('message', '')}", + "- Сформирован контракт правок с разрешенными блоками изменений.", + f"- Проведен self-check: {state.get('validation_feedback', 'без замечаний')}", + "", + "Измененные файлы:", + ] + for item in changeset[:30]: + lines.append(f"- {item.op.value} {item.path}: {item.reason}") + return "\n".join(lines) + + def is_broad_rewrite_request(self, message: str) -> bool: + low = (message or "").lower() + markers = ( + "перепиши", + "полностью", + "целиком", + "с нуля", + "full rewrite", + "rewrite all", + "реорганизуй документ", + ) + return any(marker in low for marker in markers) + + def lookup_file(self, files_map: dict[str, dict], path: str) -> dict | None: + return self._targeting.lookup_file(files_map, path) diff --git a/app/modules/agent/engine/graphs/project_qa_graph.py b/app/modules/agent/engine/graphs/project_qa_graph.py index 681543f..6dead1d 100644 --- a/app/modules/agent/engine/graphs/project_qa_graph.py +++ b/app/modules/agent/engine/graphs/project_qa_graph.py @@ -1,9 +1,13 @@ +import logging + from langgraph.graph import END, START, StateGraph from app.modules.agent.engine.graphs.progress import emit_progress_sync from app.modules.agent.engine.graphs.state import AgentGraphState from app.modules.agent.llm import AgentLlmService +LOGGER = logging.getLogger(__name__) + class ProjectQaGraphFactory: def __init__(self, llm: AgentLlmService) -> None: @@ -35,4 +39,9 @@ class ProjectQaGraphFactory: stage="graph.project_qa.answer.done", message="Ответ по проекту сформирован.", ) - return {"answer": answer} + result = {"answer": answer} + LOGGER.warning( + "graph step result: 
graph=project_qa step=answer answer_len=%s", + len(answer or ""), + ) + return result diff --git a/app/modules/agent/engine/graphs/state.py b/app/modules/agent/engine/graphs/state.py index 14e63da..8492114 100644 --- a/app/modules/agent/engine/graphs/state.py +++ b/app/modules/agent/engine/graphs/state.py @@ -30,3 +30,5 @@ class AgentGraphState(TypedDict, total=False): edits_requested_path: str edits_context_files: list[dict] edits_plan: list[dict] + edits_contracts: list[dict] + edits_generation_feedback: str diff --git a/app/modules/agent/engine/orchestrator/__init__.py b/app/modules/agent/engine/orchestrator/__init__.py new file mode 100644 index 0000000..7163dac --- /dev/null +++ b/app/modules/agent/engine/orchestrator/__init__.py @@ -0,0 +1,21 @@ +from app.modules.agent.engine.orchestrator.models import ( + ExecutionPlan, + OrchestratorResult, + PlanStep, + Scenario, + StepResult, + TaskSpec, +) +from app.modules.agent.engine.orchestrator.service import OrchestratorService +from app.modules.agent.engine.orchestrator.task_spec_builder import TaskSpecBuilder + +__all__ = [ + "ExecutionPlan", + "OrchestratorResult", + "OrchestratorService", + "PlanStep", + "Scenario", + "StepResult", + "TaskSpec", + "TaskSpecBuilder", +] diff --git a/app/modules/agent/engine/orchestrator/__pycache__/__init__.cpython-312.pyc b/app/modules/agent/engine/orchestrator/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7990a03fdb9eff52d6b5c18de2e5414c33a13ef1 GIT binary patch literal 659 zcma)4J#X7E5G5s9lI5FQ`Ec zmPlcWRYW5d(^w@mQ3ECAX%ht^6m=i zr4?QKLGsoez1Q_eFjjNh=`%1(X$N zQG-uE-Icsu3fX`zavl5rItefgFbI$a$O7a6f+6$biF?lbzEoYalweA}fYz4KE<_7u zw>NF50BQDw|D)+%9HwP9qpxRkuMT@-y$#HGAK2w)%k=^-V~k75*mrd6_dySmTzGXu zxP&Wm3MY##jMd{1?|lDoOz}OV@M#v+QcWx)`)FpV+7eZf-ojpVHqp? 
v>xcaHrv8mS^3DCnLgvK7pBQ6&A7wbYM;`u75uW3psQQ}>$I(rur@rP9F!sZw literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/orchestrator/__pycache__/artifact_store.cpython-312.pyc b/app/modules/agent/engine/orchestrator/__pycache__/artifact_store.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79082f6eb8c0f7ff8ad891e43dbec40d9648bfa7 GIT binary patch literal 2857 zcmZuz-D?|15Z}Eo$v!!k&fI@p*bZ6&OeYkrk zW0^BJ;0Kwu*nxsWn}SONt#Kdv2lPMai;M#nNBUGs>078$3VG_xolalz3eC;U&hF0s z=C^bE+rU6Sfp+KC_vWxf$RGF;G{IMB<9$%JiB5EyCk2|J6m>4o75EJA>3m)&h#9dU zWh6>Ca-HbHI?=^F&YvL@sz^%cZ+uaxD{IAK$<}PMRJ7n#ESF9FO@(tKA28dT*Ru8v z+bFc(KCUbpS$`g1^8SN1Zi2E+43eQb$#8nVPS=GDuXFI@A91=2K}Z=vkLV)UMSVa& zrc3K$M$)5t2rMCeP!Gd1toN;xOkaglhTMT5hO}K`MwWK0i+h2kX(Qn+YZ8a3t#0qF z7g;H1WNk}b zx|Gt)gwD(*qnOYZ7ZcEvH7qNI9ScU$PJuC1DCyh&57AU91XU%JnxcRfVr24csj5DwHi1 zZHU1Y3WlwC|Mo-np zCLdq>;rfrW-_Pz`sf}K&560?4EzhKzATpL&mmrEo(q}W-|dyxYF(_pqREpYy#u}x>gIgFm!kDl4# z5291G@RT2SXB3!U@6de>iWhh5*ny$h+A*&34!_1ZTpEsCu5OoYgg_V~vZtW`x0}lGP`b;3!Y zAG?TV%!wj&j6my3EPxX)L4kj>reJOMx7It%74DWwdDyWx$1o=FPUl3QDE;{5FKPut zRo#A7^{)<~M^yFmvX=LI7=GQ@BnqrfOhNG)iWCY2ZkKpl_e&Ul?imXFTj;Y+8XPBH zeikOz=nY!?^zOf8K#VuWsd)CeB#EiUAQdMY5*5!kBvFj}bK&SYC5&;+mjOt<8 literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/orchestrator/__pycache__/evidence_store.cpython-312.pyc b/app/modules/agent/engine/orchestrator/__pycache__/evidence_store.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ba90aa2fa6716573d33c7063c15c9f268d8a79d GIT binary patch literal 1138 zcmZ8f&1=*^6raf^Y1-YjShbZRn2MK3-H6~t#8Zn%VK1eZhzub!ttr{bI+IQkUf##N_kQo=eX}`3fau1%_!%YS2Pzer zr9elwKo}BEI88{029%;~Bu1we)F?5?1Hx+ugquf384;KR%d>x>;?cYwN||P17N=5! 
z+F0$!T*yc~%tR+D14=%Qrlaei4~ZZFk;#p!_=)VR$J_?vtRs+oZ`Fg+)4R@v6G zREY>$m=LC`L2IKq5Qb!n>=8C`4#**G0~(vf07DLak z2pw;u^$cy}t24AmkH)j3WI&VyZqHCyEw{x^CL{;7j;d7*Ok-8oRSh&b-Co8zp&Xor zUfJ=+^hIC*-D$k4zo>t47C$n*DKt) znZz(?M6Zg{#a2tK(_xa7JKtS6J?Dv|332*7uzm7v_QLCx;o6(EPqXd^$1R|?JtMc3 z$a+fVW;>O_GZnr*p2Mn-s-$?{kJUV4SO5m&E9A>4+qi8Rb_8Sj3}fZFfZSm0X)jDl zk3t*@k5^p+c1ViqMHk9OD>D2FtOsOdQ0wN%rq;E;z{;@}Z`z)PRW^G787e9qZ@vt7 z+8We literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/orchestrator/__pycache__/execution_context.cpython-312.pyc b/app/modules/agent/engine/orchestrator/__pycache__/execution_context.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..993f9f0b8db1ed22898db3c6c6102fd004c08197 GIT binary patch literal 1777 zcma)6O=#Rk6dp;lYweG`>z~+elGbaxl&o8?pyW_Wa4~iXA+T)g`l8DqOXIa-t+bud zZoS5bElr_?mL9#(i+fARrN{QxW1&5)@nsP;36xTLb8w)LQ{PBhW4o7(;LZ2FHzPg0 z_s#sKX;}nA+W5gyF+zU|G92j`NN2wTI6yYCB^Na$Ly`oRUAdtcN&_1>;)Xfh zu{+XG4K>0kH`B-(*$AiITqAGfBRt}2jZtH?Q7{S;qL>z=XUtajMi8oFs@Upl@ZFei z$pRbW@w&Q9)xEK9S?UcoI*@xYkk2xqk#JOJUCR zJU=i4$M<+y4l^sCnNDDCx-?W*P1h9&R%L86FfG^Q9Eh^wbzss~Sm11#Ra01gmEhGZi{-hg653FJHA zwG}`(LMcG$2#tsWMJQunI}0qAu#}w#rX?(Gj{+-1Y|Jh`#KuS`QyveC=h?0LUO;yP zmV-})IDjmf&_sxvB5mz1Gn?BVQ|`NM%EHmQpo9&1J__IS+WrniwK(d;w7NH4V}2dh za#74p)7mNRm#V%;!(1~75NmU9@=_^>OEP9sV7@nbskG+{Xi{))Ns#Ywh<-y+r85TL z0PUj(!^nlXTL_ZEBQ#XTJ|gEUU>BT*tD{>CK%4Qnuq#m*LLGR@QnifPHCV({VWx!1 zfB}hMPTegok}bV#=^LCfuA461)z@g7Kt&!lUjmCRi=BdR!hY4t;6$>Y!#vl-+8sKLSEG6(YcTB0R>bPV%xET zuvoith?yug8y6<3{=7iv9eJtViT`q$3FQd2^UuMYDv~6fp!vU%euCys(9~b3dV;D? 
zQSI5pjo$jFiP+Rys0Ye(@JO)ikV literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/orchestrator/__pycache__/execution_engine.cpython-312.pyc b/app/modules/agent/engine/orchestrator/__pycache__/execution_engine.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48a46a27d1cb522ba54b4729855f3b7e76cd4ef5 GIT binary patch literal 7288 zcmb_hYit`=cD}BC)ftMQGc}O z&TvRdVF6*=3+cY++;h)8_nhxs{!>wr1wndn=3ek;r3n2VH?%~l72@SAR8|m!7$S@! z#3VuBx-P7n)F~x7Nh&2ZNkK`5^%29QL778^jS9M=X<;h;`Dc^y|ZPq-e58 zsT;z@5!D=b;al=i>R(~`a6Z}Fj7UjLrwhzy;ff?L9 z7K;k(tl*|(%V^ji9fwIXylhhI?6hnd7ue}BmY)d=YF(WHG)hj-_`^YA?u1`p$7Z5Y zmV-`Oojw)h1#T{&vZCf0zKE9xp}m4wG)XWBq+n!-PbrXsiP1qxF=mE@QqNcz3Q7ZG zW%N)Q=L~LIwq@BKj!p%mY=F#L0WSb;;-yPjAWp=QFFPF9L5t9ubLb)Ab?ann?lzap5^#1f0&)^8e%W|rny*%4G6sN za(9$Oqhl5l^#~vNmq;uyw8Hr{6dWD zVztxpsiU3KbF#_j3r2&2&o^JaO$wdbNHH+T+adcHNtI0rYSAiHIug`hTVLXh2Qnvs z>&OKEpI~7f1x)5ooCsxfIy((2zU%=^p#@#xIU&5(wlsv!rx98p z<0xd^HU!N$8P{ogygq7>OR<6wBu{zhQ z)$&GbwZ2^WzV0&OiN(Kz-Ph%MwHl+lvLq7CPDgs~pCW(4uh; z)(ORRL+DYaLpx`#;s13N{SH>yt?`sA3(G(MOkpqC=^@aQ?P(_zde9QF_|e~!R|vPk z3&zu^n9U#~8~BVHx#H)d!RQn(Qy2ZguuM;dV;B5k)!GKK<`~w-w&eh}N0=+s9>vl5 zeV`EpaI&2_XbV{zRmd*ojTX3b_}+X@tbi|G?55Bh!aLxw@pMyM4HO);h~2*S=YHwrh)<+;V~C37gr9&rnwIcIy*bf_0R9#ZlbxF zXlGwI7VwAp{?6>YgYX%@7qUO1KYfhWil>re?<9m9A720P_I%p8Pjv44Wm~%2BX)a| z^>077y)70`{lH^UT|0D`*djk{#T5Cy1C^}&;AZ}L^23%RBcZ0P5+d^?WE_9%yw& zmyKg*yxx=E6EZao$VK5x)~gP^#K*AHY?NW6fjM80=Vw^i0InB$I3s+@VUwYpGKVj; zZ1chS_^t$n_rd7{aN)3-<1kOMB^LEz-JRiB4!anc20tCR z6lv#O!*jY-vQ^wzxW4d3ZMvabZ0P>wo9TmI@t`;9I<;mym7I7tJrNKm0?DaJY9g}U z*q&}YAT}OYB3Deyrq66rRnwZiX`=x+-e^Wmt@rod+na9e6&rgWANcj*e>j{zGAbS! 
zO^%&OHNGR7oC(8{?HgM~mVtG9tyI^x?r2Fn+C@jZw5uKXFDV87i;7}eyLQwu;v}B#B1Y;APwNK> zX#B=y!}-3U4k)%jU#uISVC&hJLvvZ{$`As!4oWI))eGU0S%L?z&k&kl(~!>s$>ImDcpkwwt{@PU@^7E)IZ@e>qM6ENX zxSlH!_G=?KTn@W3=XM42wzL(5)2ih9CL}a#-!~)f+=4Z3&ASC}{V#W`06GI?j`!3p zxHWpAHm#N`cTP_`Wkqwr0(?$k&Fw*Yi0j^iNM*r#)fzRaoO9B1)beo)$pJyz!?aYU|GJlO|o^sit!#Y3`8@Q2c<%WR=#VL2|w`2sP9 zm5bDR#yN8RK+jzxlM(-{Oa`NZtXJH)OhbpyF91-R#y(dFMp)S#iNRAyEE){R#XdFy z5jF5Q1>Toe_?GoTPzbXe4lQM)%23AhTU5`lcy1bAJTSRo%#%!oV!^12EEMQy96xjH z*zowcY#yEsu+w;xvh{*La4B~2qK{_-veD1aMFYVY;FU1TPAgDD0Ua_4Zvbvu!3`V$ zI>cd}<%Sh*GCPqiVLvbU6cNZ4fZCjZ1uV$)79nF?B^TkR3!ereU_)X5G=>)>I~(9> zn2|vgIatAAJUZ{(4lA}Hq&}F$|63MfRMp-ru`F`8Iwshm`V&m&;js1^Lr|Jh% zRX<74QfXPjD7EfQSiUQ+xJj=SH%oNc(g)W)i4*HJyYAHAs!!Fp6K_hTmFdzJv9#rG zAlWhSg!)JNEBf)(frY`W#J*mC%B z`C~rSaxCpUwnR&N+P}0uw5Ip;i+lPX3s1&Vdrpe>mL(LzS$_TxwYzQ$cdp*LnsRonwu#QZCzMz}l&T$GGOkxONOi6|o?9LT zK6HqV4yo;sw5v^WwZ5_%s!KPJ0gyyBDy#fVSh>1FBxLbTjnxE~(lHbnO

MKeN_P&4 zokLGPSnC{FuWh)~ajWA2^`+&ZMHNu0b|76oAeImO_n$V5n8M~pE3#GIq@NczZ}R8i zsILvaF;Yi-H9#ESO+7^v)Sv2|$J^12ckthe^+9Jq$l9F+qI`l)(-LtO)UP4=C3P0wV|=+Y?XBDZ%#* zGN22>!`6AYt?-yc7JRrO|3~OK3HV*lE7Eg>XC&R@6??qNL#L8eqigoj4J#(TK_h$l4=-$}>*HQ)Rij3{Etb1@^(H(&oF71-j9qxid%kSRf| zWG%iiaD5>0pRdOz2Shtp@t@Wa{{^sT9*4Rz{j!%+f<0~W3fXE=zF4FDl#xTs8d zV^J0!h7>=H4}yz8wrLN;PaJ2*0TYcWdVU_rDE=D1H*K4&AcHOAe}N~(aE6J^gA?am zahI}^DevP;c*Z!+!r2=BfWyLZoA7S}QGbN>`DBaFr~a$}b=v2{;7lFiD)9WnIKz%z zxqTd#|3k##phmru*w%13!%-QJ1C&qEOPx+%_tLZnvI(MDZ+Yne*8yr?o?ljJVI6DrR+yf6UotfV}I+fo%Iy1j&&ube;6n?W>t+cWv%kq!dih{RJ+_KPEB)x=^76MIuaorL~jYVNuv=en*X;+j6x?eMJrwsZ=&iRxDb$oUVriiedgyJ5Q(AoLd$X&Ay3i5&_M7+Kym{}< zoB6I%DFKeJw?Fs4&jCE4$zAR^xn8cXw{->@aBW9< zn>a|n+&Ez=YZo+F2Twl&FMpzCvf@y)jX!8&vv|r4f-rI;KMdq1&ZqBmc@W*>gHZZW zD2APMQHq%dc6J-*$H1Y@JiwTAj~(gloC*K`^YWgCcKR@98*%L&9tpoA-{V61GUB3> zc~@Hz$Lg@Ny4q>PM&Kv~!4LzdSf?EEJWgASb;)}V;?fJ5BrleAzT3JlahI0c_PE?UEjJ&RZ=9BIj2D;3E3b{0uaB2r z8aJ+DAD|?ehRV%9p;)Q@{j3hPrKiAfJ>6n`6{N8R!n8Cm`YzUszI!K zekT%_XKIauD3g5y;Y$z|;#56BZKr_Zl^o#+&NP;P zIWZRUTP!i4KA#j6EkbiSF%>PrQZt!Sv<$NgNk!2r%w0;R6|KSiVlty>9qJ3otfF&J zsy>@1nnpmI+a|^QU>8e-@e8_0aRFNxZ+bUw;v2A*y#Z_L4On~e#-!CXyFA)Jb1uX( zt`&;}DytP@&oezQ-cB98DPBPZU2^#<0v;n{zrxBBsQwPOelr`7EO9kb0=N0YR$7sc+VvIM8Gc^M3F9 z&6~Hge>68YAz0@w{iyvWA@nz6_yV3SVe3=amyn87K}UHZBM1!pbYEV~h8tZDn0UfjwlmY@FxN<7?TiaNHGl4R&32Qtb|CL&)vY;BI71?#h^`Wp8|E! 
zq7BoEb#1DEv)O8#>Rt;fY_$NtgfPkoD$4j&Q59}W8F9gvkZ71iIcw$%nvO|U+~M8; z7J%hhu3wEP|p3Es8lysP;^EQ#`V*4_QVVmq_V zs`6IOy9UlxDOd)$1Y+&uxK4BX>zE_3Jiu|C=2+9-TYFfxzGnvP9+vI@w^XGEqUwL( zgW8+9wj}1dIIhzay8H`5w#<0u#Bs80C^a$1t)54d*E zjQ!lAPICiw%+VfI403&)^6Z?#sdmXBAL1UMALh7D)!;SovV_IuJu-h>x8;Zki|c9w zoVtTc7N1DSKE}D-?}X3(9(j&)?>fDfLWuP59!+I)v)g^`5~mZv#B5J^Nt{B7#*5SN zYDg-^g}1^uycmtDnO8JJ)>PV5AZ8X@mWQ*ZF{`QA$l`>LHdt9~C`2=vGmCEYP40j}40xvT?qkD>i0K5D?2m{GNO2i!+~1(}3l@ zPbA{W<`un&X^>^njH@IJLc(UAuw&51c_>2pQpXncTQ)9ORLm;oWadUg7IZRC^0b>v}ot55iOtM$Z4<=cY4GI^(8ICv8@S3YGX=IdwOEC;c@1(PreJ8|iT( zJ*#7t$iVt1?&w)(^sGBNSs9(&jP-vjeHVGz2o<~uqj02b-@SR~<~OJAPrK0}CpzRt z$DHWcpP{kMj_69~a_2_h=~dN@A9v!%-S}xIetM(hOgXifejYhk4sAi3(Zxi7uxE}) zY#U-5`$N$H%wmxko*4eZ4YF(~c`VDcNtUY@4md2!b45k3YRCxq{e;L-7K6QCg#8M= zjYYKBt=>j&d~N=gstr?LOaxiP*&-x^=!E@g=^#wiaBZrFy*V4MPSx&YN>MyH^J8u zH+yZW6Xk!`9}bFsB}xNak$f|Z;h)+1J%hI!B{N)-2^A3-L)o3=LdBf<1(A6GoY)wW z479vqWR-~A$ww5eP(*a2m zWntVuty6d_RFh6>_QzsUL{z!36jAK~K&?Xk!H6QOwgR zw9Zy&FKPQ#S)y{gKP*kR_ezsOj7CE!RG2*8?hm$#G&m_mTH!{muo94Dxg8fqrHIlF zlkMYCaUv`MzcwR9MuQQl9gJm6!b^kMv};e9zhjj8VsX`K+&JOcVPQUF-U6qSe*xW1 zl51>EnR708CUGZ`b5v(8r7x}Q?aVq(=G^tEOMkalrj$$~o%qq)i-Q|YM^~GUZZvhS zHg&GqI}6m^S;y(WSSvHmv~y0}@El$B9KA2DdroAn9l7o4W2>HH%LmpyomuP2uS`GY z*FC4R)}Eq;_Ek^&GEBdfwSFV#s!ch!5Nv_G-@!3Klh&NXpJ1>+l)S+c@&H$%&9HgL zQOu<;kzR7`0#NWtK9qN*`33EhWG4x22Ghl-Drq89p64F)io677( zUUOGob|6tLPQ=2&fL|#fx65l%IayLDIuzBSVKFFD2mWz>QS_Qnt!5oCqPPY4z_}#s@w||^A#&ipuI8O#l?9^4w)W0jq~#)s!?P`AJ4*W~~q{$^)eA?)M-rCrtnozDVJRc{2M= zPQNm*02 z5gtOhj;clia9Bxw^EE)!>9#A_oqZ3-3h04260**-_Q zYzC2ptiy`8X>Gj;O}BF| z{3nj>X6J?Hh4|;|4o3t2u-xN=9gYB-EJ61r1PPaWZhF<#lB;XL;Z*uYuC6iXsLAxF z`xk4nj%N5>h|kBDYL@7F-Qnkib+*oNpSnGB^4*i4HneP-U}DouoVA(D>C20~S;xMd z!=1U9zPQL|9Zfm+Yq|$0))VUX1nSlYgS*T{$m3AI<@E3qW`SYIY$Z?PST|FNT0!I_BasEi$Te0o+={}gH;46q~)1p1*I~2T@{s{Cij@q!( z^o!+1tJb~+O~*?$%q0%I%A+7o<-bDrZ-9sv6@15k75XUjNzJJx5gq;e!z(qXQa0#{ z;zd6L0i&*A;l}(81WG?M$K~9f#o(IzV6LVPqsq#@j^)8e!w-j-2cGPIa^=a{m6pDz 
zH5dN{DC%c6InD;XgS^9>WB#!BcY8mn>VI4_SF_MK-}qBysrToXKfJu&(y_XyW38$` zWqQ`sdhcq=`mC|#-r1Dp3*K(4-z3m|zLz*$>y>RwSF@FES$kW~;mY)-`&R0XXB{VU zuIg09i|22V+O8MRE6JWqjEwH`uKSFS|Ck&+bAsPdtQ7K)$3O%iWy=*R<-!q#{N##J z+>~+L;C9{ww}UxKnW98wu7hI131JCT2mx?}e1l-ahfu>MZD*b`dpw_UK!R?XqToyt zb=pTym!U8e{NZqPN)o%gW(u*Ds19vH(5PfR7qJ)R%Nj^E=oaTAv<_Ie;aQrEU@5UX z>WZYxr-4$w1|5LT?%J?7tlAqk?1$FuhjQ-P)P z0c9%n9>trunQ$Co=EbwBK0_N_F0`)SoXn@&8zX63eR41hPl;(+2!b>l`0 zp%AmlOB+V0??I5d2G6rig=@4IIr6Wq{lji zVo;*!BPc#4#hgG(8T9AH9Gqf@UQ)(l;uhIrohHv#v&H1zs&0g?8%Cd3+e~hq52@h_ zxcSHg1YS4@#SB3`0#&y!Uv>Mms@tbm-M-?g8!4o)tY2Qcw|0h|=Xz=F)&=#@J`jx{ dmC*}saty=#ne6`udG`zR1%`1yC)jHT`wx#wS ziFATW(t|XOG?QSG>>#VtY=TR=23<+_pqr)`>Kr9IVN8D4A8j7zaGeCtc66lq)uGn)5uBAOadq*1Nr%2+fJS0>Iy73r!p6f-(B z(qixQ$|HE6r6g*Q7O6o-42blH+#oB~i45eN7!+B^U1B}ViW_u`n?x7n9h;&aFCFv0< zrpUs*10B(LNF?!lQYsW39SuQAOp@geEEtwjN(cPekxYwY2?^Tk|46CfcuML>lh|!Z zR!CGyla5t0X&;@?DxolTN5Dx>Ze7Ew_EM=AC%O%iY3e0k_wmGoiMe2*u`Sow_I&$7 zUv?tTpHjW2R-gjq%-}sxGE1$;V3AtQOoF>1Op_YSSW*j_=F<7Mie-9 z8(*WSDfc>yayz1i$*?8Vybt|Pd8WJ>51}9J%D6|0TO9_n9umlKBOdG3A~N1*MXb`) zqca)Tm)5z?QA&l`Jj0o-Mdr?Kipp>q*CZ4UF$2Kxeu8Wwxcab5a|=3yH4h3vh>M!{ zRw5m{6Hg6mZqO4_O4QhdlnPhqaw7HE)IzuB9g4;iV?>fQ@BJuAK`U9~uqTa4-_bmx zq(HeW_PjUHFrm)`}B%6#_%P4+-G0DU|gZNwDz?%2N@}MqX}d`m?sr+Mb9% zNPIuB5L`Ss*OuRM?!m=u?~Lc~dH>7W&4}Wu?@R$T%RlCycz#j4`=4#}D<1m!buCp} zH`DoW>It3Wo1b*f-_ErjQTZdP_sCk9;8xaQ^0z>IM1913#LiM#HcMrhAr>^D7wI7e zbfHPH7R6bV%c9&C<*_KQMO9c-rA1X)RP~2m(f8fzDGs3HsL)K|1B_kMoLDog3!b4e zu4gcNd;kV~f6Snb6tM}LgI)o+P@lr$9?(vmKVTkE=79rRk?x1H#V-))?pT~iqL3a_ zM#mITgZf4`5QAXRwczyhv@ZJB-tI&+c}tA;Ozv@zzYfRtZUA94A@=}b)J63q2O;^d zX=D?UN4{!l!cKQ;T)^@R<#sd=1R`UR=5_Y!ug9>=Ta^_MXh`J-Nu{_eM5& z;tfKKq-9wciYFu?1}1MDjGk0%;>~f>YF~j^XuptazwqKX?N__jpA<`EL{l=9|`$75losOIMn71~bZ+D5vJntI7 z`aOw&ORzzR*=1w+^X-KLXL1M5EXMK&E^P3`eS$PTnur3!Z*K_0(P$xEm>}H4)mm-2 zNuq5l9qQ4Pd|x6$nv~8NZR^?tQy798_emU#+1gU6p&B*+8a1y4i^`KoqiLcPb*Q$< zSu<(d?fX4BCM&=a&{{aFTapk>B+~aKQ42b4EUnIjt2~iXqT@PhwJjDF(nH0C3BX+N 
zA7!o1WH&4+8C7(yUjtKz1&tGS8+KYFvae*>yLCHE)|jD`?1rM7p_?~DA;Wgx4DXYh z*R$Q;f>L<^lGoGJFGJ^-_g!Azdu2J)x7>VYd3$7e=cVOC*O#|nTyE`M-g5!cUp4vy zm09l)?)Go9H>VAnrXvQ>tmhsK@x;N0MyUio(* zzz3zl;-w9X7hHR@mMxD^AG0xL7~H@c5pdAh4C~~|$@HVz4CCaoIl5x(2>Q@vk7-(h z`*%bH*U+iXpHkmr9y5>5a^QGQu?l)-`eSq9Si^-w=Pkz2@S9@qQ(>RCFKCfB~xK$yNs#m_wQVKnGNmzXJ*Qb5(q{+mhd_j5nk0m)B%NLrBr zC@Vz7Gq7zsDHtu;q~%V?g|Z zwZ#Ig5ur3hQ#OdP3T}7(Db149Kpo!vKf{{y6}daOUBKzga0JFgIEt=eT8}BX`?cz! zcnS;vL$_*_#vAXJ;?x1D1q`>!C^MiL_G&DIu*ek{Q>#J~2n;w>Tbc|0j*n{Gt#mq} zRmK77vJwT$TH_L+DOW)j{d3J-9K515XbAo zKt32*3SM}@WGj|^{+XQ*-(T`w`R>Sk<6lMcO~;m+F8?^T82(Y}#ob)bWl(y&|Fgi% z0hK?hdXK6tN7Uw{2E95Y0dt}_`00x8AAvx{=cMeMRGX7hlkG|qRchHYV#St91tKWX z5^fOPfIP;AFXnf^mxWG|)$s-DIKx`_;)?ik^`JudT_mc1A4+ow%KKtfEG`H`h z4H2hR?`gH=l-hh+<&j>C2-HA2B6jK;YaK*Djq(Q%Ob1C?8ZE;p7ADMkvP)z{){u{r zwt@KRMT*j6pzIW^ev3qcR_p;G!r^& zlTBr{%tLu7E+s^{>sr|3ghZ`EN{uDKcn9;66XP+30K2Rs*n+0U#>H`s!{N*I*gmN5 zsK-k4F^tt-s3^}v0{HxG7~~u0!k=IJIlt#+-DdTz4t2|ZwQj#!y??o}<;k&p2HIa336NU}YV9_<kMi2loEO8m+ig&J0S@ItAQ`-4YZHJELHhe^D)*_Ry?^~)rzew`_3t*A>w#*HwzU@oCb87g+;wCkG&UUKwsop-drB`k4 zQ+cGp7k(@v>7Tj`l(6Bw(Cu;F5)GZmu%Hm9`e>>{6%?aExxZ_Lk** z#L@(I6liD}?ph%f4i@D$XJ=f%TP#Tg5up)km<-=U3RN~p z#=c-{`Gd3D9&ao7_vHM0)Hbva{gEX<+J|o4KAhe2c#pcRD<3$X_jTI@)vJ1Y)t1w0 zGwd&<*9r{f3~ROtxtCJ07d{f$@qH zJCz3^StHf%1@H<^FZs_cUe%@gN$bqe?8xJhC!6L2`AvKCfqjL*!Cc^AK5#hiJ7N>^ z3DtW-ZRu8(W*$JOR;l}GvyiuqPZ z5F^+o<6XKeO5sAi>`S;hlX#_lfF+oiS~Q=7X~9_h90{$Df6xwi5L3S;#o_Xg>l-6o&{wQl|fV;4jAw zU=D+0-oY>`}p#GPA3rj;cO5Kn@ z#$NvflCanKp63(%0|$J~o8JV6GNJx7d{C!5qB!`fR{C9me!S|_{1nTbY8IEVZaL@J zs!sC?LMob+1VO751mnv$psNMpE_`igln}gMBbzY6&xHuyNst|wV0@3XVS*kqK@W(a zRZY-pBY3(Gk3%5`*Q zOTZOe+0D83tWsc>yx*~Gc7(JR%gD?+bSvaUWo?LqPcCdV*2{%c4s z(Yn0#q52`D1*;tz0};c@30l<(Jg8f1;D0UatwX| literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/orchestrator/__pycache__/quality_metrics.cpython-312.pyc b/app/modules/agent/engine/orchestrator/__pycache__/quality_metrics.cpython-312.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a9b6bce3f95aaa2fccabef081d892e3324f37be0 GIT binary patch literal 7195 zcmb_hYit|Wm7XDIhC@;kDUlNOqD(o8EZTb2avr;SI0>cNZEYpC?ADPj(PPe7rp$+O zXDCa;aFb$#1q%d-RAV7IF<`X~unKHo6*NWtX8}3gMP0POk{rk6PE{a9vcMYq$BzID z=hvP)!yzeE)~3K-2$%P9?m6e)x#xW6T>ZYX(ndfT9RFGP7cPSM5_gP3=?(J9Ff^74 zp5Vy{5hce-61Pne)3^zK%@K2y8mF`|Dndt@afT#J#1Vq0-z9kFrb*{AZkadvEe~+V zPpWn<7K=-q6pqKlQRq8I=7dlJcLVX5B+N-LW*d`)nUjK;h)5xwhh7&Ll1~Ppzf1_k zILQ;B3d@u4QsZXc%A4Sq;%&Scerev$Q}E016`&9`Zs99=27cLji@!>(Kat=fVQK!j zAfa$b3~-T9BEm^=6f&2Iv_l*E?WlE|v24&jN@fLNn za3VGya_PU9;UZ=)jl+=7m^D2!`T`7$!1@q&ZXpx*+@+R9Xzhk)T?{L?|SPqTi&_5?D?|wT9x+nTQ|> zyq^N$Tv)nvF%gLYjmn1NvjXC#1eFoN$PyxO0b3INm8w~kkZR?S6u!uXBvG|b2@;3~ zn~O=R)lfzRl1L;NmV~INRtC2)H%Ek+YK1ig5td*<1~n7mV#tFv#|N#_vs@%0_-!gB z3XzKlUm3Lm>k&knQH!Nrq9(iwe^^wDD{VcciO25AS1jL%J*Pzhi9K9InClr9W`i>* zJ}tmbg0lyExUi2$;aMT(<7Q@jFoRF52Tx4F_IhBjCmQDy5dqd~BSLH{920uL+%E}m zFxc>W-qJlnFsi$R?wNU&zK{q<_=T2dIg4&>9&0K-g6bV&v)a8lwmh*kk*n>_)ON4^ z-EE}QzLu>$x^Y3N{$7g7yFJUtmyYM$T^V;*y8G}(jp828y1#$dl(H7*4rJU16n9t3 z`h}+{=kaGe{`ImLeTGv4+|S#xtrrw;=n-wMt9oQ*owkPrYqdRnTtVy& zks`jXzwxvT)6U<#NI@MkJS6rR*fVL~a~&GX1b78w7Wzg3?@{!R;Lj+Yyxa&E!Ni-A zlvMCM;4>)M%$pa$zZSg)4I|`bKqzFA=@`kA;GYb%?x1#4jUYW&I;ch3^ z!C7D(3~@7D2*3<=;WcMfE{14`Xs%KkM&)HSWVu-bYxm~ZaL`+Da1au&_Trm~~v@5m-p9!uQIgLrSl^vlPf6Q$KfP*Rk2e>7C8FvC#fCnecV9DDG zK!%qhk~FNOMYx<(WReDuW@LJpnDFCk`3&f_U?Y&tw@fz+T7irPnVBxaaY>MDG2n z1m3_V4TehHXjl}(v8lpriCP0!wbG#0vDT_m5+`0(9lCf81`(j5%8Gy}I5bsRL!L^- zxTpY@8WuI?I{tx$Gk_{Wj7Jb-5U3BUYST>>pLA�N^@L^eT+2mVzeQhE+>JZEcU@ zR>afobPp_A*!yfdHPE;R=)^xl1vc8-_iOeOHrIPJ(|a`6dm__&;=b$R>W|W=&*e@F z%4uQo$nvqP$I`BgDHf{#aCq|d?P<@x)X^`zds3FXyZJF;vOb?0&O2N=M_b0xw$`>D z*kIBp&!!!1pF7Uwou1{YrK%O_b7$K_8b}|q#PePEYlc^b*Qwh_vb}+y9!l2?rz}uy zI$euLu2x(ZS6}`xnQ?Ze=yEG>Y0rDx;BT{~b+zNeC;-u_=fiW4DlORHEf{7R2=^5E z^l=U08X-j-9sHs?V55Fj1=XD)YrsK$#!^skiQ2K-)|^5wG_X?uoHz&HgpmPNv=%uJ zkvC5jN9n>S1Gxb^It9z1{MX^BKCS%sVbv7BJoUjfT>fl~etB4}hzSBOLR2#w7Otoi 
zL`#v63BOst2F0L8#NYw$SO1_s=q&i2{XzAZY6Pf6h(+<|1P6O4Yu~eabe+n!9Rk!Z z8!`X<^}MS!@2EvD0`ZnpD7wbGTJ8d_aR>A@vI1}-=7JLexPiRHJdL&9Rt#!zF7Dka z!0|i}j)zK`OGqqJWm5JPq!f|3B#I{u2PM{kl$|Jw85C&JE}KD`UyIev#>ymJk}DOn zp=oSk+X@di!EkV(A5%C5h(>Wd@qeBI1F}7u?4sjKLQIh?&t^;##%!$^l2Af60VvFX z6l+=TVwEs>Z>2maL3*wi$YgMoyshwbDcLsmznk7kn{0z~J1n!jWdR^2$-XyIk|E(Z zUMt}GG3UZP_$xR^$~Ys6kZ{tcbEG94@9S*~&l;y8vxbx+V3>=5QED*&9oErr$S#%K zzrddDZaqA4PCTGRX)1#RPK?J8{=x{bmUw_eQ%Lk};tb^Z5s3D+WC(?4G>;AA5+p<7 z708}c6XaR6MyJ}s03$F3iJ3}8!lI;7d^jW_oGq$m$eC0NRGcIsbP$XB4whoy7Ng;n zF<_}nt-O$kK)Pl`c+?EYRUINECVKj(2Y$|CYFHrDA@L31FG5ILir)-Jf}Puf|JjaY zTV#0*GwJ{r03Kz3Z*G5oW`Do3fAGF@bhT~6{oBSnjsMoX=w5DIYW&CMv~x5?L$z7o zv?knd=2!W3>DO<6^7e*!@1-BymHz$h-@TmE3fY(&zgH}?skpRI-qz5bKXG48&JF>+bJJbgP5<~1YpwON6)Rs*O9XKZ_@+E z6vy#2d;IC+R-*cK@~PU)KUG^M#l3K1S8GRVsgG-*y2I=nX=LwoQ&4|aS39zY{j8IM zx)_e)(5;y9oCmAH$WwUn+12;%YOAzN17-mdp#G9a1FyJ?FE=a|Wcx^>EZ3o=V$f<> z-W-XlfCgVMt;m$Inm6OzY77EQo|+|)@Be`h4SbH4c{|f|(JvbE;SH@AdX~m`1~37m zIuzqX4Lx@|i6p~YWb-UuV~1T9bHY*xtnH80&i6%mXMKv^SC@}#@LYxSN94S9cO(Qf zu*nup2mDlO$8Ak&1)Rp;e|wpw7dcHP%~xt#zw!(YJDpys~?0%YjUQQh6Y44*_pfgyZHwAV9)Z?x8h7+XM5F3dx^3@L=> za``Yfg}5lY{``4^#F`Kx6X!%hJhi|=+lSi;j3k*KXZ|7mtMtF5*E8=7;fsVH z=Auy!&8sZD_wkT>z=N3{2|_@KUjfvrsY?R79F7GqKq4OzR0k5^=`IRMgZ}=F!k7;k z{D$-agpA=19f{*dMCw939zj2X5p)JuT8@Ds2w^Nm@IV4!bcqvH3y6wvA;EYi*eQ*A z4O2sAqiXT zaUezI?T*Evti5&9IuWd@_k!WgWYKkMqhb!xriCdlf? 
z3ifC1EqO=H`^ldqa}Hm|;ahFG=jehbk9?-Tnmv3mk;l1X~H5|+|98C8GHcl!HN3#v1cVAm% zHa)GY?YG(#Pfykp_%v{v`Q;BbT0eO`?F5OQ<$-Xw=#i)CMkhp=uMagdVJ5dKnG#JteV3y z2}yXYFkj+VqAZ`s8IMX|jKn!ff6bl6i!};-cOlE)wl0B3Sv}CvZuc`K#4x2*WrIPS zjR%9OEf~~abfH}l488?#xB3i%%tC+OLO6FuK3w5g3E_*4j^PUDN_sYilM93^oFj;> zP`yh$F;&yHC+$wC+T2ie(sbREs%m3+^s)2cb7jncMHb9zVvtaISGPgSl#*URs(+_A|mXIv>j~Q+BfWguE7vW o0tv<|5kKTTG?OIxCt~lHg!Mm(Q-2}?|HE7+$$gIrTx)y#U%QTnrT_o{ literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/orchestrator/__pycache__/result_assembler.cpython-312.pyc b/app/modules/agent/engine/orchestrator/__pycache__/result_assembler.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..022bd7ba12935027a9687ec18caadd3abdb091c3 GIT binary patch literal 3831 zcmbtXUu+Y}8J}J6`d{M2$=P&CaAFe&Fc4dT9{w@?LF|22Tm~b~&7j{F_a3 z9Borfvtlc@UNJ40#^0a8YL(4KDyEHR>>N+KPZRY#wrrx<74i|bs)kLY<2Ii5#~>wr zSXB(&zWg(66J51N6ho~Vj2mdtk@JcY@sX{g9+q88Nn zD`GLAg|xsGp(to!O@Oa>naf3Jr@!ZWmW9jb3{1wbQ z3&eA=th8**PhW;}$-1fAvOFuT@j$*sVOPrPhVFCJkoq4=M;;CQq&{$DUic#R3|5ia z^f()%Hu!m->Vx~Xu*yR2vp-0h z!OoiY7AN1aMii!+cwMx2wbozr&+cx~Uw&J+b(JiC0{u3m@>59VC&24-p<1Z)rvG2? 
z0-Vj_uBK|CYy8!;06OMEtv$5Rb-otV!q>&Quw7`Kp%$o#wQ!C9L*#Zd`;3DW=Sf~d zx#$>epVUpokQLLqgb78o)1Rvzv|HHD2{bfqD5k7yv<;9R#x`b?_{!uWfF`6B8nhtW zRIQvyBU+`b=$=fp36)GFs#eTNUBjk|X^>#W0-@NXVi@NX^@3crutq~xRb|KuAl+s` zBC=!~a+pD5{-i|}Y1mS+sSv$F1$I0tl1kOaw0$++l1}gg7!Yh)7g@223T137RP?mw zBpQGmNFxfd^+^Q)pSDe5TZYrxfbEQn=+iVIn-x-qY&eVM6-9&r4i(Q;bwi_qtymYx zfVXl{F{+rbi%fpRIu`h3GwWDj(-1qZZcdS2w#04#NwJPaBI$>Y1`KQxmhA{b=v;&_ z*rEZ|o}umR*s`BYEV7-g$B1%i)v4Ge_N<3g2fQQ)bBiHry446nJQ{ALqV>tAMPUY7 zUjX`lj=t|o{BC?<;@X6p%sa{atzX|E_2lt~$RqSYuG3D}>3Y|P54+Aj`1tI6 zup#v>#TH|(H0(&j5B7}Q{irUDK9t73Jw6|6BqTR6?f=dh2k#`@no?IfyoISqd+PT`A{CIaew;QsE!HeM<)x54gR%oZelH{;f+hi!*Nj zUZ;O=BfI_P=#5b~d&J2eY4oI)b}a5#Dl8Vf5a8t+gI7$zEwfE+M8wYM4y>YbOzvuf*_K&r{4o=>(@7ni2yZ4!U@{Dux%!5yH z{UrXq_Hb~ro|$?g22vf%VI=iG*`A0;=Y{7xQGDyIO?M7{`{M_nsE<0e2Vw1hPa|mC z6lb-=-*UgbZ!{Xeze|Arm1-#nM6U*b5@}5hV6W5wt*mPS8jT~!x_pg0fhLN8N`aSk zRV`pQkppV1rYz~M@f+0vwLlAvICORJ6&+zXk=I%}0LS)4(8hY~m6~Lu9RG5cRl9vE zfUa>gfNbU%uVoQ|YWgg?TDpX00%y@Bjxbs-M5DT;n*de}hXg~1@MgyYn3$f%CWNu3 zt2XU`at<3Z#Iddca?}Vv+1&LZS{hXwd+~pT!)-;>(LI!a#U7yRN3& z07O`nTfc!0LacN1mxEs?9(8VOypxRif0;e$tT#~b!M0OKv$}f*u3z;wGY1TaWjXU%%MAb>zQNUC4Rab11-zlD4v{e z|L=1VNtvgJ>q`ACv1K^`-xpqud9Ne;-ah``zQ}Q2{1eAP@4HR#@MO0Cbq|7cEj?B- zJz%%1#H0e7<$~m0u!}IykbTh2#+ko=Ill77m>?sJ=3yM~jhiel9~Tq3Aghv1rHo~n zMrGMAv|-#XdqurZAze(-LDn(Xe}f1sg8cu2m5PLQ3=iNh{7_6x z`s|EpEZ5GqnXG~#uqA;|LUcdej(oGk&wC|)-Y@aGG7cFy~--84)^ zniJi3?m73|d+xpG^ZwcGb`bc5Cw`l};UVNp>=-{*1DHFX0&s;0L}1b+!^9W{VN==^ zGr`xKHfPuvtK!(SC1Z_QGq#v5!^OCaJ!V(&mb4?|j5$@oUHWPldU3ea0X2tFS%YkZFuHs<0y+$OL0S6?UeZGR?7O6?UatGOe*z6?UiFGVQT; zhM33*5j+=(;9WLpyNGqnnZkAdz)qM^obhZnC&%SvE-Q^fpBtK*Iz(xXhFPWV(5#r4 zM$*AtRu*Sv#Wh0XQ)i#d&g9OCRB@}lthK%SVZkPt zE?Qz{ff3AaFq_ahfxT#rv4UN&Tx4UGIVS8-{Iy3mCeoQ?LL5zSHNv5Wf_%)KK?Y^Z zGqa53V2BJo?YFFd^MtS9@+N(hWs_0*ZVAbo^-=asMpZcTtUk)U$tatF%UkqOJ_A!D zS7rXZ6-I?X`bJ|^o}|DgH~s)yzY#}>q35tur$Noz^mUsJOwGC#iRZu|M#CP8lPV5g z)>T8II6|@W30)B>$^rvs3`L~49T-4$C8A#JTlq;)EQ#X26dNz920-gH-grehIfa$P 
z^z(|nvYx^jD=GHM8j4#26|O9ySk%cCt2V7-uS_VRUWD}D`nl){Nu*LVo)%}L!{Q7- zMRO@JAxr$sKs25V2{bt)W<&9*sSr>SV7*Z!Oo~}K3dm?iwFTf;G1_9$T#W@sVS3cZ z$kd#|@q99ylzD!>Z4-@+7*sz9D9u9ib5d>~t=m>xhCb`wU+f-QWG{J3&0VX_4_`m^ z)^pJFlv=i~w(PmS@YV&5wqv#V(RccmBPuP>x*8bxtaDefa{&6zQm}nBc=USb`wf5S z_kONs*K>0=e~Ho&`fTE$hbe5`=@F0sJe|74t7F7T^Oe-R&A2_)S&Jri#ro%liBa z7T~g0DF&3P7>I!ZhK#S1-`psMUiiC2zfzLA_qbYCBv+vlJ z_48WbzVjA1!EUU#=`C~^^H#s9t}1LmdCu73##t8Zd26c07+3w)>}^XGYe46E2%)d! z?Zf1DtbRI&*kkEO?Q5$ z9(aYi|DgxYF|wEZlDYI3we{>Jm;RM7F>osjj`Kjd@scE0Jr zbTTa@vy&mwNQ**9h$w+%R&~>Sg}|#ETPn$qIxwXBa>%Db&&MI75e7r^rpH5&6S0Y$ zAS$dNN(sfPx!kZtVP(jK6c*=FJd>Dtf&15!Ew|Q(l`$2gVK}h)x_gAC)7kk(T>=mq zX9-~lks^vsnogjrUuOo_nLefQUb%TqxcT4)@hdXldpJTaE6z7>7>fZcEy|)02{$Po zUd%vLFq|z7g0PC^3?3&%f1ucmc!%Lx;q-G7DY{~XP34kViZMg642>Kb9j6%975l`{ zktc_T#t#iER!PkqDS8RohD0-LUa=k?dh!U6Eim7-2%ET2!e#&)JZXh9Vl~}`U8_2yq?i-(tm4w+G$ytZ#CBL! zvBJG!*9D`E(2QS-22J-vGauY6V!}4I15x@X80O!}e;{7g5wCyE)3xI1y0)_rJx~~a zuIS;{IKIH~C4cbB{#W;}`FmFUJxdKme`Jv>dHt7Pc=?4j@Aeh%_G@R~XN%rP7TGU+ zjaNEf?OgMPSA5~6ZGiPXvS|I>>AO|mQfllk1s}Vy?ZfZ~;g1d%2cIfDcBIgF^j5HI zE!e#h?7nMb{hl&m-JZKH0?0t`N}w0R?=4S9$=8Vgt>IEjcd2RXU8mLSSTt{td8lwO zR`i@)<4zX1lcn~qH&4BGYOTG0rM-VCQEVSr3l1#0N>15tS(1M@_x9Y4 zKr#A*LS(4WcwmzwC_6}fpynvIF6E29Cl;+GPyIcY#1}oM*SOOK?sUo9xackqF~okF z`L^6k?5CJ-OTmHluUQnL>>>6zvo8G@F8#&e$R6f3W~7__^+6MWpR_R}z3eCLTM*vQ zjP$dglI;M0y1ip0%KniZ#CG2hl5d8Xk=^XgE*rwV47UAh8?^$QK(jJ_n3}7Z&J+N8 zro;T4nS;U+A~3{X%x?s9l3CWd7T9W0u~}}DXN@RAoq2X+6lKU9{DG&ns-Bh|Rov+0 zD!xInq~mi?OS)=ZS=gl5l358#LRoPprTF=HQjSAm7q(~?s+yz)oB&v}O*d=$6zoSr zkAQZj8yW;sGQVXrL%Y{QJ&n9W(5N<7as~>{j*_eXb6?xl6W2zT_#5Z{YHYQJzD z?L=`pzpD}rqLp?FJEj(R^S-b*&T8$H~()tFS0_*rEANba-q5pt2bp0XPu5Ak)Ey$IEjhF*1!ay@Z) z?)VY6i|pF{mAl6hfcd<^vJD}QH18v_L`?`Ns+#ifgE3<=R4wP# z@_zUC;M5b89MxU|9&l0ALU^juUj?w-deDQ?vIpMCOsMvV@PNze>4^69rM*-<;AI<0 zPUX=-dl{+6lak;KFB6wQnKGH2mY%7$Lr=oKsz$ZWJ)tSpK6(}iSYJrP(3H&#!`voY zz9gQ%k)Cf%gc-)b4ef1m{I4XwLgKf{k=tbSHhBgxDtziT34dcfz%b2U6KvI0{tLdu Bne6}o literal 0 HcmV?d00001 diff --git 
a/app/modules/agent/engine/orchestrator/__pycache__/step_registry.cpython-312.pyc b/app/modules/agent/engine/orchestrator/__pycache__/step_registry.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d945215c9510c827bb4c25b0e779450dd2dd52b7 GIT binary patch literal 8461 zcmcgyYit`=cAnvz;#1W7MN-t0ie8qLD6u8mk}S)PV>z(p#ML&LrOA;*nH(y2hSnn# z)|;dSg>B*W23X73EG+*htiUbmpWgmi0}Xb!=#O&D6vGY_)WEht_fNr8ZPOb4(Q{@< zijv3MEL!vex}1Byb02db-#Pb;{@UTFpdkJGyZ58tt)-}6W1*M~>I1#^BY3z&MW_fJ zqvG@&P2;mJrkm5juRf-a8|Dm(%n&oiO>-vY*%&j&EpwK*btU-JpwD?7=5y6LYMP2zKA|GkhdOmkb2V$a zV8w5+5Ts>Wh~pAMNQfpleg?{x$xtj7x)x*MxpJ8#76{Ap3}F|dyg=51M%i{c5$4Ck zN(I?A6^RPPf^%whITng?rSkM5ORn#i9hcY}QFf&OL;LQjRW_W&mL?ONz^)2GoopW` zLUcY97G~F$S=sV-EW}+F*yXS~b~P`&2)#E9!9{1Z5-qn%7Lh3fYdWUMjp+m@aD`mrhSn^xi*1euolD*u#R-1}y-hrjkr2 z7+;X-1@j9sgJ69@W{kr2NKs45b}FLk@27M`2Z;Tk6ia~UQ}mKk6EOhd)V7B1CW3w;sm5!6LF>V zOP!jC8<1{Iq!N%GEdmilUNp_@#wlj_yt)_I1yh?z{rTg&+xOHAc9<5e53GU zh+ANpC{Q#f@Ukl&T4sa<4wMlH%gPP0L@2_9lY{_{Vjc&Cd@ffnupCQZ9u@86as(cE zxh6~!U{vO#F_yUjuL)AH0mV5|yW?49lGTB!bG0Tr!5AfCH&{jpkp)nwQhHVkr9M4i zgg7Q1<)Sc!%yJ0kR(6)gp_Xe3-Lfjq@F7@(HQ7^I?EOJh7iWhBQ^yoXQdgXuC>JIy zE)c_q6JW7C%ONme_&&@$8^YP=<+@Vu3#vtAt|hrhjFs&%m{g`9lq*Yv3MX#BtSzu| zW3h9XHi3yJB5bTUgkU$Zz|JLTG>8c^G&TgSi|mBSDBr}jFxMKpm{eg6SqzyCVT+)FVaxB5)MUYcT5HsXZ@(O0i zn4}LgWJofA88RX{f*CTPT*1sIIn~t&nq&jd#^z-+t9lu-N!_lp0e75i#O2{-b73-h zoDsj0o_{cKg=Yyr5Q?#@1E<*=47fE*a58x2#?U}0>W`4<4VLqVmY4m2z+vVG5P(G! 
z20%Cf)1Hj6P+yU-+(MLN2NH0;U`=3eC&&OCz2%aR)W5tYTNpSluw9t-nsPs*zbJOW z==dLlA9Iu1_4u=%K-#=p>HXA~Hto6^f3T4@?7Evib)=0&FmGtp1+y(_g0i7i8UD@rOTN6XfN zvpFUI4%Z` z=c`asU9MAul3L0U-075h&Wk2Z(%i9T}q^Df+3QBr$##qTaBzlMPeJJUt9D(gurM6MA zZ8SfCl0kfn4k>U<3>?cJLCH{gt)nOz#@??K*6UT2yjEUo1SPNIn5U%n6Jq;`{2M4a zMmajSSEQ~pV%M4cag>Z=tu84rDh5XLV<uClxL^xlk~`-^Yqm3=iii8XRMA z3gP*1iqO~Lu#-k`TFdJf)KYXxSGWXzfqIFbC52GuzahV#S{^Cj>nz4`O9F@12O-6F~Mc5wI54`ui zS;t^nm$Nz2$96s5OzYU=$j`Z-ahb6T8Q)CSb1`GT_;;?#AGZBqCGE^xsp_%kJl-`v z>KK2Oen5{OHSCPr;V}&KR!Vkwg0-giPJr}fduOSjGeuDI`uJM90gZM{a@Xke7|zZ|zAN)^YwO7l9Jf^y-6M2w%A zH}GU>Tr`)^vpAF@CZTk_)VfZlbbo4oSl|pi!CDu~1^O}-w9Lph@Qz9jA0>Oh-<*#r zo_0xUMfZ|tz*!BNWwQ|Cufr*oD+;zBpUrVF_2^~UL8!(#uCr_8JdB%Mz-$IG2#}!J zS8oHK>t#Kt$PV=JFi61&7^N^P?k*<;9kKyCA{Viu6UPXyKCcplYzOC`FvULg;5*2c zI4gv}`wfvvtYL^m!^jPV#(7x}R}F$kM|NGFy>w-A_R6KH)6AQfFTxBIwct$(tU;zx z)2QBC4B-674-yAC3ix`{0n#|=DB>viFCn{0J*#WHvwC|~s_PQ#x^j&z_qy+PON~dw z#v{2R(ks^W=9+!?&fYyMHTR0my}M1`E&kr>-BrmqDEbB^->~Q#e)QgxiL7rr+jMp} z(6JqUuy}t_>KqX}N2Jcn#QWmJ`(e2N;S~Hebf*02VMh&UqV~`rmHU)U6+(aa$G)+&DxRGzdeBn3gms zXv6S3)YmX|8%C}h%$oiNci)r|MZS4)C#q# z|2=AjR@^i&3q}Ir8TL6n@Ef#I>$O)nKDms+3^w9d!#C)tk;?$ec2xlP^JGjQvmgrA z$(EuPxr#bwY%j_!$ks?A4w3Dm1;INaJ{$SvSX3ZWsOv1XhY<=w-bbMgoITYdK?KKE zGBe?81f#{WMUAV2wqh~NA%2YuM-#FY!YjfeVM7twQ5X?-BXON2a%ChMf(TQ&U8zWN zrDhmtRWTUkyZ97>ELfqql;jKw6boThn?zfOejrnUXLRc=T`3^Hh`K zf07B1t@{s}kr%z)x3T&fY^l)7G4=ddvLO=F8PL-Lu@aNcG)f zeYaFUB-Rh5&*d7L@73R}ml}d%Lr`iM78`~)-^@AOKRhQoT5|1u4=&!n_-OKRWw!me zXm3rQyS@Go=%tQ4wm$kWxy?UVy}v4Tjfh<%kE?#}`e+R-O733K4ORa9>gQLbzA>?H>~GyCP-EYOz5m6iG|7mQOeUI8OxW@ZCM@ru zYM1E${GVIS>nZmu|HWe@<)g_X(_Z>7hu=8UMt#vopJ_FG(Qm-#q45C#e$h%p;TLVM zPdDj**`x=_0lyvZUSJ&Qy}y8!e5v0K8i!Ma)}a5gk0dFbP%?`}dgg8WJ~8;Tc!nW; z1b0SF1E())fLYeoFcdXZ1GE4}@Fo6m$twI=gn9k9Aq+@u$x5$0V)abv10YeI|VAYR5~k z?_}0G-I=~Uy>&iY-Tkz>e`g|XeP(yvy0CdcvbTu# zmRxmU*W06dv$^j6O!u+Rs4#lyZfp4e4+z@c)WW!8?V_^yv%Ez6p@IFB&k$eom3@un(v9TAZ3DIsdpH1@3YhhmWKOGI-|x@qo*WQFxay(DJ979wBbJ|DC5vZfkyF^su(O5oSl$j 
zF;f;3mIR|$RrtO|Nz^bcQo|M}_3eJ5Q>3pm!&cEHTHwuyZWuT-Y!f{q18=+N6>ac# zh(6H{Z&vh+4tP7K*hs(>6(u{Ci6=x!j?pCv-SGCo-%3&R>K}-JS?Y4RwnT)lER(fm z?f2-rK#9+c2k1(;Sz^Mh=WeO>j29?u)phGAd|7AKm351jPpmV3)lsI+(pgK^N_zBU z?ZCxZu339$9p!$Y9!F90WZ6o+FYC?v?^*A9Op>!ygc*uh4JMfuCBu=F6h#=744Vv- zEJ3+j7Do9Pn1VE^8V!JqBr;MeCRNIuBVaZPpA;q{9>bx+obz$f*dWG*M0%80fFBqQ z7Wt6V+PCZn0Sr$@kz~HyE31^P$$|ZYk%7%tj8rjfq%Xq_VpPn`4}@T?L>^+UY!VDp z6jUK%Hv+sUsbIH!g>itdr4L}!GCO`GEz77tXbMB(t9y5m5+VW^5u-?n5xEgaEyu zxYuUcLhGh{YgBKIF0i@QL$m%uQ)|9yuimtG{$#G{`C0ex+jbVV9w@YI{iA~kx@Osj zE-KJm2(}eAMGI|_!iL5|OSsUuu@G!2G`HuQJN4$y`SD!y3x$TJThSj!=XT^8b{Cqr z725X#)x#EVz*VHYPS+|(MOEAx9TLMTapy^Z@7r!sS$dLIED%)bdX9Y;8NgG|nag!E zOxE(h-;dF=)s!ZFmQpJLVAeK5 zLmXs9doNX%z8nZ1<5Rca8a@u-!#;ygDX3ISwN4%<@NqqZ4+i0y=PZ@AUPjZac|oxigJ#>e< zV>K*Y`wW|!9+y%EgG-l!yu#skWMuWoc^CquG&vz3nSQP|#MQ1CU569tn2=D8bX6K1 z0UioY%b)&>`qMkql5b|NaW-?CxjCczX1-ZPWxVIxKu4_80dt_?%E&Vq@AY@IUunP4 zZ|Xk`YYW;J@c)grsK2W%YM*I~8C)B*e+HT_wO;_;FSW(+T7yb#1EN1iKXMe>$@>s| z2f$-$k)MZ>KT9D;krgg>rb51=DE~!EStoxaU*E_Fp*~{G?1h*9J^i}&C5-BG823B+ zd)lwVFwQTv&j9@##{2~+V?qBbsQh6DWqnd*7*zI8b#yrG#rcKxO$v+YnDYNqX~$q- zk6DpY)|l>7O6p=V7*RkQU+V8`zk>f48MHebwy7vIj_Y7Rzt(;;?NtSN6w)0rEQxV- z+BG4lv9U1bD~6+#HW)UnQVa&wVz^3L!mQqMMKJ89wMM*#4YGsygkejhuSv3u38xY0 z8R{8)^W4Dkv;3L9*9ZFEFueUI`_8;FFvOoa-P<>4ScTM-!S$X7f)i&>pW-X@hWA|0 znf|_WeZ72N@4z|3DW;RKMZlcURZ3bAZ{ri`^ti$&;^Ptz&Y3_OS-O&eRK@W0oqu(( z2l{%o=gc|7)!#SNccy2Mf2}8Cmv?}8HJO*eVmix@OH=Xzs8uj(k<{ua2>@~!zNf>_ za(%Q|B?}2WIC&x1{Nc;DkL9@?I=3Uo?aFhVI@g)wqLAwZLLYi>@6Y?U>;COI|IWO> zL-%*&{9Uum@7(IFl&C^kE2X8aFyM4|zFY4j_q$;9w5zP}>ar<(< z8EsE?**CN7o~dpHRgm3)Ull28FK>BBaDbkR7D5))xv1ue5)O*CM_%?R%WkEbdBGj{ z;H4Wc-HtE0cg^?ZI}Ym|hwnR<-{y?^!_%O zABgJ%aqV(yc_2-iU~Z^+c0ajEv-aIL^?^4DaY;if{!re(Rrhbz*sU_;NK~w}Z3MI` zrNhwhJ`H zgWZkcfc?v;EF=v_`chm_Qt@fexG)};g-fu3mQ?dA174DrQt1q)6cE0CuKz&)K>r>@ z^u-Kr2Oyj;XrE#9UeJCHE(}2pocLYSg-dnsK`eZi1bYa3CL@UWK=_ZO+2{Xvu`6Pg z_k&P|Jt@fJ;MK%2s+UDG{vTQA6;F1{I&0(G&_#7rYv`UE`O(4K=RP{3H*}Yx8Z93O 
zDv*bGF3JHLPa>3JE=U){rkkFGs_DJoLZBh+9hX?}*rLV5xn>;?Td4q-Yv^8ZEH`xL z{M{Pcz22m7vkj`CSI>hP-J(XR8B3P#r7rv}WE$2r(}HXX6;|_zbU7P?T|py_d1^f> zOBEIFKdj~NddC9gY#sI*j6&b+d!`IbnOkXRQppgX#{F{TkYQ~|m7Z-`n`npqh9zrD z!7gIPp0(6FdRW^fz~KaQ&rv>lD$4`e+21i#)&j1_4jI;roG@%x1vo^I_dutRolQ+h z20Nyz6Fk%%LCqN~E?N-#N96PP>8m<+0GfzJeglf-*8#vD-wIx6SYhFlU&7L(01P|q zeWjErV=5MLn0AKw2^v4thz1i^r6l2Cu(EKCCsO#=cKf72LG_SjBV01bZBS5W-KaS! z_zR#?<^aI5v2*vVy%5~|6=io0(zCrQ!O(5yr|ie%JuyJf05@=)r+p@RixV-*L@_K=Vqd_4a|Ec7NPGcOuuecd6~r{B$n#LOyg<4;{^g zj^#rq_0Y*&=;hgyg+2SVgBSHZ7e5};MkZ!Y7d#CgoVs!9lg@eme&2HI%m40qrRYUL ziasjPH0xdO2$*ib6ws?jA9sYGd}~KQ6R((jJtGvYYv+_tHAK?|jy==-m}fdk#VJZo zQjzbOE$wAmn%{E;-POW1EivfXRlpqRF;=MY)_luvo6?N9wJ~A5YHNbD>>Jq0agJIj$?u9n`sl z3+m$89M_ZQ`gN{9#|_N({*DWQ#g^=DB@imKY+PyIJjeWu{gj<=$%UU=3LjdKbL}sz zY}r0{?q}yeJwKny?Rar%$MO3Oxh=g99n6M?|F+t_zM>O)FE%~FOfjD$W;zE5p6RIY zHlNGYsY0o&sum+c%~Ms2d~2RDE5ty*S~+wTA-O=)y=S{y;sqxhYqfUK0kv#(ZNxcr z?m+<9K0whPOah27hDS(1cFkXtArdKuGbv2+iYiSgMi5aM!dV=SRVUzd6pM`jLSBt0 zB{;u?W7OD*2k5I0hLM|%r08-l;Bp^=V+ioB;f}>L5!_TN*nK&YZ3VneB&#_JMOl>r z0ALnw?h`iOd_ZqL@XN9M!9o4t;J=OKheUly)IyT(mR7i}3(S3s_U5pboLJ_rXr3!8 z{!R0Ji)Xag-_R~6mi&3k4VUQ)?AWvk?*Kz-{cYLwWD&JnR}JXkE(K5 z$?BFns>?^M_t^641?MK!HdW`GjmWX1N)BgLf39l2RpsvL@=?nZ`yd#!UaMn+l9*IUKcEBc zk13#b%aW2wsOAC>`8~PAQVg%T)dfGBNXsfF94Qg58x+GYOR;nk?!@2*0q@BSXKYMJ zjY^878ottT6`Yi(#DpYwz|2Hg)A!IxWDFlNCJk~Af*OBRpzJXp+yVu4e6Z$%Vg{Ri z1AzG8N0a$bhaT!!a(Aq7TMF&l{}9}w?K*TnvK$;-32vGD%f%O#gC`K1T5Meo_7)vX zYr{jEBj_tq4hX9O$`eE$`}EMhCHFqUV=wTSn_k?r9PERJ+OF>V&~mV7vtmm-w#0z! 
zgO;@sKntDH-KT)b{P@3I(9XxS#I@zVNv(HknVZ%;(-1STuTDMGx#aF7eRZs~?$SD6 zydPg~eN}6D?V-(z{HVYsobXdM~+)dE`pO74M;FyoBn6d0UJ1OPLP|1oxJp zi;)b>Ea*jswTLo0V4KiYx;*P5*J)km literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/orchestrator/__pycache__/template_registry.cpython-312.pyc b/app/modules/agent/engine/orchestrator/__pycache__/template_registry.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d944184c948168e95b5d40e0c3f35aaeacc89cff GIT binary patch literal 16379 zcmeHOYit`=cIFI4iIhZxnQXuIql+L@@Bu1L71U5U!+%0$(4Rl+^(PI#t0fFrrjB5Beb ziHDP>o6>PzSvKoN)8TkbU+oL)%9yfXy2m3*GOWf@rh6i+Es?1R_s!EPPK1X)#>Gbt z#W5|&j%iVDl!f;yrk$W7Xa%Sf)CF1rS_$d`tpcqCb%R!cdgOrY2CbGoa<%nWBiH`0 z(&KoNKecO>H+jD7Tu=9LK8tELHFx`@}f}V{fO@C*ob8u?neAl=% z)^p)}&qcFJQk82l<+|C}Jq&K|*zk~)Q!-tWoQi0sccOEwuVl~CuJBh0#6*U|;ol!+M-J10ZbvVrmO^Gj< z6*^ue(>v=~D@CW|r z@~Y`#1<5>9>ZLf!8Uzh1k4E%0gkC+*wbszCMo?rp!r z|K|HE-_3p}`*HV=hQ2@a*~y>0{rTG)zBleax8XZ&xKBSsK^!~YRxcRrZGFJrRy>Q= zQ$m{dPZnfR79gW0f8-}S_1qhLrO4ZAKYvl-Zp9r-vxFCyTq(QcD%mXy(Nf+#PwEXm z=mCG^Cs*5V0m@Tz#~MdUl&4nq%C)kqWLfG8%2H&^k$rdlcdQm;Hj0&}zMwn}cdYh{ zI&X?76ZOp_zpNlPW=ZxKTXsw1T@QpzZzL6u=qLsJ@{PqxnNeM z()w~**UUzR&31~Uf}==LHN3o-Y35U)62vPBi;5g%eA8tSrfb%+JG?GhIg62&#)zeB%>ilU);&V;J)K$A?2up+cJ#6Frw`{KIIPgkc)4 z)pp9@A`7{}OWjX`HYAw?B?l6FV5Ip*tHS&FuHzTA{_pmGr~iZ7pZV_ZH^wh+j?b=- z&l+=A9>iCE(;v5Ov`reV7jnirH(Jgb&0X7GW~>Uv+5+{} z@4L1SJN$v~X19quDKB zjgM#s|DPVsN{?h!1*1p#anpT=9yR<*)Q3=?r_4B2Q{brxaw-_1ALr?tjEMg|Pd?c% z`%tUeD2;Z-=AHU)UaP2!jjJ`hH49UWs|{>iEi&fHjrJIgk`$8%YMc{#(Z()fjX2&FN-$_fw%#_4z}8jED`xxsFO?AEMdL;_q<;-8E8vC6Dt zNJG*v$|}i-lBqjO5D6nRzc>B%l_bZ)7;O`#kLO5hiWCWJR4|?ej38r}o}A3|Xz4@( zA2S)n^o-k=>TxQt&w`86a#+1q z5rH*bxI)%6Vac)n4zz1>NnT*o6x>3jFQ1|Q6humqD%&f zz+hTX$e=DPSO%kcgLVyPmyDUXas37|uLzx#nFw;76^F?1usB18hsDbzBLc%KLYP#p z3-gxYh;8_T8*2mi_8R@u#%#hkpEOo)B6CLQqRd2)t6Ll+!xJzC*@UQ&j0g;8P*kX_ z3Q^1OqPSgC$%gd0+>mYrkKcRk{>d-j{NhcccWUD;_`19~yRbgHfUJu`9~qhx&XJ+F z&eM&b&0wB>arc;CkIrXf{!fwqX2*QF;;!p4V{s+$Yh@Yy>Mj_y%8&Y<$9SriYcTFt 
zmPDtu1)j=}#gFskMJ&Ku#*^=amCvK|B%BF-(T5mWjG?OJ*6U&*|o}H-`~- zi0P<6T2-Wlm~MOTEZ_&hH=7>OwEQ=umi1U7mSOQsD|5qe0nhT?v_miFzbTSRET;eq zc&rCxIi>2ErY_D7ayAIDJ1W$RXe@P#k{L?osO`Kv@^YXM*uj%Dv>+hGYoy*7Mi;K4 z#WdULwd3j_Sv!fO!~H07V4#8EgX!gwDFmoShQ}s2hG1x7H;|x7A`>kAg~f|U4z+*4 zr$g=o%V_B^j=XK09%U1hvOcRA3)e99tqMIf^{wK|3R52uHe(IYj5W(41;V-345-*s ztI=}Ac)8Q)x@aiZ4S5B+-Qqd2(~X%Lc8Gu(0uq3au$UlOSX4~o80(KGJZz9MJl(2+m`V&PU}O_XnECmt;gt# zK&%SAq;(b3G{~5Eft-v1WT2Epz?>CRRD-MHO_E&|hn$cRfs-*YB=9K{TZBV=SyoFoTCAiIFJfrFHIgJdajfE*BkgF$hS91Or6cM!@u z$hH5fA!ne}EB2FtUU8U=69IEXjFRz~m?l|F>>=YsV7yoCCF4C}pJn`i zfBByN>EPzc(E7>HM%%D4I=MMIy*@f^%)}lnUEf^Ft}kUbMsFIew{l;@uWz)xVKl#4 z@HOo7G3XR5fKIR7OW)VO%zTkC`sX%IzhhjMH!ojVzkCJ7igQpK=1<`1AqJg7R3}fU z2y~)=0)tM22+aLIbV6&epZv6;Q*}w0@wB0nx1^`14W0ZYJrzQyGH@n}rKYE+|mD}W2xv`|LLhw@-FVvLJWm1Xo-GOKp0goai55I$%XdJ!=R;};4%A^vMkpDj zWQ>vtN+v0}KncTJ&35F2M`NW}62M7ET*3~4SPZjnr-JEAgqI~fMUYX_RYl48hQiB1 zJw*U7NDQ-viNY!tl{B+0o(juSB(16x@`gksoJP1WQ$Gk%&=OI0%?uP%Gy^;;jF324 zPA|(8q|OBT*y@$8FwYh@uPZa<`E*i_E2itZ3XHcS-aL+`FqwcFi{vd$5Ng zJ=4>5t~1ovGu|`7LrA+pUnAPI%=#Sb{yykv>zD+8J0Twc(^;c?*0^%Zh!JKQ0D=K# z84xE3&kzA~5|J6;nJAzmWKpq;@C*^)nE|nz@Jv6@4F^lTHlE3c&}MaGCJm)dk%8V$ zU?vzK!aQCg@&{G5Nkp=X{mtJAR{O< z42>YX217)^oEDeKi3)rQnF<6684)-c0g!~susCAXU^HKYmyKfpt#c5^Ia*!fL8Wo= zA{iPNXUPc>sLY{#;3O?}Iw4B~MM6ddPR0Qyp<-dzS!cxZCFgw|HC{o5E<-EDYm($R zc@&5h_D02N@d#Hq=|Gn`z;>>MSLWTnWqvcY^-Kzx48iA zzJCAE&z}36!+&+y7+Tsm8#j{bW^!dcNgq!cz+4nGh1f0b&I%z0cNv8D3>SgBe!S}p z?tc6mj}&6EEVe>hk((@q-_l6p6M~Hr5Q;|oU>!CNq~g~UNoTMvQ#Z!%gZ$p~S#sG@FrVWBEJNvwQX;MC*vEv}8!hk8 zD^fTfPhD5!4jv}tOZ$xs2wK@}2-wIVAZG?GrdhXBiR0Aj#x*Trg`gJ+P$aTF4Y9eB1?M6@&7_HPK60^wyeF}FZX<m*x2vOA2&7693@Uxt+OOM$y_ToV+^;W+X}-xFH{&se_X`+8|IG-!WfYmz6>|R7qy6hxxi7t3`YUe zn4mdW8qUv!FBwM>x|%i?SB&T?vh*O_23I{|h+Gi?bGS_Kt%nESy2T>Fw_MmNW5_pP zr%xOp<9!H?!8j2xN5wgs%q4M&WD-JSkP$&0`@}4%^dca~OMWYl#O!98N*I?)%;Mu@ zcv1|L;YrwsVInY`5#nTcRY(xIA)d8brYqkv07hqwvr|U&y0K6u=2<%C*3DyXU5L5C z$B1)+0MQ6G{h}B|z?^YjWmwRu(%rCAvx=eTi;-)?tRd5~>v;rQVdp&7GGT`Zn6&JQ 
zsxBbd3fTf$4KgCA>UlICRL=2!8g!0ZJ|^-$fHzUqOU6RRP|EaE>F{nJuoryv7KC@V zeQpMd8VW$siF-ZwyT9!JqTlFKHcl-Vi}B4xb$wAqR$b_&!p1n>J#~(5{4Db8IbQ@5 zG#hC9 zQH)|`(t;AErBe-P%;p7co>^yIVJ~Ycy{wrnIYee{NSAnkF5}NZGkd3c-A;`HbXFTj zY|`G*o~a$gUC_?eXy;-&R*Wvk)K-&vcts)^M`QIlpchG9!&dmHlBv&uJex(k%DB+R zcYAV-)PjZM>;S3>8DG#vj8f-&$95w(4)6SSVc8Bd?B-&~jjzI%3Cz{moBIobb1K7h zCDvkF@KBfA)MXVT3^nZ^cOp7_g_Cv*{V!jPeZ@F2U<}cIrZhI=vWc0+%O?50u}SeA zj-uWnnN%jeHOb>!6UE|NIZSiQke_<^4>jmEdLM~LIMA&%cB5zyiobj}7&AKY5Ib?=rj9yi7k#!40jR^buG ziNJW5SUSeIDzF&iim*bOoz5Nthn-HgAWQ_zL1&nX(aB?HXNm>y4jV6>HaaJa`5Q)< zHu;?s2dN(C#8D~+5irNZ2-Q&$uaZnboDec1Fn$im7b<;-sxprwRE$CA1yUgb=0%`V zXbw7cx*Kq&t!B7pHv?N_?={ZxpeWnI*M-$}D60Z%C#aIR9mgtMyx`G_Fjh_kx_Dq*pQ$T*Io@gAG0 z*sk|5G#V(5MyEDTmD$U8s@PsWYi~1~=_pEoH(q!c#|6%g<(33-;;G2#u^-?lU{?Hr z;}4z1<=_I)oeqh>0U@DBQ$}A%nDTr%N>>KOR%fH zOvzD7o~MLPMX2LQB6hn{{b)Zm>$S7E*x75q7HH|LD_glDXrkbVIW>Na1Z+fX07$O^Jszx)pTWiMMn(f# z+PndsrgKSI#hE#rTUB307PJQS8TM@L15y7JiRmSXL}y*4oSH^TRB2vk+OVBzFTHOH z=U~1E6|Ei#J%oGLoS(h?vlk!u1`PKAd$e%E);%9jMR43NnmemO6(UEhLR6kdD}?G; z?>n!KXjai1MZN!LP)B75$m9KDmH7#-8Gq;Qaf&XL>oBtTd(hEnnA)52V#yrRG9!{ z?mQ(tr`{tQ^agbVRQ7oOIWgEX%C6Xh%C6Yc%C6{*%v8f$FdGb|aA1<&&WxYg2Y1e9 zKTqs7#_lXQ(VUahtDM^3H{*2Q!&lrc?B5I2yRd-t ze64F(e7LXLg{i8whWL9tuIlZBj<#pF_CLGz!jY|)kJJC&1HWu(aaI4KKCtZq-L7=_ z8@H?I%IyfWYzo{)`_b!l5UkJJSANt$=u3mm0$i@;CA?lxfC4N~gk6kv z-Vlw*aE&rIgc(_`RpzE>Mz(90xg}ar+ihbMqM-SP-Ok(|w7xaz02kd+;;VB%<4LSE z90xr6hkN5$Dm)(VR4%4WKI~R{6P9!Kv^io)1DK_$gQY`L2OGlGpj(`HY3f8t@F4 literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/orchestrator/actions/__pycache__/common.cpython-312.pyc b/app/modules/agent/engine/orchestrator/actions/__pycache__/common.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39cb9a8003e46a4c6a43e98ea40d4efa62a691ca GIT binary patch literal 2131 zcmb7F&5smC6tC*9>G`5zcli*I9av0g#hCyGj8O@Z4Ka{CxERS8({{RRhaP9T$LgBR z%w|Y9>;X;~FN=!F5=dOK`~m(6CSGQNOXl^f_o`mi z`@N6(xme60pw1_EgO4DanrWI<6C(v(K)$|+d}CFS&QLYm&DiD>v9K}EyI~kHmjzK6!%}az1OF`$c20MQ 
z*B0szqL7gevlU)AM_DlEdhBwyMR;L)KJZECkq;SZdWr9ZtH}p%odjVC5md!KswzJA zl{?rseRa{Os(#kb`P!mh)qLGIK%e(Bpx3+DF7Vml_h*f{;`q>Rlx7K@7os&tP~t8i$0 zkf7QDw)hobBS9WpvyQEduhiD9(|z+a-3!**x8DeQa#plU>_wD#X=fpEjd+%f5kZkI zAtp*yl8@Q*Vq8si|5kM0q3@NXL_0WEpre(#sETbg^Z;`}-H%vyX!bRhe=h4xl$oTS zzM8&dh~x14&{$2|3fchI1q$1!zH6HTmOZ0}haNGk>H_+01nrZV;aQT-35TQL3P4pG zZcByDv$oFj&vykr&&N@ldL(JIT)LoSj%VU9XtfBVuYwOdM+Gocpw88+qyy<-G-z?P z>2`nxA=l>`k;`5@A?7Ajv(t3LEp~x7rCwGI9*_u1Nx~>4fhDyCs|Y4^w!uxu3Ed`f9G-KWWLkk- zaGWpOZX>bCfkY3ALvI3mfMiTE0aVnyWI~7!09!PaOX~anaWD(0?PJxFP0X{R|O`h>SeH#p-ePxuJD#rL%bmTGG^B7(J4bA>-?7~O? JK)~eTe*oLb@zekS literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/orchestrator/actions/__pycache__/docs_actions.cpython-312.pyc b/app/modules/agent/engine/orchestrator/actions/__pycache__/docs_actions.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a6c3931a8224bb5ca95f393d2864e9cc760a5f4 GIT binary patch literal 6642 zcmcIpZA@F&8NQ$X00$e}5IzhzKvUc{CQV2nA5E5E0wjS_APuESz0189F!NW=y-vUb zEt;Zzn2(wbj;&%L%W z;3So{U5Rs!&w0;#&VBEBp7Xvhf7!94h=AvJ=kEkgl@i3)_(%3(^#-|q8X9wiKnP@z z2$91iiQ80=3em%~)~16@h#h9NHWM_2jKfBfpomUFV6PE^L8R}~x~^f3-0%rFf#1vLXT4K)ij1GNEa7HT8Z2B=L?8=*E&nLI^m zu`j|)`nDuKvw*gY5lVO`a{X`GVlt`qLNQHDWZpH~gX?KhAO$LGg-KE>C!ueG$(PaF z6Nc@5_cPjOsA&o;PgfBFGe!z5{2AcS2!Ezq>=#FaA}_loeq0Q3&9Wv0=RlO)fl+r@ z6a`U;H)OPoMo?&D5iTsaFLS{F>;VpihgFSIEEM9H zG{a|+{Fxu#=#*Lx8^Xr-Z+_!=ecti)7M8&WMnX_rC04EW8!dBfH{0ghm0kN5PbM9$ zD~`UU{eNuxeOu~CU*bq#()!jc^Jr)Fyl;NsMm+6oRUB>U@`FlQYua&0u^&!5+7$bt z$HfLm(X4T;geWbWGu<@JJCs`Q150Y}*~H$n%VSB~+luwPVmiMLs}hA#H3%bf#27Hn zuShuc#6%8Xc#i(Sxu)bCCJSLnpr@$-x#?^(@GawI*nVL0Kyx6>BQXRYz#9_cJGE}@ z3~e58Y6HSZq2AbLC2BFq`ehEmA(K>Nrs1K{PS~_|xKIVOpraCZRN*+|SGh>Z3>k zes543QD_eSxFD6G((J54k^()J(>??38D^UH5f>u_F~d$X1qqRr@tbPTG*w9FG@ISu zb5~{rHv3*#`Tvn-!*C(w$^ns?u5cw z{e&=@OJ;p(i{pdd_j@0?+^co-x z#{x=i%Y$I*V1MFZzjE$^@}8&+jwWqmigjEujc?}xE4;YI1A@i_&!k_B>n#X<9$9af zE=23kz;ky7&zbdLhq-5(6_|;PvzqIKhQLm<9}}NYfx9)fyc56tC 
zn;IJ21AHVZBFq5sBKX;^IXhDP)jb(m!Oih}1POugm|KpxxiA-;k^?+=Sy`ptcmp_7 zV_`SOtNTho9xo&XcNGSihI7?`L{5r?RVR2YA~+?Ypj}cUv9J&nJw^@AXb*_1%mg@y zTM(@l`QB{zpFG#=JKN#O2eu0@paI*Ut167^+^X85G0hj1;_iG%XZMva@hWJS{tU%c zB5g09Yr5Gqzh80hT?AMjSg{{jvI8uC2!2Z)vjz>nr8<7?AN0K61NcqXHGDet(bS{b zeTyxNyA{`=w5vAl+P&(kyKTH%e5ZJ!dl4nwhm)=&+CWp<)xBmlRaULjMeg#oa-z;h zKCUjo*tKEog7&q>t=R2J9PC-Xob#QwgBBk{4YXY^04-dXn?MU!Cd3s!;scte#=s%C z=mn_Jat>(Iw9J5R?a4w!BCk91AVn916eZAE`7DHPQrGt~a7Bau9Bd#ABc>T2aX-68 z2A-27vXexf9K@1^W6(H5kl-{;6Bprzn;{{(%a??4{0KS6=a=Lh3D@{e zhL|FMLti01RNRbVsJW%R_eix!g4j+D&^g)LjwCznQQK;`6xc^lTmam-ja_2 z9z`U{kFQ<-MzrKT8jSElVx{Wzl5f7{)8iivdpAf_V*+D6nftwcavp})F z15CYm2?^h1;OXE;_jbw(e((6M7aoar&?I5l0WU(RR&ldpzCo!w0Ay8qc%}45OCvyc zt#AC@HnbrBscPBxMgQAho;d%Z0M?t5@H)zX(} z=~McLlD2mg>#$-P{{D%PD1C>Na33~~ieKbKLYzgjfU+SUpU7o zn6~Z$!fo>{Lzb$6oPP0~7^CRJ*8LS9$mlS|o{NrE7IH3u=muIi`-p-DJjzJH*6|-hN?*b~3#;MpD%>$@) zBo+t)h3FodC~pquw+!NBtMmsbo&!o}IYb=Jswad6J0h!&nt9>ZzAqe}w5=j-DN9*g z35)AdRXuhyAG{3y}IE0yz5@q;yVwi5E`3O3(w4#ZU<@hXwFaqcq|`7CiD|p%X$`Y{sHEUjN?M> znJ{Hr8}*hW%J!l@7>w`k1D@r8^08N(ckXguPj{5#R<{<{X3nGFRW17I;=1lwim6lkijXhUvsBo|_zsd|C>%Ej;of@}AaW6s= zEq?!{7#Gw>P%X?x4Yy()tBG=1} z>VW z+3MBOuUdogOZ#KUQmJUV2h{01^f7nzp`+tjU5(H`1d+mqKhOLP~NlzAPhVaAE z8^(ERI;wB}7!qIrUop}yDAs6_B)=l+|3UPAMGSsLbbM<#Pm*2ajXmEHT0>LuABgTI A00000 literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/orchestrator/actions/__pycache__/edit_actions.cpython-312.pyc b/app/modules/agent/engine/orchestrator/actions/__pycache__/edit_actions.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d5dc26c20743ed3c898e79e13c1fe5bc2a64eaf GIT binary patch literal 7472 zcmbVROKclQn(ijMn-4vjBB=)@*=GETX2&E${+gA5RBb^*~d8CW@u1~xDPHV5tQ0OQL72KLfa8z9|qfCXmvkjubX&0dT> z>|fn%J|sH!P64Va)~l=P|Nig)>({@xv^WV!e|h>%RB{u!4o_g zClcgMlEigO+;Y9{RnziB543o%0Q%;yAe6R3w)W6hh6 z1xvvGJ^m3OwI(i^Oi7#+O(n%K_}y|og6k8RbUKA3XmAbB3y}uPR za?hegDqlG9s#$pLEh*;M(f~hsGDNuPuF=9^eIJ&D_-~MXMwHy`%LkSY6y0wZ+;8V> 
z&%IrF&x5Y}T}9u)g709_7cBUKtDmmFtNPAu_=X=3sNVN-j*_>l=-prN?q8Wxy&?GR z?kKwV72Nw)qH8|Y{dYOrvnKcRuAcnJgG=`>tuktFXruStwNsEiwy0f0xr@)(uKPEh zclCZfxf1wY@SC8zdqCZBRP8#JyIAVk`|ES7mT#r?i~sbCf_o%qE4jSO_9c7XqwEZQ z+gm(xxp3t2lR4G(k-}bA?AJHpAVeL{*r7q^nU^}xNM)4k+|httVrBH=b<7mQLfw*n zlshb-Jq_l7_E>;n%G7Q$&Z1SSa1&^9gL%A_uP}8?YKf-o}Fu;Bl=6Gllh0U{@Uo6EVWfj=&!v5N2c4fiuj-s0U}5 zS`a45fDP@y^_@7=NyKhaoN&aL`N{e~&iviYG7?r?Kr*B@y_~fwe5V4`A=_Wo_XNB?_4>tI-qt1fg3iwC)bWW@9fT> zcyRjuX`lzU+H*|pJpR0Q*9!AH=QqyPv+Dz@|E$`3?gty?apr6>r_haJD*iuecQo^nhDI#3~984dn!ih)w@iMYPD4Mx%KP7~(Qj z@Dw;IZ%7;9^U>Yl(E*6DGtoe?VIMXL6O^7}x`%20GD_K6XHM;7C;%Nb_-#!@lhFhh z4`OENM*k9_|`C#z=V6msa(9^%>UE8DdoB*V_oH?onDf#%L4~mCJ z3x`La>{VS?6!xlOzq%bLSR4QVa`P{`UHE?j+?WTlN>3 z8h(G>Y}@0oKlL;x`1}^jqtqO$H3fjBLxob;c@wIs7Om(+f?q zvhBzE7wxc;g>O!he2Yxsx=cfzknMcy48gNsGc)9(eQ&MtVTy+lT}^P zi9Xv=>tOYPFD8JiCGZHFf^4_bdFHLYK7Z{PKRn5V(|$An1HR|+DB9oGch;#fhDvHo z1PQQ2)@x{x!27XB>^FJ?jAlut4I|*V&543;e>4|3gz1zB97M`pR?j39!;T-Ic4e>57DJ({lSvgQ~XJwG-wJTW{Oa8@-M9fQH>IL>r$oWft% z>?srlKa-1t(+ua*?B+%^N9AiGCMvA`Mlv{{)x0X~heu}XUC#Qx!!TX^U&vmq`z6-% z)%+Lpd24yOu|nM)MOZp*48>oY?T5S>K&=U`(F+^5-jU1r`um>fZUtvwGvS$AHhNM-(r~>TbN3OLEHoKu3!buRZbvp zt3;Wv-wBZ*)mc0PHZd%a=Xk-4q3GD;1+|mw}Q3Da%{{MlvF`XL$0WIV?mBM zRGQs?6||K@U=b^(QGiH>zhY_pb5Uv5pLO^#R~!%nL9N-4I=G;70t(P*4A$8qa*diC zzCKwibB&IqGVsQua7c8E*u9Zd0$bWJ{&HB%BoZ82$ogxEq0tD8LlH9$ysTWFdlw95 z0Ku+-KnFLxXCX}DI+vqMZ70|1wYl}E!j1wvn9+-T@=?4PJXZ*wd+b$R?r{;p8zFUW;f(H`3EX_+Y5+!+oOT^^`_=|yiBTFf$ z!IMUgOwRg&Q@!_HK6T=!k zgHoBaW&>=+xrl(y!z6eT=?s9GT%+}T<=@*~ok9_&_BD=4^?pxtk zhg7z|$et*$CscM&u@4#ouhq$lz|R=muM4L}gDX7Vh);QlFI5E6Ku=A9V2?l-I@w6* zz`13i<47_1;S{jFwGjX+>(THiZi36%lFa@K#4T_$r$Bg%%%ZL4G?n+tYfn>&kX1in zt}fHq%Mr<69+hnoh~ZBV6tMsTA45|x7~O&l++#g5a~n5cI2c|9jib*9eSk;U4SZnF z)^LZN$rOTjHP{0l9fDS^wb}qN<~hv@fn<#arvYNY*gev{jS8M{5UAbHHQFL$}*+f(!f3ci5qJCtiJb?1a{c^s@ z?kcdmR;ZN-c+N$(zrgmZ?7&)5WiOOEdw2+>X9bz=`)coq?NyJtM5k=o84aV-Ud)mLvE_ljQ%}CxRKfgTpBWBBx|j~X*=*RF5EqRQrG70by!CxJ+XLB4`x#^NXA!m~m=VO? 
nkiD>yB>5e&`_F{qzlgE#h{^AW;lDB;lce`v-~SM})YtewrtEqD literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/orchestrator/actions/__pycache__/explain_actions.cpython-312.pyc b/app/modules/agent/engine/orchestrator/actions/__pycache__/explain_actions.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad3fd1e6f7087fa0d0c69c95a5d9fc3a1e7d3521 GIT binary patch literal 4535 zcmb_gO>7&-72f6U@}Ck#{r^~6NpURN7HP|lT??vQTUJuLsccI!(yC3@8_tSan%re( zm$I~x03UqdA&2NB84X~im%5M-Mo|>*sXiKL3y@|3E*3Iipy(k$4@%@-eCnHBQW9mU z3DC}A=FOX#H$VHm?>+J_p^%?~^7%(!$sdL&>K~+2Eu16J+t)x?r4WVaG?k%eXqw2b zv@65RFt*I3*$g+s*)p4UXFM|=nsQMeP>8!vA-BYAFpjMm@1iH_{hD;5v=tB)Mb$-J zRuv7pp-TxOPUW&$73-h~k1t4x9MO%biY_e>UGNgtb2^|7qV$lR-BV)iHc3zvJYyw z2EBb8w5ya(JNr@85}n#RucA(|)$!dn<=K_15~7i7qq2UZVsqK`f3waUV4cy)UF`Bd zo}HDc>e&})%|%ntuik-OaxF1>U}q<`I)b<_nPo0XExVUoDFEI6vQbgs3rp@){r*vS zZz|PrfR;w?Lv9l}U^d|5v3H{a@#@KFV!Qc(*);O$EmeK0tt#!dOC0hqxn=rsplWm3 zv*bzAQ&coKVFj?56cP@GEy{|3IGfH%Nm)3L?sqISpf5)~44(PENdfkkpcx0tk~fH57*S<5YgX$4t95h0;! 
z&#EIQ)Fe#%uePQ>)nBc+k zWW~j_Dk4n21<*vr<-wxn1r1**Z$69P(j=_K#k8~#zaq^GS*)g{gsutmLvc~&5tipA zg@>c^Fp_|C#mPWYQuH_|;~5p@;MFS{NlD6z6jyO#PJ&B{x{BjKG=!bvlEVu^r60>K zTJ;GvorcYX%Cj2Xwd4`oH%@#`bAxx>qc&UG*ZLn0JQyf^)aF|C8sFrQRE*BR31)j?JdFwZ?~S58BG@ z$BXU9%k8mZd+gC?PcN6+FTQ9We>PNVdOz=b)%mX3cXqS0du{R8qu+SgMt|P3<)`|O z=7VOSr5xxg2D<(p=r)6GX4f&Z z28zJ}GuUWG`1O;8hB&Nf?SU1o z{(NAoj%w)2FBiOB+wdGz4Z#we*aZ7~5LSsR?oce5>y#xL(Lgelcv2;uW!E9De*g(D zGfO~M%eISb?;floJAAT1rRVxC4%Y2~%0Y95{)P0rUg%aWB!6r^bCyMLFD0 z9N{$443vrs=-@J`|=Y*B+GLY+2}{s;mfiB3VgzZ?{_mtBZX zRf59a`N)MB#3Bwsx&Wx5khg;}U;^?XKMJ5=(q9cBfUd*P6#5P{F*Rtan|MR}4gc-i zw=)vXh(MX9+VVlT542>;F#o=+Ai!%{nkTmR0@vsvGX{6pPgZAD0-SNwF$FWoM#KD+j&lh; zhb6?@G^*GcpTWS@tZ?;C0&*?|$R(h~r=YUflqxH`h_%@1@t;jc**%awO{M{4?HAIB zHxjHmP_6qUC#MmFBj?qOBm1ygJ4`HT3RDEFHK-)mjjlvrg}Y4P#c-b)?zd}x{rsad zk1nr|6vJ^dJZy$fo8j|jxOuBN5USf|LN?cS?O?aied-S<%R^(up|NKlmBK$Q)K3(= z6K{i;P>u~=a+p;r32FClXdt-5=+LF_WS4hWLighXA}EFVk}66C1( zJ$NyI$1cSUg92Y5T4onCz-$obHI2lO9kA3pXxOp!Ry5x?J_d`mA*k+A-@4wSsXLEH z$|pvOCq|y4Kg)lTpL34 z;68xiloQ;H52oiF_(3!TKveQPp3ogHYYwl#M@0ZjHB-Y^PN1!DQ_Kp^$^d3TRJ6Mi z2C``-TiUUbz+@kE_C`m`TO|_vgpdH+CicfipA(K{Lo(X-@y*W2hp>}I!Utr6jV)j3 zs|za&>rCP3`KOUm>&T1N%gLtNcq0D1dn>%dJ8R10bUAjh7`ynar4;^gq5f*Yd)3Au zIJ*=0NyM|<*!DcbmzFjC7vNqWLyqnh1Vzk9f?)Xtl3GBz402EqKFx_~X9N$BhdV>6 z5mNn#RO3+njL$ruw>vnwKKJy6}Jwq2dvk?mn0H$`u^gt_bV_B$T#CjB}R;I7fm zS~7bg>La_)=12??1Wa&iorU~8R>|LEB8hY{Cx4Ia&EI3yd_AFNGOB_}fUwSgzok<3 z&xh)7G;BZ9$?vz?PQx@O(X#JO{zTb*k2l^C=kTNWIt&ok)d+2EF*HrTr276zO}wP0 WUsB_*-PdXQ8vVt`|E6rgHt`=#c3c?% literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/orchestrator/actions/__pycache__/gherkin_actions.cpython-312.pyc b/app/modules/agent/engine/orchestrator/actions/__pycache__/gherkin_actions.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c7237a66ef981523774ebd910d827f723fd3243 GIT binary patch literal 5353 zcmbVQZEO=q8s2Yz#EBgz_6)3odl%9rBCG_>j~mt_@SIwt3a;30fl)&A|x_O#KO@6} z>!L2l6%8KiPlj;M+8B%K@p>C4d*v2YPa$D=}mAubU 
z5}l@C*_jSlREdjFlI<=%N5hzot-WAOyKZhQemCi=(Sp8Lnn8pN5OZHR*$gHBHtp~(Y6#4`#H!nSt7rb$?PCkQ+1)QJpUOAA z_^|21%Hc1M{rTAA6BlwPF67;BW|(hxJX2`xT+>zM3l*CCkoJS}VxwYA1*DKtp#fWq@a#b%(F6B5U z$_hUojZdo{cvkDc_$b3Rjzm4FB7=#<#i-#N6{E6bDo$$e83o|RPgQDoQIUA0s&SRS zjwZw@Ij$NFrT#)hjVBYDk5QYk_f~B1lBsQY97b)&lE2d-4bR zbFR~cw(hJym~(}SEbVn>xT1XnB>eYXj}H#z4i0@aUV((#1w(9tgdHkONSp#F{2U}A zRgj1PB;ZHul??{zqCTg~Q0ODBg$Uv*0Ar4sWA)NG15(M>1Q8>#Y33v1W2)T0B8r^E zO=3B&N|G(j;r1Zu4#_FiMc7IYTbiyVn34;RAk}|Nr)^ThU8>xkB;FroFc~nMsuGaF ztyiRVvZ3lVJXS%~Gpu$)?RS`1aFVf_4scL+!&AzB+JY(n6X{GjN)N~RR141s!$=WT zIewfU1d*EM6QVwm+Jyygz?3u1)L~xMcyUUUqvG`_N8c}XLzglqQY|_NVnqZ4Wrn2e{!$Tm$KYuiwlF@o z72RkkE9Uj2B1MtUZqmqbASD??psh%4f@Lt6$+)Z-983Z!(mKiDI3Dc3c=pV&aB5_@ zZ>S%-uk-C`+>srZLAecDmSzmrE1Zc*)R z+F{0PUxEQ{g5tK_Zd=%!?Fs-l8+#r$Vs3H;k2k}vcI;g|y}bKTN6+^}okJksdn40( zYsYHmQwzyYX79`{wJ%SsXe%h&)0gk+&v%~A3>8{B=H=V+w=G?Zu7%%boBf66{e|Yk zh2~cZ&1Z^rT%&i5wzoE928v#y-uuDOdqdgo<12?(o?e;B?itLET+Ov#``Yz(u@w(j zY^x^!+1$aiUybBF!K^!!wTHHnAD>4i`7tGb4f)d)(EK{4_y!qZ*0sG%X0=nVP&m!h z6aa{Q%B{=9R7i=QV75<}<}0RU2C0z;DQ zH}+wYOOCtL90wu+0d#c&k#Oo2X_&-EQ&mcHnosJ@9orBgvXIm$p!C723Y^C80MuvdUoVdYR!)SPCO}do{=Q1Zzno+8AU{jdw z-}n;$L+y`Xeui4!=8Ub--0?j@J5G|BzSX0zKWP4&{^;n?hv9|pPxjr}xA@Z1^?b*n zM=dX8FNQJ$1$X;mYtFrI>Fmn%!~LfVp4Rz>+YNWM#h%|E`}Ek->E$!|y|3oGUi;ef zdeMe`{$M9|b}dYQ^zzc7T<89k>0ILgW_0BgK79Y`<0Eh8j=Y%-Ue0^QvhFKc`<1O^ zdg9=!)=9d7K9zGxqHgpNM9nuefDvB{DyJ$eVJQI^86D5XOp{m+t5Vg8#g@QW!WYQ- z6jG67)R(o@LYUjEhp@SfF4l^YUIp*f2-+Aby@ubP0#g+yHI`yBB$OSe_$s7qX>kO* zIS*!gxUBBt7h68MvUKQ<@K^1@(Z9TtYdpJYH`gQOsk~=2>%Nq=U)s8v_{cYv@z$?U zMv1DmX%KqQALH8`Xyyk@7=ouTb?uy~ot14&Z`iuBR;edoLo4^cZPXKPi{EAD9TUE`UIb9$Sa(d}HI*#YUOxHP9hIXT z&YogTG{`uV912qjf47OsM~@km2YZJUKkjAM6kLxOHJxOUKCGf@$2b>TX}?{vGqk{dBRBXzf~UgPZLqgLeiON0%<< zcOJ~Q9V+bFz1sck67z-qGy8J;%BlRm*Ye%37dnm<+B?_kZS@UB8k}VuMFRWmAnF@G z7<_N=R&c@p`Tobxoya|RV&zia)n9OVtom%n^PgXRe4saXp!dP$yz4w%lswKN93{>l zzi$T@=4#zL>FuQ%`21j9?l2qi;RaVqCFO|9ntlbo4P1jSMs`8Kfut1cHz!(g@zA8W2U$!Ry 
rsw%(>9EE9JV2U(JlHU+}{!I*jLxjE|`hVd1NYZ)h+J6b0n-2aD8<4@4dIbs;smSP(FO^gTT8^g7|NIp%-JeLvEad z&JBVmcrr+Y$Z?XyeM8U?qQ)t$PX+0aaoni&>7XfO9ye=!V~`0|j8~9^fq04FO>Ysr zSuozAvU0~QR~c``|KbZTsajc449lz>2#XS2GEo$aut*Z1UwM?nw@0IqNEpd*!#*%C za8Z2kcvzH$d3?`y6v=@}mXpV>Mg%UqL$)?#8*-x`IyVS`7$iFVtROMplgM}Maq@@c4FJeHE}FPX{3~^ZyiE?n@ifk{ z!KmOhs0QC2m1HF>+~fH=*7d3+AgPND3iDn4!kj;X!ZQLVOa8fiU2MR^qrjXXde}(B z10$RuNnLngN)Y8PxY-p7^U$N+AI290ut}x=J1xZG%97bJ$2n?w%q#am)?$Q^z z`gKJgEE47;=x}WBrsM3J6l8ANFNLFs6a0Lb^YsP892=Al`|?ZhS&;DK{T=bAYs6Z^ z;H{b!_TkhgQ#bb}8wS6U7#Mo9W-Ixwc`F&K3Wz~atTzPugQhWNR1@9niJ2{DSa}*c z7R!k6MijK}^{6&Lw17l5i1(fXK~R2o#7nEzoJul4T(#sbkT9v5B{2|*2(oIFxG)mb zs)Fu*#9r|l5Iz$icrL&TBK)ZUkBtl+5@gnEQ)ze%vP#RWGz)e}mRW8(1W#B}ZBckT z{+!wfqZCzQK_*d<;gP6x1hiLeEYJIO(JB>*%E*jo(PIPW$Gm2hmW1G>N^$bMYAHyE z{b1LuVkXVh@v2pqf)&IvJlTTmSf0J~y~G;7`Mp_TmGoawT_Zks)x&$a#r=5tgX#3n z-sH~S#|?vz=#=YJ!jiGN6zlek(|zOM;=#0YSJJsF(VwaJzCH9muI)=R=|*3&(U)>{ zB?dF~t?7Divfi5*%D~X%`=R$jOa6~uUOspK!p9euPpxi!H2UlFpPf&inn<3SP)>4+ zlLt}mhP1mq>2A;1sxw=5X6l+VwOcX{_qvs++qFTM>MRNCx}B)3`N7L?znrlBqpVuOL0W>=yZ zWo3*@J1i_RE{#LWwM+8*-+k}9?=K`9hrW{V|9!Jz8>vksY#!O{d&fWoSp>uoo7E$&+CRy=!_TT+hwYmP%Jj#O1&g37q+Z@j+vI{d>G=flPa zjj0z-JZ>CHxrWlNGfCH(lxqZ_YOlS)EHX<@rLpVYw)Ea($-T!`hf?;_O7$6qIfFW2 zoAO|2fH(Z-gJFu`sSimwtW-bo1DZ2T!ETq}ye-hcs)lkv(NEDF0rcpO(~E|8>S#$LCIZ7b-a8)sE{um`Rq7E6w|skENUk z)|@Y{>`PT2PSC1zEWu>jI)7syT-v{ET&aE7@Sx$7ZEJf5{=uc#2NOo9zHIhpwzp)u z5B;WTdjHwv{Cs-@xoccIj3>UAZ2pZ9P zSfzXnz_{;GR+Rh7&!PLN_FGXNXuyEWUnu_u5O@%K9*1XaK$JWaVR_oqE`~kaG%HRC zQoDy0dAK25@k|KQ>|7v>+C4c`_*6r9R;?BVftUQ?F9AmYcEEM6;y@f?`9Ka#CJExj zsHE0NLWqS)TmdrX0%Xv4KxNeg-m)O_1>m5gaNBDv0Sg+!lg$zKYB0?55+;7l1IwEK zhWtjLi4>4PHSVfE3y@u#Y2fKI_k&Dn0jg&}B~!mOQI)ao{HX0G9k)B~bSACOuk=0Y z`E~DSy}#;99(pZfYe?IgleT81W&E-2LS{$zXNMoiia(Ncypb>$5yjqned+Gg$?nt2 z+4Cv;>q_;w!i+!36xifxZd3nX>@F~u2HprzFkvtNys+C|QVPt3APF!cGIqqZ zGUL#Dg|-AZwj$uD6_Wu7)WFb35RUy+Q%K+g(U3|{2d1WD`v!1)$jX9eG9Y`{DGVCf zBh14Y0}Akfvmt4S2|&67L6$?1e5wgwOd-{pZMD=1u#TOD@Gl?(<3y1Fh=8C75-WnF 
znswH3mCVS|jHQ4Xl?nn+&=_fGE7q35krL0!tcIp%p-8o6|11Y&C!=8yP>LjXi3)NK zMQxi=RDRZrSl|z#dIo|%ch}!+TawqMqw-!gxH=Hr>429q7fmMRy&_s9U`R30jF52+kR?$@MbXjT6lKsLQHwH;LFXUIe4GKP0vu<& zZJ5a+R^J-81tjKTjV&dyhIZ<_lon{9WY$SN5V4kl%J=zFIh{LQu|5)ft%0#L#7tJVmB5eGDo zQpj@OFO-MLH-Q*`9&0pz%2@} zkn0g792x=X)ouO{n0I&qd`h(n30yb?5fs1@_)&FjnyCABo190A6|a5+c2#DYmHhZW zX_(DEW1RxOlZ${h?UJRwk_hO@886`{Jc*z1vk>car`(*vv#2&l`z?Z@H;xt~lQwSg+fl=_Gf}%4Ffaqzn4P|CyfR#Dn zARDt+scg(Nwl>cWIpcD2Yk0?|c}~8Tckyn%Zpxx-`VCRwmS>o-d=v;zy}S{u zFOq=IOi3w7n9XD}DQ#kTMEoXQ4z9zAdpbZ-}O zpN38kIt+^-4;dDPm}9pG`^^PMx7VY&rLo%Go})$Fj$H*9I)_)9qvEU>z9Po#yFI6i zObU$iSRG32Vzt;d<^aV;HdziSwhML)vM4Zl6&!VlPoXPh{S8d{o~+vqNoil~IZ&b4 zu0bdoMw(}W!olDK%gyHfd0%XIc5+ByB?xFIfh-7g{MFi?8HJxwx@Hx7Fym5{AAnhZOZe5j|?gAE2$kPm9|rgeK_OTc`Kf7Kagxcpd1`}bRyM$PTBdIVn3g8 zwA|`Tw{|C6yOo~)RUy@SQfbjv*c};rTgKj-vA>wHA6a)^S=i_kDLe(;bJB9fwxVrELA{MhYi-sS1ncRt-b8t_{C= z{Iv_eCvbAbSVa*0L7Qa9HeFXY8zyFOW8_bR%FBb6vt_E>+ z8dsNbHH|A7s@sINU)Ht2Q8dC84x6P8sNNzr3|owrjULK)mfWyZ7%!6>ZjxnXCF zesZJM4D}x!Zevz$ZSkPy4Xr z%YNAL6@S?A>0ffN6A%^=HreWdZ?{y?KcD(BwDDI6?QwQ~yVanY5R|eBRPD)Jg{=!u z>uaVjtNdZ~Js7}dUTT4Aogzu{3!>$>MDuTn;V+1>FNlFZnl6&0<@%}rBXFz9_ str: + item = ctx.artifacts.put(key=key, artifact_type=artifact_type, content=value, meta=meta) + return item.artifact_id + + def get(self, ctx: ExecutionContext, key: str, default=None): + return ctx.artifacts.get_content(key, default) + + def add_evidence(self, ctx: ExecutionContext, *, source_type: str, source_ref: str, snippet: str, score: float = 0.8) -> str: + evidence = EvidenceItem( + evidence_id=f"evidence_{uuid4().hex}", + source_type=source_type, + source_ref=source_ref, + snippet=(snippet or "").strip()[:600], + score=max(0.0, min(1.0, float(score))), + ) + ctx.evidences.put_many([evidence]) + return evidence.evidence_id diff --git a/app/modules/agent/engine/orchestrator/actions/docs_actions.py b/app/modules/agent/engine/orchestrator/actions/docs_actions.py new file mode 100644 index 0000000..b289b74 --- /dev/null +++ 
b/app/modules/agent/engine/orchestrator/actions/docs_actions.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +from app.modules.agent.engine.orchestrator.actions.common import ActionSupport +from app.modules.agent.engine.orchestrator.execution_context import ExecutionContext +from app.modules.agent.engine.orchestrator.models import ArtifactType + + +class DocsActions(ActionSupport): + def extract_change_intents(self, ctx: ExecutionContext) -> list[str]: + text = str(self.get(ctx, "source_doc_text", "") or ctx.task.user_message) + intents = { + "summary": text[:240], + "api": ["Update endpoint behavior contract"], + "logic": ["Adjust reusable business rules"], + "db": ["Reflect schema/table notes if needed"], + "ui": ["Adjust form behavior and validation"], + } + return [self.put(ctx, "change_intents", ArtifactType.STRUCTURED_JSON, intents)] + + def map_to_doc_tree(self, ctx: ExecutionContext) -> list[str]: + targets = [ + "docs/api/increment.md", + "docs/logic/increment.md", + "docs/db/increment.md", + "docs/ui/increment.md", + ] + return [self.put(ctx, "doc_targets", ArtifactType.STRUCTURED_JSON, {"targets": targets})] + + def load_current_docs_context(self, ctx: ExecutionContext) -> list[str]: + files_map = dict(ctx.task.metadata.get("files_map", {}) or {}) + targets = (self.get(ctx, "doc_targets", {}) or {}).get("targets", []) + current = [] + for path in targets: + current.append( + { + "path": path, + "content": str((files_map.get(path) or {}).get("content", "")), + "content_hash": str((files_map.get(path) or {}).get("content_hash", "")), + } + ) + return [self.put(ctx, "current_docs_context", ArtifactType.STRUCTURED_JSON, {"files": current})] + + def generate_doc_updates(self, ctx: ExecutionContext) -> list[str]: + intents = self.get(ctx, "change_intents", {}) or {} + targets = (self.get(ctx, "doc_targets", {}) or {}).get("targets", []) + bundle = [] + for path in targets: + bundle.append( + { + "path": path, + "content": "\n".join( + [ + f"# 
Increment Update: {path}", + "", + "## Scope", + str(intents.get("summary", "")), + "", + "## Changes", + "- Updated according to analytics increment.", + ] + ), + "reason": "align docs with analytics increment", + } + ) + return [self.put(ctx, "generated_doc_bundle", ArtifactType.DOC_BUNDLE, bundle)] + + def cross_file_validation(self, ctx: ExecutionContext) -> list[str]: + bundle = self.get(ctx, "generated_doc_bundle", []) or [] + paths = [str(item.get("path", "")) for item in bundle if isinstance(item, dict)] + has_required = any(path.startswith("docs/api/") for path in paths) and any(path.startswith("docs/logic/") for path in paths) + report = {"paths": paths, "required_core_paths_present": has_required} + return [self.put(ctx, "consistency_report", ArtifactType.STRUCTURED_JSON, report)] + + def build_changeset(self, ctx: ExecutionContext) -> list[str]: + bundle = self.get(ctx, "generated_doc_bundle", []) or [] + changeset = [] + for item in bundle: + if not isinstance(item, dict): + continue + changeset.append( + { + "op": "update", + "path": str(item.get("path", "")).strip(), + "base_hash": "orchestrator-generated", + "proposed_content": str(item.get("content", "")), + "reason": str(item.get("reason", "documentation update")), + "hunks": [], + } + ) + return [self.put(ctx, "final_changeset", ArtifactType.CHANGESET, changeset)] + + def compose_summary(self, ctx: ExecutionContext) -> list[str]: + count = len(self.get(ctx, "final_changeset", []) or []) + text = f"Prepared documentation changeset with {count} files updated." 
+ return [self.put(ctx, "final_answer", ArtifactType.TEXT, text)] diff --git a/app/modules/agent/engine/orchestrator/actions/edit_actions.py b/app/modules/agent/engine/orchestrator/actions/edit_actions.py new file mode 100644 index 0000000..14806f0 --- /dev/null +++ b/app/modules/agent/engine/orchestrator/actions/edit_actions.py @@ -0,0 +1,101 @@ +from __future__ import annotations + +import re + +from app.modules.agent.engine.orchestrator.actions.common import ActionSupport +from app.modules.agent.engine.orchestrator.execution_context import ExecutionContext +from app.modules.agent.engine.orchestrator.models import ArtifactType + + +class EditActions(ActionSupport): + def resolve_target(self, ctx: ExecutionContext) -> list[str]: + message = ctx.task.user_message + files_map = dict(ctx.task.metadata.get("files_map", {}) or {}) + requested = self._extract_path(message) + matched = self._lookup_source(files_map, requested) + if matched: + requested = str(matched.get("path") or requested or "") + if not requested and files_map: + requested = next(iter(files_map.keys())) + payload = {"path": requested or "", "allowed": bool(requested)} + return [self.put(ctx, "resolved_target", ArtifactType.STRUCTURED_JSON, payload)] + + def load_target_context(self, ctx: ExecutionContext) -> list[str]: + files_map = dict(ctx.task.metadata.get("files_map", {}) or {}) + resolved = self.get(ctx, "resolved_target", {}) or {} + path = str(resolved.get("path", "")) + source = dict(self._lookup_source(files_map, path) or {}) + current = { + "path": str(source.get("path", "")) or path, + "content": str(source.get("content", "")), + "content_hash": str(source.get("content_hash", "")), + } + return [self.put(ctx, "target_context", ArtifactType.STRUCTURED_JSON, current)] + + def plan_minimal_patch(self, ctx: ExecutionContext) -> list[str]: + target = self.get(ctx, "target_context", {}) or {} + plan = { + "path": target.get("path", ""), + "intent": "minimal_update", + "instruction": 
ctx.task.user_message[:240], + } + return [self.put(ctx, "patch_plan", ArtifactType.STRUCTURED_JSON, plan)] + + def generate_patch(self, ctx: ExecutionContext) -> list[str]: + target = self.get(ctx, "target_context", {}) or {} + plan = self.get(ctx, "patch_plan", {}) or {} + path = str(target.get("path", "")) + base = str(target.get("content_hash", "") or "orchestrator-generated") + original = str(target.get("content", "")) + note = f"\n\n\n" + proposed = (original + note).strip() if original else note.strip() + changeset = [ + { + "op": "update" if original else "create", + "path": path, + "base_hash": base if original else None, + "proposed_content": proposed, + "reason": "targeted file update", + "hunks": [], + } + ] + return [self.put(ctx, "raw_changeset", ArtifactType.CHANGESET, changeset)] + + def validate_patch_safety(self, ctx: ExecutionContext) -> list[str]: + changeset = self.get(ctx, "raw_changeset", []) or [] + safe = len(changeset) == 1 + report = {"safe": safe, "items": len(changeset), "reason": "single-file patch expected"} + return [self.put(ctx, "patch_validation_report", ArtifactType.STRUCTURED_JSON, report)] + + def finalize_changeset(self, ctx: ExecutionContext) -> list[str]: + report = self.get(ctx, "patch_validation_report", {}) or {} + if not report.get("safe"): + return [self.put(ctx, "final_changeset", ArtifactType.CHANGESET, [])] + changeset = self.get(ctx, "raw_changeset", []) or [] + return [self.put(ctx, "final_changeset", ArtifactType.CHANGESET, changeset)] + + def compose_edit_summary(self, ctx: ExecutionContext) -> list[str]: + count = len(self.get(ctx, "final_changeset", []) or []) + text = f"Prepared targeted edit changeset with {count} item(s)." 
+ return [self.put(ctx, "final_answer", ArtifactType.TEXT, text)] + + def _extract_path(self, text: str) -> str | None: + match = re.search(r"\b[\w./-]+\.(md|txt|rst|yaml|yml|json|toml|ini|cfg)\b", text or "", flags=re.IGNORECASE) + if not match: + return None + return match.group(0).replace("\\", "/").strip() + + def _lookup_source(self, files_map: dict[str, dict], path: str | None) -> dict | None: + if not path: + return None + normalized = str(path).replace("\\", "/").strip() + if not normalized: + return None + source = files_map.get(normalized) + if source: + return source + normalized_low = normalized.lower() + for key, value in files_map.items(): + if str(key).replace("\\", "/").lower() == normalized_low: + return value + return None diff --git a/app/modules/agent/engine/orchestrator/actions/explain_actions.py b/app/modules/agent/engine/orchestrator/actions/explain_actions.py new file mode 100644 index 0000000..eb97284 --- /dev/null +++ b/app/modules/agent/engine/orchestrator/actions/explain_actions.py @@ -0,0 +1,87 @@ +from __future__ import annotations + +from app.modules.agent.engine.orchestrator.actions.common import ActionSupport +from app.modules.agent.engine.orchestrator.execution_context import ExecutionContext +from app.modules.agent.engine.orchestrator.models import ArtifactType + + +class ExplainActions(ActionSupport): + def collect_sources(self, ctx: ExecutionContext) -> list[str]: + rag_context = str(ctx.task.metadata.get("rag_context", "")) + confluence_context = str(ctx.task.metadata.get("confluence_context", "")) + files_map = dict(ctx.task.metadata.get("files_map", {}) or {}) + payload = { + "rag_context": rag_context, + "confluence_context": confluence_context, + "files_count": len(files_map), + } + evidence_ids: list[str] = [] + if rag_context.strip(): + evidence_ids.append( + self.add_evidence( + ctx, + source_type="rag_chunk", + source_ref=ctx.task.rag_session_id, + snippet=rag_context, + score=0.9, + ) + ) + artifact_id = self.put( + 
ctx, + "sources", + ArtifactType.STRUCTURED_JSON, + payload, + meta={"evidence_ids": evidence_ids}, + ) + return [artifact_id] + + def extract_logic(self, ctx: ExecutionContext) -> list[str]: + sources = self.get(ctx, "sources", {}) or {} + message = ctx.task.user_message + logic = { + "request": message, + "assumptions": ["requirements-first"], + "notes": "Use requirements/docs as primary source over code.", + "source_summary": sources, + } + return [self.put(ctx, "logic_model", ArtifactType.STRUCTURED_JSON, logic)] + + def build_sequence(self, ctx: ExecutionContext) -> list[str]: + message = ctx.task.user_message + mermaid = "\n".join( + [ + "```mermaid", + "sequenceDiagram", + "participant User", + "participant Agent", + "participant Docs", + "User->>Agent: " + message[:80], + "Agent->>Docs: Find relevant requirements", + "Docs-->>Agent: Relevant context", + "Agent-->>User: Structured explanation", + "```", + ] + ) + return [self.put(ctx, "sequence_diagram", ArtifactType.TEXT, mermaid)] + + def build_use_cases(self, ctx: ExecutionContext) -> list[str]: + lines = [ + "### Use Cases", + "- Analyze requirement fragments relevant to user question", + "- Reconstruct behavior flow and decision points", + "- Return user-focused explanation with constraints", + ] + return [self.put(ctx, "use_cases", ArtifactType.TEXT, "\n".join(lines))] + + def summarize(self, ctx: ExecutionContext) -> list[str]: + sequence = str(self.get(ctx, "sequence_diagram", "") or "") + use_cases = str(self.get(ctx, "use_cases", "") or "") + answer = "\n\n".join( + [ + "## Summary", + "The requested project part is explained from requirements/docs context.", + sequence, + use_cases, + ] + ) + return [self.put(ctx, "final_answer", ArtifactType.TEXT, answer)] diff --git a/app/modules/agent/engine/orchestrator/actions/gherkin_actions.py b/app/modules/agent/engine/orchestrator/actions/gherkin_actions.py new file mode 100644 index 0000000..25ed754 --- /dev/null +++ 
b/app/modules/agent/engine/orchestrator/actions/gherkin_actions.py @@ -0,0 +1,76 @@ +from __future__ import annotations + +from app.modules.agent.engine.orchestrator.actions.common import ActionSupport +from app.modules.agent.engine.orchestrator.execution_context import ExecutionContext +from app.modules.agent.engine.orchestrator.models import ArtifactType + + +class GherkinActions(ActionSupport): + def extract_increment_scope(self, ctx: ExecutionContext) -> list[str]: + text = str(self.get(ctx, "source_doc_text", "") or ctx.task.user_message) + scope = { + "title": "Increment scope", + "summary": text[:220], + "entities": ["User", "System"], + } + return [self.put(ctx, "increment_scope", ArtifactType.STRUCTURED_JSON, scope)] + + def partition_features(self, ctx: ExecutionContext) -> list[str]: + scope = self.get(ctx, "increment_scope", {}) or {} + groups = [ + {"feature": "Main flow", "goal": scope.get("summary", "")}, + {"feature": "Validation", "goal": "Input validation and error behavior"}, + ] + return [self.put(ctx, "feature_groups", ArtifactType.STRUCTURED_JSON, groups)] + + def generate_gherkin_bundle(self, ctx: ExecutionContext) -> list[str]: + groups = self.get(ctx, "feature_groups", []) or [] + files = [] + for idx, group in enumerate(groups, start=1): + feature_name = str(group.get("feature", f"Feature {idx}")) + content = "\n".join( + [ + f"Feature: {feature_name}", + " Scenario: Happy path", + " Given system is available", + " When user performs increment action", + " Then system applies expected increment behavior", + ] + ) + files.append({"path": f"tests/gherkin/feature_{idx}.feature", "content": content}) + return [self.put(ctx, "gherkin_bundle", ArtifactType.GHERKIN_BUNDLE, files)] + + def lint_gherkin(self, ctx: ExecutionContext) -> list[str]: + bundle = self.get(ctx, "gherkin_bundle", []) or [] + invalid = [] + for item in bundle: + content = str(item.get("content", "")) if isinstance(item, dict) else "" + if "Feature:" not in content or 
"Scenario:" not in content: + invalid.append(str(item.get("path", "unknown"))) + report = {"valid": len(invalid) == 0, "invalid_files": invalid} + return [self.put(ctx, "gherkin_lint_report", ArtifactType.STRUCTURED_JSON, report)] + + def validate_coverage(self, ctx: ExecutionContext) -> list[str]: + bundle = self.get(ctx, "gherkin_bundle", []) or [] + report = {"covered": len(bundle) > 0, "feature_files": len(bundle)} + return [self.put(ctx, "coverage_report", ArtifactType.STRUCTURED_JSON, report)] + + def compose_test_model_summary(self, ctx: ExecutionContext) -> list[str]: + bundle = self.get(ctx, "gherkin_bundle", []) or [] + summary = f"Prepared gherkin model with {len(bundle)} feature file(s)." + changeset = [ + { + "op": "create", + "path": str(item.get("path", "")), + "base_hash": None, + "proposed_content": str(item.get("content", "")), + "reason": "generated gherkin feature", + "hunks": [], + } + for item in bundle + if isinstance(item, dict) + ] + return [ + self.put(ctx, "final_answer", ArtifactType.TEXT, summary), + self.put(ctx, "final_changeset", ArtifactType.CHANGESET, changeset), + ] diff --git a/app/modules/agent/engine/orchestrator/actions/review_actions.py b/app/modules/agent/engine/orchestrator/actions/review_actions.py new file mode 100644 index 0000000..da7e9e3 --- /dev/null +++ b/app/modules/agent/engine/orchestrator/actions/review_actions.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +from urllib.parse import urlparse + +from app.modules.agent.engine.orchestrator.actions.common import ActionSupport +from app.modules.agent.engine.orchestrator.execution_context import ExecutionContext +from app.modules.agent.engine.orchestrator.models import ArtifactType + + +class ReviewActions(ActionSupport): + def fetch_source_doc(self, ctx: ExecutionContext) -> list[str]: + attachment = next((a for a in ctx.task.attachments if a.value), None) + if attachment is None: + text = ctx.task.user_message + source_ref = "inline:message" + else: + 
parsed = urlparse(attachment.value) + source_ref = attachment.value + text = f"Source: {parsed.netloc}\nPath: {parsed.path}\nRequest: {ctx.task.user_message}" + evidence_id = self.add_evidence( + ctx, + source_type="external_doc", + source_ref=source_ref, + snippet=text, + score=0.75, + ) + return [ + self.put( + ctx, + "source_doc_raw", + ArtifactType.TEXT, + text, + meta={"source_ref": source_ref, "evidence_ids": [evidence_id]}, + ) + ] + + def normalize_document(self, ctx: ExecutionContext) -> list[str]: + raw = str(self.get(ctx, "source_doc_raw", "") or "") + normalized = "\n".join(line.rstrip() for line in raw.splitlines()).strip() + return [self.put(ctx, "source_doc_text", ArtifactType.TEXT, normalized)] + + def structural_check(self, ctx: ExecutionContext) -> list[str]: + text = str(self.get(ctx, "source_doc_text", "") or "") + required = ["цель", "границ", "риски", "api", "данные"] + found = [token for token in required if token in text.lower()] + findings = { + "required_sections": required, + "found_markers": found, + "missing_markers": [token for token in required if token not in found], + } + return [self.put(ctx, "structural_findings", ArtifactType.STRUCTURED_JSON, findings)] + + def semantic_consistency_check(self, ctx: ExecutionContext) -> list[str]: + text = str(self.get(ctx, "source_doc_text", "") or "") + contradictions = [] + if "без изменений" in text.lower() and "новый" in text.lower(): + contradictions.append("Contains both 'no changes' and 'new behavior' markers.") + payload = {"contradictions": contradictions, "status": "ok" if not contradictions else "needs_attention"} + return [self.put(ctx, "semantic_findings", ArtifactType.STRUCTURED_JSON, payload)] + + def architecture_fit_check(self, ctx: ExecutionContext) -> list[str]: + text = str(self.get(ctx, "source_doc_text", "") or "") + files_count = len(dict(ctx.task.metadata.get("files_map", {}) or {})) + payload = { + "architecture_fit": "medium" if files_count == 0 else "high", + "notes": 
"Evaluate fit against existing docs and interfaces.", + "markers": ["integration"] if "integr" in text.lower() else [], + } + return [self.put(ctx, "architecture_findings", ArtifactType.STRUCTURED_JSON, payload)] + + def optimization_check(self, ctx: ExecutionContext) -> list[str]: + text = str(self.get(ctx, "source_doc_text", "") or "") + has_perf = any(token in text.lower() for token in ("latency", "performance", "оптим")) + payload = { + "optimization_considered": has_perf, + "recommendation": "Add explicit non-functional targets." if not has_perf else "Optimization criteria present.", + } + return [self.put(ctx, "optimization_findings", ArtifactType.STRUCTURED_JSON, payload)] + + def compose_review_report(self, ctx: ExecutionContext) -> list[str]: + structural = self.get(ctx, "structural_findings", {}) or {} + semantic = self.get(ctx, "semantic_findings", {}) or {} + architecture = self.get(ctx, "architecture_findings", {}) or {} + optimization = self.get(ctx, "optimization_findings", {}) or {} + report = "\n".join( + [ + "## Findings", + f"- Missing structure markers: {', '.join(structural.get('missing_markers', [])) or 'none'}", + f"- Contradictions: {len(semantic.get('contradictions', []))}", + f"- Architecture fit: {architecture.get('architecture_fit', 'unknown')}", + f"- Optimization: {optimization.get('recommendation', 'n/a')}", + "", + "## Recommendations", + "- Clarify boundaries and data contracts.", + "- Add explicit error and rollback behavior.", + "- Add measurable non-functional requirements.", + ] + ) + return [ + self.put(ctx, "review_report", ArtifactType.REVIEW_REPORT, report), + self.put(ctx, "final_answer", ArtifactType.TEXT, report), + ] diff --git a/app/modules/agent/engine/orchestrator/artifact_store.py b/app/modules/agent/engine/orchestrator/artifact_store.py new file mode 100644 index 0000000..83c668e --- /dev/null +++ b/app/modules/agent/engine/orchestrator/artifact_store.py @@ -0,0 +1,50 @@ +from __future__ import annotations + +from 
uuid import uuid4 + +from app.modules.agent.engine.orchestrator.models import ArtifactItem, ArtifactType + + +class ArtifactStore: + def __init__(self) -> None: + self._by_id: dict[str, ArtifactItem] = {} + self._by_key: dict[str, ArtifactItem] = {} + + def put(self, *, key: str, artifact_type: ArtifactType, content=None, meta: dict | None = None) -> ArtifactItem: + item_meta = dict(meta or {}) + if content is not None and not isinstance(content, str): + item_meta.setdefault("value", content) + item = ArtifactItem( + artifact_id=f"artifact_{uuid4().hex}", + key=key, + type=artifact_type, + content=self._as_content(content), + meta=item_meta, + ) + self._by_id[item.artifact_id] = item + self._by_key[key] = item + return item + + def get(self, key: str) -> ArtifactItem | None: + return self._by_key.get(key) + + def get_content(self, key: str, default=None): + item = self.get(key) + if item is None: + return default + if item.content is not None: + return item.content + return item.meta.get("value", default) + + def has(self, key: str) -> bool: + return key in self._by_key + + def all_items(self) -> list[ArtifactItem]: + return list(self._by_id.values()) + + def _as_content(self, value): + if value is None: + return None + if isinstance(value, str): + return value + return None diff --git a/app/modules/agent/engine/orchestrator/evidence_store.py b/app/modules/agent/engine/orchestrator/evidence_store.py new file mode 100644 index 0000000..7197978 --- /dev/null +++ b/app/modules/agent/engine/orchestrator/evidence_store.py @@ -0,0 +1,14 @@ +from __future__ import annotations + +from app.modules.agent.engine.orchestrator.models import EvidenceItem + + +class EvidenceStore: + def __init__(self) -> None: + self._items: list[EvidenceItem] = [] + + def put_many(self, items: list[EvidenceItem]) -> None: + self._items.extend(items) + + def all_items(self) -> list[EvidenceItem]: + return list(self._items) diff --git a/app/modules/agent/engine/orchestrator/execution_context.py 
b/app/modules/agent/engine/orchestrator/execution_context.py new file mode 100644 index 0000000..f054d28 --- /dev/null +++ b/app/modules/agent/engine/orchestrator/execution_context.py @@ -0,0 +1,30 @@ +from __future__ import annotations + +from collections.abc import Awaitable, Callable +from dataclasses import dataclass +from typing import Any + +from app.modules.agent.engine.orchestrator.artifact_store import ArtifactStore +from app.modules.agent.engine.orchestrator.evidence_store import EvidenceStore +from app.modules.agent.engine.orchestrator.models import ExecutionPlan, TaskSpec + +ProgressCallback = Callable[[str, str, str, dict | None], Awaitable[None] | None] +GraphResolver = Callable[[str, str], Any] +GraphInvoker = Callable[[Any, dict, str], dict] + + +@dataclass +class ExecutionContext: + task: TaskSpec + plan: ExecutionPlan + graph_resolver: GraphResolver + graph_invoker: GraphInvoker + progress_cb: ProgressCallback | None = None + artifacts: ArtifactStore | None = None + evidences: EvidenceStore | None = None + + def __post_init__(self) -> None: + if self.artifacts is None: + self.artifacts = ArtifactStore() + if self.evidences is None: + self.evidences = EvidenceStore() diff --git a/app/modules/agent/engine/orchestrator/execution_engine.py b/app/modules/agent/engine/orchestrator/execution_engine.py new file mode 100644 index 0000000..5d87aab --- /dev/null +++ b/app/modules/agent/engine/orchestrator/execution_engine.py @@ -0,0 +1,115 @@ +from __future__ import annotations + +import asyncio +import inspect +import time + +from app.modules.agent.engine.orchestrator.execution_context import ExecutionContext +from app.modules.agent.engine.orchestrator.models import PlanStatus, PlanStep, StepResult, StepStatus +from app.modules.agent.engine.orchestrator.quality_gates import QualityGateRunner +from app.modules.agent.engine.orchestrator.step_registry import StepRegistry + + +class ExecutionEngine: + def __init__(self, step_registry: StepRegistry, gates: 
QualityGateRunner) -> None: + self._steps = step_registry + self._gates = gates + + async def run(self, ctx: ExecutionContext) -> list[StepResult]: + ctx.plan.status = PlanStatus.RUNNING + step_results: list[StepResult] = [] + + for step in ctx.plan.steps: + dep_issue = self._dependency_issue(step, step_results) + if dep_issue: + step_results.append( + StepResult( + step_id=step.step_id, + status=StepStatus.SKIPPED, + warnings=[dep_issue], + ) + ) + continue + + result = await self._run_with_retry(step, ctx) + step_results.append(result) + if result.status in {StepStatus.FAILED, StepStatus.RETRY_EXHAUSTED} and step.on_failure == "fail": + ctx.plan.status = PlanStatus.FAILED + return step_results + + passed, global_messages = self._gates.check_global(ctx.plan.global_gates, ctx) + if not passed: + step_results.append( + StepResult( + step_id="global_gates", + status=StepStatus.FAILED, + warnings=global_messages, + ) + ) + ctx.plan.status = PlanStatus.FAILED + return step_results + + if any(item.status in {StepStatus.FAILED, StepStatus.RETRY_EXHAUSTED} for item in step_results): + ctx.plan.status = PlanStatus.FAILED + elif any(item.status == StepStatus.SKIPPED for item in step_results): + ctx.plan.status = PlanStatus.PARTIAL + else: + ctx.plan.status = PlanStatus.COMPLETED + return step_results + + async def _run_with_retry(self, step: PlanStep, ctx: ExecutionContext) -> StepResult: + max_attempts = max(1, int(step.retry.max_attempts or 1)) + attempt = 0 + last_error: Exception | None = None + + while attempt < max_attempts: + attempt += 1 + started_at = time.monotonic() + await self._emit_progress(ctx, f"orchestrator.step.{step.step_id}", step.title) + + try: + artifact_ids = await self._steps.execute(step, ctx) + passed, gate_messages = self._gates.check_step(step, ctx) + if not passed: + raise RuntimeError(";".join(gate_messages) or "step_quality_gate_failed") + + elapsed = int((time.monotonic() - started_at) * 1000) + return StepResult( + step_id=step.step_id, + 
status=StepStatus.SUCCESS, + produced_artifact_ids=artifact_ids, + warnings=gate_messages, + duration_ms=elapsed, + ) + except Exception as exc: + last_error = exc + if attempt < max_attempts and step.retry.backoff_sec > 0: + await asyncio.sleep(step.retry.backoff_sec) + + elapsed = int((time.monotonic() - started_at) * 1000) + return StepResult( + step_id=step.step_id, + status=StepStatus.RETRY_EXHAUSTED if max_attempts > 1 else StepStatus.FAILED, + error_code="step_execution_failed", + error_message=str(last_error) if last_error else "step_execution_failed", + duration_ms=elapsed, + ) + + def _dependency_issue(self, step: PlanStep, results: list[StepResult]) -> str | None: + if not step.depends_on: + return None + by_step = {item.step_id: item for item in results} + for dep in step.depends_on: + dep_result = by_step.get(dep) + if dep_result is None: + return f"dependency_not_executed:{dep}" + if dep_result.status != StepStatus.SUCCESS: + return f"dependency_not_success:{dep}:{dep_result.status.value}" + return None + + async def _emit_progress(self, ctx: ExecutionContext, stage: str, message: str) -> None: + if ctx.progress_cb is None: + return + result = ctx.progress_cb(stage, message, "task_progress", {"layer": "orchestrator"}) + if inspect.isawaitable(result): + await result diff --git a/app/modules/agent/engine/orchestrator/metrics_persister.py b/app/modules/agent/engine/orchestrator/metrics_persister.py new file mode 100644 index 0000000..618c73f --- /dev/null +++ b/app/modules/agent/engine/orchestrator/metrics_persister.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +import logging + +from app.modules.agent.repository import AgentRepository + +LOGGER = logging.getLogger(__name__) + + +class MetricsPersister: + def __init__(self, repository: AgentRepository) -> None: + self._repository = repository + + def save( + self, + *, + task_id: str, + dialog_session_id: str, + rag_session_id: str, + scenario: str, + domain_id: str, + process_id: str, + 
quality: dict, + ) -> None: + try: + self._repository.save_quality_metrics( + task_id=task_id, + dialog_session_id=dialog_session_id, + rag_session_id=rag_session_id, + scenario=scenario, + domain_id=domain_id, + process_id=process_id, + quality=quality, + ) + except Exception: + LOGGER.exception("Failed to persist quality metrics: task_id=%s", task_id) diff --git a/app/modules/agent/engine/orchestrator/models/__init__.py b/app/modules/agent/engine/orchestrator/models/__init__.py new file mode 100644 index 0000000..e1402a3 --- /dev/null +++ b/app/modules/agent/engine/orchestrator/models/__init__.py @@ -0,0 +1,51 @@ +from app.modules.agent.engine.orchestrator.models.plan import ( + ArtifactSpec, + ArtifactType, + ExecutionPlan, + PlanStatus, + PlanStep, + QualityGateRef, + RetryPolicy, +) +from app.modules.agent.engine.orchestrator.models.result import ( + ArtifactItem, + EvidenceItem, + OrchestratorResult, + StepResult, + StepStatus, +) +from app.modules.agent.engine.orchestrator.models.task_spec import ( + AttachmentRef, + FileRef, + OutputContract, + OutputSection, + RoutingMeta, + Scenario, + SourcePolicy, + TaskConstraints, + TaskSpec, +) + +__all__ = [ + "ArtifactItem", + "ArtifactSpec", + "ArtifactType", + "AttachmentRef", + "EvidenceItem", + "ExecutionPlan", + "FileRef", + "OrchestratorResult", + "OutputContract", + "OutputSection", + "PlanStatus", + "PlanStep", + "QualityGateRef", + "RetryPolicy", + "RoutingMeta", + "Scenario", + "SourcePolicy", + "StepResult", + "StepStatus", + "TaskConstraints", + "TaskSpec", +] diff --git a/app/modules/agent/engine/orchestrator/models/__pycache__/__init__.cpython-312.pyc b/app/modules/agent/engine/orchestrator/models/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55042f6a3b90f7d767c5d8ee8a43e36239ecaa93 GIT binary patch literal 1049 zcmb7>zi-n(6vu7nhx5xxn>J|~5M8sx;)j?JQUwJuKuc4WTdb4wnjYNQ*4;U3M*a$B z*!Wx67;j~P4XCO}ow#S0HbN}5S1=#WNmnh$d-@7HNq#X^Rf&h=2q|uEM*b 
zM|xt7tci89F8ZV|Hpqt9B%5MD24YBtVvB6K&dixsKe=RkQ5)5Npp21kI=fP{V;bv8 z2Jv-tw8+2&d#4cRnx)ABr-^AHCz|S9nMS#W%yeJplrz0}PBk3Dv1uQImWzXwvv@J8 zT~~aeL72|ooJ}E#p@{nXGCl#NCDo}s1eJ4bS`!V~>biVz)HK1a)-*m5kZ63zf5v$6 zV0S;)S+1X?i7gas6O{1;VpMMRq_%}*_7XHTjY$j%l`J)#Nt(+TN}Z;6MAbW60kyD1 z+x~C^Eg20Y&QjVjIJXZ5KY7eViPey1c)wTh}g1tHM(W) z(kvT`beeNeV>+|Fjv<+`1jgx#S9k)r8fSJOE6NHz|9RWozfo0LOrmVN=(FhVO0ao& zqiWh387b>zXE%x8X}Ibsp0J*VQN$9aqiCEhE*g)@Ymem} wtHHIudSLOh;=1nAYq;LhvG|2p_Uo>9?pQ3lElgOPBYtlW+unO$-nJM20EBEUx&QzG literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/orchestrator/models/__pycache__/plan.cpython-312.pyc b/app/modules/agent/engine/orchestrator/models/__pycache__/plan.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82717376d10d0f236ced02cedf339c4f61d74e98 GIT binary patch literal 4107 zcmb_fO>7(25#AM-uL`5v>U>MQyL z?=zLEzv!<9iUA=jX0WOj)oQ31s)mbUf%(lyHCl|yQjat%k-(P{3DOw#JyE<_igAIf zfQJs@y#fyd9--|K+ zQArymm-}Z7Y7*$DE|sXIGs7-FL@5S{ zzZfKeqS^?iR34pXu2I%YZlO`5Tyg0`m&X`gGwA0UqcxkkJXE@;TNUb1mxqa6(pKvh zF)5E%?osx@u=eC$$7S`BTW6GL_Z{2Hr9xcQG)u2iP2*urtJ zPYXPj&)msoex&6yw-@sT9=dUJIyakH$`p7wy?8^rzMMBFfsUDImJzE4Z8qpiI?r5j1Yj5TT{^;#_n zni6%KDO6ObwxFgnRWo&KvesZxm{pJnFisN4 zmr^Iv-%NLg&Nml2eJ7jQPXDRqTxa-VGuIiJ_+pWr+UxQ^pJDhmtYQhgs_t+Z;oGGqkmBl6RpC%fK{eon#9uR%M( z`2`2r1#*U;s3wx!m|{1`9E0754H&iO9jDFDo7z=scJK&Db!oNcI;;or$f{m?V3*69Lrb}odYmJ$_n{#}mt`Lyxq;+}L4JrBlDFXh z>cNjCn)93ITS}rcI@Y|kIoMXlJZoZ?xPQ4uEuEi})vyoDH`z3+W?I z%LnH04m?-^Tr-Gw(p+6N?GjuAm0arBSny0Bc0UX2I=cG|Vo2VBzjG4EA$N%NZOk8a z1Z;i$#-Tv??i6Qt_X=zfj~LFM2l@ndyis2}{K z?^s;~st7+BKphz@M#&IJ!}~2`T{FbV29uPnI3f_(DXZOcO5r=dh7&ZJtRRq^9 z3>*e)p%N;dmqTJFhXZGY$FR(SNYXH5oYcU7PZ1(Ma=4%nE=a-Q*e59Gkt`uuMk2Ds z9ppYmQbh6-B;!a9(rgw*B$zgx-vD_vt|IY`xlZrEMy?aT)KV^WPM(3A<$g;!vps(H z*@gD_mFC=LsHI%lKKsTqw|(|nb7m`gx}{v(J~#2T(mwZ5b9O6srlow;dH>_)t*!od zTgu1ViPOLTX*+SXnR#@zrCjZdp1_>dQciT<`LH>+)pxn2e7JpS>Q74hQl`1EH8{~$ zGTy3jt;a|8A3*b+)7e2(?1q8#zHjwG`=7G zkwXz4qR}p0>>$D$gQ+1N%ZO-!$ihH;B?!@#FbIRL?mPg7;fso^&Te@Ro(UI@=_Op= 
zw=b+gaSKC=@`?u>QTnaNg%9F1OAy*C+>a6NutgYgJx<{LfC2bs5}q0>SRXP&Up1-2 z?&6HmifOOvrst@+RQxrbnuI=V0trRp`7BPsDk!fYxrgM4S89kMS%ANT&0h6Nk@DlB z0_Y!VDMOu+1m1p$^M6UCzDcB-Gmqw4N~#krr_#;& zt${b%O4{=*oG+Ie!6!($j_(twzfjQK%e_;%%LtwD6lHA*aq z7?yS3KHq}}!sPA)VNyI0CcOv3WcLFBTfhThQk<)UbrYVfxIw+gLJ~zR-b69v#Wmh+ zB4~w;ju`Kydd-{lV`f7;EKQtW0Qr|smgT=n=f0JO|0bRJr*!!n>GHSIt(`zX{@&&f zey!|C!0ZeJY~=8QvwAOLF-oiIQwdl|;({E})}s@i9FH(!uB{KcPT@!UcJt1_&A;2#VeUNWJ*f_nloyl;R>;fX}`k zGqb~apLu5fp2=tuJb&Hzi@m2x(zoawKj7mdAN~#SbBRbqcBGnY$THdyCsK5q!bsGTMzW?Enux`mR4r|!g{?Z7TGq(QQbf8bk@z!-BxsIC_Z0C# z$rupvB*ZnE*o%hIK@m+sG~E{+644AqvwhLLh~^+V&=(zU4eCSx!Qph7r!3d4`<8Fl zT@PHPka*g8N10c&9QwGpOdpyJR{uS%_@4Rj?V@ED2(uqjw_r6I1<2rRix0z8v?*$eHPz062^ zC9p;A2&R++98wV!!1akuJ#5njz)>RAK44*fh$j8<==yLBnFl$ER4lZtOQ!n zQzc2F?Wgu4;KF0Xfr&Aa!E7t5=XkpG&?eNai0VVZ>g;zm`BLUF`WXHvkHWv@DH^sh zZSrI>r_8k+lhiBdSAd636F)S&$x~jv$tu+J zfqz^J9Y(i+C@vDkuZqb6gvR$D0?i{;$|DZtu`Q==0iC@{ozZnY!G@qA%OeaUBmj6? zl*h!++Et!2&AzQ;nkCXWHf^L_#FXXzk6`Ngen{;l@fH-p0+Wwq37US{Cw+Cl!}^QAAAy7?>Z($4j+awQlKhdNW?P!;6EXOQ{H6~?;8L|xUB z3MUMxD|niSC!nh6r9v+uyfk{ifF#xSGbh|hf#$?W4s)%9K5(i9xck`@gxNI0B>>Qy zP$;1AJogCjqk@#s+E9KCPtEBCC}!PA}1K|gMOOG1$;;w zDUyLvvVt9Oo(WGdfD9NJG6=b$Fqb8H@Pr!O8)JhGT;yH#(UU4#Byqm0!V>|{cD zCa`ND*C5OcMU6*opk0bIL6zZLG;7`y4I;=9lbPC?_l=oSNFTKi!YV2 z=kpz9@nGumi{?a%B5c6jrK>o?{<_o zdKWIX@9gF~%EjK)3|QkGWhS(`%1ppkoXP9i`X|Wz7+dkIhtotT5%4We6Z`53Pm?4L zx#Ke@4vKS{OMx1xfl~dTUz(&KCzPrc)iZqPZr=%eo@pT;RLk`q0S^SZ1_ZfI?=PD? 
zSrL~R4=x(-z~2(e@FZx4SCnATO*lQk@$}69CjkmZAUnteb`#+a!aRZyoHex06@3+B z2-w0yZk#Fl*kpTU=XOV#JeZmNO6ktL-(G#0o9-y@_X|u%xp6{)(#zCDSD6n=h~uv7 zBY0EVYWhtEHzkIr3Rfsx1sPr;xC-hF+lR%#VR#|tiAIZ9u5VXZ9AkJs2HJTIZaQ<} zO=nKrbmoGa&RqXZ2V1~RXAZYcZ~%;8KM(FgHOrejUGEeQj0Z<>6%t3O5Mi+q#KN3i ztXGNmg0FwT%Fs=QpZ5vC_fc7vzm{gclScj}P5)cE_D|{Bchc>zrK?A(B463f?~)@4 tjH7H^)_3QRBzPSSC*^n0hS$->w0wDY>_~#w(UdB``+V%b61>DB{tqbo7E}NL literal 0 HcmV?d00001 diff --git a/app/modules/agent/engine/orchestrator/models/__pycache__/task_spec.cpython-312.pyc b/app/modules/agent/engine/orchestrator/models/__pycache__/task_spec.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b064594248c90ab9fb6dcd6c210e7b9a7548da2 GIT binary patch literal 4791 zcmb7IOK%&=5uPE3@25n{lI4eFNr^(lUcte}i6607wkTP)9$rbi@zN1y$LuCG_Hc&o z8QP{!4D0{`a?#B>ZgR@PLg1tSLoQCp1Jyu~4T2!>&48H8o>J9AQKX=?cLz{SbyZJu z=Bui&tM%7dEG)tGm$g6YyCF&XHxBkMzpKc5L0OVsNJJvCA(iBuETir*JSA_=D|D}+ zlzcf~$)EF!v0?;D!CbHu%7sedTv(Po(wszm&n4og5$b)ZxGU$P!X5y7kOp6R+t!${ zhQJ!`vc`oq0@i4kH6g4qu*SQrNnuTZHQ8nD6V^Vk_IFwPg*64%beA$L*$&7cybEy=6poS`H3$VApTsi8ps5B za5Ya&jp-JT(x+uZ(@nLkF^4BLQ!}cLp0`y-AM5l9PZ0|S1!k4hj*rJ2jTNawiAss? 
z@K|w!vWKun$$j4uEGrz`Tb2qaq zw_%ON<(cdP56@+nvMbXI>Zj9ROM8gWx6agOHf8pdX3(coGxV`qX4V6mcWm|X&!#ke zlra4Iow8VdgW3+$9E;%$YS>ebW=eR&1?CYSXnxd0=ws84qvbbB@KGzY!2Jb_-Pg(2 zGWXG^@D-cq+c|h|G7o8IRH&J!YK0k-XGbSD9H+dm7j%dtqlL+Jt~k{)ZqvFDPktbA0sZK4ZDm#G_#h0H#ZDe2IQuZK--$}cvQ(xU| zq|VhAz6oEhE0>#*`13YeIQ?j}X2J9Q(SlMz>;z(>XmAt|_yr-I1GDhqfE*izRoNL7 zSa@71Yt9CjcmTf60dRS=t*9HCy|I)D9)$?|F+M+u;w*}DC=S7pK^uzW@Mr%BL@x}n zXYj9JB%HIu1l%ezCTzDoT$)fhTg zyS44BE9aX1>DuhpjXD;g>!lyv^ zA`k%xOAtmOF$zOiA}|s`s>U*L9$mF6EKl!PhMun;#HHtLFx>D)@*eK|FQg)D>wl70 zrHp3@cB?3(k19H&$SO7uIW)ha;>N+1JTRgQUN3^*2Co=e=|0h}ZVq(+`bYa_gu@Pi zs7Zex&DLCE0Gdh=TF@$nqZYJ0P;Pbex}n?7y$)-2zX@_y=Us4sZ-aNBI?7~ zU+2Lx?1fBss%z{Q@RK~*UPXmXP~_FyD4M-G)OEU~JymT1n5~v6Q*DQq-Qe8qD3QS@fmF}ea*XrHfp=`_RS(E*>n{VQG_9Tzb|IRO+9)oRUo79yv z%~4Q?{s84a=H?Wx(Gx|8AYSsG1M>YjSI6 zTny95POOuOQ6K?-7YQWRV0*j)?GJ1>0H}SCaI-G@Q7BjIiTVTrLhZL=`^duU@{m9t z(6--VHoB2u)L}Y3h{W5c=t^QxR7WpSARN`EdG-mIH$x&<;D^_F)a_xQ?{PNlrA+L* zIbj~2bR%~gwM7)mC@`*E!6n%p9QS58yNNawNF_FYL3cR6Ier?LdZMnJZVn9A=C|xR zbOEDhMEo$6Cr2=<`MNT)dwTrUOyl%Ln4W1U7v0F>x<^MA6YWEh1-3ZU8;}6(dqJ=Z zj;Mw+5guEvIOU473LO$Os|QhP?|0a`ZC>rvT}fjPiS@+9@+wgVR8CNBw}z-b07}Mt zenT^h)TR!!4%mK_G^bs5!FJgJM+d$;9%8%v9KP}};HzjuaSi@#Wab{g!?;sMZ`GAs z&4J^!n_Hxz9CyLQXO0H*hqMm3H`2h4uao#eIJb1S4sjPyNPWosymNr)=5hx{&9t9T z)(aD0=62)IZ+0dHH@d4y3LNfJ9VH}z8@^?O1Jya$ybP9o5# ze*kbnVxIy~EQjJyuHQu)iaGcbO>PhLeP7m=)cuIHj8 z1-^907whuH$pH9{p^prLHq@Dw=$e%z$H6y@zUqn0NghPHT!q8LLB6;Z7!bz{XqWnk zt{GMlXwJ5wQn7=IGp%Q=Xcfoe3ijVT3SAIWOE4J@GU8+cdC)!&ik^lC7_cU^Y2BIA zb8LVF(>lipdow0-tKAr{Lt6$dvMqKp9&>j}we0p5NLxGt{aT0oG5$OOzXw!?jP!|qBP%v`we7~`H+#mOs!L+rv_;4pH6Tt%4+WDY?FEuDKp zzRJol8LC&ZpM!Ykm1X%K(%65bfqzOP|CP@FO*-H5`{mPH_V!Fm0;Scj$cgRwmISv} z+$W#d(pwVTTFHPsx}9lBaBHOl^7!^e)W=c3v<-9M)=K;2VSE;Dt)wD ExecutionPlan: + plan = template.model_copy(deep=True) + plan.plan_id = f"{task.task_id}:{template.template_id}" + plan.task_id = task.task_id + plan.status = PlanStatus.DRAFT + plan.variables = { + "scenario": task.scenario.value, + "route": { + "domain_id": 
task.routing.domain_id, + "process_id": task.routing.process_id, + "confidence": task.routing.confidence, + }, + } + + for step in plan.steps: + step.timeout_sec = max(1, min(step.timeout_sec, task.constraints.step_timeout_sec)) + step.retry.max_attempts = max(1, min(step.retry.max_attempts, task.constraints.max_retries_per_step)) + if step.side_effect == "write" and not task.constraints.allow_writes: + step.on_failure = "fail" + + if len(plan.steps) > task.constraints.max_steps: + plan.steps = plan.steps[: task.constraints.max_steps] + + return plan diff --git a/app/modules/agent/engine/orchestrator/plan_validator.py b/app/modules/agent/engine/orchestrator/plan_validator.py new file mode 100644 index 0000000..ffdb9cd --- /dev/null +++ b/app/modules/agent/engine/orchestrator/plan_validator.py @@ -0,0 +1,79 @@ +from __future__ import annotations + +from app.modules.agent.engine.orchestrator.models import ExecutionPlan, TaskSpec + + +class PlanValidator: + def validate(self, plan: ExecutionPlan, task: TaskSpec) -> list[str]: + errors: list[str] = [] + if not plan.steps: + errors.append("execution_plan_has_no_steps") + return errors + + if len(plan.steps) > task.constraints.max_steps: + errors.append("execution_plan_exceeds_max_steps") + + errors.extend(self._validate_step_ids(plan)) + errors.extend(self._validate_dependencies(plan)) + errors.extend(self._validate_side_effects(plan, task)) + errors.extend(self._validate_step_shape(plan)) + return errors + + def _validate_step_ids(self, plan: ExecutionPlan) -> list[str]: + seen: set[str] = set() + out: list[str] = [] + for step in plan.steps: + if step.step_id in seen: + out.append(f"duplicate_step_id:{step.step_id}") + seen.add(step.step_id) + return out + + def _validate_dependencies(self, plan: ExecutionPlan) -> list[str]: + out: list[str] = [] + valid_ids = {step.step_id for step in plan.steps} + for step in plan.steps: + for dep in step.depends_on: + if dep not in valid_ids: + 
out.append(f"unknown_dependency:{step.step_id}->{dep}") + + # lightweight cycle detection for directed graph + graph = {step.step_id: list(step.depends_on) for step in plan.steps} + visiting: set[str] = set() + visited: set[str] = set() + + def dfs(node: str) -> bool: + if node in visiting: + return True + if node in visited: + return False + visiting.add(node) + for dep in graph.get(node, []): + if dfs(dep): + return True + visiting.remove(node) + visited.add(node) + return False + + if any(dfs(node) for node in graph): + out.append("dependency_cycle_detected") + return out + + def _validate_side_effects(self, plan: ExecutionPlan, task: TaskSpec) -> list[str]: + if task.constraints.allow_writes: + return [] + out: list[str] = [] + for step in plan.steps: + if step.side_effect == "write": + out.append(f"write_step_not_allowed:{step.step_id}") + return out + + def _validate_step_shape(self, plan: ExecutionPlan) -> list[str]: + out: list[str] = [] + for step in plan.steps: + if step.executor == "graph" and not step.graph_id: + out.append(f"graph_step_missing_graph_id:{step.step_id}") + if step.retry.max_attempts < 1: + out.append(f"invalid_retry_attempts:{step.step_id}") + if step.timeout_sec < 1: + out.append(f"invalid_step_timeout:{step.step_id}") + return out diff --git a/app/modules/agent/engine/orchestrator/quality_gates.py b/app/modules/agent/engine/orchestrator/quality_gates.py new file mode 100644 index 0000000..4230804 --- /dev/null +++ b/app/modules/agent/engine/orchestrator/quality_gates.py @@ -0,0 +1,116 @@ +from __future__ import annotations + +from app.modules.agent.engine.orchestrator.execution_context import ExecutionContext +from app.modules.agent.engine.orchestrator.models import PlanStep, QualityGateRef + + +class QualityGateRunner: + def check_step(self, step: PlanStep, ctx: ExecutionContext) -> tuple[bool, list[str]]: + return self._run(step.quality_gates, step=step, ctx=ctx) + + def check_global(self, gates: list[QualityGateRef], ctx: 
ExecutionContext) -> tuple[bool, list[str]]: + return self._run(gates, step=None, ctx=ctx) + + def _run(self, gates: list[QualityGateRef], *, step: PlanStep | None, ctx: ExecutionContext) -> tuple[bool, list[str]]: + failures: list[str] = [] + warnings: list[str] = [] + for gate in gates: + ok, details = self._check(gate.gate_id, step=step, ctx=ctx) + if ok: + continue + if gate.blocking: + failures.extend(details) + else: + warnings.extend(details) + return len(failures) == 0, failures + warnings + + def _check(self, gate_id: str, *, step: PlanStep | None, ctx: ExecutionContext) -> tuple[bool, list[str]]: + checks = { + "required_outputs": lambda: self._required_outputs(step, ctx), + "non_empty_answer_or_changeset": lambda: self._non_empty_output(ctx), + "changeset_required_for_write": lambda: self._changeset_required(ctx), + "changeset_schema": lambda: self._changeset_schema(ctx), + "evidence_required": lambda: self._evidence_required(ctx), + "review_report_schema": lambda: self._review_schema(ctx), + "cross_file_consistency": lambda: self._cross_file_consistency(ctx), + "target_path_must_exist_or_be_allowed": lambda: self._target_path_gate(ctx), + "minimal_patch_policy": lambda: self._minimal_patch_policy(ctx), + "gherkin_syntax_lint": lambda: self._gherkin_lint(ctx), + "coverage_of_change_intents": lambda: self._coverage_gate(ctx), + "explain_format_hint": lambda: self._explain_hint(ctx), + } + fn = checks.get(gate_id) + if fn is None: + return True, [] + return fn() + + def _required_outputs(self, step: PlanStep | None, ctx: ExecutionContext) -> tuple[bool, list[str]]: + if step is None: + return True, [] + missing = [f"missing_required_artifact:{spec.key}" for spec in step.outputs if spec.required and not ctx.artifacts.has(spec.key)] + return len(missing) == 0, missing + + def _non_empty_output(self, ctx: ExecutionContext) -> tuple[bool, list[str]]: + answer = str(ctx.artifacts.get_content("final_answer", "") or "").strip() + changeset = 
ctx.artifacts.get_content("final_changeset", []) or [] + ok = bool(answer) or (isinstance(changeset, list) and len(changeset) > 0) + return ok, [] if ok else ["empty_final_output"] + + def _changeset_required(self, ctx: ExecutionContext) -> tuple[bool, list[str]]: + if not ctx.task.constraints.allow_writes: + return True, [] + changeset = ctx.artifacts.get_content("final_changeset", []) or [] + ok = isinstance(changeset, list) and len(changeset) > 0 + return ok, [] if ok else ["changeset_required_for_write"] + + def _changeset_schema(self, ctx: ExecutionContext) -> tuple[bool, list[str]]: + changeset = ctx.artifacts.get_content("final_changeset", []) or [] + if not isinstance(changeset, list): + return False, ["changeset_not_list"] + for idx, item in enumerate(changeset): + if not isinstance(item, dict): + return False, [f"changeset_item_not_object:{idx}"] + if not item.get("op") or not item.get("path"): + return False, [f"changeset_item_missing_fields:{idx}"] + return True, [] + + def _evidence_required(self, ctx: ExecutionContext) -> tuple[bool, list[str]]: + if not ctx.task.source_policy.require_evidence: + return True, [] + evidences = ctx.evidences.all_items() + return len(evidences) > 0, ([] if evidences else ["no_evidence_collected"]) + + def _review_schema(self, ctx: ExecutionContext) -> tuple[bool, list[str]]: + report = str(ctx.artifacts.get_content("review_report", "") or "") + ok = "## Findings" in report and "## Recommendations" in report + return ok, [] if ok else ["review_report_missing_sections"] + + def _cross_file_consistency(self, ctx: ExecutionContext) -> tuple[bool, list[str]]: + report = ctx.artifacts.get_content("consistency_report", {}) or {} + ok = bool(report.get("required_core_paths_present")) + return ok, [] if ok else ["cross_file_consistency_failed"] + + def _target_path_gate(self, ctx: ExecutionContext) -> tuple[bool, list[str]]: + target = ctx.artifacts.get_content("resolved_target", {}) or {} + ok = bool(str(target.get("path", 
"")).strip()) + return ok, [] if ok else ["target_path_not_resolved"] + + def _minimal_patch_policy(self, ctx: ExecutionContext) -> tuple[bool, list[str]]: + report = ctx.artifacts.get_content("patch_validation_report", {}) or {} + ok = bool(report.get("safe")) + return ok, [] if ok else ["patch_validation_failed"] + + def _gherkin_lint(self, ctx: ExecutionContext) -> tuple[bool, list[str]]: + report = ctx.artifacts.get_content("gherkin_lint_report", {}) or {} + ok = bool(report.get("valid")) + return ok, [] if ok else ["gherkin_lint_failed"] + + def _coverage_gate(self, ctx: ExecutionContext) -> tuple[bool, list[str]]: + report = ctx.artifacts.get_content("coverage_report", {}) or {} + ok = bool(report.get("covered")) + return ok, [] if ok else ["coverage_check_failed"] + + def _explain_hint(self, ctx: ExecutionContext) -> tuple[bool, list[str]]: + answer = str(ctx.artifacts.get_content("final_answer", "") or "") + ok = "```mermaid" in answer or "sequenceDiagram" in answer + return ok, [] if ok else ["hint:explain_answer_missing_mermaid_block"] diff --git a/app/modules/agent/engine/orchestrator/quality_metrics.py b/app/modules/agent/engine/orchestrator/quality_metrics.py new file mode 100644 index 0000000..b3f7ce8 --- /dev/null +++ b/app/modules/agent/engine/orchestrator/quality_metrics.py @@ -0,0 +1,116 @@ +from __future__ import annotations + +import re + +from app.modules.agent.engine.orchestrator.execution_context import ExecutionContext +from app.modules.agent.engine.orchestrator.models import StepResult + + +class QualityMetricsCalculator: + def build(self, ctx: ExecutionContext, step_results: list[StepResult]) -> dict: + answer = str(ctx.artifacts.get_content("final_answer", "") or "") + changeset = ctx.artifacts.get_content("final_changeset", []) or [] + evidences = ctx.evidences.all_items() + + faithfulness = self._faithfulness(answer=answer, changeset=changeset, evidence_count=len(evidences)) + coverage = self._coverage(ctx=ctx, answer=answer, 
changeset=changeset) + status = self._status(faithfulness["score"], coverage["score"]) + + return { + "faithfulness": faithfulness, + "coverage": coverage, + "status": status, + "steps": { + "total": len(ctx.plan.steps), + "completed": len([item for item in step_results if item.status.value == "success"]), + }, + } + + def _faithfulness(self, *, answer: str, changeset, evidence_count: int) -> dict: + claims_total = self._estimate_claims(answer, changeset) + if claims_total <= 0: + claims_total = 1 + + support_capacity = min(claims_total, evidence_count * 3) + claims_supported = support_capacity if evidence_count > 0 else 0 + score = claims_supported / claims_total + unsupported = max(0, claims_total - claims_supported) + + return { + "score": round(score, 4), + "claims_total": claims_total, + "claims_supported": claims_supported, + "claims_unsupported": unsupported, + "evidence_items": evidence_count, + } + + def _coverage(self, *, ctx: ExecutionContext, answer: str, changeset) -> dict: + required = [section.name for section in ctx.task.output_contract.sections if section.required] + if not required: + required = ["final_output"] + + covered: list[str] = [] + for item in required: + if self._is_item_covered(item=item, ctx=ctx, answer=answer, changeset=changeset): + covered.append(item) + + missing = [item for item in required if item not in covered] + score = len(covered) / len(required) + + return { + "score": round(score, 4), + "required_items": required, + "covered_items": covered, + "missing_items": missing, + "required_count": len(required), + "covered_count": len(covered), + } + + def _status(self, faithfulness: float, coverage: float) -> str: + if faithfulness >= 0.75 and coverage >= 0.85: + return "ok" + if faithfulness >= 0.55 and coverage >= 0.6: + return "needs_review" + return "fail" + + def _estimate_claims(self, answer: str, changeset) -> int: + lines = [line.strip() for line in answer.splitlines() if line.strip()] + bullet_claims = len([line for line 
in lines if line.startswith("-") or line.startswith("*")]) + sentence_claims = len([part for part in re.split(r"[.!?]\s+", answer) if part.strip()]) + + changeset_claims = 0 + if isinstance(changeset, list): + for item in changeset: + if isinstance(item, dict): + reason = str(item.get("reason", "")).strip() + if reason: + changeset_claims += 1 + else: + reason = str(getattr(item, "reason", "")).strip() + if reason: + changeset_claims += 1 + + return max(bullet_claims, min(sentence_claims, 12), changeset_claims) + + def _is_item_covered(self, *, item: str, ctx: ExecutionContext, answer: str, changeset) -> bool: + name = (item or "").strip().lower() + if name == "final_output": + return bool(answer.strip()) or (isinstance(changeset, list) and len(changeset) > 0) + if name in {"changeset", "final_changeset"}: + return isinstance(changeset, list) and len(changeset) > 0 + if name in {"sequence_diagram", "mermaid"}: + sequence = str(ctx.artifacts.get_content("sequence_diagram", "") or "").strip() + return "```mermaid" in answer or bool(sequence) + if name == "use_cases": + if ctx.artifacts.has("use_cases"): + return True + low = answer.lower() + return "use case" in low or "сценар" in low + if name in {"summary", "findings", "recommendations", "gherkin_bundle", "review_report"}: + if ctx.artifacts.has(name): + return True + if name == "gherkin_bundle": + bundle = ctx.artifacts.get_content("gherkin_bundle", []) or [] + return isinstance(bundle, list) and len(bundle) > 0 + return name.replace("_", " ") in answer.lower() + return ctx.artifacts.has(name) diff --git a/app/modules/agent/engine/orchestrator/result_assembler.py b/app/modules/agent/engine/orchestrator/result_assembler.py new file mode 100644 index 0000000..57430bb --- /dev/null +++ b/app/modules/agent/engine/orchestrator/result_assembler.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +from app.modules.agent.engine.orchestrator.execution_context import ExecutionContext +from 
class ResultAssembler:
    """Builds the final OrchestratorResult from execution-context artifacts."""

    def __init__(self, quality: QualityMetricsCalculator | None = None) -> None:
        # A metrics calculator may be injected for tests; otherwise default.
        self._quality = quality or QualityMetricsCalculator()

    def assemble(self, ctx: ExecutionContext, step_results: list[StepResult]) -> OrchestratorResult:
        """Combine answer, changeset, routing/plan metadata and quality metrics."""
        final_answer = str(ctx.artifacts.get_content("final_answer", "") or "").strip() or None
        normalized = self._normalize_changeset(ctx.artifacts.get_content("final_changeset", []) or [])
        quality_report = self._quality.build(ctx, step_results)
        successes = [res for res in step_results if res.status.value == "success"]

        meta = {
            "scenario": ctx.task.scenario.value,
            "plan": {
                "plan_id": ctx.plan.plan_id,
                "template_id": ctx.plan.template_id,
                "template_version": ctx.plan.template_version,
                "status": ctx.plan.status.value,
            },
            "route": {
                "domain_id": ctx.task.routing.domain_id,
                "process_id": ctx.task.routing.process_id,
                "confidence": ctx.task.routing.confidence,
                "reason": ctx.task.routing.reason,
                "fallback_used": ctx.task.routing.fallback_used,
            },
            "orchestrator": {
                "steps_total": len(ctx.plan.steps),
                "steps_success": len(successes),
            },
            "quality": quality_report,
        }
        return OrchestratorResult(answer=final_answer, changeset=normalized, meta=meta, steps=step_results)

    def _normalize_changeset(self, value) -> list[ChangeItem]:
        """Coerce raw changeset entries into validated ChangeItem models.

        Non-list input yields []; dict entries failing validation are dropped
        (best effort), matching a lenient graph output contract.
        """
        if not isinstance(value, list):
            return []
        normalized: list[ChangeItem] = []
        for entry in value:
            if isinstance(entry, ChangeItem):
                normalized.append(entry)
            elif isinstance(entry, dict):
                try:
                    normalized.append(ChangeItem.model_validate(entry))
                except Exception:
                    # Best-effort: skip entries that fail schema validation.
                    continue
        return normalized
class OrchestratorService:
    """Coordinates plan building, validation, execution and result assembly."""

    def __init__(
        self,
        templates: ScenarioTemplateRegistry | None = None,
        compiler: PlanCompiler | None = None,
        validator: PlanValidator | None = None,
        step_registry: StepRegistry | None = None,
        gates: QualityGateRunner | None = None,
        engine: ExecutionEngine | None = None,
        assembler: ResultAssembler | None = None,
    ) -> None:
        # Every collaborator can be injected (tests, alternate wiring);
        # the defaults assemble the standard pipeline.
        self._templates = templates or ScenarioTemplateRegistry()
        self._compiler = compiler or PlanCompiler()
        self._validator = validator or PlanValidator()
        self._registry = step_registry or StepRegistry()
        self._gates = gates or QualityGateRunner()
        self._engine = engine or ExecutionEngine(self._registry, self._gates)
        self._assembler = assembler or ResultAssembler()

    async def run(
        self,
        *,
        task: TaskSpec,
        graph_resolver: GraphResolver,
        graph_invoker: GraphInvoker,
        progress_cb: ProgressCallback | None = None,
    ) -> OrchestratorResult:
        """Execute the full orchestration pipeline for a single task.

        Raises AppError when the compiled plan fails validation, or when the
        execution engine leaves the plan in FAILED status.
        """
        await self._emit_progress(progress_cb, "orchestrator.plan", "Building execution plan.")
        plan = self._compiler.compile(self._templates.build(task), task)

        validation_errors = self._validator.validate(plan, task)
        if validation_errors:
            raise AppError(
                code="invalid_execution_plan",
                desc=f"Execution plan validation failed: {'; '.join(validation_errors)}",
                module=ModuleName.AGENT,
            )

        plan.status = PlanStatus.VALIDATED
        ctx = ExecutionContext(
            task=task,
            plan=plan,
            graph_resolver=graph_resolver,
            graph_invoker=graph_invoker,
            progress_cb=progress_cb,
        )

        await self._emit_progress(progress_cb, "orchestrator.run", "Executing plan steps.")
        step_results = await self._engine.run(ctx)
        if plan.status == PlanStatus.FAILED:
            # Collect a "step_id:reason" summary for every non-successful step.
            failures = [
                f"{step.step_id}:{step.error_message or ','.join(step.warnings)}"
                for step in step_results
                if step.status.value != "success"
            ]
            raise AppError(
                code="execution_plan_failed",
                desc=f"Execution plan failed: {'; '.join(failures)}",
                module=ModuleName.AGENT,
            )

        result = self._assembler.assemble(ctx, step_results)
        await self._emit_progress(progress_cb, "orchestrator.done", "Execution plan completed.")
        return result

    async def _emit_progress(self, progress_cb: ProgressCallback | None, stage: str, message: str) -> None:
        """Invoke the optional progress callback, awaiting it when it is async."""
        if progress_cb is None:
            return
        outcome = progress_cb(stage, message, "task_progress", {"layer": "orchestrator"})
        if inspect.isawaitable(outcome):
            await outcome
class StepRegistry:
    """Maps plan-step action ids onto executable callables.

    Function-executor steps dispatch through the ``self._functions`` table;
    graph-executor steps delegate to a resolved graph via the execution
    context's resolver/invoker callables.
    """

    def __init__(self) -> None:
        # Scenario action bundles; each exposes plain functions that take an
        # ExecutionContext and return the produced artifact ids.
        explain = ExplainActions()
        review = ReviewActions()
        docs = DocsActions()
        edits = EditActions()
        gherkin = GherkinActions()

        # Dispatch table: action_id -> callable. Keys must match the
        # action_ids used by ScenarioTemplateRegistry's plan templates.
        self._functions: dict[str, StepFn] = {
            "collect_state": self._collect_state,
            "finalize_graph_output": self._finalize_graph_output,
            "collect_sources": explain.collect_sources,
            "extract_logic": explain.extract_logic,
            "build_sequence": explain.build_sequence,
            "build_use_cases": explain.build_use_cases,
            "summarize": explain.summarize,
            "fetch_source_doc": review.fetch_source_doc,
            "normalize_document": review.normalize_document,
            "structural_check": review.structural_check,
            "semantic_consistency_check": review.semantic_consistency_check,
            "architecture_fit_check": review.architecture_fit_check,
            "optimization_check": review.optimization_check,
            "compose_review_report": review.compose_review_report,
            "extract_change_intents": docs.extract_change_intents,
            "map_to_doc_tree": docs.map_to_doc_tree,
            "load_current_docs_context": docs.load_current_docs_context,
            "generate_doc_updates": docs.generate_doc_updates,
            "cross_file_validation": docs.cross_file_validation,
            "build_changeset": docs.build_changeset,
            "compose_summary": docs.compose_summary,
            "resolve_target": edits.resolve_target,
            "load_target_context": edits.load_target_context,
            "plan_minimal_patch": edits.plan_minimal_patch,
            "generate_patch": edits.generate_patch,
            "validate_patch_safety": edits.validate_patch_safety,
            "finalize_changeset": edits.finalize_changeset,
            "compose_edit_summary": edits.compose_edit_summary,
            "extract_increment_scope": gherkin.extract_increment_scope,
            "partition_features": gherkin.partition_features,
            "generate_gherkin_bundle": gherkin.generate_gherkin_bundle,
            "lint_gherkin": gherkin.lint_gherkin,
            "validate_coverage": gherkin.validate_coverage,
            "compose_test_model_summary": gherkin.compose_test_model_summary,
        }

    async def execute(self, step: PlanStep, ctx: ExecutionContext) -> list[str]:
        """Run one plan step and return the ids of the artifacts it produced.

        Raises RuntimeError for an action_id with no registered function.
        """
        if step.executor == "graph":
            return await self._execute_graph_step(step, ctx)
        fn = self._functions.get(step.action_id)
        if fn is None:
            raise RuntimeError(f"Unknown function action_id: {step.action_id}")
        return fn(ctx)

    def _collect_state(self, ctx: ExecutionContext) -> list[str]:
        """Snapshot task inputs into the 'agent_state' artifact read by graph steps."""
        state = {
            "task_id": ctx.task.task_id,
            # NOTE(review): "project_id" is filled from rag_session_id —
            # confirm this mapping is intended.
            "project_id": ctx.task.rag_session_id,
            "message": ctx.task.user_message,
            "progress_key": ctx.task.task_id,
            "rag_context": str(ctx.task.metadata.get("rag_context", "")),
            "confluence_context": str(ctx.task.metadata.get("confluence_context", "")),
            "files_map": dict(ctx.task.metadata.get("files_map", {}) or {}),
        }
        item = ctx.artifacts.put(key="agent_state", artifact_type=ArtifactType.STRUCTURED_JSON, content=state)
        return [item.artifact_id]

    async def _execute_graph_step(self, step: PlanStep, ctx: ExecutionContext) -> list[str]:
        """Resolve and invoke a graph, storing its output as 'graph_result'.

        graph_id semantics: "route" (default) uses the task's routing metadata;
        "domain/process" selects an explicit graph; anything else raises.
        """
        graph_key = step.graph_id or "route"
        if graph_key == "route":
            domain_id = ctx.task.routing.domain_id
            process_id = ctx.task.routing.process_id
        elif "/" in graph_key:
            domain_id, process_id = graph_key.split("/", 1)
        else:
            raise RuntimeError(f"Unsupported graph_id: {graph_key}")

        graph = ctx.graph_resolver(domain_id, process_id)
        state = ctx.artifacts.get_content("agent_state", {}) or {}

        # Expose the progress callback to graph internals for the duration of
        # the run; always unregister, even on failure.
        if ctx.progress_cb is not None:
            progress_registry.register(ctx.task.task_id, ctx.progress_cb)
        try:
            # The invoker is synchronous; run it off the event loop thread.
            result = await asyncio.to_thread(ctx.graph_invoker, graph, state, ctx.task.dialog_session_id)
        finally:
            if ctx.progress_cb is not None:
                progress_registry.unregister(ctx.task.task_id)

        item = ctx.artifacts.put(key="graph_result", artifact_type=ArtifactType.STRUCTURED_JSON, content=result)
        return [item.artifact_id]

    def _finalize_graph_output(self, ctx: ExecutionContext) -> list[str]:
        """Split 'graph_result' into the 'final_answer'/'final_changeset' artifacts.

        Raises RuntimeError when the graph produced a non-dict result.
        """
        raw = ctx.artifacts.get_content("graph_result", {}) or {}
        if not isinstance(raw, dict):
            raise RuntimeError("graph_result must be an object")

        answer = raw.get("answer")
        changeset = raw.get("changeset") or []
        output = [
            ctx.artifacts.put(
                key="final_answer",
                artifact_type=ArtifactType.TEXT,
                # A missing answer becomes an empty string, not "None".
                content=(str(answer) if answer is not None else ""),
            ).artifact_id,
            ctx.artifacts.put(
                key="final_changeset",
                artifact_type=ArtifactType.CHANGESET,
                content=changeset,
            ).artifact_id,
        ]
        return output
class TaskSpecBuilder:
    """Builds a normalized TaskSpec from raw request inputs.

    Detects the execution scenario, derives constraints and the output
    contract for it, and maps loosely-typed attachment/file dicts into models.
    """

    def build(
        self,
        *,
        task_id: str,
        dialog_session_id: str,
        rag_session_id: str,
        mode: str,
        message: str,
        route: RoutingMeta,
        attachments: list[dict],
        files: list[dict],
        rag_context: str,
        confluence_context: str,
        files_map: dict[str, dict],
    ) -> TaskSpec:
        """Assemble the TaskSpec; scenario drives constraints and contract."""
        scenario = self._detect_scenario(mode=mode, message=message, route=route)
        output_contract = self._output_contract(scenario)
        constraints = self._constraints_for(scenario)
        metadata = {
            "rag_context": rag_context,
            "confluence_context": confluence_context,
            "files_map": files_map,
        }
        return TaskSpec(
            task_id=task_id,
            dialog_session_id=dialog_session_id,
            rag_session_id=rag_session_id,
            mode=mode,
            user_message=message,
            scenario=scenario,
            routing=route,
            attachments=self._map_attachments(attachments),
            files=self._map_files(files),
            constraints=constraints,
            output_contract=output_contract,
            metadata=metadata,
        )

    def _detect_scenario(self, *, mode: str, message: str, route: RoutingMeta) -> Scenario:
        """Pick a scenario: explicit mode wins, then text markers, then routing.

        The check order is significant — earlier rules shadow later ones.
        Marker strings are intentionally bilingual (English/Russian).
        """
        mode_key = (mode or "").strip().lower()
        text = (message or "").strip().lower()

        if mode_key == "analytics_review":
            return Scenario.ANALYTICS_REVIEW
        if "gherkin" in text or "cucumber" in text:
            return Scenario.GHERKIN_MODEL
        if any(token in text for token in ("review analytics", "ревью аналитики", "проведи ревью")):
            return Scenario.ANALYTICS_REVIEW
        if any(token in text for token in ("сформируй документацию", "документацию из аналитики", "generate docs")):
            return Scenario.DOCS_FROM_ANALYTICS
        if any(token in text for token in ("точечн", "измени файл", "targeted edit", "patch file")):
            return Scenario.TARGETED_EDIT
        if route.domain_id == "project" and route.process_id == "edits":
            return Scenario.TARGETED_EDIT
        if route.domain_id == "docs" and route.process_id == "generation":
            return Scenario.DOCS_FROM_ANALYTICS
        if route.domain_id == "project" and route.process_id == "qa" and self._looks_like_explain_request(text):
            return Scenario.EXPLAIN_PART
        if route.domain_id == "project" and route.process_id == "qa" and "review" in text:
            return Scenario.ANALYTICS_REVIEW
        return Scenario.GENERAL_QA

    def _looks_like_explain_request(self, text: str) -> bool:
        """True when the (lowercased) text contains an explain/diagram marker.

        Includes transliterated Russian ("obiasni", "kak rabotaet") alongside
        Cyrillic forms.
        """
        markers = (
            "explain",
            "how it works",
            "sequence",
            "diagram",
            "obiasni",
            "kak rabotaet",
            "kak ustroeno",
            "объясни",
            "как работает",
            "как устроен",
            "диаграм",
        )
        return any(marker in text for marker in markers)

    def _map_attachments(self, attachments: list[dict]) -> list[AttachmentRef]:
        """Convert raw attachment dicts to AttachmentRef, skipping empty values.

        Unknown types are coerced to "http_url".
        """
        mapped: list[AttachmentRef] = []
        for item in attachments:
            # Accept either "url" or "value" as the attachment locator.
            value = str(item.get("url") or item.get("value") or "").strip()
            if not value:
                continue
            raw_type = str(item.get("type") or "http_url").strip().lower()
            attachment_type = raw_type if raw_type in {"confluence_url", "http_url", "file_ref"} else "http_url"
            mapped.append(AttachmentRef(type=attachment_type, value=value))
        return mapped

    def _map_files(self, files: list[dict]) -> list[FileRef]:
        """Convert raw file dicts to FileRef, skipping entries without a path.

        Backslashes are normalized to forward slashes (Windows-style paths).
        """
        mapped: list[FileRef] = []
        for item in files:
            path = str(item.get("path") or "").replace("\\", "/").strip()
            if not path:
                continue
            mapped.append(
                FileRef(
                    path=path,
                    content=str(item.get("content") or ""),
                    content_hash=str(item.get("content_hash") or ""),
                )
            )
        return mapped

    def _constraints_for(self, scenario: Scenario) -> TaskConstraints:
        """Write-capable scenarios get a larger step/time budget; others read-only."""
        if scenario in {Scenario.DOCS_FROM_ANALYTICS, Scenario.TARGETED_EDIT, Scenario.GHERKIN_MODEL}:
            return TaskConstraints(allow_writes=True, max_steps=16, max_retries_per_step=2, step_timeout_sec=120)
        return TaskConstraints(allow_writes=False, max_steps=12, max_retries_per_step=2, step_timeout_sec=90)

    def _output_contract(self, scenario: Scenario) -> OutputContract:
        """Return the per-scenario output contract (result type and sections)."""
        if scenario == Scenario.EXPLAIN_PART:
            return OutputContract(
                result_type="answer",
                sections=[
                    OutputSection(name="sequence_diagram", format="mermaid"),
                    OutputSection(name="use_cases", format="markdown"),
                    OutputSection(name="summary", format="markdown"),
                ],
            )
        if scenario == Scenario.ANALYTICS_REVIEW:
            return OutputContract(
                result_type="review_report",
                sections=[
                    OutputSection(name="findings", format="markdown"),
                    OutputSection(name="recommendations", format="markdown"),
                ],
            )
        if scenario in {Scenario.DOCS_FROM_ANALYTICS, Scenario.TARGETED_EDIT}:
            return OutputContract(result_type="changeset", sections=[OutputSection(name="changeset", format="changeset")])
        if scenario == Scenario.GHERKIN_MODEL:
            return OutputContract(
                result_type="gherkin_bundle",
                sections=[OutputSection(name="gherkin_bundle", format="gherkin")],
            )
        # Default (GENERAL_QA and anything unmatched): a markdown summary answer.
        return OutputContract(result_type="answer", sections=[OutputSection(name="summary", format="markdown")])
class ScenarioTemplateRegistry:
    """Builds the declarative ExecutionPlan template for each scenario.

    Step/artifact ids here must stay in sync with StepRegistry's dispatch
    table and with the gate ids understood by the quality-gate runner.
    """

    def build(self, task: TaskSpec) -> ExecutionPlan:
        """Dispatch to the scenario-specific template builder (default: general QA)."""
        builders = {
            Scenario.EXPLAIN_PART: self._explain,
            Scenario.ANALYTICS_REVIEW: self._review,
            Scenario.DOCS_FROM_ANALYTICS: self._docs,
            Scenario.TARGETED_EDIT: self._edit,
            Scenario.GHERKIN_MODEL: self._gherkin,
            Scenario.GENERAL_QA: self._general,
        }
        return builders.get(task.scenario, self._general)(task)

    def _general(self, task: TaskSpec) -> ExecutionPlan:
        """General QA: collect state, run the routed graph, finalize its output."""
        steps = [
            self._step("collect_state", "Collect state", "collect_state", outputs=[self._out("agent_state", ArtifactType.STRUCTURED_JSON)]),
            self._step(
                "execute_route_graph",
                "Execute selected graph",
                "execute_route_graph",
                executor="graph",
                graph_id="route",
                depends_on=["collect_state"],
                outputs=[self._out("graph_result", ArtifactType.STRUCTURED_JSON)],
                gates=[self._gate("required_outputs")],
            ),
            self._step(
                "finalize_graph_output",
                "Finalize graph output",
                "finalize_graph_output",
                depends_on=["execute_route_graph"],
                outputs=[self._out("final_answer", ArtifactType.TEXT, required=False)],
                gates=[self._gate("non_empty_answer_or_changeset")],
            ),
        ]
        return self._plan(task, "general_qa_v1", steps, [self._gate("non_empty_answer_or_changeset")])

    def _explain(self, task: TaskSpec) -> ExecutionPlan:
        """Explain-part: sources -> logic model -> diagram + use cases -> summary."""
        steps = [
            self._step("collect_sources", "Collect sources", "collect_sources", outputs=[self._out("sources", ArtifactType.STRUCTURED_JSON)]),
            self._step("extract_logic", "Extract logic", "extract_logic", depends_on=["collect_sources"], outputs=[self._out("logic_model", ArtifactType.STRUCTURED_JSON)]),
            self._step("build_sequence", "Build sequence", "build_sequence", depends_on=["extract_logic"], outputs=[self._out("sequence_diagram", ArtifactType.TEXT)]),
            self._step("build_use_cases", "Build use cases", "build_use_cases", depends_on=["extract_logic"], outputs=[self._out("use_cases", ArtifactType.TEXT)]),
            self._step("summarize", "Summarize", "summarize", depends_on=["build_sequence", "build_use_cases"], outputs=[self._out("final_answer", ArtifactType.TEXT)]),
        ]
        return self._plan(task, "explain_part_v1", steps, [self._gate("evidence_required"), self._gate("non_empty_answer_or_changeset")])

    def _review(self, task: TaskSpec) -> ExecutionPlan:
        """Analytics review: fetch + normalize doc, run four parallel checks, compose report."""
        steps = [
            self._step("fetch_source_doc", "Fetch source doc", "fetch_source_doc", outputs=[self._out("source_doc_raw", ArtifactType.TEXT)], side_effect="external"),
            self._step("normalize_document", "Normalize document", "normalize_document", depends_on=["fetch_source_doc"], outputs=[self._out("source_doc_text", ArtifactType.TEXT)]),
            self._step("structural_check", "Structural check", "structural_check", depends_on=["normalize_document"], outputs=[self._out("structural_findings", ArtifactType.STRUCTURED_JSON)]),
            self._step("semantic_consistency_check", "Semantic check", "semantic_consistency_check", depends_on=["normalize_document"], outputs=[self._out("semantic_findings", ArtifactType.STRUCTURED_JSON)]),
            self._step("architecture_fit_check", "Architecture fit", "architecture_fit_check", depends_on=["normalize_document"], outputs=[self._out("architecture_findings", ArtifactType.STRUCTURED_JSON)]),
            self._step("optimization_check", "Optimization check", "optimization_check", depends_on=["normalize_document"], outputs=[self._out("optimization_findings", ArtifactType.STRUCTURED_JSON)]),
            self._step(
                "compose_review_report",
                "Compose review report",
                "compose_review_report",
                depends_on=["structural_check", "semantic_consistency_check", "architecture_fit_check", "optimization_check"],
                outputs=[self._out("review_report", ArtifactType.REVIEW_REPORT), self._out("final_answer", ArtifactType.TEXT)],
                gates=[self._gate("review_report_schema")],
            ),
        ]
        return self._plan(task, "analytics_review_v1", steps, [self._gate("evidence_required"), self._gate("non_empty_answer_or_changeset")])

    def _docs(self, task: TaskSpec) -> ExecutionPlan:
        """Docs-from-analytics: intents -> doc tree mapping -> updates -> changeset."""
        steps = [
            self._step("fetch_source_doc", "Fetch source doc", "fetch_source_doc", outputs=[self._out("source_doc_raw", ArtifactType.TEXT)], side_effect="external"),
            self._step("normalize_document", "Normalize document", "normalize_document", depends_on=["fetch_source_doc"], outputs=[self._out("source_doc_text", ArtifactType.TEXT)]),
            self._step("extract_change_intents", "Extract intents", "extract_change_intents", depends_on=["normalize_document"], outputs=[self._out("change_intents", ArtifactType.STRUCTURED_JSON)]),
            self._step("map_to_doc_tree", "Map to doc tree", "map_to_doc_tree", depends_on=["extract_change_intents"], outputs=[self._out("doc_targets", ArtifactType.STRUCTURED_JSON)]),
            self._step("load_current_docs_context", "Load current docs", "load_current_docs_context", depends_on=["map_to_doc_tree"], outputs=[self._out("current_docs_context", ArtifactType.STRUCTURED_JSON)]),
            self._step("generate_doc_updates", "Generate doc updates", "generate_doc_updates", depends_on=["load_current_docs_context"], outputs=[self._out("generated_doc_bundle", ArtifactType.DOC_BUNDLE)], side_effect="write"),
            self._step("cross_file_validation", "Cross-file validation", "cross_file_validation", depends_on=["generate_doc_updates"], outputs=[self._out("consistency_report", ArtifactType.STRUCTURED_JSON)], gates=[self._gate("cross_file_consistency")]),
            self._step("build_changeset", "Build changeset", "build_changeset", depends_on=["cross_file_validation"], outputs=[self._out("final_changeset", ArtifactType.CHANGESET)], side_effect="write"),
            self._step("compose_summary", "Compose summary", "compose_summary", depends_on=["build_changeset"], outputs=[self._out("final_answer", ArtifactType.TEXT)]),
        ]
        return self._plan(task, "docs_from_analytics_v1", steps, [self._gate("changeset_required_for_write"), self._gate("changeset_schema")])

    def _edit(self, task: TaskSpec) -> ExecutionPlan:
        """Targeted edit: resolve target, plan minimal patch, validate safety, finalize."""
        steps = [
            self._step("resolve_target", "Resolve target", "resolve_target", outputs=[self._out("resolved_target", ArtifactType.STRUCTURED_JSON)], gates=[self._gate("target_path_must_exist_or_be_allowed")]),
            self._step("load_target_context", "Load target context", "load_target_context", depends_on=["resolve_target"], outputs=[self._out("target_context", ArtifactType.STRUCTURED_JSON)]),
            self._step("plan_minimal_patch", "Plan minimal patch", "plan_minimal_patch", depends_on=["load_target_context"], outputs=[self._out("patch_plan", ArtifactType.STRUCTURED_JSON)]),
            self._step("generate_patch", "Generate patch", "generate_patch", depends_on=["plan_minimal_patch"], outputs=[self._out("raw_changeset", ArtifactType.CHANGESET)], side_effect="write"),
            self._step("validate_patch_safety", "Validate patch", "validate_patch_safety", depends_on=["generate_patch"], outputs=[self._out("patch_validation_report", ArtifactType.STRUCTURED_JSON)], gates=[self._gate("minimal_patch_policy")]),
            self._step("finalize_changeset", "Finalize changeset", "finalize_changeset", depends_on=["validate_patch_safety"], outputs=[self._out("final_changeset", ArtifactType.CHANGESET)], side_effect="write"),
            self._step("compose_edit_summary", "Compose summary", "compose_edit_summary", depends_on=["finalize_changeset"], outputs=[self._out("final_answer", ArtifactType.TEXT)]),
        ]
        return self._plan(task, "targeted_edit_v1", steps, [self._gate("changeset_required_for_write"), self._gate("changeset_schema")])

    def _gherkin(self, task: TaskSpec) -> ExecutionPlan:
        """Gherkin model: scope -> features -> bundle, then lint and coverage gates."""
        steps = [
            self._step("fetch_source_doc", "Fetch source doc", "fetch_source_doc", outputs=[self._out("source_doc_raw", ArtifactType.TEXT)], side_effect="external"),
            self._step("normalize_document", "Normalize document", "normalize_document", depends_on=["fetch_source_doc"], outputs=[self._out("source_doc_text", ArtifactType.TEXT)]),
            self._step("extract_increment_scope", "Extract increment scope", "extract_increment_scope", depends_on=["normalize_document"], outputs=[self._out("increment_scope", ArtifactType.STRUCTURED_JSON)]),
            self._step("partition_features", "Partition features", "partition_features", depends_on=["extract_increment_scope"], outputs=[self._out("feature_groups", ArtifactType.STRUCTURED_JSON)]),
            self._step("generate_gherkin_bundle", "Generate gherkin", "generate_gherkin_bundle", depends_on=["partition_features"], outputs=[self._out("gherkin_bundle", ArtifactType.GHERKIN_BUNDLE)], side_effect="write"),
            self._step("lint_gherkin", "Lint gherkin", "lint_gherkin", depends_on=["generate_gherkin_bundle"], outputs=[self._out("gherkin_lint_report", ArtifactType.STRUCTURED_JSON)], gates=[self._gate("gherkin_syntax_lint")]),
            self._step("validate_coverage", "Validate coverage", "validate_coverage", depends_on=["generate_gherkin_bundle"], outputs=[self._out("coverage_report", ArtifactType.STRUCTURED_JSON)], gates=[self._gate("coverage_of_change_intents")]),
            self._step("compose_test_model_summary", "Compose summary", "compose_test_model_summary", depends_on=["lint_gherkin", "validate_coverage"], outputs=[self._out("final_answer", ArtifactType.TEXT), self._out("final_changeset", ArtifactType.CHANGESET)], side_effect="write"),
        ]
        return self._plan(task, "gherkin_model_v1", steps, [self._gate("changeset_schema"), self._gate("non_empty_answer_or_changeset")])

    def _plan(self, task: TaskSpec, template_id: str, steps: list[PlanStep], gates: list[QualityGateRef]) -> ExecutionPlan:
        """Wrap steps into an ExecutionPlan; plan_id is task-scoped for traceability."""
        return ExecutionPlan(
            plan_id=f"{task.task_id}:{template_id}",
            task_id=task.task_id,
            scenario=task.scenario,
            template_id=template_id,
            template_version="1.0",
            steps=steps,
            global_gates=gates,
        )

    def _step(
        self,
        step_id: str,
        title: str,
        action_id: str,
        *,
        executor: str = "function",
        graph_id: str | None = None,
        depends_on: list[str] | None = None,
        outputs: list[ArtifactSpec] | None = None,
        gates: list[QualityGateRef] | None = None,
        side_effect: str = "read",
    ) -> PlanStep:
        """Shorthand PlanStep constructor with safe defaults for optional fields."""
        return PlanStep(
            step_id=step_id,
            title=title,
            action_id=action_id,
            executor=executor,
            graph_id=graph_id,
            depends_on=depends_on or [],
            outputs=outputs or [],
            quality_gates=gates or [],
            side_effect=side_effect,
        )

    def _out(self, key: str, artifact_type: ArtifactType, *, required: bool = True) -> ArtifactSpec:
        """Shorthand for an output-artifact spec."""
        return ArtifactSpec(key=key, type=artifact_type, required=required)

    def _gate(self, gate_id: str, *, blocking: bool = True) -> QualityGateRef:
        """Shorthand for a quality-gate reference; blocking gates fail the step."""
        return QualityGateRef(gate_id=gate_id, blocking=blocking)
+def build_router_service(llm: AgentLlmService, agent_repository: "AgentRepository") -> "RouterService": + from app.modules.agent.engine.graphs import ( + BaseGraphFactory, + DocsGraphFactory, + ProjectEditsGraphFactory, + ProjectQaGraphFactory, + ) + from app.modules.agent.engine.router.context_store import RouterContextStore + from app.modules.agent.engine.router.intent_classifier import IntentClassifier + from app.modules.agent.engine.router.registry import IntentRegistry + from app.modules.agent.engine.router.router_service import RouterService + registry_path = Path(__file__).resolve().parent / "intents_registry.yaml" registry = IntentRegistry(registry_path=registry_path) registry.register("default", "general", BaseGraphFactory(llm).build) @@ -31,4 +36,4 @@ def build_router_service(llm: AgentLlmService, agent_repository: AgentRepository ) -__all__ = ["build_router_service", "IntentRegistry", "RouterService"] +__all__ = ["build_router_service"] diff --git a/app/modules/agent/engine/router/__pycache__/__init__.cpython-312.pyc b/app/modules/agent/engine/router/__pycache__/__init__.cpython-312.pyc index 1284b7ed3c49d39f64b5f3cea7501bf342275f81..12f70d904921e7f2d66efc3e45ca65760ac5670a 100644 GIT binary patch literal 2038 zcmbtV&2Jk;6rc6Z?)o!vnhhkPp`k$yc7vl52`Utj(h%I1+Da27gb%E?-m$yUtZimD z6|THekoW_TIM4%!a-&LbTsU&)5-X9Mj^sndhr}&XKuA3x-t1=c(Xwxw?gW+3bAxomjkt&iT<}n&`PU3jU^lrnL z`S{bNMPuRW;=)JQ%0<{1oUfCHcY``Bgx#^L~qrk|k5c zQGGR-W2!lEUtpFjg(jS&9}ng#O*v^F@O!UB2Y6?r*#?{-X!Tq;9_1VRi`9vvyW!l@ z?xqm(lQGnZcajg|ksm+RNv%#kLmzSuy0H$5G*@3gskI?c14-yPUQ$*XyeKd1B?DgY z3wjx_2EmK`f4zXNcBtzM9BN03vf+z}5}i~h-N`)6MtN94xuG%>t&zh#d&P7~k(sUA zADUIsZ9Mxyv+5q(8C_z{RZ{g9E!#UT8QGWIG>;lU51$DecA?qu$QRxUI2N6qy#~vM zw?IwTwQDwEus0CyEt9(KdaTVvo8BF<14S3QJKQd`O^1SZCfYDsaG|(nk(#+iJ+8rB zAk3s(YlX4$*yko!h=`bD3lQ;SsN&hpMo#AO$PrIg55nP@Y8bHL1`!YQb7remaGKT{ zC2k?Oa0Sw++YM5vi=4SE1L3j(!3G7(0^@H!*N6qzxNz(gHY98=on{ke#5fF=oe_3# zoMso{WWz$eaH@I`q2Y$FQSgc|aesK+Flsg>hQT#P+$Oz4xY{xqlrzdDVO+VjW>YIS 
zz!G4>#2fsq1OguKS(xbugA4=PU@2+3n!Ra0z>Ee;E?fYZh>A9a)wx;KM z)5Y#|aT_0#>fI?Jotp1W6}wYUOJDBhulzi_l`r=4rEb2ojmw9gYxn30&yR;*yFa-# zbYcDaBmKhW)H_@HWe6)Zx-&GkGd#ZY(&S$nPOG~J3wS_G059j#a7W_(kokn)j}{Q>_PrU=>Dld`9NK1{sPVL8QFTr&)lQp0$F*hL}90ecdk3(NODdkdE0 zPvO1?Y*&^fsgFkcXySKt>9L&c<2fk8t~w^E_nNy1hRylqaQvjUC)T@{QTqI5^8A-t G;Ql|lpc9z@ literal 1991 zcmb_c&1)M+6ra_8$@;dvYUDN{O zH$SD*3BbnJ``@|`RRDhBi)d&g$Iu<*twMa?twRP)t@s@R@uJ@%@KdtN z966f1#g;9yi0mM68!p5Q%Y@hpY`Osv;nLyWQ7|ea-Pn^@6m3)P%InO2 zayYW1kaUE0LFB)liLj%2o8O!b3H(DW)zQdg=mWrij-`|7|0!17nIQRp8;hJfy<<}$ zGh6I6F#!|otL zbw(zLZAA5U$bTC9#HK3bT|gCwbqP&`F2wa)C7H&fKr~Swlb5FJA-%0Y@H>Nf1!>%} z8ZLsCMRg1VAKiykYulL77pH_hj0G`P_)=Z#xX8hrx|s70b7Er7CCr~8%-K`YFeePo zaxull%UtlZ;PXrbeB-NPJb%M(x2vA-bPx=xqHC(K)pT31ibYFSEz51W#Ih>w4QhzP ztlnylc!2jQO51zw=hDXVkW8``HWM*ZSG_ zwr=hzsezL3Dfutl$20x8Yd@~;&Mge)mU?qbedU{b zOM`N)SFZJy<)g@rN9a`KUneg;p52|ivUPjkxUy4xd)Ih}T`O7Go1EF3zOeVw>>s+4 z(hh*4YKQEHKuH#VD5@dSyIjgMCNPE|cP*^pMV7?MD2Wx3#7daNN|QgRfhb*ADN!?C zJ$nB1rla=?$GJ=Xv+us8%a544P=J@O9#o1R`2)6U0hhkW__WstlfeWr_}{s#MT B{3!qc diff --git a/app/modules/agent/module.py b/app/modules/agent/module.py index 03c547b..78cf353 100644 --- a/app/modules/agent/module.py +++ b/app/modules/agent/module.py @@ -5,6 +5,8 @@ from app.modules.agent.changeset_validator import ChangeSetValidator from app.modules.agent.confluence_service import ConfluenceService from app.modules.agent.llm import AgentLlmService from app.modules.agent.prompt_loader import PromptLoader +from app.modules.agent.story_context_repository import StoryContextRepository +from app.modules.agent.story_session_recorder import StorySessionRecorder from app.modules.agent.service import GraphAgentRuntime from app.modules.agent.repository import AgentRepository from app.modules.contracts import RagRetriever @@ -18,26 +20,34 @@ class ConfluenceFetchRequest(BaseModel): class AgentModule: - def __init__(self, rag_retriever: RagRetriever, agent_repository: AgentRepository) -> None: + def __init__( + self, + rag_retriever: 
RagRetriever, + agent_repository: AgentRepository, + story_context_repository: StoryContextRepository, + ) -> None: self.confluence = ConfluenceService() self.changeset_validator = ChangeSetValidator() + self.story_context_repository = story_context_repository settings = GigaChatSettings.from_env() token_provider = GigaChatTokenProvider(settings) client = GigaChatClient(settings, token_provider) prompt_loader = PromptLoader() llm = AgentLlmService(client=client, prompts=prompt_loader) + story_recorder = StorySessionRecorder(story_context_repository) self.runtime = GraphAgentRuntime( rag=rag_retriever, confluence=self.confluence, changeset_validator=self.changeset_validator, llm=llm, agent_repository=agent_repository, + story_recorder=story_recorder, ) def internal_router(self) -> APIRouter: - router = APIRouter(prefix="/internal/tools/confluence", tags=["internal-confluence"]) + router = APIRouter(prefix="/internal/tools", tags=["internal-tools"]) - @router.post("/fetch") + @router.post("/confluence/fetch") async def fetch_page(request: ConfluenceFetchRequest) -> dict: return await self.confluence.fetch_page(str(request.url)) diff --git a/app/modules/agent/prompts/project_edits_apply.txt b/app/modules/agent/prompts/project_edits_apply.txt deleted file mode 100644 index 09cc113..0000000 --- a/app/modules/agent/prompts/project_edits_apply.txt +++ /dev/null @@ -1,10 +0,0 @@ -Ты вносишь правку в один файл по запросу пользователя. -На вход приходит JSON с request, path, reason, current_content, previous_validation_feedback, rag_context, confluence_context. - -Верни только полное итоговое содержимое файла (без JSON). - -Критичные правила: -- Измени только те части, которые нужны по запросу. -- Не переписывай файл целиком без необходимости. -- Сохрани структуру, стиль и все нерелевантные разделы без изменений. -- Если данных недостаточно, внеси минимально безопасную правку и явно отрази ограничение в тексте файла. 
diff --git a/app/modules/agent/prompts/project_edits_hunks.txt b/app/modules/agent/prompts/project_edits_hunks.txt new file mode 100644 index 0000000..dad0294 --- /dev/null +++ b/app/modules/agent/prompts/project_edits_hunks.txt @@ -0,0 +1,32 @@ +Ты формируешь hunks строго по контракту правок. +На вход приходит JSON с request, contract, current_content, previous_validation_feedback, rag_context, confluence_context. + +Верни только JSON: +{ + "hunks": [ + { + "type": "append_end", + "new_text": "<текст для добавления в конец>" + } + ] +} + +Для replace_between: +{ + "type": "replace_between", + "start_anchor": "<точно как в contract>", + "end_anchor": "<точно как в contract>", + "new_text": "<новый текст между якорями>" +} + +Для replace_line_equals: +{ + "type": "replace_line_equals", + "old_line": "<точно как в contract>", + "new_text": "<новая строка/текст>" +} + +Критичные правила: +- Не выходи за рамки allowed_blocks. +- Не добавляй hunks, которых нет в контракте. +- Минимизируй изменения и не трогай нерелевантные части файла. diff --git a/app/modules/agent/prompts/project_edits_plan.txt b/app/modules/agent/prompts/project_edits_plan.txt index f0600a7..217ba53 100644 --- a/app/modules/agent/prompts/project_edits_plan.txt +++ b/app/modules/agent/prompts/project_edits_plan.txt @@ -1,15 +1,32 @@ -Ты анализируешь запрос на правки файлов проекта (не про написание нового кода). -На вход приходит JSON с request, requested_path, context_files. +Ты планируешь строго ограниченный контракт правок файла. +На вход приходит JSON с request, requested_path, context_files, contract_requirements. Верни только JSON: { "files": [ - {"path": "", "reason": ""} + { + "path": "README.md", + "reason": "коротко зачем меняем", + "intent": "update", + "max_hunks": 1, + "max_changed_lines": 8, + "allowed_blocks": [ + { + "type": "append_end", + "max_changed_lines": 8 + } + ] + } ] } -Правила: -- Выбирай только файлы, реально нужные для выполнения запроса. -- Не добавляй лишние файлы. 
-- Обычно 1-3 файла, максимум 8. -- Если в request указан конкретный файл, включи его в первую очередь. +Поддерживаемые block type: +- append_end: добавить текст только в конец файла. +- replace_between: заменить текст только между start_anchor и end_anchor. +- replace_line_equals: заменить только строку old_line. + +Критичные правила: +- Обязательно задавай allowed_blocks для каждого файла. +- Не добавляй файлы, которых нет в запросе. +- Точечные запросы: max_hunks=1 и маленький max_changed_lines. +- Если запрос "добавь в конец", используй append_end. diff --git a/app/modules/agent/prompts/project_edits_self_check.txt b/app/modules/agent/prompts/project_edits_self_check.txt index bb00a32..4cc5ddf 100644 --- a/app/modules/agent/prompts/project_edits_self_check.txt +++ b/app/modules/agent/prompts/project_edits_self_check.txt @@ -1,8 +1,9 @@ Ты валидируешь changeset правок файла. -На вход приходит JSON с request и changeset (op, path, reason). +На вход приходит JSON с request, contracts и changeset (op, path, reason). 
Проверь: 1) изменения соответствуют запросу, +1.1) изменения соответствуют контракту (разрешенные блоки и лимиты), 2) нет лишних нерелевантных правок, 3) изменены только действительно нужные файлы, 4) нет косметических правок (пробелы/форматирование без смысла), diff --git a/app/modules/agent/repository.py b/app/modules/agent/repository.py index 552d188..e9d3d46 100644 --- a/app/modules/agent/repository.py +++ b/app/modules/agent/repository.py @@ -24,6 +24,46 @@ class AgentRepository: """ ) ) + conn.execute( + text( + """ + CREATE TABLE IF NOT EXISTS agent_quality_metrics ( + id BIGSERIAL PRIMARY KEY, + task_id VARCHAR(64) NOT NULL, + dialog_session_id VARCHAR(64) NOT NULL, + rag_session_id VARCHAR(64) NOT NULL, + scenario VARCHAR(64) NOT NULL, + domain_id VARCHAR(64) NOT NULL, + process_id VARCHAR(64) NOT NULL, + faithfulness_score DOUBLE PRECISION NOT NULL, + coverage_score DOUBLE PRECISION NOT NULL, + faithfulness_claims_total INTEGER NOT NULL, + faithfulness_claims_supported INTEGER NOT NULL, + coverage_required_items INTEGER NOT NULL, + coverage_covered_items INTEGER NOT NULL, + quality_status VARCHAR(32) NOT NULL, + metrics_json JSONB NOT NULL, + created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP + ) + """ + ) + ) + conn.execute( + text( + """ + CREATE INDEX IF NOT EXISTS idx_agent_quality_metrics_task + ON agent_quality_metrics(task_id, created_at DESC) + """ + ) + ) + conn.execute( + text( + """ + CREATE INDEX IF NOT EXISTS idx_agent_quality_metrics_scenario + ON agent_quality_metrics(scenario, created_at DESC) + """ + ) + ) conn.commit() def get_router_context(self, conversation_key: str) -> RouterContext: @@ -104,3 +144,103 @@ class AgentRepository: }, ) conn.commit() + + def save_quality_metrics( + self, + *, + task_id: str, + dialog_session_id: str, + rag_session_id: str, + scenario: str, + domain_id: str, + process_id: str, + quality: dict, + ) -> None: + faithfulness = quality.get("faithfulness", {}) if isinstance(quality, dict) else {} + coverage = 
quality.get("coverage", {}) if isinstance(quality, dict) else {} + status = str(quality.get("status", "unknown")) if isinstance(quality, dict) else "unknown" + with get_engine().connect() as conn: + conn.execute( + text( + """ + INSERT INTO agent_quality_metrics ( + task_id, + dialog_session_id, + rag_session_id, + scenario, + domain_id, + process_id, + faithfulness_score, + coverage_score, + faithfulness_claims_total, + faithfulness_claims_supported, + coverage_required_items, + coverage_covered_items, + quality_status, + metrics_json + ) VALUES ( + :task_id, + :dialog_session_id, + :rag_session_id, + :scenario, + :domain_id, + :process_id, + :faithfulness_score, + :coverage_score, + :faithfulness_claims_total, + :faithfulness_claims_supported, + :coverage_required_items, + :coverage_covered_items, + :quality_status, + CAST(:metrics_json AS JSONB) + ) + """ + ), + { + "task_id": task_id, + "dialog_session_id": dialog_session_id, + "rag_session_id": rag_session_id, + "scenario": scenario, + "domain_id": domain_id, + "process_id": process_id, + "faithfulness_score": float(faithfulness.get("score", 0.0) or 0.0), + "coverage_score": float(coverage.get("score", 0.0) or 0.0), + "faithfulness_claims_total": int(faithfulness.get("claims_total", 0) or 0), + "faithfulness_claims_supported": int(faithfulness.get("claims_supported", 0) or 0), + "coverage_required_items": int(coverage.get("required_count", 0) or 0), + "coverage_covered_items": int(coverage.get("covered_count", 0) or 0), + "quality_status": status, + "metrics_json": json.dumps(quality if isinstance(quality, dict) else {}, ensure_ascii=False), + }, + ) + conn.commit() + + def get_quality_metrics(self, *, limit: int = 50, scenario: str | None = None) -> list[dict]: + query = """ + SELECT + task_id, + dialog_session_id, + rag_session_id, + scenario, + domain_id, + process_id, + faithfulness_score, + coverage_score, + faithfulness_claims_total, + faithfulness_claims_supported, + coverage_required_items, + 
coverage_covered_items, + quality_status, + metrics_json, + created_at + FROM agent_quality_metrics + """ + params: dict = {"limit": max(1, int(limit))} + if scenario: + query += " WHERE scenario = :scenario" + params["scenario"] = scenario + query += " ORDER BY created_at DESC LIMIT :limit" + + with get_engine().connect() as conn: + rows = conn.execute(text(query), params).mappings().fetchall() + return [dict(row) for row in rows] diff --git a/app/modules/agent/service.py b/app/modules/agent/service.py index a1036ae..9467b56 100644 --- a/app/modules/agent/service.py +++ b/app/modules/agent/service.py @@ -1,13 +1,15 @@ from dataclasses import dataclass, field from collections.abc import Awaitable, Callable import inspect -import asyncio import logging import re +from app.modules.agent.engine.orchestrator import OrchestratorService, TaskSpecBuilder +from app.modules.agent.engine.orchestrator.metrics_persister import MetricsPersister +from app.modules.agent.engine.orchestrator.models import RoutingMeta from app.modules.agent.engine.router import build_router_service -from app.modules.agent.engine.graphs.progress_registry import progress_registry from app.modules.agent.llm import AgentLlmService +from app.modules.agent.story_session_recorder import StorySessionRecorder from app.modules.agent.changeset_validator import ChangeSetValidator from app.modules.agent.confluence_service import ConfluenceService from app.modules.agent.repository import AgentRepository @@ -21,6 +23,13 @@ from app.schemas.common import ModuleName LOGGER = logging.getLogger(__name__) +def _truncate_for_log(text: str | None, max_chars: int = 1500) -> str: + value = (text or "").replace("\n", "\\n").strip() + if len(value) <= max_chars: + return value + return value[:max_chars].rstrip() + "...[truncated]" + + @dataclass class AgentResult: result_type: TaskResultType @@ -37,11 +46,16 @@ class GraphAgentRuntime: changeset_validator: ChangeSetValidator, llm: AgentLlmService, agent_repository: 
AgentRepository, + story_recorder: StorySessionRecorder | None = None, ) -> None: self._rag = rag self._confluence = confluence self._changeset_validator = changeset_validator self._router = build_router_service(llm, agent_repository) + self._task_spec_builder = TaskSpecBuilder() + self._orchestrator = OrchestratorService() + self._metrics_persister = MetricsPersister(agent_repository) + self._story_recorder = story_recorder self._checkpointer = None async def run( @@ -64,45 +78,71 @@ class GraphAgentRuntime: ) await self._emit_progress(progress_cb, "agent.route", "Определяю тип запроса и подбираю граф.", meta={"mode": mode}) route = self._router.resolve(message, dialog_session_id, mode=mode) + LOGGER.warning( + "router decision: task_id=%s dialog_session_id=%s mode=%s route=%s/%s reason=%s confidence=%s fallback_used=%s", + task_id, + dialog_session_id, + mode, + route.domain_id, + route.process_id, + route.reason, + route.confidence, + route.fallback_used, + ) await self._emit_progress( progress_cb, "agent.route.resolved", "Маршрут выбран, готовлю контекст для выполнения.", meta={"domain_id": route.domain_id, "process_id": route.process_id}, ) - graph = self._resolve_graph(route.domain_id, route.process_id) files_map = self._build_files_map(files) await self._emit_progress(progress_cb, "agent.rag", "Собираю релевантный контекст из RAG.") rag_ctx = await self._rag.retrieve(rag_session_id, message) await self._emit_progress(progress_cb, "agent.attachments", "Обрабатываю дополнительные вложения.") conf_pages = await self._fetch_confluence_pages(attachments) - state = { - "task_id": task_id, - "project_id": rag_session_id, - "message": message, - "progress_key": task_id, - "rag_context": self._format_rag(rag_ctx), - "confluence_context": self._format_confluence(conf_pages), - "files_map": files_map, - } + route_meta = RoutingMeta( + domain_id=route.domain_id, + process_id=route.process_id, + confidence=route.confidence, + reason=route.reason, + 
fallback_used=route.fallback_used, + ) + task_spec = self._task_spec_builder.build( + task_id=task_id, + dialog_session_id=dialog_session_id, + rag_session_id=rag_session_id, + mode=mode, + message=message, + route=route_meta, + attachments=attachments, + files=files, + rag_context=self._format_rag(rag_ctx), + confluence_context=self._format_confluence(conf_pages), + files_map=files_map, + ) - await self._emit_progress(progress_cb, "agent.graph", "Запускаю выполнение графа.") - if progress_cb is not None: - progress_registry.register(task_id, progress_cb) - try: - result = await asyncio.to_thread( - self._invoke_graph, - graph, - state, - dialog_session_id, - ) - finally: - if progress_cb is not None: - progress_registry.unregister(task_id) - await self._emit_progress(progress_cb, "agent.graph.done", "Граф завершил обработку результата.") - answer = result.get("answer") - changeset = result.get("changeset") or [] + await self._emit_progress(progress_cb, "agent.orchestrator", "Строю и выполняю план оркестрации.") + orchestrator_result = await self._orchestrator.run( + task=task_spec, + graph_resolver=self._resolve_graph, + graph_invoker=self._invoke_graph, + progress_cb=progress_cb, + ) + await self._emit_progress(progress_cb, "agent.orchestrator.done", "Оркестратор завершил выполнение плана.") + answer = orchestrator_result.answer + changeset = orchestrator_result.changeset or [] + orchestrator_meta = orchestrator_result.meta or {} + quality_meta = self._extract_quality_meta(orchestrator_meta) + orchestrator_steps = [item.model_dump(mode="json") for item in orchestrator_result.steps] + self._record_session_story_artifacts( + dialog_session_id=dialog_session_id, + rag_session_id=rag_session_id, + scenario=str(orchestrator_meta.get("scenario", task_spec.scenario.value)), + attachments=[a.model_dump(mode="json") for a in task_spec.attachments], + answer=answer, + changeset=changeset, + ) if changeset: await self._emit_progress(progress_cb, "agent.changeset", "Проверяю 
и валидирую предложенные изменения.") changeset = self._enrich_changeset_hashes(changeset, files_map) @@ -117,6 +157,21 @@ class GraphAgentRuntime: user_message=message, assistant_message=final_answer, ) + LOGGER.warning( + "final agent answer: task_id=%s route=%s/%s answer=%s", + task_id, + route.domain_id, + route.process_id, + _truncate_for_log(final_answer), + ) + self._persist_quality_metrics( + task_id=task_id, + dialog_session_id=dialog_session_id, + rag_session_id=rag_session_id, + route=route, + scenario=str(orchestrator_meta.get("scenario", task_spec.scenario.value)), + quality=quality_meta, + ) return AgentResult( result_type=TaskResultType.ANSWER, answer=final_answer, @@ -125,6 +180,8 @@ class GraphAgentRuntime: "used_rag": True, "used_confluence": bool(conf_pages), "changeset_filtered_out": True, + "orchestrator": orchestrator_meta, + "orchestrator_steps": orchestrator_steps, }, ) validated = self._changeset_validator.validate(task_id, changeset) @@ -140,7 +197,21 @@ class GraphAgentRuntime: result_type=TaskResultType.CHANGESET, answer=final_answer, changeset=validated, - meta={"route": route.model_dump(), "used_rag": True, "used_confluence": bool(conf_pages)}, + meta={ + "route": route.model_dump(), + "used_rag": True, + "used_confluence": bool(conf_pages), + "orchestrator": orchestrator_meta, + "orchestrator_steps": orchestrator_steps, + }, + ) + self._persist_quality_metrics( + task_id=task_id, + dialog_session_id=dialog_session_id, + rag_session_id=rag_session_id, + route=route, + scenario=str(orchestrator_meta.get("scenario", task_spec.scenario.value)), + quality=quality_meta, ) LOGGER.warning( "GraphAgentRuntime.run completed: task_id=%s route=%s/%s result_type=%s changeset_items=%s", @@ -150,6 +221,13 @@ class GraphAgentRuntime: final.result_type.value, len(final.changeset), ) + LOGGER.warning( + "final agent answer: task_id=%s route=%s/%s answer=%s", + task_id, + route.domain_id, + route.process_id, + _truncate_for_log(final.answer), + ) return 
final final_answer = answer or "" @@ -164,7 +242,21 @@ class GraphAgentRuntime: final = AgentResult( result_type=TaskResultType.ANSWER, answer=final_answer, - meta={"route": route.model_dump(), "used_rag": True, "used_confluence": bool(conf_pages)}, + meta={ + "route": route.model_dump(), + "used_rag": True, + "used_confluence": bool(conf_pages), + "orchestrator": orchestrator_meta, + "orchestrator_steps": orchestrator_steps, + }, + ) + self._persist_quality_metrics( + task_id=task_id, + dialog_session_id=dialog_session_id, + rag_session_id=rag_session_id, + route=route, + scenario=str(orchestrator_meta.get("scenario", task_spec.scenario.value)), + quality=quality_meta, ) LOGGER.warning( "GraphAgentRuntime.run completed: task_id=%s route=%s/%s result_type=%s answer_len=%s", @@ -174,8 +266,67 @@ class GraphAgentRuntime: final.result_type.value, len(final.answer or ""), ) + LOGGER.warning( + "final agent answer: task_id=%s route=%s/%s answer=%s", + task_id, + route.domain_id, + route.process_id, + _truncate_for_log(final.answer), + ) return final + def _extract_quality_meta(self, orchestrator_meta: dict) -> dict: + if not isinstance(orchestrator_meta, dict): + return {} + quality = orchestrator_meta.get("quality") + return quality if isinstance(quality, dict) else {} + + def _persist_quality_metrics( + self, + *, + task_id: str, + dialog_session_id: str, + rag_session_id: str, + route, + scenario: str, + quality: dict, + ) -> None: + if not quality: + return + self._metrics_persister.save( + task_id=task_id, + dialog_session_id=dialog_session_id, + rag_session_id=rag_session_id, + scenario=scenario, + domain_id=str(route.domain_id), + process_id=str(route.process_id), + quality=quality, + ) + + def _record_session_story_artifacts( + self, + *, + dialog_session_id: str, + rag_session_id: str, + scenario: str, + attachments: list[dict], + answer: str | None, + changeset: list[ChangeItem], + ) -> None: + if self._story_recorder is None: + return + try: + 
self._story_recorder.record_run( + dialog_session_id=dialog_session_id, + rag_session_id=rag_session_id, + scenario=scenario, + attachments=attachments, + answer=answer, + changeset=changeset, + ) + except Exception: # noqa: BLE001 + LOGGER.exception("story session artifact recording failed") + async def _emit_progress( self, progress_cb: Callable[[str, str, str, dict | None], Awaitable[None] | None] | None, diff --git a/app/modules/agent/story_context_repository.py b/app/modules/agent/story_context_repository.py new file mode 100644 index 0000000..81f6dfd --- /dev/null +++ b/app/modules/agent/story_context_repository.py @@ -0,0 +1,745 @@ +from __future__ import annotations + +import json +from collections import defaultdict + +from sqlalchemy import text + +from app.modules.shared.db import get_engine + + +class StoryContextSchemaRepository: + def ensure_tables(self) -> None: + with get_engine().connect() as conn: + conn.execute( + text( + """ + CREATE TABLE IF NOT EXISTS story_records ( + story_id VARCHAR(128) PRIMARY KEY, + project_id VARCHAR(512) NOT NULL, + title TEXT NOT NULL, + status VARCHAR(64) NOT NULL DEFAULT 'draft', + owner VARCHAR(256) NULL, + metadata_json JSONB NOT NULL DEFAULT '{}'::jsonb, + baseline_commit_sha VARCHAR(128) NULL, + snapshot_id VARCHAR(128) NULL, + created_by VARCHAR(256) NULL, + updated_by VARCHAR(256) NULL, + created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP + ) + """ + ) + ) + conn.execute( + text( + """ + CREATE TABLE IF NOT EXISTS story_artifacts ( + id BIGSERIAL PRIMARY KEY, + story_id VARCHAR(128) NOT NULL, + artifact_type VARCHAR(64) NOT NULL, + revision INTEGER NOT NULL, + content TEXT NOT NULL, + content_hash VARCHAR(128) NULL, + rag_session_id VARCHAR(64) NULL, + created_by VARCHAR(256) NULL, + created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP, + artifact_role VARCHAR(64) NULL, + doc_id TEXT NULL, + doc_version TEXT NULL, + path TEXT NULL, + section TEXT NULL, + chunk_id 
TEXT NULL, + change_type VARCHAR(32) NULL, + summary TEXT NULL, + source_ref TEXT NULL, + session_id VARCHAR(128) NULL, + CONSTRAINT fk_story_artifacts_story + FOREIGN KEY (story_id) + REFERENCES story_records(story_id) + ON DELETE CASCADE, + CONSTRAINT uq_story_artifact_revision + UNIQUE (story_id, artifact_type, revision) + ) + """ + ) + ) + conn.execute( + text( + """ + CREATE TABLE IF NOT EXISTS story_links ( + id BIGSERIAL PRIMARY KEY, + story_id VARCHAR(128) NOT NULL, + link_type VARCHAR(64) NOT NULL, + link_value TEXT NOT NULL, + target_ref TEXT NULL, + description TEXT NULL, + metadata_json JSONB NOT NULL DEFAULT '{}'::jsonb, + created_by VARCHAR(256) NULL, + created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT fk_story_links_story + FOREIGN KEY (story_id) + REFERENCES story_records(story_id) + ON DELETE CASCADE + ) + """ + ) + ) + conn.execute( + text( + """ + CREATE TABLE IF NOT EXISTS session_artifacts ( + id BIGSERIAL PRIMARY KEY, + session_id VARCHAR(128) NOT NULL, + project_id VARCHAR(512) NOT NULL, + artifact_role VARCHAR(64) NOT NULL, + source_ref TEXT NULL, + doc_id TEXT NULL, + doc_version TEXT NULL, + path TEXT NULL, + section TEXT NULL, + chunk_id TEXT NULL, + change_type VARCHAR(32) NULL, + summary TEXT NULL, + created_by VARCHAR(256) NULL, + created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP, + bound_story_id VARCHAR(128) NULL, + bound_at TIMESTAMPTZ NULL + ) + """ + ) + ) + + # Backward-compatible schema upgrades for existing installations. 
+ conn.execute(text("ALTER TABLE story_records ADD COLUMN IF NOT EXISTS owner VARCHAR(256) NULL")) + conn.execute(text("ALTER TABLE story_records ADD COLUMN IF NOT EXISTS metadata_json JSONB NOT NULL DEFAULT '{}'::jsonb")) + conn.execute(text("ALTER TABLE story_records ADD COLUMN IF NOT EXISTS baseline_commit_sha VARCHAR(128) NULL")) + conn.execute(text("ALTER TABLE story_records ADD COLUMN IF NOT EXISTS snapshot_id VARCHAR(128) NULL")) + conn.execute(text("ALTER TABLE story_records ADD COLUMN IF NOT EXISTS created_by VARCHAR(256) NULL")) + conn.execute(text("ALTER TABLE story_records ADD COLUMN IF NOT EXISTS updated_by VARCHAR(256) NULL")) + + conn.execute(text("ALTER TABLE story_artifacts ADD COLUMN IF NOT EXISTS artifact_role VARCHAR(64) NULL")) + conn.execute(text("ALTER TABLE story_artifacts ADD COLUMN IF NOT EXISTS doc_id TEXT NULL")) + conn.execute(text("ALTER TABLE story_artifacts ADD COLUMN IF NOT EXISTS doc_version TEXT NULL")) + conn.execute(text("ALTER TABLE story_artifacts ADD COLUMN IF NOT EXISTS path TEXT NULL")) + conn.execute(text("ALTER TABLE story_artifacts ADD COLUMN IF NOT EXISTS section TEXT NULL")) + conn.execute(text("ALTER TABLE story_artifacts ADD COLUMN IF NOT EXISTS chunk_id TEXT NULL")) + conn.execute(text("ALTER TABLE story_artifacts ADD COLUMN IF NOT EXISTS change_type VARCHAR(32) NULL")) + conn.execute(text("ALTER TABLE story_artifacts ADD COLUMN IF NOT EXISTS summary TEXT NULL")) + conn.execute(text("ALTER TABLE story_artifacts ADD COLUMN IF NOT EXISTS source_ref TEXT NULL")) + conn.execute(text("ALTER TABLE story_artifacts ADD COLUMN IF NOT EXISTS session_id VARCHAR(128) NULL")) + + conn.execute(text("ALTER TABLE story_links ADD COLUMN IF NOT EXISTS target_ref TEXT NULL")) + conn.execute(text("ALTER TABLE story_links ADD COLUMN IF NOT EXISTS description TEXT NULL")) + conn.execute(text("ALTER TABLE story_links ADD COLUMN IF NOT EXISTS metadata_json JSONB NOT NULL DEFAULT '{}'::jsonb")) + conn.execute(text("ALTER TABLE story_links 
ADD COLUMN IF NOT EXISTS created_by VARCHAR(256) NULL")) + + conn.execute(text("ALTER TABLE session_artifacts ADD COLUMN IF NOT EXISTS bound_story_id VARCHAR(128) NULL")) + conn.execute(text("ALTER TABLE session_artifacts ADD COLUMN IF NOT EXISTS bound_at TIMESTAMPTZ NULL")) + + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_story_records_project ON story_records(project_id, updated_at DESC)")) + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_story_artifacts_story_type ON story_artifacts(story_id, artifact_type, revision DESC)")) + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_story_links_story ON story_links(story_id, link_type)")) + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_session_artifacts_session ON session_artifacts(session_id, created_at DESC)")) + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_session_artifacts_project ON session_artifacts(project_id, created_at DESC)")) + conn.commit() + + +class StoryContextRepository: + def record_story_commit( + self, + *, + story_id: str, + project_id: str, + title: str, + commit_sha: str | None, + branch: str | None, + changed_files: list[str], + summary: str, + actor: str | None, + ) -> None: + with get_engine().connect() as conn: + self._upsert_story_conn( + conn, + story_id=story_id, + project_id=project_id, + title=title, + status="in_progress", + metadata={"source": "repo_webhook"}, + baseline_commit_sha=commit_sha, + updated_by=actor, + ) + if commit_sha: + self._insert_story_link( + conn, + story_id=story_id, + link_type="commit", + target_ref=commit_sha, + description="Webhook commit", + metadata={"project_id": project_id}, + created_by=actor, + ) + if branch: + self._insert_story_link( + conn, + story_id=story_id, + link_type="external", + target_ref=branch, + description="Webhook branch", + metadata={"kind": "branch"}, + created_by=actor, + ) + for path in changed_files: + self._insert_story_link( + conn, + story_id=story_id, + link_type="doc", + target_ref=path, + description="Changed 
file in commit", + metadata={"commit_sha": commit_sha}, + created_by=actor, + ) + revision = self._next_revision(conn, story_id=story_id, artifact_type="commit") + conn.execute( + text( + """ + INSERT INTO story_artifacts ( + story_id, + artifact_type, + revision, + content, + content_hash, + rag_session_id, + created_by, + artifact_role, + change_type, + summary, + source_ref + ) VALUES ( + :story_id, + :artifact_type, + :revision, + :content, + NULL, + NULL, + :created_by, + :artifact_role, + :change_type, + :summary, + :source_ref + ) + """ + ), + { + "story_id": story_id, + "artifact_type": "commit", + "revision": revision, + "content": summary[:4000] or "commit", + "created_by": actor, + "artifact_role": "doc_change", + "change_type": "linked", + "summary": summary[:4000] or "commit", + "source_ref": commit_sha or branch or "webhook", + }, + ) + conn.commit() + + def upsert_story( + self, + *, + story_id: str, + project_id: str, + title: str, + status: str = "draft", + owner: str | None = None, + metadata: dict | None = None, + baseline_commit_sha: str | None = None, + snapshot_id: str | None = None, + created_by: str | None = None, + updated_by: str | None = None, + ) -> None: + with get_engine().connect() as conn: + conn.execute( + text( + """ + INSERT INTO story_records ( + story_id, + project_id, + title, + status, + owner, + metadata_json, + baseline_commit_sha, + snapshot_id, + created_by, + updated_by + ) + VALUES ( + :story_id, + :project_id, + :title, + :status, + :owner, + CAST(:metadata_json AS JSONB), + :baseline_commit_sha, + :snapshot_id, + :created_by, + :updated_by + ) + ON CONFLICT (story_id) DO UPDATE SET + project_id = EXCLUDED.project_id, + title = EXCLUDED.title, + status = EXCLUDED.status, + owner = EXCLUDED.owner, + metadata_json = EXCLUDED.metadata_json, + baseline_commit_sha = COALESCE(EXCLUDED.baseline_commit_sha, story_records.baseline_commit_sha), + snapshot_id = COALESCE(EXCLUDED.snapshot_id, story_records.snapshot_id), + 
updated_by = COALESCE(EXCLUDED.updated_by, story_records.updated_by), + updated_at = CURRENT_TIMESTAMP + """ + ), + { + "story_id": story_id, + "project_id": project_id, + "title": title, + "status": status, + "owner": owner, + "metadata_json": json.dumps(metadata or {}, ensure_ascii=False), + "baseline_commit_sha": baseline_commit_sha, + "snapshot_id": snapshot_id, + "created_by": created_by, + "updated_by": updated_by, + }, + ) + conn.commit() + + def add_session_artifact( + self, + *, + session_id: str, + project_id: str, + artifact_role: str, + summary: str, + change_type: str | None = None, + source_ref: str | None = None, + doc_id: str | None = None, + doc_version: str | None = None, + path: str | None = None, + section: str | None = None, + chunk_id: str | None = None, + created_by: str | None = None, + ) -> None: + with get_engine().connect() as conn: + conn.execute( + text( + """ + INSERT INTO session_artifacts ( + session_id, + project_id, + artifact_role, + source_ref, + doc_id, + doc_version, + path, + section, + chunk_id, + change_type, + summary, + created_by + ) VALUES ( + :session_id, + :project_id, + :artifact_role, + :source_ref, + :doc_id, + :doc_version, + :path, + :section, + :chunk_id, + :change_type, + :summary, + :created_by + ) + """ + ), + { + "session_id": session_id, + "project_id": project_id, + "artifact_role": artifact_role, + "source_ref": source_ref, + "doc_id": doc_id, + "doc_version": doc_version, + "path": path, + "section": section, + "chunk_id": chunk_id, + "change_type": change_type, + "summary": summary, + "created_by": created_by, + }, + ) + conn.commit() + + def bind_session_to_story( + self, + *, + session_id: str, + story_id: str, + project_id: str, + title: str, + commit_sha: str | None = None, + branch: str | None = None, + changed_files: list[str] | None = None, + actor: str | None = None, + ) -> dict: + with get_engine().connect() as conn: + self._upsert_story_conn( + conn, + story_id=story_id, + 
project_id=project_id, + title=title, + status="in_progress", + metadata={"bound_session_id": session_id}, + baseline_commit_sha=commit_sha, + updated_by=actor, + ) + + if commit_sha: + self._insert_story_link( + conn, + story_id=story_id, + link_type="commit", + target_ref=commit_sha, + description=f"Bound from session {session_id}", + metadata={"session_id": session_id}, + created_by=actor, + ) + if branch: + self._insert_story_link( + conn, + story_id=story_id, + link_type="external", + target_ref=branch, + description="Source branch", + metadata={"kind": "branch"}, + created_by=actor, + ) + for path in changed_files or []: + self._insert_story_link( + conn, + story_id=story_id, + link_type="doc", + target_ref=path, + description="Changed file from commit", + metadata={"session_id": session_id, "commit_sha": commit_sha}, + created_by=actor, + ) + + rows = conn.execute( + text( + """ + SELECT id, artifact_role, source_ref, doc_id, doc_version, path, section, chunk_id, change_type, summary, created_by, project_id + FROM session_artifacts + WHERE session_id = :session_id AND bound_story_id IS NULL + ORDER BY id ASC + """ + ), + {"session_id": session_id}, + ).mappings().fetchall() + + revisions = self._load_revisions(conn, story_id) + migrated = 0 + for row in rows: + role = str(row["artifact_role"]) + revisions[role] += 1 + summary = str(row["summary"] or "") + content = summary or "n/a" + conn.execute( + text( + """ + INSERT INTO story_artifacts ( + story_id, + artifact_type, + revision, + content, + content_hash, + rag_session_id, + created_by, + artifact_role, + doc_id, + doc_version, + path, + section, + chunk_id, + change_type, + summary, + source_ref, + session_id + ) VALUES ( + :story_id, + :artifact_type, + :revision, + :content, + NULL, + :rag_session_id, + :created_by, + :artifact_role, + :doc_id, + :doc_version, + :path, + :section, + :chunk_id, + :change_type, + :summary, + :source_ref, + :session_id + ) + """ + ), + { + "story_id": story_id, + 
"artifact_type": role, + "revision": revisions[role], + "content": content, + "rag_session_id": str(row["project_id"]), + "created_by": actor or row["created_by"], + "artifact_role": role, + "doc_id": row["doc_id"], + "doc_version": row["doc_version"], + "path": row["path"], + "section": row["section"], + "chunk_id": row["chunk_id"], + "change_type": row["change_type"], + "summary": summary, + "source_ref": row["source_ref"], + "session_id": session_id, + }, + ) + conn.execute( + text( + """ + UPDATE session_artifacts + SET bound_story_id = :story_id, bound_at = CURRENT_TIMESTAMP + WHERE id = :id + """ + ), + {"story_id": story_id, "id": int(row["id"])}, + ) + migrated += 1 + + conn.commit() + return {"story_id": story_id, "session_id": session_id, "migrated_artifacts": migrated} + + def add_artifact( + self, + *, + story_id: str, + artifact_type: str, + revision: int, + content: str, + content_hash: str | None = None, + rag_session_id: str | None = None, + created_by: str | None = None, + ) -> None: + with get_engine().connect() as conn: + conn.execute( + text( + """ + INSERT INTO story_artifacts ( + story_id, + artifact_type, + revision, + content, + content_hash, + rag_session_id, + created_by, + artifact_role, + summary + ) VALUES ( + :story_id, + :artifact_type, + :revision, + :content, + :content_hash, + :rag_session_id, + :created_by, + :artifact_role, + :summary + ) + ON CONFLICT (story_id, artifact_type, revision) DO UPDATE SET + content = EXCLUDED.content, + content_hash = EXCLUDED.content_hash, + rag_session_id = EXCLUDED.rag_session_id, + created_by = EXCLUDED.created_by, + artifact_role = EXCLUDED.artifact_role, + summary = EXCLUDED.summary + """ + ), + { + "story_id": story_id, + "artifact_type": artifact_type, + "revision": int(revision), + "content": content, + "content_hash": content_hash, + "rag_session_id": rag_session_id, + "created_by": created_by, + "artifact_role": artifact_type, + "summary": content, + }, + ) + conn.commit() + + def 
get_story_context(self, story_id: str) -> dict | None: + with get_engine().connect() as conn: + story = conn.execute( + text( + """ + SELECT story_id, project_id, title, status, owner, metadata_json, baseline_commit_sha, snapshot_id, created_by, updated_by, created_at, updated_at + FROM story_records + WHERE story_id = :story_id + """ + ), + {"story_id": story_id}, + ).mappings().fetchone() + if not story: + return None + + artifacts = conn.execute( + text( + """ + SELECT artifact_type, artifact_role, revision, content, summary, path, section, chunk_id, change_type, doc_id, doc_version, + source_ref, session_id, content_hash, rag_session_id, created_by, created_at + FROM story_artifacts + WHERE story_id = :story_id + ORDER BY artifact_type ASC, revision DESC + """ + ), + {"story_id": story_id}, + ).mappings().fetchall() + + links = conn.execute( + text( + """ + SELECT link_type, + COALESCE(target_ref, link_value) AS target_ref, + description, + metadata_json, + created_by, + created_at + FROM story_links + WHERE story_id = :story_id + ORDER BY id ASC + """ + ), + {"story_id": story_id}, + ).mappings().fetchall() + + return { + "story": dict(story), + "artifacts": [dict(item) for item in artifacts], + "links": [dict(item) for item in links], + } + + def _upsert_story_conn( + self, + conn, + *, + story_id: str, + project_id: str, + title: str, + status: str, + metadata: dict, + baseline_commit_sha: str | None, + updated_by: str | None, + ) -> None: + conn.execute( + text( + """ + INSERT INTO story_records (story_id, project_id, title, status, metadata_json, baseline_commit_sha, updated_by) + VALUES (:story_id, :project_id, :title, :status, CAST(:metadata_json AS JSONB), :baseline_commit_sha, :updated_by) + ON CONFLICT (story_id) DO UPDATE SET + project_id = EXCLUDED.project_id, + title = EXCLUDED.title, + status = EXCLUDED.status, + metadata_json = EXCLUDED.metadata_json, + baseline_commit_sha = COALESCE(EXCLUDED.baseline_commit_sha, 
story_records.baseline_commit_sha), + updated_by = COALESCE(EXCLUDED.updated_by, story_records.updated_by), + updated_at = CURRENT_TIMESTAMP + """ + ), + { + "story_id": story_id, + "project_id": project_id, + "title": title, + "status": status, + "metadata_json": json.dumps(metadata, ensure_ascii=False), + "baseline_commit_sha": baseline_commit_sha, + "updated_by": updated_by, + }, + ) + + def _insert_story_link( + self, + conn, + *, + story_id: str, + link_type: str, + target_ref: str, + description: str, + metadata: dict, + created_by: str | None, + ) -> None: + conn.execute( + text( + """ + INSERT INTO story_links (story_id, link_type, link_value, target_ref, description, metadata_json, created_by) + VALUES ( + :story_id, + :link_type, + :link_value, + :target_ref, + :description, + CAST(:metadata_json AS JSONB), + :created_by + ) + """ + ), + { + "story_id": story_id, + "link_type": link_type, + "link_value": target_ref, + "target_ref": target_ref, + "description": description, + "metadata_json": json.dumps(metadata, ensure_ascii=False), + "created_by": created_by, + }, + ) + + def _load_revisions(self, conn, story_id: str) -> defaultdict[str, int]: + rows = conn.execute( + text( + """ + SELECT artifact_type, COALESCE(MAX(revision), 0) AS max_revision + FROM story_artifacts + WHERE story_id = :story_id + GROUP BY artifact_type + """ + ), + {"story_id": story_id}, + ).mappings().fetchall() + revision_map: defaultdict[str, int] = defaultdict(int) + for row in rows: + revision_map[str(row["artifact_type"])] = int(row["max_revision"]) + return revision_map + + def _next_revision(self, conn, *, story_id: str, artifact_type: str) -> int: + row = conn.execute( + text( + """ + SELECT COALESCE(MAX(revision), 0) AS max_revision + FROM story_artifacts + WHERE story_id = :story_id AND artifact_type = :artifact_type + """ + ), + {"story_id": story_id, "artifact_type": artifact_type}, + ).mappings().fetchone() + return int(row["max_revision"]) + 1 if row else 1 diff --git 
a/app/modules/agent/story_session_recorder.py b/app/modules/agent/story_session_recorder.py new file mode 100644 index 0000000..4171e60 --- /dev/null +++ b/app/modules/agent/story_session_recorder.py @@ -0,0 +1,106 @@ +from __future__ import annotations + +from typing import Protocol + +from app.schemas.changeset import ChangeItem, ChangeOp + + +class SessionArtifactWriter(Protocol): + def add_session_artifact(self, **kwargs) -> None: ... + + +class StorySessionRecorder: + def __init__(self, repository: SessionArtifactWriter) -> None: + self._repo = repository + + def record_run( + self, + *, + dialog_session_id: str, + rag_session_id: str, + scenario: str, + attachments: list[dict], + answer: str | None, + changeset: list[ChangeItem], + actor: str | None = None, + ) -> None: + self._record_input_sources( + session_id=dialog_session_id, + project_id=rag_session_id, + attachments=attachments, + actor=actor, + ) + self._record_outputs( + session_id=dialog_session_id, + project_id=rag_session_id, + scenario=scenario, + answer=answer, + changeset=changeset, + actor=actor, + ) + + def _record_input_sources(self, *, session_id: str, project_id: str, attachments: list[dict], actor: str | None) -> None: + for item in attachments: + value = str(item.get("value") or "").strip() + if not value: + continue + if item.get("type") not in {"confluence_url", "http_url"}: + continue + self._repo.add_session_artifact( + session_id=session_id, + project_id=project_id, + artifact_role="analysis", + source_ref=value, + summary="Input analytics document", + change_type="linked", + created_by=actor, + ) + + def _record_outputs( + self, + *, + session_id: str, + project_id: str, + scenario: str, + answer: str | None, + changeset: list[ChangeItem], + actor: str | None, + ) -> None: + role = self._role_for_scenario(scenario) + if answer and answer.strip(): + self._repo.add_session_artifact( + session_id=session_id, + project_id=project_id, + artifact_role=role, + 
summary=answer.strip()[:4000], + created_by=actor, + ) + + for item in changeset: + self._repo.add_session_artifact( + session_id=session_id, + project_id=project_id, + artifact_role=role, + path=item.path, + summary=item.reason, + change_type=self._change_type(item.op), + created_by=actor, + ) + + def _role_for_scenario(self, scenario: str) -> str: + mapping = { + "docs_from_analytics": "doc_change", + "targeted_edit": "doc_change", + "gherkin_model": "test_model", + "analytics_review": "analysis", + "explain_part": "note", + "general_qa": "note", + } + return mapping.get(scenario, "note") + + def _change_type(self, op: ChangeOp) -> str: + if op == ChangeOp.CREATE: + return "added" + if op == ChangeOp.DELETE: + return "removed" + return "updated" diff --git a/app/modules/application.py b/app/modules/application.py index f82f165..8f4cbba 100644 --- a/app/modules/application.py +++ b/app/modules/application.py @@ -1,9 +1,11 @@ from app.modules.agent.module import AgentModule from app.modules.agent.repository import AgentRepository +from app.modules.agent.story_context_repository import StoryContextRepository, StoryContextSchemaRepository from app.modules.chat.repository import ChatRepository from app.modules.chat.module import ChatModule -from app.modules.rag.repository import RagRepository -from app.modules.rag.module import RagModule +from app.modules.rag_session.repository import RagRepository +from app.modules.rag_session.module import RagModule +from app.modules.rag_repo.module import RagRepoModule from app.modules.shared.bootstrap import bootstrap_database from app.modules.shared.event_bus import EventBus from app.modules.shared.retry_executor import RetryExecutor @@ -16,16 +18,31 @@ class ModularApplication: self.rag_repository = RagRepository() self.chat_repository = ChatRepository() self.agent_repository = AgentRepository() + self.story_context_schema_repository = StoryContextSchemaRepository() + self.story_context_repository = StoryContextRepository() - 
self.rag = RagModule(event_bus=self.events, retry=self.retry, repository=self.rag_repository) - self.agent = AgentModule(rag_retriever=self.rag.rag, agent_repository=self.agent_repository) + self.rag_session = RagModule(event_bus=self.events, retry=self.retry, repository=self.rag_repository) + self.rag_repo = RagRepoModule( + story_context_repository=self.story_context_repository, + rag_repository=self.rag_repository, + ) + self.agent = AgentModule( + rag_retriever=self.rag_session.rag, + agent_repository=self.agent_repository, + story_context_repository=self.story_context_repository, + ) self.chat = ChatModule( agent_runner=self.agent.runtime, event_bus=self.events, retry=self.retry, - rag_sessions=self.rag.sessions, + rag_sessions=self.rag_session.sessions, repository=self.chat_repository, ) def startup(self) -> None: - bootstrap_database(self.rag_repository, self.chat_repository, self.agent_repository) + bootstrap_database( + self.rag_repository, + self.chat_repository, + self.agent_repository, + self.story_context_schema_repository, + ) diff --git a/app/modules/chat/README.md b/app/modules/chat/README.md new file mode 100644 index 0000000..c6edc18 --- /dev/null +++ b/app/modules/chat/README.md @@ -0,0 +1,98 @@ +# Модуль chat + +## 1. Функции модуля +- Внешний API чата: создание диалога, отправка сообщения, получение статуса задачи. +- Асинхронная оркестрация выполнения через `ChatOrchestrator`. +- Idempotency и стриминг событий по SSE. + +## 2. Диаграмма классов и взаимосвязей +```mermaid +classDiagram + class ChatModule + class ChatOrchestrator + class TaskStore + class DialogSessionStore + class IdempotencyStore + class EventBus + class AgentRunner + + ChatModule --> ChatOrchestrator + ChatModule --> TaskStore + ChatModule --> DialogSessionStore + ChatModule --> IdempotencyStore + ChatModule --> EventBus + ChatOrchestrator --> AgentRunner + ChatOrchestrator --> TaskStore + ChatOrchestrator --> DialogSessionStore + ChatOrchestrator --> EventBus +``` + +## 3. 
Описание классов +- `ChatModule`: фасад модуля и регистрация публичных chat endpoint'ов. + Методы: `__init__` — собирает stores/orchestrator; `public_router` — публикует REST и SSE маршруты чата. +- `ChatOrchestrator`: выполняет жизненный цикл user-message как фоновой задачи. + Методы: `enqueue_message` — создает задачу и запускает обработку; `_process_task` — исполняет runtime и сохраняет результат; `_resolve_sessions` — валидирует и сопоставляет dialog/rag сессии. +- `TaskStore`: in-memory store состояний задач. + Методы: `create` — создает новую `TaskState`; `get` — возвращает задачу по `task_id`; `save` — обновляет состояние задачи. +- `DialogSessionStore`: хранилище dialog-сессий поверх БД. + Методы: `create` — создает новую dialog-сессию; `get` — читает dialog-сессию по id. +- `IdempotencyStore`: предотвращает дубль задач по идемпотентному ключу. + Методы: `get_task_id` — возвращает существующий `task_id` по ключу; `put` — сохраняет ключ и `task_id`. +- `EventBus`: асинхронная публикация/подписка событий. + Методы: `subscribe` — создает подписку на канал; `unsubscribe` — снимает подписку; `publish` — отправляет событие подписчикам; `as_sse` — сериализует событие в SSE формат. +- `AgentRunner` (контракт): интерфейс выполнения агентного запроса из chat-слоя. + Методы: `run` — принимает данные задачи и возвращает итог `answer`/`changeset`. + +## 4. Сиквенс-диаграммы API + +### POST /api/chat/dialogs +Назначение: создает новый диалог, связанный с существующей `rag_session`, чтобы пользователь мог отправлять сообщения в контексте конкретного индекса. 
+```mermaid +sequenceDiagram + participant Router as ChatModule.APIRouter + participant RagSessions as RagSessionStore + participant Dialogs as DialogSessionStore + + Router->>RagSessions: get(rag_session_id) + RagSessions-->>Router: exists + Router->>Dialogs: create(rag_session_id) + Dialogs-->>Router: dialog_session +``` + +### POST /api/chat/messages +Назначение: ставит сообщение пользователя в асинхронную обработку и возвращает `task_id` для отслеживания результата. +```mermaid +sequenceDiagram + participant Router as ChatModule.APIRouter + participant Orchestrator as ChatOrchestrator + participant TaskStore as TaskStore + + Router->>Orchestrator: enqueue_message(request, idempotency_key) + Orchestrator->>TaskStore: create()/save() + Orchestrator-->>Router: task_id,status +``` + +### GET /api/tasks/{task_id} +Назначение: отдает текущее состояние задачи и финальный результат (answer/changeset/error), когда обработка завершена. +```mermaid +sequenceDiagram + participant Router as ChatModule.APIRouter + participant TaskStore as TaskStore + + Router->>TaskStore: get(task_id) + TaskStore-->>Router: task_state +``` + +### GET /api/events?task_id=... +Назначение: открывает SSE-поток с прогрессом выполнения задачи и промежуточными событиями. 
+```mermaid +sequenceDiagram + participant Router as ChatModule.APIRouter + participant Events as EventBus + + Router->>Events: subscribe(task_id) + loop until disconnect + Events-->>Router: SSE event + end + Router->>Events: unsubscribe(task_id) +``` diff --git a/app/modules/chat/__pycache__/module.cpython-312.pyc b/app/modules/chat/__pycache__/module.cpython-312.pyc index f8e125af01d8e61dc1a4da537b63209f6ee5bc88..ffc426a83a8882b961830d4aafa33f267ed6678d 100644 GIT binary patch delta 58 zcmexp^wWslCc41jqV0kVZyx6?VBaRJDFh&XMbaCR^I9ac`7qe?v@a!da zCBJ5U^+b;2vkn@}n*e$24RkPx_IbP9a zNp6;3B^i2_oG;}#%FpscAeFdTQpzO^Xm>WNEQvEf4RJwTSmnMT<{AEUa>m}&)HJNn z>EuXEk)>yXqH3V>)Oc(}R&^}6<8jeo|dQoDsJ)8QU#;(Fv{wZDXg66{L)P!?>S>*w2lBFY5;J2&N?0>_EH- zzwJ-cw@nZ8R;41>lc-`ZI9wL<5?Qh=*_Zf4HG5D{Z&&Ja3LvV71uXrtr~yS*rEy1# zU9kE1RU`XeQI*ajStT2*w3SddYR!IHzCY5^d~Ofs1soDzl6I#oDHGOGQ#K zL=+&MAgiz|59i#@8WIO^GLBgxn$HRd4}xBit#X%Ik0$ZwB?1`!$>y?PwzZZx%vS6k z-JV$itS>lSIzL3x#~zqm28-m;qz_E)!kZ{Z`&;^E)cV{&~ zy9c9XpV$q%20j9d79>4v*ilCYa6!vTZ}w)**eW_B5= z$qstK>CuL+FH91t_P=HqAQQM**uS0C1>#j*2e&L-{aC|mEL7NB`9v-Z1y8irlqd64 zgv}L}vqG1Py;f)ooyf^&O-kM_SJG%mIl0rHI0bod38utp{1Ov8R8$nG%T035ob>Hz zOZ8GfIg_(ZoMpX5F6S=YSu<>>7IwjP!q6ZErH5`dvOBH;*c#@9wm!C930@W0x(g1| zSF&XYEfG-91Z7S<3riIc%n%%=X3bqeM%mxo-rPjJ`H4tP(%xO6js5@7ObyaL3~g0p z&Bdkck42%K00jt;1&Zq|J5im-jb&)gMs=aMJ-gAjjhK*Asn|JGN&nj8BJ+6eD6T0TL^uWz6zM_us3mZhL`Np+>sVkn;4V_g{o`vyHMu(NP>hOr$20HweWt6Es31BEEng89K=3eI@fxq1;THGzoy} zjAmnsqHhXQr4ON8fSG7sw8~GPMVT2hH6E3wC#Gl=1VN)pVk#M>r-7m9nm&V|-3~j` zFoH~*#KM9A6Jg%+R-&W_%a@4f>(Di0hUH0~lT)(NO;4fyF#Dv!53{mY`pUIrA3^CD zfU1urQx|1Qv7pmqtiSRlk-Fh#+N^wsgwsFr_md+XG=QBQ2$(_D5P2#b9tihSbfb!? 
z*mIfXphKuqif{%&TR~mlbFcyEA=X@7-#|N2-i4s$ummYgg*JWdg4gx{J5gORum|Na z09AbKSh$bsk;kCvI08nj>U+aOC;B2<{#DbXM|;EleM5c6DZWxv16;eX2Pu2E`iLco zHj@ZbtfJ--M?RgI>>g(4YieN^EZ2C75~!su!EvNA>%e|fQ=%_JzK5wbHHHBoQ!RQX z*8Yy~<=LIuqeCBA3ttK^h41s6^9aA?@N7A%wj32eT(hoNHyzdMj_QvbuI2tscVOKe z*l;vx+_k}#!42%Gx*quP-dFZ+I9fh)nr!y_ZqDUNm)2e9J;y}Olq8>nW77ct`F$&A z8-h(v>7qBfjN-%mB=y49U{rTK1hJGr?_C%c(63>QhV>Sd zv}kl&>9AASupe$X(IvdUpFGv2f4`$0=nwXgr}yeV2=(fL*=*Ghd$`T6-VRXyaNj&R z*{c7rxg8j_o}3Em)j&Pa_iO@a-E$D6of_@NIPQ6hhuegEAu_zrc&|y1bejNjIt}Xy z4^muKny(+u;MzR+_C#2;@A8~vQ&G0!+ClChM%AS)?V~NU~iJfharwHoE&hFc( zdtV-b;x0^Ojyl#Xzh5g^^bG3>1;|!L+TqJb7S!zu3jaf>#%!HVsk(Slk}sw3f5NUvo@XyN*OKk@ zZ=2uN1-0X%6{`mHtrZJ=RZtVcw-7Qzv7dGL_Jn{}g-CBdWL!%HRyb8J#bfZ!*Zu?K z=Q{)Q^)N@LgEx|Nb>~q+%Gvp@qtD`Vl43-37Quq>C_)dyw-M$L&LChW=p}^b0aTqb zJvA=V8K7@)v>fHy$;L+_MJQp;?iv3eud8~s^*)E+wXVmr#FNLjHOc)1TkZa?TeC)+ zAc_AJc15s~9!y6b4wK#+=uMln7NHiBFE;{x4XOOlUl|r^5(nB!uL=FgW=L4o7jQEDClR6YBC3Ro0h9pP1E3T@8Tq?qKY7Ym zk$sXM6_H_Sn18~heiffqj3e7lV95FtcK-MY<58=$<4e>`Mr9B3rC*i1c~a%}SR9J1UB2+^`#ycXa!2~l^JCnOq8Jh$uhCZ1>`ORkFjv;m%7*hd5I zc$Uj?>#|b1EYQSs+sw96XZbpr!BUV5G zPNEJ^{@)Ju9`nd$tKy1t8$yP@j5Ho`aZahaE^P?ptpYdsd7j5ot@xB0xZ~{W%9$4iUQ%<^)BwG!nM~K z43l?DTC4jrnjEJe&0;TRv0?I6NhLaNc3@clt|?l!>seEC3g9OFWKNzK0W-m#7=t}# zPmG(g+`j2Ii*?13^yaLUS|y-Qn0kqqOf0wWwp?~B3(+R>m$Ea~pi-xVHtWgp(qk13 z%Z|izE|I)nT3h?=7BOTt zYBfEkFK8hiHxu1+Grd_}gjSlV6ATxQ2LKFG5E5}a-3&KgNJghpIJH1WuNu`aU^S*_ zv1t`of!I2~aDFzfT?kos*+Z`}Cx$qiO3h=Ko*2&1=2AalWpaQP(DFEy% zluN@#c|IRaVs%xZi4ePQKblJS_|{Qbgo>6NQx{^%8C6sDgLnk&N68m8KAP3PYrK`L z*oS}%tzt;Aq_(JH90krG>Dv1fWRQ>deu9psf3a@}z1W7!K?0KkL-`Er*y%{*SY!x; z4@1;qmoj&OkAVuy4c=Df`YeN&)TE9(h`%Qa zI>US(JsBCq63`GLJ`VumGo-%AsWXFPtlEZsV7MWH@|Y2eqYx6rL+0yMgbaW5O27=fH$I% zuy~`XhvGLI+@msgOF$#7;w`ZS@OCuPF5c?s^Hbus2aR-zx68eNccPJl;_a@ZR!V%_ z>lx`Ox>MHAQR34w&nRE=X$xp=<)Kllxa9NU*or{f&R`dV2N~>gL6e+xzc)bf2Y!KSJqRMe>loeuaXbi@o|Oed zL}#rw!y*AL3zUy%Y579}J`8u3oejI@#Ww|0=C~EO^v!Suat(nU4`2$wVe+VXassAL zrsH#s25*Fph+Ip-y=O++H;-VAq-9=lreiYQGC_Xb5=0-SKWJGLJaKAl*e|Ftte;n7 
z`Zyh#nc&Ik_9NYFINBCtaXmVnqE84gyaea5$9Ea9Y-AzJuAqKQw%e;En0|PkeA(_t z57XrxcPt^+*KFEUfQ&tOcske)Uv6EdVqCG647`!<_(r1V9InohCR1>wlKPZ;O%1hMu0%E_$3 zCgjEK>EHK8P}@7;hGokf{szeGA^)@;qGNkuR_(>Vr2_gyIc str: + value = (text or "").replace("\n", "\\n").strip() + if len(value) <= max_chars: + return value + return value[:max_chars].rstrip() + "...[truncated]" + + class ChatOrchestrator: def __init__( self, @@ -78,6 +85,16 @@ class ChatOrchestrator: try: await self._publish_progress(task_id, "task.sessions", "Проверяю сессии диалога и проекта.", progress=10) dialog_session_id, rag_session_id = self._resolve_sessions(request) + LOGGER.warning( + "incoming chat request: task_id=%s dialog_session_id=%s rag_session_id=%s mode=%s attachments=%s files=%s message=%s", + task_id, + dialog_session_id, + rag_session_id, + request.mode.value, + len(request.attachments), + len(request.files), + _truncate_for_log(request.message), + ) await self._publish_progress(task_id, "task.sessions.done", "Сессии проверены, запускаю агента.", progress=15) loop = asyncio.get_running_loop() diff --git a/app/modules/contracts.py b/app/modules/contracts.py index 402c14d..d9898e1 100644 --- a/app/modules/contracts.py +++ b/app/modules/contracts.py @@ -37,11 +37,11 @@ class RagIndexer(Protocol): rag_session_id: str, files: list[dict], progress_cb: Callable[[int, int, str], Awaitable[None] | None] | None = None, - ) -> tuple[int, int]: ... + ) -> tuple[int, int, int, int]: ... async def index_changes( self, rag_session_id: str, changed_files: list[dict], progress_cb: Callable[[int, int, str], Awaitable[None] | None] | None = None, - ) -> tuple[int, int]: ... + ) -> tuple[int, int, int, int]: ... 
diff --git a/app/modules/rag/__pycache__/job_store.cpython-312.pyc b/app/modules/rag/__pycache__/job_store.cpython-312.pyc deleted file mode 100644 index e196f1e21727337c63c103b43ba5bdaafb979ca4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3589 zcmai1O>7&-6`uVexm;4DMOvaLi!v3(QP#Go)KwcdaNPusZL~-PM~aJ>2Q3)x+9kms zb#`T&0uAWk0!-1sl2L#TP@rm4AP(e%(Mx-DkV6ke53~$$yRgv!MGw6Zk%|;P^}SiH zWG*)71Mz0wn>RoEe!dz0b!bQ?K(GD!H>JB0A^$|dr9i*4dmTD$q7jX&kSdqwIPCKk zzAEGe))y*bHINUmzE}xXL-|lu%1cZRRKis`FLOlacyf_w!P`U&ZS&6B+x;!_5jG~l zSlAuQM;o#d`8Qf9oE_E*W}#Rq7zPM|)zy+V51paK!nH+xrEZkWI&CPt9X(HJon9(5 zD)oY9%kS2;)r!7QsOn%G`*BUvZ~mnI;gVS}SB+wSZ`KHXVb_FCo9HCZX(Z2ICc2=D z8g~Y?tjDunpd9VnztH4EdH`$%w9v%7q=!KY^d*@|K_-Rr5k0Df)*Nj|hfI?YOEIYPCA-0J|BgN+BzV9enTd`OD|u zrb8gm7;1!4&7^S@4yG;eGb3FfB@;al?KIpV=N3E<1Jvn zAYUa$cEl0uaB4?PS(y{v%&AW1)cS?i!j5>#N{_Fn9b60XfTweTFF$^wb6m?bFTg{&Oq<36KmvX zOZ-@N9E3SLa2yO8JI(@f03v z@?{1&gKBUE?^jf1Og0Z~_fN->Cci`!enH`B1Z*gNpdvXLhEAX&vMexLE>gW<>R3{a zEoQPExL&BN>WV<8&=9BF0$3QhLDuzm(3!e+c+lI9HU*Z(kASU{r?JsLoO=-14t@I0 zqsb?+OD(~YqIcwvYAGLhinKsq^NgC(?UXJkjb$2)|+= zLRfq(LSKQ`Az?@v6kaOtN9$mu$+>ZY*zg9|0LZ>2fRE879-<$Z`>`wIjvl%t`p1lp zhKUI!;*h`1{wYKg!Ge+hDL{)z;-Q( z!6u4ei^aO8+oGl$MLX!=s1iKL@B=Oi+0q} zI&0eUPci%1g~{;SorD`ZMYLa4>=y8BtOT60K_gqB^&56*#kqIsNl+Px98g4V83}N- zwc)`U0u_4N_!}?)t61Vr`J-|-mhHr{TN696TubB!Oc z^5$~u0@O`6e)MttXpa|$Pd?4e+!wm9p6 zg~U{>b#YH7hsLb*L^rK;(u#FtvU_BP_n%I1~!^4;Z!T1)mCQZwDu?Bmqz zBe4}7xWKGvyc?b9L?^7|X#0)LH@eB`PIB6(L{d{0{L;sIaT1>V50S#o?WV|d7{3lf zK@q?yA_RR28N0198|VzO?;$&lYz!HD-XUO9L<`1nzOe}GHtF$PAiMR$9)afJ`@7im zf$f#$ySaQ^f7E_QqK(SUqU>JezR-rvK<*$#JFfQNHr z|9^*WP^?s2#6KVGXE__2n>%;_vX8S9XkZk7rdJC_u2`>D>oo?o4kjOd zakPX_{!;Cl1GUT->}v--@LW3M?01%)g~Q-m%y=Cbzz)YfBU8`F=)cJPKgjFP$XqW- d_{cYWkb9*^fb9w(_E7vLF~`YY6JYFH{|AH=x3~ZR diff --git a/app/modules/rag/__pycache__/repository.cpython-312.pyc b/app/modules/rag/__pycache__/repository.cpython-312.pyc deleted file mode 100644 index 
dd6d4db92f81b88fcc627dd8aefb6bdaf6db3f79..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 13283 zcmdrzS!^6fcGc6}b6>nqiLF~BbE(6Y6Uma+Lvm!%qDEdGT8p+k7)>{&p@&1dduWlw z5Njk}#D;~~%I-!|ti%l9AQFTG{)>_1W9j50Nb*67f<4>N1`8OQWPut9Vkyq1xKVzHr#P3gk2;1N43StHd4Y)h+eEbAw`qOd*ZUrF(mDs!IgPp@ z*OV*d{uQgtPM zd2#G=-`F*kRWh>bO{PU@GBz@HIhGVvzZ}03QzS)!55Z@<0v|P@sBAKwQGK+jB*uo5 zDM?Yi!*RIlPb+TdOvnn+v9zSRC0QPmV~H`*#6?L-s9vpnbWEH`Nzo9eI%BbPd{m0X zRCg?<6$9>##om~Rr}UaYELJkPvDiIQzzJK~79Sr!sL?0~Vb}*{X?#pcX2#^Hj`1nk z32i8F0iqlR@I&&8>bga)E?3*I$Tj4=f!{rwtEpe)>T_Cguz8Vd&S|i^^&!_PHyM4D ztrrKSa505j0E_D-21Mp=f!G~gS%a<0p{X{9HO26IqtA7vfiIq zD>~n{4>`p;(FJrav0ik;-7PkV9=LnNMzKlszReGL0c#e0fcc<}AL{&5_E3xJgO%~gX3qIEHk1rW(gfJ>W`8cTh=zB%Uo%TjKg>mnfrRb z#+@M>(%6i)#(fhstkKrEZ-Rz1+8Xyw(C|iE%L9>Uf6w`-&~sXd zo*#gMp8kP;;Y}$4Via$O<#P)bmrgzq?(B_Jke28VMc8jijUARW12rRD2={fJ3HR-P z_L-3IQeV%xaNkAYY~*75n)KuH*a!@=l=kT1BO&?`(ZSx{wJ8&_6wiRxif4p@o^z4@ zf$+JP28M+0$m#H4?|{%X*w+_{4#dnl%OQj;OXL3xcEhq}g9$TL@Oo2bHY)w-O;6Z5 z6OIeN{+ww>z+9!n&u(U-%Z!E46CH@W5a}})$ei#)YcEx)eR-k|ie{)d_9MRygI||J zG%7S$!T?Cfrdn%u$b1Vf%la?bDjTjnOB3VZYw-zz~k}ERedoxWOh3C80FqoOta?FM=kwz~|qL@ry(RJp0ZezXRM=lGB%yn}jGmtA0PcjKBcnQ`NhvXrkz{n-RC{7 zu_~uXsbSTw1t%(p%@y?i&{8*JQ=gPpCS)m=iC+fSf4Zfw)V1s=tE`9i%9jA%B9E$C ze$@JoJ?E==|Fw5tTl8(qljj*%^K5O-8+^a_-QGL3^V=hfO{X7vU(ib)1#9y*dtGCJ zbyihBX5Bu2zMlB1uraOjVO0RDs-S9jX_W`7d{`BLDxtKB#VQ`F98eY9xC#e0zS#7} zL$8bjJNxe0>*tokHXQsi{e&e7R}?RN=Z9U6uBX_)dy4OR#*rxXnb8x5XQuGifLM9i z8+h1kmffs}on^8%(RRmH^Zoo2H245&$@8Ti9Vq^CMr;>~{+hMx zbRpb380i=GA6JrMyKsCwDO$G!e(yRTJ>A>WHBkAIkkEZz7<{Q49ozoMK*=kGli=8P z^$vDNx;u&kvHYTt4WeR*ds!HPyd9P;#K}A1CVK#=?jp6k8wwN*1wlr1A@NU)E0UZs zgmb#JTuRDohmd{{0EmgN;k{SC@aAPn6m+hmXUG^!HR<8y?roT#m-gn;buod&uBm?A!hXVkMQsWe7S|lPqby3 zBx7*pdK@{!W^l~(%9;^f!UnmUvdE@??Z^iB^x6JMZv=SbuyDHX{JCj`}P67?@ zUVU#Q?;yV5V<)L?yv6?L*s=#mmkEHc5a|5yY`BS?YvRK@*Wic$fI85fmQc^XJl&=F=%{ZKEcRAw>F9xqzGtNhiw;MlfrtGuK z0^O3EY!j|$KInRHYTg-|9RRTGf$}^7pfl3(@Bw!203UwZkw6t_T$ppC2JdeHc>+e_ 
zvfNe-UKX;WcUVKYK$3)8Pr+;{Ndk;<$I?JlnmjWq2cu`=pjg@~6ivIJ1<}@K%`|@_ z)ck2ia6JO>*;r^x^_GlIh-R#K%~)ZQoLVfcYBxM8sLRk)>9)$Pxzw2Voo+1Wuwu<5 z5`G`RY6f#x+9<=v%6m!Ep1ZI8^v#7}^l`umR;s#arRsoenE+Tqpfl5UNRX=vE)hgcJvetnlqZQUprEzof&C+w10ZR+7xAZ7$DG|*KS{uc7fIsZUb!8 zm}XnBF(Ml^){`yRTtn9;C<)9?ZZfy0xe4Z!>rMg)o8{bx9P+ckja1N7hd!06U7I46 zrymbDy8YqMhpWk|t3+0*1pBd{0D^KEpk9W8&ODXh5*gOM&Ps-hH9rJ8B?;gy^0=P( z8_PV+rb;Pxqo?TwPqUS@?$2%Cooj2)Z9kCPwh#Z?o>~fEShlSJr20@%s-K2#mI;6* z1iDl|6Rv0H>iKY6xvycyM8Ae1J%J2=E57GeeGP?~B1LaQWRoO=>qf7l@9{D7`;3L} zA?Usb7md5tl6~DLDP_X73`)D8Go7|8*DjPw>)L_=TvQa(b{(g}r{kJrS@OK;TASmu z)=OKHBu$PQVG=`5EAL4@0#(#;()RYA#+JG>P7NNKIr$)f3UA7RE)&|hx^g?G7o$_D zpru=FYtC%jg7!gXJ&4qWkN7O0mRNw6kY)9@(J zxDeQL=b2gd!()KVJNGPagK7-*iZ0a=cJ2rtey%)+V8&|y0wa6^dD%h^p*&UtcZyiq zxh#vhhWmP+16r}`NzYcDt|Q$Lzd--y=agl;$@_f5b`^ZMOF=?r_>%aZ7g^j}zYinD>kn+#t2t;l zWA3od*tawid&Nk|h#3oNZ$Jp%90S_9$hq8zskMu(j2P33a}%(eile-?b`FAzPWNn` zhlOyoTcDd1Cjq^<|2X1GC}g7!zRJ@_mBX90a)g5#V>0Xl$vFRVCj!drj*{$^SCmpx zMv7^jIka??#``KWDW3)+1)i6p!!_-+8QD&deb9MJi_^BicrEK8B9W z3l4w4X@_p}#Du}?)6hcX5YBKJofkn8bULf}#HfQa7kriI+ms>(x;$&CCm zyndd*_{dms_xk0QmJ+%_O)_!C>&OPIT6?bKkA*6u8f7 z`Oaiu)G7Ri zIt8qB2-0|;TXN}dh6KX?LwBUV%W{MpiJ9ga$QNOPAqRnBLdC{~7BAtxf+kk>qFBl3 zs#5Wh@yw|vJ=jI5>;#5EA}_}iuNxllwsH}wsQwbjl_>zLt^WqHGO-Xm`i)7-?dQTr z**T%Qli}u$^5IkEw%hcP{}l!brUZP{ODVL(;B8vjaMQ3%G79*9SEvP+w~IEBwUQ|~ z=t;8t8aAIto1ccGz2Mt2R(8JRjIk5wl%0t0Mu3W3?gF547%x*= zI7>2mIR=NmSH%o8{HTnV%a^>pJ~G;#Sv3XKtT?mH(vw;qGILt;ZJXkA2$pp#Ho6(EU!?xe;>oblQX4eEQLNh~8+fiuP2brupBB-h(AA zi}f@~j0>b?iYZ3Z#Smia`z92t!AFXfj<@)3ES(O5SZcooFosw9$++Mv?i6fbzk*+I zNiO!}Ecx;t?8!x&0XJNdU&J?$A}F?Jeuaz$AMe3(`k}7^HtnHg!>O@2bWe>E{UAz+ zU6d_>ReeB4Eg{z-z=KEf9s~yv976C6g5N@L3c>RTx)Jmuh#`n0pb<0X9l32iloRKmbjHSQt?#c zsx&&K9doFISiVC``YIjD)wnE)9pYupu|UUKn~w4+T5+DxC{D==G(mf$;BH8sWfD2!({%GA_ diff --git a/app/modules/rag/__pycache__/service.cpython-312.pyc b/app/modules/rag/__pycache__/service.cpython-312.pyc deleted file mode 100644 index 
71628b30660b3776595b592e540df073504dd85d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7405 zcmbtZYit`=cD^&58NMG9Nj+%mtw^G5NwKZ9EIZb&UUm}4a%|adENn{9oRLI@qU4#8 zEh%I-O^kvr+=kAgl@<+J8C@U>+@k6~{giBHVW2=EhVGI(tDr`=1-kzj%k4TwfApLg z4)sj!wwIVQ=f2LpbMLv|Irs1%>~;%*G)Vttc)y2`-(W^hOrF^o1!j)$gr_28NPAO5 z6y)g$J#H8>j59+Fh2@3_JI)PpD$hiWugeIp6Z@fu( z(>+=nX~;f9`OUw@%z&R(%-uJFVL5m;A}FTbU?hUvmy}`+OF_NDPbsd`;gMkPXi)AS zzbf#&AVQh_LU81QFgYQGeV8>Qk8A!8$G)F<2_5Uf&2GX27&dUxDt4kZ^c*jj<$i+JavswzZIMZPR&J3Sn zhIc`WBEAUv?5f=&iQ$W(UGXb^ty{429(cSs8(2LHUjn_|u+L?R1$Jpr5U+}QEga;%+lEM?wK$ut9;V=xTSSQ7a5fO?5p{v`L z43wCB0?!5qi+6@lux#??BGLEqe4OE_IBZv(8zFI{Tv(tO&C`El(2r{$dH*&cKL$y0 zQ{23{v@!D-jxYz#;qvTA&>t6nM~YeXu|5G{ z`VhY0&kb{iKVn%Dx5bQOMLU{9HfGaDq)3eA=~>vAxtCYR!WgtR z21RcFDMMa5?~>h;$I5n>fnpo)<@aFsNHI3X4by|9gWR5|BXY6czn+KyzA-NUCEOKq zd*lY0He4Y$C}=wfPxap#Zcu)1K*W>uTiknqS5}gPqO35JL3vame!F6b2vNl>M5o3D zF(?a)L6SwqFe1oGN#N>KIKpekLJAB|hym0;#Tb-kqM`7FVwNWY@~9{TdBv`sseD_7 z13VU@ykhR34hfSosITG*L?`6%@Jv8gUeOIR0u>h~Bf*f6@9DRTxFI59mgqyW4_jH~ z2{{;1hnEz4ILZst0-w*?hJ!FGJ!b(+qR)a=8!8OWTC7GnoO;A1#hFI{{o^D&<#@IW zNyOb#9IAwX6b(*FqZ9IM`Ia-iy$YL4VD`wrljk3iXEsvmn}6-6V+m`vtUO`QmX;-K z*_yh9Wqn`$!lk8}$NSo!60`Xnb?Z!`KiR(SE=kIB(|4v9xJ5c$dLZpSnCQ#VoV_+% zUXdwpPnEYXT}+oBPjc%`ZJDOdR8!}2edhQ(spIcF8csJ|%G6#;ITDi(@TuG*wM#}RjFrnoUx+_-36y0m=ektba|kg*M2)s+6N@ zf%(F6*OIC3OjUO-cRf0uc3jSwF0Yv`XPx`!BWup)HB)oWN$M_B-#z1qr{XE0V3Mx= z3lt3enJs64?29M1f#+G%rLj4eh;tdbNY5aa39c~1|BZ}-nMhK%_kp2!+1Kx36vhzy zb|f?uG1IPHD~z_qRQv|u|IgqNz+@E6&>kjZe2dAr0`2QnlVQXBdGzR z&=Zpi#|sewOa(VJ37(G(Zrnr^;9mAS#9C}vhom0KYuJSYcL|Jvco0h(kTfCzZ6=~0 zNi&iINK}(xhxU;f0C7CqlHa+-B6MIQv1doOKIQbjOX&2=W^DwKi_$Q|6=s+ z=;BPep>wUFKU-0gsW_IZIJWFbS9B#$tsgv+Ie0R4@Z>U|>3TQS_3qk*_tFQiWE!p{ z&%r<$PgBa%v{?1n)A`E7=8Z(s_Pqw_YcNQSxG2w>3kE4`bA9^ut+z9_`jo9cYx8Ao zWm#MKhM9F*en(idC8rV_aa9z|lb?8hk9wXnk;Wl-ahQ9FvC1(Z>4h{0JNa>U=XhJq+??6qMgD@3uhZK^@iK$vvn)1pGrU)qjF}Sb~in`moW%eJt|T zrO0c$lShpjMZS9w@i_s&Lj+okgHf{i<%R9YW1?*+t 
z0F4albs;Y~&cv8L@+ljlM@Wc%UvsZnou&K~y|5=3QXC9YVB#jfBU;vW5k3U=giw(!`V) z5=1r2iD-@#R`n4zNHW3wR16E(1r??g=P-DgSA(Gs@(APSwqR5bc6O?}mA`0)qaz9< zPTY_b15(_!EjS~-3HfceMSZZ#P$7K=MD;0iG-EzZWh-m{D*Cf%rt)yA^6*msQb)S- z#Bz74vMX^e>n@!;e&=|`-H>uOEd0UJPaXx=+zn~>yWmQ4iOC^xMmuB{){EAa&YEw2whG~%H6UUUFlwPx1`;t{(w^RyK_6Lv!(|y1 zT=fmvI2U8~^cQH=LKpgip9S7Qj2R?;W5W?R}4+`6xBq3?^cch6?(J5u!> z%f@v5kJ7$w)UMWBZzp<_-mIE69sSN zkJmPJLFZRhmoWf63=2sBID=c_96Yt@!f$TwUR%r4d?DJQZ=|jyagZ7}$OTuMXEyKq zu>x$yX#M663Lrp4fdN6ae=d@y-FdF`hc*F642KagyRZ!UT#nki{cl{7E@AYF1OaeDpOiRJ#4 zj`XoJPl?H1oN~3NTnCe$yv`}bp(#-W&t46+4BkMKm}rgbJ}& zs7L-2cs6jUHO~YNDm4Q^T{s{#+&~%q;KbQ2*v;x{C|YCe?qJvqnZZC9hgMxg^%qr> zp0~d0y#O~wyBv%Fhk<2jDkAT)xp)_u-M1A3cSNT?ga#7c4^-Rxt%JDs0cicIX#ZM8 z+tR+Zu5;<4cLC|FCCTfLt(Dp0vQOW+^-j(}Y*pZ(zK}4I`EjM6TA&)4yGFk@TfYax z!?;+1fMN**v=B1zwm{(8R4}5|hzQss`bBD7S}a8lje){M!V-k;_;5%T87#em1i=vW zd}8wolUD$;9LEd1UtiHfY65kQYhM1qg>cYrE4NzOtVK|f6Fp(n+)ITqgN zJlE!o$eBoS#k`y@*mXlik6SdrUL&Qq7DZ_Qrzz>~6lITqf~&E-k<-I~oAF=rwr zmGja<@1lEtVsU2K^W{%gJgK(+waWgK_tYa#%6m44y-}t!Atieg)3>TNtXOh1FS{`> z8{QzhZ2x@U9gvN=SCNhFw#&wn3lzs*q!tHr1mDYU?Y-hz;a7bdYFW<3u_cS`SW&Xn zx9nY(R(c<~m*c6#_Myj;gLo&F%)O*Bp$r=KosJG+Na?^ei^@@rCIPZ7yBnb*MliCp7>8 nuYjU{O&q@_=6@z_-;k<*BX52~+P@*4zvDinDDYU3s&f7t45_5A diff --git a/app/modules/rag/repository.py b/app/modules/rag/repository.py deleted file mode 100644 index b34c5fe..0000000 --- a/app/modules/rag/repository.py +++ /dev/null @@ -1,261 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass -from sqlalchemy import text - -from app.modules.shared.db import get_engine - - -@dataclass -class RagJobRow: - index_job_id: str - rag_session_id: str - status: str - indexed_files: int - failed_files: int - error_code: str | None - error_desc: str | None - error_module: str | None - - -class RagRepository: - def ensure_tables(self) -> None: - engine = get_engine() - with engine.connect() as conn: - conn.execute(text("CREATE EXTENSION IF NOT EXISTS vector")) - conn.execute( - text( - """ - 
CREATE TABLE IF NOT EXISTS rag_sessions ( - rag_session_id VARCHAR(64) PRIMARY KEY, - project_id VARCHAR(512) NOT NULL, - created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP - ) - """ - ) - ) - conn.execute( - text( - """ - CREATE TABLE IF NOT EXISTS rag_index_jobs ( - index_job_id VARCHAR(64) PRIMARY KEY, - rag_session_id VARCHAR(64) NOT NULL, - status VARCHAR(16) NOT NULL, - indexed_files INTEGER NOT NULL DEFAULT 0, - failed_files INTEGER NOT NULL DEFAULT 0, - error_code VARCHAR(128) NULL, - error_desc TEXT NULL, - error_module VARCHAR(64) NULL, - created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP - ) - """ - ) - ) - conn.execute( - text( - """ - CREATE TABLE IF NOT EXISTS rag_chunks ( - id BIGSERIAL PRIMARY KEY, - rag_session_id VARCHAR(64) NOT NULL, - path TEXT NOT NULL, - chunk_index INTEGER NOT NULL, - content TEXT NOT NULL, - embedding vector NULL, - created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP - ) - """ - ) - ) - conn.execute( - text( - """ - ALTER TABLE rag_chunks - ADD COLUMN IF NOT EXISTS created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP - """ - ) - ) - conn.execute( - text( - """ - ALTER TABLE rag_chunks - ADD COLUMN IF NOT EXISTS updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP - """ - ) - ) - conn.execute(text("CREATE INDEX IF NOT EXISTS idx_rag_chunks_session ON rag_chunks (rag_session_id)")) - conn.commit() - - def upsert_session(self, rag_session_id: str, project_id: str) -> None: - with get_engine().connect() as conn: - conn.execute( - text( - """ - INSERT INTO rag_sessions (rag_session_id, project_id) - VALUES (:sid, :pid) - ON CONFLICT (rag_session_id) DO UPDATE SET project_id = EXCLUDED.project_id - """ - ), - {"sid": rag_session_id, "pid": project_id}, - ) - conn.commit() - - def session_exists(self, rag_session_id: str) -> bool: - with get_engine().connect() as conn: - row = conn.execute( - text("SELECT 1 FROM rag_sessions WHERE 
rag_session_id = :sid"), - {"sid": rag_session_id}, - ).fetchone() - return bool(row) - - def get_session(self, rag_session_id: str) -> dict | None: - with get_engine().connect() as conn: - row = conn.execute( - text("SELECT rag_session_id, project_id FROM rag_sessions WHERE rag_session_id = :sid"), - {"sid": rag_session_id}, - ).mappings().fetchone() - return dict(row) if row else None - - def create_job(self, index_job_id: str, rag_session_id: str, status: str) -> None: - with get_engine().connect() as conn: - conn.execute( - text( - """ - INSERT INTO rag_index_jobs (index_job_id, rag_session_id, status) - VALUES (:jid, :sid, :status) - """ - ), - {"jid": index_job_id, "sid": rag_session_id, "status": status}, - ) - conn.commit() - - def update_job( - self, - index_job_id: str, - *, - status: str, - indexed_files: int, - failed_files: int, - error_code: str | None = None, - error_desc: str | None = None, - error_module: str | None = None, - ) -> None: - with get_engine().connect() as conn: - conn.execute( - text( - """ - UPDATE rag_index_jobs - SET status = :status, - indexed_files = :indexed, - failed_files = :failed, - error_code = :ecode, - error_desc = :edesc, - error_module = :emodule, - updated_at = CURRENT_TIMESTAMP - WHERE index_job_id = :jid - """ - ), - { - "jid": index_job_id, - "status": status, - "indexed": indexed_files, - "failed": failed_files, - "ecode": error_code, - "edesc": error_desc, - "emodule": error_module, - }, - ) - conn.commit() - - def get_job(self, index_job_id: str) -> RagJobRow | None: - with get_engine().connect() as conn: - row = conn.execute( - text( - """ - SELECT index_job_id, rag_session_id, status, indexed_files, failed_files, - error_code, error_desc, error_module - FROM rag_index_jobs - WHERE index_job_id = :jid - """ - ), - {"jid": index_job_id}, - ).mappings().fetchone() - if not row: - return None - return RagJobRow(**dict(row)) - - def replace_chunks(self, rag_session_id: str, items: list[dict]) -> None: - with 
get_engine().connect() as conn: - conn.execute(text("DELETE FROM rag_chunks WHERE rag_session_id = :sid"), {"sid": rag_session_id}) - self._insert_chunks(conn, rag_session_id, items) - conn.commit() - - def apply_changes(self, rag_session_id: str, delete_paths: list[str], upserts: list[dict]) -> None: - with get_engine().connect() as conn: - if delete_paths: - conn.execute( - text("DELETE FROM rag_chunks WHERE rag_session_id = :sid AND path = ANY(:paths)"), - {"sid": rag_session_id, "paths": delete_paths}, - ) - if upserts: - paths = sorted({str(x["path"]) for x in upserts}) - conn.execute( - text("DELETE FROM rag_chunks WHERE rag_session_id = :sid AND path = ANY(:paths)"), - {"sid": rag_session_id, "paths": paths}, - ) - self._insert_chunks(conn, rag_session_id, upserts) - conn.commit() - - def retrieve(self, rag_session_id: str, query_embedding: list[float], limit: int = 5) -> list[dict]: - emb = "[" + ",".join(str(x) for x in query_embedding) + "]" - with get_engine().connect() as conn: - rows = conn.execute( - text( - """ - SELECT path, content - FROM rag_chunks - WHERE rag_session_id = :sid - ORDER BY embedding <=> CAST(:emb AS vector) - LIMIT :lim - """ - ), - {"sid": rag_session_id, "emb": emb, "lim": limit}, - ).mappings().fetchall() - return [dict(x) for x in rows] - - def fallback_chunks(self, rag_session_id: str, limit: int = 5) -> list[dict]: - with get_engine().connect() as conn: - rows = conn.execute( - text( - """ - SELECT path, content - FROM rag_chunks - WHERE rag_session_id = :sid - ORDER BY id DESC - LIMIT :lim - """ - ), - {"sid": rag_session_id, "lim": limit}, - ).mappings().fetchall() - return [dict(x) for x in rows] - - def _insert_chunks(self, conn, rag_session_id: str, items: list[dict]) -> None: - for item in items: - emb = item.get("embedding") or [] - emb_str = "[" + ",".join(str(x) for x in emb) + "]" if emb else None - conn.execute( - text( - """ - INSERT INTO rag_chunks (rag_session_id, path, chunk_index, content, embedding, 
created_at, updated_at) - VALUES (:sid, :path, :idx, :content, CAST(:emb AS vector), CURRENT_TIMESTAMP, CURRENT_TIMESTAMP) - """ - ), - { - "sid": rag_session_id, - "path": item["path"], - "idx": int(item["chunk_index"]), - "content": item["content"], - "emb": emb_str, - }, - ) diff --git a/app/modules/rag_repo/README.md b/app/modules/rag_repo/README.md new file mode 100644 index 0000000..2f032af --- /dev/null +++ b/app/modules/rag_repo/README.md @@ -0,0 +1,56 @@ +# Модуль rag_repo + +## 1. Функции модуля +- Прием webhook-событий от Git-провайдеров (`gitea`, `bitbucket`). +- Нормализация payload в единый формат. +- Определение `story_id` и фиксация контекста, необходимого для определения изменений, произведенных в Story. +- Запись контекста коммита в Story-хранилище через `StoryContextRepository`. +- Подготовка данных, которые позволяют ускорять формирование чанков в `rag_session` за счет кэш-переиспользования. + +Ускорение `rag_session` обеспечивается связкой полей: +- `project_id` (идентификатор репозитория/проекта), +- `commit_sha` (снимок состояния), +- `changed_files` (точный набор затронутых файлов), +- `story_id` (бизнес-контекст инкремента). + +Эта связка позволяет в `rag_session` выполнять delta-индексацию и переиспользовать ранее рассчитанные чанки/эмбеддинги для неизмененных файлов. + +## 2. Диаграмма классов и взаимосвязей +```mermaid +classDiagram + class RagRepoModule + class RepoWebhookService + class StoryContextRepository + + RagRepoModule --> RepoWebhookService + RepoWebhookService --> StoryContextRepository +``` + +## 3. Описание классов +- `RagRepoModule`: точка входа модуля и публикация webhook endpoint'а. + Методы: `__init__` — создает сервис обработки webhook; `internal_router` — регистрирует internal route `webhook`. +- `RepoWebhookService`: нормализует payload, извлекает `story_id` и формирует запись commit-контекста. 
+ Методы: `process` — основной обработчик webhook; `_normalize_gitea` — маппинг payload Gitea; `_normalize_bitbucket` — маппинг payload Bitbucket; `_extract_story_id` — извлечение `story_id` из commit message. +- `StoryContextRepository`: persistence-слой Story-контекста. + Методы: `record_story_commit` — сохраняет commit, ветку и список измененных файлов в контекст Story. + +## 4. Сиквенс-диаграммы API + +### POST /internal/rag-repo/webhook +Назначение: принимает webhook коммита, автоматически определяет провайдера (`gitea`/`bitbucket`), извлекает `story_id` и сохраняет commit-контекст для трассировки изменений по Story. +```mermaid +sequenceDiagram + participant Router as RagRepoModule.APIRouter + participant Webhook as RepoWebhookService + participant StoryRepo as StoryContextRepository + + Router->>Webhook: process(payload, headers) + Webhook->>Webhook: normalize payload + extract story_id + alt story_id found + Webhook->>StoryRepo: record_story_commit(...) + StoryRepo-->>Webhook: ok + else story_id missing + Webhook-->>Router: accepted, story_bound=false + end + Webhook-->>Router: result +``` diff --git a/app/modules/rag_repo/__init__.py b/app/modules/rag_repo/__init__.py new file mode 100644 index 0000000..c9c2ef6 --- /dev/null +++ b/app/modules/rag_repo/__init__.py @@ -0,0 +1 @@ +__all__: list[str] = [] diff --git a/app/modules/rag_repo/__pycache__/__init__.cpython-312.pyc b/app/modules/rag_repo/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2835125140fa97cc3932db79ce1bf6b05920aae GIT binary patch literal 265 zcmXwyu}T9$5Qb;(0%wF^qr_NUo5ls}U}bF~#6pWr|%8D3!y4cq`J$|kZ?Uc*d$ zp{7}1Aey#CyqpeVCSgBlEvG)`6#D~V&iQ1L^^OtWVJ#Q1xz%%~$lA2@s4u9)5d9rh ziLpD=#+G%o9l6qkPztGtS#Lo@E@=t%y{{iyVjWF`k%HHV*4c!38Wx+q*&jZSL;0no b_(N>&yz10$3n<2C5B3l literal 0 HcmV?d00001 diff --git a/app/modules/rag_repo/__pycache__/module.cpython-312.pyc b/app/modules/rag_repo/__pycache__/module.cpython-312.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d1cf84ed582969f1857078017b40874b187e2428 GIT binary patch literal 1806 zcmaJ?U1%It6h3!mcJ?Q`o92gfG1_ipDVde-B1m6MQj!QF+F%n!S=4cynWU4>?riU! zZMw@Y1Pqvh5~WBg_>exE21Yc)1LQoi~~~j0HR8}7`-gj77oaa zN0HCxpPtZawNk~8jFlprP*JQk@D0_fTCS=#hr(*f9BJdW@e8hS&X`iu$nm>fs)V zGOkj87K>xHqiZ&sC_d{l?;-XUlE;6-<7Id-aD09ItMQ$I(Yphq_i`ui6<+?Kbi1@u zIDfZre!DQalbgDAdgIi_#hZOMC%+lo&Q0ybU{L9RIGEi7F_V4rFgNvt@nAoFWpaT0 zn3>F^f0lZYRb)DhYRVx6I3>fwL~>oKxhW-X5{{4Refw0Gd literal 0 HcmV?d00001 diff --git a/app/modules/rag_repo/__pycache__/webhook_service.cpython-312.pyc b/app/modules/rag_repo/__pycache__/webhook_service.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97d09534db89f123e19beca653fd5122900d7924 GIT binary patch literal 9854 zcmcIKTWlLwb~AjxpF@hGB$|@-qF%P8NU)48uIJ-(>XJcEj%g`K3qAZch3}r{k zu$ux#%M=DeZwg4py0C%-hzuKD2mOc(w5UJ!BWU+S1>;!T8>t+7-porz_V=@dT(_olbGbS527&nz~a zyYT)i{p05m?Cn1gJ>S!Rj_o~nA+C^7V3{VpwIA-6s01}mv(&i8O#S$e)w1*z-MALG z(aG*pnKZX>EIm7$%$(+unFJRvPnR;4$AgK|TRJk#0(FZXyG2FJvW`#8Ovol4dq-tPGGn0@laY8l9PsL=z1um9~Ps#T9R4g@_V51YsnFKEz`T5z|7`Gtn zV{xFwn_-l&T7Kw?&CTtZO|$cWx`&HRM!CdXde7y=g{gGq{VUu|#$!Fe&_Vb(@ACm4K6-PF3iOf8gl6BY@R>2gFred>+XjHaD zqsok-Zi_}goR7^YEgXZrQ9zszffs>61*o`u48Rp?Q)AV8Hw~26ziGspiSh+D%~-R* z{5Gvvvr)cXn|7=@D0kDQ6KgIi(7NfynuqfEHoaJ5C|7XPhc!QCaXe{&nld^LTwfo% zq2>AdxI_P(Ct*#G#o|+m8tp@c!?HvBIMhiZgovKQIV+2Ut3l*Wgw5m5CrU(wB)iMY zj@lEA<4}vOHKVGUv5gW08meA6KyGB(V-ZW0Hjq&(OU?z5iS5O1#9O2i(Q!>!Z${99 zpcO&QN`m#GI0V>_KL7w%(g>0_;QAS{1}nL1(~LC>u9X#QHe4$^)*QG(PN*p>gtT_7 z5C{?ry)_1KdxcmFYh^W8^y6R^;HSNzVQsALieX&G+QHW8#`Ua&HNe%tI$0xJjc_%= z)x^43GhEFJbi^%tP@<=mt$9k>nPc(tkYrpmSY$23WO#uVs3>VbAZXxDwL8=TMb}X zTlEu4YKi1@>!u*Nes2mA$pTv@Fm{)cY=V=GQ;8T{`HIoR0nv1jsosStEl~nBU6~rR zlnYFm7PKGHyoRQd)E!#&Cs`ffBbAyW=mh$XM)f6Gy`bmyu#Gs*bqdB`QGc(g;?Gr&PiS`&em%F#P7L{l@vVYk zn_3kWGE3Hb!*q&@Fk_%tJ-~x(oQo~Yq+=}C3wIF>dC2uc{kR;OPx13}b7?LEzAVjw zUR{dKB-v=W2kJ;H9#70cGaCCCpH9^?RbU&4ciCyc-jz+W37(Hlf`{~|9ySWmLUbZM 
zpJKuE?IG+XQ*7cR7{IFFT);-zq3FM2^Aaq#7f=@s#21DYFNL}tF%bu+j85DdVS#ut zbYO}hD`Vnl+z^5n5DX(g9k1a%VwBzBqWSd9r9`x{LoC2JHw(g9OmLmp39TO>CO*n= z;PlHIJjrsn?}(EF-iT>ONh>l(`J)*pq(R8)?z2=#v)f$pxa7qAS2n&8dc;AZi@`ek zzT6gpUi{mzQDNIRhfB?ah33I~uYWhW7M&MU7X3f7i2YecfSO14nJ7Ao2H7HoT^ zrjC`FLeo&?8tPu%RR|rB0&Ob?3W5GjGiBfRgwop`VBbS+xv_`dhLSf@@J3d9?v59F zj)>k@a;68)hKJ2zsVOY&YM0vfO2L6gRs-YMqzur{MVaj-Q?OtPmP`W$(}3h_ks4Z8 z`U(wuBqq4Lf4OJP(@}ZiZCRcwc)O%v`%1JB9F~~S@pVf6H}eiF_SG1+_}nYl0@znI7@+CSqw^$=Po=Vbj%`Z6@@VXRr61lh7B z$tP2M22L9ZSv#4?$T|$gWgVN0XCh|BvY>S*hMZV-Sr4{*J|WW=W%`nABqYIb9s{JE z2NEUb9zn+2>ll)!Y~LBA@LK?2QrwNd3@x85xjGB3PSMp}at#z*1EOnhPA^$qYfMkU z+9P=bxt~59dHLJfZ)X27UGf~i#jeKgy8mt{|KaB&ONXu<`uy-$Ln|MCIRbWI&2v0w zEO?Gf*5LAF!P+5tgOW9{{93`fTk?kfVD;rEmo8qrxDpZleRtS(>(D*l{jS1+H@-Ww zcJ_n9&ptqWr~k8$KmB-lM0B?0bQ{oxy)n9OZT)KcF7s{Ro4~z@xNr0~?0V;$Kv8nK z|7)v(^1eos==z`gkA)4DJzWm%ls&x+^M#n_|Gt;C0z{%0VP|q~h!N`SWI?0qB<^)q zQ^Bq5U|^wKs&N;nh<1$V9Z@y#y~hEIdli7}+V)aWMRS>|Clw`zKEyRu6iC*B=JFU6 zp~q1a) zc)M2{MDM;e+dh2WTeP;2hQ5NgZ}qh39bU5yOHL*1kK1X{xbk z79!JnmYA)`UBR5Bvf4}~QUzO7C(32DRn&bn1p0NcDmj+2C#YE9tgdQ=`dn44Z@F*9 zhGL9dPk&gG$`j`Sj5=Dwn!pm5C7A*$Wy%zIiS7B;=fouYupPW-4Apus&^}dHO0-#j&-n>?;<&mX~Ra25U-%51U{E zZ)?k1ew4oLS#y0ZeY;?X{coDa19eRi>?%cw>g^zjH$V~&q3-Mihl;DrZju6gh-b6T zD!!d6&T~0(2`-T5F2OvFsh64}xKtW6b#K6Npb zzMLY7G42(>;a&!?Xa(QN@4=%f+`<%rBk{WF3l! z2u9(7OrJ17WP>sRSsRD>YiIdM?i3+17XwBt2%{tlP%=nJ0S)@jqL}x0 zplxyYbIrTrLO4L={{=QW82PfVJtR82i_U@7qdA@AX5kQJ~D3j0?W+J z!1cgNuh=+HY<%}_`?uZSbl)2h2am$t{gmkYdCs!oaOb;7*M#V4FM4`bHKL;ze8$ko zw}-zue7}Ev=+U^k@0~5a7h5|Y%NaIY-u&sM_piOb5)geoMc>}l z3ngZ_zzpA=7MWLy%xm{gimum-uD8D%&FLS8!a37MTgR6jIlI)_k+VqtmdBLcJd!)U z0SAVOuh?~OujC4BczpSZrRi(aD~#yxEc*La+eA-)$um^&42hoM+_48vkJJ**TecA! 
zMSpM6zi*WjJ%c6B{(@(}=sBn$x&lkiYt9wzx~pAkc=0i%vybGpQghqQ6W33y@V|Pi z*bLnsQ(E^(esrU?edXOR8^7N5#V)aRFn>aBXq5sTr9h+*fRobqf#F9+z zRYrcw*&@%nQ7W%F-%ecUHdofCxdM~#tM6La8poNWvbL%j+McGx{ibS$wz@N{UE4~a zQ15V7Sr2rU8o{153H7PgxM1MaQN^jF4yWlVP91eQtvKOYDLuf`hQl5 zu`URl{u(6V5$cXEc+>!BTXyGX0N-0BIq%bwl3sP(it-t2$3RG zEUcgxe1e%}NT}x<<9-h9+>xH#Ymz08TK^r8Q@eTnt24Ow-bv*WO$)icEKr!T8UNIhf>E%(}I6 zgK1bc-?U%1ucXAla4~S^UgN*E{8P*QMe)E%xLbSgjL4iVG9;*b^`XCIi)u9=tO?5? zvLQj4PxN;c{r#(hxz{$FjmuqMy|C`=-Uzj>bba0XMepi_*fv~jd+DA>3>_+kjut{k z#n5Qp`T%l1E#3L!Qe*gL)Agp6GbAE?;ci>rupRtu1e%tQlfdkf*gQ~dhGt;56nLo+ zcu5SroPXs(LqO{2T()ckAzr*WeSLbhQ49?gLkI41V(?%oc%%?KA_kAzKrDMj;t)TCQ7GM#Ml* zF#y4=$m}aIFBX^=Mdm=Be&A)K*0%hK4S(Zu=T3T}f28C;RPY}X{fG0~2VOtWTYBr- zTPw%bnQp1+rN@-feK3Da3U_{O`NFa~^rf>H1~wj32H(MD-3H|E**l}_P5U+?eXH;O z`pm5}cPGUDmy7+!?t8?@Xen}{5IG@6etP}H9}wYobaiYR0S{iQyBOu2ImVyyqf)vB#nVd*G)d zePQzb2pI;AA8VK+WQ&<=lo5Wp4ciAJOE0f`==N9nb5AN&W^ueWUd1?|#%w%QGPX3Mo3 z?oZG!=`BwdX}!KIcO(+A;I}Um^YC2^d1{>{NHa(%>@+4%Uz4@sp6kPt(Kd+&g#}q=MF8>3bxfeSC literal 0 HcmV?d00001 diff --git a/app/modules/rag_repo/module.py b/app/modules/rag_repo/module.py new file mode 100644 index 0000000..15f7c86 --- /dev/null +++ b/app/modules/rag_repo/module.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +from fastapi import APIRouter, Request + +from app.modules.agent.story_context_repository import StoryContextRepository +from app.modules.rag_repo.webhook_service import RepoWebhookService +from app.modules.rag_session.repository import RagRepository + + +class RagRepoModule: + def __init__(self, story_context_repository: StoryContextRepository, rag_repository: RagRepository) -> None: + self._webhook = RepoWebhookService(story_context_repository, rag_repository) + + def internal_router(self) -> APIRouter: + router = APIRouter(prefix="/internal/rag-repo", tags=["internal-rag-repo"]) + + @router.post("/webhook") + async def process_repo_webhook(request: Request, payload: dict) -> dict: + return self._webhook.process( + payload=payload, 
+ headers={k: v for k, v in request.headers.items()}, + ) + + return router diff --git a/app/modules/rag_repo/webhook_service.py b/app/modules/rag_repo/webhook_service.py new file mode 100644 index 0000000..f4322e3 --- /dev/null +++ b/app/modules/rag_repo/webhook_service.py @@ -0,0 +1,217 @@ +from __future__ import annotations + +import re +from typing import Protocol + + +_STORY_ID_RE = re.compile(r"\b[A-Z][A-Z0-9_]*-\d+\b") + + +class StoryCommitWriter(Protocol): + def record_story_commit( + self, + *, + story_id: str, + project_id: str, + title: str, + commit_sha: str | None, + branch: str | None, + changed_files: list[str], + summary: str, + actor: str | None, + ) -> None: ... + + +class RepoCacheWriter(Protocol): + def record_repo_cache( + self, + *, + project_id: str, + commit_sha: str | None, + changed_files: list[str], + summary: str, + ) -> None: ... + + +class RepoWebhookService: + def __init__(self, story_writer: StoryCommitWriter, cache_writer: RepoCacheWriter | None = None) -> None: + self._story_writer = story_writer + self._cache_writer = cache_writer + + def process(self, *, payload: dict, provider: str | None = None, headers: dict | None = None) -> dict: + resolved_provider = self._resolve_provider(provider=provider, payload=payload, headers=headers or {}) + normalized = self._normalize(provider=resolved_provider, payload=payload) + if not normalized: + return {"accepted": False, "reason": "unsupported_or_invalid_payload"} + + cache_recorded = False + if self._cache_writer is not None: + self._cache_writer.record_repo_cache( + project_id=normalized["project_id"], + commit_sha=normalized["commit_sha"], + changed_files=normalized["changed_files"], + summary=normalized["summary"], + ) + cache_recorded = True + + story_id = self._extract_story_id(normalized["messages"]) + if not story_id: + return { + "accepted": True, + "indexed": False, + "story_bound": False, + "cache_recorded": cache_recorded, + "reason": "story_id_not_found", + } + + 
self._story_writer.record_story_commit( + story_id=story_id, + project_id=normalized["project_id"], + title=f"Story {story_id}", + commit_sha=normalized["commit_sha"], + branch=normalized["branch"], + changed_files=normalized["changed_files"], + summary=normalized["summary"], + actor=normalized["actor"], + ) + return { + "accepted": True, + "indexed": False, + "story_bound": True, + "cache_recorded": cache_recorded, + "story_id": story_id, + "project_id": normalized["project_id"], + "commit_sha": normalized["commit_sha"], + "changed_files": normalized["changed_files"], + } + + def _resolve_provider(self, *, provider: str | None, payload: dict, headers: dict[str, str]) -> str: + value = (provider or "").strip().lower() + if value in {"gitea", "bitbucket"}: + return value + + lowered = {str(k).lower(): str(v) for k, v in headers.items()} + if "x-gitea-event" in lowered: + return "gitea" + if "x-event-key" in lowered: + return "bitbucket" + + if isinstance(payload.get("commits"), list) and ("ref" in payload or "pusher" in payload): + return "gitea" + push = payload.get("push") + if isinstance(push, dict) and isinstance(push.get("changes"), list): + return "bitbucket" + return "" + + def _normalize(self, *, provider: str, payload: dict) -> dict | None: + key = provider.lower().strip() + if key == "gitea": + return self._normalize_gitea(payload) + if key == "bitbucket": + return self._normalize_bitbucket(payload) + return None + + def _normalize_gitea(self, payload: dict) -> dict: + repo = payload.get("repository") or {} + commits = payload.get("commits") or [] + project_id = str(repo.get("full_name") or repo.get("name") or "unknown_repo") + ref = str(payload.get("ref") or "") + branch = ref.replace("refs/heads/", "") if ref.startswith("refs/heads/") else ref or None + actor = str((payload.get("pusher") or {}).get("username") or "") or None + + messages: list[str] = [] + changed_files: set[str] = set() + commit_sha: str | None = None + for commit in commits: + if not 
isinstance(commit, dict): + continue + cid = str(commit.get("id") or "").strip() + if cid: + commit_sha = cid + msg = str(commit.get("message") or "").strip() + if msg: + messages.append(msg) + for key in ("added", "modified", "removed"): + for path in commit.get(key) or []: + path_value = str(path).strip() + if path_value: + changed_files.add(path_value) + + summary = messages[-1] if messages else "Webhook commit without message" + return { + "project_id": project_id, + "branch": branch, + "commit_sha": commit_sha, + "changed_files": sorted(changed_files), + "messages": messages, + "summary": summary, + "actor": actor, + } + + def _normalize_bitbucket(self, payload: dict) -> dict: + repo = payload.get("repository") or {} + project_id = str(repo.get("full_name") or repo.get("name") or "unknown_repo") + + changes = (((payload.get("push") or {}).get("changes")) or []) + messages: list[str] = [] + changed_files: set[str] = set() + commit_sha: str | None = None + branch: str | None = None + actor = None + + actor_raw = payload.get("actor") or {} + if isinstance(actor_raw, dict): + actor = str(actor_raw.get("display_name") or actor_raw.get("username") or "") or None + + for change in changes: + if not isinstance(change, dict): + continue + new_ref = change.get("new") or {} + if isinstance(new_ref, dict): + branch_name = str(new_ref.get("name") or "").strip() + if branch_name: + branch = branch_name + target = new_ref.get("target") or {} + if isinstance(target, dict): + h = str(target.get("hash") or "").strip() + if h: + commit_sha = h + msg = str(target.get("message") or "").strip() + if msg: + messages.append(msg) + + for commit in change.get("commits") or []: + if not isinstance(commit, dict): + continue + h = str(commit.get("hash") or "").strip() + if h: + commit_sha = h + msg = str(commit.get("message") or "").strip() + if msg: + messages.append(msg) + for key in ("added", "modified", "removed"): + for item in commit.get(key) or []: + if isinstance(item, dict): + 
path_value = str(item.get("path") or "").strip() + else: + path_value = str(item).strip() + if path_value: + changed_files.add(path_value) + + summary = messages[-1] if messages else "Webhook commit without message" + return { + "project_id": project_id, + "branch": branch, + "commit_sha": commit_sha, + "changed_files": sorted(changed_files), + "messages": messages, + "summary": summary, + "actor": actor, + } + + def _extract_story_id(self, messages: list[str]) -> str | None: + for msg in messages: + match = _STORY_ID_RE.search(msg) + if match: + return match.group(0) + return None diff --git a/app/modules/rag_session/README.md b/app/modules/rag_session/README.md new file mode 100644 index 0000000..09caf3b --- /dev/null +++ b/app/modules/rag_session/README.md @@ -0,0 +1,218 @@ +# Модуль rag_session + +## 1. Функции модуля +- Создание и обслуживание сессионного RAG индекса по загруженным пользователем файлам. +- Индексация снапшота и инкрементальных изменений. +- Хранение чанков, retrieval контекста, трекинг статуса index jobs. +- Публикация прогресса индексации через SSE. + +## 2. Диаграмма классов и взаимосвязей +```mermaid +classDiagram + class RagModule + class RagService + class RagRepository + class RagSessionStore + class IndexJobStore + class IndexingOrchestrator + class TextChunker + class GigaChatEmbedder + class EventBus + + RagModule --> RagService + RagModule --> RagRepository + RagModule --> RagSessionStore + RagModule --> IndexJobStore + RagModule --> IndexingOrchestrator + RagService --> RagRepository + RagService --> TextChunker + RagService --> GigaChatEmbedder + IndexingOrchestrator --> IndexJobStore + IndexingOrchestrator --> RagService + IndexingOrchestrator --> EventBus +``` + +## 3. Описание классов +- `RagModule`: composition-root для сессионного RAG и его API. + Методы: `__init__` — собирает сервисы индексации/retrieval; `public_router` — публикует внешние endpoint'ы; `internal_router` — публикует внутренние endpoint'ы. 
+- `RagService`: доменный сервис индексации и retrieval. + Методы: `index_snapshot` — индексирует полный набор файлов; `index_changes` — индексирует только изменения; `retrieve` — возвращает релевантные чанки по запросу. +- `RagRepository`: слой доступа к БД для сессий, джобов и чанков. + Методы: `ensure_tables` — создает/обновляет схему; `upsert_session/get_session/session_exists` — операции по сессиям; `create_job/update_job/get_job` — операции по задачам индексации; `replace_chunks/apply_changes/retrieve/fallback_chunks` — операции по chunk-данным. +- `RagSessionStore`: управление жизненным циклом `rag_session`. + Методы: `create` — создает новую сессию; `put` — upsert с внешним id; `get` — читает сессию. +- `IndexJobStore`: управление `index_job` на уровне приложения. + Методы: `create` — создает задачу индексации; `get` — читает задачу; `save` — обновляет статус/ошибку. +- `IndexingOrchestrator`: асинхронный оркестратор index-jobs. + Методы: `enqueue_snapshot` — ставит полную индексацию в очередь; `enqueue_changes` — ставит инкрементальную индексацию в очередь. +- `TextChunker`: разбивает текст файла на чанки для embedding. + Методы: `chunk` — возвращает список чанков заданного текста. +- `GigaChatEmbedder`: адаптер embeddings-модели. + Методы: `embed` — возвращает векторы для набора текстов. +- `EventBus`: доставка событий прогресса индексации. + Методы: `publish` — отправляет событие; `subscribe/unsubscribe` — управляет подписками SSE. + +## 4. Сиквенс-диаграммы API + +### POST /api/rag/sessions +Назначение: создает новую `rag_session` и запускает фоновую индексацию полного набора файлов. 
+```mermaid +sequenceDiagram + participant Router as RagModule.APIRouter + participant Sessions as RagSessionStore + participant Indexing as IndexingOrchestrator + + Router->>Sessions: create(project_id) + Sessions-->>Router: rag_session_id + Router->>Indexing: enqueue_snapshot(rag_session_id, files) + Indexing-->>Router: index_job_id,status +``` + +### POST /api/rag/sessions/{rag_session_id}/changes +Назначение: ставит в очередь инкрементальную переиндексацию изменений для существующей `rag_session`. +```mermaid +sequenceDiagram + participant Router as RagModule.APIRouter + participant Sessions as RagSessionStore + participant Indexing as IndexingOrchestrator + + Router->>Sessions: get(rag_session_id) + Sessions-->>Router: session + Router->>Indexing: enqueue_changes(rag_session_id, changed_files) + Indexing-->>Router: index_job_id,status +``` + +### GET /api/rag/sessions/{rag_session_id}/jobs/{index_job_id} +Назначение: возвращает состояние и статистику конкретной задачи индексации. +```mermaid +sequenceDiagram + participant Router as RagModule.APIRouter + participant Jobs as IndexJobStore + + Router->>Jobs: get(index_job_id) + Jobs-->>Router: job_state +``` + +### GET /api/rag/sessions/{rag_session_id}/jobs/{index_job_id}/events +Назначение: дает SSE-поток событий прогресса по задаче индексации. +```mermaid +sequenceDiagram + participant Router as RagModule.APIRouter + participant Jobs as IndexJobStore + participant Events as EventBus + + Router->>Jobs: get(index_job_id) + Router->>Events: subscribe(index_job_id, replay=True) + loop until terminal + Events-->>Router: index event + end + Router->>Events: unsubscribe(index_job_id) +``` + +### POST /api/index/snapshot (legacy) +Назначение: legacy-вход для полной индексации проекта с автоматическим созданием сессии по `project_id`. 
+```mermaid +sequenceDiagram + participant Router as LegacyAPIRouter + participant Sessions as RagSessionStore + participant Indexing as IndexingOrchestrator + + Router->>Sessions: put(project_id, project_id) + Router->>Indexing: enqueue_snapshot(project_id, files) + Indexing-->>Router: index_job_id,status +``` + +### POST /api/index/changes (legacy) +Назначение: legacy-вход для инкрементальной индексации изменений по `project_id`. +```mermaid +sequenceDiagram + participant Router as LegacyAPIRouter + participant Sessions as RagSessionStore + participant Indexing as IndexingOrchestrator + + Router->>Sessions: get(project_id) + alt missing + Router->>Sessions: put(project_id, project_id) + end + Router->>Indexing: enqueue_changes(project_id, changed_files) + Indexing-->>Router: index_job_id,status +``` + +### GET /api/index/jobs/{index_job_id} (legacy) +Назначение: legacy-чтение статуса index-job по `index_job_id`. +```mermaid +sequenceDiagram + participant Router as LegacyAPIRouter + participant Jobs as IndexJobStore + + Router->>Jobs: get(index_job_id) + Jobs-->>Router: job_state +``` + +### GET /api/index/jobs/{index_job_id}/events (legacy) +Назначение: legacy-SSE поток событий по index-job. +```mermaid +sequenceDiagram + participant Router as LegacyAPIRouter + participant Jobs as IndexJobStore + participant Events as EventBus + + Router->>Jobs: get(index_job_id) + Router->>Events: subscribe(index_job_id, replay=True) + loop until terminal + Events-->>Router: index event + end + Router->>Events: unsubscribe(index_job_id) +``` + +### POST /internal/rag/index/snapshot +Назначение: внутренний синхронный запуск полной индексации для сервисных сценариев. 
+```mermaid +sequenceDiagram + participant Router as InternalRagRouter + participant Sessions as RagSessionStore + participant RagService as RagService + + Router->>Sessions: get(project_id) + alt missing + Router->>Sessions: put(project_id, project_id) + end + Router->>RagService: index_snapshot(project_id, files) + RagService-->>Router: indexed_files,failed_files +``` + +### POST /internal/rag/index/changes +Назначение: внутренний синхронный запуск индексации изменений. +```mermaid +sequenceDiagram + participant Router as InternalRagRouter + participant RagService as RagService + + Router->>RagService: index_changes(project_id, changed_files) + RagService-->>Router: indexed_files,failed_files +``` + +### GET /internal/rag/index/jobs/{index_job_id} +Назначение: внутреннее получение статуса и ошибки index-job для сервисов оркестрации. +```mermaid +sequenceDiagram + participant Router as InternalRagRouter + participant Jobs as IndexJobStore + + Router->>Jobs: get(index_job_id) + Jobs-->>Router: job_state +``` + +### POST /internal/rag/retrieve +Назначение: внутренний retrieval релевантных чанков из `rag_session` по текстовому запросу. 
+```mermaid +sequenceDiagram + participant Router as InternalRagRouter + participant RagService as RagService + participant RagRepo as RagRepository + + Router->>RagService: retrieve(rag_session_id, query) + RagService->>RagRepo: retrieve/fallback_chunks + RagRepo-->>RagService: chunks + RagService-->>Router: items +``` diff --git a/app/modules/rag/__init__.py b/app/modules/rag_session/__init__.py similarity index 100% rename from app/modules/rag/__init__.py rename to app/modules/rag_session/__init__.py diff --git a/app/modules/rag/__pycache__/__init__.cpython-312.pyc b/app/modules/rag_session/__pycache__/__init__.cpython-312.pyc similarity index 100% rename from app/modules/rag/__pycache__/__init__.cpython-312.pyc rename to app/modules/rag_session/__pycache__/__init__.cpython-312.pyc diff --git a/app/modules/rag/__pycache__/indexing_service.cpython-312.pyc b/app/modules/rag_session/__pycache__/indexing_service.cpython-312.pyc similarity index 54% rename from app/modules/rag/__pycache__/indexing_service.cpython-312.pyc rename to app/modules/rag_session/__pycache__/indexing_service.cpython-312.pyc index f4f1f64967efb65cee06e482614fe3d20c5662cd..2272b9f3ce9840e11c85421ce059cd43c3a93ce9 100644 GIT binary patch delta 1192 zcmaJ>U2M}<6uu{R>^QMw$6lP&ZIk|>wjmXPwa}jwM#DxV7C@9$XdO|hnif*6YnudZ z5()4ytyE%K(R~0km^x_>oA$!g32B<9sUR9e^FURkWtA%JaeE>iOldUsb8kW^d)bx! z-R~Tq^ZC2i^8LH}iyr%uBv}y|-z|S0|K9bDy@7H2hbAWj;}fy7?~Wt`iJ{Yj$&q9- zKJniCe&(@7Jjfw5jizCmofhYV;#Xi6XV91%gQVeSk}im&rkLeJ%h!OXWL-STB9!4X zmW+_G;x7BaSJFb78+6b&(vh~3NX1d*I#y&>5J=3#gd04bkPX}>cl;;Qd-U55Gz*t! 
z%IN7pm)FRBnA{DGDAiO^L@3qrf^FYqqs%tanNTyf6tQ0!W3Bif+3O5!#xXG_rEQ<0 zo2JxG{I;V8A5|nUh_`iUFR$*lLu1q2SUGTYZnkaBo6n%TvKe*_uQ_~@?Yxbc4#iN0 zuz5|*o2u-jY2zYV9vFTYn^+l`yoeHBlJfD`w5y$7mCSMaZ8LyGb0}i zCw7s_q(tD$P*JnfBs5+=4I`Zb2I0z?OT?&Ut;r`tBCqRP}j?tSM{s@ zEB>p2D}lMD71ej?z|WeVSM(xJYbuom)Q|VOqHqX*=JM4YfG}kJ3;t|iA@HbgawRnN zQ_s}0(lyIp)8~vYPvT!(9=U1fEb~Zd%s02uc^m%A)#LH+&bM~vn_pd*El&LjFS>}l z=uit~Fz_*VUElwe6r-?ZsbaoFoDr9$g2+ntQlSOyO~6LMPDD2f2Eo!(1OMQDeBu3a zXQ{H9mskHR`5W1`kMu}A+<>sc-DC)x^F~DGM;l=aRtRhCg3V01(%VAvcRC@gb9cH} z%J=OJdrfzP5U%3x_JkZHSd<`K%`MtnDes1GEw>nCFVNsMDwwrGL}0Tvj&cns>OhpT zRX}R8)n%+Lqt_YX%-Qyk7yHOS#ESDb}KbrcJOI5j8}JMI(wrDTxolXrjh%TF@6Kne(0V&3q?klbuK2-+Q$Q zSr!qj^S`Dpv?$ND4o2_n?CmY>xe^|@)YV@a={!GunE95a?BEcJp%{*-V`2bdbO&8W z-H3#=rs)pN7{(&`DtiEPw0XP=p&@=KiyYE6lE>QHEE5(cS@J-)ot)9t{8j$uhc##% zj}4?ztU+T7umi9iP84;}RCGo2GZk3D7CXn}!(_xbBt#)rW_1@qm>u@0d#e%Fgyliu zKAKWRi?@2+C8S56hpps+_8uuPwBa(c%A0TnjBZnPrv>?7FsTQ*O>xs;A}K??EDQ?} zwIPg#fK4ATE~_jWFx?nMGs3+4`aTPU_35!jpd>v6tLg$60 z9UmmwR#&yl^Hh2wJ=HzYE!MVvEIP5|I5D9c=kF??Z0;pn*?U{rICD!}&UcS9i~60* zP8Y3qa>MGi7FMiEoLTw7ix#jJ4d6^cA)l@G;QtTTkK=zqAR5F;L7sHcsAX*`S%jQz zcq1u-(?(JOOsy&8boRHAi|NT!<{L7YG5Fd48NU6R5|r^IZuD|f46uiJfm-I31Z+mc zjXw2^#8R&J7eW7ZEpAeAulJ}a-&fxxsovD#CYF2S_4`1~g5bGX?=Il+0^B5U@j~?o z6>e%6uf~BYb-ae7+=l_@R2ZD*SPXaO_%sS>l+1w&cFx|Q33%9rVtk}DYr*5M1d-T@ z1Nm7A2TOS&2h#};3wIM04CPhSHBrk2MLw|)2eBsM6L=7BIX)vs;BqYa$+sMnD zb!P~(#%H^eh}L-~Q{SCx-y}VKYbZpPatHO<@H$I2UuOxtPIA#UjQhwJo5Nbag34D= Q-8$!2VdHm1G5t^e0mvf(_5c6? 
diff --git a/app/modules/rag_session/__pycache__/job_store.cpython-312.pyc b/app/modules/rag_session/__pycache__/job_store.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..571b827958940118427968f837959f21f3f3c56e GIT binary patch literal 3863 zcmb6cOKcn0@$HA?az&9gDT$&)O0;EHUOS>{$3R@ebzLO3(IQbDDGp*Dv|zYvhXgqH?6Qz2C*&oDS=D{Pg^ zbJm=z@YO&*V9ohTuo}vTszP3{=s+c074srPRE8xNi4wd`l+Y$?e|yt8B0pfQ39uG+ z*Yc5uC=L7*B_zfSD+Rq!tQ0g2h`{o4NjV9V!TG|qd3C9-mGnAoNURw-PidWADl{tf zf?|qq)s^LnI#;NwARPT!O;Ksr6f;T-*JMrAv{Jo>bWpSIM$x|Jpe3p*^3_sB)y&A% z0*o#dD;A0$sPYFT-C5peGpZ#`vpI8SKt*dxF;kG`8W>TQO;MJsmQi6Ik>y`57b?yQ zr|Hy;$#S7qtAi4tkS5DiKt+OYUp{~N{2O!-2pUBWSE}hWhU7j(`w_(v1p$3Qwg8hC zhYCwev-WA)?4FCx;^j5)xSCsPPz+!TLHrW!T|mDge~S-y_+eux+2NB$=2$m#qMbRh zdZ9Jf;ZGQ;vDLTMUhVK>#%OBw&9$ixpEAbNtG`%V=iO20 zdxzQs6HocWblg3tFLHLWWFD4xBS&FWPGJ@9DzEUj`8-5|8cNwlKpD00K$9(7 zg{_|rreOV2w~R6AflWs{hBVm)BC&H4O9w!P;sqta4m&gg}dDtQ(i`Rw(Ffz8nUHy=+t zjb3VTh7h?UekgW@Ok2p@D|RPx?TOr@=wt3L;+JCg+@g2-(*}rfp1uiX+UGR#et2{Bp#se={vqFV^#3LE#BW}bR}C;`D?o2D^sb`R)i@^uGsmbe09M zKIYDMM^{%^#YFP}a3?%0Ac33p`VBL*WWNq*4px^F#z6GW z*W~}dUFZoW^pYPjLd`gt{Z63;_PirY-srLY!=F?b!>r;aKN+L+2KRf_jzhcDr z6(f)x^=t>S$u>DCb&(g@tW{VnwnL?$_o4-6lPj31l~Z+#@hNdS?rVUXTkUHWqpWh6nF$;m{Mu0^$bO zBQQJ=9-aImwuRH)sc1mFC-evm8}Dr6U>}Dk%}Imiz|KD~Ril{uCXau^tmlt;5T5Ee z=c%6C^Hk66K31(unsF4=;Gajepyi77YPDXo3cTG3hW4(O@Q+TZcFp<^VHbVt*8P@v zA)K+#JxkAlQuuDv&}vZ780I-S@SKeNgPi%6y!M>T_JV{R_?{J*AM^;IEe^;YlHZTK M%81_)K-QiA18DH@m;e9( literal 0 HcmV?d00001 diff --git a/app/modules/rag/__pycache__/module.cpython-312.pyc b/app/modules/rag_session/__pycache__/module.cpython-312.pyc similarity index 58% rename from app/modules/rag/__pycache__/module.cpython-312.pyc rename to app/modules/rag_session/__pycache__/module.cpython-312.pyc index aeff80bd4512760ee3c760949b63581e2e86ee96..79725b1f5aaa745ca2e1fcb90c2c27b95e3d808c 100644 GIT binary patch delta 2143 zcmb7FeQZ-z6o0oLZ)>}C8|}Ka>(+Mdx;vn}mgx0Y-P__l5K3njR{vrX^|0w3rO 
z|1ICEQl=1RAn(Lb_+W+}$%2wi+0b9D$&qX_&_Mrwu!YPUAR||Ua^WS1CRaKvJBOxx zhUMpyr?eG}kD(*@V@^Fv22SIcm*#EvcKO@7_@1^Gc(RdL`jlY{F9->X+>dbpb7`Rl*6Nq;WIh;QuJ#0o4gJQ>W31G z3GIPpmbchJ;v}4gJLQ@1&@!e}kfE5Va`-9#fh34vaE?|ML?mB#b2CvA+_oAB_G=8P zsofKQE)~X$3{Y&#O~Q#IEwXK7-=mTDz_6{u@t<==b^4?H8~kKiGCf*ysycBJkJghF z<_gvzv_9N#zlzKoNf7L#@FERIk83uYckkYfOQ|scZAGo<0$eW2Rc)h&?G$!Gw8&}G#wySEn)P|IEl2PhnZ@Z4YQsS5Q) zYn&nC#tKo~YVMe%!wD&)>_Kq}QR8{=`MfVpDGK$lH_i}ot9sESmDH!1_z*R244*3g z9-&uYhqDs3!m!gu5*l$V8k7a)LDnZdCR@RHC5XHs z$quH5Ih1)`x&v{t7TizwT1()J46mm88Vb)*pkIYc#xAOJ6qZuhL!p3zc;I@f%_nds z8CMel?$Vsvm~y(Ng;>f=Ir{}%Ond~|4CI5*C-F&WtiCd6uCc3?4#+hbHZ-k96O6P;{U;->Q%6#&_CGVYdT8xO#<-Hn=b~nN)ar=Z zO7AJ;v{vM#R_@E0ELU7ky#EsNNc;ge=dq)a;w+Czd0na^`gJw(*ksq!bRIi+%L*lT z7&z~=iW!~@u`>RNxs}Y4TWJekOjf~f6CkxDW>HL=SRza4)O-p@2s|Mn)T^&_d3$HG zNKa0_yT=dfYBJ&5@>KR``ky|Uo|H$aSs8Jy1XN*K-?Zxg=f%{55{WUA$OC0=C%TyB zE>w=FR74%oA$PHCBunRZ!sd#SnQr%xZeIw$R~&>R)?yatk%pfm5@04Jz^umz&?i5Z zM5&KTxKx?P?j!4PD!5(ArB5@$R6h2Ds_JR>E+em|LtM?Ypyg=B@Q5#r9#iL8oDHO(}W^y)}m$UL(t69f6q4$9sFQhK|MZKTI@^70Vb8QtM3HzfXs! z!BgABen(W{E7(_S+eI4u)?C*V1yoMAeo>~V>TICG70hR7S* zNh&A@6p9mZ19dhWFpO$TZz{?}S{Rm!gmFCPYB74XJ}H)SQ=p?w=m}syrYsQ4pUn^Zw*~~BIQC+Cz35}yES~VNK6_!oER)W} zjhV?Uz(8=s zDbuO*Y;jxs7)!Rrh*`F}WgmYyj9Ic7BTNwM&b{Z}uli5+>Mm)uYQlb->B-Q8yf1Xul?kSnt}gf1aA12!FyalhZcPnt%qQBPNZE+sgWc4*1d>q8p+Xi(O9Nh%UFb z%3K53QHyw%fnOqcQRfkLh(Sa?qMiXJ^d`8K*+~yUv;B@b4o&Qt<=~=cC2YyoL%z-h zSF?60M&)Oe@VX<-q7#FzoXg02ArxD5yA;D0XCa$aBxs4L?Z^>TL&ZgMp(gn z__Jv1M1&Dth;0Y~5kYh#cEAbVVd+6t!NA8NSc!&M9%uLf_bF>f_Ccm41Nzx2ZMM7! 
zQ+d1XFQZ#8!!*|sY;S4n5%!>E2(b_G7VOIZJzJyFoy!sl6BnB#Fe3&XOjczLVmC`<_c6(?oY;!xP5{hgk5>ueh zhV2pC(Sda`Li*+XiavRtA|O95Ya(qj%AS7NPEtk=CZ3h=q`aa*kak2l`_j|0#|y^C z|LewZVn9-Sm8h-2QJheUdIjQXhUvIaXCSyMI~bVeHnoI0f&pO}Ge24_wqEnAh12vF z!IcUVTrO8bp;gCSM!&V_Nl zuRgL?j3?Oz-)x#lt{GoabN{ZA7`>vB^*)GXu&IW8yx&g8mKptK^#!?x=@)d=Z&O^* zC;9ErUYtXx;BfJL{_@~@aj5;tl*#@hW%7pk`oThAdqt`rDKFBJKf|n2UW9isnAjFw z2pWBQ3!ey@gGPH?&J_CMp1yaB+#4h}(jC{*-4p5V{S8x(5>YwfsDvL9tj{^C{x=KO z1{zC)h0Qrsz*LZm9wO zDA5-v@$?EPk25K48LnZzxL32Og(k!r#9jto1v#ZWw*uR5!mFh<+)buN_DaXvPo*Bv zmzkXll`g7D1AT$sSui|0(m1~=+E`|y^aO;;v*?HLc6lZ}1)rA}(%W#O{180=;R;jc zL3|@8Z7$&uzVdP5;rSw*g3}etxu5Y1#^H8_V<29&icaQvMS{vj&zvN&wsJ)onnVLp zCmc2po5xd}S5?`fjKcwZH9MDZfMHIOI3Q=fPfWl8X~SvbhQh0=BB?+}?Th;Rk`RpEY#qJ>&yNBRX zm3_dr$mo&^E!5e*F%lGZgxZ1v9?;WxhdnvgwK?3nG146tf2S5Zw!L!;ySwm8oK9ct7zsuqp>SvHZ5s43F7k=qtX@OY@GupM v5rqs7;;ZLl^$OoYqt}vLJ41XkiI7N&1VvMCMWRL7;KLRzin2*cmgSYP5C;-T^W_03 zkyvo;+jA;%R+W`*?;@$JTTxYeF;z;ks#GdbrLL~5+e&Jat5isVw}NrHs;kOfC0lhr zC{r6|m8)F7ujh&33?8(d-MeHc_4IUq-P4b+zgK@f_!F1QLBaK-<-4P)F^c-H_#j=Z z5TVy5A##J_D2^VdCg`&?jd9bsX@WV+kT^4LK5K?F^Ef+UIcp(l?6`HpcGgBy5ynKl zPH~oxDb9M|B-D9de8*Wk$+JP8UCKM_m}3HtUm!$)P8WqIC#PcJ*yz+`6dvtdI2Ilr z4@aZ@@MMlfW@B)7T#Up*k;#jrlaXPuW^!YM2YQW%$PFq&ouxVItmzc=B*Ji}BaljB zj2ttQ7z?=;IcAk(HaTV|F$cgE$uXxKb75?*IN(m(2f`QMm^wc&bveyMV|>~|+5 zLt|6tL!(@}m=9kJMI+HD^bVgbQRwMRG|h}o#?meV8sS10M#m%3wDUq3?$VQcI6OQO z35|@##B7g{Ffkg93Mnrf@IHUS-#;}ONjoAuKgEZJr#LyzMWVxLr;t7|#m$UI`UB>) zEfkszPeekYv?CM}k|FL4g+7=GkBd3(P)Ic?q0l{Q4afOKV|aRcmry3UOVzVod}Mkm zIvShe=h~*{cpJcy05LDx0l`0{{-U%ZX|70>S0>GsDX06lkEhDKNwYU4Bv(`=%~dI{ zFKPCrJk?2awHTJxu9$228mYdFskcBXT=ZHBLS^c?a*qDke3s!VI1}8>oR?$Z&T^HU z8SWOYieuq!#Hk%287O0czfqFpY_HKn)SIN>3bpU#wTU z(^SqfCJ9!$=TU}{V5NH=Wy}(+bkCy<-Sa48lVGKL9%bwj ztaQ(#j6;Hz?s=3cl3=BK9%Y;otaQ(#j7x%*?s=3cmSClO9%bATtaQ(#j7Nf%?s=3c zkzl2J9%V`;Sm~ZenKB7hy5~`*T!NMEd6cP;V5NH=WxNusbkCzqr35S8^C(j#!Akc$ z%J?K$>7GZKY6(`l=TW9cf|c&1j9sF$%3Y$)M9Kn5Op=>uYL2R>_E1s!vWceNp=kJ< 
zqh^_RsLOPqwm;t3HP90r>hbrS8S3dD>^<44NJw=`C$0t0Ese>ynObu2j0d~koj|K>pNiQvFH{^LFGv>KM5 z=BLJ>k?OJs_UsLiPV}GZ>oYDn%tyj85T3#@|4{FVp24Bui8qJN`n!8x3!ds5@^_sY z80hIA3dwo8O$g|whED+p)y>H2Lf3=wbj?hIM)nP@tN>_>n^{@IBQullXI)jCzq9xC!JdKMU|)Wu z=PRwG>2PdBoXni7jx%+!0!3a~ zw((aBt%92xMnW$y{CLl1YPb~P(W0QL=9NOLnYug~;Z+s9QD_Cxxo9jh0TzdeX)r*@ zsEGqx@I-;RCx*s|(F>yy7#tM?M=BRTRdCy4;qk~Mm|a5A;VC}i?>>17Wtuk!db)av zU_`3@kuK@cf0sU}5#kj}eRXS(e{=2oT4;LsUu{w@IzJA^F~Y>Nh?1~K3Z>v+BvA`d zc}k9?HPMl5I!MIHJg>PIA zMAzI4+_-q5>(jWVLRXS;O~Lz{k?CU>hU+Dp%rkv)h}Uf zs~_=DUvbe-4gb+s5xV~FuiM{5`Ky3+{&xd)g+Uf*d=}hQ&4oU)>ZgYP*H<%P2!dm1 zB0T-|GN6pT9+{sIWi2N=5m&Zc>Ko=>s*x{13n_%>6^f|3GLZiK$KMn!FTMTUJ!dqg zs!?v1P*2iB5CDbEH$PazR4PMrrn(o1pUS_+OlGdBHzP9;e=GlLfq`oqDe&}GKi)-- z9f%L*U!@|OYg(yg)q!|d{WWN8XZT1c7CsLfA_R-CVkDNjA+t0qfD)f!_fDSgg3 zT*a$i%2k5EB+HFiC74wPSoa3sLjqjRC`Yx?FFN-jw7YsI)#im-J zsUCGNvSgKFsd6mkg;H(mQZCGLV^#@dHK?;3nB~N*V#wO68kXs#?}HU5uWEc|Q*r1% zNc#8{XH*3fOZsM3oR?H7!$}{v;*6+L=99kft~fuGQ&uUe?t}?E7pnVB&ne1v(v*>P zhGe}%vd$`5OIyF2^nHl%L3QGXSWl1ucarqupB%q>V*PD1qSiV=uxBKE5p@D8etO(} zd>`{C``F`0ti$RJc-&TpXL{}5K}2uMKF4-FlVI}Qu20Yj6KA?@Qnrtv?ANv3#PrAi zn=Z%h?FXyt5XuxM)!RW}tkn`eWwS@0%byPRo$49%H@8Po;c1^9<@Diz6@l*bT3>J1 zkoF}3u>AN>z1fYsVuL+He&v<^4%ijz>O0ll)7_?|>h>sL0?!`s-C>{wydS15VCEa( z#uq`5b||I!9gq+;LjV*^Bw@uf)6obYllVfsHj{>B<}PCSQxE_Vb5(x-TTfiumR;MD zuKHD~2nbkA#9y+S&A7rWC8evyl&@(qzU*m#USfx}R*q{;K+Z}iw2md8Vele~?Y*@t zSi(FiVT0AyVbN6!QYlJyCHYVAunD{@z>S16CFoIF!pdwIInTtf&E!hR?jge_a74@4 z79BL+76m*#IM~zI1Gurr|JuOG6PZZ)_OYIU9(58LkUIQ`a(n>Z8?X>w#&3f-za0a@ z!)zBKvEdQ)yE31jni{{SLI{EcX@;M=ti=RL7KqG_Mq^PeCS>OR8+cXpT?jT}0&Cv_ zm{97y(R#fVFkz|UMAFlrD)HVdUh*`pu}oR3}gOMT{|~Serp5t3E1$)Hn?rHBr6|nwmCqqr)+NH$*k)Q-mInB9g>? 
znN&oT^+#CpeF!!pj@jOji8vRMo{Rd3bEoQ7)u+`2ebyN$_sl_U@n4Ny>-zrOk}a?> z1i^X{q_0vCh=|l4Y+)X?u)%%SVI+alh0cJ4;r$^*Hi1aygt-ugmjJKN+l)k9z{A3v zjldM-VF8G7TbDs-S)NK>ZDsT&e51lVNS3KOTwD4CG3ECvX3)hp5tr8yzl`S23_P6r5nU7 z8P}q^kSG9&E+5F@h$FJGIhs@^c_SlB6cda~WcYPJ0pZygUaPV~5lvFAvMO~{F07;( zmsa_0K)DDyP3Hn8?ivV;kzBx{w+4a>QR4!t{ezA?y_5u*MV@R?Vu6*fJL>^Ee+bqB zS@mhFIEiVCFgePiO0#4b;6~iWtzyFk5>lihA%w8tGUMGC7_CY{7Dj$e2#gR2TD?Q) zvmTjIa*|LoIt9T;)bnbpxGF;!-l8QmP85cVK^XQ^wauxm4XL`;)Yg_%)4^2pq12Wp z{8zX0S(yVA(rq+85sNrxl#1a@l1mYieoX%pT1QCoi$apQ-`I^%6rR)w+Nu$;TSY?D>K74GIMLAR zSJM#VMFZHRr4(Xi6z%{l7NrNPEKv;`$iQ#M*>tHVG6E(T$P*foS!@b!RS z60%~a)9ffX2Ss%)B4G9(4-ZE~?=7t!oSEwZtS~@cvx*6z(QqK?8(cd5?$WvUmrjkZ zI45Ld-_q7|N#A=*;R{P6mzKVLdBr&kSx=mG%g(w5^TOnV$cpm_0t-eHJJh=lY7OuM z2n1{1t>D&9iuq%T?X+hqH@Gf|xVgGH>@_D${E@;G9F0XUmvSWIu%0!LkHutrV1ntU ze!vc!MnIrG2V8xgRoPxx&Wr~3`_N0ge#dmR)C_R*@==<3OF_M`e1%?U%Ti;qjxV$h zLlDbpU}nl%G>^$lPJy~FM)lU>Fu2%b4=)?rme~GyuXb_eUX}h}e>ZX30HHeA|4wr| z?wv&g0TWT&(kyw54{0kdXw@3LBW))8bh_*ZjwVj=;OEODM)2JjkVS13WIwW~om^x* z5{raL6Vq0~cquIMG(F27gF;c1kwlSC+{6L<#`qkp_VDDzjhi?bnU%QO(crX^RaM|> z|H569@|53jUUxn%uUhhP$?{05w&lUMmdiVzvkb5;8&y&vXHO-|r!%sj*(gut)2gix ztv|1O^}(JWGYi3m3wL|}p!jj!tI4Y1HCC_kQ_JNqXQ@0h`-!K1*;BvJz1a4!X~om2 zP?ep~{#6QsUw!GN{6TuXQc8Fct%DBV3Oc%4nMd^%T}_Thhk`rc;ipY3hOKPZflQW( zQ$dK%KieG9DS%~aiH08AlNM1{ITX(hXP9g+0)hF^+)bT352VJDVD%QUN-ni|EGHAB1T}_c0?n0C&Vn^+%Sj|IVHrc?g64wr%_0fpY`5jJhctDS zGgApxM_!JJ^0K`@?1YlK;-)As3+$MPoUkb@xO;=(Dmikux8qT0e{fJq3dLq)AV~{O z$+}f%3JZeomM)+uMTj1hptwrzC`ha%Uo*4Wy@0ahEPIY^zws zx1?=Swdo?&FpwQ&^m*z2@a>@~<7129MYAOvZ<48xy>=RlF6n4K03_~G&K zv;`>0M&0c(^E9ETx!HG{!{AK-)h3I0(a?{~tef!h8?F;)B=I$PS zT>lRq3?%CgrM5Lb*|u+a+rEF@^^kdZa`{l-<3j^WZw@5)4<@$_rRuidxqR#L!d$X$ z&oevhF#nS5Ff&j;82UWkE3WmIG}^$+T~_9&R<_Gyl@BzVp~E-{gtYx7A#EHSU;dQ( zec-|yk2dGnC$*qFbM77x7i+>KE^^bS6vrg2P$M&Mi^+$c$502+TnU>VspI4&e_td6 zS&5zR+t*c4h-tS*1`K9D-gm`s3$dNGt3fu1yFcx=(#A7_yYw8!m}Q!fdw zAus6mI6D)&@Y131XGUgE6fv}RceVSFF3Az)62C}xvKkmx^ zY`gwO0p>`X1zEc-5%uhikV$+Fu-rXwFQDt~o9}z({=b2PbfzOf)?wO&NfxIdZSFeR 
z-II0_)Jd<{NG;1|DQbWpSbaDXhjFg|M8BTDkGzc$`4orjyFR zwrtMfyIJH99jf z5$5O8Zh|KYQW4dJC(MR2Y9=p1 z@DT-D1Y5Q*v@FyvRkvMnYu(k}rHOU6s_{$e9okiT&7AVpKlbfi_|8(*z6X0A3_W;l z$@}s(=4s9LJ4bIFU9A5}%e|HdZzo$ik~K%4)Erx`IhL$>_deMFV8=uHVbkXg zpYMD4(!;u?`d+NS&O!zI>Y##GX>|>5tf2&I*g@@VPu0I9yp@3evdIVFK@;|FHD*%w z6EB;v@9PwX&qyfPkbl(qO4knNr#slLUDjb)_kd|ZNt)<9KLf&Z11y)gK?f2%jpm6> zX+D-5E)4R=nb1a>FoE&K4zJQ&oMJiF&zE%}gYt?gM!&rLNhO2wvZ=)&U;b2*LB66H z>Azq81QJ(tyGW%Up;2m%=#5t0agjf1Bxsdj$g(AD{8rAz73*n(@U<^0Ve6K)qx+)f z7>DuZJQz;GC+p@>S5#w#I2)?ZaLJz$!dX)`a6xMX`qF=3`Hx~=B2#lWQmd;$(XZdS6ecqbK^>KiYnsC9o?QI z4OX4#F@6gV-p$}KMV|s zXjf^AFe&LG)!?UDGI(i^b|BKOjDDrvnnr?!U(w)D;#FS6qwg3$jll;P;P+wqCKyRId0MeRC*%)e{!{t}4VynDX7~)- zzhExY_vrq6G}%PBXG)vecJn8onkbrHMHkx__9GYJyCk#I{L%Q{%v}H?fD<<;ahss+ z$oSN7cszQvO)m4-D1M{k>wl;I=SS4z=8gx!e_pou)=#|4%^km5!^VHFqLO|=p9}xt z|2HWio7G+-UD4%i2&>QCFefL6L!jJTjzHxWQV5vJB4fN71xRtX(6CBCU4lPK(P1#;BB`9aIYQdB+6m$P6`HfwWI`mUnea$@#T*bGO)wr+x2Vlib`6CAJ< z8Yk~{5&|ada_V|>@OXA8Z62E%ofLL9>)|;dnifcXgj|xX%pL+d5s8JxT}^b|N;5Dp zaB_Fzdr{oo5XoX0Iqxy!G;^~vlNv(|v{o?t)_PVADM z&PMc2YyzcwfTsLSDSvaS)}Lw&e0JvUnZ>DOK?xDpKu(6rivhB{9TW1z7CbzVunp=NjyJuV4)0J%Q&Zwwq=V!BbXP0&d zlTDq^D)AKSAbk+Lh9_ytxAjY^k=Qf5)i*}2kKFuD(!2Aqch4e!jrrpBH*VH1FrV4( z+JGKEEL&;rNcuaLt2;ht9#?lQy*YHvstW*R$bs&j$Ld8THqBa}2nRa7lZBdb+HX z2OphgL1)LEa?Pc9qh;)jn&D87MLE=?r)5iZ>+QEndGi*n6Uv7c@Oc9l?~Ca320OL$ zR%LHCA=@9Wo36Tvx0_YR!DGAN;30a8SuaxoYd=2&{o*e{@O26oJo@nRV40wJ9Cfu= z!6zUg9>^3Z_(+T&ja-W8$Pr~AlBnc}+Kxr~_TWOvozu&;9lzq|@fF|28lUMh6wzOW zDAE!w)&eeZ{n8CBE7=m8YEhgP90F7m^#9W}&Xp7?1S| z2HJ07jL3($5RvBNC0J%{(%i+p`g2-}6L<^CeMF^-ORja@IClLQ%-v#WrM4qk-mzQ^ zG-tWE>k6YKt4`wat@0m{zIp(T8gPs_`tZQAC%9Ho1Rf;bOrP&cAT{d@lwYl;w(t14 zd&kY-@0zc@^?TOG?j1|E9qToiMM3a4Ush9|rYM><|Lu{^a^}%1rJa8BAD6S8TQdFF zZa!PS4}%SUsjKtEjS(8dxqEsZAWC=QssFjRF3P227rnjno$S zHHqv-YQ?B;f-MXy8ln}Z4k#dMxrl5Ll z32W4JjlTLu!ba}>Ik80kH2~Orf|IjZF@)d{SE+W2gLi#|CYL}}zn};GdgQHLnS>?t zM2wEqbr4C7V!dfN{DA)P;9|*Vr+H;L7$}nLXgK12g#(>KA!i-@QtAj2g>6^K7 
znz+b^cyud72Apem2ENN@+~C{}QnL9JnJdUDdEL2$Y|_I4?4d)giSvd|x0>OE-Rw#IO>4%dg88p1pU5O$JZh z0HTM+)`~a>dJ0-o1v|iYjG+vyIxxb5g>-Zp{Jt76eFp|b7@%R5|1Au@hrw@SAlDzY z!y^t9xrp9RPTgtG!)A&V`%s3M0r?pe|8n0Nb(*%9Ug^fKK{a1*zWH*pwCQo_ON)E2 zbbZnF+O^@EU3ZS%ItJp!gTa-C!^zsi%ihBeyB>RA`C0dsjBg*-qiwYj%!>UqRoeoF z)dlaZzNa-?Z^jl*EZ6N#*6ai8>O$|W4^uVUGfbu5jLo$MKIQQIB-&h`gUz+F`ewtO z=3C7Rhm+MWJ+9vOpfu^-|HRwA>}^kak6d~EDFE!hbLiHgg*TElyB^mZc(6BFdGJZ) zk>$!G$;zWwj{TY2`{^OI>9rV5uO2YHHc#M(E;q+Z6aV+~Ejo|3@>^n4$ zH2Eb$`Fk@kf0S&)5RR{dK6$*pgOyO!07SmaH(ta{D<7C%WTuW%Bg|$>sedFEh z@BZ-I6~`BDU#hI)N^h#D;z^NzxyZjzwo=r%*z%;MW4Wc{;qXdJC-^eL${E>0hCp&ZRZ@59 zrN)c!lValUgn~Jfd`p6qQEDe2by4aewU?_Pm3;E$7d+G?eEx;RJordrX!H|eXtLl- zF+hz+h~a@sA*Sww(jH8WdqAraeos|uvU(Vhq`q*B!w2-<#a^A`m6~j-(K?P`b^->p z`5fPeZ~lD@6nJr8jr5xAgDL$-NZ2e>#(V^elHQDB>_-@kVW5;DW9AU(J4x-8^Of2O zN6@r`{spoVm>6X`u0Xy)8w1^n{pFKsReipl<{6n zt;C=TgC-1s9#A|wJ@5xHIE=xo7<6Lr8V3CsyobRA1|MVak1@E4K{p1ti{RX4^wRL$1a5{#8n6?Lm+7%RtE z1;)Ho<+fF<7fwSuir0J)TRTI$*&S=!S%?;UA=q8Xg6A+t1CWKPs9Lp>s156|lPEM` z+iDSsI;ql%RTqgCV})*vuJtkJ;r9F(<7S(lZ!cz>*8FeNZ0U2}WMki^g(1Sx2^8T^ z@m`pLl@CA5%Ohg|_ZaC{kRwqZ=^D>rvLpJzcz7Is7kFY$_z-0UoL+4cK4lYa121tt z!nJYd1-mNlD+t4%IVCS_M}!hb`9A;hAy{Q-n*MWY`(IJp{){UAbE@XAs8;B^ isc7Bgpy4CR5UerqxQdVK)d@TOHvJ{|nZk{9=KlbyExF$S literal 0 HcmV?d00001 diff --git a/app/modules/rag_session/__pycache__/service.cpython-312.pyc b/app/modules/rag_session/__pycache__/service.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e20651adc424bc3b89c066b09ca00468cd0f2f97 GIT binary patch literal 11562 zcmb_iYj7J?mcA`@>;0B2%X0j7Y%8(jJWSp(IEfPi*gPBp7#NMBZrgHX%XG_e?2#E3 z*cpXo3!9mmIJ;Eg)Xo;7*sU>JwS|A|Rs{-(r{<58W=5jdQ=8gH)y|(Sftekk_Q#&n zt(Ig*24-s8*17L{Pv3j)Ip00Ee`U3r2t5Cgxe-}xBIMsOp+0mWvM~jbYlI~%6(z$; zNDWhRS~IMHP!rWo(8KfuGt5v}u8ry@^uu~NPDc$B#$lrzXQHMF^RQWt>!Owk>#&s~ z8giDf`j-i7ct@j*IBc7x0>#!bb9NIb^RLWcP~cO`ymYbhm#7#NvmeUSDPIKp%dq^k;PBc%P1_I}6Nt>w(@4*1m}f3R96wjNqngIqOACXk9D zj=vlUbAytBD~O9^;){|Z>BEYQNIIU2PxF&ubuXZpppC$yTOgYuX<% 
z44Jnl?EJ2TalEQRJ2GBfA=jF)V2=s=I|V^hIi^TL&uZp?>v{d;kAXkj`&*u}_j7#f z`}wythGfE_jING-*ZVlPc^7Ai%WoaG(!B1dz>6tig?Vsp#|zAdp)enY1XGAHFfUCN z`qC!M2|8hYr=T2)@1;@Ji2|v6^#o1+B;K1Vj7KGmcgaL?tOS$LCG-iy2t7m&;mB2o z$g97A-=!83uUC6(CA|#(D3rYHrFgqEJ#3W-p7L1SaT!`h}FofiUiotLwJjSt#x)UTF*lKQ)mE6IL(~&4- zE2WV*Hz7z~H4_!J5FCl|LDW5xRnehpA5OWET%=|h`>%w#sW_-b;D0g}kBrO)Rec2t zjhl*w!d#(pz{TSx<8fE;DB652GHZ^<;-RSAs~}k;lPq@y*j99(Wh4ZAs42TL{un4+ zrP!flOhiCADmf;AG%9a;)cUd_l{=Matim_qn1XeC)bh23xBzwVXs1qlkK0$l1 z`Qg3TjZN-2C`^W?gt1tBuBOZgY%W4QBK#WG|G&tWFOV-Rq}so<^C#m;bIw#C|eU+(0%Hz$$9Fto}C%b&Sf^+)|+YTU5(#7zji2n{+aam zSh0=!#50ofv@AcD@pLRce5bA{TX#59cX-t!*7Yu&S?@fM?L3j`Jh94VdmqX4K9YXy z$@K8|(%&CVN1hWq`D`1%@Nj8_*e9NY>;9&!e|N^eJ9YXO54`=lyH#>7ggm zPleK9Ui1rDkC65VIZtbf%6I~4PixLmx9rR~nis5j9dTC|rpUbV`09ge9?>n6iyWGL1)8d)JPNwhLLqg|y*9&c0(QntC{WVjyiF zNE-(74a9$u`U(toyFh(;$0L08g^qaYJ|`4Rsbj}7m2ot^ZpmvQ`Sl&k;FmeWg|CGd zV0mBc^PaU(@7LUWrbqLEk2=$(`=F+;9TFcB>daoXuo>a2=yjQ0CW!hsq%P_Yz8YsO*Qy6DxjGHK+k~KUQ znjBSkOJ%6A2I}>Xg$e){OrW&C87`Q=5iYD1;DTk|S_T)a-x)60zO|LE0?4of$N;>4 zsRLBa0Wgb|;D)Va7B`uwBJ_ZjEj$S=m|hy7#8wkkfD#mW^)ifS!0{F+zB5tz{fz2} z3)G+aVP5TO|D_PjpbFj`ai^MJ5!#S(k6K;|$5A_NdBD6n?lO2P2y!Nz0D?>v0Sg&N zHNE8lvv0*7OU7U2_%0Asx1}j0%ihoRt?ySfx21j(E@kxXfXBRhn|E=Sxct^}E6rG@-NDZ(4*rWaG{wx|B4r zDM`n2Q4YY9Zh8vNY;ib<#wGy-#RG002jg3i;0y6Pk+cDkba3heoW$?LoOUFzn+YF4 zg4l}Rg=9C9P9$AO_8{p-vKNSCg+8LQ07Fn63jt3V?CHZwxS#j~NP3XmgXAERLqH^p z!gUbQApjzU_X4Kjk6<-8d=dT_l13z_kQ_&H0!c5Ddw~cj!JA==l2X8n0<}~w=X`fz zmjeGdr0@#h*D~nRC3-uy!!FIOfMr_S0M%>(F#iDn=Gj#MFvmAY?KcCMb*E?H{Iw?+ zpIl;>Pl{FTqO&7;GOwuy6YQ>9^1a@h^E75X?HNydivGmYmGd+$KbrBhEez!R^;v&+ z#^1eSUVZ%LgXu>f7yTEqo(pNug`8)H-1sNG%j{cYH^x%4V%w2STYs*h<;~}>Kc8a7 zh64-tuea>Vw)A9LdRETAH~j8!_MS&G_dJ>&8csjMiY;8Wj$1hMX?0!Bw=?VO%=kJ} z=d)e?nXdku?Aqz{V^5`@4yUUM3desN0OMj*zM+ z0ACqM{fG>+cFzFJ^38k+gJZ!rqpds*J%26yGsxtNeFx4pQ|}+#f6k=&po6-eehr~xvLhC--)5k#K;{((=o$lkFoe;lOL!C3|ZuznK zt$GbKztxC!Ztbkb_;Ko7z3$eDgAiYXR*m#pj|t&b6yvBqzqa>_(7R<~%0{}^Ou0qJ*A&%9(p;^?t zYNw&bJTpW8m_`B_KwX`Wtzzxkr 
zBNLIhq6*Q82u^OD5S!-194{wzJRadBMn2dn=VJpnBKZh+nN#$&eFRR87enDo1uYxU zm1*pUu(4fUt%64}GC3;Id~8OLv`BH;%JeG#7^FA31J%JMpg?#Xh^$ES8rpc4%GEc& zIeC3DTfaY3zkj8F<&ao^Y_%^_-@WGcP;c>JG^*!=?qv`ce`lsNYd-Bss;Ai*y7mIY^quGc8RV%Nqw%X zJE>duw=AE2Yv9H}w)Ie^_0XzbY`stP_n~y{x%xoz)IwFx;(Bf1>cCRZvU{0J`#XPa z=|bV!bH`qlwDLX}6fOipGn+sR0+w*WfeKMVP$|fQ^HWfNcoQIi>KQ6?X|q(kpqxnA z9YCc;W;JZlU9B#uDlVYLNJSwiIm~q+NcwnDNwIXv=~!}*Cm8iK1;P>m^=JSUYJfjx z4qn8ME_Nd*P*XvH!m`0B+N3B>P?|;>o#%M2uHf1pFCufC80ATr)i(i`yStcc1c!2A z`vD&aO)RgatK#3|~7Udvdmb4S0DOtt}Nu^G8Oh>DNuGL9tk#K>P@cHc_MPlp z1XX4g3<@?py7$x!+qLnSgGTXFb%33+`&Y)u7 z#aa<+({_w0TM_ROibBX=1YY^?1CjIsh$*<>0}MZknP^XV7D>_Q1dPhYXvEyj8$&rY zUc!Q>fJn4_Cx91uMVVGa8qd3P^%V?F*;T^NppLK%1g<6q%P<2e@-k$ zTk>?lYTVh;xZcvX98V2o+K-AY$JQH~mwQsi8~4I_EZ4O2E#D1aYPZ;Q@HU*EnsW_p zxyFvqtVX8`M6S9aZ^xK}RMjuV-@J1DO3ElU?7!VGurl^u^xf#qBjUk<&zw-@_aKnA zycWjI`-#I_uu7YN3)&)C^b!yO`$NVd-1<}&FLiO}H3^NNNl;+5ik2;*sRXy#qPA7U zsAN|FK4=Cd6I`NYpdMSxa7s~InK3U|OZx6bMB_Jr3rDvp=-w$k1ZwOn&Gy) zcf}^!?@b%-l}&u*$%HNPolu}Uh01gQx-0Cm@!~`Re}(6rLWtAN!>=49B(}keYJXe% zpVyW&zPvUc$486%SpyD?g|?L-%=ceL%vhPEe*$!(2U$? 
z@goLB0NSB&&QmS`>h;$@LWzKy0zje7xzKv8W3gkYS9I;V?b@5VDBAXAZHF?pL!#|S zQv0ddmUDZP50n8#(Hpq!?M^)=y7q!OvU^{9{_68fqj;c6Jt5ZLBiavU?e}Hu_lfqC zX~W68rKG6s;Tlzje3i6)X{*567Ni0=Eqc9kQLJ)co2EGN)5sOkIznl>k?iTyqU2u-e#IHgrh5diy|?xnMsb99Kx&s zH@U%ZY+@=l2`)~_8Vw2YAbh?O8HoV8vBQ-!Tr%TVBtkgGa{&YY5_Br}%)gAWKS%N- zBtJ%yL~<2KfRY`f0ZmEJ1VnhMc0r3X=Pws71@fr~>z|-pm;~|yfg8g&pS=F$a!jl{ zn5(XpZ!L{t^)AFBc<{IaUPZjMJbeA|@_<;oKUY;F`v6D9D$x3^Zy9bF(t%@QmtA~qZaUGD(LlhJ5WP93iBG|$6y$hX4<6nX%7k(`?fCPNe4rn3kt9h{$y z3qTZC44g-(S@>KDP`(CDzKZ_mnXhFLNTpA)>QLpKJG;E~6HRChxy zX1sy~S>*o$2!1jpOh@B)IT`jr;bxt_B6RQ}?M$E$Byw@Gv~BEV-4Y$P1#Y0PDjpsfPH zJv{}N^Y>xKgGkVeQs!KjeeGz-6rVf_o$R82o5%~T=s7dyB0=tj5s=cCN!~=-cYkIy zF#GPfn)3{VdGIqf=k*vf5Lew&JmYN1V+E*cvTc|#wu3xMJx+aQY-9$hJHFaHgAr)x z@GP9ZI+xeWNdxiLFA2-1QtqW#YIfE0_787*GTr^@`umdm!En{B zgf~4m+4ub$a$eq{W4x&zEby+JUaeXcZk}3mug+(V3}ku+v5q(IsAj5Dbn5Yy1FPEA zp_LPvuD(pi$vlDVwN^PuKlO|fj$Km1sd##NHZwK1ftB+6s~PVdn={Wq2rp@O=k*wa zRjA4vF=ireZ{CbC3vpI0oL@BMt(daG!X(+N{tY{(_S4LLR7%Jb3|E8=Im}xa<`lKW zg4Y?sWyr-aZ!;Esku{C_bn^OKrP_0t}(C27_31}-iR?1+0l?U zW6T0vePM-|B7!J*0lRWtgy%b+10vbOv1n9*=fa-Q#jq^yvh+)E^c1F`h{pqvbO0`m z_7pxh+{2eXH{4S^FYg(Jk8;ECQEu_eMtQ4A_@QFOmiYaP^0ltK9i_nb6=ip5D7r`1 z-5js%TzNnJ6ki2hIPxC*+HhRqv6ufV$U}21;N2LwdnoE(h)w=C{wwMJEot~q^2l$= T@!yiZKj_YCDey)jmAm>sOM@qP literal 0 HcmV?d00001 diff --git a/app/modules/rag/__pycache__/session_store.cpython-312.pyc b/app/modules/rag_session/__pycache__/session_store.cpython-312.pyc similarity index 76% rename from app/modules/rag/__pycache__/session_store.cpython-312.pyc rename to app/modules/rag_session/__pycache__/session_store.cpython-312.pyc index fd56ff574dc456d72d574ddba3fb1cab567fb595..101d30033981ee6f98f076d845ab912f2d0f15d1 100644 GIT binary patch delta 87 zcmbO%uvmciG%qg~0}$NWw=mOYBX0nsghpaPfqrg&N@-4Nv3^lvdVFzeadBpT-sW6J ac{T|ppsXHLRu4&b@*H+QMw`j6*);&(+#S{c delta 71 zcmZ21Fj;{2G%qg~0}$j-o|9>^kvD)*Kq0Z9KtDG>r8FnCSidMSeRC dict: rag_session_id = request.project_id - indexed, failed = await self.rag.index_changes( + indexed, failed, 
cache_hits, cache_misses = await self.rag.index_changes( rag_session_id=rag_session_id, changed_files=[x.model_dump() for x in request.changed_files], ) - return {"indexed_files": indexed, "failed_files": failed} + return { + "indexed_files": indexed, + "failed_files": failed, + "cache_hit_files": cache_hits, + "cache_miss_files": cache_misses, + } @router.get("/index/jobs/{index_job_id}") async def get_job(index_job_id: str) -> dict: @@ -232,6 +246,8 @@ class RagModule: "status": job.status.value, "indexed_files": job.indexed_files, "failed_files": job.failed_files, + "cache_hit_files": job.cache_hit_files, + "cache_miss_files": job.cache_miss_files, "error": job.error.model_dump(mode="json") if job.error else None, } diff --git a/app/modules/rag_session/repository.py b/app/modules/rag_session/repository.py new file mode 100644 index 0000000..bdeca25 --- /dev/null +++ b/app/modules/rag_session/repository.py @@ -0,0 +1,660 @@ +from __future__ import annotations + +from dataclasses import dataclass +import hashlib +from sqlalchemy import text + +from app.modules.shared.db import get_engine + + +@dataclass +class RagJobRow: + index_job_id: str + rag_session_id: str + status: str + indexed_files: int + failed_files: int + cache_hit_files: int + cache_miss_files: int + error_code: str | None + error_desc: str | None + error_module: str | None + + +class RagRepository: + def ensure_tables(self) -> None: + engine = get_engine() + with engine.connect() as conn: + conn.execute(text("CREATE EXTENSION IF NOT EXISTS vector")) + conn.execute( + text( + """ + CREATE TABLE IF NOT EXISTS rag_sessions ( + rag_session_id VARCHAR(64) PRIMARY KEY, + project_id VARCHAR(512) NOT NULL, + created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP + ) + """ + ) + ) + conn.execute( + text( + """ + CREATE TABLE IF NOT EXISTS rag_index_jobs ( + index_job_id VARCHAR(64) PRIMARY KEY, + rag_session_id VARCHAR(64) NOT NULL, + status VARCHAR(16) NOT NULL, + indexed_files INTEGER NOT NULL DEFAULT 0, + 
failed_files INTEGER NOT NULL DEFAULT 0, + cache_hit_files INTEGER NOT NULL DEFAULT 0, + cache_miss_files INTEGER NOT NULL DEFAULT 0, + error_code VARCHAR(128) NULL, + error_desc TEXT NULL, + error_module VARCHAR(64) NULL, + created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP + ) + """ + ) + ) + conn.execute( + text( + """ + CREATE TABLE IF NOT EXISTS rag_chunks ( + id BIGSERIAL PRIMARY KEY, + rag_session_id VARCHAR(64) NOT NULL, + path TEXT NOT NULL, + chunk_index INTEGER NOT NULL, + content TEXT NOT NULL, + embedding vector NULL, + created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP + ) + """ + ) + ) + conn.execute(text("ALTER TABLE rag_chunks ADD COLUMN IF NOT EXISTS artifact_type VARCHAR(16) NULL")) + conn.execute(text("ALTER TABLE rag_chunks ADD COLUMN IF NOT EXISTS section TEXT NULL")) + conn.execute(text("ALTER TABLE rag_chunks ADD COLUMN IF NOT EXISTS doc_id TEXT NULL")) + conn.execute(text("ALTER TABLE rag_chunks ADD COLUMN IF NOT EXISTS doc_version TEXT NULL")) + conn.execute(text("ALTER TABLE rag_chunks ADD COLUMN IF NOT EXISTS owner TEXT NULL")) + conn.execute(text("ALTER TABLE rag_chunks ADD COLUMN IF NOT EXISTS system_component TEXT NULL")) + conn.execute(text("ALTER TABLE rag_chunks ADD COLUMN IF NOT EXISTS last_modified TIMESTAMPTZ NULL")) + conn.execute(text("ALTER TABLE rag_chunks ADD COLUMN IF NOT EXISTS staleness_score DOUBLE PRECISION NULL")) + conn.execute( + text( + """ + ALTER TABLE rag_chunks + ADD COLUMN IF NOT EXISTS created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP + """ + ) + ) + conn.execute( + text( + """ + ALTER TABLE rag_chunks + ADD COLUMN IF NOT EXISTS updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP + """ + ) + ) + conn.execute( + text( + """ + CREATE TABLE IF NOT EXISTS rag_blob_cache ( + id BIGSERIAL PRIMARY KEY, + repo_id VARCHAR(512) NOT NULL, + blob_sha VARCHAR(128) NOT NULL, + path TEXT NOT NULL, + artifact_type VARCHAR(16) 
NULL, + section TEXT NULL, + doc_id TEXT NULL, + doc_version TEXT NULL, + owner TEXT NULL, + system_component TEXT NULL, + last_modified TIMESTAMPTZ NULL, + staleness_score DOUBLE PRECISION NULL, + created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT uq_rag_blob_cache UNIQUE (repo_id, blob_sha, path) + ) + """ + ) + ) + conn.execute( + text( + """ + CREATE TABLE IF NOT EXISTS rag_chunk_cache ( + id BIGSERIAL PRIMARY KEY, + repo_id VARCHAR(512) NOT NULL, + blob_sha VARCHAR(128) NOT NULL, + chunk_index INTEGER NOT NULL, + content TEXT NOT NULL, + embedding vector NULL, + section TEXT NULL, + created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT uq_rag_chunk_cache UNIQUE (repo_id, blob_sha, chunk_index) + ) + """ + ) + ) + conn.execute( + text( + """ + CREATE TABLE IF NOT EXISTS rag_session_chunk_map ( + id BIGSERIAL PRIMARY KEY, + rag_session_id VARCHAR(64) NOT NULL, + repo_id VARCHAR(512) NOT NULL, + blob_sha VARCHAR(128) NOT NULL, + chunk_index INTEGER NOT NULL, + path TEXT NOT NULL, + created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP + ) + """ + ) + ) + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_rag_chunks_session ON rag_chunks (rag_session_id)")) + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_rag_chunks_artifact_type ON rag_chunks (artifact_type)")) + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_rag_chunks_doc ON rag_chunks (doc_id, doc_version)")) + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_rag_chunks_component ON rag_chunks (system_component)")) + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_rag_chunks_path ON rag_chunks (path)")) + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_rag_blob_cache_repo_blob ON rag_blob_cache (repo_id, blob_sha)")) + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_rag_chunk_cache_repo_blob ON rag_chunk_cache (repo_id, blob_sha, chunk_index)")) + conn.execute(text("CREATE INDEX 
IF NOT EXISTS idx_rag_session_chunk_map_session ON rag_session_chunk_map (rag_session_id, created_at DESC)")) + conn.execute(text("ALTER TABLE rag_index_jobs ADD COLUMN IF NOT EXISTS cache_hit_files INTEGER NOT NULL DEFAULT 0")) + conn.execute(text("ALTER TABLE rag_index_jobs ADD COLUMN IF NOT EXISTS cache_miss_files INTEGER NOT NULL DEFAULT 0")) + conn.commit() + + def upsert_session(self, rag_session_id: str, project_id: str) -> None: + with get_engine().connect() as conn: + conn.execute( + text( + """ + INSERT INTO rag_sessions (rag_session_id, project_id) + VALUES (:sid, :pid) + ON CONFLICT (rag_session_id) DO UPDATE SET project_id = EXCLUDED.project_id + """ + ), + {"sid": rag_session_id, "pid": project_id}, + ) + conn.commit() + + def session_exists(self, rag_session_id: str) -> bool: + with get_engine().connect() as conn: + row = conn.execute( + text("SELECT 1 FROM rag_sessions WHERE rag_session_id = :sid"), + {"sid": rag_session_id}, + ).fetchone() + return bool(row) + + def get_session(self, rag_session_id: str) -> dict | None: + with get_engine().connect() as conn: + row = conn.execute( + text("SELECT rag_session_id, project_id FROM rag_sessions WHERE rag_session_id = :sid"), + {"sid": rag_session_id}, + ).mappings().fetchone() + return dict(row) if row else None + + def create_job(self, index_job_id: str, rag_session_id: str, status: str) -> None: + with get_engine().connect() as conn: + conn.execute( + text( + """ + INSERT INTO rag_index_jobs (index_job_id, rag_session_id, status) + VALUES (:jid, :sid, :status) + """ + ), + {"jid": index_job_id, "sid": rag_session_id, "status": status}, + ) + conn.commit() + + def update_job( + self, + index_job_id: str, + *, + status: str, + indexed_files: int, + failed_files: int, + cache_hit_files: int = 0, + cache_miss_files: int = 0, + error_code: str | None = None, + error_desc: str | None = None, + error_module: str | None = None, + ) -> None: + with get_engine().connect() as conn: + conn.execute( + text( + """ + 
UPDATE rag_index_jobs + SET status = :status, + indexed_files = :indexed, + failed_files = :failed, + cache_hit_files = :cache_hit_files, + cache_miss_files = :cache_miss_files, + error_code = :ecode, + error_desc = :edesc, + error_module = :emodule, + updated_at = CURRENT_TIMESTAMP + WHERE index_job_id = :jid + """ + ), + { + "jid": index_job_id, + "status": status, + "indexed": indexed_files, + "failed": failed_files, + "cache_hit_files": cache_hit_files, + "cache_miss_files": cache_miss_files, + "ecode": error_code, + "edesc": error_desc, + "emodule": error_module, + }, + ) + conn.commit() + + def get_job(self, index_job_id: str) -> RagJobRow | None: + with get_engine().connect() as conn: + row = conn.execute( + text( + """ + SELECT index_job_id, rag_session_id, status, indexed_files, failed_files, + cache_hit_files, cache_miss_files, error_code, error_desc, error_module + FROM rag_index_jobs + WHERE index_job_id = :jid + """ + ), + {"jid": index_job_id}, + ).mappings().fetchone() + if not row: + return None + return RagJobRow(**dict(row)) + + def replace_chunks(self, rag_session_id: str, items: list[dict]) -> None: + with get_engine().connect() as conn: + conn.execute(text("DELETE FROM rag_chunks WHERE rag_session_id = :sid"), {"sid": rag_session_id}) + conn.execute(text("DELETE FROM rag_session_chunk_map WHERE rag_session_id = :sid"), {"sid": rag_session_id}) + self._insert_chunks(conn, rag_session_id, items) + conn.commit() + + def apply_changes(self, rag_session_id: str, delete_paths: list[str], upserts: list[dict]) -> None: + with get_engine().connect() as conn: + if delete_paths: + conn.execute( + text("DELETE FROM rag_chunks WHERE rag_session_id = :sid AND path = ANY(:paths)"), + {"sid": rag_session_id, "paths": delete_paths}, + ) + conn.execute( + text("DELETE FROM rag_session_chunk_map WHERE rag_session_id = :sid AND path = ANY(:paths)"), + {"sid": rag_session_id, "paths": delete_paths}, + ) + if upserts: + paths = sorted({str(x["path"]) for x in 
upserts}) + conn.execute( + text("DELETE FROM rag_chunks WHERE rag_session_id = :sid AND path = ANY(:paths)"), + {"sid": rag_session_id, "paths": paths}, + ) + conn.execute( + text("DELETE FROM rag_session_chunk_map WHERE rag_session_id = :sid AND path = ANY(:paths)"), + {"sid": rag_session_id, "paths": paths}, + ) + self._insert_chunks(conn, rag_session_id, upserts) + conn.commit() + + def get_cached_chunks(self, repo_id: str, blob_sha: str) -> list[dict]: + with get_engine().connect() as conn: + rows = conn.execute( + text( + """ + SELECT chunk_index, content, embedding::text AS embedding_txt, section + FROM rag_chunk_cache + WHERE repo_id = :repo_id AND blob_sha = :blob_sha + ORDER BY chunk_index ASC + """ + ), + {"repo_id": repo_id, "blob_sha": blob_sha}, + ).mappings().fetchall() + output: list[dict] = [] + for row in rows: + output.append( + { + "chunk_index": int(row["chunk_index"]), + "content": str(row["content"] or ""), + "embedding": self._parse_vector(str(row["embedding_txt"] or "")), + "section": row.get("section"), + } + ) + return output + + def record_repo_cache( + self, + *, + project_id: str, + commit_sha: str | None, + changed_files: list[str], + summary: str, + ) -> None: + repo_session_id = f"repo:{project_id}" + with get_engine().connect() as conn: + for path in changed_files: + key = f"{commit_sha or 'no-commit'}:{path}" + blob_sha = hashlib.sha256(key.encode("utf-8")).hexdigest() + conn.execute( + text( + """ + INSERT INTO rag_blob_cache ( + repo_id, + blob_sha, + path, + artifact_type, + section + ) + VALUES ( + :repo_id, + :blob_sha, + :path, + :artifact_type, + :section + ) + ON CONFLICT (repo_id, blob_sha, path) DO UPDATE SET + updated_at = CURRENT_TIMESTAMP + """ + ), + { + "repo_id": project_id, + "blob_sha": blob_sha, + "path": path, + "artifact_type": "CODE", + "section": "repo_webhook", + }, + ) + conn.execute( + text( + """ + INSERT INTO rag_chunk_cache ( + repo_id, + blob_sha, + chunk_index, + content, + embedding, + section + ) + 
VALUES ( + :repo_id, + :blob_sha, + 0, + :content, + NULL, + :section + ) + ON CONFLICT (repo_id, blob_sha, chunk_index) DO UPDATE SET + content = EXCLUDED.content, + section = EXCLUDED.section, + updated_at = CURRENT_TIMESTAMP + """ + ), + { + "repo_id": project_id, + "blob_sha": blob_sha, + "content": f"repo_webhook:{path}:{summary[:300]}", + "section": "repo_webhook", + }, + ) + conn.execute( + text( + """ + INSERT INTO rag_session_chunk_map ( + rag_session_id, + repo_id, + blob_sha, + chunk_index, + path + ) + VALUES ( + :rag_session_id, + :repo_id, + :blob_sha, + 0, + :path + ) + """ + ), + { + "rag_session_id": repo_session_id, + "repo_id": project_id, + "blob_sha": blob_sha, + "path": path, + }, + ) + conn.commit() + + def cache_file_chunks(self, repo_id: str, path: str, blob_sha: str, items: list[dict]) -> None: + if not items: + return + meta = items[0] + with get_engine().connect() as conn: + conn.execute( + text( + """ + INSERT INTO rag_blob_cache ( + repo_id, + blob_sha, + path, + artifact_type, + section, + doc_id, + doc_version, + owner, + system_component, + last_modified, + staleness_score + ) + VALUES ( + :repo_id, + :blob_sha, + :path, + :artifact_type, + :section, + :doc_id, + :doc_version, + :owner, + :system_component, + :last_modified, + :staleness_score + ) + ON CONFLICT (repo_id, blob_sha, path) DO UPDATE SET + artifact_type = EXCLUDED.artifact_type, + section = EXCLUDED.section, + doc_id = EXCLUDED.doc_id, + doc_version = EXCLUDED.doc_version, + owner = EXCLUDED.owner, + system_component = EXCLUDED.system_component, + last_modified = EXCLUDED.last_modified, + staleness_score = EXCLUDED.staleness_score, + updated_at = CURRENT_TIMESTAMP + """ + ), + { + "repo_id": repo_id, + "blob_sha": blob_sha, + "path": path, + "artifact_type": meta.get("artifact_type"), + "section": meta.get("section"), + "doc_id": meta.get("doc_id"), + "doc_version": meta.get("doc_version"), + "owner": meta.get("owner"), + "system_component": 
meta.get("system_component"), + "last_modified": meta.get("last_modified"), + "staleness_score": meta.get("staleness_score"), + }, + ) + for item in items: + emb = item.get("embedding") or [] + emb_str = "[" + ",".join(str(x) for x in emb) + "]" if emb else None + conn.execute( + text( + """ + INSERT INTO rag_chunk_cache ( + repo_id, + blob_sha, + chunk_index, + content, + embedding, + section + ) + VALUES ( + :repo_id, + :blob_sha, + :chunk_index, + :content, + CAST(:embedding AS vector), + :section + ) + ON CONFLICT (repo_id, blob_sha, chunk_index) DO UPDATE SET + content = EXCLUDED.content, + embedding = EXCLUDED.embedding, + section = EXCLUDED.section, + updated_at = CURRENT_TIMESTAMP + """ + ), + { + "repo_id": repo_id, + "blob_sha": blob_sha, + "chunk_index": int(item["chunk_index"]), + "content": item["content"], + "embedding": emb_str, + "section": item.get("section"), + }, + ) + conn.commit() + + def retrieve(self, rag_session_id: str, query_embedding: list[float], limit: int = 5) -> list[dict]: + emb = "[" + ",".join(str(x) for x in query_embedding) + "]" + with get_engine().connect() as conn: + rows = conn.execute( + text( + """ + SELECT path, content + FROM rag_chunks + WHERE rag_session_id = :sid + ORDER BY embedding <=> CAST(:emb AS vector) + LIMIT :lim + """ + ), + {"sid": rag_session_id, "emb": emb, "lim": limit}, + ).mappings().fetchall() + return [dict(x) for x in rows] + + def fallback_chunks(self, rag_session_id: str, limit: int = 5) -> list[dict]: + with get_engine().connect() as conn: + rows = conn.execute( + text( + """ + SELECT path, content + FROM rag_chunks + WHERE rag_session_id = :sid + ORDER BY id DESC + LIMIT :lim + """ + ), + {"sid": rag_session_id, "lim": limit}, + ).mappings().fetchall() + return [dict(x) for x in rows] + + def _insert_chunks(self, conn, rag_session_id: str, items: list[dict]) -> None: + for item in items: + emb = item.get("embedding") or [] + emb_str = "[" + ",".join(str(x) for x in emb) + "]" if emb else None + 
conn.execute( + text( + """ + INSERT INTO rag_chunks ( + rag_session_id, + path, + chunk_index, + content, + embedding, + artifact_type, + section, + doc_id, + doc_version, + owner, + system_component, + last_modified, + staleness_score, + created_at, + updated_at + ) + VALUES ( + :sid, + :path, + :idx, + :content, + CAST(:emb AS vector), + :artifact_type, + :section, + :doc_id, + :doc_version, + :owner, + :system_component, + :last_modified, + :staleness_score, + CURRENT_TIMESTAMP, + CURRENT_TIMESTAMP + ) + """ + ), + { + "sid": rag_session_id, + "path": item["path"], + "idx": int(item["chunk_index"]), + "content": item["content"], + "emb": emb_str, + "artifact_type": item.get("artifact_type"), + "section": item.get("section"), + "doc_id": item.get("doc_id"), + "doc_version": item.get("doc_version"), + "owner": item.get("owner"), + "system_component": item.get("system_component"), + "last_modified": item.get("last_modified"), + "staleness_score": item.get("staleness_score"), + }, + ) + repo_id = str(item.get("repo_id") or "").strip() + blob_sha = str(item.get("blob_sha") or "").strip() + if repo_id and blob_sha: + conn.execute( + text( + """ + INSERT INTO rag_session_chunk_map ( + rag_session_id, + repo_id, + blob_sha, + chunk_index, + path + ) VALUES ( + :sid, + :repo_id, + :blob_sha, + :chunk_index, + :path + ) + """ + ), + { + "sid": rag_session_id, + "repo_id": repo_id, + "blob_sha": blob_sha, + "chunk_index": int(item["chunk_index"]), + "path": item["path"], + }, + ) + + def _parse_vector(self, value: str) -> list[float]: + text_value = value.strip() + if not text_value: + return [] + if text_value.startswith("[") and text_value.endswith("]"): + text_value = text_value[1:-1] + if not text_value: + return [] + return [float(part.strip()) for part in text_value.split(",") if part.strip()] diff --git a/app/modules/rag/retrieval/__init__.py b/app/modules/rag_session/retrieval/__init__.py similarity index 100% rename from app/modules/rag/retrieval/__init__.py 
rename to app/modules/rag_session/retrieval/__init__.py diff --git a/app/modules/rag/retrieval/__pycache__/__init__.cpython-312.pyc b/app/modules/rag_session/retrieval/__pycache__/__init__.cpython-312.pyc similarity index 100% rename from app/modules/rag/retrieval/__pycache__/__init__.cpython-312.pyc rename to app/modules/rag_session/retrieval/__pycache__/__init__.cpython-312.pyc diff --git a/app/modules/rag/retrieval/__pycache__/chunker.cpython-312.pyc b/app/modules/rag_session/retrieval/__pycache__/chunker.cpython-312.pyc similarity index 100% rename from app/modules/rag/retrieval/__pycache__/chunker.cpython-312.pyc rename to app/modules/rag_session/retrieval/__pycache__/chunker.cpython-312.pyc diff --git a/app/modules/rag/retrieval/__pycache__/scoring.cpython-312.pyc b/app/modules/rag_session/retrieval/__pycache__/scoring.cpython-312.pyc similarity index 100% rename from app/modules/rag/retrieval/__pycache__/scoring.cpython-312.pyc rename to app/modules/rag_session/retrieval/__pycache__/scoring.cpython-312.pyc diff --git a/app/modules/rag/retrieval/chunker.py b/app/modules/rag_session/retrieval/chunker.py similarity index 100% rename from app/modules/rag/retrieval/chunker.py rename to app/modules/rag_session/retrieval/chunker.py diff --git a/app/modules/rag/retrieval/scoring.py b/app/modules/rag_session/retrieval/scoring.py similarity index 100% rename from app/modules/rag/retrieval/scoring.py rename to app/modules/rag_session/retrieval/scoring.py diff --git a/app/modules/rag/service.py b/app/modules/rag_session/service.py similarity index 52% rename from app/modules/rag/service.py rename to app/modules/rag_session/service.py index 51753c3..89bcf84 100644 --- a/app/modules/rag/service.py +++ b/app/modules/rag_session/service.py @@ -1,11 +1,12 @@ import asyncio +import hashlib import os from collections.abc import Awaitable, Callable from inspect import isawaitable -from app.modules.rag.embedding.gigachat_embedder import GigaChatEmbedder -from 
app.modules.rag.repository import RagRepository -from app.modules.rag.retrieval.chunker import TextChunker +from app.modules.rag_session.embedding.gigachat_embedder import GigaChatEmbedder +from app.modules.rag_session.repository import RagRepository +from app.modules.rag_session.retrieval.chunker import TextChunker class RagService: @@ -24,35 +25,49 @@ class RagService: rag_session_id: str, files: list[dict], progress_cb: Callable[[int, int, str], Awaitable[None] | None] | None = None, - ) -> tuple[int, int]: + ) -> tuple[int, int, int, int]: total_files = len(files) indexed_files = 0 failed_files = 0 + cache_hit_files = 0 + cache_miss_files = 0 all_chunks: list[dict] = [] + repo_id = self._resolve_repo_id(rag_session_id) for index, file in enumerate(files, start=1): path = str(file.get("path", "")) try: - chunks = self._build_chunks_for_file(file) - embedded_chunks = await asyncio.to_thread(self._embed_chunks, chunks) - all_chunks.extend(embedded_chunks) + blob_sha = self._blob_sha(file) + cached = await asyncio.to_thread(self._repo.get_cached_chunks, repo_id, blob_sha) + if cached: + all_chunks.extend(self._build_cached_items(path, file, repo_id, blob_sha, cached)) + cache_hit_files += 1 + else: + chunks = self._build_chunks_for_file(file) + embedded_chunks = await asyncio.to_thread(self._embed_chunks, chunks, file, repo_id, blob_sha) + all_chunks.extend(embedded_chunks) + await asyncio.to_thread(self._repo.cache_file_chunks, repo_id, path, blob_sha, embedded_chunks) + cache_miss_files += 1 indexed_files += 1 except Exception: failed_files += 1 await self._notify_progress(progress_cb, index, total_files, path) await asyncio.to_thread(self._repo.replace_chunks, rag_session_id, all_chunks) - return indexed_files, failed_files + return indexed_files, failed_files, cache_hit_files, cache_miss_files async def index_changes( self, rag_session_id: str, changed_files: list[dict], progress_cb: Callable[[int, int, str], Awaitable[None] | None] | None = None, - ) -> 
tuple[int, int]: + ) -> tuple[int, int, int, int]: total_files = len(changed_files) indexed_files = 0 failed_files = 0 + cache_hit_files = 0 + cache_miss_files = 0 delete_paths: list[str] = [] upsert_chunks: list[dict] = [] + repo_id = self._resolve_repo_id(rag_session_id) for index, file in enumerate(changed_files, start=1): path = str(file.get("path", "")) @@ -64,9 +79,17 @@ class RagService: await self._notify_progress(progress_cb, index, total_files, path) continue if op == "upsert" and file.get("content") is not None: - chunks = self._build_chunks_for_file(file) - embedded_chunks = await asyncio.to_thread(self._embed_chunks, chunks) - upsert_chunks.extend(embedded_chunks) + blob_sha = self._blob_sha(file) + cached = await asyncio.to_thread(self._repo.get_cached_chunks, repo_id, blob_sha) + if cached: + upsert_chunks.extend(self._build_cached_items(path, file, repo_id, blob_sha, cached)) + cache_hit_files += 1 + else: + chunks = self._build_chunks_for_file(file) + embedded_chunks = await asyncio.to_thread(self._embed_chunks, chunks, file, repo_id, blob_sha) + upsert_chunks.extend(embedded_chunks) + await asyncio.to_thread(self._repo.cache_file_chunks, repo_id, path, blob_sha, embedded_chunks) + cache_miss_files += 1 indexed_files += 1 await self._notify_progress(progress_cb, index, total_files, path) continue @@ -81,7 +104,7 @@ class RagService: delete_paths, upsert_chunks, ) - return indexed_files, failed_files + return indexed_files, failed_files, cache_hit_files, cache_miss_files async def retrieve(self, rag_session_id: str, query: str) -> list[dict]: try: @@ -99,10 +122,11 @@ class RagService: output.append((path, idx, chunk)) return output - def _embed_chunks(self, raw_chunks: list[tuple[str, int, str]]) -> list[dict]: + def _embed_chunks(self, raw_chunks: list[tuple[str, int, str]], file: dict, repo_id: str, blob_sha: str) -> list[dict]: if not raw_chunks: return [] batch_size = max(1, int(os.getenv("RAG_EMBED_BATCH_SIZE", "16"))) + metadata = 
self._chunk_metadata(file) indexed: list[dict] = [] for i in range(0, len(raw_chunks), batch_size): @@ -116,10 +140,63 @@ class RagService: "chunk_index": chunk_index, "content": content, "embedding": vector, + "repo_id": repo_id, + "blob_sha": blob_sha, + **metadata, } ) return indexed + def _build_cached_items( + self, + path: str, + file: dict, + repo_id: str, + blob_sha: str, + cached: list[dict], + ) -> list[dict]: + metadata = self._chunk_metadata(file) + output: list[dict] = [] + for item in cached: + output.append( + { + "path": path, + "chunk_index": int(item["chunk_index"]), + "content": str(item["content"]), + "embedding": item.get("embedding") or [], + "repo_id": repo_id, + "blob_sha": blob_sha, + **metadata, + "section": item.get("section") or metadata.get("section"), + } + ) + return output + + def _resolve_repo_id(self, rag_session_id: str) -> str: + session = self._repo.get_session(rag_session_id) + if not session: + return rag_session_id + return str(session.get("project_id") or rag_session_id) + + def _blob_sha(self, file: dict) -> str: + raw = str(file.get("content_hash") or "").strip() + if raw: + return raw + content = str(file.get("content") or "") + return hashlib.sha256(content.encode("utf-8")).hexdigest() + + def _chunk_metadata(self, file: dict) -> dict: + return { + "artifact_type": file.get("artifact_type"), + "section": file.get("section"), + "doc_id": file.get("doc_id"), + "doc_version": file.get("doc_version"), + "owner": file.get("owner"), + "system_component": file.get("system_component"), + "last_modified": file.get("last_modified"), + "staleness_score": file.get("staleness_score"), + } + async def _notify_progress( self, progress_cb: Callable[[int, int, str], Awaitable[None] | None] | None, diff --git a/app/modules/rag/session_store.py b/app/modules/rag_session/session_store.py similarity index 94% rename from app/modules/rag/session_store.py rename to app/modules/rag_session/session_store.py index e513598..6e353b4 100644 --- 
a/app/modules/rag/session_store.py +++ b/app/modules/rag_session/session_store.py @@ -1,7 +1,7 @@ from dataclasses import dataclass from uuid import uuid4 -from app.modules.rag.repository import RagRepository +from app.modules.rag_session.repository import RagRepository @dataclass diff --git a/app/modules/shared/README.md b/app/modules/shared/README.md new file mode 100644 index 0000000..0f49eea --- /dev/null +++ b/app/modules/shared/README.md @@ -0,0 +1,40 @@ +# Модуль shared + +## 1. Функции модуля +- Общие инфраструктурные компоненты для всех модулей: + - подключение к БД (`db.py`, `bootstrap.py`), + - шина событий (`event_bus.py`), + - retry и idempotency (`retry_executor.py`, `idempotency_store.py`), + - checkpointer, + - клиент и настройки GigaChat (`gigachat/*`). + +## 2. Диаграмма классов и взаимосвязей +```mermaid +classDiagram + class EventBus + class RetryExecutor + class IdempotencyStore + class GigaChatClient + class GigaChatSettings + class GigaChatTokenProvider + + GigaChatClient --> GigaChatSettings + GigaChatClient --> GigaChatTokenProvider +``` + +## 3. Описание классов +- `EventBus`: общий асинхронный event-bus для публикации и SSE-стриминга. + Методы: `subscribe` — подписка на канал; `unsubscribe` — удаление подписки; `publish` — отправка события; `as_sse` — сериализация события в SSE-формат. +- `RetryExecutor`: общий механизм повторных попыток для временных ошибок. + Методы: `run` — выполняет async-операцию с retry по временным исключениям. +- `IdempotencyStore`: in-memory хранилище идемпотентных ключей. + Методы: `get_task_id` — находит ранее созданную задачу по ключу; `put` — сохраняет ключ и task_id. +- `GigaChatSettings`: конфигурация доступа к GigaChat. + Методы: `from_env` — собирает настройки из переменных окружения. +- `GigaChatTokenProvider`: управление access token для GigaChat API. + Методы: `get_access_token` — возвращает валидный токен (с обновлением при необходимости). +- `GigaChatClient`: HTTP-клиент для completion и embeddings. 
+ Методы: `complete` — выполняет генерацию ответа модели; `embed` — получает векторные представления для списка текстов. + +## 4. Сиквенс-диаграммы API +В модуле нет HTTP endpoint'ов: он предоставляет только инфраструктурные классы для других модулей. diff --git a/app/modules/shared/__pycache__/bootstrap.cpython-312.pyc b/app/modules/shared/__pycache__/bootstrap.cpython-312.pyc index 52f9c1c73d605cd5fc714e7dcac56d4562269139..b0a5382317184f0ff85fa094e44ddd29ff33f5b6 100644 GIT binary patch delta 375 zcmZ3={+6BhG%qg~0}vFnEzCSSkvGbg1;m2_Rv^s`#Gh>#85pKBq%hR7lmJ-}kiwY4 zw3-RRc43J1V_>Ldowy{4sfKyt3rk~`1|~0tCdL|;CPrn35;mA76^0_6TDBUtT9z8t z6y|IV28JTGNRYv7b!?ME7>&7sbWb~!KZ!Ajk$v)0MiX9%;*$KL%J}5`ypq(4$vjNz ztn5G&6(*ZA*{F*GSs?R^X99@^h8xQ24PJMIC1wcDaGGrMiGhbzs)PLl6ZhmE0UaihFOw>pHZ|(0w@6h=_W%X diff --git a/app/modules/shared/bootstrap.py b/app/modules/shared/bootstrap.py index f13f70b..d62259d 100644 --- a/app/modules/shared/bootstrap.py +++ b/app/modules/shared/bootstrap.py @@ -3,13 +3,14 @@ import time from app.modules.shared.checkpointer import get_checkpointer -def bootstrap_database(rag_repository, chat_repository, agent_repository) -> None: +def bootstrap_database(rag_repository, chat_repository, agent_repository, story_context_repository) -> None: last_error: Exception | None = None for attempt in range(1, 16): try: rag_repository.ensure_tables() chat_repository.ensure_tables() agent_repository.ensure_tables() + story_context_repository.ensure_tables() get_checkpointer() return except Exception as exc: # noqa: BLE001 diff --git a/app/schemas/__pycache__/changeset.cpython-312.pyc b/app/schemas/__pycache__/changeset.cpython-312.pyc index 83f194798be75af567dfc6b5d2031e8269c38be1..5517a90b9a98bdc3c43c6216a85a47fce7790b03 100644 GIT binary patch literal 3786 zcmai1O>7(25#A+tm;WLq(UP6SHlshAYGvj}c9b@%(OQxv$59Obv<44dtofcy+T@ab zyHZ3UMVQtFq*JJ(sMJf~V_XSHfL?m&q3ETDUMNw3=|v2*=%GkYZKYmv>df0+{wPed z0B2`s=FPk}Gw*wke~Cnb0zC3RKhFKmF9?6f5BE_V=J40RYzaglVqPeSX;DPpllK(7 
zX)ou!z)NX~^HN?eC~2kOOZy7`v|kiF!c~FDj|8I702Oy6+iN<=cS~ljr~~h{0zc3U`c9G1 zyd}@&Xr5Tn0)DA0T0TcKv&b^`3~VYoABQJ_*%GLb7KxDd5Hal~p0q@~X}K&VWy^mh ztLgXYyCqA>Fshldi z_ixWy%AFfC{5>;!efIY36X6hN^5w169fL9>rRC{rYKE?;C02YuGp3=gyq3~(3BqzK zR8MH7QUWv?Y8WY0+^4#k0%6L?WNAS&QW-v7gPIpgWrmgK1PU)0y+9rbjmU}0oJ&5O zV<%jjj`TRy5}eL(({o3lULu}?Aygusx8Us&s7$=O(vFkRv_gHLlTk;Mv>!Uc=h*uk zS+Gs!cVwY9S-|ldCc)j1>z71G_>r^&Uhzps%1W}w3f$1lOm==*e+Uf_gfvi{sPMC* zjF$3RhN}0d`5vXZ)$a=OIi0E$>ta}vSuRlvd&@`YlD3>TEx%6RQ%$;RE?Qy3)R?Ji zdL~-&&2nITVEH;%VUi~ENq8k4-12wBSoj8BTlo*|2K}z z-3gs#(|sYlF@GaG^tid))X9kc#lv zQ1qiWH{RS1?}tVj{exT6o6}GG$M*Wi4*Ji3K9H!ryigywR*PK&-TK=N+qiZzxgSb4 z;wPWRNB85SpNxE(`fZ9E|ENB2w-&qGX*{tXn&7T~In9mc>jMk5*g|&^1c~;qhd2x+ zJ(j|#xy*EisGRI!J@8^tBrzm-vRF|S_Mu)hHH3acRT=IBJBMD#p`A6{Ijz(&Y z=0sf@Ya~W0*Q);Q{>?~T8flyv0;_D**eKMcp})!T?Q;k6(0@EpIezFBl;M9PXNShs zb-=B2WD19W08Q&OLbwGm6&*m7iD%dAZUP5rCDakQ185aT)(*5Bd^uQqh<`V*=Rjg4+6Kz8vYhO-g7%5IVA6u+DVDy#b_IwH z7IM0pr}}*}Ye^-|%r07idvIi{Shbs%~tJ~gi?ir}U%aQ` zbNCvN?(oZjnBRD*Oce(dy|_mHHz8(sz%6Rt(8~FuMywwDMy{^FtsV|4%V%oFLp4XN zK=X>e2p0g{K5Zu)PccvXG%9$$jvZN-QGw*U@G~&qki+<3T^ejeAd^tw>b1S-*#qgU zy;?L~gv+u-;r|oErCJJHd~^DJex5R{rR6V`i3X^fVYt`rn)5@GPlz`IuPmPspF!uW zzVyFk|L^%8n*#@k`35ro@`|GPTsZMuc;UJ5;y;8-d%~q7#VZc18`YU30hpukpct

EG5D5v>Kq`_{K!YfZgmv<5l9POAGkY$s;X`tP zL}POe3gQo73M763El5a66zD_>DntjwG>C$owG%rLG^ng%5)vQAueKwOQPjE+^TAf%a=ye@cyX=7!^W+ZD0 zHvJEqjWYG@H%zJ1L|qN~{wR5Wr|k54%e=kCe1|W$x13;;aTcEMMUWPm5)2XWIROLm z$tpfr+k=N|P}s|9i(r4u+>zbw_?2;QG3RDq=Eiio?dDx;ppIv`O)Jvsc6`^oL6668 z%#xjG&PK=p9r|6Dz5ZiXQY&SVJ+*@_;4s1o0ZHbT)9JHSfG)rxWGe!&mb1 z<7?RlEyRQDlly45o=S9JVIKl0;W7`Fm>`c}BVgZ&-D){O&<&l?>ju2l0v)&JaZBML zNO{0Mr6eq1g!cqz5RM?oyD4IXItT_qDSkVCo?AcpMJqh5ebknIrVOpHs}TLv4~(W| zrMPH4zH-3WSv;mDAW=J>Fd>gUAaxFN5EkOk)}m}!F0a>fiOR&A1AdZ352KzJY|0K2 zvlqEe5PEIUuv&I1dy#_j8%Qa6_Wydn&CBAM`W?0c7ep=IIKL#|yFw}5As34~Wd18@ md?1a9uF#{8`0(n4h+vYlX<>M5Lc}yVtkURhl4S56lW@XDoYBR4Nx+LIf@I+V+Zp% zqPSCeQh6nTGC&oaAhAjgO|HpPIqbA=@h2xHXQakwWR}FIW#*(7-x7cb3c(P=UZS2;gBqvnLluM9w{NDQbI03jhXZ2$lO delta 109 zcmaDM+9k?+nwOW00SLCI&dv?B_>9bw__WNN)Z$wL5P{sx z;$n!1-!0bEqN4mFP2R}|ndB!QWf5jCQURJ#q&oR3i#ntCPv&h_# mk-fkoG9mdgi|k|#)+R=+$*Wl%_&FHmW_W&O08&MwK&=2ZCOQ}Z delta 115 zcmcb?^NXAJG%qg~0}vdWFekHQBkxmY#+b>zOuCa5SY*Z7qc~H!QrN5585mMoQaEgY z$|~74IVTsecyRgMVofb7$}iI7o_vr=eliQI@MJ|+6-KkkZmc?!+gTeKH70YiIq "PatchHunk": + if self.type == "append_end": + if not self.new_text.strip(): + raise ValueError("append_end requires non-empty new_text") + return self + if self.type == "replace_between": + if not (self.start_anchor and self.end_anchor): + raise ValueError("replace_between requires start_anchor and end_anchor") + return self + if self.type == "replace_line_equals": + if not self.old_line: + raise ValueError("replace_line_equals requires old_line") + if not self.new_text: + raise ValueError("replace_line_equals requires new_text") + return self + return self + + class ChangeItem(BaseModel): op: ChangeOp path: str = Field(min_length=1) base_hash: Optional[str] = None proposed_content: Optional[str] = None reason: str = Field(min_length=1, max_length=500) + hunks: list[PatchHunk] = Field(default_factory=list) @model_validator(mode="after") def validate_op_fields(self) -> "ChangeItem": diff --git a/app/schemas/indexing.py 
b/app/schemas/indexing.py index 26e6a4e..cc56187 100644 --- a/app/schemas/indexing.py +++ b/app/schemas/indexing.py @@ -51,4 +51,6 @@ class IndexJobResponse(BaseModel): status: IndexJobStatus indexed_files: int = 0 failed_files: int = 0 + cache_hit_files: int = 0 + cache_miss_files: int = 0 error: Optional[ErrorPayload] = None diff --git a/app/schemas/rag_sessions.py b/app/schemas/rag_sessions.py index 01a3bf7..3643150 100644 --- a/app/schemas/rag_sessions.py +++ b/app/schemas/rag_sessions.py @@ -24,4 +24,6 @@ class RagSessionJobResponse(BaseModel): status: IndexJobStatus indexed_files: int = 0 failed_files: int = 0 + cache_hit_files: int = 0 + cache_miss_files: int = 0 error: dict | None = None diff --git a/tests/__pycache__/conftest.cpython-312-pytest-9.0.2.pyc b/tests/__pycache__/conftest.cpython-312-pytest-9.0.2.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84b1b6e5122cee4a05201149b60ad791b75dab84 GIT binary patch literal 698 zcmZuuziSjh7@gUj+-&wt0%FkU(C#{ZX*dN z#4^Dmwsy+3{ufqSC1m(5u?#eDDk@!tDp_siUzjo_?5`w)GG_)91C zX-oh<`2%1dg($=cO0kEr#ztaz<~Wjcre|Q3HavRPu{;YpW-^=Fo{f=#n9Z6)Jkg7n zYJ+JUp27I?v{s{#Xo5EnDhq7BR$*4$t|z9k#C-Yxop^aVRijXE_K)V!m~=3zjLPWD z9O<-!RWr!4Tm~}AGw}#=@;Hz$j`VXni^}%>&A#k&=KI`)lCn1XQ6i&Es0(+C%0JJR z{GyUf$kjjy#DO%TTAnS2I;K~%0P_zKoB--F)>;kq?0;ZZF@}y*X zfU`^rMV@rK>p+rR5lvDJ*aj>=p%b8@(}&JeNQDz5?3MF?m43nVnDwOa%bQLRwL>12 zENcfv(S}Zs3E}9*a}am*O2X;o+2;5yMRjW3PV=yzusdA;C;e4~2iLKQF+N1seFYsV%cEq)Q?{{rPIv5f!# literal 0 HcmV?d00001 diff --git a/tests/agent/__pycache__/test_repo_webhook_service.cpython-312-pytest-9.0.2.pyc b/tests/agent/__pycache__/test_repo_webhook_service.cpython-312-pytest-9.0.2.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7133786b28dc0381c006c9a52bb4f1ed0b632c8 GIT binary patch literal 10659 zcmeHN&2JmW72hS7pG#67mTbwAZ0f^~8C#Mif5-{!G_GSgts521N0X`rwkys`TA3p0 z*`*xOOTcJzuz?iL%{u6y-YnStA9^g%3nf~Sut)&|JrudgP=EkF^}U(h)rt`+JB?Eq 
z?tpyz-prfXnVs30`Tb`9o=nCC2oL}EXFVtj!avb)gTxhaYYvoq0uhLq6K2J6QA9nE z3ycTZRhkWD1EMe+9*?k;U@kfv8;^-XK)50h=`(?Z9t3z<<8c-jhPa3qCli@OKa<82 zaE-w=3D-DWQ}cmL;%m&95p7w`=L@E4>V>>926prNTCwo4HZff&T>C(yb9z?G@_TUU z+)&)=1pA(#3F9IW#sfqmA`~5jYXGj&d@vKT8%EV@+6QKV&VNjGQ=?hYR}vN!6a;ar znH4l8Sc2ju&kM`q*s^GcvT818fFc=MZqmT~>8IJjj|`0(gKAFubnqQ*PASsDjFvSG zWo~#-)zgIPb6P&F7K>>}$!dl%hzV0#-W)VF!-Slku25*Ma)Y;5fnu@21I2keswjG1 zHx;GaR@aIFHzfkK*@}UcR|q)dfp@ z;5;o+m=|ail9vM%2bCR$mT7qcW`dd5Zm4w1Fz|t<(=>^3`>2*JP@)+4@Re*~c2+mb zz4Z;*#R+e))dB+A6iwWX--)kB+gGCP3rE+Y-Lw%B#+Czim}+K;=51-Lkk_E2;bG;~ zSxr&wxT4G!NGXSULQ$@l)ErNtDacFrgIE?=Ijo{IjIk*2zlH7jOxOyDp}udzVyK5Z zV$y#%1}~}EY3=#PAbaW*dzeBW3VitSdSh$&$ncfzjf@7&!UR6C7d9X$zB&|pYe(ld zIsM9pb{3mn?#8b7*YrCstf&K69_wV<25KfmJ1_=cb=rl3bz&cChfts^e;(bapwPWzCpQZQP zSns#L^u2%Z74Cgu24}yzPzjr@Zj=>v-yhyspp=Yj9 z>wm71C3rRQd+t(`ZBMAdAz!Jx`Fg#uhhNOS`yPIA*N+@t46*q%LDHn#N;tD?@|kDX zq+61Eb!OKv%&sx`%P?ld|Bo^@%6fjVQRZ!t>0YDE^Mo4o>@g}{?oqbuTt<3vb|-z# z93OlY*SL8hb7ag8O#u*9?bu`~mt&aaX-v(|YJ&(NpLSAh(ioi9RALNHYAQe+ZSW$5 z{-NPhr|oD7;30w;JEZ0SB?1nc$U;iFdGzwd5BrDGr6N&HElmnpqb#Ew%xgCWW{G2{ zMI8)4O97qOky(JH07IcHLNsDW0X6B9I%pB9%@*b~l9B8${1gL#7x zhvM|plxjfZOqO;wV_@W2TjuC-qEO01q1@0(In0~8EItifxZp4Q!UeZ%JH=oJfNMmd z8u=P``QUmL?|mj?#BVxEnrAtgF^*x(DK`leCkZM&z(|02B@?ftoqL?aI_9*z9lpVs zh2;=d`6PV{?t|JX6lYMJd)nY@+YsEzmY~@uky-#`L=j2Yafd-rbh4b{m=u8v#mziC zS56|>@wbcf$R8*wwVM>I6tV{t`l6^(&5kLCI;mldow~>#CvYm4DJ@WYpE6mXv#P0R zpB8g!p0OdM2Ind0HOPkyf$<3-z+1vs$U^cfnOY zvM%9o&57Vvjagh@MSrgPCAe(JO<(k{$myzdZ0Q{kRXGh;`PjOIzcnXX+bxCGt9EmfEUP2J6_iB4^em{H-|=+^R8)>#Gv@bJZ`w1xl)Q@5>}+zHx}2%;)?E{Q90_qv3? 
zFP#W(d2$7_ajUkiddaI&_w6|^*;OAwti@x&0q*1JOQM(6Q`V&JjfR%HH}Bl6N@=L$ z&6?Z++QM}ZRk>r~I`rAPgugGH2yS_D1+#Ihwyk=}t5W)Q$xC+CM-Y3H$ASag$I}95!|XVi|ecC&sD#C zB#kiAW(OsMkcCk)WuQ`i97={0GK&EcUkvEtln4}!ycl?x*g?l6o$X5Mx#*Y%2FM#< zFd%R8=$K}*?}gAYEl%rOU&+?Hbj+*R_s<6u4}4ELqP>oeIQY!4%ug=7I6C4W>3HTE z`QhI#HQGK)c`l&X@jnBKogN(lq|6JVBX$LfmDH3_iCHNt#^|k(6*B(fdkVXgTOlB~ z4r3l4xt0FG`tJKtNoilLccYS0E>W|GpGPH;?!_RZh#E)_>9rbkPQW)lGey+s-qF1} z6j2K3p-?3T@U(Btj!o$1L@9esGwExvDpO#g0f1zu)%=tOhzIawxzR)Pl$yD>QO*Y{SAzW*c?Ods3~hFbS;1|yw-ZDOsdC!vIgZ5kN1 z>3NaZCbM*zA(1p(cOVgn@F5Y{P9sJ^B;rK>7)a!$+2vQIu7zO`x6dyO0|;H0@VCZo zZg_G9V$X7`wyi!wUmm-PWqIzN0v_Mp_(x$VvW@>V$h|iH+f02$1TNbt>5RgzJ>R_x6#_AC~z8W98&0{a28O?!OElDw))P zA+){T(+rwyQE&9vXNO#d#k(I<*}@)V3pqW75obWaN*_V;8orI9=P>dO5PPia^b+`# zW8QuE;y#L{ML7{FVM)J6@iB@|Q2YkPIEvqb08hq8?!68h_*T2?C|^E%+4AX<*os;* z3q&*FYc6GCUU6GvK{jMNjY~iz`_@e)1Z$~dJ~}YHbhO%-ahFWw1P{MfJw3Y8cBOjY z-6s*rTO1>7acq8(i{lSZ{GC_=uIdC5e;q6GiFFBoYfc2WYRuyLss#RA^~*=n3I5>@ z`^2XKKiQfr;aP{GP*?{9JL*!=^mk~sE1dQo;G{B;+5=Q$w7VViqVU*)Q}H-T>^}HJ zZGe9WW(-huihX7`;BJx}%9_#ZKDHg{U$#Y}*xz+u7(I=i=jFXc{|Kot4g}*gh|QoV uivJLL{)JzTtqGUD4Tr>@Cjy9X8(nRyu}eI+^|p9WJoL?*4PrlQ{=WfKTI*5( literal 0 HcmV?d00001 diff --git a/tests/agent/__pycache__/test_story_session_recorder.cpython-312-pytest-9.0.2.pyc b/tests/agent/__pycache__/test_story_session_recorder.cpython-312-pytest-9.0.2.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9608481a5bbcb548df59346333529d8157af6c5 GIT binary patch literal 5853 zcmeHLO>Eo96(%W4rbPKCu^q?RBvzbX1*jBnoMas|br<<-T5MYEc7Z}zfMqlzTMk94 zGgK0(G7uCwB;6L-p5j9e>CJ)n)>E&&_9EHq+8|OiKnoPPxk!KlIrY66QDVIAW>38| z1NzN-Gw;25^YL-yH-GN$Pbtv;@WQ;dE1?%*L+$D49Wkv zT9lq8?JT3d+z;|2g(?M=CI59_EKkrlSgJqYuIKp6YiG#&DK;)|Rz_6^cTlOuhvFD9l z4&KYsSRViZHzA+TQ?4|L72GyR0|S}`ZeYT`gPBx@R|WC*RD{@3R0F;@kq@lLd^yIUbZ!|s~In;WH(xsw3W^wLx%m)P;p^T$o2XVty>BokNDr)(+D{q5f?3t zl0_}{g|of>>lUs`hRMjcj%f6oBkH|X7S-G#lAb0+FcR^)lWzAdyhSe-;_wzdJ=7zk zkK+x!xUsEZo>#o#XvIkRJyk-g*K 
zhKwU!G|0ZiWYcISMQ&+LjXRNbcg`#gGV49G>19^LW+xPlFvhp&B||;3|FI_UNRb2N zU^CT0$Nbjln4zbz@1SEKLa^g+LS~|aO#B@(VZM>f8##069WvXHne60D@*Of=WDdW} zng4RmNMw$5awhdIXSU_c(GD`l{vVO4E7{}M8wM%cjyLz6sYaaH&;`T9D{`yEyozPJ zwo~`ZMIH3+RRgZM2Q)B~eb zhX2SJEj0|sgOVy&A&mo9sY{vdQXltT!=dbg`VmS1&8r4_)%NFuc7DM_Ot41 z?ZSnu+W67Y6V>{eEKmKi!(=XGkUN#-C$K*qP65-In#%H=C^b`y+03l{;EQP75sNSila5`#qCMuj06>BEaWcfWKQ0k#7ZW$=IdRYt~HGC9d{sN$~MCa^h zutls2`L7|T?<&vCy`RtCz4UDG@RLhxgW0=3c&?{6^s%-+wz==f=Gei_UB{k}9&Mev z)=vL$b97?)WIKIkD;^t7Z5hf?{vYW*&xYT7H2b^R&C$uteG{99j%|+RUYUvhfvuP^ zoZ1>z;(K2yyW@$i6UxxY!^VR~OP~17>HCe(XMtI=f!v>6vfJjwhK|o)q%4IlZKI8p z)_QDrdKHZP?sUk01)~i)iVCw`cFFEqcA0g3V$&Rac>clpFYE^wmL}R}wxyq2E&yqn z+2umpJh!3av)&ny(wdMYzl!>jjZ#pFdfwkQkG1rRt&d8kyT4v_SM{RRzL&s;m zGa#ikAxVA}^(7mnpb|yoK-)at(x;bafV9lx%QJ0rdPB!&y)z)CH6clU74;<>rC;gO z@+Jx|#XA_)iIYw`@vQL9#cPQxVjIqmT za{?~)a`Nho=fwSFZs)5>CXVb;ROiKcTCw4mD?CSfr?))a@i1Xnq4#dwUK`()t9+0B s947Dwgiiw5im9slS7q{_cmubtT>G1H_TP!PI{7aJ$g814>KWnVKRfB>WdHyG literal 0 HcmV?d00001 diff --git a/tests/agent/orchestrator/__pycache__/test_edit_actions_case_insensitive_path.cpython-312-pytest-9.0.2.pyc b/tests/agent/orchestrator/__pycache__/test_edit_actions_case_insensitive_path.cpython-312-pytest-9.0.2.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa75ae28dee909fd0a69a58c3d7ffe164a2b20b6 GIT binary patch literal 4115 zcmeGfTWB1~v1fPoIlHUX!?xsyw8W0AIqp`L?AQ?7@|lz39O0actw6GenapjsRwK=0 z>FJdvu?jlJgqRED0s)zDAIjey(uZ>pFbBDi@BHlAGFloP81fPF6C7-~;ICBo?5x%o z?&ANXHL9+vuI{d`uCA*6OE#NE(EhdaJ8L(A&|d`+wn%^|PbnZ92qUc6$Wcm)BJhYE zDMe+kI7+_2iK}+n(Mp=cV|K>Lmac)G1fL7LJMj*y5BP}b({ja@MWJXF%NJi7NFL2;5$b!2SD`IobYkiK~qGOjmgddk);2VPT#I*)s8ciI+Wh z&cehk6CQ`%eb1c*x5=2c?cFqPQVXW@RC(TXE5s*EI2a{<&1ME$sS=)=vuxrUj#+)4 zx;Arp`mGtzeKmQ!Fl#ZJqxR!ug~6R;m6F5{UtVh9NxB!#hrM!kWx z3?DQEw-u}$n5~;u9S;+psriH&4v3i*!jpdJs0kYQFV|p;j<@yz6TI9I+e$Z2I^f^f zWaetT3eNzpCC^M+A&g){*w9#-SdC5;Pr-vp2mw%p4#Snv;{dqEhyz?kgns!Bu|%j) zn#X)bs{RWPLwL*~@i2rhZ=eAZ{0snvrJB 
z7@cPW+H#k?MMCu?6)}N|aKW>)m#4z=<}T1QW|MqX*1cNooidT~o2T7OSiE*y0#a z@p|01nMiK3E%b%hevf1I_?&{{V$NJ7Oa)zY2KHq`tgeRk{4a8`P!1|KAGI3R>G2GXIOdu!8jL zRh|pdqRgC7K=Ld#ZyKG7fF1#7p~6bK@;q|!qOPp=eRHh3GNJp0ix&$5J)`@2ghzoV zz?s#Y=|i4la_p?`3+7oW9xjh~RUWTZAuAI-OGToTaTf}JAkG_sD!kwJOxz|qdiBXH z?O3kmn6^l2<$0cigh(I}$|MJL%B0MigG}gi4N`qD7pjr;>(v#JLJgDhUdWKP4XLF{ z%brs;DU>;eZ_W|H;knCFdB~(QlzNo+8*?6YOlFW}vA- z6o=GcFC#F)sczzQH!&Fy!P?*ICeHaHrWV2$R_#U+x0rgN)0scV92WU40k#JSDsU2&m`sN^jhIVXo zu&L?0s`wtY83FZBQUU%z$OmXQ+)?$QzKMwXo7JcrRwF+a&Z1uRT%sS~X~USS!6-2d zDzY_CzFV_w_?hPOjPR!pHDm;yTns!6{Ip1ktmDD?L)YbrHjjQ!to3auDT`23*Pxa! z${Mmr+=}IrVs|N7Yy{*0TTWe4d1LhDu4CmW{aZV_Je{=mTt0=6OSP% z=R_eP!-aI_pzIWtqkJJy+y)1nq8NChSoL)PPoj#Ve2%{HH-v7ZN9fw;X!eUpLWz8V U06ZNyrg+M;*Oi>|ioB$M15NlH@Bjb+ literal 0 HcmV?d00001 diff --git a/tests/agent/orchestrator/__pycache__/test_eval_suite.cpython-312-pytest-9.0.2.pyc b/tests/agent/orchestrator/__pycache__/test_eval_suite.cpython-312-pytest-9.0.2.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5b213d37cd52fa3b59c2171d5d9190e0bb77c5f GIT binary patch literal 5126 zcmd5AOKcm*b(dU{%a15pA4{@ic`aME=*X01Teg!pNtH;6joMNw%Z^JMS+6)Na^)qL znOQOtLjr1JpmkBexiv*y#07dtVIK;#2OoRrAwVx=Y*)-6MFXTj;Y*=ZKIPOmyBv~E zg~kPXC_&A<|C@R9=FOx2)X@dTZcqJ_?dtNB$`4}bY@T1 zoAQW4R!aIrm*Y?R9Znz_fZ1cp*2 zYt1YBS}=!@q=#4bbF4KZ-?9fd2U-*kH8^7&Z_UW>=p1TMIMm>bbG$V(PTF_Z<5EyS z4|Yb(;l~V{i~DGFzF_AI_G?zo##+jz;RUN;8@cp(-PWjl zA*JUuY*@5&Q6twuh=7!lvk8?M;X+7@bE&>h!TLCjWD z87-IAiEh8XNM+8Xv2SvzAM17j=TeQGILq#W(F=cDNk2Ph8&2E(@Lg3rHsd=VB$kC0 ziDRw#7s4++$b-B{LcX$K2RH}$Q2@y(crSFfp=I;D5o;To;EjT>!IIn>6D zaawE6c#8rGqX_D_7j1g(=6uw7FLt-FPqYJdq3+wBw1|38FWQOvP(K>D?J-2O>$dma z;N7sRRq@)rTz_RJhgx&lv(neXI8^qcp&J6)eM7i6e76xr?ztkBrImgz*_zRw0YH$# z#)=Q^MI#wc#b5T7{Y9Y?Cw67dk7SVpDvFvH?zNItVlI2j@$5+*6 zdpkKlmqVksJtfGPii{4RgJsze9V}??~;#FtZpaUjQ_XD zg}8S#{y*E1$Zg_p+fh>zb~KI--}X6q8bJv(QI0gSw&R(zwuAfSb0cd5khLMmKauX5$#wb6qEm^k32Q?9=v>KX!^3vCBqKvJ zt@Po^k`HSzQAsPA9579!QN? 
zxyL$TY^7s%>O- zc=A=Er>I}ck#}_rPkI>Zq+r^ry_VOhoU!e^TEJ$hk3r;xi3xpG%jQizk+QNAu+O-M zN{*-xc2UqvBMWU?R$%S6zG^EFDy^s)rnUr{R4Dbb2u)~tW5UR#-1sE2ho!oDZELBFbJ$eQ zLK}rNAhdTg3})~A7JH(%>t-$pad8e#!EY_GOk#mh-MpZ(+Q%#9Z{qFL*um!M?YW8b@ zFxZI*Yq6k@7t_$6-}XQpymM4uhP+nv8z z-6hPbqA2(z%=i=x|Mk)*L5Z+6tqXrhR^Pr>dqGt^+FNO)W?LY-I(wlW-OS)HOPmUz>~*#mFfPJRT=- z@Lr&?;dm*k5jbeu;hl&RMm*st6HuOK8dGr+danT}FFAro;$)0bj&T+M4HmdcGy@r` z#6uN!L7DJLur4iGmdQ#DD+8GI!0=-MTpqz^0cOxKHcYYM$e^I05@$pxra`uU$}IUzk6yPR~uB`~KqD z*DlbG#p#7NW*28?)Y+M{i?rj7GqVfdJv*nKpP!jMhgl}jfJWAGDflttg*2rSyP7og zo%|ZR1*(SW4i)m&gpUqF7N*_mvW2snt-=o&IAYBP z4nEBmd7e3ofMIp1V3_b@icr6MCuqM)GFHJvjuSO)T1#-na#~h*5<4DqDvc9u8i2!Q zKcw(9vpL6XhM7$_s7Kn7W9`UM7jYLi)sCEO7dzgLoM;z2<*wZ|F!_|D$#%J8g#D(0 z`}NevxomW|6Vp}!dAdYk3lv#RC8wc?tqV_f3Y{YlqWjk0_*=NU-qrV^{89Ah(Q4np zYHX~&Yk$3OxZcrS@4Z~#G4Oe8@Q!hR=99&0^4i0g_Ib~~`oP{_XEqMJT08LSAJ1&e zBx^IthgZ?YRb0D@tD~ekKw8D)yZ1Z^`g%i8BErDI-=2Edf4V+ARy}sE)_cC% zGxx>dEA^56^<9(q3)M@>+OaFufvaByq^{7Dy}c3t`plDr5beEn<>r;T+<7Z}GhE-% z``O@2pY75%146K8OOS%0Ew4Z7Uw?fwCPcb!y?FD*4~n($@Q16l@K{xv`S{BHcy)XR zpeo=1gl9G+_I<=DF8aVhUHDUg#~WfUr72U7?c9hdwU| z4T*jK%qcF~9BWL;#U8Z4Ci_!q>H~k1&B2FYET*_*kN{Ur1s7ocM;o5OY!hx5dn8SL z79Om3_TH-8tW@Qpo0ScDsQG;)5B<34Vh>v2Lja!=1BseEwCNQhLvAS^pfbN{_!^`Y z9Z=m{op3J;+{^^lowkSun57 zQT(eQ+z{T6ekpYPU08c0to=nezv&k|@{b08Ft{Q1*Tnw1wC9W9j*Z}OEjWC~SCtRc z88%W2j@+58%7+|mv=$t_V^`&I2isE%?zz*$C7al0*zfai`GOwLX3wa&`{Og40!*Js V&Q$&0#eXt%s~7lRF}|~be*wNm@HGGc literal 0 HcmV?d00001 diff --git a/tests/agent/orchestrator/__pycache__/test_eval_suite.cpython-312.pyc b/tests/agent/orchestrator/__pycache__/test_eval_suite.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..728c2aa3c84ce7e9ab6118d17f0dc65391f63283 GIT binary patch literal 3000 zcmb7GO>7&-6`m!R@JBYO;YHUwwvzXn<(6gM^8)93(Aw)t!cEx^rDg z=uf0Hn+S-=T*)bxoQrnBVL0OmecmoPW+6L69W1nk3@u=0+M<0C^V?v=!OAQ+TxfD) zp-3|cMRd)vOpZ#GW3vUyR?Q48_w$DDX}Shxj*-dXLY8vsTv`;GrxekrwIaxdiH2&cBL?PFhLZZscg^E>BO|ccVLv 
zI7tWTBwY`DS%q|y80jIsq>uDJ@R$e`LFbNT$672o`u{EKRIz1 zk)gY2cX+3fMQ`U#bwhn7sz>@^SrT{0(N)BKZ=hc(&Y-s!cs&Ex-l`i)so}Aq@OH3$ z2GZ**uA_C77@I8z9Dv)IvPMj7+1WE^%K?Ug2o;wcyF7W#qL|Zok=Yr_dECjFJnlg( zUgCgfsapXU#)+Nbaa{XP%ycLh;XGb5xI>FvbW0Y% z&P>XUB4zHhh)Sj5n0X3?XmFYlL0sT>D1$=N8RaF*F`V@x6iJ3LiSXMN5x(46STy9i797#qV2m8*xr86ihqFdwUC{~5GHeUUIhqSEn-&C{$Hjy$ zLh?>!8awK@3c+%2+HRI3a;0OknzsoRttAd=%7YosQYMR3gjl^_2_3dO>Q3!230=+I zx8ByUAASsX?LZc@fUXJqSbLn13PZxm;3Ysrvbg?(cHYAIC4w)MpVk^4h`GT^h2(k5 z&R~mQ0H*Hn6WEQz(}%XupRWJ$#uhpN9%YVNAqbLp!;;+#9zrDxPu~L{1mm9-+A@H1 z31@C6d`#A>2p@In--Ce&8d-*mIiTFZ4wV7B1<2*9@pzp56exQMhX4EMXTgcfGi{+i zrz`KgTYVXCA@(zvfPGa74H0oShZ5nowMv<2tIK9#)xHg)TC{LM1m(#}{DMq8R54}_ zoDkx$WhFS2B%(|vh{<-(;&PBpo(1I)fI)Pn<}NN6m*(eYjLF%_t8Xt(UtADvi<9%0 zrxvGD##Cy0QM6sYGBy9^^sF&6mzuiDWb`70`FbG(Z=)ZsMyL`~LS3?Li=CHi%XB6J zE=ZYtLPQ9#D_=%7DJ5tZvXr?z0wl7LN-^{lJpK~^>@BdCu;)GSGHjv49@IYeRphy? z%YWCqYMs3wX`e)X8>#fZP>G(Z4Lo1#9j>)?)nYel9sQ4^gWKj8sV^5R>31JS@#F62 zYW<_1<@Qd#UOoBxU#{$>($!S@;VrUvi&by2$~dp|^IA_|ZFH6Gm(fC*TXl49dRX!P{$6k228Xn#bRJ4;dDUDUbW7|^|?MJROUJZ|LI~DD;D~(jcBir4c s<&m_n2Lr)_K-lNo?;cl%KEJY$fPAUCr1I9!|0TtP80f#3diU`E281Xr(*OVf literal 0 HcmV?d00001 diff --git a/tests/agent/orchestrator/__pycache__/test_orchestrator_service.cpython-312-pytest-9.0.2.pyc b/tests/agent/orchestrator/__pycache__/test_orchestrator_service.cpython-312-pytest-9.0.2.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9b202a4326a28204cfec1a0802c0e4821d00897 GIT binary patch literal 8437 zcmd^EO>7&-72aJgm&+wZ>eqitV>*@^Gl*%+mMyihoG6m4IJRZfQk+<6vt_s|i8B8( zyObq$Sr~~Ac3U7m6tDsqs0;WI!*1b24n5@1Lk|IZAuYKg7Dij3CtnH$auGnMzBl_T zhpF5m?WGIy&6}Awe{bKs`R45(o0=pBo{R7NI(s3^Fn`00^YAva{vj|o7==;T95cz% zl*C((tGGL0bTuA3tSvDnE&KH$( z4*94oe^5^6teQK_I`+3wI`EdJ674`br_LW3R_EjrDbA`HU6bby9Y|*r3dzo?g+#hk zNT2j8~n?0?N{UtJtfeT8N76`szN}7Ptf7!R*_pr`Q(<*5 zMC(ylg;N3wuLNP#h+8TsAw^We_oTb_2s$o}J_ZX06IGh-#oRiaKYf(=z2>|2%9IwR zHOnY%Hv{+D@6s*6FylihAfC3!Qf6!Xwb&MKy4zgWs=iiN4Hq82i$ 
z3Cd4v#lquenC)ZK?Bhuw&^DUR<%*Z&OC$@Onc;kTUe4WAtvmrtBKdqy4{8$5ks;^fe{*)%bD zZggZ~WLO>5L6akB%Sn#Tf+T>~P}PY9LA9Gf z85c+TfX8WSi$E?j^|sF2;%eepEphDgnbqSHwc`^j=iggBpR1kERd?sBZTZWmA4S^g z%{%J5y6W4XuQ#>TJ6@`{?0nSRc`N(7;V&ktlj=(I)T8#j_n-TswfcUhIyb-4zVK+r zcklPF>^Sz<&P4r%uKLc-FGGB*^rRym71o$|P@sEslT*UA(9QT$pf?!Z-Kck=0-p+a zY2DqH3g_|QSp!%23ajwCyD^pErg{PDIUUEK&#dThUTQG2?)LObz0Ikkx1`T(%;CJ$ z*?tCVaQ8xYaV79LrImmYn8nxd)Kh^op%OHBBY1HitWYon?KgUxBQ)FYa9*l}4PlB^ zBy2OqIj%s7=-VBc*&Pn&rH&5g7_GzC|1M9$;f#PYXX2N9&Z+lOPqF~6$=7QmS>EY& z3%Q^pr$lcC7Qw13A*D%)8KEpobE3Z6nSrmTgj43FhUoOOkgo4y;#khGj{%*O9Vf_Kx3ZM=8mYtu+GVP}3@$O(=dxOR@} zBBHb_9Y(~~O!S#G6LorWQd=`&(2P_O7MSF=F@hQ(XdfAO%RvS=5Sr)uZ=n=`6IH@>&$UWUAmH>%dk%ajr9IM(WQhEx704k6i-iW0&IN)_Ea3rZ=;SqukmhO#Y*pyx?Eg%q*)5L6%Gk|9R zwvh|0j)_N@@IOijMYW2KXX*WJ(r-5 z1r=h`2>)SnS}`}LlEv5P0vn-lglK@OHQ9n;sbkgxkEb^G0B0MceGUWw$8+6PvHR-6 z(#cx9dsW2WLo07(Tw23YE4S>iD`NN3Nms^YK)C*$Jymhf)y1WWT71u{h`)zc-paVN zhNV_+*<)A4Jxdd=jLU%VqxjCB8&$EBLKZWq>1kahFT-Lo_8||f&ojP5JtLrXmL4Jj zg{$yJz~Je=4jMr?L_E*r_pP)WID7(du!B#+Hr~GWwQ(Vi-2f8rDBeQOJ%(LRCQwf% z*q$upK_p*KjlfdK@eWSvcrrLdf;Oz-Y*-^2;+7{vzz|{8VFTKDt5zfre$Be@%Uidz zyD6;kwA#q?x_)PqH*2IzR}#FL#OdCUwsmyKnJ#cWE9C8NKPUdMlyor1dwFJ z6lCfQ$XP7Ik^FAypg(h%yy;e3j_QctQT35i*u`liXMkuCAeP_qIj<(m^<)ck#f#v( zUj0vfm)*gB9O(Bzz;(sjuV1LecUQ&!>fU~&!0+v^#rs!9{5^C^tn4z)TKogr#Nz!F zWa?e1s+g+A6Sv;1#Z#*y{vO%|R?ek0T5M&Pef)};y7i_jmSGYRWxgi|tQd3dSf9G7?+!o42v+{pkr z4jrJW9iqmw=s!05EPLxSc|5jE0Uc-=F8g>y=2Tr($T^(CIFj>7-UgCvCFpg`09>F= zUdcizER@z33KTxwNsyIZnnjzEY;-k2A*s(Lv~1s zcuL9OJkZ+w=GjYrgvDrjezWW+Kg41*UGg3h8Hk2)8=q}DxA1+>YL0|J2i@3H3LM*I z=22|Va%}JA(KS90eD3z%H3pK;4zFXfCIy21w#4b z>ActdnYemzsCIB@`K96NTbVToX0e0ZQs+9u?0TI88XMv+pQuM#J|4I>P~~4-ItAqF zz|yH&m~q?g$+ z#UZ>RJr!u!>M|RlfrjYjK!fj+is#Q$kKy8rE#$m&v&lX;LrP4E`>y#oY5MqjY80;6 z`kLS1(p?d}%d+8Ktl`4nT#?}NE?or2QHdCl>#QdE#&2}xZ!seFm0$AVmNtElGyg_m z1|M$O>FdACY2&5(^A7Le0^)Dc;2nIu1>l%ArTv*_ z*+|xX3tPxF3YRqM&&5+p$IT!Omqe9q%621aBeN#`SuL!|(Y%w|$V>pFiy@OpMS_sY zjxqQ`;zhzE`1J$7aX>VAQv?MOKaHT^q#K1f0p+n{N!GgAaYP3(QU>A==F*dD@$GLu 
zOW;F_3ZH)N(A_oqxdy2W(ws3#LN|P0qO`MbwCgAB7X_VJP-}r!`VqMqZFhpp8uAYYJ)q3 zy!507aJTD)`>nNjZ&mEIz8|G=I#Wodk<1`L%``$Q1m9|s-C&Och#Eue1n9^DQNVa0 zYM&ivu&N;7U{?WhPq{Bjsw4qr9j76Oe#A~WwLtJRO9m;-A`>`^SCJr4N7U87827~~ zdH}8g*jj+JjiM-$Md4W_7m?(U^dZ5x8|g(-K=O^CeiCa9B;XnS@IFS0Snw2bqJW5k zrOl{_lt9=IsrvkxqkdHNJu7r#T?2Z0G_;gWc*_2)@h|ZGGi?I$5p;~Y&}N^)SuEhJ z!)pvApN+3$vL>*>BexYSI`W4A79Dva*f^^$#y^f+i#%%C_RIOz?T2gI4==a$);p8+ zT`#VMm~i_#!-u8yfDjWdpID1A(boS8_5pZQk9VxbcjGOE@A}1IxS=dZ62RW-0aA@5 zZuQh6U8_9){$v+eIhWS3ot0bmShxBC-*t7)t#;TI=qtY4Dh3G{$mMVKxNThqhNhbg zK327Y>c^68rsxF8sD+t3Wh<TW*vvuR#91vexpQGuvLhMcri#j;V zpWxS@4e^*Ywd3SxkOhNdv>_mC0hVR|f}gKHihackEcXuvNsM8~*slbZ4S&f1Src1W b;rbhE3?#R9tYfmiFV6Nq>1tuS=^+0EkMaR@ literal 0 HcmV?d00001 diff --git a/tests/agent/orchestrator/__pycache__/test_orchestrator_service.cpython-312.pyc b/tests/agent/orchestrator/__pycache__/test_orchestrator_service.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f19bce860c7afe55962bc55beddfb61b04f5eb5f GIT binary patch literal 4010 zcmd^CO>7&-6`tiTmrJfh>d&z(1u-2xiY~T0D~S@h zWM-GLB*=hJ_)wzhEN4?gseV-Ep(A!P$1rb1DmCtnH%bP>R(zPHOi zId)+by>$S-_vX#a&YL&yee?L2_Vy5gw)oi}(l>*I`~y4nCe?)MJP7v*B~-|e6mnli z$of*gte6tBQcBADQ~s=+k_8+SGl8sEi-nJsd+MeU&M9j~_Qw1kqa7J^M!*s*p>U6H)q^-H@rlWIZ#xO0NrE@%z z)a^x>WW%hq<=9+7#Z2BbVgm1;V#d6QLr#v(m~1I+n1*MKM}=Ca+6KaXVv>|VNy-O? 
z_;_fnkj<`4Fg-t?h>2X$G)vE#n#Mz#md(*Z2IV$Q`=X#{ypczwir;&A+JQaolSu3d6f_NjnI(AIKe2mJ}AL>$zgrk1aFPDKN{Z9{{EhoF~Gs z`Zpl=$r^Ehdu^e12lYPkmeKlMw+y$8$}Q@eI4>@d+kyiz<25Q!9~G%Y{jjUr(8@GG z6&lh@MvyZ6X4d{X!};UF^2ZLjYhUQHftxNowO@WX!qCRR?nszt%S^w*uKQ7 z?7&4IJMmJXX+4vhJ9c8#&vam1(hE-RW%u}UJ_8{Ub>`D{)VnYq$g|vn3DM;HLZ6#; z+dnU?1`Af&Ifi!=lX#Hkvan1`QyzlZhH2ZbY~-xjG&L>5bf?P7`b*cIXODR+mj>H6XVJ8F>QQoGRfN~E{{)NolI!g zr^d#wu>h=N7%2=xj$tgve6AHN3bKu^jz$@}o%=OB96JLtnrW{BStsT0{)fs=bhs29 z{$_sXe6n;t`Rv9oc5Y-!H!{V8*9#;6r;(V{zh_*A zai4>GxNSkHOHIB;78>c<)HajTLCAC4Lbxt9c|HBsgf-t*U=vRQt**T-u89kH4X-q+ zEFQ&sxz>Ka}hM`M?2atwbsy1h@kXT!Sp_cUlF&d764$9r|tNs`W;u;m23js%b z&E5~)3|Jv1*nU{adVxF=T!e+NYV5Tqtip4M_mHY~F0*8^)sNqZ*qF|Mz_#Y4Y33i`>jCmIIO#-h z<=CPff8=8Wpy5GdUbp5<8(<0}0^mm0+{3V8T!l8-Xj8PFgYDi?)7DaM5qx#wzx7q^ z1IKWK7!Y(-&zCn#k%PsJn!yxceV_+lw?rl_28efCJ?l8Xgr zVJRC$asddJ0RAm5qeBY-#zroi&zO!$V25Jxs4xS{*p4 zx;O?`>;j|kU)g*X)ey?icu zi3u(jET~H;`xrK;K@+=(@~22X1F{E~Tnt#f@jpIl4W!QDZdk=a4~AY9Ct78}9t6p&XC<4QR?r)ep_z+b@K@k2* x!rx1>;QJRr5+*`I_+A!-;CBQ_Md=jeFE3XJum`g1bXUFi>bjihO%ZIaA;W^0n zPL`L}sM`@<6u#^`p{!i#h`1x3jzYm4SI8VvuL&B<;#DeG_vOg<+%6yy$b`!;QQ^qk zHJ64C^CCI%s>9y~Sq{QpD7efa;aW^xOP6Il>UGEWh$A8!MG@Xu!WcmgLon)5Kw$t&2 z%r4WAGRI&3?pqwcJtwmm{am!d@+9))`tJbS3- z-UXN}la*kR`M_DTIrxRkI)Y$6=233Plqs^T;-)RUfP$xNPFOrzW(7D2udfl$@uOwn<{Sb7NfqY&4L5^`P+975xF-NMXPL?zPyC-{gRqcAP-GMY z6F`KRvVo?8%E~ZQisev|!m~@4m#$uX(SGG3Lw7N>F+(SPrYluBnt$%km4(;fPQKvy zbZy}ox@v>jR;Vkuz53vS= z?bReM8*x{rZL|nmfQu{e^D8jj(sqke{c62`;#B{hll}5k|JdwqZRT$6%=fi3yVaAo zy)T~MzWUnsn`Ea-ck8D=pZ$7r`?|Zmy0%mA?oQ8t{@Bj+g~O>rDYLH?(wPG|QX54( zA~pC`wCBLt|BUwkj&sa~u~<(JqdjvJ<0|r#I4*8x@Ip;n*0)LtyrGFv)Nc;9wUzP^ z7+ZRpz`RsL<;?KZ&0!{P=wxEU=ovkIRoh7QQv73qnyIlenLtM?srFKBozzgGZHzMT zNTz8SaGBT^`0%Uetc*3VL z4^@7c>p7}g0o3LqhazOtb{h_sX{;?>6jhnx5q27mX7}Thig>OM7h|ocj*wNzR5(JkFc&Ov4K;eSPxm{c*Qx@i{M?L+~LBIs;`aV%#!h z3ifb%XuaZaCSDl$5>fN7o(S4xNC? 
z%0=}$Fu)Q%T~68Uhy{+Y=^9$lL3@KaO+?rdKCOs140Ku?M;;s?1M&0Y$RivTn^S`| z@Qg>5GRj4xVg_#;)W|W@-@HM?ar?73KzbH_{v}LU=BBaCRqLbb*00a>XHNZEs`sa7 z`^WF?pRoEfr~9*~caPt9=bfG7&BJoGT-i_MCkltdSJ-SXM*%V%$s zt)(wIJLi^uC||mD>F477wz;@noWC=_U0l3t;`if_Fu;CL*Z>~QAaiGaw4Z>ZZ2)%6 zMYaeESO&wYb=j)RT;enK5>VufJ#ctqU}$hcgV8|}-tZc_MY~Kme&n69U%03UDL1V`mF%BK6H z&W!9RBoIn(g*P>6Ff(ud-pqUN zd-IFymI;)(t3O0P76|zR2cu#Rg`LYF+#{4wBPNN_GLl>?m*iXd#B7;Kp;bt%mX#D+ z#l&ve2JXwnrKH>{gPdm$HJQC=(!w`pt3qvBgx>90UTF)}d$w}tZnCf|qb$7~2dS!F z>dH=6zMrL%2cc9p&MrvSQO*@6dGB%-M_~^J8JtPmpD-CHdm&_Lz@to!eHMr>;jDne zQ7VP9ablsvLeEexs=6A)5e;Oh3GkFC%RZ4=JzJRz0 zRYwur$0H{E4&!=S)o{_5QNkcnzF?uUf>bOs4v}y;6YxG?_BzZfsiK4+;q~J*3gWB{ zyM+KCaH`6K7sh6iQKl+g0rw=ZEod`ki&6ZGe1L^=z>qx%#$Xbd@?lOx3E!^P3Z9E0@G(54D>|MBND=s7V1_oZ?szHl2h0TD^bdSpSuyefMDh(A4Jm*FUjYe(-#k3U+w z^7+~qbfd;L>(h^>embyrEnHi=xl!+JPMmu5_Qu57-Gk+dwN1(eYX?-4`8{TFG!9iW z`!fhHGyC7PBGLPZEex5p{v}oOJEGB9vG77d+Qw>SB;O%&kKL8QGP&Ung?-&^A~Cyk z(c}-ct_<_(jzPz68%spwJ|u1X_fC`JA(fX&=T^Sim{mE^R0W;E$_=wPW+9*^ z0|`tlcGD1ll`Yf^%vF%mKuVT$r0^_N)O-SA1sTbhsS^A)L*{Izza;o090AGTv&BVF ztP03@50C0#N0d*4YW_Mt$w9p_T#U@t_6AFgBgm?tGvz#g1NPpU)DPBK;-{JP^*_Yu zJEG~?9Al@!}k;g}lCmC1mLh zutW?#y}=8Xud{UjGx!BKJq1;y&>*lA2yCss$7#Pl*`GZ2XQkesnCc&XwSUCxPfquz zrZ*42_Ta0H!_8f{=vKG$rLpp^TdP=iKiEA%sz<-|pSma4-IMp}>fGb*#_72y?)kgt ze|JtjIJNG~uA8%K&McpWE4>^?VCKE5IvsR64M_NI%tS+HUW29WC}oZP35YvbEC#N_ z#NFpLoza-*gS5u9S7-j8))zB+;D7U4mc`ol+D#9MezA7=bNlSzrSI~Ka0^l=-h^g5 rZy3h2T*=5iBhc(%^LL#XmyB)8F|6;;Y!m42f4GC)?$jZpsdfGX6qh2; literal 0 HcmV?d00001 diff --git a/tests/agent/orchestrator/__pycache__/test_quality_metrics.cpython-312-pytest-9.0.2.pyc b/tests/agent/orchestrator/__pycache__/test_quality_metrics.cpython-312-pytest-9.0.2.pyc new file mode 100644 index 0000000000000000000000000000000000000000..746fd1c977fc9bc23360fcecf19edc4dd3289bd8 GIT binary patch literal 7250 zcmeHLO>Er86()DNzuZ6lIsS>$b>t{s$5~nHA4PQ{MUtJwMWX~!3N)J*EWJamDRIfo zkSkd#H&9cfjZZd^LtDTH6}_mC4ms+WV}Z7SWGS&SP|-ts3DAQ98wik7-{aq|Y!z0~ 
zqRqkF<(qHbym|B9yvG?1|CmlEIk*m%f2)2H<+#6L#lJ!xxL*eFD~@o4*SIq8l%ZO< z65_d

%3!OSBw?mXH>!#mn(pqMWED%gLHp7Hg?;s+KON9l5ZUsb$Mqz!4%`7s|PL zz7YKi2bw5ES>~;VRc~11hHg>0VzIPapHwQAYUoUS$7oopUVT%sWEP*SD7s8lgJs{9 z%^B!pLMK(XOcqDRWL>Egc-Hk6tz1>G$uj7qLTA;AQt=#fiY}mGxbGiGqn8%%H?DY~ z$ywO$T!bef5+(wPSc1=+hIxDVvJyUrL`jUqNn$m*;`Ok^piNv&t$5>EFuHq@w3Q8T zhGbWBD_$hp_#LEkPkvWBzZzcc_WP0^azN$CQ@6vb2Unb!b6k_RI{cBQJAKqDZEhN8 z=b_8y$)W2UIeeX4J+k7>jPFG=WQV4^AL;8Bp$k*8J8x2_qs!=^%xl4r=%)*)YZ z`mm2$r5}}9&g`*{`uyomg@pgFnKuf|$%?SBUGpcgUSctPDcGQjq{>Sb3o;?yI-%NsZ!cO|ynFDHlZUF@A{q)EBH7r1y5h$<+)wPM)(fUgl=^ow-?mM8Et9R`+XsGXi%M?ik#0 zxD#+E;TGXeL+<^n?CyJ#-EQ{cI`WukKgn)xdatMc+ne3bH>2*l?LRHe*j0|4_#NNO z*)cme%QayYH$QKS`(A5t!H&aPd=h&+S&KQYZMOExhMYgw^xHS;5Q&*-+hVFw>-n}@x! zX29--tSMoyCt35@togR@;AuYx+oHZ#_ILSmwSb`We=pw0vB|)&Fn5Kg(hCZbdMXx9< zN)_2O^w;1lmy$Kjn3LuxXg66@*3CJEvYe@W&_GcM?5a|^#$q*vf?84tF+rDWpkvAa zhQ$&MQ;{mN3BBVrna&VnPG>Q*QG?1nNQ+c48=56q^L2&AP50PoE`&#uV~i_=pxCqXS#S(RcRJh&lVDsE&aueKpr*FEP&0_aQeYpI zY9J<8(E$I18>8;Qmx(Y?aLmmD6Hd#HLC$eGUW0=%kuAC6AKme71Th}t?&FDMuLJHR zSKudLdZWe_j12nn1F*~Nmo?>D|3zh1@?vUAv!#An%@eB5DiHU2 zJ?{mv9~-KQZuMiJp{Ik5wxhVyAbCjzE?ZQsn8o`1H}RJ>xpsxf!wW;$_hAtjTQNbg zB1inG0FeK{>|@Z~ zM0*ywwS((#zq3)CT;ym87%-}U*~BuC;PtB#%z>fJLh`5Gx~k6_Gf<6r6S|O*O$eEl zf9^zm{&WFsg@UD~WYxMl)zEavFcxuUufTs3Rn5F^SOtFJ6BHjfTQHM9^-=y6N5=pt z@tA--=1V>gfX6ENEC3!0{ zz|?3gr7H?CVRmO#We%Q*kgCJ1Tt>UWT-Sd9|Aw!QL%QP(*inc$zR)okmX0I2=(#qu z+vWk!<^h&=Y*q+7I_{j+378-s9tow}!0W$G4w)W;@-r-Sfgu$Dz9&eH$Hp zJGmoE>e|IWOs>BtZRLKl)7|@N&sO)~&cVLzgZaPbj;{BfTT{1kZ|vlbE?xL^VJkO= zXz0`OR_@h%G2uYwK_qr4c`wI3dt!~^n%yz^ptmy>UA*|<1efW#`TmXfx8vEH;tg@T zqvy*br7sUp-ivXG?)#jONZt=eGttG@pm(tBkUznpM? 
zznJ)V5Yf-Q(&cW8oi{JsxUepa-}(^95(RSO!mSUN=%#oQ@Va>Nu2|d<}JU=LupwSaC2;~x*YwapgX0(imY-ofz$xI_bp<2#x`VqGi- zczG^>FkpZm8QE-oPY*~jbjAHF#?!segq)AK?KH^M=-xefnXR$)dz}3g7fgB{3BG1J zt1Y~Ny9N!@p8*eFHcWgoy%*+r{;wQ&o%?y_Yk}i0hrWi^%b~w;Z$60j@)vnW#XkW6 C;ZlPD literal 0 HcmV?d00001 diff --git a/tests/agent/orchestrator/__pycache__/test_quality_metrics_gate_expectations.cpython-312-pytest-9.0.2.pyc b/tests/agent/orchestrator/__pycache__/test_quality_metrics_gate_expectations.cpython-312-pytest-9.0.2.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b2e4dc97225be7b81cb5da2961ad084fec598a4 GIT binary patch literal 3834 zcmdT{U2I!P6`pHf|K-|_?Iiu(cHO2;>uu}M(m$4}KihbrV*5iA&|;7-w|BCH(4b+N(B=8g2Y2ngOK{fnYq4+1qdo3 z9vJz~oO5R8%*>f{zBB$!CNqSfyjlIZ^|J&*e-unuB7Ndz8;B1OMp&^?RgrDPiJB1w zIchbgh@OZYcM{cvldLA4R5j%cRfim{syXRu+R0Qia!l0DI>XgrNvn3w$yf7|j@bog zq&gz$IOrLzjsl&;iN|VntfiDve-azHRZ8&mHR@Y6!}Mob4Z^dxn|`C|U-w*}f{bSZ zewvuR<+)tD>otALt=}ZR!IRS_aSdvDe0auS4`2?1NtWv~o)m)V1~E$t-}CLJVOxF+ z=1|LI*A3fj+J^5@(HE+iAx;B?L`3Kx1jz2`AXP{6&%4)tm2p-njX!nVed}s!$k!J$8iD-m0BKJ>W}kN+kM({ zda1A5*S4h@KeJ6|@$hnPslV2MJdX?iQa&;$uSb_h!?}12@3Iiy{c&`8&ysW~gu2Kd z4p&<06SMhHveR}@xXZ5M7ee{$Xh`j}9YxK;cl#>c47~}`rUXZ&TRi^sCRZEq;^_7-ig-~d_>HtMZ!}K%sIX@Hl#XgXxaZT zE*6f%M?a3VQUBde5d5)2XO)*r5ebuSq@PtDssKuoSS6#ya z9W@UGK2J3n(M^NF?4)DR2iSY$@&s!-z_ehrMhR=$zU~XOPO<=(8CR}xREk2cQP*K- z0xo$$Y|#$Z4P~jCWfP`5Mx!*u6F#7{I7~h;mkz5kz*`~83QYsN>Ufy&H0+M*4h%Eu zV)-xuBxsNqxCRStXMtf5LNbvXlG!BbK;FQHZootZSOBBs_Hfm?4hiv25` z6diTq`jg_CVWy-#a0_6m$WF3D@4FNnStc*N4mnCE^N`sf34y6txzT!+ykr~BJ#1WV z-xb>aX_D#95ilpwDXG~o6YOt+q1`$N9yTpXFj!h9SXRDdd!}Kt%RmG=--08uBvcFN zm$hHqUqCN`MrI#71q}e&Hz2?a7|=q~h@JuR6X6mOy$&DaQMgqmW)+J?dLCMO0jmGS zjs>tWv6Th1cz6NPi$Fpkslwa13IXWyECfO0J{+HUHrR$|0^V}xya&LhYnE%+dJsMB z{B75675gDjlrMoOk3JwRo-z7wmmnMx(#u`(P)IYMG=zSL6^Nwp26&8_9wl|y5tJ8K zO3EwcQb}p&j~;KdPM28mQc+N6OAMSW3L;URZD$Pz{^iTQ^ChrlqjjEV!b^lucNnZ0 zv0PA92vq^hgt~T9@r0n_jlt8E_wT%S?Zz$r&b7NURG5j1+oj^ML1UuQpyIK6P0Pkq 
zJS=$X`t6%{-mA=1rg)sm#}7?Qd3b!Wqoz;A?Q(Ud^8O4J4+cJTwb7CeX;7XAP_b>W zNQ0W5(=Y({QaUqgL`Zn{nmiylbcIrn@)13NBjM_BU((e}DS8>S(knuB)b!^Ch{>qn z8Z4v-Hu}mS^VLD-Ouz(+&kZscm@p6cWl*Gd@~i)CDA9YMGzTC12m)jQZE7PMx&0ga zj%@5cxRDvz7(20<-@Bfl=;bFibNioI&!#?|UiqHBmiyu6==jsIwb2WkdnPva6u-T6G{Yq_g066&t(RxGi1=tT}4KKksPPw)2fcech0 z>G;Cb)^U_RvZ@_j*G}}b6N^);+UbR<&B7b&g^6BaVqxm>w>F3NZDjKB`(k(bi(MzT zQZX&QaQ#IZ#gad{`JR3-5`>_10^!mjcy^A+irfVze*qU1ZGCQ%FJ-(hT_p;?h z_Upn*w!E61TKL8vw8Br{?r8@(q37je3`g)%O&MOuookU&13`(M52sN5w$^x3NPj7UQgjaB#s~;FTCDdz!Qi% zNHl;{^50MZ39mVugnygDhkyhNi6k;4=-c8z0+eN-LG}vFbD26I6S;(yp?VQj6y^5_ lJw|^+NhKnyzX447=j2t#xBtE7uzJU<9IsW882&gceZzC z4R&`G6y*V_kxG>Zc*w(6>O&An>0=&{TD3eNwUu^R!#Y<~P1U|2^#K)BDS7I-cjnHH z*RF6#8mEkQ?mhRM@1A?++%r38_skzdp&$pxX$*8dikJ}h*R83`)vY`&E@dO~}uwQ2iyht^&C>`Uit0Y|m zoNhQDFF8UicxVbHMIFUludWqj`3xXHlu>)T|brMfBq9W{&^4!hN>G80S6Q<1rC=I3JwA=rs_JS zV_JGTp{VJYiovt8uBBlg>f~fnyC7Xa37Cz&a$3KjBH)Wi)w4-MGUhTWcE^m1n2Wf9 zE5{|!W2hGm+(Z;6vqYiI3rr`HsxGDEOeBbX2JAOkI;d zii+#9x{9O}z~s1!0^l<2LuPPc5tg$l*pCTJ_#E;-E;SkwwMD`bv z4ad!cr;m(CKO8=NQaXI<@QJa}<3~ntX!P*#(V@|yL1}34_$Uq?JvKD_T0^mOFhsJUAu5mz>n@OxbsI!8bJ!yh2coU8Hn|2p4b3I)f!Ms**s}QI z%ZuyQFNT^ITQ@F-+aENpFC;!2yfac9lNK6(@}OnoVtd!G&s8=Llr|6CK2{kVFAa__ zOemEJRGL7=O?t6if8utv2A_Dj_E&$iccE?nV%O%+rtY9pc=$xh();g=7na_CH!MV+%W*UI{l})k{bJvDjyq?CyN=x*te;uo-W%k8!N**2F6Mf- zp5yX-j-OfK9{i>2!$yJ2yFk8ri$GuP!7O3@H@3+AwJd6^wTI)FC9MC(771U=qNeY_ zB2S)=b9t}A=Y2U}&Na*B{W*U=knrheLCOQgom02|dky(2v?wo&` zR|KMK+GRfrs&m?EXK-B3$NZ~!jP*=D)4`}}&tiX;(_0;Oyj(>vD(7N4krTPn>NYaX zD;~xBv1=Z_&GL2Rd6uh7@N^W)d1tzq?rNX2pD{cK9{bQ?(~7=%PXylt-UFYm+BZRV z9p3}r1TEjxE22`DtG9g9@Lzn>z%1uEhhMs@-eXkGtAuX2%u#}){%$1C>|CR@LfF{E zuntX|8L!&s!fZ{}zYjUuMLr*Z(+j5$PCuN%yhx+Ikw!h_5RRx1)2JuBXNh|Es&}>; z^-bSe)VtYrKQ-!YBt(7F|2FD#H0t4-Wc9wOM*Z6R=J|{IW*YUvlBjQ?QBQc!67|9} zM1AYG7WKlaquxeB)VF>|QNMcMRHJ_Fee?W9{R=eeJxii~9gTXzdzPs8JVVsCeQQzg zS#{LgNQnBj?;HD-R4Zkz zFVc*DJ(*LDcco+1aoq0Eze1*NY++W~@$8(jLdL#)2E=v8+ULiP>s-O=>aU!q;F_2p 
zl$SmhXg1ueysT`QHi0f`9E}=W%OHOJy`&h5zbj6Y2%%G8!_VX5vafMP}eHy}u}d)`MNdjJBXJuth4jC*%fr}y?n$P9sSY1=pNrE#p3F!frE+`(O52n#j8rfu<+}ks-Z$PLzUv^RCFF{PgB{nl2mbc z7N$W}h7^cq0TEYmh(YL54@o^C!hhvD@H$-l4sv1Oy5P?&qVsinJ>ZTl^qRpH!F=Zd zliN?cmQ>RbfjDmmX?oF4&_}5L`Fe*#_L>gf`;e5B9+Ij7bg0NMeXc4vT(3Gz1T2=16fzk`WF|F{r1Jh(IJrWTfJNq|4JP z;ov$zf@cy*D3(I4EvW%m$8C~+PRqhx5R4R0YEuwtX*mT|Iw&NmX$99y(;7<2hBT#V zNiBox57VuNO4<-Y8p6#?u3m&vYFaixf`-7hNGtZUf)W;p{iu)Fy`MA(NizTq7KvS^ zw|bp}KIdSUbFjlXc+I>pOM#`qw7B2InS;G1&K&gaTr%3dWYoW8wAZ=VZl}gQ&cSOs z`8U6Fr33Yl%RE4LX<4OBIwwQH5^9|Z8qMskrndu)aVucac~C-ri0CKbk9erVHN0AG zh+I1M#kSp_PF|n9HFSGRW#FCCz&qvr6Xk91R<^}T+hX^dmBQ$i!K)`eI#Fy^E|F%j zXXmE}t{=Fy>9)UeXrgpzqP*|ja?fO?M=A9v_rq#o*OgbUZvAL$F|1zt5j0EiZE?nx##Ul&t$1*@_tyV;w6HY7Nh;2rmv@OoxQ!ca%8M@WUT!9cscq`B`TMq^8J>n zf^uc_>cmGA#g?f{C!zV9Fjf>_D$=&lQ53f=0NHn(F#3+)nJD&+RRrt9KqjM1r`ST1 z07#+3mNT*zL=@W~Sf*Akj5*|ttN|?_g5jSFXN%&-B5e!vMX`4Q$o`{*(SP*LzGDB` zieP;h$Yhl16kBK#04dDdaz@sIh+-QA%hbw+vko~UYe36~VEE_4nWDJ9NZV_0F_8rz zqXUEy9r)c$F?yyVSRV#58D%=f7McV=uEE8S2_R==Er=+#L9k4%TsY&9GqMJ>d?=X%P_hIwtNW2|3j?5oGXhRMWL&}LsJwx3VccIstDwB&tjM(yDbS_AMP|q9)Nn3muCPBbhVn!|yRVEhz*JK-R6QJw+W=p z%5R%f9m4PZ&8aTo58>w2X5sdW&8Z$C;zHyNh*Ob9>}RYQ8gV;q}8bNofLvllRA9^#(a^6yu4w) zO+ykkyk~W4mOX1}&SfTB?X0jy>X_E3`c(-IMM{ej`=(9}IE5%m4rY literal 0 HcmV?d00001 diff --git a/tests/agent/orchestrator/__pycache__/test_template_registry.cpython-312.pyc b/tests/agent/orchestrator/__pycache__/test_template_registry.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..049efd0d55f9c9b607ec7369ec6dbef4a6909348 GIT binary patch literal 2997 zcmcguPiz}S7oT0P|0EkHcAe6O@~2Hpi%H|E1qzD#l{T@{2ATwtv^1{kWwSGNHnDfj z>^dQpBSoJ$An{3GQ4it3AxIpkTB#g+s04x>DglzWF10l(0tqgaI8?Nka^lU#n}ibS z10j7Q&AgfS-n@DH=FRWz*Wqw8f|fn~u|6C`=qJALn$(!wPr_soVT6S|DhO$zAg0BF zFYPNxX{q2(`voo&^MOJz9fY}$_^?F$=OpZZS4xL)2nT^S%?093ce%;9pS2E`>|)98 zGYy-nn$6@9vt;W=Zjjh23yo^TP^oURwlUS31|M2Ud{?5E8z+pS_@*Hy+WVP^q0{|LQcJdZzJ->I?W=(a7O6Q!AoAkdYVO-_c>CoQ 
zCfUTY;}UDaW&r}O=$JKwqDCysool9XPRGR12!qI~mTAB}w5a*KIjhW49d@&TYFM*` z0$--YD&=j(o+}dO)9iN`iu-`8<`l4FlXq;ki5tw9xWQFk(>Xm)ETy0pKt)+sA32P}?genD4Q*(qiLC9E;t|S+e;keaQY6B*upnI;H zgP{T>A69KO9-{CR5ariR`TbIU;VhOM?i*G5M}`NLqeDkejE@cUjk56A(UD`Ru~bq? zB?rb>_}KB($ZG>b%HVJ^b%OFFiVLn67Qe{tza8Kaw)TNYI1gqaAl5+m`fe$s41U zaphLztvl^I>m55jnXK*Zb$0hIAFn0TPBMKfgKHV;WN76XtI}aT@QIt79|Taxp3h#s zwe?Va$L?zrH>eXGdC(DV3EV?rf8aixh=w#idX0PHzM|7Gu75?B8yfSdjHbB8BW>;V ztjqrm-j0{!9-Z3Y1trmc1x?H$O+4F*(7aF^<5%PdxgM zJ@Wl6k0OuygOKNmNB^-$(%OvN!|J5sRE!;;ZQ20``>wAAu)v+Ti)i zlFJKl+TaDtewP>GbcJVc_WUl3UgU7obOxT5YkQsCw<>SmP%f>?H{S1GmCI}U=OAAN z=<~wt|Cq&FhFAmuqNG6;Q}YVIg*MGLYi6NnS_D91MkljCiYB$$V}Pg}$UsXqazufG z2_R#!wuzFS#|^GUY}N|EZ02VOzzWU5deb@GQ1kkEz+iVL*VzI9MF5zHlA9!S8fw8r z$-sHSqBEchD9HhP1~hWyE(goI9BJ?JXB?L)C(Q32>!Q{QenwVNQyEE}8c4m#{FY6M z77KC$AWxr#eKgL8=ePx5o*3}}3wc<`B(BI>8lSzgxfz?9r8MRQ>vm%v_WX857Kice zZlhh?_Ti1;R+&_x^gz|pT{B*U-7{R{4XZQ-MjnH1O#`k27`N@Iw#65Y|JeQF)$FD0 z_0;mSwcayM@0seMOtt%Lty^=twc9bgGvfR6CM! zj%2DmXRBS=S{HV@@a-sBdf~#Z#l08zR-$C#RT%!Df66&>s@n5rwd-`PE9-P+Z%36i zeTC~Q^~Axe#wFwW$>o=8edA8wc=ccDYT`^Sp*jimcKgH_=@T2_X z(i4u{T{$ptBjX$xcck&UymRTiBk!*qJa(hUIe5~MPS)jZ9|9|0N%Vd(hNu<}OM>B%}$_Gh{Ey>eB4VoA6u?{K6YcVK<~{Z~FzZ^}J(O7?{wETFie~%Ow zDX$Sj@ZYdkEfy2-KTv{SiG*8h6U4~r21%^1xm<(fEw@asRC`|4R!TlkYWC7!GxMja z?Y@AO)aQwE3DItDm}lN^p+hTv9i{`Y ExecutionContext: + task = TaskSpec( + task_id="task-1", + dialog_session_id="dialog-1", + rag_session_id="rag-1", + mode="auto", + user_message="Добавь в readme.md в конце строку про автора", + scenario=Scenario.TARGETED_EDIT, + routing=RoutingMeta(domain_id="project", process_id="edits", confidence=0.95, reason="test"), + constraints=TaskConstraints(allow_writes=True), + output_contract=OutputContract(result_type="changeset"), + metadata={ + "files_map": { + "README.md": { + "path": "README.md", + "content": "# Title\n", + "content_hash": "hash123", + } + } + }, + ) + plan = ExecutionPlan( + plan_id="plan-1", + task_id="task-1", + scenario=Scenario.TARGETED_EDIT, + 
template_id="targeted_edit_v1", + template_version="1.0", + steps=[], + ) + return ExecutionContext(task=task, plan=plan, graph_resolver=lambda *_: None, graph_invoker=lambda *_: {}) + + +def test_edit_actions_resolve_path_case_insensitive_and_keep_update() -> None: + actions = EditActions() + ctx = _ctx() + + actions.resolve_target(ctx) + actions.load_target_context(ctx) + actions.plan_minimal_patch(ctx) + actions.generate_patch(ctx) + + target = ctx.artifacts.get_content("target_context", {}) + changeset = ctx.artifacts.get_content("raw_changeset", []) + + assert target["path"] == "README.md" + assert changeset[0]["path"] == "README.md" + assert changeset[0]["op"] == "update" diff --git a/tests/agent/orchestrator/test_eval_suite.py b/tests/agent/orchestrator/test_eval_suite.py new file mode 100644 index 0000000..62dd548 --- /dev/null +++ b/tests/agent/orchestrator/test_eval_suite.py @@ -0,0 +1,56 @@ +import asyncio + +import pytest + +from app.modules.agent.engine.orchestrator.models import OutputContract, RoutingMeta, Scenario, TaskConstraints, TaskSpec +from app.modules.agent.engine.orchestrator.service import OrchestratorService + + +@pytest.mark.parametrize( + "scenario,expect_changeset", + [ + (Scenario.EXPLAIN_PART, False), + (Scenario.ANALYTICS_REVIEW, False), + (Scenario.DOCS_FROM_ANALYTICS, True), + (Scenario.TARGETED_EDIT, True), + (Scenario.GHERKIN_MODEL, True), + ], +) +def test_eval_suite_scenarios_run(scenario: Scenario, expect_changeset: bool) -> None: + service = OrchestratorService() + + task = TaskSpec( + task_id=f"task-{scenario.value}", + dialog_session_id="dialog-1", + rag_session_id="rag-1", + mode="auto", + user_message="Please process this scenario using project docs and requirements.", + scenario=scenario, + routing=RoutingMeta(domain_id="project", process_id="qa", confidence=0.95, reason="eval"), + constraints=TaskConstraints( + allow_writes=scenario in {Scenario.DOCS_FROM_ANALYTICS, Scenario.TARGETED_EDIT, Scenario.GHERKIN_MODEL}, + 
max_steps=20, + max_retries_per_step=2, + step_timeout_sec=90, + ), + output_contract=OutputContract(result_type="answer"), + attachments=[{"type": "http_url", "value": "https://example.com/doc"}], + metadata={ + "rag_context": "Requirements context is available.", + "confluence_context": "", + "files_map": {"docs/api/increment.md": {"content": "old", "content_hash": "h1"}}, + }, + ) + + result = asyncio.run( + service.run( + task=task, + graph_resolver=lambda _domain, _process: object(), + graph_invoker=lambda _graph, _state, _dialog: {"answer": "fallback", "changeset": []}, + ) + ) + + assert result.meta["plan"]["status"] in {"completed", "partial"} + assert bool(result.changeset) is expect_changeset + if not expect_changeset: + assert result.answer diff --git a/tests/agent/orchestrator/test_orchestrator_service.py b/tests/agent/orchestrator/test_orchestrator_service.py new file mode 100644 index 0000000..d1cdce2 --- /dev/null +++ b/tests/agent/orchestrator/test_orchestrator_service.py @@ -0,0 +1,72 @@ +import asyncio + +from app.modules.agent.engine.orchestrator.models import ( + OutputContract, + RoutingMeta, + Scenario, + TaskConstraints, + TaskSpec, +) +from app.modules.agent.engine.orchestrator.service import OrchestratorService + + +class DummyGraph: + pass + + +def _task(scenario: Scenario) -> TaskSpec: + allow_writes = scenario in {Scenario.DOCS_FROM_ANALYTICS, Scenario.TARGETED_EDIT, Scenario.GHERKIN_MODEL} + return TaskSpec( + task_id="task-1", + dialog_session_id="dialog-1", + rag_session_id="rag-1", + mode="auto", + user_message="Explain this module", + scenario=scenario, + routing=RoutingMeta(domain_id="project", process_id="qa", confidence=0.95, reason="unit-test"), + constraints=TaskConstraints(allow_writes=allow_writes, max_steps=16, max_retries_per_step=2, step_timeout_sec=90), + output_contract=OutputContract(result_type="answer"), + metadata={ + "rag_context": "RAG", + "confluence_context": "", + "files_map": {}, + }, + ) + + +def 
test_orchestrator_service_returns_answer() -> None: + service = OrchestratorService() + + def graph_resolver(domain_id: str, process_id: str): + assert domain_id == "project" + assert process_id == "qa" + return DummyGraph() + + def graph_invoker(_graph, state: dict, dialog_session_id: str): + assert state["message"] == "Explain this module" + assert dialog_session_id == "dialog-1" + return {"answer": "It works.", "changeset": []} + + result = asyncio.run(service.run(task=_task(Scenario.GENERAL_QA), graph_resolver=graph_resolver, graph_invoker=graph_invoker)) + assert result.answer == "It works." + assert result.meta["plan"]["status"] == "completed" + + +def test_orchestrator_service_generates_changeset_for_docs_scenario() -> None: + service = OrchestratorService() + + def graph_resolver(_domain_id: str, _process_id: str): + return DummyGraph() + + def graph_invoker(_graph, _state: dict, _dialog_session_id: str): + return {"answer": "unused", "changeset": []} + + result = asyncio.run( + service.run( + task=_task(Scenario.DOCS_FROM_ANALYTICS), + graph_resolver=graph_resolver, + graph_invoker=graph_invoker, + ) + ) + assert result.meta["plan"]["status"] == "completed" + assert len(result.changeset) > 0 diff --git a/tests/agent/orchestrator/test_plan_validator.py b/tests/agent/orchestrator/test_plan_validator.py new file mode 100644 index 0000000..d8d893d --- /dev/null +++ b/tests/agent/orchestrator/test_plan_validator.py @@ -0,0 +1,49 @@ +from app.modules.agent.engine.orchestrator.models import ( + ExecutionPlan, + OutputContract, + PlanStep, + RetryPolicy, + RoutingMeta, + Scenario, + TaskConstraints, + TaskSpec, +) +from app.modules.agent.engine.orchestrator.plan_validator import PlanValidator + + +def _task(*, allow_writes: bool) -> TaskSpec: + return TaskSpec( + task_id="t1", + dialog_session_id="d1", + rag_session_id="r1", + mode="auto", + user_message="hello", + scenario=Scenario.GENERAL_QA, + routing=RoutingMeta(domain_id="default", process_id="general", 
confidence=0.9, reason="test"), + constraints=TaskConstraints(allow_writes=allow_writes, max_steps=10, max_retries_per_step=2, step_timeout_sec=60), + output_contract=OutputContract(result_type="answer"), + ) + + +def test_plan_validator_rejects_write_step_when_not_allowed() -> None: + plan = ExecutionPlan( + plan_id="p1", + task_id="t1", + scenario=Scenario.GENERAL_QA, + template_id="tmp", + template_version="1.0", + steps=[ + PlanStep( + step_id="s1", + title="write", + action_id="collect_state", + executor="function", + side_effect="write", + retry=RetryPolicy(max_attempts=1), + ) + ], + ) + + errors = PlanValidator().validate(plan, _task(allow_writes=False)) + + assert "write_step_not_allowed:s1" in errors diff --git a/tests/agent/orchestrator/test_quality_metrics.py b/tests/agent/orchestrator/test_quality_metrics.py new file mode 100644 index 0000000..53b7b46 --- /dev/null +++ b/tests/agent/orchestrator/test_quality_metrics.py @@ -0,0 +1,42 @@ +import asyncio + +from app.modules.agent.engine.orchestrator.models import OutputContract, OutputSection, RoutingMeta, Scenario, TaskConstraints, TaskSpec +from app.modules.agent.engine.orchestrator.service import OrchestratorService + + +def test_quality_metrics_present_and_scored() -> None: + service = OrchestratorService() + task = TaskSpec( + task_id="quality-1", + dialog_session_id="dialog-1", + rag_session_id="rag-1", + mode="auto", + user_message="Explain architecture", + scenario=Scenario.EXPLAIN_PART, + routing=RoutingMeta(domain_id="project", process_id="qa", confidence=0.9, reason="test"), + constraints=TaskConstraints(allow_writes=False), + output_contract=OutputContract( + result_type="answer", + sections=[ + OutputSection(name="sequence_diagram", format="mermaid"), + OutputSection(name="use_cases", format="markdown"), + OutputSection(name="summary", format="markdown"), + ], + ), + metadata={"rag_context": "A\nB", "confluence_context": "", "files_map": {}}, + ) + + result = asyncio.run( + service.run( + 
task=task, + graph_resolver=lambda _d, _p: object(), + graph_invoker=lambda _g, _s, _id: {"answer": "unused", "changeset": []}, + ) + ) + + quality = result.meta.get("quality", {}) + assert quality + assert quality.get("faithfulness", {}).get("score") is not None + assert quality.get("coverage", {}).get("score") is not None + assert quality.get("status") in {"ok", "needs_review", "fail"} + assert quality.get("coverage", {}).get("covered_count", 0) >= 1 diff --git a/tests/agent/orchestrator/test_quality_metrics_gate_expectations.py b/tests/agent/orchestrator/test_quality_metrics_gate_expectations.py new file mode 100644 index 0000000..a9daf61 --- /dev/null +++ b/tests/agent/orchestrator/test_quality_metrics_gate_expectations.py @@ -0,0 +1,50 @@ +from app.modules.agent.engine.orchestrator.models import ( + ArtifactType, + OutputContract, + OutputSection, + RoutingMeta, + Scenario, + TaskConstraints, + TaskSpec, +) +from app.modules.agent.engine.orchestrator.quality_metrics import QualityMetricsCalculator +from app.modules.agent.engine.orchestrator.template_registry import ScenarioTemplateRegistry +from app.modules.agent.engine.orchestrator.execution_context import ExecutionContext +from app.modules.agent.engine.orchestrator.models import PlanStatus + + +def test_quality_metrics_coverage_reflects_missing_required_sections() -> None: + task = TaskSpec( + task_id="quality-2", + dialog_session_id="dialog-1", + rag_session_id="rag-1", + mode="auto", + user_message="Explain architecture", + scenario=Scenario.EXPLAIN_PART, + routing=RoutingMeta(domain_id="project", process_id="qa", confidence=0.9, reason="test"), + constraints=TaskConstraints(allow_writes=False), + output_contract=OutputContract( + result_type="answer", + sections=[ + OutputSection(name="sequence_diagram", format="mermaid"), + OutputSection(name="use_cases", format="markdown"), + OutputSection(name="summary", format="markdown"), + ], + ), + metadata={"rag_context": "A", "confluence_context": "", 
"files_map": {}}, + ) + + plan = ScenarioTemplateRegistry().build(task) + plan.status = PlanStatus.COMPLETED + ctx = ExecutionContext( + task=task, + plan=plan, + graph_resolver=lambda _d, _p: object(), + graph_invoker=lambda _g, _s, _id: {}, + ) + ctx.artifacts.put(key="final_answer", artifact_type=ArtifactType.TEXT, content="Only summary text") + + metrics = QualityMetricsCalculator().build(ctx, step_results=[]) + + assert metrics["coverage"]["score"] < 1.0 + assert "sequence_diagram" in metrics["coverage"]["missing_items"] diff --git a/tests/agent/orchestrator/test_template_registry.py b/tests/agent/orchestrator/test_template_registry.py new file mode 100644 index 0000000..30878fd --- /dev/null +++ b/tests/agent/orchestrator/test_template_registry.py @@ -0,0 +1,38 @@ +from app.modules.agent.engine.orchestrator.models import OutputContract, RoutingMeta, Scenario, TaskConstraints, TaskSpec +from app.modules.agent.engine.orchestrator.template_registry import ScenarioTemplateRegistry + + +def _task(scenario: Scenario) -> TaskSpec: + return TaskSpec( + task_id="t1", + dialog_session_id="d1", + rag_session_id="r1", + mode="auto", + user_message="run scenario", + scenario=scenario, + routing=RoutingMeta(domain_id="project", process_id="qa", confidence=0.9, reason="test"), + constraints=TaskConstraints( + allow_writes=scenario in {Scenario.DOCS_FROM_ANALYTICS, Scenario.TARGETED_EDIT, Scenario.GHERKIN_MODEL} + ), + output_contract=OutputContract(result_type="answer"), + metadata={"rag_context": "ctx", "confluence_context": "", "files_map": {}}, + ) + + +def test_template_registry_has_multi_step_review_docs_edit_gherkin() -> None: + registry = ScenarioTemplateRegistry() + + review_steps = [step.step_id for step in registry.build(_task(Scenario.ANALYTICS_REVIEW)).steps] + docs_steps = [step.step_id for step in registry.build(_task(Scenario.DOCS_FROM_ANALYTICS)).steps] + edit_steps = [step.step_id for step in registry.build(_task(Scenario.TARGETED_EDIT)).steps] + 
gherkin_steps = [step.step_id for step in registry.build(_task(Scenario.GHERKIN_MODEL)).steps] + + assert "structural_check" in review_steps and "compose_review_report" in review_steps + assert "extract_change_intents" in docs_steps and "build_changeset" in docs_steps + assert "resolve_target" in edit_steps and "finalize_changeset" in edit_steps + assert "generate_gherkin_bundle" in gherkin_steps and "validate_coverage" in gherkin_steps + + assert len(review_steps) >= 7 + assert len(docs_steps) >= 9 + assert len(edit_steps) >= 7 + assert len(gherkin_steps) >= 8 diff --git a/tests/agent/test_repo_webhook_service.py b/tests/agent/test_repo_webhook_service.py new file mode 100644 index 0000000..dac713f --- /dev/null +++ b/tests/agent/test_repo_webhook_service.py @@ -0,0 +1,98 @@ +from __future__ import annotations + +from app.modules.rag_repo.webhook_service import RepoWebhookService + + +class FakeStoryWriter: + def __init__(self) -> None: + self.calls: list[dict] = [] + + def record_story_commit(self, **kwargs) -> None: + self.calls.append(kwargs) + + +class FakeCacheWriter: + def __init__(self) -> None: + self.calls: list[dict] = [] + + def record_repo_cache(self, **kwargs) -> None: + self.calls.append(kwargs) + + +def test_gitea_webhook_binds_story() -> None: + writer = FakeStoryWriter() + cache = FakeCacheWriter() + service = RepoWebhookService(writer, cache) + + result = service.process( + provider="gitea", + payload={ + "repository": {"full_name": "acme/proj"}, + "ref": "refs/heads/feature/AAAA-1234", + "pusher": {"username": "alice"}, + "commits": [ + { + "id": "abc123", + "message": "FEAT-1 update docs", + "added": ["docs/new.md"], + "modified": ["docs/api.md"], + "removed": [], + } + ], + }, + ) + + assert result["accepted"] is True + assert result["story_bound"] is True + assert result["story_id"] == "FEAT-1" + assert result["cache_recorded"] is True + assert len(writer.calls) == 1 + assert len(cache.calls) == 1 + assert writer.calls[0]["project_id"] == 
"acme/proj" + + +def test_webhook_without_story_id_is_non_fatal() -> None: + writer = FakeStoryWriter() + cache = FakeCacheWriter() + service = RepoWebhookService(writer, cache) + + result = service.process( + provider="bitbucket", + payload={ + "repository": {"full_name": "acme/proj"}, + "push": { + "changes": [ + { + "new": { + "name": "feature/no-story", + "target": {"hash": "abc123", "message": "update docs"}, + } + } + ] + }, + }, + ) + + assert result["accepted"] is True + assert result["story_bound"] is False + assert result["cache_recorded"] is True + assert len(cache.calls) == 1 + assert writer.calls == [] + + +def test_provider_autodetect_by_headers() -> None: + writer = FakeStoryWriter() + service = RepoWebhookService(writer) + + result = service.process( + headers={"X-Gitea-Event": "push"}, + payload={ + "repository": {"full_name": "acme/proj"}, + "ref": "refs/heads/feature/AAAA-1234", + "commits": [{"id": "abc123", "message": "AAAA-1234 update"}], + }, + ) + + assert result["accepted"] is True + assert result["story_bound"] is True + assert result["story_id"] == "AAAA-1234" diff --git a/tests/agent/test_story_session_recorder.py b/tests/agent/test_story_session_recorder.py new file mode 100644 index 0000000..4d00f92 --- /dev/null +++ b/tests/agent/test_story_session_recorder.py @@ -0,0 +1,48 @@ +from __future__ import annotations + +from app.modules.agent.story_session_recorder import StorySessionRecorder +from app.schemas.changeset import ChangeItem, ChangeOp + + +class FakeStoryRepo: + def __init__(self) -> None: + self.calls: list[dict] = [] + + def add_session_artifact(self, **kwargs) -> None: + self.calls.append(kwargs) + + +def test_record_run_stores_attachment_and_changeset_artifacts() -> None: + repo = FakeStoryRepo() + recorder = StorySessionRecorder(repo) + + recorder.record_run( + dialog_session_id="dialog-1", + rag_session_id="rag-1", + scenario="docs_from_analytics", + attachments=[ + {"type": "confluence_url", "value": 
"https://example.org/doc"}, + {"type": "file_ref", "value": "local.md"}, + ], + answer="Generated docs update summary", + changeset=[ + ChangeItem( + op=ChangeOp.UPDATE, + path="docs/api.md", + base_hash="abc", + proposed_content="new", + reason="sync endpoint section", + ) + ], + ) + + assert len(repo.calls) == 3 + assert repo.calls[0]["artifact_role"] == "analysis" + assert repo.calls[0]["source_ref"] == "https://example.org/doc" + + assert repo.calls[1]["artifact_role"] == "doc_change" + assert repo.calls[1]["summary"] == "Generated docs update summary" + + assert repo.calls[2]["artifact_role"] == "doc_change" + assert repo.calls[2]["path"] == "docs/api.md" + assert repo.calls[2]["change_type"] == "updated" diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..f96b4d8 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,8 @@ +from __future__ import annotations + +import sys +from pathlib import Path + +ROOT = Path(__file__).resolve().parents[1] +if str(ROOT) not in sys.path: + sys.path.insert(0, str(ROOT))