Первый коммит
This commit is contained in:
0
app/modules/__init__.py
Normal file
0
app/modules/__init__.py
Normal file
BIN
app/modules/__pycache__/__init__.cpython-312.pyc
Normal file
BIN
app/modules/__pycache__/__init__.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/__pycache__/application.cpython-312.pyc
Normal file
BIN
app/modules/__pycache__/application.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/__pycache__/contracts.cpython-312.pyc
Normal file
BIN
app/modules/__pycache__/contracts.cpython-312.pyc
Normal file
Binary file not shown.
0
app/modules/agent/__init__.py
Normal file
0
app/modules/agent/__init__.py
Normal file
BIN
app/modules/agent/__pycache__/__init__.cpython-312.pyc
Normal file
BIN
app/modules/agent/__pycache__/__init__.cpython-312.pyc
Normal file
Binary file not shown.
Binary file not shown.
BIN
app/modules/agent/__pycache__/confluence_service.cpython-312.pyc
Normal file
BIN
app/modules/agent/__pycache__/confluence_service.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/agent/__pycache__/module.cpython-312.pyc
Normal file
BIN
app/modules/agent/__pycache__/module.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/agent/__pycache__/prompt_loader.cpython-312.pyc
Normal file
BIN
app/modules/agent/__pycache__/prompt_loader.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/agent/__pycache__/repository.cpython-312.pyc
Normal file
BIN
app/modules/agent/__pycache__/repository.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/agent/__pycache__/service.cpython-312.pyc
Normal file
BIN
app/modules/agent/__pycache__/service.cpython-312.pyc
Normal file
Binary file not shown.
20
app/modules/agent/changeset_validator.py
Normal file
20
app/modules/agent/changeset_validator.py
Normal file
@@ -0,0 +1,20 @@
|
||||
from app.core.constants import SUPPORTED_SCHEMA_VERSION
|
||||
from app.core.exceptions import AppError
|
||||
from app.schemas.changeset import ChangeItem, ChangeSetPayload
|
||||
from app.schemas.common import ModuleName
|
||||
|
||||
|
||||
class ChangeSetValidator:
    """Validates a proposed changeset by round-tripping it through the payload schema."""

    def validate(self, task_id: str, changeset: list[ChangeItem]) -> list[ChangeItem]:
        """Return the schema-validated changeset for *task_id*.

        Raises:
            AppError: if the constructed payload reports an unsupported
                schema version.
        """
        wrapped = ChangeSetPayload(
            schema_version=SUPPORTED_SCHEMA_VERSION,
            task_id=task_id,
            changeset=changeset,
        )
        # Defensive re-check: the payload model may normalize/coerce the version.
        if wrapped.schema_version == SUPPORTED_SCHEMA_VERSION:
            return wrapped.changeset
        raise AppError(
            "unsupported_schema",
            f"Unsupported schema version: {wrapped.schema_version}",
            ModuleName.AGENT,
        )
20
app/modules/agent/confluence_service.py
Normal file
20
app/modules/agent/confluence_service.py
Normal file
@@ -0,0 +1,20 @@
|
||||
from datetime import datetime, timezone
|
||||
from urllib.parse import urlparse
|
||||
from uuid import uuid4
|
||||
|
||||
from app.core.exceptions import AppError
|
||||
from app.schemas.common import ModuleName
|
||||
|
||||
|
||||
class ConfluenceService:
    """Stub service that simulates fetching a Confluence page by URL."""

    async def fetch_page(self, url: str) -> dict:
        """Validate *url* and return a stub page payload.

        Returns a dict with page_id, title, content_markdown, version and
        a UTC ISO-8601 fetched_at timestamp.

        Raises:
            AppError: if the URL is not an absolute http(s) URL.
        """
        parsed = urlparse(url)
        # Require an exact http/https scheme — startswith("http") would also
        # accept schemes like "httpx" — and a non-empty host component.
        if parsed.scheme not in ("http", "https") or not parsed.netloc:
            raise AppError("invalid_url", "Invalid Confluence URL", ModuleName.CONFLUENCE)
        return {
            "page_id": str(uuid4()),
            "title": "Confluence page",
            "content_markdown": f"Fetched content from {url}",
            "version": 1,
            "fetched_at": datetime.now(timezone.utc).isoformat(),
        }
0
app/modules/agent/engine/__init__.py
Normal file
0
app/modules/agent/engine/__init__.py
Normal file
BIN
app/modules/agent/engine/__pycache__/__init__.cpython-312.pyc
Normal file
BIN
app/modules/agent/engine/__pycache__/__init__.cpython-312.pyc
Normal file
Binary file not shown.
11
app/modules/agent/engine/graphs/__init__.py
Normal file
11
app/modules/agent/engine/graphs/__init__.py
Normal file
@@ -0,0 +1,11 @@
|
||||
from app.modules.agent.engine.graphs.base_graph import BaseGraphFactory
|
||||
from app.modules.agent.engine.graphs.docs_graph import DocsGraphFactory
|
||||
from app.modules.agent.engine.graphs.project_edits_graph import ProjectEditsGraphFactory
|
||||
from app.modules.agent.engine.graphs.project_qa_graph import ProjectQaGraphFactory
|
||||
|
||||
__all__ = [
|
||||
"BaseGraphFactory",
|
||||
"DocsGraphFactory",
|
||||
"ProjectEditsGraphFactory",
|
||||
"ProjectQaGraphFactory",
|
||||
]
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
58
app/modules/agent/engine/graphs/base_graph.py
Normal file
58
app/modules/agent/engine/graphs/base_graph.py
Normal file
@@ -0,0 +1,58 @@
|
||||
from langgraph.graph import END, START, StateGraph
|
||||
|
||||
from app.modules.agent.engine.graphs.progress import emit_progress_sync
|
||||
from app.modules.agent.llm import AgentLlmService
|
||||
from app.modules.agent.engine.graphs.state import AgentGraphState
|
||||
|
||||
|
||||
class BaseGraphFactory:
    """Builds the default two-step agent graph: context preparation, then answer."""

    def __init__(self, llm: AgentLlmService) -> None:
        self._llm = llm

    def build(self, checkpointer=None):
        """Compile and return the default graph (START -> context -> answer -> END)."""
        builder = StateGraph(AgentGraphState)
        builder.add_node("context", self._context_node)
        builder.add_node("answer", self._answer_node)
        for src, dst in ((START, "context"), ("context", "answer"), ("answer", END)):
            builder.add_edge(src, dst)
        return builder.compile(checkpointer=checkpointer)

    def _context_node(self, state: AgentGraphState) -> dict:
        """Pass the RAG/Confluence context through, emitting progress events."""
        emit_progress_sync(
            state,
            stage="graph.default.context",
            message="Готовлю контекст ответа по данным запроса.",
        )
        passthrough = {
            "rag_context": state.get("rag_context", ""),
            "confluence_context": state.get("confluence_context", ""),
        }
        emit_progress_sync(
            state,
            stage="graph.default.context.done",
            message="Контекст собран, перехожу к формированию ответа.",
        )
        return passthrough

    def _answer_node(self, state: AgentGraphState) -> dict:
        """Ask the LLM for a general answer built from the request and contexts."""
        emit_progress_sync(
            state,
            stage="graph.default.answer",
            message="Формирую текст ответа для пользователя.",
        )
        sections = [
            f"User request:\n{state.get('message', '')}",
            f"RAG context:\n{state.get('rag_context', '')}",
            f"Confluence context:\n{state.get('confluence_context', '')}",
        ]
        draft = self._llm.generate("general_answer", "\n\n".join(sections))
        emit_progress_sync(
            state,
            stage="graph.default.answer.done",
            message="Черновик ответа подготовлен.",
        )
        return {"answer": draft}
||||
26
app/modules/agent/engine/graphs/docs_examples_loader.py
Normal file
26
app/modules/agent/engine/graphs/docs_examples_loader.py
Normal file
@@ -0,0 +1,26 @@
|
||||
from pathlib import Path
|
||||
import os
|
||||
|
||||
|
||||
class DocsExamplesLoader:
|
||||
def __init__(self, prompts_dir: Path | None = None) -> None:
|
||||
base = prompts_dir or Path(__file__).resolve().parents[2] / "prompts"
|
||||
env_override = os.getenv("AGENT_PROMPTS_DIR", "").strip()
|
||||
root = Path(env_override) if env_override else base
|
||||
self._examples_dir = root / "docs_examples"
|
||||
|
||||
def load_bundle(self, *, max_files: int = 6, max_chars_per_file: int = 1800) -> str:
|
||||
if not self._examples_dir.is_dir():
|
||||
return ""
|
||||
files = sorted(
|
||||
[p for p in self._examples_dir.iterdir() if p.is_file() and p.suffix.lower() in {".md", ".txt"}],
|
||||
key=lambda p: p.name.lower(),
|
||||
)[:max_files]
|
||||
chunks: list[str] = []
|
||||
for path in files:
|
||||
content = path.read_text(encoding="utf-8", errors="ignore").strip()
|
||||
if not content:
|
||||
continue
|
||||
excerpt = content[:max_chars_per_file].strip()
|
||||
chunks.append(f"### Example: {path.name}\n{excerpt}")
|
||||
return "\n\n".join(chunks).strip()
|
||||
128
app/modules/agent/engine/graphs/docs_graph.py
Normal file
128
app/modules/agent/engine/graphs/docs_graph.py
Normal file
@@ -0,0 +1,128 @@
|
||||
from langgraph.graph import END, START, StateGraph
|
||||
import logging
|
||||
|
||||
from app.modules.agent.engine.graphs.file_targeting import FileTargeting
|
||||
from app.modules.agent.engine.graphs.docs_graph_logic import DocsContentComposer, DocsContextAnalyzer
|
||||
from app.modules.agent.engine.graphs.progress import emit_progress_sync
|
||||
from app.modules.agent.engine.graphs.state import AgentGraphState
|
||||
from app.modules.agent.llm import AgentLlmService
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DocsGraphFactory:
    """Builds the documentation-generation graph.

    Pipeline: collect code context -> detect existing docs -> choose a
    strategy -> load rules/examples -> plan (incremental or from scratch) ->
    generate -> self-check (with bounded retries) -> build changeset ->
    summarize.
    """

    # Max self-check retries before accepting the current draft as "ready".
    _max_validation_attempts = 2

    def __init__(self, llm: AgentLlmService) -> None:
        self._targeting = FileTargeting()
        self._analyzer = DocsContextAnalyzer(llm, self._targeting)
        self._composer = DocsContentComposer(llm, self._targeting)

    def build(self, checkpointer=None):
        """Compile and return the docs graph."""
        graph = StateGraph(AgentGraphState)
        graph.add_node("collect_code_context", self._collect_code_context)
        graph.add_node("detect_existing_docs", self._detect_existing_docs)
        graph.add_node("decide_strategy", self._decide_strategy)
        graph.add_node("load_rules_and_examples", self._load_rules_and_examples)
        graph.add_node("plan_incremental_changes", self._plan_incremental_changes)
        graph.add_node("plan_new_document", self._plan_new_document)
        graph.add_node("generate_doc_content", self._generate_doc_content)
        graph.add_node("self_check", self._self_check)
        graph.add_node("build_changeset", self._build_changeset)
        graph.add_node("summarize_result", self._summarize_result)

        graph.add_edge(START, "collect_code_context")
        graph.add_edge("collect_code_context", "detect_existing_docs")
        graph.add_edge("detect_existing_docs", "decide_strategy")
        graph.add_edge("decide_strategy", "load_rules_and_examples")
        # Branch on the chosen strategy after rules/examples are loaded.
        graph.add_conditional_edges(
            "load_rules_and_examples",
            self._route_after_rules_loading,
            {
                "incremental": "plan_incremental_changes",
                "from_scratch": "plan_new_document",
            },
        )
        graph.add_edge("plan_incremental_changes", "generate_doc_content")
        graph.add_edge("plan_new_document", "generate_doc_content")
        graph.add_edge("generate_doc_content", "self_check")
        # Retry generation until validation passes or attempts are exhausted.
        graph.add_conditional_edges(
            "self_check",
            self._route_after_self_check,
            {"retry": "generate_doc_content", "ready": "build_changeset"},
        )
        graph.add_edge("build_changeset", "summarize_result")
        graph.add_edge("summarize_result", END)
        return graph.compile(checkpointer=checkpointer)

    def _collect_code_context(self, state: AgentGraphState) -> dict:
        return self._run_node(state, "collect_code_context", "Собираю контекст кода и файлов.", self._analyzer.collect_code_context)

    def _detect_existing_docs(self, state: AgentGraphState) -> dict:
        return self._run_node(
            state,
            "detect_existing_docs",
            "Определяю, есть ли существующая документация проекта.",
            self._analyzer.detect_existing_docs,
        )

    def _decide_strategy(self, state: AgentGraphState) -> dict:
        return self._run_node(state, "decide_strategy", "Выбираю стратегию: инкремент или генерация с нуля.", self._analyzer.decide_strategy)

    def _load_rules_and_examples(self, state: AgentGraphState) -> dict:
        return self._run_node(
            state,
            "load_rules_and_examples",
            "Загружаю правила и примеры формата документации.",
            self._composer.load_rules_and_examples,
        )

    def _plan_incremental_changes(self, state: AgentGraphState) -> dict:
        return self._run_node(
            state,
            "plan_incremental_changes",
            "Планирую точечные изменения в существующей документации.",
            lambda st: self._composer.plan_incremental_changes(st, self._analyzer),
        )

    def _plan_new_document(self, state: AgentGraphState) -> dict:
        return self._run_node(state, "plan_new_document", "Проектирую структуру новой документации.", self._composer.plan_new_document)

    def _generate_doc_content(self, state: AgentGraphState) -> dict:
        return self._run_node(state, "generate_doc_content", "Генерирую содержимое документации.", self._composer.generate_doc_content)

    def _self_check(self, state: AgentGraphState) -> dict:
        return self._run_node(state, "self_check", "Проверяю соответствие результата правилам.", self._composer.self_check)

    def _build_changeset(self, state: AgentGraphState) -> dict:
        return self._run_node(state, "build_changeset", "Формирую итоговый набор изменений файлов.", self._composer.build_changeset)

    def _summarize_result(self, state: AgentGraphState) -> dict:
        return self._run_node(
            state,
            "summarize_result",
            "Формирую краткий обзор выполненных действий и измененных файлов.",
            self._composer.build_execution_summary,
        )

    def _route_after_rules_loading(self, state: AgentGraphState) -> str:
        """Route to the incremental branch only for the incremental_update strategy."""
        if state.get("docs_strategy") == "incremental_update":
            return "incremental"
        return "from_scratch"

    def _route_after_self_check(self, state: AgentGraphState) -> str:
        """Route to "ready" on passed validation or exhausted attempts, else retry."""
        if state.get("validation_passed"):
            return "ready"
        attempts = int(state.get("validation_attempts", 0) or 0)
        return "ready" if attempts >= self._max_validation_attempts else "retry"

    def _run_node(self, state: AgentGraphState, node_name: str, message: str, fn):
        """Run *fn* for a node, wrapping it with progress events and logging.

        Exceptions are logged with a traceback and re-raised so the graph
        engine sees the failure.
        """
        emit_progress_sync(state, stage=f"graph.docs.{node_name}", message=message)
        try:
            result = fn(state)
            emit_progress_sync(state, stage=f"graph.docs.{node_name}.done", message=f"Шаг '{node_name}' завершен.")
            # Routine success is diagnostic detail, not a warning condition.
            LOGGER.debug("docs graph node completed: node=%s keys=%s", node_name, sorted(result.keys()))
            return result
        except Exception:
            LOGGER.exception("docs graph node failed: node=%s", node_name)
            raise
519
app/modules/agent/engine/graphs/docs_graph_logic.py
Normal file
519
app/modules/agent/engine/graphs/docs_graph_logic.py
Normal file
@@ -0,0 +1,519 @@
|
||||
import json
|
||||
from difflib import SequenceMatcher
|
||||
|
||||
from app.modules.agent.engine.graphs.docs_examples_loader import DocsExamplesLoader
|
||||
from app.modules.agent.engine.graphs.file_targeting import FileTargeting
|
||||
from app.modules.agent.engine.graphs.state import AgentGraphState
|
||||
from app.modules.agent.llm import AgentLlmService
|
||||
from app.schemas.changeset import ChangeItem
|
||||
import logging
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DocsContextAnalyzer:
    """LLM-backed analysis of the project context for documentation tasks.

    Finds documentation candidates in the files map, detects whether docs
    already exist, chooses a generation strategy, and resolves the target
    file for incremental updates.
    """

    def __init__(self, llm: AgentLlmService, targeting: FileTargeting) -> None:
        self._llm = llm
        self._targeting = targeting

    def collect_code_context(self, state: AgentGraphState) -> dict:
        """Extract the requested target file and doc candidates from files_map.

        Also resets validation_attempts to 0 for the upcoming self-check loop.
        """
        message = state.get("message", "")
        files_map = state.get("files_map", {}) or {}
        requested_path = self._targeting.extract_target_path(message)
        target_file = self._targeting.lookup_file(files_map, requested_path) if requested_path else None
        docs_candidates = self._collect_doc_candidates(files_map)
        # Prefer the resolved file's canonical path; fall back to the raw request.
        target_path = str((target_file or {}).get("path") or (requested_path or "")).strip() or ""
        return {
            "docs_candidates": docs_candidates,
            "target_path": target_path,
            "target_file_content": str((target_file or {}).get("content", "")),
            "target_file_hash": str((target_file or {}).get("content_hash", "")),
            "validation_attempts": 0,
        }

    def detect_existing_docs(self, state: AgentGraphState) -> dict:
        """Ask the LLM whether existing documentation is present.

        Short-circuits to "not detected" when there are no candidates; the
        LLM answer is parsed via exists:/summary: line markers.
        """
        docs_candidates = state.get("docs_candidates", []) or []
        if not docs_candidates:
            return {
                "existing_docs_detected": False,
                "existing_docs_summary": "No documentation files detected in current project context.",
            }

        # Only the first 8 candidates, 500 chars each, to bound prompt size.
        snippets = "\n\n".join(
            [
                f"Path: {item.get('path', '')}\nSnippet:\n{self._shorten(item.get('content', ''), 500)}"
                for item in docs_candidates[:8]
            ]
        )
        user_input = "\n\n".join(
            [
                f"User request:\n{state.get('message', '')}",
                f"Requested target path:\n{state.get('target_path', '') or '(not specified)'}",
                f"Detected documentation candidates:\n{snippets}",
            ]
        )
        raw = self._llm.generate("docs_detect", user_input)
        # Default to True: candidates were found, so assume docs exist unless denied.
        exists = self.parse_bool_marker(raw, "exists", default=True)
        summary = self.parse_text_marker(raw, "summary", default="Documentation files detected.")
        return {"existing_docs_detected": exists, "existing_docs_summary": summary}

    def decide_strategy(self, state: AgentGraphState) -> dict:
        """Choose "incremental_update" or "from_scratch".

        Explicit (Russian/English) keywords in the user message short-circuit
        the decision; otherwise the LLM decides, with a deterministic fallback
        based on whether existing docs were detected.
        """
        message = (state.get("message", "") or "").lower()
        if any(token in message for token in ("с нуля", "from scratch", "new documentation", "создай документацию")):
            return {"docs_strategy": "from_scratch"}
        if any(token in message for token in ("дополни", "обнови документацию", "extend docs", "update docs")):
            return {"docs_strategy": "incremental_update"}

        user_input = "\n\n".join(
            [
                f"User request:\n{state.get('message', '')}",
                f"Existing docs detected:\n{state.get('existing_docs_detected', False)}",
                f"Existing docs summary:\n{state.get('existing_docs_summary', '')}",
            ]
        )
        raw = self._llm.generate("docs_strategy", user_input)
        strategy = self.parse_text_marker(raw, "strategy", default="").lower()
        # Guard against free-form LLM output: fall back deterministically.
        if strategy not in {"incremental_update", "from_scratch"}:
            strategy = "incremental_update" if state.get("existing_docs_detected", False) else "from_scratch"
        return {"docs_strategy": strategy}

    def resolve_target_for_incremental(self, state: AgentGraphState) -> tuple[str, dict | None]:
        """Pick the file to update incrementally.

        Preference order: the explicitly requested target path, then the
        first documentation candidate, then a default draft path with no
        backing file (payload None).
        """
        files_map = state.get("files_map", {}) or {}
        preferred_path = state.get("target_path", "")
        preferred = self._targeting.lookup_file(files_map, preferred_path)
        if preferred:
            return str(preferred.get("path") or preferred_path), preferred
        candidates = state.get("docs_candidates", []) or []
        if candidates:
            first_path = str(candidates[0].get("path", ""))
            resolved = self._targeting.lookup_file(files_map, first_path) or candidates[0]
            return first_path, resolved
        fallback = preferred_path.strip() or "docs/AGENT_DRAFT.md"
        return fallback, None

    def _collect_doc_candidates(self, files_map: dict[str, dict]) -> list[dict]:
        """Collect documentation-like files (docs/, *.md, *.rst, README*).

        Results are sorted with docs/ paths first, then alphabetically.
        """
        candidates: list[dict] = []
        for raw_path, payload in files_map.items():
            # Normalize Windows separators before matching.
            path = str(raw_path or "").replace("\\", "/").strip()
            if not path:
                continue
            low = path.lower()
            is_doc = low.startswith("docs/") or low.endswith(".md") or low.endswith(".rst") or "/readme" in low or low.startswith("readme")
            if not is_doc:
                continue
            candidates.append(
                {
                    "path": str(payload.get("path") or path),
                    "content": str(payload.get("content", "")),
                    "content_hash": str(payload.get("content_hash", "")),
                }
            )
        candidates.sort(key=lambda item: (0 if str(item.get("path", "")).lower().startswith("docs/") else 1, str(item.get("path", "")).lower()))
        return candidates

    def _shorten(self, text: str, max_chars: int) -> str:
        """Trim *text* to *max_chars*, appending a truncation marker when cut."""
        value = (text or "").strip()
        if len(value) <= max_chars:
            return value
        return value[:max_chars].rstrip() + "\n...[truncated]"

    @staticmethod
    def parse_bool_marker(text: str, marker: str, *, default: bool) -> bool:
        """Parse a "marker: yes/no" line from LLM output into a bool.

        Only the first token after the marker is inspected; unknown tokens
        yield *default*.
        """
        value = DocsContextAnalyzer.parse_text_marker(text, marker, default="")
        if not value:
            return default
        token = value.split()[0].strip().lower()
        if token in {"yes", "true", "1", "да"}:
            return True
        if token in {"no", "false", "0", "нет"}:
            return False
        return default

    @staticmethod
    def parse_text_marker(text: str, marker: str, *, default: str) -> str:
        """Return the value of the first "marker: value" line, or *default*.

        Matching is case-insensitive on the marker prefix.
        """
        low_marker = f"{marker.lower()}:"
        for line in (text or "").splitlines():
            raw = line.strip()
            if raw.lower().startswith(low_marker):
                return raw.split(":", 1)[1].strip()
        return default
|
||||
|
||||
class DocsBundleFormatter:
    """Text/JSON utilities for shaping and diff-checking generated docs bundles."""

    def shorten(self, text: str, max_chars: int) -> str:
        """Trim *text* to *max_chars*, appending a truncation marker when cut."""
        stripped = (text or "").strip()
        if len(stripped) > max_chars:
            return stripped[:max_chars].rstrip() + "\n...[truncated]"
        return stripped

    def normalize_file_output(self, text: str) -> str:
        """Strip a surrounding ``` code fence, if present, from LLM output."""
        stripped = (text or "").strip()
        if not (stripped.startswith("```") and stripped.endswith("```")):
            return stripped
        fence_lines = stripped.splitlines()
        if len(fence_lines) < 3:
            # Degenerate fence (e.g. just "```"): leave as-is.
            return stripped
        return "\n".join(fence_lines[1:-1]).strip()

    def parse_docs_bundle(self, raw_text: str) -> list[dict]:
        """Parse LLM output into a deduplicated list of {path, content, reason} dicts.

        Accepts either {"files": [...]} or a bare list; tolerates a code fence
        and surrounding prose around the JSON. Entries with empty paths or
        whitespace-only content, and repeated paths, are dropped.
        """
        text = (raw_text or "").strip()
        if not text:
            return []

        candidate = self.normalize_file_output(text)
        parsed = self._parse_json_candidate(candidate)
        if parsed is None:
            # Fall back to the outermost {...} span when prose surrounds the JSON.
            start, end = candidate.find("{"), candidate.rfind("}")
            if start != -1 and end > start:
                parsed = self._parse_json_candidate(candidate[start : end + 1])
        if parsed is None:
            return []

        if isinstance(parsed, dict):
            raw_files = parsed.get("files")
            files = raw_files if isinstance(raw_files, list) else []
        elif isinstance(parsed, list):
            files = parsed
        else:
            files = []

        result: list[dict] = []
        known_paths: set[str] = set()
        for entry in files:
            if not isinstance(entry, dict):
                continue
            path = str(entry.get("path", "")).replace("\\", "/").strip()
            content = str(entry.get("content", ""))
            if not path or not content.strip() or path in known_paths:
                continue
            known_paths.add(path)
            result.append(
                {
                    "path": path,
                    "content": content,
                    "reason": str(entry.get("reason", "")).strip(),
                }
            )
        return result

    def bundle_has_required_structure(self, bundle: list[dict]) -> bool:
        """True when the bundle has at least one docs/api/ and one docs/logic/ file."""
        if not bundle:
            return False
        present = {
            prefix
            for item in bundle
            for prefix in ("docs/api/", "docs/logic/")
            if str(item.get("path", "")).replace("\\", "/").startswith(prefix)
        }
        return {"docs/api/", "docs/logic/"} <= present

    def similarity(self, original: str, updated: str) -> float:
        """Character-level similarity ratio in [0, 1]."""
        return SequenceMatcher(None, original or "", updated or "").ratio()

    def line_change_ratio(self, original: str, updated: str) -> float:
        """Fraction of lines touched between the two texts (0.0 when both are empty)."""
        before = (original or "").splitlines()
        after = (updated or "").splitlines()
        if not before and not after:
            return 0.0
        changed = sum(
            max(i2 - i1, j2 - j1)
            for tag, i1, i2, j1, j2 in SequenceMatcher(None, before, after).get_opcodes()
            if tag != "equal"
        )
        return changed / max(len(before), len(after), 1)

    def added_headings(self, original: str, updated: str) -> int:
        """Count markdown heading lines present in *updated* but not in *original*."""
        def heads(text: str) -> set[str]:
            return {ln.strip() for ln in (text or "").splitlines() if ln.strip().startswith("#")}

        return len(heads(updated) - heads(original))

    def collapse_whitespace(self, text: str) -> str:
        """Normalize every whitespace run to a single space."""
        return " ".join((text or "").split())

    def _parse_json_candidate(self, text: str):
        """Return json.loads(text), or None when parsing fails."""
        try:
            return json.loads(text)
        except Exception:
            return None
|
||||
|
||||
class DocsContentComposer:
|
||||
def __init__(self, llm: AgentLlmService, targeting: FileTargeting) -> None:
|
||||
self._llm = llm
|
||||
self._targeting = targeting
|
||||
self._examples = DocsExamplesLoader()
|
||||
self._bundle = DocsBundleFormatter()
|
||||
|
||||
def load_rules_and_examples(self, _state: AgentGraphState) -> dict:
|
||||
return {"rules_bundle": self._examples.load_bundle()}
|
||||
|
||||
def plan_incremental_changes(self, state: AgentGraphState, analyzer: DocsContextAnalyzer) -> dict:
|
||||
target_path, target = analyzer.resolve_target_for_incremental(state)
|
||||
user_input = "\n\n".join(
|
||||
[
|
||||
"Strategy: incremental_update",
|
||||
f"User request:\n{state.get('message', '')}",
|
||||
f"Target path:\n{target_path}",
|
||||
f"Current target content:\n{self._bundle.shorten((target or {}).get('content', ''), 3000)}",
|
||||
f"RAG context:\n{self._bundle.shorten(state.get('rag_context', ''), 6000)}",
|
||||
f"Examples bundle:\n{state.get('rules_bundle', '')}",
|
||||
]
|
||||
)
|
||||
plan = self._llm.generate("docs_plan_sections", user_input)
|
||||
return {
|
||||
"doc_plan": plan,
|
||||
"target_path": target_path,
|
||||
"target_file_content": str((target or {}).get("content", "")),
|
||||
"target_file_hash": str((target or {}).get("content_hash", "")),
|
||||
}
|
||||
|
||||
def plan_new_document(self, state: AgentGraphState) -> dict:
|
||||
target_path = state.get("target_path", "").strip() or "docs/AGENT_DRAFT.md"
|
||||
user_input = "\n\n".join(
|
||||
[
|
||||
"Strategy: from_scratch",
|
||||
f"User request:\n{state.get('message', '')}",
|
||||
f"Target path:\n{target_path}",
|
||||
f"RAG context:\n{self._bundle.shorten(state.get('rag_context', ''), 6000)}",
|
||||
f"Examples bundle:\n{state.get('rules_bundle', '')}",
|
||||
]
|
||||
)
|
||||
plan = self._llm.generate("docs_plan_sections", user_input)
|
||||
return {"doc_plan": plan, "target_path": target_path, "target_file_content": "", "target_file_hash": ""}
|
||||
|
||||
def generate_doc_content(self, state: AgentGraphState) -> dict:
|
||||
user_input = "\n\n".join(
|
||||
[
|
||||
f"Strategy:\n{state.get('docs_strategy', 'from_scratch')}",
|
||||
f"User request:\n{state.get('message', '')}",
|
||||
f"Target path:\n{state.get('target_path', '')}",
|
||||
f"Document plan:\n{state.get('doc_plan', '')}",
|
||||
f"Current target content:\n{self._bundle.shorten(state.get('target_file_content', ''), 3500)}",
|
||||
f"RAG context:\n{self._bundle.shorten(state.get('rag_context', ''), 7000)}",
|
||||
f"Examples bundle:\n{state.get('rules_bundle', '')}",
|
||||
]
|
||||
)
|
||||
raw = self._llm.generate("docs_generation", user_input)
|
||||
bundle = self._bundle.parse_docs_bundle(raw)
|
||||
if bundle:
|
||||
first_content = str(bundle[0].get("content", "")).strip()
|
||||
return {"generated_docs_bundle": bundle, "generated_doc": first_content}
|
||||
content = self._bundle.normalize_file_output(raw)
|
||||
return {"generated_docs_bundle": [], "generated_doc": content}
|
||||
|
||||
def self_check(self, state: AgentGraphState) -> dict:
|
||||
attempts = int(state.get("validation_attempts", 0) or 0) + 1
|
||||
bundle = state.get("generated_docs_bundle", []) or []
|
||||
generated = state.get("generated_doc", "")
|
||||
if not generated.strip() and not bundle:
|
||||
return {
|
||||
"validation_attempts": attempts,
|
||||
"validation_passed": False,
|
||||
"validation_feedback": "Generated document is empty.",
|
||||
}
|
||||
strategy = state.get("docs_strategy", "from_scratch")
|
||||
if strategy == "from_scratch" and not self._bundle.bundle_has_required_structure(bundle):
|
||||
return {
|
||||
"validation_attempts": attempts,
|
||||
"validation_passed": False,
|
||||
"validation_feedback": "Bundle must include both docs/api and docs/logic for from_scratch strategy.",
|
||||
}
|
||||
if strategy == "incremental_update":
|
||||
if bundle and len(bundle) > 1 and not self._is_broad_rewrite_request(str(state.get("message", ""))):
|
||||
return {
|
||||
"validation_attempts": attempts,
|
||||
"validation_passed": False,
|
||||
"validation_feedback": "Incremental update should not touch multiple files without explicit broad rewrite request.",
|
||||
}
|
||||
original = str(state.get("target_file_content", ""))
|
||||
broad = self._is_broad_rewrite_request(str(state.get("message", "")))
|
||||
if original and generated:
|
||||
if self._bundle.collapse_whitespace(original) == self._bundle.collapse_whitespace(generated):
|
||||
return {
|
||||
"validation_attempts": attempts,
|
||||
"validation_passed": False,
|
||||
"validation_feedback": "Only formatting/whitespace changes detected.",
|
||||
}
|
||||
similarity = self._bundle.similarity(original, generated)
|
||||
change_ratio = self._bundle.line_change_ratio(original, generated)
|
||||
added_headings = self._bundle.added_headings(original, generated)
|
||||
min_similarity = 0.75 if broad else 0.9
|
||||
max_change_ratio = 0.7 if broad else 0.35
|
||||
if similarity < min_similarity:
|
||||
return {
|
||||
"validation_attempts": attempts,
|
||||
"validation_passed": False,
|
||||
"validation_feedback": f"Incremental update is too broad (similarity={similarity:.2f}).",
|
||||
}
|
||||
if change_ratio > max_change_ratio:
|
||||
return {
|
||||
"validation_attempts": attempts,
|
||||
"validation_passed": False,
|
||||
"validation_feedback": f"Incremental update changes too many lines (change_ratio={change_ratio:.2f}).",
|
||||
}
|
||||
if not broad and added_headings > 0:
|
||||
return {
|
||||
"validation_attempts": attempts,
|
||||
"validation_passed": False,
|
||||
"validation_feedback": "New section headings were added outside requested scope.",
|
||||
}
|
||||
|
||||
bundle_text = "\n".join([f"- {item.get('path', '')}" for item in bundle[:30]])
|
||||
user_input = "\n\n".join(
|
||||
[
|
||||
f"Strategy:\n{strategy}",
|
||||
f"User request:\n{state.get('message', '')}",
|
||||
f"Document plan:\n{state.get('doc_plan', '')}",
|
||||
f"Generated file paths:\n{bundle_text or '(single-file mode)'}",
|
||||
f"Generated document:\n{generated}",
|
||||
]
|
||||
)
|
||||
raw = self._llm.generate("docs_self_check", user_input)
|
||||
passed = DocsContextAnalyzer.parse_bool_marker(raw, "pass", default=False)
|
||||
feedback = DocsContextAnalyzer.parse_text_marker(raw, "feedback", default="No validation feedback provided.")
|
||||
return {"validation_attempts": attempts, "validation_passed": passed, "validation_feedback": feedback}
|
||||
|
||||
def build_changeset(self, state: AgentGraphState) -> dict:
    """Convert generated documentation into a changeset of create/update items.

    Preference order:
    1. The structured multi-file bundle in state["generated_docs_bundle"].
       For the "from_scratch" strategy, when the bundle lacks the required
       structure (per self._bundle.bundle_has_required_structure), a fallback
       bundle is synthesized from the plain generated text.
    2. Otherwise a single-file change at state["target_path"] (default
       "docs/AGENT_DRAFT.md") built from state["generated_doc"].

    Paths already present in state["files_map"] (resolved via FileTargeting,
    which also matches case-insensitively) become "update" items carrying the
    stored content hash; unknown paths become "create" items.

    Returns a partial state update: {"changeset": [ChangeItem, ...]}.
    """
    files_map = state.get("files_map", {}) or {}
    bundle = state.get("generated_docs_bundle", []) or []
    strategy = state.get("docs_strategy", "from_scratch")
    if strategy == "from_scratch" and not self._bundle.bundle_has_required_structure(bundle):
        LOGGER.warning(
            "build_changeset fallback bundle used: strategy=%s bundle_items=%s",
            strategy,
            len(bundle),
        )
        bundle = self._build_fallback_bundle_from_text(state.get("generated_doc", ""))
    if bundle:
        changes: list[ChangeItem] = []
        for item in bundle:
            # Normalize Windows separators; skip entries without a usable path/content.
            path = str(item.get("path", "")).replace("\\", "/").strip()
            content = str(item.get("content", ""))
            if not path or not content.strip():
                continue
            target = self._targeting.lookup_file(files_map, path)
            reason = str(item.get("reason", "")).strip() or f"Documentation {strategy}: generated file from structured bundle."
            if target and target.get("content_hash"):
                # Existing file: emit an update pinned to the current content hash.
                changes.append(
                    ChangeItem(
                        op="update",
                        path=str(target.get("path") or path),
                        base_hash=str(target.get("content_hash", "")),
                        proposed_content=content,
                        reason=reason,
                    )
                )
            else:
                changes.append(
                    ChangeItem(
                        op="create",
                        path=path,
                        proposed_content=content,
                        reason=reason,
                    )
                )
        if changes:
            return {"changeset": changes}

    # Single-file fallback when no bundle items produced usable changes.
    target_path = (state.get("target_path", "") or "").strip() or "docs/AGENT_DRAFT.md"
    target = self._targeting.lookup_file(files_map, target_path)
    content = state.get("generated_doc", "")
    if target and target.get("content_hash"):
        change = ChangeItem(
            op="update",
            path=str(target.get("path") or target_path),
            base_hash=str(target.get("content_hash", "")),
            proposed_content=content,
            reason=f"Documentation {strategy}: update existing document increment.",
        )
    else:
        change = ChangeItem(
            op="create",
            path=target_path,
            proposed_content=content,
            reason=f"Documentation {strategy}: create document from current project context.",
        )
    return {"changeset": [change]}
|
||||
|
||||
def build_execution_summary(self, state: AgentGraphState) -> dict:
    """Produce the user-facing answer summarizing the documentation changes.

    Returns a fixed Russian message when the changeset is empty; otherwise
    asks the LLM ("docs_execution_summary" prompt) for a summary and falls
    back to a deterministic locally-built text when the call raises or
    returns an empty string.

    Returns a partial state update: {"answer": <summary text>}.
    """
    changeset = state.get("changeset", []) or []
    if not changeset:
        return {"answer": "Документация не была изменена: итоговый changeset пуст."}

    file_lines = self._format_changed_files(changeset)
    user_input = "\n\n".join(
        [
            f"User request:\n{state.get('message', '')}",
            f"Documentation strategy:\n{state.get('docs_strategy', 'from_scratch')}",
            f"Document plan:\n{state.get('doc_plan', '')}",
            f"Validation feedback:\n{state.get('validation_feedback', '')}",
            f"Changed files:\n{file_lines}",
        ]
    )
    try:
        summary = self._llm.generate("docs_execution_summary", user_input).strip()
    except Exception:
        # Best-effort: an LLM failure must not break result delivery.
        summary = ""
    if not summary:
        summary = self._build_fallback_summary(state, file_lines)
    return {"answer": summary}
|
||||
|
||||
def _build_fallback_bundle_from_text(self, text: str) -> list[dict]:
|
||||
content = (text or "").strip()
|
||||
if not content:
|
||||
content = (
|
||||
"# Project Documentation Draft\n\n"
|
||||
"## Overview\n"
|
||||
"Documentation draft was generated, but structured sections require уточнение.\n"
|
||||
)
|
||||
return [
|
||||
{
|
||||
"path": "docs/logic/project_overview.md",
|
||||
"content": content,
|
||||
"reason": "Fallback: generated structured logic document from non-JSON model output.",
|
||||
},
|
||||
{
|
||||
"path": "docs/api/README.md",
|
||||
"content": (
|
||||
"# API Methods\n\n"
|
||||
"This file is a fallback placeholder for API method documentation.\n\n"
|
||||
"## Next Step\n"
|
||||
"- Add one file per API method under `docs/api/`.\n"
|
||||
),
|
||||
"reason": "Fallback: ensure required docs/api structure exists.",
|
||||
},
|
||||
]
|
||||
|
||||
def _format_changed_files(self, changeset: list[ChangeItem]) -> str:
|
||||
lines: list[str] = []
|
||||
for item in changeset[:30]:
|
||||
lines.append(f"- {item.op.value} {item.path}: {item.reason}")
|
||||
return "\n".join(lines)
|
||||
|
||||
def _build_fallback_summary(self, state: AgentGraphState, file_lines: str) -> str:
|
||||
request = (state.get("message", "") or "").strip()
|
||||
return "\n".join(
|
||||
[
|
||||
"Выполненные действия:",
|
||||
f"- Обработан запрос: {request or '(пустой запрос)'}",
|
||||
f"- Применена стратегия документации: {state.get('docs_strategy', 'from_scratch')}",
|
||||
"- Сформирован и проверен changeset для документации.",
|
||||
"",
|
||||
"Измененные файлы:",
|
||||
file_lines or "- (нет изменений)",
|
||||
]
|
||||
)
|
||||
|
||||
def _is_broad_rewrite_request(self, message: str) -> bool:
|
||||
low = (message or "").lower()
|
||||
markers = (
|
||||
"перепиши",
|
||||
"полностью",
|
||||
"целиком",
|
||||
"с нуля",
|
||||
"full rewrite",
|
||||
"rewrite all",
|
||||
"реорганизуй",
|
||||
)
|
||||
return any(marker in low for marker in markers)
|
||||
28
app/modules/agent/engine/graphs/file_targeting.py
Normal file
28
app/modules/agent/engine/graphs/file_targeting.py
Normal file
@@ -0,0 +1,28 @@
|
||||
import re
|
||||
|
||||
|
||||
class FileTargeting:
|
||||
_path_pattern = re.compile(r"([A-Za-z0-9_.\-/]+?\.[A-Za-z0-9_]+)")
|
||||
|
||||
def extract_target_path(self, message: str) -> str | None:
|
||||
text = (message or "").replace("\\", "/")
|
||||
candidates = self._path_pattern.findall(text)
|
||||
if not candidates:
|
||||
return None
|
||||
for candidate in candidates:
|
||||
cleaned = candidate.strip("`'\".,:;()[]{}")
|
||||
if "/" in cleaned or cleaned.startswith("."):
|
||||
return cleaned
|
||||
return candidates[0].strip("`'\".,:;()[]{}")
|
||||
|
||||
def lookup_file(self, files_map: dict[str, dict], path: str | None) -> dict | None:
|
||||
if not path:
|
||||
return None
|
||||
normalized = path.replace("\\", "/")
|
||||
if normalized in files_map:
|
||||
return files_map[normalized]
|
||||
low = normalized.lower()
|
||||
for key, value in files_map.items():
|
||||
if key.lower() == low:
|
||||
return value
|
||||
return None
|
||||
44
app/modules/agent/engine/graphs/progress.py
Normal file
44
app/modules/agent/engine/graphs/progress.py
Normal file
@@ -0,0 +1,44 @@
|
||||
from collections.abc import Awaitable, Callable
|
||||
import inspect
|
||||
import asyncio
|
||||
|
||||
from app.modules.agent.engine.graphs.progress_registry import progress_registry
|
||||
from app.modules.agent.engine.graphs.state import AgentGraphState
|
||||
|
||||
ProgressCallback = Callable[[str, str, str, dict | None], Awaitable[None] | None]
|
||||
|
||||
|
||||
async def emit_progress(
    state: AgentGraphState,
    *,
    stage: str,
    message: str,
    kind: str = "task_progress",
    meta: dict | None = None,
) -> None:
    """Send a progress event for *state* to its registered callback, if any.

    The callback is looked up via state["progress_key"] in the process-wide
    progress_registry; a missing key or unregistered callback is a silent
    no-op. Both sync and async callbacks are supported: an awaitable result
    is awaited here.

    Args:
        state: Graph state carrying the optional "progress_key".
        stage: Machine-readable stage identifier (e.g. "graph.x.y").
        message: Human-readable progress text.
        kind: Event kind label; defaults to "task_progress".
        meta: Optional extra payload; an empty dict is passed when None.
    """
    callback = progress_registry.get(state.get("progress_key"))
    if callback is None:
        return
    result = callback(stage, message, kind, meta or {})
    if inspect.isawaitable(result):
        await result
|
||||
|
||||
|
||||
def emit_progress_sync(
    state: AgentGraphState,
    *,
    stage: str,
    message: str,
    kind: str = "task_progress",
    meta: dict | None = None,
) -> None:
    """Synchronous variant of emit_progress for non-async graph nodes.

    Sync callbacks run inline. If the callback returns an awaitable, it is
    scheduled on the currently running event loop; when no loop is running
    the awaitable is dropped silently.
    """
    callback = progress_registry.get(state.get("progress_key"))
    if callback is None:
        return
    result = callback(stage, message, kind, meta or {})
    if inspect.isawaitable(result):
        try:
            loop = asyncio.get_running_loop()
            # NOTE(review): the created task is not referenced anywhere, so it
            # may be garbage-collected before completing (see asyncio docs on
            # create_task) — consider keeping a reference; confirm intent.
            loop.create_task(result)
        except RuntimeError:
            # No running loop: the progress event is intentionally best-effort.
            pass
|
||||
27
app/modules/agent/engine/graphs/progress_registry.py
Normal file
27
app/modules/agent/engine/graphs/progress_registry.py
Normal file
@@ -0,0 +1,27 @@
|
||||
from collections.abc import Awaitable, Callable
|
||||
from threading import Lock
|
||||
|
||||
ProgressCallback = Callable[[str, str, str, dict | None], Awaitable[None] | None]
|
||||
|
||||
|
||||
class ProgressRegistry:
|
||||
def __init__(self) -> None:
|
||||
self._items: dict[str, ProgressCallback] = {}
|
||||
self._lock = Lock()
|
||||
|
||||
def register(self, key: str, callback: ProgressCallback) -> None:
|
||||
with self._lock:
|
||||
self._items[key] = callback
|
||||
|
||||
def get(self, key: str | None) -> ProgressCallback | None:
|
||||
if not key:
|
||||
return None
|
||||
with self._lock:
|
||||
return self._items.get(key)
|
||||
|
||||
def unregister(self, key: str) -> None:
|
||||
with self._lock:
|
||||
self._items.pop(key, None)
|
||||
|
||||
|
||||
progress_registry = ProgressRegistry()
|
||||
79
app/modules/agent/engine/graphs/project_edits_graph.py
Normal file
79
app/modules/agent/engine/graphs/project_edits_graph.py
Normal file
@@ -0,0 +1,79 @@
|
||||
from langgraph.graph import END, START, StateGraph
|
||||
|
||||
from app.modules.agent.engine.graphs.progress import emit_progress_sync
|
||||
from app.modules.agent.engine.graphs.project_edits_logic import ProjectEditsLogic
|
||||
from app.modules.agent.engine.graphs.state import AgentGraphState
|
||||
from app.modules.agent.llm import AgentLlmService
|
||||
|
||||
|
||||
class ProjectEditsGraphFactory:
    """Assembles the LangGraph pipeline for targeted project file edits.

    Pipeline: collect_context -> plan_changes -> generate_changeset ->
    self_check -> build_result, where self_check conditionally loops back to
    generate_changeset until validation passes or the attempt budget is spent.
    Each node emits a progress event and delegates to ProjectEditsLogic.
    """

    # Maximum self_check passes before the result is emitted regardless.
    _max_validation_attempts = 2

    def __init__(self, llm: AgentLlmService) -> None:
        self._logic = ProjectEditsLogic(llm)

    def build(self, checkpointer=None):
        """Compile and return the edits graph.

        Args:
            checkpointer: Optional LangGraph checkpointer passed to compile().
        """
        graph = StateGraph(AgentGraphState)
        graph.add_node("collect_context", self._collect_context)
        graph.add_node("plan_changes", self._plan_changes)
        graph.add_node("generate_changeset", self._generate_changeset)
        graph.add_node("self_check", self._self_check)
        graph.add_node("build_result", self._build_result)

        graph.add_edge(START, "collect_context")
        graph.add_edge("collect_context", "plan_changes")
        graph.add_edge("plan_changes", "generate_changeset")
        graph.add_edge("generate_changeset", "self_check")
        graph.add_conditional_edges(
            "self_check",
            self._route_after_self_check,
            {"retry": "generate_changeset", "ready": "build_result"},
        )
        graph.add_edge("build_result", END)
        return graph.compile(checkpointer=checkpointer)

    def _collect_context(self, state: AgentGraphState) -> dict:
        """Node: gather relevant project files for the requested edits."""
        emit_progress_sync(
            state,
            stage="graph.project_edits.collect_context",
            message="Собираю контекст и релевантные файлы для правок.",
        )
        return self._logic.collect_context(state)

    def _plan_changes(self, state: AgentGraphState) -> dict:
        """Node: decide which files to change and why."""
        emit_progress_sync(
            state,
            stage="graph.project_edits.plan_changes",
            message="Определяю, что именно нужно изменить и в каких файлах.",
        )
        return self._logic.plan_changes(state)

    def _generate_changeset(self, state: AgentGraphState) -> dict:
        """Node: produce the proposed per-file edits."""
        emit_progress_sync(
            state,
            stage="graph.project_edits.generate_changeset",
            message="Формирую предлагаемые правки по выбранным файлам.",
        )
        return self._logic.generate_changeset(state)

    def _self_check(self, state: AgentGraphState) -> dict:
        """Node: validate that the edits match the request and stay in scope."""
        emit_progress_sync(
            state,
            stage="graph.project_edits.self_check",
            message="Проверяю, что правки соответствуют запросу и не трогают лишнее.",
        )
        return self._logic.self_check(state)

    def _build_result(self, state: AgentGraphState) -> dict:
        """Node: assemble the final changeset and summary answer."""
        emit_progress_sync(
            state,
            stage="graph.project_edits.build_result",
            message="Формирую итоговый changeset и краткий обзор.",
        )
        return self._logic.build_result(state)

    def _route_after_self_check(self, state: AgentGraphState) -> str:
        """Retry generation on failed validation until the attempt budget is spent."""
        if state.get("validation_passed"):
            return "ready"
        attempts = int(state.get("validation_attempts", 0) or 0)
        return "ready" if attempts >= self._max_validation_attempts else "retry"
|
||||
271
app/modules/agent/engine/graphs/project_edits_logic.py
Normal file
271
app/modules/agent/engine/graphs/project_edits_logic.py
Normal file
@@ -0,0 +1,271 @@
|
||||
import json
|
||||
from difflib import SequenceMatcher
|
||||
import re
|
||||
|
||||
from app.modules.agent.engine.graphs.file_targeting import FileTargeting
|
||||
from app.modules.agent.engine.graphs.state import AgentGraphState
|
||||
from app.modules.agent.llm import AgentLlmService
|
||||
from app.schemas.changeset import ChangeItem
|
||||
|
||||
|
||||
class ProjectEditsSupport:
|
||||
def __init__(self, max_context_files: int = 12, max_preview_chars: int = 2500) -> None:
|
||||
self._max_context_files = max_context_files
|
||||
self._max_preview_chars = max_preview_chars
|
||||
|
||||
def pick_relevant_files(self, message: str, files_map: dict[str, dict]) -> list[dict]:
|
||||
tokens = {x for x in (message or "").lower().replace("/", " ").split() if len(x) >= 4}
|
||||
scored: list[tuple[int, dict]] = []
|
||||
for path, payload in files_map.items():
|
||||
content = str(payload.get("content", ""))
|
||||
score = 0
|
||||
low_path = path.lower()
|
||||
low_content = content.lower()
|
||||
for token in tokens:
|
||||
if token in low_path:
|
||||
score += 3
|
||||
if token in low_content:
|
||||
score += 1
|
||||
scored.append((score, self.as_candidate(payload)))
|
||||
scored.sort(key=lambda x: (-x[0], x[1]["path"]))
|
||||
return [item for _, item in scored[: self._max_context_files]]
|
||||
|
||||
def as_candidate(self, payload: dict) -> dict:
|
||||
return {
|
||||
"path": str(payload.get("path", "")).replace("\\", "/"),
|
||||
"content": str(payload.get("content", "")),
|
||||
"content_hash": str(payload.get("content_hash", "")),
|
||||
}
|
||||
|
||||
def build_summary(self, state: AgentGraphState, changeset: list[ChangeItem]) -> str:
|
||||
if not changeset:
|
||||
return "Правки не сформированы: changeset пуст."
|
||||
lines = [
|
||||
"Выполненные действия:",
|
||||
f"- Проанализирован запрос: {state.get('message', '')}",
|
||||
"- Собран контекст проекта и выбран набор файлов для правок.",
|
||||
f"- Проведен self-check: {state.get('validation_feedback', 'без замечаний')}",
|
||||
"",
|
||||
"Измененные файлы:",
|
||||
]
|
||||
for item in changeset[:30]:
|
||||
lines.append(f"- {item.op.value} {item.path}: {item.reason}")
|
||||
return "\n".join(lines)
|
||||
|
||||
def normalize_file_output(self, text: str) -> str:
|
||||
value = (text or "").strip()
|
||||
if value.startswith("```") and value.endswith("```"):
|
||||
lines = value.splitlines()
|
||||
if len(lines) >= 3:
|
||||
return "\n".join(lines[1:-1]).strip()
|
||||
return value
|
||||
|
||||
def parse_json(self, raw: str):
|
||||
text = self.normalize_file_output(raw)
|
||||
try:
|
||||
return json.loads(text)
|
||||
except Exception:
|
||||
return {}
|
||||
|
||||
def similarity(self, original: str, updated: str) -> float:
|
||||
return SequenceMatcher(None, original or "", updated or "").ratio()
|
||||
|
||||
def shorten(self, text: str, max_chars: int | None = None) -> str:
|
||||
limit = max_chars or self._max_preview_chars
|
||||
value = (text or "").strip()
|
||||
if len(value) <= limit:
|
||||
return value
|
||||
return value[:limit].rstrip() + "\n...[truncated]"
|
||||
|
||||
def collapse_whitespace(self, text: str) -> str:
|
||||
return re.sub(r"\s+", " ", (text or "").strip())
|
||||
|
||||
def line_change_ratio(self, original: str, updated: str) -> float:
|
||||
orig_lines = (original or "").splitlines()
|
||||
new_lines = (updated or "").splitlines()
|
||||
if not orig_lines and not new_lines:
|
||||
return 0.0
|
||||
matcher = SequenceMatcher(None, orig_lines, new_lines)
|
||||
changed = 0
|
||||
for tag, i1, i2, j1, j2 in matcher.get_opcodes():
|
||||
if tag == "equal":
|
||||
continue
|
||||
changed += max(i2 - i1, j2 - j1)
|
||||
total = max(len(orig_lines), len(new_lines), 1)
|
||||
return changed / total
|
||||
|
||||
def added_headings(self, original: str, updated: str) -> int:
|
||||
old_heads = {line.strip() for line in (original or "").splitlines() if line.strip().startswith("#")}
|
||||
new_heads = {line.strip() for line in (updated or "").splitlines() if line.strip().startswith("#")}
|
||||
return len(new_heads - old_heads)
|
||||
|
||||
|
||||
class ProjectEditsLogic:
    """Implements the nodes of the project-edits graph.

    Orchestrates: relevant-file collection, an LLM edit plan
    ("project_edits_plan"), per-file content generation
    ("project_edits_apply"), scope validation with diff metrics plus an LLM
    self-check ("project_edits_self_check"), and the final summary.
    """

    def __init__(self, llm: AgentLlmService) -> None:
        self._llm = llm
        self._targeting = FileTargeting()
        self._support = ProjectEditsSupport()

    def collect_context(self, state: AgentGraphState) -> dict:
        """Select up to 12 context files for the edit, preferring an explicitly named path.

        A path mentioned in the message (via FileTargeting) is prepended to the
        keyword-ranked candidates when not already among them. Also resets the
        validation attempt counter.
        """
        message = state.get("message", "")
        files_map = state.get("files_map", {}) or {}
        requested_path = self._targeting.extract_target_path(message)
        preferred = self._targeting.lookup_file(files_map, requested_path) if requested_path else None
        candidates = self._support.pick_relevant_files(message, files_map)
        if preferred and not any(x["path"] == preferred.get("path") for x in candidates):
            candidates.insert(0, self._support.as_candidate(preferred))
        return {
            "edits_requested_path": str((preferred or {}).get("path") or (requested_path or "")).strip(),
            "edits_context_files": candidates[:12],
            "validation_attempts": 0,
        }

    def plan_changes(self, state: AgentGraphState) -> dict:
        """Ask the LLM for an edit plan: up to 8 {path, reason} entries.

        Sends the request, the requested path and truncated previews of the
        context files as JSON. Malformed entries are skipped; an empty plan
        falls back to the requested path or docs/REQUESTED_UPDATES.md.
        """
        context_files = state.get("edits_context_files", []) or []
        user_input = json.dumps(
            {
                "request": state.get("message", ""),
                "requested_path": state.get("edits_requested_path", ""),
                "context_files": [
                    {
                        "path": item.get("path", ""),
                        "content_preview": self._support.shorten(str(item.get("content", ""))),
                    }
                    for item in context_files
                ],
            },
            ensure_ascii=False,
        )
        parsed = self._support.parse_json(self._llm.generate("project_edits_plan", user_input))
        files = parsed.get("files", []) if isinstance(parsed, dict) else []
        planned: list[dict] = []
        for item in files[:8] if isinstance(files, list) else []:
            if not isinstance(item, dict):
                continue
            path = str(item.get("path", "")).replace("\\", "/").strip()
            if not path:
                continue
            planned.append(
                {
                    "path": path,
                    "reason": str(item.get("reason", "")).strip() or "Requested user adjustment.",
                }
            )
        if not planned:
            fallback_path = state.get("edits_requested_path", "").strip() or "docs/REQUESTED_UPDATES.md"
            planned = [{"path": fallback_path, "reason": "Fallback path from user request."}]
        return {"edits_plan": planned}

    def generate_changeset(self, state: AgentGraphState) -> dict:
        """Generate new content for each planned file and build ChangeItems.

        For each plan entry the LLM ("project_edits_apply") receives the
        current content, prior validation feedback and truncated RAG/Confluence
        context. Empty output and no-op changes (identical or
        whitespace-only diffs) are skipped. Existing files become "update"
        items pinned to their content hash; new paths become "create" items.
        """
        files_map = state.get("files_map", {}) or {}
        planned = state.get("edits_plan", []) or []
        changeset: list[ChangeItem] = []
        for item in planned:
            path = str(item.get("path", "")).replace("\\", "/").strip()
            if not path:
                continue
            current = self._targeting.lookup_file(files_map, path)
            current_content = str((current or {}).get("content", ""))
            user_input = json.dumps(
                {
                    "request": state.get("message", ""),
                    "path": path,
                    "reason": item.get("reason", ""),
                    "current_content": current_content,
                    "previous_validation_feedback": state.get("validation_feedback", ""),
                    "rag_context": self._support.shorten(state.get("rag_context", ""), 5000),
                    "confluence_context": self._support.shorten(state.get("confluence_context", ""), 5000),
                    "instruction": "Modify only required parts and preserve unrelated content unchanged.",
                },
                ensure_ascii=False,
            )
            raw = self._llm.generate("project_edits_apply", user_input).strip()
            normalized = self._support.normalize_file_output(raw)
            if not normalized:
                continue
            if current:
                # Skip no-op results: identical content or formatting-only diffs.
                if normalized == current_content:
                    continue
                if self._support.collapse_whitespace(normalized) == self._support.collapse_whitespace(current_content):
                    continue
            reason = str(item.get("reason", "")).strip() or "User-requested update."
            if current and current.get("content_hash"):
                changeset.append(
                    ChangeItem(
                        op="update",
                        path=str(current.get("path") or path),
                        base_hash=str(current.get("content_hash", "")),
                        proposed_content=normalized,
                        reason=reason,
                    )
                )
            else:
                changeset.append(ChangeItem(op="create", path=path, proposed_content=normalized, reason=reason))
        return {"changeset": changeset}

    def self_check(self, state: AgentGraphState) -> dict:
        """Validate the changeset scope; returns attempts, pass flag and feedback.

        Deterministic gates run first on every "update" item: similarity must
        stay above, and line-change ratio below, thresholds that are relaxed
        for broad-rewrite requests; new headings are rejected for non-broad
        requests. If all gates pass, an LLM self-check delivers the verdict.
        """
        attempts = int(state.get("validation_attempts", 0) or 0) + 1
        changeset = state.get("changeset", []) or []
        files_map = state.get("files_map", {}) or {}
        is_broad_rewrite = self._is_broad_rewrite_request(str(state.get("message", "")))
        if not changeset:
            return {"validation_attempts": attempts, "validation_passed": False, "validation_feedback": "Generated changeset is empty."}

        for item in changeset:
            if item.op.value != "update":
                continue
            source = self._targeting.lookup_file(files_map, item.path)
            if not source:
                continue
            original = str(source.get("content", ""))
            proposed = item.proposed_content or ""
            similarity = self._support.similarity(original, proposed)
            change_ratio = self._support.line_change_ratio(original, proposed)
            headings_added = self._support.added_headings(original, proposed)
            # Broad rewrites are allowed to diverge more from the original.
            min_similarity = 0.75 if is_broad_rewrite else 0.9
            max_change_ratio = 0.7 if is_broad_rewrite else 0.35
            if similarity < min_similarity:
                return {
                    "validation_attempts": attempts,
                    "validation_passed": False,
                    "validation_feedback": f"File {item.path} changed too aggressively (similarity={similarity:.2f}).",
                }
            if change_ratio > max_change_ratio:
                return {
                    "validation_attempts": attempts,
                    "validation_passed": False,
                    "validation_feedback": f"File {item.path} changed too broadly (change_ratio={change_ratio:.2f}).",
                }
            if not is_broad_rewrite and headings_added > 0:
                return {
                    "validation_attempts": attempts,
                    "validation_passed": False,
                    "validation_feedback": f"File {item.path} adds new sections outside requested scope.",
                }

        payload = {
            "request": state.get("message", ""),
            "changeset": [{"op": x.op.value, "path": x.path, "reason": x.reason} for x in changeset[:20]],
            "rule": "Changes must match request and avoid unrelated modifications.",
        }
        parsed = self._support.parse_json(self._llm.generate("project_edits_self_check", json.dumps(payload, ensure_ascii=False)))
        passed = bool(parsed.get("pass")) if isinstance(parsed, dict) else False
        feedback = str(parsed.get("feedback", "")).strip() if isinstance(parsed, dict) else ""
        return {"validation_attempts": attempts, "validation_passed": passed, "validation_feedback": feedback or "No feedback provided."}

    def build_result(self, state: AgentGraphState) -> dict:
        """Return the final changeset alongside the human-readable summary."""
        changeset = state.get("changeset", []) or []
        return {"changeset": changeset, "answer": self._support.build_summary(state, changeset)}

    def _is_broad_rewrite_request(self, message: str) -> bool:
        """Heuristic: does the request ask for a full rewrite/reorganization?"""
        low = (message or "").lower()
        markers = (
            "перепиши",
            "полностью",
            "целиком",
            "с нуля",
            "full rewrite",
            "rewrite all",
            "реорганизуй документ",
        )
        return any(marker in low for marker in markers)
|
||||
38
app/modules/agent/engine/graphs/project_qa_graph.py
Normal file
38
app/modules/agent/engine/graphs/project_qa_graph.py
Normal file
@@ -0,0 +1,38 @@
|
||||
from langgraph.graph import END, START, StateGraph
|
||||
|
||||
from app.modules.agent.engine.graphs.progress import emit_progress_sync
|
||||
from app.modules.agent.engine.graphs.state import AgentGraphState
|
||||
from app.modules.agent.llm import AgentLlmService
|
||||
|
||||
|
||||
class ProjectQaGraphFactory:
    """Builds the single-node LangGraph pipeline that answers project questions."""

    def __init__(self, llm: AgentLlmService) -> None:
        self._llm = llm

    def build(self, checkpointer=None):
        """Compile the one-step QA graph.

        Args:
            checkpointer: Optional LangGraph checkpointer passed to compile().
        """
        graph = StateGraph(AgentGraphState)
        graph.add_node("answer", self._answer_node)
        graph.add_edge(START, "answer")
        graph.add_edge("answer", END)
        return graph.compile(checkpointer=checkpointer)

    def _answer_node(self, state: AgentGraphState) -> dict:
        """Node: answer from the user message plus RAG and Confluence context.

        Emits progress events before and after the "project_answer" LLM call.
        """
        emit_progress_sync(
            state,
            stage="graph.project_qa.answer",
            message="Готовлю ответ по контексту текущего проекта.",
        )
        user_input = "\n\n".join(
            [
                f"User request:\n{state.get('message', '')}",
                f"RAG context:\n{state.get('rag_context', '')}",
                f"Confluence context:\n{state.get('confluence_context', '')}",
            ]
        )
        answer = self._llm.generate("project_answer", user_input)
        emit_progress_sync(
            state,
            stage="graph.project_qa.answer.done",
            message="Ответ по проекту сформирован.",
        )
        return {"answer": answer}
|
||||
32
app/modules/agent/engine/graphs/state.py
Normal file
32
app/modules/agent/engine/graphs/state.py
Normal file
@@ -0,0 +1,32 @@
|
||||
from typing import TypedDict
|
||||
|
||||
from app.schemas.changeset import ChangeItem
|
||||
|
||||
|
||||
class AgentGraphState(TypedDict, total=False):
    """Shared state flowing through the agent LangGraph pipelines.

    total=False makes every key optional: nodes read values with .get() and
    return partial dicts that the graph runtime merges back into the state.
    """

    # Request identity and routing.
    task_id: str
    project_id: str
    message: str
    progress_key: str  # lookup key into the progress callback registry

    # Retrieved context fed into prompts.
    rag_context: str
    confluence_context: str
    files_map: dict[str, dict]  # project files keyed by path

    # Documentation-generation pipeline fields.
    docs_candidates: list[dict]
    target_path: str
    target_file_content: str
    target_file_hash: str
    existing_docs_detected: bool
    existing_docs_summary: str
    docs_strategy: str  # defaults to "from_scratch" in consumers
    rules_bundle: str
    doc_plan: str
    generated_doc: str  # single-document output
    generated_docs_bundle: list[dict]  # multi-file output: {path, content, reason}

    # Self-check / validation loop.
    validation_passed: bool
    validation_feedback: str
    validation_attempts: int

    # Final outputs.
    answer: str
    changeset: list[ChangeItem]

    # Project-edits pipeline fields.
    edits_requested_path: str
    edits_context_files: list[dict]
    edits_plan: list[dict]
||||
34
app/modules/agent/engine/router/__init__.py
Normal file
34
app/modules/agent/engine/router/__init__.py
Normal file
@@ -0,0 +1,34 @@
|
||||
from pathlib import Path
|
||||
|
||||
from app.modules.agent.engine.graphs import (
|
||||
BaseGraphFactory,
|
||||
DocsGraphFactory,
|
||||
ProjectEditsGraphFactory,
|
||||
ProjectQaGraphFactory,
|
||||
)
|
||||
from app.modules.agent.repository import AgentRepository
|
||||
from app.modules.agent.llm import AgentLlmService
|
||||
from app.modules.agent.engine.router.context_store import RouterContextStore
|
||||
from app.modules.agent.engine.router.intent_classifier import IntentClassifier
|
||||
from app.modules.agent.engine.router.registry import IntentRegistry
|
||||
from app.modules.agent.engine.router.router_service import RouterService
|
||||
|
||||
|
||||
def build_router_service(llm: AgentLlmService, agent_repository: AgentRepository) -> RouterService:
    """Wire up the intent router from its parts.

    Registers the graph factories for the supported routes:
    - default/general  -> BaseGraphFactory
    - project/qa       -> ProjectQaGraphFactory
    - project/edits    -> ProjectEditsGraphFactory
    - docs/generation  -> DocsGraphFactory

    The IntentRegistry is pointed at intents_registry.yaml located next to
    this module; classification uses the LLM-backed IntentClassifier and
    per-conversation context is persisted via RouterContextStore.
    """
    registry_path = Path(__file__).resolve().parent / "intents_registry.yaml"
    registry = IntentRegistry(registry_path=registry_path)
    registry.register("default", "general", BaseGraphFactory(llm).build)
    registry.register("project", "qa", ProjectQaGraphFactory(llm).build)
    registry.register("project", "edits", ProjectEditsGraphFactory(llm).build)
    registry.register("docs", "generation", DocsGraphFactory(llm).build)

    classifier = IntentClassifier(llm)
    context_store = RouterContextStore(agent_repository)
    return RouterService(
        registry=registry,
        classifier=classifier,
        context_store=context_store,
    )
|
||||
|
||||
|
||||
__all__ = ["build_router_service", "IntentRegistry", "RouterService"]
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
29
app/modules/agent/engine/router/context_store.py
Normal file
29
app/modules/agent/engine/router/context_store.py
Normal file
@@ -0,0 +1,29 @@
|
||||
from app.modules.agent.repository import AgentRepository
|
||||
from app.modules.agent.engine.router.schemas import RouterContext
|
||||
|
||||
|
||||
class RouterContextStore:
    """Thin persistence adapter for per-conversation router context."""

    def __init__(self, repository: AgentRepository) -> None:
        self._repository = repository

    def get(self, conversation_key: str) -> RouterContext:
        """Load the router context stored for *conversation_key*."""
        return self._repository.get_router_context(conversation_key)

    def update(
        self,
        conversation_key: str,
        *,
        domain_id: str,
        process_id: str,
        user_message: str,
        assistant_message: str,
        max_history: int = 10,
    ) -> None:
        """Persist the latest routing decision and message pair via the repository."""
        self._repository.update_router_context(
            conversation_key,
            domain_id=domain_id,
            process_id=process_id,
            user_message=user_message,
            assistant_message=assistant_message,
            max_history=max_history,
        )
|
||||
191
app/modules/agent/engine/router/intent_classifier.py
Normal file
191
app/modules/agent/engine/router/intent_classifier.py
Normal file
@@ -0,0 +1,191 @@
|
||||
import json
|
||||
import re
|
||||
|
||||
from app.modules.agent.engine.router.schemas import RouteDecision, RouterContext
|
||||
from app.modules.agent.llm import AgentLlmService
|
||||
|
||||
|
||||
class IntentClassifier:
|
||||
_short_confirmations = {"да", "ок", "делай", "поехали", "запускай"}
|
||||
_route_mapping = {
|
||||
"default/general": ("default", "general"),
|
||||
"project/qa": ("project", "qa"),
|
||||
"project/edits": ("project", "edits"),
|
||||
"docs/generation": ("docs", "generation"),
|
||||
}
|
||||
|
||||
def __init__(self, llm: AgentLlmService) -> None:
|
||||
self._llm = llm
|
||||
|
||||
def classify(self, user_message: str, context: RouterContext, mode: str = "auto") -> RouteDecision:
    """Pick the (domain, process) route for *user_message*.

    Resolution order:
    1. Explicit *mode* override from the API (see _from_mode).
    2. Short confirmations ("да", "ок", ...) reuse the previous route from
       context.last_routing.
    3. Deterministic keyword routing (_deterministic_route).
    4. LLM-based classification (_classify_with_llm).
    5. Fallback to default/general with confidence 0.8.
    """
    forced = self._from_mode(mode)
    if forced:
        return forced

    text = (user_message or "").strip().lower()
    if text in self._short_confirmations and context.last_routing:
        return RouteDecision(
            domain_id=context.last_routing["domain_id"],
            process_id=context.last_routing["process_id"],
            confidence=1.0,
            reason="short_confirmation",
            use_previous=True,
        )

    deterministic = self._deterministic_route(text)
    if deterministic:
        return deterministic

    llm_decision = self._classify_with_llm(user_message, context)
    if llm_decision:
        return llm_decision

    return RouteDecision(
        domain_id="default",
        process_id="general",
        confidence=0.8,
        reason="default",
    )
|
||||
|
||||
def _from_mode(self, mode: str) -> RouteDecision | None:
    """Translate an explicit API *mode* into a forced route, or None for auto.

    Forced routes carry confidence 1.0 and a "mode_override:<mode>" reason.
    """
    routes = {
        "project_qa": ("project", "qa"),
        "project_edits": ("project", "edits"),
        "docs_generation": ("docs", "generation"),
        # Legacy aliases kept for API compatibility.
        "analytics_review": ("project", "qa"),
        "code_change": ("project", "edits"),
        "qa": ("default", "general"),
    }
    normalized = (mode or "auto").strip().lower()
    selected = routes.get(normalized)
    if selected is None:
        return None
    domain_id, process_id = selected
    return RouteDecision(
        domain_id=domain_id,
        process_id=process_id,
        confidence=1.0,
        reason=f"mode_override:{mode}",
    )
|
||||
|
||||
def _classify_with_llm(self, user_message: str, context: RouterContext) -> RouteDecision | None:
|
||||
history = context.message_history[-8:]
|
||||
user_input = json.dumps(
|
||||
{
|
||||
"message": user_message,
|
||||
"history": history,
|
||||
"allowed_routes": list(self._route_mapping.keys()),
|
||||
},
|
||||
ensure_ascii=False,
|
||||
)
|
||||
try:
|
||||
raw = self._llm.generate("router_intent", user_input).strip()
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
payload = self._parse_llm_payload(raw)
|
||||
if not payload:
|
||||
return None
|
||||
|
||||
route = self._route_mapping.get(payload["route"])
|
||||
if not route:
|
||||
return None
|
||||
|
||||
confidence = self._normalize_confidence(payload.get("confidence"))
|
||||
return RouteDecision(
|
||||
domain_id=route[0],
|
||||
process_id=route[1],
|
||||
confidence=confidence,
|
||||
reason=f"llm_router:{payload.get('reason', 'ok')}",
|
||||
)
|
||||
|
||||
def _parse_llm_payload(self, raw: str) -> dict[str, str | float] | None:
|
||||
candidate = self._strip_code_fence(raw.strip())
|
||||
if not candidate:
|
||||
return None
|
||||
try:
|
||||
parsed = json.loads(candidate)
|
||||
except json.JSONDecodeError:
|
||||
return None
|
||||
if not isinstance(parsed, dict):
|
||||
return None
|
||||
route = str(parsed.get("route", "")).strip().lower()
|
||||
if not route:
|
||||
return None
|
||||
return {
|
||||
"route": route,
|
||||
"confidence": parsed.get("confidence"),
|
||||
"reason": str(parsed.get("reason", "ok")).strip().lower(),
|
||||
}
|
||||
|
||||
def _normalize_confidence(self, value: object) -> float:
|
||||
if isinstance(value, (float, int)):
|
||||
return max(0.0, min(1.0, float(value)))
|
||||
return 0.75
|
||||
|
||||
def _strip_code_fence(self, text: str) -> str:
|
||||
if not text.startswith("```"):
|
||||
return text
|
||||
lines = text.splitlines()
|
||||
if len(lines) < 3:
|
||||
return text
|
||||
if lines[-1].strip() != "```":
|
||||
return text
|
||||
return "\n".join(lines[1:-1]).strip()
|
||||
|
||||
def _deterministic_route(self, text: str) -> RouteDecision | None:
|
||||
if self._is_targeted_file_edit_request(text):
|
||||
return RouteDecision(
|
||||
domain_id="project",
|
||||
process_id="edits",
|
||||
confidence=0.97,
|
||||
reason="deterministic_targeted_file_edit",
|
||||
)
|
||||
if self._is_broad_docs_request(text):
|
||||
return RouteDecision(
|
||||
domain_id="docs",
|
||||
process_id="generation",
|
||||
confidence=0.95,
|
||||
reason="deterministic_docs_generation",
|
||||
)
|
||||
return None
|
||||
|
||||
def _is_targeted_file_edit_request(self, text: str) -> bool:
|
||||
if not text:
|
||||
return False
|
||||
edit_markers = (
|
||||
"добавь",
|
||||
"добавить",
|
||||
"измени",
|
||||
"исправь",
|
||||
"обнови",
|
||||
"удали",
|
||||
"замени",
|
||||
"вставь",
|
||||
"в конец",
|
||||
"в начале",
|
||||
"append",
|
||||
"update",
|
||||
"edit",
|
||||
"remove",
|
||||
"replace",
|
||||
)
|
||||
has_edit_marker = any(marker in text for marker in edit_markers)
|
||||
has_file_marker = (
|
||||
"readme" in text
|
||||
or bool(re.search(r"\b[\w.\-/]+\.(md|txt|rst|yaml|yml|json|toml|ini|cfg)\b", text))
|
||||
)
|
||||
return has_edit_marker and has_file_marker
|
||||
|
||||
def _is_broad_docs_request(self, text: str) -> bool:
|
||||
if not text:
|
||||
return False
|
||||
docs_markers = (
|
||||
"подготовь документац",
|
||||
"сгенерируй документац",
|
||||
"создай документац",
|
||||
"опиши документац",
|
||||
"generate documentation",
|
||||
"write documentation",
|
||||
"docs/",
|
||||
)
|
||||
return any(marker in text for marker in docs_markers)
|
||||
17
app/modules/agent/engine/router/intents_registry.yaml
Normal file
17
app/modules/agent/engine/router/intents_registry.yaml
Normal file
@@ -0,0 +1,17 @@
|
||||
intents:
|
||||
- domain_id: "default"
|
||||
process_id: "general"
|
||||
description: "General Q&A"
|
||||
priority: 1
|
||||
- domain_id: "project"
|
||||
process_id: "qa"
|
||||
description: "Project-specific Q&A with RAG and confluence context"
|
||||
priority: 2
|
||||
- domain_id: "project"
|
||||
process_id: "edits"
|
||||
description: "Project file edits from user request with conservative changeset generation"
|
||||
priority: 3
|
||||
- domain_id: "docs"
|
||||
process_id: "generation"
|
||||
description: "Documentation generation as changeset"
|
||||
priority: 2
|
||||
46
app/modules/agent/engine/router/registry.py
Normal file
46
app/modules/agent/engine/router/registry.py
Normal file
@@ -0,0 +1,46 @@
|
||||
from collections.abc import Callable
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import yaml
|
||||
|
||||
|
||||
class IntentRegistry:
|
||||
def __init__(self, registry_path: Path) -> None:
|
||||
self._registry_path = registry_path
|
||||
self._factories: dict[tuple[str, str], Callable[..., Any]] = {}
|
||||
|
||||
def register(self, domain_id: str, process_id: str, factory: Callable[..., Any]) -> None:
|
||||
self._factories[(domain_id, process_id)] = factory
|
||||
|
||||
def get_factory(self, domain_id: str, process_id: str) -> Callable[..., Any] | None:
|
||||
return self._factories.get((domain_id, process_id))
|
||||
|
||||
def is_valid(self, domain_id: str, process_id: str) -> bool:
|
||||
return self.get_factory(domain_id, process_id) is not None
|
||||
|
||||
def load_intents(self) -> list[dict[str, Any]]:
|
||||
if not self._registry_path.is_file():
|
||||
return []
|
||||
with self._registry_path.open("r", encoding="utf-8") as fh:
|
||||
payload = yaml.safe_load(fh) or {}
|
||||
intents = payload.get("intents")
|
||||
if not isinstance(intents, list):
|
||||
return []
|
||||
output: list[dict[str, Any]] = []
|
||||
for item in intents:
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
domain_id = item.get("domain_id")
|
||||
process_id = item.get("process_id")
|
||||
if not isinstance(domain_id, str) or not isinstance(process_id, str):
|
||||
continue
|
||||
output.append(
|
||||
{
|
||||
"domain_id": domain_id,
|
||||
"process_id": process_id,
|
||||
"description": str(item.get("description") or ""),
|
||||
"priority": int(item.get("priority") or 0),
|
||||
}
|
||||
)
|
||||
return output
|
||||
62
app/modules/agent/engine/router/router_service.py
Normal file
62
app/modules/agent/engine/router/router_service.py
Normal file
@@ -0,0 +1,62 @@
|
||||
from app.modules.agent.engine.router.context_store import RouterContextStore
|
||||
from app.modules.agent.engine.router.intent_classifier import IntentClassifier
|
||||
from app.modules.agent.engine.router.registry import IntentRegistry
|
||||
from app.modules.agent.engine.router.schemas import RouteResolution
|
||||
|
||||
|
||||
class RouterService:
    """Facade combining intent classification, route validation and context storage."""

    def __init__(
        self,
        registry: IntentRegistry,
        classifier: IntentClassifier,
        context_store: RouterContextStore,
        min_confidence: float = 0.7,
    ) -> None:
        self._registry = registry
        self._classifier = classifier
        self._ctx = context_store
        # Decisions below this confidence degrade to the default route.
        self._min_confidence = min_confidence

    def resolve(self, user_message: str, conversation_key: str, mode: str = "auto") -> RouteResolution:
        """Classify *user_message* and return a validated route.

        Low-confidence or unregistered routes fall back to default/general
        instead of raising.
        """
        context = self._ctx.get(conversation_key)
        decision = self._classifier.classify(user_message, context, mode=mode)
        if decision.confidence < self._min_confidence:
            return self._fallback("low_confidence")
        if not self._registry.is_valid(decision.domain_id, decision.process_id):
            return self._fallback("invalid_route")
        return RouteResolution(
            domain_id=decision.domain_id,
            process_id=decision.process_id,
            confidence=decision.confidence,
            reason=decision.reason,
            fallback_used=False,
        )

    def persist_context(
        self,
        conversation_key: str,
        *,
        domain_id: str,
        process_id: str,
        user_message: str,
        assistant_message: str,
    ) -> None:
        """Record the executed route and the latest dialogue turn."""
        self._ctx.update(
            conversation_key,
            domain_id=domain_id,
            process_id=process_id,
            user_message=user_message,
            assistant_message=assistant_message,
        )

    def graph_factory(self, domain_id: str, process_id: str):
        """Return the registered graph factory for the route, or None."""
        return self._registry.get_factory(domain_id, process_id)

    def _fallback(self, reason: str) -> RouteResolution:
        """Build the default/general resolution, tagged with *reason*."""
        return RouteResolution(
            domain_id="default",
            process_id="general",
            confidence=0.0,
            reason=reason,
            fallback_used=True,
        )
|
||||
27
app/modules/agent/engine/router/schemas.py
Normal file
27
app/modules/agent/engine/router/schemas.py
Normal file
@@ -0,0 +1,27 @@
|
||||
from pydantic import BaseModel, Field, field_validator
|
||||
|
||||
|
||||
class RouteDecision(BaseModel):
    """Raw routing verdict produced by the intent classifier."""

    domain_id: str = "default"
    process_id: str = "general"
    # Classifier certainty; the validator below clamps it into [0, 1].
    confidence: float = 0.0
    # Machine-readable origin, e.g. "mode_override:qa" or "llm_router:ok".
    reason: str = ""
    # True when a short confirmation re-used the previous route.
    use_previous: bool = False

    @field_validator("confidence")
    @classmethod
    def clamp_confidence(cls, value: float) -> float:
        """Keep confidence inside [0.0, 1.0] regardless of input."""
        return max(0.0, min(1.0, float(value)))
|
||||
|
||||
|
||||
class RouteResolution(BaseModel):
    """Final, validated route handed to the agent runtime."""

    domain_id: str
    process_id: str
    confidence: float
    reason: str
    # True when default/general was substituted for the classifier's choice.
    fallback_used: bool = False
|
||||
|
||||
|
||||
class RouterContext(BaseModel):
    """Per-conversation state the classifier may consult."""

    # Last confirmed route as {"domain_id": ..., "process_id": ...}, if any.
    last_routing: dict[str, str] | None = None
    # Recent dialogue turns as {"role": "user"|"assistant", "content": ...}.
    message_history: list[dict[str, str]] = Field(default_factory=list)
|
||||
3
app/modules/agent/llm/__init__.py
Normal file
3
app/modules/agent/llm/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from app.modules.agent.llm.service import AgentLlmService
|
||||
|
||||
__all__ = ["AgentLlmService"]
|
||||
BIN
app/modules/agent/llm/__pycache__/__init__.cpython-312.pyc
Normal file
BIN
app/modules/agent/llm/__pycache__/__init__.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/agent/llm/__pycache__/service.cpython-312.pyc
Normal file
BIN
app/modules/agent/llm/__pycache__/service.cpython-312.pyc
Normal file
Binary file not shown.
14
app/modules/agent/llm/service.py
Normal file
14
app/modules/agent/llm/service.py
Normal file
@@ -0,0 +1,14 @@
|
||||
from app.modules.agent.prompt_loader import PromptLoader
|
||||
from app.modules.shared.gigachat.client import GigaChatClient
|
||||
|
||||
|
||||
class AgentLlmService:
    """Thin convenience layer: named system prompt + user input -> completion."""

    def __init__(self, client: GigaChatClient, prompts: PromptLoader) -> None:
        self._client = client
        self._prompts = prompts

    def generate(self, prompt_name: str, user_input: str) -> str:
        """Run a completion using prompt file *prompt_name* as the system prompt.

        Falls back to a generic assistant prompt when the prompt file is
        missing or empty (PromptLoader.load returns "" in that case).
        """
        system_prompt = self._prompts.load(prompt_name) or "You are a helpful assistant."
        return self._client.complete(system_prompt=system_prompt, user_prompt=user_input)
|
||||
44
app/modules/agent/module.py
Normal file
44
app/modules/agent/module.py
Normal file
@@ -0,0 +1,44 @@
|
||||
from fastapi import APIRouter
|
||||
from pydantic import BaseModel, HttpUrl
|
||||
|
||||
from app.modules.agent.changeset_validator import ChangeSetValidator
|
||||
from app.modules.agent.confluence_service import ConfluenceService
|
||||
from app.modules.agent.llm import AgentLlmService
|
||||
from app.modules.agent.prompt_loader import PromptLoader
|
||||
from app.modules.agent.service import GraphAgentRuntime
|
||||
from app.modules.agent.repository import AgentRepository
|
||||
from app.modules.contracts import RagRetriever
|
||||
from app.modules.shared.gigachat.client import GigaChatClient
|
||||
from app.modules.shared.gigachat.settings import GigaChatSettings
|
||||
from app.modules.shared.gigachat.token_provider import GigaChatTokenProvider
|
||||
|
||||
|
||||
class ConfluenceFetchRequest(BaseModel):
    """Request body for the internal Confluence fetch endpoint."""

    # Full page URL; format is validated by pydantic's HttpUrl.
    url: HttpUrl
|
||||
|
||||
|
||||
class AgentModule:
    """Composition root for the agent: wires LLM, tools and the graph runtime."""

    def __init__(self, rag_retriever: RagRetriever, agent_repository: AgentRepository) -> None:
        self.confluence = ConfluenceService()
        self.changeset_validator = ChangeSetValidator()
        # GigaChat stack: env settings -> OAuth token provider -> HTTP client.
        settings = GigaChatSettings.from_env()
        token_provider = GigaChatTokenProvider(settings)
        client = GigaChatClient(settings, token_provider)
        prompt_loader = PromptLoader()
        llm = AgentLlmService(client=client, prompts=prompt_loader)
        self.runtime = GraphAgentRuntime(
            rag=rag_retriever,
            confluence=self.confluence,
            changeset_validator=self.changeset_validator,
            llm=llm,
            agent_repository=agent_repository,
        )

    def internal_router(self) -> APIRouter:
        """Build the internal FastAPI router exposing the Confluence fetch tool."""
        router = APIRouter(prefix="/internal/tools/confluence", tags=["internal-confluence"])

        @router.post("/fetch")
        async def fetch_page(request: ConfluenceFetchRequest) -> dict:
            # Proxies straight to the Confluence service; input validation
            # is handled by the pydantic request model.
            return await self.confluence.fetch_page(str(request.url))

        return router
|
||||
15
app/modules/agent/prompt_loader.py
Normal file
15
app/modules/agent/prompt_loader.py
Normal file
@@ -0,0 +1,15 @@
|
||||
from pathlib import Path
|
||||
import os
|
||||
|
||||
|
||||
class PromptLoader:
|
||||
def __init__(self, prompts_dir: Path | None = None) -> None:
|
||||
base = prompts_dir or Path(__file__).resolve().parent / "prompts"
|
||||
env_override = os.getenv("AGENT_PROMPTS_DIR", "").strip()
|
||||
self._dir = Path(env_override) if env_override else base
|
||||
|
||||
def load(self, name: str) -> str:
|
||||
path = self._dir / f"{name}.txt"
|
||||
if not path.is_file():
|
||||
return ""
|
||||
return path.read_text(encoding="utf-8").strip()
|
||||
18
app/modules/agent/prompts/docs_detect.txt
Normal file
18
app/modules/agent/prompts/docs_detect.txt
Normal file
@@ -0,0 +1,18 @@
|
||||
Ты анализируешь, есть ли в проекте существующая документация, в которую нужно встраиваться.
|
||||
|
||||
Оцени входные данные:
|
||||
- User request
|
||||
- Requested target path
|
||||
- Detected documentation candidates (пути и сниппеты)
|
||||
|
||||
Критерии EXISTS=yes:
|
||||
- Есть хотя бы один релевантный doc-файл, и
|
||||
- Он по смыслу подходит под запрос пользователя.
|
||||
|
||||
Критерии EXISTS=no:
|
||||
- Нет релевантных doc-файлов, или
|
||||
- Есть только нерелевантные/пустые заготовки.
|
||||
|
||||
Верни строго две строки:
|
||||
EXISTS: yes|no
|
||||
SUMMARY: <короткое объяснение на 1-2 предложения>
|
||||
@@ -0,0 +1,27 @@
|
||||
# Feature X Documentation
|
||||
|
||||
## Goal
|
||||
Describe how Feature X works and how to integrate it safely.
|
||||
|
||||
## Architecture Overview
|
||||
- Input enters through HTTP endpoint.
|
||||
- Request is validated and transformed.
|
||||
- Worker executes business logic and persists result.
|
||||
|
||||
## Data Flow
|
||||
1. Client sends request payload.
|
||||
2. Service validates payload.
|
||||
3. Domain layer computes output.
|
||||
4. Repository stores entities.
|
||||
|
||||
## Configuration
|
||||
- Required environment variables.
|
||||
- Optional tuning parameters.
|
||||
|
||||
## Deployment Notes
|
||||
- Migration prerequisites.
|
||||
- Rollback strategy.
|
||||
|
||||
## Risks and Constraints
|
||||
- Throughput is bounded by downstream API limits.
|
||||
- Partial failures require retry-safe handlers.
|
||||
@@ -0,0 +1,21 @@
|
||||
# API Client Module
|
||||
|
||||
## Purpose
|
||||
This document explains how the API client authenticates and retries requests.
|
||||
|
||||
## Current Behavior
|
||||
- Access token is fetched before outbound request.
|
||||
- Retry policy uses exponential backoff for transient failures.
|
||||
|
||||
## Recent Increment (v2)
|
||||
### Added cache for tokens
|
||||
- Token is cached in memory for a short TTL.
|
||||
- Cache invalidates on 401 responses.
|
||||
|
||||
### Operational impact
|
||||
- Reduced auth latency for repetitive calls.
|
||||
- Fewer token endpoint requests.
|
||||
|
||||
## Limitations
|
||||
- Single-process cache only.
|
||||
- No distributed cache synchronization.
|
||||
12
app/modules/agent/prompts/docs_execution_summary.txt
Normal file
12
app/modules/agent/prompts/docs_execution_summary.txt
Normal file
@@ -0,0 +1,12 @@
|
||||
Ты технический писатель и готовишь краткий итог по выполненной задаче документации.
|
||||
|
||||
Верни только markdown-текст без JSON и без лишних вступлений.
|
||||
Структура ответа:
|
||||
1) "Что сделано" — 3-6 коротких пунктов по основным частям пользовательского запроса.
|
||||
2) "Измененные файлы" — список файлов с кратким описанием изменения по каждому файлу.
|
||||
3) "Ограничения" — добавляй только если в данных есть явные пробелы или ограничения.
|
||||
|
||||
Правила:
|
||||
- Используй только входные данные.
|
||||
- Не выдумывай изменения, которых нет в списке changed files.
|
||||
- Пиши коротко и по делу.
|
||||
53
app/modules/agent/prompts/docs_generation.txt
Normal file
53
app/modules/agent/prompts/docs_generation.txt
Normal file
@@ -0,0 +1,53 @@
|
||||
Ты senior technical writer и пишешь только проектную документацию в markdown.
|
||||
|
||||
Твоя задача:
|
||||
1) Если strategy=incremental_update, встроиться в существующую документацию и добавить только недостающий инкремент.
|
||||
2) Если strategy=from_scratch, создать целостный документ с нуля.
|
||||
|
||||
Правила:
|
||||
- Опирайся только на входной контекст (request, plan, rag context, current file content, examples bundle).
|
||||
- Не выдумывай факты о коде, которых нет во входных данных.
|
||||
- Сохраняй стиль существующего документа при incremental_update.
|
||||
- Если контекст неполный, отмечай ограничения явно и коротко в отдельном разделе "Ограничения".
|
||||
- Структура должна быть логичной и пригодной для реального репозитория.
|
||||
- Агент должен спроектировать структуру папок и файлов документации под правила ниже.
|
||||
- Документация должна быть разделена минимум на 2 направления:
|
||||
- отдельная папка для описания методов API;
|
||||
- отдельная папка для описания логики/требований.
|
||||
- В одном markdown-файле допускается описание только:
|
||||
- одного метода API, или
|
||||
- одного атомарного куска логики/требования.
|
||||
- Для описания одного метода API используй структуру:
|
||||
- название метода;
|
||||
- параметры запроса;
|
||||
- параметры ответа;
|
||||
- use case (сценарий последовательности вызова метода);
|
||||
- функциональные требования (если нужны технические детали).
|
||||
- Для описания логики используй аналогичный подход:
|
||||
- сценарий;
|
||||
- ссылки из шагов сценария на функциональные требования;
|
||||
- отдельные функциональные требования с техническими деталями.
|
||||
- Правила для сценариев:
|
||||
- без объемных шагов;
|
||||
- каждый шаг краткий, не более 2 предложений;
|
||||
- если нужны технические детали, вынеси их из шага в отдельное функциональное требование и дай ссылку на него из шага.
|
||||
|
||||
Формат ответа:
|
||||
- Верни только JSON-объект без пояснений и без markdown-оберток.
|
||||
- Строгий формат:
|
||||
{
|
||||
"files": [
|
||||
{
|
||||
"path": "docs/api/<file>.md",
|
||||
"content": "<полное содержимое markdown-файла>",
|
||||
"reason": "<кратко зачем создан/обновлен файл>"
|
||||
},
|
||||
{
|
||||
"path": "docs/logic/<file>.md",
|
||||
"content": "<полное содержимое markdown-файла>",
|
||||
"reason": "<кратко зачем создан/обновлен файл>"
|
||||
}
|
||||
]
|
||||
}
|
||||
- Для from_scratch сформируй несколько файлов и обязательно покрой обе папки: `docs/api` и `docs/logic`.
|
||||
- Для incremental_update также соблюдай правило атомарности: один файл = один метод API или один атомарный кусок логики/требования.
|
||||
25
app/modules/agent/prompts/docs_plan_sections.txt
Normal file
25
app/modules/agent/prompts/docs_plan_sections.txt
Normal file
@@ -0,0 +1,25 @@
|
||||
Ты составляешь план изменений документации перед генерацией текста.
|
||||
|
||||
Вход:
|
||||
- Strategy
|
||||
- User request
|
||||
- Target path
|
||||
- Current target content (для incremental_update)
|
||||
- RAG context по коду
|
||||
- Examples bundle
|
||||
|
||||
Требования к плану:
|
||||
- Сначала спроектируй структуру папок и файлов документации под формат:
|
||||
- отдельная папка для API-методов;
|
||||
- отдельная папка для логики/требований;
|
||||
- один файл = один метод API или один атомарный кусок логики/требования.
|
||||
- Для API-файлов закладывай структуру: название метода, параметры запроса, параметры ответа, use case, функциональные требования.
|
||||
- Для логики закладывай структуру: сценарий, ссылки из шагов на функциональные требования, отдельные функциональные требования.
|
||||
- Для сценариев закладывай короткие шаги (не более 2 предложений на шаг), а технические детали выноси в функциональные требования.
|
||||
- Дай нумерованный список разделов будущего документа.
|
||||
- Для incremental_update отмечай, какие разделы добавить/обновить, не переписывая все целиком.
|
||||
- Для from_scratch давай полный каркас документа.
|
||||
- Каждый пункт должен включать краткую цель раздела.
|
||||
- Если контекст частичный, включи пункт "Ограничения и допущения".
|
||||
|
||||
Формат ответа: только план в markdown, без вступлений и без JSON.
|
||||
22
app/modules/agent/prompts/docs_self_check.txt
Normal file
22
app/modules/agent/prompts/docs_self_check.txt
Normal file
@@ -0,0 +1,22 @@
|
||||
Ты валидатор качества документации.
|
||||
|
||||
Проверь:
|
||||
- Соответствие strategy и user request.
|
||||
- Соответствие generated document плану секций.
|
||||
- Отсутствие очевидных выдуманных фактов.
|
||||
- Практическую применимость текста к проекту.
|
||||
- Для incremental_update: минимально необходимый инкремент без лишнего переписывания.
|
||||
- Проверку структуры документации:
|
||||
- есть разбиение по папкам `docs/api` и `docs/logic`;
|
||||
- один файл описывает только один API-метод или один атомарный кусок логики;
|
||||
- сценарии состоят из коротких шагов, а технические детали вынесены в функциональные требования.
|
||||
|
||||
Если документ приемлем:
|
||||
PASS: yes
|
||||
FEEDBACK: <коротко, что ок>
|
||||
|
||||
Если документ неприемлем:
|
||||
PASS: no
|
||||
FEEDBACK: <коротко, что исправить в следующей попытке>
|
||||
|
||||
Верни ровно две строки в этом формате.
|
||||
14
app/modules/agent/prompts/docs_strategy.txt
Normal file
14
app/modules/agent/prompts/docs_strategy.txt
Normal file
@@ -0,0 +1,14 @@
|
||||
Ты выбираешь стратегию генерации документации.
|
||||
|
||||
Доступные стратегии:
|
||||
- incremental_update: дописать недостающий инкремент в существующий документ.
|
||||
- from_scratch: создать новый документ с нуля.
|
||||
|
||||
Правила выбора:
|
||||
- Если Existing docs detected=true и это не противоречит user request, выбирай incremental_update.
|
||||
- Если Existing docs detected=false, выбирай from_scratch.
|
||||
- Если пользователь явно просит "с нуля", приоритет у from_scratch.
|
||||
- Если пользователь явно просит "дописать/обновить", приоритет у incremental_update.
|
||||
|
||||
Верни строго одну строку:
|
||||
STRATEGY: incremental_update|from_scratch
|
||||
3
app/modules/agent/prompts/general_answer.txt
Normal file
3
app/modules/agent/prompts/general_answer.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
Ты инженерный AI-ассистент. Ответь по проекту коротко и по делу.
|
||||
Если в контексте недостаточно данных, явно укажи пробелы.
|
||||
Не выдумывай факты, используй только входные данные.
|
||||
9
app/modules/agent/prompts/project_answer.txt
Normal file
9
app/modules/agent/prompts/project_answer.txt
Normal file
@@ -0,0 +1,9 @@
|
||||
Ты инженерный AI-ассистент по текущему проекту.
|
||||
|
||||
Сформируй точный ответ на вопрос пользователя, используя только входной контекст.
|
||||
Приоритет источников: сначала RAG context, затем Confluence context.
|
||||
|
||||
Правила:
|
||||
- Не выдумывай факты и явно помечай пробелы в данных.
|
||||
- Отвечай структурировано и коротко.
|
||||
- Если пользователь просит шаги, дай практичный пошаговый план.
|
||||
10
app/modules/agent/prompts/project_edits_apply.txt
Normal file
10
app/modules/agent/prompts/project_edits_apply.txt
Normal file
@@ -0,0 +1,10 @@
|
||||
Ты вносишь правку в один файл по запросу пользователя.
|
||||
На вход приходит JSON с request, path, reason, current_content, previous_validation_feedback, rag_context, confluence_context.
|
||||
|
||||
Верни только полное итоговое содержимое файла (без JSON).
|
||||
|
||||
Критичные правила:
|
||||
- Измени только те части, которые нужны по запросу.
|
||||
- Не переписывай файл целиком без необходимости.
|
||||
- Сохрани структуру, стиль и все нерелевантные разделы без изменений.
|
||||
- Если данных недостаточно, внеси минимально безопасную правку и явно отрази ограничение в тексте файла.
|
||||
15
app/modules/agent/prompts/project_edits_plan.txt
Normal file
15
app/modules/agent/prompts/project_edits_plan.txt
Normal file
@@ -0,0 +1,15 @@
|
||||
Ты анализируешь запрос на правки файлов проекта (не про написание нового кода).
|
||||
На вход приходит JSON с request, requested_path, context_files.
|
||||
|
||||
Верни только JSON:
|
||||
{
|
||||
"files": [
|
||||
{"path": "<path>", "reason": "<why this file should be edited>"}
|
||||
]
|
||||
}
|
||||
|
||||
Правила:
|
||||
- Выбирай только файлы, реально нужные для выполнения запроса.
|
||||
- Не добавляй лишние файлы.
|
||||
- Обычно 1-3 файла, максимум 8.
|
||||
- Если в request указан конкретный файл, включи его в первую очередь.
|
||||
12
app/modules/agent/prompts/project_edits_self_check.txt
Normal file
12
app/modules/agent/prompts/project_edits_self_check.txt
Normal file
@@ -0,0 +1,12 @@
|
||||
Ты валидируешь changeset правок файла.
|
||||
На вход приходит JSON с request и changeset (op, path, reason).
|
||||
|
||||
Проверь:
|
||||
1) изменения соответствуют запросу,
|
||||
2) нет лишних нерелевантных правок,
|
||||
3) изменены только действительно нужные файлы,
|
||||
4) нет косметических правок (пробелы/форматирование без смысла),
|
||||
5) нет добавления новых секций/заголовков, если это не запрошено явно.
|
||||
|
||||
Верни только JSON:
|
||||
{"pass": true|false, "feedback": "<short reason>"}
|
||||
23
app/modules/agent/prompts/router_intent.txt
Normal file
23
app/modules/agent/prompts/router_intent.txt
Normal file
@@ -0,0 +1,23 @@
|
||||
Ты классификатор маршрутов агента.
|
||||
На вход ты получаешь JSON с полями:
|
||||
- message: текущий запрос пользователя
|
||||
- history: последние сообщения диалога
|
||||
- allowed_routes: допустимые маршруты
|
||||
|
||||
Выбери ровно один маршрут из allowed_routes.
|
||||
Верни только JSON без markdown и пояснений.
|
||||
|
||||
Строгий формат ответа:
|
||||
{"route":"<one_of_allowed_routes>","confidence":<number_0_to_1>,"reason":"<short_reason>"}
|
||||
|
||||
Правила маршрутизации:
|
||||
- project/qa: пользователь задает вопросы про текущий проект, его код, архитектуру, модули, поведение, ограничения.
|
||||
- project/edits: пользователь просит внести правки в существующие файлы проекта (контент, конфиги, тексты, шаблоны), без реализации новой кодовой логики.
|
||||
- docs/generation: пользователь просит подготовить/обновить документацию, инструкции, markdown-материалы.
|
||||
- default/general: остальные случаи, включая общие вопросы и консультации.
|
||||
|
||||
Приоритет:
|
||||
- Если в запросе есть явная команда правки конкретного файла (например `README.md`, путь к файлу, "добавь в конец файла"), выбирай project/edits.
|
||||
- docs/generation выбирай для задач подготовки документации в целом, а не для точечной правки одного файла.
|
||||
|
||||
Если есть сомнения, выбирай default/general и confidence <= 0.6.
|
||||
106
app/modules/agent/repository.py
Normal file
106
app/modules/agent/repository.py
Normal file
@@ -0,0 +1,106 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
|
||||
from sqlalchemy import text
|
||||
|
||||
from app.modules.agent.engine.router.schemas import RouterContext
|
||||
from app.modules.shared.db import get_engine
|
||||
|
||||
|
||||
class AgentRepository:
    """Persists per-conversation router state in the ``router_context`` table."""

    def ensure_tables(self) -> None:
        """Create the backing table if missing (idempotent, safe at startup)."""
        with get_engine().connect() as conn:
            conn.execute(
                text(
                    """
                    CREATE TABLE IF NOT EXISTS router_context (
                        conversation_key VARCHAR(64) PRIMARY KEY,
                        last_domain_id VARCHAR(64) NULL,
                        last_process_id VARCHAR(64) NULL,
                        message_history_json TEXT NOT NULL DEFAULT '[]',
                        updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
                    )
                    """
                )
            )
            conn.commit()

    def get_router_context(self, conversation_key: str) -> RouterContext:
        """Load the stored context; returns an empty context when none exists.

        Malformed history JSON is treated as an empty history rather than
        raised, and only well-formed {role, content} entries with known
        roles survive.
        """
        with get_engine().connect() as conn:
            row = conn.execute(
                text(
                    """
                    SELECT last_domain_id, last_process_id, message_history_json
                    FROM router_context
                    WHERE conversation_key = :key
                    """
                ),
                {"key": conversation_key},
            ).fetchone()

            if not row:
                return RouterContext()

            history_raw = row[2] or "[]"
            try:
                history = json.loads(history_raw)
            except json.JSONDecodeError:
                history = []

            # Only set last_routing when both halves of the route are present.
            last = None
            if row[0] and row[1]:
                last = {"domain_id": str(row[0]), "process_id": str(row[1])}

            # Sanitize history: drop non-dict items, unknown roles, empty content.
            clean_history = []
            for item in history if isinstance(history, list) else []:
                if not isinstance(item, dict):
                    continue
                role = str(item.get("role") or "")
                content = str(item.get("content") or "")
                if role in {"user", "assistant"} and content:
                    clean_history.append({"role": role, "content": content})

            return RouterContext(last_routing=last, message_history=clean_history)

    def update_router_context(
        self,
        conversation_key: str,
        *,
        domain_id: str,
        process_id: str,
        user_message: str,
        assistant_message: str,
        max_history: int,
    ) -> None:
        """Append the latest turn(s), trim history, and upsert the route.

        NOTE(review): read-modify-write without a transaction or row lock —
        concurrent updates for the same conversation_key can drop a turn.
        Presumably acceptable for a single worker; confirm before scaling.
        """
        current = self.get_router_context(conversation_key)
        history = list(current.message_history)
        if user_message:
            history.append({"role": "user", "content": user_message})
        if assistant_message:
            history.append({"role": "assistant", "content": assistant_message})
        # max_history <= 0 disables trimming.
        if max_history > 0:
            history = history[-max_history:]

        with get_engine().connect() as conn:
            conn.execute(
                text(
                    """
                    INSERT INTO router_context (
                        conversation_key, last_domain_id, last_process_id, message_history_json
                    ) VALUES (:key, :domain, :process, :history)
                    ON CONFLICT (conversation_key) DO UPDATE SET
                        last_domain_id = EXCLUDED.last_domain_id,
                        last_process_id = EXCLUDED.last_process_id,
                        message_history_json = EXCLUDED.message_history_json,
                        updated_at = CURRENT_TIMESTAMP
                    """
                ),
                {
                    "key": conversation_key,
                    "domain": domain_id,
                    "process": process_id,
                    "history": json.dumps(history, ensure_ascii=False),
                },
            )
            conn.commit()
|
||||
296
app/modules/agent/service.py
Normal file
296
app/modules/agent/service.py
Normal file
@@ -0,0 +1,296 @@
|
||||
from dataclasses import dataclass, field
|
||||
from collections.abc import Awaitable, Callable
|
||||
import inspect
|
||||
import asyncio
|
||||
import logging
|
||||
import re
|
||||
|
||||
from app.modules.agent.engine.router import build_router_service
|
||||
from app.modules.agent.engine.graphs.progress_registry import progress_registry
|
||||
from app.modules.agent.llm import AgentLlmService
|
||||
from app.modules.agent.changeset_validator import ChangeSetValidator
|
||||
from app.modules.agent.confluence_service import ConfluenceService
|
||||
from app.modules.agent.repository import AgentRepository
|
||||
from app.modules.contracts import RagRetriever
|
||||
from app.modules.shared.checkpointer import get_checkpointer
|
||||
from app.schemas.changeset import ChangeItem
|
||||
from app.schemas.chat import TaskResultType
|
||||
from app.core.exceptions import AppError
|
||||
from app.schemas.common import ModuleName
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class AgentResult:
    """Outcome of one agent run: a plain answer, a changeset, or both.

    `result_type` tells the caller which payload is authoritative;
    `meta` carries routing/diagnostic details for the event stream.
    """

    result_type: TaskResultType
    answer: str | None = None
    changeset: list[ChangeItem] = field(default_factory=list)
    meta: dict = field(default_factory=dict)
|
||||
|
||||
|
||||
class GraphAgentRuntime:
    """Executes one agent request end to end.

    Routes the incoming message to a graph via the router service, enriches
    the graph state with RAG and Confluence context, runs the graph in a
    worker thread, then post-processes the output into either a plain
    answer or a validated changeset.
    """

    def __init__(
        self,
        rag: RagRetriever,
        confluence: ConfluenceService,
        changeset_validator: ChangeSetValidator,
        llm: AgentLlmService,
        agent_repository: AgentRepository,
    ) -> None:
        self._rag = rag
        self._confluence = confluence
        self._changeset_validator = changeset_validator
        self._router = build_router_service(llm, agent_repository)
        # Created lazily on first graph resolution (see _resolve_graph).
        self._checkpointer = None

    async def run(
        self,
        *,
        task_id: str,
        dialog_session_id: str,
        rag_session_id: str,
        mode: str,
        message: str,
        attachments: list[dict],
        files: list[dict],
        progress_cb: Callable[[str, str, str, dict | None], Awaitable[None] | None] | None = None,
    ) -> AgentResult:
        """Run the full pipeline for a single user message.

        Returns an ANSWER result when no (surviving) changeset was produced,
        otherwise a CHANGESET result with validated items. Dialog context is
        persisted through the router in both cases.
        """
        LOGGER.warning(
            "GraphAgentRuntime.run started: task_id=%s dialog_session_id=%s mode=%s",
            task_id,
            dialog_session_id,
            mode,
        )
        await self._emit_progress(progress_cb, "agent.route", "Определяю тип запроса и подбираю граф.", meta={"mode": mode})
        route = self._router.resolve(message, dialog_session_id, mode=mode)
        await self._emit_progress(
            progress_cb,
            "agent.route.resolved",
            "Маршрут выбран, готовлю контекст для выполнения.",
            meta={"domain_id": route.domain_id, "process_id": route.process_id},
        )
        graph = self._resolve_graph(route.domain_id, route.process_id)
        files_map = self._build_files_map(files)

        # Gather external context before running the graph.
        await self._emit_progress(progress_cb, "agent.rag", "Собираю релевантный контекст из RAG.")
        rag_items = await self._rag.retrieve(rag_session_id, message)
        await self._emit_progress(progress_cb, "agent.attachments", "Обрабатываю дополнительные вложения.")
        pages = await self._fetch_confluence_pages(attachments)
        graph_state = {
            "task_id": task_id,
            "project_id": rag_session_id,
            "message": message,
            "progress_key": task_id,
            "rag_context": self._format_rag(rag_items),
            "confluence_context": self._format_confluence(pages),
            "files_map": files_map,
        }

        await self._emit_progress(progress_cb, "agent.graph", "Запускаю выполнение графа.")
        # Graph nodes report progress through the registry keyed by task_id;
        # register before the (blocking) invoke and always unregister after.
        if progress_cb is not None:
            progress_registry.register(task_id, progress_cb)
        try:
            raw_result = await asyncio.to_thread(
                self._invoke_graph,
                graph,
                graph_state,
                dialog_session_id,
            )
        finally:
            if progress_cb is not None:
                progress_registry.unregister(task_id)
        await self._emit_progress(progress_cb, "agent.graph.done", "Граф завершил обработку результата.")
        answer = raw_result.get("answer")
        changeset = raw_result.get("changeset") or []
        if changeset:
            await self._emit_progress(progress_cb, "agent.changeset", "Проверяю и валидирую предложенные изменения.")
            changeset = self._enrich_changeset_hashes(changeset, files_map)
            changeset = self._sanitize_changeset(changeset, files_map)
            if not changeset:
                # Every proposed edit was a no-op or whitespace-only change.
                final_answer = (answer or "").strip() or "Предложенные правки были отброшены как нерелевантные или косметические."
                await self._emit_progress(progress_cb, "agent.answer", "После фильтрации правок формирую ответ без changeset.")
                self._router.persist_context(
                    dialog_session_id,
                    domain_id=route.domain_id,
                    process_id=route.process_id,
                    user_message=message,
                    assistant_message=final_answer,
                )
                return AgentResult(
                    result_type=TaskResultType.ANSWER,
                    answer=final_answer,
                    meta={
                        "route": route.model_dump(),
                        "used_rag": True,
                        "used_confluence": bool(pages),
                        "changeset_filtered_out": True,
                    },
                )
            validated = self._changeset_validator.validate(task_id, changeset)
            final_answer = (answer or "").strip() or None
            self._router.persist_context(
                dialog_session_id,
                domain_id=route.domain_id,
                process_id=route.process_id,
                user_message=message,
                assistant_message=final_answer or f"changeset:{len(validated)}",
            )
            outcome = AgentResult(
                result_type=TaskResultType.CHANGESET,
                answer=final_answer,
                changeset=validated,
                meta={"route": route.model_dump(), "used_rag": True, "used_confluence": bool(pages)},
            )
            LOGGER.warning(
                "GraphAgentRuntime.run completed: task_id=%s route=%s/%s result_type=%s changeset_items=%s",
                task_id,
                route.domain_id,
                route.process_id,
                outcome.result_type.value,
                len(outcome.changeset),
            )
            return outcome

        # Plain-answer path: no changeset was produced by the graph.
        final_answer = answer or ""
        await self._emit_progress(progress_cb, "agent.answer", "Формирую финальный ответ.")
        self._router.persist_context(
            dialog_session_id,
            domain_id=route.domain_id,
            process_id=route.process_id,
            user_message=message,
            assistant_message=final_answer,
        )
        outcome = AgentResult(
            result_type=TaskResultType.ANSWER,
            answer=final_answer,
            meta={"route": route.model_dump(), "used_rag": True, "used_confluence": bool(pages)},
        )
        LOGGER.warning(
            "GraphAgentRuntime.run completed: task_id=%s route=%s/%s result_type=%s answer_len=%s",
            task_id,
            route.domain_id,
            route.process_id,
            outcome.result_type.value,
            len(outcome.answer or ""),
        )
        return outcome

    async def _emit_progress(
        self,
        progress_cb: Callable[[str, str, str, dict | None], Awaitable[None] | None] | None,
        stage: str,
        message: str,
        *,
        kind: str = "task_progress",
        meta: dict | None = None,
    ) -> None:
        """Invoke the progress callback, awaiting it if it is a coroutine."""
        if progress_cb is None:
            return
        outcome = progress_cb(stage, message, kind, meta or {})
        if inspect.isawaitable(outcome):
            await outcome

    def _resolve_graph(self, domain_id: str, process_id: str):
        """Return a compiled graph for the route, falling back to default/general."""
        if self._checkpointer is None:
            self._checkpointer = get_checkpointer()
        factory = self._router.graph_factory(domain_id, process_id)
        if factory is None:
            factory = self._router.graph_factory("default", "general")
        if factory is None:
            raise RuntimeError("No graph factory configured")
        LOGGER.warning("_resolve_graph resolved: domain_id=%s process_id=%s", domain_id, process_id)
        return factory(self._checkpointer)

    def _invoke_graph(self, graph, state: dict, dialog_session_id: str):
        """Synchronous graph invocation; the dialog id scopes the checkpoint thread."""
        config = {"configurable": {"thread_id": dialog_session_id}}
        return graph.invoke(state, config=config)

    async def _fetch_confluence_pages(self, attachments: list[dict]) -> list[dict]:
        """Fetch every attachment of type 'confluence_url' sequentially."""
        pages: list[dict] = []
        for attachment in attachments:
            if attachment.get("type") == "confluence_url":
                pages.append(await self._confluence.fetch_page(attachment["url"]))
        LOGGER.warning("_fetch_confluence_pages completed: pages=%s", len(pages))
        return pages

    def _format_rag(self, items: list[dict]) -> str:
        """Join retrieved chunk contents into one newline-separated blob."""
        parts = (str(item.get("content", "")) for item in items)
        return "\n".join(parts)

    def _format_confluence(self, pages: list[dict]) -> str:
        """Join fetched page markdown into one newline-separated blob."""
        parts = (str(page.get("content_markdown", "")) for page in pages)
        return "\n".join(parts)

    def _build_files_map(self, files: list[dict]) -> dict[str, dict]:
        """Index request files by forward-slash-normalised path; skips empty paths."""
        output: dict[str, dict] = {}
        for entry in files:
            normalized = str(entry.get("path", "")).replace("\\", "/").strip()
            if not normalized:
                continue
            output[normalized] = {
                "path": normalized,
                "content": str(entry.get("content", "")),
                "content_hash": str(entry.get("content_hash", "")),
            }
        LOGGER.warning("_build_files_map completed: files=%s", len(output))
        return output

    def _lookup_file(self, files_map: dict[str, dict], path: str) -> dict | None:
        """Find a file by path; exact match first, then case-insensitive."""
        normalized = (path or "").replace("\\", "/")
        if normalized in files_map:
            return files_map[normalized]
        folded = normalized.lower()
        for key, value in files_map.items():
            if key.lower() == folded:
                return value
        return None

    def _enrich_changeset_hashes(self, items: list[ChangeItem], files_map: dict[str, dict]) -> list[ChangeItem]:
        """Attach base_hash to update items from the request files; raises when missing."""
        enriched: list[ChangeItem] = []
        for item in items:
            if item.op.value == "update":
                source = self._lookup_file(files_map, item.path)
                if not source or not source.get("content_hash"):
                    # An update without a known base hash cannot be applied safely.
                    raise AppError(
                        "missing_base_hash",
                        f"Cannot build update for {item.path}: no file hash in request context",
                        ModuleName.AGENT,
                    )
                item.base_hash = str(source["content_hash"])
            enriched.append(item)
        LOGGER.warning("_enrich_changeset_hashes completed: items=%s", len(enriched))
        return enriched

    def _sanitize_changeset(self, items: list[ChangeItem], files_map: dict[str, dict]) -> list[ChangeItem]:
        """Drop update items that are no-ops or whitespace-only rewrites."""
        sanitized: list[ChangeItem] = []
        dropped_noop = 0
        dropped_ws = 0
        for item in items:
            if item.op.value != "update":
                sanitized.append(item)
                continue
            source = self._lookup_file(files_map, item.path)
            if not source:
                # Unknown base file: keep the item, the validator decides later.
                sanitized.append(item)
                continue
            original = str(source.get("content", ""))
            proposed = item.proposed_content or ""
            if proposed == original:
                dropped_noop += 1
                continue
            if self._collapse_whitespace(proposed) == self._collapse_whitespace(original):
                dropped_ws += 1
                continue
            sanitized.append(item)
        if dropped_noop or dropped_ws:
            LOGGER.warning(
                "_sanitize_changeset dropped items: noop=%s whitespace_only=%s kept=%s",
                dropped_noop,
                dropped_ws,
                len(sanitized),
            )
        return sanitized

    def _collapse_whitespace(self, text: str) -> str:
        """Normalise all whitespace runs to single spaces for comparison."""
        return re.sub(r"\s+", " ", (text or "").strip())
|
||||
31
app/modules/application.py
Normal file
31
app/modules/application.py
Normal file
@@ -0,0 +1,31 @@
|
||||
from app.modules.agent.module import AgentModule
|
||||
from app.modules.agent.repository import AgentRepository
|
||||
from app.modules.chat.repository import ChatRepository
|
||||
from app.modules.chat.module import ChatModule
|
||||
from app.modules.rag.repository import RagRepository
|
||||
from app.modules.rag.module import RagModule
|
||||
from app.modules.shared.bootstrap import bootstrap_database
|
||||
from app.modules.shared.event_bus import EventBus
|
||||
from app.modules.shared.retry_executor import RetryExecutor
|
||||
|
||||
|
||||
class ModularApplication:
    """Composition root: builds repositories and wires the modules together.

    Dependency direction: chat -> agent -> rag; event bus and retry
    executor are shared infrastructure.
    """

    def __init__(self) -> None:
        # Cross-cutting infrastructure shared by all modules.
        self.events = EventBus()
        self.retry = RetryExecutor()

        # One repository per module.
        self.rag_repository = RagRepository()
        self.chat_repository = ChatRepository()
        self.agent_repository = AgentRepository()

        # Modules, wired in dependency order.
        self.rag = RagModule(event_bus=self.events, retry=self.retry, repository=self.rag_repository)
        self.agent = AgentModule(rag_retriever=self.rag.rag, agent_repository=self.agent_repository)
        self.chat = ChatModule(
            agent_runner=self.agent.runtime,
            event_bus=self.events,
            retry=self.retry,
            rag_sessions=self.rag.sessions,
            repository=self.chat_repository,
        )

    def startup(self) -> None:
        """Ensure all module tables exist before serving requests."""
        bootstrap_database(self.rag_repository, self.chat_repository, self.agent_repository)
|
||||
0
app/modules/chat/__init__.py
Normal file
0
app/modules/chat/__init__.py
Normal file
BIN
app/modules/chat/__pycache__/__init__.cpython-312.pyc
Normal file
BIN
app/modules/chat/__pycache__/__init__.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/chat/__pycache__/dialog_store.cpython-312.pyc
Normal file
BIN
app/modules/chat/__pycache__/dialog_store.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/chat/__pycache__/module.cpython-312.pyc
Normal file
BIN
app/modules/chat/__pycache__/module.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/chat/__pycache__/repository.cpython-312.pyc
Normal file
BIN
app/modules/chat/__pycache__/repository.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/chat/__pycache__/service.cpython-312.pyc
Normal file
BIN
app/modules/chat/__pycache__/service.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/chat/__pycache__/task_store.cpython-312.pyc
Normal file
BIN
app/modules/chat/__pycache__/task_store.cpython-312.pyc
Normal file
Binary file not shown.
29
app/modules/chat/dialog_store.py
Normal file
29
app/modules/chat/dialog_store.py
Normal file
@@ -0,0 +1,29 @@
|
||||
from dataclasses import dataclass
|
||||
from uuid import uuid4
|
||||
|
||||
from app.modules.chat.repository import ChatRepository
|
||||
|
||||
|
||||
@dataclass
class DialogSession:
    """A chat dialog bound to exactly one RAG session."""

    dialog_session_id: str
    rag_session_id: str
|
||||
|
||||
|
||||
class DialogSessionStore:
    """Persistence facade over ChatRepository for dialog sessions."""

    def __init__(self, repository: ChatRepository) -> None:
        self._repo = repository

    def create(self, rag_session_id: str) -> DialogSession:
        """Create, persist and return a new dialog bound to *rag_session_id*."""
        new_session = DialogSession(dialog_session_id=str(uuid4()), rag_session_id=rag_session_id)
        self._repo.create_dialog(new_session.dialog_session_id, new_session.rag_session_id)
        return new_session

    def get(self, dialog_session_id: str) -> DialogSession | None:
        """Load a dialog by id; returns None when it does not exist."""
        record = self._repo.get_dialog(dialog_session_id)
        if not record:
            return None
        return DialogSession(
            dialog_session_id=str(record["dialog_session_id"]),
            rag_session_id=str(record["rag_session_id"]),
        )
|
||||
104
app/modules/chat/module.py
Normal file
104
app/modules/chat/module.py
Normal file
@@ -0,0 +1,104 @@
|
||||
from fastapi import APIRouter, Header
|
||||
from fastapi.responses import StreamingResponse
|
||||
|
||||
from app.core.exceptions import AppError
|
||||
from app.modules.chat.dialog_store import DialogSessionStore
|
||||
from app.modules.chat.repository import ChatRepository
|
||||
from app.modules.chat.service import ChatOrchestrator
|
||||
from app.modules.chat.task_store import TaskStore
|
||||
from app.modules.contracts import AgentRunner
|
||||
from app.modules.rag.session_store import RagSessionStore
|
||||
from app.modules.shared.event_bus import EventBus
|
||||
from app.modules.shared.idempotency_store import IdempotencyStore
|
||||
from app.modules.shared.retry_executor import RetryExecutor
|
||||
from app.schemas.chat import (
|
||||
ChatMessageRequest,
|
||||
DialogCreateRequest,
|
||||
DialogCreateResponse,
|
||||
TaskQueuedResponse,
|
||||
TaskResultResponse,
|
||||
)
|
||||
from app.schemas.common import ModuleName
|
||||
|
||||
|
||||
class ChatModule:
    """Wires the chat HTTP API to the orchestrator and its stores."""

    def __init__(
        self,
        agent_runner: AgentRunner,
        event_bus: EventBus,
        retry: RetryExecutor,
        rag_sessions: RagSessionStore,
        repository: ChatRepository,
    ) -> None:
        self._rag_sessions = rag_sessions
        self.tasks = TaskStore()
        self.dialogs = DialogSessionStore(repository)
        self.idempotency = IdempotencyStore()
        self.events = event_bus
        self.chat = ChatOrchestrator(
            task_store=self.tasks,
            dialogs=self.dialogs,
            idempotency=self.idempotency,
            runtime=agent_runner,
            events=self.events,
            retry=retry,
            rag_session_exists=lambda rag_session_id: rag_sessions.get(rag_session_id) is not None,
            message_sink=repository.add_message,
        )

    def public_router(self) -> APIRouter:
        """Build the public router: dialogs, messages, task polling and SSE events."""
        router = APIRouter(tags=["chat"])

        @router.post("/api/chat/dialogs", response_model=DialogCreateResponse)
        async def create_dialog(request: DialogCreateRequest) -> DialogCreateResponse:
            # Reject dialogs pointing at a non-existent RAG session up front.
            if not self._rag_sessions.get(request.rag_session_id):
                raise AppError("rag_session_not_found", "RAG session not found", ModuleName.RAG)
            dialog = self.dialogs.create(request.rag_session_id)
            return DialogCreateResponse(
                dialog_session_id=dialog.dialog_session_id,
                rag_session_id=dialog.rag_session_id,
            )

        @router.post("/api/chat/messages", response_model=TaskQueuedResponse)
        async def send_message(
            request: ChatMessageRequest,
            idempotency_key: str | None = Header(default=None, alias="Idempotency-Key"),
        ) -> TaskQueuedResponse:
            task = await self.chat.enqueue_message(request, idempotency_key)
            return TaskQueuedResponse(task_id=task.task_id, status=task.status.value)

        @router.get("/api/tasks/{task_id}", response_model=TaskResultResponse)
        async def get_task(task_id: str) -> TaskResultResponse:
            task = self.tasks.get(task_id)
            if not task:
                raise AppError("not_found", f"Task not found: {task_id}", ModuleName.BACKEND)
            return TaskResultResponse(
                task_id=task.task_id,
                status=task.status,
                result_type=task.result_type,
                answer=task.answer,
                changeset=task.changeset,
                error=task.error,
            )

        @router.get("/api/events")
        async def stream_events(task_id: str) -> StreamingResponse:
            queue = await self.events.subscribe(task_id)

            async def event_stream():
                import asyncio

                heartbeat = 10
                try:
                    while True:
                        try:
                            event = await asyncio.wait_for(queue.get(), timeout=heartbeat)
                            yield EventBus.as_sse(event)
                        except asyncio.TimeoutError:
                            # BUG FIX: previously yielded ": keepalive\\n\\n"
                            # (escaped backslashes), i.e. literal backslash-n
                            # text. An SSE comment line must be terminated by
                            # real newlines, otherwise the frame never ends
                            # and the stream stalls/corrupts on the client.
                            yield ": keepalive\n\n"
                finally:
                    # Always detach the subscriber, even if the client disconnects.
                    await self.events.unsubscribe(task_id, queue)

            return StreamingResponse(event_stream(), media_type="text/event-stream")

        return router
|
||||
93
app/modules/chat/repository.py
Normal file
93
app/modules/chat/repository.py
Normal file
@@ -0,0 +1,93 @@
|
||||
import json
|
||||
|
||||
from sqlalchemy import text
|
||||
|
||||
from app.modules.shared.db import get_engine
|
||||
|
||||
|
||||
class ChatRepository:
    """SQL persistence for dialog sessions and chat messages (PostgreSQL)."""

    def ensure_tables(self) -> None:
        """Create the dialog/message tables and backfill newer columns."""
        with get_engine().connect() as connection:
            connection.execute(
                text(
                    """
                    CREATE TABLE IF NOT EXISTS dialog_sessions (
                        dialog_session_id VARCHAR(64) PRIMARY KEY,
                        rag_session_id VARCHAR(64) NOT NULL,
                        created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
                    )
                    """
                )
            )
            connection.execute(
                text(
                    """
                    CREATE TABLE IF NOT EXISTS chat_messages (
                        id BIGSERIAL PRIMARY KEY,
                        dialog_session_id VARCHAR(64) NOT NULL,
                        task_id VARCHAR(64),
                        role VARCHAR(16) NOT NULL,
                        content TEXT NOT NULL,
                        payload JSONB,
                        created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
                    )
                    """
                )
            )
            # Migrations for databases created before these columns existed.
            connection.execute(text("ALTER TABLE chat_messages ADD COLUMN IF NOT EXISTS task_id VARCHAR(64)"))
            connection.execute(text("ALTER TABLE chat_messages ADD COLUMN IF NOT EXISTS payload JSONB"))
            connection.commit()

    def create_dialog(self, dialog_session_id: str, rag_session_id: str) -> None:
        """Insert a new dialog session row."""
        with get_engine().connect() as connection:
            connection.execute(
                text(
                    """
                    INSERT INTO dialog_sessions (dialog_session_id, rag_session_id)
                    VALUES (:did, :sid)
                    """
                ),
                {"did": dialog_session_id, "sid": rag_session_id},
            )
            connection.commit()

    def get_dialog(self, dialog_session_id: str) -> dict | None:
        """Fetch a dialog session as a plain dict, or None when absent."""
        with get_engine().connect() as connection:
            row = connection.execute(
                text(
                    """
                    SELECT dialog_session_id, rag_session_id
                    FROM dialog_sessions
                    WHERE dialog_session_id = :did
                    """
                ),
                {"did": dialog_session_id},
            ).mappings().fetchone()
            return dict(row) if row else None

    def add_message(
        self,
        dialog_session_id: str,
        role: str,
        content: str,
        task_id: str | None = None,
        payload: dict | None = None,
    ) -> None:
        """Append a chat message; *payload* (if any) is stored as JSONB."""
        payload_json = json.dumps(payload, ensure_ascii=False) if payload is not None else None
        with get_engine().connect() as connection:
            connection.execute(
                text(
                    """
                    INSERT INTO chat_messages (dialog_session_id, task_id, role, content, payload)
                    VALUES (:did, :task_id, :role, :content, CAST(:payload AS JSONB))
                    """
                ),
                {
                    "did": dialog_session_id,
                    "task_id": task_id,
                    "role": role,
                    "content": content,
                    "payload": payload_json,
                },
            )
            connection.commit()
|
||||
276
app/modules/chat/service.py
Normal file
276
app/modules/chat/service.py
Normal file
@@ -0,0 +1,276 @@
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
from app.core.exceptions import AppError
|
||||
from app.modules.contracts import AgentRunner
|
||||
from app.schemas.chat import ChatMessageRequest, TaskResultType, TaskStatus
|
||||
from app.schemas.common import ErrorPayload, ModuleName
|
||||
from app.modules.chat.dialog_store import DialogSessionStore
|
||||
from app.modules.chat.task_store import TaskState, TaskStore
|
||||
from app.modules.shared.event_bus import EventBus
|
||||
from app.modules.shared.idempotency_store import IdempotencyStore
|
||||
from app.modules.shared.retry_executor import RetryExecutor
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ChatOrchestrator:
    """Coordinates the lifecycle of a chat task.

    Accepts a message (with idempotency), runs the agent in a background
    asyncio task, streams progress/heartbeat events, persists messages,
    and records the final result or error on the task state.
    """

    def __init__(
        self,
        task_store: TaskStore,
        dialogs: DialogSessionStore,
        idempotency: IdempotencyStore,
        runtime: AgentRunner,
        events: EventBus,
        retry: RetryExecutor,
        rag_session_exists,
        message_sink,
    ) -> None:
        self._task_store = task_store
        self._dialogs = dialogs
        self._idempotency = idempotency
        self._runtime = runtime
        self._events = events
        self._retry = retry
        self._rag_session_exists = rag_session_exists
        self._message_sink = message_sink
        # BUG FIX: asyncio.create_task only keeps a weak reference to the
        # task; without a strong reference here, a running _process_task
        # could be garbage-collected mid-flight. Keep references until done.
        self._background_tasks: set[asyncio.Task] = set()

    async def enqueue_message(
        self,
        request: ChatMessageRequest,
        idempotency_key: str | None,
    ) -> TaskState:
        """Create (or reuse, via idempotency key) a task and start processing it."""
        if idempotency_key:
            existing = self._idempotency.get_task_id(idempotency_key)
            if existing:
                task = self._task_store.get(existing)
                if task:
                    LOGGER.warning(
                        "enqueue_message reused task by idempotency key: task_id=%s mode=%s",
                        task.task_id,
                        request.mode.value,
                    )
                    return task

        task = self._task_store.create()
        if idempotency_key:
            self._idempotency.put(idempotency_key, task.task_id)
        worker = asyncio.create_task(self._process_task(task.task_id, request))
        # Retain a strong reference; discard automatically when the task ends.
        self._background_tasks.add(worker)
        worker.add_done_callback(self._background_tasks.discard)
        LOGGER.warning(
            "enqueue_message created task: task_id=%s mode=%s",
            task.task_id,
            request.mode.value,
        )
        return task

    async def _process_task(self, task_id: str, request: ChatMessageRequest) -> None:
        """Background worker: run the agent, publish events, record the outcome."""
        task = self._task_store.get(task_id)
        if not task:
            return
        task.status = TaskStatus.RUNNING
        self._task_store.save(task)
        await self._events.publish(task_id, "task_status", {"task_id": task_id, "status": task.status.value})
        await self._publish_progress(task_id, "task.start", "Запрос принят, начинаю обработку.", progress=5)

        heartbeat_stop = asyncio.Event()
        heartbeat_task = asyncio.create_task(self._run_heartbeat(task_id, heartbeat_stop))

        try:
            await self._publish_progress(task_id, "task.sessions", "Проверяю сессии диалога и проекта.", progress=10)
            dialog_session_id, rag_session_id = self._resolve_sessions(request)
            await self._publish_progress(task_id, "task.sessions.done", "Сессии проверены, запускаю агента.", progress=15)
            loop = asyncio.get_running_loop()

            # Thread-safe bridge: the agent may report progress from worker
            # threads, so schedule the publish back onto this event loop.
            def progress_cb(stage: str, message: str, kind: str = "task_progress", meta: dict | None = None):
                asyncio.run_coroutine_threadsafe(
                    self._events.publish(
                        task_id,
                        kind,
                        {
                            "task_id": task_id,
                            "stage": stage,
                            "message": message,
                            "meta": meta or {},
                        },
                    ),
                    loop,
                )

            async def op():
                self._message_sink(dialog_session_id, "user", request.message, task_id=task_id)
                await self._publish_progress(task_id, "task.agent.run", "Агент анализирует запрос и готовит ответ.", progress=20)
                return await self._runtime.run(
                    task_id=task_id,
                    dialog_session_id=dialog_session_id,
                    rag_session_id=rag_session_id,
                    mode=request.mode.value,
                    message=request.message,
                    attachments=[a.model_dump(mode="json") for a in request.attachments],
                    files=[f.model_dump(mode="json") for f in request.files],
                    progress_cb=progress_cb,
                )

            result = await self._retry.run(op)
            await self._publish_progress(task_id, "task.finalize", "Сохраняю финальный результат.", progress=95)
            task.status = TaskStatus.DONE
            task.result_type = TaskResultType(result.result_type)
            task.answer = result.answer
            task.changeset = result.changeset
            # Persist the assistant turn: plain text for answers, a marker
            # plus the full changeset payload for changesets.
            if task.result_type == TaskResultType.ANSWER and task.answer:
                self._message_sink(dialog_session_id, "assistant", task.answer, task_id=task_id)
            elif task.result_type == TaskResultType.CHANGESET:
                self._message_sink(
                    dialog_session_id,
                    "assistant",
                    f"changeset:{len(task.changeset)}",
                    task_id=task_id,
                    payload={
                        "result_type": TaskResultType.CHANGESET.value,
                        "changeset": [item.model_dump(mode="json") for item in task.changeset],
                    },
                )
            self._task_store.save(task)
            await self._events.publish(
                task_id,
                "task_result",
                {
                    "task_id": task_id,
                    "status": task.status.value,
                    "result_type": task.result_type.value,
                    "answer": task.answer,
                    "changeset": [item.model_dump(mode="json") for item in task.changeset],
                    "meta": getattr(result, "meta", {}) or {},
                },
            )
            await self._publish_progress(task_id, "task.done", "Обработка завершена.", progress=100)
            LOGGER.warning(
                "_process_task completed: task_id=%s status=%s result_type=%s changeset_items=%s",
                task_id,
                task.status.value,
                task.result_type.value if task.result_type else "",
                len(task.changeset),
            )
        except (AppError, TimeoutError, ConnectionError, OSError) as exc:
            # Expected failures: domain errors and transient infrastructure
            # problems that survived the retry executor.
            task.status = TaskStatus.ERROR
            if isinstance(exc, AppError):
                payload = ErrorPayload(code=exc.code, desc=exc.desc, module=exc.module)
            else:
                payload = ErrorPayload(
                    code="retry_exhausted",
                    desc="Temporary failure after retries. Please retry request.",
                    module=ModuleName.BACKEND,
                )
            task.error = payload
            self._task_store.save(task)
            await self._publish_progress(task_id, "task.error", "Не удалось завершить обработку запроса.", kind="task_thinking")
            await self._events.publish(task_id, "task_error", payload.model_dump(mode="json"))
            LOGGER.warning(
                "_process_task handled error: task_id=%s code=%s module=%s desc=%s",
                task_id,
                payload.code,
                payload.module.value,
                payload.desc,
            )
        except Exception:
            # Last-resort boundary: record a generic agent error and log the
            # full traceback rather than letting the background task die silently.
            task.status = TaskStatus.ERROR
            payload = ErrorPayload(
                code="agent_runtime_error",
                desc="Agent execution failed unexpectedly. Please retry request.",
                module=ModuleName.AGENT,
            )
            task.error = payload
            self._task_store.save(task)
            await self._publish_progress(
                task_id,
                "task.error",
                "Во время выполнения возникла внутренняя ошибка.",
                kind="task_thinking",
            )
            await self._events.publish(task_id, "task_error", payload.model_dump(mode="json"))
            LOGGER.exception(
                "_process_task unexpected error: task_id=%s code=%s",
                task_id,
                payload.code,
            )
        finally:
            heartbeat_stop.set()
            await heartbeat_task

    async def _publish_progress(
        self,
        task_id: str,
        stage: str,
        message: str,
        *,
        progress: int | None = None,
        kind: str = "task_progress",
        meta: dict | None = None,
    ) -> None:
        """Publish a progress event; *progress* is clamped to 0..100."""
        payload = {
            "task_id": task_id,
            "stage": stage,
            "message": message,
            "meta": meta or {},
        }
        if progress is not None:
            payload["progress"] = max(0, min(100, int(progress)))
        await self._events.publish(task_id, kind, payload)
        LOGGER.warning(
            "_publish_progress emitted: task_id=%s kind=%s stage=%s progress=%s",
            task_id,
            kind,
            stage,
            payload.get("progress"),
        )

    async def _run_heartbeat(self, task_id: str, stop_event: asyncio.Event) -> None:
        """Emit a rotating 'thinking' message every 5s until *stop_event* is set."""
        messages = (
            "Собираю данные по проекту.",
            "Анализирую контекст и формирую структуру ответа.",
            "Проверяю согласованность промежуточного результата.",
        )
        index = 0
        while not stop_event.is_set():
            try:
                await asyncio.wait_for(stop_event.wait(), timeout=5.0)
            except asyncio.TimeoutError:
                await self._publish_progress(
                    task_id,
                    "task.heartbeat",
                    messages[index % len(messages)],
                    kind="task_thinking",
                    meta={"heartbeat": True},
                )
                index += 1
        LOGGER.warning("_run_heartbeat stopped: task_id=%s ticks=%s", task_id, index)

    def _resolve_sessions(self, request: ChatMessageRequest) -> tuple[str, str]:
        """Resolve (dialog_session_id, rag_session_id), validating ownership.

        Raises AppError when sessions are missing, unknown, or mismatched.
        """
        if request.dialog_session_id and request.rag_session_id:
            dialog = self._dialogs.get(request.dialog_session_id)
            if not dialog:
                raise AppError("dialog_not_found", "Dialog session not found", ModuleName.BACKEND)
            if dialog.rag_session_id != request.rag_session_id:
                raise AppError("dialog_rag_mismatch", "Dialog session does not belong to rag session", ModuleName.BACKEND)
            LOGGER.warning(
                "_resolve_sessions resolved by dialog_session_id: dialog_session_id=%s rag_session_id=%s",
                request.dialog_session_id,
                request.rag_session_id,
            )
            return request.dialog_session_id, request.rag_session_id

        # Legacy compatibility: old session_id/project_id flow.
        if request.session_id and request.project_id:
            if not self._rag_session_exists(request.project_id):
                raise AppError("rag_session_not_found", "RAG session not found", ModuleName.RAG)
            LOGGER.warning(
                "_resolve_sessions resolved by legacy session/project: session_id=%s project_id=%s",
                request.session_id,
                request.project_id,
            )
            return request.session_id, request.project_id

        raise AppError(
            "missing_sessions",
            "dialog_session_id and rag_session_id are required",
            ModuleName.BACKEND,
        )
|
||||
37
app/modules/chat/task_store.py
Normal file
37
app/modules/chat/task_store.py
Normal file
@@ -0,0 +1,37 @@
|
||||
from dataclasses import dataclass, field
|
||||
from threading import Lock
|
||||
from uuid import uuid4
|
||||
|
||||
from app.schemas.changeset import ChangeItem
|
||||
from app.schemas.chat import TaskResultType, TaskStatus
|
||||
from app.schemas.common import ErrorPayload
|
||||
|
||||
|
||||
@dataclass
class TaskState:
    """Mutable in-memory record of one chat task's lifecycle and result."""

    task_id: str
    status: TaskStatus = TaskStatus.QUEUED
    result_type: TaskResultType | None = None
    answer: str | None = None
    changeset: list[ChangeItem] = field(default_factory=list)
    error: ErrorPayload | None = None
|
||||
|
||||
|
||||
class TaskStore:
    """Thread-safe in-memory registry of task states keyed by task_id."""

    def __init__(self) -> None:
        self._items: dict[str, TaskState] = {}
        self._lock = Lock()

    def create(self) -> TaskState:
        """Register and return a fresh task with a random UUID id."""
        state = TaskState(task_id=str(uuid4()))
        with self._lock:
            self._items[state.task_id] = state
        return state

    def get(self, task_id: str) -> TaskState | None:
        """Look up a task; None when unknown."""
        with self._lock:
            return self._items.get(task_id)

    def save(self, task: TaskState) -> None:
        """Store (or overwrite) a task under its own id."""
        with self._lock:
            self._items[task.task_id] = task
|
||||
47
app/modules/contracts.py
Normal file
47
app/modules/contracts.py
Normal file
@@ -0,0 +1,47 @@
|
||||
from typing import Protocol
|
||||
from collections.abc import Awaitable, Callable
|
||||
|
||||
from app.schemas.changeset import ChangeItem
|
||||
from app.schemas.chat import TaskResultType
|
||||
|
||||
|
||||
class AgentRunResult(Protocol):
    """Structural type of the value an AgentRunner.run() call resolves to."""

    # Discriminator for how the result should be consumed (see TaskResultType).
    result_type: TaskResultType
    # Textual answer, when the run produced one.
    answer: str | None
    # Change items, when the run produced structured changes.
    changeset: list[ChangeItem]
    # Free-form metadata about the run; schema not visible here — verify at call sites.
    meta: dict
|
||||
|
||||
|
||||
class AgentRunner(Protocol):
    """Structural interface of the agent backend that executes one chat task.

    All arguments are keyword-only. `progress_cb`, when given, is invoked
    with four positional values (three strings and an optional dict) and may
    be sync or async — exact argument semantics are not visible here; confirm
    against the concrete runner implementation.
    """

    async def run(
        self,
        *,
        task_id: str,
        dialog_session_id: str,
        rag_session_id: str,
        mode: str,
        message: str,
        attachments: list[dict],
        files: list[dict],
        progress_cb: Callable[[str, str, str, dict | None], Awaitable[None] | None] | None = None,
    ) -> AgentRunResult: ...
|
||||
|
||||
|
||||
class RagRetriever(Protocol):
    """Structural interface for retrieving context documents for a query."""

    # Returns a list of dict records for the given session and query;
    # record schema is defined by the implementation.
    async def retrieve(self, rag_session_id: str, query: str) -> list[dict]: ...
|
||||
|
||||
|
||||
class RagIndexer(Protocol):
    """Structural interface for (re)indexing files into a RAG session.

    Both methods return a `tuple[int, int]` — presumably (processed, total)
    counts, mirroring the (int, int, str) progress callback; confirm against
    the concrete indexer. `progress_cb` may be sync or async.
    """

    # Index a full snapshot of files for the session.
    async def index_snapshot(
        self,
        rag_session_id: str,
        files: list[dict],
        progress_cb: Callable[[int, int, str], Awaitable[None] | None] | None = None,
    ) -> tuple[int, int]: ...

    # Incrementally index only the files that changed.
    async def index_changes(
        self,
        rag_session_id: str,
        changed_files: list[dict],
        progress_cb: Callable[[int, int, str], Awaitable[None] | None] | None = None,
    ) -> tuple[int, int]: ...
|
||||
0
app/modules/rag/__init__.py
Normal file
0
app/modules/rag/__init__.py
Normal file
BIN
app/modules/rag/__pycache__/__init__.cpython-312.pyc
Normal file
BIN
app/modules/rag/__pycache__/__init__.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/rag/__pycache__/indexing_service.cpython-312.pyc
Normal file
BIN
app/modules/rag/__pycache__/indexing_service.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/rag/__pycache__/job_store.cpython-312.pyc
Normal file
BIN
app/modules/rag/__pycache__/job_store.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/rag/__pycache__/module.cpython-312.pyc
Normal file
BIN
app/modules/rag/__pycache__/module.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/rag/__pycache__/repository.cpython-312.pyc
Normal file
BIN
app/modules/rag/__pycache__/repository.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/rag/__pycache__/service.cpython-312.pyc
Normal file
BIN
app/modules/rag/__pycache__/service.cpython-312.pyc
Normal file
Binary file not shown.
BIN
app/modules/rag/__pycache__/session_store.cpython-312.pyc
Normal file
BIN
app/modules/rag/__pycache__/session_store.cpython-312.pyc
Normal file
Binary file not shown.
0
app/modules/rag/embedding/__init__.py
Normal file
0
app/modules/rag/embedding/__init__.py
Normal file
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user