Files
agent/app/modules/agent/engine/graphs/base_graph.py
2026-02-25 14:47:19 +03:00

59 lines
2.2 KiB
Python

from langgraph.graph import END, START, StateGraph
from app.modules.agent.engine.graphs.progress import emit_progress_sync
from app.modules.agent.llm import AgentLlmService
from app.modules.agent.engine.graphs.state import AgentGraphState
class BaseGraphFactory:
    """Builds the default two-stage agent graph: context preparation -> answer.

    The compiled graph runs a linear pipeline
    ``START -> context -> answer -> END`` over :class:`AgentGraphState`.
    """

    def __init__(self, llm: AgentLlmService) -> None:
        # LLM service used by the answer node to generate the final reply.
        self._llm = llm

    def build(self, checkpointer=None):
        """Assemble and compile the graph.

        :param checkpointer: optional langgraph checkpointer passed through
            to ``StateGraph.compile``; ``None`` disables checkpointing.
        :return: the compiled, runnable graph.
        """
        workflow = StateGraph(AgentGraphState)
        workflow.add_node("context", self._context_node)
        workflow.add_node("answer", self._answer_node)
        # Linear wiring: START -> context -> answer -> END.
        for src, dst in ((START, "context"), ("context", "answer"), ("answer", END)):
            workflow.add_edge(src, dst)
        return workflow.compile(checkpointer=checkpointer)

    def _context_node(self, state: AgentGraphState) -> dict:
        """Pass-through node: re-emits the retrieval contexts already in state.

        Emits two progress events (start / done) so the caller can surface
        pipeline status; performs no transformation of the contexts.
        """
        emit_progress_sync(
            state,
            stage="graph.default.context",
            message="Готовлю контекст ответа по данным запроса.",
        )
        rag_ctx = state.get("rag_context", "")
        confluence_ctx = state.get("confluence_context", "")
        emit_progress_sync(
            state,
            stage="graph.default.context.done",
            message="Контекст собран, перехожу к формированию ответа.",
        )
        return {"rag_context": rag_ctx, "confluence_context": confluence_ctx}

    def _answer_node(self, state: AgentGraphState) -> dict:
        """Generate the final answer from the user message plus both contexts.

        Builds a single prompt containing the user request, RAG context, and
        Confluence context, then delegates to the LLM's ``general_answer``
        prompt template.
        """
        emit_progress_sync(
            state,
            stage="graph.default.answer",
            message="Формирую текст ответа для пользователя.",
        )
        msg = state.get("message", "")
        rag_ctx = state.get("rag_context", "")
        confluence_ctx = state.get("confluence_context", "")
        # Sections are separated by a blank line; labels are part of the prompt contract.
        user_input = (
            f"User request:\n{msg}"
            f"\n\nRAG context:\n{rag_ctx}"
            f"\n\nConfluence context:\n{confluence_ctx}"
        )
        answer = self._llm.generate("general_answer", user_input)
        emit_progress_sync(
            state,
            stage="graph.default.answer.done",
            message="Черновик ответа подготовлен.",
        )
        return {"answer": answer}