From 89c0d21e8898cba5c99702d34a52f585caa3b4cb Mon Sep 17 00:00:00 2001 From: zosimovaa Date: Thu, 5 Mar 2026 11:46:05 +0300 Subject: [PATCH] =?UTF-8?q?=D0=9F=D0=B5=D1=80=D0=B5=D0=BD=D0=B5=D1=81=20wo?= =?UTF-8?q?rkflow?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 617 +++++------------- requirements/Architectural constraints.md | 135 ++-- requirements/Mail Order Bot Migration Plan.md | 142 ++-- requirements/README.md | 523 +++------------ requirements/application_guidelines.md | 148 +++++ requirements/architecture.md | 281 +++----- requirements/vision.md | 133 +--- .../__pycache__/application.cpython-312.pyc | Bin 1042 -> 1024 bytes src/app_runtime/contracts/application.py | 2 +- src/app_runtime/contracts/queue.py | 28 - src/app_runtime/contracts/tasks.py | 18 - .../__pycache__/registration.cpython-312.pyc | Bin 2291 -> 1657 bytes src/app_runtime/core/registration.py | 10 - .../__pycache__/in_memory.cpython-312.pyc | Bin 2543 -> 2257 bytes src/app_runtime/queue/in_memory.py | 41 +- .../__pycache__/__init__.cpython-312.pyc | Bin 649 -> 684 bytes .../__pycache__/service.cpython-312.pyc | Bin 9017 -> 8993 bytes .../__pycache__/transport.cpython-312.pyc | Bin 871 -> 4843 bytes src/app_runtime/tracing/service.py | 18 +- src/app_runtime/workers/__init__.py | 3 +- .../__pycache__/__init__.cpython-312.pyc | Bin 351 -> 272 bytes src/app_runtime/workers/queue_worker.py | 125 ---- src/app_runtime/workflow/__init__.py | 1 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 248 bytes .../runtime_factory.cpython-312.pyc | Bin 0 -> 1298 bytes .../workflow/contracts/__init__.py | 1 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 258 bytes .../__pycache__/context.cpython-312.pyc | Bin 0 -> 986 bytes .../__pycache__/result.cpython-312.pyc | Bin 0 -> 776 bytes .../__pycache__/step.cpython-312.pyc | Bin 0 -> 836 bytes .../__pycache__/workflow.cpython-312.pyc | Bin 0 -> 1080 bytes 
src/app_runtime/workflow/contracts/context.py | 16 + src/app_runtime/workflow/contracts/result.py | 11 + src/app_runtime/workflow/contracts/step.py | 12 + .../workflow/contracts/workflow.py | 19 + src/app_runtime/workflow/engine/__init__.py | 1 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 255 bytes .../engine/__pycache__/hooks.cpython-312.pyc | Bin 0 -> 987 bytes .../transition_resolver.cpython-312.pyc | Bin 0 -> 882 bytes .../workflow_engine.cpython-312.pyc | Bin 0 -> 5588 bytes src/app_runtime/workflow/engine/hooks.py | 14 + .../workflow/engine/transition_resolver.py | 9 + .../workflow/engine/workflow_engine.py | 68 ++ .../workflow/persistence/__init__.py | 3 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 311 bytes .../checkpoint_repository.cpython-312.pyc | Bin 0 -> 2242 bytes .../snapshot_sanitizer.cpython-312.pyc | Bin 0 -> 1575 bytes .../workflow_persistence.cpython-312.pyc | Bin 0 -> 4310 bytes .../workflow_repository.cpython-312.pyc | Bin 0 -> 8882 bytes .../persistence/checkpoint_repository.py | 46 ++ .../persistence/snapshot_sanitizer.py | 20 + .../persistence/workflow_persistence.py | 53 ++ .../persistence/workflow_repository.py | 140 ++++ src/app_runtime/workflow/runtime_factory.py | 15 + src/plba/__init__.py | 27 +- src/plba/__pycache__/__init__.cpython-312.pyc | Bin 1380 -> 1606 bytes .../__pycache__/contracts.cpython-312.pyc | Bin 869 -> 713 bytes src/plba/__pycache__/tracing.cpython-312.pyc | Bin 331 -> 363 bytes src/plba/__pycache__/workers.cpython-312.pyc | Bin 335 -> 256 bytes src/plba/__pycache__/workflow.cpython-312.pyc | Bin 0 -> 799 bytes src/plba/contracts.py | 5 - src/plba/workers.py | 3 +- src/plba/workflow.py | 18 + .../test_runtime.cpython-312-pytest-9.0.2.pyc | Bin 25461 -> 36104 bytes tests/test_runtime.py | 205 +++++- 65 files changed, 1271 insertions(+), 1640 deletions(-) create mode 100644 requirements/application_guidelines.md delete mode 100644 src/app_runtime/contracts/queue.py delete mode 100644 
src/app_runtime/contracts/tasks.py delete mode 100644 src/app_runtime/workers/queue_worker.py create mode 100644 src/app_runtime/workflow/__init__.py create mode 100644 src/app_runtime/workflow/__pycache__/__init__.cpython-312.pyc create mode 100644 src/app_runtime/workflow/__pycache__/runtime_factory.cpython-312.pyc create mode 100644 src/app_runtime/workflow/contracts/__init__.py create mode 100644 src/app_runtime/workflow/contracts/__pycache__/__init__.cpython-312.pyc create mode 100644 src/app_runtime/workflow/contracts/__pycache__/context.cpython-312.pyc create mode 100644 src/app_runtime/workflow/contracts/__pycache__/result.cpython-312.pyc create mode 100644 src/app_runtime/workflow/contracts/__pycache__/step.cpython-312.pyc create mode 100644 src/app_runtime/workflow/contracts/__pycache__/workflow.cpython-312.pyc create mode 100644 src/app_runtime/workflow/contracts/context.py create mode 100644 src/app_runtime/workflow/contracts/result.py create mode 100644 src/app_runtime/workflow/contracts/step.py create mode 100644 src/app_runtime/workflow/contracts/workflow.py create mode 100644 src/app_runtime/workflow/engine/__init__.py create mode 100644 src/app_runtime/workflow/engine/__pycache__/__init__.cpython-312.pyc create mode 100644 src/app_runtime/workflow/engine/__pycache__/hooks.cpython-312.pyc create mode 100644 src/app_runtime/workflow/engine/__pycache__/transition_resolver.cpython-312.pyc create mode 100644 src/app_runtime/workflow/engine/__pycache__/workflow_engine.cpython-312.pyc create mode 100644 src/app_runtime/workflow/engine/hooks.py create mode 100644 src/app_runtime/workflow/engine/transition_resolver.py create mode 100644 src/app_runtime/workflow/engine/workflow_engine.py create mode 100644 src/app_runtime/workflow/persistence/__init__.py create mode 100644 src/app_runtime/workflow/persistence/__pycache__/__init__.cpython-312.pyc create mode 100644 src/app_runtime/workflow/persistence/__pycache__/checkpoint_repository.cpython-312.pyc create 
mode 100644 src/app_runtime/workflow/persistence/__pycache__/snapshot_sanitizer.cpython-312.pyc create mode 100644 src/app_runtime/workflow/persistence/__pycache__/workflow_persistence.cpython-312.pyc create mode 100644 src/app_runtime/workflow/persistence/__pycache__/workflow_repository.cpython-312.pyc create mode 100644 src/app_runtime/workflow/persistence/checkpoint_repository.py create mode 100644 src/app_runtime/workflow/persistence/snapshot_sanitizer.py create mode 100644 src/app_runtime/workflow/persistence/workflow_persistence.py create mode 100644 src/app_runtime/workflow/persistence/workflow_repository.py create mode 100644 src/app_runtime/workflow/runtime_factory.py create mode 100644 src/plba/__pycache__/workflow.cpython-312.pyc create mode 100644 src/plba/workflow.py diff --git a/README.md b/README.md index 99d6c8d..f3b5dbf 100644 --- a/README.md +++ b/README.md @@ -1,481 +1,206 @@ # PLBA -## 1. Общее описание сервиса и его назначение +`PLBA` (`Platform Runtime for Business Applications`) - runtime для бизнес-приложений. -`PLBA` (`Platform Runtime for Business Applications`) - это платформенный runtime для бизнес-приложений. Библиотека выносит из прикладного кода типовые инфраструктурные задачи: жизненный цикл приложения, запуск и остановку фоновых воркеров, загрузку конфигурации, health-check, tracing, логирование и control plane. +Платформа берет на себя инфраструктурные обязанности: +- lifecycle приложения +- запуск и остановку воркеров +- health/status +- tracing +- logging +- control plane +- загрузку конфигурации -Назначение сервиса: -- дать единый каркас для запуска бизнес-модулей; -- отделить платформенные обязанности от предметной логики; -- упростить разработку сервисов с очередями, polling-обработчиками и фоновыми процессами; -- обеспечить наблюдаемость и управляемость runtime через health/status и HTTP control endpoints. 
+Бизнес-приложение на базе `plba` собирается вокруг трех уровней: +- `ApplicationModule` собирает приложение и регистрирует воркеры +- `Worker` управляет исполнением и lifecycle +- `Routine` реализует бизнес-функцию -Текущая модель работы выглядит так: -1. приложение создаёт `RuntimeManager`; -2. runtime загружает конфигурацию; -3. применяется logging-конфигурация; -4. модуль приложения регистрирует очереди, обработчики, воркеры и health contributors; -5. `WorkerSupervisor` запускает все рабочие компоненты; -6. runtime агрегирует состояние, health и tracing; -7. control plane предоставляет снимок состояния и команды запуска/остановки. +`Routine` не является контрактом `plba`. Это рекомендуемый архитектурный паттерн для прикладного кода. -`PLBA` особенно полезен для: -- почтовых ботов и интеграционных сервисов; -- event-driven и queue-driven приложений; -- фоновых бизнес-процессов; -- внутренних платформенных сервисов с единым operational-контуром. +Правила построения приложений на платформе собраны отдельно в [application_guidelines.md](requirements/application_guidelines.md). -## 2. Архитектура - диаграмма классов +## Runtime model -```mermaid -classDiagram - class RuntimeManager { - +register_module(module) - +add_config_file(path) - +start() - +stop(timeout, force, stop_control_plane) - +status() - +current_health() - } +1. приложение объявляет `ApplicationModule` +2. модуль регистрирует один или несколько `Worker` +3. `RuntimeManager` запускает все воркеры +4. каждый `Worker` запускает свою бизнес-активность +5. runtime агрегирует health и status +6. 
runtime останавливает воркеры graceful или forcefully - class ConfigurationManager { - +add_provider(provider) - +load() - +reload() - +get() - +section(name, default) - } +## Main contracts - class ServiceContainer { - +register(name, service) - +get(name) - +require(name, expected_type) - +snapshot() - } +### `ApplicationModule` - class ModuleRegistry { - +register_module(name) - +add_queue(name, queue) - +add_handler(name, handler) - +add_worker(worker) - +add_health_contributor(contributor) - } +Описывает, из чего состоит приложение. - class ApplicationModule { - <> - +name - +register(registry) - } +Ответственность: +- дать имя модуля +- зарегистрировать воркеры +- зарегистрировать health contributors при необходимости +- собрать прикладные зависимости - class WorkerSupervisor { - +register(worker) - +start() - +stop(timeout, force) - +snapshot() - +healths() - +statuses() - } +### `Worker` - class Worker { - <> - +start() - +stop(force) - +health() - +status() - } +Главный runtime-контракт платформы. - class QueueWorker { - +name - +critical - +start() - +stop(force) - +health() - +status() - } +Контракт: +- `name` +- `critical` +- `start()` +- `stop(force=False)` +- `health()` +- `status()` - class TaskQueue { - <> - +publish(task) - +consume(timeout) - +ack(task) - +nack(task, retry_delay) - +stats() - } +`Worker` отвечает только за runtime-поведение: +- как запускается бизнес-активность +- в одном потоке или нескольких +- single-run или loop +- graceful shutdown +- интерпретацию ошибок в `health/status` - class InMemoryTaskQueue { - +publish(task) - +consume(timeout) - +ack(task) - +nack(task, retry_delay) - +stats() - } +### `Routine` - class TaskHandler { - <> - +handle(task) - } +Рекомендуемый application-level паттерн. - class TraceService { - +create_context(...) - +open_context(...) 
- +resume(task_metadata, operation) - +attach(task_metadata, context) - +info(message, status, attrs) - +warning(message, status, attrs) - +error(message, status, attrs) - } +`Routine` описывает бизнес-функцию: +- что читать +- какие сервисы вызывать +- какие бизнес-решения принимать +- что сохранять или отправлять наружу - class HealthRegistry { - +register(contributor) - +snapshot(worker_healths) - +payload(state, worker_healths) - } +Обычно воркер получает одну routine через конструктор и вызывает ее в `start()` или во внутренних helper-методах. - class ControlPlaneService { - +register_channel(channel) - +start(runtime) - +stop() - +snapshot(runtime) - } - - class LogManager { - +apply_config(config) - } - - class FileConfigProvider { - +load() - } - - RuntimeManager --> ConfigurationManager - RuntimeManager --> ServiceContainer - RuntimeManager --> ModuleRegistry - RuntimeManager --> WorkerSupervisor - RuntimeManager --> TraceService - RuntimeManager --> HealthRegistry - RuntimeManager --> ControlPlaneService - RuntimeManager --> LogManager - RuntimeManager --> ApplicationModule - ModuleRegistry --> ServiceContainer - ModuleRegistry --> Worker - ModuleRegistry --> TaskQueue - ModuleRegistry --> TaskHandler - WorkerSupervisor --> Worker - QueueWorker --|> Worker - QueueWorker --> TaskQueue - QueueWorker --> TaskHandler - QueueWorker --> TraceService - InMemoryTaskQueue --|> TaskQueue - ConfigurationManager --> FileConfigProvider -``` - -### Архитектурные слои - -- `app_runtime.core` - оркестрация runtime, контейнер сервисов, регистрация модулей, типы состояния. -- `app_runtime.contracts` - абстракции для интеграции бизнес-приложений. -- `app_runtime.workers`, `queue`, `config`, `logging`, `health`, `tracing`, `control` - инфраструктурные адаптеры и платформенные сервисы. -- `plba` - публичный фасад, который реэкспортирует ключевые классы как API пакета. - -## 3. 
Описание доступных модулей, их назначение, краткое устройство, примеры применения в бизнес приложениях - -### `plba` - -Публичный API пакета. Реэкспортирует `RuntimeManager`, `ApplicationModule`, `QueueWorker`, `InMemoryTaskQueue`, `TraceService`, `HealthRegistry`, `ControlPlaneService` и другие классы. - -Краткое устройство: -- служит фасадом над `app_runtime`; -- упрощает импорт для прикладного кода; -- позволяет использовать пакет как библиотеку без знания внутренней структуры. - -Пример применения: -- бизнес-сервис импортирует `create_runtime` и `ApplicationModule`, собирает свой модуль и запускает runtime. - -### `app_runtime.core` - -Основной orchestration-слой. - -Ключевые классы: -- `RuntimeManager` - центральная точка запуска и остановки; -- `ConfigurationManager` - загрузка и merge конфигурации; -- `ServiceContainer` - DI-like контейнер платформенных сервисов; -- `ModuleRegistry` - регистрация очередей, обработчиков, воркеров и health contributors. - -Краткое устройство: -- `RuntimeManager` создаёт и связывает инфраструктурные сервисы; -- при старте регистрирует health contributors, воркеры и поднимает control plane; -- `ModuleRegistry` связывает бизнес-модуль с runtime без жёсткой зависимости на конкретные реализации. - -Примеры применения в бизнес-приложениях: -- CRM-интеграция с несколькими фоновых воркерами; -- сервис обработки заявок, где один модуль регистрирует очередь, handler и worker pool; -- back-office процесс с управляемым graceful shutdown. - -### `app_runtime.contracts` - -Набор абстракций для расширения платформы. - -Ключевые контракты: -- `ApplicationModule`; -- `Worker`; -- `TaskQueue`; -- `TaskHandler`; -- `ConfigProvider`; -- `HealthContributor`; -- trace-related контракты. - -Краткое устройство: -- бизнес-код реализует интерфейсы, а runtime работает только через контракты; -- это позволяет менять инфраструктуру без переписывания прикладной логики. 
- -Примеры применения в бизнес-приложениях: -- реализовать свой `ApplicationModule` для почтового бота; -- подключить собственный `ConfigProvider` для БД или secrets storage; -- реализовать кастомный `Worker` для long-running polling процесса. - -### `app_runtime.workers` - -Модуль управления рабочими процессами. - -Ключевые классы: -- `WorkerSupervisor` - запускает и останавливает набор воркеров; -- `QueueWorker` - стандартный worker для обработки задач из очереди. - -Краткое устройство: -- `WorkerSupervisor` агрегирует health/status всех воркеров; -- `QueueWorker` поднимает нужное число потоков, читает задачи из `TaskQueue`, вызывает `TaskHandler`, делает `ack/nack` и обновляет operational-метрики. - -Примеры применения в бизнес-приложениях: -- параллельная обработка входящих писем; -- обработка очереди заказов; -- фоновая генерация документов или актов. - -### `app_runtime.queue` - -Очереди задач. - -Ключевой класс: -- `InMemoryTaskQueue`. - -Краткое устройство: -- использует стандартный `Queue` из Python; -- хранит счётчики `published`, `acked`, `nacked`, `queued`; -- подходит как базовая реализация для разработки, тестов и простых сценариев. - -Примеры применения в бизнес-приложениях: -- локальная очередь задач в небольшом внутреннем сервисе; -- тестовая среда без внешнего брокера; -- staging-сценарии для отладки worker pipeline. - -### `app_runtime.config` - -Подсистема загрузки конфигурации. - -Ключевые классы: -- `FileConfigProvider`; -- `ConfigFileLoader`. - -Краткое устройство: -- `ConfigurationManager` собирает данные из провайдеров; -- текущая штатная реализация читает YAML-файл; -- поддерживается глубокое слияние секций конфигурации. - -Примеры применения в бизнес-приложениях: -- конфигурация платформы и прикладных модулей из `config.yml`; -- раздельное хранение `platform`, `log` и app-specific секций; -- подключение нескольких источников конфигурации с последующим merge. - -### `app_runtime.logging` - -Управление логированием. 
- -Ключевой класс: -- `LogManager`. - -Краткое устройство: -- применяет `dictConfig` из секции `log`; -- хранит последнюю валидную конфигурацию и пытается восстановиться при ошибке. - -Примеры применения в бизнес-приложениях: -- единообразная настройка JSON-логов; -- переключение уровней логирования между окружениями; -- централизованная logging-конфигурация для нескольких модулей. - -### `app_runtime.health` - -Подсистема health aggregation. - -Ключевой класс: -- `HealthRegistry`. - -Краткое устройство: -- собирает health от воркеров и дополнительных contributors; -- агрегирует статус в `ok`, `degraded`, `unhealthy`; -- формирует payload для readiness/liveness и operational snapshot. - -Примеры применения в бизнес-приложениях: -- показывать degraded, если обработка идёт с ошибками; -- маркировать сервис unhealthy при падении критичного worker; -- добавлять health внешней зависимости, например IMAP или ERP API. - -### `app_runtime.tracing` - -Подсистема трассировки выполнения. - -Ключевые классы: -- `TraceService`; -- `TraceContextStore`; -- `NoOpTraceTransport`. - -Краткое устройство: -- создаёт trace contexts; -- связывает source/queue/worker/handler через metadata; -- пишет контексты и сообщения через транспортный слой. - -Примеры применения в бизнес-приложениях: -- трассировка обработки письма от polling до бизнес-handler; -- аудит прохождения заказа по pipeline; -- отладка проблемных задач в фоне. - -### `app_runtime.control` - -Control plane и HTTP-канал управления. - -Ключевые классы: -- `ControlPlaneService`; -- `HttpControlChannel`; -- `ControlActionSet`. - -Краткое устройство: -- публикует health/status и команды управления runtime; -- может поднимать HTTP endpoints для start/stop/status; -- строит snapshot состояния на основе `RuntimeManager`. - -Примеры применения в бизнес-приложениях: -- административный endpoint для оператора; -- health endpoint для Kubernetes/nomad; -- runtime status для monitoring dashboard. - -## 4. 
Установка - `git@git.lesha.spb.ru:alex/plba.git` - -### Требования - -- Python `3.12+` -- `pip` -- SSH-доступ к `git.lesha.spb.ru` - -### Установка напрямую через `pip` из Git-репозитория - -```bash -pip install "plba @ git+ssh://git@git.lesha.spb.ru/alex/plba.git" -``` - -При такой установке `pip` ставит не только сам пакет `plba`, но и все его зависимости, объявленные в [pyproject.toml](/Users/alex/Dev_projects_v2/apps/plba/pyproject.toml), например `fastapi`, `uvicorn` и `PyYAML`. - -Если нужна установка из конкретной ветки: - -```bash -pip install "plba @ git+ssh://git@git.lesha.spb.ru/alex/plba.git@main" -``` - -Если нужна установка из конкретного тега или commit hash: - -```bash -pip install "plba @ git+ssh://git@git.lesha.spb.ru/alex/plba.git@v0.1.0" -``` - -или - -```bash -pip install "plba @ git+ssh://git@git.lesha.spb.ru/alex/plba.git@" -``` - -### Установка в виртуальное окружение - -```bash -python -m venv .venv -source .venv/bin/activate -pip install --upgrade pip -pip install "plba @ git+ssh://git@git.lesha.spb.ru/alex/plba.git" -``` - -### Подключение `plba` в бизнес-приложении - -Чтобы при установке бизнес-приложения автоматически подтягивались зависимости `plba`, нужно добавить `plba` в зависимости самого бизнес-приложения как Git dependency. - -Пример для `requirements.txt`: - -```txt -plba @ git+ssh://git@git.lesha.spb.ru/alex/plba.git -``` - -Пример для `pyproject.toml`: - -```toml -[project] -dependencies = [ - "plba @ git+ssh://git@git.lesha.spb.ru/alex/plba.git", -] -``` - -Если бизнес-приложение собирается в Docker, достаточно чтобы на этапе сборки выполнялся обычный `pip install`, например: - -```dockerfile -COPY pyproject.toml . -RUN pip install . -``` - -или при использовании `requirements.txt`: - -```dockerfile -COPY requirements.txt . -RUN pip install -r requirements.txt -``` - -В обоих случаях `pip` установит `plba` из Git и автоматически подтянет его транзитивные зависимости. 
- -### Локальная разработка - -Если пакет нужно не только использовать, но и разрабатывать: - -```bash -git clone git@git.lesha.spb.ru:alex/plba.git -cd plba -python -m venv .venv -source .venv/bin/activate -pip install -e . -``` - -### Быстрая проверка - -```bash -python -c "import plba; print(plba.__all__[:5])" -``` - -### Минимальный пример использования +## Minimal example ```python -from plba import ApplicationModule, InMemoryTaskQueue, QueueWorker, RuntimeManager, Task, TaskHandler +from threading import Event, Lock, Thread +from time import sleep + +from plba import ( + ApplicationModule, + Worker, + WorkerHealth, + WorkerStatus, + create_runtime, +) -class PrintHandler(TaskHandler): - def handle(self, task: Task) -> None: - print(task.payload) +class OrdersRoutine: + def __init__(self, service) -> None: + self._service = service + + def run(self) -> None: + self._service.process_new_orders() -class DemoModule(ApplicationModule): +class OrdersWorker(Worker): + def __init__(self, routine: OrdersRoutine, interval: float = 1.0) -> None: + self._routine = routine + self._interval = interval + self._thread: Thread | None = None + self._stop_requested = Event() + self._lock = Lock() + self._in_flight = 0 + self._failures = 0 + @property def name(self) -> str: - return "demo" + return "orders-worker" + + @property + def critical(self) -> bool: + return True + + def start(self) -> None: + if self._thread and self._thread.is_alive(): + return + self._stop_requested.clear() + self._thread = Thread(target=self._run_loop, daemon=True) + self._thread.start() + + def stop(self, force: bool = False) -> None: + del force + self._stop_requested.set() + + def health(self) -> WorkerHealth: + if self._failures > 0: + return WorkerHealth(self.name, "degraded", self.critical) + return WorkerHealth(self.name, "ok", self.critical) + + def status(self) -> WorkerStatus: + alive = self._thread is not None and self._thread.is_alive() + state = "busy" if self._in_flight else "idle" + if 
not alive: + state = "stopped" + elif self._stop_requested.is_set(): + state = "stopping" + return WorkerStatus(name=self.name, state=state, in_flight=self._in_flight) + + def _run_loop(self) -> None: + while not self._stop_requested.is_set(): + with self._lock: + self._in_flight += 1 + try: + self._routine.run() + except Exception: + self._failures += 1 + finally: + with self._lock: + self._in_flight -= 1 + sleep(self._interval) + + +class OrdersModule(ApplicationModule): + @property + def name(self) -> str: + return "orders" def register(self, registry) -> None: - queue = InMemoryTaskQueue() - traces = registry.services.get("traces") - handler = PrintHandler() - queue.publish(Task(name="demo-task", payload={"id": 1}, metadata={})) - registry.add_worker(QueueWorker("demo-worker", queue, handler, traces)) + service = OrderService() + routine = OrdersRoutine(service) + registry.add_worker(OrdersWorker(routine)) -runtime = RuntimeManager() -runtime.register_module(DemoModule()) +runtime = create_runtime(OrdersModule(), config_path="config.yml") runtime.start() -runtime.stop() ``` + +## Health and status + +Практика такая: +- `Routine` выполняет бизнес-работу +- `Worker` ловит ее ошибки +- `Worker` интерпретирует outcome в `health()` и `status()` + +То есть routine не выставляет health напрямую. + +## In-memory queue + +`InMemoryTaskQueue` остается в платформе как простой in-memory utility. + +Это не базовый платформенный контракт и не обязательный паттерн архитектуры. +Ее можно использовать в прикладном коде как локальный буфер между компонентами, если это действительно помогает. 
+ +```python +from plba import InMemoryTaskQueue + +queue = InMemoryTaskQueue[str]() +queue.put("payload") +item = queue.get(timeout=0.1) +``` + +## Public API + +Основные публичные сущности: +- `ApplicationModule` +- `Worker` +- `WorkerHealth` +- `WorkerStatus` +- `RuntimeManager` +- `WorkerSupervisor` +- `TraceService` +- `HealthRegistry` +- `InMemoryTaskQueue` +- `create_runtime(...)` diff --git a/requirements/Architectural constraints.md b/requirements/Architectural constraints.md index 9f40030..8fd467e 100644 --- a/requirements/Architectural constraints.md +++ b/requirements/Architectural constraints.md @@ -1,111 +1,50 @@ +# Architectural Constraints -**`docs/adr/0001-new-runtime.md`** -```md -# ADR 0001: Create a new runtime project instead of evolving the legacy ConfigManager model +## Main constraints -## Status +1. `Worker` is the primary runtime abstraction. +2. `ApplicationModule` assembles the application and registers workers. +3. Business behavior should live outside worker lifecycle code. +4. The platform should avoid queue-centric abstractions as first-class architecture. +5. Utility components are allowed only when they remain optional and transparent. -Accepted +## Worker constraints -## Context +Worker should own: +- thread/process lifecycle +- execution strategy +- graceful stop +- runtime state +- health state -The previous generation of the application runtime was centered around a timer-driven execution model: -- a manager loaded configuration -- a worker loop periodically called `execute()` -- applications placed most operational logic behind that entry point +Worker should not own: +- large business workflows +- domain rules +- parsing plus persistence plus integration logic in one class -That model is adequate for simple periodic jobs, but it does not match the direction of the new platform. 
+## Business-code constraints -The new platform must support: -- task sources -- queue-driven processing -- multiple parallel workers -- trace propagation across producer/consumer boundaries -- richer lifecycle and status management -- health aggregation -- future admin web interface -- future authentication and user management +Business routines and services should own: +- business decisions +- external integration calls +- persistence +- domain validation -These are platform concerns, not business concerns. +They should not own: +- platform lifecycle +- worker supervision +- runtime status management -We also want business applications to describe only business functionality and rely on the runtime for infrastructure behavior. +## API constraints -## Decision +Public API should stay small and clear. -We will create a new runtime project instead of implementing a `V3` directly inside the current legacy ConfigManager codebase. - -The new runtime will be built around a platform-oriented model with explicit concepts such as: -- `RuntimeManager` +Prefer: - `ApplicationModule` -- `TaskSource` -- `TaskQueue` -- `WorkerSupervisor` -- `TaskHandler` -- `TraceService` -- `HealthRegistry` +- `Worker` +- runtime services +- utility queue -The old execute-centered model is treated as a previous-generation design and is not the architectural basis of the new runtime. 
- -## Rationale - -Creating a new runtime project gives us: -- freedom to design the correct abstractions from the start -- no pressure to preserve legacy contracts -- cleaner boundaries between platform and business logic -- simpler documentation and tests -- lower long-term complexity than mixing old and new models in one codebase - -If we built this as `V3` inside the old project, we would likely inherit: -- compatibility constraints -- mixed abstractions -- transitional adapters -- conceptual confusion between periodic execution and event/queue processing - -The expected long-term cost of such coupling is higher than creating a clean new runtime. - -## Consequences - -### Positive - -- the platform can be modeled cleanly -- business applications can integrate through explicit contracts -- new runtime capabilities can be added without legacy pressure -- mail_order_bot can become the first pilot application on the new runtime - -### Negative - -- some existing capabilities will need to be reintroduced in the new project -- there will be a temporary period with both legacy and new runtime lines -- migration requires explicit planning - -## Initial migration target - -The first application on the new runtime will be `mail_order_bot`. 
- -Initial runtime design for that application: -- IMAP polling source -- in-memory queue -- parallel workers -- business handler for email processing -- message marked as read only after successful processing - -Later: -- swap IMAP polling source for IMAP IDLE source -- keep the queue and worker model unchanged - -## What we intentionally do not carry over - -We do not keep the old architecture as the central organizing principle: -- no `execute()`-centric application model -- no timer-loop as the main abstraction -- no implicit mixing of lifecycle and business processing - -## Follow-up - -Next design work should define: -- core platform contracts -- package structure -- runtime lifecycle -- queue and worker interfaces -- config model split between platform and application -- pilot integration for `mail_order_bot` +Avoid: +- legacy abstractions preserved only for compatibility +- specialized platform roles without strong need diff --git a/requirements/Mail Order Bot Migration Plan.md b/requirements/Mail Order Bot Migration Plan.md index c613f16..cd0e386 100644 --- a/requirements/Mail Order Bot Migration Plan.md +++ b/requirements/Mail Order Bot Migration Plan.md @@ -1,116 +1,62 @@ # Mail Order Bot Migration Plan -## Purpose +## Goal -This document describes how `mail_order_bot` will be adapted to the new runtime as the first pilot business application. +Migrate `mail_order_bot` to the new PLBA model: +- application assembled by `ApplicationModule` +- runtime execution owned by `Worker` +- business behavior implemented in routines/services -## Scope +## Target structure -This is not a full migration specification for all future applications. -It is a practical first use case to validate the runtime architecture. 
+### Application module -## Current model - -The current application flow is tightly coupled: -- the manager checks IMAP -- unread emails are fetched -- emails are processed synchronously -- messages are marked as read after processing - -Polling and processing happen in one execution path. - -## Target model - -The new runtime-based flow should be: - -1. a mail source detects new tasks -2. tasks are published to a queue -3. workers consume tasks in parallel -4. a domain handler processes each email -5. successful tasks lead to `mark_as_read` -6. failed tasks remain retriable - -## Phase 1 - -### Source -- IMAP polling source - -### Queue -- in-memory task queue +`MailOrderBotModule` should: +- build domain services +- build routines +- register workers +- register optional health contributors ### Workers -- 2 to 4 parallel workers initially -### Handler -- domain email processing handler built around the current processing logic +Initial workers: +- `MailPollingWorker` +- `EmailProcessingWorker` +- optional `ReconciliationWorker` -### Delivery semantics -- email is marked as read only after successful processing -- unread state acts as the first safety mechanism against message loss +Each worker should own only runtime behavior: +- start/stop +- execution loop +- thread model +- health/status -## Why in-memory queue is acceptable at first +### Routines -For the first phase: -- infrastructure complexity stays low -- the runtime contracts can be tested quickly -- unread emails in IMAP provide a simple recovery path after crashes +Initial routines: +- `MailPollingRoutine` +- `EmailProcessingRoutine` +- `ReconciliationRoutine` -This allows us to validate the runtime architecture before adopting an external broker. +Each routine should own business behavior: +- fetch messages +- parse message payload +- call domain services +- persist changes +- report business failures upward -## Phase 2 +## Migration steps -Replace: -- IMAP polling source +1. 
Extract business logic from current worker-like components into routines/services. +2. Implement thin workers that call those routines. +3. Register workers from `MailOrderBotModule`. +4. Use runtime health and status only through worker state. +5. Keep any local queue only as an implementation detail if it still helps. -With: -- IMAP IDLE source +## Optional queue usage -The queue, workers, and handler should remain unchanged. +If email processing still benefits from buffering, `InMemoryTaskQueue` may be used inside the application. -This is an explicit architectural goal: -source replacement without redesigning the processing pipeline. - -## Domain responsibilities that remain inside mail_order_bot - -The runtime should not own: -- email parsing rules -- client resolution logic -- attachment processing rules -- order creation logic -- client-specific behavior - -These remain in the business application. - -## Platform responsibilities used by mail_order_bot - -The new runtime should provide: -- lifecycle -- configuration -- queue abstraction -- worker orchestration -- tracing -- health checks -- status/control APIs - -## Migration boundaries - -### Move into runtime -- source orchestration -- worker supervision -- queue management -- trace provisioning -- health aggregation - -### Keep in mail_order_bot -- email business handler -- mail domain services -- business pipeline -- business-specific config validation beyond platform-level schema - -## Success criteria - -The migration is successful when: -- mail polling is no longer tightly coupled to processing -- workers can process emails in parallel -- business logic is not moved into the runtime -- replacing polling with IDLE is localized to the source layer +Important: +- it should remain an app-level detail +- it should not define the platform contract +- the main architecture should still be described through workers and routines diff --git a/requirements/README.md b/requirements/README.md index 
5a6c2a7..28353b0 100644 --- a/requirements/README.md +++ b/requirements/README.md @@ -1,469 +1,134 @@ -# PLBA +# PLBA Requirements -`PLBA` is a reusable platform runtime for business applications. +## Goal -It solves platform concerns that should not live inside domain code: -- application lifecycle +`PLBA` is a reusable runtime for business applications. + +The platform owns: +- lifecycle - worker orchestration -- configuration loading from YAML -- tracing -- health aggregation -- runtime status reporting -- HTTP control endpoints +- configuration loading - logging configuration +- health aggregation +- tracing +- control endpoints -Business applications depend on `plba` as a package and implement only their own business behavior. +Business applications own: +- business workflows +- domain services +- app-specific configuration schema +- business error semantics -## Architecture +## Core runtime model -Current PLBA architecture is built around one core idea: -- the runtime manages a set of application workers +The platform is built around workers. -A worker is any runtime-managed active component with a unified lifecycle: +1. application defines an `ApplicationModule` +2. module registers workers +3. runtime starts workers +4. workers execute business activity +5. runtime aggregates status and health +6. runtime stops workers gracefully + +## Contracts + +### `ApplicationModule` + +Responsibilities: +- provide module name +- assemble application components +- register workers +- register optional health contributors + +### `Worker` + +Main runtime contract. + +Responsibilities: - `start()` - `stop(force=False)` - `health()` - `status()` -This means PLBA does not require separate platform categories like `source` and `consumer`. -If an application needs polling, queue processing, listening, scheduled work, or another active loop, it is implemented as a worker. 
+The worker owns execution mechanics: +- single-run or loop +- thread model +- stop conditions +- runtime status +- health interpretation -### Main runtime model +### `Routine` -1. application creates `RuntimeManager` -2. runtime loads configuration -3. runtime applies logging configuration -4. application module registers workers and supporting services -5. runtime starts all workers -6. workers execute business-related loops or processing -7. runtime aggregates health and status -8. runtime stops workers gracefully or forcefully - -## Core concepts - -### `ApplicationModule` - -File: [application.py](/Users/alex/Dev_projects_v2/apps/plba/src/app_runtime/contracts/application.py) - -Describes a business application to the runtime. +`Routine` is an application pattern, not a PLBA contract. Responsibilities: -- provide module name -- register workers -- register queues if needed -- register handlers if needed -- register health contributors -- compose application-specific objects +- execute business behavior +- call domain services +- apply business rules +- talk to external integrations -`ApplicationModule` does not run the application itself. -It only declares how the application is assembled. +Recommended rule: +- worker orchestrates +- routine executes business behavior -### `Worker` +## Architectural rules -File: [worker.py](/Users/alex/Dev_projects_v2/apps/plba/src/app_runtime/contracts/worker.py) - -The main runtime-managed contract. - -Responsibilities: -- start its own execution -- stop gracefully or forcefully -- report health -- report runtime status - -This is the main extension point for business applications. - -### `TaskQueue` - -File: [queue.py](/Users/alex/Dev_projects_v2/apps/plba/src/app_runtime/contracts/queue.py) - -Optional queue abstraction. - -Use it when application workers need buffered or decoupled processing. - -PLBA does not force every application to use a queue. -Queue is one supported pattern, not the foundation of the whole platform. 
- -### `TaskHandler` - -File: [tasks.py](/Users/alex/Dev_projects_v2/apps/plba/src/app_runtime/contracts/tasks.py) - -Optional unit of business processing for one task. - -Useful when a worker follows queue-driven logic: -- worker takes a task -- handler executes business logic - -### `TraceService` - -File: [service.py](/Users/alex/Dev_projects_v2/apps/plba/src/app_runtime/tracing/service.py) - -Platform trace service. - -Responsibilities: -- create trace contexts -- resume trace from task metadata -- write context records -- write trace messages - -Business code should use it as a platform service and should not implement its own tracing infrastructure. - -### `HealthRegistry` - -File: [registry.py](/Users/alex/Dev_projects_v2/apps/plba/src/app_runtime/health/registry.py) - -Aggregates application health. - -PLBA uses three health states: -- `ok` — all critical parts work -- `degraded` — application still works, but there is a problem -- `unhealthy` — application should not be considered operational - -### Runtime status - -File: [types.py](/Users/alex/Dev_projects_v2/apps/plba/src/app_runtime/core/types.py) - -Status is separate from health. - -Current runtime states: -- `starting` -- `idle` -- `busy` -- `stopping` -- `stopped` - -Status is used for operational lifecycle decisions such as graceful shutdown. - -### `ControlPlaneService` - -Files: -- [service.py](/Users/alex/Dev_projects_v2/apps/plba/src/app_runtime/control/service.py) -- [http_channel.py](/Users/alex/Dev_projects_v2/apps/plba/src/app_runtime/control/http_channel.py) - -Provides control and observability endpoints. 
- -Currently supported: -- health access -- runtime start action -- runtime stop action -- runtime status action - -### `ConfigurationManager` - -Files: -- [configuration.py](/Users/alex/Dev_projects_v2/apps/plba/src/app_runtime/core/configuration.py) -- [file_loader.py](/Users/alex/Dev_projects_v2/apps/plba/src/app_runtime/config/file_loader.py) -- [providers.py](/Users/alex/Dev_projects_v2/apps/plba/src/app_runtime/config/providers.py) - -Loads and merges configuration. - -Current built-in source: -- YAML file provider - -### `LogManager` - -File: [manager.py](/Users/alex/Dev_projects_v2/apps/plba/src/app_runtime/logging/manager.py) - -Applies logging configuration from config. - -Current expectation: -- logging config lives in the `log` section of YAML - -## Available platform services - -PLBA currently provides these reusable services. - -### 1. Runtime lifecycle - -Service: -- `RuntimeManager` - -What it gives: -- startup orchestration -- worker registration and startup -- graceful stop with timeout -- force stop -- status snapshot - -Example use: -- start `mail_order_bot` -- stop it after active email processing is drained - -### 2. Worker supervision - -Service: -- `WorkerSupervisor` - -What it gives: -- unified worker orchestration -- aggregated worker statuses -- aggregated worker health -- stop coordination - -Example use: -- run one polling worker and three processing workers in the same application - -### 3. Queue support - -Services: -- `TaskQueue` -- `InMemoryTaskQueue` -- `QueueWorker` - -What it gives: -- buffered processing -- decoupling between task production and task consumption -- worker concurrency for task handling - -Example use: -- worker A polls IMAP and pushes tasks to queue -- worker B processes queued email tasks with concurrency `3` - -### 4. 
Configuration - -Services: -- `ConfigurationManager` -- `FileConfigProvider` -- `ConfigFileLoader` - -What it gives: -- YAML config loading -- config merging -- access to platform and application config - -Example use: -- load `platform` section for runtime -- load `mail_order_bot` section for app-specific config - -### 5. Tracing - -Services: -- `TraceService` -- `TraceTransport` -- `NoOpTraceTransport` - -What it gives: -- trace context creation -- trace propagation through task metadata -- trace messages for processing steps - -Example use: -- polling worker creates trace when it discovers a mail -- processing worker resumes trace and writes business steps - -### 6. Health - -Services: -- `HealthRegistry` -- `WorkerHealth` - -What it gives: -- per-worker health -- aggregated application health -- critical vs non-critical component handling - -Example use: -- email processing workers are critical -- optional diagnostic worker may be non-critical - -### 7. Status - -Services: -- `WorkerStatus` -- runtime aggregated state - -What it gives: -- current activity visibility -- ability to stop application only after in-flight work is completed - -Example use: -- stop application only after processing workers become `idle` or `stopped` - -### 8. HTTP control - -Services: -- `ControlPlaneService` -- `HttpControlChannel` - -What it gives: -- HTTP health/status/actions -- operational integration point - -Example use: -- inspect current health from orchestration -- request graceful stop remotely - -## Public package API - -Public namespace is `plba`. 
- -Main imports for external applications: - -```python -from plba import ApplicationModule, QueueWorker, RuntimeManager, create_runtime -from plba.contracts import Task, TaskHandler, TaskQueue, Worker, WorkerHealth, WorkerStatus -from plba.queue import InMemoryTaskQueue -from plba.tracing import TraceService -``` - -## Example application pattern - -Minimal queue-based application: - -```python -from plba import ApplicationModule, QueueWorker, Task, TaskHandler, create_runtime -from plba.queue import InMemoryTaskQueue - - -class ExampleHandler(TaskHandler): - def handle(self, task: Task) -> None: - print(task.payload) - - -class ExampleModule(ApplicationModule): - @property - def name(self) -> str: - return "example" - - def register(self, registry) -> None: - queue = InMemoryTaskQueue() - traces = registry.services.get("traces") - - queue.publish(Task(name="incoming", payload={"hello": "world"})) - - registry.add_queue("incoming", queue) - registry.add_worker(QueueWorker("example-worker", queue, ExampleHandler(), traces)) - - -runtime = create_runtime( - ExampleModule(), - config_path="config.yml", - enable_http_control=False, -) -runtime.start() -``` - -## Building business applications on PLBA - -These are the current rules for building business applications correctly. - -### 1. Keep platform and business concerns separate - -PLBA owns: -- lifecycle -- worker management -- logging -- trace infrastructure -- health aggregation -- HTTP control -- config loading - -Business application owns: -- business workflows -- domain services -- application-specific config schema -- business task payloads -- business error semantics - -### 2. Build app behavior from workers - -A business application should be described as a small set of workers. - -Typical examples: -- polling worker -- processing worker -- reconciliation worker - -Do not introduce new worker types at platform level unless there is clear need for custom runtime behavior. - -### 3. 
Use queues only when they help - -Queue is optional. - -Use queue when: -- one worker discovers work -- another worker processes it -- buffering or decoupling helps -- concurrency is needed - -Do not force queue into applications that do not need it. - -### 4. Keep business logic out of worker lifecycle code - -Worker should orchestrate execution. -Business rules should live in dedicated services and handlers. +### 1. Keep lifecycle and business behavior separate Good: -- worker gets config -- worker calls domain service -- worker reports trace and status +- worker manages threading, loop, stop flags, health, status +- routine contains business logic Bad: -- worker contains all parsing, decision logic, integration rules, and persistence rules in one class +- worker mixes thread management, retry policy, parsing, persistence, integration rules, and domain decisions in one class -### 5. Use trace as a platform service +### 2. Treat `Worker` as the main extension point -Business application should: -- create meaningful trace steps -- propagate trace through task metadata if queue is used -- record business-relevant processing milestones +Do not center the platform around queues, handlers, sources, or other specialized runtime categories. -Business application should not: -- implement its own trace store -- control trace transport directly unless explicitly needed +### 3. Keep `Routine` out of the platform contract set -### 6. Read config through PLBA +At the current stage PLBA should not force a universal `Routine` interface. -Business application should not read YAML directly. +Applications may use: +- `run()` +- `run_once()` +- `poll()` +- `sync_window()` -Recommended flow: -- PLBA loads config -- application reads only its own config section -- application converts it to typed app config object -- services receive typed config object +The exact business API belongs to the application. -### 7. Distinguish health from status +### 4. 
Health is computed by the worker -Use `health` for: -- is application operational? +Routine should not directly mutate platform health state. -Use `status` for: -- what is application doing right now? +Instead: +- routine succeeds +- routine returns outcome +- or routine raises typed exceptions -This is important for graceful stop: -- health may still be `ok` -- status may be `busy` +Worker interprets the outcome into: +- `ok` +- `degraded` +- `unhealthy` -### 8. Design workers for graceful stop +### 5. `InMemoryTaskQueue` is utility-only -Workers should support: -- stop accepting new work -- finish current in-flight work when possible -- report `busy`, `idle`, `stopping`, `stopped` +`InMemoryTaskQueue` may stay as a reusable component for business applications. -This allows runtime to stop application safely. +It is: +- optional +- local +- not part of the main runtime contract model -## Recommended repository model +## Public package direction -PLBA is intended to live in its own repository as a reusable package. +Public namespace `plba` should expose: +- application/runtime contracts +- tracing +- health +- config +- control plane +- utility queue -Recommended setup: -- repository `plba`: platform package only -- repository `mail_order_bot`: business application depending on `plba` -- repository `service_b`: business application depending on `plba` - -## Example: `mail_order_bot` - -Simple first version of `mail_order_bot` on PLBA: -- `MailPollingWorker`, concurrency `1` -- `EmailProcessingWorker`, concurrency `3` -- shared `InMemoryTaskQueue` -- domain services for mail parsing and order processing - -Flow: -1. polling worker checks IMAP -2. polling worker pushes email tasks into queue -3. processing workers consume tasks -4. processing workers execute domain logic -5. runtime aggregates health and status - -This keeps `mail_order_bot` small, explicit, and aligned with current PLBA architecture. 
+It should not expose queue-centric runtime abstractions as primary architecture. diff --git a/requirements/application_guidelines.md b/requirements/application_guidelines.md new file mode 100644 index 0000000..554aa32 --- /dev/null +++ b/requirements/application_guidelines.md @@ -0,0 +1,148 @@ +# Application Guidelines + +## Purpose + +This document defines the default rules for building business applications on top of `plba`. + +The goal is to keep applications: +- explicit +- small +- easy to debug +- free from platform legacy artifacts + +## Main model + +Build every application around this chain: + +`ApplicationModule` -> `Worker` -> business `Routine` + +Meaning: +- `ApplicationModule` assembles the application +- `Worker` owns runtime execution and lifecycle +- `Routine` owns business behavior + +`Routine` is an application pattern, not a mandatory platform contract. + +## Rules + +### 1. Assemble the app in `ApplicationModule` + +`ApplicationModule` should: +- create application services +- create routines +- create workers +- register workers +- register optional health contributors + +`ApplicationModule` should not: +- execute business logic itself +- contain runtime loops + +### 2. Treat `Worker` as the only primary runtime abstraction + +`Worker` is the core runtime contract of the platform. + +Worker should own: +- `start()` +- `stop(force=False)` +- `health()` +- `status()` +- thread ownership +- execution strategy +- graceful shutdown + +Worker should not own: +- large business flows +- domain decisions +- parsing, persistence, and integration rules all mixed together + +### 3. Keep one worker focused on one business activity + +Default recommendation: +- one worker -> one routine + +If a process has multiple distinct behaviors: +- split it into multiple workers +- or compose several services behind one focused routine + +Do not make a worker a container for unrelated business scenarios. + +### 4. 
Put business logic into routines and services + +Routine should contain: +- business flow steps +- domain service calls +- business validation +- integration calls +- persistence orchestration + +If the routine becomes too large: +- split business logic into dedicated services +- keep routine as a thin application-level orchestrator + +### 5. Let the worker define the run model + +The worker decides: +- single-run or loop +- one thread or multiple threads +- interval between iterations +- batch or long-running mode +- stop conditions + +The routine does not decide lifecycle strategy. + +### 6. Let the worker compute health + +Routine should not directly set platform health state. + +Instead: +- routine completes successfully +- or returns outcome information +- or raises typed exceptions + +Then worker interprets that into: +- `ok` +- `degraded` +- `unhealthy` + +### 7. Use queues only as optional app-level utilities + +`InMemoryTaskQueue` may be used inside an application when buffering helps. + +But: +- queue is not a core platform concept +- queue usage should stay a local implementation choice +- the app should still be described through workers and routines + +### 8. Keep tracing vocabulary neutral + +Use tracing to describe operations and execution context, not legacy architectural roles. + +Prefer terms like: +- operation +- worker +- routine +- metadata +- step + +Avoid making trace terminology define the application architecture. + +### 9. Keep classes small and responsibilities clear + +Preferred shape: +- thin `ApplicationModule` +- thin `Worker` +- focused `Routine` +- dedicated domain services + +If a class grows too much, split it by responsibility instead of adding more platform abstractions. + +## Checklist + +Before adding a new application component, check: + +1. Is this runtime behavior or business behavior? +2. If runtime behavior, should it live in a `Worker`? +3. If business behavior, should it live in a `Routine` or service? +4. 
Does this component stay small and single-purpose? +5. Am I adding a queue because it is useful, or because of old mental models? diff --git a/requirements/architecture.md b/requirements/architecture.md index b0ddba0..0334d3a 100644 --- a/requirements/architecture.md +++ b/requirements/architecture.md @@ -2,247 +2,130 @@ ## Overview -The runtime is built as a platform layer for business applications. - -It consists of four logical layers: +PLBA consists of four logical layers: - platform core - platform contracts -- infrastructure adapters +- infrastructure services - business applications +The runtime is centered on `Worker`. + ## Layers ### Platform core -The core contains long-lived runtime services: +Core services: - `RuntimeManager` - `ConfigurationManager` - `WorkerSupervisor` -- `TraceService` -- `HealthRegistry` -- `ControlPlaneService` - `ServiceContainer` -The core is responsible for orchestration, not domain behavior. - -### Platform contracts - -Contracts define how business applications integrate with the runtime. - -Main contracts: -- `ApplicationModule` -- `TaskSource` -- `TaskQueue` -- `Worker` -- `TaskHandler` -- `ConfigProvider` -- `HealthContributor` -- `TraceFactory` - -These contracts must remain domain-agnostic. - -### Infrastructure adapters - -Adapters implement concrete runtime capabilities: -- in-memory queue -- Redis queue -- file config loader -- database config loader -- polling source -- IMAP IDLE source -- HTTP control plane -- trace transport adapters - -Adapters may change between applications and deployments. - -### Business applications - -Applications are built on top of the contracts and adapters. - -Examples: -- `mail_order_bot` -- future event-driven business services - -Applications contain: -- domain models -- domain handlers -- application-specific configuration schema -- source/handler composition - -## Core runtime components - -### RuntimeManager - -The main platform facade. 
- Responsibilities: - bootstrap runtime -- initialize services -- register application modules -- start and stop all runtime-managed components -- expose status -- coordinate graceful shutdown +- wire core services +- register modules +- start and stop workers +- expose runtime snapshot -### ConfigurationManager +### Platform contracts -Responsibilities: -- load configuration -- validate configuration -- publish config updates -- provide typed config access -- notify subscribers on reload +Main contracts: +- `ApplicationModule` +- `Worker` +- `ConfigProvider` +- `HealthContributor` +- tracing contracts -Configuration should be divided into: -- platform config -- application config -- environment/runtime overrides +These contracts must remain domain-agnostic. -### WorkerSupervisor +### Infrastructure services -Responsibilities: -- register worker definitions -- start worker pools -- monitor worker health -- restart failed workers when appropriate -- manage parallelism and backpressure -- expose worker-level status +Platform services include: +- tracing +- health registry +- logging manager +- control plane +- config providers +- `InMemoryTaskQueue` as optional utility -### TraceService +### Business applications -Responsibilities: -- create traces for operations -- propagate trace context across source -> queue -> worker -> handler boundaries -- provide trace factories to applications -- remain transport-agnostic +Applications define: +- routines +- domain services +- custom worker implementations +- typed app config -### HealthRegistry - -Responsibilities: -- collect health from registered contributors -- aggregate health into liveness/readiness/status views -- expose structured runtime health - -### ControlPlaneService - -Responsibilities: -- control endpoints -- runtime state visibility -- administrative actions -- later authentication and user/session-aware access - -## Main runtime model - -The runtime should operate on this conceptual flow: +## Runtime flow 1. 
runtime starts 2. configuration is loaded -3. services are initialized -4. application modules register sources, queues, handlers, and workers -5. task sources start producing tasks -6. tasks are published into queues -7. workers consume tasks -8. handlers execute business logic -9. traces and health are updated throughout the flow -10. runtime stops gracefully on request +3. core services become available +4. application modules register workers +5. workers start execution +6. workers call business routines +7. runtime aggregates health and status +8. runtime stops workers on request -## Contracts +## Worker model -### ApplicationModule +Worker is responsible for runtime behavior: +- execution strategy +- thread ownership +- graceful shutdown +- runtime status +- health interpretation -Describes a business application to the runtime. +Routine is responsible for business behavior: +- business decisions +- domain orchestration +- persistence and integrations -Responsibilities: -- register domain services -- register task sources -- register queues -- register worker pools -- register handlers -- declare config requirements -- optionally register health contributors +Recommended shape: -### TaskSource +```python +class SomeWorker(Worker): + def __init__(self, routine) -> None: + self._routine = routine -Produces tasks into queues. + def start(self) -> None: + ... -Examples: -- IMAP polling source -- IMAP IDLE source -- webhook source -- scheduled source + def stop(self, force: bool = False) -> None: + ... -Responsibilities: -- start -- stop -- publish tasks -- expose source status + def health(self) -> WorkerHealth: + ... -### TaskQueue + def status(self) -> WorkerStatus: + ... +``` -A queue abstraction. +## Design rules -Expected operations: -- `publish(task)` -- `consume()` -- `ack(task)` -- `nack(task, retry_delay=None)` -- `stats()` +### 1. 
Runtime should not know business semantics -The first implementation may be in-memory, but the interface should support future backends. +PLBA knows: +- worker started +- worker stopped +- routine succeeded +- routine failed -### Worker +PLBA does not know: +- what the business operation means +- which domain decision was made -Consumes tasks from a queue and passes them to a handler. +### 2. Queue is not a core architecture primitive -Responsibilities: -- obtain task from queue -- open or resume trace context -- call business handler -- ack or nack the task -- expose worker state +Queues may exist inside applications as implementation details. -### TaskHandler +They must not define the platform mental model. -Executes business logic for one task. +### 3. Keep components small -The runtime should not know what the handler does. -It only knows that a task is processed. +Prefer: +- thin workers +- focused routines +- dedicated domain services -## Mail Order Bot as first application - -### Phase 1 - -- source: IMAP polling -- queue: in-memory queue -- workers: parallel email processing workers -- handler: domain email processing handler -- mark message as read only after successful processing - -### Phase 2 - -- source changes from polling to IMAP IDLE -- queue and workers remain the same - -This demonstrates one of the architectural goals: -the source can change without redesigning the rest of the processing pipeline. - -## Suggested package structure - -```text -src/ - app_runtime/ - core/ - contracts/ - config/ - workers/ - queue/ - tracing/ - health/ - control/ - container/ - adapters/ - mail_order_bot_app/ - module/ - sources/ - handlers/ - services/ - domain/ +Avoid large platform abstractions that exist only for hypothetical reuse. 
diff --git a/requirements/vision.md b/requirements/vision.md index 41b88b1..5356147 100644 --- a/requirements/vision.md +++ b/requirements/vision.md @@ -1,119 +1,38 @@ # Vision -## Purpose +## Product vision -This project provides a reusable runtime platform for business applications. +PLBA should be a transparent runtime for business applications. -The runtime exists to solve service and infrastructure concerns that are needed by many applications but do not belong to business logic: -- start and stop -- status and lifecycle -- configuration -- worker execution -- trace propagation -- health checks -- control and administration -- later authentication and user management +The desired feeling of the platform: +- simple to read +- explicit in behavior +- small number of core concepts +- easy to debug +- no architectural legacy artifacts -The runtime should allow a business application to focus only on domain behavior. +## Core concepts -## Vision statement +The platform should be understandable through three ideas: +- `ApplicationModule` assembles the app +- `Worker` owns lifecycle and execution +- business behavior lives in application routines and services -We build a platform where business applications are assembled from small domain modules, while the runtime consistently provides lifecycle, workers, tracing, configuration, and control-plane capabilities. +## Non-goals -## Problem +PLBA should not become: +- a framework of many specialized runtime roles +- a queue-centric architecture by default +- a compatibility shell for legacy abstractions +- a place where business logic hides inside infrastructure classes -In the previous generation, the execution model was centered around a single `execute()` entry point called by a timer loop. 
+## Utility components -That model works for simple periodic jobs, but it becomes too narrow when we need: -- queue-driven processing -- multiple concurrent workers -- independent task sources -- richer health semantics -- trace propagation across producer and consumer boundaries -- reusable runtime patterns across different applications -- future admin and authentication capabilities +Some utility components may still exist, for example `InMemoryTaskQueue`. -The old model couples orchestration and execution too tightly. +They are acceptable when they stay: +- optional +- local +- implementation-oriented -## Desired future state - -The new runtime should provide: -- a reusable lifecycle and orchestration core -- a clean contract for business applications -- support for sources, queues, workers, and handlers -- explicit separation between platform and domain -- trace and health as first-class platform services -- the ability to evolve into an admin platform - -## Design principles - -### 1. Platform vs domain separation - -The runtime owns platform concerns. -Applications own domain concerns. - -The runtime must not contain business rules such as: -- email parsing policies -- order creation logic -- invoice decisions -- client-specific rules - -### 2. Composition over inheritance - -Applications should be composed by registering modules and services in the runtime, not by inheriting a timer-driven class and overriding one method. - -### 3. Explicit task model - -Applications should model processing as: -- task source -- task queue -- worker -- handler - -This is more scalable than one monolithic execute loop. - -### 4. Parallelism as a first-class concern - -The runtime should supervise worker pools and concurrency safely instead of leaving this to ad hoc application code. - -### 5. Observability by default - -Trace, logs, metrics, and health should be available as platform services from the start. - -### 6. 
Evolvability - -The runtime should be able to support: -- different queue backends -- different task sources -- different control planes -- different admin capabilities -without forcing business applications to change their domain code. - -## First target use case - -The first business application is `mail_order_bot`. - -Its business domain is: -- fetching incoming mail -- processing attachments -- executing order-related pipelines - -Its platform/runtime needs are: -- lifecycle management -- polling or IMAP IDLE source -- queueing -- worker pools -- tracing -- health checks -- future admin API - -This makes it a good pilot for the new runtime. - -## Success criteria - -We consider the runtime direction successful if: -- `mail_order_bot` business logic can run on top of it without leaking infrastructure details into domain code -- the runtime can manage concurrent workers -- the runtime can support a queue-based flow -- the runtime can expose status and health -- the runtime can later host admin/auth features without redesigning the core +They should not redefine the main platform model. 
diff --git a/src/app_runtime/contracts/__pycache__/application.cpython-312.pyc b/src/app_runtime/contracts/__pycache__/application.cpython-312.pyc index 35ebf3e1b05c5157754dee67380fc8d8f099143b..a51f034d0e083596d39e914482391467615ebc41 100644 GIT binary patch delta 75 zcmbQl(ZIobnwOW00SG3RuE^Z7k(ZZ=&m<@{J+rtZwMd~nzbJdN6_YL_k2B+EW(GEq dk4%iL&XbFoC-6Hn&S3t^z`^LuSi}$14FC;S6C?lt delta 93 zcmZqRn8d+*nwOW00SKNRU7mStBQGzLf?H5(dS-D+YLP;Deo=O6QL&CfVQFe!=nVEr2w{Ucp9Pr diff --git a/src/app_runtime/contracts/application.py b/src/app_runtime/contracts/application.py index 82441f6..dadfecf 100644 --- a/src/app_runtime/contracts/application.py +++ b/src/app_runtime/contracts/application.py @@ -13,4 +13,4 @@ class ApplicationModule(ABC): @abstractmethod def register(self, registry: ModuleRegistry) -> None: - """Register workers, queues, handlers, services, and health contributors.""" + """Register workers, services, and health contributors.""" diff --git a/src/app_runtime/contracts/queue.py b/src/app_runtime/contracts/queue.py deleted file mode 100644 index 498796d..0000000 --- a/src/app_runtime/contracts/queue.py +++ /dev/null @@ -1,28 +0,0 @@ -from __future__ import annotations - -from abc import ABC, abstractmethod -from typing import Any - -from app_runtime.contracts.tasks import Task - - -class TaskQueue(ABC): - @abstractmethod - def publish(self, task: Task) -> None: - """Push a task into the queue.""" - - @abstractmethod - def consume(self, timeout: float = 0.1) -> Task | None: - """Return the next available task or None.""" - - @abstractmethod - def ack(self, task: Task) -> None: - """Confirm successful task processing.""" - - @abstractmethod - def nack(self, task: Task, retry_delay: float | None = None) -> None: - """Signal failed task processing.""" - - @abstractmethod - def stats(self) -> dict[str, Any]: - """Return transport-level queue statistics.""" diff --git a/src/app_runtime/contracts/tasks.py b/src/app_runtime/contracts/tasks.py deleted file mode 100644 index 
7f552ba..0000000 --- a/src/app_runtime/contracts/tasks.py +++ /dev/null @@ -1,18 +0,0 @@ -from __future__ import annotations - -from abc import ABC, abstractmethod -from dataclasses import dataclass, field -from typing import Any - - -@dataclass(slots=True) -class Task: - name: str - payload: dict[str, Any] - metadata: dict[str, Any] = field(default_factory=dict) - - -class TaskHandler(ABC): - @abstractmethod - def handle(self, task: Task) -> None: - """Execute domain logic for a task.""" diff --git a/src/app_runtime/core/__pycache__/registration.cpython-312.pyc b/src/app_runtime/core/__pycache__/registration.cpython-312.pyc index ef75b124b8247ef98a4848500ab28cea3aadc8cb..b36c669121aec383718e77857030d3b97f6c9560 100644 GIT binary patch delta 664 zcmY+B&ubGw6vyA&AG4Eov!G_%FM^^3a`fOyF@osH_tGEMIegx{@B6+tGt7AAw;#OC=cj;H)_t`J zb{2$qH9K7kEm#HR4nT?57MB4UvggGzd%T=|sGeIU;W;GGKm`p;355fvVJlBr6dfg% zP0>|3d5tntXnx&xHqerBFwROrDr)?;TV(?s9j+(^tx3-d&qVy1_PHVlHr2K+%Sr^Q8`+SM@be2E^ zr2o`^19Mf1+OUD=)JlS{L8Z=870)^`*AZ(!d&gGKYwohMS`>~3TJ zV56yu5DVs-xQuh=i70IpsYUCc$2bm0AtH>m&QK23uA%q$)AMUP79mr zan6jqn)&7(F-_^AYr3*>lb^6NY>{{S2mFyM&9>^^OD^gP75I-+zU_A#5l67_1ul%> Q>#^5PQwkwzLfN==!%AR7X)je&4n$BP5s3l<`Lb+1(`M84+RUy4 zwsPQ*Ln49N15)@8pz7bjiBl?~SSwjVsyJ{%xDrxNyf?dcV%NEqIlOsq-u!sqH*enj zRxIWTv_F3R+5J%?PU*UrZx}U$k`(!n@azuZ>`@B$C!|*BGHWy0_b5Q^H{GGbgzBNn5l9EkC=O zBZe(GoDNoDWSCN)zUo2Q_yQX&(qLnxWP5(bza~Z=(D`Yb?8|qYt2M6 zA2)g16Su@&Hwa~?p6soTLOqQ%eyPscrrX3;4iqi{iw;d?jn>{pAFPaPCo z@67C#F70YR6b7(>jPZ`lDc-MxvJYHt;*^QDIpX)&u}vQ(H$q8=$O3q*lza)gNC$kv z=U|>gQ}dmMNLZ3D{QNxWu;Zjasl)x6Q>7mXPRaldd}h8R^#3IiJ(Uw3@Dfd z0pTt!?`rprJ>$XJW47?ZLOlbUkA<2YX7E_B28NV)y62~aoA?mzFyISi2)MXce83;G z#W>*6TZ3sMZeL8kAWL!{(Im;DCl4@wR59mygb|N-7p9 None: self.services = services - self.queues: dict[str, TaskQueue] = {} - self.handlers: dict[str, TaskHandler] = {} self.workers: list[Worker] = [] self.health_contributors: list[HealthContributor] = [] self.modules: list[str] = [] @@ -19,12 +15,6 @@ class ModuleRegistry: def 
register_module(self, name: str) -> None: self.modules.append(name) - def add_queue(self, name: str, queue: TaskQueue) -> None: - self.queues[name] = queue - - def add_handler(self, name: str, handler: TaskHandler) -> None: - self.handlers[name] = handler - def add_worker(self, worker: Worker) -> None: self.workers.append(worker) diff --git a/src/app_runtime/queue/__pycache__/in_memory.cpython-312.pyc b/src/app_runtime/queue/__pycache__/in_memory.cpython-312.pyc index 98aa8634d1f1940d106ff0d949c7dbda0db0480d..ee09eeacd95b44c8244d06d7a5e1d65652576ffb 100644 GIT binary patch delta 1252 zcmZWpUuaWT7(eI!Irk_zd}2Vn?%-CkB?*j+ZZF~-=_s<(pp+)WO3awQV52JMq}+C_70k^wxH#iy9s=0 zdBaV(45(Q*oL*s3hmMKkG&fo&jxxa&Fmp7n0|{B-O3kZzVZ1O`3kt<*tr7q~?R9za z9le>sq?+Vcd(XERUC?vcyUC6m5y?>dF0|D)ZzMdb!&}P}(@I(g5d0rO<5w zA@hoOqV77QPB=6)f>PP5)&jl_44}+T3Q!T(K)`!Zs>)k6)V~5d9`MczHKiEjcxxW*}tj+eeGLdb|Zb5 zJc_+X)$a)fEOti@F9fzg4vU1>-ncPMf#wOPXhb7w}(-HTLgV%O}$lnLhB!49P7jy00 zN|&St7H8Ts&r;dtRQ74gd2VQZ@l~S5;xZp-Xvnx1)OaP-CZL@V14qwIgxv^x0315~ zy7aPAj8G+vjDK7l>SCC|B#{I$?%_APwUjpvrW3j ScGCEg@eREfe@TFqj@|%Xy$yZ< literal 2543 zcmcIl&2Jk;6rb5I@7kNXC2gFKCNWJ>*-~P)J@imSIDh~NETO`oj?l{WY>JchM`qV; zZCNEkIkXW`o0HQD0a7JI6#fDJ1};RUs#&!X5<;lAf`f#5;=S38apLp_b9nRKn>Qct zH}B(zfq^)I@%6oLik~Wk{EEWPP(y}xvcR+mBaFJFM01oPFS$}l&dDVurwCbg)smLe z1h2TUlAhBkk;vPGsmp|Ej=jDug%+jBrV9_9vFPAHR-Y-_lo+Sn9TcxUB57ZB8 zPR)U3)mS$wC6qvc({^w-V@jP-L=igs7dUuj!}?s zhZsVNo=X5pSOdJnQ74v^A@sG(UuzScH3Hq#uH433o;av~}}VawWNb zdP_gLnnWpaGjS_5m6ckD~1qkR{SK4~hU-N!(uC(uY?QQ8+x*$G7zH2l}K4hiswuZ@Z4)g(F(zy7YfI z-kTH8$)p%uzi0HrrSJn_BVviCkV^vr5HTP42#_GbA;Ri5bKHF0L*LRnX{BK4Y7F&b zi26ye^v(i75K{=7xni!Eru?x~o=WRe-x`rwI&Jnf)?se;h!I`LSn! 
zR@^!?UoW4{}yr;e8qYmmhnInC740!7sW}$Vi(~+UG$0#hbQ5d zu+WF~N%f#-dW}>+l49W{G(QbwiTpA${?+82$@Yv<{0~=F0Uvg|4 z`l4N`u$qg!X+yKjyWt9d0bhsd$S07zh~zkuSAc8~;huny None: - self._queue: Queue[Task] = Queue() - self._published = 0 - self._acked = 0 - self._nacked = 0 + self._queue: Queue[T] = Queue() + self._put_count = 0 + self._get_count = 0 - def publish(self, task: Task) -> None: - self._published += 1 - self._queue.put(task) + def put(self, item: T) -> None: + self._put_count += 1 + self._queue.put(item) - def consume(self, timeout: float = 0.1) -> Task | None: + def get(self, timeout: float = 0.1) -> T | None: try: - return self._queue.get(timeout=timeout) + item = self._queue.get(timeout=timeout) except Empty: return None + self._get_count += 1 + return item - def ack(self, task: Task) -> None: - del task - self._acked += 1 + def task_done(self) -> None: self._queue.task_done() - def nack(self, task: Task, retry_delay: float | None = None) -> None: - del retry_delay - self._nacked += 1 - self._queue.put(task) - self._queue.task_done() + def qsize(self) -> int: + return self._queue.qsize() def stats(self) -> dict[str, int]: return { - "published": self._published, - "acked": self._acked, - "nacked": self._nacked, + "put": self._put_count, + "got": self._get_count, "queued": self._queue.qsize(), } diff --git a/src/app_runtime/tracing/__pycache__/__init__.cpython-312.pyc b/src/app_runtime/tracing/__pycache__/__init__.cpython-312.pyc index c81725e3e75baa6dbf39aee8fba2f0b908352634..425362a3a68edf766ffebffdbe46eee00020b62d 100644 GIT binary patch delta 124 zcmeBVUBk+InwOW00SIiqFVA$H$ScVhGEv=~FPArpmysczA%%4jV-(-SifLj@w}gEw zg9~#)iV~Akfka+$L4HxmWPV0bUalfepaDhPAcAMI4xN<3=FkQB~TUvLk&|6Qwl_$ z873dhpvmGlSy#k|F@AEDh_#X_P)`HH4Po)?!fF?V)mB9Ah`taQb0IGMLO{j^VYSP` znUk-H>ATcjKGo>gID5%L&WCv0JR>c8g zB|toK8&5uR(7&SmrMQIRu8xR|AHcS+(i(|5#xEHTIP(cI39U-v^6_fQg zPZQt3D3t)x1JYlV0wNqh1lXymlT#!O1hU|Kh`E!ONLqQ$0ty0+5e6DFy=G#~0O8um-Wf@*op6xr&M=o5(9MmQN0s4--fNNoIhETo6&Pd9OS-6I&IC 
bT{ZcfqBvvKlIItYj%t0-l%gb{pe9R^ z9Y_IK6$eZyP*f76gAJmiC}Hy*Q9VXokSRs!KmsUl17gF?hKYi8@s=bOXU9)A68GYB z0IF(WxFaMsq4u(n>gG=I4UBSdQ*NP|Nq=7&-hzaIHY?wS* z(yD$TP!MRAFwm^);u;slHC9Mn7B{&h?!1R}H~W6>o!pm&T{<}K2#HR}y)LGHQA~XW z=VdXYOJa5#GA;|*cd*|OQM@jqdQn96f|}W75%Uht8yq~>IixRgNZ$~WzNuk2C3%MC zl)MckHw2|%4*tN&z$x>Eft8c%3(sa(DR(BulFhv`!HkR*lOM`ji&lV~X$&I3EKuAO zRZcdPlLyKN%9(mpf&?@`geH&x23i7$Z4Dy8qM-2BWGl)534qu* rZn18UZ~Lyx`2*Iq2xh16||7RaeLqEz9+r_Q`xk|MQ9i@XKr z&6_v#KJ#YY%;TS8u?T^3;6J}F92p?w8*H?T>>@(z1|X}%AckO(l8_Sw!~<3!7l2x{ z#FCVgN@7mrbjgxSN=_*SbHP$57ZON-yhjZAJ~0$Cv@W`1=E9sB1ZrppHBt|x!~aB< zwBQWr<#NT=?Lwu@vgv>$8Mu$qqQ>bde(+57s}VMb~lu%)}7dGTChzm?`mR6-M!RI z4+oqyY2oqSBY%k|TY!-K|JVzS0uzlPhy(C;K@)(tr!To0|BmTB4jh1{5bObveJs%- zY@p&$OLmQx9VuHWo9&(;c8?+$1CW-TkfxROlBsDh!`T6l57kZ z64Z(j1(%R=K_g};P>1Tlblgduug~AK`fN;q`LN(|;TdrodmoTh(hwR%1I#Ur0JPfN zq#;79bEXXmS|I6h%FvS9tvcan3tE-68x>|lg^uEcHE=ckn$Aq1)WBAOQq>u|4Q`fJ z=pe{D5*Ls-KYFIiE^+Qqo0;Lks=T=N0;g9f7i>*i+UK+5OuH8?guMaa9%&}Y$oK>4 zlgQTa7_Y_-Ko#HF`fZ$_T#p@xK$z?6Ux2_3Cfp4)gd*Vm1*jceb^CzY(NYf;H`G^v z7ia4$Mf9}3Vvr|+b?=-Sq=?4ZS70o3k-pNFSyb-A*%s0B`U+TpxbE#kAa#-!v-&E` z6mgaB!ZX?V(-#-iGueeX6)%&{wZYUwZo6R2s8yYsWm`j3w+cF&QSExwoKekNNXDB8 zI&7OVt;j0nbk7Wz-#K^b^t^g#8l-mrdU@xv>ZwZ~oI7*!orTlt+0$3lOCOv9v}ZhA zcV|87FI0E@Lmg6jW*0kObC;Djeb{hMT`p|-Fm<=wW$n^@1iE_Z3Uq1R{XZ5Q#a=3u z7c1$o6Kd<;33oQ;$b8|Bge!9*-ASDwHLDh!oug<*tyE>eiH8CAqIt*6*KE8w(Lwpe z>c}^1CapUP46K(JLz_mOy0;--lS>Eu{IDlvOadFb4gj1n5KcrN$juQFORdZ;&uvAM zo6&t6(S3ldoLfGJ)!F5{Z$ziRTzD3pejYu#6&=FfHRbc@r_txp>1HgL zj66`9LnItqi7m$-nroN;9Gcqg%wd?EJ2T`I-@}=UUOEG#bV`pa3dR51t-(PtcJvjX zVle3|;Hr2MeFb%7u&?;I_|ors%zw9kdr`N?zq__HqcYp9BGl~~ zc&r6o){N>2eP1s}&+l3~Jq+7ON9Zh6^hXHZ0swgt%~!7T1_2JaAs2_e=8g)Imq0gI!9xPRZ*SMhL2`sJ;dVeESBL=LC&@eF z6>?kXr`L%~Ps>?~vCs+D?8UcB*WOx4hv*N0KryADX#{T~I0V26=5OefnYI(rYj!1H zDU}MgGmx)QR-t(dvYzyS6Rg%tb#~JN=1RE?sX9G?@^~}SDFo994gvrUF3Ft8sY2d9 z$s3O15}lxTM|(*W{V9t548duXjd-KOyN8|uLVx+!NPMD2!tqFx z41_a~h{Z>@5_>ljlN*W2=ZUE%5n`Dym93$X)%c_MTJ4L)7ej|%ia>lhjQl9{at|~) zF^fkUi2;?tHkzh zOhWt 
zYxf4+JUCd@>sCcKc0P;v1-3Nq7g^n){VUi3+Y8_x`Fb$5vb4Ok_QoINKiZqeFKisY z@M7@dv(UxeVX&hBqfY@1!S-@LLTm{-d0Ff@86AS46KvDFb=y_viH#wamxj;|Mf084 zL8QP6Ww@)$d6VKID9+)83#_tOp(WjRL}t^prx5v;Zq>{lbzA^;|J%Apr8!_@=#!w8 zf#HYdXE&Zm&n6DP7|3j;MputNI{rL0^(;E|^b~$DVLIVt!kN9E7$OKKBHzbnU8k7MJ0L> znGy#0`BhNBjOsFaJUC2RqIlGf;`X@0ydzBU_$mJTKqnCV5JC6hijm8WgQ&dzckJs( zLx2M8BLMdyz{TXrra}gj%^+eS#KMS0NNS`xfLIi1F~s6X8$@h~MB=RkV6FWrdAjvx z9Do{^Gc8q+=Y?%Wl*hTA`FwB}!n1p^1`l<3c2jgDI)|WZ7JGO6XOY4GCE%rDvkXsi z`9q(gB~pwd?t7eXruU)#D)e*rY5z<5A@HH6vm*eSq96!=Ba`2dfxnZ3FBKwuC~T0c b+lnlt*7k1`sJ4gB3c~nW=HCQszViP9@-!C) delta 371 zcmaE@`kamLG%qg~0}$-ryeu<_aU!2Yh!v1GogsxGg)xUAmobWw5yWQ7VajEWV$NlW zV##HVVr67tVsK|jVQyhaVM%3O%?vVufgy^ml2w!KC5WlXc#AtRFE76&u_QA;uXy8b z8%9R<$v+s4#X+_eaf1jRAW_T&Boq{i_$NCtnb?5k1VM5_K*CRxrHBv2VF41i*osn1 zN{jMtvH0cZrNU%bLDHfiLTvITCM{(F5K{z1fNcPoQp67;IA8?ZPO4pz3{VLp5En}Ui4V+- djEr{~l%6w0Uu1~>!p6!dJVWj)1CRoH2>@uoM}+_Y diff --git a/src/app_runtime/tracing/service.py b/src/app_runtime/tracing/service.py index 00a8470..00bc284 100644 --- a/src/app_runtime/tracing/service.py +++ b/src/app_runtime/tracing/service.py @@ -99,7 +99,7 @@ class TraceService(TraceContextFactory): self._write_message("ERROR", message, status, attrs) def new_root(self, operation: str) -> TraceContext: - trace_id = self.create_context(alias=operation, kind="source", attrs={"operation": operation}) + trace_id = self.create_context(alias=operation, kind="operation", attrs={"operation": operation}) return TraceContext(trace_id=trace_id, span_id=trace_id, attributes={"operation": operation}) def child_of(self, parent: TraceContext, operation: str) -> TraceContext: @@ -116,22 +116,22 @@ class TraceService(TraceContextFactory): attributes={"operation": operation}, ) - def attach(self, task_metadata: dict[str, object], context: TraceContext) -> dict[str, object]: - updated = dict(task_metadata) + def attach(self, metadata: dict[str, object], context: TraceContext) -> dict[str, object]: + updated = dict(metadata) updated["trace_id"] 
= context.trace_id updated["span_id"] = context.span_id updated["parent_span_id"] = context.parent_span_id return updated - def resume(self, task_metadata: dict[str, object], operation: str) -> TraceContext: - trace_id = str(task_metadata.get("trace_id") or uuid4().hex) - span_id = str(task_metadata.get("span_id") or trace_id) - parent_id = task_metadata.get("parent_span_id") + def resume(self, metadata: dict[str, object], operation: str) -> TraceContext: + trace_id = str(metadata.get("trace_id") or uuid4().hex) + span_id = str(metadata.get("span_id") or trace_id) + parent_id = metadata.get("parent_span_id") self.create_context( alias=operation, parent_id=str(parent_id) if parent_id else None, - kind="handler", - attrs=dict(task_metadata), + kind="worker", + attrs=dict(metadata), ) return TraceContext( trace_id=trace_id, diff --git a/src/app_runtime/workers/__init__.py b/src/app_runtime/workers/__init__.py index 69099d4..ca7672b 100644 --- a/src/app_runtime/workers/__init__.py +++ b/src/app_runtime/workers/__init__.py @@ -1,4 +1,3 @@ -from app_runtime.workers.queue_worker import QueueWorker from app_runtime.workers.supervisor import WorkerSupervisor -__all__ = ["QueueWorker", "WorkerSupervisor"] +__all__ = ["WorkerSupervisor"] diff --git a/src/app_runtime/workers/__pycache__/__init__.cpython-312.pyc b/src/app_runtime/workers/__pycache__/__init__.cpython-312.pyc index 3b1f2ec3dd661e4551fea9c60a3b3f83d7e63576..f680d9d842248d51227be1e614503a177e6160af 100644 GIT binary patch delta 126 zcmcc5G=YitG%qg~0}xCsU6C0xkyp}I49JdEy#HM&^mC%v U?=o0F;Fi6>CD+Ja#10e$09RKW<^TWy delta 206 zcmbQhbf1a$G%qg~0}zNFTb{XOBClk<3Xn6MA%!7@F^3_SF^Z89#AeE2%4LpX2C|t` zSkjrISSndH*R!AmOLU zdP^a(pdh}eG_NExH&w43tf^S95M)9;h{v7^Q9m(Rk&$I$Yo-P_P#+@@7mERj56p~= bjCUC None: - self._name = name - self._queue = queue - self._handler = handler - self._traces = traces - self._concurrency = concurrency - self._critical = critical - self._threads: list[Thread] = [] - self._stop_requested = Event() - 
self._force_stop = Event() - self._lock = Lock() - self._started = False - self._in_flight = 0 - self._processed = 0 - self._failures = 0 - - @property - def name(self) -> str: - return self._name - - @property - def critical(self) -> bool: - return self._critical - - def start(self) -> None: - if any(thread.is_alive() for thread in self._threads): - return - self._threads.clear() - self._stop_requested.clear() - self._force_stop.clear() - self._started = True - for index in range(self._concurrency): - thread = Thread(target=self._run_loop, name=f"{self._name}-{index + 1}", daemon=True) - self._threads.append(thread) - thread.start() - - def stop(self, force: bool = False) -> None: - self._stop_requested.set() - if force: - self._force_stop.set() - - def health(self) -> WorkerHealth: - status = self.status() - if self._started and not self._stop_requested.is_set() and self._alive_threads() == 0: - return WorkerHealth(self.name, "unhealthy", self.critical, "worker threads are not running", status.meta) - if self._failures > 0: - return WorkerHealth(self.name, "degraded", self.critical, "worker has processing failures", status.meta) - return WorkerHealth(self.name, "ok", self.critical, meta=status.meta) - - def status(self) -> WorkerStatus: - alive_threads = self._alive_threads() - with self._lock: - in_flight = self._in_flight - processed = self._processed - failures = self._failures - if self._started and alive_threads == 0: - state = "stopped" - elif self._stop_requested.is_set(): - state = "stopping" if alive_threads > 0 else "stopped" - elif not self._started: - state = "stopped" - elif in_flight > 0: - state = "busy" - else: - state = "idle" - return WorkerStatus( - name=self.name, - state=state, - in_flight=in_flight, - meta={ - "alive_threads": alive_threads, - "concurrency": self._concurrency, - "processed": processed, - "failures": failures, - }, - ) - - def _run_loop(self) -> None: - while True: - if self._force_stop.is_set() or 
self._stop_requested.is_set(): - return - task = self._queue.consume(timeout=0.1) - if task is None: - continue - with self._lock: - self._in_flight += 1 - self._traces.resume(task.metadata, f"worker:{self.name}") - try: - self._handler.handle(task) - except Exception: - with self._lock: - self._failures += 1 - self._queue.nack(task) - else: - with self._lock: - self._processed += 1 - self._queue.ack(task) - finally: - with self._lock: - self._in_flight -= 1 - if self._stop_requested.is_set(): - return - - def _alive_threads(self) -> int: - return sum(1 for thread in self._threads if thread.is_alive()) diff --git a/src/app_runtime/workflow/__init__.py b/src/app_runtime/workflow/__init__.py new file mode 100644 index 0000000..c9c2ef6 --- /dev/null +++ b/src/app_runtime/workflow/__init__.py @@ -0,0 +1 @@ +__all__: list[str] = [] diff --git a/src/app_runtime/workflow/__pycache__/__init__.cpython-312.pyc b/src/app_runtime/workflow/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a3f2ed872a684257e7525d3bafa155329cf5f99 GIT binary patch literal 248 zcmX@j%ge<81hSJ>WJ&<(#~=<2Fhd!irGSj-4519^3{ecJjHyhLKz=H73WE)hu4L3? 
zy2TzJpO}*qAMdBhQp5xlyv34}SzL09xwxe07C%rfFE76&u_QA;uQ)z_CBtW+3Wi@^ z`k}?CMaBAwIjI%;E~#bl1x5K;smUe9@nuH(i3J74`UN>jiTcGw$sk^QQE6UDW^Sr} zd45rLT26ksetdjpUS>&ryk0@&FAkgB{FKt1RJ$ThpwWy#Tr3JCJ}@&fGTvYipAgi* S-XYSF+$nj5LA;0^C<6e?vqX0Q literal 0 HcmV?d00001 diff --git a/src/app_runtime/workflow/__pycache__/runtime_factory.cpython-312.pyc b/src/app_runtime/workflow/__pycache__/runtime_factory.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16993e913b4ba4b3e73db00c829f8e4e712a29be GIT binary patch literal 1298 zcmZ`&%}>-&5Pz>Y@_+~b00{m8c+$HUh$N5(PR4^bA{rA;&a~SF3{KMN%yj0>Z+`RorLC=nfK0tz zEzU`Vd_yM|B{JyY5Ev`OA{MntndT`)Te79HoR>peww1D)S1FOm17ayl#8TJcG?ctn z)AYm-G<6!J499VO!!Npy2X6ZlSIib{cmBTftmtrvbj6V|F1(`WbI0T+7RS;A)en&s z!bzT5BrjR2MJ?$)wKPj!O5|lL0Wg%jQlok@NXLgxR2{!q<|BsbyP}qZe(H?t&U$78 zEZ`^MBNIXoGhv51trHe^Cv^!t8oPBFyttOs$QsS*a?r(0*KxRsQ?f$TD@ZV37$)}u zjfE|BB~UzW7lO_+lC>yTB%zUq6Ze7~jb|Twz)IGz`HSo@pJNr_mY|Kt=5A$;O2x}o z?59T76DE30M8jt18ysgFff!PzQVWud6`i8b*uwR*Im*NdA(V>%Ws&UOxU;NmCwq4< z^eii{TX)k}mX$ZDLnNh1Bsv;AA(5}ZSRsCswo;QaOpByWN{!XV5qz&VYFALx4H(n=8Od`jFnxhYNOrC*z>AkM_)uI=HsTMSci5=smOI z1oPnnBXrlG#WCWeag}NT(?9# literal 0 HcmV?d00001 diff --git a/src/app_runtime/workflow/contracts/__init__.py b/src/app_runtime/workflow/contracts/__init__.py new file mode 100644 index 0000000..c9c2ef6 --- /dev/null +++ b/src/app_runtime/workflow/contracts/__init__.py @@ -0,0 +1 @@ +__all__: list[str] = [] diff --git a/src/app_runtime/workflow/contracts/__pycache__/__init__.cpython-312.pyc b/src/app_runtime/workflow/contracts/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45a4821107cbb6742aba5b9ae15e2ee34212930c GIT binary patch literal 258 zcmXv{u}VWh5Zrq&@KgvkN{oMy#)H(s%GyFqW3!wutDL#q9ebC=be6Fb8$ZBCzs1%< zUh5A8(xu7eJbJ&hHwuzK#Gd4cq&IQ5MHQW z(Nu`WImF9eiwik6EoW11%9Ob|l+62(O}4=|gD12)%sn23`$jvn;|%`kF!pgJd< z^|gsH>F92aPF(LC^3-QLAw3T~zH?!&>)1#ymZALtci_P9(5=K);=e#dZluSC7 ZwJMcTKhSx(da9O3%lf(d4IQo|*#c@7Ne2J` 
literal 0 HcmV?d00001 diff --git a/src/app_runtime/workflow/contracts/__pycache__/context.cpython-312.pyc b/src/app_runtime/workflow/contracts/__pycache__/context.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ab11936247f1e7a41f5f631192775668db1fc27 GIT binary patch literal 986 zcmZ8f%}Z556rZ_Y@5|TgOHDMeFzAYSp^Y06BBPDCF)41uFx@+^p53oz=BY1QxG-oL zZBz){Cc5bVX!+c9NeBv}o$`XfRcG#d_hHWBH@`FI%$eUgbG}roWdv*Q!>7guM(Bq! z@?~}j?mPyvg?!}W0EO7aSaB`TLfzFv!!d@89IP$5#Y-e7?bA^P2@^$>Pb1hEue zFHN@TT?2P!fovg$TA{NK68RoL$IJO)brz~ zOU|-jTsSmf>&`q|B?*sLm?s5Uz2nd%5l#|3qmJO7vIuWRvJo<8t=AgIgWhncS!4{l zG@Z24qKIfBUc~bCwR1h3&OHP0x|jgiKtG2^x94{kb{5*h_P4>2?TOvVoyqp#trJtf zSUyI2soa4$1a+krXR^s7uBH`!s7OdeLqxSD5P zQ=l5eF9Yl$ens(13Q9Q?fQT`Gx9EFyv~7%j9lP;);&5zcYiXymB9Pm~sf>>Jc&_lH#BO2l)E2EhYlT7P+>OrB$ z;-&wB(t`dU9@~Qk3W5jm*0KkoCtqe=@xi?Oe)C>le(xn;rlvXw?9=-XS%eY#;mFv6 z5x~Po0QM0_97~kr1Y?Ii>E$FLxu5tACo;&xB*e%=f^h#ZIDmT2JhX;*_!jZ#z#IKJ z7%eD?2CZKEmt#F_Cs)7l38Oh^tX|IxOtX*? z!NXMm`$(V!bCh_SAUYx36Fw))5CL(42oL?SxFq5M#6lP2QPOHMTaFFdy^ifXFrwNJ zx|YU9x=vF8a)>s{c8p?$&YW5hG{Z%oK@#n=)Rb!QW}0WIc?^k*S3&a=8`Tx`BeZR& zWnM{P1h=7vZfm{XYugs31)7PN$IPa$x%d2f+#yf>3?IQRJlvCzB^4# z15_8leMCd>=zJsI)I#Z)N%1;f5nHrUUoMKd&6mFYT=DK0Wf6 zzFwT$S>3&N?9Zv$@xJw5%Qe}rO|76wnf zmwg3gAHaw31thR15%H#uUiM^FG9%;39KQalQ~9dC>dwdY^&a5!;qAL|&Hz4Buy~x7 zu-O2~IV6y<6fzbvM%+rRjE6k4L%WH2>SS){Hr!4-SvTx5u;2+K&J+??@(bSTg)5Ve z-~C2`&+1i?=YSQ@yazO7YnhJfPTcmbseveZGD4n?3A3BoXn>XkQ@z22h>TwoYWk1 z&bF4_^l^s{x2gFWW=LV++iD$G+r8>w;5&6UigJ<3D5`r=lod&p5?_m=qe`Uh z8%1|QZ4#^!Y!Pe_tSo?t-bR?hC+FHvi#an$)u#BQ(tGO7) ziss$asqZd%?=QUf|9Wp2UWV4po&O}O=BhMa+h+eYaMj&yEk9J(u>t%SK;J<4&KYBu ZaO*4F{sMcKu=~s9Z0F5`-+)VF=MOt?+o}Kn literal 0 HcmV?d00001 diff --git a/src/app_runtime/workflow/contracts/__pycache__/workflow.cpython-312.pyc b/src/app_runtime/workflow/contracts/__pycache__/workflow.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..571b1104b4cabb9d0384503204e2bf03b4718b4d GIT binary patch literal 1080 
zcmZ`&yK59d7@yhKz49<2UMwWYU7-iz7Oaz00a5W96NHs8jJI>Jx{q^aF7Z+ciG8%m zzo5iFz}DK*E8>D6h=^Un6+$Y%Z#SDnj8pvPd;I3x@3o&MC#wX`$2V_-*OZX&s1(P} z1v=gZVW0TKry+@`M=8oiXhf!GMwVw~x*6J$<2jTVM3}zy-adeMcnorf_|7izO9w`7 zImj>Kl`iF$fm`{9TOL+hwO{zMMbjFO<3w{EB(Z84X~pN7cSEjJYWD&W`aqd@l*nhj zFxh&jMSl$pwh9xix~JdHE;Co?cm;%gB8W$Q;u-5uDb!#rfz>y$8k8VcOW5aid5-UZ zuY|r~xm7v;VkhxMYAP*LE9Q}q4lq(nK^~CCv@d#m5Ng)rU7g6`_LLuV^H0 zYg+PH1zByKRw*qrhEW(xtBgg7KL}BtVC?yTho_!t#x8V(F^RKFEsQLuKzv8WfaGM& zT~|UXmxtnoyDBzWUnWmQS1Y!8+vWYfa{J*E?kd?ujmbf*gGjhrr>%6mNetNm|IF0x z4`l^HXHW(~T?cqYzEvl8?jBa_pRM|r#?24bsImO={@%jKT9#J}`sK4&1owYtfm1QF zsaPP{S?3*S(QGm6RnZILtaTFiTGjy4veH~?#`X66d8>;TIIE4+j^R?BmL;e0*%gEt zgjs|+ge>biO4kZNu0lpp?$pfAgTq>5WHoX+GCQytFu2}84zd|qW}N^ literal 0 HcmV?d00001 diff --git a/src/app_runtime/workflow/contracts/context.py b/src/app_runtime/workflow/contracts/context.py new file mode 100644 index 0000000..b64b97b --- /dev/null +++ b/src/app_runtime/workflow/contracts/context.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +from dataclasses import dataclass, field +from typing import Any + + +@dataclass(slots=True) +class WorkflowContext: + payload: dict[str, Any] + state: dict[str, Any] = field(default_factory=dict) + + def snapshot(self) -> dict[str, Any]: + return { + "payload": dict(self.payload), + "state": dict(self.state), + } diff --git a/src/app_runtime/workflow/contracts/result.py b/src/app_runtime/workflow/contracts/result.py new file mode 100644 index 0000000..f4907e3 --- /dev/null +++ b/src/app_runtime/workflow/contracts/result.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from dataclasses import dataclass, field +from typing import Any + + +@dataclass(slots=True) +class StepResult: + transition: str = "success" + updates: dict[str, Any] = field(default_factory=dict) + status: str = "completed" diff --git a/src/app_runtime/workflow/contracts/step.py b/src/app_runtime/workflow/contracts/step.py new file mode 100644 index 0000000..fc350ed --- /dev/null +++ 
b/src/app_runtime/workflow/contracts/step.py @@ -0,0 +1,12 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod + +from app_runtime.workflow.contracts.context import WorkflowContext +from app_runtime.workflow.contracts.result import StepResult + + +class WorkflowStep(ABC): + @abstractmethod + def run(self, context: WorkflowContext) -> StepResult: + """Run the step and return transition metadata.""" diff --git a/src/app_runtime/workflow/contracts/workflow.py b/src/app_runtime/workflow/contracts/workflow.py new file mode 100644 index 0000000..92e2b58 --- /dev/null +++ b/src/app_runtime/workflow/contracts/workflow.py @@ -0,0 +1,19 @@ +from __future__ import annotations + +from dataclasses import dataclass, field + +from app_runtime.workflow.contracts.step import WorkflowStep + + +@dataclass(slots=True) +class WorkflowNode: + name: str + step: WorkflowStep + transitions: dict[str, str] = field(default_factory=dict) + + +@dataclass(slots=True) +class WorkflowDefinition: + name: str + start_at: str + nodes: dict[str, WorkflowNode] diff --git a/src/app_runtime/workflow/engine/__init__.py b/src/app_runtime/workflow/engine/__init__.py new file mode 100644 index 0000000..c9c2ef6 --- /dev/null +++ b/src/app_runtime/workflow/engine/__init__.py @@ -0,0 +1 @@ +__all__: list[str] = [] diff --git a/src/app_runtime/workflow/engine/__pycache__/__init__.cpython-312.pyc b/src/app_runtime/workflow/engine/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fdf4c3389d442082244cc9295f7dcc977e2d3689 GIT binary patch literal 255 zcmXv{u}VWh5Zrq&@Qe^_lob0XpehA@R2V8umGJeOk_2(MJX zXeuIYEzxDa6$?2wEo)M4%9LYsAenPM>ukJBL~pQ>25hfzm^dcXmhbU`7Zjqu8S%5T3Pt&Jpf%K_{9N=oBKPYtD!oQ4kPRSAme+tju|jnD}lu+w?aHJUgKjP|L#GFW!Ra`7YJ@gH zl!}Akf!O1jRxiaQH+=6-5M`MOvh;Zr7(F46>q6$qOa%L96$YYG6zq_sQ8u?dB{?NP zh;%OE!<&E6KKf(R3W_NSUc*VJ|K>@F6#iYHrfT&S0{X>GO?nL-yWZrOCNeQQsATIh zCCx4hq}K_qA^fL?wci?|Bt-?Amnx|CbT8x|4}IOC8ERBbYt}Wnw!Rj5p{4c4N(yX{ 
zHs>;$3C`^*=Q9--De(^HuZk$GS9Fg;^a-vLRM$o|R@bF{Vg&RVnqNTau+~QDBm5w! zZlu~X9=0gav_KJ=SH$f%`xGHrx0P2$hy!W%Tz{ literal 0 HcmV?d00001 diff --git a/src/app_runtime/workflow/engine/__pycache__/transition_resolver.cpython-312.pyc b/src/app_runtime/workflow/engine/__pycache__/transition_resolver.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac5b7ffa0026a65844bbdbe9080ee84f6f7cf127 GIT binary patch literal 882 zcmah{&5P4O6rV}b54v097b1IUaaSal287+c6cG`*#f z9DSl$Mtb>)bcGrsH86($n9IJtMuIkcbonUbn9|W&urx>O6~*CH8x=~FS-78qTDT5i zte&N(8}Z-vD%B5poHkY`5H}R}2dN=ow6rR-krg&;x7+8>?Wc-MWxGCqZQti(7RhkL z`>|r<+qN4;%8vXOuB~JrJth+o4+Cx=6>`~J91aDySN#|?$Q0;AleAVi$6Nm@)G1juj_T z95+-f+tYS06de9boiX7CoUydQSP*)NkM<^GuM*eKBl04Ye<1P_Qke*~63|cxGxAHL zmHj*i4|-M&83zgU1V)KsrZ?C0>6H3(`k?@AKu;VO-dcX$TmQ)|%kwd_8(o7Qt6QGe lyZJnK6iSTmN))3AlLg-^Oq>)W`s4|bbWb*gK8n%al0+6QWMf69H?d%M%!fC063 zH&wa)-E+@5_uO+%pT770+3j`#`1{*$m!m;906(IR{`glA!)t5{N>@5M3Ds<9`#MNkmJ(Wye=OMX#gbOG5D3UzO z^2zv3mWyaJXsuOyV6Ph?SO6>-!x#XK#u&>t7-MIwxA8G6gEO|djyoKtrVcG8Wjw^qyeX(TY9}Xk8b`SRrr0%>ZO?}y!;+1)=!up#fQgCk_Vy7p!Dq1>+a?5;zwU-E;uUiR*hyn7z) z$a>pnZC{bEp&}3|q(8y{Emh5-I}j~^42Y@ZjZMtR6q61)jf^p=F(W_L(Fhak=p&?! 
ztkGp*hGH>TBsTx_%w8#2J7a--rJK0`hfS77A0 zsV`P#VjVG7{T+asi%WTRUh~P=U}dmmEK%$cuCGXy|4^?N@!O0wW4&kj^5{o+e}`$2 zjxtu{g&7;vQlrz%XRO#-9MGe-Sdl!~u(F+|79GWQ>BxM-I#*m*A+J?0nd}CNH5kwU zTiNwt8Mb9Si)6tTHW#YnmYRxrlPIq1Yrr@kSQ+~xmzl{~O4z)$3#P5$}&@8{ZW1!b+h=sOv2DR`qOh8|;42TXpPRxme%3w2Uod*K-in zoG&f+EO$`Vd6yN$(8q1l53Y9m3cvnM|NmCua;Bna75>o5SDCTso`BJ?FcsUcjH&v@ zIt-eCuv>S@ZovIx2H(PWtvZ+a+%i+P^=Jm)kTa$_1H}RcG{Aoyr@e+M129`xw~`G0 zHF#iSNX9Mf*9D;Eg&GNNBD&eyY8+zy)jI$}z<^5c8ISRJ|!^ZEXF>o@QyGlPGD zPhnHwmUR?NVS(x)#ZlNwDrNd!5^XN&D8r!-OxmOG8U4Hv<^+}rDvkmQSQH#WVUkzu zVL{;dbd$jhRrs6vBT$G<`CFU$Rzp0E5397a%cKP`4t;|WX*?W_v&<2{f@?@AWp72; z6gv0N!gPvFH*PbBCK627wfZB;iBz10_WGxy!Zp9S%lEgYEv>=yk!{p$BMVm&OdCfu zbC#guG^Elsn{=;Z3s@Dr;9j(1#StkS{F*ot^_{A3Q3N=&D_2|$JC1hmaQTW2oir3e z;|L27CBiBGT2fH#C=9IP;#mRmhbQ9#hxVlKAOc;j*rJK?q+(Ab8F-?pfOHNWtCTY6 zQ8W%4HLy?|0hOj$p#_TbgzkF9iCn7<5h_IEc9!FkoMJ<9;ZTWGoJO#eQeAx^j`(DX zK}I^XC$G45hpB4S1W;UNoD|AX03Ex|r!<8$2UibW&+^DQXfwe(V56SB{C*DYK)H6!8}-ovwGzO+UvZOK=77rN)W<%)e$ z#Xh;BU8-nb?UE{v&GsX^2j>Q5vQ8rF#QMHx=bw+f82!^|j=X?|2IdB2vQ;8mmmRB> zPkfJkIr1o^YwR+)Mfvu3YE?p^+Lwg1Uyk3V~Q^N%xsn0Y=f57E*PErzej;Tux;MmEgnNMVB+ z`SKl$m1wfX60zynvue5btkio}JT@#2pA)I`@cg?lGe5It2k!be!0IktBY@ju`YcZk z=BesDRk`4tcS0mn4HDJx#%YI~B;r(Es5;8Ky8B7saUe%_SmLQ?0 zCF0;^k!Hn#aWz4%ZV(Y`I-lY>q9B5~ngZ;pb>x}Sgx zhJvcp)+AGVCCYU0l-zSh>N%6``IR_)F-MKA<7<7p7Q@(cZqmk9-l}qyeUTJ@?SaW>M0X$7_ zfX!15UmZ`CrlCcmTEy0Y=jX+9qw=}WrE{O>C>rT6nJ0=Lhfyn-%qE(h#deRR?EM3R||qYERFrf*D!@QSR6jlmEraSgbV0!( z38iJ-g;8*-a2XavA0BwgCGaYOcaI!efYMNuxp14L-h*kEV(V|AOr-@rvx*ycYU*{H zx)kmdvdmcHTcS&INjG;I4xoJ&?}yEr6~nMspz%jg`WJBg73h5hhW?2k!7gGy0o1A* F{sVqneoFuV literal 0 HcmV?d00001 diff --git a/src/app_runtime/workflow/engine/hooks.py b/src/app_runtime/workflow/engine/hooks.py new file mode 100644 index 0000000..6f46b57 --- /dev/null +++ b/src/app_runtime/workflow/engine/hooks.py @@ -0,0 +1,14 @@ +from __future__ import annotations + +from app_runtime.workflow.contracts.context import WorkflowContext + + +class WorkflowEngineHooks: + def on_step_started(self, 
context: WorkflowContext, step: str) -> None: + del context, step + + def on_step_finished(self, context: WorkflowContext, step: str) -> None: + del context, step + + def on_step_failed(self, context: WorkflowContext, step: str) -> None: + del context, step diff --git a/src/app_runtime/workflow/engine/transition_resolver.py b/src/app_runtime/workflow/engine/transition_resolver.py new file mode 100644 index 0000000..65b179a --- /dev/null +++ b/src/app_runtime/workflow/engine/transition_resolver.py @@ -0,0 +1,9 @@ +from __future__ import annotations + +from app_runtime.workflow.contracts.result import StepResult +from app_runtime.workflow.contracts.workflow import WorkflowNode + + +class TransitionResolver: + def resolve(self, node: WorkflowNode, result: StepResult) -> str | None: + return node.transitions.get(result.transition) diff --git a/src/app_runtime/workflow/engine/workflow_engine.py b/src/app_runtime/workflow/engine/workflow_engine.py new file mode 100644 index 0000000..5383f74 --- /dev/null +++ b/src/app_runtime/workflow/engine/workflow_engine.py @@ -0,0 +1,68 @@ +from __future__ import annotations + +import logging + +from app_runtime.workflow.contracts.context import WorkflowContext +from app_runtime.workflow.engine.hooks import WorkflowEngineHooks +from app_runtime.workflow.engine.transition_resolver import TransitionResolver + + +class WorkflowEngine: + def __init__(self, workflow, persistence, *, traces, hooks: WorkflowEngineHooks | None = None) -> None: + self._workflow = workflow + self._persistence = persistence + self._transition_resolver = TransitionResolver() + self._traces = traces + self._hooks = hooks or WorkflowEngineHooks() + self._logger = logging.getLogger(__name__) + + def run(self, context: WorkflowContext) -> dict[str, object]: + run_id = self._persistence.start_run( + self._workflow.definition.name, + self._workflow.definition.start_at, + context.snapshot(), + ) + context.state.setdefault("runtime", {}) + 
context.state["runtime"]["workflow_run_id"] = run_id + self._traces.step("workflow") + self._traces.info("Workflow started.", status="started", attrs={"workflow_run_id": run_id}) + current_name = self._workflow.definition.start_at + while current_name is not None: + node = self._workflow.definition.nodes[current_name] + self._logger.info("Workflow run %s: step '%s' started.", run_id, node.name) + self._hooks.on_step_started(context, node.name) + self._persistence.start_step(run_id, node.name, context.snapshot()) + self._traces.step(node.name) + self._traces.info(f"Step '{node.name}' started.", status="started") + try: + result = node.step.run(context) + except Exception as error: + self._persistence.fail_step(run_id, node.name, context.snapshot(), error) + self._persistence.fail_run(run_id, context.snapshot()) + self._traces.error( + f"Step '{node.name}' failed: {error}", + status="failed", + attrs={"exception_type": type(error).__name__}, + ) + self._logger.exception("Workflow run %s: step '%s' failed.", run_id, node.name) + self._hooks.on_step_failed(context, node.name) + raise + context.state.update(result.updates) + self._persistence.complete_step(run_id, node.name, result.status, result.transition, context.snapshot()) + self._traces.info( + f"Step '{node.name}' completed with transition '{result.transition}'.", + status=result.status, + ) + self._logger.info( + "Workflow run %s: step '%s' completed with transition '%s'.", + run_id, + node.name, + result.transition, + ) + self._hooks.on_step_finished(context, node.name) + current_name = self._transition_resolver.resolve(node, result) + self._persistence.complete_run(run_id, context.snapshot()) + self._traces.step("workflow") + self._traces.info("Workflow completed.", status="completed", attrs={"workflow_run_id": run_id}) + self._logger.info("Workflow run %s completed.", run_id) + return {"run_id": run_id, "status": "completed", "context": context.snapshot()} diff --git 
a/src/app_runtime/workflow/persistence/__init__.py b/src/app_runtime/workflow/persistence/__init__.py new file mode 100644 index 0000000..c680823 --- /dev/null +++ b/src/app_runtime/workflow/persistence/__init__.py @@ -0,0 +1,3 @@ +from app_runtime.workflow.persistence.workflow_persistence import WorkflowPersistence + +__all__ = ["WorkflowPersistence"] diff --git a/src/app_runtime/workflow/persistence/__pycache__/__init__.cpython-312.pyc b/src/app_runtime/workflow/persistence/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fea11845752d78e8021176e0a377de234e87d6ca GIT binary patch literal 311 zcmX@j%ge<81b-&2$gBj?k3k$5V1hC}ivbza8B!Qh7;_kM8KW2(8B&u*uC&Da}c>E8+xN!wAI1 b0zl#eGb1D8U53yH+zJ=C6dT!#*nxrolptPM literal 0 HcmV?d00001 diff --git a/src/app_runtime/workflow/persistence/__pycache__/checkpoint_repository.cpython-312.pyc b/src/app_runtime/workflow/persistence/__pycache__/checkpoint_repository.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..233c8b7f03e9eb4973eca2ad9f8eb25fd186b705 GIT binary patch literal 2242 zcmZt{O>Yxd@V));SNwsU08T^H4J1mI2AoooNT{mHmnx(ZDiW%a3O&s325jtI^WK`o zk&!rX;LuCeLoWRRATIm`kg6Vf!3~wIl`0`sJ@n=_ks{^PnO)m4AnKz$r-puT8 ziA0RR%Kr4MAcY9|3yJPh0)%eO0J1?$V$u>R(`8B#mr7DuUY045$XCRapAb`dAq5)C zN=?;6{~)1LZy;~mj+=K2jxB(c&)YR4kPDz3Y;@}!ARELY%hV*xlBt>@Q+lE<%Te;- z&kTc0Wmz#JrUJWKqk7aEx_Hkr?pK_G?OwMkjwrYeuPqpzsC+kwgG0Kd`OfOJPMFU? 
zaF+l@yJU?#rwh8|4Kl;AZOgz5u$8=lM!h&QdcFh*DZ(nPc$p6+1$xx9T_blxSX|`t zC2K8r$$H2t+$n-l!5*H;T?_dH#xc@f46cEM$Aee|GIobIy25R=FNcuZO+W}hl=BUM@8zM&%Q ztO3~llvN-#8<3)6FGXDuLSA%GJ_h{$gm=!1{T4fCkbJ2w*A%aoF}nX`&+{yK6sm`c8125*jSVEi^>8t1v?~is*R-C!J!PRyy(f;EJ+Z*;wAOPwt?g+t zbUUqe-Lj%^Y~XV%?^-6yyYHFRwQtXVedF??HZ>upwf_H&rHgFo%GJw@OXshCGc~8{ zJv}|*4X~=PSlKE=^Kl$mUI^O6vQ3U9;e|Vm=Y@teofs~XY9yW6kwI@q27A8I9SKO^*6!Znt{8`# zyh^4oOv|r6p1Gi@uTv4gUu)_GJ^FegjQF&AVK!{^&x788{SLJKX#xBGWs)jcqhf^O zG}+Gty4d+9^s~BM94Cu}a@$jo(qn%4c`|&S{2KIR;qssmPoN>J#`UCGFx>BfF|EOe z@)p;SGX?;zi@2xUah#GK!dZ{~&zKiutn8T865?^j9#r$CfWwohJc zu0B9STm9N7s?(cAPtg3wp|A8!@`J^%m! literal 0 HcmV?d00001 diff --git a/src/app_runtime/workflow/persistence/__pycache__/snapshot_sanitizer.cpython-312.pyc b/src/app_runtime/workflow/persistence/__pycache__/snapshot_sanitizer.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25c5fb0e9e3a226847a81672943f5d8fbd229e34 GIT binary patch literal 1575 zcmah}L2MgE6rI`GbyCMkP3)~Hfize}$ySi1=^+xTN=hR^E~6xQ>$RC# zCv{{?9CFB^QWFKqJ@n8+53X{c2M%y+kEjP~OmVa-5(07HRxlE3PW)MW-6m?_OaA`* z|NQs=%zrZv1_z@EB(`0*zY-Aojemq9b`0Ep8;lw}RKzcavf~w$U z@E;Tt+bHaPr@OW){BM@z9Ru0>M7_u?%2eI4 zuBx-vidLkBWy|oGwlbya#UfLS&ZoM{sKLK9T5>%*Z>g)@^H+6g?g4Lv_-uScVh5cDjN<^?0&;jkdH=(=#77{rDem&XuV0EDVgB_7{WAp3^T$9F&~ zz2R>Q*U};~NR?E91pE35t|?FK^>w0xp*AsuC0;oJRXO?o6Msv%CIdcnmxZg41q*kp zt#)jPtiqkmz*URd%yxl!Fks4y`;zThdFB)5Q5xe8(E$P|A{`8(E4ov%d~(Gq`(m&% zq#8X6@gQSu;&3ALxD#-gr?3Jjpu>Q?Y2Ezj$AR?jT;}_c??xWH)=JEOJNM0nd&w^^ z+#uT@+%Q|CryHZE@1AUqPB)U%&BXk}*!**O@4Wj{V&?A2&DmCRypbHQi;t3T{g9Y> z7@OHWn%syzZTF$%CH(Z)#0(pNnz!FLe=c(W^_Slh#SCn32wZzZ{APU&TYqy5-mlAk zgj!ySy7UzY!kQHPQNCe15?YGn_iLJ~=Pgb1qneg4m?ekXF-`ltq&uAmjq~;VIPW0( z61RiX;tv?*ivJi7ucNjQk)nUcB}wiq~y0qn&RGy~X_KW=`+D;I5dsnbt@bOPMZ n(D(Y!cA7ay(-7bf9+N>mAsFMIQTlgu?4QUGmj6Pag0z1DJsD*^ literal 0 HcmV?d00001 diff --git a/src/app_runtime/workflow/persistence/__pycache__/workflow_persistence.cpython-312.pyc 
b/src/app_runtime/workflow/persistence/__pycache__/workflow_persistence.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bcab399915d110beba3e724752534fd355ba91cc GIT binary patch literal 4310 zcmb7H&2JmW6`%doaz%Ymq8vNT2PNf;vMm;oKbqQbkT!9?6xE2Lx@M8A*E=hS3dv<= zSF$NEg%3XH-h=-I1r7?K=)uPv1@uBf7AP!ipheL`Z$ufOfKPpImb**K>%?4wZ{B>o zH#6_|-kX`fkBsO9zU4pfl)eG(Uswr0IbekDN5I@Alu)5eDnd~ZkQd8hMJh_1m&$TQ zDJqAFSjBf7q;69nbViPSu5K_P1-c^>xiD zRlU#cn&Xx{ht;9|#8roVy;^o|E?3Q(yXJVyX0_y%wrmES6Jcj8Vg+*rG}=N9>UW7v ziUK7?k?K^qBNruGro%K%r8~;Ko>WnxBUFYym5##V<>F9^B>(6bRiR&_<8%nh6wOc# zN*&4+l*4r~KjBY?>$_qzx8!jAhRIv-;Oodu( ztf`z%(>?IkN0Oj`B9O^^$w9zI-7)1RxeC|?c4Y+RPMsI z*Lm}kWIft;+}@BvqkJoaRKL#Np!K16bjX3n%b~fxRX|P)d*X6^IOapHp)^E_vb}i( z(ond${qfL`4A(Q6XVbjOMnKHhBJW0hzTUGLxF1VngY24{HVj7V`6H2*s#&o?7Ihh> z2V5^${8Xrq`YI?fO4LPgAp+nFuuBDE(7W|=5^PcT6bQOkfPkN7kGHd@TG>kGnQLX{+L^Ph%-P+_57+iHzuQ(1wRBsXZE3S@?X8yf*3Plr*AB;~wnv@@7!Axln*lQrzyde<^FtdsL<~|LBf?KZCagv9$lUzwC0GszioFP2 zUgkJ3^kP3%b*OD15bRZ`yNG#gWIeDV>fCze*;dB>U!e3n5HRyohC6Y8Y482^B5f_w z_F|>ASZP)__ZM#-Xtx3*BDmp)p3IH-H(&-bq7p@kI#85|4XGhF)O)>C0Q~*=VKsA(e5U zSPpt!v=yQSUl0`|;gnqQ)^rkPPYihl)qD=*HtCF$sS`=@fpAW@wCT2Xx}}}oS=}xD zaBT0z=E_$OG$R4)=EcRm(t-Ae0NUtig*l?k|5vCC1a0K+JdvT39{HY;07Xb<{fTT) zV3ni54s&2v%~CnaetoE>u;VDELE7Wt#xdpi`HIb$!vYwOBKY${{{sL1CgqRowqcl)3O5u?; zSQt`)LKr`RmbBD&)KI5t`wsJoi zbD$K_bw2^}9MH|TwE3OQT{v^zdm6S^o4;9UUcG*xnLhzs6($-8IJhT2_YETgIR>px z7`%4ykl}ew;~tBq#EU|3{z!dEjvqScabXH$=b(~&^9)s?nOzKk-cb|ern#0jw{vdy zOnc$e*21Ulg)dqQUo=;~++XxNNr z=tdd&v|)U+VU~j)Hi7zwt+MI5728{LD0=~Cm_hOel2b^~<#}{)biX4U4t`=Wyfblv zjQb}bcSu*1ly^HiQ8QgpR`jj}T!(XMSvl5C3rbz+sJe1Z_{aEUM?t2elI#neAeLD5zdYheY87LCVGd4`1iJ;{X5v literal 0 HcmV?d00001 diff --git a/src/app_runtime/workflow/persistence/__pycache__/workflow_repository.cpython-312.pyc b/src/app_runtime/workflow/persistence/__pycache__/workflow_repository.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5bce8b4a500cc6bd989f098db69d8476b36b6b8 GIT 
binary patch literal 8882 zcmeHNU2GHC6`miD|1wS-hu}bfH38Nb>;x7F?Xq1UA#K@(@V_jfFza|G;NaNaJ7Y*v zqwWI_S#_%|>I12ErSeiK2&syyKI~iBK&6%XVy~mx8nF^m)rWoaHb_W3^_)8%d*aLj zUD|3NI!W%Fd+#~-Z|?c-ckj(#eLgP(&%WQxC4bSyFn`C0`8W)LU3vqATa3)eY>Js+ zM_3kRF2zmoBRrM)lw(2|5m<&}e!|F(8;mU6|{BN_7R_DKd}b)6|D!VQ0oY7vLtAtf^D zw35Jqk;dW)teN2W>u$(_F;a*wNF)cN#_0}KNsZ}&l@=;i6$(9#_nlD{qV~m8%4FXm z<*Jk=nQqD1OL(wm)v zW=Y9(Qj?_V?v-QKYi7EkJ?c6LK42=F_RKgI-JyqF>u2MixMv(6`6@eJn{oWgy9D`6 zqCPGVals!QZn$O*uK(;r=9W-l`*3`nc`7N&Vc~)lg+$EU2pE^L- zY1K01Jdcw-&!56tJvRAZZ%*y&4w=6$fF5d>$-eK0yGFpeJ0otofx$!o8p5VY?;iqA_GUXJriP_1h>@|*M&M_?fr1?qy9CMBJ zP+P0@)C%1x#?Xy8hIJQAsB|)Y3FfuWoKsRdKB4GN6?{LZ>TWe1&#IR*nl8XJ%c}8F z_`u$fZA7o)G|r@>q0yY0Oe?A?T~Vf@ zq4#r2PLZ^@dPPdg(NHoynwgX)AUl3Z!ITx{Cvt=+X-!IJWGLR~LNo;Krzw+~G_GP5 z8i^;UHRcmdk)^l>Iz)+^axN>^BC2dPMtn@ z^!V2PNHp~Plt{h4NQ>Sg#?@(pGNiF1B z7>LZpKIRYWhTW52(FD?3e9|qUX_LsZ27rVEYg3%`es8GEpJlKDOg*@TRw5N@n zCzwOCpQ@XoYoBjxA8g@2-xnU(BP{fIKwj7*47}-Cc-;l^{T5+xl`COIH2kgjPFtFKgGS$p#A8jxffjtgdLEfz z@s*hQQNX~=X56h~sdJbFp}&Bm9>gAsp`;U1bT8md8VCx&8+EgI*9!qC1>0vKsNt;9 zP`$ba-Zg5Z?Lgqn=1lm@?$P!MW=?yk$Y{|!>RHStuFjYSz`s;vP z5&d<@oxN5R!)39zDE3x1gy;LeSX+oEi~jM)Jm+hzIvHO_CD>gK_7#JD<=~!TaL@eZ zQgHu_SmhjDBD}n{mpw3_XICQgdA0$~`MI(A6v&TktxKoi~jdb+|JJ>KU;I} zNa5tWh1j`8{|I9Cc^2{dJd3CgG9Wg}7(TUT)UbMI`(P`-!1WDs!op5B$oDy6u*GxV zXCF$34Gtto)Db>2<4oXgnke*kNRVcnC+Ok`6E5H`b$(1E54 zb^k<{_%7HneCEgz6Gp+~!}VIi%Sj|fgJ{Ttl91AjBhk>y!pt3417Rq;Yl0LKfo|V% zT5tu&f%IU|zzAI2aX?IX0Gu(#jbr~n zb5D3we!^oh;NTEG;V`AjUfyHM^G z2KKrVR_cV5nmYT>fN(u`dpr0{H+}{&lR25s^IF}FVdGeHCfV_6opgaSJsT(l1QSZ= z5Tu$Odj~h`p_}6uw6bfbR)>T}+s9j*ngCJR`Dq;}fJC~0M7k-7w5!`@Q*8k)B$4$^ zb)zPW29iizWSgEEi3HFC3Ryq>VwrEz1_}w9>=@r5o6R8B9RCgk(qj-v-8mLdrW9Eh z6hbm2BG^#{(hohNzz0W6uG)TBZ&fA}N|qi=X;WDxzUzC$GihgP`|p?MoMB^(0F=YD zU=Xak@s&XQ{1giXz5CM32sWlpK^u*=^1Ip!^0F%gkjgp-C0k)u=saxijkN+72i<{F z0gnLjz(tap7Lb*klrA7Ry>y>>@IybF?rnTRvK^;i6obYpgY0+{ckrd%~w_57dX1p)PW>2hG&^X@-%YtUG+4Y6v7Yb6c5F20gU$Ju8^c_q$?;RtpaVH^>iG3x{6uJm_|z+#?*? 
z{(mA1^UV6#9YXw1i7eRz&9P9}&;EVvc`=~*Z&OvW13R)40=1cMx~ZGv$)Mf+gPEB^ zQ#+oyYlSm^uX!ff1sz$oVMJu49P*CDn9`nrza1!{rGp!$BQvFYVguGWEV= zJKd%|@GlRv$c{te=r=tU54UfZ*DQC;a|+IFhRH@K0q;NHR)Aia;2oUqTXr3xcfbL? zLelY+<$^-zuI|)*%Wg4rE-BOFBj9TjxH_8G5fP`(Us5#kL#T`3Nv?6)k2?^$&x^R4 z&g*Hb$N=UKLZFM*yAu`n61DZ1uEk_6BusCyS%p{LBus;<`WFabIS#D76}=gqoto<@ z1!6O_K=4-NW@Ps8ZBhzsgZ$uzTR*?~^V|F92TH-c<>07De9_()s>r%e^Ht0H!M{Pu@$&$g9d zH)+aqfsz<4i#v+qj`>w3v7f4TLRc0z7sbtGaT{zj=YCKUch^h`U0X|Hq}t*2d#jAs z<0ZI_NLc3|9-dQ=d=~_|tAls*BQrS$=6^Tr3z+fdOo)%yy`qtMw?nt`J*l%?0sRux)ol>KM``OQw}p#s;kn&~w!Sah zqM6v;k6G_OFjSUJlq6eB(qNkemg2)L} zrq;*N+G(+-gtIB%Yssf4rA;MKe(D)Aw>!&9rq@|b`dG6rXgN4Bf=@zYj?qje1@w0x zW_cxPA|vNgK#+A&fY}2XX>(e#01@H_3TO!)Z(ZQJ~7REc+GH^AD!`D`xc* P=Mcxf$NrO{FKYcS7^Q|< literal 0 HcmV?d00001 diff --git a/src/app_runtime/workflow/persistence/checkpoint_repository.py b/src/app_runtime/workflow/persistence/checkpoint_repository.py new file mode 100644 index 0000000..5369466 --- /dev/null +++ b/src/app_runtime/workflow/persistence/checkpoint_repository.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +from typing import Any + + +class CheckpointRepository: + def __init__(self, connection_factory: object | None = None) -> None: + self._connection_factory = connection_factory + self._checkpoints: list[dict[str, Any]] = [] + + def save( + self, + workflow_run_id: int, + node_name: str, + checkpoint_kind: str, + snapshot: dict[str, Any], + ) -> None: + if self._use_memory(): + self._checkpoints.append( + { + "workflow_run_id": workflow_run_id, + "node_name": node_name, + "checkpoint_kind": checkpoint_kind, + "snapshot": snapshot, + } + ) + return + query = """ + INSERT INTO workflow_checkpoints ( + workflow_run_id, node_name, checkpoint_kind, snapshot_json, created_at + ) VALUES (%s, %s, %s, %s, UTC_TIMESTAMP(6)) + """ + with self._connection_factory.connect() as connection: + with connection.cursor() as cursor: + cursor.execute( + query, + ( + workflow_run_id, + node_name, + checkpoint_kind, + 
self._connection_factory.dumps(snapshot), + ), + ) + + def _use_memory(self) -> bool: + return self._connection_factory is None or not self._connection_factory.is_configured() diff --git a/src/app_runtime/workflow/persistence/snapshot_sanitizer.py b/src/app_runtime/workflow/persistence/snapshot_sanitizer.py new file mode 100644 index 0000000..f90cd05 --- /dev/null +++ b/src/app_runtime/workflow/persistence/snapshot_sanitizer.py @@ -0,0 +1,20 @@ +from __future__ import annotations + +from typing import Any + + +class WorkflowSnapshotSanitizer: + def sanitize(self, snapshot: dict[str, Any]) -> dict[str, Any]: + payload = dict(snapshot.get("payload", {})) + state = dict(snapshot.get("state", {})) + return { + "payload": self._sanitize_dict(payload), + "state": self._sanitize_dict(state), + } + + def _sanitize_dict(self, value: Any) -> Any: + if isinstance(value, dict): + return {str(key): self._sanitize_dict(item) for key, item in value.items()} + if isinstance(value, list): + return [self._sanitize_dict(item) for item in value] + return value diff --git a/src/app_runtime/workflow/persistence/workflow_persistence.py b/src/app_runtime/workflow/persistence/workflow_persistence.py new file mode 100644 index 0000000..71a273c --- /dev/null +++ b/src/app_runtime/workflow/persistence/workflow_persistence.py @@ -0,0 +1,53 @@ +from __future__ import annotations + +from app_runtime.workflow.persistence.checkpoint_repository import CheckpointRepository +from app_runtime.workflow.persistence.snapshot_sanitizer import WorkflowSnapshotSanitizer +from app_runtime.workflow.persistence.workflow_repository import WorkflowRepository + + +class WorkflowPersistence: + def __init__(self, workflow_repository, checkpoint_repository) -> None: + self._workflow_repository = workflow_repository + self._checkpoint_repository = checkpoint_repository + self._snapshot_sanitizer = WorkflowSnapshotSanitizer() + + @classmethod + def create_default(cls, connection_factory=None) -> "WorkflowPersistence": 
+ return cls( + workflow_repository=WorkflowRepository(connection_factory), + checkpoint_repository=CheckpointRepository(connection_factory), + ) + + def start_run(self, workflow_name: str, start_at: str, snapshot: dict[str, object]) -> int: + sanitized = self._snapshot_sanitizer.sanitize(snapshot) + run_id = self._workflow_repository.create_run(workflow_name, sanitized) + self._checkpoint_repository.save(run_id, start_at, "workflow_started", sanitized) + return run_id + + def start_step(self, run_id: int, node_name: str, snapshot: dict[str, object]) -> None: + self._checkpoint_repository.save(run_id, node_name, "step_started", self._snapshot_sanitizer.sanitize(snapshot)) + + def complete_step( + self, + run_id: int, + node_name: str, + status: str, + transition: str, + snapshot: dict[str, object], + ) -> None: + sanitized = self._snapshot_sanitizer.sanitize(snapshot) + self._workflow_repository.record_step(run_id, node_name, status, transition, sanitized) + self._checkpoint_repository.save(run_id, node_name, "step_completed", sanitized) + + def fail_step(self, run_id: int, node_name: str, snapshot: dict[str, object], error: Exception) -> None: + sanitized = self._snapshot_sanitizer.sanitize(snapshot) + self._workflow_repository.fail_step(run_id, node_name, sanitized, error) + self._checkpoint_repository.save(run_id, node_name, "step_failed", sanitized) + + def complete_run(self, run_id: int, snapshot: dict[str, object]) -> None: + sanitized = self._snapshot_sanitizer.sanitize(snapshot) + self._workflow_repository.complete_run(run_id, sanitized) + self._checkpoint_repository.save(run_id, "workflow_done", "workflow_finished", sanitized) + + def fail_run(self, run_id: int, snapshot: dict[str, object]) -> None: + self._workflow_repository.fail_run(run_id, self._snapshot_sanitizer.sanitize(snapshot)) diff --git a/src/app_runtime/workflow/persistence/workflow_repository.py b/src/app_runtime/workflow/persistence/workflow_repository.py new file mode 100644 index 
0000000..7c5512c --- /dev/null +++ b/src/app_runtime/workflow/persistence/workflow_repository.py @@ -0,0 +1,140 @@ +from __future__ import annotations + +from itertools import count +from typing import Any + + +class WorkflowRepository: + def __init__(self, connection_factory: object | None = None) -> None: + self._connection_factory = connection_factory + self._counter = count(1) + self._runs: dict[int, dict[str, Any]] = {} + + def create_run(self, workflow_name: str, snapshot: dict[str, Any]) -> int: + if self._use_memory(): + run_id = next(self._counter) + self._runs[run_id] = { + "workflow_name": workflow_name, + "status": "running", + "snapshot": snapshot, + "steps": [], + } + return run_id + payload = self._build_run_payload(workflow_name, snapshot) + query = """ + INSERT INTO workflow_runs ( + workflow_name, workflow_version, business_key, queue_task_id, inbox_message_id, + current_node, status, context_json, trace_id, started_at, created_at, updated_at + ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, UTC_TIMESTAMP(6), UTC_TIMESTAMP(6), UTC_TIMESTAMP(6)) + """ + with self._connection_factory.connect() as connection: + with connection.cursor() as cursor: + cursor.execute(query, payload) + return int(cursor.lastrowid) + + def record_step( + self, + run_id: int, + node_name: str, + status: str, + transition: str, + snapshot: dict[str, Any], + ) -> None: + if self._use_memory(): + self._runs[run_id]["steps"].append( + {"node_name": node_name, "status": status, "transition": transition, "snapshot": snapshot} + ) + return + context_json = self._connection_factory.dumps(snapshot) + insert_query = """ + INSERT INTO workflow_steps ( + workflow_run_id, node_name, status, transition_name, input_json, output_json, created_at, started_at, finished_at + ) VALUES (%s, %s, %s, %s, %s, %s, UTC_TIMESTAMP(6), UTC_TIMESTAMP(6), UTC_TIMESTAMP(6)) + """ + update_query = """ + UPDATE workflow_runs + SET current_node = %s, status = %s, context_json = %s, updated_at = 
UTC_TIMESTAMP(6) + WHERE id = %s + """ + with self._connection_factory.connect() as connection: + with connection.cursor() as cursor: + cursor.execute(insert_query, (run_id, node_name, status, transition, context_json, context_json)) + cursor.execute(update_query, (node_name, "running", context_json, run_id)) + + def complete_run(self, run_id: int, snapshot: dict[str, Any]) -> None: + if self._use_memory(): + self._runs[run_id]["status"] = "completed" + self._runs[run_id]["snapshot"] = snapshot + return + query = """ + UPDATE workflow_runs + SET current_node = NULL, status = 'completed', context_json = %s, finished_at = UTC_TIMESTAMP(6), updated_at = UTC_TIMESTAMP(6) + WHERE id = %s + """ + with self._connection_factory.connect() as connection: + with connection.cursor() as cursor: + cursor.execute(query, (self._connection_factory.dumps(snapshot), run_id)) + + def fail_step(self, run_id: int, node_name: str, snapshot: dict[str, Any], error: Exception) -> None: + if self._use_memory(): + self._runs[run_id]["steps"].append( + { + "node_name": node_name, + "status": "failed", + "transition": "error", + "snapshot": snapshot, + "error": str(error), + } + ) + self._runs[run_id]["status"] = "failed" + return + snapshot_json = self._connection_factory.dumps(snapshot) + error_json = self._connection_factory.dumps({"message": str(error), "exception_type": type(error).__name__}) + insert_query = """ + INSERT INTO workflow_steps ( + workflow_run_id, node_name, status, transition_name, input_json, output_json, error_json, created_at, started_at, finished_at + ) VALUES (%s, %s, 'failed', 'error', %s, %s, %s, UTC_TIMESTAMP(6), UTC_TIMESTAMP(6), UTC_TIMESTAMP(6)) + """ + update_query = """ + UPDATE workflow_runs + SET current_node = %s, status = 'failed', context_json = %s, updated_at = UTC_TIMESTAMP(6) + WHERE id = %s + """ + with self._connection_factory.connect() as connection: + with connection.cursor() as cursor: + cursor.execute(insert_query, (run_id, node_name, 
snapshot_json, snapshot_json, error_json)) + cursor.execute(update_query, (node_name, snapshot_json, run_id)) + + def fail_run(self, run_id: int, snapshot: dict[str, Any]) -> None: + if self._use_memory(): + self._runs[run_id]["status"] = "failed" + self._runs[run_id]["snapshot"] = snapshot + return + query = """ + UPDATE workflow_runs + SET status = 'failed', context_json = %s, finished_at = UTC_TIMESTAMP(6), updated_at = UTC_TIMESTAMP(6) + WHERE id = %s + """ + with self._connection_factory.connect() as connection: + with connection.cursor() as cursor: + cursor.execute(query, (self._connection_factory.dumps(snapshot), run_id)) + + def _build_run_payload(self, workflow_name: str, snapshot: dict[str, Any]) -> tuple: + payload = snapshot.get("payload", {}) + state = snapshot.get("state", {}) + runtime = state.get("runtime", {}) + business_key = payload.get("inbox_message", {}).get("external_message_id") or str(next(self._counter)) + return ( + workflow_name, + "v1", + business_key, + runtime.get("queue_task_id"), + payload.get("inbox_message", {}).get("id"), + None, + "running", + self._connection_factory.dumps(snapshot), + runtime.get("email_trace_id"), + ) + + def _use_memory(self) -> bool: + return self._connection_factory is None or not self._connection_factory.is_configured() diff --git a/src/app_runtime/workflow/runtime_factory.py b/src/app_runtime/workflow/runtime_factory.py new file mode 100644 index 0000000..d579fd2 --- /dev/null +++ b/src/app_runtime/workflow/runtime_factory.py @@ -0,0 +1,15 @@ +from __future__ import annotations + +from app_runtime.workflow.engine.workflow_engine import WorkflowEngine +from app_runtime.workflow.persistence import WorkflowPersistence + + +class WorkflowRuntimeFactory: + def __init__(self, connection_factory=None, *, traces, hooks=None) -> None: + self._connection_factory = connection_factory + self._traces = traces + self._hooks = hooks + + def create_engine(self, workflow) -> WorkflowEngine: + persistence = 
WorkflowPersistence.create_default(self._connection_factory) + return WorkflowEngine(workflow, persistence, traces=self._traces, hooks=self._hooks) diff --git a/src/plba/__init__.py b/src/plba/__init__.py index f8031c7..f2876d1 100644 --- a/src/plba/__init__.py +++ b/src/plba/__init__.py @@ -5,9 +5,6 @@ from plba.contracts import ( ApplicationModule, ConfigProvider, HealthContributor, - Task, - TaskHandler, - TaskQueue, TraceContext, TraceContextRecord, TraceLogMessage, @@ -21,7 +18,17 @@ from plba.health import HealthRegistry from plba.logging import LogManager from plba.queue import InMemoryTaskQueue from plba.tracing import MySqlTraceTransport, NoOpTraceTransport, TraceService -from plba.workers import QueueWorker, WorkerSupervisor +from plba.workflow import ( + StepResult, + WorkflowContext, + WorkflowDefinition, + WorkflowEngine, + WorkflowEngineHooks, + WorkflowNode, + WorkflowRuntimeFactory, + WorkflowStep, +) +from plba.workers import WorkerSupervisor __all__ = [ "ApplicationModule", @@ -40,19 +47,23 @@ __all__ = [ "LogManager", "MySqlTraceTransport", "NoOpTraceTransport", - "QueueWorker", "RuntimeManager", "ServiceContainer", - "Task", - "TaskHandler", - "TaskQueue", "TraceContext", "TraceContextRecord", "TraceLogMessage", "TraceService", "TraceTransport", + "StepResult", "Worker", "WorkerHealth", "WorkerStatus", + "WorkflowContext", + "WorkflowDefinition", + "WorkflowEngine", + "WorkflowEngineHooks", + "WorkflowNode", + "WorkflowRuntimeFactory", + "WorkflowStep", "WorkerSupervisor", ] diff --git a/src/plba/__pycache__/__init__.cpython-312.pyc b/src/plba/__pycache__/__init__.cpython-312.pyc index 5ff3d38a5a1bbcaa11bddf3949e447357786d3f6..ab767082066cee288ebe087c518009f89ea7bade 100644 GIT binary patch delta 815 zcmajcJ#P~+7yw{932k%vq)C%BP1}4Ym(ns+h=q+0G0+SR5|X!kxaq0(IrUv=Ge!IW z5akwTCVl`DKY)d)LSnEKWq^gP0->zjdrFF!IO!hQuV34*?eE&BaP)H|q9OM?citO6 zmM)?he05Rlk1^_@-VL_KQzXUHB+WA<!i+gqVoo6@Ex+_<%QTSzDr`e81+#v{0@`7 
zQACfv!7s7qyux>#Z}@AN#ZSVFBT>_wR9yLZ<2>Q$N-Ivvs>^-eL*vE5gKoWI9`s#V0xKAS&~TmKz*$NyJ6 zUlit1X~~N7AwZ}*6=C{NI$uy{<_lrp3|*5!_yAv`Clgeipwa}@ fr>J>}nloPz(p-K4zmo_zeg?`R--~6r@2>j?E@Rd} delta 572 zcmZ9Hze^)Q6vy9NH>b&FgP{3sewf6>E!09#1iQ1a@D4=rV7g7Rtic(h6QfwEm94V> zfsG(o_)pk~#i1Z9gjig2-Eknc?wfec4a{f0^L>xU_uWT-MPd)88G+!e++VluLYJ`= zK3~?{6A@=|kzfgtWJ!c3G($Wk(kv}9EF-cktGq9KLgZLZ>0zE11y)dcgcrpmn^Zc? zOQOumqQWZbAK_D?%BrHqY6vbgjVol+$5G4tO(`t&V-4xpo`t7qdN;V3(>LwTj&BfG z?cFAKq#vfwde`YX-YdSK<~?fVdCfPJ^`k9!f}YXAyV2cq_FkWQv{=~w~&D6+VBmz_n E0D%iJ$^ZZW delta 357 zcmX@f`jn0DG%qg~0}!|zTb{|pJdsa=v0|dSu@@sl3PTEO4o5C$6lX416c?D!mcyOP z6UCFu8^xQ;7sZ#$AH@$AXU`GH6^s(h6^atd6^;^SWJqU7;aJ2NB~r<$$u+UVNrCYe zXGmgkc3^30X{sjEEfx^x7B`6WNX$#gNu78iSBAHU8R(2677)P-BG^F$2Z&&s9LOjx zRKyEn^MMF{AmOJeIysxMO|KASXb~Gw`XyY85tSx wGT7f`Fu%)Sb(g{LK7-|VCRRqK&kR7~I}Zb+5937!?@ugz+)Rz^MUp@*00f;=nE(I) diff --git a/src/plba/__pycache__/tracing.cpython-312.pyc b/src/plba/__pycache__/tracing.cpython-312.pyc index 84a4dfd88c2e43b67fe45ecb64e9578c950935ec..d693ce0a9cbc4c4ee01f3c32313a709a388ebf36 100644 GIT binary patch delta 178 zcmX@j^qPtHG%qg~0}$AJU!HkkBCn*L29PtIA%!7@F^3_SF^Z89#AeE2%4LpX&Si;W zVPr^WNMT;Y7{yx2qRBcjK+KNmmauPSaA8hJQDSl`kjN`8$S*3nCFGayUw|s6$y~$& zG`@%xL@)yhKTWoY9d1lTtP@X1D)9gX7=gH007!gbW@Kc%%V2V!A@C8Gdn0=h$dv&3 C5G-#1 delta 150 zcmaFObef6xG%qg~0}!XntP@wbP5d0D n&JE--0&%e@kodsN$jEq?!Q?K3%>!=v1#%a-3>w*sIDs+%I7=ca diff --git a/src/plba/__pycache__/workers.cpython-312.pyc b/src/plba/__pycache__/workers.cpython-312.pyc index 6183a0f40516ff58cb43547cf5b1eaef7832440d..75ee911df01c6002c15ee677815c478cb1f6e9c0 100644 GIT binary patch delta 126 zcmX@l)WF1hnwOW00SG3RuE>m;$SY|p2INd{ADB-8cJaLU8BlE;_kxHCEJ|hqp3jm1^%#4hT UcNwf7aLZocl51oyVh0KW07Z8kwg3PC delta 206 zcmZo*I?u#=nwOW00SH`N!l2r?la#A8o|sGpdu$jCCWHBy5csE-kdi^YJ%2WCb_ b#=8s_cNwf7a7$c}(z(E;+sIzT36uf=nRYfJ diff --git a/src/plba/__pycache__/workflow.cpython-312.pyc b/src/plba/__pycache__/workflow.cpython-312.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..faee905a15c0c1eee507140267354f723116abc9 GIT binary patch literal 799 zcmah_L2J}N6rM?TH`#P|D^+?gS|tb@=&_W7wN_6(v=w14L)^@+u``)4lWn_4e}f0_ zo)r8i{)2(w!BcO=-g@%Qq(Owz9g=U}dwlQB`@WyUVT9n!K7Y%9cnJNrNqZo7=Q@My z3(8Q2ITDy)Y-5jm!Y96qeclmW(sgl%_e4Mf5t7j5UEUWFiCo;{D`G$fE)MumjK~P1 z8Op-1n5-`Padc@PjImjLC)Mec%k!7AsMtjXrRca~^e8hpGeXDc?fRLf=AY zA+TVrR9%3>_W<*1j^hrv$=$6ucVmYla#ZIWS^bUQpt}DOr&R935?ZA$=|1mR+A=Z-yfHYn{Y?SY|h!PT8B{>e!T&3 hGW5;Pl0*_tiQLu`P~2j literal 0 HcmV?d00001 diff --git a/src/plba/contracts.py b/src/plba/contracts.py index e525433..8fa12c1 100644 --- a/src/plba/contracts.py +++ b/src/plba/contracts.py @@ -1,8 +1,6 @@ from app_runtime.contracts.application import ApplicationModule from app_runtime.contracts.config import ConfigProvider from app_runtime.contracts.health import HealthContributor -from app_runtime.contracts.queue import TaskQueue -from app_runtime.contracts.tasks import Task, TaskHandler from app_runtime.contracts.trace import ( TraceContext, TraceContextRecord, @@ -15,9 +13,6 @@ __all__ = [ "ApplicationModule", "ConfigProvider", "HealthContributor", - "Task", - "TaskHandler", - "TaskQueue", "TraceContext", "TraceContextRecord", "TraceLogMessage", diff --git a/src/plba/workers.py b/src/plba/workers.py index 69099d4..ca7672b 100644 --- a/src/plba/workers.py +++ b/src/plba/workers.py @@ -1,4 +1,3 @@ -from app_runtime.workers.queue_worker import QueueWorker from app_runtime.workers.supervisor import WorkerSupervisor -__all__ = ["QueueWorker", "WorkerSupervisor"] +__all__ = ["WorkerSupervisor"] diff --git a/src/plba/workflow.py b/src/plba/workflow.py new file mode 100644 index 0000000..4594ccb --- /dev/null +++ b/src/plba/workflow.py @@ -0,0 +1,18 @@ +from app_runtime.workflow.contracts.context import WorkflowContext +from app_runtime.workflow.contracts.result import StepResult +from app_runtime.workflow.contracts.step import WorkflowStep +from app_runtime.workflow.contracts.workflow import WorkflowDefinition, 
WorkflowNode +from app_runtime.workflow.engine.hooks import WorkflowEngineHooks +from app_runtime.workflow.engine.workflow_engine import WorkflowEngine +from app_runtime.workflow.runtime_factory import WorkflowRuntimeFactory + +__all__ = [ + "StepResult", + "WorkflowContext", + "WorkflowDefinition", + "WorkflowEngine", + "WorkflowEngineHooks", + "WorkflowNode", + "WorkflowRuntimeFactory", + "WorkflowStep", +] diff --git a/tests/__pycache__/test_runtime.cpython-312-pytest-9.0.2.pyc b/tests/__pycache__/test_runtime.cpython-312-pytest-9.0.2.pyc index 0031da0c9e27dd7d4e92214a69873a6e4cee91cc..4c133f12f4b5b15d82029783aeaa7ea1468bb0dc 100644 GIT binary patch literal 36104 zcmeHw33MFCd1m)a&wb&(K@7kH7#<)<@fJymA_|4{jFKa20WjWbnh8TFD2^rHHCyw`J_fZD2wCH`C z{r;*xW(GrmI+(Y4c4P4OU)5FBb^P_$|NnLLUwS-F0oQ*XnCM#7EC_!?4(+mVM4b6; zQ4r1vvLK6Lp+{^NMZ!&CQ@fenr5;PCNfdgl?Kbs^z1@x{=CGs3+3xIdwY!+V6n6J` z+C4qqc5jcb-N*ba;ewvR_CkhR!$m#C?ZphYg-d!$+e;a450~|nx0m-+v{&?0wpTJg zN4Tn|y1km=&TvgnZF?=lUExJNb?tQwcZdBw_3ibd5OSJ?J%a2xDahWpOuSTYsikah zV9$JbR-ip=U(Eap;a3zYeap;)wl876#qcc&mFvDsnQtk4%RVHm|35aP|u(!7_8jN=J z^+wtPrnEy2MuVN*v@+1DFYx}F_~_9;lo`nf*kxsZ}}^E(fsxeg#3 z0}({^@ESn;sKPLdYDFE{Jb|Lg{L^$fl50 zHg5$kOYJt<0+*G!9I_2{Xdko(oaw@yec^DZGuqXA?GoN*xT z91NJ1D%_O{fV4H(-yiCgQU6jT6h0iG5dNEYuYN2NQX;E^;n4BbyF$-*pj=eMNXPSQ zS0hGbb$|HT;Ob~75{)p}q44Hx?jKB>fe)3=smNw{mm&2?0N{i$?ed-8^4gYZchR)n zecC(fP1$P`_S&)N_#74k*28sSWiRaHQ=W9lgPx zP)A4F+0oI%XcV}+qvP`f!7zWKP)(&JYAjw@Uq?qq=jiC*_0WJ&=LKakZc04?$|VDZ zKqBPzTf$EQ&6lFdz^)T}i71J zNHnNKLozTW6b=O=q4Q?Nk1%PQ_Qa+>h|s(Iem^TeRc%*qSG1#Js3Ip%&DxVi2p=f` zKtop4jYy|F?Dj?(ipKw9f3Mc&ed(p@qkknFPOA0 zFA5-hbK4O$)V-i_8?hYp>wu)Jfp^**2}PB)gh|JOT~VbDDDMKC5{NX0igPOggiH@2 zjQ}V=$k#LKN!cqC_R6HaT6b3^>{a7;#qCv7_C{7MZRf4ZI>b?|1lH@=Qo@!ISORd~ zqENz069MWg$_4`TMr9>|#RM3kVq6v!0=4*$xB*TIGbWS8KWjHxns`8Z+=+kvB2vO7 z&XfRV(4?8uB!&_xT~3_4t8)&+&uUnze%7#*>vKX>r$FLHvKd5f=@Ktk-%^|IfDo{? 
z4S7SygFXG>&???#)26=Xz$!uZa#|vGf|pCwd(&3XCBx#~%wNeTU=r!)jZZ z6xJ^EHjqnBlkBBDI_b5bX7(deXuHI-!c^-}8>g?<7 z98i={Z|9)WN+kqIu#4_QYm7vG31oh@6{8_brAi@EeSGZzsGe@6N9X)eZt zS`{i;pa9|kBW`eHE?uM@V6|#hx;rxJ5)X93w|7LDgcf0pBkcoY@9PIo|NHxIM@{)P(l&Jg=84*2q_B0IV&3; zlwFjBP0gXw2AA>>0czd|@h*%MxHu0Y<}_56MKAg&v5F!P+dm;(FA~bCFitqN;i9r0 zF7I?rBbc!(v-b|b+{~MMa+p&B8fh?8ifuBN(&wbMCB3Oa(N7^m!YEUqEd72MqtK$ zPYBn1#ixfxhvN3iPo0Iw@@0XVAh)(PIw78vk&PEeUyR#p=VpVbpVh>BsFQ5AeexC} z=Ptc(X=Cfh6Hm3?#YS=>pqwk8NShT$@3BV2`Qxx2nTJO}lF<99k(O4fUBaQy3Bs@# zCBctf2w$Y(#vQ⁣;MRC9zwSr6Kr#Rrr0gY`S34RJmbrkG#Tsc}^m5#xf+Bn5nyR zZV01pPCY4+lw)nnB6eb&chL|}`prH;WCVW*SsoT+Vz)j{8wqO(epx&xio&TfO7vUi zV-i7_aZ__k+Nv@n;%X`*xart>B-9%^-mh#QT9(~Mn6%lv6)Y(j zj%;t%!x3kRPzC(G@XHgzW$(7}$48z&{nF@5DW5;#-S$z0SpFBPYQ%###Z_$VA%zn{ z?GZ?Aj7eGLAXLWQfG2GZ_6{nKBYfJ@30YTB(2#=i1OX=LS)e{b&pb#TL`~pHOjfK;*sGKFTF8nidriV#ld?A?>tz4yRVd1kFWd2_OETfwC;ObzPshp%D02>TjCEt@HVYoQ*M94?H?~ox|@)f(+`e5_{FvlZAgeS34QQI{t0JH zLIH9scDaDiE-KX^dTPU*YyNnHpgqop&Abt}AUbQw^Cq=j*l-ugAU-Is7!q@NGx*$51p0>-OBT7&X$;u8?8{~#PbaN>r5iq{PC^&5e z`-G;M_Hrhwi*wW(jFBa*2fsvWVCrDfY|+r~>$_3IP$>r?ex6ZKn@_1oWGm)iMwV&~(NJD-R@edzMeLsyFH z#-(pKzUG*?`;zoM&v!lXJxAhQeW|W!qANOCJP>yeP+HqZw~y_(?5Z__Z@C!#&hWQ~ z-WR+86P=UAa@;LL(Hh(JhkL)WcRV_A@Q)9jKlFCl531j(PWtx8 z?fd6;G$Lcx(Jp<8jzP^^2QA0nMfq?6};5&jnEOD460 zLJ>!3lWZBbMvJwy5jS7wv%X|{$wJ1ZZZc)%TrZhll4R?f(gmBr8Wsy{U@$obBma}A zaflAD95+8H91{cfHr|Wb{X6kRFhR#ycMg1pyly zTa**zdy>G*1inDvw+Orf5Fs`VDS$4n{ZL2G?TeJlfyiMBKLtP*vYJ%IibTbV$%<7Y zo@r0v>Gsj~4+Y7w3{U>h_Z8pB-s$4mUVZMxvoDU^5Bq1TbZMe==}7Ah z2ZCOA3K+%FkH-#81TWXFo-U~!-Ho(Tp8ABRKILgjc$$))<1g7Dz0rstJsD`Cu>E~}d^uAE-n@^(pT$D@fIk3x2c z_dcH}JT~jH7C5g9R)_OO9p!`aeKSJ6^!L~7EHM9hfn{f)*0C1U;Fc{v_bh+ znvR~6d;_tT6I~dPQF9$SPs+^~YUtya72@`MW&LEe9@TXe#IG`HL&G%vN)*nBr)+b0 z#%|EJ(A4>$~5hh&CSMofZO>bqr9izJD71Jk9=Bw>qcW(ZA%{+fkViXt_ zUldRMkl&Aq#{@0sIo$E0=6@&J?$$Z{1g&Z&jy@=8B?*|?xB}+oIu+xfOvN(T?W1UJ zZl-38mt>f>Og$qfshT(|4t#KHiDd+7Ct#y_7ZX?o)!q+S@fR$QJc zZcG$6riz;r#m&j$7BJx}?!q%m&NZKHPP*$q3<$pB8&=8)W$@;WB1)Ft`m0%&P*MwR 
z&(nedLCXx{SDb5my=^?0C~177?MmbFiM#&IaXm*ZHxRXja^3Pzzq)QlT%?G6Ao0S$ zug)Cg$>h)L4GM#5Dh%A5o9%Q-6||$0H8&$)MjO0iF5BfWztd2)t6q3#tGm@H{F%LW zSE>2Wmf8v5T(*0=`CW%)SCQ@AQp>JIws)&+@PD`7vHNcGyUX3XH<;huux$5I%lB&> zz`wuLvb)*w{pB{o*H{qp`x`6>`Te`igm1UB+HCJRJ$wA-_sYv!H`v}=1JXInKs+FY@Ru+-0 zEa(PgSZ`B00hJIyR%bX0kD=8$ogqtV%41tON2w57=C+lh*0HjSt>1a*+Ye2x+x@$1 z5=E^au^Eall-7x3wmM`1U2~v^Sy1NKJxQz@s+lhXL=Mc7L(?inNF7#*ZC~4#ENY$f zwZ`qOOv-NyxVgMFg3t=-Drqa9IcGLztWF5`1*6Q=oHoNM&Xpo61D}am1`H;ErQ|_L zXgCY8ggnI7^O6#!=h?B82jGWVW>HD*)9}-TdvYgtHo!M+$CP(}NQn++C}?E&Q3eQ* z4#u@SVpUx5TuIns0`&wQ)ByQMeiz`R0G3i+H(gybU0M5~ugtRYnz!(}1^4S#p{U}z zjWDNBTyfn+7}!tcbq`@)p}71yg}+`PIK4B4gtZ7(?=LXBR5fD;cAYS!!u=NMev4+T zPD}M{h1s%r#%Z;*i8BS|7U#^`QcJ^ZvuN3)PEAo>iTLyjorW%PhD;|JQ&Sc`Nr;Ii zD@|Q%;tp0<*km)NkF0}Iz@GN(=97GU)kjv3)D24wEMa5PP)iD9FjzakG((VyPstO> z%pp!{tf8{37wYp5U&S-kB7UX3X|jAdRus5d6J_OzUWNjTGsG^=p;My2r)}y)C7GK(%LE~++l{p9HDbVQjAA-E`GObB{#eq^pe%QAlLseX)=A6Amm3&phDuxf z!=^gX2mN?m+ODWedonqf<{MIS8;FMvDgPRABQ#5dq1;}OvXfc#&HWQAU{8pBJ2qLf zD{0@&9yBKGjmf5aCYyFl+3#a5*v2Nyl{1KpPXC9vDP+XTU=_A}{aT@p$h-77VL6?a z7@0!g_wdh_e$b4%Xj!56txCObRnA&XmYr%3qBx!S*DtjVKVc8@YHJSEE~HZg(kCi~ zTbv<=m^r9HaW`p{k-r;JZ)TiVxeHi?Qq@&i){)Ay->dhe2?|d23kF}lZ2Z_o+hqBs zkI|d#&#H@C=IKuQItyOrX6jEwwt9b3=hX7{r#1xqEc+Ad9Ya1X+pBo5z|6p#c#+XL z6xQJ>N18=o9QaUfHEQ(!$9Nd20m!2F`0~3i9sJ(Y-+g+r=7A~uK9#(;A{ZlY0c!x@@(|ksm-L%z7YGGMMs0*-6atRTd63<^2%x85lv_@KI#3w9F$_2zz z76E9C5zE%7lCs#M=NOi>gcvVHNr(J-N|F`g64GOwM&O2D7 zZ)rtR81>SdR9a7~1yLJno`R?${v##zZwV|0;2a!_4jRd)i9mn=F(T#L1gITzY5tq! 
zN8nHJA0eYDU-J~0v5S_aMC40Z>MTswFi+znYw8wYG^Yilfa}&WyKKtW?Akagn=eQj zuN8$T4f*WSp2N&+$~PislFi)F8AfPRH}aHw#mvUIjS%x1o}xZ2wrt6YHIIQAMs8Dg zu@=v`Yo&oX%bd?(JZy zsmorM547)N?B5@$+iaE8EwEGvZKKi&gY{9=3 z|8}&GBzs9+8g|J(w3jvJidk8Exnu4~I3xY4oW$Wod%4kG1r*018$U9l1r!t%I2dSyx#b#GH??nK>h0IBhd>%$)OOPe+3axFk>fP$LX4g* zPQRZ^$B`}mHO_F~5%PH++XrCvB_F?E?urDT4Tm~H$FUS2@*IpT|IWrLQl_gN8%Thr(O@d)rzc zV(I9q`kzvhJmkD-8HgTUwTTurVa;kEiLXEVy|2*q!+R7G&6zFkym-^JZCgMbYFpaW zKe#RsalW7;{%wB3)@oirRIipmq*>w1I-SGK`c4q%uzwqmvoR1!Tlu=@^HSQ}KiEQG zErE3eHs182ps+cyH*7{|cy3PHc$A3p73A)`I8;RMUqi3u$s%`HmW7~ely?v!E0GO6 zkqubR$v0@Qc}#^&=ovPw9dM?tymiwywK3CXR5C9#uLu?>Njtf4N>Vy4m{Pj> z6%sAdKC%<=L^=+4p*2Y-PWv?7N8O~MFe4h90@#h`+?+;s2EIkZd2jz9tpo1}D(S)w zEEo%8p=-a=+1JyL#TRKe9(O!D&=m$5p)&cBvUEj9-GV50(_+_jF|C^I31aQ*ag?x^?R5(H()MUie@A~XdXz0~2Dvkb@|Tpu zKP5o9PP-@{ys_3~I_}DJY{+!1=MMhPO}rr7v8E+6T$W>PW~8hXHbzLyqt23XF~ei} zE>?ePd$R?g|H!b;6 zQdL5#x^8kiq#G52#rg8?SN2R7R$Q+YU|JM>7VE5E@YPOw7oFHMExA%sbwa8h+n1D@ zu6U|`r{hGc;n|v$7GINGukL?&|H+50NRE-kCm)#hl$?HM^qEOdEerYTgD*dLvhA9y zkk*;RrL`m5Q?A7c*J7X(B@>^YaE!kIH11kF{zAgFHYL$-O7-9_9VQTa19wdtXp*JL z;$3F(cnIL*FX&Nq1W8Z33eN-*u9~>CV!U$PgbR-`2QO{pH-HOX8Yzl<2D z4>g>QAc&Uc*yj?irEzKXL>WNbwREB^;aZ)N=r^T$aMvVrar`oc=V&HG5K6Ww;i?|H zpC!9$q9iV@qGU@Fu2m_Cep9LkcTF-E$1h6=&(TbXAX>6<>8Xh)FNqUvZ*O?NDY5fF z{NR&`o$ZObr|=-|s*cw^m2f?klIS<3`EchXb8`GL@H#aZ_u()pJ;jw-kfDCrfib16 z1S})d*rf^m!xyz^> z%+pHD)FE9}%%{G^1g)G$ht2O1h?V9pvHKg6unsTCsT*Fe7CQjIey7H@N#tjOc*d@Z4qHmBM_Qt&P_qZq- zkJu%bXm3S7^mdc&GUpmLQy4N=F4~hO)mXYgxt@~N`Kc801a=B`bD3B}U%2`cF*lsXdSVe z?14F#)`F~^$x4`WW$*jEO4J~NZ7}Ghz0<`tdOA8-|zgGV`@>eM4HQEm6>-Q*| z(4GgO+M2p>>cY^Yq$olI10DNw#j>($?pW zVXIF>c@B;vTt)xxJt)>5HD|Y}l51O+P_&G)kS^|`CCTc_W92_l9{!vFm7`UO0R~F> zR&7#ml}Q3$C%`n=y9rAYphdpQj|ij)OcVHkK$^f`5n!4wiLzYXW!kSyPMUQ}OWl24 zy@5i$H+(&L|0RLHCh%hdKLN%4}|%)Q=*bc2N@H>np$}nrv*fgVxsOJkuFR71Exe? 
z@fQEikrR9LTTQl#_-5P0#*2FGLM9^9qFgtT&GBX><2XcFjF?t7UM*HNUY;}a4`b0B33+UO({tGx*q zroW-2#ih*`Hvq(4t1oUyxHhLG`c0`G+%?Hu9KTHAIhqL(%T5 z{qFbWWYGat@h(kBrAet=QRTr0_kk(DQ>Vb?O1c@Y^|(5Mv`K^C_wv4z`%tXNT3zUiIhX8^ow7@Ykbl9WrN_=g z%HGBz=G#^>Y&PZec><>b2_Er(Oqz+)YMj%%^_HHw^`<`IgFnn0B)h@ebew zp03Ui+>WSrI9}SSF5i6ywNJgkX2tv)8Mw)`J@ObQ!RFE$&xUehRn7*n53q?O&JJp!UWf)3b>NkO)z&pAvQ zm^y8_iKZsQp@pnRgLbPheBFN{LBUrc$(L;NQSc)J?vwsy z@%t}sdVBBtL-C#MaYyZ><0=X`;KRF(~xcAcMFK!=kC++uP zZx_O0b5}uS%2%K8LDyFVgYxpdMF8rliJcU&X(Se7Z_fuG)mFb__M=XLGs!lxrFE zYMSD0VjjyTFpHTvxeE_{A~){x>r=LI*D@W~g6MO%K-lzc_WN2WzH2F)AZ?i_k4x}a zI#Hf*wWK8aO{pH-HAybKkDgp+Pjq~uoTHiWM^eiTAJGk`GQLmxV$;KQ=6Idu;ef3Z zWUF5^^Ju1p{M4IY6<;;IYJOFE)$*$KRhywiz6S0Bh0+{0^_XA7K6_FLyTsQlBQ!H% zJ!yZ`XBN5U-jRK6*#@2FdJf8v&xQg3tytL zZvgU!$T!iAs-hWAIWR#1GV5+qgspFy4hWg7F=Gj>ax#>N<)nGiHObbae7e>nkMGtuMraaQ-XG88P!@B({y^ zmu$D)bVK8W<8s5=uPnLbia&8E{>+ifPaJ*!aKd*yZa*IHJ;o7!ywNLVm16#iA=Or^ z7+FMBn6|=a(8P*vZac`!PGwVy>HYW@c*)Wpc8pPny8LNq-=`~MR$@4uS%{S2RccYI z>qaaiT{y1@?B^r4f$9~T^W&As%t|D+wek{C^YKOBxy5obd#zTg2R?17VEa|mVlT=p zls}_TN*Rir)kGL|IPH?|t2L2MtDs%bsad5_Nu|-3s8qKunwKxb(K0mj;HNDd9YLX` zR!(h3Dv#P`m5as(FINUeo{!r%O$-94r0DcyHyjD^tdEWWd&A26s7V<0l7UM$YbK;& z@QHle*bve)>KiQ>EgCJvVtEOv;nFzCD=<2}U>1Zgo6ebk+hT#K45`4Kdl`$^b7Qd0 zwrZ-Gv4%~ul@`*+%(?>iEn}5OHqlCcOH+|*8S3?y% zUtyuOqH5SNK{b?6=VCt79oob5iQ2>aoA&sW?IFx}df=S?gcV7$kC{g7asg&j?dob3 z7)r2O#h}~#J;;F`R7i1*9+VL$AFb_*{r|^-+_&IB zWuFNL`b~R$+z~jaH^0AisfxX zjx}pHHn-q!4LfA)=OmEfjPAX-@yYoC!q}uc&DH<>eR8TMth>k-bBopcst|@<0U^UA z11ndW#4{y|<>mMSMQ$HW%$9;0_D%}#z+Ie3k^KchAjk>3(F;mT zB$y;gRdTzwB>M7&Jcz_p#YxKvTl)|wPTN!kOId`PFOJ5 z{gi^Ynhw1;@;>O2i`*X+Eg666V*SNOCW|(X*rvUur+Y?wK5*Aem)B00EuJo|pDtfI z>#%sdGiIC5c|)){aN-()BEjF7s@s^T+jz+~S$A)$^4<~ew68Yh3nYAjxV>fK06^Rq zm^hH|wWRFyn^HZvONR-2%gBq|HEEzp7MOcqrsVYSI)c#bjSUH3L)^YEzHA>s;LG+U zeEU*%`eD~P_u)<*ChYs@r4(e+K$9#q_rAee0nH7BOXu{VR zx38X92N3r)POMA#R;TRrn^HZvYm&J*ewo5^G!r7oHPo(uQ^LRLQpKeIzEstHBfe?h zqLgoW!nZtbUo+7O5ce&g=uG(5r0n#YQa!j!hY9x^Q-=xrUV147nKaNO3(dVRvlR6RI)dOtgyGTf 
z@AUFBaj!Rm+&%)mRqYTzg3+^T%SSjwpn4}9R>4twGt!QJ@=D+llXyXVc%AgE3SjS8 z#D_OZ?^s(K;rPKS@!>7f51Q|zCx2lTAKoVYg^fLlH;a!5Qhc=)58@j|f?F8eMlc}| zrUKt-nco8}H+m1%w!-!EZZJnhf-m|JkT<%TfCo)E4|o3BbcuX)PGEOGau6T4?6 z(K0ChBhC=1{6~DQ13-);}90;_c1hnP94i~6KWXhZ!l&9)K`&bee=+{D`)`8FvG zJE1KfcP;-+$v4u^x9UOPGyoCX2*T)yIh4+anxtSg`guF{tLBsjRv&r8mm?=RBLY-E{YX(uh2pm zcP%#7cdgQVpnB;F49W3 zAa&~@LEUv3wBa{uLc@&n2^I+VFQ2-Ly4Pr>Htt%R%YNdOay>J?6hkR+#foDt#@R|@ zB{04iJR(15D*rpcwtX1gB3l4wppl((@E;nIYP1OWw!9COvQ<(3N7uz8UC~-Ou3)qB=cZS)7yz72XJJQwu%1V!+Xf1ZH(XI ze_`0Ee21c4B2a@mQ}z?KiGXT&c!UfOx4#<$-#E#?8ADw@L&pPH@CB%Wkn#`omj6lM ze*tvjU%#M%(sj!Z5K6N40Yb~orydtd4TYE_U!5GwWc=_GNhG zKmJR;gPGOO5o<+5Mre*48{#V}M|OR2FJJqu_6e3>k}OzZ+JM~ZODXCH0Lo%Gdx!S6 zJmc$7uUsthO2kSdySMx8@HlhjGnto3j9IXst)^XhaTEQ63GoL^^?J+c=c z(WB4d`DA=T51+#;km)P1PJ9Kn@a8jjqC!8%J|(gjA3w}GtxZB<82!-kO_>|q(Jl1t z=-H<^;@nO!AJ)CJa|uoCNW!!g-+Jo}DLCHNkJQ*nkC`XWv7^<7j@;SO>Ak0o+Ln|- zoEcfwIN`fgK3TN`oroWBn8gtgl2-yr&9~}A`M#f}eX-4?Y(g6RT+iG-vI}Vrb>87Z z(_xj5P=Sf3;qX^{&*AQ~cc+RMCyE!x-HYe)jnAsct$5!Ywm_bDyq&f0&OMf=*mT-? 
z+|56Gn$^-e@28>hmMyJzbp+8btrKUSw#eF?Ec|mSfINrp$QWUC<7Q^Uf6^#DeP(Ol z=)RP@HsP+Fa&tbG)!FsIf|l}poYG^vkS?|{!Nt@DNhOL@(AQZtuqlrSJhD1G`QCBb zr)4=$Q#FtTj4?v12Kubl`1-iJX38Cy+W~Y+pcP;rr!F)~$m#oR_9wURljl(A4)=X^ zf&w)#Q_5>V>-|d;#Y-m_>It7w`Pp<=Zu!acj+dX$5j~6YE87Vy#!K5UANLIIY#28V zW#)F?v-DsIf!7Iq2>@ig6t{E{ml1d`yPtj2H}9hZY+eeV+G?Z}nh203haVpLyMz(j zR~UC>9D?-^MkB1E*t$8&C^zvIWWJWhXk`ORvLF8uN{34)_ldBDJBx7Yx?{AMjQ$NRiACl}uiuy$?0T(|f`VEsI*MzVSg(Mq|+}pA6 zb3_;sVbwC7bmI(Od^G$floFhk#btrdOlHqI);x=QQ%;$%A^}lPN+0s7qR7GrrXRlJ zxn+k&AEsurAcr&#t3$7<2vGq9EB;w2wgaIm$GRm%$-S_C4X1~UnzRPc6iw}Jj6@&_W zx=H_zDYtf-`Wedl*8t`o$ZK>b%00THQ8=nrwP@*L-iNZNN7;qY6Nh$+`W}SDxONp`mH$BCzX4!^Jah})`SuUy!!#1kavMg4YkE_=i+fkrT z^KJM41+Ppywfe)S{pg6Fyi@J2M@aqVtg2P{A3Up@DXa5rk->hLmFzSHY>~-0kAQZ= z*p3Dxv@-`AT%dg`0xCnnM2(4|nP?A#I^^A}GkD zfhH+5HhhrxWx{bfgpOmgiH0vHWbQBb=z&HHHn3n1O56pH%zY`q6BLpAVN;8S>VdGi zQ0zB>Ej8u&d@;~IKlC^+L#o`guFm=+S$=@cp!=o z&UbVuB=Dr|+8LgVC#5Sp_^F@G{PY;?v1nF8Y!6u4p?l#BQ|`w(+H&-&v`Y>~gY0}* z96v%MtwQ5ISlLk+Z0T%Qet-+DUFDyBmS|5M`vO_Ie$F}q*e@f^@YQ%1W47s<{NcFN zuaNQTRzZ$2lTQw#PS<9qr-V?}BRCkBk4@|=Cb>%>D2#f3EA8RW-XO?~z|Ht5W)HrF ztqgKjoAnZH3nA{kSiz=5l@h9uG6H0AVV`p0AGP>0IloSTHYKrRLD{h`Y||ZE)4*6- zj%n|veJektcDq7q0DeN`PXVr*MNzyeG=M9g6khTESZMmO5J(7tt3uh|3in(Uw)|Mw zmk{<{6*gTJ?z$>$zA9{mIBjy>Fxkbb>jD5|YLn|aId7EN#HJ4gfY}lgPL_`^T0JWe zGV6R!6!(Z{T4x1@%sy>)ieNA$OJ@Z_W~&_HgJQgN*{ndwY=uKy8n0YIuB9^-e(`B> crd2Eyx6L$p#5FVi6=Kco)8aj1F{9xB2ZZp7O#lD@ delta 6420 zcma)A4R93Kk)D~|ot^#Bij{WtyOI|4JCZB`Lj1_)A7coh@Mp(f*-@=DBeBx{@n(dP zyt07EK?XbEy!aqjI6&f@Q6L6Kb!SqQ*pQ@ba#xp=N+RN>&=FrMcjY_R9SL%h%jK@F z`^_#WKMr*7R`>$3(aG+81=?-LOG1KM{{GokdM())E~?Q|oafXbr@%eIDcQrEGEdDv zZ!v8RLQux!1bxV%$QIW({Rx@rsa0}*eYZ$k1n4|mo zELm29O7XBYR15b2-0R?80{42jm%{xKxR=4bfxgW*NP=N|e4iTE>A&%hRS1T4cUbE+ zT!@>(aV4sfQQmNWAwhaovPlg`^*wZpP{GSIDb!X$mup9#+NYZRotZhod-~n^JB#T% zLVc;ISmBZOew%DJyz3LusM@JV;@#GlHLe);WVk$%wl036_MNbX~vmp#)`$%IsLQaK&{di1(fcL(~o)=?`7!kxGQ43VgMABh{*)+spf3z+2ru!P&+avLa-rgRdL9vuxtn*6* z1W#};@^tTui|OiO8$B-W*uD^$hDB3#vIr^b^Wlh2Acr{9S3%QSG=Cs(T{O|zdv8+G 
zjtZi1EdV&mJ&)Gf8=9M-MKJNmlDkwTQdI~Q0J4o#BVB_aBQ(=j?elZ-Wi<5VFn31~ zY{6N{La*AV?ZgATQJ$Wbmh;v0h9vKj1;SKjLx*g=U)vMFCG`mRT)q;U2xx^S0St4q z0&nZ)O`(-^QFSX_Sy92Sp&b?FyL20fuq%_GDXu*owj9^bG0(%JaqO8TH^uIZfvICsVFW(v$Q;b1KiF0n#ugy5$h}NoBpavf6a*XRVOtnn!WdrTt|bI z`TRnWsD-S98z})Wq?oFQm9QQ*9BH`(E={h^M7*<)5H;S}FS`v%Q^~#v>a}#M zy5R^bO1n8%TTzJ%+k3(tY+)l?kY!KybwnfD9y7z8DDXU6?FgE0wegm0Uc*g?NZKP&J(Y8LFn42LnHGTso<=_Mfo(53#oQDD}%VLYAu=Ns`EB&QTH-T@5e=Uf0DzP!qZ{fr@jK|Nb^CJC z-Fl{*?4$nrHE}i}{m3i_XeA1uEZ)DXVWyG&2rnSiBcQywN$*7p0cT9}0;Kcnj>Ow4 zXGH*0dN1X33&(EO7fGTS4fKyo9?fiEbXD_Hd@Fsuc|Jcv-)){>XxKD8tgD7Kg4G_| zSl8o|R@47zuHsKn|I)@L%xWXsT;^2L9%D(w1`?g%PD^kIlwx`!X~s<<)t8=feHFWg zSwAgac4ld#u4;Na>5J=;nA#3Y3ar$csWhe0dRgOxSO{zZ#q&9LH~Fl#6X7}kzH zt)Cpi0k0x_1>raXQ?eW=*#cWsnBXoV_C61UW{65yjV0pU^!SGG&`T(N1OXqp(XivY zCsjrEo0SpH4>^Uv0`m=|S`bbnoIyB?@J)noA-sw3ZG^uKh2?W!NmW_HZ<^ zPbJkWux;&0MB=iSphL`BjJ@M1Hot>#fqv!j(#3D1tb0wezr8C#VqtyJVsa6CFCn~x z@I8bdBAiF~F8!y+zhL_bkS~n2Y}{tyZDS*wmrGlM*tr7$yki4H!WvlvEueLsXof8r zCg6Bx&1&S^fv?7rI$MySqGRv$x6nfl(Rn-Df*%42r6QZiV>rN)>~BJ7Mp!;TkM2BG zjUD7?2sk+do;j1dT-L}qFlBzAo?h8mOe4D%i65ZoUV3I%>5zux-y?ihud~GcDXMf6 zrU*?m?6GhhRtCa6X1LIkrWH%bk5D^0&19PnUSWEhuH5Z#F;8DWsqfJpySLa3Adk_r zyX!nluT)*D9bfz8xT9p!@zj{HTet8{^z%*JSKD3io1g0wqD^9H}#cR?90s%ly*i zU4bFAOU{L26AJpc8YTaZidn);BZXNrcP5vT?;^)zI8R>3P4poJq|Mq7cl3<^-< z!#9%w7n~|ADa%7o5Tas-8eahIaPb^|ZW!@)@rq+uQk*G1Q^^ck1uiAP-+I2zl;w0# zYhEc!=5TPql~FZlOWAs|;Am1rFF%q+BT+YRquvgmbx=%+G`ZbZGiV2O_VZ~SgOZXJ z;UfH?BPFFAy)|I6Gv(B->wbE5WuQp$q?}+?HgNAx#c8rpc-d=YJn_R4Cq4wgC|y?ox7;+>|TA zvzA-OqL*DTd|qP-9S=L`@0+r7#r^!C9sUyhonXAJrxIU!f#Soj8j2rrFiXiBw5PIC zcJ^MLlt+t#8hEb)crOpUS3Y*$d#|*Kw~}+Lb(R;L5|}aBN}*De%1)a$?*Y@c#0y+m zV2%f#Y3r#*bJp{4>XnOI2~IR|Aou`e-tcr16>5ac&P@h^J&=`*hIL%;T7oYOI?0PD zKLn+Gp@{3t>pb0mFoTZhIlxp z{+y%l9V*SmjV~9E!#`(QPjEU!X*=Q;0K>Z-PpzGEKE7!Xk65DzgXtZh6WRk$Wn4<%|B_<} zw;t^|fZxM$$5%P&s~&m$hQDOmUp3*cn)cUD_-oHCpY$&o*?2RrdNNNQdE$oKdu-dw z+fG%S`P^mS<-M03*VauHeSXTl{f65&?Jk>uUvSZ-#w*R&s>gRdGZ_qBcekZWT(-D% 
z!^7^@x#iF>KL$mR!Z{~srOXppRLppw^}_-2e}H+Nqpib%^52R!$Fu3Wod+S1J%{5C zBAi4xL`Q~WL#*!k8*IOea2eq*01RiQYE>2USoDyBPxe8D80`oXCk|tWpZ`Yc>j+&4 zZy;bK8d-2&=!$grk+2>~#LXkrX>JI*&!Eg%gl{5z3*k+K`)WG21Lcsnpv%ZN=W%T^ z3A4#Gng>+Ep3mMC-`Xr)*)D|}6uE$K72!t+Z*!{ve~cunHtwtIm~(TW7lPwYuQjY# zFcxBza;z}kC!&ja!CBTU@SUP$NQ|V!&?t- zeW}$vcPKDcZlv3e1o*SG=SY*CZOXsn=$l6xB=|xLH9wK)ts~XElhz(B&NBB@f;)s! zOdF5px!F0uOo1#ASwNpUs`5GX2S>}>VF}=jAbkGKEJ6+DfmUl^-*@JIv@t)vvTp(_ zVHEo&VOHULu{nnD69mi$f_o+rY2cNTp#pes_;DIK@V15L`5A5j?B2uNmpr$)np<4? z443ySZuJb;a*JCr!!^%v%V)Tz8KfTr`nJdku35p#*PnFV;ox@10*u?pnDt0}&GFt@ z4sNq~0^beJY@Fqgn003J>-lkCW&SLO)NF;DkMZOFs#y++*;0D*l?thl&t+czKecix AvH$=8 diff --git a/tests/test_runtime.py b/tests/test_runtime.py index b85bc1f..fc5a1a6 100644 --- a/tests/test_runtime.py +++ b/tests/test_runtime.py @@ -2,35 +2,36 @@ from __future__ import annotations import asyncio from dataclasses import dataclass, field -from threading import Event, Thread +from threading import Event, Lock, Thread from time import sleep from app_runtime.contracts.application import ApplicationModule from app_runtime.contracts.health import HealthContributor -from app_runtime.contracts.tasks import Task, TaskHandler -from app_runtime.contracts.worker import WorkerHealth +from app_runtime.contracts.worker import Worker, WorkerHealth, WorkerStatus from app_runtime.core.registration import ModuleRegistry from app_runtime.core.runtime import RuntimeManager from app_runtime.queue.in_memory import InMemoryTaskQueue from app_runtime.tracing.transport import NoOpTraceTransport -from app_runtime.workers.queue_worker import QueueWorker @dataclass -class CollectingHandler(TaskHandler): +class CollectingRoutine: processed: list[dict[str, object]] = field(default_factory=list) + _done: bool = False - def handle(self, task: Task) -> None: - self.processed.append(task.payload) + def run(self) -> None: + if self._done: + return + self.processed.append({"id": 1}) + self._done = True -class BlockingHandler(TaskHandler): +class 
BlockingRoutine: def __init__(self, started: Event, release: Event) -> None: self._started = started self._release = release - def handle(self, task: Task) -> None: - del task + def run(self) -> None: self._started.set() self._release.wait(timeout=2.0) @@ -40,37 +41,139 @@ class StaticHealthContributor(HealthContributor): return WorkerHealth(name="example-module", status="ok", critical=False, meta={"kind": "test"}) +class RoutineWorker(Worker): + def __init__( + self, + name: str, + routine: object, + *, + interval: float = 0.01, + concurrency: int = 1, + critical: bool = True, + ) -> None: + self._name = name + self._routine = routine + self._interval = interval + self._concurrency = concurrency + self._critical = critical + self._threads: list[Thread] = [] + self._stop_requested = Event() + self._force_stop = Event() + self._lock = Lock() + self._started = False + self._in_flight = 0 + self._runs = 0 + self._failures = 0 + self._last_error: str | None = None + + @property + def name(self) -> str: + return self._name + + @property + def critical(self) -> bool: + return self._critical + + def start(self) -> None: + if any(thread.is_alive() for thread in self._threads): + return + self._threads.clear() + self._stop_requested.clear() + self._force_stop.clear() + self._started = True + for index in range(self._concurrency): + thread = Thread(target=self._run_loop, name=f"{self._name}-{index + 1}", daemon=True) + self._threads.append(thread) + thread.start() + + def stop(self, force: bool = False) -> None: + self._stop_requested.set() + if force: + self._force_stop.set() + + def health(self) -> WorkerHealth: + status = self.status() + if self._started and not self._stop_requested.is_set() and self._alive_threads() == 0: + return WorkerHealth(self.name, "unhealthy", self.critical, "worker threads are not running", status.meta) + if self._failures > 0: + return WorkerHealth(self.name, "degraded", self.critical, self._last_error, status.meta) + return 
WorkerHealth(self.name, "ok", self.critical, meta=status.meta) + + def status(self) -> WorkerStatus: + alive_threads = self._alive_threads() + with self._lock: + in_flight = self._in_flight + runs = self._runs + failures = self._failures + detail = self._last_error + if self._started and alive_threads == 0: + state = "stopped" + elif self._stop_requested.is_set(): + state = "stopping" if alive_threads > 0 else "stopped" + elif not self._started: + state = "stopped" + elif in_flight > 0: + state = "busy" + else: + state = "idle" + return WorkerStatus( + name=self.name, + state=state, + in_flight=in_flight, + detail=detail, + meta={"alive_threads": alive_threads, "concurrency": self._concurrency, "runs": runs, "failures": failures}, + ) + + def _run_loop(self) -> None: + while True: + if self._force_stop.is_set() or self._stop_requested.is_set(): + return + with self._lock: + self._in_flight += 1 + try: + self._routine.run() + except Exception as exc: + with self._lock: + self._failures += 1 + self._last_error = str(exc) + else: + with self._lock: + self._runs += 1 + self._last_error = None + finally: + with self._lock: + self._in_flight -= 1 + if self._stop_requested.is_set(): + return + sleep(self._interval) + + def _alive_threads(self) -> int: + return sum(1 for thread in self._threads if thread.is_alive()) + + class ExampleModule(ApplicationModule): def __init__(self) -> None: - self.handler = CollectingHandler() - self.queue = InMemoryTaskQueue() + self.routine = CollectingRoutine() @property def name(self) -> str: return "example" def register(self, registry: ModuleRegistry) -> None: - traces = registry.services.get("traces") - registry.add_queue("incoming", self.queue) - registry.add_handler("collect", self.handler) - self.queue.publish(Task(name="incoming", payload={"id": 1}, metadata={})) - registry.add_worker(QueueWorker("collector", self.queue, self.handler, traces, concurrency=1)) + registry.add_worker(RoutineWorker("collector", self.routine)) 
registry.add_health_contributor(StaticHealthContributor()) class BlockingModule(ApplicationModule): def __init__(self, started: Event, release: Event) -> None: - self.queue = InMemoryTaskQueue() - self.handler = BlockingHandler(started, release) + self.routine = BlockingRoutine(started, release) @property def name(self) -> str: return "blocking" def register(self, registry: ModuleRegistry) -> None: - traces = registry.services.get("traces") - self.queue.publish(Task(name="incoming", payload={"id": 1}, metadata={})) - registry.add_worker(QueueWorker("blocking-worker", self.queue, self.handler, traces, concurrency=1)) + registry.add_worker(RoutineWorker("blocking-worker", self.routine)) class RecordingTransport(NoOpTraceTransport): @@ -85,7 +188,7 @@ class RecordingTransport(NoOpTraceTransport): self.messages.append(record) -def test_runtime_processes_tasks_and_exposes_status(tmp_path) -> None: +def test_runtime_runs_worker_routine_and_exposes_status(tmp_path) -> None: config_path = tmp_path / "config.yml" config_path.write_text( """ @@ -113,7 +216,7 @@ log: status = runtime.status() runtime.stop() - assert module.handler.processed == [{"id": 1}] + assert module.routine.processed == [{"id": 1}] assert status["modules"] == ["example"] assert status["runtime"]["state"] == "idle" assert status["health"]["status"] == "ok" @@ -146,7 +249,7 @@ def test_trace_service_writes_contexts_and_messages() -> None: transport = RecordingTransport() manager = TraceService(transport=transport) - with manager.open_context(alias="worker", kind="task", attrs={"task": "incoming"}): + with manager.open_context(alias="worker", kind="worker", attrs={"routine": "incoming"}): manager.step("parse") manager.info("started", status="ok", attrs={"attempt": 1}) @@ -202,29 +305,67 @@ def test_http_control_channel_exposes_health_and_actions() -> None: asyncio.run(scenario()) -def test_public_plba_package_exports_runtime_builder(tmp_path) -> None: +def 
test_public_plba_package_exports_runtime_builder_and_worker_contract(tmp_path) -> None: + import plba from plba import ApplicationModule as PublicApplicationModule - from plba import QueueWorker as PublicQueueWorker + from plba import InMemoryTaskQueue + from plba import Worker as PublicWorker + from plba import WorkerHealth as PublicWorkerHealth + from plba import WorkerStatus as PublicWorkerStatus from plba import create_runtime config_path = tmp_path / "config.yml" config_path.write_text("platform: {}\n", encoding="utf-8") + queue = InMemoryTaskQueue[int]() + queue.put(2) + assert queue.get(timeout=0.01) == 2 + queue.task_done() + + class PublicRoutine: + def __init__(self) -> None: + self.runs = 0 + + def run(self) -> None: + if self.runs == 0: + self.runs += 1 + + class PublicWorkerImpl(PublicWorker): + def __init__(self, routine: PublicRoutine) -> None: + self._inner = RoutineWorker("public-worker", routine) + + @property + def name(self) -> str: + return self._inner.name + + @property + def critical(self) -> bool: + return self._inner.critical + + def start(self) -> None: + self._inner.start() + + def stop(self, force: bool = False) -> None: + self._inner.stop(force=force) + + def health(self) -> PublicWorkerHealth: + return self._inner.health() + + def status(self) -> PublicWorkerStatus: + return self._inner.status() + class PublicExampleModule(PublicApplicationModule): @property def name(self) -> str: return "public-example" def register(self, registry: ModuleRegistry) -> None: - queue = InMemoryTaskQueue() - traces = registry.services.get("traces") - handler = CollectingHandler() - queue.publish(Task(name="incoming", payload={"id": 2}, metadata={})) - registry.add_worker(PublicQueueWorker("public-worker", queue, handler, traces)) + registry.add_worker(PublicWorkerImpl(PublicRoutine())) runtime = create_runtime(PublicExampleModule(), config_path=str(config_path)) runtime.start() sleep(0.2) assert runtime.configuration.get() == {"platform": {}} assert 
runtime.status()["workers"]["registered"] == 1 + assert hasattr(plba, "QueueWorker") is False runtime.stop()