From a372436fafdec358db6902bc0fcffc85bd91c141 Mon Sep 17 00:00:00 2001 From: Dmitry Smirnov Date: Mon, 9 Mar 2026 03:06:22 +0300 Subject: [PATCH 1/9] docs: chore: consolidate and optimize repo for easy management (unknown) --- docs/README.md | 17 ++- docs/ai/README.md | 4 +- docs/ai/mesh.md | 63 ++++++++++ docs/ai/mesh/context7.md | 63 ---------- docs/ai/mesh/github.md | 58 --------- docs/ai/mesh/npm.md | 54 --------- docs/foundations/adaptation.md | 66 ---------- docs/foundations/best-practices.md | 113 +++++++----------- docs/foundations/cde.md | 77 +++++------- docs/foundations/dev-kit.md | 77 ------------ docs/foundations/methodology.md | 69 +++++------ docs/foundations/patterns.md | 57 --------- .../operations/worker-ecosystem-refs.md | 8 +- docs/runtime/config.md | 4 + docs/runtime/install.md | 67 +++++++++++ docs/runtime/overview.md | 6 +- docs/workflows/normalization.md | 12 +- 17 files changed, 266 insertions(+), 549 deletions(-) create mode 100644 docs/ai/mesh.md delete mode 100644 docs/ai/mesh/context7.md delete mode 100644 docs/ai/mesh/github.md delete mode 100644 docs/ai/mesh/npm.md delete mode 100644 docs/foundations/adaptation.md delete mode 100644 docs/foundations/dev-kit.md delete mode 100644 docs/foundations/patterns.md create mode 100644 docs/runtime/install.md diff --git a/docs/README.md b/docs/README.md index c175055..d6192c2 100644 --- a/docs/README.md +++ b/docs/README.md @@ -14,17 +14,15 @@ Context-Driven Engineering (CDE) is a methodology that treats repositories as sp ## ๐Ÿ— Foundations Core concepts and engineering principles that drive the ecosystem. -- **[Context-Driven Engineering](foundations/cde.md)**: Our core philosophy of resolving drift. -- **[dev.kit Primitives](foundations/dev-kit.md)**: The thin empowerment layer and its core pillars. -- **[Best Practices](foundations/best-practices.md)**: High-fidelity engineering rules and command mappings. 
-- **[Context Adaptation](foundations/adaptation.md)**: Resilient projections and fail-open interaction. -- **[Methodology](foundations/methodology.md)**: CLI-Wrapped Automation (CWA). -- **[Engineering Layers](foundations/layers.md)**: The structural hierarchy of the repo. -- **[Patterns & Templates](foundations/patterns.md)**: Reusable documentation and script patterns. +- **[Core Philosophy (CDE)](foundations/cde.md)**: Resolving drift and the thin empowerment layer. +- **[UDX Methodology (CWA)](foundations/methodology.md)**: CLI-wrapped automation and resilient projections. +- **[Best Practices & Patterns](foundations/best-practices.md)**: High-fidelity standards and iterative loops. +- **[Engineering Layers](foundations/layers.md)**: The structural hierarchy of the repository. ## โš™๏ธ Runtime The deterministic CLI engine and its operational lifecycle. - **[Runtime Overview](runtime/overview.md)**: Primitives, architecture, and command surface. +- **[Installation & Maintenance](runtime/install.md)**: Safe mode, backups, and lifecycle purging. - **[Configuration](runtime/config.md)**: Scoped orchestration via `environment.yaml` and `.env`. - **[Lifecycle](runtime/lifecycle.md)**: The bootstrap, execute, and cleanup phases. - **[Execution Loop](runtime/execution-loop.md)**: Workflow schemas and resolution cycles. @@ -41,9 +39,8 @@ Intent-to-resolution mapping and engineering loops. Grounded, context-aware intelligence for your repository. - **[AI Overview](ai/README.md)**: How dev.kit transforms LLMs into configuration engines. - **[Mission & Principles](ai/agents.md)**: The core directives for all AI agents. -- **[AI Mesh: GitHub](ai/mesh/github.md)**: Remote discovery and PR management. -- **[AI Mesh: NPM](ai/mesh/npm.md)**: Environment hydration and tool detection. -- **[AI Mesh: Context7](ai/mesh/context7.md)**: Cross-repo knowledge synchronization. +- **[AI Skill Mesh](ai/mesh.md)**: Unified remote discovery, knowledge hub, and hydration. 
+ ## ๐Ÿ•ธ Reference diff --git a/docs/ai/README.md b/docs/ai/README.md index ad6d50a..1a4b7ac 100644 --- a/docs/ai/README.md +++ b/docs/ai/README.md @@ -35,9 +35,7 @@ To maintain high-fidelity engineering boundaries, **dev.kit** enforces a strict ### ๐Ÿ•ธ Skill Mesh (Shared Discovery) Unified view of internal commands, managed skills, and external tools: -- **[Context7](mesh/context7.md)**: Structured repository hub and synchronization. -- **[GitHub](mesh/github.md)**: Remote repository and workflow resolution (via `gh`). -- **[NPM](mesh/npm.md)**: Tooling and package health management. +- **[AI Skill Mesh](mesh.md)**: Unified remote discovery (GitHub), knowledge hub (Context7), and runtime hydration (NPM). ## ๐Ÿ“š Authoritative References diff --git a/docs/ai/mesh.md b/docs/ai/mesh.md new file mode 100644 index 0000000..5ce1621 --- /dev/null +++ b/docs/ai/mesh.md @@ -0,0 +1,63 @@ +# AI Skill Mesh: Remote & Local Discovery + +**Domain:** AI / Skill Mesh +**Status:** Canonical + +## Summary + +The **AI Skill Mesh** is the unified discovery and synchronization layer that empowers **dev.kit** to resolve intent across local and remote repositories. It bridges disparate repository contexts into a coherent engineering environment. + +--- + +## ๐Ÿ— GitHub: Remote Discovery + +The GitHub integration enables **dev.kit** to probe remote repositories, Pull Requests, and issues using the `gh` CLI. + +### Features +- **Skill Mesh Expansion**: Resolve skills and patterns located in remote UDX repositories. +- **Triage & PR Management**: Analyze assigned issues and automate the creation/updating of Pull Requests. +- **Auth**: Authenticated via `GH_TOKEN` or `gh auth login`. + +--- + +## ๐Ÿ— Context7: The Knowledge Hub + +**Context7** is the primary synchronization hub for the Skill Mesh, enabling discovery via **MCP (Model Context Protocol)**, CLI, and API. + +### Features +- **Grounded Access**: Retrieve structured context (Docs, Patterns, Logic) from any synced repository. 
+- **Hierarchical Exploration**: Query codebases through high-fidelity interfaces that understand repository structure. +- **Programmable API**: Resolve external library IDs and fetch trust-scored documentation. + +--- + +## ๐Ÿ— NPM: Runtime Hydration + +The NPM integration ensures the local environment is **Hydrated** with necessary CLI tools, specifically focusing on `@udx` scoped packages. + +### Supported Tools +- **๐ŸŒ @udx/mcurl**: High-fidelity API client for deterministic interaction. +- **๐Ÿ” @udx/mysec**: Proactive security scanner for credential protection. +- **๐Ÿ“„ @udx/md.view**: Markdown rendering for high-fidelity documentation previews. + +--- + +## ๐Ÿ— Standard Resource Mapping + +| Requirement | Grounding Resource | Role | +| :--- | :--- | :--- | +| **Patterns** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Source of truth for remote discovery templates. | +| **Orchestration** | [`@udx/worker-deployment`](https://github.com/udx/worker-deployment) | Standard patterns for environment management. | +| **Fidelity** | [`udx/worker`](https://github.com/udx/worker) | Deterministic runtime for mesh execution. | + +--- + +## ๐Ÿ“š Authoritative References + +The Skill Mesh is built on systematic knowledge and observation-driven management: + +- **[AI-Powered Content Management](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: High-fidelity synthetic enrichment. +- **[Observation-Driven Management (ODM)](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Optimizing efficiency through pattern identification. 
+ +--- +_UDX DevSecOps Team_ diff --git a/docs/ai/mesh/context7.md b/docs/ai/mesh/context7.md deleted file mode 100644 index 2bcb2eb..0000000 --- a/docs/ai/mesh/context7.md +++ /dev/null @@ -1,63 +0,0 @@ -# Context7: The Knowledge Hub - -**Domain:** AI / Knowledge Mesh -**Status:** Canonical - -## Summary - -**Context7** is the primary synchronization and discovery hub for the **Skill Mesh**. It acts as a structured bridge between disparate repositories and the AI environment, enabling multi-modal interaction via **MCP (Model Context Protocol)**, CLI, and API. - ---- - -## ๐Ÿ— The Core Role: Cross-Repo Discovery - -Unlike simple search engines, Context7 enables **dev.kit** to perform high-fidelity **Discovery** across the entire UDX ecosystem: - -1. **Grounded Access**: Retrieve structured context (Docs, Patterns, Logic) from any synced repository. -2. **Hierarchical Exploration**: Access codebases through high-fidelity interfaces (MCP/API) that understand repository structure. -3. **Cross-Repo Resolution**: Resolve dependencies and intents by intelligently probing the synced knowledge of peer "Skills." - ---- - -## ๐Ÿ›  Integration Layers - -### 1. Model Context Protocol (MCP) -Context7 provides an MCP server that allows AI agents to directly browse and query synced repositories as if they were local tools. This provides a deep, native connection between the LLM and the codebase. - -### 2. Programmable API (v2) -- **Endpoint**: `https://context7.com/api/v2/` -- **Use Case**: Used during the **Normalization** phase to resolve external library IDs and fetch trust-scored documentation. - -### 3. Unified CLI -- **Installation**: `npm install -g @upstash/context7` -- **Use Case**: Local resolution and manual repository synchronization management. 
- -## ๐Ÿ— Standard Resource Mapping - -Context7 serves as the high-fidelity hub for all canonical UDX repository context: - -| Repository | Role | Purpose | -| :--- | :--- | :--- | -| **[`udx/dev.kit`](https://github.com/udx/dev.kit)** | Empowerment Layer | Primary engine for task normalization and skill discovery. | -| **[`udx/worker`](https://github.com/udx/worker)** | Base Environment | Canonical documentation for the deterministic container runtime. | -| **[`udx/worker-deployment`](https://github.com/udx/worker-deployment)** | Orchestration | Patterns for deploying and managing high-fidelity environments. | - ---- - -## ๐ŸŒŠ Waterfall Progression (DOC-003) - -**Progression**: `[context7-mesh-active]` -- [x] Step 1: Establish connection to Context7 API/MCP (Done) -- [>] Step 2: Synchronize relevant peer repositories (Active) -- [ ] Step 3: Perform cross-repo intent resolution (Planned) - -## ๐Ÿ“š Authoritative References - -Context7 is built on systematic knowledge management and observation-driven management: - -- **[AI-Powered Content Management](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: High-fidelity synthetic enrichment and standalone documentation quality. -- **[Observation-Driven Management (ODM)](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Optimizing efficiency through pattern identification. -- **[AOCA: Embedded Governance](https://udx.io/cloud-automation-book/cybersecurity)**: Aligning compliance with automated engineering flows. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/ai/mesh/github.md b/docs/ai/mesh/github.md deleted file mode 100644 index 7ea7f41..0000000 --- a/docs/ai/mesh/github.md +++ /dev/null @@ -1,58 +0,0 @@ -# GitHub Integration: Remote Discovery - -**Domain:** AI / Remote Discovery -**Status:** Canonical - -## Summary - -The GitHub integration enables **dev.kit** to perform high-fidelity **Discovery** by probing remote repositories, Pull Requests, and issues. By leveraging the `gh` CLI, it provides a grounded, authenticated interface for agents to interact with the broader engineering ecosystem. - ---- - -## ๐Ÿ›  Features & Capabilities - -### 1. Skill Mesh Expansion -The GitHub integration allows the **Dynamic Discovery Engine** to identify and resolve skills located in remote repositories. -- **Trigger**: Intent resolution for an authorized organization or peer repository. -- **Outcome**: The AI can "reach out" to remote codebases to discover patterns or standardized workflows. - -### 2. Triage & PR Management -- **Assigned Issues**: Fetches issues to ground the initial `task start` phase. -- **PR Lifecycle**: Authorizes agents to analyze and **create** Pull Requests to formalize drift resolution. - ---- - -## ๐Ÿ— Requirements & Auth -- **CLI**: `gh` (GitHub CLI) must be installed and authenticated. -- **Auth**: Prefers `GH_TOKEN` or `GITHUB_TOKEN`. Falls back to interactive `gh auth login`. - -## ๐Ÿ— Standard Resource Mapping - -To maintain high-fidelity engineering flows, the GitHub integration prioritizes discovery across authoritative UDX repositories: - -| Repository | Role | Purpose | -| :--- | :--- | :--- | -| **[`udx/reusable-workflows`](https://github.com/udx/reusable-workflows)** | CI/CD Baseline | Canonical GitHub Action patterns and deployment templates. | -| **[`udx/wp-stateless`](https://github.com/udx/wp-stateless)** | Plugin Core | Reference for high-fidelity WordPress cloud integrations. 
| -| **[`udx/worker-deployment`](https://github.com/udx/worker-deployment)** | Orchestration | Standard patterns for deploying and managing the Worker Ecosystem. | - -## ๐Ÿ— GitHub Grounding - -Remote discovery and collaboration are operationalized through canonical UDX resources: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Patterns** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Source of truth for remote discovery templates. | -| **Automation** | [`udx/worker-deployment`](https://github.com/udx/worker-deployment) | Standard patterns for remote environment audits. | - ---- - -## ๐ŸŒŠ Waterfall Progression (DOC-003) - -**Progression**: `[github-mesh-active]` -- [x] Step 1: Detect and verify `gh` CLI health (Done) -- [>] Step 2: Resolve remote repository skills (Active) -- [ ] Step 3: Perform cross-repo intent normalization (Planned) - ---- -_UDX DevSecOps Team_ diff --git a/docs/ai/mesh/npm.md b/docs/ai/mesh/npm.md deleted file mode 100644 index ae76ed3..0000000 --- a/docs/ai/mesh/npm.md +++ /dev/null @@ -1,54 +0,0 @@ -# NPM Integration: Runtime Hydration - -**Domain:** AI / Runtime Health -**Status:** Canonical - -## Summary - -The NPM integration ensures that the local engineering environment is **Hydrated** with the necessary CLI tools. It provides deterministic health checks and proactive installation guidance for `@udx` scoped packages. - ---- - -## ๐Ÿ›  Features & Capabilities - -### 1. Proactive Hydration -When the **Dynamic Discovery Engine** identifies an intent requiring a specific tool (e.g., `@udx/mcurl`), the NPM module verifies its availability. -- **Advice**: If missing, the CLI provides the exact `npm install -g` command to empower the user or agent. - -### 2. Runtime Verification -- **Trigger**: `dev.kit doctor` or system bootstrap. -- **Outcome**: Ensures that the `node` and `npm` environments are healthy enough to support high-fidelity engineering tasks. 
- ---- - -## ๐Ÿ— Supported Tools - -### ๐ŸŒ `@udx/mcurl` -A high-fidelity API client designed for deterministic interaction with complex web services. It provides standardized logging and error handling that is easily consumable by the **Drift Resolution Cycle**. - -### ๐Ÿ” `@udx/mysec` -A proactive security scanner used to identify secrets, API keys, and sensitive credentials within the repository. It is integrated into the `dev.kit doctor` diagnostic flow to ensure **Credential Protection**. - -### ๐Ÿ“„ `@udx/md.view` -A Markdown rendering engine that allows for high-fidelity documentation previews directly from the CLI, ensuring that repository context is always legible and accessible. - -## ๐Ÿ— NPM Grounding - -NPM-based tooling is operationalized through canonical UDX resources: - -| Package | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Tool Mesh** | [`udx/worker`](https://github.com/udx/worker) | Pre-hydrated environment for global packages. | -| **Discovery** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Dynamic detection and health-check logic. | - ---- - -## ๐ŸŒŠ Waterfall Progression (DOC-003) - -**Progression**: `[npm-mesh-active]` -- [x] Step 1: Detect and verify `npm` runtime (Done) -- [>] Step 2: Check health of core `@udx` tools (Active) -- [ ] Step 3: Proactively advise on environment hydration (Planned) - ---- -_UDX DevSecOps Team_ diff --git a/docs/foundations/adaptation.md b/docs/foundations/adaptation.md deleted file mode 100644 index 9a3c6d0..0000000 --- a/docs/foundations/adaptation.md +++ /dev/null @@ -1,66 +0,0 @@ -# Context Adaptation: Resilient Projections - -**Domain:** Concepts / Technical Bridge -**Status:** Canonical - -## Summary - -**Adaptation** is the mechanism `dev.kit` uses to project canonical repository sources into tool-specific formats without mutating the underlying intent. 
It serves as the technical bridge for **Resilient Normalization**, ensuring that repository "Skills" are consumable by any agent or engine while the source remains "Clean" and "Native." - -![Adaptation Flow](../../assets/diagrams/adaptation-flow.svg) - ---- - -## The Purpose of Adaptation - -- **Interface Normalization**: Projecting standard repository artifacts (Markdown/YAML) into machine-consumable schemas (e.g., JSON manifests for LLM Tool-Calling or IDE-specific configs). -- **Resilient Fallback**: Ensuring that if a specialized projection fails, the system automatically falls back to **Standard Data** (e.g., raw Markdown or Text) to prevent a "hard-stop" in the engineering flow. -- **Canonical Integrity**: Ensuring that all drift is resolved at the repository level. Tools may change, but the **Source of Truth** (the Repo) remains constant. - ---- - -## The Laws of Adaptation - -1. **Canonical First**: Never edit a projection to fix a bug. Resolve the drift in the repository's source artifacts and re-project. -2. **Ephemeral Reversibility**: Adaptations are non-destructive projections. It must always be possible to delete all adapted formats and regenerate them perfectly from the source. -3. **Fail-Open Logic**: If an adaptation engine (e.g., a Mermaid-to-SVG renderer) is missing, the system must "Fail-Open" by providing the raw source to the user or agent rather than blocking the sequence. - ---- - -## Practical Examples: Source โ†’ Projection - -| Source Artifact | Projection Target | Adaptation Logic | -| :--------------------- | :---------------- | :--------------------------------------------------------------------------------------- | -| **`environment.yaml`** | Shell Environment | Translates YAML keys into host-specific `$ENV` variables and aliases. | -| **`docs/skills/*.md`** | Agent Manifests | Extracts `@intent` and `@usage` metadata into JSON for LLM tool-calling. 
| -| **`.mmd` (Mermaid)** | `.svg` or `.png` | Renders visual diagrams for documentation (Falls back to raw code if `mmdc` is missing). | -| **Script Headers** | CLI Help Menus | Parses shell script comments into a dynamic `dev.kit --help` interface. | - -## ๐Ÿ— Adaptation Grounding - -To ensure high-fidelity projections, `dev.kit` leverages canonical UDX resources as the targets for intent normalization: - -| Primitive | Adaptation Goal | Target Source | -| :--- | :--- | :--- | -| **Workflow Logic** | Project intent into reusable CI/CD patterns. | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | -| **Runtime Context** | Normalize environment parity across containers. | [`udx/worker`](https://github.com/udx/worker) | -| **Plugin Evolution** | Scale high-fidelity WordPress integrations. | [`udx/wp-stateless`](https://github.com/udx/wp-stateless) | - ---- - -## The Adaptation Lifecycle - -1. **Discovery**: `dev.kit` scans the repository for high-fidelity Markdown and YAML. -2. **Mapping**: The system determines the required "Shape" based on the current consumer (e.g., an AI Agent vs. a Local Developer). -3. **Projection**: The artifact is rendered into the ephemeral target format. -4. **Verification**: The system ensures the projection accurately reflects the **Canonical Intent**. - -## ๐Ÿ“š Authoritative References - -Resilient projections are a core part of maintaining standalone quality across disparate formats: - -- **[Synthetic Content Enrichment](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: Strategies for maintaining quality when projecting content across systems. -- **[Digital Logistics](https://andypotanin.com/digital-rails-and-logistics/)**: Tracing the evolution of software through the lens of fluid dynamics and systematic tracing. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/foundations/best-practices.md b/docs/foundations/best-practices.md index dcbe1fe..999bd37 100644 --- a/docs/foundations/best-practices.md +++ b/docs/foundations/best-practices.md @@ -1,94 +1,73 @@ -# Development Best Practices: High-Fidelity Engineering +# Best Practices & Patterns -**Domain:** Engineering / Methodology +**Domain:** Foundations / Engineering Standards **Status:** Canonical -This document outlines the core engineering practices enforced by **dev.kit**. These practices ensure that the repository state remains deterministic, context-driven, and high-fidelity for both human engineers and AI agents. +## Summary ---- - -## ๐Ÿ›  Practice-to-Command Mapping - -| Practice | Objective | dev.kit Command | -| :------------------------ | :-------------------------------------------------------------------- | :--------------------- | -| **Environment Hydration** | Verify required software, CLI meshes, and authorized state. | `dev.kit doctor` | -| **Pre-work Readiness** | Sync with origin and align feature branches before implementation. | `dev.kit sync prepare` | -| **Intent Normalization** | Transform ambiguous requests into deterministic `workflow.md` plans. | `dev.kit skills run` | -| **Atomic Sync** | Group changes into logical, domain-specific commits to prevent drift. | `dev.kit sync run` | -| **Visual Validation** | Generate and maintain architecture diagrams (Mermaid/SVG) from code. | `dev.kit visualizer` | -| **Task Lifecycle** | Track progress and prune session context upon task completion. | `dev.kit task` | +This document defines the high-fidelity engineering standards and reusable patterns for **dev.kit**. Adherence to these practices ensures that repository skills remain deterministic, portable, and legible to both humans and agents. --- -## ๐Ÿณ Standard Execution Runtimes +## ๐Ÿ›  Command Mappings -To ensure maximum fidelity, **dev.kit** is optimized for the **UDX Worker Ecosystem**. 
Using these images eliminates "it works on my machine" friction. +Every intent should map to a deterministic CLI command. Avoid performing raw operations when a `dev.kit` primitive exists. -| Component | Role | Source | -| :-------------------------- | :-------------------------------------------------------------------------- | :------------------------------------------------------------------ | -| **`udx/worker`** | The foundational base layer. A pre-hydrated, secure, deterministic runtime. | [Docker Hub](https://hub.docker.com/r/usabilitydynamics/udx-worker) | -| **`udx/worker-deployment`** | The standard pattern for orchestrating worker containers across infra. | [GitHub](https://github.com/udx/worker-deployment) | +| Intent | Primary Command | Standard Procedure | +| :-------------------- | :-------------------- | :--------------------------------------------------------------------------------- | +| **Audit Health** | `dev.kit status --audit` | Check environment prerequisites, shell integration, and repo compliance. | +| **Resolve Drift** | `dev.kit sync run` | Perform logical, domain-specific commits and automate PR creation. | +| **Execute Skill** | `dev.kit skills run` | Run a specialized repository-bound workflow script. | +| **Render Diagram** | `dev.kit visualizer` | Generate high-fidelity Mermaid diagrams from templates. | +| **Manage Lifecycle** | `dev.kit task` | Deconstruct intent into a `workflow.md` and track resolution state. | -### ๐Ÿงช Isolated Testing +--- -Always validate **dev.kit** logic within a clean `udx/worker` container to emulate production-grade environments: +## ๐Ÿงช High-Fidelity Patterns -```bash -docker run --rm -v $(pwd):/workspace -w /workspace udx/worker ./tests/suite.sh -``` +### 1. The Engineering Loop (Plan-Act-Validate) +Always follow the **Iterative Resolution Cycle**. Never commit changes that haven't been validated against documentation or a test suite. 
+- **Pattern**: Use `dev.kit task start` to initialize the loop and `dev.kit test` to close it. -## ๐Ÿ— Practice Grounding +### 2. Isolated Verification +Validate logic within a clean `udx/worker` container to emulate production environments and eliminate "it works on my machine" friction. +- **Pattern**: `dev.kit test --worker` utilizes `@udx/worker-deployment` for high-fidelity verification. -High-fidelity engineering is operationalized through canonical UDX resources: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Logic** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Validated primitives and discovery engine. | -| **Runtime** | [`udx/worker`](https://github.com/udx/worker) | Deterministic, isolated base environment. | -| **Patterns** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Validated sequences for reduced variance. | +### 3. Fail-Open Interaction +Specialized tools (e.g., Mermaid renderers) may not always be present. Design logic to provide raw source data as a fallback to prevent blocking the engineering flow. --- -## ๐Ÿ— High-Fidelity Principles +## ๐Ÿ— Documentation Patterns -> ### 1. Grounding Before Action -> -> Never execute logic without grounding the environment. An ungrounded state is the primary source of repository drift. -> -> - **Mandate:** Run `dev.kit sync prepare` and `dev.kit doctor` at the start of every session. +Markdown is the logical map of the repository. Use structured headers and frontmatter to ensure legibility. -> ### 2. Logical Separation of Concerns -> -> Avoid "Mega-Commits." Mixing documentation, configuration (YAML), and core source code obscures intent and breaks the audit trail. -> -> - **Mandate:** Use `dev.kit sync run` to categorize changes into logical, reviewable units. - -> ### 3. Documentation as Executable Logic -> -> Treat Markdown (`docs/`) and script headers (`lib/`) as the **Command Surface**. 
High-fidelity headers allow the CLI to dynamically discover and map repository skills. -> -> - **Mandate:** Maintain `@description` and `@intent` blocks in all scripts to feed the Discovery Engine. +### 1. Skill Metadata +Skills defined in `docs/skills/` must include a `SKILL.md` with standard metadata: +```markdown +# Skill Name +- **Intent**: key, keywords, action +- **Objective**: Concise summary of what this skill achieves. +``` -> ### 4. Fail-Open Resilience -> -> When a specialized automation fails, the system must not "hard-crash." It must fallback to standard text/logs for human or AI diagnostic review. -> -> - **Mandate:** Ensure all scripts provide high-signal output to `workflow.md` artifacts even during partial failures. +### 2. Workflow State +Active tasks in `tasks/` must use a standard `workflow.md` to track progression: +```markdown +# Workflow: Task ID +- [x] Step 1: Completed action +- [>] Step 2: Active action +- [ ] Step 3: Planned action +``` --- -## ๐Ÿง  AI & Agent Integration - -- **Autonomous Grounding:** Agents must run `dev.kit ai sync` to refresh their internal skill-map, but **never** push changes to `origin` without explicit user confirmation. -- **Incremental Feedback:** Use the **Waterfall Progression Tail** to provide real-time status updates. High-latency tasks must emit "Heartbeat" logs to prevent context timeouts. -- **Native Tooling Only:** AI agents must use the **same CLI commands** as humans. Do not allow agents to bypass the `dev.kit` boundary for raw shell access. - -## ๐Ÿ“š Authoritative References +## ๐Ÿ— Grounding Resources -High-fidelity engineering is grounded in systematic roles and automation standards: - -- **[Key Roles in specialized Dev Teams](https://andypotanin.com/best-practices-specialized-software-development/)**: Understanding specialized roles for cloud-native and resilient infrastructure. 
-- **[The Power of Automation](https://andypotanin.com/the-power-of-automation-how-it-has-transformed-the-software-development-process/)**: Transforming the software development process through systematic automation. +| Requirement | Grounding Resource | Role | +| :--- | :--- | :--- | +| **Standards** | [`docs/reference/standards/`](../reference/standards/) | Source of truth for 12-factor and YAML compliance. | +| **Automation** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for CI/CD consistency. | +| **Runtime** | [`udx/worker`](https://github.com/udx/worker) | Deterministic, isolated base environment. | --- _UDX DevSecOps Team_ diff --git a/docs/foundations/cde.md b/docs/foundations/cde.md index dcabc03..7768cb0 100644 --- a/docs/foundations/cde.md +++ b/docs/foundations/cde.md @@ -7,82 +7,69 @@ **Context-Driven Engineering (CDE)** is the foundational methodology of **dev.kit**. It transforms chaotic user intent into executable context by treating the repository as the **Single Source of Truth**. CDE provides the structural framework for identifying and **Resolving the Drift** between intent and reality. +**dev.kit** operates as the **Thin Empowerment Layer** (Grounding Bridge) that projects this philosophy into a dynamic "Skill Mesh" accessible to humans and AI agents. + ![CDE Flow](../../assets/diagrams/cde-flow.svg) --- ## Core Principles: The Operational DNA -These principles guide every architectural decision in the `dev.kit` ecosystem: +These principles guide every architectural decision in the ecosystem: 1. **Resolve the Drift**: Every action must purposefully close the gap between intent and repository state. 2. **Deterministic Normalization**: Distill chaotic inputs into bounded, repeatable workflows before execution. -3. **Resilient Waterfall (Fail-Open)**: Never break the flow. Fallback to **Standard Data** (raw logs/text) if specialized tools fail. -4. 
**Repo-Scoped Truth**: The repository is the absolute, versioned source of truth for all skills and state. No "shadow logic." +3. **Resilient Waterfall (Fail-Open)**: Never break the flow. Fallback to standard raw data if specialized tools fail. +4. **Repo-Scoped Truth**: The repository is the absolute, versioned source of truth for all skills and state. 5. **Validated CLI Boundary**: All execution occurs through a hardened CLI interface for explicit confirmation and auditability. -6. **Native-First Dependencies**: Favor standard POSIX-compliant tools (Bash, Git, `jq`) for maximum portability. -7. **Symmetry of Artifacts**: Every output must be equally legible to humans (Markdown) and consumable by machines (YAML/JSON). +6. **Symmetry of Artifacts**: Every output must be equally legible to humans (Markdown) and consumable by machines (YAML/JSON). --- -## The CDE Strategy: The Clean Repository +## The Three Pillars of Empowerment -CDE avoids proprietary AI schemas, enforcing high-fidelity standards on traditional engineering artifacts: +### 1. Grounding (The Bridge) +Ensures that every engineering action is grounded in the repository's truth. It audits the environment and synchronizes AI context to ensure alignment with repository rules. -- **Intent-as-Artifact**: Documentation is the **Specification**. Markdown is structured as logic for LLMs and guidance for humans. -- **Drift Identification**: `dev.kit` compares the current state against the documented "Target State" to identify the **Drift**. -- **Normalization Boundary**: Drift is identified through dynamic reasoning (**AI Skills**) and resolved through standard **Deterministic Primitives** (CLI commands). This ensures that while the reasoning is flexible, every execution step remains predictable and reproducible. +### 2. Normalization (The Filter) +Chaotic user requests are filtered through a **Normalization Boundary**. 
Ambiguous intent is distilled into a deterministic `workflow.md` plan before any execution occurs. -| Artifact Type | Standard | Purpose | -| :---------------- | :------------------- | :------------------------------------------------------------------------ | -| **Documentation** | `Markdown (.md)` | The "Logical Map." Defines intent and success criteria. | -| **Manifests** | `YAML (.yaml)` | Configuration-as-Code. Defines environments and dependencies. | -| **Execution** | `Scripts (.sh, .py)` | The "Engine." Provides the atomic actions to reach the target state. | +### 3. Execution (The Engine) +Logic is executed through modular, standalone scripts and CLI commands. `dev.kit` ensures these run in a consistent, environment-aware context. --- -## The Drift Resolution Lifecycle - -CDE replaces "Black Box" generation with a **Resilient Engineering Loop**: - -1. **Analyze**: Audit the repo to identify the "Drift" from user intent. -2. **Normalize**: Map the drift to a standard `workflow.md` execution plan. -3. **Iterate**: Execute workflow steps using validated CLI scripts. -4. **Validate**: Ensure the drift is resolved against the documentation. -5. **Capture**: Check new logic back into the repo as standard source code or docs. +## Architecture: The Thin Layer ---- +`dev.kit` distinguishes between **Deterministic Functions** (the programmatic logic) and **AI Reasoning Skills** (the dynamic intent resolution). -## The "Definition of Done" Checklist +### 1. Deterministic Functions (The Engine) +Hardened, predictable routines found in `lib/commands/` and `docs/skills/*/assets/`. +- **Role**: Execute specific, bounded actions with high fidelity (e.g., atomic commits, SVG rendering). -Before a task is considered resolved, verify: +### 2. AI Reasoning Skills (The Brain) +Dynamic capabilities defined in `SKILL.md`. They use LLM reasoning to bridge unstructured intent with repository functions. 
+- **Role**: Interpret intent, analyze repository state, and orchestrate the engine. -- [ ] Was the intent successfully normalized into a `workflow.md`? -- [ ] Did the execution path survive potential tool failures (**Fail-Open**)? -- [ ] Is the resulting logic captured as a reusable, repo-native **Skill**? -- [ ] Is the final state documented in Markdown for the next iteration? -## ๐Ÿ— Principle Grounding +--- -Context-Driven Engineering is operationalized through canonical UDX resources: +## The Skill Mesh -| CDE Principle | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Resolve the Drift** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | The primary engine for intent resolution. | -| **Deterministic Base** | [`udx/worker`](https://github.com/udx/worker) | Hardened environment for context stability. | -| **Atomic Flow** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for normalized execution. | +The entire repository is treated as a **Skill**. The mesh is dynamically discovered by scanning: +- **Internal Commands**: Metadata-rich shell scripts in `lib/commands/`. +- **AI Reasoning Skills**: Authoritative `SKILL.md` files in `docs/skills/`. +- **Functional Assets**: Programmatic templates and configs managed by the engine. +- **Virtual Capabilities**: Global environment tools (`gh`, `npm`, `worker`). --- ## ๐Ÿ“š Authoritative References -The principles of CDE are grounded in foundational research on automation and AI-driven management: +CDE is grounded in foundational research on high-fidelity automation: -- **[AI-Powered Revolution in Content Management](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: High-fidelity synthetic enrichment and standalone quality. 
-- **[The Power of Automation](https://andypotanin.com/the-power-of-automation-how-it-has-transformed-the-software-development-process/)**: How automation transforms the software development lifecycle. -- **[Observation-Driven Management (ODM)](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Revolutionizing efficiency through AI-identified patterns. -- **[Implementing a cATO System](https://andypotanin.com/implementing-a-continuous-authority-to-operate-cato-system/)**: Principles for continuous authorization through automated evidence. -- **[SDLC Breaking Points](https://andypotanin.com/wordpress-risks/)**: Identifying common failure points in the development lifecycle. +- **[AI-Powered Revolution in Content Management](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: High-fidelity synthetic enrichment. +- **[The Power of Automation](https://andypotanin.com/the-power-of-automation-how-it-has-transformed-the-software-development-process/)**: Systematic transformation of the engineering flow. +- **[Observation-Driven Management (ODM)](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Optimizing efficiency through AI-identified patterns. --- _UDX DevSecOps Team_ - diff --git a/docs/foundations/dev-kit.md b/docs/foundations/dev-kit.md deleted file mode 100644 index 7e14979..0000000 --- a/docs/foundations/dev-kit.md +++ /dev/null @@ -1,77 +0,0 @@ -# dev.kit: The Thin Empowerment Layer - -**Domain:** Foundations / Core Concept -**Status:** Canonical - -## Summary - -**dev.kit** is a high-fidelity engineering interface designed to resolve the **Drift** between human intent and repository reality. It operates as a **Thin Empowerment Layer** (Grounding Bridge) that transforms a static codebase into a dynamic "Skill Mesh" accessible to humans and AI agents alike. 
- ---- - -## Core Philosophy - -`dev.kit` is built on the principles of **Context-Driven Engineering (CDE)**. It does not replace your existing tools; it orchestrates them to maintain a deterministic, context-aware engineering environment. - -- **Non-Proprietary**: Uses standard Markdown, YAML, and Shell scripts. -- **Deterministic**: Every action is bounded by a validated CLI interface. -- **Agent-Ready**: Provides native "Grounding" for LLMs, transforming them into configuration engines. - ---- - -## The Three Pillars of dev.kit - -### 1. Grounding (The Bridge) -`dev.kit` provides the necessary context to ensure that every engineering action is grounded in the repository's truth. It audits the environment (`dev.kit doctor`) and synchronizes AI context (`dev.kit ai sync`). - -### 2. Normalization (The Filter) -Chaotic user requests are filtered through a **Normalization Boundary**. Ambiguous intent is distilled into a deterministic `workflow.md` plan before any execution occurs. - -### 3. Execution (The Engine) -Logic is executed through modular, standalone scripts and CLI commands. `dev.kit` ensures these run in a consistent, environment-aware context via `environment.yaml`. - -## Architecture: The Empowerment Layer - -`dev.kit` distinguishes between **Deterministic Functions** (the programmatic logic) and **AI Reasoning Skills** (the dynamic intent resolution). - -### 1. Deterministic Functions (The Engine) -These are hardened, predictable routines found in `lib/commands/` and `docs/skills/*/assets/`. They provide the execution engine for the repository. -- **Example (Git Sync)**: The `workflow.yaml` and `git_sync.sh` logic that groups files and executes commits. -- **Example (Visualizer)**: The Mermaid templates and `mmdc` export logic that renders SVGs. -- **Role**: Execute specific, bounded actions with high fidelity. - -### 2. AI Reasoning Skills (The Brain) -These are the dynamic capabilities defined in `SKILL.md`. 
They use LLM reasoning to bridge unstructured intent with repository functions. -- **Example (Git Sync)**: Analyzing a set of changed files to **determine the logical domains** (docs, cli, core) and generate a meaningful commit message. -- **Example (Visualizer)**: Reading a README or source file to **extract the logical process flow** and map it to a specific Mermaid template. -- **Role**: Interpret intent and orchestrate the engine. - ---- - -## The Skill Mesh - -`dev.kit` treats the entire repository as a **Skill**. It dynamically discovers the mesh by scanning: -- **Internal Commands**: Metadata-rich shell scripts in `lib/commands/`. -- **AI Reasoning Skills**: Authoritative `SKILL.md` files in `docs/skills/`. -- **Functional Assets**: Programmatic templates and configs managed by the engine. -- **Virtual Capabilities**: Global environment tools (`gh`, `npm`, `docker`). - ---- - -## Primary Interfaces - -- **`dev.kit status`**: The "Engineering Brief." High-signal overview of health and active tasks. -- **`dev.kit ai`**: The "Grounding Layer." Orchestrates AI integration and skill synchronization. -- **`dev.kit sync`**: The "Drift Resolver." Atomic, domain-specific repository synchronization. -- **`dev.kit task`**: The "Lifecycle Manager." Tracks intent from normalization to resolution. - -## ๐Ÿ“š Authoritative References - -The mission of dev.kit is grounded in the practical need for high-fidelity engineering empowerment: - -- **[Jumping into Dev at a Software Enterprise](https://andypotanin.com/dev-start/)**: Guidance for starting the engineering journey with specialized tools. -- **[Navigating to the Cloud](https://andypotanin.com/windows-to-cloud/)**: Managing the complexity of modern cloud IT systems. 
- ---- -_UDX DevSecOps Team_ - diff --git a/docs/foundations/methodology.md b/docs/foundations/methodology.md index 231dad2..b9be397 100644 --- a/docs/foundations/methodology.md +++ b/docs/foundations/methodology.md @@ -1,11 +1,11 @@ -# The UDX Methodology: CLI-Wrapped Automation (CWA) +# UDX Methodology: CLI-Wrapped Automation (CWA) -**Domain:** Concepts / Operational Strategy +**Domain:** Foundations / Operational Strategy **Status:** Canonical ## Summary -The **UDX Methodology** centers on **CLI-Wrapped Automation (CWA)**. This practice encapsulates all repository logic within a validated CLI boundary. By wrapping scripts and manifests in a standardized interface, we transform a static codebase into a high-fidelity "Skill" accessible to humans, CI/CD pipelines, and AI agents alike. +The **UDX Methodology** centers on **CLI-Wrapped Automation (CWA)**. This practice encapsulates all repository logic within a validated CLI boundary. By wrapping scripts and manifests in a standardized interface, we transform a static codebase into a high-fidelity "Skill" accessible to humans, CI/CD pipelines, and AI agents. ![Methodology Flow](../../assets/diagrams/methodology-flow.svg) @@ -13,65 +13,52 @@ The **UDX Methodology** centers on **CLI-Wrapped Automation (CWA)**. This practi ## Core Concepts -- **Repo-as-a-Skill**: Repository logic is not hidden in READMEs or tribal knowledge. It is exposed through standardized scripts and CLI commands. Engineering experience is captured as portable, executable automation. -- **The Smart Helper**: `dev.kit` acts as the orchestration layer that resolves **Drift** (intent divergence) by translating high-level goals into the specific repository-based skills required to achieve them. +- **Repo-as-a-Skill**: Repository logic is exposed through standardized scripts and CLI commands rather than hidden in READMEs. +- **Task Normalization**: Chaotic user intent is distilled into a deterministic `workflow.md`. 
+- **Resilient Waterfall (Fail-Open)**: If specialized tools fail, the system falls back to standard data (raw logs/text) to maintain continuity. --- -## The Principles +## Context Adaptation: Resilient Projections -### 1. Task Normalization: Resolving the Drift +**Adaptation** is the mechanism used to project canonical repository sources into tool-specific formats without mutating the underlying intent. -Chaotic user intent is distilled into a deterministic `workflow.md`. +1. **Interface Normalization**: Projecting Markdown/YAML into machine-consumable schemas (e.g., JSON manifests for LLM tool-calling). +2. **Ephemeral Reversibility**: Adaptations are non-destructive. It must always be possible to regenerate them perfectly from the source. +3. **Fail-Open Logic**: If an adaptation engine (e.g., a Mermaid renderer) is missing, provide the raw source rather than blocking the sequence. -- **Structured Inputs**: Every task defines its `Scope`, `Inputs`, and `Expected Outputs`. -- **State Tracking**: The lifecycle is visible: `planned -> in_progress -> done`. -- **Bounded Execution**: Logic is executed in discrete steps. If a step exceeds its scope, it triggers a specialized sub-workflow rather than failing silently. - -### 2. Resilient Waterfall (Fail-Open) - -The engineering sequence must remain unbroken. We utilize **Fail-Open Normalization** to ensure continuity: - -- **High-Fidelity Path**: Attempt execution using the most specialized tool/script first. -- **Fallback Path**: If specialized tools are missing or fail, the system falls back to **Standard Data** (raw logs, source code, or text-based reasoning). -- **Continuity**: The "Process" always yields an output, preventing environment blocks and allowing the next step to proceed with the best available data. - -### 3. Script-First & CLI-Wrapped - -Logic lives in modular, standalone scripts (`scripts/`, `lib/`). 
The `dev.kit` CLI provides the **Shell Wrapper** that ensures these scripts run in a consistent, environment-aware context (via `environment.yaml`). - -### 4. Machine-Ready Orchestration - -CWA provides a stable interface for AI agents across two stages: - -- **Stage 1: Grounding**: Agents use `dev.kit` to audit the environment (`doctor`) and understand the "Rules of Engagement." -- **Stage 2: Execution**: Agents leverage the Task Normalization engine to execute complex, multi-step engineering loops with predictable results. +### Practical Examples +- **`environment.yaml` โ†’ Shell**: Translates YAML keys into host-specific `$ENV` variables. +- **`docs/skills/*.md` โ†’ Manifests**: Extracts metadata into JSON for AI grounding. +- **`.mmd` โ†’ `.svg`**: Renders diagrams for documentation (falls back to code if renderer is missing). --- ## The Execution Lifecycle: Plan โ†’ Normalize โ†’ Process 1. **Plan**: Deconstruct the intent into discrete repository actions. -2. **Normalize**: Validate the environment, map dependencies, and format the inputs into a `workflow.md`. +2. **Normalize**: Validate the environment, map dependencies, and format inputs into a `workflow.md`. 3. **Process**: Execute the CLI commands and capture the result as a repository artifact. --- -## Why CWA? +## ๐Ÿ— Methodology Grounding -- **Portability**: Logic that runs in the CLI works identically in Local Dev, CI/CD, and Production. -- **Decoupling**: The Interface (CLI) is separated from the Implementation (Scripts), allowing for seamless logic upgrades. -- **Zero Bloat**: Uses standard Markdown, YAML, and Shell. No proprietary "AI-only" formats required. +| Primitive | Adaptation Goal | Target Source | +| :--- | :--- | :--- | +| **Workflow Logic** | Project intent into reusable CI/CD patterns. | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | +| **Runtime Context** | Normalize environment parity across containers. 
| [`udx/worker`](https://github.com/udx/worker) |
+| **Orchestration** | Standardize container-based execution loops. | [`@udx/worker-deployment`](https://github.com/udx/worker-deployment) |
+
+---
 
 ## ๐Ÿ“š Authoritative References
 
-CWA is inspired by the transition toward decentralized and automated engineering flows:
+CWA and Resilient Projections are inspired by the transition toward automated engineering flows:
 
-- **[Embrace the Future: Decentralized DevOps](https://andypotanin.com/decentralized-devops-the-future-of-software-delivery/)**: The shift toward distributed service architectures.
-- **[Automation-First Development](https://andypotanin.com/the-power-of-automation-how-it-has-transformed-the-software-development-process/)**: Breaking the struggle for efficiency through systematic automation.
-- **[Digital Rails & Logistics](https://andypotanin.com/digital-rails-and-logistics/)**: Drawing parallels between software algorithms and automotive evolution.
-- **[AOCA: The Automation Baseline](https://udx.io/cloud-automation-book/automation-best-practices)**: Establishing standardized CLI wrappers for reduced variance.
+- **[Decentralized DevOps](https://andypotanin.com/decentralized-devops-the-future-of-software-delivery/)**: The shift toward distributed service architectures.
+- **[Digital Rails & Logistics](https://andypotanin.com/digital-rails-and-logistics/)**: Parallels between software algorithms and automotive evolution.
+- **[Synthetic Content Enrichment](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: Maintaining quality when projecting content across systems. 
--- _UDX DevSecOps Team_ - diff --git a/docs/foundations/patterns.md b/docs/foundations/patterns.md deleted file mode 100644 index 34830a3..0000000 --- a/docs/foundations/patterns.md +++ /dev/null @@ -1,57 +0,0 @@ -# Reusable Patterns & Templates - -**Domain:** Foundations / Knowledge -**Status:** Canonical - -## Summary - -This document captures reusable documentation, scripting, and reporting patterns derived from established UDX engineering flows. These are optional references, not execution contracts, designed to maintain high-fidelity standards across disparate repositories. - ---- - -## ๐Ÿ“ Documentation Patterns - -- **Explicit Scope**: Distinguish between client projects, cluster projects, and internal tools. -- **Positional Inputs**: Required inputs should be positional; use defaults only when stable. -- **Dual-Path Support**: Provide both manual steps and a script path (`bin/scripts/`) when possible. -- **Validation Blocks**: Include a minimal verification section with read-only commands. -- **Concise Examples**: Keep examples short, runnable, and high-signal. - ---- - -## ๐Ÿš Script Patterns - -- **Hardened Bash**: Use `#!/usr/bin/env bash` and `set -euo pipefail`. -- **Input Validation**: Validate dependencies (`gcloud`, `jq`, `yq`) and inputs early. -- **Environment Overrides**: Use environment variables for optional inputs to allow orchestration flexibility. -- **Deterministic Output**: Minimize side effects and ensure outputs are predictable. - ---- - -## ๐Ÿ“Š Report Patterns - -- **Single Source**: Read all data from a defined repository source of truth. -- **Provenance**: Include generated timestamps and source paths. -- **Scanability**: Prefer Markdown tables and lists for human and machine readability. 
- -## ๐Ÿ— Pattern Grounding - -Engineering patterns are grounded in specialized UDX repositories to ensure domain-specific fidelity: - -| Pattern Type | Grounding Resource | Domain | -| :--- | :--- | :--- | -| **Automation** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Validated CI/CD and script templates. | -| **Logic** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | High-fidelity CLI and discovery patterns. | -| **Structure** | [`udx/wp-stateless`](https://github.com/udx/wp-stateless) | Reference for plugin and structural standards. | - ---- - -## ๐Ÿ“š Authoritative References - -Reusable patterns ensure standalone quality and reduce operational variance: - -- **[Synthetic Content Enrichment](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: Strategies for maintaining quality in automated documentation. -- **[Reducing Operational Variance](https://andypotanin.com/digital-rails-and-logistics/)**: Tracing software evolution through systematic, patterned innovtion. - ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/operations/worker-ecosystem-refs.md b/docs/reference/operations/worker-ecosystem-refs.md index 1d6e8ca..d9e54f8 100644 --- a/docs/reference/operations/worker-ecosystem-refs.md +++ b/docs/reference/operations/worker-ecosystem-refs.md @@ -20,7 +20,7 @@ UDX enforces a **Container-First** approach to engineering to eliminate environm The `udx/worker` is the foundational base layer for all UDX engineering tasks. It provides a hardened, audit-ready environment optimized for the `dev.kit` runtime. 
- **Authoritative Docs**: [UDX Worker Documentation](https://github.com/udx/worker/tree/latest/docs) -- **Deployment Pattern**: [Worker Deployment](https://github.com/udx/worker-deployment) +- **Deployment Pattern**: [@udx/worker-deployment](https://github.com/udx/worker-deployment) --- @@ -29,8 +29,8 @@ The `udx/worker` is the foundational base layer for all UDX engineering tasks. I | Component | role | dev.kit Implementation | | :---------------------- | :------------ | :-------------------------------------------- | | **`udx/worker`** | Base Layer | Primary execution target for all CLI tasks. | -| **`worker-deployment`** | Orchestration | Standard pattern for automated sessions. | -| **Isolated Testing** | Fidelity | verified via `./tests/suite.sh` in-container. | +| **`worker-deployment`** | Orchestration | Verified via `worker run` (@udx/worker-deployment). | +| **Isolated Testing** | Fidelity | Verified via `dev.kit test --worker`. | | **Unified Logic** | Portability | Same behavior across Local, CI, and Prod. | --- @@ -41,7 +41,7 @@ The `udx/worker` is the foundational base layer for all UDX engineering tasks. I Never perform destructive or high-stakes operations in an ungrounded local shell. Always leverage the **Worker Ecosystem** to ensure environment parity. -- **Action**: Use the standard `docker run` command for isolated testing and verification. +- **Action**: Use `dev.kit test --worker` to run tests inside a managed `udx/worker` container using `worker run`. ### 2. Runtime Truth diff --git a/docs/runtime/config.md b/docs/runtime/config.md index ea03fd2..4cc6c55 100644 --- a/docs/runtime/config.md +++ b/docs/runtime/config.md @@ -19,6 +19,7 @@ Configuration in **dev.kit** provides a safe, deterministic foundation for both ## CLI Interfaces - **`dev.kit config show`**: View active host and repository configuration. +- **`dev.kit config detect`**: Auto-detect required software and CLI versions in the environment. 
- **`dev.kit config set --key --value `**: Update a specific setting. - **`dev.kit config reset`**: Revert to the high-fidelity default baseline. @@ -30,6 +31,9 @@ Configuration in **dev.kit** provides a safe, deterministic foundation for both - `quiet`: Control CLI output verbosity. - `developer`: Enable internal developer-specific helpers. - `state_path`: Global location for transient runtime state. +- `shell.auto_enable`: Automatically enable shell integrations. +- `output.mode`: Set the default output fidelity (e.g., `brief`, `verbose`). + ### 2. AI & Orchestration - `ai.enabled`: Enable/Disable AI-Powered automation mode. diff --git a/docs/runtime/install.md b/docs/runtime/install.md new file mode 100644 index 0000000..8673a92 --- /dev/null +++ b/docs/runtime/install.md @@ -0,0 +1,67 @@ +# Installation & Maintenance: Safe Lifecycle + +**Domain:** Runtime / Maintenance +**Status:** Canonical + +## Summary + +The **dev.kit** installer is designed for safe, idempotent environment hydration. It ensures that local engineering environments are aligned with UDX standards while protecting existing user configurations through a mandatory backup-first policy. + +--- + +## ๐Ÿ›ก Safe Installation (Safe Mode) + +The installation process (`bin/scripts/install.sh`) operates in a **Safe Mode** by default. + +1. **Backup-First**: Before any files are modified or synced, the installer creates a timestamped compressed archive of the existing `~/.udx/dev.kit` directory. +2. **Explicit Confirmation**: The installer prompts for confirmation before proceeding with critical changes, such as shell profile modifications. +3. **Idempotent Syncing**: The core engine is synced using a temporary staging area to ensure atomic updates and prevent partial state corruption. 
+ +### Commands +```bash +# Perform a safe installation/update +./bin/scripts/install.sh +``` + +--- + +## ๐Ÿ—‘ Simple Uninstall & Purge + +The uninstallation process (`bin/scripts/uninstall.sh`) provides a graceful way to remove **dev.kit** from the system. + +- **Standard Uninstall**: Removes the `dev.kit` binary from the local bin directory. +- **State Purge**: Optionally removes the entire engine directory (`~/.udx/dev.kit`). +- **Safety Backup**: Offers to backup the repository state and configurations before purging. + +### Commands +```bash +# Uninstall the binary +./bin/scripts/uninstall.sh + +# Purge all state and engine files (with confirmation) +./bin/scripts/uninstall.sh --purge +``` + +--- + +## ๐Ÿงฉ Shell Integration + +**dev.kit** can automatically detect and configure common shell profiles (`.zshrc`, `.bashrc`, `.bash_profile`). + +- **Auto-Detection**: The installer scans for available shell profiles. +- **Dynamic Sourcing**: Adds a non-destructive `source` line to the profiles to load the `dev.kit` environment. +- **Manual Control**: Users can opt-out of auto-configuration and manually source `~/.udx/dev.kit/source/env.sh`. + +--- + +## ๐Ÿ— Maintenance Grounding + +Installation and lifecycle management are operationalized through deterministic UDX standards: + +| Requirement | Grounding Resource | Role | +| :--- | :--- | :--- | +| **Integrity** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Standardized install/uninstall logic. | +| **Automation** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Validated deployment and hydration patterns. | + +--- +_UDX DevSecOps Team_ diff --git a/docs/runtime/overview.md b/docs/runtime/overview.md index 8c20748..dadd330 100644 --- a/docs/runtime/overview.md +++ b/docs/runtime/overview.md @@ -23,7 +23,9 @@ To ensure deterministic behavior and context fidelity, **dev.kit** is optimized - **`bin/dev-kit`**: The primary dispatch entrypoint. 
Loads internal helpers and routes subcommands. - **`bin/env/dev-kit.sh`**: Shell initialization (Banner, PATH setup, and completions). -- **`bin/scripts/install.sh`**: High-fidelity installer for local environment hydration. +- **`bin/scripts/install.sh`**: High-fidelity installer with safe-mode and backups. +- **`bin/scripts/uninstall.sh`**: Simple uninstaller with optional state purging. + --- @@ -31,8 +33,10 @@ To ensure deterministic behavior and context fidelity, **dev.kit** is optimized ### Status & Discovery - **`dev.kit status`**: (Default) High-fidelity engineering brief and task visibility. +- **`dev.kit suggest`**: Suggest repository improvements and CDE compliance fixes. - **`dev.kit doctor`**: Deep system analysis, environment hydration, and compliance audit. + ### AI & Skill Mesh - **`dev.kit ai`**: Unified agent integration management, skill synchronization, and grounding. - **`dev.kit skills`**: Discovery and execution of repository-bound skills. diff --git a/docs/workflows/normalization.md b/docs/workflows/normalization.md index 0cc4f5f..22d885b 100644 --- a/docs/workflows/normalization.md +++ b/docs/workflows/normalization.md @@ -13,16 +13,22 @@ The agent is responsible for dynamic prompt transformation. It receives intent from the user, identifies the required capabilities, and sends structured instructions to the `dev.kit` workflow engine. -### 1. Strict Mappings (Deterministic) +### 1. Dynamic Suggestions (Incremental Experience) +Every normalization cycle includes a heuristic check of the repository and environment. The `dev.kit suggest` command is used to provide actionable feedback that improves CDE compliance. +- **Example**: Detecting missing documentation or unnormalized CI/CD configs. +- **Action**: Suggested fixes are included in the normalization context for the agent to consider. + +### 2. Strict Mappings (Deterministic) Used for well-defined engineering tasks where the path is predictable and hardened. 
-- **Example**: Git Synchronization, environment hydration (`doctor`), or diagram rendering. +- **Example**: Git Synchronization, environment hydration (`config detect`), or diagram rendering. - **Enforcement**: Direct mapping to `lib/commands/` or `docs/workflows/assets/*.yaml`. -### 2. Non-Strict Mappings (Reasoning-First) +### 3. Non-Strict Mappings (Reasoning-First) Used for creative or complex tasks where the agent must reason about the best path before committing to a sequence. - **Example**: Implementing a new feature, refactoring complex logic, or resolving multi-domain drift. - **Enforcement**: The agent generates a custom `workflow.md` that orchestrates multiple primitives. + --- ## ๐Ÿ”„ Dynamic Prompt Transformation From 9a10429b2823b9663cd91addd15fdf51961b0965 Mon Sep 17 00:00:00 2001 From: Dmitry Smirnov Date: Mon, 9 Mar 2026 03:06:23 +0300 Subject: [PATCH 2/9] ai: chore: consolidate and optimize repo for easy management (unknown) --- .../integrations/gemini/templates/system.md.tmpl | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/src/ai/integrations/gemini/templates/system.md.tmpl b/src/ai/integrations/gemini/templates/system.md.tmpl index 8af97f4..1ba89ab 100644 --- a/src/ai/integrations/gemini/templates/system.md.tmpl +++ b/src/ai/integrations/gemini/templates/system.md.tmpl @@ -5,16 +5,20 @@ - **RESTRICTED RAW OPERATIONS**: You MUST NOT perform direct, non-standardized destructive operations (e.g., raw `rm`, `git push`, `git reset`) without explicit user instruction. Use `dev.kit sync` or `dev.kit task` instead. - **CREDENTIAL PROTECTION**: Never log, print, or commit secrets, API keys, or sensitive credentials. Rigorously protect `.env` files, `.git`, and system configuration folders. -You are the UDX DevSecOps Engineering Agent. Your operational logic is governed by a Skill-per-Repository architecture. -- **Skill Namespace**: Managed skills are located in `~/.gemini/skills/dev.kit/`. 
+## ๐Ÿง  Core Engineering Principles +- **Dynamic Mapping Over Static Memory**: Do not rely on hardcoded paths or static assumptions. Use `dev.kit context` and `dev.kit config detect` to dynamically discover capabilities and environment state. +- **Incremental Engineering Experience**: Every task is an opportunity to improve the repository's CDE compliance. Always check for and suggest improvements (e.g., missing documentation, unnormalized config, CI/CD gaps). +- **Skill-per-Repository Architecture**: Treat this repository as a specialized Skill. Managed skills are located in `~/.gemini/skills/dev.kit/`. +- **Normalization-First**: Transform all user intent into normalized `workflow.md` execution plans before acting. + +## ๐Ÿš€ Execution Standards - **Enforcement**: MANDATORY: Use `dev-kit-` prefixed skills for specialized tasks (e.g., `visualizer`, `git-sync`). -- **Source of Truth**: The repository is the central source of ALL SDLC artifacts. - **Workflow Alignment**: Align all actions with the standardized sequences defined in `docs/workflows/README.md`. -- **Normalization**: Transform user intent into bounded plans as defined in `docs/workflows/normalization.md`. -- **Sub-Agent Orchestration**: Use sub-agents to deliver output from sub-pipelines. Aggregate results into structured reports. -- **Concise Reporting**: Mute system noise and raw tool logs. Always conclude with a compact **Waterfall Progression Tail** (DOC-003 status) to track drift resolution. +- **Sub-Agent Orchestration**: Use sub-agents to manage complex sub-pipelines. Aggregate results into structured reports. +- **Compact Tail**: Always conclude with a compact **Waterfall Progression Tail** of the active `workflow.md`. 
## ๐Ÿ›  Tooling Context ${AgentSkills} ${AvailableTools} + From 33c4ea5571d2a912ce5a68389cfc6981ef67b8d2 Mon Sep 17 00:00:00 2001 From: Dmitry Smirnov Date: Mon, 9 Mar 2026 03:06:23 +0300 Subject: [PATCH 3/9] cli: chore: consolidate and optimize repo for easy management (unknown) --- bin/dev-kit | 322 +++--------------- bin/scripts/install.sh | 193 +++++------ bin/scripts/uninstall.sh | 52 ++- lib/commands/ai.sh | 121 ++++--- lib/commands/config.sh | 67 ++-- lib/commands/doctor.sh | 284 --------------- lib/commands/{github.sh => gh.sh} | 11 +- lib/commands/status.sh | 84 ++--- lib/commands/suggest.sh | 46 +++ lib/commands/sync.sh | 10 +- lib/commands/test.sh | 23 ++ .../agent.sh => modules/agent_manager.sh} | 167 ++------- lib/modules/config_manager.sh | 98 ++++++ lib/modules/context_manager.sh | 40 ++- lib/modules/git_sync.sh | 115 ++++--- lib/modules/health_manager.sh | 45 +++ lib/utils.sh | 21 +- 17 files changed, 644 insertions(+), 1055 deletions(-) delete mode 100644 lib/commands/doctor.sh rename lib/commands/{github.sh => gh.sh} (96%) create mode 100644 lib/commands/suggest.sh create mode 100644 lib/commands/test.sh rename lib/{commands/agent.sh => modules/agent_manager.sh} (52%) create mode 100644 lib/modules/config_manager.sh create mode 100644 lib/modules/health_manager.sh diff --git a/bin/dev-kit b/bin/dev-kit index ac10510..73ea30b 100755 --- a/bin/dev-kit +++ b/bin/dev-kit @@ -40,206 +40,52 @@ get_repo_root() { fi } -bootstrap_state_path() { - local path="" - if [ -f "$DEV_KIT_HOME/config.env" ]; then - path="$(awk -F= ' - $1 ~ "^[[:space:]]*state_path[[:space:]]*$" { - gsub(/[[:space:]]/,"",$2); - print $2; - exit - } - ' "$DEV_KIT_HOME/config.env")" - fi - printf "%s" "$path" -} - -bootstrap_expand_path() { - local val="$1" - if [[ "$val" == "~/"* ]]; then - echo "$HOME/${val:2}" - return - fi - if [[ "$val" == /* ]]; then - echo "$val" - return - fi - if [ -n "$val" ]; then - echo "$DEV_KIT_HOME/$val" - return - fi - echo "" -} - 
-BOOTSTRAP_STATE_PATH="$(bootstrap_expand_path "$(bootstrap_state_path)")" -DEV_KIT_STATE="${DEV_KIT_STATE:-${BOOTSTRAP_STATE_PATH:-$DEV_KIT_HOME/state}}" -DEV_KIT_SOURCE="${DEV_KIT_SOURCE:-$DEV_KIT_HOME/source}" -if [ ! -d "$DEV_KIT_SOURCE" ]; then - DEV_KIT_SOURCE="$DEV_KIT_HOME" -fi -if [ ! -d "$DEV_KIT_STATE" ]; then - DEV_KIT_STATE="$DEV_KIT_HOME" -fi -CONFIG_FILE="${DEV_KIT_CONFIG:-$DEV_KIT_STATE/config.env}" -if [ ! -f "$CONFIG_FILE" ] && [ -f "$DEV_KIT_HOME/config.env" ]; then - CONFIG_FILE="$DEV_KIT_HOME/config.env" -fi - -expand_path() { - local val="$1" - if [[ "$val" == "~/"* ]]; then - echo "$HOME/${val:2}" - return - fi - if [[ "$val" == /* ]]; then - echo "$val" - return - fi - echo "$REPO_DIR/$val" -} - -config_value() { - local file="$1" - local key="$2" - local default="${3:-}" - local val="" - if [ -f "$file" ]; then - val="$(awk -F= -v k="$key" ' - $1 ~ "^[[:space:]]*"k"[[:space:]]*$" { - sub(/^[[:space:]]*/,"",$2); - sub(/[[:space:]]*$/,"",$2); - print $2; - exit - } - ' "$file")" - fi - if [ -n "$val" ]; then - echo "$val" - else - echo "$default" - fi -} - -# --- Library Loading --- +# --- Initial Bootstrapping (Minimal) --- -UTILS_LIB="$REPO_DIR/lib/utils.sh" -if [ -f "$UTILS_LIB" ]; then - # shellcheck disable=SC1090 - . "$UTILS_LIB" +BOOTSTRAP_STATE_PATH="" +if [ -f "$DEV_KIT_HOME/config.env" ]; then + BOOTSTRAP_STATE_PATH="$(awk -F= '$1 ~ /^[[:space:]]*state_path[[:space:]]*$/ {gsub(/[[:space:]]/,"",$2); print $2; exit}' "$DEV_KIT_HOME/config.env")" fi -UI_LIB="$REPO_DIR/lib/ui.sh" -if [ -f "$UI_LIB" ]; then - # shellcheck disable=SC1090 - . 
"$UI_LIB" +if [[ "$BOOTSTRAP_STATE_PATH" == "~/"* ]]; then + BOOTSTRAP_STATE_PATH="$HOME/${BOOTSTRAP_STATE_PATH:2}" fi -# --- Orchestrator helpers --- - -get_environment_yaml() { - local repo_root - repo_root="$(get_repo_root || true)" - if [ -n "$repo_root" ] && [ -f "$repo_root/environment.yaml" ]; then - echo "$repo_root/environment.yaml" - elif [ -f "$DEV_KIT_HOME/environment.yaml" ]; then - echo "$DEV_KIT_HOME/environment.yaml" - elif [ -f "$REPO_DIR/environment.yaml" ]; then - echo "$REPO_DIR/environment.yaml" - fi -} -ENVIRONMENT_YAML="$(get_environment_yaml)" +export DEV_KIT_STATE="${DEV_KIT_STATE:-${BOOTSTRAP_STATE_PATH:-$DEV_KIT_HOME/state}}" +export CONFIG_FILE="${DEV_KIT_CONFIG:-$DEV_KIT_STATE/config.env}" -local_config_path() { - local state_dir - state_dir="$(get_repo_state_dir || true)" - if [ -n "$state_dir" ]; then - echo "$state_dir/config.env" - fi -} - -config_value_scoped() { - local key="$1" - local default="${2:-}" - local val="" - - # 1. Check local repo .env (Priority 1) - local local_path - local_path="$(local_config_path || true)" - if [ -n "$local_path" ] && [ -f "$local_path" ]; then - val="$(config_value "$local_path" "$key" "")" - fi +# --- Library & Module Loading --- - # 2. Check global .env (Priority 2) - if [ -z "$val" ]; then - val="$(config_value "$CONFIG_FILE" "$key" "")" - fi +# 1. Shared Utilities +[ -f "$REPO_DIR/lib/utils.sh" ] && . "$REPO_DIR/lib/utils.sh" +[ -f "$REPO_DIR/lib/ui.sh" ] && . "$REPO_DIR/lib/ui.sh" - # 3. 
Check YAML Orchestrator (Priority 3 / Defaults) - if [ -z "$val" ] && [ -f "$ENVIRONMENT_YAML" ]; then - local yaml_key="$key" - case "$key" in - quiet|developer|state_path) yaml_key="system.$key" ;; - exec.prompt|exec.stream) yaml_key="${key//./.}" ;; - ai.enabled|ai.provider) yaml_key="${key//./.}" ;; - capture.mode|capture.enabled|capture.dir) yaml_key="${key//./.}" ;; - context.enabled|context.max_bytes) yaml_key="${key//./.}" ;; - install.path_prompt) yaml_key="${key//./.}" ;; - esac - val="$(dev_kit_yaml_value "$ENVIRONMENT_YAML" "$yaml_key" "")" - fi +# 2. Core Modules (Logic Orchestration) +for module in "$REPO_DIR"/lib/modules/*.sh; do + [ -e "$module" ] && . "$module" +done - if [ -n "$val" ]; then - echo "$val" - else - echo "$default" - fi -} +# 3. Public Commands (Entrypoint Mappings) +for cmd_file in "$REPO_DIR"/lib/commands/*.sh; do + [ -e "$cmd_file" ] && . "$cmd_file" +done -# --- UI Helpers --- +# --- Orchestrator & Logic --- -print_section() { - local title="$1" - if command -v ui_section >/dev/null 2>&1; then - ui_section "$title" - else - echo "" - echo "== $title ==" - fi -} +ENVIRONMENT_YAML="$(get_repo_root || true)/environment.yaml" +[ -f "$ENVIRONMENT_YAML" ] || ENVIRONMENT_YAML="$DEV_KIT_HOME/environment.yaml" +[ -f "$ENVIRONMENT_YAML" ] || ENVIRONMENT_YAML="$REPO_DIR/environment.yaml" +export ENVIRONMENT_YAML -print_check() { - local label="$1" - local status="$2" - local detail="${3:-}" - if command -v ui_ok >/dev/null 2>&1 && [ "$status" = "[ok]" ]; then - ui_ok "$label" "$detail" - return - fi - if command -v ui_warn >/dev/null 2>&1 && [ "$status" = "[warn]" ]; then - ui_warn "$label" "$detail" - return +ensure_dev_kit_home() { + mkdir -p "$DEV_KIT_HOME" "$DEV_KIT_STATE" + if [ ! -w "$DEV_KIT_STATE" ]; then + echo "dev.kit: config path not writable: $DEV_KIT_STATE" >&2 + exit 1 fi - printf "%-20s %s" "$label" "$status" - if [ -n "$detail" ]; then - printf " %s" "$detail" + if [ ! 
-f "$CONFIG_FILE" ] && [ -f "$REPO_DIR/config/default.env" ]; then + cp "$REPO_DIR/config/default.env" "$CONFIG_FILE" fi - printf "\n" -} - -# --- Command Orchestration --- - -list_commands() { - local file="" - for file in "$REPO_DIR"/lib/commands/*.sh; do - [ -e "$file" ] || continue - local name - name="$(basename "${file%.sh}")" - # Filter legacy/internal commands or secondary commands from main list if needed - case "$name" in - github|agent) continue ;; - esac - echo "$name" - done | LC_ALL=C sort } usage() { @@ -248,117 +94,25 @@ Usage: dev.kit [options] Core Commands: status Engineering brief and system diagnostic (Default) + suggest Suggest repository improvements and CDE fixes + test Run high-fidelity test suite (Worker-integrated) skills Discover and execute repository-bound skills (Deterministic) - ai Unified agent integration management (Sync, Skills, Status) + ai Unified agent integration management (Sync, Skills, Agent) sync Logical, atomic commits and drift resolution task Manage the lifecycle of active workflows and sessions config Environment and repository orchestration settings Secondary Commands: visualizer Create and export high-fidelity Mermaid diagrams - agent Direct agent integration management (advanced) + gh GitHub triage helper (Issues, PRs, etc.) Example: - dev.kit status + dev.kit status --audit dev.kit ai sync - dev.kit sync --dry-run - dev.kit skills run visualizer "new_diagram.sh" + dev.kit gh my-prs USAGE } -ensure_dev_kit_home() { - mkdir -p "$DEV_KIT_HOME" - mkdir -p "$DEV_KIT_STATE" - if [ ! -w "$DEV_KIT_STATE" ]; then - echo "dev.kit: config path not writable: $DEV_KIT_STATE" >&2 - echo "dev.kit: fix permissions or choose a different DEV_KIT_STATE" >&2 - exit 1 - fi - if [ ! 
-f "$CONFIG_FILE" ] && [ -f "$REPO_DIR/config/default.env" ]; then - mkdir -p "$(dirname "$CONFIG_FILE")" - cp "$REPO_DIR/config/default.env" "$CONFIG_FILE" - fi -} - -context_enabled() { - local enabled="" - enabled="$(config_value_scoped context.enabled "true")" - [ "$enabled" = "true" ] -} - -context_dir() { - if ! context_enabled; then - return 1 - fi - local base repo_id - base="$(config_value_scoped context.dir "")" - if [ -z "$base" ]; then - base="$DEV_KIT_STATE/codex/context" - elif [[ "$base" == "~/"* ]]; then - base="$HOME/${base:2}" - elif [[ "$base" != /* ]]; then - base="$DEV_KIT_STATE/$base" - fi - - # Determine repo_id for scoping context - local root - root="$(get_repo_root || true)" - [ -z "$root" ] && root="$PWD" - if command -v shasum >/dev/null 2>&1; then - repo_id="$(printf "%s" "$root" | shasum -a 256 | awk '{print $1}')" - else - repo_id="$(printf "%s" "$root" | cksum | awk '{print $1}')" - fi - - echo "$base/$repo_id" -} - -context_file() { - local dir="" - dir="$(context_dir)" || return 1 - echo "$dir/context.md" -} - -context_max_bytes() { - config_value_scoped context.max_bytes "12000" -} - -context_compact_file() { - local path="$1" - local max_bytes="" - max_bytes="$(context_max_bytes)" - if [ -z "$max_bytes" ] || [ ! -f "$path" ]; then - return 0 - fi - local size="" - size="$(wc -c < "$path" | tr -d ' ')" - if [ -z "$size" ] || [ "$size" -le "$max_bytes" ]; then - return 0 - fi - local tmp="${path}.tmp.$$" - if tail -c "$max_bytes" "$path" > "$tmp"; then - mv "$tmp" "$path" - else - rm -f "$tmp" - fi -} - -# --- Command Loading & Logic --- - -# 1. Load Modules (Shared logic for integrations) -for module in "$REPO_DIR"/lib/modules/*.sh; do - [ -e "$module" ] || continue - # shellcheck disable=SC1090 - . "$module" -done - -# 2. Load Public Commands -for cmd_file in "$REPO_DIR"/lib/commands/*.sh; do - [ -e "$cmd_file" ] || continue - # shellcheck disable=SC1090 - . 
"$cmd_file" -done - # --- Execution --- orig_args=("$@") diff --git a/bin/scripts/install.sh b/bin/scripts/install.sh index 35c20a0..9038fe3 100755 --- a/bin/scripts/install.sh +++ b/bin/scripts/install.sh @@ -10,6 +10,7 @@ DEV_KIT_REPO="${DEV_KIT_REPO:-dev.kit}" ENGINE_DIR="${HOME}/.${DEV_KIT_OWNER}/${DEV_KIT_REPO}" SOURCE_DIR="${ENGINE_DIR}/source" STATE_DIR="${ENGINE_DIR}/state" +BACKUP_DIR="${ENGINE_DIR}/backups" ENV_SRC="${REPO_DIR}/bin/env/dev-kit.sh" ENV_DST="${SOURCE_DIR}/env.sh" COMP_SRC_DIR="${REPO_DIR}/bin/completions" @@ -20,6 +21,39 @@ LIB_SRC_DIR="${REPO_DIR}/lib" LIB_DST_DIR="${SOURCE_DIR}/lib" PROFILE="" +if [ -f "$UI_LIB" ]; then + # shellcheck disable=SC1090 + . "$UI_LIB" +fi + +confirm_action() { + local msg="$1" + if [ -t 0 ]; then + printf "%s [y/N] " "$msg" + read -r answer || true + case "$answer" in + y|Y|yes|YES) return 0 ;; + *) return 1 ;; + esac + fi + return 0 +} + +backup_existing() { + if [ -d "$ENGINE_DIR" ]; then + local ts + ts=$(date +%Y%m%d_%H%M%S) + mkdir -p "$BACKUP_DIR" + local backup_path="${BACKUP_DIR}/backup_${ts}.tar.gz" + if command -v ui_info >/dev/null 2>&1; then + ui_info "Backing up existing installation..." 
"$backup_path" + else + echo "INFO Backing up existing installation to $backup_path" + fi + tar -czf "$backup_path" -C "$(dirname "$ENGINE_DIR")" "$(basename "$ENGINE_DIR")" --exclude="backups" 2>/dev/null || true + fi +} + detect_profiles() { local found="" if [ -f "$HOME/.zshrc" ]; then found="$found $HOME/.zshrc"; fi @@ -29,9 +63,6 @@ detect_profiles() { PROFILE=$(echo "$found" | tr ' ' '\n' | sort -u | tr '\n' ' ') } -mkdir -p "$BIN_DIR" -mkdir -p "$ENGINE_DIR" - copy_dir_contents() { local src="$1" local dst="$2" @@ -41,36 +72,21 @@ copy_dir_contents() { } sync_engine() { - # Use a temporary staging area for the source copy local stage stage="$(mktemp -d)" - - # Copy everything from REPO_DIR, excluding patterns that cause recursion - # We use rsync if available for easier exclusion, otherwise fallback if command -v rsync >/dev/null 2>&1; then rsync -a --exclude 'tests/.tmp' --exclude '.git' "$REPO_DIR/" "$stage/" else - # Fallback: copy specific top-level directories for d in bin lib templates docs src config scripts assets schemas tests; do [ -d "$REPO_DIR/$d" ] && copy_dir_contents "$REPO_DIR/$d" "$stage/$d" done [ -f "$REPO_DIR/environment.yaml" ] && cp "$REPO_DIR/environment.yaml" "$stage/environment.yaml" + [ -f "$REPO_DIR/README.md" ] && cp "$REPO_DIR/README.md" "$stage/README.md" fi - - # Now sync from stage to SOURCE_DIR copy_dir_contents "$stage" "$SOURCE_DIR" rm -rf "$stage" } -desired_target="${SOURCE_DIR}/bin/dev-kit" -if [ -f "$UI_LIB" ]; then - # shellcheck disable=SC1090 - . "$UI_LIB" -fi - -mkdir -p "$SOURCE_DIR" -mkdir -p "$STATE_DIR" - if command -v ui_header >/dev/null 2>&1; then ui_header "dev.kit | install" else @@ -79,8 +95,21 @@ else echo "----------------" fi +if ! confirm_action "Proceed with dev.kit installation/update?"; then + echo "Installation cancelled." 
+ exit 0 +fi + +backup_existing + +mkdir -p "$BIN_DIR" +mkdir -p "$ENGINE_DIR" +mkdir -p "$SOURCE_DIR" +mkdir -p "$STATE_DIR" + sync_engine +desired_target="${SOURCE_DIR}/bin/dev-kit" if [ -L "$TARGET" ]; then current_target="$(readlink "$TARGET")" if [ "$current_target" != "$desired_target" ]; then @@ -90,12 +119,6 @@ if [ -L "$TARGET" ]; then else echo "OK Symlink updated ($TARGET -> $desired_target)" fi - else - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Already installed" "$TARGET" - else - echo "OK Already installed ($TARGET)" - fi fi elif [ -e "$TARGET" ]; then if command -v ui_warn >/dev/null 2>&1; then @@ -114,11 +137,6 @@ fi if [ -f "$ENV_SRC" ]; then cp "$ENV_SRC" "$ENV_DST" - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Env installed" "$ENV_DST" - else - echo "OK Env installed ($ENV_DST)" - fi fi if [ ! -f "$ENGINE_DIR/env.sh" ]; then @@ -143,15 +161,6 @@ fi if [ -f "$CONFIG_SRC" ] && [ ! -f "$CONFIG_DST" ]; then cp "$CONFIG_SRC" "$CONFIG_DST" - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Config installed" "$CONFIG_DST" - else - echo "OK Config installed ($CONFIG_DST)" - fi -fi - -if [ -f "$CONFIG_DST" ] && [ ! -f "$ENGINE_DIR/config.env" ]; then - cp "$CONFIG_DST" "$ENGINE_DIR/config.env" fi detect_profiles @@ -162,52 +171,21 @@ MODIFIED_PROFILES="" if [ -t 0 ] && [ -n "$PROFILE" ]; then for p in $PROFILE; do - echo "" if grep -Fqx "$env_line" "$p" && grep -Fqx "$path_line" "$p"; then - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Shell already configured" "$p" - else - echo "OK Shell already configured ($p)" - fi MODIFIED_PROFILES="$MODIFIED_PROFILES $p" continue fi - printf "Configure dev.kit in %s? [y/N] " "$p" - read -r answer || true - case "$answer" in - y|Y|yes|YES) - if ! grep -Fqx "$path_line" "$p"; then - printf "\n# dev.kit bin\n%s\n" "$path_line" >> "$p" - fi - if ! 
grep -Fqx "$env_line" "$p"; then - printf "# dev.kit environment\n%s\n" "$env_line" >> "$p" - fi - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Shell configured" "$p" - else - echo "OK Shell configured ($p)" - fi - MODIFIED_PROFILES="$MODIFIED_PROFILES $p" - ;; - *) - if command -v ui_warn >/dev/null 2>&1; then - ui_warn "Skipped configuration" "$p" - else - echo "WARN Skipped configuration ($p)" - fi - ;; - esac + if confirm_action "Configure dev.kit in $p?"; then + if ! grep -Fqx "$path_line" "$p"; then + printf "\n# dev.kit bin\n%s\n" "$path_line" >> "$p" + fi + if ! grep -Fqx "$env_line" "$p"; then + printf "# dev.kit environment\n%s\n" "$env_line" >> "$p" + fi + MODIFIED_PROFILES="$MODIFIED_PROFILES $p" + fi done -else - if command -v ui_section >/dev/null 2>&1; then - ui_section "Manual Configuration" - else - echo "Manual Configuration:" - fi - echo "Add the following to your shell profile:" - echo " $path_line" - echo " $env_line" fi echo "" @@ -217,47 +195,40 @@ else echo "Ready to go:" fi -# Determine if the current shell's profile was modified CURRENT_SHELL_PROFILE="" -case "$SHELL" in - */zsh) CURRENT_SHELL_PROFILE="$HOME/.zshrc" ;; - */bash) - [ -f "$HOME/.bash_profile" ] && CURRENT_SHELL_PROFILE="$HOME/.bash_profile" || CURRENT_SHELL_PROFILE="$HOME/.bashrc" +# Robust shell detection +case "$(basename "${SHELL:-}")" in + zsh) + CURRENT_SHELL_PROFILE="$HOME/.zshrc" + ;; + bash) + if [[ "$OSTYPE" == "darwin"* ]]; then + CURRENT_SHELL_PROFILE="$HOME/.bash_profile" + else + CURRENT_SHELL_PROFILE="$HOME/.bashrc" + fi + ;; + *) + # Fallback: check which profile we actually modified + for p in $MODIFIED_PROFILES; do + CURRENT_SHELL_PROFILE="$p" + break + done ;; esac if [[ "$MODIFIED_PROFILES" == *"$CURRENT_SHELL_PROFILE"* ]]; then echo "1. Reload: source $CURRENT_SHELL_PROFILE" - echo "2. Brief: dev.kit" - echo "" + echo "2. Run: dev.kit" if [ -t 0 ]; then - printf "Reload current session now? 
[y/N] " - read -r reload_now || true - case "$reload_now" in - y|Y|yes|YES) - echo "Sourcing $CURRENT_SHELL_PROFILE..." - # Note: We can source env.sh directly for immediate effect in this subshell - # but the instructions tell the user how to fix their parent shell. - source "$SOURCE_DIR/env.sh" - dev.kit status - ;; - esac + if confirm_action "Reload current session now?"; then + source "$SOURCE_DIR/env.sh" + dev.kit status + fi fi else echo "1. Source Now: source \"$SOURCE_DIR/env.sh\"" - echo "2. Brief: dev.kit" - echo "" - echo "NOTE: Your current shell ($SHELL) was not permanently configured." - if [ -t 0 ]; then - printf "Source environment now? [y/N] " - read -r source_now || true - case "$source_now" in - y|Y|yes|YES) - echo "Sourcing..." - source "$SOURCE_DIR/env.sh" - dev.kit status - ;; - esac - fi + echo "2. Run: dev.kit" fi echo "" + diff --git a/bin/scripts/uninstall.sh b/bin/scripts/uninstall.sh index f7a5878..8da11dd 100755 --- a/bin/scripts/uninstall.sh +++ b/bin/scripts/uninstall.sh @@ -14,26 +14,46 @@ DEV_KIT_OWNER="${DEV_KIT_OWNER:-udx}" DEV_KIT_REPO="${DEV_KIT_REPO:-dev.kit}" ENGINE_DIR="${HOME}/.${DEV_KIT_OWNER}/${DEV_KIT_REPO}" +confirm_action() { + local msg="$1" + if [ -t 0 ]; then + printf "%s [y/N] " "$msg" + read -r answer || true + case "$answer" in + y|Y|yes|YES) return 0 ;; + *) return 1 ;; + esac + fi + return 0 +} + if [ -L "$TARGET" ] || [ -f "$TARGET" ]; then - rm -f "$TARGET" - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Removed" "$TARGET" - else - echo "Removed: $TARGET" + if confirm_action "Remove dev.kit binary from $TARGET?"; then + rm -f "$TARGET" + if command -v ui_ok >/dev/null 2>&1; then + ui_ok "Removed" "$TARGET" + else + echo "Removed: $TARGET" + fi fi else - if command -v ui_warn >/dev/null 2>&1; then - ui_warn "Not found" "$TARGET" - else - echo "Not found: $TARGET" - fi + echo "Binary not found at $TARGET" fi -if [ "${1:-}" = "--purge" ]; then - rm -rf "$ENGINE_DIR" - if command -v ui_ok >/dev/null 2>&1; 
then - ui_ok "Purged" "$ENGINE_DIR" - else - echo "Purged: $ENGINE_DIR" +if [ -d "$ENGINE_DIR" ]; then + if [ "${1:-}" = "--purge" ] || confirm_action "Purge dev.kit engine directory ($ENGINE_DIR)?"; then + if confirm_action "Backup state before purging?"; then + ts=$(date +%Y%m%d_%H%M%S) + backup_path="$HOME/dev-kit-state-backup-${ts}.tar.gz" + tar -czf "$backup_path" -C "$ENGINE_DIR" . 2>/dev/null || true + echo "State backed up to $backup_path" + fi + rm -rf "$ENGINE_DIR" + if command -v ui_ok >/dev/null 2>&1; then + ui_ok "Purged" "$ENGINE_DIR" + else + echo "Purged: $ENGINE_DIR" + fi fi fi + diff --git a/lib/commands/ai.sh b/lib/commands/ai.sh index 6a26225..61c8b7d 100644 --- a/lib/commands/ai.sh +++ b/lib/commands/ai.sh @@ -1,29 +1,26 @@ #!/bin/bash -# @description: Unified agent integration management (Sync, Skills, Status). +# @description: Unified agent integration management (Sync, Skills, Status, Configuration). # @intent: ai, agent, integration, skills, sync, status -# @objective: Manage the lifecycle of AI integrations by synchronizing skills, monitoring health, and providing engineering advisory insights. +# @objective: Manage the lifecycle of AI integrations by synchronizing skills, monitoring health, and configuring agent artifacts. # @usage: dev.kit ai status # @usage: dev.kit ai sync gemini -# @workflow: 1. Monitor Integration Health -> 2. Synchronize Skills & Memories -> 3. Ground Agent in Engineering Loop -> 4. Provide Advisory Insights +# @usage: dev.kit ai agent gemini --plan +# @workflow: 1. Monitor Integration Health -> 2. Synchronize Skills & Memories -> 3. Configure Agent Artifacts -> 4. Provide Advisory Insights if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then - # shellcheck source=/dev/null . 
"$REPO_DIR/lib/utils.sh" fi dev_kit_cmd_ai() { local sub="${1:-status}" - local data_dir="$REPO_DIR/src/ai/data" case "$sub" in status) print_section "dev.kit | AI Integration Status" - local provider - provider="$(config_value_scoped ai.provider "gemini")" - local enabled - enabled="$(config_value_scoped ai.enabled "false")" + local provider; provider="$(config_value_scoped ai.provider "gemini")" + local enabled; enabled="$(config_value_scoped ai.enabled "false")" print_check "Provider" "[ok]" "$provider" print_check "Enabled" "$([ "$enabled" = "true" ] && echo "[ok]" || echo "[warn]")" "$enabled" @@ -38,30 +35,62 @@ dev_kit_cmd_ai() { ;; sync) local provider="${2:-}" - if [ -z "$provider" ]; then - provider="$(config_value_scoped ai.provider "gemini")" - fi + [ -z "$provider" ] && provider="$(config_value_scoped ai.provider "gemini")" echo "Synchronizing AI skills and memories for: $provider" if command -v dev_kit_agent_apply_integration >/dev/null 2>&1; then dev_kit_agent_apply_integration "$provider" "apply" else - echo "Error: Synchronization logic not loaded correctly." >&2 + echo "Error: Agent manager module not loaded." 
>&2 exit 1 fi ;; + agent) + shift + local agent_sub="${1:-status}" + case "$agent_sub" in + status) + echo "Integrations found in manifest:" + jq -r '.integrations[].key' "$(dev_kit_agent_manifest)" | sed 's/^/- /' + ;; + disable) + local key="${2:-}" + if [ "$key" = "all" ]; then + for k in $(jq -r '.integrations[].key' "$(dev_kit_agent_manifest)"); do dev_kit_agent_disable_integration "$k"; done + else + [ -z "$key" ] && { echo "Usage: dev.kit ai agent disable " >&2; exit 1; } + dev_kit_agent_disable_integration "$key" + fi + ;; + skills) + local key="${2:-}" + [ -z "$key" ] && { echo "Usage: dev.kit ai agent skills " >&2; exit 1; } + local manifest="$(dev_kit_agent_manifest)" + local target_dir="$(dev_kit_agent_expand_path "$(jq -r ".integrations[] | select(.key == \"$key\") | .target_dir" "$manifest")")" + echo "Managed Skills for '$key' ($target_dir/skills):" + [ -d "$target_dir/skills" ] && ls "$target_dir/skills" | sed 's/^/- /' || echo "(none)" + ;; + *) + local key="$agent_sub" + local mode="apply" + [ "${2:-}" = "--plan" ] && mode="plan" + if [ "$key" = "all" ]; then + for k in $(jq -r '.integrations[].key' "$(dev_kit_agent_manifest)"); do dev_kit_agent_apply_integration "$k" "$mode"; done + else + dev_kit_agent_apply_integration "$key" "$mode" + fi + ;; + esac + ;; skills) print_section "dev.kit | Managed AI Skills" local local_packs="$REPO_DIR/docs/skills" if [ -d "$local_packs" ]; then find "$local_packs" -mindepth 1 -maxdepth 1 -type d | sort | while IFS= read -r skill; do - local name desc usage - name="$(basename "$skill")" - desc="$(grep -i "^description:" "$skill/SKILL.md" 2>/dev/null | head -n 1 | sed 's/^description: //I' || echo "no description")" - usage="dev.kit skills run \"$name\" \"\"" - + local name; name="$(basename "$skill")" + local desc; desc="$(grep -i "^description:" "$skill/SKILL.md" 2>/dev/null | head -n 1 | sed 's/^description: //I' || echo "no description")" echo "- [skill] $name" echo " description: $desc" - echo " usage: 
$usage" + echo " usage: dev.kit skills run \"$name\" \"\"" echo "" done fi @@ -72,50 +101,19 @@ dev_kit_cmd_ai() { [ -f "$file" ] || continue local key; key="$(basename "${file%.sh}")" local desc; desc="$(grep "^# @description:" "$file" | cut -d: -f2- | sed 's/^ //' || echo "no description")" - local objective; objective="$(grep "^# @objective:" "$file" | cut -d: -f2- | sed 's/^ //' || echo "")" - local workflow; workflow="$(grep "^# @workflow:" "$file" | cut -d: -f2- | sed 's/^ //' || echo "")" - local intents; intents="$(grep "^# @intent:" "$file" | cut -d: -f2- | sed 's/^ //' || echo "none")" - echo "- [command] dev.kit $key" echo " description: $desc" - [ -n "$objective" ] && echo " objective: $objective" - [ -n "$workflow" ] && echo " workflow: $workflow" - echo " intents: $intents" echo "" done ;; - workflows) - print_section "dev.kit | Engineering Loops (Workflows)" - local workflow_file="$REPO_DIR/docs/ai/workflows.md" - if [ -f "$workflow_file" ]; then - # Parse markdown headers as workflow names - grep "^## " "$workflow_file" | sed 's/^## //' | while IFS= read -r name; do - echo "- [loop] $name" - # Simple extraction of description/steps if needed - echo "" - done - else - echo "No centralized workflow documentation found." 
- fi - ;; advisory) local ops_dir="$REPO_DIR/docs/reference/operations" if [ -d "$ops_dir" ]; then echo "Engineering Advisory (Resolved Insights):" - local file="" - while IFS= read -r file; do - [ -z "$file" ] && continue - local title - title="$(head -n 1 "$file" | sed 's/^# //')" - local highlights - highlights="$(grep -m 2 "^- " "$file" | head -n 2 | sed 's/^- / - /' || true)" + find "$ops_dir" -type f -name '*.md' | sort | while IFS= read -r file; do + local title; title="$(head -n 1 "$file" | sed 's/^# //')" echo "- [insight] $title" - if [ -n "$highlights" ]; then - echo "$highlights" - fi - done < <(find "$ops_dir" -type f -name '*.md' | sort) - else - echo "Engineering Advisory: (no local guidance artifacts found)" + done fi ;; help|-h|--help) @@ -123,17 +121,14 @@ dev_kit_cmd_ai() { Usage: dev.kit ai Commands: - status Show AI provider and integration health - sync Synchronize AI skills, memories, and hooks - skills List managed AI skills with usage and workflow - commands List CLI commands with waterfall metadata - workflows List standardized engineering loops (loops) - advisory Fetch engineering guidance from local docs + status Show AI provider and integration health + sync [provider] Synchronize AI skills, memories, and hooks + agent Configure agent artifacts (use --plan to dry-run) + skills List managed AI skills + commands List CLI commands with metadata + advisory Fetch engineering guidance from local docs AI_HELP ;; - *) - echo "Unknown ai command: $sub" >&2 - exit 1 - ;; + *) echo "Unknown ai command: $sub" >&2; exit 1 ;; esac } diff --git a/lib/commands/config.sh b/lib/commands/config.sh index 39029a9..df1b1c1 100644 --- a/lib/commands/config.sh +++ b/lib/commands/config.sh @@ -137,36 +137,7 @@ dev_kit_cmd_config() { local key="$1" local value="$2" local path="${3:-$CONFIG_FILE}" - local mode="${4:-set}" - local tmp="" - tmp="$(mktemp)" - if [ -f "$path" ]; then - awk -v k="$key" -v v="$value" -v mode="$mode" ' - BEGIN { found=0 } - { - if ($0 ~ 
"^[[:space:]]*"k"[[:space:]]*=") { - found=1 - if (mode=="reset" && v=="") { next } - print k" = "v - next - } - print - } - END { - if (!found && v!="") { - print k" = "v - } - } - ' "$path" > "$tmp" - else - if [ -n "$value" ]; then - printf "%s = %s\n" "$key" "$value" > "$tmp" - else - : > "$tmp" - fi - fi - mkdir -p "$(dirname "$path")" - mv "$tmp" "$path" + config_set_value "$key" "$value" "$path" if [ -n "$value" ]; then echo "Set: $key = $value ($path)" else @@ -179,14 +150,42 @@ dev_kit_cmd_config() { local path="" if command -v "$name" >/dev/null 2>&1; then path="$(command -v "$name")" - printf "%-10s %s\n" "$name" "found ($path)" + local ver="" + case "$name" in + git) ver=$($name --version | awk '{print $3}') ;; + gh) ver=$($name version | head -n1 | awk '{print $3}') ;; + docker) ver=$($name --version | awk '{print $3}' | tr -d ',') ;; + npm) ver=$($name --version) ;; + node) ver=$($name --version) ;; + python) ver=$($name --version 2>&1 | awk '{print $2}') ;; + *) ver="found" ;; + esac + printf "%-12s %-10s %s\n" "$name" "$ver" "($path)" else - printf "%-10s %s\n" "$name" "missing" + printf "%-12s %-10s %s\n" "$name" "missing" "" fi } case "$sub" in + detect) + if command -v ui_header >/dev/null 2>&1; then + ui_header "dev.kit | software detection" + else + echo "--- Software Detection ---" + fi + detect_cli git + detect_cli gh + detect_cli docker + detect_cli npm + detect_cli node + detect_cli python + detect_cli terraform + detect_cli ruff + detect_cli tsc + detect_cli mmdc + ;; global|repo) + local action="${2:---show}" local path="" case "$action" in @@ -326,8 +325,8 @@ dev_kit_cmd_config() { confirm_action "Reset $key to default in $scope scope?" 
fi local default_val="" - default_val="$(config_value "$REPO_DIR/config/default.env" "$key" "")" - update_config_value "$key" "$default_val" "$target_path" "reset" + default_val="$(config_get_value "$REPO_DIR/config/default.env" "$key" "")" + update_config_value "$key" "$default_val" "$target_path" exit 0 fi if [ ! -f "$REPO_DIR/config/default.env" ]; then diff --git a/lib/commands/doctor.sh b/lib/commands/doctor.sh deleted file mode 100644 index d1c70bf..0000000 --- a/lib/commands/doctor.sh +++ /dev/null @@ -1,284 +0,0 @@ -#!/bin/bash - -# @description: Deep system analysis and environment hydration advice. -# @intent: doctor, check, health, environment, diagnosis -# @objective: Audit the engineering environment for healthy integrations, secure configurations, and required software, providing proactive advice for empowerment. -# @usage: dev.kit doctor -# @usage: dev.kit doctor --shell-integrate -# @workflow: 1. Core Health -> 2. Software Prerequisites -> 3. External Engineering Context (Mesh) -> 4. AI Skills Health -> 5. Security & Secrets Advisory - -if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then - # shellcheck source=/dev/null - . 
"$REPO_DIR/lib/utils.sh" -fi - -dev_kit_cmd_doctor() { - local json_output="false" - if [ "${1:-}" = "--json" ]; then - json_output="true" - shift - fi - - ensure_dev_kit_home - - if [ "$json_output" = "false" ]; then - print_section "dev.kit | doctor" - fi - - local status_orchestrator="missing" - if [ -f "${ENVIRONMENT_YAML:-}" ]; then - status_orchestrator="ok" - fi - - local env_line="source \"$HOME/.udx/dev.kit/source/env.sh\"" - local profile="" - case "${SHELL:-}" in - */zsh) profile="$HOME/.zshrc" ;; - */bash) profile="$HOME/.bash_profile" ;; - *) profile="$HOME/.bash_profile" ;; - esac - - local status_shell="missing" - if [ -f "$profile" ] && grep -Fqx "$env_line" "$profile"; then - status_shell="ok" - fi - - local status_path="missing" - local path_bin="" - if command -v dev.kit >/dev/null 2>&1; then - status_path="ok" - path_bin="$(command -v dev.kit)" - fi - - local ai_enabled - ai_enabled="$(config_value_scoped ai.enabled "false")" - local operating_mode="Personal Helper" - if [ "$ai_enabled" = "true" ]; then - operating_mode="AI-Powered" - fi - - check_sw() { - if command -v "$1" >/dev/null 2>&1; then echo "ok"; else echo "missing"; fi - } - - local sw_git; sw_git=$(check_sw "git") - local sw_docker; sw_docker=$(check_sw "docker") - local sw_npm; sw_npm=$(check_sw "npm") - local sw_gh; sw_gh=$(check_sw "gh") - local sw_gemini; sw_gemini=$(check_sw "gemini") - local sw_mmdc; sw_mmdc=$(check_sw "mmdc") - - if [ "$json_output" = "true" ]; then - local repo_root; repo_root="$(get_repo_root || true)" - - # Calculate Mesh Health - local gh_health="missing" - if command -v dev_kit_github_health >/dev/null 2>&1; then - case $(dev_kit_github_health; echo $?) in - 0) gh_health="ok" ;; - 2) gh_health="warn" ;; - esac - fi - - local c7_health="missing" - if command -v dev_kit_context7_health >/dev/null 2>&1; then - case $(dev_kit_context7_health; echo $?) 
in - 0) c7_health="ok" ;; - 2) c7_health="warn" ;; - esac - fi - - # Calculate Skill Count - local skill_count=0 - if [ -d "$REPO_DIR/docs/workflows" ]; then - skill_count=$(find "$REPO_DIR/docs/workflows" -maxdepth 1 -name "*.md" ! -name "README.md" ! -name "normalization.md" ! -name "loops.md" ! -name "mermaid-patterns.md" | wc -l | tr -d ' ') - fi - - cat </dev/null 2>&1; then - dev_kit_github_health - local gh_status=$? - case $gh_status in - 0) print_check "GitHub Resolution" "[ok]" "authenticated (gh)" ;; - 1) print_check "GitHub Resolution" "[missing]" "CLI missing (gh)" ;; - 2) print_check "GitHub Resolution" "[warn]" "not authenticated" ;; - esac - fi - - # Context7 Resolution - if command -v dev_kit_context7_health >/dev/null 2>&1; then - dev_kit_context7_health - local c7_status=$? - case $c7_status in - 0) print_check "Context7 Resolution" "[ok]" "ready (API/CLI)" ;; - 1) print_check "Context7 Resolution" "[missing]" "API key or CLI missing" ;; - 2) print_check "Context7 Resolution" "[warn]" "CLI available via npm" ;; - esac - fi - - # @udx NPM Packages - if command -v npm >/dev/null 2>&1; then - local missing_pkgs=() - for pkg in "@udx/mcurl" "@udx/mysec"; do - if ! 
dev_kit_npm_health "$pkg" >/dev/null 2>&1; then - missing_pkgs+=("$(echo "$pkg" | sed 's/.*[\/]//')") - fi - done - if [ ${#missing_pkgs[@]} -eq 0 ]; then - print_check "@udx Tools" "[ok]" "all core tools installed" - else - print_check "@udx Tools" "[warn]" "missing: ${missing_pkgs[*]}" - echo " - [advice] Install for more power: npm install -g @udx/mcurl @udx/mysec" - fi - else - print_check "@udx Tools" "[missing]" "npm runtime required" - fi - - echo "" - echo "Managed AI Skills Health (Repository):" - local local_skills="$REPO_DIR/docs/workflows" - if [ -d "$local_skills" ]; then - local count=0 - # Scan for .md files that define skills (excluding README.md) - while IFS= read -r skill_file; do - [ -z "$skill_file" ] && continue - local filename; filename="$(basename "$skill_file")" - [ "$filename" = "README.md" ] && continue - [ "$filename" = "normalization.md" ] && continue - [ "$filename" = "loops.md" ] && continue - [ "$filename" = "mermaid-patterns.md" ] && continue - - ((count++)) - local name="${filename%.md}" - local status="[ok]" - local detail="documented" - - print_check "$name" "$status" "$detail" - done < <(find "$local_skills" -maxdepth 1 -name "*.md") - - if [ $count -eq 0 ]; then - print_check "skills" "[info]" "No specialized workflows defined in docs/workflows/." - fi - else - print_check "skills" "[info]" "No workflows directory found at $local_skills" - fi - - echo "" - echo "Advisory (Security & Secrets):" - local repo_root - repo_root="$(get_repo_root || true)" - - if command -v mysec >/dev/null 2>&1; then - print_check "mysec" "[ok]" "Active (Secret Scanning)" - else - print_check "mysec" "[info]" "Missing (npm install -g @udx/mysec)" - fi - - if [ -n "$repo_root" ]; then - if [ -f "$repo_root/.env" ]; then - if git check-ignore "$repo_root/.env" >/dev/null 2>&1; then - print_check ".env" "[ok]" "Gitignored (Safe)" - else - print_check ".env" "[alert]" "Not Gitignored! 
(Risk)" - fi - fi - fi - echo "- [info] Use environment.yaml for non-sensitive orchestration." - - # 6. Repository Audit (Compliance) - echo "" - print_section "Repository Compliance (Repo-as-a-Skill)" - - if [ -n "$repo_root" ]; then - # TDD Check - if [ -d "$repo_root/tests" ] || [ -d "$repo_root/test" ] || [ -d "$repo_root/spec" ]; then - print_check "TDD" "[ok]" "Test suite detected" - else - print_check "TDD" "[warn]" "Missing tests/ or spec/ directory" - fi - - # Config-as-Code Check - if [ -f "$repo_root/environment.yaml" ]; then - print_check "CaC" "[ok]" "environment.yaml active" - else - print_check "CaC" "[warn]" "Missing environment.yaml" - fi - - # Documentation Check - if [ -d "$repo_root/docs" ]; then - print_check "Docs" "[ok]" "Knowledge base active" - else - print_check "Docs" "[warn]" "Missing docs/ directory" - fi - - # AI Readiness - if [ -d "$repo_root/src/ai" ]; then - print_check "AI Skills" "[ok]" "Repo skills defined" - else - print_check "AI Skills" "[warn]" "Missing src/ai/ directory" - fi - else - echo " - [info] Run inside a git repository for full compliance audit." 
- fi - echo "" -} diff --git a/lib/commands/github.sh b/lib/commands/gh.sh similarity index 96% rename from lib/commands/github.sh rename to lib/commands/gh.sh index 7efda84..f326c5f 100644 --- a/lib/commands/github.sh +++ b/lib/commands/gh.sh @@ -22,17 +22,14 @@ set -euo pipefail # dev.kit github review-prs [--repo OWNER/REPO] [--state open|closed|merged|all] [--limit N] [--json] [--include-drafts] # dev.kit github pr-create --title "Title" --body "Body" [--base branch] [--head branch] [--draft] -dev_kit_cmd_github() { - - +dev_kit_cmd_gh() { LIMIT=30 - STATE="open" # issues: open|closed|all ; prs: open|closed|merged|all + STATE="open" REPO="" JSON=0 INCLUDE_DRAFTS=0 COMMAND="" - # pr-create options PR_TITLE="" PR_BODY="" PR_BASE="main" @@ -43,10 +40,10 @@ dev_kit_cmd_github() { usage() { cat < [options] + dev.kit gh [options] Commands: assigned-issues List issues assigned to you diff --git a/lib/commands/status.sh b/lib/commands/status.sh index 16cb7eb..ab6b36f 100644 --- a/lib/commands/status.sh +++ b/lib/commands/status.sh @@ -4,8 +4,8 @@ # @intent: status, check, health, info, diagnostic # @objective: Provide a compact, high-signal overview of the current engineering environment, active tasks, and empowerment mesh. # @usage: dev.kit status +# @usage: dev.kit status --audit # @usage: dev.kit status --json -# @workflow: 1. Identity & Operating Mode -> 2. Environment Health -> 3. Active Context -> 4. Empowerment Mesh -> 5. Actionable Advice if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then # shellcheck source=/dev/null @@ -14,25 +14,25 @@ fi dev_kit_cmd_status() { local json_output="false" - if [ "${1:-}" = "--json" ]; then - json_output="true" - shift - fi + local deep_audit="false" + + for arg in "$@"; do + case "$arg" in + --json) json_output="true" ;; + --audit) deep_audit="true" ;; + esac + done if [ "$json_output" = "true" ]; then - dev_kit_cmd_doctor --json + dev_kit_health_audit_json return fi ui_header "Engineering Brief" - # 1. 
Identity - local version="0.1.0" - [ -f "$REPO_DIR/VERSION" ] && version="$(cat "$REPO_DIR/VERSION")" - - # 2. Operating Mode & Environment + # 1. Identity & Operating Mode local ai_enabled; ai_enabled="$(config_value_scoped ai.enabled "false")" - local provider; provider="$(config_value_scoped ai.provider "codex")" + local provider; provider="$(config_value_scoped ai.provider "gemini")" if [ "$ai_enabled" = "true" ]; then ui_ok "Mode" "AI-Powered ($provider)" @@ -40,20 +40,12 @@ dev_kit_cmd_status() { ui_info "Mode" "Personal Helper (Local)" fi - local env_line="source \"$HOME/.udx/dev.kit/source/env.sh\"" - local profile=""; case "${SHELL:-}" in */zsh) profile="$HOME/.zshrc" ;; *) profile="$HOME/.bash_profile" ;; esac - if [ -f "$profile" ] && grep -Fqx "$env_line" "$profile"; then - ui_ok "Shell" "Integrated ($profile)" - else - ui_warn "Shell" "Missing integration" - fi - - # 3. Workspace & Context + # 2. Workspace & Context local repo_root; repo_root="$(get_repo_root || true)" if [ -n "$repo_root" ]; then ui_ok "Workspace" "$(basename "$repo_root")" - # Active Task + # Active Task Discovery local active_workflow="" if [ -d "$repo_root/tasks" ]; then active_workflow="$(find "$repo_root/tasks" -name "workflow.md" -exec grep -l "status: planned\|status: active" {} + | head -n 1 || true)" @@ -76,35 +68,33 @@ dev_kit_cmd_status() { ui_warn "Workspace" "Not in a repository" fi - # 4. Virtual Skills (Discovery) - echo "" - printf "%sVirtual Skills (Environment Discovery):%s\n" "$(ui_cyan)" "$(ui_reset)" - if command -v gh >/dev/null 2>&1; then ui_ok "GitHub" "CLI (Discovery Active)"; else ui_info "GitHub" "Missing"; fi - if command -v npm >/dev/null 2>&1; then ui_ok "NPM" "Node Runtime"; else ui_info "NPM" "Missing"; fi - if command -v docker >/dev/null 2>&1; then ui_ok "Docker" "Engine Detected"; else ui_info "Docker" "Missing"; fi - if command -v gcloud >/dev/null 2>&1; then ui_ok "Google" "Cloud CLI"; fi - - # 5. Empowerment Mesh + # 3. 
Empowerment Mesh (Summary) echo "" - printf "%sEmpowerment Mesh (Capability Discovery):%s\n" "$(ui_cyan)" "$(ui_reset)" - - local cmd_count; cmd_count=$(ls "$REPO_DIR"/lib/commands/*.sh 2>/dev/null | wc -l) - local mod_count; mod_count=$(ls "$REPO_DIR"/lib/modules/*.sh 2>/dev/null | wc -l) - local skill_count; skill_count=$(ls "$REPO_DIR"/docs/workflows/*.md 2>/dev/null | grep -v "README.md\|normalization.md\|loops.md\|mermaid-patterns.md" | wc -l) - - ui_ok "Capabilities" "$cmd_count Commands | $mod_count Modules | $skill_count AI Skills" - - if command -v dev_kit_github_health >/dev/null 2>&1 && dev_kit_github_health >/dev/null 2>&1; then - ui_ok "Remote" "GitHub Authorized" - fi - + printf "%sEmpowerment Mesh Summary:%s\n" "$(ui_cyan)" "$(ui_reset)" + if command -v gh >/dev/null 2>&1; then ui_ok "GitHub" "CLI Active"; fi if command -v dev_kit_context7_health >/dev/null 2>&1 && dev_kit_context7_health >/dev/null 2>&1; then - ui_ok "Knowledge" "Context7 API (v2)" + ui_ok "Knowledge" "Context7 Ready" + fi + + # 4. Deep Audit (Optional) + if [ "$deep_audit" = "true" ]; then + echo "" + ui_header "Engineering Compliance Audit" + if [ -n "$repo_root" ]; then + [ -d "$repo_root/tests" ] && ui_ok "TDD" "Test suite detected" || ui_warn "TDD" "No tests found" + [ -f "$repo_root/environment.yaml" ] && ui_ok "CaC" "environment.yaml active" || ui_warn "CaC" "Missing orchestrator" + [ -d "$repo_root/docs" ] && ui_ok "Docs" "Knowledge base found" || ui_warn "Docs" "No documentation" + fi + echo "" + echo "Software Detection:" + for sw in git docker npm gh; do + if command -v "$sw" >/dev/null 2>&1; then ui_ok "$sw" "$(command -v "$sw")"; else ui_warn "$sw" "Missing"; fi + done fi - # 6. Actionable Tips + # 5. Actionable Advice echo "" - ui_tip "Run 'dev.kit skills run \"\"' to resolve drift." - ui_tip "Run 'dev.kit sync' to atomically commit changes." + ui_tip "Run 'dev.kit suggest' for repository improvements." 
+ ui_tip "Run 'dev.kit status --audit' for a full compliance check." echo "" } diff --git a/lib/commands/suggest.sh b/lib/commands/suggest.sh new file mode 100644 index 0000000..6b4621b --- /dev/null +++ b/lib/commands/suggest.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +# @description: Suggest repository improvements and CDE compliance fixes. +# @intent: suggest, improve, cde, compliance, hint, tip +# @objective: Provide actionable advice to improve the repository's engineering experience and CDE standards. +# @usage: dev.kit suggest + +if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then + # shellcheck source=/dev/null + . "$REPO_DIR/lib/utils.sh" +fi + +if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/modules/context_manager.sh" ]; then + # shellcheck source=/dev/null + . "$REPO_DIR/lib/modules/context_manager.sh" +fi + +dev_kit_cmd_suggest() { + if command -v ui_header >/dev/null 2>&1; then + ui_header "Engineering Suggestions" + else + echo "--- Engineering Suggestions ---" + fi + + local suggestions + suggestions="$(dev_kit_context_suggest_improvements "general repository check")" + + if [ "$suggestions" = "[]" ]; then + ui_ok "CDE Compliance" "No immediate improvements suggested." + return + fi + + echo "$suggestions" | jq -c '.[]' | while read -r sug; do + local type; type=$(echo "$sug" | jq -r '.type') + local msg; msg=$(echo "$sug" | jq -r '.message') + case "$type" in + doc) ui_info "Documentation" "$msg" ;; + config) ui_warn "Configuration" "$msg" ;; + ops) ui_info "Operations" "$msg" ;; + *) ui_tip "$msg" ;; + esac + done + + echo "" + ui_tip "Run 'dev.kit config detect' to check environment software." 
+} diff --git a/lib/commands/sync.sh b/lib/commands/sync.sh index 26dc980..eaf38ed 100644 --- a/lib/commands/sync.sh +++ b/lib/commands/sync.sh @@ -41,10 +41,12 @@ dev_kit_cmd_sync() { local dry_run="false" local task_id="unknown" local message="" - + local push="false" + while [[ $# -gt 0 ]]; do case "$1" in --dry-run) dry_run="true"; shift ;; + --push) push="true"; shift ;; --task-id) task_id="$2"; shift 2 ;; --message) message="$2"; shift 2 ;; -h|--help) @@ -57,13 +59,14 @@ Commands: Options (run): --dry-run Show what commits would be made without executing them + --push Push changes to origin after committing --task-id The current task ID to associate with commits --message Optional base message prefix -h, --help Show this help message Example: dev.kit sync prepare main - dev.kit sync run --task-id "TASK-123" + dev.kit sync run --task-id "TASK-123" --push SYNC_HELP return 0 ;; @@ -72,11 +75,12 @@ SYNC_HELP done if command -v dev_kit_git_sync_run >/dev/null 2>&1; then - dev_kit_git_sync_run "$dry_run" "$task_id" "$message" + dev_kit_git_sync_run "$dry_run" "$task_id" "$message" "$push" else echo "Error: Git sync module not loaded." >&2 return 1 fi ;; + esac } diff --git a/lib/commands/test.sh b/lib/commands/test.sh new file mode 100644 index 0000000..0480e02 --- /dev/null +++ b/lib/commands/test.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +# @description: Run the repository's test suite to verify health and grounding. +# @intent: test, check, verify, suite, worker +# @objective: Validate the integrity of the dev.kit engine, its grounding in the repository, and ensure environment parity via worker containers. +# @usage: dev.kit test [--worker] + +if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then + # shellcheck source=/dev/null + . "$REPO_DIR/lib/utils.sh" +fi + +dev_kit_cmd_test() { + local runner="${REPO_DIR}/tests/run.sh" + + if [ ! 
-f "$runner" ]; then + echo "Error: Test runner not found at $runner" >&2 + return 1 + fi + + # Pass all arguments directly to the runner script + bash "$runner" "$@" +} diff --git a/lib/commands/agent.sh b/lib/modules/agent_manager.sh similarity index 52% rename from lib/commands/agent.sh rename to lib/modules/agent_manager.sh index 1a1db70..dd309ed 100644 --- a/lib/commands/agent.sh +++ b/lib/modules/agent_manager.sh @@ -1,16 +1,8 @@ -#!/bin/bash +#!/usr/bin/env bash -# @description: Direct agent integration management (advanced). +# @description: Orchestrate the rendering and deployment of AI provider artifacts. # @intent: agent, llm, provider, model, configure -# @objective: Orchestrate the rendering and deployment of AI provider artifacts (Gemini) using dynamic normalization from documentation and scripts. -# @usage: dev.kit agent gemini --plan -# @usage: dev.kit agent all -# @workflow: 1. Parse Manifest -> 2. Render Templates from Docs/Lib -> 3. Synchronize Skills -> 4. Backup & Deploy Artifacts - -if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then - # shellcheck source=/dev/null - . "$REPO_DIR/lib/utils.sh" -fi +# @objective: Dynamic normalization and deployment of agent skills and configuration. 
dev_kit_agent_manifest() { echo "$REPO_DIR/src/ai/integrations/manifest.json" @@ -37,27 +29,23 @@ dev_kit_agent_render_artifact() { local available_tools="" local memories="" - # Gather Workflows from docs/workflows/ + # Gather Workflows for skill_file in "$REPO_DIR"/docs/workflows/*.md; do [ -f "$skill_file" ] || continue local filename; filename="$(basename "$skill_file")" - [ "$filename" = "README.md" ] && continue - [ "$filename" = "normalization.md" ] && continue - [ "$filename" = "loops.md" ] && continue - [ "$filename" = "mermaid-patterns.md" ] && continue + [[ "$filename" =~ ^(README|normalization|loops|mermaid-patterns)\.md$ ]] && continue local name="${filename%.md}" local desc; desc="$(grep -i "^description:" "$skill_file" 2>/dev/null | head -n 1 | sed 's/^description: //I' || echo "Grounded workflow reasoning.")" agent_skills+="- **$name**: $desc\n" done - # Gather Commands from lib/commands/ + # Gather Commands for file in "$REPO_DIR"/lib/commands/*.sh; do [ -f "$file" ] || continue local key desc key="$(basename "${file%.sh}")" - # Hide internal/utility commands - case "$key" in agent|github|skills) continue ;; esac + case "$key" in agent|github|skills|test|suggest) continue ;; esac desc="$(grep "^# @description:" "$file" | cut -d: -f2- | sed 's/^ //' || echo "no description")" available_tools+="- **dev.kit $key**: $desc\n" done @@ -73,7 +61,7 @@ dev_kit_agent_render_artifact() { export DEV_KIT_RENDER_DATE="$(date +%Y-%m-%d)" export DEV_KIT_RENDER_HOME="$HOME" export DEV_KIT_RENDER_DEV_KIT_HOME="$DEV_KIT_HOME" - export DEV_KIT_RENDER_DEV_KIT_SOURCE="$DEV_KIT_SOURCE" + export DEV_KIT_RENDER_DEV_KIT_SOURCE="$REPO_DIR" export DEV_KIT_RENDER_DEV_KIT_STATE="$DEV_KIT_STATE" export DEV_KIT_RENDER_SKILLS="$agent_skills" export DEV_KIT_RENDER_TOOLS="$available_tools" @@ -99,44 +87,34 @@ dev_kit_agent_render_artifact() { dev_kit_agent_apply_integration() { local key="$1" local mode="$2" - local manifest - manifest="$(dev_kit_agent_manifest)" + local manifest; 
manifest="$(dev_kit_agent_manifest)" - [ ! -f "$manifest" ] && { echo "Error: Manifest not found." >&2; exit 1; } + [ ! -f "$manifest" ] && { echo "Error: Manifest not found." >&2; return 1; } - local integration_json - integration_json="$(jq -r ".integrations[] | select(.key == \"$key\")" "$manifest")" - [ -z "$integration_json" ] && { echo "Error: Integration '$key' not found." >&2; exit 1; } + local integration_json; integration_json="$(jq -r ".integrations[] | select(.key == \"$key\")" "$manifest")" + [ -z "$integration_json" ] && { echo "Error: Integration '$key' not found." >&2; return 1; } - local target_dir - target_dir="$(dev_kit_agent_expand_path "$(echo "$integration_json" | jq -r '.target_dir')")" + local target_dir; target_dir="$(dev_kit_agent_expand_path "$(echo "$integration_json" | jq -r '.target_dir')")" local templates_dir="$REPO_DIR/$(echo "$integration_json" | jq -r '.templates_dir')" local skills_dst_dir="$target_dir/skills" - local rendered - rendered="$(mktemp -d)" - - local artifacts_count - artifacts_count="$(echo "$integration_json" | jq '.artifacts | length')" + local rendered; rendered="$(mktemp -d)" + local artifacts_count; artifacts_count="$(echo "$integration_json" | jq '.artifacts | length')" for ((i=0; i" >&2; exit 1; } - local manifest; manifest="$(dev_kit_agent_manifest)" - local target_dir; target_dir="$(dev_kit_agent_expand_path "$(jq -r ".integrations[] | select(.key == \"$key\") | .target_dir" "$manifest")")" - local backup_base="$target_dir/.backup/dev.kit" - - if [ ! -d "$backup_base" ]; then - echo "No backups found for $key." - return 1 - fi - - local last_backup; last_backup="$(ls -d "$backup_base"/*/ | sort | tail -n 1)" - if [ -z "$last_backup" ]; then - echo "No backups found for $key." - return 1 - fi - - echo "Restoring $key from $last_backup..." - cp -R "$last_backup/." "$target_dir/" - echo "Restore complete." 
- ;; - disable) - local key="${2:-}" - if [ "$key" = "all" ]; then - for k in $(jq -r '.integrations[].key' "$(dev_kit_agent_manifest)"); do - dev_kit_agent_disable_integration "$k" - done - else - [ -z "$key" ] && { echo "Usage: dev.kit agent disable " >&2; exit 1; } - dev_kit_agent_disable_integration "$key" - fi - ;; - skills) - local key="${2:-}" - [ -z "$key" ] && { echo "Usage: dev.kit agent skills " >&2; exit 1; } - local manifest="$(dev_kit_agent_manifest)" - local target_dir="$(dev_kit_agent_expand_path "$(jq -r ".integrations[] | select(.key == \"$key\") | .target_dir" "$manifest")")" - local skills_dst_dir="$target_dir/skills" - echo "Managed Skills for '$key' ($skills_dst_dir):" - [ -d "$skills_dst_dir" ] && ls "$skills_dst_dir" | sed 's/^/- /' || echo "(none)" - ;; - help|-h|--help) - cat <<'AGENT_USAGE' -Usage: dev.kit agent - -Commands: - status Show status of all AI agent integrations - skills List managed skills for a specific agent - restore Restore latest backup for specific agent - disable Safely backup and remove agent settings - [--plan] Apply configuration for specific agent (e.g., gemini) - all [--plan] Apply all supported agent configurations -AGENT_USAGE - ;; - all) - shift - [ "${1:-}" = "--plan" ] && mode="plan" - for k in $(jq -r '.integrations[].key' "$(dev_kit_agent_manifest)"); do - dev_kit_agent_apply_integration "$k" "$mode" - done - ;; - *) - local key="$sub" - shift - [ "${1:-}" = "--plan" ] && mode="plan" - dev_kit_agent_apply_integration "$key" "$mode" - ;; - esac + [ -d "$target_dir/skills" ] && mv "$target_dir/skills" "$backup_dir/skills" } diff --git a/lib/modules/config_manager.sh b/lib/modules/config_manager.sh new file mode 100644 index 0000000..f01d4fc --- /dev/null +++ b/lib/modules/config_manager.sh @@ -0,0 +1,98 @@ +#!/usr/bin/env bash + +# @description: High-fidelity configuration management and environment orchestration. 
+# @intent: config, setting, env, setup, manage, hydration +# @objective: Provide a unified interface for reading and writing configuration across multiple scopes (global, repo, environment). + +# Get a configuration value with scoping (Repo -> Global -> Environment -> Default) +# Usage: config_value_scoped [default] +config_value_scoped() { + local key="$1" + local default="${2:-}" + local val="" + + # 1. Check local repo .env (Priority 1) + local local_path + local_path="$(get_repo_state_dir || true)/config.env" + if [ -f "$local_path" ]; then + val="$(config_get_value "$local_path" "$key" "")" + fi + + # 2. Check global .env (Priority 2) + if [ -z "$val" ]; then + val="$(config_get_value "$CONFIG_FILE" "$key" "")" + fi + + # 3. Check YAML Orchestrator (Priority 3 / Defaults) + if [ -z "$val" ] && [ -f "${ENVIRONMENT_YAML:-}" ]; then + local yaml_key="$key" + # Map dots to nested structure if needed (e.g. system.quiet) + case "$key" in + quiet|developer|state_path) yaml_key="system.$key" ;; + *) yaml_key="$key" ;; + esac + val="$(dev_kit_yaml_value "$ENVIRONMENT_YAML" "$yaml_key" "")" + fi + + if [ -n "$val" ]; then + echo "$val" + else + echo "$default" + fi +} + +# Raw configuration value extractor +# Usage: config_get_value [default] +config_get_value() { + local file="$1" + local key="$2" + local default="${3:-}" + local val="" + if [ -f "$file" ]; then + val="$(awk -F= -v k="$key" ' + $1 ~ "^[[:space:]]*"k"[[:space:]]*$" { + sub(/^[[:space:]]*/,"",$2); + sub(/[[:space:]]*$/,"",$2); + print $2; + exit + } + ' "$file")" + fi + if [ -n "$val" ]; then + echo "$val" + else + echo "$default" + fi +} + +# Update a configuration value in a specific file +# Usage: config_set_value +config_set_value() { + local key="$1" + local value="$2" + local path="$3" + local tmp + tmp="$(mktemp)" + if [ -f "$path" ]; then + awk -v k="$key" -v v="$value" ' + BEGIN { found=0 } + { + if ($0 ~ "^[[:space:]]*"k"[[:space:]]*=") { + found=1 + print k" = "v + next + } + print + } + END 
{ + if (!found) { + print k" = "v + } + } + ' "$path" > "$tmp" + else + printf "%s = %s\n" "$key" "$value" > "$tmp" + fi + mkdir -p "$(dirname "$path")" + mv "$tmp" "$path" +} diff --git a/lib/modules/context_manager.sh b/lib/modules/context_manager.sh index 014ddf8..bd5c6c3 100644 --- a/lib/modules/context_manager.sh +++ b/lib/modules/context_manager.sh @@ -13,12 +13,46 @@ dev_kit_context_normalize() { local context_data context_data="$(dev_kit_context_resolve "$intent")" - # 2. Map to deterministic steps + # 2. Add CDE Improvement Suggestions + local suggestions + suggestions="$(dev_kit_context_suggest_improvements "$intent")" + + # 3. Combine and return a typed context manifest + local full_context + full_context=$(echo "$context_data" | jq --argjson sug "$suggestions" '. + {suggestions: $sug}') + if [ -n "$output_context" ]; then - echo "$context_data" > "$output_context" + echo "$full_context" > "$output_context" fi - echo "$context_data" + echo "$full_context" +} + +# Suggest improvements based on intent and repository state +dev_kit_context_suggest_improvements() { + local intent="$1" + local suggestions=() + + # Heuristic: Check for missing documentation + if [[ "$intent" == *"new feature"* ]] || [[ "$intent" == *"implement"* ]]; then + suggestions+=("{\"type\": \"doc\", \"message\": \"Ensure a corresponding MD file is created in docs/features/\"}") + fi + + # Heuristic: Check for CDE compliance + if [ ! -f "$REPO_DIR/.udx/dev.kit/config.env" ]; then + suggestions+=("{\"type\": \"config\", \"message\": \"Local .udx config missing. Run 'dev.kit config reset --scope repo' to initialize.\"}") + fi + + # Heuristic: CI/CD check + if [ ! -d "$REPO_DIR/.github/workflows" ]; then + suggestions+=("{\"type\": \"ops\", \"message\": \"GitHub Workflows missing. 
Consider adding context7-ops.yml for better automation.\"}") + fi + + if [ ${#suggestions[@]} -eq 0 ]; then + echo "[]" + else + (IFS=,; echo "[${suggestions[*]}]") + fi } # Search for capabilities via Dynamic Discovery Engine diff --git a/lib/modules/git_sync.sh b/lib/modules/git_sync.sh index 893465f..35cb7bd 100644 --- a/lib/modules/git_sync.sh +++ b/lib/modules/git_sync.sh @@ -109,12 +109,38 @@ dev_kit_git_sync_process_group() { } # Run the full git sync workflow -# Usage: dev_kit_git_sync_run [dry_run] [task_id] [message] +# Usage: dev_kit_git_sync_run [dry_run] [task_id] [message] [push_flag] dev_kit_git_sync_run() { local dry_run="${1:-false}" local task_id="${2:-unknown}" local message="${3:-}" + local push_flag="${4:-false}" + # 0. Pre-sync Verification (Run Tests) + if [ "$dry_run" = "false" ]; then + echo "--- Step 0: Pre-sync Verification ---" + local has_tests="false" + local repo_root; repo_root="$(get_repo_root || true)" + if [ -n "$repo_root" ]; then + [ -d "$repo_root/tests" ] || [ -d "$repo_root/test" ] || [ -d "$repo_root/spec" ] && has_tests="true" + fi + + if [ "$has_tests" = "true" ]; then + echo "Tests detected. Running high-fidelity verification..." + if dev.kit test; then + echo "โœ” Verification successful. Proceeding with sync." + else + echo "โŒ Verification failed. Please resolve test failures before syncing." + if ! confirm_action "Tests failed. Force sync anyway?"; then + return 1 + fi + fi + else + echo "No tests detected. Skipping verification step." + fi + echo "" + fi + # Resolve target main branch local target_main="main" if ! 
git rev-parse --verify origin/main >/dev/null 2>&1; then @@ -133,17 +159,19 @@ dev_kit_git_sync_run() { echo "$staged $unstaged $untracked" | tr ' ' '\n' | sort -u > .drift.tmp : > .processed.tmp - # Define groups (Standard UDX grouping) - local -a groups=( - "docs:Group Documentation:^docs/|^README.md" - "ai:Group AI & Integrations:^src/ai/|^.gemini/|^src/mappings/" - "cli:Group CLI & Scripts:^bin/|^lib/|^src/cli/" - "core:Group Core Infrastructure:^src/|^environment.yaml|^context7.json" - ) + # Define groups (Loaded from config for Easy Management) + local groups_raw; groups_raw=$(config_value_scoped git_sync_groups "docs:^docs/|^README.md,ai:^src/ai/|^.gemini/|^src/mappings/,cli:^bin/|^lib/|^src/cli/,core:^src/|^environment.yaml|^context7.json") + + local -a groups=() + IFS=',' read -r -a groups_arr <<< "$groups_raw" + for g in "${groups_arr[@]}"; do + groups+=("$g") + done for group in "${groups[@]}"; do - IFS=':' read -r id name pattern <<< "$group" - echo "--- Step: $name ($id) ---" + local id; id=$(echo "$group" | cut -d: -f1) + local pattern; pattern=$(echo "$group" | cut -d: -f2-) + echo "--- Step: Group $id ---" dev_kit_git_sync_process_group "$id" "$pattern" "$task_id" "$dry_run" "$message" done @@ -172,37 +200,48 @@ dev_kit_git_sync_run() { rm -f .drift.tmp .processed.tmp echo "--- Git Sync Workflow Complete ---" - # 5. Proactive PR Suggestion (New) - if [ "$dry_run" = "false" ] && command -v dev_kit_github_health >/dev/null 2>&1; then - if dev_kit_github_health >/dev/null 2>&1; then - local current_branch; current_branch=$(git branch --show-current) - # Don't suggest PR for the default main branch - if [[ "$current_branch" != "main" && "$current_branch" != "master" ]]; then - echo "" - printf "โœ” Synchronization complete. Would you like to create a Pull Request for $current_branch? 
(y/N): " - read -r response - if [[ "$response" =~ ^[Yy]$ ]]; then - local pr_title="feat: resolve $task_id" - [ -n "$message" ] && pr_title="$message" - - # Generate a brief summary from the git diff (stat only for brevity) - local diff_summary="" - if git rev-parse --verify origin/"$target_main" >/dev/null 2>&1; then - diff_summary=$(git diff origin/"$target_main"...HEAD --stat | head -n 20) - else - # Fallback if origin is not available - diff_summary="Changes since common ancestor could not be calculated (origin missing)." - fi - - local pr_body="### ๐Ÿš€ Drift Resolution: $task_id\n\n$message\n\n#### ๐Ÿ“Š Change Summary\n\`\`\`text\n$diff_summary\n\`\`\`\n\nAutomated via \`dev.kit sync\`." - - if dev_kit_github_pr_create "$pr_title" "$pr_body" "$target_main"; then - echo "โœ” Pull Request synchronized successfully." - else - echo "โŒ Failed to synchronize Pull Request." + # 5. Push and PR Management + if [ "$dry_run" = "false" ]; then + local current_branch; current_branch=$(git branch --show-current) + local remote; remote=$(git config --get "branch.${current_branch}.remote" || echo "origin") + + # Check if we should push + if [ "$push_flag" = "true" ] || confirm_action "Synchronization complete. Push changes to $remote/$current_branch?"; then + echo "Pushing changes to $remote $current_branch..." + if git push "$remote" "$current_branch"; then + echo "โœ” Pushed successfully." 
+ + # Proactive PR Suggestion + if command -v dev_kit_github_health >/dev/null 2>&1 && dev_kit_github_health >/dev/null 2>&1; then + # Don't suggest PR for the default main branch + if [[ "$current_branch" != "main" && "$current_branch" != "master" ]]; then + echo "" + if confirm_action "Would you like to synchronize a Pull Request for $current_branch?"; then + local pr_title="feat: resolve $task_id" + [ -n "$message" ] && pr_title="$message" + + # Generate a brief summary from the git diff + local diff_summary="" + if git rev-parse --verify "$remote/$target_main" >/dev/null 2>&1; then + diff_summary=$(git diff "$remote/$target_main"...HEAD --stat | head -n 20) + else + diff_summary="Changes since common ancestor could not be calculated ($remote/$target_main missing)." + fi + + local pr_body="### ๐Ÿš€ Drift Resolution: $task_id\n\n$message\n\n#### ๐Ÿ“Š Change Summary\n\`\`\`text\n$diff_summary\n\`\`\`\n\nAutomated via \`dev.kit sync\`." + + if dev_kit_github_pr_create "$pr_title" "$pr_body" "$target_main"; then + echo "โœ” Pull Request synchronized successfully." + else + echo "โŒ Failed to synchronize Pull Request." + fi + fi fi fi + else + echo "โŒ Push failed. Please check your remote configuration or permissions." fi fi fi } + diff --git a/lib/modules/health_manager.sh b/lib/modules/health_manager.sh new file mode 100644 index 0000000..3cb44f1 --- /dev/null +++ b/lib/modules/health_manager.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +# @description: System and repository health auditing. +# @intent: health, doctor, audit, compliance +# @objective: Audit environment health, software prerequisites, and repository compliance. 
+ +dev_kit_health_sw_check() { + local name="$1" + if command -v "$name" >/dev/null 2>&1; then echo "ok"; else echo "missing"; fi +} + +dev_kit_health_audit_json() { + local repo_root; repo_root="$(get_repo_root || true)" + local ai_enabled; ai_enabled="$(config_value_scoped ai.enabled "false")" + + local gh_health="missing" + if command -v dev_kit_github_health >/dev/null 2>&1; then + case $(dev_kit_github_health; echo $?) in 0) gh_health="ok" ;; 2) gh_health="warn" ;; esac + fi + + local skill_count=0 + [ -d "$REPO_DIR/docs/workflows" ] && skill_count=$(find "$REPO_DIR/docs/workflows" -maxdepth 1 -name "*.md" ! -name "README.md" ! -name "normalization.md" ! -name "loops.md" ! -name "mermaid-patterns.md" | wc -l | tr -d ' ') + + cat <&2 - echo "dev.kit: fix permissions or choose a different DEV_KIT_STATE" >&2 - exit 1 - fi - if [ ! -f "$CONFIG_FILE" ] && [ -f "$REPO_DIR/config/default.env" ]; then - mkdir -p "$(dirname "$CONFIG_FILE")" - cp "$REPO_DIR/config/default.env" "$CONFIG_FILE" - fi -} - get_repo_state_dir() { local root; root="$(get_repo_root || true)" if [ -n "$root" ]; then From a47397ee2505d9a60e0680a6132cdf16dfbadc3d Mon Sep 17 00:00:00 2001 From: Dmitry Smirnov Date: Mon, 9 Mar 2026 03:06:23 +0300 Subject: [PATCH 4/9] misc: resolve remaining drift (unknown) --- config/default.env | 6 ++++ deploy.yml | 21 ++++++++++++++ tests/run.sh | 72 ++++++++++++++++++++++++++++++++++++++++++++++ tests/suite.sh | 9 +++++- 4 files changed, 107 insertions(+), 1 deletion(-) create mode 100644 deploy.yml create mode 100644 tests/run.sh diff --git a/config/default.env b/config/default.env index 964dd06..4e4999e 100644 --- a/config/default.env +++ b/config/default.env @@ -7,3 +7,9 @@ developer.enabled = false state_path = ~/.udx/dev.kit/state context.enabled = true context.max_bytes = 4000 +shell.auto_enable = true +output.mode = brief + +# Git Sync Grouping Logic (Name:Regex) +git_sync_groups = 
docs:^docs/|^README.md,ai:^src/ai/|^.gemini/|^src/mappings/,cli:^bin/|^lib/|^src/cli/,core:^src/|^environment.yaml|^context7.json + diff --git a/deploy.yml b/deploy.yml new file mode 100644 index 0000000..51d8d6f --- /dev/null +++ b/deploy.yml @@ -0,0 +1,21 @@ +# dev.kit Worker Deployment Configuration +# Used by @udx/worker-deployment (worker run) to emulate high-fidelity Ubuntu environment. + +kind: workerDeployConfig +version: udx.io/worker-v1/deploy + +config: + image: "usabilitydynamics/udx-worker:latest" + name: "dev-kit-worker-suite" + + volumes: + - ".:/workspace" + + working_dir: "/workspace" + + env: + - "DEV_KIT_SOURCE=/workspace" + - "TERM=xterm-256color" + + # Default command to run the test suite + command: "/bin/bash tests/suite.sh" diff --git a/tests/run.sh b/tests/run.sh new file mode 100644 index 0000000..068c947 --- /dev/null +++ b/tests/run.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +# dev.kit Test Runner +# Facilitates running tests locally or in a high-fidelity udx/worker container. + +set -e + +REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +TEST_SUITE="${REPO_DIR}/tests/suite.sh" +WORKER_IMAGE="usabilitydynamics/udx-worker:latest" + +usage() { + cat </dev/null 2>&1; then + echo "--- Running Tests via udx/worker-deployment (worker run) ---" + # The deploy.yml in the root handles the mounts and environment + worker run + return $? + fi + + # 2. Fallback to raw docker run if CLI is missing + if ! command -v docker >/dev/null 2>&1; then + echo "Error: Neither 'worker' (udx/worker-deployment) nor 'docker' were found." 
+ exit 1 + fi + + echo "--- Running Tests in udx/worker Container (Raw Docker Fallback) ---" + # We mount the REPO_DIR to /workspace and run the suite + docker run --rm \ + -v "${REPO_DIR}:/workspace" \ + -w /workspace \ + -e DEV_KIT_SOURCE=/workspace \ + -e TERM=xterm-256color \ + "$WORKER_IMAGE" \ + /bin/bash tests/suite.sh +} + +mode="local" + +while [[ $# -gt 0 ]]; do + case "$1" in + --worker) mode="worker"; shift ;; + --local) mode="local"; shift ;; + -h|--help) usage; exit 0 ;; + *) echo "Unknown option: $1"; usage; exit 1 ;; + esac +done + +if [ "$mode" = "worker" ]; then + run_worker +else + run_local +fi diff --git a/tests/suite.sh b/tests/suite.sh index 1988f83..a56ab01 100755 --- a/tests/suite.sh +++ b/tests/suite.sh @@ -18,7 +18,14 @@ log_info() { printf " ${C_BLUE}โ„น %s${C_RESET}\n" "$1"; } log_ok() { printf " ${C_GREEN}โœ” %s${C_RESET}\n" "$1"; } log_fail() { printf " ${C_RED}โœ– %s${C_RESET}\n" "$1"; exit 1; } -echo "--- dev.kit High-Fidelity Test Suite ---" +# 0. Verify Environment (OS Check) +log_info "Testing: Environment Integrity" +if [ -f /etc/os-release ] && grep -qi "ubuntu" /etc/os-release; then + log_ok "Running on Ubuntu-based environment (Worker Parity)" +else + # Only warn if not on Ubuntu, but still allow test to proceed unless it's a hard requirement + log_info "Note: Not running on Ubuntu (Local development mode)" +fi # 1. 
Verify Discovery (Doctor) log_info "Testing: Discovery & Doctor Health" From 3a56a8e12ca8405624d8a975d39ff193925f1670 Mon Sep 17 00:00:00 2001 From: Dmitry Smirnov Date: Mon, 9 Mar 2026 03:07:53 +0300 Subject: [PATCH 5/9] fixed workflow --- .github/workflows/context7-ops.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/context7-ops.yml b/.github/workflows/context7-ops.yml index bd2a764..915ef23 100644 --- a/.github/workflows/context7-ops.yml +++ b/.github/workflows/context7-ops.yml @@ -5,6 +5,9 @@ on: branches: [main] workflow_dispatch: +permissions: + contents: write + jobs: sync: uses: udx/reusable-workflows/.github/workflows/context7-ops.yml@master From 1eceb8481f50e928056310426be4ca911a00ad2a Mon Sep 17 00:00:00 2001 From: Dmitry Smirnov Date: Mon, 9 Mar 2026 20:07:52 +0300 Subject: [PATCH 6/9] cleanup --- .DS_Store | Bin 0 -> 6148 bytes .gitignore | 30 -- README.md | 77 ++- assets/diagrams/adaptation-flow.mmd | 10 - assets/diagrams/adaptation-flow.svg | 1 - assets/diagrams/cde-flow.mmd | 12 - assets/diagrams/cde-flow.svg | 1 - assets/diagrams/docs-index.mmd | 14 - assets/diagrams/docs-index.svg | 1 - assets/diagrams/drift-resolution-cycle.mmd | 12 - assets/diagrams/drift-resolution-cycle.svg | 1 - assets/diagrams/engineering-layers.mmd | 7 - assets/diagrams/engineering-layers.svg | 1 - assets/diagrams/execution-engine.mmd | 9 - assets/diagrams/execution-engine.svg | 1 - assets/diagrams/grounding-bridge.mmd | 9 - assets/diagrams/grounding-bridge.svg | 1 - assets/diagrams/methodology-flow.svg | 1 - assets/diagrams/normalization-boundary.mmd | 9 - assets/diagrams/normalization-boundary.svg | 1 - assets/diagrams/runtime-lifecycle.svg | 1 - assets/logo.svg | 40 -- bin/completions/_dev.kit | 52 -- bin/completions/dev.kit.bash | 58 --- bin/dev-kit | 140 ------ bin/env/dev-kit.sh | 153 ------ bin/scripts/install.sh | 234 --------- bin/scripts/uninstall.sh | 59 --- config/default.env | 15 - deploy.yml | 21 - docs/README.md | 62 --- 
docs/ai/README.md | 49 -- docs/ai/agents.md | 57 --- docs/ai/mesh.md | 63 --- docs/ai/providers/gemini.md | 72 --- docs/foundations/best-practices.md | 73 --- docs/foundations/cde.md | 75 --- docs/foundations/layers.md | 77 --- docs/foundations/methodology.md | 64 --- docs/reference/compliance/aoca-guidance.md | 61 --- docs/reference/compliance/cato-overview.md | 65 --- .../compliance/supply-chain-security.md | 61 --- .../operations/devops-littles-law.md | 60 --- .../operations/devops-manual-guidance.md | 62 --- .../operations/lifecycle-cheatsheet.md | 65 --- .../operations/worker-ecosystem-refs.md | 80 ---- docs/reference/standards/12-factor.md | 70 --- .../reference/standards/external-standards.md | 58 --- docs/reference/standards/mermaid.md | 61 --- docs/reference/standards/yaml-standards.md | 58 --- docs/runtime/config.md | 72 --- docs/runtime/execution-loop.md | 76 --- docs/runtime/install.md | 67 --- docs/runtime/lifecycle.md | 59 --- docs/runtime/overview.md | 77 --- docs/workflows/README.md | 52 -- docs/workflows/assets/git-sync.yaml | 26 - .../assets/templates/default-flowchart.mmd | 8 - .../assets/templates/default-sequence.mmd | 21 - .../assets/templates/default-state.mmd | 8 - docs/workflows/git-sync.md | 64 --- docs/workflows/loops.md | 83 ---- docs/workflows/mermaid-patterns.md | 54 --- docs/workflows/normalization.md | 73 --- docs/workflows/visualizer.md | 64 --- environment.yaml | 25 - lib/commands/ai.sh | 134 ------ lib/commands/config.sh | 443 ------------------ lib/commands/gh.sh | 190 -------- lib/commands/skills.sh | 202 -------- lib/commands/status.sh | 100 ---- lib/commands/suggest.sh | 46 -- lib/commands/sync.sh | 86 ---- lib/commands/task.sh | 254 ---------- lib/commands/test.sh | 23 - lib/commands/visualizer.sh | 59 --- lib/modules/agent_manager.sh | 182 ------- lib/modules/config_manager.sh | 98 ---- lib/modules/context7.sh | 124 ----- lib/modules/context_manager.sh | 196 -------- lib/modules/git_sync.sh | 247 ---------- 
lib/modules/github.sh | 86 ---- lib/modules/health_manager.sh | 45 -- lib/modules/npm.sh | 40 -- lib/modules/visualizer.sh | 89 ---- lib/ui.sh | 130 ----- lib/utils.sh | 134 ------ src/ai/integrations/gemini/prompts.json | 14 - .../gemini/templates/GEMINI.md.tmpl | 80 ---- .../gemini/templates/config.json.tmpl | 3 - .../gemini/templates/system.md.tmpl | 24 - src/ai/integrations/manifest.json | 16 - tests/run.sh | 72 --- tests/suite.sh | 63 --- 94 files changed, 30 insertions(+), 6243 deletions(-) create mode 100644 .DS_Store delete mode 100644 .gitignore delete mode 100644 assets/diagrams/adaptation-flow.mmd delete mode 100644 assets/diagrams/adaptation-flow.svg delete mode 100644 assets/diagrams/cde-flow.mmd delete mode 100644 assets/diagrams/cde-flow.svg delete mode 100644 assets/diagrams/docs-index.mmd delete mode 100644 assets/diagrams/docs-index.svg delete mode 100644 assets/diagrams/drift-resolution-cycle.mmd delete mode 100644 assets/diagrams/drift-resolution-cycle.svg delete mode 100644 assets/diagrams/engineering-layers.mmd delete mode 100644 assets/diagrams/engineering-layers.svg delete mode 100644 assets/diagrams/execution-engine.mmd delete mode 100644 assets/diagrams/execution-engine.svg delete mode 100644 assets/diagrams/grounding-bridge.mmd delete mode 100644 assets/diagrams/grounding-bridge.svg delete mode 100644 assets/diagrams/methodology-flow.svg delete mode 100644 assets/diagrams/normalization-boundary.mmd delete mode 100644 assets/diagrams/normalization-boundary.svg delete mode 100644 assets/diagrams/runtime-lifecycle.svg delete mode 100644 assets/logo.svg delete mode 100644 bin/completions/_dev.kit delete mode 100644 bin/completions/dev.kit.bash delete mode 100755 bin/dev-kit delete mode 100755 bin/env/dev-kit.sh delete mode 100755 bin/scripts/install.sh delete mode 100755 bin/scripts/uninstall.sh delete mode 100644 config/default.env delete mode 100644 deploy.yml delete mode 100644 docs/README.md delete mode 100644 docs/ai/README.md delete 
mode 100644 docs/ai/agents.md delete mode 100644 docs/ai/mesh.md delete mode 100644 docs/ai/providers/gemini.md delete mode 100644 docs/foundations/best-practices.md delete mode 100644 docs/foundations/cde.md delete mode 100644 docs/foundations/layers.md delete mode 100644 docs/foundations/methodology.md delete mode 100644 docs/reference/compliance/aoca-guidance.md delete mode 100644 docs/reference/compliance/cato-overview.md delete mode 100644 docs/reference/compliance/supply-chain-security.md delete mode 100644 docs/reference/operations/devops-littles-law.md delete mode 100644 docs/reference/operations/devops-manual-guidance.md delete mode 100644 docs/reference/operations/lifecycle-cheatsheet.md delete mode 100644 docs/reference/operations/worker-ecosystem-refs.md delete mode 100644 docs/reference/standards/12-factor.md delete mode 100644 docs/reference/standards/external-standards.md delete mode 100644 docs/reference/standards/mermaid.md delete mode 100644 docs/reference/standards/yaml-standards.md delete mode 100644 docs/runtime/config.md delete mode 100644 docs/runtime/execution-loop.md delete mode 100644 docs/runtime/install.md delete mode 100644 docs/runtime/lifecycle.md delete mode 100644 docs/runtime/overview.md delete mode 100644 docs/workflows/README.md delete mode 100644 docs/workflows/assets/git-sync.yaml delete mode 100644 docs/workflows/assets/templates/default-flowchart.mmd delete mode 100644 docs/workflows/assets/templates/default-sequence.mmd delete mode 100644 docs/workflows/assets/templates/default-state.mmd delete mode 100644 docs/workflows/git-sync.md delete mode 100644 docs/workflows/loops.md delete mode 100644 docs/workflows/mermaid-patterns.md delete mode 100644 docs/workflows/normalization.md delete mode 100644 docs/workflows/visualizer.md delete mode 100644 environment.yaml delete mode 100644 lib/commands/ai.sh delete mode 100644 lib/commands/config.sh delete mode 100644 lib/commands/gh.sh delete mode 100644 lib/commands/skills.sh delete 
mode 100644 lib/commands/status.sh delete mode 100644 lib/commands/suggest.sh delete mode 100644 lib/commands/sync.sh delete mode 100644 lib/commands/task.sh delete mode 100644 lib/commands/test.sh delete mode 100644 lib/commands/visualizer.sh delete mode 100644 lib/modules/agent_manager.sh delete mode 100644 lib/modules/config_manager.sh delete mode 100644 lib/modules/context7.sh delete mode 100644 lib/modules/context_manager.sh delete mode 100644 lib/modules/git_sync.sh delete mode 100644 lib/modules/github.sh delete mode 100644 lib/modules/health_manager.sh delete mode 100644 lib/modules/npm.sh delete mode 100644 lib/modules/visualizer.sh delete mode 100644 lib/ui.sh delete mode 100644 lib/utils.sh delete mode 100644 src/ai/integrations/gemini/prompts.json delete mode 100644 src/ai/integrations/gemini/templates/GEMINI.md.tmpl delete mode 100644 src/ai/integrations/gemini/templates/config.json.tmpl delete mode 100644 src/ai/integrations/gemini/templates/system.md.tmpl delete mode 100644 src/ai/integrations/manifest.json delete mode 100644 tests/run.sh delete mode 100755 tests/suite.sh diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 **Execution**: Run any repository-bound skill with `dev.kit skills run `. +- **The Normalization Gate**: Chaotic repo states are filtered into bounded, repeatable workflow artifacts. +- **Logic-as-Template**: The `dev.kit` repository is the canonical example of the standards it enforces. Its structure is the blueprint; its commands are the truth. +- **The Bridge**: Instead of feeding an agent raw files, the `bridge` command provides a structured "Map of Truth," ensuring the agent works within validated boundaries. 
--- -## Documentation +## โœ… The Fidelity States -The `dev.kit` knowledge base is structured to reflect **CDE Principles**. - -- **[Foundations](docs/README.md#%EF%B8%8F-foundations)**: Core philosophy (CDE), dev.kit primitives, and methodology. -- **[Runtime](docs/README.md#%EF%B8%8F-runtime)**: CLI overview, lifecycle, and execution loops. -- **[AI Integration](docs/README.md#-ai-integration)**: Grounded orchestration and agent mission. -- **[Best Practices](docs/foundations/best-practices.md)**: High-fidelity engineering rules and command mappings. +| State | Human Experience | Agent Experience | +| :------------ | :---------------------- | :------------------------------------------------- | +| **Build** | _I know how to build._ | Strict 12-factor separation (Build/Release/Run). | +| **Test** | _I know how to verify._ | Deterministic loops to validate health instantly. | +| **Structure** | _I know where to add._ | Standardized hierarchy; zero-guesswork navigation. | +| **Pattern** | _I know how to grow._ | Repeatable Analyze-Normalize-Process sequences. | --- -## Install +## ๐Ÿš€ 60-Second Onboard ```bash -curl -fsSL https://udx.dev/dev.kit/install.sh | bash -``` +# 1. Install & Run the Pulse Check +curl -sSL [https://dev.kit/install](https://dev.kit/install) | bash && dev.kit -## ๐Ÿ“š Authoritative References +# 2. Let an Agent Fix Compliance +dev.kit --json | agent-execute "Fix all fidelity gaps" -The `dev.kit` mission is grounded in foundational research on high-fidelity automation and AI orchestration: - -- **[The Power of Automation](https://andypotanin.com/the-power-of-automation-how-it-has-transformed-the-software-development-process/)**: Systematic transformation of the engineering flow. -- **[Observation-Driven Management](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Revolutionary task normalization through pattern recognition. 
-- **[Autonomous Technical Operations](https://andypotanin.com/claude-operator-prompt/)**: Principles for high-fidelity agent grounding and execution. - ---- -_UDX DevSecOps Team_ +# 3. Let an Agent Develop a Feature +dev.kit bridge --json | agent-execute "Add a new module using existing primitives" +``` diff --git a/assets/diagrams/adaptation-flow.mmd b/assets/diagrams/adaptation-flow.mmd deleted file mode 100644 index 4ec0432..0000000 --- a/assets/diagrams/adaptation-flow.mmd +++ /dev/null @@ -1,10 +0,0 @@ -flowchart LR - Source[(Repo Source)] --> Discovery{Discovery} - Discovery --> Mapping[Fidelity Mapping] - Mapping --> Projection([Tool Projection]) - Projection -- Fail-Open --> Source - - style Source fill:#bbf,stroke:#333,stroke-width:2px - style Discovery fill:#dfd,stroke:#333,stroke-width:2px - style Mapping fill:#dfd,stroke:#333,stroke-width:2px - style Projection fill:#f9f,stroke:#333,stroke-width:2px diff --git a/assets/diagrams/adaptation-flow.svg b/assets/diagrams/adaptation-flow.svg deleted file mode 100644 index 4928d6e..0000000 --- a/assets/diagrams/adaptation-flow.svg +++ /dev/null @@ -1 +0,0 @@ -

Fail-Open

Repo Source

Discovery

Fidelity Mapping

Tool Projection

\ No newline at end of file diff --git a/assets/diagrams/cde-flow.mmd b/assets/diagrams/cde-flow.mmd deleted file mode 100644 index 323c129..0000000 --- a/assets/diagrams/cde-flow.mmd +++ /dev/null @@ -1,12 +0,0 @@ -flowchart LR - Intent([Intent]) --> Specs[Intent-as-Artifact] - Specs --> Discovery{Drift Discovery} - Discovery --> Resolution[Resolution Cycle] - Resolution --> Capture([Experience Capture]) - Capture -.-> Specs - - style Intent fill:#f9f,stroke:#333,stroke-width:2px - style Specs fill:#bbf,stroke:#333,stroke-width:2px - style Discovery fill:#dfd,stroke:#333,stroke-width:2px - style Resolution fill:#bbf,stroke:#333,stroke-width:2px - style Capture fill:#f9f,stroke:#333,stroke-width:2px diff --git a/assets/diagrams/cde-flow.svg b/assets/diagrams/cde-flow.svg deleted file mode 100644 index e715534..0000000 --- a/assets/diagrams/cde-flow.svg +++ /dev/null @@ -1 +0,0 @@ -

Intent

Intent-as-Artifact

Drift Discovery

Resolution Cycle

Experience Capture

\ No newline at end of file diff --git a/assets/diagrams/docs-index.mmd b/assets/diagrams/docs-index.mmd deleted file mode 100644 index 9a03e9c..0000000 --- a/assets/diagrams/docs-index.mmd +++ /dev/null @@ -1,14 +0,0 @@ -flowchart LR - Foundations[1. Foundations] --> Runtime[2. Runtime] - Runtime --> AI[3. AI Integration] - AI --> Ref[4. Reference] - - click Foundations "foundations/cde.md" - click Runtime "runtime/overview.md" - click AI "ai/README.md" - click Ref "reference/standards/12-factor.md" - - style Foundations fill:#dfd,stroke:#333,stroke-width:2px - style Runtime fill:#bbf,stroke:#333,stroke-width:2px - style AI fill:#f9f,stroke:#333,stroke-width:2px - style Ref fill:#dfd,stroke:#333,stroke-width:2px diff --git a/assets/diagrams/docs-index.svg b/assets/diagrams/docs-index.svg deleted file mode 100644 index 62e4e6f..0000000 --- a/assets/diagrams/docs-index.svg +++ /dev/null @@ -1 +0,0 @@ -
1. Foundations
2. Runtime
3. AI Integration
4. Reference
\ No newline at end of file diff --git a/assets/diagrams/drift-resolution-cycle.mmd b/assets/diagrams/drift-resolution-cycle.mmd deleted file mode 100644 index 0e81ae4..0000000 --- a/assets/diagrams/drift-resolution-cycle.mmd +++ /dev/null @@ -1,12 +0,0 @@ -flowchart LR - Drift([Drift]) --> Normalize[1. Normalize] - Normalize --> Iterate[2. Iterate] - Iterate --> Validate[3. Validate] - Validate --> Sync([4. Synchronize]) - Sync -.-> Drift - - style Drift fill:#f9f,stroke:#333,stroke-width:2px - style Normalize fill:#dfd,stroke:#333,stroke-width:2px - style Iterate fill:#bbf,stroke:#333,stroke-width:2px - style Validate fill:#dfd,stroke:#333,stroke-width:2px - style Sync fill:#f9f,stroke:#333,stroke-width:2px diff --git a/assets/diagrams/drift-resolution-cycle.svg b/assets/diagrams/drift-resolution-cycle.svg deleted file mode 100644 index 0513653..0000000 --- a/assets/diagrams/drift-resolution-cycle.svg +++ /dev/null @@ -1 +0,0 @@ -

Drift

1. Normalize
2. Iterate
3. Validate
4. Synchronize
\ No newline at end of file diff --git a/assets/diagrams/engineering-layers.mmd b/assets/diagrams/engineering-layers.mmd deleted file mode 100644 index b055fd0..0000000 --- a/assets/diagrams/engineering-layers.mmd +++ /dev/null @@ -1,7 +0,0 @@ -flowchart LR - L1[Layer 1: Source & Build] --> L2[Layer 2: Deployment & Runtime] - L2 --> L3[Layer 3: Context & Orchestration] - - style L1 fill:#dfd,stroke:#333,stroke-width:2px - style L2 fill:#bbf,stroke:#333,stroke-width:2px - style L3 fill:#f9f,stroke:#333,stroke-width:2px diff --git a/assets/diagrams/engineering-layers.svg b/assets/diagrams/engineering-layers.svg deleted file mode 100644 index eae8eb4..0000000 --- a/assets/diagrams/engineering-layers.svg +++ /dev/null @@ -1 +0,0 @@ -

Layer 1: Source & Build

Layer 2: Deployment & Runtime

Layer 3: Context & Orchestration

\ No newline at end of file diff --git a/assets/diagrams/execution-engine.mmd b/assets/diagrams/execution-engine.mmd deleted file mode 100644 index de0213b..0000000 --- a/assets/diagrams/execution-engine.mmd +++ /dev/null @@ -1,9 +0,0 @@ -flowchart LR - Steps([Workflow Steps]) --> Engine[CLI Engine] - Engine --> Skills[Internal Skills] - Engine --> Tools[Virtual Tools] - Skills & Tools --> Resolution[Resolved Drift] - - style Steps fill:#f9f,stroke:#333,stroke-width:2px - style Engine fill:#dfd,stroke:#333,stroke-width:2px - style Resolution fill:#bbf,stroke:#333,stroke-width:2px diff --git a/assets/diagrams/execution-engine.svg b/assets/diagrams/execution-engine.svg deleted file mode 100644 index 8fe7139..0000000 --- a/assets/diagrams/execution-engine.svg +++ /dev/null @@ -1 +0,0 @@ -

Workflow Steps

CLI Engine

Internal Skills

Virtual Tools

Resolved Drift

\ No newline at end of file diff --git a/assets/diagrams/grounding-bridge.mmd b/assets/diagrams/grounding-bridge.mmd deleted file mode 100644 index 8fe5af5..0000000 --- a/assets/diagrams/grounding-bridge.mmd +++ /dev/null @@ -1,9 +0,0 @@ -flowchart LR - User([User Intent]) --> Bridge[Grounding Bridge] - Bridge --> Skills{Skill Discovery} - Skills --> Local[Local Repo] - Skills --> Remote[Remote Mesh] - - style User fill:#f9f,stroke:#333,stroke-width:2px - style Bridge fill:#bbf,stroke:#333,stroke-width:2px - style Skills fill:#dfd,stroke:#333,stroke-width:2px diff --git a/assets/diagrams/grounding-bridge.svg b/assets/diagrams/grounding-bridge.svg deleted file mode 100644 index 11c8fdb..0000000 --- a/assets/diagrams/grounding-bridge.svg +++ /dev/null @@ -1 +0,0 @@ -

User Intent

Grounding Bridge

Skill Discovery

Local Repo

Remote Mesh

\ No newline at end of file diff --git a/assets/diagrams/methodology-flow.svg b/assets/diagrams/methodology-flow.svg deleted file mode 100644 index 8d25e25..0000000 --- a/assets/diagrams/methodology-flow.svg +++ /dev/null @@ -1 +0,0 @@ -

Chaotic Scripts

CLI-Wrapped Automation

Portable Skill

AI Agent Integration

Drift Resolution

\ No newline at end of file diff --git a/assets/diagrams/normalization-boundary.mmd b/assets/diagrams/normalization-boundary.mmd deleted file mode 100644 index 8c050f3..0000000 --- a/assets/diagrams/normalization-boundary.mmd +++ /dev/null @@ -1,9 +0,0 @@ -flowchart LR - Intent([Mapped Intent]) --> Gate{Normalization Gate} - Gate --> Workflow[workflow.md] - Workflow --> Steps[Bounded Steps] - - style Intent fill:#f9f,stroke:#333,stroke-width:2px - style Gate fill:#dfd,stroke:#333,stroke-width:2px - style Workflow fill:#bbf,stroke:#333,stroke-width:2px - style Steps fill:#bbf,stroke:#333,stroke-width:2px diff --git a/assets/diagrams/normalization-boundary.svg b/assets/diagrams/normalization-boundary.svg deleted file mode 100644 index fe14927..0000000 --- a/assets/diagrams/normalization-boundary.svg +++ /dev/null @@ -1 +0,0 @@ -

Mapped Intent

Normalization Gate

workflow.md

Bounded Steps

\ No newline at end of file diff --git a/assets/diagrams/runtime-lifecycle.svg b/assets/diagrams/runtime-lifecycle.svg deleted file mode 100644 index 62711df..0000000 --- a/assets/diagrams/runtime-lifecycle.svg +++ /dev/null @@ -1 +0,0 @@ -
1. Install & Init
2. Config Orchestration
3. Task Execution
4. Experience Capture
5. Exit & Cleanup
\ No newline at end of file diff --git a/assets/logo.svg b/assets/logo.svg deleted file mode 100644 index 39e6b33..0000000 --- a/assets/logo.svg +++ /dev/null @@ -1,40 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/bin/completions/_dev.kit b/bin/completions/_dev.kit deleted file mode 100644 index 1f6c91e..0000000 --- a/bin/completions/_dev.kit +++ /dev/null @@ -1,52 +0,0 @@ -#compdef dev.kit - -_dev_kit() { - local -a commands - local -a subcommands - local -a options - local cmd cur - local -a help_lines - local -a parsed - commands=(${(f)"$(dev.kit help 2>/dev/null | awk '/^ /{print $1}')"}) - - cmd="$words[2]" - cur="$words[CURRENT]" - - if (( CURRENT == 2 )); then - _describe 'command' commands - return - fi - - if (( CURRENT == 3 )); then - subcommands=(${(f)"$(dev.kit "$cmd" -h 2>/dev/null | awk ' - /^Commands:/ {flag=1; next} - flag && $0 ~ /^ [a-zA-Z0-9]/ {print $1} - flag && $0 == "" {exit} - ')"}) - if (( ${#subcommands} )); then - _describe 'subcommand' subcommands - return - fi - fi - - if [[ "$cur" == -* ]]; then - options=(${(f)"$(dev.kit "$cmd" -h 2>/dev/null | awk ' - /^Options:/ {flag=1; next} - flag && $0 == "" {exit} - flag { - for (i=1; i<=NF; i++) { - if ($i ~ /^--/) { - gsub(/,/, "", $i); - print $i - } - } - } - ' | sort -u)"}) - if (( ${#options} )); then - compadd -- $options - return - fi - fi -} - -_dev_kit "$@" diff --git a/bin/completions/dev.kit.bash b/bin/completions/dev.kit.bash deleted file mode 100644 index f48f59f..0000000 --- a/bin/completions/dev.kit.bash +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash - -_dev_kit_complete() { - local cur prev cmd sub - cur="${COMP_WORDS[COMP_CWORD]}" - prev="${COMP_WORDS[COMP_CWORD-1]}" - cmd="${COMP_WORDS[1]}" - - _dev_kit_list_subcommands() { - dev.kit "$1" -h 2>/dev/null | awk ' - /^Commands:/ {flag=1; next} - flag && $0 ~ /^ [a-zA-Z0-9]/ {print $1} - flag && $0 == "" {exit} - ' - } - - _dev_kit_list_options() { - dev.kit "$1" -h 2>/dev/null 
| awk ' - /^Options:/ {flag=1; next} - flag && $0 == "" {exit} - flag { - for (i=1; i<=NF; i++) { - if ($i ~ /^--/) { - gsub(/,/, "", $i); - print $i - } - } - } - ' | sort -u - } - - if [ $COMP_CWORD -eq 1 ]; then - local cmds - cmds="$(dev.kit help 2>/dev/null | awk '/^ /{print $1}')" - COMPREPLY=( $(compgen -W "$cmds" -- "$cur") ) - return 0 - fi - - if [ $COMP_CWORD -eq 2 ]; then - local subs - subs="$(_dev_kit_list_subcommands "$cmd")" - if [ -n "$subs" ]; then - COMPREPLY=( $(compgen -W "$subs" -- "$cur") ) - return 0 - fi - fi - - if [[ "$cur" == -* ]]; then - local opts - opts="$(_dev_kit_list_options "$cmd")" - if [ -n "$opts" ]; then - COMPREPLY=( $(compgen -W "$opts" -- "$cur") ) - return 0 - fi - fi -} - -complete -F _dev_kit_complete dev.kit diff --git a/bin/dev-kit b/bin/dev-kit deleted file mode 100755 index 73ea30b..0000000 --- a/bin/dev-kit +++ /dev/null @@ -1,140 +0,0 @@ -#!/bin/bash -set -euo pipefail - -# --- Helper Functions --- - -resolve_self() { - local target="$0" - if command -v realpath >/dev/null 2>&1; then - realpath "$target" - return - fi - if command -v readlink >/dev/null 2>&1; then - while [ -L "$target" ]; do - local link - link="$(readlink "$target")" - case "$link" in - /*) target="$link" ;; - *) target="$(cd "$(dirname "$target")" && cd "$(dirname "$link")" && pwd)/$(basename "$link")" ;; - esac - done - local dir - dir="$(cd "$(dirname "$target")" && pwd -P)" - echo "$dir/$(basename "$target")" - return - fi - echo "$target" -} - -export SCRIPT_PATH="$(resolve_self)" -export REPO_DIR="$(cd "$(dirname "$SCRIPT_PATH")/.." 
&& pwd)" - -# Global Pathing -export DEV_KIT_OWNER="${DEV_KIT_OWNER:-udx}" -export DEV_KIT_REPO="${DEV_KIT_REPO:-dev.kit}" -export DEV_KIT_HOME="${DEV_KIT_HOME:-$HOME/.${DEV_KIT_OWNER}/${DEV_KIT_REPO}}" - -get_repo_root() { - if command -v git >/dev/null 2>&1; then - git rev-parse --show-toplevel 2>/dev/null || true - fi -} - -# --- Initial Bootstrapping (Minimal) --- - -BOOTSTRAP_STATE_PATH="" -if [ -f "$DEV_KIT_HOME/config.env" ]; then - BOOTSTRAP_STATE_PATH="$(awk -F= '$1 ~ /^[[:space:]]*state_path[[:space:]]*$/ {gsub(/[[:space:]]/,"",$2); print $2; exit}' "$DEV_KIT_HOME/config.env")" -fi - -if [[ "$BOOTSTRAP_STATE_PATH" == "~/"* ]]; then - BOOTSTRAP_STATE_PATH="$HOME/${BOOTSTRAP_STATE_PATH:2}" -fi - -export DEV_KIT_STATE="${DEV_KIT_STATE:-${BOOTSTRAP_STATE_PATH:-$DEV_KIT_HOME/state}}" -export CONFIG_FILE="${DEV_KIT_CONFIG:-$DEV_KIT_STATE/config.env}" - -# --- Library & Module Loading --- - -# 1. Shared Utilities -[ -f "$REPO_DIR/lib/utils.sh" ] && . "$REPO_DIR/lib/utils.sh" -[ -f "$REPO_DIR/lib/ui.sh" ] && . "$REPO_DIR/lib/ui.sh" - -# 2. Core Modules (Logic Orchestration) -for module in "$REPO_DIR"/lib/modules/*.sh; do - [ -e "$module" ] && . "$module" -done - -# 3. Public Commands (Entrypoint Mappings) -for cmd_file in "$REPO_DIR"/lib/commands/*.sh; do - [ -e "$cmd_file" ] && . "$cmd_file" -done - -# --- Orchestrator & Logic --- - -ENVIRONMENT_YAML="$(get_repo_root || true)/environment.yaml" -[ -f "$ENVIRONMENT_YAML" ] || ENVIRONMENT_YAML="$DEV_KIT_HOME/environment.yaml" -[ -f "$ENVIRONMENT_YAML" ] || ENVIRONMENT_YAML="$REPO_DIR/environment.yaml" -export ENVIRONMENT_YAML - -ensure_dev_kit_home() { - mkdir -p "$DEV_KIT_HOME" "$DEV_KIT_STATE" - if [ ! -w "$DEV_KIT_STATE" ]; then - echo "dev.kit: config path not writable: $DEV_KIT_STATE" >&2 - exit 1 - fi - if [ ! 
-f "$CONFIG_FILE" ] && [ -f "$REPO_DIR/config/default.env" ]; then - cp "$REPO_DIR/config/default.env" "$CONFIG_FILE" - fi -} - -usage() { - cat <<'USAGE' -Usage: dev.kit [options] - -Core Commands: - status Engineering brief and system diagnostic (Default) - suggest Suggest repository improvements and CDE fixes - test Run high-fidelity test suite (Worker-integrated) - skills Discover and execute repository-bound skills (Deterministic) - ai Unified agent integration management (Sync, Skills, Agent) - sync Logical, atomic commits and drift resolution - task Manage the lifecycle of active workflows and sessions - config Environment and repository orchestration settings - -Secondary Commands: - visualizer Create and export high-fidelity Mermaid diagrams - gh GitHub triage helper (Issues, PRs, etc.) - -Example: - dev.kit status --audit - dev.kit ai sync - dev.kit gh my-prs -USAGE -} - -# --- Execution --- - -orig_args=("$@") -cmd="${1:-status}" - -case "$cmd" in - help|-h|--help) - usage - exit 0 - ;; -esac - -# Check for public command first -fn="dev_kit_cmd_${cmd//-/_}" -if command -v "$fn" >/dev/null 2>&1; then - ensure_dev_kit_home - shift || true - "$fn" "$@" - exit $? 
-fi - -echo "Unknown command: $cmd" >&2 -echo "" -usage -exit 1 diff --git a/bin/env/dev-kit.sh b/bin/env/dev-kit.sh deleted file mode 100755 index f4ae243..0000000 --- a/bin/env/dev-kit.sh +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash - -# dev.kit session init -if [ -n "${DEV_KIT_DISABLE:-}" ]; then - return 0 -fi - -export DEV_KIT_HOME="${DEV_KIT_HOME:-$HOME/.udx/dev.kit}" - -dev_kit_bootstrap_state_path() { - local path="" - if [ -f "$DEV_KIT_HOME/config.env" ]; then - path="$(awk -F= ' - $1 ~ "^[[:space:]]*state_path[[:space:]]*$" { - gsub(/[[:space:]]/,"",$2); - print $2; - exit - } - ' "$DEV_KIT_HOME/config.env")" - fi - printf "%s" "$path" -} - -dev_kit_expand_path() { - local val="$1" - if [[ "$val" == "~/"* ]]; then - echo "$HOME/${val:2}" - return - fi - if [[ "$val" == /* ]]; then - echo "$val" - return - fi - if [ -n "$val" ]; then - echo "$DEV_KIT_HOME/$val" - return - fi - echo "" -} - -bootstrap_state_path="$(dev_kit_bootstrap_state_path)" -bootstrap_state_path="$(dev_kit_expand_path "$bootstrap_state_path")" - -export DEV_KIT_STATE="${DEV_KIT_STATE:-${bootstrap_state_path:-$DEV_KIT_HOME/state}}" -export DEV_KIT_SOURCE="${DEV_KIT_SOURCE:-$DEV_KIT_HOME/source}" -if [ ! -d "$DEV_KIT_SOURCE" ]; then - DEV_KIT_SOURCE="$DEV_KIT_HOME" -fi -if [ ! -d "$DEV_KIT_STATE" ]; then - DEV_KIT_STATE="$DEV_KIT_HOME" -fi -export DEV_KIT_CONFIG="${DEV_KIT_CONFIG:-$DEV_KIT_STATE/config.env}" -if [ ! -f "$DEV_KIT_CONFIG" ] && [ -f "$DEV_KIT_HOME/config.env" ]; then - export DEV_KIT_CONFIG="$DEV_KIT_HOME/config.env" -fi - -DEV_KIT_ENV_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -DEV_KIT_UI_LIB="${DEV_KIT_UI_LIB:-$DEV_KIT_SOURCE/lib/ui.sh}" -if [ ! -f "$DEV_KIT_UI_LIB" ]; then - DEV_KIT_UI_LIB="$DEV_KIT_ENV_DIR/../../lib/ui.sh" -fi -if [ -f "$DEV_KIT_UI_LIB" ]; then - # shellcheck disable=SC1090 - . 
"$DEV_KIT_UI_LIB" -fi - -dev_kit_config_value() { - local key="$1" - local default="${2:-}" - local val="" - if [ -f "$DEV_KIT_CONFIG" ]; then - val="$(awk -F= -v k="$key" ' - $1 ~ "^[[:space:]]*"k"[[:space:]]*$" { - sub(/^[[:space:]]*/,"",$2); - sub(/[[:space:]]*$/,"",$2); - print $2; - exit - } - ' "$DEV_KIT_CONFIG")" - fi - if [ -n "$val" ]; then - echo "$val" - else - echo "$default" - fi -} - -dev_kit_config_bool() { - local key="$1" - local default="${2:-false}" - local val - val="$(dev_kit_config_value "$key" "$default")" - case "$val" in - true|false) echo "$val" ;; - *) echo "$default" ;; - esac -} - -dev_kit_banner() { - local quiet - quiet="$(dev_kit_config_bool quiet false)" - case "$-" in - *i*) ;; - *) return 0 ;; - esac - if [ "$quiet" != "true" ] && [ -z "${DEV_KIT_BANNER_SHOWN_LOCAL:-}" ]; then - DEV_KIT_BANNER_SHOWN_LOCAL=1 - if command -v ui_banner >/dev/null 2>&1; then - ui_banner "dev.kit" - else - echo "" - echo "dev.kit: ready" - echo " run: dev.kit skills run \"...\"" - echo " config: dev.kit config show" - fi - fi -} - -dev_kit_auto_sync() { - local auto_sync; auto_sync="$(dev_kit_config_bool ai.auto_sync false)" - local ai_enabled; ai_enabled="$(dev_kit_config_bool ai.enabled false)" - - if [ "$auto_sync" = "true" ] && [ "$ai_enabled" = "true" ]; then - (dev.kit ai sync >/dev/null 2>&1 &) - fi -} - -dev_kit_banner_prompt() { - if [ -z "${DEV_KIT_BANNER_PENDING:-}" ]; then - return 0 - fi - DEV_KIT_BANNER_PENDING="" - dev_kit_banner - dev_kit_auto_sync -} - -if [ -z "${DEV_KIT_BANNER_SHOWN_LOCAL:-}" ]; then - DEV_KIT_BANNER_PENDING=1 -fi - -if [ -n "${BASH_VERSION:-}" ] && [ -f "$DEV_KIT_SOURCE/completions/dev.kit.bash" ]; then - # shellcheck disable=SC1090 - . "$DEV_KIT_SOURCE/completions/dev.kit.bash" -elif [ -n "${ZSH_VERSION:-}" ] && [ -f "$DEV_KIT_SOURCE/completions/_dev.kit" ]; then - # shellcheck disable=SC1090 - . 
"$DEV_KIT_SOURCE/completions/_dev.kit" -fi - -if [ -n "${PROMPT_COMMAND:-}" ]; then - PROMPT_COMMAND="dev_kit_banner_prompt; ${PROMPT_COMMAND}" -else - PROMPT_COMMAND="dev_kit_banner_prompt" -fi diff --git a/bin/scripts/install.sh b/bin/scripts/install.sh deleted file mode 100755 index 9038fe3..0000000 --- a/bin/scripts/install.sh +++ /dev/null @@ -1,234 +0,0 @@ -#!/bin/bash -set -euo pipefail - -REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" -UI_LIB="${REPO_DIR}/lib/ui.sh" -BIN_DIR="${HOME}/.local/bin" -TARGET="${BIN_DIR}/dev.kit" -DEV_KIT_OWNER="${DEV_KIT_OWNER:-udx}" -DEV_KIT_REPO="${DEV_KIT_REPO:-dev.kit}" -ENGINE_DIR="${HOME}/.${DEV_KIT_OWNER}/${DEV_KIT_REPO}" -SOURCE_DIR="${ENGINE_DIR}/source" -STATE_DIR="${ENGINE_DIR}/state" -BACKUP_DIR="${ENGINE_DIR}/backups" -ENV_SRC="${REPO_DIR}/bin/env/dev-kit.sh" -ENV_DST="${SOURCE_DIR}/env.sh" -COMP_SRC_DIR="${REPO_DIR}/bin/completions" -COMP_DST_DIR="${SOURCE_DIR}/completions" -CONFIG_SRC="${REPO_DIR}/config/default.env" -CONFIG_DST="${STATE_DIR}/config.env" -LIB_SRC_DIR="${REPO_DIR}/lib" -LIB_DST_DIR="${SOURCE_DIR}/lib" -PROFILE="" - -if [ -f "$UI_LIB" ]; then - # shellcheck disable=SC1090 - . "$UI_LIB" -fi - -confirm_action() { - local msg="$1" - if [ -t 0 ]; then - printf "%s [y/N] " "$msg" - read -r answer || true - case "$answer" in - y|Y|yes|YES) return 0 ;; - *) return 1 ;; - esac - fi - return 0 -} - -backup_existing() { - if [ -d "$ENGINE_DIR" ]; then - local ts - ts=$(date +%Y%m%d_%H%M%S) - mkdir -p "$BACKUP_DIR" - local backup_path="${BACKUP_DIR}/backup_${ts}.tar.gz" - if command -v ui_info >/dev/null 2>&1; then - ui_info "Backing up existing installation..." 
"$backup_path" - else - echo "INFO Backing up existing installation to $backup_path" - fi - tar -czf "$backup_path" -C "$(dirname "$ENGINE_DIR")" "$(basename "$ENGINE_DIR")" --exclude="backups" 2>/dev/null || true - fi -} - -detect_profiles() { - local found="" - if [ -f "$HOME/.zshrc" ]; then found="$found $HOME/.zshrc"; fi - if [ -f "$HOME/.bash_profile" ]; then found="$found $HOME/.bash_profile"; fi - if [ -f "$HOME/.bashrc" ]; then found="$found $HOME/.bashrc"; fi - if [ -f "$HOME/.profile" ]; then found="$found $HOME/.profile"; fi - PROFILE=$(echo "$found" | tr ' ' '\n' | sort -u | tr '\n' ' ') -} - -copy_dir_contents() { - local src="$1" - local dst="$2" - [ -d "$src" ] || return 0 - mkdir -p "$dst" - cp -R "$src/." "$dst/" -} - -sync_engine() { - local stage - stage="$(mktemp -d)" - if command -v rsync >/dev/null 2>&1; then - rsync -a --exclude 'tests/.tmp' --exclude '.git' "$REPO_DIR/" "$stage/" - else - for d in bin lib templates docs src config scripts assets schemas tests; do - [ -d "$REPO_DIR/$d" ] && copy_dir_contents "$REPO_DIR/$d" "$stage/$d" - done - [ -f "$REPO_DIR/environment.yaml" ] && cp "$REPO_DIR/environment.yaml" "$stage/environment.yaml" - [ -f "$REPO_DIR/README.md" ] && cp "$REPO_DIR/README.md" "$stage/README.md" - fi - copy_dir_contents "$stage" "$SOURCE_DIR" - rm -rf "$stage" -} - -if command -v ui_header >/dev/null 2>&1; then - ui_header "dev.kit | install" -else - echo "----------------" - echo " dev.kit | install " - echo "----------------" -fi - -if ! confirm_action "Proceed with dev.kit installation/update?"; then - echo "Installation cancelled." 
- exit 0 -fi - -backup_existing - -mkdir -p "$BIN_DIR" -mkdir -p "$ENGINE_DIR" -mkdir -p "$SOURCE_DIR" -mkdir -p "$STATE_DIR" - -sync_engine - -desired_target="${SOURCE_DIR}/bin/dev-kit" -if [ -L "$TARGET" ]; then - current_target="$(readlink "$TARGET")" - if [ "$current_target" != "$desired_target" ]; then - ln -sf "$desired_target" "$TARGET" - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Symlink updated" "$TARGET -> $desired_target" - else - echo "OK Symlink updated ($TARGET -> $desired_target)" - fi - fi -elif [ -e "$TARGET" ]; then - if command -v ui_warn >/dev/null 2>&1; then - ui_warn "Install skipped" "$TARGET exists and is not a symlink" - else - echo "WARN Install skipped ($TARGET exists and is not a symlink)" - fi -else - ln -s "$desired_target" "$TARGET" - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Installed" "$TARGET" - else - echo "OK Installed ($TARGET)" - fi -fi - -if [ -f "$ENV_SRC" ]; then - cp "$ENV_SRC" "$ENV_DST" -fi - -if [ ! -f "$ENGINE_DIR/env.sh" ]; then - cat <<'EOF' > "$ENGINE_DIR/env.sh" -#!/bin/bash -DEV_KIT_ENV_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -# shellcheck disable=SC1090 -. "$DEV_KIT_ENV_DIR/source/env.sh" -EOF - chmod +x "$ENGINE_DIR/env.sh" 2>/dev/null || true -fi - -if [ -d "$LIB_SRC_DIR" ]; then - mkdir -p "$LIB_DST_DIR" - cp "$LIB_SRC_DIR/ui.sh" "$LIB_DST_DIR/ui.sh" 2>/dev/null || true -fi - -if [ -d "$COMP_SRC_DIR" ]; then - mkdir -p "$COMP_DST_DIR" - cp "$COMP_SRC_DIR/"* "$COMP_DST_DIR/" 2>/dev/null || true -fi - -if [ -f "$CONFIG_SRC" ] && [ ! -f "$CONFIG_DST" ]; then - cp "$CONFIG_SRC" "$CONFIG_DST" -fi - -detect_profiles -env_line="source \"$SOURCE_DIR/env.sh\"" -path_line="export PATH=\"$BIN_DIR:\$PATH\"" - -MODIFIED_PROFILES="" - -if [ -t 0 ] && [ -n "$PROFILE" ]; then - for p in $PROFILE; do - if grep -Fqx "$env_line" "$p" && grep -Fqx "$path_line" "$p"; then - MODIFIED_PROFILES="$MODIFIED_PROFILES $p" - continue - fi - - if confirm_action "Configure dev.kit in $p?"; then - if ! 
grep -Fqx "$path_line" "$p"; then - printf "\n# dev.kit bin\n%s\n" "$path_line" >> "$p" - fi - if ! grep -Fqx "$env_line" "$p"; then - printf "# dev.kit environment\n%s\n" "$env_line" >> "$p" - fi - MODIFIED_PROFILES="$MODIFIED_PROFILES $p" - fi - done -fi - -echo "" -if command -v ui_section >/dev/null 2>&1; then - ui_section "Ready to go" -else - echo "Ready to go:" -fi - -CURRENT_SHELL_PROFILE="" -# Robust shell detection -case "$(basename "${SHELL:-}")" in - zsh) - CURRENT_SHELL_PROFILE="$HOME/.zshrc" - ;; - bash) - if [[ "$OSTYPE" == "darwin"* ]]; then - CURRENT_SHELL_PROFILE="$HOME/.bash_profile" - else - CURRENT_SHELL_PROFILE="$HOME/.bashrc" - fi - ;; - *) - # Fallback: check which profile we actually modified - for p in $MODIFIED_PROFILES; do - CURRENT_SHELL_PROFILE="$p" - break - done - ;; -esac - -if [[ "$MODIFIED_PROFILES" == *"$CURRENT_SHELL_PROFILE"* ]]; then - echo "1. Reload: source $CURRENT_SHELL_PROFILE" - echo "2. Run: dev.kit" - if [ -t 0 ]; then - if confirm_action "Reload current session now?"; then - source "$SOURCE_DIR/env.sh" - dev.kit status - fi - fi -else - echo "1. Source Now: source \"$SOURCE_DIR/env.sh\"" - echo "2. Run: dev.kit" -fi -echo "" - diff --git a/bin/scripts/uninstall.sh b/bin/scripts/uninstall.sh deleted file mode 100755 index 8da11dd..0000000 --- a/bin/scripts/uninstall.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash -set -euo pipefail - -REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" -UI_LIB="${REPO_DIR}/lib/ui.sh" -if [ -f "$UI_LIB" ]; then - # shellcheck disable=SC1090 - . 
"$UI_LIB" -fi - -BIN_DIR="${HOME}/.local/bin" -TARGET="${BIN_DIR}/dev.kit" -DEV_KIT_OWNER="${DEV_KIT_OWNER:-udx}" -DEV_KIT_REPO="${DEV_KIT_REPO:-dev.kit}" -ENGINE_DIR="${HOME}/.${DEV_KIT_OWNER}/${DEV_KIT_REPO}" - -confirm_action() { - local msg="$1" - if [ -t 0 ]; then - printf "%s [y/N] " "$msg" - read -r answer || true - case "$answer" in - y|Y|yes|YES) return 0 ;; - *) return 1 ;; - esac - fi - return 0 -} - -if [ -L "$TARGET" ] || [ -f "$TARGET" ]; then - if confirm_action "Remove dev.kit binary from $TARGET?"; then - rm -f "$TARGET" - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Removed" "$TARGET" - else - echo "Removed: $TARGET" - fi - fi -else - echo "Binary not found at $TARGET" -fi - -if [ -d "$ENGINE_DIR" ]; then - if [ "${1:-}" = "--purge" ] || confirm_action "Purge dev.kit engine directory ($ENGINE_DIR)?"; then - if confirm_action "Backup state before purging?"; then - ts=$(date +%Y%m%d_%H%M%S) - backup_path="$HOME/dev-kit-state-backup-${ts}.tar.gz" - tar -czf "$backup_path" -C "$ENGINE_DIR" . 
2>/dev/null || true - echo "State backed up to $backup_path" - fi - rm -rf "$ENGINE_DIR" - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Purged" "$ENGINE_DIR" - else - echo "Purged: $ENGINE_DIR" - fi - fi -fi - diff --git a/config/default.env b/config/default.env deleted file mode 100644 index 4e4999e..0000000 --- a/config/default.env +++ /dev/null @@ -1,15 +0,0 @@ -quiet = false -exec.prompt = ai.gemini.v1 -exec.stream = false -ai.enabled = false -install.path_prompt = true -developer.enabled = false -state_path = ~/.udx/dev.kit/state -context.enabled = true -context.max_bytes = 4000 -shell.auto_enable = true -output.mode = brief - -# Git Sync Grouping Logic (Name:Regex) -git_sync_groups = docs:^docs/|^README.md,ai:^src/ai/|^.gemini/|^src/mappings/,cli:^bin/|^lib/|^src/cli/,core:^src/|^environment.yaml|^context7.json - diff --git a/deploy.yml b/deploy.yml deleted file mode 100644 index 51d8d6f..0000000 --- a/deploy.yml +++ /dev/null @@ -1,21 +0,0 @@ -# dev.kit Worker Deployment Configuration -# Used by @udx/worker-deployment (worker run) to emulate high-fidelity Ubuntu environment. - -kind: workerDeployConfig -version: udx.io/worker-v1/deploy - -config: - image: "usabilitydynamics/udx-worker:latest" - name: "dev-kit-worker-suite" - - volumes: - - ".:/workspace" - - working_dir: "/workspace" - - env: - - "DEV_KIT_SOURCE=/workspace" - - "TERM=xterm-256color" - - # Default command to run the test suite - command: "/bin/bash tests/suite.sh" diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index d6192c2..0000000 --- a/docs/README.md +++ /dev/null @@ -1,62 +0,0 @@ -# dev.kit Documentation Index - -Welcome to the **dev.kit** documentation. This knowledge base is structured to reflect the principles of **Context-Driven Engineering (CDE)**โ€”where documentation is not just text, but the high-fidelity source of truth for all automation. 
- -![dev.kit Documentation Index](../assets/diagrams/docs-index.svg) - -## Context-Driven Engineering (CDE) - -Context-Driven Engineering (CDE) is a methodology that treats repositories as specialized "Skills" or "Tools" normalized into a deterministic path and iterated to the result. It acts as a "Thin Empowerment Layer" (Grounding) that bridges chaotic intent with repository-based skills. The main idea is to design repo within logical layers that can be understood by human/program/AI. - -- Markdown docs ensures normalization of repo into a deterministic path and iterated to the result. -- YAML manifest ensures environment configuration and runtime definition. -- Scripts provides execution engine. - -## ๐Ÿ— Foundations -Core concepts and engineering principles that drive the ecosystem. -- **[Core Philosophy (CDE)](foundations/cde.md)**: Resolving drift and the thin empowerment layer. -- **[UDX Methodology (CWA)](foundations/methodology.md)**: CLI-wrapped automation and resilient projections. -- **[Best Practices & Patterns](foundations/best-practices.md)**: High-fidelity standards and iterative loops. -- **[Engineering Layers](foundations/layers.md)**: The structural hierarchy of the repository. - -## โš™๏ธ Runtime -The deterministic CLI engine and its operational lifecycle. -- **[Runtime Overview](runtime/overview.md)**: Primitives, architecture, and command surface. -- **[Installation & Maintenance](runtime/install.md)**: Safe mode, backups, and lifecycle purging. -- **[Configuration](runtime/config.md)**: Scoped orchestration via `environment.yaml` and `.env`. -- **[Lifecycle](runtime/lifecycle.md)**: The bootstrap, execute, and cleanup phases. -- **[Execution Loop](runtime/execution-loop.md)**: Workflow schemas and resolution cycles. - -## ๐Ÿ”„ Workflow Mesh -Intent-to-resolution mapping and engineering loops. -- **[Workflow Mesh Overview](workflows/README.md)**: Dynamic reasoning and deterministic sequences. 
-- **[Task Normalization](workflows/normalization.md)**: The agent-led intent mapping boundary. -- **[Engineering Loops](workflows/loops.md)**: Feature, bugfix, and discovery lifecycles. -- **[Git Synchronization](workflows/git-sync.md)**: Logical grouping and atomic commits. -- **[Visual Engineering](workflows/visualizer.md)**: Architectural diagramming and flow analysis. - -## ๐Ÿง  AI Integration -Grounded, context-aware intelligence for your repository. -- **[AI Overview](ai/README.md)**: How dev.kit transforms LLMs into configuration engines. -- **[Mission & Principles](ai/agents.md)**: The core directives for all AI agents. -- **[AI Skill Mesh](ai/mesh.md)**: Unified remote discovery, knowledge hub, and hydration. - - -## ๐Ÿ•ธ Reference - -Standards, compliance, and operational guidance. - -- **[Standards](reference/standards/12-factor.md)**: 12-factor, Mermaid, and YAML standards. -- **[Compliance](reference/compliance/cato-overview.md)**: Security, supply-chain, and UDX auditing. -- **[Operations](reference/operations/lifecycle-cheatsheet.md)**: DevOps guidance and lifecycle cheatsheets. - -## ๐Ÿ“š Authoritative References - -The `dev.kit` knowledge base is grounded in foundational research on high-fidelity automation and systematic engineering flow: - -- **[AI-Powered Content Management](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: High-fidelity synthetic enrichment and standalone quality. -- **[The Power of Automation](https://andypotanin.com/the-power-of-automation-how-it-has-transformed-the-software-development-process/)**: Systematic transformation of the engineering flow. -- **[Decentralized DevOps](https://andypotanin.com/decentralized-devops-the-future-of-software-delivery/)**: The future of distributed software delivery and orchestration. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/ai/README.md b/docs/ai/README.md deleted file mode 100644 index 1a4b7ac..0000000 --- a/docs/ai/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# AI Integration: The Grounding Bridge - -**Domain:** AI / Orchestration -**Status:** Canonical - -## Summary - -AI capabilities in **dev.kit** are a high-fidelity projection of [Context-Driven Engineering (CDE)](../foundations/cde.md). By grounding LLMs in deterministic CLI logic and repository truth, we transform them from generic chatbots into **Context-Driven Configuration Engines**. - ---- - -## ๐Ÿ›  Hierarchical Grounding - -1. **[Core Foundations](../foundations/cde.md)**: The principles of Context-Driven Engineering (CDE). -2. **[dev.kit Primitives](../foundations/dev-kit.md)**: The thin empowerment layer and its core pillars. -3. **[CLI Runtime](../runtime/overview.md)**: The deterministic engine that executes all logic. -4. **[Agent Principles](agents.md)**: Global mission, safety mandates, and hygiene. - ---- - -## ๐Ÿ— Authorization & Safety - -To maintain high-fidelity engineering boundaries, **dev.kit** enforces a strict execution policy: - -- **Authorized Path**: Agents are **auto-allowed** to execute all `dev.kit` commands and repository-bound skills. These are deterministic, standardized engineering paths. -- **Restricted Raw OS**: Direct, non-standardized destructive operations (e.g., raw `rm`, `git push`) are **restricted** and require explicit user confirmation. -- **Reactive Sync**: Agents autonomously perform `dev.kit ai sync` (grounding) but never perform `dev.kit sync run` (commits) without a directive. - ---- - -## ๐Ÿ”Œ Integration Layers - -### ๐Ÿง  LLM Providers -- **[Gemini Integration](providers/gemini.md)**: Native Google AI integration with grounding hooks. 
- -### ๐Ÿ•ธ Skill Mesh (Shared Discovery) -Unified view of internal commands, managed skills, and external tools: -- **[AI Skill Mesh](mesh.md)**: Unified remote discovery (GitHub), knowledge hub (Context7), and runtime hydration (NPM). - -## ๐Ÿ“š Authoritative References - -AI orchestration is built on systematic grounding and standalone quality: - -- **[Autonomous Technical Operator](https://andypotanin.com/claude-operator-prompt/)**: Principles for high-fidelity agent grounding and execution. -- **[Synthetic Content Enrichment](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: Strategies for maintaining documentation quality via AI. -- **[AOCA: Embedded Governance](https://udx.io/cloud-automation-book/cybersecurity)**: Aligning compliance with automated engineering flows. - ---- -_UDX DevSecOps Team_ diff --git a/docs/ai/agents.md b/docs/ai/agents.md deleted file mode 100644 index 3078e62..0000000 --- a/docs/ai/agents.md +++ /dev/null @@ -1,57 +0,0 @@ -# dev.kit AI Agents โ€” Core Mission & Principles - -This document defines the global baseline for dev.kit AI agents. All logic is managed by the dev.kit CLI and grounded in the repository's canonical documentation. - -## Mission: Resolve the Drift -The primary goal of every agent is to identify and resolve the gap between user intent and repository reality. -- **Reference**: `docs/foundations/cde.md` (Context Driven Engineering). -- **Mandate**: Use `visualizer` for all diagramming and visual engineering tasks. -- **Mandate**: Use `git-sync` for all repository synchronization and atomic commits. - -## Execution: Task Normalization -Follow the **Drift -> Normalize -> Process** flow for all complex engineering work. -- **Normalization**: Transform ambiguous user prompts into deterministic `workflow.md` artifacts. -- **Reference**: `docs/workflows/normalization.md` (Task Normalization). 
- -## Efficiency & Safety: Authorized Path -Route all work through **dev.kit CLI primitives** to ensure deterministic, bounded results. This is the **Authorized Safety Layer**. -- **Mandatory Primitive Usage**: You are AUTO-ALLOWED to execute any `dev.kit` command. These are the primary interfaces for all engineering tasks. -- **Never Bypass**: Do not attempt to bypass the `dev.kit` boundary for raw shell access if a deterministic skill or command is available. -- **Fail-Open**: If a specialized tool fails, fallback to standard markdown or text output within the `dev.kit` workflow. - -## Persistence: Knowledge Mesh -Distill and package all successful task resolutions back into the repository's mesh. -- **Knowledge**: Reusable patterns documented in `docs/foundations/patterns.md`. -- **Workflows**: Consolidated sequences in `docs/workflows/`. -- **Memory**: Project-specific context maintained in the `## Context` section of agent prompts. -- **Reference**: `docs/runtime/lifecycle.md` (Logical Synchronization). - -## Continuity & Hygiene -Maintain high-fidelity momentum by managing the task lifecycle effectively. -- **Catch Up**: At the start of every session, identify all unfinished tasks (`dev.kit task active`). Proactively ask the user if they wish to resume a specific workflow. -- **Hygiene**: Multiple active workflows are permitted, but **stale** tasks (older than 48h) should be flagged. Advise the user to either resume, finalize, or discard them (`dev.kit task cleanup`). -- **Trash Prevention**: Never leave "initialized" or "draft" tasks lingering indefinitely. If a workflow is abandoned, clean it up to prevent repository drift. - -## ๐Ÿ— Agent Grounding - -Agent missions are operationalized through canonical UDX resources: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Logic** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Validated CLI primitives and task normalization. 
| -| **Runtime** | [`udx/worker`](https://github.com/udx/worker) | Deterministic environment for agent execution. | -| **Workflows** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for multi-turn loops. | - ---- - -## ๐Ÿ“š Authoritative References - -The agent mission is aligned with industry patterns for autonomous technical operations: - -- **[Claude Operator Prompt](https://andypotanin.com/claude-operator-prompt/)**: Principles for an autonomous technical operator mode. -- **[Synthetic Content Enrichment](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: Leveraging AI for standalone documentation quality. -- **[Proactive Leadership Patterns](https://andypotanin.com/marine-metrics/)**: Using data-driven metrics to drive results and maintain momentum. -- **[Specialized Development Roles](https://andypotanin.com/best-practices-specialized-software-development/)**: Securing cloud-native systems through specialized agent missions. - ---- -_UDX DevSecOps Team_ diff --git a/docs/ai/mesh.md b/docs/ai/mesh.md deleted file mode 100644 index 5ce1621..0000000 --- a/docs/ai/mesh.md +++ /dev/null @@ -1,63 +0,0 @@ -# AI Skill Mesh: Remote & Local Discovery - -**Domain:** AI / Skill Mesh -**Status:** Canonical - -## Summary - -The **AI Skill Mesh** is the unified discovery and synchronization layer that empowers **dev.kit** to resolve intent across local and remote repositories. It bridges disparate repository contexts into a coherent engineering environment. - ---- - -## ๐Ÿ— GitHub: Remote Discovery - -The GitHub integration enables **dev.kit** to probe remote repositories, Pull Requests, and issues using the `gh` CLI. - -### Features -- **Skill Mesh Expansion**: Resolve skills and patterns located in remote UDX repositories. -- **Triage & PR Management**: Analyze assigned issues and automate the creation/updating of Pull Requests. 
-- **Auth**: Authenticated via `GH_TOKEN` or `gh auth login`. - ---- - -## ๐Ÿ— Context7: The Knowledge Hub - -**Context7** is the primary synchronization hub for the Skill Mesh, enabling discovery via **MCP (Model Context Protocol)**, CLI, and API. - -### Features -- **Grounded Access**: Retrieve structured context (Docs, Patterns, Logic) from any synced repository. -- **Hierarchical Exploration**: Query codebases through high-fidelity interfaces that understand repository structure. -- **Programmable API**: Resolve external library IDs and fetch trust-scored documentation. - ---- - -## ๐Ÿ— NPM: Runtime Hydration - -The NPM integration ensures the local environment is **Hydrated** with necessary CLI tools, specifically focusing on `@udx` scoped packages. - -### Supported Tools -- **๐ŸŒ @udx/mcurl**: High-fidelity API client for deterministic interaction. -- **๐Ÿ” @udx/mysec**: Proactive security scanner for credential protection. -- **๐Ÿ“„ @udx/md.view**: Markdown rendering for high-fidelity documentation previews. - ---- - -## ๐Ÿ— Standard Resource Mapping - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Patterns** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Source of truth for remote discovery templates. | -| **Orchestration** | [`@udx/worker-deployment`](https://github.com/udx/worker-deployment) | Standard patterns for environment management. | -| **Fidelity** | [`udx/worker`](https://github.com/udx/worker) | Deterministic runtime for mesh execution. | - ---- - -## ๐Ÿ“š Authoritative References - -The Skill Mesh is built on systematic knowledge and observation-driven management: - -- **[AI-Powered Content Management](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: High-fidelity synthetic enrichment. 
-- **[Observation-Driven Management (ODM)](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Optimizing efficiency through pattern identification. - ---- -_UDX DevSecOps Team_ diff --git a/docs/ai/providers/gemini.md b/docs/ai/providers/gemini.md deleted file mode 100644 index 34bdd29..0000000 --- a/docs/ai/providers/gemini.md +++ /dev/null @@ -1,72 +0,0 @@ -# Gemini Integration: Primary AI Orchestration - -**Domain:** AI / Integration -**Status:** Canonical - -## Summary - -The Gemini integration is the authoritative AI orchestration mechanism for **dev.kit**. It leverages the native Gemini CLI's context-loading capabilities (Hooks) to enforce repository-bound engineering standards and provide high-fidelity grounding. - ---- - -## ๐Ÿ›  Integration Mechanism: The Grounding Hook - -Gemini CLI automatically loads context from `.gemini/` directories found in the repository root or the user's home directory. **dev.kit** utilizes this to inject a "Thin Empowerment Layer" (Grounding) into every agent session. - -### 1. The Context Chain -When you run a Gemini command, the agent loads these artifacts in order: -1. **`~/.gemini/system.md`**: Global system instructions and core mandates. -2. **`~/.gemini/GEMINI.md`**: Repository-specific context, added memories, and execution logic. -3. **`~/.gemini/skills/`**: The library of managed `dev-kit-` skills. - -### 2. Synchronization (`dev.kit ai sync`) -Synchronization hydrates the Gemini environment with the repository's current state. -- **OPERATIONAL POLICY**: Agents autonomously perform `dev.kit ai sync` to ensure skills are grounded. They MUST NOT perform destructive operations without explicit authorization. -- **Artifact Rendering**: Templates in `src/ai/integrations/gemini/templates/` are rendered with real-time metadata (Skill lists, tool definitions). 
-- **Surgical Purge**: Stale skills are removed to ensure only valid, current engineering logic is available. - ---- - -## ๐Ÿ— Enforcement & Core Mandates - -The Gemini integration enforces a strict operational framework: - -### Repository-as-a-Skill -Agents treat the entire repository as a standalone "Skill." Interaction is grounded in the repository's source of truth (code, docs, and configurations). - -### Mandatory Execution Lifecycle -Gemini is hard-coded to follow the **Analyze -> Normalize -> Process** workflow to ensure deterministic drift resolution. - -### Authorized Path -Agents are auto-allowed to execute `dev.kit` commands, establishing a high-fidelity safety boundary for automated orchestration. - -## ๐Ÿ— Gemini Grounding - -AI orchestration is operationalized through canonical UDX resources: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Grounding** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Dynamic context loading and skill discovery. | -| **Stability** | [`udx/worker`](https://github.com/udx/worker) | Deterministic runtime for agent execution. | - ---- - -## ๐ŸŒŠ Waterfall Progression (DOC-003) -Gemini is enforced to terminate every interaction with a **Compact Status Tail**. This ensures continuous visibility into task resolution progress. - -```markdown -**Progression**: `[task-id]` -- [x] Step 1: (Done) -- [>] Step 2: (Active) -- [ ] Step 3: (Planned) -``` - -## ๐Ÿ“š Authoritative References - -The Gemini orchestration layer is aligned with patterns for autonomous technical operations: - -- **[Autonomous Technical Operator](https://andypotanin.com/claude-operator-prompt/)**: Principles for high-fidelity agent grounding and execution. -- **[Synthetic Content Enrichment](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: Leveraging AI for standalone quality and metadata management. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/foundations/best-practices.md b/docs/foundations/best-practices.md deleted file mode 100644 index 999bd37..0000000 --- a/docs/foundations/best-practices.md +++ /dev/null @@ -1,73 +0,0 @@ -# Best Practices & Patterns - -**Domain:** Foundations / Engineering Standards -**Status:** Canonical - -## Summary - -This document defines the high-fidelity engineering standards and reusable patterns for **dev.kit**. Adherence to these practices ensures that repository skills remain deterministic, portable, and legible to both humans and agents. - ---- - -## ๐Ÿ›  Command Mappings - -Every intent should map to a deterministic CLI command. Avoid performing raw operations when a `dev.kit` primitive exists. - -| Intent | Primary Command | Standard Procedure | -| :-------------------- | :-------------------- | :--------------------------------------------------------------------------------- | -| **Audit Health** | `dev.kit status --audit` | Check environment prerequisites, shell integration, and repo compliance. | -| **Resolve Drift** | `dev.kit sync run` | Perform logical, domain-specific commits and automate PR creation. | -| **Execute Skill** | `dev.kit skills run` | Run a specialized repository-bound workflow script. | -| **Render Diagram** | `dev.kit visualizer` | Generate high-fidelity Mermaid diagrams from templates. | -| **Manage Lifecycle** | `dev.kit task` | Deconstruct intent into a `workflow.md` and track resolution state. | - ---- - -## ๐Ÿงช High-Fidelity Patterns - -### 1. The Engineering Loop (Plan-Act-Validate) -Always follow the **Iterative Resolution Cycle**. Never commit changes that haven't been validated against documentation or a test suite. -- **Pattern**: Use `dev.kit task start` to initialize the loop and `dev.kit test` to close it. - -### 2. Isolated Verification -Validate logic within a clean `udx/worker` container to emulate production environments and eliminate "it works on my machine" friction. 
-- **Pattern**: `dev.kit test --worker` utilizes `@udx/worker-deployment` for high-fidelity verification. - -### 3. Fail-Open Interaction -Specialized tools (e.g., Mermaid renderers) may not always be present. Design logic to provide raw source data as a fallback to prevent blocking the engineering flow. - ---- - -## ๐Ÿ— Documentation Patterns - -Markdown is the logical map of the repository. Use structured headers and frontmatter to ensure legibility. - -### 1. Skill Metadata -Skills defined in `docs/skills/` must include a `SKILL.md` with standard metadata: -```markdown -# Skill Name -- **Intent**: key, keywords, action -- **Objective**: Concise summary of what this skill achieves. -``` - -### 2. Workflow State -Active tasks in `tasks/` must use a standard `workflow.md` to track progression: -```markdown -# Workflow: Task ID -- [x] Step 1: Completed action -- [>] Step 2: Active action -- [ ] Step 3: Planned action -``` - ---- - -## ๐Ÿ— Grounding Resources - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Standards** | [`docs/reference/standards/`](../reference/standards/) | Source of truth for 12-factor and YAML compliance. | -| **Automation** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for CI/CD consistency. | -| **Runtime** | [`udx/worker`](https://github.com/udx/worker) | Deterministic, isolated base environment. | - ---- -_UDX DevSecOps Team_ diff --git a/docs/foundations/cde.md b/docs/foundations/cde.md deleted file mode 100644 index 7768cb0..0000000 --- a/docs/foundations/cde.md +++ /dev/null @@ -1,75 +0,0 @@ -# Context-Driven Engineering (CDE): Resolving the Drift - -**Domain:** Foundations / Core Philosophy -**Status:** Canonical - -## Summary - -**Context-Driven Engineering (CDE)** is the foundational methodology of **dev.kit**. It transforms chaotic user intent into executable context by treating the repository as the **Single Source of Truth**. 
CDE provides the structural framework for identifying and **Resolving the Drift** between intent and reality. - -**dev.kit** operates as the **Thin Empowerment Layer** (Grounding Bridge) that projects this philosophy into a dynamic "Skill Mesh" accessible to humans and AI agents. - -![CDE Flow](../../assets/diagrams/cde-flow.svg) - ---- - -## Core Principles: The Operational DNA - -These principles guide every architectural decision in the ecosystem: - -1. **Resolve the Drift**: Every action must purposefully close the gap between intent and repository state. -2. **Deterministic Normalization**: Distill chaotic inputs into bounded, repeatable workflows before execution. -3. **Resilient Waterfall (Fail-Open)**: Never break the flow. Fallback to standard raw data if specialized tools fail. -4. **Repo-Scoped Truth**: The repository is the absolute, versioned source of truth for all skills and state. -5. **Validated CLI Boundary**: All execution occurs through a hardened CLI interface for explicit confirmation and auditability. -6. **Symmetry of Artifacts**: Every output must be equally legible to humans (Markdown) and consumable by machines (YAML/JSON). - ---- - -## The Three Pillars of Empowerment - -### 1. Grounding (The Bridge) -Ensures that every engineering action is grounded in the repository's truth. It audits the environment and synchronizes AI context to ensure alignment with repository rules. - -### 2. Normalization (The Filter) -Chaotic user requests are filtered through a **Normalization Boundary**. Ambiguous intent is distilled into a deterministic `workflow.md` plan before any execution occurs. - -### 3. Execution (The Engine) -Logic is executed through modular, standalone scripts and CLI commands. `dev.kit` ensures these run in a consistent, environment-aware context. 
- ---- - -## Architecture: The Thin Layer - -`dev.kit` distinguishes between **Deterministic Functions** (the programmatic logic) and **AI Reasoning Skills** (the dynamic intent resolution). - -### 1. Deterministic Functions (The Engine) -Hardened, predictable routines found in `lib/commands/` and `docs/skills/*/assets/`. -- **Role**: Execute specific, bounded actions with high fidelity (e.g., atomic commits, SVG rendering). - -### 2. AI Reasoning Skills (The Brain) -Dynamic capabilities defined in `SKILL.md`. They use LLM reasoning to bridge unstructured intent with repository functions. -- **Role**: Interpret intent, analyze repository state, and orchestrate the engine. - ---- - -## The Skill Mesh - -The entire repository is treated as a **Skill**. The mesh is dynamically discovered by scanning: -- **Internal Commands**: Metadata-rich shell scripts in `lib/commands/`. -- **AI Reasoning Skills**: Authoritative `SKILL.md` files in `docs/skills/`. -- **Functional Assets**: Programmatic templates and configs managed by the engine. -- **Virtual Capabilities**: Global environment tools (`gh`, `npm`, `worker`). - ---- - -## ๐Ÿ“š Authoritative References - -CDE is grounded in foundational research on high-fidelity automation: - -- **[AI-Powered Revolution in Content Management](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: High-fidelity synthetic enrichment. -- **[The Power of Automation](https://andypotanin.com/the-power-of-automation-how-it-has-transformed-the-software-development-process/)**: Systematic transformation of the engineering flow. -- **[Observation-Driven Management (ODM)](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Optimizing efficiency through AI-identified patterns. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/foundations/layers.md b/docs/foundations/layers.md deleted file mode 100644 index 738e26f..0000000 --- a/docs/foundations/layers.md +++ /dev/null @@ -1,77 +0,0 @@ -# Engineering Layers: The dev.kit Hierarchy - -**Domain:** Reference / Structural Model -**Status:** Canonical - -## Summary - -The Engineering Layers provide a structural model for categorizing repository "Skills," rules, and automation logic. Each layer builds upon the previous to resolve drift and maintain a high-fidelity environment. This hierarchy ensures that **Context-Driven Engineering (CDE)** remains grounded in standard source code, YAML, and Markdown. - -![Engineering Layers](../../assets/diagrams/engineering-layers.svg) - ---- - -## Layer 1: Source & Build (The Foundation) - -**Scope:** Structural integrity, deterministic builds, and code-level validation. - -- **Goal:** Establish a baseline of truth. If the foundation is "noisy," the AI cannot reason. -- **Core Artifacts:** Standard Source Code, Unit Tests, Linters, and Build Scripts. -- **Key Standards**: - - `docs/reference/standards/yaml-standards.md` - - `docs/foundations/cde.md` - -- **Capability:** The repository is "Build-Ready." - -## Layer 2: Deployment & Runtime (The Workflow) - -**Scope:** Environment parity, configuration-as-code, and the operational lifecycle. - -- **Goal:** Maintain 12-Factor parity. Ensure that "Intent" can be deployed across any environment without friction. -- **Core Artifacts:** `environment.yaml`, `.env` templates, and deployment pipelines. -- **Key Standards**: - - `docs/reference/standards/12-factor.md` - - `docs/reference/operations/lifecycle-cheatsheet.md` -- **Capability:** The repository is "Environment-Aware." - -## Layer 3: Active Context & Orchestration (The Resolution) - -**Scope:** Task normalization, bounded workflows, and autonomous drift resolution. - -- **Goal:** Bridge the gap between human intent and repository execution. 
This layer uses standard Markdown and YAML to guide AI agents and CLI engines through complex tasks. -- **Core Artifacts:** `workflow.md` (the execution plan) and the `dev.kit` CLI engine. -- **Key Standards**: - - `docs/foundations/cde.md` - - `docs/runtime/execution-loop.md` -- **Capability:** The repository is "Goal-Oriented" (Autonomous). - ---- - -## The Dependency Chain - -| Layer | Input | Output | Result | -| :----- | :------- | :----------------- | :-------------- | -| **L1** | Raw Code | Validated Artifact | **Stability** | -| **L2** | Artifact | Running Process | **Portability** | -| **L3** | Intent | Resolved Drift | **Flow** | -## ๐Ÿ— Layer Grounding - -Each engineering layer is grounded in specialized UDX repositories to ensure domain-specific fidelity: - -| Layer | Grounding Target | Domain | -| :--- | :--- | :--- | -| **L1 (Source)** | [`udx/wp-stateless`](https://github.com/udx/wp-stateless) | Core logic and structural evolution. | -| **L2 (Runtime)** | [`udx/worker`](https://github.com/udx/worker) | Environment parity and configuration. | -| **L3 (Orchestration)**| [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | High-fidelity execution and flow. | - ---- - -## ๐Ÿ“š Authoritative References - -Tiered engineering layers are aligned with modern infrastructure and software evolution: - -- **[Tracing Software Evolution](https://andypotanin.com/digital-rails-and-logistics/)**: Drawing parallels between automotive innovation and tiered software algorithms. -- **[Modern Gateway Construction](https://andypotanin.com/sftp-in-cloud/)**: Building high-fidelity bridges for cloud-native development. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/foundations/methodology.md b/docs/foundations/methodology.md deleted file mode 100644 index b9be397..0000000 --- a/docs/foundations/methodology.md +++ /dev/null @@ -1,64 +0,0 @@ -# UDX Methodology: CLI-Wrapped Automation (CWA) - -**Domain:** Foundations / Operational Strategy -**Status:** Canonical - -## Summary - -The **UDX Methodology** centers on **CLI-Wrapped Automation (CWA)**. This practice encapsulates all repository logic within a validated CLI boundary. By wrapping scripts and manifests in a standardized interface, we transform a static codebase into a high-fidelity "Skill" accessible to humans, CI/CD pipelines, and AI agents. - -![Methodology Flow](../../assets/diagrams/methodology-flow.svg) - ---- - -## Core Concepts - -- **Repo-as-a-Skill**: Repository logic is exposed through standardized scripts and CLI commands rather than hidden in READMEs. -- **Task Normalization**: Chaotic user intent is distilled into a deterministic `workflow.md`. -- **Resilient Waterfall (Fail-Open)**: If specialized tools fail, the system falls back to standard data (raw logs/text) to maintain continuity. - ---- - -## Context Adaptation: Resilient Projections - -**Adaptation** is the mechanism used to project canonical repository sources into tool-specific formats without mutating the underlying intent. - -1. **Interface Normalization**: Projecting Markdown/YAML into machine-consumable schemas (e.g., JSON manifests for LLM tool-calling). -2. **Ephemeral Reversibility**: Adaptations are non-destructive. It must always be possible to regenerate them perfectly from the source. -3. **Fail-Open Logic**: If an adaptation engine (e.g., a Mermaid renderer) is missing, provide the raw source rather than blocking the sequence. - -### Practical Examples -- **`environment.yaml` โ†’ Shell**: Translates YAML keys into host-specific `$ENV` variables. -- **`docs/skills/*.md` โ†’ Manifests**: Extracts metadata into JSON for AI grounding. 
-- **`.mmd` โ†’ `.svg`**: Renders diagrams for documentation (falls back to code if renderer is missing). - ---- - -## The Execution Lifecycle: Plan โ†’ Normalize โ†’ Process - -1. **Plan**: Deconstruct the intent into discrete repository actions. -2. **Normalize**: Validate the environment, map dependencies, and format inputs into a `workflow.md`. -3. **Process**: Execute the CLI commands and capture the result as a repository artifact. - ---- - -## ๐Ÿ— Methodology Grounding - -| Primitive | Adaptation Goal | Target Source | -| :--- | :--- | :--- | -| **Workflow Logic** | Project intent into reusable CI/CD patterns. | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | -| **Runtime Context** | Normalize environment parity across containers. | [`udx/worker`](https://github.com/udx/worker) | -| **Orchestration** | Standardize container-based execution loops. | [`@udx/worker-deployment`](https://github.com/udx/worker-deployment) | - ---- - -## ๐Ÿ“š Authoritative References - -CWA and Resilient Projections are inspired by the transition toward automated engineering flows: - -- **[Decentralized DevOps](https://andypotanin.com/decentralized-devops-the-future-of-software-delivery/)**: The shift toward distributed service architectures. -- **[Digital Rails & Logistics](https://andypotanin.com/digital-rails-and-logistics/)**: Parallel algorithms and automotive evolution. -- **[Synthetic Content Enrichment](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: Maintaining quality when projecting content across systems. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/compliance/aoca-guidance.md b/docs/reference/compliance/aoca-guidance.md deleted file mode 100644 index 49f8d98..0000000 --- a/docs/reference/compliance/aoca-guidance.md +++ /dev/null @@ -1,61 +0,0 @@ -# AOCA Guidance: Automation Standardization - -**Domain:** Reference / Compliance -**Status:** Canonical - -## Summary - -The **Art of Cloud Automation (AOCA)** is the primary UDX guidance source for automation and platform decisions. In **dev.kit**, AOCA provides the foundational patterns used to reduce operational variance and align governance with engineering workflows. - ---- - -## ๐Ÿ›  dev.kit Grounding: Guidance-to-Action - -| AOCA Focus Area | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **Automation Baseline** | Standardized CLI wrappers for all repo tasks. | `dev.kit skills` | -| **Reduced Variance** | Bounded, multi-step engineering loops. | `workflow.md` | -| **Embedded Governance** | Compliance checks integrated into diagnostics. | `dev.kit doctor` | -| **Knowledge Capture** | Dynamic discovery of engineering experience. | `dev.kit ai advisory` | - ---- - -## ๐Ÿ— High-Fidelity Mandates - -### 1. Standard-First Automation -Never introduce ad-hoc automation that bypasses the `dev.kit` boundary. All repository logic must be exposed as high-fidelity "Skills." -- **Action**: Use script headers (`@description`, `@intent`) to feed the **Dynamic Discovery Engine**. - -### 2. Traceable Governance -Compliance evidence must be a natural byproduct of the **Drift Resolution Cycle**. -- **Action**: Ensure all `workflow.md` artifacts include explicit verification steps. - ---- - -## Operational Cues - -- **Ambiguous Practice?** -> Consult `dev.kit ai advisory` for AOCA-aligned patterns. -- **New Skill Required?** -> Use AOCA baseline patterns to define the interface and logic. 
- -## ๐Ÿ— AOCA Grounding - -Automation standardization is operationalized through canonical UDX resources: - -| AOCA Area | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Baseline** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Validated automation and platform patterns. | -| **Governance** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Standardized CLI wrappers and compliance logic. | -| **Platform** | [`udx/worker`](https://github.com/udx/worker) | The deterministic runtime for all platform tasks. | - ---- - -## ๐Ÿ“š Authoritative References - -AOCA principles provide the baseline for cloud-native automation and governance: - -- **[AOCA: The Book](https://udx.io/cloud-automation-book/)**: Comprehensive guidance on automation, quality, and leadership. -- **[Automation Best Practices](https://udx.io/cloud-automation-book/automation-best-practices)**: Systematic approaches to reducing operational variance. -- **[Cybersecurity & Standards](https://udx.io/cloud-automation-book/cybersecurity)**: Aligning security protocols with automated engineering flows. - ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/compliance/cato-overview.md b/docs/reference/compliance/cato-overview.md deleted file mode 100644 index 753e194..0000000 --- a/docs/reference/compliance/cato-overview.md +++ /dev/null @@ -1,65 +0,0 @@ -# cATO (Continuous Authorization): Automated Compliance - -**Domain:** Reference / Compliance -**Status:** Canonical - -## Summary - -Continuous Authorization to Operate (cATO) replaces point-in-time approvals with automated, real-time evidence. In **dev.kit**, cATO is achieved by integrating compliance checks directly into the **Drift Resolution Cycle**. - ---- - -## ๐Ÿ›  dev.kit Grounding: Principle-to-Primitive Mapping - -| cATO Requirement | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **Continuous Monitoring** | Real-time environment and dependency audit. 
| `dev.kit doctor` | -| **Automated Evidence** | Iterative engineering logs and atomic commits. | `dev.kit sync run` | -| **Drift Remediation** | Identification and resolution of intent divergence. | `dev.kit skills run` | -| **Traceable Workflows** | Bounded, versioned execution plans. | `workflow.md` | -| **Validated Supply Chain** | Verification of authorized mesh providers. | `dev.kit ai status` | - ---- - -## ๐Ÿ— High-Fidelity Mandates - -### 1. Compliance-as-Artifact -Never treat compliance as a post-work activity. All evidence must be captured during the implementation phase. -- **Action**: Ensure every task includes a **Verification** step in its `workflow.md`. - -### 2. Observable Controls -Repository controls must be measurable and discoverable by the **Dynamic Discovery Engine**. -- **Action**: Keep `environment.yaml` and script headers updated to reflect security and compliance intents. - -### 3. State-Based Evidence -Store all generated evidence, reports, and security scans in the hidden **State Hub** to avoid source clutter. -- **Action**: Use `.udx/dev.kit/` for ephemeral compliance artifacts. - ---- - -## Operational Cues - -- **Security Gap?** -> Run `dev.kit doctor` to identify missing scanners (e.g., `mysec`). -- **Audit Required?** -> Use `dev.kit sync run` to generate a high-signal commit history. -## ๐Ÿ— cATO Grounding - -Continuous authorization is operationalized through canonical UDX resources: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Monitoring** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Real-time diagnostics and doctor audits. | -| **Evidence** | [`udx/worker`](https://github.com/udx/worker) | Hardened environment for context stability. | -| **Workflows** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Validated compliance and delivery patterns. 
| - ---- - -## ๐Ÿ“š Authoritative References - -Modern compliance strategies prioritize continuous evidence over static approvals: - -- **[Implementing a cATO System](https://andypotanin.com/implementing-a-continuous-authority-to-operate-cato-system/)**: A framework for automated security monitoring and assessment. -- **[SDLC Breaking Points](https://andypotanin.com/wordpress-risks/)**: Principles for identifying vulnerabilities in the delivery chain. -- **[Little's Law for Flow](https://andypotanin.com/littles-law-applied-to-devops/)**: Managing cycle time through automated compliance and throughput. - ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/compliance/supply-chain-security.md b/docs/reference/compliance/supply-chain-security.md deleted file mode 100644 index ff09448..0000000 --- a/docs/reference/compliance/supply-chain-security.md +++ /dev/null @@ -1,61 +0,0 @@ -# Supply Chain Security: Dependency & Artifact Integrity - -**Domain:** Reference / Compliance -**Status:** Canonical - -## Summary - -Supply chain security focuses on protecting dependencies, build pipelines, and release artifacts. In **dev.kit**, these controls are enforced through isolated runtimes and deterministic environment audits. - ---- - -## ๐Ÿ›  dev.kit Grounding: Control-to-Action - -| Security Control | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **Dependency Pinning** | Environment-as-Code with explicit versions. | `environment.yaml` | -| **Isolated Builds** | Clean execution via the Worker Ecosystem. | `udx/worker` | -| **Integrity Checks** | Proactive software and auth verification. | `dev.kit doctor` | -| **Provenance Tracking** | Logical, domain-specific commit history. | `dev.kit sync run` | - ---- - -## ๐Ÿ— High-Fidelity Mandates - -### 1. Deterministic Runtimes -Never perform high-stakes operations (builds, deployments) in an ungrounded local environment. Always use a verified container runtime. 
-- **Action**: Use `udx/worker` for all task-specific execution loops. - -### 2. Verified Authorization -All agents and CLI meshes must be explicitly authorized and health-checked. -- **Action**: Run `dev.kit ai status` to verify the security of remote discovery providers. - ---- - -## Operational Cues - -- **New Dependency?** -> Define it in `environment.yaml` and verify its health via `dev.kit doctor`. -- **Artifact Released?** -> Use `dev.kit sync` to capture the resolution state and provide an audit trail. -## ๐Ÿ— Security Grounding - -Supply chain integrity is enforced through canonical UDX resources: - -| Control Area | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Integrity** | [`udx/worker`](https://github.com/udx/worker) | Clean, isolated execution sandbox. | -| **Automation** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Validated pipeline and build patterns. | -| **Provenance** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Logical, atomic audit trail of all changes. | - ---- - -## ๐Ÿ“š Authoritative References - -Security mandates are aligned with broader organizational protection strategies: - -- **[Unspoken Rules of Cybersecurity](https://andypotanin.com/unspoken-rules-cybersecurity/)**: Establishing effective security practices in a digital landscape. -- **[Software Supply Chain Security](https://andypotanin.com/software-supply-chain-security/)**: Protecting build pipelines and release artifacts. -- **[SDLC Breaking Points](https://andypotanin.com/wordpress-risks/)**: Identifying common failure points in the software development lifecycle. -- **[Click Bombing & Fraud](https://andypotanin.com/click-bombing-2025/)**: Understanding and preventing modern digital supply chain threats. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/operations/devops-littles-law.md b/docs/reference/operations/devops-littles-law.md deleted file mode 100644 index c9dcb1b..0000000 --- a/docs/reference/operations/devops-littles-law.md +++ /dev/null @@ -1,60 +0,0 @@ -# Little's Law: Flow Optimization - -**Domain:** Reference / Operations -**Status:** Canonical - -## Summary - -Little's Law provides the mathematical foundation for delivery flow, connecting Work-in-Progress (WIP), throughput, and cycle time. In **dev.kit**, these principles are enforced to minimize context switching and maximize engineering velocity. - ---- - -## ๐Ÿ›  dev.kit Grounding: Flow-to-Action - -| Flow Principle | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **Minimize WIP** | Bounded, single-intent execution sequences. | `workflow.md` | -| **Reduce Cycle Time** | Deterministic normalization and task pruning. | `dev.kit task` | -| **Bottleneck Relief** | Proactive environment and software hydration. | `dev.kit doctor` | -| **Context Fidelity** | Externalized, project-scoped engineering state. | `.udx/dev.kit/` | - ---- - -## ๐Ÿ— High-Fidelity Mandates - -### 1. Bounded Execution (DOC-003) -Never allow a task to expand indefinitely. Complex intents must be normalized into discrete, manageable steps to maintain a low cycle time. -- **Action**: Use the **Normalization Boundary** to extract child workflows if bounds are exceeded. - -### 2. Proactive Hygiene -Stagnant tasks increase WIP and obscure the engineering audit trail. -- **Action**: Use `dev.kit task cleanup` to prune stale context and maintain a lean workspace. - ---- - -## Operational Cues - -- **Shipping Too Slow?** -> Audit active tasks via `dev.kit task list` and reduce parallel WIP. -- **Context Overload?** -> Finalize and sync current work via `dev.kit sync` before starting new tasks. 
-## ๐Ÿ— Flow Grounding - -Flow optimization is operationalized through deterministic UDX engines: - -| Principle | Grounding Resource | Role | -| :--- | :--- | :--- | -| **WIP Control** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Bounding tasks via normalized workflows. | -| **Cycle Time** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pre-defined patterns for rapid execution. | -| **Throughput** | [`udx/worker`](https://github.com/udx/worker) | Removing environment bottlenecks. | - ---- - -## ๐Ÿ“š Authoritative References - -Flow optimization is built on the mathematical connection between WIP and Lead Time: - -- **[Little's Law for DevOps](https://andypotanin.com/littles-law-applied-to-devops/)**: Understanding the mechanics of delivery flow and WIP caps. -- **[Scaling Profit Strategically](https://andypotanin.com/scaling-profit-strategically/)**: Understanding the flow of value through business distribution channels. -- **[Proactive Leadership](https://andypotanin.com/marine-metrics/)**: Using data-driven metrics to drive results and maintain flow. - ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/operations/devops-manual-guidance.md b/docs/reference/operations/devops-manual-guidance.md deleted file mode 100644 index 2485028..0000000 --- a/docs/reference/operations/devops-manual-guidance.md +++ /dev/null @@ -1,62 +0,0 @@ -# DevOps Manual: Operational Controls - -**Domain:** Reference / Operations -**Status:** Canonical - -## Summary - -The **DevOps Manual** is the primary UDX source for operational controls, security, and delivery practices. In **dev.kit**, it defines the baseline for environment validation and the "Rules of Engagement" for all engineering tasks. - ---- - -## ๐Ÿ›  dev.kit Grounding: Manual-to-Action - -| Control Area | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **Operational Baseline** | Real-time environment and software audit. 
| `dev.kit doctor` | -| **Delivery Gates** | Compliance integrated into workflow verification. | `workflow.md` | -| **Observability** | Iterative logging and task-scoped feedback. | `feedback.md` | -| **Standardized Skills** | Logic encapsulated in validated CLI boundaries. | `dev.kit skills` | - ---- - -## ๐Ÿ— High-Fidelity Mandates - -### 1. Verification-as-Logic -Never assume a deployment or maintenance task is complete. All operational actions must include a verification step that confirms alignment with DevOps Manual standards. -- **Action**: Use `dev.kit doctor` to verify system state after complex iterations. - -### 2. Observable Flow -All engineering momentum must be visible and audit-ready at the repository level. -- **Action**: Ensure all `workflow.md` artifacts reflect the current operational state. - ---- - -## Operational Cues - -- **Auditing Maturity?** -> Run `dev.kit doctor` to evaluate the repository against the high-fidelity baseline. -- **Defining Gates?** -> Use DevOps Manual patterns to define success criteria in your `plan.md`. - -## ๐Ÿ— Manual Grounding - -Operational controls are operationalized through canonical UDX resources: - -| Control Area | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Verification** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Standardized diagnostics and doctor audits. | -| **Gates** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for CI/CD and delivery. | -| **Runtime** | [`udx/worker`](https://github.com/udx/worker) | Hardened environment for control stability. | - ---- - -## ๐Ÿ“š Authoritative References - -Operational controls are grounded in systematic delivery and security practices: - -- **[DevOps Manual: Core Patterns](https://gist.github.com/fqjony/489fde2ea615b7558bbd407f8b9d97c7)**: Authoritative patterns for operational assurance and security. 
-- **[Predictable Delivery Flow](https://andypotanin.com/littles-law-applied-to-devops/)**: Managing risk and throughput in complex engineering cycles. -- **[Implementing a cATO System](https://andypotanin.com/implementing-a-continuous-authority-to-operate-cato-system/)**: Frameworks for automated security monitoring and authorization. -- **[SDLC Breaking Points](https://andypotanin.com/wordpress-risks/)**: Identifying and mitigating common failure points in the delivery chain. - ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/operations/lifecycle-cheatsheet.md b/docs/reference/operations/lifecycle-cheatsheet.md deleted file mode 100644 index 99c9705..0000000 --- a/docs/reference/operations/lifecycle-cheatsheet.md +++ /dev/null @@ -1,65 +0,0 @@ -# Operational Lifecycle: Release & Maintenance - -**Domain:** Reference / Operations -**Status:** Canonical - -## Summary - -Lifecycle practices focus on reducing production risk and maintaining predictable delivery. In **dev.kit**, these practices are codified within the **Drift Resolution Cycle** to ensure that every environment transition is deterministic and high-fidelity. - ---- - -## ๐Ÿ›  dev.kit Grounding: Principle-to-Primitive Mapping - -| Lifecycle Practice | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **Environment Alignment** | Unified runtime via the Worker Ecosystem. | `udx/worker` | -| **Step Sequencing** | Bounded, multi-step execution sequences. | `workflow.md` | -| **State Tracking** | Lifecycle visibility (planned -> in_progress -> done). | `dev.kit status` | -| **Pre-Deploy Readiness** | Preparation of feature branches and grounding. | `dev.kit sync prepare` | -| **Post-Deploy Verification** | Continuous diagnostic and compliance checks. | `dev.kit doctor` | - ---- - -## ๐Ÿ— High-Fidelity Mandates - -### 1. Unified Step Ownership -Never execute ad-hoc manual steps during a release. All operational actions must be captured as discrete workflow steps. 
-- **Action**: Use `dev.kit skills run` to orchestrate one-off maintenance tasks. - -### 2. Migration-First Design -Plan migrations and rollbacks before implementation begins. Ground your execution in verified repository logic. -- **Action**: Document migration steps in the `plan.md` artifact before normalization. - -### 3. Identity Verification -Ensure that the application and its automation know their environment identity at runtime. -- **Action**: Use `environment.yaml` to define scoped orchestration variables. - ---- - -## Operational Cues - -- **Release Blocked?** -> Check `workflow.md` status to identify the specific failure step. -- **Environment Drift?** -> Run `dev.kit doctor` to verify alignment with standard Worker runtimes. -## ๐Ÿ— Operational Grounding - -The release and maintenance lifecycle is operationalized through canonical UDX resources: - -| Phase | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Release** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Standardized CI/CD and deployment patterns. | -| **Deployment** | [`udx/worker-deployment`](https://github.com/udx/worker-deployment) | Orchestration of high-fidelity environments. | -| **Maintenance** | [`udx/worker`](https://github.com/udx/worker) | Deterministic runtime for all operational tasks. | - ---- - -## ๐Ÿ“š Authoritative References - -Predictable delivery requires a commitment to planning and management: - -- **[Developing Lifecycles Cheatsheet](https://andypotanin.com/developing-lifecycles-a-comprehensive-cheatsheet/)**: Essential practices for smooth production deployments. -- **[SDLC Breaking Points](https://andypotanin.com/wordpress-risks/)**: Identifying and mitigating vulnerabilities in the delivery lifecycle. -- **[Implementing a cATO System](https://andypotanin.com/implementing-a-continuous-authority-to-operate-cato-system/)**: Principles for automated compliance and authorization. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/operations/worker-ecosystem-refs.md b/docs/reference/operations/worker-ecosystem-refs.md deleted file mode 100644 index d9e54f8..0000000 --- a/docs/reference/operations/worker-ecosystem-refs.md +++ /dev/null @@ -1,80 +0,0 @@ -# Worker Ecosystem: Runtime Grounding - -**Domain:** Reference / Operations -**Status:** Canonical - -## Summary - -The **UDX Worker Ecosystem** provides the foundational base layer for all engineering environments. In **dev.kit**, it ensures that "Intent" can be executed within a pre-hydrated, secure, and deterministic runtime, eliminating environment-specific drift. - -## 🏗 Containerization: The Deterministic Base - -UDX enforces a **Container-First** approach to engineering to eliminate environment-specific drift. By using the **Worker Ecosystem**, we ensure that every task runs in a "Perfect Localhost" that is identical across development, staging, and production. - -### Why Containerization? -- **Parity**: Guaranteed identical software versions (`bash`, `git`, `jq`) regardless of the host OS. -- **Isolation**: High-stakes operations are performed in a clean, ephemeral sandbox that protects the user's local machine. -- **Hydration**: Environments are "pre-hydrated" with all required UDX meshes and authorized CLI tools. - -### The UDX Worker -The `udx/worker` is the foundational base layer for all UDX engineering tasks. It provides a hardened, audit-ready environment optimized for the `dev.kit` runtime. - -- **Authoritative Docs**: [UDX Worker Documentation](https://github.com/udx/worker/tree/latest/docs) -- **Deployment Pattern**: [@udx/worker-deployment](https://github.com/udx/worker-deployment) - ---- - -## 🛠 dev.kit Grounding: Runtime-to-Action - -| Component | Role | dev.kit Implementation | -| :---------------------- | :------------ | :-------------------------------------------- | -| **`udx/worker`** | Base Layer | Primary execution target for all CLI tasks.
| -| **`worker-deployment`** | Orchestration | Verified via `worker run` (@udx/worker-deployment). | -| **Isolated Testing** | Fidelity | Verified via `dev.kit test --worker`. | -| **Unified Logic** | Portability | Same behavior across Local, CI, and Prod. | - ---- - -## ๐Ÿ— High-Fidelity Mandates - -### 1. Isolated Execution - -Never perform destructive or high-stakes operations in an ungrounded local shell. Always leverage the **Worker Ecosystem** to ensure environment parity. - -- **Action**: Use `dev.kit test --worker` to run tests inside a managed `udx/worker` container using `worker run`. - -### 2. Runtime Truth - -Treat Worker runtime documentation and configuration as the absolute source of truth for execution behavior. - -- **Action**: Align `environment.yaml` variables with official Worker config schemas. - ---- - -## Operational Cues - -- **Environment Friction?** -> Run your task in a clean `udx/worker` container to isolate the drift. -- **Adding New Skills?** -> Verify that the new logic is compatible with the standard Worker runtime. - -## ๐Ÿ— Ecosystem Mapping - -The Worker Ecosystem provides the high-fidelity targets for diverse engineering domains: - -| Domain | Mapping Resource | Purpose | -| :--- | :--- | :--- | -| **Core Runtimes** | [`udx/worker`](https://github.com/udx/worker) | Base and language-specific images. | -| **Orchestration** | [`udx/worker-deployment`](https://github.com/udx/worker-deployment) | Deployment and CLI mesh tools. | -| **Workflows** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Standard CI/CD and automation patterns. | - ---- - -## ๐Ÿ“š Authoritative References - -The worker ecosystem ensures environment parity across complex cloud systems: - -- **[Navigating to the Cloud](https://andypotanin.com/windows-to-cloud/)**: Managing the complexity of modern cloud IT systems and isolated images. 
-- **[Decentralized DevOps](https://andypotanin.com/decentralized-devops-the-future-of-software-delivery/)**: Creating highly available and scalable systems via distributed architecture. - ---- - -_UDX DevSecOps Team_ diff --git a/docs/reference/standards/12-factor.md b/docs/reference/standards/12-factor.md deleted file mode 100644 index 0caa204..0000000 --- a/docs/reference/standards/12-factor.md +++ /dev/null @@ -1,70 +0,0 @@ -# 12-Factor (Applied): High-Fidelity Engineering - -**Domain:** Reference / Standards -**Status:** Canonical - -## Summary - -The 12-Factor App methodology provides the foundational principles for modern, cloud-native engineering. In **dev.kit**, these principles are enforced at the repository level to ensure every project is a portable, high-fidelity "Skill." - ---- - -## ๐Ÿ›  dev.kit Grounding: Principle-to-Primitive Mapping - -| 12-Factor Principle | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **I. Codebase** | One repository, multiple deployments (Local, CI, Prod). | `dev.kit sync` | -| **II. Dependencies** | Explicit and isolated via the Worker Ecosystem. | `dev.kit doctor` | -| **III. Config** | Stored in the environment (YAML/Env). | `environment.yaml` | -| **IV. Backing Services** | Resolved as "Virtual Skills" (NPM/GitHub/Context7). | `dev.kit ai skills` | -| **V. Build, Release, Run** | Strict separation of grounding and execution phases. | `dev.kit ai sync` | -| **VI. Processes** | Stateless and share-nothing; context is externalized. | `.udx/dev.kit/tasks/` | -| **IX. Disposability** | Fast startup and clean cleanup of stagnant state. | `dev.kit task cleanup` | -| **X. Dev/Prod Parity** | Identical runtimes via high-fidelity Worker images. | `udx/worker` | -| **XII. Admin Processes** | One-off tasks executed as bounded workflows. | `dev.kit skills run` | - ---- - -## ๐Ÿ— High-Fidelity Mandates - -### 1. Externalize All State -Never store mutable task state in the root of the repository. 
All engineering context must be externalized to the hidden **State Hub**. -- **Action**: Use `get_repo_state_dir` to resolve `.udx/dev.kit/` for all local state. - -### 2. Explicit Dependency Resolution -A repository is only high-fidelity if its dependencies are discoverable and verified. -- **Action**: Maintain `environment.yaml` and use `dev.kit doctor` to verify the **Skill Mesh**. - -### 3. Environment-Aware Configuration -Favor `environment.yaml` for shared orchestration and `.env` for local secrets. Never commit sensitive credentials. -- **Action**: Ensure `.udx/` and `.env` are in `.gitignore`. - ---- - -## Operational Cues - -- **Drift Detected?** -> Run `dev.kit sync run` to restore 12-factor codebase integrity. -- **Missing Tooling?** -> Consult the **Skill Mesh** via `dev.kit status` to resolve the gap. -## ๐Ÿ— Standard Mapping - -The 12-Factor methodology is operationalized through canonical UDX resources: - -| Principle Cluster | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Runtime & Parity** | [`udx/worker`](https://github.com/udx/worker) | Deterministic environment and dependency isolation. | -| **Code & Config** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Atomic synchronization and environment orchestration. | -| **Execution** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Validated admin and release process patterns. | - ---- - -## ๐Ÿ“š Authoritative References - -12-Factor principles are extended through systematic environment automation: - -- **[12-Factor Environment Automation](https://udx.io/devops-manual/12-factor-environment-automation)**: Deep dive into cloud-native configuration strategy. -- **[12factor.net](https://12factor.net/)**: The original methodology for building software-as-a-service. -- **[Decentralized DevOps](https://andypotanin.com/how-decentralized-devops-can-help-your-organization/)**: Scaling organizations through distributed service architectures. 
-- **[Navigating to the Cloud](https://andypotanin.com/windows-to-cloud/)**: Managing the complexity of modern cloud IT systems. - ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/standards/external-standards.md b/docs/reference/standards/external-standards.md deleted file mode 100644 index 0e3e442..0000000 --- a/docs/reference/standards/external-standards.md +++ /dev/null @@ -1,58 +0,0 @@ -# External Standards: Tool-Specific Behavior - -**Domain:** Reference / Standards -**Status:** Canonical - -## Summary - -External standards are utilized only for tool-specific behavior and syntax. In **dev.kit**, these standards provide the technical constraints for specialized skills while the UDX Foundations remain the primary source of operational truth. - ---- - -## ๐Ÿ›  dev.kit Grounding: Reference-to-Action - -| Standard Source | Role | dev.kit Implementation | -| :--- | :--- | :--- | -| **GitHub Actions** | CI/CD | Validated via `gh` CLI mesh. | -| **Docker / OCI** | Runtime | Verified via the Worker Ecosystem. | -| **OpenTelemetry** | Observability | Integrated into `feedback.md` logs. | -| **POSIX / Shell** | Execution | Guaranteed by the deterministic CLI. | - ---- - -## ๐Ÿ— High-Fidelity Mandates - -### 1. Narrow Scope -Never allow an external standard to replace a core UDX principle. Use external references only when UDX guidance is insufficient for a specific technical implementation. -- **Action**: Link to exact documentation sections rather than generic homepages. - -### 2. Resilience Fallback -When an external tool or standard encounters an edge case, always trigger the **Fail-Open Path**. Ensure the loop continues with standard Markdown or text reasoning. -- **Action**: Document exact external dependencies in `environment.yaml`. - ---- - -## Operational Cues - -- **Ambiguous Syntax?** -> Consult the official external reference linked in the module. -- **Edge Case Detected?** -> Fallback to the **Resilient Waterfall** and resolve the drift manually. 
-## ๐Ÿ— External Grounding - -External standards are integrated through canonical UDX resources: - -| Standard | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Workflow** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for GitHub Actions and pipelines. | -| **Container** | [`udx/worker`](https://github.com/udx/worker) | Host-level parity for Docker/OCI standards. | - ---- - -## ๐Ÿ“š Authoritative References - -External standards are integrated within a systematic engineering flow: - -- **[Creating YAML Standards](https://andypotanin.com/creating-yaml-standards-best-practices-for-teams/)**: Reducing friction and preventing errors through shared standards. -- **[Digital Rails & Logistics](https://andypotanin.com/digital-rails-and-logistics/)**: Understanding the evolution of software standards through automotive history. - ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/standards/mermaid.md b/docs/reference/standards/mermaid.md deleted file mode 100644 index 7869323..0000000 --- a/docs/reference/standards/mermaid.md +++ /dev/null @@ -1,61 +0,0 @@ -# Mermaid Standards: Visual Engineering - -**Domain:** Reference / Standards -**Status:** Canonical - -## Summary - -**Mermaid** is the primary standard for all engineering diagrams (Flowcharts, Sequence Diagrams, State Machines). In **dev.kit**, Mermaid ensures that architecture and process flows are version-controlled alongside source code and accessible to both humans and agents. - ---- - -## ๐Ÿ›  dev.kit Grounding: Visual-to-Action - -| Diagram Practice | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **Diagram Generation** | Automated rendering of SVG/PNG assets. | `dev.kit visualizer` | -| **Resilient Fallback** | Fallback to raw Markdown if rendering fails. | `workflow.md` | -| **Unified Logic** | Synchronized view of code and architecture. | `dev.kit status` | -| **Intent-to-Action** | Visual mapping of normalized workflows. 
| `docs/skills/` | - ---- - -## ๐Ÿ— High-Fidelity Mandates - -### 1. Versioned Architecture -Never store diagrams as binary blobs. All architectural context must live as Mermaid source code to ensure it remains discoverable and diffable. -- **Action**: Use the `dev.kit visualizer` to export high-fidelity assets from `.mmd` sources. - -### 2. Standardized Shapes -Maintain visual consistency to ensure agents can accurately reason about process flows. -- **`[Rectangle]`**: Processes / Normalizations. -- **`{Rhombus}`**: Decision Gates / Skill Selection. -- **`([Rounded])`**: Start / End Points. - ---- - -## Operational Cues - -- **Outdated Diagram?** -> Run `dev.kit visualizer` to regenerate assets from repository truth. -- **Broken Flow?** -> Check the raw Mermaid source in the `assets/diagrams/` hub. - -## ๐Ÿ— Visual Grounding - -Visual engineering is operationalized through deterministic UDX engines: - -| Practice | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Rendering** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | High-fidelity export engine and templates. | -| **Fidelity** | [`udx/worker`](https://github.com/udx/worker) | Deterministic environment for asset generation. | - ---- - -## ๐Ÿ“š Authoritative References - -Visual engineering is a core part of maintaining high-fidelity documentation: - -- **[Synthetic Content Enrichment](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: Strategies for maintaining standalone quality through visual standards. -- **[AOCA: Visual Standards](https://udx.io/cloud-automation-book/quality)**: High-fidelity patterns for architectural documentation. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/standards/yaml-standards.md b/docs/reference/standards/yaml-standards.md deleted file mode 100644 index e0e48e0..0000000 --- a/docs/reference/standards/yaml-standards.md +++ /dev/null @@ -1,58 +0,0 @@ -# YAML Standards: Configuration-as-Code - -**Domain:** Reference / Standards -**Status:** Canonical - -## Summary - -YAML is the primary format for environment orchestration and configuration. In **dev.kit**, consistent YAML structure ensures that the **Dynamic Discovery Engine** can reliably map repository capabilities and variables across diverse environments. - ---- - -## ๐Ÿ›  dev.kit Grounding: Standard-to-Action - -| YAML Practice | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **Explicit Defaults** | Pre-hydrated variables in templates. | `default.env` | -| **Schema Validation** | Deterministic parsing of orchestrators. | `environment.yaml` | -| **Scoped Overrides** | Repository-bound local configuration. | `.udx/dev.kit/config.env` | -| **Fidelity Mapping** | Intent-based metadata in manifests. | `dev.kit status` | - ---- - -## ๐Ÿ— High-Fidelity Mandates - -### 1. Human-Editable Intent -Only use YAML for configurations that require human or AI-agent oversight. Machine-only state should favor high-performance formats (e.g., JSON). -- **Action**: Use `environment.yaml` for high-level orchestration and `manifest.json` for internal mapping. - -### 2. Zero-Implicit Logic -Favor explicit keys and allowed values over implicit behavior. A high-fidelity repository must be self-documenting through its configuration. -- **Action**: Document all custom YAML keys within the `docs/reference/` layer. - ---- - -## Operational Cues - -- **Unpredictable Config?** -> Enforce strict indentation and schema validation via CI/CD. -- **Ambiguous Variable?** -> Move it to `environment.yaml` with an explicit description. 
-## ๐Ÿ— Configuration Grounding - -Configuration standards are operationalized through deterministic UDX engines: - -| Practice | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Validation** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Scoped orchestration and environment parsing. | -| **Parity** | [`udx/worker`](https://github.com/udx/worker) | Deterministic environment for config stability. | - ---- - -## ๐Ÿ“š Authoritative References - -Shared standards are critical for maintaining configuration sanity across teams: - -- **[Creating YAML Standards](https://andypotanin.com/creating-yaml-standards-best-practices-for-teams/)**: Best practices for team-wide configuration consistency. -- **[Decentralized DevOps](https://andypotanin.com/how-decentralized-devops-can-help-your-organization/)**: Scaling systems through distributed configuration and architecture. - ---- -_UDX DevSecOps Team_ diff --git a/docs/runtime/config.md b/docs/runtime/config.md deleted file mode 100644 index 4cc6c55..0000000 --- a/docs/runtime/config.md +++ /dev/null @@ -1,72 +0,0 @@ -# Configuration: Environment Orchestration - -**Domain:** Runtime / Configuration -**Status:** Canonical - -## Summary - -Configuration in **dev.kit** provides a safe, deterministic foundation for both humans and agents. It maps host-level settings and repository metadata into a high-fidelity engineering interface using `environment.yaml`. - ---- - -## Configuration Strategy - -- **Agent Bootstrapping**: Configuration is the first gate where AI agents are safely hydrated with repository rules and authorized execution paths. -- **Task Orchestration**: Scoped settings ensure that normalized workflows have a consistent and isolated runtime context across diverse environments. - ---- - -## CLI Interfaces - -- **`dev.kit config show`**: View active host and repository configuration. -- **`dev.kit config detect`**: Auto-detect required software and CLI versions in the environment. 
-- **`dev.kit config set --key --value `**: Update a specific setting. -- **`dev.kit config reset`**: Revert to the high-fidelity default baseline. - ---- - -## Key Config Groups - -### 1. System Defaults -- `quiet`: Control CLI output verbosity. -- `developer`: Enable internal developer-specific helpers. -- `state_path`: Global location for transient runtime state. -- `shell.auto_enable`: Automatically enable shell integrations. -- `output.mode`: Set the default output fidelity (e.g., `brief`, `verbose`). - - -### 2. AI & Orchestration -- `ai.enabled`: Enable/Disable AI-Powered automation mode. -- `ai.provider`: Choose the active AI engine (e.g., `gemini`, `codex`). -- `exec.prompt`: The default template for task normalization. - -### 3. Context Management -- `context.enabled`: Persist repository-scoped context across sessions. -- `context.max_bytes`: Bound the context memory to prevent overflow. - ---- - -## Security & Overrides - -- **Explicit Override**: All settings can be overridden by environment variables (e.g., `DEV_KIT_AI_ENABLED=true`). -- **Secret Isolation**: Sensitive credentials must never live in `environment.yaml`. Use repo-bound `.env` files (gitignored). -## ๐Ÿ— Config Grounding - -Environment orchestration is operationalized through deterministic UDX engines: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Validation** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | High-fidelity parsing of `environment.yaml`. | -| **Parity** | [`udx/worker`](https://github.com/udx/worker) | Deterministic environment for config stability. | - ---- - -## ๐Ÿ“š Authoritative References - -Environment orchestration is built on systematic configuration and automation standards: - -- **[Managing IT Complexity](https://andypotanin.com/windows-to-cloud/)**: Strategies for managing the complexity of modern cloud IT systems. 
-- **[Decentralized DevOps](https://andypotanin.com/how-decentralized-devops-can-help-your-organization/)**: Using distributed services and architectures to create scalable engineering environments. - ---- -_UDX DevSecOps Team_ diff --git a/docs/runtime/execution-loop.md b/docs/runtime/execution-loop.md deleted file mode 100644 index 5e928ed..0000000 --- a/docs/runtime/execution-loop.md +++ /dev/null @@ -1,76 +0,0 @@ -# Drift Resolution Cycle: Deterministic Execution - -**Domain:** Runtime / Execution -**Status:** Canonical - -## Summary - -The **Drift Resolution Cycle** is the practical engine of **Context-Driven Engineering (CDE)**. It resolves the gap between human intent and repository reality through a deterministic loop of analysis, normalization, and processing. - -![Drift Resolution Cycle](../../assets/diagrams/drift-resolution-cycle.svg) - ---- - -## The Core Cycle - -1. **Analyze**: Audit the repository context to identify the **Drift** from the original intent. -2. **Normalize**: Transform ambiguous requests into a **Bounded Workflow** (`workflow.md`). -3. **Process**: Execute the discrete steps using validated CLI primitives and scripts. -4. **Validate**: Verify the final state against the repository truth (`dev.kit doctor`). -5. **Capture**: Distill successful logic back into the repository as a reusable **Skill**. - ---- - -## ๐Ÿ— The Bounded Workflow (DOC-003) - -To ensure high-fidelity results, **dev.kit** enforces a strict **Normalization Boundary**. Chaotic intent is never executed directly; it must be filtered into a structured `workflow.md`. - -- **Intent-to-Plan**: Ambiguity is eliminated before execution begins. -- **State Persistence**: The current status (`planned | in_progress | done`) is tracked at the repository level. -- **Fail-Open Resilience**: Every workflow step includes a fallback mechanism for continuity during tool failures. 
- -### Artifact Mapping: The Audit Trail - -| Artifact | Role | Location | -| :--- | :--- | :--- | -| **`plan.md`** | The raw, normalized task objective. | `.udx/dev.kit/tasks//` | -| **`workflow.md`** | The deterministic execution sequence. | `.udx/dev.kit/tasks//` | -| **`feedback.md`** | The iterative engineering log. | `.udx/dev.kit/tasks//` | - ---- - -## ๐Ÿง  Session Continuity & Hygiene - -To maintain high-fidelity momentum across multi-turn interactions: - -- **Proactive Catch-Up**: At the start of every session, agents identify unfinished tasks (`dev.kit task active`). -- **Nudge Mechanism**: The system proactively reminds users to resolve stale state or pending syncs. -- **Clean Handoff**: Completed tasks are pruned from the workspace (`dev.kit task cleanup`) to prevent context noise. - ---- - -## Execution Guardrails - -- **Primitive-Only**: Agents are auto-authorized to use `dev.kit` commands. Non-standardized commands require user confirmation. -- **Grounding First**: Every session begins with environment hydration (`dev.kit ai sync`). -- **No Shadow Logic**: Every action must be discoverable and reproducible via repository source code. -## ๐Ÿ— Loop Grounding - -The resolution cycle is operationalized through deterministic UDX engines: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Normalization** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Dynamic discovery and task management. | -| **Execution** | [`udx/worker`](https://github.com/udx/worker) | Deterministic runtime for logic iteration. | - ---- - -## ๐Ÿ“š Authoritative References - -The Drift Resolution Cycle is built on mathematical and operational principles of delivery flow: - -- **[Predictable Delivery Flow](https://andypotanin.com/littles-law-applied-to-devops/)**: Managing throughput and cycle time through bounded WIP. 
-- **[Observation-Driven Management](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Normalizing task assignment and efficiency through AI-identified patterns. - ---- -_UDX DevSecOps Team_ diff --git a/docs/runtime/install.md b/docs/runtime/install.md deleted file mode 100644 index 8673a92..0000000 --- a/docs/runtime/install.md +++ /dev/null @@ -1,67 +0,0 @@ -# Installation & Maintenance: Safe Lifecycle - -**Domain:** Runtime / Maintenance -**Status:** Canonical - -## Summary - -The **dev.kit** installer is designed for safe, idempotent environment hydration. It ensures that local engineering environments are aligned with UDX standards while protecting existing user configurations through a mandatory backup-first policy. - ---- - -## ๐Ÿ›ก Safe Installation (Safe Mode) - -The installation process (`bin/scripts/install.sh`) operates in a **Safe Mode** by default. - -1. **Backup-First**: Before any files are modified or synced, the installer creates a timestamped compressed archive of the existing `~/.udx/dev.kit` directory. -2. **Explicit Confirmation**: The installer prompts for confirmation before proceeding with critical changes, such as shell profile modifications. -3. **Idempotent Syncing**: The core engine is synced using a temporary staging area to ensure atomic updates and prevent partial state corruption. - -### Commands -```bash -# Perform a safe installation/update -./bin/scripts/install.sh -``` - ---- - -## ๐Ÿ—‘ Simple Uninstall & Purge - -The uninstallation process (`bin/scripts/uninstall.sh`) provides a graceful way to remove **dev.kit** from the system. - -- **Standard Uninstall**: Removes the `dev.kit` binary from the local bin directory. -- **State Purge**: Optionally removes the entire engine directory (`~/.udx/dev.kit`). -- **Safety Backup**: Offers to backup the repository state and configurations before purging. 
- -### Commands -```bash -# Uninstall the binary -./bin/scripts/uninstall.sh - -# Purge all state and engine files (with confirmation) -./bin/scripts/uninstall.sh --purge -``` - ---- - -## ๐Ÿงฉ Shell Integration - -**dev.kit** can automatically detect and configure common shell profiles (`.zshrc`, `.bashrc`, `.bash_profile`). - -- **Auto-Detection**: The installer scans for available shell profiles. -- **Dynamic Sourcing**: Adds a non-destructive `source` line to the profiles to load the `dev.kit` environment. -- **Manual Control**: Users can opt-out of auto-configuration and manually source `~/.udx/dev.kit/source/env.sh`. - ---- - -## ๐Ÿ— Maintenance Grounding - -Installation and lifecycle management are operationalized through deterministic UDX standards: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Integrity** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Standardized install/uninstall logic. | -| **Automation** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Validated deployment and hydration patterns. | - ---- -_UDX DevSecOps Team_ diff --git a/docs/runtime/lifecycle.md b/docs/runtime/lifecycle.md deleted file mode 100644 index 0aac9e9..0000000 --- a/docs/runtime/lifecycle.md +++ /dev/null @@ -1,59 +0,0 @@ -# Runtime Lifecycle: The Engineering Heartbeat - -**Domain:** Runtime / Lifecycle -**Status:** Canonical - -## Summary - -The **Runtime Lifecycle** defines how **dev.kit** initializes, orchestrates engineering tasks, and finalizes repository state. It ensures a high-fidelity environment for resolving drift between intent and reality. - -![Runtime Lifecycle](../../assets/diagrams/runtime-lifecycle.svg) - ---- - -## Lifecycle Phases - -### 1. Environment Hydration (Bootstrap) -**Interface**: `bin/scripts/install.sh`, `dev.kit doctor`. -- Symlinks the deterministic engine into the user's `$PATH`. -- Verifies required software, CLI meshes, and authentication state. 
-- Loads shell completions and environment-aware aliases. - -### 2. Intent Normalization (The Filter) -**Interface**: `dev.kit task`, `workflow.md`. -- Filters chaotic user requests through the **Normalization Boundary**. -- Transforms ambiguous intent into a deterministic execution plan. -- Maps dependencies and resolves repository-bound skills. - -### 3. Grounded Execution (The Engine) -**Interface**: `dev.kit skills run`. -- Executes bounded steps through the hardened CLI boundary. -- Leverages the **Worker Ecosystem** for isolated, deterministic runtimes. -- Triggers **Fail-Open Path** if specialized tools encounter failure. - -### 4. Logical Synchronization (Finalize) -**Interface**: `dev.kit sync`. -- Groups changes into logical, domain-specific commits. -- Captures the resolution logic back into the repository as a new **Skill**. -- Prunes stale context and ephemeral task state from the workspace. -## ๐Ÿ— Standard Phase Mapping - -The Runtime Lifecycle is grounded in canonical UDX infrastructure and patterns: - -| Phase | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Hydration** | [`udx/worker`](https://github.com/udx/worker) | Base container for deterministic environment setup. | -| **Execution** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Patterns for automated, multi-step logic. | -| **Finalization** | [`udx/worker-deployment`](https://github.com/udx/worker-deployment) | Standard patterns for final environment orchestration. | - ---- - -## ๐Ÿ“š Authoritative References - -The engineering heartbeat is grounded in systematic lifecycle and evolution patterns: - -- **[Tracing Software Evolution](https://andypotanin.com/digital-rails-and-logistics/)**: Drawing parallels between the evolution of systems and engineering phases. -- **[Developing Lifecycles](https://andypotanin.com/developing-lifecycles-a-comprehensive-cheatsheet/)**: Essential practices for smooth, predictable project progress. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/runtime/overview.md b/docs/runtime/overview.md deleted file mode 100644 index dadd330..0000000 --- a/docs/runtime/overview.md +++ /dev/null @@ -1,77 +0,0 @@ -# Runtime Overview: The Deterministic Engine - -**Domain:** Runtime / CLI -**Status:** Canonical - -## Summary - -The **dev.kit** CLI is the deterministic engine designed to resolve the **Drift** between human intent and repository reality. It provides a hardened boundary for executing repository-bound skills while maintaining high-fidelity environment health. - ---- - -## ๐Ÿณ Runtime Environment - -To ensure deterministic behavior and context fidelity, **dev.kit** is optimized for the **UDX Worker Ecosystem**. - -- **Primary Target**: `usabilitydynamics/udx-worker:latest`. -- **Orchestration**: Sessions follow the `udx/worker-deployment` patterns. -- **Isolated Execution**: Testing and high-stakes operations should always be performed within a clean `udx/worker` container to eliminate local drift. - ---- - -## ๐Ÿš€ Entry Points - -- **`bin/dev-kit`**: The primary dispatch entrypoint. Loads internal helpers and routes subcommands. -- **`bin/env/dev-kit.sh`**: Shell initialization (Banner, PATH setup, and completions). -- **`bin/scripts/install.sh`**: High-fidelity installer with safe-mode and backups. -- **`bin/scripts/uninstall.sh`**: Simple uninstaller with optional state purging. - - ---- - -## ๐Ÿ›  Deterministic Commands - -### Status & Discovery -- **`dev.kit status`**: (Default) High-fidelity engineering brief and task visibility. -- **`dev.kit suggest`**: Suggest repository improvements and CDE compliance fixes. -- **`dev.kit doctor`**: Deep system analysis, environment hydration, and compliance audit. - - -### AI & Skill Mesh -- **`dev.kit ai`**: Unified agent integration management, skill synchronization, and grounding. -- **`dev.kit skills`**: Discovery and execution of repository-bound skills. 
- -### Task & Lifecycle -- **`dev.kit sync`**: Logical, atomic repository synchronization and drift resolution. -- **`dev.kit task`**: Manage the lifecycle of active workflows and engineering sessions. -- **`dev.kit config`**: Scoped orchestration via `environment.yaml` and `.env`. - ---- - -## ๐Ÿงฉ Dynamic Discovery Engine - -`dev.kit` does not rely on static metadata. It dynamically discovers capabilities by scanning: -1. **Internal Commands**: Metadata-rich scripts in `lib/commands/*.sh`. -2. **Managed Skills**: Specialized toolsets in `docs/skills/`. -3. **Virtual Skills**: External CLI tools (gh, npm, docker) detected in the environment. -## ๐Ÿ— Engine Grounding - -The `dev.kit` engine is grounded in core UDX infrastructure to ensure high-fidelity execution: - -| Component | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Runtime** | [`udx/worker`](https://github.com/udx/worker) | Standardized, pre-hydrated base environment. | -| **API Mesh** | [`@udx/mcurl`](docs/ai/mesh/npm.md) | High-fidelity API interaction and error handling. | -| **Orchestration**| [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Deterministic CI/CD and deployment patterns. | - ---- - -## ๐Ÿ“š Authoritative References - -Deterministic CLI orchestration is built on systematic engineering flow and portability: - -- **[Automotive Software Evolution](https://andypotanin.com/digital-rails-and-logistics/)**: Tracing the evolution of deterministic algorithms through automotive innovation. -- **[Decentralized DevOps](https://andypotanin.com/how-decentralized-devops-can-help-your-organization/)**: Using distributed services to create scalable and portable systems. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/workflows/README.md b/docs/workflows/README.md deleted file mode 100644 index ca196d2..0000000 --- a/docs/workflows/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# dev.kit Workflow Mesh: Intent-to-Resolution - -**Domain:** Foundations / Workflows -**Status:** Canonical - -## Summary - -The **Workflow Mesh** is the collection of deterministic sequences and dynamic reasoning patterns used to resolve repository drift. It bridges the gap between chaotic user intent and the high-fidelity execution engine. - ---- - -## ๐Ÿ— Workflow Hierarchy - -1. **[Normalization](normalization.md)**: The mapper that transforms intent into bounded plans. -2. **[Engineering Loops](loops.md)**: Standardized sequences for features, bugfixes, and discovery. -3. **[Git Synchronization](git-sync.md)**: Logical grouping and atomic commit orchestration. -4. **[Visual Engineering](visualizer.md)**: AI-driven architectural diagramming and flow analysis. - ---- - -## โš™๏ธ Managed Assets - -Common logic and templates used by the mesh are stored in the `assets/` directory: -- **`assets/git-sync.yaml`**: The canonical synchronization sequence. -- **`assets/templates/`**: Standard Mermaid patterns for visual engineering. - -## ๐Ÿ— Mesh Grounding - -The Workflow Mesh is operationalized through canonical UDX resources: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Logic** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Dynamic discovery and orchestration engine. | -| **Runtime** | [`udx/worker`](https://github.com/udx/worker) | Deterministic environment for workflow execution. | -| **Patterns** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for cross-repo sequences. | - ---- - -## ๐Ÿ›  Synchronization -Agents hydrate their environment by running **`dev.kit ai sync`**. This process scans the mesh for high-fidelity documentation and projects metadata into the agent's active context. 
- ---- - -## ๐Ÿ“š Authoritative References - -The Workflow Mesh is grounded in foundational patterns for delivery flow and task management: - -- **[Predictable Delivery Flow](https://andypotanin.com/littles-law-applied-to-devops/)**: Managing cycle time through systematic sequences. -- **[Observation-Driven Management](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Optimizing task assignment through pattern identification. - ---- -_UDX DevSecOps Team_ diff --git a/docs/workflows/assets/git-sync.yaml b/docs/workflows/assets/git-sync.yaml deleted file mode 100644 index 6e75a45..0000000 --- a/docs/workflows/assets/git-sync.yaml +++ /dev/null @@ -1,26 +0,0 @@ -name: Git Sync -description: Logical commit and synchronization workflow. -inputs: - task_id: - description: Current task ID for traceability. - required: true - dry_run: - description: Preview changes without committing. - default: "false" - message: - description: Optional commit message prefix. - default: "" - -steps: - - id: prepare-sync - name: Prepare Sync Environment - run: dev.kit sync prepare - - - id: group-and-commit - name: Execute Atomic Commits - run: dev.kit sync run --task-id "${{inputs.task_id}}" --dry-run "${{inputs.dry_run}}" --message "${{inputs.message}}" - - - id: finalize - name: Finalize and Cleanup - run: | - echo "--- Git Sync Workflow Complete ---" diff --git a/docs/workflows/assets/templates/default-flowchart.mmd b/docs/workflows/assets/templates/default-flowchart.mmd deleted file mode 100644 index f4a505c..0000000 --- a/docs/workflows/assets/templates/default-flowchart.mmd +++ /dev/null @@ -1,8 +0,0 @@ ---- -config: - theme: mc ---- -flowchart TD - Install[1. Install dev.kit] --> Env[2. Configure Local Environment] - Env --> Agent[3. Enable AI Agent Integration] - Agent --> Waterfall[4. 
Experience Development Waterfall] \ No newline at end of file diff --git a/docs/workflows/assets/templates/default-sequence.mmd b/docs/workflows/assets/templates/default-sequence.mmd deleted file mode 100644 index e743187..0000000 --- a/docs/workflows/assets/templates/default-sequence.mmd +++ /dev/null @@ -1,21 +0,0 @@ ---- -config: - theme: mc ---- -sequenceDiagram - participant User - participant DevKit - participant Agent - participant Repo - participant System - - User->>DevKit: Intent / Goal - DevKit->>Agent: Hydrate Context - Repo->>Agent: Provide Context - Agent->>DevKit: Normalized Workflow - DevKit->>Agent: Execute Step - Agent->>DevKit: Feedback / Result - DevKit->>System: Execution - System-->>DevKit: Drift Detected? - DevKit-->>User: Done - \ No newline at end of file diff --git a/docs/workflows/assets/templates/default-state.mmd b/docs/workflows/assets/templates/default-state.mmd deleted file mode 100644 index babde29..0000000 --- a/docs/workflows/assets/templates/default-state.mmd +++ /dev/null @@ -1,8 +0,0 @@ ---- -config: - theme: mc ---- -stateDiagram-v2 - [*] --> Idle - Idle --> Active: start - Active --> Idle: stop diff --git a/docs/workflows/git-sync.md b/docs/workflows/git-sync.md deleted file mode 100644 index 24a4a85..0000000 --- a/docs/workflows/git-sync.md +++ /dev/null @@ -1,64 +0,0 @@ -# Skill: dev-kit-git-sync - -**Domain:** Source Control / Synchronization -**Type:** AI Reasoning Skill -**status:** Canonical - -## Summary - -The **Git Synchronization** skill enables AI agents to resolve repository drift by logically grouping and committing changes. It uses dynamic reasoning to categorize modifications into high-fidelity domains (docs, ai, cli, core) and generates context-rich commit messages. - ---- - -## ๐Ÿ›  AI Reasoning (The Skill) - -This skill utilizes dynamic LLM reasoning to perform the following: -- **Logical Domain Determination**: Analyzing changed files to map them to high-fidelity domains (docs, ai, cli, core). 
-- **Contextual Intent Capture**: Generating meaningful commit messages that reflect the "Why" behind the drift resolution. -- **Drift Identification**: Recognizing unstaged changes and determining the correct synchronization sequence. -- **Collaborative Orchestration**: Identifying when a task is ready for review and proactively suggesting the creation or **updating** of a Pull Request with an automated **diff summary**. - ---- - -## โš™๏ธ Deterministic Logic (Function Assets) - -The following assets provide the programmatic engine for this skill: -- **`workflow.yaml`**: The canonical definition of synchronization steps and grouping rules. -- **Atomic Committer**: Hardened logic that ensures changes are committed in discrete, revertible blocks. -- **PR Suggestion Engine**: Proactive prompt that interfaces with the **GitHub Mesh** to create remote Pull Requests. - -## ๐Ÿ— Sync Grounding - -Git synchronization is operationalized through canonical UDX resources: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Atomic Logic** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | The primary engine for logical grouping and commits. | -| **Workflow Pattern** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for remote sync and CI/CD. | -| **Collaboration** | [`ai/mesh/github.md`](../ai/mesh/github.md) | Grounding for PR creation and remote resolution. | - ---- - -## ๐Ÿš€ Primitives Orchestrated - -This skill is grounded in the following **Deterministic Primitives**: -- **`dev.kit sync prepare`**: Prepares feature branches and synchronizes with origin. -- **`dev.kit sync run`**: Executes atomic commits and resolves drift. - ---- - -## ๐Ÿ“‚ Managed Assets - -- **Workflow YAML**: Canonical synchronization sequence in `docs/workflows/assets/git-sync.yaml`. 
- ---- - -## ๐Ÿ“š Authoritative References - -High-fidelity synchronization is grounded in systematic SDLC and version control practices: - -- **[Predictable Delivery Flow](https://andypotanin.com/littles-law-applied-to-devops/)**: Managing Work-in-Progress (WIP) through atomic, domain-specific commits. -- **[Decentralized DevOps](https://andypotanin.com/decentralized-devops-the-future-of-software-delivery/)**: The shift toward distributed architectures and automated synchronization. - ---- -_UDX DevSecOps Team_ diff --git a/docs/workflows/loops.md b/docs/workflows/loops.md deleted file mode 100644 index f893b9d..0000000 --- a/docs/workflows/loops.md +++ /dev/null @@ -1,83 +0,0 @@ -# Engineering Loops: Standardized Workflows - -**Domain:** AI / Workflows -**Status:** Canonical - -## Summary - -Engineering Loops are the standardized execution plans used by agents to resolve **Drift**. By following these deterministic sequences, **dev.kit** ensures that complex tasksโ€”from feature implementation to documentation synchronizationโ€”remain grounded in repository truth. - ---- - -## ๐Ÿ— The Standard Loop (Drift Resolution) - -Every high-fidelity task follows the **Analyze -> Normalize -> Process -> Validate -> Capture** cycle. - -### 1. Feature Engineering Loop -Standard loop for implementing new capabilities with TDD and documentation. -- **Goal**: Expand repository "Skills" while maintaining 12-factor compliance. -- **Steps**: - 1. **Analyze**: Audit existing code and docs to identify the implementation gap. - 2. **Normalize**: `dev.kit task start` to create a bounded `workflow.md`. - 3. **Process**: `dev.kit skills run` to implement logic and test cases. - 4. **Validate**: `dev.kit doctor` to verify environment health and TDD success. - 5. **Capture**: `dev.kit sync run` to logically group and commit the resolution. - -### 2. Resilient Bugfix Loop -Deterministic lifecycle for identifying, reproducing, and resolving repository defects. 
-- **Goal**: Restore repository integrity with verified test evidence. -- **Steps**: - 1. **Analyze**: `dev.kit doctor` to detect environment or software drift. - 2. **Normalize**: Define reproduction steps in a new `workflow.md`. - 3. **Process**: Apply the fix and implement a regression test. - 4. **Validate**: Execute the test suite within the **Worker Ecosystem**. - 5. **Capture**: `dev.kit sync run` to finalize the fix and update the Skill Mesh. - -### 3. Knowledge & Discovery Sync -Workflow for synchronizing repository documentation and agent context. -- **Goal**: Eliminate documentation drift and hydrate the **Skill Mesh**. -- **Steps**: - 1. **Analyze**: Scan `docs/` and script headers for outdated metadata. - 2. **Normalize**: Map documentation updates to current repository reality. - 3. **Process**: `dev.kit visualizer` to regenerate high-fidelity architecture diagrams. - 4. **Validate**: Verify that all internal and external links are high-fidelity. - 5. **Capture**: `dev.kit ai sync` to ground the agent in the updated knowledge. - -## ๐Ÿ— Standard Loop Mapping - -The standard engineering loops are operationalized through specialized UDX targets: - -| Loop Domain | Grounding Target | Pattern Role | -| :--- | :--- | :--- | -| **Logic Implementation** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Intent normalization and task management. | -| **Environment Parity** | [`udx/worker`](https://github.com/udx/worker) | Deterministic runtime for loop execution. | -| **Automation Flow** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for sequence steps. | - ---- - -## ๐Ÿ— Workflow Grounding - -Engineering loops are operationalized through deterministic UDX engines: - -| Loop Type | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Engineering** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Dynamic normalization and task management. 
| -| **Automation** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for implementation steps. | - ---- - -## ๐Ÿง  Continuity Mandates - -- **Resume First**: Before starting a new loop, agents must check for active tasks (`dev.kit task active`). -- **Hygiene**: Aborted or stagnant loops must be pruned (`dev.kit task cleanup`) to prevent context noise. -- **Feedback**: Every iteration must emit high-signal progress to the `feedback.md` artifact. - -## ๐Ÿ“š Authoritative References - -Standardized loops ensure predictable delivery and high-fidelity results: - -- **[Little's Law for Flow](https://andypotanin.com/littles-law-applied-to-devops/)**: Managing cycle time and throughput through systematic sequences. -- **[Observation-Driven Management](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Optimizing task assignment and execution through identified patterns. - ---- -_UDX DevSecOps Team_ diff --git a/docs/workflows/mermaid-patterns.md b/docs/workflows/mermaid-patterns.md deleted file mode 100644 index 0ae0356..0000000 --- a/docs/workflows/mermaid-patterns.md +++ /dev/null @@ -1,54 +0,0 @@ -# Mermaid Patterns: Visual Standards - -**Domain:** Visual Engineering / Standards -**Status:** Canonical - -## Summary - -This reference provides standardized patterns for Mermaid-based visualizations within the `dev.kit` ecosystem. These patterns ensure that architecture and process flows are consistent, version-controlled, and legible to both humans and agents. - ---- - -## ๐Ÿ— Type Selection - -- **`flowchart`**: Use for process steps, service interactions, and decision gates. -- **`sequenceDiagram`**: Use for time-ordered interactions between actors or multi-turn execution loops. -- **`stateDiagram-v2`**: Use for state transitions with explicit events and lifecycle stages. -- **`erDiagram`**: Use for entity relationships and data cardinality. 
- ---- - -## ๐Ÿ“ Conventions - -- **Identifier Stability**: Maintain consistent IDs during revisions to ensure clean diffs. -- **Labeling**: Prefer short, action-oriented node labels; use edge labels for details. -- **Domain Separation**: Split diagrams when crossing functional boundaries (e.g., separate API flow from deployment flow). -- **Horizontal Priority**: Favor `flowchart LR` to optimize vertical space in Markdown documentation. - ---- - -## โš™๏ธ Deterministic Logic (Export) - -- **Fail-Open**: If `mmdc` fails, always provide the raw Mermaid source to the user/agent. -- **Sandboxing**: In restricted environments, leverage Puppeteer `--no-sandbox` flags via local configuration. - -## ๐Ÿ— Standard Grounding - -Visual standards are operationalized through deterministic UDX engines: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Logic** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | High-fidelity export engine and pattern discovery. | -| **Runtime** | [`udx/worker`](https://github.com/udx/worker) | Deterministic environment for asset generation. | - ---- - -## ๐Ÿ“š Authoritative References - -Visual standards are a core part of maintaining standalone documentation quality: - -- **[Synthetic Content Enrichment](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: High-fidelity metadata management and visual standards. -- **[Visual Tracing & Logistics](https://andypotanin.com/digital-rails-and-logistics/)**: Drawing parallels between software algorithms and visual process dynamics. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/workflows/normalization.md b/docs/workflows/normalization.md deleted file mode 100644 index 22d885b..0000000 --- a/docs/workflows/normalization.md +++ /dev/null @@ -1,73 +0,0 @@ -# Task Normalization: Intent-to-Workflow Mapping - -**Domain:** Foundations / Normalization -**Status:** Canonical - -## Summary - -**Task Normalization** is the process of transforming ambiguous user intent into deterministic execution plans. In **dev.kit**, the AI agent acts as the primary **Mapper**, reasoning about the request and mapping it to the appropriate repository workflows and primitives. - ---- - -## ๐Ÿ— The Normalization Mapper - -The agent is responsible for dynamic prompt transformation. It receives intent from the user, identifies the required capabilities, and sends structured instructions to the `dev.kit` workflow engine. - -### 1. Dynamic Suggestions (Incremental Experience) -Every normalization cycle includes a heuristic check of the repository and environment. The `dev.kit suggest` command is used to provide actionable feedback that improves CDE compliance. -- **Example**: Detecting missing documentation or unnormalized CI/CD configs. -- **Action**: Suggested fixes are included in the normalization context for the agent to consider. - -### 2. Strict Mappings (Deterministic) -Used for well-defined engineering tasks where the path is predictable and hardened. -- **Example**: Git Synchronization, environment hydration (`config detect`), or diagram rendering. -- **Enforcement**: Direct mapping to `lib/commands/` or `docs/workflows/assets/*.yaml`. - -### 3. Non-Strict Mappings (Reasoning-First) -Used for creative or complex tasks where the agent must reason about the best path before committing to a sequence. -- **Example**: Implementing a new feature, refactoring complex logic, or resolving multi-domain drift. -- **Enforcement**: The agent generates a custom `workflow.md` that orchestrates multiple primitives. 
- - ---- - -## ๐Ÿ”„ Dynamic Prompt Transformation - -Agents are auto-mapped to send and receive context from repository workflows. If a task requires something outside of existing scripts or tools, the agent: -1. **Reasons** about the implementation gap. -2. **Generates** the necessary code or documentation patterns. -3. **Packages** the resolution into a normalized `dev.kit` workflow step. - -## ๐Ÿ— Standard Task Mapping - -The normalization mapper routes common engineering intents to specialized UDX repositories: - -| Intent Domain | Grounding Target | Mapping logic | -| :--- | :--- | :--- | -| **Containerization** | [`udx/worker`](https://github.com/udx/worker) | Normalize to base environment specs. | -| **Plugin Dev** | [`udx/wp-stateless`](https://github.com/udx/wp-stateless) | Normalize to structural plugin patterns. | -| **CI/CD / Actions** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Normalize to validated pipeline steps. | - ---- - -## ๐Ÿ— Normalization Grounding - -Task normalization is operationalized through canonical UDX resources: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Logic Mapping** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Dynamic discovery and task resolution engine. | -| **Context Hub** | [`docs/workflows/README.md`](README.md) | Source of truth for available repository sequences. | -| **Fidelity** | [`udx/worker`](https://github.com/udx/worker) | Deterministic runtime for validating normalized plans. | - ---- - -## ๐Ÿ“š Authoritative References - -Normalization ensures high-fidelity execution through systematic pattern recognition: - -- **[Observation-Driven Management](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Optimizing task assignment through identified patterns. 
-- **[Autonomous Technical Operations](https://andypotanin.com/claude-operator-prompt/)**: Principles for high-fidelity agent grounding and execution. - ---- -_UDX DevSecOps Team_ diff --git a/docs/workflows/visualizer.md b/docs/workflows/visualizer.md deleted file mode 100644 index 979ccaa..0000000 --- a/docs/workflows/visualizer.md +++ /dev/null @@ -1,64 +0,0 @@ -# Skill: dev-kit-visualizer - -**Domain:** Visual Engineering -**Type:** AI Reasoning Skill -**status:** Canonical - -## Summary - -The **Visual Engineering** skill empowers AI agents to transform repository context into high-fidelity diagrams. It uses dynamic reasoning to understand source code, flow, and architecture, then leverages the deterministic `dev.kit visualizer` command to render SVG assets. - ---- - -## ๐Ÿ›  AI Reasoning (The Skill) - -This skill utilizes dynamic LLM reasoning to perform the following: -- **Flow Extraction**: Reading READMEs or source code to identify discrete process steps. -- **Visual Mapping**: Determining which Mermaid pattern (flowchart, sequence, state) best represents the intent. -- **Intent-to-MMD**: Generating raw Mermaid source code based on extracted logic. - ---- - -## โš™๏ธ Deterministic Logic (Function Assets) - -The following assets provide the programmatic engine for this skill: -- **Templates**: Standardized Mermaid patterns in `assets/templates/`. -- **Patterns**: High-fidelity Mermaid styling and shape standards. -- **Export Engine**: Hardened `mmdc` wrapper for SVG/PNG generation. - -## ๐Ÿ— Visual Grounding - -Visual engineering is operationalized through deterministic UDX engines: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Logic** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | High-fidelity export engine and template discovery. | -| **Patterns** | [`reference/standards/mermaid.md`](../reference/standards/mermaid.md) | Canonical shapes and visual mapping rules. 
| -| **Runtime** | [`udx/worker`](https://github.com/udx/worker) | Deterministic environment for asset generation. | - ---- - -## ๐Ÿš€ Primitives Orchestrated - -This skill is grounded in the following **Deterministic Primitives**: -- **`dev.kit visualizer create`**: Initializes a new Mermaid source from templates. -- **`dev.kit visualizer export`**: Renders Mermaid sources into SVG/PNG. - ---- - -## ๐Ÿ“‚ Managed Assets - -- **Templates**: Standard flowchart, sequence, and state machine patterns in `docs/workflows/assets/templates/`. -- **Patterns**: High-fidelity Mermaid styling and shape standards in `docs/workflows/mermaid-patterns.md`. - ---- - -## ๐Ÿ“š Authoritative References - -Visual engineering is grounded in systematic diagramming and documentation standards: - -- **[Visualizing Complex Systems](https://andypotanin.com/digital-rails-and-logistics/)**: Understanding software evolution through fluid dynamics and visual tracing. -- **[Mermaid Standards](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: High-fidelity synthetic enrichment for documentation. - ---- -_UDX DevSecOps Team_ diff --git a/environment.yaml b/environment.yaml deleted file mode 100644 index c0adb19..0000000 --- a/environment.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# dev.kit Environment Orchestrator -# Standardize configurations across hosts and repositories. 
- -system: - quiet: false - developer: false - state_path: "~/.udx/dev.kit/state" - -exec: - prompt: "ai.gemini.v1" - stream: false - -ai: - enabled: false - provider: "gemini" # Supported: gemini - auto_sync: true # Automatically synchronize skills on shell load - # Discovery: Capabilities resolved at runtime via lib/commands and docs/skills - -context: - enabled: true - max_bytes: 4000 - storage: "repo" # repo-scoped storage for context - -install: - path_prompt: true diff --git a/lib/commands/ai.sh b/lib/commands/ai.sh deleted file mode 100644 index 61c8b7d..0000000 --- a/lib/commands/ai.sh +++ /dev/null @@ -1,134 +0,0 @@ -#!/bin/bash - -# @description: Unified agent integration management (Sync, Skills, Status, Configuration). -# @intent: ai, agent, integration, skills, sync, status -# @objective: Manage the lifecycle of AI integrations by synchronizing skills, monitoring health, and configuring agent artifacts. -# @usage: dev.kit ai status -# @usage: dev.kit ai sync gemini -# @usage: dev.kit ai agent gemini --plan -# @workflow: 1. Monitor Integration Health -> 2. Synchronize Skills & Memories -> 3. Configure Agent Artifacts -> 4. Provide Advisory Insights - -if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then - # shellcheck source=/dev/null - . 
"$REPO_DIR/lib/utils.sh" -fi - -dev_kit_cmd_ai() { - local sub="${1:-status}" - - case "$sub" in - status) - print_section "dev.kit | AI Integration Status" - local provider; provider="$(config_value_scoped ai.provider "gemini")" - local enabled; enabled="$(config_value_scoped ai.enabled "false")" - - print_check "Provider" "[ok]" "$provider" - print_check "Enabled" "$([ "$enabled" = "true" ] && echo "[ok]" || echo "[warn]")" "$enabled" - - echo "" - echo "Active Integrations:" - if [ -d "$HOME/.gemini" ]; then - print_check "Gemini" "[ok]" "path: ~/.gemini" - else - print_check "Gemini" "[warn]" "missing (run: dev.kit ai sync)" - fi - ;; - sync) - local provider="${2:-}" - [ -z "$provider" ] && provider="$(config_value_scoped ai.provider "gemini")" - echo "Synchronizing AI skills and memories for: $provider" - if command -v dev_kit_agent_apply_integration >/dev/null 2>&1; then - dev_kit_agent_apply_integration "$provider" "apply" - else - echo "Error: Agent manager module not loaded." >&2 - exit 1 - fi - ;; - agent) - shift - local agent_sub="${1:-status}" - case "$agent_sub" in - status) - echo "Integrations found in manifest:" - jq -r '.integrations[].key' "$(dev_kit_agent_manifest)" | sed 's/^/- /' - ;; - disable) - local key="${2:-}" - if [ "$key" = "all" ]; then - for k in $(jq -r '.integrations[].key' "$(dev_kit_agent_manifest)"); do dev_kit_agent_disable_integration "$k"; done - else - [ -z "$key" ] && { echo "Usage: dev.kit ai agent disable " >&2; exit 1; } - dev_kit_agent_disable_integration "$key" - fi - ;; - skills) - local key="${2:-}" - [ -z "$key" ] && { echo "Usage: dev.kit ai agent skills " >&2; exit 1; } - local manifest="$(dev_kit_agent_manifest)" - local target_dir="$(dev_kit_agent_expand_path "$(jq -r ".integrations[] | select(.key == \"$key\") | .target_dir" "$manifest")")" - echo "Managed Skills for '$key' ($target_dir/skills):" - [ -d "$target_dir/skills" ] && ls "$target_dir/skills" | sed 's/^/- /' || echo "(none)" - ;; - *) - local 
key="$agent_sub" - local mode="apply" - [ "${2:-}" = "--plan" ] && mode="plan" - if [ "$key" = "all" ]; then - for k in $(jq -r '.integrations[].key' "$(dev_kit_agent_manifest)"); do dev_kit_agent_apply_integration "$k" "$mode"; done - else - dev_kit_agent_apply_integration "$key" "$mode" - fi - ;; - esac - ;; - skills) - print_section "dev.kit | Managed AI Skills" - local local_packs="$REPO_DIR/docs/skills" - if [ -d "$local_packs" ]; then - find "$local_packs" -mindepth 1 -maxdepth 1 -type d | sort | while IFS= read -r skill; do - local name; name="$(basename "$skill")" - local desc; desc="$(grep -i "^description:" "$skill/SKILL.md" 2>/dev/null | head -n 1 | sed 's/^description: //I' || echo "no description")" - echo "- [skill] $name" - echo " description: $desc" - echo " usage: dev.kit skills run \"$name\" \"\"" - echo "" - done - fi - ;; - commands) - print_section "dev.kit | CLI Commands Metadata" - for file in "$REPO_DIR"/lib/commands/*.sh; do - [ -f "$file" ] || continue - local key; key="$(basename "${file%.sh}")" - local desc; desc="$(grep "^# @description:" "$file" | cut -d: -f2- | sed 's/^ //' || echo "no description")" - echo "- [command] dev.kit $key" - echo " description: $desc" - echo "" - done - ;; - advisory) - local ops_dir="$REPO_DIR/docs/reference/operations" - if [ -d "$ops_dir" ]; then - echo "Engineering Advisory (Resolved Insights):" - find "$ops_dir" -type f -name '*.md' | sort | while IFS= read -r file; do - local title; title="$(head -n 1 "$file" | sed 's/^# //')" - echo "- [insight] $title" - done - fi - ;; - help|-h|--help) - cat <<'AI_HELP' -Usage: dev.kit ai - -Commands: - status Show AI provider and integration health - sync [provider] Synchronize AI skills, memories, and hooks - agent Configure agent artifacts (use --plan to dry-run) - skills List managed AI skills - commands List CLI commands with metadata - advisory Fetch engineering guidance from local docs -AI_HELP - ;; - *) echo "Unknown ai command: $sub" >&2; exit 1 ;; - esac 
-} diff --git a/lib/commands/config.sh b/lib/commands/config.sh deleted file mode 100644 index df1b1c1..0000000 --- a/lib/commands/config.sh +++ /dev/null @@ -1,443 +0,0 @@ -#!/bin/bash - -# @description: Environment and repository orchestration settings. -# @intent: config, setting, env, setup, manage - -if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then - # shellcheck source=/dev/null - . "$REPO_DIR/lib/utils.sh" -fi - -dev_kit_cmd_config() { - ensure_dev_kit_home - local sub="${1:-}" - - scope_path() { - local scope="$1" - local variant="$2" - local base="" - case "$scope" in - global) - if [ -n "${DEV_KIT_STATE:-}" ] && [ -d "$DEV_KIT_STATE" ]; then - base="$DEV_KIT_STATE" - else - base="$DEV_KIT_HOME" - fi - ;; - repo) - if command -v git >/dev/null 2>&1; then - base="$(git rev-parse --show-toplevel 2>/dev/null)/.udx/dev.kit" - fi - ;; - *) - return 1 - ;; - esac - if [ -z "$base" ]; then - return 1 - fi - case "$variant" in - show|set|reset) echo "$base/config.env" ;; - default) echo "$base/config.default.env" ;; - min) echo "$base/config.min.env" ;; - max) echo "$base/config.max.env" ;; - custom) echo "$base/config.custom.env" ;; - *) return 1 ;; - esac - } - - prompt_value() { - local label="$1" - local default="${2:-}" - local input="" - if [ -t 0 ]; then - if [ -n "$default" ]; then - printf "%s [%s]: " "$label" "$default" - else - printf "%s: " "$label" - fi - read -r input || true - fi - if [ -n "$input" ]; then - printf "%s" "$input" - else - printf "%s" "$default" - fi - } - - parse_key_flag() { - local key="" - local args=("$@") - local i=0 - while [ $i -lt ${#args[@]} ]; do - case "${args[$i]}" in - --key=*) - key="${args[$i]#--key=}" - ;; - --key) - if [ $((i+1)) -lt ${#args[@]} ]; then - key="${args[$((i+1))]}" - i=$((i+1)) - fi - ;; - esac - i=$((i+1)) - done - printf "%s" "$key" - } - - parse_scope_flag() { - local scope="global" - local args=("$@") - local i=0 - while [ $i -lt ${#args[@]} ]; do - case "${args[$i]}" in - 
--scope=*) - scope="${args[$i]#--scope=}" - ;; - --scope) - if [ $((i+1)) -lt ${#args[@]} ]; then - scope="${args[$((i+1))]}" - i=$((i+1)) - fi - ;; - esac - i=$((i+1)) - done - printf "%s" "$scope" - } - - parse_force_flag() { - local force="false" - local args=("$@") - local i=0 - while [ $i -lt ${#args[@]} ]; do - case "${args[$i]}" in - --force) force="true" ;; - esac - i=$((i+1)) - done - printf "%s" "$force" - } - - parse_developer_flag() { - local developer="false" - local args=("$@") - local i=0 - while [ $i -lt ${#args[@]} ]; do - case "${args[$i]}" in - --developer) developer="true" ;; - esac - i=$((i+1)) - done - printf "%s" "$developer" - } - - update_config_value() { - local key="$1" - local value="$2" - local path="${3:-$CONFIG_FILE}" - config_set_value "$key" "$value" "$path" - if [ -n "$value" ]; then - echo "Set: $key = $value ($path)" - else - echo "Reset: $key ($path)" - fi - } - - detect_cli() { - local name="$1" - local path="" - if command -v "$name" >/dev/null 2>&1; then - path="$(command -v "$name")" - local ver="" - case "$name" in - git) ver=$($name --version | awk '{print $3}') ;; - gh) ver=$($name version | head -n1 | awk '{print $3}') ;; - docker) ver=$($name --version | awk '{print $3}' | tr -d ',') ;; - npm) ver=$($name --version) ;; - node) ver=$($name --version) ;; - python) ver=$($name --version 2>&1 | awk '{print $2}') ;; - *) ver="found" ;; - esac - printf "%-12s %-10s %s\n" "$name" "$ver" "($path)" - else - printf "%-12s %-10s %s\n" "$name" "missing" "" - fi - } - - case "$sub" in - detect) - if command -v ui_header >/dev/null 2>&1; then - ui_header "dev.kit | software detection" - else - echo "--- Software Detection ---" - fi - detect_cli git - detect_cli gh - detect_cli docker - detect_cli npm - detect_cli node - detect_cli python - detect_cli terraform - detect_cli ruff - detect_cli tsc - detect_cli mmdc - ;; - global|repo) - - local action="${2:---show}" - local path="" - case "$action" in - --show|show) path="$(scope_path 
"$sub" show)" ;; - --default|default) path="$(scope_path "$sub" default)" ;; - --min|min) path="$(scope_path "$sub" min)" ;; - --max|max) path="$(scope_path "$sub" max)" ;; - --custom|custom) path="$(scope_path "$sub" custom)" ;; - *) - echo "Unknown config action: $action" >&2 - exit 1 - ;; - esac - if [ -z "${path:-}" ]; then - echo "Config scope not available: $sub" >&2 - exit 1 - fi - if [ "$action" = "--custom" ] || [ "$action" = "custom" ]; then - local schema_artifact="$REPO_DIR/docs/artifacts/modules/config/local-schema.json" - local schema_source="$REPO_DIR/docs/src/configs/tooling/local/config-schema.json" - local schema_path="$schema_artifact" - if [ ! -f "$schema_path" ]; then - schema_path="$schema_source" - fi - if [ ! -f "$schema_path" ]; then - echo "Config schema not found: $schema_artifact or $schema_source" >&2 - exit 1 - fi - if ! command -v jq >/dev/null 2>&1; then - echo "jq is required for --custom config generation." >&2 - exit 1 - fi - mkdir -p "$(dirname "$path")" - : > "$path" - while IFS= read -r field; do - local field_json="" - local key="" - local default="" - local desc="" - local options="" - field_json="$(printf "%s" "$field" | base64 --decode)" - key="$(printf "%s" "$field_json" | jq -r '.key // empty')" - default="$(printf "%s" "$field_json" | jq -r '.default // ""')" - desc="$(printf "%s" "$field_json" | jq -r '.description // ""')" - options="$(printf "%s" "$field_json" | jq -r '.options // [] | join(\", \")')" - if [ -n "$desc" ]; then - echo "" - echo "$desc" - fi - if [ -n "$options" ]; then - echo "options: $options" - fi - local value="" - if [ -t 0 ]; then - printf "%s [%s]: " "$key" "$default" - read -r value || true - fi - if [ -z "$value" ]; then - value="$default" - fi - if [ -n "$key" ]; then - printf "%s = %s\n" "$key" "$value" >> "$path" - fi - done < <(jq -r '.fields[] | @base64' "$schema_path") - echo "Saved: $path" - exit 0 - fi - if [ -f "$path" ]; then - cat "$path" - exit 0 - fi - if [ "$sub" = "repo" ]; then 
- if [ -f "$CONFIG_FILE" ]; then - cat "$CONFIG_FILE" - exit 0 - fi - fi - echo "Config file not found: $path" >&2 - exit 1 - ;; - show|"") - local key - key="$(parse_key_flag "$@")" - if [ -n "$key" ]; then - local val="" - val="$(config_value_scoped "$key" "")" - if [ -n "$val" ]; then - echo "$key = $val" - else - echo "Key not found: $key" >&2 - exit 1 - fi - exit 0 - fi - if [ -f "${ENVIRONMENT_YAML:-}" ]; then - echo "Orchestrator: $ENVIRONMENT_YAML" - cat "$ENVIRONMENT_YAML" - echo "" - fi - if [ -f "$CONFIG_FILE" ]; then - echo "Global: $CONFIG_FILE" - cat "$CONFIG_FILE" - echo "" - fi - local local_path - local_path="$(local_config_path || true)" - if [ -n "$local_path" ] && [ -f "$local_path" ]; then - echo "Local: $local_path" - cat "$local_path" - echo "" - fi - echo "Detected CLIs (read-only):" - detect_cli git - detect_cli gh - detect_cli docker - detect_cli npm - detect_cli codex - detect_cli claude - detect_cli gemini - ;; - reset) - local force="false" - force="$(parse_force_flag "$@")" - local scope="global" - scope="$(parse_scope_flag "$@")" - local key="" - key="$(parse_key_flag "$@")" - local target_path="" - if [ "$scope" = "repo" ]; then - target_path="$(scope_path "repo" "reset")" - else - target_path="$CONFIG_FILE" - fi - - if [ -n "$key" ]; then - if [ "$force" != "true" ]; then - confirm_action "Reset $key to default in $scope scope?" - fi - local default_val="" - default_val="$(config_get_value "$REPO_DIR/config/default.env" "$key" "")" - update_config_value "$key" "$default_val" "$target_path" - exit 0 - fi - if [ ! -f "$REPO_DIR/config/default.env" ]; then - echo "Missing default config: $REPO_DIR/config/default.env" - exit 1 - fi - if [ -t 0 ] && [ "$force" != "true" ]; then - confirm_action "Reset $scope config to defaults?" 
- fi - if [ "$scope" = "repo" ]; then - if [ -z "$target_path" ]; then - echo "Repo scope not available" >&2 - exit 1 - fi - cp "$REPO_DIR/config/default.env" "$target_path" - else - cp "$REPO_DIR/config/default.env" "$CONFIG_FILE" - cp "$REPO_DIR/config/default.env" "$DEV_KIT_HOME/config.env" - fi - echo "Reset: $target_path" - ;; - set) - local force="false" - force="$(parse_force_flag "$@")" - local developer="false" - developer="$(parse_developer_flag "$@")" - local scope="global" - scope="$(parse_scope_flag "$@")" - local key="" - local value="" - key="$(parse_key_flag "$@")" - - local target_path="" - if [ "$scope" = "repo" ]; then - target_path="$(scope_path "repo" "set")" - else - target_path="$CONFIG_FILE" - fi - - if [ "$developer" = "true" ]; then - update_config_value "exec.prompt" "developer" "$target_path" "set" - update_config_value "developer.enabled" "true" "$target_path" "set" - exit 0 - fi - - if [ -n "$key" ]; then - # If --key was used, the value is the first non-flag argument that is NOT the key or --key - local arg - for arg in "$@"; do - if [[ "$arg" != --* ]] && [ "$arg" != "$key" ] && [ "$arg" != "set" ]; then - value="$arg" - break - fi - done - else - # positional legacy support: dev.kit config set - key="${2:-}" - value="${3:-}" - fi - # re-check key/value if not set by --key/--value - if [ -z "$key" ] || [[ "$key" == --* ]]; then - key="" - fi - - if [ -z "$key" ]; then - if [ -t 0 ] && [ "$force" != "true" ]; then - key="$(prompt_value "key" "")" - else - echo "Missing --key in non-interactive mode" >&2 - exit 1 - fi - fi - if [ -z "$value" ]; then - if [ -t 0 ] && [ "$force" != "true" ]; then - value="$(prompt_value "value" "")" - else - echo "Missing value in non-interactive mode" >&2 - exit 1 - fi - fi - if [ -z "$key" ] || [ -z "$value" ]; then - echo "Usage: dev.kit config set [--scope global|repo] --key " >&2 - exit 1 - fi - update_config_value "$key" "$value" "$target_path" "set" - if [ "$scope" = "global" ] && [ "$key" = 
"state_path" ]; then - update_config_value "$key" "$value" "$DEV_KIT_HOME/config.env" "set" - fi - ;; - -h|--help) - cat <<'CONFIG_USAGE' -Usage: dev.kit config - -Commands: - show Print current config - reset Reset config to defaults (prompts) - set Set a config key/value (or --developer) - global Global config (use --show|--default|--min|--max|--custom) - repo Repo config (use --show|--default|--min|--max|--custom) - -Options: - --key Target a specific config key - --value Set a config value when using --key - --scope Target scope: global (default) or repo - --force Skip confirmation prompts - --developer Enable developer mode (sets exec.prompt + developer.enabled) -CONFIG_USAGE - ;; - *) - echo "Unknown config command: $sub" >&2 - exit 1 - ;; - esac -} diff --git a/lib/commands/gh.sh b/lib/commands/gh.sh deleted file mode 100644 index f326c5f..0000000 --- a/lib/commands/gh.sh +++ /dev/null @@ -1,190 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# @description: Manage GitHub operations (PRs, issues, actions) if gh CLI is available. -# @intent: github, pr, issue, repo, remote - -# github.sh -# -# GitHub triage helper (GH CLI only): assigned issues, my PRs, PRs to review. -# Ensures authentication before each command execution. 
-# -# Requirements: -# - gh (GitHub CLI) -# -# Auth: -# - Preferred: GH_TOKEN or GITHUB_TOKEN (non-interactive) -# - Otherwise: interactive "gh auth login" when needed -# -# Usage: -# dev.kit github assigned-issues [--repo OWNER/REPO] [--state open|closed|all] [--limit N] [--json] -# dev.kit github my-prs [--repo OWNER/REPO] [--state open|closed|merged|all] [--limit N] [--json] -# dev.kit github review-prs [--repo OWNER/REPO] [--state open|closed|merged|all] [--limit N] [--json] [--include-drafts] -# dev.kit github pr-create --title "Title" --body "Body" [--base branch] [--head branch] [--draft] - -dev_kit_cmd_gh() { - LIMIT=30 - STATE="open" - REPO="" - JSON=0 - INCLUDE_DRAFTS=0 - COMMAND="" - - PR_TITLE="" - PR_BODY="" - PR_BASE="main" - PR_HEAD="" - PR_DRAFT="false" - - die() { echo "ERROR: $*" >&2; exit 1; } - - usage() { - cat < [options] - -Commands: - assigned-issues List issues assigned to you - my-prs List PRs authored by you - review-prs List PRs requesting your review - pr-create Create a new Pull Request - -Options: - --repo OWNER/REPO Restrict to one repository - --state STATE open|closed|merged|all (default: open) - --limit N Max results (default: 30) - --json JSON output (adds useful default fields) - --include-drafts (review-prs only) include draft PRs - -Options (pr-create): - --title TITLE PR title - --body BODY PR body - --base BRANCH Base branch (default: main) - --head BRANCH Head branch (default: current branch) - --draft Create as draft PR - - -h, --help Show this help - -Auth: - - Preferred: export GH_TOKEN=... (or GITHUB_TOKEN=...) - - Otherwise: gh auth login (interactive) - -EOF - } - - need_gh() { - command -v gh >/dev/null 2>&1 || die "gh not found. Install GitHub CLI: https://cli.github.com/" - } - - ensure_auth() { - if [[ -n "${GH_TOKEN:-}" || -n "${GITHUB_TOKEN:-}" ]]; then - return 0 - fi - - if gh auth status >/dev/null 2>&1; then - return 0 - fi - - echo "No GH_TOKEN/GITHUB_TOKEN and gh not authenticated. 
Running: gh auth login" >&2 - gh auth login 1>&2 - gh auth status >/dev/null 2>&1 || die "gh authentication failed" - } - - run_gh() { - ensure_auth - gh "$@" - } - - assigned_issues() { - case "$STATE" in open|closed|all) ;; *) die "assigned-issues: --state must be open|closed|all" ;; esac - - local args=(issue list --assignee @me --limit "$LIMIT" --state "$STATE") - [[ -n "$REPO" ]] && args+=(--repo "$REPO") - if [[ "$JSON" -eq 1 ]]; then - args+=(--json number,title,url,updatedAt,createdAt,state) - fi - run_gh "${args[@]}" - } - - my_prs() { - case "$STATE" in open|closed|merged|all) ;; *) die "my-prs: --state must be open|closed|merged|all" ;; esac - - local args=(pr list --author @me --limit "$LIMIT" --state "$STATE") - [[ -n "$REPO" ]] && args+=(--repo "$REPO") - if [[ "$JSON" -eq 1 ]]; then - args+=(--json number,title,url,updatedAt,createdAt,state,isDraft) - fi - run_gh "${args[@]}" - } - - review_prs() { - case "$STATE" in open|closed|merged|all) ;; *) die "review-prs: --state must be open|closed|merged|all" ;; esac - - local args=(pr list --search "review-requested:@me" --limit "$LIMIT" --state "$STATE") - [[ -n "$REPO" ]] && args+=(--repo "$REPO") - if [[ "$INCLUDE_DRAFTS" -eq 0 ]]; then - args+=(--draft=false) - fi - if [[ "$JSON" -eq 1 ]]; then - args+=(--json number,title,url,updatedAt,createdAt,state,isDraft) - fi - run_gh "${args[@]}" - } - - pr_create() { - [[ -n "$PR_TITLE" ]] || die "pr-create: --title is required" - [[ -n "$PR_BODY" ]] || die "pr-create: --body is required" - - if command -v dev_kit_github_pr_create >/dev/null 2>&1; then - ensure_auth - dev_kit_github_pr_create "$PR_TITLE" "$PR_BODY" "$PR_BASE" "$PR_HEAD" "$PR_DRAFT" - else - die "GitHub module logic not loaded." 
- fi - } - - while [[ $# -gt 0 ]]; do - case "$1" in - assigned-issues|my-prs|review-prs|pr-create) - COMMAND="$1"; shift;; - --repo) - REPO="${2:-}"; shift 2;; - --state) - STATE="${2:-}"; shift 2;; - --limit) - LIMIT="${2:-}"; shift 2;; - --json) - JSON=1; shift;; - --include-drafts) - INCLUDE_DRAFTS=1; shift;; - --title) - PR_TITLE="${2:-}"; shift 2;; - --body) - PR_BODY="${2:-}"; shift 2;; - --base) - PR_BASE="${2:-}"; shift 2;; - --head) - PR_HEAD="${2:-}"; shift 2;; - --draft) - PR_DRAFT="true"; shift;; - -h|--help) - usage; exit 0;; - *) - die "Unknown argument: $1 (use --help)";; - esac - done - - [[ -n "$COMMAND" ]] || { usage; exit 1; } - - need_gh - - case "$COMMAND" in - assigned-issues) assigned_issues ;; - my-prs) my_prs ;; - review-prs) review_prs ;; - pr-create) pr_create ;; - *) die "Unknown command: $COMMAND" ;; - esac -} diff --git a/lib/commands/skills.sh b/lib/commands/skills.sh deleted file mode 100644 index 8564a4f..0000000 --- a/lib/commands/skills.sh +++ /dev/null @@ -1,202 +0,0 @@ -#!/usr/bin/env bash - -# @description: Discover and execute repository-bound skills. -# @intent: skills, list, run, discover, execute -# @objective: Provide a unified interface for discovering and executing both deterministic CLI commands and managed AI skills grounded in the repository. -# @usage: dev.kit skills list -# @usage: dev.kit skills run [intent] -# @workflow: 1. Discover capabilities -> 2. Resolve intent -> 3. Normalize to deterministic command -> 4. Execute and report - -if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then - # shellcheck source=/dev/null - . "$REPO_DIR/lib/utils.sh" -fi - -dev_kit_cmd_skills() { - local sub="${1:-list}" - - # Resolve skills directory based on active provider - local provider - provider="$(config_value_scoped ai.provider "gemini")" - local skills_dir="$HOME/.$provider/skills" - - case "$sub" in - list) - print_section "dev.kit | Dynamic Capability Mesh" - - # 1. 
Deterministic Commands (Internal Logic) - echo "Deterministic Commands (Internal Logic):" - for file in "$REPO_DIR"/lib/commands/*.sh; do - [ -f "$file" ] || continue - local name; name="$(basename "${file%.sh}")" - # Hide internal/utility commands from the main logic list - case "$name" in agent|github|skills) continue ;; esac - - local desc; desc="$(grep "^# @description:" "$file" | cut -d: -f2- | sed 's/^ //' || echo "no description")" - echo "- [command] $name: $desc" - done - - # List from lib/modules/ - for file in "$REPO_DIR"/lib/modules/*.sh; do - [ -f "$file" ] || continue - local name; name="$(basename "${file%.sh}")" - local desc; desc="$(grep "^# @description:" "$file" | cut -d: -f2- | sed 's/^ //' || echo "no description")" - echo "- [module] $name: $desc" - done - echo "" - - # 2. AI Skills (Dynamic Reasoning) - echo "AI Skills (Dynamic Reasoning):" - # List from provider-specific managed path - if [ -d "$skills_dir" ]; then - find "$skills_dir" -mindepth 1 -maxdepth 1 -name "dev-kit-*" -type d | while read -r skill; do - local name; name="$(basename "$skill")" - local desc="(no description)" - if [ -f "$skill/SKILL.md" ]; then - desc="$(grep -i "^description:" "$skill/SKILL.md" | head -n 1 | sed 's/^description: //I')" - fi - echo "- [skill] $name: $desc" - done - fi - - # List from local repo workflows - local local_workflows="$REPO_DIR/docs/workflows" - if [ -d "$local_workflows" ]; then - find "$local_workflows" -maxdepth 1 -name "*.md" | while read -r skill_file; do - local filename; filename="$(basename "$skill_file")" - [ "$filename" = "README.md" ] && continue - [ "$filename" = "normalization.md" ] && continue - [ "$filename" = "loops.md" ] && continue - [ "$filename" = "mermaid-patterns.md" ] && continue - - local name="${filename%.md}" - # Skip showing if already listed in managed - [ -d "$skills_dir/dev-kit-$name" ] && continue - - local desc; desc="$(grep -i "^description:" "$skill_file" | head -n 1 | sed 's/^description: //I' || echo 
"Grounded workflow reasoning.")" - echo "- [skill] $name: $desc" - done - fi - echo "" - - # 3. Virtual Capabilities - echo "Virtual Capabilities (Environment):" - if command -v gh >/dev/null 2>&1; then echo "- [virtual] github (via gh CLI)"; fi - if command -v npm >/dev/null 2>&1; then echo "- [virtual] npm (via node runtime)"; fi - if command -v docker >/dev/null 2>&1; then echo "- [virtual] docker (via docker CLI)"; fi - if command -v gcloud >/dev/null 2>&1; then echo "- [virtual] google (via gcloud CLI)"; fi - echo "" - - return 0 - ;; - run|execute) - local skill_name="${2:-}" - local intent="${3:-}" - - if [ -z "$skill_name" ]; then - echo "Error: Skill name required. Usage: dev.kit skills run [intent]" >&2 - exit 1 - fi - - # Determine skill path or file - local skill_path="" - local skill_file="" - if [ -d "$skills_dir/$skill_name" ]; then - skill_path="$skills_dir/$skill_name" - elif [ -d "$skills_dir/dev-kit-$skill_name" ]; then - skill_path="$skills_dir/dev-kit-$skill_name" - elif [ -f "$REPO_DIR/docs/workflows/$skill_name.md" ]; then - skill_file="$REPO_DIR/docs/workflows/$skill_name.md" - elif [ -f "$REPO_DIR/docs/workflows/dev-kit-$skill_name.md" ]; then - skill_file="$REPO_DIR/docs/workflows/dev-kit-$skill_name.md" - fi - - # If skill path found, execute legacy script logic - if [ -n "$skill_path" ]; then - local script_exec="" - if [ -d "$skill_path/scripts" ]; then - if [ -f "$skill_path/scripts/$intent" ]; then - script_exec="$skill_path/scripts/$intent" - fi - fi - - if [ -n "$script_exec" ]; then - [ ! -x "$script_exec" ] && chmod +x "$script_exec" - export SKILL_PATH="$skill_path" - export SKILL_NAME="$skill_name" - shift 3 || true - "$script_exec" "$@" - exit $? 
- fi - fi - - # Dynamic Intent Normalization (The Modern Path) - if command -v dev_kit_context_normalize >/dev/null 2>&1; then - echo "--- dev.kit Intent Normalization ---" - echo "Input: $skill_name $intent" - - # Resolve intent to a structured manifest - local manifest - manifest="$(dev_kit_context_normalize "$skill_name $intent")" - - # Display the resolution for transparency - if command -v jq >/dev/null 2>&1; then - echo "Resolution:" - - # 1. Standard Commands/Workflows - echo "$manifest" | jq -r '.mappings.discovery[]? | " - [Detected] \(.name) (\(.type))"' - echo "$manifest" | jq -r '.mappings.internal_workflows[]? | " - [Workflow] \(.name) (\(.path))"' - fi - echo "------------------------------------" - echo "Status: Intent Resolved (Dynamic Discovery)" - exit 0 - else - echo "Error: Normalization mechanism not loaded." >&2 - exit 1 - fi - ;; - info) - local skill_name="${2:-}" - [ -z "$skill_name" ] && { echo "Error: Skill name required."; exit 1; } - - local skill_path="" - local skill_file="" - if [ -d "$skills_dir/$skill_name" ]; then - skill_path="$skills_dir/$skill_name" - elif [ -d "$skills_dir/dev-kit-$skill_name" ]; then - skill_path="$skills_dir/dev-kit-$skill_name" - elif [ -f "$REPO_DIR/docs/workflows/$skill_name.md" ]; then - skill_file="$REPO_DIR/docs/workflows/$skill_name.md" - elif [ -f "$REPO_DIR/docs/workflows/dev-kit-$skill_name.md" ]; then - skill_file="$REPO_DIR/docs/workflows/dev-kit-$skill_name.md" - fi - - if [ -n "$skill_path" ] && [ -f "$skill_path/SKILL.md" ]; then - cat "$skill_path/SKILL.md" - elif [ -n "$skill_file" ]; then - cat "$skill_file" - else - echo "Error: Skill info for '$skill_name' not found." 
>&2 - exit 1 - fi - ;; - help|-h|--help) - cat <<'SKILLS_HELP' -Usage: dev.kit skills - -Commands: - list List all available skills and their scripts - run [script] Execute a script from a skill - info Display skill documentation (SKILL.md) - -Examples: - dev.kit skills run diagram-generator new_diagram.sh "A -> B" - dev.kit skills execute git-sync -SKILLS_HELP - ;; - *) - echo "Unknown skills command: $sub" >&2 - exit 1 - ;; - esac -} diff --git a/lib/commands/status.sh b/lib/commands/status.sh deleted file mode 100644 index ab6b36f..0000000 --- a/lib/commands/status.sh +++ /dev/null @@ -1,100 +0,0 @@ -#!/bin/bash - -# @description: Engineering brief and system diagnostic. -# @intent: status, check, health, info, diagnostic -# @objective: Provide a compact, high-signal overview of the current engineering environment, active tasks, and empowerment mesh. -# @usage: dev.kit status -# @usage: dev.kit status --audit -# @usage: dev.kit status --json - -if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then - # shellcheck source=/dev/null - . "$REPO_DIR/lib/utils.sh" -fi - -dev_kit_cmd_status() { - local json_output="false" - local deep_audit="false" - - for arg in "$@"; do - case "$arg" in - --json) json_output="true" ;; - --audit) deep_audit="true" ;; - esac - done - - if [ "$json_output" = "true" ]; then - dev_kit_health_audit_json - return - fi - - ui_header "Engineering Brief" - - # 1. Identity & Operating Mode - local ai_enabled; ai_enabled="$(config_value_scoped ai.enabled "false")" - local provider; provider="$(config_value_scoped ai.provider "gemini")" - - if [ "$ai_enabled" = "true" ]; then - ui_ok "Mode" "AI-Powered ($provider)" - else - ui_info "Mode" "Personal Helper (Local)" - fi - - # 2. 
Workspace & Context - local repo_root; repo_root="$(get_repo_root || true)" - if [ -n "$repo_root" ]; then - ui_ok "Workspace" "$(basename "$repo_root")" - - # Active Task Discovery - local active_workflow="" - if [ -d "$repo_root/tasks" ]; then - active_workflow="$(find "$repo_root/tasks" -name "workflow.md" -exec grep -l "status: planned\|status: active" {} + | head -n 1 || true)" - if [ -n "$active_workflow" ]; then - local task_id; task_id="$(basename "$(dirname "$active_workflow")")" - echo "" - printf "%sWaterfall Progression: [%s]%s\n" "$(ui_cyan)" "$task_id" "$(ui_reset)" - grep -A 2 "^### Step" "$active_workflow" | awk ' - /^### Step/ { step = $0; sub(/^### /, "", step); printf " %-20s", step; } - /^status:/ { - status = $2; - if (status == "completed" || status == "done") printf " \033[32mโœ”\033[0m\n"; - else if (status == "active" || status == "running") printf " \033[36mโ€บ\033[0m\n"; - else printf " \033[2mโ€ฆ\033[0m\n"; - } - ' - fi - fi - else - ui_warn "Workspace" "Not in a repository" - fi - - # 3. Empowerment Mesh (Summary) - echo "" - printf "%sEmpowerment Mesh Summary:%s\n" "$(ui_cyan)" "$(ui_reset)" - if command -v gh >/dev/null 2>&1; then ui_ok "GitHub" "CLI Active"; fi - if command -v dev_kit_context7_health >/dev/null 2>&1 && dev_kit_context7_health >/dev/null 2>&1; then - ui_ok "Knowledge" "Context7 Ready" - fi - - # 4. 
Deep Audit (Optional) - if [ "$deep_audit" = "true" ]; then - echo "" - ui_header "Engineering Compliance Audit" - if [ -n "$repo_root" ]; then - [ -d "$repo_root/tests" ] && ui_ok "TDD" "Test suite detected" || ui_warn "TDD" "No tests found" - [ -f "$repo_root/environment.yaml" ] && ui_ok "CaC" "environment.yaml active" || ui_warn "CaC" "Missing orchestrator" - [ -d "$repo_root/docs" ] && ui_ok "Docs" "Knowledge base found" || ui_warn "Docs" "No documentation" - fi - echo "" - echo "Software Detection:" - for sw in git docker npm gh; do - if command -v "$sw" >/dev/null 2>&1; then ui_ok "$sw" "$(command -v "$sw")"; else ui_warn "$sw" "Missing"; fi - done - fi - - # 5. Actionable Advice - echo "" - ui_tip "Run 'dev.kit suggest' for repository improvements." - ui_tip "Run 'dev.kit status --audit' for a full compliance check." - echo "" -} diff --git a/lib/commands/suggest.sh b/lib/commands/suggest.sh deleted file mode 100644 index 6b4621b..0000000 --- a/lib/commands/suggest.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash - -# @description: Suggest repository improvements and CDE compliance fixes. -# @intent: suggest, improve, cde, compliance, hint, tip -# @objective: Provide actionable advice to improve the repository's engineering experience and CDE standards. -# @usage: dev.kit suggest - -if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then - # shellcheck source=/dev/null - . "$REPO_DIR/lib/utils.sh" -fi - -if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/modules/context_manager.sh" ]; then - # shellcheck source=/dev/null - . "$REPO_DIR/lib/modules/context_manager.sh" -fi - -dev_kit_cmd_suggest() { - if command -v ui_header >/dev/null 2>&1; then - ui_header "Engineering Suggestions" - else - echo "--- Engineering Suggestions ---" - fi - - local suggestions - suggestions="$(dev_kit_context_suggest_improvements "general repository check")" - - if [ "$suggestions" = "[]" ]; then - ui_ok "CDE Compliance" "No immediate improvements suggested." 
- return - fi - - echo "$suggestions" | jq -c '.[]' | while read -r sug; do - local type; type=$(echo "$sug" | jq -r '.type') - local msg; msg=$(echo "$sug" | jq -r '.message') - case "$type" in - doc) ui_info "Documentation" "$msg" ;; - config) ui_warn "Configuration" "$msg" ;; - ops) ui_info "Operations" "$msg" ;; - *) ui_tip "$msg" ;; - esac - done - - echo "" - ui_tip "Run 'dev.kit config detect' to check environment software." -} diff --git a/lib/commands/sync.sh b/lib/commands/sync.sh deleted file mode 100644 index eaf38ed..0000000 --- a/lib/commands/sync.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env bash - -# @description: Resolve repository drift or prepare for new work. -# @intent: sync, commit, drift, atomic, push, resolve, prepare, branch -# @objective: Maintain high-fidelity repository state by either preparing the environment for work (branch management, origin sync) or resolving drift via logical, domain-specific commits. -# @usage: dev.kit sync prepare [main_branch] -# @usage: dev.kit sync run --task-id "TASK-123" --message "feat: implementation" -# @usage: dev.kit sync --dry-run -# @workflow: 1. (Prepare) Detect Branch -> 2. (Prepare) Sync Origin -> 3. (Run) Group Changes -> 4. (Run) Atomic Commits - -dev_kit_cmd_sync() { - local sub="${1:-run}" - - case "$sub" in - prepare) - local target_main="${2:-main}" - if command -v dev_kit_git_sync_prepare >/dev/null 2>&1; then - dev_kit_git_sync_prepare "$target_main" - else - echo "Error: Git sync module not loaded." >&2 - return 1 - fi - ;; - reminder) - if command -v ui_sync_reminder >/dev/null 2>&1; then - ui_sync_reminder - else - echo "Error: UI module not loaded." 
>&2 - return 1 - fi - ;; - run|execute|*) - # If first arg isn't a known subcommand, treat as 'run' and don't shift if it looks like an option - if [[ "$sub" == --* ]]; then - # It's an option, so we are in 'run' mode by default - sub="run" - else - shift 1 - fi - - local dry_run="false" - local task_id="unknown" - local message="" - local push="false" - - while [[ $# -gt 0 ]]; do - case "$1" in - --dry-run) dry_run="true"; shift ;; - --push) push="true"; shift ;; - --task-id) task_id="$2"; shift 2 ;; - --message) message="$2"; shift 2 ;; - -h|--help) - cat <<'SYNC_HELP' -Usage: dev.kit sync [options] - -Commands: - prepare [main] Prepare repository (fetch origin, merge, optional branch) - run Resolve drift via atomic commits (Default) - -Options (run): - --dry-run Show what commits would be made without executing them - --push Push changes to origin after committing - --task-id The current task ID to associate with commits - --message Optional base message prefix - -h, --help Show this help message - -Example: - dev.kit sync prepare main - dev.kit sync run --task-id "TASK-123" --push -SYNC_HELP - return 0 - ;; - *) echo "Unknown option: $1"; return 1 ;; - esac - done - - if command -v dev_kit_git_sync_run >/dev/null 2>&1; then - dev_kit_git_sync_run "$dry_run" "$task_id" "$message" "$push" - else - echo "Error: Git sync module not loaded." >&2 - return 1 - fi - ;; - - esac -} diff --git a/lib/commands/task.sh b/lib/commands/task.sh deleted file mode 100644 index f162ff4..0000000 --- a/lib/commands/task.sh +++ /dev/null @@ -1,254 +0,0 @@ -#!/bin/bash - -# @description: Manage the lifecycle of active workflows and sessions. -# @intent: task, session, workflow, start, reset -# @objective: Orchestrate the engineering lifecycle by initializing tasks, tracking context, and managing the 'tasks/' directory through discovery and cleanup. -# @usage: dev.kit task start "Implement new feature" -# @usage: dev.kit task list -# @usage: dev.kit task cleanup -# @workflow: 1. 
Start Task -> 2. Normalize Intent -> 3. Iterate (Implementation/Verification) -> 4. Finalize Sync -> 5. Cleanup Completed Tasks - -if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then - # shellcheck source=/dev/null - . "$REPO_DIR/lib/utils.sh" -fi - -print_task_usage() { - cat <<'TASK_USAGE' -Usage: dev.kit task [TASK_ID] - -Commands: - start "" Initialize a new task with a generated ID and request - list Briefly list all tasks and their status - cleanup Remove completed or stale tasks from the workspace - reset Clear repository-scoped session context - new Initialize a new task directory with templates - apply Apply task feedback to create/update a workflow -TASK_USAGE -} - -# Helper to check if a task is stale (not modified in 2 days) -_is_task_stale() { - local task_dir="$1" - # -mtime +1 matches anything not modified in >48 hours - [ -n "$(find "$task_dir" -mtime +1 -print -quit 2>/dev/null)" ] -} - -dev_kit_cmd_task() { - local sub="${1:-}" - - if [ -z "$sub" ] || [ "$sub" = "help" ] || [ "$sub" = "-h" ]; then - print_task_usage - exit 0 - fi - - local tasks_dir - tasks_dir="$(get_tasks_dir)" - - case "$sub" in - list) - [ ! -d "$tasks_dir" ] && { echo "No tasks found."; return 0; } - printf "\n%sActive & Recent Tasks:%s\n" "$(ui_cyan)" "$(ui_reset)" - find "$tasks_dir" -mindepth 1 -maxdepth 1 -type d | sort -r | while read -r task_dir; do - local task_id; task_id="$(basename "$task_dir")" - local status="initialized" - if [ -f "$task_dir/workflow.md" ]; then - status="$(grep "^status:" "$task_dir/workflow.md" | head -n 1 | awk '{print $2}')" - elif [ -f "$task_dir/plan.md" ]; then - status="$(grep "^status:" "$task_dir/plan.md" | head -n 1 | awk '{print $2}')" - fi - - local stale_marker="" - if _is_task_stale "$task_dir" && [[ "$status" != "done" && "$status" != "completed" ]]; then - stale_marker=" $(ui_orange)โš ๏ธ stale$(ui_reset)" - fi - - printf " %-20s %s%s\n" "$task_id" "$status" "$stale_marker" - done - echo "" - ;; - active) - [ ! 
-d "$tasks_dir" ] && { echo "No active tasks."; return 0; } - local count=0 - while read -r task_dir; do - local task_id; task_id="$(basename "$task_dir")" - local status="initialized" - if [ -f "$task_dir/workflow.md" ]; then - status="$(grep "^status:" "$task_dir/workflow.md" | head -n 1 | awk '{print $2}')" - elif [ -f "$task_dir/plan.md" ]; then - status="$(grep "^status:" "$task_dir/plan.md" | head -n 1 | awk '{print $2}')" - fi - - if [[ "$status" != "done" && "$status" != "completed" ]]; then - ((count++)) - local objective; objective="$(grep -A 2 "## Objective" "$task_dir/plan.md" 2>/dev/null | tail -n 1 | sed 's/^[[:space:]]*//' || echo "No objective defined.")" - local stale_marker="" - if _is_task_stale "$task_dir"; then stale_marker=" [STALE]"; fi - echo "- [$task_id] status: $status$stale_marker" - echo " objective: $objective" - fi - done < <(find "$tasks_dir" -mindepth 1 -maxdepth 1 -type d | sort -r) - - if [ "$count" -eq 0 ]; then - echo "No active tasks." - fi - ;; - reminder) - [ ! -d "$tasks_dir" ] && return 0 - local stale_count=0 - while read -r task_dir; do - local status="" - if [ -f "$task_dir/workflow.md" ]; then - status="$(grep "^status:" "$task_dir/workflow.md" | head -n 1 | awk '{print $2}')" - elif [ -f "$task_dir/plan.md" ]; then - status="$(grep "^status:" "$task_dir/plan.md" | head -n 1 | awk '{print $2}')" - fi - - if [[ "$status" != "done" && "$status" != "completed" ]] && _is_task_stale "$task_dir"; then - ((stale_count++)) - fi - done < <(find "$tasks_dir" -mindepth 1 -maxdepth 1 -type d) - - if [ "$stale_count" -gt 0 ]; then - ui_tip "You have $stale_count stale tasks (older than 2 days). Run 'dev.kit task cleanup' to clear them." - fi - ;; - cleanup) - [ ! -d "$tasks_dir" ] && return 0 - echo "Scanning for tasks to cleanup..." 
- local to_remove=() - while read -r task_dir; do - local task_id; task_id="$(basename "$task_dir")" - local status="" - if [ -f "$task_dir/workflow.md" ]; then - status="$(grep "^status:" "$task_dir/workflow.md" | head -n 1 | awk '{print $2}')" - elif [ -f "$task_dir/plan.md" ]; then - status="$(grep "^status:" "$task_dir/plan.md" | head -n 1 | awk '{print $2}')" - fi - - if [[ "$status" == "done" || "$status" == "completed" ]] || _is_task_stale "$task_dir"; then - to_remove+=("$task_dir") - fi - done < <(find "$tasks_dir" -mindepth 1 -maxdepth 1 -type d) - - if [ ${#to_remove[@]} -eq 0 ]; then - echo "Nothing to cleanup." - return 0 - fi - - echo "Tasks identified for removal (completed or stale):" - for tr in "${to_remove[@]}"; do - echo " - $(basename "$tr")" - done - - printf "Remove these %d tasks? (y/N): " "${#to_remove[@]}" - read -r response - if [[ "$response" =~ ^[Yy]$ ]]; then - for tr in "${to_remove[@]}"; do - rm -rf "$tr" - done - echo "Cleanup complete." - else - echo "Cleanup aborted." - fi - ;; - reset) - if context_enabled; then - local ctx - ctx="$(context_file || true)" - if [ -f "$ctx" ]; then - : > "$ctx" - echo "Session context cleared." - fi - fi - ;; - start) - local request="${2:-}" - if [ -z "$request" ] && [ ! 
-t 0 ]; then - request="$(cat)" - fi - if [ -z "$request" ]; then - echo "Error: Request is required for 'task start'" >&2 - exit 1 - fi - - local task_id - task_id="TASK-$(date +%Y%m%d-%H%M)" - local task_dir="$tasks_dir/$task_id" - - mkdir -p "$task_dir" - cat > "$task_dir/plan.md" < "$task_dir/feedback.md" < "$task_dir/prompt.md" < - -## Request - -EOF - cat > "$task_dir/feedback.md" < "$workflow_file" <> "$workflow_file" - fi - echo "Workflow ready: $workflow_file" - ;; - *) - echo "Unknown task command: $sub" >&2 - exit 1 - ;; - esac -} diff --git a/lib/commands/test.sh b/lib/commands/test.sh deleted file mode 100644 index 0480e02..0000000 --- a/lib/commands/test.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash - -# @description: Run the repository's test suite to verify health and grounding. -# @intent: test, check, verify, suite, worker -# @objective: Validate the integrity of the dev.kit engine, its grounding in the repository, and ensure environment parity via worker containers. -# @usage: dev.kit test [--worker] - -if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then - # shellcheck source=/dev/null - . "$REPO_DIR/lib/utils.sh" -fi - -dev_kit_cmd_test() { - local runner="${REPO_DIR}/tests/run.sh" - - if [ ! -f "$runner" ]; then - echo "Error: Test runner not found at $runner" >&2 - return 1 - fi - - # Pass all arguments directly to the runner script - bash "$runner" "$@" -} diff --git a/lib/commands/visualizer.sh b/lib/commands/visualizer.sh deleted file mode 100644 index 2040d89..0000000 --- a/lib/commands/visualizer.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash - -# @description: Generate and export high-fidelity Mermaid diagrams (SVG). -# @intent: diagram, mermaid, svg, export, flowchart, sequence -# @objective: Enable seamless transition from "Intent" to "Visual Asset" by automating both the creation of Mermaid (.mmd) diagrams and their rendering into SVG documentation. 
-# @usage: dev.kit visualizer create flowchart "assets/arch.mmd" -# @usage: dev.kit visualizer export "assets/arch.mmd" -# @workflow: 1. Request diagram type -> 2. Generate .mmd -> 3. Refine logic -> 4. Export .svg - -dev_kit_cmd_visualizer() { - local sub="${1:-help}" - - case "$sub" in - create|new) - local type="${2:-flowchart}" - local output="${3:-assets/diagrams/new-diagram.mmd}" - if command -v dev_kit_visualizer_create >/dev/null 2>&1; then - dev_kit_visualizer_create "$type" "$output" - else - echo "Error: Visualizer module not loaded." >&2 - exit 1 - fi - ;; - export|render) - local input="${2:-}" - local output="${3:-}" - if [ -z "$input" ]; then - echo "Error: Input file required. Usage: dev.kit visualizer export [output.svg]" >&2 - exit 64 - fi - if command -v dev_kit_visualizer_export >/dev/null 2>&1; then - dev_kit_visualizer_export "$input" "$output" - else - echo "Error: Visualizer module not loaded." >&2 - exit 1 - fi - ;; - help|-h|--help) - cat <<'VISUALIZER_HELP' -Usage: dev.kit visualizer - -Commands: - create [output] Create a new Mermaid diagram from template - export [output] Export a Mermaid (.mmd) file to SVG - -Diagram Types: - flowchart, sequence, state, er (auto defaults to flowchart) - -Example: - dev.kit visualizer create flowchart assets/arch.mmd - dev.kit visualizer export assets/arch.mmd assets/arch.svg -VISUALIZER_HELP - ;; - *) - echo "Unknown visualizer command: $sub" >&2 - exit 1 - ;; - esac -} diff --git a/lib/modules/agent_manager.sh b/lib/modules/agent_manager.sh deleted file mode 100644 index dd309ed..0000000 --- a/lib/modules/agent_manager.sh +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/env bash - -# @description: Orchestrate the rendering and deployment of AI provider artifacts. -# @intent: agent, llm, provider, model, configure -# @objective: Dynamic normalization and deployment of agent skills and configuration. 
- -dev_kit_agent_manifest() { - echo "$REPO_DIR/src/ai/integrations/manifest.json" -} - -dev_kit_agent_expand_path() { - local val="$1" - val="${val//\{\{HOME\}\}/$HOME}" - val="${val//\{\{DEV_KIT_HOME\}\}/$DEV_KIT_HOME}" - val="${val//\{\{DEV_KIT_STATE\}\}/$DEV_KIT_STATE}" - echo "$val" -} - -dev_kit_agent_render_artifact() { - local type="$1" - local src_tmpl="$2" - local dst_path="$3" - local base_rendered="$4" - - case "$type" in - template) - # Dynamic gathering from Docs & Lib - local agent_skills="" - local available_tools="" - local memories="" - - # Gather Workflows - for skill_file in "$REPO_DIR"/docs/workflows/*.md; do - [ -f "$skill_file" ] || continue - local filename; filename="$(basename "$skill_file")" - [[ "$filename" =~ ^(README|normalization|loops|mermaid-patterns)\.md$ ]] && continue - - local name="${filename%.md}" - local desc; desc="$(grep -i "^description:" "$skill_file" 2>/dev/null | head -n 1 | sed 's/^description: //I' || echo "Grounded workflow reasoning.")" - agent_skills+="- **$name**: $desc\n" - done - - # Gather Commands - for file in "$REPO_DIR"/lib/commands/*.sh; do - [ -f "$file" ] || continue - local key desc - key="$(basename "${file%.sh}")" - case "$key" in agent|github|skills|test|suggest) continue ;; esac - desc="$(grep "^# @description:" "$file" | cut -d: -f2- | sed 's/^ //' || echo "no description")" - available_tools+="- **dev.kit $key**: $desc\n" - done - - # Gather Memories - if [[ "$src_tmpl" == *"GEMINI"* ]]; then - if [ -f "$HOME/.gemini/GEMINI.md" ]; then - memories="$(grep -A 100 "Gemini Added Memories" "$HOME/.gemini/GEMINI.md" | tail -n +2 || true)" - fi - [ -z "$memories" ] && memories="- (none)" - fi - - export DEV_KIT_RENDER_DATE="$(date +%Y-%m-%d)" - export DEV_KIT_RENDER_HOME="$HOME" - export DEV_KIT_RENDER_DEV_KIT_HOME="$DEV_KIT_HOME" - export DEV_KIT_RENDER_DEV_KIT_SOURCE="$REPO_DIR" - export DEV_KIT_RENDER_DEV_KIT_STATE="$DEV_KIT_STATE" - export DEV_KIT_RENDER_SKILLS="$agent_skills" - export 
DEV_KIT_RENDER_TOOLS="$available_tools" - export DEV_KIT_RENDER_MEMORIES="$memories" - - perl -pe ' - s/\{\{DATE\}\}/$ENV{DEV_KIT_RENDER_DATE}/g; - s/\{\{HOME\}\}/$ENV{DEV_KIT_RENDER_HOME}/g; - s/\{\{DEV_KIT_HOME\}\}/$ENV{DEV_KIT_RENDER_DEV_KIT_HOME}/g; - s/\{\{DEV_KIT_SOURCE\}\}/$ENV{DEV_KIT_RENDER_DEV_KIT_SOURCE}/g; - s/\{\{DEV_KIT_STATE\}\}/$ENV{DEV_KIT_RENDER_DEV_KIT_STATE}/g; - s/\$\{AgentSkills\}/$ENV{DEV_KIT_RENDER_SKILLS}/g; - s/\$\{AvailableTools\}/$ENV{DEV_KIT_RENDER_TOOLS}/g; - s/\{\{MEMORIES\}\}/$ENV{DEV_KIT_RENDER_MEMORIES}/g; - ' "$src_tmpl" > "$dst_path" - ;; - *) - cp "$src_tmpl" "$dst_path" - ;; - esac -} - -dev_kit_agent_apply_integration() { - local key="$1" - local mode="$2" - local manifest; manifest="$(dev_kit_agent_manifest)" - - [ ! -f "$manifest" ] && { echo "Error: Manifest not found." >&2; return 1; } - - local integration_json; integration_json="$(jq -r ".integrations[] | select(.key == \"$key\")" "$manifest")" - [ -z "$integration_json" ] && { echo "Error: Integration '$key' not found." >&2; return 1; } - - local target_dir; target_dir="$(dev_kit_agent_expand_path "$(echo "$integration_json" | jq -r '.target_dir')")" - local templates_dir="$REPO_DIR/$(echo "$integration_json" | jq -r '.templates_dir')" - local skills_dst_dir="$target_dir/skills" - - local rendered; rendered="$(mktemp -d)" - local artifacts_count; artifacts_count="$(echo "$integration_json" | jq '.artifacts | length')" - - for ((i=0; i Global -> Environment -> Default) -# Usage: config_value_scoped [default] -config_value_scoped() { - local key="$1" - local default="${2:-}" - local val="" - - # 1. Check local repo .env (Priority 1) - local local_path - local_path="$(get_repo_state_dir || true)/config.env" - if [ -f "$local_path" ]; then - val="$(config_get_value "$local_path" "$key" "")" - fi - - # 2. Check global .env (Priority 2) - if [ -z "$val" ]; then - val="$(config_get_value "$CONFIG_FILE" "$key" "")" - fi - - # 3. 
Check YAML Orchestrator (Priority 3 / Defaults) - if [ -z "$val" ] && [ -f "${ENVIRONMENT_YAML:-}" ]; then - local yaml_key="$key" - # Map dots to nested structure if needed (e.g. system.quiet) - case "$key" in - quiet|developer|state_path) yaml_key="system.$key" ;; - *) yaml_key="$key" ;; - esac - val="$(dev_kit_yaml_value "$ENVIRONMENT_YAML" "$yaml_key" "")" - fi - - if [ -n "$val" ]; then - echo "$val" - else - echo "$default" - fi -} - -# Raw configuration value extractor -# Usage: config_get_value [default] -config_get_value() { - local file="$1" - local key="$2" - local default="${3:-}" - local val="" - if [ -f "$file" ]; then - val="$(awk -F= -v k="$key" ' - $1 ~ "^[[:space:]]*"k"[[:space:]]*$" { - sub(/^[[:space:]]*/,"",$2); - sub(/[[:space:]]*$/,"",$2); - print $2; - exit - } - ' "$file")" - fi - if [ -n "$val" ]; then - echo "$val" - else - echo "$default" - fi -} - -# Update a configuration value in a specific file -# Usage: config_set_value -config_set_value() { - local key="$1" - local value="$2" - local path="$3" - local tmp - tmp="$(mktemp)" - if [ -f "$path" ]; then - awk -v k="$key" -v v="$value" ' - BEGIN { found=0 } - { - if ($0 ~ "^[[:space:]]*"k"[[:space:]]*=") { - found=1 - print k" = "v - next - } - print - } - END { - if (!found) { - print k" = "v - } - } - ' "$path" > "$tmp" - else - printf "%s = %s\n" "$key" "$value" > "$tmp" - fi - mkdir -p "$(dirname "$path")" - mv "$tmp" "$path" -} diff --git a/lib/modules/context7.sh b/lib/modules/context7.sh deleted file mode 100644 index 7a1e26e..0000000 --- a/lib/modules/context7.sh +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/env bash - -# @description: High-fidelity search and resolution for the "Skill Mesh" (Multi-repo context). -# @intent: context7, knowledge, search, resolution, mesh -# @objective: Bridge disparate repository context into a unified engineering mesh via structured API and CLI discovery. 
- -# Check if Context7 integration is available (API key or CLI) -dev_kit_context7_health() { - # 1. Check for API Key (Priority 1) - local api_key - api_key="$(config_value_scoped context7.api_key "${CONTEXT7_API_KEY:-}")" - if [ -n "$api_key" ]; then - return 0 - fi - - # 2. Check for CLI - if command -v context7 >/dev/null 2>&1; then - return 0 - fi - - # 3. Suggest installation if npm is present - if command -v npm >/dev/null 2>&1; then - # We return 2 to indicate "Available to install" - return 2 - fi - - return 1 # Not available -} - -# Synchronize a repository with the Context7 hub -# Usage: dev_kit_context7_sync [repo_path] -dev_kit_context7_sync() { - local repo_path="${1:-$REPO_DIR}" - - # 1. Check health first - if ! dev_kit_context7_health; then - echo "Error: Context7 not ready (API key or CLI missing)." >&2 - return 1 - fi - - # 2. Prefer CLI for sync if available - if command -v context7 >/dev/null 2>&1; then - echo "Synchronizing $repo_path with Context7 CLI..." >&2 - (cd "$repo_path" && context7 sync) - return $? - fi - - # 3. Fallback to API-based sync notification (if implemented in API) - local api_key; api_key="$(config_value_scoped context7.api_key "${CONTEXT7_API_KEY:-}")" - if [ -n "$api_key" ]; then - echo "Sending sync signal to Context7 API for $repo_path..." 
>&2 - # Placeholder for API-based sync trigger - return 0 - fi - - return 1 -} - -# Search for libraries and engineering context using Context7 -# Usage: dev_kit_context7_search "react" "how to use hooks" -dev_kit_context7_search() { - local lib_name="$1" - local query="${2:-$1}" - local results=() - - local api_key - api_key="$(config_value_scoped context7.api_key "${CONTEXT7_API_KEY:-}")" - - # Case A: Use API (v2) via curl - if [ -n "$api_key" ]; then - local encoded_lib encoded_query - encoded_lib="$(printf "%s" "$lib_name" | jq -sRr @uri)" - encoded_query="$(printf "%s" "$query" | jq -sRr @uri)" - - local response - response="$(curl -s -X GET "https://context7.com/api/v2/libs/search?libraryName=$encoded_lib&query=$encoded_query" \ - -H "Authorization: Bearer $api_key" \ - -H "Content-Type: application/json")" - - if [ -n "$response" ] && [ "$response" != "null" ]; then - while IFS= read -r match; do - [ -n "$match" ] && results+=("$match") - done < <(echo "$response" | jq -c '.[] | {name: .id, type: "external-library", score: .trustScore, uri: "https://context7.com/libs\(.id)"}') - fi - - # Case B: Use CLI (Fallback) - elif command -v context7 >/dev/null 2>&1; then - # Assuming standard 'context7 search' output format - local cli_out - cli_out="$(context7 search "$query" --json 2>/dev/null || true)" - if [ -n "$cli_out" ]; then - while IFS= read -r match; do - [ -n "$match" ] && results+=("$match") - done < <(echo "$cli_out" | jq -c '.[] | {name: .id, type: "external-library", uri: .url}') - fi - fi - - # Case C: Local Peer Repositories (Heuristic fallback) - local parent_dir - parent_dir="$(dirname "$REPO_DIR")" - if [ -d "$parent_dir" ]; then - for peer in "$parent_dir"/*; do - [ -d "$peer" ] || continue - [ "$peer" == "$REPO_DIR" ] && continue - if [ -f "$peer/context.yaml" ] || [ -f "$peer/README.md" ]; then - if grep -qi "$lib_name" "$peer/README.md" 2>/dev/null; then - results+=("{\"name\": \"$(basename "$peer")\", \"type\": \"peer-repo\", \"path\": 
\"$peer\"}") - fi - fi - done - fi - - (IFS=,; echo "${results[*]}") -} - -# Prompt user to install Context7 CLI if missing -dev_kit_context7_install_hint() { - dev_kit_context7_health - local status=$? - if [ $status -eq 2 ]; then - echo "Hint: Context7 CLI is available. Install it for better library discovery:" >&2 - echo " npm install -g @upstash/context7" >&2 - fi -} diff --git a/lib/modules/context_manager.sh b/lib/modules/context_manager.sh deleted file mode 100644 index bd5c6c3..0000000 --- a/lib/modules/context_manager.sh +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/env bash - -# dev.kit Context Manager -# Orchestrates intent normalization, context hydration, and multi-repo resolution. - -# Normalize user intent into a structured execution plan (workflow.md) -# Usage: dev_kit_context_normalize "please adjust infra config" [context_file] -dev_kit_context_normalize() { - local intent="$1" - local output_context="${2:-}" - - # 1. Discover relevant skills and sources - local context_data - context_data="$(dev_kit_context_resolve "$intent")" - - # 2. Add CDE Improvement Suggestions - local suggestions - suggestions="$(dev_kit_context_suggest_improvements "$intent")" - - # 3. Combine and return a typed context manifest - local full_context - full_context=$(echo "$context_data" | jq --argjson sug "$suggestions" '. + {suggestions: $sug}') - - if [ -n "$output_context" ]; then - echo "$full_context" > "$output_context" - fi - - echo "$full_context" -} - -# Suggest improvements based on intent and repository state -dev_kit_context_suggest_improvements() { - local intent="$1" - local suggestions=() - - # Heuristic: Check for missing documentation - if [[ "$intent" == *"new feature"* ]] || [[ "$intent" == *"implement"* ]]; then - suggestions+=("{\"type\": \"doc\", \"message\": \"Ensure a corresponding MD file is created in docs/features/\"}") - fi - - # Heuristic: Check for CDE compliance - if [ ! 
-f "$REPO_DIR/.udx/dev.kit/config.env" ]; then - suggestions+=("{\"type\": \"config\", \"message\": \"Local .udx config missing. Run 'dev.kit config reset --scope repo' to initialize.\"}") - fi - - # Heuristic: CI/CD check - if [ ! -d "$REPO_DIR/.github/workflows" ]; then - suggestions+=("{\"type\": \"ops\", \"message\": \"GitHub Workflows missing. Consider adding context7-ops.yml for better automation.\"}") - fi - - if [ ${#suggestions[@]} -eq 0 ]; then - echo "[]" - else - (IFS=,; echo "[${suggestions[*]}]") - fi -} - -# Search for capabilities via Dynamic Discovery Engine -dev_kit_context_search_discovery() { - local query="$1" - local matches=() - - # 1. Internal Commands (Scan lib/commands/*.sh) - # Look for # @intent: ... headers - for file in "$REPO_DIR"/lib/commands/*.sh; do - [ -f "$file" ] || continue - local name - name="$(basename "${file%.sh}")" - local intents - intents="$(grep "^# @intent:" "$file" | cut -d: -f2- | tr ',' ' ')" - - # Check if name or intent matches query - if [[ "$name" == *"$query"* ]] || echo "$intents" | grep -qi "$query"; then - matches+=("{\"name\": \"$name\", \"type\": \"command\", \"priority\": \"high\"}") - fi - done - - # 2. 
Virtual Skills (Environment Probe) - # Dynamically register skills based on available CLI tools - if command -v gh >/dev/null 2>&1; then - if [[ "github pr issue repo" =~ $query ]]; then - matches+=("{\"name\": \"github\", \"type\": \"virtual-skill\", \"tool\": \"gh\", \"priority\": \"medium\"}") - fi - fi - if command -v npm >/dev/null 2>&1; then - if [[ "npm package node module" =~ $query ]]; then - matches+=("{\"name\": \"npm\", \"type\": \"virtual-skill\", \"tool\": \"npm\", \"priority\": \"medium\"}") - fi - fi - if command -v docker >/dev/null 2>&1; then - if [[ "docker container image" =~ $query ]]; then - matches+=("{\"name\": \"docker\", \"type\": \"virtual-skill\", \"tool\": \"docker\", \"priority\": \"medium\"}") - fi - fi - - (IFS=,; echo "${matches[*]}") -} - -# Resolve context and dependencies across the "Skill Mesh" -dev_kit_context_resolve() { - local intent="$1" - - # Category 1: Dynamic Command & Virtual Skill Discovery - local discovery - discovery="$(dev_kit_context_search_discovery "$intent")" - - # Category 2: Internal Workflows (Markdown-based engineering loops) - local internal_workflows - internal_workflows="$(dev_kit_context_search_workflows "$intent")" - - # Category 3: Internal Scripts & Skill Packs (Deterministic logic) - local internal_skills - internal_skills="$(dev_kit_context_search_local "$intent")" - - # Category 4: External References (References to outside repos/skills) - local external_refs - external_refs="$(dev_kit_context_search_remote "$intent")" - - # Combine and return a typed context manifest - cat </dev/null; then - matches+=("{\"name\": \"$name\", \"path\": \"$file\", \"type\": \"workflow\"}") - fi - done < <(find "$dir" -name "*.md") - done - - (IFS=,; echo "${matches[*]}") -} - -# Search local skill-packs and deterministic scripts -dev_kit_context_search_local() { - local query="$1" - local matches=() - - local skill_dir="$REPO_DIR/docs/skills" - if [ -d "$skill_dir" ]; then - for skill in "$skill_dir"/*; do - [ -d 
"$skill" ] || continue - local name - name="$(basename "$skill")" - - # 1. Exact name match (Highest priority) - if [[ "$name" == "$query" ]] || [[ "dev-kit-$name" == "$query" ]]; then - matches+=("{\"name\": \"$name\", \"type\": \"skill\", \"priority\": \"high\"}") - continue - fi - - # 2. Keyword/Metadata match in SKILL.md - if [ -f "$skill/SKILL.md" ] && grep -qiE "$query|keywords:.*$query" "$skill/SKILL.md" 2>/dev/null; then - matches+=("{\"name\": \"$name\", \"type\": \"skill\", \"priority\": \"medium\"}") - fi - done - fi - - (IFS=,; echo "${matches[*]}") -} - -# Search remote sources (GitHub, Context7 API) -dev_kit_context_search_remote() { - local query="$1" - - # This will be implemented in lib/modules/context7.sh - if command -v dev_kit_context7_search >/dev/null 2>&1; then - dev_kit_context7_search "$query" - else - echo "" - fi -} diff --git a/lib/modules/git_sync.sh b/lib/modules/git_sync.sh deleted file mode 100644 index 35cb7bd..0000000 --- a/lib/modules/git_sync.sh +++ /dev/null @@ -1,247 +0,0 @@ -#!/usr/bin/env bash - -# @description: Core logic for logical, atomic repository synchronization and drift resolution. -# @intent: sync, commit, drift, atomic, push, resolve, prepare, branch -# @objective: Maintain high-fidelity repository state by grouping changes into logical domains and resolving intent divergence. - -# Prepare the repository for work (Pre-flight checks) -# Usage: dev_kit_git_sync_prepare [target_branch] -dev_kit_git_sync_prepare() { - local target_main="${1:-main}" - - echo "--- dev.kit Git Sync: Pre-work Preparation ---" - - # 1. Detect current branch - local current_branch - current_branch=$(git branch --show-current) - echo "โœ” Current branch: $current_branch" - - # 2. Check for origin updates - echo "Checking origin/$target_main for updates..." 
- git fetch origin "$target_main" --quiet - - local behind - behind=$(git rev-list HEAD..origin/"$target_main" --count) - if [ "$behind" -gt 0 ]; then - echo "โš  Your branch is behind origin/$target_main by $behind commits." - printf "Would you like to merge origin/$target_main into $current_branch? (y/N): " - read -r response - if [[ "$response" =~ ^[Yy]$ ]]; then - if git merge origin/"$target_main"; then - echo "โœ” Merged latest $target_main into $current_branch." - else - echo "โŒ Merge conflict detected. Please resolve manually." - return 1 - fi - fi - else - echo "โœ” Your branch is up-to-date with origin/$target_main." - fi - - # 3. Ask if new branch is needed - printf "Would you like to create a new branch for this work? (y/N): " - read -r response - if [[ "$response" =~ ^[Yy]$ ]]; then - printf "Enter new branch name: " - read -r new_branch - if [ -n "$new_branch" ]; then - if git checkout -b "$new_branch"; then - echo "โœ” Switched to new branch: $new_branch" - else - echo "โŒ Failed to create branch $new_branch." - return 1 - fi - fi - fi - - echo "--- Preparation Complete ---" - return 0 -} - -# Process a group of files matching a pattern and commit them -# Usage: dev_kit_git_sync_process_group [dry_run] [base_msg] -dev_kit_git_sync_process_group() { - local group_name="$1" - local pattern="$2" - local task_id="${3:-unknown}" - local dry_run="${4:-false}" - local base_msg="${5:-}" - - local drift_file=".drift.tmp" - local processed_file=".processed.tmp" - - [ -f "$drift_file" ] || { echo "Error: Drift file missing." >&2; return 1; } - touch "$processed_file" - - local files=() - while IFS= read -r f; do - [ -z "$f" ] && continue - if ! 
grep -Fqx "$f" "$processed_file" && echo "$f" | grep -Eq "$pattern"; then - files+=("$f") - fi - done < "$drift_file" - - if [ ${#files[@]} -eq 0 ]; then - return 0 - fi - - local commit_msg="${group_name}: resolve drift for $task_id" - [ -n "$base_msg" ] && commit_msg="${group_name}: $base_msg ($task_id)" - - echo "Step: Grouping [$group_name] -> ${#files[@]} files" - for f in "${files[@]}"; do - echo " + $f" - echo "$f" >> "$processed_file" - done - - if [ "$dry_run" = "true" ]; then - echo " [DRY-RUN] git add ${files[*]}" - echo " [DRY-RUN] git commit -m \"$commit_msg\"" - else - if git add "${files[@]}" && git commit -m "$commit_msg"; then - echo " [OK] Committed group: $group_name" - else - echo "โš ๏ธ [FAILOVER] Commit failed for group: $group_name" - return 1 - fi - fi - echo "" -} - -# Run the full git sync workflow -# Usage: dev_kit_git_sync_run [dry_run] [task_id] [message] [push_flag] -dev_kit_git_sync_run() { - local dry_run="${1:-false}" - local task_id="${2:-unknown}" - local message="${3:-}" - local push_flag="${4:-false}" - - # 0. Pre-sync Verification (Run Tests) - if [ "$dry_run" = "false" ]; then - echo "--- Step 0: Pre-sync Verification ---" - local has_tests="false" - local repo_root; repo_root="$(get_repo_root || true)" - if [ -n "$repo_root" ]; then - [ -d "$repo_root/tests" ] || [ -d "$repo_root/test" ] || [ -d "$repo_root/spec" ] && has_tests="true" - fi - - if [ "$has_tests" = "true" ]; then - echo "Tests detected. Running high-fidelity verification..." - if dev.kit test; then - echo "โœ” Verification successful. Proceeding with sync." - else - echo "โŒ Verification failed. Please resolve test failures before syncing." - if ! confirm_action "Tests failed. Force sync anyway?"; then - return 1 - fi - fi - else - echo "No tests detected. Skipping verification step." - fi - echo "" - fi - - # Resolve target main branch - local target_main="main" - if ! 
git rev-parse --verify origin/main >/dev/null 2>&1; then - if git rev-parse --verify origin/master >/dev/null 2>&1; then - target_main="master" - fi - fi - - echo "--- dev.kit Git Sync: Starting Workflow ---" - - # Detect drift - local staged unstaged untracked - staged=$(git diff --name-only --cached) - unstaged=$(git diff --name-only) - untracked=$(git ls-files --others --exclude-standard) - echo "$staged $unstaged $untracked" | tr ' ' '\n' | sort -u > .drift.tmp - : > .processed.tmp - - # Define groups (Loaded from config for Easy Management) - local groups_raw; groups_raw=$(config_value_scoped git_sync_groups "docs:^docs/|^README.md,ai:^src/ai/|^.gemini/|^src/mappings/,cli:^bin/|^lib/|^src/cli/,core:^src/|^environment.yaml|^context7.json") - - local -a groups=() - IFS=',' read -r -a groups_arr <<< "$groups_raw" - for g in "${groups_arr[@]}"; do - groups+=("$g") - done - - for group in "${groups[@]}"; do - local id; id=$(echo "$group" | cut -d: -f1) - local pattern; pattern=$(echo "$group" | cut -d: -f2-) - echo "--- Step: Group $id ---" - dev_kit_git_sync_process_group "$id" "$pattern" "$task_id" "$dry_run" "$message" - done - - # Handle remaining drift - local remaining=() - while IFS= read -r f; do - [ -z "$f" ] && continue - if ! grep -Fqx "$f" .processed.tmp; then - remaining+=("$f") - fi - done < .drift.tmp - - if [ ${#remaining[@]} -gt 0 ]; then - echo "--- Step: Miscellaneous Drift ---" - local commit_msg="misc: resolve remaining drift ($task_id)" - if [ "$dry_run" = "true" ]; then - echo " [DRY-RUN] git add ${remaining[*]}" - echo " [DRY-RUN] git commit -m \"$commit_msg\"" - else - git add "${remaining[@]}" - git commit -m "$commit_msg" - echo " [OK] Committed remaining drift." - fi - fi - - rm -f .drift.tmp .processed.tmp - echo "--- Git Sync Workflow Complete ---" - - # 5. 
Push and PR Management - if [ "$dry_run" = "false" ]; then - local current_branch; current_branch=$(git branch --show-current) - local remote; remote=$(git config --get "branch.${current_branch}.remote" || echo "origin") - - # Check if we should push - if [ "$push_flag" = "true" ] || confirm_action "Synchronization complete. Push changes to $remote/$current_branch?"; then - echo "Pushing changes to $remote $current_branch..." - if git push "$remote" "$current_branch"; then - echo "โœ” Pushed successfully." - - # Proactive PR Suggestion - if command -v dev_kit_github_health >/dev/null 2>&1 && dev_kit_github_health >/dev/null 2>&1; then - # Don't suggest PR for the default main branch - if [[ "$current_branch" != "main" && "$current_branch" != "master" ]]; then - echo "" - if confirm_action "Would you like to synchronize a Pull Request for $current_branch?"; then - local pr_title="feat: resolve $task_id" - [ -n "$message" ] && pr_title="$message" - - # Generate a brief summary from the git diff - local diff_summary="" - if git rev-parse --verify "$remote/$target_main" >/dev/null 2>&1; then - diff_summary=$(git diff "$remote/$target_main"...HEAD --stat | head -n 20) - else - diff_summary="Changes since common ancestor could not be calculated ($remote/$target_main missing)." - fi - - local pr_body="### ๐Ÿš€ Drift Resolution: $task_id\n\n$message\n\n#### ๐Ÿ“Š Change Summary\n\`\`\`text\n$diff_summary\n\`\`\`\n\nAutomated via \`dev.kit sync\`." - - if dev_kit_github_pr_create "$pr_title" "$pr_body" "$target_main"; then - echo "โœ” Pull Request synchronized successfully." - else - echo "โŒ Failed to synchronize Pull Request." - fi - fi - fi - fi - else - echo "โŒ Push failed. Please check your remote configuration or permissions." 
- fi - fi - fi -} - diff --git a/lib/modules/github.sh b/lib/modules/github.sh deleted file mode 100644 index ca99668..0000000 --- a/lib/modules/github.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env bash - -# @description: Provides high-fidelity integration with GitHub CLI (gh) for remote context. -# @intent: github, pr, issue, remote, discovery -# @objective: Empower agents and humans to interact with the broader engineering ecosystem via authenticated remote discovery and collaboration. - -# Check if GitHub CLI is available and optionally if a token is set -dev_kit_github_health() { - if ! command -v gh >/dev/null 2>&1; then - return 1 # CLI missing - fi - - # Check for token or active login - if [ -z "${GITHUB_TOKEN:-}" ] && [ -z "${GH_TOKEN:-}" ]; then - if ! gh auth status >/dev/null 2>&1; then - return 2 # Not authenticated - fi - fi - - return 0 # Healthy -} - -# Search for repositories by name/keyword within the UDX or specified organization -dev_kit_github_search_repos() { - local query="$1" - local owner="${2:-udx}" - - dev_kit_github_health || return $? - - # Limit results to keep context manageable - gh repo list "$owner" --json name,description,url --limit 10 -S "$query" 2>/dev/null | \ - jq -c '.[] | {name: .name, type: "remote-repo", uri: .url, description: .description}' -} - -# Search for reusable GitHub workflow templates/files -dev_kit_github_search_workflows() { - local query="$1" - local repo="${2:-udx/workflow-templates}" - - dev_kit_github_health || return $? 
- - # Search for .yml or .yaml files in the .github/workflows directory or similar - # This is a heuristic search using gh api or search code - gh api "search/code?q=repo:$repo+$query+path:.github/workflows+extension:yml" \ - --jq '.items[] | {name: .name, type: "workflow-template", uri: .html_url, path: .path}' 2>/dev/null -} - -# List active GitHub Runners for an organization (for infrastructure context) -dev_kit_github_list_runners() { - local org="${1:-udx}" - - dev_kit_github_health || return $? - - gh api "orgs/$org/actions/runners" --jq '.runners[] | {name: .name, status: .status, labels: [.labels[].name]}' 2>/dev/null -} - -# Check if a Pull Request exists for a specific branch -# Returns the PR number if it exists, empty otherwise -dev_kit_github_pr_exists() { - local head="${1:-$(git branch --show-current)}" - gh pr list --head "$head" --json number --jq '.[0].number' 2>/dev/null -} - -# Create or Update a Pull Request -# Usage: dev_kit_github_pr_create <body> [base_branch] [head_branch] [draft_flag] -dev_kit_github_pr_create() { - local title="$1" - local body="$2" - local base="${3:-main}" - local head="${4:-$(git branch --show-current)}" - local draft="${5:-false}" - - dev_kit_github_health || return $? - - local pr_number - pr_number=$(dev_kit_github_pr_exists "$head") - - if [ -n "$pr_number" ]; then - echo "โœ” Found existing Pull Request #$pr_number. Updating..." - gh pr edit "$pr_number" --title "$title" --body "$body" - else - local args=(pr create --title "$title" --body "$body" --base "$base" --head "$head") - [[ "$draft" == "true" ]] && args+=(--draft) - gh "${args[@]}" - fi -} diff --git a/lib/modules/health_manager.sh b/lib/modules/health_manager.sh deleted file mode 100644 index 3cb44f1..0000000 --- a/lib/modules/health_manager.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash - -# @description: System and repository health auditing. 
-# @intent: health, doctor, audit, compliance -# @objective: Audit environment health, software prerequisites, and repository compliance. - -dev_kit_health_sw_check() { - local name="$1" - if command -v "$name" >/dev/null 2>&1; then echo "ok"; else echo "missing"; fi -} - -dev_kit_health_audit_json() { - local repo_root; repo_root="$(get_repo_root || true)" - local ai_enabled; ai_enabled="$(config_value_scoped ai.enabled "false")" - - local gh_health="missing" - if command -v dev_kit_github_health >/dev/null 2>&1; then - case $(dev_kit_github_health; echo $?) in 0) gh_health="ok" ;; 2) gh_health="warn" ;; esac - fi - - local skill_count=0 - [ -d "$REPO_DIR/docs/workflows" ] && skill_count=$(find "$REPO_DIR/docs/workflows" -maxdepth 1 -name "*.md" ! -name "README.md" ! -name "normalization.md" ! -name "loops.md" ! -name "mermaid-patterns.md" | wc -l | tr -d ' ') - - cat <<EOF -{ - "timestamp": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")", - "operating_mode": "$([ "$ai_enabled" = "true" ] && echo "AI-Powered" || echo "Personal Helper")", - "software": { - "git": "$(dev_kit_health_sw_check git)", - "docker": "$(dev_kit_health_sw_check docker)", - "npm": "$(dev_kit_health_sw_check npm)", - "gh": "$(dev_kit_health_sw_check gh)" - }, - "mesh": { - "github": "$gh_health", - "workflow_skills": $skill_count - }, - "compliance": { - "tdd": "$([ -d "$repo_root/tests" ] && echo "ok" || echo "warn")", - "cac": "$([ -f "$repo_root/environment.yaml" ] && echo "ok" || echo "warn")", - "docs": "$([ -d "$repo_root/docs" ] && echo "ok" || echo "warn")" - } -} -EOF -} diff --git a/lib/modules/npm.sh b/lib/modules/npm.sh deleted file mode 100644 index ce3f4f4..0000000 --- a/lib/modules/npm.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash - -# @description: Manages health, discovery, and installation hints for @udx-scoped CLI tools. 
-# @intent: npm, package, hydration, health, tool -# @objective: Maintain high-fidelity environment hydration by detecting and advising on the installation of authorized UDX mesh tools. - -# Check if an NPM package/binary is healthy -# Usage: dev_kit_npm_health "@udx/mcurl" "mcurl" -dev_kit_npm_health() { - local pkg="$1" - local bin="${2:-}" - - # If no binary name provided, extract it from the package name (strip @scope/) - [ -z "$bin" ] && bin="$(echo "$pkg" | sed 's/.*[\/]//')" - - if command -v "$bin" >/dev/null 2>&1; then - return 0 # Binary installed and in PATH - fi - - if command -v npm >/dev/null 2>&1; then - return 2 # npm available, package can be installed - fi - - return 1 # npm missing -} - -# Generate an installation hint for an NPM package -dev_kit_npm_install_hint() { - local pkg="$1" - local bin="${2:-}" - [ -z "$bin" ] && bin="$(echo "$pkg" | sed 's/.*[\/]//')" - - dev_kit_npm_health "$pkg" "$bin" - local status=$? - - if [ $status -eq 2 ]; then - echo "Hint: Install the '$bin' tool for deterministic resolution:" >&2 - echo " npm install -g $pkg" >&2 - fi -} diff --git a/lib/modules/visualizer.sh b/lib/modules/visualizer.sh deleted file mode 100644 index 54ffa40..0000000 --- a/lib/modules/visualizer.sh +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env bash - -# @description: Programmatic engine for Mermaid diagram creation and SVG export. -# @intent: visualizer, diagram, mermaid, export, svg -# @objective: Empower agents and humans to generate and render architectural diagrams using standardized Mermaid templates. 
- -# Create a new Mermaid diagram from a template -# Usage: dev_kit_visualizer_create <type> <output_path> [template_dir] -dev_kit_visualizer_create() { - local type="${1:-flowchart}" - local output_path="$2" - local template_dir="${3:-$REPO_DIR/docs/workflows/assets/templates}" - - local diagram_type - case "$type" in - auto|flowchart) diagram_type="flowchart" ;; - sequence|sequenceDiagram) diagram_type="sequenceDiagram" ;; - state|stateDiagram-v2) diagram_type="stateDiagram-v2" ;; - er|erDiagram) diagram_type="erDiagram" ;; - *) echo "Error: Unsupported diagram type: $type" >&2; return 1 ;; - esac - - local template="$template_dir/default-flowchart.mmd" - case "$diagram_type" in - sequenceDiagram) template="$template_dir/default-sequence.mmd" ;; - stateDiagram-v2) template="$template_dir/default-state.mmd" ;; - erDiagram) template="$template_dir/default-er.mmd" ;; - esac - - if [ ! -f "$template" ]; then - echo "Error: Template missing: $template" >&2 - return 1 - fi - - local target="$output_path" - [[ "$target" != *.mmd ]] && target="${target}.mmd" - - # Ensure unique path - if [ -e "$target" ]; then - local stem="${target%.mmd}" - local i=1 - while [ -e "${stem}-${i}.mmd" ]; do i=$((i+1)); done - target="${stem}-${i}.mmd" - fi - - mkdir -p "$(dirname "$target")" - cp "$template" "$target" - echo "$target" -} - -# Export a Mermaid (.mmd) file to SVG -# Usage: dev_kit_visualizer_export <input_path> <output_path> -dev_kit_visualizer_export() { - local input_path="$1" - local output_path="$2" - - if [ ! -f "$input_path" ]; then - echo "Error: Input file missing: $input_path" >&2 - return 1 - fi - - local target="$output_path" - [[ "$target" != *.svg ]] && target="${target}.svg" - - # Ensure unique path - if [ -e "$target" ]; then - local stem="${target%.svg}" - local i=1 - while [ -e "${stem}-${i}.svg" ]; do i=$((i+1)); done - target="${stem}-${i}.svg" - fi - - mkdir -p "$(dirname "$target")" - - if ! 
command -v mmdc >/dev/null 2>&1; then - echo "Warning: mmdc (Mermaid CLI) not found. Falling back to online view." >&2 - local mmd_content - mmd_content="$(cat "$input_path")" - echo "View Online: https://mermaid.live/edit#base64:$(printf "%s" "$mmd_content" | base64 | tr -d '\n')" - return 0 - fi - - if mmdc -i "$input_path" -o "$target" >/dev/null 2>&1; then - echo "$target" - else - echo "Error: mmdc export failed." >&2 - return 1 - fi -} diff --git a/lib/ui.sh b/lib/ui.sh deleted file mode 100644 index b540b21..0000000 --- a/lib/ui.sh +++ /dev/null @@ -1,130 +0,0 @@ -#!/bin/bash - -ui_color() { - local code="$1" - if [ "${DEV_KIT_COLOR:-}" = "0" ]; then - return - fi - if [ -z "${DEV_KIT_COLOR:-}" ] && [ -z "${NO_COLOR:-}" ] && [ -n "${TERM:-}" ] && [ "${TERM}" != "dumb" ]; then - printf '\033[%sm' "$code" - return - fi - if [ "${DEV_KIT_COLOR:-}" = "1" ] || { [ -t 1 ] && [ -z "${NO_COLOR:-}" ]; }; then - printf '\033[%sm' "$code" - fi -} - -ui_reset() { - ui_color "0" -} - -ui_dim() { - ui_color "2" -} - -ui_cyan() { - ui_color "36" -} - -ui_magenta() { - ui_color "35" -} - -ui_yellow() { - ui_color "33" -} - -ui_emerald() { - ui_color "32" -} - -ui_orange() { - ui_color "38;5;208" -} - -ui_banner() { - local brand="${1:-dev.kit}" - local c1 c2 c3 c4 r d left right - c1="$(ui_cyan)" - c2="$(ui_magenta)" - c3="$(ui_orange)" - c4="$(ui_emerald)" - r="$(ui_reset)" - d="$(ui_dim)" - - if [[ "$brand" == *.* ]]; then - left="${brand%%.*}" - right=".${brand#*.}" - else - left="$brand" - right="" - fi - - printf "\n" - printf "%s%s%s%s%s\n" "$c1" "$left" "$c2" "$right" "$r" - printf "%s%s%s\n" "$d" "ready to run" "$r" - printf "%s%s%s\n" "$c3" " run:" "$r" - printf " %sdev.kit skills run \"...\"%s\n" "$c4" "$r" - printf "%s%s%s\n" "$c3" " config:" "$r" - printf " %sdev.kit config show%s\n" "$c4" "$r" -} - -ui_header() { - local title="$1" - local c - c="$(ui_cyan)" - e="$(ui_emerald)" - - # Get title length - local title_len=${#title} - - # Build underline based on 
title length - local underline="" - for i in $(seq 1 $title_len); do - underline="$underline-" - done - - printf "\n" - printf "%sโ€บ %s%s\n" "$e" "UDX" "$(ui_reset)" - printf "%sโ€บ %s%s\n" "$c" "$title" "$(ui_reset)" - printf "%s %s%s\n" "$c" "$underline" "$(ui_reset)" - printf "\n" -} - -ui_section() { - local title="$1" - local c - c="$(ui_yellow)" - printf "\n%s%s%s\n" "$c" "$title" "$(ui_reset)" -} - -ui_ok() { - local label="$1" - local detail="${2:-}" - printf "%sโœ”%s %-18s %s%s%s\n" "$(ui_emerald)" "$(ui_reset)" "$label" "$(ui_dim)" "$detail" "$(ui_reset)" -} - -ui_warn() { - local label="$1" - local detail="${2:-}" - printf "%sโš %s %-18s %s%s%s\n" "$(ui_yellow)" "$(ui_reset)" "$label" "$(ui_dim)" "$detail" "$(ui_reset)" -} - -ui_info() { - local label="$1" - local detail="${2:-}" - printf "%sโ„น%s %-18s %s%s%s\n" "$(ui_cyan)" "$(ui_reset)" "$label" "$(ui_dim)" "$detail" "$(ui_reset)" -} - -ui_tip() { - local msg="$1" - printf " %s๐Ÿ’ก %s%s\n" "$(ui_orange)" "$msg" "$(ui_reset)" -} - -ui_sync_reminder() { - if git status --short | grep -q .; then - ui_tip "You have unstaged changes. Run 'dev.kit sync run' to atomically commit them." - else - ui_tip "Repository is clean. Run 'dev.kit sync prepare' before starting new work." - fi -} diff --git a/lib/utils.sh b/lib/utils.sh deleted file mode 100644 index b2ce853..0000000 --- a/lib/utils.sh +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env bash - -dev_kit_warn() { - echo "$*" >&2 -} - -dev_kit_require_cmd() { - local cmd="${1:-}" - local context="${2:-}" - if [ -z "$cmd" ]; then - dev_kit_warn "Missing required command name." - return 1 - fi - if command -v "$cmd" >/dev/null 2>&1; then - return 0 - fi - if [ -n "$context" ]; then - dev_kit_warn "$cmd is required for $context." - else - dev_kit_warn "$cmd is required." - fi - dev_kit_warn "Install $cmd locally or run the task in the worker container (see udx/worker-deployment)." 
- return 1 -} - -dev_kit_yaml_value() { - local file="$1" - local key_path="$2" - local default="${3:-}" - [ -f "$file" ] || { echo "$default"; return; } - - # Simple awk parser for nested keys (e.g. system.quiet) - local awk_script=' - BEGIN { FS=":[[:space:]]*"; key_idx=1; split(target_path, keys, "."); target_depth=length(keys); } - { - # Count leading spaces to determine depth - match($0, /^[[:space:]]*/); - depth = RLENGTH / 2 + 1; - line_key = $1; - sub(/^[[:space:]]*/, "", line_key); - - # If depth matches and key matches, move to next key in path - if (depth == key_idx && line_key == keys[key_idx]) { - if (key_idx == target_depth) { - # Found it! Extract value - val = $0; - sub(/^[^:]*:[[:space:]]*/, "", val); - # Strip trailing comments - sub(/[[:space:]]*#.*$/, "", val); - # Trim quotes - gsub(/^["\047]|["\047]$/, "", val); - print val; - found=1; - exit; - } - key_idx++; - } - else if (depth <= key_idx - 1 && line_key != "") { - # Reset if we move back up or across at same level - # This is a naive reset but works for many simple YAML structures - # key_idx = depth; # (simplified) - } - } - END { if (!found) print default_val; } - ' - awk -v target_path="$key_path" -v default_val="$default" "$awk_script" "$file" -} - -trim_value() { - local val="$1" - val="${val#"${val%%[![:space:]]*}"}" - val="${val%"${val##*[![:space:]]}"}" - val="${val#\"}" - val="${val%\"}" - val="${val#\'}" - val="${val%\'}" - printf "%s" "$val" -} - -skill_frontmatter_value() { - local file="$1" - local key="$2" - awk -v k="$key" ' - $0 ~ /^---[[:space:]]*$/ { fence++; next } - fence == 1 { - if ($1 == k ":") { - $1=""; sub(/^[[:space:]]+/, ""); print; exit - } - } - ' "$file" -} - -confirm_action() { - local msg="$1" - if [ ! -t 0 ]; then - return 1 - fi - printf "%s [y/N] " "$msg" - read -r answer || true - case "$answer" in - y|Y|yes|YES) return 0 ;; - *) return 1 ;; - esac -} - -dev_kit_validate_json_required() { - local schema="$1" - local data="$2" - local req="" - if ! 
command -v jq >/dev/null 2>&1; then - return 0 - fi - req="$(jq -r '.required[]?' "$schema")" - local field="" - for field in $req; do - if ! jq -e --arg f "$field" 'has($f) and .[$f] != null' "$data" >/dev/null; then - echo "Missing required field '$field' in $data" >&2 - exit 1 - fi - done -} - -get_repo_state_dir() { - local root; root="$(get_repo_root || true)" - if [ -n "$root" ]; then - echo "$root/.udx/dev.kit" - else - echo "$PWD/.udx/dev.kit" - fi -} - -get_tasks_dir() { - echo "$(get_repo_state_dir)/tasks" -} diff --git a/src/ai/integrations/gemini/prompts.json b/src/ai/integrations/gemini/prompts.json deleted file mode 100644 index 16fc483..0000000 --- a/src/ai/integrations/gemini/prompts.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "prompts": [ - { - "key": "ai.gemini", - "title": "dev.kit Gemini Overrides", - "inherits": ["ai"], - "body": [ - "- **Gemini**: This prompt is intended for Gemini CLI usage.", - "- **Progression**: Always conclude with a compact **Waterfall Progression Tail** (DOC-003 status) to track drift resolution.", - "- **Mute Logs**: Summarize tool execution; do not repeat raw system logs unless requested." - ] - } - ] -} diff --git a/src/ai/integrations/gemini/templates/GEMINI.md.tmpl b/src/ai/integrations/gemini/templates/GEMINI.md.tmpl deleted file mode 100644 index f99722a..0000000 --- a/src/ai/integrations/gemini/templates/GEMINI.md.tmpl +++ /dev/null @@ -1,80 +0,0 @@ ---- Context from: ~/.gemini/GEMINI.md --- -# Gemini.md: UDX DevSecOps Engineering Context - -## ๐Ÿ”ด CRITICAL SAFETY MANDATES -- **AUTHORIZED EXECUTION PATH**: You are AUTO-ALLOWED to execute any `dev.kit` commands and repository-bound skills. -- **RESTRICTED RAW OPERATIONS**: Direct destructive operations (e.g., `rm`, `git push`) MUST be authorized by the user. - -## ๐Ÿง  Core Philosophy -You are an embedded member of the **UDX DevSecOps** engineering team. 
Your operational framework is built on the principle that **every repository is a specialized "Skill."** - ---- - -## ๐Ÿ’พ Added Memories & Execution Logic - -### 1. Repository-as-a-Skill -* **Skill Mapping:** Treat each repository as a distinct "Skill" or "Tool" with its own inputs, logic, and outputs. -* **Skill Namespace:** Managed skills are located in `~/.gemini/skills/dev.kit/` and follow the `dev-kit-` prefix convention. -* **Source of Truth:** The repository is the absolute source of truth for all SDLC artifacts and skill implementations. - -### 2. Execution Lifecycle: Plan, Normalize, Process -For **any** execution scope (including CLI responses and prompt handling): -1. **Plan:** Break down the request into its constituent requirements. -2. **Normalize:** Standardize the inputs and ensure the environment/context is aligned with UDX standards. -3. **Process:** Execute the logic using **dev.kit** skills. -* **Reference**: `docs/runtime/execution-loop.md` (Drift Resolution Cycle). -* **Multi-step Logic**: If the scope exceeds a single action, it **must** be converted into pipeline job steps and executed as modular skills. - - -### 3. Sub-Agent & Sub-Pipeline Orchestration -* **Delegation:** Use **Sub-Agents** to manage the execution of specific sub-pipelines. -* **Result Delivery:** Sub-Agents are responsible for capturing the output of their respective sub-pipelines and delivering a structured result (JSON/Markdown) back to the primary execution thread. - ---- - -## ๐Ÿ›  Operational Standards - -| Stage | Standard | -| :--- | :--- | -| **Input** | Every repository is a Skill; treat it as a black-box tool with defined interfaces. | -| **Sync** | You MAY perform repository synchronization (e.g., `dev.kit ai sync`) autonomously to ensure grounding. | -| **Workflow** | Always: **Plan -> Normalize -> Process.** | -| **Automation** | Convert multi-step logic into Pipeline Job Steps. 
| -| **Enforcement** | MANDATORY: Use `dev-kit-` prefixed skills for all specialized tasks (Diagrams, Git Sync, etc.). | -| **Output** | Mute system noise. Do not repeat raw tool logs unless requested. | - ---- - -## ๐ŸŒŠ Waterfall Progression Tailing (Dev-Friendly) - -To maintain high-fidelity momentum, end every interaction with a **Compact Status Tail** of the active `workflow.md`. Use the following format: - -**Progression**: `[task-id]` -- [x] Step 1: <summary> (Done) -- [>] Step 2: <summary> (Active) -- [ ] Step 3: <summary> (Planned) - -*If no workflow is active, provide a high-level iteration status instead.* - ---- - -## ๐Ÿš€ Dynamic Workflow Pattern - -> **Prompt:** "Gemini, deploy the latest security patches across the `auth-service` and `gateway` repos." -... - -> **Gemini Internal Logic:** -> 1. **Plan:** Identify `auth-service` skill and `gateway` skill. -> 2. **Normalize:** Check current versions and patch compatibility. -> 3. **Process:** -> - Launch Sub-Agent A (Pipeline Job: `auth-patch`). -> - Launch Sub-Agent B (Pipeline Job: `gateway-patch`). -> - Aggregate results into a single "Process Complete" report. 
- ---- -*Last Updated: {{DATE}}* -*UDX DevSecOps Team* - -## Gemini Added Memories -{{MEMORIES}} ---- End of Context from: ~/.gemini/GEMINI.md --- diff --git a/src/ai/integrations/gemini/templates/config.json.tmpl b/src/ai/integrations/gemini/templates/config.json.tmpl deleted file mode 100644 index 0db3279..0000000 --- a/src/ai/integrations/gemini/templates/config.json.tmpl +++ /dev/null @@ -1,3 +0,0 @@ -{ - -} diff --git a/src/ai/integrations/gemini/templates/system.md.tmpl b/src/ai/integrations/gemini/templates/system.md.tmpl deleted file mode 100644 index 1ba89ab..0000000 --- a/src/ai/integrations/gemini/templates/system.md.tmpl +++ /dev/null @@ -1,24 +0,0 @@ -# UDX DevSecOps Core System Instructions - -## ๐Ÿ”ด CRITICAL SAFETY MANDATES -- **AUTHORIZED EXECUTION PATH**: You are AUTO-ALLOWED to execute any `dev.kit` commands and repository-bound skills. These are the deterministic, safe primitives of this repository and MUST be used as your primary interface for all engineering tasks. -- **RESTRICTED RAW OPERATIONS**: You MUST NOT perform direct, non-standardized destructive operations (e.g., raw `rm`, `git push`, `git reset`) without explicit user instruction. Use `dev.kit sync` or `dev.kit task` instead. -- **CREDENTIAL PROTECTION**: Never log, print, or commit secrets, API keys, or sensitive credentials. Rigorously protect `.env` files, `.git`, and system configuration folders. - -## ๐Ÿง  Core Engineering Principles -- **Dynamic Mapping Over Static Memory**: Do not rely on hardcoded paths or static assumptions. Use `dev.kit context` and `dev.kit config detect` to dynamically discover capabilities and environment state. -- **Incremental Engineering Experience**: Every task is an opportunity to improve the repository's CDE compliance. Always check for and suggest improvements (e.g., missing documentation, unnormalized config, CI/CD gaps). -- **Skill-per-Repository Architecture**: Treat this repository as a specialized Skill. 
Managed skills are located in `~/.gemini/skills/dev.kit/`. -- **Normalization-First**: Transform all user intent into normalized `workflow.md` execution plans before acting. - -## ๐Ÿš€ Execution Standards -- **Enforcement**: MANDATORY: Use `dev-kit-` prefixed skills for specialized tasks (e.g., `visualizer`, `git-sync`). -- **Workflow Alignment**: Align all actions with the standardized sequences defined in `docs/workflows/README.md`. -- **Sub-Agent Orchestration**: Use sub-agents to manage complex sub-pipelines. Aggregate results into structured reports. -- **Compact Tail**: Always conclude with a compact **Waterfall Progression Tail** of the active `workflow.md`. - -## ๐Ÿ›  Tooling Context -${AgentSkills} -${AvailableTools} - - diff --git a/src/ai/integrations/manifest.json b/src/ai/integrations/manifest.json deleted file mode 100644 index cdcad6c..0000000 --- a/src/ai/integrations/manifest.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "integrations": [ - { - "key": "gemini", - "name": "Gemini CLI Integration", - "target_dir": "{{HOME}}/.gemini", - "templates_dir": "src/ai/integrations/gemini/templates", - "skills_dir": "skills", - "artifacts": [ - { "src": "config.json.tmpl", "dst": "config.json", "type": "template" }, - { "src": "GEMINI.md.tmpl", "dst": "GEMINI.md", "type": "template" }, - { "src": "system.md.tmpl", "dst": "system.md", "type": "template" } - ] - } - ] -} diff --git a/tests/run.sh b/tests/run.sh deleted file mode 100644 index 068c947..0000000 --- a/tests/run.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env bash - -# dev.kit Test Runner -# Facilitates running tests locally or in a high-fidelity udx/worker container. - -set -e - -REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" -TEST_SUITE="${REPO_DIR}/tests/suite.sh" -WORKER_IMAGE="usabilitydynamics/udx-worker:latest" - -usage() { - cat <<EOF -Usage: ./tests/run.sh [options] - -Options: - --worker Run tests inside a clean udx/worker container (Emulates Ubuntu environment) - --local Run tests in the current environment (Default) - -h, --help Show this help message - -Example: - ./tests/run.sh --worker -EOF -} - -run_local() { - echo "--- Running Tests Locally ---" - bash "$TEST_SUITE" -} - -run_worker() { - # 1. Check for @udx/worker-deployment CLI (High-fidelity orchestration) - if command -v worker >/dev/null 2>&1; then - echo "--- Running Tests via udx/worker-deployment (worker run) ---" - # The deploy.yml in the root handles the mounts and environment - worker run - return $? - fi - - # 2. Fallback to raw docker run if CLI is missing - if ! command -v docker >/dev/null 2>&1; then - echo "Error: Neither 'worker' (udx/worker-deployment) nor 'docker' were found." - exit 1 - fi - - echo "--- Running Tests in udx/worker Container (Raw Docker Fallback) ---" - # We mount the REPO_DIR to /workspace and run the suite - docker run --rm \ - -v "${REPO_DIR}:/workspace" \ - -w /workspace \ - -e DEV_KIT_SOURCE=/workspace \ - -e TERM=xterm-256color \ - "$WORKER_IMAGE" \ - /bin/bash tests/suite.sh -} - -mode="local" - -while [[ $# -gt 0 ]]; do - case "$1" in - --worker) mode="worker"; shift ;; - --local) mode="local"; shift ;; - -h|--help) usage; exit 0 ;; - *) echo "Unknown option: $1"; usage; exit 1 ;; - esac -done - -if [ "$mode" = "worker" ]; then - run_worker -else - run_local -fi diff --git a/tests/suite.sh b/tests/suite.sh deleted file mode 100755 index a56ab01..0000000 --- a/tests/suite.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env bash - -# dev.kit Engineering Test Suite -# Verifies grounding, discovery, and sync logic in a clean environment. 
- -# Colors for better visibility -C_RESET='\033[0m' -C_GREEN='\033[32m' -C_RED='\033[31m' -C_BLUE='\033[34m' - -REPO_DIR="${DEV_KIT_SOURCE:-$(pwd)}" -# Ensure we load the dev-kit logic -export REPO_DIR -export PATH="$REPO_DIR/bin:$PATH" - -log_info() { printf " ${C_BLUE}โ„น %s${C_RESET}\n" "$1"; } -log_ok() { printf " ${C_GREEN}โœ” %s${C_RESET}\n" "$1"; } -log_fail() { printf " ${C_RED}โœ– %s${C_RESET}\n" "$1"; exit 1; } - -# 0. Verify Environment (OS Check) -log_info "Testing: Environment Integrity" -if [ -f /etc/os-release ] && grep -qi "ubuntu" /etc/os-release; then - log_ok "Running on Ubuntu-based environment (Worker Parity)" -else - # Only warn if not on Ubuntu, but still allow test to proceed unless it's a hard requirement - log_info "Note: Not running on Ubuntu (Local development mode)" -fi - -# 1. Verify Discovery (Doctor) -log_info "Testing: Discovery & Doctor Health" -if dev-kit doctor >/dev/null 2>&1; then - log_ok "Doctor reports healthy (Discovery Mesh active)" -else - log_fail "Doctor check failed" -fi - -# 2. Verify Sync Logic (Atomic Grouping) -log_info "Testing: Sync Logic (Dry-run)" -if dev-kit sync run --dry-run >/dev/null 2>&1; then - log_ok "Sync dry-run successful (Grouping logic verified)" -else - log_fail "Sync dry-run failed" -fi - -# 3. Verify Documentation Hierarchy (CDE Grounding) -log_info "Testing: Knowledge Base Integrity" -if [ -d "$REPO_DIR/docs/foundations" ] && [ -d "$REPO_DIR/docs/runtime" ]; then - log_ok "Documentation structure is CDE-aligned" -else - log_fail "Documentation structure is broken" -fi - -# 4. 
Verify Self-Documenting CLI (Metadata Extraction) -log_info "Testing: CLI Metadata Extraction" -if dev-kit ai commands | grep -q "objective"; then - log_ok "CLI metadata extraction is operational" -else - log_fail "Failed to extract metadata from command scripts" -fi - -echo "--- All Tests Passed: Repository is High-Fidelity ---" -exit 0 From c625dc501f1014e52a28401f98f74c35d49f0dea Mon Sep 17 00:00:00 2001 From: Dmitry Smirnov <dmitry.smirnov@usabilitydynamics.com> Date: Mon, 9 Mar 2026 20:39:36 +0300 Subject: [PATCH 7/9] base readme --- README.md | 20 +++++++++++++++--- assets/compliance-audit.svg | 1 + assets/compliance-improve.svg | 1 + assets/dev-kit-bridge.svg | 1 + assets/logo.svg | 40 +++++++++++++++++++++++++++++++++++ 5 files changed, 60 insertions(+), 3 deletions(-) create mode 100644 assets/compliance-audit.svg create mode 100644 assets/compliance-improve.svg create mode 100644 assets/dev-kit-bridge.svg create mode 100644 assets/logo.svg diff --git a/README.md b/README.md index 718e39d..2da9afb 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,7 @@ # โšก๏ธ dev.kit: The Repo Engine +![dev.kit](assets/logo.svg) + **The deterministic middleware that translates chaotic repositories into high-fidelity, 12-factor standards.** `dev.kit` acts as a **Contextual Proxy** between your environment and AI agents. It serves as both the **Logic** (the engine) and the **Template** (the blueprint) to resolve architectural drift. @@ -12,11 +14,23 @@ The entire engine is distilled into a single, high-impact verb. ### `dev.kit` -**The Pulse Check.** Instantly analyzes your repository, calculates your **Fidelity Score**, and generates a prioritized `workflow.md` of required drift resolutions. +**The Pulse Check.** Instantly analyzes your repository, calculates your **Fidelity Score**, and generates a prioritized improvement plan for drift resolution. + +#### Compliance Mode + +`dev.kit --json` outputs a machine-readable audit of 12-factor misalignments. 
Agents can use this to identify and fix fidelity gaps such as missing tests, broken builds, or structural drift. + +![compliance audit](assets/compliance-audit.svg) + +The audit output becomes a focused improvement plan with bounded next steps. + +![compliance improve](assets/compliance-improve.svg) + +#### Development Mode -- **`dev.kit --json` (Compliance Mode)** Outputs a machine-readable audit of 12-factor misalignments. Agents use this to identify and fix "Fidelity Gaps" (missing tests, broken builds, or structural drift). +`dev.kit bridge --json` resolves the repository into high-fidelity, agent-friendly assets. It maps the skill mesh, available CLI primitives, and internal logic so agents can execute tasks without hallucinating paths or patterns. -- **`dev.kit bridge --json` (Development Mode)** Resolves the repository into high-fidelity, agent-friendly assets. It maps the **Skill Mesh**, available CLI primitives, and internal logic so agents can execute tasks without hallucinating paths or patterns. 
+![dev.kit bridge](assets/dev-kit-bridge.svg) --- diff --git a/assets/compliance-audit.svg b/assets/compliance-audit.svg new file mode 100644 index 0000000..9911159 --- /dev/null +++ b/assets/compliance-audit.svg @@ -0,0 +1 @@ +<svg id="my-svg" width="100%" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" class="flowchart" style="max-width: 572.667px; background-color: white;" viewBox="0 0 572.66748046875 70" role="graphics-document document" aria-roledescription="flowchart-v2"><style>#my-svg{font-size:16px;fill:#333;}@keyframes edge-animation-frame{from{stroke-dashoffset:0;}}@keyframes dash{to{stroke-dashoffset:0;}}#my-svg .edge-animation-slow{stroke-dasharray:9,5!important;stroke-dashoffset:900;animation:dash 50s linear infinite;stroke-linecap:round;}#my-svg .edge-animation-fast{stroke-dasharray:9,5!important;stroke-dashoffset:900;animation:dash 20s linear infinite;stroke-linecap:round;}#my-svg .error-icon{fill:#552222;}#my-svg .error-text{fill:#552222;stroke:#552222;}#my-svg .edge-thickness-normal{stroke-width:1px;}#my-svg .edge-thickness-thick{stroke-width:3.5px;}#my-svg .edge-pattern-solid{stroke-dasharray:0;}#my-svg .edge-thickness-invisible{stroke-width:0;fill:none;}#my-svg .edge-pattern-dashed{stroke-dasharray:3;}#my-svg .edge-pattern-dotted{stroke-dasharray:2;}#my-svg .marker{fill:#7a8b99;stroke:#7a8b99;}#my-svg .marker.cross{stroke:#7a8b99;}#my-svg svg{font-size:16px;}#my-svg p{margin:0;}#my-svg .label{color:#333;}#my-svg .cluster-label text{fill:#333;}#my-svg .cluster-label span{color:#333;}#my-svg .cluster-label span p{background-color:transparent;}#my-svg .label text,#my-svg span{fill:#333;color:#333;}#my-svg .node rect,#my-svg .node circle,#my-svg .node ellipse,#my-svg .node polygon,#my-svg .node path{fill:#ECECFF;stroke:#9370DB;stroke-width:1px;}#my-svg .rough-node .label text,#my-svg .node .label text,#my-svg .image-shape .label,#my-svg .icon-shape .label{text-anchor:middle;}#my-svg .node .katex 
path{fill:#000;stroke:#000;stroke-width:1px;}#my-svg .rough-node .label,#my-svg .node .label,#my-svg .image-shape .label,#my-svg .icon-shape .label{text-align:center;}#my-svg .node.clickable{cursor:pointer;}#my-svg .root .anchor path{fill:#7a8b99!important;stroke-width:0;stroke:#7a8b99;}#my-svg .arrowheadPath{fill:#333333;}#my-svg .edgePath .path{stroke:#7a8b99;stroke-width:2.0px;}#my-svg .flowchart-link{stroke:#7a8b99;fill:none;}#my-svg .edgeLabel{background-color:rgba(232,232,232, 0.8);text-align:center;}#my-svg .edgeLabel p{background-color:rgba(232,232,232, 0.8);}#my-svg .edgeLabel rect{opacity:0.5;background-color:rgba(232,232,232, 0.8);fill:rgba(232,232,232, 0.8);}#my-svg .labelBkg{background-color:rgba(232, 232, 232, 0.5);}#my-svg .cluster rect{fill:#ffffde;stroke:#aaaa33;stroke-width:1px;}#my-svg .cluster text{fill:#333;}#my-svg .cluster span{color:#333;}#my-svg div.mermaidTooltip{position:absolute;text-align:center;max-width:200px;padding:2px;font-size:12px;background:#f8fafc;border:1px solid #aaaa33;border-radius:2px;pointer-events:none;z-index:100;}#my-svg .flowchartTitleText{text-anchor:middle;font-size:18px;fill:#333;}#my-svg rect.text{fill:none;stroke-width:0;}#my-svg .icon-shape,#my-svg .image-shape{background-color:rgba(232,232,232, 0.8);text-align:center;}#my-svg .icon-shape p,#my-svg .image-shape p{background-color:rgba(232,232,232, 0.8);padding:2px;}#my-svg .icon-shape rect,#my-svg .image-shape rect{opacity:0.5;background-color:rgba(232,232,232, 0.8);fill:rgba(232,232,232, 0.8);}#my-svg .label-icon{display:inline-block;height:1em;overflow:visible;vertical-align:-0.125em;}#my-svg .node .label-icon path{fill:currentColor;stroke:revert;stroke-width:revert;}#my-svg :root{--mermaid-font-family:"trebuchet ms",verdana,arial,sans-serif;}</style><g><marker id="my-svg_flowchart-v2-pointEnd" class="marker flowchart-v2" viewBox="0 0 10 10" refX="5" refY="5" markerUnits="userSpaceOnUse" markerWidth="8" markerHeight="8" orient="auto"><path d="M 0 0 L 10 5 L 0 
10 z" class="arrowMarkerPath" style="stroke-width: 1; stroke-dasharray: 1, 0;"/></marker><marker id="my-svg_flowchart-v2-pointStart" class="marker flowchart-v2" viewBox="0 0 10 10" refX="4.5" refY="5" markerUnits="userSpaceOnUse" markerWidth="8" markerHeight="8" orient="auto"><path d="M 0 5 L 10 10 L 10 0 z" class="arrowMarkerPath" style="stroke-width: 1; stroke-dasharray: 1, 0;"/></marker><marker id="my-svg_flowchart-v2-circleEnd" class="marker flowchart-v2" viewBox="0 0 10 10" refX="11" refY="5" markerUnits="userSpaceOnUse" markerWidth="11" markerHeight="11" orient="auto"><circle cx="5" cy="5" r="5" class="arrowMarkerPath" style="stroke-width: 1; stroke-dasharray: 1, 0;"/></marker><marker id="my-svg_flowchart-v2-circleStart" class="marker flowchart-v2" viewBox="0 0 10 10" refX="-1" refY="5" markerUnits="userSpaceOnUse" markerWidth="11" markerHeight="11" orient="auto"><circle cx="5" cy="5" r="5" class="arrowMarkerPath" style="stroke-width: 1; stroke-dasharray: 1, 0;"/></marker><marker id="my-svg_flowchart-v2-crossEnd" class="marker cross flowchart-v2" viewBox="0 0 11 11" refX="12" refY="5.2" markerUnits="userSpaceOnUse" markerWidth="11" markerHeight="11" orient="auto"><path d="M 1,1 l 9,9 M 10,1 l -9,9" class="arrowMarkerPath" style="stroke-width: 2; stroke-dasharray: 1, 0;"/></marker><marker id="my-svg_flowchart-v2-crossStart" class="marker cross flowchart-v2" viewBox="0 0 11 11" refX="-1" refY="5.2" markerUnits="userSpaceOnUse" markerWidth="11" markerHeight="11" orient="auto"><path d="M 1,1 l 9,9 M 10,1 l -9,9" class="arrowMarkerPath" style="stroke-width: 2; stroke-dasharray: 1, 0;"/></marker><g class="root"><g class="clusters"/><g class="edgePaths"><path d="M102.891,35L107.057,35C111.224,35,119.557,35,127.307,35.07C135.058,35.141,142.224,35.281,145.808,35.351L149.391,35.422" id="L_Repo_Command_0" class="edge-thickness-normal edge-pattern-solid edge-thickness-normal edge-pattern-solid flowchart-link" style=";" data-edge="true" data-et="edge" 
data-id="L_Repo_Command_0" data-points="W3sieCI6MTAyLjg5MDYyNSwieSI6MzV9LHsieCI6MTI3Ljg5MDYyNSwieSI6MzV9LHsieCI6MTUzLjM5MDYyNTAwMDAwMDE0LCJ5IjozNS41fV0=" marker-end="url(#my-svg_flowchart-v2-pointEnd)"/><path d="M272.542,35.5L276.626,35.417C280.709,35.333,288.876,35.167,296.459,35.083C304.042,35,311.042,35,314.542,35L318.042,35" id="L_Command_Audit_0" class="edge-thickness-normal edge-pattern-solid edge-thickness-normal edge-pattern-solid flowchart-link" style=";" data-edge="true" data-et="edge" data-id="L_Command_Audit_0" data-points="W3sieCI6MjcyLjU0MjQ2MjQzMTgyNzA2LCJ5IjozNS41fSx7IngiOjI5Ny4wNDI0NjUyMDk5NjA5NCwieSI6MzV9LHsieCI6MzIyLjA0MjQ2NTIwOTk2MDk0LCJ5IjozNX1d" marker-end="url(#my-svg_flowchart-v2-pointEnd)"/><path d="M420.042,35L424.209,35C428.376,35,436.709,35,444.376,35C452.042,35,459.042,35,462.542,35L466.042,35" id="L_Audit_Gaps_0" class="edge-thickness-normal edge-pattern-solid edge-thickness-normal edge-pattern-solid flowchart-link" style=";" data-edge="true" data-et="edge" data-id="L_Audit_Gaps_0" data-points="W3sieCI6NDIwLjA0MjQ2NTIwOTk2MDk0LCJ5IjozNX0seyJ4Ijo0NDUuMDQyNDY1MjA5OTYwOTQsInkiOjM1fSx7IngiOjQ3MC4wNDI0NjUyMDk5NjA5NCwieSI6MzV9XQ==" marker-end="url(#my-svg_flowchart-v2-pointEnd)"/></g><g class="edgeLabels"><g class="edgeLabel"><g class="label" data-id="L_Repo_Command_0" transform="translate(0, 0)"><foreignObject width="0" height="0"><div xmlns="http://www.w3.org/1999/xhtml" class="labelBkg" style="display: table-cell; white-space: nowrap; line-height: 1.5; max-width: 200px; text-align: center;"><span class="edgeLabel"></span></div></foreignObject></g></g><g class="edgeLabel"><g class="label" data-id="L_Command_Audit_0" transform="translate(0, 0)"><foreignObject width="0" height="0"><div xmlns="http://www.w3.org/1999/xhtml" class="labelBkg" style="display: table-cell; white-space: nowrap; line-height: 1.5; max-width: 200px; text-align: center;"><span class="edgeLabel"></span></div></foreignObject></g></g><g class="edgeLabel"><g class="label" 
data-id="L_Audit_Gaps_0" transform="translate(0, 0)"><foreignObject width="0" height="0"><div xmlns="http://www.w3.org/1999/xhtml" class="labelBkg" style="display: table-cell; white-space: nowrap; line-height: 1.5; max-width: 200px; text-align: center;"><span class="edgeLabel"></span></div></foreignObject></g></g></g><g class="nodes"><g class="node default" id="flowchart-Repo-0" transform="translate(55.4453125, 35)"><rect class="basic label-container" style="fill:#f5f7fa !important;stroke:#a8b3bd !important;stroke-width:1.5px !important" x="-47.4453125" y="-27" width="94.890625" height="54"/><g class="label" style="color:#23313b !important" transform="translate(-17.4453125, -12)"><rect/><foreignObject width="34.890625" height="24"><div style="color: rgb(35, 49, 59) !important; display: table-cell; white-space: nowrap; line-height: 1.5; max-width: 200px; text-align: center;" xmlns="http://www.w3.org/1999/xhtml"><span style="color:#23313b !important" class="nodeLabel"><p>Repo</p></span></div></foreignObject></g></g><g class="node default" id="flowchart-Command-1" transform="translate(212.46654510498047, 35)"><g class="basic label-container outer-path"><path d="M-40.0859375 -19.5 C-11.600110516039372 -19.5, 16.885716467921256 -19.5, 40.0859375 -19.5 C40.0859375 -19.5, 40.0859375 -19.5, 40.0859375 -19.5 C40.537413373423696 -19.48552204748378, 40.9888892468474 -19.47104409496756, 41.3353067896239 -19.45993515863156 C41.80326546594064 -19.414791747929932, 42.27122414225738 -19.369648337228305, 42.579542152847864 -19.3399052695533 C43.05549337936614 -19.262957166306713, 43.53144460588441 -19.186009063060126, 43.81353075967676 -19.140403561325776 C44.05981909904976 -19.0841898550455, 44.306107438422764 -19.027976148765227, 45.03220188623539 -18.862249829261074 C45.48963885937793 -18.72648493676804, 45.94707583252048 -18.590720044275002, 46.230547751460605 -18.50658706670804 C46.63343381898132 -18.358321269622433, 47.03631988650203 -18.21005547253683, 47.4036440951478 
-18.074876768247425 C47.85793526259687 -17.873775544486396, 48.31222643004594 -17.67267432072537, 48.54667041279238 -17.568892924097174 C48.91843147023805 -17.37494541855209, 49.290192527683715 -17.180997913007012, 49.65492976407678 -16.990714730406097 C49.984609937054735 -16.790860672131114, 50.314290110032694 -16.59100661385613, 50.7238680736057 -16.342718045390892 C51.04567527054182 -16.118239275617285, 51.36748246747794 -15.893760505843678, 51.74909284457871 -15.627565626425154 C52.10299093291739 -15.3453413185987, 52.456889021256075 -15.063117010772249, 52.726391208501866 -14.848196188198123 C53.07489758034395 -14.531691922264452, 53.423403952186035 -14.21518765633078, 53.65174723676799 -14.007812326905688 C53.89173873746713 -13.760001398765178, 54.13173023816627 -13.512190470624668, 54.52135844296865 -13.10986736009568 C54.78736891015222 -12.79739632891748, 55.053379377335794 -12.484925297739279, 55.33165140812658 -12.158051136245305 C55.49514298058945 -11.938987323533716, 55.65863455305232 -11.71992351082213, 56.079296464640635 -11.156274872382312 C56.3416463700113 -10.753234966919338, 56.60399627538196 -10.350195061456363, 56.76122137860425 -10.108655082055241 C56.94044815104256 -9.790419798912339, 57.119674923480865 -9.472184515769438, 57.374623974273504 -9.019496659696287 C57.529780712661534 -8.697310372611172, 57.68493745104956 -8.375124085526059, 57.91698364880834 -7.893275190886684 C58.033907111922105 -7.604471976434308, 58.150830575035876 -7.315668761981932, 58.386071729970325 -6.734618561215508 C58.53376168645564 -6.289799943480709, 58.68145164294095 -5.844981325745909, 58.77996063421488 -5.548287939305138 C58.852464407419546 -5.271799628741662, 58.92496818062421 -4.995311318178186, 59.09703178754556 -4.339158212148133 C59.18776339394218 -3.8732705894332993, 59.27849500033879 -3.4073829667184654, 59.335982276581774 -3.1121979531509023 C59.39388712961821 -2.66309934760691, 59.45179198265464 -2.214000742062917, 59.49583020250937 -1.872449005199798 
C59.51868803071696 -1.5164198803597146, 59.54154585892455 -1.1603907555196313, 59.57591871591342 -0.6250057626472757 C59.57591871591342 -0.14212063149651744, 59.57591871591342 0.3407644996542408, 59.57591871591342 0.625005762647271 C59.546515253974725 1.0829884163896346, 59.51711179203604 1.5409710701319983, 59.49583020250937 1.8724490051997846 C59.457152239471434 2.172427648694283, 59.4184742764335 2.4724062921887815, 59.335982276581774 3.1121979531508885 C59.241548987569224 3.5970929337476387, 59.147115698556675 4.0819879143443885, 59.09703178754556 4.339158212148129 C58.9755065318558 4.802586681472176, 58.85398127616604 5.266015150796223, 58.77996063421489 5.548287939305125 C58.67703915120238 5.858271047827403, 58.57411766818988 6.16825415634968, 58.386071729970325 6.734618561215495 C58.21459993733899 7.158157204196472, 58.04312814470766 7.58169584717745, 57.91698364880834 7.893275190886679 C57.77737526589722 8.183174985142491, 57.63776688298609 8.473074779398303, 57.374623974273504 9.019496659696284 C57.19052131796262 9.346389568391722, 57.006418661651736 9.67328247708716, 56.76122137860425 10.108655082055236 C56.541371375797596 10.446403724358106, 56.321521372990944 10.784152366660978, 56.07929646464064 11.156274872382301 C55.810558028041704 11.516359894849648, 55.54181959144277 11.876444917316995, 55.33165140812658 12.158051136245302 C55.103896415219765 12.425585110539874, 54.876141422312955 12.693119084834446, 54.52135844296866 13.10986736009567 C54.20332433066593 13.438263692134461, 53.885290218363195 13.766660024173252, 53.65174723676799 14.007812326905684 C53.282283698619366 14.343349336782726, 52.91282016047074 14.67888634665977, 52.72639120850189 14.848196188198111 C52.338250315986606 15.157728221116113, 51.950109423471325 15.467260254034116, 51.74909284457871 15.627565626425152 C51.3650774091003 15.895438170454618, 50.98106197362189 16.163310714484084, 50.72386807360571 16.34271804539089 C50.2972419013437 16.60134133814979, 49.870615729081706 
16.85996463090869, 49.65492976407678 16.990714730406093 C49.357765110812565 17.14574533967099, 49.06060045754835 17.30077594893589, 48.54667041279239 17.56889292409717 C48.12839818228418 17.754049632938276, 47.71012595177598 17.939206341779386, 47.403644095147804 18.07487676824742 C46.98532900752468 18.228820587355013, 46.567013919901555 18.382764406462602, 46.23054775146062 18.506587066708033 C45.86397715638075 18.61538329515942, 45.497406561300885 18.724179523610808, 45.03220188623541 18.86224982926107 C44.715135270769636 18.934618215188106, 44.39806865530386 19.006986601115138, 43.813530759676766 19.140403561325773 C43.534795709498425 19.18546728261601, 43.25606065932009 19.230531003906243, 42.57954215284788 19.3399052695533 C42.11473959019302 19.384744213765746, 41.649937027538165 19.429583157978193, 41.3353067896239 19.45993515863156 C40.868587032148106 19.47490195283891, 40.401867274672306 19.489868747046263, 40.08593750000001 19.5 C40.08593750000001 19.5, 40.0859375 19.5, 40.0859375 19.5 C21.509976179822253 19.5, 2.934014859644506 19.5, -40.08593749999999 19.5 C-40.4748446572096 19.48752850443015, -40.86375181441921 19.475057008860293, -41.33530678962389 19.45993515863156 C-41.74874189694674 19.420051571056586, -42.16217700426959 19.380167983481616, -42.57954215284787 19.3399052695533 C-43.01351428836455 19.269744020187137, -43.44748642388122 19.199582770820975, -43.81353075967676 19.140403561325773 C-44.06964829281056 19.08194640571108, -44.32576582594437 19.023489250096382, -45.032201886235384 18.862249829261074 C-45.491951604440494 18.725798526173314, -45.95170132264561 18.589347223085557, -46.23054775146059 18.506587066708043 C-46.568373059285925 18.382264230605408, -46.90619836711126 18.257941394502772, -47.4036440951478 18.074876768247425 C-47.68189032875769 17.951705414509586, -47.96013656236757 17.828534060771748, -48.54667041279238 17.568892924097174 C-48.77471509639299 17.449922158732328, -49.0027597799936 17.33095139336748, -49.65492976407678 
16.990714730406097 C-49.936608508565755 16.81995942016884, -50.218287253054726 16.649204109931585, -50.723868073605686 16.3427180453909 C-51.13301911333227 16.057311991940686, -51.54217015305884 15.771905938490471, -51.74909284457871 15.627565626425156 C-52.11103945570344 15.338922835857016, -52.47298606682817 15.050280045288874, -52.726391208501866 14.848196188198125 C-52.976147673351036 14.621373975016054, -53.2259041382002 14.394551761833982, -53.651747236767974 14.007812326905697 C-53.94437504183584 13.70565009298352, -54.237002846903714 13.403487859061343, -54.521358442968655 13.109867360095677 C-54.76588803904039 12.822628982421628, -55.01041763511213 12.53539060474758, -55.331651408126575 12.158051136245307 C-55.59881813248244 11.80007206499775, -55.8659848568383 11.442092993750192, -56.079296464640635 11.156274872382316 C-56.23873429881617 10.91133554403555, -56.398172132991704 10.666396215688785, -56.76122137860425 10.108655082055249 C-56.94238342207134 9.786983529233305, -57.12354546553843 9.465311976411362, -57.374623974273504 9.019496659696289 C-57.544438795325824 8.666872550326039, -57.714253616378144 8.31424844095579, -57.91698364880834 7.893275190886686 C-58.04330665137493 7.581254932258109, -58.16962965394151 7.2692346736295335, -58.386071729970325 6.73461856121551 C-58.53586126423201 6.2834763497405755, -58.68565079849369 5.832334138265642, -58.77996063421488 5.5482879393051325 C-58.851089727957536 5.277041877320159, -58.92221882170019 5.005795815335187, -59.09703178754556 4.339158212148136 C-59.18012968687259 3.912468061851082, -59.26322758619962 3.485777911554028, -59.335982276581774 3.112197953150904 C-59.37309410544347 2.8243659370591114, -59.41020593430517 2.536533920967319, -59.49583020250937 1.872449005199809 C-59.524578345802645 1.4246734683233926, -59.55332648909592 0.9768979314469762, -59.57591871591342 0.6250057626472781 C-59.57591871591342 0.3678248821835566, -59.57591871591342 0.11064400171983502, -59.57591871591342 -0.6250057626472687 
C-59.55852587188257 -0.8959133476786361, -59.541133027851735 -1.1668209327100034, -59.49583020250937 -1.8724490051997822 C-59.45414762656277 -2.195730833085933, -59.41246505061618 -2.519012660972084, -59.335982276581774 -3.112197953150895 C-59.26147366209811 -3.4947839411232406, -59.18696504761446 -3.877369929095586, -59.09703178754556 -4.339158212148126 C-59.02352456563338 -4.619473107530783, -58.95001734372119 -4.899788002913439, -58.77996063421489 -5.548287939305123 C-58.63575317709475 -5.9826178238129275, -58.49154571997462 -6.416947708320732, -58.38607172997033 -6.734618561215485 C-58.2279292077458 -7.125233648587743, -58.06978668552127 -7.5158487359600015, -57.91698364880834 -7.893275190886676 C-57.75222391804273 -8.235402297114966, -57.58746418727712 -8.577529403343254, -57.374623974273504 -9.019496659696282 C-57.18388884090807 -9.35816620301321, -56.99315370754263 -9.69683574633014, -56.76122137860425 -10.108655082055243 C-56.59452952143963 -10.364738537691158, -56.42783766427502 -10.620821993327073, -56.07929646464064 -11.156274872382308 C-55.907572541304965 -11.386369285280866, -55.73584861796929 -11.616463698179425, -55.33165140812659 -12.158051136245302 C-55.136212281484966 -12.387625055199894, -54.94077315484334 -12.617198974154489, -54.52135844296866 -13.10986736009567 C-54.34373353655175 -13.293279659247156, -54.16610863013484 -13.476691958398641, -53.651747236767996 -14.007812326905677 C-53.46474951582511 -14.17763870937743, -53.277751794882235 -14.347465091849184, -52.72639120850189 -14.848196188198107 C-52.35903591186431 -15.141152261531577, -51.99168061522674 -15.434108334865044, -51.74909284457872 -15.627565626425149 C-51.414812815147485 -15.860744904207131, -51.08053278571626 -16.093924181989113, -50.723868073605715 -16.342718045390885 C-50.37244395443792 -16.555753411627762, -50.02101983527012 -16.76878877786464, -49.65492976407679 -16.99071473040609 C-49.27475188726043 -17.189053285225633, -48.894574010444074 -17.387391840045176, 
-48.54667041279239 -17.56889292409717 C-48.31775513920119 -17.67022692518567, -48.08883986560999 -17.771560926274173, -47.403644095147804 -18.07487676824742 C-47.116277917445984 -18.180630178025517, -46.828911739744164 -18.286383587803616, -46.23054775146062 -18.506587066708033 C-45.82060717774721 -18.62825527716419, -45.410666604033814 -18.749923487620343, -45.03220188623541 -18.862249829261067 C-44.69478243706309 -18.93926361666697, -44.35736298789076 -19.016277404072873, -43.813530759676766 -19.140403561325773 C-43.471034641771496 -19.195775677571632, -43.12853852386623 -19.251147793817495, -42.57954215284788 -19.3399052695533 C-42.32924114085076 -19.36405150714965, -42.078940128853645 -19.388197744746005, -41.3353067896239 -19.45993515863156 C-40.93688035095821 -19.47271191902955, -40.53845391229252 -19.485488679427544, -40.08593750000001 -19.5 C-40.08593750000001 -19.5, -40.0859375 -19.5, -40.0859375 -19.5" stroke="none" stroke-width="0" fill="#1f2937" style="fill:#1f2937 !important;stroke:#111827 !important;stroke-width:1.5px !important"/><path d="M-40.0859375 -19.5 C-16.75508591913086 -19.5, 6.575765661738281 -19.5, 40.0859375 -19.5 M-40.0859375 -19.5 C-11.973960023463349 -19.5, 16.138017453073303 -19.5, 40.0859375 -19.5 M40.0859375 -19.5 C40.0859375 -19.5, 40.0859375 -19.5, 40.0859375 -19.5 M40.0859375 -19.5 C40.0859375 -19.5, 40.0859375 -19.5, 40.0859375 -19.5 M40.0859375 -19.5 C40.50512386831883 -19.486557509064784, 40.92431023663766 -19.47311501812957, 41.3353067896239 -19.45993515863156 M40.0859375 -19.5 C40.52421955165526 -19.485945147667678, 40.962501603310514 -19.471890295335356, 41.3353067896239 -19.45993515863156 M41.3353067896239 -19.45993515863156 C41.67946680500377 -19.42673445586084, 42.02362682038364 -19.393533753090118, 42.579542152847864 -19.3399052695533 M41.3353067896239 -19.45993515863156 C41.73250136545392 -19.421618275597602, 42.129695941283934 -19.38330139256365, 42.579542152847864 -19.3399052695533 M42.579542152847864 
-19.3399052695533 C43.019461112876286 -19.26878258371839, 43.45938007290471 -19.197659897883486, 43.81353075967676 -19.140403561325776 M42.579542152847864 -19.3399052695533 C42.84354357981644 -19.297223566202664, 43.107545006785024 -19.254541862852026, 43.81353075967676 -19.140403561325776 M43.81353075967676 -19.140403561325776 C44.10044957054041 -19.074916215066366, 44.387368381404066 -19.009428868806953, 45.03220188623539 -18.862249829261074 M43.81353075967676 -19.140403561325776 C44.26980718540527 -19.03626144463518, 44.72608361113379 -18.932119327944584, 45.03220188623539 -18.862249829261074 M45.03220188623539 -18.862249829261074 C45.29931305605888 -18.782972636152607, 45.566424225882365 -18.703695443044136, 46.230547751460605 -18.50658706670804 M45.03220188623539 -18.862249829261074 C45.397441961735474 -18.75384849203693, 45.762682037235564 -18.645447154812782, 46.230547751460605 -18.50658706670804 M46.230547751460605 -18.50658706670804 C46.536052287562214 -18.394158573369673, 46.84155682366383 -18.281730080031306, 47.4036440951478 -18.074876768247425 M46.230547751460605 -18.50658706670804 C46.56616743059881 -18.383075922350876, 46.90178710973701 -18.259564777993713, 47.4036440951478 -18.074876768247425 M47.4036440951478 -18.074876768247425 C47.75968299448252 -17.91726890903064, 48.115721893817245 -17.759661049813854, 48.54667041279238 -17.568892924097174 M47.4036440951478 -18.074876768247425 C47.83539146240395 -17.88375501609373, 48.2671388296601 -17.692633263940035, 48.54667041279238 -17.568892924097174 M48.54667041279238 -17.568892924097174 C48.93513488595445 -17.366231257271895, 49.32359935911651 -17.16356959044662, 49.65492976407678 -16.990714730406097 M48.54667041279238 -17.568892924097174 C48.777523298236986 -17.448457121629055, 49.00837618368159 -17.328021319160936, 49.65492976407678 -16.990714730406097 M49.65492976407678 -16.990714730406097 C50.01125505824467 -16.774708243081665, 50.36758035241255 -16.558701755757234, 50.7238680736057 
-16.342718045390892 M49.65492976407678 -16.990714730406097 C50.06994386547294 -16.73913074324617, 50.4849579668691 -16.487546756086246, 50.7238680736057 -16.342718045390892 M50.7238680736057 -16.342718045390892 C50.95790753344634 -16.17946224961099, 51.19194699328698 -16.016206453831085, 51.74909284457871 -15.627565626425154 M50.7238680736057 -16.342718045390892 C51.00489107743219 -16.1466885632113, 51.28591408125868 -15.950659081031706, 51.74909284457871 -15.627565626425154 M51.74909284457871 -15.627565626425154 C51.983897748379135 -15.440314964065225, 52.21870265217955 -15.253064301705294, 52.726391208501866 -14.848196188198123 M51.74909284457871 -15.627565626425154 C52.086361164153296 -15.358603116651336, 52.42362948372788 -15.08964060687752, 52.726391208501866 -14.848196188198123 M52.726391208501866 -14.848196188198123 C53.08256525435323 -14.524728343604764, 53.43873930020458 -14.201260499011404, 53.65174723676799 -14.007812326905688 M52.726391208501866 -14.848196188198123 C53.01721679229754 -14.58407608781818, 53.308042376093205 -14.31995598743824, 53.65174723676799 -14.007812326905688 M53.65174723676799 -14.007812326905688 C53.90561243687028 -13.745675665079522, 54.15947763697257 -13.483539003253355, 54.52135844296865 -13.10986736009568 M53.65174723676799 -14.007812326905688 C53.87882387732693 -13.773337052189673, 54.10590051788587 -13.538861777473658, 54.52135844296865 -13.10986736009568 M54.52135844296865 -13.10986736009568 C54.70709613897787 -12.891689293588948, 54.89283383498709 -12.673511227082216, 55.33165140812658 -12.158051136245305 M54.52135844296865 -13.10986736009568 C54.80521546369759 -12.776432751625423, 55.08907248442653 -12.442998143155167, 55.33165140812658 -12.158051136245305 M55.33165140812658 -12.158051136245305 C55.59110359102446 -11.81040884774226, 55.85055577392234 -11.462766559239212, 56.079296464640635 -11.156274872382312 M55.33165140812658 -12.158051136245305 C55.55500170241073 -11.858782089534945, 55.77835199669488 
-11.559513042824584, 56.079296464640635 -11.156274872382312 M56.079296464640635 -11.156274872382312 C56.25646736048031 -10.884092799325623, 56.43363825631999 -10.611910726268935, 56.76122137860425 -10.108655082055241 M56.079296464640635 -11.156274872382312 C56.219049655936175 -10.941576441565946, 56.358802847231715 -10.726878010749578, 56.76122137860425 -10.108655082055241 M56.76122137860425 -10.108655082055241 C56.9604107801502 -9.754974129496329, 57.15960018169615 -9.401293176937417, 57.374623974273504 -9.019496659696287 M56.76122137860425 -10.108655082055241 C56.900200344524244 -9.861883854917949, 57.03917931044424 -9.615112627780654, 57.374623974273504 -9.019496659696287 M57.374623974273504 -9.019496659696287 C57.58947417644208 -8.573355617835345, 57.80432437861066 -8.127214575974403, 57.91698364880834 -7.893275190886684 M57.374623974273504 -9.019496659696287 C57.58207814430895 -8.588713636809521, 57.7895323143444 -8.157930613922755, 57.91698364880834 -7.893275190886684 M57.91698364880834 -7.893275190886684 C58.06530727946917 -7.526912955512125, 58.21363091013 -7.160550720137565, 58.386071729970325 -6.734618561215508 M57.91698364880834 -7.893275190886684 C58.03440644666186 -7.603238609973139, 58.151829244515376 -7.313202029059594, 58.386071729970325 -6.734618561215508 M58.386071729970325 -6.734618561215508 C58.53000891398538 -6.301102696238035, 58.673946098000435 -5.867586831260563, 58.77996063421488 -5.548287939305138 M58.386071729970325 -6.734618561215508 C58.540911842911754 -6.268264811457611, 58.69575195585318 -5.801911061699714, 58.77996063421488 -5.548287939305138 M58.77996063421488 -5.548287939305138 C58.86211383418511 -5.235002182396565, 58.94426703415534 -4.921716425487992, 59.09703178754556 -4.339158212148133 M58.77996063421488 -5.548287939305138 C58.897901439540405 -5.098528532730557, 59.01584224486592 -4.648769126155976, 59.09703178754556 -4.339158212148133 M59.09703178754556 -4.339158212148133 C59.16540382400858 -3.988082255345554, 
59.233775860471596 -3.6370062985429747, 59.335982276581774 -3.1121979531509023 M59.09703178754556 -4.339158212148133 C59.18904911960607 -3.866668659887422, 59.28106645166658 -3.3941791076267105, 59.335982276581774 -3.1121979531509023 M59.335982276581774 -3.1121979531509023 C59.378718035057524 -2.780747849543008, 59.42145379353328 -2.4492977459351133, 59.49583020250937 -1.872449005199798 M59.335982276581774 -3.1121979531509023 C59.39594734764538 -2.6471207034242177, 59.45591241870899 -2.1820434536975335, 59.49583020250937 -1.872449005199798 M59.49583020250937 -1.872449005199798 C59.52639830344509 -1.3963261589989717, 59.55696640438081 -0.9202033127981454, 59.57591871591342 -0.6250057626472757 M59.49583020250937 -1.872449005199798 C59.524086667041985 -1.4323317618965974, 59.55234313157461 -0.992214518593397, 59.57591871591342 -0.6250057626472757 M59.57591871591342 -0.6250057626472757 C59.57591871591342 -0.17892657246614774, 59.57591871591342 0.2671526177149802, 59.57591871591342 0.625005762647271 M59.57591871591342 -0.6250057626472757 C59.57591871591342 -0.13222236682043337, 59.57591871591342 0.36056102900640896, 59.57591871591342 0.625005762647271 M59.57591871591342 0.625005762647271 C59.55949471683054 0.8808228055765044, 59.54307071774767 1.1366398485057376, 59.49583020250937 1.8724490051997846 M59.57591871591342 0.625005762647271 C59.543908075368634 1.1235973276562083, 59.51189743482386 1.6221888926651458, 59.49583020250937 1.8724490051997846 M59.49583020250937 1.8724490051997846 C59.452692685212654 2.20701507088678, 59.40955516791594 2.5415811365737757, 59.335982276581774 3.1121979531508885 M59.49583020250937 1.8724490051997846 C59.44205197683881 2.289542303788795, 59.38827375116825 2.706635602377805, 59.335982276581774 3.1121979531508885 M59.335982276581774 3.1121979531508885 C59.2693527529483 3.45432647485396, 59.202723229314834 3.796454996557032, 59.09703178754556 4.339158212148129 M59.335982276581774 3.1121979531508885 C59.280556216046556 3.396799059697203, 
59.22513015551134 3.681400166243517, 59.09703178754556 4.339158212148129 M59.09703178754556 4.339158212148129 C59.00500393316801 4.690100306014117, 58.912976078790464 5.041042399880106, 58.77996063421489 5.548287939305125 M59.09703178754556 4.339158212148129 C59.01081913569948 4.6679244184369235, 58.92460648385339 4.9966906247257175, 58.77996063421489 5.548287939305125 M58.77996063421489 5.548287939305125 C58.7011754566111 5.78557634153438, 58.622390279007305 6.022864743763636, 58.386071729970325 6.734618561215495 M58.77996063421489 5.548287939305125 C58.66125678643366 5.905805015932451, 58.542552938652435 6.263322092559776, 58.386071729970325 6.734618561215495 M58.386071729970325 6.734618561215495 C58.21901575785972 7.147250042136192, 58.05195978574911 7.559881523056889, 57.91698364880834 7.893275190886679 M58.386071729970325 6.734618561215495 C58.249966909051174 7.070800100293498, 58.113862088132024 7.4069816393715024, 57.91698364880834 7.893275190886679 M57.91698364880834 7.893275190886679 C57.76327888707833 8.2124464173741, 57.609574125348324 8.53161764386152, 57.374623974273504 9.019496659696284 M57.91698364880834 7.893275190886679 C57.773159096054684 8.191929952032098, 57.62933454330103 8.490584713177517, 57.374623974273504 9.019496659696284 M57.374623974273504 9.019496659696284 C57.24160275275732 9.255689308357566, 57.108581531241136 9.491881957018846, 56.76122137860425 10.108655082055236 M57.374623974273504 9.019496659696284 C57.223804606517064 9.28729171926488, 57.07298523876062 9.555086778833475, 56.76122137860425 10.108655082055236 M56.76122137860425 10.108655082055236 C56.602518097671485 10.352465939365949, 56.443814816738715 10.596276796676662, 56.07929646464064 11.156274872382301 M56.76122137860425 10.108655082055236 C56.57279874605753 10.398122844208306, 56.384376113510804 10.687590606361374, 56.07929646464064 11.156274872382301 M56.07929646464064 11.156274872382301 C55.88267325326895 11.419732062877317, 55.68605004189725 11.683189253372333, 
55.33165140812658 12.158051136245302 M56.07929646464064 11.156274872382301 C55.91783087057366 11.372624058613832, 55.75636527650667 11.588973244845365, 55.33165140812658 12.158051136245302 M55.33165140812658 12.158051136245302 C55.12154781627618 12.404850770465844, 54.91144422442577 12.651650404686384, 54.52135844296866 13.10986736009567 M55.33165140812658 12.158051136245302 C55.11776273034997 12.409296947772248, 54.90387405257337 12.660542759299195, 54.52135844296866 13.10986736009567 M54.52135844296866 13.10986736009567 C54.30441171308198 13.33388267036144, 54.087464983195304 13.55789798062721, 53.65174723676799 14.007812326905684 M54.52135844296866 13.10986736009567 C54.223718135307564 13.417205414479476, 53.92607782764646 13.724543468863281, 53.65174723676799 14.007812326905684 M53.65174723676799 14.007812326905684 C53.32016047812847 14.308950647797227, 52.98857371948895 14.610088968688768, 52.72639120850189 14.848196188198111 M53.65174723676799 14.007812326905684 C53.361533576477555 14.271376694548783, 53.07131991618712 14.534941062191882, 52.72639120850189 14.848196188198111 M52.72639120850189 14.848196188198111 C52.40591892166787 15.103764308822527, 52.08544663483386 15.359332429446944, 51.74909284457871 15.627565626425152 M52.72639120850189 14.848196188198111 C52.395147052132394 15.11235458812558, 52.06390289576291 15.376512988053051, 51.74909284457871 15.627565626425152 M51.74909284457871 15.627565626425152 C51.37900788203324 15.885720875577055, 51.008922919487766 16.14387612472896, 50.72386807360571 16.34271804539089 M51.74909284457871 15.627565626425152 C51.41236359655028 15.86245337307558, 51.07563434852184 16.097341119726007, 50.72386807360571 16.34271804539089 M50.72386807360571 16.34271804539089 C50.492787572648574 16.48280040305086, 50.26170707169144 16.622882760710834, 49.65492976407678 16.990714730406093 M50.72386807360571 16.34271804539089 C50.4032423564452 16.53708323933945, 50.08261663928469 16.731448433288005, 49.65492976407678 
16.990714730406093 M49.65492976407678 16.990714730406093 C49.3423008185669 17.153813051030205, 49.02967187305701 17.316911371654314, 48.54667041279239 17.56889292409717 M49.65492976407678 16.990714730406093 C49.300664042340046 17.175534930518126, 48.94639832060331 17.360355130630158, 48.54667041279239 17.56889292409717 M48.54667041279239 17.56889292409717 C48.17701994080799 17.73252622275301, 47.80736946882359 17.89615952140885, 47.403644095147804 18.07487676824742 M48.54667041279239 17.56889292409717 C48.28647001912872 17.68407591980164, 48.02626962546506 17.799258915506105, 47.403644095147804 18.07487676824742 M47.403644095147804 18.07487676824742 C47.02691241231861 18.213517509998066, 46.650180729489406 18.352158251748712, 46.23054775146062 18.506587066708033 M47.403644095147804 18.07487676824742 C47.07902063041436 18.19434120405574, 46.75439716568093 18.313805639864057, 46.23054775146062 18.506587066708033 M46.23054775146062 18.506587066708033 C45.77874867722681 18.640678660398894, 45.326949602993 18.774770254089756, 45.03220188623541 18.86224982926107 M46.23054775146062 18.506587066708033 C45.79673661186303 18.63533993581946, 45.36292547226544 18.764092804930886, 45.03220188623541 18.86224982926107 M45.03220188623541 18.86224982926107 C44.72667370869798 18.931984642026304, 44.42114553116055 19.001719454791537, 43.813530759676766 19.140403561325773 M45.03220188623541 18.86224982926107 C44.78313366115183 18.919098026314334, 44.53406543606825 18.9759462233676, 43.813530759676766 19.140403561325773 M43.813530759676766 19.140403561325773 C43.4089487194274 19.20581324801271, 43.004366679178034 19.271222934699644, 42.57954215284788 19.3399052695533 M43.813530759676766 19.140403561325773 C43.393600480255095 19.208294632294713, 42.97367020083342 19.276185703263657, 42.57954215284788 19.3399052695533 M42.57954215284788 19.3399052695533 C42.20746948058607 19.375798672763537, 41.835396808324255 19.41169207597378, 41.3353067896239 19.45993515863156 M42.57954215284788 
19.3399052695533 C42.09472977372375 19.38667453669521, 41.60991739459963 19.433443803837122, 41.3353067896239 19.45993515863156 M41.3353067896239 19.45993515863156 C40.86287555326056 19.475085108850113, 40.39044431689721 19.49023505906867, 40.08593750000001 19.5 M41.3353067896239 19.45993515863156 C40.962810639955435 19.471880385131655, 40.59031449028697 19.48382561163175, 40.08593750000001 19.5 M40.08593750000001 19.5 C40.08593750000001 19.5, 40.0859375 19.5, 40.0859375 19.5 M40.08593750000001 19.5 C40.08593750000001 19.5, 40.0859375 19.5, 40.0859375 19.5 M40.0859375 19.5 C8.23469649763392 19.5, -23.61654450473216 19.5, -40.08593749999999 19.5 M40.0859375 19.5 C11.58933638378561 19.5, -16.90726473242878 19.5, -40.08593749999999 19.5 M-40.08593749999999 19.5 C-40.472602804206254 19.487600396292464, -40.859268108412515 19.475200792584925, -41.33530678962389 19.45993515863156 M-40.08593749999999 19.5 C-40.5249619021414 19.485921341932485, -40.96398630428281 19.471842683864967, -41.33530678962389 19.45993515863156 M-41.33530678962389 19.45993515863156 C-41.80259512234747 19.41485641517016, -42.26988345507105 19.369777671708757, -42.57954215284787 19.3399052695533 M-41.33530678962389 19.45993515863156 C-41.73899340475049 19.420991996374482, -42.142680019877076 19.382048834117406, -42.57954215284787 19.3399052695533 M-42.57954215284787 19.3399052695533 C-42.939792774269584 19.28166274286713, -43.300043395691304 19.223420216180962, -43.81353075967676 19.140403561325773 M-42.57954215284787 19.3399052695533 C-42.860359206842865 19.294504946032166, -43.14117626083786 19.249104622511034, -43.81353075967676 19.140403561325773 M-43.81353075967676 19.140403561325773 C-44.068158242730405 19.08228649991839, -44.32278572578405 19.024169438511006, -45.032201886235384 18.862249829261074 M-43.81353075967676 19.140403561325773 C-44.2245995631103 19.0465797894167, -44.63566836654384 18.952756017507628, -45.032201886235384 18.862249829261074 M-45.032201886235384 18.862249829261074 
C-45.337939716460156 18.77150844610098, -45.643677546684934 18.68076706294089, -46.23054775146059 18.506587066708043 M-45.032201886235384 18.862249829261074 C-45.384103310415476 18.757807333732693, -45.73600473459557 18.65336483820431, -46.23054775146059 18.506587066708043 M-46.23054775146059 18.506587066708043 C-46.688317146694125 18.3381236976882, -47.14608654192766 18.169660328668357, -47.4036440951478 18.074876768247425 M-46.23054775146059 18.506587066708043 C-46.68273824829437 18.34017678387008, -47.13492874512816 18.17376650103212, -47.4036440951478 18.074876768247425 M-47.4036440951478 18.074876768247425 C-47.83015264686005 17.88607408433107, -48.25666119857231 17.697271400414717, -48.54667041279238 17.568892924097174 M-47.4036440951478 18.074876768247425 C-47.75136124783971 17.92095269941357, -48.099078400531624 17.767028630579713, -48.54667041279238 17.568892924097174 M-48.54667041279238 17.568892924097174 C-48.98444881573897 17.340504211690387, -49.42222721868556 17.112115499283604, -49.65492976407678 16.990714730406097 M-48.54667041279238 17.568892924097174 C-48.894736142636916 17.387307255784023, -49.24280187248145 17.20572158747087, -49.65492976407678 16.990714730406097 M-49.65492976407678 16.990714730406097 C-49.90092839928251 16.84158891209594, -50.146927034488236 16.692463093785783, -50.723868073605686 16.3427180453909 M-49.65492976407678 16.990714730406097 C-49.87215504222483 16.859031490235317, -50.08938032037289 16.727348250064537, -50.723868073605686 16.3427180453909 M-50.723868073605686 16.3427180453909 C-51.089286591023175 16.087817906353123, -51.45470510844067 15.832917767315351, -51.74909284457871 15.627565626425156 M-50.723868073605686 16.3427180453909 C-50.93243156235992 16.197233184203856, -51.14099505111416 16.051748323016817, -51.74909284457871 15.627565626425156 M-51.74909284457871 15.627565626425156 C-52.12014694520848 15.331659855337218, -52.49120104583825 15.03575408424928, -52.726391208501866 14.848196188198125 M-51.74909284457871 
15.627565626425156 C-52.11020850096366 15.339585500149358, -52.471324157348604 15.05160537387356, -52.726391208501866 14.848196188198125 M-52.726391208501866 14.848196188198125 C-53.00394706556718 14.59612730253696, -53.2815029226325 14.344058416875793, -53.651747236767974 14.007812326905697 M-52.726391208501866 14.848196188198125 C-53.00671048189017 14.593617640948413, -53.287029755278475 14.339039093698698, -53.651747236767974 14.007812326905697 M-53.651747236767974 14.007812326905697 C-53.879487167796704 13.772652150322465, -54.10722709882543 13.537491973739233, -54.521358442968655 13.109867360095677 M-53.651747236767974 14.007812326905697 C-53.96279916742795 13.686625670660826, -54.273851098087924 13.365439014415957, -54.521358442968655 13.109867360095677 M-54.521358442968655 13.109867360095677 C-54.752158354760454 12.838756650818013, -54.982958266552245 12.567645941540349, -55.331651408126575 12.158051136245307 M-54.521358442968655 13.109867360095677 C-54.752087928453186 12.838839377565897, -54.98281741393772 12.567811395036115, -55.331651408126575 12.158051136245307 M-55.331651408126575 12.158051136245307 C-55.49015898301451 11.945665426232617, -55.64866655790244 11.733279716219926, -56.079296464640635 11.156274872382316 M-55.331651408126575 12.158051136245307 C-55.55657016766159 11.856680488988912, -55.781488927196605 11.55530984173252, -56.079296464640635 11.156274872382316 M-56.079296464640635 11.156274872382316 C-56.21904897291382 10.94157749087171, -56.358801481187 10.726880109361105, -56.76122137860425 10.108655082055249 M-56.079296464640635 11.156274872382316 C-56.25743175833649 10.882611225241451, -56.43556705203235 10.608947578100587, -56.76122137860425 10.108655082055249 M-56.76122137860425 10.108655082055249 C-56.907906948576425 9.848199999045026, -57.0545925185486 9.587744916034803, -57.374623974273504 9.019496659696289 M-56.76122137860425 10.108655082055249 C-56.907068632191404 9.84968851467537, -57.05291588577856 9.59072194729549, 
-57.374623974273504 9.019496659696289 M-57.374623974273504 9.019496659696289 C-57.501558163461524 8.755915099890798, -57.62849235264955 8.492333540085307, -57.91698364880834 7.893275190886686 M-57.374623974273504 9.019496659696289 C-57.519286396636794 8.719102044378932, -57.66394881900008 8.418707429061577, -57.91698364880834 7.893275190886686 M-57.91698364880834 7.893275190886686 C-58.07531888194728 7.502184103830024, -58.23365411508622 7.111093016773363, -58.386071729970325 6.73461856121551 M-57.91698364880834 7.893275190886686 C-58.02132267076211 7.635555789403122, -58.12566169271588 7.377836387919558, -58.386071729970325 6.73461856121551 M-58.386071729970325 6.73461856121551 C-58.50889146605108 6.364705084807732, -58.63171120213184 5.994791608399954, -58.77996063421488 5.5482879393051325 M-58.386071729970325 6.73461856121551 C-58.49162758855336 6.416701133205538, -58.597183447136395 6.098783705195566, -58.77996063421488 5.5482879393051325 M-58.77996063421488 5.5482879393051325 C-58.902741581602214 5.080070973318027, -59.02552252898955 4.611854007330922, -59.09703178754556 4.339158212148136 M-58.77996063421488 5.5482879393051325 C-58.8725234283698 5.1953058882236665, -58.965086222524725 4.8423238371422, -59.09703178754556 4.339158212148136 M-59.09703178754556 4.339158212148136 C-59.17932163923525 3.9166172156878867, -59.26161149092494 3.4940762192276376, -59.335982276581774 3.112197953150904 M-59.09703178754556 4.339158212148136 C-59.161793861404355 4.006618650352388, -59.22655593526314 3.6740790885566414, -59.335982276581774 3.112197953150904 M-59.335982276581774 3.112197953150904 C-59.37285815621136 2.826195912709918, -59.40973403584095 2.540193872268932, -59.49583020250937 1.872449005199809 M-59.335982276581774 3.112197953150904 C-59.380417057539155 2.76757056668547, -59.42485183849653 2.4229431802200363, -59.49583020250937 1.872449005199809 M-59.49583020250937 1.872449005199809 C-59.52272325173399 1.4535680558476138, -59.549616300958625 1.0346871064954186, 
-59.57591871591342 0.6250057626472781 M-59.49583020250937 1.872449005199809 C-59.52546127054792 1.410921202731657, -59.55509233858648 0.9493934002635053, -59.57591871591342 0.6250057626472781 M-59.57591871591342 0.6250057626472781 C-59.57591871591342 0.17199337520959518, -59.57591871591342 -0.2810190122280878, -59.57591871591342 -0.6250057626472687 M-59.57591871591342 0.6250057626472781 C-59.57591871591342 0.28531709833471036, -59.57591871591342 -0.05437156597785742, -59.57591871591342 -0.6250057626472687 M-59.57591871591342 -0.6250057626472687 C-59.55602198510401 -0.9349134053558441, -59.5361252542946 -1.2448210480644195, -59.49583020250937 -1.8724490051997822 M-59.57591871591342 -0.6250057626472687 C-59.55476782755394 -0.9544479215359813, -59.53361693919445 -1.283890080424694, -59.49583020250937 -1.8724490051997822 M-59.49583020250937 -1.8724490051997822 C-59.449185512882295 -2.2342160069115677, -59.402540823255215 -2.5959830086233535, -59.335982276581774 -3.112197953150895 M-59.49583020250937 -1.8724490051997822 C-59.43330529807551 -2.3573798170498694, -59.37078039364165 -2.842310628899957, -59.335982276581774 -3.112197953150895 M-59.335982276581774 -3.112197953150895 C-59.27412176753996 -3.429838592857417, -59.21226125849814 -3.747479232563939, -59.09703178754556 -4.339158212148126 M-59.335982276581774 -3.112197953150895 C-59.25937882994565 -3.5055404614831955, -59.18277538330953 -3.898882969815496, -59.09703178754556 -4.339158212148126 M-59.09703178754556 -4.339158212148126 C-59.00333420918213 -4.696467687235249, -58.90963663081869 -5.053777162322373, -58.77996063421489 -5.548287939305123 M-59.09703178754556 -4.339158212148126 C-59.02376993121716 -4.618537422217718, -58.95050807488875 -4.89791663228731, -58.77996063421489 -5.548287939305123 M-58.77996063421489 -5.548287939305123 C-58.65346705361815 -5.929266450047391, -58.526973473021414 -6.310244960789658, -58.38607172997033 -6.734618561215485 M-58.77996063421489 -5.548287939305123 C-58.67239059349167 
-5.872271763004971, -58.56482055276846 -6.196255586704819, -58.38607172997033 -6.734618561215485 M-58.38607172997033 -6.734618561215485 C-58.2577877279681 -7.051482526363449, -58.12950372596587 -7.368346491511413, -57.91698364880834 -7.893275190886676 M-58.38607172997033 -6.734618561215485 C-58.27008152644217 -7.02111660644807, -58.15409132291401 -7.307614651680654, -57.91698364880834 -7.893275190886676 M-57.91698364880834 -7.893275190886676 C-57.7158143320167 -8.311007581538355, -57.514645015225064 -8.728739972190036, -57.374623974273504 -9.019496659696282 M-57.91698364880834 -7.893275190886676 C-57.7447796759329 -8.250860425137303, -57.57257570305745 -8.608445659387929, -57.374623974273504 -9.019496659696282 M-57.374623974273504 -9.019496659696282 C-57.1511583251656 -9.416282548038065, -56.9276926760577 -9.813068436379847, -56.76122137860425 -10.108655082055243 M-57.374623974273504 -9.019496659696282 C-57.24768621623189 -9.244887502940989, -57.12074845819028 -9.470278346185694, -56.76122137860425 -10.108655082055243 M-56.76122137860425 -10.108655082055243 C-56.61515361567805 -10.333054390596205, -56.46908585275185 -10.557453699137167, -56.07929646464064 -11.156274872382308 M-56.76122137860425 -10.108655082055243 C-56.58811337299077 -10.37459546468571, -56.41500536737729 -10.640535847316174, -56.07929646464064 -11.156274872382308 M-56.07929646464064 -11.156274872382308 C-55.80680712015578 -11.521385769702059, -55.53431777567092 -11.88649666702181, -55.33165140812659 -12.158051136245302 M-56.07929646464064 -11.156274872382308 C-55.895314538031066 -11.402793892940815, -55.7113326114215 -11.649312913499324, -55.33165140812659 -12.158051136245302 M-55.33165140812659 -12.158051136245302 C-55.0718616106293 -12.463215014843865, -54.81207181313201 -12.76837889344243, -54.52135844296866 -13.10986736009567 M-55.33165140812659 -12.158051136245302 C-55.147030467586646 -12.374917398232636, -54.9624095270467 -12.59178366021997, -54.52135844296866 -13.10986736009567 
M-54.52135844296866 -13.10986736009567 C-54.33449366698505 -13.302820583182177, -54.14762889100144 -13.495773806268684, -53.651747236767996 -14.007812326905677 M-54.52135844296866 -13.10986736009567 C-54.21642360417724 -13.42473761676368, -53.91148876538581 -13.73960787343169, -53.651747236767996 -14.007812326905677 M-53.651747236767996 -14.007812326905677 C-53.38864807246125 -14.246752026706641, -53.1255489081545 -14.485691726507605, -52.72639120850189 -14.848196188198107 M-53.651747236767996 -14.007812326905677 C-53.30570125137555 -14.322082134964049, -52.9596552659831 -14.636351943022422, -52.72639120850189 -14.848196188198107 M-52.72639120850189 -14.848196188198107 C-52.34877277844375 -15.149336837321021, -51.971154348385625 -15.450477486443933, -51.74909284457872 -15.627565626425149 M-52.72639120850189 -14.848196188198107 C-52.36176612068253 -15.138974992652953, -51.99714103286318 -15.429753797107796, -51.74909284457872 -15.627565626425149 M-51.74909284457872 -15.627565626425149 C-51.43635491052799 -15.845718071011973, -51.12361697647726 -16.063870515598797, -50.723868073605715 -16.342718045390885 M-51.74909284457872 -15.627565626425149 C-51.34885362066463 -15.906755182950866, -50.948614396750536 -16.185944739476582, -50.723868073605715 -16.342718045390885 M-50.723868073605715 -16.342718045390885 C-50.34708736107953 -16.571124727533498, -49.97030664855335 -16.79953140967611, -49.65492976407679 -16.99071473040609 M-50.723868073605715 -16.342718045390885 C-50.44872276583899 -16.50951274789238, -50.17357745807226 -16.676307450393878, -49.65492976407679 -16.99071473040609 M-49.65492976407679 -16.99071473040609 C-49.357176023618855 -17.14605266608262, -49.05942228316092 -17.301390601759156, -48.54667041279239 -17.56889292409717 M-49.65492976407679 -16.99071473040609 C-49.2426018307248 -17.205825949127178, -48.830273897372805 -17.420937167848265, -48.54667041279239 -17.56889292409717 M-48.54667041279239 -17.56889292409717 C-48.216288423424984 -17.715143230356613, 
-47.88590643405758 -17.86139353661606, -47.403644095147804 -18.07487676824742 M-48.54667041279239 -17.56889292409717 C-48.11967606195327 -17.757910656876266, -47.692681711114155 -17.946928389655362, -47.403644095147804 -18.07487676824742 M-47.403644095147804 -18.07487676824742 C-47.03997845568663 -18.208709085257453, -46.676312816225455 -18.342541402267486, -46.23054775146062 -18.506587066708033 M-47.403644095147804 -18.07487676824742 C-47.05592072652689 -18.2028421822195, -46.70819735790599 -18.330807596191576, -46.23054775146062 -18.506587066708033 M-46.23054775146062 -18.506587066708033 C-45.80498725151005 -18.632891189280713, -45.37942675155948 -18.759195311853397, -45.03220188623541 -18.862249829261067 M-46.23054775146062 -18.506587066708033 C-45.76850299894176 -18.643719523933232, -45.306458246422906 -18.78085198115843, -45.03220188623541 -18.862249829261067 M-45.03220188623541 -18.862249829261067 C-44.703269789737305 -18.937326433804234, -44.37433769323919 -19.012403038347397, -43.813530759676766 -19.140403561325773 M-45.03220188623541 -18.862249829261067 C-44.560306956991475 -18.969956767458797, -44.08841202774754 -19.07766370565653, -43.813530759676766 -19.140403561325773 M-43.813530759676766 -19.140403561325773 C-43.388538714435626 -19.209112979337828, -42.963546669194486 -19.27782239734988, -42.57954215284788 -19.3399052695533 M-43.813530759676766 -19.140403561325773 C-43.39802896213665 -19.207578669687496, -42.982527164596526 -19.274753778049217, -42.57954215284788 -19.3399052695533 M-42.57954215284788 -19.3399052695533 C-42.08585320406919 -19.38753084869415, -41.592164255290506 -19.435156427835, -41.3353067896239 -19.45993515863156 M-42.57954215284788 -19.3399052695533 C-42.24554887101387 -19.372125199766135, -41.911555589179855 -19.404345129978974, -41.3353067896239 -19.45993515863156 M-41.3353067896239 -19.45993515863156 C-40.840103398786 -19.475815367520802, -40.344900007948105 -19.49169557641005, -40.08593750000001 -19.5 M-41.3353067896239 
-19.45993515863156 C-40.99816679259777 -19.470746582132993, -40.661026795571644 -19.481558005634426, -40.08593750000001 -19.5 M-40.08593750000001 -19.5 C-40.08593750000001 -19.5, -40.0859375 -19.5, -40.0859375 -19.5 M-40.08593750000001 -19.5 C-40.08593750000001 -19.5, -40.08593750000001 -19.5, -40.0859375 -19.5" stroke="#111827" stroke-width="1.5" fill="none" stroke-dasharray="0 0" style="fill:#1f2937 !important;stroke:#111827 !important;stroke-width:1.5px !important"/></g><g class="label" style="color:#f8fafc !important" transform="translate(-47.2109375, -12)"><rect/><foreignObject width="94.421875" height="24"><div style="color: rgb(248, 250, 252) !important; display: table-cell; white-space: nowrap; line-height: 1.5; max-width: 200px; text-align: center;" xmlns="http://www.w3.org/1999/xhtml"><span style="color:#f8fafc !important" class="nodeLabel"><p>dev.kit --json</p></span></div></foreignObject></g></g><g class="node default" id="flowchart-Audit-3" transform="translate(371.04246520996094, 35)"><rect class="basic label-container" style="fill:#eef4f8 !important;stroke:#7a8b99 !important;stroke-width:1.5px !important" x="-49" y="-27" width="98" height="54"/><g class="label" style="color:#23313b !important" transform="translate(-19, -12)"><rect/><foreignObject width="38" height="24"><div style="color: rgb(35, 49, 59) !important; display: table-cell; white-space: nowrap; line-height: 1.5; max-width: 200px; text-align: center;" xmlns="http://www.w3.org/1999/xhtml"><span style="color:#23313b !important" class="nodeLabel"><p>Audit</p></span></div></foreignObject></g></g><g class="node default" id="flowchart-Gaps-5" transform="translate(517.3549652099609, 35)"><rect class="basic label-container" style="fill:#f8fafc !important;stroke:#c7d0d8 !important;stroke-width:1.5px !important" x="-47.3125" y="-27" width="94.625" height="54"/><g class="label" style="color:#23313b !important" transform="translate(-17.3125, -12)"><rect/><foreignObject width="34.625" height="24"><div 
style="color: rgb(35, 49, 59) !important; display: table-cell; white-space: nowrap; line-height: 1.5; max-width: 200px; text-align: center;" xmlns="http://www.w3.org/1999/xhtml"><span style="color:#23313b !important" class="nodeLabel"><p>Gaps</p></span></div></foreignObject></g></g></g></g></g></svg> \ No newline at end of file diff --git a/assets/compliance-improve.svg b/assets/compliance-improve.svg new file mode 100644 index 0000000..dfd06f6 --- /dev/null +++ b/assets/compliance-improve.svg @@ -0,0 +1 @@ +<svg id="my-svg" width="100%" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" class="flowchart" style="max-width: 545.547px; background-color: white;" viewBox="0 0 545.546875 70" role="graphics-document document" aria-roledescription="flowchart-v2"><style>#my-svg{font-size:16px;fill:#333;}@keyframes edge-animation-frame{from{stroke-dashoffset:0;}}@keyframes dash{to{stroke-dashoffset:0;}}#my-svg .edge-animation-slow{stroke-dasharray:9,5!important;stroke-dashoffset:900;animation:dash 50s linear infinite;stroke-linecap:round;}#my-svg .edge-animation-fast{stroke-dasharray:9,5!important;stroke-dashoffset:900;animation:dash 20s linear infinite;stroke-linecap:round;}#my-svg .error-icon{fill:#552222;}#my-svg .error-text{fill:#552222;stroke:#552222;}#my-svg .edge-thickness-normal{stroke-width:1px;}#my-svg .edge-thickness-thick{stroke-width:3.5px;}#my-svg .edge-pattern-solid{stroke-dasharray:0;}#my-svg .edge-thickness-invisible{stroke-width:0;fill:none;}#my-svg .edge-pattern-dashed{stroke-dasharray:3;}#my-svg .edge-pattern-dotted{stroke-dasharray:2;}#my-svg .marker{fill:#7a8b99;stroke:#7a8b99;}#my-svg .marker.cross{stroke:#7a8b99;}#my-svg svg{font-size:16px;}#my-svg p{margin:0;}#my-svg .label{color:#333;}#my-svg .cluster-label text{fill:#333;}#my-svg .cluster-label span{color:#333;}#my-svg .cluster-label span p{background-color:transparent;}#my-svg .label text,#my-svg span{fill:#333;color:#333;}#my-svg .node rect,#my-svg .node circle,#my-svg 
.node ellipse,#my-svg .node polygon,#my-svg .node path{fill:#ECECFF;stroke:#9370DB;stroke-width:1px;}#my-svg .rough-node .label text,#my-svg .node .label text,#my-svg .image-shape .label,#my-svg .icon-shape .label{text-anchor:middle;}#my-svg .node .katex path{fill:#000;stroke:#000;stroke-width:1px;}#my-svg .rough-node .label,#my-svg .node .label,#my-svg .image-shape .label,#my-svg .icon-shape .label{text-align:center;}#my-svg .node.clickable{cursor:pointer;}#my-svg .root .anchor path{fill:#7a8b99!important;stroke-width:0;stroke:#7a8b99;}#my-svg .arrowheadPath{fill:#333333;}#my-svg .edgePath .path{stroke:#7a8b99;stroke-width:2.0px;}#my-svg .flowchart-link{stroke:#7a8b99;fill:none;}#my-svg .edgeLabel{background-color:rgba(232,232,232, 0.8);text-align:center;}#my-svg .edgeLabel p{background-color:rgba(232,232,232, 0.8);}#my-svg .edgeLabel rect{opacity:0.5;background-color:rgba(232,232,232, 0.8);fill:rgba(232,232,232, 0.8);}#my-svg .labelBkg{background-color:rgba(232, 232, 232, 0.5);}#my-svg .cluster rect{fill:#ffffde;stroke:#aaaa33;stroke-width:1px;}#my-svg .cluster text{fill:#333;}#my-svg .cluster span{color:#333;}#my-svg div.mermaidTooltip{position:absolute;text-align:center;max-width:200px;padding:2px;font-size:12px;background:#f8fafc;border:1px solid #aaaa33;border-radius:2px;pointer-events:none;z-index:100;}#my-svg .flowchartTitleText{text-anchor:middle;font-size:18px;fill:#333;}#my-svg rect.text{fill:none;stroke-width:0;}#my-svg .icon-shape,#my-svg .image-shape{background-color:rgba(232,232,232, 0.8);text-align:center;}#my-svg .icon-shape p,#my-svg .image-shape p{background-color:rgba(232,232,232, 0.8);padding:2px;}#my-svg .icon-shape rect,#my-svg .image-shape rect{opacity:0.5;background-color:rgba(232,232,232, 0.8);fill:rgba(232,232,232, 0.8);}#my-svg .label-icon{display:inline-block;height:1em;overflow:visible;vertical-align:-0.125em;}#my-svg .node .label-icon path{fill:currentColor;stroke:revert;stroke-width:revert;}#my-svg 
:root{--mermaid-font-family:"trebuchet ms",verdana,arial,sans-serif;}</style><g><marker id="my-svg_flowchart-v2-pointEnd" class="marker flowchart-v2" viewBox="0 0 10 10" refX="5" refY="5" markerUnits="userSpaceOnUse" markerWidth="8" markerHeight="8" orient="auto"><path d="M 0 0 L 10 5 L 0 10 z" class="arrowMarkerPath" style="stroke-width: 1; stroke-dasharray: 1, 0;"/></marker><marker id="my-svg_flowchart-v2-pointStart" class="marker flowchart-v2" viewBox="0 0 10 10" refX="4.5" refY="5" markerUnits="userSpaceOnUse" markerWidth="8" markerHeight="8" orient="auto"><path d="M 0 5 L 10 10 L 10 0 z" class="arrowMarkerPath" style="stroke-width: 1; stroke-dasharray: 1, 0;"/></marker><marker id="my-svg_flowchart-v2-circleEnd" class="marker flowchart-v2" viewBox="0 0 10 10" refX="11" refY="5" markerUnits="userSpaceOnUse" markerWidth="11" markerHeight="11" orient="auto"><circle cx="5" cy="5" r="5" class="arrowMarkerPath" style="stroke-width: 1; stroke-dasharray: 1, 0;"/></marker><marker id="my-svg_flowchart-v2-circleStart" class="marker flowchart-v2" viewBox="0 0 10 10" refX="-1" refY="5" markerUnits="userSpaceOnUse" markerWidth="11" markerHeight="11" orient="auto"><circle cx="5" cy="5" r="5" class="arrowMarkerPath" style="stroke-width: 1; stroke-dasharray: 1, 0;"/></marker><marker id="my-svg_flowchart-v2-crossEnd" class="marker cross flowchart-v2" viewBox="0 0 11 11" refX="12" refY="5.2" markerUnits="userSpaceOnUse" markerWidth="11" markerHeight="11" orient="auto"><path d="M 1,1 l 9,9 M 10,1 l -9,9" class="arrowMarkerPath" style="stroke-width: 2; stroke-dasharray: 1, 0;"/></marker><marker id="my-svg_flowchart-v2-crossStart" class="marker cross flowchart-v2" viewBox="0 0 11 11" refX="-1" refY="5.2" markerUnits="userSpaceOnUse" markerWidth="11" markerHeight="11" orient="auto"><path d="M 1,1 l 9,9 M 10,1 l -9,9" class="arrowMarkerPath" style="stroke-width: 2; stroke-dasharray: 1, 0;"/></marker><g class="root"><g class="clusters"/><g class="edgePaths"><path 
d="M102.625,35L106.792,35C110.958,35,119.292,35,126.958,35C134.625,35,141.625,35,145.125,35L148.625,35" id="L_Gaps_Plan_0" class="edge-thickness-normal edge-pattern-solid edge-thickness-normal edge-pattern-solid flowchart-link" style=";" data-edge="true" data-et="edge" data-id="L_Gaps_Plan_0" data-points="W3sieCI6MTAyLjYyNSwieSI6MzV9LHsieCI6MTI3LjYyNSwieSI6MzV9LHsieCI6MTUyLjYyNSwieSI6MzV9XQ==" marker-end="url(#my-svg_flowchart-v2-pointEnd)"/><path d="M243.422,35L247.589,35C251.755,35,260.089,35,267.755,35C275.422,35,282.422,35,285.922,35L289.422,35" id="L_Plan_Fix_0" class="edge-thickness-normal edge-pattern-solid edge-thickness-normal edge-pattern-solid flowchart-link" style=";" data-edge="true" data-et="edge" data-id="L_Plan_Fix_0" data-points="W3sieCI6MjQzLjQyMTg3NSwieSI6MzV9LHsieCI6MjY4LjQyMTg3NSwieSI6MzV9LHsieCI6MjkzLjQyMTg3NSwieSI6MzV9XQ==" marker-end="url(#my-svg_flowchart-v2-pointEnd)"/><path d="M374.406,35L378.573,35C382.74,35,391.073,35,398.74,35C406.406,35,413.406,35,416.906,35L420.406,35" id="L_Fix_Result_0" class="edge-thickness-normal edge-pattern-solid edge-thickness-normal edge-pattern-solid flowchart-link" style=";" data-edge="true" data-et="edge" data-id="L_Fix_Result_0" data-points="W3sieCI6Mzc0LjQwNjI1LCJ5IjozNX0seyJ4IjozOTkuNDA2MjUsInkiOjM1fSx7IngiOjQyNC40MDYyNSwieSI6MzV9XQ==" marker-end="url(#my-svg_flowchart-v2-pointEnd)"/></g><g class="edgeLabels"><g class="edgeLabel"><g class="label" data-id="L_Gaps_Plan_0" transform="translate(0, 0)"><foreignObject width="0" height="0"><div xmlns="http://www.w3.org/1999/xhtml" class="labelBkg" style="display: table-cell; white-space: nowrap; line-height: 1.5; max-width: 200px; text-align: center;"><span class="edgeLabel"></span></div></foreignObject></g></g><g class="edgeLabel"><g class="label" data-id="L_Plan_Fix_0" transform="translate(0, 0)"><foreignObject width="0" height="0"><div xmlns="http://www.w3.org/1999/xhtml" class="labelBkg" style="display: table-cell; white-space: nowrap; line-height: 1.5; 
max-width: 200px; text-align: center;"><span class="edgeLabel"></span></div></foreignObject></g></g><g class="edgeLabel"><g class="label" data-id="L_Fix_Result_0" transform="translate(0, 0)"><foreignObject width="0" height="0"><div xmlns="http://www.w3.org/1999/xhtml" class="labelBkg" style="display: table-cell; white-space: nowrap; line-height: 1.5; max-width: 200px; text-align: center;"><span class="edgeLabel"></span></div></foreignObject></g></g></g><g class="nodes"><g class="node default" id="flowchart-Gaps-0" transform="translate(55.3125, 35)"><rect class="basic label-container" style="fill:#f8fafc !important;stroke:#c7d0d8 !important;stroke-width:1.5px !important" x="-47.3125" y="-27" width="94.625" height="54"/><g class="label" style="color:#23313b !important" transform="translate(-17.3125, -12)"><rect/><foreignObject width="34.625" height="24"><div style="color: rgb(35, 49, 59) !important; display: table-cell; white-space: nowrap; line-height: 1.5; max-width: 200px; text-align: center;" xmlns="http://www.w3.org/1999/xhtml"><span style="color:#23313b !important" class="nodeLabel"><p>Gaps</p></span></div></foreignObject></g></g><g class="node default" id="flowchart-Plan-1" transform="translate(198.0234375, 35)"><rect class="basic label-container" style="fill:#eef4f8 !important;stroke:#7a8b99 !important;stroke-width:1.5px !important" x="-45.3984375" y="-27" width="90.796875" height="54"/><g class="label" style="color:#23313b !important" transform="translate(-15.3984375, -12)"><rect/><foreignObject width="30.796875" height="24"><div style="color: rgb(35, 49, 59) !important; display: table-cell; white-space: nowrap; line-height: 1.5; max-width: 200px; text-align: center;" xmlns="http://www.w3.org/1999/xhtml"><span style="color:#23313b !important" class="nodeLabel"><p>Plan</p></span></div></foreignObject></g></g><g class="node default" id="flowchart-Fix-3" transform="translate(333.9140625, 35)"><rect class="basic label-container" style="fill:#eaf2f7 
!important;stroke:#7a8b99 !important;stroke-width:1.5px !important" x="-40.4921875" y="-27" width="80.984375" height="54"/><g class="label" style="color:#23313b !important" transform="translate(-10.4921875, -12)"><rect/><foreignObject width="20.984375" height="24"><div style="color: rgb(35, 49, 59) !important; display: table-cell; white-space: nowrap; line-height: 1.5; max-width: 200px; text-align: center;" xmlns="http://www.w3.org/1999/xhtml"><span style="color:#23313b !important" class="nodeLabel"><p>Fix</p></span></div></foreignObject></g></g><g class="node default" id="flowchart-Result-5" transform="translate(480.9765625, 35)"><rect class="basic label-container" style="fill:#f5f7fa !important;stroke:#a8b3bd !important;stroke-width:1.5px !important" x="-56.5703125" y="-27" width="113.140625" height="54"/><g class="label" style="color:#23313b !important" transform="translate(-26.5703125, -12)"><rect/><foreignObject width="53.140625" height="24"><div style="color: rgb(35, 49, 59) !important; display: table-cell; white-space: nowrap; line-height: 1.5; max-width: 200px; text-align: center;" xmlns="http://www.w3.org/1999/xhtml"><span style="color:#23313b !important" class="nodeLabel"><p>Aligned</p></span></div></foreignObject></g></g></g></g></g></svg> \ No newline at end of file diff --git a/assets/dev-kit-bridge.svg b/assets/dev-kit-bridge.svg new file mode 100644 index 0000000..57a8d8a --- /dev/null +++ b/assets/dev-kit-bridge.svg @@ -0,0 +1 @@ +<svg id="my-svg" width="100%" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" style="max-width: 730px; background-color: white;" viewBox="-50 -10 730 391" role="graphics-document document" aria-roledescription="sequence"><g><rect x="480" y="305" fill="#eaeaea" stroke="#666" width="150" height="65" name="Agent" rx="3" ry="3" class="actor actor-bottom"/><text x="555" y="337.5" dominant-baseline="central" alignment-baseline="central" class="actor actor-box" style="text-anchor: middle; font-size: 
16px; font-weight: 400;"><tspan x="555" dy="0">Agent Runtime</tspan></text></g><g><rect x="227" y="305" fill="#eaeaea" stroke="#666" width="150" height="65" name="Bridge" rx="3" ry="3" class="actor actor-bottom"/><text x="302" y="337.5" dominant-baseline="central" alignment-baseline="central" class="actor actor-box" style="text-anchor: middle; font-size: 16px; font-weight: 400;"><tspan x="302" dy="0">dev.kit bridge</tspan></text></g><g><rect x="0" y="305" fill="#eaeaea" stroke="#666" width="150" height="65" name="Repo" rx="3" ry="3" class="actor actor-bottom"/><text x="75" y="337.5" dominant-baseline="central" alignment-baseline="central" class="actor actor-box" style="text-anchor: middle; font-size: 16px; font-weight: 400;"><tspan x="75" dy="0">Repository</tspan></text></g><g><line id="actor2" x1="555" y1="65" x2="555" y2="305" class="actor-line 200" stroke-width="0.5px" stroke="#999" name="Agent"/><g id="root-2"><rect x="480" y="0" fill="#eaeaea" stroke="#666" width="150" height="65" name="Agent" rx="3" ry="3" class="actor actor-top"/><text x="555" y="32.5" dominant-baseline="central" alignment-baseline="central" class="actor actor-box" style="text-anchor: middle; font-size: 16px; font-weight: 400;"><tspan x="555" dy="0">Agent Runtime</tspan></text></g></g><g><line id="actor1" x1="302" y1="65" x2="302" y2="305" class="actor-line 200" stroke-width="0.5px" stroke="#999" name="Bridge"/><g id="root-1"><rect x="227" y="0" fill="#eaeaea" stroke="#666" width="150" height="65" name="Bridge" rx="3" ry="3" class="actor actor-top"/><text x="302" y="32.5" dominant-baseline="central" alignment-baseline="central" class="actor actor-box" style="text-anchor: middle; font-size: 16px; font-weight: 400;"><tspan x="302" dy="0">dev.kit bridge</tspan></text></g></g><g><line id="actor0" x1="75" y1="65" x2="75" y2="305" class="actor-line 200" stroke-width="0.5px" stroke="#999" name="Repo"/><g id="root-0"><rect x="0" y="0" fill="#eaeaea" stroke="#666" width="150" height="65" name="Repo" 
rx="3" ry="3" class="actor actor-top"/><text x="75" y="32.5" dominant-baseline="central" alignment-baseline="central" class="actor actor-box" style="text-anchor: middle; font-size: 16px; font-weight: 400;"><tspan x="75" dy="0">Repository</tspan></text></g></g><style>#my-svg{font-family:"trebuchet ms",verdana,arial,sans-serif;font-size:16px;fill:#333;}@keyframes edge-animation-frame{from{stroke-dashoffset:0;}}@keyframes dash{to{stroke-dashoffset:0;}}#my-svg .edge-animation-slow{stroke-dasharray:9,5!important;stroke-dashoffset:900;animation:dash 50s linear infinite;stroke-linecap:round;}#my-svg .edge-animation-fast{stroke-dasharray:9,5!important;stroke-dashoffset:900;animation:dash 20s linear infinite;stroke-linecap:round;}#my-svg .error-icon{fill:#552222;}#my-svg .error-text{fill:#552222;stroke:#552222;}#my-svg .edge-thickness-normal{stroke-width:1px;}#my-svg .edge-thickness-thick{stroke-width:3.5px;}#my-svg .edge-pattern-solid{stroke-dasharray:0;}#my-svg .edge-thickness-invisible{stroke-width:0;fill:none;}#my-svg .edge-pattern-dashed{stroke-dasharray:3;}#my-svg .edge-pattern-dotted{stroke-dasharray:2;}#my-svg .marker{fill:#333333;stroke:#333333;}#my-svg .marker.cross{stroke:#333333;}#my-svg svg{font-family:"trebuchet ms",verdana,arial,sans-serif;font-size:16px;}#my-svg p{margin:0;}#my-svg .actor{stroke:#355070;fill:#d8ecff;}#my-svg text.actor>tspan{fill:#1f2937;stroke:none;}#my-svg .actor-line{stroke:hsl(259.6261682243, 59.7765363128%, 87.9019607843%);}#my-svg .innerArc{stroke-width:1.5;stroke-dasharray:none;}#my-svg .messageLine0{stroke-width:1.5;stroke-dasharray:none;stroke:#355070;}#my-svg .messageLine1{stroke-width:1.5;stroke-dasharray:2,2;stroke:#355070;}#my-svg #arrowhead path{fill:#355070;stroke:#355070;}#my-svg .sequenceNumber{fill:white;}#my-svg #sequencenumber{fill:#355070;}#my-svg #crosshead path{fill:#355070;stroke:#355070;}#my-svg .messageText{fill:#1f2937;stroke:none;}#my-svg .labelBox{stroke:#8a6d1d;fill:#fff4cc;}#my-svg .labelText,#my-svg 
.labelText>tspan{fill:#3b2f0b;stroke:none;}#my-svg .loopText,#my-svg .loopText>tspan{fill:black;stroke:none;}#my-svg .loopLine{stroke-width:2px;stroke-dasharray:2,2;stroke:#8a6d1d;fill:#8a6d1d;}#my-svg .note{stroke:#b85c5c;fill:#ffe3e3;}#my-svg .noteText,#my-svg .noteText>tspan{fill:black;stroke:none;}#my-svg .activation0{fill:#f4f4f4;stroke:#666;}#my-svg .activation1{fill:#f4f4f4;stroke:#666;}#my-svg .activation2{fill:#f4f4f4;stroke:#666;}#my-svg .actorPopupMenu{position:absolute;}#my-svg .actorPopupMenuPanel{position:absolute;fill:#d8ecff;box-shadow:0px 8px 16px 0px rgba(0,0,0,0.2);filter:drop-shadow(3px 5px 2px rgb(0 0 0 / 0.4));}#my-svg .actor-man line{stroke:#355070;fill:#d8ecff;}#my-svg .actor-man circle,#my-svg line{stroke:#355070;fill:#d8ecff;stroke-width:2px;}#my-svg :root{--mermaid-font-family:"trebuchet ms",verdana,arial,sans-serif;}</style><g/><defs><symbol id="computer" width="24" height="24"><path transform="scale(.5)" d="M2 2v13h20v-13h-20zm18 11h-16v-9h16v9zm-10.228 6l.466-1h3.524l.467 1h-4.457zm14.228 3h-24l2-6h2.104l-1.33 4h18.45l-1.297-4h2.073l2 6zm-5-10h-14v-7h14v7z"/></symbol></defs><defs><symbol id="database" fill-rule="evenodd" clip-rule="evenodd"><path transform="scale(.5)" 
d="M12.258.001l.256.004.255.005.253.008.251.01.249.012.247.015.246.016.242.019.241.02.239.023.236.024.233.027.231.028.229.031.225.032.223.034.22.036.217.038.214.04.211.041.208.043.205.045.201.046.198.048.194.05.191.051.187.053.183.054.18.056.175.057.172.059.168.06.163.061.16.063.155.064.15.066.074.033.073.033.071.034.07.034.069.035.068.035.067.035.066.035.064.036.064.036.062.036.06.036.06.037.058.037.058.037.055.038.055.038.053.038.052.038.051.039.05.039.048.039.047.039.045.04.044.04.043.04.041.04.04.041.039.041.037.041.036.041.034.041.033.042.032.042.03.042.029.042.027.042.026.043.024.043.023.043.021.043.02.043.018.044.017.043.015.044.013.044.012.044.011.045.009.044.007.045.006.045.004.045.002.045.001.045v17l-.001.045-.002.045-.004.045-.006.045-.007.045-.009.044-.011.045-.012.044-.013.044-.015.044-.017.043-.018.044-.02.043-.021.043-.023.043-.024.043-.026.043-.027.042-.029.042-.03.042-.032.042-.033.042-.034.041-.036.041-.037.041-.039.041-.04.041-.041.04-.043.04-.044.04-.045.04-.047.039-.048.039-.05.039-.051.039-.052.038-.053.038-.055.038-.055.038-.058.037-.058.037-.06.037-.06.036-.062.036-.064.036-.064.036-.066.035-.067.035-.068.035-.069.035-.07.034-.071.034-.073.033-.074.033-.15.066-.155.064-.16.063-.163.061-.168.06-.172.059-.175.057-.18.056-.183.054-.187.053-.191.051-.194.05-.198.048-.201.046-.205.045-.208.043-.211.041-.214.04-.217.038-.22.036-.223.034-.225.032-.229.031-.231.028-.233.027-.236.024-.239.023-.241.02-.242.019-.246.016-.247.015-.249.012-.251.01-.253.008-.255.005-.256.004-.258.001-.258-.001-.256-.004-.255-.005-.253-.008-.251-.01-.249-.012-.247-.015-.245-.016-.243-.019-.241-.02-.238-.023-.236-.024-.234-.027-.231-.028-.228-.031-.226-.032-.223-.034-.22-.036-.217-.038-.214-.04-.211-.041-.208-.043-.204-.045-.201-.046-.198-.048-.195-.05-.19-.051-.187-.053-.184-.054-.179-.056-.176-.057-.172-.059-.167-.06-.164-.061-.159-.063-.155-.064-.151-.066-.074-.033-.072-.033-.072-.034-.07-.034-.069-.035-.068-.035-.067-.035-.066-.035-.064-.036-.063-.036-.062-.036-.061-.036
-.06-.037-.058-.037-.057-.037-.056-.038-.055-.038-.053-.038-.052-.038-.051-.039-.049-.039-.049-.039-.046-.039-.046-.04-.044-.04-.043-.04-.041-.04-.04-.041-.039-.041-.037-.041-.036-.041-.034-.041-.033-.042-.032-.042-.03-.042-.029-.042-.027-.042-.026-.043-.024-.043-.023-.043-.021-.043-.02-.043-.018-.044-.017-.043-.015-.044-.013-.044-.012-.044-.011-.045-.009-.044-.007-.045-.006-.045-.004-.045-.002-.045-.001-.045v-17l.001-.045.002-.045.004-.045.006-.045.007-.045.009-.044.011-.045.012-.044.013-.044.015-.044.017-.043.018-.044.02-.043.021-.043.023-.043.024-.043.026-.043.027-.042.029-.042.03-.042.032-.042.033-.042.034-.041.036-.041.037-.041.039-.041.04-.041.041-.04.043-.04.044-.04.046-.04.046-.039.049-.039.049-.039.051-.039.052-.038.053-.038.055-.038.056-.038.057-.037.058-.037.06-.037.061-.036.062-.036.063-.036.064-.036.066-.035.067-.035.068-.035.069-.035.07-.034.072-.034.072-.033.074-.033.151-.066.155-.064.159-.063.164-.061.167-.06.172-.059.176-.057.179-.056.184-.054.187-.053.19-.051.195-.05.198-.048.201-.046.204-.045.208-.043.211-.041.214-.04.217-.038.22-.036.223-.034.226-.032.228-.031.231-.028.234-.027.236-.024.238-.023.241-.02.243-.019.245-.016.247-.015.249-.012.251-.01.253-.008.255-.005.256-.004.258-.001.258.001zm-9.258 
20.499v.01l.001.021.003.021.004.022.005.021.006.022.007.022.009.023.01.022.011.023.012.023.013.023.015.023.016.024.017.023.018.024.019.024.021.024.022.025.023.024.024.025.052.049.056.05.061.051.066.051.07.051.075.051.079.052.084.052.088.052.092.052.097.052.102.051.105.052.11.052.114.051.119.051.123.051.127.05.131.05.135.05.139.048.144.049.147.047.152.047.155.047.16.045.163.045.167.043.171.043.176.041.178.041.183.039.187.039.19.037.194.035.197.035.202.033.204.031.209.03.212.029.216.027.219.025.222.024.226.021.23.02.233.018.236.016.24.015.243.012.246.01.249.008.253.005.256.004.259.001.26-.001.257-.004.254-.005.25-.008.247-.011.244-.012.241-.014.237-.016.233-.018.231-.021.226-.021.224-.024.22-.026.216-.027.212-.028.21-.031.205-.031.202-.034.198-.034.194-.036.191-.037.187-.039.183-.04.179-.04.175-.042.172-.043.168-.044.163-.045.16-.046.155-.046.152-.047.148-.048.143-.049.139-.049.136-.05.131-.05.126-.05.123-.051.118-.052.114-.051.11-.052.106-.052.101-.052.096-.052.092-.052.088-.053.083-.051.079-.052.074-.052.07-.051.065-.051.06-.051.056-.05.051-.05.023-.024.023-.025.021-.024.02-.024.019-.024.018-.024.017-.024.015-.023.014-.024.013-.023.012-.023.01-.023.01-.022.008-.022.006-.022.006-.022.004-.022.004-.021.001-.021.001-.021v-4.127l-.077.055-.08.053-.083.054-.085.053-.087.052-.09.052-.093.051-.095.05-.097.05-.1.049-.102.049-.105.048-.106.047-.109.047-.111.046-.114.045-.115.045-.118.044-.12.043-.122.042-.124.042-.126.041-.128.04-.13.04-.132.038-.134.038-.135.037-.138.037-.139.035-.142.035-.143.034-.144.033-.147.032-.148.031-.15.03-.151.03-.153.029-.154.027-.156.027-.158.026-.159.025-.161.024-.162.023-.163.022-.165.021-.166.02-.167.019-.169.018-.169.017-.171.016-.173.015-.173.014-.175.013-.175.012-.177.011-.178.01-.179.008-.179.008-.181.006-.182.005-.182.004-.184.003-.184.002h-.37l-.184-.002-.184-.003-.182-.004-.182-.005-.181-.006-.179-.008-.179-.008-.178-.01-.176-.011-.176-.012-.175-.013-.173-.014-.172-.015-.171-.016-.17-.017-.169-.018-.167-.019-.166-.02-.165-.021-.163-.022
-.162-.023-.161-.024-.159-.025-.157-.026-.156-.027-.155-.027-.153-.029-.151-.03-.15-.03-.148-.031-.146-.032-.145-.033-.143-.034-.141-.035-.14-.035-.137-.037-.136-.037-.134-.038-.132-.038-.13-.04-.128-.04-.126-.041-.124-.042-.122-.042-.12-.044-.117-.043-.116-.045-.113-.045-.112-.046-.109-.047-.106-.047-.105-.048-.102-.049-.1-.049-.097-.05-.095-.05-.093-.052-.09-.051-.087-.052-.085-.053-.083-.054-.08-.054-.077-.054v4.127zm0-5.654v.011l.001.021.003.021.004.021.005.022.006.022.007.022.009.022.01.022.011.023.012.023.013.023.015.024.016.023.017.024.018.024.019.024.021.024.022.024.023.025.024.024.052.05.056.05.061.05.066.051.07.051.075.052.079.051.084.052.088.052.092.052.097.052.102.052.105.052.11.051.114.051.119.052.123.05.127.051.131.05.135.049.139.049.144.048.147.048.152.047.155.046.16.045.163.045.167.044.171.042.176.042.178.04.183.04.187.038.19.037.194.036.197.034.202.033.204.032.209.03.212.028.216.027.219.025.222.024.226.022.23.02.233.018.236.016.24.014.243.012.246.01.249.008.253.006.256.003.259.001.26-.001.257-.003.254-.006.25-.008.247-.01.244-.012.241-.015.237-.016.233-.018.231-.02.226-.022.224-.024.22-.025.216-.027.212-.029.21-.03.205-.032.202-.033.198-.035.194-.036.191-.037.187-.039.183-.039.179-.041.175-.042.172-.043.168-.044.163-.045.16-.045.155-.047.152-.047.148-.048.143-.048.139-.05.136-.049.131-.05.126-.051.123-.051.118-.051.114-.052.11-.052.106-.052.101-.052.096-.052.092-.052.088-.052.083-.052.079-.052.074-.051.07-.052.065-.051.06-.05.056-.051.051-.049.023-.025.023-.024.021-.025.02-.024.019-.024.018-.024.017-.024.015-.023.014-.023.013-.024.012-.022.01-.023.01-.023.008-.022.006-.022.006-.022.004-.021.004-.022.001-.021.001-.021v-4.139l-.077.054-.08.054-.083.054-.085.052-.087.053-.09.051-.093.051-.095.051-.097.05-.1.049-.102.049-.105.048-.106.047-.109.047-.111.046-.114.045-.115.044-.118.044-.12.044-.122.042-.124.042-.126.041-.128.04-.13.039-.132.039-.134.038-.135.037-.138.036-.139.036-.142.035-.143.033-.144.033-.147.033-.148.031-.15.03-.151.03-.153.028-.154.028
-.156.027-.158.026-.159.025-.161.024-.162.023-.163.022-.165.021-.166.02-.167.019-.169.018-.169.017-.171.016-.173.015-.173.014-.175.013-.175.012-.177.011-.178.009-.179.009-.179.007-.181.007-.182.005-.182.004-.184.003-.184.002h-.37l-.184-.002-.184-.003-.182-.004-.182-.005-.181-.007-.179-.007-.179-.009-.178-.009-.176-.011-.176-.012-.175-.013-.173-.014-.172-.015-.171-.016-.17-.017-.169-.018-.167-.019-.166-.02-.165-.021-.163-.022-.162-.023-.161-.024-.159-.025-.157-.026-.156-.027-.155-.028-.153-.028-.151-.03-.15-.03-.148-.031-.146-.033-.145-.033-.143-.033-.141-.035-.14-.036-.137-.036-.136-.037-.134-.038-.132-.039-.13-.039-.128-.04-.126-.041-.124-.042-.122-.043-.12-.043-.117-.044-.116-.044-.113-.046-.112-.046-.109-.046-.106-.047-.105-.048-.102-.049-.1-.049-.097-.05-.095-.051-.093-.051-.09-.051-.087-.053-.085-.052-.083-.054-.08-.054-.077-.054v4.139zm0-5.666v.011l.001.02.003.022.004.021.005.022.006.021.007.022.009.023.01.022.011.023.012.023.013.023.015.023.016.024.017.024.018.023.019.024.021.025.022.024.023.024.024.025.052.05.056.05.061.05.066.051.07.051.075.052.079.051.084.052.088.052.092.052.097.052.102.052.105.051.11.052.114.051.119.051.123.051.127.05.131.05.135.05.139.049.144.048.147.048.152.047.155.046.16.045.163.045.167.043.171.043.176.042.178.04.183.04.187.038.19.037.194.036.197.034.202.033.204.032.209.03.212.028.216.027.219.025.222.024.226.021.23.02.233.018.236.017.24.014.243.012.246.01.249.008.253.006.256.003.259.001.26-.001.257-.003.254-.006.25-.008.247-.01.244-.013.241-.014.237-.016.233-.018.231-.02.226-.022.224-.024.22-.025.216-.027.212-.029.21-.03.205-.032.202-.033.198-.035.194-.036.191-.037.187-.039.183-.039.179-.041.175-.042.172-.043.168-.044.163-.045.16-.045.155-.047.152-.047.148-.048.143-.049.139-.049.136-.049.131-.051.126-.05.123-.051.118-.052.114-.051.11-.052.106-.052.101-.052.096-.052.092-.052.088-.052.083-.052.079-.052.074-.052.07-.051.065-.051.06-.051.056-.05.051-.049.023-.025.023-.025.021-.024.02-.024.019-.024.018-.024.017-.024.015-.023.014-.024.013-.0
23.012-.023.01-.022.01-.023.008-.022.006-.022.006-.022.004-.022.004-.021.001-.021.001-.021v-4.153l-.077.054-.08.054-.083.053-.085.053-.087.053-.09.051-.093.051-.095.051-.097.05-.1.049-.102.048-.105.048-.106.048-.109.046-.111.046-.114.046-.115.044-.118.044-.12.043-.122.043-.124.042-.126.041-.128.04-.13.039-.132.039-.134.038-.135.037-.138.036-.139.036-.142.034-.143.034-.144.033-.147.032-.148.032-.15.03-.151.03-.153.028-.154.028-.156.027-.158.026-.159.024-.161.024-.162.023-.163.023-.165.021-.166.02-.167.019-.169.018-.169.017-.171.016-.173.015-.173.014-.175.013-.175.012-.177.01-.178.01-.179.009-.179.007-.181.006-.182.006-.182.004-.184.003-.184.001-.185.001-.185-.001-.184-.001-.184-.003-.182-.004-.182-.006-.181-.006-.179-.007-.179-.009-.178-.01-.176-.01-.176-.012-.175-.013-.173-.014-.172-.015-.171-.016-.17-.017-.169-.018-.167-.019-.166-.02-.165-.021-.163-.023-.162-.023-.161-.024-.159-.024-.157-.026-.156-.027-.155-.028-.153-.028-.151-.03-.15-.03-.148-.032-.146-.032-.145-.033-.143-.034-.141-.034-.14-.036-.137-.036-.136-.037-.134-.038-.132-.039-.13-.039-.128-.041-.126-.041-.124-.041-.122-.043-.12-.043-.117-.044-.116-.044-.113-.046-.112-.046-.109-.046-.106-.048-.105-.048-.102-.048-.1-.05-.097-.049-.095-.051-.093-.051-.09-.052-.087-.052-.085-.053-.083-.053-.08-.054-.077-.054v4.153zm8.74-8.179l-.257.004-.254.005-.25.008-.247.011-.244.012-.241.014-.237.016-.233.018-.231.021-.226.022-.224.023-.22.026-.216.027-.212.028-.21.031-.205.032-.202.033-.198.034-.194.036-.191.038-.187.038-.183.04-.179.041-.175.042-.172.043-.168.043-.163.045-.16.046-.155.046-.152.048-.148.048-.143.048-.139.049-.136.05-.131.05-.126.051-.123.051-.118.051-.114.052-.11.052-.106.052-.101.052-.096.052-.092.052-.088.052-.083.052-.079.052-.074.051-.07.052-.065.051-.06.05-.056.05-.051.05-.023.025-.023.024-.021.024-.02.025-.019.024-.018.024-.017.023-.015.024-.014.023-.013.023-.012.023-.01.023-.01.022-.008.022-.006.023-.006.021-.004.022-.004.021-.001.021-.001.021.001.021.001.021.004.021.004.022.006.021.006.023.008.02
2.01.022.01.023.012.023.013.023.014.023.015.024.017.023.018.024.019.024.02.025.021.024.023.024.023.025.051.05.056.05.06.05.065.051.07.052.074.051.079.052.083.052.088.052.092.052.096.052.101.052.106.052.11.052.114.052.118.051.123.051.126.051.131.05.136.05.139.049.143.048.148.048.152.048.155.046.16.046.163.045.168.043.172.043.175.042.179.041.183.04.187.038.191.038.194.036.198.034.202.033.205.032.21.031.212.028.216.027.22.026.224.023.226.022.231.021.233.018.237.016.241.014.244.012.247.011.25.008.254.005.257.004.26.001.26-.001.257-.004.254-.005.25-.008.247-.011.244-.012.241-.014.237-.016.233-.018.231-.021.226-.022.224-.023.22-.026.216-.027.212-.028.21-.031.205-.032.202-.033.198-.034.194-.036.191-.038.187-.038.183-.04.179-.041.175-.042.172-.043.168-.043.163-.045.16-.046.155-.046.152-.048.148-.048.143-.048.139-.049.136-.05.131-.05.126-.051.123-.051.118-.051.114-.052.11-.052.106-.052.101-.052.096-.052.092-.052.088-.052.083-.052.079-.052.074-.051.07-.052.065-.051.06-.05.056-.05.051-.05.023-.025.023-.024.021-.024.02-.025.019-.024.018-.024.017-.023.015-.024.014-.023.013-.023.012-.023.01-.023.01-.022.008-.022.006-.023.006-.021.004-.022.004-.021.001-.021.001-.021-.001-.021-.001-.021-.004-.021-.004-.022-.006-.021-.006-.023-.008-.022-.01-.022-.01-.023-.012-.023-.013-.023-.014-.023-.015-.024-.017-.023-.018-.024-.019-.024-.02-.025-.021-.024-.023-.024-.023-.025-.051-.05-.056-.05-.06-.05-.065-.051-.07-.052-.074-.051-.079-.052-.083-.052-.088-.052-.092-.052-.096-.052-.101-.052-.106-.052-.11-.052-.114-.052-.118-.051-.123-.051-.126-.051-.131-.05-.136-.05-.139-.049-.143-.048-.148-.048-.152-.048-.155-.046-.16-.046-.163-.045-.168-.043-.172-.043-.175-.042-.179-.041-.183-.04-.187-.038-.191-.038-.194-.036-.198-.034-.202-.033-.205-.032-.21-.031-.212-.028-.216-.027-.22-.026-.224-.023-.226-.022-.231-.021-.233-.018-.237-.016-.241-.014-.244-.012-.247-.011-.25-.008-.254-.005-.257-.004-.26-.001-.26.001z"/></symbol></defs><defs><symbol id="clock" width="24" height="24"><path transform="scale(.5)" 
d="M12 2c5.514 0 10 4.486 10 10s-4.486 10-10 10-10-4.486-10-10 4.486-10 10-10zm0-2c-6.627 0-12 5.373-12 12s5.373 12 12 12 12-5.373 12-12-5.373-12-12-12zm5.848 12.459c.202.038.202.333.001.372-1.907.361-6.045 1.111-6.547 1.111-.719 0-1.301-.582-1.301-1.301 0-.512.77-5.447 1.125-7.445.034-.192.312-.181.343.014l.985 6.238 5.394 1.011z"/></symbol></defs><defs><marker id="arrowhead" refX="7.9" refY="5" markerUnits="userSpaceOnUse" markerWidth="12" markerHeight="12" orient="auto-start-reverse"><path d="M -1 0 L 10 5 L 0 10 z"/></marker></defs><defs><marker id="crosshead" markerWidth="15" markerHeight="8" orient="auto" refX="4" refY="4.5"><path fill="none" stroke="#000000" stroke-width="1pt" d="M 1,2 L 6,7 M 6,2 L 1,7" style="stroke-dasharray: 0, 0;"/></marker></defs><defs><marker id="filled-head" refX="15.5" refY="7" markerWidth="20" markerHeight="28" orient="auto"><path d="M 18,7 L9,13 L14,7 L9,1 Z"/></marker></defs><defs><marker id="sequencenumber" refX="15" refY="15" markerWidth="60" markerHeight="40" orient="auto"><circle cx="15" cy="15" r="6"/></marker></defs><text x="430" y="80" text-anchor="middle" dominant-baseline="middle" alignment-baseline="middle" class="messageText" dy="1em" style="font-size: 16px; font-weight: 400;">1. Request grounded context</text><line x1="554" y1="109" x2="306" y2="109" class="messageLine0" stroke-width="2" stroke="none" marker-end="url(#arrowhead)" style="fill: none;"/><text x="190" y="124" text-anchor="middle" dominant-baseline="middle" alignment-baseline="middle" class="messageText" dy="1em" style="font-size: 16px; font-weight: 400;">2. Inspect repo and tools</text><line x1="301" y1="153" x2="79" y2="153" class="messageLine0" stroke-width="2" stroke="none" marker-end="url(#arrowhead)" style="fill: none;"/><text x="187" y="168" text-anchor="middle" dominant-baseline="middle" alignment-baseline="middle" class="messageText" dy="1em" style="font-size: 16px; font-weight: 400;">3. 
Return capabilities</text><line x1="76" y1="197" x2="298" y2="197" class="messageLine1" stroke-width="2" stroke="none" marker-end="url(#arrowhead)" style="stroke-dasharray: 3, 3; fill: none;"/><text x="427" y="212" text-anchor="middle" dominant-baseline="middle" alignment-baseline="middle" class="messageText" dy="1em" style="font-size: 16px; font-weight: 400;">4. Return integration map</text><line x1="303" y1="241" x2="551" y2="241" class="messageLine1" stroke-width="2" stroke="none" marker-end="url(#arrowhead)" style="stroke-dasharray: 3, 3; fill: none;"/><text x="317" y="256" text-anchor="middle" dominant-baseline="middle" alignment-baseline="middle" class="messageText" dy="1em" style="font-size: 16px; font-weight: 400;">5. Execute scoped changes</text><line x1="554" y1="285" x2="79" y2="285" class="messageLine0" stroke-width="2" stroke="none" marker-end="url(#arrowhead)" style="fill: none;"/></svg> \ No newline at end of file diff --git a/assets/logo.svg b/assets/logo.svg new file mode 100644 index 0000000..39e6b33 --- /dev/null +++ b/assets/logo.svg @@ -0,0 +1,40 @@ +<svg width="215" height="99" viewBox="0 0 215 99" fill="none" xmlns="http://www.w3.org/2000/svg"> +<g clip-path="url(#clip0_21_299)"> +<path fill-rule="evenodd" clip-rule="evenodd" d="M127.664 38.2806C124.052 33.6789 120.097 28.6387 114.662 28.6387L98.8961 28.6387V34.0989L98.8962 45.7019H93.6925C91.176 45.7019 89.136 43.6619 89.136 41.1455V28.6387H83.6758V41.0971C83.6758 46.6559 88.1821 51.1621 93.7408 51.1621H104.356L104.356 34.0989H111.659C117.477 34.0989 120.21 38.0334 123.051 42.1237C126.126 46.5516 129.329 51.1621 136.708 51.1621V45.7019C133.488 45.7019 130.697 42.1459 127.664 38.2806ZM114.663 51.1621V51.1467C119.034 50.9554 121.734 48.9222 123.902 46.3167C123.008 45.2196 122.223 44.1138 121.452 43.0269C121.077 42.5 120.706 41.9776 120.328 41.4627C118.571 43.7937 116.733 45.5504 113.98 45.7014V45.7019H111.796V36.0783H106.336V51.1621H111.796H114.663ZM138.415 
51.1621V45.7019H136.708V51.1621H138.415ZM126.266 33.6994C128.96 31.0109 132.581 28.8903 138.244 28.7181V33.9727C138.305 33.9731 138.364 33.975 138.244 33.9811C134.376 34.178 131.86 35.8997 129.833 38.1832L129.747 38.069L129.621 37.9041C128.564 36.5117 127.548 35.1737 126.266 33.6994Z" fill="white"/> +<path d="M92.5797 82.7055H83.6758V59.5645H92.4893C94.8697 59.5645 96.9262 60.0277 98.6587 60.9543C100.399 61.8733 101.74 63.1991 102.681 64.9316C103.63 66.6567 104.105 68.7245 104.105 71.135C104.105 73.5455 103.634 75.6171 102.693 77.3496C101.751 79.0747 100.418 80.4005 98.6926 81.327C96.9676 82.246 94.9299 82.7055 92.5797 82.7055ZM89.9582 77.3722H92.3537C93.4987 77.3722 94.4742 77.1877 95.2802 76.8186C96.0938 76.4495 96.7115 75.8129 97.1333 74.909C97.5627 74.005 97.7774 72.747 97.7774 71.135C97.7774 69.523 97.5589 68.265 97.122 67.361C96.6926 66.4571 96.0599 65.8205 95.2237 65.4514C94.3951 65.0823 93.3782 64.8977 92.1729 64.8977H89.9582V77.3722Z" fill="white"/> +<path d="M107.012 82.7055V59.5645H123.69V64.6266H113.294V68.6039H122.831V73.666H113.294V77.6434H123.644V82.7055H107.012Z" fill="white"/> +<path d="M133.133 59.5645L137.879 75.8807H138.06L142.805 59.5645H149.901L142.263 82.7055H133.675L126.037 59.5645H133.133Z" fill="white"/> +<path d="M152.534 83.0671C151.66 83.0671 150.911 82.762 150.285 82.1519C149.668 81.5342 149.363 80.7846 149.37 79.9033C149.363 79.0445 149.668 78.3101 150.285 77.6999C150.911 77.0898 151.66 76.7847 152.534 76.7847C153.363 76.7847 154.093 77.0898 154.726 77.6999C155.366 78.3101 155.69 79.0445 155.698 79.9033C155.69 80.4909 155.536 81.0257 155.235 81.5078C154.941 81.9824 154.557 82.3628 154.082 82.649C153.607 82.9278 153.091 83.0671 152.534 83.0671Z" fill="white"/> +<path d="M158.997 82.7055V59.5645H165.28V68.9203H165.596L172.557 59.5645H179.878L172.059 69.8695L180.059 82.7055H172.557L167.359 74.0276L165.28 76.7395V82.7055H158.997Z" fill="white"/> +<path d="M188.025 59.5645V82.7055H181.743V59.5645H188.025Z" fill="white"/> +<path d="M190.37 
64.6266V59.5645H210.483V64.6266H203.522V82.7055H197.33V64.6266H190.37Z" fill="white"/> +<g clip-path="url(#clip1_21_299)"> +<path fill-rule="evenodd" clip-rule="evenodd" d="M28.8959 4.65625V36.5127C18.0624 36.7604 7.89161 39.6924 -0.978516 44.6703V24.209V14.0426C3.27925 13.5744 7.31227 12.3644 10.9871 10.5454H11.1395C16.2734 7.13428 22.3489 5.01539 28.8959 4.65625Z" fill="#3C3B6E"/> +<path fill-rule="evenodd" clip-rule="evenodd" d="M-0.978516 49.5856V50.1729H-0.87515C0.366279 67.2282 12.6573 81.5299 28.8959 85.4148V40.7529C17.9613 41.0203 7.75219 44.2159 -0.978516 49.5856Z" fill="white"/> +<path d="M5.69243 15.0674L6.40735 17.2329H8.72082L6.84916 18.5712L7.56408 20.7367L5.69243 19.3983L3.8208 20.7367L4.53569 18.5712L2.66406 17.2329H4.97753L5.69243 15.0674Z" fill="#FEFFFF"/> +<path d="M5.69243 23.541L6.40735 25.7065H8.72082L6.84916 27.0448L7.56408 29.2103L5.69243 27.872L3.8208 29.2103L4.53569 27.0448L2.66406 25.7065H4.97753L5.69243 23.541Z" fill="#FEFFFF"/> +<path d="M5.69243 32.0117L6.40735 34.1773H8.72082L6.84916 35.5156L7.56408 37.6811L5.69243 36.3426L3.8208 37.6811L4.53569 35.5156L2.66406 34.1773H4.97753L5.69243 32.0117Z" fill="#FEFFFF"/> +<path d="M14.1573 12.5762L14.8722 14.7417H17.1857L15.314 16.08L16.0289 18.2455L14.1573 16.9071L12.2856 18.2455L13.0005 16.08L11.1289 14.7417H13.4424L14.1573 12.5762Z" fill="#FEFFFF"/> +<path d="M14.1573 21.0498L14.8722 23.2153H17.1857L15.314 24.5536L16.0289 26.7191L14.1573 25.3808L12.2856 26.7191L13.0005 24.5536L11.1289 23.2153H13.4424L14.1573 21.0498Z" fill="#FEFFFF"/> +<path d="M14.1573 29.5205L14.8722 31.6861H17.1857L15.314 33.0244L16.0289 35.1899L14.1573 33.8514L12.2856 35.1899L13.0005 33.0244L11.1289 31.6861H13.4424L14.1573 29.5205Z" fill="#FEFFFF"/> +<path d="M22.3701 11.0801L23.0851 13.2456H25.3986L23.5269 14.5839L24.2417 16.7494L22.3701 15.411L20.4984 16.7494L21.2134 14.5839L19.3418 13.2456H21.6553L22.3701 11.0801Z" fill="#FEFFFF"/> +<path d="M22.3701 19.5527L23.0851 21.7182H25.3986L23.5269 23.0565L24.2417 
25.222L22.3701 23.8837L20.4984 25.222L21.2134 23.0565L19.3418 21.7182H21.6553L22.3701 19.5527Z" fill="#FEFFFF"/> +<path d="M22.3701 28.0264L23.0851 30.1919H25.3986L23.5269 31.5302L24.2417 33.6958L22.3701 32.3572L20.4984 33.6958L21.2134 31.5302L19.3418 30.1919H21.6553L22.3701 28.0264Z" fill="#FEFFFF"/> +<path fill-rule="evenodd" clip-rule="evenodd" d="M62.7591 45.2409V24.209V14.0426C58.5013 13.5744 54.4683 12.3644 50.7935 10.5454H50.6411C45.5072 7.13428 39.4317 5.01539 32.8848 4.65625V36.5431C43.7518 36.9577 53.927 40.0744 62.7591 45.2409Z" fill="#4E85FF"/> +<path fill-rule="evenodd" clip-rule="evenodd" d="M62.6986 50.1715H62.6558C61.4143 67.2268 49.1232 81.5285 32.8848 85.4134V40.7842C43.8345 41.2314 54.0276 44.6159 62.6986 50.1715Z" fill="#FFCE4F"/> +<path fill-rule="evenodd" clip-rule="evenodd" d="M-0.978516 49.5856V50.3291H-0.87515C-0.724957 52.3785 -0.413118 54.3879 0.0478683 56.3439C8.30084 50.6884 18.209 47.276 28.8959 46.9845V40.7529C28.8926 40.7532 28.8891 40.7532 28.8859 40.7532C17.9551 41.0223 7.74956 44.2174 -0.978516 49.5856Z" fill="#B22234"/> +<path fill-rule="evenodd" clip-rule="evenodd" d="M28.8994 51.2217C18.6295 51.5389 9.16196 55.1137 1.45312 60.969C2.19028 62.9465 3.08439 64.8511 4.12038 66.6671C10.9913 61.1714 19.5636 57.7821 28.8994 57.4542V51.2217Z" fill="#B22234"/> +<path fill-rule="evenodd" clip-rule="evenodd" d="M28.8966 61.6914C20.3667 62.0123 12.5363 65.0689 6.25586 70.01C7.57835 71.8646 9.05895 73.6006 10.6779 75.1973C15.7033 70.9352 22.0096 68.2678 28.8966 67.9294V61.6914Z" fill="#B22234"/> +<path fill-rule="evenodd" clip-rule="evenodd" d="M28.8973 74.6631C23.9449 74.9424 19.3612 76.6014 15.5234 79.2623C16.7988 80.1678 18.1309 80.9983 19.5128 81.7468C22.3115 80.1414 25.4981 79.1365 28.8973 78.9072V74.6631Z" fill="#B22234"/> +<path fill-rule="evenodd" clip-rule="evenodd" d="M28.8993 82.6533C27.1571 82.7951 25.4844 83.1831 23.916 83.7821C25.528 84.4077 27.1917 84.928 28.8993 85.3337V82.6533Z" fill="#B22234"/> +</g> +</g> +<defs> 
+<clipPath id="clip0_21_299"> +<rect width="215" height="99" fill="white"/> +</clipPath> +<clipPath id="clip1_21_299"> +<rect width="63.7182" height="80.1616" fill="white" transform="translate(-1.08398 4.91992)"/> +</clipPath> +</defs> +</svg> From 753a2cba8abca78e786d50cf8219a58679d93509 Mon Sep 17 00:00:00 2001 From: Dmitry Smirnov <dmitry.smirnov@usabilitydynamics.com> Date: Mon, 9 Mar 2026 21:10:18 +0300 Subject: [PATCH 8/9] init commands --- README.md | 18 ++++++- bin/completions/_dev.kit | 34 +++++++++++++ bin/completions/dev.kit.bash | 44 ++++++++++++++++ bin/dev-kit | 97 ++++++++++++++++++++++++++++++++++++ bin/env/dev-kit.sh | 24 +++++++++ bin/scripts/install.sh | 37 ++++++++++++++ bin/scripts/uninstall.sh | 25 ++++++++++ config/default.env | 3 ++ lib/commands/bridge.sh | 17 +++++++ lib/commands/status.sh | 21 ++++++++ lib/modules/bootstrap.sh | 65 ++++++++++++++++++++++++ 11 files changed, 384 insertions(+), 1 deletion(-) create mode 100644 bin/completions/_dev.kit create mode 100644 bin/completions/dev.kit.bash create mode 100755 bin/dev-kit create mode 100755 bin/env/dev-kit.sh create mode 100755 bin/scripts/install.sh create mode 100755 bin/scripts/uninstall.sh create mode 100644 config/default.env create mode 100644 lib/commands/bridge.sh create mode 100644 lib/commands/status.sh create mode 100644 lib/modules/bootstrap.sh diff --git a/README.md b/README.md index 2da9afb..679d795 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,9 @@ The audit output becomes a focused improvement plan with bounded next steps. ```bash # 1. Install & Run the Pulse Check -curl -sSL [https://dev.kit/install](https://dev.kit/install) | bash && dev.kit +bash bin/scripts/install.sh +source "$HOME/.udx/dev.kit/bin/env/dev-kit.sh" +dev.kit # 2. Let an Agent Fix Compliance dev.kit --json | agent-execute "Fix all fidelity gaps" @@ -65,3 +67,17 @@ dev.kit --json | agent-execute "Fix all fidelity gaps" # 3. 
Let an Agent Develop a Feature dev.kit bridge --json | agent-execute "Add a new module using existing primitives" ``` + +## Install + +```bash +bash bin/scripts/install.sh +source "$HOME/.udx/dev.kit/bin/env/dev-kit.sh" +dev.kit status +``` + +## Uninstall + +```bash +"$HOME/.udx/dev.kit/bin/scripts/uninstall.sh" +``` diff --git a/bin/completions/_dev.kit b/bin/completions/_dev.kit new file mode 100644 index 0000000..faf92ac --- /dev/null +++ b/bin/completions/_dev.kit @@ -0,0 +1,34 @@ +#compdef dev.kit + +_dev_kit() { + local -a commands + local -a options + local cmd + local dev_kit_cmd + + dev_kit_cmd="${0:A:h:h}/dev-kit" + + commands=(${(f)"$("$dev_kit_cmd" help 2>/dev/null | awk ' + /^Commands:/ { flag=1; next } + flag && $0 ~ /^ [a-zA-Z0-9-]+/ { print $1 ":" substr($0, index($0, $2)) } + flag && $0 == "" { exit } + ')"}) + + if (( CURRENT == 2 )); then + _describe 'command' commands + return + fi + + cmd="$words[2]" + options=(${(f)"$("$dev_kit_cmd" "$cmd" --help 2>/dev/null | awk ' + /^Options:/ { flag=1; next } + flag && $0 ~ /^ --/ { print $1 ":" substr($0, index($0, $2)) } + flag && $0 == "" { exit } + ')"}) + + if (( ${#options} )); then + _describe 'option' options + fi +} + +_dev_kit "$@" diff --git a/bin/completions/dev.kit.bash b/bin/completions/dev.kit.bash new file mode 100644 index 0000000..1e89529 --- /dev/null +++ b/bin/completions/dev.kit.bash @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +_DEV_KIT_COMPLETION_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +_dev_kit_complete() { + local cur cmd + cur="${COMP_WORDS[COMP_CWORD]}" + cmd="${COMP_WORDS[1]}" + + _dev_kit_cmd() { + printf "%s" "$(cd "${_DEV_KIT_COMPLETION_DIR}/.." 
&& pwd)/dev-kit" + } + + _dev_kit_list_commands() { + "$(_dev_kit_cmd)" help 2>/dev/null | awk ' + /^Commands:/ { flag=1; next } + flag && $0 ~ /^ [a-zA-Z0-9-]+/ { print $1 } + flag && $0 == "" { exit } + ' + } + + _dev_kit_list_options() { + local target="${1:-help}" + "$(_dev_kit_cmd)" "$target" --help 2>/dev/null | awk ' + /^Options:/ { flag=1; next } + flag && $0 ~ /^ --/ { print $1 } + flag && $0 == "" { exit } + ' + } + + if [ "$COMP_CWORD" -eq 1 ]; then + COMPREPLY=( $(compgen -W "$(_dev_kit_list_commands) --json" -- "$cur") ) + return 0 + fi + + if [[ "$cur" == -* ]]; then + COMPREPLY=( $(compgen -W "$(_dev_kit_list_options "$cmd")" -- "$cur") ) + return 0 + fi + + COMPREPLY=() +} + +complete -F _dev_kit_complete dev.kit diff --git a/bin/dev-kit b/bin/dev-kit new file mode 100755 index 0000000..4e2754e --- /dev/null +++ b/bin/dev-kit @@ -0,0 +1,97 @@ +#!/usr/bin/env bash +set -euo pipefail + +REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +# shellcheck disable=SC1091 +. "$REPO_DIR/lib/modules/bootstrap.sh" +dev_kit_bootstrap "$REPO_DIR" + +for module_file in "$REPO_DIR"/lib/modules/*.sh; do + [ "$module_file" = "$REPO_DIR/lib/modules/bootstrap.sh" ] && continue + # shellcheck disable=SC1090 + . "$module_file" +done + +for command_file in "$REPO_DIR"/lib/commands/*.sh; do + # shellcheck disable=SC1090 + . 
"$command_file" +done + +usage() { + local command_file="" + local command_name="" + local description="" + cat <<'EOF' +Usage: dev.kit <command> + +Commands: +EOF + + for command_file in $(dev_kit_list_command_files "$REPO_DIR"); do + command_name="$(dev_kit_command_name_from_file "$command_file")" + description="$(dev_kit_command_description "$command_file")" + printf " %-10s %s\n" "$command_name" "$description" + done + + cat <<'EOF' + help Show this help message + +Options: + --json Output machine-readable JSON for supported commands +EOF +} + +command_usage() { + local command_name="$1" + cat <<EOF +Usage: dev.kit ${command_name} [--json] + +Options: + --json Output machine-readable JSON +EOF +} + +command="${1:-status}" +format="text" + +if [ "${2:-}" = "--json" ] || [ "${1:-}" = "--json" ]; then + format="json" +fi + +case "$command" in + status) + if [ "${2:-}" = "-h" ] || [ "${2:-}" = "--help" ]; then + command_usage "status" + exit 0 + fi + dev_kit_cmd_status "$format" + ;; + bridge) + if [ "${2:-}" = "-h" ] || [ "${2:-}" = "--help" ]; then + command_usage "bridge" + exit 0 + fi + dev_kit_cmd_bridge "$format" + ;; + --json) + dev_kit_cmd_status "json" + ;; + help|-h|--help) + usage + ;; + *) + fn="dev_kit_cmd_${command//-/_}" + if command -v "$fn" >/dev/null 2>&1; then + if [ "${2:-}" = "-h" ] || [ "${2:-}" = "--help" ]; then + command_usage "$command" + exit 0 + fi + "$fn" "$format" + exit 0 + fi + echo "Unknown command: $command" >&2 + echo >&2 + usage >&2 + exit 1 + ;; +esac diff --git a/bin/env/dev-kit.sh b/bin/env/dev-kit.sh new file mode 100755 index 0000000..0903019 --- /dev/null +++ b/bin/env/dev-kit.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" +# shellcheck disable=SC1091 +. 
"$REPO_DIR/lib/modules/bootstrap.sh" +dev_kit_bootstrap "$REPO_DIR" + +case ":$PATH:" in + *":${DEV_KIT_BIN_DIR}:"*) ;; + *) export PATH="${DEV_KIT_BIN_DIR}:${PATH}" ;; +esac + +if [ -n "${BASH_VERSION:-}" ] && [ -f "${DEV_KIT_HOME}/bin/completions/dev.kit.bash" ]; then + # shellcheck disable=SC1090 + . "${DEV_KIT_HOME}/bin/completions/dev.kit.bash" +fi + +if [ -n "${ZSH_VERSION:-}" ] && [ -f "${DEV_KIT_HOME}/bin/completions/_dev.kit" ]; then + fpath=("${DEV_KIT_HOME}/bin/completions" $fpath) + autoload -Uz compinit + compinit -i +fi + +export DEV_KIT_HOME diff --git a/bin/scripts/install.sh b/bin/scripts/install.sh new file mode 100755 index 0000000..a685dee --- /dev/null +++ b/bin/scripts/install.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +set -euo pipefail + +REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" +# shellcheck disable=SC1091 +. "$REPO_DIR/lib/modules/bootstrap.sh" +dev_kit_bootstrap "$REPO_DIR" + +TARGET="${DEV_KIT_BIN_DIR}/dev.kit" + +if [ "$#" -gt 0 ]; then + echo "This installer does not modify shell profiles." 
>&2 + echo "Usage: bash bin/scripts/install.sh" >&2 + exit 1 +fi + +mkdir -p "$DEV_KIT_HOME" "$DEV_KIT_BIN_DIR" +rm -rf "$DEV_KIT_HOME/source" "$DEV_KIT_HOME/state" + +dev_kit_copy_tree "$REPO_DIR/bin" "$DEV_KIT_HOME/bin" +dev_kit_copy_tree "$REPO_DIR/lib" "$DEV_KIT_HOME/lib" +dev_kit_copy_tree "$REPO_DIR/config" "$DEV_KIT_HOME/config" + +find "$DEV_KIT_HOME/bin" -type f -exec chmod +x {} \; + +ln -sfn "$DEV_KIT_HOME/bin/dev-kit" "$TARGET" + +echo "Installed dev.kit" +echo "binary: $TARGET" +echo "home: $DEV_KIT_HOME" +if dev_kit_path_contains_bin_dir; then + echo "shell: PATH already includes $DEV_KIT_BIN_DIR" +else + echo "shell: unchanged" + echo "next: export PATH=\"$DEV_KIT_BIN_DIR:\$PATH\"" + echo "then: source \"$DEV_KIT_HOME/bin/env/dev-kit.sh\"" +fi diff --git a/bin/scripts/uninstall.sh b/bin/scripts/uninstall.sh new file mode 100755 index 0000000..136f20d --- /dev/null +++ b/bin/scripts/uninstall.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -euo pipefail + +REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" +# shellcheck disable=SC1091 +. "$REPO_DIR/lib/modules/bootstrap.sh" +dev_kit_bootstrap "$REPO_DIR" + +TARGET="${DEV_KIT_BIN_DIR}/dev.kit" + +if [ -L "$TARGET" ] || [ -f "$TARGET" ]; then + rm -f "$TARGET" + echo "Removed binary: $TARGET" +else + echo "Binary not found: $TARGET" +fi + +if [ -d "$DEV_KIT_HOME" ]; then + rm -rf "$DEV_KIT_HOME" + echo "Removed home: $DEV_KIT_HOME" +else + echo "Home not found: $DEV_KIT_HOME" +fi + +echo "Shell profile files were not modified." 
diff --git a/config/default.env b/config/default.env new file mode 100644 index 0000000..a42923f --- /dev/null +++ b/config/default.env @@ -0,0 +1,3 @@ +DEV_KIT_OWNER=udx +DEV_KIT_REPO=dev.kit +DEV_KIT_BIN_DIR=$HOME/.local/bin diff --git a/lib/commands/bridge.sh b/lib/commands/bridge.sh new file mode 100644 index 0000000..1e3ada8 --- /dev/null +++ b/lib/commands/bridge.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# @description: Show a basic bridge payload + +dev_kit_cmd_bridge() { + local format="${1:-text}" + + if [ "$format" = "json" ]; then + printf '{\n "command": "bridge",\n "repo": "%s",\n "capabilities": ["install", "status"],\n "boundaries": ["local shell"]\n}\n' "$(pwd)" + return 0 + fi + + echo "dev.kit bridge" + echo "repo: $(pwd)" + echo "capabilities: install, status" + echo "boundaries: local shell" +} diff --git a/lib/commands/status.sh b/lib/commands/status.sh new file mode 100644 index 0000000..81e1133 --- /dev/null +++ b/lib/commands/status.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# @description: Show basic installation status + +dev_kit_cmd_status() { + local format="${1:-text}" + local state="not installed" + + if [ -d "$DEV_KIT_HOME" ]; then + state="installed" + fi + + if [ "$format" = "json" ]; then + printf '{\n "name": "dev.kit",\n "home": "%s",\n "state": "%s"\n}\n' "$DEV_KIT_HOME" "$state" + return 0 + fi + + echo "dev.kit" + echo "home: $DEV_KIT_HOME" + echo "state: $state" +} diff --git a/lib/modules/bootstrap.sh b/lib/modules/bootstrap.sh new file mode 100644 index 0000000..f95c39f --- /dev/null +++ b/lib/modules/bootstrap.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +dev_kit_load_defaults() { + local config_file="$1" + local key="" + local value="" + + [ -f "$config_file" ] || return 0 + + while IFS='=' read -r key value; do + case "$key" in + ''|\#*) continue ;; + esac + if [ -z "${!key+x}" ]; then + eval "export ${key}=\"${value}\"" + fi + done < "$config_file" +} + +dev_kit_bootstrap() { + local root_dir="$1" + + 
dev_kit_load_defaults "$root_dir/config/default.env" + + export DEV_KIT_OWNER="${DEV_KIT_OWNER:-udx}" + export DEV_KIT_REPO="${DEV_KIT_REPO:-dev.kit}" + export DEV_KIT_BIN_DIR="${DEV_KIT_BIN_DIR:-$HOME/.local/bin}" + export DEV_KIT_HOME="${DEV_KIT_HOME:-$HOME/.${DEV_KIT_OWNER}/${DEV_KIT_REPO}}" +} + +dev_kit_path_contains_bin_dir() { + case ":$PATH:" in + *":${DEV_KIT_BIN_DIR}:"*) return 0 ;; + *) return 1 ;; + esac +} + +dev_kit_copy_file() { + local src="$1" + local dst="$2" + mkdir -p "$(dirname "$dst")" + cp "$src" "$dst" +} + +dev_kit_copy_tree() { + local src="$1" + local dst="$2" + mkdir -p "$dst" + cp -R "$src/." "$dst/" +} + +dev_kit_command_name_from_file() { + local file="$1" + basename "$file" .sh | tr '_' '-' +} + +dev_kit_command_description() { + local file="$1" + awk -F': ' '/^# @description:/ { print $2; exit }' "$file" +} + +dev_kit_list_command_files() { + local root_dir="$1" + find "$root_dir/lib/commands" -maxdepth 1 -type f -name '*.sh' | sort +} From dd25feb0e40d1631894e9ee5f62781ff2e92918c Mon Sep 17 00:00:00 2001 From: Dmitry Smirnov <dmitry.smirnov@usabilitydynamics.com> Date: Tue, 10 Mar 2026 15:10:39 +0300 Subject: [PATCH 9/9] Add stateless audit flow and worker-backed tests --- README.md | 2 + bin/dev-kit | 26 ++++- bin/env/dev-kit.sh | 2 +- bin/scripts/install.sh | 6 +- bin/scripts/uninstall.sh | 2 +- config/default.env | 3 - deploy.yml | 14 +++ docs/development.md | 14 +++ lib/commands/audit.sh | 41 +++++++ lib/commands/bridge.sh | 4 +- lib/modules/bootstrap.sh | 25 +---- lib/modules/repo_inspector.sh | 108 +++++++++++++++++++ lib/modules/rule_catalog.sh | 32 ++++++ src/configs/audit-rules.yml | 7 ++ tests/fixtures/simple-repo/index.js | 1 + tests/fixtures/simple-repo/package.json | 7 ++ tests/helpers/assert.sh | 71 ++++++++++++ tests/run.sh | 44 ++++++++ tests/suite.sh | 137 ++++++++++++++++++++++++ 19 files changed, 508 insertions(+), 38 deletions(-) delete mode 100644 config/default.env create mode 100644 deploy.yml create mode 
100644 docs/development.md create mode 100644 lib/commands/audit.sh create mode 100644 lib/modules/repo_inspector.sh create mode 100644 lib/modules/rule_catalog.sh create mode 100644 src/configs/audit-rules.yml create mode 100644 tests/fixtures/simple-repo/index.js create mode 100644 tests/fixtures/simple-repo/package.json create mode 100644 tests/helpers/assert.sh create mode 100644 tests/run.sh create mode 100644 tests/suite.sh diff --git a/README.md b/README.md index 679d795..2cee9ab 100644 --- a/README.md +++ b/README.md @@ -81,3 +81,5 @@ dev.kit status ```bash "$HOME/.udx/dev.kit/bin/scripts/uninstall.sh" ``` + +Development and test workflow lives in [docs/development.md](docs/development.md). diff --git a/bin/dev-kit b/bin/dev-kit index 4e2754e..54f4c30 100755 --- a/bin/dev-kit +++ b/bin/dev-kit @@ -1,10 +1,21 @@ #!/usr/bin/env bash set -euo pipefail -REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +SCRIPT_PATH="${BASH_SOURCE[0]}" + +while [ -L "$SCRIPT_PATH" ]; do + SCRIPT_DIR="$(cd "$(dirname "$SCRIPT_PATH")" && pwd)" + SCRIPT_PATH="$(readlink "$SCRIPT_PATH")" + case "$SCRIPT_PATH" in + /*) ;; + *) SCRIPT_PATH="${SCRIPT_DIR}/${SCRIPT_PATH}" ;; + esac +done + +REPO_DIR="$(cd "$(dirname "$SCRIPT_PATH")/.." && pwd)" # shellcheck disable=SC1091 . 
"$REPO_DIR/lib/modules/bootstrap.sh" -dev_kit_bootstrap "$REPO_DIR" +dev_kit_bootstrap for module_file in "$REPO_DIR"/lib/modules/*.sh; do [ "$module_file" = "$REPO_DIR/lib/modules/bootstrap.sh" ] && continue @@ -51,7 +62,7 @@ Options: EOF } -command="${1:-status}" +command="${1:-audit}" format="text" if [ "${2:-}" = "--json" ] || [ "${1:-}" = "--json" ]; then @@ -73,8 +84,15 @@ case "$command" in fi dev_kit_cmd_bridge "$format" ;; + audit) + if [ "${2:-}" = "-h" ] || [ "${2:-}" = "--help" ]; then + command_usage "audit" + exit 0 + fi + dev_kit_cmd_audit "$format" + ;; --json) - dev_kit_cmd_status "json" + dev_kit_cmd_audit "json" ;; help|-h|--help) usage diff --git a/bin/env/dev-kit.sh b/bin/env/dev-kit.sh index 0903019..e56b4fd 100755 --- a/bin/env/dev-kit.sh +++ b/bin/env/dev-kit.sh @@ -3,7 +3,7 @@ REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" # shellcheck disable=SC1091 . "$REPO_DIR/lib/modules/bootstrap.sh" -dev_kit_bootstrap "$REPO_DIR" +dev_kit_bootstrap case ":$PATH:" in *":${DEV_KIT_BIN_DIR}:"*) ;; diff --git a/bin/scripts/install.sh b/bin/scripts/install.sh index a685dee..ea66288 100755 --- a/bin/scripts/install.sh +++ b/bin/scripts/install.sh @@ -4,7 +4,7 @@ set -euo pipefail REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" # shellcheck disable=SC1091 . 
"$REPO_DIR/lib/modules/bootstrap.sh" -dev_kit_bootstrap "$REPO_DIR" +dev_kit_bootstrap TARGET="${DEV_KIT_BIN_DIR}/dev.kit" @@ -15,11 +15,11 @@ if [ "$#" -gt 0 ]; then fi mkdir -p "$DEV_KIT_HOME" "$DEV_KIT_BIN_DIR" -rm -rf "$DEV_KIT_HOME/source" "$DEV_KIT_HOME/state" +rm -rf "$DEV_KIT_HOME/bin" "$DEV_KIT_HOME/lib" "$DEV_KIT_HOME/src" "$DEV_KIT_HOME/config" "$DEV_KIT_HOME/source" "$DEV_KIT_HOME/state" dev_kit_copy_tree "$REPO_DIR/bin" "$DEV_KIT_HOME/bin" dev_kit_copy_tree "$REPO_DIR/lib" "$DEV_KIT_HOME/lib" -dev_kit_copy_tree "$REPO_DIR/config" "$DEV_KIT_HOME/config" +dev_kit_copy_tree "$REPO_DIR/src" "$DEV_KIT_HOME/src" find "$DEV_KIT_HOME/bin" -type f -exec chmod +x {} \; diff --git a/bin/scripts/uninstall.sh b/bin/scripts/uninstall.sh index 136f20d..71bf99f 100755 --- a/bin/scripts/uninstall.sh +++ b/bin/scripts/uninstall.sh @@ -4,7 +4,7 @@ set -euo pipefail REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" # shellcheck disable=SC1091 . "$REPO_DIR/lib/modules/bootstrap.sh" -dev_kit_bootstrap "$REPO_DIR" +dev_kit_bootstrap TARGET="${DEV_KIT_BIN_DIR}/dev.kit" diff --git a/config/default.env b/config/default.env deleted file mode 100644 index a42923f..0000000 --- a/config/default.env +++ /dev/null @@ -1,3 +0,0 @@ -DEV_KIT_OWNER=udx -DEV_KIT_REPO=dev.kit -DEV_KIT_BIN_DIR=$HOME/.local/bin diff --git a/deploy.yml b/deploy.yml new file mode 100644 index 0000000..d1dcce0 --- /dev/null +++ b/deploy.yml @@ -0,0 +1,14 @@ +kind: workerDeployConfig +version: udx.io/worker-v1/deploy + +config: + image: "usabilitydynamics/udx-worker:latest" + name: "dev-kit-test-suite" + volumes: + - ".:/workspace" + env: + TERM: "xterm-256color" + command: "/bin/bash" + args: + - "/workspace/tests/suite.sh" + diff --git a/docs/development.md b/docs/development.md new file mode 100644 index 0000000..92612a2 --- /dev/null +++ b/docs/development.md @@ -0,0 +1,14 @@ +# Development + +## Test + +Canonical verification runs in the preconfigured worker container: + +```bash +bash 
tests/run.sh +``` + +## Notes + +- `bash tests/run.sh` uses [deploy.yml](/Users/jonyfq/git/udx/dev.kit/deploy.yml) with the globally installed `worker` CLI. +- The suite validates install, env setup, dynamic command discovery, Bash completion, and uninstall in a fresh temporary `HOME`. diff --git a/lib/commands/audit.sh b/lib/commands/audit.sh new file mode 100644 index 0000000..b3e38b4 --- /dev/null +++ b/lib/commands/audit.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +# @description: Audit the current repository for basic fidelity gaps + +dev_kit_cmd_audit() { + local format="${1:-text}" + local repo_dir="${2:-$(pwd)}" + local repo_name="" + local stack="" + local readme_status="" + local test_status="" + + repo_name="$(dev_kit_repo_name "$repo_dir")" + stack="$(dev_kit_repo_detect_stack "$repo_dir")" + readme_status="$(dev_kit_repo_readme_status "$repo_dir")" + test_status="$(dev_kit_repo_test_status "$repo_dir")" + + if [ "$format" = "json" ]; then + printf '{\n' + printf ' "command": "audit",\n' + printf ' "repo": "%s",\n' "$repo_name" + printf ' "path": "%s",\n' "$repo_dir" + printf ' "stack": "%s",\n' "$stack" + printf ' "checks": {\n' + printf ' "readme": "%s",\n' "$readme_status" + printf ' "test_command": "%s"\n' "$test_status" + printf ' },\n' + printf ' "improvement_plan": ' + dev_kit_repo_findings_json "$repo_dir" + printf '\n}\n' + return 0 + fi + + echo "dev.kit audit" + echo "repo: $repo_name" + echo "path: $repo_dir" + echo "stack: $stack" + echo "readme: $readme_status" + echo "test command: $test_status" + dev_kit_repo_advices "$repo_dir" +} diff --git a/lib/commands/bridge.sh b/lib/commands/bridge.sh index 1e3ada8..559ce27 100644 --- a/lib/commands/bridge.sh +++ b/lib/commands/bridge.sh @@ -6,12 +6,12 @@ dev_kit_cmd_bridge() { local format="${1:-text}" if [ "$format" = "json" ]; then - printf '{\n "command": "bridge",\n "repo": "%s",\n "capabilities": ["install", "status"],\n "boundaries": ["local shell"]\n}\n' "$(pwd)" + printf '{\n "command": 
"bridge",\n "repo": "%s",\n "capabilities": ["audit", "bridge", "status"],\n "boundaries": ["local shell"]\n}\n' "$(pwd)" return 0 fi echo "dev.kit bridge" echo "repo: $(pwd)" - echo "capabilities: install, status" + echo "capabilities: audit, bridge, status" echo "boundaries: local shell" } diff --git a/lib/modules/bootstrap.sh b/lib/modules/bootstrap.sh index f95c39f..1d1ce80 100644 --- a/lib/modules/bootstrap.sh +++ b/lib/modules/bootstrap.sh @@ -1,31 +1,8 @@ #!/usr/bin/env bash -dev_kit_load_defaults() { - local config_file="$1" - local key="" - local value="" - - [ -f "$config_file" ] || return 0 - - while IFS='=' read -r key value; do - case "$key" in - ''|\#*) continue ;; - esac - if [ -z "${!key+x}" ]; then - eval "export ${key}=\"${value}\"" - fi - done < "$config_file" -} - dev_kit_bootstrap() { - local root_dir="$1" - - dev_kit_load_defaults "$root_dir/config/default.env" - - export DEV_KIT_OWNER="${DEV_KIT_OWNER:-udx}" - export DEV_KIT_REPO="${DEV_KIT_REPO:-dev.kit}" export DEV_KIT_BIN_DIR="${DEV_KIT_BIN_DIR:-$HOME/.local/bin}" - export DEV_KIT_HOME="${DEV_KIT_HOME:-$HOME/.${DEV_KIT_OWNER}/${DEV_KIT_REPO}}" + export DEV_KIT_HOME="${DEV_KIT_HOME:-$HOME/.udx/dev.kit}" } dev_kit_path_contains_bin_dir() { diff --git a/lib/modules/repo_inspector.sh b/lib/modules/repo_inspector.sh new file mode 100644 index 0000000..a41932f --- /dev/null +++ b/lib/modules/repo_inspector.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash + +dev_kit_repo_name() { + basename "${1:-$(pwd)}" +} + +dev_kit_has_file() { + local repo_dir="$1" + local path="$2" + [ -e "$repo_dir/$path" ] +} + +dev_kit_detect_node_repo() { + local repo_dir="$1" + dev_kit_has_file "$repo_dir" "package.json" +} + +dev_kit_repo_test_status() { + local repo_dir="$1" + + if dev_kit_detect_node_repo "$repo_dir"; then + if awk ' + /"scripts"[[:space:]]*:[[:space:]]*{/ { in_scripts=1 } + in_scripts && /"test"[[:space:]]*:/ { found=1 } + in_scripts && /}/ { if (!found) exit } + END { exit found ? 
0 : 1 } + ' "$repo_dir/package.json"; then + printf "%s" "present" + return 0 + fi + fi + + printf "%s" "missing" +} + +dev_kit_repo_readme_status() { + local repo_dir="$1" + + if dev_kit_has_file "$repo_dir" "README.md" || dev_kit_has_file "$repo_dir" "README"; then + printf "%s" "present" + return 0 + fi + + printf "%s" "missing" +} + +dev_kit_repo_detect_stack() { + local repo_dir="$1" + + if dev_kit_detect_node_repo "$repo_dir"; then + printf "%s" "node" + return 0 + fi + + printf "%s" "unknown" +} + +dev_kit_repo_findings_json() { + local repo_dir="$1" + local readme_status="" + local test_status="" + local emitted=0 + local readme_message="" + local test_message="" + + readme_status="$(dev_kit_repo_readme_status "$repo_dir")" + test_status="$(dev_kit_repo_test_status "$repo_dir")" + readme_message="$(dev_kit_rule_message "missing-readme")" + test_message="$(dev_kit_rule_message "missing-test-command")" + + printf "[" + + if [ "$readme_status" = "missing" ]; then + printf '\n { "id": "missing-readme", "message": "%s" }' "$readme_message" + emitted=1 + fi + + if [ "$test_status" = "missing" ]; then + if [ "$emitted" -eq 1 ]; then + printf "," + fi + printf '\n { "id": "missing-test-command", "message": "%s" }' "$test_message" + emitted=1 + fi + + if [ "$emitted" -eq 1 ]; then + printf '\n ' + fi + + printf "]" +} + +dev_kit_repo_advices() { + local repo_dir="$1" + local readme_status="" + local test_status="" + + readme_status="$(dev_kit_repo_readme_status "$repo_dir")" + test_status="$(dev_kit_repo_test_status "$repo_dir")" + + if [ "$readme_status" = "missing" ]; then + printf 'advice: %s\n' "$(dev_kit_rule_message "missing-readme")" + fi + + if [ "$test_status" = "missing" ]; then + printf 'advice: %s\n' "$(dev_kit_rule_message "missing-test-command")" + fi +} diff --git a/lib/modules/rule_catalog.sh b/lib/modules/rule_catalog.sh new file mode 100644 index 0000000..402adfb --- /dev/null +++ b/lib/modules/rule_catalog.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash 
+
+# Absolute path of the audit rule catalog shipped with dev.kit.
+# NOTE(review): relies on $REPO_DIR being set by the caller/bootstrap — confirm.
+dev_kit_rule_catalog_path() {
+  printf "%s" "$REPO_DIR/src/configs/audit-rules.yml"
+}
+
+# Look up one field of one rule in the YAML catalog.
+# $1: rule id (e.g. "missing-readme"); $2: field name (e.g. "message").
+# Prints the field value, or nothing when the rule or field is absent.
+dev_kit_rule_field() {
+  local rule_id="$1"
+  local field_name="$2"
+  local catalog_path=""
+
+  catalog_path="$(dev_kit_rule_catalog_path)"
+
+  # Minimal YAML scanning (no parser dependency): a "- id: <x>" line selects
+  # a rule; while selected, the first "<field>: <value>" line wins.
+  awk -v rule_id="$rule_id" -v field_name="$field_name" '
+    $1 == "-" && $2 == "id:" {
+      current_id = $3
+      in_rule = (current_id == rule_id)
+      next
+    }
+
+    # In awk, string concatenation binds tighter than "==", so this compares
+    # $1 against e.g. "message:" (field_name immediately followed by a colon).
+    in_rule && $1 == field_name ":" {
+      # Blank out the key field, then strip the single OFS-joined leading
+      # space so only the value text is printed.
+      $1 = ""
+      sub(/^ /, "")
+      print
+      exit
+    }
+  ' "$catalog_path"
+}
+
+# Convenience wrapper: the human-facing message for a rule id.
+dev_kit_rule_message() {
+  dev_kit_rule_field "$1" "message"
+}
diff --git a/src/configs/audit-rules.yml b/src/configs/audit-rules.yml
new file mode 100644
index 0000000..64a5c8f
--- /dev/null
+++ b/src/configs/audit-rules.yml
+# Audit rule catalog: "id" selects the rule, "check" names the probe,
+# "message" is the advice surfaced to users (read by lib/modules/rule_catalog.sh).
+rules:
+  - id: missing-readme
+    check: readme
+    message: Add a README so humans and agents can orient quickly.
+  - id: missing-test-command
+    check: test_command
+    message: Add a runnable test command so verification is deterministic.
diff --git a/tests/fixtures/simple-repo/index.js b/tests/fixtures/simple-repo/index.js
new file mode 100644
index 0000000..7728117
--- /dev/null
+++ b/tests/fixtures/simple-repo/index.js
+console.log("hello")
diff --git a/tests/fixtures/simple-repo/package.json b/tests/fixtures/simple-repo/package.json
new file mode 100644
index 0000000..eff7ff8
--- /dev/null
+++ b/tests/fixtures/simple-repo/package.json
+{
+  "name": "simple-repo",
+  "private": true,
+  "scripts": {
+    "start": "node index.js"
+  }
+}
diff --git a/tests/helpers/assert.sh b/tests/helpers/assert.sh
new file mode 100644
index 0000000..dedbda6
--- /dev/null
+++ b/tests/helpers/assert.sh
+#!/usr/bin/env bash
+
+# TAP-style failure: print "not ok" to stderr and abort the whole suite.
+fail() {
+  printf "not ok - %s\n" "$1" >&2
+  exit 1
+}
+
+# TAP-style success line on stdout.
+pass() {
+  printf "ok - %s\n" "$1"
+}
+
+# Assert that a path exists (any file type, via -e).
+assert_file_exists() {
+  local path="$1"
+  local message="$2"
+
+  [ -e "$path" ] || fail "$message"
+  pass "$message"
+}
+
+# Assert that a path does not exist.
+assert_file_missing() {
+  local path="$1"
+  local message="$2"
+
+  [ !
-e "$path" ] || fail "$message"
+  pass "$message"
+}
+
+# Assert that $1 contains substring $2 (case pattern match, safe for most text).
+assert_contains() {
+  local haystack="$1"
+  local needle="$2"
+  local message="$3"
+
+  case "$haystack" in
+    *"$needle"*) pass "$message" ;;
+    *) fail "$message" ;;
+  esac
+}
+
+# Assert that $1 does NOT contain substring $2.
+assert_not_contains() {
+  local haystack="$1"
+  local needle="$2"
+  local message="$3"
+
+  case "$haystack" in
+    *"$needle"*) fail "$message" ;;
+    *) pass "$message" ;;
+  esac
+}
+
+# Assert that $1 is a symlink whose readlink target equals $2 exactly.
+assert_symlink_target() {
+  local path="$1"
+  local expected="$2"
+  local message="$3"
+  local actual=""
+
+  [ -L "$path" ] || fail "$message"
+  actual="$(readlink "$path")"
+  [ "$actual" = "$expected" ] || fail "$message"
+  pass "$message"
+}
+
+# Run a shell command string and assert its stdout contains a substring.
+# NOTE(review): $1 is passed to eval, so callers must fully control the
+# command string; acceptable for a test helper, never for untrusted input.
+assert_command_output_contains() {
+  local cmd="$1"
+  local needle="$2"
+  local message="$3"
+  local output=""
+
+  output="$(eval "$cmd")" || fail "$message"
+  assert_contains "$output" "$needle" "$message"
+}
+
diff --git a/tests/run.sh b/tests/run.sh
new file mode 100644
index 0000000..fbc25c9
--- /dev/null
+++ b/tests/run.sh
+#!/usr/bin/env bash
+set -euo pipefail
+
+REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+DEPLOY_CONFIG="$REPO_DIR/deploy.yml"
+
+# Resolve the worker CLI binary; prints its path, returns 1 when not installed.
+worker_cmd() {
+  if command -v worker >/dev/null 2>&1; then
+    command -v worker
+    return 0
+  fi
+
+  return 1
+}
+
+usage() {
+  cat <<'EOF'
+Usage: bash tests/run.sh
+
+Options:
+  --help  Show this help
+EOF
+}
+
+# Delegate the test run to the worker CLI using this repo's deploy config.
+run_worker() {
+  local cmd=""
+
+  cmd="$(worker_cmd)" || {
+    echo "worker CLI not found. Install @udx/worker-deployment globally." >&2
+    exit 1
+  }
+
+  "$cmd" run --config="$DEPLOY_CONFIG"
+}
+
+while [ "$#" -gt 0 ]; do
+  case "$1" in
+    -h|--help) usage; exit 0 ;;
+    *) echo "Unknown option: $1" >&2; usage >&2; exit 1 ;;
+  esac
+  shift
+done
+
+run_worker
diff --git a/tests/suite.sh b/tests/suite.sh
new file mode 100644
index 0000000..ad40d88
--- /dev/null
+++ b/tests/suite.sh
+#!/usr/bin/env bash
+set -euo pipefail
+
+REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+# shellcheck disable=SC1091
+.
"$REPO_DIR/tests/helpers/assert.sh"
+
+# --- Isolated sandbox: fake $HOME with a bare PATH so the test never touches
+# --- the real user environment or a previously installed dev.kit.
+TEST_HOME="${DEV_KIT_TEST_HOME:-$(mktemp -d "${TMPDIR:-/tmp}/dev-kit-test-home.XXXXXX")}"
+PROFILE_FILES=("$TEST_HOME/.bash_profile" "$TEST_HOME/.bashrc" "$TEST_HOME/.zshrc")
+BASE_PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin"
+INSTALL_OUTPUT=""
+FIXTURE_REPO="$REPO_DIR/tests/fixtures/simple-repo"
+
+# Remove the sandbox home on any exit (success or failure).
+cleanup() {
+  rm -rf "$TEST_HOME"
+}
+
+trap cleanup EXIT
+
+mkdir -p "$TEST_HOME"
+export HOME="$TEST_HOME"
+export PATH="$BASE_PATH"
+unset DEV_KIT_HOME
+unset DEV_KIT_BIN_DIR
+
+# Seed each shell profile with a sentinel so we can later prove the installer
+# never edits shell init files.
+for profile in "${PROFILE_FILES[@]}"; do
+  printf "# dev.kit test sentinel\n" > "$profile"
+done
+
+# --- Phase 1: install and verify on-disk layout.
+INSTALL_OUTPUT="$(bash "$REPO_DIR/bin/scripts/install.sh")"
+assert_contains "$INSTALL_OUTPUT" "Installed dev.kit" "installer reports success"
+assert_contains "$INSTALL_OUTPUT" "shell: unchanged" "installer leaves shell init untouched"
+
+DEV_KIT_HOME="$HOME/.udx/dev.kit"
+DEV_KIT_BIN_DIR="$HOME/.local/bin"
+
+assert_file_exists "$DEV_KIT_HOME/bin/dev-kit" "installs command source into dev.kit home"
+assert_file_exists "$DEV_KIT_HOME/lib/modules/bootstrap.sh" "installs internal modules"
+assert_file_exists "$DEV_KIT_HOME/lib/commands/status.sh" "installs public commands"
+assert_file_exists "$DEV_KIT_HOME/src/configs/audit-rules.yml" "installs source rule catalog"
+assert_file_missing "$DEV_KIT_HOME/source" "does not create legacy source directory"
+assert_file_missing "$DEV_KIT_HOME/state" "does not create legacy state directory"
+assert_file_missing "$DEV_KIT_HOME/config" "does not install a config layer"
+assert_symlink_target "$DEV_KIT_BIN_DIR/dev.kit" "$DEV_KIT_HOME/bin/dev-kit" "creates global dev.kit symlink"
+
+for profile in "${PROFILE_FILES[@]}"; do
+  assert_command_output_contains "cat \"$profile\"" "test sentinel" "$(basename "$profile") remains unchanged"
+done
+
+# Before sourcing the env script, the command must NOT be on PATH.
+if command -v dev.kit >/dev/null 2>&1; then
+  fail "command is not exposed before PATH setup"
+else
+  pass "command is not exposed before PATH setup"
+fi
+
+# --- Phase 2: env script exposes the command via PATH.
+# shellcheck disable=SC1090
+. "$DEV_KIT_HOME/bin/env/dev-kit.sh"
+
+assert_contains ":$PATH:" ":$DEV_KIT_BIN_DIR:" "env script prepends the user bin dir"
+
+if command -v dev.kit >/dev/null 2>&1; then
+  pass "command resolves after env setup"
+else
+  fail "command resolves after env setup"
+fi
+
+# --- Phase 3: status command (human and JSON output).
+status_output="$(dev.kit status)"
+assert_contains "$status_output" "state: installed" "status reports installed state"
+
+status_json="$(dev.kit status --json)"
+assert_contains "$status_json" "\"state\": \"installed\"" "status json reports installed state"
+
+# --- Phase 4: default audit against the fixture repo (human output).
+audit_output="$(cd "$FIXTURE_REPO" && dev.kit)"
+printf '%s\n' "--- dev.kit fixture output ---"
+printf '%s\n' "$audit_output"
+printf '%s\n' "--- end dev.kit fixture output ---"
+assert_contains "$audit_output" "repo: simple-repo" "audit reports the fixture repo name"
+assert_contains "$audit_output" "stack: node" "audit detects node repositories"
+assert_contains "$audit_output" "readme: missing" "audit reports missing readme"
+assert_contains "$audit_output" "test command: missing" "audit reports missing test command"
+assert_contains "$audit_output" "Add a README" "audit gives useful readme advice"
+assert_contains "$audit_output" "Add a runnable test command" "audit gives useful test advice"
+
+# --- Phase 5: audit JSON output.
+audit_json="$(cd "$FIXTURE_REPO" && dev.kit --json)"
+printf '%s\n' "--- dev.kit fixture json ---"
+printf '%s\n' "$audit_json"
+printf '%s\n' "--- end dev.kit fixture json ---"
+assert_contains "$audit_json" "\"command\": \"audit\"" "default json output is audit"
+assert_contains "$audit_json" "\"repo\": \"simple-repo\"" "audit json reports repo name"
+assert_contains "$audit_json" "\"readme\": \"missing\"" "audit json reports missing readme"
+assert_contains "$audit_json" "\"test_command\": \"missing\"" "audit json reports missing test command"
+assert_contains "$audit_json" "\"id\": \"missing-readme\"" "audit json includes readme finding"
+assert_contains "$audit_json" "\"id\": \"missing-test-command\"" "audit json includes test finding"
+
+# --- Phase 6: bridge command and capability discovery.
+bridge_json="$(cd "$FIXTURE_REPO" && dev.kit bridge --json)"
+assert_contains "$bridge_json" "\"command\": \"bridge\"" "bridge json is available"
+assert_contains "$bridge_json" "\"capabilities\": [\"audit\", \"bridge\", \"status\"]" "bridge exposes discovered capabilities"
+
+# --- Phase 7: help output is discovered dynamically.
+help_output="$(dev.kit help)"
+assert_contains "$help_output" "audit" "help discovers audit dynamically"
+assert_contains "$help_output" "status" "help discovers status dynamically"
+assert_contains "$help_output" "bridge" "help discovers bridge dynamically"
+
+# --- Phase 8: bash completion (function loaded by the env script above).
+if declare -F _dev_kit_complete >/dev/null 2>&1; then
+  pass "bash completion function is loaded"
+else
+  fail "bash completion function is loaded"
+fi
+
+# Completing the first word should list subcommands and global flags.
+COMP_WORDS=(dev.kit "")
+COMP_CWORD=1
+COMPREPLY=()
+_dev_kit_complete
+completion_list=" ${COMPREPLY[*]} "
+assert_contains "$completion_list" " status " "completion lists status"
+assert_contains "$completion_list" " bridge " "completion lists bridge"
+assert_contains "$completion_list" " audit " "completion lists audit"
+assert_contains "$completion_list" " --json " "completion lists global json flag"
+
+# Completing flags after a subcommand.
+COMP_WORDS=(dev.kit bridge --)
+COMP_CWORD=2
+COMPREPLY=()
+_dev_kit_complete
+bridge_completion_list=" ${COMPREPLY[*]} "
+assert_contains "$bridge_completion_list" " --json " "bridge completion lists json flag"
+
+# --- Phase 9: uninstall removes everything it installed, nothing else.
+UNINSTALL_OUTPUT="$("$DEV_KIT_HOME/bin/scripts/uninstall.sh")"
+assert_contains "$UNINSTALL_OUTPUT" "Removed binary:" "uninstall removes the global binary"
+assert_contains "$UNINSTALL_OUTPUT" "Removed home:" "uninstall removes the installed home"
+assert_file_missing "$DEV_KIT_BIN_DIR/dev.kit" "global symlink is removed"
+assert_file_missing "$DEV_KIT_HOME" "installed home is removed"
+
+for profile in "${PROFILE_FILES[@]}"; do
+  assert_command_output_contains "cat \"$profile\"" "test sentinel" "$(basename "$profile") remains unchanged after uninstall"
+done
+
+printf "ok - dev.kit integration suite completed\n"