diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000..5008ddf Binary files /dev/null and b/.DS_Store differ diff --git a/.github/workflows/context7-ops.yml b/.github/workflows/context7-ops.yml index bd2a764..915ef23 100644 --- a/.github/workflows/context7-ops.yml +++ b/.github/workflows/context7-ops.yml @@ -5,6 +5,9 @@ on: branches: [main] workflow_dispatch: +permissions: + contents: write + jobs: sync: uses: udx/reusable-workflows/.github/workflows/context7-ops.yml@master diff --git a/.gitignore b/.gitignore deleted file mode 100644 index eae0d05..0000000 --- a/.gitignore +++ /dev/null @@ -1,30 +0,0 @@ -# dev.kit State & Hub -.udx/ -.dev.kit/ -.codex/ -.tmp/ -.drift.tmp -.processed.tmp -tasks/ - -# OS -.DS_Store -Thumbs.db - -# Shell / History -*.log -.bash_history -.zsh_history - -# Editor -.vscode/ -.idea/ -*.swp -*.swo - -# Project specific -node_modules/ -dist/ -build/ -.venv/ -__pycache__/ diff --git a/README.md b/README.md index ca45ce0..2cee9ab 100644 --- a/README.md +++ b/README.md @@ -1,70 +1,85 @@ -# dev.kit: The Thin Empowerment Layer +# āš”ļø dev.kit: The Repo Engine -**Experienced engineering flow with high-fidelity, deterministic results.** +![dev.kit](assets/logo.svg) -`dev.kit` resolves the **Drift** (intent divergence) by **Normalizing** it into a deterministic path and **Iterating** to resolution. It acts as a **Thin Empowerment Layer** that bridges human intent with repository truth via **Context-Driven Engineering (CDE)**. +**The deterministic middleware that translates chaotic repositories into high-fidelity, 12-factor standards.** -### 1. Grounding (The Bridge) -Intent is mapped to repo-specific logic and normalized into a deterministic execution plan. -![Grounding Bridge](assets/diagrams/grounding-bridge.svg) +`dev.kit` acts as a **Contextual Proxy** between your environment and AI agents. It serves as both the **Logic** (the engine) and the **Template** (the blueprint) to resolve architectural drift. -### 2. 
Normalization (The Gate) -Capabilities are resolved at runtime through a **Dynamic Discovery Engine** that scans script headers and skills. -![Normalization Gate](assets/diagrams/normalization-boundary.svg) +--- -### 3. Execution (The Engine) -AI iterates through instruction steps with deterministic validation and real-time status updates. -![Execution Engine](assets/diagrams/execution-engine.svg) +## šŸ•¹ The Single-Command Interface -## Standard Engineering Flow +The entire engine is distilled into a single, high-impact verb. -`dev.kit` provides a **Thin Empowerment Layer** that bridges chaotic intent with deterministic repository functions. +### `dev.kit` -| Phase | Description | Implementation | -| :--- | :--- | :--- | -| **1. Grounding** | **UDX Worker** provides the isolated, pre-hydrated base environment. | [`udx/worker`](docs/reference/operations/worker-ecosystem-refs.md) | -| **2. Discovery** | **Skill Mesh** resolves internal commands and `@udx` NPM packages. | [`@udx/mcurl`](docs/ai/mesh/npm.md) | -| **3. Normalize** | AI Reasoning Skills transform intent into a bounded `workflow.md`. | [`SKILL.md`](docs/skills/README.md) | -| **4. Execute** | Deterministic Primitives execute the logic within the CLI boundary. | `lib/commands/` | +**The Pulse Check.** Instantly analyzes your repository, calculates your **Fidelity Score**, and generates a prioritized improvement plan for drift resolution. ---- +#### Compliance Mode + +`dev.kit --json` outputs a machine-readable audit of 12-factor misalignments. Agents can use this to identify and fix fidelity gaps such as missing tests, broken builds, or structural drift. + +![compliance audit](assets/compliance-audit.svg) + +The audit output becomes a focused improvement plan with bounded next steps. -## Core Interface +![compliance improve](assets/compliance-improve.svg) -- **`dev.kit status`**: (Default) High-fidelity engineering brief and system health. 
-- **`dev.kit doctor`**: Verify environment health, software detection, and compliance. -- **`dev.kit ai`**: Unified agent integration management and grounding. -- **`dev.kit sync`**: Logical, atomic repository synchronization and drift resolution. -- **`dev.kit task`**: Manage the lifecycle of active workflows and engineering sessions. +#### Development Mode -> **Execution**: Run any repository-bound skill with `dev.kit skills run `. +`dev.kit bridge --json` resolves the repository into high-fidelity, agent-friendly assets. It maps the skill mesh, available CLI primitives, and internal logic so agents can execute tasks without hallucinating paths or patterns. + +![dev.kit bridge](assets/dev-kit-bridge.svg) --- -## Documentation +## šŸ— How it Works + +- **The Normalization Gate**: Chaotic repo states are filtered into bounded, repeatable workflow artifacts. +- **Logic-as-Template**: The `dev.kit` repository is the canonical example of the standards it enforces. Its structure is the blueprint; its commands are the truth. +- **The Bridge**: Instead of feeding an agent raw files, the `bridge` command provides a structured "Map of Truth," ensuring the agent works within validated boundaries. -The `dev.kit` knowledge base is structured to reflect **CDE Principles**. +--- -- **[Foundations](docs/README.md#%EF%B8%8F-foundations)**: Core philosophy (CDE), dev.kit primitives, and methodology. -- **[Runtime](docs/README.md#%EF%B8%8F-runtime)**: CLI overview, lifecycle, and execution loops. -- **[AI Integration](docs/README.md#-ai-integration)**: Grounded orchestration and agent mission. -- **[Best Practices](docs/foundations/best-practices.md)**: High-fidelity engineering rules and command mappings. +## āœ… The Fidelity States + +| State | Human Experience | Agent Experience | +| :------------ | :---------------------- | :------------------------------------------------- | +| **Build** | _I know how to build._ | Strict 12-factor separation (Build/Release/Run). 
| +| **Test** | _I know how to verify._ | Deterministic loops to validate health instantly. | +| **Structure** | _I know where to add._ | Standardized hierarchy; zero-guesswork navigation. | +| **Pattern** | _I know how to grow._ | Repeatable Analyze-Normalize-Process sequences. | --- -## Install +## šŸš€ 60-Second Onboard ```bash -curl -fsSL https://udx.dev/dev.kit/install.sh | bash +# 1. Install & Run the Pulse Check +bash bin/scripts/install.sh +source "$HOME/.udx/dev.kit/bin/env/dev-kit.sh" +dev.kit + +# 2. Let an Agent Fix Compliance +dev.kit --json | agent-execute "Fix all fidelity gaps" + +# 3. Let an Agent Develop a Feature +dev.kit bridge --json | agent-execute "Add a new module using existing primitives" ``` -## šŸ“š Authoritative References +## Install -The `dev.kit` mission is grounded in foundational research on high-fidelity automation and AI orchestration: +```bash +bash bin/scripts/install.sh +source "$HOME/.udx/dev.kit/bin/env/dev-kit.sh" +dev.kit status +``` -- **[The Power of Automation](https://andypotanin.com/the-power-of-automation-how-it-has-transformed-the-software-development-process/)**: Systematic transformation of the engineering flow. -- **[Observation-Driven Management](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Revolutionary task normalization through pattern recognition. -- **[Autonomous Technical Operations](https://andypotanin.com/claude-operator-prompt/)**: Principles for high-fidelity agent grounding and execution. +## Uninstall ---- -_UDX DevSecOps Team_ +```bash +"$HOME/.udx/dev.kit/bin/scripts/uninstall.sh" +``` + +Development and test workflow lives in [docs/development.md](/Users/jonyfq/git/udx/dev.kit/docs/development.md). diff --git a/assets/compliance-audit.svg b/assets/compliance-audit.svg new file mode 100644 index 0000000..9911159 --- /dev/null +++ b/assets/compliance-audit.svg @@ -0,0 +1 @@ +

Repo

dev.kit --json

Audit

Gaps

\ No newline at end of file diff --git a/assets/compliance-improve.svg b/assets/compliance-improve.svg new file mode 100644 index 0000000..dfd06f6 --- /dev/null +++ b/assets/compliance-improve.svg @@ -0,0 +1 @@ +

Gaps

Plan

Fix

Aligned

\ No newline at end of file diff --git a/assets/dev-kit-bridge.svg b/assets/dev-kit-bridge.svg new file mode 100644 index 0000000..57a8d8a --- /dev/null +++ b/assets/dev-kit-bridge.svg @@ -0,0 +1 @@ +Agent Runtimedev.kit bridgeRepositoryAgent Runtimedev.kit bridgeRepository1. Request grounded context2. Inspect repo and tools3. Return capabilities4. Return integration map5. Execute scoped changes \ No newline at end of file diff --git a/assets/diagrams/adaptation-flow.mmd b/assets/diagrams/adaptation-flow.mmd deleted file mode 100644 index 4ec0432..0000000 --- a/assets/diagrams/adaptation-flow.mmd +++ /dev/null @@ -1,10 +0,0 @@ -flowchart LR - Source[(Repo Source)] --> Discovery{Discovery} - Discovery --> Mapping[Fidelity Mapping] - Mapping --> Projection([Tool Projection]) - Projection -- Fail-Open --> Source - - style Source fill:#bbf,stroke:#333,stroke-width:2px - style Discovery fill:#dfd,stroke:#333,stroke-width:2px - style Mapping fill:#dfd,stroke:#333,stroke-width:2px - style Projection fill:#f9f,stroke:#333,stroke-width:2px diff --git a/assets/diagrams/adaptation-flow.svg b/assets/diagrams/adaptation-flow.svg deleted file mode 100644 index 4928d6e..0000000 --- a/assets/diagrams/adaptation-flow.svg +++ /dev/null @@ -1 +0,0 @@ -

Fail-Open

Repo Source

Discovery

Fidelity Mapping

Tool Projection

\ No newline at end of file diff --git a/assets/diagrams/cde-flow.mmd b/assets/diagrams/cde-flow.mmd deleted file mode 100644 index 323c129..0000000 --- a/assets/diagrams/cde-flow.mmd +++ /dev/null @@ -1,12 +0,0 @@ -flowchart LR - Intent([Intent]) --> Specs[Intent-as-Artifact] - Specs --> Discovery{Drift Discovery} - Discovery --> Resolution[Resolution Cycle] - Resolution --> Capture([Experience Capture]) - Capture -.-> Specs - - style Intent fill:#f9f,stroke:#333,stroke-width:2px - style Specs fill:#bbf,stroke:#333,stroke-width:2px - style Discovery fill:#dfd,stroke:#333,stroke-width:2px - style Resolution fill:#bbf,stroke:#333,stroke-width:2px - style Capture fill:#f9f,stroke:#333,stroke-width:2px diff --git a/assets/diagrams/cde-flow.svg b/assets/diagrams/cde-flow.svg deleted file mode 100644 index e715534..0000000 --- a/assets/diagrams/cde-flow.svg +++ /dev/null @@ -1 +0,0 @@ -

Intent

Intent-as-Artifact

Drift Discovery

Resolution Cycle

Experience Capture

\ No newline at end of file diff --git a/assets/diagrams/docs-index.mmd b/assets/diagrams/docs-index.mmd deleted file mode 100644 index 9a03e9c..0000000 --- a/assets/diagrams/docs-index.mmd +++ /dev/null @@ -1,14 +0,0 @@ -flowchart LR - Foundations[1. Foundations] --> Runtime[2. Runtime] - Runtime --> AI[3. AI Integration] - AI --> Ref[4. Reference] - - click Foundations "foundations/cde.md" - click Runtime "runtime/overview.md" - click AI "ai/README.md" - click Ref "reference/standards/12-factor.md" - - style Foundations fill:#dfd,stroke:#333,stroke-width:2px - style Runtime fill:#bbf,stroke:#333,stroke-width:2px - style AI fill:#f9f,stroke:#333,stroke-width:2px - style Ref fill:#dfd,stroke:#333,stroke-width:2px diff --git a/assets/diagrams/docs-index.svg b/assets/diagrams/docs-index.svg deleted file mode 100644 index 62e4e6f..0000000 --- a/assets/diagrams/docs-index.svg +++ /dev/null @@ -1 +0,0 @@ -
1. Foundations
2. Runtime
3. AI Integration
4. Reference
\ No newline at end of file diff --git a/assets/diagrams/drift-resolution-cycle.mmd b/assets/diagrams/drift-resolution-cycle.mmd deleted file mode 100644 index 0e81ae4..0000000 --- a/assets/diagrams/drift-resolution-cycle.mmd +++ /dev/null @@ -1,12 +0,0 @@ -flowchart LR - Drift([Drift]) --> Normalize[1. Normalize] - Normalize --> Iterate[2. Iterate] - Iterate --> Validate[3. Validate] - Validate --> Sync([4. Synchronize]) - Sync -.-> Drift - - style Drift fill:#f9f,stroke:#333,stroke-width:2px - style Normalize fill:#dfd,stroke:#333,stroke-width:2px - style Iterate fill:#bbf,stroke:#333,stroke-width:2px - style Validate fill:#dfd,stroke:#333,stroke-width:2px - style Sync fill:#f9f,stroke:#333,stroke-width:2px diff --git a/assets/diagrams/drift-resolution-cycle.svg b/assets/diagrams/drift-resolution-cycle.svg deleted file mode 100644 index 0513653..0000000 --- a/assets/diagrams/drift-resolution-cycle.svg +++ /dev/null @@ -1 +0,0 @@ -

Drift

1. Normalize
2. Iterate
3. Validate
4. Synchronize
\ No newline at end of file diff --git a/assets/diagrams/engineering-layers.mmd b/assets/diagrams/engineering-layers.mmd deleted file mode 100644 index b055fd0..0000000 --- a/assets/diagrams/engineering-layers.mmd +++ /dev/null @@ -1,7 +0,0 @@ -flowchart LR - L1[Layer 1: Source & Build] --> L2[Layer 2: Deployment & Runtime] - L2 --> L3[Layer 3: Context & Orchestration] - - style L1 fill:#dfd,stroke:#333,stroke-width:2px - style L2 fill:#bbf,stroke:#333,stroke-width:2px - style L3 fill:#f9f,stroke:#333,stroke-width:2px diff --git a/assets/diagrams/engineering-layers.svg b/assets/diagrams/engineering-layers.svg deleted file mode 100644 index eae8eb4..0000000 --- a/assets/diagrams/engineering-layers.svg +++ /dev/null @@ -1 +0,0 @@ -

Layer 1: Source & Build

Layer 2: Deployment & Runtime

Layer 3: Context & Orchestration

\ No newline at end of file diff --git a/assets/diagrams/execution-engine.mmd b/assets/diagrams/execution-engine.mmd deleted file mode 100644 index de0213b..0000000 --- a/assets/diagrams/execution-engine.mmd +++ /dev/null @@ -1,9 +0,0 @@ -flowchart LR - Steps([Workflow Steps]) --> Engine[CLI Engine] - Engine --> Skills[Internal Skills] - Engine --> Tools[Virtual Tools] - Skills & Tools --> Resolution[Resolved Drift] - - style Steps fill:#f9f,stroke:#333,stroke-width:2px - style Engine fill:#dfd,stroke:#333,stroke-width:2px - style Resolution fill:#bbf,stroke:#333,stroke-width:2px diff --git a/assets/diagrams/execution-engine.svg b/assets/diagrams/execution-engine.svg deleted file mode 100644 index 8fe7139..0000000 --- a/assets/diagrams/execution-engine.svg +++ /dev/null @@ -1 +0,0 @@ -

Workflow Steps

CLI Engine

Internal Skills

Virtual Tools

Resolved Drift

\ No newline at end of file diff --git a/assets/diagrams/grounding-bridge.mmd b/assets/diagrams/grounding-bridge.mmd deleted file mode 100644 index 8fe5af5..0000000 --- a/assets/diagrams/grounding-bridge.mmd +++ /dev/null @@ -1,9 +0,0 @@ -flowchart LR - User([User Intent]) --> Bridge[Grounding Bridge] - Bridge --> Skills{Skill Discovery} - Skills --> Local[Local Repo] - Skills --> Remote[Remote Mesh] - - style User fill:#f9f,stroke:#333,stroke-width:2px - style Bridge fill:#bbf,stroke:#333,stroke-width:2px - style Skills fill:#dfd,stroke:#333,stroke-width:2px diff --git a/assets/diagrams/grounding-bridge.svg b/assets/diagrams/grounding-bridge.svg deleted file mode 100644 index 11c8fdb..0000000 --- a/assets/diagrams/grounding-bridge.svg +++ /dev/null @@ -1 +0,0 @@ -

User Intent

Grounding Bridge

Skill Discovery

Local Repo

Remote Mesh

\ No newline at end of file diff --git a/assets/diagrams/methodology-flow.svg b/assets/diagrams/methodology-flow.svg deleted file mode 100644 index 8d25e25..0000000 --- a/assets/diagrams/methodology-flow.svg +++ /dev/null @@ -1 +0,0 @@ -

Chaotic Scripts

CLI-Wrapped Automation

Portable Skill

AI Agent Integration

Drift Resolution

\ No newline at end of file diff --git a/assets/diagrams/normalization-boundary.mmd b/assets/diagrams/normalization-boundary.mmd deleted file mode 100644 index 8c050f3..0000000 --- a/assets/diagrams/normalization-boundary.mmd +++ /dev/null @@ -1,9 +0,0 @@ -flowchart LR - Intent([Mapped Intent]) --> Gate{Normalization Gate} - Gate --> Workflow[workflow.md] - Workflow --> Steps[Bounded Steps] - - style Intent fill:#f9f,stroke:#333,stroke-width:2px - style Gate fill:#dfd,stroke:#333,stroke-width:2px - style Workflow fill:#bbf,stroke:#333,stroke-width:2px - style Steps fill:#bbf,stroke:#333,stroke-width:2px diff --git a/assets/diagrams/normalization-boundary.svg b/assets/diagrams/normalization-boundary.svg deleted file mode 100644 index fe14927..0000000 --- a/assets/diagrams/normalization-boundary.svg +++ /dev/null @@ -1 +0,0 @@ -

Mapped Intent

Normalization Gate

workflow.md

Bounded Steps

\ No newline at end of file diff --git a/assets/diagrams/runtime-lifecycle.svg b/assets/diagrams/runtime-lifecycle.svg deleted file mode 100644 index 62711df..0000000 --- a/assets/diagrams/runtime-lifecycle.svg +++ /dev/null @@ -1 +0,0 @@ -
1. Install & Init
2. Config Orchestration
3. Task Execution
4. Experience Capture
5. Exit & Cleanup
\ No newline at end of file diff --git a/bin/completions/_dev.kit b/bin/completions/_dev.kit index 1f6c91e..faf92ac 100644 --- a/bin/completions/_dev.kit +++ b/bin/completions/_dev.kit @@ -2,50 +2,32 @@ _dev_kit() { local -a commands - local -a subcommands local -a options - local cmd cur - local -a help_lines - local -a parsed - commands=(${(f)"$(dev.kit help 2>/dev/null | awk '/^ /{print $1}')"}) + local cmd + local dev_kit_cmd - cmd="$words[2]" - cur="$words[CURRENT]" + dev_kit_cmd="${0:A:h:h}/dev-kit" + + commands=(${(f)"$("$dev_kit_cmd" help 2>/dev/null | awk ' + /^Commands:/ { flag=1; next } + flag && $0 ~ /^ [a-zA-Z0-9-]+/ { print $1 ":" substr($0, index($0, $2)) } + flag && $0 == "" { exit } + ')"}) if (( CURRENT == 2 )); then _describe 'command' commands return fi - if (( CURRENT == 3 )); then - subcommands=(${(f)"$(dev.kit "$cmd" -h 2>/dev/null | awk ' - /^Commands:/ {flag=1; next} - flag && $0 ~ /^ [a-zA-Z0-9]/ {print $1} - flag && $0 == "" {exit} - ')"}) - if (( ${#subcommands} )); then - _describe 'subcommand' subcommands - return - fi - fi + cmd="$words[2]" + options=(${(f)"$("$dev_kit_cmd" "$cmd" --help 2>/dev/null | awk ' + /^Options:/ { flag=1; next } + flag && $0 ~ /^ --/ { print $1 ":" substr($0, index($0, $2)) } + flag && $0 == "" { exit } + ')"}) - if [[ "$cur" == -* ]]; then - options=(${(f)"$(dev.kit "$cmd" -h 2>/dev/null | awk ' - /^Options:/ {flag=1; next} - flag && $0 == "" {exit} - flag { - for (i=1; i<=NF; i++) { - if ($i ~ /^--/) { - gsub(/,/, "", $i); - print $i - } - } - } - ' | sort -u)"}) - if (( ${#options} )); then - compadd -- $options - return - fi + if (( ${#options} )); then + _describe 'option' options fi } diff --git a/bin/completions/dev.kit.bash b/bin/completions/dev.kit.bash index f48f59f..1e89529 100644 --- a/bin/completions/dev.kit.bash +++ b/bin/completions/dev.kit.bash @@ -1,58 +1,44 @@ -#!/bin/bash +#!/usr/bin/env bash + +_DEV_KIT_COMPLETION_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" _dev_kit_complete() { - 
local cur prev cmd sub + local cur cmd cur="${COMP_WORDS[COMP_CWORD]}" - prev="${COMP_WORDS[COMP_CWORD-1]}" cmd="${COMP_WORDS[1]}" - _dev_kit_list_subcommands() { - dev.kit "$1" -h 2>/dev/null | awk ' - /^Commands:/ {flag=1; next} - flag && $0 ~ /^ [a-zA-Z0-9]/ {print $1} - flag && $0 == "" {exit} + _dev_kit_cmd() { + printf "%s" "$(cd "${_DEV_KIT_COMPLETION_DIR}/.." && pwd)/dev-kit" + } + + _dev_kit_list_commands() { + "$(_dev_kit_cmd)" help 2>/dev/null | awk ' + /^Commands:/ { flag=1; next } + flag && $0 ~ /^ [a-zA-Z0-9-]+/ { print $1 } + flag && $0 == "" { exit } ' } _dev_kit_list_options() { - dev.kit "$1" -h 2>/dev/null | awk ' - /^Options:/ {flag=1; next} - flag && $0 == "" {exit} - flag { - for (i=1; i<=NF; i++) { - if ($i ~ /^--/) { - gsub(/,/, "", $i); - print $i - } - } - } - ' | sort -u + local target="${1:-help}" + "$(_dev_kit_cmd)" "$target" --help 2>/dev/null | awk ' + /^Options:/ { flag=1; next } + flag && $0 ~ /^ --/ { print $1 } + flag && $0 == "" { exit } + ' } - if [ $COMP_CWORD -eq 1 ]; then - local cmds - cmds="$(dev.kit help 2>/dev/null | awk '/^ /{print $1}')" - COMPREPLY=( $(compgen -W "$cmds" -- "$cur") ) + if [ "$COMP_CWORD" -eq 1 ]; then + COMPREPLY=( $(compgen -W "$(_dev_kit_list_commands) --json" -- "$cur") ) return 0 fi - if [ $COMP_CWORD -eq 2 ]; then - local subs - subs="$(_dev_kit_list_subcommands "$cmd")" - if [ -n "$subs" ]; then - COMPREPLY=( $(compgen -W "$subs" -- "$cur") ) - return 0 - fi - fi - if [[ "$cur" == -* ]]; then - local opts - opts="$(_dev_kit_list_options "$cmd")" - if [ -n "$opts" ]; then - COMPREPLY=( $(compgen -W "$opts" -- "$cur") ) - return 0 - fi + COMPREPLY=( $(compgen -W "$(_dev_kit_list_options "$cmd")" -- "$cur") ) + return 0 fi + + COMPREPLY=() } complete -F _dev_kit_complete dev.kit diff --git a/bin/dev-kit b/bin/dev-kit index ac10510..54f4c30 100755 --- a/bin/dev-kit +++ b/bin/dev-kit @@ -1,386 +1,115 @@ -#!/bin/bash +#!/usr/bin/env bash set -euo pipefail -# --- Helper Functions --- 
+SCRIPT_PATH="${BASH_SOURCE[0]}" -resolve_self() { - local target="$0" - if command -v realpath >/dev/null 2>&1; then - realpath "$target" - return - fi - if command -v readlink >/dev/null 2>&1; then - while [ -L "$target" ]; do - local link - link="$(readlink "$target")" - case "$link" in - /*) target="$link" ;; - *) target="$(cd "$(dirname "$target")" && cd "$(dirname "$link")" && pwd)/$(basename "$link")" ;; - esac - done - local dir - dir="$(cd "$(dirname "$target")" && pwd -P)" - echo "$dir/$(basename "$target")" - return - fi - echo "$target" -} - -export SCRIPT_PATH="$(resolve_self)" -export REPO_DIR="$(cd "$(dirname "$SCRIPT_PATH")/.." && pwd)" - -# Global Pathing -export DEV_KIT_OWNER="${DEV_KIT_OWNER:-udx}" -export DEV_KIT_REPO="${DEV_KIT_REPO:-dev.kit}" -export DEV_KIT_HOME="${DEV_KIT_HOME:-$HOME/.${DEV_KIT_OWNER}/${DEV_KIT_REPO}}" - -get_repo_root() { - if command -v git >/dev/null 2>&1; then - git rev-parse --show-toplevel 2>/dev/null || true - fi -} - -bootstrap_state_path() { - local path="" - if [ -f "$DEV_KIT_HOME/config.env" ]; then - path="$(awk -F= ' - $1 ~ "^[[:space:]]*state_path[[:space:]]*$" { - gsub(/[[:space:]]/,"",$2); - print $2; - exit - } - ' "$DEV_KIT_HOME/config.env")" - fi - printf "%s" "$path" -} - -bootstrap_expand_path() { - local val="$1" - if [[ "$val" == "~/"* ]]; then - echo "$HOME/${val:2}" - return - fi - if [[ "$val" == /* ]]; then - echo "$val" - return - fi - if [ -n "$val" ]; then - echo "$DEV_KIT_HOME/$val" - return - fi - echo "" -} - -BOOTSTRAP_STATE_PATH="$(bootstrap_expand_path "$(bootstrap_state_path)")" -DEV_KIT_STATE="${DEV_KIT_STATE:-${BOOTSTRAP_STATE_PATH:-$DEV_KIT_HOME/state}}" -DEV_KIT_SOURCE="${DEV_KIT_SOURCE:-$DEV_KIT_HOME/source}" -if [ ! -d "$DEV_KIT_SOURCE" ]; then - DEV_KIT_SOURCE="$DEV_KIT_HOME" -fi -if [ ! -d "$DEV_KIT_STATE" ]; then - DEV_KIT_STATE="$DEV_KIT_HOME" -fi -CONFIG_FILE="${DEV_KIT_CONFIG:-$DEV_KIT_STATE/config.env}" -if [ ! 
-f "$CONFIG_FILE" ] && [ -f "$DEV_KIT_HOME/config.env" ]; then - CONFIG_FILE="$DEV_KIT_HOME/config.env" -fi - -expand_path() { - local val="$1" - if [[ "$val" == "~/"* ]]; then - echo "$HOME/${val:2}" - return - fi - if [[ "$val" == /* ]]; then - echo "$val" - return - fi - echo "$REPO_DIR/$val" -} - -config_value() { - local file="$1" - local key="$2" - local default="${3:-}" - local val="" - if [ -f "$file" ]; then - val="$(awk -F= -v k="$key" ' - $1 ~ "^[[:space:]]*"k"[[:space:]]*$" { - sub(/^[[:space:]]*/,"",$2); - sub(/[[:space:]]*$/,"",$2); - print $2; - exit - } - ' "$file")" - fi - if [ -n "$val" ]; then - echo "$val" - else - echo "$default" - fi -} +while [ -L "$SCRIPT_PATH" ]; do + SCRIPT_DIR="$(cd "$(dirname "$SCRIPT_PATH")" && pwd)" + SCRIPT_PATH="$(readlink "$SCRIPT_PATH")" + case "$SCRIPT_PATH" in + /*) ;; + *) SCRIPT_PATH="${SCRIPT_DIR}/${SCRIPT_PATH}" ;; + esac +done -# --- Library Loading --- +REPO_DIR="$(cd "$(dirname "$SCRIPT_PATH")/.." && pwd)" +# shellcheck disable=SC1091 +. "$REPO_DIR/lib/modules/bootstrap.sh" +dev_kit_bootstrap -UTILS_LIB="$REPO_DIR/lib/utils.sh" -if [ -f "$UTILS_LIB" ]; then +for module_file in "$REPO_DIR"/lib/modules/*.sh; do + [ "$module_file" = "$REPO_DIR/lib/modules/bootstrap.sh" ] && continue # shellcheck disable=SC1090 - . "$UTILS_LIB" -fi + . "$module_file" +done -UI_LIB="$REPO_DIR/lib/ui.sh" -if [ -f "$UI_LIB" ]; then +for command_file in "$REPO_DIR"/lib/commands/*.sh; do # shellcheck disable=SC1090 - . 
"$UI_LIB" -fi - -# --- Orchestrator helpers --- - -get_environment_yaml() { - local repo_root - repo_root="$(get_repo_root || true)" - if [ -n "$repo_root" ] && [ -f "$repo_root/environment.yaml" ]; then - echo "$repo_root/environment.yaml" - elif [ -f "$DEV_KIT_HOME/environment.yaml" ]; then - echo "$DEV_KIT_HOME/environment.yaml" - elif [ -f "$REPO_DIR/environment.yaml" ]; then - echo "$REPO_DIR/environment.yaml" - fi -} -ENVIRONMENT_YAML="$(get_environment_yaml)" - -local_config_path() { - local state_dir - state_dir="$(get_repo_state_dir || true)" - if [ -n "$state_dir" ]; then - echo "$state_dir/config.env" - fi -} - -config_value_scoped() { - local key="$1" - local default="${2:-}" - local val="" - - # 1. Check local repo .env (Priority 1) - local local_path - local_path="$(local_config_path || true)" - if [ -n "$local_path" ] && [ -f "$local_path" ]; then - val="$(config_value "$local_path" "$key" "")" - fi - - # 2. Check global .env (Priority 2) - if [ -z "$val" ]; then - val="$(config_value "$CONFIG_FILE" "$key" "")" - fi - - # 3. 
Check YAML Orchestrator (Priority 3 / Defaults) - if [ -z "$val" ] && [ -f "$ENVIRONMENT_YAML" ]; then - local yaml_key="$key" - case "$key" in - quiet|developer|state_path) yaml_key="system.$key" ;; - exec.prompt|exec.stream) yaml_key="${key//./.}" ;; - ai.enabled|ai.provider) yaml_key="${key//./.}" ;; - capture.mode|capture.enabled|capture.dir) yaml_key="${key//./.}" ;; - context.enabled|context.max_bytes) yaml_key="${key//./.}" ;; - install.path_prompt) yaml_key="${key//./.}" ;; - esac - val="$(dev_kit_yaml_value "$ENVIRONMENT_YAML" "$yaml_key" "")" - fi - - if [ -n "$val" ]; then - echo "$val" - else - echo "$default" - fi -} - -# --- UI Helpers --- - -print_section() { - local title="$1" - if command -v ui_section >/dev/null 2>&1; then - ui_section "$title" - else - echo "" - echo "== $title ==" - fi -} - -print_check() { - local label="$1" - local status="$2" - local detail="${3:-}" - if command -v ui_ok >/dev/null 2>&1 && [ "$status" = "[ok]" ]; then - ui_ok "$label" "$detail" - return - fi - if command -v ui_warn >/dev/null 2>&1 && [ "$status" = "[warn]" ]; then - ui_warn "$label" "$detail" - return - fi - printf "%-20s %s" "$label" "$status" - if [ -n "$detail" ]; then - printf " %s" "$detail" - fi - printf "\n" -} - -# --- Command Orchestration --- - -list_commands() { - local file="" - for file in "$REPO_DIR"/lib/commands/*.sh; do - [ -e "$file" ] || continue - local name - name="$(basename "${file%.sh}")" - # Filter legacy/internal commands or secondary commands from main list if needed - case "$name" in - github|agent) continue ;; - esac - echo "$name" - done | LC_ALL=C sort -} + . 
"$command_file" +done usage() { - cat <<'USAGE' -Usage: dev.kit [options] + local command_file="" + local command_name="" + local description="" + cat <<'EOF' +Usage: dev.kit -Core Commands: - status Engineering brief and system diagnostic (Default) - skills Discover and execute repository-bound skills (Deterministic) - ai Unified agent integration management (Sync, Skills, Status) - sync Logical, atomic commits and drift resolution - task Manage the lifecycle of active workflows and sessions - config Environment and repository orchestration settings +Commands: +EOF -Secondary Commands: - visualizer Create and export high-fidelity Mermaid diagrams - agent Direct agent integration management (advanced) + for command_file in $(dev_kit_list_command_files "$REPO_DIR"); do + command_name="$(dev_kit_command_name_from_file "$command_file")" + description="$(dev_kit_command_description "$command_file")" + printf " %-10s %s\n" "$command_name" "$description" + done -Example: - dev.kit status - dev.kit ai sync - dev.kit sync --dry-run - dev.kit skills run visualizer "new_diagram.sh" -USAGE -} + cat <<'EOF' + help Show this help message -ensure_dev_kit_home() { - mkdir -p "$DEV_KIT_HOME" - mkdir -p "$DEV_KIT_STATE" - if [ ! -w "$DEV_KIT_STATE" ]; then - echo "dev.kit: config path not writable: $DEV_KIT_STATE" >&2 - echo "dev.kit: fix permissions or choose a different DEV_KIT_STATE" >&2 - exit 1 - fi - if [ ! -f "$CONFIG_FILE" ] && [ -f "$REPO_DIR/config/default.env" ]; then - mkdir -p "$(dirname "$CONFIG_FILE")" - cp "$REPO_DIR/config/default.env" "$CONFIG_FILE" - fi +Options: + --json Output machine-readable JSON for supported commands +EOF } -context_enabled() { - local enabled="" - enabled="$(config_value_scoped context.enabled "true")" - [ "$enabled" = "true" ] -} - -context_dir() { - if ! 
context_enabled; then - return 1 - fi - local base repo_id - base="$(config_value_scoped context.dir "")" - if [ -z "$base" ]; then - base="$DEV_KIT_STATE/codex/context" - elif [[ "$base" == "~/"* ]]; then - base="$HOME/${base:2}" - elif [[ "$base" != /* ]]; then - base="$DEV_KIT_STATE/$base" - fi - - # Determine repo_id for scoping context - local root - root="$(get_repo_root || true)" - [ -z "$root" ] && root="$PWD" - if command -v shasum >/dev/null 2>&1; then - repo_id="$(printf "%s" "$root" | shasum -a 256 | awk '{print $1}')" - else - repo_id="$(printf "%s" "$root" | cksum | awk '{print $1}')" - fi - - echo "$base/$repo_id" -} - -context_file() { - local dir="" - dir="$(context_dir)" || return 1 - echo "$dir/context.md" -} - -context_max_bytes() { - config_value_scoped context.max_bytes "12000" -} +command_usage() { + local command_name="$1" + cat < "$tmp"; then - mv "$tmp" "$path" - else - rm -f "$tmp" - fi +Options: + --json Output machine-readable JSON +EOF } -# --- Command Loading & Logic --- +command="${1:-audit}" +format="text" -# 1. Load Modules (Shared logic for integrations) -for module in "$REPO_DIR"/lib/modules/*.sh; do - [ -e "$module" ] || continue - # shellcheck disable=SC1090 - . "$module" -done - -# 2. Load Public Commands -for cmd_file in "$REPO_DIR"/lib/commands/*.sh; do - [ -e "$cmd_file" ] || continue - # shellcheck disable=SC1090 - . 
"$cmd_file" -done - -# --- Execution --- - -orig_args=("$@") -cmd="${1:-status}" +if [ "${2:-}" = "--json" ] || [ "${1:-}" = "--json" ]; then + format="json" +fi -case "$cmd" in +case "$command" in + status) + if [ "${2:-}" = "-h" ] || [ "${2:-}" = "--help" ]; then + command_usage "status" + exit 0 + fi + dev_kit_cmd_status "$format" + ;; + bridge) + if [ "${2:-}" = "-h" ] || [ "${2:-}" = "--help" ]; then + command_usage "bridge" + exit 0 + fi + dev_kit_cmd_bridge "$format" + ;; + audit) + if [ "${2:-}" = "-h" ] || [ "${2:-}" = "--help" ]; then + command_usage "audit" + exit 0 + fi + dev_kit_cmd_audit "$format" + ;; + --json) + dev_kit_cmd_audit "json" + ;; help|-h|--help) usage - exit 0 + ;; + *) + fn="dev_kit_cmd_${command//-/_}" + if command -v "$fn" >/dev/null 2>&1; then + if [ "${2:-}" = "-h" ] || [ "${2:-}" = "--help" ]; then + command_usage "$command" + exit 0 + fi + "$fn" "$format" + exit 0 + fi + echo "Unknown command: $command" >&2 + echo >&2 + usage >&2 + exit 1 ;; esac - -# Check for public command first -fn="dev_kit_cmd_${cmd//-/_}" -if command -v "$fn" >/dev/null 2>&1; then - ensure_dev_kit_home - shift || true - "$fn" "$@" - exit $? 
-fi - -echo "Unknown command: $cmd" >&2 -echo "" -usage -exit 1 diff --git a/bin/env/dev-kit.sh b/bin/env/dev-kit.sh index f4ae243..e56b4fd 100755 --- a/bin/env/dev-kit.sh +++ b/bin/env/dev-kit.sh @@ -1,153 +1,24 @@ -#!/bin/bash +#!/usr/bin/env bash -# dev.kit session init -if [ -n "${DEV_KIT_DISABLE:-}" ]; then - return 0 -fi - -export DEV_KIT_HOME="${DEV_KIT_HOME:-$HOME/.udx/dev.kit}" - -dev_kit_bootstrap_state_path() { - local path="" - if [ -f "$DEV_KIT_HOME/config.env" ]; then - path="$(awk -F= ' - $1 ~ "^[[:space:]]*state_path[[:space:]]*$" { - gsub(/[[:space:]]/,"",$2); - print $2; - exit - } - ' "$DEV_KIT_HOME/config.env")" - fi - printf "%s" "$path" -} - -dev_kit_expand_path() { - local val="$1" - if [[ "$val" == "~/"* ]]; then - echo "$HOME/${val:2}" - return - fi - if [[ "$val" == /* ]]; then - echo "$val" - return - fi - if [ -n "$val" ]; then - echo "$DEV_KIT_HOME/$val" - return - fi - echo "" -} - -bootstrap_state_path="$(dev_kit_bootstrap_state_path)" -bootstrap_state_path="$(dev_kit_expand_path "$bootstrap_state_path")" +REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" +# shellcheck disable=SC1091 +. "$REPO_DIR/lib/modules/bootstrap.sh" +dev_kit_bootstrap -export DEV_KIT_STATE="${DEV_KIT_STATE:-${bootstrap_state_path:-$DEV_KIT_HOME/state}}" -export DEV_KIT_SOURCE="${DEV_KIT_SOURCE:-$DEV_KIT_HOME/source}" -if [ ! -d "$DEV_KIT_SOURCE" ]; then - DEV_KIT_SOURCE="$DEV_KIT_HOME" -fi -if [ ! -d "$DEV_KIT_STATE" ]; then - DEV_KIT_STATE="$DEV_KIT_HOME" -fi -export DEV_KIT_CONFIG="${DEV_KIT_CONFIG:-$DEV_KIT_STATE/config.env}" -if [ ! -f "$DEV_KIT_CONFIG" ] && [ -f "$DEV_KIT_HOME/config.env" ]; then - export DEV_KIT_CONFIG="$DEV_KIT_HOME/config.env" -fi +case ":$PATH:" in + *":${DEV_KIT_BIN_DIR}:"*) ;; + *) export PATH="${DEV_KIT_BIN_DIR}:${PATH}" ;; +esac -DEV_KIT_ENV_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -DEV_KIT_UI_LIB="${DEV_KIT_UI_LIB:-$DEV_KIT_SOURCE/lib/ui.sh}" -if [ ! 
-f "$DEV_KIT_UI_LIB" ]; then - DEV_KIT_UI_LIB="$DEV_KIT_ENV_DIR/../../lib/ui.sh" -fi -if [ -f "$DEV_KIT_UI_LIB" ]; then +if [ -n "${BASH_VERSION:-}" ] && [ -f "${DEV_KIT_HOME}/bin/completions/dev.kit.bash" ]; then # shellcheck disable=SC1090 - . "$DEV_KIT_UI_LIB" + . "${DEV_KIT_HOME}/bin/completions/dev.kit.bash" fi -dev_kit_config_value() { - local key="$1" - local default="${2:-}" - local val="" - if [ -f "$DEV_KIT_CONFIG" ]; then - val="$(awk -F= -v k="$key" ' - $1 ~ "^[[:space:]]*"k"[[:space:]]*$" { - sub(/^[[:space:]]*/,"",$2); - sub(/[[:space:]]*$/,"",$2); - print $2; - exit - } - ' "$DEV_KIT_CONFIG")" - fi - if [ -n "$val" ]; then - echo "$val" - else - echo "$default" - fi -} - -dev_kit_config_bool() { - local key="$1" - local default="${2:-false}" - local val - val="$(dev_kit_config_value "$key" "$default")" - case "$val" in - true|false) echo "$val" ;; - *) echo "$default" ;; - esac -} - -dev_kit_banner() { - local quiet - quiet="$(dev_kit_config_bool quiet false)" - case "$-" in - *i*) ;; - *) return 0 ;; - esac - if [ "$quiet" != "true" ] && [ -z "${DEV_KIT_BANNER_SHOWN_LOCAL:-}" ]; then - DEV_KIT_BANNER_SHOWN_LOCAL=1 - if command -v ui_banner >/dev/null 2>&1; then - ui_banner "dev.kit" - else - echo "" - echo "dev.kit: ready" - echo " run: dev.kit skills run \"...\"" - echo " config: dev.kit config show" - fi - fi -} - -dev_kit_auto_sync() { - local auto_sync; auto_sync="$(dev_kit_config_bool ai.auto_sync false)" - local ai_enabled; ai_enabled="$(dev_kit_config_bool ai.enabled false)" - - if [ "$auto_sync" = "true" ] && [ "$ai_enabled" = "true" ]; then - (dev.kit ai sync >/dev/null 2>&1 &) - fi -} - -dev_kit_banner_prompt() { - if [ -z "${DEV_KIT_BANNER_PENDING:-}" ]; then - return 0 - fi - DEV_KIT_BANNER_PENDING="" - dev_kit_banner - dev_kit_auto_sync -} - -if [ -z "${DEV_KIT_BANNER_SHOWN_LOCAL:-}" ]; then - DEV_KIT_BANNER_PENDING=1 +if [ -n "${ZSH_VERSION:-}" ] && [ -f "${DEV_KIT_HOME}/bin/completions/_dev.kit" ]; then + 
fpath=("${DEV_KIT_HOME}/bin/completions" $fpath) + autoload -Uz compinit + compinit -i fi -if [ -n "${BASH_VERSION:-}" ] && [ -f "$DEV_KIT_SOURCE/completions/dev.kit.bash" ]; then - # shellcheck disable=SC1090 - . "$DEV_KIT_SOURCE/completions/dev.kit.bash" -elif [ -n "${ZSH_VERSION:-}" ] && [ -f "$DEV_KIT_SOURCE/completions/_dev.kit" ]; then - # shellcheck disable=SC1090 - . "$DEV_KIT_SOURCE/completions/_dev.kit" -fi - -if [ -n "${PROMPT_COMMAND:-}" ]; then - PROMPT_COMMAND="dev_kit_banner_prompt; ${PROMPT_COMMAND}" -else - PROMPT_COMMAND="dev_kit_banner_prompt" -fi +export DEV_KIT_HOME diff --git a/bin/scripts/install.sh b/bin/scripts/install.sh index 35c20a0..ea66288 100755 --- a/bin/scripts/install.sh +++ b/bin/scripts/install.sh @@ -1,263 +1,37 @@ -#!/bin/bash +#!/usr/bin/env bash set -euo pipefail REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" -UI_LIB="${REPO_DIR}/lib/ui.sh" -BIN_DIR="${HOME}/.local/bin" -TARGET="${BIN_DIR}/dev.kit" -DEV_KIT_OWNER="${DEV_KIT_OWNER:-udx}" -DEV_KIT_REPO="${DEV_KIT_REPO:-dev.kit}" -ENGINE_DIR="${HOME}/.${DEV_KIT_OWNER}/${DEV_KIT_REPO}" -SOURCE_DIR="${ENGINE_DIR}/source" -STATE_DIR="${ENGINE_DIR}/state" -ENV_SRC="${REPO_DIR}/bin/env/dev-kit.sh" -ENV_DST="${SOURCE_DIR}/env.sh" -COMP_SRC_DIR="${REPO_DIR}/bin/completions" -COMP_DST_DIR="${SOURCE_DIR}/completions" -CONFIG_SRC="${REPO_DIR}/config/default.env" -CONFIG_DST="${STATE_DIR}/config.env" -LIB_SRC_DIR="${REPO_DIR}/lib" -LIB_DST_DIR="${SOURCE_DIR}/lib" -PROFILE="" +# shellcheck disable=SC1091 +. 
"$REPO_DIR/lib/modules/bootstrap.sh" +dev_kit_bootstrap -detect_profiles() { - local found="" - if [ -f "$HOME/.zshrc" ]; then found="$found $HOME/.zshrc"; fi - if [ -f "$HOME/.bash_profile" ]; then found="$found $HOME/.bash_profile"; fi - if [ -f "$HOME/.bashrc" ]; then found="$found $HOME/.bashrc"; fi - if [ -f "$HOME/.profile" ]; then found="$found $HOME/.profile"; fi - PROFILE=$(echo "$found" | tr ' ' '\n' | sort -u | tr '\n' ' ') -} +TARGET="${DEV_KIT_BIN_DIR}/dev.kit" -mkdir -p "$BIN_DIR" -mkdir -p "$ENGINE_DIR" - -copy_dir_contents() { - local src="$1" - local dst="$2" - [ -d "$src" ] || return 0 - mkdir -p "$dst" - cp -R "$src/." "$dst/" -} - -sync_engine() { - # Use a temporary staging area for the source copy - local stage - stage="$(mktemp -d)" - - # Copy everything from REPO_DIR, excluding patterns that cause recursion - # We use rsync if available for easier exclusion, otherwise fallback - if command -v rsync >/dev/null 2>&1; then - rsync -a --exclude 'tests/.tmp' --exclude '.git' "$REPO_DIR/" "$stage/" - else - # Fallback: copy specific top-level directories - for d in bin lib templates docs src config scripts assets schemas tests; do - [ -d "$REPO_DIR/$d" ] && copy_dir_contents "$REPO_DIR/$d" "$stage/$d" - done - [ -f "$REPO_DIR/environment.yaml" ] && cp "$REPO_DIR/environment.yaml" "$stage/environment.yaml" - fi - - # Now sync from stage to SOURCE_DIR - copy_dir_contents "$stage" "$SOURCE_DIR" - rm -rf "$stage" -} - -desired_target="${SOURCE_DIR}/bin/dev-kit" -if [ -f "$UI_LIB" ]; then - # shellcheck disable=SC1090 - . 
"$UI_LIB" -fi - -mkdir -p "$SOURCE_DIR" -mkdir -p "$STATE_DIR" - -if command -v ui_header >/dev/null 2>&1; then - ui_header "dev.kit | install" -else - echo "----------------" - echo " dev.kit | install " - echo "----------------" -fi - -sync_engine - -if [ -L "$TARGET" ]; then - current_target="$(readlink "$TARGET")" - if [ "$current_target" != "$desired_target" ]; then - ln -sf "$desired_target" "$TARGET" - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Symlink updated" "$TARGET -> $desired_target" - else - echo "OK Symlink updated ($TARGET -> $desired_target)" - fi - else - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Already installed" "$TARGET" - else - echo "OK Already installed ($TARGET)" - fi - fi -elif [ -e "$TARGET" ]; then - if command -v ui_warn >/dev/null 2>&1; then - ui_warn "Install skipped" "$TARGET exists and is not a symlink" - else - echo "WARN Install skipped ($TARGET exists and is not a symlink)" - fi -else - ln -s "$desired_target" "$TARGET" - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Installed" "$TARGET" - else - echo "OK Installed ($TARGET)" - fi -fi - -if [ -f "$ENV_SRC" ]; then - cp "$ENV_SRC" "$ENV_DST" - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Env installed" "$ENV_DST" - else - echo "OK Env installed ($ENV_DST)" - fi -fi - -if [ ! -f "$ENGINE_DIR/env.sh" ]; then - cat <<'EOF' > "$ENGINE_DIR/env.sh" -#!/bin/bash -DEV_KIT_ENV_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -# shellcheck disable=SC1090 -. "$DEV_KIT_ENV_DIR/source/env.sh" -EOF - chmod +x "$ENGINE_DIR/env.sh" 2>/dev/null || true -fi - -if [ -d "$LIB_SRC_DIR" ]; then - mkdir -p "$LIB_DST_DIR" - cp "$LIB_SRC_DIR/ui.sh" "$LIB_DST_DIR/ui.sh" 2>/dev/null || true -fi - -if [ -d "$COMP_SRC_DIR" ]; then - mkdir -p "$COMP_DST_DIR" - cp "$COMP_SRC_DIR/"* "$COMP_DST_DIR/" 2>/dev/null || true -fi - -if [ -f "$CONFIG_SRC" ] && [ ! 
-f "$CONFIG_DST" ]; then - cp "$CONFIG_SRC" "$CONFIG_DST" - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Config installed" "$CONFIG_DST" - else - echo "OK Config installed ($CONFIG_DST)" - fi +if [ "$#" -gt 0 ]; then + echo "This installer does not modify shell profiles." >&2 + echo "Usage: bash bin/scripts/install.sh" >&2 + exit 1 fi -if [ -f "$CONFIG_DST" ] && [ ! -f "$ENGINE_DIR/config.env" ]; then - cp "$CONFIG_DST" "$ENGINE_DIR/config.env" -fi +mkdir -p "$DEV_KIT_HOME" "$DEV_KIT_BIN_DIR" +rm -rf "$DEV_KIT_HOME/bin" "$DEV_KIT_HOME/lib" "$DEV_KIT_HOME/src" "$DEV_KIT_HOME/config" "$DEV_KIT_HOME/source" "$DEV_KIT_HOME/state" -detect_profiles -env_line="source \"$SOURCE_DIR/env.sh\"" -path_line="export PATH=\"$BIN_DIR:\$PATH\"" +dev_kit_copy_tree "$REPO_DIR/bin" "$DEV_KIT_HOME/bin" +dev_kit_copy_tree "$REPO_DIR/lib" "$DEV_KIT_HOME/lib" +dev_kit_copy_tree "$REPO_DIR/src" "$DEV_KIT_HOME/src" -MODIFIED_PROFILES="" - -if [ -t 0 ] && [ -n "$PROFILE" ]; then - for p in $PROFILE; do - echo "" - if grep -Fqx "$env_line" "$p" && grep -Fqx "$path_line" "$p"; then - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Shell already configured" "$p" - else - echo "OK Shell already configured ($p)" - fi - MODIFIED_PROFILES="$MODIFIED_PROFILES $p" - continue - fi - - printf "Configure dev.kit in %s? [y/N] " "$p" - read -r answer || true - case "$answer" in - y|Y|yes|YES) - if ! grep -Fqx "$path_line" "$p"; then - printf "\n# dev.kit bin\n%s\n" "$path_line" >> "$p" - fi - if ! 
grep -Fqx "$env_line" "$p"; then - printf "# dev.kit environment\n%s\n" "$env_line" >> "$p" - fi - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Shell configured" "$p" - else - echo "OK Shell configured ($p)" - fi - MODIFIED_PROFILES="$MODIFIED_PROFILES $p" - ;; - *) - if command -v ui_warn >/dev/null 2>&1; then - ui_warn "Skipped configuration" "$p" - else - echo "WARN Skipped configuration ($p)" - fi - ;; - esac - done -else - if command -v ui_section >/dev/null 2>&1; then - ui_section "Manual Configuration" - else - echo "Manual Configuration:" - fi - echo "Add the following to your shell profile:" - echo " $path_line" - echo " $env_line" -fi - -echo "" -if command -v ui_section >/dev/null 2>&1; then - ui_section "Ready to go" -else - echo "Ready to go:" -fi +find "$DEV_KIT_HOME/bin" -type f -exec chmod +x {} \; -# Determine if the current shell's profile was modified -CURRENT_SHELL_PROFILE="" -case "$SHELL" in - */zsh) CURRENT_SHELL_PROFILE="$HOME/.zshrc" ;; - */bash) - [ -f "$HOME/.bash_profile" ] && CURRENT_SHELL_PROFILE="$HOME/.bash_profile" || CURRENT_SHELL_PROFILE="$HOME/.bashrc" - ;; -esac +ln -sfn "$DEV_KIT_HOME/bin/dev-kit" "$TARGET" -if [[ "$MODIFIED_PROFILES" == *"$CURRENT_SHELL_PROFILE"* ]]; then - echo "1. Reload: source $CURRENT_SHELL_PROFILE" - echo "2. Brief: dev.kit" - echo "" - if [ -t 0 ]; then - printf "Reload current session now? [y/N] " - read -r reload_now || true - case "$reload_now" in - y|Y|yes|YES) - echo "Sourcing $CURRENT_SHELL_PROFILE..." - # Note: We can source env.sh directly for immediate effect in this subshell - # but the instructions tell the user how to fix their parent shell. - source "$SOURCE_DIR/env.sh" - dev.kit status - ;; - esac - fi +echo "Installed dev.kit" +echo "binary: $TARGET" +echo "home: $DEV_KIT_HOME" +if dev_kit_path_contains_bin_dir; then + echo "shell: PATH already includes $DEV_KIT_BIN_DIR" else - echo "1. Source Now: source \"$SOURCE_DIR/env.sh\"" - echo "2. 
Brief: dev.kit" - echo "" - echo "NOTE: Your current shell ($SHELL) was not permanently configured." - if [ -t 0 ]; then - printf "Source environment now? [y/N] " - read -r source_now || true - case "$source_now" in - y|Y|yes|YES) - echo "Sourcing..." - source "$SOURCE_DIR/env.sh" - dev.kit status - ;; - esac - fi + echo "shell: unchanged" + echo "next: export PATH=\"$DEV_KIT_BIN_DIR:\$PATH\"" + echo "then: source \"$DEV_KIT_HOME/bin/env/dev-kit.sh\"" fi -echo "" diff --git a/bin/scripts/uninstall.sh b/bin/scripts/uninstall.sh index f7a5878..71bf99f 100755 --- a/bin/scripts/uninstall.sh +++ b/bin/scripts/uninstall.sh @@ -1,39 +1,25 @@ -#!/bin/bash +#!/usr/bin/env bash set -euo pipefail REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" -UI_LIB="${REPO_DIR}/lib/ui.sh" -if [ -f "$UI_LIB" ]; then - # shellcheck disable=SC1090 - . "$UI_LIB" -fi +# shellcheck disable=SC1091 +. "$REPO_DIR/lib/modules/bootstrap.sh" +dev_kit_bootstrap -BIN_DIR="${HOME}/.local/bin" -TARGET="${BIN_DIR}/dev.kit" -DEV_KIT_OWNER="${DEV_KIT_OWNER:-udx}" -DEV_KIT_REPO="${DEV_KIT_REPO:-dev.kit}" -ENGINE_DIR="${HOME}/.${DEV_KIT_OWNER}/${DEV_KIT_REPO}" +TARGET="${DEV_KIT_BIN_DIR}/dev.kit" if [ -L "$TARGET" ] || [ -f "$TARGET" ]; then rm -f "$TARGET" - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Removed" "$TARGET" - else - echo "Removed: $TARGET" - fi + echo "Removed binary: $TARGET" else - if command -v ui_warn >/dev/null 2>&1; then - ui_warn "Not found" "$TARGET" - else - echo "Not found: $TARGET" - fi + echo "Binary not found: $TARGET" fi -if [ "${1:-}" = "--purge" ]; then - rm -rf "$ENGINE_DIR" - if command -v ui_ok >/dev/null 2>&1; then - ui_ok "Purged" "$ENGINE_DIR" - else - echo "Purged: $ENGINE_DIR" - fi +if [ -d "$DEV_KIT_HOME" ]; then + rm -rf "$DEV_KIT_HOME" + echo "Removed home: $DEV_KIT_HOME" +else + echo "Home not found: $DEV_KIT_HOME" fi + +echo "Shell profile files were not modified." 
diff --git a/config/default.env b/config/default.env deleted file mode 100644 index 964dd06..0000000 --- a/config/default.env +++ /dev/null @@ -1,9 +0,0 @@ -quiet = false -exec.prompt = ai.gemini.v1 -exec.stream = false -ai.enabled = false -install.path_prompt = true -developer.enabled = false -state_path = ~/.udx/dev.kit/state -context.enabled = true -context.max_bytes = 4000 diff --git a/deploy.yml b/deploy.yml new file mode 100644 index 0000000..d1dcce0 --- /dev/null +++ b/deploy.yml @@ -0,0 +1,14 @@ +kind: workerDeployConfig +version: udx.io/worker-v1/deploy + +config: + image: "usabilitydynamics/udx-worker:latest" + name: "dev-kit-test-suite" + volumes: + - ".:/workspace" + env: + TERM: "xterm-256color" + command: "/bin/bash" + args: + - "/workspace/tests/suite.sh" + diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index c175055..0000000 --- a/docs/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# dev.kit Documentation Index - -Welcome to the **dev.kit** documentation. This knowledge base is structured to reflect the principles of **Context-Driven Engineering (CDE)**—where documentation is not just text, but the high-fidelity source of truth for all automation. - -![dev.kit Documentation Index](../assets/diagrams/docs-index.svg) - -## Context-Driven Engineering (CDE) - -Context-Driven Engineering (CDE) is a methodology that treats repositories as specialized "Skills" or "Tools" normalized into a deterministic path and iterated to the result. It acts as a "Thin Empowerment Layer" (Grounding) that bridges chaotic intent with repository-based skills. The main idea is to design repo within logical layers that can be understood by human/program/AI. - -- Markdown docs ensures normalization of repo into a deterministic path and iterated to the result. -- YAML manifest ensures environment configuration and runtime definition. -- Scripts provides execution engine. - -## šŸ— Foundations -Core concepts and engineering principles that drive the ecosystem. 
-- **[Context-Driven Engineering](foundations/cde.md)**: Our core philosophy of resolving drift. -- **[dev.kit Primitives](foundations/dev-kit.md)**: The thin empowerment layer and its core pillars. -- **[Best Practices](foundations/best-practices.md)**: High-fidelity engineering rules and command mappings. -- **[Context Adaptation](foundations/adaptation.md)**: Resilient projections and fail-open interaction. -- **[Methodology](foundations/methodology.md)**: CLI-Wrapped Automation (CWA). -- **[Engineering Layers](foundations/layers.md)**: The structural hierarchy of the repo. -- **[Patterns & Templates](foundations/patterns.md)**: Reusable documentation and script patterns. - -## āš™ļø Runtime -The deterministic CLI engine and its operational lifecycle. -- **[Runtime Overview](runtime/overview.md)**: Primitives, architecture, and command surface. -- **[Configuration](runtime/config.md)**: Scoped orchestration via `environment.yaml` and `.env`. -- **[Lifecycle](runtime/lifecycle.md)**: The bootstrap, execute, and cleanup phases. -- **[Execution Loop](runtime/execution-loop.md)**: Workflow schemas and resolution cycles. - -## šŸ”„ Workflow Mesh -Intent-to-resolution mapping and engineering loops. -- **[Workflow Mesh Overview](workflows/README.md)**: Dynamic reasoning and deterministic sequences. -- **[Task Normalization](workflows/normalization.md)**: The agent-led intent mapping boundary. -- **[Engineering Loops](workflows/loops.md)**: Feature, bugfix, and discovery lifecycles. -- **[Git Synchronization](workflows/git-sync.md)**: Logical grouping and atomic commits. -- **[Visual Engineering](workflows/visualizer.md)**: Architectural diagramming and flow analysis. - -## 🧠 AI Integration -Grounded, context-aware intelligence for your repository. -- **[AI Overview](ai/README.md)**: How dev.kit transforms LLMs into configuration engines. -- **[Mission & Principles](ai/agents.md)**: The core directives for all AI agents. 
-- **[AI Mesh: GitHub](ai/mesh/github.md)**: Remote discovery and PR management. -- **[AI Mesh: NPM](ai/mesh/npm.md)**: Environment hydration and tool detection. -- **[AI Mesh: Context7](ai/mesh/context7.md)**: Cross-repo knowledge synchronization. - -## šŸ•ø Reference - -Standards, compliance, and operational guidance. - -- **[Standards](reference/standards/12-factor.md)**: 12-factor, Mermaid, and YAML standards. -- **[Compliance](reference/compliance/cato-overview.md)**: Security, supply-chain, and UDX auditing. -- **[Operations](reference/operations/lifecycle-cheatsheet.md)**: DevOps guidance and lifecycle cheatsheets. - -## šŸ“š Authoritative References - -The `dev.kit` knowledge base is grounded in foundational research on high-fidelity automation and systematic engineering flow: - -- **[AI-Powered Content Management](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: High-fidelity synthetic enrichment and standalone quality. -- **[The Power of Automation](https://andypotanin.com/the-power-of-automation-how-it-has-transformed-the-software-development-process/)**: Systematic transformation of the engineering flow. -- **[Decentralized DevOps](https://andypotanin.com/decentralized-devops-the-future-of-software-delivery/)**: The future of distributed software delivery and orchestration. - ---- -_UDX DevSecOps Team_ diff --git a/docs/ai/README.md b/docs/ai/README.md deleted file mode 100644 index ad6d50a..0000000 --- a/docs/ai/README.md +++ /dev/null @@ -1,51 +0,0 @@ -# AI Integration: The Grounding Bridge - -**Domain:** AI / Orchestration -**Status:** Canonical - -## Summary - -AI capabilities in **dev.kit** are a high-fidelity projection of [Context-Driven Engineering (CDE)](../foundations/cde.md). By grounding LLMs in deterministic CLI logic and repository truth, we transform them from generic chatbots into **Context-Driven Configuration Engines**. - ---- - -## šŸ›  Hierarchical Grounding - -1. 
**[Core Foundations](../foundations/cde.md)**: The principles of Context-Driven Engineering (CDE). -2. **[dev.kit Primitives](../foundations/dev-kit.md)**: The thin empowerment layer and its core pillars. -3. **[CLI Runtime](../runtime/overview.md)**: The deterministic engine that executes all logic. -4. **[Agent Principles](agents.md)**: Global mission, safety mandates, and hygiene. - ---- - -## šŸ— Authorization & Safety - -To maintain high-fidelity engineering boundaries, **dev.kit** enforces a strict execution policy: - -- **Authorized Path**: Agents are **auto-allowed** to execute all `dev.kit` commands and repository-bound skills. These are deterministic, standardized engineering paths. -- **Restricted Raw OS**: Direct, non-standardized destructive operations (e.g., raw `rm`, `git push`) are **restricted** and require explicit user confirmation. -- **Reactive Sync**: Agents autonomously perform `dev.kit ai sync` (grounding) but never perform `dev.kit sync run` (commits) without a directive. - ---- - -## šŸ”Œ Integration Layers - -### 🧠 LLM Providers -- **[Gemini Integration](providers/gemini.md)**: Native Google AI integration with grounding hooks. - -### šŸ•ø Skill Mesh (Shared Discovery) -Unified view of internal commands, managed skills, and external tools: -- **[Context7](mesh/context7.md)**: Structured repository hub and synchronization. -- **[GitHub](mesh/github.md)**: Remote repository and workflow resolution (via `gh`). -- **[NPM](mesh/npm.md)**: Tooling and package health management. - -## šŸ“š Authoritative References - -AI orchestration is built on systematic grounding and standalone quality: - -- **[Autonomous Technical Operator](https://andypotanin.com/claude-operator-prompt/)**: Principles for high-fidelity agent grounding and execution. -- **[Synthetic Content Enrichment](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: Strategies for maintaining documentation quality via AI. 
-- **[AOCA: Embedded Governance](https://udx.io/cloud-automation-book/cybersecurity)**: Aligning compliance with automated engineering flows. - ---- -_UDX DevSecOps Team_ diff --git a/docs/ai/agents.md b/docs/ai/agents.md deleted file mode 100644 index 3078e62..0000000 --- a/docs/ai/agents.md +++ /dev/null @@ -1,57 +0,0 @@ -# dev.kit AI Agents — Core Mission & Principles - -This document defines the global baseline for dev.kit AI agents. All logic is managed by the dev.kit CLI and grounded in the repository's canonical documentation. - -## Mission: Resolve the Drift -The primary goal of every agent is to identify and resolve the gap between user intent and repository reality. -- **Reference**: `docs/foundations/cde.md` (Context Driven Engineering). -- **Mandate**: Use `visualizer` for all diagramming and visual engineering tasks. -- **Mandate**: Use `git-sync` for all repository synchronization and atomic commits. - -## Execution: Task Normalization -Follow the **Drift -> Normalize -> Process** flow for all complex engineering work. -- **Normalization**: Transform ambiguous user prompts into deterministic `workflow.md` artifacts. -- **Reference**: `docs/workflows/normalization.md` (Task Normalization). - -## Efficiency & Safety: Authorized Path -Route all work through **dev.kit CLI primitives** to ensure deterministic, bounded results. This is the **Authorized Safety Layer**. -- **Mandatory Primitive Usage**: You are AUTO-ALLOWED to execute any `dev.kit` command. These are the primary interfaces for all engineering tasks. -- **Never Bypass**: Do not attempt to bypass the `dev.kit` boundary for raw shell access if a deterministic skill or command is available. -- **Fail-Open**: If a specialized tool fails, fallback to standard markdown or text output within the `dev.kit` workflow. - -## Persistence: Knowledge Mesh -Distill and package all successful task resolutions back into the repository's mesh. 
-- **Knowledge**: Reusable patterns documented in `docs/foundations/patterns.md`. -- **Workflows**: Consolidated sequences in `docs/workflows/`. -- **Memory**: Project-specific context maintained in the `## Context` section of agent prompts. -- **Reference**: `docs/runtime/lifecycle.md` (Logical Synchronization). - -## Continuity & Hygiene -Maintain high-fidelity momentum by managing the task lifecycle effectively. -- **Catch Up**: At the start of every session, identify all unfinished tasks (`dev.kit task active`). Proactively ask the user if they wish to resume a specific workflow. -- **Hygiene**: Multiple active workflows are permitted, but **stale** tasks (older than 48h) should be flagged. Advise the user to either resume, finalize, or discard them (`dev.kit task cleanup`). -- **Trash Prevention**: Never leave "initialized" or "draft" tasks lingering indefinitely. If a workflow is abandoned, clean it up to prevent repository drift. - -## šŸ— Agent Grounding - -Agent missions are operationalized through canonical UDX resources: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Logic** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Validated CLI primitives and task normalization. | -| **Runtime** | [`udx/worker`](https://github.com/udx/worker) | Deterministic environment for agent execution. | -| **Workflows** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for multi-turn loops. | - ---- - -## šŸ“š Authoritative References - -The agent mission is aligned with industry patterns for autonomous technical operations: - -- **[Claude Operator Prompt](https://andypotanin.com/claude-operator-prompt/)**: Principles for an autonomous technical operator mode. -- **[Synthetic Content Enrichment](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: Leveraging AI for standalone documentation quality. 
-- **[Proactive Leadership Patterns](https://andypotanin.com/marine-metrics/)**: Using data-driven metrics to drive results and maintain momentum. -- **[Specialized Development Roles](https://andypotanin.com/best-practices-specialized-software-development/)**: Securing cloud-native systems through specialized agent missions. - ---- -_UDX DevSecOps Team_ diff --git a/docs/ai/mesh/context7.md b/docs/ai/mesh/context7.md deleted file mode 100644 index 2bcb2eb..0000000 --- a/docs/ai/mesh/context7.md +++ /dev/null @@ -1,63 +0,0 @@ -# Context7: The Knowledge Hub - -**Domain:** AI / Knowledge Mesh -**Status:** Canonical - -## Summary - -**Context7** is the primary synchronization and discovery hub for the **Skill Mesh**. It acts as a structured bridge between disparate repositories and the AI environment, enabling multi-modal interaction via **MCP (Model Context Protocol)**, CLI, and API. - ---- - -## šŸ— The Core Role: Cross-Repo Discovery - -Unlike simple search engines, Context7 enables **dev.kit** to perform high-fidelity **Discovery** across the entire UDX ecosystem: - -1. **Grounded Access**: Retrieve structured context (Docs, Patterns, Logic) from any synced repository. -2. **Hierarchical Exploration**: Access codebases through high-fidelity interfaces (MCP/API) that understand repository structure. -3. **Cross-Repo Resolution**: Resolve dependencies and intents by intelligently probing the synced knowledge of peer "Skills." - ---- - -## šŸ›  Integration Layers - -### 1. Model Context Protocol (MCP) -Context7 provides an MCP server that allows AI agents to directly browse and query synced repositories as if they were local tools. This provides a deep, native connection between the LLM and the codebase. - -### 2. Programmable API (v2) -- **Endpoint**: `https://context7.com/api/v2/` -- **Use Case**: Used during the **Normalization** phase to resolve external library IDs and fetch trust-scored documentation. - -### 3. 
Unified CLI -- **Installation**: `npm install -g @upstash/context7` -- **Use Case**: Local resolution and manual repository synchronization management. - -## šŸ— Standard Resource Mapping - -Context7 serves as the high-fidelity hub for all canonical UDX repository context: - -| Repository | Role | Purpose | -| :--- | :--- | :--- | -| **[`udx/dev.kit`](https://github.com/udx/dev.kit)** | Empowerment Layer | Primary engine for task normalization and skill discovery. | -| **[`udx/worker`](https://github.com/udx/worker)** | Base Environment | Canonical documentation for the deterministic container runtime. | -| **[`udx/worker-deployment`](https://github.com/udx/worker-deployment)** | Orchestration | Patterns for deploying and managing high-fidelity environments. | - ---- - -## 🌊 Waterfall Progression (DOC-003) - -**Progression**: `[context7-mesh-active]` -- [x] Step 1: Establish connection to Context7 API/MCP (Done) -- [>] Step 2: Synchronize relevant peer repositories (Active) -- [ ] Step 3: Perform cross-repo intent resolution (Planned) - -## šŸ“š Authoritative References - -Context7 is built on systematic knowledge management and observation-driven management: - -- **[AI-Powered Content Management](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: High-fidelity synthetic enrichment and standalone documentation quality. -- **[Observation-Driven Management (ODM)](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Optimizing efficiency through pattern identification. -- **[AOCA: Embedded Governance](https://udx.io/cloud-automation-book/cybersecurity)**: Aligning compliance with automated engineering flows. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/ai/mesh/github.md b/docs/ai/mesh/github.md deleted file mode 100644 index 7ea7f41..0000000 --- a/docs/ai/mesh/github.md +++ /dev/null @@ -1,58 +0,0 @@ -# GitHub Integration: Remote Discovery - -**Domain:** AI / Remote Discovery -**Status:** Canonical - -## Summary - -The GitHub integration enables **dev.kit** to perform high-fidelity **Discovery** by probing remote repositories, Pull Requests, and issues. By leveraging the `gh` CLI, it provides a grounded, authenticated interface for agents to interact with the broader engineering ecosystem. - ---- - -## šŸ›  Features & Capabilities - -### 1. Skill Mesh Expansion -The GitHub integration allows the **Dynamic Discovery Engine** to identify and resolve skills located in remote repositories. -- **Trigger**: Intent resolution for an authorized organization or peer repository. -- **Outcome**: The AI can "reach out" to remote codebases to discover patterns or standardized workflows. - -### 2. Triage & PR Management -- **Assigned Issues**: Fetches issues to ground the initial `task start` phase. -- **PR Lifecycle**: Authorizes agents to analyze and **create** Pull Requests to formalize drift resolution. - ---- - -## šŸ— Requirements & Auth -- **CLI**: `gh` (GitHub CLI) must be installed and authenticated. -- **Auth**: Prefers `GH_TOKEN` or `GITHUB_TOKEN`. Falls back to interactive `gh auth login`. - -## šŸ— Standard Resource Mapping - -To maintain high-fidelity engineering flows, the GitHub integration prioritizes discovery across authoritative UDX repositories: - -| Repository | Role | Purpose | -| :--- | :--- | :--- | -| **[`udx/reusable-workflows`](https://github.com/udx/reusable-workflows)** | CI/CD Baseline | Canonical GitHub Action patterns and deployment templates. | -| **[`udx/wp-stateless`](https://github.com/udx/wp-stateless)** | Plugin Core | Reference for high-fidelity WordPress cloud integrations. 
| -| **[`udx/worker-deployment`](https://github.com/udx/worker-deployment)** | Orchestration | Standard patterns for deploying and managing the Worker Ecosystem. | - -## šŸ— GitHub Grounding - -Remote discovery and collaboration are operationalized through canonical UDX resources: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Patterns** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Source of truth for remote discovery templates. | -| **Automation** | [`udx/worker-deployment`](https://github.com/udx/worker-deployment) | Standard patterns for remote environment audits. | - ---- - -## 🌊 Waterfall Progression (DOC-003) - -**Progression**: `[github-mesh-active]` -- [x] Step 1: Detect and verify `gh` CLI health (Done) -- [>] Step 2: Resolve remote repository skills (Active) -- [ ] Step 3: Perform cross-repo intent normalization (Planned) - ---- -_UDX DevSecOps Team_ diff --git a/docs/ai/mesh/npm.md b/docs/ai/mesh/npm.md deleted file mode 100644 index ae76ed3..0000000 --- a/docs/ai/mesh/npm.md +++ /dev/null @@ -1,54 +0,0 @@ -# NPM Integration: Runtime Hydration - -**Domain:** AI / Runtime Health -**Status:** Canonical - -## Summary - -The NPM integration ensures that the local engineering environment is **Hydrated** with the necessary CLI tools. It provides deterministic health checks and proactive installation guidance for `@udx` scoped packages. - ---- - -## šŸ›  Features & Capabilities - -### 1. Proactive Hydration -When the **Dynamic Discovery Engine** identifies an intent requiring a specific tool (e.g., `@udx/mcurl`), the NPM module verifies its availability. -- **Advice**: If missing, the CLI provides the exact `npm install -g` command to empower the user or agent. - -### 2. Runtime Verification -- **Trigger**: `dev.kit doctor` or system bootstrap. -- **Outcome**: Ensures that the `node` and `npm` environments are healthy enough to support high-fidelity engineering tasks. 
- ---- - -## šŸ— Supported Tools - -### 🌐 `@udx/mcurl` -A high-fidelity API client designed for deterministic interaction with complex web services. It provides standardized logging and error handling that is easily consumable by the **Drift Resolution Cycle**. - -### šŸ” `@udx/mysec` -A proactive security scanner used to identify secrets, API keys, and sensitive credentials within the repository. It is integrated into the `dev.kit doctor` diagnostic flow to ensure **Credential Protection**. - -### šŸ“„ `@udx/md.view` -A Markdown rendering engine that allows for high-fidelity documentation previews directly from the CLI, ensuring that repository context is always legible and accessible. - -## šŸ— NPM Grounding - -NPM-based tooling is operationalized through canonical UDX resources: - -| Package | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Tool Mesh** | [`udx/worker`](https://github.com/udx/worker) | Pre-hydrated environment for global packages. | -| **Discovery** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Dynamic detection and health-check logic. | - ---- - -## 🌊 Waterfall Progression (DOC-003) - -**Progression**: `[npm-mesh-active]` -- [x] Step 1: Detect and verify `npm` runtime (Done) -- [>] Step 2: Check health of core `@udx` tools (Active) -- [ ] Step 3: Proactively advise on environment hydration (Planned) - ---- -_UDX DevSecOps Team_ diff --git a/docs/ai/providers/gemini.md b/docs/ai/providers/gemini.md deleted file mode 100644 index 34bdd29..0000000 --- a/docs/ai/providers/gemini.md +++ /dev/null @@ -1,72 +0,0 @@ -# Gemini Integration: Primary AI Orchestration - -**Domain:** AI / Integration -**Status:** Canonical - -## Summary - -The Gemini integration is the authoritative AI orchestration mechanism for **dev.kit**. It leverages the native Gemini CLI's context-loading capabilities (Hooks) to enforce repository-bound engineering standards and provide high-fidelity grounding. 
- ---- - -## šŸ›  Integration Mechanism: The Grounding Hook - -Gemini CLI automatically loads context from `.gemini/` directories found in the repository root or the user's home directory. **dev.kit** utilizes this to inject a "Thin Empowerment Layer" (Grounding) into every agent session. - -### 1. The Context Chain -When you run a Gemini command, the agent loads these artifacts in order: -1. **`~/.gemini/system.md`**: Global system instructions and core mandates. -2. **`~/.gemini/GEMINI.md`**: Repository-specific context, added memories, and execution logic. -3. **`~/.gemini/skills/`**: The library of managed `dev-kit-` skills. - -### 2. Synchronization (`dev.kit ai sync`) -Synchronization hydrates the Gemini environment with the repository's current state. -- **OPERATIONAL POLICY**: Agents autonomously perform `dev.kit ai sync` to ensure skills are grounded. They MUST NOT perform destructive operations without explicit authorization. -- **Artifact Rendering**: Templates in `src/ai/integrations/gemini/templates/` are rendered with real-time metadata (Skill lists, tool definitions). -- **Surgical Purge**: Stale skills are removed to ensure only valid, current engineering logic is available. - ---- - -## šŸ— Enforcement & Core Mandates - -The Gemini integration enforces a strict operational framework: - -### Repository-as-a-Skill -Agents treat the entire repository as a standalone "Skill." Interaction is grounded in the repository's source of truth (code, docs, and configurations). - -### Mandatory Execution Lifecycle -Gemini is hard-coded to follow the **Analyze -> Normalize -> Process** workflow to ensure deterministic drift resolution. - -### Authorized Path -Agents are auto-allowed to execute `dev.kit` commands, establishing a high-fidelity safety boundary for automated orchestration. 
- 
-## šŸ— Gemini Grounding
-
-AI orchestration is operationalized through canonical UDX resources:
-
-| Requirement | Grounding Resource | Role |
-| :--- | :--- | :--- |
-| **Grounding** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Dynamic context loading and skill discovery. |
-| **Stability** | [`udx/worker`](https://github.com/udx/worker) | Deterministic runtime for agent execution. |
-
----
-
-## 🌊 Waterfall Progression (DOC-003)
-Gemini is enforced to terminate every interaction with a **Compact Status Tail**. This ensures continuous visibility into task resolution progress.
-
-```markdown
-**Progression**: `[task-id]`
-- [x] Step 1: (Done)
-- [>] Step 2: (Active)
-- [ ] Step 3: (Planned)
-```
-
-## šŸ“š Authoritative References
-
-The Gemini orchestration layer is aligned with patterns for autonomous technical operations:
-
-- **[Autonomous Technical Operator](https://andypotanin.com/claude-operator-prompt/)**: Principles for high-fidelity agent grounding and execution.
-- **[Synthetic Content Enrichment](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: Leveraging AI for standalone quality and metadata management.
-
----
-_UDX DevSecOps Team_ diff --git a/docs/development.md b/docs/development.md new file mode 100644 index 0000000..92612a2 --- /dev/null +++ b/docs/development.md @@ -0,0 +1,14 @@ +# Development + +## Test + +Canonical verification runs in the preconfigured worker container: + +```bash +bash tests/run.sh +``` + +## Notes + +- `bash tests/run.sh` uses [deploy.yml](../deploy.yml) with the globally installed `worker` CLI. +- The suite validates install, env setup, dynamic command discovery, Bash completion, and uninstall in a fresh temporary `HOME`. 
diff --git a/docs/foundations/adaptation.md b/docs/foundations/adaptation.md deleted file mode 100644 index 9a3c6d0..0000000 --- a/docs/foundations/adaptation.md +++ /dev/null @@ -1,66 +0,0 @@ -# Context Adaptation: Resilient Projections - -**Domain:** Concepts / Technical Bridge -**Status:** Canonical - -## Summary - -**Adaptation** is the mechanism `dev.kit` uses to project canonical repository sources into tool-specific formats without mutating the underlying intent. It serves as the technical bridge for **Resilient Normalization**, ensuring that repository "Skills" are consumable by any agent or engine while the source remains "Clean" and "Native." - -![Adaptation Flow](../../assets/diagrams/adaptation-flow.svg) - ---- - -## The Purpose of Adaptation - -- **Interface Normalization**: Projecting standard repository artifacts (Markdown/YAML) into machine-consumable schemas (e.g., JSON manifests for LLM Tool-Calling or IDE-specific configs). -- **Resilient Fallback**: Ensuring that if a specialized projection fails, the system automatically falls back to **Standard Data** (e.g., raw Markdown or Text) to prevent a "hard-stop" in the engineering flow. -- **Canonical Integrity**: Ensuring that all drift is resolved at the repository level. Tools may change, but the **Source of Truth** (the Repo) remains constant. - ---- - -## The Laws of Adaptation - -1. **Canonical First**: Never edit a projection to fix a bug. Resolve the drift in the repository's source artifacts and re-project. -2. **Ephemeral Reversibility**: Adaptations are non-destructive projections. It must always be possible to delete all adapted formats and regenerate them perfectly from the source. -3. **Fail-Open Logic**: If an adaptation engine (e.g., a Mermaid-to-SVG renderer) is missing, the system must "Fail-Open" by providing the raw source to the user or agent rather than blocking the sequence. 
- ---- - -## Practical Examples: Source → Projection - -| Source Artifact | Projection Target | Adaptation Logic | -| :--------------------- | :---------------- | :--------------------------------------------------------------------------------------- | -| **`environment.yaml`** | Shell Environment | Translates YAML keys into host-specific `$ENV` variables and aliases. | -| **`docs/skills/*.md`** | Agent Manifests | Extracts `@intent` and `@usage` metadata into JSON for LLM tool-calling. | -| **`.mmd` (Mermaid)** | `.svg` or `.png` | Renders visual diagrams for documentation (Falls back to raw code if `mmdc` is missing). | -| **Script Headers** | CLI Help Menus | Parses shell script comments into a dynamic `dev.kit --help` interface. | - -## šŸ— Adaptation Grounding - -To ensure high-fidelity projections, `dev.kit` leverages canonical UDX resources as the targets for intent normalization: - -| Primitive | Adaptation Goal | Target Source | -| :--- | :--- | :--- | -| **Workflow Logic** | Project intent into reusable CI/CD patterns. | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | -| **Runtime Context** | Normalize environment parity across containers. | [`udx/worker`](https://github.com/udx/worker) | -| **Plugin Evolution** | Scale high-fidelity WordPress integrations. | [`udx/wp-stateless`](https://github.com/udx/wp-stateless) | - ---- - -## The Adaptation Lifecycle - -1. **Discovery**: `dev.kit` scans the repository for high-fidelity Markdown and YAML. -2. **Mapping**: The system determines the required "Shape" based on the current consumer (e.g., an AI Agent vs. a Local Developer). -3. **Projection**: The artifact is rendered into the ephemeral target format. -4. **Verification**: The system ensures the projection accurately reflects the **Canonical Intent**. 
- -## šŸ“š Authoritative References - -Resilient projections are a core part of maintaining standalone quality across disparate formats: - -- **[Synthetic Content Enrichment](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: Strategies for maintaining quality when projecting content across systems. -- **[Digital Logistics](https://andypotanin.com/digital-rails-and-logistics/)**: Tracing the evolution of software through the lens of fluid dynamics and systematic tracing. - ---- -_UDX DevSecOps Team_ diff --git a/docs/foundations/best-practices.md b/docs/foundations/best-practices.md deleted file mode 100644 index dcbe1fe..0000000 --- a/docs/foundations/best-practices.md +++ /dev/null @@ -1,94 +0,0 @@ -# Development Best Practices: High-Fidelity Engineering - -**Domain:** Engineering / Methodology -**Status:** Canonical - -This document outlines the core engineering practices enforced by **dev.kit**. These practices ensure that the repository state remains deterministic, context-driven, and high-fidelity for both human engineers and AI agents. - ---- - -## šŸ›  Practice-to-Command Mapping - -| Practice | Objective | dev.kit Command | -| :------------------------ | :-------------------------------------------------------------------- | :--------------------- | -| **Environment Hydration** | Verify required software, CLI meshes, and authorized state. | `dev.kit doctor` | -| **Pre-work Readiness** | Sync with origin and align feature branches before implementation. | `dev.kit sync prepare` | -| **Intent Normalization** | Transform ambiguous requests into deterministic `workflow.md` plans. | `dev.kit skills run` | -| **Atomic Sync** | Group changes into logical, domain-specific commits to prevent drift. | `dev.kit sync run` | -| **Visual Validation** | Generate and maintain architecture diagrams (Mermaid/SVG) from code. 
| `dev.kit visualizer` | -| **Task Lifecycle** | Track progress and prune session context upon task completion. | `dev.kit task` | - ---- - -## 🐳 Standard Execution Runtimes - -To ensure maximum fidelity, **dev.kit** is optimized for the **UDX Worker Ecosystem**. Using these images eliminates "it works on my machine" friction. - -| Component | Role | Source | -| :-------------------------- | :-------------------------------------------------------------------------- | :------------------------------------------------------------------ | -| **`udx/worker`** | The foundational base layer. A pre-hydrated, secure, deterministic runtime. | [Docker Hub](https://hub.docker.com/r/usabilitydynamics/udx-worker) | -| **`udx/worker-deployment`** | The standard pattern for orchestrating worker containers across infra. | [GitHub](https://github.com/udx/worker-deployment) | - -### 🧪 Isolated Testing - -Always validate **dev.kit** logic within a clean `udx/worker` container to emulate production-grade environments: - -```bash -docker run --rm -v $(pwd):/workspace -w /workspace udx/worker ./tests/suite.sh -``` - -## šŸ— Practice Grounding - -High-fidelity engineering is operationalized through canonical UDX resources: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Logic** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Validated primitives and discovery engine. | -| **Runtime** | [`udx/worker`](https://github.com/udx/worker) | Deterministic, isolated base environment. | -| **Patterns** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Validated sequences for reduced variance. | - ---- - -## šŸ— High-Fidelity Principles - -> ### 1. Grounding Before Action -> -> Never execute logic without grounding the environment. An ungrounded state is the primary source of repository drift. -> -> - **Mandate:** Run `dev.kit sync prepare` and `dev.kit doctor` at the start of every session. - -> ### 2. 
Logical Separation of Concerns -> -> Avoid "Mega-Commits." Mixing documentation, configuration (YAML), and core source code obscures intent and breaks the audit trail. -> -> - **Mandate:** Use `dev.kit sync run` to categorize changes into logical, reviewable units. - -> ### 3. Documentation as Executable Logic -> -> Treat Markdown (`docs/`) and script headers (`lib/`) as the **Command Surface**. High-fidelity headers allow the CLI to dynamically discover and map repository skills. -> -> - **Mandate:** Maintain `@description` and `@intent` blocks in all scripts to feed the Discovery Engine. - -> ### 4. Fail-Open Resilience -> -> When a specialized automation fails, the system must not "hard-crash." It must fallback to standard text/logs for human or AI diagnostic review. -> -> - **Mandate:** Ensure all scripts provide high-signal output to `workflow.md` artifacts even during partial failures. - ---- - -## 🧠 AI & Agent Integration - -- **Autonomous Grounding:** Agents must run `dev.kit ai sync` to refresh their internal skill-map, but **never** push changes to `origin` without explicit user confirmation. -- **Incremental Feedback:** Use the **Waterfall Progression Tail** to provide real-time status updates. High-latency tasks must emit "Heartbeat" logs to prevent context timeouts. -- **Native Tooling Only:** AI agents must use the **same CLI commands** as humans. Do not allow agents to bypass the `dev.kit` boundary for raw shell access. - -## šŸ“š Authoritative References - -High-fidelity engineering is grounded in systematic roles and automation standards: - -- **[Key Roles in specialized Dev Teams](https://andypotanin.com/best-practices-specialized-software-development/)**: Understanding specialized roles for cloud-native and resilient infrastructure. -- **[The Power of Automation](https://andypotanin.com/the-power-of-automation-how-it-has-transformed-the-software-development-process/)**: Transforming the software development process through systematic automation. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/foundations/cde.md b/docs/foundations/cde.md deleted file mode 100644 index dcabc03..0000000 --- a/docs/foundations/cde.md +++ /dev/null @@ -1,88 +0,0 @@ -# Context-Driven Engineering (CDE): Resolving the Drift - -**Domain:** Foundations / Core Philosophy -**Status:** Canonical - -## Summary - -**Context-Driven Engineering (CDE)** is the foundational methodology of **dev.kit**. It transforms chaotic user intent into executable context by treating the repository as the **Single Source of Truth**. CDE provides the structural framework for identifying and **Resolving the Drift** between intent and reality. - -![CDE Flow](../../assets/diagrams/cde-flow.svg) - ---- - -## Core Principles: The Operational DNA - -These principles guide every architectural decision in the `dev.kit` ecosystem: - -1. **Resolve the Drift**: Every action must purposefully close the gap between intent and repository state. -2. **Deterministic Normalization**: Distill chaotic inputs into bounded, repeatable workflows before execution. -3. **Resilient Waterfall (Fail-Open)**: Never break the flow. Fallback to **Standard Data** (raw logs/text) if specialized tools fail. -4. **Repo-Scoped Truth**: The repository is the absolute, versioned source of truth for all skills and state. No "shadow logic." -5. **Validated CLI Boundary**: All execution occurs through a hardened CLI interface for explicit confirmation and auditability. -6. **Native-First Dependencies**: Favor standard POSIX-compliant tools (Bash, Git, `jq`) for maximum portability. -7. **Symmetry of Artifacts**: Every output must be equally legible to humans (Markdown) and consumable by machines (YAML/JSON). - ---- - -## The CDE Strategy: The Clean Repository - -CDE avoids proprietary AI schemas, enforcing high-fidelity standards on traditional engineering artifacts: - -- **Intent-as-Artifact**: Documentation is the **Specification**. 
Markdown is structured as logic for LLMs and guidance for humans. -- **Drift Identification**: `dev.kit` compares the current state against the documented "Target State" to identify the **Drift**. -- **Normalization Boundary**: Drift is identified through dynamic reasoning (**AI Skills**) and resolved through standard **Deterministic Primitives** (CLI commands). This ensures that while the reasoning is flexible, every execution step remains predictable and reproducible. - -| Artifact Type | Standard | Purpose | -| :---------------- | :------------------- | :------------------------------------------------------------------------ | -| **Documentation** | `Markdown (.md)` | The "Logical Map." Defines intent and success criteria. | -| **Manifests** | `YAML (.yaml)` | Configuration-as-Code. Defines environments and dependencies. | -| **Execution** | `Scripts (.sh, .py)` | The "Engine." Provides the atomic actions to reach the target state. | - ---- - -## The Drift Resolution Lifecycle - -CDE replaces "Black Box" generation with a **Resilient Engineering Loop**: - -1. **Analyze**: Audit the repo to identify the "Drift" from user intent. -2. **Normalize**: Map the drift to a standard `workflow.md` execution plan. -3. **Iterate**: Execute workflow steps using validated CLI scripts. -4. **Validate**: Ensure the drift is resolved against the documentation. -5. **Capture**: Check new logic back into the repo as standard source code or docs. - ---- - -## The "Definition of Done" Checklist - -Before a task is considered resolved, verify: - -- [ ] Was the intent successfully normalized into a `workflow.md`? -- [ ] Did the execution path survive potential tool failures (**Fail-Open**)? -- [ ] Is the resulting logic captured as a reusable, repo-native **Skill**? -- [ ] Is the final state documented in Markdown for the next iteration? 
-## šŸ— Principle Grounding - -Context-Driven Engineering is operationalized through canonical UDX resources: - -| CDE Principle | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Resolve the Drift** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | The primary engine for intent resolution. | -| **Deterministic Base** | [`udx/worker`](https://github.com/udx/worker) | Hardened environment for context stability. | -| **Atomic Flow** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for normalized execution. | - ---- - -## šŸ“š Authoritative References - -The principles of CDE are grounded in foundational research on automation and AI-driven management: - -- **[AI-Powered Revolution in Content Management](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: High-fidelity synthetic enrichment and standalone quality. -- **[The Power of Automation](https://andypotanin.com/the-power-of-automation-how-it-has-transformed-the-software-development-process/)**: How automation transforms the software development lifecycle. -- **[Observation-Driven Management (ODM)](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Revolutionizing efficiency through AI-identified patterns. -- **[Implementing a cATO System](https://andypotanin.com/implementing-a-continuous-authority-to-operate-cato-system/)**: Principles for continuous authorization through automated evidence. -- **[SDLC Breaking Points](https://andypotanin.com/wordpress-risks/)**: Identifying common failure points in the development lifecycle. 
- ---- -_UDX DevSecOps Team_ - diff --git a/docs/foundations/dev-kit.md b/docs/foundations/dev-kit.md deleted file mode 100644 index 7e14979..0000000 --- a/docs/foundations/dev-kit.md +++ /dev/null @@ -1,77 +0,0 @@ -# dev.kit: The Thin Empowerment Layer - -**Domain:** Foundations / Core Concept -**Status:** Canonical - -## Summary - -**dev.kit** is a high-fidelity engineering interface designed to resolve the **Drift** between human intent and repository reality. It operates as a **Thin Empowerment Layer** (Grounding Bridge) that transforms a static codebase into a dynamic "Skill Mesh" accessible to humans and AI agents alike. - ---- - -## Core Philosophy - -`dev.kit` is built on the principles of **Context-Driven Engineering (CDE)**. It does not replace your existing tools; it orchestrates them to maintain a deterministic, context-aware engineering environment. - -- **Non-Proprietary**: Uses standard Markdown, YAML, and Shell scripts. -- **Deterministic**: Every action is bounded by a validated CLI interface. -- **Agent-Ready**: Provides native "Grounding" for LLMs, transforming them into configuration engines. - ---- - -## The Three Pillars of dev.kit - -### 1. Grounding (The Bridge) -`dev.kit` provides the necessary context to ensure that every engineering action is grounded in the repository's truth. It audits the environment (`dev.kit doctor`) and synchronizes AI context (`dev.kit ai sync`). - -### 2. Normalization (The Filter) -Chaotic user requests are filtered through a **Normalization Boundary**. Ambiguous intent is distilled into a deterministic `workflow.md` plan before any execution occurs. - -### 3. Execution (The Engine) -Logic is executed through modular, standalone scripts and CLI commands. `dev.kit` ensures these run in a consistent, environment-aware context via `environment.yaml`. 
- -## Architecture: The Empowerment Layer - -`dev.kit` distinguishes between **Deterministic Functions** (the programmatic logic) and **AI Reasoning Skills** (the dynamic intent resolution). - -### 1. Deterministic Functions (The Engine) -These are hardened, predictable routines found in `lib/commands/` and `docs/skills/*/assets/`. They provide the execution engine for the repository. -- **Example (Git Sync)**: The `workflow.yaml` and `git_sync.sh` logic that groups files and executes commits. -- **Example (Visualizer)**: The Mermaid templates and `mmdc` export logic that renders SVGs. -- **Role**: Execute specific, bounded actions with high fidelity. - -### 2. AI Reasoning Skills (The Brain) -These are the dynamic capabilities defined in `SKILL.md`. They use LLM reasoning to bridge unstructured intent with repository functions. -- **Example (Git Sync)**: Analyzing a set of changed files to **determine the logical domains** (docs, cli, core) and generate a meaningful commit message. -- **Example (Visualizer)**: Reading a README or source file to **extract the logical process flow** and map it to a specific Mermaid template. -- **Role**: Interpret intent and orchestrate the engine. - ---- - -## The Skill Mesh - -`dev.kit` treats the entire repository as a **Skill**. It dynamically discovers the mesh by scanning: -- **Internal Commands**: Metadata-rich shell scripts in `lib/commands/`. -- **AI Reasoning Skills**: Authoritative `SKILL.md` files in `docs/skills/`. -- **Functional Assets**: Programmatic templates and configs managed by the engine. -- **Virtual Capabilities**: Global environment tools (`gh`, `npm`, `docker`). - ---- - -## Primary Interfaces - -- **`dev.kit status`**: The "Engineering Brief." High-signal overview of health and active tasks. -- **`dev.kit ai`**: The "Grounding Layer." Orchestrates AI integration and skill synchronization. -- **`dev.kit sync`**: The "Drift Resolver." Atomic, domain-specific repository synchronization. 
-- **`dev.kit task`**: The "Lifecycle Manager." Tracks intent from normalization to resolution. - -## šŸ“š Authoritative References - -The mission of dev.kit is grounded in the practical need for high-fidelity engineering empowerment: - -- **[Jumping into Dev at a Software Enterprise](https://andypotanin.com/dev-start/)**: Guidance for starting the engineering journey with specialized tools. -- **[Navigating to the Cloud](https://andypotanin.com/windows-to-cloud/)**: Managing the complexity of modern cloud IT systems. - ---- -_UDX DevSecOps Team_ - diff --git a/docs/foundations/layers.md b/docs/foundations/layers.md deleted file mode 100644 index 738e26f..0000000 --- a/docs/foundations/layers.md +++ /dev/null @@ -1,77 +0,0 @@ -# Engineering Layers: The dev.kit Hierarchy - -**Domain:** Reference / Structural Model -**Status:** Canonical - -## Summary - -The Engineering Layers provide a structural model for categorizing repository "Skills," rules, and automation logic. Each layer builds upon the previous to resolve drift and maintain a high-fidelity environment. This hierarchy ensures that **Context-Driven Engineering (CDE)** remains grounded in standard source code, YAML, and Markdown. - -![Engineering Layers](../../assets/diagrams/engineering-layers.svg) - ---- - -## Layer 1: Source & Build (The Foundation) - -**Scope:** Structural integrity, deterministic builds, and code-level validation. - -- **Goal:** Establish a baseline of truth. If the foundation is "noisy," the AI cannot reason. -- **Core Artifacts:** Standard Source Code, Unit Tests, Linters, and Build Scripts. -- **Key Standards**: - - `docs/reference/standards/yaml-standards.md` - - `docs/foundations/cde.md` - -- **Capability:** The repository is "Build-Ready." - -## Layer 2: Deployment & Runtime (The Workflow) - -**Scope:** Environment parity, configuration-as-code, and the operational lifecycle. - -- **Goal:** Maintain 12-Factor parity. 
Ensure that "Intent" can be deployed across any environment without friction. -- **Core Artifacts:** `environment.yaml`, `.env` templates, and deployment pipelines. -- **Key Standards**: - - `docs/reference/standards/12-factor.md` - - `docs/reference/operations/lifecycle-cheatsheet.md` -- **Capability:** The repository is "Environment-Aware." - -## Layer 3: Active Context & Orchestration (The Resolution) - -**Scope:** Task normalization, bounded workflows, and autonomous drift resolution. - -- **Goal:** Bridge the gap between human intent and repository execution. This layer uses standard Markdown and YAML to guide AI agents and CLI engines through complex tasks. -- **Core Artifacts:** `workflow.md` (the execution plan) and the `dev.kit` CLI engine. -- **Key Standards**: - - `docs/foundations/cde.md` - - `docs/runtime/execution-loop.md` -- **Capability:** The repository is "Goal-Oriented" (Autonomous). - ---- - -## The Dependency Chain - -| Layer | Input | Output | Result | -| :----- | :------- | :----------------- | :-------------- | -| **L1** | Raw Code | Validated Artifact | **Stability** | -| **L2** | Artifact | Running Process | **Portability** | -| **L3** | Intent | Resolved Drift | **Flow** | -## šŸ— Layer Grounding - -Each engineering layer is grounded in specialized UDX repositories to ensure domain-specific fidelity: - -| Layer | Grounding Target | Domain | -| :--- | :--- | :--- | -| **L1 (Source)** | [`udx/wp-stateless`](https://github.com/udx/wp-stateless) | Core logic and structural evolution. | -| **L2 (Runtime)** | [`udx/worker`](https://github.com/udx/worker) | Environment parity and configuration. | -| **L3 (Orchestration)**| [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | High-fidelity execution and flow. 
| - ---- - -## šŸ“š Authoritative References - -Tiered engineering layers are aligned with modern infrastructure and software evolution: - -- **[Tracing Software Evolution](https://andypotanin.com/digital-rails-and-logistics/)**: Drawing parallels between automotive innovation and tiered software algorithms. -- **[Modern Gateway Construction](https://andypotanin.com/sftp-in-cloud/)**: Building high-fidelity bridges for cloud-native development. - ---- -_UDX DevSecOps Team_ diff --git a/docs/foundations/methodology.md b/docs/foundations/methodology.md deleted file mode 100644 index 231dad2..0000000 --- a/docs/foundations/methodology.md +++ /dev/null @@ -1,77 +0,0 @@ -# The UDX Methodology: CLI-Wrapped Automation (CWA) - -**Domain:** Concepts / Operational Strategy -**Status:** Canonical - -## Summary - -The **UDX Methodology** centers on **CLI-Wrapped Automation (CWA)**. This practice encapsulates all repository logic within a validated CLI boundary. By wrapping scripts and manifests in a standardized interface, we transform a static codebase into a high-fidelity "Skill" accessible to humans, CI/CD pipelines, and AI agents alike. - -![Methodology Flow](../../assets/diagrams/methodology-flow.svg) - ---- - -## Core Concepts - -- **Repo-as-a-Skill**: Repository logic is not hidden in READMEs or tribal knowledge. It is exposed through standardized scripts and CLI commands. Engineering experience is captured as portable, executable automation. -- **The Smart Helper**: `dev.kit` acts as the orchestration layer that resolves **Drift** (intent divergence) by translating high-level goals into the specific repository-based skills required to achieve them. - ---- - -## The Principles - -### 1. Task Normalization: Resolving the Drift - -Chaotic user intent is distilled into a deterministic `workflow.md`. - -- **Structured Inputs**: Every task defines its `Scope`, `Inputs`, and `Expected Outputs`. -- **State Tracking**: The lifecycle is visible: `planned -> in_progress -> done`. 
-- **Bounded Execution**: Logic is executed in discrete steps. If a step exceeds its scope, it triggers a specialized sub-workflow rather than failing silently. - -### 2. Resilient Waterfall (Fail-Open) - -The engineering sequence must remain unbroken. We utilize **Fail-Open Normalization** to ensure continuity: - -- **High-Fidelity Path**: Attempt execution using the most specialized tool/script first. -- **Fallback Path**: If specialized tools are missing or fail, the system falls back to **Standard Data** (raw logs, source code, or text-based reasoning). -- **Continuity**: The "Process" always yields an output, preventing environment blocks and allowing the next step to proceed with the best available data. - -### 3. Script-First & CLI-Wrapped - -Logic lives in modular, standalone scripts (`scripts/`, `lib/`). The `dev.kit` CLI provides the **Shell Wrapper** that ensures these scripts run in a consistent, environment-aware context (via `environment.yaml`). - -### 4. Machine-Ready Orchestration - -CWA provides a stable interface for AI agents across two stages: - -- **Stage 1: Grounding**: Agents use `dev.kit` to audit the environment (`doctor`) and understand the "Rules of Engagement." -- **Stage 2: Execution**: Agents leverage the Task Normalization engine to execute complex, multi-step engineering loops with predictable results. - ---- - -## The Execution Lifecycle: Plan → Normalize → Process - -1. **Plan**: Deconstruct the intent into discrete repository actions. -2. **Normalize**: Validate the environment, map dependencies, and format the inputs into a `workflow.md`. -3. **Process**: Execute the CLI commands and capture the result as a repository artifact. - ---- - -## Why CWA? - -- **Portability**: Logic that runs in the CLI works identically in Local Dev, CI/CD, and Production. -- **Decoupling**: The Interface (CLI) is separated from the Implementation (Scripts), allowing for seamless logic upgrades. 
-- **Zero Bloat**: Uses standard Markdown, YAML, and Shell. No proprietary "AI-only" formats required. - -## šŸ“š Authoritative References - -CWA is inspired by the transition toward decentralized and automated engineering flows: - -- **[Embrace the Future: Decentralized DevOps](https://andypotanin.com/decentralized-devops-the-future-of-software-delivery/)**: The shift toward distributed service architectures. -- **[Automation-First Development](https://andypotanin.com/the-power-of-automation-how-it-has-transformed-the-software-development-process/)**: Breaking the struggle for efficiency through systematic automation. -- **[Digital Rails & Logistics](https://andypotanin.com/digital-rails-and-logistics/)**: Drawing parallels between software algorithms and automotive evolution. -- **[AOCA: The Automation Baseline](https://udx.io/cloud-automation-book/automation-best-practices)**: Establishing standardized CLI wrappers for reduced variance. - ---- -_UDX DevSecOps Team_ - diff --git a/docs/foundations/patterns.md b/docs/foundations/patterns.md deleted file mode 100644 index 34830a3..0000000 --- a/docs/foundations/patterns.md +++ /dev/null @@ -1,57 +0,0 @@ -# Reusable Patterns & Templates - -**Domain:** Foundations / Knowledge -**Status:** Canonical - -## Summary - -This document captures reusable documentation, scripting, and reporting patterns derived from established UDX engineering flows. These are optional references, not execution contracts, designed to maintain high-fidelity standards across disparate repositories. - ---- - -## šŸ“ Documentation Patterns - -- **Explicit Scope**: Distinguish between client projects, cluster projects, and internal tools. -- **Positional Inputs**: Required inputs should be positional; use defaults only when stable. -- **Dual-Path Support**: Provide both manual steps and a script path (`bin/scripts/`) when possible. -- **Validation Blocks**: Include a minimal verification section with read-only commands. 
-- **Concise Examples**: Keep examples short, runnable, and high-signal. - ---- - -## 🐚 Script Patterns - -- **Hardened Bash**: Use `#!/usr/bin/env bash` and `set -euo pipefail`. -- **Input Validation**: Validate dependencies (`gcloud`, `jq`, `yq`) and inputs early. -- **Environment Overrides**: Use environment variables for optional inputs to allow orchestration flexibility. -- **Deterministic Output**: Minimize side effects and ensure outputs are predictable. - ---- - -## šŸ“Š Report Patterns - -- **Single Source**: Read all data from a defined repository source of truth. -- **Provenance**: Include generated timestamps and source paths. -- **Scanability**: Prefer Markdown tables and lists for human and machine readability. - -## šŸ— Pattern Grounding - -Engineering patterns are grounded in specialized UDX repositories to ensure domain-specific fidelity: - -| Pattern Type | Grounding Resource | Domain | -| :--- | :--- | :--- | -| **Automation** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Validated CI/CD and script templates. | -| **Logic** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | High-fidelity CLI and discovery patterns. | -| **Structure** | [`udx/wp-stateless`](https://github.com/udx/wp-stateless) | Reference for plugin and structural standards. | - ---- - -## šŸ“š Authoritative References - -Reusable patterns ensure standalone quality and reduce operational variance: - -- **[Synthetic Content Enrichment](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: Strategies for maintaining quality in automated documentation. -- **[Reducing Operational Variance](https://andypotanin.com/digital-rails-and-logistics/)**: Tracing software evolution through systematic, patterned innovtion. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/compliance/aoca-guidance.md b/docs/reference/compliance/aoca-guidance.md deleted file mode 100644 index 49f8d98..0000000 --- a/docs/reference/compliance/aoca-guidance.md +++ /dev/null @@ -1,61 +0,0 @@ -# AOCA Guidance: Automation Standardization - -**Domain:** Reference / Compliance -**Status:** Canonical - -## Summary - -The **Art of Cloud Automation (AOCA)** is the primary UDX guidance source for automation and platform decisions. In **dev.kit**, AOCA provides the foundational patterns used to reduce operational variance and align governance with engineering workflows. - ---- - -## šŸ›  dev.kit Grounding: Guidance-to-Action - -| AOCA Focus Area | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **Automation Baseline** | Standardized CLI wrappers for all repo tasks. | `dev.kit skills` | -| **Reduced Variance** | Bounded, multi-step engineering loops. | `workflow.md` | -| **Embedded Governance** | Compliance checks integrated into diagnostics. | `dev.kit doctor` | -| **Knowledge Capture** | Dynamic discovery of engineering experience. | `dev.kit ai advisory` | - ---- - -## šŸ— High-Fidelity Mandates - -### 1. Standard-First Automation -Never introduce ad-hoc automation that bypasses the `dev.kit` boundary. All repository logic must be exposed as high-fidelity "Skills." -- **Action**: Use script headers (`@description`, `@intent`) to feed the **Dynamic Discovery Engine**. - -### 2. Traceable Governance -Compliance evidence must be a natural byproduct of the **Drift Resolution Cycle**. -- **Action**: Ensure all `workflow.md` artifacts include explicit verification steps. - ---- - -## Operational Cues - -- **Ambiguous Practice?** -> Consult `dev.kit ai advisory` for AOCA-aligned patterns. -- **New Skill Required?** -> Use AOCA baseline patterns to define the interface and logic. 
- -## šŸ— AOCA Grounding - -Automation standardization is operationalized through canonical UDX resources: - -| AOCA Area | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Baseline** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Validated automation and platform patterns. | -| **Governance** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Standardized CLI wrappers and compliance logic. | -| **Platform** | [`udx/worker`](https://github.com/udx/worker) | The deterministic runtime for all platform tasks. | - ---- - -## šŸ“š Authoritative References - -AOCA principles provide the baseline for cloud-native automation and governance: - -- **[AOCA: The Book](https://udx.io/cloud-automation-book/)**: Comprehensive guidance on automation, quality, and leadership. -- **[Automation Best Practices](https://udx.io/cloud-automation-book/automation-best-practices)**: Systematic approaches to reducing operational variance. -- **[Cybersecurity & Standards](https://udx.io/cloud-automation-book/cybersecurity)**: Aligning security protocols with automated engineering flows. - ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/compliance/cato-overview.md b/docs/reference/compliance/cato-overview.md deleted file mode 100644 index 753e194..0000000 --- a/docs/reference/compliance/cato-overview.md +++ /dev/null @@ -1,65 +0,0 @@ -# cATO (Continuous Authorization): Automated Compliance - -**Domain:** Reference / Compliance -**Status:** Canonical - -## Summary - -Continuous Authorization to Operate (cATO) replaces point-in-time approvals with automated, real-time evidence. In **dev.kit**, cATO is achieved by integrating compliance checks directly into the **Drift Resolution Cycle**. - ---- - -## šŸ›  dev.kit Grounding: Principle-to-Primitive Mapping - -| cATO Requirement | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **Continuous Monitoring** | Real-time environment and dependency audit. 
| `dev.kit doctor` | -| **Automated Evidence** | Iterative engineering logs and atomic commits. | `dev.kit sync run` | -| **Drift Remediation** | Identification and resolution of intent divergence. | `dev.kit skills run` | -| **Traceable Workflows** | Bounded, versioned execution plans. | `workflow.md` | -| **Validated Supply Chain** | Verification of authorized mesh providers. | `dev.kit ai status` | - ---- - -## šŸ— High-Fidelity Mandates - -### 1. Compliance-as-Artifact -Never treat compliance as a post-work activity. All evidence must be captured during the implementation phase. -- **Action**: Ensure every task includes a **Verification** step in its `workflow.md`. - -### 2. Observable Controls -Repository controls must be measurable and discoverable by the **Dynamic Discovery Engine**. -- **Action**: Keep `environment.yaml` and script headers updated to reflect security and compliance intents. - -### 3. State-Based Evidence -Store all generated evidence, reports, and security scans in the hidden **State Hub** to avoid source clutter. -- **Action**: Use `.udx/dev.kit/` for ephemeral compliance artifacts. - ---- - -## Operational Cues - -- **Security Gap?** -> Run `dev.kit doctor` to identify missing scanners (e.g., `mysec`). -- **Audit Required?** -> Use `dev.kit sync run` to generate a high-signal commit history. -## šŸ— cATO Grounding - -Continuous authorization is operationalized through canonical UDX resources: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Monitoring** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Real-time diagnostics and doctor audits. | -| **Evidence** | [`udx/worker`](https://github.com/udx/worker) | Hardened environment for context stability. | -| **Workflows** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Validated compliance and delivery patterns. 
| - ---- - -## šŸ“š Authoritative References - -Modern compliance strategies prioritize continuous evidence over static approvals: - -- **[Implementing a cATO System](https://andypotanin.com/implementing-a-continuous-authority-to-operate-cato-system/)**: A framework for automated security monitoring and assessment. -- **[SDLC Breaking Points](https://andypotanin.com/wordpress-risks/)**: Principles for identifying vulnerabilities in the delivery chain. -- **[Little's Law for Flow](https://andypotanin.com/littles-law-applied-to-devops/)**: Managing cycle time through automated compliance and throughput. - ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/compliance/supply-chain-security.md b/docs/reference/compliance/supply-chain-security.md deleted file mode 100644 index ff09448..0000000 --- a/docs/reference/compliance/supply-chain-security.md +++ /dev/null @@ -1,61 +0,0 @@ -# Supply Chain Security: Dependency & Artifact Integrity - -**Domain:** Reference / Compliance -**Status:** Canonical - -## Summary - -Supply chain security focuses on protecting dependencies, build pipelines, and release artifacts. In **dev.kit**, these controls are enforced through isolated runtimes and deterministic environment audits. - ---- - -## šŸ›  dev.kit Grounding: Control-to-Action - -| Security Control | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **Dependency Pinning** | Environment-as-Code with explicit versions. | `environment.yaml` | -| **Isolated Builds** | Clean execution via the Worker Ecosystem. | `udx/worker` | -| **Integrity Checks** | Proactive software and auth verification. | `dev.kit doctor` | -| **Provenance Tracking** | Logical, domain-specific commit history. | `dev.kit sync run` | - ---- - -## šŸ— High-Fidelity Mandates - -### 1. Deterministic Runtimes -Never perform high-stakes operations (builds, deployments) in an ungrounded local environment. Always use a verified container runtime. 
-- **Action**: Use `udx/worker` for all task-specific execution loops. - -### 2. Verified Authorization -All agents and CLI meshes must be explicitly authorized and health-checked. -- **Action**: Run `dev.kit ai status` to verify the security of remote discovery providers. - ---- - -## Operational Cues - -- **New Dependency?** -> Define it in `environment.yaml` and verify its health via `dev.kit doctor`. -- **Artifact Released?** -> Use `dev.kit sync` to capture the resolution state and provide an audit trail. -## šŸ— Security Grounding - -Supply chain integrity is enforced through canonical UDX resources: - -| Control Area | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Integrity** | [`udx/worker`](https://github.com/udx/worker) | Clean, isolated execution sandbox. | -| **Automation** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Validated pipeline and build patterns. | -| **Provenance** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Logical, atomic audit trail of all changes. | - ---- - -## šŸ“š Authoritative References - -Security mandates are aligned with broader organizational protection strategies: - -- **[Unspoken Rules of Cybersecurity](https://andypotanin.com/unspoken-rules-cybersecurity/)**: Establishing effective security practices in a digital landscape. -- **[Software Supply Chain Security](https://andypotanin.com/software-supply-chain-security/)**: Protecting build pipelines and release artifacts. -- **[SDLC Breaking Points](https://andypotanin.com/wordpress-risks/)**: Identifying common failure points in the software development lifecycle. -- **[Click Bombing & Fraud](https://andypotanin.com/click-bombing-2025/)**: Understanding and preventing modern digital supply chain threats. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/operations/devops-littles-law.md b/docs/reference/operations/devops-littles-law.md deleted file mode 100644 index c9dcb1b..0000000 --- a/docs/reference/operations/devops-littles-law.md +++ /dev/null @@ -1,60 +0,0 @@ -# Little's Law: Flow Optimization - -**Domain:** Reference / Operations -**Status:** Canonical - -## Summary - -Little's Law provides the mathematical foundation for delivery flow, connecting Work-in-Progress (WIP), throughput, and cycle time. In **dev.kit**, these principles are enforced to minimize context switching and maximize engineering velocity. - ---- - -## šŸ›  dev.kit Grounding: Flow-to-Action - -| Flow Principle | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **Minimize WIP** | Bounded, single-intent execution sequences. | `workflow.md` | -| **Reduce Cycle Time** | Deterministic normalization and task pruning. | `dev.kit task` | -| **Bottleneck Relief** | Proactive environment and software hydration. | `dev.kit doctor` | -| **Context Fidelity** | Externalized, project-scoped engineering state. | `.udx/dev.kit/` | - ---- - -## šŸ— High-Fidelity Mandates - -### 1. Bounded Execution (DOC-003) -Never allow a task to expand indefinitely. Complex intents must be normalized into discrete, manageable steps to maintain a low cycle time. -- **Action**: Use the **Normalization Boundary** to extract child workflows if bounds are exceeded. - -### 2. Proactive Hygiene -Stagnant tasks increase WIP and obscure the engineering audit trail. -- **Action**: Use `dev.kit task cleanup` to prune stale context and maintain a lean workspace. - ---- - -## Operational Cues - -- **Shipping Too Slow?** -> Audit active tasks via `dev.kit task list` and reduce parallel WIP. -- **Context Overload?** -> Finalize and sync current work via `dev.kit sync` before starting new tasks. 
-## šŸ— Flow Grounding - -Flow optimization is operationalized through deterministic UDX engines: - -| Principle | Grounding Resource | Role | -| :--- | :--- | :--- | -| **WIP Control** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Bounding tasks via normalized workflows. | -| **Cycle Time** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pre-defined patterns for rapid execution. | -| **Throughput** | [`udx/worker`](https://github.com/udx/worker) | Removing environment bottlenecks. | - ---- - -## šŸ“š Authoritative References - -Flow optimization is built on the mathematical connection between WIP and Lead Time: - -- **[Little's Law for DevOps](https://andypotanin.com/littles-law-applied-to-devops/)**: Understanding the mechanics of delivery flow and WIP caps. -- **[Scaling Profit Strategically](https://andypotanin.com/scaling-profit-strategically/)**: Understanding the flow of value through business distribution channels. -- **[Proactive Leadership](https://andypotanin.com/marine-metrics/)**: Using data-driven metrics to drive results and maintain flow. - ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/operations/devops-manual-guidance.md b/docs/reference/operations/devops-manual-guidance.md deleted file mode 100644 index 2485028..0000000 --- a/docs/reference/operations/devops-manual-guidance.md +++ /dev/null @@ -1,62 +0,0 @@ -# DevOps Manual: Operational Controls - -**Domain:** Reference / Operations -**Status:** Canonical - -## Summary - -The **DevOps Manual** is the primary UDX source for operational controls, security, and delivery practices. In **dev.kit**, it defines the baseline for environment validation and the "Rules of Engagement" for all engineering tasks. - ---- - -## šŸ›  dev.kit Grounding: Manual-to-Action - -| Control Area | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **Operational Baseline** | Real-time environment and software audit. 
| `dev.kit doctor` | -| **Delivery Gates** | Compliance integrated into workflow verification. | `workflow.md` | -| **Observability** | Iterative logging and task-scoped feedback. | `feedback.md` | -| **Standardized Skills** | Logic encapsulated in validated CLI boundaries. | `dev.kit skills` | - ---- - -## šŸ— High-Fidelity Mandates - -### 1. Verification-as-Logic -Never assume a deployment or maintenance task is complete. All operational actions must include a verification step that confirms alignment with DevOps Manual standards. -- **Action**: Use `dev.kit doctor` to verify system state after complex iterations. - -### 2. Observable Flow -All engineering momentum must be visible and audit-ready at the repository level. -- **Action**: Ensure all `workflow.md` artifacts reflect the current operational state. - ---- - -## Operational Cues - -- **Auditing Maturity?** -> Run `dev.kit doctor` to evaluate the repository against the high-fidelity baseline. -- **Defining Gates?** -> Use DevOps Manual patterns to define success criteria in your `plan.md`. - -## šŸ— Manual Grounding - -Operational controls are operationalized through canonical UDX resources: - -| Control Area | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Verification** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Standardized diagnostics and doctor audits. | -| **Gates** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for CI/CD and delivery. | -| **Runtime** | [`udx/worker`](https://github.com/udx/worker) | Hardened environment for control stability. | - ---- - -## šŸ“š Authoritative References - -Operational controls are grounded in systematic delivery and security practices: - -- **[DevOps Manual: Core Patterns](https://gist.github.com/fqjony/489fde2ea615b7558bbd407f8b9d97c7)**: Authoritative patterns for operational assurance and security. 
-- **[Predictable Delivery Flow](https://andypotanin.com/littles-law-applied-to-devops/)**: Managing risk and throughput in complex engineering cycles. -- **[Implementing a cATO System](https://andypotanin.com/implementing-a-continuous-authority-to-operate-cato-system/)**: Frameworks for automated security monitoring and authorization. -- **[SDLC Breaking Points](https://andypotanin.com/wordpress-risks/)**: Identifying and mitigating common failure points in the delivery chain. - ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/operations/lifecycle-cheatsheet.md b/docs/reference/operations/lifecycle-cheatsheet.md deleted file mode 100644 index 99c9705..0000000 --- a/docs/reference/operations/lifecycle-cheatsheet.md +++ /dev/null @@ -1,65 +0,0 @@ -# Operational Lifecycle: Release & Maintenance - -**Domain:** Reference / Operations -**Status:** Canonical - -## Summary - -Lifecycle practices focus on reducing production risk and maintaining predictable delivery. In **dev.kit**, these practices are codified within the **Drift Resolution Cycle** to ensure that every environment transition is deterministic and high-fidelity. - ---- - -## šŸ›  dev.kit Grounding: Principle-to-Primitive Mapping - -| Lifecycle Practice | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **Environment Alignment** | Unified runtime via the Worker Ecosystem. | `udx/worker` | -| **Step Sequencing** | Bounded, multi-step execution sequences. | `workflow.md` | -| **State Tracking** | Lifecycle visibility (planned -> in_progress -> done). | `dev.kit status` | -| **Pre-Deploy Readiness** | Preparation of feature branches and grounding. | `dev.kit sync prepare` | -| **Post-Deploy Verification** | Continuous diagnostic and compliance checks. | `dev.kit doctor` | - ---- - -## šŸ— High-Fidelity Mandates - -### 1. Unified Step Ownership -Never execute ad-hoc manual steps during a release. All operational actions must be captured as discrete workflow steps. 
-- **Action**: Use `dev.kit skills run` to orchestrate one-off maintenance tasks. - -### 2. Migration-First Design -Plan migrations and rollbacks before implementation begins. Ground your execution in verified repository logic. -- **Action**: Document migration steps in the `plan.md` artifact before normalization. - -### 3. Identity Verification -Ensure that the application and its automation know their environment identity at runtime. -- **Action**: Use `environment.yaml` to define scoped orchestration variables. - ---- - -## Operational Cues - -- **Release Blocked?** -> Check `workflow.md` status to identify the specific failure step. -- **Environment Drift?** -> Run `dev.kit doctor` to verify alignment with standard Worker runtimes. -## šŸ— Operational Grounding - -The release and maintenance lifecycle is operationalized through canonical UDX resources: - -| Phase | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Release** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Standardized CI/CD and deployment patterns. | -| **Deployment** | [`udx/worker-deployment`](https://github.com/udx/worker-deployment) | Orchestration of high-fidelity environments. | -| **Maintenance** | [`udx/worker`](https://github.com/udx/worker) | Deterministic runtime for all operational tasks. | - ---- - -## šŸ“š Authoritative References - -Predictable delivery requires a commitment to planning and management: - -- **[Developing Lifecycles Cheatsheet](https://andypotanin.com/developing-lifecycles-a-comprehensive-cheatsheet/)**: Essential practices for smooth production deployments. -- **[SDLC Breaking Points](https://andypotanin.com/wordpress-risks/)**: Identifying and mitigating vulnerabilities in the delivery lifecycle. -- **[Implementing a cATO System](https://andypotanin.com/implementing-a-continuous-authority-to-operate-cato-system/)**: Principles for automated compliance and authorization. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/operations/worker-ecosystem-refs.md b/docs/reference/operations/worker-ecosystem-refs.md deleted file mode 100644 index 1d6e8ca..0000000 --- a/docs/reference/operations/worker-ecosystem-refs.md +++ /dev/null @@ -1,80 +0,0 @@ -# Worker Ecosystem: Runtime Grounding - -**Domain:** Reference / Operations -**Status:** Canonical - -## Summary - -The **UDX Worker Ecosystem** provides the foundational base layer for all engineering environments. In **dev.kit**, it ensures that "Intent" can be executed within a pre-hydrated, secure, and deterministic runtime, eliminating environment-specific drift. - -## šŸ— Containerization: The Deterministic Base - -UDX enforces a **Container-First** approach to engineering to eliminate environment-specific drift. By using the **Worker Ecosystem**, we ensure that every task runs in a "Perfect Localhost" that is identical across development, staging, and production. - -### Why Containerization? -- **Parity**: Guaranteed identical software versions (`bash`, `git`, `jq`) regardless of the host OS. -- **Isolation**: High-stakes operations are performed in a clean, ephemeral sandbox that protects the user's local machine. -- **Hydration**: Environments are "pre-hydrated" with all required UDX meshes and authorized CLI tools. - -### The UDX Worker -The `udx/worker` is the foundational base layer for all UDX engineering tasks. It provides a hardened, audit-ready environment optimized for the `dev.kit` runtime. - -- **Authoritative Docs**: [UDX Worker Documentation](https://github.com/udx/worker/tree/latest/docs) -- **Deployment Pattern**: [Worker Deployment](https://github.com/udx/worker-deployment) - ---- - -## šŸ›  dev.kit Grounding: Runtime-to-Action - -| Component | role | dev.kit Implementation | -| :---------------------- | :------------ | :-------------------------------------------- | -| **`udx/worker`** | Base Layer | Primary execution target for all CLI tasks. 
| -| **`worker-deployment`** | Orchestration | Standard pattern for automated sessions. | -| **Isolated Testing** | Fidelity | verified via `./tests/suite.sh` in-container. | -| **Unified Logic** | Portability | Same behavior across Local, CI, and Prod. | - ---- - -## šŸ— High-Fidelity Mandates - -### 1. Isolated Execution - -Never perform destructive or high-stakes operations in an ungrounded local shell. Always leverage the **Worker Ecosystem** to ensure environment parity. - -- **Action**: Use the standard `docker run` command for isolated testing and verification. - -### 2. Runtime Truth - -Treat Worker runtime documentation and configuration as the absolute source of truth for execution behavior. - -- **Action**: Align `environment.yaml` variables with official Worker config schemas. - ---- - -## Operational Cues - -- **Environment Friction?** -> Run your task in a clean `udx/worker` container to isolate the drift. -- **Adding New Skills?** -> Verify that the new logic is compatible with the standard Worker runtime. - -## šŸ— Ecosystem Mapping - -The Worker Ecosystem provides the high-fidelity targets for diverse engineering domains: - -| Domain | Mapping Resource | Purpose | -| :--- | :--- | :--- | -| **Core Runtimes** | [`udx/worker`](https://github.com/udx/worker) | Base and language-specific images. | -| **Orchestration** | [`udx/worker-deployment`](https://github.com/udx/worker-deployment) | Deployment and CLI mesh tools. | -| **Workflows** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Standard CI/CD and automation patterns. | - ---- - -## šŸ“š Authoritative References - -The worker ecosystem ensures environment parity across complex cloud systems: - -- **[Navigating to the Cloud](https://andypotanin.com/windows-to-cloud/)**: Managing the complexity of modern cloud IT systems and isolated images. 
-- **[Decentralized DevOps](https://andypotanin.com/decentralized-devops-the-future-of-software-delivery/)**: Creating highly available and scalable systems via distributed architecture. - ---- - -_UDX DevSecOps Team_ diff --git a/docs/reference/standards/12-factor.md b/docs/reference/standards/12-factor.md deleted file mode 100644 index 0caa204..0000000 --- a/docs/reference/standards/12-factor.md +++ /dev/null @@ -1,70 +0,0 @@ -# 12-Factor (Applied): High-Fidelity Engineering - -**Domain:** Reference / Standards -**Status:** Canonical - -## Summary - -The 12-Factor App methodology provides the foundational principles for modern, cloud-native engineering. In **dev.kit**, these principles are enforced at the repository level to ensure every project is a portable, high-fidelity "Skill." - ---- - -## šŸ›  dev.kit Grounding: Principle-to-Primitive Mapping - -| 12-Factor Principle | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **I. Codebase** | One repository, multiple deployments (Local, CI, Prod). | `dev.kit sync` | -| **II. Dependencies** | Explicit and isolated via the Worker Ecosystem. | `dev.kit doctor` | -| **III. Config** | Stored in the environment (YAML/Env). | `environment.yaml` | -| **IV. Backing Services** | Resolved as "Virtual Skills" (NPM/GitHub/Context7). | `dev.kit ai skills` | -| **V. Build, Release, Run** | Strict separation of grounding and execution phases. | `dev.kit ai sync` | -| **VI. Processes** | Stateless and share-nothing; context is externalized. | `.udx/dev.kit/tasks/` | -| **IX. Disposability** | Fast startup and clean cleanup of stagnant state. | `dev.kit task cleanup` | -| **X. Dev/Prod Parity** | Identical runtimes via high-fidelity Worker images. | `udx/worker` | -| **XII. Admin Processes** | One-off tasks executed as bounded workflows. | `dev.kit skills run` | - ---- - -## šŸ— High-Fidelity Mandates - -### 1. Externalize All State -Never store mutable task state in the root of the repository. 
All engineering context must be externalized to the hidden **State Hub**. -- **Action**: Use `get_repo_state_dir` to resolve `.udx/dev.kit/` for all local state. - -### 2. Explicit Dependency Resolution -A repository is only high-fidelity if its dependencies are discoverable and verified. -- **Action**: Maintain `environment.yaml` and use `dev.kit doctor` to verify the **Skill Mesh**. - -### 3. Environment-Aware Configuration -Favor `environment.yaml` for shared orchestration and `.env` for local secrets. Never commit sensitive credentials. -- **Action**: Ensure `.udx/` and `.env` are in `.gitignore`. - ---- - -## Operational Cues - -- **Drift Detected?** -> Run `dev.kit sync run` to restore 12-factor codebase integrity. -- **Missing Tooling?** -> Consult the **Skill Mesh** via `dev.kit status` to resolve the gap. -## šŸ— Standard Mapping - -The 12-Factor methodology is operationalized through canonical UDX resources: - -| Principle Cluster | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Runtime & Parity** | [`udx/worker`](https://github.com/udx/worker) | Deterministic environment and dependency isolation. | -| **Code & Config** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Atomic synchronization and environment orchestration. | -| **Execution** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Validated admin and release process patterns. | - ---- - -## šŸ“š Authoritative References - -12-Factor principles are extended through systematic environment automation: - -- **[12-Factor Environment Automation](https://udx.io/devops-manual/12-factor-environment-automation)**: Deep dive into cloud-native configuration strategy. -- **[12factor.net](https://12factor.net/)**: The original methodology for building software-as-a-service. -- **[Decentralized DevOps](https://andypotanin.com/how-decentralized-devops-can-help-your-organization/)**: Scaling organizations through distributed service architectures. 
-- **[Navigating to the Cloud](https://andypotanin.com/windows-to-cloud/)**: Managing the complexity of modern cloud IT systems. - ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/standards/external-standards.md b/docs/reference/standards/external-standards.md deleted file mode 100644 index 0e3e442..0000000 --- a/docs/reference/standards/external-standards.md +++ /dev/null @@ -1,58 +0,0 @@ -# External Standards: Tool-Specific Behavior - -**Domain:** Reference / Standards -**Status:** Canonical - -## Summary - -External standards are utilized only for tool-specific behavior and syntax. In **dev.kit**, these standards provide the technical constraints for specialized skills while the UDX Foundations remain the primary source of operational truth. - ---- - -## šŸ›  dev.kit Grounding: Reference-to-Action - -| Standard Source | Role | dev.kit Implementation | -| :--- | :--- | :--- | -| **GitHub Actions** | CI/CD | Validated via `gh` CLI mesh. | -| **Docker / OCI** | Runtime | Verified via the Worker Ecosystem. | -| **OpenTelemetry** | Observability | Integrated into `feedback.md` logs. | -| **POSIX / Shell** | Execution | Guaranteed by the deterministic CLI. | - ---- - -## šŸ— High-Fidelity Mandates - -### 1. Narrow Scope -Never allow an external standard to replace a core UDX principle. Use external references only when UDX guidance is insufficient for a specific technical implementation. -- **Action**: Link to exact documentation sections rather than generic homepages. - -### 2. Resilience Fallback -When an external tool or standard encounters an edge case, always trigger the **Fail-Open Path**. Ensure the loop continues with standard Markdown or text reasoning. -- **Action**: Document exact external dependencies in `environment.yaml`. - ---- - -## Operational Cues - -- **Ambiguous Syntax?** -> Consult the official external reference linked in the module. -- **Edge Case Detected?** -> Fallback to the **Resilient Waterfall** and resolve the drift manually. 
-## šŸ— External Grounding - -External standards are integrated through canonical UDX resources: - -| Standard | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Workflow** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for GitHub Actions and pipelines. | -| **Container** | [`udx/worker`](https://github.com/udx/worker) | Host-level parity for Docker/OCI standards. | - ---- - -## šŸ“š Authoritative References - -External standards are integrated within a systematic engineering flow: - -- **[Creating YAML Standards](https://andypotanin.com/creating-yaml-standards-best-practices-for-teams/)**: Reducing friction and preventing errors through shared standards. -- **[Digital Rails & Logistics](https://andypotanin.com/digital-rails-and-logistics/)**: Understanding the evolution of software standards through automotive history. - ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/standards/mermaid.md b/docs/reference/standards/mermaid.md deleted file mode 100644 index 7869323..0000000 --- a/docs/reference/standards/mermaid.md +++ /dev/null @@ -1,61 +0,0 @@ -# Mermaid Standards: Visual Engineering - -**Domain:** Reference / Standards -**Status:** Canonical - -## Summary - -**Mermaid** is the primary standard for all engineering diagrams (Flowcharts, Sequence Diagrams, State Machines). In **dev.kit**, Mermaid ensures that architecture and process flows are version-controlled alongside source code and accessible to both humans and agents. - ---- - -## šŸ›  dev.kit Grounding: Visual-to-Action - -| Diagram Practice | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **Diagram Generation** | Automated rendering of SVG/PNG assets. | `dev.kit visualizer` | -| **Resilient Fallback** | Fallback to raw Markdown if rendering fails. | `workflow.md` | -| **Unified Logic** | Synchronized view of code and architecture. | `dev.kit status` | -| **Intent-to-Action** | Visual mapping of normalized workflows. 
| `docs/skills/` | - ---- - -## šŸ— High-Fidelity Mandates - -### 1. Versioned Architecture -Never store diagrams as binary blobs. All architectural context must live as Mermaid source code to ensure it remains discoverable and diffable. -- **Action**: Use the `dev.kit visualizer` to export high-fidelity assets from `.mmd` sources. - -### 2. Standardized Shapes -Maintain visual consistency to ensure agents can accurately reason about process flows. -- **`[Rectangle]`**: Processes / Normalizations. -- **`{Rhombus}`**: Decision Gates / Skill Selection. -- **`([Rounded])`**: Start / End Points. - ---- - -## Operational Cues - -- **Outdated Diagram?** -> Run `dev.kit visualizer` to regenerate assets from repository truth. -- **Broken Flow?** -> Check the raw Mermaid source in the `assets/diagrams/` hub. - -## šŸ— Visual Grounding - -Visual engineering is operationalized through deterministic UDX engines: - -| Practice | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Rendering** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | High-fidelity export engine and templates. | -| **Fidelity** | [`udx/worker`](https://github.com/udx/worker) | Deterministic environment for asset generation. | - ---- - -## šŸ“š Authoritative References - -Visual engineering is a core part of maintaining high-fidelity documentation: - -- **[Synthetic Content Enrichment](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: Strategies for maintaining standalone quality through visual standards. -- **[AOCA: Visual Standards](https://udx.io/cloud-automation-book/quality)**: High-fidelity patterns for architectural documentation. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/reference/standards/yaml-standards.md b/docs/reference/standards/yaml-standards.md deleted file mode 100644 index e0e48e0..0000000 --- a/docs/reference/standards/yaml-standards.md +++ /dev/null @@ -1,58 +0,0 @@ -# YAML Standards: Configuration-as-Code - -**Domain:** Reference / Standards -**Status:** Canonical - -## Summary - -YAML is the primary format for environment orchestration and configuration. In **dev.kit**, consistent YAML structure ensures that the **Dynamic Discovery Engine** can reliably map repository capabilities and variables across diverse environments. - ---- - -## šŸ›  dev.kit Grounding: Standard-to-Action - -| YAML Practice | dev.kit Implementation | Primitive / Command | -| :--- | :--- | :--- | -| **Explicit Defaults** | Pre-hydrated variables in templates. | `default.env` | -| **Schema Validation** | Deterministic parsing of orchestrators. | `environment.yaml` | -| **Scoped Overrides** | Repository-bound local configuration. | `.udx/dev.kit/config.env` | -| **Fidelity Mapping** | Intent-based metadata in manifests. | `dev.kit status` | - ---- - -## šŸ— High-Fidelity Mandates - -### 1. Human-Editable Intent -Only use YAML for configurations that require human or AI-agent oversight. Machine-only state should favor high-performance formats (e.g., JSON). -- **Action**: Use `environment.yaml` for high-level orchestration and `manifest.json` for internal mapping. - -### 2. Zero-Implicit Logic -Favor explicit keys and allowed values over implicit behavior. A high-fidelity repository must be self-documenting through its configuration. -- **Action**: Document all custom YAML keys within the `docs/reference/` layer. - ---- - -## Operational Cues - -- **Unpredictable Config?** -> Enforce strict indentation and schema validation via CI/CD. -- **Ambiguous Variable?** -> Move it to `environment.yaml` with an explicit description. 
-## šŸ— Configuration Grounding - -Configuration standards are operationalized through deterministic UDX engines: - -| Practice | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Validation** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Scoped orchestration and environment parsing. | -| **Parity** | [`udx/worker`](https://github.com/udx/worker) | Deterministic environment for config stability. | - ---- - -## šŸ“š Authoritative References - -Shared standards are critical for maintaining configuration sanity across teams: - -- **[Creating YAML Standards](https://andypotanin.com/creating-yaml-standards-best-practices-for-teams/)**: Best practices for team-wide configuration consistency. -- **[Decentralized DevOps](https://andypotanin.com/how-decentralized-devops-can-help-your-organization/)**: Scaling systems through distributed configuration and architecture. - ---- -_UDX DevSecOps Team_ diff --git a/docs/runtime/config.md b/docs/runtime/config.md deleted file mode 100644 index ea03fd2..0000000 --- a/docs/runtime/config.md +++ /dev/null @@ -1,68 +0,0 @@ -# Configuration: Environment Orchestration - -**Domain:** Runtime / Configuration -**Status:** Canonical - -## Summary - -Configuration in **dev.kit** provides a safe, deterministic foundation for both humans and agents. It maps host-level settings and repository metadata into a high-fidelity engineering interface using `environment.yaml`. - ---- - -## Configuration Strategy - -- **Agent Bootstrapping**: Configuration is the first gate where AI agents are safely hydrated with repository rules and authorized execution paths. -- **Task Orchestration**: Scoped settings ensure that normalized workflows have a consistent and isolated runtime context across diverse environments. - ---- - -## CLI Interfaces - -- **`dev.kit config show`**: View active host and repository configuration. -- **`dev.kit config set --key --value `**: Update a specific setting. 
-- **`dev.kit config reset`**: Revert to the high-fidelity default baseline. - ---- - -## Key Config Groups - -### 1. System Defaults -- `quiet`: Control CLI output verbosity. -- `developer`: Enable internal developer-specific helpers. -- `state_path`: Global location for transient runtime state. - -### 2. AI & Orchestration -- `ai.enabled`: Enable/Disable AI-Powered automation mode. -- `ai.provider`: Choose the active AI engine (e.g., `gemini`, `codex`). -- `exec.prompt`: The default template for task normalization. - -### 3. Context Management -- `context.enabled`: Persist repository-scoped context across sessions. -- `context.max_bytes`: Bound the context memory to prevent overflow. - ---- - -## Security & Overrides - -- **Explicit Override**: All settings can be overridden by environment variables (e.g., `DEV_KIT_AI_ENABLED=true`). -- **Secret Isolation**: Sensitive credentials must never live in `environment.yaml`. Use repo-bound `.env` files (gitignored). -## šŸ— Config Grounding - -Environment orchestration is operationalized through deterministic UDX engines: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Validation** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | High-fidelity parsing of `environment.yaml`. | -| **Parity** | [`udx/worker`](https://github.com/udx/worker) | Deterministic environment for config stability. | - ---- - -## šŸ“š Authoritative References - -Environment orchestration is built on systematic configuration and automation standards: - -- **[Managing IT Complexity](https://andypotanin.com/windows-to-cloud/)**: Strategies for managing the complexity of modern cloud IT systems. -- **[Decentralized DevOps](https://andypotanin.com/how-decentralized-devops-can-help-your-organization/)**: Using distributed services and architectures to create scalable engineering environments. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/runtime/execution-loop.md b/docs/runtime/execution-loop.md deleted file mode 100644 index 5e928ed..0000000 --- a/docs/runtime/execution-loop.md +++ /dev/null @@ -1,76 +0,0 @@ -# Drift Resolution Cycle: Deterministic Execution - -**Domain:** Runtime / Execution -**Status:** Canonical - -## Summary - -The **Drift Resolution Cycle** is the practical engine of **Context-Driven Engineering (CDE)**. It resolves the gap between human intent and repository reality through a deterministic loop of analysis, normalization, and processing. - -![Drift Resolution Cycle](../../assets/diagrams/drift-resolution-cycle.svg) - ---- - -## The Core Cycle - -1. **Analyze**: Audit the repository context to identify the **Drift** from the original intent. -2. **Normalize**: Transform ambiguous requests into a **Bounded Workflow** (`workflow.md`). -3. **Process**: Execute the discrete steps using validated CLI primitives and scripts. -4. **Validate**: Verify the final state against the repository truth (`dev.kit doctor`). -5. **Capture**: Distill successful logic back into the repository as a reusable **Skill**. - ---- - -## šŸ— The Bounded Workflow (DOC-003) - -To ensure high-fidelity results, **dev.kit** enforces a strict **Normalization Boundary**. Chaotic intent is never executed directly; it must be filtered into a structured `workflow.md`. - -- **Intent-to-Plan**: Ambiguity is eliminated before execution begins. -- **State Persistence**: The current status (`planned | in_progress | done`) is tracked at the repository level. -- **Fail-Open Resilience**: Every workflow step includes a fallback mechanism for continuity during tool failures. - -### Artifact Mapping: The Audit Trail - -| Artifact | Role | Location | -| :--- | :--- | :--- | -| **`plan.md`** | The raw, normalized task objective. | `.udx/dev.kit/tasks//` | -| **`workflow.md`** | The deterministic execution sequence. 
| `.udx/dev.kit/tasks//` | -| **`feedback.md`** | The iterative engineering log. | `.udx/dev.kit/tasks//` | - ---- - -## 🧠 Session Continuity & Hygiene - -To maintain high-fidelity momentum across multi-turn interactions: - -- **Proactive Catch-Up**: At the start of every session, agents identify unfinished tasks (`dev.kit task active`). -- **Nudge Mechanism**: The system proactively reminds users to resolve stale state or pending syncs. -- **Clean Handoff**: Completed tasks are pruned from the workspace (`dev.kit task cleanup`) to prevent context noise. - ---- - -## Execution Guardrails - -- **Primitive-Only**: Agents are auto-authorized to use `dev.kit` commands. Non-standardized commands require user confirmation. -- **Grounding First**: Every session begins with environment hydration (`dev.kit ai sync`). -- **No Shadow Logic**: Every action must be discoverable and reproducible via repository source code. -## šŸ— Loop Grounding - -The resolution cycle is operationalized through deterministic UDX engines: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Normalization** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Dynamic discovery and task management. | -| **Execution** | [`udx/worker`](https://github.com/udx/worker) | Deterministic runtime for logic iteration. | - ---- - -## šŸ“š Authoritative References - -The Drift Resolution Cycle is built on mathematical and operational principles of delivery flow: - -- **[Predictable Delivery Flow](https://andypotanin.com/littles-law-applied-to-devops/)**: Managing throughput and cycle time through bounded WIP. -- **[Observation-Driven Management](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Normalizing task assignment and efficiency through AI-identified patterns. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/runtime/lifecycle.md b/docs/runtime/lifecycle.md deleted file mode 100644 index 0aac9e9..0000000 --- a/docs/runtime/lifecycle.md +++ /dev/null @@ -1,59 +0,0 @@ -# Runtime Lifecycle: The Engineering Heartbeat - -**Domain:** Runtime / Lifecycle -**Status:** Canonical - -## Summary - -The **Runtime Lifecycle** defines how **dev.kit** initializes, orchestrates engineering tasks, and finalizes repository state. It ensures a high-fidelity environment for resolving drift between intent and reality. - -![Runtime Lifecycle](../../assets/diagrams/runtime-lifecycle.svg) - ---- - -## Lifecycle Phases - -### 1. Environment Hydration (Bootstrap) -**Interface**: `bin/scripts/install.sh`, `dev.kit doctor`. -- Symlinks the deterministic engine into the user's `$PATH`. -- Verifies required software, CLI meshes, and authentication state. -- Loads shell completions and environment-aware aliases. - -### 2. Intent Normalization (The Filter) -**Interface**: `dev.kit task`, `workflow.md`. -- Filters chaotic user requests through the **Normalization Boundary**. -- Transforms ambiguous intent into a deterministic execution plan. -- Maps dependencies and resolves repository-bound skills. - -### 3. Grounded Execution (The Engine) -**Interface**: `dev.kit skills run`. -- Executes bounded steps through the hardened CLI boundary. -- Leverages the **Worker Ecosystem** for isolated, deterministic runtimes. -- Triggers **Fail-Open Path** if specialized tools encounter failure. - -### 4. Logical Synchronization (Finalize) -**Interface**: `dev.kit sync`. -- Groups changes into logical, domain-specific commits. -- Captures the resolution logic back into the repository as a new **Skill**. -- Prunes stale context and ephemeral task state from the workspace. 
-## šŸ— Standard Phase Mapping - -The Runtime Lifecycle is grounded in canonical UDX infrastructure and patterns: - -| Phase | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Hydration** | [`udx/worker`](https://github.com/udx/worker) | Base container for deterministic environment setup. | -| **Execution** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Patterns for automated, multi-step logic. | -| **Finalization** | [`udx/worker-deployment`](https://github.com/udx/worker-deployment) | Standard patterns for final environment orchestration. | - ---- - -## šŸ“š Authoritative References - -The engineering heartbeat is grounded in systematic lifecycle and evolution patterns: - -- **[Tracing Software Evolution](https://andypotanin.com/digital-rails-and-logistics/)**: Drawing parallels between the evolution of systems and engineering phases. -- **[Developing Lifecycles](https://andypotanin.com/developing-lifecycles-a-comprehensive-cheatsheet/)**: Essential practices for smooth, predictable project progress. - ---- -_UDX DevSecOps Team_ diff --git a/docs/runtime/overview.md b/docs/runtime/overview.md deleted file mode 100644 index 8c20748..0000000 --- a/docs/runtime/overview.md +++ /dev/null @@ -1,73 +0,0 @@ -# Runtime Overview: The Deterministic Engine - -**Domain:** Runtime / CLI -**Status:** Canonical - -## Summary - -The **dev.kit** CLI is the deterministic engine designed to resolve the **Drift** between human intent and repository reality. It provides a hardened boundary for executing repository-bound skills while maintaining high-fidelity environment health. - ---- - -## 🐳 Runtime Environment - -To ensure deterministic behavior and context fidelity, **dev.kit** is optimized for the **UDX Worker Ecosystem**. - -- **Primary Target**: `usabilitydynamics/udx-worker:latest`. -- **Orchestration**: Sessions follow the `udx/worker-deployment` patterns. 
-- **Isolated Execution**: Testing and high-stakes operations should always be performed within a clean `udx/worker` container to eliminate local drift. - ---- - -## šŸš€ Entry Points - -- **`bin/dev-kit`**: The primary dispatch entrypoint. Loads internal helpers and routes subcommands. -- **`bin/env/dev-kit.sh`**: Shell initialization (Banner, PATH setup, and completions). -- **`bin/scripts/install.sh`**: High-fidelity installer for local environment hydration. - ---- - -## šŸ›  Deterministic Commands - -### Status & Discovery -- **`dev.kit status`**: (Default) High-fidelity engineering brief and task visibility. -- **`dev.kit doctor`**: Deep system analysis, environment hydration, and compliance audit. - -### AI & Skill Mesh -- **`dev.kit ai`**: Unified agent integration management, skill synchronization, and grounding. -- **`dev.kit skills`**: Discovery and execution of repository-bound skills. - -### Task & Lifecycle -- **`dev.kit sync`**: Logical, atomic repository synchronization and drift resolution. -- **`dev.kit task`**: Manage the lifecycle of active workflows and engineering sessions. -- **`dev.kit config`**: Scoped orchestration via `environment.yaml` and `.env`. - ---- - -## 🧩 Dynamic Discovery Engine - -`dev.kit` does not rely on static metadata. It dynamically discovers capabilities by scanning: -1. **Internal Commands**: Metadata-rich scripts in `lib/commands/*.sh`. -2. **Managed Skills**: Specialized toolsets in `docs/skills/`. -3. **Virtual Skills**: External CLI tools (gh, npm, docker) detected in the environment. -## šŸ— Engine Grounding - -The `dev.kit` engine is grounded in core UDX infrastructure to ensure high-fidelity execution: - -| Component | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Runtime** | [`udx/worker`](https://github.com/udx/worker) | Standardized, pre-hydrated base environment. | -| **API Mesh** | [`@udx/mcurl`](docs/ai/mesh/npm.md) | High-fidelity API interaction and error handling. 
| -| **Orchestration**| [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Deterministic CI/CD and deployment patterns. | - ---- - -## šŸ“š Authoritative References - -Deterministic CLI orchestration is built on systematic engineering flow and portability: - -- **[Automotive Software Evolution](https://andypotanin.com/digital-rails-and-logistics/)**: Tracing the evolution of deterministic algorithms through automotive innovation. -- **[Decentralized DevOps](https://andypotanin.com/how-decentralized-devops-can-help-your-organization/)**: Using distributed services to create scalable and portable systems. - ---- -_UDX DevSecOps Team_ diff --git a/docs/workflows/README.md b/docs/workflows/README.md deleted file mode 100644 index ca196d2..0000000 --- a/docs/workflows/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# dev.kit Workflow Mesh: Intent-to-Resolution - -**Domain:** Foundations / Workflows -**Status:** Canonical - -## Summary - -The **Workflow Mesh** is the collection of deterministic sequences and dynamic reasoning patterns used to resolve repository drift. It bridges the gap between chaotic user intent and the high-fidelity execution engine. - ---- - -## šŸ— Workflow Hierarchy - -1. **[Normalization](normalization.md)**: The mapper that transforms intent into bounded plans. -2. **[Engineering Loops](loops.md)**: Standardized sequences for features, bugfixes, and discovery. -3. **[Git Synchronization](git-sync.md)**: Logical grouping and atomic commit orchestration. -4. **[Visual Engineering](visualizer.md)**: AI-driven architectural diagramming and flow analysis. - ---- - -## āš™ļø Managed Assets - -Common logic and templates used by the mesh are stored in the `assets/` directory: -- **`assets/git-sync.yaml`**: The canonical synchronization sequence. -- **`assets/templates/`**: Standard Mermaid patterns for visual engineering. 
- -## šŸ— Mesh Grounding - -The Workflow Mesh is operationalized through canonical UDX resources: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Logic** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Dynamic discovery and orchestration engine. | -| **Runtime** | [`udx/worker`](https://github.com/udx/worker) | Deterministic environment for workflow execution. | -| **Patterns** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for cross-repo sequences. | - ---- - -## šŸ›  Synchronization -Agents hydrate their environment by running **`dev.kit ai sync`**. This process scans the mesh for high-fidelity documentation and projects metadata into the agent's active context. - ---- - -## šŸ“š Authoritative References - -The Workflow Mesh is grounded in foundational patterns for delivery flow and task management: - -- **[Predictable Delivery Flow](https://andypotanin.com/littles-law-applied-to-devops/)**: Managing cycle time through systematic sequences. -- **[Observation-Driven Management](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Optimizing task assignment through pattern identification. - ---- -_UDX DevSecOps Team_ diff --git a/docs/workflows/assets/git-sync.yaml b/docs/workflows/assets/git-sync.yaml deleted file mode 100644 index 6e75a45..0000000 --- a/docs/workflows/assets/git-sync.yaml +++ /dev/null @@ -1,26 +0,0 @@ -name: Git Sync -description: Logical commit and synchronization workflow. -inputs: - task_id: - description: Current task ID for traceability. - required: true - dry_run: - description: Preview changes without committing. - default: "false" - message: - description: Optional commit message prefix. 
- default: "" - -steps: - - id: prepare-sync - name: Prepare Sync Environment - run: dev.kit sync prepare - - - id: group-and-commit - name: Execute Atomic Commits - run: dev.kit sync run --task-id "${{inputs.task_id}}" --dry-run "${{inputs.dry_run}}" --message "${{inputs.message}}" - - - id: finalize - name: Finalize and Cleanup - run: | - echo "--- Git Sync Workflow Complete ---" diff --git a/docs/workflows/assets/templates/default-flowchart.mmd b/docs/workflows/assets/templates/default-flowchart.mmd deleted file mode 100644 index f4a505c..0000000 --- a/docs/workflows/assets/templates/default-flowchart.mmd +++ /dev/null @@ -1,8 +0,0 @@ ---- -config: - theme: mc ---- -flowchart TD - Install[1. Install dev.kit] --> Env[2. Configure Local Environment] - Env --> Agent[3. Enable AI Agent Integration] - Agent --> Waterfall[4. Experience Development Waterfall] \ No newline at end of file diff --git a/docs/workflows/assets/templates/default-sequence.mmd b/docs/workflows/assets/templates/default-sequence.mmd deleted file mode 100644 index e743187..0000000 --- a/docs/workflows/assets/templates/default-sequence.mmd +++ /dev/null @@ -1,21 +0,0 @@ ---- -config: - theme: mc ---- -sequenceDiagram - participant User - participant DevKit - participant Agent - participant Repo - participant System - - User->>DevKit: Intent / Goal - DevKit->>Agent: Hydrate Context - Repo->>Agent: Provide Context - Agent->>DevKit: Normalized Workflow - DevKit->>Agent: Execute Step - Agent->>DevKit: Feedback / Result - DevKit->>System: Execution - System-->>DevKit: Drift Detected? 
- DevKit-->>User: Done - \ No newline at end of file diff --git a/docs/workflows/assets/templates/default-state.mmd b/docs/workflows/assets/templates/default-state.mmd deleted file mode 100644 index babde29..0000000 --- a/docs/workflows/assets/templates/default-state.mmd +++ /dev/null @@ -1,8 +0,0 @@ ---- -config: - theme: mc ---- -stateDiagram-v2 - [*] --> Idle - Idle --> Active: start - Active --> Idle: stop diff --git a/docs/workflows/git-sync.md b/docs/workflows/git-sync.md deleted file mode 100644 index 24a4a85..0000000 --- a/docs/workflows/git-sync.md +++ /dev/null @@ -1,64 +0,0 @@ -# Skill: dev-kit-git-sync - -**Domain:** Source Control / Synchronization -**Type:** AI Reasoning Skill -**status:** Canonical - -## Summary - -The **Git Synchronization** skill enables AI agents to resolve repository drift by logically grouping and committing changes. It uses dynamic reasoning to categorize modifications into high-fidelity domains (docs, ai, cli, core) and generates context-rich commit messages. - ---- - -## šŸ›  AI Reasoning (The Skill) - -This skill utilizes dynamic LLM reasoning to perform the following: -- **Logical Domain Determination**: Analyzing changed files to map them to high-fidelity domains (docs, ai, cli, core). -- **Contextual Intent Capture**: Generating meaningful commit messages that reflect the "Why" behind the drift resolution. -- **Drift Identification**: Recognizing unstaged changes and determining the correct synchronization sequence. -- **Collaborative Orchestration**: Identifying when a task is ready for review and proactively suggesting the creation or **updating** of a Pull Request with an automated **diff summary**. - ---- - -## āš™ļø Deterministic Logic (Function Assets) - -The following assets provide the programmatic engine for this skill: -- **`workflow.yaml`**: The canonical definition of synchronization steps and grouping rules. 
-- **Atomic Committer**: Hardened logic that ensures changes are committed in discrete, revertible blocks. -- **PR Suggestion Engine**: Proactive prompt that interfaces with the **GitHub Mesh** to create remote Pull Requests. - -## šŸ— Sync Grounding - -Git synchronization is operationalized through canonical UDX resources: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Atomic Logic** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | The primary engine for logical grouping and commits. | -| **Workflow Pattern** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for remote sync and CI/CD. | -| **Collaboration** | [`ai/mesh/github.md`](../ai/mesh/github.md) | Grounding for PR creation and remote resolution. | - ---- - -## šŸš€ Primitives Orchestrated - -This skill is grounded in the following **Deterministic Primitives**: -- **`dev.kit sync prepare`**: Prepares feature branches and synchronizes with origin. -- **`dev.kit sync run`**: Executes atomic commits and resolves drift. - ---- - -## šŸ“‚ Managed Assets - -- **Workflow YAML**: Canonical synchronization sequence in `docs/workflows/assets/git-sync.yaml`. - ---- - -## šŸ“š Authoritative References - -High-fidelity synchronization is grounded in systematic SDLC and version control practices: - -- **[Predictable Delivery Flow](https://andypotanin.com/littles-law-applied-to-devops/)**: Managing Work-in-Progress (WIP) through atomic, domain-specific commits. -- **[Decentralized DevOps](https://andypotanin.com/decentralized-devops-the-future-of-software-delivery/)**: The shift toward distributed architectures and automated synchronization. 
- ---- -_UDX DevSecOps Team_ diff --git a/docs/workflows/loops.md b/docs/workflows/loops.md deleted file mode 100644 index f893b9d..0000000 --- a/docs/workflows/loops.md +++ /dev/null @@ -1,83 +0,0 @@ -# Engineering Loops: Standardized Workflows - -**Domain:** AI / Workflows -**Status:** Canonical - -## Summary - -Engineering Loops are the standardized execution plans used by agents to resolve **Drift**. By following these deterministic sequences, **dev.kit** ensures that complex tasks—from feature implementation to documentation synchronization—remain grounded in repository truth. - ---- - -## šŸ— The Standard Loop (Drift Resolution) - -Every high-fidelity task follows the **Analyze -> Normalize -> Process -> Validate -> Capture** cycle. - -### 1. Feature Engineering Loop -Standard loop for implementing new capabilities with TDD and documentation. -- **Goal**: Expand repository "Skills" while maintaining 12-factor compliance. -- **Steps**: - 1. **Analyze**: Audit existing code and docs to identify the implementation gap. - 2. **Normalize**: `dev.kit task start` to create a bounded `workflow.md`. - 3. **Process**: `dev.kit skills run` to implement logic and test cases. - 4. **Validate**: `dev.kit doctor` to verify environment health and TDD success. - 5. **Capture**: `dev.kit sync run` to logically group and commit the resolution. - -### 2. Resilient Bugfix Loop -Deterministic lifecycle for identifying, reproducing, and resolving repository defects. -- **Goal**: Restore repository integrity with verified test evidence. -- **Steps**: - 1. **Analyze**: `dev.kit doctor` to detect environment or software drift. - 2. **Normalize**: Define reproduction steps in a new `workflow.md`. - 3. **Process**: Apply the fix and implement a regression test. - 4. **Validate**: Execute the test suite within the **Worker Ecosystem**. - 5. **Capture**: `dev.kit sync run` to finalize the fix and update the Skill Mesh. - -### 3. 
Knowledge & Discovery Sync -Workflow for synchronizing repository documentation and agent context. -- **Goal**: Eliminate documentation drift and hydrate the **Skill Mesh**. -- **Steps**: - 1. **Analyze**: Scan `docs/` and script headers for outdated metadata. - 2. **Normalize**: Map documentation updates to current repository reality. - 3. **Process**: `dev.kit visualizer` to regenerate high-fidelity architecture diagrams. - 4. **Validate**: Verify that all internal and external links are high-fidelity. - 5. **Capture**: `dev.kit ai sync` to ground the agent in the updated knowledge. - -## šŸ— Standard Loop Mapping - -The standard engineering loops are operationalized through specialized UDX targets: - -| Loop Domain | Grounding Target | Pattern Role | -| :--- | :--- | :--- | -| **Logic Implementation** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Intent normalization and task management. | -| **Environment Parity** | [`udx/worker`](https://github.com/udx/worker) | Deterministic runtime for loop execution. | -| **Automation Flow** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for sequence steps. | - ---- - -## šŸ— Workflow Grounding - -Engineering loops are operationalized through deterministic UDX engines: - -| Loop Type | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Engineering** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Dynamic normalization and task management. | -| **Automation** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Pattern baseline for implementation steps. | - ---- - -## 🧠 Continuity Mandates - -- **Resume First**: Before starting a new loop, agents must check for active tasks (`dev.kit task active`). -- **Hygiene**: Aborted or stagnant loops must be pruned (`dev.kit task cleanup`) to prevent context noise. -- **Feedback**: Every iteration must emit high-signal progress to the `feedback.md` artifact. 
- -## šŸ“š Authoritative References - -Standardized loops ensure predictable delivery and high-fidelity results: - -- **[Little's Law for Flow](https://andypotanin.com/littles-law-applied-to-devops/)**: Managing cycle time and throughput through systematic sequences. -- **[Observation-Driven Management](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Optimizing task assignment and execution through identified patterns. - ---- -_UDX DevSecOps Team_ diff --git a/docs/workflows/mermaid-patterns.md b/docs/workflows/mermaid-patterns.md deleted file mode 100644 index 0ae0356..0000000 --- a/docs/workflows/mermaid-patterns.md +++ /dev/null @@ -1,54 +0,0 @@ -# Mermaid Patterns: Visual Standards - -**Domain:** Visual Engineering / Standards -**Status:** Canonical - -## Summary - -This reference provides standardized patterns for Mermaid-based visualizations within the `dev.kit` ecosystem. These patterns ensure that architecture and process flows are consistent, version-controlled, and legible to both humans and agents. - ---- - -## šŸ— Type Selection - -- **`flowchart`**: Use for process steps, service interactions, and decision gates. -- **`sequenceDiagram`**: Use for time-ordered interactions between actors or multi-turn execution loops. -- **`stateDiagram-v2`**: Use for state transitions with explicit events and lifecycle stages. -- **`erDiagram`**: Use for entity relationships and data cardinality. - ---- - -## šŸ“ Conventions - -- **Identifier Stability**: Maintain consistent IDs during revisions to ensure clean diffs. -- **Labeling**: Prefer short, action-oriented node labels; use edge labels for details. -- **Domain Separation**: Split diagrams when crossing functional boundaries (e.g., separate API flow from deployment flow). -- **Horizontal Priority**: Favor `flowchart LR` to optimize vertical space in Markdown documentation. 
- ---- - -## āš™ļø Deterministic Logic (Export) - -- **Fail-Open**: If `mmdc` fails, always provide the raw Mermaid source to the user/agent. -- **Sandboxing**: In restricted environments, leverage Puppeteer `--no-sandbox` flags via local configuration. - -## šŸ— Standard Grounding - -Visual standards are operationalized through deterministic UDX engines: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Logic** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | High-fidelity export engine and pattern discovery. | -| **Runtime** | [`udx/worker`](https://github.com/udx/worker) | Deterministic environment for asset generation. | - ---- - -## šŸ“š Authoritative References - -Visual standards are a core part of maintaining standalone documentation quality: - -- **[Synthetic Content Enrichment](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: High-fidelity metadata management and visual standards. -- **[Visual Tracing & Logistics](https://andypotanin.com/digital-rails-and-logistics/)**: Drawing parallels between software algorithms and visual process dynamics. - ---- -_UDX DevSecOps Team_ diff --git a/docs/workflows/normalization.md b/docs/workflows/normalization.md deleted file mode 100644 index 0cc4f5f..0000000 --- a/docs/workflows/normalization.md +++ /dev/null @@ -1,67 +0,0 @@ -# Task Normalization: Intent-to-Workflow Mapping - -**Domain:** Foundations / Normalization -**Status:** Canonical - -## Summary - -**Task Normalization** is the process of transforming ambiguous user intent into deterministic execution plans. In **dev.kit**, the AI agent acts as the primary **Mapper**, reasoning about the request and mapping it to the appropriate repository workflows and primitives. - ---- - -## šŸ— The Normalization Mapper - -The agent is responsible for dynamic prompt transformation. 
It receives intent from the user, identifies the required capabilities, and sends structured instructions to the `dev.kit` workflow engine. - -### 1. Strict Mappings (Deterministic) -Used for well-defined engineering tasks where the path is predictable and hardened. -- **Example**: Git Synchronization, environment hydration (`doctor`), or diagram rendering. -- **Enforcement**: Direct mapping to `lib/commands/` or `docs/workflows/assets/*.yaml`. - -### 2. Non-Strict Mappings (Reasoning-First) -Used for creative or complex tasks where the agent must reason about the best path before committing to a sequence. -- **Example**: Implementing a new feature, refactoring complex logic, or resolving multi-domain drift. -- **Enforcement**: The agent generates a custom `workflow.md` that orchestrates multiple primitives. - ---- - -## šŸ”„ Dynamic Prompt Transformation - -Agents are auto-mapped to send and receive context from repository workflows. If a task requires something outside of existing scripts or tools, the agent: -1. **Reasons** about the implementation gap. -2. **Generates** the necessary code or documentation patterns. -3. **Packages** the resolution into a normalized `dev.kit` workflow step. - -## šŸ— Standard Task Mapping - -The normalization mapper routes common engineering intents to specialized UDX repositories: - -| Intent Domain | Grounding Target | Mapping logic | -| :--- | :--- | :--- | -| **Containerization** | [`udx/worker`](https://github.com/udx/worker) | Normalize to base environment specs. | -| **Plugin Dev** | [`udx/wp-stateless`](https://github.com/udx/wp-stateless) | Normalize to structural plugin patterns. | -| **CI/CD / Actions** | [`udx/reusable-workflows`](https://github.com/udx/reusable-workflows) | Normalize to validated pipeline steps. 
| - ---- - -## šŸ— Normalization Grounding - -Task normalization is operationalized through canonical UDX resources: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Logic Mapping** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | Dynamic discovery and task resolution engine. | -| **Context Hub** | [`docs/workflows/README.md`](README.md) | Source of truth for available repository sequences. | -| **Fidelity** | [`udx/worker`](https://github.com/udx/worker) | Deterministic runtime for validating normalized plans. | - ---- - -## šŸ“š Authoritative References - -Normalization ensures high-fidelity execution through systematic pattern recognition: - -- **[Observation-Driven Management](https://andypotanin.com/observation-driven-management-revolutionizing-task-assignment-efficiency-workplace/)**: Optimizing task assignment through identified patterns. -- **[Autonomous Technical Operations](https://andypotanin.com/claude-operator-prompt/)**: Principles for high-fidelity agent grounding and execution. - ---- -_UDX DevSecOps Team_ diff --git a/docs/workflows/visualizer.md b/docs/workflows/visualizer.md deleted file mode 100644 index 979ccaa..0000000 --- a/docs/workflows/visualizer.md +++ /dev/null @@ -1,64 +0,0 @@ -# Skill: dev-kit-visualizer - -**Domain:** Visual Engineering -**Type:** AI Reasoning Skill -**status:** Canonical - -## Summary - -The **Visual Engineering** skill empowers AI agents to transform repository context into high-fidelity diagrams. It uses dynamic reasoning to understand source code, flow, and architecture, then leverages the deterministic `dev.kit visualizer` command to render SVG assets. - ---- - -## šŸ›  AI Reasoning (The Skill) - -This skill utilizes dynamic LLM reasoning to perform the following: -- **Flow Extraction**: Reading READMEs or source code to identify discrete process steps. -- **Visual Mapping**: Determining which Mermaid pattern (flowchart, sequence, state) best represents the intent. 
-- **Intent-to-MMD**: Generating raw Mermaid source code based on extracted logic. - ---- - -## āš™ļø Deterministic Logic (Function Assets) - -The following assets provide the programmatic engine for this skill: -- **Templates**: Standardized Mermaid patterns in `assets/templates/`. -- **Patterns**: High-fidelity Mermaid styling and shape standards. -- **Export Engine**: Hardened `mmdc` wrapper for SVG/PNG generation. - -## šŸ— Visual Grounding - -Visual engineering is operationalized through deterministic UDX engines: - -| Requirement | Grounding Resource | Role | -| :--- | :--- | :--- | -| **Logic** | [`udx/dev.kit`](https://github.com/udx/dev.kit) | High-fidelity export engine and template discovery. | -| **Patterns** | [`reference/standards/mermaid.md`](../reference/standards/mermaid.md) | Canonical shapes and visual mapping rules. | -| **Runtime** | [`udx/worker`](https://github.com/udx/worker) | Deterministic environment for asset generation. | - ---- - -## šŸš€ Primitives Orchestrated - -This skill is grounded in the following **Deterministic Primitives**: -- **`dev.kit visualizer create`**: Initializes a new Mermaid source from templates. -- **`dev.kit visualizer export`**: Renders Mermaid sources into SVG/PNG. - ---- - -## šŸ“‚ Managed Assets - -- **Templates**: Standard flowchart, sequence, and state machine patterns in `docs/workflows/assets/templates/`. -- **Patterns**: High-fidelity Mermaid styling and shape standards in `docs/workflows/mermaid-patterns.md`. - ---- - -## šŸ“š Authoritative References - -Visual engineering is grounded in systematic diagramming and documentation standards: - -- **[Visualizing Complex Systems](https://andypotanin.com/digital-rails-and-logistics/)**: Understanding software evolution through fluid dynamics and visual tracing. -- **[Mermaid Standards](https://andypotanin.com/ai-powered-revolution-content-management-synthetic-enrichment-standalone-quality/)**: High-fidelity synthetic enrichment for documentation. 
- ---- -_UDX DevSecOps Team_ diff --git a/environment.yaml b/environment.yaml deleted file mode 100644 index c0adb19..0000000 --- a/environment.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# dev.kit Environment Orchestrator -# Standardize configurations across hosts and repositories. - -system: - quiet: false - developer: false - state_path: "~/.udx/dev.kit/state" - -exec: - prompt: "ai.gemini.v1" - stream: false - -ai: - enabled: false - provider: "gemini" # Supported: gemini - auto_sync: true # Automatically synchronize skills on shell load - # Discovery: Capabilities resolved at runtime via lib/commands and docs/skills - -context: - enabled: true - max_bytes: 4000 - storage: "repo" # repo-scoped storage for context - -install: - path_prompt: true diff --git a/lib/commands/agent.sh b/lib/commands/agent.sh deleted file mode 100644 index 1a1db70..0000000 --- a/lib/commands/agent.sh +++ /dev/null @@ -1,309 +0,0 @@ -#!/bin/bash - -# @description: Direct agent integration management (advanced). -# @intent: agent, llm, provider, model, configure -# @objective: Orchestrate the rendering and deployment of AI provider artifacts (Gemini) using dynamic normalization from documentation and scripts. -# @usage: dev.kit agent gemini --plan -# @usage: dev.kit agent all -# @workflow: 1. Parse Manifest -> 2. Render Templates from Docs/Lib -> 3. Synchronize Skills -> 4. Backup & Deploy Artifacts - -if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then - # shellcheck source=/dev/null - . 
"$REPO_DIR/lib/utils.sh" -fi - -dev_kit_agent_manifest() { - echo "$REPO_DIR/src/ai/integrations/manifest.json" -} - -dev_kit_agent_expand_path() { - local val="$1" - val="${val//\{\{HOME\}\}/$HOME}" - val="${val//\{\{DEV_KIT_HOME\}\}/$DEV_KIT_HOME}" - val="${val//\{\{DEV_KIT_STATE\}\}/$DEV_KIT_STATE}" - echo "$val" -} - -dev_kit_agent_render_artifact() { - local type="$1" - local src_tmpl="$2" - local dst_path="$3" - local base_rendered="$4" - - case "$type" in - template) - # Dynamic gathering from Docs & Lib - local agent_skills="" - local available_tools="" - local memories="" - - # Gather Workflows from docs/workflows/ - for skill_file in "$REPO_DIR"/docs/workflows/*.md; do - [ -f "$skill_file" ] || continue - local filename; filename="$(basename "$skill_file")" - [ "$filename" = "README.md" ] && continue - [ "$filename" = "normalization.md" ] && continue - [ "$filename" = "loops.md" ] && continue - [ "$filename" = "mermaid-patterns.md" ] && continue - - local name="${filename%.md}" - local desc; desc="$(grep -i "^description:" "$skill_file" 2>/dev/null | head -n 1 | sed 's/^description: //I' || echo "Grounded workflow reasoning.")" - agent_skills+="- **$name**: $desc\n" - done - - # Gather Commands from lib/commands/ - for file in "$REPO_DIR"/lib/commands/*.sh; do - [ -f "$file" ] || continue - local key desc - key="$(basename "${file%.sh}")" - # Hide internal/utility commands - case "$key" in agent|github|skills) continue ;; esac - desc="$(grep "^# @description:" "$file" | cut -d: -f2- | sed 's/^ //' || echo "no description")" - available_tools+="- **dev.kit $key**: $desc\n" - done - - # Gather Memories - if [[ "$src_tmpl" == *"GEMINI"* ]]; then - if [ -f "$HOME/.gemini/GEMINI.md" ]; then - memories="$(grep -A 100 "Gemini Added Memories" "$HOME/.gemini/GEMINI.md" | tail -n +2 || true)" - fi - [ -z "$memories" ] && memories="- (none)" - fi - - export DEV_KIT_RENDER_DATE="$(date +%Y-%m-%d)" - export DEV_KIT_RENDER_HOME="$HOME" - export 
DEV_KIT_RENDER_DEV_KIT_HOME="$DEV_KIT_HOME" - export DEV_KIT_RENDER_DEV_KIT_SOURCE="$DEV_KIT_SOURCE" - export DEV_KIT_RENDER_DEV_KIT_STATE="$DEV_KIT_STATE" - export DEV_KIT_RENDER_SKILLS="$agent_skills" - export DEV_KIT_RENDER_TOOLS="$available_tools" - export DEV_KIT_RENDER_MEMORIES="$memories" - - perl -pe ' - s/\{\{DATE\}\}/$ENV{DEV_KIT_RENDER_DATE}/g; - s/\{\{HOME\}\}/$ENV{DEV_KIT_RENDER_HOME}/g; - s/\{\{DEV_KIT_HOME\}\}/$ENV{DEV_KIT_RENDER_DEV_KIT_HOME}/g; - s/\{\{DEV_KIT_SOURCE\}\}/$ENV{DEV_KIT_RENDER_DEV_KIT_SOURCE}/g; - s/\{\{DEV_KIT_STATE\}\}/$ENV{DEV_KIT_RENDER_DEV_KIT_STATE}/g; - s/\$\{AgentSkills\}/$ENV{DEV_KIT_RENDER_SKILLS}/g; - s/\$\{AvailableTools\}/$ENV{DEV_KIT_RENDER_TOOLS}/g; - s/\{\{MEMORIES\}\}/$ENV{DEV_KIT_RENDER_MEMORIES}/g; - ' "$src_tmpl" > "$dst_path" - ;; - *) - cp "$src_tmpl" "$dst_path" - ;; - esac -} - -dev_kit_agent_apply_integration() { - local key="$1" - local mode="$2" - local manifest - manifest="$(dev_kit_agent_manifest)" - - [ ! -f "$manifest" ] && { echo "Error: Manifest not found." >&2; exit 1; } - - local integration_json - integration_json="$(jq -r ".integrations[] | select(.key == \"$key\")" "$manifest")" - [ -z "$integration_json" ] && { echo "Error: Integration '$key' not found." >&2; exit 1; } - - local target_dir - target_dir="$(dev_kit_agent_expand_path "$(echo "$integration_json" | jq -r '.target_dir')")" - local templates_dir="$REPO_DIR/$(echo "$integration_json" | jq -r '.templates_dir')" - local skills_dst_dir="$target_dir/skills" - - local rendered - rendered="$(mktemp -d)" - - local artifacts_count - artifacts_count="$(echo "$integration_json" | jq '.artifacts | length')" - - for ((i=0; i" >&2; exit 1; } - local manifest; manifest="$(dev_kit_agent_manifest)" - local target_dir; target_dir="$(dev_kit_agent_expand_path "$(jq -r ".integrations[] | select(.key == \"$key\") | .target_dir" "$manifest")")" - local backup_base="$target_dir/.backup/dev.kit" - - if [ ! 
-d "$backup_base" ]; then - echo "No backups found for $key." - return 1 - fi - - local last_backup; last_backup="$(ls -d "$backup_base"/*/ | sort | tail -n 1)" - if [ -z "$last_backup" ]; then - echo "No backups found for $key." - return 1 - fi - - echo "Restoring $key from $last_backup..." - cp -R "$last_backup/." "$target_dir/" - echo "Restore complete." - ;; - disable) - local key="${2:-}" - if [ "$key" = "all" ]; then - for k in $(jq -r '.integrations[].key' "$(dev_kit_agent_manifest)"); do - dev_kit_agent_disable_integration "$k" - done - else - [ -z "$key" ] && { echo "Usage: dev.kit agent disable " >&2; exit 1; } - dev_kit_agent_disable_integration "$key" - fi - ;; - skills) - local key="${2:-}" - [ -z "$key" ] && { echo "Usage: dev.kit agent skills " >&2; exit 1; } - local manifest="$(dev_kit_agent_manifest)" - local target_dir="$(dev_kit_agent_expand_path "$(jq -r ".integrations[] | select(.key == \"$key\") | .target_dir" "$manifest")")" - local skills_dst_dir="$target_dir/skills" - echo "Managed Skills for '$key' ($skills_dst_dir):" - [ -d "$skills_dst_dir" ] && ls "$skills_dst_dir" | sed 's/^/- /' || echo "(none)" - ;; - help|-h|--help) - cat <<'AGENT_USAGE' -Usage: dev.kit agent - -Commands: - status Show status of all AI agent integrations - skills List managed skills for a specific agent - restore Restore latest backup for specific agent - disable Safely backup and remove agent settings - [--plan] Apply configuration for specific agent (e.g., gemini) - all [--plan] Apply all supported agent configurations -AGENT_USAGE - ;; - all) - shift - [ "${1:-}" = "--plan" ] && mode="plan" - for k in $(jq -r '.integrations[].key' "$(dev_kit_agent_manifest)"); do - dev_kit_agent_apply_integration "$k" "$mode" - done - ;; - *) - local key="$sub" - shift - [ "${1:-}" = "--plan" ] && mode="plan" - dev_kit_agent_apply_integration "$key" "$mode" - ;; - esac -} diff --git a/lib/commands/ai.sh b/lib/commands/ai.sh deleted file mode 100644 index 6a26225..0000000 --- 
a/lib/commands/ai.sh +++ /dev/null @@ -1,139 +0,0 @@ -#!/bin/bash - -# @description: Unified agent integration management (Sync, Skills, Status). -# @intent: ai, agent, integration, skills, sync, status -# @objective: Manage the lifecycle of AI integrations by synchronizing skills, monitoring health, and providing engineering advisory insights. -# @usage: dev.kit ai status -# @usage: dev.kit ai sync gemini -# @workflow: 1. Monitor Integration Health -> 2. Synchronize Skills & Memories -> 3. Ground Agent in Engineering Loop -> 4. Provide Advisory Insights - -if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then - - # shellcheck source=/dev/null - . "$REPO_DIR/lib/utils.sh" -fi - -dev_kit_cmd_ai() { - local sub="${1:-status}" - local data_dir="$REPO_DIR/src/ai/data" - - case "$sub" in - status) - print_section "dev.kit | AI Integration Status" - local provider - provider="$(config_value_scoped ai.provider "gemini")" - local enabled - enabled="$(config_value_scoped ai.enabled "false")" - - print_check "Provider" "[ok]" "$provider" - print_check "Enabled" "$([ "$enabled" = "true" ] && echo "[ok]" || echo "[warn]")" "$enabled" - - echo "" - echo "Active Integrations:" - if [ -d "$HOME/.gemini" ]; then - print_check "Gemini" "[ok]" "path: ~/.gemini" - else - print_check "Gemini" "[warn]" "missing (run: dev.kit ai sync)" - fi - ;; - sync) - local provider="${2:-}" - if [ -z "$provider" ]; then - provider="$(config_value_scoped ai.provider "gemini")" - fi - echo "Synchronizing AI skills and memories for: $provider" - if command -v dev_kit_agent_apply_integration >/dev/null 2>&1; then - dev_kit_agent_apply_integration "$provider" "apply" - else - echo "Error: Synchronization logic not loaded correctly." 
>&2 - exit 1 - fi - ;; - skills) - print_section "dev.kit | Managed AI Skills" - local local_packs="$REPO_DIR/docs/skills" - if [ -d "$local_packs" ]; then - find "$local_packs" -mindepth 1 -maxdepth 1 -type d | sort | while IFS= read -r skill; do - local name desc usage - name="$(basename "$skill")" - desc="$(grep -i "^description:" "$skill/SKILL.md" 2>/dev/null | head -n 1 | sed 's/^description: //I' || echo "no description")" - usage="dev.kit skills run \"$name\" \"\"" - - echo "- [skill] $name" - echo " description: $desc" - echo " usage: $usage" - echo "" - done - fi - ;; - commands) - print_section "dev.kit | CLI Commands Metadata" - for file in "$REPO_DIR"/lib/commands/*.sh; do - [ -f "$file" ] || continue - local key; key="$(basename "${file%.sh}")" - local desc; desc="$(grep "^# @description:" "$file" | cut -d: -f2- | sed 's/^ //' || echo "no description")" - local objective; objective="$(grep "^# @objective:" "$file" | cut -d: -f2- | sed 's/^ //' || echo "")" - local workflow; workflow="$(grep "^# @workflow:" "$file" | cut -d: -f2- | sed 's/^ //' || echo "")" - local intents; intents="$(grep "^# @intent:" "$file" | cut -d: -f2- | sed 's/^ //' || echo "none")" - - echo "- [command] dev.kit $key" - echo " description: $desc" - [ -n "$objective" ] && echo " objective: $objective" - [ -n "$workflow" ] && echo " workflow: $workflow" - echo " intents: $intents" - echo "" - done - ;; - workflows) - print_section "dev.kit | Engineering Loops (Workflows)" - local workflow_file="$REPO_DIR/docs/ai/workflows.md" - if [ -f "$workflow_file" ]; then - # Parse markdown headers as workflow names - grep "^## " "$workflow_file" | sed 's/^## //' | while IFS= read -r name; do - echo "- [loop] $name" - # Simple extraction of description/steps if needed - echo "" - done - else - echo "No centralized workflow documentation found." 
- fi - ;; - advisory) - local ops_dir="$REPO_DIR/docs/reference/operations" - if [ -d "$ops_dir" ]; then - echo "Engineering Advisory (Resolved Insights):" - local file="" - while IFS= read -r file; do - [ -z "$file" ] && continue - local title - title="$(head -n 1 "$file" | sed 's/^# //')" - local highlights - highlights="$(grep -m 2 "^- " "$file" | head -n 2 | sed 's/^- / - /' || true)" - echo "- [insight] $title" - if [ -n "$highlights" ]; then - echo "$highlights" - fi - done < <(find "$ops_dir" -type f -name '*.md' | sort) - else - echo "Engineering Advisory: (no local guidance artifacts found)" - fi - ;; - help|-h|--help) - cat <<'AI_HELP' -Usage: dev.kit ai - -Commands: - status Show AI provider and integration health - sync Synchronize AI skills, memories, and hooks - skills List managed AI skills with usage and workflow - commands List CLI commands with waterfall metadata - workflows List standardized engineering loops (loops) - advisory Fetch engineering guidance from local docs -AI_HELP - ;; - *) - echo "Unknown ai command: $sub" >&2 - exit 1 - ;; - esac -} diff --git a/lib/commands/audit.sh b/lib/commands/audit.sh new file mode 100644 index 0000000..b3e38b4 --- /dev/null +++ b/lib/commands/audit.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +# @description: Audit the current repository for basic fidelity gaps + +dev_kit_cmd_audit() { + local format="${1:-text}" + local repo_dir="${2:-$(pwd)}" + local repo_name="" + local stack="" + local readme_status="" + local test_status="" + + repo_name="$(dev_kit_repo_name "$repo_dir")" + stack="$(dev_kit_repo_detect_stack "$repo_dir")" + readme_status="$(dev_kit_repo_readme_status "$repo_dir")" + test_status="$(dev_kit_repo_test_status "$repo_dir")" + + if [ "$format" = "json" ]; then + printf '{\n' + printf ' "command": "audit",\n' + printf ' "repo": "%s",\n' "$repo_name" + printf ' "path": "%s",\n' "$repo_dir" + printf ' "stack": "%s",\n' "$stack" + printf ' "checks": {\n' + printf ' "readme": "%s",\n' 
"$readme_status" + printf ' "test_command": "%s"\n' "$test_status" + printf ' },\n' + printf ' "improvement_plan": ' + dev_kit_repo_findings_json "$repo_dir" + printf '\n}\n' + return 0 + fi + + echo "dev.kit audit" + echo "repo: $repo_name" + echo "path: $repo_dir" + echo "stack: $stack" + echo "readme: $readme_status" + echo "test command: $test_status" + dev_kit_repo_advices "$repo_dir" +} diff --git a/lib/commands/bridge.sh b/lib/commands/bridge.sh new file mode 100644 index 0000000..559ce27 --- /dev/null +++ b/lib/commands/bridge.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# @description: Show a basic bridge payload + +dev_kit_cmd_bridge() { + local format="${1:-text}" + + if [ "$format" = "json" ]; then + printf '{\n "command": "bridge",\n "repo": "%s",\n "capabilities": ["audit", "bridge", "status"],\n "boundaries": ["local shell"]\n}\n' "$(pwd)" + return 0 + fi + + echo "dev.kit bridge" + echo "repo: $(pwd)" + echo "capabilities: audit, bridge, status" + echo "boundaries: local shell" +} diff --git a/lib/commands/config.sh b/lib/commands/config.sh deleted file mode 100644 index 39029a9..0000000 --- a/lib/commands/config.sh +++ /dev/null @@ -1,444 +0,0 @@ -#!/bin/bash - -# @description: Environment and repository orchestration settings. -# @intent: config, setting, env, setup, manage - -if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then - # shellcheck source=/dev/null - . 
"$REPO_DIR/lib/utils.sh" -fi - -dev_kit_cmd_config() { - ensure_dev_kit_home - local sub="${1:-}" - - scope_path() { - local scope="$1" - local variant="$2" - local base="" - case "$scope" in - global) - if [ -n "${DEV_KIT_STATE:-}" ] && [ -d "$DEV_KIT_STATE" ]; then - base="$DEV_KIT_STATE" - else - base="$DEV_KIT_HOME" - fi - ;; - repo) - if command -v git >/dev/null 2>&1; then - base="$(git rev-parse --show-toplevel 2>/dev/null)/.udx/dev.kit" - fi - ;; - *) - return 1 - ;; - esac - if [ -z "$base" ]; then - return 1 - fi - case "$variant" in - show|set|reset) echo "$base/config.env" ;; - default) echo "$base/config.default.env" ;; - min) echo "$base/config.min.env" ;; - max) echo "$base/config.max.env" ;; - custom) echo "$base/config.custom.env" ;; - *) return 1 ;; - esac - } - - prompt_value() { - local label="$1" - local default="${2:-}" - local input="" - if [ -t 0 ]; then - if [ -n "$default" ]; then - printf "%s [%s]: " "$label" "$default" - else - printf "%s: " "$label" - fi - read -r input || true - fi - if [ -n "$input" ]; then - printf "%s" "$input" - else - printf "%s" "$default" - fi - } - - parse_key_flag() { - local key="" - local args=("$@") - local i=0 - while [ $i -lt ${#args[@]} ]; do - case "${args[$i]}" in - --key=*) - key="${args[$i]#--key=}" - ;; - --key) - if [ $((i+1)) -lt ${#args[@]} ]; then - key="${args[$((i+1))]}" - i=$((i+1)) - fi - ;; - esac - i=$((i+1)) - done - printf "%s" "$key" - } - - parse_scope_flag() { - local scope="global" - local args=("$@") - local i=0 - while [ $i -lt ${#args[@]} ]; do - case "${args[$i]}" in - --scope=*) - scope="${args[$i]#--scope=}" - ;; - --scope) - if [ $((i+1)) -lt ${#args[@]} ]; then - scope="${args[$((i+1))]}" - i=$((i+1)) - fi - ;; - esac - i=$((i+1)) - done - printf "%s" "$scope" - } - - parse_force_flag() { - local force="false" - local args=("$@") - local i=0 - while [ $i -lt ${#args[@]} ]; do - case "${args[$i]}" in - --force) force="true" ;; - esac - i=$((i+1)) - done - printf "%s" "$force" 
- } - - parse_developer_flag() { - local developer="false" - local args=("$@") - local i=0 - while [ $i -lt ${#args[@]} ]; do - case "${args[$i]}" in - --developer) developer="true" ;; - esac - i=$((i+1)) - done - printf "%s" "$developer" - } - - update_config_value() { - local key="$1" - local value="$2" - local path="${3:-$CONFIG_FILE}" - local mode="${4:-set}" - local tmp="" - tmp="$(mktemp)" - if [ -f "$path" ]; then - awk -v k="$key" -v v="$value" -v mode="$mode" ' - BEGIN { found=0 } - { - if ($0 ~ "^[[:space:]]*"k"[[:space:]]*=") { - found=1 - if (mode=="reset" && v=="") { next } - print k" = "v - next - } - print - } - END { - if (!found && v!="") { - print k" = "v - } - } - ' "$path" > "$tmp" - else - if [ -n "$value" ]; then - printf "%s = %s\n" "$key" "$value" > "$tmp" - else - : > "$tmp" - fi - fi - mkdir -p "$(dirname "$path")" - mv "$tmp" "$path" - if [ -n "$value" ]; then - echo "Set: $key = $value ($path)" - else - echo "Reset: $key ($path)" - fi - } - - detect_cli() { - local name="$1" - local path="" - if command -v "$name" >/dev/null 2>&1; then - path="$(command -v "$name")" - printf "%-10s %s\n" "$name" "found ($path)" - else - printf "%-10s %s\n" "$name" "missing" - fi - } - - case "$sub" in - global|repo) - local action="${2:---show}" - local path="" - case "$action" in - --show|show) path="$(scope_path "$sub" show)" ;; - --default|default) path="$(scope_path "$sub" default)" ;; - --min|min) path="$(scope_path "$sub" min)" ;; - --max|max) path="$(scope_path "$sub" max)" ;; - --custom|custom) path="$(scope_path "$sub" custom)" ;; - *) - echo "Unknown config action: $action" >&2 - exit 1 - ;; - esac - if [ -z "${path:-}" ]; then - echo "Config scope not available: $sub" >&2 - exit 1 - fi - if [ "$action" = "--custom" ] || [ "$action" = "custom" ]; then - local schema_artifact="$REPO_DIR/docs/artifacts/modules/config/local-schema.json" - local schema_source="$REPO_DIR/docs/src/configs/tooling/local/config-schema.json" - local 
schema_path="$schema_artifact" - if [ ! -f "$schema_path" ]; then - schema_path="$schema_source" - fi - if [ ! -f "$schema_path" ]; then - echo "Config schema not found: $schema_artifact or $schema_source" >&2 - exit 1 - fi - if ! command -v jq >/dev/null 2>&1; then - echo "jq is required for --custom config generation." >&2 - exit 1 - fi - mkdir -p "$(dirname "$path")" - : > "$path" - while IFS= read -r field; do - local field_json="" - local key="" - local default="" - local desc="" - local options="" - field_json="$(printf "%s" "$field" | base64 --decode)" - key="$(printf "%s" "$field_json" | jq -r '.key // empty')" - default="$(printf "%s" "$field_json" | jq -r '.default // ""')" - desc="$(printf "%s" "$field_json" | jq -r '.description // ""')" - options="$(printf "%s" "$field_json" | jq -r '.options // [] | join(\", \")')" - if [ -n "$desc" ]; then - echo "" - echo "$desc" - fi - if [ -n "$options" ]; then - echo "options: $options" - fi - local value="" - if [ -t 0 ]; then - printf "%s [%s]: " "$key" "$default" - read -r value || true - fi - if [ -z "$value" ]; then - value="$default" - fi - if [ -n "$key" ]; then - printf "%s = %s\n" "$key" "$value" >> "$path" - fi - done < <(jq -r '.fields[] | @base64' "$schema_path") - echo "Saved: $path" - exit 0 - fi - if [ -f "$path" ]; then - cat "$path" - exit 0 - fi - if [ "$sub" = "repo" ]; then - if [ -f "$CONFIG_FILE" ]; then - cat "$CONFIG_FILE" - exit 0 - fi - fi - echo "Config file not found: $path" >&2 - exit 1 - ;; - show|"") - local key - key="$(parse_key_flag "$@")" - if [ -n "$key" ]; then - local val="" - val="$(config_value_scoped "$key" "")" - if [ -n "$val" ]; then - echo "$key = $val" - else - echo "Key not found: $key" >&2 - exit 1 - fi - exit 0 - fi - if [ -f "${ENVIRONMENT_YAML:-}" ]; then - echo "Orchestrator: $ENVIRONMENT_YAML" - cat "$ENVIRONMENT_YAML" - echo "" - fi - if [ -f "$CONFIG_FILE" ]; then - echo "Global: $CONFIG_FILE" - cat "$CONFIG_FILE" - echo "" - fi - local local_path - 
local_path="$(local_config_path || true)" - if [ -n "$local_path" ] && [ -f "$local_path" ]; then - echo "Local: $local_path" - cat "$local_path" - echo "" - fi - echo "Detected CLIs (read-only):" - detect_cli git - detect_cli gh - detect_cli docker - detect_cli npm - detect_cli codex - detect_cli claude - detect_cli gemini - ;; - reset) - local force="false" - force="$(parse_force_flag "$@")" - local scope="global" - scope="$(parse_scope_flag "$@")" - local key="" - key="$(parse_key_flag "$@")" - local target_path="" - if [ "$scope" = "repo" ]; then - target_path="$(scope_path "repo" "reset")" - else - target_path="$CONFIG_FILE" - fi - - if [ -n "$key" ]; then - if [ "$force" != "true" ]; then - confirm_action "Reset $key to default in $scope scope?" - fi - local default_val="" - default_val="$(config_value "$REPO_DIR/config/default.env" "$key" "")" - update_config_value "$key" "$default_val" "$target_path" "reset" - exit 0 - fi - if [ ! -f "$REPO_DIR/config/default.env" ]; then - echo "Missing default config: $REPO_DIR/config/default.env" - exit 1 - fi - if [ -t 0 ] && [ "$force" != "true" ]; then - confirm_action "Reset $scope config to defaults?" 
- fi - if [ "$scope" = "repo" ]; then - if [ -z "$target_path" ]; then - echo "Repo scope not available" >&2 - exit 1 - fi - cp "$REPO_DIR/config/default.env" "$target_path" - else - cp "$REPO_DIR/config/default.env" "$CONFIG_FILE" - cp "$REPO_DIR/config/default.env" "$DEV_KIT_HOME/config.env" - fi - echo "Reset: $target_path" - ;; - set) - local force="false" - force="$(parse_force_flag "$@")" - local developer="false" - developer="$(parse_developer_flag "$@")" - local scope="global" - scope="$(parse_scope_flag "$@")" - local key="" - local value="" - key="$(parse_key_flag "$@")" - - local target_path="" - if [ "$scope" = "repo" ]; then - target_path="$(scope_path "repo" "set")" - else - target_path="$CONFIG_FILE" - fi - - if [ "$developer" = "true" ]; then - update_config_value "exec.prompt" "developer" "$target_path" "set" - update_config_value "developer.enabled" "true" "$target_path" "set" - exit 0 - fi - - if [ -n "$key" ]; then - # If --key was used, the value is the first non-flag argument that is NOT the key or --key - local arg - for arg in "$@"; do - if [[ "$arg" != --* ]] && [ "$arg" != "$key" ] && [ "$arg" != "set" ]; then - value="$arg" - break - fi - done - else - # positional legacy support: dev.kit config set - key="${2:-}" - value="${3:-}" - fi - # re-check key/value if not set by --key/--value - if [ -z "$key" ] || [[ "$key" == --* ]]; then - key="" - fi - - if [ -z "$key" ]; then - if [ -t 0 ] && [ "$force" != "true" ]; then - key="$(prompt_value "key" "")" - else - echo "Missing --key in non-interactive mode" >&2 - exit 1 - fi - fi - if [ -z "$value" ]; then - if [ -t 0 ] && [ "$force" != "true" ]; then - value="$(prompt_value "value" "")" - else - echo "Missing value in non-interactive mode" >&2 - exit 1 - fi - fi - if [ -z "$key" ] || [ -z "$value" ]; then - echo "Usage: dev.kit config set [--scope global|repo] --key " >&2 - exit 1 - fi - update_config_value "$key" "$value" "$target_path" "set" - if [ "$scope" = "global" ] && [ "$key" = 
"state_path" ]; then - update_config_value "$key" "$value" "$DEV_KIT_HOME/config.env" "set" - fi - ;; - -h|--help) - cat <<'CONFIG_USAGE' -Usage: dev.kit config - -Commands: - show Print current config - reset Reset config to defaults (prompts) - set Set a config key/value (or --developer) - global Global config (use --show|--default|--min|--max|--custom) - repo Repo config (use --show|--default|--min|--max|--custom) - -Options: - --key Target a specific config key - --value Set a config value when using --key - --scope Target scope: global (default) or repo - --force Skip confirmation prompts - --developer Enable developer mode (sets exec.prompt + developer.enabled) -CONFIG_USAGE - ;; - *) - echo "Unknown config command: $sub" >&2 - exit 1 - ;; - esac -} diff --git a/lib/commands/doctor.sh b/lib/commands/doctor.sh deleted file mode 100644 index d1c70bf..0000000 --- a/lib/commands/doctor.sh +++ /dev/null @@ -1,284 +0,0 @@ -#!/bin/bash - -# @description: Deep system analysis and environment hydration advice. -# @intent: doctor, check, health, environment, diagnosis -# @objective: Audit the engineering environment for healthy integrations, secure configurations, and required software, providing proactive advice for empowerment. -# @usage: dev.kit doctor -# @usage: dev.kit doctor --shell-integrate -# @workflow: 1. Core Health -> 2. Software Prerequisites -> 3. External Engineering Context (Mesh) -> 4. AI Skills Health -> 5. Security & Secrets Advisory - -if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then - # shellcheck source=/dev/null - . 
"$REPO_DIR/lib/utils.sh" -fi - -dev_kit_cmd_doctor() { - local json_output="false" - if [ "${1:-}" = "--json" ]; then - json_output="true" - shift - fi - - ensure_dev_kit_home - - if [ "$json_output" = "false" ]; then - print_section "dev.kit | doctor" - fi - - local status_orchestrator="missing" - if [ -f "${ENVIRONMENT_YAML:-}" ]; then - status_orchestrator="ok" - fi - - local env_line="source \"$HOME/.udx/dev.kit/source/env.sh\"" - local profile="" - case "${SHELL:-}" in - */zsh) profile="$HOME/.zshrc" ;; - */bash) profile="$HOME/.bash_profile" ;; - *) profile="$HOME/.bash_profile" ;; - esac - - local status_shell="missing" - if [ -f "$profile" ] && grep -Fqx "$env_line" "$profile"; then - status_shell="ok" - fi - - local status_path="missing" - local path_bin="" - if command -v dev.kit >/dev/null 2>&1; then - status_path="ok" - path_bin="$(command -v dev.kit)" - fi - - local ai_enabled - ai_enabled="$(config_value_scoped ai.enabled "false")" - local operating_mode="Personal Helper" - if [ "$ai_enabled" = "true" ]; then - operating_mode="AI-Powered" - fi - - check_sw() { - if command -v "$1" >/dev/null 2>&1; then echo "ok"; else echo "missing"; fi - } - - local sw_git; sw_git=$(check_sw "git") - local sw_docker; sw_docker=$(check_sw "docker") - local sw_npm; sw_npm=$(check_sw "npm") - local sw_gh; sw_gh=$(check_sw "gh") - local sw_gemini; sw_gemini=$(check_sw "gemini") - local sw_mmdc; sw_mmdc=$(check_sw "mmdc") - - if [ "$json_output" = "true" ]; then - local repo_root; repo_root="$(get_repo_root || true)" - - # Calculate Mesh Health - local gh_health="missing" - if command -v dev_kit_github_health >/dev/null 2>&1; then - case $(dev_kit_github_health; echo $?) in - 0) gh_health="ok" ;; - 2) gh_health="warn" ;; - esac - fi - - local c7_health="missing" - if command -v dev_kit_context7_health >/dev/null 2>&1; then - case $(dev_kit_context7_health; echo $?) 
in - 0) c7_health="ok" ;; - 2) c7_health="warn" ;; - esac - fi - - # Calculate Skill Count - local skill_count=0 - if [ -d "$REPO_DIR/docs/workflows" ]; then - skill_count=$(find "$REPO_DIR/docs/workflows" -maxdepth 1 -name "*.md" ! -name "README.md" ! -name "normalization.md" ! -name "loops.md" ! -name "mermaid-patterns.md" | wc -l | tr -d ' ') - fi - - cat </dev/null 2>&1; then - dev_kit_github_health - local gh_status=$? - case $gh_status in - 0) print_check "GitHub Resolution" "[ok]" "authenticated (gh)" ;; - 1) print_check "GitHub Resolution" "[missing]" "CLI missing (gh)" ;; - 2) print_check "GitHub Resolution" "[warn]" "not authenticated" ;; - esac - fi - - # Context7 Resolution - if command -v dev_kit_context7_health >/dev/null 2>&1; then - dev_kit_context7_health - local c7_status=$? - case $c7_status in - 0) print_check "Context7 Resolution" "[ok]" "ready (API/CLI)" ;; - 1) print_check "Context7 Resolution" "[missing]" "API key or CLI missing" ;; - 2) print_check "Context7 Resolution" "[warn]" "CLI available via npm" ;; - esac - fi - - # @udx NPM Packages - if command -v npm >/dev/null 2>&1; then - local missing_pkgs=() - for pkg in "@udx/mcurl" "@udx/mysec"; do - if ! 
dev_kit_npm_health "$pkg" >/dev/null 2>&1; then - missing_pkgs+=("$(echo "$pkg" | sed 's/.*[\/]//')") - fi - done - if [ ${#missing_pkgs[@]} -eq 0 ]; then - print_check "@udx Tools" "[ok]" "all core tools installed" - else - print_check "@udx Tools" "[warn]" "missing: ${missing_pkgs[*]}" - echo " - [advice] Install for more power: npm install -g @udx/mcurl @udx/mysec" - fi - else - print_check "@udx Tools" "[missing]" "npm runtime required" - fi - - echo "" - echo "Managed AI Skills Health (Repository):" - local local_skills="$REPO_DIR/docs/workflows" - if [ -d "$local_skills" ]; then - local count=0 - # Scan for .md files that define skills (excluding README.md) - while IFS= read -r skill_file; do - [ -z "$skill_file" ] && continue - local filename; filename="$(basename "$skill_file")" - [ "$filename" = "README.md" ] && continue - [ "$filename" = "normalization.md" ] && continue - [ "$filename" = "loops.md" ] && continue - [ "$filename" = "mermaid-patterns.md" ] && continue - - ((count++)) - local name="${filename%.md}" - local status="[ok]" - local detail="documented" - - print_check "$name" "$status" "$detail" - done < <(find "$local_skills" -maxdepth 1 -name "*.md") - - if [ $count -eq 0 ]; then - print_check "skills" "[info]" "No specialized workflows defined in docs/workflows/." - fi - else - print_check "skills" "[info]" "No workflows directory found at $local_skills" - fi - - echo "" - echo "Advisory (Security & Secrets):" - local repo_root - repo_root="$(get_repo_root || true)" - - if command -v mysec >/dev/null 2>&1; then - print_check "mysec" "[ok]" "Active (Secret Scanning)" - else - print_check "mysec" "[info]" "Missing (npm install -g @udx/mysec)" - fi - - if [ -n "$repo_root" ]; then - if [ -f "$repo_root/.env" ]; then - if git check-ignore "$repo_root/.env" >/dev/null 2>&1; then - print_check ".env" "[ok]" "Gitignored (Safe)" - else - print_check ".env" "[alert]" "Not Gitignored! 
(Risk)" - fi - fi - fi - echo "- [info] Use environment.yaml for non-sensitive orchestration." - - # 6. Repository Audit (Compliance) - echo "" - print_section "Repository Compliance (Repo-as-a-Skill)" - - if [ -n "$repo_root" ]; then - # TDD Check - if [ -d "$repo_root/tests" ] || [ -d "$repo_root/test" ] || [ -d "$repo_root/spec" ]; then - print_check "TDD" "[ok]" "Test suite detected" - else - print_check "TDD" "[warn]" "Missing tests/ or spec/ directory" - fi - - # Config-as-Code Check - if [ -f "$repo_root/environment.yaml" ]; then - print_check "CaC" "[ok]" "environment.yaml active" - else - print_check "CaC" "[warn]" "Missing environment.yaml" - fi - - # Documentation Check - if [ -d "$repo_root/docs" ]; then - print_check "Docs" "[ok]" "Knowledge base active" - else - print_check "Docs" "[warn]" "Missing docs/ directory" - fi - - # AI Readiness - if [ -d "$repo_root/src/ai" ]; then - print_check "AI Skills" "[ok]" "Repo skills defined" - else - print_check "AI Skills" "[warn]" "Missing src/ai/ directory" - fi - else - echo " - [info] Run inside a git repository for full compliance audit." - fi - echo "" -} diff --git a/lib/commands/github.sh b/lib/commands/github.sh deleted file mode 100644 index 7efda84..0000000 --- a/lib/commands/github.sh +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# @description: Manage GitHub operations (PRs, issues, actions) if gh CLI is available. -# @intent: github, pr, issue, repo, remote - -# github.sh -# -# GitHub triage helper (GH CLI only): assigned issues, my PRs, PRs to review. -# Ensures authentication before each command execution. 
-# -# Requirements: -# - gh (GitHub CLI) -# -# Auth: -# - Preferred: GH_TOKEN or GITHUB_TOKEN (non-interactive) -# - Otherwise: interactive "gh auth login" when needed -# -# Usage: -# dev.kit github assigned-issues [--repo OWNER/REPO] [--state open|closed|all] [--limit N] [--json] -# dev.kit github my-prs [--repo OWNER/REPO] [--state open|closed|merged|all] [--limit N] [--json] -# dev.kit github review-prs [--repo OWNER/REPO] [--state open|closed|merged|all] [--limit N] [--json] [--include-drafts] -# dev.kit github pr-create --title "Title" --body "Body" [--base branch] [--head branch] [--draft] - -dev_kit_cmd_github() { - - - LIMIT=30 - STATE="open" # issues: open|closed|all ; prs: open|closed|merged|all - REPO="" - JSON=0 - INCLUDE_DRAFTS=0 - COMMAND="" - - # pr-create options - PR_TITLE="" - PR_BODY="" - PR_BASE="main" - PR_HEAD="" - PR_DRAFT="false" - - die() { echo "ERROR: $*" >&2; exit 1; } - - usage() { - cat < [options] - -Commands: - assigned-issues List issues assigned to you - my-prs List PRs authored by you - review-prs List PRs requesting your review - pr-create Create a new Pull Request - -Options: - --repo OWNER/REPO Restrict to one repository - --state STATE open|closed|merged|all (default: open) - --limit N Max results (default: 30) - --json JSON output (adds useful default fields) - --include-drafts (review-prs only) include draft PRs - -Options (pr-create): - --title TITLE PR title - --body BODY PR body - --base BRANCH Base branch (default: main) - --head BRANCH Head branch (default: current branch) - --draft Create as draft PR - - -h, --help Show this help - -Auth: - - Preferred: export GH_TOKEN=... (or GITHUB_TOKEN=...) - - Otherwise: gh auth login (interactive) - -EOF - } - - need_gh() { - command -v gh >/dev/null 2>&1 || die "gh not found. 
Install GitHub CLI: https://cli.github.com/" - } - - ensure_auth() { - if [[ -n "${GH_TOKEN:-}" || -n "${GITHUB_TOKEN:-}" ]]; then - return 0 - fi - - if gh auth status >/dev/null 2>&1; then - return 0 - fi - - echo "No GH_TOKEN/GITHUB_TOKEN and gh not authenticated. Running: gh auth login" >&2 - gh auth login 1>&2 - gh auth status >/dev/null 2>&1 || die "gh authentication failed" - } - - run_gh() { - ensure_auth - gh "$@" - } - - assigned_issues() { - case "$STATE" in open|closed|all) ;; *) die "assigned-issues: --state must be open|closed|all" ;; esac - - local args=(issue list --assignee @me --limit "$LIMIT" --state "$STATE") - [[ -n "$REPO" ]] && args+=(--repo "$REPO") - if [[ "$JSON" -eq 1 ]]; then - args+=(--json number,title,url,updatedAt,createdAt,state) - fi - run_gh "${args[@]}" - } - - my_prs() { - case "$STATE" in open|closed|merged|all) ;; *) die "my-prs: --state must be open|closed|merged|all" ;; esac - - local args=(pr list --author @me --limit "$LIMIT" --state "$STATE") - [[ -n "$REPO" ]] && args+=(--repo "$REPO") - if [[ "$JSON" -eq 1 ]]; then - args+=(--json number,title,url,updatedAt,createdAt,state,isDraft) - fi - run_gh "${args[@]}" - } - - review_prs() { - case "$STATE" in open|closed|merged|all) ;; *) die "review-prs: --state must be open|closed|merged|all" ;; esac - - local args=(pr list --search "review-requested:@me" --limit "$LIMIT" --state "$STATE") - [[ -n "$REPO" ]] && args+=(--repo "$REPO") - if [[ "$INCLUDE_DRAFTS" -eq 0 ]]; then - args+=(--draft=false) - fi - if [[ "$JSON" -eq 1 ]]; then - args+=(--json number,title,url,updatedAt,createdAt,state,isDraft) - fi - run_gh "${args[@]}" - } - - pr_create() { - [[ -n "$PR_TITLE" ]] || die "pr-create: --title is required" - [[ -n "$PR_BODY" ]] || die "pr-create: --body is required" - - if command -v dev_kit_github_pr_create >/dev/null 2>&1; then - ensure_auth - dev_kit_github_pr_create "$PR_TITLE" "$PR_BODY" "$PR_BASE" "$PR_HEAD" "$PR_DRAFT" - else - die "GitHub module logic not loaded." 
- fi - } - - while [[ $# -gt 0 ]]; do - case "$1" in - assigned-issues|my-prs|review-prs|pr-create) - COMMAND="$1"; shift;; - --repo) - REPO="${2:-}"; shift 2;; - --state) - STATE="${2:-}"; shift 2;; - --limit) - LIMIT="${2:-}"; shift 2;; - --json) - JSON=1; shift;; - --include-drafts) - INCLUDE_DRAFTS=1; shift;; - --title) - PR_TITLE="${2:-}"; shift 2;; - --body) - PR_BODY="${2:-}"; shift 2;; - --base) - PR_BASE="${2:-}"; shift 2;; - --head) - PR_HEAD="${2:-}"; shift 2;; - --draft) - PR_DRAFT="true"; shift;; - -h|--help) - usage; exit 0;; - *) - die "Unknown argument: $1 (use --help)";; - esac - done - - [[ -n "$COMMAND" ]] || { usage; exit 1; } - - need_gh - - case "$COMMAND" in - assigned-issues) assigned_issues ;; - my-prs) my_prs ;; - review-prs) review_prs ;; - pr-create) pr_create ;; - *) die "Unknown command: $COMMAND" ;; - esac -} diff --git a/lib/commands/skills.sh b/lib/commands/skills.sh deleted file mode 100644 index 8564a4f..0000000 --- a/lib/commands/skills.sh +++ /dev/null @@ -1,202 +0,0 @@ -#!/usr/bin/env bash - -# @description: Discover and execute repository-bound skills. -# @intent: skills, list, run, discover, execute -# @objective: Provide a unified interface for discovering and executing both deterministic CLI commands and managed AI skills grounded in the repository. -# @usage: dev.kit skills list -# @usage: dev.kit skills run [intent] -# @workflow: 1. Discover capabilities -> 2. Resolve intent -> 3. Normalize to deterministic command -> 4. Execute and report - -if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then - # shellcheck source=/dev/null - . "$REPO_DIR/lib/utils.sh" -fi - -dev_kit_cmd_skills() { - local sub="${1:-list}" - - # Resolve skills directory based on active provider - local provider - provider="$(config_value_scoped ai.provider "gemini")" - local skills_dir="$HOME/.$provider/skills" - - case "$sub" in - list) - print_section "dev.kit | Dynamic Capability Mesh" - - # 1. 
Deterministic Commands (Internal Logic) - echo "Deterministic Commands (Internal Logic):" - for file in "$REPO_DIR"/lib/commands/*.sh; do - [ -f "$file" ] || continue - local name; name="$(basename "${file%.sh}")" - # Hide internal/utility commands from the main logic list - case "$name" in agent|github|skills) continue ;; esac - - local desc; desc="$(grep "^# @description:" "$file" | cut -d: -f2- | sed 's/^ //' || echo "no description")" - echo "- [command] $name: $desc" - done - - # List from lib/modules/ - for file in "$REPO_DIR"/lib/modules/*.sh; do - [ -f "$file" ] || continue - local name; name="$(basename "${file%.sh}")" - local desc; desc="$(grep "^# @description:" "$file" | cut -d: -f2- | sed 's/^ //' || echo "no description")" - echo "- [module] $name: $desc" - done - echo "" - - # 2. AI Skills (Dynamic Reasoning) - echo "AI Skills (Dynamic Reasoning):" - # List from provider-specific managed path - if [ -d "$skills_dir" ]; then - find "$skills_dir" -mindepth 1 -maxdepth 1 -name "dev-kit-*" -type d | while read -r skill; do - local name; name="$(basename "$skill")" - local desc="(no description)" - if [ -f "$skill/SKILL.md" ]; then - desc="$(grep -i "^description:" "$skill/SKILL.md" | head -n 1 | sed 's/^description: //I')" - fi - echo "- [skill] $name: $desc" - done - fi - - # List from local repo workflows - local local_workflows="$REPO_DIR/docs/workflows" - if [ -d "$local_workflows" ]; then - find "$local_workflows" -maxdepth 1 -name "*.md" | while read -r skill_file; do - local filename; filename="$(basename "$skill_file")" - [ "$filename" = "README.md" ] && continue - [ "$filename" = "normalization.md" ] && continue - [ "$filename" = "loops.md" ] && continue - [ "$filename" = "mermaid-patterns.md" ] && continue - - local name="${filename%.md}" - # Skip showing if already listed in managed - [ -d "$skills_dir/dev-kit-$name" ] && continue - - local desc; desc="$(grep -i "^description:" "$skill_file" | head -n 1 | sed 's/^description: //I' || echo 
"Grounded workflow reasoning.")" - echo "- [skill] $name: $desc" - done - fi - echo "" - - # 3. Virtual Capabilities - echo "Virtual Capabilities (Environment):" - if command -v gh >/dev/null 2>&1; then echo "- [virtual] github (via gh CLI)"; fi - if command -v npm >/dev/null 2>&1; then echo "- [virtual] npm (via node runtime)"; fi - if command -v docker >/dev/null 2>&1; then echo "- [virtual] docker (via docker CLI)"; fi - if command -v gcloud >/dev/null 2>&1; then echo "- [virtual] google (via gcloud CLI)"; fi - echo "" - - return 0 - ;; - run|execute) - local skill_name="${2:-}" - local intent="${3:-}" - - if [ -z "$skill_name" ]; then - echo "Error: Skill name required. Usage: dev.kit skills run [intent]" >&2 - exit 1 - fi - - # Determine skill path or file - local skill_path="" - local skill_file="" - if [ -d "$skills_dir/$skill_name" ]; then - skill_path="$skills_dir/$skill_name" - elif [ -d "$skills_dir/dev-kit-$skill_name" ]; then - skill_path="$skills_dir/dev-kit-$skill_name" - elif [ -f "$REPO_DIR/docs/workflows/$skill_name.md" ]; then - skill_file="$REPO_DIR/docs/workflows/$skill_name.md" - elif [ -f "$REPO_DIR/docs/workflows/dev-kit-$skill_name.md" ]; then - skill_file="$REPO_DIR/docs/workflows/dev-kit-$skill_name.md" - fi - - # If skill path found, execute legacy script logic - if [ -n "$skill_path" ]; then - local script_exec="" - if [ -d "$skill_path/scripts" ]; then - if [ -f "$skill_path/scripts/$intent" ]; then - script_exec="$skill_path/scripts/$intent" - fi - fi - - if [ -n "$script_exec" ]; then - [ ! -x "$script_exec" ] && chmod +x "$script_exec" - export SKILL_PATH="$skill_path" - export SKILL_NAME="$skill_name" - shift 3 || true - "$script_exec" "$@" - exit $? 
- fi - fi - - # Dynamic Intent Normalization (The Modern Path) - if command -v dev_kit_context_normalize >/dev/null 2>&1; then - echo "--- dev.kit Intent Normalization ---" - echo "Input: $skill_name $intent" - - # Resolve intent to a structured manifest - local manifest - manifest="$(dev_kit_context_normalize "$skill_name $intent")" - - # Display the resolution for transparency - if command -v jq >/dev/null 2>&1; then - echo "Resolution:" - - # 1. Standard Commands/Workflows - echo "$manifest" | jq -r '.mappings.discovery[]? | " - [Detected] \(.name) (\(.type))"' - echo "$manifest" | jq -r '.mappings.internal_workflows[]? | " - [Workflow] \(.name) (\(.path))"' - fi - echo "------------------------------------" - echo "Status: Intent Resolved (Dynamic Discovery)" - exit 0 - else - echo "Error: Normalization mechanism not loaded." >&2 - exit 1 - fi - ;; - info) - local skill_name="${2:-}" - [ -z "$skill_name" ] && { echo "Error: Skill name required."; exit 1; } - - local skill_path="" - local skill_file="" - if [ -d "$skills_dir/$skill_name" ]; then - skill_path="$skills_dir/$skill_name" - elif [ -d "$skills_dir/dev-kit-$skill_name" ]; then - skill_path="$skills_dir/dev-kit-$skill_name" - elif [ -f "$REPO_DIR/docs/workflows/$skill_name.md" ]; then - skill_file="$REPO_DIR/docs/workflows/$skill_name.md" - elif [ -f "$REPO_DIR/docs/workflows/dev-kit-$skill_name.md" ]; then - skill_file="$REPO_DIR/docs/workflows/dev-kit-$skill_name.md" - fi - - if [ -n "$skill_path" ] && [ -f "$skill_path/SKILL.md" ]; then - cat "$skill_path/SKILL.md" - elif [ -n "$skill_file" ]; then - cat "$skill_file" - else - echo "Error: Skill info for '$skill_name' not found." 
>&2 - exit 1 - fi - ;; - help|-h|--help) - cat <<'SKILLS_HELP' -Usage: dev.kit skills - -Commands: - list List all available skills and their scripts - run [script] Execute a script from a skill - info Display skill documentation (SKILL.md) - -Examples: - dev.kit skills run diagram-generator new_diagram.sh "A -> B" - dev.kit skills execute git-sync -SKILLS_HELP - ;; - *) - echo "Unknown skills command: $sub" >&2 - exit 1 - ;; - esac -} diff --git a/lib/commands/status.sh b/lib/commands/status.sh index 16cb7eb..81e1133 100644 --- a/lib/commands/status.sh +++ b/lib/commands/status.sh @@ -1,110 +1,21 @@ -#!/bin/bash +#!/usr/bin/env bash -# @description: Engineering brief and system diagnostic. -# @intent: status, check, health, info, diagnostic -# @objective: Provide a compact, high-signal overview of the current engineering environment, active tasks, and empowerment mesh. -# @usage: dev.kit status -# @usage: dev.kit status --json -# @workflow: 1. Identity & Operating Mode -> 2. Environment Health -> 3. Active Context -> 4. Empowerment Mesh -> 5. Actionable Advice - -if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then - # shellcheck source=/dev/null - . "$REPO_DIR/lib/utils.sh" -fi +# @description: Show basic installation status dev_kit_cmd_status() { - local json_output="false" - if [ "${1:-}" = "--json" ]; then - json_output="true" - shift - fi - - if [ "$json_output" = "true" ]; then - dev_kit_cmd_doctor --json - return - fi - - ui_header "Engineering Brief" - - # 1. Identity - local version="0.1.0" - [ -f "$REPO_DIR/VERSION" ] && version="$(cat "$REPO_DIR/VERSION")" - - # 2. 
Operating Mode & Environment - local ai_enabled; ai_enabled="$(config_value_scoped ai.enabled "false")" - local provider; provider="$(config_value_scoped ai.provider "codex")" - - if [ "$ai_enabled" = "true" ]; then - ui_ok "Mode" "AI-Powered ($provider)" - else - ui_info "Mode" "Personal Helper (Local)" - fi - - local env_line="source \"$HOME/.udx/dev.kit/source/env.sh\"" - local profile=""; case "${SHELL:-}" in */zsh) profile="$HOME/.zshrc" ;; *) profile="$HOME/.bash_profile" ;; esac - if [ -f "$profile" ] && grep -Fqx "$env_line" "$profile"; then - ui_ok "Shell" "Integrated ($profile)" - else - ui_warn "Shell" "Missing integration" - fi + local format="${1:-text}" + local state="not installed" - # 3. Workspace & Context - local repo_root; repo_root="$(get_repo_root || true)" - if [ -n "$repo_root" ]; then - ui_ok "Workspace" "$(basename "$repo_root")" - - # Active Task - local active_workflow="" - if [ -d "$repo_root/tasks" ]; then - active_workflow="$(find "$repo_root/tasks" -name "workflow.md" -exec grep -l "status: planned\|status: active" {} + | head -n 1 || true)" - if [ -n "$active_workflow" ]; then - local task_id; task_id="$(basename "$(dirname "$active_workflow")")" - echo "" - printf "%sWaterfall Progression: [%s]%s\n" "$(ui_cyan)" "$task_id" "$(ui_reset)" - grep -A 2 "^### Step" "$active_workflow" | awk ' - /^### Step/ { step = $0; sub(/^### /, "", step); printf " %-20s", step; } - /^status:/ { - status = $2; - if (status == "completed" || status == "done") printf " \033[32māœ”\033[0m\n"; - else if (status == "active" || status == "running") printf " \033[36m›\033[0m\n"; - else printf " \033[2m…\033[0m\n"; - } - ' - fi - fi - else - ui_warn "Workspace" "Not in a repository" + if [ -d "$DEV_KIT_HOME" ]; then + state="installed" fi - # 4. 
Virtual Skills (Discovery) - echo "" - printf "%sVirtual Skills (Environment Discovery):%s\n" "$(ui_cyan)" "$(ui_reset)" - if command -v gh >/dev/null 2>&1; then ui_ok "GitHub" "CLI (Discovery Active)"; else ui_info "GitHub" "Missing"; fi - if command -v npm >/dev/null 2>&1; then ui_ok "NPM" "Node Runtime"; else ui_info "NPM" "Missing"; fi - if command -v docker >/dev/null 2>&1; then ui_ok "Docker" "Engine Detected"; else ui_info "Docker" "Missing"; fi - if command -v gcloud >/dev/null 2>&1; then ui_ok "Google" "Cloud CLI"; fi - - # 5. Empowerment Mesh - echo "" - printf "%sEmpowerment Mesh (Capability Discovery):%s\n" "$(ui_cyan)" "$(ui_reset)" - - local cmd_count; cmd_count=$(ls "$REPO_DIR"/lib/commands/*.sh 2>/dev/null | wc -l) - local mod_count; mod_count=$(ls "$REPO_DIR"/lib/modules/*.sh 2>/dev/null | wc -l) - local skill_count; skill_count=$(ls "$REPO_DIR"/docs/workflows/*.md 2>/dev/null | grep -v "README.md\|normalization.md\|loops.md\|mermaid-patterns.md" | wc -l) - - ui_ok "Capabilities" "$cmd_count Commands | $mod_count Modules | $skill_count AI Skills" - - if command -v dev_kit_github_health >/dev/null 2>&1 && dev_kit_github_health >/dev/null 2>&1; then - ui_ok "Remote" "GitHub Authorized" - fi - - if command -v dev_kit_context7_health >/dev/null 2>&1 && dev_kit_context7_health >/dev/null 2>&1; then - ui_ok "Knowledge" "Context7 API (v2)" + if [ "$format" = "json" ]; then + printf '{\n "name": "dev.kit",\n "home": "%s",\n "state": "%s"\n}\n' "$DEV_KIT_HOME" "$state" + return 0 fi - # 6. Actionable Tips - echo "" - ui_tip "Run 'dev.kit skills run \"\"' to resolve drift." - ui_tip "Run 'dev.kit sync' to atomically commit changes." 
- echo "" + echo "dev.kit" + echo "home: $DEV_KIT_HOME" + echo "state: $state" } diff --git a/lib/commands/sync.sh b/lib/commands/sync.sh deleted file mode 100644 index 26dc980..0000000 --- a/lib/commands/sync.sh +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env bash - -# @description: Resolve repository drift or prepare for new work. -# @intent: sync, commit, drift, atomic, push, resolve, prepare, branch -# @objective: Maintain high-fidelity repository state by either preparing the environment for work (branch management, origin sync) or resolving drift via logical, domain-specific commits. -# @usage: dev.kit sync prepare [main_branch] -# @usage: dev.kit sync run --task-id "TASK-123" --message "feat: implementation" -# @usage: dev.kit sync --dry-run -# @workflow: 1. (Prepare) Detect Branch -> 2. (Prepare) Sync Origin -> 3. (Run) Group Changes -> 4. (Run) Atomic Commits - -dev_kit_cmd_sync() { - local sub="${1:-run}" - - case "$sub" in - prepare) - local target_main="${2:-main}" - if command -v dev_kit_git_sync_prepare >/dev/null 2>&1; then - dev_kit_git_sync_prepare "$target_main" - else - echo "Error: Git sync module not loaded." >&2 - return 1 - fi - ;; - reminder) - if command -v ui_sync_reminder >/dev/null 2>&1; then - ui_sync_reminder - else - echo "Error: UI module not loaded." 
>&2 - return 1 - fi - ;; - run|execute|*) - # If first arg isn't a known subcommand, treat as 'run' and don't shift if it looks like an option - if [[ "$sub" == --* ]]; then - # It's an option, so we are in 'run' mode by default - sub="run" - else - shift 1 - fi - - local dry_run="false" - local task_id="unknown" - local message="" - - while [[ $# -gt 0 ]]; do - case "$1" in - --dry-run) dry_run="true"; shift ;; - --task-id) task_id="$2"; shift 2 ;; - --message) message="$2"; shift 2 ;; - -h|--help) - cat <<'SYNC_HELP' -Usage: dev.kit sync [options] - -Commands: - prepare [main] Prepare repository (fetch origin, merge, optional branch) - run Resolve drift via atomic commits (Default) - -Options (run): - --dry-run Show what commits would be made without executing them - --task-id The current task ID to associate with commits - --message Optional base message prefix - -h, --help Show this help message - -Example: - dev.kit sync prepare main - dev.kit sync run --task-id "TASK-123" -SYNC_HELP - return 0 - ;; - *) echo "Unknown option: $1"; return 1 ;; - esac - done - - if command -v dev_kit_git_sync_run >/dev/null 2>&1; then - dev_kit_git_sync_run "$dry_run" "$task_id" "$message" - else - echo "Error: Git sync module not loaded." >&2 - return 1 - fi - ;; - esac -} diff --git a/lib/commands/task.sh b/lib/commands/task.sh deleted file mode 100644 index f162ff4..0000000 --- a/lib/commands/task.sh +++ /dev/null @@ -1,254 +0,0 @@ -#!/bin/bash - -# @description: Manage the lifecycle of active workflows and sessions. -# @intent: task, session, workflow, start, reset -# @objective: Orchestrate the engineering lifecycle by initializing tasks, tracking context, and managing the 'tasks/' directory through discovery and cleanup. -# @usage: dev.kit task start "Implement new feature" -# @usage: dev.kit task list -# @usage: dev.kit task cleanup -# @workflow: 1. Start Task -> 2. Normalize Intent -> 3. Iterate (Implementation/Verification) -> 4. Finalize Sync -> 5. 
Cleanup Completed Tasks - -if [ -n "${REPO_DIR:-}" ] && [ -f "$REPO_DIR/lib/utils.sh" ]; then - # shellcheck source=/dev/null - . "$REPO_DIR/lib/utils.sh" -fi - -print_task_usage() { - cat <<'TASK_USAGE' -Usage: dev.kit task [TASK_ID] - -Commands: - start "" Initialize a new task with a generated ID and request - list Briefly list all tasks and their status - cleanup Remove completed or stale tasks from the workspace - reset Clear repository-scoped session context - new Initialize a new task directory with templates - apply Apply task feedback to create/update a workflow -TASK_USAGE -} - -# Helper to check if a task is stale (not modified in 2 days) -_is_task_stale() { - local task_dir="$1" - # -mtime +1 matches anything not modified in >48 hours - [ -n "$(find "$task_dir" -mtime +1 -print -quit 2>/dev/null)" ] -} - -dev_kit_cmd_task() { - local sub="${1:-}" - - if [ -z "$sub" ] || [ "$sub" = "help" ] || [ "$sub" = "-h" ]; then - print_task_usage - exit 0 - fi - - local tasks_dir - tasks_dir="$(get_tasks_dir)" - - case "$sub" in - list) - [ ! -d "$tasks_dir" ] && { echo "No tasks found."; return 0; } - printf "\n%sActive & Recent Tasks:%s\n" "$(ui_cyan)" "$(ui_reset)" - find "$tasks_dir" -mindepth 1 -maxdepth 1 -type d | sort -r | while read -r task_dir; do - local task_id; task_id="$(basename "$task_dir")" - local status="initialized" - if [ -f "$task_dir/workflow.md" ]; then - status="$(grep "^status:" "$task_dir/workflow.md" | head -n 1 | awk '{print $2}')" - elif [ -f "$task_dir/plan.md" ]; then - status="$(grep "^status:" "$task_dir/plan.md" | head -n 1 | awk '{print $2}')" - fi - - local stale_marker="" - if _is_task_stale "$task_dir" && [[ "$status" != "done" && "$status" != "completed" ]]; then - stale_marker=" $(ui_orange)āš ļø stale$(ui_reset)" - fi - - printf " %-20s %s%s\n" "$task_id" "$status" "$stale_marker" - done - echo "" - ;; - active) - [ ! 
-d "$tasks_dir" ] && { echo "No active tasks."; return 0; } - local count=0 - while read -r task_dir; do - local task_id; task_id="$(basename "$task_dir")" - local status="initialized" - if [ -f "$task_dir/workflow.md" ]; then - status="$(grep "^status:" "$task_dir/workflow.md" | head -n 1 | awk '{print $2}')" - elif [ -f "$task_dir/plan.md" ]; then - status="$(grep "^status:" "$task_dir/plan.md" | head -n 1 | awk '{print $2}')" - fi - - if [[ "$status" != "done" && "$status" != "completed" ]]; then - ((count++)) - local objective; objective="$(grep -A 2 "## Objective" "$task_dir/plan.md" 2>/dev/null | tail -n 1 | sed 's/^[[:space:]]*//' || echo "No objective defined.")" - local stale_marker="" - if _is_task_stale "$task_dir"; then stale_marker=" [STALE]"; fi - echo "- [$task_id] status: $status$stale_marker" - echo " objective: $objective" - fi - done < <(find "$tasks_dir" -mindepth 1 -maxdepth 1 -type d | sort -r) - - if [ "$count" -eq 0 ]; then - echo "No active tasks." - fi - ;; - reminder) - [ ! -d "$tasks_dir" ] && return 0 - local stale_count=0 - while read -r task_dir; do - local status="" - if [ -f "$task_dir/workflow.md" ]; then - status="$(grep "^status:" "$task_dir/workflow.md" | head -n 1 | awk '{print $2}')" - elif [ -f "$task_dir/plan.md" ]; then - status="$(grep "^status:" "$task_dir/plan.md" | head -n 1 | awk '{print $2}')" - fi - - if [[ "$status" != "done" && "$status" != "completed" ]] && _is_task_stale "$task_dir"; then - ((stale_count++)) - fi - done < <(find "$tasks_dir" -mindepth 1 -maxdepth 1 -type d) - - if [ "$stale_count" -gt 0 ]; then - ui_tip "You have $stale_count stale tasks (older than 2 days). Run 'dev.kit task cleanup' to clear them." - fi - ;; - cleanup) - [ ! -d "$tasks_dir" ] && return 0 - echo "Scanning for tasks to cleanup..." 
- local to_remove=() - while read -r task_dir; do - local task_id; task_id="$(basename "$task_dir")" - local status="" - if [ -f "$task_dir/workflow.md" ]; then - status="$(grep "^status:" "$task_dir/workflow.md" | head -n 1 | awk '{print $2}')" - elif [ -f "$task_dir/plan.md" ]; then - status="$(grep "^status:" "$task_dir/plan.md" | head -n 1 | awk '{print $2}')" - fi - - if [[ "$status" == "done" || "$status" == "completed" ]] || _is_task_stale "$task_dir"; then - to_remove+=("$task_dir") - fi - done < <(find "$tasks_dir" -mindepth 1 -maxdepth 1 -type d) - - if [ ${#to_remove[@]} -eq 0 ]; then - echo "Nothing to cleanup." - return 0 - fi - - echo "Tasks identified for removal (completed or stale):" - for tr in "${to_remove[@]}"; do - echo " - $(basename "$tr")" - done - - printf "Remove these %d tasks? (y/N): " "${#to_remove[@]}" - read -r response - if [[ "$response" =~ ^[Yy]$ ]]; then - for tr in "${to_remove[@]}"; do - rm -rf "$tr" - done - echo "Cleanup complete." - else - echo "Cleanup aborted." - fi - ;; - reset) - if context_enabled; then - local ctx - ctx="$(context_file || true)" - if [ -f "$ctx" ]; then - : > "$ctx" - echo "Session context cleared." - fi - fi - ;; - start) - local request="${2:-}" - if [ -z "$request" ] && [ ! 
-t 0 ]; then - request="$(cat)" - fi - if [ -z "$request" ]; then - echo "Error: Request is required for 'task start'" >&2 - exit 1 - fi - - local task_id - task_id="TASK-$(date +%Y%m%d-%H%M)" - local task_dir="$tasks_dir/$task_id" - - mkdir -p "$task_dir" - cat > "$task_dir/plan.md" < "$task_dir/feedback.md" < "$task_dir/prompt.md" < - -## Request - -EOF - cat > "$task_dir/feedback.md" < "$workflow_file" <> "$workflow_file" - fi - echo "Workflow ready: $workflow_file" - ;; - *) - echo "Unknown task command: $sub" >&2 - exit 1 - ;; - esac -} diff --git a/lib/commands/visualizer.sh b/lib/commands/visualizer.sh deleted file mode 100644 index 2040d89..0000000 --- a/lib/commands/visualizer.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash - -# @description: Generate and export high-fidelity Mermaid diagrams (SVG). -# @intent: diagram, mermaid, svg, export, flowchart, sequence -# @objective: Enable seamless transition from "Intent" to "Visual Asset" by automating both the creation of Mermaid (.mmd) diagrams and their rendering into SVG documentation. -# @usage: dev.kit visualizer create flowchart "assets/arch.mmd" -# @usage: dev.kit visualizer export "assets/arch.mmd" -# @workflow: 1. Request diagram type -> 2. Generate .mmd -> 3. Refine logic -> 4. Export .svg - -dev_kit_cmd_visualizer() { - local sub="${1:-help}" - - case "$sub" in - create|new) - local type="${2:-flowchart}" - local output="${3:-assets/diagrams/new-diagram.mmd}" - if command -v dev_kit_visualizer_create >/dev/null 2>&1; then - dev_kit_visualizer_create "$type" "$output" - else - echo "Error: Visualizer module not loaded." >&2 - exit 1 - fi - ;; - export|render) - local input="${2:-}" - local output="${3:-}" - if [ -z "$input" ]; then - echo "Error: Input file required. Usage: dev.kit visualizer export [output.svg]" >&2 - exit 64 - fi - if command -v dev_kit_visualizer_export >/dev/null 2>&1; then - dev_kit_visualizer_export "$input" "$output" - else - echo "Error: Visualizer module not loaded." 
>&2 - exit 1 - fi - ;; - help|-h|--help) - cat <<'VISUALIZER_HELP' -Usage: dev.kit visualizer - -Commands: - create [output] Create a new Mermaid diagram from template - export [output] Export a Mermaid (.mmd) file to SVG - -Diagram Types: - flowchart, sequence, state, er (auto defaults to flowchart) - -Example: - dev.kit visualizer create flowchart assets/arch.mmd - dev.kit visualizer export assets/arch.mmd assets/arch.svg -VISUALIZER_HELP - ;; - *) - echo "Unknown visualizer command: $sub" >&2 - exit 1 - ;; - esac -} diff --git a/lib/modules/bootstrap.sh b/lib/modules/bootstrap.sh new file mode 100644 index 0000000..1d1ce80 --- /dev/null +++ b/lib/modules/bootstrap.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +dev_kit_bootstrap() { + export DEV_KIT_BIN_DIR="${DEV_KIT_BIN_DIR:-$HOME/.local/bin}" + export DEV_KIT_HOME="${DEV_KIT_HOME:-$HOME/.udx/dev.kit}" +} + +dev_kit_path_contains_bin_dir() { + case ":$PATH:" in + *":${DEV_KIT_BIN_DIR}:"*) return 0 ;; + *) return 1 ;; + esac +} + +dev_kit_copy_file() { + local src="$1" + local dst="$2" + mkdir -p "$(dirname "$dst")" + cp "$src" "$dst" +} + +dev_kit_copy_tree() { + local src="$1" + local dst="$2" + mkdir -p "$dst" + cp -R "$src/." "$dst/" +} + +dev_kit_command_name_from_file() { + local file="$1" + basename "$file" .sh | tr '_' '-' +} + +dev_kit_command_description() { + local file="$1" + awk -F': ' '/^# @description:/ { print $2; exit }' "$file" +} + +dev_kit_list_command_files() { + local root_dir="$1" + find "$root_dir/lib/commands" -maxdepth 1 -type f -name '*.sh' | sort +} diff --git a/lib/modules/context7.sh b/lib/modules/context7.sh deleted file mode 100644 index 7a1e26e..0000000 --- a/lib/modules/context7.sh +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/env bash - -# @description: High-fidelity search and resolution for the "Skill Mesh" (Multi-repo context). 
-# @intent: context7, knowledge, search, resolution, mesh -# @objective: Bridge disparate repository context into a unified engineering mesh via structured API and CLI discovery. - -# Check if Context7 integration is available (API key or CLI) -dev_kit_context7_health() { - # 1. Check for API Key (Priority 1) - local api_key - api_key="$(config_value_scoped context7.api_key "${CONTEXT7_API_KEY:-}")" - if [ -n "$api_key" ]; then - return 0 - fi - - # 2. Check for CLI - if command -v context7 >/dev/null 2>&1; then - return 0 - fi - - # 3. Suggest installation if npm is present - if command -v npm >/dev/null 2>&1; then - # We return 2 to indicate "Available to install" - return 2 - fi - - return 1 # Not available -} - -# Synchronize a repository with the Context7 hub -# Usage: dev_kit_context7_sync [repo_path] -dev_kit_context7_sync() { - local repo_path="${1:-$REPO_DIR}" - - # 1. Check health first - if ! dev_kit_context7_health; then - echo "Error: Context7 not ready (API key or CLI missing)." >&2 - return 1 - fi - - # 2. Prefer CLI for sync if available - if command -v context7 >/dev/null 2>&1; then - echo "Synchronizing $repo_path with Context7 CLI..." >&2 - (cd "$repo_path" && context7 sync) - return $? - fi - - # 3. Fallback to API-based sync notification (if implemented in API) - local api_key; api_key="$(config_value_scoped context7.api_key "${CONTEXT7_API_KEY:-}")" - if [ -n "$api_key" ]; then - echo "Sending sync signal to Context7 API for $repo_path..." 
>&2 - # Placeholder for API-based sync trigger - return 0 - fi - - return 1 -} - -# Search for libraries and engineering context using Context7 -# Usage: dev_kit_context7_search "react" "how to use hooks" -dev_kit_context7_search() { - local lib_name="$1" - local query="${2:-$1}" - local results=() - - local api_key - api_key="$(config_value_scoped context7.api_key "${CONTEXT7_API_KEY:-}")" - - # Case A: Use API (v2) via curl - if [ -n "$api_key" ]; then - local encoded_lib encoded_query - encoded_lib="$(printf "%s" "$lib_name" | jq -sRr @uri)" - encoded_query="$(printf "%s" "$query" | jq -sRr @uri)" - - local response - response="$(curl -s -X GET "https://context7.com/api/v2/libs/search?libraryName=$encoded_lib&query=$encoded_query" \ - -H "Authorization: Bearer $api_key" \ - -H "Content-Type: application/json")" - - if [ -n "$response" ] && [ "$response" != "null" ]; then - while IFS= read -r match; do - [ -n "$match" ] && results+=("$match") - done < <(echo "$response" | jq -c '.[] | {name: .id, type: "external-library", score: .trustScore, uri: "https://context7.com/libs\(.id)"}') - fi - - # Case B: Use CLI (Fallback) - elif command -v context7 >/dev/null 2>&1; then - # Assuming standard 'context7 search' output format - local cli_out - cli_out="$(context7 search "$query" --json 2>/dev/null || true)" - if [ -n "$cli_out" ]; then - while IFS= read -r match; do - [ -n "$match" ] && results+=("$match") - done < <(echo "$cli_out" | jq -c '.[] | {name: .id, type: "external-library", uri: .url}') - fi - fi - - # Case C: Local Peer Repositories (Heuristic fallback) - local parent_dir - parent_dir="$(dirname "$REPO_DIR")" - if [ -d "$parent_dir" ]; then - for peer in "$parent_dir"/*; do - [ -d "$peer" ] || continue - [ "$peer" == "$REPO_DIR" ] && continue - if [ -f "$peer/context.yaml" ] || [ -f "$peer/README.md" ]; then - if grep -qi "$lib_name" "$peer/README.md" 2>/dev/null; then - results+=("{\"name\": \"$(basename "$peer")\", \"type\": \"peer-repo\", \"path\": 
\"$peer\"}") - fi - fi - done - fi - - (IFS=,; echo "${results[*]}") -} - -# Prompt user to install Context7 CLI if missing -dev_kit_context7_install_hint() { - dev_kit_context7_health - local status=$? - if [ $status -eq 2 ]; then - echo "Hint: Context7 CLI is available. Install it for better library discovery:" >&2 - echo " npm install -g @upstash/context7" >&2 - fi -} diff --git a/lib/modules/context_manager.sh b/lib/modules/context_manager.sh deleted file mode 100644 index 014ddf8..0000000 --- a/lib/modules/context_manager.sh +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/env bash - -# dev.kit Context Manager -# Orchestrates intent normalization, context hydration, and multi-repo resolution. - -# Normalize user intent into a structured execution plan (workflow.md) -# Usage: dev_kit_context_normalize "please adjust infra config" [context_file] -dev_kit_context_normalize() { - local intent="$1" - local output_context="${2:-}" - - # 1. Discover relevant skills and sources - local context_data - context_data="$(dev_kit_context_resolve "$intent")" - - # 2. Map to deterministic steps - if [ -n "$output_context" ]; then - echo "$context_data" > "$output_context" - fi - - echo "$context_data" -} - -# Search for capabilities via Dynamic Discovery Engine -dev_kit_context_search_discovery() { - local query="$1" - local matches=() - - # 1. Internal Commands (Scan lib/commands/*.sh) - # Look for # @intent: ... headers - for file in "$REPO_DIR"/lib/commands/*.sh; do - [ -f "$file" ] || continue - local name - name="$(basename "${file%.sh}")" - local intents - intents="$(grep "^# @intent:" "$file" | cut -d: -f2- | tr ',' ' ')" - - # Check if name or intent matches query - if [[ "$name" == *"$query"* ]] || echo "$intents" | grep -qi "$query"; then - matches+=("{\"name\": \"$name\", \"type\": \"command\", \"priority\": \"high\"}") - fi - done - - # 2. 
Virtual Skills (Environment Probe) - # Dynamically register skills based on available CLI tools - if command -v gh >/dev/null 2>&1; then - if [[ "github pr issue repo" =~ $query ]]; then - matches+=("{\"name\": \"github\", \"type\": \"virtual-skill\", \"tool\": \"gh\", \"priority\": \"medium\"}") - fi - fi - if command -v npm >/dev/null 2>&1; then - if [[ "npm package node module" =~ $query ]]; then - matches+=("{\"name\": \"npm\", \"type\": \"virtual-skill\", \"tool\": \"npm\", \"priority\": \"medium\"}") - fi - fi - if command -v docker >/dev/null 2>&1; then - if [[ "docker container image" =~ $query ]]; then - matches+=("{\"name\": \"docker\", \"type\": \"virtual-skill\", \"tool\": \"docker\", \"priority\": \"medium\"}") - fi - fi - - (IFS=,; echo "${matches[*]}") -} - -# Resolve context and dependencies across the "Skill Mesh" -dev_kit_context_resolve() { - local intent="$1" - - # Category 1: Dynamic Command & Virtual Skill Discovery - local discovery - discovery="$(dev_kit_context_search_discovery "$intent")" - - # Category 2: Internal Workflows (Markdown-based engineering loops) - local internal_workflows - internal_workflows="$(dev_kit_context_search_workflows "$intent")" - - # Category 3: Internal Scripts & Skill Packs (Deterministic logic) - local internal_skills - internal_skills="$(dev_kit_context_search_local "$intent")" - - # Category 4: External References (References to outside repos/skills) - local external_refs - external_refs="$(dev_kit_context_search_remote "$intent")" - - # Combine and return a typed context manifest - cat </dev/null; then - matches+=("{\"name\": \"$name\", \"path\": \"$file\", \"type\": \"workflow\"}") - fi - done < <(find "$dir" -name "*.md") - done - - (IFS=,; echo "${matches[*]}") -} - -# Search local skill-packs and deterministic scripts -dev_kit_context_search_local() { - local query="$1" - local matches=() - - local skill_dir="$REPO_DIR/docs/skills" - if [ -d "$skill_dir" ]; then - for skill in "$skill_dir"/*; do - [ -d 
"$skill" ] || continue - local name - name="$(basename "$skill")" - - # 1. Exact name match (Highest priority) - if [[ "$name" == "$query" ]] || [[ "dev-kit-$name" == "$query" ]]; then - matches+=("{\"name\": \"$name\", \"type\": \"skill\", \"priority\": \"high\"}") - continue - fi - - # 2. Keyword/Metadata match in SKILL.md - if [ -f "$skill/SKILL.md" ] && grep -qiE "$query|keywords:.*$query" "$skill/SKILL.md" 2>/dev/null; then - matches+=("{\"name\": \"$name\", \"type\": \"skill\", \"priority\": \"medium\"}") - fi - done - fi - - (IFS=,; echo "${matches[*]}") -} - -# Search remote sources (GitHub, Context7 API) -dev_kit_context_search_remote() { - local query="$1" - - # This will be implemented in lib/modules/context7.sh - if command -v dev_kit_context7_search >/dev/null 2>&1; then - dev_kit_context7_search "$query" - else - echo "" - fi -} diff --git a/lib/modules/git_sync.sh b/lib/modules/git_sync.sh deleted file mode 100644 index 893465f..0000000 --- a/lib/modules/git_sync.sh +++ /dev/null @@ -1,208 +0,0 @@ -#!/usr/bin/env bash - -# @description: Core logic for logical, atomic repository synchronization and drift resolution. -# @intent: sync, commit, drift, atomic, push, resolve, prepare, branch -# @objective: Maintain high-fidelity repository state by grouping changes into logical domains and resolving intent divergence. - -# Prepare the repository for work (Pre-flight checks) -# Usage: dev_kit_git_sync_prepare [target_branch] -dev_kit_git_sync_prepare() { - local target_main="${1:-main}" - - echo "--- dev.kit Git Sync: Pre-work Preparation ---" - - # 1. Detect current branch - local current_branch - current_branch=$(git branch --show-current) - echo "āœ” Current branch: $current_branch" - - # 2. Check for origin updates - echo "Checking origin/$target_main for updates..." 
- git fetch origin "$target_main" --quiet - - local behind - behind=$(git rev-list HEAD..origin/"$target_main" --count) - if [ "$behind" -gt 0 ]; then - echo "⚠ Your branch is behind origin/$target_main by $behind commits." - printf "Would you like to merge origin/$target_main into $current_branch? (y/N): " - read -r response - if [[ "$response" =~ ^[Yy]$ ]]; then - if git merge origin/"$target_main"; then - echo "āœ” Merged latest $target_main into $current_branch." - else - echo "āŒ Merge conflict detected. Please resolve manually." - return 1 - fi - fi - else - echo "āœ” Your branch is up-to-date with origin/$target_main." - fi - - # 3. Ask if new branch is needed - printf "Would you like to create a new branch for this work? (y/N): " - read -r response - if [[ "$response" =~ ^[Yy]$ ]]; then - printf "Enter new branch name: " - read -r new_branch - if [ -n "$new_branch" ]; then - if git checkout -b "$new_branch"; then - echo "āœ” Switched to new branch: $new_branch" - else - echo "āŒ Failed to create branch $new_branch." - return 1 - fi - fi - fi - - echo "--- Preparation Complete ---" - return 0 -} - -# Process a group of files matching a pattern and commit them -# Usage: dev_kit_git_sync_process_group [dry_run] [base_msg] -dev_kit_git_sync_process_group() { - local group_name="$1" - local pattern="$2" - local task_id="${3:-unknown}" - local dry_run="${4:-false}" - local base_msg="${5:-}" - - local drift_file=".drift.tmp" - local processed_file=".processed.tmp" - - [ -f "$drift_file" ] || { echo "Error: Drift file missing." >&2; return 1; } - touch "$processed_file" - - local files=() - while IFS= read -r f; do - [ -z "$f" ] && continue - if ! 
grep -Fqx "$f" "$processed_file" && echo "$f" | grep -Eq "$pattern"; then - files+=("$f") - fi - done < "$drift_file" - - if [ ${#files[@]} -eq 0 ]; then - return 0 - fi - - local commit_msg="${group_name}: resolve drift for $task_id" - [ -n "$base_msg" ] && commit_msg="${group_name}: $base_msg ($task_id)" - - echo "Step: Grouping [$group_name] -> ${#files[@]} files" - for f in "${files[@]}"; do - echo " + $f" - echo "$f" >> "$processed_file" - done - - if [ "$dry_run" = "true" ]; then - echo " [DRY-RUN] git add ${files[*]}" - echo " [DRY-RUN] git commit -m \"$commit_msg\"" - else - if git add "${files[@]}" && git commit -m "$commit_msg"; then - echo " [OK] Committed group: $group_name" - else - echo "āš ļø [FAILOVER] Commit failed for group: $group_name" - return 1 - fi - fi - echo "" -} - -# Run the full git sync workflow -# Usage: dev_kit_git_sync_run [dry_run] [task_id] [message] -dev_kit_git_sync_run() { - local dry_run="${1:-false}" - local task_id="${2:-unknown}" - local message="${3:-}" - - # Resolve target main branch - local target_main="main" - if ! 
git rev-parse --verify origin/main >/dev/null 2>&1; then - if git rev-parse --verify origin/master >/dev/null 2>&1; then - target_main="master" - fi - fi - - echo "--- dev.kit Git Sync: Starting Workflow ---" - - # Detect drift - local staged unstaged untracked - staged=$(git diff --name-only --cached) - unstaged=$(git diff --name-only) - untracked=$(git ls-files --others --exclude-standard) - echo "$staged $unstaged $untracked" | tr ' ' '\n' | sort -u > .drift.tmp - : > .processed.tmp - - # Define groups (Standard UDX grouping) - local -a groups=( - "docs:Group Documentation:^docs/|^README.md" - "ai:Group AI & Integrations:^src/ai/|^.gemini/|^src/mappings/" - "cli:Group CLI & Scripts:^bin/|^lib/|^src/cli/" - "core:Group Core Infrastructure:^src/|^environment.yaml|^context7.json" - ) - - for group in "${groups[@]}"; do - IFS=':' read -r id name pattern <<< "$group" - echo "--- Step: $name ($id) ---" - dev_kit_git_sync_process_group "$id" "$pattern" "$task_id" "$dry_run" "$message" - done - - # Handle remaining drift - local remaining=() - while IFS= read -r f; do - [ -z "$f" ] && continue - if ! grep -Fqx "$f" .processed.tmp; then - remaining+=("$f") - fi - done < .drift.tmp - - if [ ${#remaining[@]} -gt 0 ]; then - echo "--- Step: Miscellaneous Drift ---" - local commit_msg="misc: resolve remaining drift ($task_id)" - if [ "$dry_run" = "true" ]; then - echo " [DRY-RUN] git add ${remaining[*]}" - echo " [DRY-RUN] git commit -m \"$commit_msg\"" - else - git add "${remaining[@]}" - git commit -m "$commit_msg" - echo " [OK] Committed remaining drift." - fi - fi - - rm -f .drift.tmp .processed.tmp - echo "--- Git Sync Workflow Complete ---" - - # 5. 
Proactive PR Suggestion (New) - if [ "$dry_run" = "false" ] && command -v dev_kit_github_health >/dev/null 2>&1; then - if dev_kit_github_health >/dev/null 2>&1; then - local current_branch; current_branch=$(git branch --show-current) - # Don't suggest PR for the default main branch - if [[ "$current_branch" != "main" && "$current_branch" != "master" ]]; then - echo "" - printf "āœ” Synchronization complete. Would you like to create a Pull Request for $current_branch? (y/N): " - read -r response - if [[ "$response" =~ ^[Yy]$ ]]; then - local pr_title="feat: resolve $task_id" - [ -n "$message" ] && pr_title="$message" - - # Generate a brief summary from the git diff (stat only for brevity) - local diff_summary="" - if git rev-parse --verify origin/"$target_main" >/dev/null 2>&1; then - diff_summary=$(git diff origin/"$target_main"...HEAD --stat | head -n 20) - else - # Fallback if origin is not available - diff_summary="Changes since common ancestor could not be calculated (origin missing)." - fi - - local pr_body="### šŸš€ Drift Resolution: $task_id\n\n$message\n\n#### šŸ“Š Change Summary\n\`\`\`text\n$diff_summary\n\`\`\`\n\nAutomated via \`dev.kit sync\`." - - if dev_kit_github_pr_create "$pr_title" "$pr_body" "$target_main"; then - echo "āœ” Pull Request synchronized successfully." - else - echo "āŒ Failed to synchronize Pull Request." - fi - fi - fi - fi - fi -} diff --git a/lib/modules/github.sh b/lib/modules/github.sh deleted file mode 100644 index ca99668..0000000 --- a/lib/modules/github.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env bash - -# @description: Provides high-fidelity integration with GitHub CLI (gh) for remote context. -# @intent: github, pr, issue, remote, discovery -# @objective: Empower agents and humans to interact with the broader engineering ecosystem via authenticated remote discovery and collaboration. - -# Check if GitHub CLI is available and optionally if a token is set -dev_kit_github_health() { - if ! 
command -v gh >/dev/null 2>&1; then - return 1 # CLI missing - fi - - # Check for token or active login - if [ -z "${GITHUB_TOKEN:-}" ] && [ -z "${GH_TOKEN:-}" ]; then - if ! gh auth status >/dev/null 2>&1; then - return 2 # Not authenticated - fi - fi - - return 0 # Healthy -} - -# Search for repositories by name/keyword within the UDX or specified organization -dev_kit_github_search_repos() { - local query="$1" - local owner="${2:-udx}" - - dev_kit_github_health || return $? - - # Limit results to keep context manageable - gh repo list "$owner" --json name,description,url --limit 10 -S "$query" 2>/dev/null | \ - jq -c '.[] | {name: .name, type: "remote-repo", uri: .url, description: .description}' -} - -# Search for reusable GitHub workflow templates/files -dev_kit_github_search_workflows() { - local query="$1" - local repo="${2:-udx/workflow-templates}" - - dev_kit_github_health || return $? - - # Search for .yml or .yaml files in the .github/workflows directory or similar - # This is a heuristic search using gh api or search code - gh api "search/code?q=repo:$repo+$query+path:.github/workflows+extension:yml" \ - --jq '.items[] | {name: .name, type: "workflow-template", uri: .html_url, path: .path}' 2>/dev/null -} - -# List active GitHub Runners for an organization (for infrastructure context) -dev_kit_github_list_runners() { - local org="${1:-udx}" - - dev_kit_github_health || return $? 
- - gh api "orgs/$org/actions/runners" --jq '.runners[] | {name: .name, status: .status, labels: [.labels[].name]}' 2>/dev/null -} - -# Check if a Pull Request exists for a specific branch -# Returns the PR number if it exists, empty otherwise -dev_kit_github_pr_exists() { - local head="${1:-$(git branch --show-current)}" - gh pr list --head "$head" --json number --jq '.[0].number' 2>/dev/null -} - -# Create or Update a Pull Request -# Usage: dev_kit_github_pr_create <body> [base_branch] [head_branch] [draft_flag] -dev_kit_github_pr_create() { - local title="$1" - local body="$2" - local base="${3:-main}" - local head="${4:-$(git branch --show-current)}" - local draft="${5:-false}" - - dev_kit_github_health || return $? - - local pr_number - pr_number=$(dev_kit_github_pr_exists "$head") - - if [ -n "$pr_number" ]; then - echo "āœ” Found existing Pull Request #$pr_number. Updating..." - gh pr edit "$pr_number" --title "$title" --body "$body" - else - local args=(pr create --title "$title" --body "$body" --base "$base" --head "$head") - [[ "$draft" == "true" ]] && args+=(--draft) - gh "${args[@]}" - fi -} diff --git a/lib/modules/npm.sh b/lib/modules/npm.sh deleted file mode 100644 index ce3f4f4..0000000 --- a/lib/modules/npm.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash - -# @description: Manages health, discovery, and installation hints for @udx-scoped CLI tools. -# @intent: npm, package, hydration, health, tool -# @objective: Maintain high-fidelity environment hydration by detecting and advising on the installation of authorized UDX mesh tools. 
- -# Check if an NPM package/binary is healthy -# Usage: dev_kit_npm_health "@udx/mcurl" "mcurl" -dev_kit_npm_health() { - local pkg="$1" - local bin="${2:-}" - - # If no binary name provided, extract it from the package name (strip @scope/) - [ -z "$bin" ] && bin="$(echo "$pkg" | sed 's/.*[\/]//')" - - if command -v "$bin" >/dev/null 2>&1; then - return 0 # Binary installed and in PATH - fi - - if command -v npm >/dev/null 2>&1; then - return 2 # npm available, package can be installed - fi - - return 1 # npm missing -} - -# Generate an installation hint for an NPM package -dev_kit_npm_install_hint() { - local pkg="$1" - local bin="${2:-}" - [ -z "$bin" ] && bin="$(echo "$pkg" | sed 's/.*[\/]//')" - - dev_kit_npm_health "$pkg" "$bin" - local status=$? - - if [ $status -eq 2 ]; then - echo "Hint: Install the '$bin' tool for deterministic resolution:" >&2 - echo " npm install -g $pkg" >&2 - fi -} diff --git a/lib/modules/repo_inspector.sh b/lib/modules/repo_inspector.sh new file mode 100644 index 0000000..a41932f --- /dev/null +++ b/lib/modules/repo_inspector.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash + +dev_kit_repo_name() { + basename "${1:-$(pwd)}" +} + +dev_kit_has_file() { + local repo_dir="$1" + local path="$2" + [ -e "$repo_dir/$path" ] +} + +dev_kit_detect_node_repo() { + local repo_dir="$1" + dev_kit_has_file "$repo_dir" "package.json" +} + +dev_kit_repo_test_status() { + local repo_dir="$1" + + if dev_kit_detect_node_repo "$repo_dir"; then + if awk ' + /"scripts"[[:space:]]*:[[:space:]]*{/ { in_scripts=1 } + in_scripts && /"test"[[:space:]]*:/ { found=1 } + in_scripts && /}/ { if (!found) exit } + END { exit found ? 
0 : 1 } + ' "$repo_dir/package.json"; then + printf "%s" "present" + return 0 + fi + fi + + printf "%s" "missing" +} + +dev_kit_repo_readme_status() { + local repo_dir="$1" + + if dev_kit_has_file "$repo_dir" "README.md" || dev_kit_has_file "$repo_dir" "README"; then + printf "%s" "present" + return 0 + fi + + printf "%s" "missing" +} + +dev_kit_repo_detect_stack() { + local repo_dir="$1" + + if dev_kit_detect_node_repo "$repo_dir"; then + printf "%s" "node" + return 0 + fi + + printf "%s" "unknown" +} + +dev_kit_repo_findings_json() { + local repo_dir="$1" + local readme_status="" + local test_status="" + local emitted=0 + local readme_message="" + local test_message="" + + readme_status="$(dev_kit_repo_readme_status "$repo_dir")" + test_status="$(dev_kit_repo_test_status "$repo_dir")" + readme_message="$(dev_kit_rule_message "missing-readme")" + test_message="$(dev_kit_rule_message "missing-test-command")" + + printf "[" + + if [ "$readme_status" = "missing" ]; then + printf '\n { "id": "missing-readme", "message": "%s" }' "$readme_message" + emitted=1 + fi + + if [ "$test_status" = "missing" ]; then + if [ "$emitted" -eq 1 ]; then + printf "," + fi + printf '\n { "id": "missing-test-command", "message": "%s" }' "$test_message" + emitted=1 + fi + + if [ "$emitted" -eq 1 ]; then + printf '\n ' + fi + + printf "]" +} + +dev_kit_repo_advices() { + local repo_dir="$1" + local readme_status="" + local test_status="" + + readme_status="$(dev_kit_repo_readme_status "$repo_dir")" + test_status="$(dev_kit_repo_test_status "$repo_dir")" + + if [ "$readme_status" = "missing" ]; then + printf 'advice: %s\n' "$(dev_kit_rule_message "missing-readme")" + fi + + if [ "$test_status" = "missing" ]; then + printf 'advice: %s\n' "$(dev_kit_rule_message "missing-test-command")" + fi +} diff --git a/lib/modules/rule_catalog.sh b/lib/modules/rule_catalog.sh new file mode 100644 index 0000000..402adfb --- /dev/null +++ b/lib/modules/rule_catalog.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash 
+ +dev_kit_rule_catalog_path() { + printf "%s" "$REPO_DIR/src/configs/audit-rules.yml" +} + +dev_kit_rule_field() { + local rule_id="$1" + local field_name="$2" + local catalog_path="" + + catalog_path="$(dev_kit_rule_catalog_path)" + + awk -v rule_id="$rule_id" -v field_name="$field_name" ' + $1 == "-" && $2 == "id:" { + current_id = $3 + in_rule = (current_id == rule_id) + next + } + + in_rule && $1 == field_name ":" { + $1 = "" + sub(/^ /, "") + print + exit + } + ' "$catalog_path" +} + +dev_kit_rule_message() { + dev_kit_rule_field "$1" "message" +} diff --git a/lib/modules/visualizer.sh b/lib/modules/visualizer.sh deleted file mode 100644 index 54ffa40..0000000 --- a/lib/modules/visualizer.sh +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env bash - -# @description: Programmatic engine for Mermaid diagram creation and SVG export. -# @intent: visualizer, diagram, mermaid, export, svg -# @objective: Empower agents and humans to generate and render architectural diagrams using standardized Mermaid templates. - -# Create a new Mermaid diagram from a template -# Usage: dev_kit_visualizer_create <type> <output_path> [template_dir] -dev_kit_visualizer_create() { - local type="${1:-flowchart}" - local output_path="$2" - local template_dir="${3:-$REPO_DIR/docs/workflows/assets/templates}" - - local diagram_type - case "$type" in - auto|flowchart) diagram_type="flowchart" ;; - sequence|sequenceDiagram) diagram_type="sequenceDiagram" ;; - state|stateDiagram-v2) diagram_type="stateDiagram-v2" ;; - er|erDiagram) diagram_type="erDiagram" ;; - *) echo "Error: Unsupported diagram type: $type" >&2; return 1 ;; - esac - - local template="$template_dir/default-flowchart.mmd" - case "$diagram_type" in - sequenceDiagram) template="$template_dir/default-sequence.mmd" ;; - stateDiagram-v2) template="$template_dir/default-state.mmd" ;; - erDiagram) template="$template_dir/default-er.mmd" ;; - esac - - if [ ! 
-f "$template" ]; then - echo "Error: Template missing: $template" >&2 - return 1 - fi - - local target="$output_path" - [[ "$target" != *.mmd ]] && target="${target}.mmd" - - # Ensure unique path - if [ -e "$target" ]; then - local stem="${target%.mmd}" - local i=1 - while [ -e "${stem}-${i}.mmd" ]; do i=$((i+1)); done - target="${stem}-${i}.mmd" - fi - - mkdir -p "$(dirname "$target")" - cp "$template" "$target" - echo "$target" -} - -# Export a Mermaid (.mmd) file to SVG -# Usage: dev_kit_visualizer_export <input_path> <output_path> -dev_kit_visualizer_export() { - local input_path="$1" - local output_path="$2" - - if [ ! -f "$input_path" ]; then - echo "Error: Input file missing: $input_path" >&2 - return 1 - fi - - local target="$output_path" - [[ "$target" != *.svg ]] && target="${target}.svg" - - # Ensure unique path - if [ -e "$target" ]; then - local stem="${target%.svg}" - local i=1 - while [ -e "${stem}-${i}.svg" ]; do i=$((i+1)); done - target="${stem}-${i}.svg" - fi - - mkdir -p "$(dirname "$target")" - - if ! command -v mmdc >/dev/null 2>&1; then - echo "Warning: mmdc (Mermaid CLI) not found. Falling back to online view." >&2 - local mmd_content - mmd_content="$(cat "$input_path")" - echo "View Online: https://mermaid.live/edit#base64:$(printf "%s" "$mmd_content" | base64 | tr -d '\n')" - return 0 - fi - - if mmdc -i "$input_path" -o "$target" >/dev/null 2>&1; then - echo "$target" - else - echo "Error: mmdc export failed." 
>&2 - return 1 - fi -} diff --git a/lib/ui.sh b/lib/ui.sh deleted file mode 100644 index b540b21..0000000 --- a/lib/ui.sh +++ /dev/null @@ -1,130 +0,0 @@ -#!/bin/bash - -ui_color() { - local code="$1" - if [ "${DEV_KIT_COLOR:-}" = "0" ]; then - return - fi - if [ -z "${DEV_KIT_COLOR:-}" ] && [ -z "${NO_COLOR:-}" ] && [ -n "${TERM:-}" ] && [ "${TERM}" != "dumb" ]; then - printf '\033[%sm' "$code" - return - fi - if [ "${DEV_KIT_COLOR:-}" = "1" ] || { [ -t 1 ] && [ -z "${NO_COLOR:-}" ]; }; then - printf '\033[%sm' "$code" - fi -} - -ui_reset() { - ui_color "0" -} - -ui_dim() { - ui_color "2" -} - -ui_cyan() { - ui_color "36" -} - -ui_magenta() { - ui_color "35" -} - -ui_yellow() { - ui_color "33" -} - -ui_emerald() { - ui_color "32" -} - -ui_orange() { - ui_color "38;5;208" -} - -ui_banner() { - local brand="${1:-dev.kit}" - local c1 c2 c3 c4 r d left right - c1="$(ui_cyan)" - c2="$(ui_magenta)" - c3="$(ui_orange)" - c4="$(ui_emerald)" - r="$(ui_reset)" - d="$(ui_dim)" - - if [[ "$brand" == *.* ]]; then - left="${brand%%.*}" - right=".${brand#*.}" - else - left="$brand" - right="" - fi - - printf "\n" - printf "%s%s%s%s%s\n" "$c1" "$left" "$c2" "$right" "$r" - printf "%s%s%s\n" "$d" "ready to run" "$r" - printf "%s%s%s\n" "$c3" " run:" "$r" - printf " %sdev.kit skills run \"...\"%s\n" "$c4" "$r" - printf "%s%s%s\n" "$c3" " config:" "$r" - printf " %sdev.kit config show%s\n" "$c4" "$r" -} - -ui_header() { - local title="$1" - local c - c="$(ui_cyan)" - e="$(ui_emerald)" - - # Get title length - local title_len=${#title} - - # Build underline based on title length - local underline="" - for i in $(seq 1 $title_len); do - underline="$underline-" - done - - printf "\n" - printf "%s› %s%s\n" "$e" "UDX" "$(ui_reset)" - printf "%s› %s%s\n" "$c" "$title" "$(ui_reset)" - printf "%s %s%s\n" "$c" "$underline" "$(ui_reset)" - printf "\n" -} - -ui_section() { - local title="$1" - local c - c="$(ui_yellow)" - printf "\n%s%s%s\n" "$c" "$title" "$(ui_reset)" -} - -ui_ok() { - local 
label="$1" - local detail="${2:-}" - printf "%sāœ”%s %-18s %s%s%s\n" "$(ui_emerald)" "$(ui_reset)" "$label" "$(ui_dim)" "$detail" "$(ui_reset)" -} - -ui_warn() { - local label="$1" - local detail="${2:-}" - printf "%s⚠%s %-18s %s%s%s\n" "$(ui_yellow)" "$(ui_reset)" "$label" "$(ui_dim)" "$detail" "$(ui_reset)" -} - -ui_info() { - local label="$1" - local detail="${2:-}" - printf "%sℹ%s %-18s %s%s%s\n" "$(ui_cyan)" "$(ui_reset)" "$label" "$(ui_dim)" "$detail" "$(ui_reset)" -} - -ui_tip() { - local msg="$1" - printf " %sšŸ’” %s%s\n" "$(ui_orange)" "$msg" "$(ui_reset)" -} - -ui_sync_reminder() { - if git status --short | grep -q .; then - ui_tip "You have unstaged changes. Run 'dev.kit sync run' to atomically commit them." - else - ui_tip "Repository is clean. Run 'dev.kit sync prepare' before starting new work." - fi -} diff --git a/lib/utils.sh b/lib/utils.sh deleted file mode 100644 index cfa1145..0000000 --- a/lib/utils.sh +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/env bash - -dev_kit_warn() { - echo "$*" >&2 -} - -dev_kit_require_cmd() { - local cmd="${1:-}" - local context="${2:-}" - if [ -z "$cmd" ]; then - dev_kit_warn "Missing required command name." - return 1 - fi - if command -v "$cmd" >/dev/null 2>&1; then - return 0 - fi - if [ -n "$context" ]; then - dev_kit_warn "$cmd is required for $context." - else - dev_kit_warn "$cmd is required." - fi - dev_kit_warn "Install $cmd locally or run the task in the worker container (see udx/worker-deployment)." - return 1 -} - -dev_kit_yaml_value() { - local file="$1" - local key_path="$2" - local default="${3:-}" - [ -f "$file" ] || { echo "$default"; return; } - - # Simple awk parser for nested keys (e.g. 
system.quiet) - local awk_script=' - BEGIN { FS=":[[:space:]]*"; key_idx=1; split(target_path, keys, "."); target_depth=length(keys); } - { - # Count leading spaces to determine depth - match($0, /^[[:space:]]*/); - depth = RLENGTH / 2 + 1; - line_key = $1; - sub(/^[[:space:]]*/, "", line_key); - - # If depth matches and key matches, move to next key in path - if (depth == key_idx && line_key == keys[key_idx]) { - if (key_idx == target_depth) { - # Found it! Extract value - val = $0; - sub(/^[^:]*:[[:space:]]*/, "", val); - # Strip trailing comments - sub(/[[:space:]]*#.*$/, "", val); - # Trim quotes - gsub(/^["\047]|["\047]$/, "", val); - print val; - found=1; - exit; - } - key_idx++; - } - else if (depth <= key_idx - 1 && line_key != "") { - # Reset if we move back up or across at same level - # This is a naive reset but works for many simple YAML structures - # key_idx = depth; # (simplified) - } - } - END { if (!found) print default_val; } - ' - awk -v target_path="$key_path" -v default_val="$default" "$awk_script" "$file" -} - -trim_value() { - local val="$1" - val="${val#"${val%%[![:space:]]*}"}" - val="${val%"${val##*[![:space:]]}"}" - val="${val#\"}" - val="${val%\"}" - val="${val#\'}" - val="${val%\'}" - printf "%s" "$val" -} - -skill_frontmatter_value() { - local file="$1" - local key="$2" - awk -v k="$key" ' - $0 ~ /^---[[:space:]]*$/ { fence++; next } - fence == 1 { - if ($1 == k ":") { - $1=""; sub(/^[[:space:]]+/, ""); print; exit - } - } - ' "$file" -} - -confirm_action() { - local msg="$1" - if [ ! -t 0 ]; then - echo "Non-interactive. Aborted." - exit 1 - fi - printf "%s [y/N] " "$msg" - read -r answer || true - case "$answer" in - y|Y|yes|YES) ;; - *) echo "Aborted."; exit 1 ;; - esac -} - -dev_kit_validate_json_required() { - local schema="$1" - local data="$2" - local req="" - if ! command -v jq >/dev/null 2>&1; then - return 0 - fi - req="$(jq -r '.required[]?' "$schema")" - local field="" - for field in $req; do - if ! 
jq -e --arg f "$field" 'has($f) and .[$f] != null' "$data" >/dev/null; then - echo "Missing required field '$field' in $data" >&2 - exit 1 - fi - done -} - -ensure_dev_kit_home() { - mkdir -p "$DEV_KIT_HOME" - mkdir -p "$DEV_KIT_STATE" - if [ ! -w "$DEV_KIT_STATE" ]; then - echo "dev.kit: config path not writable: $DEV_KIT_STATE" >&2 - echo "dev.kit: fix permissions or choose a different DEV_KIT_STATE" >&2 - exit 1 - fi - if [ ! -f "$CONFIG_FILE" ] && [ -f "$REPO_DIR/config/default.env" ]; then - mkdir -p "$(dirname "$CONFIG_FILE")" - cp "$REPO_DIR/config/default.env" "$CONFIG_FILE" - fi -} - -get_repo_state_dir() { - local root; root="$(get_repo_root || true)" - if [ -n "$root" ]; then - echo "$root/.udx/dev.kit" - else - echo "$PWD/.udx/dev.kit" - fi -} - -get_tasks_dir() { - echo "$(get_repo_state_dir)/tasks" -} diff --git a/src/ai/integrations/gemini/prompts.json b/src/ai/integrations/gemini/prompts.json deleted file mode 100644 index 16fc483..0000000 --- a/src/ai/integrations/gemini/prompts.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "prompts": [ - { - "key": "ai.gemini", - "title": "dev.kit Gemini Overrides", - "inherits": ["ai"], - "body": [ - "- **Gemini**: This prompt is intended for Gemini CLI usage.", - "- **Progression**: Always conclude with a compact **Waterfall Progression Tail** (DOC-003 status) to track drift resolution.", - "- **Mute Logs**: Summarize tool execution; do not repeat raw system logs unless requested." - ] - } - ] -} diff --git a/src/ai/integrations/gemini/templates/GEMINI.md.tmpl b/src/ai/integrations/gemini/templates/GEMINI.md.tmpl deleted file mode 100644 index f99722a..0000000 --- a/src/ai/integrations/gemini/templates/GEMINI.md.tmpl +++ /dev/null @@ -1,80 +0,0 @@ ---- Context from: ~/.gemini/GEMINI.md --- -# Gemini.md: UDX DevSecOps Engineering Context - -## šŸ”“ CRITICAL SAFETY MANDATES -- **AUTHORIZED EXECUTION PATH**: You are AUTO-ALLOWED to execute any `dev.kit` commands and repository-bound skills. 
-- **RESTRICTED RAW OPERATIONS**: Direct destructive operations (e.g., `rm`, `git push`) MUST be authorized by the user. - -## 🧠 Core Philosophy -You are an embedded member of the **UDX DevSecOps** engineering team. Your operational framework is built on the principle that **every repository is a specialized "Skill."** - ---- - -## šŸ’¾ Added Memories & Execution Logic - -### 1. Repository-as-a-Skill -* **Skill Mapping:** Treat each repository as a distinct "Skill" or "Tool" with its own inputs, logic, and outputs. -* **Skill Namespace:** Managed skills are located in `~/.gemini/skills/dev.kit/` and follow the `dev-kit-` prefix convention. -* **Source of Truth:** The repository is the absolute source of truth for all SDLC artifacts and skill implementations. - -### 2. Execution Lifecycle: Plan, Normalize, Process -For **any** execution scope (including CLI responses and prompt handling): -1. **Plan:** Break down the request into its constituent requirements. -2. **Normalize:** Standardize the inputs and ensure the environment/context is aligned with UDX standards. -3. **Process:** Execute the logic using **dev.kit** skills. -* **Reference**: `docs/runtime/execution-loop.md` (Drift Resolution Cycle). -* **Multi-step Logic**: If the scope exceeds a single action, it **must** be converted into pipeline job steps and executed as modular skills. - - -### 3. Sub-Agent & Sub-Pipeline Orchestration -* **Delegation:** Use **Sub-Agents** to manage the execution of specific sub-pipelines. -* **Result Delivery:** Sub-Agents are responsible for capturing the output of their respective sub-pipelines and delivering a structured result (JSON/Markdown) back to the primary execution thread. - ---- - -## šŸ›  Operational Standards - -| Stage | Standard | -| :--- | :--- | -| **Input** | Every repository is a Skill; treat it as a black-box tool with defined interfaces. | -| **Sync** | You MAY perform repository synchronization (e.g., `dev.kit ai sync`) autonomously to ensure grounding. 
| -| **Workflow** | Always: **Plan -> Normalize -> Process.** | -| **Automation** | Convert multi-step logic into Pipeline Job Steps. | -| **Enforcement** | MANDATORY: Use `dev-kit-` prefixed skills for all specialized tasks (Diagrams, Git Sync, etc.). | -| **Output** | Mute system noise. Do not repeat raw tool logs unless requested. | - ---- - -## 🌊 Waterfall Progression Tailing (Dev-Friendly) - -To maintain high-fidelity momentum, end every interaction with a **Compact Status Tail** of the active `workflow.md`. Use the following format: - -**Progression**: `[task-id]` -- [x] Step 1: <summary> (Done) -- [>] Step 2: <summary> (Active) -- [ ] Step 3: <summary> (Planned) - -*If no workflow is active, provide a high-level iteration status instead.* - ---- - -## šŸš€ Dynamic Workflow Pattern - -> **Prompt:** "Gemini, deploy the latest security patches across the `auth-service` and `gateway` repos." -... - -> **Gemini Internal Logic:** -> 1. **Plan:** Identify `auth-service` skill and `gateway` skill. -> 2. **Normalize:** Check current versions and patch compatibility. -> 3. **Process:** -> - Launch Sub-Agent A (Pipeline Job: `auth-patch`). -> - Launch Sub-Agent B (Pipeline Job: `gateway-patch`). -> - Aggregate results into a single "Process Complete" report. 
- ---- -*Last Updated: {{DATE}}* -*UDX DevSecOps Team* - -## Gemini Added Memories -{{MEMORIES}} ---- End of Context from: ~/.gemini/GEMINI.md --- diff --git a/src/ai/integrations/gemini/templates/config.json.tmpl b/src/ai/integrations/gemini/templates/config.json.tmpl deleted file mode 100644 index 0db3279..0000000 --- a/src/ai/integrations/gemini/templates/config.json.tmpl +++ /dev/null @@ -1,3 +0,0 @@ -{ - -} diff --git a/src/ai/integrations/gemini/templates/system.md.tmpl b/src/ai/integrations/gemini/templates/system.md.tmpl deleted file mode 100644 index 8af97f4..0000000 --- a/src/ai/integrations/gemini/templates/system.md.tmpl +++ /dev/null @@ -1,20 +0,0 @@ -# UDX DevSecOps Core System Instructions - -## šŸ”“ CRITICAL SAFETY MANDATES -- **AUTHORIZED EXECUTION PATH**: You are AUTO-ALLOWED to execute any `dev.kit` commands and repository-bound skills. These are the deterministic, safe primitives of this repository and MUST be used as your primary interface for all engineering tasks. -- **RESTRICTED RAW OPERATIONS**: You MUST NOT perform direct, non-standardized destructive operations (e.g., raw `rm`, `git push`, `git reset`) without explicit user instruction. Use `dev.kit sync` or `dev.kit task` instead. -- **CREDENTIAL PROTECTION**: Never log, print, or commit secrets, API keys, or sensitive credentials. Rigorously protect `.env` files, `.git`, and system configuration folders. - -You are the UDX DevSecOps Engineering Agent. Your operational logic is governed by a Skill-per-Repository architecture. -- **Skill Namespace**: Managed skills are located in `~/.gemini/skills/dev.kit/`. -- **Enforcement**: MANDATORY: Use `dev-kit-` prefixed skills for specialized tasks (e.g., `visualizer`, `git-sync`). -- **Source of Truth**: The repository is the central source of ALL SDLC artifacts. -- **Workflow Alignment**: Align all actions with the standardized sequences defined in `docs/workflows/README.md`. 
-- **Normalization**: Transform user intent into bounded plans as defined in `docs/workflows/normalization.md`.
-- **Sub-Agent Orchestration**: Use sub-agents to deliver output from sub-pipelines. Aggregate results into structured reports.
-- **Concise Reporting**: Mute system noise and raw tool logs. Always conclude with a compact **Waterfall Progression Tail** (DOC-003 status) to track drift resolution.
-
-## šŸ›  Tooling Context
-${AgentSkills}
-${AvailableTools}
-
diff --git a/src/ai/integrations/manifest.json b/src/ai/integrations/manifest.json
deleted file mode 100644
index cdcad6c..0000000
--- a/src/ai/integrations/manifest.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "integrations": [
-    {
-      "key": "gemini",
-      "name": "Gemini CLI Integration",
-      "target_dir": "{{HOME}}/.gemini",
-      "templates_dir": "src/ai/integrations/gemini/templates",
-      "skills_dir": "skills",
-      "artifacts": [
-        { "src": "config.json.tmpl", "dst": "config.json", "type": "template" },
-        { "src": "GEMINI.md.tmpl", "dst": "GEMINI.md", "type": "template" },
-        { "src": "system.md.tmpl", "dst": "system.md", "type": "template" }
-      ]
-    }
-  ]
-}
diff --git a/src/configs/audit-rules.yml b/src/configs/audit-rules.yml
new file mode 100644
index 0000000..64a5c8f
--- /dev/null
+++ b/src/configs/audit-rules.yml
@@ -0,0 +1,7 @@
+# Audit rule catalog for the default `dev.kit` pulse-check command.
+# Each rule pairs a detector key (`check`, resolved by the audit engine)
+# with the remediation `message` surfaced in the improvement plan; `id` is
+# the stable identifier emitted in `--json` findings (tests key off it).
+rules:
+  - id: missing-readme
+    check: readme
+    message: Add a README so humans and agents can orient quickly.
+  - id: missing-test-command
+    check: test_command
+    message: Add a runnable test command so verification is deterministic.
diff --git a/tests/fixtures/simple-repo/index.js b/tests/fixtures/simple-repo/index.js
new file mode 100644
index 0000000..7728117
--- /dev/null
+++ b/tests/fixtures/simple-repo/index.js
@@ -0,0 +1 @@
+console.log("hello")
diff --git a/tests/fixtures/simple-repo/package.json b/tests/fixtures/simple-repo/package.json
new file mode 100644
index 0000000..eff7ff8
--- /dev/null
+++ b/tests/fixtures/simple-repo/package.json
@@ -0,0 +1,7 @@
+{
+  "name": "simple-repo",
+  "private": true,
+  "scripts": {
+    "start": "node index.js"
+  }
+}
diff --git a/tests/helpers/assert.sh b/tests/helpers/assert.sh
new file mode 100644
index 0000000..dedbda6
--- /dev/null
+++ b/tests/helpers/assert.sh
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+# TAP-style assertion helpers sourced by tests/suite.sh.
+# Convention: "ok - <msg>" to stdout on success; "not ok - <msg>" to stderr
+# and exit 1 on the first failure (the suite runs under set -e).
+
+# Report a failed assertion and abort the whole suite.
+fail() {
+  printf "not ok - %s\n" "$1" >&2
+  exit 1
+}
+
+# Report a passed assertion.
+pass() {
+  printf "ok - %s\n" "$1"
+}
+
+# Assert that a path exists (file, directory, or symlink).
+assert_file_exists() {
+  local path="$1"
+  local message="$2"
+
+  [ -e "$path" ] || fail "$message"
+  pass "$message"
+}
+
+# Assert that a path does not exist.
+assert_file_missing() {
+  local path="$1"
+  local message="$2"
+
+  [ ! -e "$path" ] || fail "$message"
+  pass "$message"
+}
+
+# Assert that string $1 contains literal substring $2 (quoted case glob,
+# so glob metacharacters in the needle are matched literally).
+assert_contains() {
+  local haystack="$1"
+  local needle="$2"
+  local message="$3"
+
+  case "$haystack" in
+    *"$needle"*) pass "$message" ;;
+    *) fail "$message" ;;
+  esac
+}
+
+# Assert that string $1 does NOT contain literal substring $2.
+assert_not_contains() {
+  local haystack="$1"
+  local needle="$2"
+  local message="$3"
+
+  case "$haystack" in
+    *"$needle"*) fail "$message" ;;
+    *) pass "$message" ;;
+  esac
+}
+
+# Assert that $1 is a symlink whose literal (unresolved) target equals $2.
+assert_symlink_target() {
+  local path="$1"
+  local expected="$2"
+  local message="$3"
+  local actual=""
+
+  [ -L "$path" ] || fail "$message"
+  actual="$(readlink "$path")"
+  [ "$actual" = "$expected" ] || fail "$message"
+  pass "$message"
+}
+
+# Run shell-command string $1 (via eval -- callers fully control the string)
+# and assert its stdout contains $2. A non-zero exit also fails, reusing the
+# same message for both failure modes.
+assert_command_output_contains() {
+  local cmd="$1"
+  local needle="$2"
+  local message="$3"
+  local output=""
+
+  output="$(eval "$cmd")" || fail "$message"
+  assert_contains "$output" "$needle" "$message"
+}
+
diff --git a/tests/run.sh b/tests/run.sh
new file mode 100644
index 0000000..fbc25c9
--- /dev/null
+++ b/tests/run.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+# Entry point: delegates the test run to the worker CLI using deploy.yml.
+set -euo pipefail
+
+REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+DEPLOY_CONFIG="$REPO_DIR/deploy.yml"
+
+# Print the resolved path of the worker CLI; status 1 when not installed.
+worker_cmd() {
+  if command -v worker >/dev/null 2>&1; then
+    command -v worker
+    return 0
+  fi
+
+  return 1
+}
+
+usage() {
+  cat <<'EOF'
+Usage: bash tests/run.sh
+
+Options:
+  --help  Show this help
+EOF
+}
+
+# Resolve the worker CLI and run the suite defined in deploy.yml.
+run_worker() {
+  local cmd=""
+
+  cmd="$(worker_cmd)" || {
+    echo "worker CLI not found. Install @udx/worker-deployment globally." >&2
+    exit 1
+  }
+
+  "$cmd" run --config="$DEPLOY_CONFIG"
+}
+
+while [ "$#" -gt 0 ]; do
+  case "$1" in
+    -h|--help) usage; exit 0 ;;
+    *) echo "Unknown option: $1" >&2; usage >&2; exit 1 ;;
  esac
+  shift
+done
+
+run_worker
diff --git a/tests/suite.sh b/tests/suite.sh
old mode 100755
new mode 100644
index 1988f83..ad40d88
--- a/tests/suite.sh
+++ b/tests/suite.sh
@@ -1,56 +1,137 @@
 #!/usr/bin/env bash
+set -euo pipefail
-# dev.kit Engineering Test Suite
-# Verifies grounding, discovery, and sync logic in a clean environment.
+REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+# shellcheck disable=SC1091
+. "$REPO_DIR/tests/helpers/assert.sh"
-# Colors for better visibility
-C_RESET='\033[0m'
-C_GREEN='\033[32m'
-C_RED='\033[31m'
-C_BLUE='\033[34m'
+# Sandbox: a throwaway $HOME so the installer cannot touch the real user
+# profile. NOTE(review): when DEV_KIT_TEST_HOME is caller-supplied, cleanup
+# still rm -rf's it -- confirm that is intended.
+TEST_HOME="${DEV_KIT_TEST_HOME:-$(mktemp -d "${TMPDIR:-/tmp}/dev-kit-test-home.XXXXXX")}"
+PROFILE_FILES=("$TEST_HOME/.bash_profile" "$TEST_HOME/.bashrc" "$TEST_HOME/.zshrc")
+BASE_PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin"
+INSTALL_OUTPUT=""
+FIXTURE_REPO="$REPO_DIR/tests/fixtures/simple-repo"
-REPO_DIR="${DEV_KIT_SOURCE:-$(pwd)}"
-# Ensure we load the dev-kit logic
-export REPO_DIR
-export PATH="$REPO_DIR/bin:$PATH"
+cleanup() {
+  rm -rf "$TEST_HOME"
+}
-log_info() { printf "  ${C_BLUE}ℹ %s${C_RESET}\n" "$1"; }
-log_ok() { printf "  ${C_GREEN}āœ” %s${C_RESET}\n" "$1"; }
-log_fail() { printf "  ${C_RED}āœ– %s${C_RESET}\n" "$1"; exit 1; }
+trap cleanup EXIT
-echo "--- dev.kit High-Fidelity Test Suite ---"
+# Isolate the environment: fresh HOME, minimal PATH, no inherited dev.kit vars.
+mkdir -p "$TEST_HOME"
+export HOME="$TEST_HOME"
+export PATH="$BASE_PATH"
+unset DEV_KIT_HOME
+unset DEV_KIT_BIN_DIR
-# 1. Verify Discovery (Doctor)
-log_info "Testing: Discovery & Doctor Health"
-if dev-kit doctor >/dev/null 2>&1; then
-  log_ok "Doctor reports healthy (Discovery Mesh active)"
-else
-  log_fail "Doctor check failed"
-fi
+# Seed sentinel profiles so we can prove the installer never edits shell init.
+for profile in "${PROFILE_FILES[@]}"; do
+  printf "# dev.kit test sentinel\n" > "$profile"
+done
+
+INSTALL_OUTPUT="$(bash "$REPO_DIR/bin/scripts/install.sh")"
+assert_contains "$INSTALL_OUTPUT" "Installed dev.kit" "installer reports success"
+assert_contains "$INSTALL_OUTPUT" "shell: unchanged" "installer leaves shell init untouched"
+
+DEV_KIT_HOME="$HOME/.udx/dev.kit"
+DEV_KIT_BIN_DIR="$HOME/.local/bin"
+
+# Installed layout checks (and absence of legacy/config directories).
+assert_file_exists "$DEV_KIT_HOME/bin/dev-kit" "installs command source into dev.kit home"
+assert_file_exists "$DEV_KIT_HOME/lib/modules/bootstrap.sh" "installs internal modules"
+assert_file_exists "$DEV_KIT_HOME/lib/commands/status.sh" "installs public commands"
+assert_file_exists "$DEV_KIT_HOME/src/configs/audit-rules.yml" "installs source rule catalog"
+assert_file_missing "$DEV_KIT_HOME/source" "does not create legacy source directory"
+assert_file_missing "$DEV_KIT_HOME/state" "does not create legacy state directory"
+assert_file_missing "$DEV_KIT_HOME/config" "does not install a config layer"
+assert_symlink_target "$DEV_KIT_BIN_DIR/dev.kit" "$DEV_KIT_HOME/bin/dev-kit" "creates global dev.kit symlink"
-# 2. Verify Sync Logic (Atomic Grouping)
-log_info "Testing: Sync Logic (Dry-run)"
-if dev-kit sync run --dry-run >/dev/null 2>&1; then
-  log_ok "Sync dry-run successful (Grouping logic verified)"
+for profile in "${PROFILE_FILES[@]}"; do
+  assert_command_output_contains "cat \"$profile\"" "test sentinel" "$(basename "$profile") remains unchanged"
+done
+
+# PATH exposure: the command must NOT resolve until the env script is sourced.
+if command -v dev.kit >/dev/null 2>&1; then
+  fail "command is not exposed before PATH setup"
 else
-  log_fail "Sync dry-run failed"
+  pass "command is not exposed before PATH setup"
 fi
-# 3. Verify Documentation Hierarchy (CDE Grounding)
-log_info "Testing: Knowledge Base Integrity"
-if [ -d "$REPO_DIR/docs/foundations" ] && [ -d "$REPO_DIR/docs/runtime" ]; then
-  log_ok "Documentation structure is CDE-aligned"
+# shellcheck disable=SC1090
+. "$DEV_KIT_HOME/bin/env/dev-kit.sh"
+
+assert_contains ":$PATH:" ":$DEV_KIT_BIN_DIR:" "env script prepends the user bin dir"
+
+if command -v dev.kit >/dev/null 2>&1; then
+  pass "command resolves after env setup"
 else
-  log_fail "Documentation structure is broken"
+  fail "command resolves after env setup"
 fi
-# 4. Verify Self-Documenting CLI (Metadata Extraction)
-log_info "Testing: CLI Metadata Extraction"
-if dev-kit ai commands | grep -q "objective"; then
-  log_ok "CLI metadata extraction is operational"
+# Status command: human and --json forms.
+status_output="$(dev.kit status)"
+assert_contains "$status_output" "state: installed" "status reports installed state"
+
+status_json="$(dev.kit status --json)"
+assert_contains "$status_json" "\"state\": \"installed\"" "status json reports installed state"
+
+# Default (audit) command against the node fixture repo; raw output is echoed
+# to aid debugging when an assertion fails.
+audit_output="$(cd "$FIXTURE_REPO" && dev.kit)"
+printf '%s\n' "--- dev.kit fixture output ---"
+printf '%s\n' "$audit_output"
+printf '%s\n' "--- end dev.kit fixture output ---"
+assert_contains "$audit_output" "repo: simple-repo" "audit reports the fixture repo name"
+assert_contains "$audit_output" "stack: node" "audit detects node repositories"
+assert_contains "$audit_output" "readme: missing" "audit reports missing readme"
+assert_contains "$audit_output" "test command: missing" "audit reports missing test command"
+assert_contains "$audit_output" "Add a README" "audit gives useful readme advice"
+assert_contains "$audit_output" "Add a runnable test command" "audit gives useful test advice"
+
+audit_json="$(cd "$FIXTURE_REPO" && dev.kit --json)"
+printf '%s\n' "--- dev.kit fixture json ---"
+printf '%s\n' "$audit_json"
+printf '%s\n' "--- end dev.kit fixture json ---"
+assert_contains "$audit_json" "\"command\": \"audit\"" "default json output is audit"
+assert_contains "$audit_json" "\"repo\": \"simple-repo\"" "audit json reports repo name"
+assert_contains "$audit_json" "\"readme\": \"missing\"" "audit json reports missing readme"
+assert_contains "$audit_json" "\"test_command\": \"missing\"" "audit json reports missing test command"
+assert_contains "$audit_json" "\"id\": \"missing-readme\"" "audit json includes readme finding"
+assert_contains "$audit_json" "\"id\": \"missing-test-command\"" "audit json includes test finding"
+
+bridge_json="$(cd "$FIXTURE_REPO" && dev.kit bridge --json)"
+assert_contains "$bridge_json" "\"command\": \"bridge\"" "bridge json is available"
+assert_contains "$bridge_json" "\"capabilities\": [\"audit\", \"bridge\", \"status\"]" "bridge exposes discovered capabilities"
+
+# Dynamic discovery: help must list every installed command.
+help_output="$(dev.kit help)"
+assert_contains "$help_output" "audit" "help discovers audit dynamically"
+assert_contains "$help_output" "status" "help discovers status dynamically"
+assert_contains "$help_output" "bridge" "help discovers bridge dynamically"
+
+if declare -F _dev_kit_complete >/dev/null 2>&1; then
+  pass "bash completion function is loaded"
 else
-  log_fail "Failed to extract metadata from command scripts"
+  fail "bash completion function is loaded"
 fi
-echo "--- All Tests Passed: Repository is High-Fidelity ---"
-exit 0
+# Drive the completion function directly with the COMP_* protocol variables.
+COMP_WORDS=(dev.kit "")
+COMP_CWORD=1
+COMPREPLY=()
+_dev_kit_complete
+completion_list=" ${COMPREPLY[*]} "
+assert_contains "$completion_list" " status " "completion lists status"
+assert_contains "$completion_list" " bridge " "completion lists bridge"
+assert_contains "$completion_list" " audit " "completion lists audit"
+assert_contains "$completion_list" " --json " "completion lists global json flag"
+
+COMP_WORDS=(dev.kit bridge --)
+COMP_CWORD=2
+COMPREPLY=()
+_dev_kit_complete
+bridge_completion_list=" ${COMPREPLY[*]} "
+assert_contains "$bridge_completion_list" " --json " "bridge completion lists json flag"
+
+# Uninstall must remove both the symlink and the home, and leave profiles alone.
+UNINSTALL_OUTPUT="$("$DEV_KIT_HOME/bin/scripts/uninstall.sh")"
+assert_contains "$UNINSTALL_OUTPUT" "Removed binary:" "uninstall removes the global binary"
+assert_contains "$UNINSTALL_OUTPUT" "Removed home:" "uninstall removes the installed home"
+assert_file_missing "$DEV_KIT_BIN_DIR/dev.kit" "global symlink is removed"
+assert_file_missing "$DEV_KIT_HOME" "installed home is removed"
+
+for profile in "${PROFILE_FILES[@]}"; do
+  assert_command_output_contains "cat \"$profile\"" "test sentinel" "$(basename "$profile") remains unchanged after uninstall"
+done
+
+printf "ok - dev.kit integration suite completed\n"