From abf31b07c9cff717d40fe6e50c2fb11eac3d40b9 Mon Sep 17 00:00:00 2001
From: gaafa <34062684+AmitoVrito@users.noreply.github.com>
Date: Thu, 19 Mar 2026 19:37:48 +0100
Subject: [PATCH] =?UTF-8?q?docs:=20update=20for=20v1.1.0=20=E2=80=94=20Gra?=
=?UTF-8?q?phRAG,=20Redis=20memory,=20Vertex=20AI,=20MarkdownSplitter,=20g?=
=?UTF-8?q?raph=20visualization?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
- Add v1.1.0 changelog and roadmap entries
- Add GraphRAG retrieval strategy to retriever docs
- Add MarkdownTextSplitter to splitter docs
- Add RedisConversationMemory to memory docs
- Add Vertex AI provider page
- Add trace-highlighted Mermaid and GraphVisualizer to graph docs
- Update sidebar and announcement bar for v1.1.0
---
docs/changelog.md | 23 ++++++++++
docs/graph/mermaid.md | 56 +++++++++++++++++++++++
docs/llms/vertex-ai.md | 88 +++++++++++++++++++++++++++++++++++++
docs/memory/conversation.md | 55 +++++++++++++++++++++++
docs/rag/retriever.md | 53 ++++++++++++++++++++++
docs/rag/splitter.md | 36 +++++++++++++++
docs/roadmap.md | 13 +++++-
docusaurus.config.ts | 6 +--
sidebars.ts | 1 +
9 files changed, 327 insertions(+), 4 deletions(-)
create mode 100644 docs/llms/vertex-ai.md
diff --git a/docs/changelog.md b/docs/changelog.md
index ecc1062..1b1f0e5 100644
--- a/docs/changelog.md
+++ b/docs/changelog.md
@@ -8,6 +8,29 @@ All notable changes to SynapseKit are documented here.
---
+## v1.1.0 — GraphRAG, Redis Memory, Vertex AI, MarkdownSplitter, Graph Visualization
+
+**Retrieval**
+- `GraphRAGRetriever` — entity-based graph traversal merged with vector retrieval for knowledge-graph-augmented RAG
+- `KnowledgeGraph` — in-memory graph store with triples, BFS traversal, and LLM-powered entity extraction
+
+**Memory**
+- `RedisConversationMemory` — persistent conversation memory backed by Redis with windowing support (`pip install synapsekit[redis]`)
+
+**LLM Providers**
+- `VertexAILLM` — Google Vertex AI with Application Default Credentials, streaming, and native function calling (`pip install synapsekit[vertex]`)
+
+**Text Splitters**
+- `MarkdownTextSplitter` — header-aware chunking that preserves parent header context, with recursive fallback for oversized sections
+
+**Graph Visualization**
+- `GraphVisualizer` — ASCII timeline rendering, Mermaid trace highlighting, step-by-step replay, and standalone HTML export
+- `get_mermaid_with_trace()` — Mermaid flowcharts with CSS classes for completed/errored/skipped nodes
+
+**Stats:** 1047 tests, 16 LLM providers, 20 retrieval strategies, 6 text splitters, 9 memory backends
+
+---
+
## v1.0.0 — Multimodal + Image Loader + API Markers
**Multimodal**
diff --git a/docs/graph/mermaid.md b/docs/graph/mermaid.md
index 544a18d..2f9e419 100644
--- a/docs/graph/mermaid.md
+++ b/docs/graph/mermaid.md
@@ -80,3 +80,59 @@ GitHub renders Mermaid diagrams natively.
- `END` renders as `__end__`
- Conditional edge labels come from the `mapping` keys
- Only static structure is reflected — conditional routing is shown as all possible branches
+
+## Trace-highlighted Mermaid
+
+`get_mermaid_with_trace()` generates a Mermaid diagram with CSS classes showing execution status — useful for debugging failed graph runs.
+
+```python
+from synapsekit import StateGraph, ExecutionTrace, EventHooks
+
+trace = ExecutionTrace()
+hooks = trace.hook(EventHooks())
+result = await compiled.run(state, hooks=hooks)
+
+from synapsekit.graph.mermaid import get_mermaid_with_trace
+print(get_mermaid_with_trace(graph, trace))
+```
+
+Nodes are styled with:
+- **completed** (green) — node finished successfully
+- **errored** (red) — node encountered an error
+- **skipped** (gray) — node was not executed
+
+## GraphVisualizer
+
+`GraphVisualizer` provides a higher-level visualization API with multiple output formats.
+
+```python
+from synapsekit import GraphVisualizer
+
+viz = GraphVisualizer(compiled)
+
+# ASCII timeline with wave grouping
+print(viz.render_trace(trace))
+# Wave 1:
+# [ingest] 12.3ms
+# Wave 2:
+# [classify] 8.1ms
+# Total: 20.4ms
+
+# Step-by-step replay
+for step in viz.replay_steps(trace):
+ print(step["node"], step["duration_ms"], step["status"])
+
+# Standalone HTML with embedded Mermaid
+html = viz.to_html(trace)
+with open("graph.html", "w") as f:
+ f.write(html)
+```
+
+### Methods
+
+| Method | Description |
+|---|---|
+| `render_trace(trace)` | ASCII timeline with wave grouping and durations |
+| `render_mermaid(trace=None)` | Static or trace-highlighted Mermaid diagram |
+| `replay_steps(trace)` | List of step dicts with node, duration, wave, status |
+| `to_html(trace=None)` | Standalone HTML with embedded Mermaid JS |
diff --git a/docs/llms/vertex-ai.md b/docs/llms/vertex-ai.md
new file mode 100644
index 0000000..af4cda1
--- /dev/null
+++ b/docs/llms/vertex-ai.md
@@ -0,0 +1,88 @@
+---
+sidebar_position: 17
+---
+
+# Google Vertex AI
+
+## Install
+
+```bash
+pip install synapsekit[vertex]
+```
+
+## Via the RAG facade
+
+```python
+from synapsekit import RAG
+
+rag = RAG(model="gemini-1.5-pro", api_key="your-gcp-project-id")
+rag.add("Your document text here")
+
+answer = rag.ask_sync("Summarize the document.")
+```
+
+:::info
+Vertex AI uses Application Default Credentials (ADC). The `api_key` field is used as the GCP project ID. Make sure you have authenticated with `gcloud auth application-default login`.
+:::
+
+## Direct usage
+
+```python
+from synapsekit.llm.vertex_ai import VertexAILLM
+from synapsekit.llm.base import LLMConfig
+
+llm = VertexAILLM(LLMConfig(
+ model="gemini-1.5-pro",
+ api_key="your-gcp-project-id",
+ provider="vertex",
+ temperature=0.3,
+ max_tokens=1024,
+))
+
+async for token in llm.stream("Explain vector embeddings."):
+ print(token, end="", flush=True)
+```
+
+## Function calling
+
+`VertexAILLM` supports native function calling via `call_with_tools()`.
+
+```python
+from synapsekit import FunctionCallingAgent, CalculatorTool
+from synapsekit.llm.vertex_ai import VertexAILLM
+from synapsekit.llm.base import LLMConfig
+
+llm = VertexAILLM(LLMConfig(
+ model="gemini-1.5-pro",
+ api_key="your-gcp-project-id",
+ provider="vertex",
+))
+
+agent = FunctionCallingAgent(
+ llm=llm,
+ tools=[CalculatorTool()],
+)
+
+answer = await agent.run("What is 144 divided by 12?")
+```
+
+### How it works
+
+SynapseKit converts OpenAI-format tool schemas to Vertex AI function declarations via `Tool.from_dict()`. Response `function_call` parts are parsed back into the standard format with `id`, `name`, and `arguments` keys. Since Vertex AI doesn't provide tool call IDs, SynapseKit generates them via `uuid4`.
+
+## Supported models
+
+- `gemini-1.5-pro` — most capable
+- `gemini-1.5-flash` — faster, lower cost
+- `gemini-1.0-pro`
+- Any model available in your Vertex AI project
+
+See [Vertex AI docs](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models) for the full list.
+
+## Vertex AI vs Gemini
+
+| | VertexAILLM | GeminiLLM |
+|---|---|---|
+| Auth | ADC (service accounts, gcloud) | API key |
+| Package | `google-cloud-aiplatform` | `google-generativeai` |
+| Best for | Production / enterprise | Development / prototyping |
diff --git a/docs/memory/conversation.md b/docs/memory/conversation.md
index 8022e02..fd8513a 100644
--- a/docs/memory/conversation.md
+++ b/docs/memory/conversation.md
@@ -400,3 +400,58 @@ context = memory.format_context()
| `get_entities()` | `dict[str, str]` | Entity name → description mapping |
| `format_context()` | `str` | Entities section + messages |
| `clear()` | `None` | Clear messages and entities |
+
+---
+
+## RedisConversationMemory
+
+`RedisConversationMemory` provides persistent conversation memory backed by Redis. Messages survive process restarts, and multiple concurrent conversations are supported.
+
+```bash
+pip install synapsekit[redis]
+```
+
+### Usage
+
+```python
+from synapsekit.memory.redis import RedisConversationMemory
+
+memory = RedisConversationMemory(
+ url="redis://localhost:6379",
+ conversation_id="user-42",
+ window=10,
+)
+
+memory.add("user", "What is SynapseKit?")
+memory.add("assistant", "An async-first RAG framework.")
+
+messages = memory.get_messages()
+context = memory.format_context()
+
+# List all conversations
+conversations = memory.list_conversations()
+
+# Clean up
+memory.clear()
+memory.close()
+```
+
+### Parameters
+
+| Parameter | Default | Description |
+|---|---|---|
+| `url` | `"redis://localhost:6379"` | Redis connection URL |
+| `conversation_id` | `"default"` | Conversation identifier for multi-conversation support |
+| `window` | `None` | Max message pairs to keep (None = unlimited) |
+| `prefix` | `"synapsekit:memory:"` | Redis key prefix for namespacing |
+
+### Methods
+
+| Method | Description |
+|---|---|
+| `add(role, content, metadata=None)` | Append a message |
+| `get_messages()` | Return all messages for this conversation |
+| `format_context()` | Flatten history to a plain string |
+| `clear()` | Delete all messages for this conversation |
+| `list_conversations()` | List all conversation IDs |
+| `close()` | Close the Redis connection |
diff --git a/docs/rag/retriever.md b/docs/rag/retriever.md
index bf12ef0..56bb727 100644
--- a/docs/rag/retriever.md
+++ b/docs/rag/retriever.md
@@ -609,6 +609,50 @@ results, classification = await adaptive.retrieve_with_classification("query")
print(classification) # "simple", "moderate", or "complex"
```
+## GraphRAG (Knowledge Graph Retrieval)
+
+The `GraphRAGRetriever` combines knowledge graph traversal with vector retrieval. It extracts entities from the query, traverses a knowledge graph to find related documents, and merges those with standard vector retrieval results.
+
+```python
+from synapsekit import GraphRAGRetriever, KnowledgeGraph
+
+# Build a knowledge graph
+kg = KnowledgeGraph()
+kg.add_triple("Python", "is_a", "programming language")
+kg.add_triple("Python", "used_for", "machine learning")
+kg.add_document_link("Python", "doc_1")
+kg.add_document_link("machine learning", "doc_2")
+
+# Or build from documents using an LLM
+await kg.build_from_documents(["Python is a programming language used for ML..."], llm)
+
+# Combine with vector retrieval
+graphrag = GraphRAGRetriever(
+ retriever=retriever,
+ llm=llm,
+ knowledge_graph=kg,
+ max_hops=2,
+)
+
+results = await graphrag.retrieve("What is Python used for?", top_k=5)
+```
+
+The process:
+1. The LLM extracts entities from the query
+2. The knowledge graph is traversed up to `max_hops` from each entity
+3. Related documents are gathered from the graph
+4. Standard vector retrieval runs in parallel
+5. Results are merged and deduplicated
+
+### Inspecting graph metadata
+
+```python
+results, meta = await graphrag.retrieve_with_graph("query", top_k=5)
+print(meta["entities_extracted"]) # Entities found in the query
+print(meta["graph_docs"]) # Documents from graph traversal
+print(meta["traversal_hops"]) # Max hops used
+```
+
## Multi-Step Retrieval
The `MultiStepRetriever` performs iterative retrieval-generation: retrieve documents, generate an answer, identify information gaps, retrieve for those gaps, and repeat until the answer is complete or `max_steps` is reached.
@@ -782,3 +826,12 @@ for step in trace:
| `retriever` | — | Base `Retriever` instance |
| `llm` | — | LLM for answer generation and gap identification |
| `max_steps` | `3` | Maximum retrieval-generation iterations |
+
+### GraphRAGRetriever
+
+| Parameter | Default | Description |
+|---|---|---|
+| `retriever` | — | Base `Retriever` instance |
+| `llm` | — | LLM for entity extraction |
+| `knowledge_graph` | `None` | `KnowledgeGraph` instance (falls back to vector-only if None) |
+| `max_hops` | `2` | Maximum graph traversal hops from extracted entities |
diff --git a/docs/rag/splitter.md b/docs/rag/splitter.md
index 59eddf2..e801ea8 100644
--- a/docs/rag/splitter.md
+++ b/docs/rag/splitter.md
@@ -119,6 +119,42 @@ chunks = splitter.split(document)
`SemanticSplitter` requires `sentence-transformers`. Install with `pip install synapsekit[semantic]`.
:::
+## MarkdownTextSplitter
+
+Splits markdown text while respecting document structure. Headers define natural split points, and each chunk carries its parent header context for semantic completeness.
+
+```python
+from synapsekit import MarkdownTextSplitter
+
+splitter = MarkdownTextSplitter(
+ chunk_size=512,
+ chunk_overlap=50,
+)
+
+chunks = splitter.split("""# User Guide
+## Installation
+Run pip install synapsekit to get started.
+
+## Quick Start
+Import RAG and create a pipeline.
+
+### Configuration
+Set your API key in the config.
+""")
+# Each chunk includes parent headers:
+# "# User Guide\n## Installation\nRun pip install..."
+# "# User Guide\n## Quick Start\nImport RAG and..."
+# "# User Guide\n## Quick Start\n### Configuration\nSet your..."
+```
+
+| Parameter | Default | Description |
+|---|---|---|
+| `chunk_size` | `512` | Maximum characters per chunk |
+| `chunk_overlap` | `50` | Characters of overlap between consecutive chunks |
+| `headers_to_split_on` | `[("#", "Header1"), ("##", "Header2"), ("###", "Header3"), ("####", "Header4")]` | Header markers and labels to split on |
+
+Oversized sections without headers fall back to `RecursiveCharacterTextSplitter` with `---`, `\n\n`, `\n`, `. `, ` ` as separators.
+
## Using splitters with RAGPipeline
By default, `RAGPipeline` uses `RecursiveCharacterTextSplitter` with the `chunk_size` and `chunk_overlap` from `RAGConfig`. You can override this by passing any `BaseSplitter` to `RAGConfig.splitter`:
diff --git a/docs/roadmap.md b/docs/roadmap.md
index 5215338..27d3f4a 100644
--- a/docs/roadmap.md
+++ b/docs/roadmap.md
@@ -245,7 +245,18 @@ sidebar_position: 99
- **API Markers: @experimental** — mark experimental features
- **API Markers: @deprecated(reason, alternative)** — deprecation with migration guidance
-## Phase 12 — Platform 🔜
+## Phase 12 — Retrieval, Memory, Providers & Visualization ✅ Done (v1.1.0)
+
+- **Retrieval: GraphRAGRetriever** — knowledge-graph-augmented retrieval: extract entities via LLM, traverse KG, merge with vector results
+- **Retrieval: KnowledgeGraph** — in-memory triple store with BFS traversal, entity-document linking, LLM-powered extraction
+- **Memory: RedisConversationMemory** — Redis-backed conversation memory with windowing, multi-conversation, JSON serialization
+- **LLM: VertexAILLM** — Google Vertex AI provider with ADC auth, streaming, native function calling
+- **Text Splitters: MarkdownTextSplitter** — header-hierarchy-aware splitting with parent context preservation
+- **Graph: GraphVisualizer** — ASCII timeline, Mermaid trace highlighting, step replay, HTML export
+- **Graph: get_mermaid_with_trace()** — Mermaid diagrams with CSS status classes (completed/errored/skipped)
+- 16 providers, 20 retrieval strategies, 6 text splitters, 9 memory backends, 1047 tests passing
+
+## Phase 13 — Platform 🔜
- `synapsekit serve` — deploy any app as FastAPI in one command
- Prompt hub — versioned prompt registry
diff --git a/docusaurus.config.ts b/docusaurus.config.ts
index 6273d4a..67d9e57 100644
--- a/docusaurus.config.ts
+++ b/docusaurus.config.ts
@@ -81,8 +81,8 @@ const config: Config = {
respectPrefersColorScheme: true,
},
announcementBar: {
- id: 'v068',
- content: '🎉 SynapseKit v0.6.8 — VectorSearch, PubMed, GitHub API, Email, YouTube tools + ExecutionTrace + WebSocket streaming. See what\'s new →',
+ id: 'v110',
+ content: '🎉 SynapseKit v1.1.0 — GraphRAG, Redis Memory, Vertex AI, MarkdownSplitter, Graph Visualization. See what\'s new →',
backgroundColor: '#161b22',
textColor: '#8b949e',
isCloseable: true,
@@ -109,7 +109,7 @@ const config: Config = {
{
type: 'html',
position: 'left',
- value: 'v0.6.8',
+ value: 'v1.1.0',
},
{
href: 'https://github.com/SynapseKit/SynapseKit',
diff --git a/sidebars.ts b/sidebars.ts
index 9bc2c48..7d8fc04 100644
--- a/sidebars.ts
+++ b/sidebars.ts
@@ -41,6 +41,7 @@ const sidebars: SidebarsConfig = {
'llms/fireworks',
'llms/perplexity',
'llms/cerebras',
+ 'llms/vertex-ai',
'llms/caching-retries',
],
},