Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 5 additions & 20 deletions .husky/pre-commit
Original file line number Diff line number Diff line change
@@ -1,27 +1,12 @@
#!/usr/bin/env sh
branch="$(git rev-parse --abbrev-ref HEAD)"

if [ "$branch" = "main" ]; then
echo "You can't commit directly to main - please check out a branch."
exit 1
fi

# Detect if running on Windows and use pnpm.cmd, otherwise use pnpm.
if [ "$OS" = "Windows_NT" ]; then
pnpm_cmd="pnpm.cmd"
else
if command -v pnpm >/dev/null 2>&1; then
pnpm_cmd="pnpm"
else
pnpm_cmd="npx pnpm"
fi
fi

# Detect if running on Windows and use npx.cmd, otherwise use npx.
if [ "$OS" = "Windows_NT" ]; then
npx_cmd="npx.cmd"
else
npx_cmd="npx"
fi

$npx_cmd lint-staged
$pnpm_cmd lint
# Use npx to run lint-staged and pnpm for lint
# Git Bash on Windows automatically resolves pnpm to pnpm.cmd
npx lint-staged
pnpm lint
19 changes: 5 additions & 14 deletions .husky/pre-push
Original file line number Diff line number Diff line change
@@ -1,33 +1,24 @@
#!/usr/bin/env sh
branch="$(git rev-parse --abbrev-ref HEAD)"

if [ "$branch" = "main" ]; then
echo "You can't push directly to main - please check out a branch."
exit 1
fi

# Detect if running on Windows and use pnpm.cmd, otherwise use pnpm.
if [ "$OS" = "Windows_NT" ]; then
pnpm_cmd="pnpm.cmd"
else
if command -v pnpm >/dev/null 2>&1; then
pnpm_cmd="pnpm"
else
pnpm_cmd="npx pnpm"
fi
fi

$pnpm_cmd run check-types
# Git Bash on Windows automatically resolves pnpm to pnpm.cmd
pnpm run check-types

# Use dotenvx to securely load .env.local and run commands that depend on it
if [ -f ".env.local" ]; then
# Check if RUN_TESTS_ON_PUSH is set to true and run tests with dotenvx
if npx dotenvx get RUN_TESTS_ON_PUSH -f .env.local 2>/dev/null | grep -q "^true$"; then
npx dotenvx run -f .env.local -- $pnpm_cmd run test
npx dotenvx run -f .env.local -- pnpm run test
fi
else
# Fallback: run tests if RUN_TESTS_ON_PUSH is set in regular environment
if [ "$RUN_TESTS_ON_PUSH" = "true" ]; then
$pnpm_cmd run test
pnpm run test
fi
fi

Expand Down
9 changes: 9 additions & 0 deletions apps/cli/turbo.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
{
"$schema": "https://turbo.build/schema.json",
"extends": ["//"],
"tasks": {
"build": {
"dependsOn": ["@roo-code/types#build", "@roo-code/core#build", "@roo-code/vscode-shim#build"]
}
}
}
2 changes: 1 addition & 1 deletion apps/web-evals/next-env.d.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
/// <reference types="next" />
/// <reference types="next/image-types/global" />
import "./.next/dev/types/routes.d.ts"
import "./.next/types/routes.d.ts"

// NOTE: This file should not be edited
// see https://nextjs.org/docs/app/api-reference/config/typescript for more information.
6 changes: 5 additions & 1 deletion apps/web-evals/next.config.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,11 @@
import path from "path"
import type { NextConfig } from "next"

const nextConfig: NextConfig = {
turbopack: {},
turbopack: {
root: path.join(__dirname, "../.."),
},
transpilePackages: ["@roo-code/types"],
}

export default nextConfig
1 change: 1 addition & 0 deletions apps/web-evals/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@
"@roo-code/config-typescript": "workspace:^",
"@tailwindcss/postcss": "^4",
"@types/archiver": "^7.0.0",
"@types/node": "^25.5.2",
"@types/ps-tree": "^1.1.6",
"@types/react": "^18.3.23",
"@types/react-dom": "^18.3.5",
Expand Down
2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "roo-code",
"packageManager": "pnpm@10.8.1",
"packageManager": "pnpm@10.33.0",
"engines": {
"node": "20.19.2"
},
Expand Down
2 changes: 1 addition & 1 deletion packages/core/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
},
"dependencies": {
"@roo-code/types": "workspace:^",
"esbuild": "^0.25.0",
"esbuild-wasm": "^0.25.12",
"execa": "^9.5.2",
"ignore": "^7.0.3",
"openai": "^5.12.2",
Expand Down
4 changes: 2 additions & 2 deletions packages/types/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,8 @@
"main": "./dist/index.cjs",
"exports": {
".": {
"types": "./src/index.ts",
"import": "./src/index.ts",
"types": "./dist/index.d.ts",
"import": "./dist/index.js",
"require": {
"types": "./dist/index.d.cts",
"default": "./dist/index.cjs"
Expand Down
32 changes: 32 additions & 0 deletions packages/types/src/mcp.ts
Original file line number Diff line number Diff line change
Expand Up @@ -99,22 +99,35 @@ export type McpResourceResponse = {
}>
}

// Annotations for content items in tool call responses
export type McpContentAnnotations = {
audience?: Array<"user" | "assistant">
priority?: number
lastModified?: string
}

export type McpToolCallResponse = {
_meta?: Record<string, any> // eslint-disable-line @typescript-eslint/no-explicit-any
content: Array<
| {
type: "text"
text: string
annotations?: McpContentAnnotations
_meta?: Record<string, any> // eslint-disable-line @typescript-eslint/no-explicit-any
}
| {
type: "image"
data: string
mimeType: string
annotations?: McpContentAnnotations
_meta?: Record<string, any> // eslint-disable-line @typescript-eslint/no-explicit-any
}
| {
type: "audio"
data: string
mimeType: string
annotations?: McpContentAnnotations
_meta?: Record<string, any> // eslint-disable-line @typescript-eslint/no-explicit-any
}
| {
type: "resource"
Expand All @@ -124,6 +137,25 @@ export type McpToolCallResponse = {
text?: string
blob?: string
}
annotations?: McpContentAnnotations
_meta?: Record<string, any> // eslint-disable-line @typescript-eslint/no-explicit-any
}
| {
type: "resource_link"
uri: string
name: string
description?: string
mimeType?: string
size?: number
annotations?: McpContentAnnotations
_meta?: Record<string, any> // eslint-disable-line @typescript-eslint/no-explicit-any
icons?: Array<{
src: string
mimeType?: string
sizes?: string[]
theme?: "light" | "dark"
}>
title?: string
}
>
isError?: boolean
Expand Down
15 changes: 15 additions & 0 deletions packages/types/src/provider-settings.ts
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ import {
openAiNativeModels,
qwenCodeModels,
sambaNovaModels,
nvidiaModels,
vertexModels,
vscodeLlmModels,
xaiModels,
Expand Down Expand Up @@ -117,6 +118,7 @@ export const providerNames = [
"qwen-code",
"roo",
"sambanova",
"nvidia",
"vertex",
"xai",
"zai",
Expand Down Expand Up @@ -350,6 +352,11 @@ const sambaNovaSchema = apiModelIdProviderModelSchema.extend({
sambaNovaApiKey: z.string().optional(),
})

const nvidiaSchema = apiModelIdProviderModelSchema.extend({
nvidiaApiKey: z.string().optional(),
nvidiaBaseUrl: z.string().optional(),
})

export const zaiApiLineSchema = z.enum(["international_coding", "china_coding", "international_api", "china_api"])

export type ZaiApiLine = z.infer<typeof zaiApiLineSchema>
Expand Down Expand Up @@ -409,6 +416,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
basetenSchema.merge(z.object({ apiProvider: z.literal("baseten") })),
litellmSchema.merge(z.object({ apiProvider: z.literal("litellm") })),
sambaNovaSchema.merge(z.object({ apiProvider: z.literal("sambanova") })),
nvidiaSchema.merge(z.object({ apiProvider: z.literal("nvidia") })),
zaiSchema.merge(z.object({ apiProvider: z.literal("zai") })),
fireworksSchema.merge(z.object({ apiProvider: z.literal("fireworks") })),
qwenCodeSchema.merge(z.object({ apiProvider: z.literal("qwen-code") })),
Expand Down Expand Up @@ -442,6 +450,7 @@ export const providerSettingsSchema = z.object({
...basetenSchema.shape,
...litellmSchema.shape,
...sambaNovaSchema.shape,
...nvidiaSchema.shape,
...zaiSchema.shape,
...fireworksSchema.shape,
...qwenCodeSchema.shape,
Expand Down Expand Up @@ -517,6 +526,7 @@ export const modelIdKeysByProvider: Record<TypicalProvider, ModelIdKey> = {
baseten: "apiModelId",
litellm: "litellmModelId",
sambanova: "apiModelId",
nvidia: "apiModelId",
zai: "apiModelId",
fireworks: "apiModelId",
roo: "apiModelId",
Expand Down Expand Up @@ -617,6 +627,11 @@ export const MODELS_BY_PROVIDER: Record<
label: "SambaNova",
models: Object.keys(sambaNovaModels),
},
nvidia: {
id: "nvidia",
label: "NVIDIA NIM",
models: Object.keys(nvidiaModels),
},
vertex: {
id: "vertex",
label: "GCP Vertex AI",
Expand Down
4 changes: 4 additions & 0 deletions packages/types/src/providers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ export * from "./qwen-code.js"
export * from "./requesty.js"
export * from "./roo.js"
export * from "./sambanova.js"
export * from "./nvidia.js"
export * from "./unbound.js"
export * from "./vertex.js"
export * from "./vscode-llm.js"
Expand All @@ -40,6 +41,7 @@ import { qwenCodeDefaultModelId } from "./qwen-code.js"
import { requestyDefaultModelId } from "./requesty.js"
import { rooDefaultModelId } from "./roo.js"
import { sambaNovaDefaultModelId } from "./sambanova.js"
import { nvidiaDefaultModelId } from "./nvidia.js"
import { unboundDefaultModelId } from "./unbound.js"
import { vertexDefaultModelId } from "./vertex.js"
import { vscodeLlmDefaultModelId } from "./vscode-llm.js"
Expand Down Expand Up @@ -101,6 +103,8 @@ export function getProviderDefaultModelId(
return vscodeLlmDefaultModelId
case "sambanova":
return sambaNovaDefaultModelId
case "nvidia":
return nvidiaDefaultModelId
case "fireworks":
return fireworksDefaultModelId
case "roo":
Expand Down
93 changes: 93 additions & 0 deletions packages/types/src/providers/nvidia.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
import type { ModelInfo } from "../model.js"

// NVIDIA NIM API model IDs
// Reference: https://build.nvidia.com/explore/discover/models
// NOTE(review): several of these IDs ("google/gemma-4-31b-it", "z-ai/glm5",
// "qwen/qwen3.5-*") could not be confirmed against the public NIM catalog —
// verify they exist before release.
export type NvidiaModelId =
	| "deepseek-ai/deepseek-r1"
	| "google/gemma-4-31b-it"
	| "meta/llama-3.1-405b-instruct"
	| "meta/llama-3.3-70b-instruct"
	| "z-ai/glm5"
	| "qwen/qwen3.5-397b-a17b"
	| "qwen/qwen3.5-122b-a10b"

/** Default model used when the user has not picked one for the NVIDIA provider. */
export const nvidiaDefaultModelId: NvidiaModelId = "deepseek-ai/deepseek-r1"

/**
 * Capability and pricing metadata for each NVIDIA NIM model.
 *
 * Prices are USD per million tokens; 0.0 means "varies by deployment" (NIM can
 * be self-hosted, so there is no fixed list price).
 * `supportsReasoningBinary` drives the reasoning on/off toggle in the UI and
 * maps to the NIM `enable_thinking` request parameter.
 */
export const nvidiaModels = {
	// DeepSeek R1 - reasoning model with enable_thinking support
	"deepseek-ai/deepseek-r1": {
		maxTokens: 16384,
		contextWindow: 128000,
		supportsImages: false,
		supportsPromptCache: false,
		// Critical: This flag enables the reasoning toggle in UI
		supportsReasoningBinary: true,
		inputPrice: 0.0, // NVIDIA NIM pricing varies by deployment
		outputPrice: 0.0,
		description:
			"DeepSeek R1 reasoning model via NVIDIA NIM API. Supports chain-of-thought reasoning with enable_thinking parameter.",
	},
	// Google Gemma 4 - reasoning model with enable_thinking support
	"google/gemma-4-31b-it": {
		maxTokens: 16384,
		contextWindow: 128000,
		supportsImages: false,
		supportsPromptCache: false,
		supportsReasoningBinary: true,
		inputPrice: 0.0,
		outputPrice: 0.0,
		description:
			"Google Gemma 4 31B reasoning model via NVIDIA NIM API. Supports chain-of-thought reasoning with enable_thinking parameter.",
	},
	// Llama models - no reasoning support, included for completeness.
	// FIX: Llama 3.1/3.3 instruct models are text-only; vision input is only
	// offered by the separate Llama 3.2-Vision line, so supportsImages must be
	// false here (it was incorrectly true).
	"meta/llama-3.1-405b-instruct": {
		maxTokens: 8192,
		contextWindow: 128000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0.0,
		outputPrice: 0.0,
		description: "Meta Llama 3.1 405B Instruct model via NVIDIA NIM API.",
	},
	"meta/llama-3.3-70b-instruct": {
		maxTokens: 8192,
		contextWindow: 128000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0.0,
		outputPrice: 0.0,
		description: "Meta Llama 3.3 70B Instruct model via NVIDIA NIM API.",
	},
	// GLM5 - Z.AI reasoning model with enable_thinking support
	"z-ai/glm5": {
		maxTokens: 32768,
		contextWindow: 204800,
		supportsImages: false,
		supportsPromptCache: false,
		supportsReasoningBinary: true,
		inputPrice: 1.0,
		outputPrice: 3.2,
		description: "Z.AI GLM5 reasoning model via NVIDIA NIM API. Supports chain-of-thought reasoning.",
	},
	// Qwen 3.5 models - reasoning models with enable_thinking support
	"qwen/qwen3.5-397b-a17b": {
		maxTokens: 32768,
		contextWindow: 262144,
		supportsImages: false,
		supportsPromptCache: false,
		supportsReasoningBinary: true,
		inputPrice: 0.0,
		outputPrice: 0.0,
		description: "Qwen 3.5 397B reasoning model via NVIDIA NIM API. Supports chain-of-thought reasoning.",
	},
	"qwen/qwen3.5-122b-a10b": {
		maxTokens: 32768,
		contextWindow: 262144,
		supportsImages: false,
		supportsPromptCache: false,
		supportsReasoningBinary: true,
		inputPrice: 0.0,
		outputPrice: 0.0,
		description: "Qwen 3.5 122B reasoning model via NVIDIA NIM API. Supports chain-of-thought reasoning.",
	},
} as const satisfies Record<string, ModelInfo>
Loading
Loading