diff --git a/src/api/providers/lm-studio.ts b/src/api/providers/lm-studio.ts
index a771394c53..519e3dc3cf 100644
--- a/src/api/providers/lm-studio.ts
+++ b/src/api/providers/lm-studio.ts
@@ -10,6 +10,7 @@ import { NativeToolCallParser } from "../../core/assistant-message/NativeToolCal
 import { TagMatcher } from "../../utils/tag-matcher"
 
 import { convertToOpenAiMessages } from "../transform/openai-format"
+import { convertToZAiFormat } from "../transform/zai-format"
 import { ApiStream } from "../transform/stream"
 
 import { BaseProvider } from "./base-provider"
@@ -17,11 +18,13 @@ import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from ".
 import { getModelsFromCache } from "./fetchers/modelCache"
 import { getApiRequestTimeout } from "./utils/timeout-config"
 import { handleOpenAIError } from "./utils/openai-error-handler"
+import { detectGlmModel, logGlmDetection, type GlmModelConfig } from "./utils/glm-model-detection"
 
 export class LmStudioHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private client: OpenAI
 	private readonly providerName = "LM Studio"
+	private glmConfig: GlmModelConfig
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -35,6 +38,12 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 			apiKey: apiKey,
 			timeout: getApiRequestTimeout(),
 		})
+
+		// Detect if this is a GLM model and apply optimizations
+		this.glmConfig = detectGlmModel(this.options.lmStudioModelId)
+		if (this.options.lmStudioModelId) {
+			logGlmDetection(this.providerName, this.options.lmStudioModelId, this.glmConfig)
+		}
 	}
 
 	override async *createMessage(
@@ -42,10 +51,19 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 		messages: Anthropic.Messages.MessageParam[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
-		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
-			{ role: "system", content: systemPrompt },
-			...convertToOpenAiMessages(messages),
-		]
+		// For GLM models, use Z.ai format with mergeToolResultText to prevent conversation flow disruption
+		// For other models, use standard OpenAI format
+		let openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[]
+		if (this.glmConfig.isGlm && this.glmConfig.mergeToolResultText) {
+			// Use Z.ai format converter which merges text after tool results into tool messages
+			const convertedMessages = convertToZAiFormat(messages, { mergeToolResultText: true })
+			openAiMessages = [{ role: "system", content: systemPrompt }, ...convertedMessages]
+		} else {
+			openAiMessages = [
+				{ role: "system", content: systemPrompt },
+				...convertToOpenAiMessages(messages),
+			]
+		}
 
 		// -------------------------
 		// Track token usage
@@ -83,14 +101,27 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 		let assistantText = ""
 
 		try {
+			// Determine temperature: use GLM default (0.6) for GLM models, otherwise LM Studio default (0)
+			const temperature = this.options.modelTemperature ??
+				(this.glmConfig.isGlm ? this.glmConfig.temperature : LMSTUDIO_DEFAULT_TEMPERATURE)
+
+			// For GLM models, disable parallel_tool_calls as GLM models may not support it
+			const parallelToolCalls = this.glmConfig.isGlm && this.glmConfig.disableParallelToolCalls
+				? false
+				: (metadata?.parallelToolCalls ?? true)
+
+			if (this.glmConfig.isGlm && this.glmConfig.disableParallelToolCalls) {
+				console.log(`[${this.providerName}] parallel_tool_calls disabled for GLM model`)
+			}
+
 			const params: OpenAI.Chat.ChatCompletionCreateParamsStreaming & { draft_model?: string } = {
 				model: this.getModel().id,
 				messages: openAiMessages,
-				temperature: this.options.modelTemperature ?? LMSTUDIO_DEFAULT_TEMPERATURE,
+				temperature,
 				stream: true,
 				tools: this.convertToolsForOpenAI(metadata?.tools),
 				tool_choice: metadata?.tool_choice,
-				parallel_tool_calls: metadata?.parallelToolCalls ?? true,
+				parallel_tool_calls: parallelToolCalls,
 			}
 
 			if (this.options.lmStudioSpeculativeDecodingEnabled && this.options.lmStudioDraftModelId) {
@@ -187,11 +218,15 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 
 	async completePrompt(prompt: string): Promise<string> {
 		try {
+			// Determine temperature: use GLM default (0.6) for GLM models, otherwise LM Studio default (0)
+			const temperature = this.options.modelTemperature ??
+				(this.glmConfig.isGlm ? this.glmConfig.temperature : LMSTUDIO_DEFAULT_TEMPERATURE)
+
 			// Create params object with optional draft model
 			const params: any = {
 				model: this.getModel().id,
 				messages: [{ role: "user", content: prompt }],
-				temperature: this.options.modelTemperature ?? LMSTUDIO_DEFAULT_TEMPERATURE,
+				temperature,
 				stream: false,
 			}
 
diff --git a/src/api/providers/openai.ts b/src/api/providers/openai.ts
index 87589b9396..a87bfb8fc0 100644
--- a/src/api/providers/openai.ts
+++ b/src/api/providers/openai.ts
@@ -16,6 +16,7 @@ import { TagMatcher } from "../../utils/tag-matcher"
 
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { convertToR1Format } from "../transform/r1-format"
+import { convertToZAiFormat } from "../transform/zai-format"
 import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
 
 import { getModelParams } from "../transform/model-params"
@@ -24,6 +25,7 @@ import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getApiRequestTimeout } from "./utils/timeout-config"
 import { handleOpenAIError } from "./utils/openai-error-handler"
+import { detectGlmModel, logGlmDetection, type GlmModelConfig } from "./utils/glm-model-detection"
 
 // TODO: Rename this to OpenAICompatibleHandler. Also, I think the
 // `OpenAINativeHandler` can subclass from this, since it's obviously
@@ -31,7 +33,8 @@ import { handleOpenAIError } from "./utils/openai-error-handler"
 export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	protected client: OpenAI
-	private readonly providerName = "OpenAI"
+	private readonly providerName = "OpenAI Compatible"
+	private glmConfig: GlmModelConfig
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -77,6 +80,12 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 				timeout,
 			})
 		}
+
+		// Detect if this is a GLM model and apply optimizations
+		this.glmConfig = detectGlmModel(this.options.openAiModelId)
+		if (this.options.openAiModelId) {
+			logGlmDetection(this.providerName, this.options.openAiModelId, this.glmConfig)
+		}
 	}
 
 	override async *createMessage(
@@ -106,6 +115,10 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 
 		if (deepseekReasoner) {
 			convertedMessages = convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
+		} else if (this.glmConfig.isGlm && this.glmConfig.mergeToolResultText) {
+			// For GLM models, use Z.ai format with mergeToolResultText to prevent conversation flow disruption
+			const zaiMessages = convertToZAiFormat(messages, { mergeToolResultText: true })
+			convertedMessages = [systemMessage, ...zaiMessages]
 		} else {
 			if (modelInfo.supportsPromptCache) {
 				systemMessage = {
@@ -152,16 +165,37 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 
 			const isGrokXAI = this._isGrokXAI(this.options.openAiBaseUrl)
 
+			// Determine temperature: use GLM default (0.6) for GLM models, DeepSeek default for DeepSeek, otherwise 0
+			let temperature: number | undefined
+			if (this.options.modelTemperature !== undefined) {
+				temperature = this.options.modelTemperature
+			} else if (this.glmConfig.isGlm) {
+				temperature = this.glmConfig.temperature
+			} else if (deepseekReasoner) {
+				temperature = DEEP_SEEK_DEFAULT_TEMPERATURE
+			} else {
+				temperature = 0
+			}
+
+			// For GLM models, disable parallel_tool_calls as GLM models may not support it
+			const parallelToolCalls = this.glmConfig.isGlm && this.glmConfig.disableParallelToolCalls
+				? false
+				: (metadata?.parallelToolCalls ?? true)
+
+			if (this.glmConfig.isGlm && this.glmConfig.disableParallelToolCalls) {
+				console.log(`[${this.providerName}] parallel_tool_calls disabled for GLM model`)
+			}
+
 			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 				model: modelId,
-				temperature: this.options.modelTemperature ?? (deepseekReasoner ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
+				temperature,
 				messages: convertedMessages,
 				stream: true as const,
 				...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
 				...(reasoning && reasoning),
 				tools: this.convertToolsForOpenAI(metadata?.tools),
 				tool_choice: metadata?.tool_choice,
-				parallel_tool_calls: metadata?.parallelToolCalls ?? true,
+				parallel_tool_calls: parallelToolCalls,
 			}
 
 			// Add max_tokens if needed
@@ -221,15 +255,30 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 				yield this.processUsageMetrics(lastUsage, modelInfo)
 			}
 		} else {
+			// Non-streaming: also apply GLM-specific settings
+			let nonStreamingMessages
+			if (deepseekReasoner) {
+				nonStreamingMessages = convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
+			} else if (this.glmConfig.isGlm && this.glmConfig.mergeToolResultText) {
+				// For GLM models, use Z.ai format with mergeToolResultText
+				const zaiMessages = convertToZAiFormat(messages, { mergeToolResultText: true })
+				nonStreamingMessages = [systemMessage, ...zaiMessages]
+			} else {
+				nonStreamingMessages = [systemMessage, ...convertToOpenAiMessages(messages)]
+			}
+
+			// For GLM models, disable parallel_tool_calls
+			const nonStreamingParallelToolCalls = this.glmConfig.isGlm && this.glmConfig.disableParallelToolCalls
+				? false
+				: (metadata?.parallelToolCalls ?? true)
+
 			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
 				model: modelId,
-				messages: deepseekReasoner
-					? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
-					: [systemMessage, ...convertToOpenAiMessages(messages)],
+				messages: nonStreamingMessages,
 				// Tools are always present (minimum ALWAYS_AVAILABLE_TOOLS)
 				tools: this.convertToolsForOpenAI(metadata?.tools),
 				tool_choice: metadata?.tool_choice,
-				parallel_tool_calls: metadata?.parallelToolCalls ?? true,
+				parallel_tool_calls: nonStreamingParallelToolCalls,
 			}
 
 			// Add max_tokens if needed
diff --git a/src/api/providers/utils/__tests__/glm-model-detection.spec.ts b/src/api/providers/utils/__tests__/glm-model-detection.spec.ts
new file mode 100644
index 0000000000..833057e004
--- /dev/null
+++ b/src/api/providers/utils/__tests__/glm-model-detection.spec.ts
@@ -0,0 +1,231 @@
+import { describe, it, expect } from "vitest"
+import { detectGlmModel, type GlmModelConfig, type GlmVersion, type GlmVariant } from "../glm-model-detection"
+import { ZAI_DEFAULT_TEMPERATURE } from "@roo-code/types"
+
+describe("GLM Model Detection", () => {
+	describe("detectGlmModel", () => {
+		describe("when model ID is undefined or empty", () => {
+			it("should return non-GLM config for undefined", () => {
+				const result = detectGlmModel(undefined)
+				expect(result.isGlm).toBe(false)
+			})
+
+			it("should return non-GLM config for empty string", () => {
+				const result = detectGlmModel("")
+				expect(result.isGlm).toBe(false)
+			})
+		})
+
+		describe("when model ID is not a GLM model", () => {
+			it("should return non-GLM config for non-GLM models", () => {
+				const nonGlmModels = [
+					"gpt-4",
+					"gpt-4-turbo",
+					"claude-3-opus",
+					"llama-3.1-70b",
+					"deepseek-coder",
+					"qwen-2.5",
+				]
+
+				for (const modelId of nonGlmModels) {
+					const result = detectGlmModel(modelId)
+					expect(result.isGlm).toBe(false)
+					expect(result.temperature).toBe(0)
+					expect(result.mergeToolResultText).toBe(false)
+					expect(result.disableParallelToolCalls).toBe(false)
+				}
+			})
+		})
+
+		describe("GLM-4.5 models", () => {
+			it("should detect glm-4.5 base model", () => {
+				const result = detectGlmModel("glm-4.5")
+				expect(result.isGlm).toBe(true)
+				expect(result.version).toBe("4.5")
+				expect(result.variant).toBe("base")
+				expect(result.displayName).toBe("GLM-4.5")
+				expect(result.supportsVision).toBe(false)
+				expect(result.supportsThinking).toBe(false)
+				expect(result.temperature).toBe(ZAI_DEFAULT_TEMPERATURE)
+				expect(result.mergeToolResultText).toBe(true)
+				expect(result.disableParallelToolCalls).toBe(true)
+			})
+
+			it("should detect glm-4.5-air variant", () => {
+				const result = detectGlmModel("glm-4.5-air")
+				expect(result.isGlm).toBe(true)
+				expect(result.version).toBe("4.5")
+				expect(result.variant).toBe("air")
+				expect(result.displayName).toBe("GLM-4.5-Air")
+			})
+
+			it("should detect glm-4.5-airx variant", () => {
+				const result = detectGlmModel("glm-4.5-airx")
+				expect(result.isGlm).toBe(true)
+				expect(result.version).toBe("4.5")
+				expect(result.variant).toBe("airx")
+				expect(result.displayName).toBe("GLM-4.5-AirX")
+			})
+
+			it("should detect glm-4.5-x variant", () => {
+				const result = detectGlmModel("glm-4.5-x")
+				expect(result.isGlm).toBe(true)
+				expect(result.version).toBe("4.5")
+				expect(result.variant).toBe("x")
+				expect(result.displayName).toBe("GLM-4.5-X")
+			})
+
+			it("should detect glm-4.5-flash variant", () => {
+				const result = detectGlmModel("glm-4.5-flash")
+				expect(result.isGlm).toBe(true)
+				expect(result.version).toBe("4.5")
+				expect(result.variant).toBe("flash")
+				expect(result.displayName).toBe("GLM-4.5-Flash")
+			})
+
+			it("should detect glm-4.5v vision variant", () => {
+				const result = detectGlmModel("glm-4.5v")
+				expect(result.isGlm).toBe(true)
+				expect(result.version).toBe("4.5")
+				expect(result.variant).toBe("v")
+				expect(result.displayName).toBe("GLM-4.5V")
+				expect(result.supportsVision).toBe(true)
+			})
+		})
+
+		describe("GLM-4.6 models", () => {
+			it("should detect glm-4.6 base model", () => {
+				const result = detectGlmModel("glm-4.6")
+				expect(result.isGlm).toBe(true)
+				expect(result.version).toBe("4.6")
+				expect(result.variant).toBe("base")
+				expect(result.displayName).toBe("GLM-4.6")
+				expect(result.supportsThinking).toBe(true)
+			})
+
+			it("should detect glm-4.6v vision variant", () => {
+				const result = detectGlmModel("glm-4.6v")
+				expect(result.isGlm).toBe(true)
+				expect(result.version).toBe("4.6")
+				expect(result.variant).toBe("v")
+				expect(result.displayName).toBe("GLM-4.6V")
+				expect(result.supportsVision).toBe(true)
+				expect(result.supportsThinking).toBe(true)
+			})
+
+			it("should detect glm-4.6v-flash variant", () => {
+				const result = detectGlmModel("glm-4.6v-flash")
+				expect(result.isGlm).toBe(true)
+				expect(result.version).toBe("4.6")
+				expect(result.variant).toBe("v-flash")
+				expect(result.displayName).toBe("GLM-4.6V-Flash")
+				expect(result.supportsVision).toBe(true)
+			})
+
+			it("should detect glm-4.6v-flashx variant", () => {
+				const result = detectGlmModel("glm-4.6v-flashx")
+				expect(result.isGlm).toBe(true)
+				expect(result.version).toBe("4.6")
+				expect(result.variant).toBe("v-flashx")
+				expect(result.displayName).toBe("GLM-4.6V-FlashX")
+				expect(result.supportsVision).toBe(true)
+			})
+		})
+
+		describe("GLM-4.7 models", () => {
+			it("should detect glm-4.7 base model", () => {
+				const result = detectGlmModel("glm-4.7")
+				expect(result.isGlm).toBe(true)
+				expect(result.version).toBe("4.7")
+				expect(result.variant).toBe("base")
+				expect(result.displayName).toBe("GLM-4.7")
+				expect(result.supportsThinking).toBe(true)
+			})
+
+			it("should detect glm-4.7-flash variant", () => {
+				const result = detectGlmModel("glm-4.7-flash")
+				expect(result.isGlm).toBe(true)
+				expect(result.version).toBe("4.7")
+				expect(result.variant).toBe("flash")
+				expect(result.displayName).toBe("GLM-4.7-Flash")
+			})
+
+			it("should detect glm-4.7-flashx variant", () => {
+				const result = detectGlmModel("glm-4.7-flashx")
+				expect(result.isGlm).toBe(true)
+				expect(result.version).toBe("4.7")
+				expect(result.variant).toBe("flashx")
+				expect(result.displayName).toBe("GLM-4.7-FlashX")
+			})
+		})
+
+		describe("LM Studio / GGUF format detection", () => {
+			it("should detect GLM from GGUF filename", () => {
+				const result = detectGlmModel("GLM-4.5-UD-Q8_K_XL-00001-of-00008.gguf")
+				expect(result.isGlm).toBe(true)
+				expect(result.version).toBe("4.5")
+				expect(result.variant).toBe("base")
+			})
+
+			it("should detect GLM from mlx-community path", () => {
+				const result = detectGlmModel("mlx-community/GLM-4.5-4bit")
+				expect(result.isGlm).toBe(true)
+				expect(result.version).toBe("4.5")
+			})
+
+			it("should detect GLM with different separators", () => {
+				const modelIds = [
+					"glm-4.5",
+					"glm_4.5",
+					"GLM-4.5",
+					"GLM_4.5",
+					"glm4.5",
+					"GLM4.5",
+				]
+
+				for (const modelId of modelIds) {
+					const result = detectGlmModel(modelId)
+					expect(result.isGlm).toBe(true)
+					expect(result.version).toBe("4.5")
+				}
+			})
+		})
+
+		describe("case insensitivity", () => {
+			it("should detect GLM regardless of case", () => {
+				const modelIds = [
+					"GLM-4.5",
+					"glm-4.5",
+					"Glm-4.5",
+					"gLm-4.5",
+				]
+
+				for (const modelId of modelIds) {
+					const result = detectGlmModel(modelId)
+					expect(result.isGlm).toBe(true)
+				}
+			})
+		})
+
+		describe("GLM model settings", () => {
+			it("should always apply GLM-specific settings for detected models", () => {
+				const glmModels = [
+					"glm-4.5",
+					"glm-4.6",
+					"glm-4.7",
+					"glm-4.5-air",
+					"glm-4.6v",
+					"glm-4.7-flash",
+				]
+
+				for (const modelId of glmModels) {
+					const result = detectGlmModel(modelId)
+					expect(result.isGlm).toBe(true)
+					expect(result.temperature).toBe(ZAI_DEFAULT_TEMPERATURE)
+					expect(result.mergeToolResultText).toBe(true)
+					expect(result.disableParallelToolCalls).toBe(true)
+				}
+			})
+		})
+	})
+})
diff --git a/src/api/providers/utils/glm-model-detection.ts b/src/api/providers/utils/glm-model-detection.ts
new file mode 100644
index 0000000000..eb95f22e4e
--- /dev/null
+++ b/src/api/providers/utils/glm-model-detection.ts
@@ -0,0 +1,234 @@
+/**
+ * GLM Model Detection Utility
+ *
+ * Detects GLM models from Zhipu AI when used via LM Studio or OpenAI-compatible endpoints.
+ * This allows applying the same optimizations that the Z.ai provider uses:
+ * - Temperature: 0.6 (ZAI_DEFAULT_TEMPERATURE)
+ * - mergeToolResultText: true (prevents conversation flow disruption)
+ * - parallel_tool_calls: false (GLM models may not support this)
+ * - Thinking mode support for GLM-4.7+ models
+ */
+
+import { ZAI_DEFAULT_TEMPERATURE } from "@roo-code/types"
+
+/**
+ * GLM model variant types
+ */
+export type GlmVariant = "base" | "air" | "airx" | "x" | "flash" | "flashx" | "v" | "v-flash" | "v-flashx"
+
+/**
+ * GLM version types
+ */
+export type GlmVersion = "4.5" | "4.6" | "4.7" | "unknown"
+
+/**
+ * Result of GLM model detection
+ */
+export interface GlmModelConfig {
+	/** Whether the model was detected as a GLM model */
+	isGlm: boolean
+	/** The detected version (4.5, 4.6, 4.7) */
+	version: GlmVersion
+	/** The detected variant (base, air, flash, v, etc.) */
+	variant: GlmVariant
+	/** Human-readable display name */
+	displayName: string
+	/** Whether the model supports vision (images) */
+	supportsVision: boolean
+	/** Whether the model supports thinking mode */
+	supportsThinking: boolean
+	/** The temperature to use (ZAI_DEFAULT_TEMPERATURE for GLM models) */
+	temperature: number
+	/** Whether to merge tool result text */
+	mergeToolResultText: boolean
+	/** Whether to disable parallel tool calls */
+	disableParallelToolCalls: boolean
+}
+
+/**
+ * Default config for non-GLM models
+ */
+const NON_GLM_CONFIG: GlmModelConfig = {
+	isGlm: false,
+	version: "unknown",
+	variant: "base",
+	displayName: "",
+	supportsVision: false,
+	supportsThinking: false,
+	temperature: 0,
+	mergeToolResultText: false,
+	disableParallelToolCalls: false,
+}
+
+/**
+ * Detects if a model ID represents a GLM model and returns its configuration.
+ *
+ * Supports various model ID formats:
+ * - Official Z.ai format: "glm-4.5", "glm-4.7-flash"
+ * - LM Studio/HuggingFace format: "mlx-community/GLM-4.5-4bit"
+ * - GGUF file names: "GLM-4.5-UD-Q8_K_XL-00001-of-00008.gguf"
+ * - Case insensitive matching
+ *
+ * @param modelId - The model ID to check
+ * @returns GlmModelConfig with detection results and settings
+ */
+export function detectGlmModel(modelId: string | undefined): GlmModelConfig {
+	if (!modelId) {
+		return NON_GLM_CONFIG
+	}
+
+	// Normalize to lowercase for matching
+	const normalized = modelId.toLowerCase()
+
+	// Check if this is a GLM model
+	// Match patterns like: glm-4.5, glm-4.6, glm-4.7, glm4.5, glm45, etc.
+	const glmPattern = /glm[-_]?4[._]?([567])/i
+	const match = normalized.match(glmPattern)
+
+	if (!match) {
+		return NON_GLM_CONFIG
+	}
+
+	// Extract version
+	const versionDigit = match[1]
+	let version: GlmVersion
+	switch (versionDigit) {
+		case "5":
+			version = "4.5"
+			break
+		case "6":
+			version = "4.6"
+			break
+		case "7":
+			version = "4.7"
+			break
+		default:
+			version = "unknown"
+	}
+
+	// Extract variant
+	const variant = detectVariant(normalized, version)
+
+	// Determine capabilities based on version and variant
+	const supportsVision = variant === "v" || variant === "v-flash" || variant === "v-flashx"
+	const supportsThinking = version === "4.6" || version === "4.7"
+
+	// Build display name
+	const displayName = buildDisplayName(version, variant)
+
+	return {
+		isGlm: true,
+		version,
+		variant,
+		displayName,
+		supportsVision,
+		supportsThinking,
+		temperature: ZAI_DEFAULT_TEMPERATURE,
+		mergeToolResultText: true,
+		disableParallelToolCalls: true,
+	}
+}
+
+/**
+ * Detects the variant from the model ID
+ */
+function detectVariant(normalizedId: string, version: GlmVersion): GlmVariant {
+	// Vision variants with flash
+	if (/glm[-_]?4[._]?\d+v[-_]?flashx/i.test(normalizedId)) {
+		return "v-flashx"
+	}
+	if (/glm[-_]?4[._]?\d+v[-_]?flash/i.test(normalizedId)) {
+		return "v-flash"
+	}
+
+	// Vision variant (e.g., glm-4.6v, glm-4.5v)
+	if (/glm[-_]?4[._]?\d+v(?![a-z])/i.test(normalizedId)) {
+		return "v"
+	}
+
+	// FlashX variant (check before flash)
+	if (normalizedId.includes("flashx")) {
+		return "flashx"
+	}
+
+	// Flash variant
+	if (normalizedId.includes("flash")) {
+		return "flash"
+	}
+
+	// AirX variant (check before air)
+	if (normalizedId.includes("airx")) {
+		return "airx"
+	}
+
+	// Air variant
+	if (normalizedId.includes("air")) {
+		return "air"
+	}
+
+	// X variant (high-performance, check after airx)
+	// Match -x or _x but not other words containing x
+	if (/[-_]x(?:[-_]|$)/i.test(normalizedId) && !normalizedId.includes("flashx") && !normalizedId.includes("airx")) {
+		return "x"
+	}
+
+	return "base"
+}
+
+/**
+ * Builds a human-readable display name
+ */
+function buildDisplayName(version: GlmVersion, variant: GlmVariant): string {
+	let name = `GLM-${version}`
+
+	switch (variant) {
+		case "air":
+			name += "-Air"
+			break
+		case "airx":
+			name += "-AirX"
+			break
+		case "x":
+			name += "-X"
+			break
+		case "flash":
+			name += "-Flash"
+			break
+		case "flashx":
+			name += "-FlashX"
+			break
+		case "v":
+			name = `GLM-${version}V`
+			break
+		case "v-flash":
+			name = `GLM-${version}V-Flash`
+			break
+		case "v-flashx":
+			name = `GLM-${version}V-FlashX`
+			break
+		// base variant gets no suffix
+	}
+
+	return name
+}
+
+/**
+ * Logs GLM detection results to the console for debugging
+ */
+export function logGlmDetection(providerName: string, modelId: string, config: GlmModelConfig): void {
+	console.log(`[${providerName}] Using model ID: "${modelId}"`)
+
+	if (config.isGlm) {
+		console.log(`[GLM Detection] ✓ GLM model detected: "${modelId}"`)
+		console.log(`[GLM Detection] - Version: ${config.version}`)
+		console.log(`[GLM Detection] - Variant: ${config.variant}`)
+		console.log(`[GLM Detection] - Display name: ${config.displayName}`)
+		console.log(`[GLM Detection] - Supports vision: ${config.supportsVision}`)
+		console.log(`[GLM Detection] - Supports thinking: ${config.supportsThinking}`)
+		console.log(`[GLM Detection] - Temperature: ${config.temperature}`)
+		console.log(`[GLM Detection] - mergeToolResultText: ${config.mergeToolResultText}`)
+		console.log(`[GLM Detection] - disableParallelToolCalls: ${config.disableParallelToolCalls}`)
+	} else {
+		console.log(`[GLM Detection] ✗ Not a GLM model: "${modelId}"`)
+	}
+}