From b0545abd4f07a4492e30be03e81662f7ce562a45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 9 Jan 2026 22:01:29 +0100 Subject: [PATCH 01/32] Select backend devices via arg --- examples/common/common.hpp | 63 ++++++---- include/stable-diffusion.h | 12 +- src/stable-diffusion.cpp | 243 ++++++++++++++++++++----------------- 3 files changed, 186 insertions(+), 132 deletions(-) diff --git a/examples/common/common.hpp b/examples/common/common.hpp index 9389b03a3..c3df7467d 100644 --- a/examples/common/common.hpp +++ b/examples/common/common.hpp @@ -447,6 +447,13 @@ struct SDContextParams { std::string tensor_type_rules; std::string lora_model_dir = "."; + std::string main_backend_device; + std::string diffusion_backend_device; + std::string clip_backend_device; + std::string vae_backend_device; + std::string tae_backend_device; + std::string control_net_backend_device; + std::map embedding_map; std::vector embedding_vec; @@ -454,9 +461,6 @@ struct SDContextParams { rng_type_t sampler_rng_type = RNG_TYPE_COUNT; bool offload_params_to_cpu = false; bool enable_mmap = false; - bool control_net_cpu = false; - bool clip_on_cpu = false; - bool vae_on_cpu = false; bool flash_attn = false; bool diffusion_flash_attn = false; bool diffusion_conv_direct = false; @@ -562,6 +566,31 @@ struct SDContextParams { "--upscale-model", "path to esrgan model.", &esrgan_path}, + {"", + "--main-backend-device", + "default device to use for all backends (defaults to main gpu device if hardware acceleration is available, otherwise cpu)", + &main_backend_device}, + {"", + "--diffusion-backend-device", + "device to use for diffusion (defaults to main-backend-device)", + &diffusion_backend_device}, + {"", + "--clip-backend-device", + "device to use for clip (defaults to main-backend-device)", + &clip_backend_device}, + {"", + "--vae-backend-device", + "device to use for vae (defaults to main-backend-device). 
Also applies to tae, unless tae-backend-device is specified", + &vae_backend_device}, + {"", + "--tae-backend-device", + "device to use for tae (defaults to vae-backend-device)", + &tae_backend_device}, + {"", + "--control-net-backend-device", + "device to use for control net (defaults to main-backend-device)", + &control_net_backend_device}, + }; options.int_options = { @@ -600,18 +629,6 @@ struct SDContextParams { "--mmap", "whether to memory-map model", true, &enable_mmap}, - {"", - "--control-net-cpu", - "keep controlnet in cpu (for low vram)", - true, &control_net_cpu}, - {"", - "--clip-on-cpu", - "keep clip in cpu (for low vram)", - true, &clip_on_cpu}, - {"", - "--vae-on-cpu", - "keep vae in cpu (for low vram)", - true, &vae_on_cpu}, {"", "--fa", "use flash attention", @@ -876,6 +893,7 @@ struct SDContextParams { std::string embeddings_str = emb_ss.str(); std::ostringstream oss; + // TODO backend devices oss << "SDContextParams {\n" << " n_threads: " << n_threads << ",\n" << " model_path: \"" << model_path << "\",\n" @@ -901,9 +919,9 @@ struct SDContextParams { << " sampler_rng_type: " << sd_rng_type_name(sampler_rng_type) << ",\n" << " offload_params_to_cpu: " << (offload_params_to_cpu ? "true" : "false") << ",\n" << " enable_mmap: " << (enable_mmap ? "true" : "false") << ",\n" - << " control_net_cpu: " << (control_net_cpu ? "true" : "false") << ",\n" - << " clip_on_cpu: " << (clip_on_cpu ? "true" : "false") << ",\n" - << " vae_on_cpu: " << (vae_on_cpu ? "true" : "false") << ",\n" + // << " control_net_cpu: " << (control_net_cpu ? "true" : "false") << ",\n" + // << " clip_on_cpu: " << (clip_on_cpu ? "true" : "false") << ",\n" + // << " vae_on_cpu: " << (vae_on_cpu ? "true" : "false") << ",\n" << " flash_attn: " << (flash_attn ? "true" : "false") << ",\n" << " diffusion_flash_attn: " << (diffusion_flash_attn ? "true" : "false") << ",\n" << " diffusion_conv_direct: " << (diffusion_conv_direct ? 
"true" : "false") << ",\n" @@ -966,9 +984,6 @@ struct SDContextParams { lora_apply_mode, offload_params_to_cpu, enable_mmap, - clip_on_cpu, - control_net_cpu, - vae_on_cpu, flash_attn, diffusion_flash_attn, taesd_preview, @@ -981,6 +996,12 @@ struct SDContextParams { chroma_use_t5_mask, chroma_t5_mask_pad, qwen_image_zero_cond_t, + main_backend_device.c_str(), + diffusion_backend_device.c_str(), + clip_backend_device.c_str(), + vae_backend_device.c_str(), + tae_backend_device.c_str(), + control_net_backend_device.c_str(), }; return sd_ctx_params; } diff --git a/include/stable-diffusion.h b/include/stable-diffusion.h index 029c2ab1d..4a8b645ae 100644 --- a/include/stable-diffusion.h +++ b/include/stable-diffusion.h @@ -186,9 +186,9 @@ typedef struct { enum lora_apply_mode_t lora_apply_mode; bool offload_params_to_cpu; bool enable_mmap; - bool keep_clip_on_cpu; - bool keep_control_net_on_cpu; - bool keep_vae_on_cpu; + // bool keep_clip_on_cpu; + // bool keep_control_net_on_cpu; + // bool keep_vae_on_cpu; bool flash_attn; bool diffusion_flash_attn; bool tae_preview_only; @@ -201,6 +201,12 @@ typedef struct { bool chroma_use_t5_mask; int chroma_t5_mask_pad; bool qwen_image_zero_cond_t; + const char* main_device; + const char* diffusion_device; + const char* clip_device; + const char* vae_device; + const char* tae_device; + const char* control_net_device; } sd_ctx_params_t; typedef struct { diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index bbf2f979d..829c33d35 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -481,9 +481,13 @@ static void log_sample_cache_summary(const SampleCacheRuntime& runtime, size_t t class StableDiffusionGGML { public: ggml_backend_t backend = nullptr; // general backend + ggml_backend_t diffusion_backend = nullptr; ggml_backend_t clip_backend = nullptr; ggml_backend_t control_net_backend = nullptr; ggml_backend_t vae_backend = nullptr; + ggml_backend_t tae_backend = nullptr; + + // TODO: clip_vision and 
photomaker backends SDVersion version; bool vae_decode_only = false; @@ -531,72 +535,65 @@ class StableDiffusionGGML { StableDiffusionGGML() = default; ~StableDiffusionGGML() { + if (diffusion_backend != backend) { + ggml_backend_free(diffusion_backend); + } if (clip_backend != backend) { ggml_backend_free(clip_backend); } if (control_net_backend != backend) { ggml_backend_free(control_net_backend); } + if (tae_backend != vae_backend) { + ggml_backend_free(tae_backend); + } if (vae_backend != backend) { ggml_backend_free(vae_backend); } ggml_backend_free(backend); } - void init_backend() { -#ifdef SD_USE_CUDA - LOG_DEBUG("Using CUDA backend"); - backend = ggml_backend_cuda_init(0); -#endif -#ifdef SD_USE_METAL - LOG_DEBUG("Using Metal backend"); - backend = ggml_backend_metal_init(); -#endif -#ifdef SD_USE_VULKAN - LOG_DEBUG("Using Vulkan backend"); - size_t device = 0; - const int device_count = ggml_backend_vk_get_device_count(); - if (device_count) { - const char* SD_VK_DEVICE = getenv("SD_VK_DEVICE"); - if (SD_VK_DEVICE != nullptr) { - std::string sd_vk_device_str = SD_VK_DEVICE; - try { - device = std::stoull(sd_vk_device_str); - } catch (const std::invalid_argument&) { - LOG_WARN("SD_VK_DEVICE environment variable is not a valid integer (%s). Falling back to device 0.", SD_VK_DEVICE); - device = 0; - } catch (const std::out_of_range&) { - LOG_WARN("SD_VK_DEVICE environment variable value is out of range for `unsigned long long` type (%s). Falling back to device 0.", SD_VK_DEVICE); - device = 0; - } - if (device >= device_count) { - LOG_WARN("Cannot find targeted vulkan device (%llu). 
Falling back to device 0.", device); - device = 0; - } - } - LOG_INFO("Vulkan: Using device %llu", device); - backend = ggml_backend_vk_init(device); - } - if (!backend) { - LOG_WARN("Failed to initialize Vulkan backend"); - } -#endif -#ifdef SD_USE_OPENCL - LOG_DEBUG("Using OpenCL backend"); - // ggml_log_set(ggml_log_callback_default, nullptr); // Optional ggml logs - backend = ggml_backend_opencl_init(); - if (!backend) { - LOG_WARN("Failed to initialize OpenCL backend"); - } -#endif -#ifdef SD_USE_SYCL - LOG_DEBUG("Using SYCL backend"); - backend = ggml_backend_sycl_init(0); -#endif - - if (!backend) { - LOG_DEBUG("Using CPU backend"); - backend = ggml_backend_cpu_init(); + void list_backends() { + // TODO: expose via C API and fill a cstr + const int device_count = ggml_backend_dev_count(); + for (int i = 0; i < device_count; i++) { + LOG_INFO("%s", ggml_backend_dev_name(ggml_backend_dev_get(i))); + } + } + + bool backend_name_exists(std::string name) { + const int device_count = ggml_backend_dev_count(); + for (int i = 0; i < device_count; i++) { + if (name == ggml_backend_dev_name(ggml_backend_dev_get(i))) { + return true; + } + } + return false; + } + + std::string sanitize_backend_name(std::string name) { + if (name == "" || backend_name_exists(name)) { + return name; + } else { + LOG_WARN("Backend %s not found, using default backend", name.c_str()); + return ""; + } + } + + std::string get_default_backend_name() { + // should pick the same backend as ggml_backend_init_best + ggml_backend_dev_t dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_GPU); + dev = dev ? dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU); + dev = dev ? 
dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); + return ggml_backend_dev_name(dev); + } + + ggml_backend_t init_named_backend(std::string name = "") { + LOG_DEBUG("Initializing backend: %s", name.c_str()); + if (name.empty()) { + return ggml_backend_init_best(); + } else { + return ggml_backend_init_by_name(name.c_str(), nullptr); } } @@ -627,7 +624,44 @@ class StableDiffusionGGML { ggml_log_set(ggml_log_callback_default, nullptr); - init_backend(); + list_backends(); + + std::string default_backend_name = get_default_backend_name(); + + std::string override_default_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->main_device)); + + if (override_default_backend_name.size() > 0) { + LOG_INFO("Setting default backend to %s", override_default_backend_name.c_str()); + default_backend_name = override_default_backend_name; + } + + std::string diffusion_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->diffusion_device)); + std::string clip_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->clip_device)); + std::string control_net_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->control_net_device)); + std::string vae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vae_device)); + std::string tae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->tae_device)); + + bool diffusion_backend_is_default = diffusion_backend_name.empty() || diffusion_backend_name == default_backend_name; + bool clip_backend_is_default = (clip_backend_name.empty() || clip_backend_name == default_backend_name); + bool control_net_backend_is_default = (control_net_backend_name.empty() || control_net_backend_name == default_backend_name); + bool vae_backend_is_default = (vae_backend_name.empty() || vae_backend_name == default_backend_name); + // if tae_backend_name is empty, it will use the same backend as vae + bool tae_backend_is_default = (tae_backend_name.empty() && vae_backend_is_default) || tae_backend_name == 
default_backend_name; + + // if some backend is not specified or is the same as the default backend, use the default backend + bool use_default_backend = diffusion_backend_is_default || clip_backend_is_default || control_net_backend_is_default || vae_backend_is_default || tae_backend_is_default; + + if (use_default_backend) { + backend = init_named_backend(override_default_backend_name); + LOG_DEBUG("Loaded default backend %s", ggml_backend_name(backend)); + } + + if (!diffusion_backend_is_default) { + diffusion_backend = init_named_backend(diffusion_backend_name); + LOG_INFO("Using diffusion backend: %s", ggml_backend_name(diffusion_backend)); + } else { + diffusion_backend = backend; + } ModelLoader model_loader; @@ -798,21 +832,19 @@ class StableDiffusionGGML { LOG_INFO("Using circular padding for convolutions"); } - bool clip_on_cpu = sd_ctx_params->keep_clip_on_cpu; - { clip_backend = backend; - if (clip_on_cpu && !ggml_backend_is_cpu(backend)) { - LOG_INFO("CLIP: Using CPU backend"); - clip_backend = ggml_backend_cpu_init(); + if (!clip_backend_is_default) { + clip_backend = init_named_backend(clip_backend_name); + LOG_INFO("CLIP: Using %s backend", ggml_backend_name(clip_backend)); } if (sd_version_is_sd3(version)) { cond_stage_model = std::make_shared(clip_backend, offload_params_to_cpu, tensor_storage_map); - diffusion_model = std::make_shared(backend, - offload_params_to_cpu, - tensor_storage_map); + diffusion_model = std::make_shared(diffusion_backend, + offload_params_to_cpu, + tensor_storage_map); } else if (sd_version_is_flux(version)) { bool is_chroma = false; for (auto pair : tensor_storage_map) { @@ -848,7 +880,7 @@ class StableDiffusionGGML { offload_params_to_cpu, tensor_storage_map); } - diffusion_model = std::make_shared(backend, + diffusion_model = std::make_shared(diffusion_backend, offload_params_to_cpu, tensor_storage_map, version, @@ -859,11 +891,11 @@ class StableDiffusionGGML { offload_params_to_cpu, tensor_storage_map, version); - 
diffusion_model = std::make_shared(backend, - offload_params_to_cpu, - tensor_storage_map, - version, - sd_ctx_params->chroma_use_dit_mask); + diffusion_model = std::make_shared(diffusion_backend, + offload_params_to_cpu, + tensor_storage_map, + version, + sd_ctx_params->chroma_use_dit_mask); } else if (sd_version_is_wan(version)) { cond_stage_model = std::make_shared(clip_backend, offload_params_to_cpu, @@ -871,13 +903,13 @@ class StableDiffusionGGML { true, 1, true); - diffusion_model = std::make_shared(backend, - offload_params_to_cpu, - tensor_storage_map, - "model.diffusion_model", - version); + diffusion_model = std::make_shared(diffusion_backend, + offload_params_to_cpu, + tensor_storage_map, + "model.diffusion_model", + version); if (strlen(SAFE_STR(sd_ctx_params->high_noise_diffusion_model_path)) > 0) { - high_noise_diffusion_model = std::make_shared(backend, + high_noise_diffusion_model = std::make_shared(diffusion_backend, offload_params_to_cpu, tensor_storage_map, "model.high_noise_diffusion_model", @@ -903,12 +935,12 @@ class StableDiffusionGGML { version, "", enable_vision); - diffusion_model = std::make_shared(backend, - offload_params_to_cpu, - tensor_storage_map, - "model.diffusion_model", - version, - sd_ctx_params->qwen_image_zero_cond_t); + diffusion_model = std::make_shared(diffusion_backend, + offload_params_to_cpu, + tensor_storage_map, + "model.diffusion_model", + version, + sd_ctx_params->qwen_image_zero_cond_t); } else if (sd_version_is_anima(version)) { cond_stage_model = std::make_shared(clip_backend, offload_params_to_cpu, @@ -922,11 +954,11 @@ class StableDiffusionGGML { offload_params_to_cpu, tensor_storage_map, version); - diffusion_model = std::make_shared(backend, - offload_params_to_cpu, - tensor_storage_map, - "model.diffusion_model", - version); + diffusion_model = std::make_shared(diffusion_backend, + offload_params_to_cpu, + tensor_storage_map, + "model.diffusion_model", + version); } else { // SD1.x SD2.x SDXL std::map 
embbeding_map; for (uint32_t i = 0; i < sd_ctx_params->embedding_count; i++) { @@ -946,7 +978,7 @@ class StableDiffusionGGML { embbeding_map, version); } - diffusion_model = std::make_shared(backend, + diffusion_model = std::make_shared(diffusion_backend, offload_params_to_cpu, tensor_storage_map, version); @@ -971,18 +1003,22 @@ class StableDiffusionGGML { high_noise_diffusion_model->get_param_tensors(tensors); } - if (sd_ctx_params->keep_vae_on_cpu && !ggml_backend_is_cpu(backend)) { - LOG_INFO("VAE Autoencoder: Using CPU backend"); - vae_backend = ggml_backend_cpu_init(); - } else { - vae_backend = backend; + vae_backend = backend; + if (!vae_backend_is_default) { + vae_backend = init_named_backend(vae_backend_name); + LOG_INFO("VAE Autoencoder: Using %s backend", ggml_backend_name(vae_backend)); + } + tae_backend = vae_backend; + if (tae_backend_name.length() > 0 && tae_backend_name != vae_backend_name) { + tae_backend = init_named_backend(tae_backend_name); + LOG_INFO("Tiny Autoencoder: Using %s backend", ggml_backend_name(tae_backend)); } auto create_tae = [&]() -> std::shared_ptr { if (sd_version_is_wan(version) || sd_version_is_qwen_image(version) || sd_version_is_anima(version)) { - return std::make_shared(vae_backend, + return std::make_shared(tae_backend, offload_params_to_cpu, tensor_storage_map, "decoder", @@ -990,7 +1026,7 @@ class StableDiffusionGGML { version); } else { - auto model = std::make_shared(vae_backend, + auto model = std::make_shared(tae_backend, offload_params_to_cpu, tensor_storage_map, "decoder.layers", @@ -1064,9 +1100,9 @@ class StableDiffusionGGML { if (strlen(SAFE_STR(sd_ctx_params->control_net_path)) > 0) { ggml_backend_t controlnet_backend = nullptr; - if (sd_ctx_params->keep_control_net_on_cpu && !ggml_backend_is_cpu(backend)) { - LOG_DEBUG("ControlNet: Using CPU backend"); - controlnet_backend = ggml_backend_cpu_init(); + if (!control_net_backend_is_default) { + control_net_backend = 
init_named_backend(control_net_backend_name); + LOG_INFO("ControlNet: Using %s backend", control_net_backend_name); } else { controlnet_backend = backend; } @@ -1235,7 +1271,7 @@ class StableDiffusionGGML { total_params_vram_size += clip_params_mem_size + pmid_params_mem_size; } - if (ggml_backend_is_cpu(backend)) { + if (ggml_backend_is_cpu(diffusion_backend)) { total_params_ram_size += unet_params_mem_size; } else { total_params_vram_size += unet_params_mem_size; @@ -2679,9 +2715,6 @@ void sd_ctx_params_init(sd_ctx_params_t* sd_ctx_params) { sd_ctx_params->lora_apply_mode = LORA_APPLY_AUTO; sd_ctx_params->offload_params_to_cpu = false; sd_ctx_params->enable_mmap = false; - sd_ctx_params->keep_clip_on_cpu = false; - sd_ctx_params->keep_control_net_on_cpu = false; - sd_ctx_params->keep_vae_on_cpu = false; sd_ctx_params->diffusion_flash_attn = false; sd_ctx_params->circular_x = false; sd_ctx_params->circular_y = false; @@ -2695,7 +2728,7 @@ char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) { if (!buf) return nullptr; buf[0] = '\0'; - + // TODO devices snprintf(buf + strlen(buf), 4096 - strlen(buf), "model_path: %s\n" "clip_l_path: %s\n" @@ -2719,9 +2752,6 @@ char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) { "sampler_rng_type: %s\n" "prediction: %s\n" "offload_params_to_cpu: %s\n" - "keep_clip_on_cpu: %s\n" - "keep_control_net_on_cpu: %s\n" - "keep_vae_on_cpu: %s\n" "flash_attn: %s\n" "diffusion_flash_attn: %s\n" "circular_x: %s\n" @@ -2751,9 +2781,6 @@ char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) { sd_rng_type_name(sd_ctx_params->sampler_rng_type), sd_prediction_name(sd_ctx_params->prediction), BOOL_STR(sd_ctx_params->offload_params_to_cpu), - BOOL_STR(sd_ctx_params->keep_clip_on_cpu), - BOOL_STR(sd_ctx_params->keep_control_net_on_cpu), - BOOL_STR(sd_ctx_params->keep_vae_on_cpu), BOOL_STR(sd_ctx_params->flash_attn), BOOL_STR(sd_ctx_params->diffusion_flash_attn), BOOL_STR(sd_ctx_params->circular_x), From 
f89620cc8deb335dd37ef5489b670cba778306c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 9 Jan 2026 22:59:31 +0100 Subject: [PATCH 02/32] fix build --- src/stable-diffusion.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 829c33d35..989ede078 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -1102,7 +1102,7 @@ class StableDiffusionGGML { ggml_backend_t controlnet_backend = nullptr; if (!control_net_backend_is_default) { control_net_backend = init_named_backend(control_net_backend_name); - LOG_INFO("ControlNet: Using %s backend", control_net_backend_name); + LOG_INFO("ControlNet: Using %s backend", ggml_backend_name(controlnet_backend)); } else { controlnet_backend = backend; } From 61af83ecb60a3e16889b1f0ac4af6b4870fc4999 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Sun, 11 Jan 2026 20:43:16 +0100 Subject: [PATCH 03/32] show backend device description --- src/stable-diffusion.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 989ede078..9c913f7e5 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -557,7 +557,8 @@ class StableDiffusionGGML { // TODO: expose via C API and fill a cstr const int device_count = ggml_backend_dev_count(); for (int i = 0; i < device_count; i++) { - LOG_INFO("%s", ggml_backend_dev_name(ggml_backend_dev_get(i))); + auto dev = ggml_backend_dev_get(i); + LOG_INFO("%s (%s)", ggml_backend_dev_name(dev), ggml_backend_dev_description(dev)); } } From c8ffbd399f693f23a4b37deaca582a2398c7e761 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Sun, 11 Jan 2026 21:36:43 +0100 Subject: [PATCH 04/32] CLI: add --list-devices arg --- examples/cli/main.cpp | 35 ++++++++++++++- examples/common/common.hpp | 14 +++++- include/stable-diffusion.h | 5 +++ 
src/stable-diffusion.cpp | 90 ++++++++++++++++++++++++++++++++++++-- 4 files changed, 138 insertions(+), 6 deletions(-) diff --git a/examples/cli/main.cpp b/examples/cli/main.cpp index f9e4928ea..d347142ae 100644 --- a/examples/cli/main.cpp +++ b/examples/cli/main.cpp @@ -46,6 +46,7 @@ struct SDCliParams { bool color = false; bool normal_exit = false; + bool skip_usage = false; ArgOptions get_options() { ArgOptions options; @@ -143,7 +144,27 @@ struct SDCliParams { auto on_help_arg = [&](int argc, const char** argv, int index) { normal_exit = true; - return -1; + return VALID_BREAK_OPT; + }; + + auto on_rpc_arg = [&](int argc, const char** argv, int index) { + if (++index >= argc) { + return -1; + } + const char* rpc_device = argv[index]; + add_rpc_device(rpc_device); + return 1; + }; + + auto on_list_devices_arg = [&](int argc, const char** argv, int index) { + size_t buff_size = backend_list_size(); + char* buff = (char*)malloc(buff_size); + list_backends_to_buffer(buff, buff_size); + printf("List of available GGML devices:\nName\tDescription\n-------------------\n%s\n", buff); + free(buff); + normal_exit = true; + skip_usage = true; + return VALID_BREAK_OPT; }; options.manual_options = { @@ -159,6 +180,14 @@ struct SDCliParams { "--help", "show this help message and exit", on_help_arg}, + {"", + "--rpc", + "add a rpc device", + on_rpc_arg}, + {"", + "--list-devices", + "list available ggml compute devices", + on_list_devices_arg}, }; return options; @@ -213,7 +242,9 @@ void parse_args(int argc, const char** argv, SDCliParams& cli_params, SDContextP std::vector options_vec = {cli_params.get_options(), ctx_params.get_options(), gen_params.get_options()}; if (!parse_options(argc, argv, options_vec)) { - print_usage(argc, argv, options_vec); + if (!cli_params.skip_usage){ + print_usage(argc, argv, options_vec); + } exit(cli_params.normal_exit ? 
0 : 1); } diff --git a/examples/common/common.hpp b/examples/common/common.hpp index c3df7467d..932f7aece 100644 --- a/examples/common/common.hpp +++ b/examples/common/common.hpp @@ -34,6 +34,8 @@ namespace fs = std::filesystem; #define SAFE_STR(s) ((s) ? (s) : "") #define BOOL_STR(b) ((b) ? "true" : "false") +#define VALID_BREAK_OPT -42 + const char* modes_str[] = { "img_gen", "vid_gen", @@ -401,16 +403,26 @@ static bool parse_options(int argc, const char** argv, const std::vector string_split(const std::string & input, char separator) +{ + std::vector parts; + size_t begin_pos = 0; + size_t separator_pos = input.find(separator); + while (separator_pos != std::string::npos) { + std::string part = input.substr(begin_pos, separator_pos - begin_pos); + parts.emplace_back(part); + begin_pos = separator_pos + 1; + separator_pos = input.find(separator, begin_pos); + } + parts.emplace_back(input.substr(begin_pos, separator_pos - begin_pos)); + return parts; +} + +static void add_rpc_devices(const std::string & servers) { + auto rpc_servers = string_split(servers, ','); + if (rpc_servers.empty()) { + throw std::invalid_argument("no RPC servers specified"); + } + ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC"); + if (!rpc_reg) { + throw std::invalid_argument("failed to find RPC backend"); + } + typedef ggml_backend_reg_t (*ggml_backend_rpc_add_server_t)(const char * endpoint); + ggml_backend_rpc_add_server_t ggml_backend_rpc_add_server_fn = (ggml_backend_rpc_add_server_t) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_server"); + if (!ggml_backend_rpc_add_server_fn) { + throw std::invalid_argument("failed to find RPC add server function"); + } + for (const auto & server : rpc_servers) { + auto reg = ggml_backend_rpc_add_server_fn(server.c_str()); + ggml_backend_register(reg); + } +} + +void add_rpc_device(const char* servers_cstr){ + std::string servers(servers_cstr); + add_rpc_devices(servers); +} + +std::vector> list_backends_vector() { + 
std::vector> backends; + const int device_count = ggml_backend_dev_count(); + for (int i = 0; i < device_count; i++) { + auto dev = ggml_backend_dev_get(i); + backends.push_back({ggml_backend_dev_name(dev), ggml_backend_dev_description(dev)}); + } + return backends; +} + +SD_API size_t backend_list_size(){ + // for C API + size_t buffer_size = 0; + auto backends = list_backends_vector(); + for (auto& backend : backends) { + auto dev_name_size = backend.first.size(); + auto dev_desc_size = backend.second.size(); + buffer_size+=dev_name_size+dev_desc_size+2; // +2 for the separators + } + return buffer_size; +} + +// devices are separated by \n and name and description are separated by \t +SD_API void list_backends_to_buffer(char* buffer, size_t buffer_size) { + auto backends = list_backends_vector(); + size_t offset = 0; + for (auto& backend : backends) { + size_t name_size = backend.first.size(); + size_t desc_size = backend.second.size(); + if (offset + name_size + desc_size + 2 > buffer_size) { + break; // Not enough space in the buffer + } + memcpy(buffer + offset, backend.first.c_str(), name_size); + offset += name_size; + buffer[offset++] = '\t'; + memcpy(buffer + offset, backend.second.c_str(), desc_size); + offset += desc_size; + buffer[offset++] = '\n'; + } + if (offset < buffer_size) { + buffer[offset] = '\0'; // Ensure the buffer is null-terminated at the end + } +} + /*=============================================== StableDiffusionGGML ================================================*/ class StableDiffusionGGML { @@ -553,8 +637,8 @@ class StableDiffusionGGML { ggml_backend_free(backend); } - void list_backends() { - // TODO: expose via C API and fill a cstr + + void log_backends() { const int device_count = ggml_backend_dev_count(); for (int i = 0; i < device_count; i++) { auto dev = ggml_backend_dev_get(i); @@ -625,7 +709,7 @@ class StableDiffusionGGML { ggml_log_set(ggml_log_callback_default, nullptr); - list_backends(); + log_backends(); std::string 
default_backend_name = get_default_backend_name(); From 4e7bdb32071e145cba4b88540fdc996fbc1a3606 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 16 Jan 2026 00:46:44 +0100 Subject: [PATCH 05/32] null-terminate even if buffer is too small --- src/stable-diffusion.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 1cd6ae3ca..eb885c03f 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -557,6 +557,9 @@ SD_API void list_backends_to_buffer(char* buffer, size_t buffer_size) { } if (offset < buffer_size) { buffer[offset] = '\0'; // Ensure the buffer is null-terminated at the end + } else { + LOG_WARN("Provided buffer size is too small to contain details of all devices."); + buffer[buffer_size - 1] = '\0'; // Ensure the buffer is null-terminated at the end } } From 53d32a93db0862b2f37b9f18879cedf9b18bdf96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 16 Jan 2026 01:03:37 +0100 Subject: [PATCH 06/32] move stuff to ggml_extend.cpp --- src/ggml_extend.hpp | 36 ++++++++++++++++++++++++++++++++++++ src/stable-diffusion.cpp | 36 ------------------------------------ 2 files changed, 36 insertions(+), 36 deletions(-) diff --git a/src/ggml_extend.hpp b/src/ggml_extend.hpp index e6b27cc7c..0fbade8a0 100644 --- a/src/ggml_extend.hpp +++ b/src/ggml_extend.hpp @@ -88,6 +88,42 @@ __STATIC_INLINE__ void ggml_log_callback_default(ggml_log_level level, const cha } } +__STATIC_INLINE__ bool backend_name_exists(std::string name) { + const int device_count = ggml_backend_dev_count(); + for (int i = 0; i < device_count; i++) { + if (name == ggml_backend_dev_name(ggml_backend_dev_get(i))) { + return true; + } + } + return false; +} + +__STATIC_INLINE__ std::string sanitize_backend_name(std::string name) { + if (name == "" || backend_name_exists(name)) { + return name; + } else { + LOG_WARN("Backend %s not found, using default backend", 
name.c_str()); + return ""; + } +} + +__STATIC_INLINE__ std::string get_default_backend_name() { + // should pick the same backend as ggml_backend_init_best + ggml_backend_dev_t dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_GPU); + dev = dev ? dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU); + dev = dev ? dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); + return ggml_backend_dev_name(dev); +} + +__STATIC_INLINE__ ggml_backend_t init_named_backend(std::string name = "") { + LOG_DEBUG("Initializing backend: %s", name.c_str()); + if (name.empty()) { + return ggml_backend_init_best(); + } else { + return ggml_backend_init_by_name(name.c_str(), nullptr); + } +} + static_assert(GGML_MAX_NAME >= 128, "GGML_MAX_NAME must be at least 128"); // n-mode tensor-matrix product diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index eb885c03f..c8d6f5d72 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -649,42 +649,6 @@ class StableDiffusionGGML { } } - bool backend_name_exists(std::string name) { - const int device_count = ggml_backend_dev_count(); - for (int i = 0; i < device_count; i++) { - if (name == ggml_backend_dev_name(ggml_backend_dev_get(i))) { - return true; - } - } - return false; - } - - std::string sanitize_backend_name(std::string name) { - if (name == "" || backend_name_exists(name)) { - return name; - } else { - LOG_WARN("Backend %s not found, using default backend", name.c_str()); - return ""; - } - } - - std::string get_default_backend_name() { - // should pick the same backend as ggml_backend_init_best - ggml_backend_dev_t dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_GPU); - dev = dev ? dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU); - dev = dev ? 
dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); - return ggml_backend_dev_name(dev); - } - - ggml_backend_t init_named_backend(std::string name = "") { - LOG_DEBUG("Initializing backend: %s", name.c_str()); - if (name.empty()) { - return ggml_backend_init_best(); - } else { - return ggml_backend_init_by_name(name.c_str(), nullptr); - } - } - std::shared_ptr get_rng(rng_type_t rng_type) { if (rng_type == STD_DEFAULT_RNG) { return std::make_shared(); From 1800c9aaf3a8c5bf0673f1c359ad1b416dbf9ad6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 16 Jan 2026 01:06:53 +0100 Subject: [PATCH 07/32] --upscaler-backend-device --- examples/cli/main.cpp | 3 ++- examples/common/common.hpp | 7 +++++++ include/stable-diffusion.h | 3 ++- src/upscaler.cpp | 38 +++++++++++--------------------------- 4 files changed, 22 insertions(+), 29 deletions(-) diff --git a/examples/cli/main.cpp b/examples/cli/main.cpp index d347142ae..1bf8f31a7 100644 --- a/examples/cli/main.cpp +++ b/examples/cli/main.cpp @@ -829,7 +829,8 @@ int main(int argc, const char* argv[]) { ctx_params.offload_params_to_cpu, ctx_params.diffusion_conv_direct, ctx_params.n_threads, - gen_params.upscale_tile_size); + gen_params.upscale_tile_size, + ctx_params.upscaler_backend_device.c_str()); if (upscaler_ctx == nullptr) { LOG_ERROR("new_upscaler_ctx failed"); diff --git a/examples/common/common.hpp b/examples/common/common.hpp index 932f7aece..cfa9c740e 100644 --- a/examples/common/common.hpp +++ b/examples/common/common.hpp @@ -465,6 +465,8 @@ struct SDContextParams { std::string vae_backend_device; std::string tae_backend_device; std::string control_net_backend_device; + std::string upscaler_backend_device; + std::map embedding_map; std::vector embedding_vec; @@ -602,6 +604,11 @@ struct SDContextParams { "--control-net-backend-device", "device to use for control net (defaults to main-backend-device)", &control_net_backend_device}, + {"", + "--upscaler-backend-device", + 
"device to use for upscaling models (defaults to main-backend-device)", + &upscaler_backend_device}, + }; diff --git a/include/stable-diffusion.h b/include/stable-diffusion.h index 2bdef5e87..93c194103 100644 --- a/include/stable-diffusion.h +++ b/include/stable-diffusion.h @@ -395,7 +395,8 @@ SD_API upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path, bool offload_params_to_cpu, bool direct, int n_threads, - int tile_size); + int tile_size, + const char * device); SD_API void free_upscaler_ctx(upscaler_ctx_t* upscaler_ctx); SD_API sd_image_t upscale(upscaler_ctx_t* upscaler_ctx, diff --git a/src/upscaler.cpp b/src/upscaler.cpp index 18e185d06..021605e8a 100644 --- a/src/upscaler.cpp +++ b/src/upscaler.cpp @@ -22,37 +22,20 @@ struct UpscalerGGML { bool load_from_file(const std::string& esrgan_path, bool offload_params_to_cpu, - int n_threads) { + int n_threads, + std::string device = "") { ggml_log_set(ggml_log_callback_default, nullptr); -#ifdef SD_USE_CUDA - LOG_DEBUG("Using CUDA backend"); - backend = ggml_backend_cuda_init(0); -#endif -#ifdef SD_USE_METAL - LOG_DEBUG("Using Metal backend"); - backend = ggml_backend_metal_init(); -#endif -#ifdef SD_USE_VULKAN - LOG_DEBUG("Using Vulkan backend"); - backend = ggml_backend_vk_init(0); -#endif -#ifdef SD_USE_OPENCL - LOG_DEBUG("Using OpenCL backend"); - backend = ggml_backend_opencl_init(); -#endif -#ifdef SD_USE_SYCL - LOG_DEBUG("Using SYCL backend"); - backend = ggml_backend_sycl_init(0); -#endif + device = sanitize_backend_name(device); + backend = init_named_backend(device); ModelLoader model_loader; if (!model_loader.init_from_file_and_convert_name(esrgan_path)) { LOG_ERROR("init model loader from file failed: '%s'", esrgan_path.c_str()); } model_loader.set_wtype_override(model_data_type); - if (!backend) { - LOG_DEBUG("Using CPU backend"); - backend = ggml_backend_cpu_init(); - } + // if (!backend) { + // LOG_DEBUG("Using CPU backend"); + // backend = ggml_backend_cpu_init(); + // } LOG_INFO("Upscaler 
weight type: %s", ggml_type_name(model_data_type)); esrgan_upscaler = std::make_shared(backend, offload_params_to_cpu, tile_size, model_loader.get_tensor_storage_map()); if (direct) { @@ -118,7 +101,8 @@ upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path_c_str, bool offload_params_to_cpu, bool direct, int n_threads, - int tile_size) { + int tile_size, + const char* device) { upscaler_ctx_t* upscaler_ctx = (upscaler_ctx_t*)malloc(sizeof(upscaler_ctx_t)); if (upscaler_ctx == nullptr) { return nullptr; @@ -130,7 +114,7 @@ upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path_c_str, return nullptr; } - if (!upscaler_ctx->upscaler->load_from_file(esrgan_path, offload_params_to_cpu, n_threads)) { + if (!upscaler_ctx->upscaler->load_from_file(esrgan_path, offload_params_to_cpu, n_threads, SAFE_STR(device))) { delete upscaler_ctx->upscaler; upscaler_ctx->upscaler = nullptr; free(upscaler_ctx); From c1fc2868c0983e955f8b570286df25eaa485691d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 16 Jan 2026 02:00:34 +0100 Subject: [PATCH 08/32] use diffusion_backend for loading LoRAs --- src/stable-diffusion.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index c8d6f5d72..1dff01d6a 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -1184,7 +1184,7 @@ class StableDiffusionGGML { version); } if (strlen(SAFE_STR(sd_ctx_params->photo_maker_path)) > 0) { - pmid_lora = std::make_shared("pmid", backend, sd_ctx_params->photo_maker_path, "", version); + pmid_lora = std::make_shared("pmid", diffusion_backend, sd_ctx_params->photo_maker_path, "", version); auto lora_tensor_filter = [&](const std::string& tensor_name) { if (starts_with(tensor_name, "lora.model")) { return true; @@ -1548,8 +1548,11 @@ class StableDiffusionGGML { for (auto& kv : lora_state_diff) { int64_t t0 = ggml_time_ms(); - - auto lora = load_lora_model_from_file(kv.first, 
kv.second, backend); + // TODO: Fix that + if(diffusion_backend!=clip_backend && !ggml_backend_is_cpu(clip_backend)){ + LOG_WARN("Diffusion models and text encoders are running on different backends. This may cause issues when immediately applying LoRAs."); + } + auto lora = load_lora_model_from_file(kv.first, kv.second, diffusion_backend); if (!lora || lora->lora_tensors.empty()) { continue; } @@ -1639,7 +1642,7 @@ class StableDiffusionGGML { const std::string& lora_name = kv.first; float multiplier = kv.second; - auto lora = load_lora_model_from_file(lora_name, multiplier, backend, lora_tensor_filter); + auto lora = load_lora_model_from_file(lora_name, multiplier, diffusion_backend, lora_tensor_filter); if (lora && !lora->lora_tensors.empty()) { lora->preprocess_lora_tensors(tensors); diffusion_lora_models.push_back(lora); From 9de50328366324704ddf3fd9fe583e8ed056c42c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 16 Jan 2026 02:19:45 +0100 Subject: [PATCH 09/32] --photomaker-backend-device (+fixes) --- examples/common/common.hpp | 7 ++++++- include/stable-diffusion.h | 1 + src/stable-diffusion.cpp | 26 +++++++++++++++++--------- 3 files changed, 24 insertions(+), 10 deletions(-) diff --git a/examples/common/common.hpp b/examples/common/common.hpp index cfa9c740e..333108ad7 100644 --- a/examples/common/common.hpp +++ b/examples/common/common.hpp @@ -466,7 +466,7 @@ struct SDContextParams { std::string tae_backend_device; std::string control_net_backend_device; std::string upscaler_backend_device; - + std::string photomaker_backend_device; std::map embedding_map; std::vector embedding_vec; @@ -608,6 +608,10 @@ struct SDContextParams { "--upscaler-backend-device", "device to use for upscaling models (defaults to main-backend-device)", &upscaler_backend_device}, + {"", + "--photomaker-backend-device", + "device to use for photomaker (defaults to main-backend-device)", + &photomaker_backend_device}, }; @@ -1021,6 +1025,7 @@ struct 
SDContextParams { vae_backend_device.c_str(), tae_backend_device.c_str(), control_net_backend_device.c_str(), + photomaker_backend_device.c_str(), }; return sd_ctx_params; } diff --git a/include/stable-diffusion.h b/include/stable-diffusion.h index 93c194103..920b3cb3d 100644 --- a/include/stable-diffusion.h +++ b/include/stable-diffusion.h @@ -207,6 +207,7 @@ typedef struct { const char* vae_device; const char* tae_device; const char* control_net_device; + const char* photomaker_device; } sd_ctx_params_t; typedef struct { diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 1dff01d6a..1e8b23dc3 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -573,6 +573,7 @@ class StableDiffusionGGML { ggml_backend_t control_net_backend = nullptr; ggml_backend_t vae_backend = nullptr; ggml_backend_t tae_backend = nullptr; + ggml_backend_t pmid_backend = nullptr; // TODO: clip_vision and photomaker backends @@ -692,6 +693,7 @@ class StableDiffusionGGML { std::string control_net_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->control_net_device)); std::string vae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vae_device)); std::string tae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->tae_device)); + std::string pmid_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->photomaker_device)); bool diffusion_backend_is_default = diffusion_backend_name.empty() || diffusion_backend_name == default_backend_name; bool clip_backend_is_default = (clip_backend_name.empty() || clip_backend_name == default_backend_name); @@ -699,9 +701,10 @@ class StableDiffusionGGML { bool vae_backend_is_default = (vae_backend_name.empty() || vae_backend_name == default_backend_name); // if tae_backend_name is empty, it will use the same backend as vae bool tae_backend_is_default = (tae_backend_name.empty() && vae_backend_is_default) || tae_backend_name == default_backend_name; + bool pmid_backend_is_default = 
(pmid_backend_name.empty() || pmid_backend_name == default_backend_name); // if some backend is not specified or is the same as the default backend, use the default backend - bool use_default_backend = diffusion_backend_is_default || clip_backend_is_default || control_net_backend_is_default || vae_backend_is_default || tae_backend_is_default; + bool use_default_backend = diffusion_backend_is_default || clip_backend_is_default || control_net_backend_is_default || vae_backend_is_default || tae_backend_is_default || pmid_backend_is_default; if (use_default_backend) { backend = init_named_backend(override_default_backend_name); @@ -1151,14 +1154,13 @@ class StableDiffusionGGML { } if (strlen(SAFE_STR(sd_ctx_params->control_net_path)) > 0) { - ggml_backend_t controlnet_backend = nullptr; if (!control_net_backend_is_default) { control_net_backend = init_named_backend(control_net_backend_name); - LOG_INFO("ControlNet: Using %s backend", ggml_backend_name(controlnet_backend)); + LOG_INFO("ControlNet: Using %s backend", ggml_backend_name(control_net_backend)); } else { - controlnet_backend = backend; + control_net_backend = backend; } - control_net = std::make_shared(controlnet_backend, + control_net = std::make_shared(control_net_backend, offload_params_to_cpu, tensor_storage_map, version); @@ -1167,9 +1169,15 @@ class StableDiffusionGGML { control_net->set_conv2d_direct_enabled(true); } } - + pmid_backend = backend; + if (!pmid_backend_is_default) { + pmid_backend = init_named_backend(pmid_backend_name); + LOG_INFO("PhotoMaker: Using %s backend", ggml_backend_name(pmid_backend)); + } else { + pmid_backend = backend; + } if (strstr(SAFE_STR(sd_ctx_params->photo_maker_path), "v2")) { - pmid_model = std::make_shared(backend, + pmid_model = std::make_shared(pmid_backend, offload_params_to_cpu, tensor_storage_map, "pmid", @@ -1177,7 +1185,7 @@ class StableDiffusionGGML { PM_VERSION_2); LOG_INFO("using PhotoMaker Version 2"); } else { - pmid_model = std::make_shared(backend, + 
pmid_model = std::make_shared(pmid_backend, offload_params_to_cpu, tensor_storage_map, "pmid", @@ -1357,7 +1365,7 @@ class StableDiffusionGGML { control_net_params_mem_size / 1024.0 / 1024.0, ggml_backend_is_cpu(control_net_backend) ? "RAM" : "VRAM", pmid_params_mem_size / 1024.0 / 1024.0, - ggml_backend_is_cpu(clip_backend) ? "RAM" : "VRAM"); + ggml_backend_is_cpu(pmid_backend) ? "RAM" : "VRAM"); } // init denoiser From a85121fd23ea9d01196d03bd9775e307ea2a4405 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 16 Jan 2026 02:28:37 +0100 Subject: [PATCH 10/32] --vision-backend-device --- examples/common/common.hpp | 7 ++++++- include/stable-diffusion.h | 1 + src/stable-diffusion.cpp | 10 ++++++---- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/examples/common/common.hpp b/examples/common/common.hpp index 333108ad7..750dabf05 100644 --- a/examples/common/common.hpp +++ b/examples/common/common.hpp @@ -467,6 +467,7 @@ struct SDContextParams { std::string control_net_backend_device; std::string upscaler_backend_device; std::string photomaker_backend_device; + std::string vision_backend_device; std::map embedding_map; std::vector embedding_vec; @@ -612,7 +613,10 @@ struct SDContextParams { "--photomaker-backend-device", "device to use for photomaker (defaults to main-backend-device)", &photomaker_backend_device}, - + {"", + "--vision-backend-device", + "device to use for clip-vision model (defaults to clip-backend-device)", + &vision_backend_device}, }; @@ -1026,6 +1030,7 @@ struct SDContextParams { tae_backend_device.c_str(), control_net_backend_device.c_str(), photomaker_backend_device.c_str(), + vision_backend_device.c_str(), }; return sd_ctx_params; } diff --git a/include/stable-diffusion.h b/include/stable-diffusion.h index 920b3cb3d..521875034 100644 --- a/include/stable-diffusion.h +++ b/include/stable-diffusion.h @@ -208,6 +208,7 @@ typedef struct { const char* tae_device; const char* control_net_device; const 
char* photomaker_device; + const char* vision_device; } sd_ctx_params_t; typedef struct { diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 1e8b23dc3..a80bab233 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -574,8 +574,7 @@ class StableDiffusionGGML { ggml_backend_t vae_backend = nullptr; ggml_backend_t tae_backend = nullptr; ggml_backend_t pmid_backend = nullptr; - - // TODO: clip_vision and photomaker backends + ggml_backend_t vision_backend = nullptr; SDVersion version; bool vae_decode_only = false; @@ -694,6 +693,7 @@ class StableDiffusionGGML { std::string vae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vae_device)); std::string tae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->tae_device)); std::string pmid_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->photomaker_device)); + std::string vision_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vision_device)); bool diffusion_backend_is_default = diffusion_backend_name.empty() || diffusion_backend_name == default_backend_name; bool clip_backend_is_default = (clip_backend_name.empty() || clip_backend_name == default_backend_name); @@ -702,9 +702,11 @@ class StableDiffusionGGML { // if tae_backend_name is empty, it will use the same backend as vae bool tae_backend_is_default = (tae_backend_name.empty() && vae_backend_is_default) || tae_backend_name == default_backend_name; bool pmid_backend_is_default = (pmid_backend_name.empty() || pmid_backend_name == default_backend_name); + // if vision_backend_name is empty, it will use the same backend as clip + bool vision_backend_is_default = (vision_backend_name.empty() && clip_backend_is_default) || vision_backend_name == default_backend_name; // if some backend is not specified or is the same as the default backend, use the default backend - bool use_default_backend = diffusion_backend_is_default || clip_backend_is_default || control_net_backend_is_default || 
vae_backend_is_default || tae_backend_is_default || pmid_backend_is_default; + bool use_default_backend = diffusion_backend_is_default || clip_backend_is_default || control_net_backend_is_default || vae_backend_is_default || tae_backend_is_default || pmid_backend_is_default || vision_backend_is_default; if (use_default_backend) { backend = init_named_backend(override_default_backend_name); @@ -973,7 +975,7 @@ class StableDiffusionGGML { if (diffusion_model->get_desc() == "Wan2.1-I2V-14B" || diffusion_model->get_desc() == "Wan2.1-FLF2V-14B" || diffusion_model->get_desc() == "Wan2.1-I2V-1.3B") { - clip_vision = std::make_shared(backend, + clip_vision = std::make_shared(vision_backend, offload_params_to_cpu, tensor_storage_map); clip_vision->alloc_params_buffer(); From 4d0b24f6bdb17cd1b2a7439d97671b1a94e280bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Wed, 21 Jan 2026 20:37:34 +0100 Subject: [PATCH 11/32] check backends at runtime --- src/common_block.hpp | 8 +++++--- src/ggml_extend.hpp | 28 ++++++++-------------------- src/model.cpp | 12 ------------ src/qwen_image.hpp | 9 ++++++--- src/z_image.hpp | 19 ++++++++++++------- 5 files changed, 31 insertions(+), 45 deletions(-) diff --git a/src/common_block.hpp b/src/common_block.hpp index 2cef389af..476c9499a 100644 --- a/src/common_block.hpp +++ b/src/common_block.hpp @@ -248,9 +248,6 @@ class FeedForward : public GGMLBlock { float scale = 1.f; if (precision_fix) { scale = 1.f / 128.f; -#ifdef SD_USE_VULKAN - force_prec_f32 = true; -#endif } // The purpose of the scale here is to prevent NaN issues in certain situations. 
// For example, when using Vulkan without enabling force_prec_f32, @@ -264,6 +261,11 @@ class FeedForward : public GGMLBlock { auto net_0 = std::dynamic_pointer_cast(blocks["net.0"]); auto net_2 = std::dynamic_pointer_cast(blocks["net.2"]); + #ifdef SD_USE_VULKAN + if(ggml_backend_is_vk(ctx->backend)){ + net_2->set_force_prec_32(true); + } + #endif x = net_0->forward(ctx, x); // [ne3, ne2, ne1, inner_dim] x = net_2->forward(ctx, x); // [ne3, ne2, ne1, dim_out] diff --git a/src/ggml_extend.hpp b/src/ggml_extend.hpp index 0fbade8a0..34d2ad009 100644 --- a/src/ggml_extend.hpp +++ b/src/ggml_extend.hpp @@ -28,26 +28,6 @@ #include "model.h" -#ifdef SD_USE_CUDA -#include "ggml-cuda.h" -#endif - -#ifdef SD_USE_METAL -#include "ggml-metal.h" -#endif - -#ifdef SD_USE_VULKAN -#include "ggml-vulkan.h" -#endif - -#ifdef SD_USE_OPENCL -#include "ggml-opencl.h" -#endif - -#ifdef SD_USE_SYCL -#include "ggml-sycl.h" -#endif - #include "rng.hpp" #include "util.h" @@ -2315,6 +2295,14 @@ class Linear : public UnaryBlock { force_prec_f32(force_prec_f32), scale(scale) {} + void set_scale(float scale_){ + scale = scale_; + } + + void set_force_prec_32(bool force_prec_f32_){ + force_prec_f32 = force_prec_f32_; + } + ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) { ggml_tensor* w = params["weight"]; ggml_tensor* b = nullptr; diff --git a/src/model.cpp b/src/model.cpp index d23b97fac..b7f9ea0c5 100644 --- a/src/model.cpp +++ b/src/model.cpp @@ -25,18 +25,6 @@ #include "name_conversion.h" #include "stable-diffusion.h" -#ifdef SD_USE_METAL -#include "ggml-metal.h" -#endif - -#ifdef SD_USE_VULKAN -#include "ggml-vulkan.h" -#endif - -#ifdef SD_USE_OPENCL -#include "ggml-opencl.h" -#endif - #define ST_HEADER_SIZE_LEN 8 uint64_t read_u64(uint8_t* buffer) { diff --git a/src/qwen_image.hpp b/src/qwen_image.hpp index 68af0e8e8..d4e40e877 100644 --- a/src/qwen_image.hpp +++ b/src/qwen_image.hpp @@ -95,9 +95,7 @@ namespace Qwen { float scale = 1.f / 32.f; bool force_prec_f32 = false; 
-#ifdef SD_USE_VULKAN - force_prec_f32 = true; -#endif + // The purpose of the scale here is to prevent NaN issues in certain situations. // For example when using CUDA but the weights are k-quants (not all prompts). blocks["to_out.0"] = std::shared_ptr(new Linear(inner_dim, out_dim, out_bias, false, force_prec_f32, scale)); @@ -123,6 +121,11 @@ namespace Qwen { auto to_k = std::dynamic_pointer_cast(blocks["to_k"]); auto to_v = std::dynamic_pointer_cast(blocks["to_v"]); auto to_out_0 = std::dynamic_pointer_cast(blocks["to_out.0"]); +#ifdef SD_USE_VULKAN + if(ggml_backend_is_vk(ctx->backend)){ + to_out_0->set_force_prec_32(true); + } +#endif auto norm_added_q = std::dynamic_pointer_cast(blocks["norm_added_q"]); auto norm_added_k = std::dynamic_pointer_cast(blocks["norm_added_k"]); diff --git a/src/z_image.hpp b/src/z_image.hpp index 53a7cf824..75334a1ae 100644 --- a/src/z_image.hpp +++ b/src/z_image.hpp @@ -31,10 +31,6 @@ namespace ZImage { : head_dim(head_dim), num_heads(num_heads), num_kv_heads(num_kv_heads), qk_norm(qk_norm) { blocks["qkv"] = std::make_shared(hidden_size, (num_heads + num_kv_heads * 2) * head_dim, false); float scale = 1.f; -#if GGML_USE_HIP - // Prevent NaN issues with certain ROCm setups - scale = 1.f / 16.f; -#endif blocks["out"] = std::make_shared(num_heads * head_dim, hidden_size, false, false, false, scale); if (qk_norm) { blocks["q_norm"] = std::make_shared(head_dim); @@ -51,6 +47,12 @@ namespace ZImage { int64_t N = x->ne[2]; auto qkv_proj = std::dynamic_pointer_cast(blocks["qkv"]); auto out_proj = std::dynamic_pointer_cast(blocks["out"]); +#if GGML_USE_HIP + // Prevent NaN issues with certain ROCm setups + if (ggml_backend_is_cuda(ctx->backend)) { + out_proj->set_scale(1.f / 16.f); + } +#endif auto qkv = qkv_proj->forward(ctx, x); // [N, n_token, (num_heads + num_kv_heads*2)*head_dim] qkv = ggml_reshape_4d(ctx->ggml_ctx, qkv, head_dim, num_heads + num_kv_heads * 2, qkv->ne[1], qkv->ne[2]); // [N, n_token, num_heads + num_kv_heads*2, 
head_dim] @@ -115,9 +117,7 @@ namespace ZImage { bool force_prec_f32 = false; float scale = 1.f / 128.f; -#ifdef SD_USE_VULKAN - force_prec_f32 = true; -#endif + // The purpose of the scale here is to prevent NaN issues in certain situations. // For example, when using CUDA but the weights are k-quants. blocks["w2"] = std::make_shared(hidden_dim, dim, false, false, force_prec_f32, scale); @@ -128,6 +128,11 @@ namespace ZImage { auto w1 = std::dynamic_pointer_cast(blocks["w1"]); auto w2 = std::dynamic_pointer_cast(blocks["w2"]); auto w3 = std::dynamic_pointer_cast(blocks["w3"]); +#ifdef SD_USE_VULKAN + if(ggml_backend_is_vk(ctx->backend)){ + w2->set_force_prec_32(true); + } +#endif auto x1 = w1->forward(ctx, x); auto x3 = w3->forward(ctx, x); From ead7116b26fd3a20bf82f0171699e00fdb5efd70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Wed, 21 Jan 2026 20:51:26 +0100 Subject: [PATCH 12/32] fix missing includes --- src/common_block.hpp | 4 ++++ src/qwen_image.hpp | 4 ++++ src/z_image.hpp | 8 ++++++++ 3 files changed, 16 insertions(+) diff --git a/src/common_block.hpp b/src/common_block.hpp index 476c9499a..deddb6165 100644 --- a/src/common_block.hpp +++ b/src/common_block.hpp @@ -3,6 +3,10 @@ #include "ggml_extend.hpp" +#ifdef SD_USE_VULKAN +#include "ggml-vulkan.h" +#endif + class DownSampleBlock : public GGMLBlock { protected: int channels; diff --git a/src/qwen_image.hpp b/src/qwen_image.hpp index d4e40e877..b4214d4d3 100644 --- a/src/qwen_image.hpp +++ b/src/qwen_image.hpp @@ -6,6 +6,10 @@ #include "common_block.hpp" #include "flux.hpp" +#ifdef SD_USE_VULKAN +#include "ggml-vulkan.h" +#endif + namespace Qwen { constexpr int QWEN_IMAGE_GRAPH_SIZE = 20480; diff --git a/src/z_image.hpp b/src/z_image.hpp index 75334a1ae..1d7f03490 100644 --- a/src/z_image.hpp +++ b/src/z_image.hpp @@ -7,6 +7,14 @@ #include "ggml_extend.hpp" #include "mmdit.hpp" +#ifdef SD_USE_VULKAN +#include "ggml-vulkan.h" +#endif + +#if GGML_USE_HIP +#include 
"ggml-cuda.h" +#endif + // Ref: https://github.com/Alpha-VLLM/Lumina-Image-2.0/blob/main/models/model.py // Ref: https://github.com/huggingface/diffusers/pull/12703 From 459701326c5910e4a94313a6d1d78ee6779e5654 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 23 Jan 2026 12:04:25 +0100 Subject: [PATCH 13/32] fix typo --- src/common_block.hpp | 2 +- src/ggml_extend.hpp | 2 +- src/qwen_image.hpp | 2 +- src/z_image.hpp | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/common_block.hpp b/src/common_block.hpp index deddb6165..a25fe9311 100644 --- a/src/common_block.hpp +++ b/src/common_block.hpp @@ -267,7 +267,7 @@ class FeedForward : public GGMLBlock { auto net_2 = std::dynamic_pointer_cast(blocks["net.2"]); #ifdef SD_USE_VULKAN if(ggml_backend_is_vk(ctx->backend)){ - net_2->set_force_prec_32(true); + net_2->set_force_prec_f32(true); } #endif diff --git a/src/ggml_extend.hpp b/src/ggml_extend.hpp index 34d2ad009..5f286d3a3 100644 --- a/src/ggml_extend.hpp +++ b/src/ggml_extend.hpp @@ -2299,7 +2299,7 @@ class Linear : public UnaryBlock { scale = scale_; } - void set_force_prec_32(bool force_prec_f32_){ + void set_force_prec_f32(bool force_prec_f32_){ force_prec_f32 = force_prec_f32_; } diff --git a/src/qwen_image.hpp b/src/qwen_image.hpp index b4214d4d3..af3c820bb 100644 --- a/src/qwen_image.hpp +++ b/src/qwen_image.hpp @@ -127,7 +127,7 @@ namespace Qwen { auto to_out_0 = std::dynamic_pointer_cast(blocks["to_out.0"]); #ifdef SD_USE_VULKAN if(ggml_backend_is_vk(ctx->backend)){ - to_out_0->set_force_prec_32(true); + to_out_0->set_force_prec_f32(true); } #endif diff --git a/src/z_image.hpp b/src/z_image.hpp index 1d7f03490..3272ec437 100644 --- a/src/z_image.hpp +++ b/src/z_image.hpp @@ -138,7 +138,7 @@ namespace ZImage { auto w3 = std::dynamic_pointer_cast(blocks["w3"]); #ifdef SD_USE_VULKAN if(ggml_backend_is_vk(ctx->backend)){ - w2->set_force_prec_32(true); + w2->set_force_prec_f32(true); } #endif From 
10763be97aa4c7552d7999f6c4dfa41c35e65ce4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Sun, 25 Jan 2026 18:31:54 +0100 Subject: [PATCH 14/32] multiple clip backend devices fix sdxl conditionner backends fix sd3 backend display --- examples/common/common.hpp | 2 +- src/conditioner.hpp | 76 ++++++++++++++++++++++++----- src/stable-diffusion.cpp | 99 +++++++++++++++++++++++++++----------- 3 files changed, 137 insertions(+), 40 deletions(-) diff --git a/examples/common/common.hpp b/examples/common/common.hpp index 750dabf05..44ecc6c10 100644 --- a/examples/common/common.hpp +++ b/examples/common/common.hpp @@ -615,7 +615,7 @@ struct SDContextParams { &photomaker_backend_device}, {"", "--vision-backend-device", - "device to use for clip-vision model (defaults to clip-backend-device)", + "device to use for clip-vision model (defaults to main-backend-device)", &vision_backend_device}, }; diff --git a/src/conditioner.hpp b/src/conditioner.hpp index 534a2f11f..b2e3d163b 100644 --- a/src/conditioner.hpp +++ b/src/conditioner.hpp @@ -2,8 +2,11 @@ #define __CONDITIONER_HPP__ #include "clip.hpp" +#include "ggml-alloc.h" +#include "ggml-backend.h" #include "llm.hpp" #include "t5.hpp" +#include "util.h" struct SDCondition { ggml_tensor* c_crossattn = nullptr; // aka context @@ -68,7 +71,7 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { std::vector token_embed_custom; std::map> embedding_pos_map; - FrozenCLIPEmbedderWithCustomWords(ggml_backend_t backend, + FrozenCLIPEmbedderWithCustomWords(std::vector backends, bool offload_params_to_cpu, const String2TensorStorage& tensor_storage_map, const std::map& orig_embedding_map, @@ -82,13 +85,27 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { tokenizer.add_special_token(name); } bool force_clip_f32 = !embedding_map.empty(); + + ggml_backend_t clip_backend = backends[0]; + if (sd_version_is_sd1(version)) { - text_model = std::make_shared(backend, offload_params_to_cpu, 
tensor_storage_map, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, true, force_clip_f32); + LOG_INFO("CLIP-L: using %s backend", ggml_backend_name(clip_backend)); + text_model = std::make_shared(clip_backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, true, force_clip_f32); } else if (sd_version_is_sd2(version)) { - text_model = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPEN_CLIP_VIT_H_14, true, force_clip_f32); + LOG_INFO("CLIP-H: using %s backend", ggml_backend_name(clip_backend)); + text_model = std::make_shared(clip_backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPEN_CLIP_VIT_H_14, true, force_clip_f32); } else if (sd_version_is_sdxl(version)) { - text_model = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, false, force_clip_f32); - text_model2 = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.1.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, false, force_clip_f32); + ggml_backend_t clip_g_backend = clip_backend; + if (backends.size() >= 2){ + clip_g_backend = backends[1]; + if (backends.size() > 2) { + LOG_WARN("More than 2 clip backends provided, but the model only supports 2 text encoders. 
Ignoring the rest."); + } + } + LOG_INFO("CLIP-L: using %s backend", ggml_backend_name(clip_backend)); + LOG_INFO("CLIP-G: using %s backend", ggml_backend_name(clip_g_backend)); + text_model = std::make_shared(clip_backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, false, force_clip_f32); + text_model2 = std::make_shared(clip_g_backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.1.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, false, force_clip_f32); } } @@ -715,13 +732,29 @@ struct SD3CLIPEmbedder : public Conditioner { std::shared_ptr clip_g; std::shared_ptr t5; - SD3CLIPEmbedder(ggml_backend_t backend, + SD3CLIPEmbedder(std::vector backends, bool offload_params_to_cpu, const String2TensorStorage& tensor_storage_map = {}) : clip_g_tokenizer(0) { bool use_clip_l = false; bool use_clip_g = false; bool use_t5 = false; + + ggml_backend_t clip_l_backend, clip_g_backend, t5_backend; + if (backends.size() == 1) { + clip_l_backend = clip_g_backend = t5_backend = backends[0]; + } else if (backends.size() == 2) { + clip_l_backend = clip_g_backend = backends[0]; + t5_backend = backends[1]; + } else if (backends.size() >= 3) { + clip_l_backend = backends[0]; + clip_g_backend = backends[1]; + t5_backend = backends[2]; + if (backends.size() > 3) { + LOG_WARN("More than 3 clip backends provided, but the model only supports 3 text encoders. 
Ignoring the rest."); + } + } + for (auto pair : tensor_storage_map) { if (pair.first.find("text_encoders.clip_l") != std::string::npos) { use_clip_l = true; @@ -736,13 +769,16 @@ struct SD3CLIPEmbedder : public Conditioner { return; } if (use_clip_l) { - clip_l = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, false); + LOG_INFO("CLIP-L: using %s backend", ggml_backend_name(clip_l_backend)); + clip_l = std::make_shared(clip_l_backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, false); } if (use_clip_g) { - clip_g = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_g.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, false); + LOG_INFO("CLIP-G: using %s backend", ggml_backend_name(clip_g_backend)); + clip_g = std::make_shared(clip_g_backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_g.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, false); } if (use_t5) { - t5 = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.t5xxl.transformer"); + LOG_INFO("T5-XXL: using %s backend", ggml_backend_name(t5_backend)); + t5 = std::make_shared(t5_backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.t5xxl.transformer"); } } @@ -1148,11 +1184,25 @@ struct FluxCLIPEmbedder : public Conditioner { std::shared_ptr t5; size_t chunk_len = 256; - FluxCLIPEmbedder(ggml_backend_t backend, + FluxCLIPEmbedder(std::vector backends, bool offload_params_to_cpu, const String2TensorStorage& tensor_storage_map = {}) { bool use_clip_l = false; bool use_t5 = false; + + + ggml_backend_t clip_l_backend, t5_backend; + if (backends.size() == 1) { + clip_l_backend = t5_backend = backends[0]; + } else if (backends.size() >= 2) { + clip_l_backend = backends[0]; + t5_backend = backends[1]; + if (backends.size() > 2) { + LOG_WARN("More than 2 clip 
backends provided, but the model only supports 2 text encoders. Ignoring the rest."); + } + } + + for (auto pair : tensor_storage_map) { if (pair.first.find("text_encoders.clip_l") != std::string::npos) { use_clip_l = true; @@ -1167,12 +1217,14 @@ struct FluxCLIPEmbedder : public Conditioner { } if (use_clip_l) { - clip_l = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, true); + LOG_INFO("CLIP-L: using %s backend", ggml_backend_name(clip_l_backend)); + clip_l = std::make_shared(clip_l_backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, true); } else { LOG_WARN("clip_l text encoder not found! Prompt adherence might be degraded."); } if (use_t5) { - t5 = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.t5xxl.transformer"); + LOG_INFO("T5-XXL: using %s backend", ggml_backend_name(clip_l_backend)); + t5 = std::make_shared(t5_backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.t5xxl.transformer"); } else { LOG_WARN("t5xxl text encoder not found! 
Prompt adherence might be degraded."); } diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index a80bab233..2aa6a9835 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -1,3 +1,4 @@ +#include "ggml-cpu.h" #include "ggml_extend.hpp" #include "model.h" @@ -5,6 +6,7 @@ #include "rng_mt19937.hpp" #include "rng_philox.hpp" #include "stable-diffusion.h" +#include #include "util.h" #include "auto_encoder_kl.hpp" @@ -516,6 +518,29 @@ void add_rpc_device(const char* servers_cstr){ add_rpc_devices(servers); } +std::vector sanitize_backend_name_list(std::string name) { + std::vector vec = {}; + if (name == "" || backend_name_exists(name)) { + // single backend + vec.push_back(name); + } else if (name.find(",") != std::string::npos) { + // comma-separated backend names + std::stringstream ss(name); + std::string token; + while (std::getline(ss, token, ',')) { + if (token == "" || backend_name_exists(token)) { + vec.push_back(token); + } else { + LOG_WARN("backend name %s not found, using default", token.c_str()); + vec.push_back(""); + } + } + } else { + vec.push_back(""); + } + return vec; +} + std::vector> list_backends_vector() { std::vector> backends; const int device_count = ggml_backend_dev_count(); @@ -569,13 +594,14 @@ class StableDiffusionGGML { public: ggml_backend_t backend = nullptr; // general backend ggml_backend_t diffusion_backend = nullptr; - ggml_backend_t clip_backend = nullptr; ggml_backend_t control_net_backend = nullptr; ggml_backend_t vae_backend = nullptr; ggml_backend_t tae_backend = nullptr; ggml_backend_t pmid_backend = nullptr; ggml_backend_t vision_backend = nullptr; + std::vector clip_backends = {nullptr}; + SDVersion version; bool vae_decode_only = false; bool external_vae_is_invalid = false; @@ -625,8 +651,10 @@ class StableDiffusionGGML { if (diffusion_backend != backend) { ggml_backend_free(diffusion_backend); } - if (clip_backend != backend) { - ggml_backend_free(clip_backend); + for(auto clip_backend : 
clip_backends) { + if (clip_backend != backend) { + ggml_backend_free(clip_backend); + } } if (control_net_backend != backend) { ggml_backend_free(control_net_backend); @@ -688,7 +716,7 @@ class StableDiffusionGGML { } std::string diffusion_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->diffusion_device)); - std::string clip_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->clip_device)); + std::vector clip_backend_names = sanitize_backend_name_list(SAFE_STR(sd_ctx_params->clip_device)); std::string control_net_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->control_net_device)); std::string vae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vae_device)); std::string tae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->tae_device)); @@ -696,17 +724,22 @@ class StableDiffusionGGML { std::string vision_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vision_device)); bool diffusion_backend_is_default = diffusion_backend_name.empty() || diffusion_backend_name == default_backend_name; - bool clip_backend_is_default = (clip_backend_name.empty() || clip_backend_name == default_backend_name); + bool clip_backends_are_default = true; + for (const auto& clip_backend_name : clip_backend_names) { + if (!clip_backend_name.empty() && clip_backend_name != default_backend_name) { + clip_backends_are_default = false; + break; + } + } bool control_net_backend_is_default = (control_net_backend_name.empty() || control_net_backend_name == default_backend_name); bool vae_backend_is_default = (vae_backend_name.empty() || vae_backend_name == default_backend_name); // if tae_backend_name is empty, it will use the same backend as vae bool tae_backend_is_default = (tae_backend_name.empty() && vae_backend_is_default) || tae_backend_name == default_backend_name; bool pmid_backend_is_default = (pmid_backend_name.empty() || pmid_backend_name == default_backend_name); - // if vision_backend_name is empty, it will use the 
same backend as clip - bool vision_backend_is_default = (vision_backend_name.empty() && clip_backend_is_default) || vision_backend_name == default_backend_name; + bool vision_backend_is_default = (vision_backend_name.empty() || vision_backend_name == default_backend_name); // if some backend is not specified or is the same as the default backend, use the default backend - bool use_default_backend = diffusion_backend_is_default || clip_backend_is_default || control_net_backend_is_default || vae_backend_is_default || tae_backend_is_default || pmid_backend_is_default || vision_backend_is_default; + bool use_default_backend = diffusion_backend_is_default || clip_backends_are_default || control_net_backend_is_default || vae_backend_is_default || tae_backend_is_default || pmid_backend_is_default || vision_backend_is_default; if (use_default_backend) { backend = init_named_backend(override_default_backend_name); @@ -890,13 +923,18 @@ class StableDiffusionGGML { } { - clip_backend = backend; - if (!clip_backend_is_default) { - clip_backend = init_named_backend(clip_backend_name); - LOG_INFO("CLIP: Using %s backend", ggml_backend_name(clip_backend)); + if (!clip_backends_are_default) { + clip_backends.clear(); + for(auto clip_backend_name : clip_backend_names){ + auto clip_backend = init_named_backend(clip_backend_name); + LOG_INFO("CLIP: Using %s backend", ggml_backend_name(clip_backend)); + clip_backends.push_back(clip_backend); + } + }else{ + clip_backends = {backend}; } if (sd_version_is_sd3(version)) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends, offload_params_to_cpu, tensor_storage_map); diffusion_model = std::make_shared(diffusion_backend, @@ -920,20 +958,20 @@ class StableDiffusionGGML { "--chroma-disable-dit-mask as a workaround."); } - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, 
sd_ctx_params->chroma_use_t5_mask, sd_ctx_params->chroma_t5_mask_pad); } else if (version == VERSION_OVIS_IMAGE) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, version, "", false); } else { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends, offload_params_to_cpu, tensor_storage_map); } @@ -944,7 +982,7 @@ class StableDiffusionGGML { sd_ctx_params->chroma_use_dit_mask); } else if (sd_version_is_flux2(version)) { bool is_chroma = false; - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, version); @@ -954,7 +992,7 @@ class StableDiffusionGGML { version, sd_ctx_params->chroma_use_dit_mask); } else if (sd_version_is_wan(version)) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, true, @@ -986,7 +1024,7 @@ class StableDiffusionGGML { if (!vae_decode_only) { enable_vision = true; } - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, version, @@ -1007,7 +1045,7 @@ class StableDiffusionGGML { tensor_storage_map, "model.diffusion_model"); } else if (sd_version_is_z_image(version)) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, version); @@ -1022,14 +1060,14 @@ class StableDiffusionGGML { embbeding_map.emplace(SAFE_STR(sd_ctx_params->embeddings[i].name), SAFE_STR(sd_ctx_params->embeddings[i].path)); } if (strstr(SAFE_STR(sd_ctx_params->photo_maker_path), "v2")) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends, offload_params_to_cpu, tensor_storage_map, 
embbeding_map, version, PM_VERSION_2); } else { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends, offload_params_to_cpu, tensor_storage_map, embbeding_map, @@ -1327,7 +1365,9 @@ class StableDiffusionGGML { size_t total_params_ram_size = 0; size_t total_params_vram_size = 0; - if (ggml_backend_is_cpu(clip_backend)) { + + // TODO: split by individual text encoders + if (ggml_backend_is_cpu(clip_backends[0])) { total_params_ram_size += clip_params_mem_size + pmid_params_mem_size; } else { total_params_vram_size += clip_params_mem_size + pmid_params_mem_size; @@ -1359,7 +1399,8 @@ class StableDiffusionGGML { total_params_vram_size / 1024.0 / 1024.0, total_params_ram_size / 1024.0 / 1024.0, clip_params_mem_size / 1024.0 / 1024.0, - ggml_backend_is_cpu(clip_backend) ? "RAM" : "VRAM", + // TODO: split + ggml_backend_is_cpu(clip_backends[0]) ? "RAM" : "VRAM", unet_params_mem_size / 1024.0 / 1024.0, ggml_backend_is_cpu(backend) ? "RAM" : "VRAM", vae_params_mem_size / 1024.0 / 1024.0, @@ -1559,7 +1600,11 @@ class StableDiffusionGGML { for (auto& kv : lora_state_diff) { int64_t t0 = ggml_time_ms(); // TODO: Fix that - if(diffusion_backend!=clip_backend && !ggml_backend_is_cpu(clip_backend)){ + bool are_clip_backends_compatible = true; + for (auto backend: clip_backends){ + are_clip_backends_compatible = are_clip_backends_compatible && (diffusion_backend==backend || ggml_backend_is_cpu(backend)); + } + if(!are_clip_backends_compatible){ LOG_WARN("Diffusion models and text encoders are running on different backends. 
This may cause issues when immediately applying LoRAs."); } auto lora = load_lora_model_from_file(kv.first, kv.second, diffusion_backend); @@ -1619,8 +1664,8 @@ class StableDiffusionGGML { for (auto& kv : lora_state_diff) { const std::string& lora_id = kv.first; float multiplier = kv.second; - - auto lora = load_lora_model_from_file(lora_id, multiplier, clip_backend, lora_tensor_filter); + //TODO: split by model + auto lora = load_lora_model_from_file(lora_id, multiplier, clip_backends[0], lora_tensor_filter); if (lora && !lora->lora_tensors.empty()) { lora->preprocess_lora_tensors(tensors); cond_stage_lora_models.push_back(lora); From f3076ea397160ef191a93fb767d614670912f63f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Wed, 28 Jan 2026 18:48:03 +0100 Subject: [PATCH 15/32] update help message --- examples/common/common.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/common/common.hpp b/examples/common/common.hpp index 44ecc6c10..66a6e6719 100644 --- a/examples/common/common.hpp +++ b/examples/common/common.hpp @@ -591,7 +591,7 @@ struct SDContextParams { &diffusion_backend_device}, {"", "--clip-backend-device", - "device to use for clip (defaults to main-backend-device)", + "device to use for clip (defaults to main-backend-device). 
Can be a comma-separated list of devices for models with multiple encoders", &clip_backend_device}, {"", "--vae-backend-device", From a97a97832e37f32d95e683658671c570c3e730ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Wed, 28 Jan 2026 20:44:09 +0100 Subject: [PATCH 16/32] Add RPC documentation --- docs/rpc.md | 202 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 202 insertions(+) create mode 100644 docs/rpc.md diff --git a/docs/rpc.md b/docs/rpc.md new file mode 100644 index 000000000..0a9b7ccd2 --- /dev/null +++ b/docs/rpc.md @@ -0,0 +1,202 @@ +# Building and Using the RPC Server with `stable-diffusion.cpp` + +This guide covers how to build a version of the RPC server from `llama.cpp` that is compatible with your version of `stable-diffusion.cpp` to manage multi-backends setups. RPC allows you to offload specific model components to a remote server. + +> **Note on Model Location:** The model files (e.g., `.safetensors` or `.gguf`) remain on the **Client** machine. The client parses the file and transmits the necessary tensor data and computational graphs to the server. The server does not need to store the model files locally. + +## 1. Building `stable-diffusion.cpp` with RPC client + +First, you should build the client application from source. It requires `GGML_RPC=ON` to include the RPC backend to your client. +```bash +mkdir build +cd build +cmake .. \ + -DGGML_RPC=ON \ + # Add other build flags here (e.g., -DSD_VULKAN=ON) +cmake --build . --config Release -j $(nproc) +``` + +> **Note:** Ensure you add the other flags you would normally use (e.g., `-DSD_VULKAN=ON`, `-DSD_CUDA=ON`, `-DSD_HIPBLAS=ON`, or `-DGGML_METAL=ON`), for more information about building `stable-diffusion.cpp` from source, please refer to the `build.md` documentation. + +## 2. 
Ensure `llama.cpp` is at the correct commit + +`stable-diffusion.cpp`'s RPC client is designed to work with a specific version of `llama.cpp` (compatible with the `ggml` submodule) to ensure API compatibility. The commit hash for `llama.cpp` is stored in `ggml/scripts/sync-llama.last`. + +> **Start from Root:** Perform these steps from the root of your `stable-diffusion.cpp` directory. + +1. Read the target commit hash from the submodule tracker: + ```bash + # Linux / WSL / MacOS + HASH=$(cat ggml/scripts/sync-llama.last) + + # Windows (PowerShell) + $HASH = Get-Content -Path "ggml\scripts\sync-llama.last" + ``` + +2. Clone `llama.cpp` at the target commit . + ```bash + git clone https://github.com/ggml-org/llama.cpp.git + cd llama.cpp + git checkout $HASH + ``` + +To save on download time and storage, you can use a shallow clone to download only the target commit: + ```bash + mkdir -p llama.cpp + cd llama.cpp + git init + git remote add origin https://github.com/ggml-org/llama.cpp.git + git fetch --depth 1 origin $HASH + git checkout FETCH_HEAD + ``` + +## 3. Build `llama.cpp` (RPC Server) + +The RPC server acts as the worker. You must explicitly enable the **backend** (the hardware interface, such as CUDA for Nvidia, Metal for Apple Silicon, or Vulkan) when building, otherwise the server will default to using only the CPU. + +To find the correct flags, refer to the official documentation for the `llama.cpp` repository. + +> **Crucial:** You must include the compiler flags required to satisfy the API compatibility with `stable-diffusion.cpp` (`-DGGML_MAX_NAME=128`). Without this flag, `GGML_MAX_NAME` will default to `64` for the server, and data transfers between the client and server will fail. Of course, `-DGGML_RPC` must also be enabled. +> +> I recommend disabling the `LLAMA_CURL` flag to avoid unnecessary dependencies, and disabling shared library builds to avoid potential conflicts. + +> **Build Target:** We are specifically building the `rpc-server` target. 
This prevents the build system from compiling the entire `llama.cpp` suite (like `llama-cli`), making the build significantly faster. + +### Linux / WSL (Vulkan) +```bash +mkdir build +cd build +cmake .. -DGGML_RPC=ON \ + -DGGML_VULKAN=ON \ # Ensure backend is enabled + -DGGML_BUILD_SHARED_LIBS=OFF \ + -DLLAMA_CURL=OFF \ + -DCMAKE_C_FLAGS=-DGGML_MAX_NAME=128 \ + -DCMAKE_CXX_FLAGS=-DGGML_MAX_NAME=128 +cmake --build . --config Release --target rpc-server -j $(nproc) +``` + +### macOS (Metal) +```bash +mkdir build +cd build +cmake .. -DGGML_RPC=ON \ + -DGGML_METAL=ON \ + -DGGML_BUILD_SHARED_LIBS=OFF \ + -DLLAMA_CURL=OFF \ + -DCMAKE_C_FLAGS=-DGGML_MAX_NAME=128 \ + -DCMAKE_CXX_FLAGS=-DGGML_MAX_NAME=128 +cmake --build . --config Release --target rpc-server +``` + +### Windows (Visual Studio 2022, Vulkan) +```powershell +mkdir build +cd build +cmake .. -G "Visual Studio 17 2022" -A x64 ` + -DGGML_RPC=ON ` + -DGGML_VULKAN=ON ` + -DGGML_BUILD_SHARED_LIBS=OFF ` + -DLLAMA_CURL=OFF ` + -DCMAKE_C_FLAGS=-DGGML_MAX_NAME=128 ` + -DCMAKE_CXX_FLAGS=-DGGML_MAX_NAME=128 +cmake --build . --config Release --target rpc-server +``` + +## 4. Usage + +Once both applications are built, you can run the server and the client to manage your GPU allocation. + +### Step A: Run the RPC Server + +Start the server. It listens for connections on the default address (usually `localhost:50052`). If your server is on a different machine, ensure the server binds to the correct interface and your firewall allows the connection. + +**On the Server :** +If running on the same machine, you can use the default address: +```bash +./rpc-server +``` +If you want to allow connections from other machines on the network: +```bash +./rpc-server --host 0.0.0.0 +``` + +> **Security Warning:** The RPC server does not currently support authentication or encryption. **Only run the server on trusted local networks**. Never expose the RPC server directly to the open internet. 
+ > **Drivers & Hardware:** Ensure the Server machine has the necessary drivers installed and functional (e.g., Nvidia Drivers for CUDA, Vulkan SDK, or Metal). If no devices are found, the server will simply fall back to CPU usage. + +### Step B: Check if the client is able to connect to the server and see the available devices + +We're assuming the server is running on your local machine, and listening on the default port `50052`. If it's running on a different machine, you can replace `localhost` with the IP address of the server. + +**On the Client:** +```bash +./sd-cli --rpc localhost:50052 --list-devices +``` +If the server is running and the client is able to connect, you should see `RPC0 localhost:50052` in the list of devices. + +Example output: +(Client built without GPU acceleration, two GPUs available on the server) +``` +List of available GGML devices: +Name Description +------------------- +CPU AMD Ryzen 9 5900X 12-Core Processor +RPC0 localhost:50052 +RPC1 localhost:50052 +``` + +### Step C: Run with RPC device + +If everything is working correctly, you can now run the client while offloading some or all of the work to the RPC server. + +Example: Setting the main backend to the RPC0 device for doing all the work on the server. + +```bash +./sd-cli -m models/sd1.5.safetensors -p "A cat" --rpc localhost:50052 --main-backend-device RPC0 +``` + +--- + +## 5. Scaling: Multiple RPC Servers + +You can connect the client to multiple RPC servers simultaneously to scale out your hardware usage. + +Example: A main machine (192.168.1.10) with 3 GPUs, with one GPU running CUDA and the other two running Vulkan, and a second machine (192.168.1.11) with only one GPU. 
+ +**On the first machine (Running two server instances):** + +**Terminal 1 (CUDA):** +```bash +# Linux / macOS / WSL +export CUDA_VISIBLE_DEVICES=0 +./rpc-server-cuda --host 0.0.0.0 + +# Windows PowerShell +$env:CUDA_VISIBLE_DEVICES="0" +./rpc-server-cuda --host 0.0.0.0 +``` + +**Terminal 2 (Vulkan):** +```bash +./rpc-server-vulkan --host 0.0.0.0 --port 50053 -d Vulkan1,Vulkan2 +``` + +**On the second machine:** +```bash +./rpc-server --host 0.0.0.0 +``` + +**On the Client:** +Pass multiple server addresses separated by commas. + +```bash +./sd-cli --rpc 192.168.1.10:50052,192.168.1.10:50053,192.168.1.11:50052 --list-devices +``` + +The client will map these servers to sequential device IDs (e.g., RPC0 from the first server, RPC1 and RPC2 from the second, and RPC3 from the third). With this setup, you could for example use RPC0 for the main backend, RPC1 and RPC2 for the text encoders, and RPC3 for the VAE. + +--- + +## 6. Performance Considerations + +RPC performance is heavily dependent on network bandwidth, as large weights and activations must be transferred back and forth over the network, especially for large models, or when using high resolutions. For best results, ensure your network connection is stable and has sufficient bandwidth (>1Gbps recommended). \ No newline at end of file From 20e6ba47640348439e37e1249ac799146b57f29a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Wed, 28 Jan 2026 20:44:13 +0100 Subject: [PATCH 17/32] update docs --- examples/cli/README.md | 16 +++++++++++++--- examples/server/README.md | 14 +++++++++++--- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/examples/cli/README.md b/examples/cli/README.md index 904f3c441..38565cf9b 100644 --- a/examples/cli/README.md +++ b/examples/cli/README.md @@ -19,6 +19,8 @@ CLI Options: -M, --mode run mode, one of [img_gen, vid_gen, upscale, convert], default: img_gen --preview preview method. 
must be one of the following [none, proj, tae, vae] (default is none) -h, --help show this help message and exit + --rpc add a rpc device + --list-devices list available ggml compute devices Context Options: -m, --model path to full model @@ -41,6 +43,17 @@ Context Options: --tensor-type-rules weight type per tensor pattern (example: "^vae\.=f16,model\.=q8_0") --photo-maker path to PHOTOMAKER model --upscale-model path to esrgan model. + --main-backend-device default device to use for all backends (defaults to main gpu device if hardware acceleration is available, otherwise + cpu) + --diffusion-backend-device device to use for diffusion (defaults to main-backend-device) + --clip-backend-device device to use for clip (defaults to main-backend-device). Can be a comma-separated list of devices for models with + multiple encoders + --vae-backend-device device to use for vae (defaults to main-backend-device). Also applies to tae, unless tae-backend-device is specified + --tae-backend-device device to use for tae (defaults to vae-backend-device) + --control-net-backend-device device to use for control net (defaults to main-backend-device) + --upscaler-backend-device device to use for upscaling models (defaults to main-backend-device) + --photomaker-backend-device device to use for photomaker (defaults to main-backend-device) + --vision-backend-device device to use for clip-vision model (defaults to main-backend-device) -t, --threads number of threads to use during computation (default: -1). 
If threads <= 0, then threads will be set to the number of CPU physical cores --chroma-t5-mask-pad t5 mask pad size of chroma @@ -49,9 +62,6 @@ Context Options: --force-sdxl-vae-conv-scale force use of conv scale on sdxl vae --offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM when needed --mmap whether to memory-map model - --control-net-cpu keep controlnet in cpu (for low vram) - --clip-on-cpu keep clip in cpu (for low vram) - --vae-on-cpu keep vae in cpu (for low vram) --fa use flash attention --diffusion-fa use flash attention in the diffusion model only --diffusion-conv-direct use ggml_conv2d_direct in the diffusion model diff --git a/examples/server/README.md b/examples/server/README.md index 8aa2158f5..5a44e6c4b 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -121,6 +121,17 @@ Context Options: --tensor-type-rules weight type per tensor pattern (example: "^vae\.=f16,model\.=q8_0") --photo-maker path to PHOTOMAKER model --upscale-model path to esrgan model. + --main-backend-device default device to use for all backends (defaults to main gpu device if hardware acceleration is available, otherwise + cpu) + --diffusion-backend-device device to use for diffusion (defaults to main-backend-device) + --clip-backend-device device to use for clip (defaults to main-backend-device). Can be a comma-separated list of devices for models with + multiple encoders + --vae-backend-device device to use for vae (defaults to main-backend-device). 
Also applies to tae, unless tae-backend-device is specified + --tae-backend-device device to use for tae (defaults to vae-backend-device) + --control-net-backend-device device to use for control net (defaults to main-backend-device) + --upscaler-backend-device device to use for upscaling models (defaults to main-backend-device) + --photomaker-backend-device device to use for photomaker (defaults to main-backend-device) + --vision-backend-device device to use for clip-vision model (defaults to main-backend-device) -t, --threads number of threads to use during computation (default: -1). If threads <= 0, then threads will be set to the number of CPU physical cores --chroma-t5-mask-pad t5 mask pad size of chroma @@ -129,9 +140,6 @@ Context Options: --force-sdxl-vae-conv-scale force use of conv scale on sdxl vae --offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM when needed --mmap whether to memory-map model - --control-net-cpu keep controlnet in cpu (for low vram) - --clip-on-cpu keep clip in cpu (for low vram) - --vae-on-cpu keep vae in cpu (for low vram) --fa use flash attention --diffusion-fa use flash attention in the diffusion model only --diffusion-conv-direct use ggml_conv2d_direct in the diffusion model From 970304eb89aff58de5431ee4df6bf0cf9fb3ec45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Wed, 28 Jan 2026 21:06:02 +0100 Subject: [PATCH 18/32] update RPC docs --- docs/rpc.md | 42 ++++++++++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/docs/rpc.md b/docs/rpc.md index 0a9b7ccd2..44485478c 100644 --- a/docs/rpc.md +++ b/docs/rpc.md @@ -1,12 +1,13 @@ # Building and Using the RPC Server with `stable-diffusion.cpp` -This guide covers how to build a version of the RPC server from `llama.cpp` that is compatible with your version of `stable-diffusion.cpp` to manage multi-backends setups. 
RPC allows you to offload specific model components to a remote server. +This guide covers how to build a version of [the RPC server from `llama.cpp`](https://github.com/ggml-org/llama.cpp/blob/master/tools/rpc/README.md) that is compatible with your version of `stable-diffusion.cpp` to manage multi-backends setups. RPC allows you to offload specific model components to a remote server. > **Note on Model Location:** The model files (e.g., `.safetensors` or `.gguf`) remain on the **Client** machine. The client parses the file and transmits the necessary tensor data and computational graphs to the server. The server does not need to store the model files locally. ## 1. Building `stable-diffusion.cpp` with RPC client First, you should build the client application from source. It requires `GGML_RPC=ON` to include the RPC backend to your client. + ```bash mkdir build cd build @@ -16,7 +17,7 @@ cmake .. \ cmake --build . --config Release -j $(nproc) ``` -> **Note:** Ensure you add the other flags you would normally use (e.g., `-DSD_VULKAN=ON`, `-DSD_CUDA=ON`, `-DSD_HIPBLAS=ON`, or `-DGGML_METAL=ON`), for more information about building `stable-diffusion.cpp` from source, please refer to the `build.md` documentation. +> **Note:** Ensure you add the other flags you would normally use (e.g., `-DSD_VULKAN=ON`, `-DSD_CUDA=ON`, `-DSD_HIPBLAS=ON`, or `-DGGML_METAL=ON`), for more information about building `stable-diffusion.cpp` from source, please refer to the [build.md](build.md) documentation. ## 2. Ensure `llama.cpp` is at the correct commit @@ -25,6 +26,7 @@ cmake --build . --config Release -j $(nproc) > **Start from Root:** Perform these steps from the root of your `stable-diffusion.cpp` directory. 1. Read the target commit hash from the submodule tracker: + ```bash # Linux / WSL / MacOS HASH=$(cat ggml/scripts/sync-llama.last) @@ -39,8 +41,7 @@ cmake --build . 
--config Release -j $(nproc) cd llama.cpp git checkout $HASH ``` - -To save on download time and storage, you can use a shallow clone to download only the target commit: + To save on download time and storage, you can use a shallow clone to download only the target commit: ```bash mkdir -p llama.cpp cd llama.cpp @@ -54,15 +55,16 @@ To save on download time and storage, you can use a shallow clone to download on The RPC server acts as the worker. You must explicitly enable the **backend** (the hardware interface, such as CUDA for Nvidia, Metal for Apple Silicon, or Vulkan) when building, otherwise the server will default to using only the CPU. -To find the correct flags, refer to the official documentation for the `llama.cpp` repository. +To find the correct flags for your system, refer to the official documentation for the [`llama.cpp`](https://github.com/ggml-org/llama.cpp/blob/master/docs/build.md) repository. > **Crucial:** You must include the compiler flags required to satisfy the API compatibility with `stable-diffusion.cpp` (`-DGGML_MAX_NAME=128`). Without this flag, `GGML_MAX_NAME` will default to `64` for the server, and data transfers between the client and server will fail. Of course, `-DGGML_RPC` must also be enabled. > > I recommend disabling the `LLAMA_CURL` flag to avoid unnecessary dependencies, and disabling shared library builds to avoid potential conflicts. -> **Build Target:** We are specifically building the `rpc-server` target. This prevents the build system from compiling the entire `llama.cpp` suite (like `llama-cli`), making the build significantly faster. +> **Build Target:** We are specifically building the `rpc-server` target. This prevents the build system from compiling the entire `llama.cpp` suite (like `llama-server`), making the build significantly faster. ### Linux / WSL (Vulkan) + ```bash mkdir build cd build @@ -76,6 +78,7 @@ cmake --build . 
--config Release --target rpc-server -j $(nproc) ``` ### macOS (Metal) + ```bash mkdir build cd build @@ -89,6 +92,7 @@ cmake --build . --config Release --target rpc-server ``` ### Windows (Visual Studio 2022, Vulkan) + ```powershell mkdir build cd build @@ -112,10 +116,13 @@ Start the server. It listens for connections on the default address (usually `lo **On the Server :** If running on the same machine, you can use the default address: + ```bash ./rpc-server ``` + If you want to allow connections from other machines on the network: + ```bash ./rpc-server --host 0.0.0.0 ``` @@ -129,13 +136,16 @@ If you want to allow connections from other machines on the network: We're assuming the server is running on your local machine, and listening on the default port `50052`. If it's running on a different machine, you can replace `localhost` with the IP address of the server. **On the Client:** + ```bash ./sd-cli --rpc localhost:50052 --list-devices ``` + If the server is running and the client is able to connect, you should see `RPC0 localhost:50052` in the list of devices. 
-Example output: +Example output: (Client built without GPU acceleration, two GPUs available on the server) + ``` List of available GGML devices: Name Description @@ -166,23 +176,31 @@ Example: A main machine (192.168.1.10) with 3 GPUs, with one GPU running CUDA an **On the first machine (Running two server instances):** **Terminal 1 (CUDA):** + ```bash -# Linux / macOS / WSL +# Linux / WSL export CUDA_VISIBLE_DEVICES=0 -./rpc-server-cuda --host 0.0.0.0 +cd ./build_cuda/bin/Release +./rpc-server --host 0.0.0.0 # Windows PowerShell $env:CUDA_VISIBLE_DEVICES="0" -./rpc-server-cuda --host 0.0.0.0 +cd .\build_cuda\bin\Release +./rpc-server --host 0.0.0.0 ``` **Terminal 2 (Vulkan):** + ```bash -./rpc-server-vulkan --host 0.0.0.0 --port 50053 -d Vulkan1,Vulkan2 +cd ./build_vulkan/bin/Release +# ignore the first GPU (used by CUDA server) +./rpc-server --host 0.0.0.0 --port 50053 -d Vulkan1,Vulkan2 ``` **On the second machine:** + ```bash +cd ./build/bin/Release ./rpc-server --host 0.0.0.0 ``` @@ -199,4 +217,4 @@ The client will map these servers to sequential device IDs (e.g., RPC0 from the ## 6. Performance Considerations -RPC performance is heavily dependent on network bandwidth, as large weights and activations must be transferred back and forth over the network, especially for large models, or when using high resolutions. For best results, ensure your network connection is stable and has sufficient bandwidth (>1Gbps recommended). \ No newline at end of file +RPC performance is heavily dependent on network bandwidth, as large weights and activations must be transferred back and forth over the network, especially for large models, or when using high resolutions. For best results, ensure your network connection is stable and has sufficient bandwidth (>1Gbps recommended). 
From a7925797ee12b7967505cb232f0d85cc4effa4b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Thu, 29 Jan 2026 12:34:32 +0100 Subject: [PATCH 19/32] fix apply_loras_immediately when using different non-CPU backends --- src/stable-diffusion.cpp | 56 +++++++++++++++++++++++++++++++++------- 1 file changed, 47 insertions(+), 9 deletions(-) diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 2aa6a9835..9f95ddf27 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -1598,22 +1598,60 @@ class StableDiffusionGGML { } for (auto& kv : lora_state_diff) { + bool applied = false; int64_t t0 = ggml_time_ms(); // TODO: Fix that - bool are_clip_backends_compatible = true; + bool are_clip_backends_similar = true; for (auto backend: clip_backends){ - are_clip_backends_compatible = are_clip_backends_compatible && (diffusion_backend==backend || ggml_backend_is_cpu(backend)); + are_clip_backends_similar = are_clip_backends_similar && (clip_backends[0]==backend || ggml_backend_is_cpu(backend)); } - if(!are_clip_backends_compatible){ - LOG_WARN("Diffusion models and text encoders are running on different backends. This may cause issues when immediately applying LoRAs."); + if(!are_clip_backends_similar){ + LOG_WARN("Text encoders are running on different backends. 
This may cause issues when immediately applying LoRAs."); } - auto lora = load_lora_model_from_file(kv.first, kv.second, diffusion_backend); - if (!lora || lora->lora_tensors.empty()) { - continue; + auto lora_tensor_filter_diff = [&](const std::string& tensor_name) { + if (is_diffusion_model_name(tensor_name)) { + return true; + } + return false; + }; + auto lora = load_lora_model_from_file(kv.first, kv.second, diffusion_backend, lora_tensor_filter_diff); + if (lora && !lora->lora_tensors.empty()) { + lora->apply(tensors, version, n_threads); + lora->free_params_buffer(); + applied = true; + } + + auto lora_tensor_filter_cond = [&](const std::string& tensor_name) { + if (is_cond_stage_model_name(tensor_name)) { + return true; + } + return false; + }; + // TODO: split by model + lora = load_lora_model_from_file(kv.first, kv.second, clip_backends[0], lora_tensor_filter_cond); + if (lora && !lora->lora_tensors.empty()) { + lora->apply(tensors, version, n_threads); + lora->free_params_buffer(); + applied = true; } - lora->apply(tensors, version, n_threads); - lora->free_params_buffer(); + auto lora_tensor_filter_first = [&](const std::string& tensor_name) { + if (is_first_stage_model_name(tensor_name)) { + return true; + } + return false; + }; + auto first_stage_backend = use_tiny_autoencoder ? 
tae_backend : vae_backend; + lora = load_lora_model_from_file(kv.first, kv.second, first_stage_backend, lora_tensor_filter_first); + if (lora && !lora->lora_tensors.empty()) { + lora->apply(tensors, version, n_threads); + lora->free_params_buffer(); + applied = true; + } + + if (!applied) { + continue; + } int64_t t1 = ggml_time_ms(); LOG_INFO("lora '%s' applied, taking %.2fs", kv.first.c_str(), (t1 - t0) * 1.0f / 1000); From 9e3229ba60eaaaed84d58ccefcdfa4c244d1bc51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Mon, 16 Feb 2026 15:19:51 +0100 Subject: [PATCH 20/32] Force sequential tensor loading when using RPC --- CMakeLists.txt | 6 ++++++ docs/rpc.md | 4 ++-- src/stable-diffusion.cpp | 45 ++++++++++++++++++++++++++++++++++++---- 3 files changed, 49 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index bad1ba4c2..afc756ad7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -77,6 +77,12 @@ if(SD_MUSA) add_definitions(-DSD_USE_CUDA) endif() +if (SD_RPC) + message("-- Use RPC as backend stable-diffusion") + set(GGML_RPC ON) + add_definitions(-DSD_USE_RPC) +endif () + set(SD_LIB stable-diffusion) file(GLOB SD_LIB_SOURCES diff --git a/docs/rpc.md b/docs/rpc.md index 44485478c..2f4e92282 100644 --- a/docs/rpc.md +++ b/docs/rpc.md @@ -6,13 +6,13 @@ This guide covers how to build a version of [the RPC server from `llama.cpp`](ht ## 1. Building `stable-diffusion.cpp` with RPC client -First, you should build the client application from source. It requires `GGML_RPC=ON` to include the RPC backend to your client. +First, you should build the client application from source. It requires `SD_RPC=ON` to include the RPC backend to your client. ```bash mkdir build cd build cmake .. \ - -DGGML_RPC=ON \ + -DSD_RPC=ON \ # Add other build flags here (e.g., -DSD_VULKAN=ON) cmake --build . 
--config Release -j $(nproc) ``` diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 9f95ddf27..6245419fc 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -27,6 +27,10 @@ #include "latent-preview.h" #include "name_conversion.h" +#if SD_USE_RPC +#include "ggml-rpc.h" +#endif + const char* model_version_to_str[] = { "SD 1.x", "SD 1.x Inpaint", @@ -1239,7 +1243,13 @@ class StableDiffusionGGML { } return false; }; - if (!pmid_lora->load_from_file(n_threads, lora_tensor_filter)) { + int n_th = n_threads; +#ifdef SD_USE_RPC + if (ggml_backend_is_rpc(diffusion_backend)) { + n_th = 1; // avoid multi-thread for loading to remote + } +#endif + if (!pmid_lora->load_from_file(n_th, lora_tensor_filter)) { LOG_WARN("load photomaker lora tensors from %s failed", sd_ctx_params->photo_maker_path); return false; } @@ -1331,7 +1341,22 @@ class StableDiffusionGGML { if (version == VERSION_SVD) { ignore_tensors.insert("conditioner.embedders.3"); } - bool success = model_loader.load_tensors(tensors, ignore_tensors, n_threads, sd_ctx_params->enable_mmap); + int n_th = n_threads; +#ifdef SD_USE_RPC + // TODO: maybe set it to 1 thread only for model parts that are on remote? 
+ bool is_any_clip_rpc = false; + for (auto& backend : clip_backends) { + if (ggml_backend_is_rpc(backend)) { + is_any_clip_rpc = true; + } + } + // I think those are all the backends that should get sent data to when calling model_loader.load_tensors() + if (is_any_clip_rpc || ggml_backend_is_rpc(diffusion_backend) || ggml_backend_is_rpc(vae_backend) || ggml_backend_is_rpc(vision_backend) || ggml_backend_is_rpc(pmid_backend)) { + LOG_DEBUG("Using single-thread for tensor loading because RPC backend is used"); + n_th = 1; // avoid multi-thread for loading to remote + } +#endif + bool success = model_loader.load_tensors(tensors, ignore_tensors, n_th, sd_ctx_params->enable_mmap); if (!success) { LOG_ERROR("load tensors from model loader failed"); ggml_free(ctx); @@ -1353,7 +1378,13 @@ class StableDiffusionGGML { } size_t control_net_params_mem_size = 0; if (control_net) { - if (!control_net->load_from_file(SAFE_STR(sd_ctx_params->control_net_path), n_threads)) { + int n_th = n_threads; +#ifdef SD_USE_RPC + if (ggml_backend_is_rpc(control_net_backend)) { + n_th = 1; // avoid multi-thread for loading to remote + } +#endif + if (!control_net->load_from_file(SAFE_STR(sd_ctx_params->control_net_path), n_th)) { return false; } control_net_params_mem_size = control_net->get_params_buffer_size(); @@ -1562,7 +1593,13 @@ class StableDiffusionGGML { LOG_DEBUG("high noise lora: %s", lora_path.c_str()); } auto lora = std::make_shared(lora_id, backend, lora_path, is_high_noise ? 
"model.high_noise_" : "", version); - if (!lora->load_from_file(n_threads, lora_tensor_filter)) { + int n_th = n_threads; +#ifdef SD_USE_RPC + if (ggml_backend_is_rpc(backend)) { + n_th = 1; // avoid multi-thread for loading to remote + } +#endif + if (!lora->load_from_file(n_th, lora_tensor_filter)) { LOG_WARN("load lora tensors from %s failed", lora_path.c_str()); return nullptr; } From 61ed0239d486ff022aab82db30f64ddcbdd4c8e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Sun, 1 Mar 2026 20:49:07 +0100 Subject: [PATCH 21/32] fix build --- src/stable-diffusion.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 6245419fc..0a043b10d 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -1041,7 +1041,7 @@ class StableDiffusionGGML { version, sd_ctx_params->qwen_image_zero_cond_t); } else if (sd_version_is_anima(version)) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map); diffusion_model = std::make_shared(backend, From 8fe2ccc979ee95ff21ef17e2106231056963a7ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Thu, 19 Mar 2026 14:30:39 +0100 Subject: [PATCH 22/32] Get first stage backend for loading loras --- src/ggml_extend.hpp | 8 ++++++++ src/stable-diffusion.cpp | 6 +++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/ggml_extend.hpp b/src/ggml_extend.hpp index 5f286d3a3..2a1775f5e 100644 --- a/src/ggml_extend.hpp +++ b/src/ggml_extend.hpp @@ -2151,6 +2151,14 @@ struct GGMLRunner { void set_weight_adapter(const std::shared_ptr& adapter) { weight_adapter = adapter; } + + ggml_backend_t get_runtime_backend() { + return runtime_backend; + } + + ggml_backend_t get_params_backend() { + return params_backend; + } }; class GGMLBlock { diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp 
index 0a043b10d..7387797c6 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -1651,6 +1651,8 @@ class StableDiffusionGGML { } return false; }; + + LOG_INFO("applying lora to diffusion model"); auto lora = load_lora_model_from_file(kv.first, kv.second, diffusion_backend, lora_tensor_filter_diff); if (lora && !lora->lora_tensors.empty()) { lora->apply(tensors, version, n_threads); @@ -1665,6 +1667,7 @@ class StableDiffusionGGML { return false; }; // TODO: split by model + LOG_INFO("applying lora to text encoders"); lora = load_lora_model_from_file(kv.first, kv.second, clip_backends[0], lora_tensor_filter_cond); if (lora && !lora->lora_tensors.empty()) { lora->apply(tensors, version, n_threads); @@ -1678,7 +1681,8 @@ class StableDiffusionGGML { } return false; }; - auto first_stage_backend = use_tiny_autoencoder ? tae_backend : vae_backend; + LOG_INFO("applying lora to first stage model"); + auto first_stage_backend = first_stage_model->get_params_backend(); lora = load_lora_model_from_file(kv.first, kv.second, first_stage_backend, lora_tensor_filter_first); if (lora && !lora->lora_tensors.empty()) { lora->apply(tensors, version, n_threads); From ae5b8ab4508953f195f8ca125be92cb9bd3175ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Thu, 19 Mar 2026 15:27:34 +0100 Subject: [PATCH 23/32] Fix lora loading when using multiple clip backends --- src/conditioner.hpp | 171 ++++++++++++++++++++++++++++++++++++--- src/stable-diffusion.cpp | 68 ++++++++-------- 2 files changed, 196 insertions(+), 43 deletions(-) diff --git a/src/conditioner.hpp b/src/conditioner.hpp index b2e3d163b..2b234b252 100644 --- a/src/conditioner.hpp +++ b/src/conditioner.hpp @@ -35,6 +35,7 @@ struct ConditionerParams { }; struct Conditioner { + int model_count = 1; virtual SDCondition get_learned_condition(ggml_context* work_ctx, int n_threads, const ConditionerParams& conditioner_params) = 0; @@ -53,6 +54,11 @@ struct Conditioner { const std::string& 
prompt) { GGML_ABORT("Not implemented yet!"); } + virtual bool is_cond_stage_model_name_at_index(const std::string& name, int index) { + return true; + } + virtual ggml_backend_t get_params_backend_at_index(int index) = 0; + virtual ggml_backend_t get_runtime_backend_at_index(int index) = 0; }; // ldm.modules.encoders.modules.FrozenCLIPEmbedder @@ -95,8 +101,9 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { LOG_INFO("CLIP-H: using %s backend", ggml_backend_name(clip_backend)); text_model = std::make_shared(clip_backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPEN_CLIP_VIT_H_14, true, force_clip_f32); } else if (sd_version_is_sdxl(version)) { + model_count = 2; ggml_backend_t clip_g_backend = clip_backend; - if (backends.size() >= 2){ + if (backends.size() >= 2) { clip_g_backend = backends[1]; if (backends.size() > 2) { LOG_WARN("More than 2 clip backends provided, but the model only supports 2 text encoders. Ignoring the rest."); @@ -665,6 +672,42 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { conditioner_params.adm_in_channels, conditioner_params.zero_out_masked); } + + bool is_cond_stage_model_name_at_index(const std::string& name, int index) override { + if (sd_version_is_sdxl(version)) { + if (index == 0) { + return contains(name, "cond_stage_model.model.transformer"); + } else if (index == 1) { + return contains(name, "cond_stage_model.model.1"); + } else { + return false; + } + } + return true; + } + + ggml_backend_t get_params_backend_at_index(int index){ + if (sd_version_is_sdxl(version) && index == 1){ + if(text_model2) { + return text_model2->get_params_backend(); + } + } else if (text_model) { + return text_model->get_params_backend(); + } + return nullptr; + } + + ggml_backend_t get_runtime_backend_at_index(int index){ + if (sd_version_is_sdxl(version) && index == 1){ + if(text_model2) { + return text_model2->get_runtime_backend(); + } + } else if (text_model) { + 
return text_model->get_runtime_backend(); + } + return nullptr; + } + }; struct FrozenCLIPVisionEmbedder : public GGMLRunner { @@ -740,12 +783,14 @@ struct SD3CLIPEmbedder : public Conditioner { bool use_clip_g = false; bool use_t5 = false; + model_count = 3; + ggml_backend_t clip_l_backend, clip_g_backend, t5_backend; if (backends.size() == 1) { clip_l_backend = clip_g_backend = t5_backend = backends[0]; } else if (backends.size() == 2) { clip_l_backend = clip_g_backend = backends[0]; - t5_backend = backends[1]; + t5_backend = backends[1]; } else if (backends.size() >= 3) { clip_l_backend = backends[0]; clip_g_backend = backends[1]; @@ -1175,6 +1220,42 @@ struct SD3CLIPEmbedder : public Conditioner { conditioner_params.clip_skip, conditioner_params.zero_out_masked); } + + bool is_cond_stage_model_name_at_index(const std::string& name, int index) override { + if (index == 0) { + return contains(name, "text_encoders.clip_l"); + } else if (index == 1) { + return contains(name, "text_encoders.clip_g"); + } else if (index == 2) { + return contains(name, "text_encoders.t5xxl"); + } else { + return false; + } + } + + ggml_backend_t get_params_backend_at_index(int index){ + if (index == 0 && clip_l) { + return clip_l->get_params_backend(); + } else if (index == 1 && clip_g) { + return clip_g->get_params_backend(); + } else if (index == 2 && t5) { + return t5->get_params_backend(); + } else { + return nullptr; + } + } + + ggml_backend_t get_runtime_backend_at_index(int index){ + if (index == 0 && clip_l) { + return clip_l->get_runtime_backend(); + } else if (index == 1 && clip_g) { + return clip_g->get_runtime_backend(); + } else if (index == 2 && t5) { + return t5->get_runtime_backend(); + } else { + return nullptr; + } + } }; struct FluxCLIPEmbedder : public Conditioner { @@ -1190,19 +1271,19 @@ struct FluxCLIPEmbedder : public Conditioner { bool use_clip_l = false; bool use_t5 = false; + model_count = 2; ggml_backend_t clip_l_backend, t5_backend; if (backends.size() == 
1) { clip_l_backend = t5_backend = backends[0]; } else if (backends.size() >= 2) { clip_l_backend = backends[0]; - t5_backend = backends[1]; + t5_backend = backends[1]; if (backends.size() > 2) { LOG_WARN("More than 2 clip backends provided, but the model only supports 2 text encoders. Ignoring the rest."); } } - for (auto pair : tensor_storage_map) { if (pair.first.find("text_encoders.clip_l") != std::string::npos) { use_clip_l = true; @@ -1468,6 +1549,36 @@ struct FluxCLIPEmbedder : public Conditioner { conditioner_params.clip_skip, conditioner_params.zero_out_masked); } + + bool is_cond_stage_model_name_at_index(const std::string& name, int index) override { + if (index == 0) { + return contains(name, "text_encoders.clip_l"); + } else if (index == 1) { + return contains(name, "text_encoders.t5xxl"); + } else { + return false; + } + } + + ggml_backend_t get_params_backend_at_index(int index){ + if (index == 0 && clip_l) { + return clip_l->get_params_backend(); + } else if (index == 1 && t5) { + return t5->get_params_backend(); + } else { + return nullptr; + } + } + + ggml_backend_t get_runtime_backend_at_index(int index){ + if (index == 0 && clip_l) { + return clip_l->get_runtime_backend(); + } else if (index == 1 && t5) { + return t5->get_runtime_backend(); + } else { + return nullptr; + } + } }; struct T5CLIPEmbedder : public Conditioner { @@ -1691,6 +1802,20 @@ struct T5CLIPEmbedder : public Conditioner { conditioner_params.clip_skip, conditioner_params.zero_out_masked); } + + ggml_backend_t get_params_backend_at_index(int index){ + if (t5){ + return t5->get_params_backend(); + } + return nullptr; + } + + ggml_backend_t get_runtime_backend_at_index(int index){ + if (t5){ + return t5->get_runtime_backend(); + } + return nullptr; + } }; struct AnimaConditioner : public Conditioner { @@ -1703,11 +1828,11 @@ struct AnimaConditioner : public Conditioner { const String2TensorStorage& tensor_storage_map = {}) { qwen_tokenizer = std::make_shared(); llm = 
std::make_shared(LLM::LLMArch::QWEN3, - backend, - offload_params_to_cpu, - tensor_storage_map, - "text_encoders.llm", - false); + backend, + offload_params_to_cpu, + tensor_storage_map, + "text_encoders.llm", + false); } void get_param_tensors(std::map& tensors) override { @@ -1827,6 +1952,20 @@ struct AnimaConditioner : public Conditioner { return {hidden_states, t5_weight_tensor, t5_ids_tensor}; } + + ggml_backend_t get_params_backend_at_index(int index){ + if (llm){ + return llm->get_params_backend(); + } + return nullptr; + } + + ggml_backend_t get_runtime_backend_at_index(int index){ + if (llm){ + return llm->get_runtime_backend(); + } + return nullptr; + } }; struct LLMEmbedder : public Conditioner { @@ -2201,6 +2340,20 @@ struct LLMEmbedder : public Conditioner { LOG_DEBUG("computing condition graph completed, taking %" PRId64 " ms", t1 - t0); return {hidden_states, nullptr, nullptr, extra_hidden_states_vec}; } + + ggml_backend_t get_params_backend_at_index(int index){ + if (llm){ + return llm->get_params_backend(); + } + return nullptr; + } + + ggml_backend_t get_runtime_backend_at_index(int index){ + if (llm){ + return llm->get_runtime_backend(); + } + return nullptr; + } }; #endif diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 7387797c6..806e028a3 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -1637,14 +1637,6 @@ class StableDiffusionGGML { for (auto& kv : lora_state_diff) { bool applied = false; int64_t t0 = ggml_time_ms(); - // TODO: Fix that - bool are_clip_backends_similar = true; - for (auto backend: clip_backends){ - are_clip_backends_similar = are_clip_backends_similar && (clip_backends[0]==backend || ggml_backend_is_cpu(backend)); - } - if(!are_clip_backends_similar){ - LOG_WARN("Text encoders are running on different backends. 
This may cause issues when immediately applying LoRAs."); - } auto lora_tensor_filter_diff = [&](const std::string& tensor_name) { if (is_diffusion_model_name(tensor_name)) { return true; @@ -1660,19 +1652,22 @@ class StableDiffusionGGML { applied = true; } - auto lora_tensor_filter_cond = [&](const std::string& tensor_name) { - if (is_cond_stage_model_name(tensor_name)) { - return true; + for (int i = 0; i < cond_stage_model->model_count; i++) { + auto lora_tensor_filter_cond = [&](const std::string& tensor_name) { + if (is_cond_stage_model_name(tensor_name)) { + return cond_stage_model->is_cond_stage_model_name_at_index(tensor_name, i); + } + return false; + }; + // TODO: split by model + LOG_INFO("applying lora to text encoder (%d)", i); + auto backend = cond_stage_model->get_params_backend_at_index(i); + lora = load_lora_model_from_file(kv.first, kv.second, backend, lora_tensor_filter_cond); + if (lora && !lora->lora_tensors.empty()) { + lora->apply(tensors, version, n_threads); + lora->free_params_buffer(); + applied = true; } - return false; - }; - // TODO: split by model - LOG_INFO("applying lora to text encoders"); - lora = load_lora_model_from_file(kv.first, kv.second, clip_backends[0], lora_tensor_filter_cond); - if (lora && !lora->lora_tensors.empty()) { - lora->apply(tensors, version, n_threads); - lora->free_params_buffer(); - applied = true; } auto lora_tensor_filter_first = [&](const std::string& tensor_name) { @@ -1734,22 +1729,27 @@ class StableDiffusionGGML { } } cond_stage_lora_models = lora_models; - auto lora_tensor_filter = [&](const std::string& tensor_name) { - if (is_cond_stage_model_name(tensor_name)) { - return true; - } - return false; - }; - for (auto& kv : lora_state_diff) { - const std::string& lora_id = kv.first; - float multiplier = kv.second; - //TODO: split by model - auto lora = load_lora_model_from_file(lora_id, multiplier, clip_backends[0], lora_tensor_filter); - if (lora && !lora->lora_tensors.empty()) { - 
lora->preprocess_lora_tensors(tensors); - cond_stage_lora_models.push_back(lora); + + + for(int i=0;imodel_count;i++){ + auto lora_tensor_filter_cond = [&](const std::string& tensor_name) { + if (is_cond_stage_model_name(tensor_name)) { + return cond_stage_model->is_cond_stage_model_name_at_index(tensor_name, i); + } + return false; + }; + for (auto& kv : lora_state_diff) { + const std::string& lora_id = kv.first; + float multiplier = kv.second; + auto backend = cond_stage_model->get_runtime_backend_at_index(i); + auto lora = load_lora_model_from_file(kv.first, kv.second, backend, lora_tensor_filter_cond); + if (lora && !lora->lora_tensors.empty()) { + lora->preprocess_lora_tensors(tensors); + cond_stage_lora_models.push_back(lora); + } } } + auto multi_lora_adapter = std::make_shared(cond_stage_lora_models); cond_stage_model->set_weight_adapter(multi_lora_adapter); } From 229664db238046ec40d2efc35b7c6496e8a72c4c Mon Sep 17 00:00:00 2001 From: Cyberhan123 <255542417@qq.com> Date: Thu, 26 Mar 2026 10:55:14 +0800 Subject: [PATCH 24/32] rm all head code for ggml --- src/common_block.hpp | 15 ++++++--------- src/ggml_extend.hpp | 14 ++++++++------ src/qwen_image.hpp | 13 +++++-------- src/util.cpp | 8 ++++++++ src/util.h | 3 +++ src/z_image.hpp | 23 +++++++---------------- 6 files changed, 37 insertions(+), 39 deletions(-) diff --git a/src/common_block.hpp b/src/common_block.hpp index a25fe9311..e1dc2e6bf 100644 --- a/src/common_block.hpp +++ b/src/common_block.hpp @@ -2,10 +2,9 @@ #define __COMMON_BLOCK_HPP__ #include "ggml_extend.hpp" +#include "ggml-backend.h" +#include "util.h" -#ifdef SD_USE_VULKAN -#include "ggml-vulkan.h" -#endif class DownSampleBlock : public GGMLBlock { protected: @@ -265,12 +264,10 @@ class FeedForward : public GGMLBlock { auto net_0 = std::dynamic_pointer_cast(blocks["net.0"]); auto net_2 = std::dynamic_pointer_cast(blocks["net.2"]); - #ifdef SD_USE_VULKAN - if(ggml_backend_is_vk(ctx->backend)){ - net_2->set_force_prec_f32(true); - } - 
#endif - + if (sd_backend_is(ctx->backend, "Vulkan")) { + net_2->set_force_prec_f32(true); + } + x = net_0->forward(ctx, x); // [ne3, ne2, ne1, inner_dim] x = net_2->forward(ctx, x); // [ne3, ne2, ne1, dim_out] return x; diff --git a/src/ggml_extend.hpp b/src/ggml_extend.hpp index 2a1775f5e..0539092fc 100644 --- a/src/ggml_extend.hpp +++ b/src/ggml_extend.hpp @@ -1299,16 +1299,17 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_ones_like(ggml_context* ctx, return ggml_ext_ones(ctx, x->ne[0], x->ne[1], x->ne[2], x->ne[3]); } -__STATIC_INLINE__ ggml_tensor* ggml_ext_cast_f32(ggml_context* ctx, ggml_tensor* a) { -#ifdef SD_USE_VULKAN +__STATIC_INLINE__ ggml_tensor* ggml_ext_cast_f32(ggml_context* ctx,ggml_backend_t backend, ggml_tensor* a) { +if (sd_backend_is(backend, "Vulkan")) +{ auto zero_index = ggml_get_tensor(ctx, "ggml_runner_build_in_tensor:zero_int"); auto out = ggml_reshape_1d(ctx, a, ggml_nelements(a)); out = ggml_get_rows(ctx, out, zero_index); out = ggml_reshape(ctx, out, a); // auto out = ggml_cast(ctx, a, GGML_TYPE_F32); return out; -#else - auto out = ggml_reshape_2d(ctx, a, 1, ggml_nelements(a)); +}else{ + auto out = ggml_reshape_2d(ctx, a, 1, ggml_nelements(a)); ggml_tensor* one = ggml_ext_ones(ctx, 1, 1, 1, 1); // [1,] if (ggml_is_transposed(out)) { out = ggml_mul_mat(ctx, one, out); @@ -1316,8 +1317,9 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_cast_f32(ggml_context* ctx, ggml_tensor* out = ggml_mul_mat(ctx, out, one); } out = ggml_reshape(ctx, out, a); -#endif - return out; + return out; +} + } // q: [N, L_q, C(n_head*d_head)] or [N*n_head, L_q, d_head] diff --git a/src/qwen_image.hpp b/src/qwen_image.hpp index af3c820bb..773328877 100644 --- a/src/qwen_image.hpp +++ b/src/qwen_image.hpp @@ -6,10 +6,6 @@ #include "common_block.hpp" #include "flux.hpp" -#ifdef SD_USE_VULKAN -#include "ggml-vulkan.h" -#endif - namespace Qwen { constexpr int QWEN_IMAGE_GRAPH_SIZE = 20480; @@ -125,12 +121,13 @@ namespace Qwen { auto to_k = 
std::dynamic_pointer_cast(blocks["to_k"]); auto to_v = std::dynamic_pointer_cast(blocks["to_v"]); auto to_out_0 = std::dynamic_pointer_cast(blocks["to_out.0"]); -#ifdef SD_USE_VULKAN - if(ggml_backend_is_vk(ctx->backend)){ + + + if (sd_backend_is(ctx->backend,"Vulkan")) + { to_out_0->set_force_prec_f32(true); } -#endif - + auto norm_added_q = std::dynamic_pointer_cast(blocks["norm_added_q"]); auto norm_added_k = std::dynamic_pointer_cast(blocks["norm_added_k"]); diff --git a/src/util.cpp b/src/util.cpp index a94cfd986..3aceec079 100644 --- a/src/util.cpp +++ b/src/util.cpp @@ -743,3 +743,11 @@ std::vector> parse_prompt_attention(const std::str return res; } + +// test if the backend is a specific one, e.g. "CUDA", "ROCm", "Vulkan" etc. +static inline bool sd_backend_is(ggml_backend_t backend, const std::string& name) { + ggml_backend_dev_t dev = ggml_backend_get_device(backend); + if (!dev) return false; + std::string dev_name = ggml_backend_dev_name(dev); + return dev_name.find(name) != std::string::npos; +} \ No newline at end of file diff --git a/src/util.h b/src/util.h index 7dee7bf51..c72d5c027 100644 --- a/src/util.h +++ b/src/util.h @@ -86,6 +86,9 @@ int sd_get_preview_interval(); bool sd_should_preview_denoised(); bool sd_should_preview_noisy(); +// test if the backend is a specific one, e.g. "CUDA", "ROCm", "Vulkan" etc. +bool sd_backend_is(ggml_backend_t backend, const std::string& name); + #define LOG_DEBUG(format, ...) log_printf(SD_LOG_DEBUG, __FILE__, __LINE__, format, ##__VA_ARGS__) #define LOG_INFO(format, ...) log_printf(SD_LOG_INFO, __FILE__, __LINE__, format, ##__VA_ARGS__) #define LOG_WARN(format, ...) 
log_printf(SD_LOG_WARN, __FILE__, __LINE__, format, ##__VA_ARGS__) diff --git a/src/z_image.hpp b/src/z_image.hpp index 3272ec437..ad09ed1e9 100644 --- a/src/z_image.hpp +++ b/src/z_image.hpp @@ -7,14 +7,6 @@ #include "ggml_extend.hpp" #include "mmdit.hpp" -#ifdef SD_USE_VULKAN -#include "ggml-vulkan.h" -#endif - -#if GGML_USE_HIP -#include "ggml-cuda.h" -#endif - // Ref: https://github.com/Alpha-VLLM/Lumina-Image-2.0/blob/main/models/model.py // Ref: https://github.com/huggingface/diffusers/pull/12703 @@ -55,13 +47,12 @@ namespace ZImage { int64_t N = x->ne[2]; auto qkv_proj = std::dynamic_pointer_cast(blocks["qkv"]); auto out_proj = std::dynamic_pointer_cast(blocks["out"]); -#if GGML_USE_HIP - // Prevent NaN issues with certain ROCm setups - if (ggml_backend_is_cuda(ctx->backend)) { + + if (sd_backend_is(ctx->backend,"ROCm")) + { out_proj->set_scale(1.f / 16.f); } -#endif - + auto qkv = qkv_proj->forward(ctx, x); // [N, n_token, (num_heads + num_kv_heads*2)*head_dim] qkv = ggml_reshape_4d(ctx->ggml_ctx, qkv, head_dim, num_heads + num_kv_heads * 2, qkv->ne[1], qkv->ne[2]); // [N, n_token, num_heads + num_kv_heads*2, head_dim] @@ -136,11 +127,11 @@ namespace ZImage { auto w1 = std::dynamic_pointer_cast(blocks["w1"]); auto w2 = std::dynamic_pointer_cast(blocks["w2"]); auto w3 = std::dynamic_pointer_cast(blocks["w3"]); -#ifdef SD_USE_VULKAN - if(ggml_backend_is_vk(ctx->backend)){ + + if (sd_backend_is(ctx->backend,"Vulkan")) + { w2->set_force_prec_f32(true); } -#endif auto x1 = w1->forward(ctx, x); auto x3 = w3->forward(ctx, x); From e302ee551a59c308308d91840d698cb2e7f62b76 Mon Sep 17 00:00:00 2001 From: Cyberhan123 <255542417@qq.com> Date: Thu, 26 Mar 2026 13:27:06 +0800 Subject: [PATCH 25/32] feat: enhance the build system by adding CMake configuration and backend support, and eliminate hard-coded backend references. 
--- CMakeLists.txt | 180 +++++++++++++++++++++++---------------- cmake/build-info.cmake | 48 +++++++++++ cmake/common.cmake | 60 +++++++++++++ cmake/sd-config.cmake.in | 30 +++++++ cmake/sd.pc.in | 10 +++ examples/CMakeLists.txt | 5 +- src/CMakeLists.txt | 35 ++++++++ src/ggml_extend.hpp | 26 +++--- src/lora.hpp | 69 +++++++-------- src/stable-diffusion.cpp | 10 +-- src/util.cpp | 4 +- src/util.h | 1 + 12 files changed, 348 insertions(+), 130 deletions(-) create mode 100644 cmake/build-info.cmake create mode 100644 cmake/common.cmake create mode 100644 cmake/sd-config.cmake.in create mode 100644 cmake/sd.pc.in create mode 100644 src/CMakeLists.txt diff --git a/CMakeLists.txt b/CMakeLists.txt index afc756ad7..3813c85c8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -8,6 +8,8 @@ if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE) set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo") endif() +list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/") + if (MSVC) add_compile_definitions(_CRT_SECURE_NO_WARNINGS) add_compile_definitions(_SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING) @@ -22,66 +24,84 @@ else() set(SD_STANDALONE OFF) endif() +if (MINGW) + set(BUILD_SHARED_LIBS_DEFAULT OFF) + else() + set(BUILD_SHARED_LIBS_DEFAULT ON) +endif() + +option(BUILD_SHARED_LIBS "build shared libraries" ${BUILD_SHARED_LIBS_DEFAULT}) + +if (WIN32) + add_compile_definitions(_CRT_SECURE_NO_WARNINGS) +endif() + +if (MSVC) + add_compile_options("$<$:/utf-8>") + add_compile_options("$<$:/utf-8>") + add_compile_options("$<$:/bigobj>") + add_compile_options("$<$:/bigobj>") +endif() + +if (SD_STANDALONE) + # enable parallel builds for msbuild + list(APPEND CMAKE_VS_GLOBALS UseMultiToolTask=true) + list(APPEND CMAKE_VS_GLOBALS EnforceProcessCountAcrossBuilds=true) +endif() + + # # Option list # - # general #option(SD_BUILD_TESTS "sd: build tests" ${SD_STANDALONE}) option(SD_BUILD_EXAMPLES "sd: build examples" 
${SD_STANDALONE}) -option(SD_CUDA "sd: cuda backend" OFF) -option(SD_HIPBLAS "sd: rocm backend" OFF) -option(SD_METAL "sd: metal backend" OFF) -option(SD_VULKAN "sd: vulkan backend" OFF) -option(SD_OPENCL "sd: opencl backend" OFF) -option(SD_SYCL "sd: sycl backend" OFF) -option(SD_MUSA "sd: musa backend" OFF) -option(SD_BUILD_SHARED_LIBS "sd: build shared libs" OFF) -option(SD_BUILD_SHARED_GGML_LIB "sd: build ggml as a separate shared lib" OFF) option(SD_USE_SYSTEM_GGML "sd: use system-installed GGML library" OFF) #option(SD_BUILD_SERVER "sd: build server example" ON) -if(SD_CUDA) - message("-- Use CUDA as backend stable-diffusion") - set(GGML_CUDA ON) - add_definitions(-DSD_USE_CUDA) -endif() +# Required for relocatable CMake package +include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info.cmake) +include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/common.cmake) -if(SD_METAL) - message("-- Use Metal as backend stable-diffusion") - set(GGML_METAL ON) - add_definitions(-DSD_USE_METAL) +if (NOT DEFINED SD_BUILD_NUMBER) + set(SD_BUILD_NUMBER ${BUILD_NUMBER}) endif() +if (NOT DEFINED SD_BUILD_COMMIT) + set(SD_BUILD_COMMIT ${BUILD_COMMIT}) +endif() +set(SD_INSTALL_VERSION 0.0.${SD_BUILD_NUMBER}) -if (SD_VULKAN) - message("-- Use Vulkan as backend stable-diffusion") - set(GGML_VULKAN ON) - add_definitions(-DSD_USE_VULKAN) -endif () - -if (SD_OPENCL) - message("-- Use OpenCL as backend stable-diffusion") - set(GGML_OPENCL ON) - add_definitions(-DSD_USE_OPENCL) -endif () - -if (SD_HIPBLAS) - message("-- Use HIPBLAS as backend stable-diffusion") - set(GGML_HIP ON) - add_definitions(-DSD_USE_CUDA) -endif () +# override ggml options +set(GGML_ALL_WARNINGS ${SD_ALL_WARNINGS}) +set(GGML_FATAL_WARNINGS ${SD_FATAL_WARNINGS}) -if(SD_MUSA) - message("-- Use MUSA as backend stable-diffusion") - set(GGML_MUSA ON) - add_definitions(-DSD_USE_CUDA) +if (NOT DEFINED GGML_CUDA_GRAPHS) + set(GGML_CUDA_GRAPHS_DEFAULT ON) endif() -if (SD_RPC) - message("-- Use RPC as backend stable-diffusion") - 
set(GGML_RPC ON) - add_definitions(-DSD_USE_RPC) -endif () +# Ref: https://github.com/ggml-org/llama.cpp/blob/master/CMakeLists.txt#L145 +# transition helpers +function (sd_option_depr TYPE OLD) + if (${OLD}) + set(NEW "${ARGV2}") + if(NEW) + message(${TYPE} "${OLD} is deprecated, use ${NEW} instead") + set(${NEW} ON PARENT_SCOPE) + else() + message(${TYPE} "${OLD} is deprecated and will be ignored") + endif() + endif() +endfunction() + +sd_option_depr(FATAL_ERROR SD_HIPBLAS GGML_CUDA) +sd_option_depr(FATAL_ERROR SD_BUILD_SHARED_LIBS BUILD_SHARED_LIBS) +sd_option_depr(FATAL_ERROR SD_BUILD_SHARED_GGML_LIB BUILD_SHARED_LIBS) +sd_option_depr(WARNING SD_CUDA GGML_CUDA) +sd_option_depr(WARNING SD_METAL GGML_METAL) +sd_option_depr(WARNING SD_VULKAN GGML_VULKAN) +sd_option_depr(WARNING SD_OPENCL GGML_OPENCL) +sd_option_depr(WARNING SD_SYCL GGML_SYCL) +sd_option_depr(WARNING SD_MUSA GGML_MUSA) set(SD_LIB stable-diffusion) @@ -125,29 +145,9 @@ set_property( SDCPP_BUILD_COMMIT=${SDCPP_BUILD_COMMIT} SDCPP_BUILD_VERSION=${SDCPP_BUILD_VERSION} ) -if(SD_BUILD_SHARED_LIBS) - message("-- Build shared library") - message(${SD_LIB_SOURCES}) - if(NOT SD_BUILD_SHARED_GGML_LIB) - set(BUILD_SHARED_LIBS OFF) - endif() - add_library(${SD_LIB} SHARED ${SD_LIB_SOURCES}) - add_definitions(-DSD_BUILD_SHARED_LIB) - target_compile_definitions(${SD_LIB} PRIVATE -DSD_BUILD_DLL) - set(CMAKE_POSITION_INDEPENDENT_CODE ON) -else() - message("-- Build static library") - if(NOT SD_BUILD_SHARED_GGML_LIB) - set(BUILD_SHARED_LIBS OFF) - endif() - add_library(${SD_LIB} STATIC ${SD_LIB_SOURCES}) -endif() - -if(SD_SYCL) - message("-- Use SYCL as backend stable-diffusion") - set(GGML_SYCL ON) +# Is this needed? 
+if(GGML_SYCL) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing -fsycl") - add_definitions(-DSD_USE_SYCL) # disable fast-math on host, see: # https://www.intel.com/content/www/us/en/docs/cpp-compiler/developer-guide-reference/2021-10/fp-model-fp.html if (WIN32) @@ -181,18 +181,48 @@ if (NOT TARGET ggml) endif() add_subdirectory(thirdparty) - -target_link_libraries(${SD_LIB} PUBLIC ggml zip) -target_include_directories(${SD_LIB} PUBLIC . include) -target_include_directories(${SD_LIB} PUBLIC . thirdparty) -target_compile_features(${SD_LIB} PUBLIC c_std_11 cxx_std_17) - +add_subdirectory(src) if (SD_BUILD_EXAMPLES) add_subdirectory(examples) endif() -set(SD_PUBLIC_HEADERS include/stable-diffusion.h) -set_target_properties(${SD_LIB} PROPERTIES PUBLIC_HEADER "${SD_PUBLIC_HEADERS}") +include(GNUInstallDirs) +include(CMakePackageConfigHelpers) + +set(SD_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location of header files") +set(SD_LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Location of library files") +set(SD_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR} CACHE PATH "Location of binary files") +set(SD_PUBLIC_HEADERS + ${CMAKE_CURRENT_SOURCE_DIR}/include/stable-diffusion.h +) + +set_target_properties(${SD_LIB} + PROPERTIES + PUBLIC_HEADER "${SD_PUBLIC_HEADERS}") install(TARGETS ${SD_LIB} LIBRARY PUBLIC_HEADER) + +configure_package_config_file( + ${CMAKE_CURRENT_SOURCE_DIR}/cmake/sd-config.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/sd-config.cmake + INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/sd + PATH_VARS SD_INCLUDE_INSTALL_DIR + SD_LIB_INSTALL_DIR + SD_BIN_INSTALL_DIR ) + +write_basic_package_version_file( + ${CMAKE_CURRENT_BINARY_DIR}/sd-version.cmake + VERSION ${SD_INSTALL_VERSION} + COMPATIBILITY SameMajorVersion) + +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/sd-config.cmake + ${CMAKE_CURRENT_BINARY_DIR}/sd-version.cmake + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/sd) + +configure_file(cmake/sd.pc.in + "${CMAKE_CURRENT_BINARY_DIR}/sd.pc" + 
@ONLY) + +install(FILES "${CMAKE_CURRENT_BINARY_DIR}/sd.pc" + DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) diff --git a/cmake/build-info.cmake b/cmake/build-info.cmake new file mode 100644 index 000000000..3194f8159 --- /dev/null +++ b/cmake/build-info.cmake @@ -0,0 +1,48 @@ +set(BUILD_NUMBER 0) +set(BUILD_COMMIT "unknown") +set(BUILD_COMPILER "unknown") +set(BUILD_TARGET "unknown") + +# Look for git +find_package(Git) +if(NOT Git_FOUND) + find_program(GIT_EXECUTABLE NAMES git git.exe) + if(GIT_EXECUTABLE) + set(Git_FOUND TRUE) + message(STATUS "Found Git: ${GIT_EXECUTABLE}") + else() + message(WARNING "Git not found. Build info will not be accurate.") + endif() +endif() + +# Get the commit count and hash +if(Git_FOUND) + execute_process( + COMMAND ${GIT_EXECUTABLE} rev-parse --short HEAD + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + OUTPUT_VARIABLE HEAD + OUTPUT_STRIP_TRAILING_WHITESPACE + RESULT_VARIABLE RES + ) + if (RES EQUAL 0) + set(BUILD_COMMIT ${HEAD}) + endif() + execute_process( + COMMAND ${GIT_EXECUTABLE} rev-list --count HEAD + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + OUTPUT_VARIABLE COUNT + OUTPUT_STRIP_TRAILING_WHITESPACE + RESULT_VARIABLE RES + ) + if (RES EQUAL 0) + set(BUILD_NUMBER ${COUNT}) + endif() +endif() + +set(BUILD_COMPILER "${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION}") + +if(CMAKE_VS_PLATFORM_NAME) + set(BUILD_TARGET ${CMAKE_VS_PLATFORM_NAME}) +else() + set(BUILD_TARGET "${CMAKE_SYSTEM_NAME} ${CMAKE_SYSTEM_PROCESSOR}") +endif() \ No newline at end of file diff --git a/cmake/common.cmake b/cmake/common.cmake new file mode 100644 index 000000000..9176dce60 --- /dev/null +++ b/cmake/common.cmake @@ -0,0 +1,60 @@ +include("ggml/cmake/common.cmake") + +# https://github.com/ggml-org/llama.cpp/blob/master/cmake/common.cmake + +function(sd_add_compile_flags) + if (SD_FATAL_WARNINGS) + if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + list(APPEND C_FLAGS -Werror) + list(APPEND CXX_FLAGS -Werror) 
+ elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + add_compile_options(/WX) + endif() + endif() + + if (SD_ALL_WARNINGS) + if (NOT MSVC) + list(APPEND C_FLAGS -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes + -Werror=implicit-int -Werror=implicit-function-declaration) + + list(APPEND CXX_FLAGS -Wmissing-declarations -Wmissing-noreturn) + + list(APPEND WARNING_FLAGS -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function) + + list(APPEND C_FLAGS ${WARNING_FLAGS}) + list(APPEND CXX_FLAGS ${WARNING_FLAGS}) + + ggml_get_flags(${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}) + + add_compile_options("$<$:${C_FLAGS};${GF_C_FLAGS}>" + "$<$:${CXX_FLAGS};${GF_CXX_FLAGS}>") + else() + # todo : msvc + set(C_FLAGS "" PARENT_SCOPE) + set(CXX_FLAGS "" PARENT_SCOPE) + endif() + endif() + + if (NOT MSVC) + if (SD_SANITIZE_THREAD) + message(STATUS "Using -fsanitize=thread") + + add_compile_options(-fsanitize=thread) + link_libraries (-fsanitize=thread) + endif() + + if (SD_SANITIZE_ADDRESS) + message(STATUS "Using -fsanitize=address") + + add_compile_options(-fsanitize=address -fno-omit-frame-pointer) + link_libraries (-fsanitize=address) + endif() + + if (SD_SANITIZE_UNDEFINED) + message(STATUS "Using -fsanitize=undefined") + + add_compile_options(-fsanitize=undefined) + link_libraries (-fsanitize=undefined) + endif() + endif() +endfunction() \ No newline at end of file diff --git a/cmake/sd-config.cmake.in b/cmake/sd-config.cmake.in new file mode 100644 index 000000000..34b634aec --- /dev/null +++ b/cmake/sd-config.cmake.in @@ -0,0 +1,30 @@ +set(SD_VERSION @SD_INSTALL_VERSION@) +set(SD_BUILD_COMMIT @SD_BUILD_COMMIT@) +set(SD_BUILD_NUMBER @SD_BUILD_NUMBER@) +set(SD_SHARED_LIB @BUILD_SHARED_LIBS@) + +@PACKAGE_INIT@ + +set_and_check(SD_INCLUDE_DIR "@PACKAGE_SD_INCLUDE_INSTALL_DIR@") +set_and_check(SD_LIB_DIR "@PACKAGE_SD_LIB_INSTALL_DIR@") +set_and_check(SD_BIN_DIR "@PACKAGE_SD_BIN_INSTALL_DIR@") + +find_package(ggml REQUIRED HINTS ${SD_LIB_DIR}/cmake) + 
+find_library(stable-diffusion_LIBRARY stable-diffusion + REQUIRED + HINTS ${SD_LIB_DIR} + NO_CMAKE_FIND_ROOT_PATH +) + +add_library(llama UNKNOWN IMPORTED) +set_target_properties(stable-diffusion + PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${LLAMA_INCLUDE_DIR}" + INTERFACE_LINK_LIBRARIES "ggml::ggml;ggml::ggml-base;" + IMPORTED_LINK_INTERFACE_LANGUAGES "CXX" + IMPORTED_LOCATION "${stable-diffusion_LIBRARY}" + INTERFACE_COMPILE_FEATURES c_std_90 + POSITION_INDEPENDENT_CODE ON) + +check_required_components(Stable-diffusion) \ No newline at end of file diff --git a/cmake/sd.pc.in b/cmake/sd.pc.in new file mode 100644 index 000000000..f0a5ebc7d --- /dev/null +++ b/cmake/sd.pc.in @@ -0,0 +1,10 @@ +prefix=@CMAKE_INSTALL_PREFIX@ +exec_prefix=@CMAKE_INSTALL_PREFIX@ +libdir=@CMAKE_INSTALL_FULL_LIBDIR@ +includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@ + +Name: stable-diffusion +Description: Diffusion model(SD,Flux,Wan,Qwen Image,Z-Image,...) inference in pure C/C++ +Version: @SD_INSTALL_VERSION@ +Libs: -L${libdir} -lggml -lggml-base -lstable-diffusion +Cflags: -I${includedir} \ No newline at end of file diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 2dcd1d53a..29cef50fa 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -1,4 +1,7 @@ +sd_add_compile_flags() + include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +include_directories(${PROJECT_SOURCE_DIR}/thirdparty) add_subdirectory(cli) -add_subdirectory(server) \ No newline at end of file +add_subdirectory(server) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt new file mode 100644 index 000000000..54496add4 --- /dev/null +++ b/src/CMakeLists.txt @@ -0,0 +1,35 @@ +sd_add_compile_flags() + +# +# libraries +# + +# stable-diffusion +file(GLOB SD_SOURCES "*.cpp" "*.hpp" "*.h" "*.hpp") +file(GLOB SD_VOCAB_SOURCES "vocab/*.h" "vocab/*.cpp") + +add_library(${SD_LIB} + ../include/stable-diffusion.h + ${SD_SOURCES} + ${SD_VOCAB_SOURCES} +) + +set_target_properties(${SD_LIB} PROPERTIES + VERSION 
${SD_INSTALL_VERSION} + SOVERSION 0 + MACHO_CURRENT_VERSION 0 # keep macOS linker from seeing oversized version number +) + + + +target_include_directories(${SD_LIB} PRIVATE .) +target_include_directories(${SD_LIB} PUBLIC ../include) +target_compile_features(${SD_LIB} PRIVATE c_std_11 cxx_std_17) + +target_link_libraries(${SD_LIB} PUBLIC ggml PRIVATE zip) + +if (BUILD_SHARED_LIBS) + set_target_properties(${SD_LIB} PROPERTIES POSITION_INDEPENDENT_CODE ON) + target_compile_definitions(${SD_LIB} PRIVATE SD_BUILD_DLL) + target_compile_definitions(${SD_LIB} PUBLIC SD_BUILD_SHARED_LIB ) +endif() diff --git a/src/ggml_extend.hpp b/src/ggml_extend.hpp index 0539092fc..b6e6689c5 100644 --- a/src/ggml_extend.hpp +++ b/src/ggml_extend.hpp @@ -1511,16 +1511,15 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_group_norm(ggml_context* ctx, } __STATIC_INLINE__ void ggml_ext_backend_tensor_get_and_sync(ggml_backend_t backend, const ggml_tensor* tensor, void* data, size_t offset, size_t size) { -#if defined(SD_USE_CUDA) || defined(SD_USE_SYCL) + if (sd_backend_is(backend, "ROCm") || sd_backend_is(backend, "CUDA") || sd_backend_is(backend, "SYCL")) { if (!ggml_backend_is_cpu(backend)) { ggml_backend_tensor_get_async(backend, tensor, data, offset, size); ggml_backend_synchronize(backend); } else { ggml_backend_tensor_get(tensor, data, offset, size); } -#else +} ggml_backend_tensor_get(tensor, data, offset, size); -#endif } __STATIC_INLINE__ float ggml_ext_backend_tensor_get_f32(ggml_tensor* tensor) { @@ -1667,8 +1666,9 @@ struct WeightAdapter { float scale = 1.f; } conv2d; }; - virtual ggml_tensor* patch_weight(ggml_context* ctx, ggml_tensor* weight, const std::string& weight_name) = 0; + virtual ggml_tensor* patch_weight(ggml_context* ctx, ggml_backend_t backend, ggml_tensor* weight, const std::string& weight_name) = 0; virtual ggml_tensor* forward_with_lora(ggml_context* ctx, + ggml_backend_t backend, ggml_tensor* x, ggml_tensor* w, ggml_tensor* b, @@ -2324,7 +2324,7 @@ class Linear : 
public UnaryBlock { forward_params.op_type = WeightAdapter::ForwardParams::op_type_t::OP_LINEAR; forward_params.linear.force_prec_f32 = force_prec_f32; forward_params.linear.scale = scale; - return ctx->weight_adapter->forward_with_lora(ctx->ggml_ctx, x, w, b, prefix, forward_params); + return ctx->weight_adapter->forward_with_lora(ctx->ggml_ctx,ctx->backend, x, w, b, prefix, forward_params); } return ggml_ext_linear(ctx->ggml_ctx, x, w, b, force_prec_f32, scale); } @@ -2440,7 +2440,7 @@ class Conv2d : public UnaryBlock { forward_params.conv2d.circular_x = ctx->circular_x_enabled; forward_params.conv2d.circular_y = ctx->circular_y_enabled; forward_params.conv2d.scale = scale; - return ctx->weight_adapter->forward_with_lora(ctx->ggml_ctx, x, w, b, prefix, forward_params); + return ctx->weight_adapter->forward_with_lora(ctx->ggml_ctx, ctx->backend, x, w, b, prefix, forward_params); } return ggml_ext_conv_2d(ctx->ggml_ctx, x, @@ -2504,7 +2504,7 @@ class Conv3d : public UnaryBlock { ggml_tensor* w = params["weight"]; ggml_tensor* b = nullptr; if (ctx->weight_adapter) { - w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, w, prefix + "weight"); + w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx,ctx->backend, w, prefix + "weight"); if (w->type != GGML_TYPE_F16) { w = ggml_cast(ctx->ggml_ctx, w, GGML_TYPE_F16); } @@ -2512,7 +2512,7 @@ class Conv3d : public UnaryBlock { if (bias) { b = params["bias"]; if (ctx->weight_adapter) { - b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, b, prefix + "bias"); + b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, b, prefix + "bias"); } } return ggml_ext_conv_3d(ctx->ggml_ctx, x, w, b, in_channels, @@ -2559,12 +2559,12 @@ class LayerNorm : public UnaryBlock { if (elementwise_affine) { w = params["weight"]; if (ctx->weight_adapter) { - w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, w, prefix + "weight"); + w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, w, prefix + "weight"); } if (bias) 
{ b = params["bias"]; if (ctx->weight_adapter) { - b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, b, prefix + "bias"); + b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, b, prefix + "bias"); } } } @@ -2607,8 +2607,8 @@ class GroupNorm : public GGMLBlock { w = params["weight"]; b = params["bias"]; if (ctx->weight_adapter) { - w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, w, prefix + "weight"); - b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, b, prefix + "bias"); + w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, w, prefix + "weight"); + b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, b, prefix + "bias"); } } return ggml_ext_group_norm(ctx->ggml_ctx, x, w, b, num_groups); @@ -2642,7 +2642,7 @@ class RMSNorm : public UnaryBlock { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) { ggml_tensor* w = params["weight"]; if (ctx->weight_adapter) { - w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, w, prefix + "weight"); + w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, w, prefix + "weight"); } x = ggml_rms_norm(ctx->ggml_ctx, x, eps); x = ggml_mul_inplace(ctx->ggml_ctx, x, w); diff --git a/src/lora.hpp b/src/lora.hpp index 7df04ea27..a261b1f51 100644 --- a/src/lora.hpp +++ b/src/lora.hpp @@ -129,7 +129,7 @@ struct LoraModel : public GGMLRunner { } } - ggml_tensor* get_lora_weight_diff(const std::string& model_tensor_name, ggml_context* ctx) { + ggml_tensor* get_lora_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_backend_t backend) { ggml_tensor* updown = nullptr; int index = 0; while (true) { @@ -152,17 +152,17 @@ struct LoraModel : public GGMLRunner { auto iter = lora_tensors.find(lora_up_name); if (iter != lora_tensors.end()) { - lora_up = ggml_ext_cast_f32(ctx, iter->second); + lora_up = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(lora_mid_name); if (iter != lora_tensors.end()) { - lora_mid = 
ggml_ext_cast_f32(ctx, iter->second); + lora_mid = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(lora_down_name); if (iter != lora_tensors.end()) { - lora_down = ggml_ext_cast_f32(ctx, iter->second); + lora_down = ggml_ext_cast_f32(ctx, backend, iter->second); } if (lora_up == nullptr || lora_down == nullptr) { @@ -208,7 +208,7 @@ struct LoraModel : public GGMLRunner { return updown; } - ggml_tensor* get_raw_weight_diff(const std::string& model_tensor_name, ggml_context* ctx) { + ggml_tensor* get_raw_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_backend_t backend) { ggml_tensor* updown = nullptr; int index = 0; while (true) { @@ -225,7 +225,7 @@ struct LoraModel : public GGMLRunner { auto iter = lora_tensors.find(diff_name); if (iter != lora_tensors.end()) { - curr_updown = ggml_ext_cast_f32(ctx, iter->second); + curr_updown = ggml_ext_cast_f32(ctx, backend, iter->second); } else { break; } @@ -248,7 +248,7 @@ struct LoraModel : public GGMLRunner { return updown; } - ggml_tensor* get_loha_weight_diff(const std::string& model_tensor_name, ggml_context* ctx) { + ggml_tensor* get_loha_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_backend_t backend) { ggml_tensor* updown = nullptr; int index = 0; while (true) { @@ -276,33 +276,33 @@ struct LoraModel : public GGMLRunner { auto iter = lora_tensors.find(hada_1_down_name); if (iter != lora_tensors.end()) { - hada_1_down = ggml_ext_cast_f32(ctx, iter->second); + hada_1_down = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(hada_1_up_name); if (iter != lora_tensors.end()) { - hada_1_up = ggml_ext_cast_f32(ctx, iter->second); + hada_1_up = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(hada_1_mid_name); if (iter != lora_tensors.end()) { - hada_1_mid = ggml_ext_cast_f32(ctx, iter->second); + hada_1_mid = ggml_ext_cast_f32(ctx, backend, iter->second); hada_1_up = ggml_cont(ctx, 
ggml_transpose(ctx, hada_1_up)); } iter = lora_tensors.find(hada_2_down_name); if (iter != lora_tensors.end()) { - hada_2_down = ggml_ext_cast_f32(ctx, iter->second); + hada_2_down = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(hada_2_up_name); if (iter != lora_tensors.end()) { - hada_2_up = ggml_ext_cast_f32(ctx, iter->second); + hada_2_up = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(hada_2_mid_name); if (iter != lora_tensors.end()) { - hada_2_mid = ggml_ext_cast_f32(ctx, iter->second); + hada_2_mid = ggml_ext_cast_f32(ctx, backend, iter->second); hada_2_up = ggml_cont(ctx, ggml_transpose(ctx, hada_2_up)); } @@ -351,7 +351,7 @@ struct LoraModel : public GGMLRunner { return updown; } - ggml_tensor* get_lokr_weight_diff(const std::string& model_tensor_name, ggml_context* ctx) { + ggml_tensor* get_lokr_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_backend_t backend) { ggml_tensor* updown = nullptr; int index = 0; while (true) { @@ -378,24 +378,24 @@ struct LoraModel : public GGMLRunner { auto iter = lora_tensors.find(lokr_w1_name); if (iter != lora_tensors.end()) { - lokr_w1 = ggml_ext_cast_f32(ctx, iter->second); + lokr_w1 = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(lokr_w2_name); if (iter != lora_tensors.end()) { - lokr_w2 = ggml_ext_cast_f32(ctx, iter->second); + lokr_w2 = ggml_ext_cast_f32(ctx, backend, iter->second); } int64_t rank = 1; if (lokr_w1 == nullptr) { iter = lora_tensors.find(lokr_w1_a_name); if (iter != lora_tensors.end()) { - lokr_w1_a = ggml_ext_cast_f32(ctx, iter->second); + lokr_w1_a = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(lokr_w1_b_name); if (iter != lora_tensors.end()) { - lokr_w1_b = ggml_ext_cast_f32(ctx, iter->second); + lokr_w1_b = ggml_ext_cast_f32(ctx, backend, iter->second); } if (lokr_w1_a == nullptr || lokr_w1_b == nullptr) { @@ -410,12 +410,12 @@ struct LoraModel : public 
GGMLRunner { if (lokr_w2 == nullptr) { iter = lora_tensors.find(lokr_w2_a_name); if (iter != lora_tensors.end()) { - lokr_w2_a = ggml_ext_cast_f32(ctx, iter->second); + lokr_w2_a = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(lokr_w2_b_name); if (iter != lora_tensors.end()) { - lokr_w2_b = ggml_ext_cast_f32(ctx, iter->second); + lokr_w2_b = ggml_ext_cast_f32(ctx, backend, iter->second); } if (lokr_w2_a == nullptr || lokr_w2_b == nullptr) { @@ -468,23 +468,23 @@ struct LoraModel : public GGMLRunner { return updown; } - ggml_tensor* get_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_tensor* model_tensor, bool with_lora_and_lokr = true) { + ggml_tensor* get_weight_diff(const std::string& model_tensor_name, ggml_backend_t backend, ggml_context* ctx, ggml_tensor* model_tensor, bool with_lora_and_lokr = true) { // lora ggml_tensor* diff = nullptr; if (with_lora_and_lokr) { - diff = get_lora_weight_diff(model_tensor_name, ctx); + diff = get_lora_weight_diff(model_tensor_name, ctx, backend); } // diff if (diff == nullptr) { - diff = get_raw_weight_diff(model_tensor_name, ctx); + diff = get_raw_weight_diff(model_tensor_name, ctx, backend); } // loha if (diff == nullptr) { - diff = get_loha_weight_diff(model_tensor_name, ctx); + diff = get_loha_weight_diff(model_tensor_name, ctx, backend); } // lokr if (diff == nullptr && with_lora_and_lokr) { - diff = get_lokr_weight_diff(model_tensor_name, ctx); + diff = get_lokr_weight_diff(model_tensor_name, ctx, backend); } if (diff != nullptr) { if (ggml_nelements(diff) < ggml_nelements(model_tensor)) { @@ -761,7 +761,7 @@ struct LoraModel : public GGMLRunner { ggml_tensor* model_tensor = it.second; // lora - ggml_tensor* diff = get_weight_diff(model_tensor_name, compute_ctx, model_tensor); + ggml_tensor* diff = get_weight_diff(model_tensor_name,runtime_backend, compute_ctx, model_tensor); if (diff == nullptr) { continue; } @@ -774,7 +774,7 @@ struct LoraModel : public GGMLRunner 
{ ggml_tensor* final_tensor; if (model_tensor->type != GGML_TYPE_F32 && model_tensor->type != GGML_TYPE_F16) { - final_tensor = ggml_ext_cast_f32(compute_ctx, model_tensor); + final_tensor = ggml_ext_cast_f32(compute_ctx,runtime_backend ,model_tensor); final_tensor = ggml_add_inplace(compute_ctx, final_tensor, diff); final_tensor = ggml_cpy(compute_ctx, final_tensor, model_tensor); } else { @@ -841,34 +841,35 @@ struct MultiLoraAdapter : public WeightAdapter { : lora_models(lora_models) { } - ggml_tensor* patch_weight(ggml_context* ctx, ggml_tensor* weight, const std::string& weight_name, bool with_lora_and_lokr) { + ggml_tensor* patch_weight(ggml_context* ctx, ggml_backend_t backend, ggml_tensor* weight, const std::string& weight_name, bool with_lora_and_lokr) { for (auto& lora_model : lora_models) { - ggml_tensor* diff = lora_model->get_weight_diff(weight_name, ctx, weight, with_lora_and_lokr); + ggml_tensor* diff = lora_model->get_weight_diff(weight_name, backend, ctx, weight, with_lora_and_lokr); if (diff == nullptr) { continue; } if (weight->type != GGML_TYPE_F32 && weight->type != GGML_TYPE_F16) { - weight = ggml_ext_cast_f32(ctx, weight); + weight = ggml_ext_cast_f32(ctx, backend, weight); } weight = ggml_add(ctx, weight, diff); } return weight; } - ggml_tensor* patch_weight(ggml_context* ctx, ggml_tensor* weight, const std::string& weight_name) override { - return patch_weight(ctx, weight, weight_name, true); + ggml_tensor* patch_weight(ggml_context* ctx, ggml_backend_t backend, ggml_tensor* weight, const std::string& weight_name) override { + return patch_weight(ctx, backend, weight, weight_name, true); } ggml_tensor* forward_with_lora(ggml_context* ctx, + ggml_backend_t backend, ggml_tensor* x, ggml_tensor* w, ggml_tensor* b, const std::string& prefix, WeightAdapter::ForwardParams forward_params) override { - w = patch_weight(ctx, w, prefix + "weight", false); + w = patch_weight(ctx, backend, w, prefix + "weight", false); if (b) { - b = patch_weight(ctx, 
b, prefix + "bias", false); + b = patch_weight(ctx, backend, b, prefix + "bias", false); } ggml_tensor* out; if (forward_params.op_type == ForwardParams::op_type_t::OP_LINEAR) { diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 806e028a3..e9e9e6045 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -27,7 +27,7 @@ #include "latent-preview.h" #include "name_conversion.h" -#if SD_USE_RPC +#if GGML_RPC #include "ggml-rpc.h" #endif @@ -1244,7 +1244,7 @@ class StableDiffusionGGML { return false; }; int n_th = n_threads; -#ifdef SD_USE_RPC +#ifdef GGML_RPC if (ggml_backend_is_rpc(diffusion_backend)) { n_th = 1; // avoid multi-thread for loading to remote } @@ -1342,7 +1342,7 @@ class StableDiffusionGGML { ignore_tensors.insert("conditioner.embedders.3"); } int n_th = n_threads; -#ifdef SD_USE_RPC +#ifdef GGML_RPC // TODO: maybe set it to 1 threads only for model parts that are on remote? bool is_any_clip_rpc = false; for (auto& backend : clip_backends) { @@ -1379,7 +1379,7 @@ class StableDiffusionGGML { size_t control_net_params_mem_size = 0; if (control_net) { int n_th = n_threads; -#ifdef SD_USE_RPC +#ifdef GGML_RPC if (ggml_backend_is_rpc(control_net_backend)) { n_th = 1; // avoid multi-thread for loading to remote } @@ -1594,7 +1594,7 @@ class StableDiffusionGGML { } auto lora = std::make_shared(lora_id, backend, lora_path, is_high_noise ? "model.high_noise_" : "", version); int n_th = n_threads; -#ifdef SD_USE_RPC +#ifdef GGML_RPC if (ggml_backend_is_rpc(backend)) { n_th = 1; // avoid multi-thread for loading to remote } diff --git a/src/util.cpp b/src/util.cpp index 3aceec079..ccb71327f 100644 --- a/src/util.cpp +++ b/src/util.cpp @@ -745,9 +745,9 @@ std::vector> parse_prompt_attention(const std::str } // test if the backend is a specific one, e.g. "CUDA", "ROCm", "Vulkan" etc. 
-static inline bool sd_backend_is(ggml_backend_t backend, const std::string& name) { +bool sd_backend_is(ggml_backend_t backend, const std::string& name) { ggml_backend_dev_t dev = ggml_backend_get_device(backend); if (!dev) return false; std::string dev_name = ggml_backend_dev_name(dev); return dev_name.find(name) != std::string::npos; -} \ No newline at end of file +} diff --git a/src/util.h b/src/util.h index c72d5c027..8e7d3790d 100644 --- a/src/util.h +++ b/src/util.h @@ -7,6 +7,7 @@ #include #include "stable-diffusion.h" +#include "ggml-backend.h" #define SAFE_STR(s) ((s) ? (s) : "") #define BOOL_STR(b) ((b) ? "true" : "false") From 83f7bab5c45a0448c70bdd272475972e37a4bf78 Mon Sep 17 00:00:00 2001 From: Cyberhan123 <255542417@qq.com> Date: Thu, 26 Mar 2026 16:10:14 +0800 Subject: [PATCH 26/32] fix review --- CMakeLists.txt | 6 +++++- cmake/sd-config.cmake.in | 6 +++--- examples/cli/main.cpp | 3 +++ src/conditioner.hpp | 2 +- src/ggml_extend.hpp | 9 ++++----- src/stable-diffusion.cpp | 23 ++++++++++++++--------- src/util.cpp | 3 +++ 7 files changed, 33 insertions(+), 19 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3813c85c8..f18c81141 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -201,7 +201,11 @@ set_target_properties(${SD_LIB} PROPERTIES PUBLIC_HEADER "${SD_PUBLIC_HEADERS}") -install(TARGETS ${SD_LIB} LIBRARY PUBLIC_HEADER) +install(TARGETS ${SD_LIB} + RUNTIME DESTINATION ${SD_BIN_INSTALL_DIR} + LIBRARY DESTINATION ${SD_LIB_INSTALL_DIR} + ARCHIVE DESTINATION ${SD_LIB_INSTALL_DIR} + PUBLIC_HEADER DESTINATION ${SD_INCLUDE_INSTALL_DIR}) configure_package_config_file( ${CMAKE_CURRENT_SOURCE_DIR}/cmake/sd-config.cmake.in diff --git a/cmake/sd-config.cmake.in b/cmake/sd-config.cmake.in index 34b634aec..7b224924e 100644 --- a/cmake/sd-config.cmake.in +++ b/cmake/sd-config.cmake.in @@ -17,14 +17,14 @@ find_library(stable-diffusion_LIBRARY stable-diffusion NO_CMAKE_FIND_ROOT_PATH ) -add_library(llama UNKNOWN IMPORTED) 
+add_library(stable-diffusion UNKNOWN IMPORTED) set_target_properties(stable-diffusion PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES "${LLAMA_INCLUDE_DIR}" + INTERFACE_INCLUDE_DIRECTORIES "${SD_INCLUDE_DIR}" INTERFACE_LINK_LIBRARIES "ggml::ggml;ggml::ggml-base;" IMPORTED_LINK_INTERFACE_LANGUAGES "CXX" IMPORTED_LOCATION "${stable-diffusion_LIBRARY}" - INTERFACE_COMPILE_FEATURES c_std_90 + INTERFACE_COMPILE_FEATURES "c_std_11;cxx_std_17" POSITION_INDEPENDENT_CODE ON) check_required_components(Stable-diffusion) \ No newline at end of file diff --git a/examples/cli/main.cpp b/examples/cli/main.cpp index 1bf8f31a7..00298afae 100644 --- a/examples/cli/main.cpp +++ b/examples/cli/main.cpp @@ -12,6 +12,8 @@ #include #include +#include "ggml.h" + // #include "preprocessing.hpp" #include "stable-diffusion.h" @@ -158,6 +160,7 @@ struct SDCliParams { auto on_list_devices_arg = [&](int argc, const char** argv, int index) { size_t buff_size = backend_list_size(); + GGML_ASSERT(buff_size > 0); char* buff = (char*)malloc(buff_size); list_backends_to_buffer(buff, buff_size); printf("List of available GGML devices:\nName\tDescription\n-------------------\n%s\n", buff); diff --git a/src/conditioner.hpp b/src/conditioner.hpp index 2b234b252..09394d069 100644 --- a/src/conditioner.hpp +++ b/src/conditioner.hpp @@ -1304,7 +1304,7 @@ struct FluxCLIPEmbedder : public Conditioner { LOG_WARN("clip_l text encoder not found! Prompt adherence might be degraded."); } if (use_t5) { - LOG_INFO("T5-XXL: using %s backend", ggml_backend_name(clip_l_backend)); + LOG_INFO("T5-XXL: using %s backend", ggml_backend_name(t5_backend)); t5 = std::make_shared(t5_backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.t5xxl.transformer"); } else { LOG_WARN("t5xxl text encoder not found! 
Prompt adherence might be degraded."); diff --git a/src/ggml_extend.hpp b/src/ggml_extend.hpp index b6e6689c5..9f1163758 100644 --- a/src/ggml_extend.hpp +++ b/src/ggml_extend.hpp @@ -1511,14 +1511,13 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_group_norm(ggml_context* ctx, } __STATIC_INLINE__ void ggml_ext_backend_tensor_get_and_sync(ggml_backend_t backend, const ggml_tensor* tensor, void* data, size_t offset, size_t size) { - if (sd_backend_is(backend, "ROCm") || sd_backend_is(backend, "CUDA") || sd_backend_is(backend, "SYCL")) { - if (!ggml_backend_is_cpu(backend)) { + if ((sd_backend_is(backend, "ROCm") || sd_backend_is(backend, "CUDA") || sd_backend_is(backend, "SYCL")) && + !ggml_backend_is_cpu(backend)) { ggml_backend_tensor_get_async(backend, tensor, data, offset, size); ggml_backend_synchronize(backend); - } else { - ggml_backend_tensor_get(tensor, data, offset, size); + return; } -} + ggml_backend_tensor_get(tensor, data, offset, size); } diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index e9e9e6045..31575bbe4 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -500,16 +500,19 @@ std::vector string_split(const std::string & input, char separator) static void add_rpc_devices(const std::string & servers) { auto rpc_servers = string_split(servers, ','); if (rpc_servers.empty()) { - throw std::invalid_argument("no RPC servers specified"); + LOG_ERROR("no RPC servers specified"); + return; } ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC"); if (!rpc_reg) { - throw std::invalid_argument("failed to find RPC backend"); + LOG_ERROR("failed to find RPC backend"); + return; } typedef ggml_backend_reg_t (*ggml_backend_rpc_add_server_t)(const char * endpoint); ggml_backend_rpc_add_server_t ggml_backend_rpc_add_server_fn = (ggml_backend_rpc_add_server_t) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_server"); if (!ggml_backend_rpc_add_server_fn) { - throw std::invalid_argument("failed to find RPC add 
server function"); + LOG_ERROR("failed to find RPC add server function"); + return; } for (const auto & server : rpc_servers) { auto reg = ggml_backend_rpc_add_server_fn(server.c_str()); @@ -652,24 +655,26 @@ class StableDiffusionGGML { StableDiffusionGGML() = default; ~StableDiffusionGGML() { - if (diffusion_backend != backend) { + if (diffusion_backend && diffusion_backend != backend) { ggml_backend_free(diffusion_backend); } for(auto clip_backend : clip_backends) { - if (clip_backend != backend) { + if (clip_backend && clip_backend != backend) { ggml_backend_free(clip_backend); } } - if (control_net_backend != backend) { + if (control_net_backend && control_net_backend != backend) { ggml_backend_free(control_net_backend); } - if (tae_backend != vae_backend) { + if (tae_backend && tae_backend != vae_backend) { ggml_backend_free(tae_backend); } - if (vae_backend != backend) { + if (vae_backend && vae_backend != backend) { ggml_backend_free(vae_backend); } - ggml_backend_free(backend); + if (backend) { + ggml_backend_free(backend); + } } diff --git a/src/util.cpp b/src/util.cpp index ccb71327f..e2510f0ff 100644 --- a/src/util.cpp +++ b/src/util.cpp @@ -746,6 +746,9 @@ std::vector> parse_prompt_attention(const std::str // test if the backend is a specific one, e.g. "CUDA", "ROCm", "Vulkan" etc. 
bool sd_backend_is(ggml_backend_t backend, const std::string& name) { + if (!backend) { + return false; + } ggml_backend_dev_t dev = ggml_backend_get_device(backend); if (!dev) return false; std::string dev_name = ggml_backend_dev_name(dev); From 1556395dd301168345e02bbf97f2aadeeade6d45 Mon Sep 17 00:00:00 2001 From: Cyberhan123 <255542417@qq.com> Date: Thu, 26 Mar 2026 16:15:14 +0800 Subject: [PATCH 27/32] format code --- examples/cli/main.cpp | 18 +++---- examples/common/common.hpp | 6 +-- examples/server/main.cpp | 10 ++-- format-code.ps1 | 54 +++++++++++++++++++ src/auto_encoder_kl.hpp | 32 +++++------ src/cache_dit.hpp | 2 +- src/common_block.hpp | 7 ++- src/conditioner.hpp | 47 ++++++++--------- src/diffusion_model.hpp | 2 +- src/ggml_extend.hpp | 48 ++++++++--------- src/lora.hpp | 4 +- src/qwen_image.hpp | 6 +-- src/stable-diffusion.cpp | 105 ++++++++++++++++++------------------- src/t5.hpp | 2 +- src/tokenize_util.cpp | 4 +- src/upscaler.cpp | 2 +- src/util.cpp | 5 +- src/util.h | 2 +- src/wan.hpp | 12 ++--- src/z_image.hpp | 8 ++- 20 files changed, 212 insertions(+), 164 deletions(-) create mode 100644 format-code.ps1 diff --git a/examples/cli/main.cpp b/examples/cli/main.cpp index 00298afae..55aa3ed91 100644 --- a/examples/cli/main.cpp +++ b/examples/cli/main.cpp @@ -48,7 +48,7 @@ struct SDCliParams { bool color = false; bool normal_exit = false; - bool skip_usage = false; + bool skip_usage = false; ArgOptions get_options() { ArgOptions options; @@ -155,7 +155,7 @@ struct SDCliParams { } const char* rpc_device = argv[index]; add_rpc_device(rpc_device); - return 1; + return 1; }; auto on_list_devices_arg = [&](int argc, const char** argv, int index) { @@ -166,7 +166,7 @@ struct SDCliParams { printf("List of available GGML devices:\nName\tDescription\n-------------------\n%s\n", buff); free(buff); normal_exit = true; - skip_usage = true; + skip_usage = true; return VALID_BREAK_OPT; }; @@ -185,12 +185,12 @@ struct SDCliParams { on_help_arg}, {"", "--rpc", - 
"add a rpc device", + "add a rpc device", on_rpc_arg}, - {"", - "--list-devices", - "list available ggml compute devices", - on_list_devices_arg}, + {"", + "--list-devices", + "list available ggml compute devices", + on_list_devices_arg}, }; return options; @@ -245,7 +245,7 @@ void parse_args(int argc, const char** argv, SDCliParams& cli_params, SDContextP std::vector options_vec = {cli_params.get_options(), ctx_params.get_options(), gen_params.get_options()}; if (!parse_options(argc, argv, options_vec)) { - if (!cli_params.skip_usage){ + if (!cli_params.skip_usage) { print_usage(argc, argv, options_vec); } exit(cli_params.normal_exit ? 0 : 1); diff --git a/examples/common/common.hpp b/examples/common/common.hpp index 66a6e6719..64d3e026f 100644 --- a/examples/common/common.hpp +++ b/examples/common/common.hpp @@ -605,15 +605,15 @@ struct SDContextParams { "--control-net-backend-device", "device to use for control net (defaults to main-backend-device)", &control_net_backend_device}, - {"", + {"", "--upscaler-backend-device", "device to use for upscaling models (defaults to main-backend-device)", &upscaler_backend_device}, - {"", + {"", "--photomaker-backend-device", "device to use for photomaker (defaults to main-backend-device)", &photomaker_backend_device}, - {"", + {"", "--vision-backend-device", "device to use for clip-vision model (defaults to main-backend-device)", &vision_backend_device}, diff --git a/examples/server/main.cpp b/examples/server/main.cpp index 6e4340a61..156ba7c41 100644 --- a/examples/server/main.cpp +++ b/examples/server/main.cpp @@ -909,9 +909,10 @@ int main(int argc, const char** argv) { } } - auto get_sample_method = [](std::string name) -> enum sample_method_t { + auto get_sample_method = [](std::string name)->enum sample_method_t { enum sample_method_t result = str_to_sample_method(name.c_str()); - if (result != SAMPLE_METHOD_COUNT) return result; + if (result != SAMPLE_METHOD_COUNT) + return result; // some applications use a hardcoded 
sampler list std::transform(name.begin(), name.end(), name.begin(), [](unsigned char c) { return std::tolower(c); }); @@ -932,8 +933,9 @@ int main(int argc, const char** argv) { {"k_res_multistep", RES_MULTISTEP_SAMPLE_METHOD}, {"res 2s", RES_2S_SAMPLE_METHOD}, {"k_res_2s", RES_2S_SAMPLE_METHOD}}; - auto it = hardcoded.find(name); - if (it != hardcoded.end()) return it->second; + auto it = hardcoded.find(name); + if (it != hardcoded.end()) + return it->second; return SAMPLE_METHOD_COUNT; }; diff --git a/format-code.ps1 b/format-code.ps1 new file mode 100644 index 000000000..7f6d00727 --- /dev/null +++ b/format-code.ps1 @@ -0,0 +1,54 @@ +param( + [switch]$DryRun +) + +$ErrorActionPreference = "Stop" + +$repoRoot = $PSScriptRoot +if (-not $repoRoot) { + $repoRoot = (Get-Location).Path +} + +$patterns = @( + "src/*.cpp" + "src/*.h" + "src/*.hpp" + "src/vocab/*.h" + "src/vocab/*.cpp" + "examples/cli/*.cpp" + "examples/common/*.hpp" + "examples/cli/*.h" + "examples/server/*.cpp" +) + +Push-Location $repoRoot +try { + if (-not $DryRun) { + $null = Get-Command clang-format -ErrorAction Stop + } + + foreach ($pattern in $patterns) { + $files = Get-ChildItem -Path $pattern -File -ErrorAction SilentlyContinue | Sort-Object FullName + + foreach ($file in $files) { + $relativePath = $file.FullName.Substring($repoRoot.Length).TrimStart('\', '/') -replace '\\', '/' + + if ($relativePath -like "vocab*") { + continue + } + + Write-Host "formatting '$relativePath'" + + # if ($file.Name -ne "stable-diffusion.h") { + # clang-tidy -fix -p build_linux/ "$relativePath" + # } + + if (-not $DryRun) { + & clang-format -style=file -i $file.FullName + } + } + } +} +finally { + Pop-Location +} diff --git a/src/auto_encoder_kl.hpp b/src/auto_encoder_kl.hpp index 6efdb41a2..ebc7a6f6f 100644 --- a/src/auto_encoder_kl.hpp +++ b/src/auto_encoder_kl.hpp @@ -780,22 +780,22 @@ struct AutoEncoderKL : public VAE { -0.0511f, -0.0603f, -0.0478f, -0.0524f, -0.0227f, -0.0274f, -0.0154f, -0.0255f, -0.0572f, 
-0.0565f, -0.0518f, -0.0496f, 0.0116f, 0.0054f, 0.0163f, 0.0104f}; latents_std_vec = { - 1.8029f, 1.7786f, 1.7868f, 1.7837f, 1.7717f, 1.7590f, 1.7610f, 1.7479f, - 1.7336f, 1.7373f, 1.7340f, 1.7343f, 1.8626f, 1.8527f, 1.8629f, 1.8589f, - 1.7593f, 1.7526f, 1.7556f, 1.7583f, 1.7363f, 1.7400f, 1.7355f, 1.7394f, - 1.7342f, 1.7246f, 1.7392f, 1.7304f, 1.7551f, 1.7513f, 1.7559f, 1.7488f, - 1.8449f, 1.8454f, 1.8550f, 1.8535f, 1.8240f, 1.7813f, 1.7854f, 1.7945f, - 1.8047f, 1.7876f, 1.7695f, 1.7676f, 1.7782f, 1.7667f, 1.7925f, 1.7848f, - 1.7579f, 1.7407f, 1.7483f, 1.7368f, 1.7961f, 1.7998f, 1.7920f, 1.7925f, - 1.7780f, 1.7747f, 1.7727f, 1.7749f, 1.7526f, 1.7447f, 1.7657f, 1.7495f, - 1.7775f, 1.7720f, 1.7813f, 1.7813f, 1.8162f, 1.8013f, 1.8023f, 1.8033f, - 1.7527f, 1.7331f, 1.7563f, 1.7482f, 1.7610f, 1.7507f, 1.7681f, 1.7613f, - 1.7665f, 1.7545f, 1.7828f, 1.7726f, 1.7896f, 1.7999f, 1.7864f, 1.7760f, - 1.7613f, 1.7625f, 1.7560f, 1.7577f, 1.7783f, 1.7671f, 1.7810f, 1.7799f, - 1.7201f, 1.7068f, 1.7265f, 1.7091f, 1.7793f, 1.7578f, 1.7502f, 1.7455f, - 1.7587f, 1.7500f, 1.7525f, 1.7362f, 1.7616f, 1.7572f, 1.7444f, 1.7430f, - 1.7509f, 1.7610f, 1.7634f, 1.7612f, 1.7254f, 1.7135f, 1.7321f, 1.7226f, - 1.7664f, 1.7624f, 1.7718f, 1.7664f, 1.7457f, 1.7441f, 1.7569f, 1.7530f}; + 1.8029f, 1.7786f, 1.7868f, 1.7837f, 1.7717f, 1.7590f, 1.7610f, 1.7479f, + 1.7336f, 1.7373f, 1.7340f, 1.7343f, 1.8626f, 1.8527f, 1.8629f, 1.8589f, + 1.7593f, 1.7526f, 1.7556f, 1.7583f, 1.7363f, 1.7400f, 1.7355f, 1.7394f, + 1.7342f, 1.7246f, 1.7392f, 1.7304f, 1.7551f, 1.7513f, 1.7559f, 1.7488f, + 1.8449f, 1.8454f, 1.8550f, 1.8535f, 1.8240f, 1.7813f, 1.7854f, 1.7945f, + 1.8047f, 1.7876f, 1.7695f, 1.7676f, 1.7782f, 1.7667f, 1.7925f, 1.7848f, + 1.7579f, 1.7407f, 1.7483f, 1.7368f, 1.7961f, 1.7998f, 1.7920f, 1.7925f, + 1.7780f, 1.7747f, 1.7727f, 1.7749f, 1.7526f, 1.7447f, 1.7657f, 1.7495f, + 1.7775f, 1.7720f, 1.7813f, 1.7813f, 1.8162f, 1.8013f, 1.8023f, 1.8033f, + 1.7527f, 1.7331f, 1.7563f, 1.7482f, 1.7610f, 1.7507f, 
1.7681f, 1.7613f, + 1.7665f, 1.7545f, 1.7828f, 1.7726f, 1.7896f, 1.7999f, 1.7864f, 1.7760f, + 1.7613f, 1.7625f, 1.7560f, 1.7577f, 1.7783f, 1.7671f, 1.7810f, 1.7799f, + 1.7201f, 1.7068f, 1.7265f, 1.7091f, 1.7793f, 1.7578f, 1.7502f, 1.7455f, + 1.7587f, 1.7500f, 1.7525f, 1.7362f, 1.7616f, 1.7572f, 1.7444f, 1.7430f, + 1.7509f, 1.7610f, 1.7634f, 1.7612f, 1.7254f, 1.7135f, 1.7321f, 1.7226f, + 1.7664f, 1.7624f, 1.7718f, 1.7664f, 1.7457f, 1.7441f, 1.7569f, 1.7530f}; } else { GGML_ABORT("unknown version %d", version); } diff --git a/src/cache_dit.hpp b/src/cache_dit.hpp index 9af627fba..0d1047847 100644 --- a/src/cache_dit.hpp +++ b/src/cache_dit.hpp @@ -839,7 +839,7 @@ struct CacheDitConditionState { float* input_data = (float*)input->data; float diff = CacheDitState::calculate_residual_diff( - it->second.prev_input.data(), input_data, ne); + it->second.prev_input.data(), input_data, ne); float effective_threshold = config.residual_diff_threshold; if (config.Fn_compute_blocks > 0) { diff --git a/src/common_block.hpp b/src/common_block.hpp index e1dc2e6bf..82e95e750 100644 --- a/src/common_block.hpp +++ b/src/common_block.hpp @@ -1,11 +1,10 @@ #ifndef __COMMON_BLOCK_HPP__ #define __COMMON_BLOCK_HPP__ -#include "ggml_extend.hpp" #include "ggml-backend.h" +#include "ggml_extend.hpp" #include "util.h" - class DownSampleBlock : public GGMLBlock { protected: int channels; @@ -265,9 +264,9 @@ class FeedForward : public GGMLBlock { auto net_0 = std::dynamic_pointer_cast(blocks["net.0"]); auto net_2 = std::dynamic_pointer_cast(blocks["net.2"]); if (sd_backend_is(ctx->backend, "Vulkan")) { - net_2->set_force_prec_f32(true); + net_2->set_force_prec_f32(true); } - + x = net_0->forward(ctx, x); // [ne3, ne2, ne1, inner_dim] x = net_2->forward(ctx, x); // [ne3, ne2, ne1, dim_out] return x; diff --git a/src/conditioner.hpp b/src/conditioner.hpp index 09394d069..6af1d4cae 100644 --- a/src/conditioner.hpp +++ b/src/conditioner.hpp @@ -57,7 +57,7 @@ struct Conditioner { virtual bool 
is_cond_stage_model_name_at_index(const std::string& name, int index) { return true; } - virtual ggml_backend_t get_params_backend_at_index(int index) = 0; + virtual ggml_backend_t get_params_backend_at_index(int index) = 0; virtual ggml_backend_t get_runtime_backend_at_index(int index) = 0; }; @@ -686,9 +686,9 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { return true; } - ggml_backend_t get_params_backend_at_index(int index){ - if (sd_version_is_sdxl(version) && index == 1){ - if(text_model2) { + ggml_backend_t get_params_backend_at_index(int index) { + if (sd_version_is_sdxl(version) && index == 1) { + if (text_model2) { return text_model2->get_params_backend(); } } else if (text_model) { @@ -697,9 +697,9 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { return nullptr; } - ggml_backend_t get_runtime_backend_at_index(int index){ - if (sd_version_is_sdxl(version) && index == 1){ - if(text_model2) { + ggml_backend_t get_runtime_backend_at_index(int index) { + if (sd_version_is_sdxl(version) && index == 1) { + if (text_model2) { return text_model2->get_runtime_backend(); } } else if (text_model) { @@ -707,7 +707,6 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { } return nullptr; } - }; struct FrozenCLIPVisionEmbedder : public GGMLRunner { @@ -1233,7 +1232,7 @@ struct SD3CLIPEmbedder : public Conditioner { } } - ggml_backend_t get_params_backend_at_index(int index){ + ggml_backend_t get_params_backend_at_index(int index) { if (index == 0 && clip_l) { return clip_l->get_params_backend(); } else if (index == 1 && clip_g) { @@ -1245,7 +1244,7 @@ struct SD3CLIPEmbedder : public Conditioner { } } - ggml_backend_t get_runtime_backend_at_index(int index){ + ggml_backend_t get_runtime_backend_at_index(int index) { if (index == 0 && clip_l) { return clip_l->get_runtime_backend(); } else if (index == 1 && clip_g) { @@ -1560,7 +1559,7 @@ struct FluxCLIPEmbedder : public Conditioner { } } - ggml_backend_t 
get_params_backend_at_index(int index){ + ggml_backend_t get_params_backend_at_index(int index) { if (index == 0 && clip_l) { return clip_l->get_params_backend(); } else if (index == 1 && t5) { @@ -1570,7 +1569,7 @@ struct FluxCLIPEmbedder : public Conditioner { } } - ggml_backend_t get_runtime_backend_at_index(int index){ + ggml_backend_t get_runtime_backend_at_index(int index) { if (index == 0 && clip_l) { return clip_l->get_runtime_backend(); } else if (index == 1 && t5) { @@ -1803,15 +1802,15 @@ struct T5CLIPEmbedder : public Conditioner { conditioner_params.zero_out_masked); } - ggml_backend_t get_params_backend_at_index(int index){ - if (t5){ + ggml_backend_t get_params_backend_at_index(int index) { + if (t5) { return t5->get_params_backend(); } return nullptr; } - ggml_backend_t get_runtime_backend_at_index(int index){ - if (t5){ + ggml_backend_t get_runtime_backend_at_index(int index) { + if (t5) { return t5->get_runtime_backend(); } return nullptr; @@ -1953,15 +1952,15 @@ struct AnimaConditioner : public Conditioner { return {hidden_states, t5_weight_tensor, t5_ids_tensor}; } - ggml_backend_t get_params_backend_at_index(int index){ - if (llm){ + ggml_backend_t get_params_backend_at_index(int index) { + if (llm) { return llm->get_params_backend(); } return nullptr; } - ggml_backend_t get_runtime_backend_at_index(int index){ - if (llm){ + ggml_backend_t get_runtime_backend_at_index(int index) { + if (llm) { return llm->get_runtime_backend(); } return nullptr; @@ -2341,15 +2340,15 @@ struct LLMEmbedder : public Conditioner { return {hidden_states, nullptr, nullptr, extra_hidden_states_vec}; } - ggml_backend_t get_params_backend_at_index(int index){ - if (llm){ + ggml_backend_t get_params_backend_at_index(int index) { + if (llm) { return llm->get_params_backend(); } return nullptr; } - ggml_backend_t get_runtime_backend_at_index(int index){ - if (llm){ + ggml_backend_t get_runtime_backend_at_index(int index) { + if (llm) { return llm->get_runtime_backend(); } 
return nullptr; diff --git a/src/diffusion_model.hpp b/src/diffusion_model.hpp index 07d9df898..3068628ac 100644 --- a/src/diffusion_model.hpp +++ b/src/diffusion_model.hpp @@ -37,7 +37,7 @@ struct DiffusionModel { virtual void free_compute_buffer() = 0; virtual void get_param_tensors(std::map& tensors) = 0; virtual size_t get_params_buffer_size() = 0; - virtual void set_weight_adapter(const std::shared_ptr& adapter){}; + virtual void set_weight_adapter(const std::shared_ptr& adapter) {}; virtual int64_t get_adm_in_channels() = 0; virtual void set_flash_attention_enabled(bool enabled) = 0; virtual void set_circular_axes(bool circular_x, bool circular_y) = 0; diff --git a/src/ggml_extend.hpp b/src/ggml_extend.hpp index 9f1163758..63bc7dbbb 100644 --- a/src/ggml_extend.hpp +++ b/src/ggml_extend.hpp @@ -1299,27 +1299,25 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_ones_like(ggml_context* ctx, return ggml_ext_ones(ctx, x->ne[0], x->ne[1], x->ne[2], x->ne[3]); } -__STATIC_INLINE__ ggml_tensor* ggml_ext_cast_f32(ggml_context* ctx,ggml_backend_t backend, ggml_tensor* a) { -if (sd_backend_is(backend, "Vulkan")) -{ - auto zero_index = ggml_get_tensor(ctx, "ggml_runner_build_in_tensor:zero_int"); - auto out = ggml_reshape_1d(ctx, a, ggml_nelements(a)); - out = ggml_get_rows(ctx, out, zero_index); - out = ggml_reshape(ctx, out, a); - // auto out = ggml_cast(ctx, a, GGML_TYPE_F32); - return out; -}else{ - auto out = ggml_reshape_2d(ctx, a, 1, ggml_nelements(a)); - ggml_tensor* one = ggml_ext_ones(ctx, 1, 1, 1, 1); // [1,] - if (ggml_is_transposed(out)) { - out = ggml_mul_mat(ctx, one, out); +__STATIC_INLINE__ ggml_tensor* ggml_ext_cast_f32(ggml_context* ctx, ggml_backend_t backend, ggml_tensor* a) { + if (sd_backend_is(backend, "Vulkan")) { + auto zero_index = ggml_get_tensor(ctx, "ggml_runner_build_in_tensor:zero_int"); + auto out = ggml_reshape_1d(ctx, a, ggml_nelements(a)); + out = ggml_get_rows(ctx, out, zero_index); + out = ggml_reshape(ctx, out, a); + // auto out = 
ggml_cast(ctx, a, GGML_TYPE_F32); + return out; } else { - out = ggml_mul_mat(ctx, out, one); + auto out = ggml_reshape_2d(ctx, a, 1, ggml_nelements(a)); + ggml_tensor* one = ggml_ext_ones(ctx, 1, 1, 1, 1); // [1,] + if (ggml_is_transposed(out)) { + out = ggml_mul_mat(ctx, one, out); + } else { + out = ggml_mul_mat(ctx, out, one); + } + out = ggml_reshape(ctx, out, a); + return out; } - out = ggml_reshape(ctx, out, a); - return out; -} - } // q: [N, L_q, C(n_head*d_head)] or [N*n_head, L_q, d_head] @@ -1672,8 +1670,8 @@ struct WeightAdapter { ggml_tensor* w, ggml_tensor* b, const std::string& prefix, - ForwardParams forward_params) = 0; - virtual size_t get_extra_graph_size() = 0; + ForwardParams forward_params) = 0; + virtual size_t get_extra_graph_size() = 0; }; struct GGMLRunnerContext { @@ -2304,11 +2302,11 @@ class Linear : public UnaryBlock { force_prec_f32(force_prec_f32), scale(scale) {} - void set_scale(float scale_){ + void set_scale(float scale_) { scale = scale_; } - void set_force_prec_f32(bool force_prec_f32_){ + void set_force_prec_f32(bool force_prec_f32_) { force_prec_f32 = force_prec_f32_; } @@ -2323,7 +2321,7 @@ class Linear : public UnaryBlock { forward_params.op_type = WeightAdapter::ForwardParams::op_type_t::OP_LINEAR; forward_params.linear.force_prec_f32 = force_prec_f32; forward_params.linear.scale = scale; - return ctx->weight_adapter->forward_with_lora(ctx->ggml_ctx,ctx->backend, x, w, b, prefix, forward_params); + return ctx->weight_adapter->forward_with_lora(ctx->ggml_ctx, ctx->backend, x, w, b, prefix, forward_params); } return ggml_ext_linear(ctx->ggml_ctx, x, w, b, force_prec_f32, scale); } @@ -2503,7 +2501,7 @@ class Conv3d : public UnaryBlock { ggml_tensor* w = params["weight"]; ggml_tensor* b = nullptr; if (ctx->weight_adapter) { - w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx,ctx->backend, w, prefix + "weight"); + w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, w, prefix + "weight"); if (w->type != 
GGML_TYPE_F16) { w = ggml_cast(ctx->ggml_ctx, w, GGML_TYPE_F16); } diff --git a/src/lora.hpp b/src/lora.hpp index a261b1f51..2ddca167f 100644 --- a/src/lora.hpp +++ b/src/lora.hpp @@ -761,7 +761,7 @@ struct LoraModel : public GGMLRunner { ggml_tensor* model_tensor = it.second; // lora - ggml_tensor* diff = get_weight_diff(model_tensor_name,runtime_backend, compute_ctx, model_tensor); + ggml_tensor* diff = get_weight_diff(model_tensor_name, runtime_backend, compute_ctx, model_tensor); if (diff == nullptr) { continue; } @@ -774,7 +774,7 @@ struct LoraModel : public GGMLRunner { ggml_tensor* final_tensor; if (model_tensor->type != GGML_TYPE_F32 && model_tensor->type != GGML_TYPE_F16) { - final_tensor = ggml_ext_cast_f32(compute_ctx,runtime_backend ,model_tensor); + final_tensor = ggml_ext_cast_f32(compute_ctx, runtime_backend, model_tensor); final_tensor = ggml_add_inplace(compute_ctx, final_tensor, diff); final_tensor = ggml_cpy(compute_ctx, final_tensor, model_tensor); } else { diff --git a/src/qwen_image.hpp b/src/qwen_image.hpp index 773328877..ebc10ec52 100644 --- a/src/qwen_image.hpp +++ b/src/qwen_image.hpp @@ -122,12 +122,10 @@ namespace Qwen { auto to_v = std::dynamic_pointer_cast(blocks["to_v"]); auto to_out_0 = std::dynamic_pointer_cast(blocks["to_out.0"]); - - if (sd_backend_is(ctx->backend,"Vulkan")) - { + if (sd_backend_is(ctx->backend, "Vulkan")) { to_out_0->set_force_prec_f32(true); } - + auto norm_added_q = std::dynamic_pointer_cast(blocks["norm_added_q"]); auto norm_added_k = std::dynamic_pointer_cast(blocks["norm_added_k"]); diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 31575bbe4..629b738cc 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -1,12 +1,12 @@ #include "ggml-cpu.h" #include "ggml_extend.hpp" +#include #include "model.h" #include "rng.hpp" #include "rng_mt19937.hpp" #include "rng_philox.hpp" #include "stable-diffusion.h" -#include #include "util.h" #include "auto_encoder_kl.hpp" @@ -482,45 
+482,44 @@ static void log_sample_cache_summary(const SampleCacheRuntime& runtime, size_t t } } -std::vector string_split(const std::string & input, char separator) -{ +std::vector string_split(const std::string& input, char separator) { std::vector parts; - size_t begin_pos = 0; + size_t begin_pos = 0; size_t separator_pos = input.find(separator); while (separator_pos != std::string::npos) { std::string part = input.substr(begin_pos, separator_pos - begin_pos); parts.emplace_back(part); - begin_pos = separator_pos + 1; + begin_pos = separator_pos + 1; separator_pos = input.find(separator, begin_pos); } parts.emplace_back(input.substr(begin_pos, separator_pos - begin_pos)); return parts; } -static void add_rpc_devices(const std::string & servers) { +static void add_rpc_devices(const std::string& servers) { auto rpc_servers = string_split(servers, ','); if (rpc_servers.empty()) { - LOG_ERROR("no RPC servers specified"); - return; + LOG_ERROR("no RPC servers specified"); + return; } ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC"); if (!rpc_reg) { LOG_ERROR("failed to find RPC backend"); - return; + return; } - typedef ggml_backend_reg_t (*ggml_backend_rpc_add_server_t)(const char * endpoint); - ggml_backend_rpc_add_server_t ggml_backend_rpc_add_server_fn = (ggml_backend_rpc_add_server_t) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_server"); + typedef ggml_backend_reg_t (*ggml_backend_rpc_add_server_t)(const char* endpoint); + ggml_backend_rpc_add_server_t ggml_backend_rpc_add_server_fn = (ggml_backend_rpc_add_server_t)ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_server"); if (!ggml_backend_rpc_add_server_fn) { LOG_ERROR("failed to find RPC add server function"); - return; + return; } - for (const auto & server : rpc_servers) { + for (const auto& server : rpc_servers) { auto reg = ggml_backend_rpc_add_server_fn(server.c_str()); ggml_backend_register(reg); } } -void add_rpc_device(const char* servers_cstr){ +void 
add_rpc_device(const char* servers_cstr) { std::string servers(servers_cstr); add_rpc_devices(servers); } @@ -558,14 +557,14 @@ std::vector> list_backends_vector() { return backends; } -SD_API size_t backend_list_size(){ +SD_API size_t backend_list_size() { // for C API size_t buffer_size = 0; - auto backends = list_backends_vector(); + auto backends = list_backends_vector(); for (auto& backend : backends) { auto dev_name_size = backend.first.size(); auto dev_desc_size = backend.second.size(); - buffer_size+=dev_name_size+dev_desc_size+2; // +2 for the separators + buffer_size += dev_name_size + dev_desc_size + 2; // +2 for the separators } return buffer_size; } @@ -578,17 +577,17 @@ SD_API void list_backends_to_buffer(char* buffer, size_t buffer_size) { size_t name_size = backend.first.size(); size_t desc_size = backend.second.size(); if (offset + name_size + desc_size + 2 > buffer_size) { - break; // Not enough space in the buffer + break; // Not enough space in the buffer } memcpy(buffer + offset, backend.first.c_str(), name_size); offset += name_size; buffer[offset++] = '\t'; memcpy(buffer + offset, backend.second.c_str(), desc_size); offset += desc_size; - buffer[offset++] = '\n'; + buffer[offset++] = '\n'; } if (offset < buffer_size) { - buffer[offset] = '\0'; // Ensure the buffer is null-terminated at the end + buffer[offset] = '\0'; // Ensure the buffer is null-terminated at the end } else { LOG_WARN("Provided buffer size is too small to contain details of all devices."); buffer[buffer_size - 1] = '\0'; // Ensure the buffer is null-terminated at the end @@ -607,7 +606,7 @@ class StableDiffusionGGML { ggml_backend_t pmid_backend = nullptr; ggml_backend_t vision_backend = nullptr; - std::vector clip_backends = {nullptr}; + std::vector clip_backends = {nullptr}; SDVersion version; bool vae_decode_only = false; @@ -655,10 +654,10 @@ class StableDiffusionGGML { StableDiffusionGGML() = default; ~StableDiffusionGGML() { - if (diffusion_backend && diffusion_backend 
!= backend) { + if (diffusion_backend && diffusion_backend != backend) { ggml_backend_free(diffusion_backend); } - for(auto clip_backend : clip_backends) { + for (auto clip_backend : clip_backends) { if (clip_backend && clip_backend != backend) { ggml_backend_free(clip_backend); } @@ -677,7 +676,6 @@ class StableDiffusionGGML { } } - void log_backends() { const int device_count = ggml_backend_dev_count(); for (int i = 0; i < device_count; i++) { @@ -724,16 +722,16 @@ class StableDiffusionGGML { default_backend_name = override_default_backend_name; } - std::string diffusion_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->diffusion_device)); - std::vector clip_backend_names = sanitize_backend_name_list(SAFE_STR(sd_ctx_params->clip_device)); - std::string control_net_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->control_net_device)); - std::string vae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vae_device)); - std::string tae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->tae_device)); - std::string pmid_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->photomaker_device)); - std::string vision_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vision_device)); + std::string diffusion_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->diffusion_device)); + std::vector clip_backend_names = sanitize_backend_name_list(SAFE_STR(sd_ctx_params->clip_device)); + std::string control_net_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->control_net_device)); + std::string vae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vae_device)); + std::string tae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->tae_device)); + std::string pmid_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->photomaker_device)); + std::string vision_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vision_device)); - bool diffusion_backend_is_default = 
diffusion_backend_name.empty() || diffusion_backend_name == default_backend_name; - bool clip_backends_are_default = true; + bool diffusion_backend_is_default = diffusion_backend_name.empty() || diffusion_backend_name == default_backend_name; + bool clip_backends_are_default = true; for (const auto& clip_backend_name : clip_backend_names) { if (!clip_backend_name.empty() && clip_backend_name != default_backend_name) { clip_backends_are_default = false; @@ -743,8 +741,8 @@ class StableDiffusionGGML { bool control_net_backend_is_default = (control_net_backend_name.empty() || control_net_backend_name == default_backend_name); bool vae_backend_is_default = (vae_backend_name.empty() || vae_backend_name == default_backend_name); // if tae_backend_name is empty, it will use the same backend as vae - bool tae_backend_is_default = (tae_backend_name.empty() && vae_backend_is_default) || tae_backend_name == default_backend_name; - bool pmid_backend_is_default = (pmid_backend_name.empty() || pmid_backend_name == default_backend_name); + bool tae_backend_is_default = (tae_backend_name.empty() && vae_backend_is_default) || tae_backend_name == default_backend_name; + bool pmid_backend_is_default = (pmid_backend_name.empty() || pmid_backend_name == default_backend_name); bool vision_backend_is_default = (vision_backend_name.empty() || vision_backend_name == default_backend_name); // if some backend is not specified or is the same as the default backend, use the default backend @@ -934,12 +932,12 @@ class StableDiffusionGGML { { if (!clip_backends_are_default) { clip_backends.clear(); - for(auto clip_backend_name : clip_backend_names){ + for (auto clip_backend_name : clip_backend_names) { auto clip_backend = init_named_backend(clip_backend_name); LOG_INFO("CLIP: Using %s backend", ggml_backend_name(clip_backend)); - clip_backends.push_back(clip_backend); + clip_backends.push_back(clip_backend); } - }else{ + } else { clip_backends = {backend}; } if (sd_version_is_sd3(version)) { @@ 
-1050,9 +1048,9 @@ class StableDiffusionGGML { offload_params_to_cpu, tensor_storage_map); diffusion_model = std::make_shared(backend, - offload_params_to_cpu, - tensor_storage_map, - "model.diffusion_model"); + offload_params_to_cpu, + tensor_storage_map, + "model.diffusion_model"); } else if (sd_version_is_z_image(version)) { cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, @@ -1218,7 +1216,7 @@ class StableDiffusionGGML { control_net->set_conv2d_direct_enabled(true); } } - pmid_backend = backend; + pmid_backend = backend; if (!pmid_backend_is_default) { pmid_backend = init_named_backend(pmid_backend_name); LOG_INFO("PhotoMaker: Using %s backend", ggml_backend_name(pmid_backend)); @@ -1401,7 +1399,7 @@ class StableDiffusionGGML { size_t total_params_ram_size = 0; size_t total_params_vram_size = 0; - + // TODO: split by individual text encoders if (ggml_backend_is_cpu(clip_backends[0])) { total_params_ram_size += clip_params_mem_size + pmid_params_mem_size; @@ -1640,8 +1638,8 @@ class StableDiffusionGGML { } for (auto& kv : lora_state_diff) { - bool applied = false; - int64_t t0 = ggml_time_ms(); + bool applied = false; + int64_t t0 = ggml_time_ms(); auto lora_tensor_filter_diff = [&](const std::string& tensor_name) { if (is_diffusion_model_name(tensor_name)) { return true; @@ -1681,7 +1679,7 @@ class StableDiffusionGGML { } return false; }; - LOG_INFO("applying lora to first stage model"); + LOG_INFO("applying lora to first stage model"); auto first_stage_backend = first_stage_model->get_params_backend(); lora = load_lora_model_from_file(kv.first, kv.second, first_stage_backend, lora_tensor_filter_first); if (lora && !lora->lora_tensors.empty()) { @@ -1733,10 +1731,9 @@ class StableDiffusionGGML { lora_state_diff.erase(iter); } } - cond_stage_lora_models = lora_models; + cond_stage_lora_models = lora_models; - - for(int i=0;imodel_count;i++){ + for (int i = 0; i < cond_stage_model->model_count; i++) { auto lora_tensor_filter_cond = 
[&](const std::string& tensor_name) { if (is_cond_stage_model_name(tensor_name)) { return cond_stage_model->is_cond_stage_model_name_at_index(tensor_name, i); @@ -1746,8 +1743,8 @@ class StableDiffusionGGML { for (auto& kv : lora_state_diff) { const std::string& lora_id = kv.first; float multiplier = kv.second; - auto backend = cond_stage_model->get_runtime_backend_at_index(i); - auto lora = load_lora_model_from_file(kv.first, kv.second, backend, lora_tensor_filter_cond); + auto backend = cond_stage_model->get_runtime_backend_at_index(i); + auto lora = load_lora_model_from_file(kv.first, kv.second, backend, lora_tensor_filter_cond); if (lora && !lora->lora_tensors.empty()) { lora->preprocess_lora_tensors(tensors); cond_stage_lora_models.push_back(lora); @@ -2101,9 +2098,9 @@ class StableDiffusionGGML { uint32_t dim = static_cast(latents->ne[ggml_n_dims(latents) - 1]); if (preview_mode == PREVIEW_PROJ) { - int patch_sz = 1; - const float(*latent_rgb_proj)[channel] = nullptr; - float* latent_rgb_bias = nullptr; + int patch_sz = 1; + const float (*latent_rgb_proj)[channel] = nullptr; + float* latent_rgb_bias = nullptr; if (dim == 128) { if (sd_version_is_flux2(version)) { diff --git a/src/t5.hpp b/src/t5.hpp index 5f8c99dda..0cf1409d4 100644 --- a/src/t5.hpp +++ b/src/t5.hpp @@ -362,7 +362,7 @@ class T5UniGramTokenizer { BuildTrie(&pieces); } - ~T5UniGramTokenizer(){}; + ~T5UniGramTokenizer() {}; std::string Normalize(const std::string& input) const { // Ref: https://github.com/huggingface/tokenizers/blob/1ff56c0c70b045f0cd82da1af9ac08cd4c7a6f9f/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py#L29 diff --git a/src/tokenize_util.cpp b/src/tokenize_util.cpp index 22cf8ae2e..68f78d790 100644 --- a/src/tokenize_util.cpp +++ b/src/tokenize_util.cpp @@ -10,7 +10,9 @@ bool is_number(char32_t ch) { } bool is_letter(char32_t ch) { - static const struct { char32_t start, end; } ranges[] = { + static const struct { + char32_t start, end; + } ranges[] = 
{ {0x41, 0x5A}, {0x61, 0x7A}, {0xAA, 0xAA}, diff --git a/src/upscaler.cpp b/src/upscaler.cpp index 021605e8a..5a0d9562a 100644 --- a/src/upscaler.cpp +++ b/src/upscaler.cpp @@ -25,7 +25,7 @@ struct UpscalerGGML { int n_threads, std::string device = "") { ggml_log_set(ggml_log_callback_default, nullptr); - device = sanitize_backend_name(device); + device = sanitize_backend_name(device); backend = init_named_backend(device); ModelLoader model_loader; if (!model_loader.init_from_file_and_convert_name(esrgan_path)) { diff --git a/src/util.cpp b/src/util.cpp index e2510f0ff..511a595ab 100644 --- a/src/util.cpp +++ b/src/util.cpp @@ -750,7 +750,8 @@ bool sd_backend_is(ggml_backend_t backend, const std::string& name) { return false; } ggml_backend_dev_t dev = ggml_backend_get_device(backend); - if (!dev) return false; - std::string dev_name = ggml_backend_dev_name(dev); + if (!dev) + return false; + std::string dev_name = ggml_backend_dev_name(dev); return dev_name.find(name) != std::string::npos; } diff --git a/src/util.h b/src/util.h index 8e7d3790d..a77e05046 100644 --- a/src/util.h +++ b/src/util.h @@ -6,8 +6,8 @@ #include #include -#include "stable-diffusion.h" #include "ggml-backend.h" +#include "stable-diffusion.h" #define SAFE_STR(s) ((s) ? (s) : "") #define BOOL_STR(b) ((b) ? 
"true" : "false") diff --git a/src/wan.hpp b/src/wan.hpp index af8acbfda..82cd33390 100644 --- a/src/wan.hpp +++ b/src/wan.hpp @@ -1150,12 +1150,12 @@ namespace WAN { -0.0313f, -0.1649f, 0.0117f, 0.0723f, -0.2839f, -0.2083f, -0.0520f, 0.3748f, 0.0152f, 0.1957f, 0.1433f, -0.2944f, 0.3573f, -0.0548f, -0.1681f, -0.0667f}; latents_std_vec = { - 0.4765f, 1.0364f, 0.4514f, 1.1677f, 0.5313f, 0.4990f, 0.4818f, 0.5013f, - 0.8158f, 1.0344f, 0.5894f, 1.0901f, 0.6885f, 0.6165f, 0.8454f, 0.4978f, - 0.5759f, 0.3523f, 0.7135f, 0.6804f, 0.5833f, 1.4146f, 0.8986f, 0.5659f, - 0.7069f, 0.5338f, 0.4889f, 0.4917f, 0.4069f, 0.4999f, 0.6866f, 0.4093f, - 0.5709f, 0.6065f, 0.6415f, 0.4944f, 0.5726f, 1.2042f, 0.5458f, 1.6887f, - 0.3971f, 1.0600f, 0.3943f, 0.5537f, 0.5444f, 0.4089f, 0.7468f, 0.7744f}; + 0.4765f, 1.0364f, 0.4514f, 1.1677f, 0.5313f, 0.4990f, 0.4818f, 0.5013f, + 0.8158f, 1.0344f, 0.5894f, 1.0901f, 0.6885f, 0.6165f, 0.8454f, 0.4978f, + 0.5759f, 0.3523f, 0.7135f, 0.6804f, 0.5833f, 1.4146f, 0.8986f, 0.5659f, + 0.7069f, 0.5338f, 0.4889f, 0.4917f, 0.4069f, 0.4999f, 0.6866f, 0.4093f, + 0.5709f, 0.6065f, 0.6415f, 0.4944f, 0.5726f, 1.2042f, 0.5458f, 1.6887f, + 0.3971f, 1.0600f, 0.3943f, 0.5537f, 0.5444f, 0.4089f, 0.7468f, 0.7744f}; } } diff --git a/src/z_image.hpp b/src/z_image.hpp index ad09ed1e9..e16e179db 100644 --- a/src/z_image.hpp +++ b/src/z_image.hpp @@ -48,11 +48,10 @@ namespace ZImage { auto qkv_proj = std::dynamic_pointer_cast(blocks["qkv"]); auto out_proj = std::dynamic_pointer_cast(blocks["out"]); - if (sd_backend_is(ctx->backend,"ROCm")) - { + if (sd_backend_is(ctx->backend, "ROCm")) { out_proj->set_scale(1.f / 16.f); } - + auto qkv = qkv_proj->forward(ctx, x); // [N, n_token, (num_heads + num_kv_heads*2)*head_dim] qkv = ggml_reshape_4d(ctx->ggml_ctx, qkv, head_dim, num_heads + num_kv_heads * 2, qkv->ne[1], qkv->ne[2]); // [N, n_token, num_heads + num_kv_heads*2, head_dim] @@ -128,8 +127,7 @@ namespace ZImage { auto w2 = std::dynamic_pointer_cast(blocks["w2"]); auto w3 = 
std::dynamic_pointer_cast(blocks["w3"]); - if (sd_backend_is(ctx->backend,"Vulkan")) - { + if (sd_backend_is(ctx->backend, "Vulkan")) { w2->set_force_prec_f32(true); } From 1428dc170d568d06d0c0d75037fff68512b203f7 Mon Sep 17 00:00:00 2001 From: Cyberhan123 <255542417@qq.com> Date: Thu, 26 Mar 2026 16:50:50 +0800 Subject: [PATCH 28/32] up date ci job --- .github/workflows/build.yml | 72 +++++++++++++++++-------------------- Dockerfile.cuda | 2 +- Dockerfile.musa | 2 +- Dockerfile.sycl | 2 +- Dockerfile.vulkan | 2 +- 5 files changed, 37 insertions(+), 43 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1fbcbf94d..f697effee 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -75,10 +75,13 @@ jobs: - name: Build id: cmake_build run: | - mkdir build - cd build - cmake .. -DGGML_AVX2=ON -DSD_BUILD_SHARED_LIBS=ON - cmake --build . --config Release + cmake -B build \ + -DCMAKE_INSTALL_RPATH='$ORIGIN' \ + -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ + -DGGML_BACKEND_DL=ON \ + -DGGML_NATIVE=OFF \ + -DGGML_CPU_ALL_VARIANTS=ON \ + cmake --build build --config Release -j $(nproc) - name: Get commit hash id: commit @@ -138,10 +141,14 @@ jobs: - name: Build id: cmake_build run: | - mkdir build - cd build - cmake .. -DSD_BUILD_SHARED_LIBS=ON -DSD_VULKAN=ON - cmake --build . --config Release + cmake -B build \ + -DCMAKE_INSTALL_RPATH='$ORIGIN' \ + -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ + -DGGML_BACKEND_DL=ON \ + -DGGML_NATIVE=OFF \ + -DGGML_CPU_ALL_VARIANTS=ON \ + -DGGML_VULKAN=ON + cmake --build build --config Release -j $(nproc) - name: Get commit hash id: commit @@ -275,10 +282,12 @@ jobs: id: cmake_build run: | sysctl -a - mkdir build - cd build - cmake .. -DGGML_AVX2=ON -DCMAKE_OSX_ARCHITECTURES="arm64;x86_64" -DSD_BUILD_SHARED_LIBS=ON - cmake --build . 
--config Release + cmake -B build \ + -DCMAKE_INSTALL_RPATH='@loader_path' \ + -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ + -DGGML_METAL=ON \ + -DGGML_METAL_EMBED_LIBRARY=ON \ + cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) - name: Get commit hash id: commit @@ -318,18 +327,12 @@ jobs: strategy: matrix: include: - - build: "noavx" - defines: "-DGGML_NATIVE=OFF -DGGML_AVX=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF -DSD_BUILD_SHARED_LIBS=ON" - - build: "avx2" - defines: "-DGGML_NATIVE=OFF -DGGML_AVX2=ON -DSD_BUILD_SHARED_LIBS=ON" - - build: "avx" - defines: "-DGGML_NATIVE=OFF -DGGML_AVX=ON -DGGML_AVX2=OFF -DSD_BUILD_SHARED_LIBS=ON" - - build: "avx512" - defines: "-DGGML_NATIVE=OFF -DGGML_AVX512=ON -DGGML_AVX=ON -DGGML_AVX2=ON -DSD_BUILD_SHARED_LIBS=ON" + - build: "cpu" + defines: "-DGGML_NATIVE=OFF -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON" - build: "cuda12" - defines: "-DSD_CUDA=ON -DSD_BUILD_SHARED_LIBS=ON -DCMAKE_CUDA_ARCHITECTURES='61;70;75;80;86;89;90;100;120' -DCMAKE_CUDA_FLAGS='-Xcudafe \"--diag_suppress=177\" -Xcudafe \"--diag_suppress=550\"'" + defines: "-DGGML_NATIVE=OFF -DGGML_CUDA=ON -DGGML_BACKEND_DL=ON -DCMAKE_CUDA_ARCHITECTURES='61;70;75;80;86;89;90;100;120' -DCMAKE_CUDA_FLAGS='-Xcudafe \"--diag_suppress=177\" -Xcudafe \"--diag_suppress=550\"'" - build: "vulkan" - defines: "-DSD_VULKAN=ON -DSD_BUILD_SHARED_LIBS=ON" + defines: "-DGGML_NATIVE=OFF -DGGML_CPU=OFF -DGGML_VULKAN=ON -DGGML_BACKEND_DL=ON" steps: - name: Clone id: checkout @@ -377,19 +380,6 @@ jobs: cmake .. -DCMAKE_CXX_FLAGS='/bigobj' -G Ninja -DCMAKE_C_COMPILER=cl.exe -DCMAKE_CXX_COMPILER=cl.exe -DCMAKE_BUILD_TYPE=Release ${{ matrix.defines }} cmake --build . 
- - name: Check AVX512F support - id: check_avx512f - if: ${{ matrix.build == 'avx512' }} - continue-on-error: true - run: | - cd build - $vcdir = $(vswhere -latest -products * -requires Microsoft.VisualStudio.Component.VC.Tools.x86.x64 -property installationPath) - $msvc = $(join-path $vcdir $('VC\Tools\MSVC\'+$(gc -raw $(join-path $vcdir 'VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt')).Trim())) - $cl = $(join-path $msvc 'bin\Hostx64\x64\cl.exe') - echo 'int main(void){unsigned int a[4];__cpuid(a,7);return !(a[1]&65536);}' >> avx512f.c - & $cl /O2 /GS- /kernel avx512f.c /link /nodefaultlib /entry:main - .\avx512f.exe && echo "AVX512F: YES" && ( echo HAS_AVX512F=1 >> $env:GITHUB_ENV ) || echo "AVX512F: NO" - - name: Get commit hash id: commit if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} @@ -514,9 +504,10 @@ jobs: $env:CMAKE_PREFIX_PATH="${env:HIP_PATH}" cmake .. ` -G "Unix Makefiles" ` - -DSD_HIPBLAS=ON ` - -DSD_BUILD_SHARED_LIBS=ON ` + -DGGML_HIP=ON ` -DGGML_NATIVE=OFF ` + -DGGML_CPU=OFF ` + -DGGML_BACKEND_DL=ON ` -DCMAKE_C_COMPILER=clang ` -DCMAKE_CXX_COMPILER=clang++ ` -DCMAKE_BUILD_TYPE=Release ` @@ -643,13 +634,16 @@ jobs: -DCMAKE_CXX_COMPILER=amdclang++ \ -DCMAKE_C_COMPILER=amdclang \ -DCMAKE_BUILD_TYPE=Release \ + -DGGML_BACKEND_DL=ON \ + -DGGML_NATIVE=OFF \ + -DCMAKE_INSTALL_RPATH='$ORIGIN' \ + -DGGML_CPU_ALL_VARIANTS=ON \ -DSD_HIPBLAS=ON \ -DGPU_TARGETS="${{ env.GPU_TARGETS }}" \ -DAMDGPU_TARGETS="${{ env.GPU_TARGETS }}" \ -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ - -DSD_BUILD_SHARED_LIBS=ON - cmake --build . --config Release + cmake --build . --config Release -j $(nproc) - name: Get commit hash id: commit diff --git a/Dockerfile.cuda b/Dockerfile.cuda index 4deb72477..745d12559 100644 --- a/Dockerfile.cuda +++ b/Dockerfile.cuda @@ -10,7 +10,7 @@ WORKDIR /sd.cpp COPY . . ARG CUDACXX=/usr/local/cuda/bin/nvcc -RUN cmake . 
-B ./build -DSD_CUDA=ON +RUN cmake . -B ./build -DGGML_CUDA=ON RUN cmake --build ./build --config Release -j$(nproc) FROM nvidia/cuda:${CUDA_VERSION}-cudnn-runtime-ubuntu${UBUNTU_VERSION} AS runtime diff --git a/Dockerfile.musa b/Dockerfile.musa index 2d95f817f..407e7fc5c 100644 --- a/Dockerfile.musa +++ b/Dockerfile.musa @@ -13,7 +13,7 @@ RUN mkdir build && cd build && \ cmake .. -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ \ -DCMAKE_C_FLAGS="${CMAKE_C_FLAGS} -fopenmp -I/usr/lib/llvm-14/lib/clang/14.0.0/include -L/usr/lib/llvm-14/lib" \ -DCMAKE_CXX_FLAGS="${CMAKE_CXX_FLAGS} -fopenmp -I/usr/lib/llvm-14/lib/clang/14.0.0/include -L/usr/lib/llvm-14/lib" \ - -DSD_MUSA=ON -DCMAKE_BUILD_TYPE=Release && \ + -DGGML_MUSA=ON -DCMAKE_BUILD_TYPE=Release && \ cmake --build . --config Release FROM mthreads/musa:${MUSA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}-amd64 as runtime diff --git a/Dockerfile.sycl b/Dockerfile.sycl index 466d5517c..24d7b2d16 100644 --- a/Dockerfile.sycl +++ b/Dockerfile.sycl @@ -9,7 +9,7 @@ WORKDIR /sd.cpp COPY . . RUN mkdir build && cd build && \ - cmake .. -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DSD_SYCL=ON -DCMAKE_BUILD_TYPE=Release && \ + cmake .. -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL=ON -DCMAKE_BUILD_TYPE=Release && \ cmake --build . --config Release -j$(nproc) FROM intel/oneapi-basekit:${SYCL_VERSION}-devel-ubuntu24.04 AS runtime diff --git a/Dockerfile.vulkan b/Dockerfile.vulkan index 5ba6cb05d..5c85fa1ad 100644 --- a/Dockerfile.vulkan +++ b/Dockerfile.vulkan @@ -8,7 +8,7 @@ WORKDIR /sd.cpp COPY . . -RUN cmake . -B ./build -DSD_VULKAN=ON +RUN cmake . 
-B ./build -DGGML_VULKAN=ON RUN cmake --build ./build --config Release --parallel FROM ubuntu:$UBUNTU_VERSION AS runtime From 1cf3edca2306184159e2dde0c19abf5f127c120c Mon Sep 17 00:00:00 2001 From: Cyberhan123 <255542417@qq.com> Date: Thu, 26 Mar 2026 16:56:36 +0800 Subject: [PATCH 29/32] fix vision_backend null --- src/stable-diffusion.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 629b738cc..bf2453a73 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -1020,6 +1020,10 @@ class StableDiffusionGGML { if (diffusion_model->get_desc() == "Wan2.1-I2V-14B" || diffusion_model->get_desc() == "Wan2.1-FLF2V-14B" || diffusion_model->get_desc() == "Wan2.1-I2V-1.3B") { + if (!vision_backend) { + LOG_ERROR("WAN2.1 Need a vision_backend"); + return false; + } clip_vision = std::make_shared(vision_backend, offload_params_to_cpu, tensor_storage_map); From 4e5471c1553e7b99754d8d2a190cf86bc76c7c24 Mon Sep 17 00:00:00 2001 From: Cyberhan123 <255542417@qq.com> Date: Thu, 26 Mar 2026 17:01:44 +0800 Subject: [PATCH 30/32] rm -j args --- .github/workflows/build.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f697effee..06d6a83ac 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -81,7 +81,7 @@ jobs: -DGGML_BACKEND_DL=ON \ -DGGML_NATIVE=OFF \ -DGGML_CPU_ALL_VARIANTS=ON \ - cmake --build build --config Release -j $(nproc) + cmake --build build --config Release - name: Get commit hash id: commit @@ -148,7 +148,7 @@ jobs: -DGGML_NATIVE=OFF \ -DGGML_CPU_ALL_VARIANTS=ON \ -DGGML_VULKAN=ON - cmake --build build --config Release -j $(nproc) + cmake --build build --config Release - name: Get commit hash id: commit @@ -287,7 +287,7 @@ jobs: -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ -DGGML_METAL=ON \ -DGGML_METAL_EMBED_LIBRARY=ON \ - cmake --build build --config Release -j $(sysctl -n 
hw.logicalcpu) + cmake --build build --config Release - name: Get commit hash id: commit @@ -643,7 +643,7 @@ jobs: -DAMDGPU_TARGETS="${{ env.GPU_TARGETS }}" \ -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ - cmake --build . --config Release -j $(nproc) + cmake --build . --config Release - name: Get commit hash id: commit From 7783b321bde36f1ef67bbc72896c83232306e10b Mon Sep 17 00:00:00 2001 From: Cyberhan123 <255542417@qq.com> Date: Thu, 26 Mar 2026 17:15:17 +0800 Subject: [PATCH 31/32] test ci --- .github/workflows/build.yml | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 06d6a83ac..a26bccaed 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -76,12 +76,13 @@ jobs: id: cmake_build run: | cmake -B build \ + -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_INSTALL_RPATH='$ORIGIN' \ -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ -DGGML_BACKEND_DL=ON \ -DGGML_NATIVE=OFF \ -DGGML_CPU_ALL_VARIANTS=ON \ - cmake --build build --config Release + cmake --build build -j $(nproc) - name: Get commit hash id: commit @@ -142,13 +143,14 @@ jobs: id: cmake_build run: | cmake -B build \ + -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_RPATH='$ORIGIN' \ -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ -DGGML_BACKEND_DL=ON \ -DGGML_NATIVE=OFF \ -DGGML_CPU_ALL_VARIANTS=ON \ -DGGML_VULKAN=ON - cmake --build build --config Release + cmake --build build -j $(nproc) - name: Get commit hash id: commit @@ -283,11 +285,12 @@ jobs: run: | sysctl -a cmake -B build \ + -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_INSTALL_RPATH='@loader_path' \ -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ -DGGML_METAL=ON \ -DGGML_METAL_EMBED_LIBRARY=ON \ - cmake --build build --config Release + cmake --build build -j $(sysctl -n hw.logicalcpu) - name: Get commit hash id: commit @@ -332,7 +335,7 @@ jobs: - build: "cuda12" defines: "-DGGML_NATIVE=OFF -DGGML_CUDA=ON -DGGML_BACKEND_DL=ON 
-DCMAKE_CUDA_ARCHITECTURES='61;70;75;80;86;89;90;100;120' -DCMAKE_CUDA_FLAGS='-Xcudafe \"--diag_suppress=177\" -Xcudafe \"--diag_suppress=550\"'" - build: "vulkan" - defines: "-DGGML_NATIVE=OFF -DGGML_CPU=OFF -DGGML_VULKAN=ON -DGGML_BACKEND_DL=ON" + defines: "-DGGML_NATIVE=OFF -DGGML_CPU=ON -DGGML_VULKAN=ON -DGGML_BACKEND_DL=ON" steps: - name: Clone id: checkout @@ -506,7 +509,7 @@ jobs: -G "Unix Makefiles" ` -DGGML_HIP=ON ` -DGGML_NATIVE=OFF ` - -DGGML_CPU=OFF ` + -DGGML_CPU=ON ` -DGGML_BACKEND_DL=ON ` -DCMAKE_C_COMPILER=clang ` -DCMAKE_CXX_COMPILER=clang++ ` @@ -638,7 +641,7 @@ jobs: -DGGML_NATIVE=OFF \ -DCMAKE_INSTALL_RPATH='$ORIGIN' \ -DGGML_CPU_ALL_VARIANTS=ON \ - -DSD_HIPBLAS=ON \ + -DGGML_HIP=ON \ -DGPU_TARGETS="${{ env.GPU_TARGETS }}" \ -DAMDGPU_TARGETS="${{ env.GPU_TARGETS }}" \ -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ From 3d392a137e007a65841281af1b311daeadb973a2 Mon Sep 17 00:00:00 2001 From: Cyberhan123 <255542417@qq.com> Date: Thu, 26 Mar 2026 17:36:18 +0800 Subject: [PATCH 32/32] fix: remove trailing backslashes in CMake commands --- .github/workflows/build.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a26bccaed..4e0b9ae98 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -81,7 +81,7 @@ jobs: -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ -DGGML_BACKEND_DL=ON \ -DGGML_NATIVE=OFF \ - -DGGML_CPU_ALL_VARIANTS=ON \ + -DGGML_CPU_ALL_VARIANTS=ON cmake --build build -j $(nproc) - name: Get commit hash @@ -143,7 +143,7 @@ jobs: id: cmake_build run: | cmake -B build \ - -DCMAKE_BUILD_TYPE=Release + -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_INSTALL_RPATH='$ORIGIN' \ -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ -DGGML_BACKEND_DL=ON \ @@ -289,7 +289,7 @@ jobs: -DCMAKE_INSTALL_RPATH='@loader_path' \ -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ -DGGML_METAL=ON \ - -DGGML_METAL_EMBED_LIBRARY=ON \ + -DGGML_METAL_EMBED_LIBRARY=ON cmake --build build -j $(sysctl -n 
hw.logicalcpu) - name: Get commit hash @@ -645,7 +645,7 @@ jobs: -DGPU_TARGETS="${{ env.GPU_TARGETS }}" \ -DAMDGPU_TARGETS="${{ env.GPU_TARGETS }}" \ -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ - -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ + -DCMAKE_POSITION_INDEPENDENT_CODE=ON cmake --build . --config Release - name: Get commit hash