[KSM] support keep sampling mask #7222

base: develop
Changes from all commits
```diff
@@ -460,6 +460,14 @@ class EngineArgs:
     Must be explicitly enabled via the `--enable-logprob` startup parameter to output logprob values.
     """
 
+    enable_keep_sampling_mask: bool = False
+    """
+    When enabled, the server returns a sparse index list for each generated token, indicating
+    which vocabulary positions were retained after top_p/top_k sampling, and streams it to
+    the client. In MTP (multi-token prediction) scenarios this field is a List[List[int]],
+    where each inner list contains the retained vocabulary indices for a predicted token.
+    """
+
     max_logprobs: int = 20
     """
     Maximum number of log probabilities to return when `enable_logprob` is True. The default value comes from the default for the
```
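To make the docstring above concrete, here is a minimal sketch of the two payload shapes it describes; the token and vocabulary indices are invented for illustration, not actual server output:

```python
# Hypothetical payloads for the sampling mask described above.

# Non-MTP decoding: one sparse index list per generated token step;
# each integer is a vocabulary position that survived top_p/top_k filtering.
non_mtp_step_mask = [17, 402, 5031]            # List[int]

# MTP (multi-token prediction): one inner list per predicted token in the step.
mtp_step_mask = [[17, 402, 5031], [8, 99]]     # List[List[int]]
```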
```diff
@@ -893,6 +901,18 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
         default=EngineArgs.enable_logprob,
         help="Enable output of token-level log probabilities.",
     )
+    model_group.add_argument(
+        "--enable-keep-sampling-mask",
+        action="store_true",
+        default=EngineArgs.enable_keep_sampling_mask,
+        help=(
+            "Enable output of sampling mask as a sparse index list over the vocabulary. "
+            "For non-MTP decoding, this is a list[int] per token step indicating which "
+            "vocabulary indices were kept after top_p/top_k sampling. "
+            "For MTP decoding, this is a list[list[int]] per token step, where each inner "
+            "list corresponds to one MTP group."
+        ),
+    )
```

Comment on lines +904 to +909 — suggested change:

```diff
-            "list corresponds to one MTP group."
+            "list corresponds to one MTP group. Warning: when top_p >= 1.0 and top_k is "
+            "unset or non-positive, the returned index list may include nearly the entire "
+            "vocabulary for each token step, which can significantly increase compute, "
+            "memory, serialization, and network overhead. Prefer using this option with a "
+            "bounded top_k to avoid very large responses."
```
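A quick back-of-the-envelope check of the overhead the suggested warning describes; the 100k vocabulary size and the JSON byte cost per index are assumptions for illustration only:

```python
# Rough estimate of per-token sampling_mask size when top_p/top_k barely filter.
vocab_size = 100_000                      # assumed vocabulary size, illustration only
digits_per_index = 6                      # most indices print as up to 6 characters
bytes_per_index = digits_per_index + 2    # digits plus ", " JSON separator

per_token_bytes = vocab_size * bytes_per_index
print(f"~{per_token_bytes / 1e6:.1f} MB of JSON per generated token")  # ~0.8 MB
```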
```diff
@@ -435,6 +435,11 @@ async def chat_completion_stream_generator(
                 delta=delta_message,
                 logprobs=logprobs_res,
                 draft_logprobs=draft_logprobs_res,
+                sampling_mask=(
+                    self._make_sampling_mask_list(output["sampling_mask"])
+                    if output.get("sampling_mask") is not None
+                    else None
+                ),
                 arrival_time=arrival_time,
                 speculate_metrics=output_speculate_metrics,
             )
```
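Assuming the field surfaces as `sampling_mask` on each streamed choice, as the diff above suggests, a client could accumulate the per-token masks roughly as follows; the endpoint, request payload, and exact chunk layout are assumptions, not part of this PR:

```python
import json
import requests  # assumed HTTP client; the SSE framing below is illustrative


def collect_sampling_masks(url: str, payload: dict) -> list:
    """Accumulate per-token sampling masks from a streaming chat completion."""
    masks = []
    with requests.post(url, json=payload, stream=True) as resp:
        for line in resp.iter_lines():
            if not line or not line.startswith(b"data: "):
                continue
            data = line[len(b"data: "):]
            if data == b"[DONE]":
                break
            chunk = json.loads(data)
            for choice in chunk.get("choices", []):
                mask = choice.get("sampling_mask")
                if mask is not None:
                    masks.extend(mask)  # each mask is a List[List[int]]
    return masks
```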
```diff
@@ -580,6 +585,7 @@ async def chat_completion_full_generator(
             decoder_base_url=self.tokenizer_base_url,
         )
         prompt_logprobs_res_list = [[] for _ in range(num_choices)]
+        sampling_mask_list = [[] for _ in range(num_choices)]
         speculate_metrics = [None for _ in range(num_choices)]
         choices = []
         while num_choices > 0:
```
```diff
@@ -660,6 +666,9 @@ async def chat_completion_full_generator(
                     )
                     if prompt_logprobs_res:
                         prompt_logprobs_res_list[idx].extend(clamp_prompt_logprobs(prompt_logprobs_res))
+                    output_sampling_mask = output.get("sampling_mask", None)
+                    if output_sampling_mask is not None:
+                        sampling_mask_list[idx].append(self._make_sampling_mask_list(output_sampling_mask))
                 speculate_metrics[idx] = data["metrics"].get("speculate_metrics", None)
                 if data["finished"]:
                     trace_carrier = data.get("trace_carrier")
```
```diff
@@ -695,6 +704,7 @@ async def chat_completion_full_generator(
                         draft_logprob_contents=draft_logprob_contents,
                         response_processor=response_processor,
                         prompt_logprobs_res_list=prompt_logprobs_res_list,
+                        sampling_mask_list=sampling_mask_list,
                         max_tokens=max_tokens,
                         speculate_metrics=speculate_metrics[idx],
                     )
```
```diff
@@ -749,6 +759,7 @@ async def _create_chat_completion_choice(
         logprob_contents: list,
         draft_logprob_contents: list,
         prompt_logprobs_res_list: list,
+        sampling_mask_list: list,
         response_processor: ChatResponseProcessor,
         max_tokens: int,
         speculate_metrics: SpeculateMetrics | None,
```
```diff
@@ -787,6 +798,11 @@ async def _create_chat_completion_choice(
         if prompt_logprobs_res_list[idx]:
             prompt_logprobs_full_res = prompt_logprobs_res_list[idx]
 
+        # Flatten per-step List[List[int]] into a single List[List[int]] over all tokens.
+        sampling_mask_full_res = None
+        if sampling_mask_list and sampling_mask_list[idx]:
+            sampling_mask_full_res = [mask for step in sampling_mask_list[idx] for mask in step]
+
         num_cached_tokens[idx] = data.get("num_cached_tokens", 0)
         num_input_image_tokens[idx] = data.get("num_input_image_tokens", 0)
         num_input_video_tokens[idx] = data.get("num_input_video_tokens", 0)
```
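A small worked example of the flattening above, with invented values: each element of `sampling_mask_list[idx]` is one step's `List[List[int]]` as produced by `_make_sampling_mask_list`, and the comprehension concatenates the per-step lists into one list over all generated tokens:

```python
steps = [
    [[17, 402]],            # step 1: non-MTP step already wrapped to [[...]]
    [[8, 99], [3, 4, 5]],   # step 2: MTP step with two predicted tokens
]
flat = [mask for step in steps for mask in step]
assert flat == [[17, 402], [8, 99], [3, 4, 5]]
```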
```diff
@@ -810,6 +826,7 @@ async def _create_chat_completion_choice(
             logprobs=logprobs_full_res,
             draft_logprobs=draft_logprobs_full_res,
             prompt_logprobs=prompt_logprobs_full_res,
+            sampling_mask=sampling_mask_full_res,
             finish_reason=finish_reason,
             speculate_metrics=speculate_metrics,
         )
```
```diff
@@ -1000,3 +1017,18 @@ def _make_logprob_dict(
             )
             for token_id, logprob, rank, token in zip(logprob_token_ids, logprobs, ranks, decoded_tokens)
         }
+
+    @staticmethod
+    def _make_sampling_mask_list(sampling_mask) -> List[List[int]]:
+        """Wrap sampling_mask into a uniform List[List[int]] format.
+
+        sampling_mask is already in sparse-index form (no bool-to-index conversion needed):
+            Non-MTP: List[int] (indices for 1 token/step) → [[idx, ...]]
+            MTP: List[List[int]] (indices for N tokens/step) → [[idx, ...], ...]
+        """
+        assert sampling_mask is not None
+        if sampling_mask and isinstance(sampling_mask[0], list):
+            # MTP: already List[List[int]], return as-is
+            return sampling_mask
+        # Non-MTP: already List[int], wrap in outer list for uniform format
+        return [sampling_mask]
```

🟡 Suggestion: if `sampling_mask` is an empty list, indexing `sampling_mask[0]` raises an IndexError. Suggested fix:

```python
@staticmethod
def _make_sampling_mask_list(sampling_mask) -> List[List[int]]:
    assert sampling_mask is not None
    if sampling_mask and isinstance(sampling_mask[0], list):
        # MTP: already List[List[int]], return as-is
        return sampling_mask
    # Non-MTP: already List[int], wrap in outer list for uniform format
    return [sampling_mask]
```

Alternatively, add an empty-list check at the call site.

🟡 Suggestion: same concern — guard against an empty `sampling_mask` before indexing into it:

```python
if sampling_mask and isinstance(sampling_mask[0], list):
    return sampling_mask
```
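For quick verification, here is a standalone copy of the helper, runnable outside the class, exercising the empty-list edge case the reviewers raise above:

```python
from typing import List


def make_sampling_mask_list(sampling_mask) -> List[List[int]]:
    """Standalone copy of _make_sampling_mask_list for demonstration."""
    assert sampling_mask is not None
    if sampling_mask and isinstance(sampling_mask[0], list):
        return sampling_mask          # MTP: already List[List[int]]
    return [sampling_mask]            # Non-MTP: wrap List[int] in an outer list


assert make_sampling_mask_list([17, 402]) == [[17, 402]]          # non-MTP
assert make_sampling_mask_list([[8, 99], [3]]) == [[8, 99], [3]]  # MTP
assert make_sampling_mask_list([]) == [[]]  # empty: truthiness guard avoids IndexError
```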
```diff
@@ -159,9 +159,6 @@ def build_output_logprobs(
     logprobs_tensors = None
     cu_batch_token_offset = None
 
-    if num_logprobs is None:
-        return logprobs_tensors, cu_batch_token_offset
-
     real_bsz = share_inputs["seq_lens_this_time"].shape[0]
 
     if is_naive:
```
```diff
@@ -208,6 +205,10 @@ def build_output_logprobs(
         mask = idx < share_inputs["accept_num"].unsqueeze(1)
         token_ids = paddle.masked_select(share_inputs["accept_tokens"], mask)
 
+    # Adapt for sampling mask
+    if num_logprobs is None:
+        return None, None, output_logits
+
     # Compute logprobs with temperature scaling and top_p normalization
     if logprobs_mode == "raw_logprobs":
         raw_logprobs = compute_logprobs_fn(output_logits, sampling_metadata)
```
```diff
@@ -217,5 +218,5 @@ def build_output_logprobs(
             raw_logprobs = F.log_softmax(output_logits, axis=-1)
 
     logprobs_tensors = gather_logprobs(raw_logprobs, num_logprobs, token_ids=token_ids)
 
-    return logprobs_tensors, cu_batch_token_offset
+    # output_logits is used to compute sampling_mask
+    return logprobs_tensors, cu_batch_token_offset, output_logits
```
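For intuition about why `output_logits` is now returned, here is an illustrative NumPy sketch of recovering a sparse keep-index list for one token under top_k/top_p filtering; the actual kept-set computation in FastDeploy's kernels may differ:

```python
import numpy as np


def keep_indices(logits: np.ndarray, top_k: int, top_p: float) -> list:
    """Illustrative recovery of the kept vocabulary indices for one token."""
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    order = np.argsort(-probs)                        # highest probability first
    if top_k > 0:
        order = order[:top_k]                         # top-k cut
    cumulative = np.cumsum(probs[order])
    cutoff = np.searchsorted(cumulative, top_p) + 1   # smallest nucleus covering top_p
    return sorted(order[:cutoff].tolist())


print(keep_indices(np.array([2.0, 1.0, 0.5, -1.0]), top_k=3, top_p=0.9))  # [0, 1, 2]
```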
```diff
@@ -125,6 +125,16 @@ def top_k_top_p_sampling(
     if topp_seed is not None:
         topp_seed_device = paddle.empty(shape=topp_seed.shape, dtype=topp_seed.dtype)
         topp_seed_device.copy_(topp_seed, False)
+    if top_k_list and any(x > 0 for x in top_k_list):
+        try:
+            from fastdeploy.model_executor.ops.gpu import top_k_renorm_probs
+
+            x = top_k_renorm_probs(x, top_k)
+        except ImportError:
+            logger.warning(
+                "top_k_renorm_probs is not supported on current platform, skipping top_k_renorm_probs."
+            )
```

🟡 Suggestion: please double-check the `top_k_renorm_probs` block added at lines 128-136. From the code structure, lines 87-97 already appear to apply this renormalization in an earlier branch, while the new code at lines 128-136 sits inside an inner branch. Please verify the indentation and logic branches to make sure `top_k_renorm_probs` is not applied twice.
Comment on lines +128 to +137:

Per the repository's PR guidelines, the title must use one of the preset tags (e.g. [Feature]/[APIServer]/[Engine]/[Speculative Decoding]). The current title's [KSM] tag is not in the template's tag list and its meaning is unclear; a more generic, searchable tag is recommended (since this PR touches sampling and API output, [Feature] or an [APIServer]/[Engine] combination would fit).
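For reference, a NumPy sketch of what a top-k renormalization op like `top_k_renorm_probs` plausibly computes — zero out everything outside each row's top-k, then renormalize to sum to 1; the real GPU op's signature and exact semantics are defined by FastDeploy and may differ:

```python
import numpy as np


def top_k_renorm_probs_ref(probs: np.ndarray, top_k: np.ndarray) -> np.ndarray:
    """NumPy reference: keep each row's top-k probabilities, renormalize to 1."""
    out = np.zeros_like(probs)
    for i, k in enumerate(top_k):
        if k <= 0:                        # non-positive k means "no top-k cut"
            out[i] = probs[i]
            continue
        kept = np.argsort(-probs[i])[:k]  # indices of the k largest probabilities
        out[i, kept] = probs[i, kept]
        out[i] /= out[i].sum()
    return out


probs = np.array([[0.5, 0.3, 0.2], [0.6, 0.25, 0.15]])
print(top_k_renorm_probs_ref(probs, np.array([2, 1])))
# [[0.625 0.375 0.   ]
#  [1.    0.    0.   ]]
```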