examples/apple/coreml/llama/utils.py (13 additions, 1 deletion)
```diff
@@ -91,9 +91,20 @@ def forward(self, x):
 
 
 def replace_linear_with_split_linear(
-    model, out_target_split_size, out_max_splits, in_target_split_size, in_max_splits=1
+    model,
+    out_target_split_size,
+    out_max_splits,
+    in_target_split_size,
+    in_max_splits=1,
+    skip_names=None,
 ):
+    from executorch.examples.models.llama.lora import LoRALinear
+
     for name, module in model.named_children():
+        if skip_names and name in skip_names:
+            continue
+        if isinstance(module, LoRALinear):
+            continue
         if isinstance(module, torch.nn.Linear):
             assert module.bias is None, "SplitLinearModule does not support bias"
             new_module = SplitLinearModule(
```

**@lucylq** (Contributor, Author) commented on the `if skip_names and name in skip_names:` line, Mar 20, 2026:

> In the next PR, this ends up being `'wqs'`, `'wks'`, `'wvs'`, `'wos'`. Regular `Linear` and `LoRALinear` both have to be skipped.
```diff
@@ -113,4 +124,5 @@ def replace_linear_with_split_linear(
                 out_max_splits,
                 in_target_split_size,
                 in_max_splits,
+                skip_names=skip_names,
             )
```
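For context, a minimal sketch of how the new `skip_names` argument might be called. Everything in it is illustrative: the toy module, its child names, the split sizes, and the import path (assumed from the executorch repo layout) are assumptions, not part of this PR. The review comment above notes the follow-up PR passes names like `'wqs'`, `'wks'`, `'wvs'`, `'wos'`.

```python
import torch

# Assumed import path, based on the file's location in the executorch repo.
from executorch.examples.apple.coreml.llama.utils import (
    replace_linear_with_split_linear,
)


# Hypothetical toy model: "wqs" is skipped, "proj" gets split.
class Block(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.wqs = torch.nn.Linear(64, 64, bias=False)    # left as-is via skip_names
        self.proj = torch.nn.Linear(64, 128, bias=False)  # replaced by SplitLinearModule

    def forward(self, x):
        return self.proj(self.wqs(x))


model = Block()
replace_linear_with_split_linear(
    model,
    out_target_split_size=64,  # illustrative split sizes
    out_max_splits=2,
    in_target_split_size=64,
    in_max_splits=1,
    skip_names=["wqs"],  # matched against names from model.named_children()
)
```

Because the recursive call now forwards `skip_names=skip_names`, a name in the list is skipped at whatever depth of the module tree it appears.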