From 39f181fe8e5e56691ac233ac8d6e85bda8152398 Mon Sep 17 00:00:00 2001
From: arunkrishnak
Date: Sun, 1 Feb 2026 10:12:33 +0530
Subject: [PATCH] Correct minor spelling and grammar mistakes

---
 docs/codegen.md                    | 2 +-
 utils/convert-hf-to-gguf-bitnet.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/codegen.md b/docs/codegen.md
index ff21e3cfb..e3c8b3b5d 100644
--- a/docs/codegen.md
+++ b/docs/codegen.md
@@ -3,7 +3,7 @@ Codegen for TL1 and TL2
 codegen_tl1.py and codegen_tl2.py are using params to generate kernel codes in different devices to achieve fastest performance for TL1 and TL2.
 
-We cutting weight into multiple compute blocks to best utilize hardware capabilities.
+We cut weights into multiple compute blocks to best utilize hardware capabilities.
 
 ### Example
 
 bitnet_b1_58-large:
diff --git a/utils/convert-hf-to-gguf-bitnet.py b/utils/convert-hf-to-gguf-bitnet.py
index 23e84384c..5940bcfc0 100644
--- a/utils/convert-hf-to-gguf-bitnet.py
+++ b/utils/convert-hf-to-gguf-bitnet.py
@@ -745,7 +745,7 @@ def write_tensors(self):
                     bid = int(part)
                     break
 
-            # old gguf bf16 not implenmented
+            # old gguf bf16 not implemented
             # if data_torch.dtype == torch.bfloat16:
             #     for new_name, data in ((n, d) for n, d in self.modify_tensors(data_torch, name, bid)):
             #         shape_str = f"{{{', '.join(str(n) for n in reversed(data.shape))}}}"