From 1bf4e1199253b267ebeb0c6996775edb7911f4c2 Mon Sep 17 00:00:00 2001 From: Konstantin Dobler Date: Thu, 16 Nov 2023 11:03:25 +0100 Subject: [PATCH] Correctly handle `sentencepiece` byte-fallback tokens (#3) --- src/deepfocus/vocab_helper.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/deepfocus/vocab_helper.py b/src/deepfocus/vocab_helper.py index 1eaea19..5c2cc20 100644 --- a/src/deepfocus/vocab_helper.py +++ b/src/deepfocus/vocab_helper.py @@ -2,6 +2,7 @@ from dataclasses import dataclass import numpy as np +import regex from torch import Tensor from tqdm import tqdm from transformers import PreTrainedTokenizer @@ -59,6 +60,11 @@ def replace_space(tokenizer: PreTrainedTokenizer, token_id: int): """For XLM-R tokenizer (sentencepiece-style)""" decoded_token = tokenizer.decode(token_id) token = tokenizer.convert_ids_to_tokens(token_id) + + # For sentencepiece ByteFallback tokens used in Llama, Mistral et al. + if regex.match(r"<0x[0-9A-F]{2}>", token): + return token, False + is_beginning_of_word = token.startswith(XLMR_WHITESPACE) if is_beginning_of_word: return XLMR_WHITESPACE + decoded_token.lstrip(), True