diff --git a/caikit_nlp/modules/text_embedding/embedding.py b/caikit_nlp/modules/text_embedding/embedding.py
index 06779d0d..3301367a 100644
--- a/caikit_nlp/modules/text_embedding/embedding.py
+++ b/caikit_nlp/modules/text_embedding/embedding.py
@@ -37,6 +37,7 @@
 from torch import nn
 from torch.backends import mps
 from transformers import BatchEncoding
+from transformers.tokenization_utils import PaddingStrategy
 import numpy as np
 import torch
 
@@ -976,6 +977,7 @@ def _tokenize_plus(
         truncate_input_tokens: int,
         texts: List[str],
         implicit_truncation_errors: bool = True,
+        **kwargs,
     ) -> TruncatedTokensTuple:
         """Tokenize with support for truncation handling and returning the token count
         Args:
@@ -1015,7 +1017,7 @@ def _tokenize_plus(
         texts = [str(s).strip() for s in texts]
 
         # Call tokenizer with the same truncation parameters every time
-        tokenized = self._get_tokenized(texts)
+        tokenized = self._get_tokenized(texts, **kwargs)
 
         # Custom truncation and/or error raise if needed
         truncation_needed = self._truncation_needed(tokenized, max_length, texts)
@@ -1023,13 +1025,13 @@
             # Truncate texts in place
             _truncate_texts(texts, tokenized, max_length, truncation_needed)
             # Re-tokenize the truncated texts
-            tokenized = self._get_tokenized(texts)
+            tokenized = self._get_tokenized(texts, **kwargs)
             truncation_needed = []  # truncation accomplished
 
         input_token_count = sum_token_count(tokenized)
         return TruncatedTokensTuple(tokenized, input_token_count, truncation_needed)
 
-    def _get_tokenized(self, texts):
+    def _get_tokenized(self, texts, **kwargs):
         """Intentionally always call tokenizer the same way to avoid thread issues.
 
         Use a copy of the tokenizer per-model (self) and per-thread (map by thread ID).
@@ -1039,6 +1041,8 @@ def _get_tokenized(self, texts):
         the fast tokenizer with different truncation settings.
         """
+        pad_to_max_length = kwargs.pop("pad_to_max_length", None)
+
 
         # Keep copies of tokenizer per thread (in each wrapped model instance)
         thread_id = threading.get_ident()
         tokenizer = (
@@ -1047,18 +1051,32 @@
             else self.tokenizers.setdefault(thread_id, deepcopy(self.tokenizer))
         )
 
-        return tokenizer(
-            texts,
-            return_attention_mask=True,  # Used for determining token count
-            return_token_type_ids=False,
-            return_overflowing_tokens=False,  # DO NOT USE overflow tokens break sentence batches
-            return_offsets_mapping=True,  # Used for truncation
-            return_length=False,
-            return_tensors="pt",
-            truncation=True,  # DO NOT CHANGE else "Already borrowed" errors
-            padding=True,  # DO NOT CHANGE else "Already borrowed" errors
-            max_length=self.max_seq_length,  # DO NOT CHANGE else "Already borrowed" errors
-        )
+        if pad_to_max_length:
+            return tokenizer(
+                texts,
+                return_attention_mask=True,  # Used for determining token count
+                return_token_type_ids=False,
+                return_overflowing_tokens=False,  # DO NOT USE overflow tokens break sentence batches
+                return_offsets_mapping=True,  # Used for truncation
+                return_length=False,
+                return_tensors="pt",
+                truncation=True,  # DO NOT CHANGE else "Already borrowed" errors
+                padding=PaddingStrategy.MAX_LENGTH,  # DO NOT CHANGE else "Already borrowed" errors
+                max_length=self.max_seq_length,  # DO NOT CHANGE else "Already borrowed" errors
+            )
+        else:
+            return tokenizer(
+                texts,
+                return_attention_mask=True,  # Used for determining token count
+                return_token_type_ids=False,
+                return_overflowing_tokens=False,  # DO NOT USE overflow tokens break sentence batches
+                return_offsets_mapping=True,  # Used for truncation
+                return_length=False,
+                return_tensors="pt",
+                truncation=True,  # DO NOT CHANGE else "Already borrowed" errors
+                padding=True,  # DO NOT CHANGE else "Already borrowed" errors
+                max_length=self.max_seq_length,  # DO NOT CHANGE else "Already borrowed" errors
+            )
 
     def encode(
         self,
@@ -1077,6 +1095,7 @@
         return_token_count: bool = False,
         implicit_truncation_errors: bool = True,
         autocast: bool = False,
+        tokenizer_kwargs: Dict[str, Any] = {},
     ) -> Union[EmbeddingResultTuple, List[torch.Tensor], np.ndarray, torch.Tensor]:
         """
         Computes sentence embeddings
@@ -1161,6 +1180,7 @@
                 truncate_input_tokens,
                 sentences_batch,
                 implicit_truncation_errors=implicit_truncation_errors,
+                **tokenizer_kwargs
             )
 
             if truncation_needed:  # truncation was needed and was not done/not allowed
diff --git a/tests/modules/text_embedding/test_embedding.py b/tests/modules/text_embedding/test_embedding.py
index cbd22ee8..209522a6 100644
--- a/tests/modules/text_embedding/test_embedding.py
+++ b/tests/modules/text_embedding/test_embedding.py
@@ -1143,3 +1143,22 @@ def test_same_same(loaded_model: EmbeddingModule, truncate_input_tokens):
     assert not np.allclose(
         separate_vectors[1], separate_vectors[2], rtol=1e-05, atol=1e-08
     )
+
+@pytest.mark.parametrize("pad_to_max_length", [None, False, True, 0, 1])
+def test_pad_to_max_length(pad_to_max_length, loaded_model):
+    """Test that the pad_to_max_length tokenizer kwarg changes padding but gives the same embeddings"""
+    model_max = loaded_model.model.max_seq_length
+
+    tokenizer_kwargs = {"pad_to_max_length": pad_to_max_length}
+    max_seq = "x " * (model_max - 2)  # Subtract 2 for begin/end tokens
+    max_seq_minus_one = "x " * (model_max - 3)  # One token shorter than max_seq_length
+    short = "x "
+
+    normal_result = loaded_model._encode_with_retry(
+        [max_seq_minus_one, max_seq, short], return_token_count=True
+    )
+    padded_result = loaded_model._encode_with_retry(
+        [max_seq_minus_one, max_seq, short], return_token_count=True, tokenizer_kwargs=tokenizer_kwargs
+    )
+
+    assert np.all(normal_result.embedding == padded_result.embedding)
\ No newline at end of file
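
Usage sketch (editorial illustration, not part of the patch): with the change above, encode() accepts a tokenizer_kwargs dict that _tokenize_plus() forwards to _get_tokenized(), so per-call max-length padding can be requested. The call below mirrors the new test; the loaded_model fixture name and the input strings are placeholders.

# Hypothetical call, assuming loaded_model is an already-loaded EmbeddingModule.
# pad_to_max_length=True switches the tokenizer to PaddingStrategy.MAX_LENGTH, padding
# each batch to model.max_seq_length; the resulting embeddings are expected to be unchanged.
result = loaded_model._encode_with_retry(
    ["first placeholder sentence", "second placeholder sentence"],
    return_token_count=True,
    tokenizer_kwargs={"pad_to_max_length": True},
)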