diff --git a/tests/models/llama/test_tokenization_llama.py b/tests/models/llama/test_tokenization_llama.py
index f4a84307d9b80d..c7e8b5e86021e5 100644
--- a/tests/models/llama/test_tokenization_llama.py
+++ b/tests/models/llama/test_tokenization_llama.py
@@ -28,7 +28,7 @@
     AutoTokenizer,
     LlamaTokenizer,
     LlamaTokenizerFast,
-    PreTrainedTokenizerFast
+    PreTrainedTokenizerFast,
 )
 from transformers.convert_slow_tokenizer import convert_slow_tokenizer
 from transformers.testing_utils import (
@@ -54,9 +54,9 @@ class LlamaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     from_pretrained_id = ["hf-internal-testing/llama-tokenizer", "meta-llama/Llama-2-7b-hf"]
     tokenizer_class = LlamaTokenizer
-    rust_tokenizer_class = PreTrainedTokenizerFast
+    rust_tokenizer_class = LlamaTokenizerFast
 
-    test_rust_tokenizer = True
+    test_rust_tokenizer = False
     test_sentencepiece = True
     from_pretrained_kwargs = {}
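
For reference, below is a sketch of the two touched regions as they read once the patch is applied, reconstructed only from the hunks above. Surrounding file contents are elided; the import of TokenizerTesterMixin (which, per the hunk context, comes from the test suite's shared module) is indicated by a comment rather than guessed at.

# Net state after the patch (a sketch; only lines visible in the hunks).
import unittest

# TokenizerTesterMixin is imported from the repo's shared tokenization
# test module in the surrounding file (not shown in these hunks).

# First hunk: the multi-line import gains a trailing comma, matching the
# entries above it.
from transformers import (
    AutoTokenizer,
    LlamaTokenizer,
    LlamaTokenizerFast,
    PreTrainedTokenizerFast,
)

# Second hunk: the test class points back at the model-specific
# LlamaTokenizerFast instead of the generic PreTrainedTokenizerFast, and
# test_rust_tokenizer = False makes TokenizerTesterMixin skip its shared
# fast-tokenizer tests for this class.
class LlamaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = ["hf-internal-testing/llama-tokenizer", "meta-llama/Llama-2-7b-hf"]
    tokenizer_class = LlamaTokenizer
    rust_tokenizer_class = LlamaTokenizerFast

    test_rust_tokenizer = False
    test_sentencepiece = True
    from_pretrained_kwargs = {}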