
add paddlenlp tokenizer (#2706)

zxcd 10 months ago
parent
commit
7f2682a6ee

+ 2 - 0
paddlex/inference/models_new/common/__init__.py

@@ -31,3 +31,5 @@ from .ts import (
     TStoBatch,
 )
 from .static_infer import StaticInfer
+
+from .tokenizer import PretrainedTokenizer, GPTTokenizer

+ 16 - 0
paddlex/inference/models_new/common/tokenizer/__init__.py

@@ -0,0 +1,16 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .tokenizer_utils import PretrainedTokenizer
+from .gpt_tokenizer import GPTTokenizer

+ 451 - 0
paddlex/inference/models_new/common/tokenizer/gpt_tokenizer.py

@@ -0,0 +1,451 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+import shutil
+from functools import lru_cache
+from typing import Dict, Optional, Union
+
+import jieba
+import numpy as np
+import sentencepiece as spm
+import lazy_paddle as paddle
+import regex as re
+
+from .tokenizer_utils import PretrainedTokenizer
+from .tokenizer_utils_base import (
+    AddedToken,
+    BatchEncoding,
+    EncodedInput,
+    PaddingStrategy,
+)
+
+__all__ = [
+    "GPTTokenizer",
+]
+
+
+@lru_cache()
+def bytes_to_unicode():
+    """
+    Returns a dict mapping utf-8 bytes to corresponding unicode strings.
+    The reversible bpe codes work on unicode strings.
+    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
+    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
+    This is a significant percentage of your normal, say, 32K bpe vocab.
+    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
+    This also avoids mapping to whitespace/control characters the bpe code barfs on.
+    """
+    _chr = chr
+    bs = (
+        list(range(ord("!"), ord("~") + 1))
+        + list(range(ord("¡"), ord("¬") + 1))
+        + list(range(ord("®"), ord("ÿ") + 1))
+    )
+    cs = bs[:]
+    n = 0
+    for b in range(2**8):
+        if b not in bs:
+            bs.append(b)
+            cs.append(2**8 + n)
+            n += 1
+    cs = [_chr(n) for n in cs]
+    return dict(zip(bs, cs))
+
+
+def get_pairs(word):
+    """Return set of symbol pairs in a word.
+
+    Word is represented as tuple of symbols (symbols being variable-length strings).
+    """
+    pairs = set()
+    prev_char = word[0]
+    for char in word[1:]:
+        pairs.add((prev_char, char))
+        prev_char = char
+    return pairs
+
+
+class GPTTokenizer(PretrainedTokenizer):
+    """
+    Constructs a GPT tokenizer based on byte-level Byte-Pair-Encoding.
+
+    This tokenizer inherits from :class:`~paddlenlp.transformers.tokenizer_utils.PretrainedTokenizer`
+    which contains most of the main methods. For more information regarding those methods,
+    please refer to this superclass.
+
+    Args:
+        vocab_file (str):
+            Path to the vocab file.
+            The vocab file contains a mapping from vocabulary strings to indices.
+        merges_file (str):
+            Path to the merge file.
+            The merge file is used to split the input sentence into "subword" units.
+            The vocab file is then used to encode those units as indices.
+        errors (str):
+            Paradigm to follow when decoding bytes to UTF-8.
+            Defaults to `'replace'`.
+        max_len (int, optional):
+            The maximum value of the input sequence length.
+            Defaults to `None`.
+
+    Examples:
+        .. code-block::
+
+            from paddlenlp.transformers import GPTTokenizer
+
+            tokenizer = GPTTokenizer.from_pretrained('gpt2-medium-en')
+            print(tokenizer('Welcome to use PaddlePaddle and PaddleNLP'))
+
+            '''
+            {'input_ids': [14618, 284, 779, 350, 37382, 47, 37382, 290, 350, 37382, 45, 19930],
+            'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}
+            '''
+
+    """
+
+    resource_files_names = {
+        "vocab_file": "vocab.json",
+        "merges_file": "merges.txt",
+    }  # for save_pretrained
+    gpt_vocab_link = (
+        "http://bj.bcebos.com/paddlenlp/models/transformers/gpt/gpt-en-vocab.json"
+    )
+    gpt_merges_link = (
+        "http://bj.bcebos.com/paddlenlp/models/transformers/gpt/gpt-en-merges.txt"
+    )
+    pretrained_resource_files_map = {
+        "vocab_file": {
+            "gpt3-175B-en": gpt_vocab_link,
+            "gpt3-89B-en": gpt_vocab_link,
+            "gpt3-13B-en": gpt_vocab_link,
+            "gpt3-6.7B-en": gpt_vocab_link,
+            "gpt3-1.3B-en": gpt_vocab_link,
+            "gpt2-xl-en": gpt_vocab_link,
+            "gpt2-large-en": gpt_vocab_link,
+            "gpt2-medium-en": gpt_vocab_link,
+            "gpt2-en": gpt_vocab_link,
+            "gpt2-small-en": gpt_vocab_link,
+        },
+        "merges_file": {
+            "gpt3-175B-en": gpt_merges_link,
+            "gpt3-89B-en": gpt_merges_link,
+            "gpt3-13B-en": gpt_merges_link,
+            "gpt3-6.7B-en": gpt_merges_link,
+            "gpt3-1.3B-en": gpt_merges_link,
+            "gpt2-xl-en": gpt_merges_link,
+            "gpt2-large-en": gpt_merges_link,
+            "gpt2-medium-en": gpt_merges_link,
+            "gpt2-en": gpt_merges_link,
+            "gpt2-small-en": gpt_merges_link,
+        },
+    }
+    pretrained_init_configuration = {
+        "gpt3-175B-en": {},
+        "gpt3-89B-en": {},
+        "gpt3-13B-en": {},
+        "gpt3-6.7B-en": {},
+        "gpt3-1.3B-en": {},
+        "gpt2-xl-en": {},
+        "gpt2-large-en": {},
+        "gpt2-medium-en": {},
+        "gpt2-en": {},
+        "gpt2-small-en": {},
+    }
+
+    def __init__(
+        self,
+        vocab_file,
+        merges_file,
+        errors="replace",
+        max_len=None,
+        pad_token="<|endoftext|>",
+        eos_token="<|endoftext|>",
+        unk_token="<|endoftext|>",
+        eol_token="\u010a",  # The token for newline.
+        add_prefix_space=False,
+        add_bos_token=False,
+        **kwargs
+    ):
+        pad_token = (
+            AddedToken(pad_token, lstrip=False, rstrip=False)
+            if isinstance(pad_token, str)
+            else pad_token
+        )
+        eos_token = (
+            AddedToken(eos_token, lstrip=False, rstrip=False)
+            if isinstance(eos_token, str)
+            else eos_token
+        )
+        unk_token = (
+            AddedToken(unk_token, lstrip=False, rstrip=False)
+            if isinstance(unk_token, str)
+            else unk_token
+        )
+        self.eol_token = eol_token
+        self._build_special_tokens_map_extended(
+            bos_token=(
+                pad_token
+                if getattr(self, "bos_token", None) is None
+                else self.bos_token
+            ),
+            eos_token=eos_token,
+            unk_token=unk_token,
+        )
+
+        self._vocab_file = vocab_file
+        self._merges_file = merges_file
+        self.max_len = max_len if max_len is not None else int(1e12)
+        self.num_command_tokens = 2
+        self.num_type_tokens = 2
+
+        with open(vocab_file, "r", encoding="utf-8") as f:
+            self.encoder = json.load(f)
+
+        self.decoder = {v: k for k, v in self.encoder.items()}
+
+        self.num_tokens = len(self.encoder)
+        self.num_text_tokens = self.num_tokens - 1
+        self.errors = errors  # how to handle errors in decoding
+        self.byte_encoder = bytes_to_unicode()
+        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+
+        with open(merges_file, encoding="utf-8") as f:
+            bpe_data = f.read().split("\n")[1:-1]
+
+        bpe_merges = [tuple(merge.split()) for merge in bpe_data]
+        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
+        self.cache = {}
+        self.add_prefix_space = add_prefix_space
+        self.add_bos_token = add_bos_token
+
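+        # GPT-2 style pre-tokenization pattern: common contractions, runs of letters,
+        # digits, or other symbols (each optionally preceded by one space), and
+        # remaining whitespace.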
+        self.pat = re.compile(
+            r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
+        )
+
+    @property
+    def vocab_size(self):
+        """
+        Returns the size of vocabulary.
+
+        Returns:
+            int: The size of the base vocabulary (without the added tokens).
+
+        """
+
+        return len(self.encoder)
+
+    @property
+    def eol_token_id(self):
+        if self.eol_token is None:
+            return None
+        return self.convert_tokens_to_ids(self.eol_token)
+
+    def bpe(self, token):
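+        # Greedily merge the adjacent symbol pair with the lowest merge rank in
+        # self.bpe_ranks until no known pair remains; the result is cached per token.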
+        if token in self.cache:
+            return self.cache[token]
+        word = tuple(token)
+        pairs = get_pairs(word)
+
+        if not pairs:
+            return token
+
+        while True:
+            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+            if bigram not in self.bpe_ranks:
+                break
+            first, second = bigram
+            new_word = []
+            i = 0
+            while i < len(word):
+                try:
+                    j = word.index(first, i)
+                    new_word.extend(word[i:j])
+                    i = j
+                except ValueError:
+                    new_word.extend(word[i:])
+                    break
+
+                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+                    new_word.append(first + second)
+                    i += 2
+                else:
+                    new_word.append(word[i])
+                    i += 1
+            new_word = tuple(new_word)
+            word = new_word
+            if len(word) == 1:
+                break
+            else:
+                pairs = get_pairs(word)
+        word = " ".join(word)
+        self.cache[token] = word
+        return word
+
+    def _tokenize(self, text):
+        """Tokenize a string."""
+        bpe_tokens = []
+        for token in re.findall(self.pat, text):
+            token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
+            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
+        return bpe_tokens
+
+    def _convert_token_to_id(self, token):
+        return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+    def _convert_id_to_token(self, index):
+        return self.decoder[index]
+
+    def convert_ids_to_string(self, ids):
+        """
+        Converts a single index or a sequence of indices to texts.
+
+        Args:
+            ids (int|List[int]):
+                The token id (or token ids) to be converted to text.
+
+        Returns:
+            str: The decoded text.
+
+        Example:
+            .. code-block::
+
+                from paddlenlp.transformers import GPTTokenizer
+                tokenizer = GPTTokenizer.from_pretrained('gpt2-medium-en')
+                print(tokenizer.convert_ids_to_string([14618, 284, 779, 350, 37382, 47, 37382, 290, 350, 37382, 45, 19930]))
+                # 'Welcome to use PaddlePaddle and PaddleNLP'
+
+        """
+
+        text = "".join([self.decoder[id] for id in ids])
+        text = bytearray([self.byte_decoder[c] for c in text]).decode(
+            "utf-8", errors=self.errors
+        )
+        return text
+
+    def save_resources(self, save_directory):
+        """
+        Saves the tokenizer resource files (`vocab.json` and `merges.txt`)
+        under `save_directory`.
+
+        Args:
+            save_directory (str): Directory to save files into.
+        """
+        for name, file_name in self.resource_files_names.items():
+            source_path = getattr(self, "_%s" % name)
+
+            save_path = os.path.join(save_directory, file_name)
+            if os.path.abspath(source_path) != os.path.abspath(save_path):
+                shutil.copyfile(source_path, save_path)
+
+    def convert_tokens_to_string(self, tokens):
+        """
+        Converts a sequence of tokens (string) into a single string.
+        """
+        text = "".join(tokens)
+        text = bytearray([self.byte_decoder[c] for c in text]).decode(
+            "utf-8", errors=self.errors
+        )
+        return text
+
+    def get_vocab(self):
+        return dict(self.encoder, **self.added_tokens_encoder)
+
+    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
+        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
+        if is_split_into_words or add_prefix_space:
+            text = " " + text
+        return (text, kwargs)
+
+    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+        if self.add_bos_token:
+            bos_token_ids = [self.bos_token_id]
+        else:
+            bos_token_ids = []
+
+        output = bos_token_ids + token_ids_0
+
+        if token_ids_1 is None:
+            return output
+
+        return output + bos_token_ids + token_ids_1
+
+    def _pad(
+        self,
+        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
+        max_length: Optional[int] = None,
+        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+        pad_to_multiple_of: Optional[int] = None,
+        return_attention_mask: Optional[bool] = None,
+    ) -> dict:
+        """
+        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
+
+        Args:
+            encoded_inputs:
+                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
+            max_length: maximum length of the returned list and optionally padding length (see below).
+                Will truncate by taking into account the special tokens.
+            padding_strategy: PaddingStrategy to use for padding.
+
+                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
+                - PaddingStrategy.MAX_LENGTH: Pad to the max length
+                - PaddingStrategy.DO_NOT_PAD: Do not pad (default)
+                The tokenizer padding sides are defined in self.padding_side:
+
+                    - 'left': pads on the left of the sequences
+                    - 'right': pads on the right of the sequences
+            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
+                This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
+                >= 7.0 (Volta).
+            return_attention_mask:
+                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
+        """
+        # Load from model defaults
+
+        # attention_mask shape [1,seq_len,seq_len]
+        if (
+            "attention_mask" in encoded_inputs
+            and len(np.shape(encoded_inputs["attention_mask"])) > 2
+        ):
+            attention_mask = encoded_inputs["attention_mask"]
+            encoded_inputs.pop("attention_mask")
+        else:
+            attention_mask = None
+
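+        # Pad the remaining fields with the base implementation first, then pad the
+        # 3-D attention mask back up to `max_length` at the start of its last two axes.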
+        required_input = encoded_inputs[self.model_input_names[0]]
+        encoded_inputs = super()._pad(
+            encoded_inputs,
+            max_length,
+            padding_strategy,
+            pad_to_multiple_of,
+            return_attention_mask,
+        )
+        if attention_mask is not None and len(np.shape(attention_mask)) > 2:
+            encoded_inputs["attention_mask"] = attention_mask
+            needs_to_be_padded = (
+                padding_strategy != PaddingStrategy.DO_NOT_PAD
+                and len(required_input) != max_length
+            )
+            if needs_to_be_padded:
+                difference = max_length - len(required_input)
+                if "attention_mask" in encoded_inputs:
+                    encoded_inputs["attention_mask"] = np.pad(
+                        encoded_inputs["attention_mask"],
+                        pad_width=[(0, 0), (difference, 0), (difference, 0)],
+                        mode="constant",
+                        constant_values=0,
+                    )
+        return encoded_inputs

+ 2048 - 0
paddlex/inference/models_new/common/tokenizer/tokenizer_utils.py

@@ -0,0 +1,2048 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import bisect
+import io
+import itertools
+import json
+import os
+import re
+import six
+import inspect
+import unicodedata
+import functools
+from collections import OrderedDict
+from dataclasses import asdict, dataclass
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import numpy
+import numpy as np
+import lazy_paddle as paddle
+from jinja2 import Template
+from jinja2.exceptions import TemplateError, TemplateSyntaxError
+from jinja2.sandbox import ImmutableSandboxedEnvironment
+
+from .tokenizer_utils_base import CHAT_TEMPLATE_CONFIG_NAME
+from .....utils import logging
+
+from functools import lru_cache
+
+from .vocab import Vocab
+from .tokenizer_utils_base import (
+    AddedToken,
+    BatchEncoding,
+    EncodedInput,
+    EncodedInputPair,
+    PaddingStrategy,
+    PreTokenizedInput,
+    PreTokenizedInputPair,
+    PretrainedTokenizerBase,
+    TensorType,
+    TextInput,
+    TextInputPair,
+    TruncationStrategy,
+)
+from .utils import convert_to_dict_message, fn_args_to_dict
+
+__all__ = [
+    "ChatTemplate",
+    "Trie",
+    "ChatTemplateMixin",
+    "PretrainedTokenizer",
+    "InitTrackerMeta",
+]
+
+
+@dataclass
+class ChatTemplate:
+    conversation: list[str] | None = None
+    system: str | None = None
+    query: str | None = None
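+    # A minimal, hypothetical config this dataclass can be built from (keys match the
+    # fields above; the strings are jinja templates rendered with `user`, `bot`,
+    # `index` and `query` variables):
+    #   ChatTemplate.from_dict({
+    #       "system": "You are a helpful assistant.\n",
+    #       "conversation": ["User: {{user}}\n", "Bot: {{bot}}\n"],
+    #       "query": "User: {{query}}\n",
+    #   })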
+
+    @staticmethod
+    @lru_cache()
+    def _compile_jinja_template(chat_template) -> Template:
+        def raise_exception(message):
+            raise TemplateError(message)
+
+        jinja_env = ImmutableSandboxedEnvironment(
+            trim_blocks=True, lstrip_blocks=True, keep_trailing_newline=True
+        )
+        jinja_env.globals["raise_exception"] = raise_exception
+        return jinja_env.from_string(chat_template)
+
+    def render_conversation(
+        self,
+        conversation_data: list[str] | dict[str, str],
+        index: int = 0,
+        context_data: Dict[str, Any] = {},
+    ) -> list[str]:
+        """
+        Args:
+            conversation_data (list[str]): the conversation data, which must contain exactly two parts
+            index (int): the index of current conversation
+
+        Returns:
+            list[str]: the rendered conversation data
+        """
+        if self.conversation is None:
+            raise ValueError(
+                "The template for multi-turns is invalid, please check `conversation` filed in your chat-template."
+            )
+
+        if isinstance(conversation_data, (list, tuple)):
+            assert (
+                len(conversation_data) == 2
+            ), "Each round/turn of conversation must be two participants, eg: [user-query, bot-query]"
+
+            conversation_data = {
+                "user": conversation_data[0],
+                "bot": conversation_data[1],
+                "index": index,
+            }
+        conversation_data.update(context_data)
+
+        one_turn_conversation = []
+        for conversation in self.conversation:
+            template = self._compile_jinja_template(conversation)
+            result = template.render(conversation_data)
+            one_turn_conversation.append(result)
+        return one_turn_conversation
+
+    def render_query(
+        self, query: str, index: int = 0, context_data: Dict[str, Union[int, str]] = {}
+    ):
+        if self.query is None:
+            return query
+
+        template = self._compile_jinja_template(self.query)
+        return template.render(query=query, index=index, **context_data)
+
+    def _init_context_data(
+        self, context_data: Dict[str, Union[int, str]] = {}
+    ) -> Dict[str, Union[int, str]]:
+        """init the context data for chat-template"""
+        context_data["is_training"] = context_data.get("is_training", False)
+        return context_data
+
+    def render_system(self, context_data: Dict[str, Union[int, str]] = {}) -> str:
+        if self.system is None:
+            return ""
+
+        template = self._compile_jinja_template(self.system)
+        return template.render(**context_data)
+
+    def __call__(
+        self,
+        conversations: list[list[str]] | str,
+        context_data: Dict[str, Union[int, str]] = {},
+    ) -> str:
+        """render the conversations by chat-template
+
+        Args:
+            conversations (list[list[str]]): the conversations between user and bot
+
+        Returns:
+            str: the result of conversation
+        """
+        if isinstance(conversations, str):
+            conversations = [[conversations]]
+
+        # [1 ... n-1] conversation
+        final_query = self.render_system(context_data=context_data)
+        context_data["length"] = len(conversations)
+        for index, conversation in enumerate(conversations[:-1]):
+            context_data["is_first"] = index == 0
+            context_data["is_last"] = False
+            final_query += "".join(
+                self.render_conversation(
+                    conversation, index=index, context_data=context_data
+                )
+            )
+
+        if not isinstance(conversations[-1], list) or len(conversations[-1]) < 1:
+            raise ValueError(
+                "The last conversation must be a non-empty list, e.g.: [[user-query, bot-answer], [user-query, bot-answer], ..., [user-query]]"
+            )
+        if len(conversations[-1]) > 1:
+            logging.warning(
+                f"The last conversation is not a single-round, chat-template will skip the conversation: {conversations[-1][1:]}"
+            )
+
+        final_query += self.render_query(
+            conversations[-1][0],
+            index=len(conversations) - 1,
+            context_data=context_data,
+        )
+        return final_query
+
+    @classmethod
+    def from_dict(cls, config: dict):
+        return cls(**config)
+
+    @classmethod
+    def from_file(cls, file: str):
+        with open(file, "r", encoding="utf-8") as f:
+            config = json.load(f)
+        return cls.from_dict(config)
+
+
+def adapt_stale_fwd_patch(self, name, value):
+    """
+    Since there are some monkey patches for forward of PretrainedModel, such as
+    model compression, we make these patches compatible with the latest forward
+    method.
+    """
+    if name == "forward":
+        # NOTE(guosheng): In dygraph to static, `layer.forward` would be patched
+        # by an instance of `StaticFunction`. Use string comparison to avoid
+        # importing fluid.
+        if type(value).__name__.endswith(
+            "StaticFunction"
+        ) or self.forward.__class__.__name__.endswith("StaticFunction"):
+            return value
+        (
+            patch_spec_args,
+            patch_spec_varargs,
+            patch_spec_varkw,
+            patch_spec_defaults,
+            _,
+            _,
+            _,
+        ) = inspect.getfullargspec(value)
+        (spec_args, spec_varargs, spec_varkw, spec_defaults, _, _, _) = (
+            inspect.getfullargspec(self.forward)
+        )
+        new_args = [
+            arg
+            for arg in ("output_hidden_states", "output_attentions", "return_dict")
+            if arg not in patch_spec_args and arg in spec_args
+        ]
+
+        if new_args:
+            if self.__module__.startswith("paddlenlp"):
+                logging.warning(
+                    f"The `forward` method of {self.__class__ if isinstance(self, paddle.nn.Layer) else self} is patched and the patch "
+                    "might be based on an old oversion which missing some "
+                    f"arguments compared with the latest, such as {new_args}. "
+                    "We automatically add compatibility on the patch for "
+                    "these arguemnts, and maybe the patch should be updated."
+                )
+            else:
+                logging.warning(
+                    f"The `forward` method of {self.__class__ if isinstance(self, paddle.nn.Layer) else self} "
+                    "is patched and the patch might be conflict with patches made "
+                    f"by paddlenlp which seems have more arguments such as {new_args}. "
+                    "We automatically add compatibility on the patch for "
+                    "these arguemnts, and maybe the patch should be updated."
+                )
+            if isinstance(self, paddle.nn.Layer) and inspect.isfunction(value):
+
+                @functools.wraps(value)
+                def wrap_fwd(*args, **kwargs):
+                    for arg in new_args:
+                        kwargs.pop(arg, None)
+                    return value(self, *args, **kwargs)
+
+            else:
+
+                @functools.wraps(value)
+                def wrap_fwd(*args, **kwargs):
+                    for arg in new_args:
+                        kwargs.pop(arg, None)
+                    return value(*args, **kwargs)
+
+            return wrap_fwd
+    return value
+
+
+class InitTrackerMeta(type(paddle.nn.Layer)):
+    """
+    This metaclass wraps the `__init__` method of a class to add `init_config`
+    attribute for instances of that class, and `init_config` use a dict to track
+    the initial configuration. If the class has `_pre_init` or `_post_init`
+    method, it would be hooked before or after `__init__` and called as
+    `_pre_init(self, init_fn, init_args)` or `_post_init(self, init_fn, init_args)`.
+    Since InitTrackerMeta would be used as metaclass for pretrained model classes,
+    which always are Layer and `type(Layer)` is not `type`, thus use `type(Layer)`
+    rather than `type` as base class for it to avoid inheritance metaclass
+    conflicts.
+    """
+
+    def __init__(cls, name, bases, attrs):
+        init_func = cls.__init__
+        # If attrs has `__init__`, wrap it using accessible `_pre_init, _post_init`.
+        # Otherwise, no need to wrap again since the super cls has been wrapped.
+        # TODO: remove reduplicated tracker if using super cls `__init__`
+        pre_init_func = getattr(cls, "_pre_init", None) if "__init__" in attrs else None
+        post_init_func = (
+            getattr(cls, "_post_init", None) if "__init__" in attrs else None
+        )
+        cls.__init__ = InitTrackerMeta.init_and_track_conf(
+            init_func, pre_init_func, post_init_func
+        )
+        super(InitTrackerMeta, cls).__init__(name, bases, attrs)
+
+    @staticmethod
+    def init_and_track_conf(init_func, pre_init_func=None, post_init_func=None):
+        """
+        Wraps `init_func`, which is the `__init__` method of a class, to add an
+        `init_config` attribute to instances of that class.
+        Args:
+            init_func (callable): It should be the `__init__` method of a class.
+                note: `self` is always an instance of the downstream model class, e.g. BertForTokenClassification
+            pre_init_func (callable, optional): If provided, it would be hooked before
+                `init_func` and called as `pre_init_func(self, init_func, *init_args, **init_kwargs)`.
+                Default None.
+            post_init_func (callable, optional): If provided, it would be hooked after
+                `init_func` and called as `post_init_func(self, init_func, *init_args, **init_kwargs)`.
+                Default None.
+
+        Returns:
+            function: the wrapped function
+        """
+
+        @functools.wraps(init_func)
+        def __impl__(self, *args, **kwargs):
+            # registered hook `pre_init_func`
+            if pre_init_func:
+                pre_init_func(self, init_func, *args, **kwargs)
+            # keep full configuration
+            init_func(self, *args, **kwargs)
+            # registered hook `post_init_func`
+            if post_init_func:
+                post_init_func(self, init_func, *args, **kwargs)
+            self.init_config = kwargs
+            if args:
+                kwargs["init_args"] = args
+            kwargs["init_class"] = self.__class__.__name__
+
+        return __impl__
+
+    def __setattr__(self, name, value):
+        value = adapt_stale_fwd_patch(self, name, value)
+        return super(InitTrackerMeta, self).__setattr__(name, value)
+
+
+class Trie:
+    """
+    Trie in Python. Creates a Trie out of a list of words. The trie is used to split on `added_tokens` in one pass.
+    Loosely based on https://en.wikipedia.org/wiki/Trie
+    """
+
+    def __init__(self):
+        self.data = {}
+
+    def add(self, word: str):
+        """
+        Passes over every char (utf-8 char) on word and recursively adds it to the internal `data` trie representation.
+        The special key `""` is used to represent termination.
+
+        This function is idempotent, adding the same word twice will leave the trie unchanged
+
+        Example:
+
+        ```python
+        >>> trie = Trie()
+        >>> trie.add("Hello 友達")
+        >>> trie.data
+        {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}}
+
+        >>> trie.add("Hello")
+        >>> trie.data
+        {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}}
+        ```
+        """
+        if not word:
+            # Prevent empty string
+            return
+        ref = self.data
+        for char in word:
+            ref[char] = char in ref and ref[char] or {}
+            ref = ref[char]
+        ref[""] = 1
+
+    def split(self, text: str) -> List[str]:
+        """
+        Will look for the words added to the trie within `text`. Output is the original string split along the
+        boundaries of the words found.
+
+        This trie will match the longest possible word first!
+
+        Example:
+
+        ```python
+        >>> trie = Trie()
+        >>> trie.split("[CLS] This is a extra_id_100")
+        ["[CLS] This is a extra_id_100"]
+
+        >>> trie.add("[CLS]")
+        >>> trie.add("extra_id_1")
+        >>> trie.add("extra_id_100")
+        >>> trie.split("[CLS] This is a extra_id_100")
+        ["[CLS]", " This is a ", "extra_id_100"]
+        ```
+        """
+        # indexes are counted left of the chars index.
+        # "hello", index 0, is left of h, index 1 is between h and e.
+        # index 5 is right of the "o".
+
+        # States are going to capture every possible start (indexes as above)
+        # as keys, and have as values, a pointer to the position in the trie
+        # where we're at. This is a partial match for now.
+        # This enables to keep track of multiple matches while we're iterating
+        # the string
+        # If the trie contains, "blowing", and "lower" and we encounter the
+        # string "blower", we need to split into ["b", "lower"].
+        # This is where we need to keep track of multiple possible starts.
+        states = OrderedDict()
+
+        # This will contain every indices where we need
+        # to cut.
+        # We force to cut at offset 0 and len(text) (added later)
+        offsets = [0]
+
+        # This is used by the lookahead which needs to skip over
+        # some text where the full match exceeded the place in the initial
+        # for loop
+        skip = 0
+        # Main loop, giving this algorithm O(n) complexity
+        for current, current_char in enumerate(text):
+            if skip and current < skip:
+                # Prevents the lookahead for matching twice
+                # like extra_id_100 and id_100
+                continue
+
+            # This will track every state
+            # that stop matching, we need to stop tracking them.
+            # If we look at "lowball", we're going to match "l" (add it to states), "o", "w", then
+            # fail on "b", we need to remove 0 from the valid states.
+            to_remove = set()
+            # Whenever we find a match, we need to drop everything;
+            # this is a greedy algorithm, it will match on the first found token
+            reset = False
+
+            # In this case, we already have partial matches (But unfinished)
+            for start, trie_pointer in states.items():
+                if "" in trie_pointer:
+                    # This is a final match, we need to reset and
+                    # store the results in `offsets`.
+
+                    # Lookahead to match longest first
+                    # Important in case of extra_id_1 vs extra_id_100
+                    # Here we are also actively looking for other earlier partial
+                    # matches
+                    # "[CLS]", "L", we need to match CLS even if L is special
+                    for lookstart, looktrie_pointer in states.items():
+                        if lookstart > start:
+                            # This partial match is later, we can stop looking
+                            break
+                        elif lookstart < start:
+                            # This partial match is earlier, the trie pointer
+                            # was already updated, so index is + 1
+                            lookahead_index = current + 1
+                            end = current + 1
+                        else:
+                            # Here lookstart == start and
+                            #      looktrie_pointer == trie_pointer
+                            # It wasn't updated yet so indices are current ones
+                            lookahead_index = current
+                            end = current
+                        next_char = (
+                            text[lookahead_index]
+                            if lookahead_index < len(text)
+                            else None
+                        )
+                        if "" in looktrie_pointer:
+                            start = lookstart
+                            end = lookahead_index
+                            skip = lookahead_index
+
+                        while next_char in looktrie_pointer:
+                            looktrie_pointer = looktrie_pointer[next_char]
+                            lookahead_index += 1
+                            if "" in looktrie_pointer:
+                                start = lookstart
+                                end = lookahead_index
+                                skip = lookahead_index
+
+                            if lookahead_index == len(text):
+                                # End of string
+                                break
+                            next_char = text[lookahead_index]
+                        # End lookahead
+
+                        # Storing and resetting
+                    offsets.append(start)
+                    offsets.append(end)
+                    reset = True
+                    break
+                elif current_char in trie_pointer:
+                    # The current character being looked at has a match within the trie
+                    # update the pointer (it will be stored back into states later).
+                    trie_pointer = trie_pointer[current_char]
+
+                    # Storing back the new pointer into the states.
+                    # Partial matches got longer by one.
+                    states[start] = trie_pointer
+                else:
+                    # The new character has no match in the trie, we need
+                    # to stop keeping track of this partial match.
+                    # We can't do it directly within the loop because of how
+                    # python iteration works
+                    to_remove.add(start)
+
+            # Either clearing the full start (we found a real match)
+            # Or clearing only the partial matches that didn't work.
+            if reset:
+                states = {}
+            else:
+                for start in to_remove:
+                    del states[start]
+
+            # If this character is a starting character within the trie
+            # start keeping track of this partial match.
+            if current >= skip and current_char in self.data:
+                states[current] = self.data[current_char]
+
+        # We have a cut at the end with states.
+        for start, trie_pointer in states.items():
+            if "" in trie_pointer:
+                # This is a final match, we need to reset and
+                # store the results in `offsets`.
+                end = len(text)
+                offsets.append(start)
+                offsets.append(end)
+                # Longest cut is always the one with lower start so the first
+                # item so we need to break.
+                break
+
+        return self.cut_text(text, offsets)
+
+    def cut_text(self, text, offsets):
+        # We have all the offsets now, we just need to do the actual splitting.
+        # We need to eventually add the first part of the string and the eventual
+        # last part.
+        offsets.append(len(text))
+        tokens = []
+        start = 0
+        for end in offsets:
+            if start > end:
+                logging.error(
+                    "There was a bug in Trie algorithm in tokenization. Attempting to recover. Please report it anyway."
+                )
+                continue
+            elif start == end:
+                # This might happen if there's a match at index 0
+                # we're also preventing zero-width cuts in case of two
+                # consecutive matches
+                continue
+            tokens.append(text[start:end])
+            start = end
+
+        return tokens
+
+
+def _insert_one_token_to_ordered_list(token_list: List[str], new_token: str):
+    """
+    Inserts one token to an ordered list if it does not already exist. Note: token_list must be sorted.
+    """
+    insertion_idx = bisect.bisect_left(token_list, new_token)
+    # Checks if new_token is already in the ordered token_list
+    if insertion_idx < len(token_list) and token_list[insertion_idx] == new_token:
+        # new_token is in token_list, don't add
+        return
+    else:
+        token_list.insert(insertion_idx, new_token)
+
+
+def _is_control(char):
+    """Checks whether `chars` is a control character."""
+    # These are technically control characters but we count them as whitespace
+    # characters.
+    if char == "\t" or char == "\n" or char == "\r":
+        return False
+    cat = unicodedata.category(char)
+    if cat.startswith("C"):
+        return True
+    return False
+
+
+def _is_nonnormalized_char(char):
+    """Check whther `chars` is a non-normalized character."""
+    cp = ord(char)
+    if (
+        (0xFF00 <= cp <= 0xFFEF)  # Halfwidth and Fullwidth Forms
+        or (0xFE50 <= cp <= 0xFE6B)  # Small Form Variants
+        or (0x3358 <= cp <= 0x33FF)  # CJK Compatibility
+        or (0x249C <= cp <= 0x24E9)  # Enclosed Alphanumerics: Ⓛ ⒰
+        or (0x3200 <= cp <= 0x32FF)  # Enclosed CJK Letters and Months
+    ):
+        return True
+
+    return False
+
+
+def _is_nonnormalized_numeric(char):
+    """Check whether `chars` is a non-normalized numeric character."""
+    cp = ord(char)
+    if (
+        (0x2460 <= cp <= 0x249B)  # Enclosed Alphanumerics
+        or (0x24EA <= cp <= 0x24FF)  # Enclosed Alphanumerics
+        or (0x2776 <= cp <= 0x2793)
+        or (0x2160 <= cp <= 0x217F)  # Number Forms
+    ):
+        return True
+
+    return False
+
+
+def normalize_chars(text):
+    """
+    Normalize the text for multilingual and Chinese models. Unicode ranges:
+    https://www.ling.upenn.edu/courses/Spring_2003/ling538/UnicodeRanges.html
+    """
+    output = []
+    for char in text:
+        if _is_nonnormalized_char(char):
+            for c in unicodedata.normalize("NFKC", char):
+                output.append(c)
+        elif _is_nonnormalized_numeric(char):
+            output.append(" ")
+            for c in str(int(unicodedata.numeric(char))):
+                output.append(c)
+            output.append(" ")
+        elif ord(char) == 0xF979:  # https://www.zhihu.com/question/20697984
+            output.append("凉")
+        else:
+            output.append(char)
+    return "".join(output)
+
+
+class ChatTemplateMixin:
+    chat_template: Optional[ChatTemplate] = None
+
+    def apply_chat_template(
+        self,
+        conversation: Union[List[List[str]], List[Dict[str, str]], str],
+        tokenize: bool = True,
+        context_data: Dict[str, Any] = {},
+        **tokenizer_kwargs,
+    ) -> str | dict[str, numpy.ndarray | paddle.Tensor]:
+        """apply chat_template rules to conversation which should not be batched data
+
+        Args:
+            conversation (List[List[str]] | List[Dict[str, str]] | str): the conversation messages between user and bot
+            context_data (Dict[str, Any]): the context data for chat_template.json
+            tokenize (bool, optional): whether do tokenization. Defaults to True.
+
+        Returns:
+            str | dict[str, numpy.ndarray | paddle.Tensor]: return the result of applied data
+        """
+        if not self.chat_template:
+            raise ValueError(
+                "chat_template is not set, please set chat_template first."
+            )
+        elif isinstance(self.chat_template, Template):
+            add_generation_prompt = tokenizer_kwargs.pop("add_generation_prompt", True)
+            query = self._apply_chat_template(
+                conversation, add_generation_prompt=add_generation_prompt
+            )
+        elif isinstance(self.chat_template, ChatTemplate):
+            query = self._apply_chat_template_paddle(conversation, context_data)
+
+        if not tokenize:
+            return query
+
+        # chat_template should not add special tokens
+        tokenizer_kwargs["add_special_tokens"] = False
+        return self(query, **tokenizer_kwargs)
+
+    def _apply_chat_template_paddle(
+        self,
+        conversation: List[Dict[str, str]] | str,
+        context_data: Dict[str, Any] = {},
+    ) -> str | dict[str, numpy.ndarray | paddle.Tensor]:
+        context_data = self.chat_template._init_context_data(context_data)
+
+        if isinstance(conversation, str):
+            conversation = [[conversation]]
+        elif isinstance(conversation, list) and isinstance(conversation[0], str):
+            raise ValueError(
+                "apply_chat_template do not support appling batch conversations, "
+                "so you should apply the conversation one by one."
+            )
+
+        query = self.chat_template(conversation, context_data=context_data)
+        return query
+
+    def _apply_chat_template(
+        self,
+        conversation: Union[List[List[str]], List[Dict[str, str]], str],
+        add_generation_prompt=True,
+    ) -> str | dict[str, numpy.ndarray | paddle.Tensor]:
+        if isinstance(conversation, str):
+            conversations = [{"role": "user", "content": conversation}]
+        elif isinstance(conversation, list):
+            assert len(conversation) > 0, "empty conversation is not allowed"
+            if isinstance(conversation[0], list):
+                conversations = convert_to_dict_message(conversation)
+            elif isinstance(conversation[0], dict):
+                conversations = conversation
+            else:
+                raise ValueError(
+                    "apply_chat_template do not support appling batch conversations, "
+                    "so you should apply the conversation one by one."
+                )
+        query = self.chat_template.render(
+            messages=conversations,
+            **self.special_tokens_map,
+            add_generation_prompt=add_generation_prompt,
+        )
+        return query
+
+    def encode_chat_inputs(
+        self,
+        conversations: List[Dict[str, str]],
+        context_data: Dict[str, Any] = {},
+        **kwargs,
+    ):
+        """Encodes conversation to pairs of token ids.
+        Turn 0: bos + system + sep + user     bot + eos
+        Turn t: sep + bot + query             bot + eos
+
+        Args:
+            conversations (List[Dict[str, str]]): the conversation data
+            context_data (Dict[str, Any]): the context data of conversation
+
+        Returns:
+            List[list[int], list[int]]: the pair of input_ids and target_ids
+        """
+        if not self.chat_template:
+            raise ValueError(
+                "chat_template is not set, please set chat_template first."
+            )
+        elif isinstance(self.chat_template, Template):
+            add_generation_prompt = kwargs.pop("add_generation_prompt", True)
+            query = self._encode_chat_inputs(
+                conversations, context_data, add_generation_prompt=add_generation_prompt
+            )
+        elif isinstance(self.chat_template, ChatTemplate):
+            query = self._encode_chat_inputs_paddle(conversations, context_data)
+        return query
+
+    def _encode_chat_inputs_paddle(
+        self, conversations: List[Dict[str, str]], context_data: Dict[str, Any] = {}
+    ):
+        context_data = self.chat_template._init_context_data(context_data)
+        # encode system
+        result = {}
+        if self.chat_template.system:
+            system = self.chat_template.render_system(context_data)
+            result["system"] = self.encode(system, add_special_tokens=False)[
+                "input_ids"
+            ]
+
+        # encode conversation
+        conversation_ids = []
+        for index, conversation in enumerate(conversations):
+            # give more control to chat_template
+            context_data["is_first"] = index == 0
+            context_data["is_last"] = index == len(conversations) - 1
+
+            user_input, bot_output = self.chat_template.render_conversation(
+                conversation, index=index, context_data=context_data
+            )
+            user_ids = self.encode(user_input, add_special_tokens=False)["input_ids"]
+            bot_ids = self.encode(bot_output, add_special_tokens=False)["input_ids"]
+            conversation_ids.append([user_ids, bot_ids])
+
+        result["conversations"] = conversation_ids
+        return result
+
+    def _encode_chat_inputs(
+        self,
+        conversations: List[Dict[str, str]],
+        context_data: Dict[str, Any] = {},
+        system: str = None,
+        add_generation_prompt=True,
+    ):
+        result = {}
+
+        # Some templates do not support a system msg, so we need to check it first.
+        if system:
+            try:
+                self.chat_template.render(
+                    messages={"role": "system", "content": system}
+                )
+            except Exception as e:
+                raise ValueError("System is not supported in this tokenizer.", e)
+
+        # convert list msg to role dict msg
+        conversation_dict = []
+        origin_msg = []
+        for round in conversations:
+            round_role = [
+                {"role": "user", "content": round[0]},
+                {"role": "assistant", "content": round[1]},
+            ]
+            origin_msg.extend(round_role)
+            conversation_dict.append(round_role)
+        ans = []
+
+        # get the answer for each single round, then render the chat entirely and split it by the single-round answers
+        # attention: answer should include end token!
+        for conv in conversation_dict:
+            roundi = [system] + conv if system else conv
+            roundi_str = self.chat_template.render(
+                messages=roundi, add_generation_prompt=False, **self.special_tokens_map
+            )
+            roundi_no_ans = [system] + [conv[0]] if system else [conv[0]]
+            roundi_no_ans_str = self.chat_template.render(
+                messages=roundi_no_ans,
+                add_generation_prompt=add_generation_prompt,
+                **self.special_tokens_map,
+            )
+            ans_roundi = roundi_str[len(roundi_no_ans_str) :]
+            ans.append(ans_roundi)
+
+        non_learnable_parts = self._extract_non_learnable_parts(origin_msg, ans)
+        assert len(non_learnable_parts) == len(ans)
+
+        conversation_ids = []
+        for i in range(len(non_learnable_parts)):
+            conversation_ids.append(
+                self.batch_encode(
+                    [non_learnable_parts[i], ans[i]],
+                    add_special_tokens=False,
+                    padding=False,
+                )["input_ids"]
+            )
+
+        result["conversations"] = conversation_ids
+        return result
+
+    def _extract_non_learnable_parts(
+        self, origin_msg: List[Dict[str, str]], split_s: List[str]
+    ):
+        """Split the entire chat by specified words. Extract the non-learnable parts."""
+        # escape the regex special characters in the split words, e.g. | -> \|
+        regex_pattern = "|".join(map(re.escape, split_s))
+        # split by the specified words
+        non_learnable_parts = re.split(
+            r"(?:%s)" % regex_pattern,
+            self.chat_template.render(
+                messages=origin_msg,
+                add_generation_prompt=False,
+                **self.special_tokens_map,
+            ),
+        )
+        if non_learnable_parts[-1] == "":
+            non_learnable_parts.pop()
+        return non_learnable_parts
+
+    @classmethod
+    def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs):
+        cache_dir = kwargs.pop("cache_dir", None)
+        from_hf_hub = kwargs.pop("from_hf_hub", False)
+        from_aistudio = kwargs.pop("from_aistudio", False)
+        subfolder = kwargs.pop("subfolder", "")
+        if subfolder is None:
+            subfolder = ""
+
+        kwargs["subfolder"] = subfolder
+        kwargs["cache_dir"] = cache_dir
+        kwargs["from_hf_hub"] = from_hf_hub
+        kwargs["from_aistudio"] = from_aistudio
+        kwargs["return_tokenizer_file_dir"] = True
+        tokenizer, tokenizer_config_file_dir = super().from_pretrained(
+            pretrained_model_name_or_path, *args, **kwargs
+        )
+
+        # load chat-template
+        chat_template_file = os.path.join(
+            tokenizer_config_file_dir, CHAT_TEMPLATE_CONFIG_NAME
+        )
+        if not os.path.exists(chat_template_file):
+            return tokenizer
+
+        if tokenizer.chat_template is not None:
+            logging.warning(
+                "Chat-template already exists in config file, it will be overwritten by chat_template.json file."
+            )
+            logging.warning(
+                "`chat_template.json` will be deprecated in the future! Please set it in `tokenizer_config.json`."
+            )
+        tokenizer.init_chat_template(chat_template_file)
+        return tokenizer
+
+    def init_chat_template(self, chat_template: str | dict):
+        """init chat_tempalte by file_path or template dict data
+
+        Args:
+            chat_template (str | dict): file_path or template dict data
+        """
+        if isinstance(chat_template, str):
+            if not os.path.exists(chat_template):
+                try:
+                    self.chat_template: Template = ChatTemplate._compile_jinja_template(
+                        chat_template
+                    )
+                except TemplateSyntaxError:
+                    # It is neither a jinja string nor a path string
+                    raise TemplateSyntaxError(
+                        "The chat-template in json is not valid jinja string: {}".format(
+                            chat_template
+                        ),
+                        lineno=0,  # fake lineno, required by TemplateSyntaxError
+                    )
+            else:
+                self.chat_template = ChatTemplate.from_file(chat_template)
+        elif isinstance(chat_template, dict):
+            self.chat_template = ChatTemplate.from_dict(chat_template)
+        elif isinstance(chat_template, ChatTemplate):
+            self.chat_template = chat_template
+        else:
+            raise ValueError("Receive error chat_template data: ", chat_template)
+
+    def save_resources(self, save_directory):
+        super().save_resources(save_directory)
+
+        if isinstance(
+            self.chat_template, ChatTemplate
+        ):  # Future remove if ChatTemplate is deprecated
+            chat_template_file = os.path.join(save_directory, CHAT_TEMPLATE_CONFIG_NAME)
+            with open(chat_template_file, "w", encoding="utf-8") as f:
+                json.dump(asdict(self.chat_template), f, ensure_ascii=False, indent=4)
+            logging.info("Chat-template config file saved in " + chat_template_file)
+
+
+@six.add_metaclass(InitTrackerMeta)
+class PretrainedTokenizer(ChatTemplateMixin, PretrainedTokenizerBase):
+    """
+    Base class for all tokenizers.
+
+    Inherits from [`~tokenizer_utils_base.PretrainedTokenizerBase`].
+
+    Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading
+    pretrained tokenizers as well as adding tokens to the vocabulary.
+
+    This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the
+    specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
+
+    - **resource_files_names** (`Dict[str, str]`) -- A dictionary with, as keys, the `__init__` keyword name of each
+        vocabulary file required by the model, and as associated values, the filename for saving the associated file
+        (string).
+    - **pretrained_resource_files_map** (`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the
+        high-level keys being the `__init__` keyword name of each vocabulary file required by the model, the
+        low-level being the `short-cut-names` of the pretrained models with, as associated values, the `url` to the
+        associated pretrained vocabulary file.
+    - **max_model_input_sizes** (`Dict[str, Optional[int]]`) -- A dictionary with, as keys, the `short-cut-names`
+        of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model,
+        or `None` if the model has no maximum input size.
+    - **pretrained_init_configuration** (`Dict[str, Dict[str, Any]]`) -- A dictionary with, as keys, the
+        `short-cut-names` of the pretrained models, and as associated values, a dictionary of specific arguments to
+        pass to the `__init__` method of the tokenizer class for this pretrained model when loading the tokenizer
+        with the [`~tokenizer_utils_base.PretrainedTokenizerBase.from_pretrained`] method.
+    - **model_input_names** (`List[str]`) -- A list of inputs expected in the forward pass of the model.
+    - **padding_side** (`str`) -- The default value for the side on which the model should have padding applied.
+        Should be `'right'` or `'left'`.
+    - **truncation_side** (`str`) -- The default value for the side on which the model should have truncation
+        applied. Should be `'right'` or `'left'`.
+
+    Moreover, methods common to tokenizers for tokenization, token/id conversion
+    and encoding as model inputs are also provided here.
+
+    In addition, the metaclass `InitTrackerMeta` is used to create `PretrainedTokenizer`,
+    which lets subclasses automatically track the arguments passed to `__init__`
+    and expose the special tokens given at initialization as attributes.
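+
+    Example (an illustrative sketch of a minimal subclass; `MyTokenizer`, the
+    `vocab.txt` resource and the whitespace `_tokenize` are hypothetical):
+
+    ```python
+    class MyTokenizer(PretrainedTokenizer):
+        # vocab.txt is expected to hold one token per line (see `load_vocabulary`)
+        resource_files_names = {"vocab_file": "vocab.txt"}
+
+        def __init__(self, vocab_file, unk_token="[UNK]"):
+            self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
+
+        @property
+        def vocab_size(self):
+            return len(self.vocab)
+
+        def _tokenize(self, text):
+            # naive whitespace tokenization, for illustration only
+            return text.split()
+    ```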
+    """
+
+    added_tokens_encoder: Dict[str, int] = {}
+    added_tokens_decoder: Dict[int, str] = {}
+    unique_no_split_tokens: List[str] = []
+    tokens_trie = Trie()
+
+    _decode_use_source_tokenizer = False
+
+    def _pre_init(self, original_init, *args, **kwargs):
+        """
+        This hook runs before `__init__` to add special tokens (arguments of
+        `__init__` whose names end with `_token`) as attributes of the tokenizer
+        instance.
+        """
+        init_dict = fn_args_to_dict(original_init, *((self,) + args), **kwargs)
+        init_dict.pop("self", None)
+        super(PretrainedTokenizer, self).__init__(**init_dict)
+
+        self.added_tokens_encoder: Dict[str, int] = {}
+        self.added_tokens_decoder: Dict[int, str] = {}
+        self.unique_no_split_tokens: List[str] = []
+        self.tokens_trie = Trie()
+
+        self._decode_use_source_tokenizer = False
+
+    def _build_special_tokens_map_extended(self, **kwargs):
+        for key, value in kwargs.items():
+            if value is None:
+                continue
+            if key in self.SPECIAL_TOKENS_ATTRIBUTES:
+                if key == "additional_special_tokens":
+                    assert isinstance(
+                        value, (list, tuple)
+                    ), f"Value {value} is not a list or tuple"
+                    assert all(
+                        isinstance(t, (str, AddedToken)) for t in value
+                    ), "One of the tokens is not a string or an AddedToken"
+                    setattr(self, key, value)
+                elif isinstance(value, (str, AddedToken)):
+                    setattr(self, key, value)
+                else:
+                    raise TypeError(
+                        f"special token {key} has to be either str or AddedToken but got: {type(value)}"
+                    )
+
+    @property
+    def vocab_size(self) -> int:
+        """
+        `int`: Size of the base vocabulary (without the added tokens).
+        """
+        raise NotImplementedError
+
+    @property
+    def is_fast(self) -> bool:
+        return False
+
+    def get_added_vocab(self) -> Dict[str, int]:
+        """
+        Returns the added tokens in the vocabulary as a dictionary of token to index.
+
+        Returns:
+            `Dict[str, int]`: The added tokens.
+        """
+        return self.added_tokens_encoder
+
+    def __len__(self):
+        """
+        Size of the full vocabulary with the added tokens.
+        """
+        return self.vocab_size + len(self.added_tokens_encoder)
+
+    def _add_tokens(
+        self,
+        new_tokens: Union[List[str], List[AddedToken]],
+        special_tokens: bool = False,
+    ) -> int:
+        """
+        Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
+        it with indices starting from the length of the current vocabulary.
+
+        Args:
+            new_tokens (`List[str]` or `List[AddedToken]`):
+                Token(s) to add to the vocabulary. A token is only added if it's not already in the vocabulary (tested by
+                checking if the tokenizer assigns the index of the `unk_token` to them).
+            special_tokens (`bool`, *optional*, defaults to `False`):
+                Whether or not the tokens should be added as special tokens.
+
+        Returns:
+            `int`: The number of tokens actually added to the vocabulary.
+
+        Examples:
+
+        ```python
+        # Let's see how to increase the vocabulary of Bert model and tokenizer
+        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+        model = BertModel.from_pretrained("bert-base-uncased")
+
+        num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
+        print("We have added", num_added_toks, "tokens")
+        ```"""
+        new_tokens = [str(tok) for tok in new_tokens]
+
+        tokens_to_add = []
+        for token in new_tokens:
+            if not isinstance(token, str):
+                raise TypeError(f"Token {token} is not a string but a {type(token)}.")
+            if (
+                not special_tokens
+                and hasattr(self, "do_lower_case")
+                and self.do_lower_case
+            ):
+                token = token.lower()
+            if (
+                token != self.unk_token
+                and self.convert_tokens_to_ids(token)
+                == self.convert_tokens_to_ids(self.unk_token)
+                and token not in tokens_to_add
+            ):
+                tokens_to_add.append(token)
+                if self.verbose:
+                    logging.info(f"Adding {token} to the vocabulary")
+
+        added_tok_encoder = dict(
+            (tok, len(self) + i) for i, tok in enumerate(tokens_to_add)
+        )
+        added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
+        self.added_tokens_encoder.update(added_tok_encoder)
+        self.added_tokens_decoder.update(added_tok_decoder)
+
+        # Make sure we don't split on any special tokens (even if they were already in the vocab before, e.g. for Albert)
+        if special_tokens:
+            if len(new_tokens) == 1:
+                _insert_one_token_to_ordered_list(
+                    self.unique_no_split_tokens, new_tokens[0]
+                )
+            else:
+                self.unique_no_split_tokens = sorted(
+                    set(self.unique_no_split_tokens).union(set(new_tokens))
+                )
+        else:
+            # Or on the newly added tokens
+            if len(tokens_to_add) == 1:
+                _insert_one_token_to_ordered_list(
+                    self.unique_no_split_tokens, tokens_to_add[0]
+                )
+            else:
+                self.unique_no_split_tokens = sorted(
+                    set(self.unique_no_split_tokens).union(set(tokens_to_add))
+                )
+        self._create_trie(self.unique_no_split_tokens)
+
+        return len(tokens_to_add)
+
+    def _create_trie(self, unique_no_split_tokens):
+        trie = Trie()
+        for token in unique_no_split_tokens:
+            if (
+                hasattr(self, "do_lower_case")
+                and self.do_lower_case
+                and token not in self.all_special_tokens
+            ):
+                trie.add(token.lower())
+            else:
+                trie.add(token)
+        self.tokens_trie = trie
+
+    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
+        """
+        Performs any necessary transformations before tokenization.
+
+        This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the
+        `kwargs` at the end of the encoding process to be sure all the arguments have been used.
+
+        Args:
+            text (`str`):
+                The text to prepare.
+            is_split_into_words (`bool`, *optional*, defaults to `False`):
+                Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
+                tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
+                which it will tokenize. This is useful for NER or token classification.
+            kwargs:
+                Keyword arguments to use for the tokenization.
+
+        Returns:
+            `Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs.
+        """
+
+        return (text, kwargs)
+
+    def tokenize(self, text: TextInput, **kwargs) -> List[str]:
+        """
+        Converts a string into a sequence of tokens, using the tokenizer.
+
+        Splits into words for word-based vocabularies or sub-words for sub-word-based vocabularies
+        (BPE/SentencePiece/WordPiece). Takes care of added tokens.
+
+        Args:
+            text (`str`):
+                The sequence to be encoded.
+            **kwargs (additional keyword arguments):
+                Passed along to the model-specific `prepare_for_tokenization` preprocessing method.
+
+        Returns:
+            `List[str]`: The list of tokens.
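+
+        Example (illustrative; assumes a concrete subclass such as `GPTTokenizer`
+        from this package is instantiated as `tokenizer`, and the exact output
+        depends on its vocabulary):
+
+        ```python
+        # added tokens are matched first and never split; the rest goes through `_tokenize`
+        tokenizer.add_tokens(["<special_token_1>"], special_tokens=True)
+        tokenizer.tokenize("This is something <special_token_1> else")
+        # e.g. ["This", " is", " something", "<special_token_1>", "else"]
+        ```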
+        """
+        # Simple mapping string => AddedToken for special tokens with specific tokenization behaviors
+        all_special_tokens_extended = dict(
+            (str(t), t)
+            for t in self.all_special_tokens_extended
+            if isinstance(t, AddedToken)
+        )
+
+        text, kwargs = self.prepare_for_tokenization(text, **kwargs)
+
+        # TODO: should this be in the base class?
+        if hasattr(self, "do_lower_case") and self.do_lower_case:
+            # convert non-special tokens to lowercase
+            escaped_special_toks = [
+                re.escape(s_tok)
+                for s_tok in (self.unique_no_split_tokens + self.all_special_tokens)
+            ]
+            pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)"
+            text = re.sub(
+                pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text
+            )
+
+        no_split_token = set(self.unique_no_split_tokens)
+        tokens = self.tokens_trie.split(text)
+
+        # ["This is something", "<special_token_1>", "  else"]
+        for i, token in enumerate(tokens):
+            if token in no_split_token:
+                tok_extended = all_special_tokens_extended.get(token, None)
+                left = tokens[i - 1] if i > 0 else None
+                right = tokens[i + 1] if i < len(tokens) - 1 else None
+                if isinstance(tok_extended, AddedToken):
+                    if tok_extended.rstrip and right:
+                        # A bit counter-intuitive but we strip the left of the string
+                        # since tok_extended.rstrip means the special token is eating all white spaces on its right
+                        tokens[i + 1] = right.lstrip()
+                    # Strip white spaces on the left
+                    if tok_extended.lstrip and left:
+                        tokens[i - 1] = left.rstrip()  # Opposite here
+                else:
+                    # We strip left and right by default
+                    if right:
+                        tokens[i + 1] = right.lstrip()
+                    if left:
+                        tokens[i - 1] = left.rstrip()
+        # ["This is something", "<special_token_1>", "else"]
+        tokenized_text = []
+        for token in tokens:
+            # Need to skip eventual empty (fully stripped) tokens
+            if not token:
+                continue
+            if token in no_split_token:
+                tokenized_text.append(token)
+            else:
+                tokenized_text.extend(self._tokenize(token))
+        # ["This", " is", " something", "<special_token_1>", "else"]
+        return tokenized_text
+
+    def _tokenize(self, text, **kwargs):
+        """
+        Converts a string into a sequence of tokens (strings), using the tokenizer. Splits into words for word-based
+        vocabularies or sub-words for sub-word-based vocabularies (BPE/SentencePiece/WordPiece).
+
+        Do NOT take care of added tokens.
+        """
+        raise NotImplementedError
+
+    def convert_tokens_to_ids(self, tokens):
+        if tokens is None:
+            return None
+
+        if isinstance(tokens, str):
+            return self._convert_token_to_id_with_added_voc(tokens)
+
+        ids = []
+        for token in tokens:
+            ids.append(self._convert_token_to_id_with_added_voc(token))
+
+        return ids
+
+    def _convert_token_to_id_with_added_voc(self, token):
+        if token is None:
+            return None
+
+        if token in self.added_tokens_encoder:
+            return self.added_tokens_encoder[token]
+        return self._convert_token_to_id(token)
+
+    def _convert_token_to_id(self, token):
+
+        return self.vocab.to_indices(token)
+
+    def convert_tokens_to_string(self, tokens):
+        """
+        Converts a sequence of tokens (list of strings) to a single string by
+        using ``' '.join(tokens)``.
+
+        Args:
+            tokens (list[str]): A sequence of tokens.
+
+        Returns:
+            str: Converted string.
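+
+        Example (for this base implementation; subclasses may override it):
+
+        ```python
+        tokenizer.convert_tokens_to_string(["he", "llo", "world"])
+        # 'he llo world'
+        ```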
+        """
+        return " ".join(tokens)
+
+    def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
+        if isinstance(ids, int):
+            if ids in self.added_tokens_decoder:
+                return self.added_tokens_decoder[ids]
+            else:
+                return self._convert_id_to_token(ids)
+        tokens = []
+        for index in ids:
+            index = int(index)
+            if skip_special_tokens and index in self.all_special_ids:
+                continue
+            if index in self.added_tokens_decoder:
+                tokens.append(self.added_tokens_decoder[index])
+            else:
+                tokens.append(self._convert_id_to_token(index))
+        return tokens
+
+    def _convert_id_to_token(self, index):
+
+        return self.vocab.to_tokens(index)
+
+    @staticmethod
+    def load_vocabulary(
+        filepath,
+        unk_token=None,
+        pad_token=None,
+        bos_token=None,
+        eos_token=None,
+        **kwargs,
+    ):
+        """
+        Instantiates a `Vocab` from a vocabulary file using `Vocab.from_dict`,
+        preserving all tokens. The file contains one token per line, and the
+        line number is the index of the corresponding token.
+
+        Args:
+            filepath (str): path of file to construct vocabulary.
+            unk_token (str): special token for unknown tokens. Can be `None` if
+                not needed. Defaults to `None`.
+            pad_token (str): special token for padding. Can be `None` if not
+                needed. Defaults to `None`.
+            bos_token (str): special token for the beginning of a sequence. Can
+                be `None` if not needed. Defaults to `None`.
+            eos_token (str): special token for the end of a sequence. Can be
+                `None` if not needed. Defaults to `None`.
+            **kwargs (dict): keyword arguments for `Vocab.from_dict`.
+
+        Returns:
+            Vocab: An instance of `Vocab`.
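+
+        Example (illustrative; `vocab.txt` is a hypothetical file whose lines are
+        "[PAD]", "[UNK]", "hello", "world"):
+
+        ```python
+        vocab = PretrainedTokenizer.load_vocabulary("vocab.txt", unk_token="[UNK]")
+        vocab.to_indices("hello")  # 2, i.e. its zero-based line number
+        ```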
+        """
+        token_to_idx = {}
+        with io.open(filepath, "r", encoding="utf-8") as f:
+            for index, line in enumerate(f):
+                token = line.rstrip("\n")
+                token_to_idx[token] = int(index)
+        vocab = Vocab.from_dict(
+            token_to_idx,
+            unk_token=unk_token,
+            pad_token=pad_token,
+            bos_token=bos_token,
+            eos_token=eos_token,
+            **kwargs,
+        )
+        return vocab
+
+    @staticmethod
+    def save_vocabulary(filepath, vocab):
+        """
+        Save all tokens to a vocabulary file. The file contains one token per line,
+        and the line number is the index of the corresponding token.
+
+        Args:
+            filepath (str): File path to be saved to.
+            vocab (Vocab|dict): The `Vocab` or `dict` instance to be saved.
+        """
+        if isinstance(vocab, Vocab):
+            tokens = vocab.idx_to_token
+        else:
+            tokens = sorted(vocab.keys(), key=lambda token: vocab[token])
+        with io.open(filepath, "w", encoding="utf-8") as f:
+            for token in tokens:
+                f.write(token + "\n")
+
+    def get_special_tokens_mask(
+        self, token_ids_0, token_ids_1=None, already_has_special_tokens=False
+    ):
+        """
+        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
+        special tokens using the tokenizer ``encode`` methods.
+
+        Args:
+            token_ids_0 (List[int]): List of ids of the first sequence.
+            token_ids_1 (List[int], optional): List of ids of the second sequence.
+            already_has_special_tokens (bool, optional): Whether or not the token list is already
+                formatted with special tokens for the model. Defaults to `False`.
+
+        Returns:
+            results (List[int]): The list of integers in the range [0, 1]:
+                1 for a special token, 0 for a sequence token.
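+
+        Example (for this base implementation, which assumes no special tokens are
+        added; subclasses that do add special tokens override this method):
+
+        ```python
+        tokenizer.get_special_tokens_mask([10, 11, 12])
+        # [0, 0, 0]
+        ```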
+        """
+        if already_has_special_tokens:
+            if token_ids_1 is not None:
+                raise ValueError(
+                    "You should not supply a second sequence if the provided sequence of "
+                    "ids is already formatted with special tokens for the model."
+                )
+
+            return super().get_special_tokens_mask(
+                token_ids_0=token_ids_0,
+                token_ids_1=token_ids_1,
+                already_has_special_tokens=True,
+            )
+        return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))
+
+    def num_special_tokens_to_add(self, pair=False):
+        """
+        Returns the number of added tokens when encoding a sequence with special tokens.
+
+        Args:
+            pair (bool, optional):
+                Whether the number of added tokens should be computed in the case of a sequence pair or a single
+                sequence. Defaults to `False`.
+        Returns:
+            int: Number of special tokens added to sequences.
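+
+        Example (illustrative; the value depends on the subclass's
+        `build_inputs_with_special_tokens`, e.g. a BERT-like tokenizer that wraps
+        inputs as `[CLS] A [SEP]` / `[CLS] A [SEP] B [SEP]`):
+
+        ```python
+        tokenizer.num_special_tokens_to_add(pair=False)  # e.g. 2
+        tokenizer.num_special_tokens_to_add(pair=True)   # e.g. 3
+        ```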
+        """
+        token_ids_0 = []
+        token_ids_1 = []
+        return len(
+            self.build_inputs_with_special_tokens(
+                token_ids_0, token_ids_1 if pair else None
+            )
+        )
+
+    def _encode_plus(
+        self,
+        text: Union[TextInput, PreTokenizedInput, EncodedInput],
+        text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
+        add_special_tokens: bool = True,
+        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+        max_length: Optional[int] = None,
+        stride: int = 0,
+        is_split_into_words: bool = False,
+        pad_to_multiple_of: Optional[int] = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        return_position_ids: Optional[bool] = None,
+        return_token_type_ids: Optional[bool] = None,
+        return_attention_mask: Optional[bool] = None,
+        return_overflowing_tokens: bool = False,
+        return_special_tokens_mask: bool = False,
+        return_offsets_mapping: bool = False,
+        return_length: bool = False,
+        verbose: bool = True,
+        **kwargs,
+    ) -> BatchEncoding:
+        def get_input_ids(text):
+            if isinstance(text, str):
+                tokens = self.tokenize(text, **kwargs)
+                return self.convert_tokens_to_ids(tokens)
+            elif (
+                isinstance(text, (list, tuple))
+                and len(text) > 0
+                and isinstance(text[0], str)
+            ):
+                if is_split_into_words:
+                    tokens = list(
+                        itertools.chain(
+                            *(
+                                self.tokenize(t, is_split_into_words=True, **kwargs)
+                                for t in text
+                            )
+                        )
+                    )
+                    return self.convert_tokens_to_ids(tokens)
+                else:
+                    return self.convert_tokens_to_ids(text)
+            elif (
+                isinstance(text, (list, tuple))
+                and len(text) > 0
+                and isinstance(text[0], int)
+            ):
+                return text
+            else:
+                if is_split_into_words:
+                    raise ValueError(
+                        f"Input {text} is not valid. Should be a string or a list/tuple of strings when `is_split_into_words=True`."
+                    )
+                else:
+                    raise ValueError(
+                        f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
+                    )
+
+        first_ids = get_input_ids(text)
+        second_ids = get_input_ids(text_pair) if text_pair is not None else None
+
+        if return_offsets_mapping:
+            kwargs["text"] = text
+            kwargs["text_pair"] = text_pair
+
+        return self.prepare_for_model(
+            first_ids,
+            pair_ids=second_ids,
+            add_special_tokens=add_special_tokens,
+            padding=padding_strategy.value,
+            truncation=truncation_strategy.value,
+            max_length=max_length,
+            stride=stride,
+            pad_to_multiple_of=pad_to_multiple_of,
+            return_tensors=return_tensors,
+            prepend_batch_axis=True,
+            return_position_ids=return_position_ids,
+            return_attention_mask=return_attention_mask,
+            return_token_type_ids=return_token_type_ids,
+            return_overflowing_tokens=return_overflowing_tokens,
+            return_special_tokens_mask=return_special_tokens_mask,
+            return_offsets_mapping=return_offsets_mapping,
+            return_length=return_length,
+            verbose=verbose,
+            **kwargs,
+        )
+
+    def _batch_encode_plus(
+        self,
+        batch_text_or_text_pairs: Union[
+            List[TextInput],
+            List[TextInputPair],
+            List[PreTokenizedInput],
+            List[PreTokenizedInputPair],
+            List[EncodedInput],
+            List[EncodedInputPair],
+        ],
+        add_special_tokens: bool = True,
+        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+        max_length: Optional[int] = None,
+        stride: int = 0,
+        is_split_into_words: bool = False,
+        pad_to_multiple_of: Optional[int] = None,
+        return_position_ids: Optional[bool] = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        return_token_type_ids: Optional[bool] = None,
+        return_attention_mask: Optional[bool] = None,
+        return_overflowing_tokens: bool = False,
+        return_special_tokens_mask: bool = False,
+        return_dict: bool = True,
+        return_offsets_mapping: bool = False,
+        return_length: bool = False,
+        verbose: bool = True,
+        **kwargs,
+    ) -> BatchEncoding:
+        def get_input_ids(text):
+            if isinstance(text, str):
+                tokens = self.tokenize(text, **kwargs)
+                return self.convert_tokens_to_ids(tokens)
+            elif (
+                isinstance(text, (list, tuple))
+                and len(text) > 0
+                and isinstance(text[0], str)
+            ):
+                if is_split_into_words:
+                    tokens = list(
+                        itertools.chain(
+                            *(
+                                self.tokenize(t, is_split_into_words=True, **kwargs)
+                                for t in text
+                            )
+                        )
+                    )
+                    return self.convert_tokens_to_ids(tokens)
+                else:
+                    return self.convert_tokens_to_ids(text)
+            elif (
+                isinstance(text, (list, tuple))
+                and len(text) > 0
+                and isinstance(text[0], int)
+            ):
+                return text
+            else:
+                raise ValueError(
+                    "Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
+                )
+
+        input_ids = []
+        for ids_or_pair_ids in batch_text_or_text_pairs:
+            if not isinstance(ids_or_pair_ids, (list, tuple)):
+                ids, pair_ids = ids_or_pair_ids, None
+            elif is_split_into_words and not isinstance(
+                ids_or_pair_ids[0], (list, tuple)
+            ):
+                ids, pair_ids = ids_or_pair_ids, None
+            else:
+                ids, pair_ids = ids_or_pair_ids
+
+            first_ids = get_input_ids(ids)
+            second_ids = get_input_ids(pair_ids) if pair_ids is not None else None
+            input_ids.append((first_ids, second_ids))
+
+        if stride > 0 and second_ids is not None:
+            kwargs["batch_text_or_text_pairs"] = batch_text_or_text_pairs
+        else:
+            if return_offsets_mapping:
+                has_pair = False
+                if len(batch_text_or_text_pairs) > 0:
+                    if isinstance(batch_text_or_text_pairs[0], (list, tuple)):
+                        has_pair = True
+                kwargs["texts"] = None
+                kwargs["text_pairs"] = None
+                if has_pair:
+                    kwargs["texts"] = [text[0] for text in batch_text_or_text_pairs]
+                    kwargs["text_pairs"] = [
+                        text[1] for text in batch_text_or_text_pairs
+                    ]
+                else:
+                    kwargs["texts"] = [text for text in batch_text_or_text_pairs]
+
+        batch_outputs = self._batch_prepare_for_model(
+            input_ids,
+            add_special_tokens=add_special_tokens,
+            padding_strategy=padding_strategy,
+            truncation_strategy=truncation_strategy,
+            max_length=max_length,
+            stride=stride,
+            pad_to_multiple_of=pad_to_multiple_of,
+            return_position_ids=return_position_ids,
+            return_attention_mask=return_attention_mask,
+            return_token_type_ids=return_token_type_ids,
+            return_overflowing_tokens=return_overflowing_tokens,
+            return_special_tokens_mask=return_special_tokens_mask,
+            return_dict=return_dict,
+            return_offsets_mapping=return_offsets_mapping,
+            return_length=return_length,
+            return_tensors=return_tensors,
+            verbose=verbose,
+            **kwargs,
+        )
+
+        return batch_outputs
+
+    def _batch_prepare_for_model(
+        self,
+        batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]],
+        add_special_tokens: bool = True,
+        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+        max_length: Optional[int] = None,
+        stride: int = 0,
+        pad_to_multiple_of: Optional[int] = None,
+        return_position_ids: Optional[bool] = None,
+        return_tensors: Optional[str] = None,
+        return_token_type_ids: Optional[bool] = None,
+        return_attention_mask: Optional[bool] = None,
+        return_overflowing_tokens: bool = False,
+        return_special_tokens_mask: bool = False,
+        return_dict: bool = True,
+        return_offsets_mapping: bool = False,
+        return_length: bool = False,
+        verbose: bool = True,
+        **kwargs,
+    ) -> BatchEncoding:
+        """
+        Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
+        adds special tokens, truncates sequences if they overflow while taking the special tokens into account, and
+        manages a moving window (with a user-defined stride) for overflowing tokens.
+
+        Args:
+            batch_ids_pairs: list of tokenized input ids or input ids pairs
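+
+        A worked example of the stride window (illustrative numbers): with
+        `len(first_ids) == 5`, `max_length == 10`, 3 special tokens and
+        `stride == 2`, each window keeps all of `first_ids` plus at most
+        `10 - 5 - 3 == 2` ids of `second_ids`, and the window advances by
+        `min(length, stride)` ids until `second_ids` is exhausted.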
+        """
+        if return_token_type_ids and not add_special_tokens:
+            raise ValueError(
+                "Asking to return token_type_ids while setting add_special_tokens to False "
+                "results in an undefined behavior. Please set add_special_tokens to True or "
+                "set return_token_type_ids to None."
+            )
+
+        batch_outputs = {}
+        batch_outputs_list = []
+        for example_id, (first_ids, second_ids) in enumerate(batch_ids_pairs):
+            if stride > 0 and second_ids is not None:
+                if return_token_type_ids is None:
+                    return_token_type_ids = "token_type_ids" in self.model_input_names
+                if return_attention_mask is None:
+                    return_attention_mask = "attention_mask" in self.model_input_names
+
+                max_len_for_pair = (
+                    max_length
+                    - len(first_ids)
+                    - (
+                        self.num_special_tokens_to_add(pair=True)
+                        if add_special_tokens
+                        else 0
+                    )
+                )
+
+                text, text_pair = kwargs["batch_text_or_text_pairs"][example_id]
+                token_offset_mapping = self.get_offset_mapping(text)
+                token_pair_offset_mapping = self.get_offset_mapping(text_pair)
+
+                offset = 0
+                while offset < len(second_ids):
+                    encoded_inputs = {}
+                    length = len(second_ids) - offset
+                    if length > max_len_for_pair:
+                        length = max_len_for_pair
+
+                    ids = first_ids
+                    pair_ids = second_ids[offset : offset + length]
+                    pair = bool(pair_ids is not None)
+                    mapping = token_offset_mapping
+                    pair_mapping = token_pair_offset_mapping[offset : offset + length]
+                    if add_special_tokens:
+                        offset_mapping = self.build_offset_mapping_with_special_tokens(
+                            mapping, pair_mapping
+                        )
+                        sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
+                        token_type_ids = self.create_token_type_ids_from_sequences(
+                            ids, pair_ids
+                        )
+                    else:
+                        offset_mapping = mapping + pair_mapping
+                        sequence = ids + pair_ids if pair else ids
+                        token_type_ids = [0] * len(ids) + (
+                            [0] * len(pair_ids) if pair else []
+                        )
+                    encoded_inputs["offset_mapping"] = offset_mapping
+                    # Build the output dictionary
+                    encoded_inputs["input_ids"] = sequence
+                    if return_token_type_ids:
+                        encoded_inputs["token_type_ids"] = token_type_ids
+                    if return_special_tokens_mask:
+                        if add_special_tokens:
+                            encoded_inputs["special_tokens_mask"] = (
+                                self.get_special_tokens_mask(ids, pair_ids)
+                            )
+                        else:
+                            encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
+
+                    # Check lengths
+                    self._eventual_warn_about_too_long_sequence(
+                        encoded_inputs["input_ids"], max_length, verbose
+                    )
+                    if return_position_ids:
+                        encoded_inputs["position_ids"] = list(
+                            range(len(encoded_inputs["input_ids"]))
+                        )
+
+                    if return_length:
+                        encoded_inputs["length"] = len(encoded_inputs["input_ids"])
+                        encoded_inputs["seq_len"] = encoded_inputs["length"]
+
+                    encoded_inputs["overflow_to_sample"] = example_id
+
+                    for key, value in encoded_inputs.items():
+                        if key not in batch_outputs:
+                            batch_outputs[key] = []
+                        batch_outputs[key].append(value)
+
+                    if offset + length == len(second_ids):
+                        break
+                    offset += min(length, stride)
+            else:
+                if return_offsets_mapping:
+                    kwargs["text"] = kwargs["texts"][example_id]
+                    kwargs["text_pair"] = None
+                    if kwargs["text_pairs"] is not None:
+                        kwargs["text_pair"] = kwargs["text_pairs"][example_id]
+
+                encoded_inputs = self.prepare_for_model(
+                    first_ids,
+                    second_ids,
+                    add_special_tokens=add_special_tokens,
+                    padding=PaddingStrategy.DO_NOT_PAD.value,  # we pad in batch afterward
+                    truncation=truncation_strategy.value,
+                    max_length=max_length,
+                    stride=stride,
+                    pad_to_multiple_of=None,  # we pad in batch afterward
+                    return_position_ids=return_position_ids,  # we pad in batch afterward
+                    return_attention_mask=False,  # we pad in batch afterward
+                    return_token_type_ids=return_token_type_ids,
+                    return_overflowing_tokens=return_overflowing_tokens,
+                    return_special_tokens_mask=return_special_tokens_mask,
+                    return_offsets_mapping=return_offsets_mapping,
+                    return_length=return_length,
+                    return_tensors=None,  # We convert the whole batch to tensors at the end
+                    prepend_batch_axis=False,
+                    verbose=verbose,
+                    **kwargs,
+                )
+                for key, value in encoded_inputs.items():
+                    if key not in batch_outputs:
+                        batch_outputs[key] = []
+                    batch_outputs[key].append(value)
+
+        batch_outputs = self.pad(
+            batch_outputs,
+            padding=padding_strategy.value,
+            max_length=max_length,
+            pad_to_multiple_of=pad_to_multiple_of,
+            return_attention_mask=return_attention_mask,
+        )
+        if return_dict:
+            batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
+            return batch_outputs
+        else:
+            for k, v in batch_outputs.items():
+                for i in range(len(v)):
+                    if i >= len(batch_outputs_list):
+                        batch_outputs_list.append({k: v[i]})
+                    else:
+                        batch_outputs_list[i][k] = v[i]
+            return batch_outputs_list
+
+    def _get_bert_like_offset_mapping(self, text: str):
+        """
+        Returns the offset mapping of tokens, i.e. the start and end character index of each token in the text.
+        Modified from https://github.com/bojone/bert4keras/blob/master/bert4keras/tokenizers.py#L372
+        Args:
+            text (str):
+                Input text.
+        Returns:
+            list: The offset map of input text.
+
+        """
+        if text is None:
+            return None
+        split_tokens = self.tokenize(text)
+
+        normalized_text, char_mapping = "", []
+
+        for i, ch in enumerate(text):
+            if hasattr(self, "do_lower_case") and self.do_lower_case:
+                ch = ch.lower()
+                if self.basic_tokenizer.strip_accents is not False:
+                    ch = unicodedata.normalize("NFD", ch)
+                    ch = "".join([c for c in ch if unicodedata.category(c) != "Mn"])
+            elif self.basic_tokenizer.strip_accents:
+                ch = unicodedata.normalize("NFD", ch)
+                ch = "".join([c for c in ch if unicodedata.category(c) != "Mn"])
+
+            ch = "".join(
+                [
+                    c
+                    for c in ch
+                    if not (ord(c) == 0 or ord(c) == 0xFFFD or _is_control(c))
+                ]
+            )
+            normalized_text += ch
+
+            char_mapping.extend([i] * len(ch))
+        text, token_mapping, offset = normalized_text, [], 0
+
+        char_mapping_indexes = []
+        for index, token in enumerate(split_tokens):
+            if token[:2] == "##":
+                token = token[2:]
+            if token in self.all_special_tokens:
+                token = (
+                    token.lower()
+                    if hasattr(self, "do_lower_case") and self.do_lower_case
+                    else token
+                )
+            # The greek letter "sigma" has 2 forms of lowercase, σ and ς respectively.
+            # When used as a final letter of a word, the final form (ς) is used. Otherwise, the form (σ) is used.
+            # https://latin.stackexchange.com/questions/6168/how-and-when-did-we-get-two-forms-of-sigma
+            if "σ" in token or "ς" in token:
+                start = (
+                    text[offset:].replace("ς", "σ").index(token.replace("ς", "σ"))
+                    + offset
+                )
+            else:
+
+                # try to fix: https://github.com/PaddlePaddle/PaddleNLP/issues/3985
+                if token not in text[offset:]:
+                    # check whether there are consecutive UNK tokens, eg: ['好', '[UNK]', '[UNK]', 'good']
+                    if (
+                        index < len(split_tokens) - 1
+                        and split_tokens[index + 1] in self.all_special_tokens
+                    ):
+                        start = offset
+                        token = " "  # only contains one char
+                    else:
+                        start = -1
+                else:
+                    start = text[offset:].index(token) + offset
+
+            end = start + len(token)
+            char_mapping_indexes.append([start, end])
+
+            if start != -1:
+                offset = end
+
+        token_mapping = []
+        for index, (start, end) in enumerate(char_mapping_indexes):
+            if start == -1:
+                # init start
+                if index == 0:
+                    start = 0
+                else:
+                    start = char_mapping_indexes[index - 1][1]
+
+                # init end
+                if index == len(char_mapping_indexes) - 1:
+                    end = len(char_mapping)
+                else:
+                    # next start
+                    end = char_mapping_indexes[index + 1][0]
+
+            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
+
+        return token_mapping
+
+    def get_offset_mapping(self, text: str, split_tokens: Optional[List[str]] = None):
+        """
+        Returns the offset mapping of tokens, i.e. the start and end character index of each token in the text.
+        Modified from https://github.com/bojone/bert4keras/blob/master/bert4keras/tokenizers.py#L372
+        Args:
+            text (str):
+                Input text.
+            split_tokens (Optional[List[str]]):
+                Pre-computed tokens of `text`; passing them can speed up the operation.
+
+        Returns:
+            list: The offset map of input text.
+
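+        Example (illustrative; the exact spans depend on the subclass's tokenization):
+
+        ```python
+        tokenizer.get_offset_mapping("hello world")
+        # e.g. [(0, 5), (6, 11)], the character span of each token in `text`
+        ```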
+        """
+        if text is None:
+            return None
+
+        # bert-like tokenizers use the old-school code path
+        if hasattr(self, "basic_tokenizer") or hasattr(self, "wordpiece_tokenizer"):
+            return self._get_bert_like_offset_mapping(text)
+
+        if not split_tokens:
+            split_tokens = self.tokenize(text)
+
+        normalized_text, char_mapping = "", []
+
+        for i, ch in enumerate(text):
+            normalized_text += normalize_chars(ch)
+            char_mapping.extend([i] * len(ch))
+
+        text, token_mapping, offset = normalized_text, [], 0
+        do_lower_case = getattr(self, "do_lower_case", False)
+
+        # lower the text if the token is lower-cased
+        # keep align with token
+        if do_lower_case:
+            text = text.lower()
+
+        char_mapping_indexes = []
+        for token in split_tokens:
+
+            # convert tokens into original string
+            token: str = self.convert_tokens_to_string(token).strip()
+
+            if token in self.all_special_tokens:
+                if do_lower_case:
+                    token = token.lower()
+
+            # The greek letter "sigma" has 2 forms of lowercase, σ and ς respectively.
+            # When used as a final letter of a word, the final form (ς) is used. Otherwise, the form (σ) is used.
+            # https://latin.stackexchange.com/questions/6168/how-and-when-did-we-get-two-forms-of-sigma
+            if "σ" in token or "ς" in token:
+                start = (
+                    text[offset:].replace("ς", "σ").index(token.replace("ς", "σ"))
+                    + offset
+                )
+            else:
+
+                # try to fix: https://github.com/PaddlePaddle/PaddleNLP/issues/3985
+                if token not in text[offset:]:
+                    start = -1
+                else:
+                    start = text[offset:].index(token) + offset
+
+            end = start + len(token)
+            char_mapping_indexes.append([start, end])
+
+            if start != -1:
+                offset = end
+
+        token_mapping = []
+        for index, (start, end) in enumerate(char_mapping_indexes):
+            if start == -1:
+                # init start
+                if index == 0:
+                    start = 0
+                else:
+                    start = char_mapping_indexes[index - 1][1]
+
+                # init end
+                if index == len(char_mapping_indexes) - 1:
+                    end = len(char_mapping)
+                else:
+                    # next start
+                    end = char_mapping_indexes[index + 1][0]
+
+            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
+
+        return token_mapping
+
+    def _decode(
+        self,
+        token_ids: List[int],
+        skip_special_tokens: bool = False,
+        clean_up_tokenization_spaces: bool = True,
+        spaces_between_special_tokens: bool = True,
+        **kwargs,
+    ) -> str:
+        if isinstance(token_ids, np.ndarray):
+            token_ids = token_ids.tolist()
+        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
+        filtered_tokens = self.convert_ids_to_tokens(
+            token_ids, skip_special_tokens=skip_special_tokens
+        )
+
+        # To avoid mixing byte-level and unicode for byte-level BPE
+        # we need to build string separately for added tokens and byte-level tokens
+        # cf. https://github.com/huggingface/transformers/issues/1133
+        sub_texts = []
+        current_sub_text = []
+        for token in filtered_tokens:
+            if skip_special_tokens and token in self.all_special_tokens:
+                continue
+            if token in self.added_tokens_encoder:
+                if current_sub_text:
+                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
+                    current_sub_text = []
+                sub_texts.append(token)
+            else:
+                current_sub_text.append(token)
+        if current_sub_text:
+            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
+
+        if spaces_between_special_tokens:
+            text = " ".join(sub_texts)
+        else:
+            text = "".join(sub_texts)
+
+        if clean_up_tokenization_spaces:
+            clean_text = self.clean_up_tokenization(text)
+            return clean_text
+        else:
+            return text
+
+    def decode_token(
+        self,
+        all_input_ids: List[int],
+        prefix_offset: int = 0,
+        read_offset: int = 0,
+    ) -> Tuple[str, int, int]:
+        """Tokenizer decoding for the streaming generation use case. This method can be overridden for tokenizers that don't follow this API."""
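+        # Illustrative usage (a sketch; `generated_ids` is a hypothetical stream of token ids):
+        #   text, prefix_offset, read_offset, all_ids = "", 0, 0, []
+        #   for next_id in generated_ids:
+        #       all_ids.append(next_id)
+        #       delta, prefix_offset, read_offset = self.decode_token(
+        #           all_ids, prefix_offset, read_offset
+        #       )
+        #       text += delta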
+        # The prefix text is necessary only to defeat cleanup algorithms in the decode
+        # which decide to add a space or not depending on the surrounding ids.
+        prefix_text = self.decode(
+            all_input_ids[prefix_offset:read_offset], skip_special_tokens=False
+        )
+        new_text = self.decode(all_input_ids[prefix_offset:], skip_special_tokens=False)
+
+        if len(new_text) > len(prefix_text) and not new_text.endswith("�"):
+            # A replacement char at the end means the text may end with an unfinished
+            # byte sequence from byte-fallback tokenization.
+            # If it appears in the middle, it's probably a real invalid id generated
+            # by the model.
+            prefix_index = new_text.index(prefix_text)
+            new_text = new_text[prefix_index + len(prefix_text) :]
+            return new_text, read_offset, len(all_input_ids)
+        else:
+            return "", prefix_offset, read_offset

+ 3504 - 0
paddlex/inference/models_new/common/tokenizer/tokenizer_utils_base.py

@@ -0,0 +1,3504 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import io
+import json
+import os
+
+import warnings
+from collections import OrderedDict, UserDict
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union
+
+import numpy as np
+import lazy_paddle as paddle
+
+from .....utils import logging
+
+__all__ = [
+    "AddedToken",
+    "FastEncoding",
+    "ExplicitEnum",
+    "PaddingStrategy",
+    "TensorType",
+    "TruncationStrategy",
+    "CharSpan",
+    "TokenSpan",
+    "BatchEncoding",
+    "SpecialTokensMixin",
+    "PretrainedTokenizerBase",
+]
+
+TOKENIZER_CONFIG_NAME = "tokenizer_config.json"
+CHAT_TEMPLATE_CONFIG_NAME = "chat_template.json"
+
+VERY_LARGE_INTEGER = int(
+    1e30
+)  # This is used to set the max input length for a model with infinite size input
+LARGE_INTEGER = int(
+    1e20
+)  # This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER
+
+# Define type aliases and NamedTuples
+TextInput = str
+PreTokenizedInput = List[str]
+EncodedInput = List[int]
+TextInputPair = Tuple[str, str]
+PreTokenizedInputPair = Tuple[List[str], List[str]]
+EncodedInputPair = Tuple[List[int], List[int]]
+
+# Slow tokenizers used to be saved in three separate files
+SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
+ADDED_TOKENS_FILE = "added_tokens.json"
+TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
+
+
+@dataclass(frozen=True, eq=True)
+class AddedToken:
+    """
+    AddedToken represents a token to be added to a Tokenizer. An AddedToken can have special options defining the
+    way it should behave.
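+
+    Example (illustrative):
+
+    ```python
+    # whitespace to the left of the token is stripped; whitespace to the right is kept
+    mask = AddedToken("<mask>", lstrip=True, rstrip=False)
+    str(mask)  # '<mask>'
+    ```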
+    """
+
+    content: str = field(default_factory=str)
+    single_word: bool = False
+    lstrip: bool = False
+    rstrip: bool = False
+    normalized: bool = True
+    special: bool = True
+
+    def __getstate__(self):
+        return self.__dict__
+
+    def __str__(self):
+        return self.content
+
+
+@dataclass
+class FastEncoding:
+    """This is a dummy class reserved for the fast tokenizer."""
+
+    pass
+
+
+class ExplicitEnum(Enum):
+    """
+    Enum with more explicit error message for missing values.
+    """
+
+    @classmethod
+    def _missing_(cls, value):
+        raise ValueError(
+            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
+        )
+
+
+class PaddingStrategy(ExplicitEnum):
+    """
+    Possible values for the `padding` argument in [`PretrainedTokenizerBase.__call__`]. Useful for tab-completion in an
+    IDE.
+    """
+
+    LONGEST = "longest"
+    MAX_LENGTH = "max_length"
+    DO_NOT_PAD = "do_not_pad"
+
+
+class TensorType(ExplicitEnum):
+    """
+    Possible values for the `return_tensors` argument in [`PretrainedTokenizerBase.__call__`]. Useful for
+    tab-completion in an IDE.
+    """
+
+    PADDLE = "pd"
+    NUMPY = "np"
+
+
+def to_py_obj(obj):
+    """
+    Convert a Paddle tensor, Numpy array or python list to a python list.
+    """
+    if isinstance(obj, (dict, UserDict)):
+        return {k: to_py_obj(v) for k, v in obj.items()}
+    elif isinstance(obj, (list, tuple)):
+        return [to_py_obj(o) for o in obj]
+    elif isinstance(obj, paddle.Tensor):
+        return obj.numpy().tolist()
+    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
+        return obj.tolist()
+    else:
+        return obj
+
+
+def _is_numpy(x):
+    return isinstance(x, np.ndarray)
+
+
+class TruncationStrategy(ExplicitEnum):
+    """
+    Possible values for the `truncation` argument in [`PretrainedTokenizerBase.__call__`]. Useful for tab-completion in
+    an IDE.
+    """
+
+    ONLY_FIRST = "only_first"
+    ONLY_SECOND = "only_second"
+    LONGEST_FIRST = "longest_first"
+    DO_NOT_TRUNCATE = "do_not_truncate"
+
+
+class CharSpan(NamedTuple):
+    """
+    Character span in the original string.
+
+    Args:
+        start (`int`): Index of the first character in the original string.
+        end (`int`): Index of the character following the last character in the original string.
+    """
+
+    start: int
+    end: int
+
+
+class TokenSpan(NamedTuple):
+    """
+    Token span in an encoded string (list of tokens).
+
+    Args:
+        start (`int`): Index of the first token in the span.
+        end (`int`): Index of the token following the last token in the span.
+    """
+
+    start: int
+    end: int
+
+
+class BatchEncoding(UserDict):
+    """
+    Holds the output of the [`PretrainedTokenizerBase.__call__`],
+    [`PretrainedTokenizerBase.encode_plus`] and
+    [`PretrainedTokenizerBase.batch_encode_plus`] methods (tokens, attention_masks, etc).
+
+    This class is derived from a python dictionary and can be used as a dictionary. In addition, this class exposes
+    utility methods to map from word/character space to token space.
+
+    Args:
+        data (`dict`):
+            Dictionary of lists/arrays/tensors returned by the `__call__`/`encode`/`batch_encode` methods
+            ('input_ids', 'attention_mask', etc.).
+        tensor_type (`Union[None, str, TensorType]`, *optional*):
+            You can give a tensor_type here to convert the lists of integers into Paddle/NumPy tensors at
+            initialization.
+        prepend_batch_axis (`bool`, *optional*, defaults to `False`):
+            Whether or not to add a batch axis when converting to tensors (see `tensor_type` above).
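+
+    Example (illustrative; assumes `tokenizer` is an instance of a
+    `PretrainedTokenizer` subclass):
+
+    ```python
+    encoded = tokenizer("hello world")
+    encoded["input_ids"]        # list of token ids
+    encoded.get("token_type_ids")
+    ```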
+    """
+
+    def __init__(
+        self,
+        data: Optional[Dict[str, Any]] = None,
+        encoding: Optional[Union[FastEncoding, Sequence[FastEncoding]]] = None,
+        tensor_type: Union[None, str] = None,
+        prepend_batch_axis: bool = False,
+        n_sequences: Optional[int] = None,
+    ):
+        super().__init__(data)
+
+        if isinstance(encoding, FastEncoding):
+            encoding = [encoding]
+
+        self._encodings = encoding
+
+        if n_sequences is None and encoding is not None and len(encoding):
+            n_sequences = encoding[0].n_sequences
+
+        self._n_sequences = n_sequences
+
+        self.convert_to_tensors(
+            tensor_type=tensor_type, prepend_batch_axis=prepend_batch_axis
+        )
+
+    @property
+    def n_sequences(self) -> Optional[int]:
+        """
+        `Optional[int]`: The number of sequences used to generate each sample from the batch encoded in this
+        [`BatchEncoding`]. Currently can be one of `None` (unknown), `1` (a single sentence) or `2` (a pair of
+        sentences)
+        """
+        return self._n_sequences
+
+    @property
+    def is_fast(self) -> bool:
+        """
+        `bool`: Indicate whether this [`BatchEncoding`] was generated from the result of a [`PretrainedFastTokenizer`]
+        or not.
+        """
+        return self._encodings is not None
+
+    def __getitem__(self, item: Union[int, str]) -> Union[Any, FastEncoding]:
+        """
+        If the key is a string, returns the value of the dict associated to `key` ('input_ids', 'attention_mask',
+        etc.).
+
+        If the key is an integer, get the `Encoding` for batch item with index `key`.
+        """
+        if isinstance(item, str):
+            return self.data[item]
+        elif self._encodings is not None:
+            return self._encodings[item]
+        else:
+            raise KeyError(
+                "Indexing with integers is not available when using tokenizer.__call__()"
+                " with return_dict=True. Please set return_dict to False to use integer indexing."
+            )
+
+    def __getattr__(self, item: str):
+        try:
+            return self.data[item]
+        except KeyError:
+            raise AttributeError
+
+    def __getstate__(self):
+        return {"data": self.data, "encodings": self._encodings}
+
+    def __setstate__(self, state):
+        if "data" in state:
+            self.data = state["data"]
+
+        if "encodings" in state:
+            self._encodings = state["encodings"]
+
+    def keys(self):
+        return self.data.keys()
+
+    def values(self):
+        return self.data.values()
+
+    def items(self):
+        return self.data.items()
+
+    # After this point:
+    # Extended properties and methods only available for fast tokenizers
+    # not yet supported
+
+    @property
+    def encodings(self) -> Optional[List[FastEncoding]]:
+        """
+        `Optional[List[FastEncoding]]`: The list of all encodings from the tokenization process. Returns `None` if
+        the input was tokenized through a Python (i.e., not fast) tokenizer.
+        """
+        return self._encodings
+
+    def tokens(self, batch_index: int = 0) -> List[str]:
+        """
+        Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion to
+        integer indices) at a given batch index (only works for the output of a fast tokenizer).
+
+        Args:
+            batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
+
+        Returns:
+            `List[str]`: The list of tokens at that index.
+        """
+        if not self._encodings:
+            raise ValueError(
+                "tokens() is not available when using Python-based tokenizers"
+            )
+        return self._encodings[batch_index].tokens
+
+    def sequence_ids(self, batch_index: int = 0) -> List[Optional[int]]:
+        """
+        Return a list mapping the tokens to the id of their original sentences:
+
+            - `None` for special tokens added around or between sequences,
+            - `0` for tokens corresponding to words in the first sequence,
+            - `1` for tokens corresponding to words in the second sequence when a pair of sequences was jointly
+              encoded.
+
+        Args:
+            batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
+
+        Returns:
+            `List[Optional[int]]`: A list indicating the sequence id corresponding to each token. Special tokens added
+            by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding
+            sequence.
+        """
+        if not self._encodings:
+            raise ValueError(
+                "sequence_ids() is not available when using Python-based tokenizers"
+            )
+        return self._encodings[batch_index].sequence_ids
+
+    def words(self, batch_index: int = 0) -> List[Optional[int]]:
+        """
+        Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.
+
+        Args:
+            batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
+
+        Returns:
+            `List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the
+            tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word
+            (several tokens will be mapped to the same word index if they are parts of that word).
+        """
+        if not self._encodings:
+            raise ValueError(
+                "words() is not available when using Python-based tokenizers"
+            )
+        warnings.warn(
+            "`BatchEncoding.words()` property is deprecated and should be replaced with the identical, "
+            "but more self-explanatory `BatchEncoding.word_ids()` property.",
+            FutureWarning,
+        )
+        return self.word_ids(batch_index)
+
+    def word_ids(self, batch_index: int = 0) -> List[Optional[int]]:
+        """
+        Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.
+
+        Args:
+            batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
+
+        Returns:
+            `List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the
+            tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word
+            (several tokens will be mapped to the same word index if they are parts of that word).
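+
+        A minimal usage sketch (illustrative only; like `tokens()`, this requires a fast tokenizer
+        backend, which is not available in this port yet):
+
+        ```python
+        encoding = tokenizer("An input sentence")
+        if encoding.encodings is not None:
+            # Sub-tokens of the same word share one word index; special tokens map to None.
+            print(encoding.word_ids(0))
+        ```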
+        """
+        if not self._encodings:
+            raise ValueError(
+                "word_ids() is not available when using Python-based tokenizers"
+            )
+        return self._encodings[batch_index].word_ids
+
+    def token_to_sequence(
+        self, batch_or_token_index: int, token_index: Optional[int] = None
+    ) -> int:
+        """
+        Get the index of the sequence represented by the given token. In the general use case, this method returns `0`
+        for a single sequence or the first sequence of a pair, and `1` for the second sequence of a pair.
+
+        Can be called as:
+
+        - `self.token_to_sequence(token_index)` if batch size is 1
+        - `self.token_to_sequence(batch_index, token_index)` if batch size is greater than 1
+
+        This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.,
+        words are defined by the user). In this case it allows to easily associate encoded tokens with provided
+        tokenized words.
+
+        Args:
+            batch_or_token_index (`int`):
+                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
+                the token in the sequence.
+            token_index (`int`, *optional*):
+                If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
+                sequence.
+
+        Returns:
+            `int`: Index of the sequence the given token belongs to (0 or 1).
+        """
+
+        if not self._encodings:
+            raise ValueError(
+                "token_to_sequence() is not available when using Python based tokenizers"
+            )
+        if token_index is not None:
+            batch_index = batch_or_token_index
+        else:
+            batch_index = 0
+            token_index = batch_or_token_index
+        if batch_index < 0:
+            batch_index = self._batch_size + batch_index
+        if token_index < 0:
+            token_index = self._seq_len + token_index
+        return self._encodings[batch_index].token_to_sequence(token_index)
+
+    def token_to_word(
+        self, batch_or_token_index: int, token_index: Optional[int] = None
+    ) -> int:
+        """
+        Get the index of the word corresponding (i.e. comprising) to an encoded token in a sequence of the batch.
+
+        Can be called as:
+
+        - `self.token_to_word(token_index)` if batch size is 1
+        - `self.token_to_word(batch_index, token_index)` if batch size is greater than 1
+
+        This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.,
+        words are defined by the user). In this case it allows to easily associate encoded tokens with provided
+        tokenized words.
+
+        Args:
+            batch_or_token_index (`int`):
+                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
+                the token in the sequence.
+            token_index (`int`, *optional*):
+                If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
+                sequence.
+
+        Returns:
+            `int`: Index of the word in the input sequence.
+        """
+
+        if not self._encodings:
+            raise ValueError(
+                "token_to_word() is not available when using Python based tokenizers"
+            )
+        if token_index is not None:
+            batch_index = batch_or_token_index
+        else:
+            batch_index = 0
+            token_index = batch_or_token_index
+        if batch_index < 0:
+            batch_index = self._batch_size + batch_index
+        if token_index < 0:
+            token_index = self._seq_len + token_index
+        return self._encodings[batch_index].token_to_word(token_index)
+
+    def word_to_tokens(
+        self,
+        batch_or_word_index: int,
+        word_index: Optional[int] = None,
+        sequence_index: int = 0,
+    ) -> Optional[TokenSpan]:
+        """
+        Get the encoded token span corresponding to a word in a sequence of the batch.
+
+        Token spans are returned as a [`TokenSpan`] with:
+
+        - **start** -- Index of the first token.
+        - **end** -- Index of the token following the last token.
+
+        Can be called as:
+
+        - `self.word_to_tokens(word_index, sequence_index: int = 0)` if batch size is 1
+        - `self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)` if batch size is greater or equal to
+          1
+
+        This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
+        are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized
+        words.
+
+        Args:
+            batch_or_word_index (`int`):
+                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
+                the word in the sequence.
+            word_index (`int`, *optional*):
+                If a batch index is provided in *batch_or_word_index*, this can be the index of the word in the
+                sequence.
+            sequence_index (`int`, *optional*, defaults to 0):
+                If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the pair
+                (0 or 1) the provided word index belongs to.
+
+        Returns:
+            `Optional[TokenSpan]`: Span of tokens in the encoded sequence. Returns `None` if
+            no tokens correspond to the word.
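+
+        A minimal usage sketch (illustrative only; assumes a fast tokenizer backend, which this
+        port does not ship yet, and pre-tokenized input):
+
+        ```python
+        encoding = tokenizer(["hello", "world"], is_split_into_words=True)
+        if encoding.encodings is not None:
+            span = encoding.word_to_tokens(1)  # TokenSpan covering "world", or None
+            print(span)
+        ```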
+        """
+
+        if not self._encodings:
+            raise ValueError(
+                "word_to_tokens() is not available when using Python based tokenizers"
+            )
+        if word_index is not None:
+            batch_index = batch_or_word_index
+        else:
+            batch_index = 0
+            word_index = batch_or_word_index
+        if batch_index < 0:
+            batch_index = self._batch_size + batch_index
+        if word_index < 0:
+            word_index = self._seq_len + word_index
+        span = self._encodings[batch_index].word_to_tokens(word_index, sequence_index)
+        return TokenSpan(*span) if span is not None else None
+
+    def token_to_chars(
+        self, batch_or_token_index: int, token_index: Optional[int] = None
+    ) -> CharSpan:
+        """
+        Get the character span corresponding to an encoded token in a sequence of the batch.
+
+        Character spans are returned as a [`CharSpan`] with:
+
+        - **start** -- Index of the first character in the original string associated to the token.
+        - **end** -- Index of the character following the last character in the original string associated to the
+          token.
+
+        Can be called as:
+
+        - `self.token_to_chars(token_index)` if batch size is 1
+        - `self.token_to_chars(batch_index, token_index)` if batch size is greater or equal to 1
+
+        Args:
+            batch_or_token_index (`int`):
+                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
+                the token in the sequence.
+            token_index (`int`, *optional*):
+                If a batch index is provided in *batch_or_token_index*, this can be the index of the token or tokens in
+                the sequence.
+
+        Returns:
+            [`CharSpan`]: Span of characters in the original string.
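+
+        A minimal usage sketch (illustrative only; assumes a fast tokenizer backend, which is not
+        available in this port yet):
+
+        ```python
+        text = "PaddleX"
+        encoding = tokenizer(text)
+        if encoding.encodings is not None:
+            start, end = encoding.token_to_chars(1)
+            print(text[start:end])  # characters covered by the token at index 1
+        ```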
+        """
+
+        if not self._encodings:
+            raise ValueError(
+                "token_to_chars() is not available when using Python based tokenizers"
+            )
+        if token_index is not None:
+            batch_index = batch_or_token_index
+        else:
+            batch_index = 0
+            token_index = batch_or_token_index
+        return CharSpan(*(self._encodings[batch_index].token_to_chars(token_index)))
+
+    def char_to_token(
+        self,
+        batch_or_char_index: int,
+        char_index: Optional[int] = None,
+        sequence_index: int = 0,
+    ) -> int:
+        """
+        Get the index of the token in the encoded output comprising a character in the original string for a sequence
+        of the batch.
+
+        Can be called as:
+
+        - `self.char_to_token(char_index)` if batch size is 1
+        - `self.char_to_token(batch_index, char_index)` if batch size is greater or equal to 1
+
+        This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
+        are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized
+        words.
+
+        Args:
+            batch_or_char_index (`int`):
+                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
+                the character in the original string.
+            char_index (`int`, *optional*):
+                If a batch index is provided in *batch_or_char_index*, this can be the index of the character in the
+                sequence.
+            sequence_index (`int`, *optional*, defaults to 0):
+                If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the pair
+                (0 or 1) the provided character index belongs to.
+
+
+        Returns:
+            `int`: Index of the token.
+        """
+
+        if not self._encodings:
+            raise ValueError(
+                "char_to_token() is not available when using Python based tokenizers"
+            )
+        if char_index is not None:
+            batch_index = batch_or_char_index
+        else:
+            batch_index = 0
+            char_index = batch_or_char_index
+        return self._encodings[batch_index].char_to_token(char_index, sequence_index)
+
+    def word_to_chars(
+        self,
+        batch_or_word_index: int,
+        word_index: Optional[int] = None,
+        sequence_index: int = 0,
+    ) -> CharSpan:
+        """
+        Get the character span in the original string corresponding to given word in a sequence of the batch.
+
+        Character spans are returned as a CharSpan NamedTuple with:
+
+        - start: index of the first character in the original string
+        - end: index of the character following the last character in the original string
+
+        Can be called as:
+
+        - `self.word_to_chars(word_index)` if batch size is 1
+        - `self.word_to_chars(batch_index, word_index)` if batch size is greater or equal to 1
+
+        Args:
+            batch_or_word_index (`int`):
+                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
+                the word in the sequence.
+            word_index (`int`, *optional*):
+                If a batch index is provided in *batch_or_word_index*, this can be the index of the word in the
+                sequence.
+            sequence_index (`int`, *optional*, defaults to 0):
+                If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the pair
+                (0 or 1) the provided word index belongs to.
+
+        Returns:
+            `CharSpan` or `List[CharSpan]`: Span(s) of the associated character or characters in the string. CharSpan
+            are NamedTuple with:
+
+                - start: index of the first character associated to the token in the original string
+                - end: index of the character following the last character associated to the token in the original
+                  string
+        """
+
+        if not self._encodings:
+            raise ValueError(
+                "word_to_chars() is not available when using Python based tokenizers"
+            )
+        if word_index is not None:
+            batch_index = batch_or_word_index
+        else:
+            batch_index = 0
+            word_index = batch_or_word_index
+        return CharSpan(
+            *(self._encodings[batch_index].word_to_chars(word_index, sequence_index))
+        )
+
+    def char_to_word(
+        self,
+        batch_or_char_index: int,
+        char_index: Optional[int] = None,
+        sequence_index: int = 0,
+    ) -> int:
+        """
+        Get the word in the original string corresponding to a character in the original string of a sequence of the
+        batch.
+
+        Can be called as:
+
+        - `self.char_to_word(char_index)` if batch size is 1
+        - `self.char_to_word(batch_index, char_index)` if batch size is greater than 1
+
+        This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
+        are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized
+        words.
+
+        Args:
+            batch_or_char_index (`int`):
+                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
+                the character in the original string.
+            char_index (`int`, *optional*):
+                If a batch index is provided in *batch_or_char_index*, this can be the index of the character in the
+                original string.
+            sequence_index (`int`, *optional*, defaults to 0):
+                If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the pair
+                (0 or 1) the provided character index belongs to.
+
+
+        Returns:
+            `int` or `List[int]`: Index or indices of the corresponding word(s) in the input sequence.
+        """
+
+        if not self._encodings:
+            raise ValueError(
+                "char_to_word() is not available when using Python based tokenizers"
+            )
+        if char_index is not None:
+            batch_index = batch_or_char_index
+        else:
+            batch_index = 0
+            char_index = batch_or_char_index
+        return self._encodings[batch_index].char_to_word(char_index, sequence_index)
+
+    def convert_to_tensors(
+        self,
+        tensor_type: Optional[Union[str, TensorType]] = None,
+        prepend_batch_axis: bool = False,
+    ):
+        """
+        Convert the inner content to tensors.
+
+        Args:
+            tensor_type (`str` or [`TensorType`], *optional*):
+                The type of tensors to use. If `str`, should be one of the values of the enum [`TensorType`]. If
+                `None`, no modification is done.
+            prepend_batch_axis (`bool`, *optional*, defaults to `False`):
+                Whether or not to add the batch dimension during the conversion.
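+
+        A minimal usage sketch (illustrative only; `"pd"` and `"np"` are assumed to be the
+        [`TensorType`] values for Paddle and NumPy tensors):
+
+        ```python
+        encoded = tokenizer(["first text", "second text"], padding=True)
+        encoded.convert_to_tensors("pd")
+        print(encoded["input_ids"].shape)  # a paddle.Tensor of shape [2, seq_len]
+        ```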
+        """
+        if tensor_type is None:
+            return self
+
+        # Convert to TensorType
+        if not isinstance(tensor_type, TensorType):
+            tensor_type = TensorType(tensor_type)
+        # Get a function reference for the correct framework
+        if tensor_type == TensorType.PADDLE:
+            as_tensor = paddle.to_tensor
+            is_tensor = paddle.is_tensor
+        else:
+            as_tensor = np.asarray
+            is_tensor = _is_numpy
+
+        # Do the tensor conversion in batch
+        for key, value in self.items():
+            try:
+                if prepend_batch_axis:
+                    value = [value]
+
+                if not is_tensor(value):
+                    tensor = as_tensor(value)
+
+                    self[key] = tensor
+            except:  # noqa E722
+                if key == "overflowing_tokens":
+                    raise ValueError(
+                        "Unable to create tensor returning overflowing tokens of different lengths. "
+                        "Please see if a fast version of this tokenizer is available to have this feature available."
+                    )
+                raise ValueError(
+                    "Unable to create tensor, you should probably activate truncation and/or padding "
+                    "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
+                )
+
+        return self
+
+
+class SpecialTokensMixin:
+    """
+    A mixin inherited by [`PretrainedTokenizer`] to handle specific behaviors related to
+    special tokens. In particular, this class holds the attributes which can be used to directly access these special
+    tokens in a model-independent manner and allows setting and updating the special tokens.
+
+    Args:
+        bos_token (`str` or `AddedToken`, *optional*):
+            A special token representing the beginning of a sentence.
+        eos_token (`str` or `AddedToken`, *optional*):
+            A special token representing the end of a sentence.
+        unk_token (`str` or `AddedToken`, *optional*):
+            A special token representing an out-of-vocabulary token.
+        sep_token (`str` or `AddedToken`, *optional*):
+            A special token separating two different sentences in the same input (used by BERT for instance).
+        pad_token (`str` or `AddedToken`, *optional*):
+            A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
+            attention mechanisms or loss computation.
+        cls_token (`str` or `AddedToken`, *optional*):
+            A special token representing the class of the input (used by BERT for instance).
+        mask_token (`str` or `AddedToken`, *optional*):
+            A special token representing a masked token (used by masked-language modeling pretraining objectives, like
+            BERT).
+        additional_special_tokens (tuple or list of `str` or `AddedToken`, *optional*):
+            A tuple or a list of additional special tokens.
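+
+    A minimal usage sketch (illustrative only; `"gpt2-en"` is a placeholder model name and concrete
+    tokenizers forward these keyword arguments through their own `__init__`):
+
+    ```python
+    tokenizer = GPTTokenizer.from_pretrained(
+        "gpt2-en", pad_token="<pad>", additional_special_tokens=["<sep>"]
+    )
+    print(tokenizer.pad_token, tokenizer.additional_special_tokens)
+    ```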
+    """
+
+    SPECIAL_TOKENS_ATTRIBUTES = [
+        "bos_token",
+        "eos_token",
+        "unk_token",
+        "sep_token",
+        "pad_token",
+        "cls_token",
+        "mask_token",
+        "additional_special_tokens",
+    ]
+
+    def __init__(self, verbose=True, **kwargs):
+        # note(guosheng): Since `__init__` might be called multiple times which
+        # is hooked before `PretrainedTokenizer` init, we do not set to None as
+        # HF to avoid unintentional overriding.
+        self._bos_token = getattr(self, "_bos_token", None)
+        self._eos_token = getattr(self, "_eos_token", None)
+        self._unk_token = getattr(self, "_unk_token", None)
+        self._sep_token = getattr(self, "_sep_token", None)
+        self._pad_token = getattr(self, "_pad_token", None)
+        self._cls_token = getattr(self, "_cls_token", None)
+        self._mask_token = getattr(self, "_mask_token", None)
+        self._pad_token_type_id = getattr(self, "_pad_token_type_id", 0)
+        self._additional_special_tokens = getattr(
+            self, "_additional_special_tokens", []
+        )
+        self.verbose = verbose
+
+        # We directly set the hidden value to allow initialization with special tokens
+        # which are not yet in the vocabulary. Necessary for serialization/de-serialization
+        # TODO clean this up at some point (probably by switching to fast tokenizers)
+        for key, value in kwargs.items():
+            if value is None:
+                continue
+            if key in self.SPECIAL_TOKENS_ATTRIBUTES:
+                if key == "additional_special_tokens":
+                    assert isinstance(
+                        value, (list, tuple)
+                    ), f"Value {value} is not a list or tuple"
+                    assert all(
+                        isinstance(t, (str, AddedToken)) for t in value
+                    ), "One of the tokens is not a string or an AddedToken"
+                    setattr(self, key, value)
+                elif isinstance(value, (str, AddedToken)):
+                    setattr(self, key, value)
+                else:
+                    raise TypeError(
+                        f"special token {key} has to be either str or AddedToken but got: {type(value)}"
+                    )
+
+    def sanitize_special_tokens(self) -> int:
+        """
+        Make sure that all the special tokens attributes of the tokenizer (`tokenizer.mask_token`,
+        `tokenizer.cls_token`, etc.) are in the vocabulary.
+
+        Add the missing ones to the vocabulary if needed.
+
+        Return:
+            `int`: The number of tokens added in the vocabulary during the operation.
+        """
+        return self.add_tokens(self.all_special_tokens_extended, special_tokens=True)
+
+    def add_special_tokens(
+        self, special_tokens_dict: Dict[str, Union[str, AddedToken]]
+    ) -> int:
+        """
+        Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If
+        special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the
+        current vocabulary).
+
+        Note: When adding new tokens to the vocabulary, you should make sure to also resize the token embedding
+        matrix of the model so that its embedding matrix matches the tokenizer.
+
+        In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method.
+
+        Using `add_special_tokens` will ensure your special tokens can be used in several ways:
+
+        - Special tokens are carefully handled by the tokenizer (they are never split).
+        - You can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This
+          makes it easy to develop model-agnostic training and fine-tuning scripts.
+
+        When possible, special tokens are already registered for provided pretrained models (for instance
+        [`BertTokenizer`] `cls_token` is already registered to be `'[CLS]'` and XLM's one is also registered to be
+        `'</s>'`).
+
+        Args:
+            special_tokens_dict (dictionary *str* to *str* or `AddedToken`):
+                Keys should be in the list of predefined special attributes: [`bos_token`, `eos_token`, `unk_token`,
+                `sep_token`, `pad_token`, `cls_token`, `mask_token`, `additional_special_tokens`].
+
+                Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer
+                assigns the index of the `unk_token` to them).
+
+        Returns:
+            `int`: Number of tokens added to the vocabulary.
+
+        Examples:
+
+        ```python
+        # Let's see how to add a new classification token to GPT-2
+        tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+        model = GPT2Model.from_pretrained("gpt2")
+
+        special_tokens_dict = {"cls_token": "<CLS>"}
+
+        num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
+        print("We have added", num_added_toks, "tokens")
+        # Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
+        model.resize_token_embeddings(len(tokenizer))
+
+        assert tokenizer.cls_token == "<CLS>"
+        ```"""
+        if not special_tokens_dict:
+            return 0
+
+        added_tokens = 0
+        for key, value in special_tokens_dict.items():
+            assert (
+                key in self.SPECIAL_TOKENS_ATTRIBUTES
+            ), f"Key {key} is not a special token"
+
+            if self.verbose:
+                logging.info(f"Assigning {value} to the {key} key of the tokenizer")
+            setattr(self, key, value)
+
+            if key == "additional_special_tokens":
+                assert isinstance(value, (list, tuple)) and all(
+                    isinstance(t, (str, AddedToken)) for t in value
+                ), f"Tokens {value} for key {key} should all be str or AddedToken instances"
+                added_tokens += self.add_tokens(value, special_tokens=True)
+            else:
+                assert isinstance(
+                    value, (str, AddedToken)
+                ), f"Token {value} for key {key} should be a str or an AddedToken instance"
+                added_tokens += self.add_tokens([value], special_tokens=True)
+
+        return added_tokens
+
+    def add_tokens(
+        self,
+        new_tokens: Union[str, AddedToken, List[Union[str, AddedToken]]],
+        special_tokens: bool = False,
+    ) -> int:
+        """
+        Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
+        it with indices starting from length of the current vocabulary.
+
+        Note: When adding new tokens to the vocabulary, you should make sure to also resize the token embedding
+        matrix of the model so that its embedding matrix matches the tokenizer.
+
+        In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method.
+
+        Args:
+            new_tokens (`str`, `AddedToken` or a list of *str* or `AddedToken`):
+                Tokens are only added if they are not already in the vocabulary. `AddedToken` wraps a string
+                token to let you personalize its behavior: whether this token should only match against a single word,
+                whether this token should strip all potential whitespaces on the left side, whether this token should
+                strip all potential whitespaces on the right side, etc.
+            special_tokens (`bool`, *optional*, defaults to `False`):
+                Can be used to specify if the token is a special token. This mostly changes the normalization behavior
+                (special tokens like CLS or [MASK] are usually not lower-cased for instance).
+
+        Returns:
+            `int`: Number of tokens added to the vocabulary.
+
+        Examples:
+
+        ```python
+        # Let's see how to increase the vocabulary of Bert model and tokenizer
+        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+        model = BertModel.from_pretrained("bert-base-uncased")
+
+        num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
+        print("We have added", num_added_toks, "tokens")
+        # Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
+        model.resize_token_embeddings(len(tokenizer))
+        ```"""
+        if not new_tokens:
+            return 0
+
+        if not isinstance(new_tokens, (list, tuple)):
+            new_tokens = [new_tokens]
+
+        return self._add_tokens(new_tokens, special_tokens=special_tokens)
+
+    def _add_tokens(
+        self,
+        new_tokens: Union[List[str], List[AddedToken]],
+        special_tokens: bool = False,
+    ) -> int:
+        raise NotImplementedError
+
+    @property
+    def bos_token(self) -> str:
+        """
+        `str`: Beginning of sentence token. Log an error if used while not having been set.
+        """
+        if self._bos_token is None and self.verbose:
+            logging.error("Using bos_token, but it is not set yet.")
+            return None
+        return str(self._bos_token)
+
+    @property
+    def eos_token(self) -> str:
+        """
+        `str`: End of sentence token. Log an error if used while not having been set.
+        """
+        if self._eos_token is None and self.verbose:
+            logging.error("Using eos_token, but it is not set yet.")
+            return None
+        return str(self._eos_token)
+
+    @property
+    def unk_token(self) -> str:
+        """
+        `str`: Unknown token. Log an error if used while not having been set.
+        """
+        if self._unk_token is None and self.verbose:
+            logging.error("Using unk_token, but it is not set yet.")
+            return None
+        return str(self._unk_token)
+
+    @property
+    def sep_token(self) -> str:
+        """
+        `str`: Separation token, to separate context and query in an input sequence. Log an error if used while not
+        having been set.
+        """
+        if self._sep_token is None and self.verbose:
+            logging.error("Using sep_token, but it is not set yet.")
+            return None
+        return str(self._sep_token)
+
+    @property
+    def pad_token(self) -> str:
+        """
+        `str`: Padding token. Log an error if used while not having been set.
+        """
+        if self._pad_token is None and self.verbose:
+            logging.error("Using pad_token, but it is not set yet.")
+            return None
+        return str(self._pad_token)
+
+    @property
+    def cls_token(self) -> str:
+        """
+        `str`: Classification token, to extract a summary of an input sequence leveraging self-attention along the full
+        depth of the model. Log an error if used while not having been set.
+        """
+        if self._cls_token is None and self.verbose:
+            logging.error("Using cls_token, but it is not set yet.")
+            return None
+        return str(self._cls_token)
+
+    @property
+    def mask_token(self) -> str:
+        """
+        `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
+        having been set.
+        """
+        if self._mask_token is None and self.verbose:
+            logging.error("Using mask_token, but it is not set yet.")
+            return None
+        return str(self._mask_token)
+
+    @property
+    def additional_special_tokens(self) -> List[str]:
+        """
+        `List[str]`: All the additional special tokens you may want to use. Log an error if used while not having been
+        set.
+        """
+        if self._additional_special_tokens is None and self.verbose:
+            logging.error("Using additional_special_tokens, but it is not set yet.")
+            return None
+        return [str(tok) for tok in self._additional_special_tokens]
+
+    @bos_token.setter
+    def bos_token(self, value):
+        self._bos_token = value
+
+    @eos_token.setter
+    def eos_token(self, value):
+        self._eos_token = value
+
+    @unk_token.setter
+    def unk_token(self, value):
+        self._unk_token = value
+
+    @sep_token.setter
+    def sep_token(self, value):
+        self._sep_token = value
+
+    @pad_token.setter
+    def pad_token(self, value):
+        self._pad_token = value
+
+    @cls_token.setter
+    def cls_token(self, value):
+        self._cls_token = value
+
+    @mask_token.setter
+    def mask_token(self, value):
+        self._mask_token = value
+
+    @additional_special_tokens.setter
+    def additional_special_tokens(self, value):
+        self._additional_special_tokens = value
+
+    @property
+    def bos_token_id(self) -> Optional[int]:
+        """
+        `Optional[int]`: Id of the beginning of sentence token in the vocabulary. Returns `None` if the token has not
+        been set.
+        """
+        if self._bos_token is None:
+            return None
+        return self.convert_tokens_to_ids(self.bos_token)
+
+    @property
+    def eos_token_id(self) -> Optional[int]:
+        """
+        `Optional[int]`: Id of the end of sentence token in the vocabulary. Returns `None` if the token has not been
+        set.
+        """
+        if self._eos_token is None:
+            return None
+        return self.convert_tokens_to_ids(self.eos_token)
+
+    @property
+    def unk_token_id(self) -> Optional[int]:
+        """
+        `Optional[int]`: Id of the unknown token in the vocabulary. Returns `None` if the token has not been set.
+        """
+        if self._unk_token is None:
+            return None
+        return self.convert_tokens_to_ids(self.unk_token)
+
+    @property
+    def sep_token_id(self) -> Optional[int]:
+        """
+        `Optional[int]`: Id of the separation token in the vocabulary, to separate context and query in an input
+        sequence. Returns `None` if the token has not been set.
+        """
+        if self._sep_token is None:
+            return None
+        return self.convert_tokens_to_ids(self.sep_token)
+
+    @property
+    def pad_token_id(self) -> Optional[int]:
+        """
+        `Optional[int]`: Id of the padding token in the vocabulary. Returns `None` if the token has not been set.
+        """
+        if self._pad_token is None:
+            return None
+        return self.convert_tokens_to_ids(self.pad_token)
+
+    @property
+    def pad_token_type_id(self) -> int:
+        """
+        `int`: Id of the padding token type in the vocabulary.
+        """
+        return self._pad_token_type_id
+
+    @property
+    def cls_token_id(self) -> Optional[int]:
+        """
+        `Optional[int]`: Id of the classification token in the vocabulary, to extract a summary of an input sequence
+        leveraging self-attention along the full depth of the model.
+
+        Returns `None` if the token has not been set.
+        """
+        if self._cls_token is None:
+            return None
+        return self.convert_tokens_to_ids(self.cls_token)
+
+    @property
+    def mask_token_id(self) -> Optional[int]:
+        """
+        `Optional[int]`: Id of the mask token in the vocabulary, used when training a model with masked-language
+        modeling. Returns `None` if the token has not been set.
+        """
+        if self._mask_token is None:
+            return None
+        return self.convert_tokens_to_ids(self.mask_token)
+
+    @property
+    def additional_special_tokens_ids(self) -> List[int]:
+        """
+        `List[int]`: Ids of all the additional special tokens in the vocabulary. Log an error if used while not having
+        been set.
+        """
+        return self.convert_tokens_to_ids(self.additional_special_tokens)
+
+    @bos_token_id.setter
+    def bos_token_id(self, value):
+        self._bos_token = (
+            self.convert_ids_to_tokens(value) if value is not None else None
+        )
+
+    @eos_token_id.setter
+    def eos_token_id(self, value):
+        self._eos_token = (
+            self.convert_ids_to_tokens(value) if value is not None else None
+        )
+
+    @unk_token_id.setter
+    def unk_token_id(self, value):
+        self._unk_token = (
+            self.convert_ids_to_tokens(value) if value is not None else None
+        )
+
+    @sep_token_id.setter
+    def sep_token_id(self, value):
+        self._sep_token = (
+            self.convert_ids_to_tokens(value) if value is not None else None
+        )
+
+    @pad_token_id.setter
+    def pad_token_id(self, value):
+        self._pad_token = (
+            self.convert_ids_to_tokens(value) if value is not None else None
+        )
+
+    @cls_token_id.setter
+    def cls_token_id(self, value):
+        self._cls_token = (
+            self.convert_ids_to_tokens(value) if value is not None else None
+        )
+
+    @mask_token_id.setter
+    def mask_token_id(self, value):
+        self._mask_token = (
+            self.convert_ids_to_tokens(value) if value is not None else None
+        )
+
+    @additional_special_tokens_ids.setter
+    def additional_special_tokens_ids(self, values):
+        self._additional_special_tokens = [
+            self.convert_ids_to_tokens(value) for value in values
+        ]
+
+    @property
+    def special_tokens_map(self) -> Dict[str, Union[str, List[str]]]:
+        """
+        `Dict[str, Union[str, List[str]]]`: A dictionary mapping special token class attributes (`cls_token`,
+        `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.).
+
+        Convert potential tokens of `AddedToken` type to string.
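+
+        A minimal usage sketch (illustrative output for a hypothetical BERT-style tokenizer):
+
+        ```python
+        print(tokenizer.special_tokens_map)
+        # {'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]',
+        #  'cls_token': '[CLS]', 'mask_token': '[MASK]'}
+        ```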
+        """
+        set_attr = {}
+        for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
+            attr_value = getattr(self, "_" + attr)
+            if attr_value:
+                set_attr[attr] = (
+                    type(attr_value)(
+                        str(attr_value_sub) for attr_value_sub in attr_value
+                    )
+                    if isinstance(attr_value, (list, tuple))
+                    else str(attr_value)
+                )
+        return set_attr
+
+    @property
+    def special_tokens_map_extended(
+        self,
+    ) -> Dict[str, Union[str, AddedToken, List[Union[str, AddedToken]]]]:
+        """
+        `Dict[str, Union[str, AddedToken, List[Union[str, AddedToken]]]]`: A dictionary mapping
+        special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.).
+
+        Don't convert tokens of `AddedToken` type to string so they can be used to control more finely how
+        special tokens are tokenized.
+        """
+        set_attr = {}
+        for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
+            attr_value = getattr(self, "_" + attr, None)
+            if attr_value:
+                set_attr[attr] = attr_value
+        return set_attr
+
+    @property
+    def all_special_tokens(self) -> List[str]:
+        """
+        `List[str]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes.
+
+        Convert tokens of `AddedToken` type to string.
+        """
+        all_toks = [str(s) for s in self.all_special_tokens_extended]
+        return all_toks
+
+    @property
+    def all_special_tokens_extended(self) -> List[Union[str, AddedToken]]:
+        """
+        `List[Union[str, AddedToken]]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class
+        attributes.
+
+        Don't convert tokens of `AddedToken` type to string so they can be used to control more finely how
+        special tokens are tokenized.
+        """
+        all_toks = []
+        set_attr = self.special_tokens_map_extended
+        for attr_value in set_attr.values():
+            all_toks = all_toks + (
+                list(attr_value)
+                if isinstance(attr_value, (list, tuple))
+                else [attr_value]
+            )
+        all_toks = list(OrderedDict.fromkeys(all_toks))
+        return all_toks
+
+    @property
+    def all_special_ids(self) -> List[int]:
+        """
+        `List[int]`: List the ids of the special tokens(`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes.
+        """
+        all_toks = self.all_special_tokens
+        all_ids = self.convert_tokens_to_ids(all_toks)
+        return all_ids
+
+
+class PretrainedTokenizerBase(SpecialTokensMixin):
+    """
+    Base class for [`PretrainedTokenizer`].
+
+    Class attributes (overridden by derived classes)
+
+        - **resource_files_names** (`Dict[str, str]`) -- A dictionary with, as keys, the `__init__` keyword name of each
+            vocabulary file required by the model, and as associated values, the filename for saving the associated file
+            (string).
+        - **pretrained_resource_files_map** (`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the
+            high-level keys being the `__init__` keyword name of each vocabulary file required by the model, the
+            low-level being the `short-cut-names` of the pretrained models with, as associated values, the `url` to the
+            associated pretrained vocabulary file.
+        - **max_model_input_sizes** (`Dict[str, Optional[int]]`) -- A dictionary with, as keys, the `short-cut-names`
+            of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model,
+            or `None` if the model has no maximum input size.
+        - **pretrained_init_configuration** (`Dict[str, Dict[str, Any]]`) -- A dictionary with, as keys, the
+            `short-cut-names` of the pretrained models, and as associated values, a dictionary of specific arguments to
+            pass to the `__init__` method of the tokenizer class for this pretrained model when loading the tokenizer
+            with the [`~tokenizer_utils_base.PretrainedTokenizerBase.from_pretrained`] method.
+        - **model_input_names** (`List[str]`) -- A list of inputs expected in the forward pass of the model.
+        - **padding_side** (`str`) -- The default value for the side on which the model should have padding applied.
+            Should be `'right'` or `'left'`.
+        - **truncation_side** (`str`) -- The default value for the side on which the model should have truncation
+            applied. Should be `'right'` or `'left'`.
+
+    Args:
+        model_max_length (`int`, *optional*):
+            The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is
+            loaded with [`~tokenizer_utils_base.PretrainedTokenizerBase.from_pretrained`], this will be set to the
+            value stored for the associated model in `max_model_input_sizes` (see above). If no value is provided, will
+            default to VERY_LARGE_INTEGER (`int(1e30)`).
+        padding_side (`str`, *optional*):
+            The side on which the model should have padding applied. Should be selected between ['right', 'left'].
+            Default value is picked from the class attribute of the same name.
+        truncation_side (`str`, *optional*):
+            The side on which the model should have truncation applied. Should be selected between ['right', 'left'].
+            Default value is picked from the class attribute of the same name.
+        model_input_names (`List[string]`, *optional*):
+            The list of inputs accepted by the forward pass of the model (like `"token_type_ids"` or
+            `"attention_mask"`). Default value is picked from the class attribute of the same name.
+        bos_token (`str` or `AddedToken`, *optional*):
+            A special token representing the beginning of a sentence. Will be associated to `self.bos_token` and
+            `self.bos_token_id`.
+        eos_token (`str` or `AddedToken`, *optional*):
+            A special token representing the end of a sentence. Will be associated to `self.eos_token` and
+            `self.eos_token_id`.
+        unk_token (`str` or `AddedToken`, *optional*):
+            A special token representing an out-of-vocabulary token. Will be associated to `self.unk_token` and
+            `self.unk_token_id`.
+        sep_token (`str` or `AddedToken`, *optional*):
+            A special token separating two different sentences in the same input (used by BERT for instance). Will be
+            associated to `self.sep_token` and `self.sep_token_id`.
+        pad_token (`str` or `AddedToken`, *optional*):
+            A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
+            attention mechanisms or loss computation. Will be associated to `self.pad_token` and `self.pad_token_id`.
+        cls_token (`str` or `AddedToken`, *optional*):
+            A special token representing the class of the input (used by BERT for instance). Will be associated to
+            `self.cls_token` and `self.cls_token_id`.
+        mask_token (`str` or `AddedToken`, *optional*):
+            A special token representing a masked token (used by masked-language modeling pretraining objectives, like
+            BERT). Will be associated to `self.mask_token` and `self.mask_token_id`.
+        additional_special_tokens (tuple or list of `str` or `AddedToken`, *optional*):
+            A tuple or a list of additional special tokens. Add them here to ensure they won't be split by the
+            tokenization process. Will be associated to `self.additional_special_tokens` and
+            `self.additional_special_tokens_ids`.
+    """
+
+    resource_files_names: Dict[str, str] = {}
+    pretrained_resource_files_map: Dict[str, Dict[str, str]] = {}
+    pretrained_init_configuration: Dict[str, Dict[str, Any]] = {}
+    max_model_input_sizes: Dict[str, Optional[int]] = {}
+    _auto_class: Optional[str] = None
+    tokenizer_config_file = TOKENIZER_CONFIG_NAME
+
+    # first name has to correspond to main model input name
+    # to make sure `tokenizer.pad(...)` works correctly
+    model_input_names: List[str] = ["input_ids", "token_type_ids"]
+    padding_side: str = "right"
+    truncation_side: str = "right"
+    slow_tokenizer_class = None
+
+    def __init__(self, **kwargs):
+        # inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
+        self.init_inputs = ()
+
+        self.init_kwargs = getattr(self, "init_kwargs", None) or copy.deepcopy(kwargs)
+        self.name_or_path = kwargs.pop("name_or_path", "")
+        self._processor_class = kwargs.pop("processor_class", None)
+
+        # For backward compatibility we fallback to set model_max_length from max_len if provided
+        model_max_length = kwargs.pop("model_max_length", kwargs.pop("max_len", None))
+        self.model_max_length = (
+            model_max_length if model_max_length is not None else VERY_LARGE_INTEGER
+        )
+
+        # Padding and truncation side are right by default and overridden in subclasses. If specified in the kwargs, it
+        # is changed.
+        self.padding_side = kwargs.pop("padding_side", self.padding_side)
+        if self.padding_side not in ["right", "left"]:
+            raise ValueError(
+                f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}"
+            )
+
+        self.truncation_side = kwargs.pop("truncation_side", self.truncation_side)
+        if self.truncation_side not in ["right", "left"]:
+            raise ValueError(
+                f"Truncation side should be selected between 'right' and 'left', current value: {self.truncation_side}"
+            )
+
+        self.model_input_names = kwargs.pop("model_input_names", self.model_input_names)
+
+        self.deprecation_warnings = (
+            {}
+        )  # Use to store when we have already noticed a deprecation warning (avoid overlogging).
+
+        super().__init__(**kwargs)
+
+    @property
+    def max_len_single_sentence(self) -> int:
+        """
+        `int`: The maximum length of a sentence that can be fed to the model.
+        """
+        return self.model_max_length - self.num_special_tokens_to_add(pair=False)
+
+    @property
+    def max_len_sentences_pair(self) -> int:
+        """
+        `int`: The maximum combined length of a pair of sentences that can be fed to the model.
+        """
+        return self.model_max_length - self.num_special_tokens_to_add(pair=True)
+
+    @max_len_single_sentence.setter
+    def max_len_single_sentence(self, value) -> None:
+        # For backward compatibility, allow to try to setup 'max_len_single_sentence'.
+        if (
+            value == self.model_max_length - self.num_special_tokens_to_add(pair=False)
+            and self.verbose
+        ):
+            if not self.deprecation_warnings.get("max_len_single_sentence", False):
+                warnings.warn(
+                    "Setting 'max_len_single_sentence' is now deprecated. "
+                    "This value is automatically set up."
+                )
+            self.deprecation_warnings["max_len_single_sentence"] = True
+        else:
+            raise ValueError(
+                "Setting 'max_len_single_sentence' is now deprecated. "
+                "This value is automatically set up."
+            )
+
+    def _switch_to_input_mode(self):
+        """
+        Private method to put the tokenizer in input mode (when it has different modes for input/outputs)
+        """
+        pass
+
+    @max_len_sentences_pair.setter
+    def max_len_sentences_pair(self, value) -> None:
+        # For backward compatibility, allow to try to setup 'max_len_sentences_pair'.
+        if (
+            value == self.model_max_length - self.num_special_tokens_to_add(pair=True)
+            and self.verbose
+        ):
+            if not self.deprecation_warnings.get("max_len_sentences_pair", False):
+                warnings.warn(
+                    "Setting 'max_len_sentences_pair' is now deprecated. "
+                    "This value is automatically set up."
+                )
+            self.deprecation_warnings["max_len_sentences_pair"] = True
+        else:
+            raise ValueError(
+                "Setting 'max_len_sentences_pair' is now deprecated. "
+                "This value is automatically set up."
+            )
+
+    def _set_processor_class(self, processor_class: str):
+        """Sets processor class as an attribute."""
+        self._processor_class = processor_class
+
+    def __repr__(self) -> str:
+        return (
+            f"PretrainedTokenizer(name_or_path='{self.name_or_path}', "
+            f"vocab_size={self.vocab_size}, model_max_len={self.model_max_length}, "
+            f"padding_side='{self.padding_side}', truncation_side='{self.truncation_side}', special_tokens={self.special_tokens_map_extended})"
+        )
+
+    def get_vocab(self) -> Dict[str, int]:
+        """
+        Returns the vocabulary as a dictionary of token to index.
+
+        `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the
+        vocab.
+
+        Returns:
+            `Dict[str, int]`: The vocabulary.
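+
+        A minimal usage sketch (illustrative only; assumes a concrete subclass such as
+        `GPTTokenizer` has been instantiated):
+
+        ```python
+        vocab = tokenizer.get_vocab()
+        # By the equivalence above, looking up a known token gives its id.
+        assert vocab[tokenizer.unk_token] == tokenizer.unk_token_id
+        ```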
+        """
+        raise NotImplementedError()
+
+    @classmethod
+    def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs):
+        """
+        Creates an instance of `PretrainedTokenizer`. Related resources are loaded
+        by specifying name of a built-in pretrained model, or a community-contributed
+        pretrained model, or a local file directory path.
+
+        Args:
+            pretrained_model_name_or_path (str): Name of pretrained model or dir path
+                to load from. The string can be:
+
+                - Name of built-in pretrained model
+                - Name of a community-contributed pretrained model.
+                - Local directory path which contains tokenizer related resources
+                  and tokenizer config file ("tokenizer_config.json").
+            from_hf_hub (bool, optional): Whether to load from the Hugging Face Hub.
+            subfolder (str, optional): An optional value corresponding to a folder inside the repo.
+                Only works when loading from Huggingface Hub.
+            *args (tuple): position arguments for model `__init__`. If provided,
+                use these as position argument values for tokenizer initialization.
+            **kwargs (dict): keyword arguments for model `__init__`. If provided,
+                use these to update pre-defined keyword argument values for tokenizer
+                initialization.
+
+        Returns:
+            PretrainedTokenizer: An instance of `PretrainedTokenizer`.
+
+        Example:
+            .. code-block::
+
+                from paddlenlp.transformers import BertTokenizer
+
+                # Name of built-in pretrained model
+                tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
+
+                # Name of community-contributed pretrained model
+                tokenizer = BertTokenizer.from_pretrained('yingyibiao/bert-base-uncased-sst-2-finetuned')
+
+                # Load from local directory path
+                tokenizer = BertTokenizer.from_pretrained('./my_bert/')
+        """
+
+        pretrained_model_name_or_path = str(pretrained_model_name_or_path)
+        cache_dir = kwargs.pop("cache_dir", None)
+        from_hf_hub = kwargs.pop("from_hf_hub", False)
+        from_aistudio = kwargs.pop("from_aistudio", False)
+        subfolder = kwargs.pop("subfolder", "")
+        return_tokenizer_file_dir = kwargs.pop("return_tokenizer_file_dir", False)
+
+        if subfolder is None:
+            subfolder = ""
+
+        vocab_files = {}
+        init_configuration = {}
+
+        additional_files_names = {
+            "added_tokens_file": ADDED_TOKENS_FILE,
+            "special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE,
+            "tokenizer_config_file": TOKENIZER_CONFIG_FILE,
+            "chat_template_file": CHAT_TEMPLATE_CONFIG_NAME,
+        }
+
+        vocab_files_target = {**cls.resource_files_names, **additional_files_names}
+
+        # From HF Hub or AI Studio
+        if from_hf_hub or from_aistudio:
+            # Only include the necessary resource files specified by the tokenizer cls
+            # Deep copy to avoid modifying the class attributes
+            vocab_files = copy.deepcopy(cls.resource_files_names)
+            vocab_files["tokenizer_config_file"] = cls.tokenizer_config_file
+
+        # From built-in pretrained models
+        elif pretrained_model_name_or_path in cls.pretrained_init_configuration:
+            for file_id, map_list in cls.pretrained_resource_files_map.items():
+                vocab_files[file_id] = map_list[pretrained_model_name_or_path]
+            init_configuration = copy.deepcopy(
+                cls.pretrained_init_configuration[pretrained_model_name_or_path]
+            )
+        # From local dir path
+        elif os.path.isdir(pretrained_model_name_or_path):
+            vocab_files_target["tokenizer_config_file"] = cls.tokenizer_config_file
+            for file_id, file_name in vocab_files_target.items():
+                full_file_name = os.path.join(
+                    pretrained_model_name_or_path, subfolder, file_name
+                )
+                if os.path.isfile(full_file_name):
+                    vocab_files[file_id] = full_file_name
+        else:
+            # Assuming from community-contributed pretrained models
+            for file_id, file_name in vocab_files_target.items():
+                vocab_files[file_id] = file_name
+
+        resolved_vocab_files = {}
+        for file_id, file_path in vocab_files.items():
+            if file_path is None or os.path.isfile(file_path):
+                resolved_vocab_files[file_id] = file_path
+                continue
+            else:
+                logging.warning("Tokenizer files need to be downloaded, but downloading is not supported yet.")
+            # tokenizer download not support yet
+            # resolved_vocab_files[file_id] = resolve_file_path(
+            #     pretrained_model_name_or_path,
+            #     [file_path],
+            #     subfolder,
+            #     cache_dir=cache_dir,
+            #     from_aistudio=from_aistudio,
+            #     from_hf_hub=from_hf_hub,
+            # )
+
+        for file_id, file_path in resolved_vocab_files.items():
+            if resolved_vocab_files[file_id] is not None:
+                cache_dir = os.path.dirname(resolved_vocab_files[file_id])
+                break
+
+        tokenizer_config_file_dir_list = set()
+        for k, v in resolved_vocab_files.items():
+            if v is not None and os.path.isfile(v):
+                tokenizer_config_file_dir_list.add(os.path.dirname(v))
+        tokenizer_config_file_dir_list = list(tokenizer_config_file_dir_list)
+        # TODO: check this
+        assert (
+            len(tokenizer_config_file_dir_list) > 0
+        ), "All tokenizer files should be in the same directory."
+        # Prepare tokenizer initialization kwargs
+        # Did we save some inputs and kwargs to reload?
+        has_tokenizer_file = (
+            resolved_vocab_files.get("tokenizer_file", None) is not None
+        )
+        tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None)
+        if tokenizer_config_file is not None:
+            with io.open(tokenizer_config_file, encoding="utf-8") as f:
+                init_kwargs = json.load(f)
+        else:
+            init_kwargs = init_configuration
+
+        # positional args are stored in kwargs; maybe better not to include them
+        init_args = init_kwargs.pop("init_args", ())
+        init_kwargs.pop("init_class", None)
+
+        # Update with newly provided args and kwargs
+        init_args = init_args if not args else args
+        init_kwargs.update(kwargs)
+
+        def convert_added_tokens(obj):
+            if (
+                isinstance(obj, dict)
+                and "__type" in obj
+                and obj["__type"] == "AddedToken"
+            ):
+                obj.pop("__type")
+                return AddedToken(**obj)
+            elif isinstance(obj, (list, tuple)):
+                return list(convert_added_tokens(o) for o in obj)
+            elif isinstance(obj, dict):
+                return {k: convert_added_tokens(v) for k, v in obj.items()}
+            return obj
+
+        init_kwargs = convert_added_tokens(init_kwargs)
+        # Set max length if needed
+        if pretrained_model_name_or_path in cls.max_model_input_sizes:
+            # if we're using a pretrained model, ensure the tokenizer
+            # won't index sequences longer than the number of positional embeddings
+            model_max_length = cls.max_model_input_sizes[pretrained_model_name_or_path]
+            if model_max_length is not None and isinstance(
+                model_max_length, (int, float)
+            ):
+                init_kwargs["model_max_length"] = min(
+                    init_kwargs.get("model_max_length", int(1e30)), model_max_length
+                )
+
+        added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
+        # Merge resolved_vocab_files arguments into init_kwargs if not already included.
+        # Maybe need more ways to load resources.
+        for args_name, file_path in resolved_vocab_files.items():
+            # when `pretrained_model_name_or_path` is a pretrained model name,
+            # use pretrained_init_configuration as `init_kwargs` to init which
+            # does not include the vocab file in it, thus add vocab file into
+            # args.
+            if args_name not in init_kwargs:
+                init_kwargs[args_name] = file_path
+            # when `pretrained_model_name_or_path` is a pretrained model dir,
+            # use `tokenizer_config.json` as `init_kwargs` to init, which
+            # does include a vocab file path in it. However, if the vocab file
+            # path included in json does not exist, such as was deleted, to make
+            # it still work, use the vocab file under this dir.
+            elif not os.path.isfile(init_kwargs[args_name] or "") and os.path.isfile(
+                file_path
+            ):
+                init_kwargs[args_name] = file_path
+
+        # TODO(zhoushunjie): Loading tokenizer.json from Hugging Face hub is not supported so far.
+        if from_hf_hub and "tokenizer_file" in init_kwargs:
+            init_kwargs.pop("tokenizer_file")
+
+        # TODO(guosheng): avoid duplication of positional args and keyword args
+        tokenizer = cls(*init_args, **init_kwargs)
+        chat_template = init_kwargs.pop("chat_template", None)
+        if chat_template is not None:
+            tokenizer.init_chat_template(chat_template)
+        special_tokens_map_file = resolved_vocab_files.pop(
+            "special_tokens_map_file", None
+        )
+        if special_tokens_map_file is not None:
+            with open(
+                special_tokens_map_file, encoding="utf-8"
+            ) as special_tokens_map_handle:
+                special_tokens_map = json.load(special_tokens_map_handle)
+            for key, value in special_tokens_map.items():
+                if key in kwargs and kwargs[key]:
+                    # This value has already been redefined by the kwargs
+                    # We keep this new value and ignore the one stored in the special_tokens_map_file
+
+                    continue
+
+                if isinstance(value, dict):
+                    value = AddedToken(**value)
+                elif isinstance(value, list):
+                    value = [
+                        AddedToken(**token) if isinstance(token, dict) else token
+                        for token in value
+                    ]
+                setattr(tokenizer, key, value)
+        # Add supplementary tokens.
+        special_tokens = tokenizer.all_special_tokens
+        if added_tokens_file is not None:
+            with open(added_tokens_file, encoding="utf-8") as added_tokens_handle:
+                added_tok_encoder = json.load(added_tokens_handle)
+
+            # Sort added tokens by index
+            added_tok_encoder_sorted = list(
+                sorted(added_tok_encoder.items(), key=lambda x: x[1])
+            )
+            for token, index in added_tok_encoder_sorted:
+                if (
+                    has_tokenizer_file
+                    and index != len(tokenizer)
+                    and tokenizer.convert_tokens_to_ids(token) != index
+                ):
+                    # index is the current length of the tokenizer (not in vocabulary)
+                    raise ValueError(
+                        f"Wrong index found for {token}: should be {tokenizer.convert_tokens_to_ids(token)} but found "
+                        f"{index}."
+                    )
+                elif not has_tokenizer_file and index != len(tokenizer):
+                    # Tokenizer slow: added token cannot already be in the vocabulary so its index needs to be the
+                    # current length of the tokenizer.
+                    raise ValueError(
+                        f"Non-consecutive added token '{token}' found. "
+                        f"Should have index {len(tokenizer)} but has index {index} in saved vocabulary."
+                    )
+
+                tokenizer.add_tokens(
+                    token, special_tokens=bool(token in special_tokens)
+                )
+        # Check all our special tokens are registered as "no split" token (we don't cut them) and are in the vocab
+        added_tokens = tokenizer.sanitize_special_tokens()
+        if added_tokens:
+            logging.info(
+                "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained."
+            )
+        # save all related things into the default root dir
+        if pretrained_model_name_or_path in cls.pretrained_init_configuration:
+            # tokenizer.save_pretrained(os.path.join(cache_dir, pretrained_model_name_or_path, subfolder))
+            tokenizer.save_pretrained(cache_dir)
+
+        if return_tokenizer_file_dir:
+            return tokenizer, tokenizer_config_file_dir_list[0]
+        return tokenizer
+
+    def save_pretrained(
+        self, save_directory, filename_prefix: Optional[str] = None, **kwargs
+    ):
+        """
+        Save tokenizer configuration and related resources to files under
+        `save_directory`. The tokenizer configuration is saved into the file
+        indicated by `tokenizer_config_file` (i.e. `tokenizer_config.json`),
+        and resources are saved into the files indicated by
+        `resource_files_names` by using `self.save_resources(save_directory)`.
+
+        The `save_directory` can be used in `from_pretrained` as argument value
+        of `pretrained_model_name_or_path` to re-load the tokenizer.
+
+        Args:
+            save_directory (str): Directory to save files into.
+            filename_prefix (str, optional):
+                A prefix to add to the names of the files saved by the tokenizer.
+
+        Example:
+            .. code-block::
+
+                from paddlenlp.transformers import BertTokenizer
+
+                tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
+                tokenizer.save_pretrained('trained_model')
+                # reload from save_directory
+                tokenizer = BertTokenizer.from_pretrained('trained_model')
+        """
+        assert not os.path.isfile(
+            save_directory
+        ), "Saving directory ({}) should be a directory, not a file".format(
+            save_directory
+        )
+        os.makedirs(save_directory, exist_ok=True)
+
+        special_tokens_map_file = os.path.join(
+            save_directory,
+            (filename_prefix + "-" if filename_prefix else "")
+            + SPECIAL_TOKENS_MAP_FILE,
+        )
+        tokenizer_config_file = os.path.join(
+            save_directory,
+            (filename_prefix + "-" if filename_prefix else "")
+            + self.tokenizer_config_file,
+        )
+
+        tokenizer_config = copy.deepcopy(self.init_kwargs)
+        if len(self.init_inputs) > 0:
+            tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs)
+        for file_id in self.resource_files_names.keys():
+            tokenizer_config.pop(file_id, None)
+
+        # Sanitize AddedTokens
+        def convert_added_tokens(obj: Union[AddedToken, Any], add_type_field=True):
+            if isinstance(obj, AddedToken):
+                out = obj.__getstate__()
+                if add_type_field:
+                    out["__type"] = "AddedToken"
+                return out
+            elif isinstance(obj, (list, tuple)):
+                return list(
+                    convert_added_tokens(o, add_type_field=add_type_field) for o in obj
+                )
+            elif isinstance(obj, dict):
+                return {
+                    k: convert_added_tokens(v, add_type_field=add_type_field)
+                    for k, v in obj.items()
+                }
+            return obj
+
+        # add_type_field=True to allow dicts in the kwargs / differentiate from AddedToken serialization
+        tokenizer_config = convert_added_tokens(tokenizer_config, add_type_field=True)
+
+        # Add tokenizer class to the tokenizer config to be able to reload it with from_pretrained
+        tokenizer_class = self.__class__.__name__
+        tokenizer_config["tokenizer_class"] = tokenizer_class
+
+        with io.open(tokenizer_config_file, "w", encoding="utf-8") as f:
+            f.write(json.dumps(tokenizer_config, ensure_ascii=False))
+        logging.info(f"tokenizer config file saved in {tokenizer_config_file}")
+
+        # Sanitize AddedTokens in special_tokens_map
+        write_dict = convert_added_tokens(
+            self.special_tokens_map_extended, add_type_field=False
+        )
+        with open(special_tokens_map_file, "w", encoding="utf-8") as f:
+            f.write(json.dumps(write_dict, ensure_ascii=False))
+        logging.info(f"Special tokens file saved in {special_tokens_map_file}")
+
+        file_names = (tokenizer_config_file, special_tokens_map_file)
+
+        save_files = self._save_pretrained(
+            save_directory=save_directory,
+            file_names=file_names,
+            filename_prefix=filename_prefix,
+        )
+
+        return save_files
+
+    def _save_pretrained(
+        self,
+        save_directory: Union[str, os.PathLike],
+        file_names: Tuple[str],
+        filename_prefix: Optional[str] = None,
+    ) -> Tuple[str]:
+        """
+        Save a tokenizer using the tokenizer format: vocabulary + added tokens.
+
+        """
+        save_directory = str(save_directory)
+
+        added_tokens_file = os.path.join(
+            save_directory,
+            (filename_prefix + "-" if filename_prefix else "") + ADDED_TOKENS_FILE,
+        )
+        added_vocab = self.get_added_vocab()
+        if added_vocab:
+            with open(added_tokens_file, "w", encoding="utf-8") as f:
+                out_str = json.dumps(added_vocab, ensure_ascii=False)
+                f.write(out_str)
+                logging.info(f"added tokens file saved in {added_tokens_file}")
+
+        self.save_resources(save_directory)
+
+        return file_names + (added_tokens_file,)
+
+    def tokenize(
+        self,
+        text: str,
+        pair: Optional[str] = None,
+        add_special_tokens: bool = False,
+        **kwargs,
+    ) -> List[str]:
+        """
+        Converts a string into a sequence of tokens, replacing unknown tokens with the `unk_token`.
+
+        Args:
+            text (`str`):
+                The sequence to be encoded.
+            pair (`str`, *optional*):
+                A second sequence to be encoded with the first.
+            add_special_tokens (`bool`, *optional*, defaults to `False`):
+                Whether or not to add the special tokens associated with the corresponding model.
+            kwargs (additional keyword arguments, *optional*):
+                Will be passed to the underlying model specific encode method. See details in
+                [`~PretrainedTokenizerBase.__call__`]
+
+        Returns:
+            `List[str]`: The list of tokens.
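+
+        Example:
+            A minimal sketch with a concrete subclass instance; the output shown is
+            only illustrative of a byte-level BPE tokenizer:
+
+            .. code-block::
+
+                tokens = tokenizer.tokenize("Welcome to use PaddlePaddle!")
+                # e.g. ['Welcome', 'Ġto', 'Ġuse', ...]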
+        """
+        raise NotImplementedError
+
+    def num_special_tokens_to_add(self, pair: bool = False) -> int:
+        raise NotImplementedError
+
+    def _get_padding_truncation_strategies(
+        self,
+        padding=False,
+        truncation=False,
+        max_length=None,
+        pad_to_multiple_of=None,
+        verbose=True,
+        **kwargs,
+    ):
+        """
+        Find the correct padding/truncation strategy with backward compatibility for old arguments (truncation_strategy
+        and pad_to_max_length) and behaviors.
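+
+        For example, `padding=True` resolves to `PaddingStrategy.LONGEST`,
+        `padding='max_length'` to `PaddingStrategy.MAX_LENGTH`, and `truncation=True` to
+        `TruncationStrategy.LONGEST_FIRST`; passing only `max_length` activates the
+        `'longest_first'` truncation strategy for backward compatibility.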
+        """
+        old_truncation_strategy = kwargs.pop("truncation_strategy", "do_not_truncate")
+        old_pad_to_max_length = kwargs.pop("pad_to_max_seq_len", False)
+
+        # Backward compatibility for previous behavior, maybe we should deprecate it:
+        # If you only set max_length, it activates truncation for max_length
+        if max_length is not None and padding is False and truncation is False:
+            if verbose:
+                if not self.deprecation_warnings.get(
+                    "Truncation-not-explicitly-activated", False
+                ):
+                    warnings.warn(
+                        "Truncation was not explicitly activated but `max_length` is provided a specific value, "
+                        "please use `truncation=True` to explicitly truncate examples to max length. "
+                        "Defaulting to 'longest_first' truncation strategy. "
+                        "If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy "
+                        "more precisely by providing a specific strategy to `truncation`."
+                    )
+                self.deprecation_warnings["Truncation-not-explicitly-activated"] = True
+            truncation = "longest_first"
+
+        # Get padding strategy
+        if padding is False and old_pad_to_max_length:
+            if verbose:
+                warnings.warn(
+                    "The `pad_to_max_length` argument is deprecated and will be removed in a future version, "
+                    "use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or "
+                    "use `padding='max_length'` to pad to a max length. In this case, you can give a specific "
+                    "length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the "
+                    "maximal input size of the model (e.g. 512 for Bert).",
+                    FutureWarning,
+                )
+            if max_length is None:
+                padding_strategy = PaddingStrategy.LONGEST
+            else:
+                padding_strategy = PaddingStrategy.MAX_LENGTH
+        elif padding is not False:
+            if padding is True:
+                if verbose:
+                    if max_length is not None and (
+                        truncation is False or truncation == "do_not_truncate"
+                    ):
+                        warnings.warn(
+                            "`max_length` is ignored when `padding`=`True` and there is no truncation strategy. "
+                            "To pad to max length, use `padding='max_length'`."
+                        )
+                    if old_pad_to_max_length is not False:
+                        warnings.warn(
+                            "Though `pad_to_max_length` = `True`, it is ignored because `padding`=`True`."
+                        )
+                # Default to pad to the longest sequence in the batch
+                padding_strategy = PaddingStrategy.LONGEST
+            elif not isinstance(padding, PaddingStrategy):
+                padding_strategy = PaddingStrategy(padding)
+            elif isinstance(padding, PaddingStrategy):
+                padding_strategy = padding
+        else:
+            padding_strategy = PaddingStrategy.DO_NOT_PAD
+
+        # Get truncation strategy
+        if truncation is False and old_truncation_strategy != "do_not_truncate":
+            if verbose:
+                warnings.warn(
+                    "The `truncation_strategy` argument is deprecated and will be removed in a future version, "
+                    "use `truncation=True` to truncate examples to a max length. You can give a specific "
+                    "length with `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the "
+                    "maximal input size of the model (e.g. 512 for Bert). "
+                    " If you have pairs of inputs, you can give a specific truncation strategy selected among "
+                    "`truncation='only_first'` (will only truncate the first sentence in the pairs) "
+                    "`truncation='only_second'` (will only truncate the second sentence in the pairs) "
+                    "or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence in the pairs).",
+                    FutureWarning,
+                )
+            truncation_strategy = TruncationStrategy(old_truncation_strategy)
+        elif truncation is not False and truncation is not None:
+            if truncation is True:
+                truncation_strategy = (
+                    TruncationStrategy.LONGEST_FIRST
+                )  # Default to truncate the longest sequences in pairs of inputs
+            elif not isinstance(truncation, TruncationStrategy):
+                truncation_strategy = TruncationStrategy(truncation)
+            elif isinstance(truncation, TruncationStrategy):
+                truncation_strategy = truncation
+        else:
+            truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
+
+        # Set max length if needed
+        if max_length is None:
+            if padding_strategy == PaddingStrategy.MAX_LENGTH:
+                if self.model_max_length > LARGE_INTEGER:
+                    if verbose:
+                        if not self.deprecation_warnings.get(
+                            "Asking-to-pad-to-max_length", False
+                        ):
+                            warnings.warn(
+                                "Asking to pad to max_length but no maximum length is provided and the model has no predefined maximum length. "
+                                "Default to no padding."
+                            )
+                        self.deprecation_warnings["Asking-to-pad-to-max_length"] = True
+                    padding_strategy = PaddingStrategy.DO_NOT_PAD
+                else:
+                    max_length = self.model_max_length
+
+            if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE:
+                if self.model_max_length > LARGE_INTEGER:
+                    if verbose:
+                        if not self.deprecation_warnings.get(
+                            "Asking-to-truncate-to-max_length", False
+                        ):
+                            warnings.warn(
+                                "Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. "
+                                "Default to no truncation."
+                            )
+                        self.deprecation_warnings[
+                            "Asking-to-truncate-to-max_length"
+                        ] = True
+                    truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
+                else:
+                    max_length = self.model_max_length
+
+        # Test if we have a padding token
+        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (
+            not self.pad_token or self.pad_token_id < 0
+        ):
+            raise ValueError(
+                "Asking to pad but the tokenizer does not have a padding token. "
+                "Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` "
+                "or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`."
+            )
+
+        # Check that we will truncate to a multiple of pad_to_multiple_of if both are provided
+        if (
+            truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE
+            and padding_strategy != PaddingStrategy.DO_NOT_PAD
+            and pad_to_multiple_of is not None
+            and max_length is not None
+            and (max_length % pad_to_multiple_of != 0)
+        ):
+            raise ValueError(
+                f"Truncation and padding are both activated but "
+                f"truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of})."
+            )
+
+        return padding_strategy, truncation_strategy, max_length, kwargs
+
+    def __call__(
+        self,
+        text: Union[str, List[str], List[List[str]]],
+        text_pair: Optional[Union[str, List[str], List[List[str]]]] = None,
+        max_length: Optional[int] = None,
+        stride: int = 0,
+        is_split_into_words: Union[bool, str] = False,
+        padding: Union[bool, str, PaddingStrategy] = False,
+        truncation: Union[bool, str, TruncationStrategy] = False,
+        return_position_ids: Optional[bool] = None,
+        return_token_type_ids: Optional[bool] = None,
+        return_attention_mask: Optional[bool] = None,
+        return_length: bool = False,
+        return_overflowing_tokens: bool = False,
+        return_special_tokens_mask: bool = False,
+        return_dict: bool = True,
+        return_offsets_mapping: bool = False,
+        add_special_tokens: bool = True,
+        pad_to_multiple_of: Optional[int] = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        verbose: bool = True,
+        **kwargs,
+    ):
+        """
+        Performs tokenization and uses the tokenized tokens to prepare model
+        inputs. It supports sequence or sequence pair as input, and batch input
+        is allowed. `self.encode()` or `self.batch_encode()` would be called
+        separately for single or batch input depending on input format and
+        `is_split_into_words` argument.
+
+        Args:
+            text (str, List[str] or List[List[str]]):
+                The sequence or batch of sequences to be processed. One sequence
+                is a string or a list of strings depending on whether it has been
+                pretokenized. If each sequence is provided as a list of strings
+                (pretokenized), you must set `is_split_into_words` as `True` to
+                disambiguate with a batch of sequences.
+            text_pair (str, List[str] or List[List[str]], optional):
+                Same as the `text` argument, but representing the second
+                sequence of the sequence pair.
+            max_length (int, optional):
+                If set to a number, will limit the total sequence returned so
+                that it has a maximum length. If there are overflowing tokens,
+                those overflowing tokens will be added to the returned dictionary
+                when `return_overflowing_tokens` is `True`. Defaults to `None`.
+            stride (int, optional):
+                Only available for batch input of sequence pairs, and mainly for
+                question answering usage. For QA, `text` represents questions
+                and `text_pair` represents contexts. If `stride` is set to a
+                positive number, the context will be split into multiple spans
+                where `stride` defines the number of (tokenized) tokens to skip
+                from the start of one span to get the next span, thus producing
+                a bigger batch than the inputs to include all spans. Moreover, 'overflow_to_sample'
+                and 'offset_mapping' preserving the original example and position
+                information will be added to the returned dictionary. Defaults to 0.
+            is_split_into_words (Union[bool, str], optional):
+                Whether the text has been pretokenized; should be `True` or `'token'` when it has.
+                `True` means the text consists of words that still need to be tokenized.
+                `'token'` means the text consists of tokens that have already been tokenized, so it should not be tokenized again.
+            padding (bool, str or [PaddingStrategy], optional):
+                Activates and controls padding. Accepts the following values:
+
+                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+                  sequence is provided).
+                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+                  acceptable input length for the model if that argument is not provided.
+                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+                  lengths).
+                Defaults to `False`.
+            truncation (bool, str or [TruncationStrategy], optional):
+                Activates and controls truncation. Accepts the following values:
+
+                - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
+                  to the maximum acceptable input length for the model if that argument is not provided. This will
+                  truncate token by token, removing a token from the longest sequence in the pair if a pair of
+                  sequences (or a batch of pairs) is provided.
+                - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+                  maximum acceptable input length for the model if that argument is not provided. This will only
+                  truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+                - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
+                  maximum acceptable input length for the model if that argument is not provided. This will only
+                  truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+                - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
+                  greater than the model maximum admissible input size).
+                Defaults to `False`.
+            return_position_ids (bool, optional):
+                Whether to include tokens position ids in the returned dictionary.
+                Defaults to `False`.
+            return_token_type_ids (bool, optional):
+                Whether to include token type ids in the returned dictionary.
+                Defaults to `True`.
+            return_attention_mask (bool, optional):
+                Whether to include the attention mask in the returned dictionary.
+                Defaults to `False`.
+            return_length (bool, optional):
+                Whether to include the length of each encoded inputs in the
+                returned dictionary. Defaults to `False`.
+            return_overflowing_tokens (bool, optional):
+                Whether to include overflowing token information in the returned
+                dictionary. Defaults to `False`.
+            return_special_tokens_mask (bool, optional):
+                Whether to include special tokens mask information in the returned
+                dictionary. Defaults to `False`.
+            return_dict (bool, optional):
+                Decide the format for returned encoded batch inputs. Only works when
+                input is a batch of data.
+                ::
+                    - If True, encoded inputs would be a dictionary like:
+                        {'input_ids': [[1, 4444, 4385, 1545, 6712],[1, 4444, 4385]],
+                        'token_type_ids': [[0, 0, 0, 0, 0], [0, 0, 0]]}
+                    - If False, encoded inputs would be a list like:
+                        [{'input_ids': [1, 4444, 4385, 1545, 6712],
+                          'token_type_ids': [0, 0, 0, 0, 0]},
+                         {'input_ids': [1, 4444, 4385], 'token_type_ids': [0, 0, 0]}]
+
+                Defaults to `True`.
+            return_offsets_mapping (bool, optional):
+                Whether to include, for each token, the pair of start and end character
+                indices in the original input in the returned dictionary. Would be
+                automatically set to `True` when `stride` > 0.
+                Defaults to `False`.
+            add_special_tokens (bool, optional):
+                Whether to add the special tokens associated with the corresponding model
+                to the encoded inputs. Defaults to `True`.
+            pad_to_multiple_of (int, optional):
+                If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
+                the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
+                Defaults to `None`.
+            return_tensors (str or [TensorType], optional):
+                If set, will return tensors instead of list of python integers. Acceptable values are:
+
+                - `'pd'`: Return Paddle `paddle.Tensor` objects.
+                - `'np'`: Return Numpy `np.ndarray` objects.
+                Defaults to `None`.
+            verbose (bool, optional):
+                Whether or not to print more information and warnings. Defaults to True.
+
+        Returns:
+            dict or list[dict] (for batch input):
+                The dict has the following optional items:
+
+                - **input_ids** (list[int] or list[list[int]]): List of token ids to be fed to a model.
+                - **position_ids** (list[int] or list[list[int]], optional): List of token position ids to be
+                  fed to a model. Included when `return_position_ids` is `True`
+                - **token_type_ids** (list[int] or list[list[int]], optional): List of token type ids to be
+                  fed to a model. Included when `return_token_type_ids` is `True`.
+                - **attention_mask** (list[int] or list[list[int]], optional): List of integers valued 0 or 1,
+                  where 0 specifies paddings and should not be attended to by the
+                  model. Included when `return_attention_mask` is `True`.
+                - **seq_len** (int or list[int], optional): The input_ids length. Included when `return_length`
+                  is `True`.
+                - **overflowing_tokens** (list[int] or list[list[int]], optional): List of overflowing tokens.
+                  Included when `max_length` is specified and `return_overflowing_tokens`
+                  is `True`.
+                - **num_truncated_tokens** (int or list[int], optional): The number of overflowing tokens.
+                  Included when `max_length` is specified and `return_overflowing_tokens`
+                  is `True`.
+                - **special_tokens_mask** (list[int] or list[list[int]], optional): List of integers valued 0 or 1,
+                  with 0 specifying special added tokens and 1 specifying sequence tokens.
+                  Included when `return_special_tokens_mask` is `True`.
+                - **offset_mapping** (list[int], optional): List of pairs of start and end character
+                  indices in the original input for each token.
+                  For a special token, the index pair is `(0, 0)`. Included when
+                  `return_overflowing_tokens` is `True` or `stride` > 0.
+                - **overflow_to_sample** (int or list[int], optional): Index of example from which this
+                  feature is generated. Included when `stride` works.
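+
+        Example:
+            A minimal usage sketch; the model name and input strings below are illustrative:
+
+            .. code-block::
+
+                from paddlenlp.transformers import GPTTokenizer
+
+                tokenizer = GPTTokenizer.from_pretrained('gpt2-en')
+                # a single string goes through `self.encode()` and returns a dict of lists
+                single = tokenizer("Welcome to use PaddlePaddle!", max_length=16, truncation=True)
+                # a list of strings goes through `self.batch_encode()`; with `return_dict=True`
+                # the result is one dict whose values are lists of lists
+                batch = tokenizer(["Hello world!", "PaddleNLP tokenizers"], padding=True)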
+        """
+
+        # Input type checking for clearer error
+        def _is_valid_text_input(t):
+            if isinstance(t, str):
+                # Strings are fine
+                return True
+            elif isinstance(t, (list, tuple)):
+                # List are fine as long as they are...
+                if len(t) == 0:
+                    # ... empty
+                    return True
+                elif isinstance(t[0], str):
+                    # ... list of strings
+                    return True
+                elif isinstance(t[0], (list, tuple)):
+                    # ... list with an empty list or with a list of strings
+                    return len(t[0]) == 0 or isinstance(t[0][0], str)
+                else:
+                    return False
+            else:
+                return False
+
+        if not _is_valid_text_input(text):
+            raise ValueError(
+                "text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
+                "or `List[List[str]]` (batch of pretokenized examples)."
+            )
+
+        if text_pair is not None and not _is_valid_text_input(text_pair):
+            raise ValueError(
+                "text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
+                "or `List[List[str]]` (batch of pretokenized examples)."
+            )
+
+        # check `split_into_words` value
+        if isinstance(is_split_into_words, str) and is_split_into_words != "token":
+            raise ValueError(
+                "the value of `is_split_into_words` should be one of: "
+                f"{{True, False, 'token'}} but received: {is_split_into_words}"
+            )
+
+        if is_split_into_words:
+            is_batched = (
+                isinstance(text, (list, tuple))
+                and text
+                and isinstance(text[0], (list, tuple))
+            )
+        else:
+            is_batched = isinstance(text, (list, tuple))
+
+        if is_batched:
+            if isinstance(text_pair, str):
+                raise TypeError(
+                    "when tokenizing batches of text, `text_pair` must be a list or tuple with the same length as `text`."
+                )
+            if text_pair is not None and len(text) != len(text_pair):
+                raise ValueError(
+                    f"batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}."
+                )
+            batch_text_or_text_pairs = (
+                list(zip(text, text_pair)) if text_pair is not None else text
+            )
+            return self.batch_encode(
+                batch_text_or_text_pairs=batch_text_or_text_pairs,
+                max_length=max_length,
+                stride=stride,
+                is_split_into_words=is_split_into_words,
+                padding=padding,
+                truncation=truncation,
+                return_position_ids=return_position_ids,
+                return_token_type_ids=return_token_type_ids,
+                return_attention_mask=return_attention_mask,
+                return_length=return_length,
+                return_overflowing_tokens=return_overflowing_tokens,
+                return_special_tokens_mask=return_special_tokens_mask,
+                return_dict=return_dict,
+                return_offsets_mapping=return_offsets_mapping,
+                add_special_tokens=add_special_tokens,
+                pad_to_multiple_of=pad_to_multiple_of,
+                return_tensors=return_tensors,
+                verbose=verbose,
+                **kwargs,
+            )
+        else:
+            return self.encode(
+                text=text,
+                text_pair=text_pair,
+                max_length=max_length,
+                stride=stride,
+                is_split_into_words=is_split_into_words,
+                padding=padding,
+                truncation=truncation,
+                return_position_ids=return_position_ids,
+                return_token_type_ids=return_token_type_ids,
+                return_attention_mask=return_attention_mask,
+                return_length=return_length,
+                return_overflowing_tokens=return_overflowing_tokens,
+                return_special_tokens_mask=return_special_tokens_mask,
+                return_offsets_mapping=return_offsets_mapping,
+                add_special_tokens=add_special_tokens,
+                pad_to_multiple_of=pad_to_multiple_of,
+                return_tensors=return_tensors,
+                verbose=verbose,
+                **kwargs,
+            )
+
+    def encode(
+        self,
+        text,
+        text_pair=None,
+        add_special_tokens=True,
+        padding: Union[bool, str, PaddingStrategy] = False,
+        truncation: Union[bool, str, TruncationStrategy] = False,
+        max_length: Optional[int] = None,
+        stride: int = 0,
+        is_split_into_words: bool = False,
+        pad_to_multiple_of: Optional[int] = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        return_token_type_ids: Optional[bool] = None,
+        return_attention_mask: Optional[bool] = None,
+        return_overflowing_tokens: bool = False,
+        return_special_tokens_mask: bool = False,
+        return_offsets_mapping: bool = False,
+        return_length: bool = False,
+        verbose: bool = True,
+        return_position_ids=None,
+        **kwargs,
+    ) -> BatchEncoding:
+        """
+        Tokenize and prepare for the model a sequence or a pair of sequences.
+
+        Args:
+            text (`str`, `List[str]` or `List[int]`):
+                The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
+                `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
+                method).
+            text_pair (`str`, `List[str]` or `List[int]`, *optional*):
+                Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
+                the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
+                method).
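+
+        Example:
+            A minimal sketch for a sequence pair; the tokenizer is assumed to be a
+            concrete subclass instance and the inputs are illustrative:
+
+            .. code-block::
+
+                encoded = tokenizer.encode(
+                    "What is PaddleNLP?", text_pair="PaddleNLP is an NLP library.",
+                    max_length=32, truncation=True, padding='max_length')
+                input_ids = encoded["input_ids"]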
+        """
+        # Backward compatibility for 'max_seq_len'
+        old_max_seq_len = kwargs.get("max_seq_len", None)
+        if max_length is None and old_max_seq_len:
+            if verbose:
+                warnings.warn(
+                    "The `max_seq_len` argument is deprecated and will be removed in a future version, "
+                    "please use `max_length` instead.",
+                    FutureWarning,
+                )
+            max_length = old_max_seq_len
+        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+        padding_strategy, truncation_strategy, max_length, kwargs = (
+            self._get_padding_truncation_strategies(
+                padding=padding,
+                truncation=truncation,
+                max_length=max_length,
+                pad_to_multiple_of=pad_to_multiple_of,
+                verbose=verbose,
+                **kwargs,
+            )
+        )
+
+        return self._encode_plus(
+            text=text,
+            text_pair=text_pair,
+            add_special_tokens=add_special_tokens,
+            padding_strategy=padding_strategy,
+            truncation_strategy=truncation_strategy,
+            max_length=max_length,
+            stride=stride,
+            is_split_into_words=is_split_into_words,
+            pad_to_multiple_of=pad_to_multiple_of,
+            return_tensors=return_tensors,
+            return_position_ids=return_position_ids,
+            return_token_type_ids=return_token_type_ids,
+            return_attention_mask=return_attention_mask,
+            return_overflowing_tokens=return_overflowing_tokens,
+            return_special_tokens_mask=return_special_tokens_mask,
+            return_offsets_mapping=return_offsets_mapping,
+            return_length=return_length,
+            verbose=verbose,
+            **kwargs,
+        )
+
+    def encode_plus(
+        self,
+        text: Union[TextInput, PreTokenizedInput, EncodedInput],
+        text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
+        add_special_tokens: bool = True,
+        padding: Union[bool, str, PaddingStrategy] = False,
+        truncation: Union[bool, str, TruncationStrategy] = None,
+        max_length: Optional[int] = None,
+        stride: int = 0,
+        is_split_into_words: bool = False,
+        pad_to_multiple_of: Optional[int] = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        return_token_type_ids: Optional[bool] = None,
+        return_attention_mask: Optional[bool] = None,
+        return_overflowing_tokens: bool = False,
+        return_special_tokens_mask: bool = False,
+        return_offsets_mapping: bool = False,
+        return_length: bool = False,
+        verbose: bool = True,
+        **kwargs,
+    ) -> BatchEncoding:
+        """
+        Tokenize and prepare for the model a sequence or a pair of sequences.
+
+        <Tip warning={true}>
+
+        This method is deprecated, `__call__` should be used instead.
+
+        </Tip>
+
+        Args:
+            text (`str`, `List[str]` or `List[int]` (the latter only for non-fast tokenizers)):
+                The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
+                `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
+                method).
+            text_pair (`str`, `List[str]` or `List[int]`, *optional*):
+                Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
+                the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
+                method).
+        """
+
+        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+        padding_strategy, truncation_strategy, max_length, kwargs = (
+            self._get_padding_truncation_strategies(
+                padding=padding,
+                truncation=truncation,
+                max_length=max_length,
+                pad_to_multiple_of=pad_to_multiple_of,
+                verbose=verbose,
+                **kwargs,
+            )
+        )
+
+        return self._encode_plus(
+            text=text,
+            text_pair=text_pair,
+            add_special_tokens=add_special_tokens,
+            padding_strategy=padding_strategy,
+            truncation_strategy=truncation_strategy,
+            max_length=max_length,
+            stride=stride,
+            is_split_into_words=is_split_into_words,
+            pad_to_multiple_of=pad_to_multiple_of,
+            return_tensors=return_tensors,
+            return_token_type_ids=return_token_type_ids,
+            return_attention_mask=return_attention_mask,
+            return_overflowing_tokens=return_overflowing_tokens,
+            return_special_tokens_mask=return_special_tokens_mask,
+            return_offsets_mapping=return_offsets_mapping,
+            return_length=return_length,
+            verbose=verbose,
+            **kwargs,
+        )
+
+    def _encode_plus(
+        self,
+        text: Union[TextInput, PreTokenizedInput, EncodedInput],
+        text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
+        add_special_tokens: bool = True,
+        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+        max_length: Optional[int] = None,
+        stride: int = 0,
+        is_split_into_words: bool = False,
+        pad_to_multiple_of: Optional[int] = None,
+        return_position_ids: Optional[bool] = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        return_token_type_ids: Optional[bool] = None,
+        return_attention_mask: Optional[bool] = None,
+        return_overflowing_tokens: bool = False,
+        return_special_tokens_mask: bool = False,
+        return_offsets_mapping: bool = False,
+        return_length: bool = False,
+        verbose: bool = True,
+        **kwargs,
+    ) -> BatchEncoding:
+        raise NotImplementedError
+
+    def batch_encode(
+        self,
+        batch_text_or_text_pairs: Union[
+            List[TextInput],
+            List[TextInputPair],
+            List[PreTokenizedInput],
+            List[PreTokenizedInputPair],
+            List[EncodedInput],
+            List[EncodedInputPair],
+        ],
+        max_length=None,
+        stride: int = 0,
+        is_split_into_words: bool = False,
+        padding: Union[bool, str, PaddingStrategy] = False,
+        truncation: Union[bool, str, TruncationStrategy] = False,
+        return_position_ids=None,
+        # TODO(wj-mcat): keep align with `encode` method
+        return_token_type_ids=None,
+        return_attention_mask=None,
+        return_length=False,
+        return_overflowing_tokens=False,
+        return_special_tokens_mask=False,
+        return_dict=True,
+        return_offsets_mapping=False,
+        add_special_tokens=True,
+        pad_to_multiple_of: Optional[int] = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        verbose: bool = True,
+        **kwargs,
+    ) -> BatchEncoding:
+        """
+        Performs tokenization and uses the tokenized tokens to prepare model
+        inputs. It supports batch inputs of sequence or sequence pair.
+
+        Args:
+            batch_text_or_text_pairs (list):
+                The element of list can be sequence or sequence pair, and the
+                sequence is a string or a list of strings depending on whether
+                it has been pretokenized. If each sequence is provided as a list
+                of strings (pretokenized), you must set `is_split_into_words` as
+                `True` to disambiguate with a sequence pair.
+
+        Returns:
+            dict or list[dict]:
+                The returned dict or list of dicts has the same optional items as
+                described in `__call__`.
+
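+        Example:
+            A minimal sketch; the tokenizer is assumed to be a concrete subclass instance
+            and the inputs are illustrative:
+
+            .. code-block::
+
+                batch = tokenizer.batch_encode(
+                    ["Hello world!", "PaddleNLP tokenizers"],
+                    max_length=16, padding='max_length', truncation=True)
+                # with `return_dict=True` (default), batch["input_ids"] is a list of lists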
+        """
+        # Backward compatibility for 'max_seq_len'
+        old_max_seq_len = kwargs.get("max_seq_len", None)
+        if max_length is None and old_max_seq_len:
+            if verbose:
+                warnings.warn(
+                    "The `max_seq_len` argument is deprecated and will be removed in a future version, "
+                    "please use `max_length` instead.",
+                    FutureWarning,
+                )
+            max_length = old_max_seq_len
+        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+        padding_strategy, truncation_strategy, max_length, kwargs = (
+            self._get_padding_truncation_strategies(
+                padding=padding,
+                truncation=truncation,
+                max_length=max_length,
+                pad_to_multiple_of=pad_to_multiple_of,
+                verbose=verbose,
+                **kwargs,
+            )
+        )
+
+        return self._batch_encode_plus(
+            batch_text_or_text_pairs=batch_text_or_text_pairs,
+            add_special_tokens=add_special_tokens,
+            padding_strategy=padding_strategy,
+            truncation_strategy=truncation_strategy,
+            max_length=max_length,
+            stride=stride,
+            is_split_into_words=is_split_into_words,
+            pad_to_multiple_of=pad_to_multiple_of,
+            return_tensors=return_tensors,
+            return_position_ids=return_position_ids,
+            return_token_type_ids=return_token_type_ids,
+            return_attention_mask=return_attention_mask,
+            return_overflowing_tokens=return_overflowing_tokens,
+            return_special_tokens_mask=return_special_tokens_mask,
+            return_dict=return_dict,
+            return_offsets_mapping=return_offsets_mapping,
+            return_length=return_length,
+            verbose=verbose,
+            **kwargs,
+        )
+
+    def _batch_encode_plus(
+        self,
+        batch_text_or_text_pairs: Union[
+            List[TextInput],
+            List[TextInputPair],
+            List[PreTokenizedInput],
+            List[PreTokenizedInputPair],
+            List[EncodedInput],
+            List[EncodedInputPair],
+        ],
+        add_special_tokens: bool = True,
+        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+        max_length: Optional[int] = None,
+        stride: int = 0,
+        is_split_into_words: bool = False,
+        pad_to_multiple_of: Optional[int] = None,
+        return_position_ids: Optional[bool] = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        return_token_type_ids: Optional[bool] = None,
+        return_attention_mask: Optional[bool] = None,
+        return_overflowing_tokens: bool = False,
+        return_special_tokens_mask: bool = False,
+        return_dict: bool = True,
+        return_offsets_mapping: bool = False,
+        return_length: bool = False,
+        verbose: bool = True,
+        **kwargs,
+    ) -> BatchEncoding:
+        raise NotImplementedError
+
+    def pad(
+        self,
+        encoded_inputs: Union[
+            BatchEncoding,
+            List[BatchEncoding],
+            Dict[str, EncodedInput],
+            Dict[str, List[EncodedInput]],
+            List[Dict[str, EncodedInput]],
+        ],
+        padding: Union[bool, str, PaddingStrategy] = True,
+        max_length: Optional[int] = None,
+        pad_to_multiple_of: Optional[int] = None,
+        return_attention_mask: Optional[bool] = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        verbose: bool = True,
+    ) -> BatchEncoding:
+        """
+        Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length
+        in the batch.
+
+        Padding side (left/right) and padding token ids are defined at the tokenizer level (with `self.padding_side`,
+        `self.pad_token_id` and `self.pad_token_type_id`).
+
+        <Tip>
+
+        If the `encoded_inputs` passed are dictionaries of numpy arrays or Paddle tensors, the
+        result will use the same type unless you provide a different tensor type with `return_tensors`.
+        </Tip>
+
+        Args:
+            encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `Dict[str, List[int]]`, `Dict[str, List[List[int]]]` or `List[Dict[str, List[int]]]`):
+                Tokenized inputs. Can represent one input ([`BatchEncoding`] or `Dict[str, List[int]]`) or a batch of
+                tokenized inputs (list of [`BatchEncoding`], *Dict[str, List[List[int]]]* or *List[Dict[str,
+                List[int]]]*) so you can use this method during preprocessing as well as in a Paddle Dataloader
+                collate function.
+
+                Instead of `List[int]` you can have tensors (numpy arrays, Paddle tensors), see
+                the note above for the return type.
+            padding (`bool`, `str` or [`PaddingStrategy`], *optional*, defaults to `True`):
+                 Select a strategy to pad the returned sequences (according to the model's padding side and padding
+                 index) among:
+
+                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+                  sequence is provided).
+                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+                  acceptable input length for the model if that argument is not provided.
+                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+                  lengths).
+            max_length (`int`, *optional*):
+                Maximum length of the returned list and optionally padding length (see above).
+            pad_to_multiple_of (`int`, *optional*):
+                If set will pad the sequence to a multiple of the provided value.
+
+                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+                >= 7.5 (Volta).
+            return_attention_mask (`bool`, *optional*):
+                Whether to return the attention mask. If left to the default, will return the attention mask according
+                to the specific tokenizer's default, defined by the `return_outputs` attribute.
+
+                [What are attention masks?](../glossary#attention-mask)
+            return_tensors (`str` or [`TensorType`], *optional*):
+                If set, will return tensors instead of list of python integers. Acceptable values are:
+
+                - `'pd'`: Return Paddle `paddle.Tensor` objects.
+                - `'np'`: Return Numpy `np.ndarray` objects.
+            verbose (`bool`, *optional*, defaults to `True`):
+                Whether or not to print more information and warnings.
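+
+        Example:
+            A minimal sketch of padding a ragged batch, assuming the default right
+            padding side; the ids are illustrative and `pad_id` stands for
+            `tokenizer.pad_token_id`:
+
+            .. code-block::
+
+                batch = {"input_ids": [[1, 2, 3], [4, 5]]}
+                padded = tokenizer.pad(batch, padding='longest', return_attention_mask=True)
+                # padded["input_ids"]      -> [[1, 2, 3], [4, 5, pad_id]]
+                # padded["attention_mask"] -> [[1, 1, 1], [1, 1, 0]]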
+        """
+        # If we have a list of dicts, let's convert it in a dict of lists
+        if isinstance(encoded_inputs, (list, tuple)) and isinstance(
+            encoded_inputs[0], (dict, BatchEncoding)
+        ):
+            encoded_inputs = {
+                key: [example[key] for example in encoded_inputs]
+                for key in encoded_inputs[0].keys()
+            }
+
+        # The model's main input name, usually `input_ids`, has to be passed for padding
+        if self.model_input_names[0] not in encoded_inputs:
+            raise ValueError(
+                "You should supply an encoding or a list of encodings to this method "
+                f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}"
+            )
+
+        required_input = encoded_inputs[self.model_input_names[0]]
+
+        if not required_input:
+            if return_attention_mask:
+                encoded_inputs["attention_mask"] = []
+            return encoded_inputs
+
+        # If we have Paddle/NumPy tensors/arrays as inputs, we cast them as python objects
+        # and rebuild them afterwards if no return_tensors is specified
+
+        first_element = required_input[0]
+        if isinstance(first_element, (list, tuple)):
+            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
+            for item in required_input:
+                if len(item) != 0:
+                    first_element = item[0]
+                    break
+        # At this point, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do.
+        if not isinstance(first_element, (int, list, tuple)):
+            if isinstance(first_element, paddle.Tensor):
+                return_tensors = "pd" if return_tensors is None else return_tensors
+            else:
+                raise ValueError(
+                    f"type of {first_element} unknown: {type(first_element)}. "
+                    "Should be either a python object or a paddle.Tensor."
+                )
+
+            for key, value in encoded_inputs.items():
+                encoded_inputs[key] = to_py_obj(value)
+
+        # Convert padding_strategy in PaddingStrategy
+        padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
+            padding=padding, max_length=max_length, verbose=verbose
+        )
+
+        required_input = encoded_inputs[self.model_input_names[0]]
+        if required_input and not isinstance(required_input[0], (list, tuple)):
+            encoded_inputs = self._pad(
+                encoded_inputs,
+                max_length=max_length,
+                padding_strategy=padding_strategy,
+                pad_to_multiple_of=pad_to_multiple_of,
+                return_attention_mask=return_attention_mask,
+            )
+            return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
+
+        batch_size = len(required_input)
+        assert all(
+            len(v) == batch_size for v in encoded_inputs.values()
+        ), "Some items in the output dictionary have a different batch size than others."
+
+        if padding_strategy == PaddingStrategy.LONGEST:
+            max_length = max(len(inputs) for inputs in required_input)
+            padding_strategy = PaddingStrategy.MAX_LENGTH
+
+        batch_outputs = {}
+        for i in range(batch_size):
+            inputs = dict((k, v[i]) for k, v in encoded_inputs.items())
+            outputs = self._pad(
+                inputs,
+                max_length=max_length,
+                padding_strategy=padding_strategy,
+                pad_to_multiple_of=pad_to_multiple_of,
+                return_attention_mask=return_attention_mask,
+            )
+
+            for key, value in outputs.items():
+                if key not in batch_outputs:
+                    batch_outputs[key] = []
+                batch_outputs[key].append(value)
+
+        return BatchEncoding(batch_outputs, tensor_type=return_tensors)
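A minimal standalone sketch of what the "longest" padding path above produces, using plain Python lists and an assumed pad_token_id of 0 with right-side padding (both are illustrative defaults, not values read from any tokenizer in this patch):

    # list of dicts -> dict of lists, as in the first step of `pad`
    encoded = [{"input_ids": [31, 42, 7]}, {"input_ids": [8, 15]}]
    batch = {k: [ex[k] for ex in encoded] for k in encoded[0]}
    max_len = max(len(seq) for seq in batch["input_ids"])
    pad_token_id = 0  # assumption for the sketch
    batch["attention_mask"] = [[1] * len(s) + [0] * (max_len - len(s)) for s in batch["input_ids"]]
    batch["input_ids"] = [s + [pad_token_id] * (max_len - len(s)) for s in batch["input_ids"]]
    print(batch["input_ids"])       # [[31, 42, 7], [8, 15, 0]]
    print(batch["attention_mask"])  # [[1, 1, 1], [1, 1, 0]]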
+
+    def create_token_type_ids_from_sequences(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Create the token type IDs corresponding to the sequences passed. [What are token type
+        IDs?](../glossary#token-type-ids)
+
+        Should be overridden in a subclass if the model has a special way of building those.
+
+        Args:
+            token_ids_0 (`List[int]`): The first tokenized sequence.
+            token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.
+
+        Returns:
+            `List[int]`: The token type ids.
+        """
+        if token_ids_1 is None:
+            return len(token_ids_0) * [0]
+        return [0] * len(token_ids_0) + [1] * len(token_ids_1)
+
+    def build_inputs_with_special_tokens(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+        adding special tokens.
+
+        This implementation does not add special tokens and this method should be overridden in a subclass.
+
+        Args:
+            token_ids_0 (`List[int]`): The first tokenized sequence.
+            token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.
+
+        Returns:
+            `List[int]`: The model input with special tokens.
+        """
+        if token_ids_1 is None:
+            return token_ids_0
+        return token_ids_0 + token_ids_1
+
+    def build_offset_mapping_with_special_tokens(
+        self, offset_mapping_0, offset_mapping_1=None
+    ):
+        """
+        Build an offset map from a pair of offset maps by concatenating them and adding the offsets of special tokens.
+
+        Should be overridden in a subclass if the model has a special way of building those.
+
+        Args:
+            offset_mapping_0 (List[tuple]):
+                List of char offsets to which the special tokens will be added.
+            offset_mapping_1 (List[tuple], optional):
+                Optional second list of char offsets for offset mapping pairs.
+
+        Returns:
+            List[tuple]: List of char offsets with the appropriate offsets of special tokens.
+        """
+        if offset_mapping_1 is None:
+            return offset_mapping_0
+
+        return offset_mapping_0 + offset_mapping_1
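The hooks above add no special tokens in this base class; an encoder-style subclass usually overrides them. A hedged, illustrative sketch of such an override (the [CLS]/[SEP] ids 101/102 are assumptions and are not used by any tokenizer in this patch):

    CLS, SEP = 101, 102

    def build_inputs_with_special_tokens(ids0, ids1=None):
        if ids1 is None:
            return [CLS] + ids0 + [SEP]
        return [CLS] + ids0 + [SEP] + ids1 + [SEP]

    def create_token_type_ids_from_sequences(ids0, ids1=None):
        if ids1 is None:
            return [0] * (len(ids0) + 2)
        return [0] * (len(ids0) + 2) + [1] * (len(ids1) + 1)

    print(build_inputs_with_special_tokens([7, 8], [9]))      # [101, 7, 8, 102, 9, 102]
    print(create_token_type_ids_from_sequences([7, 8], [9]))  # [0, 0, 0, 0, 1, 1]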
+
+    def prepare_for_model(
+        self,
+        ids,
+        pair_ids=None,
+        padding: Union[bool, str, PaddingStrategy] = False,
+        truncation: Union[bool, str, TruncationStrategy] = False,
+        max_length: Optional[int] = None,
+        stride: int = 0,
+        pad_to_multiple_of: Optional[int] = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        return_position_ids=None,
+        return_token_type_ids: Optional[bool] = None,
+        return_attention_mask: Optional[bool] = None,
+        return_length=False,
+        return_overflowing_tokens=False,
+        return_special_tokens_mask=False,
+        return_offsets_mapping=False,
+        add_special_tokens=True,
+        verbose: bool = True,
+        prepend_batch_axis: bool = False,
+        **kwargs,
+    ):
+        """
+        Prepares a sequence of input ids, or a pair of sequences of input ids, so
+        that it can be used by the model. It supports a sequence or a sequence pair
+        as input; batch input is not allowed.
+        """
+        padding_strategy, truncation_strategy, max_length, kwargs = (
+            self._get_padding_truncation_strategies(
+                padding=padding,
+                truncation=truncation,
+                max_length=max_length,
+                pad_to_multiple_of=pad_to_multiple_of,
+                verbose=verbose,
+                **kwargs,
+            )
+        )
+
+        pair = bool(pair_ids is not None)
+        len_ids = len(ids)
+        len_pair_ids = len(pair_ids) if pair else 0
+
+        if return_token_type_ids and not add_special_tokens:
+            raise ValueError(
+                "Asking to return token_type_ids while setting add_special_tokens to False "
+                "results in an undefined behavior. Please set add_special_tokens to True or "
+                "set return_token_type_ids to None."
+            )
+
+        if (
+            return_overflowing_tokens
+            and truncation_strategy == TruncationStrategy.LONGEST_FIRST
+            and pair_ids is not None
+        ):
+            raise ValueError(
+                "Not possible to return overflowing tokens for pair of sequences with the "
+                "`longest_first`. Please select another truncation strategy than `longest_first`, "
+                "for instance `only_second` or `only_first`."
+            )
+
+        # Load from model defaults
+        if return_token_type_ids is None:
+            return_token_type_ids = "token_type_ids" in self.model_input_names
+        if return_attention_mask is None:
+            return_attention_mask = "attention_mask" in self.model_input_names
+        if return_position_ids is None:
+            return_position_ids = "position_ids" in self.model_input_names
+        encoded_inputs = {}
+        # Truncation: Handle max sequence length
+        total_len = (
+            len_ids
+            + len_pair_ids
+            + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
+        )
+
+        overflowing_tokens = []
+
+        if (
+            truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE
+            and max_length
+            and total_len > max_length
+        ):
+            ids, pair_ids, overflowing_tokens = self.truncate_sequences(
+                ids,
+                pair_ids=pair_ids,
+                num_tokens_to_remove=total_len - max_length,
+                truncation_strategy=truncation_strategy,
+                stride=stride,
+            )
+        if return_overflowing_tokens:
+            encoded_inputs["overflowing_tokens"] = overflowing_tokens
+            encoded_inputs["num_truncated_tokens"] = total_len - max_length
+
+        # Add special tokens
+        if add_special_tokens:
+            sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
+            token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
+        else:
+            sequence = ids + pair_ids if pair else ids
+            token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
+
+        # Build output dictionary
+        encoded_inputs["input_ids"] = sequence
+        if return_token_type_ids:
+            encoded_inputs["token_type_ids"] = token_type_ids
+        if return_special_tokens_mask:
+            if add_special_tokens:
+                encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(
+                    ids, pair_ids
+                )
+            else:
+                encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
+
+        if return_offsets_mapping and "text" in kwargs and "text_pair" in kwargs:
+            text = kwargs.pop("text")
+            text_pair = kwargs.pop("text_pair")
+
+            token_offset_mapping = self.get_offset_mapping(text)
+            token_pair_offset_mapping = (
+                self.get_offset_mapping(text_pair) if text_pair is not None else None
+            )
+            if max_length and total_len > max_length:
+                token_offset_mapping, token_pair_offset_mapping, _ = (
+                    self.truncate_sequences(
+                        token_offset_mapping,
+                        pair_ids=token_pair_offset_mapping,
+                        num_tokens_to_remove=total_len - max_length,
+                        truncation_strategy=truncation_strategy,
+                        stride=stride,
+                    )
+                )
+            if add_special_tokens:
+                offset_mapping = self.build_offset_mapping_with_special_tokens(
+                    token_offset_mapping, token_pair_offset_mapping
+                )
+            else:
+                offset_mapping = (
+                    token_offset_mapping + token_pair_offset_mapping
+                    if token_pair_offset_mapping
+                    else token_offset_mapping
+                )
+            encoded_inputs["offset_mapping"] = offset_mapping
+
+        # Check lengths
+        self._eventual_warn_about_too_long_sequence(
+            encoded_inputs["input_ids"], max_length, verbose
+        )
+
+        if return_position_ids:
+            encoded_inputs["position_ids"] = list(
+                range(len(encoded_inputs["input_ids"]))
+            )
+
+        if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
+            encoded_inputs = self.pad(
+                encoded_inputs,
+                max_length=max_length,
+                padding=padding_strategy.value,
+                pad_to_multiple_of=pad_to_multiple_of,
+                return_attention_mask=return_attention_mask,
+            )
+
+        if return_length:
+            encoded_inputs["length"] = len(encoded_inputs["input_ids"])
+            # for compatibility
+            encoded_inputs["seq_len"] = encoded_inputs["length"]
+
+        batch_outputs = BatchEncoding(
+            encoded_inputs,
+            tensor_type=return_tensors,
+            prepend_batch_axis=prepend_batch_axis,
+        )
+
+        return batch_outputs
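A usage sketch, assuming `tok` is a concrete subclass (e.g. GPTTokenizer) that has already been loaded from local vocab files and has a pad token configured; the call itself only exercises the method defined above:

    ids = tok.convert_tokens_to_ids(tok.tokenize("hello world"))
    out = tok.prepare_for_model(
        ids,
        max_length=8,
        padding="max_length",
        truncation=True,
        return_attention_mask=True,
    )
    # out is a BatchEncoding; out["input_ids"] has length 8 and the padded
    # positions are marked with 0 in out["attention_mask"].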
+
+    def truncate_sequences(
+        self,
+        ids: List[int],
+        pair_ids: Optional[List[int]] = None,
+        num_tokens_to_remove: int = 0,
+        truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
+        stride: int = 0,
+    ) -> Tuple[List[int], List[int], List[int]]:
+        """
+        Truncates a sequence pair following the strategy and returns the truncated sequences.
+
+        Args:
+            ids (`List[int]`):
+                Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
+                `convert_tokens_to_ids` methods.
+            pair_ids (`List[int]`, *optional*):
+                Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
+                and `convert_tokens_to_ids` methods.
+            num_tokens_to_remove (`int`, *optional*, defaults to 0):
+                Number of tokens to remove using the truncation strategy.
+            truncation_strategy (`str` or [`TruncationStrategy`], *optional*, defaults to `'longest_first'`):
+                The strategy to follow for truncation. Can be:
+
+                - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+                  maximum acceptable input length for the model if that argument is not provided. This will truncate
+                  token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
+                  batch of pairs) is provided.
+                - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+                  maximum acceptable input length for the model if that argument is not provided. This will only
+                  truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+                - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
+                  maximum acceptable input length for the model if that argument is not provided. This will only
+                  truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+                - `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths greater
+                  than the model maximum admissible input size).
+            stride (`int`, *optional*, defaults to 0):
+                If set to a positive number, the overflowing tokens returned will contain some tokens from the main
+                sequence returned. The value of this argument defines the number of additional tokens.
+
+        Returns:
+            `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
+            overflowing tokens. Note: The *longest_first* strategy returns an empty list of overflowing tokens if a
+            pair of sequences (or a batch of pairs) is provided.
+        """
+        if num_tokens_to_remove <= 0:
+            return ids, pair_ids, []
+
+        if not isinstance(truncation_strategy, TruncationStrategy):
+            truncation_strategy = TruncationStrategy(truncation_strategy)
+
+        overflowing_tokens = []
+        if truncation_strategy == TruncationStrategy.ONLY_FIRST or (
+            truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None
+        ):
+            if len(ids) > num_tokens_to_remove:
+                window_len = min(len(ids), stride + num_tokens_to_remove)
+                if self.truncation_side == "left":
+                    overflowing_tokens = ids[:window_len]
+                    ids = ids[num_tokens_to_remove:]
+                elif self.truncation_side == "right":
+                    overflowing_tokens = ids[-window_len:]
+                    ids = ids[:-num_tokens_to_remove]
+                else:
+                    raise ValueError(
+                        f"invalid truncation strategy: {self.truncation_side}, use 'left' or 'right'."
+                    )
+
+            else:
+                error_msg = (
+                    f"We need to remove {num_tokens_to_remove} to truncate the input "
+                    f"but the first sequence has a length {len(ids)}. "
+                )
+                if truncation_strategy == TruncationStrategy.ONLY_FIRST:
+                    error_msg = (
+                        error_msg + "Please select another truncation strategy than "
+                        f"{truncation_strategy}, for instance 'longest_first' or 'only_second'."
+                    )
+                logging.error(error_msg)
+        elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
+            warnings.warn(
+                f"Be aware, overflowing tokens are not returned for the setting you have chosen,"
+                f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' "
+                f"truncation strategy. So the returned list will always be empty even if some "
+                f"tokens have been removed."
+            )
+            for _ in range(num_tokens_to_remove):
+                if pair_ids is None or len(ids) > len(pair_ids):
+                    if self.truncation_side == "right":
+                        ids = ids[:-1]
+                    elif self.truncation_side == "left":
+                        ids = ids[1:]
+                    else:
+                        raise ValueError(
+                            "invalid truncation strategy:" + str(self.truncation_side)
+                        )
+                else:
+                    if self.truncation_side == "right":
+                        pair_ids = pair_ids[:-1]
+                    elif self.truncation_side == "left":
+                        pair_ids = pair_ids[1:]
+                    else:
+                        raise ValueError(
+                            "invalid truncation strategy:" + str(self.truncation_side)
+                        )
+        elif (
+            truncation_strategy == TruncationStrategy.ONLY_SECOND
+            and pair_ids is not None
+        ):
+            if len(pair_ids) > num_tokens_to_remove:
+                window_len = min(len(pair_ids), stride + num_tokens_to_remove)
+                if self.truncation_side == "right":
+                    overflowing_tokens = pair_ids[-window_len:]
+                    pair_ids = pair_ids[:-num_tokens_to_remove]
+                elif self.truncation_side == "left":
+                    overflowing_tokens = pair_ids[:window_len]
+                    pair_ids = pair_ids[num_tokens_to_remove:]
+                else:
+                    raise ValueError(
+                        "invalid truncation strategy:" + str(self.truncation_side)
+                    )
+            else:
+                logging.error(
+                    f"We need to remove {num_tokens_to_remove} to truncate the input "
+                    f"but the second sequence has a length {len(pair_ids)}. "
+                    f"Please select another truncation strategy than {truncation_strategy}, "
+                    f"for instance 'longest_first' or 'only_first'."
+                )
+
+        return (ids, pair_ids, overflowing_tokens)
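The 'longest_first' branch removes tokens one at a time from whichever sequence is currently longer. A standalone sketch of that loop (right-side truncation assumed):

    def longest_first(ids, pair_ids, num_tokens_to_remove):
        ids, pair_ids = list(ids), list(pair_ids)
        for _ in range(num_tokens_to_remove):
            if len(ids) > len(pair_ids):
                ids = ids[:-1]            # trim the longer (first) sequence
            else:
                pair_ids = pair_ids[:-1]  # trim the second sequence
        return ids, pair_ids

    print(longest_first([1, 2, 3, 4], [5, 6, 7], 3))  # ([1, 2], [5, 6])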
+
+    def _pad(
+        self,
+        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
+        max_length: Optional[int] = None,
+        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+        pad_to_multiple_of: Optional[int] = None,
+        return_attention_mask: Optional[bool] = None,
+    ) -> dict:
+        """
+        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
+
+        Args:
+            encoded_inputs:
+                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
+            max_length: maximum length of the returned list and optionally padding length (see below).
+                Will truncate by taking into account the special tokens.
+            padding_strategy: PaddingStrategy to use for padding.
+
+                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
+                - PaddingStrategy.MAX_LENGTH: Pad to the max length specified by `max_length`
+                - PaddingStrategy.DO_NOT_PAD: Do not pad (default)
+                The tokenizer padding sides are defined in self.padding_side:
+
+                    - 'left': pads on the left of the sequences
+                    - 'right': pads on the right of the sequences
+            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
+                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+                >= 7.0 (Volta).
+            return_attention_mask:
+                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
+        """
+        # Load from model defaults
+        if return_attention_mask is None:
+            return_attention_mask = (
+                "attention_mask" in self.model_input_names
+                or "attention_mask" in encoded_inputs
+            )
+
+        required_input = encoded_inputs[self.model_input_names[0]]
+
+        if padding_strategy == PaddingStrategy.LONGEST:
+            max_length = len(required_input)
+
+        if (
+            max_length is not None
+            and pad_to_multiple_of is not None
+            and (max_length % pad_to_multiple_of != 0)
+        ):
+            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+
+        needs_to_be_padded = (
+            padding_strategy != PaddingStrategy.DO_NOT_PAD
+            and len(required_input) != max_length
+        )
+
+        # Initialize attention mask if not present.
+        if return_attention_mask and "attention_mask" not in encoded_inputs:
+            encoded_inputs["attention_mask"] = [1] * len(required_input)
+
+        if needs_to_be_padded:
+            difference = max_length - len(required_input)
+
+            if self.padding_side == "right":
+                if return_attention_mask:
+                    encoded_inputs["attention_mask"] = (
+                        encoded_inputs["attention_mask"] + [0] * difference
+                    )
+                if "token_type_ids" in encoded_inputs:
+                    encoded_inputs["token_type_ids"] = (
+                        encoded_inputs["token_type_ids"]
+                        + [self.pad_token_type_id] * difference
+                    )
+                if "special_tokens_mask" in encoded_inputs:
+                    encoded_inputs["special_tokens_mask"] = (
+                        encoded_inputs["special_tokens_mask"] + [1] * difference
+                    )
+                if "offset_mapping" in encoded_inputs:
+                    encoded_inputs["offset_mapping"] = (
+                        encoded_inputs["offset_mapping"] + [(0, 0)] * difference
+                    )
+                if "position_ids" in encoded_inputs:
+                    encoded_inputs["position_ids"] = (
+                        encoded_inputs["position_ids"] + [0] * difference
+                    )
+                # NOTE: In ernie3.0-qa, the type of `*_positions` is int.
+                if "start_positions" in encoded_inputs and isinstance(
+                    encoded_inputs["start_positions"], list
+                ):
+                    encoded_inputs["start_positions"] = (
+                        encoded_inputs["start_positions"] + [0] * difference
+                    )
+                if "end_positions" in encoded_inputs and isinstance(
+                    encoded_inputs["end_positions"], list
+                ):
+                    encoded_inputs["end_positions"] = (
+                        encoded_inputs["end_positions"] + [0] * difference
+                    )
+                encoded_inputs[self.model_input_names[0]] = (
+                    required_input + [self.pad_token_id] * difference
+                )
+            elif self.padding_side == "left":
+                if return_attention_mask:
+                    encoded_inputs["attention_mask"] = [
+                        0
+                    ] * difference + encoded_inputs["attention_mask"]
+                if "token_type_ids" in encoded_inputs:
+                    encoded_inputs["token_type_ids"] = [
+                        self.pad_token_type_id
+                    ] * difference + encoded_inputs["token_type_ids"]
+                if "special_tokens_mask" in encoded_inputs:
+                    encoded_inputs["special_tokens_mask"] = [
+                        1
+                    ] * difference + encoded_inputs["special_tokens_mask"]
+                if "offset_mapping" in encoded_inputs:
+                    encoded_inputs["offset_mapping"] = [
+                        (0, 0)
+                    ] * difference + encoded_inputs["offset_mapping"]
+                if "position_ids" in encoded_inputs:
+                    encoded_inputs["position_ids"] = [0] * difference + encoded_inputs[
+                        "position_ids"
+                    ]
+                if "start_positions" in encoded_inputs and isinstance(
+                    encoded_inputs["start_positions"], list
+                ):
+                    encoded_inputs["start_positions"] = [
+                        0
+                    ] * difference + encoded_inputs["start_positions"]
+                if "end_positions" in encoded_inputs and isinstance(
+                    encoded_inputs["end_positions"], list
+                ):
+                    encoded_inputs["end_positions"] = [0] * difference + encoded_inputs[
+                        "end_positions"
+                    ]
+                encoded_inputs[self.model_input_names[0]] = [
+                    self.pad_token_id
+                ] * difference + required_input
+            else:
+                raise ValueError("Invalid padding strategy:" + str(self.padding_side))
+
+        return encoded_inputs
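The `pad_to_multiple_of` handling above simply rounds the target length up to the next multiple. A quick standalone check of that arithmetic:

    def round_up(max_length, multiple):
        if max_length % multiple != 0:
            max_length = ((max_length // multiple) + 1) * multiple
        return max_length

    print(round_up(9, 8))   # 16
    print(round_up(16, 8))  # 16 (already a multiple, unchanged)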
+
+    def convert_tokens_to_string(self, tokens: List[str]) -> str:
+        """
+        Converts a sequence of tokens into a single string. The simplest way to do it is `" ".join(tokens)`, but we
+        often want to remove sub-word tokenization artifacts at the same time.
+
+        Args:
+            tokens (`List[str]`): The tokens to join into a string.
+
+        Returns:
+            `str`: The joined tokens.
+        """
+        raise NotImplementedError
+
+    def batch_decode(
+        self,
+        sequences: Union[List[int], List[List[int]], "np.ndarray", "paddle.Tensor"],
+        skip_special_tokens: bool = False,
+        clean_up_tokenization_spaces: bool = True,
+        **kwargs,
+    ) -> List[str]:
+        """
+        Convert a list of lists of token ids into a list of strings by calling decode.
+
+        Args:
+            sequences (`Union[List[int], List[List[int]], np.ndarray, paddle.Tensor]`):
+                List of tokenized input ids. Can be obtained using the `__call__` method.
+            skip_special_tokens (`bool`, *optional*, defaults to `False`):
+                Whether or not to remove special tokens in the decoding.
+            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
+                Whether or not to clean up the tokenization spaces.
+            kwargs (additional keyword arguments, *optional*):
+                Will be passed to the underlying model specific decode method.
+
+        Returns:
+            `List[str]`: The list of decoded sentences.
+        """
+        return [
+            self.decode(
+                seq,
+                skip_special_tokens=skip_special_tokens,
+                clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+                **kwargs,
+            )
+            for seq in sequences
+        ]
+
+    def decode(
+        self,
+        token_ids: Union[int, List[int], "np.ndarray", "paddle.Tensor"],
+        skip_special_tokens: bool = False,
+        clean_up_tokenization_spaces: bool = True,
+        **kwargs,
+    ) -> str:
+        """
+        Converts a sequence of ids into a string, using the tokenizer and vocabulary, with options to remove special
+        tokens and clean up tokenization spaces.
+
+        Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
+
+        Args:
+            token_ids (`Union[int, List[int], np.ndarray, paddle.Tensor]`):
+                List of tokenized input ids. Can be obtained using the `__call__` method.
+            skip_special_tokens (`bool`, *optional*, defaults to `False`):
+                Whether or not to remove special tokens in the decoding.
+            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
+                Whether or not to clean up the tokenization spaces.
+            kwargs (additional keyword arguments, *optional*):
+                Will be passed to the underlying model specific decode method.
+
+        Returns:
+            `str`: The decoded sentence.
+        """
+        # Convert inputs to python lists
+        token_ids = to_py_obj(token_ids)
+
+        return self._decode(
+            token_ids=token_ids,
+            skip_special_tokens=skip_special_tokens,
+            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+            **kwargs,
+        )
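Since `_decode` is left abstract below, decoding only works on a concrete subclass. A hedged usage sketch, assuming `tok` is such a subclass (e.g. GPTTokenizer) loaded from local files:

    ids = tok(["first sentence", "second one"])["input_ids"]
    texts = tok.batch_decode(ids, skip_special_tokens=True)
    # `texts` is a list of two strings that should closely match the inputs.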
+
+    def _decode(
+        self,
+        token_ids: Union[int, List[int]],
+        skip_special_tokens: bool = False,
+        clean_up_tokenization_spaces: bool = True,
+        **kwargs,
+    ) -> str:
+        raise NotImplementedError
+
+    def get_special_tokens_mask(
+        self,
+        token_ids_0: List[int],
+        token_ids_1: Optional[List[int]] = None,
+        already_has_special_tokens: bool = False,
+    ) -> List[int]:
+        """
+        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
+        special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of ids of the first sequence.
+            token_ids_1 (`List[int]`, *optional*):
+                List of ids of the second sequence.
+            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                Whether or not the token list is already formatted with special tokens for the model.
+
+        Returns:
+            A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+        """
+        assert already_has_special_tokens and token_ids_1 is None, (
+            "You cannot use ``already_has_special_tokens=False`` with this tokenizer. "
+            "Please use a slow (full python) tokenizer to activate this argument. "
+            "Or set `return_special_tokens_mask=True` when calling the encoding method "
+            "to get the special tokens mask in any tokenizer. "
+        )
+
+        all_special_ids = self.all_special_ids  # cache the property
+
+        special_tokens_mask = [
+            1 if token in all_special_ids else 0 for token in token_ids_0
+        ]
+
+        return special_tokens_mask
+
+    @staticmethod
+    def clean_up_tokenization(out_string: str) -> str:
+        """
+        Clean up a list of simple English tokenization artifacts like spaces before punctuation and abbreviated forms.
+
+        Args:
+            out_string (`str`): The text to clean up.
+
+        Returns:
+            `str`: The cleaned-up string.
+        """
+        out_string = (
+            out_string.replace(" .", ".")
+            .replace(" ?", "?")
+            .replace(" !", "!")
+            .replace(" ,", ",")
+            .replace(" ' ", "'")
+            .replace(" n't", "n't")
+            .replace(" 'm", "'m")
+            .replace(" 's", "'s")
+            .replace(" 've", "'ve")
+            .replace(" 're", "'re")
+        )
+        return out_string
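Because these are plain chained `str.replace` calls, the behaviour is easy to verify standalone:

    s = "I 'm sure it is n't ready , is it ?"
    s = (s.replace(" .", ".").replace(" ?", "?").replace(" !", "!").replace(" ,", ",")
         .replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m")
         .replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re"))
    print(s)  # I'm sure it isn't ready, is it?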
+
+    def _eventual_warn_about_too_long_sequence(
+        self, ids: List[int], max_length: Optional[int], verbose: bool
+    ):
+        """
+        Depending on the input and internal state we might trigger a warning about a sequence that is too long for its
+        corresponding model
+
+        Args:
+            ids (`List[int]`): The ids produced by the tokenization
+            max_length (`int`, *optional*): The max_length desired (does not trigger a warning if it is set)
+            verbose (`bool`): Whether or not to print more information and warnings.
+
+        """
+        if max_length is None and len(ids) > self.model_max_length and verbose:
+            if not self.deprecation_warnings.get(
+                "sequence-length-is-longer-than-the-specified-maximum", False
+            ):
+                logging.warning(
+                    "Token indices sequence length is longer than the specified maximum sequence length "
+                    f"for this model ({len(ids)} > {self.model_max_length}). Running this sequence through the model "
+                    "will result in indexing errors"
+                )
+            self.deprecation_warnings[
+                "sequence-length-is-longer-than-the-specified-maximum"
+            ] = True

+ 66 - 0
paddlex/inference/models_new/common/tokenizer/utils.py

@@ -0,0 +1,66 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+from typing import List
+
+__all__ = [
+    "convert_to_dict_message",
+    "fn_args_to_dict",
+]
+
+
+def convert_to_dict_message(conversation: List[List[str]]):
+    """Convert the list of chat messages to a role dictionary chat messages."""
+    conversations = []
+    for index, item in enumerate(conversation):
+        assert (
+            1 <= len(item) <= 2
+        ), "Each Rounds in conversation should have 1 or 2 elements."
+        if isinstance(item[0], str):
+            conversations.append({"role": "user", "content": item[0]})
+            if len(item) == 2 and isinstance(item[1], str):
+                conversations.append({"role": "assistant", "content": item[1]})
+            else:
+                # If there is only one element in item, it must be the last round.
+                # If it is not the last round, it must be an error.
+                if index != len(conversation) - 1:
+                    raise ValueError(f"Round {index} has error round")
+        else:
+            raise ValueError("Each round in list should be string")
+    return conversations
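A small example of the conversion performed above (only the last round may consist of a single user message):

    conversation = [
        ["Hello", "Hi, how can I help?"],
        ["What's the weather like?"],
    ]
    print(convert_to_dict_message(conversation))
    # [{'role': 'user', 'content': 'Hello'},
    #  {'role': 'assistant', 'content': 'Hi, how can I help?'},
    #  {'role': 'user', 'content': "What's the weather like?"}]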
+
+
+def fn_args_to_dict(func, *args, **kwargs):
+    """
+    Inspect function `func` and the arguments it will be called with, and build
+    a dict mapping argument names to their values (including defaults).
+    """
+    (spec_args, spec_varargs, spec_varkw, spec_defaults, _, _, _) = (
+        inspect.getfullargspec(func)
+    )
+    # add positional argument values
+    init_dict = dict(zip(spec_args, args))
+    # add default argument values
+    kwargs_dict = (
+        dict(zip(spec_args[-len(spec_defaults) :], spec_defaults))
+        if spec_defaults
+        else {}
+    )
+    for k in list(kwargs_dict.keys()):
+        if k in init_dict:
+            kwargs_dict.pop(k)
+    kwargs_dict.update(kwargs)
+    init_dict.update(kwargs_dict)
+    return init_dict
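For example, mapping the call `f(1, c=10)` back to a complete name-to-value dict, with the default picked up for `b`:

    def f(a, b=2, c=3):
        return a + b + c

    print(fn_args_to_dict(f, 1, c=10))  # {'a': 1, 'b': 2, 'c': 10}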

+ 647 - 0
paddlex/inference/models_new/common/tokenizer/vocab.py

@@ -0,0 +1,647 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import io
+import json
+import os
+import warnings
+
+import numpy as np
+
+
+class Vocab(object):
+    """
+    The class used to convert between tokens and ids. It also includes some
+    store/load functions.
+
+    Args:
+        counter (collections.Counter, optional): A Counter instance describing
+            the tokens and their frequencies. Its keys will be indexed according
+            to the order of frequency sorting to construct the mapping relationship.
+            If None, `token_to_idx` must be provided as the mapping relationship.
+            Default: None.
+        max_size (int, optional): Max size of vocab, not including special tokens.
+            Default: None.
+        min_freq (int, optional): Ignore tokens whose frequencies are less than
+            `min_freq`. Default: 1.
+        token_to_idx (dict, optional): A dict specifies the mapping relationship
+            between tokens and indices to be used. If provided, adjust the tokens
+            and indices mapping according to it. If None, counter must be provided.
+            Default: None.
+        unk_token (str, optional): Special token for unknown token. If no need,
+            it also could be None. Default: None.
+        pad_token (str, optional): Special token for padding token. If no need,
+            it also could be None. Default: None.
+        bos_token (str, optional): Special token for bos token. If no need, it
+            also could be None. Default: None.
+        eos_token (str, optional): Special token for eos token. If no need, it
+            also could be None. Default: None.
+
+        kwargs (dict): Keyword arguments ending with `_token`. It can be used
+            to specify further special tokens that will be exposed as attribute
+            of the vocabulary and associated with an index.
+    """
+
+    def __init__(
+        self,
+        counter=None,
+        max_size=None,
+        min_freq=1,
+        token_to_idx=None,
+        unk_token=None,
+        pad_token=None,
+        bos_token=None,
+        eos_token=None,
+        **kwargs
+    ):
+        # Handle special tokens
+        combs = (
+            ("unk_token", unk_token),
+            ("pad_token", pad_token),
+            ("bos_token", bos_token),
+            ("eos_token", eos_token),
+        )
+        for name, value in combs:
+            kwargs[name] = value
+        special_tokens = []
+        special_iter = kwargs.keys()
+        # sort alphabetically
+        special_iter = sorted(special_iter)
+        for special_token_name in special_iter:
+            # Test if kwarg specifies a special token
+            if not special_token_name.endswith("_token"):
+                raise ValueError(
+                    "{} is invalid. Only keyword arguments "
+                    "that end in '_token' are supported "
+                    "to declare special tokens.".format(special_token_name)
+                )
+
+            special_token = kwargs[special_token_name]
+            if special_token is not None and special_token not in special_tokens:
+                special_tokens.append(special_token)
+
+        if counter is None:
+            # use token_to_idx as dict to import pretrained vocabulary
+            assert token_to_idx, "token_to_idx should not be None when counter is None"
+            for special_token in special_tokens:
+                assert (
+                    special_token in token_to_idx
+                ), "{} is not in token_to_idx".format(special_token)
+            self._token_to_idx = token_to_idx
+            self._idx_to_token = {idx: token for token, idx in token_to_idx.items()}
+            if unk_token:
+                unk_index = self._token_to_idx[unk_token]
+                self._token_to_idx = collections.defaultdict(lambda: unk_index)
+                self._token_to_idx.update(token_to_idx)
+        else:
+            self._idx_to_token = {
+                idx: special_token for idx, special_token in enumerate(special_tokens)
+            }
+            self._token_to_idx = collections.defaultdict()
+            self._token_to_idx.update(
+                (token, idx) for idx, token in self._idx_to_token.items()
+            )
+            self._index_counter_keys(counter, special_tokens, max_size, min_freq)
+            if token_to_idx:
+                self._sort_index_according_to_user_specification(token_to_idx)
+            if unk_token:
+                self._token_to_idx.default_factory = lambda: self._token_to_idx[
+                    unk_token
+                ]
+
+        # _expose_tokens_as_attributes
+        self._identifiers_to_tokens = kwargs
+        for identifier, token in kwargs.items():
+            if identifier.startswith("_"):
+                raise ValueError(
+                    "It is not allowed to use identifiers starting with "
+                    "underscore. In Python identifier names beginning with "
+                    "underscore are internal."
+                )
+            if hasattr(self, identifier):
+                raise ValueError(
+                    "vocab.{} already exists. "
+                    "Please choose a different identifier for token {}".format(
+                        identifier, token
+                    )
+                )
+            setattr(self, identifier, token)
+
+    def _index_counter_keys(self, counter, special_tokens, max_size, min_freq):
+        # sort by frequency, then alphabetically
+        token_freqs = sorted(counter.items(), key=lambda x: x[0])
+        token_freqs.sort(key=lambda x: x[1], reverse=True)
+        # frequencies of special tokens are not counted when building vocabulary
+        # in frequency order
+        special_tokens = set(special_tokens)
+        max_size = None if max_size is None else max_size + len(special_tokens)
+        for token, freq in token_freqs:
+            if freq < min_freq or len(self._idx_to_token) == max_size:
+                break
+            if token not in special_tokens:
+                self._idx_to_token[max(list(self._idx_to_token.keys()) + [-1]) + 1] = (
+                    token
+                )
+                self._token_to_idx[token] = max(self._idx_to_token.keys())
+
+    def _sort_index_according_to_user_specification(self, token_to_idx):
+        # Sanity checks
+        if not set(token_to_idx.keys()).issubset(self.token_to_idx.keys()):
+            raise ValueError(
+                "User-specified token_to_idx mapping can only contain "
+                "tokens that will be part of the vocabulary."
+            )
+        if len(set(token_to_idx.values())) != len(token_to_idx):
+            raise ValueError("User-specified indices must not contain duplicates.")
+        if min(token_to_idx.values()) < 0 or max(token_to_idx.values()) >= len(
+            self.token_to_idx
+        ):
+            raise ValueError(
+                "User-specified indices must not be < 0 or >= the number of tokens "
+                "that will be in the vocabulary. The current vocab contains {}"
+                "tokens.".format(len(self.token_to_idx))
+            )
+
+        # Update index ordering
+        for token, new_idx in token_to_idx.items():
+            old_idx = self.token_to_idx[token]
+            ousted_token = self.idx_to_token[new_idx]
+
+            self.token_to_idx[token] = new_idx
+            self.token_to_idx[ousted_token] = old_idx
+            self.idx_to_token[old_idx] = ousted_token
+            self.idx_to_token[new_idx] = token
+
+    def to_tokens(self, indices):
+        """
+        Maps the input indices to token list.
+
+        Args:
+            indices (int|list[int]|tuple[int]|numpy.ndarray): The input index or indices for mapping.
+                Must be an `int` or 1D `list[int]`|`tuple[int]`|`numpy.ndarray`.
+
+        Returns:
+            str|list[str]: Obtained token(s). If `indices` is an integer, it
+            will return a str. If `indices` is a list/tuple of integers, it will
+            return a list of str.
+
+        Example:
+            .. code-block:: python
+
+                from paddlenlp.data import Vocab
+                # The vocab file. The sample file can be downloaded firstly.
+                # wget https://bj.bcebos.com/paddlenlp/data/senta_word_dict.txt
+                vocab_file_path = './senta_word_dict.txt'
+                # Initialize the Vocab
+                vocab = Vocab.load_vocabulary(
+                    vocab_file_path,
+                    unk_token='[UNK]',
+                    pad_token='[PAD]')
+                tokens = vocab.to_tokens([0, 1, 2, 3])
+                print(tokens)
+                # ['[PAD]', '[UNK]', '一斤三', '意面屋']
+        """
+        to_reduce = False
+        if not isinstance(indices, (list, tuple, np.ndarray)):
+            indices = [indices]
+            to_reduce = True
+        if isinstance(indices, (list, tuple)):
+            indices = np.asarray(indices)
+
+        if isinstance(indices, (np.ndarray)) and len(indices.shape) > 1:
+            raise ValueError(
+                "Token indices is invalid. Expected 1D array, but received {}D array. ".format(
+                    len(indices.shape)
+                )
+            )
+
+        tokens = []
+        for idx in indices:
+            if not isinstance(idx, (int, np.integer)):
+                warnings.warn(
+                    "The type of `to_tokens()`'s input `indices` is not `int` which will be forcibly transfered to `int`. "
+                )
+                idx = int(idx)
+
+            try:
+                tokens.append(self._idx_to_token[idx])
+            except KeyError:
+                raise ValueError(
+                    "Token index {} in the provided `indices` is invalid.".format(idx)
+                )
+
+        return tokens[0] if to_reduce else tokens
+
+    def to_indices(self, tokens):
+        """
+        Maps the input tokens into indices.
+
+        Args:
+            tokens (str|list[str]|tuple[str], optional): The input token(s) for
+                mapping.
+
+        Returns:
+            int|list[int]: Obtained index or indices. If `tokens` is a str, it will
+            return an integer. If `tokens` is a list/tuple of str, it will
+            return a list of integers.
+
+        Example:
+            .. code-block:: python
+
+                from paddlenlp.data import Vocab
+                # The vocab file. The sample file can be downloaded firstly.
+                # wget https://bj.bcebos.com/paddlenlp/data/senta_word_dict.txt
+                vocab_file_path = './senta_word_dict.txt'
+                # Initialize the Vocab
+                vocab = Vocab.load_vocabulary(
+                    vocab_file_path,
+                    unk_token='[UNK]',
+                    pad_token='[PAD]')
+                tokens = vocab.to_indices(['[PAD]', '[UNK]', '一斤三', '意面屋'])
+                print(tokens)
+                # [0, 1, 2, 3]
+        """
+        return self[tokens]
+
+    def __getitem__(self, tokens):
+        if not isinstance(tokens, (list, tuple)):
+            return (
+                self._token_to_idx[tokens]
+                if tokens in self._token_to_idx
+                else self._token_to_idx[self.unk_token]
+            )
+        else:
+            return [
+                (
+                    self._token_to_idx[token]
+                    if token in self._token_to_idx
+                    else self._token_to_idx[self.unk_token]
+                )
+                for token in tokens
+            ]
+
+    def __len__(self):
+        return len(self._idx_to_token)
+
+    def __contains__(self, token):
+        return token in self._token_to_idx
+
+    def __call__(self, tokens):
+        """
+        Maps the input tokens into indices. Its function is the same as the
+        :meth:`to_indices` method.
+
+        See detail at `to_indices`.
+        """
+        return self[tokens]
+
+    @property
+    def idx_to_token(self):
+        # Returns index-token dict
+        return self._idx_to_token
+
+    @property
+    def token_to_idx(self):
+        # Return token-index dict
+        return self._token_to_idx
+
+    def to_json(self, path=None):
+        """
+        Summarizes some information of vocab as a JSON string. If path is given,
+        the JSON string will be saved into a file. The JSON string and the saved
+        file all can be used to reconstruct the :class:`Vocab` by calling
+        :meth:`from_json` method.
+
+        Args:
+            path (str, optional): The path to save JSON string. If None, the
+                JSON will not be saved. Default: None.
+
+        Returns:
+            str: The JSON string including information of vocab.
+
+        Example:
+            .. code-block:: python
+
+                from paddlenlp.data import Vocab
+                # The vocab file. The sample file can be downloaded firstly.
+                # wget https://bj.bcebos.com/paddlenlp/data/senta_word_dict.txt
+                vocab_file_path = './senta_word_dict.txt'
+                # Initialize the Vocab
+                vocab = Vocab.load_vocabulary(
+                    vocab_file_path,
+                    unk_token='[UNK]',
+                    pad_token='[PAD]')
+                json_str = vocab.to_json(path='./vocab.json')
+        """
+        vocab_dict = {}
+        vocab_dict["idx_to_token"] = dict(self.idx_to_token)
+        vocab_dict["token_to_idx"] = dict(self.token_to_idx)
+        vocab_dict["unk_token"] = self.unk_token
+        vocab_dict["identifiers_to_tokens"] = self._identifiers_to_tokens
+        json_str = json.dumps(vocab_dict)
+        if path:
+            with io.open(path, "w", encoding="utf-8") as f:
+                f.write(json_str)
+        return json_str
+
+    @classmethod
+    def from_json(cls, json_str):
+        """
+        Loads :class:`Vocab` from a JSON string or JSON file, which is obtained by
+        calling the :meth:`to_json` method.
+
+        Args:
+            json_str (str): JSON string or file path of JSON string.
+
+        Returns:
+            Vocab: An instance of :class:`Vocab` generated from information
+            contained in JSON string.
+
+        Example:
+            .. code-block:: python
+
+                from paddlenlp.data import Vocab
+                # The vocab file. The sample file can be downloaded firstly.
+                # wget https://bj.bcebos.com/paddlenlp/data/senta_word_dict.txt
+                vocab_file_path = './senta_word_dict.txt'
+                # Initialize the Vocab
+                vocab = Vocab.load_vocabulary(
+                    vocab_file_path,
+                    unk_token='[UNK]',
+                    pad_token='[PAD]')
+                json_str = vocab.to_json(path='./vocab.json')
+
+                vocab1 = Vocab.from_json(json_str)
+                vocab2 = Vocab.from_json('./vocab.json')
+                print(len(vocab), len(vocab1), len(vocab2))
+                # 1256608 1256608 1256608
+        """
+        if os.path.isfile(json_str):
+            with io.open(json_str, "r", encoding="utf-8") as f:
+                vocab_dict = json.load(f)
+        else:
+            vocab_dict = json.loads(json_str)
+        token_to_idx = vocab_dict.get("token_to_idx")
+        unk_token = vocab_dict.get("unk_token")
+        identifiers_to_tokens = vocab_dict.get("identifiers_to_tokens", dict())
+        if "unk_token" in identifiers_to_tokens:
+            del identifiers_to_tokens["unk_token"]
+        vocab = cls(
+            counter=None,
+            token_to_idx=token_to_idx,
+            unk_token=unk_token,
+            **identifiers_to_tokens,
+        )
+        return vocab
+
+    @classmethod
+    def from_dict(
+        cls,
+        token_to_idx,
+        unk_token=None,
+        pad_token=None,
+        bos_token=None,
+        eos_token=None,
+        **kwargs
+    ):
+        """
+        Builds the :class:`Vocab` from a dict.
+
+        Args:
+            token_to_idx (dict): A dict describes the mapping relationship between
+                tokens and indices.
+            unk_token (str, optional): The special token for unknown token. If
+                no need, it also could be None. Default: None.
+            pad_token (str, optional): The special token for padding token. If
+                no need, it also could be None. Default: None.
+            bos_token (str, optional): The special token for bos token. If no
+                need, it also could be None. Default: None.
+            eos_token (str, optional): The special token for eos token. If no
+                need, it also could be None. Default: None.
+
+            kwargs (dict): Keyword arguments ending with `_token`. It can be
+                used to specify further special tokens that will be exposed as
+                attribute of the vocabulary and associated with an index.
+
+        Returns:
+            Vocab: An instance of :class:`Vocab` generated from the given dict
+            and special tokens.
+
+        Example:
+            .. code-block:: python
+
+                from paddlenlp.data import Vocab
+                # The vocab file. The sample file can be downloaded firstly.
+                # wget https://bj.bcebos.com/paddlenlp/data/senta_word_dict.txt
+                vocab_file_path = './senta_word_dict.txt'
+                # Initialize the Vocab
+                vocab = Vocab.load_vocabulary(
+                    vocab_file_path,
+                    unk_token='[UNK]',
+                    pad_token='[PAD]')
+
+                vocab1 = Vocab.from_dict(vocab.token_to_idx)
+                print(len(vocab), len(vocab.token_to_idx), len(vocab1))
+                # 1256608 1256608 1256608
+        """
+        vocab = cls(
+            counter=None,
+            token_to_idx=token_to_idx,
+            unk_token=unk_token,
+            pad_token=pad_token,
+            bos_token=bos_token,
+            eos_token=eos_token,
+            **kwargs,
+        )
+        return vocab
+
+    @staticmethod
+    def build_vocab(
+        iterator,
+        max_size=None,
+        min_freq=1,
+        token_to_idx=None,
+        unk_token=None,
+        pad_token=None,
+        bos_token=None,
+        eos_token=None,
+        **kwargs
+    ):
+        """
+        Builds the :class:`Vocab` according to the given iterator and other
+        information. It first iterates over the `iterator` to construct a
+        :class:`collections.Counter`, which is then used to initialize the :class:`Vocab`.
+
+        Args:
+            iterator (collections.Iterable): Iterator of tokens. Each element
+                should be a list of tokens if a word-level vocab is needed.
+            max_size (int, optional): The max size of vocab, not including
+                special tokens. Default: None.
+            min_freq (int, optional): Ignore tokens whose frequencies are less
+                than `min_freq`. Default: 1.
+            token_to_idx (dict, optional): A dict specifies the mapping
+                relationship between tokens and indices to be used. If provided,
+                adjust the tokens and indices mapping according to it. If None,
+                counter must be provided. Default: None.
+            unk_token (str, optional): The special token for unknown token
+                '<unk>'. If no need, it also could be None. Default: None.
+            pad_token (str, optional): The special token for padding token
+                '<pad>'. If no need, it also could be None. Default: None.
+            bos_token (str, optional): The special token for bos token '<bos>'.
+                If no need, it also could be None. Default: None.
+            eos_token (str, optional): The special token for eos token '<eos>'.
+                If no need, it also could be None. Default: None.
+
+            kwargs (dict): Keyword arguments ending with `_token`. It can be
+                used to specify further special tokens that will be exposed as
+                attribute of the vocabulary and associated with an index.
+
+        Returns:
+            Vocab: An instance of :class:`Vocab` generated from given iterator
+            and other information.
+
+        Example:
+            .. code-block:: python
+
+                from paddlenlp.data import Vocab
+                # The vocab file. The sample file can be downloaded firstly.
+                # wget https://bj.bcebos.com/paddlenlp/data/senta_word_dict.txt
+                vocab_file_path = './senta_word_dict.txt'
+                # Initialize the Vocab
+                vocab = Vocab.load_vocabulary(
+                    vocab_file_path,
+                    unk_token='[UNK]',
+                    pad_token='[PAD]')
+
+                vocab1 = Vocab.build_vocab([list(vocab.token_to_idx.keys())])
+                print(len(vocab), len(vocab1))
+                # 1256608 1256608
+        """
+        counter = collections.Counter()
+        for tokens in iterator:
+            counter.update(tokens)
+        vocab = Vocab(
+            counter,
+            max_size=max_size,
+            min_freq=min_freq,
+            token_to_idx=token_to_idx,
+            unk_token=unk_token,
+            pad_token=pad_token,
+            bos_token=bos_token,
+            eos_token=eos_token,
+            **kwargs,
+        )
+        return vocab
+
+    @staticmethod
+    def load_vocabulary(
+        filepath,
+        unk_token=None,
+        pad_token=None,
+        bos_token=None,
+        eos_token=None,
+        **kwargs
+    ):
+        """
+        Builds the :class:`Vocab` from a file, reserving all tokens, by calling
+        the :meth:`Vocab.from_dict` method. The file should contain one token per
+        line, and the line index is used as the index of the corresponding token.
+
+        Args:
+            filepath (str): The path of the file used to construct the vocabulary.
+            unk_token (str, optional): The special token for the unknown token. If
+                not needed, it can also be None. Default: None.
+            pad_token (str, optional): The special token for the padding token. If
+                not needed, it can also be None. Default: None.
+            bos_token (str, optional): The special token for the bos token. If not
+                needed, it can also be None. Default: None.
+            eos_token (str, optional): The special token for the eos token. If not
+                needed, it can also be None. Default: None.
+
+            kwargs (dict): Keyword arguments ending with `_token`. They can be
+                used to specify further special tokens that will be exposed as
+                attributes of the vocabulary and associated with an index.
+
+        Returns:
+            Vocab: An instance of :class:`Vocab` generated from the given file.
+
+        Example:
+            .. code-block:: python
+
+                from paddlenlp.data import Vocab
+                # The vocab file. The sample file can be downloaded first:
+                # wget https://bj.bcebos.com/paddlenlp/data/senta_word_dict.txt
+                vocab_file_path = './senta_word_dict.txt'
+                # Initialize the Vocab
+                vocab = Vocab.load_vocabulary(
+                    vocab_file_path,
+                    unk_token='[UNK]',
+                    pad_token='[PAD]')
+                print(len(vocab))
+                # 1256608
+        """
+        token_to_idx = {}
+        with io.open(filepath, "r", encoding="utf-8") as f:
+            for index, line in enumerate(f):
+                token = line.rstrip("\n")
+                token_to_idx[token] = index
+        vocab = Vocab.from_dict(
+            token_to_idx,
+            unk_token=unk_token,
+            pad_token=pad_token,
+            bos_token=bos_token,
+            eos_token=eos_token,
+            **kwargs,
+        )
+        return vocab
+
+    def save_vocabulary(self, filepath):
+        """
+        Save the :class:`Vocab` to a specific file. The saved vocabulary can be
+        reloaded by calling :meth:`Vocab.load_vocabulary`.
+
+        Args:
+            filepath (str): The path of the file to save the vocabulary to.
+        """
+        # Write one token per line in index order; use UTF-8 to match load_vocabulary.
+        with open(filepath, "w", encoding="utf-8") as f:
+            for idx in range(len(self._idx_to_token)):
+                f.write(self._idx_to_token[idx] + "\n")
+
+    def get_unk_token_id(self):
+        """Return the index of the unk token, or None if it is not set."""
+        return (
+            self._token_to_idx[self.unk_token]
+            if self.unk_token is not None
+            else self.unk_token
+        )
+
+    def get_bos_token_id(self):
+        """Return the index of the bos token, or None if it is not set."""
+        return (
+            self._token_to_idx[self.bos_token]
+            if self.bos_token is not None
+            else self.bos_token
+        )
+
+    def get_eos_token_id(self):
+        """Return the index of the eos token, or None if it is not set."""
+        return (
+            self._token_to_idx[self.eos_token]
+            if self.eos_token is not None
+            else self.eos_token
+        )
+
+    def get_pad_token_id(self):
+        """Return the index of the pad token, or None if it is not set."""
+        return (
+            self._token_to_idx[self.pad_token]
+            if self.pad_token is not None
+            else self.pad_token
+        )

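Taken together, the new `Vocab` class covers building a vocabulary from a token iterator, persisting it to disk, reloading it, and querying the special-token ids. Below is a minimal usage sketch: it assumes the `paddlenlp.data.Vocab` import path used in the docstring examples above, and the tiny corpus and output path are made up for illustration.

    from paddlenlp.data import Vocab

    # Build a vocabulary from an iterator of token lists (made-up corpus).
    corpus = [["hello", "world"], ["hello", "paddle"]]
    vocab = Vocab.build_vocab(corpus, unk_token="[UNK]", pad_token="[PAD]")
    print(len(vocab), vocab.get_unk_token_id(), vocab.get_pad_token_id())

    # Persist the vocabulary (one token per line) and reload it later.
    vocab.save_vocabulary("./my_vocab.txt")
    reloaded = Vocab.load_vocabulary(
        "./my_vocab.txt", unk_token="[UNK]", pad_token="[PAD]")
    print(len(reloaded) == len(vocab))  # expected: True
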
+ 7 - 0
requirements.txt

@@ -37,3 +37,10 @@ unstructured
 networkx
 faiss-cpu
 decord==0.6.0
+######## For NLP Tokenizer #######
+jieba
+sentencepiece
+jinja2
+regex
+######## For Speech #######
+soundfile