path: root/llama/tokenizer.py
"""
Llama Tokenizer
===============
This module contains the Tokenizer class that wraps the SentencePiece tokenizer.
"""

from typing import List
from sentencepiece import SentencePieceProcessor  # type: ignore

class Tokenizer:
    """
    Llama Tokenizer Class
    ---------------------
    This class provides a thin wrapper around the SentencePiece tokenizer,
    adding utility methods for encoding (with optional BOS/EOS tokens),
    decoding, and mapping token IDs back to their piece strings.

    Attributes:
        bos_id (int): The id representing the "beginning of sentence" token.
        eos_id (int): The id representing the "end of sentence" token.
        pad_id (int): The id representing the padding token.
        vocab_size (int): The size of the vocabulary.
    """

    def __init__(self, model_path: str):
        """
        Initialize the Tokenizer.

        Args:
            model_path (str): The path to the SentencePiece model file.

        Returns:
            None
        """
        sp = SentencePieceProcessor(model_file=model_path)

        self.bos_id: int = sp.bos_id()
        self.eos_id: int = sp.eos_id()
        self.pad_id: int = sp.pad_id()  # -1 if the model defines no padding token
        self.vocab_size: int = sp.vocab_size()

        self.sp = sp
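
    # Example (hypothetical path; any trained SentencePiece model works):
    #   tok = Tokenizer("tokenizer.model")
    #   tok.bos_id, tok.eos_id, tok.vocab_size  # e.g. 1, 2, 32000 for Llama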

    def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]:
        """
        Encode a string as a sequence of token IDs.

        Args:
            s (str): The string to be encoded.
            bos (bool, optional): Whether to add a "beginning of sentence" token. Defaults to False.
            eos (bool, optional): Whether to add an "end of sentence" token. Defaults to False.

        Returns:
            List[int]: The list of token IDs.
        """
        tokens = []

        if bos:
            tokens.append(self.bos_id)

        tokens.extend(self.sp.encode(s))  # out_type defaults to int -> List[int]

        if eos:
            tokens.append(self.eos_id)

        return tokens
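
    # Example (assuming a Tokenizer `tok` built from a Llama-style model,
    # where bos_id == 1):
    #   tok.encode("Hello world", bos=True)  -> [1, <subword IDs for "Hello world">]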

    def decode(self, tokens: List[int]) -> str:
        """
        Decode a sequence of token IDs to a string.

        Args:
            tokens (List[int]): The list of token IDs to be decoded.

        Returns:
            str: The decoded string.
        """
        return self.sp.decode(tokens)
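
    # Encoding then decoding recovers the original text; BOS/EOS are control
    # tokens, which SentencePiece's decode skips:
    #   tok.decode(tok.encode("Hello world", bos=True, eos=True))  -> "Hello world"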

    def id_to_piece(self, token: int) -> str:
        """
        Convert a token ID to its corresponding token string.

        Args:
            token (int): The token ID.

        Returns:
            str: The token string, with SentencePiece's '▁' character replaced by a space.
        """
        return self.sp.id_to_piece(token).replace('▁', ' ')
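

# Minimal usage sketch, assuming a trained SentencePiece model file is
# available (the default path below is hypothetical). Invoke as:
#   python tokenizer.py /path/to/tokenizer.model
if __name__ == "__main__":
    import sys

    path = sys.argv[1] if len(sys.argv) > 1 else "tokenizer.model"
    tok = Tokenizer(path)

    ids = tok.encode("Hello world", bos=True, eos=True)
    print("token IDs:", ids)
    print("decoded  :", tok.decode(ids))
    print("pieces   :", [tok.id_to_piece(t) for t in ids])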