# -------------------------------------------------------------------------
# MIT License
#
# Copyright (c) 2021 OpenAI
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Modified by Jiarui Xu
# -------------------------------------------------------------------------
import gzip
import html
import os
from functools import lru_cache

import ftfy
import regex as re


@lru_cache()
def default_bpe():
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bpe_simple_vocab_16e6.txt.gz')


@lru_cache()
def bytes_to_unicode():
    """Return a dict mapping every utf-8 byte to a unicode string.

    The reversible bpe codes work on unicode strings. This means you need a large
    number of unicode characters in your vocab if you want to avoid UNKs. At
    something like a 10B-token dataset you end up needing around 5K characters for
    decent coverage, a significant percentage of a normal, say, 32K bpe vocab. To
    avoid that, we use lookup tables between utf-8 bytes and unicode strings,
    while avoiding whitespace/control characters that the bpe code barfs on.
    """
    bs = list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
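

# Illustration (not part of the original file): printable bytes map to themselves,
# while invisible bytes are shifted to higher code points so every byte has a
# visible stand-in. For example, the space byte 32 becomes 'Ġ' (U+0120).
# >>> bytes_to_unicode()[ord('A')]
# 'A'
# >>> bytes_to_unicode()[32]
# 'Ġ'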


def get_pairs(word):
    """Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
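

# Example (illustrative, not from the original file): adjacent symbols become pairs.
# >>> get_pairs(('h', 'e', 'l', 'l', 'o</w>'))
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}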


def basic_clean(text):
    text = ftfy.fix_text(text)
    # Unescape twice to also resolve doubly-escaped HTML entities.
    text = html.unescape(html.unescape(text))
    return text.strip()


def whitespace_clean(text):
    text = re.sub(r'\s+', ' ', text)
    text = text.strip()
    return text
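

# Quick sanity check (illustrative): the double unescape resolves entities that were
# escaped twice, and whitespace_clean collapses any whitespace run to a single space.
# >>> basic_clean('caf&amp;eacute;')
# 'café'
# >>> whitespace_clean('a \t\n b')
# 'a b'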


class SimpleTokenizer(object):

    def __init__(self, bpe_path: str = default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with gzip.open(bpe_path) as f:
            merges = f.read().decode('utf-8').split('\n')
        # Skip the header line and keep 48894 merges; together with the 2*256 byte
        # tokens and the 2 special tokens this yields a 49408-entry vocab.
        merges = merges[1:49152 - 256 - 2 + 1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v + '</w>' for v in vocab]
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        self.pat = re.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            re.IGNORECASE)
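        # Illustrative behaviour of the pattern (not in the original file):
        # contractions split off, digits match one at a time, whitespace is dropped.
        # >>> self.pat.findall("it's 2 cats!")
        # ['it', "'s", '2', 'cats', '!']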

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + '</w>', )
        pairs = get_pairs(word)
        if not pairs:
            return token + '</w>'
        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    # 'first' does not occur again; keep the rest of the word.
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
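
    # Sketch of what bpe() does for a frequent word (illustrative only; the exact
    # merge sequence depends on the ranks loaded from bpe_simple_vocab_16e6.txt.gz):
    #   'low' -> ('l', 'o', 'w</w>') -> ('lo', 'w</w>') -> 'low</w>'
    # A token none of whose pairs appear in bpe_ranks comes back split into
    # space-separated symbols instead of a single merged unit.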

    def encode(self, text):
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # Map the token's raw bytes to their unicode stand-ins before bpe.
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def decode(self, tokens):
        text = ''.join([self.decoder[token] for token in tokens])
        # Undo the byte-to-unicode mapping, then turn '</w>' markers back into spaces.
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
        return text
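

# Minimal round-trip sketch (not part of the original file). It assumes
# bpe_simple_vocab_16e6.txt.gz sits next to this module, as default_bpe() expects;
# the exact token ids depend on that merge table.
if __name__ == '__main__':
    tokenizer = SimpleTokenizer()
    ids = tokenizer.encode('Hello, world!')
    print(ids)                    # a list of integer token ids
    print(tokenizer.decode(ids))  # roughly 'hello , world ! ' (lowercased, space-delimited)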