# test_word_tokenize.py
from nltk.tokenize import word_tokenize as nltk_word_tokenize


def word_tokenize(title: str) -> list[str]:
    """
    Tokenizer: split an English title into a list of word tokens.
    """
    # The NLTK import is aliased at module level so this wrapper can
    # reuse the name without shadowing the library function.
    return nltk_word_tokenize(title, language="english")
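
# NOTE: word_tokenize depends on the NLTK Punkt tokenizer models. If they
# are not installed, a one-time download is needed (the data package is
# "punkt" on older NLTK releases and "punkt_tab" on newer ones):
#
#     import nltk
#     nltk.download("punkt")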


if __name__ == '__main__':
    sample_title = "nation's bravest tales of courage and heroism"
    print(word_tokenize(sample_title))
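
# Expected output (assuming the Punkt models are available; the Treebank
# tokenizer splits the possessive into its own token):
#     ['nation', "'s", 'bravest', 'tales', 'of', 'courage', 'and', 'heroism']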