From 2d91ec66798882584a7b18df917856a7b246b44d Mon Sep 17 00:00:00 2001
From: Solumilken
Date: Thu, 24 Jan 2019 12:45:21 +0800
Subject: [PATCH] more explicit imports

---
 uttut/pipeline/bert/tests/test_basic.py      | 4 ++--
 uttut/pipeline/bert/tests/test_full.py       | 4 ++--
 uttut/pipeline/bert/tests/test_word_piece.py | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/uttut/pipeline/bert/tests/test_basic.py b/uttut/pipeline/bert/tests/test_basic.py
index 10d774a..3a88f95 100644
--- a/uttut/pipeline/bert/tests/test_basic.py
+++ b/uttut/pipeline/bert/tests/test_basic.py
@@ -1,6 +1,6 @@
 import pytest
 
-from .tokenization import BasicTokenizer
+from .tokenization import BasicTokenizer as BertBasicTokenizer
 
 from ..basic import basic_pipe
 from uttut.elements import Datum
@@ -28,7 +28,7 @@
 
 @pytest.fixture
 def tokenizer():
-    yield BasicTokenizer()
+    yield BertBasicTokenizer()
 
 
 @pytest.mark.parametrize("input_str", test_cases)
diff --git a/uttut/pipeline/bert/tests/test_full.py b/uttut/pipeline/bert/tests/test_full.py
index 3a50a4d..0fbb58b 100644
--- a/uttut/pipeline/bert/tests/test_full.py
+++ b/uttut/pipeline/bert/tests/test_full.py
@@ -4,7 +4,7 @@
 
 import pytest
 
-from .tokenization import FullTokenizer
+from .tokenization import FullTokenizer as BertFullTokenizer
 
 from ..full import full_pipe, vocab_tokens
 from uttut.elements import Datum
@@ -28,7 +28,7 @@ def tokenizer():
         vocab_file = vocab_writer.name
 
-    tokenizer = FullTokenizer(vocab_file)
+    tokenizer = BertFullTokenizer(vocab_file)
     yield tokenizer
 
     os.unlink(vocab_file)
 
diff --git a/uttut/pipeline/bert/tests/test_word_piece.py b/uttut/pipeline/bert/tests/test_word_piece.py
index 0934b86..2f5c92d 100644
--- a/uttut/pipeline/bert/tests/test_word_piece.py
+++ b/uttut/pipeline/bert/tests/test_word_piece.py
@@ -1,6 +1,6 @@
 import pytest
 
-from .tokenization import WordpieceTokenizer
+from .tokenization import WordpieceTokenizer as BertWordpieceTokenizer
 
 from ..word_piece import word_piece_pipe, vocab
 from uttut.elements import Datum
@@ -20,7 +20,7 @@
 
 @pytest.fixture
 def tokenizer():
-    yield WordpieceTokenizer(vocab)
+    yield BertWordpieceTokenizer(vocab)
 
 
 @pytest.mark.parametrize("input_str", test_cases)