Expose stream-ordering in subword tokenizer API #17206

Merged: 6 commits, Nov 4, 2024. The diff below shows changes from 2 commits.
4 changes: 4 additions & 0 deletions cpp/include/nvtext/subword_tokenize.hpp
@@ -62,11 +62,13 @@ struct hashed_vocabulary {
  * @param filename_hashed_vocabulary A path to the preprocessed vocab.txt file.
  *        Note that this is the file AFTER python/perfect_hash.py has been used
  *        for preprocessing.
+ * @param stream CUDA stream used for device memory operations and kernel launches
  * @param mr Memory resource to allocate any returned objects.
  * @return vocabulary hash-table elements
  */
 std::unique_ptr<hashed_vocabulary> load_vocabulary_file(
   std::string const& filename_hashed_vocabulary,
+  rmm::cuda_stream_view stream = cudf::get_default_stream(),
   rmm::device_async_resource_ref mr = cudf::get_current_device_resource_ref());
 
 /**
@@ -147,6 +149,7 @@ struct tokenizer_result {
  * @param do_truncate If true, the tokenizer will discard all the token-ids after
  *        `max_sequence_length` for each input string. If false, it will use a new row
  *        in the output token-ids to continue generating the output.
+ * @param stream CUDA stream used for device memory operations and kernel launches
  * @param mr Memory resource to allocate any returned objects.
  * @return token-ids, attention-mask, and metadata
  */
@@ -157,6 +160,7 @@ tokenizer_result subword_tokenize(
   uint32_t stride,
   bool do_lower_case,
   bool do_truncate,
+  rmm::cuda_stream_view stream = cudf::get_default_stream(),
   rmm::device_async_resource_ref mr = cudf::get_current_device_resource_ref());
 
 /** @} */  // end of group
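
With these overloads in place, callers can pass their own CUDA stream to both entry points instead of implicitly using the default stream. A minimal usage sketch follows (not part of this diff; the vocabulary path and the tokenizer settings are illustrative placeholders):

// Sketch only: exercising the new stream parameters from caller code.
#include <nvtext/subword_tokenize.hpp>

#include <cudf/strings/strings_column_view.hpp>

#include <rmm/cuda_stream.hpp>

void tokenize_on_stream(cudf::strings_column_view const& input)
{
  rmm::cuda_stream stream;  // caller-owned, non-default stream

  // Both calls now issue their device work on `stream`.
  auto vocab  = nvtext::load_vocabulary_file("hashed_vocab.txt", stream.view());
  auto result = nvtext::subword_tokenize(input,
                                         *vocab,
                                         64,     // max_sequence_length (placeholder)
                                         48,     // stride (placeholder)
                                         true,   // do_lower_case
                                         false,  // do_truncate
                                         stream.view());
  stream.synchronize();  // make results visible before use outside this stream
}

The memory-resource argument keeps its default, so only the stream needs to be supplied explicitly.
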
6 changes: 4 additions & 2 deletions cpp/src/text/subword/load_hash_file.cu
@@ -289,10 +289,12 @@ std::unique_ptr<hashed_vocabulary> load_vocabulary_file(
 }  // namespace detail
 
 std::unique_ptr<hashed_vocabulary> load_vocabulary_file(
-  std::string const& filename_hashed_vocabulary, rmm::device_async_resource_ref mr)
+  std::string const& filename_hashed_vocabulary,
+  rmm::cuda_stream_view stream,
+  rmm::device_async_resource_ref mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::load_vocabulary_file(filename_hashed_vocabulary, cudf::get_default_stream(), mr);
+  return detail::load_vocabulary_file(filename_hashed_vocabulary, stream, mr);
 }
 
 }  // namespace nvtext
11 changes: 3 additions & 8 deletions cpp/src/text/subword/subword_tokenize.cu
@@ -293,17 +293,12 @@ tokenizer_result subword_tokenize(cudf::strings_column_view const& strings,
                                   uint32_t stride,
                                   bool do_lower_case,
                                   bool do_truncate,
+                                  rmm::cuda_stream_view stream,
                                   rmm::device_async_resource_ref mr)
 {
   CUDF_FUNC_RANGE();
-  return detail::subword_tokenize(strings,
-                                  vocabulary_table,
-                                  max_sequence_length,
-                                  stride,
-                                  do_lower_case,
-                                  do_truncate,
-                                  cudf::get_default_stream(),
-                                  mr);
+  return detail::subword_tokenize(
+    strings, vocabulary_table, max_sequence_length, stride, do_lower_case, do_truncate, stream, mr);
 }
 
 }  // namespace nvtext
1 change: 1 addition & 0 deletions cpp/tests/CMakeLists.txt
@@ -741,6 +741,7 @@ ConfigureTest(
   streams/text/ngrams_test.cpp
   streams/text/replace_test.cpp
   streams/text/stemmer_test.cpp
+  streams/text/subword_tokenize_test.cpp
   streams/text/tokenize_test.cpp
   STREAM_MODE
   testing
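
For context, the stream tests under cpp/tests/streams verify that public APIs accept and honor an explicit stream, typically cudf::test::get_default_stream(). A minimal sketch of what the new subword_tokenize_test.cpp might look like (assumed shape; the vocabulary file path is a placeholder and the actual test may build its vocabulary differently):

// Sketch only: test shape assumed from the other streams/text tests.
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/default_stream.hpp>
#include <cudf_test/testing_main.hpp>

#include <cudf/strings/strings_column_view.hpp>

#include <nvtext/subword_tokenize.hpp>

class TextSubwordTokenizeTest : public cudf::test::BaseFixture {};

TEST_F(TextSubwordTokenizeTest, SubwordTokenize)
{
  cudf::test::strings_column_wrapper const input({"This is a stream test."});

  // "hashed_vocab.txt" is a placeholder path to a preprocessed vocabulary file.
  auto const vocab =
    nvtext::load_vocabulary_file("hashed_vocab.txt", cudf::test::get_default_stream());

  auto const result = nvtext::subword_tokenize(cudf::strings_column_view(input),
                                               *vocab,
                                               16,     // max_sequence_length
                                               16,     // stride
                                               true,   // do_lower_case
                                               false,  // do_truncate
                                               cudf::test::get_default_stream());
}

CUDF_TEST_PROGRAM_MAIN()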