/external/libtextclassifier/native/annotator/duration/ |
D | duration_test.cc |
    133  std::vector<Token> Tokenize(const UnicodeText& text) {  in Tokenize() function in libtextclassifier3::__anon5ab10c4f0111::DurationAnnotatorTest
    134  return feature_processor_->Tokenize(text);  in Tokenize()
    194  std::vector<Token> tokens = Tokenize(text);  in TEST_F()
    214  std::vector<Token> tokens = Tokenize(text);  in TEST_F()
    226  std::vector<Token> tokens = Tokenize(text);  in TEST_F()
    246  std::vector<Token> tokens = Tokenize(text);  in TEST_F()
    266  std::vector<Token> tokens = Tokenize(text);  in TEST_F()
    286  std::vector<Token> tokens = Tokenize(text);  in TEST_F()
    306  std::vector<Token> tokens = Tokenize(text);  in TEST_F()
    326  std::vector<Token> tokens = Tokenize(text);  in TEST_F()
    [all …]
|
/external/libtextclassifier/native/utils/ |
D | tokenizer_test.cc |
     94  std::vector<Token> Tokenize(const std::string& utf8_text) const {  in Tokenize() function in libtextclassifier3::__anonebae1baa0111::TestingTokenizerProxy
     95  return tokenizer_->Tokenize(utf8_text);  in Tokenize()
    178  std::vector<Token> tokens = tokenizer.Tokenize("Hello world!");  in TEST()
    213  EXPECT_THAT(tokenizer.Tokenize("앨라배마 주 전화(123) 456-789웹사이트"),  in TEST()
    351  tokens = tokenizer.Tokenize(  in TEST()
    355  tokens = tokenizer.Tokenize("問少目 hello 木輸ยามきゃ");  in TEST()
    379  std::vector<Token> tokens = tokenizer.Tokenize("พระบาท สมเด็จ พระ ปร มิ");  in TEST()
    400  tokenizer.Tokenize("The interval is: -(12, 138*)");  in TEST()
    427  std::vector<Token> tokens = tokenizer.Tokenize("3.1 3﹒2 3.3");  in TEST()
    445  std::vector<Token> tokens = tokenizer.Tokenize("พระบาทสมเด็จพระปรมิ");  in TEST()
    [all …]
|
D | tokenizer.h |
     94  std::vector<Token> Tokenize(absl::string_view text) const;
     97  std::vector<Token> Tokenize(const UnicodeText& text_unicode) const;
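These two overloads are the public entry points of libtextclassifier3's Tokenizer: callers pass either raw UTF-8 or already-decoded Unicode and get back a vector of tokens. As a rough illustration of the call shape only (the real implementation is driven by codepoint-range options from the model), here is a toy whitespace splitter with a hypothetical Token struct; std::string_view stands in for absl::string_view, and byte offsets stand in for the codepoint offsets the real Token records:

    #include <iostream>
    #include <string>
    #include <string_view>
    #include <vector>

    // Toy stand-in for libtextclassifier3::Token: the text plus its span.
    struct Token {
      std::string value;
      int start = 0;  // Inclusive start offset (bytes here; codepoints in the real API).
      int end = 0;    // Exclusive end offset.
    };

    // Whitespace splitter with the same shape as Tokenizer::Tokenize().
    std::vector<Token> Tokenize(std::string_view text) {
      std::vector<Token> tokens;
      size_t i = 0;
      while (i < text.size()) {
        while (i < text.size() && text[i] == ' ') ++i;  // Skip separators.
        size_t start = i;
        while (i < text.size() && text[i] != ' ') ++i;  // Consume one token.
        if (i > start) {
          tokens.push_back({std::string(text.substr(start, i - start)),
                            static_cast<int>(start), static_cast<int>(i)});
        }
      }
      return tokens;
    }

    int main() {
      for (const Token& t : Tokenize("Hello world!")) {
        std::cout << t.value << " [" << t.start << ", " << t.end << ")\n";
      }
    }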
|
D | bert_tokenizer_test.cc |
     33  auto results = tokenizer->Tokenize("i'm question");  in AssertTokenizerResults()
     69  auto results = tokenizer->Tokenize("i'm questionansweraskask");  in TEST()
    112  auto results = tokenizer->Tokenize("i'm questionansweraskask");  in TEST()
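bert_tokenizer_test.cc probes a glued-together compound ("questionansweraskask"), which is characteristic of WordPiece-style sub-word splitting against a vocabulary. A minimal sketch of greedy longest-match-first WordPiece, assuming a toy vocabulary and the conventional "##" prefix for continuation pieces (this illustrates the general algorithm, not tflite-support's implementation):

    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    // Greedy longest-match-first WordPiece over a toy vocabulary.
    std::vector<std::string> WordPiece(const std::string& word,
                                       const std::set<std::string>& vocab) {
      std::vector<std::string> pieces;
      size_t start = 0;
      while (start < word.size()) {
        size_t end = word.size();
        std::string match;
        while (end > start) {  // Shrink until the prefix is in the vocabulary.
          std::string piece = word.substr(start, end - start);
          if (start > 0) piece = "##" + piece;  // Continuation-piece prefix.
          if (vocab.count(piece)) { match = piece; break; }
          --end;
        }
        if (match.empty()) return {"[UNK]"};  // No piece matched at all.
        pieces.push_back(match);
        start = end;
      }
      return pieces;
    }

    int main() {
      const std::set<std::string> vocab = {"question", "##answer", "##ask"};
      for (const auto& p : WordPiece("questionansweraskask", vocab)) {
        std::cout << p << "\n";  // question ##answer ##ask ##ask
      }
    }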
|
D | tokenizer.cc |
     99  std::vector<Token> Tokenizer::Tokenize(absl::string_view text) const {  in Tokenize() function in libtextclassifier3::Tokenizer
    101  return Tokenize(text_unicode);  in Tokenize()
    104  std::vector<Token> Tokenizer::Tokenize(const UnicodeText& text_unicode) const {  in Tokenize() function in libtextclassifier3::Tokenizer
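tokenizer.cc shows a common two-overload pattern: the UTF-8 entry point (line 99) decodes once and forwards to the Unicode overload (line 104), keeping the tokenization logic in one place. A generic sketch of that delegation, with std::u32string standing in for UnicodeText and a deliberately naive ASCII-only decode (real code would go through ICU or an equivalent UTF-8 decoder):

    #include <string>
    #include <string_view>
    #include <vector>

    class Splitter {
     public:
      // Byte-oriented overload: decode once, then forward.
      std::vector<std::u32string> Tokenize(std::string_view utf8) const {
        std::u32string decoded(utf8.begin(), utf8.end());  // ASCII-only decode.
        return Tokenize(decoded);  // Single implementation beyond this point.
      }

      // Codepoint-oriented overload: the only place splitting happens.
      std::vector<std::u32string> Tokenize(const std::u32string& text) const {
        std::vector<std::u32string> tokens;
        std::u32string current;
        for (char32_t c : text) {
          if (c == U' ') {
            if (!current.empty()) tokens.push_back(current);
            current.clear();
          } else {
            current.push_back(c);
          }
        }
        if (!current.empty()) tokens.push_back(current);
        return tokens;
      }
    };

    int main() {
      Splitter s;
      return s.Tokenize("a b c").size() == 3 ? 0 : 1;
    }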
|
/external/libtextclassifier/native/utils/grammar/parsing/ |
D | lexer_test.cc |
     88  std::vector<Token> tokens = tokenizer_.Tokenize("This is a word");  in TEST_F()
     97  std::vector<Token> tokens = tokenizer_.Tokenize("1234This a4321cde");  in TEST_F()
    107  std::vector<Token> tokens = tokenizer_.Tokenize("10/18/2014");  in TEST_F()
    117  std::vector<Token> tokens = tokenizer_.Tokenize("电话:0871—6857(曹");  in TEST_F()
    130  std::vector<Token> tokens = tokenizer_.Tokenize("电话 :0871—6857(曹");  in TEST_F()
    144  tokenizer_.Tokenize("The.qUIck\n brown2345fox88 \xE2\x80\x94 the");  in TEST_F()
    158  std::vector<Token> tokens = tokenizer_.Tokenize("The+2345++the +");  in TEST_F()
|
/external/icing/icing/tokenization/ |
D | verbatim-tokenizer_test.cc |
     97  auto token_iterator = verbatim_tokenizer->Tokenize(kText).ValueOrDie();  in TEST_F()
    110  auto token_iterator = verbatim_tokenizer->Tokenize(kText).ValueOrDie();  in TEST_F()
    137  auto token_iterator = verbatim_tokenizer->Tokenize(kText).ValueOrDie();  in TEST_F()
    162  auto token_iterator = verbatim_tokenizer->Tokenize(kText).ValueOrDie();  in TEST_F()
    182  auto token_iterator = verbatim_tokenizer->Tokenize(kText).ValueOrDie();  in TEST_F()
    198  auto token_iterator = verbatim_tokenizer->Tokenize(kText).ValueOrDie();  in TEST_F()
|
D | verbatim-tokenizer.cc |
    127  VerbatimTokenizer::Tokenize(std::string_view text) const {  in Tokenize() function in icing::lib::VerbatimTokenizer
    134  Tokenize(text));  in TokenizeAll()
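Icing's tokenizers differ from the vector-returning API above: Tokenize hands back a status-wrapped token iterator (the tests unwrap it with ValueOrDie()), and TokenizeAll at line 134 is layered on Tokenize. A sketch of that layering, with std::optional standing in for StatusOr and an assumed Advance()/GetToken() iterator interface:

    #include <optional>
    #include <string>
    #include <string_view>
    #include <utility>
    #include <vector>

    // Minimal iterator in the spirit of an Icing token iterator: Advance()
    // steps to the next token, GetToken() reads the current one.
    class TokenIterator {
     public:
      explicit TokenIterator(std::vector<std::string> tokens)
          : tokens_(std::move(tokens)) {}
      bool Advance() { return ++index_ < static_cast<int>(tokens_.size()); }
      const std::string& GetToken() const { return tokens_[index_]; }

     private:
      std::vector<std::string> tokens_;
      int index_ = -1;
    };

    // Tokenize() returns a status-wrapped iterator; std::optional stands in
    // for StatusOr. A verbatim tokenizer yields the whole input as one token.
    std::optional<TokenIterator> Tokenize(std::string_view text) {
      if (text.empty()) return std::nullopt;  // Stand-in for an error status.
      return TokenIterator({std::string(text)});
    }

    // TokenizeAll() is layered on Tokenize(), draining the iterator.
    std::optional<std::vector<std::string>> TokenizeAll(std::string_view text) {
      auto it = Tokenize(text);
      if (!it) return std::nullopt;
      std::vector<std::string> all;
      while (it->Advance()) all.push_back(it->GetToken());
      return all;
    }

    int main() {
      auto all = TokenizeAll("hello world");
      return (all && all->size() == 1 && (*all)[0] == "hello world") ? 0 : 1;
    }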
|
D | plain-tokenizer_test.cc |
     71  plain_tokenizer->Tokenize(kText));  in TEST_F()
     90  plain_tokenizer->Tokenize(kText));  in TEST_F()
    347  auto iterator = plain_tokenizer->Tokenize(kText).ValueOrDie();  in TEST_F()
    368  auto iterator = plain_tokenizer->Tokenize(kText).ValueOrDie();  in TEST_F()
    415  auto iterator = plain_tokenizer->Tokenize(kText).ValueOrDie();  in TEST_F()
    469  auto iterator = plain_tokenizer->Tokenize(kText).ValueOrDie();  in TEST_F()
|
D | plain-tokenizer.cc |
    133  PlainTokenizer::Tokenize(std::string_view text) const {  in Tokenize() function in icing::lib::PlainTokenizer
    143  Tokenize(text));  in TokenizeAll()
|
/external/icing/icing/util/ |
D | tokenized-document.cc |
     37  libtextclassifier3::StatusOr<std::vector<TokenizedSection>> Tokenize(  in Tokenize() function
     49  tokenizer->Tokenize(subcontent));  in Tokenize()
     82  Tokenize(schema_store, language_segmenter,  in Create()
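tokenized-document.cc applies tokenization one level up: a free Tokenize() helper walks each section of a document, runs a tokenizer over every piece of content (line 49), and collects per-section token sequences. A schematic version of that aggregation loop, with hypothetical Section and TokenizedSection shapes and a stand-in per-string tokenizer:

    #include <string>
    #include <utility>
    #include <vector>

    // Hypothetical stand-ins for the document's section types.
    struct Section {
      std::vector<std::string> contents;  // One section may hold several strings.
    };
    struct TokenizedSection {
      std::vector<std::string> token_sequence;
    };

    // Stand-in per-string tokenizer (whitespace split).
    std::vector<std::string> TokenizeOne(const std::string& content) {
      std::vector<std::string> tokens;
      std::string current;
      for (char c : content) {
        if (c == ' ') {
          if (!current.empty()) tokens.push_back(current);
          current.clear();
        } else {
          current.push_back(c);
        }
      }
      if (!current.empty()) tokens.push_back(current);
      return tokens;
    }

    // One TokenizedSection per input Section: the aggregation shape above.
    std::vector<TokenizedSection> Tokenize(const std::vector<Section>& sections) {
      std::vector<TokenizedSection> result;
      for (const Section& section : sections) {
        TokenizedSection tokenized;
        for (const std::string& subcontent : section.contents) {
          std::vector<std::string> tokens = TokenizeOne(subcontent);
          tokenized.token_sequence.insert(tokenized.token_sequence.end(),
                                          tokens.begin(), tokens.end());
        }
        result.push_back(std::move(tokenized));
      }
      return result;
    }

    int main() {
      std::vector<Section> sections = {{{"foo bar", "baz"}}, {{"qux"}}};
      auto tokenized = Tokenize(sections);
      return (tokenized.size() == 2 &&
              tokenized[0].token_sequence.size() == 3) ? 0 : 1;
    }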
|
/external/perfetto/src/trace_processor/importers/proto/ |
D | proto_trace_parser_unittest.cc |
    284  util::Status Tokenize() {  in Tokenize() function in perfetto::trace_processor::__anon06099b9a0111::ProtoTraceParserTest
    352  Tokenize();  in TEST_F()
    384  Tokenize();  in TEST_F()
    435  Tokenize();  in TEST_F()
    503  Tokenize();  in TEST_F()
    549  Tokenize();  in TEST_F()
    572  Tokenize();  in TEST_F()
    592  Tokenize();  in TEST_F()
    607  Tokenize();  in TEST_F()
    625  Tokenize();  in TEST_F()
    [all …]
|
/external/perfetto/src/trace_processor/importers/fuchsia/ |
D | fuchsia_parser_unittest.cc |
    275  util::Status Tokenize() {  in Tokenize() function in perfetto::trace_processor::__anon92d29aca0111::FuchsiaTraceParserTest
    305  EXPECT_FALSE(Tokenize().ok());  in TEST_F()
    332  EXPECT_TRUE(Tokenize().ok());  in TEST_F()
    370  EXPECT_TRUE(Tokenize().ok());  in TEST_F()
    480  auto status = Tokenize();  in TEST_F()
    534  EXPECT_TRUE(Tokenize().ok());  in TEST_F()
    605  EXPECT_TRUE(Tokenize().ok());  in TEST_F()
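Both perfetto parser test suites share a fixture idiom: a Tokenize() helper feeds the accumulated trace bytes to the parser and returns a status, so each TEST_F can assert success or failure in one line. A schematic gtest version (hypothetical Status type and a faked parse step; assumes gtest is linked with its own main):

    #include <string>
    #include <utility>
    #include "gtest/gtest.h"

    // Hypothetical status type standing in for perfetto's util::Status.
    struct Status {
      std::string message;
      bool ok() const { return message.empty(); }
    };

    class TraceParserTest : public ::testing::Test {
     protected:
      void PushBytes(std::string bytes) { buffer_ += std::move(bytes); }

      // Fixture helper mirroring the pattern above: run the tokenizer over
      // the accumulated bytes and hand the caller a status to assert on.
      Status Tokenize() {
        if (buffer_.empty()) return {"empty trace"};
        return {};  // A real fixture would feed buffer_ to the parser here.
      }

      std::string buffer_;
    };

    TEST_F(TraceParserTest, RejectsEmptyTrace) { EXPECT_FALSE(Tokenize().ok()); }

    TEST_F(TraceParserTest, AcceptsNonEmptyTrace) {
      PushBytes("TRACE");
      EXPECT_TRUE(Tokenize().ok());
    }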
|
/external/perfetto/src/trace_processor/util/ |
D | streaming_line_reader.cc |
     42  size_t consumed = Tokenize(base::StringView(buf_.data(), buf_.size()));  in EndWrite()
     51  size_t StreamingLineReader::Tokenize(base::StringView input) {  in Tokenize() function in perfetto::trace_processor::util::StreamingLineReader
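StreamingLineReader::Tokenize scans buffered bytes for complete newline-terminated lines and returns the number of bytes it consumed, which lets EndWrite (line 42) keep a trailing partial line buffered until the next chunk arrives. A self-contained sketch of that contract, with std::string_view in place of base::StringView and a hypothetical per-line callback:

    #include <functional>
    #include <iostream>
    #include <string>
    #include <string_view>

    class StreamingLineReader {
     public:
      explicit StreamingLineReader(std::function<void(std::string_view)> on_line)
          : on_line_(std::move(on_line)) {}

      // Append incoming bytes, then drop whatever Tokenize() consumed. Any
      // trailing partial line stays buffered until more bytes arrive.
      void Write(std::string_view bytes) {
        buf_.append(bytes);
        size_t consumed = Tokenize(buf_);
        buf_.erase(0, consumed);
      }

      // Emit every complete '\n'-terminated line; return bytes consumed.
      size_t Tokenize(std::string_view input) {
        size_t consumed = 0;
        for (size_t pos;
             (pos = input.find('\n', consumed)) != std::string_view::npos;) {
          on_line_(input.substr(consumed, pos - consumed));
          consumed = pos + 1;
        }
        return consumed;
      }

     private:
      std::function<void(std::string_view)> on_line_;
      std::string buf_;
    };

    int main() {
      StreamingLineReader slr([](std::string_view l) { std::cout << l << "\n"; });
      slr.Write("a12\nb34");  // Emits "a12"; "b34" stays buffered.
      slr.Write("56\nc\n");   // Emits "b3456" and "c".
    }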
|
D | streaming_line_reader_unittest.cc |
     56  TEST(StreamingLineReaderTest, Tokenize) {  in TEST() argument
     60  slr.Tokenize("a12\nb3456\nc\nd78\n\ne12\nf3456\n");  in TEST()
|
/external/pigweed/pw_tokenizer/size_report/ |
D | BUILD.gn |
     35  # Tokenize string size report executable.
     54  # Tokenize string expression size report executable.
|
D | BUILD.bazel |
     38  # Tokenize string size report binary.
     59  # Tokenize string expression size report binary.
|
/external/licenseclassifier/stringclassifier/searchset/tokenizer/ |
D | tokenizer_test.go |
     62  if got := Tokenize(tt.text); !reflect.DeepEqual(got, tt.want) {
    110  toks := Tokenize(tt.text)
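The Go test is table-driven: a slice of {text, want} cases, with reflect.DeepEqual comparing each Tokenize result against its expectation. The same shape in C++, kept consistent with the other sketches here (toy whitespace tokenizer; vector operator== plays the role of DeepEqual):

    #include <cassert>
    #include <sstream>
    #include <string>
    #include <vector>

    // Toy tokenizer under test: split on runs of whitespace.
    std::vector<std::string> Tokenize(const std::string& text) {
      std::vector<std::string> tokens;
      std::istringstream in(text);
      for (std::string tok; in >> tok;) tokens.push_back(tok);
      return tokens;
    }

    int main() {
      // Table-driven cases, mirroring the {text, want} tables in the Go test.
      struct Case {
        std::string text;
        std::vector<std::string> want;
      };
      const std::vector<Case> cases = {
          {"a b", {"a", "b"}},
          {"  spaced   out ", {"spaced", "out"}},
          {"", {}},
      };
      for (const Case& tt : cases) {
        assert(Tokenize(tt.text) == tt.want);  // operator== as DeepEqual.
      }
    }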
|
/external/tflite-support/tensorflow_lite_support/ios/text/tokenizers/Sources/ |
D | TFLTokenizerUtil.mm |
     21  NSArray<NSString *> *Tokenize(Tokenizer *tokenizer, NSString *input) {  function
     22  TokenizerResult tokenize_result = tokenizer->Tokenize(MakeString(input));
|
/external/tflite-support/tensorflow_lite_support/cc/test/text/ |
D | regex_tokenizer_test.cc |
     48  auto results = tokenizer->Tokenize("good morning, i'm your teacher.\n");  in TEST()
     59  auto results = tokenizer->Tokenize("good morning, i'm your teacher.\n");  in TEST()
|
/external/google-breakpad/src/processor/ |
D | basic_source_line_resolver.cc |
     79  if (!Tokenize(line, separators, max_tokens - 1, tokens)) {  in TokenizeWithOptionalField()
     87  if (!Tokenize(tokens->back(), separators, 2, &last_tokens)) {  in TokenizeWithOptionalField()
    513  if (!Tokenize(file_line, kWhitespace, 2, &tokens)) {  in ParseFile()
    576  if (!Tokenize(line_line, kWhitespace, 4, &tokens)) {  in ParseLine()
|
D | tokenize.h | 53 bool Tokenize(char *line,
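breakpad's Tokenize splits a mutable C string in place: separator bytes are overwritten with NULs, at most max_tokens tokens are produced, and the call sites above reject a line when the expected count is not met (TokenizeWithOptionalField re-tokenizes the final token to absorb the optional field). A sketch of that contract; everything beyond char *line is inferred from the call sites, and details such as trailing-separator handling may differ from the real helper:

    #include <cstring>
    #include <vector>

    // Capped, in-place splitter: separators become NULs, tokens point into
    // |line|, and the final token keeps the rest of the line once
    // max_tokens - 1 tokens have been produced. Returns true only if exactly
    // max_tokens tokens were found.
    bool Tokenize(char* line, const char* separators, int max_tokens,
                  std::vector<char*>* tokens) {
      tokens->clear();
      int remaining = max_tokens;
      while (line && remaining > 0) {
        line += strspn(line, separators);  // Skip leading separators.
        if (*line == '\0') break;
        tokens->push_back(line);
        if (--remaining == 0) break;       // Last token keeps the remainder.
        char* end = line + strcspn(line, separators);
        if (*end == '\0') break;
        *end = '\0';                       // Terminate the token in place.
        line = end + 1;
      }
      return static_cast<int>(tokens->size()) == max_tokens;
    }

    int main() {
      char line[] = "FUNC 1000 20 my_function";
      std::vector<char*> tokens;
      // As in ParseLine above: demand an exact token count.
      return Tokenize(line, " ", 4, &tokens) ? 0 : 1;
    }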
|
/external/libtextclassifier/native/annotator/ |
D | feature-processor_test.cc |
    383  std::vector<Token> tokens = feature_processor.Tokenize("one, two, three");  in TEST_F()
    424  tokens = feature_processor3.Tokenize("zero, one, two, three, four");  in TEST_F()
    457  std::vector<Token> tokens = feature_processor.Tokenize("one, two, three");  in TEST_F()
    498  tokens = feature_processor3.Tokenize("zero, one, two, three, four");  in TEST_F()
    629  {0, 3}, feature_processor.Tokenize("aaa bbb ccc")),  in TEST_F()
    632  {0, 3}, feature_processor.Tokenize("aaa bbb ěěě")),  in TEST_F()
    635  {0, 3}, feature_processor.Tokenize("ěěě řřř ěěě")),  in TEST_F()
    638  {0, 0}, feature_processor.Tokenize("")),  in TEST_F()
|
/external/perfetto/src/trace_processor/importers/systrace/ |
D | systrace_line_tokenizer.h | 33 util::Status Tokenize(const std::string& line, SystraceLine*);
|
/external/libtextclassifier/native/lang_id/ |
D | custom-tokenizer.h | 42 void Tokenize(StringPiece text, LightSentence *sentence) const;
|