/external/libtextclassifier/native/utils/ |
D | bert_tokenizer_test.cc |
      53  std::vector<std::string> vocab;  in TEST() local
      54  vocab.emplace_back("i");  in TEST()
      55  vocab.emplace_back("'");  in TEST()
      56  vocab.emplace_back("m");  in TEST()
      57  vocab.emplace_back("question");  in TEST()
      58  auto tokenizer = absl::make_unique<BertTokenizer>(vocab);  in TEST()
      74  std::vector<std::string> vocab;  in TEST() local
      75  vocab.emplace_back("i");  in TEST()
      76  vocab.emplace_back("'");  in TEST()
      77  vocab.emplace_back("m");  in TEST()
      [all …]
|
D | bert_tokenizer.h |
      70  explicit FlatHashMapBackedWordpiece(const std::vector<std::string>& vocab);
      87  explicit BertTokenizer(const std::vector<std::string>& vocab,
      89      : vocab_{FlatHashMapBackedWordpiece(vocab)}, options_{options} {}
|
D | bert_tokenizer.cc |
      30  const std::vector<std::string>& vocab)  in FlatHashMapBackedWordpiece() argument
      31      : vocab_{vocab} {  in FlatHashMapBackedWordpiece()
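The three libtextclassifier entries above share one pattern: a BertTokenizer is constructed from a flat std::vector of wordpiece tokens, and FlatHashMapBackedWordpiece wraps that vector with a hash map so token-to-id lookup is O(1). A minimal Python sketch of that structure, assuming only what the snippets show (class and method names are illustrative, not the C++ API):

    class WordpieceVocab:
        """Illustrative analogue: a token list indexed both ways."""

        def __init__(self, vocab):
            self._vocab = list(vocab)                           # id -> token
            self._index = {t: i for i, t in enumerate(vocab)}   # token -> id

        def lookup_id(self, token):
            return self._index.get(token, -1)   # -1 signals "not in vocab"

        def lookup_word(self, token_id):
            return self._vocab[token_id]

    vocab = ["i", "'", "m", "question"]   # the same tokens the test inserts
    wp = WordpieceVocab(vocab)
    assert wp.lookup_id("question") == 3
    assert wp.lookup_word(0) == "i"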
|
/external/tensorflow/tensorflow/python/keras/layers/preprocessing/ |
D | index_lookup.py |
      311  def _set_forward_vocabulary(self, vocab, idf_weights=None):  argument
      313    table_utils.validate_vocabulary_is_unique(vocab)
      316    has_mask = vocab[0] == self.mask_token
      323    has_oov = vocab[oov_start:oov_end] == expected_oov
      342    vocab[oov_start:oov_end]))
      354    oov_start, oov_end, self.mask_token, vocab[0]))
      361    tokens = vocab if insert_special_tokens else vocab[num_special_tokens:]
      390    values = np.arange(len(vocab), dtype=np.int64)
      391    self._table_handler.insert(vocab, values)
      396    if len(vocab) != len(idf_weights):
      [all …]
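Read together, these fragments validate the vocabulary before inserting it: tokens must be unique, a mask token (if present) must sit at index 0, the expected OOV tokens must follow it, and the remaining tokens are inserted into the table with consecutive int64 ids. A hedged pure-Python sketch of those checks, with defaults assumed rather than taken from the Keras internals:

    def set_forward_vocabulary(vocab, mask_token="", oov_token="[UNK]",
                               num_oov_indices=1):
        # Vocabulary must be duplicate-free.
        if len(set(vocab)) != len(vocab):
            raise ValueError("vocabulary contains duplicates")
        has_mask = vocab[0] == mask_token
        oov_start = 1 if has_mask else 0
        oov_end = oov_start + num_oov_indices
        has_oov = vocab[oov_start:oov_end] == [oov_token] * num_oov_indices
        if has_mask and not has_oov:
            raise ValueError("mask token must be followed by the OOV token(s)")
        # Special tokens, when present, lead the vocabulary; the rest get
        # consecutive ids starting after them.
        num_special_tokens = oov_end if (has_mask and has_oov) else 0
        tokens = vocab[num_special_tokens:]
        return {t: i + num_special_tokens for i, t in enumerate(tokens)}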
|
D | integer_lookup.py |
      317  def set_vocabulary(self, vocab, idf_weights=None):  argument
      318    if isinstance(vocab, str):
      328      vocab = table_utils.get_vocabulary_from_file(vocab)
      329      vocab = [int(v) for v in vocab]
      330    super().set_vocabulary(vocab, idf_weights=idf_weights)
|
D | string_lookup.py |
      311  def set_vocabulary(self, vocab, idf_weights=None):  argument
      312    if isinstance(vocab, str):
      321      vocab = table_utils.get_vocabulary_from_file(vocab, self.encoding)
      322    super().set_vocabulary(vocab, idf_weights=idf_weights)
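Both wrappers above only resolve a file-path argument into an in-memory token list (integer_lookup additionally parses each line as an int) before delegating to the base class. Typical usage with an in-memory vocabulary, against the TF 2.4-era experimental namespace these files come from:

    import tensorflow as tf

    layer = tf.keras.layers.experimental.preprocessing.StringLookup()
    layer.set_vocabulary(["emerson", "lake", "palmer"])
    # In-vocabulary strings map to their assigned indices; anything else
    # falls into the OOV bucket.
    print(layer(tf.constant([["lake", "banjo"]])))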
|
D | table_utils.py |
      208    vocab = []
      222      vocab.append(token)
      223    return vocab
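The table_utils fragments are the file-reading end of that path: open the vocabulary file, append one token per line, return the list. A self-contained sketch under that assumption (the real helper also takes an encoding and reads through TF's gfile):

    def get_vocabulary_from_file(path, encoding="utf-8"):
        vocab = []
        with open(path, "r", encoding=encoding) as f:
            for line in f:
                token = line.rstrip("\n")  # keep the token, drop the newline
                vocab.append(token)
        return vocab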
|
/external/tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/ |
D | index_lookup_forward_benchmark.py |
      55    vocab = list(
      57    vocab.sort()
      58    return vocab
      68    for vocab in vocab_list:
      69      writer.write(vocab + "\n")
      74    def run_numpy_implementation(self, data, vocab):  argument
      78        vocabulary=vocab,
      99    vocab = get_vocab()
      100   vocab_file = self._write_to_temp_file("vocab", vocab)
      128   baseline, _ = self.run_numpy_implementation(data, vocab)
|
D | index_lookup_adapt_benchmark.py |
      84    vocab = get_top_k(batched_ds, k)
      85    layer.set_vocabulary(vocab)
|
/external/tensorflow/tensorflow/lite/kernels/hashtable/ |
D | README.md |
      104  with open('/tmp/vocab.file', 'r') as f:
      121  vocab = tf.constant(["emerson", "lake", "palmer"])
      122  vocab_table = tf.lookup.index_table_from_tensor(vocab, default_value=UNK_ID)
      133  with open('/tmp/vocab.file', 'r') as f:
      137  vocab = tf.constant(words)
      138  vocab_table = tf.lookup.index_table_from_tensor(vocab, default_value=UNK_ID)
      150  vocab = tf.constant(["emerson", "lake", "palmer"])
      151  vocab_table = tf.lookup.index_to_string_table_from_tensor(vocab, default_value=UNK_WORD)
      162  with open('/tmp/vocab.file', 'r') as f:
      166  vocab = tf.constant(words)
      [all …]
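The README snippets build lookup tables with the TF1-style tf.lookup.index_table_from_tensor helpers. A TF2 sketch of the same string-to-id table using tf.lookup.StaticHashTable (an equivalent, not the README's exact code):

    import tensorflow as tf

    UNK_ID = -1
    vocab = tf.constant(["emerson", "lake", "palmer"])
    init = tf.lookup.KeyValueTensorInitializer(
        keys=vocab, values=tf.range(3, dtype=tf.int64))
    vocab_table = tf.lookup.StaticHashTable(init, default_value=UNK_ID)
    # Known words map to their ids; unknown words map to UNK_ID.
    print(vocab_table.lookup(tf.constant(["palmer", "banjo"])).numpy())  # [ 2 -1]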
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_GenerateVocabRemapping.pbtxt |
      6   Path to the new vocab file.
      12  Path to the old vocab file.
      26  Number of new vocab entries found in old vocab.
      32  How many entries into the new vocab file to start reading.
      38  Number of entries in the new vocab file to remap.
      44  Number of entries in the old vocab file to consider. If -1,
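These attributes describe tf.raw_ops.GenerateVocabRemapping, which maps each entry of a new vocab file to its id in an old vocab file (or -1 when absent). A small end-to-end sketch; the file contents are made up for illustration:

    import tensorflow as tf

    with open("/tmp/old_vocab.txt", "w") as f:
        f.write("emerson\nlake\npalmer\n")
    with open("/tmp/new_vocab.txt", "w") as f:
        f.write("lake\nbanjo\nemerson\n")

    remapping, num_present = tf.raw_ops.GenerateVocabRemapping(
        new_vocab_file="/tmp/new_vocab.txt",
        old_vocab_file="/tmp/old_vocab.txt",
        new_vocab_offset=0,   # start reading the new vocab at entry 0
        num_new_vocab=3,      # remap all three new entries
        old_vocab_size=-1)    # -1: consider the whole old vocab file
    print(remapping.numpy())    # [ 1 -1  0]: "banjo" is not in the old vocab
    print(num_present.numpy())  # 2 new entries were found in the old vocab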
|
/external/libtextclassifier/native/annotator/vocab/ |
D | vocab-level-table.cc |
      60  Optional<LookupResult> VocabLevelTable::Lookup(const std::string& vocab) const {  in Lookup()
      62    agent.set_query(vocab.data(), vocab.size());  in Lookup()
|
D | vocab-annotator.h |
      25  #error No vocab-annotator implementation specified.
|
D | vocab-level-table.h |
      41  Optional<LookupResult> Lookup(const std::string& vocab) const;
|
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/tests/ |
D | init_text_file_to_import.mlir |
      7   …= "", device = "", key_dtype = !tf.string, shared_name = "hash_table_/tmp/vocab.txt_-2_-1", use_no…
      17  // Tests the tf.InitializeTableFromTextFileV2 op with an explicit vocab size.
      21  …= "", device = "", key_dtype = !tf.string, shared_name = "hash_table_/tmp/vocab.txt_-2_-1", use_no…
|
D | init_text_file_to_import_invalid.mlir |
      7   …= "", device = "", key_dtype = !tf.string, shared_name = "hash_table_/tmp/vocab.txt_-2_-1", use_no…
      19  …= "", device = "", key_dtype = !tf.string, shared_name = "hash_table_/tmp/vocab.txt_-2_-1", use_no…
      33  …= "", device = "", key_dtype = !tf.string, shared_name = "hash_table_/tmp/vocab.txt_-2_-1", use_no…
|
/external/tflite-support/tensorflow_lite_support/ios/text/tokenizers/Sources/ |
D | TFLBertTokenizer.mm |
      35  - (instancetype)initWithVocab:(NSArray<NSString *> *)vocab {
      39    vocabCpp.reserve([vocab count]);
      40    for (NSString *word in vocab) {
|
D | TFLBertTokenizer.h |
      36  - (instancetype)initWithVocab:(NSArray<NSString *> *)vocab NS_DESIGNATED_INITIALIZER;
|
/external/tflite-support/tensorflow_lite_support/cc/text/tokenizers/ |
D | bert_tokenizer.h |
      74  explicit FlatHashMapBackedWordpiece(const std::vector<std::string>& vocab);
      92  explicit BertTokenizer(const std::vector<std::string>& vocab,
      94      : vocab_{FlatHashMapBackedWordpiece(vocab)},
|
D | bert_tokenizer_jni.cc |
      39  std::vector<std::string> vocab = StringListToVector(env, vocab_list);  in Java_org_tensorflow_lite_support_text_tokenizers_BertTokenizer_nativeLoadResource() local
      51  vocab, BertTokenizerOptions{  in Java_org_tensorflow_lite_support_text_tokenizers_BertTokenizer_nativeLoadResource()
|
D | bert_tokenizer.cc |
      26  const std::vector<std::string>& vocab)  in FlatHashMapBackedWordpiece() argument
      27      : vocab_{vocab} {  in FlatHashMapBackedWordpiece()
|
/external/antlr/runtime/Ruby/lib/antlr3/ |
D | task.rb |
      165  if vocab = grammar.token_vocab and
      166     tfile = find_tokens_file( vocab, grammar )
      183  def find_tokens_file( vocab, grammar )  argument
      184    gram = @grammars.find { | gram | gram.name == vocab } and
      186    file = locate( "#{ vocab }.tokens" ) and return( file )
|
/external/tflite-support/tensorflow_lite_support/java/src/native/task/text/qa/ |
D | bert_question_answerer_jni.cc |
      71  absl::string_view vocab =  in Java_org_tensorflow_lite_task_text_qa_BertQuestionAnswerer_initJniWithBertByteBuffers() local
      76      model.data(), model.size(), vocab.data(), vocab.size());  in Java_org_tensorflow_lite_task_text_qa_BertQuestionAnswerer_initJniWithBertByteBuffers()
|
/external/tensorflow/tensorflow/python/keras/premade/ |
D | wide_deep_test.py |
      197  for vocab, val in zip(vocab_list, vocab_val):
      198    indices = np.where(data == vocab)
      226  for vocab, val in zip(vocab_list, vocab_val):
      227    indices = np.where(data == vocab)
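Both test loops use the same numpy idiom: for each (token, value) pair, np.where finds every position of the token in data, and those positions are overwritten with the value. Reduced to a runnable fragment with made-up data:

    import numpy as np

    vocab_list = ["apple", "banana", "cherry"]
    vocab_val = [0.5, 1.5, 2.5]
    data = np.array(["banana", "apple", "banana"])
    out = np.zeros(data.shape)
    for vocab, val in zip(vocab_list, vocab_val):
        indices = np.where(data == vocab)   # positions of this token
        out[indices] = val
    print(out)  # [1.5 0.5 1.5]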
|
/external/tflite-support/tensorflow_lite_support/ios/text/tokenizers/Tests/ |
D | TFLBertTokenizerTest.swift |
      38  let bertTokenizer = TFLBertTokenizer(vocab: ["hell", "##o", "wor", "##ld", "there"])  in testInitBertTokenizerFromVocab()
|