
Searched refs:vocab (Results 1 – 25 of 53) sorted by relevance


/external/libtextclassifier/native/utils/
bert_tokenizer_test.cc
53 std::vector<std::string> vocab; in TEST() local
54 vocab.emplace_back("i"); in TEST()
55 vocab.emplace_back("'"); in TEST()
56 vocab.emplace_back("m"); in TEST()
57 vocab.emplace_back("question"); in TEST()
58 auto tokenizer = absl::make_unique<BertTokenizer>(vocab); in TEST()
74 std::vector<std::string> vocab; in TEST() local
75 vocab.emplace_back("i"); in TEST()
76 vocab.emplace_back("'"); in TEST()
77 vocab.emplace_back("m"); in TEST()
[all …]
bert_tokenizer.h
70 explicit FlatHashMapBackedWordpiece(const std::vector<std::string>& vocab);
87 explicit BertTokenizer(const std::vector<std::string>& vocab,
89 : vocab_{FlatHashMapBackedWordpiece(vocab)}, options_{options} {}
bert_tokenizer.cc
30 const std::vector<std::string>& vocab) in FlatHashMapBackedWordpiece() argument
31 : vocab_{vocab} { in FlatHashMapBackedWordpiece()
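Aside: the libtextclassifier hits above construct a BertTokenizer from a plain vector of vocab strings, backed by a flat hash map. A minimal Python sketch of that token-to-id structure (illustrative only; the class and method names below are assumptions, not code from this tree):

```python
# Sketch of a hash-map-backed wordpiece vocab, loosely mirroring
# FlatHashMapBackedWordpiece; names here are illustrative assumptions.
class HashMapWordpiece:
    def __init__(self, vocab):
        # token -> id, with ids assigned in insertion order, like the
        # emplace_back calls in bert_tokenizer_test.cc above.
        self.index = {token: i for i, token in enumerate(vocab)}

    def lookup(self, token):
        # Return the token id, or None when out of vocabulary.
        return self.index.get(token)

wp = HashMapWordpiece(["i", "'", "m", "question"])
assert wp.lookup("question") == 3
assert wp.lookup("answer") is None
```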
/external/tensorflow/tensorflow/python/keras/layers/preprocessing/
index_lookup.py
311 def _set_forward_vocabulary(self, vocab, idf_weights=None): argument
313 table_utils.validate_vocabulary_is_unique(vocab)
316 has_mask = vocab[0] == self.mask_token
323 has_oov = vocab[oov_start:oov_end] == expected_oov
342 vocab[oov_start:oov_end]))
354 oov_start, oov_end, self.mask_token, vocab[0]))
361 tokens = vocab if insert_special_tokens else vocab[num_special_tokens:]
390 values = np.arange(len(vocab), dtype=np.int64)
391 self._table_handler.insert(vocab, values)
396 if len(vocab) != len(idf_weights):
[all …]
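Aside: the index_lookup.py hits show the vocab layout these Keras lookup layers expect: an optional mask token at index 0, a reserved block of OOV tokens next, then the real tokens with consecutive ids. A simplified sketch of that layout (assuming the caller did not include the special tokens; the helper name is hypothetical):

```python
import numpy as np

def index_vocab(vocab, mask_token="", oov_token="[UNK]", num_oov_indices=1):
    # Vocab must be unique (cf. validate_vocabulary_is_unique above).
    if len(set(vocab)) != len(vocab):
        raise ValueError("vocabulary must contain unique tokens")
    # Layout: [mask] + [oov...] + tokens, with consecutive int64 ids.
    full = [mask_token] + [oov_token] * num_oov_indices + list(vocab)
    values = np.arange(len(full), dtype=np.int64)
    return dict(zip(full, values))

index_vocab(["a", "b", "c"])
# -> {'': 0, '[UNK]': 1, 'a': 2, 'b': 3, 'c': 4}
```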
integer_lookup.py
317 def set_vocabulary(self, vocab, idf_weights=None): argument
318 if isinstance(vocab, str):
328 vocab = table_utils.get_vocabulary_from_file(vocab)
329 vocab = [int(v) for v in vocab]
330 super().set_vocabulary(vocab, idf_weights=idf_weights)
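Usage note: per the hits above, integer_lookup.py also accepts a vocab file path and casts each line to int before delegating to the base class. A hedged usage sketch (assuming the TF 2.4/2.5-era experimental namespace this tree uses; newer TF exposes tf.keras.layers.IntegerLookup directly):

```python
import tensorflow as tf

layer = tf.keras.layers.experimental.preprocessing.IntegerLookup()
layer.set_vocabulary([12, 36, 1138, 42])  # or a path to a vocab file
# Known values map to their vocab indices; unseen values (7 here)
# fall into the reserved OOV slot.
print(layer(tf.constant([[12, 1138], [42, 7]])))
```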
string_lookup.py
311 def set_vocabulary(self, vocab, idf_weights=None): argument
312 if isinstance(vocab, str):
321 vocab = table_utils.get_vocabulary_from_file(vocab, self.encoding)
322 super().set_vocabulary(vocab, idf_weights=idf_weights)
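string_lookup.py mirrors this, additionally threading a file encoding through to the vocab-file reader. A hedged sketch under the same namespace assumption:

```python
import tensorflow as tf

layer = tf.keras.layers.experimental.preprocessing.StringLookup()
# A str argument is treated as a vocab file path (see the isinstance
# check above); a list sets the vocabulary directly.
layer.set_vocabulary(["emerson", "lake", "palmer"])
print(layer(tf.constant([["lake"], ["zeppelin"]])))  # OOV -> reserved index
```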
table_utils.py
208 vocab = []
222 vocab.append(token)
223 return vocab
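The table_utils.py hits suggest get_vocabulary_from_file accumulates one token per line into a list; a plausible sketch of that loop (details assumed, not verbatim from this tree):

```python
def get_vocabulary_from_file(vocab_path, encoding="utf-8"):
    vocab = []
    with open(vocab_path, encoding=encoding) as f:
        for line in f:
            token = line.rstrip("\n")
            if token:              # assumption: blank lines are skipped
                vocab.append(token)
    return vocab
```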
/external/tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/
index_lookup_forward_benchmark.py
55 vocab = list(
57 vocab.sort()
58 return vocab
68 for vocab in vocab_list:
69 writer.write(vocab + "\n")
74 def run_numpy_implementation(self, data, vocab): argument
78 vocabulary=vocab,
99 vocab = get_vocab()
100 vocab_file = self._write_to_temp_file("vocab", vocab)
128 baseline, _ = self.run_numpy_implementation(data, vocab)
index_lookup_adapt_benchmark.py
84 vocab = get_top_k(batched_ds, k)
85 layer.set_vocabulary(vocab)
/external/tensorflow/tensorflow/lite/kernels/hashtable/
README.md
104 with open('/tmp/vocab.file', 'r') as f:
121 vocab = tf.constant(["emerson", "lake", "palmer"])
122 vocab_table = tf.lookup.index_table_from_tensor(vocab, default_value=UNK_ID)
133 with open('/tmp/vocab.file', 'r') as f:
137 vocab = tf.constant(words)
138 vocab_table = tf.lookup.index_table_from_tensor(vocab, default_value=UNK_ID)
150 vocab = tf.constant(["emerson", "lake", "palmer"])
151 vocab_table = tf.lookup.index_to_string_table_from_tensor(vocab, default_value=UNK_WORD)
162 with open('/tmp/vocab.file', 'r') as f:
166 vocab = tf.constant(words)
[all …]
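For reference, the README hits build an id-lookup table from a vocab tensor or a vocab file. The tf.lookup.index_table_from_tensor shown there is the TF1-style API; a TF2 equivalent using tf.lookup.StaticHashTable (my sketch, not from the README):

```python
import tensorflow as tf

UNK_ID = -1
vocab = tf.constant(["emerson", "lake", "palmer"])
ids = tf.range(tf.size(vocab, out_type=tf.int64), dtype=tf.int64)
vocab_table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(vocab, ids), default_value=UNK_ID)
print(vocab_table.lookup(tf.constant(["lake", "zeppelin"])))  # [1, -1]
```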
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_GenerateVocabRemapping.pbtxt
6 Path to the new vocab file.
12 Path to the old vocab file.
26 Number of new vocab entries found in old vocab.
32 How many entries into the new vocab file to start reading.
38 Number of entries in the new vocab file to remap.
44 Number of entries in the old vocab file to consider. If -1,
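This op is callable directly through tf.raw_ops; a hedged example against two throwaway vocab files (the paths and contents are made up for illustration):

```python
import tensorflow as tf

with open("/tmp/old_vocab.txt", "w") as f:
    f.write("the\ncat\nsat\n")
with open("/tmp/new_vocab.txt", "w") as f:
    f.write("cat\nthe\ndog\n")

remapping, num_present = tf.raw_ops.GenerateVocabRemapping(
    new_vocab_file="/tmp/new_vocab.txt",
    old_vocab_file="/tmp/old_vocab.txt",
    new_vocab_offset=0,   # how far into the new vocab file to start reading
    num_new_vocab=3,      # number of entries in the new vocab file to remap
    old_vocab_size=-1)    # -1: consider the entire old vocab file
print(remapping.numpy())    # old id per new token; -1 where absent
print(num_present.numpy())  # new vocab entries found in the old vocab
```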
/external/libtextclassifier/native/annotator/vocab/
vocab-level-table.cc
60 Optional<LookupResult> VocabLevelTable::Lookup(const std::string& vocab) const { in Lookup()
62 agent.set_query(vocab.data(), vocab.size()); in Lookup()
vocab-annotator.h
25 #error No vocab-annotator implementation specified.
vocab-level-table.h
41 Optional<LookupResult> Lookup(const std::string& vocab) const;
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/tests/
init_text_file_to_import.mlir
7 …= "", device = "", key_dtype = !tf.string, shared_name = "hash_table_/tmp/vocab.txt_-2_-1", use_no…
17 // Tests the tf.InitializeTableFromTextFileV2 op with an explicit vocab size.
21 …= "", device = "", key_dtype = !tf.string, shared_name = "hash_table_/tmp/vocab.txt_-2_-1", use_no…
init_text_file_to_import_invalid.mlir
7 …= "", device = "", key_dtype = !tf.string, shared_name = "hash_table_/tmp/vocab.txt_-2_-1", use_no…
19 …= "", device = "", key_dtype = !tf.string, shared_name = "hash_table_/tmp/vocab.txt_-2_-1", use_no…
33 …= "", device = "", key_dtype = !tf.string, shared_name = "hash_table_/tmp/vocab.txt_-2_-1", use_no…
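For context, tf.InitializeTableFromTextFileV2 is the op behind tf.lookup.TextFileInitializer, and the -2/-1 suffix in the shared_name above appears to encode the WHOLE_LINE (-2) and LINE_NUMBER (-1) key/value indices. A small example that produces such a table (the file path is assumed):

```python
import tensorflow as tf

with open("/tmp/vocab.txt", "w") as f:
    f.write("emerson\nlake\npalmer\n")

# Keys are whole lines of the file; values are their line numbers.
init = tf.lookup.TextFileInitializer(
    "/tmp/vocab.txt",
    key_dtype=tf.string, key_index=tf.lookup.TextFileIndex.WHOLE_LINE,
    value_dtype=tf.int64, value_index=tf.lookup.TextFileIndex.LINE_NUMBER)
table = tf.lookup.StaticHashTable(init, default_value=-1)
print(table.lookup(tf.constant("lake")))  # 1
```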
/external/tflite-support/tensorflow_lite_support/ios/text/tokenizers/Sources/
TFLBertTokenizer.mm
35 - (instancetype)initWithVocab:(NSArray<NSString *> *)vocab {
39 vocabCpp.reserve([vocab count]);
40 for (NSString *word in vocab) {
TFLBertTokenizer.h
36 - (instancetype)initWithVocab:(NSArray<NSString *> *)vocab NS_DESIGNATED_INITIALIZER;
/external/tflite-support/tensorflow_lite_support/cc/text/tokenizers/
bert_tokenizer.h
74 explicit FlatHashMapBackedWordpiece(const std::vector<std::string>& vocab);
92 explicit BertTokenizer(const std::vector<std::string>& vocab,
94 : vocab_{FlatHashMapBackedWordpiece(vocab)},
bert_tokenizer_jni.cc
39 std::vector<std::string> vocab = StringListToVector(env, vocab_list); in Java_org_tensorflow_lite_support_text_tokenizers_BertTokenizer_nativeLoadResource() local
51 vocab, BertTokenizerOptions{ in Java_org_tensorflow_lite_support_text_tokenizers_BertTokenizer_nativeLoadResource()
bert_tokenizer.cc
26 const std::vector<std::string>& vocab) in FlatHashMapBackedWordpiece() argument
27 : vocab_{vocab} { in FlatHashMapBackedWordpiece()
/external/antlr/runtime/Ruby/lib/antlr3/
task.rb
165 if vocab = grammar.token_vocab and
166 tfile = find_tokens_file( vocab, grammar )
183 def find_tokens_file( vocab, grammar ) argument
184 gram = @grammars.find { | gram | gram.name == vocab } and
186 file = locate( "#{ vocab }.tokens" ) and return( file )
/external/tflite-support/tensorflow_lite_support/java/src/native/task/text/qa/
bert_question_answerer_jni.cc
71 absl::string_view vocab = in Java_org_tensorflow_lite_task_text_qa_BertQuestionAnswerer_initJniWithBertByteBuffers() local
76 model.data(), model.size(), vocab.data(), vocab.size()); in Java_org_tensorflow_lite_task_text_qa_BertQuestionAnswerer_initJniWithBertByteBuffers()
/external/tensorflow/tensorflow/python/keras/premade/
wide_deep_test.py
197 for vocab, val in zip(vocab_list, vocab_val):
198 indices = np.where(data == vocab)
226 for vocab, val in zip(vocab_list, vocab_val):
227 indices = np.where(data == vocab)
/external/tflite-support/tensorflow_lite_support/ios/text/tokenizers/Tests/
TFLBertTokenizerTest.swift
38 let bertTokenizer = TFLBertTokenizer(vocab: ["hell", "##o", "wor", "##ld", "there"]) in testInitBertTokenizerFromVocab()
