/external/tensorflow/tensorflow/python/keras/layers/preprocessing/

text_vectorization_test.py
    402  max_tokens=None, standardize=None, split=None, pad_to_max_tokens=False)
    424  max_tokens=None,
    450  max_tokens=10,
    469  max_tokens=None,
    487  max_tokens=None,
    507  max_tokens=None,
    525  max_tokens=None,
    544  max_tokens=None,
    568  max_tokens=None,
    592  max_tokens=None,
    [all …]
index_lookup_test.py
    390  max_tokens=None,
    416  max_tokens=None,
    437  max_tokens=None,
    456  max_tokens=None,
    475  max_tokens=None,
    505  max_tokens=None,
    531  max_tokens=None,
    552  max_tokens=None,
    571  max_tokens=None,
    596  max_tokens=None,
    [all …]
index_lookup.py
    109  max_tokens,  (argument)
    122  if max_tokens is not None and max_tokens <= 1:
    124  "You passed %s" % (max_tokens,))
    142  self.max_tokens = max_tokens
    162  if max_tokens is not None:
    164  vocab_size = max_tokens - (num_oov_indices + num_mask_tokens)
    203  self.max_tokens = (
    226  if not self.pad_to_max_tokens or max_tokens is None:
    233  idf_shape = (max_tokens,) if self.pad_to_max_tokens else (None,)
    249  return tensor_shape.TensorShape([input_shape[0], self.max_tokens])
    [all …]
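The index_lookup.py hits show the shared contract: the base layer rejects max_tokens <= 1 (line 122) and, when a cap is set, budgets the learnable vocabulary as max_tokens - (num_oov_indices + num_mask_tokens) (line 164). A minimal sketch of that budget through the public StringLookup wrapper, assuming the TF 2.4-era experimental API (the printed vocabulary is illustrative, not verified output):

    import numpy as np
    from tensorflow.keras.layers.experimental.preprocessing import StringLookup

    # max_tokens budgets the whole table: with the default mask token ("")
    # and one OOV bucket, only max_tokens - 2 slots remain for real tokens.
    layer = StringLookup(max_tokens=4)
    layer.adapt(np.array([["a", "b", "c", "a", "b", "a"]]))
    print(layer.get_vocabulary())  # e.g. ['', '[UNK]', 'a', 'b'] ('c' is dropped)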
string_lookup_v1.py
    31   max_tokens=None,  (argument)
    40   max_tokens=max_tokens,
text_vectorization.py
    247  max_tokens=None,  (argument)
    312  if max_tokens is not None and max_tokens < 1:
    315  self._max_tokens = max_tokens
    343  max_tokens=max_tokens,
    435  "max_tokens": self._index_lookup_layer.max_tokens,
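text_vectorization.py validates the cap when given (line 312) and forwards it to the underlying index-lookup layer (line 343). A hedged usage sketch against the TF 2.4-era experimental API:

    import numpy as np
    from tensorflow.keras.layers.experimental.preprocessing import TextVectorization

    # max_tokens caps the vocabulary (reserved padding/OOV slots included);
    # token frequencies seen during adapt() decide what survives the cap.
    vectorizer = TextVectorization(max_tokens=10, output_mode="int")
    vectorizer.adapt(np.array(["the cat sat", "the cat ran", "a dog barked"]))
    print(vectorizer(np.array([["the dog sat"]])))  # int ids; OOV maps to 1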
string_lookup.py
    251  max_tokens=None,  (argument)
    281  max_tokens=max_tokens,
text_vectorization_v1.py
    80   max_tokens=None,  (argument)
    89   self).__init__(max_tokens, standardize, split, ngrams, output_mode,
text_vectorization_distribution_test.py
    67   max_tokens=None,
    96   max_tokens=None,
index_lookup_distribution_test.py
    70   max_tokens=None,
integer_lookup.py
    287  max_tokens=max_values,
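integer_lookup.py is the one caller that renames the knob: its public argument is max_values, forwarded to the base layer as max_tokens (line 287). A short sketch, again assuming the TF 2.4-era experimental API:

    import numpy as np
    from tensorflow.keras.layers.experimental.preprocessing import IntegerLookup

    # Same budget rule as StringLookup, spelled max_values for integer keys.
    lookup = IntegerLookup(max_values=5)
    lookup.adapt(np.array([[12, 12, 12, 42, 42, 7, 9]]))
    print(lookup.get_vocabulary())  # reserved slots first, then frequent values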
string_lookup_test.py
    320  layer = get_layer_class()(max_tokens=10)
    334  layer = get_layer_class()(max_tokens=10)
/external/tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/

category_encoding_benchmark.py
    40   max_tokens):  (argument)
    43   max_tokens=max_tokens, output_mode=output_mode)
    53   maxval=max_tokens - 1,
    69   batch_size, sequence_length, max_tokens)
    80   max_tokens=num_tokens)
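In this benchmark, max_tokens both sizes the CategoryEncoding layer and bounds the random inputs (maxval=max_tokens - 1, line 53). Roughly, assuming TF 2.4's experimental CategoryEncoding:

    import tensorflow as tf
    from tensorflow.keras.layers.experimental.preprocessing import CategoryEncoding

    # max_tokens is the width of the encoded output: one column per id.
    encoder = CategoryEncoding(max_tokens=6, output_mode="count")
    ids = tf.constant([[0, 1, 1, 4]])  # ids must lie in [0, max_tokens)
    print(encoder(ids))               # [[1., 2., 0., 0., 1., 0.]]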
index_lookup_adapt_benchmark.py
    73   max_tokens=k,
    97   max_tokens=k,
index_lookup_forward_benchmark.py
    79   max_tokens=None,
    111  max_tokens=None,
/external/google-breakpad/src/processor/

tokenize.cc
    47   int max_tokens,  (in Tokenize(), argument)
    50   tokens->reserve(max_tokens);  (in Tokenize())
    52   int remaining = max_tokens;  (in Tokenize())
    69   return tokens->size() == static_cast<unsigned int>(max_tokens);  (in Tokenize())
tokenize.h
    55   int max_tokens,
basic_source_line_resolver.cc
    75   int max_tokens,  (in TokenizeWithOptionalField(), argument)
    79   if (!Tokenize(line, separators, max_tokens - 1, tokens)) {  (in TokenizeWithOptionalField())
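Breakpad's Tokenize fills at most max_tokens pieces and returns whether exactly that many were produced (line 69); TokenizeWithOptionalField passes max_tokens - 1 so a trailing optional field can be absorbed (line 79). The contract is close to Python's str.split with maxsplit; a hypothetical sketch (names mine, simplified to a single separator rather than Breakpad's separator set):

    def tokenize(line, separator, max_tokens):
        # str.split with maxsplit=n yields at most n + 1 pieces, so the
        # last piece keeps any remainder, as in Breakpad's Tokenize().
        tokens = line.split(separator, max_tokens - 1)
        # Mirror the tokens->size() == max_tokens success check.
        return len(tokens) == max_tokens, tokens

    ok, fields = tokenize("FILE 1 foo.cc", " ", 3)
    assert ok and fields == ["FILE", "1", "foo.cc"]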
/external/libtextclassifier/native/utils/tflite/

string_projection.cc
    105  size_t len, size_t max_tokens) {  (in SplitByCharInternal(), argument)
    111  if (max_tokens != kInvalid && tokens->size() == max_tokens) {  (in SplitByCharInternal())
    119  size_t max_tokens) {  (in SplitByChar(), argument)
    121  SplitByCharInternal(&tokens, input_ptr, len, max_tokens);  (in SplitByChar())
    162  size_t len, size_t max_input, size_t max_tokens) {  (in SplitBySpaceInternal(), argument)
    172  (max_tokens == kAllTokens || tokens->size() < max_tokens - 1)) {  (in SplitBySpaceInternal())
    188  size_t max_input, size_t max_tokens) {  (in SplitBySpace(), argument)
    190  SplitBySpaceInternal(&tokens, input_ptr, len, max_input, max_tokens);  (in SplitBySpace())
    279  size_t max_tokens) const {  (in Tokenize())
    280  return Tokenize(input.c_str(), input.size(), max_input, max_tokens);  (in Tokenize())
    [all …]
/external/mesa3d/src/gallium/tests/graw/

disasm.c
    59   const size_t max_tokens = 1024*1024;  (in disasm(), local)
    66   tokens = malloc(max_tokens * sizeof *tokens);  (in disasm())
    67   fread(tokens, sizeof *tokens, max_tokens, fp);  (in disasm())
/external/tflite-support/tensorflow_lite_support/custom_ops/kernel/

whitespace_tokenizer.cc
    100  size_t max_tokens = 0;  (in WritePaddedOutput(), local)
    102  max_tokens = std::max(max_tokens, tokens.size());  (in WritePaddedOutput())
    105  output_shape->data[NumDimensions(input)] = max_tokens;  (in WritePaddedOutput())
    111  for (int i = tokens.size(); i < max_tokens; ++i) {  (in WritePaddedOutput())
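Here max_tokens is computed rather than configured: WritePaddedOutput takes the longest token list (line 102), makes it the final output dimension (line 105), and pads shorter rows out to it (line 111). The same idea in a sketch (a hypothetical helper, not the kernel's real code):

    def pad_rows(rows, pad=""):
        # max_tokens = longest row; shorter rows are padded to match,
        # which is what makes the kernel's output tensor rectangular.
        max_tokens = max((len(r) for r in rows), default=0)
        return [r + [pad] * (max_tokens - len(r)) for r in rows]

    print(pad_rows([["a", "b"], ["c"], []]))
    # [['a', 'b'], ['c', ''], ['', '']]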
/external/webp/src/utils/

huffman_encode_utils.h
    47   HuffmanTreeToken* tokens, int max_tokens);
huffman_encode_utils.c
    327  HuffmanTreeToken* tokens, int max_tokens) {  (in VP8LCreateCompressedHuffmanTree(), argument)
    329  HuffmanTreeToken* const ending_token = tokens + max_tokens;  (in VP8LCreateCompressedHuffmanTree())
/external/webp/src/enc/

vp8l_enc.c
    613  const int max_tokens = tree->num_symbols;  (in StoreFullHuffmanCode(), local)
    621  num_tokens = VP8LCreateCompressedHuffmanTree(tree, tokens, max_tokens);  (in StoreFullHuffmanCode())
    799  int max_tokens = 0;  (in EncodeImageNoHuffman(), local)
    848  if (max_tokens < codes->num_symbols) {  (in EncodeImageNoHuffman())
    849  max_tokens = codes->num_symbols;  (in EncodeImageNoHuffman())
    853  tokens = (HuffmanTreeToken*)WebPSafeMalloc(max_tokens, sizeof(*tokens));  (in EncodeImageNoHuffman())
    1022 int max_tokens = 0;  (in EncodeImageInternal(), local)
    1026 if (max_tokens < codes->num_symbols) {  (in EncodeImageInternal())
    1027 max_tokens = codes->num_symbols;  (in EncodeImageInternal())
    1030 tokens = (HuffmanTreeToken*)WebPSafeMalloc(max_tokens, sizeof(*tokens));  (in EncodeImageInternal())
/external/libtextclassifier/native/actions/

actions-suggestions.cc
    600  const int max_tokens,  (in AllocateInput(), argument)
    630  {conversation_length, max_tokens, token_embedding_size_});  (in AllocateInput())
    652  int max_tokens = 0;  (in SetupModelInput(), local)
    665  if (!EmbedTokensPerMessage(tokens, &token_embeddings, &max_tokens)) {  (in SetupModelInput())
    679  if (!AllocateInput(context.size(), max_tokens, total_token_count,  (in SetupModelInput())
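AllocateInput sizes the token-embedding input as [conversation_length, max_tokens, token_embedding_size] (line 630), where max_tokens is the longest tokenized message found while embedding (line 665). A numpy sketch of that padded allocation (dimension names from the snippet; everything else is an assumption):

    import numpy as np

    def allocate_input(per_message_embeddings, embedding_dim):
        # max_tokens = longest message; shorter messages are zero-padded
        # into a dense [conversation_length, max_tokens, dim] tensor.
        max_tokens = max(len(m) for m in per_message_embeddings)
        out = np.zeros((len(per_message_embeddings), max_tokens, embedding_dim))
        for i, message in enumerate(per_message_embeddings):
            out[i, :len(message)] = message
        return out

    batch = allocate_input([np.ones((3, 8)), np.ones((1, 8))], embedding_dim=8)
    print(batch.shape)  # (2, 3, 8)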
/external/freetype/src/psaux/

psobjs.h
    90   FT_UInt max_tokens,