/external/tensorflow/tensorflow/python/keras/layers/preprocessing/

categorical_encoding_test.py:
   57: max_tokens = 6
   58: expected_output_shape = [None, max_tokens]
   62: max_tokens=max_tokens, output_mode=categorical_encoding.BINARY)
   77: max_tokens = 5
   78: expected_output_shape = [None, max_tokens]
   82: max_tokens=None, output_mode=categorical_encoding.BINARY)
   83: layer.set_weights([np.array(max_tokens)])
   98: max_tokens = 6
   99: expected_output_shape = [None, max_tokens]
  103: max_tokens=6, output_mode=categorical_encoding.COUNT)
  [all …]
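The tests above exercise the layer's BINARY and COUNT output modes, both with an expected output shape of [None, max_tokens]. A minimal NumPy sketch of what those two modes produce, assuming integer token ids as input (the encode helper and the sample data are illustrative, not the Keras layer's API):

import numpy as np

def encode(batch, max_tokens, mode="count"):
    """Toy stand-in for the BINARY/COUNT modes: each row of integer token ids
    becomes a vector of length max_tokens."""
    out = np.zeros((len(batch), max_tokens), dtype=np.int64)
    for row, ids in enumerate(batch):
        for token_id in ids:
            out[row, token_id] += 1          # COUNT: occurrences per id
    if mode == "binary":
        out = (out > 0).astype(np.int64)     # BINARY: presence only
    return out

samples = [[0, 1, 1, 3], [2, 2, 2, 5]]
print(encode(samples, max_tokens=6, mode="binary"))  # shape (2, 6), 0/1 entries
print(encode(samples, max_tokens=6, mode="count"))   # shape (2, 6), counts

Either way, max_tokens fixes the output width, which is why the tests pin expected_output_shape to [None, max_tokens].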
text_vectorization_test.py:
  295: max_tokens=10,
  314: max_tokens=None,
  334: max_tokens=None,
  352: max_tokens=None,
  371: max_tokens=None,
  395: max_tokens=None,
  417: max_tokens=None,
  451: max_tokens=None,
  464: max_tokens=None,
  529: max_tokens=None,
  [all …]
categorical.py:
  59: max_tokens=None,  (argument)
  64: if max_tokens is not None:
  68: self.max_tokens = max_tokens
categorical_encoding.py:
   74: def __init__(self, max_tokens=None, output_mode=COUNT, **kwargs):  (argument)
   84: if max_tokens is not None and max_tokens < 1:
   89: compute_max_element=max_tokens is None,
   93: self._max_tokens = max_tokens
  102: if max_tokens is None:
  116: if max_tokens is None:
  123: shape=tensor_shape.TensorShape((max_tokens,)),
text_vectorization.py:
  203: max_tokens=None,  (argument)
  267: if max_tokens is not None and max_tokens < 1:
  270: self._max_tokens = max_tokens
  285: self._max_vocab_size = max_tokens - 1 if max_tokens is not None else None
  330: if max_tokens is not None and self._pad_to_max:
  331: vectorize_max_tokens = max_tokens
  335: max_tokens=vectorize_max_tokens, output_mode=self._output_mode)
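The constructor lines above show how TextVectorization validates max_tokens, keeps the vocabulary itself to max_tokens - 1 entries, and passes the cap through to the inner categorical encoding stage. A usage sketch against the public experimental API recorded in the golden pbtxt files further down (TF 2.x; the adapt data and exact defaults here are illustrative):

import numpy as np
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization

# max_tokens caps the learned vocabulary; with output_mode="binary" it is also
# the width of the multi-hot output, matching the [None, max_tokens] shapes
# asserted in the tests above.
layer = TextVectorization(max_tokens=10, output_mode="binary")
layer.adapt(np.array(["the quick brown fox", "jumped over the lazy dog"]))

out = layer(np.array([["the fox jumped"]]))
print(out.shape)  # (1, 10)

With output_mode left at its default ("int") the layer instead emits token id sequences, and max_tokens only bounds the vocabulary size.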
/external/google-breakpad/src/processor/

tokenize.cc:
  47: int max_tokens,  (in Tokenize(), argument)
  50: tokens->reserve(max_tokens);  (in Tokenize())
  52: int remaining = max_tokens;  (in Tokenize())
  69: return tokens->size() == static_cast<unsigned int>(max_tokens);  (in Tokenize())
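Tokenize() here caps the split at max_tokens and reports success only when exactly that many tokens were produced. A rough Python rendering of that pattern (a sketch of the logic, not the breakpad code; it assumes a single separator character, whereas the real function accepts a set of separators):

def tokenize(line, separator, max_tokens):
    # Split line into at most max_tokens pieces; the final piece keeps any
    # remainder. Report whether exactly max_tokens tokens were found, like the
    # size check on line 69 above.
    tokens = []
    remaining = max_tokens
    rest = line
    while remaining > 1:
        head, sep, tail = rest.partition(separator)
        tokens.append(head)
        remaining -= 1
        if not sep:
            return tokens, len(tokens) == max_tokens
        rest = tail
    tokens.append(rest)  # last token absorbs everything left over
    return tokens, len(tokens) == max_tokens

print(tokenize("FUNC 1000 10 0 main", " ", 5))
# (['FUNC', '1000', '10', '0', 'main'], True)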
tokenize.h:
  55: int max_tokens,
/external/mesa3d/src/gallium/tests/graw/

disasm.c:
  59: const size_t max_tokens = 1024*1024;  (in disasm(), local)
  66: tokens = malloc(max_tokens * sizeof *tokens);  (in disasm())
  67: fread(tokens, sizeof *tokens, max_tokens, fp);  (in disasm())
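disasm() simply allocates a fixed upper bound of one million tokens and reads at most that many from the shader dump. The same cap-then-read pattern in Python (a sketch; the 4-byte token size is an assumption about the token layout, and the file handling is illustrative):

import struct

MAX_TOKENS = 1024 * 1024  # same fixed cap as in disasm.c
TOKEN_SIZE = 4            # assumption: one token is a 32-bit word

def read_tokens(path):
    # Read at most MAX_TOKENS fixed-size tokens, mirroring malloc + fread above.
    with open(path, "rb") as fp:
        data = fp.read(MAX_TOKENS * TOKEN_SIZE)
    count = len(data) // TOKEN_SIZE
    return struct.unpack("<%dI" % count, data[:count * TOKEN_SIZE])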
/external/webp/src/utils/

huffman_encode_utils.h:
  47: HuffmanTreeToken* tokens, int max_tokens);
huffman_encode_utils.c:
  327: HuffmanTreeToken* tokens, int max_tokens) {  (in VP8LCreateCompressedHuffmanTree(), argument)
  329: HuffmanTreeToken* const ending_token = tokens + max_tokens;  (in VP8LCreateCompressedHuffmanTree())
/external/webp/src/enc/

vp8l_enc.c:
   591: const int max_tokens = tree->num_symbols;  (in StoreFullHuffmanCode(), local)
   599: num_tokens = VP8LCreateCompressedHuffmanTree(tree, tokens, max_tokens);  (in StoreFullHuffmanCode())
   780: int max_tokens = 0;  (in EncodeImageNoHuffman(), local)
   831: if (max_tokens < codes->num_symbols) {  (in EncodeImageNoHuffman())
   832: max_tokens = codes->num_symbols;  (in EncodeImageNoHuffman())
   836: tokens = (HuffmanTreeToken*)WebPSafeMalloc(max_tokens, sizeof(*tokens));  (in EncodeImageNoHuffman())
  1009: int max_tokens = 0;  (in EncodeImageInternal(), local)
  1013: if (max_tokens < codes->num_symbols) {  (in EncodeImageInternal())
  1014: max_tokens = codes->num_symbols;  (in EncodeImageInternal())
  1017: tokens = (HuffmanTreeToken*)WebPSafeMalloc(max_tokens, sizeof(*tokens));  (in EncodeImageInternal())
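In EncodeImageNoHuffman() and EncodeImageInternal(), max_tokens is the largest num_symbols across all Huffman code tables, so one scratch token buffer can be allocated once and reused for every tree. The sizing pattern as a short Python sketch (the huffman_codes argument and its .num_symbols attribute are stand-ins for the C structures):

def allocate_token_scratch(huffman_codes):
    # Size one scratch buffer to the largest code table so it can be reused
    # for every tree, as in EncodeImageNoHuffman()/EncodeImageInternal().
    max_tokens = 0
    for codes in huffman_codes:
        if max_tokens < codes.num_symbols:
            max_tokens = codes.num_symbols
    return [None] * max_tokens  # stands in for WebPSafeMalloc(max_tokens, ...)

StoreFullHuffmanCode() takes the simpler path and sizes the buffer from a single tree's num_symbols (line 591).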
/external/libtextclassifier/native/actions/

actions-suggestions.cc:
  572: const int max_tokens,  (in AllocateInput(), argument)
  602: {conversation_length, max_tokens, token_embedding_size_});  (in AllocateInput())
  624: int max_tokens = 0;  (in SetupModelInput(), local)
  637: if (!EmbedTokensPerMessage(tokens, &token_embeddings, &max_tokens)) {  (in SetupModelInput())
  651: if (!AllocateInput(context.size(), max_tokens, total_token_count,  (in SetupModelInput())
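Here max_tokens is the token count of the longest message in the conversation (filled in by EmbedTokensPerMessage), and AllocateInput() sizes the model input as [conversation_length, max_tokens, token_embedding_size]. A NumPy sketch of that padding step (function and argument names below are illustrative, not the library's API):

import numpy as np

def pack_token_embeddings(per_message_embeddings, embedding_size):
    # Pad each message's token embeddings into one dense tensor of shape
    # [conversation_length, max_tokens, embedding_size], where max_tokens is
    # the length of the longest message in the conversation.
    max_tokens = max((len(m) for m in per_message_embeddings), default=0)
    packed = np.zeros((len(per_message_embeddings), max_tokens, embedding_size),
                      dtype=np.float32)
    for i, message in enumerate(per_message_embeddings):
        for j, embedding in enumerate(message):
            packed[i, j, :] = embedding
    return packed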
actions-suggestions.h:
  175: bool AllocateInput(const int conversation_length, const int max_tokens,
/external/freetype/src/psaux/

psobjs.h:
  91: FT_UInt max_tokens,
psobjs.c:
  731: FT_UInt max_tokens,  (in ps_parser_to_token_array(), argument)
  747: T1_Token limit = cur + max_tokens;  (in ps_parser_to_token_array())
/external/tensorflow/tensorflow/tools/api/golden/v1/

tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt:
  119: …argspec: "args=[\'self\', \'max_tokens\', \'standardize\', \'split\', \'ngrams\', \'output_mode\',…
/external/tensorflow/tensorflow/tools/api/golden/v2/

tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt:
  117: …argspec: "args=[\'self\', \'max_tokens\', \'standardize\', \'split\', \'ngrams\', \'output_mode\',…
/external/freetype/include/freetype/internal/

psaux.h:
  418: FT_UInt max_tokens,