
Searched refs:vocab_size (Results 1 – 25 of 53) sorted by relevance

/external/tensorflow/tensorflow/python/kernel_tests/
embedding_ops_test.py 141 vocab_size, argument
151 shard_shape = [vocab_size // num_shards] + shape
152 if i < vocab_size % num_shards: # Excess goes evenly on the first shards
171 vocab_size, argument
176 num_shards, vocab_size, dtype=dtype, shape=shape)
180 shape=[vocab_size] + shape,
191 vocab_size, argument
212 ids_per_partition, extras = divmod(vocab_size, num_shards)
255 vocab_size = 4
256 p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
[all …]
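
The arithmetic exercised here splits vocab_size embedding rows across num_shards partitions, with the remainder going to the leading shards. A minimal standalone sketch of that divmod scheme (the function name is illustrative, not from the test):

# Split a vocabulary of `vocab_size` rows across `num_shards`
# partitions; the first `extras` shards each take one extra row,
# mirroring lines 151-152 and 212 above.
def shard_sizes(vocab_size, num_shards):
    ids_per_partition, extras = divmod(vocab_size, num_shards)
    return [ids_per_partition + (1 if i < extras else 0)
            for i in range(num_shards)]

assert shard_sizes(10, 3) == [4, 3, 3]  # 10 rows over 3 shards
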
lookup_ops_test.py 809 vocab_size = 3
817 vocab_size=vocab_size)
823 self.assertEqual(vocab_size, self.evaluate(table1.size()))
826 vocab_size = 5
833 vocab_size=vocab_size)
839 vocab_size = 1
847 vocab_size=vocab_size)
853 self.assertEqual(vocab_size, self.evaluate(table3.size()))
916 vocab_size = 3
918 vocab_file, vocab_size=vocab_size)
[all …]
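
These tests assert that a table built from a vocabulary file with an explicit vocab_size reports exactly that many entries. A sketch of the same behavior through the public tf.lookup API (eager TF2 assumed; the throwaway file makes it self-contained):

import tensorflow as tf

# Write a small vocab file so the sketch can run on its own.
with open("vocab.txt", "w") as f:
    f.write("one\ntwo\nthree\nfour\nfive\n")

init = tf.lookup.TextFileInitializer(
    "vocab.txt",
    tf.string, tf.lookup.TextFileIndex.WHOLE_LINE,
    tf.int64, tf.lookup.TextFileIndex.LINE_NUMBER,
    vocab_size=3)  # load only the first 3 lines
table = tf.lookup.StaticHashTable(init, default_value=-1)
assert int(table.size()) == 3  # what the tests above assert
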
sparse_ops_test.py 136 def _AssertResultsSorted(self, output, vocab_size): argument
140 self.assertAllEqual(output.dense_shape, [3, vocab_size])
142 def _AssertResultsNotSorted(self, output, vocab_size): argument
146 self.assertAllEqual(output.dense_shape, [3, vocab_size])
149 vocab_size = 50
156 sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
159 self._AssertResultsSorted(output, vocab_size)
162 vocab_size = 50
165 sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
168 self._AssertResultsSorted(output, vocab_size)
[all …]
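
sparse_merge combines an id SparseTensor and a value SparseTensor into one whose last dimension is vocab_size, which is what _AssertResultsSorted checks. A sketch via tf.compat.v1, where this TF1-era API still lives:

import tensorflow as tf

vocab_size = 50
sp_ids = tf.sparse.SparseTensor(
    indices=[[0, 0], [1, 0], [2, 0]],
    values=tf.constant([7, 3, 41], tf.int64),
    dense_shape=[3, 1])
sp_values = tf.sparse.SparseTensor(
    indices=[[0, 0], [1, 0], [2, 0]],
    values=[1.0, 2.0, 3.0],
    dense_shape=[3, 1])
merged = tf.compat.v1.sparse_merge(sp_ids, sp_values, vocab_size)
# merged.dense_shape is [3, vocab_size]; ids became column positions.
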
/external/tensorflow/tensorflow/python/keras/layers/
recurrent_v2_test.py 45 vocab_size = 20
50 x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
51 y = np.random.randint(0, vocab_size, size=(batch_size, timestep))
57 keras.layers.Embedding(vocab_size, embedding_dim,
60 keras.layers.Dense(vocab_size)
119 vocab_size = 100
121 np.random.RandomState(0).randint(0, vocab_size, [128, 25]))
122 embedder = embeddings.Embedding(input_dim=vocab_size, output_dim=16)
lstm_v2_test.py 713 vocab_size = 20
718 x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
719 y = np.random.randint(0, vocab_size, size=(batch_size, timestep))
722 keras.layers.Embedding(vocab_size, embedding_dim,
725 keras.layers.Dense(vocab_size)
750 vocab_size = 1000
752 keras.layers.Embedding(vocab_size, 64),
764 x = np.random.randint(0, vocab_size, size=(batch, timestep))
821 vocab_size = 20
824 inputs = np.random.randint(0, vocab_size, size=(num_samples, timestep))
[all …]
gru_v2_test.py 550 vocab_size = 20
555 x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
556 y = np.random.randint(0, vocab_size, size=(batch_size, timestep))
559 keras.layers.Embedding(vocab_size, embedding_dim,
562 keras.layers.Dense(vocab_size)
621 vocab_size = 20
624 inputs = np.random.randint(0, vocab_size, size=(num_samples, timestep))
631 vocab_size,
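
The recurrent_v2, lstm_v2, and gru_v2 tests above all build the same shape of model: token ids in [0, vocab_size) pass through an Embedding, a recurrent layer, and a Dense projection back to vocab_size logits. A runnable sketch of that pattern with illustrative sizes:

import numpy as np
import tensorflow as tf

vocab_size, embedding_dim, timestep, batch_size = 20, 8, 4, 16
x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
y = np.random.randint(0, vocab_size, size=(batch_size, timestep))

model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim),
    tf.keras.layers.LSTM(32, return_sequences=True),
    tf.keras.layers.Dense(vocab_size),  # logits over the vocabulary
])
model.compile("adam",
              tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
model.fit(x, y, epochs=1, verbose=0)
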
/external/tensorflow/tensorflow/python/keras/layers/preprocessing/
index_lookup.py 164 vocab_size = max_tokens - (num_oov_indices + num_mask_tokens)
166 vocab_size = None
170 vocab_size=vocab_size,
204 self._table_handler.vocab_size() + self.num_oov_indices +
276 if self._table_handler.vocab_size() == 0:
288 def vocab_size(self): member in IndexLookup
289 return int(self._table_handler.vocab_size())
504 vocab_size = self._set_inverse_vocabulary(vocab)
506 vocab_size = self._set_forward_vocabulary(vocab, idf_weights=idf_weights)
512 self.max_tokens = vocab_size
[all …]
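
Line 164 shows that index_lookup carves OOV (and mask) slots out of max_tokens, and lines 204/289 show the reported size includes those slots. The public StringLookup layer, built on this class in recent TF 2.x, exposes the same accounting; a short sketch:

import tensorflow as tf

layer = tf.keras.layers.StringLookup(
    vocabulary=["a", "b", "c"], num_oov_indices=1)
assert layer.vocabulary_size() == 4  # 3 real tokens + 1 OOV slot
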
/external/tensorflow/tensorflow/python/ops/
lookup_ops.py 648 vocab_size=None, argument
719 if (vocab_size is not None) and (vocab_size <= 0):
720 raise ValueError("Invalid vocab_size %s." % vocab_size)
724 self._vocab_size = vocab_size
785 vocab_size=None, argument
824 vocab_size=vocab_size,
836 vocab_size=None, argument
877 vocab_size=vocab_size,
1353 vocab_size=None, argument
1440 if vocab_size is not None and vocab_size < 1:
[all …]
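
Lines 719-720 guard against non-positive sizes. A sketch of tripping that guard through the public initializer (the path is hypothetical; with an invalid vocab_size the constructor should raise before any file is read):

import tensorflow as tf

try:
    tf.lookup.TextFileInitializer(
        "vocab.txt",  # hypothetical path
        tf.string, tf.lookup.TextFileIndex.WHOLE_LINE,
        tf.int64, tf.lookup.TextFileIndex.LINE_NUMBER,
        vocab_size=0)  # rejected: vocab_size must be positive
except ValueError as err:
    print(err)  # per line 720 above: "Invalid vocab_size 0."
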
sparse_ops.py 1730 def sparse_to_indicator(sp_input, vocab_size, name=None): argument
1784 sp_new = sparse_merge_impl(sp_input, sp_values, vocab_size, name)
1794 def sparse_merge(sp_ids, sp_values, vocab_size, name=None, argument
1887 return sparse_merge_impl(sp_ids, sp_values, vocab_size, name, already_sorted)
1892 vocab_size, argument
1899 if not (isinstance(vocab_size, ops.Tensor) or
1900 isinstance(vocab_size, numbers.Integral)):
1902 type(vocab_size))
1903 vocab_size = [vocab_size]
1908 if not isinstance(vocab_size, collections_abc.Iterable):
[all …]
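
sparse_to_indicator (line 1730) densifies a SparseTensor of ids into a bool tensor whose last dimension is vocab_size; like sparse_merge, it is a TF1-era API kept under tf.compat.v1. A sketch:

import tensorflow as tf

sp_ids = tf.sparse.SparseTensor(
    indices=[[0, 0], [1, 0], [1, 1]],
    values=tf.constant([2, 0, 3], tf.int64),
    dense_shape=[2, 2])
indicator = tf.compat.v1.sparse_to_indicator(sp_ids, vocab_size=5)
# indicator has shape [2, 5]: row 0 is True at column 2,
# row 1 is True at columns 0 and 3.
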
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/tests/
init_text_file_to_import_invalid.mlir 9 …iter = " ", device = "", key_index = -2 : i64, value_index = -1 : i64, vocab_size = -1 : i64} : (t…
20 …iter = " ", device = "", key_index = -1 : i64, value_index = -1 : i64, vocab_size = -1 : i64} : (t…
34 …miter = " ", device = "", key_index = -2 : i64, value_index = 0 : i64, vocab_size = -1 : i64} : (t…
init_text_file_to_import.mlir 8 …iter = " ", device = "", key_index = -2 : i64, value_index = -1 : i64, vocab_size = -1 : i64} : (t…
22 …iter = " ", device = "", key_index = -2 : i64, value_index = -1 : i64, vocab_size = 2 : i64} : (te…
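
The attribute values in these MLIR cases mirror the Python-side TextFileIndex constants: key_index = -2 keys on the whole line, value_index = -1 uses the line number, and vocab_size = -1 means no limit (read the entire file). A two-line check:

import tensorflow as tf

assert tf.lookup.TextFileIndex.WHOLE_LINE == -2
assert tf.lookup.TextFileIndex.LINE_NUMBER == -1
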
/external/tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/
index_lookup_adapt_benchmark.py 123 for vocab_size in [100, 1000, 10000, 100000, 1000000]:
125 self.bm_adapt_implementation(vocab_size, batch, int(vocab_size / 10))
discretization_adapt_benchmark.py 112 for vocab_size in [100, 1000, 10000, 100000, 1000000]:
114 self.bm_adapt_implementation(vocab_size, batch)
normalization_adapt_benchmark.py 124 for vocab_size in [100, 1000, 10000, 100000, 1000000]:
126 self.bm_adapt_implementation(vocab_size, batch)
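
All three benchmarks sweep vocab_size over powers of ten and time an adapt() call at each size. A simplified sketch of that loop (layer choice and data generation are illustrative, not the benchmarks' own):

import time
import numpy as np
import tensorflow as tf

for vocab_size in [100, 1000, 10000]:
    data = np.random.randint(0, vocab_size, size=(vocab_size * 10,))
    layer = tf.keras.layers.IntegerLookup()
    start = time.time()
    layer.adapt(data)  # builds the vocabulary from the data
    print(vocab_size, time.time() - start)
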
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v2/
InitializeTableFromTextFileV2.pbtxt 24 name: "vocab_size"
64 name: "vocab_size"
InitializeTableFromTextFile.pbtxt 25 name: "vocab_size"
65 name: "vocab_size"
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/
init_text_file_to_import.cc 88 if (op.vocab_size() != -1) lines.resize(op.vocab_size()); in matchAndRewrite()
/external/tensorflow/tensorflow/core/kernels/
word2vec_kernels.cc 285 const int64 vocab_size = w_in.dim_size(0); in Compute() local
288 OP_REQUIRES(ctx, vocab_size == sampler_->num(), in Compute()
289 errors::InvalidArgument("vocab_size mismatches: ", vocab_size, in Compute()
308 DCHECK(0 <= example && example < vocab_size) << example; in Compute()
310 DCHECK(0 <= label && label < vocab_size) << label; in Compute()
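
The word2vec kernel takes vocab_size from the leading dimension of the input embedding matrix and DCHECKs that every sampled example/label id stays in [0, vocab_size). A Python sketch of the same invariant (names are illustrative):

import tensorflow as tf

w_in = tf.random.uniform([1000, 64])  # [vocab_size, emb_dim]
vocab_size = w_in.shape[0]
examples = tf.constant([3, 17, 999])  # sampled ids
tf.debugging.assert_less(examples, tf.constant(vocab_size, examples.dtype))
rows = tf.gather(w_in, examples)      # safe once ids are bounds-checked
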
lookup_table_init_op.h 25 Status InitializeTableFromTextFile(const string& filename, int64 vocab_size,
lookup_util.cc 78 Status Init(const string& filename, int64 vocab_size, char delimiter, in Init() argument
82 vocab_size_ = vocab_size; in Init()
356 Status InitializeTableFromTextFile(const string& filename, int64 vocab_size, in InitializeTableFromTextFile() argument
385 TF_RETURN_IF_ERROR(iter.Init(filename, vocab_size, delimiter, key_dtype, in InitializeTableFromTextFile()
lookup_util.h 54 Status InitializeTableFromTextFile(const string& filename, int64 vocab_size,
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v1/
InitializeTableFromTextFile.pbtxt 25 name: "vocab_size"
InitializeTableFromTextFileV2.pbtxt 24 name: "vocab_size"
/external/tensorflow/tensorflow/tools/api/golden/v2/
tensorflow.lookup.-text-file-initializer.pbtxt 17 …e\', \'key_dtype\', \'key_index\', \'value_dtype\', \'value_index\', \'vocab_size\', \'delimiter\'…
/external/tensorflow/tensorflow/tools/api/golden/v1/
tensorflow.lookup.-text-file-initializer.pbtxt 17 …e\', \'key_dtype\', \'key_index\', \'value_dtype\', \'value_index\', \'vocab_size\', \'delimiter\'…
