/external/libtextclassifier/native/lang_id/common/

  embedding-network.cc
    159  const EmbeddingNetworkParams::Matrix &embedding_matrix =  [in ConcatEmbeddings(), local]
    161  const int embedding_dim = embedding_matrix.cols;  [in ConcatEmbeddings()]
    193  SAFTM_CHECK_LT(feature_id, embedding_matrix.rows);  [in ConcatEmbeddings()]
    197  (reinterpret_cast<const char *>(embedding_matrix.elements) +  [in ConcatEmbeddings()]
    200  switch (embedding_matrix.quant_type) {  [in ConcatEmbeddings()]
    210  multiplier *= Float16To32(embedding_matrix.quant_scales[feature_id]);  [in ConcatEmbeddings()]
    222  multiplier *= Float16To32(embedding_matrix.quant_scales[feature_id]);  [in ConcatEmbeddings()]
    240  << static_cast<int>(embedding_matrix.quant_type);  [in ConcatEmbeddings()]
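The ConcatEmbeddings() hits above read rows out of a quantized embedding matrix and fold a per-row float16 scale into a multiplier. The snippet below is a rough NumPy sketch of that per-row dequantization, assuming uint8 codes and a scale-only scheme; the field names mirror the ones in the hits but are stand-ins, and the real kernel may handle additional quant_type values or zero points.

    import numpy as np

    # Hypothetical stand-ins for the fields referenced in the hits above:
    # one row of uint8 codes per feature plus one float16 scale per row.
    num_rows, embedding_dim = 4, 8
    elements = np.random.randint(0, 256, size=(num_rows, embedding_dim), dtype=np.uint8)
    quant_scales = np.random.rand(num_rows).astype(np.float16)

    def dequantize_row(feature_id, multiplier=1.0):
        # Fold the per-row scale into the multiplier (the float16 -> float32
        # cast plays the role of Float16To32), then scale the quantized
        # codes back to float32.
        multiplier *= float(np.float32(quant_scales[feature_id]))
        return multiplier * elements[feature_id].astype(np.float32)

    row = dequantize_row(2)  # dequantized embedding for feature_id == 2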

/external/tensorflow/tensorflow/compiler/tests/

  eager_test.py
    274  embedding_matrix = resource_variable_ops.ResourceVariable(
    279  embedding = embedding_ops.embedding_lookup(embedding_matrix, [1])
    281  dy_dx = tape.gradient(y, embedding_matrix)
    286  optimizer.apply_gradients([(dy_dx, embedding_matrix)])
    291  embedding_matrix.assign_add(array_ops.ones([3, 2]))
    295  [2.0, 2.0]], embedding_matrix.numpy())
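The eager_test.py hits differentiate an embedding_lookup with respect to the embedding variable, apply the gradient, and then update the variable in place. A minimal public-API equivalent of that flow might look like the sketch below; the SGD optimizer and the reduce_sum loss are assumptions, since the hits do not show which optimizer or loss the test uses.

    import tensorflow as tf

    embedding_matrix = tf.Variable(tf.ones([3, 2]))

    with tf.GradientTape() as tape:
        # look up row 1 and reduce it to a scalar so there is something to differentiate
        embedding = tf.nn.embedding_lookup(embedding_matrix, [1])
        y = tf.reduce_sum(embedding)

    dy_dx = tape.gradient(y, embedding_matrix)        # sparse gradient touching only row 1
    optimizer = tf.keras.optimizers.SGD(learning_rate=1.0)
    optimizer.apply_gradients([(dy_dx, embedding_matrix)])

    embedding_matrix.assign_add(tf.ones([3, 2]))      # in-place update, as in the test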

/external/pytorch/torch/onnx/

  symbolic_opset18.py
    231  embedding_matrix,  [argument]
    243  embedding_matrix,

  symbolic_opset10.py
    592  embedding_matrix,  [argument]
    642  embeddings = g.op("Gather", embedding_matrix, indices_row)

  symbolic_opset11.py
    1205  embedding_matrix,  [argument]
    1217  embedding_matrix,

  symbolic_helper.py
    1942  embedding_matrix,  [argument]
    2001  embeddings = loop_context.op("Gather", embedding_matrix, indices_row, axis_i=0)

  symbolic_opset9.py
    933  embedding_matrix,  [argument]
    948  return symbolic_helper._onnx_unsupported("embedding_bag", embedding_matrix)
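In these torch.onnx symbolic functions the embedding weight arrives as the embedding_matrix argument and the per-bag lookups are emitted as ONNX Gather ops, with embedding_bag reported as unsupported in opset 9. A minimal sketch of triggering that lowering through the standard torch.onnx.export entry point; the Lookup module and its shapes are illustrative, not taken from these files.

    import io
    import torch

    class Lookup(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.emb = torch.nn.Embedding(10, 3)

        def forward(self, idx):
            return self.emb(idx)

    buf = io.BytesIO()
    # In the exported graph the embedding weight is consumed by a Gather node,
    # much like the g.op("Gather", ...) calls in the hits above.
    torch.onnx.export(Lookup(), (torch.tensor([1, 2, 4]),), buf, opset_version=11)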

/external/pytorch/test/mobile/model_test/

  nn_ops.py
    311  embedding_matrix = torch.rand(10, 3)
    314  F.embedding(input, embedding_matrix),
    315  F.embedding_bag(input2, embedding_matrix, offsets),
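The nn_ops.py hits call the functional embedding ops directly. A self-contained sketch of those two calls; the index and offset tensors here are assumptions, since the hits do not show how input, input2, and offsets are built.

    import torch
    import torch.nn.functional as F

    embedding_matrix = torch.rand(10, 3)

    # plain lookup: each index selects one row of the matrix
    input = torch.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])
    out = F.embedding(input, embedding_matrix)                 # shape (2, 4, 3)

    # bagged lookup: offsets split the flat index list into bags that get pooled
    input2 = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
    offsets = torch.tensor([0, 4])
    bags = F.embedding_bag(input2, embedding_matrix, offsets)  # shape (2, 3), mean-pooled by default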

/external/tensorflow/tensorflow/python/ops/

  control_flow_ops_test.py
    211  embedding_matrix = variable_scope.get_variable(
    219  embedding = embedding_ops.embedding_lookup(embedding_matrix + 0.0, [0])
    234  embedding_matrix = variable_scope.get_variable(
    241  embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
    253  embedding_matrix = variable_scope.get_variable(
    262  embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
    272  dynamic_grads = gradients_impl.gradients(cost, [embedding_matrix])[0]
    276  embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
    280  static_grads = gradients_impl.gradients(static, [embedding_matrix])[0]
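These control_flow_ops_test.py hits compare gradients of an embedding_lookup taken through control flow ("dynamic") against the straight-line version ("static"). A rough TF2-style sketch of the same idea, with the lookup placed inside a tf.cond; the branch structure and reduce_sum cost are assumptions and do not reproduce the test's actual graph-mode setup.

    import tensorflow as tf

    embedding_matrix = tf.Variable(tf.ones([5, 5]))

    @tf.function
    def dynamic_cost(flag):
        # the lookup lives inside a conditional branch
        embedding = tf.cond(flag,
                            lambda: tf.nn.embedding_lookup(embedding_matrix, [0]),
                            lambda: tf.zeros([1, 5]))
        return tf.reduce_sum(embedding)

    with tf.GradientTape() as tape:
        cost = dynamic_cost(tf.constant(True))
    dynamic_grads = tape.gradient(cost, embedding_matrix)

    with tf.GradientTape() as tape:
        static = tf.reduce_sum(tf.nn.embedding_lookup(embedding_matrix, [0]))
    static_grads = tape.gradient(static, embedding_matrix)
    # both gradients should touch only row 0 of the matrix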

/external/pytorch/test/onnx/

  test_pytorch_onnx_onnxruntime.py
    7657  def forward(self, embedding_matrix, input, offset, weights):  [argument]
    7660  embedding_matrix,
    7672  embedding_matrix = torch.rand(10, 15)
    7673  self.run_test(model, (embedding_matrix, x, offset, w))
    7686  def forward(self, embedding_matrix, input, weights):  [argument]
    7688  input, embedding_matrix, mode="sum", per_sample_weights=weights
    7691  embedding_matrix = torch.rand(10, 15)
    7700  (embedding_matrix, x, w),
    7703  additional_test_inputs=[(embedding_matrix, x2, w2)],
    7714  def forward(self, embedding_matrix, input, weights, offsets):  [argument]
    [all …]
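The ONNX Runtime tests above feed embedding_bag a weight matrix, indices, offsets, and per-sample weights before export. A standalone eager-mode sketch of the op those test models wrap; the concrete index, offset, and weight values are made up for illustration.

    import torch
    import torch.nn.functional as F

    embedding_matrix = torch.rand(10, 15)
    input = torch.tensor([1, 2, 4, 5, 4, 3, 2])   # flat indices
    offsets = torch.tensor([0, 3, 5])             # three bags: [0,3), [3,5), [5,7)
    weights = torch.rand(7)                       # one weight per looked-up index

    # "sum" mode is required when per_sample_weights is given;
    # each looked-up row is scaled by its weight before the per-bag sum.
    out = F.embedding_bag(input, embedding_matrix, offsets=offsets,
                          mode="sum", per_sample_weights=weights)   # shape (3, 15)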

/external/pytorch/test/

  test_jit.py
    10094  def embedding_norm(input, embedding_matrix, max_norm):  [argument]
    10095  F.embedding(input, embedding_matrix, max_norm=0.01)
    10098  def embedding_norm_script(input, embedding_matrix, max_norm):  [argument]
    10100  F.embedding(input, embedding_matrix, max_norm=0.01)
    10104  embedding_matrix = torch.randn(10, 3)
    10108  output1 = var1 * embedding_matrix
    10109  output2 = var2 * embedding_matrix
    10113  ignore = F.embedding(input, embedding_matrix, max_norm=0.01)
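The test_jit.py hits revolve around F.embedding with max_norm, which matters for scripting because passing max_norm renormalizes the touched rows of the weight tensor in place rather than only transforming the lookup result (hence the discarded "ignore = ..." call above). A small sketch of that side effect; the index values are illustrative.

    import torch
    import torch.nn.functional as F

    embedding_matrix = torch.randn(10, 3)
    input = torch.tensor([0, 1, 2])

    norms_before = embedding_matrix.norm(dim=1).clone()
    # the lookup result is discarded, but rows 0-2 of embedding_matrix
    # are clipped in place to an L2 norm of at most 0.01
    ignore = F.embedding(input, embedding_matrix, max_norm=0.01)
    norms_after = embedding_matrix.norm(dim=1)
    # norms_after[:3] <= 0.01 while the untouched rows keep their old norms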