/external/tensorflow/tensorflow/core/distributed_runtime/ |
D | collective_rma_distributed.cc |
      38   const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,   in RecvBufCall() argument
      47   req_.set_num_bytes(to_tensor->TotalBytes());   in RecvBufCall()
      48   req_.set_buf_ptr(reinterpret_cast<int64>(DMAHelper::base(to_tensor)));   in RecvBufCall()
      79   const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,   in RecvFromPeer() argument
      85   to_alloc_attr, to_tensor, client_locality, dev_to_dev_stream_index,   in RecvFromPeer()
      100  to_device_ctx, to_tensor, dev_to_dev_stream_index,   in RecvFromPeer()
      112  if (num_bytes != to_tensor->TotalBytes()) {   in RecvFromPeer()
      115  to_tensor->TotalBytes()));   in RecvFromPeer()
      133  to_tensor->dtype(), to_tensor->shape());   in RecvFromPeer()
      139  to_tensor, dev_to_dev_stream_index,   in RecvFromPeer()
      [all …]
|
D | collective_rma_distributed.h | 40 const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
|
/external/tensorflow/tensorflow/python/ops/ragged/ |
D | ragged_to_tensor_op_test.py |
      97   dt = rt.to_tensor()
      341  dt = rt.to_tensor(default_value=default, shape=shape)
      400  self.evaluate(rt.to_tensor(default_value=default, shape=shape))
      405  rt_placeholder.to_tensor(default_value=default, shape=shape))
      409  actual = input_data.to_tensor(
      416  actual = input_data.to_tensor(
      424  actual = input_data.to_tensor(
      431  actual = input_data.to_tensor(
      438  actual = input_data.to_tensor(shape=[4, 4])
      460  actual = input_data.to_tensor(default_value=default_value)
      [all …]
|
D | ragged_squeeze_op_test.py |
      54   self.assertAllEqual(ragged_conversion_ops.to_tensor(rt), dt)
      114  self.assertAllEqual(ragged_conversion_ops.to_tensor(rt), dt)
      169  self.assertAllEqual(ragged_conversion_ops.to_tensor(rt_s), dt_s)
|
D | ragged_conversion_ops.py |
      48   def to_tensor(rt_input, default_value=None, name=None):   function
      50   return rt_input.to_tensor(default_value, name)
      57   return rt_input.to_tensor(default_value=default_value, shape=shape)
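Note: these matches exercise the public RaggedTensor.to_tensor() conversion, which pads ragged rows out to a dense Tensor. A minimal illustration of the default_value and shape parameters seen above (the input values here are made up):

    import tensorflow as tf

    rt = tf.ragged.constant([[1, 2, 3], [4], [5, 6]])

    # Pad short rows with 0 (the default) to get a dense [3, 3] tensor.
    dense = rt.to_tensor()

    # Pad with an explicit sentinel and force a fixed output shape.
    padded = rt.to_tensor(default_value=-1, shape=[3, 4])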
|
D | ragged_string_ops.py |
      742  to_tensor = False
      745  to_tensor = True
      766  dense_shape) if to_tensor else output
      797  dense_shape) if to_tensor else output
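Note: here to_tensor is a local flag rather than the conversion itself; it records that the caller passed a dense Tensor so the ragged result is densified again before returning. A sketch of that pattern with hypothetical names (not the actual ragged_string_ops.py code):

    import tensorflow as tf

    def _tokenize_like(source):
        # Densify on the way out only if the caller gave us a dense Tensor.
        to_tensor = not isinstance(source, tf.RaggedTensor)
        values = source.flat_values if isinstance(source, tf.RaggedTensor) else source
        output = tf.strings.split(values)  # result is always ragged internally
        return output.to_tensor(default_value="") if to_tensor else output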
|
D | ragged_gather_ops.py | 207 result = ragged_tensor.RaggedTensor.to_tensor(result)
|
/external/tensorflow/tensorflow/core/common_runtime/ |
D | collective_rma_local.cc |
      32   const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,   in RecvFromPeer() argument
      51   auto consumer_callback = [to_tensor, to_device_ctx, to_device, to_alloc_attr,   in RecvFromPeer()
      68   int64 recv_bytes = to_tensor->TotalBytes();   in RecvFromPeer()
      77   to_tensor,  // dst Tensor*   in RecvFromPeer()
|
D | base_collective_executor.h |
      125  const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,   in RecvFromPeer() argument
      130  to_alloc_attr, to_tensor, client_locality, stream_index, done);   in RecvFromPeer()
|
D | graph_execution_state.cc |
      569  tensor_connection.to_tensor(), "\".");   in PruneGraph()
      574  tensor_connection.to_tensor(),   in PruneGraph()
      579  &tensor_connection.to_tensor(), {from_node, from_id.second}));   in PruneGraph()
      684  TensorId id = ParseTensorName(tensor_connection.to_tensor());   in OptimizeGraph()
      687  tensor_connection.to_tensor());   in OptimizeGraph()
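Note: in graph_execution_state.cc, to_tensor() is the accessor on the CallableOptions TensorConnection proto (the destination endpoint a connection feeds into), unrelated to ragged tensors. A hedged sketch of building such a connection in Python (the endpoint names are made up):

    from tensorflow.core.protobuf import config_pb2

    opts = config_pb2.CallableOptions()
    opts.fetch.append("loss:0")
    conn = opts.tensor_connection.add()
    conn.from_tensor = "queue/dequeue:0"    # output endpoint to read from
    conn.to_tensor = "input/Placeholder:0"  # input endpoint it replaces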
|
D | collective_rma_local.h | 46 const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
|
D | test_collective_executor_mgr.h | 34 const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor, in RecvFromPeer() argument
|
D | ring_gatherer_test.cc |
      72   const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,   in RecvFromPeer() argument
      79   to_alloc_attr, to_tensor, client_locality, dev_to_dev_stream_index,   in RecvFromPeer()
|
D | ring_reducer_test.cc |
      72   const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,   in RecvFromPeer() argument
      79   to_alloc_attr, to_tensor, client_locality, dev_to_dev_stream_index,   in RecvFromPeer()
|
D | hierarchical_tree_broadcaster_test.cc |
      168  const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,   in RecvFromPeer() argument
      174  to_alloc_attr, to_tensor, client_locality, stream_index, done);   in RecvFromPeer()
|
/external/tensorflow/tensorflow/python/keras/layers/ |
D | pooling_test.py |
      64   dense_data = ragged_data.to_tensor()
      84   dense_data = ragged_data.to_tensor()
|
D | merge_test.py |
      244  dense_data = ragged_data.to_tensor()
      251  out_ragged).to_tensor()
|
D | wrappers_test.py |
      417  dense_data = ragged_data.to_tensor()
      425  self.assertAllEqual(output_ragged.to_tensor(), output_dense)
      449  dense_data = ragged_data.to_tensor()
      454  self.assertAllEqual(output_ragged.to_tensor(), output_dense)
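Note: the Keras tests above compare a computation on ragged input against the same computation on the densified input. A minimal sketch of that comparison, using an elementwise op in place of a full Keras layer for brevity (values made up; zero padding is preserved by relu, so the densified outputs match):

    import tensorflow as tf

    ragged_data = tf.ragged.constant([[1., -2., 3.], [-4.], [5., 6.]])
    dense_data = ragged_data.to_tensor()      # zero-padded to shape [3, 3]

    output_ragged = tf.nn.relu(ragged_data)   # elementwise ops dispatch on RaggedTensor
    output_dense = tf.nn.relu(dense_data)

    tf.debugging.assert_equal(output_ragged.to_tensor(), output_dense)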
|
/external/tensorflow/tensorflow/python/data/kernel_tests/ |
D | flat_map_test.py | 162 ragged_conversion_ops.to_tensor(x))
|
/external/tensorflow/tensorflow/tools/api/golden/v2/ |
D | tensorflow.-ragged-tensor.pbtxt | 127 name: "to_tensor"
|
/external/tensorflow/tensorflow/tools/api/golden/v1/ |
D | tensorflow.-ragged-tensor.pbtxt | 127 name: "to_tensor"
|
/external/tensorflow/tensorflow/core/framework/ |
D | collective.h | 252 Tensor* to_tensor,
|
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | unicode_decode_op_test.py |
      226  input_tensor, "UTF-8").to_tensor(default_value=-1)
      594  input_tensor, "UTF-8").to_tensor(default_value="")
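Note: tf.strings.unicode_decode returns a RaggedTensor of code points (one row per input string), which these tests densify with a sentinel default_value. A short illustration with made-up inputs:

    import tensorflow as tf

    codepoints = tf.strings.unicode_decode(["héllo", "hi"], "UTF-8")  # RaggedTensor
    dense = codepoints.to_tensor(default_value=-1)
    # dense has shape [2, 5]; the shorter row is padded with -1.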
|
/external/tensorflow/tensorflow/python/distribute/ |
D | input_lib_test.py |
      546  "dense": ragged_tensor.to_tensor(),
      572  values.select_replica(i, per_replica_batch["ragged"]).to_tensor(),
|
/external/tensorflow/tensorflow/core/grappler/optimizers/ |
D | function_optimizer.cc |
      326  SafeTensorId to_tensor(func_node, to_idx);   in AddTensorMapping() local
      327  AddTensorMapping(from_tensor, to_tensor);   in AddTensorMapping()
|