Cross-reference results for the symbol alloc_attr (type AllocatorAttributes) in the TensorFlow source tree, grouped by directory and file. Each match is listed as: source line number, matched line, and the enclosing function or symbol kind reported by the indexer.

/external/tensorflow/tensorflow/core/common_runtime/
entry.h
     41  Entry(const Entry& other) : state(other.state), alloc_attr(other.alloc_attr) {  [in Entry()]
     66  alloc_attr = other.alloc_attr;
     88  alloc_attr = other.alloc_attr;
    134  AllocatorAttributes alloc_attr;  [member]
partitioning_utils.cc
    128  AllocatorAttributes alloc_attr;  [in UpdateArgAndRetvalMetadata(), local]
    135  alloc_attr.set_on_host(true);  [in UpdateArgAndRetvalMetadata()]
    137  arg_alloc_attrs->push_back(alloc_attr);  [in UpdateArgAndRetvalMetadata()]
    145  AllocatorAttributes alloc_attr;  [in UpdateArgAndRetvalMetadata(), local]
    152  alloc_attr.set_on_host(true);  [in UpdateArgAndRetvalMetadata()]
    154  ret_alloc_attrs->push_back(alloc_attr);  [in UpdateArgAndRetvalMetadata()]
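The UpdateArgAndRetvalMetadata matches above show the standard way per-value allocator attributes are built for function arguments and return values: a default AllocatorAttributes is pushed for device-resident values, and set_on_host(true) is called first for values whose memory type is HOST_MEMORY (int32 shapes, resource handles, and the like). A minimal sketch of that decision, assuming the MTypeFromDType mapping; BuildAllocAttrsForDTypes is a hypothetical helper, not the indexed function:

    // Sketch only: BuildAllocAttrsForDTypes is a made-up name. It shows how a
    // per-value AllocatorAttributes list is typically assembled.
    #include <vector>

    #include "tensorflow/core/framework/allocator.h"
    #include "tensorflow/core/framework/types.h"

    namespace tensorflow {

    std::vector<AllocatorAttributes> BuildAllocAttrsForDTypes(
        const DataTypeVector& dtypes) {
      std::vector<AllocatorAttributes> attrs;
      attrs.reserve(dtypes.size());
      for (DataType dtype : dtypes) {
        AllocatorAttributes alloc_attr;
        // int32 shapes, resource handles, etc. map to HOST_MEMORY and must be
        // placed in host RAM even when the function runs on a GPU.
        if (MTypeFromDType(dtype) == HOST_MEMORY) {
          alloc_attr.set_on_host(true);
        }
        attrs.push_back(alloc_attr);
      }
      return attrs;
    }

    }  // namespace tensorflow

The resulting vector is what ends up in arg_alloc_attrs / ret_alloc_attrs in the lines indexed above.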
rendezvous_util.cc
     83  AllocatorAttributes alloc_attr;  [in RecvOutputsFromRendezvousAsync(), local]
     85  alloc_attr = alloc_attrs[i];  [in RecvOutputsFromRendezvousAsync()]
     88  alloc_attr);  [in RecvOutputsFromRendezvousAsync()]
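In RecvOutputsFromRendezvousAsync the caller-supplied attributes for output i, when present, are copied into the Rendezvous::Args that accompany the receive, so the rendezvous materializes the incoming tensor in the right memory space. A hedged sketch of that call shape for a single output; key construction and the surrounding loop are omitted, and RecvOneOutput is an invented name:

    // Sketch: shows how AllocatorAttributes travel through Rendezvous::Args.
    #include <utility>

    #include "tensorflow/core/framework/allocator.h"
    #include "tensorflow/core/framework/rendezvous.h"

    namespace tensorflow {

    void RecvOneOutput(Rendezvous* rendezvous,
                       const Rendezvous::ParsedKey& parsed_key,
                       DeviceContext* device_context,
                       const AllocatorAttributes& alloc_attr,
                       Rendezvous::DoneCallback done) {
      Rendezvous::Args args;
      args.device_context = device_context;
      // The receiver's attributes decide where the incoming tensor is
      // allocated (host vs. device memory).
      args.alloc_attrs = alloc_attr;
      rendezvous->RecvAsync(parsed_key, args, std::move(done));
    }

    }  // namespace tensorflow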
immutable_executor_state.cc
    270  AllocatorAttributes* alloc_attr) {  [in ExtractScopedAllocatorAttr(), argument]
    274  CHECK_EQ(alloc_attr->scope_id, 0);  [in ExtractScopedAllocatorAttr()]
    275  alloc_attr->scope_id = sc_attr[i + 1];  [in ExtractScopedAllocatorAttr()]
graph_view.cc
    281  AllocatorAttributes* alloc_attr) {  [in ExtractScopedAllocatorAttr(), argument]
    285  CHECK_EQ(alloc_attr->scope_id, 0);  [in ExtractScopedAllocatorAttr()]
    286  alloc_attr->scope_id = sc_attr[i + 1];  [in ExtractScopedAllocatorAttr()]
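immutable_executor_state.cc and graph_view.cc carry the same ExtractScopedAllocatorAttr helper: the node's _scoped_allocator attribute is a flat list of (output_slot, scope_id) pairs, and when the requested slot is found its scope_id is written into alloc_attr->scope_id, which the CHECK insists is still unset. A hedged reconstruction of that loop; the real function's exact signature and return handling may differ:

    // Reconstructed sketch of the scoped-allocator lookup.
    #include <vector>

    #include "tensorflow/core/framework/allocator.h"
    #include "tensorflow/core/platform/logging.h"

    namespace tensorflow {

    bool ExtractScopedAllocatorAttrSketch(const std::vector<int>& sc_attr,
                                          int output_index,
                                          AllocatorAttributes* alloc_attr) {
      // sc_attr is the flattened _scoped_allocator attr:
      // {slot_0, scope_id_0, slot_1, scope_id_1, ...}.
      for (size_t i = 0; i + 1 < sc_attr.size(); i += 2) {
        if (sc_attr[i] == output_index) {
          CHECK_EQ(alloc_attr->scope_id, 0);  // slot must not be claimed yet
          alloc_attr->scope_id = sc_attr[i + 1];
          return true;
        }
      }
      return false;
    }

    }  // namespace tensorflow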
executor.cc
    660  output.alloc_attr = item.output_attrs()[0];  [in ProcessConstTensor()]
    865  (*input_alloc_attrs)[i] = entry->alloc_attr;  [in PrepareInputs()]
   1012  out->alloc_attr = ctx->output_alloc_attr(i);  [in ProcessOutputs()]
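In executor.cc the attributes travel with the tensor: ProcessOutputs stamps each produced value's Entry (see entry.h above) with ctx->output_alloc_attr(i), and PrepareInputs later copies the stored attributes into the input_alloc_attrs array handed to the consuming kernel. A simplified sketch of that hand-off; SimpleEntry, RecordOutputs, and CollectInputAttrs are stand-ins, not the executor's real types:

    // Sketch: SimpleEntry stands in for the executor's Entry struct and keeps
    // only the two fields relevant here.
    #include <vector>

    #include "tensorflow/core/framework/allocator.h"
    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/framework/tensor.h"

    namespace tensorflow {

    struct SimpleEntry {
      Tensor val;
      AllocatorAttributes alloc_attr;
    };

    // After a kernel has run, record each output together with the attributes
    // it was allocated with (mirrors what ProcessOutputs stores per Entry).
    void RecordOutputs(OpKernelContext* ctx, std::vector<SimpleEntry>* outputs) {
      outputs->resize(ctx->num_outputs());
      for (int i = 0; i < ctx->num_outputs(); ++i) {
        if (Tensor* t = ctx->mutable_output(i)) (*outputs)[i].val = *t;
        (*outputs)[i].alloc_attr = ctx->output_alloc_attr(i);
      }
    }

    // Before the next kernel runs, surface those attributes again (mirrors
    // PrepareInputs filling input_alloc_attrs).
    std::vector<AllocatorAttributes> CollectInputAttrs(
        const std::vector<SimpleEntry>& inputs) {
      std::vector<AllocatorAttributes> attrs(inputs.size());
      for (size_t i = 0; i < inputs.size(); ++i) attrs[i] = inputs[i].alloc_attr;
      return attrs;
    }

    }  // namespace tensorflow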
/external/tensorflow/tensorflow/core/kernels/
host_constant_op.cc
     30  AllocatorAttributes alloc_attr;  [in _HostConstantOp(), local]
     31  alloc_attr.set_on_host(true);  [in _HostConstantOp()]
     34  ctx, ctx->device()->MakeTensorFromProto(*proto, alloc_attr, &tensor_));  [in _HostConstantOp()]
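_HostConstantOp builds its constant directly in host memory: it sets on_host(true) and hands the attributes to Device::MakeTensorFromProto, so the tensor is backed by the CPU allocator even when the op is placed on a GPU. A sketch of a kernel constructor doing the same thing; the class name is mine and the shape/type validation of the real op is omitted:

    // Sketch of the host-constant pattern.
    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/tensor.pb.h"

    namespace tensorflow {

    class HostConstantSketchOp : public OpKernel {
     public:
      explicit HostConstantSketchOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
        const TensorProto* proto = nullptr;
        OP_REQUIRES_OK(ctx, ctx->GetAttr("value", &proto));
        AllocatorAttributes alloc_attr;
        alloc_attr.set_on_host(true);  // force host memory, even on GPU devices
        OP_REQUIRES_OK(ctx, ctx->device()->MakeTensorFromProto(
                                *proto, alloc_attr, &tensor_));
      }

      void Compute(OpKernelContext* ctx) override { ctx->set_output(0, tensor_); }

     private:
      Tensor tensor_;  // host-resident constant, built once at construction
    };

    }  // namespace tensorflow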
dynamic_partition_op_gpu.cu.cc
    252  AllocatorAttributes alloc_attr;  [in ComputeAsync(), local]
    253  alloc_attr.set_on_host(true);  [in ComputeAsync()]
    257  &partition_count, alloc_attr),  [in ComputeAsync()]
    288  AllocatorAttributes alloc_attr;  [in ComputeAsync(), local]
    289  alloc_attr.set_on_host(true);  [in ComputeAsync()]
    290  alloc_attr.set_gpu_compatible(true);  [in ComputeAsync()]
    294  &cpu_tensor, alloc_attr),  [in ComputeAsync()]
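The GPU DynamicPartition kernel cannot size its outputs until the per-partition counts, computed on the device, are visible to the host, so ComputeAsync allocates small host-side temporaries; the second one is additionally marked gpu_compatible so it comes from pinned memory the GPU can DMA into asynchronously. A hedged sketch of that copy-back, assuming a GPU placement; the real kernel stays asynchronous via a stream callback, whereas this sketch blocks for brevity, and CopyCountsToHost is an invented name:

    // Sketch: copy device-side int32 counts into a pinned host temp tensor.
    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/platform/stream_executor.h"

    namespace tensorflow {
    namespace se = ::stream_executor;

    Status CopyCountsToHost(OpKernelContext* ctx, const Tensor& device_counts,
                            Tensor* host_counts) {
      AllocatorAttributes alloc_attr;
      alloc_attr.set_on_host(true);         // allocate in host RAM...
      alloc_attr.set_gpu_compatible(true);  // ...and pinned, so the GPU can DMA
                                            // into it asynchronously
      TF_RETURN_IF_ERROR(ctx->allocate_temp(DT_INT32, device_counts.shape(),
                                            host_counts, alloc_attr));
      // Assumes a GPU placement, so the op device context and stream exist.
      se::Stream* stream = ctx->op_device_context()->stream();
      se::DeviceMemoryBase src(
          const_cast<char*>(device_counts.tensor_data().data()),
          device_counts.TotalBytes());
      stream->ThenMemcpy(host_counts->flat<int32>().data(), src,
                         device_counts.TotalBytes());
      return stream->BlockHostUntilDone();
    }

    }  // namespace tensorflow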
reduction_ops_common.h
    166  const AllocatorAttributes alloc_attr = ctx->output_alloc_attr(0);
    177  &tmp_out, alloc_attr));
    186  helper.out_reshape(), &tmp_out, alloc_attr));
    227  &shuffled, alloc_attr));
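The reduction kernels reuse ctx->output_alloc_attr(0), the attributes the framework already chose for the real output, when allocating their temporaries (tmp_out, shuffled), so the intermediates live in the same memory space as the result and the final step can reuse the temporary's buffer instead of copying across memory types. A short sketch of that pattern; the helper name is hypothetical:

    // Sketch: allocate a scratch tensor in the same memory space as output 0.
    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/framework/tensor.h"

    namespace tensorflow {

    Status AllocateScratchLikeOutput0(OpKernelContext* ctx, DataType dtype,
                                      const TensorShape& shape, Tensor* scratch) {
      // Reuse the attributes selected for the real output so the temporary
      // ends up on the same device / memory type.
      const AllocatorAttributes alloc_attr = ctx->output_alloc_attr(0);
      return ctx->allocate_temp(dtype, shape, scratch, alloc_attr);
    }

    }  // namespace tensorflow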
stack.cc
    195  AllocatorAttributes alloc_attr;  [in Compute(), local]
    196  alloc_attr.set_on_host(true);  [in Compute()]
    199  &stack->handle_, alloc_attr));  [in Compute()]
scatter_nd_op.cc
    196  AllocatorAttributes alloc_attr;  [in Compute(), local]
    199  alloc_attr.set_on_host(true);  [in Compute()]
    205  c->forward_input(0, 0, input.dtype(), shape, memory_type, alloc_attr);  [in Compute()]
    916  AllocatorAttributes alloc_attr;  [in DoScatterNd(), local]
    918  alloc_attr.set_on_host(true);  [in DoScatterNd()]
    921  c->allocate_temp(DataTypeToEnum<T>::value, shape, out, alloc_attr));  [in DoScatterNd()]
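scatter_nd_op.cc uses the attributes in two places: Compute() passes them, with on_host(true) when the buffer lives in host memory, to OpKernelContext::forward_input, which only reuses the input buffer when the memory type and attributes are compatible; DoScatterNd falls back to allocate_temp with the same attributes when forwarding is not possible. A hedged sketch of that forward-or-allocate shape; ForwardOrAllocate is an invented helper, and copying the old values into the fresh buffer is left to the caller:

    // Sketch: try to update input 0 in place, otherwise allocate a temp with
    // matching allocator attributes.
    #include <memory>

    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/types.h"

    namespace tensorflow {

    Status ForwardOrAllocate(OpKernelContext* c, MemoryType memory_type,
                             Tensor* out) {
      const Tensor& input = c->input(0);
      const TensorShape& shape = input.shape();

      AllocatorAttributes alloc_attr;
      if (memory_type == HOST_MEMORY) {
        alloc_attr.set_on_host(true);  // the buffer must stay in host memory
      }

      // forward_input() shares the input buffer only when the input is not
      // referenced elsewhere and its memory type/attributes are compatible.
      std::unique_ptr<Tensor> forwarded =
          c->forward_input(/*input_index=*/0, /*output_index=*/0, input.dtype(),
                           shape, memory_type, alloc_attr);
      if (forwarded) {
        *out = *forwarded;
        return Status::OK();
      }
      // Otherwise allocate a fresh buffer with the same attributes; the caller
      // copies the old values in before applying the scatter updates.
      return c->allocate_temp(input.dtype(), shape, out, alloc_attr);
    }

    }  // namespace tensorflow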
tensor_array_ops.cc
    112  AllocatorAttributes alloc_attr;  [in Compute(), local]
    113  alloc_attr.set_on_host(true);  [in Compute()]
    116  &tensor_array_output_handle, alloc_attr));  [in Compute()]
/external/tensorflow/tensorflow/core/util/
rocm_solvers.h
    112  AllocatorAttributes alloc_attr;
    116  alloc_attr.set_on_host(true);
    117  alloc_attr.set_gpu_compatible(true);
    120  &scratch_tensor_, alloc_attr));
cuda_solvers.h
    389  AllocatorAttributes alloc_attr;
    393  alloc_attr.set_on_host(true);
    394  alloc_attr.set_gpu_compatible(true);
    397  &scratch_tensor_, alloc_attr));
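rocm_solvers.h and cuda_solvers.h allocate a scratch tensor once with both on_host(true) and gpu_compatible(true): pinned host memory that the solver routines can write status/info values into and that host code can read back after synchronizing. A simplified, hedged holder in the spirit of those headers' scratch-space helper; the class below is not the real ScratchSpace API:

    // Simplified sketch of a pinned host scratch buffer held by an object.
    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/framework/tensor.h"

    namespace tensorflow {

    class HostScratch {
     public:
      HostScratch(OpKernelContext* ctx, int64 num_elements) {
        AllocatorAttributes alloc_attr;
        alloc_attr.set_on_host(true);         // host-resident...
        alloc_attr.set_gpu_compatible(true);  // ...and pinned for async DMA
        TF_CHECK_OK(ctx->allocate_temp(DT_INT32, TensorShape({num_elements}),
                                       &scratch_tensor_, alloc_attr));
      }

      int32* data() { return scratch_tensor_.flat<int32>().data(); }

     private:
      Tensor scratch_tensor_;  // keeps the pinned buffer alive with the object
    };

    }  // namespace tensorflow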
/external/tensorflow/tensorflow/core/kernels/image/
scale_and_translate_op.cc
     66  AllocatorAttributes alloc_attr;  [in ComputeSpansCore(), local]
     67  alloc_attr.set_on_host(true);  [in ComputeSpansCore()]
     70  &spans->starts, alloc_attr));  [in ComputeSpansCore()]
     75  &spans->weights, alloc_attr));  [in ComputeSpansCore()]
    158  AllocatorAttributes alloc_attr;  [in ComputeGradSpansCore(), local]
    159  alloc_attr.set_on_host(true);  [in ComputeGradSpansCore()]
    162  &grad_spans->starts, alloc_attr));  [in ComputeGradSpansCore()]
    167  &grad_spans->weights, alloc_attr));  [in ComputeGradSpansCore()]
non_max_suppression_op.cu.cc
    277  AllocatorAttributes alloc_attr;  [in NmsGpu(), local]
    278  alloc_attr.set_on_host(true);  [in NmsGpu()]
    279  alloc_attr.set_gpu_compatible(true);  [in NmsGpu()]
    283  DataType::DT_INT32, TensorShape({1}), &h_num_selected, alloc_attr));  [in NmsGpu()]
crop_and_resize_op.cc
    828  AllocatorAttributes alloc_attr;  [in RunIfBoxIndexIsValid(), local]
    829  alloc_attr.set_on_host(true);  [in RunIfBoxIndexIsValid()]
    830  alloc_attr.set_gpu_compatible(true);  [in RunIfBoxIndexIsValid()]
    834  &isvalid_host_tensor, alloc_attr),  [in RunIfBoxIndexIsValid()]
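non_max_suppression_op.cu.cc and crop_and_resize_op.cc use the same pinned allocation to bring a single device-computed int32 (the selected-box count, or a box-index validity flag) back to the host, and only read it once the GPU stream has drained, typically from a callback enqueued on the device's event manager. A hedged sketch of that shape; CheckFlagThen is an invented wrapper, and the API names (tensorflow_gpu_device_info(), EventMgr::ThenExecute()) reflect the TensorFlow snapshot indexed here and may differ in newer releases:

    // Sketch: copy a device-side int32 flag into pinned host memory, then read
    // it from a callback that runs after the stream has finished the copy.
    #include <functional>

    #include "tensorflow/core/common_runtime/gpu/gpu_event_mgr.h"
    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/platform/stream_executor.h"

    namespace tensorflow {
    namespace se = ::stream_executor;

    void CheckFlagThen(OpKernelContext* ctx, const Tensor& device_flag,
                       std::function<void(bool)> continuation) {
      Tensor host_flag;
      AllocatorAttributes alloc_attr;
      alloc_attr.set_on_host(true);
      alloc_attr.set_gpu_compatible(true);  // pinned, so the async copy is legal
      OP_REQUIRES_OK(ctx, ctx->allocate_temp(DT_INT32, TensorShape({1}),
                                             &host_flag, alloc_attr));

      // Assumes a GPU placement, so the op device context and stream exist.
      se::Stream* stream = ctx->op_device_context()->stream();
      se::DeviceMemoryBase src(
          const_cast<char*>(device_flag.tensor_data().data()), sizeof(int32));
      stream->ThenMemcpy(host_flag.flat<int32>().data(), src, sizeof(int32));

      // Run the continuation only after everything enqueued so far (including
      // the copy above) has completed, so reading host_flag is safe.
      ctx->device()->tensorflow_gpu_device_info()->event_mgr->ThenExecute(
          stream, [host_flag, continuation]() {
            continuation(host_flag.flat<int32>()(0) != 0);
          });
    }

    }  // namespace tensorflow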
/external/tensorflow/tensorflow/core/tpu/kernels/
infeed_ops.cc
    100  AllocatorAttributes alloc_attr;  [in TransposeTensor(), local]
    101  alloc_attr.set_on_host(true);  [in TransposeTensor()]
    104  &transposed_tensor, alloc_attr));  [in TransposeTensor()]
/external/tensorflow/tensorflow/compiler/tf2tensorrt/convert/
convert_graph.cc
    669  AllocatorAttributes alloc_attr;  [in GetDeviceAndAllocator(), local]
    671  dev_allocator = devices[0]->GetAllocator(alloc_attr);  [in GetDeviceAndAllocator()]
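In the TensorRT converter the attributes are simply left at their defaults: a default-constructed AllocatorAttributes asks Device::GetAllocator for the device's ordinary allocator (device memory on a GPU), which GetDeviceAndAllocator then hands to the engine for scratch allocations. A minimal sketch; the wrapper function is hypothetical:

    // Sketch: default attributes select the device's regular allocator.
    #include "tensorflow/core/framework/allocator.h"
    #include "tensorflow/core/framework/device_base.h"

    namespace tensorflow {

    Allocator* GetDefaultDeviceAllocator(DeviceBase* device) {
      AllocatorAttributes alloc_attr;
      // Default-constructed attributes => the device's standard allocator;
      // setting on_host(true) would instead return the host allocator.
      return device->GetAllocator(alloc_attr);
    }

    }  // namespace tensorflow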