
Searched refs:alloc_attrs (Results 1 – 25 of 34) sorted by relevance


/external/tensorflow/tensorflow/core/common_runtime/
rendezvous_util.cc
24 const std::vector<AllocatorAttributes>& alloc_attrs, in SendTensorsToRendezvous() argument
31 if (!alloc_attrs.empty() && (keys.size() != alloc_attrs.size())) { in SendTensorsToRendezvous()
35 "; alloc_attrs.size() = ", alloc_attrs.size()); in SendTensorsToRendezvous()
46 if (!alloc_attrs.empty()) { in SendTensorsToRendezvous()
47 rendez_args.alloc_attrs = alloc_attrs[i]; in SendTensorsToRendezvous()
58 const std::vector<AllocatorAttributes>& alloc_attrs, in RecvOutputsFromRendezvousAsync() argument
65 if (!alloc_attrs.empty() && (keys.size() != alloc_attrs.size())) { in RecvOutputsFromRendezvousAsync()
68 keys.size(), "; alloc_attrs.size() = ", alloc_attrs.size())); in RecvOutputsFromRendezvousAsync()
84 if (!alloc_attrs.empty()) { in RecvOutputsFromRendezvousAsync()
85 alloc_attr = alloc_attrs[i]; in RecvOutputsFromRendezvousAsync()
[all …]
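
The rendezvous_util.cc hits above show the contract for these helpers: alloc_attrs is either empty or exactly parallel to the list of rendezvous keys, and each element is attached to the Rendezvous::Args used for the corresponding Send or Recv. Below is a minimal sketch of that pattern as a simplified free function; SendTensorsSketch is illustrative, not the exact TensorFlow helper.

    #include <string>
    #include <vector>

    #include "tensorflow/core/framework/device_base.h"
    #include "tensorflow/core/framework/rendezvous.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/lib/core/errors.h"

    namespace tensorflow {

    // Per-tensor AllocatorAttributes must line up one-to-one with the keys.
    Status SendTensorsSketch(Rendezvous* rendezvous,
                             DeviceContext* device_context,
                             const std::vector<AllocatorAttributes>& alloc_attrs,
                             const std::vector<std::string>& keys,
                             const std::vector<Tensor>& tensors_to_send) {
      if (!alloc_attrs.empty() && keys.size() != alloc_attrs.size()) {
        return errors::InvalidArgument("keys.size() = ", keys.size(),
                                       "; alloc_attrs.size() = ",
                                       alloc_attrs.size());
      }
      Rendezvous::ParsedKey parsed;
      for (size_t i = 0; i < keys.size(); ++i) {
        Rendezvous::Args rendez_args;
        rendez_args.device_context = device_context;
        if (!alloc_attrs.empty()) {
          rendez_args.alloc_attrs = alloc_attrs[i];  // attributes for this tensor
        }
        TF_RETURN_IF_ERROR(Rendezvous::ParseKey(keys[i], &parsed));
        TF_RETURN_IF_ERROR(rendezvous->Send(parsed, rendez_args,
                                            tensors_to_send[i],
                                            /*is_dead=*/false));
      }
      return Status::OK();
    }

    }  // namespace tensorflow

The Recv side in RecvOutputsFromRendezvousAsync applies the same size check and per-key attribute assignment, as the hits at lines 65-85 above indicate.
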
rendezvous_mgr.cc
44 (send_args.alloc_attrs.on_host() || parsed.src.type == "CPU"); in SameWorkerRecvDone()
46 (recv_args.alloc_attrs.on_host() || parsed.dst.type == "CPU"); in SameWorkerRecvDone()
79 AllocatorAttributes attr = recv_args.alloc_attrs; in SameWorkerRecvDone()
80 attr.set_gpu_compatible(send_args.alloc_attrs.gpu_compatible() || in SameWorkerRecvDone()
81 recv_args.alloc_attrs.gpu_compatible()); in SameWorkerRecvDone()
111 src_device, dst_device, send_args.alloc_attrs, recv_args.alloc_attrs, &in, in SameWorkerRecvDone()
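
In SameWorkerRecvDone (rendezvous_mgr.cc above, and again in base_rendezvous_mgr.cc further down), the destination buffer has to satisfy both endpoints: the code starts from the receiver's attributes and ORs in gpu_compatible from the sender before picking an allocator and copying. A hedged sketch of just that merging step; the function name is illustrative.

    #include "tensorflow/core/framework/allocator.h"

    namespace tensorflow {

    // Merge sender and receiver attributes for a same-worker tensor copy.
    AllocatorAttributes MergeForSameWorkerCopy(
        const AllocatorAttributes& send_attrs,
        const AllocatorAttributes& recv_attrs) {
      AllocatorAttributes attr = recv_attrs;
      // The buffer must be GPU-compatible if either side requested it.
      attr.set_gpu_compatible(send_attrs.gpu_compatible() ||
                              recv_attrs.gpu_compatible());
      return attr;
    }

    }  // namespace tensorflow

The merged attributes then select the allocator on the destination device, while both sides' original attributes are still passed to the copy helper shown at line 111 above.
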
rendezvous_util.h
35 const std::vector<AllocatorAttributes>& alloc_attrs,
44 const std::vector<AllocatorAttributes>& alloc_attrs,
renamed_device.h
107 const AllocatorAttributes alloc_attrs, in MakeTensorFromProto() argument
109 return underlying_device_->MakeTensorFromProto(tensor_proto, alloc_attrs, in MakeTensorFromProto()
threadpool_device.h
39 const AllocatorAttributes alloc_attrs,
single_threaded_cpu_device.cc
62 const AllocatorAttributes alloc_attrs, in MakeTensorFromProto() argument
threadpool_device.cc
89 const TensorProto& tensor_proto, const AllocatorAttributes alloc_attrs, in MakeTensorFromProto() argument
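
Most of the remaining common_runtime hits are overrides of Device::MakeTensorFromProto, which receives the AllocatorAttributes so the device can choose which allocator backs the parsed tensor. A minimal CPU-style sketch of that pattern, written here as a free function over a device pointer; the real overrides also handle variants, device copies, and richer error reporting.

    #include <utility>

    #include "tensorflow/core/common_runtime/device.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/tensor.pb.h"
    #include "tensorflow/core/lib/core/errors.h"

    namespace tensorflow {

    Status MakeTensorFromProtoSketch(Device* device,
                                     const TensorProto& tensor_proto,
                                     const AllocatorAttributes alloc_attrs,
                                     Tensor* tensor) {
      if (tensor_proto.dtype() > 0 && tensor_proto.dtype() <= DataType_MAX) {
        // The attributes decide which allocator backs the result, e.g. pinned
        // host memory when alloc_attrs.on_host() is set.
        Allocator* allocator = device->GetAllocator(alloc_attrs);
        Tensor parsed(tensor_proto.dtype());
        if (parsed.FromProto(allocator, tensor_proto)) {
          *tensor = std::move(parsed);
          return Status::OK();
        }
      }
      return errors::InvalidArgument("Cannot parse tensor from proto: ",
                                     tensor_proto.DebugString());
    }

    }  // namespace tensorflow
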
/external/tensorflow/tensorflow/core/grappler/optimizers/
gpu_swapping_kernels.cc
37 AllocatorAttributes alloc_attrs; in ComputeAsync() local
38 alloc_attrs.set_gpu_compatible(true); in ComputeAsync()
39 alloc_attrs.set_on_host(true); in ComputeAsync()
42 ctx, ctx->allocate_output(0, input.shape(), &output, alloc_attrs), in ComputeAsync()
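
The swap-out kernel above is the canonical way to ask for CUDA-pinned host memory through AllocatorAttributes: the output is marked both on_host and gpu_compatible so the device can DMA straight into it. A hedged sketch of that allocation step inside an async kernel body; the surrounding kernel class and copy logic are omitted and illustrative.

    #include "tensorflow/core/framework/op_kernel.h"

    namespace tensorflow {

    // Allocate output 0 in GPU-compatible (pinned) host memory, then finish.
    void AllocatePinnedHostOutputSketch(OpKernelContext* ctx,
                                        AsyncOpKernel::DoneCallback done) {
      const Tensor& input = ctx->input(0);

      AllocatorAttributes alloc_attrs;
      alloc_attrs.set_gpu_compatible(true);  // pinned, DMA-friendly
      alloc_attrs.set_on_host(true);         // placed in host memory

      Tensor* output = nullptr;
      OP_REQUIRES_OK_ASYNC(
          ctx, ctx->allocate_output(0, input.shape(), &output, alloc_attrs),
          done);

      // ... enqueue the device-to-host copy into *output here ...
      done();
    }

    }  // namespace tensorflow
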
evaluation_utils.h
41 const AllocatorAttributes alloc_attrs,
evaluation_utils.cc
51 const AllocatorAttributes alloc_attrs, in MakeTensorFromProto() argument
/external/tensorflow/tensorflow/core/kernels/
stack.cc
47 AllocatorAttributes alloc_attrs; member
237 AllocatorAttributes alloc_attrs = ctx->input_alloc_attr(1); in ComputeAsync() local
243 if (swap_memory_ && !alloc_attrs.on_host() && in ComputeAsync()
247 Allocator* allocator = device->GetAllocator(alloc_attrs); in ComputeAsync()
264 AllocatorAttributes alloc_attrs = ctx->input_alloc_attr(1); in ComputeAsync() local
265 ctx->SetStatus(stack->Push({*cpu_tensor, alloc_attrs, true})); in ComputeAsync()
278 OP_REQUIRES_OK_ASYNC(ctx, stack->Push({tensor, alloc_attrs, false}), done); in ComputeAsync()
305 Allocator* gpu_allocator = device->GetAllocator(value.alloc_attrs); in ComputeAsync()
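
stack.cc keeps the AllocatorAttributes of each pushed tensor alongside the tensor itself (the "member" hit at line 47), captured via ctx->input_alloc_attr(1); those saved attributes are later used to look up the matching GPU allocator when a swapped-out element is brought back. A hedged sketch of that bookkeeping; StackElement and the helper names are illustrative, not the real Stack implementation.

    #include <vector>

    #include "tensorflow/core/framework/op_kernel.h"

    namespace tensorflow {

    // Each pushed value remembers the attributes it was produced with.
    struct StackElement {
      Tensor tensor;
      AllocatorAttributes alloc_attrs;
      bool swapped_to_cpu;
    };

    void PushSketch(OpKernelContext* ctx, std::vector<StackElement>* stack) {
      const Tensor& value = ctx->input(1);
      // Placement attributes of input 1 as this kernel sees them.
      AllocatorAttributes alloc_attrs = ctx->input_alloc_attr(1);
      stack->push_back({value, alloc_attrs, /*swapped_to_cpu=*/false});
    }

    Allocator* AllocatorForSwapInSketch(OpKernelContext* ctx,
                                        const StackElement& elem) {
      // The saved attributes select the matching (e.g. GPU) allocator again.
      return ctx->device()->GetAllocator(elem.alloc_attrs);
    }

    }  // namespace tensorflow
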
pad_op.cc
109 AllocatorAttributes alloc_attrs; in Compute() local
110 alloc_attrs.set_on_host(context->input_memory_type(0) == HOST_MEMORY); in Compute()
114 &collapsed_output, alloc_attrs)); in Compute()
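
pad_op.cc shows the read side of the same flag: the scratch tensor is marked on_host exactly when the corresponding input is declared to live in HOST_MEMORY, so both end up in the same address space. A minimal hedged sketch; the helper name is illustrative and allocate_temp stands in for whichever allocation call the real kernel uses.

    #include "tensorflow/core/framework/op_kernel.h"

    namespace tensorflow {

    // Allocate a temporary whose placement mirrors that of input 0.
    Status AllocateLikeInput0Sketch(OpKernelContext* context, DataType dtype,
                                    const TensorShape& shape, Tensor* out) {
      AllocatorAttributes alloc_attrs;
      // HOST_MEMORY inputs get a host-side buffer; device inputs a device one.
      alloc_attrs.set_on_host(context->input_memory_type(0) == HOST_MEMORY);
      return context->allocate_temp(dtype, shape, out, alloc_attrs);
    }

    }  // namespace tensorflow
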
sendrecv_ops.cc
91 args.alloc_attrs = ctx->input_alloc_attr(0); in Compute()
198 args.alloc_attrs = ctx->output_alloc_attr(0); in ComputeAsync()
/external/tensorflow/tensorflow/c/
kernels_test.cc
816 TF_AllocatorAttributes alloc_attrs; in TEST_F() local
817 alloc_attrs.struct_size = TF_ALLOCATOR_ATTRIBUTES_STRUCT_SIZE; in TEST_F()
819 alloc_attrs.on_host = 0; in TEST_F()
821 alloc_attrs.on_host = 1; in TEST_F()
825 /*num_dims=*/1, /*allocator_attributes*/ &alloc_attrs, s); in TEST_F()
853 TF_AllocatorAttributes alloc_attrs; in TEST_F() local
854 alloc_attrs.struct_size = TF_ALLOCATOR_ATTRIBUTES_STRUCT_SIZE; in TEST_F()
856 alloc_attrs.on_host = 0; in TEST_F()
858 alloc_attrs.on_host = 1; in TEST_F()
862 /*num_dims=*/1, /*allocator_attributes*/ &alloc_attrs, s); in TEST_F()
[all …]
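
The kernels_test.cc hits exercise the same idea through the C API: TF_AllocatorAttributes must have struct_size set to TF_ALLOCATOR_ATTRIBUTES_STRUCT_SIZE, and on_host (0 or 1) selects device vs. host placement before the struct is passed to an allocation helper. A hedged sketch, assuming the TF_AllocateTemp entry point from tensorflow/c/kernels.h is the call being tested here.

    #include <stdint.h>

    #include "tensorflow/c/kernels.h"
    #include "tensorflow/c/tf_status.h"
    #include "tensorflow/c/tf_tensor.h"

    // Request a small host-resident temporary through the kernel C API.
    static TF_Tensor* AllocateHostTempSketch(TF_OpKernelContext* ctx,
                                             TF_Status* s) {
      TF_AllocatorAttributes alloc_attrs;
      alloc_attrs.struct_size = TF_ALLOCATOR_ATTRIBUTES_STRUCT_SIZE;  // ABI check
      alloc_attrs.on_host = 1;  // 0 would request device memory instead

      int64_t dims[] = {4};
      return TF_AllocateTemp(ctx, TF_FLOAT, dims, /*num_dims=*/1,
                             /*alloc_attrs=*/&alloc_attrs, s);
    }
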
/external/tensorflow/tensorflow/core/distributed_runtime/
base_rendezvous_mgr.cc
228 (send_args.alloc_attrs.on_host() || parsed.src.type == "CPU"); in SameWorkerRecvDone()
230 (recv_args.alloc_attrs.on_host() || parsed.dst.type == "CPU"); in SameWorkerRecvDone()
264 AllocatorAttributes attr = recv_args.alloc_attrs; in SameWorkerRecvDone()
265 attr.set_gpu_compatible(send_args.alloc_attrs.gpu_compatible() || in SameWorkerRecvDone()
266 recv_args.alloc_attrs.gpu_compatible()); in SameWorkerRecvDone()
288 src_device, dst_device, send_args.alloc_attrs, recv_args.alloc_attrs, &in, in SameWorkerRecvDone()
/external/tensorflow/tensorflow/compiler/jit/
xla_device.cc
476 const AllocatorAttributes alloc_attrs, in MakeTensorFromProto() argument
485 if (alloc_attrs.on_host()) { in MakeTensorFromProto()
489 Allocator* allocator = GetAllocatorLocked(alloc_attrs); in MakeTensorFromProto()
500 const AllocatorAttributes alloc_attrs, in MakeTensorFromProto() argument
508 return MakeTensorFromProto(device_contexts.first, tensor_proto, alloc_attrs, in MakeTensorFromProto()
513 const TensorProto& tensor_proto, const AllocatorAttributes alloc_attrs, in MakeFastMemTensorFromProto() argument
521 return MakeTensorFromProto(device_contexts.second, tensor_proto, alloc_attrs, in MakeFastMemTensorFromProto()
xla_device.h
156 const AllocatorAttributes alloc_attrs,
163 const AllocatorAttributes alloc_attrs,
207 const AllocatorAttributes alloc_attrs,
/external/tensorflow/tensorflow/core/distributed_runtime/rpc/
rpc_rendezvous_mgr.cc
63 AllocatorAttributes alloc_attrs, Device* dst_device, in Init() argument
66 alloc_attrs_ = alloc_attrs; in Init()
272 call->Init(rwi, step_id_, parsed.FullKey(), recv_args.alloc_attrs, dst_device, in RecvFromRemoteAsync()
grpc_worker_service.cc
525 const bool on_host = send_args.alloc_attrs.on_host(); in GrpcRecvTensorAsync()
530 AllocatorAttributes alloc_attrs; in GrpcRecvTensorAsync() local
531 alloc_attrs.set_gpu_compatible(true); in GrpcRecvTensorAsync()
532 alloc_attrs.set_on_host(true); in GrpcRecvTensorAsync()
533 Allocator* alloc = src_dev->GetAllocator(alloc_attrs); in GrpcRecvTensorAsync()
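
grpc_worker_service.cc shows the transport-side use: when the tensor to be sent is not already on the host, the worker takes a gpu_compatible + on_host allocator from the source device so the device-to-host copy lands in pinned memory that the RPC layer can read directly. A hedged sketch of that staging-buffer choice; the actual copy and response serialization are omitted and the function name is illustrative.

    #include "tensorflow/core/common_runtime/device.h"
    #include "tensorflow/core/framework/tensor.h"

    namespace tensorflow {

    // Allocate a pinned host staging copy for a device tensor that is about
    // to be serialized and sent over the wire.
    Tensor MakeHostStagingCopySketch(Device* src_dev, const Tensor& gpu_tensor) {
      AllocatorAttributes alloc_attrs;
      alloc_attrs.set_gpu_compatible(true);  // pinned, fast device-to-host DMA
      alloc_attrs.set_on_host(true);         // readable by the RPC layer
      Allocator* alloc = src_dev->GetAllocator(alloc_attrs);
      // The asynchronous device-to-host memcpy into this tensor would follow.
      return Tensor(alloc, gpu_tensor.dtype(), gpu_tensor.shape());
    }

    }  // namespace tensorflow
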
/external/tensorflow/tensorflow/core/tpu/
virtual_device.h
32 const AllocatorAttributes alloc_attrs,
virtual_device.cc
80 const AllocatorAttributes alloc_attrs, in MakeTensorFromProto() argument
/external/tensorflow/tensorflow/compiler/tf2xla/
xla_compilation_device.h
60 const AllocatorAttributes alloc_attrs,
/external/tensorflow/tensorflow/c/kernels/
summary_op_test.cc
77 AllocatorAttributes alloc_attrs; in TestScalarSummaryOp() local
78 params.output_attr_array = &alloc_attrs; in TestScalarSummaryOp()
/external/tensorflow/tensorflow/core/tpu/kernels/
host_compute_ops.cc
145 args.alloc_attrs = ctx->output_alloc_attr(i); in ComputeAsync()
272 args.alloc_attrs = ctx->input_alloc_attr(i); in Compute()
/external/tensorflow/tensorflow/core/common_runtime/gpu/
gpu_device.h
73 const AllocatorAttributes alloc_attrs,
171 Status MaybeCopyTensorToGPU(const AllocatorAttributes& alloc_attrs,
