Searched refs:cpu_allocator (Results 1 – 25 of 54) sorted by relevance

/external/tensorflow/tensorflow/contrib/ignite/kernels/dataset/
ignite_binary_object_parser.cc
35 out_tensors->emplace_back(cpu_allocator(), DT_UINT8, TensorShape({})); in Parse()
40 out_tensors->emplace_back(cpu_allocator(), DT_INT16, TensorShape({})); in Parse()
45 out_tensors->emplace_back(cpu_allocator(), DT_UINT16, TensorShape({})); in Parse()
50 out_tensors->emplace_back(cpu_allocator(), DT_INT32, TensorShape({})); in Parse()
55 out_tensors->emplace_back(cpu_allocator(), DT_INT64, TensorShape({})); in Parse()
60 out_tensors->emplace_back(cpu_allocator(), DT_FLOAT, TensorShape({})); in Parse()
65 out_tensors->emplace_back(cpu_allocator(), DT_DOUBLE, TensorShape({})); in Parse()
70 out_tensors->emplace_back(cpu_allocator(), DT_BOOL, TensorShape({})); in Parse()
75 out_tensors->emplace_back(cpu_allocator(), DT_STRING, TensorShape({})); in Parse()
80 out_tensors->emplace_back(cpu_allocator(), DT_INT64, TensorShape({})); in Parse()
[all …]
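
The ignite parser hits above all share one pattern: appending empty host tensors whose storage comes from the process-wide CPU allocator. A minimal sketch of that pattern follows; the helper name MakeScalarOutputs is illustrative and does not appear in the tree.

    #include <vector>

    #include "tensorflow/core/framework/allocator.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/tensor_shape.h"

    // Each tensor is a scalar (empty shape) backed by tensorflow::cpu_allocator(),
    // mirroring the emplace_back calls in ignite_binary_object_parser.cc.
    std::vector<tensorflow::Tensor> MakeScalarOutputs() {
      std::vector<tensorflow::Tensor> out_tensors;
      out_tensors.emplace_back(tensorflow::cpu_allocator(), tensorflow::DT_INT32,
                               tensorflow::TensorShape({}));
      out_tensors.emplace_back(tensorflow::cpu_allocator(), tensorflow::DT_FLOAT,
                               tensorflow::TensorShape({}));
      return out_tensors;
    }
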
/external/tensorflow/tensorflow/core/common_runtime/
copy_tensor.cc
50 void CopyHostToDevice(const Tensor* input, Allocator* cpu_allocator, in CopyHostToDevice() argument
55 Tensor copy(cpu_allocator, DT_VARIANT, input->shape()); in CopyHostToDevice()
64 [dst, recv_dev_context, out_allocator, status_cb, cpu_allocator, in CopyHostToDevice()
70 CopyHostToDevice(&from, cpu_allocator, out_allocator, edge_name, in CopyHostToDevice()
115 void CopyDeviceToHost(const Tensor* input, Allocator* cpu_allocator, in CopyDeviceToHost() argument
120 Tensor copy(cpu_allocator, DT_VARIANT, input->shape()); in CopyDeviceToHost()
130 cpu_allocator](StatusCallback wrapped_done_, in CopyDeviceToHost()
135 CopyDeviceToHost(&from, cpu_allocator, out_allocator, edge_name, in CopyDeviceToHost()
181 Allocator* cpu_allocator, Allocator* out_allocator, in CopyDeviceToDevice() argument
189 Tensor copy(cpu_allocator, DT_VARIANT, input->shape()); in CopyDeviceToDevice()
[all …]
mkl_threadpool_device_test.cc
34 options, "/device:CPU:0", Bytes(256), DeviceLocality(), cpu_allocator()); in TEST()
45 options, "/device:CPU:0", Bytes(256), DeviceLocality(), cpu_allocator()); in TEST()
single_threaded_cpu_device.cc
69 if (!parsed.FromProto(cpu_allocator(), tensor_proto)) { in MakeTensorFromProto()
77 return cpu_allocator(); in GetAllocator()
mkl_cpu_allocator.h
279 return cpu_allocator()->AllocateRaw(kAlignment, size); in MallocHook()
284 cpu_allocator()->DeallocateRaw(ptr); in FreeHook()
/external/tensorflow/tensorflow/lite/delegates/flex/
buffer_map.cc
36 proto->set_allocator_name(tensorflow::cpu_allocator()->Name()); in FillAllocationDescription()
48 data(), tensorflow::cpu_allocator()); in LogAllocation()
56 tensorflow::cpu_allocator(), false); in LogDeallocation()
66 : BaseTfLiteTensorBuffer(tensorflow::cpu_allocator()->AllocateRaw( in TfLiteTensorBuffer()
82 tensorflow::cpu_allocator()->DeallocateRaw(data()); in ~TfLiteTensorBuffer()
102 tensorflow::cpu_allocator()->Deallocate<string>( in ~StringTfLiteTensorBuffer()
112 ? tensorflow::cpu_allocator()->Allocate<string>(num_strings) in StringTfLiteTensorBuffer()
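
The flex delegate hits above show the raw-buffer form of the same allocator: cpu_allocator()->AllocateRaw(alignment, bytes) paired with DeallocateRaw(). A hedged sketch is below; the alignment and size values and the function name are stand-ins, not the constants buffer_map.cc actually uses.

    #include "tensorflow/core/framework/allocator.h"

    // Illustrative only: allocate and release a raw host buffer through the
    // same allocator the flex TfLiteTensorBuffer is built on.
    void RawAllocationExample() {
      tensorflow::Allocator* a = tensorflow::cpu_allocator();
      void* data = a->AllocateRaw(/*alignment=*/64, /*num_bytes=*/256);
      if (data != nullptr) {
        // ... fill the buffer ...
        a->DeallocateRaw(data);
      }
    }
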
/external/tensorflow/tensorflow/core/framework/
allocator_test.cc
90 Allocator* a = cpu_allocator(); in TEST()
129 Allocator* a = cpu_allocator(); in TEST()
139 Allocator* a = cpu_allocator(); in TEST()
150 Allocator* a = cpu_allocator(); in TEST()
176 Allocator* a = cpu_allocator(); in BM_Allocation()
allocator.cc
192 explicit CPUSubAllocator(CPUAllocator* cpu_allocator) in CPUSubAllocator() argument
193 : SubAllocator({}, {}), cpu_allocator_(cpu_allocator) {} in CPUSubAllocator()
224 Allocator* cpu_allocator(int numa_node) { in cpu_allocator() function
op_kernel_test.cc
161 std::move(device_type), &device_, cpu_allocator(), in ExpectSuccess()
177 CreateOpKernel(std::move(device_type), &device_, cpu_allocator(), in ExpectFailure()
314 return cpu_allocator(); in GetAllocator()
328 CreateOpKernel(DEVICE_CPU, params.device, cpu_allocator(), in TEST_F()
353 CreateOpKernel(DEVICE_CPU, params.device, cpu_allocator(), in TEST_F()
381 CreateOpKernel(DEVICE_CPU, params.device, cpu_allocator(), in TEST_F()
438 cpu_allocator(), def,
472 cpu_allocator(), def, in ExpectFailure()
664 DEVICE_CPU, params.device, cpu_allocator(), in TEST_F()
939 cpu_allocator(), node_def, in BM_InputRangeHelper()
op_segment_test.cc
67 auto created = CreateOpKernel(DEVICE_CPU, &device_, cpu_allocator(), in GetFn()
resource_op_kernel_test.cc
42 return cpu_allocator(); in GetAllocator()
/external/tensorflow/tensorflow/core/common_runtime/gpu/
gpu_device_factory.cc
33 Allocator* gpu_allocator, Allocator* cpu_allocator) in GPUDevice() argument
35 physical_device_desc, gpu_allocator, cpu_allocator, in GPUDevice()
67 Allocator* cpu_allocator) override { in CreateGPUDevice() argument
70 gpu_allocator, cpu_allocator); in CreateGPUDevice()
gpu_device.h
57 Allocator* gpu_allocator, Allocator* cpu_allocator,
309 Allocator* cpu_allocator) = 0;
/external/tensorflow/tensorflow/core/common_runtime/sycl/
sycl_device.h
117 m_cpu_allocator_.push_back(cpu_allocator()); in AddDevice()
201 Allocator* cpu_allocator, SYCLDeviceContext* ctx) in SYCLDevice() argument
205 cpu_allocator_(cpu_allocator), in SYCLDevice()
/external/tensorflow/tensorflow/compiler/jit/kernels/
xla_ops.cc
478 Allocator* cpu_allocator = ctx->device()->GetAllocator(host_alloc_attrs); in Compute() local
482 Tensor compilation_key(cpu_allocator, DT_STRING, TensorShape({})); in Compute()
484 Tensor compilation_successful(cpu_allocator, DT_BOOL, TensorShape({})); in Compute()
486 ctx->set_output(0, Tensor(cpu_allocator, DT_STRING, TensorShape({}))); in Compute()
499 Tensor compilation_key(cpu_allocator, DT_STRING, TensorShape({})); in Compute()
502 Tensor compilation_successful(cpu_allocator, DT_BOOL, TensorShape({})); in Compute()
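
The xla_ops.cc hits above (and stack.cc further down) reach a CPU allocator indirectly, by asking the device for a host-side allocator via allocation attributes rather than calling cpu_allocator() directly. A rough sketch of that lookup, assuming an OpKernelContext* as in the hits; the helper name is hypothetical:

    #include "tensorflow/core/framework/allocator.h"
    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/framework/tensor.h"

    // Ask the device for a host-resident allocator and build a scalar string
    // tensor with it, mirroring how the compile op creates compilation_key.
    tensorflow::Tensor MakeHostScalar(tensorflow::OpKernelContext* ctx) {
      tensorflow::AllocatorAttributes host_alloc_attrs;
      host_alloc_attrs.set_gpu_compatible(true);
      host_alloc_attrs.set_on_host(true);
      tensorflow::Allocator* cpu_allocator =
          ctx->device()->GetAllocator(host_alloc_attrs);
      return tensorflow::Tensor(cpu_allocator, tensorflow::DT_STRING,
                                tensorflow::TensorShape({}));
    }
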
/external/tensorflow/tensorflow/core/kernels/
immutable_constant_op_test.cc
44 : memptr_(cpu_allocator()->AllocateRaw(kTestAlignment, length)), in TestReadOnlyMemoryRegion()
47 cpu_allocator()->DeallocateRaw(memptr_); in ~TestReadOnlyMemoryRegion()
stack.cc
255 Allocator* cpu_allocator = device->GetAllocator(host_alloc_attrs); in ComputeAsync() local
257 new Tensor(cpu_allocator, tensor.dtype(), tensor.shape()); in ComputeAsync()
constant_op_test.cc
61 cpu_allocator(), const_node, in PersistentMemoryTrackingTest()
restore_op_test.cc
89 cpu_allocator(), save, in TEST_F()
393 cpu_allocator(), save, in TEST_F()
/external/tensorflow/tensorflow/core/grappler/optimizers/
evaluation_utils.h
45 return cpu_allocator(); in GetAllocator()
evaluation_utils.cc
75 if (!parsed.FromProto(cpu_allocator(), tensor_proto)) { in MakeTensorFromProto()
/external/tensorflow/tensorflow/c/kernels/
bitcast_op_test.cc
30 return cpu_allocator(); in GetAllocator()
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
const_op.cc
120 OP_REQUIRES(ctx, tensor.FromProto(cpu_allocator(), proto_), in Compile()
/external/tensorflow/tensorflow/contrib/util/
convert_graphdef_memmapped_format_lib.cc
69 if (!parsed.FromProto(cpu_allocator(), tensor_proto)) { in ConvertConstantsToImmutable()
/external/tensorflow/tensorflow/compiler/jit/
xla_device.cc
247 return cpu_allocator(); in GetAllocatorLocked()
446 if (!parsed.FromProto(cpu_allocator(), tensor_proto)) { in MakeTensorFromProto()
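
Several hits (single_threaded_cpu_device.cc, evaluation_utils.cc, convert_graphdef_memmapped_format_lib.cc, xla_device.cc) share a third pattern: materializing a TensorProto into a host tensor backed by cpu_allocator(). A minimal sketch, with a hypothetical ParseHostTensor name and error message:

    #include "tensorflow/core/framework/allocator.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/tensor.pb.h"
    #include "tensorflow/core/lib/core/errors.h"
    #include "tensorflow/core/lib/core/status.h"

    // Parse a TensorProto into a tensor whose storage is owned by the
    // process-wide CPU allocator, failing cleanly on a malformed proto.
    tensorflow::Status ParseHostTensor(const tensorflow::TensorProto& tensor_proto,
                                       tensorflow::Tensor* out) {
      tensorflow::Tensor parsed;
      if (!parsed.FromProto(tensorflow::cpu_allocator(), tensor_proto)) {
        return tensorflow::errors::InvalidArgument(
            "Cannot parse tensor from proto");
      }
      *out = parsed;
      return tensorflow::Status::OK();
    }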
