/external/tensorflow/tensorflow/java/src/main/java/org/tensorflow/ |
D | Tensors.java |
    29  public static Tensor<String> create(String data) {  in create()
    30    return Tensor.create(data.getBytes(UTF_8), String.class);  in create()
    39  public static Tensor<String> create(String data, java.nio.charset.Charset charset) {  in create()
    40    return Tensor.create(data.getBytes(charset), String.class);  in create()
    48  public static Tensor<Float> create(float data) {  in create()
    49    return Tensor.create(data, Float.class);  in create()
    58  public static Tensor<Float> create(float[] data) {  in create()
    59    return Tensor.create(data, Float.class);  in create()
    68  public static Tensor<Float> create(float[][] data) {  in create()
    69    return Tensor.create(data, Float.class);  in create()
    [all …]
|
/external/eigen/unsupported/Eigen/CXX11/ |
D | Tensor |
    34  /** \defgroup CXX11_Tensor_Module Tensor Module
    36   * This module provides a Tensor class for storing arbitrarily indexed
    40   * #include <Eigen/CXX11/Tensor>
    84  #include "src/Tensor/TensorMacros.h"
    85  #include "src/Tensor/TensorForwardDeclarations.h"
    86  #include "src/Tensor/TensorMeta.h"
    87  #include "src/Tensor/TensorFunctors.h"
    88  #include "src/Tensor/TensorCostModel.h"
    89  #include "src/Tensor/TensorDeviceDefault.h"
    90  #include "src/Tensor/TensorDeviceThreadPool.h"
    [all …]
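The header above is the umbrella include for Eigen's unsupported Tensor module. As a rough, self-contained sketch (not part of this index), a fixed-rank Eigen::Tensor can be created and combined with element-wise expressions as below; the include path follows the module's own documentation at line 40.

    // Minimal sketch of the Eigen Tensor module listed above.
    #include <Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      // Rank-2 float tensor with dimensions 2x3 (column-major by default).
      Eigen::Tensor<float, 2> a(2, 3);
      a.setConstant(1.0f);

      // Element-wise expression; it is evaluated when assigned to a Tensor.
      Eigen::Tensor<float, 2> b = a * 2.0f + a;

      std::cout << b(1, 2) << "\n";  // prints 3
      return 0;
    }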
|
/external/tensorflow/tensorflow/core/framework/ |
D | tensor.h |
    44  class Tensor;  variable
    53  Status CopyElementToSlice(Tensor element, Tensor* parent, int64 index);
    54  Status CopySliceToElement(const Tensor& parent, Tensor* element, int64 index);
    55  Status MaybeMoveSliceToElement(Tensor* parent, Tensor* element, int64 index);
    56  Status CopyContiguousSlices(const Tensor& src, int64 src_offset,
    57                              int64 dst_offset, int64 num_slices, Tensor* dst);
    102 class Tensor {
    118   Tensor();
    128   Tensor(DataType type, const TensorShape& shape);
    139   Tensor(Allocator* a, DataType type, const TensorShape& shape);
    [all …]
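tensor.h declares the core tensorflow::Tensor class and the constructors listed at lines 118-139. A minimal sketch of using those constructors (not taken from this index), relying only on the public framework API plus the standard flat<T>() accessor:

    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/tensor_shape.h"

    void Example() {
      using tensorflow::Tensor;
      using tensorflow::TensorShape;

      // Tensor(DataType type, const TensorShape& shape) allocates an
      // uninitialized 2x3 float tensor on the default CPU allocator.
      Tensor t(tensorflow::DT_FLOAT, TensorShape({2, 3}));

      // flat<T>() exposes the buffer as a rank-1 Eigen tensor map.
      auto flat = t.flat<float>();
      for (int i = 0; i < t.NumElements(); ++i) flat(i) = static_cast<float>(i);
    }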
|
D | lookup_interface.h |
    50  virtual Status Find(OpKernelContext* ctx, const Tensor& keys, Tensor* values,
    51                      const Tensor& default_value) = 0;
    64  virtual Status Insert(OpKernelContext* ctx, const Tensor& keys,
    65                        const Tensor& values) = 0;
    76  virtual Status Remove(OpKernelContext* ctx, const Tensor& keys) = 0;
    91  virtual Status ImportValues(OpKernelContext* ctx, const Tensor& keys,
    92                              const Tensor& values) = 0;
    113 virtual Status CheckKeyAndValueTensorsForInsert(const Tensor& keys,
    114                                                 const Tensor& values);
    118 virtual Status CheckKeyAndValueTensorsForImport(const Tensor& keys,
    [all …]
|
D | tensor_test.cc |
    36  static void set_shape(Tensor* t, const TensorShape& s) { t->set_shape(s); }  in set_shape()
    82  Tensor t;  in TEST()
    119 void ExpectEqual(const Tensor& x, const Tensor& y) {  in ExpectEqual()
    124 void ExpectEqual<ResourceHandle>(const Tensor& x, const Tensor& y) {  in ExpectEqual()
    128 void ExpectEqual<Variant>(const Tensor& x, const Tensor& y) {  in ExpectEqual()
    133 void TestCopies(const Tensor& t) {  in TestCopies()
    136 Tensor t2(t.dtype());  in TestCopies()
    142 Tensor t2(t.dtype());  in TestCopies()
    148 Tensor t2(t.dtype(), t.shape());  in TestCopies()
    156 Tensor t2(t.dtype());  in TestCopies()
    [all …]
|
/external/tensorflow/tensorflow/core/kernels/ |
D | relu_op.h |
    38  void Operate(OpKernelContext* context, const Tensor& input, Tensor* output) {  in Operate()
    49  static void ValidateSameSizeHelper(OpKernelContext* context, const Tensor& g,  in ValidateSameSizeHelper()
    50                                     const Tensor& a) {  in ValidateSameSizeHelper()
    54  static bool ValidateSameSize(OpKernelContext* context, const Tensor& g,  in ValidateSameSize()
    55                               const Tensor& a) {  in ValidateSameSize()
    66  void OperateNoTemplate(OpKernelContext* context, const Tensor& g,
    67                         const Tensor& a, Tensor* output);
    76  void Operate(OpKernelContext* context, const Tensor& g, const Tensor& a,  in Operate()
    77               Tensor* output) {  in Operate()
    84                         const Tensor& g, const Tensor& a,  in OperateNoTemplate()
    [all …]
|
D | tensor_map_test.cc |
    35  TensorKey k1 = Tensor(15);  in TEST()
    36  TensorKey k2 = Tensor(15);  in TEST()
    41  TensorKey k3 = Tensor(37.0);  in TEST()
    48  TensorKey k = Tensor(11);  in TEST()
    49  Tensor v = Tensor(22);  in TEST()
    51  absl::flat_hash_map<TensorKey, Tensor> am;  in TEST()
    54  absl::flat_hash_map<TensorKey, Tensor>::iterator map_it =  in TEST()
    64  TensorKey k = Tensor(11);  in TEST()
    65  Tensor v = Tensor(22);  in TEST()
    67  absl::flat_hash_map<TensorKey, Tensor>::iterator map_it = tm.find(k);  in TEST()
    [all …]
|
D | fake_quant_ops_test.cc |
    29  using tensorflow::Tensor;
    39  Tensor* input = new Tensor(device_->GetAllocator(AllocatorAttributes()),  in AddRandomInput()
    72  Tensor* output = GetOutput(0);  in RunTestFakeQuantWithMinMaxArgs()
    73  Tensor expected(allocator(), DT_FLOAT, shape);  in RunTestFakeQuantWithMinMaxArgs()
    101 Tensor* output = GetOutput(0);  in RunTestFakeQuantWithMinMaxVars()
    102 Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3}));  in RunTestFakeQuantWithMinMaxVars()
    131 Tensor* output = GetOutput(0);  in RunTestFakeQuantWithMinMaxVarsPerChannel()
    132 Tensor expected(allocator(), DT_FLOAT, shape);  in RunTestFakeQuantWithMinMaxVarsPerChannel()
    379 Tensor* output = GetOutput(0);  in TEST_F()
    381 Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3}));  in TEST_F()
    [all …]
|
D | conv_ops_test.cc |
    124 Tensor image(dtype, {image_batch_count, image_height, image_width, depth});  in HandwrittenConv()
    133 Tensor filter(dtype, {filter_size, filter_size, depth, filter_count});  in HandwrittenConv()
    173 Tensor expected(dtype, TensorShape({image_batch_count, expected_height,  in HandwrittenConv()
    177 const Tensor& output = *GetOutput(0);  in HandwrittenConv()
    192 Tensor input_data(DT_FLOAT,  in CompareFusedAndSeparate()
    199 Tensor filter_data(DT_FLOAT, TensorShape({filter_size, filter_size,  in CompareFusedAndSeparate()
    234 std::vector<Tensor> unfused_tensors;  in CompareFusedAndSeparate()
    237 std::vector<Tensor> fused_tensors;  in CompareFusedAndSeparate()
    253 Tensor input_data(DT_FLOAT,  in CompareFusedPadOnlyAndSeparate()
    260 Tensor filter_data(DT_FLOAT, TensorShape({filter_size, filter_size,  in CompareFusedPadOnlyAndSeparate()
    [all …]
|
D | initializable_lookup_table.h |
    46  Status Find(OpKernelContext* ctx, const Tensor& keys, Tensor* values,
    47              const Tensor& default_value) final;
    50  Status Insert(OpKernelContext* ctx, const Tensor& keys,  in Insert()
    51                const Tensor& values) final {  in Insert()
    57  Status Remove(OpKernelContext* ctx, const Tensor& keys) final {  in Remove()
    68  Status ImportValues(OpKernelContext* ctx, const Tensor& keys,
    69                      const Tensor& values) final;
    116 virtual const Tensor& keys() const = 0;
    119 virtual const Tensor& values() const = 0;
    154 virtual Status DoInsert(const Tensor& keys, const Tensor& values) = 0;
    [all …]
|
D | fused_batch_norm_ex_op_test.cc |
    51  Tensor y;
    52  Tensor batch_mean;
    53  Tensor batch_variance;
    54  Tensor reserve_space_1;
    55  Tensor reserve_space_2;
    56  Tensor reserve_space_3;
    60  Tensor y_backprop;
    61  Tensor x_backprop;
    62  Tensor scale_backprop;
    63  Tensor offset_backprop;
    [all …]
|
D | quantized_batch_norm_op_test.cc |
    66  Tensor input_float(DT_FLOAT,  in TEST_F()
    70  Tensor input_quantized =  in TEST_F()
    74  Tensor mean_float(DT_FLOAT, {input_depth});  in TEST_F()
    76  Tensor mean_quantized =  in TEST_F()
    80  Tensor variance_float(DT_FLOAT, {input_depth});  in TEST_F()
    82  Tensor variance_quantized = FloatTensorToQuantized<quint8>(  in TEST_F()
    86  Tensor beta_float(DT_FLOAT, {input_depth});  in TEST_F()
    88  Tensor beta_quantized =  in TEST_F()
    92  Tensor gamma_float(DT_FLOAT, {input_depth});  in TEST_F()
    94  Tensor gamma_quantized =  in TEST_F()
    [all …]
|
D | sparse_conditional_accumulator.h |
    49      std::tuple<const Tensor*, const Tensor*, const Tensor*>> {
    55      std::tuple<const Tensor*, const Tensor*, const Tensor*>>(  in SparseConditionalAccumulator()
    74  Tensor* accum_val_ = nullptr;
    77  typedef Eigen::TensorMap<Eigen::Tensor<T, 1, Eigen::RowMajor>,
    80  typedef Eigen::TensorMap<Eigen::Tensor<const T, 1, Eigen::RowMajor>,
    85      std::tuple<const Tensor*, const Tensor*, const Tensor*>* tensor,  in ValidateShape()  argument
    87  const Tensor* tensor_idx = std::get<0>(*tensor);  in ValidateShape()
    88  const Tensor* tensor_val = std::get<1>(*tensor);  in ValidateShape()
    89  const Tensor* tensor_shape = std::get<2>(*tensor);  in ValidateShape()
    160     std::tuple<const Tensor*, const Tensor*, const Tensor*>* grad) override {  in AllocateAndAssignToAccumGradFunction()  argument
    [all …]
|
/external/tensorflow/tensorflow/core/kernels/mkl/ |
D | mkl_fused_batch_norm_op_test.cc |
    45      const Tensor& input, const Tensor& scale, const Tensor& offset,
    46      const Tensor& mean, const Tensor& variance,
    47      const float exponential_avg_factor, const bool is_training, Tensor* output,
    48      Tensor* batch_mean, Tensor* batch_var)>;
    51      const Tensor& input, const Tensor& filter, const Tensor& y_backprop,
    52      const Tensor& scale, const Tensor& mean, const Tensor& variance,
    53      const Tensor& res_sp3, Tensor* output, Tensor* scale_backprop,
    54      Tensor* offset_backprop, bool disable_grappler_opts)>;
    59  void PerformConversion(DataType dtype, const Tensor& tensor,  in PerformConversion()
    60                         const Tensor& mkl_meta_tensor, Tensor* output) {  in PerformConversion()
    [all …]
|
D | mkl_fused_ops_test.cc |
    48      std::function<void(const Tensor& input_data, const Tensor& filter_data,
    49                         const Tensor& bias_data, Tensor* out)>;
    52      const Tensor& input_data, const Tensor& filter_data,
    53      const Tensor& bias_data, const std::vector<string>& fused_ops, Tensor* out,
    57      std::function<void(const Tensor& input_data, const Tensor& filter_data,
    58                         const Tensor& bias_data,
    59                         const std::vector<string>& fused_ops, Tensor* out)>;
    64  void PerformConversion(DataType dtype, const Tensor& tensor,  in PerformConversion()
    65                         const Tensor& mkl_meta_tensor, Tensor* output) {  in PerformConversion()
    85                         Tensor* output) {  in RunAndFetch()
    [all …]
|
/external/tensorflow/tensorflow/core/ops/ |
D | array_grad_test.cc |
    36  std::vector<Tensor> PackGrad(const Tensor& x0, const Tensor& x1,  in PackGrad()
    37                               const Tensor& dy, int axis) {  in PackGrad()
    52  std::vector<Tensor> out;  in PackGrad()
    64  Tensor x0(DT_FLOAT, {2, 3});  in TEST()
    66  Tensor x1(DT_FLOAT, {2, 3});  in TEST()
    68  Tensor dy(DT_FLOAT, {2, 2, 3});  in TEST()
    77  std::vector<Tensor> UnpackGrad(const Tensor& x, const Tensor& dy0,  in UnpackGrad()
    78                                 const Tensor& dy1, int axis) {  in UnpackGrad()
    93  std::vector<Tensor> out;  in UnpackGrad()
    105 Tensor x(DT_FLOAT, {2, 2, 3});  in TEST()
    [all …]
|
D | math_grad_test.cc |
    43  Status Unary(const FDH::Node& op_node, const Tensor& x, const DataType dst,  in Unary()
    44               Tensor* y) {  in Unary()
    87  std::vector<Tensor> outputs;  in Unary()
    97  Status Unary(const string& op, const Tensor& x, Tensor* y) {  in Unary()
    103 Tensor SymGrad(const string& op, const Tensor& x) {  in SymGrad()
    104   Tensor ret;  in SymGrad()
    109 Tensor SymCastGrad(const Tensor& x, const DataType dst) {  in SymCastGrad()
    110   Tensor ret;  in SymCastGrad()
    118 void SymGrad(const string& op, const Tensor& x, const Tensor& y, Tensor* dx,  in SymGrad()
    119              Tensor* dy) {  in SymGrad()
    [all …]
|
/external/tensorflow/tensorflow/cc/experimental/base/public/ |
D | tensor.h |
    35  class Tensor {
    58    static Tensor FromBuffer(TF_DataType dtype, const std::vector<int64_t>& shape,
    72    Tensor(Tensor&&) = default;
    73    Tensor& operator=(Tensor&&) = default;
    101   explicit Tensor(TF_Tensor* tensor) : tensor_(tensor) {}  in Tensor()  function
    104   Tensor(const Tensor&) = delete;
    105   Tensor& operator=(const Tensor&) = delete;
    127 inline void* Tensor::data() const { return TF_TensorData(tensor_.get()); }  in data()
    129 inline int Tensor::dims() const { return TF_NumDims(tensor_.get()); }  in dims()
    131 inline int64_t Tensor::dim_size(int d) const {  in dim_size()
    [all …]
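This header is the experimental C++ Tensor wrapper around TF_Tensor. The sketch below calls the FromBuffer factory whose first two parameters appear at line 58; the remaining parameters (buffer pointer, length, deleter callback, Status out-parameter) and the tensorflow::experimental::cc namespace are assumptions, not confirmed by this index.

    #include <vector>
    #include "tensorflow/c/tf_datatype.h"
    #include "tensorflow/cc/experimental/base/public/status.h"
    #include "tensorflow/cc/experimental/base/public/tensor.h"

    void Example() {
      using tensorflow::experimental::cc::Status;
      using tensorflow::experimental::cc::Tensor;

      // Wrap a caller-owned buffer as a 2x3 float tensor; the no-op deleter
      // reflects that the buffer outlives the Tensor (assumed API shape).
      static float buffer[6] = {0, 1, 2, 3, 4, 5};
      Status status;
      Tensor t = Tensor::FromBuffer(TF_FLOAT, /*shape=*/{2, 3}, buffer,
                                    sizeof(buffer),
                                    /*deleter=*/[](void*, size_t) {}, &status);
      if (!status.ok()) return;

      // Accessors declared at lines 127-131 of the header.
      void* data = t.data();
      int rank = t.dims();
      (void)data;
      (void)rank;
    }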
|
/external/eigen/unsupported/test/ |
D | cxx11_tensor_expr.cpp |
    14  using Eigen::Tensor;
    19  Tensor<float, 1> vec1(6);  in test_1d()
    20  Tensor<float, 1, RowMajor> vec2(6);  in test_1d()
    30  TensorMap<Tensor<float, 1>> vec3(data3, 6);  in test_1d()
    33  TensorMap<Tensor<float, 1, RowMajor>> vec4(data4, 6);  in test_1d()
    36  TensorMap<Tensor<float, 1, RowMajor>> vec5(data5, 6);  in test_1d()
    72  TensorMap<Tensor<float, 2>> mat1(data1, 2, 3);  in test_2d()
    74  TensorMap<Tensor<float, 2, RowMajor>> mat2(data2, 2, 3);  in test_2d()
    90  Tensor<float, 2> mat3(2,3);  in test_2d()
    91  Tensor<float, 2, RowMajor> mat4(2,3);  in test_2d()
    [all …]
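The test above exercises Eigen::TensorMap, which views an existing buffer as a tensor without copying. A small stand-alone sketch of the same idea (not taken from the test), mirroring the vec3/mat1 usages listed here:

    #include <Eigen/CXX11/Tensor>

    int main() {
      float data[6] = {0, 1, 2, 3, 4, 5};

      // Interpret the raw buffer as a 2x3 row-major tensor, no copy.
      Eigen::TensorMap<Eigen::Tensor<float, 2, Eigen::RowMajor>> mat(data, 2, 3);

      // Expressions on a map write through to the underlying buffer.
      mat = mat + mat.constant(1.0f);
      return data[0] == 1.0f ? 0 : 1;
    }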
|
D | cxx11_tensor_of_complex.cpp |
    14  using Eigen::Tensor;
    21  Tensor<std::complex<float>, 1> data1(3);  in test_additions()
    22  Tensor<std::complex<float>, 1> data2(3);  in test_additions()
    28  Tensor<std::complex<float>, 1> sum = data1 + data2;  in test_additions()
    37  Tensor<std::complex<float>, 1> data1(3);  in test_abs()
    38  Tensor<std::complex<double>, 1> data2(3);  in test_abs()
    42  Tensor<float, 1> abs1 = data1.abs();  in test_abs()
    43  Tensor<double, 1> abs2 = data2.abs();  in test_abs()
    53  Tensor<std::complex<float>, 1> data1(3);  in test_conjugate()
    54  Tensor<std::complex<double>, 1> data2(3);  in test_conjugate()
    [all …]
|
D | cxx11_tensor_of_const_values.cpp |
    14  using Eigen::Tensor;
    20  TensorMap<Tensor<const float, 2>> mat1(data1, 2, 3);  in test_assign()
    22  const TensorMap<Tensor<float, 2>> mat2(data2, 2, 3);  in test_assign()
    29  Tensor<float, 2> rslt1;  in test_assign()
    31  Tensor<float, 2> rslt2;  in test_assign()
    34  Tensor<float, 2> rslt3 = mat1;  in test_assign()
    35  Tensor<float, 2> rslt4 = mat2;  in test_assign()
    37  Tensor<float, 2> rslt5(mat1);  in test_assign()
    38  Tensor<float, 2> rslt6(mat2);  in test_assign()
    56  TensorMap<Tensor<const float, 2>> mat1(data1, 2, 3);  in test_plus()
    [all …]
|
/external/tensorflow/tensorflow/lite/delegates/gpu/cl/ |
D | tensor.h |
    40  class Tensor : public GPUObject, public GpuSpatialTensor {
    42    Tensor()  in Tensor()  function
    44    Tensor(cl_mem memory, bool memory_owner, const BHWC& shape,
    46    Tensor(cl_mem memory, bool memory_owner, const BHWDC& shape,
    48    Tensor(cl_mem memory, bool memory_owner, cl_mem image_buffer_memory,
    50    Tensor(cl_mem memory, bool memory_owner, cl_mem image_buffer_memory,
    54    Tensor(Tensor&& tensor);
    55    Tensor& operator=(Tensor&& tensor);
    56    Tensor(const Tensor&) = delete;
    57    Tensor& operator=(const Tensor&) = delete;
    [all …]
|
/external/tensorflow/tensorflow/java/src/test/java/org/tensorflow/ |
D | TensorTest.java |
    54  try (Tensor<String> t = Tensors.create(strings)) {  in createWithByteBuffer()
    62  try (Tensor<Boolean> t = Tensor.create(Boolean.class, bools_shape, ByteBuffer.wrap(bools_))) {  in createWithByteBuffer()
    70  try (Tensor<String> t =  in createWithByteBuffer()
    71      Tensor.create(String.class, strings_shape, ByteBuffer.wrap(strings_))) {  in createWithByteBuffer()
    80  try (Tensor<Double> t = Tensor.create(Double.class, doubles_shape, buf)) {  in createWithByteBuffer()
    87  try (Tensor<Boolean> t =  in createWithByteBuffer()
    88      Tensor.create(Boolean.class, new long[bools_.length * 2], ByteBuffer.wrap(bools_))) {  in createWithByteBuffer()
    107 try (Tensor<Double> t = Tensor.create(new long[] {doubles.length}, buf)) {  in createFromBufferWithNonNativeByteOrder()
    123 try (Tensor<Double> t = Tensor.create(shape, DoubleBuffer.wrap(doubles))) {  in createWithTypedBuffer()
    127 try (Tensor<Float> t = Tensor.create(shape, FloatBuffer.wrap(floats))) {  in createWithTypedBuffer()
    [all …]
|
/external/tensorflow/tensorflow/core/util/ |
D | batch_util.h |
    30  Status CopyElementToSlice(Tensor element, Tensor* parent, int64 index);
    33  Status CopySliceToElement(const Tensor& parent, Tensor* element, int64 index);
    43  Status CopyContiguousSlices(const Tensor& src, int64 src_offset,
    44                              int64 dst_offset, int64 num_slices, Tensor* dst);
    50  Status MaybeMoveSliceToElement(Tensor* parent, Tensor* element, int64 index);
    54  Status SetElementZero(Tensor* element, const Tensor& padding);
    59  Status CopyElementToLargerSlice(const Tensor& element, Tensor* parent,
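batch_util.h declares helpers for copying a single element tensor into, or out of, one slice of a batched tensor. A hypothetical usage sketch follows; the tensorflow::batch_util namespace is assumed from the header's location and is not shown in this listing.

    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/tensor_shape.h"
    #include "tensorflow/core/util/batch_util.h"

    tensorflow::Status Example() {
      using tensorflow::Tensor;
      using tensorflow::TensorShape;

      // A batch of 4 rows and a single 3-element row to splice in.
      Tensor parent(tensorflow::DT_FLOAT, TensorShape({4, 3}));
      Tensor element(tensorflow::DT_FLOAT, TensorShape({3}));
      element.flat<float>().setConstant(1.0f);

      // Copy `element` into slice 0 of `parent`; the element is taken by
      // value, so the helper may move its buffer when it is the sole owner.
      return tensorflow::batch_util::CopyElementToSlice(element, &parent, 0);
    }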
|
/external/tensorflow/tensorflow/core/tpu/ |
D | virtual_device.cc |
    27  void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device,
    28                             Tensor* device_tensor, StatusCallback done,
    30  void CopyDeviceTensorToCPU(const Tensor* device_tensor,
    32                             Tensor* cpu_tensor, StatusCallback done) override;
    33  void CopyTensorInSameDevice(const Tensor* input_tensor, Device* device,
    34                              Tensor* output_tensor,
    38  void VirtualDeviceContext::CopyCPUTensorToDevice(const Tensor* cpu_tensor,  in CopyCPUTensorToDevice()
    40                                                   Tensor* device_tensor,  in CopyCPUTensorToDevice()
    47  void VirtualDeviceContext::CopyDeviceTensorToCPU(const Tensor* device_tensor,  in CopyDeviceTensorToCPU()
    50                                                   Tensor* cpu_tensor,  in CopyDeviceTensorToCPU()
    [all …]
|