/external/tensorflow/tensorflow/python/framework/
D | common_shapes.py |
  45   for (dim_x, dim_y) in broadcasted_dims:
  46   if dim_x.value is None or dim_y.value is None:
  54   elif dim_y.value is not None and dim_y.value > 1:
  55   return_dims.append(dim_y)
  60   return_dims.append(dim_y)
  61   elif dim_y.value == 1:
  64   elif dim_x.value == dim_y.value:
  67   return_dims.append(dim_x.merge_with(dim_y))
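These hits are from TensorFlow's static-shape broadcasting helper: unknown dims stay unknown, a size-1 dim broadcasts to the other size, and equal known dims are merged. A rough, simplified sketch of that per-dimension rule in plain Python, with None standing in for an unknown Dimension (the helper name broadcast_dim is hypothetical):

    def broadcast_dim(dim_x, dim_y):
        # Simplified per-dimension broadcast rule; None marks an unknown dimension.
        if dim_x is None or dim_y is None:
            return None          # unknown stays unknown (the real helper keeps a known dim > 1)
        if dim_x == 1:
            return dim_y         # a 1 broadcasts to the other size
        if dim_y == 1:
            return dim_x
        if dim_x == dim_y:
            return dim_x         # equal known dims merge
        raise ValueError("incompatible dimensions: %r vs %r" % (dim_x, dim_y))

    # Right-aligned dims of (3, 1) and (1, 4) broadcast to (3, 4):
    print([broadcast_dim(a, b) for a, b in zip((3, 1), (1, 4))])  # [3, 4]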
|
/external/eigen/unsupported/test/ |
D | cxx11_tensor_reduction_cuda.cu |
  62   int dim_y = 1;  in test_first_dim_reductions() local
  65   Tensor<Type, 3, DataLayout> in(dim_x, dim_y, dim_z);  in test_first_dim_reductions()
  77   Type* in_data = (Type*)dev.allocate(dim_x*dim_y*dim_z*sizeof(Type));  in test_first_dim_reductions()
  78   Type* out_data = (Type*)dev.allocate(dim_z*dim_y*sizeof(Type));  in test_first_dim_reductions()
  79   Eigen::TensorMap<Eigen::Tensor<Type, 3, DataLayout> > gpu_in(in_data, dim_x, dim_y, dim_z);  in test_first_dim_reductions()
  80   Eigen::TensorMap<Eigen::Tensor<Type, 2, DataLayout> > gpu_out(out_data, dim_y, dim_z);  in test_first_dim_reductions()
  86   Tensor<Type, 2, DataLayout> redux_gpu(dim_y, dim_z);  in test_first_dim_reductions()
  102  int dim_y = 1;  in test_last_dim_reductions() local
  105  Tensor<Type, 3, DataLayout> in(dim_x, dim_y, dim_z);  in test_last_dim_reductions()
  117  Type* in_data = (Type*)dev.allocate(dim_x*dim_y*dim_z*sizeof(Type));  in test_last_dim_reductions()
  [all …]
|
D | cxx11_tensor_reduction_sycl.cpp |
  58   int dim_y = 1;  in test_first_dim_reductions_sycl() local
  61   array<int, 3> tensorRange = {{dim_x, dim_y, dim_z}};  in test_first_dim_reductions_sycl()
  64   array<int, 2> reduced_tensorRange = {{dim_y, dim_z}};  in test_first_dim_reductions_sycl()
  96   int dim_y = 1;  in test_last_dim_reductions_sycl() local
  99   array<int, 3> tensorRange = {{dim_x, dim_y, dim_z}};  in test_last_dim_reductions_sycl()
  102  array<int, 2> reduced_tensorRange = {{dim_x, dim_y}};  in test_last_dim_reductions_sycl()
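Both the CUDA and the SYCL reduction tests above build a rank-3 tensor of shape (dim_x, dim_y, dim_z) with dim_y fixed to 1, reduce it over the first or the last dimension, and expect a (dim_y, dim_z) or (dim_x, dim_y) result respectively. A quick NumPy sketch of the same shape behaviour (NumPy is used here only for illustration; every size other than dim_y = 1 is made up):

    import numpy as np

    dim_x, dim_y, dim_z = 4, 1, 5            # dim_y = 1 as in the tests; other sizes are arbitrary
    t = np.random.rand(dim_x, dim_y, dim_z)

    first = t.sum(axis=0)                    # first-dim reduction -> shape (dim_y, dim_z)
    last = t.sum(axis=-1)                    # last-dim reduction  -> shape (dim_x, dim_y)

    print(first.shape)  # (1, 5)
    print(last.shape)   # (4, 1)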
|
/external/llvm-project/mlir/test/mlir-cpu-runner/ |
D | memref_reinterpret_cast.mlir |
  15  %dim_y = dim %input, %c1 : memref<2x3xf32>
  16  scf.parallel (%i, %j) = (%c0, %c0) to (%dim_x, %dim_y) step (%c1, %c1) {
  17  %prod = muli %i, %dim_y : index
|
D | memref_reshape.mlir |
  16  %dim_y = dim %input, %c1 : memref<2x3xf32>
  17  scf.parallel (%i, %j) = (%c0, %c0) to (%dim_x, %dim_y) step (%c1, %c1) {
  18  %prod = muli %i, %dim_y : index
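In both MLIR tests, %dim_y is the second extent of a 2x3 memref and the scf.parallel loop computes %i * %dim_y per iteration, presumably combined with %j to give each cell a distinct row-major value (only the muli line appears in the hits). A hypothetical Python analogue of that fill:

    dim_x, dim_y = 2, 3                        # extents of the 2x3 memref
    buf = [[0.0] * dim_y for _ in range(dim_x)]
    for i in range(dim_x):
        for j in range(dim_y):
            buf[i][j] = float(i * dim_y + j)   # row-major linear index, as in %prod = muli %i, %dim_y
    print(buf)  # [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]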
|
/external/tensorflow/tensorflow/core/framework/ |
D | common_shape_fns.cc |
  2016  const auto dim_y =  in BroadcastBinaryOpOutputShapeFnHelper() local
  2018  if (!c->ValueKnown(dim_x) || !c->ValueKnown(dim_y)) {  in BroadcastBinaryOpOutputShapeFnHelper()
  2034  } else if (c->Value(dim_y) > 1) {  in BroadcastBinaryOpOutputShapeFnHelper()
  2039  dims.push_back(dim_y);  in BroadcastBinaryOpOutputShapeFnHelper()
  2041  dims.push_back(dim_y);  in BroadcastBinaryOpOutputShapeFnHelper()
  2042  } else if (c->Value(dim_y) == 1) {  in BroadcastBinaryOpOutputShapeFnHelper()
  2044  } else if (dim_y.SameHandle(dim_x)) {  in BroadcastBinaryOpOutputShapeFnHelper()
  2046  } else if (!c->ValueKnown(dim_x) && !c->ValueKnown(dim_y)) {  in BroadcastBinaryOpOutputShapeFnHelper()
  2055  } else if (c->Value(dim_x) == 1 || c->Value(dim_y) == 1) {  in BroadcastBinaryOpOutputShapeFnHelper()
  2058  dims.push_back(dim_y);  in BroadcastBinaryOpOutputShapeFnHelper()
  [all …]
|
/external/llvm-project/lldb/source/Plugins/LanguageRuntime/RenderScript/RenderScriptRuntime/ |
D | RenderScriptRuntime.cpp |
  2119  uint32_t dim_y = alloc->dimension.get()->dim_2;  in JITAllocationSize() local
  2128  if (dim_y == 0)  in JITAllocationSize()
  2129  dim_y = 1;  in JITAllocationSize()
  2133  alloc->size = dim_x * dim_y * dim_z * *alloc->element.datum_size.get();  in JITAllocationSize()
  2145  dim_y = dim_y == 0 ? 0 : dim_y - 1;  in JITAllocationSize()
  2149  *alloc->address.get(), dim_x, dim_y, dim_z);  in JITAllocationSize()
  3258  uint32_t dim_y = alloc->dimension.get()->dim_2;  in DumpAllocation() local
  3259  dim_y = dim_y == 0 ? 1 : dim_y;  in DumpAllocation()
  3276  for (uint32_t y = 0; y < dim_y; ++y) {  in DumpAllocation()
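JITAllocationSize treats a zero extent as 1 before multiplying the three extents by the element size to get the allocation's byte size. A hedged Python sketch of that computation (the function name and arguments are illustrative, not the plugin's API):

    def allocation_size(dim_x, dim_y, dim_z, datum_size):
        # A dimension of 0 means "not used" and counts as 1, as in JITAllocationSize.
        dim_x = dim_x or 1
        dim_y = dim_y or 1
        dim_z = dim_z or 1
        return dim_x * dim_y * dim_z * datum_size

    print(allocation_size(64, 32, 0, 4))  # 64 * 32 * 1 * 4 = 8192 bytes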
|
/external/tensorflow/tensorflow/python/keras/saving/saved_model/ |
D | load.py |
  1097  for dim_x, dim_y in zip(x.dims, y.dims):
  1098  if (dim_x != dim_y
  1100  or tensor_shape.dimension_value(dim_y) is None):
|
/external/tensorflow/tensorflow/lite/toco/graph_transformations/ |
D | propagate_fixed_sizes.cc |
  102  int dim_y = dim_y_is_one ? 1 : input_shape_y.dims(i - (rank_out - rank_y));  in ComputeBinaryOperatorOutputSize() local
  103  if (dim_x == -1 || dim_y == -1) {  in ComputeBinaryOperatorOutputSize()
  106  } else if (dim_x == 1 || dim_y == 1) {  in ComputeBinaryOperatorOutputSize()
  110  dims_out->push_back(dim_y);  in ComputeBinaryOperatorOutputSize()
  113  DCHECK_EQ(dim_y, 1);  in ComputeBinaryOperatorOutputSize()
  118  CHECK_EQ(dim_x, dim_y) << "Dimensions must match";  in ComputeBinaryOperatorOutputSize()
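The TOCO helper walks the output rank from the right: positions that fall outside the shorter shape count as 1, -1 marks an unknown dimension, a 1 broadcasts, and any other mismatch is an error. A simplified sketch of that right-aligned walk (the function name is hypothetical and error handling is reduced to an assert):

    def binary_op_output_dims(shape_x, shape_y):
        # -1 marks an unknown dim, loosely mirroring ComputeBinaryOperatorOutputSize.
        rank_x, rank_y = len(shape_x), len(shape_y)
        rank_out = max(rank_x, rank_y)
        dims_out = []
        for i in range(rank_out):
            dim_x = 1 if i < rank_out - rank_x else shape_x[i - (rank_out - rank_x)]
            dim_y = 1 if i < rank_out - rank_y else shape_y[i - (rank_out - rank_y)]
            if dim_x == -1 or dim_y == -1:
                dims_out.append(-1)                 # unknown propagates
            elif dim_x == 1 or dim_y == 1:
                dims_out.append(max(dim_x, dim_y))  # a 1 broadcasts to the other dim
            else:
                assert dim_x == dim_y, "Dimensions must match"
                dims_out.append(dim_x)
        return dims_out

    print(binary_op_output_dims([4, 1, 3], [5, 3]))  # [4, 5, 3]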
|
/external/freetype/src/pshinter/ |
D | pshalgo.c |
  2113  PSH_Dimension dim_y = &glyph->globals->dimension[1];  in ps_hints_apply() local
  2116  FT_Fixed y_scale = dim_y->scale_mult;  in ps_hints_apply()
|
/external/tensorflow/tensorflow/python/eager/ |
D | function.py |
  208  for dim_x, dim_y in zip(x.dims, y.dims):
  209  if (dim_x != dim_y
  211  or tensor_shape.dimension_value(dim_y) is None):
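These hits, and the identical ones in load.py above, compare two shapes dimension by dimension and give up as soon as a pair differs or either value is unknown. A minimal sketch of that kind of check, using plain ints and None for an unknown dimension (the helper name is made up):

    def dims_all_known_and_equal(dims_x, dims_y):
        # True only when every paired dimension is known and identical,
        # in the spirit of the dim-by-dim loop in function.py / load.py.
        for dim_x, dim_y in zip(dims_x, dims_y):
            if dim_x != dim_y or dim_x is None or dim_y is None:
                return False
        return True

    print(dims_all_known_and_equal([2, 3], [2, 3]))     # True
    print(dims_all_known_and_equal([2, None], [2, 3]))  # False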
|