/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
bias_ops.cc
    106  std::vector<int64> reduce_dims(out_backprop_shape.dims() - 1);           [in Compile(), local]
    107  std::iota(reduce_dims.begin(), reduce_dims.begin() + feature_dim, 0);    [in Compile()]
    108  std::iota(reduce_dims.begin() + feature_dim, reduce_dims.end(),          [in Compile()]
    117  *ctx->GetOrCreateAdd(accumulation_type), reduce_dims);                   [in Compile()]
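The two std::iota calls above build the BiasAddGrad reduction list: every dimension of the backprop shape except the feature dimension. A minimal standalone sketch of that pattern, with hypothetical names (AllDimsExcept, rank, feature_dim) rather than the kernel's actual surroundings:

    #include <cstdint>
    #include <iostream>
    #include <numeric>
    #include <vector>

    // Build the list of dimensions to reduce over: all dims except
    // `feature_dim`. Mirrors the two std::iota calls in bias_ops.cc.
    std::vector<int64_t> AllDimsExcept(int rank, int feature_dim) {
      std::vector<int64_t> reduce_dims(rank - 1);
      // Dims before the feature dim: 0 .. feature_dim - 1.
      std::iota(reduce_dims.begin(), reduce_dims.begin() + feature_dim, 0);
      // Dims after the feature dim: feature_dim + 1 .. rank - 1.
      std::iota(reduce_dims.begin() + feature_dim, reduce_dims.end(),
                feature_dim + 1);
      return reduce_dims;
    }

    int main() {
      for (int64_t d : AllDimsExcept(/*rank=*/4, /*feature_dim=*/3))
        std::cout << d << ' ';  // NHWC bias grad: prints 0 1 2
    }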
/external/tensorflow/tensorflow/contrib/quantize/python/
quant_ops.py
    134  reduce_dims = [0]
    136  reduce_dims = [0, 1, 2]
    141  inputs, axis=reduce_dims, name='BatchMin')
    150  inputs, axis=reduce_dims, name='BatchMax')
    259  reduce_dims = [0]
    261  reduce_dims = [0, 1, 2]
    266  inputs, axis=reduce_dims, name='BatchMin')
    275  inputs, axis=reduce_dims, name='BatchMax')
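The two axis lists apparently cover a rank-2 input ([0]) and a rank-4 NHWC input ([0, 1, 2]); both reduce everything except the trailing channel axis, so BatchMin/BatchMax yield one value per channel. A sketch of the rank-4 case using Eigen's unsupported Tensor module (the shape and names are made up, not from quant_ops.py):

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      // Hypothetical NHWC activation tensor.
      Eigen::Tensor<float, 4> inputs(2, 3, 3, 8);
      inputs.setRandom();
      // Reduce batch, height, and width; keep the channel axis.
      Eigen::array<int, 3> reduce_dims{0, 1, 2};
      Eigen::Tensor<float, 1> batch_min = inputs.minimum(reduce_dims);
      Eigen::Tensor<float, 1> batch_max = inputs.maximum(reduce_dims);
      std::cout << batch_min.dimension(0) << " channels\n";  // 8 channels
    }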
/external/eigen/unsupported/test/
cxx11_tensor_argmax.cpp
     73  array<DenseIndex, 3> reduce_dims;                                            [in test_argmax_tuple_reducer(), local]
     74  for (int d = 0; d < 3; ++d) reduce_dims[d] = d;                              [in test_argmax_tuple_reducer()]
     77  reduce_dims, internal::ArgMaxTupleReducer<Tuple<DenseIndex, float> >());     [in test_argmax_tuple_reducer()]
     79  Tensor<float, 1, DataLayout> max_by_dims = tensor.maximum(reduce_dims);      [in test_argmax_tuple_reducer()]
    105  array<DenseIndex, 3> reduce_dims;                                            [in test_argmin_tuple_reducer(), local]
    106  for (int d = 0; d < 3; ++d) reduce_dims[d] = d;                              [in test_argmin_tuple_reducer()]
    109  reduce_dims, internal::ArgMinTupleReducer<Tuple<DenseIndex, float> >());     [in test_argmin_tuple_reducer()]
    111  Tensor<float, 1, DataLayout> min_by_dims = tensor.minimum(reduce_dims);      [in test_argmin_tuple_reducer()]
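The test drives the internal ArgMaxTupleReducer by hand; the public entry point for the same machinery is TensorBase::argmax(). A small sketch, assuming Eigen's unsupported Tensor module:

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      Eigen::Tensor<float, 3> t(2, 3, 4);
      t.setRandom();       // values in a bounded range
      t(1, 2, 0) = 10.0f;  // plant a known maximum
      // argmax() over all dimensions returns a rank-0 tensor holding
      // the flattened (column-major by default) index of the max.
      Eigen::Tensor<Eigen::DenseIndex, 0> flat = t.argmax();
      std::cout << flat() << "\n";  // 5 == 1 + 2*2 + 0*6
    }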
/external/tensorflow/tensorflow/compiler/xla/tests/
batch_normalization_test.cc
    466  std::vector<int64> reduce_dims;                                            [in XLA_TEST_P(), local]
    469  reduce_dims.push_back(i);                                                  [in XLA_TEST_P()]
    474  ReferenceUtil::Reduce4DTo1D(input_array, /*init=*/0.0f, reduce_dims,       [in XLA_TEST_P()]
    478  ReferenceUtil::Reduce4DTo1D(*input_squared, /*init=*/0.0f, reduce_dims,    [in XLA_TEST_P()]
    567  std::vector<int64> reduce_dims;                                            [in XLA_TEST_P(), local]
    570  reduce_dims.push_back(i);                                                  [in XLA_TEST_P()]
    575  ReferenceUtil::Reduce4DTo1D(input_array, /*init=*/0.0f, reduce_dims,       [in XLA_TEST_P()]
    579  ReferenceUtil::Reduce4DTo1D(*input_squared, /*init=*/0.0f, reduce_dims,    [in XLA_TEST_P()]
    678  std::vector<int64> reduce_dims;                                            [in XLA_TEST_P(), local]
    681  reduce_dims.push_back(i);                                                  [in XLA_TEST_P()]
    [all …]
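The reference path reduces both the input and its square over the non-feature dimensions, then derives the batch-norm statistics as mean = sum/n and var = sum_sq/n - mean^2. A scalar sketch of that arithmetic for a single feature channel (the data is made up):

    #include <iostream>
    #include <vector>

    int main() {
      // One feature channel after reducing over N, H, W: n = 4 values.
      std::vector<float> x = {1, 2, 3, 4};
      float sum = 0, sum_sq = 0;
      for (float v : x) { sum += v; sum_sq += v * v; }
      const float n = static_cast<float>(x.size());
      const float mean = sum / n;                   // 2.5
      const float var = sum_sq / n - mean * mean;   // 7.5 - 6.25 = 1.25
      std::cout << "mean=" << mean << " var=" << var << "\n";
    }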
reduce_test.cc
    549  std::vector<int64> reduce_dims;                                          [member]
    554  spec.bounds.size() - spec.reduce_dims.size(),                            [in PrintTo()]
    557  absl::StrJoin(spec.reduce_dims, ""));                                    [in PrintTo()]
    828  GetParam().reduce_dims);                                                 [in XLA_TEST_P()]
    831  ReferenceUtil::Reduce3DTo2D(input_array, 0.0f, GetParam().reduce_dims,   [in XLA_TEST_P()]
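PrintTo builds the parameterized test name from the spec: the output rank is bounds.size() - reduce_dims.size(), and the reduced dims are joined with no separator. A standalone sketch of that naming scheme (without the absl helpers):

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      std::vector<int64_t> bounds = {2, 3, 4};   // rank-3 input
      std::vector<int64_t> reduce_dims = {0, 2};
      std::string name =
          "R3ToR" + std::to_string(bounds.size() - reduce_dims.size()) + "_";
      for (int64_t d : reduce_dims) name += std::to_string(d);
      std::cout << name << "\n";  // R3ToR1_02
    }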
/external/tensorflow/tensorflow/contrib/distributions/python/ops/
independent.py
    317  reduce_dims = [-i - 1 for i in range(0, num_reduce_dims)]
    320  kullback_leibler.kl_divergence(p, q, name=name), axis=reduce_dims)
    334  reduce_dims = math_ops.range(-num_reduce_dims - 1, -1, 1)
    336  kullback_leibler.kl_divergence(p, q, name=name), axis=reduce_dims)
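Note that the two snippets enumerate different trailing-axis windows: the list comprehension yields [-1, -2, ..., -n], while range(-n - 1, -1, 1) yields [-n - 1, ..., -2]. A plain C++ sketch of both (n stands in for num_reduce_dims):

    #include <iostream>
    #include <numeric>
    #include <vector>

    int main() {
      const int n = 3;
      std::vector<int> comprehension(n);   // [-i - 1 for i in range(n)]
      for (int i = 0; i < n; ++i) comprehension[i] = -i - 1;
      std::vector<int> range_form(n);      // range(-n - 1, -1, 1)
      std::iota(range_form.begin(), range_form.end(), -n - 1);
      for (int a : comprehension) std::cout << a << ' ';  // -1 -2 -3
      std::cout << "| ";
      for (int a : range_form) std::cout << a << ' ';     // -4 -3 -2
    }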
/external/tensorflow/tensorflow/core/kernels/
fused_batch_norm_op.cc
     99  Eigen::array<int, 1> reduce_dims({0});                                       [in operator()(), local]
    104  Eigen::IndexList<Eigen::type2index<0> > reduce_dims;                         [in operator()(), local]
    119  mean.device(d) = (x_rest_by_depth.sum(reduce_dims) * rest_size_inv);         [in operator()()]
    130  variance.device(d) = x_centered.square().sum(reduce_dims) * rest_size_inv;   [in operator()()]
    191  Eigen::array<int, 1> reduce_dims({0});                                       [in operator()(), local]
    196  Eigen::IndexList<Eigen::type2index<0> > reduce_dims;                         [in operator()(), local]
    215  (y_backprop_rest_by_depth * x_scaled).sum(reduce_dims);                      [in operator()()]
    216  auto y_backprop_sum = y_backprop_rest_by_depth.sum(reduce_dims);             [in operator()()]
    230  (y_backprop_rest_by_depth * x_centered).mean(reduce_dims))                   [in operator()()]
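Lines 99 and 104 spell the same reduction axis two ways: a runtime Eigen::array<int, 1> and a compile-time Eigen::IndexList<type2index<0>> that needs no runtime storage. A sketch of the per-channel mean computed with the compile-time form (the shape and scale are made up):

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      // Rows collapse N*H*W; columns are the C depth channels.
      Eigen::Tensor<float, 2> x_rest_by_depth(6, 3);
      x_rest_by_depth.setRandom();
      // Compile-time encoding of "reduce over dimension 0".
      Eigen::IndexList<Eigen::type2index<0>> reduce_dims;
      const float rest_size_inv = 1.0f / 6.0f;
      Eigen::Tensor<float, 1> mean =
          x_rest_by_depth.sum(reduce_dims) * rest_size_inv;
      std::cout << mean << "\n";  // one mean per channel
    }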
bincount_op.cc
     81  Eigen::array<int, 1> reduce_dims({0});                                        [in Compute(), local]
     82  output.device(context->eigen_cpu_device()) = partial_bins.sum(reduce_dims);   [in Compute()]
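partial_bins is a workers x bins matrix: each thread counts into its own row, and the final histogram is the column-wise sum over dimension 0. A sketch of that merge with plain vectors (the data and sizes are made up):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      const int num_workers = 2, num_bins = 4;
      std::vector<std::vector<int32_t>> partial_bins(
          num_workers, std::vector<int32_t>(num_bins, 0));
      const std::vector<int32_t> data[2] = {{0, 1, 1}, {3, 1}};
      // Each worker counts into its private row (no contention).
      for (int w = 0; w < num_workers; ++w)
        for (int32_t v : data[w]) ++partial_bins[w][v];
      // Reduce over the worker dimension, like partial_bins.sum({0}).
      std::vector<int32_t> output(num_bins, 0);
      for (int w = 0; w < num_workers; ++w)
        for (int b = 0; b < num_bins; ++b) output[b] += partial_bins[w][b];
      for (int32_t c : output) std::cout << c << ' ';  // 1 3 0 1
    }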
/external/eigen/unsupported/Eigen/CXX11/src/Tensor/
TensorArgMax.h
    176  const Dims& reduce_dims)
    177  : m_xpr(expr), m_reduce_op(reduce_op), m_return_dim(return_dim), m_reduce_dims(reduce_dims) {}
    187  const Dims& reduce_dims() const { return m_reduce_dims; }
    224  m_impl(op.expression().index_tuples().reduce(op.reduce_dims(), op.reduce_op()), device),
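Line 224 is the heart of the operator: argmax along a dimension is implemented as index_tuples(), which pairs each element with its index, reduced by the tuple reducer over reduce_dims. The user-facing call that builds this expression looks like the following sketch, assuming Eigen's unsupported Tensor module:

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      Eigen::Tensor<float, 2> m(3, 4);
      m.setRandom();
      // Reduce dimension 0: one winning row index per column.
      Eigen::Tensor<Eigen::DenseIndex, 1> best_row = m.argmax(0);
      std::cout << best_row << "\n";
    }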
/external/tensorflow/tensorflow/compiler/xla/service/
algebraic_simplifier.cc
    1752  std::vector<int64> reduce_dims(                                     [in HandleDot(), local]
    1757  reduce_dims,                                                        [in HandleDot()]
    1759  new_dot = AddReduce(new_dot, reduce_dims);                          [in HandleDot()]
    2994  std::vector<int64> reduce_dims = reduce->dimensions();              [in HandleReduce(), local]
    2995  absl::c_sort(reduce_dims);                                          [in HandleReduce()]
    2998  for (int64& dim : reduce_dims) {                                    [in HandleReduce()]
    3007  std::merge(arg_dims.begin(), arg_dims.end(), reduce_dims.begin(),   [in HandleReduce()]
    3008  reduce_dims.end(), std::back_inserter(new_dimensions));             [in HandleReduce()]
    3118  absl::InlinedVector<int64, 8> reduce_dims;                          [in HandleReduceWindow(), local]
    3123  reduce_dims.push_back(i);                                           [in HandleReduceWindow()]
    [all …]
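In HandleReduce, a reduce of a reduce is folded into one: the outer reduce's dimensions are remapped into the original operand's numbering (each inner reduced dim at or before a position shifts it one slot to the right) and then std::merge-d with the inner dims. A standalone sketch of that remap-and-merge (ComposeReduceDims is a hypothetical name, not the XLA function):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <iterator>
    #include <vector>

    // Fold reduce(reduce(x, inner), outer) into reduce(x, result).
    std::vector<int64_t> ComposeReduceDims(std::vector<int64_t> inner,
                                           std::vector<int64_t> outer) {
      std::sort(inner.begin(), inner.end());
      std::sort(outer.begin(), outer.end());
      // Map each outer dim back to x's numbering: every inner dim at or
      // before the running position pushes it one slot to the right.
      for (int64_t& d : outer)
        for (int64_t a : inner)
          if (a <= d) ++d;
      std::vector<int64_t> merged;
      std::merge(inner.begin(), inner.end(), outer.begin(), outer.end(),
                 std::back_inserter(merged));
      return merged;
    }

    int main() {
      // For rank-4 x: reduce(reduce(x, {1}), {1}) == reduce(x, {1, 2}).
      for (int64_t d : ComposeReduceDims({1}, {1})) std::cout << d << ' ';
    }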
/external/tensorflow/tensorflow/core/grappler/optimizers/
constant_folding.cc
    622  BCast::Vec reduce_dims[2];                       [in MaterializeBroadcastGradientArgs(), local]
    623  reduce_dims[0] = bcast.grad_x_reduce_idx();      [in MaterializeBroadcastGradientArgs()]
    624  reduce_dims[1] = bcast.grad_y_reduce_idx();      [in MaterializeBroadcastGradientArgs()]
    630  int reduction_indices = reduce_dims[j].size();   [in MaterializeBroadcastGradientArgs()]
    634  value.vec<int32>()(i) = reduce_dims[j][i];       [in MaterializeBroadcastGradientArgs()]
    636  value.vec<int64>()(i) = reduce_dims[j][i];       [in MaterializeBroadcastGradientArgs()]
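The constant folder materializes BCast's gradient reduction indices as int32/int64 constant tensors. What grad_x_reduce_idx computes, under the simplifying assumption that both shapes are already padded to equal rank (a sketch, not TF's actual BCast):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Gradient w.r.t. x must be summed over every dimension where x was
    // broadcast: x's extent is 1 while y's is not.
    std::vector<int32_t> GradXReduceDims(const std::vector<int64_t>& x,
                                         const std::vector<int64_t>& y) {
      std::vector<int32_t> dims;
      for (size_t i = 0; i < x.size(); ++i)
        if (x[i] == 1 && y[i] != 1) dims.push_back(static_cast<int32_t>(i));
      return dims;
    }

    int main() {
      for (int32_t d : GradXReduceDims({1, 3, 1}, {2, 3, 4}))
        std::cout << d << ' ';  // 0 2
    }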