/external/tensorflow/tensorflow/lite/delegates/gpu/common/tasks/ |
D | reduce.h |
    30  class Reduce : public GPUOperation {
    32  Reduce() = default;
    33  Reduce(const std::map<Axis, int>& axis_to_reduce, OperationType op_type,
    44  Reduce(Reduce&& operation);
    45  Reduce& operator=(Reduce&& operation);
    46  Reduce(const Reduce&) = delete;
    47  Reduce& operator=(const Reduce&) = delete;
    58  Reduce CreateReduce(const std::set<Axis>& axis_to_reduce, const BHWC& src_shape,
    62  Reduce CreateReduce(const std::set<Axis>& axis_to_reduce,
|
D | reduce_test_util.cc |
    43  Reduce operation = in MeanHWTest()
    47  src_tensor, absl::make_unique<Reduce>(std::move(operation)), in MeanHWTest()
    70  Reduce operation = in ReduceSumChannelsTest()
    74  src_tensor, absl::make_unique<Reduce>(std::move(operation)), in ReduceSumChannelsTest()
    97  Reduce operation = in ReduceProductChannelsTest()
    101  src_tensor, absl::make_unique<Reduce>(std::move(operation)), in ReduceProductChannelsTest()
    125  Reduce operation = in ReduceMaxChannelsTest()
    129  src_tensor, absl::make_unique<Reduce>(std::move(operation)), in ReduceMaxChannelsTest()
    153  Reduce operation = in ReduceMinChannelsTest()
    157  src_tensor, absl::make_unique<Reduce>(std::move(operation)), in ReduceMinChannelsTest()
|
D | reduce.cc |
    112  Reduce::Reduce(const std::map<Axis, int>& axis_to_reduce, OperationType op_type, in Reduce() function in tflite::gpu::Reduce
    148  Reduce::Reduce(Reduce&& operation) in Reduce() function in tflite::gpu::Reduce
    152  Reduce& Reduce::operator=(Reduce&& operation) { in operator =()
    160  std::string Reduce::GetReduceKernelCode(const OperationDef& op_def, in GetReduceKernelCode()
    425  absl::Status Reduce::BindArguments(ArgumentsBinder* args) { in BindArguments()
    451  int3 Reduce::GetGridSize() const { in GetGridSize()
    463  void Reduce::GetPossibleKernelWorkGroups(TuningType tuning_type, in GetPossibleKernelWorkGroups()
    475  Reduce CreateReduce(const std::set<Axis>& axis_to_reduce, const BHWC& src_shape, in CreateReduce()
    478  return Reduce(GetSizesFromShape(axis_to_reduce, src_shape), op_type, in CreateReduce()
    482  Reduce CreateReduce(const std::set<Axis>& axis_to_reduce, in CreateReduce()
    [all …]
|
/external/deqp-deps/glslang/Test/baseResults/ |
D | spv.shaderBallotAMD.comp.out |
    96  29: 6(int) GroupSMin 28 Reduce 27
    102  36: 7(int) GroupUMin 28 Reduce 35
    104  38: 7(int) GroupUMin 28 Reduce 37
    111  46: 9(float) GroupFMin 28 Reduce 45
    113  48: 9(float) GroupFMin 28 Reduce 47
    115  50: 9(float) GroupFMin 28 Reduce 49
    122  58:11(float64_t) GroupFMin 28 Reduce 57
    124  60:11(float64_t) GroupFMin 28 Reduce 59
    126  62:11(float64_t) GroupFMin 28 Reduce 61
    128  64:11(float64_t) GroupFMin 28 Reduce 63
    [all …]
|
D | spv.subgroupExtendedTypesArithmetic.comp.out |
    142  43: 17(int8_t) GroupNonUniformIAdd 42 Reduce 41
    149  52: 47(i8vec2) GroupNonUniformIAdd 42 Reduce 51
    158  62: 58(i8vec3) GroupNonUniformIAdd 42 Reduce 61
    166  70: 18(i8vec4) GroupNonUniformIAdd 42 Reduce 69
    172  75: 17(int8_t) GroupNonUniformIMul 42 Reduce 74
    179  81: 47(i8vec2) GroupNonUniformIMul 42 Reduce 80
    188  89: 58(i8vec3) GroupNonUniformIMul 42 Reduce 88
    196  96: 18(i8vec4) GroupNonUniformIMul 42 Reduce 95
    202  101: 17(int8_t) GroupNonUniformSMin 42 Reduce 100
    209  107: 47(i8vec2) GroupNonUniformSMin 42 Reduce 106
    [all …]
|
D | spv.subgroupArithmetic.comp.out |
    108  35: 17(float) GroupNonUniformFAdd 34 Reduce 33
    115  44: 39(fvec2) GroupNonUniformFAdd 34 Reduce 43
    124  54: 50(fvec3) GroupNonUniformFAdd 34 Reduce 53
    132  62: 18(fvec4) GroupNonUniformFAdd 34 Reduce 61
    138  68: 19(int) GroupNonUniformIAdd 34 Reduce 67
    145  76: 71(ivec2) GroupNonUniformIAdd 34 Reduce 75
    154  85: 81(ivec3) GroupNonUniformIAdd 34 Reduce 84
    162  92: 20(ivec4) GroupNonUniformIAdd 34 Reduce 91
    168  98: 6(int) GroupNonUniformIAdd 34 Reduce 97
    175  106: 101(ivec2) GroupNonUniformIAdd 34 Reduce 105
    [all …]
|
/external/angle/third_party/vulkan-deps/glslang/src/Test/baseResults/ |
D | spv.shaderBallotAMD.comp.out |
    96  29: 6(int) GroupSMin 28 Reduce 27
    102  36: 7(int) GroupUMin 28 Reduce 35
    104  38: 7(int) GroupUMin 28 Reduce 37
    111  46: 9(float) GroupFMin 28 Reduce 45
    113  48: 9(float) GroupFMin 28 Reduce 47
    115  50: 9(float) GroupFMin 28 Reduce 49
    122  58:11(float64_t) GroupFMin 28 Reduce 57
    124  60:11(float64_t) GroupFMin 28 Reduce 59
    126  62:11(float64_t) GroupFMin 28 Reduce 61
    128  64:11(float64_t) GroupFMin 28 Reduce 63
    [all …]
|
D | spv.subgroupExtendedTypesArithmetic.comp.out |
    142  43: 17(int8_t) GroupNonUniformIAdd 42 Reduce 41
    149  52: 47(i8vec2) GroupNonUniformIAdd 42 Reduce 51
    158  62: 58(i8vec3) GroupNonUniformIAdd 42 Reduce 61
    166  70: 18(i8vec4) GroupNonUniformIAdd 42 Reduce 69
    172  75: 17(int8_t) GroupNonUniformIMul 42 Reduce 74
    179  81: 47(i8vec2) GroupNonUniformIMul 42 Reduce 80
    188  89: 58(i8vec3) GroupNonUniformIMul 42 Reduce 88
    196  96: 18(i8vec4) GroupNonUniformIMul 42 Reduce 95
    202  101: 17(int8_t) GroupNonUniformSMin 42 Reduce 100
    209  107: 47(i8vec2) GroupNonUniformSMin 42 Reduce 106
    [all …]
|
D | spv.subgroupArithmetic.comp.out |
    108  35: 17(float) GroupNonUniformFAdd 34 Reduce 33
    115  44: 39(fvec2) GroupNonUniformFAdd 34 Reduce 43
    124  54: 50(fvec3) GroupNonUniformFAdd 34 Reduce 53
    132  62: 18(fvec4) GroupNonUniformFAdd 34 Reduce 61
    138  68: 19(int) GroupNonUniformIAdd 34 Reduce 67
    145  76: 71(ivec2) GroupNonUniformIAdd 34 Reduce 75
    154  85: 81(ivec3) GroupNonUniformIAdd 34 Reduce 84
    162  92: 20(ivec4) GroupNonUniformIAdd 34 Reduce 91
    168  98: 6(int) GroupNonUniformIAdd 34 Reduce 97
    175  106: 101(ivec2) GroupNonUniformIAdd 34 Reduce 105
    [all …]
|
/external/llvm-project/mlir/test/Dialect/SPIRV/Serialization/ |
D | non-uniform-ops.mlir |
    28  // CHECK: %{{.+}} = spv.GroupNonUniformFAdd "Workgroup" "Reduce" %{{.+}} : f32
    29  %0 = spv.GroupNonUniformFAdd "Workgroup" "Reduce" %val : f32
    35  // CHECK: %{{.+}} = spv.GroupNonUniformFMax "Workgroup" "Reduce" %{{.+}} : f32
    36  %0 = spv.GroupNonUniformFMax "Workgroup" "Reduce" %val : f32
    42  // CHECK: %{{.+}} = spv.GroupNonUniformFMin "Workgroup" "Reduce" %{{.+}} : f32
    43  %0 = spv.GroupNonUniformFMin "Workgroup" "Reduce" %val : f32
    49  // CHECK: %{{.+}} = spv.GroupNonUniformFMul "Workgroup" "Reduce" %{{.+}} : f32
    50  %0 = spv.GroupNonUniformFMul "Workgroup" "Reduce" %val : f32
    56  // CHECK: %{{.+}} = spv.GroupNonUniformIAdd "Workgroup" "Reduce" %{{.+}} : i32
    57  %0 = spv.GroupNonUniformIAdd "Workgroup" "Reduce" %val : i32
    [all …]
|
/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | vector_ops_reduce_test.cc |
    61  Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer, in TEST_F()
    74  Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer, in TEST_F()
    86  Reduce(x, ConstantR0<float>(&builder_, 0.0f), max_reducer, in TEST_F()
    97  Reduce(x, ConstantR0<float>(&builder_, 4.0f), max_reducer, in TEST_F()
    113  Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer, in TEST_F()
    127  Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer, in TEST_F()
    136  Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer, in TEST_F()
    147  Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer, in TEST_F()
    159  Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer, in TEST_F()
    170  Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer, in TEST_F()
    [all …]
|
D | reduce_test.cc |
    101  Reduce(input, zero, add_f32, /*dimensions_to_reduce=*/{0}); in RunR1ToR0Test()
    136  Reduce(pred_values, init_value, reduce, in RunR1ToR0PredTest()
    175  Reduce(input_pred, init_value, reduce_op, in RunR2ToR1PredTest()
    209  Reduce(input, zero, add_f32, /*dimensions_to_reduce=*/{0, 1}); in RunR2ToR0Test()
    236  Reduce(input, zero, add_f32, /*dimensions_to_reduce=*/{0}); in RunR2ToR1Test()
    292  Reduce(input, zero, reduction_function, in RunVectorizedReduceTestForType()
    447  Reduce(log_, zero, add_f32, /*dimensions_to_reduce=*/{0}); in XLA_TEST_F()
    478  Reduce(transpose, zero, add_f32, /*dimensions_to_reduce=*/{1}); in XLA_TEST_F()
    508  Reduce(transpose, zero, add_f32, /*dimensions_to_reduce=*/{0}); in XLA_TEST_F()
    525  Reduce(reshape, zero, add_f32, /*dimensions_to_reduce=*/{0}); in XLA_TEST_F()
    [all …]
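All of the test hits above go through the XlaBuilder Reduce API. As a point of reference, here is a minimal sketch of that call pattern; the function name and literal values are made up for illustration, and CreateScalarAddComputation is the arithmetic-library helper that builds an add_f32-style reducer:

    #include "tensorflow/compiler/xla/client/lib/arithmetic.h"
    #include "tensorflow/compiler/xla/client/xla_builder.h"

    // Sums a rank-1 constant over dimension 0, mirroring the
    // Reduce(input, zero, add_f32, /*dimensions_to_reduce=*/{0}) calls above.
    xla::XlaOp BuildR1Sum(xla::XlaBuilder* builder) {
      xla::XlaOp input = xla::ConstantR1<float>(builder, {1.f, 2.f, 3.f, 4.f});
      xla::XlaOp zero = xla::ConstantR0<float>(builder, 0.f);
      xla::XlaComputation add_f32 =
          xla::CreateScalarAddComputation(xla::F32, builder);
      return xla::Reduce(input, zero, add_f32, /*dimensions_to_reduce=*/{0});
    }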
|
/external/libtextclassifier/native/utils/grammar/semantics/evaluators/ |
D | arithmetic-eval.cc |
    25  StatusOr<const SemanticValue*> Reduce( in Reduce() function
    106  return Reduce<int8>(composer_, context, arithmetic_expression, arena); in Apply()
    108  return Reduce<uint8>(composer_, context, arithmetic_expression, arena); in Apply()
    110  return Reduce<int16>(composer_, context, arithmetic_expression, arena); in Apply()
    112  return Reduce<uint16>(composer_, context, arithmetic_expression, arena); in Apply()
    114  return Reduce<int32>(composer_, context, arithmetic_expression, arena); in Apply()
    116  return Reduce<uint32>(composer_, context, arithmetic_expression, arena); in Apply()
    118  return Reduce<int64>(composer_, context, arithmetic_expression, arena); in Apply()
    120  return Reduce<uint64>(composer_, context, arithmetic_expression, arena); in Apply()
    122  return Reduce<float>(composer_, context, arithmetic_expression, arena); in Apply()
    [all …]
|
/external/Reactive-Extensions/RxCpp/Rx/v2/src/rxcpp/operators/ |
D | rx-reduce.hpp |
    508  …class Reduce = rxo::detail::reduce<rxu::value_type_t<Observable>, rxu::decay_t<Observable>, rxu::d…
    509  class Value = rxu::value_type_t<Reduce>,
    510  class Result = observable<Value, Reduce>>
    513  …return Result(Reduce(std::forward<Observable>(o), std::forward<Accumulator>(a), std::forward<Resul… in member()
    518  …class Reduce = rxo::detail::reduce<rxu::value_type_t<Observable>, rxu::decay_t<Observable>, rxu::d…
    519  class Value = rxu::value_type_t<Reduce>,
    520  class Result = observable<Value, Reduce>>
    523  …return Result(Reduce(std::forward<Observable>(o), std::forward<Accumulator>(a), rxu::detail::take_… in member()
    544  …class Reduce = rxo::detail::reduce<SValue, rxu::decay_t<TakeOne>, rxu::decay_t<Accumulator>, rxu::…
    545  class RValue = rxu::value_type_t<Reduce>,
    [all …]
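For orientation, a small usage sketch of the reduce operator these overloads implement, assuming a recent RxCpp: the three-argument form takes a seed, an accumulator, and a result selector that maps the final accumulator to the emitted value.

    #include "rxcpp/rx.hpp"

    #include <cstdio>

    int main() {
      // Fold 1..5 into their sum; the result selector maps the final
      // accumulator to the value delivered to the subscriber.
      rxcpp::observable<>::range(1, 5)
          .reduce(0,
                  [](int acc, int v) { return acc + v; },
                  [](int acc) { return acc; })
          .subscribe([](int total) { std::printf("sum = %d\n", total); });
      return 0;
    }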
|
/external/llvm-project/mlir/test/Dialect/SPIRV/ |
D | non-uniform-ops.mlir |
    97  // CHECK: %{{.+}} = spv.GroupNonUniformFAdd "Workgroup" "Reduce" %{{.+}} : f32
    98  %0 = spv.GroupNonUniformFAdd "Workgroup" "Reduce" %val : f32
    116  // CHECK: %{{.+}} = spv.GroupNonUniformFMul "Workgroup" "Reduce" %{{.+}} : f32
    117  %0 = spv.GroupNonUniformFMul "Workgroup" "Reduce" %val : f32
    137  // CHECK: %{{.+}} = spv.GroupNonUniformFMax "Workgroup" "Reduce" %{{.+}} : f32
    138  %0 = spv.GroupNonUniformFMax "Workgroup" "Reduce" %val : f32
    150  // CHECK: %{{.+}} = spv.GroupNonUniformFMin "Workgroup" "Reduce" %{{.+}} : f32
    151  %0 = spv.GroupNonUniformFMin "Workgroup" "Reduce" %val : f32
    163  // CHECK: %{{.+}} = spv.GroupNonUniformIAdd "Workgroup" "Reduce" %{{.+}} : i32
    164  %0 = spv.GroupNonUniformIAdd "Workgroup" "Reduce" %val : i32
    [all …]
|
/external/libchrome/crypto/ |
D | p224.cc |
    212  void Reduce(FieldElement* in_out) { in Reduce() function
    463  Reduce(&h); in AddJacobian()
    470  Reduce(&i); in AddJacobian()
    477  Reduce(&r); in AddJacobian()
    490  Reduce(&r); in AddJacobian()
    498  Reduce(&z2z2); in AddJacobian()
    501  Reduce(&out->z); in AddJacobian()
    509  Reduce(&z1z1); in AddJacobian()
    512  Reduce(&out->x); in AddJacobian()
    520  Reduce(&z1z1); in AddJacobian()
    [all …]
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | multi_output_fusion_test.cc |
    86  op::Tuple(op::Reduce(), op::Reduce())); in TEST_F()
    174  op::Tuple(op::Reduce(), op::Reduce())); in TEST_F()
    207  op::Tuple(op::Reduce(), op::Reduce(), op::Reduce())); in TEST_F()
    435  op::Tuple(op::Reduce(), op::Exp())); in TEST_F()
    463  op::Tuple(op::Reduce(), op::Add())); in TEST_F()
    510  op::Tuple(op::Reduce(), op::Reduce(), op::Select())); in TEST_F()
    586  op::Tuple(op::Reduce(), op::Reduce(), op::Select())); in TEST_F()
|
D | reduction_splitter_test.cc |
    57  ASSERT_THAT(root_reduction, op::Reduce(op::Reduce(), op::Constant())); in TEST_F()
    87  ASSERT_THAT(root_reduction, op::Reduce(op::Reduce(), op::Constant())); in TEST_F()
|
/external/tensorflow/tensorflow/core/kernels/ |
D | reduction_ops_common.h |
    178  Functor::Reduce(ctx, tmp_out.flat<T>(),
    199  Functor::Reduce(ctx, helper.out<T, 0>(&tmp_out), helper.in<T, 1>(data),
    203  Functor::Reduce(ctx, helper.out<T, 1>(&tmp_out), helper.in<T, 2>(data),
    207  Functor::Reduce(ctx, helper.out<T, 1>(&tmp_out), helper.in<T, 2>(data),
    212  Functor::Reduce(ctx, helper.out<T, 1>(&tmp_out), helper.in<T, 3>(data),
    216  Functor::Reduce(ctx, helper.out<T, 2>(&tmp_out), helper.in<T, 3>(data),
    233  Functor::Reduce(ctx, tmp_out.flat<T>(),
    258  static void Reduce(OpKernelContext* ctx, OUT_T out, IN_T in,
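These Functor::Reduce specializations evaluate Eigen tensor reductions over a reshaped view of the input. The standalone sketch below shows only that underlying Eigen idiom, not the TensorFlow functor interface itself:

    #include <unsupported/Eigen/CXX11/Tensor>

    #include <iostream>

    int main() {
      // Reduce a 2x3 tensor over its first dimension, the kind of Eigen
      // expression these reduction functors evaluate under the hood.
      Eigen::Tensor<float, 2> in(2, 3);
      in.setValues({{1.f, 2.f, 3.f}, {4.f, 5.f, 6.f}});

      Eigen::array<int, 1> reduce_dims{{0}};
      Eigen::Tensor<float, 1> out = in.sum(reduce_dims);  // shape {3}: 5 7 9

      for (int i = 0; i < out.dimension(0); ++i) std::cout << out(i) << ' ';
      std::cout << '\n';
      return 0;
    }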
|
/external/llvm-project/llvm/include/llvm/Support/ |
D | Parallel.h |
    169  ReduceFuncTy Reduce, in parallel_transform_reduce() argument
    188  TG.spawn([=, &Transform, &Reduce, &Results] { in parallel_transform_reduce()
    192  R = Reduce(R, Transform(*It)); in parallel_transform_reduce()
    206  FinalResult = Reduce(FinalResult, std::move(PartialResult)); in parallel_transform_reduce()
    255  ReduceFuncTy Reduce, in parallelTransformReduce() argument
    259  return parallel::detail::parallel_transform_reduce(Begin, End, Init, Reduce, in parallelTransformReduce()
    264  Init = Reduce(std::move(Init), Transform(*I)); in parallelTransformReduce()
    283  ReduceFuncTy Reduce, in parallelTransformReduce() argument
    285  return parallelTransformReduce(std::begin(R), std::end(R), Init, Reduce, in parallelTransformReduce()
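The public entry point indexed here is llvm::parallelTransformReduce, which maps each element with Transform and folds the partial results with Reduce, falling back to a serial fold when threading is disabled. A minimal sketch, assuming the argument order shown at line 285 above:

    #include "llvm/Support/Parallel.h"

    #include <vector>

    // Sum of squares via the range overload: Transform maps each element,
    // Reduce folds the partial results (an associative Reduce is assumed).
    static int SumOfSquares(const std::vector<int> &Values) {
      return llvm::parallelTransformReduce(
          Values, /*Init=*/0,
          /*Reduce=*/[](int A, int B) { return A + B; },
          /*Transform=*/[](int V) { return V * V; });
    }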
|
/external/llvm-project/mlir/include/mlir/Reducer/ |
D | Passes.td |
    1  //===-- Passes.td - MLIR Reduce pass definition file -------*- tablegen -*-===//
    9  // This file contains definitions of the passes for the MLIR Reduce Tool.
    19  let summary = "A general reduction tree pass for the MLIR Reduce Tool";
|
/external/llvm-project/mlir/include/mlir/Dialect/SPIRV/ |
D | SPIRVNonUniformOps.td |
    256  operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
    270  %0 = spv.GroupNonUniformFAdd "Workgroup" "Reduce" %scalar : f32
    310  operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
    324  %0 = spv.GroupNonUniformFMax "Workgroup" "Reduce" %scalar : f32
    364  operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
    378  %0 = spv.GroupNonUniformFMin "Workgroup" "Reduce" %scalar : f32
    415  operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
    429  %0 = spv.GroupNonUniformFMul "Workgroup" "Reduce" %scalar : f32
    464  operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
    478  %0 = spv.GroupNonUniformIAdd "Workgroup" "Reduce" %scalar : i32
    [all …]
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | while_loop_expensive_invariant_code_motion_test.cc |
    82  EXPECT_THAT(while_body->instructions(), Contains(op::Reduce())); in TEST_F()
    103  EXPECT_THAT(while_body->instructions(), Not(Contains(op::Reduce()))); in TEST_F()
    187  EXPECT_THAT(while_body->instructions(), Not(Contains(op::Reduce()))); in TEST_F()
|
/external/tensorflow/tensorflow/compiler/xla/client/lib/ |
D | arithmetic.cc |
    109  return Reduce(predicates, f, logical_or, all_dimensions); in Any()
    164  XlaOp max_argmax = Reduce(builder, {input, iota}, in ArgMinMax()
    192  XlaOp reduced_input = Reduce(input, init_value, reducer, in ArgMinMaxTwoPass()
    203  return Reduce(select_mask, max_idx, in ArgMinMaxTwoPass()
    212  return Reduce(select_mask, min_idx, in ArgMinMaxTwoPass()
|
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | softmax_op.cc |
    85  xla::Reduce(logits, xla::MinValue(b, xla_type), max_func, {kClassDim}); in Compile()
    97  xla::Reduce(converted, xla::Zero(b, xla_accumulation_type), in Compile()
    127  xla::Reduce(logits, xla::MinValue(b, xla_type), max_func, {kClassDim}); in CrossEntropyWithLogits()
    141  xla::Reduce(converted, XlaHelpers::Zero(b, accumulation_type), in CrossEntropyWithLogits()
    158  auto sum = xla::Reduce(XlaHelpers::ConvertElementType(mul, accumulation_type), in CrossEntropyWithLogits()
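The paired max-reduce and sum-reduce above are the standard numerically stable softmax recipe. A hedged sketch of that pattern follows; the function name is hypothetical, a rank-2 [batch, class] layout is assumed, and this is not the kernel's actual code:

    #include "tensorflow/compiler/xla/client/lib/arithmetic.h"
    #include "tensorflow/compiler/xla/client/lib/constants.h"
    #include "tensorflow/compiler/xla/client/xla_builder.h"

    // Row-wise stable softmax from two Reduce calls: a max-reduce to shift
    // the logits, then a sum-reduce of the exponentials.
    xla::XlaOp BuildSoftmax(xla::XlaBuilder* b, xla::XlaOp logits) {
      const int64_t kClassDim = 1;  // assumes logits has shape [batch, class]
      auto max_fn = xla::CreateScalarMaxComputation(xla::F32, b);
      auto add_fn = xla::CreateScalarAddComputation(xla::F32, b);
      auto row_max =
          xla::Reduce(logits, xla::MinValue(b, xla::F32), max_fn, {kClassDim});
      auto shifted = xla::Sub(logits, row_max, /*broadcast_dimensions=*/{0});
      auto exps = xla::Exp(shifted);
      auto denom =
          xla::Reduce(exps, xla::Zero(b, xla::F32), add_fn, {kClassDim});
      return xla::Div(exps, denom, /*broadcast_dimensions=*/{0});
    }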
|