/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/xla/service/gpu/reduction_degenerate_dim_remover.h"

#include <optional>
#include <utility>

#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module_config.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/tests/filecheck.h"
#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/stream_executor/lib/statusor.h"

namespace xla {

namespace {
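// Tests for the ReductionDegenerateDimRemover GPU pass. As exercised below,
// the pass strips degenerate (size-1) dimensions from reduction inputs and
// outputs, inserting bitcasts so the surrounding shapes are unchanged.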
class ReductionDegenerateDimRemoverTest : public HloTestBase {
 public:
  void CheckDegenerateDimRemover(absl::string_view hlo,
                                 std::optional<absl::string_view> expected) {
    RunAndFilecheckHloRewrite(hlo, gpu::ReductionDegenerateDimRemover{},
                              expected);
  }
};

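// A reduction over an operand with interleaved size-1 dimensions should be
// rewritten to reduce the equivalent f32[3,4,5] shape, with bitcasts
// reshaping the input and restoring the f32[1,1,1,1] output shape.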
TEST_F(ReductionDegenerateDimRemoverTest, ReductionWithDegenerateDimensions) {
  const char* hlo = R"(
HloModule ReduceWithDegenerateDimensions

add {
  accum = f32[] parameter(0)
  op = f32[] parameter(1)
  ROOT out = f32[] add(accum, op)
}

ENTRY main {
  input = f32[1,3,1,4,1,5,1] parameter(0)
  zero = f32[] constant(0)

  ROOT out = f32[1,1,1,1] reduce(input, zero), dimensions={1,3,5}, to_apply=add
}

)";

  CheckDegenerateDimRemover(hlo, R"(
// CHECK: [[bitcast_0:%[^ ]+]] = f32[3,4,5]{2,1,0} bitcast([[input_1:%[^ ]+]])
// CHECK: [[reduce_2:%[^ ]+]] = f32[] reduce([[bitcast_0]], [[zero_3:%[^ ]+]]), dimensions={0,1,2}, to_apply=[[add_4:%[^ ]+]]
// CHECK: ROOT [[bitcast_1_5:%[^ ]+]] = f32[1,1,1,1]{3,2,1,0} bitcast([[reduce_2]])
)");
}

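// A variadic (multi-operand) reduction should be handled the same way: each
// input is bitcast to the degenerate-free shape, and each element of the
// result tuple is bitcast back to its original shape.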
TEST_F(ReductionDegenerateDimRemoverTest,
       ReductionWithDegenerateDimensionsVariadic) {
  const char* hlo = R"(
HloModule ReduceWithDegenerateDimensions

argmax {
  running_max = f32[] parameter(0)
  running_max_idx = u32[] parameter(1)
  current_value = f32[] parameter(2)
  current_value_idx = u32[] parameter(3)

  current = (f32[], u32[]) tuple(running_max, running_max_idx)
  potential = (f32[], u32[]) tuple(current_value, current_value_idx)

  cmp_code = pred[] compare(current_value, running_max), direction=GT

  new_max = f32[] select(cmp_code, current_value, running_max)
  new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)

  ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}

ENTRY main {
  input = f32[1,3,1,4,1,5,1] parameter(0)
  idxs = u32[1,3,1,4,1,5,1] parameter(1)
  zero = f32[] constant(0)
  zero_idx = u32[] constant(0)

  ROOT out = (f32[1,1,1,1], u32[1,1,1,1]) reduce(input, idxs, zero, zero_idx), dimensions={1,3,5}, to_apply=argmax
}

)";

  CheckDegenerateDimRemover(hlo, R"(
// CHECK: [[bitcast_0:%[^ ]+]] = f32[3,4,5]{2,1,0} bitcast([[input_1:%[^ ]+]])
// CHECK: [[bitcast_1_2:%[^ ]+]] = u32[3,4,5]{2,1,0} bitcast([[idxs_3:%[^ ]+]])
// CHECK: [[reduce_4:%[^ ]+]] = (f32[], u32[]) reduce([[bitcast_0]], [[bitcast_1_2]], [[zero_5:%[^ ]+]], [[zero_idx_6:%[^ ]+]]), dimensions={0,1,2}, to_apply=[[argmax_7:%[^ ]+]]
// CHECK-NEXT: [[get_tuple_element_8:%[^ ]+]] = f32[] get-tuple-element([[reduce_4]]), index=0
// CHECK-NEXT: [[bitcast_2_9:%[^ ]+]] = f32[1,1,1,1]{3,2,1,0} bitcast([[get_tuple_element_8]])
// CHECK-NEXT: [[get_tuple_element_1_10:%[^ ]+]] = u32[] get-tuple-element([[reduce_4]]), index=1
// CHECK-NEXT: [[bitcast_3_11:%[^ ]+]] = u32[1,1,1,1]{3,2,1,0} bitcast([[get_tuple_element_1_10]])
// CHECK-NEXT: ROOT [[tuple_12:%[^ ]+]] = (f32[1,1,1,1]{3,2,1,0}, u32[1,1,1,1]{3,2,1,0}) tuple([[bitcast_2_9]], [[bitcast_3_11]])
)");
}

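// When only degenerate dimensions are reduced, the reduction is trivial and
// should collapse to a single bitcast of the input to the output shape.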
TEST_F(ReductionDegenerateDimRemoverTest, DegenerateWithEmptyDimension) {
  const char* hlo = R"(
HloModule ReduceWithDegenerateDimensions

add {
  accum = f32[] parameter(0)
  op = f32[] parameter(1)
  ROOT out = f32[] add(accum, op)
}

ENTRY main {
  input = f32[1,3,1,4,1,5,1] parameter(0)
  zero = f32[] constant(0)

  ROOT out = f32[3,4,5,1] reduce(input, zero), dimensions={0,2,4}, to_apply=add
}
)";

  CheckDegenerateDimRemover(hlo,
                            R"(
// CHECK: ROOT [[bitcast_0:%[^ ]+]] = f32[3,4,5,1]{3,2,1,0} bitcast([[input_1:%[^ ]+]])
)");
}

}  // namespace
}  // namespace xla