/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"

namespace tensorflow {

using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;

namespace {
Status ReduceSliceShapeFn(InferenceContext* c) {
  ShapeHandle handle;
  DimensionHandle dimhandle;
  DimensionHandle dim_axis = c->UnknownDim();
  // "axis" must be a scalar
  TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &handle));
  // "data" must have rank at least 1
  TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &handle));
  // "indices" must have rank 1, or rank 2 where the number of columns is 2
  if (c->RankKnown(c->input(1))) {
    TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &handle));
    TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(1), 2, &handle));
    if (c->Rank(c->input(1)) == 1) {
      // If "indices" is a vector with 0 elements, then the axis dimension of
      // the output tensor has size 0.
      DimensionHandle raw_dim_axis;
      TF_RETURN_IF_ERROR(c->Max(c->Dim(c->input(1), 0), 1, &raw_dim_axis));
      TF_RETURN_IF_ERROR(c->Subtract(raw_dim_axis, 1, &dim_axis));
    } else {  // c->Rank(c->input(1)) == 2
      TF_RETURN_IF_ERROR(
          c->Merge(c->Dim(c->input(1), 1), c->MakeDim(2), &dimhandle));
      dim_axis = c->Dim(c->input(1), 0);
    }
  }
  // shape of output tensor
  const Tensor* _axis = c->input_tensor(2);
  if (nullptr == _axis) {
    c->set_output(0, c->UnknownShapeOfRank(c->Rank(c->input(0))));
  } else {
    int64 axis = _axis->scalar<int64>()();
    TF_RETURN_IF_ERROR(c->ReplaceDim(handle, axis, dim_axis, &handle));
    c->set_output(0, handle);
  }
  return Status::OK();
}

}  // namespace

REGISTER_OP("ReduceSliceSum")
    .Input("data: T")
    .Input("indices: Tindices")
    .Input("axis: int64")
    .Output("output: T")
    .Attr("T: numbertype")
    .Attr("Tindices: {int32,int64}")
    .SetShapeFn(ReduceSliceShapeFn)
    .Doc(R"doc(
Dynamically sum over the first dimension of a tensor according to start and end
indices specified at 'indices'.

For example:

```prettyprint
# if 'data' is [[   1,   2,   3]
                [  40,  50,  60]
                [ 700, 800, 900]
                [1000,2000,3000]],

and 'indices' is [[0,1]
                  [1,1]
                  [0,2]],

the output will be [[ 1, 2, 3]
                    [ 0, 0, 0]
                    [41,52,63]].
```

The data must be at least rank 1. The indices must be of shape (?,2), where the
first column holds start indices and the second column holds end indices. The
end index is excluded from the reduction, so to reduce over rows 0, 1 and 2 you
should use start index 0 and end index 3. If the end index is smaller than or
equal to the start index, the result is zero. If the end index is out of
bounds, the reduction automatically stops at the bound, so a large end index
can be used to reduce up to the bound.

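For instance, following the semantics above, summing rows 0 through 2 of the
'data' above into a single output row uses start index 0 and end index 3:

```prettyprint
# if 'data' is as above and 'indices' is [[0,3]],

the output will be [[741,852,963]],

i.e. the element-wise sum of rows 0, 1 and 2 of 'data'.
```
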
data: The source of data from which the reduction is computed.
indices: Start and end indices that control which part of the data is included.
T: the type of data.
Tindices: the type of indices, must be int32 or int64.
output: the computed sum values.
)doc");

REGISTER_OP("ReduceSliceProd")
    .Input("data: T")
    .Input("indices: Tindices")
    .Input("axis: int64")
    .Output("output: T")
    .Attr("T: numbertype")
    .Attr("Tindices: {int32,int64}")
    .SetShapeFn(ReduceSliceShapeFn)
    .Doc(R"doc(
Dynamically compute the product over the first dimension of a tensor according
to start and end indices specified at 'indices'.

For example:

```prettyprint
# if 'data' is [[   1,   2,   3]
                [  40,  50,  60]
                [ 700, 800, 900]
                [1000,2000,3000]],

and 'indices' is [[0,1]
                  [1,1]
                  [0,2]],

the output will be [[ 1,  2,  3]
                    [ 1,  1,  1]
                    [40,100,180]].
```

The data must be at least rank 1. The indices can be of shape (?,2), where the
first column holds start indices and the second column holds end indices. The
end index is excluded from the reduction, so to reduce over rows 0, 1 and 2 you
should use start index 0 and end index 3. If the end index is smaller than or
equal to the start index, the result is one. If the end index is out of bounds,
the reduction automatically stops at the bound, so a large end index can be
used to reduce up to the bound. The indices can also be of shape (?); in this
case, the start index for row i is the element at position i and the end index
is the element at position i+1. That is:

```prettyprint
indices = [0,5,11,115]

is equivalent to

indices = [ [0,5],
            [5,11],
            [11,115]]
```
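
As a concrete illustration of this equivalence, with the 'data' above a rank-1
'indices' of [0,1,3] expands to the pairs [[0,1],[1,3]], so the second output
row is the element-wise product of rows 1 and 2 of 'data':

```prettyprint
# if 'data' is as above and 'indices' is [0,1,3],

the output will be [[    1,    2,    3]
                    [28000,40000,54000]].
```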

data: The source of data from which the reduction is computed.
indices: Start and end indices that control which part of the data is included.
T: the type of data.
Tindices: the type of indices, must be int32 or int64.
output: the computed product values.
)doc");

REGISTER_OP("ReduceSliceMax")
    .Input("data: T")
    .Input("indices: Tindices")
    .Input("axis: int64")
    .Output("output: T")
    .Attr("T: numbertype")
    .Attr("Tindices: {int32,int64}")
    .SetShapeFn(ReduceSliceShapeFn)
    .Doc(R"doc(
Dynamically compute the maximum over the first dimension of a tensor according
to start and end indices specified at 'indices'.

For example:

```prettyprint
# if 'data' is [[   1,  20,   3]
                [ 400,   5,  60]
                [  70,   8, 900]
                [1000,2000,3000]],

and 'indices' is [[0,1]
                  [1,1]
                  [0,2]],

the output will be [[ 1, 20, 3]
                    [ -BIG_VALUE, -BIG_VALUE, -BIG_VALUE]
                    [ 400, 20, 60]].
```

The data must be at least rank 1. The indices can be of shape (?,2), where the
first column holds start indices and the second column holds end indices. The
end index is excluded from the reduction, so to reduce over rows 0, 1 and 2 you
should use start index 0 and end index 3. If the end index is smaller than or
equal to the start index, the result is the lowest possible value of the data
type. If the end index is out of bounds, the reduction automatically stops at
the bound, so a large end index can be used to reduce up to the bound. The
indices can also be of shape (?); in this case, the start index for row i is
the element at position i and the end index is the element at position i+1.
That is:

```prettyprint
indices = [0,5,11,115]

is equivalent to

indices = [ [0,5],
            [5,11],
            [11,115]]
```
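
For instance, with the 'data' above a rank-1 'indices' of [0,1,3] expands to
the pairs [[0,1],[1,3]], so the second output row is the element-wise maximum
of rows 1 and 2 of 'data':

```prettyprint
# if 'data' is as above and 'indices' is [0,1,3],

the output will be [[  1, 20,   3]
                    [400,  8, 900]].
```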

data: The source of data from which the reduction is computed.
indices: Start and end indices that control which part of the data is included.
T: the type of data.
Tindices: the type of indices, must be int32 or int64.
output: the computed maximum values.
)doc");

REGISTER_OP("ReduceSliceMin")
    .Input("data: T")
    .Input("indices: Tindices")
    .Input("axis: int64")
    .Output("output: T")
    .Attr("T: numbertype")
    .Attr("Tindices: {int32,int64}")
    .SetShapeFn(ReduceSliceShapeFn)
    .Doc(R"doc(
Dynamically compute the minimum over the first dimension of a tensor according
to start and end indices specified at 'indices'.

For example:

```prettyprint
# if 'data' is [[   1,  20,   3]
                [ 400,   5,  60]
                [  70,   8, 900]
                [1000,2000,3000]],

and 'indices' is [[0,1]
                  [1,1]
                  [0,2]],

the output will be [[ 1, 20, 3]
                    [ +BIG_VALUE, +BIG_VALUE, +BIG_VALUE]
                    [ 1, 5, 3]].
```

The data must be at least rank 1. The indices can be of shape (?,2), where the
first column holds start indices and the second column holds end indices. The
end index is excluded from the reduction, so to reduce over rows 0, 1 and 2 you
should use start index 0 and end index 3. If the end index is smaller than or
equal to the start index, the result is the highest possible value of the data
type. If the end index is out of bounds, the reduction automatically stops at
the bound, so a large end index can be used to reduce up to the bound. The
indices can also be of shape (?); in this case, the start index for row i is
the element at position i and the end index is the element at position i+1.
That is:

```prettyprint
indices = [0,5,11,115]

is equivalent to

indices = [ [0,5],
            [5,11],
            [11,115]]
```
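
Likewise, with the 'data' above a rank-1 'indices' of [0,1,3] expands to the
pairs [[0,1],[1,3]], so the second output row is the element-wise minimum of
rows 1 and 2 of 'data':

```prettyprint
# if 'data' is as above and 'indices' is [0,1,3],

the output will be [[ 1, 20,  3]
                    [70,  5, 60]].
```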

data: The source of data from which the reduction is computed.
indices: Start and end indices that control which part of the data is included.
T: the type of data.
Tindices: the type of indices, must be int32 or int64.
output: the computed minimum values.
)doc");

}  // namespace tensorflow