/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// Neural Net operation support for StreamExecutor instances.
//
// This is an abstract interface for a platform to optionally support common
// neural net operations; it accommodates implementations such as the cudnn
// library operations.

#ifndef TENSORFLOW_STREAM_EXECUTOR_DNN_H_
#define TENSORFLOW_STREAM_EXECUTOR_DNN_H_

#include <functional>
#include <limits>
#include <memory>

#include "tensorflow/stream_executor/device_memory.h"
#include "tensorflow/stream_executor/lib/array_slice.h"
#include "tensorflow/stream_executor/lib/status.h"
#include "tensorflow/stream_executor/lib/statusor.h"
#include "tensorflow/stream_executor/platform/logging.h"
#include "tensorflow/stream_executor/platform/port.h"

namespace Eigen {
struct half;
}  // namespace Eigen

namespace perftools {
namespace gputools {

class HostBuffer;
class Stream;
class ScratchAllocator;

namespace dnn {

// Describes how an input or output layer's data is formatted.
// Specify int64 so there's no padding in BatchDescriptor.
enum class DataLayout : int64 {
  kYXDepthBatch = 0,  // Same as dist_belief::DF_DEPTH_MAJOR.
  kYXBatchDepth,      // Same as dist_belief::DF_BATCH_MAJOR.
  kBatchYXDepth,      // Same as run_brain output, and tensorflow's layout.
  kBatchDepthYX,      // cuDNN's NCHW layout, data laid out as image, feature
                      // maps, rows, columns.
  kBatchDepthYX4,     // cuDNN's NCHW_VECT_C layout, data laid out the same as
                      // kBatchDepthYX but each element is a vector of 4
                      // feature maps.
};

// Specifies an index to use when accessing specific spatial dimensions.
enum class DimIndex : int {
  X = 0,
  Y = 1,
  Z = 2,
};

// Helper functions to make methods more readable.
inline int64 GetDim(const std::vector<int64>& data, DimIndex dim) {
  return data.rbegin()[static_cast<int64>(dim)];
}

inline void SetDim(std::vector<int64>* data, DimIndex dim, int64 value) {
  data->rbegin()[static_cast<int64>(dim)] = value;
}
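
// Example (illustrative sketch): spatial dimensions are stored minor-to-major
// from the back of the vector ("..., y, x"), so DimIndex::X always addresses
// the last element and DimIndex::Y the one before it.
//
//   std::vector<int64> dims = {/*z=*/4, /*y=*/8, /*x=*/16};
//   int64 width = GetDim(dims, DimIndex::X);   // 16
//   int64 height = GetDim(dims, DimIndex::Y);  // 8
//   SetDim(&dims, DimIndex::Z, 2);             // dims becomes {2, 8, 16}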

// Returns a string representation of the given data layout.
string DataLayoutString(DataLayout layout);

// Specifies a quantization for activations in a given BatchDescriptor.
enum class QuantizedActivationMode {
  k8Bit = 1,
  k16Bit = 2,
  k32Bit = 4,
};

// Specifies the data type used by an operation.
enum class DataType {
  kFloat = 0,
  kDouble = 1,
  kHalf = 2,
  kInt8 = 3,
};

// A helper class to convert C/C++ types to the proper enums.
template <typename T>
struct ToDataType;
template <>
struct ToDataType<float> {
  static constexpr DataType value = DataType::kFloat;
};
template <>
struct ToDataType<double> {
  static constexpr DataType value = DataType::kDouble;
};
template <>
struct ToDataType<Eigen::half> {
  static constexpr DataType value = DataType::kHalf;
};
template <>
struct ToDataType<int8> {
  static constexpr DataType value = DataType::kInt8;
};
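
// Example (illustrative sketch): ToDataType lets templated code map a C++
// element type to the corresponding DataType enum at compile time.
//
//   static_assert(ToDataType<float>::value == DataType::kFloat, "");
//   static_assert(ToDataType<int8>::value == DataType::kInt8, "");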

// Specifies the type of an RNN model.
enum class RnnMode {
  kRnnRelu = 0,
  kRnnTanh = 1,
  kRnnLstm = 2,
  kRnnGru = 3,
};

// Specifies the input model and whether there is a linear transformation
// between the input state and the first layer hidden state.
enum class RnnInputMode {
  kRnnLinearSkip = 0,
  kRnnSkipInput = 1,
};

// Specifies the number of directions used in an RNN model. When the
// bidirectional mode is used, the input states and output sequence contain
// data for both directions.
enum class RnnDirectionMode {
  kRnnUnidirectional = 0,
  kRnnBidirectional = 1,
};

// Relevant to DepthToSpace and SpaceToDepth. This is the write layout when
// performing depth to space and the read layout when performing space to
// depth. It's specified with most-major dimension first and most-minor
// dimension last. In DepthToSpace, the D*M^2 values are read in and then, for
// DepthHeightWidth, written out to the output patch, by varying first width,
// then height, then depth. In C array format, it looks like
// [depth][height][width]. See DepthToSpace comment for more information.
enum class DepthToSpaceLayout { DepthHeightWidth };

// Specifies the descriptor for an RNN model.
//
// An example use case:
//  * The user first creates a model through createRnnDescriptor.
//  * The user queries the size of the underlying opaque parameter buffer.
//  * The user creates and initializes a parameter buffer of the proper size.
//  * The user runs forward and backward operations using this RNN descriptor.
//  * Once in a while, the user queries maintainable weights and bias regions
//    from the underlying parameter buffer. They are more likely to be forward
//    compatible and should be used in saving and restoring a model.
//  * The user releases the RNN descriptor when the model is no longer in use.
class RnnDescriptor {
 public:
  struct ParamsRegion {
    int64 offset;
    int64 size;
  };
  typedef std::vector<ParamsRegion> ParamsRegions;
  virtual ~RnnDescriptor() {}
  virtual int64 ParamsSizeInBytes() const { return -1; }
  virtual ParamsRegions ParamsWeightRegions() const { return ParamsRegions(); }
  virtual ParamsRegions ParamsBiasRegions() const { return ParamsRegions(); }
};
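
// Example (illustrative sketch, assuming 'rnn_desc' is an RnnDescriptor*
// obtained from a platform-specific factory): the opaque parameter buffer can
// be sized up front, and the weight regions enumerated for checkpointing.
//
//   int64 params_bytes = rnn_desc->ParamsSizeInBytes();
//   for (const RnnDescriptor::ParamsRegion& region :
//        rnn_desc->ParamsWeightRegions()) {
//     // region.offset and region.size locate one weight matrix within the
//     // parameter buffer.
//   }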

// Specifies the sequence in an RNN model.
//
// The user is responsible for releasing this descriptor when it is no longer
// in use. The destructor releases the underlying descriptors.
class RnnSequenceTensorDescriptor {
 public:
  virtual ~RnnSequenceTensorDescriptor() {}
};

// Specifies either the input or the hidden state in an RNN model.
//
// The user is responsible for releasing this descriptor when it is no longer
// in use. The destructor releases the underlying descriptors.
class RnnStateTensorDescriptor {
 public:
  virtual ~RnnStateTensorDescriptor() {}
};

// Returns a string representation of the given quantization mode.
string QuantizedActivationModeString(QuantizedActivationMode mode);

// Describes the dimensions that a layer consumes/produces.
//
// This is a matrix (height, width), its "depth" (feature_map_count),
// how many of these matrices are present (count),
// and the maximum and minimum values expected in the matrix (value_max,
// value_min).
// If input is quantized, all values greater
// than value_max will be clipped to value_max and all values less than
// value_min will be clipped to value_min.
// When quantized output is dequantized no value will be greater than
// value_max or less than value_min.
//
// Uses the named argument construction form:
//
//  auto input_batch_dimensions =
//      BatchDescriptor().set_count(42).set_feature_map_count(7)...
//
// Details:
//
// For a convolutional layer, a single inference takes a 3-dimensional matrix
// of input and produces a 3-dimensional matrix of output. We call the three
// dimensions height, width and feature_map_count, where for an image, the
// height and width correspond to the Y and X pixel indices, respectively, and
// the feature_map_count corresponds to the RGB dimension of the input data.
// Then the count indicates how many 3D matrices are being presented to be
// processed at once; this corresponds to the neural network concept of
// minibatch size.
//
// For a fully connected layer, it's better to put the nodes of the layer in
// the feature_map_count, and leave the height and width as degenerate (== 1).
// Count indicates how many input vectors (degenerate 3D matrices) are to be
// processed.
//
// If unspecified, value_max and value_min default to 0.0.
// If value_max == value_min the Stream will attempt to derive valid values -
// for example the output of Relu6 activation will always be in the range
// [0.0, 6.0].
//
// If unspecified, layout defaults to kYXDepthBatch.
class BatchDescriptor {
 public:
  // Creates a "blank" batch descriptor, which should be initialized via the
  // named argument helpers.
  BatchDescriptor();
  explicit BatchDescriptor(int ndims);

  // Clones values from 'other' for initialization.
  void CloneFrom(const BatchDescriptor& other);

  string ToString() const;
  string ToShortString() const;

  // Accessors.
  int64 count() const { return count_; }
  int64 feature_map_count() const { return feature_map_count_; }
  int64 height() const { return GetDim(spatial_size_, DimIndex::Y); }
  int64 width() const { return GetDim(spatial_size_, DimIndex::X); }
  int64 spatial_dim(DimIndex dim) const { return GetDim(spatial_size_, dim); }
  int ndims() const { return ndims_; }
  float value_max() const { return value_max_; }
  float value_min() const { return value_min_; }
  DataLayout layout() const { return layout_; }
  QuantizedActivationMode quantized_activation_mode() const {
    return quantized_activation_mode_;
  }
  // Full dimensions of the underlying data, ordered according to a specific
  // layout.
  std::vector<int64> full_dims(const DataLayout& layout) const;

  // Full strides of the underlying data, ordered according to a specific
  // layout.
  std::vector<int64> full_strides(const DataLayout& layout) const;

  // Named-argument helpers for avoiding user error during construction.
  BatchDescriptor& set_count(int64 value) {
    count_ = value;
    return *this;
  }
  BatchDescriptor& set_feature_map_count(int64 value) {
    feature_map_count_ = value;
    return *this;
  }
  BatchDescriptor& set_height(int64 value) {
    SetDim(&spatial_size_, DimIndex::Y, value);
    return *this;
  }
  BatchDescriptor& set_width(int64 value) {
    SetDim(&spatial_size_, DimIndex::X, value);
    return *this;
  }
  BatchDescriptor& set_spatial_dim(DimIndex dim, int64 value) {
    SetDim(&spatial_size_, dim, value);
    return *this;
  }
  BatchDescriptor& set_value_max(float value) {
    value_max_ = value;
    return *this;
  }
  BatchDescriptor& set_value_min(float value) {
    value_min_ = value;
    return *this;
  }
  BatchDescriptor& set_layout(DataLayout layout) {
    layout_ = layout;
    return *this;
  }
  BatchDescriptor& set_quantized_activation_mode(
      QuantizedActivationMode quantized_activation_mode) {
    quantized_activation_mode_ = quantized_activation_mode;
    return *this;
  }

  // Return the number of nodes in a single feature map.
  int64 NodesPerFeatureMap() const;

  // Return the number of nodes across all feature maps. Note that this is not
  // affected by the batch count.
  int64 NodesAcrossFeatureMaps() const;

  // Returns the number of elements (e.g. RGB pixel values) required to hold a
  // given batch descriptor, given a no-padding assumption. Note that this is
  // affected by the batch count.
  int64 ElementCount() const;

  // Return the number of weights required to fully connect a layer with
  // dimensions given by the 'input' descriptor with a layer with dimensions
  // given by the 'output' descriptor.
  static int64 FullyConnectedWeightCount(const BatchDescriptor& input,
                                         const BatchDescriptor& output);

  // Return the number of biases required to fully connect to an output layer
  // with dimensions given the 'output' descriptor.
  static int64 FullyConnectedBiasCount(const BatchDescriptor& output);

  // Return a BatchDescriptor for the output of a depth concatenation
  // with the given input descriptors. The inputs should have the same
  // dimensions, except possibly for feature_map_count(), though this
  // function does not verify that.
  static BatchDescriptor DepthConcatenateOutputDescriptor(
      port::ArraySlice<dnn::BatchDescriptor> inputs);

 private:
  int64 count_;
  int64 feature_map_count_;
  // Stored as: ..., y, x.
  std::vector<int64> spatial_size_;
  float value_max_;
  float value_min_;
  DataLayout layout_;
  int ndims_;
  QuantizedActivationMode quantized_activation_mode_;
};
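
// Example (illustrative sketch): a minibatch of 32 RGB images of size 224x224,
// stored in cuDNN's NCHW layout, could be described as:
//
//   BatchDescriptor input;
//   input.set_count(32)
//       .set_feature_map_count(3)
//       .set_height(224)
//       .set_width(224)
//       .set_layout(DataLayout::kBatchDepthYX);
//
// With these settings, ElementCount() would be 32 * 3 * 224 * 224.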

// Describes how a filter is laid out in the memory.
// Specify int64 so there's no padding in FilterDescriptor.
enum class FilterLayout : int64 {
  kOutputInputYX = 0,  // cuDNN's default filter layout, laid out as:
                       // (major) output feature maps >> input feature maps >>
                       // rows >> columns (minor).
  kOutputInputYX4,     // laid out the same as kOutputInputYX but each element
                       // is a vector of 4 feature maps.
  kInputYXOutput,      // Same as dist_belief's default filter layout.
  kYXInputOutput,      // Same as tensorflow's default filter layout.
};

// Returns a string representation of the given filter layout.
string FilterLayoutString(FilterLayout layout);

// Describes a filter for the convolution. This is the "window" from
// height-by-width patches of each of the feature maps in the input layer to
// the cells within the output feature map.
//
// Uses the named argument construction form:
//
//  FilterDescriptor filter_dimensions;
//  filter_dimensions
//    .set_output_feature_map_count(42)
//    .set_input_feature_map_count(7)
//    ...
//
// Arguments:
// - output_feature_map_count: number of feature maps in the output layer.
// - input_feature_map_count: number of feature maps in the input layer (from
//   which the filter patch is taken).
// - input_filter_height: "height" number of neurons used in the sliding window
//   over the input layer.
// - input_filter_width: "width" number of neurons used in the sliding window
//   over the input layer.
//
// Sometimes names like "filter input height" are referred to by synonymous
// terminology, such as "kernel y size".
//
// If unspecified, layout defaults to kOutputInputYX.
class FilterDescriptor {
 public:
  // By default construction, all dimensions are set to zero, so they should
  // all be populated by the user via the named-argument helpers below. (See
  // class comment for details.)
  FilterDescriptor();
  explicit FilterDescriptor(int ndims);
  ~FilterDescriptor();

  // Named-argument helpers for avoiding user error during construction.
  FilterDescriptor& set_output_feature_map_count(int64 value) {
    output_feature_map_count_ = value;
    return *this;
  }
  FilterDescriptor& set_input_feature_map_count(int64 value) {
    input_feature_map_count_ = value;
    return *this;
  }
  FilterDescriptor& set_input_filter_height(int64 value) {
    SetDim(&input_filter_dims_, DimIndex::Y, value);
    return *this;
  }
  FilterDescriptor& set_input_filter_width(int64 value) {
    SetDim(&input_filter_dims_, DimIndex::X, value);
    return *this;
  }
  FilterDescriptor& set_layout(FilterLayout layout) {
    layout_ = layout;
    return *this;
  }
  FilterDescriptor& set_spatial_dim(DimIndex dim, int64 value) {
    SetDim(&input_filter_dims_, dim, value);
    return *this;
  }
  int ndims() const { return ndims_; }

  void CloneFrom(const FilterDescriptor& other);

  string ToString() const;
  string ToShortString() const;

  // Returns the number of weights required as parameters for a convolution
  // using this filter descriptor.
  int64 ComputeWeightCount() const;

  // Returns the number of biases required as parameters for a convolution
  // using this filter descriptor.
  int64 bias_count() const { return output_feature_map_count_; }

  int64 output_feature_map_count() const { return output_feature_map_count_; }
  int64 input_feature_map_count() const { return input_feature_map_count_; }
  int64 input_filter_height() const {
    return GetDim(input_filter_dims_, DimIndex::Y);
  }
  int64 input_filter_width() const {
    return GetDim(input_filter_dims_, DimIndex::X);
  }
  int64 input_filter_dim(DimIndex dim) const {
    return GetDim(input_filter_dims_, dim);
  }

  FilterLayout layout() const { return layout_; }
  std::vector<int64> input_filter_dims() const { return input_filter_dims_; }

 private:
  int64 output_feature_map_count_;
  int64 input_feature_map_count_;
  // Stored as: ..., y, x.
  std::vector<int64> input_filter_dims_;
  int ndims_;
  FilterLayout layout_;
};
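
// Example (illustrative sketch): a 3x3 filter bank mapping 3 input feature
// maps to 64 output feature maps could be described as:
//
//   FilterDescriptor filter;
//   filter.set_output_feature_map_count(64)
//       .set_input_feature_map_count(3)
//       .set_input_filter_height(3)
//       .set_input_filter_width(3);
//
// ComputeWeightCount() would then be 64 * 3 * 3 * 3, and bias_count() 64.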

// Describes how padding should be aligned when the total number of pad
// elements is odd.
enum class PadAlignment : int64 {
  kDefault = 0,        // default padding for the device.
  kCudnnPadding,       // cuDNN padding - prefer to pad at the start.
  kTensorFlowPadding,  // TensorFlow padding - prefer to pad at the end.
};

// Returns a string representation of the given padding alignment.
string PadAlignmentString(PadAlignment alignment);

// Describes a convolution.
//
// Uses the named argument construction form:
//
//  ConvolutionDescriptor convolution_dimensions;
//  convolution_dimensions
//    .set_vertical_filter_stride(2)
//    .set_horizontal_filter_stride(2)
//    ...
//
// Arguments:
// - zero_padding_height: padding of the "y dimension" of the input data. Note
//   that this is different from the height of the filter.
// - zero_padding_width: analogous to the height above, but in the "x
//   dimension".
// - vertical_filter_stride: the convolution slides a 2-dimensional window of
//   filter-height-by-filter-width over the input layer -- the center of that
//   window is moved in the "y dimension" according to this stride value.
// - horizontal_filter_stride: analogous to the vertical stride above, but in
//   the "x dimension".
// - vertical_dilation_rate: there will be (vertical_dilation_rate - 1) skipped
//   cells between each filter element in the "y dimension".
// - horizontal_dilation_rate: there will be (horizontal_dilation_rate - 1)
//   skipped cells between each filter element in the "x dimension".
class ConvolutionDescriptor {
 public:
  // By default construction, there is no zero-padding and the filter stride is
  // 1x1 (centering the filter on every cell in the input layer's
  // width-by-height area).
  ConvolutionDescriptor();
  explicit ConvolutionDescriptor(int ndims);
  ~ConvolutionDescriptor();

  string ToString() const;
  string ToShortString() const;

  ConvolutionDescriptor& set_zero_padding_height(int64 value) {
    SetDim(&zero_padding_, DimIndex::Y, value);
    return *this;
  }
  ConvolutionDescriptor& set_zero_padding_width(int64 value) {
    SetDim(&zero_padding_, DimIndex::X, value);
    return *this;
  }
  ConvolutionDescriptor& set_zero_padding(DimIndex dim, int64 value) {
    SetDim(&zero_padding_, dim, value);
    return *this;
  }
  ConvolutionDescriptor& set_vertical_filter_stride(int64 value) {
    SetDim(&filter_strides_, DimIndex::Y, value);
    return *this;
  }
  ConvolutionDescriptor& set_horizontal_filter_stride(int64 value) {
    SetDim(&filter_strides_, DimIndex::X, value);
    return *this;
  }
  ConvolutionDescriptor& set_filter_stride(DimIndex dim, int64 value) {
    SetDim(&filter_strides_, dim, value);
    return *this;
  }
  ConvolutionDescriptor& set_vertical_dilation_rate(int64 value) {
    SetDim(&dilation_rates_, DimIndex::Y, value);
    return *this;
  }
  ConvolutionDescriptor& set_horizontal_dilation_rate(int64 value) {
    SetDim(&dilation_rates_, DimIndex::X, value);
    return *this;
  }
  ConvolutionDescriptor& set_dilation_rate(DimIndex dim, int64 value) {
    SetDim(&dilation_rates_, dim, value);
    return *this;
  }
  ConvolutionDescriptor& set_pad_alignment(PadAlignment pad_alignment) {
    pad_alignment_ = pad_alignment;
    return *this;
  }
  int64 zero_padding_height() const {
    return GetDim(zero_padding_, DimIndex::Y);
  }
  int64 zero_padding_width() const {
    return GetDim(zero_padding_, DimIndex::X);
  }
  int64 vertical_filter_stride() const {
    return GetDim(filter_strides_, DimIndex::Y);
  }
  int64 horizontal_filter_stride() const {
    return GetDim(filter_strides_, DimIndex::X);
  }
  int64 vertical_dilation_rate() const {
    return GetDim(dilation_rates_, DimIndex::Y);
  }
  int64 horizontal_dilation_rate() const {
    return GetDim(dilation_rates_, DimIndex::X);
  }

  int zero_padding(DimIndex dim) const { return GetDim(zero_padding_, dim); }
  int filter_stride(DimIndex dim) const { return GetDim(filter_strides_, dim); }
  int dilation_rate(DimIndex dim) const { return GetDim(dilation_rates_, dim); }
  PadAlignment pad_alignment() const { return pad_alignment_; }
  int ndims() const { return ndims_; }

  std::vector<int64> strides() const { return filter_strides_; }
  std::vector<int64> dilations() const { return dilation_rates_; }
  std::vector<int64> padding() const { return zero_padding_; }

 private:
  // Stored as: ..., y, x.
  std::vector<int64> zero_padding_;
  std::vector<int64> filter_strides_;
  std::vector<int64> dilation_rates_;
  PadAlignment pad_alignment_;
  int ndims_;
  // TODO(leary) cudnn provides these fields, but need to characterize what
  // their effect is -- they may be boolean rather than integral.
  // int64 upscale_input_x;
  // int64 upscale_input_y;
};
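
// Example (illustrative sketch): a convolution that zero-pads the input by
// one cell in each spatial dimension and moves the filter window by two cells
// per step could be described as:
//
//   ConvolutionDescriptor conv;
//   conv.set_zero_padding_height(1)
//       .set_zero_padding_width(1)
//       .set_vertical_filter_stride(2)
//       .set_horizontal_filter_stride(2);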

// A patch of values in the input can be pooled via either a max or an average
// operation.
// Specify int64 so there's no padding in PoolingDescriptor.
enum class PoolingMode : int64 {
  kMaximum,
  kAverage,
};

// Specify the dimension in which to concatenate inputs in space.
// Specify int64 so there's no padding in SpaceConcatenateMode.
enum class SpaceConcatenateMode : int64 {
  XDirection,
  YDirection,
};

// Returns a short name for the pooling mode, e.g. "Avg".
string ShortPoolingModeString(PoolingMode mode);

// Describes a pooling operation to be enqueued onto a stream via a platform's
// DnnSupport.
//
// TODO(broune): describe how padding works and what happens if the
// window height/width is not divisible by the vertical/horizontal
// stride.
//
// Arguments:
//  pooling_mode: pooling operator to use on the input patch
//  window_height: height of input window
//  window_width: width of input window
//  vertical_stride: vertical delta for center of the input patch
//  horizontal_stride: horizontal delta for center of the input patch
class PoolingDescriptor {
 public:
  PoolingDescriptor();
  explicit PoolingDescriptor(int ndims);

  PoolingDescriptor& set_pooling_mode(PoolingMode value) {
    mode_ = value;
    return *this;
  }
  PoolingDescriptor& set_window_height(int64 value) {
    SetDim(&window_, DimIndex::Y, value);
    return *this;
  }
  PoolingDescriptor& set_window_width(int64 value) {
    SetDim(&window_, DimIndex::X, value);
    return *this;
  }
  PoolingDescriptor& set_window(DimIndex dim, int64 value) {
    SetDim(&window_, dim, value);
    return *this;
  }
  PoolingDescriptor& set_vertical_padding(int64 value) {
    SetDim(&padding_, DimIndex::Y, value);
    return *this;
  }
  PoolingDescriptor& set_horizontal_padding(int64 value) {
    SetDim(&padding_, DimIndex::X, value);
    return *this;
  }
  PoolingDescriptor& set_padding(DimIndex dim, int64 value) {
    SetDim(&padding_, dim, value);
    return *this;
  }
  PoolingDescriptor& set_vertical_stride(int64 value) {
    SetDim(&strides_, DimIndex::Y, value);
    return *this;
  }
  PoolingDescriptor& set_horizontal_stride(int64 value) {
    SetDim(&strides_, DimIndex::X, value);
    return *this;
  }
  PoolingDescriptor& set_stride(DimIndex dim, int64 value) {
    SetDim(&strides_, dim, value);
    return *this;
  }
  PoolingDescriptor& set_propagate_nans(bool value) {
    propagate_nans_ = value;
    return *this;
  }

  int ndims() const { return ndims_; }
  void CloneFrom(const PoolingDescriptor& other);

  string ToString() const;
  string ToShortString() const;

  PoolingMode mode() const { return mode_; }
  int64 window_height() const { return GetDim(window_, DimIndex::Y); }
  int64 window_width() const { return GetDim(window_, DimIndex::X); }
  int64 window(DimIndex dim) const { return GetDim(window_, dim); }
  int64 vertical_padding() const { return GetDim(padding_, DimIndex::Y); }
  int64 horizontal_padding() const { return GetDim(padding_, DimIndex::X); }
  int64 padding(DimIndex dim) const { return GetDim(padding_, dim); }
  int64 vertical_stride() const { return GetDim(strides_, DimIndex::Y); }
  int64 horizontal_stride() const { return GetDim(strides_, DimIndex::X); }
  int64 stride(DimIndex dim) const { return GetDim(strides_, dim); }
  std::vector<int64> window() const { return window_; }
  std::vector<int64> padding() const { return padding_; }
  std::vector<int64> strides() const { return strides_; }
  bool propagate_nans() const { return propagate_nans_; }

 private:
  PoolingMode mode_;
  int ndims_;
  bool propagate_nans_;

  // Stored as: ..., y, x.
  std::vector<int64> window_;
  std::vector<int64> padding_;
  std::vector<int64> strides_;
};
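
// Example (illustrative sketch): non-overlapping 2x2 max pooling could be
// described as:
//
//   PoolingDescriptor pool;
//   pool.set_pooling_mode(PoolingMode::kMaximum)
//       .set_window_height(2)
//       .set_window_width(2)
//       .set_vertical_stride(2)
//       .set_horizontal_stride(2);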

// Collects parameters for DNN algorithms.
class AlgorithmDesc {
 public:
  typedef int64 Index;
  AlgorithmDesc() : algo_(kDefaultAlgorithm), tensor_ops_enabled_(false) {}
  AlgorithmDesc(Index a, bool use_tensor_ops)
      : algo_(a), tensor_ops_enabled_(use_tensor_ops) {}
  bool is_default() const { return algo_ == kDefaultAlgorithm; }
  bool tensor_ops_enabled() const { return tensor_ops_enabled_; }
  Index algo_id() const { return algo_; }
  bool operator==(const AlgorithmDesc& other) const {
    return this->algo_ == other.algo_ &&
           this->tensor_ops_enabled_ == other.tensor_ops_enabled_;
  }

 private:
  enum { kDefaultAlgorithm = -1 };
  Index algo_;
  bool tensor_ops_enabled_;
};

// Describes the result from a perf experiment.
//
// Arguments:
//  algorithm: returns the exact algorithm that was used.
//  elapsed_time_in_ms: returns the measured elapsed time in milliseconds.
class ProfileResult {
 public:
  bool is_valid() const {
    return (!algorithm_.is_default() &&
            elapsed_time_in_ms_ != std::numeric_limits<float>::max());
  }
  AlgorithmDesc algorithm() const { return algorithm_; }
  void set_algorithm(AlgorithmDesc val) { algorithm_ = val; }
  float elapsed_time_in_ms() const { return elapsed_time_in_ms_; }
  void set_elapsed_time_in_ms(float val) { elapsed_time_in_ms_ = val; }

 private:
  AlgorithmDesc algorithm_;
  float elapsed_time_in_ms_ = std::numeric_limits<float>::max();
};

// Describes the configuration for the algorithms that will be used.
//
// Arguments:
//  algorithm: the primary algorithm that should be used.
//  algorithm_no_scratch: a secondary algorithm that should be used, if the
//    allocation for the scratch memory fails.
class AlgorithmConfig {
 public:
  AlgorithmConfig() {}
  explicit AlgorithmConfig(AlgorithmDesc algorithm) : algorithm_(algorithm) {}
  AlgorithmConfig(AlgorithmDesc algorithm, AlgorithmDesc algorithm_no_scratch)
      : algorithm_(algorithm), algorithm_no_scratch_(algorithm_no_scratch) {}
  AlgorithmDesc algorithm() const { return algorithm_; }
  void set_algorithm(AlgorithmDesc val) { algorithm_ = val; }
  AlgorithmDesc algorithm_no_scratch() const { return algorithm_no_scratch_; }
  void set_algorithm_no_scratch(AlgorithmDesc val) {
    algorithm_no_scratch_ = val;
  }
  bool operator==(const AlgorithmConfig& other) const {
    return this->algorithm_ == other.algorithm_ &&
           this->algorithm_no_scratch_ == other.algorithm_no_scratch_;
  }
  bool operator!=(const AlgorithmConfig& other) const {
    return !(*this == other);
  }
  string ToString() const;

 private:
  AlgorithmDesc algorithm_;
  AlgorithmDesc algorithm_no_scratch_;
};
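
// Example (illustrative sketch, assuming algorithm ids 1 and 0 are valid for
// the platform in use): pick algorithm 1 with tensor ops enabled as the
// primary choice, and fall back to algorithm 0 if scratch allocation fails.
//
//   AlgorithmConfig config(AlgorithmDesc(/*a=*/1, /*use_tensor_ops=*/true),
//                          AlgorithmDesc(/*a=*/0, /*use_tensor_ops=*/false));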

// Describes a local response normalization (LRN). LRN is used e.g. in
// dist_belief.
//
// Let V be the vector of feature maps at some (batch, y, x)
// coordinate. LRN applies independently to each vector V in the
// input, across all coordinates (batch, y, x), by mapping each V to
// another vector U of the same size using the formula
//
//   U_i = V_i / ((bias + alpha * (sum_j V_j^2)) ^ beta)
//
// where the sum is taken over j in the closed range [i - range, i + range].
//
// When calculating U_i the j in the sum can extend beyond the bounds
// of V. If wrap_around is true, then V_j = V_{j mod F} where F is the
// size of V, which is the number of feature maps. If wrap_around is
// false, then V_j = 0 for j outside [0, F-1].
//
// If segment_size <= F, where F is the number of feature_maps, then
// segment_size has no effect. Otherwise, each consecutive segment of
// segment_size entries in V are normalized separately.
//
// Not all StreamExecutors allow wrap_around == true or segment_size
// != 64. Some do not implement normalization at all.
class NormalizeDescriptor {
 public:
  NormalizeDescriptor();

  NormalizeDescriptor& set_bias(float bias) {
    bias_ = bias;
    return *this;
  }

  NormalizeDescriptor& set_range(int32 range) {
    range_ = range;
    return *this;
  }

  NormalizeDescriptor& set_alpha(float alpha) {
    alpha_ = alpha;
    return *this;
  }

  NormalizeDescriptor& set_beta(float beta) {
    beta_ = beta;
    return *this;
  }

  NormalizeDescriptor& set_wrap_around(bool wrap_around) {
    wrap_around_ = wrap_around;
    return *this;
  }

  NormalizeDescriptor& set_segment_size(int32 segment_size) {
    segment_size_ = segment_size;
    return *this;
  }

  void CloneFrom(const NormalizeDescriptor& other);

  string ToString() const;
  string ToShortString() const;

  float bias() const { return bias_; }
  int32 range() const { return range_; }
  float alpha() const { return alpha_; }
  float beta() const { return beta_; }
  bool wrap_around() const { return wrap_around_; }
  int32 segment_size() const { return segment_size_; }

 private:
  float bias_;
  int32 range_;
  float alpha_;
  float beta_;
  bool wrap_around_;
  int32 segment_size_;
};
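
// Example (illustrative sketch; the parameter values are only one common
// choice, not a requirement of this API): an LRN over a neighborhood of 5
// feature maps (range 2 on each side) could be described as:
//
//   NormalizeDescriptor lrn;
//   lrn.set_bias(1.0f).set_range(2).set_alpha(1e-4f).set_beta(0.75f);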

// Describes a kind of non-linearity (threshold-like mathematical function).
enum class ActivationMode {
  kNone,
  kSigmoid,
  // Rectified linear activation: f(x) = x < 0 ? 0 : x
  kRelu,
  // Rectified linear activation, where upper maximum is 6.0.
  kRelu6,
  // Rectified linear activation, where upper maximum is specified by
  // BatchDescriptor::value_max().
  kReluX,
  kTanh,
  // Like ReluX, but passes all values in the range [-X,X].
  kBandPass,
};

// Returns a string representation of the given activation mode.
string ActivationModeString(ActivationMode mode);

// Describes the operation that DoElementwiseOperation should perform on its
// inputs.
enum class ElementwiseOperation { kAdd, kMultiply };

string ElementwiseOperationString(ElementwiseOperation op);

// Suite of operations typically used for implementing Deep/Convolutional
// Neural Nets. Note: A false return value of an operation indicates the
// implementation is not available.
class DnnSupport {
 public:
  DnnSupport() {}
  virtual ~DnnSupport() {}

  virtual port::Status Init() = 0;

  // Performs a single-precision forward batch normalization operation onto
  // the stream.
  //
  // Arguments:
  //  stream: borrowed pointer to the stream that the batch normalization
  //    operation should be enqueued onto.
  //  x: input data.
  //  scale: scaling parameters.
  //  offset: offset parameters.
  //  estimated_mean: population mean estimated during training.
  //    Used for inference only; empty for training.
  //  estimated_variance: population variance estimated during training,
  //    used for inference only; empty for training.
  //  x_desc: dimensions of the input data, which is the same as the dimensions
  //    of the output.
  //  scale_offset_desc: dimensions of scale and offset.
  //  epsilon: a small floating point number added to the variance of x.
  //  y: output data.
  //  batch_mean: batch mean, to be used to compute the running mean.
  //  batch_variance: batch variance, to be used to compute
  //    the running variance.
  //  reserve_space_1: saved mean, to be reused in the backward gradient
  //    computation.
  //  reserve_space_2: saved inv_var (1/sqrt(epsilon + variance)), to be
  //    reused in the backward gradient computation.
  //  is_training: Set to true for training, false for inference.
  //  var_to_inv_var: a function to convert the variance to inverted variance
  //    for cuDNN v4 forward inference.
  //  inv_var_to_var: a function to convert the inverted variance to
  //    variance for cuDNN v4 forward training, to be used for TensorFlow
  //    to calculate the running variance.
  virtual bool DoBatchNormalizationForward(
      Stream* stream, const DeviceMemory<float>& x,
      const DeviceMemory<float>& scale, const DeviceMemory<float>& offset,
      const DeviceMemory<float>& estimated_mean,
      const DeviceMemory<float>& estimated_variance,
      const dnn::BatchDescriptor& x_desc,
      const dnn::BatchDescriptor& scale_offset_desc, const double epsilon,
      DeviceMemory<float>* y, DeviceMemory<float>* batch_mean,
      DeviceMemory<float>* batch_var, DeviceMemory<float>* reserve_space_1,
      DeviceMemory<float>* reserve_space_2, bool is_training,
      std::function<const DeviceMemory<float>&()> var_to_inv_var,
      std::function<void()> inv_var_to_var) {
    return false;
  }

  // Performs a half-precision forwards batch normalization operation onto the
  // stream. See DoBatchNormalizationForward above for argument details.
  virtual bool DoBatchNormalizationForward(
      Stream* stream, const DeviceMemory<Eigen::half>& x,
      const DeviceMemory<float>& scale, const DeviceMemory<float>& offset,
      const DeviceMemory<float>& estimated_mean,
      const DeviceMemory<float>& estimated_variance,
      const dnn::BatchDescriptor& x_desc,
      const dnn::BatchDescriptor& scale_offset_desc, const double epsilon,
      DeviceMemory<Eigen::half>* y, DeviceMemory<float>* batch_mean,
      DeviceMemory<float>* batch_var, DeviceMemory<float>* reserve_space_1,
      DeviceMemory<float>* reserve_space_2, bool is_training,
      std::function<const DeviceMemory<float>&()> var_to_inv_var,
      std::function<void()> inv_var_to_var) {
    return false;
  }

  // Performs a single-precision backward batch normalization gradient
  // computation operation onto the stream.
  //
  // Arguments:
  //  stream: borrowed pointer to the stream that the batch normalization
  //    gradient computation operation should be enqueued onto.
  //  y_backprop: gradient with regard to output y.
  //  x: input data.
  //  scale: scaling parameters.
  //  inv_var: 1/sqrt(epsilon + variance) of x.
  //  x_desc: dimensions of the input data, which is the same as the dimensions
  //    of the output.
  //  scale_offset_desc: dimensions of scale and offset.
  //  epsilon: a small floating point number added to the variance of x.
  //  x_backprop: gradient with respect to input x.
  //  scale_backprop: gradient with respect to scale.
  //  offset_backprop: gradient with respect to offset.
  virtual bool DoBatchNormalizationBackward(
      Stream* stream, const DeviceMemory<float>& y_backprop,
      const DeviceMemory<float>& x, const DeviceMemory<float>& scale,
      const DeviceMemory<float>& mean, const DeviceMemory<float>& inv_var,
      const dnn::BatchDescriptor& x_desc,
      const dnn::BatchDescriptor& scale_offset_desc, const double epsilon,
      DeviceMemory<float>* x_backprop, DeviceMemory<float>* scale_backprop,
      DeviceMemory<float>* offset_backprop) {
    return false;
  }

  // Performs a half-precision backward batch normalization gradient
  // computation operation onto the stream. See DoBatchNormalizationBackward
  // above for argument details.
  virtual bool DoBatchNormalizationBackward(
      Stream* stream, const DeviceMemory<Eigen::half>& y_backprop,
      const DeviceMemory<Eigen::half>& x, const DeviceMemory<float>& scale,
      const DeviceMemory<float>& mean, const DeviceMemory<float>& inv_var,
      const dnn::BatchDescriptor& x_desc,
      const dnn::BatchDescriptor& scale_offset_desc, const double epsilon,
      DeviceMemory<Eigen::half>* x_backprop,
      DeviceMemory<float>* scale_backprop,
      DeviceMemory<float>* offset_backprop) {
    return false;
  }

  // Enqueues a fused convolution operation onto the stream.
  // We provide several variants with different types for inputs, biases and
  // scaling parameters.
  //
  // Arguments (all borrowed):
  //  stream: borrowed pointer to the stream that the 'convolve' operation
  //    should be enqueued onto.
  //  conv_input_descriptor: dimensions of the convolution input layer.
  //  conv_input_data: un-owned device memory region which contains the
  //    convolution input.
  //  conv_input_scale: a floating point scale to multiply with each element
  //    of conv_input_data.
  //  filter_descriptor: dimensions of the convolution filter.
  //  filter_data: un-owned device memory region which contains the
  //    convolution filter weights.
  //  convolution_descriptor: stride of the convolution filter.
  //  biases: un-owned device memory region containing biases to add to the
  //    input.
  //  activation_mode: Type of activation to perform.
  //  side_input_data: un-owned device memory region which contains optional
  //    side input data. If 'side_input_scale' is non-zero, then this must
  //    point to data in the tensor shape specified by output_shape.
  //    It will be scaled by 'side_input_scale' and added to the convolution
  //    result and bias prior to applying the activation function.
  //  side_input_scale: a floating point scale to multiply with each element
  //    of side_input_data.
  //  output_descriptor: dimensions of the output layer.
  //  output_data: un-owned device memory region in which to place the
  //    convolution result.
  //  scratch_allocator: un-owned, may-be-null object that may allocate scratch
  //    space in order to speed up the convolution operation.
  //  algorithm: specifies which algorithm should be used for the
  //    operation. If algorithm.is_default(), the system will pick an algorithm
  //    by default. The coding of the algorithm is to be interpreted by the
  //    underlying implementation.
  //  output_profile_result: the output profile result for this call. The
  //    profiling is only enabled when this is not nullptr.
  //
  //  conv_input_descriptor, filter_descriptor, convolution_descriptor and
  //  output_descriptor together specify exactly how the convolution is aligned
  //  with the input data:
  //
  //  * (input dimensions - filter size + 1) / filter stride == output dimensions
  //    corresponds to dist_belief padding = VALID, i.e. the input is not padded.
  //  * input dimensions / filter stride == output dimensions
  //    corresponds to dist_belief padding = SAME, i.e. input and output are the
  //    same size - this requires padding the input.
  //  * (input dimensions + filter size - 1) / filter stride == output dimensions
  //    corresponds to dist_belief padding = FULL, i.e. the output is sized so
  //    that if the inverse of the filter is applied to the output in VALID mode
  //    the result is the same size as the input - this requires even more
  //    padding of the input.
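  //
  //  For example (illustrative only), with a 1-D input of size 5, a filter of
  //  size 3 and a stride of 1: VALID gives an output of size
  //  (5 - 3 + 1) / 1 = 3, SAME gives 5 / 1 = 5, and FULL gives
  //  (5 + 3 - 1) / 1 = 7.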
  virtual bool DoFusedConvolve(
      Stream* stream, const dnn::BatchDescriptor& conv_input_descriptor,
      const DeviceMemory<double>& conv_input_data, double conv_input_scale,
      const dnn::FilterDescriptor& filter_descriptor,
      const DeviceMemory<double>& filter_data,
      const dnn::ConvolutionDescriptor& convolution_descriptor,
      const DeviceMemory<double>& side_input_data, double side_input_scale,
      const dnn::BatchDescriptor& bias_descriptor,
      const DeviceMemory<double>& biases, dnn::ActivationMode activation_mode,
      const dnn::BatchDescriptor& output_descriptor,
      DeviceMemory<double>* output_data, ScratchAllocator* scratch_allocator,
      const dnn::AlgorithmConfig& algorithm_config,
      dnn::ProfileResult* output_profile_result) {
    return false;
  }

  // This is the float version of DoFusedConvolve.
  virtual bool DoFusedConvolve(
      Stream* stream, const dnn::BatchDescriptor& conv_input_descriptor,
      const DeviceMemory<float>& conv_input_data, float conv_input_scale,
      const dnn::FilterDescriptor& filter_descriptor,
      const DeviceMemory<float>& filter_data,
      const dnn::ConvolutionDescriptor& convolution_descriptor,
      const DeviceMemory<float>& side_input_data, float side_input_scale,
      const dnn::BatchDescriptor& bias_descriptor,
      const DeviceMemory<float>& biases, dnn::ActivationMode activation_mode,
      const dnn::BatchDescriptor& output_descriptor,
      DeviceMemory<float>* output_data, ScratchAllocator* scratch_allocator,
      const dnn::AlgorithmConfig& algorithm_config,
      dnn::ProfileResult* output_profile_result) {
    return false;
  }

  // This is the Eigen::half version of DoFusedConvolve.
  // The scaling parameters are still floats.
  virtual bool DoFusedConvolve(
      Stream* stream, const dnn::BatchDescriptor& conv_input_descriptor,
      const DeviceMemory<Eigen::half>& conv_input_data, float conv_input_scale,
      const dnn::FilterDescriptor& filter_descriptor,
      const DeviceMemory<Eigen::half>& filter_data,
      const dnn::ConvolutionDescriptor& convolution_descriptor,
      const DeviceMemory<Eigen::half>& side_input_data, float side_input_scale,
      const dnn::BatchDescriptor& bias_descriptor,
      const DeviceMemory<Eigen::half>& biases,
      dnn::ActivationMode activation_mode,
      const dnn::BatchDescriptor& output_descriptor,
      DeviceMemory<Eigen::half>* output_data,
      ScratchAllocator* scratch_allocator,
      const dnn::AlgorithmConfig& algorithm_config,
      dnn::ProfileResult* output_profile_result) {
    return false;
  }

  // This is the int8 version of DoFusedConvolve.
  // The bias input and scaling parameters are floats.
  virtual bool DoFusedConvolve(
      Stream* stream, const dnn::BatchDescriptor& conv_input_descriptor,
      const DeviceMemory<int8>& conv_input_data, float conv_input_scale,
      const dnn::FilterDescriptor& filter_descriptor,
      const DeviceMemory<int8>& filter_data,
      const dnn::ConvolutionDescriptor& convolution_descriptor,
      const DeviceMemory<int8>& side_input_data, float side_input_scale,
      const dnn::BatchDescriptor& bias_descriptor,
      const DeviceMemory<float>& biases, dnn::ActivationMode activation_mode,
      const dnn::BatchDescriptor& output_descriptor,
      DeviceMemory<int8>* output_data, ScratchAllocator* scratch_allocator,
      const dnn::AlgorithmConfig& algorithm_config,
      dnn::ProfileResult* output_profile_result) {
    return false;
  }

  // Enqueues a single-precision convolution operation onto the stream.
  //
  // Arguments (all borrowed):
  //  stream: borrowed pointer to the stream that the 'convolve' operation
  //    should be enqueued onto.
  //  input_descriptor: dimensions of the input layer.
  //  input_data: un-owned device memory region which contains the
  //    convolution input.
  //  filter_descriptor: dimensions of the convolution filter.
  //  convolution_descriptor: stride of the convolution filter.
  //  output_descriptor: dimensions of the output layer.
  //  output_data: un-owned device memory region in which to place the
  //    convolution result.
  //  scratch_allocator: un-owned, may-be-null object that may allocate scratch
  //    space in order to speed up the convolution operation.
  //  algorithm: an integer to specify which algorithm should be used for the
  //    operation. kDefaultAlgorithm means the system will pick an algorithm
  //    by default. The coding of the algorithm is to be interpreted by the
  //    underlying implementation.
  //  output_profile_result: the output profile result for this call. The
  //    profiling is only enabled when this is not nullptr.
  //
  //  input_descriptor, filter_descriptor, convolution_descriptor and
  //  output_descriptor together specify exactly how the convolution is aligned
  //  with the input data:
  //
  //  * (input dimensions - filter size + 1) / filter stride == output dimensions
  //    corresponds to dist_belief padding = VALID, i.e. the input is not padded.
  //  * input dimensions / filter stride == output dimensions
  //    corresponds to dist_belief padding = SAME, i.e. input and output are the
  //    same size - this requires padding the input.
  //  * (input dimensions + filter size - 1) / filter stride == output dimensions
  //    corresponds to dist_belief padding = FULL, i.e. the output is sized so
  //    that if the inverse of the filter is applied to the output in VALID mode
  //    the result is the same size as the input - this requires even more
  //    padding of the input.
  virtual bool DoConvolve(
      Stream* stream, const dnn::BatchDescriptor& input_descriptor,
      const DeviceMemory<float>& input_data,
      const dnn::FilterDescriptor& filter_descriptor,
      const DeviceMemory<float>& filter_data,
      const dnn::ConvolutionDescriptor& convolution_descriptor,
      const dnn::BatchDescriptor& output_descriptor,
      DeviceMemory<float>* output_data, ScratchAllocator* scratch_allocator,
      const dnn::AlgorithmConfig& algorithm_config,
      ProfileResult* output_profile_result) = 0;

  // Enqueues a double-precision convolution operation onto the stream.
  // See DoConvolve above for argument details.
  virtual bool DoConvolve(
      Stream* stream, const dnn::BatchDescriptor& batch_descriptor,
      const DeviceMemory<double>& input_data,
      const dnn::FilterDescriptor& filter_descriptor,
      const DeviceMemory<double>& filter_data,
      const dnn::ConvolutionDescriptor& convolution_descriptor,
      const dnn::BatchDescriptor& output_descriptor,
      DeviceMemory<double>* output_data) = 0;

  // Enqueues a half-precision convolution operation onto the stream.
  // See DoConvolve above for argument details.
  virtual bool DoConvolve(
      Stream* stream, const dnn::BatchDescriptor& batch_descriptor,
      const DeviceMemory<Eigen::half>& input_data,
      const dnn::FilterDescriptor& filter_descriptor,
      const DeviceMemory<Eigen::half>& filter_data,
      const dnn::ConvolutionDescriptor& convolution_descriptor,
      const dnn::BatchDescriptor& output_descriptor,
      DeviceMemory<Eigen::half>* output_data,
      ScratchAllocator* scratch_allocator,
      const dnn::AlgorithmConfig& algorithm_config,
      ProfileResult* output_profile_result) = 0;

  // Return a list of algorithms supported by the forward convolution pass.
  virtual bool GetConvolveAlgorithms(
      bool with_winograd_nonfused, int cc_major, int cc_minor,
      std::vector<AlgorithmDesc>* out_algorithms);

  // Version of DoConvolve that uses pre-quantized 8 bit coefficients.
  // coefficient_scales specifies the scaling of each column of coefficients:
  //   original float coefficient[row * num_columns + column] =
  //       quantized coefficient[row * num_columns + column] *
  //           coefficient_scales[column].
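  //  For example (illustrative only), a quantized coefficient of 64 in a
  //  column whose scale is 0.25f represents the float coefficient 16.0f.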
  virtual bool DoConvolveQuantized(
      Stream* stream, const dnn::BatchDescriptor& input_descriptor,
      const DeviceMemory<float>& input_data,
      const dnn::FilterDescriptor& filter_descriptor,
      const DeviceMemory<int8>& filter_coefficients,
      const DeviceMemory<float>& coefficient_scales,
      const dnn::ConvolutionDescriptor& convolution_descriptor,
      const dnn::BatchDescriptor& output_descriptor,
      DeviceMemory<float>* output_data) = 0;

  // Same as DoConvolveQuantized above, but with int16 filter coefficients.
  virtual bool DoConvolveQuantized(
      Stream* stream, const dnn::BatchDescriptor& input_descriptor,
      const DeviceMemory<float>& input_data,
      const dnn::FilterDescriptor& filter_descriptor,
      const DeviceMemory<int16>& filter_coefficients,
      const DeviceMemory<float>& coefficient_scales,
      const dnn::ConvolutionDescriptor& convolution_descriptor,
      const dnn::BatchDescriptor& output_descriptor,
      DeviceMemory<float>* output_data) = 0;

  // Variation of the above with the weight matrix split into two matrices.
  //  first_weights: Coefficients of the first matrix.
  //  second_weights: Coefficients of the second matrix.
  //  depth_multiplier: specifies the columns of the first matrix and rows
  //    of the second one - first_weights columns = depth_multiplier,
  //    second_weights rows = depth_multiplier *
  //                          filter_descriptor.input_feature_map_count().
  //  see go/separable for documentation on separable convolutions.
  virtual bool DoSeparableConvolve(
      Stream* stream, const BatchDescriptor& input_descriptor,
      const DeviceMemory<float>& input_data,
      const FilterDescriptor& filter_descriptor, int depth_multiplier,
      const DeviceMemory<float>& first_weights,
      const DeviceMemory<float>& second_weights,
      const ConvolutionDescriptor& convolution_descriptor,
      const BatchDescriptor& output_descriptor,
      DeviceMemory<float>* output_data) = 0;

  // Enqueues a single-precision backward convolution (for data) operation onto
  // the stream.
  //
  // Arguments:
  //  stream: borrowed pointer to the stream that the 'convolve' operation
  //    should be enqueued onto.
  //  filter_descriptor: dimensions of the convolution filter.
  //  filter_data: coefficients for the convolution filter.
  //  output_descriptor: dimensions of the output gradients, which is the same
  //    as the dimensions of the output.
  //  backward_output_data: un-owned device memory region which contains the
  //    backprop of the output.
  //  convolution_descriptor: stride of the convolution filter.
  //  input_descriptor: dimensions of the input layer.
  //  backward_input_data: un-owned device memory region in which to place the
  //    backprop of the input.
  //  scratch_allocator: un-owned, may-be-null object that may allocate scratch
  //    space in order to speed up the convolution operation.
  virtual bool DoConvolveBackwardData(
      Stream* stream, const FilterDescriptor& filter_descriptor,
      const DeviceMemory<float>& filter_data,
      const BatchDescriptor& output_descriptor,
      DeviceMemory<float> backward_output_data,
      const ConvolutionDescriptor& convolution_descriptor,
      const BatchDescriptor& input_descriptor,
      DeviceMemory<float>* backward_input_data,
      ScratchAllocator* scratch_allocator,
      const dnn::AlgorithmConfig& algorithm_config,
      ProfileResult* output_profile_result) = 0;

  // Return a list of algorithms supported by the backward convolution pass for
  // data.
  virtual bool GetConvolveBackwardDataAlgorithms(
      bool with_winograd_nonfused, int cc_major, int cc_minor,
      std::vector<AlgorithmDesc>* out_algorithms);

  virtual bool DoConvolveBackwardData(
      Stream* stream, const FilterDescriptor& filter_descriptor,
      const DeviceMemory<Eigen::half>& filter_data,
      const BatchDescriptor& output_descriptor,
      DeviceMemory<Eigen::half> backward_output_data,
      const ConvolutionDescriptor& convolution_descriptor,
      const BatchDescriptor& input_descriptor,
      DeviceMemory<Eigen::half>* backward_input_data,
      ScratchAllocator* scratch_allocator,
      const dnn::AlgorithmConfig& algorithm_config,
      ProfileResult* output_profile_result) = 0;

  // Enqueues a single-precision backward convolution (for filter) operation
  // onto the stream.
  //
  // Arguments:
  //  stream: borrowed pointer to the stream that the 'convolve' operation
  //    should be enqueued onto.
  //  input_descriptor: dimensions of the input layer.
  //  input_data: un-owned device memory region which contains the
  //    convolution input.
  //  output_descriptor: dimensions of the output gradients, which is the same
  //    as the dimensions of the output.
  //  backward_output_data: un-owned device memory region which contains the
  //    backprop of the output.
  //  convolution_descriptor: stride of the convolution filter.
  //  filter_descriptor: dimensions of the convolution filter.
  //  backward_filter_data: un-owned device memory region in which to place the
  //    backprop of the filter.
  //  scratch_allocator: un-owned, may-be-null object that may allocate scratch
  //    space in order to speed up the convolution operation.
  virtual bool DoConvolveBackwardFilter(
      Stream* stream, const BatchDescriptor& input_descriptor,
      const DeviceMemory<float>& input_data,
      const BatchDescriptor& output_descriptor,
      DeviceMemory<float> backward_output_data,
      const ConvolutionDescriptor& convolution_descriptor,
      const FilterDescriptor& filter_descriptor,
      DeviceMemory<float>* backward_filter_data,
      ScratchAllocator* scratch_allocator,
      const dnn::AlgorithmConfig& algorithm_config,
      ProfileResult* output_profile_result) = 0;

  // Return a list of algorithms supported by the backward convolution pass for
  // filters.
  virtual bool GetConvolveBackwardFilterAlgorithms(
      bool with_winograd_nonfused, int cc_major, int cc_minor,
      std::vector<AlgorithmDesc>* out_algorithms);

  virtual bool DoConvolveBackwardFilter(
      Stream* stream, const BatchDescriptor& input_descriptor,
      const DeviceMemory<Eigen::half>& input_data,
      const BatchDescriptor& output_descriptor,
      DeviceMemory<Eigen::half> backward_output_data,
      const ConvolutionDescriptor& convolution_descriptor,
      const FilterDescriptor& filter_descriptor,
      DeviceMemory<Eigen::half>* backward_filter_data,
      ScratchAllocator* scratch_allocator,
      const dnn::AlgorithmConfig& algorithm_config,
      ProfileResult* output_profile_result) = 0;

  // Enqueues a single-precision backward convolution (for bias) operation onto
  // the stream.
  //
  // Arguments:
  //  stream: borrowed pointer to the stream that the 'convolve' operation
  //    should be enqueued onto.
  //  input_descriptor: dimensions of the input layer.
  //  input_data: un-owned device memory region which contains the
  //    convolution input.
  //  bias_descriptor: dimensions of the bias tensor. Should be the same as the
  //    input dimensions, but with the spatial dimensions set to 1.
  //  backward_bias_data: un-owned device memory region in which to place the
  //    backprop of the bias.
1350 virtual bool DoConvolveBackwardBias(Stream* stream,
1351 const BatchDescriptor& input_descriptor,
1352 const DeviceMemory<float>& input_data,
1353 const BatchDescriptor& bias_descriptor,
1354 DeviceMemory<float>* backward_bias_data) {
1355 return false;
1356 }
1357
1358 virtual bool DoConvolveBackwardBias(
1359 Stream* stream, const BatchDescriptor& input_descriptor,
1360 const DeviceMemory<double>& input_data,
1361 const BatchDescriptor& bias_descriptor,
1362 DeviceMemory<double>* backward_bias_data) {
1363 return false;
1364 }
1365
1366 virtual bool DoConvolveBackwardBias(
1367 Stream* stream, const BatchDescriptor& input_descriptor,
1368 const DeviceMemory<Eigen::half>& input_data,
1369 const BatchDescriptor& bias_descriptor,
1370 DeviceMemory<Eigen::half>* backward_bias_data) {
1371 return false;
1372 }
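
  // For reference, a minimal host-side sketch of the conventional bias
  // gradient (a per-feature-map reduction of the incoming gradient over the
  // batch and spatial dimensions), shown for a batch-Y-X-depth buffer. This is
  // not part of the interface and the names below are illustrative.
  //
  //   void ReferenceConvBackwardBias(const float* grad,  // [batch][h][w][c]
  //                                  float* d_bias,      // [c], zeroed
  //                                  int batch, int h, int w, int c) {
  //     for (int b = 0; b < batch; ++b)
  //       for (int y = 0; y < h; ++y)
  //         for (int x = 0; x < w; ++x)
  //           for (int f = 0; f < c; ++f)
  //             d_bias[f] += grad[((b * h + y) * w + x) * c + f];
  //   }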
1373
1374 // Fully connects the "nodes" (float values) in input_data with
1375 // shape input_dimensions to output_data with output_dimensions
1376 // using provided weights. This is equivalent to computing a matrix
1377 // product, hence the name MatMul.
1378 //
1379 // A BatchDescriptor has four dimensions: batch, y, x, depth. Matrix products
1380 // happen in two dimensions. To get down to two dimensions, we consider the
1381 // input y, x and depth dimension as one combined dimension T. For now,
1382   // input y, x and depth dimensions as one combined dimension T. For now,
1383 // depth.
1384 //
1385 // There are three device memory buffers passed in to this
1386 // function. We can now view all three as matrices:
1387 //
1388 // input_data: A batch x T matrix
1389 // weights: A T x OD matrix
1390 // output_data: A batch x OD matrix
1391 //
1392 // This function then computes the matrix product of input_data and
1393 // weights and writes the result into output_data.
1394 //
1395 // Here the weights buffer is in row major order, i.e. the first OD
1396 // entries in weights are the first row, the second OD entries in
1397 // weights are the second row and so on.
1398 //
1399 // The case for output width*height > 1 is more complicated. Let K =
1400 // OY * OX where OY is the output height and OX is the output
1401 // width. Then weights is divided into K sub-arrays W_i, for
1402   // i=0,...,K-1, each of which represents a T x OD matrix. This function
1403 // then computes the K matrix multiplications of input_data with
1404 // each W_i. This creates K matrices with dimensions batch x
1405 // OD. These K matrices are concatenated horizontally to form one
1406 // larger matrix with dimensions batch x (K*OD); note that this is
1407 // not the same as concatenating the bytes of the matrices. The
1408 // combined matrix can then be interpreted as a tensor with
1409 // dimensions (batch, OY, OX, OD). If the output tensor format is
1410 // not kBatchYXDepth, this function would then need to arrange for
1411 // the output to be in the requested layout, if that is
1412   // supported. Note that the case K=1 is equivalent to the
1413   // description above; prefer the K=1 case where possible.
1414 //
1415 // Arguments (all borrowed):
1416 // stream: borrowed pointer to the stream that the 'fully connect' operation
1417 // should be enqueued onto.
1418 // output_data: un-owned device memory region in which to place the
1419 // fully connected result.
1420 virtual bool DoMatMul(Stream* stream, const DeviceMemory<float>& input_data,
1421 const DeviceMemory<float>& weights,
1422 const dnn::BatchDescriptor& input_dimensions,
1423 const dnn::BatchDescriptor& output_dimensions,
1424 DeviceMemory<float>* output_data) = 0;
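
  // For reference, a minimal host-side sketch of the K=1 case described above
  // (input viewed as a batch x T matrix, weights as a row-major T x OD
  // matrix); not part of the interface, names are illustrative.
  //
  //   void ReferenceMatMul(const float* input, const float* weights,
  //                        float* output, int batch, int T, int OD) {
  //     for (int b = 0; b < batch; ++b)
  //       for (int od = 0; od < OD; ++od) {
  //         float acc = 0.f;
  //         for (int t = 0; t < T; ++t)
  //           acc += input[b * T + t] * weights[t * OD + od];
  //         output[b * OD + od] = acc;
  //       }
  //   }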
1425
1426 // Version of DoMatMul that uses pre-quantized 8 bit weights.
1427 // weight_scales specifies the scaling of each column of weights:
1428 // original float weight[row * num_columns + column] =
1429   //  quantized_weight[row * num_columns + column] * weight_scales[column].
1430 virtual bool DoMatMulQuantized(Stream* stream,
1431 const DeviceMemory<float>& input_data,
1432 const DeviceMemory<int8>& quantized_weights,
1433 const DeviceMemory<float>& weight_scales,
1434 const dnn::BatchDescriptor& input_dimensions,
1435 const dnn::BatchDescriptor& output_dimensions,
1436 DeviceMemory<float>* output_data) = 0;
1437
1438 // Version of DoMatMul that uses pre-quantized 16 bit weights.
1439 // weight_scales specifies the scaling of each column of weights:
1440 // original float weight[row * num_columns + column] =
1441   //  quantized_weight[row * num_columns + column] * weight_scales[column].
1442 virtual bool DoMatMulQuantized(Stream* stream,
1443 const DeviceMemory<float>& input_data,
1444 const DeviceMemory<int16>& quantized_weights,
1445 const DeviceMemory<float>& weight_scales,
1446 const dnn::BatchDescriptor& input_dimensions,
1447 const dnn::BatchDescriptor& output_dimensions,
1448 DeviceMemory<float>* output_data) = 0;
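
  // For reference, the dequantization rule stated above as a host-side sketch;
  // T would be int8 or int16 to match the overloads. Not part of the
  // interface, names are illustrative.
  //
  //   template <typename T>
  //   float DequantizedWeight(const T* quantized_weights,
  //                           const float* weight_scales, int num_columns,
  //                           int row, int column) {
  //     return static_cast<float>(
  //                quantized_weights[row * num_columns + column]) *
  //            weight_scales[column];
  //   }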
1449
1450 // Adds biases to the feature maps in input_data producing
1451 // output_data. input_data can equal output_data, but must not
1452 // partially overlap it.
1453 //
1454   // Let K = count() * height() * width() and N = feature_map_count(),
1455   // as given by 'dimensions'. Then input_data contains K*N values and biases
1456   // contains N values. We can thus logically consider input_data to
1457   // contain K vectors of N elements each. This function adds biases
1458   // to each of those K vectors.
1459 //
1460 // TODO(broune): This works differently when width() * height() > 1
1461 // and the call to ThenBiasAdd() follows a call to ThenMatMul(). In
1462 // that case there should be width() * height() *
1463 // feature_map_count() biases, but this is not implemented on all
1464 // StreamExecutors.
1465 //
1466 // Arguments (all borrowed):
1467 // stream: borrowed pointer to the stream that the 'bias add' operation
1468 // should be enqueued onto.
1469 // input_data: un-owned device memory region containing the input.
1470 // biases: un-owned device memory region containing biases to add to the
1471 // input.
1472 // dimensions: dimensions of input_data and output_data.
1473 // output_data: un-owned device memory region in which to place the result.
1474 virtual bool DoBiasAdd(Stream* stream, const DeviceMemory<float>& input_data,
1475 const DeviceMemory<float>& biases,
1476 const dnn::BatchDescriptor& dimensions,
1477 DeviceMemory<float>* output_data) = 0;
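
  // For reference, a minimal host-side sketch of the bias addition described
  // above: K vectors of N elements each, with the same N biases added to every
  // vector. Not part of the interface, names are illustrative.
  //
  //   void ReferenceBiasAdd(const float* input, const float* biases,
  //                         float* output, int K, int N) {
  //     for (int k = 0; k < K; ++k)
  //       for (int n = 0; n < N; ++n)
  //         output[k * N + n] = input[k * N + n] + biases[n];
  //   }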
1478
1479 // Performs a forward pooling operation on input_data, writing to
1480 // output_data. See PoolingDescriptor for how to configure the
1481 // pooling operation.
1482 //
1483 // Pooling happens as a window that moves across the Y and X
1484 // dimensions of input_data, where each position of the window
1485 // yields one output value. E.g. for max pooling, the computed value
1486 // is the maximum element in the window. The operation is applied
1487 // independently to each batch and at each feature map (depth), so
1488 // that the output depth and feature_map_count are the same as for
1489 // the input. The output width and height can be different.
1492 virtual bool DoPoolForward(Stream* stream,
1493 const dnn::PoolingDescriptor& pooling_dimensions,
1494 const dnn::BatchDescriptor& input_dimensions,
1495 const DeviceMemory<float>& input_data,
1496 const dnn::BatchDescriptor& output_dimensions,
1497 DeviceMemory<float>* output_data) = 0;
1498
1499 virtual bool DoPoolForward(Stream* stream,
1500 const dnn::PoolingDescriptor& pooling_dimensions,
1501 const dnn::BatchDescriptor& input_dimensions,
1502 const DeviceMemory<double>& input_data,
1503 const dnn::BatchDescriptor& output_dimensions,
1504 DeviceMemory<double>* output_data) {
1505 LOG(FATAL) << "DoPoolForward not implemented for double.";
1506 return false;
1507 }
1508
1509 virtual bool DoPoolForward(Stream* stream,
1510 const dnn::PoolingDescriptor& pooling_dimensions,
1511 const dnn::BatchDescriptor& input_dimensions,
1512 const DeviceMemory<Eigen::half>& input_data,
1513 const dnn::BatchDescriptor& output_dimensions,
1514 DeviceMemory<Eigen::half>* output_data) {
1515 LOG(FATAL) << "DoPoolForward not implemented for float16.";
1516 return false;
1517 }
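
  // For reference, a minimal host-side sketch of forward max pooling for one
  // batch element and one feature map; window size and stride come from the
  // PoolingDescriptor in the real call, and padding is ignored here. Not part
  // of the interface; names are illustrative (uses <algorithm> and <limits>).
  //
  //   void ReferenceMaxPool2D(const float* in, float* out, int in_h, int in_w,
  //                           int win_h, int win_w, int stride_h, int stride_w,
  //                           int out_h, int out_w) {
  //     for (int oy = 0; oy < out_h; ++oy)
  //       for (int ox = 0; ox < out_w; ++ox) {
  //         float best = -std::numeric_limits<float>::infinity();
  //         for (int ky = 0; ky < win_h; ++ky)
  //           for (int kx = 0; kx < win_w; ++kx) {
  //             const int iy = oy * stride_h + ky;
  //             const int ix = ox * stride_w + kx;
  //             if (iy < in_h && ix < in_w)
  //               best = std::max(best, in[iy * in_w + ix]);
  //           }
  //         out[oy * out_w + ox] = best;
  //       }
  //   }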
1518
1519 // Performs differentiation of the pooling operation.
1520 virtual bool DoPoolBackward(Stream* stream,
1521 const dnn::PoolingDescriptor& pooling_dimensions,
1522 const dnn::BatchDescriptor& input_dimensions,
1523 const DeviceMemory<double>& input_data,
1524 const dnn::BatchDescriptor& output_dimensions,
1525 const DeviceMemory<double>& output_data,
1526 const DeviceMemory<double>& input_diff_data,
1527 DeviceMemory<double>* output_diff_data) {
1528 LOG(FATAL) << "DoPoolBackward not implemented.";
1529 return false;
1530 }
1531
1532 virtual bool DoPoolBackward(Stream* stream,
1533 const dnn::PoolingDescriptor& pooling_dimensions,
1534 const dnn::BatchDescriptor& input_dimensions,
1535 const DeviceMemory<float>& input_data,
1536 const dnn::BatchDescriptor& output_dimensions,
1537 const DeviceMemory<float>& output_data,
1538 const DeviceMemory<float>& input_diff_data,
1539 DeviceMemory<float>* output_diff_data) {
1540 LOG(FATAL) << "DoPoolBackward not implemented.";
1541 return false;
1542 }
1543
1544 virtual bool DoPoolBackward(Stream* stream,
1545 const dnn::PoolingDescriptor& pooling_dimensions,
1546 const dnn::BatchDescriptor& input_dimensions,
1547 const DeviceMemory<Eigen::half>& input_data,
1548 const dnn::BatchDescriptor& output_dimensions,
1549 const DeviceMemory<Eigen::half>& output_data,
1550 const DeviceMemory<Eigen::half>& input_diff_data,
1551 DeviceMemory<Eigen::half>* output_diff_data) {
1552 LOG(FATAL) << "DoPoolBackward not implemented.";
1553 return false;
1554 }
1555
1556 // Applies local response normalization to the values from
1557 // input_data and writes the result to output_data. See comments on
1558 // NormalizeDescriptor for a description of local response
1559 // normalization.
1560 virtual bool DoNormalize(Stream* stream,
1561 const dnn::NormalizeDescriptor& normalize_descriptor,
1562 const DeviceMemory<float>& input_data,
1563 DeviceMemory<float>* output_data) = 0;
1564
1565 // Applies local response normalization to the values from input_data and
1566 // writes the result to output_data.
1567 //
1568 // Similar to DoNormalize, but normalizes across feature maps and allows for
1569 // specifying the dimensions of the tensor.
1570 //
1571 // See comments on NormalizeDescriptor for a description of local response
1572 // normalization.
1573 virtual bool DoNormalizeWithDimensions(
1574 Stream* stream, const dnn::NormalizeDescriptor& normalize_descriptor,
1575 const dnn::BatchDescriptor& dimensions,
1576 const DeviceMemory<float>& input_data, DeviceMemory<float>* output_data) {
1577 return false;
1578 }
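
  // For reference, a host-side sketch of the conventional cross-feature-map
  // local response normalization at one spatial position,
  //   out[d] = in[d] / (bias + alpha * sum of in[j]^2 over the window)^beta;
  // the actual bias/alpha/beta/window parameters are carried by
  // NormalizeDescriptor. Not part of the interface; names are illustrative
  // (uses <algorithm> and <cmath>).
  //
  //   void ReferenceLrnAcrossMaps(const float* in, float* out, int depth,
  //                               int radius, float bias, float alpha,
  //                               float beta) {
  //     for (int d = 0; d < depth; ++d) {
  //       float sum_sq = 0.f;
  //       for (int j = std::max(0, d - radius);
  //            j <= std::min(depth - 1, d + radius); ++j) {
  //         sum_sq += in[j] * in[j];
  //       }
  //       out[d] = in[d] / std::pow(bias + alpha * sum_sq, beta);
  //     }
  //   }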
1579
1580 // Performs backpropagation for the normalization operation
1581 //
1582 // Given raw data, its corresponding normalized output, and a gradient of some
1583 // unspecified function with respect to the normalized variables, computes the
1584 // gradient of that unspecified function with respect to the raw variables.
1585 //
1586 // The normalized data input array is expected to match the output that would
1587 // be obtained by running the raw data input array through the DoNormalize
1588 // method above.
1589 //
1590 // See comments on NormalizeDescriptor for a description of local response
1591 // normalization.
1592 virtual bool DoNormalizeBackwardWithDimensions(
1593 Stream* stream, const dnn::NormalizeDescriptor& normalize_descriptor,
1594 const dnn::BatchDescriptor& dimensions,
1595 const DeviceMemory<float>& raw_data,
1596 const DeviceMemory<float>& normalized_data,
1597 const DeviceMemory<float>& normalized_variable_gradient,
1598 DeviceMemory<float>* raw_variable_gradient) {
1599 return false;
1600 }
1601
1602 // Applies an activation function (see ActivationMode) to all of the values
1603 // held on the device in 'input_data', whose dimensions are described by
1604 // 'dimensions'.
1605 //
1606 // Arguments (all borrowed):
1607 // stream: borrowed pointer to the stream that the 'activate' operation
1608 // should be enqueued onto.
1609 // activation_mode: Type of activation to perform.
1610 // input_data: un-owned device memory region which contains the
1611 // activate input.
1612 // output_data: un-owned device memory region in which to place the
1613 // activate result.
1614 virtual bool DoActivate(Stream* stream, ActivationMode activation_mode,
1615 const BatchDescriptor& dimensions,
1616 const DeviceMemory<float>& input_data,
1617 DeviceMemory<float>* output_data, uint64 options) {
1618 return false;
1619 }
1620
1621 // Concatenates several layers into one, by concatenating the depth of each
1622 // layer at matching x and y coordinates.
1623 // The inputs must all have the same width and height, the output will have
1624 // the same width and height as the inputs and its depth will be the sum of
1625 // the input depths.
1626 //
1627 // Arguments (all borrowed):
1628 // stream: borrowed pointer to the stream that the 'depth concatenate'
1629 // operation should be enqueued onto.
1630 // input_dimensions: The dimensions of each input.
1631 // input_data: un-owned device memory region which contains the
1632 // input data for each input layer.
1633 // output_data: un-owned device memory region in which to place the
1634 // depth concatenate result.
1635 virtual bool DoDepthConcatenate(
1636 Stream* stream, port::ArraySlice<dnn::BatchDescriptor> input_dimensions,
1637 port::ArraySlice<const DeviceMemory<float>*> input_data,
1638 DeviceMemory<float>* output_data) = 0;
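
  // For reference, a minimal host-side sketch of depth concatenation of two
  // inputs in a batch-Y-X-depth layout: outputs share their batch/Y/X
  // coordinates and the depths are stacked. Not part of the interface; names
  // are illustrative (spatial_count = batch * height * width).
  //
  //   void ReferenceDepthConcat2(const float* a, int depth_a, const float* b,
  //                              int depth_b, float* out, int spatial_count) {
  //     const int depth_out = depth_a + depth_b;
  //     for (int i = 0; i < spatial_count; ++i) {
  //       for (int d = 0; d < depth_a; ++d)
  //         out[i * depth_out + d] = a[i * depth_a + d];
  //       for (int d = 0; d < depth_b; ++d)
  //         out[i * depth_out + depth_a + d] = b[i * depth_b + d];
  //     }
  //   }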
1639
1640 // Concatenates several layers into one, by concatenating each in the
1641 // x-dimension or y-dimension, based on a user-specified flag.
1642 // For x-concatenation, layers are aligned at matching y and depth
1643 // coordinates, and for y-concatenation, they are aligned at matching x and
1644 // depth coordinates. The inputs must all have the same depth and batch size.
1645 // For x-concatenation, the inputs must have the same height (y-size), and the
1646 // output will have the same depth and height as the inputs and its width (x-
1647 // size) will be the sum of the input widths. For y-concatenation, the inputs
1648 // must have the same width, and the output will have the same depth and width
1649 // as the inputs, and its height will be the sum of the input heights.
1650 //
1651 // Arguments:
1652 // stream: borrowed pointer to the stream that the 'space concatenate'
1653 // operation should be enqueued onto.
1654 // input_dimensions: the dimensions of each input.
1655 // input_data: un-owned device memory region which contains the input data
1656 // for each input layer.
1657 // output_data: un-owned device memory region in which to place the space
1658 // concatenate result.
1659   //  concat_direction: either dnn::SpaceConcatenateMode::XDirection or
1660 // dnn::SpaceConcatenateMode::YDirection.
1661 virtual bool DoSpaceConcatenate(
1662 Stream* stream, port::ArraySlice<dnn::BatchDescriptor> input_dimensions,
1663 port::ArraySlice<const DeviceMemory<float>*> input_data,
1664 DeviceMemory<float>* output_data,
1665 dnn::SpaceConcatenateMode concat_direction) {
1666 return false;
1667 }
1668
1669 // Change the layout of the data by shrinking one dimension (or set of
1670 // dimensions) and growing another dimension (or set of dimensions), while
1671 // keeping the total number of data elements constant, and maintaining the
1672 // current data ordering.
1673 //
1674 // Currently, the only supported operation is depth into space by a power of
1675 // 2. E.g. (y, x, z) -> (y*2, x*2, z/4)
1676 //
1677 // Note that Reshape may not be a no-op, depending on the platform and which
1678 // dimensions are being changed.
1679 //
1680 // Example: forgetting about batch for the moment, let's take a tensor that's
1681 // 2x1x8 (y by x by z) and reshape to a tensor that's 4x2x2. The memory layout
1682 // is row-major order: y,x,z. I.e. z changes the fastest, then x, then y. The
1683 // elements of the tensor range from 0 to 15. The x,y,z indices are below each
1684 // element.
1685 //
1686 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1687 // y0 y0 y0 y0 y0 y0 y0 y0 y1 y1 y1 y1 y1 y1 y1 y1
1688 // x0 x0 x0 x0 x0 x0 x0 x0 x0 x0 x0 x0 x0 x0 x0 x0
1689 // z0 z1 z2 z3 z4 z5 z6 z7 z0 z1 z2 z3 z4 z5 z6 z7
1690 //
1691 // reshape to 4x2x2
1692 //
1693 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1694 // y0 y0 y0 y0 y1 y1 y1 y1 y2 y2 y2 y2 y3 y3 y3 y3
1695 // x0 x0 x1 x1 x0 x0 x1 x1 x0 x0 x1 x1 x0 x0 x1 x1
1696 // z0 z1 z0 z1 z0 z1 z0 z1 z0 z1 z0 z1 z0 z1 z0 z1
1697 virtual bool DoReshape(Stream* stream,
1698 const dnn::BatchDescriptor& input_dimensions,
1699 const DeviceMemory<float>& input_data,
1700 const dnn::BatchDescriptor& output_dimensions,
1701 DeviceMemory<float>* output_data) {
1702 return false;
1703 }
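
  // For reference, the coordinate mapping implied by the example above: the
  // flat element order is preserved and only its (y, x, z) interpretation
  // changes (row-major, z fastest). A host-side sketch with illustrative
  // names, not part of the interface:
  //
  //   // Returns the (y, x, z) coordinates of flat index i for a row-major
  //   // shape with the given x and z extents.
  //   void CoordsFromFlatIndex(int i, int x_size, int z_size,
  //                            int* y, int* x, int* z) {
  //     *z = i % z_size;
  //     *x = (i / z_size) % x_size;
  //     *y = i / (z_size * x_size);
  //   }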
1704
1705 // Depth to space takes an X by Y image with depth D*M² and changes it to an
1706 // MX x MY image with depth D. Each input location (x,y) with depth D*M² in
1707 // the input image is changed to an MxM contiguous area in the output image,
1708 // with the values being laid out in the raster order by DepthToSpaceLayout,
1709 // and will have a new depth of D.
1710 //
1711 // Example.
1712   //  M=2, Din=8, Xin=2, Yin=2, Xout=4, Yout=4, Dout=2
1713 // DepthHeightWidth layout
1714 // Values within a 'cell' are at different depths and same x & y.
1715 // Input:
1716 // abcdefgh ijklmnop
1717 // qrstuvwx yz012345
1718 // Output:
1719 // ae bf im jn
1720 // cg dh ko lp
1721 // qu rv y2 z3
1722 // sw tx 04 15
1723 //
1724 // sqrt_depth_reduction: 'M' in the comment above
1725 virtual bool DoDepthToSpace(Stream* stream,
1726 const dnn::BatchDescriptor& input_dimensions,
1727 const DeviceMemory<float>& input_data,
1728 const DepthToSpaceLayout& depth_to_space_layout,
1729 const int& sqrt_depth_reduction,
1730 DeviceMemory<float>* output_data) {
1731 return false;
1732 }
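
  // For reference, the index mapping consistent with the DepthHeightWidth
  // example above, with reduction factor M: the input element at spatial
  // (x, y) and depth d lands at output depth d / (M*M), at spatial offset
  // ((d % (M*M)) % M, (d % (M*M)) / M) inside the MxM output block. A
  // host-side sketch with illustrative names, not part of the interface:
  //
  //   void DepthToSpaceCoords(int x, int y, int d, int M,
  //                           int* out_x, int* out_y, int* out_d) {
  //     const int within_block = d % (M * M);
  //     *out_d = d / (M * M);
  //     *out_x = x * M + within_block % M;
  //     *out_y = y * M + within_block / M;
  //   }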
1733
1734 // Space to depth is the inverse of depth to space. Space to depth takes each
1735 // non-overlapping M by M patch (in the X and Y dimensions) with depth D of
1736 // the input, and transforms it to a 1 by 1 patch with depth D*M². If the
1737 // input has size (MX, MY, D), the output has size (X, Y, D*M²). The number of
1738 // data elements is not changed.
1739 //
1740 // Example.
1741   //  M=2, Din=2, Xin=4, Yin=4, Xout=2, Yout=2, Dout=8
1742 // DepthHeightWidth layout
1743 // Values within a 'cell' are at different depths and same x & y.
1744 // Input:
1745 // ae bf im jn
1746 // cg dh ko lp
1747 // qu rv y2 z3
1748 // sw tx 04 15
1749 // Output:
1750 // abcdefgh ijklmnop
1751 // qrstuvwx yz012345
1752 //
1753 // sqrt_depth_increase: 'M' in the comment above
1754 virtual bool DoSpaceToDepth(Stream* stream,
1755 const dnn::BatchDescriptor& input_dimensions,
1756 const DeviceMemory<float>& input_data,
1757 const DepthToSpaceLayout& space_to_depth_layout,
1758 const int& sqrt_depth_increase,
1759 DeviceMemory<float>* output_data) {
1760 return false;
1761 }
1762
1763 // Computes the specified operation (e.g. addition or multiplication)
1764 // between corresponding elements in the inputs and stores the result in the
1765 // output element.
1766 // The inputs and output must all have the same dimensions, but may have
1767 // different quantization parameters (min_value and max_value).
1768 //
1769 // Arguments (all borrowed):
1770 // stream: borrowed pointer to the stream that the 'elementwise operation'
1771 // should be enqueued onto.
1772 // operation: The operation to perform.
1773 // input_dimensions: The dimensions of each input.
1774 // input_data: un-owned device memory region which contains the
1775 // input data for each input layer.
1776 // output_dimensions: The dimensions of the output.
1777 // output_data: un-owned device memory region in which to place the
1778 // operation result.
1779 virtual bool DoElementwiseOperate(
1780 Stream* stream, ElementwiseOperation operation,
1781 port::ArraySlice<dnn::BatchDescriptor> input_dimensions,
1782 port::ArraySlice<const DeviceMemory<float>*> input_data,
1783 const dnn::BatchDescriptor& output_dimensions,
1784 DeviceMemory<float>* output_data) = 0;
1785
1786 // Computes the specified operation (e.g. addition or multiplication)
1787 // between corresponding elements in the inputs and stores the result in the
1788 // output element. Each input is multiplied by a scalar constant and the
1789 // result is divided by a scalar constant.
1790 // e.g. To perform Z = 0.9*X + 1.1*Y, set the input multiplicands to 9 and 11
1791 // and the output divisor to 10.
1792 // The inputs and output must all have the same dimensions, but may have
1793 // different quantization parameters (min_value and max_value).
1794 //
1795 // Arguments (all borrowed):
1796 // stream: borrowed pointer to the stream that the 'elementwise operation'
1797 // should be enqueued onto.
1798 // operation: The operation to perform.
1799 // input_multiplicands: Amount to scale each input.
1800 // output_divisor: Amount to divide the output.
1801 // input_dimensions: The dimensions of each input.
1802 // input_data: un-owned device memory region which contains the
1803 // input data for each input layer.
1804 // output_dimensions: The dimensions of the output.
1805 // output_data: un-owned device memory region in which to place the
1806 // operation result.
1807 virtual bool DoElementwiseOperateScaledQuantized(
1808 Stream* stream, ElementwiseOperation operation,
1809 port::ArraySlice<int> input_multiplicands, int output_divisor,
1810 port::ArraySlice<dnn::BatchDescriptor> input_dimensions,
1811 port::ArraySlice<const DeviceMemory<float>*> input_data,
1812 const dnn::BatchDescriptor& output_dimensions,
1813 DeviceMemory<float>* output_data) {
1814 return false;
1815 }
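
  // For reference, a host-side sketch of the arithmetic in the
  // Z = 0.9*X + 1.1*Y example above (multiplicands 9 and 11, divisor 10); the
  // real call additionally handles the quantization parameters. Not part of
  // the interface; names are illustrative.
  //
  //   void ReferenceScaledAdd(const float* x, const float* y, float* z, int n,
  //                           int mult_x, int mult_y, int divisor) {
  //     for (int i = 0; i < n; ++i)
  //       z[i] = (mult_x * x[i] + mult_y * y[i]) / divisor;
  //   }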
1816
1817 // Pads the input with zeros in the X and Y dimensions. The feature_map
1818 // dimension is unchanged.
1819 //
1820 // Arguments (all borrowed):
1821   //  stream: borrowed pointer to the stream that the 'XY pad' operation
1822 // should be enqueued onto.
1823 // dimensions: The dimensions of the input.
1824 // input_data: un-owned device memory region which contains the
1825 // input data for the input layer.
1826 // left_pad: Amount to pad the input on the left.
1827 // right_pad: Amount to pad the input on the right.
1828 // top_pad: Amount to pad the input at the top (low Y).
1829 // bottom_pad: Amount to pad the input at the bottom (high Y).
1830 // output_data: un-owned device memory region in which to place the
1831 // padded result.
1832 virtual bool DoXYPad(Stream* stream, const dnn::BatchDescriptor &dimensions,
1833 const DeviceMemory<float> &input_data,
1834 int64 left_pad, int64 right_pad, int64 top_pad,
1835 int64 bottom_pad, DeviceMemory<float> *output_data) = 0;
1836
1837 // Extracts a slice of the input in the X and Y dimensions. The feature_map
1838 // dimension is unchanged.
1839 //
1840 // Arguments (all borrowed):
1841   //  stream: borrowed pointer to the stream that the 'XY slice' operation
1842 // should be enqueued onto.
1843 // dimensions: The dimensions of the input.
1844 // input_data: un-owned device memory region which contains the
1845 // input data for the input layer.
1846 // left_trim: Amount to cut off the input on the left.
1847 // right_trim: Amount to cut off the input on the right.
1848 // top_trim: Amount to cut off the input at the top (low y).
1849 // bottom_trim: Amount to cut off the input at the bottom (high Y).
1850 // output_data: un-owned device memory region in which to place the
1851   //    sliced result.
1852 virtual bool DoXYSlice(Stream* stream, const dnn::BatchDescriptor &dimensions,
1853 const DeviceMemory<float> &input_data,
1854 int64 left_trim, int64 right_trim, int64 top_trim,
1855 int64 bottom_trim, DeviceMemory<float> *output_data) = 0;
1856
1857 // Grows the input tensor by replicating the X and Y dimensions. The batch and
1858 // depth/feature_map dimensions are unchanged. Currently, the input tensor is
1859 // limited to X=1 and Y=1.
1860 //
1861 // For example, the input has dimensions x=2, y=3, and replicate_x=3,
1862 // replicate_y=2. The diagonal elements of the output would be: [x0y0, x1y1,
1863 // x0y2, x1y0, x0y1, x1y2].
1864 // Here is the example as a picture. input:
1865 // AB
1866 // CD
1867 // EF
1868 // broadcast result:
1869 // ABABAB
1870 // CDCDCD
1871 // EFEFEF
1872 // ABABAB
1873 // CDCDCD
1874 // EFEFEF
1875 //
1876 // Arguments (all borrowed):
1877   //  stream: borrowed pointer to the stream that the 'XY broadcast' operation
1878 // should be enqueued onto.
1879 // dimensions: The dimensions of the input.
1880 // input_data: un-owned device memory region which contains the
1881 // input data for the input layer.
1882 // replicate_x: Amount to replicate the input's X dimension.
1883 // replicate_y: Amount to replicate the input's Y dimension.
1884 // output_data: un-owned device memory region in which to place the
1885   //    broadcast result.
1886 virtual bool DoXYBroadcast(Stream* stream,
1887 const dnn::BatchDescriptor& dimensions,
1888 const DeviceMemory<float>& input_data,
1889 int64 replicate_x, int64 replicate_y,
1890 DeviceMemory<float>* output_data) {
1891 return false;
1892 }
1893
1894 // Enqueues an asynchronous memcpy of the *quantized* output of a layer (that
1895 // is, bytes instead of scaled floats) into 'host_dst' if they are available
1896 // for the underlying DNN implementation. If this quantized output is not
1897 // available, false is returned, which will place 'stream' into an error
1898 // state.
1899 //
1900 // Arguments (all borrowed):
1901 // stream: borrowed pointer to the stream that the 'quantized memcpy'
1902 // operation should be enqueued onto.
1903 // gpu_unquantized_src: the device memory that contains the unquantized data
1904 // -- this data should also have a corresponding quantized representation
1905 // on the device for this operation to succeed.
1906 // mode: Type of quantization of the data to write into host_dst.
1907 // host_dst: un-owned host memory region that is mutated in place,
1908 // it is clobbered by the values in 'gpu_unquantized_src' when the enqueued
1909 // (asynchronous) memcpy operation is performed.
1910 // size: size in bytes of the host_dst host memory region.
1911 virtual bool DoMemcpyD2HQuantized(
1912 Stream* stream, const DeviceMemory<float>& gpu_unquantized_src,
1913 QuantizedActivationMode mode, void* host_dst, int64 size) = 0;
1914
1915 // Enqueues an asynchronous memcpy of 'host_dst' into the *quantized* input
1916 // of a layer (that is, bytes instead of scaled floats) if they are supported
1917 // by the underlying DNN implementation. If this quantized input is not
1918 // supported, false is returned, which will place 'stream' into an error
1919 // state.
1920 //
1921 // Arguments (all borrowed):
1922 // stream: borrowed pointer to the stream that the 'quantized memcpy'
1923 // operation should be enqueued onto.
1924 // host_src: un-owned host memory region that contains the quantized data.
1925 // size: size in bytes of the host_src host memory region.
1926 // mode: Type of quantization of the data to read from host_src.
1927 // gpu_unquantized_dst: the device memory that is clobbered by the values in
1928   //    'host_src' when the enqueued (asynchronous) memcpy operation is
1929   //    performed -- this data should also have a corresponding quantized
1930   //    representation on the device for this operation to succeed.
1932 virtual bool DoMemcpyH2DQuantized(
1933 Stream* stream, const void* host_src, int64 size,
1934 QuantizedActivationMode mode,
1935 DeviceMemory<float>* gpu_unquantized_dst) = 0;
1936
1937 // Enqueues an asynchronous copy of the contents of buffer_src to
1938 // gpu_unquantized_dst.
1939 virtual bool DoCopyHostBuffer2Device(
1940 Stream* stream, HostBuffer* buffer_src,
1941 DeviceMemory<float>* gpu_unquantized_dst) {
1942 return false;
1943 }
1944
1945 // Enqueues an asynchronous copy of the contents of gpu_unquantized_src to
1946 // buffer_dst.
1947 virtual bool DoCopyDevice2HostBuffer(
1948 Stream* stream, const DeviceMemory<float>& gpu_unquantized_src,
1949 HostBuffer* buffer_dst) {
1950 return false;
1951 }
1952
1953 // Create an RNN descriptor based on model shapes and configurations.
1954 // The caller retains the ownership of the descriptor.
1955 //
1956 // Arguments:
1957   //  num_layers: the number of layers for an RNN model.
1958 // hidden_size: the size of the hidden state.
1959 // input_size: the size of the input state.
1960 // input_mode: an enum to specify whether a linear transformation is added
1961 // after the input state. If input_size is different from hidden_size, this
1962 // is required.
1963 // direction_mode: an enum to specify whether this model is unidirectional or
1964 // bidirectional.
1965 // rnn_mode: an enum to specify the type of model to build.
1966 // data_type: an enum to specify the data types used in this model.
1967 // dropout: the dropout threshold between layers. When it is 0., no dropout
1968 // is added.
1969 // seed: a seed for initializing the dropout layers.
1970   //  state_allocator: a memory allocator that will be used to store the
1971   //    state for the dropout layer. The user has to maintain the memory
1972   //    until the model is no longer in use.
1973 virtual port::StatusOr<std::unique_ptr<dnn::RnnDescriptor>>
1974 createRnnDescriptor(int num_layers, int hidden_size, int input_size,
1975 dnn::RnnInputMode input_mode,
1976 dnn::RnnDirectionMode direction_mode,
1977 dnn::RnnMode rnn_mode, dnn::DataType data_type,
1978 float dropout, uint64 seed,
1979 ScratchAllocator* state_allocator) {
1980 return port::Status{port::error::UNIMPLEMENTED,
1981 "createRnnDescriptor is unimplemented"};
1982 }
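
  // A minimal usage sketch, not part of the interface. The StatusOr accessors
  // and the placeholder enum values below are assumptions made only for
  // illustration; consult lib/statusor.h and the Rnn* enums in this header for
  // the exact spellings, and pass a real state_allocator when dropout is used.
  //
  //   dnn::RnnInputMode input_mode = /* a dnn::RnnInputMode value */;
  //   dnn::RnnDirectionMode direction_mode = /* a dnn::RnnDirectionMode value */;
  //   dnn::RnnMode rnn_mode = /* a dnn::RnnMode value, e.g. LSTM */;
  //   auto rnn_desc_or = dnn_support->createRnnDescriptor(
  //       /*num_layers=*/2, /*hidden_size=*/128, /*input_size=*/64, input_mode,
  //       direction_mode, rnn_mode, dnn::DataType::kFloat, /*dropout=*/0.f,
  //       /*seed=*/0, /*state_allocator=*/nullptr);
  //   if (!rnn_desc_or.ok()) {
  //     // Handle rnn_desc_or.status().
  //   }
  //   std::unique_ptr<dnn::RnnDescriptor> rnn_desc =
  //       rnn_desc_or.ConsumeValueOrDie();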
1983
1984   // Create an RNN sequence descriptor that specifies either the input or output
1985 // sequence. The caller retains the ownership of the returned descriptor.
1986 //
1987 // Arguments:
1988 // seq_length: the length of the sequence.
1989 // batch_size: the size of a minibatch.
1990 // data_size: the size of the state.
1991 // data_type: an enum to specify the type for the underlying data.
1992 virtual port::StatusOr<std::unique_ptr<dnn::RnnSequenceTensorDescriptor>>
1993 createRnnSequenceTensorDescriptor(int seq_length, int batch_size,
1994 int data_size, dnn::DataType data_type) {
1995 return port::Status{port::error::UNIMPLEMENTED,
1996 "createRnnSequenceTensorDescriptor is unimplemented"};
1997 }
1998
1999 // Create an RNN state descriptor that specifies the input or hidden state.
2000 // The caller retains the ownership of the returned descriptor.
2001 virtual port::StatusOr<std::unique_ptr<dnn::RnnStateTensorDescriptor>>
2002 createRnnStateTensorDescriptor(int num_layer, int batch_size, int data_size,
2003 dnn::DataType data_type) {
2004 return port::Status{port::error::UNIMPLEMENTED,
2005 "createRnnStateTensorDescriptor is unimplemented"};
2006 }
2007
2008 // Enqueue a forward operation of the RNN model onto the stream.
2009 //
2010 // Arguments:
2011 // stream: pointer to the stream where this operation should be enqueued to.
2012   //  rnn_desc: an RNN descriptor created by createRnnDescriptor.
2013 // input_desc: descriptor for the input sequence.
2014 // input_data: the device memory region that contains the input data.
2015 // input_h_desc: descriptor for the input "h" state.
2016 // input_h_data: the device memory region that contains the input "h" data.
2017 // input_c_desc: descriptor for the input "c" state.
2018 // input_c_data: the device memory region that contains the input "c" data.
2019 // This must be specified for LSTM models.
2020 // params: the device memory region that contains the parameters used in this
2021 // model.
2022 // output_desc: descriptor for the output sequence.
2023 // output_data: the memory region that stores the output sequence data.
2024 // output_h_desc: descriptor for the output "h" state.
2025 // output_h_data: the memory region that stores the output "h" data.
2026 // output_c_desc: descriptor for the output "c" state.
2027 // output_c_data: the memory region that stores the output "c" data. This
2028 // must be specified for LSTM models.
2029   //  is_training: whether this is used in training or inference. That decides
2030   //    whether reserve_space data needs to be produced.
2031   //  reserve_space_allocator: if "is_training" is true, a memory allocator
2032   //    to create memory that holds the produced reserve_space. The caller
2033   //    retains the data and feeds it to the backward pass.
2034   //  workspace_allocator: an allocator to create temporary workspace used in
2035   //    this kernel. The caller is responsible for retaining the memory long
2036   //    enough for this operation's lifespan, and may recycle it afterwards.
2037 virtual bool DoRnnForward(Stream* stream, const dnn::RnnDescriptor& rnn_desc,
2038 const dnn::RnnSequenceTensorDescriptor& input_desc,
2039 const DeviceMemory<Eigen::half>& input_data,
2040 const dnn::RnnStateTensorDescriptor& input_h_desc,
2041 const DeviceMemory<Eigen::half>& input_h_data,
2042 const dnn::RnnStateTensorDescriptor& input_c_desc,
2043 const DeviceMemory<Eigen::half>& input_c_data,
2044 const DeviceMemory<Eigen::half>& params,
2045 const dnn::RnnSequenceTensorDescriptor& output_desc,
2046 DeviceMemory<Eigen::half>* output_data,
2047 const dnn::RnnStateTensorDescriptor& output_h_desc,
2048 DeviceMemory<Eigen::half>* output_h_data,
2049 const dnn::RnnStateTensorDescriptor& output_c_desc,
2050 DeviceMemory<Eigen::half>* output_c_data,
2051 bool is_training,
2052 ScratchAllocator* reserve_space_allocator,
2053 ScratchAllocator* workspace_allocator) {
2054 return false;
2055 }
2056
2057 virtual bool DoRnnForward(Stream* stream, const dnn::RnnDescriptor& rnn_desc,
2058 const dnn::RnnSequenceTensorDescriptor& input_desc,
2059 const DeviceMemory<float>& input_data,
2060 const dnn::RnnStateTensorDescriptor& input_h_desc,
2061 const DeviceMemory<float>& input_h_data,
2062 const dnn::RnnStateTensorDescriptor& input_c_desc,
2063 const DeviceMemory<float>& input_c_data,
2064 const DeviceMemory<float>& params,
2065 const dnn::RnnSequenceTensorDescriptor& output_desc,
2066 DeviceMemory<float>* output_data,
2067 const dnn::RnnStateTensorDescriptor& output_h_desc,
2068 DeviceMemory<float>* output_h_data,
2069 const dnn::RnnStateTensorDescriptor& output_c_desc,
2070 DeviceMemory<float>* output_c_data,
2071 bool is_training,
2072 ScratchAllocator* reserve_space_allocator,
2073 ScratchAllocator* workspace_allocator) {
2074 return false;
2075 }
2076
2077 virtual bool DoRnnForward(Stream* stream, const dnn::RnnDescriptor& rnn_desc,
2078 const dnn::RnnSequenceTensorDescriptor& input_desc,
2079 const DeviceMemory<double>& input_data,
2080 const dnn::RnnStateTensorDescriptor& input_h_desc,
2081 const DeviceMemory<double>& input_h_data,
2082 const dnn::RnnStateTensorDescriptor& input_c_desc,
2083 const DeviceMemory<double>& input_c_data,
2084 const DeviceMemory<double>& params,
2085 const dnn::RnnSequenceTensorDescriptor& output_desc,
2086 DeviceMemory<double>* output_data,
2087 const dnn::RnnStateTensorDescriptor& output_h_desc,
2088 DeviceMemory<double>* output_h_data,
2089 const dnn::RnnStateTensorDescriptor& output_c_desc,
2090 DeviceMemory<double>* output_c_data,
2091 bool is_training,
2092 ScratchAllocator* reserve_space_allocator,
2093 ScratchAllocator* workspace_allocator) {
2094 return false;
2095 }
2096 // Enqueue a backward operation of the RNN model onto the stream.
2097 //
2098 // Arguments:
2099 // stream: pointer to the stream where this operation should be enqueued to.
2100   //  rnn_desc: an RNN descriptor created by createRnnDescriptor.
2101 // input_desc: descriptor for the input sequence.
2102 // input_data: the device memory region that contains the input data.
2103 // input_h_desc: descriptor for the input "h" state.
2104 // input_h_data: the device memory region that contains the input "h" data.
2105 // input_c_desc: descriptor for the input "c" state.
2106 // input_c_data: the device memory region that contains the input "c" data.
2107 // This must be specified for LSTM models.
2108 // params: the device memory region that contains the parameters used in this
2109 // model.
2110 // output_desc: descriptor for the output sequence.
2111 // output_data: the memory region that stores the output sequence data.
2112 // output_h_desc: descriptor for the output "h" state.
2113 // output_h_data: the memory region that stores the output "h" data.
2114 // output_c_desc: descriptor for the output "c" state.
2115 // output_c_data: the memory region that stores the output "c" data. This
2116 // must be specified for LSTM models.
2117 // output_backprop_data: the device memory region that contains the backprop
2118 // to the output sequence.
2119 // output_h_backprop_data: the device memory region that contains the
2120 // backprop to the output "h" state.
2121 // output_c_backprop_data: the device memory region that contains the
2122 // backprop to the output "c" state.
2123 // input_backprop_data: the device memory region that stores the backprop
2124 // to the input sequence.
2125 // input_h_backprop_data: the device memory region that stores the backprop
2126 // to the input "h" state.
2127 // input_c_backprop_data: the device memory region that stores the backprop
2128 // to the input "c" state.
2129 // params_backprop_data: the device memory region that stores the backprop
2130 // to the parameters.
2131 // reserve_space_data: the reserve_space data that is produced by the forward
2132 // operation. This memory region could be modified by this operation.
2133 // workspace_allocator: a memory allocator that creates the temporary
2134 // workspace memory used by this operation. The caller is responsible for
2135   //    keeping the memory alive long enough for this operation, and may
2136   //    recycle it afterwards.
2137 virtual bool DoRnnBackward(
2138 Stream* stream, const dnn::RnnDescriptor& rnn_desc,
2139 const dnn::RnnSequenceTensorDescriptor& input_desc,
2140 const DeviceMemory<Eigen::half>& input_data,
2141 const dnn::RnnStateTensorDescriptor& input_h_desc,
2142 const DeviceMemory<Eigen::half>& input_h_data,
2143 const dnn::RnnStateTensorDescriptor& input_c_desc,
2144 const DeviceMemory<Eigen::half>& input_c_data,
2145 const DeviceMemory<Eigen::half>& params,
2146 const dnn::RnnSequenceTensorDescriptor& output_desc,
2147 const DeviceMemory<Eigen::half>& output_data,
2148 const dnn::RnnStateTensorDescriptor& output_h_desc,
2149 const DeviceMemory<Eigen::half>& output_h_data,
2150 const dnn::RnnStateTensorDescriptor& output_c_desc,
2151 const DeviceMemory<Eigen::half>& output_c_data,
2152 const DeviceMemory<Eigen::half>& output_backprop_data,
2153 const DeviceMemory<Eigen::half>& output_h_backprop_data,
2154 const DeviceMemory<Eigen::half>& output_c_backprop_data,
2155 DeviceMemory<Eigen::half>* input_backprop_data,
2156 DeviceMemory<Eigen::half>* input_h_backprop_data,
2157 DeviceMemory<Eigen::half>* input_c_backprop_data,
2158 DeviceMemory<Eigen::half>* params_backprop_data,
2159 DeviceMemory<uint8>* reserve_space_data,
2160 ScratchAllocator* workspace_allocator) {
2161 return false;
2162 }
2163
2164 virtual bool DoRnnBackward(
2165 Stream* stream, const dnn::RnnDescriptor& rnn_desc,
2166 const dnn::RnnSequenceTensorDescriptor& input_desc,
2167 const DeviceMemory<float>& input_data,
2168 const dnn::RnnStateTensorDescriptor& input_h_desc,
2169 const DeviceMemory<float>& input_h_data,
2170 const dnn::RnnStateTensorDescriptor& input_c_desc,
2171 const DeviceMemory<float>& input_c_data,
2172 const DeviceMemory<float>& params,
2173 const dnn::RnnSequenceTensorDescriptor& output_desc,
2174 const DeviceMemory<float>& output_data,
2175 const dnn::RnnStateTensorDescriptor& output_h_desc,
2176 const DeviceMemory<float>& output_h_data,
2177 const dnn::RnnStateTensorDescriptor& output_c_desc,
2178 const DeviceMemory<float>& output_c_data,
2179 const DeviceMemory<float>& output_backprop_data,
2180 const DeviceMemory<float>& output_h_backprop_data,
2181 const DeviceMemory<float>& output_c_backprop_data,
2182 DeviceMemory<float>* input_backprop_data,
2183 DeviceMemory<float>* input_h_backprop_data,
2184 DeviceMemory<float>* input_c_backprop_data,
2185 DeviceMemory<float>* params_backprop_data,
2186 DeviceMemory<uint8>* reserve_space_data,
2187 ScratchAllocator* workspace_allocator) {
2188 return false;
2189 }
2190
2191 virtual bool DoRnnBackward(
2192 Stream* stream, const dnn::RnnDescriptor& rnn_desc,
2193 const dnn::RnnSequenceTensorDescriptor& input_desc,
2194 const DeviceMemory<double>& input_data,
2195 const dnn::RnnStateTensorDescriptor& input_h_desc,
2196 const DeviceMemory<double>& input_h_data,
2197 const dnn::RnnStateTensorDescriptor& input_c_desc,
2198 const DeviceMemory<double>& input_c_data,
2199 const DeviceMemory<double>& params,
2200 const dnn::RnnSequenceTensorDescriptor& output_desc,
2201 const DeviceMemory<double>& output_data,
2202 const dnn::RnnStateTensorDescriptor& output_h_desc,
2203 const DeviceMemory<double>& output_h_data,
2204 const dnn::RnnStateTensorDescriptor& output_c_desc,
2205 const DeviceMemory<double>& output_c_data,
2206 const DeviceMemory<double>& output_backprop_data,
2207 const DeviceMemory<double>& output_h_backprop_data,
2208 const DeviceMemory<double>& output_c_backprop_data,
2209 DeviceMemory<double>* input_backprop_data,
2210 DeviceMemory<double>* input_h_backprop_data,
2211 DeviceMemory<double>* input_c_backprop_data,
2212 DeviceMemory<double>* params_backprop_data,
2213 DeviceMemory<uint8>* reserve_space_data,
2214 ScratchAllocator* workspace_allocator) {
2215 return false;
2216 }
2217
2218 // Transforms a tensor into another tensor with a different layout and/or data
2219 // type.
2220 //
2221 // Arguments:
2222 // stream: pointer to the stream where this operation should be enqueued to.
2223 // input_desc: specifies the shape and the data layout of the input tensor.
2224 // input_type: the data type of the input tensor.
2225 // input_data: the device memory region that contains the input tensor.
2226 // output_desc: specifies the shape and the data layout of the output tensor.
2227 // output_type: the data type of the output tensor.
2228 // scale: an element-wise scaling factor to apply.
2229 // output_data: the device memory region that contains the output tensor.
2230 virtual bool DoTransformTensor(Stream* stream,
2231 const dnn::BatchDescriptor& input_desc,
2232 dnn::DataType input_type,
2233 const DeviceMemoryBase& input_data,
2234 const dnn::BatchDescriptor& output_desc,
2235 dnn::DataType output_type, float scale,
2236 DeviceMemoryBase* output_data) {
2237 return false;
2238 }
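
  // For reference, a host-side sketch of one common use of this call:
  // converting a batch-Y-X-depth (kBatchYXDepth) buffer to batch-depth-Y-X
  // (kBatchDepthYX) while applying the element-wise scale. Not part of the
  // interface; names are illustrative.
  //
  //   void ReferenceNhwcToNchw(const float* in, float* out, int n, int h,
  //                            int w, int c, float scale) {
  //     for (int b = 0; b < n; ++b)
  //       for (int y = 0; y < h; ++y)
  //         for (int x = 0; x < w; ++x)
  //           for (int d = 0; d < c; ++d)
  //             out[((b * c + d) * h + y) * w + x] =
  //                 scale * in[((b * h + y) * w + x) * c + d];
  //   }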
2239
2240 private:
2241 SE_DISALLOW_COPY_AND_ASSIGN(DnnSupport);
2242 };
2243
2244 } // namespace dnn
2245 } // namespace gputools
2246 } // namespace perftools
2247
2248 #endif // TENSORFLOW_STREAM_EXECUTOR_DNN_H_
2249