/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/delegates/gpu/common/tasks/depthwise_conv_3x3.h"

#include <string>
#include <utility>

#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"

namespace tflite {
namespace gpu {

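// DepthwiseConv3x3 generates a specialized kernel for 3x3 depthwise
// convolution with stride 1 and padding 1. Each work item computes a 2x2
// block of output pixels for one depth slice, so the nine filter taps and
// the bias are fetched once and reused across all four results.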
DepthwiseConv3x3::DepthwiseConv3x3(const OperationDef& definition,
                                   bool weights_are_buffer,
                                   bool local_mem_uploads,
                                   const GpuInfo& gpu_info)
    : GPUOperation(definition), local_mem_uploads_(local_mem_uploads) {
  work_group_size_ = int3(8, 4, 1);
  code_ = GenerateDepthwiseConvCode(definition_, weights_are_buffer,
                                    local_mem_uploads_);

  if (definition_.precision == CalculationsPrecision::F16 &&
      gpu_info.IsPowerVR()) {
    compiler_options_.push_back(CompilerOptions::kClPowervrFp16);
  }
}

DepthwiseConv3x3::DepthwiseConv3x3(DepthwiseConv3x3&& operation)
    : GPUOperation(std::move(operation)),
      local_mem_uploads_(operation.local_mem_uploads_) {}

DepthwiseConv3x3& DepthwiseConv3x3::operator=(DepthwiseConv3x3&& operation) {
  if (this != &operation) {
    std::swap(local_mem_uploads_, operation.local_mem_uploads_);
    GPUOperation::operator=(std::move(operation));
  }
  return *this;
}

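// Builds the kernel source as a string. MAIN_FUNCTION, GLOBAL_ID_*, FLT4 and
// the ACCUM_FLT4 family are backend-neutral macros expanded later by the
// TFLite GPU code generator; "$0" is substituted with the argument list.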
std::string DepthwiseConv3x3::GenerateDepthwiseConvCode(
    const OperationDef& op_def, bool weights_are_buffer,
    bool local_mem_uploads) {
  auto src_desc = op_def.src_tensors[0];
  src_desc.SetAddressMode(AddressMode::kZero);
  AddSrcTensor("src_tensor", src_desc);
  AddDstTensor("dst_tensor", op_def.dst_tensors[0]);

  const auto src_tensor_type = op_def.src_tensors[0].storage_type;

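  // Texture-backed storage returns zero for out-of-bounds reads via the
  // address mode set above; plain and image buffers have no such hardware
  // clamping, so boundary masks are emitted into the kernel instead.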
  const bool manual_clamp = src_tensor_type == TensorStorageType::BUFFER ||
                            src_tensor_type == TensorStorageType::IMAGE_BUFFER;

  std::string c;
  if (local_mem_uploads) {
    c += "__attribute__((reqd_work_group_size(8, 4, 1)))\n";
  }
  c += "MAIN_FUNCTION($0) {\n";
  if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
    c += "  int linear_id = GLOBAL_ID_0;\n";
    c += "  int X = (linear_id / args.dst_tensor.Batch()) * 2;\n";
    c += "  int B = linear_id % args.dst_tensor.Batch();\n";
    c += "  args.dst_tensor.SetBatchRef(B);\n";
    c += "  args.src_tensor.SetBatchRef(B);\n";
  } else {
    c += "  int X = GLOBAL_ID_0 * 2;\n";
  }
  c += "  int Y = GLOBAL_ID_1 * 2;\n";
  c += "  int S = GLOBAL_ID_2;\n";
  c += "   ACCUM_FLT4 r0 = INIT_ACCUM_FLT4(0.0f);\n";
  c += "   ACCUM_FLT4 r1 = INIT_ACCUM_FLT4(0.0f);\n";
  c += "   ACCUM_FLT4 r2 = INIT_ACCUM_FLT4(0.0f);\n";
  c += "   ACCUM_FLT4 r3 = INIT_ACCUM_FLT4(0.0f);\n";
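  // With local-memory weight uploads, every thread in the work group must
  // reach the async_work_group_copy below, so the out-of-range check is
  // deferred until after the copy instead of returning early here.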
  if (!local_mem_uploads) {
    c += "  if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() "
         "|| S >= args.dst_tensor.Slices()) { \n";
    c += "    return; \n";
    c += "  } \n";
  }
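  // Buffer-resident weights for slice S are 10 consecutive FLT4 values:
  // the nine 3x3 filter taps followed by the bias.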
  if (local_mem_uploads) {
    c += "  __local FLT4 f[10];\n";
    c += "  event_t e = async_work_group_copy(f, args.weights.GetPtr() + S * "
         "10, 10, 0);\n";
    c += "  wait_group_events(1, &e);\n";
  } else if (weights_are_buffer) {
    c += "  __global FLT4* f = args.weights.GetPtr() + S * 10;\n";
  }
  c += "  FLT4 s0;\n";
  c += "  FLT4 s1;\n";
  c += "  FLT4 s2;\n";
  c += "  FLT4 s3;\n";
  std::string W[9] = {"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8"};
  std::string bias = "bias";
  std::string xc[4] = {"X - 1", "X", "X + 1", "X + 2"};
  std::string yc[4] = {"Y - 1", "Y", "Y + 1", "Y + 2"};
  if (!weights_are_buffer) {
    c += "   FLT4 f0 = args.weights.Read(0, S);\n";
    c += "   FLT4 f1 = args.weights.Read(1, S);\n";
    c += "   FLT4 f2 = args.weights.Read(2, S);\n";
    c += "   FLT4 f3 = args.weights.Read(3, S);\n";
    c += "   FLT4 f4 = args.weights.Read(4, S);\n";
    c += "   FLT4 f5 = args.weights.Read(5, S);\n";
    c += "   FLT4 f6 = args.weights.Read(6, S);\n";
    c += "   FLT4 f7 = args.weights.Read(7, S);\n";
    c += "   FLT4 f8 = args.weights.Read(8, S);\n";
  }
  if (manual_clamp) {
    c += "  int x0 = X - 1;\n";
    c += "  int x1 = X;\n";
    c += "  int x2 = X + 1;\n";
    c += "  int x3 = X + 2;\n";
    c += "  int y0 = Y - 1;\n";
    c += "  int y1 = Y;\n";
    c += "  int y2 = Y + 1;\n";
    c += "  int y3 = Y + 2;\n";
    c += "  bool x0_in = x0 >= 0 && x0 < args.dst_tensor.Width();\n";
    c += "  bool x1_in = x1 >= 0 && x1 < args.dst_tensor.Width();\n";
    c += "  bool x2_in = x2 >= 0 && x2 < args.dst_tensor.Width();\n";
    c += "  bool x3_in = x3 >= 0 && x3 < args.dst_tensor.Width();\n";
    c += "  bool y0_in = y0 >= 0 && y0 < args.dst_tensor.Height();\n";
    c += "  bool y1_in = y1 >= 0 && y1 < args.dst_tensor.Height();\n";
    c += "  bool y2_in = y2 >= 0 && y2 < args.dst_tensor.Height();\n";
    c += "  bool y3_in = y3 >= 0 && y3 < args.dst_tensor.Height();\n";
    c += "  x0 = clamp(x0, 0, args.dst_tensor.Width() - 1);\n";
    c += "  x1 = clamp(x1, 0, args.dst_tensor.Width() - 1);\n";
    c += "  x2 = clamp(x2, 0, args.dst_tensor.Width() - 1);\n";
    c += "  x3 = clamp(x3, 0, args.dst_tensor.Width() - 1);\n";
    c += "  y0 = clamp(y0, 0, args.dst_tensor.Height() - 1);\n";
    c += "  y1 = clamp(y1, 0, args.dst_tensor.Height() - 1);\n";
    c += "  y2 = clamp(y2, 0, args.dst_tensor.Height() - 1);\n";
    c += "  y3 = clamp(y3, 0, args.dst_tensor.Height() - 1);\n";
    if (src_tensor_type == TensorStorageType::BUFFER) {
      c += "  __global FLT4* src_loc = "
           "args.src_tensor.GetPtrWithSliceOffset(S);\n";
    }
    xc[0] = "x0";
    xc[1] = "x1";
    xc[2] = "x2";
    xc[3] = "x3";
    yc[0] = "y0";
    yc[1] = "y1";
    yc[2] = "y2";
    yc[3] = "y3";
  }
  if (local_mem_uploads || weights_are_buffer) {
    W[0] = "f[0]";
    W[1] = "f[1]";
    W[2] = "f[2]";
    W[3] = "f[3]";
    W[4] = "f[4]";
    W[5] = "f[5]";
    W[6] = "f[6]";
    W[7] = "f[7]";
    W[8] = "f[8]";
    bias = "f[9]";
  }
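  // Emits loads of one row of the 4-wide input window into s0..s3; for
  // buffer storage the x*_in/y*_in masks zero out taps that fall outside
  // the tensor.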
  auto read_4x_line = [&](int y) {
    if (src_tensor_type == TensorStorageType::BUFFER) {
      const std::string y_in = "y" + std::to_string(y) + "_in";
      c += "    s0 = src_loc[args.src_tensor.GetWHOffset(" + xc[0] + ", " +
           yc[y] + ")] * INIT_FLT(x0_in && " + y_in + ");\n";
      c += "    s1 = src_loc[args.src_tensor.GetWHOffset(" + xc[1] + ", " +
           yc[y] + ")] * INIT_FLT(x1_in && " + y_in + ");\n";
      c += "    s2 = src_loc[args.src_tensor.GetWHOffset(" + xc[2] + ", " +
           yc[y] + ")] * INIT_FLT(x2_in && " + y_in + ");\n";
      c += "    s3 = src_loc[args.src_tensor.GetWHOffset(" + xc[3] + ", " +
           yc[y] + ")] * INIT_FLT(x3_in && " + y_in + ");\n";
    } else if (src_tensor_type == TensorStorageType::IMAGE_BUFFER) {
      const std::string y_in = "y" + std::to_string(y) + "_in";
      c += "    s0 = args.src_tensor.Read(" + xc[0] + ", " + yc[y] +
           ", S) * INIT_FLT(x0_in && " + y_in + ");\n";
      c += "    s1 = args.src_tensor.Read(" + xc[1] + ", " + yc[y] +
           ", S) * INIT_FLT(x1_in && " + y_in + ");\n";
      c += "    s2 = args.src_tensor.Read(" + xc[2] + ", " + yc[y] +
           ", S) * INIT_FLT(x2_in && " + y_in + ");\n";
      c += "    s3 = args.src_tensor.Read(" + xc[3] + ", " + yc[y] +
           ", S) * INIT_FLT(x3_in && " + y_in + ");\n";
    } else {
      c += "    s0 = args.src_tensor.Read(" + xc[0] + ", " + yc[y] + ", S);\n";
      c += "    s1 = args.src_tensor.Read(" + xc[1] + ", " + yc[y] + ", S);\n";
      c += "    s2 = args.src_tensor.Read(" + xc[2] + ", " + yc[y] + ", S);\n";
      c += "    s3 = args.src_tensor.Read(" + xc[3] + ", " + yc[y] + ", S);\n";
    }
  };
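  // Accumulate the 3x3 filter over a 4x4 input window: each input row is
  // loaded once via read_4x_line and folded into every output row whose
  // receptive field covers it.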
  c += "  {\n";
  read_4x_line(0);
  c += "    r0 += TO_ACCUM_TYPE(" + W[0] + " * s0);\n";
  c += "    r0 += TO_ACCUM_TYPE(" + W[1] + " * s1);\n";
  c += "    r1 += TO_ACCUM_TYPE(" + W[0] + " * s1);\n";
  c += "    r0 += TO_ACCUM_TYPE(" + W[2] + " * s2);\n";
  c += "    r1 += TO_ACCUM_TYPE(" + W[1] + " * s2);\n";
  c += "    r1 += TO_ACCUM_TYPE(" + W[2] + " * s3);\n";
  c += "  }\n";
  c += "  {\n";
  read_4x_line(1);
  c += "    r0 += TO_ACCUM_TYPE(" + W[3] + " * s0);\n";
  c += "    r2 += TO_ACCUM_TYPE(" + W[0] + " * s0);\n";
  c += "    r0 += TO_ACCUM_TYPE(" + W[4] + " * s1);\n";
  c += "    r1 += TO_ACCUM_TYPE(" + W[3] + " * s1);\n";
  c += "    r2 += TO_ACCUM_TYPE(" + W[1] + " * s1);\n";
  c += "    r3 += TO_ACCUM_TYPE(" + W[0] + " * s1);\n";
  c += "    r0 += TO_ACCUM_TYPE(" + W[5] + " * s2);\n";
  c += "    r1 += TO_ACCUM_TYPE(" + W[4] + " * s2);\n";
  c += "    r2 += TO_ACCUM_TYPE(" + W[2] + " * s2);\n";
  c += "    r3 += TO_ACCUM_TYPE(" + W[1] + " * s2);\n";
  c += "    r1 += TO_ACCUM_TYPE(" + W[5] + " * s3);\n";
  c += "    r3 += TO_ACCUM_TYPE(" + W[2] + " * s3);\n";
  c += "  }\n";
  c += "  {\n";
  read_4x_line(2);
  c += "    r0 += TO_ACCUM_TYPE(" + W[6] + " * s0);\n";
  c += "    r2 += TO_ACCUM_TYPE(" + W[3] + " * s0);\n";
  c += "    r0 += TO_ACCUM_TYPE(" + W[7] + " * s1);\n";
  c += "    r1 += TO_ACCUM_TYPE(" + W[6] + " * s1);\n";
  c += "    r2 += TO_ACCUM_TYPE(" + W[4] + " * s1);\n";
  c += "    r3 += TO_ACCUM_TYPE(" + W[3] + " * s1);\n";
  c += "    r0 += TO_ACCUM_TYPE(" + W[8] + " * s2);\n";
  c += "    r1 += TO_ACCUM_TYPE(" + W[7] + " * s2);\n";
  c += "    r2 += TO_ACCUM_TYPE(" + W[5] + " * s2);\n";
  c += "    r3 += TO_ACCUM_TYPE(" + W[4] + " * s2);\n";
  c += "    r1 += TO_ACCUM_TYPE(" + W[8] + " * s3);\n";
  c += "    r3 += TO_ACCUM_TYPE(" + W[5] + " * s3);\n";
  c += "  }\n";
  c += "  {\n";
  read_4x_line(3);
  c += "    r2 += TO_ACCUM_TYPE(" + W[6] + " * s0);\n";
  c += "    r2 += TO_ACCUM_TYPE(" + W[7] + " * s1);\n";
  c += "    r3 += TO_ACCUM_TYPE(" + W[6] + " * s1);\n";
  c += "    r2 += TO_ACCUM_TYPE(" + W[8] + " * s2);\n";
  c += "    r3 += TO_ACCUM_TYPE(" + W[7] + " * s2);\n";
  c += "    r3 += TO_ACCUM_TYPE(" + W[8] + " * s3);\n";
  c += "  }\n";
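  // Add the bias (weights index 9) to all four results.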
  if (!weights_are_buffer) {
    c += "   FLT4 bias = args.weights.Read(9, S);\n";
  }
  c += "  r0 += TO_ACCUM_TYPE(" + bias + ");\n";
  c += "  r1 += TO_ACCUM_TYPE(" + bias + ");\n";
  c += "  r2 += TO_ACCUM_TYPE(" + bias + ");\n";
  c += "  r3 += TO_ACCUM_TYPE(" + bias + ");\n";
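  // Deferred bounds check for the local-memory path (see the note above).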
  if (local_mem_uploads) {
    c += "  if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() "
         "|| S >= args.dst_tensor.Slices()) { \n";
    c += "    return; \n";
    c += "  } \n";
  }
  c += "  if(X + 0 < args.dst_tensor.Width() && Y + 0 < "
       "args.dst_tensor.Height()) {\n";
  c += "    FLT4 result = TO_FLT4(r0);\n";
  c += "    args.dst_tensor.Write(result, X + 0, Y + 0, S);\n";
  c += "  }\n";
  c += "  if(X + 1 < args.dst_tensor.Width() && Y + 0 < "
       "args.dst_tensor.Height()) {\n";
  c += "    FLT4 result = TO_FLT4(r1);\n";
  c += "    args.dst_tensor.Write(result, X + 1, Y + 0, S);\n";
  c += "  }\n";
  c += "  if(X + 0 < args.dst_tensor.Width() && Y + 1 < "
       "args.dst_tensor.Height()) {\n";
  c += "    FLT4 result = TO_FLT4(r2);\n";
  c += "    args.dst_tensor.Write(result, X + 0, Y + 1, S);\n";
  c += "  }\n";
  c += "  if(X + 1 < args.dst_tensor.Width() && Y + 1 < "
       "args.dst_tensor.Height()) {\n";
  c += "    FLT4 result = TO_FLT4(r3);\n";
  c += "    args.dst_tensor.Write(result, X + 1, Y + 1, S);\n";
  c += "  }\n";
  c += "}\n";

  return c;
}

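// Each work item covers a 2x2 output block, so the X and Y grid dimensions
// are the output width and height divided by two (rounded up).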
int3 DepthwiseConv3x3::GetGridSize() const {
  const int grid_x = DivideRoundUp(dst_[0]->Width(), 2) * dst_[0]->Batch();
  const int grid_y = DivideRoundUp(dst_[0]->Height(), 2);
  const int grid_z = dst_[0]->Slices();
  return int3(grid_x, grid_y, grid_z);
}

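// With local-memory uploads the kernel is compiled with a fixed
// reqd_work_group_size of 8x4x1, so tuning must not pick any other size.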
void DepthwiseConv3x3::GetPossibleKernelWorkGroups(
    TuningType tuning_type, const GpuInfo& gpu_info,
    const KernelInfo& kernel_info, std::vector<int3>* work_groups) const {
  if (local_mem_uploads_) {
    work_groups->push_back(work_group_size_);
  } else {
    GetPossibleWorkGroups(tuning_type, gpu_info, kernel_info, grid_size_,
                          work_groups);
  }
}

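// The specialized kernel covers exactly the 3x3, stride-1, dilation-1 case
// with channel multiplier 1 and one pixel of padding on every side.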
bool IsDepthwiseConv3x3Supported(const DepthwiseConvolution2DAttributes& attr) {
  return attr.weights.shape.o == 1 && attr.dilations.w == 1 &&
         attr.dilations.h == 1 && attr.weights.shape.w == 3 &&
         attr.weights.shape.h == 3 && attr.strides.w == 1 &&
         attr.strides.h == 1 && attr.padding.prepended.w == 1 &&
         attr.padding.prepended.h == 1 && attr.padding.appended.w == 1 &&
         attr.padding.appended.h == 1;
}

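// Weights live in a buffer when images are unsupported or on GPUs where
// buffer reads are preferable for this kernel; PowerVR additionally stages
// them through local memory. Typical use (a sketch; callers should gate on
// the predicate above):
//   if (IsDepthwiseConv3x3Supported(attr)) {
//     DepthwiseConv3x3 op = CreateDepthwiseConv3x3(gpu_info, definition, attr);
//   }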
DepthwiseConv3x3 CreateDepthwiseConv3x3(
    const GpuInfo& gpu_info, const OperationDef& definition,
    const DepthwiseConvolution2DAttributes& attr) {
  bool weights_are_buffer = !gpu_info.SupportsImages() ||
                            gpu_info.IsPowerVR() || gpu_info.IsMali() ||
                            gpu_info.IsApple();
  bool local_mem_uploads = weights_are_buffer && gpu_info.IsPowerVR();
  DepthwiseConv3x3 result(definition, weights_are_buffer, local_mem_uploads,
                          gpu_info);
  result.UploadWeightsAndBiases(attr.weights, attr.bias, weights_are_buffer);
  return result;
}

}  // namespace gpu
}  // namespace tflite