/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/delegates/gpu/common/tasks/depthwise_conv_3x3.h"

#include <string>
#include <utility>

#include "absl/strings/match.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"

namespace tflite {
namespace gpu {

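// Generates the kernel source at construction time with a fixed 8x4x1 work
// group; on PowerVR with F16 calculations the CL fast relaxed math compiler
// option is also requested.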
DepthwiseConv3x3::DepthwiseConv3x3(const OperationDef& definition,
                                   bool weights_are_buffer,
                                   bool local_mem_uploads,
                                   const GpuInfo& gpu_info)
    : GPUOperation(definition), local_mem_uploads_(local_mem_uploads) {
  work_group_size_ = int3(8, 4, 1);
  code_ = GenerateDepthwiseConvCode(definition_, weights_are_buffer,
                                    local_mem_uploads_);

  if (definition_.precision == CalculationsPrecision::F16 &&
      gpu_info.IsPowerVR()) {
    compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath);
  }
}

DepthwiseConv3x3::DepthwiseConv3x3(DepthwiseConv3x3&& operation)
    : GPUOperation(std::move(operation)),
      local_mem_uploads_(operation.local_mem_uploads_) {}

DepthwiseConv3x3& DepthwiseConv3x3::operator=(DepthwiseConv3x3&& operation) {
  if (this != &operation) {
    std::swap(local_mem_uploads_, operation.local_mem_uploads_);
    GPUOperation::operator=(std::move(operation));
  }
  return *this;
}

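// Emits the kernel source. Each work item computes a 2x2 tile of output
// pixels for one depth slice S, so it reads a 4x4 neighborhood of the input
// (X - 1 .. X + 2, Y - 1 .. Y + 2) and applies the 3x3 filter taps f0..f8
// plus bias to produce the accumulators r0..r3.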
std::string DepthwiseConv3x3::GenerateDepthwiseConvCode(
    const OperationDef& op_def, bool weights_are_buffer,
    bool local_mem_uploads) {
  auto src_desc = op_def.src_tensors[0];
  src_desc.SetAddressMode(AddressMode::kZero);
  AddSrcTensor("src_tensor", src_desc);
  AddDstTensor("dst_tensor", op_def.dst_tensors[0]);

  const auto src_tensor_type = op_def.src_tensors[0].storage_type;

  const bool manual_clamp = src_tensor_type == TensorStorageType::BUFFER ||
                            src_tensor_type == TensorStorageType::IMAGE_BUFFER;

  std::string c;
  if (local_mem_uploads) {
    c += "__attribute__((reqd_work_group_size(8, 4, 1)))\n";
  }
  c += "MAIN_FUNCTION($0) {\n";
  if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
    c += "  int linear_id = GLOBAL_ID_0;\n";
    c += "  int X = (linear_id / args.dst_tensor.Batch()) * 2;\n";
    c += "  int B = linear_id % args.dst_tensor.Batch();\n";
    c += "  args.dst_tensor.SetBatchRef(B);\n";
    c += "  args.src_tensor.SetBatchRef(B);\n";
  } else {
    c += "  int X = GLOBAL_ID_0 * 2;\n";
  }
  c += "  int Y = GLOBAL_ID_1 * 2;\n";
  c += "  int S = GLOBAL_ID_2;\n";
  c += "   ACCUM_FLT4 r0 = INIT_ACCUM_FLT4(0.0f);\n";
  c += "   ACCUM_FLT4 r1 = INIT_ACCUM_FLT4(0.0f);\n";
  c += "   ACCUM_FLT4 r2 = INIT_ACCUM_FLT4(0.0f);\n";
  c += "   ACCUM_FLT4 r3 = INIT_ACCUM_FLT4(0.0f);\n";
  if (!local_mem_uploads) {
    c += "  if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() "
         "|| S >= args.dst_tensor.Slices()) { \n";
    c += "    return; \n";
    c += "  } \n";
  }
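  // With local_mem_uploads the whole 8x4 work group cooperatively copies the
  // 10 FLT4 values for slice S (9 filter taps + bias) into local memory.
  // The out-of-bounds exit above is therefore skipped here and deferred until
  // just before the writes, so every work item participates in the group copy.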
  if (local_mem_uploads) {
    c += "  __local FLT4 f[10];\n";
    c += "  event_t e = async_work_group_copy(f, args.weights.GetPtr() + S * "
         "10, 10, 0);\n";
    c += "  wait_group_events(1, &e);\n";
  } else if (weights_are_buffer) {
    c += "  __global FLT4* f = args.weights.GetPtr() + S * 10;\n";
  }
  c += "  FLT4 s0;\n";
  c += "  FLT4 s1;\n";
  c += "  FLT4 s2;\n";
  c += "  FLT4 s3;\n";
  std::string W[9] = {"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8"};
  std::string bias = "bias";
  std::string xc[4] = {"X - 1", "X", "X + 1", "X + 2"};
  std::string yc[4] = {"Y - 1", "Y", "Y + 1", "Y + 2"};
  if (!weights_are_buffer) {
    c += "   FLT4 f0 = args.weights.Read(0, S);\n";
    c += "   FLT4 f1 = args.weights.Read(1, S);\n";
    c += "   FLT4 f2 = args.weights.Read(2, S);\n";
    c += "   FLT4 f3 = args.weights.Read(3, S);\n";
    c += "   FLT4 f4 = args.weights.Read(4, S);\n";
    c += "   FLT4 f5 = args.weights.Read(5, S);\n";
    c += "   FLT4 f6 = args.weights.Read(6, S);\n";
    c += "   FLT4 f7 = args.weights.Read(7, S);\n";
    c += "   FLT4 f8 = args.weights.Read(8, S);\n";
  }
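  // BUFFER and IMAGE_BUFFER storages cannot rely on the sampler's zero
  // addressing, so boundary handling is done by hand: coordinates are clamped
  // into range and the x*_in / y*_in predicates later zero out contributions
  // read from outside the tensor.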
  if (manual_clamp) {
    c += "  int x0 = X - 1;\n";
    c += "  int x1 = X;\n";
    c += "  int x2 = X + 1;\n";
    c += "  int x3 = X + 2;\n";
    c += "  int y0 = Y - 1;\n";
    c += "  int y1 = Y;\n";
    c += "  int y2 = Y + 1;\n";
    c += "  int y3 = Y + 2;\n";
    c += "  bool x0_in = x0 >= 0 && x0 < args.dst_tensor.Width();\n";
    c += "  bool x1_in = x1 >= 0 && x1 < args.dst_tensor.Width();\n";
    c += "  bool x2_in = x2 >= 0 && x2 < args.dst_tensor.Width();\n";
    c += "  bool x3_in = x3 >= 0 && x3 < args.dst_tensor.Width();\n";
    c += "  bool y0_in = y0 >= 0 && y0 < args.dst_tensor.Height();\n";
    c += "  bool y1_in = y1 >= 0 && y1 < args.dst_tensor.Height();\n";
    c += "  bool y2_in = y2 >= 0 && y2 < args.dst_tensor.Height();\n";
    c += "  bool y3_in = y3 >= 0 && y3 < args.dst_tensor.Height();\n";
    c += "  x0 = clamp(x0, 0, args.dst_tensor.Width() - 1);\n";
    c += "  x1 = clamp(x1, 0, args.dst_tensor.Width() - 1);\n";
    c += "  x2 = clamp(x2, 0, args.dst_tensor.Width() - 1);\n";
    c += "  x3 = clamp(x3, 0, args.dst_tensor.Width() - 1);\n";
    c += "  y0 = clamp(y0, 0, args.dst_tensor.Height() - 1);\n";
    c += "  y1 = clamp(y1, 0, args.dst_tensor.Height() - 1);\n";
    c += "  y2 = clamp(y2, 0, args.dst_tensor.Height() - 1);\n";
    c += "  y3 = clamp(y3, 0, args.dst_tensor.Height() - 1);\n";
    if (src_tensor_type == TensorStorageType::BUFFER) {
      c += "  __global FLT4* src_loc = "
           "args.src_tensor.GetPtrWithSliceOffset(S);\n";
    }
    xc[0] = "x0";
    xc[1] = "x1";
    xc[2] = "x2";
    xc[3] = "x3";
    yc[0] = "y0";
    yc[1] = "y1";
    yc[2] = "y2";
    yc[3] = "y3";
  }
  if (local_mem_uploads || weights_are_buffer) {
    W[0] = "f[0]";
    W[1] = "f[1]";
    W[2] = "f[2]";
    W[3] = "f[3]";
    W[4] = "f[4]";
    W[5] = "f[5]";
    W[6] = "f[6]";
    W[7] = "f[7]";
    W[8] = "f[8]";
    bias = "f[9]";
  }
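  // read_4x_line(y) emits loads of the four horizontally adjacent source
  // values s0..s3 for input row yc[y]; for BUFFER and IMAGE_BUFFER storage
  // the loads are multiplied by the in-bounds predicates to emulate zero
  // padding.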
  auto read_4x_line = [&](int y) {
    if (src_tensor_type == TensorStorageType::BUFFER) {
      const std::string y_in = "y" + std::to_string(y) + "_in";
      c += "    s0 = src_loc[args.src_tensor.GetWHOffset(" + xc[0] + ", " +
           yc[y] + ")] * INIT_FLT(x0_in && " + y_in + ");\n";
      c += "    s1 = src_loc[args.src_tensor.GetWHOffset(" + xc[1] + ", " +
           yc[y] + ")] * INIT_FLT(x1_in && " + y_in + ");\n";
      c += "    s2 = src_loc[args.src_tensor.GetWHOffset(" + xc[2] + ", " +
           yc[y] + ")] * INIT_FLT(x2_in && " + y_in + ");\n";
      c += "    s3 = src_loc[args.src_tensor.GetWHOffset(" + xc[3] + ", " +
           yc[y] + ")] * INIT_FLT(x3_in && " + y_in + ");\n";
    } else if (src_tensor_type == TensorStorageType::IMAGE_BUFFER) {
      const std::string y_in = "y" + std::to_string(y) + "_in";
      c += "    s0 = args.src_tensor.Read(" + xc[0] + ", " + yc[y] +
           ", S) * INIT_FLT(x0_in && " + y_in + ");\n";
      c += "    s1 = args.src_tensor.Read(" + xc[1] + ", " + yc[y] +
           ", S) * INIT_FLT(x1_in && " + y_in + ");\n";
      c += "    s2 = args.src_tensor.Read(" + xc[2] + ", " + yc[y] +
           ", S) * INIT_FLT(x2_in && " + y_in + ");\n";
      c += "    s3 = args.src_tensor.Read(" + xc[3] + ", " + yc[y] +
           ", S) * INIT_FLT(x3_in && " + y_in + ");\n";
    } else {
      c += "    s0 = args.src_tensor.Read(" + xc[0] + ", " + yc[y] + ", S);\n";
      c += "    s1 = args.src_tensor.Read(" + xc[1] + ", " + yc[y] + ", S);\n";
      c += "    s2 = args.src_tensor.Read(" + xc[2] + ", " + yc[y] + ", S);\n";
      c += "    s3 = args.src_tensor.Read(" + xc[3] + ", " + yc[y] + ", S);\n";
    }
  };
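  // The four blocks below walk the 4x4 input patch row by row, applying the
  // 3x3 filter in fully unrolled form: row 0 feeds only the top outputs
  // (r0, r1), rows 1 and 2 feed all four outputs, and row 3 feeds only the
  // bottom outputs (r2, r3).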
199   c += "  {\n";
200   read_4x_line(0);
201   c += "    r0 += TO_ACCUM_TYPE(" + W[0] + " * s0);\n";
202   c += "    r0 += TO_ACCUM_TYPE(" + W[1] + " * s1);\n";
203   c += "    r1 += TO_ACCUM_TYPE(" + W[0] + " * s1);\n";
204   c += "    r0 += TO_ACCUM_TYPE(" + W[2] + " * s2);\n";
205   c += "    r1 += TO_ACCUM_TYPE(" + W[1] + " * s2);\n";
206   c += "    r1 += TO_ACCUM_TYPE(" + W[2] + " * s3);\n";
207   c += "  }\n";
208   c += "  {\n";
209   read_4x_line(1);
210   c += "    r0 += TO_ACCUM_TYPE(" + W[3] + " * s0);\n";
211   c += "    r2 += TO_ACCUM_TYPE(" + W[0] + " * s0);\n";
212   c += "    r0 += TO_ACCUM_TYPE(" + W[4] + " * s1);\n";
213   c += "    r1 += TO_ACCUM_TYPE(" + W[3] + " * s1);\n";
214   c += "    r2 += TO_ACCUM_TYPE(" + W[1] + " * s1);\n";
215   c += "    r3 += TO_ACCUM_TYPE(" + W[0] + " * s1);\n";
216   c += "    r0 += TO_ACCUM_TYPE(" + W[5] + " * s2);\n";
217   c += "    r1 += TO_ACCUM_TYPE(" + W[4] + " * s2);\n";
218   c += "    r2 += TO_ACCUM_TYPE(" + W[2] + " * s2);\n";
219   c += "    r3 += TO_ACCUM_TYPE(" + W[1] + " * s2);\n";
220   c += "    r1 += TO_ACCUM_TYPE(" + W[5] + " * s3);\n";
221   c += "    r3 += TO_ACCUM_TYPE(" + W[2] + " * s3);\n";
222   c += "  }\n";
223   c += "  {\n";
224   read_4x_line(2);
225   c += "    r0 += TO_ACCUM_TYPE(" + W[6] + " * s0);\n";
226   c += "    r2 += TO_ACCUM_TYPE(" + W[3] + " * s0);\n";
227   c += "    r0 += TO_ACCUM_TYPE(" + W[7] + " * s1);\n";
228   c += "    r1 += TO_ACCUM_TYPE(" + W[6] + " * s1);\n";
229   c += "    r2 += TO_ACCUM_TYPE(" + W[4] + " * s1);\n";
230   c += "    r3 += TO_ACCUM_TYPE(" + W[3] + " * s1);\n";
231   c += "    r0 += TO_ACCUM_TYPE(" + W[8] + " * s2);\n";
232   c += "    r1 += TO_ACCUM_TYPE(" + W[7] + " * s2);\n";
233   c += "    r2 += TO_ACCUM_TYPE(" + W[5] + " * s2);\n";
234   c += "    r3 += TO_ACCUM_TYPE(" + W[4] + " * s2);\n";
235   c += "    r1 += TO_ACCUM_TYPE(" + W[8] + " * s3);\n";
236   c += "    r3 += TO_ACCUM_TYPE(" + W[5] + " * s3);\n";
237   c += "  }\n";
238   c += "  {\n";
239   read_4x_line(3);
240   c += "    r2 += TO_ACCUM_TYPE(" + W[6] + " * s0);\n";
241   c += "    r2 += TO_ACCUM_TYPE(" + W[7] + " * s1);\n";
242   c += "    r3 += TO_ACCUM_TYPE(" + W[6] + " * s1);\n";
243   c += "    r2 += TO_ACCUM_TYPE(" + W[8] + " * s2);\n";
244   c += "    r3 += TO_ACCUM_TYPE(" + W[7] + " * s2);\n";
245   c += "    r3 += TO_ACCUM_TYPE(" + W[8] + " * s3);\n";
246   c += "  }\n";
247   if (!weights_are_buffer) {
248     c += "   FLT4 bias = args.weights.Read(9, S);\n";
249   }
250   c += "  r0 += TO_ACCUM_TYPE(" + bias + ");\n";
251   c += "  r1 += TO_ACCUM_TYPE(" + bias + ");\n";
252   c += "  r2 += TO_ACCUM_TYPE(" + bias + ");\n";
253   c += "  r3 += TO_ACCUM_TYPE(" + bias + ");\n";
254   if (local_mem_uploads) {
255     c += "  if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() "
256          "|| S >= args.dst_tensor.Slices()) { \n";
257     c += "    return; \n";
258     c += "  } \n";
259   }
260   c += "  if(X + 0 < args.dst_tensor.Width() && Y + 0 < "
261        "args.dst_tensor.Height()) {\n";
262   c += "    FLT4 result = TO_FLT4(r0);\n";
263   c += "    args.dst_tensor.Write(result, X + 0, Y + 0, S);\n";
264   c += "  }\n";
265   c += "  if(X + 1 < args.dst_tensor.Width() && Y + 0 < "
266        "args.dst_tensor.Height()) {\n";
267   c += "    FLT4 result = TO_FLT4(r1);\n";
268   c += "    args.dst_tensor.Write(result, X + 1, Y + 0, S);\n";
269   c += "  }\n";
270   c += "  if(X + 0 < args.dst_tensor.Width() && Y + 1 < "
271        "args.dst_tensor.Height()) {\n";
272   c += "    FLT4 result = TO_FLT4(r2);\n";
273   c += "    args.dst_tensor.Write(result, X + 0, Y + 1, S);\n";
274   c += "  }\n";
275   c += "  if(X + 1 < args.dst_tensor.Width() && Y + 1 < "
276        "args.dst_tensor.Height()) {\n";
277   c += "    FLT4 result = TO_FLT4(r3);\n";
278   c += "    args.dst_tensor.Write(result, X + 1, Y + 1, S);\n";
279   c += "  }\n";
280   c += "}\n";
281 
282   return c;
283 }
284 
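// Each work item produces a 2x2 output tile, so the X and Y grid dimensions
// cover half (rounded up) of the output width and height; X also folds in
// the batch dimension.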
int3 DepthwiseConv3x3::GetGridSize() const {
  const int grid_x = DivideRoundUp(dst_[0]->Width(), 2) * dst_[0]->Batch();
  const int grid_y = DivideRoundUp(dst_[0]->Height(), 2);
  const int grid_z = dst_[0]->Slices();
  return int3(grid_x, grid_y, grid_z);
}

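// When weights are uploaded through local memory the kernel is compiled with
// reqd_work_group_size(8, 4, 1), so only that work group size is offered;
// otherwise the size is left to the generic tuner.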
void DepthwiseConv3x3::GetPossibleKernelWorkGroups(
    TuningType tuning_type, const GpuInfo& gpu_info,
    const KernelInfo& kernel_info, std::vector<int3>* work_groups) const {
  if (local_mem_uploads_) {
    work_groups->push_back(work_group_size_);
  } else {
    GetPossibleWorkGroups(tuning_type, gpu_info, kernel_info, grid_size_,
                          work_groups);
  }
}

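// The specialized kernel only handles plain 3x3 depthwise convolutions:
// depth multiplier 1, stride 1, dilation 1 and padding of 1 on every side.
// One known-bad Adreno OpenCL driver build is excluded explicitly.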
bool IsDepthwiseConv3x3Supported(const GpuInfo& gpu_info,
                                 const DepthwiseConvolution2DAttributes& attr) {
  if (gpu_info.IsApiOpenCl() && gpu_info.IsAdreno()) {
    const std::string kBadDriver =
        "OpenCL 2.0 QUALCOMM build: commit #7daed58 changeid #I7ece6fe30d "
        "Date: 10/19/16";
    if (absl::StrContains(gpu_info.opencl_info.platform_version, kBadDriver)) {
      return false;
    }
  }
  return attr.weights.shape.o == 1 && attr.dilations.w == 1 &&
         attr.dilations.h == 1 && attr.weights.shape.w == 3 &&
         attr.weights.shape.h == 3 && attr.strides.w == 1 &&
         attr.strides.h == 1 && attr.padding.prepended.w == 1 &&
         attr.padding.prepended.h == 1 && attr.padding.appended.w == 1 &&
         attr.padding.appended.h == 1;
}

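// Weights go to a buffer when images are unsupported or on PowerVR, Mali and
// Apple GPUs; local-memory uploads are enabled only for the PowerVR buffer
// path.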
DepthwiseConv3x3 CreateDepthwiseConv3x3(
    const GpuInfo& gpu_info, const OperationDef& definition,
    const DepthwiseConvolution2DAttributes& attr) {
  bool weights_are_buffer = !gpu_info.SupportsImages() ||
                            gpu_info.IsPowerVR() || gpu_info.IsMali() ||
                            gpu_info.IsApple();
  bool local_mem_uploads = weights_are_buffer && gpu_info.IsPowerVR();
  DepthwiseConv3x3 result(definition, weights_are_buffer, local_mem_uploads,
                          gpu_info);
  result.UploadWeightsAndBiases(attr.weights, attr.bias, weights_are_buffer);
  return result;
}

}  // namespace gpu
}  // namespace tflite