/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "minddata/dataset/kernels/image/lite_image_utils.h"

#include <limits>
#include <stdexcept>
#include <utility>
#include <vector>

#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/core/tensor_shape.h"
#include "minddata/dataset/include/dataset/constants.h"
#include "minddata/dataset/kernels/image/lite_cv/lite_mat.h"
#include "minddata/dataset/kernels/image/lite_cv/image_process.h"
#include "minddata/dataset/util/random.h"

#define MAX_INT_PRECISION 16777216  // largest integer value a float can represent exactly (2^24)
namespace mindspore {
namespace dataset {
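// A JPEG stream starts with the SOI marker 0xFF 0xD8 followed by another 0xFF;
// checking these three leading bytes is a cheap way to detect JPEG data.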
bool IsNonEmptyJPEG(const std::shared_ptr<Tensor> &input) {
  const unsigned char *kJpegMagic = (unsigned char *)"\xFF\xD8\xFF";
  constexpr size_t kJpegMagicLen = 3;
  return input->SizeInBytes() > kJpegMagicLen && memcmp(input->GetBuffer(), kJpegMagic, kJpegMagicLen) == 0;
}

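// The callbacks below implement a libjpeg jpeg_source_mgr over an in-memory buffer:
// JpegInitSource/JpegTermSource are no-ops, JpegFillInputBuffer reports premature end of
// the input data, and JpegSkipInputData advances the read position. JpegSetSource wires
// them into the decompress object.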
static void JpegInitSource(j_decompress_ptr cinfo) {}

static boolean JpegFillInputBuffer(j_decompress_ptr cinfo) {
  if (cinfo->src->bytes_in_buffer == 0) {
    // On ARM platforms, letting the runtime_error thrown by ERREXIT propagate may crash
    // the process, so catch it here and simply return FALSE.
    try {
      ERREXIT(cinfo, JERR_INPUT_EMPTY);
    } catch (std::runtime_error &e) {
      return FALSE;
    }
    return FALSE;
  }
  return TRUE;
}

static void JpegTermSource(j_decompress_ptr cinfo) {}

static void JpegSkipInputData(j_decompress_ptr cinfo, int64_t jump) {
  if (jump < 0) {
    return;
  }
  if (static_cast<size_t>(jump) > cinfo->src->bytes_in_buffer) {
    cinfo->src->bytes_in_buffer = 0;
    return;
  } else {
    cinfo->src->bytes_in_buffer -= jump;
    cinfo->src->next_input_byte += jump;
  }
}

void JpegSetSource(j_decompress_ptr cinfo, const void *data, int64_t datasize) {
  cinfo->src = static_cast<struct jpeg_source_mgr *>(
    (*cinfo->mem->alloc_small)(reinterpret_cast<j_common_ptr>(cinfo), JPOOL_PERMANENT, sizeof(struct jpeg_source_mgr)));
  cinfo->src->init_source = JpegInitSource;
  cinfo->src->fill_input_buffer = JpegFillInputBuffer;
#if defined(_WIN32) || defined(_WIN64) || defined(ENABLE_ARM32)
  // libjpeg declares skip_input_data with the underlying C type `long` on these platforms,
  // so cast to that function type; the cast is excluded from lint checks.
  cinfo->src->skip_input_data = reinterpret_cast<void (*)(j_decompress_ptr, long)>(JpegSkipInputData);  // NOLINT.
#else
  cinfo->src->skip_input_data = JpegSkipInputData;
#endif
  cinfo->src->resync_to_restart = jpeg_resync_to_restart;
  cinfo->src->term_source = JpegTermSource;
  cinfo->src->bytes_in_buffer = datasize;
  cinfo->src->next_input_byte = static_cast<const JOCTET *>(data);
}

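// Read up to max_scanlines_to_read scanlines into `buffer`, one scanline at a time.
// For CMYK sources the pixels are converted to RGB on the fly; JPEGs written with an
// Adobe APP14 marker typically store inverted CMYK values, which is why the two
// conversion formulas below differ. For non-CMYK sources the cropped region is copied directly.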
static Status JpegReadScanlines(jpeg_decompress_struct *const cinfo, int max_scanlines_to_read, JSAMPLE *buffer,
                                int buffer_size, int crop_w, int crop_w_aligned, int offset, int stride) {
  // Scanlines are first read into this temporary buffer, which must have the same number
  // of components as the image.
  int64_t scanline_size = crop_w_aligned * cinfo->output_components;
  std::vector<JSAMPLE> scanline(scanline_size);
  JSAMPLE *scanline_ptr = &scanline[0];
  while (cinfo->output_scanline < static_cast<unsigned int>(max_scanlines_to_read)) {
    int num_lines_read = 0;
    try {
      num_lines_read = jpeg_read_scanlines(cinfo, &scanline_ptr, 1);
    } catch (std::runtime_error &e) {
      RETURN_STATUS_UNEXPECTED("Decode: jpeg_read_scanlines error.");
    }
    if (cinfo->out_color_space == JCS_CMYK && num_lines_read > 0) {
      for (int i = 0; i < crop_w; ++i) {
        const int cmyk_pixel = 4 * i + offset;
        const int c = scanline_ptr[cmyk_pixel];
        const int m = scanline_ptr[cmyk_pixel + 1];
        const int y = scanline_ptr[cmyk_pixel + 2];
        const int k = scanline_ptr[cmyk_pixel + 3];
        int r, g, b;
        if (cinfo->saw_Adobe_marker) {
          r = (k * c) / MAX_PIXEL_VALUE;
          g = (k * m) / MAX_PIXEL_VALUE;
          b = (k * y) / MAX_PIXEL_VALUE;
        } else {
          r = (MAX_PIXEL_VALUE - c) * (MAX_PIXEL_VALUE - k) / MAX_PIXEL_VALUE;
          g = (MAX_PIXEL_VALUE - m) * (MAX_PIXEL_VALUE - k) / MAX_PIXEL_VALUE;
          b = (MAX_PIXEL_VALUE - y) * (MAX_PIXEL_VALUE - k) / MAX_PIXEL_VALUE;
        }
        constexpr int buffer_rgb_val_size = 3;
        constexpr int channel_red = 0;
        constexpr int channel_green = 1;
        constexpr int channel_blue = 2;
        buffer[buffer_rgb_val_size * i + channel_red] = r;
        buffer[buffer_rgb_val_size * i + channel_green] = g;
        buffer[buffer_rgb_val_size * i + channel_blue] = b;
      }
    } else if (num_lines_read > 0) {
      auto copy_status = memcpy_s(buffer, buffer_size, scanline_ptr + offset, stride);
      if (copy_status != 0) {
        jpeg_destroy_decompress(cinfo);
        RETURN_STATUS_UNEXPECTED("Decode: memcpy_s failed");
      }
    } else {
      jpeg_destroy_decompress(cinfo);
      std::string err_msg = "Decode: failed to decompress image.";
      RETURN_STATUS_UNEXPECTED(err_msg);
    }
    buffer += stride;
    buffer_size = buffer_size - stride;
  }
  return Status::OK();
}

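// Choose the libjpeg output color space from the number of components in the file:
// grayscale (1) and RGB (3) are decoded directly to RGB, while CMYK (4) is decoded as
// CMYK and converted to RGB manually in JpegReadScanlines.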
static Status JpegSetColorSpace(jpeg_decompress_struct *cinfo) {
  switch (cinfo->num_components) {
    case 1:
      // we want to output 3 components if it's grayscale
      cinfo->out_color_space = JCS_RGB;
      return Status::OK();
    case 3:
      cinfo->out_color_space = JCS_RGB;
      return Status::OK();
    case 4:
      // Need to manually convert to RGB
      cinfo->out_color_space = JCS_CMYK;
      return Status::OK();
    default:
      jpeg_destroy_decompress(cinfo);
      std::string err_msg = "Decode: unsupported number of color components in image.";
      RETURN_STATUS_UNEXPECTED(err_msg);
  }
}

void JpegErrorExitCustom(j_common_ptr cinfo) {
  char jpeg_last_error_msg[JMSG_LENGTH_MAX];
  (*(cinfo->err->format_message))(cinfo, jpeg_last_error_msg);
  throw std::runtime_error(jpeg_last_error_msg);
}

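// Decode a JPEG image, optionally cropping to (crop_x, crop_y, crop_w, crop_h); when all
// four are 0 the whole image is decoded. jpeg_crop_scanline can only start the crop on an
// iMCU boundary, so crop_x is rounded down to a multiple of the MCU size and the extra
// leading pixels are skipped later via `offset` when scanlines are copied to the output.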
Status JpegCropAndDecode(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, int crop_x, int crop_y,
                         int crop_w, int crop_h) {
  struct jpeg_decompress_struct cinfo;
  auto DestroyDecompressAndReturnError = [&cinfo](const std::string &err) {
    jpeg_destroy_decompress(&cinfo);
    RETURN_STATUS_UNEXPECTED(err);
  };
  struct JpegErrorManagerCustom jerr;
  cinfo.err = jpeg_std_error(&jerr.pub);
  jerr.pub.error_exit = JpegErrorExitCustom;
  try {
    jpeg_create_decompress(&cinfo);
    JpegSetSource(&cinfo, input->GetBuffer(), input->SizeInBytes());
    (void)jpeg_read_header(&cinfo, TRUE);
    RETURN_IF_NOT_OK(JpegSetColorSpace(&cinfo));
    jpeg_calc_output_dimensions(&cinfo);
  } catch (std::runtime_error &e) {
    return DestroyDecompressAndReturnError(e.what());
  }
  CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int32_t>::max() - crop_w) > crop_x, "invalid crop width");
  CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int32_t>::max() - crop_h) > crop_y, "invalid crop height");
  if (crop_x == 0 && crop_y == 0 && crop_w == 0 && crop_h == 0) {
    crop_w = cinfo.output_width;
    crop_h = cinfo.output_height;
  } else if (crop_w == 0 || static_cast<unsigned int>(crop_w + crop_x) > cinfo.output_width || crop_h == 0 ||
             static_cast<unsigned int>(crop_h + crop_y) > cinfo.output_height) {
    return DestroyDecompressAndReturnError("Decode: invalid crop size");
  }
  const int mcu_size = cinfo.min_DCT_scaled_size;
  CHECK_FAIL_RETURN_UNEXPECTED(mcu_size != 0, "Invalid data.");
  unsigned int crop_x_aligned = (crop_x / mcu_size) * mcu_size;
  unsigned int crop_w_aligned = crop_w + crop_x - crop_x_aligned;
  try {
    (void)jpeg_start_decompress(&cinfo);
    jpeg_crop_scanline(&cinfo, &crop_x_aligned, &crop_w_aligned);
  } catch (std::runtime_error &e) {
    return DestroyDecompressAndReturnError(e.what());
  }
  JDIMENSION skipped_scanlines = jpeg_skip_scanlines(&cinfo, crop_y);
  // the output always has three components: everything is converted to RGB
  constexpr int kOutNumComponents = 3;
  TensorShape ts = TensorShape({crop_h, crop_w, kOutNumComponents});
  std::shared_ptr<Tensor> output_tensor;
  RETURN_IF_NOT_OK(Tensor::CreateEmpty(ts, DataType(DataType::DE_UINT8), &output_tensor));
  const int buffer_size = output_tensor->SizeInBytes();
  JSAMPLE *buffer = reinterpret_cast<JSAMPLE *>(&(*output_tensor->begin<uint8_t>()));
  // guard against overflow when computing the last scanline to read
  CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int32_t>::max() - skipped_scanlines) > crop_h,
                               "Invalid crop height.");
  const int max_scanlines_to_read = skipped_scanlines + crop_h;
  // stride refers to the output tensor, which has 3 components at most
  CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int32_t>::max() / crop_w) > kOutNumComponents,
                               "Invalid crop width.");
  const int stride = crop_w * kOutNumComponents;
  // offset is calculated for scanlines read from the image, therefore
  // has the same number of components as the image
  const int offset = (crop_x - crop_x_aligned) * cinfo.output_components;
  RETURN_IF_NOT_OK(
    JpegReadScanlines(&cinfo, max_scanlines_to_read, buffer, buffer_size, crop_w, crop_w_aligned, offset, stride));
  *output = output_tensor;
  jpeg_destroy_decompress(&cinfo);
  return Status::OK();
}

static LDataType GetLiteCVDataType(DataType data_type) {
  if (data_type == DataType::DE_UINT8) {
    return LDataType::UINT8;
  } else if (data_type == DataType::DE_FLOAT32) {
    return LDataType::FLOAT32;
  } else {
    return LDataType::UNKNOWN;
  }
}

Status Decode(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
  if (IsNonEmptyJPEG(input)) {
    return JpegCropAndDecode(input, output);
  } else {
    RETURN_STATUS_UNEXPECTED("Decode: only JPEG decoding is supported on Android.");
  }
}

Status Crop(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, int x, int y, int w, int h) {
  if (input->Rank() != 3 && input->Rank() != 2) {
    RETURN_STATUS_UNEXPECTED("Crop: image shape is not <H,W,C> or <H,W>");
  }

  if (input->type() != DataType::DE_FLOAT32 && input->type() != DataType::DE_UINT8) {
    RETURN_STATUS_UNEXPECTED("Crop: image datatype is not float32 or uint8");
  }

  CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int32_t>::max() - y) > h, "Invalid crop height.");
  CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int32_t>::max() - x) > w, "Invalid crop width.");
  // account for integer overflow
  if (y < 0 || (y + h) > input->shape()[0] || (y + h) < 0) {
    RETURN_STATUS_UNEXPECTED(
      "Crop: invalid y coordinate value for crop, "
      "y coordinate value exceeds the boundary of the image.");
  }
  // account for integer overflow
  if (x < 0 || (x + w) > input->shape()[1] || (x + w) < 0) {
    RETURN_STATUS_UNEXPECTED(
      "Crop: invalid x coordinate value for crop, "
      "x coordinate value exceeds the boundary of the image.");
  }

  try {
    LiteMat lite_mat_rgb;
    TensorShape shape{h, w};
    if (input->Rank() == 2) {
      lite_mat_rgb.Init(input->shape()[1], input->shape()[0],
                        const_cast<void *>(reinterpret_cast<const void *>(input->GetBuffer())),
                        GetLiteCVDataType(input->type()));
    } else {  // rank == 3
      lite_mat_rgb.Init(input->shape()[1], input->shape()[0], input->shape()[2],
                        const_cast<void *>(reinterpret_cast<const void *>(input->GetBuffer())),
                        GetLiteCVDataType(input->type()));
      int num_channels = input->shape()[2];
      shape = shape.AppendDim(num_channels);
    }

    std::shared_ptr<Tensor> output_tensor;
    RETURN_IF_NOT_OK(Tensor::CreateEmpty(shape, input->type(), &output_tensor));

    uint8_t *buffer = reinterpret_cast<uint8_t *>(&(*output_tensor->begin<uint8_t>()));
    LiteMat lite_mat_cut;

    lite_mat_cut.Init(w, h, lite_mat_rgb.channel_, reinterpret_cast<void *>(buffer), GetLiteCVDataType(input->type()));

    bool ret = Crop(lite_mat_rgb, lite_mat_cut, x, y, w, h);
    CHECK_FAIL_RETURN_UNEXPECTED(ret, "Crop: image crop failed.");

    *output = output_tensor;
    return Status::OK();
  } catch (std::runtime_error &e) {
    RETURN_STATUS_UNEXPECTED("Crop: " + std::string(e.what()));
  }
  return Status::OK();
}

Status GetJpegImageInfo(const std::shared_ptr<Tensor> &input, int *img_width, int *img_height) {
  struct jpeg_decompress_struct cinfo {};
  struct JpegErrorManagerCustom jerr {};
  cinfo.err = jpeg_std_error(&jerr.pub);
  jerr.pub.error_exit = JpegErrorExitCustom;
  try {
    jpeg_create_decompress(&cinfo);
    JpegSetSource(&cinfo, input->GetBuffer(), input->SizeInBytes());
    (void)jpeg_read_header(&cinfo, TRUE);
    jpeg_calc_output_dimensions(&cinfo);
  } catch (std::runtime_error &e) {
    jpeg_destroy_decompress(&cinfo);
    RETURN_STATUS_UNEXPECTED(e.what());
  }
  *img_height = cinfo.output_height;
  *img_width = cinfo.output_width;
  jpeg_destroy_decompress(&cinfo);
  return Status::OK();
}

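// Normalize an <H,W,C> image with the given mean and std vectors, producing a float32
// tensor. uint8 inputs are first converted to float; the arithmetic itself is delegated
// to lite_cv's SubStractMeanNormalize.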
Status Normalize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, std::vector<float> vec_mean,
                 std::vector<float> vec_std) {
  if (input->Rank() != 3) {
    RETURN_STATUS_UNEXPECTED("Normalize: image shape is not <H,W,C>.");
  }

  if (input->type() != DataType::DE_UINT8 && input->type() != DataType::DE_FLOAT32) {
    RETURN_STATUS_UNEXPECTED("Normalize: image datatype is not uint8 or float32.");
  }

  try {
    LiteMat lite_mat_norm;
    bool ret = false;
    LiteMat lite_mat_rgb(input->shape()[1], input->shape()[0], input->shape()[2],
                         const_cast<void *>(reinterpret_cast<const void *>(input->GetBuffer())),
                         GetLiteCVDataType(input->type()));

    if (input->type() == DataType::DE_UINT8) {
      LiteMat lite_mat_float;
      // change input to float
      ret = ConvertTo(lite_mat_rgb, lite_mat_float, 1.0);
      CHECK_FAIL_RETURN_UNEXPECTED(ret, "Normalize: convert to float datatype failed.");
      ret = SubStractMeanNormalize(lite_mat_float, lite_mat_norm, vec_mean, vec_std);
    } else {  // float32
      ret = SubStractMeanNormalize(lite_mat_rgb, lite_mat_norm, vec_mean, vec_std);
    }
    CHECK_FAIL_RETURN_UNEXPECTED(ret, "Normalize: normalize failed.");

    std::shared_ptr<Tensor> output_tensor;
    RETURN_IF_NOT_OK(Tensor::CreateFromMemory(input->shape(), DataType(DataType::DE_FLOAT32),
                                              static_cast<uchar *>(lite_mat_norm.data_ptr_), &output_tensor));

    *output = output_tensor;
  } catch (std::runtime_error &e) {
    RETURN_STATUS_UNEXPECTED("Normalize: " + std::string(e.what()));
  }
  return Status::OK();
}

Status Resize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, int32_t output_height,
              int32_t output_width, double fx, double fy, InterpolationMode mode) {
  if (input->Rank() != 3 && input->Rank() != 2) {
    RETURN_STATUS_UNEXPECTED("Resize: input image is not in shape of <H,W,C> or <H,W>");
  }
  if (input->type() != DataType::DE_UINT8) {
    RETURN_STATUS_UNEXPECTED("Resize: image datatype is not uint8.");
  }
  // reject output sizes that are zero or more than 1000x the input size
  const int height_width_scale_limit = 1000;
  if (output_height == 0 || output_height > input->shape()[0] * height_width_scale_limit || output_width == 0 ||
      output_width > input->shape()[1] * height_width_scale_limit) {
    std::string err_msg =
      "Resize: the output width or height cannot be 0 and cannot exceed "
      "1000 times the size of the original image.";
    return Status(StatusCode::kMDShapeMisMatch, err_msg);
  }
  try {
    LiteMat lite_mat_rgb;
    TensorShape shape{output_height, output_width};
    if (input->Rank() == 2) {
      lite_mat_rgb.Init(input->shape()[1], input->shape()[0],
                        const_cast<void *>(reinterpret_cast<const void *>(input->GetBuffer())),
                        GetLiteCVDataType(input->type()));
    } else {  // rank == 3
      lite_mat_rgb.Init(input->shape()[1], input->shape()[0], input->shape()[2],
                        const_cast<void *>(reinterpret_cast<const void *>(input->GetBuffer())),
                        GetLiteCVDataType(input->type()));
      int num_channels = input->shape()[2];
      shape = shape.AppendDim(num_channels);
    }

    LiteMat lite_mat_resize;
    std::shared_ptr<Tensor> output_tensor;
    RETURN_IF_NOT_OK(Tensor::CreateEmpty(shape, input->type(), &output_tensor));

    uint8_t *buffer = reinterpret_cast<uint8_t *>(&(*output_tensor->begin<uint8_t>()));

    lite_mat_resize.Init(output_width, output_height, lite_mat_rgb.channel_, reinterpret_cast<void *>(buffer),
                         GetLiteCVDataType(input->type()));

    bool ret = ResizeBilinear(lite_mat_rgb, lite_mat_resize, output_width, output_height);
    CHECK_FAIL_RETURN_UNEXPECTED(ret, "Resize: bilinear resize failed.");

    *output = output_tensor;
  } catch (std::runtime_error &e) {
    RETURN_STATUS_UNEXPECTED("Resize: " + std::string(e.what()));
  }
  return Status::OK();
}

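// Resize via lite_cv ResizePreserveARWithFiller which, per its name, preserves the aspect
// ratio and pads the remainder with a filler. Three tensors are produced: the resized
// float32 image, the {ratio, shift_w, shift_h} values, and the 2x3 inverse transform matrix.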
Status ResizePreserve(const TensorRow &inputs, int32_t height, int32_t width, int32_t img_orientation,
                      TensorRow *outputs) {
  outputs->resize(3);
  CHECK_FAIL_RETURN_UNEXPECTED(inputs.size() > 0,
                               "Invalid input, the number of input tensors should be greater than 0, but got " +
                                 std::to_string(inputs.size()));
  std::shared_ptr<Tensor> input = inputs[0];
  CHECK_FAIL_RETURN_UNEXPECTED(input->shape().Size() >= 3, "Invalid input shape, should have at least 3 dimensions.");
  LiteMat lite_mat_src(input->shape()[1], input->shape()[0], input->shape()[2],
                       const_cast<void *>(reinterpret_cast<const void *>(input->GetBuffer())),
                       GetLiteCVDataType(input->type()));

  LiteMat lite_mat_dst;
  std::shared_ptr<Tensor> image_tensor;
  TensorShape new_shape = TensorShape({height, width, input->shape()[2]});
  RETURN_IF_NOT_OK(Tensor::CreateEmpty(new_shape, DataType(DataType::DE_FLOAT32), &image_tensor));
  uint8_t *buffer = reinterpret_cast<uint8_t *>(&(*image_tensor->begin<uint8_t>()));
  lite_mat_dst.Init(width, height, input->shape()[2], reinterpret_cast<void *>(buffer), LDataType::FLOAT32);

  float ratioShiftWShiftH[3] = {0};
  float invM[2][3] = {{0, 0, 0}, {0, 0, 0}};
  bool ret =
    ResizePreserveARWithFiller(lite_mat_src, lite_mat_dst, height, width, &ratioShiftWShiftH, &invM, img_orientation);
  CHECK_FAIL_RETURN_UNEXPECTED(ret, "ResizePreserve: resize failed.");

  std::shared_ptr<Tensor> ratio_tensor;
  TensorShape ratio_shape = TensorShape({3});
  RETURN_IF_NOT_OK(Tensor::CreateFromMemory(ratio_shape, DataType(DataType::DE_FLOAT32),
                                            reinterpret_cast<uint8_t *>(&ratioShiftWShiftH), &ratio_tensor));

  std::shared_ptr<Tensor> invM_tensor;
  TensorShape invM_shape = TensorShape({2, 3});
  RETURN_IF_NOT_OK(Tensor::CreateFromMemory(invM_shape, DataType(DataType::DE_FLOAT32),
                                            reinterpret_cast<uint8_t *>(&invM), &invM_tensor));

  (*outputs)[0] = image_tensor;
  (*outputs)[1] = ratio_tensor;
  (*outputs)[2] = invM_tensor;
  return Status::OK();
}

Status RgbToBgr(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
  if (input->Rank() != 3) {
    RETURN_STATUS_UNEXPECTED("RgbToBgr: input image is not in shape of <H,W,C>");
  }
  if (input->type() != DataType::DE_UINT8) {
    RETURN_STATUS_UNEXPECTED("RgbToBgr: image datatype is not uint8.");
  }

  try {
    int output_height = input->shape()[0];
    int output_width = input->shape()[1];

    LiteMat lite_mat_rgb(input->shape()[1], input->shape()[0], input->shape()[2],
                         const_cast<void *>(reinterpret_cast<const void *>(input->GetBuffer())),
                         GetLiteCVDataType(input->type()));
    LiteMat lite_mat_convert;
    std::shared_ptr<Tensor> output_tensor;
    TensorShape new_shape = TensorShape({output_height, output_width, 3});
    RETURN_IF_NOT_OK(Tensor::CreateEmpty(new_shape, input->type(), &output_tensor));
    uint8_t *buffer = reinterpret_cast<uint8_t *>(&(*output_tensor->begin<uint8_t>()));
    lite_mat_convert.Init(output_width, output_height, 3, reinterpret_cast<void *>(buffer),
                          GetLiteCVDataType(input->type()));

    bool ret =
      ConvertRgbToBgr(lite_mat_rgb, GetLiteCVDataType(input->type()), output_width, output_height, lite_mat_convert);
    CHECK_FAIL_RETURN_UNEXPECTED(ret, "RgbToBgr: RGBToBGR failed.");

    *output = output_tensor;
  } catch (std::runtime_error &e) {
    RETURN_STATUS_UNEXPECTED("RgbToBgr: " + std::string(e.what()));
  }
  return Status::OK();
}

Status RgbToGray(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
  if (input->Rank() != 3) {
    RETURN_STATUS_UNEXPECTED("RgbToGray: input image is not in shape of <H,W,C>");
  }
  if (input->type() != DataType::DE_UINT8) {
    RETURN_STATUS_UNEXPECTED("RgbToGray: image datatype is not uint8.");
  }

  try {
    int output_height = input->shape()[0];
    int output_width = input->shape()[1];

    LiteMat lite_mat_rgb(input->shape()[1], input->shape()[0], input->shape()[2],
                         const_cast<void *>(reinterpret_cast<const void *>(input->GetBuffer())),
                         GetLiteCVDataType(input->type()));
    LiteMat lite_mat_convert;
    std::shared_ptr<Tensor> output_tensor;
    TensorShape new_shape = TensorShape({output_height, output_width, 1});
    RETURN_IF_NOT_OK(Tensor::CreateEmpty(new_shape, input->type(), &output_tensor));
    uint8_t *buffer = reinterpret_cast<uint8_t *>(&(*output_tensor->begin<uint8_t>()));
    lite_mat_convert.Init(output_width, output_height, 1, reinterpret_cast<void *>(buffer),
                          GetLiteCVDataType(input->type()));

    bool ret =
      ConvertRgbToGray(lite_mat_rgb, GetLiteCVDataType(input->type()), output_width, output_height, lite_mat_convert);
    CHECK_FAIL_RETURN_UNEXPECTED(ret, "RgbToGray: RGBToGRAY failed.");

    *output = output_tensor;
  } catch (std::runtime_error &e) {
    RETURN_STATUS_UNEXPECTED("RgbToGray: " + std::string(e.what()));
  }
  return Status::OK();
}

Status Pad(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, const int32_t &pad_top,
           const int32_t &pad_bottom, const int32_t &pad_left, const int32_t &pad_right, const BorderType &border_types,
           uint8_t fill_r, uint8_t fill_g, uint8_t fill_b) {
  if (input->Rank() != 3) {
    RETURN_STATUS_UNEXPECTED("Pad: input image is not in shape of <H,W,C>");
  }

  if (input->type() != DataType::DE_FLOAT32 && input->type() != DataType::DE_UINT8) {
    RETURN_STATUS_UNEXPECTED("Pad: image datatype is not uint8 or float32.");
  }

  if (pad_top < 0 || pad_bottom < 0 || pad_left < 0 || pad_right < 0) {
    RETURN_STATUS_UNEXPECTED(
      "Pad: "
      "the top, bottom, left and right padding values must be greater than or equal to 0.");
  }

  try {
    LiteMat lite_mat_rgb(input->shape()[1], input->shape()[0], input->shape()[2],
                         const_cast<void *>(reinterpret_cast<const void *>(input->GetBuffer())),
                         GetLiteCVDataType(input->type()));
    LiteMat lite_mat_pad;

    std::shared_ptr<Tensor> output_tensor;

    CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int32_t>::max() - lite_mat_rgb.width_) > pad_left,
                                 "Invalid pad width.");
    CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int32_t>::max() - lite_mat_rgb.width_ + pad_left) > pad_right,
                                 "Invalid pad width.");
    int pad_width = lite_mat_rgb.width_ + pad_left + pad_right;
    CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int32_t>::max() - lite_mat_rgb.height_) > pad_top,
                                 "Invalid pad height.");
    CHECK_FAIL_RETURN_UNEXPECTED((std::numeric_limits<int32_t>::max() - lite_mat_rgb.height_ + pad_top) > pad_bottom,
                                 "Invalid pad height.");
    int pad_height = lite_mat_rgb.height_ + pad_top + pad_bottom;
    TensorShape new_shape = TensorShape({pad_height, pad_width, input->shape()[2]});
    RETURN_IF_NOT_OK(Tensor::CreateEmpty(new_shape, input->type(), &output_tensor));

    uint8_t *buffer = reinterpret_cast<uint8_t *>(&(*output_tensor->begin<uint8_t>()));

    lite_mat_pad.Init(pad_width, pad_height, lite_mat_rgb.channel_, reinterpret_cast<void *>(buffer),
                      GetLiteCVDataType(input->type()));

    bool ret = Pad(lite_mat_rgb, lite_mat_pad, pad_top, pad_bottom, pad_left, pad_right,
                   PaddBorderType::PADD_BORDER_CONSTANT, fill_r, fill_g, fill_b);
    CHECK_FAIL_RETURN_UNEXPECTED(ret, "Pad: pad failed.");

    *output = output_tensor;
  } catch (std::runtime_error &e) {
    RETURN_STATUS_UNEXPECTED("Pad: " + std::string(e.what()));
  }
  return Status::OK();
}

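// Upright images whose EXIF orientation involves a pure rotation: 3 is a 180-degree
// rotation, 6 and 8 are the two 90-degree rotations. Each branch fills a 2x3 affine
// matrix M and the rotated output size, then applies it with lite_cv Affine.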
static Status RotateAngleWithOutMirror(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output,
                                       const uint64_t orientation) {
  try {
    int height = 0;
    int width = 0;
    double M[6] = {};

    LiteMat lite_mat_rgb(input->shape()[1], input->shape()[0], input->shape()[2],
                         const_cast<void *>(reinterpret_cast<const void *>(input->GetBuffer())),
                         GetLiteCVDataType(input->type()));

    if (orientation == 3) {
      height = lite_mat_rgb.height_;
      width = lite_mat_rgb.width_;
      M[0] = -1.0f;
      M[1] = 0.0f;
      M[2] = lite_mat_rgb.width_ - 1;
      M[3] = 0.0f;
      M[4] = -1.0f;
      M[5] = lite_mat_rgb.height_ - 1;
    } else if (orientation == 6) {
      height = lite_mat_rgb.width_;
      width = lite_mat_rgb.height_;
      M[0] = 0.0f;
      M[1] = -1.0f;
      M[2] = lite_mat_rgb.height_ - 1;
      M[3] = 1.0f;
      M[4] = 0.0f;
      M[5] = 0.0f;
    } else if (orientation == 8) {
      height = lite_mat_rgb.width_;
      width = lite_mat_rgb.height_;
      M[0] = 0.0f;
      M[1] = 1.0f;
      M[2] = 0.0f;
      M[3] = -1.0f;
      M[4] = 0.0f;
      M[5] = lite_mat_rgb.width_ - 1.0f;
    } else {
      // other orientation values are not handled here; height and width remain 0
    }

    std::vector<size_t> dsize;
    dsize.push_back(width);
    dsize.push_back(height);
    LiteMat lite_mat_affine;
    std::shared_ptr<Tensor> output_tensor;
    TensorShape new_shape = TensorShape({height, width, input->shape()[2]});
    RETURN_IF_NOT_OK(Tensor::CreateEmpty(new_shape, input->type(), &output_tensor));
    uint8_t *buffer = reinterpret_cast<uint8_t *>(&(*output_tensor->begin<uint8_t>()));
    lite_mat_affine.Init(width, height, lite_mat_rgb.channel_, reinterpret_cast<void *>(buffer),
                         GetLiteCVDataType(input->type()));

    bool ret = Affine(lite_mat_rgb, lite_mat_affine, M, dsize, UINT8_C3(0, 0, 0));
    CHECK_FAIL_RETURN_UNEXPECTED(ret, "Rotate: rotate failed.");

    *output = output_tensor;
  } catch (std::runtime_error &e) {
    RETURN_STATUS_UNEXPECTED("Rotate: " + std::string(e.what()));
  }
  return Status::OK();
}

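// Upright images whose EXIF orientation includes a mirror component: 2 and 4 are the
// horizontal and vertical flips, 5 and 7 combine a flip with a 90-degree rotation
// (transpose/transverse). As above, each branch builds a 2x3 affine matrix for lite_cv Affine.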
static Status RotateAngleWithMirror(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output,
                                    const uint64_t orientation) {
  try {
    int height = 0;
    int width = 0;
    double M[6] = {};

    LiteMat lite_mat_rgb(input->shape()[1], input->shape()[0], input->shape()[2],
                         const_cast<void *>(reinterpret_cast<const void *>(input->GetBuffer())),
                         GetLiteCVDataType(input->type()));

    if (orientation == 2) {
      height = lite_mat_rgb.height_;
      width = lite_mat_rgb.width_;
      M[0] = -1.0f;
      M[1] = 0.0f;
      M[2] = lite_mat_rgb.width_ - 1;
      M[3] = 0.0f;
      M[4] = 1.0f;
      M[5] = 0.0f;
    } else if (orientation == 5) {
      height = lite_mat_rgb.width_;
      width = lite_mat_rgb.height_;
      M[0] = 0.0f;
      M[1] = 1.0f;
      M[2] = 0.0f;
      M[3] = 1.0f;
      M[4] = 0.0f;
      M[5] = 0.0f;
    } else if (orientation == 7) {
      height = lite_mat_rgb.width_;
      width = lite_mat_rgb.height_;
      M[0] = 0.0f;
      M[1] = -1.0f;
      M[2] = lite_mat_rgb.height_ - 1;
      M[3] = -1.0f;
      M[4] = 0.0f;
      M[5] = lite_mat_rgb.width_ - 1;
    } else if (orientation == 4) {
      height = lite_mat_rgb.height_;
      width = lite_mat_rgb.width_;
      M[0] = 1.0f;
      M[1] = 0.0f;
      M[2] = 0.0f;
      M[3] = 0.0f;
      M[4] = -1.0f;
      M[5] = lite_mat_rgb.height_ - 1;
    } else {
      // other orientation values are not handled here; height and width remain 0
    }
    std::vector<size_t> dsize;
    dsize.push_back(width);
    dsize.push_back(height);
    LiteMat lite_mat_affine;
    std::shared_ptr<Tensor> output_tensor;
    TensorShape new_shape = TensorShape({height, width, input->shape()[2]});
    RETURN_IF_NOT_OK(Tensor::CreateEmpty(new_shape, input->type(), &output_tensor));
    uint8_t *buffer = reinterpret_cast<uint8_t *>(&(*output_tensor->begin<uint8_t>()));
    lite_mat_affine.Init(width, height, lite_mat_rgb.channel_, reinterpret_cast<void *>(buffer),
                         GetLiteCVDataType(input->type()));

    bool ret = Affine(lite_mat_rgb, lite_mat_affine, M, dsize, UINT8_C3(0, 0, 0));
    CHECK_FAIL_RETURN_UNEXPECTED(ret, "Rotate: rotate failed.");

    *output = output_tensor;
  } catch (std::runtime_error &e) {
    RETURN_STATUS_UNEXPECTED("Rotate: " + std::string(e.what()));
  }
  return Status::OK();
}

static bool IsMirror(int orientation) {
  if (orientation == 2 || orientation == 4 || orientation == 5 || orientation == 7) {
    return true;
  }
  return false;
}
// rotate the image by EXIF orientation
Status Rotate(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, const uint64_t orientation) {
  if (input->Rank() != 2 && input->Rank() != 3) {
    RETURN_STATUS_UNEXPECTED("Rotate: input image is not in shape of <H,W,C> or <H,W>");
  }

  if (input->type() != DataType::DE_FLOAT32 && input->type() != DataType::DE_UINT8) {
    RETURN_STATUS_UNEXPECTED("Rotate: image datatype is not float32 or uint8.");
  }

  if (!IsMirror(orientation)) {
    return RotateAngleWithOutMirror(input, output, orientation);
  } else {
    return RotateAngleWithMirror(input, output, orientation);
  }
}

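// Apply a user-supplied 2x3 affine transform to the image. `mat` is copied into a
// 6-element double array M and passed to lite_cv Affine together with the output size and
// a constant fill color; only bilinear interpolation is currently supported.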
Status Affine(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, const std::vector<float_t> &mat,
              InterpolationMode interpolation, uint8_t fill_r, uint8_t fill_g, uint8_t fill_b) {
  try {
    if (interpolation != InterpolationMode::kLinear) {
      MS_LOG(WARNING) << "Only Bilinear interpolation supported for now";
    }
    int height = 0;
    int width = 0;
    CHECK_FAIL_RETURN_UNEXPECTED(mat.size() <= 6, "Invalid mat shape.");
    double M[6] = {};
    for (size_t i = 0; i < mat.size(); i++) {
      M[i] = static_cast<double>(mat[i]);
    }

    CHECK_FAIL_RETURN_UNEXPECTED(input->shape().Size() >= 3,
                                 "Invalid input shape, should have at least 3 dimensions.");
    LiteMat lite_mat_rgb(input->shape()[1], input->shape()[0], input->shape()[2],
                         const_cast<void *>(reinterpret_cast<const void *>(input->GetBuffer())),
                         GetLiteCVDataType(input->type()));

    height = lite_mat_rgb.height_;
    width = lite_mat_rgb.width_;
    std::vector<size_t> dsize;
    dsize.push_back(width);
    dsize.push_back(height);
    LiteMat lite_mat_affine;
    std::shared_ptr<Tensor> output_tensor;
    TensorShape new_shape = TensorShape({height, width, input->shape()[2]});
    RETURN_IF_NOT_OK(Tensor::CreateEmpty(new_shape, input->type(), &output_tensor));
    uint8_t *buffer = reinterpret_cast<uint8_t *>(&(*output_tensor->begin<uint8_t>()));
    lite_mat_affine.Init(width, height, lite_mat_rgb.channel_, reinterpret_cast<void *>(buffer),
                         GetLiteCVDataType(input->type()));

    bool ret = Affine(lite_mat_rgb, lite_mat_affine, M, dsize, UINT8_C3(fill_r, fill_g, fill_b));
    CHECK_FAIL_RETURN_UNEXPECTED(ret, "Affine: affine failed.");

    *output = output_tensor;
    return Status::OK();
  } catch (std::runtime_error &e) {
    RETURN_STATUS_UNEXPECTED("Affine: " + std::string(e.what()));
  }
}

Status GaussianBlur(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output, int32_t kernel_x,
                    int32_t kernel_y, float sigma_x, float sigma_y) {
  try {
    LiteMat lite_mat_input;
    if (input->Rank() == 3) {
      if (input->shape()[2] != 1 && input->shape()[2] != 3) {
        RETURN_STATUS_UNEXPECTED("GaussianBlur: the number of channels of the input image must be 1 or 3");
      }
      lite_mat_input = LiteMat(input->shape()[1], input->shape()[0], input->shape()[2],
                               const_cast<void *>(reinterpret_cast<const void *>(input->GetBuffer())),
                               GetLiteCVDataType(input->type()));
    } else if (input->Rank() == 2) {
      lite_mat_input = LiteMat(input->shape()[1], input->shape()[0],
                               const_cast<void *>(reinterpret_cast<const void *>(input->GetBuffer())),
                               GetLiteCVDataType(input->type()));
    } else {
      RETURN_STATUS_UNEXPECTED("GaussianBlur: input image is not in shape of <H,W,C> or <H,W>");
    }

    std::shared_ptr<Tensor> output_tensor;
    RETURN_IF_NOT_OK(Tensor::CreateEmpty(input->shape(), input->type(), &output_tensor));
    uint8_t *buffer = reinterpret_cast<uint8_t *>(&(*output_tensor->begin<uint8_t>()));
    LiteMat lite_mat_output;
    lite_mat_output.Init(lite_mat_input.width_, lite_mat_input.height_, lite_mat_input.channel_,
                         reinterpret_cast<void *>(buffer), GetLiteCVDataType(input->type()));
    bool ret = GaussianBlur(lite_mat_input, lite_mat_output, {kernel_x, kernel_y}, static_cast<double>(sigma_x),
                            static_cast<double>(sigma_y));
    CHECK_FAIL_RETURN_UNEXPECTED(ret, "GaussianBlur: GaussianBlur failed.");
    *output = output_tensor;
    return Status::OK();
  } catch (std::runtime_error &e) {
    RETURN_STATUS_UNEXPECTED("GaussianBlur: " + std::string(e.what()));
  }
}

Status ValidateImageRank(const std::string &op_name, int32_t rank) {
  if (rank != 2 && rank != 3) {
    std::string err_msg = op_name + ": image shape is not <H,W,C> or <H,W>, but got rank:" + std::to_string(rank);
    RETURN_STATUS_UNEXPECTED(err_msg);
  }
  return Status::OK();
}
}  // namespace dataset
}  // namespace mindspore