// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <algorithm>
#include <cmath>
#include <cstdlib>

#include "base/cpu.h"
#include "base/logging.h"
#include "skia/ext/convolver.h"
#include "skia/ext/convolver_SSE2.h"
#include "skia/ext/convolver_mips_dspr2.h"
#include "third_party/skia/include/core/SkSize.h"
#include "third_party/skia/include/core/SkTypes.h"

namespace skia {

namespace {

// Converts the argument to an 8-bit unsigned value by clamping to the range
// 0-255.
inline unsigned char ClampTo8(int a) {
  if (static_cast<unsigned>(a) < 256)
    return a;  // Avoid the extra check in the common case.
  if (a < 0)
    return 0;
  return 255;
}

// Takes the value produced by accumulating the element-wise product of the
// image with a kernel and brings it back into range.
// All of the filter scaling factors are in fixed point with kShiftBits bits of
// fractional part.
inline unsigned char BringBackTo8(int a, bool take_absolute) {
  a >>= ConvolutionFilter1D::kShiftBits;
  if (take_absolute)
    a = std::abs(a);
  return ClampTo8(a);
}
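
// Illustrative note (not from the original file): the fixed-point weights used
// throughout are integers scaled by 1 << kShiftBits. For example, assuming
// kShiftBits were 14, a floating-point weight of 0.25 would be stored as
// 0.25 * 16384 = 4096; the per-channel accumulator then holds
// sum(weight_i * pixel_i), and shifting right by kShiftBits (as BringBackTo8
// does above) divides the scale factor back out before clamping to 0-255.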

// Stores a list of rows in a circular buffer. You write into it by calling
// AdvanceRow(); it keeps track of which row in the buffer should be used
// next and of the total number of rows added.
class CircularRowBuffer {
 public:
  // The number of pixels in each row is given in |dest_row_pixel_width|.
  // The maximum number of rows needed in the buffer is |max_y_filter_size|
  // (we only need to store enough rows for the biggest filter).
  //
  // We use the |first_input_row| to compute the coordinates of all of the
  // following rows returned by AdvanceRow().
  CircularRowBuffer(int dest_row_pixel_width, int max_y_filter_size,
                    int first_input_row)
      : row_byte_width_(dest_row_pixel_width * 4),
        num_rows_(max_y_filter_size),
        next_row_(0),
        next_row_coordinate_(first_input_row) {
    buffer_.resize(row_byte_width_ * max_y_filter_size);
    row_addresses_.resize(num_rows_);
  }

  // Moves to the next row in the buffer, returning a pointer to the beginning
  // of it.
  unsigned char* AdvanceRow() {
    unsigned char* row = &buffer_[next_row_ * row_byte_width_];
    next_row_coordinate_++;

    // Set the pointer to the next row to use, wrapping around if necessary.
    next_row_++;
    if (next_row_ == num_rows_)
      next_row_ = 0;
    return row;
  }

  // Returns a pointer to an "unrolled" array of rows. These rows will start
  // at the y coordinate placed into |*first_row_index| and will continue in
  // order for the maximum number of rows in this circular buffer.
  //
  // The |first_row_index| may be negative. This means the circular buffer
  // starts before the top of the image (it hasn't been filled yet).
  unsigned char* const* GetRowAddresses(int* first_row_index) {
    // Example for a 4-element circular buffer holding coords 6-9.
    //   Row 0   Coord 8
    //   Row 1   Coord 9
    //   Row 2   Coord 6  <- next_row_ = 2, next_row_coordinate_ = 10.
    //   Row 3   Coord 7
    //
    // The "next" row is also the first (lowest) coordinate. This computation
    // may yield a negative value, but that's OK; the math works out since the
    // user of this buffer will compute the offset relative to the
    // first_row_index and the negative rows will never be used.
    *first_row_index = next_row_coordinate_ - num_rows_;

    int cur_row = next_row_;
    for (int i = 0; i < num_rows_; i++) {
      row_addresses_[i] = &buffer_[cur_row * row_byte_width_];

      // Advance to the next row, wrapping if necessary.
      cur_row++;
      if (cur_row == num_rows_)
        cur_row = 0;
    }
    return &row_addresses_[0];
  }

 private:
  // The buffer storing the rows. They are packed, each one |row_byte_width_|
  // bytes.
  std::vector<unsigned char> buffer_;

  // Number of bytes per row in the |buffer_|.
  int row_byte_width_;

  // The number of rows available in the buffer.
  int num_rows_;

  // The next row index we should write into. This wraps around as the
  // circular buffer is used.
  int next_row_;

  // The y coordinate of the |next_row_|. This is incremented each time a
  // new row is appended and does not wrap.
  int next_row_coordinate_;

  // Buffer used by GetRowAddresses().
  std::vector<unsigned char*> row_addresses_;
};
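
// Usage sketch (illustrative only; the numbers are made up): for a
// 100-pixel-wide destination and a vertical filter that is at most 3 taps
// long, starting at source row 0:
//
//   CircularRowBuffer rows(100, 3, 0);
//   unsigned char* r0 = rows.AdvanceRow();  // storage for source row 0
//   unsigned char* r1 = rows.AdvanceRow();  // storage for source row 1
//   unsigned char* r2 = rows.AdvanceRow();  // storage for source row 2
//   int first_row;
//   unsigned char* const* ordered = rows.GetRowAddresses(&first_row);
//   // first_row == 0; ordered[0..2] point at rows 0, 1, 2 in y order.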

// Convolves horizontally along a single row. The row data is given in
// |src_data| and continues for the num_values() of the filter.
template<bool has_alpha>
void ConvolveHorizontally(const unsigned char* src_data,
                          const ConvolutionFilter1D& filter,
                          unsigned char* out_row) {
  // Loop over each pixel on this row in the output image.
  int num_values = filter.num_values();
  for (int out_x = 0; out_x < num_values; out_x++) {
    // Get the filter that determines the current output pixel.
    int filter_offset, filter_length;
    const ConvolutionFilter1D::Fixed* filter_values =
        filter.FilterForValue(out_x, &filter_offset, &filter_length);

    // Compute the first pixel in this row that the filter affects. It will
    // touch |filter_length| pixels (4 bytes each) after this.
    const unsigned char* row_to_filter = &src_data[filter_offset * 4];

    // Apply the filter to the row to get the destination pixel in |accum|.
    int accum[4] = {0};
    for (int filter_x = 0; filter_x < filter_length; filter_x++) {
      ConvolutionFilter1D::Fixed cur_filter = filter_values[filter_x];
      accum[0] += cur_filter * row_to_filter[filter_x * 4 + 0];
      accum[1] += cur_filter * row_to_filter[filter_x * 4 + 1];
      accum[2] += cur_filter * row_to_filter[filter_x * 4 + 2];
      if (has_alpha)
        accum[3] += cur_filter * row_to_filter[filter_x * 4 + 3];
    }

    // Bring this value back in range. All of the filter scaling factors
    // are in fixed point with kShiftBits bits of fractional part.
    accum[0] >>= ConvolutionFilter1D::kShiftBits;
    accum[1] >>= ConvolutionFilter1D::kShiftBits;
    accum[2] >>= ConvolutionFilter1D::kShiftBits;
    if (has_alpha)
      accum[3] >>= ConvolutionFilter1D::kShiftBits;

    // Store the new pixel.
    out_row[out_x * 4 + 0] = ClampTo8(accum[0]);
    out_row[out_x * 4 + 1] = ClampTo8(accum[1]);
    out_row[out_x * 4 + 2] = ClampTo8(accum[2]);
    if (has_alpha)
      out_row[out_x * 4 + 3] = ClampTo8(accum[3]);
  }
}
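
// Worked example (illustrative, not from the original file): with a 3-tap
// filter whose fixed-point weights are {w0, w1, w2} and whose offset is 5,
// output pixel x receives, for each of the B, G, R (and optionally A)
// channels,
//   ClampTo8((w0 * src[5] + w1 * src[6] + w2 * src[7]) >> kShiftBits)
// where src[i] denotes that channel of source pixel i in the row.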

// Does vertical convolution to produce one output row. The filter values and
// length are given in the first two parameters. These are applied to each
// of the rows pointed to in the |source_data_rows| array, with each row
// being |pixel_width| wide.
//
// The output must have room for |pixel_width * 4| bytes.
template<bool has_alpha>
void ConvolveVertically(const ConvolutionFilter1D::Fixed* filter_values,
                        int filter_length,
                        unsigned char* const* source_data_rows,
                        int pixel_width,
                        unsigned char* out_row) {
  // We go through each column in the output and do a vertical convolution,
  // generating one output pixel each time.
  for (int out_x = 0; out_x < pixel_width; out_x++) {
    // Compute the number of bytes over in each row that the current column
    // we're convolving starts at. The pixel will cover the next 4 bytes.
    int byte_offset = out_x * 4;

    // Apply the filter to one column of pixels.
    int accum[4] = {0};
    for (int filter_y = 0; filter_y < filter_length; filter_y++) {
      ConvolutionFilter1D::Fixed cur_filter = filter_values[filter_y];
      accum[0] += cur_filter * source_data_rows[filter_y][byte_offset + 0];
      accum[1] += cur_filter * source_data_rows[filter_y][byte_offset + 1];
      accum[2] += cur_filter * source_data_rows[filter_y][byte_offset + 2];
      if (has_alpha)
        accum[3] += cur_filter * source_data_rows[filter_y][byte_offset + 3];
    }

    // Bring this value back in range. All of the filter scaling factors
    // are in fixed point with kShiftBits bits of precision.
    accum[0] >>= ConvolutionFilter1D::kShiftBits;
    accum[1] >>= ConvolutionFilter1D::kShiftBits;
    accum[2] >>= ConvolutionFilter1D::kShiftBits;
    if (has_alpha)
      accum[3] >>= ConvolutionFilter1D::kShiftBits;

    // Store the new pixel.
    out_row[byte_offset + 0] = ClampTo8(accum[0]);
    out_row[byte_offset + 1] = ClampTo8(accum[1]);
    out_row[byte_offset + 2] = ClampTo8(accum[2]);
    if (has_alpha) {
      unsigned char alpha = ClampTo8(accum[3]);

      // Make sure the alpha channel doesn't come out smaller than any of the
      // color channels. We use premultiplied alpha channels, so this should
      // never happen, but rounding errors can cause it from time to time.
      // These "impossible" colors will cause overflows (and hence random pixel
      // values) when the resulting bitmap is drawn to the screen.
      //
      // We only need to do this when generating the final output row (here).
      int max_color_channel = std::max(out_row[byte_offset + 0],
          std::max(out_row[byte_offset + 1], out_row[byte_offset + 2]));
      if (alpha < max_color_channel)
        out_row[byte_offset + 3] = max_color_channel;
      else
        out_row[byte_offset + 3] = alpha;
    } else {
      // No alpha channel, the image is opaque.
      out_row[byte_offset + 3] = 0xff;
    }
  }
}

void ConvolveVertically(const ConvolutionFilter1D::Fixed* filter_values,
                        int filter_length,
                        unsigned char* const* source_data_rows,
                        int pixel_width,
                        unsigned char* out_row,
                        bool source_has_alpha) {
  if (source_has_alpha) {
    ConvolveVertically<true>(filter_values, filter_length,
                             source_data_rows,
                             pixel_width,
                             out_row);
  } else {
    ConvolveVertically<false>(filter_values, filter_length,
                              source_data_rows,
                              pixel_width,
                              out_row);
  }
}

}  // namespace

// ConvolutionFilter1D ---------------------------------------------------------

ConvolutionFilter1D::ConvolutionFilter1D()
    : max_filter_(0) {
}

ConvolutionFilter1D::~ConvolutionFilter1D() {
}

void ConvolutionFilter1D::AddFilter(int filter_offset,
                                    const float* filter_values,
                                    int filter_length) {
  SkASSERT(filter_length > 0);

  std::vector<Fixed> fixed_values;
  fixed_values.reserve(filter_length);

  for (int i = 0; i < filter_length; ++i)
    fixed_values.push_back(FloatToFixed(filter_values[i]));

  AddFilter(filter_offset, &fixed_values[0], filter_length);
}

void ConvolutionFilter1D::AddFilter(int filter_offset,
                                    const Fixed* filter_values,
                                    int filter_length) {
  // It is common for leading/trailing filter values to be zeros. In such
  // cases it is beneficial to only store the central factors.
  // For a scaling to 1/4th in each dimension using a Lanczos-2 filter on
  // a 1080p image this optimization gives a ~10% speed improvement.
  int filter_size = filter_length;
  int first_non_zero = 0;
  while (first_non_zero < filter_length && filter_values[first_non_zero] == 0)
    first_non_zero++;

  if (first_non_zero < filter_length) {
    // Here we have at least one non-zero factor.
    int last_non_zero = filter_length - 1;
    while (last_non_zero >= 0 && filter_values[last_non_zero] == 0)
      last_non_zero--;

    filter_offset += first_non_zero;
    filter_length = last_non_zero + 1 - first_non_zero;
    SkASSERT(filter_length > 0);

    for (int i = first_non_zero; i <= last_non_zero; i++)
      filter_values_.push_back(filter_values[i]);
  } else {
    // Here all the factors were zeroes.
    filter_length = 0;
  }

  FilterInstance instance;

  // We pushed filter_length elements onto filter_values_.
  instance.data_location = (static_cast<int>(filter_values_.size()) -
                            filter_length);
  instance.offset = filter_offset;
  instance.trimmed_length = filter_length;
  instance.length = filter_size;
  filters_.push_back(instance);

  max_filter_ = std::max(max_filter_, filter_length);
}
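
// Worked example (illustrative; the values are made up): calling
// AddFilter(10, {0, 0, 3, 7, 3, 0}, 6) appends only {3, 7, 3} to
// |filter_values_| and records offset = 12, trimmed_length = 3 and
// length = 6, i.e. the filter is treated as starting two source pixels
// later with the zero taps dropped.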

const ConvolutionFilter1D::Fixed* ConvolutionFilter1D::GetSingleFilter(
    int* specified_filter_length,
    int* filter_offset,
    int* filter_length) const {
  const FilterInstance& filter = filters_[0];
  *filter_offset = filter.offset;
  *filter_length = filter.trimmed_length;
  *specified_filter_length = filter.length;
  if (filter.trimmed_length == 0)
    return NULL;

  return &filter_values_[filter.data_location];
}

typedef void (*ConvolveVertically_pointer)(
    const ConvolutionFilter1D::Fixed* filter_values,
    int filter_length,
    unsigned char* const* source_data_rows,
    int pixel_width,
    unsigned char* out_row,
    bool has_alpha);
typedef void (*Convolve4RowsHorizontally_pointer)(
    const unsigned char* src_data[4],
    const ConvolutionFilter1D& filter,
    unsigned char* out_row[4]);
typedef void (*ConvolveHorizontally_pointer)(
    const unsigned char* src_data,
    const ConvolutionFilter1D& filter,
    unsigned char* out_row,
    bool has_alpha);

struct ConvolveProcs {
  // This is how many extra pixels may be read by the
  // convolve*horizontally functions.
  int extra_horizontal_reads;
  ConvolveVertically_pointer convolve_vertically;
  Convolve4RowsHorizontally_pointer convolve_4rows_horizontally;
  ConvolveHorizontally_pointer convolve_horizontally;
};

void SetupSIMD(ConvolveProcs* procs) {
#ifdef SIMD_SSE2
  base::CPU cpu;
  if (cpu.has_sse2()) {
    procs->extra_horizontal_reads = 3;
    procs->convolve_vertically = &ConvolveVertically_SSE2;
    procs->convolve_4rows_horizontally = &Convolve4RowsHorizontally_SSE2;
    procs->convolve_horizontally = &ConvolveHorizontally_SSE2;
  }
#elif defined SIMD_MIPS_DSPR2
  procs->extra_horizontal_reads = 3;
  procs->convolve_vertically = &ConvolveVertically_mips_dspr2;
  procs->convolve_horizontally = &ConvolveHorizontally_mips_dspr2;
#endif
}

void BGRAConvolve2D(const unsigned char* source_data,
                    int source_byte_row_stride,
                    bool source_has_alpha,
                    const ConvolutionFilter1D& filter_x,
                    const ConvolutionFilter1D& filter_y,
                    int output_byte_row_stride,
                    unsigned char* output,
                    bool use_simd_if_possible) {
  ConvolveProcs simd;
  simd.extra_horizontal_reads = 0;
  simd.convolve_vertically = NULL;
  simd.convolve_4rows_horizontally = NULL;
  simd.convolve_horizontally = NULL;
  if (use_simd_if_possible) {
    SetupSIMD(&simd);
  }

  int max_y_filter_size = filter_y.max_filter();

  // The next row in the input that we will generate a horizontally
  // convolved row for. If the filter doesn't start at the beginning of the
  // image (this is the case when we are only resizing a subset), then we
  // don't want to generate any output rows before that. Compute the starting
  // row for convolution as the first pixel for the first vertical filter.
  int filter_offset, filter_length;
  const ConvolutionFilter1D::Fixed* filter_values =
      filter_y.FilterForValue(0, &filter_offset, &filter_length);
  int next_x_row = filter_offset;

  // We loop over each row in the input doing a horizontal convolution. This
  // will result in a horizontally convolved image. We write the results into
  // a circular buffer of convolved rows and do vertical convolution as rows
  // are available. This prevents us from having to store the entire
  // intermediate image and helps cache coherency.
  // We need four extra rows so that the horizontal convolution of four rows
  // can be done simultaneously. We also pad each row in the row buffer so
  // that it is aligned up to 16 bytes.
  // TODO(jiesun): We do not use aligned load from row buffer in vertical
  // convolution pass yet. Somehow Windows does not like it.
  int row_buffer_width = (filter_x.num_values() + 15) & ~0xF;
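  // For example (illustrative), a 100-pixel-wide destination gets a
  // 112-pixel row buffer: (100 + 15) & ~0xF == 112, i.e. num_values()
  // rounded up to the next multiple of 16.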
  int row_buffer_height = max_y_filter_size +
      (simd.convolve_4rows_horizontally ? 4 : 0);
  CircularRowBuffer row_buffer(row_buffer_width,
                               row_buffer_height,
                               filter_offset);

  // Loop over every possible output row, processing just enough horizontal
  // convolutions to run each subsequent vertical convolution.
  SkASSERT(output_byte_row_stride >= filter_x.num_values() * 4);
  int num_output_rows = filter_y.num_values();

  // We need to check which is the last line to convolve before we advance 4
  // lines in one iteration.
  int last_filter_offset, last_filter_length;

  // SSE2 can access up to 3 extra pixels past the end of the
  // buffer. At the bottom of the image, we have to be careful
  // not to access data past the end of the buffer. Normally
  // we fall back to the C++ implementation for the last row.
  // If the last row is less than 3 pixels wide, we may have to fall
  // back to the C++ version for more rows. Compute how many
  // rows we need to avoid the SSE implementation for here.
  filter_x.FilterForValue(filter_x.num_values() - 1, &last_filter_offset,
                          &last_filter_length);
  int avoid_simd_rows = 1 + simd.extra_horizontal_reads /
      (last_filter_offset + last_filter_length);
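  // Illustrative example: with extra_horizontal_reads == 3 and a last x
  // filter that ends at source pixel 10 (offset + length == 10), this gives
  // avoid_simd_rows == 1, so roughly only the final source row falls back to
  // the scalar C++ path; for a very narrow image where offset + length == 2
  // it gives 2.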

  filter_y.FilterForValue(num_output_rows - 1, &last_filter_offset,
                          &last_filter_length);

  for (int out_y = 0; out_y < num_output_rows; out_y++) {
    filter_values = filter_y.FilterForValue(out_y,
                                            &filter_offset, &filter_length);

    // Generate output rows until we have enough to run the current filter.
    while (next_x_row < filter_offset + filter_length) {
      if (simd.convolve_4rows_horizontally &&
          next_x_row + 3 < last_filter_offset + last_filter_length -
          avoid_simd_rows) {
        const unsigned char* src[4];
        unsigned char* out_row[4];
        for (int i = 0; i < 4; ++i) {
          src[i] = &source_data[(next_x_row + i) * source_byte_row_stride];
          out_row[i] = row_buffer.AdvanceRow();
        }
        simd.convolve_4rows_horizontally(src, filter_x, out_row);
        next_x_row += 4;
      } else {
        // Check if we need to avoid SSE2 for this row.
        if (simd.convolve_horizontally &&
            next_x_row < last_filter_offset + last_filter_length -
            avoid_simd_rows) {
          simd.convolve_horizontally(
              &source_data[next_x_row * source_byte_row_stride],
              filter_x, row_buffer.AdvanceRow(), source_has_alpha);
        } else {
          if (source_has_alpha) {
            ConvolveHorizontally<true>(
                &source_data[next_x_row * source_byte_row_stride],
                filter_x, row_buffer.AdvanceRow());
          } else {
            ConvolveHorizontally<false>(
                &source_data[next_x_row * source_byte_row_stride],
                filter_x, row_buffer.AdvanceRow());
          }
        }
        next_x_row++;
      }
    }

    // Compute where in the output image this row of final data will go.
    unsigned char* cur_output_row = &output[out_y * output_byte_row_stride];

    // Get the list of rows that the circular buffer has, in order.
    int first_row_in_circular_buffer;
    unsigned char* const* rows_to_convolve =
        row_buffer.GetRowAddresses(&first_row_in_circular_buffer);

    // Now compute the start of the subset of those rows that the filter
    // needs.
    unsigned char* const* first_row_for_filter =
        &rows_to_convolve[filter_offset - first_row_in_circular_buffer];

    if (simd.convolve_vertically) {
      simd.convolve_vertically(filter_values, filter_length,
                               first_row_for_filter,
                               filter_x.num_values(), cur_output_row,
                               source_has_alpha);
    } else {
      ConvolveVertically(filter_values, filter_length,
                         first_row_for_filter,
                         filter_x.num_values(), cur_output_row,
                         source_has_alpha);
    }
  }
}
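
// Usage sketch (illustrative; the buffers, kernels, and sizes are made up,
// not taken from this file): to shrink a 4x4 BGRA image to 2x2 with a simple
// 2-tap box filter, a caller could build one filter per output column and
// row and then run the 2D pass:
//
//   ConvolutionFilter1D filter_x, filter_y;
//   const float box[2] = {0.5f, 0.5f};
//   for (int i = 0; i < 2; ++i) {
//     filter_x.AddFilter(i * 2, box, 2);
//     filter_y.AddFilter(i * 2, box, 2);
//   }
//   // 4 source pixels * 4 bytes per pixel = 16-byte source stride;
//   // 2 output pixels * 4 bytes per pixel = 8-byte output stride.
//   BGRAConvolve2D(source_pixels, 16, /*source_has_alpha=*/true,
//                  filter_x, filter_y, 8, output_pixels,
//                  /*use_simd_if_possible=*/true);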

void SingleChannelConvolveX1D(const unsigned char* source_data,
                              int source_byte_row_stride,
                              int input_channel_index,
                              int input_channel_count,
                              const ConvolutionFilter1D& filter,
                              const SkISize& image_size,
                              unsigned char* output,
                              int output_byte_row_stride,
                              int output_channel_index,
                              int output_channel_count,
                              bool absolute_values) {
  int filter_offset, filter_length, filter_size;
  // Very much unlike BGRAConvolve2D, here we expect to have the same filter
  // for all pixels.
  const ConvolutionFilter1D::Fixed* filter_values =
      filter.GetSingleFilter(&filter_size, &filter_offset, &filter_length);

  if (filter_values == NULL || image_size.width() < filter_size) {
    NOTREACHED();
    return;
  }

  int centrepoint = filter_length / 2;
  if (filter_size - filter_offset != 2 * filter_offset) {
    // This means the original filter was not symmetrical AND
    // got clipped from one side more than from the other.
    centrepoint = filter_size / 2 - filter_offset;
  }

  const unsigned char* source_data_row = source_data;
  unsigned char* output_row = output;

  for (int r = 0; r < image_size.height(); ++r) {
    unsigned char* target_byte = output_row + output_channel_index;
    // Process the lead part, padding the image to the left with the first
    // pixel.
    int c = 0;
    for (; c < centrepoint; ++c, target_byte += output_channel_count) {
      int accval = 0;
      int i = 0;
      int pixel_byte_index = input_channel_index;
      for (; i < centrepoint - c; ++i)  // Padding part.
        accval += filter_values[i] * source_data_row[pixel_byte_index];

      for (; i < filter_length; ++i, pixel_byte_index += input_channel_count)
        accval += filter_values[i] * source_data_row[pixel_byte_index];

      *target_byte = BringBackTo8(accval, absolute_values);
    }

    // Now for the main event.
    for (; c < image_size.width() - centrepoint;
         ++c, target_byte += output_channel_count) {
      int accval = 0;
      int pixel_byte_index = (c - centrepoint) * input_channel_count +
          input_channel_index;

      for (int i = 0; i < filter_length;
           ++i, pixel_byte_index += input_channel_count) {
        accval += filter_values[i] * source_data_row[pixel_byte_index];
      }

      *target_byte = BringBackTo8(accval, absolute_values);
    }

    for (; c < image_size.width(); ++c, target_byte += output_channel_count) {
      int accval = 0;
      int overlap_taps = image_size.width() - c + centrepoint;
      int pixel_byte_index = (c - centrepoint) * input_channel_count +
          input_channel_index;
      int i = 0;
      for (; i < overlap_taps - 1;
           ++i, pixel_byte_index += input_channel_count)
        accval += filter_values[i] * source_data_row[pixel_byte_index];

      for (; i < filter_length; ++i)
        accval += filter_values[i] * source_data_row[pixel_byte_index];

      *target_byte = BringBackTo8(accval, absolute_values);
    }

    source_data_row += source_byte_row_stride;
    output_row += output_byte_row_stride;
  }
}

void SingleChannelConvolveY1D(const unsigned char* source_data,
                              int source_byte_row_stride,
                              int input_channel_index,
                              int input_channel_count,
                              const ConvolutionFilter1D& filter,
                              const SkISize& image_size,
                              unsigned char* output,
                              int output_byte_row_stride,
                              int output_channel_index,
                              int output_channel_count,
                              bool absolute_values) {
  int filter_offset, filter_length, filter_size;
  // Very much unlike BGRAConvolve2D, here we expect to have the same filter
  // for all pixels.
  const ConvolutionFilter1D::Fixed* filter_values =
      filter.GetSingleFilter(&filter_size, &filter_offset, &filter_length);

  if (filter_values == NULL || image_size.height() < filter_size) {
    NOTREACHED();
    return;
  }

  int centrepoint = filter_length / 2;
  if (filter_size - filter_offset != 2 * filter_offset) {
    // This means the original filter was not symmetrical AND
    // got clipped from one side more than from the other.
    centrepoint = filter_size / 2 - filter_offset;
  }

  for (int c = 0; c < image_size.width(); ++c) {
    unsigned char* target_byte = output + c * output_channel_count +
        output_channel_index;
    int r = 0;

    for (; r < centrepoint; ++r, target_byte += output_byte_row_stride) {
      int accval = 0;
      int i = 0;
      int pixel_byte_index = c * input_channel_count + input_channel_index;

      for (; i < centrepoint - r; ++i)  // Padding part.
        accval += filter_values[i] * source_data[pixel_byte_index];

      for (; i < filter_length;
           ++i, pixel_byte_index += source_byte_row_stride)
        accval += filter_values[i] * source_data[pixel_byte_index];

      *target_byte = BringBackTo8(accval, absolute_values);
    }

    for (; r < image_size.height() - centrepoint;
         ++r, target_byte += output_byte_row_stride) {
      int accval = 0;
      int pixel_byte_index = (r - centrepoint) * source_byte_row_stride +
          c * input_channel_count + input_channel_index;
      for (int i = 0; i < filter_length;
           ++i, pixel_byte_index += source_byte_row_stride) {
        accval += filter_values[i] * source_data[pixel_byte_index];
      }

      *target_byte = BringBackTo8(accval, absolute_values);
    }

    for (; r < image_size.height();
         ++r, target_byte += output_byte_row_stride) {
      int accval = 0;
      int overlap_taps = image_size.height() - r + centrepoint;
      int pixel_byte_index = (r - centrepoint) * source_byte_row_stride +
          c * input_channel_count + input_channel_index;
      int i = 0;
      for (; i < overlap_taps - 1;
           ++i, pixel_byte_index += source_byte_row_stride) {
        accval += filter_values[i] * source_data[pixel_byte_index];
      }

      for (; i < filter_length; ++i)
        accval += filter_values[i] * source_data[pixel_byte_index];

      *target_byte = BringBackTo8(accval, absolute_values);
    }
  }
}
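
// Usage sketch (illustrative; the buffer names and sizes are made up): the
// two single-channel passes can be chained, e.g. to Gaussian-blur channel 0
// of an interleaved 4-channel |src| image of size |size| into a 1-channel
// |blurred| buffer, with a 1-channel |tmp| buffer in between:
//
//   ConvolutionFilter1D gaussian;
//   SetUpGaussianConvolutionKernel(&gaussian, 2.0f, /*derivative=*/false);
//   SingleChannelConvolveX1D(src, size.width() * 4, 0, 4, gaussian, size,
//                            tmp, size.width(), 0, 1, false);
//   SingleChannelConvolveY1D(tmp, size.width(), 0, 1, gaussian, size,
//                            blurred, size.width(), 0, 1, false);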

void SetUpGaussianConvolutionKernel(ConvolutionFilter1D* filter,
                                    float kernel_sigma,
                                    bool derivative) {
  DCHECK(filter != NULL);
  DCHECK_GT(kernel_sigma, 0.0);
  const int tail_length = static_cast<int>(4.0f * kernel_sigma + 0.5f);
  const int kernel_size = tail_length * 2 + 1;
  const float sigmasq = kernel_sigma * kernel_sigma;
  std::vector<float> kernel_weights(kernel_size, 0.0);
  float kernel_sum = 1.0f;

  kernel_weights[tail_length] = 1.0f;

  for (int ii = 1; ii <= tail_length; ++ii) {
    float v = std::exp(-0.5f * ii * ii / sigmasq);
    kernel_weights[tail_length + ii] = v;
    kernel_weights[tail_length - ii] = v;
    kernel_sum += 2.0f * v;
  }
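
  // The loop above fills the symmetric Gaussian taps: weight(i) =
  // exp(-i^2 / (2 * sigma^2)) for offsets -tail_length..tail_length, with
  // the center tap equal to 1. The division below normalizes the kernel so
  // the weights sum to 1.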

  for (int i = 0; i < kernel_size; ++i)
    kernel_weights[i] /= kernel_sum;

  if (derivative) {
    kernel_weights[tail_length] = 0.0;
    for (int ii = 1; ii <= tail_length; ++ii) {
      float v = sigmasq * kernel_weights[tail_length + ii] / ii;
      kernel_weights[tail_length + ii] = v;
      kernel_weights[tail_length - ii] = -v;
    }
  }

  filter->AddFilter(0, &kernel_weights[0], kernel_weights.size());
}

}  // namespace skia