1 /*
2 * Copyright 2012 The LibYuv Project Authors. All rights reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "libyuv/rotate.h"
12
13 #include "libyuv/convert.h"
14 #include "libyuv/cpu_id.h"
15 #include "libyuv/planar_functions.h"
16 #include "libyuv/row.h"
17
18 #ifdef __cplusplus
19 namespace libyuv {
20 extern "C" {
21 #endif
22
23 // ARGBScale has a function to copy pixels to a row, striding each source
24 // pixel by a constant.
25 #if !defined(LIBYUV_DISABLE_X86) && \
26 (defined(_M_IX86) || \
27 (defined(__x86_64__) && !defined(__native_client__)) || \
28 defined(__i386__))
29 #define HAS_SCALEARGBROWDOWNEVEN_SSE2
30 void ScaleARGBRowDownEven_SSE2(const uint8* src_ptr,
31 int src_stride,
32 int src_stepx,
33 uint8* dst_ptr,
34 int dst_width);
35 #endif
36 #if !defined(LIBYUV_DISABLE_NEON) && !defined(__native_client__) && \
37 (defined(__ARM_NEON__) || defined(LIBYUV_NEON) || defined(__aarch64__))
38 #define HAS_SCALEARGBROWDOWNEVEN_NEON
39 void ScaleARGBRowDownEven_NEON(const uint8* src_ptr,
40 int src_stride,
41 int src_stepx,
42 uint8* dst_ptr,
43 int dst_width);
44 #endif
45
46 void ScaleARGBRowDownEven_C(const uint8* src_ptr,
47 int,
48 int src_stepx,
49 uint8* dst_ptr,
50 int dst_width);
51
ARGBTranspose(const uint8 * src,int src_stride,uint8 * dst,int dst_stride,int width,int height)52 static void ARGBTranspose(const uint8* src,
53 int src_stride,
54 uint8* dst,
55 int dst_stride,
56 int width,
57 int height) {
58 int i;
59 int src_pixel_step = src_stride >> 2;
60 void (*ScaleARGBRowDownEven)(const uint8* src_ptr, int src_stride,
61 int src_step, uint8* dst_ptr, int dst_width) =
62 ScaleARGBRowDownEven_C;
63 #if defined(HAS_SCALEARGBROWDOWNEVEN_SSE2)
64 if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(height, 4)) { // Width of dest.
65 ScaleARGBRowDownEven = ScaleARGBRowDownEven_SSE2;
66 }
67 #endif
68 #if defined(HAS_SCALEARGBROWDOWNEVEN_NEON)
69 if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(height, 4)) { // Width of dest.
70 ScaleARGBRowDownEven = ScaleARGBRowDownEven_NEON;
71 }
72 #endif
73
74 for (i = 0; i < width; ++i) { // column of source to row of dest.
75 ScaleARGBRowDownEven(src, 0, src_pixel_step, dst, height);
76 dst += dst_stride;
77 src += 4;
78 }
79 }
80
ARGBRotate90(const uint8 * src,int src_stride,uint8 * dst,int dst_stride,int width,int height)81 void ARGBRotate90(const uint8* src,
82 int src_stride,
83 uint8* dst,
84 int dst_stride,
85 int width,
86 int height) {
87 // Rotate by 90 is a ARGBTranspose with the source read
88 // from bottom to top. So set the source pointer to the end
89 // of the buffer and flip the sign of the source stride.
90 src += src_stride * (height - 1);
91 src_stride = -src_stride;
92 ARGBTranspose(src, src_stride, dst, dst_stride, width, height);
93 }
94
ARGBRotate270(const uint8 * src,int src_stride,uint8 * dst,int dst_stride,int width,int height)95 void ARGBRotate270(const uint8* src,
96 int src_stride,
97 uint8* dst,
98 int dst_stride,
99 int width,
100 int height) {
101 // Rotate by 270 is a ARGBTranspose with the destination written
102 // from bottom to top. So set the destination pointer to the end
103 // of the buffer and flip the sign of the destination stride.
104 dst += dst_stride * (width - 1);
105 dst_stride = -dst_stride;
106 ARGBTranspose(src, src_stride, dst, dst_stride, width, height);
107 }
108
// Rotate an ARGB image 180 degrees in place-friendly fashion: for each pair
// of rows (top, bottom), mirror each row horizontally and swap them through
// a single temporary row buffer. The temporary is needed because the top row
// must be saved before the mirrored bottom row overwrites it.
void ARGBRotate180(const uint8* src,
                   int src_stride,
                   uint8* dst,
                   int dst_stride,
                   int width,
                   int height) {
  // Swap first and last row and mirror the content. Uses a temporary row.
  align_buffer_64(row, width * 4);  // One row of ARGB pixels (4 bytes each).
  const uint8* src_bot = src + src_stride * (height - 1);
  uint8* dst_bot = dst + dst_stride * (height - 1);
  int half_height = (height + 1) >> 1;  // Rounded up so odd heights cover the middle row.
  int y;
  // Select the fastest available row-mirror and row-copy implementations,
  // falling back to the C versions. Later #if blocks may override earlier
  // ones, so ordering reflects preference.
  void (*ARGBMirrorRow)(const uint8* src, uint8* dst, int width) =
      ARGBMirrorRow_C;
  void (*CopyRow)(const uint8* src, uint8* dst, int width) = CopyRow_C;
#if defined(HAS_ARGBMIRRORROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBMirrorRow = ARGBMirrorRow_Any_NEON;
    if (IS_ALIGNED(width, 4)) {
      ARGBMirrorRow = ARGBMirrorRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBMIRRORROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBMirrorRow = ARGBMirrorRow_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBMirrorRow = ARGBMirrorRow_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBMIRRORROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBMirrorRow = ARGBMirrorRow_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBMirrorRow = ARGBMirrorRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBMIRRORROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBMirrorRow = ARGBMirrorRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBMirrorRow = ARGBMirrorRow_MSA;
    }
  }
#endif
#if defined(HAS_COPYROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    CopyRow = IS_ALIGNED(width * 4, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2;
  }
#endif
#if defined(HAS_COPYROW_AVX)
  if (TestCpuFlag(kCpuHasAVX)) {
    CopyRow = IS_ALIGNED(width * 4, 64) ? CopyRow_AVX : CopyRow_Any_AVX;
  }
#endif
#if defined(HAS_COPYROW_ERMS)
  if (TestCpuFlag(kCpuHasERMS)) {
    CopyRow = CopyRow_ERMS;
  }
#endif
#if defined(HAS_COPYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    CopyRow = IS_ALIGNED(width * 4, 32) ? CopyRow_NEON : CopyRow_Any_NEON;
  }
#endif
#if defined(HAS_COPYROW_MIPS)
  if (TestCpuFlag(kCpuHasMIPS)) {
    CopyRow = CopyRow_MIPS;
  }
#endif

  // Odd height will harmlessly mirror the middle row twice.
  for (y = 0; y < half_height; ++y) {
    ARGBMirrorRow(src, row, width);      // Mirror first row into a buffer
    ARGBMirrorRow(src_bot, dst, width);  // Mirror last row into first row
    CopyRow(row, dst_bot, width * 4);    // Copy first mirrored row into last
    src += src_stride;
    dst += dst_stride;
    src_bot -= src_stride;
    dst_bot -= dst_stride;
  }
  free_aligned_buffer_64(row);
}
194
195 LIBYUV_API
ARGBRotate(const uint8 * src_argb,int src_stride_argb,uint8 * dst_argb,int dst_stride_argb,int width,int height,enum RotationMode mode)196 int ARGBRotate(const uint8* src_argb,
197 int src_stride_argb,
198 uint8* dst_argb,
199 int dst_stride_argb,
200 int width,
201 int height,
202 enum RotationMode mode) {
203 if (!src_argb || width <= 0 || height == 0 || !dst_argb) {
204 return -1;
205 }
206
207 // Negative height means invert the image.
208 if (height < 0) {
209 height = -height;
210 src_argb = src_argb + (height - 1) * src_stride_argb;
211 src_stride_argb = -src_stride_argb;
212 }
213
214 switch (mode) {
215 case kRotate0:
216 // copy frame
217 return ARGBCopy(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
218 width, height);
219 case kRotate90:
220 ARGBRotate90(src_argb, src_stride_argb, dst_argb, dst_stride_argb, width,
221 height);
222 return 0;
223 case kRotate270:
224 ARGBRotate270(src_argb, src_stride_argb, dst_argb, dst_stride_argb, width,
225 height);
226 return 0;
227 case kRotate180:
228 ARGBRotate180(src_argb, src_stride_argb, dst_argb, dst_stride_argb, width,
229 height);
230 return 0;
231 default:
232 break;
233 }
234 return -1;
235 }
236
237 #ifdef __cplusplus
238 } // extern "C"
239 } // namespace libyuv
240 #endif
241