/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "vpx/vpx_image.h"
#include "vpx/vpx_integer.h"
#include "vpx_mem/vpx_mem.h"

static vpx_image_t *img_alloc_helper(vpx_image_t *img, vpx_img_fmt_t fmt,
                                     unsigned int d_w, unsigned int d_h,
                                     unsigned int buf_align,
                                     unsigned int stride_align,
                                     unsigned char *img_data) {
  unsigned int h, w, xcs, ycs, bps;
  uint64_t s;
  int stride_in_bytes;
  unsigned int align;

  if (img != NULL) memset(img, 0, sizeof(vpx_image_t));

  if (fmt == VPX_IMG_FMT_NONE) goto fail;

  /* Impose maximum values on input parameters so that this function can
   * perform arithmetic operations without worrying about overflows.
   */
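  /* 0x08000000 is 2^27, so with these dimension caps (and alignments of at
   * most 2^16) every size product computed below stays well under 2^64.
   */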
  if (d_w > 0x08000000 || d_h > 0x08000000 || buf_align > 65536 ||
      stride_align > 65536) {
    goto fail;
  }

  /* Treat align==0 like align==1 */
  if (!buf_align) buf_align = 1;

  /* Validate alignment (must be power of 2) */
  if (buf_align & (buf_align - 1)) goto fail;

  /* Treat align==0 like align==1 */
  if (!stride_align) stride_align = 1;

  /* Validate alignment (must be power of 2) */
  if (stride_align & (stride_align - 1)) goto fail;

  /* Get sample size for this format */
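  /* bps is the number of bits needed per pixel once all planes are counted,
   * e.g. 8 + 2 * (8 / 4) = 12 for 8-bit 4:2:0.
   */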
  switch (fmt) {
    case VPX_IMG_FMT_I420:
    case VPX_IMG_FMT_YV12:
    case VPX_IMG_FMT_NV12: bps = 12; break;
    case VPX_IMG_FMT_I422:
    case VPX_IMG_FMT_I440: bps = 16; break;
    case VPX_IMG_FMT_I444: bps = 24; break;
    case VPX_IMG_FMT_I42016: bps = 24; break;
    case VPX_IMG_FMT_I42216:
    case VPX_IMG_FMT_I44016: bps = 32; break;
    case VPX_IMG_FMT_I44416: bps = 48; break;
    default: bps = 16; break;
  }

  /* Get chroma shift values for this format */
  // For VPX_IMG_FMT_NV12, xcs needs to be 0 such that UV data is all read at
  // one time.
  switch (fmt) {
    case VPX_IMG_FMT_I420:
    case VPX_IMG_FMT_YV12:
    case VPX_IMG_FMT_I422:
    case VPX_IMG_FMT_I42016:
    case VPX_IMG_FMT_I42216: xcs = 1; break;
    default: xcs = 0; break;
  }

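  /* ycs is the log2 vertical subsampling factor of the chroma planes. */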
  switch (fmt) {
    case VPX_IMG_FMT_I420:
    case VPX_IMG_FMT_NV12:
    case VPX_IMG_FMT_I440:
    case VPX_IMG_FMT_YV12:
    case VPX_IMG_FMT_I42016:
    case VPX_IMG_FMT_I44016: ycs = 1; break;
    default: ycs = 0; break;
  }

  /* Calculate storage sizes. */
  if (img_data) {
    /* If the buffer was allocated externally, the width and height shouldn't
     * be adjusted. */
    w = d_w;
    h = d_h;
  } else {
    /* Calculate storage sizes given the chroma subsampling */
    align = (1 << xcs) - 1;
    w = (d_w + align) & ~align;
    assert(d_w <= w);
    align = (1 << ycs) - 1;
    h = (d_h + align) & ~align;
    assert(d_h <= h);
  }

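  /* For planar formats, s starts out as luma samples per (padded) row; for
   * packed formats it is bytes per row. High-bit-depth samples take two bytes,
   * the row size is rounded up to stride_align, and for planar high-bit-depth
   * formats s is then converted back to samples for the allocation below.
   */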
  s = (fmt & VPX_IMG_FMT_PLANAR) ? w : (uint64_t)bps * w / 8;
  s = (fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? s * 2 : s;
  s = (s + stride_align - 1) & ~((uint64_t)stride_align - 1);
  if (s > INT_MAX) goto fail;
  stride_in_bytes = (int)s;
  s = (fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? s / 2 : s;

  /* Allocate the new image */
  if (!img) {
    img = (vpx_image_t *)calloc(1, sizeof(vpx_image_t));

    if (!img) goto fail;

    img->self_allocd = 1;
  }

  img->img_data = img_data;

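  /* If no external buffer was supplied, allocate one. For planar formats s
   * counts samples per padded row and bps / 8 scales that to bytes across all
   * planes; for packed formats s is already the full row size in bytes.
   */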
  if (!img_data) {
    uint64_t alloc_size;
    alloc_size = (fmt & VPX_IMG_FMT_PLANAR) ? (uint64_t)h * s * bps / 8
                                            : (uint64_t)h * s;

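    /* Reject allocations whose size does not fit in size_t (e.g. on 32-bit
     * targets). */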
    if (alloc_size != (size_t)alloc_size) goto fail;

    img->img_data = (uint8_t *)vpx_memalign(buf_align, (size_t)alloc_size);
    img->img_data_owner = 1;
  }

  if (!img->img_data) goto fail;

  img->fmt = fmt;
  img->bit_depth = (fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 16 : 8;
  img->w = w;
  img->h = h;
  img->x_chroma_shift = xcs;
  img->y_chroma_shift = ycs;
  img->bps = bps;

  /* Calculate strides */
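  /* Luma and alpha use the padded row size computed above; chroma rows are
   * narrower by the horizontal subsampling factor. */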
  img->stride[VPX_PLANE_Y] = img->stride[VPX_PLANE_ALPHA] = stride_in_bytes;
  img->stride[VPX_PLANE_U] = img->stride[VPX_PLANE_V] = stride_in_bytes >> xcs;

  /* Default viewport to entire image */
  if (!vpx_img_set_rect(img, 0, 0, d_w, d_h)) return img;

fail:
  vpx_img_free(img);
  return NULL;
}

vpx_image_t *vpx_img_alloc(vpx_image_t *img, vpx_img_fmt_t fmt,
                           unsigned int d_w, unsigned int d_h,
                           unsigned int align) {
  return img_alloc_helper(img, fmt, d_w, d_h, align, align, NULL);
}
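
/* Illustrative usage sketch (not part of this file; the error handling shown
 * is an assumption):
 *
 *   vpx_image_t raw;
 *   if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, 640, 480, 32)) {
 *     // handle allocation failure
 *   }
 *   // ... fill raw.planes[] using raw.stride[] ...
 *   vpx_img_free(&raw);
 */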

vpx_image_t *vpx_img_wrap(vpx_image_t *img, vpx_img_fmt_t fmt, unsigned int d_w,
                          unsigned int d_h, unsigned int stride_align,
                          unsigned char *img_data) {
  /* By setting buf_align = 1, we don't change buffer alignment in this
   * function. */
  return img_alloc_helper(img, fmt, d_w, d_h, 1, stride_align, img_data);
}
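
/* Illustrative usage sketch (the buffer name and its contiguous I420 layout
 * are assumptions): wrap an existing buffer without copying it:
 *
 *   vpx_image_t img;
 *   vpx_img_wrap(&img, VPX_IMG_FMT_I420, 320, 240, 1, existing_i420_buf);
 */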

int vpx_img_set_rect(vpx_image_t *img, unsigned int x, unsigned int y,
                     unsigned int w, unsigned int h) {
  if (x <= UINT_MAX - w && x + w <= img->w && y <= UINT_MAX - h &&
      y + h <= img->h) {
    img->d_w = w;
    img->d_h = h;

    /* Calculate plane pointers */
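    /* Planes are stored back to back in img_data: optional alpha, then Y,
     * then the two chroma planes (their order depends on
     * VPX_IMG_FMT_UV_FLIP). */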
    if (!(img->fmt & VPX_IMG_FMT_PLANAR)) {
      img->planes[VPX_PLANE_PACKED] =
          img->img_data + x * img->bps / 8 + y * img->stride[VPX_PLANE_PACKED];
    } else {
      const int bytes_per_sample =
          (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1;
      unsigned char *data = img->img_data;

      if (img->fmt & VPX_IMG_FMT_HAS_ALPHA) {
        img->planes[VPX_PLANE_ALPHA] =
            data + x * bytes_per_sample + y * img->stride[VPX_PLANE_ALPHA];
        data += (size_t)img->h * img->stride[VPX_PLANE_ALPHA];
      }

      img->planes[VPX_PLANE_Y] =
          data + x * bytes_per_sample + y * img->stride[VPX_PLANE_Y];
      data += (size_t)img->h * img->stride[VPX_PLANE_Y];

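      /* NV12 stores U and V interleaved in a single plane after Y, so the V
       * pointer is simply one byte past the U pointer. */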
      if (img->fmt == VPX_IMG_FMT_NV12) {
        img->planes[VPX_PLANE_U] =
            data + (x >> img->x_chroma_shift) +
            (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
        img->planes[VPX_PLANE_V] = img->planes[VPX_PLANE_U] + 1;
      } else if (!(img->fmt & VPX_IMG_FMT_UV_FLIP)) {
        img->planes[VPX_PLANE_U] =
            data + (x >> img->x_chroma_shift) * bytes_per_sample +
            (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
        data +=
            (size_t)(img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
        img->planes[VPX_PLANE_V] =
            data + (x >> img->x_chroma_shift) * bytes_per_sample +
            (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
      } else {
        img->planes[VPX_PLANE_V] =
            data + (x >> img->x_chroma_shift) * bytes_per_sample +
            (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
        data +=
            (size_t)(img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
        img->planes[VPX_PLANE_U] =
            data + (x >> img->x_chroma_shift) * bytes_per_sample +
            (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
      }
    }
    return 0;
  }
  return -1;
}

void vpx_img_flip(vpx_image_t *img) {
  /* Note: In the pointer adjustment calculations below, we want the rhs
   * to be promoted to a signed type. Section 6.3.1.8 of the ISO C99
   * standard indicates that if the adjustment parameter is unsigned, the
   * stride parameter will be promoted to unsigned, causing errors when
   * the lhs is a larger type than the rhs.
   */
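  /* Point each plane at its last displayed row and negate its stride so that
   * row iteration proceeds bottom-up. */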
  img->planes[VPX_PLANE_Y] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_Y];
  img->stride[VPX_PLANE_Y] = -img->stride[VPX_PLANE_Y];

  img->planes[VPX_PLANE_U] += (signed)((img->d_h >> img->y_chroma_shift) - 1) *
                              img->stride[VPX_PLANE_U];
  img->stride[VPX_PLANE_U] = -img->stride[VPX_PLANE_U];

  img->planes[VPX_PLANE_V] += (signed)((img->d_h >> img->y_chroma_shift) - 1) *
                              img->stride[VPX_PLANE_V];
  img->stride[VPX_PLANE_V] = -img->stride[VPX_PLANE_V];

  img->planes[VPX_PLANE_ALPHA] +=
      (signed)(img->d_h - 1) * img->stride[VPX_PLANE_ALPHA];
  img->stride[VPX_PLANE_ALPHA] = -img->stride[VPX_PLANE_ALPHA];
}

void vpx_img_free(vpx_image_t *img) {
  if (img) {
    if (img->img_data && img->img_data_owner) vpx_free(img->img_data);

    if (img->self_allocd) free(img);
  }
}