• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 
11 #include <stdlib.h>
12 #include <string.h>
13 
14 #include "vpx/vpx_image.h"
15 #include "vpx/vpx_integer.h"
16 #include "vpx_mem/vpx_mem.h"
17 
/* Allocate or wrap the storage for an image.
 *
 * If |img| is NULL, a vpx_image_t descriptor is heap-allocated (and marked
 * self_allocd so vpx_img_free() releases it); otherwise the caller's
 * descriptor is zeroed and reinitialized in place.  If |img_data| is NULL,
 * a pixel buffer of the required size is allocated with |buf_align|
 * alignment and owned by the image; otherwise the caller's buffer is
 * wrapped and never freed by the library.
 *
 * Returns the initialized image on success, or NULL on failure (invalid
 * format/alignment/dimensions, or out of memory).  On failure any storage
 * acquired here is released via the fail path.
 */
static vpx_image_t *img_alloc_helper(vpx_image_t *img,
                                     vpx_img_fmt_t fmt,
                                     unsigned int d_w,
                                     unsigned int d_h,
                                     unsigned int buf_align,
                                     unsigned int stride_align,
                                     unsigned char *img_data) {
  unsigned int h, w, s, xcs, ycs, bps;
  unsigned int stride_in_bytes;
  int align;

  /* Treat align==0 like align==1 */
  if (!buf_align)
    buf_align = 1;

  /* Validate alignment (must be power of 2) */
  if (buf_align & (buf_align - 1))
    goto fail;

  /* Treat align==0 like align==1 */
  if (!stride_align)
    stride_align = 1;

  /* Validate alignment (must be power of 2) */
  if (stride_align & (stride_align - 1))
    goto fail;

  /* Impose a maximum of 64 KiB on the alignments and 2^27 pixels on each
   * dimension so that the unsigned stride/size arithmetic below cannot
   * wrap around. */
  if (buf_align > 65536 || stride_align > 65536)
    goto fail;

  if (d_w > 0x08000000 || d_h > 0x08000000)
    goto fail;

  /* Get sample size (bits per pixel) for this format */
  switch (fmt) {
    case VPX_IMG_FMT_RGB32:
    case VPX_IMG_FMT_RGB32_LE:
    case VPX_IMG_FMT_ARGB:
    case VPX_IMG_FMT_ARGB_LE:
      bps = 32;
      break;
    case VPX_IMG_FMT_RGB24:
    case VPX_IMG_FMT_BGR24:
      bps = 24;
      break;
    case VPX_IMG_FMT_RGB565:
    case VPX_IMG_FMT_RGB565_LE:
    case VPX_IMG_FMT_RGB555:
    case VPX_IMG_FMT_RGB555_LE:
    case VPX_IMG_FMT_UYVY:
    case VPX_IMG_FMT_YUY2:
    case VPX_IMG_FMT_YVYU:
      bps = 16;
      break;
    case VPX_IMG_FMT_I420:
    case VPX_IMG_FMT_YV12:
    case VPX_IMG_FMT_VPXI420:
    case VPX_IMG_FMT_VPXYV12:
      bps = 12;
      break;
    case VPX_IMG_FMT_I422:
    case VPX_IMG_FMT_I440:
      bps = 16;
      break;
    case VPX_IMG_FMT_I444:
      bps = 24;
      break;
    case VPX_IMG_FMT_I42016:
      bps = 24;
      break;
    case VPX_IMG_FMT_I42216:
    case VPX_IMG_FMT_I44016:
      bps = 32;
      break;
    case VPX_IMG_FMT_I44416:
      bps = 48;
      break;
    default:
      bps = 16;
      break;
  }

  /* Get chroma shift values for this format */
  switch (fmt) {
    case VPX_IMG_FMT_I420:
    case VPX_IMG_FMT_YV12:
    case VPX_IMG_FMT_VPXI420:
    case VPX_IMG_FMT_VPXYV12:
    case VPX_IMG_FMT_I422:
    case VPX_IMG_FMT_I42016:
    case VPX_IMG_FMT_I42216:
      xcs = 1;
      break;
    default:
      xcs = 0;
      break;
  }

  switch (fmt) {
    case VPX_IMG_FMT_I420:
    case VPX_IMG_FMT_I440:
    case VPX_IMG_FMT_YV12:
    case VPX_IMG_FMT_VPXI420:
    case VPX_IMG_FMT_VPXYV12:
    case VPX_IMG_FMT_I42016:
    case VPX_IMG_FMT_I44016:
      ycs = 1;
      break;
    default:
      ycs = 0;
      break;
  }

  /* Calculate storage sizes given the chroma subsampling.  The dimension
   * and alignment caps above guarantee none of these wrap. */
  align = (1 << xcs) - 1;
  w = (d_w + align) & ~align;
  align = (1 << ycs) - 1;
  h = (d_h + align) & ~align;
  /* For planar formats the stride is in samples; for packed it is bytes. */
  s = (fmt & VPX_IMG_FMT_PLANAR) ? w : bps * w / 8;
  s = (s + stride_align - 1) & ~(stride_align - 1);
  stride_in_bytes = (fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? s * 2 : s;

  /* Allocate the new image descriptor (or reset the caller's) */
  if (!img) {
    img = (vpx_image_t *)calloc(1, sizeof(vpx_image_t));

    if (!img)
      goto fail;

    img->self_allocd = 1;
  } else {
    memset(img, 0, sizeof(vpx_image_t));
  }

  img->img_data = img_data;

  if (!img_data) {
    /* bps already accounts for the 2x sample size of high-bitdepth
     * formats, so h * s * bps / 8 is the full buffer size in bytes. */
    const uint64_t alloc_size = (fmt & VPX_IMG_FMT_PLANAR) ?
                                (uint64_t)h * s * bps / 8 : (uint64_t)h * s;

    /* Reject sizes that do not fit in size_t on this platform. */
    if (alloc_size != (size_t)alloc_size)
      goto fail;

    img->img_data = (uint8_t *)vpx_memalign(buf_align, (size_t)alloc_size);
    img->img_data_owner = 1;
  }

  if (!img->img_data)
    goto fail;

  img->fmt = fmt;
  img->bit_depth = (fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 16 : 8;
  img->w = w;
  img->h = h;
  img->x_chroma_shift = xcs;
  img->y_chroma_shift = ycs;
  img->bps = bps;

  /* Calculate strides */
  img->stride[VPX_PLANE_Y] = img->stride[VPX_PLANE_ALPHA] = stride_in_bytes;
  img->stride[VPX_PLANE_U] = img->stride[VPX_PLANE_V] = stride_in_bytes >> xcs;

  /* Default viewport to entire image */
  if (!vpx_img_set_rect(img, 0, 0, d_w, d_h))
    return img;

fail:
  vpx_img_free(img);
  return NULL;
}
182 
/* Allocate an image with library-owned storage.  A single |align| value is
 * used for both the buffer start and each row stride.  Returns NULL on
 * failure; free the result with vpx_img_free(). */
vpx_image_t *vpx_img_alloc(vpx_image_t *img, vpx_img_fmt_t fmt,
                           unsigned int d_w, unsigned int d_h,
                           unsigned int align) {
  return img_alloc_helper(img, fmt, d_w, d_h, align, align, NULL);
}
190 
/* Wrap a caller-owned pixel buffer |img_data| in an image descriptor.  The
 * buffer is never freed by the library; only row strides are aligned. */
vpx_image_t *vpx_img_wrap(vpx_image_t *img, vpx_img_fmt_t fmt,
                          unsigned int d_w, unsigned int d_h,
                          unsigned int stride_align,
                          unsigned char *img_data) {
  /* By setting buf_align = 1, we don't change buffer alignment in this
   * function. */
  return img_alloc_helper(img, fmt, d_w, d_h, 1, stride_align, img_data);
}
201 
/* Set the displayed rectangle (viewport) of |img| to the w x h region at
 * offset (x, y), recomputing every plane pointer from img_data.
 *
 * Returns 0 on success, -1 if the rectangle does not fit inside the
 * allocated image.
 */
int vpx_img_set_rect(vpx_image_t *img,
                     unsigned int x,
                     unsigned int y,
                     unsigned int w,
                     unsigned int h) {
  unsigned char *data;

  /* Reject rectangles that fall outside the image.  The comparisons are
   * arranged so that neither x + w nor y + h is ever computed, since those
   * sums can wrap around (unsigned overflow) and falsely pass a combined
   * bounds check, yielding plane pointers outside the buffer. */
  if (x <= img->w && w <= img->w - x && y <= img->h && h <= img->h - y) {
    img->d_w = w;
    img->d_h = h;

    /* Calculate plane pointers */
    if (!(img->fmt & VPX_IMG_FMT_PLANAR)) {
      /* Packed formats: one interleaved plane; bps is bits per pixel. */
      img->planes[VPX_PLANE_PACKED] =
        img->img_data + x * img->bps / 8 + y * img->stride[VPX_PLANE_PACKED];
    } else {
      /* Planar formats store 2 bytes per sample when high bitdepth. */
      const int bytes_per_sample =
          (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1;
      data = img->img_data;

      /* Planes are laid out consecutively: [A,] Y, then U/V (order
       * depends on VPX_IMG_FMT_UV_FLIP). */
      if (img->fmt & VPX_IMG_FMT_HAS_ALPHA) {
        img->planes[VPX_PLANE_ALPHA] =
            data + x * bytes_per_sample + y * img->stride[VPX_PLANE_ALPHA];
        data += img->h * img->stride[VPX_PLANE_ALPHA];
      }

      img->planes[VPX_PLANE_Y] = data + x * bytes_per_sample +
          y * img->stride[VPX_PLANE_Y];
      data += img->h * img->stride[VPX_PLANE_Y];

      if (!(img->fmt & VPX_IMG_FMT_UV_FLIP)) {
        img->planes[VPX_PLANE_U] =
            data + (x >> img->x_chroma_shift) * bytes_per_sample +
            (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
        data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
        img->planes[VPX_PLANE_V] =
            data + (x >> img->x_chroma_shift) * bytes_per_sample +
            (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
      } else {
        img->planes[VPX_PLANE_V] =
            data + (x >> img->x_chroma_shift) * bytes_per_sample +
            (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
        data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
        img->planes[VPX_PLANE_U] =
            data + (x >> img->x_chroma_shift) * bytes_per_sample +
            (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
      }
    }
    return 0;
  }
  return -1;
}
254 
/* Flip the image vertically in place: each plane pointer is moved to its
 * last row and the corresponding stride is negated, so that iterating
 * "forward" now walks the rows bottom-to-top.  No pixel data is copied. */
void vpx_img_flip(vpx_image_t *img) {
  /* The row counts are converted to a signed type before the multiply.
   * Per section 6.3.1.8 of ISO C99, if the adjustment stayed unsigned the
   * (possibly negative) stride would be promoted to unsigned, causing
   * errors when the lhs is a larger type than the rhs.
   */
  const int luma_rows = (signed)img->d_h;
  const int chroma_rows = (signed)(img->d_h >> img->y_chroma_shift);

  img->planes[VPX_PLANE_Y] += (luma_rows - 1) * img->stride[VPX_PLANE_Y];
  img->stride[VPX_PLANE_Y] = -img->stride[VPX_PLANE_Y];

  img->planes[VPX_PLANE_U] += (chroma_rows - 1) * img->stride[VPX_PLANE_U];
  img->stride[VPX_PLANE_U] = -img->stride[VPX_PLANE_U];

  img->planes[VPX_PLANE_V] += (chroma_rows - 1) * img->stride[VPX_PLANE_V];
  img->stride[VPX_PLANE_V] = -img->stride[VPX_PLANE_V];

  img->planes[VPX_PLANE_ALPHA] +=
      (luma_rows - 1) * img->stride[VPX_PLANE_ALPHA];
  img->stride[VPX_PLANE_ALPHA] = -img->stride[VPX_PLANE_ALPHA];
}
276 
/* Release an image: frees the pixel buffer only if the library allocated
 * it (img_data_owner), and frees the descriptor itself only if it was
 * heap-allocated by vpx_img_alloc (self_allocd).  NULL is a no-op. */
void vpx_img_free(vpx_image_t *img) {
  if (!img)
    return;

  if (img->img_data && img->img_data_owner)
    vpx_free(img->img_data);

  if (img->self_allocd)
    free(img);
}
286