/* (Source-viewer navigation chrome from the scrape — Home, Line#, Scopes#,
 * Navigate, Raw, Download — not part of the original file.) */
/*
 * Copyright (C) 2013 Xiaolei Yu <dreifachstein@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
20
#include "config.h"
#if HAVE_AS_DN_DIRECTIVE
#include "rgb2yuv_neon_common.S"

/*
 * Register aliases. `alias`/`alias_qw` (and the *_l/*_h d-register views)
 * are provided by rgb2yuv_neon_common.S — not visible here.
 */

/* downsampled R16G16B16 x8 */
alias_qw    r16x8,  q7
alias_qw    g16x8,  q8
alias_qw    b16x8,  q9

/* odd-numbered input bytes, zero-extended to u16 (filled by vtrn.u8
 * against a zeroed register in compute_y_16x1_step) */
alias   n16x16_o,   q11
alias   n16x16_ol,  q11_l
alias   n16x16_oh,  q11_h

/* 32-bit luma accumulators: even/odd pixels, low/high halves */
alias   y32x16_el,  q12
alias   y32x16_eh,  q13
alias   y32x16_ol,  q14
alias   y32x16_oh,  q15

/* 16-bit luma views — deliberately overlap the accumulators above,
 * since vrshrn narrows q12..q15 down into q12/q13 */
alias   y16x16_e,   q12
alias   y16x16_el,  q12_l
alias   y16x16_eh,  q12_h
alias   y16x16_o,   q13
alias   y16x16_ol,  q13_l
alias   y16x16_oh,  q13_h


/* final 8-bit luma shares storage with the even 16-bit lanes */
alias   y8x16,  y16x16_e
49
/*
 * init src
 * Load the 3x3 matrix of s32 conversion coefficients at [\src],
 * narrow each row to s16 into CO_R/CO_G/CO_B (declared in
 * rgb2yuv_neon_common.S), and set the constant output biases.
 * NOTE(review): only BIAS_U is set here although compute_chroma_8x1
 * uses BIAS_\C for both planes — presumably BIAS_V aliases the same
 * register in the common file; confirm there.
 * Clobbers q13-q15.
 */
.macro init     src
    // load s32x3x3, narrow to s16x3x3
    vld3.i32    {q13_l, q14_l, q15_l},          [\src]!
    vld3.i32    {q13_h[0], q14_h[0], q15_h[0]}, [\src]

    vmovn.i32   CO_R, q13
    vmovn.i32   CO_G, q14
    vmovn.i32   CO_B, q15

    vmov.u8     BIAS_Y, #16
    vmov.u8     BIAS_U, #128
.endm
62
63
/*
 * compute_y_16x1_step action, s8x16, coeff
 * One multiply(-accumulate) step of the luma computation.
 * vtrn.u8 against the zeroed n16x16_o splits \s8x16 into its
 * even-numbered bytes (left in \s8x16, zero-extended to u16) and its
 * odd-numbered bytes (in n16x16_o, likewise zero-extended); \action
 * (vmull or vmlal) then folds both halves, weighted by \coeff, into
 * the 32-bit accumulators y32x16_{el,eh,ol,oh}.
 * Clobbers \s8x16 and n16x16_o (q11).
 */
.macro compute_y_16x1_step  action, s8x16, coeff
    vmov.u8     n16x16_o,   #0
    vtrn.u8     \s8x16,     n16x16_o

    \action     y32x16_el,  \s8x16\()_l,    \coeff
    \action     y32x16_eh,  \s8x16\()_h,    \coeff
    \action     y32x16_ol,  n16x16_ol,      \coeff
    \action     y32x16_oh,  n16x16_oh,      \coeff
.endm
73
/*
 * compute_y_16x1 — 16 luma bytes from 16 RGB pixels:
 *   y = ((r*CO_RY + g*CO_GY + b*CO_BY) >> 15, rounded) + BIAS_Y
 * (Q15 fixed-point coefficients, per the #15 rounding narrows below.)
 *
 * in:      r8x16, g8x16, b8x16
 * out:     y8x16
 * clobber: q11-q15, r8x16, g8x16, b8x16
 */
.macro compute_y_16x1
    compute_y_16x1_step vmull, r8x16, CO_RY
    compute_y_16x1_step vmlal, g8x16, CO_GY
    compute_y_16x1_step vmlal, b8x16, CO_BY

    // round and narrow the 32-bit accumulators back to 16 bits
    vrshrn.i32  y16x16_el,  y32x16_el,  #15
    vrshrn.i32  y16x16_eh,  y32x16_eh,  #15
    vrshrn.i32  y16x16_ol,  y32x16_ol,  #15
    vrshrn.i32  y16x16_oh,  y32x16_oh,  #15

    // re-interleave even/odd lanes into pixel order, then add the bias
    vtrn.8      y16x16_e,   y16x16_o
    vadd.u8     y8x16,      y8x16,      BIAS_Y
.endm
92
/* 32-bit chroma accumulators (low/high d-halves) and the 16-bit /
 * 8-bit staging registers the results are narrowed into */
alias   c32x8_l,    q14
alias   c32x8_h,    q15

alias_qw    c16x8,  q13
alias_qw    c8x8x2, q10
98
/*
 * compute_chroma_8x1_step action, s16x8, coeff
 * One multiply(-accumulate) step of the chroma computation: apply
 * \action (vmull/vmlal) with \coeff to both d-halves of \s16x8,
 * accumulating into c32x8_l/c32x8_h (q14/q15).
 */
.macro compute_chroma_8x1_step  action, s16x8, coeff
    \action     c32x8_l,    \s16x8\()_l,    \coeff
    \action     c32x8_h,    \s16x8\()_h,    \coeff
.endm
103
/*
 * compute_chroma_8x1 c, C — 8 chroma bytes from 8 downsampled pixels:
 *   c = ((r*CO_R\C + g*CO_G\C + b*CO_B\C) >> 15, rounded) + BIAS_\C
 * \c/\C name the plane (presumably u/U or v/V given the NV12 output
 * below — confirm against the common file): output goes to \c\()8x8
 * and the bias/coefficients are selected via \C.
 *
 * in:      r16x8, g16x8, b16x8
 * out:     c8x8
 * clobber: q14-q15
 */
.macro compute_chroma_8x1   c, C
    compute_chroma_8x1_step vmull, r16x8, CO_R\C
    compute_chroma_8x1_step vmlal, g16x8, CO_G\C
    compute_chroma_8x1_step vmlal, b16x8, CO_B\C

    // round/narrow 32 -> 16 -> 8 bits, then add the plane bias
    vrshrn.i32  c16x8_l,    c32x8_l,    #15
    vrshrn.i32  c16x8_h,    c32x8_h,    #15
    vmovn.i16   \c\()8x8,   c16x8
    vadd.u8     \c\()8x8,   \c\()8x8,   BIAS_\C
.endm
119
120
121    loop_420sp  rgbx, nv12, init, kernel_420_16x2, 32
122#endif
123