/*
 * Copyright (c) 2017 Meng Wang <wangmeng.kids@bytedance.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
#include "neon.S"

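// ff_hevc_sao_band_filter_neon_8:
//   r0: dst, r1: src, r2: dst stride, r3: src stride
//   stack: width, height, offset_table (32 x 16-bit band offsets, loaded into q0-q3)
// The block is processed in columns of 8 pixels (or a final 4-wide column),
// one row per iteration.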
function ff_hevc_sao_band_filter_neon_8, export=1
        push    {r4-r10}
        ldr     r5,  [sp, #28]   // width
        ldr     r4,  [sp, #32]   // height
        ldr     r8,  [sp, #36]   // offset_table
        vpush   {d8-d15}
        mov     r12,  r4         // r12 = height
        mov     r6,   r0         // r6 = r0 = dst
        mov     r7,   r1         // r7 = r1 = src
        vldm    r8,   {q0-q3}
        vmov.u16    q15,  #1
        vmov.u8     q14,  #32
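// Band offsets are 16-bit, but vtbx indexes bytes.  For each pixel the band
// index is src >> 3 (0-31); the code builds a byte-index pair per 16-bit lane
// (low byte = 2*index, high byte = 2*index + 1) and looks the offset up in two
// passes: {q0-q1} covers table bytes 0-31, {q2-q3} covers bytes 32-63 after
// subtracting 32 from the indices (out-of-range vtbx indices leave the
// destination lane untouched).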
0:      pld      [r1]
        cmp      r5,    #4
        beq      4f
8:      subs     r4,    #1
        vld1.8   {d16},  [r1], r3
        vshr.u8  d17,   d16,  #3   // index = src >> 3
        vshll.u8 q9,    d17,  #1   // lowIndex = 2*index
        vadd.u16 q11,   q9,   q15  // 2*index + 1
        vshl.u16 q10,   q11,  #8   // highIndex = (2*index + 1) << 8
        vadd.u16 q10,   q9         // combine high and low byte indices
        // Look-up Table Round 1; index range: 0-15
        vtbx.8   d24,   {q0-q1},   d20
        vtbx.8   d25,   {q0-q1},   d21
        // Look-up Table Round 2; index range: 16-31
        vsub.u8  q10,   q14        // rebase byte indices by 32
        vtbx.8   d24,   {q2-q3},   d20
        vtbx.8   d25,   {q2-q3},   d21
        vaddw.u8 q13,   q12,       d16   // src + offset (16-bit)
        vqmovun.s16      d8,         q13 // saturate to 8 bits
        vst1.8   {d8},  [r0],      r2
        bne      8b
        subs     r5,    #8           // width -= 8
        beq      99f
        mov      r4,    r12          // reset row counter
        add      r6,    #8           // advance dst base to next 8-wide column
        mov      r0,    r6
        add      r7,    #8           // advance src base to next 8-wide column
        mov      r1,    r7
        b        0b
4:      subs     r4,    #1
        vld1.32  {d16[0]},  [r1],  r3
        vshr.u8  d17,   d16,  #3   // index = src >> 3
        vshll.u8 q9,    d17,  #1   // lowIndex = 2*index
        vadd.u16 q11,   q9,   q15  // 2*index + 1
        vshl.u16 q10,   q11,  #8   // highIndex = (2*index + 1) << 8
        vadd.u16 q10,   q9         // combine high and low byte indices
        // Look-up Table Round 1; index range: 0-15
        vtbx.8   d24,   {q0-q1},   d20
        vtbx.8   d25,   {q0-q1},   d21
        // Look-up Table Round 2; index range: 16-31
        vsub.u8  q10,   q14        // rebase byte indices by 32
        vtbx.8   d24,   {q2-q3},   d20
        vtbx.8   d25,   {q2-q3},   d21
        vaddw.u8 q13,   q12,       d16   // src + offset (16-bit)
        vqmovun.s16     d14,       q13   // saturate to 8 bits
        vst1.32  {d14[0]},   [r0],     r2
        bne      4b
        b        99f
99:
        vpop {d8-d15}
        pop  {r4-r10}
        bx   lr
endfunc
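
// ff_hevc_sao_edge_filter_neon_8:
//   r0: dst, r1: src, r2: dst stride, r3: src stride
//   stack: width, height, a_stride, b_stride,
//          sao_offset_val (5 x 16-bit, loaded into q1),
//          edge_idx (5 x 8-bit, loaded into d0)
// a_stride/b_stride are the byte offsets of the two neighbours that define
// the edge direction.  The block is processed in columns of 8 pixels (or a
// final 4-wide column), one row per iteration.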
function ff_hevc_sao_edge_filter_neon_8, export=1
        push    {r4-r11}
        ldr     r5,  [sp, #32]   // width
        ldr     r4,  [sp, #36]   // height
        ldr     r8,  [sp, #40]   // a_stride
        ldr     r9,  [sp, #44]   // b_stride
        ldr     r10, [sp, #48]   // sao_offset_val
        ldr     r11, [sp, #52]   // edge_idx
        vpush   {d8-d15}
        mov     r12,  r4         // r12 = height
        mov     r6,   r0         // r6 = r0 = dst
        mov     r7,   r1         // r7 = r1 = src
        vld1.8  {d0}, [r11]      // edge_idx table loaded into d0, 5x8bit
        vld1.16 {q1}, [r10]      // sao_offset_val table loaded into q1, 5x16bit
        vmov.u8  d1,  #2
        vmov.u16 q2,  #1
0:      mov      r10,    r1
        add      r10,    r8           // src[x + a_stride]
        mov      r11,    r1
        add      r11,    r9           // src[x + b_stride]
        pld      [r1]
        cmp      r5,     #4
        beq      4f
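// sign(src[x] - neighbour) is built branch-free: vcgt gives 0xFF where
// src[x] > neighbour, shifted right by 7 to +1; vclt gives 0xFF (-1 as s8)
// where src[x] < neighbour; their sum is +1, 0 or -1.  Adding the two signs
// plus the constant 2 yields the edge class 0-4, which indexes edge_idx and
// then, via the same two-byte-index trick as the band filter, the 16-bit
// sao_offset_val table.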
8:      subs     r4,     #1
        vld1.8   {d16},  [r1],  r3    // src[x]  8x8bit
        vld1.8   {d17},  [r10], r3    // src[x + a_stride]
        vld1.8   {d18},  [r11], r3    // src[x + b_stride]
        vcgt.u8  d8,     d16,   d17
        vshr.u8  d9,     d8,    #7
        vclt.u8  d8,     d16,   d17
        vadd.u8  d8,     d9           // diff0 = sign(src[x] - src[x + a_stride])
        vcgt.u8  d10,    d16,   d18
        vshr.u8  d11,    d10,   #7
        vclt.u8  d10,    d16,   d18
        vadd.u8  d10,    d11          // diff1 = sign(src[x] - src[x + b_stride])
        vadd.s8  d8,     d10
        vadd.s8  d8,     d1           // edge class = diff0 + diff1 + 2
        vtbx.8   d9,     {d0},  d8    // offset index = edge_idx[class]
        vshll.u8 q6,     d9,    #1    // lowIndex = 2*index
        vadd.u16 q7,     q6,    q2    // 2*index + 1
        vshl.u16 q10,    q7,    #8    // highIndex = (2*index + 1) << 8
        vadd.u16 q10,    q6           // combine low and high byte indices
        vtbx.8   d22,    {q1},  d20   // look up 16-bit offset_val
        vtbx.8   d23,    {q1},  d21
        vaddw.u8 q12,    q11,   d16   // src + offset (16-bit)
        vqmovun.s16      d26,   q12   // saturate to 8 bits
        vst1.8   {d26},  [r0],  r2
        bne      8b
        subs     r5,     #8           // width -= 8
        beq      99f
        mov      r4,     r12          // reset row counter
        add      r6,     #8           // advance dst base to next 8-wide column
        mov      r0,     r6
        add      r7,     #8           // advance src base to next 8-wide column
        mov      r1,     r7
        b        0b
4:      subs     r4,    #1
        vld1.32  {d16[0]},  [r1],  r3
        vld1.32  {d17[0]},  [r10], r3    // src[x + a_stride]
        vld1.32  {d18[0]},  [r11], r3    // src[x + b_stride]
        vcgt.u8  d8,     d16,   d17
        vshr.u8  d9,     d8,    #7
        vclt.u8  d8,     d16,   d17
        vadd.u8  d8,     d9           // diff0 = sign(src[x] - src[x + a_stride])
        vcgt.u8  d10,    d16,   d18
        vshr.u8  d11,    d10,   #7
        vclt.u8  d10,    d16,   d18
        vadd.u8  d10,    d11          // diff1 = sign(src[x] - src[x + b_stride])
        vadd.s8  d8,     d10
        vadd.s8  d8,     d1           // edge class = diff0 + diff1 + 2
        vtbx.8   d9,     {d0},  d8    // offset index = edge_idx[class]
        vshll.u8 q6,     d9,    #1    // lowIndex = 2*index
        vadd.u16 q7,     q6,    q2    // 2*index + 1
        vshl.u16 q10,    q7,    #8    // highIndex = (2*index + 1) << 8
        vadd.u16 q10,    q6           // combine low and high byte indices
        vtbx.8   d22,    {q1},  d20   // look up 16-bit offset_val
        vtbx.8   d23,    {q1},  d21
        vaddw.u8 q12,    q11,   d16   // src + offset (16-bit)
        vqmovun.s16      d26,   q12   // saturate to 8 bits
        vst1.32  {d26[0]}, [r0],  r2
        bne      4b
        b        99f
99:
        vpop {d8-d15}
        pop  {r4-r11}
        bx   lr
endfunc