///*****************************************************************************
//*
//* Copyright (C) 2012 Ittiam Systems Pvt Ltd, Bangalore
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at:
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
//*
//*****************************************************************************/
///**
//*******************************************************************************
//* @file
//*  ihevc_sao_edge_offset_class1_chroma.s
//*
//* @brief
//*  Contains the function definition for SAO edge offset class 1 (vertical)
//* for interleaved chroma. The function is coded in assembly using NEON
//* instructions and can be compiled using ARM RVCT
//*
//* @author
//*  Parthiban V
//*
//* @par List of Functions:
//*
//*
//* @remarks
//*  None
//*
//*******************************************************************************
//*/
//void ihevc_sao_edge_offset_class1_chroma(UWORD8 *pu1_src,
//                              WORD32 src_strd,
//                              UWORD8 *pu1_src_left,
//                              UWORD8 *pu1_src_top,
//                              UWORD8 *pu1_src_top_left,
//                              UWORD8 *pu1_src_top_right,
//                              UWORD8 *pu1_src_bot_left,
//                              UWORD8 *pu1_avail,
//                              WORD8 *pi1_sao_offset_u,
//                              WORD8 *pi1_sao_offset_v,
//                              WORD32 wd,
//                              WORD32 ht)
//**************Variables Vs Registers*****************************************
//x0 =>    *pu1_src
//x1 =>    src_strd
//x2 =>    *pu1_src_left
//x3 =>    *pu1_src_top
//x4 =>    *pu1_src_top_left
//x5 =>    *pu1_avail
//x6 =>    *pi1_sao_offset_u
//x7 =>    *pi1_sao_offset_v
//x8 =>    wd
//x9 =>    ht

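//**************Reference C model**********************************************
// A minimal sketch of the per-pixel operation this routine vectorises (not
// part of the original source). SIGN/CLIP3 stand for the library's usual
// macros, and the edge table contents {1, 2, 0, 3, 4} are assumed from
// gi1_table_edge_idx:
//
//  for(row = 0; row < ht; row++)
//      for(col = 0; col < wd; col++)  /* wd counts interleaved U/V bytes */
//      {
//          WORD32 sign_up   = SIGN(pu1_src[col] - pu1_src[col - src_strd]);
//          WORD32 sign_down = SIGN(pu1_src[col] - pu1_src[col + src_strd]);
//          WORD32 edge_idx  = gi1_table_edge_idx[2 + sign_up + sign_down];
//          WORD8 *pi1_offset = (col & 1) ? pi1_sao_offset_v : pi1_sao_offset_u;
//          pu1_src[col] = CLIP3(pu1_src[col] + pi1_offset[edge_idx], 0, 255);
//      }
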
.text
.p2align 2
.include "ihevc_neon_macros.s"

.globl gi1_table_edge_idx
.globl ihevc_sao_edge_offset_class1_chroma_av8

ihevc_sao_edge_offset_class1_chroma_av8:
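    // AAPCS64: the first eight arguments arrive in x0-x7; the remaining four
    // (pi1_sao_offset_u, pi1_sao_offset_v, wd, ht) are picked up from the stack.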


    ldr         x8,[sp,#0]
    ldr         x9,[sp,#8]
    ldr         w10,[sp,#16]
    ldr         w11,[sp,#24]



    // STMFD sp!, {x4-x12, x14}            //stack stores the values of the arguments
    stp         x19, x20,[sp,#-16]!
    stp         x21, x22,[sp,#-16]!
    stp         x23, x24,[sp,#-16]!
    stp         x25, x26,[sp,#-16]!

    mov         x15,x4 // *pu1_src_top_left 40
    mov         x16,x5 // *pu1_src_top_right 44
    mov         x17,x6 // *pu1_src_bot_left 48
    mov         x21,x7 // *pu1_avail 52
    mov         x22,x8 // *pi1_sao_offset_u 56
    mov         x23,x9 // *pi1_sao_offset_v 60
    mov         x24,x10 // wd 64
    mov         x25,x11 // ht 68

    mov         x4,x15
    mov         x5,x21
    mov         x6,x22
    mov         x7,x23
    mov         x8,x24
    mov         x9,x25

    SUB         x10,x8,#2                   //wd - 2
    LDRH        w11,[x3,x10]                //pu1_src_top[wd - 2]
    STRH        w11,[x4]                    //*pu1_src_top_left = pu1_src_top[wd - 2]
    ADD         x11,x0,x10                  //pu1_src[row * src_strd + wd - 2]
    MOV         x12,x2                      //Move pu1_src_left pointer to x12
    MOV         x14,x9                      //Move ht to x14 for loop count
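    // Before filtering, back up the last chroma pair (U,V at wd - 2) of every
    // row into pu1_src_left; the in-place SAO below would otherwise overwrite
    // the values a neighbouring block still needs.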
SRC_LEFT_LOOP:
    LDRH        w10,[x11]                   //Load pu1_src[row * src_strd + wd - 2]
    ADD         x11,x11,x1
    STRH        w10,[x12],#2                //pu1_src_left[row]
    SUBS        x14, x14,#1                 //Decrement the loop count
    BNE         SRC_LEFT_LOOP               //If not equal to 0 jump to the src_left_loop

    SUB         x12,x9,#1                   //ht - 1
    mul         x12, x12, x1                //(ht - 1) * src_strd
    ADD         x12,x12,x0                  //pu1_src[(ht - 1) * src_strd]

    LDRB        w4,[x5,#2]                  //pu1_avail[2]
    CMP         x4,#0                       //0 == pu1_avail[2]
    ADD         x20,x0,x1                   //pu1_src += src_strd
    csel        x0, x20, x0,EQ
    SUB         x20,x9,#1                   //ht--
    csel        x9, x20, x9,EQ

    LDRB        w4,[x5,#3]                  //pu1_avail[3]
    CMP         x4,#0                       //0 == pu1_avail[3]
    SUB         x20,x9,#1                   //ht--
    csel        x9, x20, x9,EQ

    movi        v0.16b, #2                  //const_2 = vdupq_n_s8(2)
    movi        v2.8h, #0                   //const_min_clip = vdupq_n_s16(0)
    movi        v4.8h, #255                 //const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
    ADRP        x14, :got:gi1_table_edge_idx //table pointer
    LDR         x14, [x14, #:got_lo12:gi1_table_edge_idx]
    LD1         {v6.8b},[x14]               //edge_idx_tbl = vld1_s8(gi1_table_edge_idx)
    LD1         {v7.8b},[x6]                //offset_tbl_u = vld1_s8(pi1_sao_offset_u)
    LD1         {v1.8b},[x7]                //offset_tbl_v = vld1_s8(pi1_sao_offset_v)
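    // Classification below computes edge_idx = 2 + sign_up + sign_down per
    // byte, remaps it through edge_idx_tbl (v6), then picks an offset from
    // offset_tbl_u (v7) or offset_tbl_v (v1) depending on whether the byte is
    // a U or a V sample; const_min_clip/const_max_clip implement the final
    // CLIP3 to the 8-bit range.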

    CMP         x8,#16                      //Compare wd with 16
    BLT         WIDTH_RESIDUE               //If wd < 16, jump to WIDTH_RESIDUE, which handles the remaining 8-byte-wide case

WIDTH_LOOP_16:
    LDRB        w4,[x5,#2]                  //pu1_avail[2]
    CMP         x4,#0                       //0 == pu1_avail[2]
    SUB         x20,x0,x1                   //pu1_src -= src_strd
    csel        x11, x20, x11,EQ
    csel        x11, x3, x11,NE             //*pu1_src_top

    MOV         x10,x0                      //*pu1_src

    LD1         {v28.16b},[x11],#16         //pu1_top_row = vld1q_u8(pu1_src_top_cpy || pu1_src - src_strd)
    //LD1 {v29.8b},[x11],#8                    //pu1_top_row = vld1q_u8(pu1_src_top_cpy || pu1_src - src_strd)
    LD1         {v3.16b},[x0],#16           //pu1_cur_row = vld1q_u8(pu1_src)
    //LD1 {v11.8b},[x0],#8                    //pu1_cur_row = vld1q_u8(pu1_src)

    LD1         {v30.16b},[x12],#16         //vld1q_u8(pu1_src[(ht - 1) * src_strd])
    //LD1 {v31.8b},[x12],#8                    //vld1q_u8(pu1_src[(ht - 1) * src_strd])
    cmhi        v5.16b,  v3.16b ,  v28.16b  //vcgtq_u8(pu1_cur_row, pu1_top_row)

    ST1         { v30.16b},[x3],#16         //vst1q_u8(pu1_src_top[col])
    cmhi        v19.16b,  v28.16b ,  v3.16b //vcltq_u8(pu1_cur_row, pu1_top_row)

    SUB         v16.16b,  v19.16b ,  v5.16b //sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    MOV         x11,x9                      //move ht to x11 for loop count

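    // PU1_SRC_LOOP processes two rows per iteration: while row N is classified
    // and offset, row N+1 (the "II" comments) is loaded and classified in the
    // same pass to hide load latency. sign_up for the next row pair is simply
    // the negated sign_down of the current one.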
PU1_SRC_LOOP:
    ADD         x10,x10,x1                  //*pu1_src + src_strd
    LD1         {v18.16b},[x10]             //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    //LD1 {v19.8b},[x10]                    //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    //SUB x10, x10,#8
    ADD         x6,x10,x1                   //II Iteration *pu1_src + src_strd

    //mov   v19.d[0],v18.d[1]
    cmhi        v5.16b,  v3.16b ,  v18.16b  //vcgtq_u8(pu1_cur_row, pu1_next_row)
    LD1         {v30.16b},[x6]              //II pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    //LD1 {v31.8b},[x6]                    //II pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    //SUB x6, x6,#8

    cmhi        v19.16b,  v18.16b ,  v3.16b //vcltq_u8(pu1_cur_row, pu1_next_row)
    SUB         x10,x10,x1

    SUB         v20.16b,  v19.16b ,  v5.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    Uxtl        v26.8h, v18.8b              //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))

    ADD         v5.16b,  v0.16b ,  v16.16b  //edge_idx = vaddq_s8(const_2, sign_up)
    Uxtl2       v28.8h, v18.16b             //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))

    ADD         v5.16b,  v5.16b ,  v20.16b  //edge_idx = vaddq_s8(edge_idx, sign_down)
    cmhi        v22.16b,  v18.16b ,  v30.16b //II vcgtq_u8(pu1_cur_row, pu1_next_row)

    mov         v16.d[1],v16.d[0]
    NEG         v16.16b, v20.16b            //sign_up = vnegq_s8(sign_down)
    TBL         v5.16b, {v6.16b},v5.16b     //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    cmhi        v24.16b,  v30.16b ,  v18.16b //II vcltq_u8(pu1_cur_row, pu1_next_row)

    SUB         v28.16b,  v24.16b ,  v22.16b //II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    //TBL v13.8b, {v6.16b},v13.8b                //vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
    ADD         v22.16b,  v0.16b ,  v16.16b //II edge_idx = vaddq_s8(const_2, sign_up)

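    // Chroma is interleaved UVUV...: UZP1/UZP2 split the edge indices into the
    // U lane and the V lane so each plane can be looked up in its own offset
    // table (v7 = U, v1 = V); ZIP1/ZIP2 then re-interleave the offsets to match
    // the pixel layout again.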
    mov         v17.d[0], v5.d[1]
    UZP1        v27.8b, v5.8b, v17.8b
    UZP2        v17.8b, v5.8b, v17.8b
    mov         v5.8b,v27.8b
    NEG         v16.16b, v28.16b            //II sign_up = vnegq_s8(sign_down)
    TBL         v5.8b, {v7.16b},v5.8b       //offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
    ADD         v22.16b,  v22.16b ,  v28.16b //II edge_idx = vaddq_s8(edge_idx, sign_down)

    Uxtl        v20.8h, v3.8b               //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
    TBL         v17.8b, {v1.16b},v17.8b
    ZIP1        v27.8b, v5.8b, v17.8b
    ZIP2        v17.8b, v5.8b, v17.8b
    mov         v5.8b,v27.8b

    SADDW       v20.8h,  v20.8h ,  v5.8b    //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
    TBL         v22.16b, {v6.16b},v22.16b   //II vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    SMAX        v20.8h,  v20.8h ,  v2.8h    //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)

    UMIN        v20.8h,  v20.8h ,  v4.8h    //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))
    //TBL v23.8b, {v6.16b},v23.8b                //II vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))

    mov         v23.d[0], v22.d[1]
    UZP1        v27.8b, v22.8b, v23.8b
    UZP2        v23.8b, v22.8b, v23.8b
    mov         v22.8b,v27.8b

    Uxtl2       v28.8h, v3.16b              //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
    //VTBL.8        D13,D7,D13                    @offset = vtbl1_s8(offset_tbl, vget_high_s8(edge_idx))
    mov         v3.16b, v30.16b             //II pu1_cur_row = pu1_next_row

    SADDW       v28.8h,  v28.8h ,  v17.8b   //pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
    TBL         v24.8b, {v7.16b},v22.8b     //offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
    SMAX        v28.8h,  v28.8h ,  v2.8h    //pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)

    TBL         v25.8b, {v1.16b},v23.8b
    ZIP1        v27.8b, v24.8b, v25.8b
    ZIP2        v25.8b, v24.8b, v25.8b
    mov         v24.8b,v27.8b
    //VTBL.8        D24,D7,D22                    @II offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
    UMIN        v28.8h,  v28.8h ,  v4.8h    //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))
    //VTBL.8        D25,D7,D23                    @II offset = vtbl1_s8(offset_tbl, vget_high_s8(edge_idx))

    xtn         v20.8b,  v20.8h             //vmovn_s16(pi2_tmp_cur_row.val[0])
    SADDW       v26.8h,  v26.8h ,  v24.8b   //II pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)

    xtn2        v20.16b,  v28.8h            //vmovn_s16(pi2_tmp_cur_row.val[1])

    Uxtl2       v28.8h, v18.16b             //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
    SADDW       v28.8h,  v28.8h ,  v25.8b   //II pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)


    SMAX        v26.8h,  v26.8h ,  v2.8h    //II pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
    UMIN        v26.8h,  v26.8h ,  v4.8h    //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))

    SMAX        v28.8h,  v28.8h ,  v2.8h    //II pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
    UMIN        v28.8h,  v28.8h ,  v4.8h    //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))
    ST1         { v20.16b},[x10],x1         //vst1q_u8(pu1_src_cpy, pu1_cur_row)

    xtn         v30.8b,  v26.8h             //II vmovn_s16(pi2_tmp_cur_row.val[0])
    SUBS        x11,x11,#2                  //II Decrement the ht loop count by 2
    xtn2        v30.16b,  v28.8h            //II vmovn_s16(pi2_tmp_cur_row.val[1])

    ST1         { v30.16b},[x10],x1         //II vst1q_u8(pu1_src_cpy, pu1_cur_row)

    BEQ         PU1_SRC_LOOP_END            //ht even: both rows done (ht is odd when pu1_avail[2] or pu1_avail[3] is 0)
    CMP         x11,#1                      //check whether a single residual row remains
    BGT         PU1_SRC_LOOP                //If more than one row remains, jump to PU1_SRC_LOOP

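    // An odd ht (one of pu1_avail[2]/pu1_avail[3] was 0) leaves a single row;
    // it is processed alone below, without the second-row ("II") interleaving.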
    ADD         x10,x10,x1                  //*pu1_src + src_strd
    LD1         {v18.16b},[x10]             //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    //LD1 {v19.8b},[x10]                    //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    //SUB x10, x10,#8
    cmhi        v5.16b,  v3.16b ,  v18.16b  //vcgtq_u8(pu1_cur_row, pu1_next_row)
    cmhi        v19.16b,  v18.16b ,  v3.16b //vcltq_u8(pu1_cur_row, pu1_next_row)
    SUB         v20.16b,  v19.16b ,  v5.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    SUB         x10,x10,x1

    ADD         v22.16b,  v0.16b ,  v16.16b //edge_idx = vaddq_s8(const_2, sign_up)
    ADD         v22.16b,  v22.16b ,  v20.16b //edge_idx = vaddq_s8(edge_idx, sign_down)
    TBL         v22.16b, {v6.16b},v22.16b   //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    //TBL v23.8b, {v6.16b},v23.8b                //vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))

    mov         v23.d[0],v22.d[1]
    UZP1        v27.8b, v22.8b, v23.8b
    UZP2        v23.8b, v22.8b, v23.8b
    mov         v22.8b,v27.8b
    TBL         v24.8b, {v7.16b},v22.8b
    TBL         v25.8b, {v1.16b},v23.8b
    ZIP1        v27.8b, v24.8b, v25.8b
    ZIP2        v25.8b, v24.8b, v25.8b
    mov         v24.8b,v27.8b

    //VTBL.8        D24,D7,D22                    @offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
    Uxtl        v26.8h, v3.8b               //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
    SADDW       v26.8h,  v26.8h ,  v24.8b   //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
    SMAX        v26.8h,  v26.8h ,  v2.8h    //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
    UMIN        v26.8h,  v26.8h ,  v4.8h    //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))

    //VTBL.8        D25,D7,D23                    @offset = vtbl1_s8(offset_tbl, vget_high_s8(edge_idx))
    Uxtl2       v28.8h, v3.16b              //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
    SADDW       v28.8h,  v28.8h ,  v25.8b   //pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
    SMAX        v28.8h,  v28.8h ,  v2.8h    //pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
    UMIN        v28.8h,  v28.8h ,  v4.8h    //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))

    xtn         v30.8b,  v26.8h             //vmovn_s16(pi2_tmp_cur_row.val[0])
    xtn2        v30.16b,  v28.8h            //vmovn_s16(pi2_tmp_cur_row.val[1])

    ST1         { v30.16b},[x10],x1         //vst1q_u8(pu1_src_cpy, pu1_cur_row)

PU1_SRC_LOOP_END:
    mov         v3.16b, v18.16b             //pu1_cur_row = pu1_next_row
    SUBS        x8,x8,#16                   //Decrement the wd loop count by 16
    CMP         x8,#8                       //Check whether residue remains
    BEQ         WIDTH_RESIDUE               //If residue remains jump to residue loop
    BGT         WIDTH_LOOP_16               //If not equal jump to width_loop
    BLT         END_LOOPS                   //Jump to end function


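    // WIDTH_RESIDUE: same row pipeline as above, but only 8 bytes
    // (4 interleaved U/V pairs) are computed and stored per row.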
WIDTH_RESIDUE:
    LDRB        w4,[x5,#2]                  //pu1_avail[2]
    CMP         x4,#0                       //0 == pu1_avail[2]
    SUB         x20,x0,x1                   //pu1_src -= src_strd
    csel        x11, x20, x11,EQ
    csel        x11, x3, x11,NE             //*pu1_src_top
    MOV         x10,x0

    LD1         {v28.16b},[x11]             //pu1_top_row = vld1q_u8(pu1_src_top_cpy || pu1_src - src_strd)
    //LD1 {v29.8b},[x11],#8                    //pu1_top_row = vld1q_u8(pu1_src_top_cpy || pu1_src - src_strd)
    LD1         {v3.16b},[x0]               //pu1_cur_row = vld1q_u8(pu1_src)
    //LD1 {v11.8b},[x0],#8                    //pu1_cur_row = vld1q_u8(pu1_src)

    LD1         {v30.8b},[x12]              //vld1_u8(pu1_src[(ht - 1) * src_strd])
    ST1         {v30.8b},[x3]               //vst1_u8(pu1_src_top[col])

    cmhi        v5.16b,  v3.16b ,  v28.16b  //vcgtq_u8(pu1_cur_row, pu1_top_row)
    cmhi        v19.16b,  v28.16b ,  v3.16b //vcltq_u8(pu1_cur_row, pu1_top_row)
    SUB         v16.16b,  v19.16b ,  v5.16b //sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    MOV         x11,x9                      //move ht to x11 for loop count

PU1_SRC_LOOP_RESIDUE:
    ADD         x10,x10,x1                  //*pu1_src + src_strd
    LD1         {v18.16b},[x10]             //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    //LD1 {v19.8b},[x10]                    //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    //SUB x10, x10,#8
    ADD         x6,x10,x1                   //II Iteration *pu1_src + src_strd

    cmhi        v5.16b,  v3.16b ,  v18.16b  //vcgtq_u8(pu1_cur_row, pu1_next_row)
    LD1         {v30.16b},[x6]              //II pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    //LD1 {v31.8b},[x6]                    //II pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    //SUB x6, x6,#8

    cmhi        v19.16b,  v18.16b ,  v3.16b //vcltq_u8(pu1_cur_row, pu1_next_row)
    SUB         x10,x10,x1

    SUB         v20.16b,  v19.16b ,  v5.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    Uxtl        v26.8h, v18.8b              //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))

    ADD         v5.16b,  v0.16b ,  v16.16b  //edge_idx = vaddq_s8(const_2, sign_up)
    cmhi        v22.16b,  v18.16b ,  v30.16b //II vcgtq_u8(pu1_cur_row, pu1_next_row)

    ADD         v5.16b,  v5.16b ,  v20.16b  //edge_idx = vaddq_s8(edge_idx, sign_down)
    cmhi        v24.16b,  v30.16b ,  v18.16b //II vcltq_u8(pu1_cur_row, pu1_next_row)

    NEG         v16.16b, v20.16b            //sign_up = vnegq_s8(sign_down)
    TBL         v5.8b, {v6.16b},v5.8b       //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    SUB         v20.16b,  v24.16b ,  v22.16b //II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))

    UZP1        v27.8b, v5.8b, v17.8b
    UZP2        v17.8b, v5.8b, v17.8b
    mov         v5.8b,v27.8b

    ADD         v22.16b,  v0.16b ,  v16.16b //II edge_idx = vaddq_s8(const_2, sign_up)
    TBL         v5.8b, {v7.16b},v5.8b
    NEG         v16.16b, v20.16b            //II sign_up = vnegq_s8(sign_down)

    TBL         v17.8b, {v1.16b},v17.8b
    ZIP1        v27.8b, v5.8b, v17.8b
    ZIP2        v17.8b, v5.8b, v17.8b
    mov         v5.8b,v27.8b

    //VTBL.8        D12,D7,D12                    @offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))

    ADD         v22.16b,  v22.16b ,  v20.16b //II edge_idx = vaddq_s8(edge_idx, sign_down)
    Uxtl        v20.8h, v3.8b               //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))

    SADDW       v20.8h,  v20.8h ,  v5.8b    //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
    TBL         v22.8b, {v6.16b},v22.8b     //II vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    SMAX        v20.8h,  v20.8h ,  v2.8h    //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)

    UZP1        v27.8b, v22.8b, v23.8b
    UZP2        v23.8b, v22.8b, v23.8b
    mov         v22.8b,v27.8b

    UMIN        v20.8h,  v20.8h ,  v4.8h    //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))
    TBL         v24.8b, {v7.16b},v22.8b
    xtn         v20.8b,  v20.8h             //vmovn_s16(pi2_tmp_cur_row.val[0])

    TBL         v25.8b, {v1.16b},v23.8b
    ZIP1        v27.8b, v24.8b, v25.8b
    ZIP2        v25.8b, v24.8b, v25.8b
    mov         v24.8b,v27.8b
    //VTBL.8        D24,D7,D22                    @II offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))

    SADDW       v26.8h,  v26.8h ,  v24.8b   //II pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
    SMAX        v26.8h,  v26.8h ,  v2.8h    //II pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
    UMIN        v26.8h,  v26.8h ,  v4.8h    //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))

    mov         v3.16b, v30.16b             //II pu1_cur_row = pu1_next_row
    ST1         {v20.8b},[x10],x1           //vst1q_u8(pu1_src_cpy, pu1_cur_row)
    xtn         v30.8b,  v26.8h             //II vmovn_s16(pi2_tmp_cur_row.val[0])

    SUBS        x11,x11,#2                  //Decrement the ht loop count by 2
    ST1         {v30.8b},[x10],x1           //II vst1q_u8(pu1_src_cpy, pu1_cur_row)

    BEQ         END_LOOPS
    CMP         x11,#1                      //check whether a single residual row remains
    BGT         PU1_SRC_LOOP_RESIDUE        //If more than one row remains, jump to PU1_SRC_LOOP_RESIDUE


    ADD         x10,x10,x1                  //*pu1_src + src_strd
    LD1         {v18.16b},[x10]             //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    //LD1 {v19.8b},[x10]                    //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    //SUB x10, x10,#8
    cmhi        v5.16b,  v3.16b ,  v18.16b  //vcgtq_u8(pu1_cur_row, pu1_next_row)
    cmhi        v19.16b,  v18.16b ,  v3.16b //vcltq_u8(pu1_cur_row, pu1_next_row)
    SUB         v20.16b,  v19.16b ,  v5.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    SUB         x10,x10,x1

    ADD         v22.16b,  v0.16b ,  v16.16b //edge_idx = vaddq_s8(const_2, sign_up)
    ADD         v22.16b,  v22.16b ,  v20.16b //edge_idx = vaddq_s8(edge_idx, sign_down)
    TBL         v22.8b, {v6.16b},v22.8b     //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))

    UZP1        v27.8b, v22.8b, v23.8b
    UZP2        v23.8b, v22.8b, v23.8b
    mov         v22.8b,v27.8b

    TBL         v24.8b, {v7.16b},v22.8b
    TBL         v25.8b, {v1.16b},v23.8b
    ZIP1        v27.8b, v24.8b, v25.8b
    ZIP2        v25.8b, v24.8b, v25.8b
    mov         v24.8b,v27.8b

    //VTBL.8        D24,D7,D22                    @offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
    Uxtl        v26.8h, v3.8b               //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
    SADDW       v26.8h,  v26.8h ,  v24.8b   //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
    SMAX        v26.8h,  v26.8h ,  v2.8h    //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
    UMIN        v26.8h,  v26.8h ,  v4.8h    //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))

    xtn         v30.8b,  v26.8h             //vmovn_s16(pi2_tmp_cur_row.val[0])

    ST1         {v30.8b},[x10],x1           //vst1q_u8(pu1_src_cpy, pu1_cur_row)

END_LOOPS:
    // LDMFD sp!,{x4-x12,x15}             //Reload the registers from SP
    ldp         x25, x26,[sp],#16
    ldp         x23, x24,[sp],#16
    ldp         x21, x22,[sp],#16
    ldp         x19, x20,[sp],#16


    ret