/*
 * Copyright (c) 2016 Clément Bœsch <clement stupeflix.com>
 * Copyright (c) 2019-2021 Sebastian Pop <spop@amazon.com>
 * Copyright (c) 2022 Jonathan Swinney <jswinney@amazon.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/aarch64/asm.S"

/*
;-----------------------------------------------------------------------------
; horizontal line scaling
;
; void hscale<source_width>to<intermediate_nbits>_<filterSize>_<opt>
;   (SwsContext *c, int{16,32}_t *dst,
;    int dstW, const uint{8,16}_t *src,
;    const int16_t *filter,
;    const int32_t *filterPos, int filterSize);
;
; Scale one horizontal line. Input is either 8-bit width or 16-bit width
; ($source_width can be either 8, 9, 10 or 16, difference is whether we have to
; downscale before multiplying). Filter is 14 bits. Output is either 15 bits
; (in int16_t) or 19 bits (in int32_t), as given in $intermediate_nbits. Each
; output pixel is generated from $filterSize input pixels, the position of
; the first pixel is given in filterPos[nOutputPixel].
;----------------------------------------------------------------------------- */

function ff_hscale8to15_X8_neon, export=1
// Generic-filterSize variant: produces 4 output pixels per outer-loop
// iteration and consumes the filter 8 taps at a time in the inner loop,
// so filterSize is expected to be a multiple of 8 — TODO confirm against
// the C-side dispatch that selects this kernel.
// x1 = dst, x2 = dstW, x3 = src, x4 = filter, x5 = filterPos, x6 = filterSize
// v0-v3 hold one 32-bit accumulator vector per output pixel.
        sbfiz               x7, x6, #1, #32             // filterSize*2 (*2 because int16)
1:      ldr                 w8, [x5], #4                // filterPos[idx]
        ldr                 w0, [x5], #4                // filterPos[idx + 1]
        ldr                 w11, [x5], #4               // filterPos[idx + 2]
        ldr                 w9, [x5], #4                // filterPos[idx + 3]
        mov                 x16, x4                     // filter0 = filter
        add                 x12, x16, x7                // filter1 = filter0 + filterSize*2
        add                 x13, x12, x7                // filter2 = filter1 + filterSize*2
        add                 x4, x13, x7                 // filter3 = filter2 + filterSize*2
        movi                v0.2D, #0                   // val sum part 1 (for dst[0])
        movi                v1.2D, #0                   // val sum part 2 (for dst[1])
        movi                v2.2D, #0                   // val sum part 3 (for dst[2])
        movi                v3.2D, #0                   // val sum part 4 (for dst[3])
        add                 x17, x3, w8, UXTW           // srcp + filterPos[0]
        add                 x8, x3, w0, UXTW            // srcp + filterPos[1]
        add                 x0, x3, w11, UXTW           // srcp + filterPos[2]
        add                 x11, x3, w9, UXTW           // srcp + filterPos[3]
        mov                 w15, w6                     // filterSize counter
        // Inner loop: 8 taps for each of the 4 pixels; loads are interleaved
        // with the multiply-accumulates to keep ld/st and SIMD pipes busy.
2:      ld1                 {v4.8B}, [x17], #8          // srcp[filterPos[0] + {0..7}]
        ld1                 {v5.8H}, [x16], #16         // load 8x16-bit filter values, part 1
        ld1                 {v6.8B}, [x8], #8           // srcp[filterPos[1] + {0..7}]
        ld1                 {v7.8H}, [x12], #16         // load 8x16-bit at filter+filterSize
        uxtl                v4.8H, v4.8B                // unpack part 1 to 16-bit
        smlal               v0.4S, v4.4H, v5.4H         // v0 accumulates srcp[filterPos[0] + {0..3}] * filter[{0..3}]
        smlal2              v0.4S, v4.8H, v5.8H         // v0 accumulates srcp[filterPos[0] + {4..7}] * filter[{4..7}]
        ld1                 {v16.8B}, [x0], #8          // srcp[filterPos[2] + {0..7}]
        ld1                 {v17.8H}, [x13], #16        // load 8x16-bit at filter+2*filterSize
        uxtl                v6.8H, v6.8B                // unpack part 2 to 16-bit
        smlal               v1.4S, v6.4H, v7.4H         // v1 accumulates srcp[filterPos[1] + {0..3}] * filter[{0..3}]
        uxtl                v16.8H, v16.8B              // unpack part 3 to 16-bit
        smlal               v2.4S, v16.4H, v17.4H       // v2 accumulates srcp[filterPos[2] + {0..3}] * filter[{0..3}]
        smlal2              v2.4S, v16.8H, v17.8H       // v2 accumulates srcp[filterPos[2] + {4..7}] * filter[{4..7}]
        ld1                 {v18.8B}, [x11], #8         // srcp[filterPos[3] + {0..7}]
        smlal2              v1.4S, v6.8H, v7.8H         // v1 accumulates srcp[filterPos[1] + {4..7}] * filter[{4..7}]
        ld1                 {v19.8H}, [x4], #16         // load 8x16-bit at filter+3*filterSize
        subs                w15, w15, #8                // j -= 8: processed 8/filterSize
        uxtl                v18.8H, v18.8B              // unpack part 4 to 16-bit
        smlal               v3.4S, v18.4H, v19.4H       // v3 accumulates srcp[filterPos[3] + {0..3}] * filter[{0..3}]
        smlal2              v3.4S, v18.8H, v19.8H       // v3 accumulates srcp[filterPos[3] + {4..7}] * filter[{4..7}]
        b.gt                2b                          // inner loop if filterSize not consumed completely
        // Reduce the four per-pixel accumulator vectors to one lane each.
        addp                v0.4S, v0.4S, v1.4S         // part01 horizontal pair adding
        addp                v2.4S, v2.4S, v3.4S         // part23 horizontal pair adding
        addp                v0.4S, v0.4S, v2.4S         // part0123 horizontal pair adding
        subs                w2, w2, #4                  // dstW -= 4
        sqshrn              v0.4H, v0.4S, #7            // shift and clip the 4x16-bit final values
        st1                 {v0.4H}, [x1], #8           // write to destination part0123
        b.gt                1b                          // loop until end of line
        ret
endfunc

function ff_hscale8to15_4_neon, export=1
// Specialized variant for filterSize == 4: gathers 4 src bytes per output
// pixel through the integer registers into a 32-byte stack scratch buffer,
// then transposes with ld4 and multiply-accumulates 8 output pixels at once.
// x0 SwsContext *c (not used)
// x1 int16_t *dst
// x2 int dstW
// x3 const uint8_t *src
// x4 const int16_t *filter
// x5 const int32_t *filterPos
// x6 int filterSize
// x8-x15 registers for gathering src data

// v0      madd accumulator 4S
// v1-v4   filter values (16 bit) 8H
// v5      madd accumulator 4S
// v16-v19 src values (8 bit) 8B

// This implementation has 4 sections:
//  1. Prefetch src data
//  2. Interleaved prefetching src data and madd
//  3. Complete madd
//  4. Complete remaining iterations when dstW % 8 != 0

        sub                 sp, sp, #32                 // allocate 32 bytes on the stack
        cmp                 w2, #16                     // if dstW <16, skip to the last block used for wrapping up
        b.lt                2f

        // Section 1: prime the pipeline — gather the first 8 pixels' worth
        // of src data into the scratch buffer before entering the main loop.
        // load 8 values from filterPos to be used as offsets into src
        ldp                 w8, w9, [x5]                // filterPos[idx + 0], [idx + 1]
        ldp                 w10, w11, [x5, #8]          // filterPos[idx + 2], [idx + 3]
        ldp                 w12, w13, [x5, #16]         // filterPos[idx + 4], [idx + 5]
        ldp                 w14, w15, [x5, #24]         // filterPos[idx + 6], [idx + 7]
        add                 x5, x5, #32                 // advance filterPos

        // gather random access data from src into contiguous memory
        ldr                 w8, [x3, w8, UXTW]          // src[filterPos[idx + 0]][0..3]
        ldr                 w9, [x3, w9, UXTW]          // src[filterPos[idx + 1]][0..3]
        ldr                 w10, [x3, w10, UXTW]        // src[filterPos[idx + 2]][0..3]
        ldr                 w11, [x3, w11, UXTW]        // src[filterPos[idx + 3]][0..3]
        ldr                 w12, [x3, w12, UXTW]        // src[filterPos[idx + 4]][0..3]
        ldr                 w13, [x3, w13, UXTW]        // src[filterPos[idx + 5]][0..3]
        ldr                 w14, [x3, w14, UXTW]        // src[filterPos[idx + 6]][0..3]
        ldr                 w15, [x3, w15, UXTW]        // src[filterPos[idx + 7]][0..3]
        stp                 w8, w9, [sp]                // *scratch_mem = { src[filterPos[idx + 0]][0..3], src[filterPos[idx + 1]][0..3] }
        stp                 w10, w11, [sp, #8]          // *scratch_mem = { src[filterPos[idx + 2]][0..3], src[filterPos[idx + 3]][0..3] }
        stp                 w12, w13, [sp, #16]         // *scratch_mem = { src[filterPos[idx + 4]][0..3], src[filterPos[idx + 5]][0..3] }
        stp                 w14, w15, [sp, #24]         // *scratch_mem = { src[filterPos[idx + 6]][0..3], src[filterPos[idx + 7]][0..3] }

        // Section 2: main loop — consume the scratch buffer for this
        // iteration while gathering src data for the NEXT iteration.
1:
        ld4                 {v16.8B, v17.8B, v18.8B, v19.8B}, [sp] // transpose 8 bytes each from src into 4 registers

        // load 8 values from filterPos to be used as offsets into src
        ldp                 w8, w9, [x5]                // filterPos[idx + 0][0..3], [idx + 1][0..3], next iteration
        ldp                 w10, w11, [x5, #8]          // filterPos[idx + 2][0..3], [idx + 3][0..3], next iteration
        ldp                 w12, w13, [x5, #16]         // filterPos[idx + 4][0..3], [idx + 5][0..3], next iteration
        ldp                 w14, w15, [x5, #24]         // filterPos[idx + 6][0..3], [idx + 7][0..3], next iteration

        movi                v0.2D, #0                   // Clear madd accumulator for idx 0..3
        movi                v5.2D, #0                   // Clear madd accumulator for idx 4..7

        ld4                 {v1.8H, v2.8H, v3.8H, v4.8H}, [x4], #64 // load filter idx + 0..7

        add                 x5, x5, #32                 // advance filterPos

        // interleaved SIMD and prefetching intended to keep ld/st and vector pipelines busy
        uxtl                v16.8H, v16.8B              // unsigned extend long, convert src data to 16-bit
        uxtl                v17.8H, v17.8B              // unsigned extend long, convert src data to 16-bit
        ldr                 w8, [x3, w8, UXTW]          // src[filterPos[idx + 0]], next iteration
        ldr                 w9, [x3, w9, UXTW]          // src[filterPos[idx + 1]], next iteration
        uxtl                v18.8H, v18.8B              // unsigned extend long, convert src data to 16-bit
        uxtl                v19.8H, v19.8B              // unsigned extend long, convert src data to 16-bit
        ldr                 w10, [x3, w10, UXTW]        // src[filterPos[idx + 2]], next iteration
        ldr                 w11, [x3, w11, UXTW]        // src[filterPos[idx + 3]], next iteration

        smlal               v0.4S, v1.4H, v16.4H        // multiply accumulate inner loop j = 0, idx = 0..3
        smlal               v0.4S, v2.4H, v17.4H        // multiply accumulate inner loop j = 1, idx = 0..3
        ldr                 w12, [x3, w12, UXTW]        // src[filterPos[idx + 4]], next iteration
        ldr                 w13, [x3, w13, UXTW]        // src[filterPos[idx + 5]], next iteration
        smlal               v0.4S, v3.4H, v18.4H        // multiply accumulate inner loop j = 2, idx = 0..3
        smlal               v0.4S, v4.4H, v19.4H        // multiply accumulate inner loop j = 3, idx = 0..3
        ldr                 w14, [x3, w14, UXTW]        // src[filterPos[idx + 6]], next iteration
        ldr                 w15, [x3, w15, UXTW]        // src[filterPos[idx + 7]], next iteration

        smlal2              v5.4S, v1.8H, v16.8H        // multiply accumulate inner loop j = 0, idx = 4..7
        smlal2              v5.4S, v2.8H, v17.8H        // multiply accumulate inner loop j = 1, idx = 4..7
        stp                 w8, w9, [sp]                // *scratch_mem = { src[filterPos[idx + 0]][0..3], src[filterPos[idx + 1]][0..3] }
        stp                 w10, w11, [sp, #8]          // *scratch_mem = { src[filterPos[idx + 2]][0..3], src[filterPos[idx + 3]][0..3] }
        smlal2              v5.4S, v3.8H, v18.8H        // multiply accumulate inner loop j = 2, idx = 4..7
        smlal2              v5.4S, v4.8H, v19.8H        // multiply accumulate inner loop j = 3, idx = 4..7
        stp                 w12, w13, [sp, #16]         // *scratch_mem = { src[filterPos[idx + 4]][0..3], src[filterPos[idx + 5]][0..3] }
        stp                 w14, w15, [sp, #24]         // *scratch_mem = { src[filterPos[idx + 6]][0..3], src[filterPos[idx + 7]][0..3] }

        sub                 w2, w2, #8                  // dstW -= 8
        sqshrn              v0.4H, v0.4S, #7            // shift and clip the 4x16-bit final values
        sqshrn              v1.4H, v5.4S, #7            // shift and clip the 4x16-bit final values
        st1                 {v0.4H, v1.4H}, [x1], #16   // write to dst[idx + 0..7]
        cmp                 w2, #16                     // continue on main loop if there are at least 16 iterations left
        b.ge                1b

        // Section 3: last full iteration — the scratch buffer was already
        // filled by the loop above; no further gathering is needed.
        ld4                 {v16.8B, v17.8B, v18.8B, v19.8B}, [sp]
        ld4                 {v1.8H, v2.8H, v3.8H, v4.8H}, [x4], #64 // load filter idx + 0..7

        movi                v0.2D, #0                   // Clear madd accumulator for idx 0..3
        movi                v5.2D, #0                   // Clear madd accumulator for idx 4..7

        uxtl                v16.8H, v16.8B              // unsigned extend long, convert src data to 16-bit
        uxtl                v17.8H, v17.8B              // unsigned extend long, convert src data to 16-bit
        uxtl                v18.8H, v18.8B              // unsigned extend long, convert src data to 16-bit
        uxtl                v19.8H, v19.8B              // unsigned extend long, convert src data to 16-bit

        smlal               v0.4S, v1.4H, v16.4H        // multiply accumulate inner loop j = 0, idx = 0..3
        smlal               v0.4S, v2.4H, v17.4H        // multiply accumulate inner loop j = 1, idx = 0..3
        smlal               v0.4S, v3.4H, v18.4H        // multiply accumulate inner loop j = 2, idx = 0..3
        smlal               v0.4S, v4.4H, v19.4H        // multiply accumulate inner loop j = 3, idx = 0..3

        smlal2              v5.4S, v1.8H, v16.8H        // multiply accumulate inner loop j = 0, idx = 4..7
        smlal2              v5.4S, v2.8H, v17.8H        // multiply accumulate inner loop j = 1, idx = 4..7
        smlal2              v5.4S, v3.8H, v18.8H        // multiply accumulate inner loop j = 2, idx = 4..7
        smlal2              v5.4S, v4.8H, v19.8H        // multiply accumulate inner loop j = 3, idx = 4..7

        subs                w2, w2, #8                  // dstW -= 8
        sqshrn              v0.4H, v0.4S, #7            // shift and clip the 4x16-bit final values
        sqshrn              v1.4H, v5.4S, #7            // shift and clip the 4x16-bit final values
        st1                 {v0.4H, v1.4H}, [x1], #16   // write to dst[idx + 0..7]

        cbnz                w2, 2f                      // if >0 iterations remain, jump to the wrap up section

        add                 sp, sp, #32                 // clean up stack
        ret

        // Section 4: scalar tail — one output pixel per iteration.
        // finish up when dstW % 8 != 0 or dstW < 16
2:
        // load src
        ldr                 w8, [x5], #4                // filterPos[i]
        add                 x9, x3, w8, UXTW            // calculate the address for src load
        ld1                 {v5.S}[0], [x9]             // src[filterPos[i] + 0..3]
        // load filter
        ld1                 {v6.4H}, [x4], #8           // filter[filterSize * i + 0..3]

        uxtl                v5.8H, v5.8B                // unsigned extend long, convert src data to 16-bit
        smull               v0.4S, v5.4H, v6.4H         // 4 iterations of src[...] * filter[...]
        addv                s0, v0.4S                   // add up products of src and filter values
        sqshrn              h0, s0, #7                  // shift and clip the 16-bit final value
        st1                 {v0.H}[0], [x1], #2         // dst[i] = ...
        sub                 w2, w2, #1                  // dstW--
        cbnz                w2, 2b

        add                 sp, sp, #32                 // clean up stack
        ret
endfunc