
Lines Matching "non-inclusive"

1 /* SPDX-License-Identifier: GPL-2.0-only */
13 * a0 - dest
16 * a0 - Inclusive first byte of dest
17 * a1 - Inclusive first byte of src
18 * a2 - Length of copy n
23 * Note: This currently only works on little-endian.
24 * To port to big-endian, reverse the direction of shifts
34 * Forward Copy: a1 - Index counter of src
35 * Reverse Copy: a4 - Index counter of src
36 * Forward Copy: t3 - Index counter of dest
37 * Reverse Copy: t4 - Index counter of dest
38 * Both Copy Modes: t5 - Inclusive first SZREG-aligned address of dest
39 * Both Copy Modes: t6 - Non-Inclusive last SZREG-aligned address of dest
40 * Both Copy Modes: t0 - Link / Temporary for load-store
41 * Both Copy Modes: t1 - Temporary for load-store
42 * Both Copy Modes: t2 - Temporary for load-store
43 * Both Copy Modes: a5 - dest to src alignment offset
44 * Both Copy Modes: a6 - Shift amount
45 * Both Copy Modes: a7 - Inverse Shift amount
46 * Both Copy Modes: a2 - Alternate breakpoint for unrolled loops
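
Taken together, the calling convention above is just the standard memmove contract. As a gloss (the register-to-parameter mapping in the comment below is editorial, not from the file):

    #include <stddef.h>

    /*
     * a0 - dest (also returned unchanged in a0)
     * a1 - src
     * a2 - n
     */
    void *memmove(void *dest, const void *src, size_t n);
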
62 andi t0, a2, -(2 * SZREG)
68 andi t5, t3, -SZREG
69 andi t6, t4, -SZREG
73 * to find the lower bound of SZREG alignment in the dest memory
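
The andi against -SZREG works because -SZREG in two's complement has its low log2(SZREG) bits clear, so it acts as an align-down mask. A minimal C sketch of the same rounding, assuming SZREG is 8 as on RV64:

    #include <assert.h>
    #include <stdint.h>

    #define SZREG 8 /* bytes per register; 8 on RV64 (assumption for this sketch) */

    int main(void)
    {
            uintptr_t addr = 0x1005;
            /* andi rd, rs, -SZREG: clear the low bits to round down */
            uintptr_t aligned = addr & -(uintptr_t)SZREG;
            assert(aligned == 0x1000);
            return 0;
    }
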
84 * If the dest and src are co-aligned to SZREG, then there is
86 * Instead, do a simpler co-aligned copy.
89 andi t1, t0, (SZREG - 1)
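
The co-alignment test is the usual XOR trick: if dest and src agree in their low bits modulo SZREG, one pass of head-byte copying aligns both at once. A sketch, assuming (as the surrounding code suggests but this listing does not show) that t0 holds dest ^ src:

    #include <stdbool.h>
    #include <stdint.h>

    #define SZREG 8

    /* true when dest and src reach SZREG alignment together */
    static bool coaligned(uintptr_t dest, uintptr_t src)
    {
            /* andi t1, t0, (SZREG - 1) with t0 = dest ^ src */
            return ((dest ^ src) & (SZREG - 1)) == 0;
    }
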
99 andi a5, a1, (SZREG - 1) /* Find the alignment offset of src (a1) */
102 andi a1, a1, -SZREG /* Align the src pointer */
107 * a7 = XLEN - a6 = XLEN + -a6
108 * Two's complement negation to find the negative: -a6 = ~a6 + 1
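
The comment's derivation folds the negation into the add: XLEN - a6 = ~a6 + 1 + XLEN, i.e. one NOT followed by one ADDI of (XLEN + 1). A small C check of the identity, taking XLEN = SZREG * 8:

    #include <assert.h>
    #include <stdint.h>

    #define SZREG 8
    #define XLEN (SZREG * 8)

    int main(void)
    {
            for (uint32_t a6 = 8; a6 < XLEN; a6 += 8) {
                    /* not a7, a6 ; addi a7, a7, XLEN + 1 */
                    uint32_t a7 = ~a6 + (XLEN + 1);
                    assert(a7 == XLEN - a6);
            }
            return 0;
    }
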
115 * Fix Misalignment Copy Loop - Forward
120 * store_ptr[0 - 2] = (load_val0 >> {a6}) | (load_val1 << {a7});
127 * store_ptr[1 - 2] = (load_val1 >> {a6}) | (load_val0 << {a7});
140 REG_S t2, ((0 * SZREG) - (2 * SZREG))(t3)
149 REG_S t2, ((1 * SZREG) - (2 * SZREG))(t3)
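
Filling in the parts of the pseudocode this listing skips, here is one plausible C rendering of the forward fix-misalignment loop. reg_t, break_ptr, and the function name are illustrative; break_ptr stands for the "alternate breakpoint" a2, and a6/a7 are the bit-shift pair from above (both nonzero on this path, so the shifts are well-defined):

    #include <stdint.h>

    typedef uintptr_t reg_t; /* one SZREG-wide register */

    static void fix_misalign_forward(const reg_t *load_ptr, reg_t *store_ptr,
                                     const reg_t *break_ptr,
                                     const reg_t *store_ptr_end,
                                     unsigned a6, unsigned a7)
    {
            reg_t load_val0 = load_ptr[0], load_val1;

            do {
                    load_val1 = load_ptr[1];
                    store_ptr += 2;
                    store_ptr[0 - 2] = (load_val0 >> a6) | (load_val1 << a7);

                    if (store_ptr == break_ptr)
                            break; /* odd word count: stop after the half-iteration */

                    load_val0 = load_ptr[2];
                    load_ptr += 2;
                    store_ptr[1 - 2] = (load_val1 >> a6) | (load_val0 << a7);
            } while (store_ptr != store_ptr_end);
    }
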
161 andi a5, a4, (SZREG - 1) /* Find the alignment offset of src (a4) */
164 andi a4, a4, -SZREG /* Align the src pointer */
165 addi a2, t5, -SZREG /* The other breakpoint for the unrolled loop */
169 * a7 = XLEN - a6 = XLEN + -a6
170 * Two's complement negation to find the negative: -a6 = ~a6 + 1
177 * Fix Misalignment Copy Loop - Reverse
180 * load_val0 = load_ptr[-1];
181 * store_ptr -= 2;
187 * load_val1 = load_ptr[-2];
188 * load_ptr -= 2;
197 REG_L t0, (-1 * SZREG)(a4)
198 addi t4, t4, (-2 * SZREG)
206 REG_L t1, (-2 * SZREG)(a4)
207 addi a4, a4, (-2 * SZREG)
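
Mirroring the forward version, the reverse loop pre-decrements and combines each pair of source words with the same shift pair. One plausible C shape consistent with the listed lines (names again illustrative):

    #include <stdint.h>

    typedef uintptr_t reg_t;

    static void fix_misalign_reverse(const reg_t *load_ptr, reg_t *store_ptr,
                                     const reg_t *break_ptr,
                                     const reg_t *store_ptr_end,
                                     unsigned a6, unsigned a7)
    {
            /* load_ptr starts one word past the region; load_val1 is the high word */
            reg_t load_val1 = load_ptr[0], load_val0;

            do {
                    load_val0 = load_ptr[0 - 1];   /* REG_L t0, (-1 * SZREG)(a4) */
                    store_ptr -= 2;                /* addi t4, t4, (-2 * SZREG) */
                    store_ptr[1] = (load_val0 >> a6) | (load_val1 << a7);

                    if (store_ptr == break_ptr)
                            break;

                    load_val1 = load_ptr[0 - 2];   /* REG_L t1, (-2 * SZREG)(a4) */
                    load_ptr -= 2;                 /* addi a4, a4, (-2 * SZREG) */
                    store_ptr[0] = (load_val1 >> a6) | (load_val0 << a7);
            } while (store_ptr != store_ptr_end);
    }
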
221 * Simple copy loops for SZREG co-aligned memory locations.
235 REG_S t1, (-1 * SZREG)(t3)
244 REG_L t1, (-1 * SZREG)(a4)
245 addi a4, a4, -SZREG
246 addi t4, t4, -SZREG
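
When dest and src are co-aligned, the shifted path above collapses into plain word-copy loops. A minimal C sketch of both directions (names illustrative):

    #include <stdint.h>

    typedef uintptr_t reg_t;

    static void coaligned_forward(const reg_t *src, reg_t *dst,
                                  const reg_t *dst_end)
    {
            while (dst != dst_end)
                    *dst++ = *src++; /* mirrors REG_S t1, (-1 * SZREG)(t3) after the bump */
    }

    static void coaligned_reverse(const reg_t *src_end, reg_t *dst_end,
                                  const reg_t *dst_begin)
    {
            while (dst_end != dst_begin)
                    *--dst_end = *--src_end; /* REG_L t1, (-1 * SZREG)(a4); then -SZREG bumps */
    }
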
253 * These are basically sub-functions within the function. They
267 sb t1, -1(t3)
275 lb t1, -1(a4)
276 addi a4, a4, -1
277 addi t4, t4, -1
297 sb t1, -1(t3)
305 lb t1, -1(a4)
306 addi a4, a4, -1
307 addi t4, t4, -1
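
Putting the pieces together: the byte loops above handle the unaligned head and tail (and short copies), the word loops handle the SZREG-aligned middle, and the direction is chosen so overlapping regions copy safely. A compressed sketch of the overall strategy only, not the kernel's exact control flow (byte-at-a-time throughout for brevity):

    #include <stddef.h>
    #include <stdint.h>

    void *memmove_sketch(void *dest, const void *src, size_t n)
    {
            uint8_t *d = dest;
            const uint8_t *s = src;

            if (d < s) {
                    while (n--)          /* forward: lb/sb walking up */
                            *d++ = *s++;
            } else {
                    while (n--)          /* reverse: lb t1, -1(a4); sb t1, -1(t3) */
                            d[n] = s[n];
            }
            return dest;
    }
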