/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/memset.S
 *
 *  Copyright (C) 1995-2000 Russell King
 *
 *  ASM optimised string functions
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

	.text
	.align	5

ENTRY(__memset)
ENTRY(mmioset)
WEAK(memset)
UNWIND( .fnstart )
	and	r1, r1, #255		@ cast to unsigned char
	ands	r3, r0, #3		@ 1 unaligned?
	mov	ip, r0			@ preserve r0 as return value
	bne	6f			@ 1
/*
 * we know that the pointer in ip is aligned to a word boundary.
 */
1:	orr	r1, r1, r1, lsl #8
	orr	r1, r1, r1, lsl #16
	mov	r3, r1
7:	cmp	r2, #16
	blt	4f

#if ! CALGN(1)+0

/*
 * We need 2 extra registers for this loop - use r8 and the LR
 */
	stmfd	sp!, {r8, lr}
UNWIND( .fnend )
UNWIND( .fnstart )
UNWIND( .save {r8, lr} )
	mov	r8, r1
	mov	lr, r3

2:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3, r8, lr}	@ 64 bytes at a time.
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	bgt	2b
	ldmfdeq	sp!, {r8, pc}		@ Now <64 bytes to go.
/*
 * No need to correct the count; we're only testing bits from now on
 */
	tst	r2, #32
	stmiane	ip!, {r1, r3, r8, lr}
	stmiane	ip!, {r1, r3, r8, lr}
	tst	r2, #16
	stmiane	ip!, {r1, r3, r8, lr}
	ldmfd	sp!, {r8, lr}
UNWIND( .fnend )

#else

/*
 * This version aligns the destination pointer in order to write
 * whole cache lines at once.
 */

	stmfd	sp!, {r4-r8, lr}
UNWIND( .fnend )
UNWIND( .fnstart )
UNWIND( .save {r4-r8, lr} )
	mov	r4, r1
	mov	r5, r3
	mov	r6, r1
	mov	r7, r3
	mov	r8, r1
	mov	lr, r3

	cmp	r2, #96
	tstgt	ip, #31
	ble	3f

/*
 * The pointer is already word aligned here, so the distance to the
 * next 32-byte boundary is a multiple of 4 in the range 4..28.
 * Shifting it left by 28 moves bit 4 into C and bit 3 into N, so the
 * conditional stores below peel off 16, 8 and 4 bytes as needed.
 */
	and	r8, ip, #31
	rsb	r8, r8, #32		@ bytes to the next 32-byte boundary
	sub	r2, r2, r8
	movs	r8, r8, lsl #(32 - 4)	@ bit 4 -> C, bit 3 -> N
	stmiacs	ip!, {r4, r5, r6, r7}	@ 16 bytes if bit 4 was set
	stmiami	ip!, {r4, r5}		@ 8 bytes if bit 3 was set
	tst	r8, #(1 << 30)		@ test what was bit 2
	mov	r8, r1
	strne	r1, [ip], #4		@ 4 bytes if bit 2 was set

3:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3-r8, lr}
	stmiage	ip!, {r1, r3-r8, lr}
	bgt	3b
	ldmfdeq	sp!, {r4-r8, pc}

	tst	r2, #32
	stmiane	ip!, {r1, r3-r8, lr}
	tst	r2, #16
	stmiane	ip!, {r4-r7}
	ldmfd	sp!, {r4-r8, lr}
UNWIND( .fnend )

#endif

UNWIND( .fnstart )
4:	tst	r2, #8
	stmiane	ip!, {r1, r3}
	tst	r2, #4
	strne	r1, [ip], #4
/*
 * When we get here, we've got less than 4 bytes to set. We
 * may have an unaligned pointer as well.
 */
5:	tst	r2, #2
	strbne	r1, [ip], #1
	strbne	r1, [ip], #1
	tst	r2, #1
	strbne	r1, [ip], #1
	ret	lr

/*
 * Unaligned destination: store 4 - r3 leading bytes (r3 = r0 & 3) to
 * bring the pointer to a word boundary, then rejoin the aligned path.
 */
6:	subs	r2, r2, #4		@ 1 do we have enough
	blt	5b			@ 1 bytes to align with?
	cmp	r3, #2			@ 1
	strblt	r1, [ip], #1		@ 1
	strble	r1, [ip], #1		@ 1
	strb	r1, [ip], #1		@ 1
	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
	b	1b
UNWIND( .fnend )
ENDPROC(memset)
ENDPROC(mmioset)
ENDPROC(__memset)

ENTRY(__memset32)
UNWIND( .fnstart )
	mov	r3, r1			@ copy r1 to r3 and fall into memset64
UNWIND( .fnend )
ENDPROC(__memset32)
ENTRY(__memset64)
UNWIND( .fnstart )
	mov	ip, r0			@ preserve r0 as return value
	b	7b			@ jump into the middle of memset
UNWIND( .fnend )
ENDPROC(__memset64)
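
/*
 * A note on the two entry points above, with a C-level sketch. This
 * assumes the prototypes declared for this implementation in
 * arch/arm/include/asm/string.h; if your tree declares them
 * differently, the register mapping differs too. The idea is that the
 * 64-bit fill value is split into two 32-bit halves so that, under the
 * AAPCS, the low half, the count and the high half arrive in r1, r2
 * and r3 - exactly the register layout memset expects at label 7:
 *
 *	void *__memset32(uint32_t *p, uint32_t v, __kernel_size_t n);
 *	void *__memset64(uint64_t *p, uint32_t low, __kernel_size_t n,
 *			 uint32_t hi);
 *
 *	static inline void *memset64(uint64_t *p, uint64_t v,
 *				     __kernel_size_t n)
 *	{
 *		return __memset64(p, v, n, v >> 32);
 *	}
 *
 * __memset32 then only has to duplicate r1 into r3 before falling
 * through into __memset64.
 */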