#if !__ARMEB__

/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


/*
 * Optimized memcpy() for ARM.
 *
 * Note that memcpy() always returns the destination pointer,
 * so we have to preserve R0.
 */

/*
 * This file has been modified from the original for use in musl libc.
 * The main changes are: addition of .type memcpy,%function to make the
 * code safely callable from Thumb mode, adjusting the return
 * instructions to be compatible with pre-Thumb ARM cpus, removal of
 * prefetch code that is not compatible with older cpus, and support
 * for building as Thumb 2.
 */

.syntax unified

.global memcpy
.type memcpy,%function
memcpy:
	/* The stack must always be 64-bit aligned to be compliant with the
	 * ARM ABI. Since we have to save R0, we might as well save R4,
	 * which we can use for better pipelining of the reads below.
	 */
	.fnstart
	.save       {r0, r4, lr}
	stmfd       sp!, {r0, r4, lr}
	/* Making room for r5-r11 which will be spilled later */
	.pad        #28
	sub         sp, sp, #28

	/* it simplifies things to take care of len<4 early */
	cmp     r2, #4
	blo     copy_last_3_and_return

	/* compute the offset to align the source
	 * offset = (4-(src&3))&3 = -src & 3
	 */
	rsb     r3, r1, #0
	ands    r3, r3, #3
	beq     src_aligned

	/* align source to 32 bits. We need to insert 2 instructions between
	 * an ldr[b|h] and its str[b|h] because byte and half-word
	 * instructions stall 2 cycles.
	 */
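	/* r3 is the 0-3 byte offset computed above. The movs ... lsl #31
	 * below moves bit 0 of r3 into N and bit 1 into the carry flag,
	 * so the MI load/store pair copies one byte and the CS pair
	 * copies two, handling offsets of 1, 2 and 3 without branches.
	 */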
	movs    r12, r3, lsl #31
	sub     r2, r2, r3              /* we know that r3 <= r2 because r2 >= 4 */
	ldrbmi  r3, [r1], #1
	ldrbcs  r4, [r1], #1
	ldrbcs  r12,[r1], #1
	strbmi  r3, [r0], #1
	strbcs  r4, [r0], #1
	strbcs  r12,[r0], #1

src_aligned:

	/* see if src and dst are aligned together (congruent) */
	eor     r12, r0, r1
	tst     r12, #3
	bne     non_congruent
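	/* falling through here means dst has the same low two bits as the
	 * now word-aligned src, so word loads and stores line up on both
	 * sides from this point on.
	 */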

	/* Use post-increment mode for stm to spill r5-r11 to the reserved
	 * stack frame. Don't update sp.
	 */
	stmea   sp, {r5-r11}

	/* align the destination to a cache-line */
	rsb     r3, r0, #0
	ands    r3, r3, #0x1C
	beq     congruent_aligned32
	cmp     r3, r2
	andhi   r3, r2, #0x1C

	/* conditionally copies 0 to 7 words (length in r3) */
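	/* r3 is a multiple of 4 no larger than 0x1C. lsl #28 puts bit 4 of
	 * r3 into the carry and bit 3 into N, so the CS ldm/stm pair moves
	 * 16 bytes, the MI pair moves 8, and the tst #0x4 below picks up
	 * the remaining word.
	 */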
	movs    r12, r3, lsl #28
	ldmcs   r1!, {r4, r5, r6, r7}           /* 16 bytes */
	ldmmi   r1!, {r8, r9}                   /*  8 bytes */
	stmcs   r0!, {r4, r5, r6, r7}
	stmmi   r0!, {r8, r9}
	tst     r3, #0x4
	ldrne   r10,[r1], #4                    /*  4 bytes */
	strne   r10,[r0], #4
	sub     r2, r2, r3

congruent_aligned32:
	/*
	 * here the destination is aligned to a 32-byte (cache line)
	 * boundary, and source and destination are word-aligned.
	 */

cached_aligned32:
	subs    r2, r2, #32
	blo     less_than_32_left

	/*
	 * We preload a cache-line up to 64 bytes ahead. On the 926, this will
	 * stall only until the requested word is fetched, but the linefill
	 * continues in the background.
	 * While the linefill is going, we write our previous cache-line
	 * into the write-buffer (which should have some free space).
	 * When the linefill is done, the write-buffer will
	 * start dumping its contents into memory.
	 *
	 * While all this is going on, we load a full cache line into
	 * 8 registers; this cache line should be in the cache by now
	 * (or at least partly in the cache).
	 *
	 * This code should work well regardless of the source/dest alignment.
	 *
	 */

	/* Align the preload register to a cache-line because the cpu does
	 * "critical word first" (the first word requested is loaded first).
	 */
	@ bic           r12, r1, #0x1F
	@ add           r12, r12, #64

1:      ldmia   r1!, { r4-r11 }
	subs    r2, r2, #32

	/*
	 * NOTE: if r12 is more than 64 ahead of r1, the following ldrhi
	 * for the ARM9 preload will not be safely guarded by the preceding
	 * subs. When it is safely guarded, the only way to get a SIGSEGV
	 * here is for the caller to overstate the length.
	 */
	@ ldrhi         r3, [r12], #32      /* cheap ARM9 preload */
	stmia   r0!, { r4-r11 }
	bhs     1b

	add     r2, r2, #32

less_than_32_left:
	/*
	 * less than 32 bytes left at this point (length in r2)
	 */

	/* skip all this if there is nothing to do, which should
	 * be a common case (when it does run, the code below takes
	 * about 16 cycles)
	 */
	tst     r2, #0x1F
	beq     1f

	/* conditionally copies 0 to 31 bytes */
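	/* Same flag trick as above: lsl #28 exposes bits 4 and 3 of the
	 * remaining count (16- and 8-byte chunks), lsl #30 exposes bits 2
	 * and 1 (4- and 2-byte chunks), and the final tst #0x1 handles the
	 * odd byte.
	 */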
	movs    r12, r2, lsl #28
	ldmcs   r1!, {r4, r5, r6, r7}           /* 16 bytes */
	ldmmi   r1!, {r8, r9}                   /*  8 bytes */
	stmcs   r0!, {r4, r5, r6, r7}
	stmmi   r0!, {r8, r9}
	movs    r12, r2, lsl #30
	ldrcs   r3, [r1], #4                    /*  4 bytes */
	ldrhmi  r4, [r1], #2                    /*  2 bytes */
	strcs   r3, [r0], #4
	strhmi  r4, [r0], #2
	tst     r2, #0x1
	ldrbne  r3, [r1]                        /*  last byte  */
	strbne  r3, [r0]

	/* we're done! restore everything and return */
1:      ldmfd   sp!, {r5-r11}
	ldmfd   sp!, {r0, r4, lr}
	bx      lr

	/********************************************************************/

non_congruent:
	/*
	 * here source is aligned to 4 bytes
	 * but destination is not.
	 *
	 * in the code below r2 is the number of bytes read
	 * (the number of bytes written is always smaller, because we have
	 * partial words in the shift queue)
	 */
	cmp     r2, #4
	blo     copy_last_3_and_return

	/* Use post-increment mode for stm to spill r5-r11 to the reserved
	 * stack frame. Don't update sp.
	 */
	stmea   sp, {r5-r11}

	/* compute shifts needed to align src to dest */
	rsb     r5, r0, #0
	and     r5, r5, #3                      /* r5 = # bytes in partial words */
	mov     r12, r5, lsl #3         /* r12 = right */
	rsb     lr, r12, #32            /* lr = left  */
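	/* Example: if dst & 3 == 3 then r5 = 1, r12 = 8 and lr = 24; after
	 * the single alignment byte is stored below, r3 keeps 3 leftover
	 * bytes and every word written in the main loop is assembled as
	 * (next_source_word << lr) | r3.
	 */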

	/* read the first word */
	ldr     r3, [r1], #4
	sub     r2, r2, #4

	/* write a partial word (0 to 3 bytes), such that the destination
	 * becomes aligned to 32 bits (r5 = number of bytes to copy for
	 * alignment)
	 */
	movs    r5, r5, lsl #31
	strbmi  r3, [r0], #1
	movmi   r3, r3, lsr #8
	strbcs  r3, [r0], #1
	movcs   r3, r3, lsr #8
	strbcs  r3, [r0], #1
	movcs   r3, r3, lsr #8

	cmp     r2, #4
	blo     partial_word_tail

	/* Align destination to 32 bytes (cache line boundary) */
1:      tst     r0, #0x1c
	beq     2f
	ldr     r5, [r1], #4
	sub     r2, r2, #4
	mov     r4, r5, lsl lr
	orr     r4, r4, r3
	mov     r3, r5, lsr r12
	str     r4, [r0], #4
	cmp     r2, #4
	bhs     1b
	blo     partial_word_tail

	/* copy 32 bytes at a time */
2:      subs    r2, r2, #32
	blo     less_than_thirtytwo

	/* Use immediate shift amounts, because register-specified shifts
	 * cost an extra cycle, which could account for up to a 50%
	 * performance hit.
	 */

	cmp     r12, #24
	beq     loop24
	cmp     r12, #8
	beq     loop8
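	/* r12 is the right-shift amount (8, 16 or 24). Each loop below
	 * hard-codes its own shift pair so the inner loop avoids
	 * register-specified shifts; r12 == 16 falls through to loop16.
	 */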

loop16:
	ldr     r12, [r1], #4
1:      mov     r4, r12
	ldmia   r1!, {   r5,r6,r7,  r8,r9,r10,r11}
	subs    r2, r2, #32
	ldrhs   r12, [r1], #4
	orr     r3, r3, r4, lsl #16
	mov     r4, r4, lsr #16
	orr     r4, r4, r5, lsl #16
	mov     r5, r5, lsr #16
	orr     r5, r5, r6, lsl #16
	mov     r6, r6, lsr #16
	orr     r6, r6, r7, lsl #16
	mov     r7, r7, lsr #16
	orr     r7, r7, r8, lsl #16
	mov     r8, r8, lsr #16
	orr     r8, r8, r9, lsl #16
	mov     r9, r9, lsr #16
	orr     r9, r9, r10, lsl #16
	mov     r10, r10, lsr #16
	orr     r10, r10, r11, lsl #16
	stmia   r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
	mov     r3, r11, lsr #16
	bhs     1b
	b       less_than_thirtytwo

loop8:
	ldr     r12, [r1], #4
1:      mov     r4, r12
	ldmia   r1!, {   r5,r6,r7,  r8,r9,r10,r11}
	subs    r2, r2, #32
	ldrhs   r12, [r1], #4
	orr     r3, r3, r4, lsl #24
	mov     r4, r4, lsr #8
	orr     r4, r4, r5, lsl #24
	mov     r5, r5, lsr #8
	orr     r5, r5, r6, lsl #24
	mov     r6, r6, lsr #8
	orr     r6, r6, r7, lsl #24
	mov     r7, r7, lsr #8
	orr     r7, r7, r8, lsl #24
	mov     r8, r8, lsr #8
	orr     r8, r8, r9, lsl #24
	mov     r9, r9, lsr #8
	orr     r9, r9, r10, lsl #24
	mov     r10, r10, lsr #8
	orr     r10, r10, r11, lsl #24
	stmia   r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
	mov     r3, r11, lsr #8
	bhs     1b
	b       less_than_thirtytwo

loop24:
	ldr     r12, [r1], #4
1:      mov     r4, r12
	ldmia   r1!, {   r5,r6,r7,  r8,r9,r10,r11}
	subs    r2, r2, #32
	ldrhs   r12, [r1], #4
	orr     r3, r3, r4, lsl #8
	mov     r4, r4, lsr #24
	orr     r4, r4, r5, lsl #8
	mov     r5, r5, lsr #24
	orr     r5, r5, r6, lsl #8
	mov     r6, r6, lsr #24
	orr     r6, r6, r7, lsl #8
	mov     r7, r7, lsr #24
	orr     r7, r7, r8, lsl #8
	mov     r8, r8, lsr #24
	orr     r8, r8, r9, lsl #8
	mov     r9, r9, lsr #24
	orr     r9, r9, r10, lsl #8
	mov     r10, r10, lsr #24
	orr     r10, r10, r11, lsl #8
	stmia   r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
	mov     r3, r11, lsr #24
	bhs     1b

less_than_thirtytwo:
	/* copy the last 0 to 31 bytes of the source */
	rsb     r12, lr, #32            /* we corrupted r12, recompute it  */
	add     r2, r2, #32
	cmp     r2, #4
	blo     partial_word_tail

1:      ldr     r5, [r1], #4
	sub     r2, r2, #4
	mov     r4, r5, lsl lr
	orr     r4, r4, r3
	mov     r3, r5, lsr r12
	str     r4, [r0], #4
	cmp     r2, #4
	bhs     1b

partial_word_tail:
	/* we have a partial word in the input buffer */
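	/* lr/8 is the number of bytes still held in r3. Shifting lr left
	 * by 28 moves its bit 3 into N and bit 4 into the carry, so the
	 * MI store writes one byte and the CS stores write two, covering
	 * the 1 to 3 pending bytes.
	 */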
	movs    r5, lr, lsl #(31-3)
	strbmi  r3, [r0], #1
	movmi   r3, r3, lsr #8
	strbcs  r3, [r0], #1
	movcs   r3, r3, lsr #8
	strbcs  r3, [r0], #1

	/* Refill spilled registers from the stack. Don't update sp. */
	ldmfd   sp, {r5-r11}

copy_last_3_and_return:
	movs    r2, r2, lsl #31 /* copy remaining 0, 1, 2 or 3 bytes */
	ldrbmi  r2, [r1], #1
	ldrbcs  r3, [r1], #1
	ldrbcs  r12,[r1]
	strbmi  r2, [r0], #1
	strbcs  r3, [r0], #1
	strbcs  r12,[r0]

	/* we're done! restore sp and spilled registers and return */
	add     sp,  sp, #28
	ldmfd   sp!, {r0, r4, lr}
	bx      lr

#endif