/*
 * Copyright (c) 2013 ARM Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/cpu-features.h>
#include <private/bionic_asm.h>

#ifdef __ARMEB__
#define S2LOMEM lsl
#define S2LOMEMEQ lsleq
#define S2HIMEM lsr
#define MSB 0x000000ff
#define LSB 0xff000000
#define BYTE0_OFFSET 24
#define BYTE1_OFFSET 16
#define BYTE2_OFFSET 8
#define BYTE3_OFFSET 0
#else /* not  __ARMEB__ */
#define S2LOMEM lsr
#define S2LOMEMEQ lsreq
#define S2HIMEM lsl
#define BYTE0_OFFSET 0
#define BYTE1_OFFSET 8
#define BYTE2_OFFSET 16
#define BYTE3_OFFSET 24
#define MSB 0xff000000
#define LSB 0x000000ff
#endif /* not  __ARMEB__ */
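
/* Explanatory note on the macros above (the values follow directly from the
   byte order): S2LOMEM shifts a loaded word so that bytes from higher string
   addresses move toward the register position of the lowest string address,
   discarding the earliest bytes; S2HIMEM shifts the opposite way.  On
   little-endian, string byte 0 sits in the least-significant byte of a loaded
   word, so S2LOMEM is lsr; on big-endian it sits in the most-significant
   byte, so S2LOMEM is lsl.  LSB and MSB mask the byte from the lowest and
   highest string address within a word, and BYTEn_OFFSET is the rotation
   that brings string byte n to the least-significant byte (used with
   uxtb ... ror below). */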

.syntax         unified

#if defined (__thumb__)
        .thumb
        .thumb_func
#endif

ENTRY(strcmp)
      /* Use LDRD whenever possible.  */

/* The main thing to look out for when comparing large blocks is that
   the loads do not cross a page boundary when loading past the index
   of the byte with the first difference or the first string-terminator.

   For example, if the strings are identical and the string-terminator
   is at index k, byte-by-byte comparison will not load beyond addresses
   s1+k and s2+k; word-by-word comparison may load up to 3 bytes beyond
   k; double-word comparison, up to 7 bytes.  If the load of these bytes
   crosses a page boundary, it might cause a memory fault (if the page is
   not mapped) that would not have happened in byte-by-byte comparison.

   If an address is (double-)word aligned, then a load of a (double) word
   from that address will not cross a page boundary.
   Therefore, the algorithm below considers word and double-word alignment
   of strings separately.  */

/* High-level description of the algorithm.

   * The fast path: if both strings are double-word aligned,
     use LDRD to load two words from each string in every loop iteration.
   * If the strings have the same offset from a word boundary,
     use LDRB to load and compare byte by byte until
     the first string is aligned to a word boundary (at most 3 bytes).
     This is optimized for quick return on short unaligned strings.
   * If the strings have the same offset from a double-word boundary,
     use LDRD to load two words from each string in every loop iteration, as in the fast path.
   * If the strings do not have the same offset from a double-word boundary,
     load a word from the second string before the loop to initialize the queue.
     Use LDRD to load two words from each string in every loop iteration.
     Inside the loop, load the second word from the second string only after comparing
     the first word, using the queued value, to guarantee safety across page boundaries.
   * If the strings do not have the same offset from a word boundary,
     use LDR and a shift queue.  The order of loads and comparisons matters,
     as in the previous case.

   * Use UADD8 and SEL to compare words, and use REV and CLZ to compute the return value.
   * The only difference between ARM and Thumb modes is the use of the CBZ instruction.
   * The only difference between big- and little-endian is the use of REV in little-endian
     to compute the return value, instead of MOV.
*/
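
/* For reference: every path below must agree in sign with the trivial
   byte-by-byte strcmp.  A minimal C sketch of those semantics (illustrative
   only; it is not assembled or compiled as part of this file):

       int strcmp(const char *s1, const char *s2) {
           const unsigned char *p1 = (const unsigned char *) s1;
           const unsigned char *p2 = (const unsigned char *) s2;
           // Walk both strings while the bytes match and are not NUL.
           while (*p1 == *p2 && *p1 != 0) {
               p1++;
               p2++;
           }
           // Difference of the first mismatching bytes (0 if both are NUL).
           return (int) *p1 - (int) *p2;
       }
*/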

        .macro m_cbz reg label
#ifdef __thumb2__
        cbz     \reg, \label
#else   /* not defined __thumb2__ */
        cmp     \reg, #0
        beq     \label
#endif /* not defined __thumb2__ */
        .endm /* m_cbz */

        .macro m_cbnz reg label
#ifdef __thumb2__
        cbnz    \reg, \label
#else   /* not defined __thumb2__ */
        cmp     \reg, #0
        bne     \label
#endif /* not defined __thumb2__ */
        .endm /* m_cbnz */

        .macro  init
        /* Macro to save temporary registers and prepare magic values.  */
        subs    sp, sp, #16
        .cfi_def_cfa_offset 16
        strd    r4, r5, [sp, #8]
        .cfi_rel_offset r4, 8
        .cfi_rel_offset r5, 12
        strd    r6, r7, [sp]
        .cfi_rel_offset r6, 0
        .cfi_rel_offset r7, 4
        mvn     r6, #0  /* all F */
        mov     r7, #0  /* all 0 */
        .endm   /* init */
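
/* Explanatory note: r6 (all ones) and r7 (all zeros) are the constants
   consumed by the UADD8/SEL idiom in magic_find_zero_bytes below. */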

        .macro  magic_compare_and_branch w1 w2 label
        /* Macro to compare registers w1 and w2 and conditionally branch to label.  */
        cmp     \w1, \w2        /* Are w1 and w2 the same?  */
        magic_find_zero_bytes \w1
        it      eq
        cmpeq   ip, #0          /* Is there a zero byte in w1?  */
        bne     \label
        .endm /* magic_compare_and_branch */

        .macro  magic_find_zero_bytes w1
        /* Macro to find all-zero bytes in w1, result is in ip.  */
        uadd8   ip, \w1, r6
        sel     ip, r7, r6
        .endm /* magic_find_zero_bytes */
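
/* Worked example of the idiom above (illustrative value): with
   w1 = 0x42005a31, UADD8 computes byte-wise w1 + 0xff; every non-zero byte
   carries out and sets its GE flag, while the 0x00 byte does not.  SEL then
   picks 0x00 from r7 where GE is set and 0xff from r6 where it is clear,
   so ip = 0x00ff0000: non-zero exactly when w1 contains a zero byte. */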

        .macro  setup_return w1 w2
#ifdef __ARMEB__
        mov     r1, \w1
        mov     r2, \w2
#else /* not  __ARMEB__ */
        rev     r1, \w1
        rev     r2, \w2
#endif /* not  __ARMEB__ */
        .endm /* setup_return */
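
/* Explanatory note: on little-endian, REV byte-reverses the mismatching
   words so that the byte earliest in string order becomes the most
   significant; an unsigned compare of r1 and r2 then orders the strings
   correctly, and CLZ on the reversed zero-byte mask (see .L_do_return)
   finds the first NUL.  On big-endian the loaded words already have this
   property, hence the plain MOVs. */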

        pld [r0, #0]
        pld [r1, #0]

        /* Are both strings double-word aligned?  */
        orr     ip, r0, r1
        tst     ip, #7
        bne     .L_do_align

        /* Fast path.  */
        .save   {r4-r7}
        init

.L_doubleword_aligned:

        /* Get here when the strings to compare are double-word aligned.  */
        /* Compare two words in every iteration.  */
        .p2align        2
2:
        pld [r0, #16]
        pld [r1, #16]

        /* Load the next double-word from each string.  */
        ldrd    r2, r3, [r0], #8
        ldrd    r4, r5, [r1], #8

        magic_compare_and_branch w1=r2, w2=r4, label=.L_return_24
        magic_compare_and_branch w1=r3, w2=r5, label=.L_return_35
        b       2b
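
/* Explanatory note: the .L_return_NM stubs are named after the two registers
   holding the mismatching words (e.g. .L_return_24 compares r2 against r4);
   each stub feeds those registers to setup_return. */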

.L_do_align:
        /* Is the first string word-aligned?  */
        ands    ip, r0, #3
        beq     .L_word_aligned_r0

        /* Fast compare byte by byte until the first string is word-aligned.  */
        /* The offset of r0 from a word boundary is in ip.  Thus, the number of
        bytes to read until the next word boundary is 4-ip.  */
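
        /* Explanatory note: after the `lsls ip, ip, #31` below, the Z flag
        holds bit 0 of the offset and the C flag holds bit 1 (the last bit
        shifted out).  Offset 2 therefore branches to .L_byte2, offset 3 to
        .L_byte3, and offset 1 falls through to .L_byte1, so exactly 4-offset
        bytes are compared before r0 reaches the word boundary.  */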
        bic     r0, r0, #3
        ldr     r2, [r0], #4
        lsls    ip, ip, #31
        beq     .L_byte2
        bcs     .L_byte3

.L_byte1:
        ldrb    ip, [r1], #1
        uxtb    r3, r2, ror #BYTE1_OFFSET
        subs    ip, r3, ip
        bne     .L_fast_return
        m_cbz   reg=r3, label=.L_fast_return

.L_byte2:
        ldrb    ip, [r1], #1
        uxtb    r3, r2, ror #BYTE2_OFFSET
        subs    ip, r3, ip
        bne     .L_fast_return
        m_cbz   reg=r3, label=.L_fast_return

.L_byte3:
        ldrb    ip, [r1], #1
        uxtb    r3, r2, ror #BYTE3_OFFSET
        subs    ip, r3, ip
        bne     .L_fast_return
        m_cbnz  reg=r3, label=.L_word_aligned_r0

.L_fast_return:
        mov     r0, ip
        bx      lr

.L_word_aligned_r0:
        init
        /* The first string is word-aligned.  */
        /* Is the second string word-aligned?  */
        ands    ip, r1, #3
        bne     .L_strcmp_unaligned

.L_word_aligned:
        /* The strings are word-aligned.  */
        /* Is the first string double-word aligned?  */
        tst     r0, #4
        beq     .L_doubleword_aligned_r0

        /* If r0 is not double-word aligned yet, align it by loading
        and comparing the next word from each string.  */
        ldr     r2, [r0], #4
        ldr     r4, [r1], #4
        magic_compare_and_branch w1=r2 w2=r4 label=.L_return_24

.L_doubleword_aligned_r0:
        /* Get here when r0 is double-word aligned.  */
        /* Is r1 double-word aligned?  */
        tst     r1, #4
        beq     .L_doubleword_aligned

        /* Get here when the strings to compare are word-aligned,
        r0 is double-word aligned, but r1 is not double-word aligned.  */

        /* Initialize the queue.  */
        ldr     r5, [r1], #4

        /* Compare two words in every iteration.  */
        .p2align        2
3:
        pld [r0, #16]
        pld [r1, #16]

        /* Load the next double-word from each string and compare.  */
        ldrd    r2, r3, [r0], #8
        magic_compare_and_branch w1=r2 w2=r5 label=.L_return_25
        ldrd    r4, r5, [r1], #8
        magic_compare_and_branch w1=r3 w2=r4 label=.L_return_34
        b       3b

        .macro miscmp_word offsetlo offsethi
        /* Macro to compare misaligned strings.  */
        /* r0, r1 are word-aligned, and at least one of the strings
        is not double-word aligned.  */
        /* Compare one word in every loop iteration.  */
        /* OFFSETLO is the original bit-offset of r1 from a word-boundary,
        OFFSETHI is 32 - OFFSETLO (i.e., offset from the next word).  */

        /* Initialize the shift queue.  */
        ldr     r5, [r1], #4

        /* Compare one word from each string in every loop iteration.  */
        .p2align        2
7:
        ldr     r3, [r0], #4
        S2LOMEM r5, r5, #\offsetlo
        magic_find_zero_bytes w1=r3
        cmp     r7, ip, S2HIMEM #\offsetlo
        and     r2, r3, r6, S2LOMEM #\offsetlo
        it      eq
        cmpeq   r2, r5
        bne     .L_return_25
        ldr     r5, [r1], #4
        cmp     ip, #0
        eor r3, r2, r3
        S2HIMEM r2, r5, #\offsethi
        it      eq
        cmpeq   r3, r2
        bne     .L_return_32
        b       7b
        .endm /* miscmp_word */
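
/* Explanatory note on the shift queue: r5 always holds the most recently
   loaded word of the second string.  Each iteration first compares the tail
   of the queued word against the low part (in string order) of the word just
   loaded from the first string, and only loads the next word of the second
   string after that compare succeeds, so no load ever runs past the page
   that contains the terminator. */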

.L_return_32:
        setup_return w1=r3, w2=r2
        b       .L_do_return
.L_return_34:
        setup_return w1=r3, w2=r4
        b       .L_do_return
.L_return_25:
        setup_return w1=r2, w2=r5
        b       .L_do_return
.L_return_35:
        setup_return w1=r3, w2=r5
        b       .L_do_return
.L_return_24:
        setup_return w1=r2, w2=r4

.L_do_return:

#ifdef __ARMEB__
        mov     r0, ip
#else /* not  __ARMEB__ */
        rev     r0, ip
#endif /* not  __ARMEB__ */

        /* Restore temporaries early, before computing the return value.  */
        ldrd    r6, r7, [sp]
        ldrd    r4, r5, [sp, #8]
        adds    sp, sp, #16
        .cfi_def_cfa_offset 0
        .cfi_restore r4
        .cfi_restore r5
        .cfi_restore r6
        .cfi_restore r7

        /* There is a zero byte or a differing byte pair in r1 and r2.  */
        /* r0 contains a mask of all-zero bytes in r1.  */
        /* Using r0 and not ip here because CBZ requires a low register.  */
        m_cbz   reg=r0, label=.L_compute_return_value
        clz     r0, r0
        /* r0 contains the number of bits to the left of the first all-zero byte in r1.  */
        rsb     r0, r0, #24
        /* Now r0 contains the number of bits to the right of the first all-zero byte in r1.  */
        lsr     r1, r1, r0
        lsr     r2, r2, r0

.L_compute_return_value:
        movs    r0, #1
        cmp     r1, r2
        /* The return value is computed as follows.
        If r1 > r2, then (C==1 and Z==0), LS does not hold, and r0 is #1 at return.
        If r1 < r2, then (C==0 and Z==0), and we execute SBC with carry_in=0,
        which means r0 := r0 - r0 - 1, so r0 is #-1 at return.
        If r1 == r2, then (C==1 and Z==1), and we execute SBC with carry_in=1,
        which means r0 := r0 - r0, so r0 is #0 at return.
        (C==0 and Z==1) cannot happen because the carry bit means "not borrow".  */
        it      ls
        sbcls   r0, r0, r0
        bx      lr

    /* The code from the previous version of strcmp.S handles all of the
     * cases where the first string and second string cannot both be
     * aligned to a word boundary faster than the new algorithm.  See
     * bionic/libc/arch-arm/cortex-a15/bionic/strcmp.S for the unedited
     * version of the code.
     */
.L_strcmp_unaligned:
	wp1 .req r0
	wp2 .req r1
	b1  .req r2
	w1  .req r4
	w2  .req r5
	t1  .req ip
	@ r3 is scratch

2:
	mov	b1, #1
	orr	b1, b1, b1, lsl #8
	orr	b1, b1, b1, lsl #16
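	/* Explanatory note: b1 is now the classic magic constant 0x01010101.
	 * A word w contains a zero byte iff
	 *     (w - 0x01010101) & ~w & 0x80808080
	 * is non-zero; the `sub r3, w1, b1` / `bic r3, r3, w1` /
	 * `ands r3, r3, b1, lsl #7` sequences below (b1 lsl #7 == 0x80808080)
	 * compute exactly this syndrome. */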

	and	t1, wp2, #3
	bic	wp2, wp2, #3
	ldr	w1, [wp1], #4
	ldr	w2, [wp2], #4
	cmp	t1, #2
	beq	2f
	bhi	3f

	/* Critical inner Loop: Block with 3 bytes initial overlap */
	.p2align	2
1:
	bic	t1, w1, #MSB
	cmp	t1, w2, S2LOMEM #8
	sub	r3, w1, b1
	bic	r3, r3, w1
	bne	4f
	ands	r3, r3, b1, lsl #7
	it	eq
	ldreq	w2, [wp2], #4
	bne	5f
	eor	t1, t1, w1
	cmp	t1, w2, S2HIMEM #24
	bne	6f
	ldr	w1, [wp1], #4
	b	1b
4:
	S2LOMEM	w2, w2, #8
	b	8f

5:
#ifdef __ARMEB__
	/* The syndrome value may contain false ones if the string ends
	 * with the bytes 0x01 0x00
	 */
	tst	w1, #0xff000000
	itt	ne
	tstne	w1, #0x00ff0000
	tstne	w1, #0x0000ff00
	beq	7f
#else
	bics	r3, r3, #0xff000000
	bne	7f
#endif
	ldrb	w2, [wp2]
	S2LOMEM	t1, w1, #24
#ifdef __ARMEB__
	lsl	w2, w2, #24
#endif
	b	8f

6:
	S2LOMEM	t1, w1, #24
	and	w2, w2, #LSB
	b	8f

	/* Critical inner Loop: Block with 2 bytes initial overlap */
	.p2align	2
2:
	S2HIMEM	t1, w1, #16
	sub	r3, w1, b1
	S2LOMEM	t1, t1, #16
	bic	r3, r3, w1
	cmp	t1, w2, S2LOMEM #16
	bne	4f
	ands	r3, r3, b1, lsl #7
	it	eq
	ldreq	w2, [wp2], #4
	bne	5f
	eor	t1, t1, w1
	cmp	t1, w2, S2HIMEM #16
	bne	6f
	ldr	w1, [wp1], #4
	b	2b

5:
#ifdef __ARMEB__
	/* The syndrome value may contain false ones if the string ends
	 * with the bytes 0x01 0x00
	 */
	tst	w1, #0xff000000
	it	ne
	tstne	w1, #0x00ff0000
	beq	7f
#else
	lsls	r3, r3, #16
	bne	7f
#endif
	ldrh	w2, [wp2]
	S2LOMEM	t1, w1, #16
#ifdef __ARMEB__
	lsl	w2, w2, #16
#endif
	b	8f

6:
	S2HIMEM	w2, w2, #16
	S2LOMEM	t1, w1, #16
4:
	S2LOMEM	w2, w2, #16
	b	8f

	/* Critical inner Loop: Block with 1 byte initial overlap */
	.p2align	2
3:
	and	t1, w1, #LSB
	cmp	t1, w2, S2LOMEM #24
	sub	r3, w1, b1
	bic	r3, r3, w1
	bne	4f
	ands	r3, r3, b1, lsl #7
	it	eq
	ldreq	w2, [wp2], #4
	bne	5f
	eor	t1, t1, w1
	cmp	t1, w2, S2HIMEM #8
	bne	6f
	ldr	w1, [wp1], #4
	b	3b
4:
	S2LOMEM	w2, w2, #24
	b	8f
5:
	/* The syndrome value may contain false ones if the string ends
	 * with the bytes 0x01 0x00
	 */
	tst	w1, #LSB
	beq	7f
	ldr	w2, [wp2], #4
6:
	S2LOMEM	t1, w1, #8
	bic	w2, w2, #MSB
	b	8f
7:
	mov	r0, #0

	/* Restore registers and stack.  */
	ldrd	r6, r7, [sp]
	ldrd	r4, r5, [sp, #8]
	adds	sp, sp, #16
	.cfi_def_cfa_offset 0
	.cfi_restore r4
	.cfi_restore r5
	.cfi_restore r6
	.cfi_restore r7

	bx	lr

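	/* Explanatory note: t1 holds the remaining bytes of string 1 and w2
	 * the corresponding bytes of string 2, aligned so that the next byte
	 * in string order is the one selected by the LSB mask.  The loop
	 * below compares one byte at a time, shifting both words with
	 * S2LOMEMEQ while the bytes are equal and non-NUL, and finally
	 * returns the difference of the first mismatching pair (0 if both
	 * strings ended together). */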
8:
	and	r2, t1, #LSB
	and	r0, w2, #LSB
	cmp	r0, #1
	it	cs
	cmpcs	r0, r2
	itt	eq
	S2LOMEMEQ	t1, t1, #8
	S2LOMEMEQ	w2, w2, #8
	beq	8b
	sub	r0, r2, r0

	/* Restore registers and stack.  */
	ldrd	r6, r7, [sp]
	ldrd	r4, r5, [sp, #8]
	adds	sp, sp, #16

	bx	lr
END(strcmp)