/*
 * Copyright (C) Paul Mackerras 1997.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * NOTE: this code runs in 32 bit mode and is packaged as ELF32.
 */

#include "ppc_asm.h"

	.text
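
/*
 * strcpy(dst, src)
 *
 * Copy the NUL-terminated string at src (r4), including the
 * terminating NUL, to dst (r3).  Returns dst in r3.
 */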
	.globl	strcpy
strcpy:
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r5)
	bne	1b
	blr

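/*
 * strncpy(dst, src, n)
 *
 * Copy at most n (r5) bytes of the string at src (r4) to dst (r3),
 * stopping after a NUL has been copied.  Unlike ISO C strncpy, the
 * remainder of dst is not zero-padded.  Returns dst.
 */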
	.globl	strncpy
strncpy:
	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r6)
	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
	blr

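/*
 * strcat(dst, src)
 *
 * Append the NUL-terminated string at src (r4) to the end of the
 * string at dst (r3), overwriting dst's terminating NUL.  Returns dst.
 */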
	.globl	strcat
strcat:
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r5)
	cmpwi	0,r0,0
	bne	1b
	addi	r5,r5,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r5)
	bne	1b
	blr

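/*
 * strchr(s, c)
 *
 * Return a pointer to the first occurrence of the byte c (r4) in the
 * string at s (r3), or NULL if the terminating NUL is reached first.
 */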
	.globl	strchr
strchr:
	addi	r3,r3,-1
1:	lbzu	r0,1(r3)
	cmpw	0,r0,r4
	beqlr
	cmpwi	0,r0,0
	bne	1b
	li	r3,0
	blr

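/*
 * strcmp(s1, s2)
 *
 * Compare the strings at s1 (r3) and s2 (r4) byte by byte.  Returns
 * zero if they are equal, otherwise the difference between the first
 * pair of differing bytes.
 */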
	.globl	strcmp
strcmp:
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r5)
	cmpwi	1,r3,0
	lbzu	r0,1(r4)
	subf.	r3,r0,r3
	beqlr	1
	beq	1b
	blr

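/*
 * strncmp(s1, s2, n)
 *
 * As strcmp, but compare at most n (r5) bytes; the caller is
 * expected to pass n > 0.
 */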
	.globl	strncmp
strncmp:
	mtctr	r5
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r5)
	cmpwi	1,r3,0
	lbzu	r0,1(r4)
	subf.	r3,r0,r3
	beqlr	1
	bdnzt	eq,1b
	blr

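/*
 * strlen(s)
 *
 * Return the number of bytes in the string at s (r3), not counting
 * the terminating NUL.
 */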
	.globl	strlen
strlen:
	addi	r4,r3,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	bne	1b
	subf	r3,r3,r4
	blr

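/*
 * memset(s, c, n)
 *
 * Fill n (r5) bytes at s (r3) with the byte c (r4), using word
 * stores where it can and byte stores for any remainder.  Returns s.
 */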
	.globl	memset
memset:
	rlwimi	r4,r4,8,16,23
	rlwimi	r4,r4,16,0,15
	addi	r6,r3,-4
	cmplwi	0,r5,4
	blt	7f
	stwu	r4,4(r6)
	beqlr
	andi.	r0,r6,3
	add	r5,r0,r5
	subf	r6,r0,r6
	rlwinm	r0,r5,32-2,2,31
	mtctr	r0
	bdz	6f
1:	stwu	r4,4(r6)
	bdnz	1b
6:	andi.	r5,r5,3
7:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r6,3
8:	stbu	r4,1(r6)
	bdnz	8b
	blr

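/*
 * memmove(dst, src, n)
 *
 * Copy n (r5) bytes from src (r4) to dst (r3), coping with
 * overlapping regions: if dst is above src the copy is done
 * backwards, otherwise it falls through into memcpy.
 */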
	.globl	memmove
memmove:
	cmplw	0,r3,r4
	bgt	backwards_memcpy
	/* fall through */

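/*
 * memcpy(dst, src, n)
 *
 * Copy n (r5) bytes from src (r4) to dst (r3), moving two words
 * per iteration when both pointers can be word-aligned and falling
 * back to byte copies otherwise.  Returns dst.
 */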
	.globl	memcpy
memcpy:
	rlwinm.	r7,r5,32-3,3,31		/* r7 = r5 >> 3 */
	addi	r6,r3,-4
	addi	r4,r4,-4
	beq	3f			/* if less than 8 bytes to do */
	andi.	r0,r6,3			/* get dest word aligned */
	mtctr	r7
	bne	5f
	andi.	r0,r4,3			/* check src word aligned too */
	bne	3f
1:	lwz	r7,4(r4)
	lwzu	r8,8(r4)
	stw	r7,4(r6)
	stwu	r8,8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,4(r4)
	addi	r5,r5,-4
	stwu	r0,4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r4,r4,3
	addi	r6,r6,3
4:	lbzu	r0,1(r4)
	stbu	r0,1(r6)
	bdnz	4b
	blr
5:	subfic	r0,r0,4
	cmpw	cr1,r0,r5
	add	r7,r0,r4
	andi.	r7,r7,3			/* will source be word-aligned too? */
	ble	cr1,3b
	bne	3b			/* do byte-by-byte if not */
	mtctr	r0
6:	lbz	r7,4(r4)
	addi	r4,r4,1
	stb	r7,4(r6)
	addi	r6,r6,1
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31
	beq	2b
	mtctr	r7
	b	1b

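/*
 * backwards_memcpy(dst, src, n)
 *
 * As memcpy, but copy the n (r5) bytes starting from the end of the
 * buffers and working down, so that overlapping moves with dst above
 * src are handled correctly.
 */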
	.globl	backwards_memcpy
backwards_memcpy:
	rlwinm.	r7,r5,32-3,3,31		/* r7 = r5 >> 3 */
	add	r6,r3,r5
	add	r4,r4,r5
	beq	3f
	andi.	r0,r6,3
	mtctr	r7
	bne	5f
	andi.	r0,r4,3
	bne	3f
1:	lwz	r7,-4(r4)
	lwzu	r8,-8(r4)
	stw	r7,-4(r6)
	stwu	r8,-8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,-4(r4)
	subi	r5,r5,4
	stwu	r0,-4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
4:	lbzu	r0,-1(r4)
	stbu	r0,-1(r6)
	bdnz	4b
	blr
5:	cmpw	cr1,r0,r5
	subf	r7,r0,r4
	andi.	r7,r7,3
	ble	cr1,3b
	bne	3b
	mtctr	r0
6:	lbzu	r7,-1(r4)
	stbu	r7,-1(r6)
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31
	beq	2b
	mtctr	r7
	b	1b

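/*
 * memchr(s, c, n)
 *
 * Scan the first n (r5) bytes at s (r3) for the byte c (r4).
 * Returns a pointer to the matching byte, or NULL if no match is
 * found; n is expected to be positive.
 */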
	.globl	memchr
memchr:
	cmpwi	0,r5,0
	blelr
	mtctr	r5
	addi	r3,r3,-1
1:	lbzu	r0,1(r3)
	cmpw	r0,r4
	beqlr
	bdnz	1b
	li	r3,0
	blr

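/*
 * memcmp(s1, s2, n)
 *
 * Compare n (r5) bytes at s1 (r3) and s2 (r4).  Returns zero if they
 * are equal (or if n <= 0), otherwise the difference between the
 * first pair of differing bytes.
 */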
	.globl	memcmp
memcmp:
	cmpwi	0,r5,0
	ble	2f
	mtctr	r5
	addi	r6,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r6)
	lbzu	r0,1(r4)
	subf.	r3,r0,r3
	bdnzt	2,1b
	blr
2:	li	r3,0
	blr


/*
 * Flush the dcache and invalidate the icache for a range of addresses.
 *
 * flush_cache(addr, len)
 */
	.global	flush_cache
flush_cache:
	addi	4,4,0x1f	/* len = (len + 0x1f) / 0x20 */
	rlwinm.	4,4,27,5,31
	mtctr	4
	beqlr
1:	dcbf	0,3
	icbi	0,3
	addi	3,3,0x20
	bdnz	1b
	sync
	isync
	blr