/* U1memcpy.S: UltraSPARC-I/II/IIi/IIe optimized memcpy.
 *
 * Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */

#ifdef __KERNEL__
#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE	g7
#else
#define GLOBAL_SPARE	g5
#define ASI_BLK_P 0xf0
#define FPRS_FEF  0x04
#ifdef MEMCPY_DEBUG
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
		 clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif
#endif

#ifndef EX_LD
#define EX_LD(x,y)	x
#endif
#ifndef EX_LD_FP
#define EX_LD_FP(x,y)	x
#endif

#ifndef EX_ST
#define EX_ST(x,y)	x
#endif
#ifndef EX_ST_FP
#define EX_ST_FP(x,y)	x
#endif

#ifndef LOAD
#define LOAD(type,addr,dest)	type [addr], dest
#endif

#ifndef LOAD_BLK
#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_P, dest
#endif

#ifndef STORE
#define STORE(type,src,addr)	type src, [addr]
#endif

#ifndef STORE_BLK
#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
#endif

#ifndef FUNC_NAME
#define FUNC_NAME	memcpy
#endif

#ifndef PREAMBLE
#define PREAMBLE
#endif

#ifndef XCC
#define XCC xcc
#endif

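/* FREG_FROB uses eight faligndata operations to turn nine consecutive
 * source double registers into one 64-byte block of destination-aligned
 * data in %f48-%f62, based on the byte offset latched earlier by
 * alignaddr.
 */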
#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9)		\
	faligndata		%f1, %f2, %f48;			\
	faligndata		%f2, %f3, %f50;			\
	faligndata		%f3, %f4, %f52;			\
	faligndata		%f4, %f5, %f54;			\
	faligndata		%f5, %f6, %f56;			\
	faligndata		%f6, %f7, %f58;			\
	faligndata		%f7, %f8, %f60;			\
	faligndata		%f8, %f9, %f62;

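/* MAIN_LOOP_CHUNK block-loads the next 64 source bytes into the fdest
 * register group while block-storing the previously aligned 64 bytes
 * from fsrc, then advances both pointers and falls out to jmptgt once
 * the block byte count in %GLOBAL_SPARE is exhausted.
 */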
#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, jmptgt)			\
	EX_LD_FP(LOAD_BLK(%src, %fdest), U1_gs_80_fp);			\
	EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp);			\
	add			%src, 0x40, %src;			\
	subcc			%GLOBAL_SPARE, 0x40, %GLOBAL_SPARE;	\
	be,pn			%xcc, jmptgt;				\
	 add			%dest, 0x40, %dest;			\

#define LOOP_CHUNK1(src, dest, branch_dest)		\
	MAIN_LOOP_CHUNK(src, dest, f0,  f48, branch_dest)
#define LOOP_CHUNK2(src, dest, branch_dest)		\
	MAIN_LOOP_CHUNK(src, dest, f16, f48, branch_dest)
#define LOOP_CHUNK3(src, dest, branch_dest)		\
	MAIN_LOOP_CHUNK(src, dest, f32, f48, branch_dest)

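/* STORE_SYNC and STORE_JUMP flush the final block store of a loop
 * instance; the membar #Sync in DO_SYNC is presumably there so the
 * outstanding block store completes before its %f48-%f62 source
 * registers are overwritten by the next FREG_FROB.
 */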
#define DO_SYNC			membar	#Sync;
#define STORE_SYNC(dest, fsrc)				\
	EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp);	\
	add			%dest, 0x40, %dest;	\
	DO_SYNC

#define STORE_JUMP(dest, fsrc, target)			\
	EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_40_fp);	\
	add			%dest, 0x40, %dest;	\
	ba,pt			%xcc, target;		\
	 nop;

#define FINISH_VISCHUNK(dest, f0, f1)			\
	subcc			%g3, 8, %g3;		\
	bl,pn			%xcc, 95f;		\
	 faligndata		%f0, %f1, %f48;		\
	EX_ST_FP(STORE(std, %f48, %dest), U1_g3_8_fp);	\
	add			%dest, 8, %dest;

#define UNEVEN_VISCHUNK_LAST(dest, f0, f1)	\
	subcc			%g3, 8, %g3;	\
	bl,pn			%xcc, 95f;	\
	 fsrc2			%f0, %f1;

#define UNEVEN_VISCHUNK(dest, f0, f1)		\
	UNEVEN_VISCHUNK_LAST(dest, f0, f1)	\
	ba,a,pt			%xcc, 93f;

	.register	%g2,#scratch
	.register	%g3,#scratch

	.text
#ifndef EX_RETVAL
#define EX_RETVAL(x)	x
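/* The U1_* entries below are exception fixup handlers, referenced as the
 * second argument of EX_LD/EX_ST and friends.  Each one computes, from
 * the registers live at the faulting access, how many bytes remain
 * uncopied and returns that count in %o0; the _fp variants also leave
 * VIS state via VISExitHalf first.
 */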
ENTRY(U1_g1_1_fp)
	VISExitHalf
	add		%g1, 1, %g1
	add		%g1, %g2, %g1
	retl
	 add		%g1, %o2, %o0
ENDPROC(U1_g1_1_fp)
ENTRY(U1_g2_0_fp)
	VISExitHalf
	retl
	 add		%g2, %o2, %o0
ENDPROC(U1_g2_0_fp)
ENTRY(U1_g2_8_fp)
	VISExitHalf
	add		%g2, 8, %g2
	retl
	 add		%g2, %o2, %o0
ENDPROC(U1_g2_8_fp)
ENTRY(U1_gs_0_fp)
	VISExitHalf
	add		%GLOBAL_SPARE, %g3, %o0
	retl
	 add		%o0, %o2, %o0
ENDPROC(U1_gs_0_fp)
ENTRY(U1_gs_80_fp)
	VISExitHalf
	add		%GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
	add		%GLOBAL_SPARE, %g3, %o0
	retl
	 add		%o0, %o2, %o0
ENDPROC(U1_gs_80_fp)
ENTRY(U1_gs_40_fp)
	VISExitHalf
	add		%GLOBAL_SPARE, 0x40, %GLOBAL_SPARE
	add		%GLOBAL_SPARE, %g3, %o0
	retl
	 add		%o0, %o2, %o0
ENDPROC(U1_gs_40_fp)
ENTRY(U1_g3_0_fp)
	VISExitHalf
	retl
	 add		%g3, %o2, %o0
ENDPROC(U1_g3_0_fp)
ENTRY(U1_g3_8_fp)
	VISExitHalf
	add		%g3, 8, %g3
	retl
	 add		%g3, %o2, %o0
ENDPROC(U1_g3_8_fp)
ENTRY(U1_o2_0_fp)
	VISExitHalf
	retl
	 mov		%o2, %o0
ENDPROC(U1_o2_0_fp)
ENTRY(U1_o2_1_fp)
	VISExitHalf
	retl
	 add		%o2, 1, %o0
ENDPROC(U1_o2_1_fp)
ENTRY(U1_gs_0)
	VISExitHalf
	retl
	 add		%GLOBAL_SPARE, %o2, %o0
ENDPROC(U1_gs_0)
ENTRY(U1_gs_8)
	VISExitHalf
	add		%GLOBAL_SPARE, %o2, %GLOBAL_SPARE
	retl
	 add		%GLOBAL_SPARE, 0x8, %o0
ENDPROC(U1_gs_8)
ENTRY(U1_gs_10)
	VISExitHalf
	add		%GLOBAL_SPARE, %o2, %GLOBAL_SPARE
	retl
	 add		%GLOBAL_SPARE, 0x10, %o0
ENDPROC(U1_gs_10)
ENTRY(U1_o2_0)
	retl
	 mov		%o2, %o0
ENDPROC(U1_o2_0)
ENTRY(U1_o2_8)
	retl
	 add		%o2, 8, %o0
ENDPROC(U1_o2_8)
ENTRY(U1_o2_4)
	retl
	 add		%o2, 4, %o0
ENDPROC(U1_o2_4)
ENTRY(U1_o2_1)
	retl
	 add		%o2, 1, %o0
ENDPROC(U1_o2_1)
ENTRY(U1_g1_0)
	retl
	 add		%g1, %o2, %o0
ENDPROC(U1_g1_0)
ENTRY(U1_g1_1)
	add		%g1, 1, %g1
	retl
	 add		%g1, %o2, %o0
ENDPROC(U1_g1_1)
ENTRY(U1_gs_0_o2_adj)
	and		%o2, 7, %o2
	retl
	 add		%GLOBAL_SPARE, %o2, %o0
ENDPROC(U1_gs_0_o2_adj)
ENTRY(U1_gs_8_o2_adj)
	and		%o2, 7, %o2
	add		%GLOBAL_SPARE, 8, %GLOBAL_SPARE
	retl
	 add		%GLOBAL_SPARE, %o2, %o0
ENDPROC(U1_gs_8_o2_adj)
#endif

	.align		64

	.globl		FUNC_NAME
	.type		FUNC_NAME,#function
FUNC_NAME:		/* %o0=dst, %o1=src, %o2=len */
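	/* Trap if 'len' has any bits set at or above bit 31, i.e.
	 * reject absurdly large lengths before doing any work.
	 */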
	srlx		%o2, 31, %g2
	cmp		%g2, 0
	tne		%xcc, 5
	PREAMBLE
	mov		%o0, %o4
	cmp		%o2, 0
	be,pn		%XCC, 85f
	 or		%o0, %o1, %o3
	cmp		%o2, 16
	blu,a,pn	%XCC, 80f
	 or		%o3, %o2, %o3

	cmp		%o2, (5 * 64)
	blu,pt		%XCC, 70f
	 andcc		%o3, 0x7, %g0

	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.  */
	VISEntry

	/* Is 'dst' already aligned on a 64-byte boundary? */
	andcc		%o0, 0x3f, %g2
	be,pt		%XCC, 2f

	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
	 * subtract this from 'len'.
	 */
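	/* Worked example: if 'dst' & 0x3f == 0x33, then %g2 = 0x40 - 0x33
	 * = 0x0d; %g1 = 5 single bytes are copied first, then %g2 & 0x38
	 * = 0x08 bytes go through the 8-byte faligndata loop, leaving
	 * 'dst' on a 64-byte boundary.
	 */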
	 sub		%o0, %o1, %GLOBAL_SPARE
	sub		%g2, 0x40, %g2
	sub		%g0, %g2, %g2
	sub		%o2, %g2, %o2
	andcc		%g2, 0x7, %g1
	be,pt		%icc, 2f
	 and		%g2, 0x38, %g2

1:	subcc		%g1, 0x1, %g1
	EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U1_g1_1_fp)
	EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE), U1_g1_1_fp)
	bgu,pt		%XCC, 1b
	 add		%o1, 0x1, %o1

	add		%o1, %GLOBAL_SPARE, %o0

2:	cmp		%g2, 0x0
	and		%o1, 0x7, %g1
	be,pt		%icc, 3f
	 alignaddr	%o1, %g0, %o1

	EX_LD_FP(LOAD(ldd, %o1, %f4), U1_g2_0_fp)
1:	EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U1_g2_0_fp)
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f4, %f6, %f0
	EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)
	be,pn		%icc, 3f
	 add		%o0, 0x8, %o0

	EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U1_g2_0_fp)
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f6, %f4, %f0
	EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)
	bne,pt		%icc, 1b
	 add		%o0, 0x8, %o0

	/* Destination is 64-byte aligned.  */
3:
	membar		  #LoadStore | #StoreStore | #StoreLoad

	subcc		%o2, 0x40, %GLOBAL_SPARE
	add		%o1, %g1, %g1
	andncc		%GLOBAL_SPARE, (0x40 - 1), %GLOBAL_SPARE
	srl		%g1, 3, %g2
	sub		%o2, %GLOBAL_SPARE, %g3
	andn		%o1, (0x40 - 1), %o1
	and		%g2, 7, %g2
	andncc		%g3, 0x7, %g3
	fsrc2		%f0, %f2
	sub		%g3, 0x8, %g3
	sub		%o2, %GLOBAL_SPARE, %o2

	add		%g1, %GLOBAL_SPARE, %g1
	subcc		%o2, %g3, %o2

	EX_LD_FP(LOAD_BLK(%o1, %f0), U1_gs_0_fp)
	add		%o1, 0x40, %o1
	add		%g1, %g3, %g1
	EX_LD_FP(LOAD_BLK(%o1, %f16), U1_gs_0_fp)
	add		%o1, 0x40, %o1
	sub		%GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
	EX_LD_FP(LOAD_BLK(%o1, %f32), U1_gs_80_fp)
	add		%o1, 0x40, %o1

	/* There are 8 instances of the unrolled loop,
	 * one for each possible alignment of the
	 * source buffer.  Each loop instance is 452
	 * bytes.
	 */
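	/* The shift/add sequence below computes %g2 * 452
	 * (452 = 4 * 113 = 4 * (7 * 16 + 1)), i.e. the byte offset of
	 * the loop instance for this source alignment; the PC-relative
	 * jmpl then dispatches into it.
	 */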
	sll		%g2, 3, %o3
	sub		%o3, %g2, %o3
	sllx		%o3, 4, %o3
	add		%o3, %g2, %o3
	sllx		%o3, 2, %g2
1:	rd		%pc, %o3
	add		%o3, %lo(1f - 1b), %o3
	jmpl		%o3 + %g2, %g0
	 nop

	.align		64
1:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
	LOOP_CHUNK1(o1, o0, 1f)
	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
	LOOP_CHUNK2(o1, o0, 2f)
	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
	LOOP_CHUNK3(o1, o0, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f0, %f2, %f48
1:	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
	STORE_SYNC(o0, f48)
	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
	STORE_JUMP(o0, f48, 40f)
2:	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
	STORE_SYNC(o0, f48)
	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
	STORE_JUMP(o0, f48, 48f)
3:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
	STORE_SYNC(o0, f48)
	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
	STORE_JUMP(o0, f48, 56f)

1:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
	LOOP_CHUNK1(o1, o0, 1f)
	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
	LOOP_CHUNK2(o1, o0, 2f)
	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
	LOOP_CHUNK3(o1, o0, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f2, %f4, %f48
1:	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
	STORE_SYNC(o0, f48)
	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
	STORE_JUMP(o0, f48, 41f)
2:	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
	STORE_SYNC(o0, f48)
	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
	STORE_JUMP(o0, f48, 49f)
3:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
	STORE_SYNC(o0, f48)
	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
	STORE_JUMP(o0, f48, 57f)

1:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
	LOOP_CHUNK1(o1, o0, 1f)
	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
	LOOP_CHUNK2(o1, o0, 2f)
	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
	LOOP_CHUNK3(o1, o0, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f4, %f6, %f48
1:	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
	STORE_SYNC(o0, f48)
	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
	STORE_JUMP(o0, f48, 42f)
2:	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
	STORE_SYNC(o0, f48)
	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
	STORE_JUMP(o0, f48, 50f)
3:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
	STORE_SYNC(o0, f48)
	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
	STORE_JUMP(o0, f48, 58f)

1:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
	LOOP_CHUNK1(o1, o0, 1f)
	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
	LOOP_CHUNK2(o1, o0, 2f)
	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
	LOOP_CHUNK3(o1, o0, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f6, %f8, %f48
1:	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
	STORE_SYNC(o0, f48)
	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
	STORE_JUMP(o0, f48, 43f)
2:	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
	STORE_SYNC(o0, f48)
	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
	STORE_JUMP(o0, f48, 51f)
3:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
	STORE_SYNC(o0, f48)
	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
	STORE_JUMP(o0, f48, 59f)

1:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
	LOOP_CHUNK1(o1, o0, 1f)
	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
	LOOP_CHUNK2(o1, o0, 2f)
	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
	LOOP_CHUNK3(o1, o0, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f8, %f10, %f48
1:	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
	STORE_SYNC(o0, f48)
	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
	STORE_JUMP(o0, f48, 44f)
2:	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
	STORE_SYNC(o0, f48)
	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
	STORE_JUMP(o0, f48, 52f)
3:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
	STORE_SYNC(o0, f48)
	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
	STORE_JUMP(o0, f48, 60f)

1:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
	LOOP_CHUNK1(o1, o0, 1f)
	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
	LOOP_CHUNK2(o1, o0, 2f)
	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
	LOOP_CHUNK3(o1, o0, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f10, %f12, %f48
1:	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
	STORE_SYNC(o0, f48)
	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
	STORE_JUMP(o0, f48, 45f)
2:	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
	STORE_SYNC(o0, f48)
	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
	STORE_JUMP(o0, f48, 53f)
3:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
	STORE_SYNC(o0, f48)
	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
	STORE_JUMP(o0, f48, 61f)

1:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
	LOOP_CHUNK1(o1, o0, 1f)
	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
	LOOP_CHUNK2(o1, o0, 2f)
	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
	LOOP_CHUNK3(o1, o0, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f12, %f14, %f48
1:	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
	STORE_SYNC(o0, f48)
	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
	STORE_JUMP(o0, f48, 46f)
2:	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
	STORE_SYNC(o0, f48)
	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
	STORE_JUMP(o0, f48, 54f)
3:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
	STORE_SYNC(o0, f48)
	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
	STORE_JUMP(o0, f48, 62f)

1:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
	LOOP_CHUNK1(o1, o0, 1f)
	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
	LOOP_CHUNK2(o1, o0, 2f)
	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
	LOOP_CHUNK3(o1, o0, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f14, %f16, %f48
1:	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
	STORE_SYNC(o0, f48)
	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
	STORE_JUMP(o0, f48, 47f)
2:	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
	STORE_SYNC(o0, f48)
	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
	STORE_JUMP(o0, f48, 55f)
3:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
	STORE_SYNC(o0, f48)
	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
	STORE_JUMP(o0, f48, 63f)

40:	FINISH_VISCHUNK(o0, f0,  f2)
41:	FINISH_VISCHUNK(o0, f2,  f4)
42:	FINISH_VISCHUNK(o0, f4,  f6)
43:	FINISH_VISCHUNK(o0, f6,  f8)
44:	FINISH_VISCHUNK(o0, f8,  f10)
45:	FINISH_VISCHUNK(o0, f10, f12)
46:	FINISH_VISCHUNK(o0, f12, f14)
47:	UNEVEN_VISCHUNK(o0, f14, f0)
48:	FINISH_VISCHUNK(o0, f16, f18)
49:	FINISH_VISCHUNK(o0, f18, f20)
50:	FINISH_VISCHUNK(o0, f20, f22)
51:	FINISH_VISCHUNK(o0, f22, f24)
52:	FINISH_VISCHUNK(o0, f24, f26)
53:	FINISH_VISCHUNK(o0, f26, f28)
54:	FINISH_VISCHUNK(o0, f28, f30)
55:	UNEVEN_VISCHUNK(o0, f30, f0)
56:	FINISH_VISCHUNK(o0, f32, f34)
57:	FINISH_VISCHUNK(o0, f34, f36)
58:	FINISH_VISCHUNK(o0, f36, f38)
59:	FINISH_VISCHUNK(o0, f38, f40)
60:	FINISH_VISCHUNK(o0, f40, f42)
61:	FINISH_VISCHUNK(o0, f42, f44)
62:	FINISH_VISCHUNK(o0, f44, f46)
63:	UNEVEN_VISCHUNK_LAST(o0, f46, f0)

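	/* 93: drains the remaining 8-byte chunks counted in %g3 with
	 * faligndata double-word copies; 95: then copies any remaining
	 * bytes one at a time before leaving VIS state.
	 */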
93:	EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_0_fp)
	add		%o1, 8, %o1
	subcc		%g3, 8, %g3
	faligndata	%f0, %f2, %f8
	EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
	bl,pn		%xcc, 95f
	 add		%o0, 8, %o0
	EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_0_fp)
	add		%o1, 8, %o1
	subcc		%g3, 8, %g3
	faligndata	%f2, %f0, %f8
	EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
	bge,pt		%xcc, 93b
	 add		%o0, 8, %o0

95:	brz,pt		%o2, 2f
	 mov		%g1, %o1

1:	EX_LD_FP(LOAD(ldub, %o1, %o3), U1_o2_0_fp)
	add		%o1, 1, %o1
	subcc		%o2, 1, %o2
	EX_ST_FP(STORE(stb, %o3, %o0), U1_o2_1_fp)
	bne,pt		%xcc, 1b
	 add		%o0, 1, %o0

2:	membar		#StoreLoad | #StoreStore
	VISExit
	retl
	 mov		EX_RETVAL(%o4), %o0

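	/* Medium-length, non-VIS path.  72: moves 16 bytes per iteration
	 * with integer ldx/stx when both pointers are 8-byte aligned;
	 * 75: byte-copies until the destination is 8-byte aligned and
	 * then either reuses 72/73 or drops into the shift-and-merge
	 * loop at 8: when the source is still misaligned.
	 */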
	.align		64
70:	/* 16 < len <= (5 * 64) */
	bne,pn		%XCC, 75f
	 sub		%o0, %o1, %o3

72:	andn		%o2, 0xf, %GLOBAL_SPARE
	and		%o2, 0xf, %o2
1:	EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U1_gs_0)
	EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U1_gs_0)
	subcc		%GLOBAL_SPARE, 0x10, %GLOBAL_SPARE
	EX_ST(STORE(stx, %o5, %o1 + %o3), U1_gs_10)
	add		%o1, 0x8, %o1
	EX_ST(STORE(stx, %g1, %o1 + %o3), U1_gs_8)
	bgu,pt		%XCC, 1b
	 add		%o1, 0x8, %o1
73:	andcc		%o2, 0x8, %g0
	be,pt		%XCC, 1f
	 nop
	EX_LD(LOAD(ldx, %o1, %o5), U1_o2_0)
	sub		%o2, 0x8, %o2
	EX_ST(STORE(stx, %o5, %o1 + %o3), U1_o2_8)
	add		%o1, 0x8, %o1
1:	andcc		%o2, 0x4, %g0
	be,pt		%XCC, 1f
	 nop
	EX_LD(LOAD(lduw, %o1, %o5), U1_o2_0)
	sub		%o2, 0x4, %o2
	EX_ST(STORE(stw, %o5, %o1 + %o3), U1_o2_4)
	add		%o1, 0x4, %o1
1:	cmp		%o2, 0
	be,pt		%XCC, 85f
	 nop
	ba,pt		%xcc, 90f
	 nop

75:	andcc		%o0, 0x7, %g1
	sub		%g1, 0x8, %g1
	be,pn		%icc, 2f
	 sub		%g0, %g1, %g1
	sub		%o2, %g1, %o2

1:	EX_LD(LOAD(ldub, %o1, %o5), U1_g1_0)
	subcc		%g1, 1, %g1
	EX_ST(STORE(stb, %o5, %o1 + %o3), U1_g1_1)
	bgu,pt		%icc, 1b
	 add		%o1, 1, %o1

2:	add		%o1, %o3, %o0
	andcc		%o1, 0x7, %g1
	bne,pt		%icc, 8f
	 sll		%g1, 3, %g1

	cmp		%o2, 16
	bgeu,pt		%icc, 72b
	 nop
	ba,a,pt		%xcc, 73b

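	/* 8: handles a source that is not 8-byte aligned once the
	 * destination is: it reads aligned 8-byte words and merges each
	 * adjacent pair with sllx/srlx (%g1 = misalignment in bits,
	 * %o3 = 64 - %g1), producing one aligned 8-byte store per
	 * iteration.
	 */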
8:	mov		64, %o3
	andn		%o1, 0x7, %o1
	EX_LD(LOAD(ldx, %o1, %g2), U1_o2_0)
	sub		%o3, %g1, %o3
	andn		%o2, 0x7, %GLOBAL_SPARE
	sllx		%g2, %g1, %g2
1:	EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U1_gs_0_o2_adj)
	subcc		%GLOBAL_SPARE, 0x8, %GLOBAL_SPARE
	add		%o1, 0x8, %o1
	srlx		%g3, %o3, %o5
	or		%o5, %g2, %o5
	EX_ST(STORE(stx, %o5, %o0), U1_gs_8_o2_adj)
	add		%o0, 0x8, %o0
	bgu,pt		%icc, 1b
	 sllx		%g3, %g1, %g2

	srl		%g1, 3, %g1
	andcc		%o2, 0x7, %o2
	be,pn		%icc, 85f
	 add		%o1, %g1, %o1
	ba,pt		%xcc, 90f
	 sub		%o0, %o1, %o3

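	/* Small-copy path: use 4-byte word copies when src, dst and len
	 * are all 4-byte aligned, otherwise fall through to the byte
	 * loop at 90.
	 */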
	.align		64
80:	/* 0 < len <= 16 */
	andcc		%o3, 0x3, %g0
	bne,pn		%XCC, 90f
	 sub		%o0, %o1, %o3

1:	EX_LD(LOAD(lduw, %o1, %g1), U1_o2_0)
	subcc		%o2, 4, %o2
	EX_ST(STORE(stw, %g1, %o1 + %o3), U1_o2_4)
	bgu,pt		%XCC, 1b
	 add		%o1, 4, %o1

85:	retl
	 mov		EX_RETVAL(%o4), %o0

	.align		32
90:	EX_LD(LOAD(ldub, %o1, %g1), U1_o2_0)
	subcc		%o2, 1, %o2
	EX_ST(STORE(stb, %g1, %o1 + %o3), U1_o2_1)
	bgu,pt		%XCC, 90b
	 add		%o1, 1, %o1
	retl
	 mov		EX_RETVAL(%o4), %o0

	.size		FUNC_NAME, .-FUNC_NAME