1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
4 *
5 * kernel entry points (interruptions, system call wrappers)
6 *  Copyright (C) 1999,2000 Philipp Rumpf
7 *  Copyright (C) 1999 SuSE GmbH Nuernberg
8 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
9 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
10 */
11
12#include <asm/asm-offsets.h>
13
14/* we have the following possibilities to act on an interruption:
15 *  - handle in assembly and use shadowed registers only
16 *  - save registers to kernel stack and handle in assembly or C */
17
18
19#include <asm/psw.h>
20#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
21#include <asm/assembly.h>	/* for LDREG/STREG defines */
22#include <asm/signal.h>
23#include <asm/unistd.h>
24#include <asm/ldcw.h>
25#include <asm/traps.h>
26#include <asm/thread_info.h>
27#include <asm/alternative.h>
28
29#include <linux/linkage.h>
30#include <linux/pgtable.h>
31
32#ifdef CONFIG_64BIT
33	.level 2.0w
34#else
35	.level 2.0
36#endif
37
38	/* Get aligned page_table_lock address for this mm from cr28/tr4 */
39	.macro  get_ptl reg
40	mfctl	%cr28,\reg
41	.endm
42
43	/* space_to_prot macro creates a prot id from a space id */
44
45#if (SPACEID_SHIFT) == 0
46	.macro  space_to_prot spc prot
47	depd,z  \spc,62,31,\prot
48	.endm
49#else
50	.macro  space_to_prot spc prot
51	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
52	.endm
53#endif
54
55	/* Switch to virtual mapping, trashing only %r1 */
56	.macro  virt_map
	/* Entered in real mode: zero %sr4-%sr6 (kernel space ids), build
	 * KERNEL_PSW in %ipsw, point cr18 (IIAOQ) at the local label 4:
	 * below, and rfir there so execution resumes translated. */
57	/* pcxt_ssm_bug */
58	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation */
59	mtsp	%r0, %sr4
60	mtsp	%r0, %sr5
61	mtsp	%r0, %sr6
62	tovirt_r1 %r29
63	load32	KERNEL_PSW, %r1
64
65	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
66	mtctl	%r0, %cr17	/* Clear IIASQ tail */
67	mtctl	%r0, %cr17	/* Clear IIASQ head */
68	mtctl	%r1, %ipsw
69	load32	4f, %r1
70	mtctl	%r1, %cr18	/* Set IIAOQ tail */
71	ldo	4(%r1), %r1
72	mtctl	%r1, %cr18	/* Set IIAOQ head */
73	rfir
74	nop
754:
76	.endm
77
78	/*
79	 * The "get_stack" macros are responsible for determining the
80	 * kernel stack value.
81	 *
82	 *      If sr7 == 0
83	 *          Already using a kernel stack, so call the
84	 *          get_stack_use_r30 macro to push a pt_regs structure
85	 *          on the stack, and store registers there.
86	 *      else
87	 *          Need to set up a kernel stack, so call the
88	 *          get_stack_use_cr30 macro to set up a pointer
89	 *          to the pt_regs structure contained within the
90	 *          task pointer pointed to by cr30. Set the stack
91	 *          pointer to point to the end of the task structure.
92	 *
93	 * Note that we use shadowed registers for temps until
94	 * we can save %r26 and %r29. %r26 is used to preserve
95	 * %r8 (a shadowed register) which temporarily contained
96	 * either the fault type ("code") or the eirr. We need
97	 * to use a non-shadowed register to carry the value over
98	 * the rfir in virt_map. We use %r26 since this value winds
99	 * up being passed as the argument to either do_cpu_irq_mask
100	 * or handle_interruption. %r29 is used to hold a pointer to
101	 * the register save area, and once again, it needs to
102	 * be a non-shadowed register so that it survives the rfir.
103	 *
104	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
105	 */
106
107	.macro  get_stack_use_cr30
108
109	/* we save the registers in the task struct */
	/* Caller preloaded %r16 with the interrupted %sr7; it is parked in
	 * %sr3, saved to PT_SR7, and %sr7 is set to 0 (kernel space).
	 * On exit: %r30 = new kernel stack top, %r29 = physical &pt_regs. */
110
111	copy	%r30, %r17
112	mfctl   %cr30, %r1
113	ldo	THREAD_SZ_ALGN(%r1), %r30
114	mtsp	%r0,%sr7
115	mtsp	%r16,%sr3
116	tophys  %r1,%r9
117	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
118	tophys  %r1,%r9
119	ldo     TASK_REGS(%r9),%r9
120	STREG   %r17,PT_GR30(%r9)
121	STREG   %r29,PT_GR29(%r9)
122	STREG   %r26,PT_GR26(%r9)
123	STREG	%r16,PT_SR7(%r9)
124	copy    %r9,%r29
125	.endm
126
127	.macro  get_stack_use_r30
128
129	/* we put a struct pt_regs on the stack and save the registers there */
	/* Already on a kernel stack: push a pt_regs frame on %r30.
	 * On exit: %r29 = physical address of the new pt_regs. */
130
131	tophys  %r30,%r9
132	copy	%r30,%r1
133	ldo	PT_SZ_ALGN(%r30),%r30
134	STREG   %r1,PT_GR30(%r9)
135	STREG   %r29,PT_GR29(%r9)
136	STREG   %r26,PT_GR26(%r9)
137	STREG	%r16,PT_SR7(%r9)
138	copy    %r9,%r29
139	.endm
140
	/* Undo the get_stack_* macros: reload %r1 and %r30, and %r29 last
	 * (we are still using %r29 as the pt_regs pointer). */
141	.macro  rest_stack
142	LDREG   PT_GR1(%r29), %r1
143	LDREG   PT_GR30(%r29),%r30
144	LDREG   PT_GR29(%r29),%r29
145	.endm
146
147	/* default interruption handler
148	 * (calls traps.c:handle_interruption) */
	/* \code lands in shadowed %r8, which intr_save copies to %r26 */
149	.macro	def code
150	b	intr_save
151	ldi     \code, %r8
152	.align	32
153	.endm
154
155	/* Interrupt interruption handler
156	 * (calls irq.c:do_cpu_irq_mask) */
	/* the delay slot preloads %r16 with the interrupted %sr7, which
	 * intr_extint tests to pick the right stack */
157	.macro	extint code
158	b	intr_extint
159	mfsp    %sr7,%r16
160	.align	32
161	.endm
162
163	.import	os_hpmc, code
164
165	/* HPMC handler */
166	.macro	hpmc code
167	nop			/* must be a NOP, will be patched later */
168	load32	PA(os_hpmc), %r3
169	bv,n	0(%r3)
170	nop
171	.word	0		/* checksum (will be patched) */
172	.word	0		/* address of handler */
173	.word	0		/* length of handler */
174	.endm
175
176	/*
177	 * Performance Note: Instructions will be moved up into
178	 * this part of the code later on, once we are sure
179	 * that the tlb miss handlers are close to final form.
180	 */
181
182	/* Register definitions for tlb miss handler macros */
	/* Each vector macro below loads the faulting space and address into
	 * the shadowed registers spc/va; the second mfctl executes in the
	 * branch's delay slot.  The trailing .align 32 keeps every vector in
	 * its own 32-byte interruption-vector slot. */
183
184	va  = r8	/* virtual address for which the trap occurred */
185	spc = r24	/* space for which the trap occurred */
186
187#ifndef CONFIG_64BIT
188
189	/*
190	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
191	 */
192
193	.macro	itlb_11 code
194
195	mfctl	%pcsq, spc
196	b	itlb_miss_11
197	mfctl	%pcoq, va
198
199	.align		32
200	.endm
201#endif
202
203	/*
204	 * itlb miss interruption handler (parisc 2.0)
205	 */
206
207	.macro	itlb_20 code
208	mfctl	%pcsq, spc
209#ifdef CONFIG_64BIT
210	b       itlb_miss_20w
211#else
212	b	itlb_miss_20
213#endif
214	mfctl	%pcoq, va
215
216	.align		32
217	.endm
218
219#ifndef CONFIG_64BIT
220	/*
221	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
222	 */
223
224	.macro	naitlb_11 code
225
226	mfctl	%isr,spc
227	b	naitlb_miss_11
228	mfctl 	%ior,va
229
230	.align		32
231	.endm
232#endif
233
234	/*
235	 * naitlb miss interruption handler (parisc 2.0)
236	 */
237
238	.macro	naitlb_20 code
239
240	mfctl	%isr,spc
241#ifdef CONFIG_64BIT
242	b       naitlb_miss_20w
243#else
244	b	naitlb_miss_20
245#endif
246	mfctl 	%ior,va
247
248	.align		32
249	.endm
250
251#ifndef CONFIG_64BIT
252	/*
253	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
254	 */
255
256	.macro	dtlb_11 code
257
258	mfctl	%isr, spc
259	b	dtlb_miss_11
260	mfctl	%ior, va
261
262	.align		32
263	.endm
264#endif
265
266	/*
267	 * dtlb miss interruption handler (parisc 2.0)
268	 */
269
270	.macro	dtlb_20 code
271
272	mfctl	%isr, spc
273#ifdef CONFIG_64BIT
274	b       dtlb_miss_20w
275#else
276	b	dtlb_miss_20
277#endif
278	mfctl	%ior, va
279
280	.align		32
281	.endm
282
283#ifndef CONFIG_64BIT
284	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
285
286	.macro	nadtlb_11 code
287
288	mfctl	%isr,spc
289	b       nadtlb_miss_11
290	mfctl	%ior,va
291
292	.align		32
293	.endm
294#endif
295
296	/* nadtlb miss interruption handler (parisc 2.0) */
297
298	.macro	nadtlb_20 code
299
300	mfctl	%isr,spc
301#ifdef CONFIG_64BIT
302	b       nadtlb_miss_20w
303#else
304	b       nadtlb_miss_20
305#endif
306	mfctl	%ior,va
307
308	.align		32
309	.endm
310
311#ifndef CONFIG_64BIT
312	/*
313	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
314	 */
315
316	.macro	dbit_11 code
317
318	mfctl	%isr,spc
319	b	dbit_trap_11
320	mfctl	%ior,va
321
322	.align		32
323	.endm
324#endif
325
326	/*
327	 * dirty bit trap interruption handler (parisc 2.0)
328	 */
329
330	.macro	dbit_20 code
331
332	mfctl	%isr,spc
333#ifdef CONFIG_64BIT
334	b       dbit_trap_20w
335#else
336	b	dbit_trap_20
337#endif
338	mfctl	%ior,va
339
340	.align		32
341	.endm
342
343	/* In LP64, the space contains part of the upper 32 bits of the
344	 * fault.  We have to extract this and place it in the va,
345	 * zeroing the corresponding bits in the space register */
346	.macro		space_adjust	spc,va,tmp
347#ifdef CONFIG_64BIT
348	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
349	depd		%r0,63,SPACEID_SHIFT,\spc
350	depd		\tmp,31,SPACEID_SHIFT,\va
351#endif
352	.endm
353
354	.import		swapper_pg_dir,code
355
356	/* Get the pgd.  For faults on space zero (kernel space), this
357	 * is simply swapper_pg_dir.  For user space faults, the
358	 * pgd is stored in %cr25 */
	/* the or,COND(=) nullifies the mfctl when \spc is zero, so kernel
	 * faults keep the swapper_pg_dir address loaded just above */
359	.macro		get_pgd		spc,reg
360	ldil		L%PA(swapper_pg_dir),\reg
361	ldo		R%PA(swapper_pg_dir)(\reg),\reg
362	or,COND(=)	%r0,\spc,%r0
363	mfctl		%cr25,\reg
364	.endm
365
366	/*
367		space_check(spc,tmp,fault)
368
369		spc - The space we saw the fault with.
370		tmp - The place to store the current space.
371		fault - Function to call on failure.
372
373		Only allow faults on different spaces from the
374		currently active one if we're the kernel
375
376	*/
377	.macro		space_check	spc,tmp,fault
378	mfsp		%sr7,\tmp
379	/* check against %r0 which is same value as LINUX_GATEWAY_SPACE */
380	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
381					 * as kernel, so defeat the space
382					 * check if it is */
383	copy		\spc,\tmp
384	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
385	cmpb,COND(<>),n	\tmp,\spc,\fault
386	.endm
387
388	/* Look up a PTE in a 2-Level scheme (faulting at each
389	 * level if the entry isn't present
390	 *
391	 * NOTE: we use ldw even for LP64, since the short pointers
392	 * can address up to 1TB
393	 */
	/* Clobbers \index; on success \pmd is left pointing at the pte
	 * entry for \va (see the "pmd is now pte" shladd below).  Branches
	 * to \fault when the mid-level entry is not present. */
394	.macro		L2_ptep	pmd,pte,index,va,fault
395#if CONFIG_PGTABLE_LEVELS == 3
396	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
397#else
398# if defined(CONFIG_64BIT)
399	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
400  #else
401  # if PAGE_SIZE > 4096
402	extru		\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
403  # else
404	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
405  # endif
406# endif
407#endif
408	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
409#if CONFIG_PGTABLE_LEVELS < 3
410	copy		%r0,\pte
411#endif
412	ldw,s		\index(\pmd),\pmd
413	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
414	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
415	SHLREG		\pmd,PxD_VALUE_SHIFT,\pmd
416	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
417	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
418	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
419	.endm
420
421	/* Look up PTE in a 3-Level scheme. */
	/* Resolves the top (pgd) level, then falls through to L2_ptep for
	 * the remaining levels. */
422	.macro		L3_ptep pgd,pte,index,va,fault
423#if CONFIG_PGTABLE_LEVELS == 3
424	copy		%r0,\pte
425	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
426	ldw,s		\index(\pgd),\pgd
427	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
428	shld		\pgd,PxD_VALUE_SHIFT,\pgd
429#endif
430	L2_ptep		\pgd,\pte,\index,\va,\fault
431	.endm
432
433	/* Acquire page_table_lock and check page is present. */
	/* The 98:..99: range is patched to NOPs via ALTERNATIVE on !SMP.
	 * Kernel-space faults (\spc == 0) skip the lock entirely (2f). */
434	.macro		ptl_lock	spc,ptp,pte,tmp,tmp1,fault
435#ifdef CONFIG_TLB_PTLOCK
43698:	cmpib,COND(=),n	0,\spc,2f
437	get_ptl		\tmp
	/* LDCW reads-and-zeroes the lock word; zero means contended, spin */
4381:	LDCW		0(\tmp),\tmp1
439	cmpib,COND(=)	0,\tmp1,1b
440	nop
441	LDREG		0(\ptp),\pte
442	bb,<,n		\pte,_PAGE_PRESENT_BIT,3f
	/* not present: release the lock (store in the delay slot), fault */
443	b		\fault
444	stw		\spc,0(\tmp)
44599:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
446#endif
4472:	LDREG		0(\ptp),\pte
448	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
4493:
450	.endm
451
452	/* Release page_table_lock without reloading lock address.
453	   Note that the values in the register spc are limited to
454	   NR_SPACE_IDS (262144). Thus, the stw instruction always
455	   stores a nonzero value even when register spc is 64 bits.
456	   We use an ordered store to ensure all prior accesses are
457	   performed prior to releasing the lock. */
	/* the or,COND(=) nullifies the store when \spc is zero (kernel path
	 * never took the lock) */
458	.macro		ptl_unlock0	spc,tmp
459#ifdef CONFIG_TLB_PTLOCK
46098:	or,COND(=)	%r0,\spc,%r0
461	stw,ma		\spc,0(\tmp)
46299:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
463#endif
464	.endm
465
466	/* Release page_table_lock. */
	/* Same as ptl_unlock0 but re-fetches the lock address into \tmp. */
467	.macro		ptl_unlock1	spc,tmp
468#ifdef CONFIG_TLB_PTLOCK
46998:	get_ptl		\tmp
470	ptl_unlock0	\spc,\tmp
47199:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
472#endif
473	.endm
474
475	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
476	 * don't needlessly dirty the cache line if it was already set */
	/* the and,COND(<>) nullifies the store when _PAGE_ACCESSED was
	 * already set in \pte */
477	.macro		update_accessed	ptp,pte,tmp,tmp1
478	ldi		_PAGE_ACCESSED,\tmp1
479	or		\tmp1,\pte,\tmp
480	and,COND(<>)	\tmp1,\pte,%r0
481	STREG		\tmp,0(\ptp)
482	.endm
483
484	/* Set the dirty bit (and accessed bit).  No need to be
485	 * clever, this is only used from the dirty fault */
486	.macro		update_dirty	ptp,pte,tmp
487	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
488	or		\tmp,\pte,\pte
489	STREG		\pte,0(\ptp)
490	.endm
491
492	/* We have (depending on the page size):
493	 * - 38 to 52-bit Physical Page Number
494	 * - 12 to 26-bit page offset
495	 */
496	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
497	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
498	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
499	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)
500	#define PFN_START_BIT	(63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)
501
502	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	/* With CONFIG_HUGETLB_PAGE the huge-page size encoding is deposited
	 * instead when _PAGE_HPAGE_BIT is set (the extrd,u,*= nullifies the
	 * second depdi otherwise). */
503	.macro		convert_for_tlb_insert20 pte,tmp
504#ifdef CONFIG_HUGETLB_PAGE
505	copy		\pte,\tmp
506	extrd,u		\tmp,PFN_START_BIT,PFN_START_BIT+1,\pte
507
508	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
509				(63-58)+PAGE_ADD_SHIFT,\pte
510	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
511	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
512				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
513#else /* Huge pages disabled */
514	extrd,u		\pte,PFN_START_BIT,PFN_START_BIT+1,\pte
515	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
516				(63-58)+PAGE_ADD_SHIFT,\pte
517#endif
518	.endm
519
520	/* Convert the pte and prot to tlb insertion values.  How
521	 * this happens is quite subtle, read below */
522	.macro		make_insert_tlb	spc,pte,prot,tmp
523	space_to_prot   \spc \prot        /* create prot id from space */
524	/* The following is the real subtlety.  This is depositing
525	 * T <-> _PAGE_REFTRAP
526	 * D <-> _PAGE_DIRTY
527	 * B <-> _PAGE_DMB (memory break)
528	 *
529	 * Then incredible subtlety: The access rights are
530	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
531	 * See 3-14 of the parisc 2.0 manual
532	 *
533	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
534	 * trigger an access rights trap in user space if the user
535	 * tries to read an unreadable page */
536	depd            \pte,8,7,\prot
537
538	/* PAGE_USER indicates the page can be read with user privileges,
539	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
540	 * contains _PAGE_READ) */
	/* the ,*= completers nullify the following deposit unless the
	 * tested pte bit is set */
541	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
542	depdi		7,11,3,\prot
543	/* If we're a gateway page, drop PL2 back to zero for promotion
544	 * to kernel privilege (so we can execute the page as kernel).
545	 * Any privilege promotion page always denies read and write */
546	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
547	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
548
549	/* Enforce uncacheable pages.
550	 * This should ONLY be used for MMIO on PA 2.0 machines.
551	 * Memory/DMA is cache coherent on all PA2.0 machines we support
552	 * (that means T-class is NOT supported) and the memory controllers
553	 * on most of those machines only handle cache transactions.
554	 */
555	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
556	depdi		1,12,1,\prot
557
558	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
559	convert_for_tlb_insert20 \pte \tmp
560	.endm
561
562	/* Identical macro to make_insert_tlb above, except it
563	 * makes the tlb entry for the differently formatted pa11
564	 * insertion instructions */
565	.macro		make_insert_tlb_11	spc,pte,prot
566	zdep		\spc,30,15,\prot
567	dep		\pte,8,7,\prot
568	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
569	depi		1,12,1,\prot
570	extru,=         \pte,_PAGE_USER_BIT,1,%r0
571	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
572	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
573	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
574
575	/* Get rid of prot bits and convert to page addr for iitlba */
576
577	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
578	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
579	.endm
580
581	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
582	 * to extend into I/O space if the address is 0xfXXXXXXX
583	 * so we extend the f's into the top word of the pte in
584	 * this case */
	/* the addi,<> nullifies the sign-extension unless the extracted
	 * 4-bit field is all ones (\tmp == -1, i.e. address is 0xfXXXXXXX) */
585	.macro		f_extend	pte,tmp
586	extrd,s		\pte,42,4,\tmp
587	addi,<>		1,\tmp,%r0
588	extrd,s		\pte,63,25,\pte
589	.endm
590
591	/* The alias region is an 8MB aligned 16MB to do clear and
592	 * copy user pages at addresses congruent with the user
593	 * virtual address.
594	 *
595	 * To use the alias page, you set %r26 up with the to TLB
596	 * entry (identifying the physical page) and %r23 up with
597	 * the from tlb entry (or nothing if only a to entry---for
598	 * clear_user_page_asm) */
	/* Branches to \fault unless \spc is zero (kernel) and \va falls
	 * inside the TMPALIAS_MAP_START window; otherwise leaves \pte/\prot
	 * ready for the TLB insert done by the caller. */
599	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
600	cmpib,COND(<>),n 0,\spc,\fault
601	ldil		L%(TMPALIAS_MAP_START),\tmp
602#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
603	/* on LP64, ldi will sign extend into the upper 32 bits,
604	 * which is behaviour we don't want */
605	depdi		0,31,32,\tmp
606#endif
607	copy		\va,\tmp1
608	depi		0,31,23,\tmp1
609	cmpb,COND(<>),n	\tmp,\tmp1,\fault
610	mfctl		%cr19,\tmp	/* iir */
611	/* get the opcode (first six bits) into \tmp */
612	extrw,u		\tmp,5,6,\tmp
613	/*
614	 * Only setting the T bit prevents data cache movein
615	 * Setting access rights to zero prevents instruction cache movein
616	 *
617	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
618	 * to type field and _PAGE_READ goes to top bit of PL1
619	 */
620	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
621	/*
622	 * so if the opcode is one (i.e. this is a memory management
623	 * instruction) nullify the next load so \prot is only T.
624	 * Otherwise this is a normal data operation
625	 */
626	cmpiclr,=	0x01,\tmp,%r0
627	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
	/* \patype selects the 64-bit (20) or 32-bit (11) deposit form */
628.ifc \patype,20
629	depd,z		\prot,8,7,\prot
630.else
631.ifc \patype,11
632	depw,z		\prot,8,7,\prot
633.else
634	.error "undefined PA type to do_alias"
635.endif
636.endif
637	/*
638	 * OK, it is in the temp alias region, check whether "from" or "to".
639	 * Check "subtle" note in pacache.S re: r23/r26.
640	 */
641#ifdef CONFIG_64BIT
642	extrd,u,*=	\va,41,1,%r0
643#else
644	extrw,u,=	\va,9,1,%r0
645#endif
	/* pick the "from" entry (%r23) or fall through to "to" (%r26) */
646	or,COND(tr)	%r23,%r0,\pte
647	or		%r26,%r0,\pte
648	.endm
649
650
651	/*
652	 * Fault_vectors are architecturally required to be aligned on a 2K
653	 * boundary
654	 */
655
656	.section .text.hot
657	.align 2048
658
ENTRY(fault_vector_20)
	/* PA 2.0 interruption vector table: one 32-byte-aligned slot per
	 * trap number 1..31 (see the .align 32 inside the vector macros) */
660	/* First vector is invalid (0) */
661	.ascii	"cows can fly"
662	.byte 0
663	.align 32
664
665	hpmc		 1
666	def		 2
667	def		 3
668	extint		 4
669	def		 5
670	itlb_20		 PARISC_ITLB_TRAP
671	def		 7
672	def		 8
673	def              9
674	def		10
675	def		11
676	def		12
677	def		13
678	def		14
679	dtlb_20		15
680	naitlb_20	16
681	nadtlb_20	17
682	def		18
683	def		19
684	dbit_20		20
685	def		21
686	def		22
687	def		23
688	def		24
689	def		25
690	def		26
691	def		27
692	def		28
693	def		29
694	def		30
695	def		31
696END(fault_vector_20)
697
698#ifndef CONFIG_64BIT
699
700	.align 2048
701
ENTRY(fault_vector_11)
	/* PA 1.1 (32-bit) interruption vector table; same layout as
	 * fault_vector_20 but using the pa11 insertion handlers */
703	/* First vector is invalid (0) */
704	.ascii	"cows can fly"
705	.byte 0
706	.align 32
707
708	hpmc		 1
709	def		 2
710	def		 3
711	extint		 4
712	def		 5
713	itlb_11		 PARISC_ITLB_TRAP
714	def		 7
715	def		 8
716	def              9
717	def		10
718	def		11
719	def		12
720	def		13
721	def		14
722	dtlb_11		15
723	naitlb_11	16
724	nadtlb_11	17
725	def		18
726	def		19
727	dbit_11		20
728	def		21
729	def		22
730	def		23
731	def		24
732	def		25
733	def		26
734	def		27
735	def		28
736	def		29
737	def		30
738	def		31
END(fault_vector_11)
740
741#endif
742	/* Fault vector is separately protected and *must* be on its own page */
743	.align		PAGE_SIZE
744
745	.import		handle_interruption,code
746	.import		do_cpu_irq_mask,code
747
748	/*
749	 * Child Returns here
750	 *
751	 * copy_thread moved args into task save area.
752	 */
753
ENTRY(ret_from_kernel_thread)
755	/* Call schedule_tail first though */
756	BL	schedule_tail, %r2
757	nop
758
759	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
760	LDREG	TASK_PT_GR25(%r1), %r26
761#ifdef CONFIG_64BIT
762	LDREG	TASK_PT_GR27(%r1), %r27
763#endif
	/* call the thread function: pointer taken from PT_GR26, its argument
	 * already loaded into %r26 from PT_GR25 above */
764	LDREG	TASK_PT_GR26(%r1), %r1
765	ble	0(%sr7, %r1)
766	copy	%r31, %r2
767	b	finish_child_return
768	nop
END(ret_from_kernel_thread)
770
771
772	/*
773	 * struct task_struct *_switch_to(struct task_struct *prev,
774	 *	struct task_struct *next)
775	 *
776	 * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
	/* %r26 = prev, %r25 = next (arg0/arg1 per the prototype above).
	 * Saves prev's continuation PC/SP in its task struct, loads next's,
	 * and resumes next at _switch_to_ret, which returns prev in %r28. */
778	STREG	 %r2, -RP_OFFSET(%r30)
779
780	callee_save_float
781	callee_save
782
783	load32	_switch_to_ret, %r2
784
785	STREG	%r2, TASK_PT_KPC(%r26)
786	LDREG	TASK_PT_KPC(%r25), %r2
787
788	STREG	%r30, TASK_PT_KSP(%r26)
789	LDREG	TASK_PT_KSP(%r25), %r30
790	LDREG	TASK_THREAD_INFO(%r25), %r25
	/* branch to next's saved KPC; the delay slot installs next's
	 * thread_info in %cr30 */
791	bv	%r0(%r2)
792	mtctl   %r25,%cr30
793
ENTRY(_switch_to_ret)
795	mtctl	%r0, %cr0		/* Needed for single stepping */
796	callee_rest
797	callee_rest_float
798
799	LDREG	-RP_OFFSET(%r30), %r2
800	bv	%r0(%r2)
801	copy	%r26, %r28
ENDPROC_CFI(_switch_to)
803
804	/*
805	 * Common rfi return path for interruptions, kernel execve, and
806	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
807	 * return via this path if the signal was received when the process
808	 * was running; if the process was blocked on a syscall then the
809	 * normal syscall_exit path is used.  All syscalls for traced
810	 * processes exit via intr_restore.
811	 *
812	 * XXX If any syscalls that change a processes space id ever exit
813	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
814	 * adjust IASQ[0..1].
815	 *
816	 */
817
818	.align	PAGE_SIZE
819
ENTRY_CFI(syscall_exit_rfi)
821	mfctl   %cr30,%r16
822	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
823	ldo	TASK_REGS(%r16),%r16
824	/* Force iaoq to userspace, as the user has had access to our current
825	 * context via sigcontext. Also Filter the PSW for the same reason.
826	 */
827	LDREG	PT_IAOQ0(%r16),%r19
828	depi	3,31,2,%r19
829	STREG	%r19,PT_IAOQ0(%r16)
830	LDREG	PT_IAOQ1(%r16),%r19
831	depi	3,31,2,%r19
832	STREG	%r19,PT_IAOQ1(%r16)
833	LDREG   PT_PSW(%r16),%r19
834	load32	USER_PSW_MASK,%r1
835#ifdef CONFIG_64BIT
836	load32	USER_PSW_HI_MASK,%r20
837	depd    %r20,31,32,%r1
838#endif
839	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
840	load32	USER_PSW,%r1
841	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
842	STREG   %r19,PT_PSW(%r16)
843
844	/*
845	 * If we aren't being traced, we never saved space registers
846	 * (we don't store them in the sigcontext), so set them
847	 * to "proper" values now (otherwise we'll wind up restoring
848	 * whatever was last stored in the task structure, which might
849	 * be inconsistent if an interrupt occurred while on the gateway
850	 * page). Note that we may be "trashing" values the user put in
851	 * them, but we don't support the user changing them.
852	 */
853
854	STREG   %r0,PT_SR2(%r16)
855	mfsp    %sr3,%r19
856	STREG   %r19,PT_SR0(%r16)
857	STREG   %r19,PT_SR1(%r16)
858	STREG   %r19,PT_SR3(%r16)
859	STREG   %r19,PT_SR4(%r16)
860	STREG   %r19,PT_SR5(%r16)
861	STREG   %r19,PT_SR6(%r16)
862	STREG   %r19,PT_SR7(%r16)
863
ENTRY(intr_return)
	/* common exit: %r16 must point at the pt_regs being returned to */
865	/* check for reschedule */
866	mfctl   %cr30,%r1
867	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
868	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
869
870	.import do_notify_resume,code
intr_check_sig:
872	/* As above */
873	mfctl   %cr30,%r1
874	LDREG	TI_FLAGS(%r1),%r19
875	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20
876	and,COND(<>)	%r19, %r20, %r0
877	b,n	intr_restore	/* skip past if we've nothing to do */
878
879	/* This check is critical to having LWS
880	 * working. The IASQ is zero on the gateway
881	 * page and we cannot deliver any signals until
882	 * we get off the gateway page.
883	 *
884	 * Only do signals if we are returning to user space
885	 */
886	LDREG	PT_IASQ0(%r16), %r20
887	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
888	LDREG	PT_IASQ1(%r16), %r20
889	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
890
891	copy	%r0, %r25			/* long in_syscall = 0 */
892#ifdef CONFIG_64BIT
893	ldo	-16(%r30),%r29			/* Reference param save area */
894#endif
895
896	/* NOTE: We need to enable interrupts if we have to deliver
897	 * signals. We used to do this earlier but it caused kernel
898	 * stack overflows. */
899	ssm	PSW_SM_I, %r0
900
901	BL	do_notify_resume,%r2
902	copy	%r16, %r26			/* struct pt_regs *regs */
903
904	b,n	intr_check_sig
905
intr_restore:
907	copy            %r16,%r29
908	ldo             PT_FR31(%r29),%r1
909	rest_fp         %r1
910	rest_general    %r29
911
912	/* inverse of virt_map */
913	pcxt_ssm_bug
914	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
915	tophys_r1       %r29
916
917	/* Restore space id's and special cr's from PT_REGS
918	 * structure pointed to by r29
919	 */
920	rest_specials	%r29
921
922	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
923	 * It also restores r1 and r30.
924	 */
925	rest_stack
926
927	rfi
928	nop
929
930#ifndef CONFIG_PREEMPTION
931# define intr_do_preempt	intr_restore
932#endif /* !CONFIG_PREEMPTION */
933
934	.import schedule,code
intr_do_resched:
936	/* Only call schedule on return to userspace. If we're returning
937	 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
938	 * we jump back to intr_restore.
939	 */
940	LDREG	PT_IASQ0(%r16), %r20
941	cmpib,COND(=)	0, %r20, intr_do_preempt
942	nop
943	LDREG	PT_IASQ1(%r16), %r20
944	cmpib,COND(=)	0, %r20, intr_do_preempt
945	nop
946
947	/* NOTE: We need to enable interrupts if we schedule.  We used
948	 * to do this earlier but it caused kernel stack overflows. */
949	ssm     PSW_SM_I, %r0
950
951#ifdef CONFIG_64BIT
952	ldo	-16(%r30),%r29		/* Reference param save area */
953#endif
954
	/* tail-call schedule with %r2 set so it returns to intr_check_sig */
955	ldil	L%intr_check_sig, %r2
956#ifndef CONFIG_64BIT
957	b	schedule
958#else
959	load32	schedule, %r20
960	bv	%r0(%r20)
961#endif
962	ldo	R%intr_check_sig(%r2), %r2
963
964	/* preempt the current task on returning to kernel
965	 * mode from an interrupt, iff need_resched is set,
966	 * and preempt_count is 0. otherwise, we continue on
967	 * our merry way back to the current running task.
968	 */
969#ifdef CONFIG_PREEMPTION
970	.import preempt_schedule_irq,code
intr_do_preempt:
972	rsm	PSW_SM_I, %r0		/* disable interrupts */
973
974	/* current_thread_info()->preempt_count */
975	mfctl	%cr30, %r1
976	LDREG	TI_PRE_COUNT(%r1), %r19
977	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
978	nop				/* prev insn branched backwards */
979
980	/* check if we interrupted a critical path */
981	LDREG	PT_PSW(%r16), %r20
982	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
983	nop
984
985	/* ssm PSW_SM_I done later in intr_restore */
986#ifdef CONFIG_MLONGCALLS
987	ldil	L%intr_restore, %r2
988	load32	preempt_schedule_irq, %r1
989	bv	%r0(%r1)
990	ldo	R%intr_restore(%r2), %r2
991#else
992	ldil	L%intr_restore, %r1
993	BL	preempt_schedule_irq, %r2
994	ldo	R%intr_restore(%r1), %r2
995#endif
996#endif /* CONFIG_PREEMPTION */
997
998	/*
999	 * External interrupts.
1000	 */
1001
intr_extint:
	/* %r16 holds the interrupted %sr7 (preloaded by the extint vector);
	 * zero means we were already on a kernel stack */
1003	cmpib,COND(=),n 0,%r16,1f
1004
1005	get_stack_use_cr30
1006	b,n 2f
1007
10081:
1009	get_stack_use_r30
10102:
1011	save_specials	%r29
1012	virt_map
1013	save_general	%r29
1014
1015	ldo	PT_FR0(%r29), %r24
1016	save_fp	%r24
1017
1018	loadgp
1019
1020	copy	%r29, %r26	/* arg0 is pt_regs */
1021	copy	%r29, %r16	/* save pt_regs */
1022
1023	ldil	L%intr_return, %r2
1024
1025#ifdef CONFIG_64BIT
1026	ldo	-16(%r30),%r29	/* Reference param save area */
1027#endif
1028
1029	b	do_cpu_irq_mask
1030	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)
1032
1033
1034	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1035
ENTRY_CFI(intr_save)		/* for os_hpmc */
	/* shadowed %r8 holds the trap code (set by the "def" vector); copy
	 * it to non-shadowed %r26 so it survives the rfir in virt_map */
1037	mfsp    %sr7,%r16
1038	cmpib,COND(=),n 0,%r16,1f
1039	get_stack_use_cr30
1040	b	2f
1041	copy    %r8,%r26
1042
10431:
1044	get_stack_use_r30
1045	copy    %r8,%r26
1046
10472:
1048	save_specials	%r29
1049
1050	/* If this trap is a itlb miss, skip saving/adjusting isr/ior */
1051	cmpib,COND(=),n        PARISC_ITLB_TRAP,%r26,skip_save_ior
1052
1053
1054	mfctl           %isr, %r16
1055	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1056	mfctl           %ior, %r17
1057
1058
1059#ifdef CONFIG_64BIT
1060	/*
1061	 * If the interrupted code was running with W bit off (32 bit),
1062	 * clear the b bits (bits 0 & 1) in the ior.
1063	 * save_specials left ipsw value in r8 for us to test.
1064	 */
1065	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
1066	depdi           0,1,2,%r17
1067
1068	/* adjust isr/ior: get high bits from isr and deposit in ior */
1069	space_adjust	%r16,%r17,%r1
1070#endif
1071	STREG           %r16, PT_ISR(%r29)
1072	STREG           %r17, PT_IOR(%r29)
1073
1074#if 0 && defined(CONFIG_64BIT)
1075	/* Revisit when we have 64-bit code above 4Gb */
1076	b,n		intr_save2
1077
1078skip_save_ior:
1079	/* We have a itlb miss, and when executing code above 4 Gb on ILP64, we
1080	 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
1081	 * above.
1082	 */
1083	extrd,u,*	%r8,PSW_W_BIT,1,%r1
1084	cmpib,COND(=),n	1,%r1,intr_save2
1085	LDREG		PT_IASQ0(%r29), %r16
1086	LDREG		PT_IAOQ0(%r29), %r17
1087	/* adjust iasq/iaoq */
1088	space_adjust	%r16,%r17,%r1
1089	STREG           %r16, PT_IASQ0(%r29)
1090	STREG           %r17, PT_IAOQ0(%r29)
1091#else
1092skip_save_ior:
1093#endif
1094
intr_save2:
1096	virt_map
1097	save_general	%r29
1098
1099	ldo		PT_FR0(%r29), %r25
1100	save_fp		%r25
1101
1102	loadgp
1103
1104	copy		%r29, %r25	/* arg1 is pt_regs */
1105#ifdef CONFIG_64BIT
1106	ldo		-16(%r30),%r29	/* Reference param save area */
1107#endif
1108
	/* call handle_interruption(code=%r26, regs=%r25), returning to
	 * intr_check_sig */
1109	ldil		L%intr_check_sig, %r2
1110	copy		%r25, %r16	/* save pt_regs */
1111
1112	b		handle_interruption
1113	ldo		R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)
1115
1116
1117	/*
1118	 * Note for all tlb miss handlers:
1119	 *
1120	 * cr24 contains a pointer to the kernel address space
1121	 * page directory.
1122	 *
1123	 * cr25 contains a pointer to the current user address
1124	 * space page directory.
1125	 *
1126	 * sr3 will contain the space id of the user address space
1127	 * of the current running thread while that thread is
1128	 * running in the kernel.
1129	 */
1130
1131	/*
1132	 * register number allocations.  Note that these are all
1133	 * in the shadowed registers
1134	 */
	/* These aliases are shared by all of the tlb miss handlers below. */
1135
1136	t0 = r1		/* temporary register 0 */
1137	va = r8		/* virtual address for which the trap occurred */
1138	t1 = r9		/* temporary register 1 */
1139	pte  = r16	/* pte/phys page # */
1140	prot = r17	/* prot bits */
1141	spc  = r24	/* space for which the trap occurred */
1142	ptp = r25	/* page directory/page table pointer */
1143
1144#ifdef CONFIG_64BIT
1145
/* 64-bit data TLB miss: walk the 3-level page table, take the page-table
 * lock, set the ACCESSED bit, insert the translation with idtlbt, and
 * return from interruption.  On a tmpalias-region hit we divert to the
 * *_check_alias_20w variants below. */
dtlb_miss_20w:
1147	space_adjust	spc,va,t0
1148	get_pgd		spc,ptp
1149	space_check	spc,t0,dtlb_fault
1150
1151	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w
1152
1153	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
1154	update_accessed	ptp,pte,t0,t1
1155
1156	make_insert_tlb	spc,pte,prot,t1
1157
1158	idtlbt          pte,prot
1159
1160	ptl_unlock1	spc,t0
1161	rfir
1162	nop
1163
dtlb_check_alias_20w:
1165	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20
1166
1167	idtlbt          pte,prot
1168
1169	rfir
1170	nop
1171
/* non-access variant: faults fall through to nadtlb_emulate instead */
nadtlb_miss_20w:
1173	space_adjust	spc,va,t0
1174	get_pgd		spc,ptp
1175	space_check	spc,t0,nadtlb_fault
1176
1177	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w
1178
1179	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
1180	update_accessed	ptp,pte,t0,t1
1181
1182	make_insert_tlb	spc,pte,prot,t1
1183
1184	idtlbt          pte,prot
1185
1186	ptl_unlock1	spc,t0
1187	rfir
1188	nop
1189
nadtlb_check_alias_20w:
1191	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1192
1193	idtlbt          pte,prot
1194
1195	rfir
1196	nop
1197
1198#else
1199
/* Data TLB miss, PA 1.1 (32-bit, split I/D TLB).  PA 1.1 inserts need
 * separate idtlba/idtlbp through a space register, so %sr1 is
 * temporarily borrowed and restored afterwards. */
dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

/* No valid PTE: try a kernel alias mapping, else fall to dtlb_fault. */
dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

/* Non-access data TLB miss, PA 1.1.  Missing translations are routed
 * to nadtlb_emulate rather than signalling a fault. */
nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop
/* Data TLB miss, PA 2.0 narrow (32-bit kernel on PA 2.0 hardware).
 * Like the PA 1.1 path but uses the combined idtlbt insert; f_extend
 * widens the physical address bits of the PTE first. */
dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

/* No valid PTE: try a kernel alias mapping, else fall to dtlb_fault. */
dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

/* Non-access data TLB miss, PA 2.0 narrow; missing translations go to
 * nadtlb_emulate. */
nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop
1322#endif
1323
nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	/* Emulate base-register modification: read index and base via
	 * get_register (each BL's delay slot loads the register # into
	 * %r8), add them, and write the sum back with set_register.
	 * A -1 result means the register is shadowed: take the slow path. */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

/* Nullify the faulting instruction by setting PSW_N in the saved IPSW,
 * so it is skipped when rfir resumes the interrupted context. */
nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop
1369
1370	/*
1371		When there is no translation for the probe address then we
1372		must nullify the insn and return zero in the target register.
1373		This will indicate to the calling code that it does not have
1374		write/read privileges to this address.
1375
1376		This should technically work for prober and probew in PA 1.1,
1377		and also probe,r and probe,w in PA 2.0
1378
1379		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1380		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1381
1382	*/
/* Emulate probe,[rw] on an unmapped address: write zero to the target
 * register (meaning "no access") and nullify the probe insn.  Anything
 * that is not a probe, or targets a shadowed register, takes the slow
 * nadtlb_fault path. */
nadtlb_probe_check:
	ldi             0x80,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
	BL              get_register,%r25      /* Find the target register */
	extrw,u         %r9,31,5,%r8           /* Get target register */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy            %r0,%r1                /* Write zero to target register */
	b nadtlb_nullify                       /* Nullify return insn */
	nop
1394
1395
1396#ifdef CONFIG_64BIT
/* Instruction TLB miss, 64-bit kernel.  Unlike the data-side handlers
 * there is no alias fallback here: a missing PTE goes straight to
 * itlb_fault, because users may legitimately fault on the gateway page. */
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

/* Non-access instruction TLB miss, 64-bit kernel. */
naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop
1452
1453#else
1454
/* Instruction TLB miss, PA 1.1.  Split iitlba/iitlbp inserts through a
 * borrowed %sr1, as in the data-side PA 1.1 handlers. */
itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

/* Non-access instruction TLB miss, PA 1.1. */
naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

/* NOTE(review): the do_alias fallthrough target here is itlb_fault,
 * not naitlb_fault as in the other naitlb handlers — presumably
 * intentional on this path; confirm against the trap handling code. */
naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba          pte,(%sr0, va)
	iitlbp          prot,(%sr0, va)

	rfir
	nop
1511
1512
/* Instruction TLB miss, PA 2.0 narrow.  Combined iitlbt insert after
 * f_extend widens the physical address bits of the PTE. */
itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

/* Non-access instruction TLB miss, PA 2.0 narrow. */
naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt          pte,prot

	rfir
	nop
1560
1561#endif
1562
1563#ifdef CONFIG_64BIT
1564
/* TLB dirty-bit trap, 64-bit kernel: first write to a clean page.
 * Mark the PTE dirty under the page-table lock and re-insert the
 * translation.  Note ptl_unlock0 (not 1) is used on the dbit paths. */
dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	ptl_unlock0	spc,t0
	rfir
	nop
1582#else
1583
/* TLB dirty-bit trap, PA 1.1: mark PTE dirty, re-insert via split
 * idtlba/idtlbp through a borrowed %sr1. */
dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */

	ptl_unlock0	spc,t0
	rfir
	nop

/* TLB dirty-bit trap, PA 2.0 narrow: mark PTE dirty, widen physical
 * address with f_extend, re-insert with idtlbt. */
dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock0	spc,t0
	rfir
	nop
1628#endif
1629
	.import handle_interruption,code

	/* Slow-path fault stubs: each loads the interruption code into
	 * %r8 (a shadowed register) in the branch delay slot and enters
	 * intr_save, which builds pt_regs and calls handle_interruption. */

kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8	/* code 20 = TLB dirty bit trap */

itlb_fault:
	b               intr_save
	ldi             PARISC_ITLB_TRAP,%r8

nadtlb_fault:
	b               intr_save
	ldi             17,%r8	/* code 17 = non-access data TLB miss */

naitlb_fault:
	b               intr_save
	ldi             16,%r8	/* code 16 = non-access instruction TLB miss */

dtlb_fault:
	b               intr_save
	ldi             15,%r8	/* code 15 = data TLB miss */
1655
1656	/* Register saving semantics for system calls:
1657
1658	   %r1		   clobbered by system call macro in userspace
1659	   %r2		   saved in PT_REGS by gateway page
1660	   %r3  - %r18	   preserved by C code (saved by signal code)
1661	   %r19 - %r20	   saved in PT_REGS by gateway page
1662	   %r21 - %r22	   non-standard syscall args
1663			   stored in kernel stack by gateway page
1664	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
1665	   %r27 - %r30	   saved in PT_REGS by gateway page
1666	   %r31		   syscall return pointer
1667	 */
1668
1669	/* Floating point registers (FIXME: what do we do with these?)
1670
1671	   %fr0  - %fr3	   status/exception, not preserved
1672	   %fr4  - %fr7	   arguments
1673	   %fr8	 - %fr11   not preserved by C code
1674	   %fr12 - %fr21   preserved by C code
1675	   %fr22 - %fr31   not preserved by C code
1676	 */
1677
	/* Store the callee-saved general registers %r3-%r18 into the
	 * pt_regs pointed to by \regs.  Counterpart of reg_restore. */
	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm
1696
	/* Reload the callee-saved general registers %r3-%r18 from the
	 * pt_regs pointed to by \regs.  Counterpart of reg_save. */
	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm
1715
	/* Emit a sys_<name>_wrapper for a fork-style syscall: save the
	 * callee-saved registers and %cr27 (TLS pointer) into pt_regs
	 * so the child can be resumed with them, then branch to the
	 * real sys_<name>.  The STREG executes in the be delay slot. */
	.macro	fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like clone3
fork_like fork
fork_like vfork
1732
1733	/* Set the return value for the child */
ENTRY(child_return)
	/* First code run in a new child: let the scheduler finish the
	 * context switch bookkeeping. */
	BL	schedule_tail, %r2
	nop
finish_child_return:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	/* Restore %cr27 (saved by the fork_like wrapper) and the
	 * callee-saved registers, then return 0 to the child via the
	 * normal syscall exit (%r28 = return value, set in delay slot). */
	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28
END(child_return)
1747
/* Wrapper for sys_rt_sigreturn: calls the C handler, then restores the
 * callee-saved registers from pt_regs (which sys_rt_sigreturn rewrote
 * from the sigcontext) before returning through the saved %r2. */
ENTRY_CFI(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC_CFI(sys_rt_sigreturn_wrapper)
1777
ENTRY(syscall_exit)
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl     %cr30, %r1
	LDREG     TI_TASK(%r1),%r1
	STREG     %r28,TASK_PT_GR28(%r1)

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	/* Any remaining user-work flags (signals etc., excluding
	 * NEED_RESCHED which was handled above)?  If none, the and
	 * nullifies the branch and we fall into syscall_do_signal. */
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	/* Reload pt_regs: do_notify_resume may have changed them. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n     syscall_check_sig
1831
/* Fast syscall return path: restore user registers and space registers
 * and branch straight back to user space without an RFI.  Ptraced
 * (single/block-step) tasks must go through syscall_restore_rfi instead
 * so the PSW R/T bits can be set. */
syscall_restore:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	/* Are we being ptraced? */
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	_TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
	rsm     PSW_SM_I, %r0
	copy    %r1,%r30                           /* Restore user sp */
	mfsp    %sr3,%r1                           /* Get user space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	3,31,2,%r31			   /* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n    0(%sr3,%r31)                       /* return to user space */
1888
1889	/* We have to return via an RFI, so that PSW T and R bits can be set
1890	 * appropriately.
1891	 * This sets up pt_regs so we can return via intr_restore, which is not
1892	 * the most efficient way of doing things, but it works.
1893	 */
/* Slow syscall return for single/block-stepped tasks: build a PSW with
 * the R and/or T bits set in pt_regs and return through intr_restore's
 * RFI path so the debug traps fire in user space. */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	/* Resume at the syscall return address (gr31), privilege 3. */
	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG   %r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

/* pt_regs already complete: just force the saved IAOQ to user privilege
 * and take the common intr_restore exit. */
pt_regs_ok:
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	3,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16
1967
/* Call schedule() with %r2 pointing back at syscall_check_resched, so
 * the flag checks are redone after we run again. */
syscall_do_resched:
	load32	syscall_check_resched,%r2 /* if resched, we start over again */
	load32	schedule,%r19
	bv	%r0(%r19)		/* jumps to schedule() */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
END(syscall_exit)
1978
1979
1980#ifdef CONFIG_FUNCTION_TRACER
1981
1982	.import ftrace_function_trampoline,code
1983	.align L1_CACHE_BYTES
/* Classic _mcount entry for CONFIG_FUNCTION_TRACER: tail-jumps into
 * ftrace_function_trampoline with %arg3 = 0 (no pt_regs). */
ENTRY_CFI(mcount, caller)
_mcount:
	.export _mcount,data
	/*
	 * The 64bit mcount() function pointer needs 4 dwords, of which the
	 * first two are free.  We optimize it here and put 2 instructions for
	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
	 * have all on one L1 cacheline.
	 */
	ldi	0, %arg3
	b	ftrace_function_trampoline
	copy	%r3, %arg2	/* caller original %sp */
ftrace_stub:
	.globl ftrace_stub
        .type  ftrace_stub, @function
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	nop
#ifdef CONFIG_64BIT
	.dword mcount
	.dword 0 /* code in head.S puts value of global gp here */
#endif
ENDPROC_CFI(mcount)
2010
2011#ifdef CONFIG_DYNAMIC_FTRACE
2012
2013#ifdef CONFIG_64BIT
2014#define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
2015#else
2016#define FTRACE_FRAME_SIZE FRAME_SIZE
2017#endif
/* Dynamic-ftrace entry (no pt_regs variant): save the caller-visible
 * registers into a scratch frame, call ftrace_function_trampoline,
 * restore, and jump back to the start of the traced function. */
ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ftrace_caller:
	.global ftrace_caller

	STREG	%r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
	ldo	-FTRACE_FRAME_SIZE(%sp), %r3
	STREG	%rp, -RP_OFFSET(%r3)

	/* Offset 0 is already allocated for %r1 */
	STREG	%r23, 2*REG_SZ(%r3)
	STREG	%r24, 3*REG_SZ(%r3)
	STREG	%r25, 4*REG_SZ(%r3)
	STREG	%r26, 5*REG_SZ(%r3)
	STREG	%r28, 6*REG_SZ(%r3)
	STREG	%r29, 7*REG_SZ(%r3)
#ifdef CONFIG_64BIT
	STREG	%r19, 8*REG_SZ(%r3)
	STREG	%r20, 9*REG_SZ(%r3)
	STREG	%r21, 10*REG_SZ(%r3)
	STREG	%r22, 11*REG_SZ(%r3)
	STREG	%r27, 12*REG_SZ(%r3)
	STREG	%r31, 13*REG_SZ(%r3)
	loadgp
	ldo	-16(%sp),%r29
#endif
	/* Arguments: %r26 = our return address (call site),
	 * %r25 = traced function address (adjusted by -8),
	 * %r24 = caller frame, %r23 = NULL (no pt_regs). */
	LDREG	0(%r3), %r25
	copy	%rp, %r26
	ldo	-8(%r25), %r25
	ldi	0, %r23		/* no pt_regs */
	b,l	ftrace_function_trampoline, %rp
	copy	%r3, %r24

	LDREG	-RP_OFFSET(%r3), %rp
	LDREG	2*REG_SZ(%r3), %r23
	LDREG	3*REG_SZ(%r3), %r24
	LDREG	4*REG_SZ(%r3), %r25
	LDREG	5*REG_SZ(%r3), %r26
	LDREG	6*REG_SZ(%r3), %r28
	LDREG	7*REG_SZ(%r3), %r29
#ifdef CONFIG_64BIT
	LDREG	8*REG_SZ(%r3), %r19
	LDREG	9*REG_SZ(%r3), %r20
	LDREG	10*REG_SZ(%r3), %r21
	LDREG	11*REG_SZ(%r3), %r22
	LDREG	12*REG_SZ(%r3), %r27
	LDREG	13*REG_SZ(%r3), %r31
#endif
	LDREG	1*REG_SZ(%r3), %r3

	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_caller)
2073
2074#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
/* Dynamic-ftrace entry with full pt_regs (DYNAMIC_FTRACE_WITH_REGS):
 * capture every general register plus SAR into a pt_regs on the stack,
 * pass it to ftrace_function_trampoline, then restore everything so a
 * regs-modifying tracer (e.g. kprobes) takes effect. */
ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
	CALLS,SAVE_RP,SAVE_SP)
ftrace_regs_caller:
	.global ftrace_regs_caller

	ldo	-FTRACE_FRAME_SIZE(%sp), %r1
	STREG	%rp, -RP_OFFSET(%r1)

	copy	%sp, %r1
	ldo	PT_SZ_ALGN(%sp), %sp

	STREG	%rp, PT_GR2(%r1)
	STREG	%r3, PT_GR3(%r1)
	STREG	%r4, PT_GR4(%r1)
	STREG	%r5, PT_GR5(%r1)
	STREG	%r6, PT_GR6(%r1)
	STREG	%r7, PT_GR7(%r1)
	STREG	%r8, PT_GR8(%r1)
	STREG	%r9, PT_GR9(%r1)
	STREG   %r10, PT_GR10(%r1)
	STREG   %r11, PT_GR11(%r1)
	STREG   %r12, PT_GR12(%r1)
	STREG   %r13, PT_GR13(%r1)
	STREG   %r14, PT_GR14(%r1)
	STREG   %r15, PT_GR15(%r1)
	STREG   %r16, PT_GR16(%r1)
	STREG   %r17, PT_GR17(%r1)
	STREG   %r18, PT_GR18(%r1)
	STREG	%r19, PT_GR19(%r1)
	STREG	%r20, PT_GR20(%r1)
	STREG	%r21, PT_GR21(%r1)
	STREG	%r22, PT_GR22(%r1)
	STREG	%r23, PT_GR23(%r1)
	STREG	%r24, PT_GR24(%r1)
	STREG	%r25, PT_GR25(%r1)
	STREG	%r26, PT_GR26(%r1)
	STREG	%r27, PT_GR27(%r1)
	STREG	%r28, PT_GR28(%r1)
	STREG	%r29, PT_GR29(%r1)
	STREG	%r30, PT_GR30(%r1)
	STREG	%r31, PT_GR31(%r1)
	mfctl	%cr11, %r26
	STREG	%r26, PT_SAR(%r1)

	/* Arguments: %r26 = call site, %r25 = traced function - 8,
	 * %arg2 = caller frame, %arg3 = the pt_regs just built. */
	copy	%rp, %r26
	LDREG	-FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
	ldo	-8(%r25), %r25
	ldo	-FTRACE_FRAME_SIZE(%r1), %arg2
	b,l	ftrace_function_trampoline, %rp
	copy	%r1, %arg3 /* struct pt_regs */

	ldo	-PT_SZ_ALGN(%sp), %r1

	LDREG	PT_SAR(%r1), %rp
	mtctl	%rp, %cr11

	LDREG	PT_GR2(%r1), %rp
	LDREG	PT_GR3(%r1), %r3
	LDREG	PT_GR4(%r1), %r4
	LDREG	PT_GR5(%r1), %r5
	LDREG	PT_GR6(%r1), %r6
	LDREG	PT_GR7(%r1), %r7
	LDREG	PT_GR8(%r1), %r8
	LDREG	PT_GR9(%r1), %r9
	LDREG   PT_GR10(%r1),%r10
	LDREG   PT_GR11(%r1),%r11
	LDREG   PT_GR12(%r1),%r12
	LDREG   PT_GR13(%r1),%r13
	LDREG   PT_GR14(%r1),%r14
	LDREG   PT_GR15(%r1),%r15
	LDREG   PT_GR16(%r1),%r16
	LDREG   PT_GR17(%r1),%r17
	LDREG   PT_GR18(%r1),%r18
	LDREG   PT_GR19(%r1),%r19
	LDREG   PT_GR20(%r1),%r20
	LDREG   PT_GR21(%r1),%r21
	LDREG   PT_GR22(%r1),%r22
	LDREG   PT_GR23(%r1),%r23
	LDREG   PT_GR24(%r1),%r24
	LDREG   PT_GR25(%r1),%r25
	LDREG   PT_GR26(%r1),%r26
	LDREG   PT_GR27(%r1),%r27
	LDREG   PT_GR28(%r1),%r28
	LDREG   PT_GR29(%r1),%r29
	LDREG   PT_GR30(%r1),%r30
	LDREG   PT_GR31(%r1),%r31

	ldo	-PT_SZ_ALGN(%sp), %sp
	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_regs_caller)
2169
2170#endif
2171#endif
2172
2173#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2174	.align 8
/* Function-graph tracer return trampoline: preserve the traced
 * function's return values (%ret0/%ret1), ask
 * ftrace_return_to_handler() for the original return address, then
 * return there with the values restored. */
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy %r3,%r1
	STREG %r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy %sp,%r3
	STREGM %r1,FRAME_SIZE(%sp)
	STREG %ret0,8(%r3)
	STREG %ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
	load32 ftrace_return_to_handler,%ret0
	load32 .Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo -16(%sp),%ret1		/* Reference param save area */
	bve	(%ret0)
#else
	bv	%r0(%ret0)
#endif
	ldi 0,%r26
.Lftrace_ret:
	copy %ret0,%rp

	/* restore original return values */
	LDREG 8(%r3),%ret0
	LDREG 16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	LDREGM -FRAME_SIZE(%sp),%r3
ENDPROC_CFI(return_to_handler)
2215
2216#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2217
2218#endif	/* CONFIG_FUNCTION_TRACER */
2219
2220#ifdef CONFIG_IRQSTACKS
2221/* void call_on_stack(unsigned long param1, void *func,
2222		      unsigned long new_stack) */
/* call_on_stack(param1, func, new_stack): invoke func(param1) on the
 * supplied (IRQ) stack, then switch back to the original stack.  The
 * old %sp and %rp are kept in the new stack's frame marker. */
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ENTRY(_call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */

	/* Switch to new stack.  We allocate two frames.  */
	ldo	2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bve	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# else
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bv	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
2260#endif /* CONFIG_IRQSTACKS */
2261
ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 *
	 * The blr below computes a branch into the table: each entry is
	 * exactly two instructions (bv + delay slot), so entry N handles
	 * register N.  Returns via %r25 (set up by the BL caller).
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 */
	copy    %r0,%r1
	bv      %r0(%r25)    /* r1 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r2,%r1
	bv      %r0(%r25)    /* r3 */
	copy    %r3,%r1
	bv      %r0(%r25)    /* r4 */
	copy    %r4,%r1
	bv      %r0(%r25)    /* r5 */
	copy    %r5,%r1
	bv      %r0(%r25)    /* r6 */
	copy    %r6,%r1
	bv      %r0(%r25)    /* r7 */
	copy    %r7,%r1
	bv      %r0(%r25)    /* r8 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r9 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r10 */
	copy    %r10,%r1
	bv      %r0(%r25)    /* r11 */
	copy    %r11,%r1
	bv      %r0(%r25)    /* r12 */
	copy    %r12,%r1
	bv      %r0(%r25)    /* r13 */
	copy    %r13,%r1
	bv      %r0(%r25)    /* r14 */
	copy    %r14,%r1
	bv      %r0(%r25)    /* r15 */
	copy    %r15,%r1
	bv      %r0(%r25)    /* r16 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r17 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r18 */
	copy    %r18,%r1
	bv      %r0(%r25)    /* r19 */
	copy    %r19,%r1
	bv      %r0(%r25)    /* r20 */
	copy    %r20,%r1
	bv      %r0(%r25)    /* r21 */
	copy    %r21,%r1
	bv      %r0(%r25)    /* r22 */
	copy    %r22,%r1
	bv      %r0(%r25)    /* r23 */
	copy    %r23,%r1
	bv      %r0(%r25)    /* r24 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r25 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r26 */
	copy    %r26,%r1
	bv      %r0(%r25)    /* r27 */
	copy    %r27,%r1
	bv      %r0(%r25)    /* r28 */
	copy    %r28,%r1
	bv      %r0(%r25)    /* r29 */
	copy    %r29,%r1
	bv      %r0(%r25)    /* r30 */
	copy    %r30,%r1
	bv      %r0(%r25)    /* r31 */
	copy    %r31,%r1
ENDPROC_CFI(get_register)
2340
2341
ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 *
	 * Dispatch mechanism: "blr %r8,%r0" branches to an offset of
	 * 8 * %r8 bytes past the delay-slot nop below (the link value
	 * goes to %r0 and is discarded).  Each table entry is therefore
	 * exactly two 4-byte instructions: a "bv" back to the caller
	 * (return address supplied in %r25), with the register copy
	 * executing in the bv's branch delay slot.
	 *
	 * Invariant: do NOT insert, remove, or reorder instructions in
	 * this table -- the fixed 8-byte spacing of the entries is what
	 * makes the indexed branch land on the right register's slot.
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy    %r1,%r0      /* writes to %r0 are discarded by hardware */
	bv      %r0(%r25)    /* r1 */
	copy    %r1,%r1      /* source == destination: effectively a nop */
	bv      %r0(%r25)    /* r2 */
	copy    %r1,%r2
	bv      %r0(%r25)    /* r3 */
	copy    %r1,%r3
	bv      %r0(%r25)    /* r4 */
	copy    %r1,%r4
	bv      %r0(%r25)    /* r5 */
	copy    %r1,%r5
	bv      %r0(%r25)    /* r6 */
	copy    %r1,%r6
	bv      %r0(%r25)    /* r7 */
	copy    %r1,%r7
	bv      %r0(%r25)    /* r8 */
	copy    %r1,%r8
	bv      %r0(%r25)    /* r9 */
	copy    %r1,%r9
	bv      %r0(%r25)    /* r10 */
	copy    %r1,%r10
	bv      %r0(%r25)    /* r11 */
	copy    %r1,%r11
	bv      %r0(%r25)    /* r12 */
	copy    %r1,%r12
	bv      %r0(%r25)    /* r13 */
	copy    %r1,%r13
	bv      %r0(%r25)    /* r14 */
	copy    %r1,%r14
	bv      %r0(%r25)    /* r15 */
	copy    %r1,%r15
	bv      %r0(%r25)    /* r16 */
	copy    %r1,%r16
	bv      %r0(%r25)    /* r17 */
	copy    %r1,%r17
	bv      %r0(%r25)    /* r18 */
	copy    %r1,%r18
	bv      %r0(%r25)    /* r19 */
	copy    %r1,%r19
	bv      %r0(%r25)    /* r20 */
	copy    %r1,%r20
	bv      %r0(%r25)    /* r21 */
	copy    %r1,%r21
	bv      %r0(%r25)    /* r22 */
	copy    %r1,%r22
	bv      %r0(%r25)    /* r23 */
	copy    %r1,%r23
	bv      %r0(%r25)    /* r24 */
	copy    %r1,%r24
	bv      %r0(%r25)    /* r25 */
	copy    %r1,%r25
	bv      %r0(%r25)    /* r26 */
	copy    %r1,%r26
	bv      %r0(%r25)    /* r27 */
	copy    %r1,%r27
	bv      %r0(%r25)    /* r28 */
	copy    %r1,%r28
	bv      %r0(%r25)    /* r29 */
	copy    %r1,%r29
	bv      %r0(%r25)    /* r30 */
	copy    %r1,%r30
	bv      %r0(%r25)    /* r31 */
	copy    %r1,%r31
ENDPROC_CFI(set_register)
2415
2416