/* tsb.S: Sparc64 TSB table handling.
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */


#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/page.h>
#include <asm/cpudata.h>
#include <asm/mmu.h>

	.text
	.align	32

	/* Invoked from TLB miss handler, we are in the
	 * MMU global registers and they are set up like
	 * this:
	 *
	 * %g1: TSB entry pointer
	 * %g2:	available temporary
	 * %g3:	FAULT_CODE_{D,I}TLB
	 * %g4:	available temporary
	 * %g5:	available temporary
	 * %g6: TAG TARGET
	 * %g7:	available temporary, will be loaded by us with
	 *      the physical address base of the linux page
	 *      tables for the current address space
	 */
tsb_miss_dtlb:
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4
	srlx		%g4, PAGE_SHIFT, %g4
	ba,pt		%xcc, tsb_miss_page_table_walk
	 sllx		%g4, PAGE_SHIFT, %g4

tsb_miss_itlb:
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4
	srlx		%g4, PAGE_SHIFT, %g4
	ba,pt		%xcc, tsb_miss_page_table_walk
	 sllx		%g4, PAGE_SHIFT, %g4
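	/* In both miss handlers above, the srlx/sllx pair by PAGE_SHIFT
	 * clears the low-order context ID bits of the TAG ACCESS value,
	 * leaving the page-aligned missing virtual address in %g4
	 * (in effect, vaddr &= ~((1UL << PAGE_SHIFT) - 1)).
	 */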

	/* At this point we have:
	 * %g1 --	PAGE_SIZE TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g4 --	missing virtual address
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 */
tsb_miss_page_table_walk:
	TRAP_LOAD_TRAP_BLOCK(%g7, %g5)

	/* Before committing to a full page table walk,
	 * check the huge page TSB.
	 */
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)

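	/* The 661: label plus the .sun4v_2insn_patch entry below is the
	 * usual boot-time patching idiom: on sun4v machines the two
	 * instructions at 661b are overwritten with the two listed after
	 * the .word directive, so the huge TSB pointer comes from the
	 * hypervisor scratchpad register rather than the trap block.
	 * The same pattern repeats throughout this file.
	 */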
661:	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
	nop
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		SCRATCHPAD_UTSBREG2, %g5
	ldxa		[%g5] ASI_SCRATCHPAD, %g5
	.previous

	cmp		%g5, -1
	be,pt		%xcc, 80f
	 nop

	/* We need an aligned pair of registers containing 2 values
	 * which can be easily rematerialized.  %g6 and %g7 foot the
	 * bill just nicely.  We'll save %g6 away into %g2 for the
	 * huge page TSB TAG comparison.
	 *
	 * Perform a huge page TSB lookup.
	 */
	mov		%g6, %g2
	and		%g5, 0x7, %g6
	mov		512, %g7
	andn		%g5, 0x7, %g5
	sllx		%g7, %g6, %g7
	srlx		%g4, REAL_HPAGE_SHIFT, %g6
	sub		%g7, 1, %g7
	and		%g6, %g7, %g6
	sllx		%g6, 4, %g6
	add		%g5, %g6, %g5
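	/* Roughly, in C (illustrative only; the low three bits of the
	 * TSB config word encode the size as 512 << n entries of 16
	 * bytes each):
	 *
	 *	nentries = 512UL << (config & 0x7);
	 *	base     = config & ~0x7UL;
	 *	entry    = base + (((vaddr >> REAL_HPAGE_SHIFT) &
	 *			    (nentries - 1)) * 16);
	 */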

	TSB_LOAD_QUAD(%g5, %g6)
	cmp		%g6, %g2
	be,a,pt		%xcc, tsb_tlb_reload
	 mov		%g7, %g5

	/* No match, remember the huge page TSB entry address,
	 * and restore %g6 and %g7.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
	srlx		%g4, 22, %g6
80:	stx		%g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]

#endif

	ldx		[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7

	/* At this point we have:
	 * %g1 --	TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g4 --	missing virtual address
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 * %g7 --	page table physical address
	 *
	 * We know that the base PAGE_SIZE TSB and the HPAGE_SIZE TSB
	 * both lack a matching entry.
	 */
tsb_miss_page_table_walk_sun4v_fastpath:
	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)

	/* Valid PTE is now in %g5.  */

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
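	/* The two patched sequences below extract the page size field
	 * from the PTE and compare it against the huge page size
	 * encoding; anything that is not a huge page skips ahead to the
	 * normal TSB reload at 60: below.
	 */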
661:	sethi		%uhi(_PAGE_SZALL_4U), %g7
	sllx		%g7, 32, %g7
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		_PAGE_SZALL_4V, %g7
	nop
	.previous

	and		%g5, %g7, %g2

661:	sethi		%uhi(_PAGE_SZHUGE_4U), %g7
	sllx		%g7, 32, %g7
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		_PAGE_SZHUGE_4V, %g7
	nop
	.previous

	cmp		%g2, %g7
	bne,pt		%xcc, 60f
	 nop

	/* It is a huge page, use the huge page TSB entry address we
	 * calculated above.  If the huge page TSB has not been
	 * allocated, set up a trap stack and call hugetlb_setup()
	 * to do so, then return from the trap to replay the TLB
	 * miss.
	 *
	 * This is necessary to handle the case of transparent huge
	 * pages where we don't really have a non-atomic context
	 * in which to allocate the hugepage TSB hash table.  When
	 * the 'mm' faults in the hugepage for the first time, we
	 * thus handle it here.  This also makes sure that we can
	 * allocate the TSB hash table on the correct NUMA node.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g1
	cmp		%g1, -1
	bne,pt		%xcc, 60f
	 nop

661:	rdpr		%pstate, %g5
	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	SET_GL(1)
	nop
	.previous

	rdpr	%tl, %g7
	cmp	%g7, 1
	bne,pn	%xcc, winfix_trampoline
	 mov	%g3, %g4
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	call	hugetlb_setup
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 nop

60:
#endif

	/* At this point we have:
	 * %g1 --	TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g5 --	valid PTE
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 */
tsb_reload:
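	/* TSB_LOCK_TAG spins until it can set the lock bit in the tag,
	 * so concurrent lookups and tsb_flush see the entry as busy;
	 * TSB_WRITE then stores the new TTE followed by the real tag,
	 * which also drops the lock (both macros are in asm/tsb.h).
	 */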
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* Finally, load TLB and return from trap.  */
tsb_tlb_reload:
	cmp		%g3, FAULT_CODE_DTLB
	bne,pn		%xcc, tsb_itlb_load
	 nop

tsb_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

tsb_itlb_load:
	/* Executable bit must be set.  */
661:	sethi		%hi(_PAGE_EXEC_4U), %g4
	andcc		%g5, %g4, %g0
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	andcc		%g5, _PAGE_EXEC_4V, %g0
	nop
	.previous

	be,pn		%xcc, tsb_do_fault
	 nop

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

	/* No valid entry in the page tables, do full fault
	 * processing.
	 */

	.globl		tsb_do_fault
tsb_do_fault:
	cmp		%g3, FAULT_CODE_DTLB

661:	rdpr		%pstate, %g5
	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	SET_GL(1)
	ldxa		[%g0] ASI_SCRATCHPAD, %g4
	.previous

	bne,pn		%xcc, tsb_do_itlb_fault
	 nop
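	/* On sun4v the patched ldxa above leaves the base of this cpu's
	 * MMU fault status area in %g4, which is what the
	 * HV_FAULT_D_ADDR_OFFSET load in tsb_do_dtlb_fault below
	 * indexes into.
	 */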

tsb_do_dtlb_fault:
	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g4 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	/* Clear context ID bits.  */
	srlx		%g5, PAGE_SHIFT, %g5
	sllx		%g5, PAGE_SHIFT, %g5

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop

tsb_do_itlb_fault:
	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

	.globl	sparc64_realfault_common
sparc64_realfault_common:
	/* fault code in %g4, fault address in %g5, etrap will
	 * preserve these two values in %l4 and %l5 respectively
	 */
	ba,pt	%xcc, etrap			! Save trap state
1:	 rd	%pc, %g7			! ...
	stb	%l4, [%g6 + TI_FAULT_CODE]	! Save fault code
	stx	%l5, [%g6 + TI_FAULT_ADDR]	! Save fault address
	call	do_sparc64_fault		! Call fault handler
	 add	%sp, PTREGS_OFF, %o0		! Compute pt_regs arg
	ba,pt	%xcc, rtrap			! Restore cpu state
	 nop					! Delay slot (fill me)

winfix_trampoline:
	rdpr	%tpc, %g3			! Prepare winfixup TNPC
	or	%g3, 0x7c, %g3			! Compute branch offset
	wrpr	%g3, %tnpc			! Write it into TNPC
	done					! Trap return

	/* Insert an entry into the TSB.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1: tag
	 * %o2:	pte
	 */
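	/* The PSTATE_IE toggle below runs the lock/write pair with
	 * interrupts disabled so the tag and TTE update cannot be
	 * interrupted on this cpu.
	 */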
	.align	32
	.globl	__tsb_insert
__tsb_insert:
	rdpr	%pstate, %o5
	wrpr	%o5, PSTATE_IE, %pstate
	TSB_LOCK_TAG(%o0, %g2, %g3)
	TSB_WRITE(%o0, %o2, %o1)
	wrpr	%o5, %pstate
	retl
	 nop
	.size	__tsb_insert, .-__tsb_insert

	/* Flush the given TSB entry if it has the matching
	 * tag.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1:	tag
	 */
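	/* Spin while the lock bit is set in the tag, then, if the tag
	 * still matches, compare-and-swap it to the invalid pattern;
	 * a lost CAS race simply retries from the top.
	 */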
	.align	32
	.globl	tsb_flush
	.type	tsb_flush,#function
tsb_flush:
	sethi	%hi(TSB_TAG_LOCK_HIGH), %g2
1:	TSB_LOAD_TAG(%o0, %g1)
	srlx	%g1, 32, %o3
	andcc	%o3, %g2, %g0
	bne,pn	%icc, 1b
	 nop
	cmp	%g1, %o1
	mov	1, %o3
	bne,pt	%xcc, 2f
	 sllx	%o3, TSB_TAG_INVALID_BIT, %o3
	TSB_CAS_TAG(%o0, %g1, %o3)
	cmp	%g1, %o3
	bne,pn	%xcc, 1b
	 nop
2:	retl
	 nop
	.size	tsb_flush, .-tsb_flush

	/* Reload MMU related context switch state at
	 * schedule() time.
	 *
	 * %o0: page table physical address
	 * %o1:	TSB base config pointer
	 * %o2:	TSB huge config pointer, or NULL if none
	 * %o3:	Hypervisor TSB descriptor physical address
	 * %o4: Secondary context to load, if non-zero
	 *
	 * We have to run this whole thing with interrupts
	 * disabled so that the current cpu doesn't change
	 * due to preemption.
	 */
	.align	32
	.globl	__tsb_context_switch
	.type	__tsb_context_switch,#function
__tsb_context_switch:
	rdpr	%pstate, %g1
	wrpr	%g1, PSTATE_IE, %pstate

	brz,pn	%o4, 1f
	 mov	SECONDARY_CONTEXT, %o5

661:	stxa	%o4, [%o5] ASI_DMMU
	.section .sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%o4, [%o5] ASI_MMU
	.previous
	flush	%g6

1:
	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)

	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]

	ldx	[%o1 + TSB_CONFIG_REG_VAL], %o0
	brz,pt	%o2, 1f
	 mov	-1, %g3

	ldx	[%o2 + TSB_CONFIG_REG_VAL], %g3

1:	stx	%g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]

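	/* A tlb_type of 3 (hypervisor) selects the sun4v TSB switch
	 * path below; anything else branches to the sun4u MMU TSB
	 * register setup at 50:.
	 */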
	sethi	%hi(tlb_type), %g2
	lduw	[%g2 + %lo(tlb_type)], %g2
	cmp	%g2, 3
	bne,pt	%icc, 50f
	 nop

	/* Hypervisor TSB switch. */
	mov	SCRATCHPAD_UTSBREG1, %o5
	stxa	%o0, [%o5] ASI_SCRATCHPAD
	mov	SCRATCHPAD_UTSBREG2, %o5
	stxa	%g3, [%o5] ASI_SCRATCHPAD

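	/* Set up the HV_FAST_MMU_TSB_CTXNON0 hypervisor call: %o0 is
	 * the number of TSB descriptors (one when there is no huge TSB,
	 * i.e. its config word is -1, otherwise two) and %o1 is the
	 * physical address of the descriptor array the caller passed
	 * in %o3.
	 */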
	mov	2, %o0
	cmp	%g3, -1
	move	%xcc, 1, %o0

	mov	HV_FAST_MMU_TSB_CTXNON0, %o5
	mov	%o3, %o1
	ta	HV_FAST_TRAP

	/* Finish up.  */
	ba,pt	%xcc, 9f
	 nop

	/* SUN4U TSB switch.  */
50:	mov	TSB_REG, %o5
	stxa	%o0, [%o5] ASI_DMMU
	membar	#Sync
	stxa	%o0, [%o5] ASI_IMMU
	membar	#Sync

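	/* If the base TSB has a kernel mapping (a non-zero
	 * TSB_CONFIG_MAP_VADDR), write it directly into the D-TLB entry
	 * indexed by sparc64_highest_unlocked_tlb_ent; a huge TSB
	 * mapping, if present, goes into the entry just below that one.
	 */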
2:	ldx	[%o1 + TSB_CONFIG_MAP_VADDR], %o4
	brz	%o4, 9f
	 ldx	[%o1 + TSB_CONFIG_MAP_PTE], %o5

	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %g2
	mov	TLB_TAG_ACCESS, %g3
	lduw	[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
	stxa	%o4, [%g3] ASI_DMMU
	membar	#Sync
	sllx	%g2, 3, %g2
	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
	membar	#Sync

	brz,pt	%o2, 9f
	 nop

	ldx	[%o2 + TSB_CONFIG_MAP_VADDR], %o4
	ldx	[%o2 + TSB_CONFIG_MAP_PTE], %o5
	mov	TLB_TAG_ACCESS, %g3
	stxa	%o4, [%g3] ASI_DMMU
	membar	#Sync
	sub	%g2, (1 << 3), %g2
	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
	membar	#Sync

9:
	wrpr	%g1, %pstate

	retl
	 nop
	.size	__tsb_context_switch, .-__tsb_context_switch

#define TSB_PASS_BITS	((1 << TSB_TAG_LOCK_BIT) | \
			 (1 << TSB_TAG_INVALID_BIT))

	.align	32
	.globl	copy_tsb
	.type	copy_tsb,#function
copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
			 * %o2=new_tsb_base, %o3=new_tsb_size
			 * %o4=page_size_shift
			 */
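	/* Walk the old TSB sixteen bytes (one entry) at a time, skip
	 * entries that are locked or invalid, rebuild the virtual
	 * address from the tag plus the old index, and rehash it into
	 * the new TSB.
	 */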
	sethi		%uhi(TSB_PASS_BITS), %g7
	srlx		%o3, 4, %o3
	add		%o0, %o1, %o1	/* end of old tsb */
	sllx		%g7, 32, %g7
	sub		%o3, 1, %o3	/* %o3 == new tsb hash mask */

	mov		%o4, %g1	/* page_size_shift */

661:	prefetcha	[%o0] ASI_N, #one_read
	.section	.tsb_phys_patch, "ax"
	.word		661b
	prefetcha	[%o0] ASI_PHYS_USE_EC, #one_read
	.previous

90:	andcc		%o0, (64 - 1), %g0
	bne		1f
	 add		%o0, 64, %o5

661:	prefetcha	[%o5] ASI_N, #one_read
	.section	.tsb_phys_patch, "ax"
	.word		661b
	prefetcha	[%o5] ASI_PHYS_USE_EC, #one_read
	.previous

1:	TSB_LOAD_QUAD(%o0, %g2)		/* %g2/%g3 == TSB entry */
	andcc		%g2, %g7, %g0	/* LOCK or INVALID set? */
	bne,pn		%xcc, 80f	/* Skip it */
	 sllx		%g2, 22, %o4	/* TAG --> VADDR */

	/* This can definitely be computed faster... */
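	/* In C terms (illustrative):
	 *
	 *	vaddr     = (tag << 22) | ((old_index & 511) << shift);
	 *	new_index = (vaddr >> shift) & new_hash_mask;
	 *
	 * where shift is the page_size_shift argument and old_index is
	 * the entry's position within the old TSB.
	 */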
	srlx		%o0, 4, %o5	/* Build index */
	and		%o5, 511, %o5	/* Mask index */
	sllx		%o5, %g1, %o5	/* Put into vaddr position */
	or		%o4, %o5, %o4	/* Full VADDR. */
	srlx		%o4, %g1, %o4	/* Shift down to create index */
	and		%o4, %o3, %o4	/* Mask with new_tsb_nents-1 */
	sllx		%o4, 4, %o4	/* Shift back up into tsb ent offset */
	TSB_STORE(%o2 + %o4, %g2)	/* Store TAG */
	add		%o4, 0x8, %o4	/* Advance to TTE */
	TSB_STORE(%o2 + %o4, %g3)	/* Store TTE */

80:	add		%o0, 16, %o0
	cmp		%o0, %o1
	bne,pt		%xcc, 90b
	 nop

	retl
	 nop
	.size		copy_tsb, .-copy_tsb

	/* Set the invalid bit in all TSB entries.  */
	.align		32
	.globl		tsb_init
	.type		tsb_init,#function
tsb_init:		/* %o0 = TSB vaddr, %o1 = size in bytes */
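	/* Only the tag word at offset 0 of each 16-byte entry needs the
	 * invalid bit, so the loop stores %g1 every 0x10 bytes and
	 * covers 0x100 bytes (16 entries) per iteration, prefetching a
	 * few cache lines ahead of the stores.
	 */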
	prefetch	[%o0 + 0x000], #n_writes
	mov		1, %g1
	prefetch	[%o0 + 0x040], #n_writes
	sllx		%g1, TSB_TAG_INVALID_BIT, %g1
	prefetch	[%o0 + 0x080], #n_writes
1:	prefetch	[%o0 + 0x0c0], #n_writes
	stx		%g1, [%o0 + 0x00]
	stx		%g1, [%o0 + 0x10]
	stx		%g1, [%o0 + 0x20]
	stx		%g1, [%o0 + 0x30]
	prefetch	[%o0 + 0x100], #n_writes
	stx		%g1, [%o0 + 0x40]
	stx		%g1, [%o0 + 0x50]
	stx		%g1, [%o0 + 0x60]
	stx		%g1, [%o0 + 0x70]
	prefetch	[%o0 + 0x140], #n_writes
	stx		%g1, [%o0 + 0x80]
	stx		%g1, [%o0 + 0x90]
	stx		%g1, [%o0 + 0xa0]
	stx		%g1, [%o0 + 0xb0]
	prefetch	[%o0 + 0x180], #n_writes
	stx		%g1, [%o0 + 0xc0]
	stx		%g1, [%o0 + 0xd0]
	stx		%g1, [%o0 + 0xe0]
	stx		%g1, [%o0 + 0xf0]
	subcc		%o1, 0x100, %o1
	bne,pt		%xcc, 1b
	 add		%o0, 0x100, %o0
	retl
	 nop
	nop
	nop
	.size		tsb_init, .-tsb_init

	.globl		NGtsb_init
	.type		NGtsb_init,#function
NGtsb_init:
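	/* Niagara variant of tsb_init: the same 0x100-bytes-per-pass
	 * loop, but using the block-init quad ASI
	 * (ASI_BLK_INIT_QUAD_LDD_P) for the stores; note the trailing
	 * membar #Sync and that the caller's %asi is saved and restored
	 * around the loop.
	 */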
	rd		%asi, %g2
	mov		1, %g1
	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
	sllx		%g1, TSB_TAG_INVALID_BIT, %g1
1:	stxa		%g1, [%o0 + 0x00] %asi
	stxa		%g1, [%o0 + 0x10] %asi
	stxa		%g1, [%o0 + 0x20] %asi
	stxa		%g1, [%o0 + 0x30] %asi
	stxa		%g1, [%o0 + 0x40] %asi
	stxa		%g1, [%o0 + 0x50] %asi
	stxa		%g1, [%o0 + 0x60] %asi
	stxa		%g1, [%o0 + 0x70] %asi
	stxa		%g1, [%o0 + 0x80] %asi
	stxa		%g1, [%o0 + 0x90] %asi
	stxa		%g1, [%o0 + 0xa0] %asi
	stxa		%g1, [%o0 + 0xb0] %asi
	stxa		%g1, [%o0 + 0xc0] %asi
	stxa		%g1, [%o0 + 0xd0] %asi
	stxa		%g1, [%o0 + 0xe0] %asi
	stxa		%g1, [%o0 + 0xf0] %asi
	subcc		%o1, 0x100, %o1
	bne,pt		%xcc, 1b
	 add		%o0, 0x100, %o0
	membar		#Sync
	retl
	 wr		%g2, 0x0, %asi
	.size		NGtsb_init, .-NGtsb_init