/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU substantially follows the
 * architecture specification.  This includes the 6xx, 7xx, 7xxx,
 * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/export.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

/*
 * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
 * (virtual segment identifiers) for each context.  Although the
 * hardware supports 24-bit VSIDs, and thus >1 million contexts,
 * we only use 32,768 of them.  That is ample, since there can be
 * at most around 30,000 tasks in the system anyway, and it means
 * that we can use a bitmap to indicate which contexts are in use.
 * Using a bitmap means that we entirely avoid all of the problems
 * that we used to have when the context number overflowed,
 * particularly on SMP systems.
 *  -- paulus.
 */
#define NO_CONTEXT      	((unsigned long) -1)
#define LAST_CONTEXT    	32767
#define FIRST_CONTEXT    	1
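
/*
 * Sizing note: LAST_CONTEXT = 32767 gives 32768 context numbers in
 * total, so the context_map bitmap declared below occupies 32768 bits
 * (4 KB) regardless of BITS_PER_LONG.
 */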

/*
 * This function defines the mapping from contexts to VSIDs (virtual
 * segment IDs).  We use a skew on both the context and the high 4 bits
 * of the 32-bit virtual address (the "effective segment ID") in order
 * to spread out the entries in the MMU hash table.  Note, if this
 * function is changed then arch/ppc/mm/hashtable.S will have to be
 * changed to correspond.
 *
 * CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
 *				 & 0xffffff)
 */
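
/*
 * Worked example of the mapping above: with ctx = 1 and an effective
 * address in segment 1 (va >> 28 == 1),
 *   VSID = (1 * 897 * 16 + 1 * 0x111) & 0xffffff
 *        = (0x3810 + 0x111) & 0xffffff = 0x3921.
 * Adjacent contexts and adjacent segments thus land on well-separated
 * VSIDs, which is what spreads the entries through the hash table.
 */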

static unsigned long next_mmu_context;
static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];

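/*
 * Allocate a fresh context number.  Start scanning the bitmap at
 * next_mmu_context, atomically claim the first free bit (wrapping back
 * to 0 once LAST_CONTEXT is passed), and remember where to begin the
 * next search.  Exported (GPL) so that other low-level MMU code can
 * allocate contexts for itself as well.
 */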
unsigned long __init_new_context(void)
{
	unsigned long ctx = next_mmu_context;

	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;

	return ctx;
}
EXPORT_SYMBOL_GPL(__init_new_context);

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	mm->context.id = __init_new_context();

	return 0;
}

/*
 * Free a context ID. Make sure to call this with preempt disabled!
 */
void __destroy_context(unsigned long ctx)
{
	clear_bit(ctx, context_map);
}
EXPORT_SYMBOL_GPL(__destroy_context);

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	preempt_disable();
	if (mm->context.id != NO_CONTEXT) {
		__destroy_context(mm->context.id);
		mm->context.id = NO_CONTEXT;
	}
	preempt_enable();
}

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/* Reserve context 0 for kernel use */
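	/*
	 * (1 << FIRST_CONTEXT) - 1 sets bits 0 .. FIRST_CONTEXT-1, i.e.
	 * just bit 0 here, so context 0 is never handed out by
	 * __init_new_context().
	 */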
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
}