/*
 *  linux/arch/cris/mm/tlb.c
 *
 *  Copyright (C) 2000, 2001  Axis Communications AB
 *
 *  Authors:   Bjorn Wesen (bjornw@axis.com)
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/tlb.h>

#define D(x)

/* The TLB can host up to 64 different mm contexts at the same time.
 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
 * page_id that has to match to give a hit. In page_id_map, we keep track
 * of which mm we have assigned to which page_id, so that we know when
 * to invalidate TLB entries.
 *
 * The last page_id is never running - it is used as an invalid page_id
 * so we can make TLB entries that will never match.
 *
 * Notice that we need to make the flushes atomic, otherwise an interrupt
 * handler that uses vmalloced memory might cause a TLB load in the middle
 * of a flush.
 */
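
/* Illustration (assumed values, not defined in this file): if NUM_PAGEID
 * is 64 and INVALID_PAGEID is 63, then page_ids 0..62 are handed out to
 * mms round-robin by alloc_context() below, while the last id is kept out
 * of circulation so an entry tagged with it can never match the page_id
 * currently loaded in R_MMU_CONTEXT.
 */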

struct mm_struct *page_id_map[NUM_PAGEID];
static int map_replace_ptr = 1;  /* which page_id_map entry to replace next */

/* the following functions are similar to those used in the PPC port */

static inline void
alloc_context(struct mm_struct *mm)
{
	struct mm_struct *old_mm;

	D(printk("tlb: alloc context %d (%p)\n", map_replace_ptr, mm));

	/* did we replace an mm ? */

	old_mm = page_id_map[map_replace_ptr];

	if(old_mm) {
		/* throw out any TLB entries belonging to the mm we replace
		 * in the map
		 */
		flush_tlb_mm(old_mm);

		old_mm->context.page_id = NO_CONTEXT;
	}

	/* insert it into the page_id_map */

	mm->context.page_id = map_replace_ptr;
	page_id_map[map_replace_ptr] = mm;

	map_replace_ptr++;

	if(map_replace_ptr == INVALID_PAGEID)
		map_replace_ptr = 0;         /* wrap around */
}
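
/* When a slot is reused, the old mm loses its page_id: its TLB entries are
 * flushed and its context is set to NO_CONTEXT, so a fresh page_id will be
 * allocated the next time get_mmu_context() is called for it. Because
 * map_replace_ptr is reset to 0 once it reaches INVALID_PAGEID, the invalid
 * page_id is never handed to an mm.
 */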

/*
 * if needed, get a new MMU context for the mm. otherwise nothing is done.
 */

void
get_mmu_context(struct mm_struct *mm)
{
	if(mm->context.page_id == NO_CONTEXT)
		alloc_context(mm);
}
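
/* Usage sketch (not part of this file): the context-switch path - on CRIS
 * typically switch_mm() - is assumed to call get_mmu_context() for the next
 * mm and then load its context.page_id into R_MMU_CONTEXT, so that TLB hits
 * are matched against the right page_id.
 */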

/* called by __exit_mm to destroy the used MMU context if any before
 * destroying the mm itself. this is only called when the last user of the mm
 * drops it.
 *
 * the only thing we really need to do here is mark the used PID slot
 * as empty.
 */

void
destroy_context(struct mm_struct *mm)
{
	if(mm->context.page_id != NO_CONTEXT) {
		D(printk("destroy_context %d (%p)\n", mm->context.page_id, mm));
		flush_tlb_mm(mm);  /* TODO this might be redundant ? */
		page_id_map[mm->context.page_id] = NULL;
	}
}
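
/* Note that clearing the slot to NULL means alloc_context() will find no
 * old mm there and can hand the page_id out again without another flush.
 */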

/* called once during VM initialization, from init.c */

void __init
tlb_init(void)
{
	int i;

	/* clear the page_id map */

	for (i = 1; i < ARRAY_SIZE(page_id_map); i++)
		page_id_map[i] = NULL;

	/* invalidate the entire TLB */

	flush_tlb_all();

	/* the init_mm has context 0 from the boot */

	page_id_map[0] = &init_mm;
}