/*
 * hw_mmu.h
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * MMU types and API declarations
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#ifndef _HW_MMU_H
#define _HW_MMU_H

#include <linux/types.h>

/* Bitmasks for interrupt sources */
#define HW_MMU_TRANSLATION_FAULT   0x2
#define HW_MMU_ALL_INTERRUPTS      0x1F

#define HW_MMU_COARSE_PAGE_SIZE 0x400

/*
 * hw_mmu_mixed_size_t: specifies whether a mapping follows the TLB element
 * size or the CPU element size.
 */
enum hw_mmu_mixed_size_t {
	HW_MMU_TLBES,
	HW_MMU_CPUES
};

/* hw_mmu_map_attrs_t: struct containing MMU mapping attributes */
struct hw_mmu_map_attrs_t {
	enum hw_endianism_t endianism;
	enum hw_element_size_t element_size;
	enum hw_mmu_mixed_size_t mixed_size;
	bool donotlockmpupage;
};

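/*
 * Example (illustrative only): a caller could describe a little-endian,
 * 32-bit element mapping that follows the CPU element size like this.
 * HW_LITTLE_ENDIAN and HW_ELEM_SIZE32BIT are assumed to be the endianness
 * and element-size values provided by hw_defs.h.
 *
 *	struct hw_mmu_map_attrs_t map_attrs = {
 *		.endianism = HW_LITTLE_ENDIAN,
 *		.element_size = HW_ELEM_SIZE32BIT,
 *		.mixed_size = HW_MMU_CPUES,
 *		.donotlockmpupage = false,
 *	};
 */
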
extern hw_status hw_mmu_enable(void __iomem *base_address);

extern hw_status hw_mmu_disable(void __iomem *base_address);

extern hw_status hw_mmu_num_locked_set(void __iomem *base_address,
				       u32 num_locked_entries);

extern hw_status hw_mmu_victim_num_set(void __iomem *base_address,
				       u32 victim_entry_num);

/* For MMU faults */
extern hw_status hw_mmu_event_ack(void __iomem *base_address,
				  u32 irq_mask);

extern hw_status hw_mmu_event_disable(void __iomem *base_address,
				      u32 irq_mask);

extern hw_status hw_mmu_event_enable(void __iomem *base_address,
				     u32 irq_mask);

extern hw_status hw_mmu_event_status(void __iomem *base_address,
				     u32 *irq_mask);

extern hw_status hw_mmu_fault_addr_read(void __iomem *base_address,
					u32 *addr);

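/*
 * Example (illustrative only): a fault service routine built on the event
 * helpers above might check the pending sources, read the faulting address
 * and then acknowledge the interrupt. 'mmu_base' stands for the ioremap()'d
 * MMU register base supplied by the caller.
 *
 *	u32 status = 0, fault_va = 0;
 *
 *	hw_mmu_event_status(mmu_base, &status);
 *	if (status & HW_MMU_TRANSLATION_FAULT) {
 *		hw_mmu_fault_addr_read(mmu_base, &fault_va);
 *		hw_mmu_event_ack(mmu_base, HW_MMU_TRANSLATION_FAULT);
 *	}
 */
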
/* Set the translation table base (TTB) address */
extern hw_status hw_mmu_ttb_set(void __iomem *base_address,
				u32 ttb_phys_addr);

extern hw_status hw_mmu_twl_enable(void __iomem *base_address);

extern hw_status hw_mmu_twl_disable(void __iomem *base_address);

extern hw_status hw_mmu_tlb_add(void __iomem *base_address,
				u32 physical_addr,
				u32 virtual_addr,
				u32 page_sz,
				u32 entry_num,
				struct hw_mmu_map_attrs_t *map_attrs,
				s8 preserved_bit, s8 valid_bit);

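/*
 * Example (illustrative only): load TLB entry 0 with a 4 KB mapping from
 * 'va' to 'pa', with both preserved_bit and valid_bit set. 'mmu_base',
 * 'pa', 'va' and 'map_attrs' are placeholders supplied by the caller.
 *
 *	hw_mmu_tlb_add(mmu_base, pa, va, HW_PAGE_SIZE4KB, 0,
 *		       &map_attrs, 1, 1);
 */
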
/* For PTEs */
extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
				u32 physical_addr,
				u32 virtual_addr,
				u32 page_sz,
				struct hw_mmu_map_attrs_t *map_attrs);

extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
				  u32 virtual_addr, u32 page_size);

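/*
 * Example (illustrative only): write and later clear a 1 MB section entry
 * for 'va' in the L1 page table whose virtual address is 'l1_tbl_va'.
 * 'pa', 'va', 'l1_tbl_va' and 'map_attrs' are placeholders supplied by the
 * caller.
 *
 *	hw_mmu_pte_set(l1_tbl_va, pa, va, HW_PAGE_SIZE1MB, &map_attrs);
 *	hw_mmu_pte_clear(l1_tbl_va, va, HW_PAGE_SIZE1MB);
 */
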
void hw_mmu_tlb_flush_all(void __iomem *base);

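/*
 * hw_mmu_pte_addr_l1() returns the address of the L1 descriptor for 'va'
 * inside the L1 table at 'l1_base': the table index is va[31:20] and each
 * descriptor is 4 bytes, e.g. va = 0x82345678 yields l1_base + 0x208C.
 */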
static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
{
	u32 pte_addr;
	u32 va31_to20;

	va31_to20 = va >> (20 - 2);	/* va[31:20] scaled by 4 in one shift */
	va31_to20 &= 0xFFFFFFFCUL;
	pte_addr = l1_base + va31_to20;

	return pte_addr;
}

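/*
 * hw_mmu_pte_addr_l2() returns the address of the L2 descriptor for 'va'
 * inside the L2 (coarse) table at 'l2_base': the table index is va[19:12]
 * and each descriptor is 4 bytes, e.g. va = 0x82345678 yields offset 0x114
 * from the 1 KB-aligned table base.
 */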
static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
{
	u32 pte_addr;

	pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);

	return pte_addr;
}

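/*
 * hw_mmu_pte_coarse_l1() extracts the 1 KB-aligned base address of the L2
 * (coarse) page table from an L1 descriptor.
 */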
static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val)
{
	u32 pte_coarse;

	pte_coarse = pte_val & 0xFFFFFC00;

	return pte_coarse;
}

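/*
 * hw_mmu_pte_size_l1() decodes an L1 descriptor: type 0x1 points to a
 * coarse L2 table, type 0x2 is a section (1 MB, or 16 MB when bit 18 is
 * set). Any other type yields 0.
 */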
static inline u32 hw_mmu_pte_size_l1(u32 pte_val)
{
	u32 pte_size = 0;

	if ((pte_val & 0x3) == 0x1) {
		/* Points to L2 PT */
		pte_size = HW_MMU_COARSE_PAGE_SIZE;
	}

	if ((pte_val & 0x3) == 0x2) {
		if (pte_val & (1 << 18))
			pte_size = HW_PAGE_SIZE16MB;
		else
			pte_size = HW_PAGE_SIZE1MB;
	}

	return pte_size;
}

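/*
 * hw_mmu_pte_size_l2() decodes an L2 descriptor: bit 1 marks a 4 KB small
 * page, bit 0 alone marks a 64 KB large page, and 0 means no valid mapping.
 */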
static inline u32 hw_mmu_pte_size_l2(u32 pte_val)
{
	u32 pte_size = 0;

	if (pte_val & 0x2)
		pte_size = HW_PAGE_SIZE4KB;
	else if (pte_val & 0x1)
		pte_size = HW_PAGE_SIZE64KB;

	return pte_size;
}

#endif /* _HW_MMU_H */