/*
 * Blackfin CPLB initialization
 *
 * Copyright 2007-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mem_map.h>

struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;

int first_switched_icplb PDT_ATTR;
int first_switched_dcplb PDT_ATTR;

struct cplb_boundary dcplb_bounds[9] PDT_ATTR;
struct cplb_boundary icplb_bounds[9] PDT_ATTR;

int icplb_nr_bounds PDT_ATTR;
int dcplb_nr_bounds PDT_ATTR;

void __init generate_cplb_tables_cpu(unsigned int cpu)
{
	int i_d, i_i;
	unsigned long addr;
	unsigned long cplb_pageflags, cplb_pagesize;

	struct cplb_entry *d_tbl = dcplb_tbl[cpu];
	struct cplb_entry *i_tbl = icplb_tbl[cpu];

	printk(KERN_INFO "NOMPU: setting up cplb tables\n");

	i_d = i_i = 0;

#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
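	/*
	 * Map page 0 as a 1kB "oops" page so that stray accesses through
	 * NULL pointers raise a CPLB fault instead of silently touching
	 * the start of DRAM.
	 */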
	/* Set up the zero page.  */
	d_tbl[i_d].addr = 0;
	d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
	i_tbl[i_i].addr = 0;
	i_tbl[i_i++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
#endif

	/* Cover kernel memory with 4M pages (16M where available).  */
	addr = 0;

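	/*
	 * Using the largest page size the part supports keeps down the
	 * number of locked CPLB entries needed to cover kernel memory.
	 */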
#ifdef PAGE_SIZE_16MB
	cplb_pageflags = PAGE_SIZE_16MB;
	cplb_pagesize = SIZE_16M;
#else
	cplb_pageflags = PAGE_SIZE_4MB;
	cplb_pagesize = SIZE_4M;
#endif

	for (; addr < memory_start; addr += cplb_pagesize) {
		d_tbl[i_d].addr = addr;
		d_tbl[i_d++].data = SDRAM_DGENERIC | cplb_pageflags;
		i_tbl[i_i].addr = addr;
		i_tbl[i_i++].data = SDRAM_IGENERIC | cplb_pageflags;
	}

#ifdef CONFIG_ROMKERNEL
	/* Cover kernel XIP flash area */
#ifdef CONFIG_BF60x
	addr = CONFIG_ROM_BASE & ~(16 * 1024 * 1024 - 1);
	d_tbl[i_d].addr = addr;
	d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_16MB;
	i_tbl[i_i].addr = addr;
	i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_16MB;
#else
	addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
	d_tbl[i_d].addr = addr;
	d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
	i_tbl[i_i].addr = addr;
	i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
#endif
#endif

	/* Cover L1 memory.  One 4M area each for code and data is enough.  */
	if (cpu == 0) {
		if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
			d_tbl[i_d].addr = L1_DATA_A_START;
			d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
		}
		i_tbl[i_i].addr = L1_CODE_START;
		i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
	}
#ifdef CONFIG_SMP
	else {
		if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
			d_tbl[i_d].addr = COREB_L1_DATA_A_START;
			d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
		}
		i_tbl[i_i].addr = COREB_L1_CODE_START;
		i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
	}
#endif
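	/*
	 * Everything installed above stays resident for the life of the
	 * system; the CPLB miss handlers only evict and refill entries from
	 * first_switched_* upwards.
	 */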
	first_switched_dcplb = i_d;
	first_switched_icplb = i_i;

	BUG_ON(first_switched_dcplb > MAX_CPLBS);
	BUG_ON(first_switched_icplb > MAX_CPLBS);

	while (i_d < MAX_CPLBS)
		d_tbl[i_d++].data = 0;
	while (i_i < MAX_CPLBS)
		i_tbl[i_i++].data = 0;
}

void __init generate_cplb_tables_all(void)
{
	unsigned long uncached_end;
	int i_d, i_i;

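	/*
	 * Each bounds entry pairs the exclusive upper address of a region
	 * with the CPLB data word to use for it; a data value of 0 marks an
	 * unmapped hole.  The CPLB miss handlers walk these tables to fill
	 * in entries on demand.
	 */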
	i_d = 0;
	/* Normal RAM, including MTD FS.  */
#ifdef CONFIG_MTD_UCLINUX
	uncached_end = memory_mtd_start + mtd_size;
#else
	uncached_end = memory_end;
#endif
	/*
	 * If the uncached DMA region is smaller than 1MB, round the cached
	 * boundary down to a 1MB boundary instead; mapping that extra chunk
	 * uncached avoids having to use 4kB pages, which would cause CPLB
	 * thrashing.
	 */
	if ((DMA_UNCACHED_REGION >= 1 * 1024 * 1024) || !DMA_UNCACHED_REGION ||
	    ((_ramend - uncached_end) >= 1 * 1024 * 1024))
		dcplb_bounds[i_d].eaddr = uncached_end;
	else
		dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024 - 1);
	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
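	/*
	 * Blackfin DMA is not cache coherent, so the region reserved for DMA
	 * buffers at the top of DRAM has to stay non-cacheable.
	 */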
	/* DMA uncached region.  */
	if (DMA_UNCACHED_REGION) {
		dcplb_bounds[i_d].eaddr = _ramend;
		dcplb_bounds[i_d++].data = SDRAM_DNON_CHBL;
	}
	if (_ramend != physical_mem_end) {
		/* Reserved memory.  */
		dcplb_bounds[i_d].eaddr = physical_mem_end;
		dcplb_bounds[i_d++].data = (reserved_mem_dcache_on ?
					    SDRAM_DGENERIC : SDRAM_DNON_CHBL);
	}
	/* Addressing hole up to the async bank.  */
	dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE;
	dcplb_bounds[i_d++].data = 0;
	/* ASYNC banks.  */
	dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
	dcplb_bounds[i_d++].data = SDRAM_EBIU;
	/* Addressing hole up to BootROM.  */
	dcplb_bounds[i_d].eaddr = BOOT_ROM_START;
	dcplb_bounds[i_d++].data = 0;
	/* BootROM -- largest one should be less than 1 meg.  */
	dcplb_bounds[i_d].eaddr = BOOT_ROM_START + BOOT_ROM_LENGTH;
	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
	if (L2_LENGTH) {
		/* Addressing hole up to L2 SRAM.  */
		dcplb_bounds[i_d].eaddr = L2_START;
		dcplb_bounds[i_d++].data = 0;
		/* L2 SRAM.  */
		dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH;
		dcplb_bounds[i_d++].data = L2_DMEMORY;
	}
	dcplb_nr_bounds = i_d;
	BUG_ON(dcplb_nr_bounds > ARRAY_SIZE(dcplb_bounds));

	i_i = 0;
	/* Normal RAM, including MTD FS.  */
	icplb_bounds[i_i].eaddr = uncached_end;
	icplb_bounds[i_i++].data = SDRAM_IGENERIC;
	if (_ramend != physical_mem_end) {
		/* DMA uncached region.  */
		if (DMA_UNCACHED_REGION) {
			/* Normally this hole is caught by the async below.  */
			icplb_bounds[i_i].eaddr = _ramend;
			icplb_bounds[i_i++].data = 0;
		}
		/* Reserved memory.  */
		icplb_bounds[i_i].eaddr = physical_mem_end;
		icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
					    SDRAM_IGENERIC : SDRAM_INON_CHBL);
	}
	/* Addressing hole up to the async bank.  */
	icplb_bounds[i_i].eaddr = ASYNC_BANK0_BASE;
	icplb_bounds[i_i++].data = 0;
	/* ASYNC banks.  */
	icplb_bounds[i_i].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
	icplb_bounds[i_i++].data = SDRAM_EBIU;
	/* Addressing hole up to BootROM.  */
	icplb_bounds[i_i].eaddr = BOOT_ROM_START;
	icplb_bounds[i_i++].data = 0;
	/* BootROM -- largest one should be less than 1 meg.  */
	icplb_bounds[i_i].eaddr = BOOT_ROM_START + BOOT_ROM_LENGTH;
	icplb_bounds[i_i++].data = SDRAM_IGENERIC;

	if (L2_LENGTH) {
		/* Addressing hole up to L2 SRAM.  */
		icplb_bounds[i_i].eaddr = L2_START;
		icplb_bounds[i_i++].data = 0;
		/* L2 SRAM.  */
		icplb_bounds[i_i].eaddr = L2_START + L2_LENGTH;
		icplb_bounds[i_i++].data = L2_IMEMORY;
	}
	icplb_nr_bounds = i_i;
	BUG_ON(icplb_nr_bounds > ARRAY_SIZE(icplb_bounds));
}