/*
 * OMAP4 specific common source file.
 *
 * Copyright (C) 2010 Texas Instruments, Inc.
 * Author:
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/map.h>
#include <asm/memblock.h>

#include <plat/irqs.h>
#include <plat/sram.h>
#include <plat/omap-secure.h>

#include <mach/hardware.h>
#include <mach/omap-wakeupgen.h>

#include "common.h"
#include "omap4-sar-layout.h"

#ifdef CONFIG_CACHE_L2X0
static void __iomem *l2cache_base;
#endif

static void __iomem *sar_ram_base;

#ifdef CONFIG_OMAP4_ERRATA_I688
/* Used to implement memory barrier on DRAM path */
#define OMAP4_DRAM_BARRIER_VA			0xfe600000

void __iomem *dram_sync, *sram_sync;

static phys_addr_t paddr;
static u32 size;

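/*
 * omap_bus_sync() implements the i688 erratum workaround barrier: a
 * dummy read and write-back on the strongly-ordered DRAM and SRAM
 * mappings forces outstanding interconnect writes to complete before
 * the following isb().
 */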
void omap_bus_sync(void)
{
	if (dram_sync && sram_sync) {
		writel_relaxed(readl_relaxed(dram_sync), dram_sync);
		writel_relaxed(readl_relaxed(sram_sync), sram_sync);
		isb();
	}
}
EXPORT_SYMBOL(omap_bus_sync);

/* Steal one page physical memory for barrier implementation */
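/*
 * Although only one page is needed, a full 1MB region aligned to 1MB
 * is reserved, which is enough for omap_barriers_init() to cover the
 * strongly-ordered barrier area with a section mapping.
 */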
int __init omap_barrier_reserve_memblock(void)
{
	size = ALIGN(PAGE_SIZE, SZ_1M);
	paddr = arm_memblock_steal(size, SZ_1M);

	return 0;
}

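/*
 * Map the stolen DRAM region as strongly-ordered memory and record
 * the DRAM and SRAM virtual addresses used as barrier targets by
 * omap_bus_sync().
 */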
void __init omap_barriers_init(void)
{
	struct map_desc dram_io_desc[1];

	dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
	dram_io_desc[0].pfn = __phys_to_pfn(paddr);
	dram_io_desc[0].length = size;
	dram_io_desc[0].type = MT_MEMORY_SO;
	iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
	dram_sync = (void __iomem *) dram_io_desc[0].virtual;
	sram_sync = (void __iomem *) OMAP4_SRAM_VA;

	pr_info("OMAP4: Map 0x%08llx to 0x%08lx for dram barrier\n",
		(long long) paddr, dram_io_desc[0].virtual);
}
#else
void __init omap_barriers_init(void)
{}
#endif

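/*
 * Create static mappings for the GIC distributor and CPU interface,
 * initialise the OMAP wakeup generator, then hand both mappings to
 * the common GIC init code.
 */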
void __init gic_init_irq(void)
{
	void __iomem *omap_irq_base;
	void __iomem *gic_dist_base_addr;

	/* Static mapping, never released */
	gic_dist_base_addr = ioremap(OMAP44XX_GIC_DIST_BASE, SZ_4K);
	BUG_ON(!gic_dist_base_addr);

	/* Static mapping, never released */
	omap_irq_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
	BUG_ON(!omap_irq_base);

	omap_wakeupgen_init();

	gic_init(0, 29, gic_dist_base_addr, omap_irq_base);
}

#ifdef CONFIG_CACHE_L2X0

void __iomem *omap4_get_l2cache_base(void)
{
	return l2cache_base;
}

static void omap4_l2x0_disable(void)
{
	/* Disable PL310 L2 Cache controller */
	omap_smc1(0x102, 0x0);
}

static void omap4_l2x0_set_debug(unsigned long val)
{
	/* Program PL310 L2 Cache controller debug register */
	omap_smc1(0x100, val);
}

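/*
 * Map the PL310, build the auxiliary control value for the ES1.0 or
 * ES2.0+ way size, enable the cache and hook up the OMAP4-specific
 * outer-cache callbacks.
 */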
static int __init omap_l2_cache_init(void)
{
	u32 aux_ctrl = 0;

	/*
	 * To avoid code running on other OMAPs in
	 * multi-omap builds
	 */
	if (!cpu_is_omap44xx())
		return -ENODEV;

	/* Static mapping, never released */
	l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
	if (WARN_ON(!l2cache_base))
		return -ENOMEM;

	/*
	 * 16-way associativity, parity disabled
	 * Way size - 32KB (es1.0)
	 * Way size - 64KB (es2.0 +)
	 */
	aux_ctrl = ((1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT) |
			(0x1 << 25) |
			(0x1 << L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT) |
			(0x1 << L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT));

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		aux_ctrl |= 0x2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT;
	} else {
		aux_ctrl |= ((0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
			(1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
			(1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
			(1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) |
			(1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT));
	}
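	/*
	 * The PL310 control and auxiliary control registers are only
	 * writable from the secure side on OMAP4, so they are programmed
	 * through secure monitor calls rather than direct MMIO writes.
	 */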
	if (omap_rev() != OMAP4430_REV_ES1_0)
		omap_smc1(0x109, aux_ctrl);

	/* Enable PL310 L2 Cache controller */
	omap_smc1(0x102, 0x1);

	l2x0_init(l2cache_base, aux_ctrl, L2X0_AUX_CTRL_MASK);

	/*
	 * Override the default outer_cache.disable with an OMAP4-specific
	 * one
	 */
	outer_cache.disable = omap4_l2x0_disable;
	outer_cache.set_debug = omap4_l2x0_set_debug;

	return 0;
}
early_initcall(omap_l2_cache_init);
#endif

void __iomem *omap4_get_sar_ram_base(void)
{
	return sar_ram_base;
}

/*
 * SAR RAM used to save and restore the HW
 * context in low power modes
 */
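/*
 * Offsets within this area are defined in omap4-sar-layout.h,
 * included above.
 */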
static int __init omap4_sar_ram_init(void)
{
	/*
	 * To avoid code running on other OMAPs in
	 * multi-omap builds
	 */
	if (!cpu_is_omap44xx())
		return -ENOMEM;

	/* Static mapping, never released */
	sar_ram_base = ioremap(OMAP44XX_SAR_RAM_BASE, SZ_16K);
	if (WARN_ON(!sar_ram_base))
		return -ENOMEM;

	return 0;
}
early_initcall(omap4_sar_ram_init);