1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright (C) 2012-2014 Panasonic Corporation
4 * Copyright (C) 2015-2016 Socionext Inc.
5 * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
6 */
7
8 #include <common.h>
9 #include <cpu_func.h>
10 #include <linux/io.h>
11 #include <linux/kernel.h>
12 #include <asm/armv7.h>
13 #include <asm/processor.h>
14
15 #include "cache-uniphier.h"
16
17 /* control registers */
18 #define UNIPHIER_SSCC 0x500c0000 /* Control Register */
19 #define UNIPHIER_SSCC_BST (0x1 << 20) /* UCWG burst read */
20 #define UNIPHIER_SSCC_ACT (0x1 << 19) /* Inst-Data separate */
21 #define UNIPHIER_SSCC_WTG (0x1 << 18) /* WT gathering on */
22 #define UNIPHIER_SSCC_PRD (0x1 << 17) /* enable pre-fetch */
23 #define UNIPHIER_SSCC_ON (0x1 << 0) /* enable cache */
24 #define UNIPHIER_SSCLPDAWCR 0x500c0030 /* Unified/Data Active Way Control */
25 #define UNIPHIER_SSCLPIAWCR 0x500c0034 /* Instruction Active Way Control */
26
27 /* revision registers */
28 #define UNIPHIER_SSCID 0x503c0100 /* ID Register */
29
30 /* operation registers */
31 #define UNIPHIER_SSCOPE 0x506c0244 /* Cache Operation Primitive Entry */
32 #define UNIPHIER_SSCOPE_CM_INV 0x0 /* invalidate */
33 #define UNIPHIER_SSCOPE_CM_CLEAN 0x1 /* clean */
34 #define UNIPHIER_SSCOPE_CM_FLUSH 0x2 /* flush */
35 #define UNIPHIER_SSCOPE_CM_SYNC 0x8 /* sync (drain bufs) */
36 #define UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH 0x9 /* flush p-fetch buf */
37 #define UNIPHIER_SSCOQM 0x506c0248
38 #define UNIPHIER_SSCOQM_TID_MASK (0x3 << 21)
39 #define UNIPHIER_SSCOQM_TID_LRU_DATA (0x0 << 21)
40 #define UNIPHIER_SSCOQM_TID_LRU_INST (0x1 << 21)
41 #define UNIPHIER_SSCOQM_TID_WAY (0x2 << 21)
42 #define UNIPHIER_SSCOQM_S_MASK (0x3 << 17)
43 #define UNIPHIER_SSCOQM_S_RANGE (0x0 << 17)
44 #define UNIPHIER_SSCOQM_S_ALL (0x1 << 17)
45 #define UNIPHIER_SSCOQM_S_WAY (0x2 << 17)
46 #define UNIPHIER_SSCOQM_CE (0x1 << 15) /* notify completion */
47 #define UNIPHIER_SSCOQM_CW (0x1 << 14)
48 #define UNIPHIER_SSCOQM_CM_MASK (0x7)
49 #define UNIPHIER_SSCOQM_CM_INV 0x0 /* invalidate */
50 #define UNIPHIER_SSCOQM_CM_CLEAN 0x1 /* clean */
51 #define UNIPHIER_SSCOQM_CM_FLUSH 0x2 /* flush */
52 #define UNIPHIER_SSCOQM_CM_PREFETCH 0x3 /* prefetch to cache */
53 #define UNIPHIER_SSCOQM_CM_PREFETCH_BUF 0x4 /* prefetch to pf-buf */
54 #define UNIPHIER_SSCOQM_CM_TOUCH 0x5 /* touch */
55 #define UNIPHIER_SSCOQM_CM_TOUCH_ZERO 0x6 /* touch to zero */
56 #define UNIPHIER_SSCOQM_CM_TOUCH_DIRTY 0x7 /* touch with dirty */
57 #define UNIPHIER_SSCOQAD 0x506c024c /* Cache Operation Queue Address */
58 #define UNIPHIER_SSCOQSZ 0x506c0250 /* Cache Operation Queue Size */
59 #define UNIPHIER_SSCOQMASK 0x506c0254 /* Cache Operation Queue Address Mask */
60 #define UNIPHIER_SSCOQWN 0x506c0258 /* Cache Operation Queue Way Number */
61 #define UNIPHIER_SSCOPPQSEF 0x506c025c /* Cache Operation Queue Set Complete */
62 #define UNIPHIER_SSCOPPQSEF_FE (0x1 << 1)
63 #define UNIPHIER_SSCOPPQSEF_OE (0x1 << 0)
64 #define UNIPHIER_SSCOLPQS 0x506c0260 /* Cache Operation Queue Status */
65 #define UNIPHIER_SSCOLPQS_EF (0x1 << 2)
66 #define UNIPHIER_SSCOLPQS_EST (0x1 << 1)
67 #define UNIPHIER_SSCOLPQS_QST (0x1 << 0)
68
69 #define UNIPHIER_SSC_LINE_SIZE 128
70 #define UNIPHIER_SSC_RANGE_OP_MAX_SIZE (0x00400000 - (UNIPHIER_SSC_LINE_SIZE))
71
72 #define UNIPHIER_SSCOQAD_IS_NEEDED(op) \
73 ((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
74 #define UNIPHIER_SSCOQWM_IS_NEEDED(op) \
75 (((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_WAY) || \
76 ((op & UNIPHIER_SSCOQM_TID_MASK) == UNIPHIER_SSCOQM_TID_WAY))
77
/*
 * uniphier_cache_sync - wait for outstanding outer-cache operations
 *
 * Issues the "sync" primitive to drain the controller's internal buffers,
 * then reads the register back so the posted write has completed before
 * this function returns.
 */
static void uniphier_cache_sync(void)
{
	/* drain internal buffers */
	writel(UNIPHIER_SSCOPE_CM_SYNC, UNIPHIER_SSCOPE);
	/* need a read back to confirm */
	readl(UNIPHIER_SSCOPE);
}
86
/**
 * uniphier_cache_maint_common - run a queue operation
 *
 * Posts one operation to the cache-operation queue, retrying until the
 * controller accepts it, then polls until the controller signals
 * completion.  Does NOT issue a trailing sync; callers that need one
 * must call uniphier_cache_sync() themselves.
 *
 * @start: start address of range operation (don't care for "all" operation)
 * @size: data size of range operation (don't care for "all" operation)
 * @ways: target ways (don't care for operations other than pre-fetch, touch)
 * @operation: flags to specify the desired cache operation
 */
static void uniphier_cache_maint_common(u32 start, u32 size, u32 ways,
					u32 operation)
{
	/* clear the complete notification flag */
	writel(UNIPHIER_SSCOLPQS_EF, UNIPHIER_SSCOLPQS);

	do {
		/* set cache operation */
		writel(UNIPHIER_SSCOQM_CE | operation, UNIPHIER_SSCOQM);

		/* set address range if needed */
		if (likely(UNIPHIER_SSCOQAD_IS_NEEDED(operation))) {
			writel(start, UNIPHIER_SSCOQAD);
			writel(size, UNIPHIER_SSCOQSZ);
		}

		/* set target ways if needed */
		if (unlikely(UNIPHIER_SSCOQWM_IS_NEEDED(operation)))
			writel(ways, UNIPHIER_SSCOQWN);

		/* retry if the request was rejected (format error/overflow) */
	} while (unlikely(readl(UNIPHIER_SSCOPPQSEF) &
			  (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));

	/* wait until the operation is completed */
	while (likely(readl(UNIPHIER_SSCOLPQS) != UNIPHIER_SSCOLPQS_EF))
		cpu_relax();
}
121
/*
 * uniphier_cache_maint_all - run a queue operation on the whole cache
 * @operation: cache operation flags (e.g. UNIPHIER_SSCOQM_CM_FLUSH)
 *
 * Applies @operation to the entire cache and syncs so the effect is
 * visible when this function returns.
 */
static void uniphier_cache_maint_all(u32 operation)
{
	uniphier_cache_maint_common(0, 0, 0, UNIPHIER_SSCOQM_S_ALL | operation);

	uniphier_cache_sync();
}
128
/*
 * uniphier_cache_maint_range - run a queue operation on an address range
 * @start: start address of the range (inclusive; rounded down to a line)
 * @end: end address of the range (exclusive; rounded up to a line)
 * @ways: target ways (only used by pre-fetch/touch type operations)
 * @operation: cache operation flags
 *
 * Splits the range into hardware-sized chunks and syncs at the end.
 */
static void uniphier_cache_maint_range(u32 start, u32 end, u32 ways,
				       u32 operation)
{
	u32 size;

	/*
	 * If the start address is not aligned,
	 * perform a cache operation for the first cache-line
	 */
	start = start & ~(UNIPHIER_SSC_LINE_SIZE - 1);

	size = end - start;

	/*
	 * A size within one line of 4GB cannot be expressed by a range
	 * operation; fall back to a whole-cache operation.
	 */
	if (unlikely(size >= (u32)(-UNIPHIER_SSC_LINE_SIZE))) {
		/* this means cache operation for all range */
		uniphier_cache_maint_all(operation);
		return;
	}

	/*
	 * If the end address is not aligned,
	 * perform a cache operation for the last cache-line
	 */
	size = ALIGN(size, UNIPHIER_SSC_LINE_SIZE);

	/* the hardware limits the size of a single range operation */
	while (size) {
		u32 chunk_size = min_t(u32, size, UNIPHIER_SSC_RANGE_OP_MAX_SIZE);

		uniphier_cache_maint_common(start, chunk_size, ways,
					    UNIPHIER_SSCOQM_S_RANGE | operation);

		start += chunk_size;
		size -= chunk_size;
	}

	uniphier_cache_sync();
}
166
/*
 * uniphier_cache_prefetch_range - pre-fetch a range into the given ways
 * @start: start address of the range (inclusive)
 * @end: end address of the range (exclusive)
 * @ways: bitmap of target cache ways
 */
void uniphier_cache_prefetch_range(u32 start, u32 end, u32 ways)
{
	uniphier_cache_maint_range(start, end, ways,
				   UNIPHIER_SSCOQM_TID_WAY |
				   UNIPHIER_SSCOQM_CM_PREFETCH);
}
173
/*
 * uniphier_cache_touch_range - allocate cache lines for a range
 * @start: start address of the range (inclusive)
 * @end: end address of the range (exclusive)
 * @ways: bitmap of target cache ways
 */
void uniphier_cache_touch_range(u32 start, u32 end, u32 ways)
{
	uniphier_cache_maint_range(start, end, ways,
				   UNIPHIER_SSCOQM_TID_WAY |
				   UNIPHIER_SSCOQM_CM_TOUCH);
}
180
/*
 * uniphier_cache_touch_zero_range - allocate zero-filled cache lines
 * @start: start address of the range (inclusive)
 * @end: end address of the range (exclusive)
 * @ways: bitmap of target cache ways
 *
 * "Touch to zero" allocates the lines in the given ways with their data
 * cleared (see UNIPHIER_SSCOQM_CM_TOUCH_ZERO).
 */
void uniphier_cache_touch_zero_range(u32 start, u32 end, u32 ways)
{
	uniphier_cache_maint_range(start, end, ways,
				   UNIPHIER_SSCOQM_TID_WAY |
				   UNIPHIER_SSCOQM_CM_TOUCH_ZERO);
}
187
/*
 * uniphier_cache_inv_way - invalidate all lines in the given ways
 * @ways: bitmap of target cache ways
 *
 * NOTE(review): unlike the range/all helpers this issues no trailing
 * uniphier_cache_sync() - presumably callers sync themselves when they
 * need the invalidation to have completed; confirm against call sites.
 */
void uniphier_cache_inv_way(u32 ways)
{
	uniphier_cache_maint_common(0, 0, ways,
				    UNIPHIER_SSCOQM_S_WAY |
				    UNIPHIER_SSCOQM_CM_INV);
}
194
/*
 * uniphier_cache_set_active_ways - set the active way mask for a CPU
 * @cpu: CPU index; each CPU has its own 4-byte way-control register
 * @active_ways: bitmap of ways the CPU may allocate into
 *
 * The offset of the way-control register bank from UNIPHIER_SSCC depends
 * on the SoC revision read from the ID register.
 *
 * Cleanup: the original initialized @base to the default-case value and
 * then unconditionally overwrote it in the switch (dead store); the
 * initializer is dropped and the switch remains the single assignment.
 */
void uniphier_cache_set_active_ways(int cpu, u32 active_ways)
{
	void __iomem *base;

	switch (readl(UNIPHIER_SSCID)) { /* revision */
	case 0x12:	/* LD4 */
	case 0x16:	/* sld8 */
		base = (void __iomem *)UNIPHIER_SSCC + 0x840;
		break;
	default:
		base = (void __iomem *)UNIPHIER_SSCC + 0xc00;
		break;
	}

	writel(active_ways, base + 4 * cpu);
}
211
uniphier_cache_endisable(int enable)212 static void uniphier_cache_endisable(int enable)
213 {
214 u32 tmp;
215
216 tmp = readl(UNIPHIER_SSCC);
217 if (enable)
218 tmp |= UNIPHIER_SSCC_ON;
219 else
220 tmp &= ~UNIPHIER_SSCC_ON;
221 writel(tmp, UNIPHIER_SSCC);
222 }
223
/* uniphier_cache_enable - turn the outer cache on */
void uniphier_cache_enable(void)
{
	uniphier_cache_endisable(1);
}
228
/* uniphier_cache_disable - turn the outer cache off */
void uniphier_cache_disable(void)
{
	uniphier_cache_endisable(0);
}
233
234 #ifdef CONFIG_CACHE_UNIPHIER
/* ARMv7 outer-cache hook: clean and invalidate the entire outer cache */
void v7_outer_cache_flush_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}
239
/* ARMv7 outer-cache hook: invalidate the entire outer cache */
void v7_outer_cache_inval_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}
244
/*
 * ARMv7 outer-cache hook: clean and invalidate [start, end) in the
 * outer cache
 */
void v7_outer_cache_flush_range(u32 start, u32 end)
{
	uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_FLUSH);
}
249
/*
 * ARMv7 outer-cache hook: invalidate [start, end) in the outer cache
 * @start: start address (inclusive)
 * @end: end address (exclusive)
 *
 * A partially-covered head or tail line may hold dirty data belonging to
 * adjacent buffers, so those lines are flushed (clean + invalidate)
 * instead of invalidated; only the fully-covered middle is invalidated.
 *
 * Fix: uniphier_cache_maint_range() takes an END address as its second
 * argument (it computes size = end - start), but the head/tail calls
 * passed UNIPHIER_SSC_LINE_SIZE - a size, not an end - which requested
 * a bogus range whenever the address was >= one line.  Pass
 * start/end + UNIPHIER_SSC_LINE_SIZE so exactly one line is flushed.
 */
void v7_outer_cache_inval_range(u32 start, u32 end)
{
	if (start & (UNIPHIER_SSC_LINE_SIZE - 1)) {
		start &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
		/* flush the partially-covered first line */
		uniphier_cache_maint_range(start,
					   start + UNIPHIER_SSC_LINE_SIZE, 0,
					   UNIPHIER_SSCOQM_CM_FLUSH);
		start += UNIPHIER_SSC_LINE_SIZE;
	}

	if (start >= end) {
		uniphier_cache_sync();
		return;
	}

	if (end & (UNIPHIER_SSC_LINE_SIZE - 1)) {
		end &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
		/* flush the partially-covered last line */
		uniphier_cache_maint_range(end, end + UNIPHIER_SSC_LINE_SIZE,
					   0, UNIPHIER_SSCOQM_CM_FLUSH);
	}

	if (start >= end) {
		uniphier_cache_sync();
		return;
	}

	/* invalidate the fully-covered middle part */
	uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_INV);
}
277
/* ARMv7 outer-cache hook: activate all ways for CPU 0, then enable */
void v7_outer_cache_enable(void)
{
	uniphier_cache_set_active_ways(0, U32_MAX);	/* activate all ways */
	uniphier_cache_enable();
}
283
/* ARMv7 outer-cache hook: disable the outer cache */
void v7_outer_cache_disable(void)
{
	uniphier_cache_disable();
}
288 #endif
289
/* U-Boot board hook: enable the data cache for this platform */
void enable_caches(void)
{
	dcache_enable();
}
294