/** @file
*
* Copyright (c) 2015, Hisilicon Limited. All rights reserved.
* Copyright (c) 2015, Linaro Limited. All rights reserved.
*
* This program and the accompanying materials
* are licensed and made available under the terms and conditions of the BSD License
* which accompanies this distribution. The full text of the license may be found at
* http://opensource.org/licenses/bsd-license.php
*
* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
*
**/

#include <Uefi.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include <Library/TimerLib.h>
#include <Library/CacheMaintenanceLib.h>
#include <Library/IoLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/BaseMemoryLib.h>
#include <Library/ArmLib.h>

#include "Smmu.h"

/* Maximum number of context banks per SMMU */
#define SMMU_MAX_CBS 256

#ifdef CONFIG_MM_OUTER_SHAREABLE
#define SH_DOMAIN 2 /* outer shareable */
#else
#define SH_DOMAIN 3 /* inner shareable */
#endif

#define SMMU_OS_VMID 0
#define SMMU_CB_NUMIRPT 8
#define SMMU_S1CBT_SIZE 0x10000
#define SMMU_S2CBT_SIZE 0x2000
#define SMMU_S1CBT_SHIFT 16
#define SMMU_S2CBT_SHIFT 12


#define SMMU_CTRL_CR0 0x0
#define SMMU_CTRL_ACR 0x8
#define SMMU_CFG_S2CTBAR 0xc
#define SMMU_IDR0 0x10
#define SMMU_IDR1 0x14
#define SMMU_IDR2 0x18
#define SMMU_HIS_GFAR_LOW 0x20
#define SMMU_HIS_GFAR_HIGH 0x24
#define SMMU_RINT_GFSR 0x28
#define SMMU_RINT_GFSYNR 0x2c
#define SMMU_CFG_GFIM 0x30
#define SMMU_CFG_CBF 0x34
#define SMMU_TLBIALL 0x40
#define SMMU_TLBIVMID 0x44
#define SMMU_TLBISID 0x48
#define SMMU_TLBIVA_LOW 0x4c
#define SMMU_TLBIVA_HIGH 0x50
#define SMMU_TLBGSYNC 0x54
#define SMMU_TLBGSTATUS 0x58
#define SMMU_CXTIALL 0x60
#define SMMU_CXTIVMID 0x64
#define SMMU_CXTISID 0x68
#define SMMU_CXTGSYNC 0x6c
#define SMMU_CXTGSTATUS 0x70
#define SMMU_RINT_CB_FSR(n) (0x100 + ((n) << 2))
#define SMMU_RINT_CB_FSYNR(n) (0x120 + ((n) << 2))
#define SMMU_HIS_CB_FAR_LOW(n) (0x140 + ((n) << 3))
#define SMMU_HIS_CB_FAR_HIGH(n) (0x144 + ((n) << 3))
#define SMMU_CTRL_CB_RESUME(n) (0x180 + ((n) << 2))
#define SMMU_RINT_CB_FSYNR_MSTID 0x1a0

#define SMMU_CB_S2CR(n) (0x0 + ((n) << 5))
#define SMMU_CB_CBAR(n) (0x4 + ((n) << 5))
#define SMMU_CB_S1CTBAR(n) (0x18 + ((n) << 5))

#define SMMU_S1_MAIR0 0x0
#define SMMU_S1_MAIR1 0x4
#define SMMU_S1_TTBR0_L 0x8
#define SMMU_S1_TTBR0_H 0xc
#define SMMU_S1_TTBR1_L 0x10
#define SMMU_S1_TTBR1_H 0x14
#define SMMU_S1_TTBCR 0x18
#define SMMU_S1_SCTLR 0x1c

#define CFG_CBF_S1_ORGN_WA (1 << 12)
#define CFG_CBF_S1_IRGN_WA (1 << 10)
#define CFG_CBF_S1_SHCFG (SH_DOMAIN << 8)
#define CFG_CBF_S2_ORGN_WA (1 << 4)
#define CFG_CBF_S2_IRGN_WA (1 << 2)
#define CFG_CBF_S2_SHCFG (SH_DOMAIN << 0)

/* Configuration registers */
#define sCR0_CLIENTPD (1 << 0)
#define sCR0_GFRE (1 << 1)
#define sCR0_GFIE (1 << 2)
#define sCR0_GCFGFRE (1 << 4)
#define sCR0_GCFGFIE (1 << 5)

#define sACR_WC_EN (7 << 0)

#define ID0_S1TS (1 << 30)
#define ID0_S2TS (1 << 29)
#define ID0_NTS (1 << 28)
#define ID0_PTFS_SHIFT 24
#define ID0_PTFS_MASK 0x2
#define ID0_PTFS_V8_ONLY 0x2
#define ID0_CTTW (1 << 14)

#define ID2_OAS_SHIFT 8
#define ID2_OAS_MASK 0xff
#define ID2_IAS_SHIFT 0
#define ID2_IAS_MASK 0xff

#define S2CR_TYPE_SHIFT 16
#define S2CR_TYPE_MASK 0x3
#define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT)
#define S2CR_SHCFG_NS (3 << 8)
#define S2CR_MTCFG (1 << 11)
#define S2CR_MEMATTR_OIWB (0xf << 12)
#define S2CR_MTSH_WEAKEST (S2CR_SHCFG_NS | \
                           S2CR_MTCFG | S2CR_MEMATTR_OIWB)

/* Context bank attribute registers */
#define CBAR_VMID_SHIFT 0
#define CBAR_VMID_MASK 0xff
#define CBAR_S1_BPSHCFG_SHIFT 8
#define CBAR_S1_BPSHCFG_MASK 3
#define CBAR_S1_BPSHCFG_NSH 3
#define CBAR_S1_MEMATTR_SHIFT 12
#define CBAR_S1_MEMATTR_MASK 0xf
#define CBAR_S1_MEMATTR_WB 0xf
#define CBAR_TYPE_SHIFT 16
#define CBAR_TYPE_MASK 0x3
#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT 24
#define CBAR_IRPTNDX_MASK 0xff

#define SMMU_CB_BASE(smmu) ((smmu)->s1cbt)
#define SMMU_CB(n) ((n) << 5)

#define sTLBGSTATUS_GSACTIVE (1 << 0)
#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */

#define SCTLR_WACFG_WA (2 << 26)
#define SCTLR_RACFG_RA (2 << 24)
#ifdef CONFIG_P660_2P
#define SCTLR_SHCFG (1 << 22)
#else
#define SCTLR_SHCFG (2 << 22)
#endif
#define SCTLR_MTCFG (1 << 20)
#define SCTLR_MEMATTR_WB (0xf << 16)
#define SCTLR_MEMATTR_NC (0x5 << 16)
#define SCTLR_MEMATTR_NGNRE (0x1 << 16)
#define SCTLR_CACHE_WBRAWA (SCTLR_WACFG_WA | SCTLR_RACFG_RA | \
                            SCTLR_SHCFG | SCTLR_MTCFG | SCTLR_MEMATTR_WB)
#define SCTLR_CACHE_NC (SCTLR_SHCFG | \
                        SCTLR_MTCFG | SCTLR_MEMATTR_NC)
#define SCTLR_CACHE_NGNRE (SCTLR_SHCFG | \
                           SCTLR_MTCFG | SCTLR_MEMATTR_NGNRE)

#define SCTLR_CFCFG (1 << 7)
#define SCTLR_CFIE (1 << 6)
#define SCTLR_CFRE (1 << 5)
#define SCTLR_E (1 << 4)
#define SCTLR_AFED (1 << 3)
#define SCTLR_M (1 << 0)
#define SCTLR_EAE_SBOP (SCTLR_AFED)

#define RESUME_RETRY (0 << 0)
#define RESUME_TERMINATE (1 << 0)

#define TTBCR_TG0_4K (0 << 14)
#define TTBCR_TG0_64K (3 << 14)

#define TTBCR_SH0_SHIFT 12
#define TTBCR_SH0_MASK 0x3
#define TTBCR_SH_NS 0
#define TTBCR_SH_OS 2
#define TTBCR_SH_IS 3
#define TTBCR_ORGN0_SHIFT 10
#define TTBCR_IRGN0_SHIFT 8
#define TTBCR_RGN_MASK 0x3
#define TTBCR_RGN_NC 0
#define TTBCR_RGN_WBWA 1
#define TTBCR_RGN_WT 2
#define TTBCR_RGN_WB 3
#define TTBCR_T1SZ_SHIFT 16
#define TTBCR_T0SZ_SHIFT 0
#define TTBCR_SZ_MASK 0xf

#define MAIR_ATTR_SHIFT(n) ((n) << 3)
#define MAIR_ATTR_MASK 0xff
#define MAIR_ATTR_DEVICE 0x04
#define MAIR_ATTR_NC 0x44
#define MAIR_ATTR_WBRWA 0xff
#define MAIR_ATTR_IDX_NC 0
#define MAIR_ATTR_IDX_CACHE 1
#define MAIR_ATTR_IDX_DEV 2

#define FSR_MULTI (1 << 31)
#define FSR_EF (1 << 4)
#define FSR_PF (1 << 3)
#define FSR_AFF (1 << 2)
#define FSR_TF (1 << 1)
#define FSR_IGN (FSR_AFF)
#define FSR_FAULT (FSR_MULTI | FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_ASID(n) (0xff & ((n) >> 24))
#define FSYNR0_VMID(n) (0xff & ((n) >> 16))
#define FSYNR0_WNR (1 << 4)
#define FSYNR0_SS (1 << 2)
#define FSYNR0_CF (1 << 0)

#define SMMU_FEAT_COHERENT_WALK (1 << 0)
#define SMMU_FEAT_STREAM_MATCH (1 << 1)
#define SMMU_FEAT_TRANS_S1 (1 << 2)
#define SMMU_FEAT_TRANS_S2 (1 << 3)
#define SMMU_FEAT_TRANS_NESTED (1 << 4)

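/* VMID whose S2CR entry is programmed for bypass in SmmuConfigSwitch() */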
static UINT32 hisi_bypass_vmid = 0xff;

VOID writel_relaxed (UINT32 Value, UINTN Base)
{
  MmioWrite32 (Base, Value);
}

UINT32 readl_relaxed (UINTN Base)
{
  return MmioRead32 (Base);
}

/* Wait for any pending TLB invalidations to complete */
static void hisi_smmu_tlb_sync(SMMU_DEVICE *smmu)
{
  int count = 0;
  UINTN gr0_base = smmu->Base;

  writel_relaxed(0, gr0_base + SMMU_TLBGSYNC);
  while (readl_relaxed(gr0_base + SMMU_TLBGSTATUS)
         & sTLBGSTATUS_GSACTIVE) {
    if (++count == TLB_LOOP_TIMEOUT) {
      DEBUG ((EFI_D_ERROR, "TLB sync timed out -- SMMU (0x%p) may be deadlocked\n", gr0_base));
      return;
    }
    MicroSecondDelay (1);
  }
}

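/**
  Allocate an aligned, page-granular table from reserved memory for use as an
  SMMU context bank table.

  @param[in] Size       Size of the table in bytes.
  @param[in] Alignment  Required alignment of the table base, in bytes.

  @return Pointer to the allocated table, or NULL on failure.
**/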
VOID *
SmmuAllocateTable (
  UINTN Size,
  UINTN Alignment
  )
{
  return AllocateAlignedReservedPages (EFI_SIZE_TO_PAGES (Size), Alignment);
}

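/**
  Perform the initial clean-up of an SMMU: clear the global and per-context-bank
  fault status registers, mask all global interrupts, and program the
  cacheability/shareability attributes used for context bank table fetches
  (SMMU_CFG_CBF).

  @param[in] Smmu  The SMMU instance to initialize.

  @retval EFI_SUCCESS  The SMMU was initialized.
**/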
EFI_STATUS
SmmuInit (
  SMMU_DEVICE *Smmu
  )
{
  UINT32 Value;
  UINTN Base = Smmu->Base;
  UINTN Index;

  /* Clear Global FSR */
  Value = MmioRead32 (Base + SMMU_RINT_GFSR);
  MmioWrite32 (Base + SMMU_RINT_GFSR, Value);

  /* Mask all global interrupts */
  MmioWrite32 (Base + SMMU_CFG_GFIM, 0xFFFFFFFF);

  Value = CFG_CBF_S1_ORGN_WA | CFG_CBF_S1_IRGN_WA | CFG_CBF_S1_SHCFG;
  Value |= CFG_CBF_S2_ORGN_WA | CFG_CBF_S2_IRGN_WA | CFG_CBF_S2_SHCFG;
  MmioWrite32 (Base + SMMU_CFG_CBF, Value);

  /* Clear CB_FSR */
  for (Index = 0; Index < SMMU_CB_NUMIRPT; Index++) {
    MmioWrite32 (Base + SMMU_RINT_CB_FSR(Index), FSR_FAULT);
  }

  return EFI_SUCCESS;
}

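/**
  Allocate and initialize the stage 2 context bank table (S2CBT). Every entry
  starts with no stage 1 table linked (S1CTBAR = 0) and its S2CR type set to
  bypass, until a VMID is explicitly configured by SmmuConfigSwitch().

  @return Pointer to the new S2CBT, or NULL if the allocation failed.
**/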
VOID *
SmmuCreateS2Cbt (VOID)
{
  VOID *Table;
  UINTN Index;

  Table = SmmuAllocateTable (SMMU_S2CBT_SIZE, LShiftU64 (1, SMMU_S2CBT_SHIFT));
  if (Table == NULL) {
    DEBUG ((EFI_D_ERROR, "[%a]:[%dL] Allocate table failed!\n", __FUNCTION__, __LINE__));
    return NULL;
  }
  ZeroMem (Table, SMMU_S2CBT_SIZE);

  for (Index = 0; Index < SMMU_MAX_CBS; Index++) {
    MmioWrite32 ((UINTN)Table + SMMU_CB_S1CTBAR(Index), 0);
    MmioWrite32 ((UINTN)Table + SMMU_CB_S2CR(Index), S2CR_TYPE_BYPASS);
  }
  return Table;
}

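/**
  Allocate and zero a stage 1 context bank table (S1CBT). The caller is
  expected to link it into an S2CBT entry via the S1CTBAR field.

  @return Pointer to the new S1CBT, or NULL if the allocation failed.
**/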
VOID *
SmmuCreateS1Cbt (VOID)
{
  VOID *Table;

  Table = SmmuAllocateTable (SMMU_S1CBT_SIZE, LShiftU64 (1, SMMU_S1CBT_SHIFT));
  if (Table == NULL) {
    DEBUG ((EFI_D_ERROR, "[%a]:[%dL] Allocate table failed!\n", __FUNCTION__, __LINE__));
    return NULL;
  }
  ZeroMem (Table, SMMU_S1CBT_SIZE);

  return Table;
}

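/**
  Build the context bank tables for this SMMU: create the S2CBT and an S1CBT,
  link the S1CBT into the S2CBT entry for SMMU_OS_VMID, program the SCTLR of
  context banks 0 and 1, and set up the CBAR/S2CR entries so that the OS VMID
  performs stage 1 translation with stage 2 bypass while hisi_bypass_vmid
  bypasses translation entirely.

  @param[in] Smmu  The SMMU instance to configure.

  @retval EFI_SUCCESS           The tables were created and configured.
  @retval EFI_OUT_OF_RESOURCES  A context bank table could not be allocated.
**/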
EFI_STATUS
SmmuConfigSwitch (
  SMMU_DEVICE *Smmu
  )
{
  VOID   *S2;
  VOID   *S1;
  UINT32 reg;

  S2 = SmmuCreateS2Cbt ();
  if (S2 == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  Smmu->S2Cbt = (UINTN) S2;

  S1 = SmmuCreateS1Cbt ();
  if (S1 == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }

  MmioWrite32 (Smmu->S2Cbt + SMMU_CB_S1CTBAR(SMMU_OS_VMID), (UINT32) RShiftU64 ((UINT64)S1, SMMU_S1CBT_SHIFT));

  // Context bank 0 (VMID 0, ASID 0): cacheable write-back, read/write-allocate
  MmioWrite32 ((UINTN)S1 + SMMU_CB(0) + SMMU_S1_SCTLR, SCTLR_CACHE_WBRAWA);
  // Context bank 1 (VMID 0, ASID 1): Device-nGnRE
  MmioWrite32 ((UINTN)S1 + SMMU_CB(1) + SMMU_S1_SCTLR, SCTLR_CACHE_NGNRE);

  /*
   * Use the weakest attributes so the stage 1 output attributes are not affected.
   */
  reg = CBAR_TYPE_S1_TRANS_S2_BYPASS |
        (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
        (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
  MmioWrite32 (Smmu->S2Cbt + SMMU_CB_CBAR(SMMU_OS_VMID), reg);

  /* Mark S2CR as translation */
  reg = S2CR_TYPE_TRANS | S2CR_MTSH_WEAKEST;
  MmioWrite32 (Smmu->S2Cbt + SMMU_CB_S2CR(SMMU_OS_VMID), reg);

  /* Bypass traffic uses a separate S2CR entry */
  reg = S2CR_TYPE_BYPASS;
  MmioWrite32 (Smmu->S2Cbt + SMMU_CB_S2CR(hisi_bypass_vmid), reg);

  return EFI_SUCCESS;
}

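/**
  Write back and invalidate the context bank tables from the data cache so the
  SMMU, which fetches them from memory, observes their latest contents.

  @param[in] Smmu  The SMMU instance whose tables are flushed.

  @retval EFI_SUCCESS            The tables were flushed.
  @retval EFI_INVALID_PARAMETER  No S2CBT has been created for this SMMU.
**/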
EFI_STATUS
SmmuFlushCbt (
  SMMU_DEVICE *Smmu
  )
{
  UINTN Index;

  if (Smmu->S2Cbt == 0) {
    DEBUG ((EFI_D_ERROR, "[%a]:[%dL] S2Cbt is null!\n", __FUNCTION__, __LINE__));
    return EFI_INVALID_PARAMETER;
  }

  WriteBackInvalidateDataCacheRange ((VOID *)Smmu->S2Cbt, SMMU_S2CBT_SIZE);
  for (Index = 0; Index < SMMU_MAX_CBS; Index++) {
    /* Flush any stage 1 CBT linked from this context bank entry */
    UINTN S1Ctb = MmioRead32 (Smmu->S2Cbt + SMMU_CB_S1CTBAR(Index));
    if (S1Ctb) {
      // TODO: shall we really need to flush 64KB? Or 8KB is enough?
      WriteBackInvalidateDataCacheRange ((VOID *)LShiftU64 (S1Ctb, SMMU_S1CBT_SHIFT), SMMU_S1CBT_SIZE);
    }
  }

  return EFI_SUCCESS;
}

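/**
  Flush the context bank tables to memory and enable the SMMU: point the
  hardware at the S2CBT, invalidate the TLBs, and enable global fault
  reporting with client transactions translated (sCR0_CLIENTPD cleared).

  @param[in] Smmu  The SMMU instance to enable.

  @retval EFI_SUCCESS  The SMMU was enabled.
**/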
EFI_STATUS
SmmuEnableTable (
  SMMU_DEVICE *Smmu
  )
{
  UINT32 reg;
  UINTN gr0_base = Smmu->Base;

  (VOID) SmmuFlushCbt (Smmu);

  /* Clear Global FSR */
  reg = readl_relaxed(gr0_base + SMMU_RINT_GFSR);
  writel_relaxed(reg, gr0_base + SMMU_RINT_GFSR);

  /* Unmask all global interrupts */
  writel_relaxed(0, gr0_base + SMMU_CFG_GFIM);

  reg = CFG_CBF_S1_ORGN_WA | CFG_CBF_S1_IRGN_WA | CFG_CBF_S1_SHCFG;
  reg |= CFG_CBF_S2_ORGN_WA | CFG_CBF_S2_IRGN_WA | CFG_CBF_S2_SHCFG;
  writel_relaxed(reg, gr0_base + SMMU_CFG_CBF);

  reg = (UINT32) RShiftU64 (Smmu->S2Cbt, SMMU_S2CBT_SHIFT);
  writel_relaxed(reg, gr0_base + SMMU_CFG_S2CTBAR);

  /* Invalidate all TLB entries, just in case */
  writel_relaxed(0, gr0_base + SMMU_TLBIALL);
  hisi_smmu_tlb_sync(Smmu);

  writel_relaxed(sACR_WC_EN, gr0_base + SMMU_CTRL_ACR);

  /* Enable fault reporting */
  reg = (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
  reg &= ~sCR0_CLIENTPD;

  writel_relaxed(reg, gr0_base + SMMU_CTRL_CR0);
  ArmDataSynchronizationBarrier ();

  return EFI_SUCCESS;
}
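
/*
 * Typical bring-up order, as an illustrative sketch only. The platform driver
 * that owns the SMMU_DEVICE instance and its register base is assumed here,
 * not defined in this file:
 *
 *   SMMU_DEVICE Smmu;
 *
 *   Smmu.Base = SMMU_REG_BASE;     // hypothetical platform-specific base
 *   SmmuInit (&Smmu);              // clear faults, mask interrupts
 *   SmmuConfigSwitch (&Smmu);      // build and link the context bank tables
 *   SmmuEnableTable (&Smmu);       // flush tables and enable translation
 */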