// SPDX-License-Identifier: GPL-2.0-only
// Miscellaneous Arm SMMU implementation and integration quirks
// Copyright (C) 2019 Arm Limited

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/bitfield.h>
#include <linux/of.h>

#include "arm-smmu.h"

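/*
 * Calxeda integration quirk: per the "calxeda,smmu-secure-config-access"
 * DT binding, these implementations always make secure accesses to the
 * configuration registers, so the non-secure aliases of the banked GR0
 * registers (0x400 above their secure counterparts) must be used instead.
 */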
static int arm_smmu_gr0_ns(int offset)
{
	switch (offset) {
	case ARM_SMMU_GR0_sCR0:
	case ARM_SMMU_GR0_sACR:
	case ARM_SMMU_GR0_sGFSR:
	case ARM_SMMU_GR0_sGFSYNR0:
	case ARM_SMMU_GR0_sGFSYNR1:
	case ARM_SMMU_GR0_sGFSYNR2:
		return offset + 0x400;
	default:
		return offset;
	}
}

static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page,
			    int offset)
{
	if (page == ARM_SMMU_GR0)
		offset = arm_smmu_gr0_ns(offset);
	return readl_relaxed(arm_smmu_page(smmu, page) + offset);
}

static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page,
			      int offset, u32 val)
{
	if (page == ARM_SMMU_GR0)
		offset = arm_smmu_gr0_ns(offset);
	writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
}

/* Since we don't care about sGFAR, we can do without 64-bit accessors */
static const struct arm_smmu_impl calxeda_impl = {
	.read_reg = arm_smmu_read_ns,
	.write_reg = arm_smmu_write_ns,
};


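/*
 * Cavium ThunderX (CN88xx) quirks: subclass arm_smmu_device so that each
 * SMMU instance can carry the base of its private ASID/VMID range.
 */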
struct cavium_smmu {
	struct arm_smmu_device smmu;
	u32 id_base;
};

static int cavium_cfg_probe(struct arm_smmu_device *smmu)
{
	static atomic_t context_count = ATOMIC_INIT(0);
	struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count);
	dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");

	return 0;
}

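/* Shift this domain's ASID/VMID into the block reserved by cavium_cfg_probe() */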
static int cavium_init_context(struct arm_smmu_domain *smmu_domain)
{
	struct cavium_smmu *cs = container_of(smmu_domain->smmu,
					      struct cavium_smmu, smmu);

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		smmu_domain->cfg.vmid += cs->id_base;
	else
		smmu_domain->cfg.asid += cs->id_base;

	return 0;
}

static const struct arm_smmu_impl cavium_impl = {
	.cfg_probe = cavium_cfg_probe,
	.init_context = cavium_init_context,
};

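/*
 * Swap the core driver's arm_smmu_device for our subclass: copy the
 * already-probed state into the embedded arm_smmu_device, free the
 * original allocation, and return the embedded copy in its place.
 */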
static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu)
{
	struct cavium_smmu *cs;

	cs = devm_kzalloc(smmu->dev, sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	cs->smmu = *smmu;
	cs->smmu.impl = &cavium_impl;

	devm_kfree(smmu->dev, smmu);

	return &cs->smmu;
}


#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

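/*
 * Deliberately not static: declared in arm-smmu.h so that other
 * implementations built around MMU-500 can reuse this reset sequence.
 */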
int arm_mmu500_reset(struct arm_smmu_device *smmu)
{
	u32 reg, major;
	int i;
	/*
	 * On MMU-500 r2p0 onwards we need to clear ACR.CACHE_LOCK before
	 * writes to the context bank ACTLRs will stick. And we just hope that
	 * Secure has also cleared SACR.CACHE_LOCK for this to take effect...
	 */
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
	major = FIELD_GET(ID7_MAJOR, reg);
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
	if (major >= 2)
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
	/*
	 * Allow unmatched Stream IDs to allocate bypass
	 * TLB entries for reduced latency.
	 */
	reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);

	/*
	 * Disable MMU-500's not-particularly-beneficial next-page
	 * prefetcher for the sake of errata #841119 and #826419.
	 */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
		reg &= ~ARM_MMU500_ACTLR_CPRE;
		arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
	}

	return 0;
}

static const struct arm_smmu_impl arm_mmu500_impl = {
	.reset = arm_mmu500_reset,
};


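/*
 * Called by the core driver during probe. Note that this may return a
 * different (larger) device structure, so the caller must continue with
 * the returned pointer rather than the one passed in.
 */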
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
{
	/*
	 * We will inevitably have to combine model-specific implementation
	 * quirks with platform-specific integration quirks, but everything
	 * we currently support happens to work out as straightforward
	 * mutually-exclusive assignments.
	 */
	switch (smmu->model) {
	case ARM_MMU500:
		smmu->impl = &arm_mmu500_impl;
		break;
	case CAVIUM_SMMUV2:
		return cavium_smmu_impl_init(smmu);
	default:
		break;
	}

	if (of_property_read_bool(smmu->dev->of_node,
				  "calxeda,smmu-secure-config-access"))
		smmu->impl = &calxeda_impl;

	if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm845-smmu-500"))
		return qcom_smmu_impl_init(smmu);

	return smmu;
}