// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#include <linux/of_device.h>
#include <linux/qcom_scm.h>

#include "arm-smmu.h"

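/*
 * Qualcomm-specific SMMU state: embeds the generic arm_smmu_device so
 * to_qcom_smmu() can recover the wrapper via container_of(), plus the
 * state needed to work around quirky firmware S2CR handling.
 */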
struct qcom_smmu {
	struct arm_smmu_device smmu;
	bool bypass_quirk;
	u8 bypass_cbndx;
};

static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
	return container_of(smmu, struct qcom_smmu, smmu);
}

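/*
 * Client devices matching one of these compatibles are given identity
 * domains by default (see qcom_smmu_def_domain_type() below).
 */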
static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,mdp4" },
	{ .compatible = "qcom,mdss" },
	{ .compatible = "qcom,sc7180-mdss" },
	{ .compatible = "qcom,sc7180-mss-pil" },
	{ .compatible = "qcom,sdm845-mdss" },
	{ .compatible = "qcom,sdm845-mss-pil" },
	{ }
};

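/*
 * Per-implementation configuration probe: cap the number of stream
 * matching groups, detect the firmware S2CR quirk, and inherit any
 * stream mappings left programmed by the bootloader as bypass streams.
 */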
static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
{
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	unsigned int last_s2cr;
	u32 reg;
	u32 smr;
	int i;

	/*
	 * Some platforms support more than the Arm SMMU architected maximum of
	 * 128 stream matching groups. For unknown reasons, the additional
	 * groups don't exhibit the same behavior as the architected registers,
	 * so limit the groups to 128 until the behavior is fixed for the other
	 * groups.
	 */
	if (smmu->num_mapping_groups > 128) {
		dev_notice(smmu->dev, "\tLimiting the stream matching groups to 128\n");
		smmu->num_mapping_groups = 128;
	}

	last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);

	/*
	 * With some firmware versions writes to S2CR of type FAULT are
	 * ignored, and writing BYPASS will end up written as FAULT in the
	 * register. Perform a write to S2CR to detect if this is the case and
	 * if so reserve a context bank to emulate bypass streams.
	 */
	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT);
	arm_smmu_gr0_write(smmu, last_s2cr, reg);
	reg = arm_smmu_gr0_read(smmu, last_s2cr);
	if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
		qsmmu->bypass_quirk = true;
		qsmmu->bypass_cbndx = smmu->num_context_banks - 1;

		set_bit(qsmmu->bypass_cbndx, smmu->context_map);

		arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);

		reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
	}

	for (i = 0; i < smmu->num_mapping_groups; i++) {
		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));

		if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
			/* Ignore valid bit for SMR mask extraction. */
			smr &= ~ARM_SMMU_SMR_VALID;
			smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
			smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
			smmu->smrs[i].valid = true;

			smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
			smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
			smmu->s2crs[i].cbndx = 0xff;
		}
	}

	return 0;
}

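/*
 * S2CR writer that transparently rewrites BYPASS and FAULT entries when
 * the firmware quirk detected in qcom_smmu_cfg_probe() is present.
 */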
static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	u32 cbndx = s2cr->cbndx;
	u32 type = s2cr->type;
	u32 reg;

	if (qsmmu->bypass_quirk) {
		if (type == S2CR_TYPE_BYPASS) {
			/*
			 * Firmware with quirky S2CR handling will substitute
			 * BYPASS writes with FAULT, so point the stream to the
			 * reserved context bank and ask for translation on the
			 * stream.
			 */
			type = S2CR_TYPE_TRANS;
			cbndx = qsmmu->bypass_cbndx;
		} else if (type == S2CR_TYPE_FAULT) {
			/*
			 * Firmware with quirky S2CR handling will ignore FAULT
			 * writes, so trick it to write FAULT by asking for a
			 * BYPASS.
			 */
			type = S2CR_TYPE_BYPASS;
			cbndx = 0xff;
		}
	}

	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

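/*
 * Force an identity (bypass) domain for known clients; returning 0 for
 * everything else lets the IOMMU core choose the default domain type.
 */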
static int qcom_smmu_def_domain_type(struct device *dev)
{
	const struct of_device_id *match =
		of_match_device(qcom_smmu_client_of_match, dev);

	return match ? IOMMU_DOMAIN_IDENTITY : 0;
}

static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
{
	int ret;

	/*
	 * To address performance degradation in non-real time clients,
	 * such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
	 * such as MTP and db845, whose firmware implements secure monitor
	 * call handlers to turn on/off the wait-for-safe logic.
	 */
	ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
	if (ret)
		dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");

	return ret;
}

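/* Standard MMU-500 reset, plus the sdm845 wait-for-safe workaround. */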
static int qcom_smmu500_reset(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	arm_mmu500_reset(smmu);

	if (of_device_is_compatible(np, "qcom,sdm845-smmu-500"))
		return qcom_sdm845_smmu500_reset(smmu);

	return 0;
}

static const struct arm_smmu_impl qcom_smmu_impl = {
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = qcom_smmu500_reset,
	.write_s2cr = qcom_smmu_write_s2cr,
};

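/*
 * Swap the generic arm_smmu_device for the larger Qualcomm wrapper: the
 * original structure is copied into the new allocation and then freed,
 * so the caller must continue with the returned pointer only.
 */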
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
	struct qcom_smmu *qsmmu;

	/* Check to make sure qcom_scm has finished probing */
	if (!qcom_scm_is_available())
		return ERR_PTR(-EPROBE_DEFER);

	qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
	if (!qsmmu)
		return ERR_PTR(-ENOMEM);

	qsmmu->smmu = *smmu;

	qsmmu->smmu.impl = &qcom_smmu_impl;
	devm_kfree(smmu->dev, smmu);

	return &qsmmu->smmu;
}