// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>
#include "msm_drv.h"
#include "msm_mmu.h"

struct msm_iommu {
	struct msm_mmu base;
	struct iommu_domain *domain;
	atomic_t pagetables;
};

#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)

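/*
 * A GPU-private pagetable layered on top of the parent msm_iommu: the
 * parent domain keeps its kernel-managed mappings in TTBR1, while each
 * of these objects owns a separate TTBR0 table that the Adreno SMMU
 * walks for per-process GPU address spaces.
 */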
struct msm_iommu_pagetable {
	struct msm_mmu base;
	struct msm_mmu *parent;
	struct io_pgtable_ops *pgtbl_ops;
	phys_addr_t ttbr;
	u32 asid;
};

static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
	return container_of(mmu, struct msm_iommu_pagetable, base);
}

static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
		size_t size)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	size_t unmapped = 0;
	size_t len = size;

	/* Unmap the block one page at a time */
	while (size) {
		unmapped += ops->unmap(ops, iova, 4096, NULL);
		iova += 4096;
		size -= 4096;
	}

	iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);

	/* Compare against the original length; size has counted down to zero */
	return (unmapped == len) ? 0 : -EINVAL;
}

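/*
 * Map a scatterlist into the pagetable one 4K page at a time, since the
 * io-pgtable ops used here operate on a single page per call. On any
 * failure, pages mapped so far are unwound before returning.
 */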
static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	struct scatterlist *sg;
	size_t mapped = 0;
	u64 addr = iova;
	unsigned int i;

	for_each_sgtable_sg(sgt, sg, i) {
		size_t size = sg->length;
		phys_addr_t phys = sg_phys(sg);

		/* Map the block one page at a time */
		while (size) {
			if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
				msm_iommu_pagetable_unmap(mmu, iova, mapped);
				return -EINVAL;
			}

			phys += 4096;
			addr += 4096;
			size -= 4096;
			mapped += 4096;
		}
	}

	return 0;
}

static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
	struct adreno_smmu_priv *adreno_smmu =
		dev_get_drvdata(pagetable->parent->dev);

	/*
	 * If this is the last attached pagetable for the parent,
	 * disable TTBR0 in the arm-smmu driver
	 */
	if (atomic_dec_return(&iommu->pagetables) == 0)
		adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);

	free_io_pgtable_ops(pagetable->pgtbl_ops);
	kfree(pagetable);
}

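/*
 * Report the physical TTBR and ASID backing a pagetable so the caller
 * (in practice, GPU code that switches pagetables on the hardware) can
 * program them elsewhere; anything that is not a MSM_MMU_IOMMU_PAGETABLE
 * is rejected with -EINVAL.
 */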
int msm_iommu_pagetable_params(struct msm_mmu *mmu,
		phys_addr_t *ttbr, int *asid)
{
	struct msm_iommu_pagetable *pagetable;

	if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
		return -EINVAL;

	pagetable = to_pagetable(mmu);

	if (ttbr)
		*ttbr = pagetable->ttbr;

	if (asid)
		*asid = pagetable->asid;

	return 0;
}

static const struct msm_mmu_funcs pagetable_funcs = {
		.map = msm_iommu_pagetable_map,
		.unmap = msm_iommu_pagetable_unmap,
		.destroy = msm_iommu_pagetable_destroy,
};

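/*
 * Deliberately empty TLB maintenance callbacks: invalidation for these
 * pagetables is done with iommu_flush_iotlb_all() on the parent domain
 * in the unmap path instead, so the io-pgtable code needs only no-ops.
 */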
static void msm_iommu_tlb_flush_all(void *cookie)
{
}

static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
		size_t granule, void *cookie)
{
}

static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
		unsigned long iova, size_t granule, void *cookie)
{
}

static const struct iommu_flush_ops null_tlb_ops = {
	.tlb_flush_all = msm_iommu_tlb_flush_all,
	.tlb_flush_walk = msm_iommu_tlb_flush_walk,
	.tlb_flush_leaf = msm_iommu_tlb_flush_walk,
	.tlb_add_page = msm_iommu_tlb_add_page,
};

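/*
 * Create a new per-process pagetable: clone the parent's TTBR1 config,
 * drop the TTBR1 quirk so the result describes a TTBR0 table, allocate
 * io-pgtable ops for it, and (for the first pagetable only) hand the
 * config back to the arm-smmu driver so it enables TTBR0 on the context
 * bank.
 */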
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
	struct msm_iommu *iommu = to_msm_iommu(parent);
	struct msm_iommu_pagetable *pagetable;
	const struct io_pgtable_cfg *ttbr1_cfg = NULL;
	struct io_pgtable_cfg ttbr0_cfg;
	int ret;

	/* Get the pagetable configuration from the domain */
	if (adreno_smmu->cookie)
		ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);

	/*
	 * If you hit this WARN_ONCE() you are probably missing an entry in
	 * qcom_smmu_impl_of_match[] in arm-smmu-qcom.c
	 */
	if (WARN_ONCE(!ttbr1_cfg, "No per-process page tables"))
		return ERR_PTR(-ENODEV);

	pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
	if (!pagetable)
		return ERR_PTR(-ENOMEM);

	msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
		MSM_MMU_IOMMU_PAGETABLE);

	/* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
	ttbr0_cfg = *ttbr1_cfg;

	/* The incoming cfg will have the TTBR1 quirk enabled */
	ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
	ttbr0_cfg.tlb = &null_tlb_ops;

	pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
		&ttbr0_cfg, iommu->domain);

	if (!pagetable->pgtbl_ops) {
		kfree(pagetable);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * If this is the first pagetable that we've allocated, send it back to
	 * the arm-smmu driver as a trigger to set up TTBR0
	 */
	if (atomic_inc_return(&iommu->pagetables) == 1) {
		ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
		if (ret) {
			free_io_pgtable_ops(pagetable->pgtbl_ops);
			kfree(pagetable);
			return ERR_PTR(ret);
		}
	}

	/* Needed later for TLB flush */
	pagetable->parent = parent;
	pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;

	/*
	 * TODO we would like each set of page tables to have a unique ASID
	 * to optimize TLB invalidation.  But iommu_flush_iotlb_all() will
	 * end up flushing the ASID used for TTBR1 pagetables, which is not
	 * what we want.  So for now just use the same ASID as TTBR1.
	 */
	pagetable->asid = 0;

	return &pagetable->base;
}

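/*
 * A minimal usage sketch (hypothetical caller; "gpu_mmu" is assumed to
 * be the msm_mmu wrapping the GPU's iommu_domain):
 *
 *	struct msm_mmu *pt = msm_iommu_pagetable_create(gpu_mmu);
 *	phys_addr_t ttbr;
 *	int asid;
 *
 *	if (!IS_ERR(pt))
 *		msm_iommu_pagetable_params(pt, &ttbr, &asid);
 */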
static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	struct msm_iommu *iommu = arg;

	if (iommu->base.handler)
		return iommu->base.handler(iommu->base.arg, iova, flags);
	pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
	return 0;
}

static void msm_iommu_detach(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_detach_device(iommu->domain, mmu->dev);
}

static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	size_t ret;

	/* The arm-smmu driver expects the addresses to be sign extended */
	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

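	/*
	 * Note: the sign extension above is needed because, with the TTBR1
	 * quirk enabled, the SMMU translates IOVAs with bit 48 set through
	 * TTBR1 and expects those addresses in the canonical upper range.
	 */
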
	ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
	WARN_ON(!ret);

	return (ret == len) ? 0 : -EINVAL;
}

static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	iommu_unmap(iommu->domain, iova, len);

	return 0;
}

static void msm_iommu_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_domain_free(iommu->domain);
	kfree(iommu);
}

static const struct msm_mmu_funcs funcs = {
		.detach = msm_iommu_detach,
		.map = msm_iommu_map,
		.unmap = msm_iommu_unmap,
		.destroy = msm_iommu_destroy,
};

struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
	struct msm_iommu *iommu;
	int ret;

	if (!domain)
		return ERR_PTR(-ENODEV);

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	iommu->domain = domain;
	msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
	iommu_set_fault_handler(domain, msm_fault_handler, iommu);

	atomic_set(&iommu->pagetables, 0);

	ret = iommu_attach_device(iommu->domain, dev);
	if (ret) {
		kfree(iommu);
		return ERR_PTR(ret);
	}

	return &iommu->base;
}
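
/*
 * A minimal usage sketch (hypothetical caller): the domain is typically
 * allocated with iommu_domain_alloc() and remains owned by the caller
 * until msm_iommu_destroy() frees it, so it must be freed manually if
 * creation fails here.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *	struct msm_mmu *mmu = msm_iommu_new(dev, domain);
 *
 *	if (IS_ERR(mmu))
 *		iommu_domain_free(domain);
 */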