// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>
#include "msm_drv.h"
#include "msm_mmu.h"

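/*
 * msm_mmu implementation backed directly by an iommu_domain.  The
 * 'pagetables' counter tracks how many GPU-private pagetables are
 * currently attached on top of this domain (see below).
 */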
struct msm_iommu {
	struct msm_mmu base;
	struct iommu_domain *domain;
	atomic_t pagetables;
};

#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)

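/*
 * A GPU-private pagetable: io-pgtable-managed tables walked by the GPU
 * via TTBR0, while the parent msm_iommu domain stays in TTBR1.  This is
 * the basis for giving each GPU process its own address space.
 */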
struct msm_iommu_pagetable {
	struct msm_mmu base;
	struct msm_mmu *parent;
	struct io_pgtable_ops *pgtbl_ops;
	phys_addr_t ttbr;
	u32 asid;
};

static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
	return container_of(mmu, struct msm_iommu_pagetable, base);
}

static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
		size_t size)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	size_t unmapped = 0;
	size_t remaining = size;

	/* Unmap the block one page at a time */
	while (remaining) {
		unmapped += ops->unmap(ops, iova, 4096, NULL);
		iova += 4096;
		remaining -= 4096;
	}

	iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);

	/*
	 * Compare against the original size: the loop above consumes
	 * 'remaining' down to zero, so checking against that would
	 * always fail.
	 */
	return (unmapped == size) ? 0 : -EINVAL;
}

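/*
 * Map an sgtable into the private pagetable one 4K page at a time,
 * unwinding any partial mapping on failure.
 */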
static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	struct scatterlist *sg;
	size_t mapped = 0;
	u64 addr = iova;
	unsigned int i;

	for_each_sgtable_sg(sgt, sg, i) {
		size_t size = sg->length;
		phys_addr_t phys = sg_phys(sg);

		/* Map the block one page at a time */
		while (size) {
			if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
				msm_iommu_pagetable_unmap(mmu, iova, mapped);
				return -EINVAL;
			}

			phys += 4096;
			addr += 4096;
			size -= 4096;
			mapped += 4096;
		}
	}

	return 0;
}

static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
	struct adreno_smmu_priv *adreno_smmu =
		dev_get_drvdata(pagetable->parent->dev);

	/*
	 * If this is the last attached pagetable for the parent,
	 * disable TTBR0 in the arm-smmu driver
	 */
	if (atomic_dec_return(&iommu->pagetables) == 0)
		adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);

	free_io_pgtable_ops(pagetable->pgtbl_ops);
	kfree(pagetable);
}

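/*
 * Report the TTBR0 address and ASID of a private pagetable, or -EINVAL
 * if 'mmu' is not a pagetable.
 */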
int msm_iommu_pagetable_params(struct msm_mmu *mmu,
		phys_addr_t *ttbr, int *asid)
{
	struct msm_iommu_pagetable *pagetable;

	if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
		return -EINVAL;

	pagetable = to_pagetable(mmu);

	if (ttbr)
		*ttbr = pagetable->ttbr;

	if (asid)
		*asid = pagetable->asid;

	return 0;
}

static const struct msm_mmu_funcs pagetable_funcs = {
	.map = msm_iommu_pagetable_map,
	.unmap = msm_iommu_pagetable_unmap,
	.destroy = msm_iommu_pagetable_destroy,
};

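/*
 * The io-pgtable code requires TLB maintenance callbacks, but all
 * invalidation for the private pagetables is done through the parent
 * domain (see the iommu_flush_iotlb_all() call in
 * msm_iommu_pagetable_unmap()), so these can be no-ops.
 */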
static void msm_iommu_tlb_flush_all(void *cookie)
{
}

static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
		size_t granule, void *cookie)
{
}

static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
		unsigned long iova, size_t granule, void *cookie)
{
}

static const struct iommu_flush_ops null_tlb_ops = {
	.tlb_flush_all = msm_iommu_tlb_flush_all,
	.tlb_flush_walk = msm_iommu_tlb_flush_walk,
	.tlb_flush_leaf = msm_iommu_tlb_flush_walk,
	.tlb_add_page = msm_iommu_tlb_add_page,
};

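/*
 * Create a GPU-private pagetable on top of 'parent'.  The first
 * pagetable created also triggers the arm-smmu driver to enable TTBR0
 * translation, via the adreno_smmu_priv interface.
 */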
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
	struct msm_iommu *iommu = to_msm_iommu(parent);
	struct msm_iommu_pagetable *pagetable;
	const struct io_pgtable_cfg *ttbr1_cfg = NULL;
	struct io_pgtable_cfg ttbr0_cfg;
	int ret;

	/* Get the pagetable configuration from the domain */
	if (adreno_smmu->cookie)
		ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
	if (!ttbr1_cfg)
		return ERR_PTR(-ENODEV);

	pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
	if (!pagetable)
		return ERR_PTR(-ENOMEM);

	msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
		MSM_MMU_IOMMU_PAGETABLE);

	/* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
	ttbr0_cfg = *ttbr1_cfg;

	/* The incoming cfg will have the TTBR1 quirk enabled */
	ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
	ttbr0_cfg.tlb = &null_tlb_ops;

	pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
		&ttbr0_cfg, iommu->domain);
	if (!pagetable->pgtbl_ops) {
		kfree(pagetable);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * If this is the first pagetable that we've allocated, send it back to
	 * the arm-smmu driver as a trigger to set up TTBR0
	 */
	if (atomic_inc_return(&iommu->pagetables) == 1) {
		ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
		if (ret) {
			free_io_pgtable_ops(pagetable->pgtbl_ops);
			kfree(pagetable);
			return ERR_PTR(ret);
		}
	}

	/* Needed later for TLB flush */
	pagetable->parent = parent;
	pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;

	/*
	 * TODO we would like each set of page tables to have a unique ASID
	 * to optimize TLB invalidation.  But iommu_flush_iotlb_all() will
	 * end up flushing the ASID used for TTBR1 pagetables, which is not
	 * what we want.  So for now just use the same ASID as TTBR1.
	 */
	pagetable->asid = 0;

	return &pagetable->base;
}

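/*
 * Fault handler for the parent domain: forward to the handler installed
 * on the base msm_mmu, if any, otherwise just log a ratelimited warning.
 */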
static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	struct msm_iommu *iommu = arg;

	if (iommu->base.handler)
		return iommu->base.handler(iommu->base.arg, iova, flags);

	pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
	return 0;
}

static void msm_iommu_detach(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_detach_device(iommu->domain, mmu->dev);
}

static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	size_t ret;

	/* The arm-smmu driver expects the addresses to be sign extended */
	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
	WARN_ON(!ret);

	return (ret == len) ? 0 : -EINVAL;
}

static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	iommu_unmap(iommu->domain, iova, len);

	return 0;
}

static void msm_iommu_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_domain_free(iommu->domain);
	kfree(iommu);
}

static const struct msm_mmu_funcs funcs = {
	.detach = msm_iommu_detach,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.destroy = msm_iommu_destroy,
};

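/*
 * Wrap an iommu_domain in a msm_mmu and attach it to the device.  On
 * success the domain is owned by the returned msm_mmu and is freed in
 * msm_iommu_destroy().
 */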
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
	struct msm_iommu *iommu;
	int ret;

	if (!domain)
		return ERR_PTR(-ENODEV);

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	iommu->domain = domain;
	msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
	iommu_set_fault_handler(domain, msm_fault_handler, iommu);

	atomic_set(&iommu->pagetables, 0);

	ret = iommu_attach_device(iommu->domain, dev);
	if (ret) {
		kfree(iommu);
		return ERR_PTR(ret);
	}

	return &iommu->base;
}