// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen grant DMA-mapping layer - contains special DMA-mapping routines
 * for providing grant references as DMA addresses to be used by frontends
 * (e.g. virtio) in Xen guests
 *
 * Copyright (c) 2021, Juergen Gross <jgross@suse.com>
 */

#include <linux/module.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/xarray.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/grant_table.h>

struct xen_grant_dma_data {
	/* The ID of the backend domain */
	domid_t backend_domid;
	/* Is the device behaving sanely? */
	bool broken;
};

static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ);

#define XEN_GRANT_DMA_ADDR_OFF	(1ULL << 63)

static inline dma_addr_t grant_to_dma(grant_ref_t grant)
{
	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << XEN_PAGE_SHIFT);
}

static inline grant_ref_t dma_to_grant(dma_addr_t dma)
{
	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> XEN_PAGE_SHIFT);
}

static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
{
	struct xen_grant_dma_data *data;
	unsigned long flags;

	xa_lock_irqsave(&xen_grant_dma_devices, flags);
	data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
	xa_unlock_irqrestore(&xen_grant_dma_devices, flags);

	return data;
}

static int store_xen_grant_dma_data(struct device *dev,
				    struct xen_grant_dma_data *data)
{
	unsigned long flags;
	int ret;

	xa_lock_irqsave(&xen_grant_dma_devices, flags);
	ret = xa_err(__xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
				GFP_ATOMIC));
	xa_unlock_irqrestore(&xen_grant_dma_devices, flags);

	return ret;
}

/*
 * DMA ops for Xen frontends (e.g. virtio).
 *
 * Used to act as a kind of software IOMMU for Xen guests by using grants as
 * DMA addresses.
 * Such a DMA address is formed by using the grant reference as a frame
 * number and setting the highest address bit (this bit allows the backend
 * to distinguish it from e.g. an MMIO address).
 */
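/*
 * For illustration (assuming the usual 4 KiB Xen page granularity, i.e.
 * XEN_PAGE_SHIFT == 12): grant reference 0x123 is encoded by grant_to_dma()
 * as (1ULL << 63) | (0x123 << 12) == 0x8000000000123000, and dma_to_grant()
 * recovers 0x123 from that address again.
 */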
static void *xen_grant_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = XEN_PFN_UP(size);
	unsigned long pfn;
	grant_ref_t grant;
	void *ret;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return NULL;

	if (unlikely(data->broken))
		return NULL;

	ret = alloc_pages_exact(n_pages * XEN_PAGE_SIZE, gfp);
	if (!ret)
		return NULL;

	pfn = virt_to_pfn(ret);

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
		free_pages_exact(ret, n_pages * XEN_PAGE_SIZE);
		return NULL;
	}

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
						pfn_to_gfn(pfn + i), 0);
	}

	*dma_handle = grant_to_dma(grant);

	return ret;
}

static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle, unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = XEN_PFN_UP(size);
	grant_ref_t grant;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);

	free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE);
}

static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
					      dma_addr_t *dma_handle,
					      enum dma_data_direction dir,
					      gfp_t gfp)
{
	void *vaddr;

	vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0);
	if (!vaddr)
		return NULL;

	return virt_to_page(vaddr);
}

static void xen_grant_dma_free_pages(struct device *dev, size_t size,
				     struct page *vaddr, dma_addr_t dma_handle,
				     enum dma_data_direction dir)
{
	xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
}

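/*
 * Note: one grant reference is taken per Xen page covered by the buffer;
 * the grant is made read-only for the backend when the direction is
 * DMA_TO_DEVICE, and writable otherwise.
 */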
static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned long dma_offset = xen_offset_in_page(offset),
		      pfn_offset = XEN_PFN_DOWN(offset);
	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
	grant_ref_t grant;
	dma_addr_t dma_handle;

	if (WARN_ON(dir == DMA_NONE))
		return DMA_MAPPING_ERROR;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return DMA_MAPPING_ERROR;

	if (unlikely(data->broken))
		return DMA_MAPPING_ERROR;

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant))
		return DMA_MAPPING_ERROR;

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
				pfn_to_gfn(page_to_xen_pfn(page) + i + pfn_offset),
				dir == DMA_TO_DEVICE);
	}

	dma_handle = grant_to_dma(grant) + dma_offset;

	return dma_handle;
}

static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned long dma_offset = xen_offset_in_page(dma_handle);
	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
	grant_ref_t grant;

	if (WARN_ON(dir == DMA_NONE))
		return;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);
}

static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return;

	for_each_sg(sg, s, nents, i)
		xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
				attrs);
}

static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return -EINVAL;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
				s->length, dir, attrs);
		if (s->dma_address == DMA_MAPPING_ERROR)
			goto out;

		sg_dma_len(s) = s->length;
	}

	return nents;

out:
	xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sg) = 0;

	return -EIO;
}

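/*
 * Grant-based DMA addresses always have XEN_GRANT_DMA_ADDR_OFF (bit 63) set,
 * so only devices capable of full 64-bit addressing can be supported.
 */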
static int xen_grant_dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(64);
}

static const struct dma_map_ops xen_grant_dma_ops = {
	.alloc = xen_grant_dma_alloc,
	.free = xen_grant_dma_free,
	.alloc_pages = xen_grant_dma_alloc_pages,
	.free_pages = xen_grant_dma_free_pages,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.map_page = xen_grant_dma_map_page,
	.unmap_page = xen_grant_dma_unmap_page,
	.map_sg = xen_grant_dma_map_sg,
	.unmap_sg = xen_grant_dma_unmap_sg,
	.dma_supported = xen_grant_dma_supported,
};

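/*
 * A device is handled by this layer if its DT node has an "iommus" phandle
 * pointing to an IOMMU node compatible with "xen,grant-dma".
 */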
static bool xen_is_dt_grant_dma_device(struct device *dev)
{
	struct device_node *iommu_np;
	bool has_iommu;

	iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
	has_iommu = iommu_np &&
		    of_device_is_compatible(iommu_np, "xen,grant-dma");
	of_node_put(iommu_np);

	return has_iommu;
}

bool xen_is_grant_dma_device(struct device *dev)
{
	/* XXX Handle only DT devices for now */
	if (dev->of_node)
		return xen_is_dt_grant_dma_device(dev);

	return false;
}

bool xen_virtio_mem_acc(struct virtio_device *dev)
{
	if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain())
		return true;

	return xen_is_grant_dma_device(dev->dev.parent);
}

static int xen_dt_grant_init_backend_domid(struct device *dev,
					   struct xen_grant_dma_data *data)
{
	struct of_phandle_args iommu_spec;

	if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
			0, &iommu_spec)) {
		dev_err(dev, "Cannot parse iommus property\n");
		return -ESRCH;
	}

	if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
			iommu_spec.args_count != 1) {
		dev_err(dev, "Incompatible IOMMU node\n");
		of_node_put(iommu_spec.np);
		return -ESRCH;
	}

	of_node_put(iommu_spec.np);

	/*
	 * The endpoint ID here means the ID of the domain where the
	 * corresponding backend is running
	 */
	data->backend_domid = iommu_spec.args[0];

	return 0;
}

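/*
 * Install the grant DMA ops for @dev: determine the backend domid (from the
 * device tree, or dom0 when CONFIG_XEN_VIRTIO_FORCE_GRANT is set), store the
 * per-device data and switch the device over to xen_grant_dma_ops. On any
 * failure the device keeps its platform DMA ops.
 */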
void xen_grant_setup_dma_ops(struct device *dev)
{
	struct xen_grant_dma_data *data;

	data = find_xen_grant_dma_data(dev);
	if (data) {
		dev_err(dev, "Xen grant DMA data is already created\n");
		return;
	}

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err;

	if (dev->of_node) {
		if (xen_dt_grant_init_backend_domid(dev, data))
			goto err;
	} else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
		dev_info(dev, "Using dom0 as backend\n");
		data->backend_domid = 0;
	} else {
		/* XXX ACPI device unsupported for now */
		goto err;
	}

	if (store_xen_grant_dma_data(dev, data)) {
		dev_err(dev, "Cannot store Xen grant DMA data\n");
		goto err;
	}

	dev->dma_ops = &xen_grant_dma_ops;

	return;

err:
	devm_kfree(dev, data);
	dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n");
}

bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
{
	bool ret = xen_virtio_mem_acc(dev);

	if (ret)
		xen_grant_setup_dma_ops(dev->dev.parent);

	return ret;
}

MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
MODULE_LICENSE("GPL");