// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "iommufd_private.h"

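/*
 * Common teardown shared by the PAGING and NESTED destroy paths: free the
 * iommu_domain, if one was allocated, and drop the reference on any fault
 * object attached to the hwpt.
 */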
static void __iommufd_hwpt_destroy(struct iommufd_hw_pagetable *hwpt)
{
	if (hwpt->domain)
		iommu_domain_free(hwpt->domain);

	if (hwpt->fault)
		refcount_dec(&hwpt->fault->obj.users);
}

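/*
 * Destroy callback for a PAGING hwpt: unlink it from the IOAS and remove the
 * domain from the IO page table if it was ever added, then release the
 * domain, fault and IOAS references.
 */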
void iommufd_hwpt_paging_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		mutex_lock(&hwpt_paging->ioas->mutex);
		list_del(&hwpt_paging->hwpt_item);
		mutex_unlock(&hwpt_paging->ioas->mutex);

		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}

	__iommufd_hwpt_destroy(&hwpt_paging->common);
	refcount_dec(&hwpt_paging->ioas->obj.users);
}

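/*
 * Abort path for a PAGING hwpt that was allocated but never finalized. The
 * caller still holds the ioas->mutex, so the list and domain teardown can be
 * done here before handing off to the normal destroy.
 */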
void iommufd_hwpt_paging_abort(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	/* The ioas->mutex must be held until finalize is called. */
	lockdep_assert_held(&hwpt_paging->ioas->mutex);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		list_del_init(&hwpt_paging->hwpt_item);
		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}
	iommufd_hwpt_paging_destroy(obj);
}

void iommufd_hwpt_nested_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_nested *hwpt_nested =
		container_of(obj, struct iommufd_hwpt_nested, common.obj);

	__iommufd_hwpt_destroy(&hwpt_nested->common);
	refcount_dec(&hwpt_nested->parent->common.obj.users);
}

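/*
 * A NESTED hwpt holds no IOAS or iopt state, so aborting a half-constructed
 * one is the same as destroying it.
 */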
void iommufd_hwpt_nested_abort(struct iommufd_object *obj)
{
	iommufd_hwpt_nested_destroy(obj);
}

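/*
 * Upgrade the paging domain to enforced cache coherency if the iommu driver
 * supports it. Fails with -EINVAL when the driver provides no
 * enforce_cache_coherency op or refuses the request.
 */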
static int
iommufd_hwpt_paging_enforce_cc(struct iommufd_hwpt_paging *hwpt_paging)
{
	struct iommu_domain *paging_domain = hwpt_paging->common.domain;

	if (hwpt_paging->enforce_cache_coherency)
		return 0;

	if (paging_domain->ops->enforce_cache_coherency)
		hwpt_paging->enforce_cache_coherency =
			paging_domain->ops->enforce_cache_coherency(
				paging_domain);
	if (!hwpt_paging->enforce_cache_coherency)
		return -EINVAL;
	return 0;
}

/**
 * iommufd_hwpt_paging_alloc() - Get a PAGING iommu_domain for a device
 * @ictx: iommufd context
 * @ioas: IOAS to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @immediate_attach: True if idev should be attached to the hwpt
 * @user_data: The user provided driver specific data describing the domain to
 *             create
 *
 * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
 * will be linked to the given ioas and upon return the underlying iommu_domain
 * is fully populated.
 *
 * The caller must hold the ioas->mutex until after
 * iommufd_object_abort_and_destroy() or iommufd_object_finalize() is called on
 * the returned hwpt.
 */
struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, u32 flags,
			  bool immediate_attach,
			  const struct iommu_user_data *user_data)
{
	const u32 valid_flags = IOMMU_HWPT_ALLOC_NEST_PARENT |
				IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	lockdep_assert_held(&ioas->mutex);

	if ((flags || user_data) && !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (flags & ~valid_flags)
		return ERR_PTR(-EOPNOTSUPP);
	if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
	    !device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING))
		return ERR_PTR(-EOPNOTSUPP);
	if ((flags & IOMMU_HWPT_FAULT_ID_VALID) &&
	    (flags & IOMMU_HWPT_ALLOC_NEST_PARENT))
		return ERR_PTR(-EOPNOTSUPP);

	hwpt_paging = __iommufd_object_alloc(
		ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj);
	if (IS_ERR(hwpt_paging))
		return ERR_CAST(hwpt_paging);
	hwpt = &hwpt_paging->common;

	INIT_LIST_HEAD(&hwpt_paging->hwpt_item);
	/* Pairs with iommufd_hwpt_paging_destroy() */
	refcount_inc(&ioas->obj.users);
	hwpt_paging->ioas = ioas;
	hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;

	if (ops->domain_alloc_user) {
		hwpt->domain = ops->domain_alloc_user(idev->dev, flags, NULL,
						      user_data);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
		hwpt->domain->owner = ops;
	} else {
		hwpt->domain = iommu_paging_domain_alloc(idev->dev);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
	}

	/*
	 * Set the coherency mode before we do iopt_table_add_domain() as some
	 * iommus have a per-PTE bit that controls it and need to decide before
	 * doing any maps. It is an iommu driver bug to report
	 * IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail enforce_cache_coherency on
	 * a new domain.
	 *
	 * The cache coherency mode must be configured here and unchanged later.
	 * Note that a HWPT (non-CC) created for a device (non-CC) can later be
	 * reused by another device (either non-CC or CC). However, a HWPT (CC)
	 * created for a device (CC) can only be reused by other devices (CC),
	 * not by a device (non-CC). Instead, user space in that case would need
	 * to allocate a separate HWPT (non-CC).
	 */
	if (idev->enforce_cache_coherency) {
		rc = iommufd_hwpt_paging_enforce_cc(hwpt_paging);
		if (WARN_ON(rc))
			goto out_abort;
	}

	/*
	 * immediate_attach exists only to accommodate iommu drivers that cannot
	 * directly allocate a domain. These drivers do not finish creating the
	 * domain until attach is completed. Thus we must have this call
	 * sequence. Once those drivers are fixed this should be removed.
	 */
	if (immediate_attach) {
		rc = iommufd_hw_pagetable_attach(hwpt, idev);
		if (rc)
			goto out_abort;
	}

	rc = iopt_table_add_domain(&ioas->iopt, hwpt->domain);
	if (rc)
		goto out_detach;
	list_add_tail(&hwpt_paging->hwpt_item, &ioas->hwpt_list);
	return hwpt_paging;

out_detach:
	if (immediate_attach)
		iommufd_hw_pagetable_detach(idev);
out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

/**
 * iommufd_hwpt_nested_alloc() - Get a NESTED iommu_domain for a device
 * @ictx: iommufd context
 * @parent: Parent PAGING-type hwpt to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @user_data: user_data pointer. Must be valid
 *
 * Allocate a new iommu_domain (must be IOMMU_DOMAIN_NESTED) and return it as
 * a NESTED hw_pagetable. The given parent PAGING-type hwpt must be capable of
 * being a parent.
 */
static struct iommufd_hwpt_nested *
iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
			  struct iommufd_hwpt_paging *parent,
			  struct iommufd_device *idev, u32 flags,
			  const struct iommu_user_data *user_data)
{
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_nested *hwpt_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	if ((flags & ~IOMMU_HWPT_FAULT_ID_VALID) ||
	    !user_data->len || !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->auto_domain || !parent->nest_parent ||
	    parent->common.domain->owner != ops)
		return ERR_PTR(-EINVAL);

	hwpt_nested = __iommufd_object_alloc(
		ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj);
	if (IS_ERR(hwpt_nested))
		return ERR_CAST(hwpt_nested);
	hwpt = &hwpt_nested->common;

	refcount_inc(&parent->common.obj.users);
	hwpt_nested->parent = parent;

	hwpt->domain = ops->domain_alloc_user(idev->dev,
					      flags & ~IOMMU_HWPT_FAULT_ID_VALID,
					      parent->common.domain, user_data);
	if (IS_ERR(hwpt->domain)) {
		rc = PTR_ERR(hwpt->domain);
		hwpt->domain = NULL;
		goto out_abort;
	}
	hwpt->domain->owner = ops;

	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
			 !hwpt->domain->ops->cache_invalidate_user)) {
		rc = -EINVAL;
		goto out_abort;
	}
	return hwpt_nested;

out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

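/*
 * Handler for the IOMMU_HWPT_ALLOC ioctl. The pt_id selects the allocation
 * path: an IOAS produces a PAGING hwpt, a PAGING hwpt produces a NESTED
 * hwpt. Optionally binds an iopf fault queue when IOMMU_HWPT_FAULT_ID_VALID
 * is set.
 */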
int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_alloc *cmd = ucmd->cmd;
	const struct iommu_user_data user_data = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.len = cmd->data_len,
	};
	struct iommufd_hw_pagetable *hwpt;
	struct iommufd_ioas *ioas = NULL;
	struct iommufd_object *pt_obj;
	struct iommufd_device *idev;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;
	if ((cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) ||
	    (cmd->data_type != IOMMU_HWPT_DATA_NONE && !cmd->data_len))
		return -EINVAL;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	pt_obj = iommufd_get_object(ucmd->ictx, cmd->pt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj)) {
		rc = -EINVAL;
		goto out_put_idev;
	}

	if (pt_obj->type == IOMMUFD_OBJ_IOAS) {
		struct iommufd_hwpt_paging *hwpt_paging;

		ioas = container_of(pt_obj, struct iommufd_ioas, obj);
		mutex_lock(&ioas->mutex);
		hwpt_paging = iommufd_hwpt_paging_alloc(
			ucmd->ictx, ioas, idev, cmd->flags, false,
			user_data.len ? &user_data : NULL);
		if (IS_ERR(hwpt_paging)) {
			rc = PTR_ERR(hwpt_paging);
			goto out_unlock;
		}
		hwpt = &hwpt_paging->common;
	} else if (pt_obj->type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_nested *hwpt_nested;

		hwpt_nested = iommufd_hwpt_nested_alloc(
			ucmd->ictx,
			container_of(pt_obj, struct iommufd_hwpt_paging,
				     common.obj),
			idev, cmd->flags, &user_data);
		if (IS_ERR(hwpt_nested)) {
			rc = PTR_ERR(hwpt_nested);
			goto out_unlock;
		}
		hwpt = &hwpt_nested->common;
	} else {
		rc = -EINVAL;
		goto out_put_pt;
	}

	if (cmd->flags & IOMMU_HWPT_FAULT_ID_VALID) {
		struct iommufd_fault *fault;

		fault = iommufd_get_fault(ucmd, cmd->fault_id);
		if (IS_ERR(fault)) {
			rc = PTR_ERR(fault);
			goto out_hwpt;
		}
		hwpt->fault = fault;
		hwpt->domain->iopf_handler = iommufd_fault_iopf_handler;
		hwpt->domain->fault_data = hwpt;
		refcount_inc(&fault->obj.users);
		iommufd_put_object(ucmd->ictx, &fault->obj);
	}

	cmd->out_hwpt_id = hwpt->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_hwpt;
	iommufd_object_finalize(ucmd->ictx, &hwpt->obj);
	goto out_unlock;

out_hwpt:
	iommufd_object_abort_and_destroy(ucmd->ictx, &hwpt->obj);
out_unlock:
	if (ioas)
		mutex_unlock(&ioas->mutex);
out_put_pt:
	iommufd_put_object(ucmd->ictx, pt_obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}

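/*
 * Handler for the IOMMU_HWPT_SET_DIRTY_TRACKING ioctl: enable or disable
 * dirty tracking on a PAGING hwpt's domain.
 */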
int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_set_dirty_tracking *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;
	bool enable;

	if (cmd->flags & ~IOMMU_HWPT_DIRTY_TRACKING_ENABLE)
		return rc;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	enable = cmd->flags & IOMMU_HWPT_DIRTY_TRACKING_ENABLE;

	rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt_paging->common.domain,
				     enable);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}

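/*
 * Handler for the IOMMU_HWPT_GET_DIRTY_BITMAP ioctl: read and optionally
 * clear the dirty bits of a PAGING hwpt's domain;
 * IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR suppresses the clear.
 */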
int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_get_dirty_bitmap *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;

	if ((cmd->flags & ~(IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR)) ||
	    cmd->__reserved)
		return -EOPNOTSUPP;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	rc = iopt_read_and_clear_dirty_data(
		&ioas->iopt, hwpt_paging->common.domain, cmd->flags, cmd);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}

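/*
 * Handler for the IOMMU_HWPT_INVALIDATE ioctl: forward a user-provided array
 * of driver-specific invalidation requests to a NESTED hwpt's domain. On
 * return cmd->entry_num holds the number of entries the driver processed.
 */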
int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_invalidate *cmd = ucmd->cmd;
	struct iommu_user_data_array data_array = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.entry_len = cmd->entry_len,
		.entry_num = cmd->entry_num,
	};
	struct iommufd_hw_pagetable *hwpt;
	u32 done_num = 0;
	int rc;

	if (cmd->__reserved) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	if (cmd->entry_num && (!cmd->data_uptr || !cmd->entry_len)) {
		rc = -EINVAL;
		goto out;
	}

	hwpt = iommufd_get_hwpt_nested(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt)) {
		rc = PTR_ERR(hwpt);
		goto out;
	}

	rc = hwpt->domain->ops->cache_invalidate_user(hwpt->domain,
						      &data_array);
	done_num = data_array.entry_num;

	iommufd_put_object(ucmd->ictx, &hwpt->obj);
out:
	cmd->entry_num = done_num;
	if (iommufd_ucmd_respond(ucmd, sizeof(*cmd)))
		return -EFAULT;
	return rc;
}