// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bus.
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>
#include <net/genetlink.h>
#include <linux/mod_devicetable.h>
#include <linux/virtio_ids.h>

static LIST_HEAD(mdev_head);
/* A global rwsem that protects vdpa management device and device level operations. */
static DECLARE_RWSEM(vdpa_dev_lock);
static DEFINE_IDA(vdpa_index_ida);

void vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
	down_write(&vdev->cf_lock);
	vdev->config->set_status(vdev, status);
	up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL(vdpa_set_status);
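
/*
 * Illustrative sketch (not part of the original file): a vDPA bus driver
 * such as virtio_vdpa or vhost_vdpa would typically drive the device status
 * through this helper rather than calling the config op directly, so that
 * status writes stay serialized with config accesses under cf_lock:
 *
 *	vdpa_set_status(vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
 *			      VIRTIO_CONFIG_S_DRIVER);
 */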

static struct genl_family vdpa_nl_family;

static int vdpa_dev_probe(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
	const struct vdpa_config_ops *ops = vdev->config;
	u32 max_num, min_num = 1;
	int ret = 0;

	max_num = ops->get_vq_num_max(vdev);
	if (ops->get_vq_num_min)
		min_num = ops->get_vq_num_min(vdev);
	if (max_num < min_num)
		return -EINVAL;

	if (drv && drv->probe)
		ret = drv->probe(vdev);

	return ret;
}

static void vdpa_dev_remove(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);

	if (drv && drv->remove)
		drv->remove(vdev);
}

static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);

	/* Check override first, and if set, only use the named driver */
	if (vdev->driver_override)
		return strcmp(vdev->driver_override, drv->name) == 0;

	/* Currently devices must be supported by all vDPA bus drivers */
	return 1;
}

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);
	int ret;

	ret = driver_set_override(dev, &vdev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);
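
/*
 * Userspace sketch: driver_override lets an administrator pin a vDPA device
 * to one specific bus driver through the standard sysfs mechanism, e.g.
 * moving a hypothetical "vdpa0" from virtio_vdpa to vhost_vdpa:
 *
 *	# echo vhost_vdpa > /sys/bus/vdpa/devices/vdpa0/driver_override
 *	# echo vdpa0 > /sys/bus/vdpa/drivers/virtio_vdpa/unbind
 *	# echo vdpa0 > /sys/bus/vdpa/drivers/vhost_vdpa/bind
 */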

static struct attribute *vdpa_dev_attrs[] = {
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group vdpa_dev_group = {
	.attrs = vdpa_dev_attrs,
};
__ATTRIBUTE_GROUPS(vdpa_dev);

static struct bus_type vdpa_bus = {
	.name = "vdpa",
	.dev_groups = vdpa_dev_groups,
	.match = vdpa_dev_match,
	.probe = vdpa_dev_probe,
	.remove = vdpa_dev_remove,
};

static void vdpa_release_dev(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	const struct vdpa_config_ops *ops = vdev->config;

	if (ops->free)
		ops->free(vdev);

	ida_simple_remove(&vdpa_index_ida, vdev->index);
	kfree(vdev->driver_override);
	kfree(vdev);
}

/**
 * __vdpa_alloc_device - allocate and initialize a vDPA device
 * This allows the driver to do some preparation after the device is
 * initialized but before it is registered.
 * @parent: the parent device
 * @config: the bus operations that are supported by this device
 * @ngroups: number of groups supported by this device
 * @nas: number of address spaces supported by this device
 * @size: size of the parent structure that contains private data
 * @name: name of the vdpa device; optional.
 * @use_va: indicate whether virtual address must be used by this device
 *
 * Drivers should use the vdpa_alloc_device() wrapper macro instead of
 * using this directly.
 *
 * Return: Returns an error when parent/config/dma_dev is not set or when the
 * ida allocation fails.
 */
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
					const struct vdpa_config_ops *config,
					unsigned int ngroups, unsigned int nas,
					size_t size, const char *name,
					bool use_va)
{
	struct vdpa_device *vdev;
	int err = -EINVAL;

	if (!config)
		goto err;

	if (!!config->dma_map != !!config->dma_unmap)
		goto err;

	/* Virtual addresses only work for devices with an on-chip IOMMU,
	 * i.e. ones that provide dma_map or set_map.
	 */
	if (use_va && !(config->dma_map || config->set_map))
		goto err;

	err = -ENOMEM;
	vdev = kzalloc(size, GFP_KERNEL);
	if (!vdev)
		goto err;

	err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
	if (err < 0)
		goto err_ida;

	vdev->dev.bus = &vdpa_bus;
	vdev->dev.parent = parent;
	vdev->dev.release = vdpa_release_dev;
	vdev->index = err;
	vdev->config = config;
	vdev->features_valid = false;
	vdev->use_va = use_va;
	vdev->ngroups = ngroups;
	vdev->nas = nas;

	if (name)
		err = dev_set_name(&vdev->dev, "%s", name);
	else
		err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
	if (err)
		goto err_name;

	init_rwsem(&vdev->cf_lock);
	device_initialize(&vdev->dev);

	return vdev;

err_name:
	ida_simple_remove(&vdpa_index_ida, vdev->index);
err_ida:
	kfree(vdev);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
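
/*
 * Illustrative sketch of the intended call pattern, assuming the
 * vdpa_alloc_device() wrapper from <linux/vdpa.h> with its current argument
 * order; "my_vdpa" and "my_config_ops" are hypothetical parent-driver names:
 *
 *	struct my_vdpa {
 *		struct vdpa_device vdpa;
 *		// driver-private state follows
 *	};
 *
 *	struct my_vdpa *my = vdpa_alloc_device(struct my_vdpa, vdpa, parent,
 *					       &my_config_ops, 1, 1, NULL, false);
 *	if (IS_ERR(my))
 *		return PTR_ERR(my);
 */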

static int vdpa_name_match(struct device *dev, const void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);

	return (strcmp(dev_name(&vdev->dev), data) == 0);
}

static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	struct device *dev;

	vdev->nvqs = nvqs;

	lockdep_assert_held(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
	if (dev) {
		put_device(dev);
		return -EEXIST;
	}
	return device_add(&vdev->dev);
}

/**
 * _vdpa_register_device - register a vDPA device with the vdpa lock held
 * The caller must have made a successful call to vdpa_alloc_device() before.
 * The caller must invoke this routine in the management device dev_add()
 * callback after setting up a valid mgmtdev for this vdpa device.
 * @vdev: the vdpa device to be registered to the vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when it fails to add the device to the vDPA bus
 */
int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	if (!vdev->mdev)
		return -EINVAL;

	return __vdpa_register_device(vdev, nvqs);
}
EXPORT_SYMBOL_GPL(_vdpa_register_device);

/**
 * vdpa_register_device - register a vDPA device
 * Callers must have made a successful call to vdpa_alloc_device() before.
 * @vdev: the vdpa device to be registered to the vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when it fails to add the device to the vDPA bus
 */
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	int err;

	down_write(&vdpa_dev_lock);
	err = __vdpa_register_device(vdev, nvqs);
	up_write(&vdpa_dev_lock);
	return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);
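
/*
 * Sketch of the typical parent-driver life cycle around registration
 * (hypothetical names; note that a failed registration is unwound with
 * put_device(), which releases the device through vdpa_release_dev()):
 *
 *	err = vdpa_register_device(&my->vdpa, nvqs);
 *	if (err) {
 *		put_device(&my->vdpa.dev);
 *		return err;
 *	}
 *	...
 *	vdpa_unregister_device(&my->vdpa);	// on teardown
 */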

/**
 * _vdpa_unregister_device - unregister a vDPA device
 * The caller must invoke this routine as part of the management device
 * dev_del() callback.
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
	lockdep_assert_held(&vdpa_dev_lock);
	WARN_ON(!vdev->mdev);
	device_unregister(&vdev->dev);
}
EXPORT_SYMBOL_GPL(_vdpa_unregister_device);

/**
 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void vdpa_unregister_device(struct vdpa_device *vdev)
{
	down_write(&vdpa_dev_lock);
	device_unregister(&vdev->dev);
	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);

/**
 * __vdpa_register_driver - register a vDPA device driver
 * @drv: the vdpa device driver to be registered
 * @owner: module owner of the driver
 *
 * Return: Returns an error when the registration fails
 */
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
{
	drv->driver.bus = &vdpa_bus;
	drv->driver.owner = owner;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__vdpa_register_driver);

/**
 * vdpa_unregister_driver - unregister a vDPA device driver
 * @drv: the vdpa device driver to be unregistered
 */
void vdpa_unregister_driver(struct vdpa_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
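
/*
 * Sketch of bus-driver registration (hypothetical driver; the
 * vdpa_register_driver()/module_vdpa_driver() conveniences in <linux/vdpa.h>
 * expand to __vdpa_register_driver(drv, THIS_MODULE)):
 *
 *	static struct vdpa_driver my_vdpa_driver = {
 *		.driver = {
 *			.name = "my_vdpa",
 *		},
 *		.probe	= my_probe,
 *		.remove = my_remove,
 *	};
 *	module_vdpa_driver(my_vdpa_driver);
 */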

/**
 * vdpa_mgmtdev_register - register a vdpa management device
 *
 * @mdev: Pointer to vdpa management device
 * vdpa_mgmtdev_register() registers a vdpa management device which supports
 * vdpa device management.
 * Return: Returns 0 on success, or an error when the required callback ops
 * are not initialized.
 */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
{
	if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
		return -EINVAL;

	INIT_LIST_HEAD(&mdev->list);
	down_write(&vdpa_dev_lock);
	list_add_tail(&mdev->list, &mdev_head);
	up_write(&vdpa_dev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
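
/*
 * Sketch of a management device registration (hypothetical names; the ops
 * table must provide at least dev_add and dev_del, as checked above):
 *
 *	static const struct vdpa_mgmtdev_ops my_mgmt_ops = {
 *		.dev_add = my_dev_add,
 *		.dev_del = my_dev_del,
 *	};
 *	static struct vdpa_mgmt_dev my_mgmt_dev = {
 *		.device	  = &my_parent_dev,
 *		.ops	  = &my_mgmt_ops,
 *		.id_table = my_id_table,	// e.g. { VIRTIO_ID_NET }, { 0 }
 *	};
 *	err = vdpa_mgmtdev_register(&my_mgmt_dev);
 */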

static int vdpa_match_remove(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_mgmt_dev *mdev = vdev->mdev;

	if (mdev == data)
		mdev->ops->dev_del(mdev, vdev);
	return 0;
}

void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
	down_write(&vdpa_dev_lock);

	list_del(&mdev->list);

	/* Walk the bus and delete all the devices that belong to this management device. */
	bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);

	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);

static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
				     unsigned int offset,
				     void *buf, unsigned int len)
{
	const struct vdpa_config_ops *ops = vdev->config;

	/*
	 * Config accesses aren't supposed to trigger before features are set.
	 * If one does happen, we assume a legacy guest.
	 */
	if (!vdev->features_valid)
		vdpa_set_features_unlocked(vdev, 0);
	ops->get_config(vdev, offset, buf, len);
}

/**
 * vdpa_get_config - Get one or more device configuration fields.
 * @vdev: vdpa device to operate on
 * @offset: starting byte offset of the field
 * @buf: buffer pointer to read to
 * @len: length of the configuration fields in bytes
 */
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
		     void *buf, unsigned int len)
{
	down_read(&vdev->cf_lock);
	vdpa_get_config_unlocked(vdev, offset, buf, len);
	up_read(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);

/**
 * vdpa_set_config - Set one or more device configuration fields.
 * @vdev: vdpa device to operate on
 * @offset: starting byte offset of the field
 * @buf: buffer pointer to read from
 * @length: length of the configuration fields in bytes
 */
void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
		     const void *buf, unsigned int length)
{
	down_write(&vdev->cf_lock);
	vdev->config->set_config(vdev, offset, buf, length);
	up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_set_config);
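
/*
 * Sketch: reading the MAC of a virtio-net vDPA device through the locked
 * helper (assumes a net class device whose config space follows
 * struct virtio_net_config):
 *
 *	u8 mac[ETH_ALEN];
 *
 *	vdpa_get_config(vdev, offsetof(struct virtio_net_config, mac),
 *			mac, sizeof(mac));
 */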

static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
				 const char *busname, const char *devname)
{
	/* The bus name is optional for a simulated management device, so a
	 * device on a bus only matches when a bus attribute is provided,
	 * and vice versa.
	 */
	if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
		return false;

	if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
		return true;

	if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
	    (strcmp(dev_name(mdev->device), devname) == 0))
		return true;

	return false;
}

static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
{
	struct vdpa_mgmt_dev *mdev;
	const char *busname = NULL;
	const char *devname;

	if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
		return ERR_PTR(-EINVAL);
	devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
	if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
		busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);

	list_for_each_entry(mdev, &mdev_head, list) {
		if (mgmtdev_handle_match(mdev, busname, devname))
			return mdev;
	}
	return ERR_PTR(-ENODEV);
}

static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
{
	if (mdev->device->bus &&
	    nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
		return -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
		return -EMSGSIZE;
	return 0;
}

static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
			     u32 portid, u32 seq, int flags)
{
	u64 supported_classes = 0;
	void *hdr;
	int i = 0;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
	if (!hdr)
		return -EMSGSIZE;
	err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
	if (err)
		goto msg_err;

	while (mdev->id_table[i].device) {
		if (mdev->id_table[i].device <= 63)
			supported_classes |= BIT_ULL(mdev->id_table[i].device);
		i++;
	}

	if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
			      supported_classes, VDPA_ATTR_UNSPEC)) {
		err = -EMSGSIZE;
		goto msg_err;
	}
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,
			mdev->max_supported_vqs)) {
		err = -EMSGSIZE;
		goto msg_err;
	}
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES,
			      mdev->supported_features, VDPA_ATTR_PAD)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}

static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		up_read(&vdpa_dev_lock);
		NL_SET_ERR_MSG_MOD(info->extack, "Failed to find the specified mgmt device");
		err = PTR_ERR(mdev);
		goto out;
	}

	err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
	up_read(&vdpa_dev_lock);
	if (err)
		goto out;
	err = genlmsg_reply(msg, info);
	return err;

out:
	nlmsg_free(msg);
	return err;
}

static int
vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_mgmt_dev *mdev;
	int start = cb->args[0];
	int idx = 0;
	int err;

	down_read(&vdpa_dev_lock);
	list_for_each_entry(mdev, &mdev_head, list) {
		if (idx < start) {
			idx++;
			continue;
		}
		err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err)
			goto out;
		idx++;
	}
out:
	up_read(&vdpa_dev_lock);
	cb->args[0] = idx;
	return msg->len;
}
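
/*
 * Userspace sketch: the doit/dumpit pair above typically backs the iproute2
 * "vdpa" tool; the output below is illustrative, not verbatim:
 *
 *	$ vdpa mgmtdev show
 *	vdpasim_net:
 *	  supported_classes net
 */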

#define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)     | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))

static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_dev_set_config config = {};
	struct nlattr **nl_attrs = info->attrs;
	struct vdpa_mgmt_dev *mdev;
	const u8 *macaddr;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
		macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
		memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
		config.net.mtu =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
		config.net.max_vq_pairs =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]);
		if (!config.net.max_vq_pairs) {
			NL_SET_ERR_MSG_MOD(info->extack,
					   "At least one pair of VQs is required");
			return -EINVAL;
		}
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
	}
	if (nl_attrs[VDPA_ATTR_DEV_FEATURES]) {
		config.device_features =
			nla_get_u64(nl_attrs[VDPA_ATTR_DEV_FEATURES]);
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);
	}

	/* Skip the capability check if the user didn't ask to configure any
	 * device networking attributes. The user may have configured such
	 * attributes through a device-specific method, or may rely on the
	 * device's default attributes.
	 */
	if ((config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	down_write(&vdpa_dev_lock);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		NL_SET_ERR_MSG_MOD(info->extack, "Failed to find the specified management device");
		err = PTR_ERR(mdev);
		goto err;
	}
	if ((config.mask & mdev->config_attr_mask) != config.mask) {
		NL_SET_ERR_MSG_MOD(info->extack,
				   "Some of the provided attributes are not supported");
		err = -EOPNOTSUPP;
		goto err;
	}

	err = mdev->ops->dev_add(mdev, name, &config);
err:
	up_write(&vdpa_dev_lock);
	return err;
}

static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct vdpa_device *vdev;
	struct device *dev;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	down_write(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "Only user-created devices can be deleted by the user");
		err = -EINVAL;
		goto mdev_err;
	}
	mdev = vdev->mdev;
	mdev->ops->dev_del(mdev, vdev);
mdev_err:
	put_device(dev);
dev_err:
	up_write(&vdpa_dev_lock);
	return err;
}
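
/*
 * Userspace sketch for the two handlers above, using the iproute2 "vdpa"
 * tool (attribute names follow the netlink policy; values are illustrative):
 *
 *	$ vdpa dev add name vdpa0 mgmtdev vdpasim_net mac 00:11:22:33:44:55 mtu 1500
 *	$ vdpa dev del vdpa0
 */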

static int
vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
	      int flags, struct netlink_ext_ack *extack)
{
	u16 max_vq_size;
	u16 min_vq_size = 1;
	u32 device_id;
	u32 vendor_id;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
	if (!hdr)
		return -EMSGSIZE;

	err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
	if (err)
		goto msg_err;

	device_id = vdev->config->get_device_id(vdev);
	vendor_id = vdev->config->get_vendor_id(vdev);
	max_vq_size = vdev->config->get_vq_num_max(vdev);
	if (vdev->config->get_vq_num_min)
		min_vq_size = vdev->config->get_vq_num_min(vdev);

	err = -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MIN_VQ_SIZE, min_vq_size))
		goto msg_err;

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}

static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
	if (err)
		goto mdev_err;

	err = genlmsg_reply(msg, info);
	put_device(dev);
	up_read(&vdpa_dev_lock);
	return err;

mdev_err:
	put_device(dev);
err:
	up_read(&vdpa_dev_lock);
	nlmsg_free(msg);
	return err;
}

struct vdpa_dev_dump_info {
	struct sk_buff *msg;
	struct netlink_callback *cb;
	int start_idx;
	int idx;
};

static int vdpa_dev_dump(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_dev_dump_info *info = data;
	int err;

	if (!vdev->mdev)
		return 0;
	if (info->idx < info->start_idx) {
		info->idx++;
		return 0;
	}
	err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
			    info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
	if (err)
		return err;

	info->idx++;
	return 0;
}

static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_dev_dump_info info;

	info.msg = msg;
	info.cb = cb;
	info.start_idx = cb->args[0];
	info.idx = 0;

	down_read(&vdpa_dev_lock);
	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
	up_read(&vdpa_dev_lock);
	cb->args[0] = info.idx;
	return msg->len;
}
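
/*
 * Userspace sketch (illustrative output of the get/dump handlers above):
 *
 *	$ vdpa dev show vdpa0
 *	vdpa0: type network mgmtdev vdpasim_net vendor_id 0 max_vqs 2 max_vq_size 256
 */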

static int vdpa_dev_net_mq_config_fill(struct sk_buff *msg, u64 features,
				       const struct virtio_net_config *config)
{
	u16 val_u16;

	if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0 &&
	    (features & BIT_ULL(VIRTIO_NET_F_RSS)) == 0)
		return 0;

	val_u16 = __virtio16_to_cpu(true, config->max_virtqueue_pairs);

	return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16);
}

static int vdpa_dev_net_mtu_config_fill(struct sk_buff *msg, u64 features,
					const struct virtio_net_config *config)
{
	u16 val_u16;

	if ((features & BIT_ULL(VIRTIO_NET_F_MTU)) == 0)
		return 0;

	val_u16 = __virtio16_to_cpu(true, config->mtu);

	return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16);
}

static int vdpa_dev_net_mac_config_fill(struct sk_buff *msg, u64 features,
					const struct virtio_net_config *config)
{
	if ((features & BIT_ULL(VIRTIO_NET_F_MAC)) == 0)
		return 0;

	return nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR,
		       sizeof(config->mac), config->mac);
}

static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
{
	struct virtio_net_config config = {};
	u64 features_device;
	u16 val_u16;

	vdev->config->get_config(vdev, 0, &config, sizeof(config));

	val_u16 = __virtio16_to_cpu(true, config.status);
	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
		return -EMSGSIZE;

	features_device = vdev->config->get_device_features(vdev);

	if (nla_put_u64_64bit(msg, VDPA_ATTR_VDPA_DEV_SUPPORTED_FEATURES, features_device,
			      VDPA_ATTR_PAD))
		return -EMSGSIZE;

	if (vdpa_dev_net_mtu_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	if (vdpa_dev_net_mac_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	return vdpa_dev_net_mq_config_fill(msg, features_device, &config);
}

static int
vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
		     int flags, struct netlink_ext_ack *extack)
{
	u64 features_driver;
	u8 status = 0;
	u32 device_id;
	void *hdr;
	int err;

	down_read(&vdev->cf_lock);
	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
			  VDPA_CMD_DEV_CONFIG_GET);
	if (!hdr) {
		err = -EMSGSIZE;
		goto out;
	}

	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	device_id = vdev->config->get_device_id(vdev);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	/* only read driver features after the feature negotiation is done */
	status = vdev->config->get_status(vdev);
	if (status & VIRTIO_CONFIG_S_FEATURES_OK) {
		features_driver = vdev->config->get_driver_features(vdev);
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features_driver,
				      VDPA_ATTR_PAD)) {
			err = -EMSGSIZE;
			goto msg_err;
		}
	}

	switch (device_id) {
	case VIRTIO_ID_NET:
		err = vdpa_dev_net_config_fill(vdev, msg);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	if (err)
		goto msg_err;

	up_read(&vdev->cf_lock);
	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
out:
	up_read(&vdev->cf_lock);
	return err;
}

static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
			       struct genl_info *info, u32 index)
{
	struct virtio_net_config config = {};
	u64 features;
	u16 max_vqp;
	u8 status;
	int err;

	status = vdev->config->get_status(vdev);
	if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
		NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete");
		return -EAGAIN;
	}
	vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));

	max_vqp = __virtio16_to_cpu(true, config.max_virtqueue_pairs);
	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
		return -EMSGSIZE;

	features = vdev->config->get_driver_features(vdev);
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
			      features, VDPA_ATTR_PAD))
		return -EMSGSIZE;

	if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
		return -EMSGSIZE;

	err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack);
	if (err)
		return err;

	return 0;
}

static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
			     struct genl_info *info, u32 index)
{
	int err;

	down_read(&vdev->cf_lock);
	if (!vdev->config->get_vendor_vq_stats) {
		err = -EOPNOTSUPP;
		goto out;
	}

	err = vdpa_fill_stats_rec(vdev, msg, info, index);
out:
	up_read(&vdev->cf_lock);
	return err;
}

static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev,
				      struct sk_buff *msg,
				      struct genl_info *info, u32 index)
{
	u32 device_id;
	void *hdr;
	int err;
	u32 portid = info->snd_portid;
	u32 seq = info->snd_seq;
	u32 flags = 0;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
			  VDPA_CMD_DEV_VSTATS_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
		err = -EMSGSIZE;
		goto undo_msg;
	}

	device_id = vdev->config->get_device_id(vdev);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
		err = -EMSGSIZE;
		goto undo_msg;
	}

	switch (device_id) {
	case VIRTIO_ID_NET:
		if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
			NL_SET_ERR_MSG_MOD(info->extack, "queue index exceeds max value");
			err = -ERANGE;
			break;
		}

		err = vendor_stats_fill(vdev, msg, info, index);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	genlmsg_end(msg, hdr);

	return err;

undo_msg:
	genlmsg_cancel(msg, hdr);
	return err;
}

static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq,
				   0, info->extack);
	if (!err)
		err = genlmsg_reply(msg, info);

mdev_err:
	put_device(dev);
dev_err:
	up_read(&vdpa_dev_lock);
	if (err)
		nlmsg_free(msg);
	return err;
}

static int vdpa_dev_config_dump(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_dev_dump_info *info = data;
	int err;

	if (!vdev->mdev)
		return 0;
	if (info->idx < info->start_idx) {
		info->idx++;
		return 0;
	}
	err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
				   info->cb->nlh->nlmsg_seq, NLM_F_MULTI,
				   info->cb->extack);
	if (err)
		return err;

	info->idx++;
	return 0;
}

static int
vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_dev_dump_info info;

	info.msg = msg;
	info.cb = cb;
	info.start_idx = cb->args[0];
	info.idx = 0;

	down_read(&vdpa_dev_lock);
	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
	up_read(&vdpa_dev_lock);
	cb->args[0] = info.idx;
	return msg->len;
}
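
/*
 * Userspace sketch (illustrative output; which fields appear depends on the
 * negotiated features, as implemented by the *_config_fill() helpers above):
 *
 *	$ vdpa dev config show vdpa0
 *	vdpa0: mac 00:11:22:33:44:55 status 1 mtu 1500 max_vq_pairs 1
 */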

static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
					  struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	u32 index;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
		return -EINVAL;

	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
	down_read(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index);
	if (err)
		goto mdev_err;

	err = genlmsg_reply(msg, info);

	put_device(dev);
	up_read(&vdpa_dev_lock);

	return err;

mdev_err:
	put_device(dev);
dev_err:
	nlmsg_free(msg);
	up_read(&vdpa_dev_lock);
	return err;
}
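
/*
 * Userspace sketch (assumes the parent device implements get_vendor_vq_stats;
 * the stat names and the exact CLI syntax are illustrative):
 *
 *	$ vdpa dev vstats show vdpa0 qidx 0
 *	vdpa0: queue_type rx received_desc 256 completed_desc 128
 */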

static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
	[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
	[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
	[VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
	[VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
	[VDPA_ATTR_DEV_NET_CFG_MAX_VQP] = { .type = NLA_U16 },
	/* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
	[VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
	[VDPA_ATTR_DEV_QUEUE_INDEX] = { .type = NLA_U32 },
	[VDPA_ATTR_DEV_FEATURES] = { .type = NLA_U64 },
};

static const struct genl_ops vdpa_nl_ops[] = {
	{
		.cmd = VDPA_CMD_MGMTDEV_GET,
		.doit = vdpa_nl_cmd_mgmtdev_get_doit,
		.dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_NEW,
		.doit = vdpa_nl_cmd_dev_add_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_DEL,
		.doit = vdpa_nl_cmd_dev_del_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_GET,
		.doit = vdpa_nl_cmd_dev_get_doit,
		.dumpit = vdpa_nl_cmd_dev_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_CONFIG_GET,
		.doit = vdpa_nl_cmd_dev_config_get_doit,
		.dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_VSTATS_GET,
		.doit = vdpa_nl_cmd_dev_stats_get_doit,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family vdpa_nl_family __ro_after_init = {
	.name = VDPA_GENL_NAME,
	.version = VDPA_GENL_VERSION,
	.maxattr = VDPA_ATTR_MAX,
	.policy = vdpa_nl_policy,
	.netnsok = false,
	.module = THIS_MODULE,
	.ops = vdpa_nl_ops,
	.n_ops = ARRAY_SIZE(vdpa_nl_ops),
	.resv_start_op = VDPA_CMD_DEV_VSTATS_GET + 1,
};

static int vdpa_init(void)
{
	int err;

	err = bus_register(&vdpa_bus);
	if (err)
		return err;
	err = genl_register_family(&vdpa_nl_family);
	if (err)
		goto err;
	return 0;

err:
	bus_unregister(&vdpa_bus);
	return err;
}

static void __exit vdpa_exit(void)
{
	genl_unregister_family(&vdpa_nl_family);
	bus_unregister(&vdpa_bus);
	ida_destroy(&vdpa_index_ida);
}
core_initcall(vdpa_init);
module_exit(vdpa_exit);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL v2");