// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bridge driver for Alibaba ENI (Elastic Network Interface)
 *
 * Copyright (c) 2021, Alibaba Inc. All rights reserved.
 * Author: Wu Zongyong <wuzongyong@linux.alibaba.com>
 *
 */

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_legacy.h>
#include <uapi/linux/virtio_net.h>

#define ENI_MSIX_NAME_SIZE 256

#define ENI_ERR(pdev, fmt, ...)	\
	dev_err(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)
#define ENI_DBG(pdev, fmt, ...)	\
	dev_dbg(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)
#define ENI_INFO(pdev, fmt, ...) \
	dev_info(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)

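/*
 * Per-virtqueue state: the notify doorbell, the MSI-X vector name,
 * the driver's callback and the assigned irq.
 */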
struct eni_vring {
	void __iomem *notify;
	char msix_name[ENI_MSIX_NAME_SIZE];
	struct vdpa_callback cb;
	int irq;
};

struct eni_vdpa {
	struct vdpa_device vdpa;
	struct virtio_pci_legacy_device ldev;
	struct eni_vring *vring;
	struct vdpa_callback config_cb;
	char msix_name[ENI_MSIX_NAME_SIZE];
	int config_irq;
	int queues;
	int vectors;
};

static struct eni_vdpa *vdpa_to_eni(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct eni_vdpa, vdpa);
}

static struct virtio_pci_legacy_device *vdpa_to_ldev(struct vdpa_device *vdpa)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	return &eni_vdpa->ldev;
}

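/*
 * vDPA requires the device to access buffers through the platform
 * DMA/IOMMU path, so VIRTIO_F_ACCESS_PLATFORM and VIRTIO_F_ORDER_PLATFORM
 * are advertised in addition to the legacy feature bits read from hardware.
 */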
static u64 eni_vdpa_get_device_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	u64 features = vp_legacy_get_features(ldev);

	features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
	features |= BIT_ULL(VIRTIO_F_ORDER_PLATFORM);

	return features;
}

static int eni_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	if (!(features & BIT_ULL(VIRTIO_NET_F_MRG_RXBUF)) && features) {
		ENI_ERR(ldev->pci_dev,
			"VIRTIO_NET_F_MRG_RXBUF is not negotiated\n");
		return -EINVAL;
	}

	vp_legacy_set_features(ldev, (u32)features);

	return 0;
}

static u64 eni_vdpa_get_driver_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_driver_features(ldev);
}

static u8 eni_vdpa_get_status(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_status(ldev);
}

static int eni_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	int irq = eni_vdpa->vring[idx].irq;

	if (irq == VIRTIO_MSI_NO_VECTOR)
		return -EINVAL;

	return irq;
}

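/*
 * Detach every virtqueue and the config interrupt from their MSI-X
 * vectors, then release the vectors themselves.
 */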
static void eni_vdpa_free_irq(struct eni_vdpa *eni_vdpa)
{
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	struct pci_dev *pdev = ldev->pci_dev;
	int i;

	for (i = 0; i < eni_vdpa->queues; i++) {
		if (eni_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
			vp_legacy_queue_vector(ldev, i, VIRTIO_MSI_NO_VECTOR);
			devm_free_irq(&pdev->dev, eni_vdpa->vring[i].irq,
				      &eni_vdpa->vring[i]);
			eni_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		}
	}

	if (eni_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
		vp_legacy_config_vector(ldev, VIRTIO_MSI_NO_VECTOR);
		devm_free_irq(&pdev->dev, eni_vdpa->config_irq, eni_vdpa);
		eni_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
	}

	if (eni_vdpa->vectors) {
		pci_free_irq_vectors(pdev);
		eni_vdpa->vectors = 0;
	}
}

static irqreturn_t eni_vdpa_vq_handler(int irq, void *arg)
{
	struct eni_vring *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t eni_vdpa_config_handler(int irq, void *arg)
{
	struct eni_vdpa *eni_vdpa = arg;

	if (eni_vdpa->config_cb.callback)
		return eni_vdpa->config_cb.callback(eni_vdpa->config_cb.private);

	return IRQ_HANDLED;
}

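/*
 * Allocate one MSI-X vector per virtqueue plus one for config changes,
 * and wire each vector to the device through the legacy vector registers.
 */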
static int eni_vdpa_request_irq(struct eni_vdpa *eni_vdpa)
{
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	struct pci_dev *pdev = ldev->pci_dev;
	int i, ret, irq;
	int queues = eni_vdpa->queues;
	int vectors = queues + 1;

	ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
	if (ret != vectors) {
		ENI_ERR(pdev,
			"failed to allocate irq vectors: wanted %d, got %d\n",
			vectors, ret);
		return ret;
	}

	eni_vdpa->vectors = vectors;

	for (i = 0; i < queues; i++) {
		snprintf(eni_vdpa->vring[i].msix_name, ENI_MSIX_NAME_SIZE,
			 "eni-vdpa[%s]-%d", pci_name(pdev), i);
		irq = pci_irq_vector(pdev, i);
		ret = devm_request_irq(&pdev->dev, irq,
				       eni_vdpa_vq_handler,
				       0, eni_vdpa->vring[i].msix_name,
				       &eni_vdpa->vring[i]);
		if (ret) {
			ENI_ERR(pdev, "failed to request irq for vq %d\n", i);
			goto err;
		}
		vp_legacy_queue_vector(ldev, i, i);
		eni_vdpa->vring[i].irq = irq;
	}

	snprintf(eni_vdpa->msix_name, ENI_MSIX_NAME_SIZE, "eni-vdpa[%s]-config",
		 pci_name(pdev));
	irq = pci_irq_vector(pdev, queues);
	ret = devm_request_irq(&pdev->dev, irq, eni_vdpa_config_handler, 0,
			       eni_vdpa->msix_name, eni_vdpa);
	if (ret) {
		ENI_ERR(pdev, "failed to request irq for config change\n");
		goto err;
	}
	vp_legacy_config_vector(ldev, queues);
	eni_vdpa->config_irq = irq;

	return 0;
err:
	eni_vdpa_free_irq(eni_vdpa);
	return ret;
}

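/*
 * Interrupts are only needed while the driver is running, so they are
 * requested on the transition to DRIVER_OK and released again when
 * DRIVER_OK is cleared.
 */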
static void eni_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	u8 s = eni_vdpa_get_status(vdpa);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
	    !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
		eni_vdpa_request_irq(eni_vdpa);
	}

	vp_legacy_set_status(ldev, status);

	if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    (s & VIRTIO_CONFIG_S_DRIVER_OK))
		eni_vdpa_free_irq(eni_vdpa);
}

static int eni_vdpa_reset(struct vdpa_device *vdpa)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	u8 s = eni_vdpa_get_status(vdpa);

	vp_legacy_set_status(ldev, 0);

	if (s & VIRTIO_CONFIG_S_DRIVER_OK)
		eni_vdpa_free_irq(eni_vdpa);

	return 0;
}

static u16 eni_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_queue_size(ldev, 0);
}

static u16 eni_vdpa_get_vq_num_min(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_queue_size(ldev, 0);
}

static int eni_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
				 struct vdpa_vq_state *state)
{
	return -EOPNOTSUPP;
}

static int eni_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
				 const struct vdpa_vq_state *state)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	const struct vdpa_vq_state_split *split = &state->split;

	/* ENI is built upon the virtio-pci specification, which does not
	 * support setting the state of a virtqueue. But if the requested
	 * state happens to equal the device's initial state, we can let
	 * it pass.
	 */
	if (!vp_legacy_get_queue_enable(ldev, qid)
	    && split->avail_index == 0)
		return 0;

	return -EOPNOTSUPP;
}

static void eni_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
			       struct vdpa_callback *cb)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	eni_vdpa->vring[qid].cb = *cb;
}

static void eni_vdpa_set_vq_ready(struct vdpa_device *vdpa, u16 qid,
				  bool ready)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	/* ENI is a legacy virtio-pci device: enabling and disabling a
	 * virtqueue is not supported by the specification, but a queue
	 * can be disabled by setting its address to 0.
	 */
	if (!ready)
		vp_legacy_set_queue_address(ldev, qid, 0);
}

static bool eni_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_queue_enable(ldev, qid);
}

static void eni_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
				u32 num)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	struct pci_dev *pdev = ldev->pci_dev;
	u16 n = vp_legacy_get_queue_size(ldev, qid);

	/* ENI is a legacy virtio-pci device, which does not allow the
	 * virtqueue size to be changed. Just report an error if someone
	 * tries to change it.
	 */
	if (num != n)
		ENI_ERR(pdev,
			"cannot change the size of vq %u from the fixed %u to %u\n",
			qid, n, num);
}

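/*
 * A legacy device takes the queue address as a page frame number; the
 * driver and device areas are not programmed separately since the legacy
 * ring layout places them at fixed offsets after the descriptor area.
 */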
static int eni_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
				   u64 desc_area, u64 driver_area,
				   u64 device_area)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	u32 pfn = desc_area >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;

	vp_legacy_set_queue_address(ldev, qid, pfn);

	return 0;
}

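/* Notify the device by writing the queue index to the legacy doorbell. */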
static void eni_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	iowrite16(qid, eni_vdpa->vring[qid].notify);
}

static u32 eni_vdpa_get_device_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return ldev->id.device;
}

static u32 eni_vdpa_get_vendor_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return ldev->id.vendor;
}

static u32 eni_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
	return VIRTIO_PCI_VRING_ALIGN;
}

static size_t eni_vdpa_get_config_size(struct vdpa_device *vdpa)
{
	return sizeof(struct virtio_net_config);
}

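/*
 * The device-specific config of a legacy device follows the common
 * header; VIRTIO_PCI_CONFIG_OFF() accounts for the extra MSI-X fields
 * when vectors are in use.
 */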
static void eni_vdpa_get_config(struct vdpa_device *vdpa,
				unsigned int offset,
				void *buf, unsigned int len)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	void __iomem *ioaddr = ldev->ioaddr +
		VIRTIO_PCI_CONFIG_OFF(eni_vdpa->vectors) +
		offset;
	u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		*p++ = ioread8(ioaddr + i);
}

static void eni_vdpa_set_config(struct vdpa_device *vdpa,
				unsigned int offset, const void *buf,
				unsigned int len)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	void __iomem *ioaddr = ldev->ioaddr +
		VIRTIO_PCI_CONFIG_OFF(eni_vdpa->vectors) +
		offset;
	const u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		iowrite8(*p++, ioaddr + i);
}

static void eni_vdpa_set_config_cb(struct vdpa_device *vdpa,
				   struct vdpa_callback *cb)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	eni_vdpa->config_cb = *cb;
}

static const struct vdpa_config_ops eni_vdpa_ops = {
	.get_device_features = eni_vdpa_get_device_features,
	.set_driver_features = eni_vdpa_set_driver_features,
	.get_driver_features = eni_vdpa_get_driver_features,
	.get_status	= eni_vdpa_get_status,
	.set_status	= eni_vdpa_set_status,
	.reset		= eni_vdpa_reset,
	.get_vq_num_max	= eni_vdpa_get_vq_num_max,
	.get_vq_num_min	= eni_vdpa_get_vq_num_min,
	.get_vq_state	= eni_vdpa_get_vq_state,
	.set_vq_state	= eni_vdpa_set_vq_state,
	.set_vq_cb	= eni_vdpa_set_vq_cb,
	.set_vq_ready	= eni_vdpa_set_vq_ready,
	.get_vq_ready	= eni_vdpa_get_vq_ready,
	.set_vq_num	= eni_vdpa_set_vq_num,
	.set_vq_address	= eni_vdpa_set_vq_address,
	.kick_vq	= eni_vdpa_kick_vq,
	.get_device_id	= eni_vdpa_get_device_id,
	.get_vendor_id	= eni_vdpa_get_vendor_id,
	.get_vq_align	= eni_vdpa_get_vq_align,
	.get_config_size = eni_vdpa_get_config_size,
	.get_config	= eni_vdpa_get_config,
	.set_config	= eni_vdpa_set_config,
	.set_config_cb  = eni_vdpa_set_config_cb,
	.get_vq_irq	= eni_vdpa_get_vq_irq,
};

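/*
 * A virtio-net device has 2 * max_virtqueue_pairs data queues when
 * VIRTIO_NET_F_MQ is offered (2 otherwise), plus one control queue
 * when VIRTIO_NET_F_CTRL_VQ is offered.
 */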
static u16 eni_vdpa_get_num_queues(struct eni_vdpa *eni_vdpa)
{
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	u32 features = vp_legacy_get_features(ldev);
	u16 num = 2;

	if (features & BIT_ULL(VIRTIO_NET_F_MQ)) {
		__virtio16 max_virtqueue_pairs;

		eni_vdpa_get_config(&eni_vdpa->vdpa,
			offsetof(struct virtio_net_config, max_virtqueue_pairs),
			&max_virtqueue_pairs,
			sizeof(max_virtqueue_pairs));
		num = 2 * __virtio16_to_cpu(virtio_legacy_is_little_endian(),
				max_virtqueue_pairs);
	}

	if (features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
		num += 1;

	return num;
}

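/*
 * Probe: enable the PCI device, bring up the legacy virtio-pci layer,
 * size the virtqueue array from the device features and register the
 * result on the vdpa bus.
 */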
static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct eni_vdpa *eni_vdpa;
	struct virtio_pci_legacy_device *ldev;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	eni_vdpa = vdpa_alloc_device(struct eni_vdpa, vdpa,
				     dev, &eni_vdpa_ops, 1, 1, NULL, false);
	if (IS_ERR(eni_vdpa)) {
		ENI_ERR(pdev, "failed to allocate vDPA structure\n");
		return PTR_ERR(eni_vdpa);
	}

	ldev = &eni_vdpa->ldev;
	ldev->pci_dev = pdev;

	ret = vp_legacy_probe(ldev);
	if (ret) {
		ENI_ERR(pdev, "failed to probe legacy PCI device\n");
		goto err;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, eni_vdpa);

	eni_vdpa->vdpa.dma_dev = &pdev->dev;
	eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa);

	eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,
				       sizeof(*eni_vdpa->vring),
				       GFP_KERNEL);
	if (!eni_vdpa->vring) {
		ret = -ENOMEM;
		ENI_ERR(pdev, "failed to allocate virtqueues\n");
		goto err_remove_vp_legacy;
	}

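	/* All virtqueues share the single legacy QUEUE_NOTIFY doorbell. */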
	for (i = 0; i < eni_vdpa->queues; i++) {
		eni_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		eni_vdpa->vring[i].notify = ldev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
	}
	eni_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;

	ret = vdpa_register_device(&eni_vdpa->vdpa, eni_vdpa->queues);
	if (ret) {
		ENI_ERR(pdev, "failed to register to vdpa bus\n");
		goto err_remove_vp_legacy;
	}

	return 0;

err_remove_vp_legacy:
	vp_legacy_remove(&eni_vdpa->ldev);
err:
	put_device(&eni_vdpa->vdpa.dev);
	return ret;
}

static void eni_vdpa_remove(struct pci_dev *pdev)
{
	struct eni_vdpa *eni_vdpa = pci_get_drvdata(pdev);

	vdpa_unregister_device(&eni_vdpa->vdpa);
	vp_legacy_remove(&eni_vdpa->ldev);
}

static struct pci_device_id eni_pci_ids[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_NET,
			 PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_ID_NET) },
	{ 0 },
};

static struct pci_driver eni_vdpa_driver = {
	.name		= "alibaba-eni-vdpa",
	.id_table	= eni_pci_ids,
	.probe		= eni_vdpa_probe,
	.remove		= eni_vdpa_remove,
};

module_pci_driver(eni_vdpa_driver);

MODULE_AUTHOR("Wu Zongyong <wuzongyong@linux.alibaba.com>");
MODULE_DESCRIPTION("Alibaba ENI vDPA driver");
MODULE_LICENSE("GPL v2");