/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Intel MIC Host driver.
 *
 */
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/dmaengine.h>
#include <linux/mic_common.h>

#include "../common/mic_dev.h"
#include "mic_device.h"
#include "mic_smpt.h"
#include "mic_virtio.h"

/*
 * Size of the internal buffer used during DMAs as an intermediate buffer
 * for copy to/from user.
 */
#define MIC_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL)

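/*
 * Carry out a single synchronous DMA transfer: prepare a memcpy
 * descriptor, submit it and then poll for completion with
 * dma_sync_wait(). The DMA_PREP_FENCE flag tells the DMA driver that
 * subsequent operations depend on the result of this transfer.
 */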
static int mic_sync_dma(struct mic_device *mdev, dma_addr_t dst,
			dma_addr_t src, size_t len)
{
	int err = 0;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *mic_ch = mdev->dma_ch;

	if (!mic_ch) {
		err = -EBUSY;
		goto error;
	}

	tx = mic_ch->device->device_prep_dma_memcpy(mic_ch, dst, src, len,
						    DMA_PREP_FENCE);
	if (!tx) {
		err = -ENOMEM;
		goto error;
	} else {
		dma_cookie_t cookie = tx->tx_submit(tx);

		err = dma_submit_error(cookie);
		if (err)
			goto error;
		err = dma_sync_wait(mic_ch, cookie);
	}
error:
	if (err)
		dev_err(mdev->sdev->parent, "%s %d err %d\n",
			__func__, __LINE__, err);
	return err;
}

/*
 * Initiates the copies across the PCIe bus from card memory to a user
 * space buffer. When transfers are done using DMA, source/destination
 * addresses and transfer length must follow the alignment requirements of
 * the MIC DMA engine.
 */
static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, void __user *ubuf,
				   size_t len, u64 daddr, size_t dlen,
				   int vr_idx)
{
	struct mic_device *mdev = mvdev->mdev;
	void __iomem *dbuf = mdev->aper.va + daddr;
	struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
	size_t dma_alignment = 1 << mdev->dma_ch->device->copy_align;
	size_t dma_offset;
	size_t partlen;
	int err;

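	/*
	 * Round the card address down to the DMA alignment boundary. The
	 * extra dma_offset head bytes are transferred into the bounce
	 * buffer along with the payload and then skipped when copying out
	 * to user space (the first chunk copies from buf + dma_offset).
	 */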
	dma_offset = daddr - round_down(daddr, dma_alignment);
	daddr -= dma_offset;
	len += dma_offset;

	while (len) {
		partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);

		err = mic_sync_dma(mdev, mvr->buf_da, daddr,
				   ALIGN(partlen, dma_alignment));
		if (err)
			goto err;

		if (copy_to_user(ubuf, mvr->buf + dma_offset,
				 partlen - dma_offset)) {
			err = -EFAULT;
			goto err;
		}
		daddr += partlen;
		ubuf += partlen;
		dbuf += partlen;
		mvdev->in_bytes_dma += partlen;
		mvdev->in_bytes += partlen;
		len -= partlen;
		dma_offset = 0;
	}
	return 0;
err:
	dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
	return err;
}

/*
 * Initiates copies across the PCIe bus from a user space buffer to card
 * memory. When transfers are done using DMA, source/destination addresses
 * and transfer length must follow the alignment requirements of the MIC
 * DMA engine.
 */
static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, void __user *ubuf,
				     size_t len, u64 daddr, size_t dlen,
				     int vr_idx)
{
	struct mic_device *mdev = mvdev->mdev;
	void __iomem *dbuf = mdev->aper.va + daddr;
	struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
	size_t dma_alignment = 1 << mdev->dma_ch->device->copy_align;
	size_t partlen;
	int err;

	if (daddr & (dma_alignment - 1)) {
		mvdev->tx_dst_unaligned += len;
		goto memcpy;
	} else if (ALIGN(len, dma_alignment) > dlen) {
		mvdev->tx_len_unaligned += len;
		goto memcpy;
	}

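	/*
	 * Aligned path: stage each chunk in the host-side bounce buffer
	 * with copy_from_user() and then DMA it into card memory.
	 */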
	while (len) {
		partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);

		if (copy_from_user(mvr->buf, ubuf, partlen)) {
			err = -EFAULT;
			goto err;
		}
		err = mic_sync_dma(mdev, daddr, mvr->buf_da,
				   ALIGN(partlen, dma_alignment));
		if (err)
			goto err;
		daddr += partlen;
		ubuf += partlen;
		dbuf += partlen;
		mvdev->out_bytes_dma += partlen;
		mvdev->out_bytes += partlen;
		len -= partlen;
	}
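	/*
	 * The DMA path falls through here with len == 0, making the copy
	 * below a no-op; unaligned requests jump straight here and move
	 * the entire length through the aperture instead.
	 */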
memcpy:
	/*
	 * We are copying to IO below and should ideally use something
	 * like copy_from_user_toio(..) if it existed.
	 */
	if (copy_from_user((void __force *)dbuf, ubuf, len)) {
		err = -EFAULT;
		goto err;
	}
	mvdev->out_bytes += len;
	return 0;
err:
	dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
	return err;
}

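/*
 * Passed as the "read" argument to mic_vringh_copy(): true moves data
 * from card memory to the user buffer, false moves user data to the card.
 */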
#define MIC_VRINGH_READ true

/* The function to call to notify the card about added buffers */
static void mic_notify(struct vringh *vrh)
{
	struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh);
	struct mic_vdev *mvdev = mvrh->mvdev;
	s8 db = mvdev->dc->h2c_vdev_db;

	if (db != -1)
		mvdev->mdev->ops->send_intr(mvdev->mdev, db);
}

/* Determine the total number of bytes consumed in a VRINGH KIOV */
static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov)
{
	int i;
	u32 total = iov->consumed;

	for (i = 0; i < iov->i; i++)
		total += iov->iov[i].iov_len;
	return total;
}

/*
 * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
 * This API is heavily based on the vringh_iov_xfer(..) implementation
 * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..)
 * and vringh_iov_push_kern(..) directly is because there is no
 * way to override the VRINGH xfer(..) routines as of v3.10.
 */
static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
			void __user *ubuf, size_t len, bool read, int vr_idx,
			size_t *out_len)
{
	int ret = 0;
	size_t partlen, tot_len = 0;

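	/*
	 * Walk the kiov one element at a time, copying at most the smaller
	 * of the element length and the remaining user buffer. Fully
	 * consumed elements are restored to their original base/length so
	 * that mic_vringh_iov_consumed() can later total the bytes used
	 * when the descriptor chain is completed.
	 */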
	while (len && iov->i < iov->used) {
		partlen = min(iov->iov[iov->i].iov_len, len);
		if (read)
			ret = mic_virtio_copy_to_user(mvdev, ubuf, partlen,
						(u64)iov->iov[iov->i].iov_base,
						iov->iov[iov->i].iov_len,
						vr_idx);
		else
			ret = mic_virtio_copy_from_user(mvdev, ubuf, partlen,
						(u64)iov->iov[iov->i].iov_base,
						iov->iov[iov->i].iov_len,
						vr_idx);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= partlen;
		ubuf += partlen;
		tot_len += partlen;
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;
		if (!iov->iov[iov->i].iov_len) {
			/* Fix up old iov element then increment. */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;

			iov->consumed = 0;
			iov->i++;
		}
	}
	*out_len = tot_len;
	return ret;
}

/*
 * Use the standard VRINGH infrastructure in the kernel to fetch new
 * descriptors, initiate the copies and update the used ring.
 */
static int _mic_virtio_copy(struct mic_vdev *mvdev,
	struct mic_copy_desc *copy)
{
	int ret = 0;
	u32 iovcnt = copy->iovcnt;
	struct iovec iov;
	struct iovec __user *u_iov = copy->iov;
	void __user *ubuf = NULL;
	struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
	struct vringh_kiov *riov = &mvr->riov;
	struct vringh_kiov *wiov = &mvr->wiov;
	struct vringh *vrh = &mvr->vrh;
	u16 *head = &mvr->head;
	struct mic_vring *vr = &mvr->vring;
	size_t len = 0, out_len;

	copy->out_len = 0;
	/* Fetch a new IOVEC if all previous elements have been processed */
	if (riov->i == riov->used && wiov->i == wiov->used) {
		ret = vringh_getdesc_kern(vrh, riov, wiov,
				head, GFP_KERNEL);
		/* Check if there are available descriptors */
		if (ret <= 0)
			return ret;
	}
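	/*
	 * riov, wiov and head live in struct mic_vringh and persist across
	 * successive mic_virtio_copy_desc() calls, so a chain that was only
	 * partially transferred last time resumes where it left off.
	 */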
	while (iovcnt) {
		if (!len) {
			/* Copy over a new iovec from user space. */
			ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
			if (ret) {
				ret = -EINVAL;
				dev_err(mic_dev(mvdev), "%s %d err %d\n",
					__func__, __LINE__, ret);
				break;
			}
			len = iov.iov_len;
			ubuf = iov.iov_base;
		}
		/* Issue all the read descriptors first */
		ret = mic_vringh_copy(mvdev, riov, ubuf, len, MIC_VRINGH_READ,
				      copy->vr_idx, &out_len);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		/* Issue the write descriptors next */
		ret = mic_vringh_copy(mvdev, wiov, ubuf, len, !MIC_VRINGH_READ,
				      copy->vr_idx, &out_len);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		if (!len) {
			/* One user space iovec is now completed */
			iovcnt--;
			u_iov++;
		}
		/* Exit loop if all elements in KIOVs have been processed. */
		if (riov->i == riov->used && wiov->i == wiov->used)
			break;
	}
	/*
	 * Update the used ring if a descriptor was available and some data was
	 * copied in/out and the user asked for a used ring update.
	 */
	if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
		u32 total = 0;

		/* Determine the total data consumed */
		total += mic_vringh_iov_consumed(riov);
		total += mic_vringh_iov_consumed(wiov);
		vringh_complete_kern(vrh, *head, total);
		*head = USHRT_MAX;
		if (vringh_need_notify_kern(vrh) > 0)
			vringh_notify(vrh);
		vringh_kiov_cleanup(riov);
		vringh_kiov_cleanup(wiov);
		/* Update avail idx for user space */
		vr->info->avail_idx = vrh->last_avail_idx;
	}
	return ret;
}

static inline int mic_verify_copy_args(struct mic_vdev *mvdev,
		struct mic_copy_desc *copy)
{
	if (copy->vr_idx >= mvdev->dd->num_vq) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EINVAL);
		return -EINVAL;
	}
	return 0;
}

/* Copy a specified number of virtio descriptors in a chain */
int mic_virtio_copy_desc(struct mic_vdev *mvdev,
		struct mic_copy_desc *copy)
{
	int err;
	struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];

	err = mic_verify_copy_args(mvdev, copy);
	if (err)
		return err;

	mutex_lock(&mvr->vr_mutex);
	if (!mic_vdevup(mvdev)) {
		err = -ENODEV;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto err;
	}
	err = _mic_virtio_copy(mvdev, copy);
	if (err) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, err);
	}
err:
	mutex_unlock(&mvr->vr_mutex);
	return err;
}

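/*
 * Called once the card has published its used ring addresses: the used
 * rings live in card memory and are reached by the host through the PCIe
 * aperture, while the descriptor and available rings remain in host
 * memory set up by mic_virtio_add_device().
 */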
static void mic_virtio_init_post(struct mic_vdev *mvdev)
{
	struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd);
	int i;

	for (i = 0; i < mvdev->dd->num_vq; i++) {
		if (!le64_to_cpu(vqconfig[i].used_address)) {
			dev_warn(mic_dev(mvdev), "used_address zero??\n");
			continue;
		}
		mvdev->mvr[i].vrh.vring.used =
			(void __force *)mvdev->mdev->aper.va +
			le64_to_cpu(vqconfig[i].used_address);
	}

	mvdev->dc->used_address_updated = 0;

	dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n",
		__func__, mvdev->virtio_id);
}

static inline void mic_virtio_device_reset(struct mic_vdev *mvdev)
{
	int i;

	dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n",
		__func__, mvdev->dd->status, mvdev->virtio_id);

	for (i = 0; i < mvdev->dd->num_vq; i++)
		/*
		 * Avoid lockdep false positive. The + 1 is for the mic
		 * mutex which is held in the reset devices code path.
		 */
		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);

	/* 0 status means "reset" */
	mvdev->dd->status = 0;
	mvdev->dc->vdev_reset = 0;
	mvdev->dc->host_ack = 1;

	for (i = 0; i < mvdev->dd->num_vq; i++) {
		struct vringh *vrh = &mvdev->mvr[i].vrh;

		mvdev->mvr[i].vring.info->avail_idx = 0;
		vrh->completed = 0;
		vrh->last_avail_idx = 0;
		vrh->last_used_idx = 0;
	}

	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_unlock(&mvdev->mvr[i].vr_mutex);
}

void mic_virtio_reset_devices(struct mic_device *mdev)
{
	struct list_head *pos, *tmp;
	struct mic_vdev *mvdev;

	dev_dbg(mdev->sdev->parent, "%s\n", __func__);

	list_for_each_safe(pos, tmp, &mdev->vdev_list) {
		mvdev = list_entry(pos, struct mic_vdev, list);
		mic_virtio_device_reset(mvdev);
		mvdev->poll_wake = 1;
		wake_up(&mvdev->waitq);
	}
}

void mic_bh_handler(struct work_struct *work)
{
	struct mic_vdev *mvdev = container_of(work, struct mic_vdev,
			virtio_bh_work);

	if (mvdev->dc->used_address_updated)
		mic_virtio_init_post(mvdev);

	if (mvdev->dc->vdev_reset)
		mic_virtio_device_reset(mvdev);

	mvdev->poll_wake = 1;
	wake_up(&mvdev->waitq);
}

static irqreturn_t mic_virtio_intr_handler(int irq, void *data)
{
	struct mic_vdev *mvdev = data;
	struct mic_device *mdev = mvdev->mdev;

	mdev->ops->intr_workarounds(mdev);
	schedule_work(&mvdev->virtio_bh_work);
	return IRQ_HANDLED;
}

int mic_virtio_config_change(struct mic_vdev *mvdev,
			void __user *argp)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
	int ret = 0, retry, i;
	struct mic_bootparam *bootparam = mvdev->mdev->dp;
	s8 db = bootparam->h2c_config_db;

	mutex_lock(&mvdev->mdev->mic_mutex);
	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);

	if (db == -1 || mvdev->dd->type == -1) {
		ret = -EIO;
		goto exit;
	}

	if (copy_from_user(mic_vq_configspace(mvdev->dd),
			   argp, mvdev->dd->config_len)) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EFAULT);
		ret = -EFAULT;
		goto exit;
	}
	mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
	mvdev->mdev->ops->send_intr(mvdev->mdev, db);

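	/*
	 * Poll for the guest ack in 100ms slices for up to 100 retries,
	 * i.e. roughly 10 seconds overall. The on-stack wait queue is never
	 * woken, so each wait_event_timeout() simply times out unless
	 * guest_ack has already been set.
	 */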
	for (retry = 100; retry--;) {
		ret = wait_event_timeout(wake,
			mvdev->dc->guest_ack, msecs_to_jiffies(100));
		if (ret)
			break;
	}

	dev_dbg(mic_dev(mvdev),
		"%s %d retry: %d\n", __func__, __LINE__, retry);
	mvdev->dc->config_change = 0;
	mvdev->dc->guest_ack = 0;
exit:
	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_unlock(&mvdev->mvr[i].vr_mutex);
	mutex_unlock(&mvdev->mdev->mic_mutex);
	return ret;
}

static int mic_copy_dp_entry(struct mic_vdev *mvdev,
					void __user *argp,
					__u8 *type,
					struct mic_device_desc **devpage)
{
	struct mic_device *mdev = mvdev->mdev;
	struct mic_device_desc dd, *dd_config, *devp;
	struct mic_vqconfig *vqconfig;
	int ret = 0, i;
	bool slot_found = false;

	if (copy_from_user(&dd, argp, sizeof(dd))) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EFAULT);
		return -EFAULT;
	}

	if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
	    dd.num_vq > MIC_MAX_VRINGS) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EINVAL);
		return -EINVAL;
	}

	dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL);
	if (dd_config == NULL) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -ENOMEM);
		return -ENOMEM;
	}
	if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
		ret = -EFAULT;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}

	vqconfig = mic_vq_config(dd_config);
	for (i = 0; i < dd.num_vq; i++) {
		if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
			ret = -EINVAL;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto exit;
		}
	}

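	/*
	 * The device page starts with struct mic_bootparam, followed by a
	 * packed sequence of device descriptors. A descriptor whose type is
	 * 0 has never been used and a type of -1 marks a removed device;
	 * either one is a free slot.
	 */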
	/* Find the first free device page entry */
	for (i = sizeof(struct mic_bootparam);
		i < MIC_DP_SIZE - mic_total_desc_size(dd_config);
		i += mic_total_desc_size(devp)) {
		devp = mdev->dp + i;
		if (devp->type == 0 || devp->type == -1) {
			slot_found = true;
			break;
		}
	}
	if (!slot_found) {
		ret = -EINVAL;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}
	/*
	 * Save off the type before doing the memcpy. Type will be set in the
	 * end after completing all initialization for the new device.
	 */
	*type = dd_config->type;
	dd_config->type = 0;
	memcpy(devp, dd_config, mic_desc_size(dd_config));

	*devpage = devp;
exit:
	kfree(dd_config);
	return ret;
}

static void mic_init_device_ctrl(struct mic_vdev *mvdev,
				struct mic_device_desc *devpage)
{
	struct mic_device_ctrl *dc;

	dc = (void *)devpage + mic_aligned_desc_size(devpage);

	dc->config_change = 0;
	dc->guest_ack = 0;
	dc->vdev_reset = 0;
	dc->host_ack = 0;
	dc->used_address_updated = 0;
	dc->c2h_vdev_db = -1;
	dc->h2c_vdev_db = -1;
	mvdev->dc = dc;
}

int mic_virtio_add_device(struct mic_vdev *mvdev,
			void __user *argp)
{
	struct mic_device *mdev = mvdev->mdev;
	struct mic_device_desc *dd = NULL;
	struct mic_vqconfig *vqconfig;
	int vr_size, i, j, ret;
	u8 type = 0;
	s8 db;
	char irqname[10];
	struct mic_bootparam *bootparam = mdev->dp;
	u16 num;
	dma_addr_t vr_addr;

	mutex_lock(&mdev->mic_mutex);

	ret = mic_copy_dp_entry(mvdev, argp, &type, &dd);
	if (ret) {
		mutex_unlock(&mdev->mic_mutex);
		return ret;
	}

	mic_init_device_ctrl(mvdev, dd);

	mvdev->dd = dd;
	mvdev->virtio_id = type;
	vqconfig = mic_vq_config(dd);
	INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler);

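	/*
	 * Allocate each vring from zeroed host pages and map it for card
	 * DMA access; the card learns the bus address through vqconfig in
	 * the device page. A struct _mic_vring_info is appended after the
	 * ring itself so user space can observe the available index.
	 */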
	for (i = 0; i < dd->num_vq; i++) {
		struct mic_vringh *mvr = &mvdev->mvr[i];
		struct mic_vring *vr = &mvdev->mvr[i].vring;

		num = le16_to_cpu(vqconfig[i].num);
		mutex_init(&mvr->vr_mutex);
		vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
			sizeof(struct _mic_vring_info));
		vr->va = (void *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(vr_size));
		if (!vr->va) {
			ret = -ENOMEM;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vr->len = vr_size;
		vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
		vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i);
		vr_addr = mic_map_single(mdev, vr->va, vr_size);
		if (mic_map_error(vr_addr)) {
			free_pages((unsigned long)vr->va, get_order(vr_size));
			ret = -ENOMEM;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vqconfig[i].address = cpu_to_le64(vr_addr);

		vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
		ret = vringh_init_kern(&mvr->vrh,
			*(u32 *)mic_vq_features(mvdev->dd), num, false,
			vr->vr.desc, vr->vr.avail, vr->vr.used);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vringh_kiov_init(&mvr->riov, NULL, 0);
		vringh_kiov_init(&mvr->wiov, NULL, 0);
		mvr->head = USHRT_MAX;
		mvr->mvdev = mvdev;
		mvr->vrh.notify = mic_notify;
		dev_dbg(mdev->sdev->parent,
			"%s %d index %d va %p info %p vr_size 0x%x\n",
			__func__, __LINE__, i, vr->va, vr->info, vr_size);
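		/*
		 * Per-vring intermediate (bounce) buffer used by the
		 * mic_sync_dma() based copy routines to/from card memory.
		 */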
		mvr->buf = (void *)__get_free_pages(GFP_KERNEL,
					get_order(MIC_INT_DMA_BUF_SIZE));
		mvr->buf_da = mic_map_single(mvdev->mdev, mvr->buf,
					  MIC_INT_DMA_BUF_SIZE);
	}

	snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id,
		 mvdev->virtio_id);
	mvdev->virtio_db = mic_next_db(mdev);
	mvdev->virtio_cookie = mic_request_threaded_irq(mdev,
					       mic_virtio_intr_handler,
					       NULL, irqname, mvdev,
					       mvdev->virtio_db, MIC_INTR_DB);
	if (IS_ERR(mvdev->virtio_cookie)) {
		ret = PTR_ERR(mvdev->virtio_cookie);
		dev_dbg(mdev->sdev->parent, "request irq failed\n");
		goto err;
	}

	mvdev->dc->c2h_vdev_db = mvdev->virtio_db;

	list_add_tail(&mvdev->list, &mdev->vdev_list);
	/*
	 * Order the type update with previous stores. This write barrier
	 * is paired with the corresponding read barrier before the uncached
	 * system memory read of the type, on the card while scanning the
	 * device page.
	 */
	smp_wmb();
	dd->type = type;

	dev_dbg(mdev->sdev->parent, "Added virtio device id %d\n", dd->type);

	db = bootparam->h2c_config_db;
	if (db != -1)
		mdev->ops->send_intr(mdev, db);
	mutex_unlock(&mdev->mic_mutex);
	return 0;
err:
	vqconfig = mic_vq_config(dd);
	for (j = 0; j < i; j++) {
		struct mic_vringh *mvr = &mvdev->mvr[j];

		mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address),
				 mvr->vring.len);
		free_pages((unsigned long)mvr->vring.va,
			   get_order(mvr->vring.len));
	}
	mutex_unlock(&mdev->mic_mutex);
	return ret;
}

void mic_virtio_del_device(struct mic_vdev *mvdev)
{
	struct list_head *pos, *tmp;
	struct mic_vdev *tmp_mvdev;
	struct mic_device *mdev = mvdev->mdev;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
	int i, ret, retry;
	struct mic_vqconfig *vqconfig;
	struct mic_bootparam *bootparam = mdev->dp;
	s8 db;

	mutex_lock(&mdev->mic_mutex);
	db = bootparam->h2c_config_db;
	if (db == -1)
		goto skip_hot_remove;
	dev_dbg(mdev->sdev->parent,
		"Requesting hot remove id %d\n", mvdev->virtio_id);
	mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
	mdev->ops->send_intr(mdev, db);
	for (retry = 100; retry--;) {
		ret = wait_event_timeout(wake,
			mvdev->dc->guest_ack, msecs_to_jiffies(100));
		if (ret)
			break;
	}
	dev_dbg(mdev->sdev->parent,
		"Device id %d config_change %d guest_ack %d retry %d\n",
		mvdev->virtio_id, mvdev->dc->config_change,
		mvdev->dc->guest_ack, retry);
	mvdev->dc->config_change = 0;
	mvdev->dc->guest_ack = 0;
skip_hot_remove:
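	/*
	 * Teardown order matters: free the doorbell interrupt first so no
	 * new bottom-half work can be scheduled, flush any work already
	 * queued, and only then release the per-vring DMA buffers and
	 * vring pages.
	 */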
	mic_free_irq(mdev, mvdev->virtio_cookie, mvdev);
	flush_work(&mvdev->virtio_bh_work);
	vqconfig = mic_vq_config(mvdev->dd);
	for (i = 0; i < mvdev->dd->num_vq; i++) {
		struct mic_vringh *mvr = &mvdev->mvr[i];

		mic_unmap_single(mvdev->mdev, mvr->buf_da,
				 MIC_INT_DMA_BUF_SIZE);
		free_pages((unsigned long)mvr->buf,
			   get_order(MIC_INT_DMA_BUF_SIZE));
		vringh_kiov_cleanup(&mvr->riov);
		vringh_kiov_cleanup(&mvr->wiov);
		mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address),
				 mvr->vring.len);
		free_pages((unsigned long)mvr->vring.va,
			   get_order(mvr->vring.len));
	}

	list_for_each_safe(pos, tmp, &mdev->vdev_list) {
		tmp_mvdev = list_entry(pos, struct mic_vdev, list);
		if (tmp_mvdev == mvdev) {
			list_del(pos);
			dev_dbg(mdev->sdev->parent,
				"Removing virtio device id %d\n",
				mvdev->virtio_id);
			break;
		}
	}
	/*
	 * Order the type update with previous stores. This write barrier
	 * is paired with the corresponding read barrier before the uncached
	 * system memory read of the type, on the card while scanning the
	 * device page.
	 */
	smp_wmb();
	mvdev->dd->type = -1;
	mutex_unlock(&mdev->mic_mutex);
}