
Lines matching refs: rq
This is a cross-reference listing of every use of the per-queue pointer rq (struct receive_queue) in the virtio_net driver (drivers/net/virtio_net.c). Each entry gives the source line number, the matching line, and the enclosing function.

91 	struct receive_queue *rq;  member
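
Every receive_queue field used below appears somewhere in this listing (vq, napi, num, max, pages, sg, name), so the structure can be sketched with reasonable confidence. Declaration order and the exact comments are assumptions; the sg size of MAX_SKB_FRAGS + 2 follows from the add_recvbuf_big() call at line 491:

    struct receive_queue {
            struct virtqueue *vq;           /* virtqueue this queue receives from */

            struct napi_struct napi;        /* per-queue NAPI context */

            /* Number of posted input buffers, and the most ever posted. */
            unsigned int num, max;

            /* Freelist of pages, chained through page->private. */
            struct page *pages;

            /* virtio header + linear part + MAX_SKB_FRAGS fragments. */
            struct scatterlist sg[MAX_SKB_FRAGS + 2];

            char name[40];                  /* "input.$index", set at line 1373 */
    };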
183 static void give_pages(struct receive_queue *rq, struct page *page) in give_pages() argument
189 end->private = (unsigned long)rq->pages; in give_pages()
190 rq->pages = page; in give_pages()
193 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) in get_a_page() argument
195 struct page *p = rq->pages; in get_a_page()
198 rq->pages = (struct page *)p->private; in get_a_page()
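
These two helpers implement the per-queue page freelist: pages are chained through the otherwise-unused page->private field. A sketch consistent with the fragments above; the walk to the tail of the donated chain in give_pages() is inferred from line 189 writing end->private rather than page->private:

    static void give_pages(struct receive_queue *rq, struct page *page)
    {
            struct page *end;

            /* Find the tail of the chain being returned... */
            for (end = page; end->private; end = (struct page *)end->private)
                    ;
            /* ...and splice the current freelist onto it. */
            end->private = (unsigned long)rq->pages;
            rq->pages = page;
    }

    static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
    {
            struct page *p = rq->pages;

            if (p) {
                    rq->pages = (struct page *)p->private;
                    /* Clear private so a future chain terminates here. */
                    p->private = 0;
            } else {
                    p = alloc_page(gfp_mask);
            }
            return p;
    }

Recycling through rq->pages avoids a round trip to the page allocator on every refill, which matters because the big and mergeable modes consume whole pages per buffer.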
234 static struct sk_buff *page_to_skb(struct receive_queue *rq, in page_to_skb() argument
237 struct virtnet_info *vi = rq->vq->vdev->priv; in page_to_skb()
292 give_pages(rq, page); in page_to_skb()
297 static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb) in receive_mergeable() argument
311 page = virtqueue_get_buf(rq->vq, &len); in receive_mergeable()
324 --rq->num; in receive_mergeable()
329 static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) in receive_buf() argument
331 struct virtnet_info *vi = rq->vq->vdev->priv; in receive_buf()
342 give_pages(rq, buf); in receive_buf()
354 skb = page_to_skb(rq, page, len); in receive_buf()
357 give_pages(rq, page); in receive_buf()
361 if (receive_mergeable(rq, skb)) { in receive_buf()
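
receive_buf() is the dispatch point for completed buffers. What buf actually is depends on the negotiated receive mode: a ready skb for small buffers, or the head page of a chain for big/mergeable buffers. A condensed skeleton with length validation, stats, and virtio header processing elided; the vi->big_packets and vi->mergeable_rx_bufs mode flags are assumed from context:

    static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
    {
            struct virtnet_info *vi = rq->vq->vdev->priv;
            struct sk_buff *skb;
            struct page *page;

            if (!vi->mergeable_rx_bufs && !vi->big_packets) {
                    /* Small mode: buf is already an skb with inline header. */
                    skb = buf;
                    len -= sizeof(struct virtio_net_hdr);
                    skb_trim(skb, len);
            } else {
                    /* Big/mergeable mode: buf is the head page of a chain. */
                    page = buf;
                    skb = page_to_skb(rq, page, len);
                    if (unlikely(!skb)) {
                            give_pages(rq, page);   /* recycle, drop packet */
                            return;
                    }
                    if (vi->mergeable_rx_bufs && receive_mergeable(rq, skb)) {
                            dev_kfree_skb(skb);
                            return;
                    }
            }
            /* ...checksum/GSO fixups, then hand skb up the stack... */
    }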
428 static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp) in add_recvbuf_small() argument
430 struct virtnet_info *vi = rq->vq->vdev->priv; in add_recvbuf_small()
442 sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); in add_recvbuf_small()
444 skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); in add_recvbuf_small()
446 err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); in add_recvbuf_small()
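
In small mode each buffer is a pre-allocated skb described to the host by a two-entry scatterlist: the virtio header first, then the skb data. A sketch; MAX_PACKET_LEN and the skb_vnet_hdr() header accessor are era-specific driver internals assumed here:

    static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
    {
            struct virtnet_info *vi = rq->vq->vdev->priv;
            struct sk_buff *skb;
            struct skb_vnet_hdr *hdr;
            int err;

            skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
            if (unlikely(!skb))
                    return -ENOMEM;
            skb_put(skb, MAX_PACKET_LEN);

            hdr = skb_vnet_hdr(skb);
            sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);
            skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

            /* The skb itself is the token returned by virtqueue_get_buf(). */
            err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
            if (err < 0)
                    dev_kfree_skb(skb);
            return err;
    }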
453 static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp) in add_recvbuf_big() argument
461 first = get_a_page(rq, gfp); in add_recvbuf_big()
464 give_pages(rq, list); in add_recvbuf_big()
467 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); in add_recvbuf_big()
474 first = get_a_page(rq, gfp); in add_recvbuf_big()
476 give_pages(rq, list); in add_recvbuf_big()
483 sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr)); in add_recvbuf_big()
487 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); in add_recvbuf_big()
491 err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, in add_recvbuf_big()
494 give_pages(rq, first); in add_recvbuf_big()
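
Big mode posts MAX_SKB_FRAGS + 2 scatterlist entries per buffer: sg[0] and sg[1] share the first page (separated virtio header, then packet data from an offset), and the remaining entries are one full page each. The pages are simultaneously chained through page->private so one give_pages() call can return them all. A sketch around the lines above; struct padded_vnet_hdr is the era's padding type and is an assumption:

    static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
    {
            struct page *first, *list = NULL;
            char *p;
            int i, err, offset;

            /* sg[MAX_SKB_FRAGS + 1] down to sg[2]: one full page each. */
            for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
                    first = get_a_page(rq, gfp);
                    if (!first) {
                            if (list)
                                    give_pages(rq, list);
                            return -ENOMEM;
                    }
                    sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

                    /* Chain the new page at the head of the list. */
                    first->private = (unsigned long)list;
                    list = first;
            }

            first = get_a_page(rq, gfp);
            if (!first) {
                    give_pages(rq, list);
                    return -ENOMEM;
            }
            p = page_address(first);

            /* sg[0]: separated virtio header at the start of the page. */
            sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));

            /* sg[1]: packet data in the rest of the same page. */
            offset = sizeof(struct padded_vnet_hdr);
            sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

            /* The head page is the token; it links to the rest. */
            first->private = (unsigned long)list;
            err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
                                      first, gfp);
            if (err < 0)
                    give_pages(rq, first);
            return err;
    }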
499 static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) in add_recvbuf_mergeable() argument
504 page = get_a_page(rq, gfp); in add_recvbuf_mergeable()
508 sg_init_one(rq->sg, page_address(page), PAGE_SIZE); in add_recvbuf_mergeable()
510 err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp); in add_recvbuf_mergeable()
512 give_pages(rq, page); in add_recvbuf_mergeable()
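
Mergeable mode is the simplest producer: one page, one scatterlist entry; the host splits large packets across buffers itself, and receive_mergeable() reassembles them. Lines 504-512 show essentially the whole function; filled in:

    static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
    {
            struct page *page;
            int err;

            page = get_a_page(rq, gfp);
            if (!page)
                    return -ENOMEM;

            sg_init_one(rq->sg, page_address(page), PAGE_SIZE);

            err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp);
            if (err < 0)
                    give_pages(rq, page);
            return err;
    }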
524 static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp) in try_fill_recv() argument
526 struct virtnet_info *vi = rq->vq->vdev->priv; in try_fill_recv()
532 err = add_recvbuf_mergeable(rq, gfp); in try_fill_recv()
534 err = add_recvbuf_big(rq, gfp); in try_fill_recv()
536 err = add_recvbuf_small(rq, gfp); in try_fill_recv()
541 ++rq->num; in try_fill_recv()
542 } while (rq->vq->num_free); in try_fill_recv()
543 if (unlikely(rq->num > rq->max)) in try_fill_recv()
544 rq->max = rq->num; in try_fill_recv()
545 virtqueue_kick(rq->vq); in try_fill_recv()
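
try_fill_recv() ties the three producers together: keep adding buffers of the negotiated kind until the virtqueue is full or allocation fails, track the high-water mark in rq->max, then kick the host once for the whole batch. Its boolean return ("did we avoid OOM?") is what refill_work() and virtnet_poll() test. A sketch consistent with lines 526-545; the -ENOMEM check is an assumption:

    static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
    {
            struct virtnet_info *vi = rq->vq->vdev->priv;
            int err;
            bool oom;

            do {
                    if (vi->mergeable_rx_bufs)
                            err = add_recvbuf_mergeable(rq, gfp);
                    else if (vi->big_packets)
                            err = add_recvbuf_big(rq, gfp);
                    else
                            err = add_recvbuf_small(rq, gfp);

                    oom = err == -ENOMEM;
                    if (err)
                            break;
                    ++rq->num;
            } while (rq->vq->num_free);

            if (unlikely(rq->num > rq->max))
                    rq->max = rq->num;
            virtqueue_kick(rq->vq);         /* one notification for the batch */
            return !oom;
    }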
552 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; in skb_recv_done() local
555 if (napi_schedule_prep(&rq->napi)) { in skb_recv_done()
557 __napi_schedule(&rq->napi); in skb_recv_done()
561 static void virtnet_napi_enable(struct receive_queue *rq) in virtnet_napi_enable() argument
563 napi_enable(&rq->napi); in virtnet_napi_enable()
569 if (napi_schedule_prep(&rq->napi)) { in virtnet_napi_enable()
570 virtqueue_disable_cb(rq->vq); in virtnet_napi_enable()
572 __napi_schedule(&rq->napi); in virtnet_napi_enable()
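
These two functions hand off from interrupt context to NAPI. skb_recv_done() is the virtqueue callback; virtnet_napi_enable() closes the race where the host filled the queue while NAPI was disabled, in which case no further interrupt will arrive. Sketches from lines 552-572 (some versions of the driver also bracket the schedule in virtnet_napi_enable() with local_bh_disable()/local_bh_enable()):

    static void skb_recv_done(struct virtqueue *rvq)
    {
            struct virtnet_info *vi = rvq->vdev->priv;
            struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

            /* Schedule NAPI; suppress further interrupts if successful. */
            if (napi_schedule_prep(&rq->napi)) {
                    virtqueue_disable_cb(rvq);
                    __napi_schedule(&rq->napi);
            }
    }

    static void virtnet_napi_enable(struct receive_queue *rq)
    {
            napi_enable(&rq->napi);

            /* If the host filled the queue before napi_enable(), no
             * interrupt will come, so poll any outstanding packets now. */
            if (napi_schedule_prep(&rq->napi)) {
                    virtqueue_disable_cb(rq->vq);
                    __napi_schedule(&rq->napi);
            }
    }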
585 struct receive_queue *rq = &vi->rq[i]; in refill_work() local
587 napi_disable(&rq->napi); in refill_work()
588 still_empty = !try_fill_recv(rq, GFP_KERNEL); in refill_work()
589 virtnet_napi_enable(rq); in refill_work()
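
refill_work() is the GFP_KERNEL fallback used when an atomic refill fails: NAPI is disabled around try_fill_recv() so the refill cannot race the poll loop, and the work requeues itself if a queue is still empty. A sketch; the container_of() on vi->refill and the HZ/2 retry interval follow the driver's pattern, but the loop bound (curr_queue_pairs) is an assumption:

    static void refill_work(struct work_struct *work)
    {
            struct virtnet_info *vi =
                    container_of(work, struct virtnet_info, refill.work);
            bool still_empty;
            int i;

            for (i = 0; i < vi->curr_queue_pairs; i++) {
                    struct receive_queue *rq = &vi->rq[i];

                    napi_disable(&rq->napi);
                    still_empty = !try_fill_recv(rq, GFP_KERNEL);
                    virtnet_napi_enable(rq);

                    /* Still nothing? Try again later rather than never. */
                    if (still_empty)
                            schedule_delayed_work(&vi->refill, HZ / 2);
            }
    }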
601 struct receive_queue *rq = in virtnet_poll() local
603 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll()
609 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { in virtnet_poll()
610 receive_buf(rq, buf, len); in virtnet_poll()
611 --rq->num; in virtnet_poll()
615 if (rq->num < rq->max / 2) { in virtnet_poll()
616 if (!try_fill_recv(rq, GFP_ATOMIC)) in virtnet_poll()
623 if (unlikely(!virtqueue_enable_cb(rq->vq)) && in virtnet_poll()
625 virtqueue_disable_cb(rq->vq); in virtnet_poll()
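
virtnet_poll() is the NAPI handler: drain up to budget completed buffers, refill opportunistically with GFP_ATOMIC once the queue drops below half its high-water mark (deferring to refill_work on failure), and only re-enable the virtqueue callback when under budget, re-scheduling if a buffer arrived in the race window. Reconstructed around lines 601-625:

    static int virtnet_poll(struct napi_struct *napi, int budget)
    {
            struct receive_queue *rq =
                    container_of(napi, struct receive_queue, napi);
            struct virtnet_info *vi = rq->vq->vdev->priv;
            void *buf;
            unsigned int len, received = 0;

    again:
            while (received < budget &&
                   (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
                    receive_buf(rq, buf, len);
                    --rq->num;
                    received++;
            }

            if (rq->num < rq->max / 2) {
                    if (!try_fill_recv(rq, GFP_ATOMIC))
                            schedule_delayed_work(&vi->refill, 0);
            }

            /* Out of packets? Stop polling and re-arm the interrupt. */
            if (received < budget) {
                    napi_complete(napi);
                    if (unlikely(!virtqueue_enable_cb(rq->vq)) &&
                        napi_schedule_prep(napi)) {
                            virtqueue_disable_cb(rq->vq);
                            __napi_schedule(napi);
                            goto again;
                    }
            }
            return received;
    }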
642 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) in virtnet_open()
644 virtnet_napi_enable(&vi->rq[i]); in virtnet_open()
886 napi_schedule(&vi->rq[i].napi); in virtnet_netpoll()
919 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) in virtnet_set_queues()
936 napi_disable(&vi->rq[i].napi); in virtnet_close()
1050 virtqueue_set_affinity(vi->rq[i].vq, -1); in virtnet_clean_affinity()
1085 virtqueue_set_affinity(vi->rq[i].vq, cpu); in virtnet_set_affinity()
1119 ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_get_ringparam()
1287 kfree(vi->rq); in virtnet_free_queues()
1296 while (vi->rq[i].pages) in free_receive_bufs()
1297 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); in free_receive_bufs()
1313 struct virtqueue *vq = vi->rq[i].vq; in free_unused_bufs()
1317 give_pages(&vi->rq[i], buf); in free_unused_bufs()
1320 --vi->rq[i].num; in free_unused_bufs()
1322 BUG_ON(vi->rq[i].num != 0); in free_unused_bufs()
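
On teardown, buffers still posted to the virtqueue must be reclaimed by hand, and the freeing mirrors the mode split in receive_buf(). A sketch around lines 1313-1322; the matching transmit-side loop of the real function is omitted:

    static void free_unused_bufs(struct virtnet_info *vi)
    {
            void *buf;
            int i;

            for (i = 0; i < vi->max_queue_pairs; i++) {
                    struct virtqueue *vq = vi->rq[i].vq;

                    while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
                            if (vi->mergeable_rx_bufs || vi->big_packets)
                                    give_pages(&vi->rq[i], buf);
                            else
                                    dev_kfree_skb(buf);
                            --vi->rq[i].num;
                    }
                    BUG_ON(vi->rq[i].num != 0);     /* accounting must balance */
            }
    }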
1373 sprintf(vi->rq[i].name, "input.%d", i); in virtnet_find_vqs()
1375 names[rxq2vq(i)] = vi->rq[i].name; in virtnet_find_vqs()
1391 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
1418 vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); in virtnet_alloc_queues()
1419 if (!vi->rq) in virtnet_alloc_queues()
1424 vi->rq[i].pages = NULL; in virtnet_alloc_queues()
1425 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, in virtnet_alloc_queues()
1428 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); in virtnet_alloc_queues()
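
Allocation of the receive queues themselves (lines 1418-1428): a zeroed array of max_queue_pairs entries, each getting its NAPI context and a pre-initialized scatterlist. A sketch of the rx half only; napi_weight stands in for the driver's module-level NAPI weight, and the error unwinding of the real function is simplified:

    static int virtnet_alloc_queues(struct virtnet_info *vi)
    {
            int i;

            vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
            if (!vi->rq)
                    return -ENOMEM;

            for (i = 0; i < vi->max_queue_pairs; i++) {
                    vi->rq[i].pages = NULL;
                    netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
                                   napi_weight);
                    sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
            }
            return 0;
    }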
1580 try_fill_recv(&vi->rq[i], GFP_KERNEL); in virtnet_probe()
1583 if (vi->rq[i].num == 0) { in virtnet_probe()
1677 napi_disable(&vi->rq[i].napi); in virtnet_freeze()
1678 netif_napi_del(&vi->rq[i].napi); in virtnet_freeze()
1699 virtnet_napi_enable(&vi->rq[i]); in virtnet_restore()
1704 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) in virtnet_restore()