// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

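/*
 * Check that the requested number of CQEs is positive and within the
 * device limit. When called for a resize (cq != NULL), also reject a new
 * depth smaller than the number of completions currently in the queue.
 * Returns 0 on success, -EINVAL otherwise.
 */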
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector)
{
	int count;

	if (cqe <= 0) {
		pr_warn("cqe(%d) <= 0\n", cqe);
		goto err1;
	}

	if (cqe > rxe->attr.max_cqe) {
		pr_warn("cqe(%d) > max_cqe(%d)\n",
			cqe, rxe->attr.max_cqe);
		goto err1;
	}

	if (cq) {
		count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
		if (cqe < count) {
			pr_warn("cqe(%d) < current # elements in queue (%d)\n",
				cqe, count);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

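/*
 * Tasklet handler: deliver the completion callback registered on the
 * ibcq, unless the CQ has been marked as dying by rxe_cq_disable().
 */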
static void rxe_send_complete(struct tasklet_struct *t)
{
	struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq->is_dying) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

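/*
 * Allocate the CQ's ring buffer and, for a user-space CQ (uresp != NULL),
 * hand the mmap info back to the caller via do_mmap_info(). On error the
 * queue is freed here and cq->queue is reset so later cleanup does not
 * touch it.
 */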
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp)
{
	int err;
	enum queue_type type;

	type = QUEUE_TYPE_TO_CLIENT;
	cq->queue = rxe_queue_init(rxe, &cqe,
				   sizeof(struct rxe_cqe), type);
	if (!cq->queue) {
		pr_warn("unable to create cq\n");
		return -ENOMEM;
	}

	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
	if (err) {
		vfree(cq->queue->buf);
		kfree(cq->queue);
		/* clear the stale pointer so rxe_cq_cleanup() skips it */
		cq->queue = NULL;
		return err;
	}

	if (uresp)
		cq->is_user = 1;

	cq->is_dying = false;

	tasklet_setup(&cq->comp_task, rxe_send_complete);

	spin_lock_init(&cq->cq_lock);
	cq->ibcq.cqe = cqe;
	return 0;
}

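/*
 * Resize the CQ's ring buffer; cq_lock is passed down so that
 * rxe_queue_resize() can serialize the copy against concurrent consumers.
 * On success, update the advertised depth in ibcq.cqe.
 */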
int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata)
{
	int err;

	err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
			       sizeof(struct rxe_cqe), udata,
			       uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
	if (!err)
		cq->ibcq.cqe = cqe;

	return err;
}

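/*
 * Post a completion to the queue. If the queue is full, report
 * IB_EVENT_CQ_ERR through the event handler (if any) and return -EBUSY.
 * Otherwise copy the CQE in, advance the producer index, and schedule the
 * completion tasklet if the consumer has armed notification
 * (IB_CQ_NEXT_COMP, or IB_CQ_SOLICITED with a solicited completion).
 */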
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
	struct ib_event ev;
	unsigned long flags;
	int full;
	void *addr;

	spin_lock_irqsave(&cq->cq_lock, flags);

	full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
	if (unlikely(full)) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		if (cq->ibcq.event_handler) {
			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}

		return -EBUSY;
	}

	addr = queue_producer_addr(cq->queue, QUEUE_TYPE_TO_CLIENT);
	memcpy(addr, cqe, sizeof(*cqe));

	queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	if ((cq->notify == IB_CQ_NEXT_COMP) ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = 0;
		tasklet_schedule(&cq->comp_task);
	}

	return 0;
}

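/*
 * Mark the CQ as dying under cq_lock so that a concurrently scheduled
 * rxe_send_complete() stops delivering completion callbacks.
 */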
void rxe_cq_disable(struct rxe_cq *cq)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	cq->is_dying = true;
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}

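/*
 * Pool element cleanup callback: free the ring buffer, if one was
 * allocated.
 */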
void rxe_cq_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);

	if (cq->queue)
		rxe_queue_cleanup(cq->queue);
}