// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

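/* Validate a requested CQ depth against the device limits and, when a CQ is
 * passed in (the resize path), against the number of entries currently
 * queued.
 */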
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
                    int cqe, int comp_vector)
{
        int count;

        if (cqe <= 0) {
                pr_warn("cqe(%d) <= 0\n", cqe);
                goto err1;
        }

        if (cqe > rxe->attr.max_cqe) {
                pr_warn("cqe(%d) > max_cqe(%d)\n",
                        cqe, rxe->attr.max_cqe);
                goto err1;
        }

        if (cq) {
                count = queue_count(cq->queue);
                if (cqe < count) {
                        pr_warn("cqe(%d) < current # elements in queue (%d)\n",
                                cqe, count);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

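/* Tasklet body: invoke the CQ's completion handler unless the CQ is being
 * torn down.
 */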
static void rxe_send_complete(struct tasklet_struct *t)
{
        struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
        unsigned long flags;

        spin_lock_irqsave(&cq->cq_lock, flags);
        if (cq->is_dying) {
                spin_unlock_irqrestore(&cq->cq_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&cq->cq_lock, flags);

        cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

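/* Allocate the CQ queue, set up mmap info for userspace consumers when a
 * response buffer is provided, and initialize the completion tasklet and
 * lock.
 */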
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
                     int comp_vector, struct ib_udata *udata,
                     struct rxe_create_cq_resp __user *uresp)
{
        int err;

        cq->queue = rxe_queue_init(rxe, &cqe,
                                   sizeof(struct rxe_cqe));
        if (!cq->queue) {
                pr_warn("unable to create cq\n");
                return -ENOMEM;
        }

        err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
                           cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
        if (err) {
                vfree(cq->queue->buf);
                kfree(cq->queue);
                return err;
        }

        if (uresp)
                cq->is_user = 1;

        cq->is_dying = false;

        tasklet_setup(&cq->comp_task, rxe_send_complete);

        spin_lock_init(&cq->cq_lock);
        cq->ibcq.cqe = cqe;
        return 0;
}

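/* Resize the underlying queue; on success report the new depth through
 * the ib_cq.
 */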
int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
                        struct rxe_resize_cq_resp __user *uresp,
                        struct ib_udata *udata)
{
        int err;

        err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
                               sizeof(struct rxe_cqe), udata,
                               uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
        if (!err)
                cq->ibcq.cqe = cqe;

        return err;
}

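/* Post a completion entry to the CQ. If the queue is full, raise
 * IB_EVENT_CQ_ERR and return -EBUSY; otherwise copy the CQE in and schedule
 * the completion tasklet when notification has been requested.
 */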
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
        struct ib_event ev;
        unsigned long flags;

        spin_lock_irqsave(&cq->cq_lock, flags);

        if (unlikely(queue_full(cq->queue))) {
                spin_unlock_irqrestore(&cq->cq_lock, flags);
                if (cq->ibcq.event_handler) {
                        ev.device = cq->ibcq.device;
                        ev.element.cq = &cq->ibcq;
                        ev.event = IB_EVENT_CQ_ERR;
                        cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
                }

                return -EBUSY;
        }

        memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));

        /* make sure all changes to the CQ are written before we update the
         * producer pointer
         */
        smp_wmb();

        advance_producer(cq->queue);
        spin_unlock_irqrestore(&cq->cq_lock, flags);

        if ((cq->notify == IB_CQ_NEXT_COMP) ||
            (cq->notify == IB_CQ_SOLICITED && solicited)) {
                cq->notify = 0;
                tasklet_schedule(&cq->comp_task);
        }

        return 0;
}

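/* Mark the CQ as dying so rxe_send_complete() stops invoking the
 * completion handler.
 */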
void rxe_cq_disable(struct rxe_cq *cq)
{
        unsigned long flags;

        spin_lock_irqsave(&cq->cq_lock, flags);
        cq->is_dying = true;
        spin_unlock_irqrestore(&cq->cq_lock, flags);
}

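/* Object-pool cleanup hook: release the CQ queue memory, if any. */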
void rxe_cq_cleanup(struct rxe_pool_entry *arg)
{
        struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);

        if (cq->queue)
                rxe_queue_cleanup(cq->queue);
}