// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 HGST, a Western Digital Company.
 */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* # of WCs to poll for with a single call to ib_poll_cq */
#define IB_POLL_BATCH			16
#define IB_POLL_BATCH_DIRECT		8

/* # of WCs to iterate over before yielding */
#define IB_POLL_BUDGET_IRQ		256
#define IB_POLL_BUDGET_WORKQUEUE	65536

#define IB_POLL_FLAGS \
	(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)

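/*
 * Each row below is a struct dim_cq_moder initializer; assuming the
 * linux/dim.h field order of {usec, pkts, comps, cq_period_mode}, only the
 * usec and comps columns are consumed by ib_cq_rdma_dim_work() below.
 */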
static const struct dim_cq_moder
rdma_dim_prof[RDMA_DIM_PARAMS_NUM_PROFILES] = {
	{1,   0, 1,  0},
	{1,   0, 4,  0},
	{2,   0, 4,  0},
	{2,   0, 8,  0},
	{4,   0, 8,  0},
	{16,  0, 8,  0},
	{16,  0, 16, 0},
	{32,  0, 16, 0},
	{32,  0, 32, 0},
};

static void ib_cq_rdma_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct ib_cq *cq = dim->priv;

	u16 usec = rdma_dim_prof[dim->profile_ix].usec;
	u16 comps = rdma_dim_prof[dim->profile_ix].comps;

	dim->state = DIM_START_MEASURE;

	cq->device->ops.modify_cq(cq, comps, usec);
}

static void rdma_dim_init(struct ib_cq *cq)
{
	struct dim *dim;

	if (!cq->device->ops.modify_cq || !cq->device->use_cq_dim ||
	    cq->poll_ctx == IB_POLL_DIRECT)
		return;

	dim = kzalloc(sizeof(struct dim), GFP_KERNEL);
	if (!dim)
		return;

	dim->state = DIM_START_MEASURE;
	dim->tune_state = DIM_GOING_RIGHT;
	dim->profile_ix = RDMA_DIM_START_PROFILE;
	dim->priv = cq;
	cq->dim = dim;

	INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
}

static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
			   int batch)
{
	int i, n, completed = 0;

	/*
	 * The budget might be (-1) if the caller does not want to bound
	 * this call; the unsigned minimum in min_t() below then lets the
	 * batch size act as the effective limit.
	 */
	while ((n = ib_poll_cq(cq, min_t(u32, batch,
					 budget - completed), wcs)) > 0) {
		for (i = 0; i < n; i++) {
			struct ib_wc *wc = &wcs[i];

			if (wc->wr_cqe)
				wc->wr_cqe->done(cq, wc);
			else
				WARN_ON_ONCE(wc->status == IB_WC_SUCCESS);
		}

		completed += n;

		if (n != batch || (budget != -1 && completed >= budget))
			break;
	}

	return completed;
}

/**
 * ib_process_cq_direct - process a CQ in caller context
 * @cq: CQ to process
 * @budget: number of CQEs to poll for
 *
 * This function is used to process all outstanding CQ entries.
 * It does not offload CQ processing to a different context and does
 * not ask for completion interrupts from the HCA.
 * Using direct processing on a CQ with a non-IB_POLL_DIRECT type may
 * trigger concurrent processing.
 *
 * Note: do not pass -1 as %budget unless it is guaranteed that the number
 * of completions that will be processed is small.
 */
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{
	struct ib_wc wcs[IB_POLL_BATCH_DIRECT];

	return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
}
EXPORT_SYMBOL(ib_process_cq_direct);
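
/*
 * Illustrative sketch (hypothetical ULP code, not part of this file): a
 * driver that created its CQ with IB_POLL_DIRECT can drain completions
 * from its own context, e.g.:
 *
 *	while (!req->done)
 *		ib_process_cq_direct(my_cq, IB_POLL_BATCH_DIRECT);
 *
 * where "my_cq" and "req" are the ULP's own objects.  As noted above, a
 * budget of -1 is only safe when few completions are outstanding.
 */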

static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
{
	WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
}

static int ib_poll_handler(struct irq_poll *iop, int budget)
{
	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
	struct dim *dim = cq->dim;
	int completed;

	completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
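	/*
	 * If the budget was not exhausted the CQ is drained, so finish the
	 * irq_poll cycle and re-arm notifications.  A positive return from
	 * ib_req_notify_cq() means completions arrived before the CQ could
	 * be re-armed, so schedule another poll round rather than lose them.
	 */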
	if (completed < budget) {
		irq_poll_complete(&cq->iop);
		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
			irq_poll_sched(&cq->iop);
	}

	if (dim)
		rdma_dim(dim, completed);

	return completed;
}

static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
{
	irq_poll_sched(&cq->iop);
}

static void ib_cq_poll_work(struct work_struct *work)
{
	struct ib_cq *cq = container_of(work, struct ib_cq, work);
	int completed;

	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
				    IB_POLL_BATCH);
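	/*
	 * Re-queue the work if the whole budget was consumed, or if
	 * ib_req_notify_cq() reports completions that slipped in before the
	 * CQ could be re-armed; otherwise feed the result into DIM so the
	 * moderation profile can adapt.
	 */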
	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
		queue_work(cq->comp_wq, &cq->work);
	else if (cq->dim)
		rdma_dim(cq->dim, completed);
}

static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
	queue_work(cq->comp_wq, &cq->work);
}

/**
 * __ib_alloc_cq_user - allocate a completion queue
 * @dev: device to allocate the CQ for
 * @private: driver private data, accessible from cq->cq_context
 * @nr_cqe: number of CQEs to allocate
 * @comp_vector: HCA completion vectors for this CQ
 * @poll_ctx: context to poll the CQ from.
 * @caller: module owner name.
 * @udata: Valid user data or NULL for kernel object
 *
 * This is the proper interface to allocate a CQ for in-kernel users. A
 * CQ allocated with this interface will automatically be polled from the
 * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
 * to use this CQ abstraction.
 */
struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
				 int nr_cqe, int comp_vector,
				 enum ib_poll_context poll_ctx,
				 const char *caller, struct ib_udata *udata)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe		= nr_cqe,
		.comp_vector	= comp_vector,
	};
	struct ib_cq *cq;
	int ret = -ENOMEM;

	cq = rdma_zalloc_drv_obj(dev, ib_cq);
	if (!cq)
		return ERR_PTR(ret);

	cq->device = dev;
	cq->cq_context = private;
	cq->poll_ctx = poll_ctx;
	atomic_set(&cq->usecnt, 0);

	cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
	if (!cq->wc)
		goto out_free_cq;

	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_set_task(&cq->res, caller);

	ret = dev->ops.create_cq(cq, &cq_attr, NULL);
	if (ret)
		goto out_free_wc;

	rdma_restrack_kadd(&cq->res);

	rdma_dim_init(cq);

	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		cq->comp_handler = ib_cq_completion_direct;
		break;
	case IB_POLL_SOFTIRQ:
		cq->comp_handler = ib_cq_completion_softirq;

		irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		break;
	case IB_POLL_WORKQUEUE:
	case IB_POLL_UNBOUND_WORKQUEUE:
		cq->comp_handler = ib_cq_completion_workqueue;
		INIT_WORK(&cq->work, ib_cq_poll_work);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
				ib_comp_wq : ib_comp_unbound_wq;
		break;
	default:
		ret = -EINVAL;
		goto out_destroy_cq;
	}

	return cq;

out_destroy_cq:
	rdma_restrack_del(&cq->res);
	cq->device->ops.destroy_cq(cq, udata);
out_free_wc:
	kfree(cq->wc);
out_free_cq:
	kfree(cq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(__ib_alloc_cq_user);
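
/*
 * Illustrative sketch of the wr_cqe pattern the kernel-doc above refers to;
 * "my_req", "my_done" and the posting code are hypothetical ULP pieces and
 * not part of this file:
 *
 *	struct my_req {
 *		struct ib_cqe	cqe;
 *		...
 *	};
 *
 *	static void my_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct my_req *req = container_of(wc->wr_cqe,
 *						  struct my_req, cqe);
 *		...
 *	}
 *
 *	req->cqe.done = my_done;
 *	wr.wr_cqe = &req->cqe;		(instead of setting wr.wr_id)
 *
 * Kernel ULPs typically reach this allocator through the ib_alloc_cq()
 * wrapper, which passes a NULL udata.
 */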

/**
 * __ib_alloc_cq_any - allocate a completion queue
 * @dev: device to allocate the CQ for
 * @private: driver private data, accessible from cq->cq_context
 * @nr_cqe: number of CQEs to allocate
 * @poll_ctx: context to poll the CQ from
 * @caller: module owner name
 *
 * Attempt to spread ULP Completion Queues over each device's interrupt
 * vectors. A simple best-effort mechanism is used.
 */
struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
				int nr_cqe, enum ib_poll_context poll_ctx,
				const char *caller)
{
	static atomic_t counter;
	int comp_vector = 0;

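	/*
	 * For example, on a device with four completion vectors and at least
	 * four online CPUs, successive callers get comp_vector values
	 * 1, 2, 3, 0, 1, ... since atomic_inc_return() yields the already
	 * incremented counter before the modulo is applied.
	 */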
	if (dev->num_comp_vectors > 1)
		comp_vector =
			atomic_inc_return(&counter) %
			min_t(int, dev->num_comp_vectors, num_online_cpus());

	return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
				  caller, NULL);
}
EXPORT_SYMBOL(__ib_alloc_cq_any);

/**
 * ib_free_cq_user - free a completion queue
 * @cq: completion queue to free.
 * @udata: User data or NULL for kernel object
 */
void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
{
	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
		return;

	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		break;
	case IB_POLL_SOFTIRQ:
		irq_poll_disable(&cq->iop);
		break;
	case IB_POLL_WORKQUEUE:
	case IB_POLL_UNBOUND_WORKQUEUE:
		cancel_work_sync(&cq->work);
		break;
	default:
		WARN_ON_ONCE(1);
	}

	rdma_restrack_del(&cq->res);
	cq->device->ops.destroy_cq(cq, udata);
	if (cq->dim)
		cancel_work_sync(&cq->dim->work);
	kfree(cq->dim);
	kfree(cq->wc);
	kfree(cq);
}
EXPORT_SYMBOL(ib_free_cq_user);
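
/*
 * Illustrative lifecycle sketch (hypothetical ULP code, not part of this
 * file), using the kernel-user wrappers that pass a NULL udata:
 *
 *	cq = ib_alloc_cq_any(dev, NULL, nr_cqe, IB_POLL_WORKQUEUE);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */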