/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Completion queue handling
 *
 *  Authors: Waleri Fomin <fomin@de.ibm.com>
 *           Khadija Souissi <souissi@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/slab.h>

#include "ehca_iverbs.h"
#include "ehca_classes.h"
#include "ehca_irq.h"
#include "hcp_if.h"

/* slab cache for struct ehca_cq; created in ehca_init_cq_cache() */
static struct kmem_cache *cq_cache;

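/*
 * Register a QP with its CQ: each CQ keeps a hash table of the QPs that
 * post to it, keyed by the low bits of the real QP number, so that
 * ehca_cq_get_qp() can map a completion back to its ehca_qp quickly.
 */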
int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
{
	unsigned int qp_num = qp->real_qp_num;
	unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);
	hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
	spin_unlock_irqrestore(&cq->spinlock, flags);

	ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
		 cq->cq_number, qp_num);

	return 0;
}

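/*
 * Remove a QP from the CQ's hash table; returns -EINVAL if the QP was
 * not registered with this CQ.
 */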
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
{
	int ret = -EINVAL;
	unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
	struct hlist_node *iter;
	struct ehca_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);
	hlist_for_each(iter, &cq->qp_hashtab[key]) {
		qp = hlist_entry(iter, struct ehca_qp, list_entries);
		if (qp->real_qp_num == real_qp_num) {
			hlist_del(iter);
			ehca_dbg(cq->ib_cq.device,
				 "removed qp from cq. cq_num=%x real_qp_num=%x",
				 cq->cq_number, real_qp_num);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&cq->spinlock, flags);
	if (ret)
		ehca_err(cq->ib_cq.device,
			 "qp not found cq_num=%x real_qp_num=%x",
			 cq->cq_number, real_qp_num);

	return ret;
}

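/*
 * Look up the QP registered under real_qp_num. Note that the hash chain
 * is walked without taking cq->spinlock; callers are expected to provide
 * their own serialization against assign/unassign.
 */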
struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
{
	struct ehca_qp *ret = NULL;
	unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
	struct hlist_node *iter;
	struct ehca_qp *qp;

	hlist_for_each(iter, &cq->qp_hashtab[key]) {
		qp = hlist_entry(iter, struct ehca_qp, list_entries);
		if (qp->real_qp_num == real_qp_num) {
			ret = qp;
			break;
		}
	}
	return ret;
}

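/*
 * Create a completion queue. Rough sequence: reserve a slot against the
 * adapter-wide CQ limit, allocate the driver-private struct and an idr
 * token, obtain the CQ resource from the hypervisor, register all queue
 * pages with the firmware, and, for a userspace CQ, copy the resulting
 * queue geometry back through udata.
 */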
struct ib_cq *ehca_create_cq(struct ib_device *device,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *context,
			     struct ib_udata *udata)
{
	int cqe = attr->cqe;
	static const u32 additional_cqe = 20;
	struct ib_cq *cq;
	struct ehca_cq *my_cq;
	struct ehca_shca *shca =
		container_of(device, struct ehca_shca, ib_device);
	struct ipz_adapter_handle adapter_handle;
	struct ehca_alloc_cq_parms param; /* h_call's out parameters */
	struct h_galpa gal;
	void *vpage;
	u32 counter;
	u64 rpage, cqx_fec, h_ret;
	int rc, i;
	unsigned long flags;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
		return ERR_PTR(-EINVAL);

	if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
		ehca_err(device, "Unable to create CQ, max number of %i "
			 "CQs reached.", shca->max_num_cqs);
		ehca_err(device, "To increase the maximum number of CQs "
			 "use the number_of_cqs module parameter.\n");
		return ERR_PTR(-ENOSPC);
	}

	my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
	if (!my_cq) {
		ehca_err(device, "Out of memory for ehca_cq struct device=%p",
			 device);
		atomic_dec(&shca->num_cqs);
		return ERR_PTR(-ENOMEM);
	}

	memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));

	spin_lock_init(&my_cq->spinlock);
	spin_lock_init(&my_cq->cb_lock);
	spin_lock_init(&my_cq->task_lock);
	atomic_set(&my_cq->nr_events, 0);
	init_waitqueue_head(&my_cq->wait_completion);

	cq = &my_cq->ib_cq;

	adapter_handle = shca->ipz_hca_handle;
	param.eq_handle = shca->eq.ipz_eq_handle;

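	/*
	 * Allocate an idr token for this CQ so event handling can map a
	 * token back to the ehca_cq. idr_preload() lets the GFP_KERNEL
	 * allocation happen outside the irq-safe idr lock, so the
	 * GFP_NOWAIT idr_alloc() under the lock can succeed.
	 */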
	idr_preload(GFP_KERNEL);
	write_lock_irqsave(&ehca_cq_idr_lock, flags);
	rc = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
	idr_preload_end();

	if (rc < 0) {
		cq = ERR_PTR(-ENOMEM);
		ehca_err(device, "Can't allocate new idr entry. device=%p",
			 device);
		goto create_cq_exit1;
	}
	my_cq->token = rc;

	/*
	 * The CQ's maximum depth is 4GB-64, but we need 20 additional
	 * entries as a buffer for receiving error CQEs.
	 */
	param.nr_cqe = cqe + additional_cqe;
	h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);

	if (h_ret != H_SUCCESS) {
		ehca_err(device, "hipz_h_alloc_resource_cq() failed "
			 "h_ret=%lli device=%p", h_ret, device);
		cq = ERR_PTR(ehca2ib_return_code(h_ret));
		goto create_cq_exit2;
	}

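	/*
	 * ipz_queue_ctor() returns nonzero on success, so !rc really is
	 * the failure case here.
	 */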
	rc = ipz_queue_ctor(NULL, &my_cq->ipz_queue, param.act_pages,
			    EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0, 0);
	if (!rc) {
		ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%i device=%p",
			 rc, device);
		cq = ERR_PTR(-EINVAL);
		goto create_cq_exit3;
	}

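	/*
	 * Register each queue page with the firmware. All pages but the
	 * last must complete with H_PAGE_REGISTERED; the last one must
	 * return H_SUCCESS, and a further ipz_qpageit_get_inc() must
	 * yield NULL, otherwise the page count is inconsistent.
	 */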
	for (counter = 0; counter < param.act_pages; counter++) {
		vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
		if (!vpage) {
			ehca_err(device, "ipz_qpageit_get_inc() "
				 "returns NULL device=%p", device);
			cq = ERR_PTR(-EAGAIN);
			goto create_cq_exit4;
		}
		rpage = __pa(vpage);

		h_ret = hipz_h_register_rpage_cq(adapter_handle,
						 my_cq->ipz_cq_handle,
						 &my_cq->pf,
						 0,
						 0,
						 rpage,
						 1,
						 my_cq->galpas.kernel);

		if (h_ret < H_SUCCESS) {
			ehca_err(device, "hipz_h_register_rpage_cq() failed "
				 "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
				 "act_pages=%i", my_cq, my_cq->cq_number,
				 h_ret, counter, param.act_pages);
			cq = ERR_PTR(-EINVAL);
			goto create_cq_exit4;
		}

		if (counter == (param.act_pages - 1)) {
			vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
			if ((h_ret != H_SUCCESS) || vpage) {
				ehca_err(device, "Registration of pages not "
					 "complete ehca_cq=%p cq_num=%x "
					 "h_ret=%lli", my_cq, my_cq->cq_number,
					 h_ret);
				cq = ERR_PTR(-EAGAIN);
				goto create_cq_exit4;
			}
		} else {
			if (h_ret != H_PAGE_REGISTERED) {
				ehca_err(device, "Registration of page failed "
					 "ehca_cq=%p cq_num=%x h_ret=%lli "
					 "counter=%i act_pages=%i",
					 my_cq, my_cq->cq_number,
					 h_ret, counter, param.act_pages);
				cq = ERR_PTR(-ENOMEM);
				goto create_cq_exit4;
			}
		}
	}

	ipz_qeit_reset(&my_cq->ipz_queue);

	gal = my_cq->galpas.kernel;
	cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
	ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
		 my_cq, my_cq->cq_number, cqx_fec);

	my_cq->ib_cq.cqe = my_cq->nr_of_entries =
		param.act_nr_of_entries - additional_cqe;
	my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;

	for (i = 0; i < QP_HASHTAB_LEN; i++)
		INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);

	INIT_LIST_HEAD(&my_cq->sqp_err_list);
	INIT_LIST_HEAD(&my_cq->rqp_err_list);

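	/*
	 * For a userspace CQ, hand the queue geometry and the idr token
	 * back to the caller so user space can map and poll the queue.
	 */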
	if (context) {
		struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
		struct ehca_create_cq_resp resp;
		memset(&resp, 0, sizeof(resp));
		resp.cq_number = my_cq->cq_number;
		resp.token = my_cq->token;
		resp.ipz_queue.qe_size = ipz_queue->qe_size;
		resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
		resp.ipz_queue.queue_length = ipz_queue->queue_length;
		resp.ipz_queue.pagesize = ipz_queue->pagesize;
		resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
		resp.fw_handle_ofs = (u32)
			(my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			ehca_err(device, "Copy to udata failed.");
			cq = ERR_PTR(-EFAULT);
			goto create_cq_exit4;
		}
	}

	return cq;

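/* error unwinding: tear down in reverse order of construction */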
create_cq_exit4:
	ipz_queue_dtor(NULL, &my_cq->ipz_queue);

create_cq_exit3:
	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
	if (h_ret != H_SUCCESS)
		ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
			 "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);

create_cq_exit2:
	write_lock_irqsave(&ehca_cq_idr_lock, flags);
	idr_remove(&ehca_cq_idr, my_cq->token);
	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);

create_cq_exit1:
	kmem_cache_free(cq_cache, my_cq);

	atomic_dec(&shca->num_cqs);
	return cq;
}

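/*
 * Tear down a completion queue. The CQ is removed from the idr first so
 * no new events can reference it, then we wait for in-flight event work
 * to drain before asking the hypervisor to destroy the resource.
 */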
int ehca_destroy_cq(struct ib_cq *cq)
{
	u64 h_ret;
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	int cq_num = my_cq->cq_number;
	struct ib_device *device = cq->device;
	struct ehca_shca *shca = container_of(device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	unsigned long flags;

	if (cq->uobject) {
		if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
			ehca_err(device, "Resources still referenced in "
				 "user space cq_num=%x", my_cq->cq_number);
			return -EINVAL;
		}
	}

	/*
	 * remove the CQ from the idr first to make sure
	 * no more interrupt tasklets will touch this CQ
	 */
	write_lock_irqsave(&ehca_cq_idr_lock, flags);
	idr_remove(&ehca_cq_idr, my_cq->token);
	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);

	/* now wait until all pending events have completed */
	wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));

	/* nobody's using our CQ any longer -- we can destroy it */
	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
	if (h_ret == H_R_STATE) {
		/* cq in err: read err data and destroy it forcibly */
		ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
			 "state. Try to delete it forcibly.",
			 my_cq, cq_num, my_cq->ipz_cq_handle.handle);
		ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
		h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
		if (h_ret == H_SUCCESS)
			ehca_dbg(device, "cq_num=%x deleted successfully.",
				 cq_num);
	}
	if (h_ret != H_SUCCESS) {
		ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
			 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
		return ehca2ib_return_code(h_ret);
	}
	ipz_queue_dtor(NULL, &my_cq->ipz_queue);
	kmem_cache_free(cq_cache, my_cq);

	atomic_dec(&shca->num_cqs);
	return 0;
}

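/* CQ resize is not implemented by this driver; the call always fails. */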
int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	/* TODO: proper resize needs to be done */
	ehca_err(cq->device, "not implemented yet");

	return -EFAULT;
}

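/*
 * Module-lifetime helpers for the ehca_cq slab cache; intended to be
 * called once from driver init and exit respectively.
 */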
int ehca_init_cq_cache(void)
{
	cq_cache = kmem_cache_create("ehca_cache_cq",
				     sizeof(struct ehca_cq), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!cq_cache)
		return -ENOMEM;
	return 0;
}

void ehca_cleanup_cq_cache(void)
{
	if (cq_cache)
		kmem_cache_destroy(cq_cache);
}