/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Completion queue handling
 *
 *  Authors: Waleri Fomin <fomin@de.ibm.com>
 *           Khadija Souissi <souissi@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ehca_iverbs.h"
#include "ehca_classes.h"
#include "ehca_irq.h"
#include "hcp_if.h"

static struct kmem_cache *cq_cache;

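/*
 * Add a QP to this CQ's hash table so the completion and event handlers
 * can map a real QP number back to its struct ehca_qp.
 */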
int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
{
	unsigned int qp_num = qp->real_qp_num;
	unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);
	hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
	spin_unlock_irqrestore(&cq->spinlock, flags);

	ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
		 cq->cq_number, qp_num);

	return 0;
}

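/*
 * Remove a QP from this CQ's hash table; returns -EINVAL if no entry
 * with the given real QP number is found.
 */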
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
{
	int ret = -EINVAL;
	unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
	struct hlist_node *iter;
	struct ehca_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);
	hlist_for_each(iter, &cq->qp_hashtab[key]) {
		qp = hlist_entry(iter, struct ehca_qp, list_entries);
		if (qp->real_qp_num == real_qp_num) {
			hlist_del(iter);
			ehca_dbg(cq->ib_cq.device,
				 "removed qp from cq. cq_num=%x real_qp_num=%x",
				 cq->cq_number, real_qp_num);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&cq->spinlock, flags);
	if (ret)
		ehca_err(cq->ib_cq.device,
			 "qp not found cq_num=%x real_qp_num=%x",
			 cq->cq_number, real_qp_num);

	return ret;
}

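/*
 * Look up a QP in this CQ's hash table by its real QP number. No lock
 * is taken here, so callers must serialize against assign/unassign
 * themselves.
 */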
struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
{
	struct ehca_qp *ret = NULL;
	unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
	struct hlist_node *iter;
	struct ehca_qp *qp;

	hlist_for_each(iter, &cq->qp_hashtab[key]) {
		qp = hlist_entry(iter, struct ehca_qp, list_entries);
		if (qp->real_qp_num == real_qp_num) {
			ret = qp;
			break;
		}
	}
	return ret;
}

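/*
 * Create a completion queue: reserve an idr token for interrupt lookups,
 * allocate the CQ resource from the hypervisor, register the queue pages,
 * and, for userspace CQs, pass the queue parameters back through udata.
 */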
struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
			     struct ib_ucontext *context,
			     struct ib_udata *udata)
{
	static const u32 additional_cqe = 20;
	struct ib_cq *cq;
	struct ehca_cq *my_cq;
	struct ehca_shca *shca =
		container_of(device, struct ehca_shca, ib_device);
	struct ipz_adapter_handle adapter_handle;
	struct ehca_alloc_cq_parms param; /* h_call's out parameters */
	struct h_galpa gal;
	void *vpage;
	u32 counter;
	u64 rpage, cqx_fec, h_ret;
	int ipz_rc, ret, i;
	unsigned long flags;

	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
		return ERR_PTR(-EINVAL);

	if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
		ehca_err(device, "Unable to create CQ, max number of %i "
			"CQs reached.", shca->max_num_cqs);
		ehca_err(device, "To increase the maximum number of CQs "
			"use the number_of_cqs module parameter.\n");
		return ERR_PTR(-ENOSPC);
	}

	my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
	if (!my_cq) {
		ehca_err(device, "Out of memory for ehca_cq struct device=%p",
			 device);
		atomic_dec(&shca->num_cqs);
		return ERR_PTR(-ENOMEM);
	}

	memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));

	spin_lock_init(&my_cq->spinlock);
	spin_lock_init(&my_cq->cb_lock);
	spin_lock_init(&my_cq->task_lock);
	atomic_set(&my_cq->nr_events, 0);
	init_waitqueue_head(&my_cq->wait_completion);

	cq = &my_cq->ib_cq;

	adapter_handle = shca->ipz_hca_handle;
	param.eq_handle = shca->eq.ipz_eq_handle;

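	/*
	 * Reserve a token in ehca_cq_idr so the interrupt handlers can
	 * find this CQ later. idr_get_new() may return -EAGAIN, hence
	 * the retry loop around idr_pre_get().
	 */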
	do {
		if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
			cq = ERR_PTR(-ENOMEM);
			ehca_err(device, "Can't reserve idr nr. device=%p",
				 device);
			goto create_cq_exit1;
		}

		write_lock_irqsave(&ehca_cq_idr_lock, flags);
		ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
		write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
	} while (ret == -EAGAIN);

	if (ret) {
		cq = ERR_PTR(-ENOMEM);
		ehca_err(device, "Can't allocate new idr entry. device=%p",
			 device);
		goto create_cq_exit1;
	}

	if (my_cq->token > 0x1FFFFFF) {
		cq = ERR_PTR(-ENOMEM);
		ehca_err(device, "Invalid number of cq. device=%p", device);
		goto create_cq_exit2;
	}

	/*
	 * The CQ's maximum depth is 4GB-64, but we need an additional
	 * 20 entries as a buffer for receiving error CQEs.
	 */
	param.nr_cqe = cqe + additional_cqe;
	h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);

	if (h_ret != H_SUCCESS) {
		ehca_err(device, "hipz_h_alloc_resource_cq() failed "
			 "h_ret=%lli device=%p", h_ret, device);
		cq = ERR_PTR(ehca2ib_return_code(h_ret));
		goto create_cq_exit2;
	}

	ipz_rc = ipz_queue_ctor(NULL, &my_cq->ipz_queue, param.act_pages,
				EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0, 0);
	if (!ipz_rc) {
		ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%i device=%p",
			 ipz_rc, device);
		cq = ERR_PTR(-EINVAL);
		goto create_cq_exit3;
	}

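	/*
	 * Register every page of the queue with the hypervisor. All but
	 * the last hcall must return H_PAGE_REGISTERED; the final one
	 * must return H_SUCCESS with no queue page left over.
	 */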
	for (counter = 0; counter < param.act_pages; counter++) {
		vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
		if (!vpage) {
			ehca_err(device, "ipz_qpageit_get_inc() "
				 "returns NULL device=%p", device);
			cq = ERR_PTR(-EAGAIN);
			goto create_cq_exit4;
		}
		rpage = virt_to_abs(vpage);

		h_ret = hipz_h_register_rpage_cq(adapter_handle,
						 my_cq->ipz_cq_handle,
						 &my_cq->pf,
						 0,
						 0,
						 rpage,
						 1,
						 my_cq->galpas.kernel);

		if (h_ret < H_SUCCESS) {
			ehca_err(device, "hipz_h_register_rpage_cq() failed "
				 "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
				 "act_pages=%i", my_cq, my_cq->cq_number,
				 h_ret, counter, param.act_pages);
			cq = ERR_PTR(-EINVAL);
			goto create_cq_exit4;
		}

		if (counter == (param.act_pages - 1)) {
			vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
			if ((h_ret != H_SUCCESS) || vpage) {
				ehca_err(device, "Registration of pages not "
					 "complete ehca_cq=%p cq_num=%x "
					 "h_ret=%lli", my_cq, my_cq->cq_number,
					 h_ret);
				cq = ERR_PTR(-EAGAIN);
				goto create_cq_exit4;
			}
		} else {
			if (h_ret != H_PAGE_REGISTERED) {
				ehca_err(device, "Registration of page failed "
					 "ehca_cq=%p cq_num=%x h_ret=%lli "
					 "counter=%i act_pages=%i",
					 my_cq, my_cq->cq_number,
					 h_ret, counter, param.act_pages);
				cq = ERR_PTR(-ENOMEM);
				goto create_cq_exit4;
			}
		}
	}

	ipz_qeit_reset(&my_cq->ipz_queue);

	gal = my_cq->galpas.kernel;
	cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
	ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
		 my_cq, my_cq->cq_number, cqx_fec);

	my_cq->ib_cq.cqe = my_cq->nr_of_entries =
		param.act_nr_of_entries - additional_cqe;
	my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;

	for (i = 0; i < QP_HASHTAB_LEN; i++)
		INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);

	INIT_LIST_HEAD(&my_cq->sqp_err_list);
	INIT_LIST_HEAD(&my_cq->rqp_err_list);

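	/*
	 * For userspace CQs, hand the queue geometry and the page offset
	 * of the firmware handle back to the consumer through udata.
	 */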
	if (context) {
		struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
		struct ehca_create_cq_resp resp;
		memset(&resp, 0, sizeof(resp));
		resp.cq_number = my_cq->cq_number;
		resp.token = my_cq->token;
		resp.ipz_queue.qe_size = ipz_queue->qe_size;
		resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
		resp.ipz_queue.queue_length = ipz_queue->queue_length;
		resp.ipz_queue.pagesize = ipz_queue->pagesize;
		resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
		resp.fw_handle_ofs = (u32)
			(my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			ehca_err(device, "Copy to udata failed.");
			/* the exit path frees my_cq, so return an ERR_PTR */
			cq = ERR_PTR(-EFAULT);
			goto create_cq_exit4;
		}
	}

	return cq;

create_cq_exit4:
	ipz_queue_dtor(NULL, &my_cq->ipz_queue);

create_cq_exit3:
	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
	if (h_ret != H_SUCCESS)
		ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
			 "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);

create_cq_exit2:
	write_lock_irqsave(&ehca_cq_idr_lock, flags);
	idr_remove(&ehca_cq_idr, my_cq->token);
	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);

create_cq_exit1:
	kmem_cache_free(cq_cache, my_cq);

	atomic_dec(&shca->num_cqs);
	return cq;
}

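/*
 * Destroy a CQ: drop it from the idr so interrupt handlers can no longer
 * find it, wait for pending events, then free the hypervisor resource and
 * the queue memory. A CQ in error state is destroyed forcibly after its
 * error data has been read.
 */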
int ehca_destroy_cq(struct ib_cq *cq)
{
	u64 h_ret;
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	int cq_num = my_cq->cq_number;
	struct ib_device *device = cq->device;
	struct ehca_shca *shca = container_of(device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	unsigned long flags;

	if (cq->uobject) {
		if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
			ehca_err(device, "Resources still referenced in "
				 "user space cq_num=%x", my_cq->cq_number);
			return -EINVAL;
		}
	}

	/*
	 * remove the CQ from the idr first to make sure
	 * no more interrupt tasklets will touch this CQ
	 */
	write_lock_irqsave(&ehca_cq_idr_lock, flags);
	idr_remove(&ehca_cq_idr, my_cq->token);
	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);

	/* now wait until all pending events have completed */
	wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));

	/* nobody's using our CQ any longer -- we can destroy it */
	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
	if (h_ret == H_R_STATE) {
		/* cq in err: read err data and destroy it forcibly */
		ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
			 "state. Try to delete it forcibly.",
			 my_cq, cq_num, my_cq->ipz_cq_handle.handle);
		ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
		h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
		if (h_ret == H_SUCCESS)
			ehca_dbg(device, "cq_num=%x deleted successfully.",
				 cq_num);
	}
	if (h_ret != H_SUCCESS) {
		ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
			 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
		return ehca2ib_return_code(h_ret);
	}
	ipz_queue_dtor(NULL, &my_cq->ipz_queue);
	kmem_cache_free(cq_cache, my_cq);

	atomic_dec(&shca->num_cqs);
	return 0;
}

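/* CQ resize is not implemented by this driver; callers get an error. */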
int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	/* TODO: proper resize needs to be done */
	ehca_err(cq->device, "not implemented yet");

	return -EFAULT;
}

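/* Create the slab cache used for struct ehca_cq allocations. */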
int ehca_init_cq_cache(void)
{
	cq_cache = kmem_cache_create("ehca_cache_cq",
				     sizeof(struct ehca_cq), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!cq_cache)
		return -ENOMEM;
	return 0;
}

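/* Destroy the slab cache; a no-op if the cache was never created. */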
void ehca_cleanup_cq_cache(void)
{
	if (cq_cache)
		kmem_cache_destroy(cq_cache);
}