/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"

/**
 * ipath_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			   struct ib_recv_wr **bad_wr)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	struct ipath_rwq *wq;
	unsigned long flags;
	int ret;

	for (; wr; wr = wr->next) {
		struct ipath_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned) wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
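		/*
		 * The ring keeps one slot unused: head == tail means empty,
		 * and advancing head onto tail would mean full.
		 */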
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libipathverbs when creating a user SRQ
 */
struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
				struct ib_srq_init_attr *srq_init_attr,
				struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibpd->device);
	struct ipath_srq *srq;
	u32 sz;
	struct ib_srq *ret;

	if (srq_init_attr->attr.max_wr == 0) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
	    (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
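	/*
	 * One extra entry is allocated so the ring's full and empty states
	 * can be told apart (see ipath_post_srq_receive() above).
	 */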
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct ipath_rwqe);
	srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz);
	if (!srq->rq.wq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_srq;
	}

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;
		u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz;

		srq->ip =
		    ipath_create_mmap_info(dev, s,
					   ibpd->uobject->context,
					   srq->rq.wq);
		if (!srq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wq;
		}

		err = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		srq->ip = NULL;

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->rq.wq->head = 0;
	srq->rq.wq->tail = 0;
	srq->limit = srq_init_attr->attr.srq_limit;

	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
		spin_unlock(&dev->n_srqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &srq->ibsrq;
	goto done;

bail_ip:
	kfree(srq->ip);
bail_wq:
	vfree(srq->rq.wq);
bail_srq:
	kfree(srq);
done:
	return ret;
}

/**
 * ipath_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for ipathverbs.so
 */
int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask,
		     struct ib_udata *udata)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	struct ipath_rwq *wq;
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct ipath_rwq *owq;
		struct ipath_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr) {
			ret = -EINVAL;
			goto bail;
		}

		sz = sizeof(struct ipath_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz);
		if (!wq) {
			ret = -ENOMEM;
			goto bail;
		}

		/* Check that we can write the offset to mmap. */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf =
				(void __user *) (unsigned long) offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}

		spin_lock_irq(&srq->rq.lock);
		/*
		 * Validate the head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		owq = srq->rq.wq;
		head = owq->head;
		if (head >= srq->rq.size)
			head = 0;
		tail = owq->tail;
		if (tail >= srq->rq.size)
			tail = 0;
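		/* Entries currently in use: n = (head - tail) mod size. */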
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = 0;
		p = wq->wq;
		while (tail != head) {
			struct ipath_rwqe *wqe;
			int i;

			wqe = get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct ipath_rwqe *)((char *) p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
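		/*
		 * The old entries were copied to the start of the new ring,
		 * so it begins at index 0 with n entries queued.
		 */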
		srq->rq.wq = wq;
		srq->rq.size = size;
		wq->head = n;
		wq->tail = 0;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);

		vfree(owq);

		if (srq->ip) {
			struct ipath_mmap_info *ip = srq->ip;
			struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
			u32 s = sizeof(struct ipath_rwq) + size * sz;

			ipath_update_mmap_info(dev, ip, s, wq);

			/*
			 * Return the offset to mmap.
			 * See ipath_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					goto bail;
			}

			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}
	goto bail;

bail_unlock:
	spin_unlock_irq(&srq->rq.lock);
bail_free:
	vfree(wq);
bail:
	return ret;
}

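/**
 * ipath_query_srq - report the current attributes of a shared receive queue
 * @ibsrq: the SRQ to query
 * @attr: filled in with the SRQ's current attributes
 */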
int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct ipath_srq *srq = to_isrq(ibsrq);

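	/* rq.size includes the extra unused slot, so report one fewer. */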
	attr->max_wr = srq->rq.size - 1;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

/**
 * ipath_destroy_srq - destroy a shared receive queue
 * @ibsrq: the SRQ to destroy
 */
int ipath_destroy_srq(struct ib_srq *ibsrq)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	struct ipath_ibdev *dev = to_idev(ibsrq->device);

	spin_lock(&dev->n_srqs_lock);
	dev->n_srqs_allocated--;
	spin_unlock(&dev->n_srqs_lock);
	if (srq->ip)
		kref_put(&srq->ip->ref, ipath_release_mmap_info);
	else
		vfree(srq->rq.wq);
	kfree(srq);

	return 0;
}