/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  userspace support verbs
 *
 *  Authors: Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ehca_classes.h"
#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "ehca_tools.h"
#include "hcp_if.h"

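/*
 * Allocate the per-process user context.  The ib_ucontext handed back
 * to the core is embedded in the driver-private struct ehca_ucontext;
 * container_of() recovers the outer structure on deallocation.
 */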
struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
					struct ib_udata *udata)
{
	struct ehca_ucontext *my_context;

	my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
	if (!my_context) {
		ehca_err(device, "Out of memory device=%p", device);
		return ERR_PTR(-ENOMEM);
	}

	return &my_context->ib_ucontext;
}

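/* Free the user context allocated in ehca_alloc_ucontext(). */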
int ehca_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
	return 0;
}

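/*
 * VMA open/close callbacks.  Each mmap'ed resource carries a use count
 * (stored in vm_private_data) so the rest of the driver can tell
 * whether a userspace mapping is still live.  ehca_mm_open()
 * increments the count when a VMA is duplicated or split,
 * ehca_mm_close() decrements it on unmap.
 */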
static void ehca_mm_open(struct vm_area_struct *vma)
{
	u32 *count = (u32 *)vma->vm_private_data;
	if (!count) {
		ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
			     vma->vm_start, vma->vm_end);
		return;
	}
	(*count)++;
	if (!(*count))
		ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
			     vma->vm_start, vma->vm_end);
	ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
		     vma->vm_start, vma->vm_end, *count);
}

static void ehca_mm_close(struct vm_area_struct *vma)
{
	u32 *count = (u32 *)vma->vm_private_data;
	if (!count) {
		ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
			     vma->vm_start, vma->vm_end);
		return;
	}
	(*count)--;
	ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
		     vma->vm_start, vma->vm_end, *count);
}

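/* Use-count callbacks wired into every VMA created below. */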
static struct vm_operations_struct vm_ops = {
	.open = ehca_mm_open,
	.close = ehca_mm_close,
};

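/*
 * Map the firmware-provided register page (galpa handle) of a CQ/QP
 * into userspace.  The page is mapped non-cached, and the mapping is
 * hooked into the use-count machinery via vm_ops.
 */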
static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
			u32 *mm_count)
{
	int ret;
	u64 vsize, physical;

	vsize = vma->vm_end - vma->vm_start;
	if (vsize < EHCA_PAGESIZE) {
		ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
		return -EINVAL;
	}

	physical = galpas->user.fw_handle;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
	/* VM_IO | VM_RESERVED are set by remap_pfn_range() */
	ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
			   vma->vm_page_prot);
	if (unlikely(ret)) {
		ehca_gen_err("remap_4k_pfn() failed ret=%i", ret);
		return -ENOMEM;
	}

	vma->vm_private_data = mm_count;
	(*mm_count)++;
	vma->vm_ops = &vm_ops;

	return 0;
}

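/*
 * Map a CQ/QP queue (the kernel memory backing the work or completion
 * queue) into userspace, one page at a time via vm_insert_page().
 */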
static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
			   u32 *mm_count)
{
	int ret;
	u64 start, ofs;
	struct page *page;

	vma->vm_flags |= VM_RESERVED;
	start = vma->vm_start;
	for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
		u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
		page = virt_to_page(virt_addr);
		ret = vm_insert_page(vma, start, page);
		if (unlikely(ret)) {
			ehca_gen_err("vm_insert_page() failed rc=%i", ret);
			return ret;
		}
		start += PAGE_SIZE;
	}
	vma->vm_private_data = mm_count;
	(*mm_count)++;
	vma->vm_ops = &vm_ops;

	return 0;
}

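/*
 * Dispatch a CQ mmap request: rsrc_type selects either the firmware
 * register page (0) or the completion queue itself (1).
 */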
static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
			u32 rsrc_type)
{
	int ret;

	switch (rsrc_type) {
	case 0: /* galpa fw handle */
		ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
		ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
		if (unlikely(ret)) {
			ehca_err(cq->ib_cq.device,
				 "ehca_mmap_fw() failed rc=%i cq_num=%x",
				 ret, cq->cq_number);
			return ret;
		}
		break;

	case 1: /* cq queue_addr */
		ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
		ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
		if (unlikely(ret)) {
			ehca_err(cq->ib_cq.device,
				 "ehca_mmap_queue() failed rc=%i cq_num=%x",
				 ret, cq->cq_number);
			return ret;
		}
		break;

	default:
		ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
			 rsrc_type, cq->cq_number);
		return -EINVAL;
	}

	return 0;
}

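/*
 * Dispatch a QP mmap request: rsrc_type selects the firmware register
 * page (0), the receive queue (1), or the send queue (2).
 */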
static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
			u32 rsrc_type)
{
	int ret;

	switch (rsrc_type) {
	case 0: /* galpa fw handle */
		ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
		ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
		if (unlikely(ret)) {
			ehca_err(qp->ib_qp.device,
				 "ehca_mmap_fw() failed rc=%i qp_num=%x",
				 ret, qp->ib_qp.qp_num);
			return ret;
		}
		break;

	case 1: /* qp rqueue_addr */
		ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
		ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
				      &qp->mm_count_rqueue);
		if (unlikely(ret)) {
			ehca_err(qp->ib_qp.device,
				 "ehca_mmap_queue(rq) failed rc=%i qp_num=%x",
				 ret, qp->ib_qp.qp_num);
			return ret;
		}
		break;

	case 2: /* qp squeue_addr */
		ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
		ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
				      &qp->mm_count_squeue);
		if (unlikely(ret)) {
			ehca_err(qp->ib_qp.device,
				 "ehca_mmap_queue(sq) failed rc=%i qp_num=%x",
				 ret, qp->ib_qp.qp_num);
			return ret;
		}
		break;

	default:
		ehca_err(qp->ib_qp.device, "bad resource type=%x qp_num=%x",
			 rsrc_type, qp->ib_qp.qp_num);
		return -EINVAL;
	}

	return 0;
}

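/*
 * Entry point for mmap(2) on the uverbs device file.  Three fields are
 * packed into the page offset (vm_pgoff), as decoded below:
 *
 *   bit  27      queue type    (0 = CQ, 1 = QP)
 *   bits 26..25  resource type (0 = fw galpa, 1 = (r)queue, 2 = squeue)
 *   bits 24..0   idr handle of the CQ/QP
 *
 * Since vm_pgoff is the byte offset divided by PAGE_SIZE, mapping e.g.
 * the receive queue of the QP with idr handle 5 would use a file offset
 * of ((1UL << 27) | (1UL << 25) | 5) * PAGE_SIZE (illustrative; the
 * encoding is produced by the matching ehca userspace library).
 */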
int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	u64 fileoffset = vma->vm_pgoff;
	u32 idr_handle = fileoffset & 0x1FFFFFF;
	u32 q_type = (fileoffset >> 27) & 0x1;	  /* CQ, QP,...        */
	u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
	int ret;
	struct ehca_cq *cq;
	struct ehca_qp *qp;
	struct ib_uobject *uobject;

	switch (q_type) {
	case 0: /* CQ */
		read_lock(&ehca_cq_idr_lock);
		cq = idr_find(&ehca_cq_idr, idr_handle);
		read_unlock(&ehca_cq_idr_lock);

		/* make sure this mmap really belongs to the authorized user */
		if (!cq)
			return -EINVAL;

		if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
			return -EINVAL;

		ret = ehca_mmap_cq(vma, cq, rsrc_type);
		if (unlikely(ret)) {
			ehca_err(cq->ib_cq.device,
				 "ehca_mmap_cq() failed rc=%i cq_num=%x",
				 ret, cq->cq_number);
			return ret;
		}
		break;

	case 1: /* QP */
		read_lock(&ehca_qp_idr_lock);
		qp = idr_find(&ehca_qp_idr, idr_handle);
		read_unlock(&ehca_qp_idr_lock);

		/* make sure this mmap really belongs to the authorized user */
		if (!qp)
			return -EINVAL;

		uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
		if (!uobject || uobject->context != context)
			return -EINVAL;

		ret = ehca_mmap_qp(vma, qp, rsrc_type);
		if (unlikely(ret)) {
			ehca_err(qp->ib_qp.device,
				 "ehca_mmap_qp() failed rc=%i qp_num=%x",
				 ret, qp->ib_qp.qp_num);
			return ret;
		}
		break;

	default:
		ehca_gen_err("bad queue type %x", q_type);
		return -EINVAL;
	}

	return 0;
}