/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"

/*
 * Allocates space for a CXL context.
 */
struct cxl_context *cxl_context_alloc(void)
{
	return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}

/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
		     struct address_space *mapping)
{
	int i;

	spin_lock_init(&ctx->sste_lock);
	ctx->afu = afu;
	ctx->master = master;
	ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
	mutex_init(&ctx->mapping_lock);
	ctx->mapping = mapping;

	/*
	 * Allocate the segment table before we put it in the IDR so that we
	 * can always access it when dereferenced from the IDR. For the same
	 * reason, the segment table is only destroyed after the context is
	 * removed from the IDR. Access to this in the IOCTL is protected by
	 * Linux filesystem semantics (can't IOCTL until open is complete).
	 */
	i = cxl_alloc_sst(ctx);
	if (i)
		return i;

	INIT_WORK(&ctx->fault_work, cxl_handle_fault);

	init_waitqueue_head(&ctx->wq);
	spin_lock_init(&ctx->lock);

	ctx->irq_bitmap = NULL;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
	ctx->pending_afu_err = false;

	/*
	 * When we have to destroy all contexts in cxl_context_detach_all() we
	 * end up with afu_release_irqs() called from inside an
	 * idr_for_each_entry(). Hence we need to make sure that anything
	 * dereferenced from this IDR is ok before we allocate the IDR here.
	 * This clears out the IRQ ranges to ensure this.
	 */
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		ctx->irqs.range[i] = 0;

	mutex_init(&ctx->status_mutex);

	ctx->status = OPENED;

	/*
	 * Allocating the IDR entry! We had better make sure everything that
	 * can be dereferenced from it is set up first.
	 */
	mutex_lock(&afu->contexts_lock);
	idr_preload(GFP_KERNEL);
	i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
		      ctx->afu->num_procs, GFP_NOWAIT);
	idr_preload_end();
	mutex_unlock(&afu->contexts_lock);
	if (i < 0)
		return i;

	ctx->pe = i;
	ctx->elem = &ctx->afu->spa[i];
	ctx->pe_inserted = false;

	/*
	 * Take a ref on the afu so that it stays alive at least until
	 * this context is reclaimed inside reclaim_ctx.
	 */
	cxl_afu_get(afu);
	return 0;
}
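
/*
 * A minimal sketch (not code from this file) of the intended allocation
 * pattern; the afu pointer and the error handling are assumptions based on
 * the functions above. On init failure the context was never inserted into
 * the IDR, so the callers in this driver free it with a plain kfree():
 *
 *	struct cxl_context *ctx = cxl_context_alloc();
 *	int rc;
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	rc = cxl_context_init(ctx, afu, false, NULL);
 *	if (rc) {
 *		kfree(ctx);
 *		return rc;
 *	}
 */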

static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct cxl_context *ctx = vma->vm_file->private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	u64 area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;

	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
		 __func__, ctx->pe, address, offset);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		area = ctx->afu->psn_phys;
		if (offset >= ctx->afu->adapter->ps_size)
			return VM_FAULT_SIGBUS;
	} else {
		area = ctx->psn_phys;
		if (offset >= ctx->psn_size)
			return VM_FAULT_SIGBUS;
	}

	mutex_lock(&ctx->status_mutex);

	if (ctx->status != STARTED) {
		mutex_unlock(&ctx->status_mutex);
		pr_devel("%s: Context not started, failing problem state access\n", __func__);
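		/*
		 * If mmio_err_ff is set, the hardware may have gone away;
		 * back the mapping with a page of all 0xff bytes so that
		 * userspace reads look like a dead PCI device rather than
		 * delivering SIGBUS.
		 */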
		if (ctx->mmio_err_ff) {
			if (!ctx->ff_page) {
				ctx->ff_page = alloc_page(GFP_USER);
				if (!ctx->ff_page)
					return VM_FAULT_OOM;
				memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
			}
			get_page(ctx->ff_page);
			vmf->page = ctx->ff_page;
			vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
			return 0;
		}
		return VM_FAULT_SIGBUS;
	}

	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);

	mutex_unlock(&ctx->status_mutex);

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct cxl_mmap_vmops = {
	.fault = cxl_mmap_fault,
};

/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
	u64 start = vma->vm_pgoff << PAGE_SHIFT;
	u64 len = vma->vm_end - vma->vm_start;

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		if (start + len > ctx->afu->adapter->ps_size)
			return -EINVAL;
	} else {
		if (start + len > ctx->psn_size)
			return -EINVAL;
	}

	if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
		/* make sure there is a valid per process space for this AFU */
		if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
			pr_devel("AFU doesn't support mmio space\n");
			return -EINVAL;
		}

		/* Can't mmap until the AFU is enabled */
		if (!ctx->afu->enabled)
			return -EBUSY;
	}

	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
		 ctx->psn_phys, ctx->pe, ctx->master);

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &cxl_mmap_vmops;
	return 0;
}
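
/*
 * A minimal sketch (not code from this file) of how a character-device
 * ->mmap handler might delegate to cxl_context_iomap(), so that faults are
 * then serviced lazily by cxl_mmap_fault() above. The handler name and the
 * private_data convention are assumptions, and a real handler may perform
 * additional state checks first:
 *
 *	static int afu_mmap(struct file *file, struct vm_area_struct *vm)
 *	{
 *		struct cxl_context *ctx = file->private_data;
 *
 *		return cxl_context_iomap(ctx, vm);
 *	}
 */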

/*
 * Detach a context from the hardware. This disables interrupts and doesn't
 * return until all outstanding interrupts for this context have completed. The
 * hardware should no longer access *ctx after this has returned.
 */
int __detach_context(struct cxl_context *ctx)
{
	enum cxl_context_status status;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != STARTED)
		return -EBUSY;

	/* Only warn if we detached while the link was OK.
	 * If detach fails when hw is down, we don't care.
	 */
	WARN_ON(cxl_detach_process(ctx) &&
		cxl_adapter_link_ok(ctx->afu->adapter));
	flush_work(&ctx->fault_work); /* Only needed for dedicated process */

	/* release the reference to the group leader and mm handling pid */
	put_pid(ctx->pid);
	put_pid(ctx->glpid);

	cxl_ctx_put();
	return 0;
}

/*
 * Detach the given context from the AFU. This doesn't actually
 * free the context but it should stop the context running in hardware
 * (i.e. prevent this context from generating any further interrupts
 * so that it can be freed).
 */
void cxl_context_detach(struct cxl_context *ctx)
{
	int rc;

	rc = __detach_context(ctx);
	if (rc)
		return;

	afu_release_irqs(ctx, ctx);
	wake_up_all(&ctx->wq);
}

/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
	struct cxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		/*
		 * Anything done in here needs to be set up before the IDR is
		 * created and torn down after the IDR is removed.
		 */
		cxl_context_detach(ctx);

		/*
		 * We are force detaching - remove any active PSA mappings so
		 * userspace cannot interfere with the card if it comes back.
		 * Easiest way to exercise this is to unbind and rebind the
		 * driver via sysfs while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}
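
/*
 * The force-detach path above is easiest to exercise from userspace by
 * unbinding and rebinding the driver while a context is in use, along
 * these lines (the PCI address and driver name are illustrative
 * assumptions, not taken from this file):
 *
 *	echo 0000:01:00.0 > /sys/bus/pci/drivers/cxl-pci/unbind
 *	echo 0000:01:00.0 > /sys/bus/pci/drivers/cxl-pci/bind
 */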

static void reclaim_ctx(struct rcu_head *rcu)
{
	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);

	free_page((u64)ctx->sstp);
	if (ctx->ff_page)
		__free_page(ctx->ff_page);
	ctx->sstp = NULL;
	if (ctx->kernelapi)
		kfree(ctx->mapping);

	if (ctx->irq_bitmap)
		kfree(ctx->irq_bitmap);

	/* Drop ref to the afu device taken during cxl_context_init */
	cxl_afu_put(ctx->afu);

	kfree(ctx);
}

void cxl_context_free(struct cxl_context *ctx)
{
	mutex_lock(&ctx->afu->contexts_lock);
	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
	mutex_unlock(&ctx->afu->contexts_lock);
	call_rcu(&ctx->rcu, reclaim_ctx);
}
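
/*
 * Why cxl_context_free() defers to call_rcu() rather than freeing
 * immediately: a minimal sketch, assuming a hypothetical lockless reader
 * (illustrative, not code from this file) that resolves a PE number to a
 * context under rcu_read_lock(). Because reclaim_ctx() only runs after a
 * grace period, such a reader can never see the context freed out from
 * under it, even though it has already left the IDR:
 *
 *	rcu_read_lock();
 *	ctx = idr_find(&afu->contexts_idr, pe);
 *	if (ctx)
 *		... use ctx ...
 *	rcu_read_unlock();
 */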