/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <misc/cxl.h>
#include <linux/fs.h>

#include "cxl.h"

struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
	struct address_space *mapping;
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	int rc;

	afu = cxl_pci_to_afu(dev);

	ctx = cxl_context_alloc();
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		goto err_dev;
	}

	ctx->kernelapi = true;

	/*
	 * Make our own address space since we won't have one from the
	 * filesystem like the user api has, and even if we do associate a file
	 * with this context we don't want to use the global anonymous inode's
	 * address space as that can invalidate unrelated users:
	 */
	mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
	if (!mapping) {
		rc = -ENOMEM;
		goto err_ctx;
	}
	address_space_init_once(mapping);

	/* Make it a slave context.  We can promote it later? */
	rc = cxl_context_init(ctx, afu, false, mapping);
	if (rc)
		goto err_mapping;

	cxl_assign_psn_space(ctx);

	return ctx;

err_mapping:
	kfree(mapping);
err_ctx:
	kfree(ctx);
err_dev:
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);
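
/*
 * Illustrative sketch (not part of the driver): a kernel AFU driver
 * might create and tear down a context roughly like this. "pdev" is
 * assumed to be the cxl-attached PCI function; error handling is
 * abbreviated.
 *
 *	struct cxl_context *ctx;
 *
 *	ctx = cxl_dev_context_init(pdev);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...
 *	cxl_release_context(ctx);  (fails with -EBUSY once STARTED)
 */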

struct cxl_context *cxl_get_context(struct pci_dev *dev)
{
	return dev->dev.archdata.cxl_ctx;
}
EXPORT_SYMBOL_GPL(cxl_get_context);

struct device *cxl_get_phys_dev(struct pci_dev *dev)
{
	struct cxl_afu *afu;

	afu = cxl_pci_to_afu(dev);

	return afu->adapter->dev.parent;
}
EXPORT_SYMBOL_GPL(cxl_get_phys_dev);

int cxl_release_context(struct cxl_context *ctx)
{
	if (ctx->status >= STARTED)
		return -EBUSY;

	cxl_context_free(ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_release_context);

int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
{
	if (num == 0)
		num = ctx->afu->pp_irqs;
	return afu_allocate_irqs(ctx, num);
}
EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);

void cxl_free_afu_irqs(struct cxl_context *ctx)
{
	afu_irq_name_free(ctx);
	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);

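/*
 * Translate a context-relative AFU interrupt number into the hardware
 * interrupt number by walking the context's allocated IRQ ranges.
 * Returns 0 if num does not fall within any range.
 */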
static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
{
	__u16 range;
	int r;

	WARN_ON(num == 0);

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		range = ctx->irqs.range[r];
		if (num < range)
			return ctx->irqs.offset[r] + num;
		num -= range;
	}
	return 0;
}

int cxl_map_afu_irq(struct cxl_context *ctx, int num,
		    irq_handler_t handler, void *cookie, char *name)
{
	irq_hw_number_t hwirq;

	/*
	 * Find interrupt we are to register.
	 */
	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return -ENOENT;

	return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name);
}
EXPORT_SYMBOL_GPL(cxl_map_afu_irq);

void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return;

	virq = irq_find_mapping(NULL, hwirq);
	if (virq)
		cxl_unmap_irq(virq, cookie);
}
EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq);
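
/*
 * Illustrative sketch only: allocating and wiring up AFU interrupts
 * for a context. AFU IRQ numbering starts at 1 (0 is used for page
 * faults). "my_irq_handler" is a hypothetical irq_handler_t, and the
 * context pointer doubles as the handler cookie here; passing num == 0
 * below requests the AFU's default pp_irqs count.
 *
 *	rc = cxl_allocate_afu_irqs(ctx, 0);
 *	if (rc)
 *		return rc;
 *	cxl_map_afu_irq(ctx, 1, my_irq_handler, ctx, "my-afu-irq");
 *	...
 *	cxl_unmap_afu_irq(ctx, 1, ctx);
 *	cxl_free_afu_irqs(ctx);
 */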

/*
 * Start a context.
 * The code here is similar to afu_ioctl_start_work().
 */
int cxl_start_context(struct cxl_context *ctx, u64 wed,
		      struct task_struct *task)
{
	int rc = 0;
	bool kernel = true;

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	mutex_lock(&ctx->status_mutex);
	if (ctx->status == STARTED)
		goto out; /* already started */

	if (task) {
		ctx->pid = get_task_pid(task, PIDTYPE_PID);
		ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
		kernel = false;
	}

	/*
	 * Increment the driver use count. This enables global TLBIs for
	 * hash and the callbacks that handle the segment table.
	 */
	cxl_ctx_get();

	if ((rc = cxl_attach_process(ctx, kernel, wed, 0))) {
		put_pid(ctx->pid);
		cxl_ctx_put();
		goto out;
	}

	ctx->status = STARTED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(cxl_start_context);

int cxl_process_element(struct cxl_context *ctx)
{
	return ctx->pe;
}
EXPORT_SYMBOL_GPL(cxl_process_element);

/* Stop a context.  Returns 0 on success, otherwise -Errno */
int cxl_stop_context(struct cxl_context *ctx)
{
	return __detach_context(ctx);
}
EXPORT_SYMBOL_GPL(cxl_stop_context);
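
/*
 * Illustrative sketch only: starting a context with a work element
 * descriptor and stopping it again. "my_wed" is a hypothetical WED
 * value; passing a NULL task attaches the context as a kernel context.
 *
 *	rc = cxl_start_context(ctx, my_wed, NULL);
 *	if (rc)
 *		return rc;
 *	pr_devel("started PE %i\n", cxl_process_element(ctx));
 *	...
 *	cxl_stop_context(ctx);
 */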

void cxl_set_master(struct cxl_context *ctx)
{
	ctx->master = true;
	cxl_assign_psn_space(ctx);
}
EXPORT_SYMBOL_GPL(cxl_set_master);

/* Wrappers around the afu_* file ops, which are EXPORTED */
int cxl_fd_open(struct inode *inode, struct file *file)
{
	return afu_open(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_open);
int cxl_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_release);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return afu_ioctl(file, cmd, arg);
}
EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
{
	return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
	return afu_poll(file, poll);
}
EXPORT_SYMBOL_GPL(cxl_fd_poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
			loff_t *off)
{
	return afu_read(file, buf, count, off);
}
EXPORT_SYMBOL_GPL(cxl_fd_read);

#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME

/* Get a struct file and fd for a context and attach the ops */
struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
			int *fd)
{
	struct file *file;
	int rc, flags, fdtmp;

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (rc < 0)
		return ERR_PTR(rc);
	fdtmp = rc;

	/*
	 * Patch the file ops.  We need to be careful that this is
	 * reentrant safe.
	 */
	if (fops) {
		PATCH_FOPS(open);
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(unlocked_ioctl);
		PATCH_FOPS(compat_ioctl);
		PATCH_FOPS(mmap);
	} else /* use default ops */
		fops = (struct file_operations *)&afu_fops;

	file = anon_inode_getfile("cxl", fops, ctx, flags);
	if (IS_ERR(file))
		goto err_fd;

	file->f_mapping = ctx->mapping;

	*fd = fdtmp;
	return file;

err_fd:
	put_unused_fd(fdtmp);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxl_get_fd);
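
/*
 * Illustrative sketch only: handing a context to userspace from a
 * hypothetical driver ioctl. Note that cxl_get_fd() returns an ERR_PTR
 * when no fd is available but NULL when the anon inode file allocation
 * fails, so a caller has to check for both. "my_fops" is a partially
 * filled file_operations; any handler left NULL is patched in from
 * afu_fops above.
 *
 *	struct file *file;
 *	int fd;
 *
 *	file = cxl_get_fd(ctx, &my_fops, &fd);
 *	if (IS_ERR_OR_NULL(file))
 *		return file ? PTR_ERR(file) : -ENOMEM;
 *	fd_install(fd, file);
 *	return fd;
 */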

struct cxl_context *cxl_fops_get_context(struct file *file)
{
	return file->private_data;
}
EXPORT_SYMBOL_GPL(cxl_fops_get_context);

int cxl_start_work(struct cxl_context *ctx,
		   struct cxl_ioctl_start_work *work)
{
	int rc;

	/* Code taken from afu_ioctl_start_work() */
	if (!(work->flags & CXL_START_WORK_NUM_IRQS))
		work->num_interrupts = ctx->afu->pp_irqs;
	else if ((work->num_interrupts < ctx->afu->pp_irqs) ||
		 (work->num_interrupts > ctx->afu->irqs_max)) {
		return -EINVAL;
	}

	rc = afu_register_irqs(ctx, work->num_interrupts);
	if (rc)
		return rc;

	rc = cxl_start_context(ctx, work->work_element_descriptor, current);
	if (rc < 0) {
		afu_release_irqs(ctx, ctx);
		return rc;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_start_work);
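
/*
 * Illustrative sketch only: starting a context on behalf of the
 * current task with an explicit interrupt count. The field names
 * follow the uapi struct cxl_ioctl_start_work; "my_wed" is a
 * hypothetical work element descriptor, and the interrupt count must
 * lie between the AFU's pp_irqs and irqs_max.
 *
 *	struct cxl_ioctl_start_work work = {
 *		.work_element_descriptor = my_wed,
 *		.flags = CXL_START_WORK_NUM_IRQS,
 *		.num_interrupts = 4,
 *	};
 *
 *	rc = cxl_start_work(ctx, &work);
 */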

void __iomem *cxl_psa_map(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;
	int rc;

	rc = cxl_afu_check_and_enable(afu);
	if (rc)
		return NULL;

	pr_devel("%s: psn_phys:%llx size:%llx\n",
		 __func__, afu->psn_phys, afu->adapter->ps_size);
	return ioremap(ctx->psn_phys, ctx->psn_size);
}
EXPORT_SYMBOL_GPL(cxl_psa_map);

void cxl_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL_GPL(cxl_psa_unmap);
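
/*
 * Illustrative sketch only: mapping the context's problem state area,
 * poking a register and unmapping it again. "MY_AFU_REG_OFFSET" is a
 * hypothetical register offset within the PSA.
 *
 *	void __iomem *psa = cxl_psa_map(ctx);
 *
 *	if (!psa)
 *		return -EIO;
 *	out_be64(psa + MY_AFU_REG_OFFSET, cmd);
 *	cxl_psa_unmap(psa);
 */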

int cxl_afu_reset(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;
	int rc;

	rc = __cxl_afu_reset(afu);
	if (rc)
		return rc;

	return cxl_afu_check_and_enable(afu);
}
EXPORT_SYMBOL_GPL(cxl_afu_reset);

void cxl_perst_reloads_same_image(struct cxl_afu *afu,
				  bool perst_reloads_same_image)
{
	afu->adapter->perst_same_image = perst_reloads_same_image;
}
EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);