// SPDX-License-Identifier: GPL-2.0+
// Copyright 2019 IBM Corp.
#include <linux/idr.h>
#include "ocxl_internal.h"

static struct ocxl_fn *ocxl_fn_get(struct ocxl_fn *fn)
{
	return (get_device(&fn->dev) == NULL) ? NULL : fn;
}

static void ocxl_fn_put(struct ocxl_fn *fn)
{
	put_device(&fn->dev);
}

static struct ocxl_afu *alloc_afu(struct ocxl_fn *fn)
{
	struct ocxl_afu *afu;

	afu = kzalloc(sizeof(struct ocxl_afu), GFP_KERNEL);
	if (!afu)
		return NULL;

	kref_init(&afu->kref);
	mutex_init(&afu->contexts_lock);
	mutex_init(&afu->afu_control_lock);
	idr_init(&afu->contexts_idr);
	afu->fn = fn;
	ocxl_fn_get(fn);
	return afu;
}

static void free_afu(struct kref *kref)
{
	struct ocxl_afu *afu = container_of(kref, struct ocxl_afu, kref);

	idr_destroy(&afu->contexts_idr);
	ocxl_fn_put(afu->fn);
	kfree(afu);
}

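/*
 * Take a reference on an AFU. Dropped with ocxl_afu_put(); the last
 * reference frees the AFU and releases its function.
 */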
void ocxl_afu_get(struct ocxl_afu *afu)
{
	kref_get(&afu->kref);
}
EXPORT_SYMBOL_GPL(ocxl_afu_get);

void ocxl_afu_put(struct ocxl_afu *afu)
{
	kref_put(&afu->kref, free_afu);
}
EXPORT_SYMBOL_GPL(ocxl_afu_put);

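/*
 * Reserve this AFU's share of the function's actag range and write it
 * to the AFU control DVSEC in config space.
 */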
static int assign_afu_actag(struct ocxl_afu *afu)
{
	struct ocxl_fn *fn = afu->fn;
	int actag_count, actag_offset;
	struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);

	/*
	 * If there were not enough actags for the function, each AFU
	 * reduces its count in the same proportion
	 */
	actag_count = afu->config.actag_supported *
		fn->actag_enabled / fn->actag_supported;
	actag_offset = ocxl_actag_afu_alloc(fn, actag_count);
	if (actag_offset < 0) {
		dev_err(&pci_dev->dev, "Can't allocate %d actags for AFU: %d\n",
			actag_count, actag_offset);
		return actag_offset;
	}
	afu->actag_base = fn->actag_base + actag_offset;
	afu->actag_enabled = actag_count;

	ocxl_config_set_afu_actag(pci_dev, afu->config.dvsec_afu_control_pos,
				afu->actag_base, afu->actag_enabled);
	dev_dbg(&pci_dev->dev, "actag base=%d enabled=%d\n",
		afu->actag_base, afu->actag_enabled);
	return 0;
}

static void reclaim_afu_actag(struct ocxl_afu *afu)
{
	struct ocxl_fn *fn = afu->fn;
	int start_offset, size;

	start_offset = afu->actag_base - fn->actag_base;
	size = afu->actag_enabled;
	ocxl_actag_afu_free(afu->fn, start_offset, size);
}

static int assign_afu_pasid(struct ocxl_afu *afu)
{
	struct ocxl_fn *fn = afu->fn;
	int pasid_count, pasid_offset;
	struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);

	/*
	 * We only support the case where the function configuration
	 * requested enough PASIDs to cover all AFUs.
	 */
	pasid_count = 1 << afu->config.pasid_supported_log;
	pasid_offset = ocxl_pasid_afu_alloc(fn, pasid_count);
	if (pasid_offset < 0) {
		dev_err(&pci_dev->dev, "Can't allocate %d PASIDs for AFU: %d\n",
			pasid_count, pasid_offset);
		return pasid_offset;
	}
	afu->pasid_base = fn->pasid_base + pasid_offset;
	afu->pasid_count = 0;
	afu->pasid_max = pasid_count;

	ocxl_config_set_afu_pasid(pci_dev, afu->config.dvsec_afu_control_pos,
				afu->pasid_base,
				afu->config.pasid_supported_log);
	dev_dbg(&pci_dev->dev, "PASID base=%d, enabled=%d\n",
		afu->pasid_base, pasid_count);
	return 0;
}

static void reclaim_afu_pasid(struct ocxl_afu *afu)
{
	struct ocxl_fn *fn = afu->fn;
	int start_offset, size;

	start_offset = afu->pasid_base - fn->pasid_base;
	size = 1 << afu->config.pasid_supported_log;
	ocxl_pasid_afu_free(afu->fn, start_offset, size);
}

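/*
 * MMIO areas can only live in BAR 0, 2 or 4. BARs are shared between
 * the AFUs of a function, so each one is requested once and
 * reference-counted.
 */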
static int reserve_fn_bar(struct ocxl_fn *fn, int bar)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);
	int rc, idx;

	if (bar != 0 && bar != 2 && bar != 4)
		return -EINVAL;

	idx = bar >> 1;
	if (fn->bar_used[idx]++ == 0) {
		rc = pci_request_region(dev, bar, "ocxl");
		if (rc)
			return rc;
	}
	return 0;
}

static void release_fn_bar(struct ocxl_fn *fn, int bar)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);
	int idx;

	if (bar != 0 && bar != 2 && bar != 4)
		return;

	idx = bar >> 1;
	if (--fn->bar_used[idx] == 0)
		pci_release_region(dev, bar);
	WARN_ON(fn->bar_used[idx] < 0);
}

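/*
 * Reserve the BARs used by the AFU, map its global MMIO area and
 * record the start of its per-process MMIO area.
 */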
static int map_mmio_areas(struct ocxl_afu *afu)
{
	int rc;
	struct pci_dev *pci_dev = to_pci_dev(afu->fn->dev.parent);

	rc = reserve_fn_bar(afu->fn, afu->config.global_mmio_bar);
	if (rc)
		return rc;

	rc = reserve_fn_bar(afu->fn, afu->config.pp_mmio_bar);
	if (rc) {
		release_fn_bar(afu->fn, afu->config.global_mmio_bar);
		return rc;
	}

	afu->global_mmio_start =
		pci_resource_start(pci_dev, afu->config.global_mmio_bar) +
		afu->config.global_mmio_offset;
	afu->pp_mmio_start =
		pci_resource_start(pci_dev, afu->config.pp_mmio_bar) +
		afu->config.pp_mmio_offset;

	afu->global_mmio_ptr = ioremap(afu->global_mmio_start,
				afu->config.global_mmio_size);
	if (!afu->global_mmio_ptr) {
		release_fn_bar(afu->fn, afu->config.pp_mmio_bar);
		release_fn_bar(afu->fn, afu->config.global_mmio_bar);
		dev_err(&pci_dev->dev, "Error mapping global mmio area\n");
		return -ENOMEM;
	}

	/*
	 * Leave an empty page between the per-process mmio area and
	 * the AFU interrupt mappings
	 */
	afu->irq_base_offset = afu->config.pp_mmio_stride + PAGE_SIZE;
	return 0;
}

static void unmap_mmio_areas(struct ocxl_afu *afu)
{
	if (afu->global_mmio_ptr) {
		iounmap(afu->global_mmio_ptr);
		afu->global_mmio_ptr = NULL;
	}
	afu->global_mmio_start = 0;
	afu->pp_mmio_start = 0;
	release_fn_bar(afu->fn, afu->config.pp_mmio_bar);
	release_fn_bar(afu->fn, afu->config.global_mmio_bar);
}

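/*
 * Read the AFU descriptor from config space, then assign its actags
 * and PASIDs and map its MMIO areas.
 */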
static int configure_afu(struct ocxl_afu *afu, u8 afu_idx, struct pci_dev *dev)
{
	int rc;

	rc = ocxl_config_read_afu(dev, &afu->fn->config, &afu->config, afu_idx);
	if (rc)
		return rc;

	rc = assign_afu_actag(afu);
	if (rc)
		return rc;

	rc = assign_afu_pasid(afu);
	if (rc)
		goto err_free_actag;

	rc = map_mmio_areas(afu);
	if (rc)
		goto err_free_pasid;

	return 0;

err_free_pasid:
	reclaim_afu_pasid(afu);
err_free_actag:
	reclaim_afu_actag(afu);
	return rc;
}

static void deconfigure_afu(struct ocxl_afu *afu)
{
	unmap_mmio_areas(afu);
	reclaim_afu_pasid(afu);
	reclaim_afu_actag(afu);
}

static int activate_afu(struct pci_dev *dev, struct ocxl_afu *afu)
{
	ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 1);

	return 0;
}

static void deactivate_afu(struct ocxl_afu *afu)
{
	struct pci_dev *dev = to_pci_dev(afu->fn->dev.parent);

	ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 0);
}

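/*
 * Allocate, configure and enable one AFU, and add it to the
 * function's AFU list.
 */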
static int init_afu(struct pci_dev *dev, struct ocxl_fn *fn, u8 afu_idx)
{
	int rc;
	struct ocxl_afu *afu;

	afu = alloc_afu(fn);
	if (!afu)
		return -ENOMEM;

	rc = configure_afu(afu, afu_idx, dev);
	if (rc) {
		ocxl_afu_put(afu);
		return rc;
	}

	rc = activate_afu(dev, afu);
	if (rc) {
		deconfigure_afu(afu);
		ocxl_afu_put(afu);
		return rc;
	}

	list_add_tail(&afu->list, &fn->afu_list);

	return 0;
}

static void remove_afu(struct ocxl_afu *afu)
{
	list_del(&afu->list);
	ocxl_context_detach_all(afu);
	deactivate_afu(afu);
	deconfigure_afu(afu);
	ocxl_afu_put(afu); // matches the implicit get in alloc_afu
}

static struct ocxl_fn *alloc_function(void)
{
	struct ocxl_fn *fn;

	fn = kzalloc(sizeof(struct ocxl_fn), GFP_KERNEL);
	if (!fn)
		return NULL;

	INIT_LIST_HEAD(&fn->afu_list);
	INIT_LIST_HEAD(&fn->pasid_list);
	INIT_LIST_HEAD(&fn->actag_list);

	return fn;
}

static void free_function(struct ocxl_fn *fn)
{
	WARN_ON(!list_empty(&fn->afu_list));
	WARN_ON(!list_empty(&fn->pasid_list));
	kfree(fn);
}

static void free_function_dev(struct device *dev)
{
	struct ocxl_fn *fn = container_of(dev, struct ocxl_fn, dev);

	free_function(fn);
}

static int set_function_device(struct ocxl_fn *fn, struct pci_dev *dev)
{
	fn->dev.parent = &dev->dev;
	fn->dev.release = free_function_dev;
	return dev_set_name(&fn->dev, "ocxlfn.%s", dev_name(&dev->dev));
}

static int assign_function_actag(struct ocxl_fn *fn)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);
	u16 base, enabled, supported;
	int rc;

	rc = ocxl_config_get_actag_info(dev, &base, &enabled, &supported);
	if (rc)
		return rc;

	fn->actag_base = base;
	fn->actag_enabled = enabled;
	fn->actag_supported = supported;

	ocxl_config_set_actag(dev, fn->config.dvsec_function_pos,
			fn->actag_base, fn->actag_enabled);
	dev_dbg(&fn->dev, "actag range starting at %d, enabled %d\n",
		fn->actag_base, fn->actag_enabled);
	return 0;
}

static int set_function_pasid(struct ocxl_fn *fn)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);
	int rc, desired_count, max_count;

	/* A function may not require any PASID */
	if (fn->config.max_pasid_log < 0)
		return 0;

	rc = ocxl_config_get_pasid_info(dev, &max_count);
	if (rc)
		return rc;

	desired_count = 1 << fn->config.max_pasid_log;

	if (desired_count > max_count) {
		dev_err(&fn->dev,
			"Function requires more PASIDs than is available (%d vs. %d)\n",
			desired_count, max_count);
		return -ENOSPC;
	}

	fn->pasid_base = 0;
	return 0;
}

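/*
 * Bring up the PCI function: enable the device, read its OpenCAPI
 * configuration, assign actags and PASIDs, set up the link and
 * configure the Transaction Layer.
 */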
static int configure_function(struct ocxl_fn *fn, struct pci_dev *dev)
{
	int rc;

	rc = pci_enable_device(dev);
	if (rc) {
		dev_err(&dev->dev, "pci_enable_device failed: %d\n", rc);
		return rc;
	}

	/*
	 * Once it has been confirmed to work on our hardware, we
	 * should reset the function, to force the adapter to restart
	 * from scratch.
	 * A function reset would also reset all its AFUs.
	 *
	 * Some hints for implementation:
	 *
	 * - there's no status bit to know when the reset is done. We
	 *   should try reading the config space to know when it's
	 *   done.
	 * - probably something like:
	 *	Reset
	 *	wait 100ms
	 *	issue config read
	 *	allow device up to 1 sec to return success on config
	 *	read before declaring it broken
	 *
	 * Some shared logic on the card (CFG, TLX) won't be reset, so
	 * there's no guarantee that it will be enough.
	 */
	rc = ocxl_config_read_function(dev, &fn->config);
	if (rc)
		return rc;

	rc = set_function_device(fn, dev);
	if (rc)
		return rc;

	rc = assign_function_actag(fn);
	if (rc)
		return rc;

	rc = set_function_pasid(fn);
	if (rc)
		return rc;

	rc = ocxl_link_setup(dev, 0, &fn->link);
	if (rc)
		return rc;

	rc = ocxl_config_set_TL(dev, fn->config.dvsec_tl_pos);
	if (rc) {
		ocxl_link_release(dev, fn->link);
		return rc;
	}
	return 0;
}

static void deconfigure_function(struct ocxl_fn *fn)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);

	ocxl_link_release(dev, fn->link);
	pci_disable_device(dev);
}

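/*
 * Allocate and configure the function, then register its device. Once
 * registered, the function is freed by its device release callback.
 */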
static struct ocxl_fn *init_function(struct pci_dev *dev)
{
	struct ocxl_fn *fn;
	int rc;

	fn = alloc_function();
	if (!fn)
		return ERR_PTR(-ENOMEM);

	rc = configure_function(fn, dev);
	if (rc) {
		free_function(fn);
		return ERR_PTR(rc);
	}

	rc = device_register(&fn->dev);
	if (rc) {
		deconfigure_function(fn);
		put_device(&fn->dev);
		return ERR_PTR(rc);
	}
	return fn;
}

// Device detection & initialisation

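/*
 * Probe an OpenCAPI function: initialize the function, then walk the
 * AFU indexes and initialize each AFU present. An AFU that fails to
 * initialize is skipped rather than failing the whole function.
 */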
struct ocxl_fn *ocxl_function_open(struct pci_dev *dev)
{
	int rc, afu_count = 0;
	u8 afu;
	struct ocxl_fn *fn;

	if (!radix_enabled()) {
		dev_err(&dev->dev, "Unsupported memory model (hash)\n");
		return ERR_PTR(-ENODEV);
	}

	fn = init_function(dev);
	if (IS_ERR(fn)) {
		dev_err(&dev->dev, "function init failed: %li\n",
			PTR_ERR(fn));
		return fn;
	}

	for (afu = 0; afu <= fn->config.max_afu_index; afu++) {
		rc = ocxl_config_check_afu_index(dev, &fn->config, afu);
		if (rc > 0) {
			rc = init_afu(dev, fn, afu);
			if (rc) {
				dev_err(&dev->dev,
					"Can't initialize AFU index %d\n", afu);
				continue;
			}
			afu_count++;
		}
	}
	dev_info(&dev->dev, "%d AFU(s) configured\n", afu_count);
	return fn;
}
EXPORT_SYMBOL_GPL(ocxl_function_open);

struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn)
{
	return &fn->afu_list;
}
EXPORT_SYMBOL_GPL(ocxl_function_afu_list);

struct ocxl_afu *ocxl_function_fetch_afu(struct ocxl_fn *fn, u8 afu_idx)
{
	struct ocxl_afu *afu;

	list_for_each_entry(afu, &fn->afu_list, list) {
		if (afu->config.idx == afu_idx)
			return afu;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ocxl_function_fetch_afu);

const struct ocxl_fn_config *ocxl_function_config(struct ocxl_fn *fn)
{
	return &fn->config;
}
EXPORT_SYMBOL_GPL(ocxl_function_config);

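/*
 * Tear down the function: remove all its AFUs, release the link and
 * disable the PCI device, then unregister the function device.
 */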
void ocxl_function_close(struct ocxl_fn *fn)
{
	struct ocxl_afu *afu, *tmp;

	list_for_each_entry_safe(afu, tmp, &fn->afu_list, list) {
		remove_afu(afu);
	}

	deconfigure_function(fn);
	device_unregister(&fn->dev);
}
EXPORT_SYMBOL_GPL(ocxl_function_close);

// AFU Metadata

struct ocxl_afu_config *ocxl_afu_config(struct ocxl_afu *afu)
{
	return &afu->config;
}
EXPORT_SYMBOL_GPL(ocxl_afu_config);

void ocxl_afu_set_private(struct ocxl_afu *afu, void *private)
{
	afu->private = private;
}
EXPORT_SYMBOL_GPL(ocxl_afu_set_private);

void *ocxl_afu_get_private(struct ocxl_afu *afu)
{
	if (afu)
		return afu->private;

	return NULL;
}
EXPORT_SYMBOL_GPL(ocxl_afu_get_private);
570