1 /*
2  * Copyright (C) 2015-2017 Netronome Systems, Inc.
3  *
4  * This software is dual licensed under the GNU General License Version 2,
5  * June 1991 as shown in the file COPYING in the top-level directory of this
6  * source tree or the BSD 2-Clause License provided below.  You have the
7  * option to license this software under the complete terms of either license.
8  *
9  * The BSD 2-Clause License:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      1. Redistributions of source code must retain the above
16  *         copyright notice, this list of conditions and the following
17  *         disclaimer.
18  *
19  *      2. Redistributions in binary form must reproduce the above
20  *         copyright notice, this list of conditions and the following
21  *         disclaimer in the documentation and/or other materials
22  *         provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 /*
35  * nfp_cppcore.c
36  * Provides low-level access to the NFP's internal CPP bus
37  * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
38  *          Jason McMullan <jason.mcmullan@netronome.com>
39  *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
40  */
41 
42 #include <asm/unaligned.h>
43 #include <linux/delay.h>
44 #include <linux/device.h>
45 #include <linux/ioport.h>
46 #include <linux/kernel.h>
47 #include <linux/module.h>
48 #include <linux/mutex.h>
49 #include <linux/sched.h>
50 #include <linux/slab.h>
51 #include <linux/wait.h>
52 
53 #include "nfp_arm.h"
54 #include "nfp_cpp.h"
55 #include "nfp6000/nfp6000.h"
56 
57 #define NFP_ARM_GCSR_SOFTMODEL2                              0x0000014c
58 #define NFP_ARM_GCSR_SOFTMODEL3                              0x00000150
59 
60 struct nfp_cpp_resource {
61 	struct list_head list;
62 	const char *name;
63 	u32 cpp_id;
64 	u64 start;
65 	u64 end;
66 };
67 
68 /**
69  * struct nfp_cpp - main nfpcore device structure
70  * Following fields are read-only after probe() exits or netdevs are spawned.
71  * @dev:		embedded device structure
72  * @op:			low-level implementation ops
73  * @priv:		private data of the low-level implementation
74  * @model:		chip model
75  * @interface:		chip interface id we are using to reach it
76  * @serial:		chip serial number
77  * @imb_cat_table:	CPP Mapping Table
78  *
79  * Following fields use explicit locking:
80  * @resource_list:	NFP CPP resource list
81  * @resource_lock:	protects @resource_list
82  *
83  * @area_cache_list:	cached areas for cpp/xpb read/write speed up
84  * @area_cache_mutex:	protects @area_cache_list
85  *
86  * @waitq:		area wait queue
87  */
88 struct nfp_cpp {
89 	struct device dev;
90 
91 	void *priv;
92 
93 	u32 model;
94 	u16 interface;
95 	u8 serial[NFP_SERIAL_LEN];
96 
97 	const struct nfp_cpp_operations *op;
98 	struct list_head resource_list;
99 	rwlock_t resource_lock;
100 	wait_queue_head_t waitq;
101 
102 	u32 imb_cat_table[16];
103 
104 	struct mutex area_cache_mutex;
105 	struct list_head area_cache_list;
106 };
107 
108 /* Element of the area_cache_list */
109 struct nfp_cpp_area_cache {
110 	struct list_head entry;
111 	u32 id;
112 	u64 addr;
113 	u32 size;
114 	struct nfp_cpp_area *area;
115 };
116 
117 struct nfp_cpp_area {
118 	struct nfp_cpp *cpp;
119 	struct kref kref;
120 	atomic_t refcount;
121 	struct mutex mutex;	/* Lock for the area's refcount */
122 	unsigned long long offset;
123 	unsigned long size;
124 	struct nfp_cpp_resource resource;
125 	void __iomem *iomem;
126 	/* Here follows the 'priv' part of nfp_cpp_area. */
127 };
128 
129 struct nfp_cpp_explicit {
130 	struct nfp_cpp *cpp;
131 	struct nfp_cpp_explicit_command cmd;
132 	/* Here follows the 'priv' part of nfp_cpp_explicit. */
133 };
134 
135 static void __resource_add(struct list_head *head, struct nfp_cpp_resource *res)
136 {
137 	struct nfp_cpp_resource *tmp;
138 	struct list_head *pos;
139 
140 	list_for_each(pos, head) {
141 		tmp = container_of(pos, struct nfp_cpp_resource, list);
142 
143 		if (tmp->cpp_id > res->cpp_id)
144 			break;
145 
146 		if (tmp->cpp_id == res->cpp_id && tmp->start > res->start)
147 			break;
148 	}
149 
150 	list_add_tail(&res->list, pos);
151 }
152 
153 static void __resource_del(struct nfp_cpp_resource *res)
154 {
155 	list_del_init(&res->list);
156 }
157 
158 static void __release_cpp_area(struct kref *kref)
159 {
160 	struct nfp_cpp_area *area =
161 		container_of(kref, struct nfp_cpp_area, kref);
162 	struct nfp_cpp *cpp = nfp_cpp_area_cpp(area);
163 
164 	if (area->cpp->op->area_cleanup)
165 		area->cpp->op->area_cleanup(area);
166 
167 	write_lock(&cpp->resource_lock);
168 	__resource_del(&area->resource);
169 	write_unlock(&cpp->resource_lock);
170 	kfree(area);
171 }
172 
173 static void nfp_cpp_area_put(struct nfp_cpp_area *area)
174 {
175 	kref_put(&area->kref, __release_cpp_area);
176 }
177 
178 static struct nfp_cpp_area *nfp_cpp_area_get(struct nfp_cpp_area *area)
179 {
180 	kref_get(&area->kref);
181 
182 	return area;
183 }
184 
185 /**
186  * nfp_cpp_free() - free the CPP handle
187  * @cpp:	CPP handle
188  */
189 void nfp_cpp_free(struct nfp_cpp *cpp)
190 {
191 	struct nfp_cpp_area_cache *cache, *ctmp;
192 	struct nfp_cpp_resource *res, *rtmp;
193 
194 	/* Remove all caches */
195 	list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) {
196 		list_del(&cache->entry);
197 		if (cache->id)
198 			nfp_cpp_area_release(cache->area);
199 		nfp_cpp_area_free(cache->area);
200 		kfree(cache);
201 	}
202 
203 	/* There should be no dangling areas at this point */
204 	WARN_ON(!list_empty(&cpp->resource_list));
205 
206 	/* .. but if there are, try to clean up. */
207 	list_for_each_entry_safe(res, rtmp, &cpp->resource_list, list) {
208 		struct nfp_cpp_area *area = container_of(res,
209 							 struct nfp_cpp_area,
210 							 resource);
211 
212 		dev_err(cpp->dev.parent, "Dangling area: %d:%d:%d:0x%0llx-0x%0llx%s%s\n",
213 			NFP_CPP_ID_TARGET_of(res->cpp_id),
214 			NFP_CPP_ID_ACTION_of(res->cpp_id),
215 			NFP_CPP_ID_TOKEN_of(res->cpp_id),
216 			res->start, res->end,
217 			res->name ? " " : "",
218 			res->name ? res->name : "");
219 
220 		if (area->cpp->op->area_release)
221 			area->cpp->op->area_release(area);
222 
223 		__release_cpp_area(&area->kref);
224 	}
225 
226 	if (cpp->op->free)
227 		cpp->op->free(cpp);
228 
229 	device_unregister(&cpp->dev);
230 
231 	kfree(cpp);
232 }
233 
234 /**
235  * nfp_cpp_model() - Retrieve the Model ID of the NFP
236  * @cpp:	NFP CPP handle
237  *
238  * Return: NFP CPP Model ID
239  */
240 u32 nfp_cpp_model(struct nfp_cpp *cpp)
241 {
242 	return cpp->model;
243 }
244 
245 /**
246  * nfp_cpp_interface() - Retrieve the Interface ID of the NFP
247  * @cpp:	NFP CPP handle
248  *
249  * Return: NFP CPP Interface ID
250  */
251 u16 nfp_cpp_interface(struct nfp_cpp *cpp)
252 {
253 	return cpp->interface;
254 }
255 
256 /**
257  * nfp_cpp_serial() - Retrieve the Serial ID of the NFP
258  * @cpp:	NFP CPP handle
259  * @serial:	Pointer to NFP serial number
260  *
261  * Return:  Length of NFP serial number
262  */
263 int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial)
264 {
265 	*serial = &cpp->serial[0];
266 	return sizeof(cpp->serial);
267 }
268 
269 /**
270  * nfp_cpp_area_alloc_with_name() - allocate a new CPP area
271  * @cpp:	CPP device handle
272  * @dest:	NFP CPP ID
273  * @name:	Name of region
274  * @address:	Address of region
275  * @size:	Size of region
276  *
277  * Allocate and initialize a CPP area structure.  The area must later
278  * be locked down with an 'acquire' before it can be safely accessed.
279  *
280  * NOTE: @address and @size must be 32-bit aligned values.
281  *
282  * Return: NFP CPP area handle, or NULL
283  */
284 struct nfp_cpp_area *
285 nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, u32 dest, const char *name,
286 			     unsigned long long address, unsigned long size)
287 {
288 	struct nfp_cpp_area *area;
289 	u64 tmp64 = address;
290 	int err, name_len;
291 
292 	/* Remap from cpp_island to cpp_target */
293 	err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table);
294 	if (err < 0)
295 		return NULL;
296 
297 	address = tmp64;
298 
299 	if (!name)
300 		name = "(reserved)";
301 
302 	name_len = strlen(name) + 1;
303 	area = kzalloc(sizeof(*area) + cpp->op->area_priv_size + name_len,
304 		       GFP_KERNEL);
305 	if (!area)
306 		return NULL;
307 
308 	area->cpp = cpp;
309 	area->resource.name = (void *)area + sizeof(*area) +
310 		cpp->op->area_priv_size;
311 	memcpy((char *)area->resource.name, name, name_len);
312 
313 	area->resource.cpp_id = dest;
314 	area->resource.start = address;
315 	area->resource.end = area->resource.start + size - 1;
316 	INIT_LIST_HEAD(&area->resource.list);
317 
318 	atomic_set(&area->refcount, 0);
319 	kref_init(&area->kref);
320 	mutex_init(&area->mutex);
321 
322 	if (cpp->op->area_init) {
323 		int err;
324 
325 		err = cpp->op->area_init(area, dest, address, size);
326 		if (err < 0) {
327 			kfree(area);
328 			return NULL;
329 		}
330 	}
331 
332 	write_lock(&cpp->resource_lock);
333 	__resource_add(&cpp->resource_list, &area->resource);
334 	write_unlock(&cpp->resource_lock);
335 
336 	area->offset = address;
337 	area->size = size;
338 
339 	return area;
340 }
341 
342 /**
343  * nfp_cpp_area_alloc() - allocate a new CPP area
344  * @cpp:	CPP handle
345  * @dest:	CPP id
346  * @address:	Start address on CPP target
347  * @size:	Size of area in bytes
348  *
349  * Allocate and initialize a CPP area structure.  The area must later
350  * be locked down with an 'acquire' before it can be safely accessed.
351  *
352  * NOTE: @address and @size must be 32-bit aligned values.
353  *
354  * Return: NFP CPP Area handle, or NULL
355  */
356 struct nfp_cpp_area *
357 nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
358 		   unsigned long long address, unsigned long size)
359 {
360 	return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size);
361 }
362 
363 /**
364  * nfp_cpp_area_alloc_acquire() - allocate a new CPP area and lock it down
365  * @cpp:	CPP handle
366  * @name:	Name of region
367  * @dest:	CPP id
368  * @address:	Start address on CPP target
369  * @size:	Size of area
370  *
371  * Allocate and initialize a CPP area structure, and lock it down so
372  * that it can be accessed directly.
373  *
374  * NOTE: @address and @size must be 32-bit aligned values.
375  *
376  * NOTE: The area must also be 'released' when the structure is freed.
377  *
378  * Return: NFP CPP Area handle, or NULL
379  */
380 struct nfp_cpp_area *
381 nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, const char *name, u32 dest,
382 			   unsigned long long address, unsigned long size)
383 {
384 	struct nfp_cpp_area *area;
385 
386 	area = nfp_cpp_area_alloc_with_name(cpp, dest, name, address, size);
387 	if (!area)
388 		return NULL;
389 
390 	if (nfp_cpp_area_acquire(area)) {
391 		nfp_cpp_area_free(area);
392 		return NULL;
393 	}
394 
395 	return area;
396 }
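/*
 * Usage sketch (illustrative only, not part of the driver): a typical caller
 * pairs nfp_cpp_area_alloc_acquire() with nfp_cpp_area_release_free() and uses
 * the 32-bit helpers below for CSR-style accesses.  The "example" name and the
 * target/addr/size values are placeholders; real callers build the CPP ID with
 * NFP_CPP_ID() for the target they actually need.
 *
 *	struct nfp_cpp_area *area;
 *	u32 csr;
 *	int err;
 *
 *	area = nfp_cpp_area_alloc_acquire(cpp, "example",
 *					  NFP_CPP_ID(target, NFP_CPP_ACTION_RW, 0),
 *					  addr, 4096);
 *	if (!area)
 *		return -ENOMEM;
 *
 *	err = nfp_cpp_area_readl(area, 0, &csr);
 *	if (err >= 0)
 *		err = nfp_cpp_area_writel(area, 0, csr | BIT(0));
 *
 *	nfp_cpp_area_release_free(area);
 */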
397 
398 /**
399  * nfp_cpp_area_free() - free up the CPP area
400  * @area:	CPP area handle
401  *
402  * Frees up memory resources held by the CPP area.
403  */
404 void nfp_cpp_area_free(struct nfp_cpp_area *area)
405 {
406 	if (atomic_read(&area->refcount))
407 		nfp_warn(area->cpp, "Warning: freeing busy area\n");
408 	nfp_cpp_area_put(area);
409 }
410 
411 static bool nfp_cpp_area_acquire_try(struct nfp_cpp_area *area, int *status)
412 {
413 	*status = area->cpp->op->area_acquire(area);
414 
415 	return *status != -EAGAIN;
416 }
417 
418 static int __nfp_cpp_area_acquire(struct nfp_cpp_area *area)
419 {
420 	int err, status;
421 
422 	if (atomic_inc_return(&area->refcount) > 1)
423 		return 0;
424 
425 	if (!area->cpp->op->area_acquire)
426 		return 0;
427 
428 	err = wait_event_interruptible(area->cpp->waitq,
429 				       nfp_cpp_area_acquire_try(area, &status));
430 	if (!err)
431 		err = status;
432 	if (err) {
433 		nfp_warn(area->cpp, "Warning: area wait failed: %d\n", err);
434 		atomic_dec(&area->refcount);
435 		return err;
436 	}
437 
438 	nfp_cpp_area_get(area);
439 
440 	return 0;
441 }
442 
443 /**
444  * nfp_cpp_area_acquire() - lock down a CPP area for access
445  * @area:	CPP area handle
446  *
447  * Locks down the CPP area for a potential long term activity.  Area
448  * must always be locked down before being accessed.
449  *
450  * Return: 0, or -ERRNO
451  */
452 int nfp_cpp_area_acquire(struct nfp_cpp_area *area)
453 {
454 	int ret;
455 
456 	mutex_lock(&area->mutex);
457 	ret = __nfp_cpp_area_acquire(area);
458 	mutex_unlock(&area->mutex);
459 
460 	return ret;
461 }
462 
463 /**
464  * nfp_cpp_area_acquire_nonblocking() - lock down a CPP area for access
465  * @area:	CPP area handle
466  *
467  * Locks down the CPP area for a potential long term activity.  Area
468  * must always be locked down before being accessed.
469  *
470  * NOTE: Returns -EAGAIN if no area is available
471  *
472  * Return: 0, or -ERRNO
473  */
474 int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area)
475 {
476 	mutex_lock(&area->mutex);
477 	if (atomic_inc_return(&area->refcount) == 1) {
478 		if (area->cpp->op->area_acquire) {
479 			int err;
480 
481 			err = area->cpp->op->area_acquire(area);
482 			if (err < 0) {
483 				atomic_dec(&area->refcount);
484 				mutex_unlock(&area->mutex);
485 				return err;
486 			}
487 		}
488 	}
489 	mutex_unlock(&area->mutex);
490 
491 	nfp_cpp_area_get(area);
492 	return 0;
493 }
494 
495 /**
496  * nfp_cpp_area_release() - release a locked down CPP area
497  * @area:	CPP area handle
498  *
499  * Releases a previously locked down CPP area.
500  */
501 void nfp_cpp_area_release(struct nfp_cpp_area *area)
502 {
503 	mutex_lock(&area->mutex);
504 	/* Only call the release on refcount == 0 */
505 	if (atomic_dec_and_test(&area->refcount)) {
506 		if (area->cpp->op->area_release) {
507 			area->cpp->op->area_release(area);
508 			/* Let anyone waiting for a BAR try to get one.. */
509 			wake_up_interruptible_all(&area->cpp->waitq);
510 		}
511 	}
512 	mutex_unlock(&area->mutex);
513 
514 	nfp_cpp_area_put(area);
515 }
516 
517 /**
518  * nfp_cpp_area_release_free() - release CPP area and free it
519  * @area:	CPP area handle
520  *
521  * Releases the CPP area and frees up the memory resources held by it.
522  */
523 void nfp_cpp_area_release_free(struct nfp_cpp_area *area)
524 {
525 	nfp_cpp_area_release(area);
526 	nfp_cpp_area_free(area);
527 }
528 
529 /**
530  * nfp_cpp_area_read() - read data from CPP area
531  * @area:	  CPP area handle
532  * @offset:	  offset into CPP area
533  * @kernel_vaddr: kernel address to put data into
534  * @length:	  number of bytes to read
535  *
536  * Read data from indicated CPP region.
537  *
538  * NOTE: @offset and @length must be 32-bit aligned values.
539  *
540  * NOTE: Area must have been locked down with an 'acquire'.
541  *
542  * Return: length of io, or -ERRNO
543  */
544 int nfp_cpp_area_read(struct nfp_cpp_area *area,
545 		      unsigned long offset, void *kernel_vaddr,
546 		      size_t length)
547 {
548 	return area->cpp->op->area_read(area, kernel_vaddr, offset, length);
549 }
550 
551 /**
552  * nfp_cpp_area_write() - write data to CPP area
553  * @area:	CPP area handle
554  * @offset:	offset into CPP area
555  * @kernel_vaddr: kernel address to read data from
556  * @length:	number of bytes to write
557  *
558  * Write data to indicated CPP region.
559  *
560  * NOTE: @offset and @length must be 32-bit aligned values.
561  *
562  * NOTE: Area must have been locked down with an 'acquire'.
563  *
564  * Return: length of io, or -ERRNO
565  */
566 int nfp_cpp_area_write(struct nfp_cpp_area *area,
567 		       unsigned long offset, const void *kernel_vaddr,
568 		       size_t length)
569 {
570 	return area->cpp->op->area_write(area, kernel_vaddr, offset, length);
571 }
572 
573 /**
574  * nfp_cpp_area_name() - return name of a CPP area
575  * @cpp_area:	CPP area handle
576  *
577  * Return: Name of the area, or NULL
578  */
579 const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area)
580 {
581 	return cpp_area->resource.name;
582 }
583 
584 /**
585  * nfp_cpp_area_priv() - return private struct for CPP area
586  * @cpp_area:	CPP area handle
587  *
588  * Return: Private data for the CPP area
589  */
590 void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area)
591 {
592 	return &cpp_area[1];
593 }
594 
595 /**
596  * nfp_cpp_area_cpp() - return CPP handle for CPP area
597  * @cpp_area:	CPP area handle
598  *
599  * Return: NFP CPP handle
600  */
601 struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area)
602 {
603 	return cpp_area->cpp;
604 }
605 
606 /**
607  * nfp_cpp_area_resource() - get resource
608  * @area:	CPP area handle
609  *
610  * NOTE: Area must have been locked down with an 'acquire'.
611  *
612  * Return: struct resource pointer, or NULL
613  */
614 struct resource *nfp_cpp_area_resource(struct nfp_cpp_area *area)
615 {
616 	struct resource *res = NULL;
617 
618 	if (area->cpp->op->area_resource)
619 		res = area->cpp->op->area_resource(area);
620 
621 	return res;
622 }
623 
624 /**
625  * nfp_cpp_area_phys() - get physical address of CPP area
626  * @area:	CPP area handle
627  *
628  * NOTE: Area must have been locked down with an 'acquire'.
629  *
630  * Return: phys_addr_t of the area, or ~0 if unavailable
631  */
632 phys_addr_t nfp_cpp_area_phys(struct nfp_cpp_area *area)
633 {
634 	phys_addr_t addr = ~0;
635 
636 	if (area->cpp->op->area_phys)
637 		addr = area->cpp->op->area_phys(area);
638 
639 	return addr;
640 }
641 
642 /**
643  * nfp_cpp_area_iomem() - get IOMEM region for CPP area
644  * @area:	CPP area handle
645  *
646  * Returns an iomem pointer for use with readl()/writel() style
647  * operations.
648  *
649  * NOTE: Area must have been locked down with an 'acquire'.
650  *
651  * Return: __iomem pointer to the area, or NULL
652  */
653 void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area)
654 {
655 	void __iomem *iomem = NULL;
656 
657 	if (area->cpp->op->area_iomem)
658 		iomem = area->cpp->op->area_iomem(area);
659 
660 	return iomem;
661 }
662 
663 /**
664  * nfp_cpp_area_readl() - Read a u32 word from an area
665  * @area:	CPP Area handle
666  * @offset:	Offset into area
667  * @value:	Pointer to read buffer
668  *
669  * Return: length of the io, or -ERRNO
670  */
671 int nfp_cpp_area_readl(struct nfp_cpp_area *area,
672 		       unsigned long offset, u32 *value)
673 {
674 	u8 tmp[4];
675 	int err;
676 
677 	err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
678 	*value = get_unaligned_le32(tmp);
679 
680 	return err;
681 }
682 
683 /**
684  * nfp_cpp_area_writel() - Write a u32 word to an area
685  * @area:	CPP Area handle
686  * @offset:	Offset into area
687  * @value:	Value to write
688  *
689  * Return: length of the io, or -ERRNO
690  */
691 int nfp_cpp_area_writel(struct nfp_cpp_area *area,
692 			unsigned long offset, u32 value)
693 {
694 	u8 tmp[4];
695 
696 	put_unaligned_le32(value, tmp);
697 
698 	return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
699 }
700 
701 /**
702  * nfp_cpp_area_readq() - Read a u64 word from an area
703  * @area:	CPP Area handle
704  * @offset:	Offset into area
705  * @value:	Pointer to read buffer
706  *
707  * Return: length of the io, or -ERRNO
708  */
709 int nfp_cpp_area_readq(struct nfp_cpp_area *area,
710 		       unsigned long offset, u64 *value)
711 {
712 	u8 tmp[8];
713 	int err;
714 
715 	err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
716 	*value = get_unaligned_le64(tmp);
717 
718 	return err;
719 }
720 
721 /**
722  * nfp_cpp_area_writeq() - Write a u64 word to an area
723  * @area:	CPP Area handle
724  * @offset:	Offset into area
725  * @value:	Value to write
726  *
727  * Return: length of the io, or -ERRNO
728  */
729 int nfp_cpp_area_writeq(struct nfp_cpp_area *area,
730 			unsigned long offset, u64 value)
731 {
732 	u8 tmp[8];
733 
734 	put_unaligned_le64(value, tmp);
735 
736 	return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
737 }
738 
739 /**
740  * nfp_cpp_area_fill() - fill a CPP area with a value
741  * @area:	CPP area
742  * @offset:	offset into CPP area
743  * @value:	value to fill with
744  * @length:	length of area to fill
745  *
746  * Fill indicated area with given value.
747  *
748  * Return: length of io, or -ERRNO
749  */
750 int nfp_cpp_area_fill(struct nfp_cpp_area *area,
751 		      unsigned long offset, u32 value, size_t length)
752 {
753 	u8 tmp[4];
754 	size_t i;
755 	int k;
756 
757 	put_unaligned_le32(value, tmp);
758 
759 	if (offset % sizeof(tmp) || length % sizeof(tmp))
760 		return -EINVAL;
761 
762 	for (i = 0; i < length; i += sizeof(tmp)) {
763 		k = nfp_cpp_area_write(area, offset + i, &tmp, sizeof(tmp));
764 		if (k < 0)
765 			return k;
766 	}
767 
768 	return i;
769 }
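/*
 * Example (sketch): zeroing a 64-byte, 4-byte-aligned window of an already
 * acquired area with nfp_cpp_area_fill().  Offsets and lengths that are not
 * multiples of 4 are rejected with -EINVAL above; "area" is assumed to have
 * been set up as in the earlier allocation examples.
 *
 *	int ret;
 *
 *	ret = nfp_cpp_area_fill(area, 0, 0, 64);
 *	if (ret < 0)
 *		return ret;	// ret == 64 on success
 */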
770 
771 /**
772  * nfp_cpp_area_cache_add() - Permanently reserve an area for the hot cache
773  * @cpp:	NFP CPP handle
774  * @size:	Size of the area - MUST BE A POWER OF 2.
775  */
776 int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
777 {
778 	struct nfp_cpp_area_cache *cache;
779 	struct nfp_cpp_area *area;
780 
781 	/* Allocate an area - we use the MU target's base as a placeholder,
782 	 * as all supported chips have a MU.
783 	 */
784 	area = nfp_cpp_area_alloc(cpp, NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0),
785 				  0, size);
786 	if (!area)
787 		return -ENOMEM;
788 
789 	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
790 	if (!cache) {
		nfp_cpp_area_free(area);
		return -ENOMEM;
	}
792 
793 	cache->id = 0;
794 	cache->addr = 0;
795 	cache->size = size;
796 	cache->area = area;
797 	mutex_lock(&cpp->area_cache_mutex);
798 	list_add_tail(&cache->entry, &cpp->area_cache_list);
799 	mutex_unlock(&cpp->area_cache_mutex);
800 
801 	return 0;
802 }
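/*
 * Sketch: a transport would typically call this a few times at probe, after
 * nfp_cpp_from_operations() succeeds, to pre-reserve power-of-two sized
 * windows that later nfp_cpp_read()/nfp_cpp_write() calls can reuse via the
 * LRU in area_cache_get().  The size below is illustrative, not mandated.
 *
 *	err = nfp_cpp_area_cache_add(cpp, SZ_64K);
 *	if (err)
 *		return err;
 */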
803 
804 static struct nfp_cpp_area_cache *
805 area_cache_get(struct nfp_cpp *cpp, u32 id,
806 	       u64 addr, unsigned long *offset, size_t length)
807 {
808 	struct nfp_cpp_area_cache *cache;
809 	int err;
810 
811 	/* Early exit when length == 0, which prevents
812 	 * the need for special case code below when
813 	 * checking against available cache size.
814 	 */
815 	if (length == 0 || id == 0)
816 		return NULL;
817 
818 	/* Remap from cpp_island to cpp_target */
819 	err = nfp_target_cpp(id, addr, &id, &addr, cpp->imb_cat_table);
820 	if (err < 0)
821 		return NULL;
822 
823 	mutex_lock(&cpp->area_cache_mutex);
824 
825 	if (list_empty(&cpp->area_cache_list)) {
826 		mutex_unlock(&cpp->area_cache_mutex);
827 		return NULL;
828 	}
829 
830 	addr += *offset;
831 
832 	/* See if we have a match */
833 	list_for_each_entry(cache, &cpp->area_cache_list, entry) {
834 		if (id == cache->id &&
835 		    addr >= cache->addr &&
836 		    addr + length <= cache->addr + cache->size)
837 			goto exit;
838 	}
839 
840 	/* No matches - inspect the tail of the LRU */
841 	cache = list_entry(cpp->area_cache_list.prev,
842 			   struct nfp_cpp_area_cache, entry);
843 
844 	/* Can we fit in the cache entry? */
845 	if (round_down(addr + length - 1, cache->size) !=
846 	    round_down(addr, cache->size)) {
847 		mutex_unlock(&cpp->area_cache_mutex);
848 		return NULL;
849 	}
850 
851 	/* If id != 0, we will need to release it */
852 	if (cache->id) {
853 		nfp_cpp_area_release(cache->area);
854 		cache->id = 0;
855 		cache->addr = 0;
856 	}
857 
858 	/* Adjust the start address to be cache size aligned */
859 	cache->id = id;
860 	cache->addr = addr & ~(u64)(cache->size - 1);
861 
862 	/* Re-init to the new ID and address */
863 	if (cpp->op->area_init) {
864 		err = cpp->op->area_init(cache->area,
865 					 id, cache->addr, cache->size);
866 		if (err < 0) {
867 			mutex_unlock(&cpp->area_cache_mutex);
868 			return NULL;
869 		}
870 	}
871 
872 	/* Attempt to acquire */
873 	err = nfp_cpp_area_acquire(cache->area);
874 	if (err < 0) {
875 		mutex_unlock(&cpp->area_cache_mutex);
876 		return NULL;
877 	}
878 
879 exit:
880 	/* Adjust offset */
881 	*offset = addr - cache->addr;
882 	return cache;
883 }
884 
885 static void
886 area_cache_put(struct nfp_cpp *cpp, struct nfp_cpp_area_cache *cache)
887 {
888 	if (!cache)
889 		return;
890 
891 	/* Move to front of LRU */
892 	list_del(&cache->entry);
893 	list_add(&cache->entry, &cpp->area_cache_list);
894 
895 	mutex_unlock(&cpp->area_cache_mutex);
896 }
897 
898 static int __nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
899 			  unsigned long long address, void *kernel_vaddr,
900 			  size_t length)
901 {
902 	struct nfp_cpp_area_cache *cache;
903 	struct nfp_cpp_area *area;
904 	unsigned long offset = 0;
905 	int err;
906 
907 	cache = area_cache_get(cpp, destination, address, &offset, length);
908 	if (cache) {
909 		area = cache->area;
910 	} else {
911 		area = nfp_cpp_area_alloc(cpp, destination, address, length);
912 		if (!area)
913 			return -ENOMEM;
914 
915 		err = nfp_cpp_area_acquire(area);
916 		if (err) {
917 			nfp_cpp_area_free(area);
918 			return err;
919 		}
920 	}
921 
922 	err = nfp_cpp_area_read(area, offset, kernel_vaddr, length);
923 
924 	if (cache)
925 		area_cache_put(cpp, cache);
926 	else
927 		nfp_cpp_area_release_free(area);
928 
929 	return err;
930 }
931 
932 /**
933  * nfp_cpp_read() - read from CPP target
934  * @cpp:		CPP handle
935  * @destination:	CPP id
936  * @address:		offset into CPP target
937  * @kernel_vaddr:	kernel buffer for result
938  * @length:		number of bytes to read
939  *
940  * Return: length of io, or -ERRNO
941  */
942 int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
943 		 unsigned long long address, void *kernel_vaddr,
944 		 size_t length)
945 {
946 	size_t n, offset;
947 	int ret;
948 
949 	for (offset = 0; offset < length; offset += n) {
950 		unsigned long long r_addr = address + offset;
951 
952 		/* make first read smaller to align to safe window */
953 		n = min_t(size_t, length - offset,
954 			  ALIGN(r_addr + 1, NFP_CPP_SAFE_AREA_SIZE) - r_addr);
955 
956 		ret = __nfp_cpp_read(cpp, destination, address + offset,
957 				     kernel_vaddr + offset, n);
958 		if (ret < 0)
959 			return ret;
960 		if (ret != n)
961 			return offset + n;
962 	}
963 
964 	return length;
965 }
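/*
 * Example (sketch): a one-off bulk read that lets the core find a cached area
 * or create and acquire a temporary one internally.  The MU target value
 * mirrors the one used by nfp_cpp_area_cache_add() above; "addr" is a
 * placeholder.
 *
 *	u8 buf[128];
 *	int ret;
 *
 *	ret = nfp_cpp_read(cpp, NFP_CPP_ID(7 /\* MU *\/, NFP_CPP_ACTION_RW, 0),
 *			   addr, buf, sizeof(buf));
 *	if (ret != sizeof(buf))
 *		return ret < 0 ? ret : -EIO;
 */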
966 
967 static int __nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
968 			   unsigned long long address,
969 			   const void *kernel_vaddr, size_t length)
970 {
971 	struct nfp_cpp_area_cache *cache;
972 	struct nfp_cpp_area *area;
973 	unsigned long offset = 0;
974 	int err;
975 
976 	cache = area_cache_get(cpp, destination, address, &offset, length);
977 	if (cache) {
978 		area = cache->area;
979 	} else {
980 		area = nfp_cpp_area_alloc(cpp, destination, address, length);
981 		if (!area)
982 			return -ENOMEM;
983 
984 		err = nfp_cpp_area_acquire(area);
985 		if (err) {
986 			nfp_cpp_area_free(area);
987 			return err;
988 		}
989 	}
990 
991 	err = nfp_cpp_area_write(area, offset, kernel_vaddr, length);
992 
993 	if (cache)
994 		area_cache_put(cpp, cache);
995 	else
996 		nfp_cpp_area_release_free(area);
997 
998 	return err;
999 }
1000 
1001 /**
1002  * nfp_cpp_write() - write to CPP target
1003  * @cpp:		CPP handle
1004  * @destination:	CPP id
1005  * @address:		offset into CPP target
1006  * @kernel_vaddr:	kernel buffer to read from
1007  * @length:		number of bytes to write
1008  *
1009  * Return: length of io, or -ERRNO
1010  */
1011 int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
1012 		  unsigned long long address,
1013 		  const void *kernel_vaddr, size_t length)
1014 {
1015 	size_t n, offset;
1016 	int ret;
1017 
1018 	for (offset = 0; offset < length; offset += n) {
1019 		unsigned long long w_addr = address + offset;
1020 
1021 		/* make first write smaller to align to safe window */
1022 		n = min_t(size_t, length - offset,
1023 			  ALIGN(w_addr + 1, NFP_CPP_SAFE_AREA_SIZE) - w_addr);
1024 
1025 		ret = __nfp_cpp_write(cpp, destination, address + offset,
1026 				      kernel_vaddr + offset, n);
1027 		if (ret < 0)
1028 			return ret;
1029 		if (ret != n)
1030 			return offset + n;
1031 	}
1032 
1033 	return length;
1034 }
1035 
1036 /* Return the correct CPP address, and fixup xpb_addr as needed. */
1037 static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr)
1038 {
1039 	int island;
1040 	u32 xpb;
1041 
1042 	xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0);
1043 	/* Ensure that non-local XPB accesses go
1044 	 * out through the global XPBM bus.
1045 	 */
1046 	island = (*xpb_addr >> 24) & 0x3f;
1047 	if (!island)
1048 		return xpb;
1049 
1050 	if (island != 1) {
1051 		*xpb_addr |= 1 << 30;
1052 		return xpb;
1053 	}
1054 
1055 	/* Accesses to the ARM Island overlay use Island 0 / Global Bit */
1056 	*xpb_addr &= ~0x7f000000;
1057 	if (*xpb_addr < 0x60000) {
1058 		*xpb_addr |= 1 << 30;
1059 	} else {
1060 		/* And only non-ARM interfaces use the island id = 1 */
1061 		if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp))
1062 		    != NFP_CPP_INTERFACE_TYPE_ARM)
1063 			*xpb_addr |= 1 << 24;
1064 	}
1065 
1066 	return xpb;
1067 }
1068 
1069 /**
1070  * nfp_xpb_readl() - Read a u32 word from a XPB location
1071  * @cpp:	CPP device handle
1072  * @xpb_addr:	Address for operation
1073  * @value:	Pointer to read buffer
1074  *
1075  * Return: length of the io, or -ERRNO
1076  */
1077 int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
1078 {
1079 	u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
1080 
1081 	return nfp_cpp_readl(cpp, cpp_dest, xpb_addr, value);
1082 }
1083 
1084 /**
1085  * nfp_xpb_writel() - Write a u32 word to a XPB location
1086  * @cpp:	CPP device handle
1087  * @xpb_addr:	Address for operation
1088  * @value:	Value to write
1089  *
1090  * Return: length of the io, or -ERRNO
1091  */
1092 int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
1093 {
1094 	u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
1095 
1096 	return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value);
1097 }
1098 
1099 /**
1100  * nfp_xpb_writelm() - Modify bits of a 32-bit value from the XPB bus
1101  * @cpp:	NFP CPP device handle
1102  * @xpb_tgt:	XPB target and address
1103  * @mask:	mask of bits to alter
1104  * @value:	value to modify
1105  *
1106  * KERNEL: This operation is safe to call in interrupt or softirq context.
1107  *
1108  * Return: length of the io, or -ERRNO
1109  */
1110 int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt,
1111 		    u32 mask, u32 value)
1112 {
1113 	int err;
1114 	u32 tmp;
1115 
1116 	err = nfp_xpb_readl(cpp, xpb_tgt, &tmp);
1117 	if (err < 0)
1118 		return err;
1119 
1120 	tmp &= ~mask;
1121 	tmp |= mask & value;
1122 	return nfp_xpb_writel(cpp, xpb_tgt, tmp);
1123 }
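/*
 * Example (sketch): the read-modify-write helper above is the usual way to
 * flip individual bits in an XPB CSR without disturbing the rest of the
 * register.  The CSR address is a placeholder.
 *
 *	// set bit 0, leave all other bits untouched
 *	err = nfp_xpb_writelm(cpp, xpb_csr_addr, BIT(0), BIT(0));
 *	if (err < 0)
 *		return err;
 */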
1124 
1125 /* Lockdep markers */
1126 static struct lock_class_key nfp_cpp_resource_lock_key;
1127 
1128 static void nfp_cpp_dev_release(struct device *dev)
1129 {
1130 	/* Nothing to do here - it just makes the kernel happy */
1131 }
1132 
1133 /**
1134  * nfp_cpp_from_operations() - Create a NFP CPP handle
1135  *                             from an operations structure
1136  * @ops:	NFP CPP operations structure
1137  * @parent:	Parent device
1138  * @priv:	Private data of low-level implementation
1139  *
1140  * NOTE: On failure, ops->free will be called!
1141  *
1142  * Return: NFP CPP handle on success, ERR_PTR on failure
1143  */
1144 struct nfp_cpp *
1145 nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
1146 			struct device *parent, void *priv)
1147 {
1148 	const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0);
1149 	struct nfp_cpp *cpp;
1150 	u32 mask[2];
1151 	u32 xpbaddr;
1152 	size_t tgt;
1153 	int err;
1154 
1155 	cpp = kzalloc(sizeof(*cpp), GFP_KERNEL);
1156 	if (!cpp) {
1157 		err = -ENOMEM;
1158 		goto err_malloc;
1159 	}
1160 
1161 	cpp->op = ops;
1162 	cpp->priv = priv;
1163 	cpp->interface = ops->get_interface(parent);
1164 	if (ops->read_serial)
1165 		ops->read_serial(parent, cpp->serial);
1166 	rwlock_init(&cpp->resource_lock);
1167 	init_waitqueue_head(&cpp->waitq);
1168 	lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key);
1169 	INIT_LIST_HEAD(&cpp->resource_list);
1170 	INIT_LIST_HEAD(&cpp->area_cache_list);
1171 	mutex_init(&cpp->area_cache_mutex);
1172 	cpp->dev.init_name = "cpp";
1173 	cpp->dev.parent = parent;
1174 	cpp->dev.release = nfp_cpp_dev_release;
1175 	err = device_register(&cpp->dev);
1176 	if (err < 0) {
1177 		put_device(&cpp->dev);
1178 		goto err_dev;
1179 	}
1180 
1181 	dev_set_drvdata(&cpp->dev, cpp);
1182 
1183 	/* NOTE: cpp_lock is NOT locked for op->init,
1184 	 * since it may call NFP CPP API operations
1185 	 */
1186 	if (cpp->op->init) {
1187 		err = cpp->op->init(cpp);
1188 		if (err < 0) {
1189 			dev_err(parent,
1190 				"NFP interface initialization failed\n");
1191 			goto err_out;
1192 		}
1193 	}
1194 
1195 	err = nfp_cpp_model_autodetect(cpp, &cpp->model);
1196 	if (err < 0) {
1197 		dev_err(parent, "NFP model detection failed\n");
1198 		goto err_out;
1199 	}
1200 
1201 	for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) {
1202 		/* Hardcoded XPB IMB Base, island 0 */
1203 		xpbaddr = 0x000a0000 + (tgt * 4);
1204 		err = nfp_xpb_readl(cpp, xpbaddr,
1205 				    &cpp->imb_cat_table[tgt]);
1206 		if (err < 0) {
1207 			dev_err(parent,
1208 				"Can't read CPP mapping from device\n");
1209 			goto err_out;
1210 		}
1211 	}
1212 
1213 	nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL2,
1214 		      &mask[0]);
1215 	nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL3,
1216 		      &mask[1]);
1217 
1218 	dev_info(cpp->dev.parent, "Model: 0x%08x, SN: %pM, Ifc: 0x%04x\n",
1219 		 nfp_cpp_model(cpp), cpp->serial, nfp_cpp_interface(cpp));
1220 
1221 	return cpp;
1222 
1223 err_out:
1224 	device_unregister(&cpp->dev);
1225 err_dev:
1226 	kfree(cpp);
1227 err_malloc:
1228 	return ERR_PTR(err);
1229 }
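/*
 * Sketch of how a transport backend is expected to hook in: it fills a
 * struct nfp_cpp_operations with its area (and optionally explicit)
 * callbacks and hands it to nfp_cpp_from_operations() from probe.  The
 * my_* callback names and my_area_priv type are illustrative; only fields
 * actually referenced by this file are shown.
 *
 *	static const struct nfp_cpp_operations my_cpp_ops = {
 *		.area_priv_size	= sizeof(struct my_area_priv),
 *		.get_interface	= my_get_interface,
 *		.read_serial	= my_read_serial,
 *		.area_init	= my_area_init,
 *		.area_acquire	= my_area_acquire,
 *		.area_release	= my_area_release,
 *		.area_read	= my_area_read,
 *		.area_write	= my_area_write,
 *	};
 *
 *	cpp = nfp_cpp_from_operations(&my_cpp_ops, &pdev->dev, priv);
 *	if (IS_ERR(cpp))
 *		return PTR_ERR(cpp);
 */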
1230 
1231 /**
1232  * nfp_cpp_priv() - Get the operations private data of a CPP handle
1233  * @cpp:	CPP handle
1234  *
1235  * Return: Private data for the NFP CPP handle
1236  */
1237 void *nfp_cpp_priv(struct nfp_cpp *cpp)
1238 {
1239 	return cpp->priv;
1240 }
1241 
1242 /**
1243  * nfp_cpp_device() - Get the Linux device handle of a CPP handle
1244  * @cpp:	CPP handle
1245  *
1246  * Return: Device for the NFP CPP bus
1247  */
1248 struct device *nfp_cpp_device(struct nfp_cpp *cpp)
1249 {
1250 	return &cpp->dev;
1251 }
1252 
1253 #define NFP_EXPL_OP(func, expl, args...)			  \
1254 	({							  \
1255 		struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \
1256 		int err = -ENODEV;				  \
1257 								  \
1258 		if (cpp->op->func)				  \
1259 			err = cpp->op->func(expl, ##args);	  \
1260 		err;						  \
1261 	})
1262 
1263 #define NFP_EXPL_OP_NR(func, expl, args...)			  \
1264 	({							  \
1265 		struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \
1266 								  \
1267 		if (cpp->op->func)				  \
1268 			cpp->op->func(expl, ##args);		  \
1269 								  \
1270 	})
1271 
1272 /**
1273  * nfp_cpp_explicit_acquire() - Acquire explicit access handle
1274  * @cpp:	NFP CPP handle
1275  *
1276  * The 'data_ref' and 'signal_ref' values are useful when
1277  * constructing the NFP_EXPL_CSR1 and NFP_EXPL_POST values.
1278  *
1279  * Return: NFP CPP explicit handle
1280  */
1281 struct nfp_cpp_explicit *nfp_cpp_explicit_acquire(struct nfp_cpp *cpp)
1282 {
1283 	struct nfp_cpp_explicit *expl;
1284 	int err;
1285 
1286 	expl = kzalloc(sizeof(*expl) + cpp->op->explicit_priv_size, GFP_KERNEL);
1287 	if (!expl)
1288 		return NULL;
1289 
1290 	expl->cpp = cpp;
1291 	err = NFP_EXPL_OP(explicit_acquire, expl);
1292 	if (err < 0) {
1293 		kfree(expl);
1294 		return NULL;
1295 	}
1296 
1297 	return expl;
1298 }
1299 
1300 /**
1301  * nfp_cpp_explicit_set_target() - Set target fields for explicit
1302  * @expl:	Explicit handle
1303  * @cpp_id:	CPP ID field
1304  * @len:	CPP Length field
1305  * @mask:	CPP Mask field
1306  *
1307  * Return: 0, or -ERRNO
1308  */
1309 int nfp_cpp_explicit_set_target(struct nfp_cpp_explicit *expl,
1310 				u32 cpp_id, u8 len, u8 mask)
1311 {
1312 	expl->cmd.cpp_id = cpp_id;
1313 	expl->cmd.len = len;
1314 	expl->cmd.byte_mask = mask;
1315 
1316 	return 0;
1317 }
1318 
1319 /**
1320  * nfp_cpp_explicit_set_data() - Set data fields for explicit
1321  * @expl:	Explicit handle
1322  * @data_master: CPP Data Master field
1323  * @data_ref:	CPP Data Ref field
1324  *
1325  * Return: 0, or -ERRNO
1326  */
1327 int nfp_cpp_explicit_set_data(struct nfp_cpp_explicit *expl,
1328 			      u8 data_master, u16 data_ref)
1329 {
1330 	expl->cmd.data_master = data_master;
1331 	expl->cmd.data_ref = data_ref;
1332 
1333 	return 0;
1334 }
1335 
1336 /**
1337  * nfp_cpp_explicit_set_signal() - Set signal fields for explicit
1338  * @expl:	Explicit handle
1339  * @signal_master: CPP Signal Master field
1340  * @signal_ref:	CPP Signal Ref field
1341  *
1342  * Return: 0, or -ERRNO
1343  */
1344 int nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit *expl,
1345 				u8 signal_master, u8 signal_ref)
1346 {
1347 	expl->cmd.signal_master = signal_master;
1348 	expl->cmd.signal_ref = signal_ref;
1349 
1350 	return 0;
1351 }
1352 
1353 /**
1354  * nfp_cpp_explicit_set_posted() - Set completion fields for explicit
1355  * @expl:	Explicit handle
1356  * @posted:	True for signaled completion, false otherwise
1357  * @siga:	CPP Signal A field
1358  * @siga_mode:	CPP Signal A Mode field
1359  * @sigb:	CPP Signal B field
1360  * @sigb_mode:	CPP Signal B Mode field
1361  *
1362  * Return: 0, or -ERRNO
1363  */
1364 int nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit *expl, int posted,
1365 				u8 siga,
1366 				enum nfp_cpp_explicit_signal_mode siga_mode,
1367 				u8 sigb,
1368 				enum nfp_cpp_explicit_signal_mode sigb_mode)
1369 {
1370 	expl->cmd.posted = posted;
1371 	expl->cmd.siga = siga;
1372 	expl->cmd.sigb = sigb;
1373 	expl->cmd.siga_mode = siga_mode;
1374 	expl->cmd.sigb_mode = sigb_mode;
1375 
1376 	return 0;
1377 }
1378 
1379 /**
1380  * nfp_cpp_explicit_put() - Set up the write (pull) data for a explicit access
1381  * @expl:	NFP CPP Explicit handle
1382  * @buff:	Data to have the target pull in the transaction
1383  * @len:	Length of data, in bytes
1384  *
1385  * The 'len' parameter must be less than or equal to 128 bytes.
1386  *
1387  * If this function is called before the configuration
1388  * registers are set, it will return -EINVAL.
1389  *
1390  * Return: 0, or -ERRNO
1391  */
1392 int nfp_cpp_explicit_put(struct nfp_cpp_explicit *expl,
1393 			 const void *buff, size_t len)
1394 {
1395 	return NFP_EXPL_OP(explicit_put, expl, buff, len);
1396 }
1397 
1398 /**
1399  * nfp_cpp_explicit_do() - Execute a transaction, and wait for it to complete
1400  * @expl:	NFP CPP Explicit handle
1401  * @address:	Address to send in the explicit transaction
1402  *
1403  * If this function is called before the configuration
1404  * registers are set, it will return -EINVAL.
1405  *
1406  * Return: 0, or -ERRNO
1407  */
1408 int nfp_cpp_explicit_do(struct nfp_cpp_explicit *expl, u64 address)
1409 {
1410 	return NFP_EXPL_OP(explicit_do, expl, &expl->cmd, address);
1411 }
1412 
1413 /**
1414  * nfp_cpp_explicit_get() - Get the 'push' (read) data from a explicit access
1415  * @expl:	NFP CPP Explicit handle
1416  * @buff:	Data that the target pushed in the transaction
1417  * @len:	Length of data, in bytes
1418  *
1419  * The 'len' parameter must be less than or equal to 128 bytes.
1420  *
1421  * If this function is called before all three configuration
1422  * registers are set, it will return -EINVAL.
1423  *
1424  * If this function is called before nfp_cpp_explicit_do()
1425  * has completed, it will return -EBUSY.
1426  *
1427  * Return: 0, or -ERRNO
1428  */
1429 int nfp_cpp_explicit_get(struct nfp_cpp_explicit *expl, void *buff, size_t len)
1430 {
1431 	return NFP_EXPL_OP(explicit_get, expl, buff, len);
1432 }
1433 
1434 /**
1435  * nfp_cpp_explicit_release() - Release explicit access handle
1436  * @expl:	NFP CPP Explicit handle
1437  *
1438  */
1439 void nfp_cpp_explicit_release(struct nfp_cpp_explicit *expl)
1440 {
1441 	NFP_EXPL_OP_NR(explicit_release, expl);
1442 	kfree(expl);
1443 }
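/*
 * Sketch of a complete explicit transaction using only helpers from this
 * file: acquire a handle, program the command, push the pull-data, kick off
 * the transaction, read back the push-data and release.  cpp_id, addr, the
 * buffers/lengths and the NFP_SIGNAL_* mode values are placeholders assumed
 * to come from nfp_cpp.h and the caller.
 *
 *	expl = nfp_cpp_explicit_acquire(cpp);
 *	if (!expl)
 *		return -EBUSY;
 *
 *	nfp_cpp_explicit_set_target(expl, cpp_id, len, 0xff);
 *	nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PUSH,
 *				    0, NFP_SIGNAL_NONE);
 *	err = nfp_cpp_explicit_put(expl, out_buf, out_len);
 *	if (!err)
 *		err = nfp_cpp_explicit_do(expl, addr);
 *	if (!err)
 *		err = nfp_cpp_explicit_get(expl, in_buf, in_len);
 *	nfp_cpp_explicit_release(expl);
 */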
1444 
1445 /**
1446  * nfp_cpp_explicit_cpp() - return CPP handle for CPP explicit
1447  * @cpp_explicit:	CPP explicit handle
1448  *
1449  * Return: NFP CPP handle of the explicit
1450  */
1451 struct nfp_cpp *nfp_cpp_explicit_cpp(struct nfp_cpp_explicit *cpp_explicit)
1452 {
1453 	return cpp_explicit->cpp;
1454 }
1455 
1456 /**
1457  * nfp_cpp_explicit_priv() - return private struct for CPP explicit
1458  * @cpp_explicit:	CPP explicit handle
1459  *
1460  * Return: private data of the explicit, or NULL
1461  */
1462 void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit)
1463 {
1464 	return &cpp_explicit[1];
1465 }
1466