// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 *
 */

#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/fsl/dpaa2-global.h>

#include "qbman-portal.h"

/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((u32)0x80)

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI      0x800
#define QBMAN_CINH_SWP_EQCR_CI      0x840
#define QBMAN_CINH_SWP_EQAR    0x8c0
#define QBMAN_CINH_SWP_CR_RT        0x900
#define QBMAN_CINH_SWP_VDQCR_RT     0x940
#define QBMAN_CINH_SWP_EQCR_AM_RT   0x980
#define QBMAN_CINH_SWP_RCR_AM_RT    0x9c0
#define QBMAN_CINH_SWP_DQPI    0xa00
#define QBMAN_CINH_SWP_DCAP    0xac0
#define QBMAN_CINH_SWP_SDQCR   0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2  0xb40
#define QBMAN_CINH_SWP_RCR_PI       0xc00
#define QBMAN_CINH_SWP_RAR     0xcc0
#define QBMAN_CINH_SWP_ISR     0xe00
#define QBMAN_CINH_SWP_IER     0xe40
#define QBMAN_CINH_SWP_ISDR    0xe80
#define QBMAN_CINH_SWP_IIR     0xec0

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840
#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840

/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n)  (0x800 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR_MEM(n)   (0x1400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR_MEM       0x1600
#define QBMAN_CENA_SWP_RR_MEM       0x1680
#define QBMAN_CENA_SWP_VDQCR_MEM    0x1780

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)

/* Define token used to determine if response written to memory is valid */
#define QMAN_DQ_TOKEN_VALID 1

/* SDQCR attribute codes */
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb

#define QBMAN_EQCR_DCA_IDXMASK          0x0f
#define QBMAN_ENQUEUE_FLAG_DCA          (1ULL << 31)

#define EQ_DESC_SIZE_WITHOUT_FD 29
#define EQ_DESC_SIZE_FD_START 32

enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};

/* Internal function declarations */
static int qbman_swp_enqueue_direct(struct qbman_swp *s,
				    const struct qbman_eq_desc *d,
				    const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     uint32_t *flags,
					     int num_frames);
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct dpaa2_fd *fd,
					       uint32_t *flags,
					       int num_frames);
static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct dpaa2_fd *fd,
				       int num_frames);
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     int num_frames);
static int qbman_swp_pull_direct(struct qbman_swp *s,
				 struct qbman_pull_desc *d);
static int qbman_swp_pull_mem_back(struct qbman_swp *s,
				   struct qbman_pull_desc *d);

const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int qbman_swp_release_direct(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const u64 *buffers,
				    unsigned int num_buffers);
static int qbman_swp_release_mem_back(struct qbman_swp *s,
				      const struct qbman_release_desc *d,
				      const u64 *buffers,
				      unsigned int num_buffers);

/* Function pointers */
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd)
	= qbman_swp_enqueue_direct;

int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames)
	= qbman_swp_enqueue_multiple_direct;

int
(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct dpaa2_fd *fd,
				       int num_frames)
	= qbman_swp_enqueue_multiple_desc_direct;

int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
			= qbman_swp_pull_direct;

const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
			= qbman_swp_dqrr_next_direct;

int (*qbman_swp_release_ptr)(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers,
			     unsigned int num_buffers)
			= qbman_swp_release_direct;

/* Portal Access */

static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
{
	return readl_relaxed(p->addr_cinh + offset);
}

static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
					u32 value)
{
	writel_relaxed(value, p->addr_cinh + offset);
}

static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
{
	return p->addr_cena + offset;
}

#define QBMAN_CINH_SWP_CFG   0xd00

#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT     16
#define SWP_CFG_CPBS_SHIFT    15
#define SWP_CFG_WN_SHIFT      14
#define SWP_CFG_RPM_SHIFT     12
#define SWP_CFG_DCM_SHIFT     10
#define SWP_CFG_EPM_SHIFT     8
#define SWP_CFG_VPM_SHIFT     7
#define SWP_CFG_CPM_SHIFT     6
#define SWP_CFG_SD_SHIFT      5
#define SWP_CFG_SP_SHIFT      4
#define SWP_CFG_SE_SHIFT      3
#define SWP_CFG_DP_SHIFT      2
#define SWP_CFG_DE_SHIFT      1
#define SWP_CFG_EP_SHIFT      0

static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
				    u8 epm, int sd, int sp, int se,
				    int dp, int de, int ep)
{
	return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
		est << SWP_CFG_EST_SHIFT |
		wn << SWP_CFG_WN_SHIFT |
		rpm << SWP_CFG_RPM_SHIFT |
		dcm << SWP_CFG_DCM_SHIFT |
		epm << SWP_CFG_EPM_SHIFT |
		sd << SWP_CFG_SD_SHIFT |
		sp << SWP_CFG_SP_SHIFT |
		se << SWP_CFG_SE_SHIFT |
		dp << SWP_CFG_DP_SHIFT |
		de << SWP_CFG_DE_SHIFT |
		ep << SWP_CFG_EP_SHIFT);
}

#define QMAN_RT_MODE	   0x00000100

static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
{
	/* 'first' is included, 'last' is excluded */
	if (first <= last)
		return last - first;
	else
		return (2 * ringsize) - (first - last);
}

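/*
 * Worked example (informative only): the EQCR producer/consumer indices
 * carry one extra wrap bit, so with ringsize = 8 they count modulo 16
 * (see the pi_ci_mask construction in qbman_swp_init() below). Thus
 * qm_cyc_diff(8, 14, 2) = (2 * 8) - (14 - 2) = 4 outstanding entries.
 */
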
/**
 * qbman_swp_init() - Create a functional object representing the given
 *                    QBMan portal descriptor.
 * @d: the given qbman swp descriptor
 *
 * Return a qbman_swp portal on success, or NULL if the object cannot
 * be created.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
	struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
	u32 reg;
	u32 mask_size;
	u32 eqcr_pi;

	if (!p)
		return NULL;

	spin_lock_init(&p->access_spinlock);

	p->desc = d;
	p->mc.valid_bit = QB_VALID_BIT;
	p->sdq = 0;
	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
		p->mr.valid_bit = QB_VALID_BIT;

	atomic_set(&p->vdq.available, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.next_idx = 0;
	p->dqrr.valid_bit = QB_VALID_BIT;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
		p->dqrr.dqrr_size = 4;
		p->dqrr.reset_bug = 1;
	} else {
		p->dqrr.dqrr_size = 8;
		p->dqrr.reset_bug = 0;
	}

	p->addr_cena = d->cena_bar;
	p->addr_cinh = d->cinh_bar;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {

		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
			1, /* Writes Non-cacheable */
			0, /* EQCR_CI stashing threshold */
			3, /* RPM: RCR in array mode */
			2, /* DCM: Discrete consumption ack */
			2, /* EPM: EQCR in ring mode */
			1, /* mem stashing drop enable */
			1, /* mem stashing priority enable */
			1, /* mem stashing enable */
			1, /* dequeue stashing priority enable */
			0, /* dequeue stashing enable */
			0); /* EQCR_CI stashing priority enable */
	} else {
		memset(p->addr_cena, 0, 64 * 1024);
		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
			1, /* Writes Non-cacheable */
			1, /* EQCR_CI stashing threshold */
			3, /* RPM: RCR in array mode */
			2, /* DCM: Discrete consumption ack */
			0, /* EPM: EQCR in ring mode */
			1, /* mem stashing drop enable */
			1, /* mem stashing priority enable */
			1, /* mem stashing enable */
			1, /* dequeue stashing priority enable */
			0, /* dequeue stashing enable */
			0); /* EQCR_CI stashing priority enable */
		reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
		       1 << SWP_CFG_VPM_SHIFT |  /* VDQCR read triggered mode */
		       1 << SWP_CFG_CPM_SHIFT;   /* CR read triggered mode */
	}

	qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
	reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
	if (!reg) {
		pr_err("qbman: the portal is not enabled!\n");
		kfree(p);
		return NULL;
	}

	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
		qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
	}
	/*
	 * SDQCR needs to be initialized to 0 when no channels are
	 * being dequeued from or else the QMan HW will indicate an
	 * error.  The values that were calculated above will be
	 * applied when dequeues from a specific channel are enabled.
	 */
	qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);

	p->eqcr.pi_ring_size = 8;
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		p->eqcr.pi_ring_size = 32;
		qbman_swp_enqueue_ptr =
			qbman_swp_enqueue_mem_back;
		qbman_swp_enqueue_multiple_ptr =
			qbman_swp_enqueue_multiple_mem_back;
		qbman_swp_enqueue_multiple_desc_ptr =
			qbman_swp_enqueue_multiple_desc_mem_back;
		qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
		qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
		qbman_swp_release_ptr = qbman_swp_release_mem_back;
	}

	for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
		p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
	eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
	p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
	p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
	p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
			& p->eqcr.pi_ci_mask;
	p->eqcr.available = p->eqcr.pi_ring_size;

	return p;
}

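/*
 * Illustrative sketch only (not part of the upstream driver): the usual
 * bring-up sequence around qbman_swp_init(). In practice the DPIO driver
 * owns the descriptor and the mapped portal memory; masking interrupts
 * until a handler is installed is an assumption about a typical consumer,
 * not something this file mandates.
 */
static __maybe_unused struct qbman_swp *
qbman_example_bringup(const struct qbman_swp_desc *d)
{
	struct qbman_swp *p = qbman_swp_init(d);

	if (!p)
		return NULL;

	/* keep interrupts masked until an IRQ handler is wired up */
	qbman_swp_interrupt_set_inhibit(p, 1);

	return p;
}
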
/**
 * qbman_swp_finish() - Destroy the functional object representing the
 *                      given QBMan portal descriptor.
 * @p: the qbman_swp object to be destroyed
 */
void qbman_swp_finish(struct qbman_swp *p)
{
	kfree(p);
}

/**
 * qbman_swp_interrupt_read_status()
 * @p: the given software portal
 *
 * Return the value in the SWP_ISR register.
 */
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
}

/**
 * qbman_swp_interrupt_clear_status()
 * @p: the given software portal
 * @mask: The mask to clear in SWP_ISR register
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
}

/**
 * qbman_swp_interrupt_get_trigger() - read interrupt enable register
 * @p: the given software portal
 *
 * Return the value in the SWP_IER register.
 */
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IER);
}

/**
 * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
 * @p: the given software portal
 * @mask: The mask of bits to enable in SWP_IER
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
}

/**
 * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
 * @p: the given software portal object
 *
 * Return the value in the SWP_IIR register.
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
}

/**
 * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
 * @p: the given software portal object
 * @inhibit: nonzero to inhibit all interrupt sources, zero to uninhibit
 */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}

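/*
 * Illustrative sketch only: how the interrupt helpers above combine in a
 * poll-style handler. Whether real work is deferred to NAPI or a thread is
 * the caller's choice; this only shows the read/clear ordering.
 */
static __maybe_unused void qbman_example_handle_irq(struct qbman_swp *p)
{
	u32 status = qbman_swp_interrupt_read_status(p);

	if (!status)
		return;

	/* a real handler would process DQRR/EQCR work here */

	/* clear only the sources that were actually observed */
	qbman_swp_interrupt_clear_status(p, status);
}
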
/*
 * Different management commands all use this common base layer of code to issue
 * commands and poll for results.
 */

/*
 * Returns a pointer to where the caller should fill in their management command
 * (caller should ignore the verb byte)
 */
void *qbman_swp_mc_start(struct qbman_swp *p)
{
	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
	else
		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
}

/*
 * Merges in the caller-supplied command verb (which should not include the
 * valid-bit) and submits the command to hardware
 */
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{
	u8 *v = cmd;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		dma_wmb();
		*v = cmd_verb | p->mc.valid_bit;
	} else {
		*v = cmd_verb | p->mc.valid_bit;
		dma_wmb();
		qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
	}
}

/*
 * Checks for a completed response (returns non-NULL if and only if the
 * response is complete).
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
{
	u32 *ret, verb;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		/* Remove the valid-bit - command completed if the rest
		 * is non-zero.
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mc.valid_bit ^= QB_VALID_BIT;
	} else {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
		/* Command completed if the valid bit is toggled */
		if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
			return NULL;
		/* Command completed if the rest is non-zero */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mr.valid_bit ^= QB_VALID_BIT;
	}

	return ret;
}

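/*
 * Illustrative sketch only: start/submit/result are normally combined into
 * a single blocking operation. The real combined helper is
 * qbman_swp_mc_complete() in qbman-portal.h (used by the query/configure
 * commands later in this file); this restatement of the pattern, with an
 * arbitrary poll bound, just makes the flow explicit.
 */
static __maybe_unused void *qbman_example_mc_complete(struct qbman_swp *p,
						      void *cmd, u8 cmd_verb)
{
	int loopvar = 2000;	/* arbitrary bound so a dead portal can't hang us */
	void *ret;

	qbman_swp_mc_submit(p, cmd, cmd_verb);
	do {
		ret = qbman_swp_mc_result(p);
	} while (!ret && --loopvar);

	return ret;	/* NULL on timeout */
}
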
#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
enum qb_enqueue_commands {
	enqueue_empty = 0,
	enqueue_response_always = 1,
	enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7

/**
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 *                         default/starting state.
 * @d: the enqueue descriptor to be cleared
 */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}

/**
 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 * @d:               the enqueue descriptor.
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 *                   rejections returned on a FQ.
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
	if (respond_success)
		d->verb |= enqueue_response_always;
	else
		d->verb |= enqueue_rejects_to_fq;
}

/*
 * Exactly one of the following descriptor "targets" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 *   -enqueue to a frame queue
 *   -enqueue to a queuing destination
 */

/**
 * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
 * @d:    the enqueue descriptor
 * @fqid: the id of the frame queue to be enqueued
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
	d->tgtid = cpu_to_le32(fqid);
}

/**
 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
 * @d:       the enqueue descriptor
 * @qdid:    the id of the queuing destination to be enqueued
 * @qd_bin:  the queuing destination bin
 * @qd_prio: the queuing destination priority
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio)
{
	d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
	d->tgtid = cpu_to_le32(qdid);
	d->qdbin = cpu_to_le16(qd_bin);
	d->qpri = qd_prio;
}

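/*
 * Illustrative sketch only: building an enqueue descriptor with the setters
 * above and issuing it. The FQID is a hypothetical placeholder; real
 * callers normally go through the qbman_swp_enqueue() wrapper in
 * qbman-portal.h rather than the function pointer used here.
 */
static __maybe_unused int qbman_example_enqueue(struct qbman_swp *s,
						const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc d;

	qbman_eq_desc_clear(&d);
	qbman_eq_desc_set_no_orp(&d, 0);	/* rejections returned on a FQ */
	qbman_eq_desc_set_fq(&d, 0x100);	/* hypothetical FQID */

	return qbman_swp_enqueue_ptr(s, &d, fd);
}
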
#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

#define QB_RT_BIT ((u32)0x100)
/**
 * qbman_swp_enqueue_direct() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_direct(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd)
{
	u32 flags = 0;
	int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);

	if (ret >= 0)
		ret = 0;
	else
		ret = -EBUSY;
	return ret;
}

/**
 * qbman_swp_enqueue_mem_back() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
			       const struct qbman_eq_desc *d,
			       const struct dpaa2_fd *fd)
{
	u32 flags = 0;
	int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);

	if (ret >= 0)
		ret = 0;
	else
		ret = -EBUSY;
	return ret;
}

/**
 * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)d;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	spin_lock(&s->access_spinlock);
	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;

	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.ci &= full_mask;

		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			spin_unlock(&s->access_spinlock);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cachelines without load/store in between */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++)
		eqcr_pi++;
	s->eqcr.pi = eqcr_pi & full_mask;
	spin_unlock(&s->access_spinlock);

	return num_enqueued;
}

/**
 * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
					const struct dpaa2_fd *fd,
					uint32_t *flags,
					int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	unsigned long irq_flags;

	spin_lock_irqsave(&s->access_spinlock, irq_flags);

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = *p & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
	spin_unlock_irqrestore(&s->access_spinlock, irq_flags);

	return num_enqueued;
}

/**
 * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s:  the software portal used for enqueue
 * @d:  table of minimal enqueue descriptors
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct dpaa2_fd *fd,
					   int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cachelines without load/store in between */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++)
		eqcr_pi++;
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}

/**
 * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s:  the software portal used for enqueue
 * @d:  table of minimal enqueue descriptors
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = *p & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);

	return num_enqueued;
}

/* Static (push) dequeue */

/**
 * qbman_swp_push_get() - Get the push dequeue setup
 * @s:           the software portal object
 * @channel_idx: the channel index to query
 * @enabled:     returned boolean to show whether the push dequeue is enabled
 *               for the given channel
 */
void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
{
	u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

	WARN_ON(channel_idx > 15);
	*enabled = !!(src & (1 << channel_idx));
}

/**
 * qbman_swp_push_set() - Enable or disable push dequeue
 * @s:           the software portal object
 * @channel_idx: the channel index (0 to 15)
 * @enable:      enable or disable push dequeue
 */
void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
{
	u16 dqsrc;

	WARN_ON(channel_idx > 15);
	if (enable)
		s->sdq |= 1 << channel_idx;
	else
		s->sdq &= ~(1 << channel_idx);

	/* Rebuild the complete src map.  If no channels are enabled
	 * the SDQCR must be 0 or else QMan will assert errors
	 */
	dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
	if (dqsrc != 0)
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
	else
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
}

#define QB_VDQCR_VERB_DCT_SHIFT    0
#define QB_VDQCR_VERB_DT_SHIFT     2
#define QB_VDQCR_VERB_RLS_SHIFT    4
#define QB_VDQCR_VERB_WAE_SHIFT    5

enum qb_pull_dt_e {
	qb_pull_dt_channel,
	qb_pull_dt_workqueue,
	qb_pull_dt_framequeue
};

/**
 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 *                           default/starting state
 * @d: the pull dequeue descriptor to be cleared
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
	memset(d, 0, sizeof(*d));
}

/**
 * qbman_pull_desc_set_storage()- Set the pull dequeue storage
 * @d:            the pull dequeue descriptor to be set
 * @storage:      the pointer of the memory to store the dequeue result
 * @storage_phys: the physical address of the storage memory
 * @stash:        to indicate whether write allocate is enabled
 *
 * If not called, or if called with 'storage' as NULL, the resulting pull
 * dequeues will produce results to DQRR. If 'storage' is non-NULL, then results
 * are produced to the given memory location (using the DMA address which
 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
 * those writes to main-memory express a cache-warming attribute.
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash)
{
	/* save the virtual address */
	d->rsp_addr_virt = (u64)(uintptr_t)storage;

	if (!storage) {
		d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
		return;
	}
	d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
	if (stash)
		d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
	else
		d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

	d->rsp_addr = cpu_to_le64(storage_phys);
}

/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
 * @d:         the pull dequeue descriptor to be set
 * @numframes: number of frames to be set, must be between 1 and 16, inclusive
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
{
	d->numf = numframes - 1;
}

/*
 * Exactly one of the following descriptor "actions" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */

/**
 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @fqid: the frame queue index of the given FQ
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
{
	d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(fqid);
}

/**
 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @wqid: composed of channel id and wqid within the channel
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(wqid);
}

/**
 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
 *                                 dequeues
 * @d:    the pull dequeue descriptor to be set
 * @chid: the channel id to be dequeued
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(chid);
}

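/*
 * Illustrative sketch only: a volatile (pull) dequeue into caller-provided
 * storage using the descriptor setters above. 'storage' and 'storage_phys'
 * are hypothetical and would come from a DMA-able allocation owned by the
 * caller; real callers issue the pull through the qbman_swp_pull() wrapper
 * in qbman-portal.h rather than the function pointer used here.
 */
static __maybe_unused int qbman_example_pull(struct qbman_swp *s,
					     struct dpaa2_dq *storage,
					     dma_addr_t storage_phys,
					     u32 fqid)
{
	struct qbman_pull_desc d;

	qbman_pull_desc_clear(&d);
	qbman_pull_desc_set_storage(&d, storage, storage_phys, 1);
	qbman_pull_desc_set_numframes(&d, 16);	/* 1..16 frames */
	qbman_pull_desc_set_fq(&d, fqid);

	return qbman_swp_pull_ptr(s, &d);
}
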
/**
 * qbman_swp_pull_direct() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static
int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;
	dma_wmb();
	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;

	return 0;
}

/**
 * qbman_swp_pull_mem_back() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static
int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;

	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);

	return 0;
}

#define QMAN_DQRR_PI_MASK   0xf

/**
 * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip this
	 * check, because we've already determined that a new entry is available
	 * and we've invalidated the cacheline before reading it, so the
	 * valid-bit behaviour is repaired and should tell us what we already
	 * knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command
	 * indicate that the vdq is available
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

	return p;
}

/**
 * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip this
	 * check, because we've already determined that a new entry is available
	 * and we've invalidated the cacheline before reading it, so the
	 * valid-bit behaviour is repaired and should tell us what we already
	 * knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command
	 * indicate that the vdq is available
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

	return p;
}

/**
 * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
 *                            qbman_swp_dqrr_next().
 * @s: the software portal object
 * @dq: the DQRR entry to be consumed
 */
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}

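/*
 * Illustrative sketch only: the usual DQRR consumption loop. Real callers
 * use the qbman_swp_dqrr_next() wrapper from qbman-portal.h; the function
 * pointer is used directly here only to stay within this file's names, and
 * what is done with each frame is hypothetical.
 */
static __maybe_unused void qbman_example_poll_dqrr(struct qbman_swp *s)
{
	const struct dpaa2_dq *dq;

	while ((dq = qbman_swp_dqrr_next_ptr(s))) {
		/* a real consumer would process dpaa2_dq_fd(dq) here */
		qbman_swp_dqrr_consume(s, dq);
	}
}
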
/**
 * qbman_result_has_new_result() - Check and get the dequeue response from the
 *                                 dq storage memory set in the pull dequeue
 *                                 command
 * @s: the software portal object
 * @dq: the dequeue result read from the memory
 *
 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 * dequeue result.
 *
 * Only used for user-provided storage of dequeue results, not DQRR. For
 * efficiency purposes, the driver will perform any required endianness
 * conversion to ensure that the user's dequeue result storage is in host-endian
 * format. As such, once the user has called qbman_result_has_new_result() and
 * been returned a valid dequeue result, they should not call it again on
 * the same memory location (except of course if another dequeue command has
 * been executed to produce a new result to that location).
 */
int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
		return 0;

	/*
	 * Set token to be 0 so we will detect change back to 1
	 * next time the looping is traversed. Const is cast away here
	 * as we want users to treat the dequeue responses as read only.
	 */
	((struct dpaa2_dq *)dq)->dq.tok = 0;

	/*
	 * Determine whether VDQCR is available based on whether the
	 * current result is sitting in the first storage location of
	 * the busy command.
	 */
	if (s->vdq.storage == dq) {
		s->vdq.storage = NULL;
		atomic_inc(&s->vdq.available);
	}

	return 1;
}

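/*
 * Illustrative sketch only: after a pull dequeue into memory (see the pull
 * sketch earlier in this file), the caller polls the storage until the
 * token becomes valid. The retry bound is an arbitrary assumption; a real
 * caller would back off or sleep between polls rather than spin like this.
 */
static __maybe_unused const struct dpaa2_dq *
qbman_example_wait_result(struct qbman_swp *s, struct dpaa2_dq *storage)
{
	int retries = 1000;	/* arbitrary poll bound */

	while (retries--) {
		if (qbman_result_has_new_result(s, storage))
			return storage;
	}

	return NULL;	/* no valid result within the bound */
}
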
/**
 * qbman_release_desc_clear() - Clear the contents of a descriptor to
 *                              default/starting state.
 * @d: the release descriptor to be cleared
 */
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
	memset(d, 0, sizeof(*d));
	d->verb = 1 << 5; /* Release Command Valid */
}

/**
 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
 * @d:    the release descriptor
 * @bpid: the buffer pool ID
 */
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
{
	d->bpid = cpu_to_le16(bpid);
}

/**
 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
 * interrupt source should be asserted after the release command is completed.
 * @d:      the release descriptor
 * @enable: whether to assert the RCDI interrupt source
 */
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
	if (enable)
		d->verb |= 1 << 6;
	else
		d->verb &= ~(1 << 6);
}

#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

/**
 * qbman_swp_release_direct() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer to the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be between 1 and 7,
 *               inclusive
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release_direct(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	/*
	 * Set the verb byte, have to substitute in the valid-bit
	 * and the number of buffers.
	 */
	dma_wmb();
	p->verb = d->verb | RAR_VB(rar) | num_buffers;

	return 0;
}

/**
 * qbman_swp_release_mem_back() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer to the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be between 1 and 7,
 *               inclusive
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release_mem_back(struct qbman_swp *s,
			       const struct qbman_release_desc *d,
			       const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	p->verb = d->verb | RAR_VB(rar) | num_buffers;
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
			     RAR_IDX(rar) * 4, QMAN_RT_MODE);

	return 0;
}

struct qbman_acquire_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 num;
	u8 reserved2[59];
};

struct qbman_acquire_rslt {
	u8 verb;
	u8 rslt;
	__le16 reserved;
	u8 num;
	u8 reserved2[3];
	__le64 buf[7];
};

/**
 * qbman_swp_acquire() - Issue a buffer acquire command
 * @s:           the software portal object
 * @bpid:        the buffer pool index
 * @buffers:     a pointer to where the acquired buffer addresses will be
 *               written
 * @num_buffers: number of buffers to be acquired, must be between 1 and 7,
 *               inclusive
 *
 * Return the number of buffers acquired, or a negative error code if the
 * acquire command fails.
 */
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
		      unsigned int num_buffers)
{
	struct qbman_acquire_desc *p;
	struct qbman_acquire_rslt *r;
	int i;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);

	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->bpid = cpu_to_le16(bpid);
	p->num = num_buffers;

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
	if (unlikely(!r)) {
		pr_err("qbman: acquire from BPID %d failed, no response\n",
		       bpid);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	WARN_ON(r->num > num_buffers);

	/* Copy the acquired buffers to the caller's array */
	for (i = 0; i < r->num; i++)
		buffers[i] = le64_to_cpu(r->buf[i]);

	return (int)r->num;
}

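/*
 * Illustrative sketch only: a buffer-pool round trip built from the acquire
 * and release commands above. The BPID comes from the caller and is
 * hypothetical; real users go through the qbman_swp_release() wrapper in
 * qbman-portal.h rather than the function pointer used here.
 */
static __maybe_unused int qbman_example_pool_roundtrip(struct qbman_swp *s,
						       u16 bpid)
{
	struct qbman_release_desc rd;
	u64 buffers[7];
	int num;

	num = qbman_swp_acquire(s, bpid, buffers, 7);
	if (num <= 0)
		return num ? num : -ENOBUFS;	/* pool empty or error */

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);

	/* give the acquired buffers straight back to the same pool */
	return qbman_swp_release_ptr(s, &rd, buffers, num);
}
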
struct qbman_alt_fq_state_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};

struct qbman_alt_fq_state_rslt {
	u8 verb;
	u8 rslt;
	u8 reserved[62];
};

#define ALT_FQ_FQID_MASK 0x00FFFFFF

int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
			   u8 alt_fq_verb)
{
	struct qbman_alt_fq_state_desc *p;
	struct qbman_alt_fq_state_rslt *r;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, alt_fq_verb);
	if (unlikely(!r)) {
		pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
		       alt_fq_verb);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
		       fqid, r->verb, r->rslt);
		return -EIO;
	}

	return 0;
}

struct qbman_cdan_ctrl_desc {
	u8 verb;
	u8 reserved;
	__le16 ch;
	u8 we;
	u8 ctrl;
	__le16 reserved2;
	__le64 cdan_ctx;
	u8 reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
	u8 verb;
	u8 rslt;
	__le16 ch;
	u8 reserved[60];
};

int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
		       u8 we_mask, u8 cdan_en,
		       u64 ctx)
{
	struct qbman_cdan_ctrl_desc *p = NULL;
	struct qbman_cdan_ctrl_rslt *r = NULL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->ch = cpu_to_le16(channelid);
	p->we = we_mask;
	if (cdan_en)
		p->ctrl = 1;
	else
		p->ctrl = 0;
	p->cdan_ctx = cpu_to_le64(ctx);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
	if (unlikely(!r)) {
		pr_err("qbman: wqchan config failed, no response\n");
		return -EIO;
	}

	WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
		       channelid, r->rslt);
		return -EIO;
	}

	return 0;
}

#define QBMAN_RESPONSE_VERB_MASK	0x7f
#define QBMAN_FQ_QUERY_NP		0x45
#define QBMAN_BP_QUERY			0x32

struct qbman_fq_query_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};

int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
			 struct qbman_fq_query_np_rslt *r)
{
	struct qbman_fq_query_desc *p;
	void *resp;

	p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* FQID is a 24 bit value */
	p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
	resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
	if (!resp) {
		pr_err("qbman: Query FQID %d NP fields failed, no response\n",
		       fqid);
		return -EIO;
	}
	*r = *(struct qbman_fq_query_np_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
		       fqid, r->rslt);
		return -EIO;
	}

	return 0;
}

u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
{
	return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
}

u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
{
	return le32_to_cpu(r->byte_cnt);
}

struct qbman_bp_query_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 reserved2[60];
};

int qbman_bp_query(struct qbman_swp *s, u16 bpid,
		   struct qbman_bp_query_rslt *r)
{
	struct qbman_bp_query_desc *p;
	void *resp;

	p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->bpid = cpu_to_le16(bpid);
	resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
	if (!resp) {
		pr_err("qbman: Query BPID %d fields failed, no response\n",
		       bpid);
		return -EIO;
	}
	*r = *(struct qbman_bp_query_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	return 0;
}

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
{
	return le32_to_cpu(a->fill);
}