/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 *
 */
#ifndef __FSL_QBMAN_PORTAL_H
#define __FSL_QBMAN_PORTAL_H

#include <soc/fsl/dpaa2-fd.h>

#define QMAN_REV_4000   0x04000000
#define QMAN_REV_4100   0x04010000
#define QMAN_REV_4101   0x04010001
#define QMAN_REV_5000   0x05000000

#define QMAN_REV_MASK   0xffff0000

struct dpaa2_dq;
struct qbman_swp;

/* qbman software portal descriptor structure */
struct qbman_swp_desc {
	void *cena_bar; /* Cache-enabled portal base address */
	void __iomem *cinh_bar; /* Cache-inhibited portal base address */
	u32 qman_version;
};

#define QBMAN_SWP_INTERRUPT_EQRI 0x01
#define QBMAN_SWP_INTERRUPT_EQDI 0x02
#define QBMAN_SWP_INTERRUPT_DQRI 0x04
#define QBMAN_SWP_INTERRUPT_RCRI 0x08
#define QBMAN_SWP_INTERRUPT_RCDI 0x10
#define QBMAN_SWP_INTERRUPT_VDCI 0x20

/* the structure for pull dequeue descriptor */
struct qbman_pull_desc {
	u8 verb;
	u8 numf;
	u8 tok;
	u8 reserved;
	__le32 dq_src;
	__le64 rsp_addr;
	u64 rsp_addr_virt;
	u8 padding[40];
};

enum qbman_pull_type_e {
	/* dequeue with priority precedence, respect intra-class scheduling */
	qbman_pull_type_prio = 1,
	/* dequeue with active FQ precedence, respect ICS */
	qbman_pull_type_active,
	/* dequeue with active FQ precedence, no ICS */
	qbman_pull_type_active_noics
};

/* Definitions for parsing dequeue entries */
#define QBMAN_RESULT_MASK      0x7f
#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE	0x48
#define QBMAN_FQ_FORCE		0x49
#define QBMAN_FQ_XON		0x4d
#define QBMAN_FQ_XOFF		0x4e

/* structure of enqueue descriptor */
struct qbman_eq_desc {
	u8 verb;
	u8 dca;
	__le16 seqnum;
	__le16 orpid;
	__le16 reserved1;
	__le32 tgtid;
	__le32 tag;
	__le16 qdbin;
	u8 qpri;
	u8 reserved[3];
	u8 wae;
	u8 rspid;
	__le64 rsp_addr;
};

struct qbman_eq_desc_with_fd {
	struct qbman_eq_desc desc;
	u8 fd[32];
};

/* buffer release descriptor */
struct qbman_release_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	__le32 reserved2;
	__le64 buf[7];
};

/* Management command result codes */
#define QBMAN_MC_RSLT_OK      0xf0

#define CODE_CDAN_WE_EN    0x1
#define CODE_CDAN_WE_CTX   0x4

/* portal data structure */
struct qbman_swp {
	const struct qbman_swp_desc *desc;
	void *addr_cena;
	void __iomem *addr_cinh;

	/* Management commands */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mc;

	/* Management response */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mr;

	/* Push dequeues */
	u32 sdq;

	/* Volatile dequeues */
	struct {
		atomic_t available; /* indicates if a command can be sent */
		u32 valid_bit; /* 0x00 or 0x80 */
		struct dpaa2_dq *storage; /* NULL if DQRR */
	} vdq;

	/* DQRR */
	struct {
		u32 next_idx;
		u32 valid_bit;
		u8 dqrr_size;
		int reset_bug; /* indicates dqrr reset workaround is needed */
	} dqrr;

	struct {
		u32 pi;
		u32 pi_vb;
		u32 pi_ring_size;
		u32 pi_ci_mask;
		u32 ci;
		int available;
		u32 pend;
		u32 no_pfdr;
	} eqcr;

	spinlock_t access_spinlock;
};

/* Function pointers */
extern
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd);
extern
int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames);
extern
int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct dpaa2_fd *fd,
					   int num_frames);
extern
int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d);
extern
const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s);
extern
int (*qbman_swp_release_ptr)(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers,
			     unsigned int num_buffers);

/* Functions */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
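
/*
 * Illustrative sketch, not part of the original header: acknowledging
 * portal interrupts, e.g. from an IRQ handler. The portal @p is assumed
 * to have been obtained from qbman_swp_init().
 */
static inline void example_swp_ack_irq(struct qbman_swp *p)
{
	u32 status = qbman_swp_interrupt_read_status(p);

	/* QBMAN_SWP_INTERRUPT_DQRI set here means DQRR entries are pending */
	qbman_swp_interrupt_clear_status(p, status);
}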

void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);

void qbman_pull_desc_clear(struct qbman_pull_desc *d);
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash);
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct);
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct);

void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);

int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);

void qbman_eq_desc_clear(struct qbman_eq_desc *d);
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio);

void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);

int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
		      unsigned int num_buffers);
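
/*
 * Illustrative sketch, not part of the original header: draining buffers
 * from pool @bpid. qbman_swp_acquire() writes the acquired buffer
 * addresses to @addrs and returns how many it got, or a negative errno.
 */
static inline int example_acquire_bufs(struct qbman_swp *s, u16 bpid,
				       u64 *addrs)
{
	/* returns 0..7 (addresses written to @addrs) or a negative errno */
	return qbman_swp_acquire(s, bpid, addrs, 7);
}
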
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
			   u8 alt_fq_verb);
int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
		       u8 we_mask, u8 cdan_en,
		       u64 ctx);

void *qbman_swp_mc_start(struct qbman_swp *p);
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);

/**
 * qbman_swp_enqueue() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static inline int
qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		  const struct dpaa2_fd *fd)
{
	return qbman_swp_enqueue_ptr(s, d, fd);
}
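
/*
 * Illustrative sketch, not part of the original header: enqueue a single
 * frame to frame queue @fqid. The portal @s and frame descriptor @fd are
 * assumed to have been set up by the caller.
 */
static inline int example_enqueue_one(struct qbman_swp *s, u32 fqid,
				      const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);	/* no enqueue response wanted */
	qbman_eq_desc_set_fq(&ed, fqid);	/* target an FQ, not a QD */

	return qbman_swp_enqueue(s, &ed, fd);	/* 0 or -EBUSY */
}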

/**
 * qbman_swp_enqueue_multiple() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: pointer to the table of frame descriptors to be enqueued
 * @flags: pointer to the table of QBMAN_ENQUEUE_FLAG_DCA flags, ignored if NULL
 * @num_frames: number of frame descriptors to be enqueued
 *
 * Return the number of frame descriptors enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple(struct qbman_swp *s,
			   const struct qbman_eq_desc *d,
			   const struct dpaa2_fd *fd,
			   uint32_t *flags,
			   int num_frames)
{
	return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
}
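
/*
 * Illustrative sketch, not part of the original header: push a table of
 * @num frames through one enqueue descriptor, retrying until the EQCR has
 * accepted them all. A production caller would bound the retries.
 */
static inline int example_enqueue_table(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
					const struct dpaa2_fd *fds, int num)
{
	int done = 0;

	while (done < num) {
		/* NULL flags: no DCA; returns how many frames were taken */
		int ret = qbman_swp_enqueue_multiple(s, d, fds + done,
						     NULL, num - done);
		if (ret < 0)
			return ret;	/* a real error, not backpressure */
		done += ret;
	}
	return done;
}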

/**
 * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s:  the software portal used for enqueue
 * @d:  table of minimal enqueue descriptors
 * @fd: pointer to the table of frame descriptors to be enqueued
 * @num_frames: number of frame descriptors to be enqueued
 *
 * Return the number of frame descriptors enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
				const struct qbman_eq_desc *d,
				const struct dpaa2_fd *fd,
				int num_frames)
{
	return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
}

/**
 * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
 * @dq: the dequeue result to be checked
 *
 * DQRR entries may contain non-dequeue results, i.e. notifications
 */
static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
}

/**
 * qbman_result_is_SCN() - check whether the dequeue result is a notification
 * @dq: the dequeue result to be checked
 */
static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
{
	return !qbman_result_is_DQ(dq);
}

/* FQ Data Availability */
static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
}

/* Channel Data Availability */
static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
}

/* Congestion State Change */
static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
}

/* Buffer Pool State Change */
static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
}

/* Congestion Group Count Update */
static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
}

/* Retirement */
static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
}

/* Retirement Immediate */
static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
}

/* Park */
static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
}

/**
 * qbman_result_SCN_state() - Get the state field in State-change notification
 */
static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
{
	return scn->scn.state;
}

#define SCN_RID_MASK 0x00FFFFFF

/**
 * qbman_result_SCN_rid() - Get the resource id in State-change notification
 */
static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
{
	return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
}

/**
 * qbman_result_SCN_ctx() - Get the context data in State-change notification
 */
static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
{
	return le64_to_cpu(scn->scn.ctx);
}

/**
 * qbman_swp_fq_schedule() - Move the fq to the scheduled state
 * @s:    the software portal object
 * @fqid: the index of frame queue to be scheduled
 *
 * There are a couple of different ways that a FQ can end up in the parked
 * state; this schedules it.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}

/**
 * qbman_swp_fq_force() - Force the FQ to fully scheduled state
 * @s:    the software portal object
 * @fqid: the index of frame queue to be forced
 *
 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
 * and thus be available for selection by any channel-dequeuing behaviour (push
 * or pull). If the FQ is subsequently "dequeued" from the channel and is still
 * empty at the time this happens, the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}

/**
 * qbman_swp_fq_xon() - sets FQ flow-control to XON
 * @s:    the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}

/**
 * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
 * @s:    the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 * XOFF FQs will remain in the tentatively-scheduled state, even when
 * non-empty, meaning they won't be selected for scheduled dequeuing.
 * If a FQ is changed to XOFF after it had already become truly-scheduled
 * to a channel, and a pull dequeue of that channel occurs that selects
 * that FQ for dequeuing, then the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}

/* If the user has been allocated a channel object that is going to generate
 * CDANs to another channel, then the qbman_swp_CDAN* functions will be
 * necessary.
 *
 * CDAN-enabled channels only generate a single CDAN notification, after which
 * they need to be reenabled before they'll generate another. The idea is
 * that pull dequeuing will occur in reaction to the CDAN, followed by a
 * reenable step. Each function generates a distinct command to hardware, so a
 * combination function is provided if the user wishes to modify the "context"
 * (which shows up in each CDAN message) each time they reenable, as a single
 * command to hardware. A sketch of this workflow follows the CDAN functions
 * below.
 */

/**
 * qbman_swp_CDAN_set_context() - Set CDAN context
 * @s:         the software portal object
 * @channelid: the channel index
 * @ctx:       the context to be set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
					     u64 ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_CTX,
				  0, ctx);
}

/**
 * qbman_swp_CDAN_enable() - Enable CDAN for the channel
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  1, 0);
}

/**
 * qbman_swp_CDAN_disable() - disable CDAN for the channel
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  0, 0);
}

/**
 * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 * @ctx:       the context to be set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
						    u16 channelid,
						    u64 ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
				  1, ctx);
}
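
/*
 * Illustrative sketch, not part of the original header, of the CDAN
 * workflow described above: arm the channel once, and rearm it after each
 * notification has been serviced. @chid and @ctx are caller-owned values.
 */
static inline int example_cdan_workflow(struct qbman_swp *s, u16 chid, u64 ctx)
{
	/* one hardware command: write the context and set the enable bit */
	int ret = qbman_swp_CDAN_set_context_enable(s, chid, ctx);

	if (ret)
		return ret;

	/*
	 * ...later, when the CDAN arrives, pull-dequeue the channel, then
	 * rearm with qbman_swp_CDAN_enable(s, chid) to get the next one.
	 */
	return 0;
}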

/* Wraps up submit + poll-for-result */
static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
					  u8 cmd_verb)
{
	int loopvar = 2000;

	qbman_swp_mc_submit(swp, cmd, cmd_verb);

	do {
		cmd = qbman_swp_mc_result(swp);
	} while (!cmd && loopvar--);

	WARN_ON(!loopvar);

	return cmd;
}
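
/*
 * Illustrative sketch, not part of the original header, of the management
 * command pattern that qbman_swp_mc_complete() wraps: claim the command
 * buffer, fill it in, then submit and poll. EXAMPLE_MC_VERB is a
 * hypothetical command code, not a real one.
 */
#define EXAMPLE_MC_VERB 0x00	/* placeholder verb for illustration only */
static inline int example_mc_command(struct qbman_swp *p)
{
	u8 *cmd = qbman_swp_mc_start(p);
	u8 *rslt;

	if (!cmd)
		return -EBUSY;	/* a previous command is still outstanding */

	/* ...fill in command-specific fields of cmd[] here... */

	rslt = qbman_swp_mc_complete(p, cmd, EXAMPLE_MC_VERB);
	if (!rslt)
		return -EIO;	/* polling timed out */

	/* the result byte follows the verb, as in the query structs below */
	return rslt[1] == QBMAN_MC_RSLT_OK ? 0 : -EIO;
}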

/* Query APIs */
struct qbman_fq_query_np_rslt {
	u8 verb;
	u8 rslt;
	u8 st1;
	u8 st2;
	u8 reserved[2];
	__le16 od1_sfdr;
	__le16 od2_sfdr;
	__le16 od3_sfdr;
	__le16 ra1_sfdr;
	__le16 ra2_sfdr;
	__le32 pfdr_hptr;
	__le32 pfdr_tptr;
	__le32 frm_cnt;
	__le32 byte_cnt;
	__le16 ics_surp;
	u8 is;
	u8 reserved2[29];
};

int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
			 struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
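
/*
 * Illustrative sketch, not part of the original header: read the current
 * backlog of frame queue @fqid via the query API above.
 */
static inline int example_fq_backlog(struct qbman_swp *s, u32 fqid,
				     u32 *frames, u32 *bytes)
{
	struct qbman_fq_query_np_rslt state;
	int ret = qbman_fq_query_state(s, fqid, &state);

	if (ret)
		return ret;

	*frames = qbman_fq_state_frame_count(&state);
	*bytes = qbman_fq_state_byte_count(&state);
	return 0;
}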

struct qbman_bp_query_rslt {
	u8 verb;
	u8 rslt;
	u8 reserved[4];
	u8 bdi;
	u8 state;
	__le32 fill;
	__le32 hdotr;
	__le16 swdet;
	__le16 swdxt;
	__le16 hwdet;
	__le16 hwdxt;
	__le16 swset;
	__le16 swsxt;
	__le16 vbpid;
	__le16 icid;
	__le64 bpscn_addr;
	__le64 bpscn_ctx;
	__le16 hw_targ;
	u8 dbe;
	u8 reserved2;
	u8 sdcnt;
	u8 hdcnt;
	u8 sscnt;
	u8 reserved3[9];
};

int qbman_bp_query(struct qbman_swp *s, u16 bpid,
		   struct qbman_bp_query_rslt *r);

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
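
/*
 * Illustrative sketch, not part of the original header: count the free
 * buffers remaining in buffer pool @bpid.
 */
static inline int example_bp_free_bufs(struct qbman_swp *s, u16 bpid,
				       u32 *num_free)
{
	struct qbman_bp_query_rslt state;
	int ret = qbman_bp_query(s, bpid, &state);

	if (ret)
		return ret;

	*num_free = qbman_bp_info_num_free_bufs(&state);
	return 0;
}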

/**
 * qbman_swp_release() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     pointer to the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
static inline int qbman_swp_release(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const u64 *buffers,
				    unsigned int num_buffers)
{
	return qbman_swp_release_ptr(s, d, buffers, num_buffers);
}
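
/*
 * Illustrative sketch, not part of the original header: return @num buffer
 * addresses (at most seven) to pool @bpid, retrying while the release
 * command ring is busy.
 */
static inline int example_release_bufs(struct qbman_swp *s, u16 bpid,
				       const u64 *addrs, unsigned int num)
{
	struct qbman_release_desc rd;
	int ret;

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);

	do {
		ret = qbman_swp_release(s, &rd, addrs, num);
	} while (ret == -EBUSY);	/* ring full; try again */

	return ret;
}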

/**
 * qbman_swp_pull() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the pull dequeue descriptor, configured with the set of
 *     qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static inline int qbman_swp_pull(struct qbman_swp *s,
				 struct qbman_pull_desc *d)
{
	return qbman_swp_pull_ptr(s, d);
}
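
/*
 * Illustrative sketch, not part of the original header: start a volatile
 * (pull) dequeue of up to @max frames from @fqid into a caller-provided,
 * DMA-mapped storage area (@store / @store_phys).
 */
static inline int example_start_pull(struct qbman_swp *s, u32 fqid,
				     struct dpaa2_dq *store,
				     dma_addr_t store_phys, u8 max)
{
	struct qbman_pull_desc pd;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_numframes(&pd, max);
	qbman_pull_desc_set_fq(&pd, fqid);
	/* write responses to memory rather than the DQRR, with stashing */
	qbman_pull_desc_set_storage(&pd, store, store_phys, 1);

	return qbman_swp_pull(s, &pd);	/* 0 or -EBUSY */
}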

/**
 * qbman_swp_dqrr_next() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring that they be consumed immediately or in any particular order.
 */
static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	return qbman_swp_dqrr_next_ptr(s);
}
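
/*
 * Illustrative sketch, not part of the original header: drain the DQRR,
 * distinguishing frame dequeues from notifications. Every entry handed out
 * by qbman_swp_dqrr_next() must be consumed exactly once.
 */
static inline void example_drain_dqrr(struct qbman_swp *s)
{
	const struct dpaa2_dq *dq;

	while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
		if (qbman_result_is_DQ(dq)) {
			/* a frame dequeue: process the FD here */
		} else {
			/* a notification (CDAN, FQDAN, CSCN, ...) */
		}
		qbman_swp_dqrr_consume(s, dq);	/* return the DQRR slot */
	}
}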

#endif /* __FSL_QBMAN_PORTAL_H */