/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __FSL_QMAN_H
#define __FSL_QMAN_H

#include <linux/bitops.h>
#include <linux/device.h>

/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0 0
#define QMAN_CHANNEL_POOL1 0x21
#define QMAN_CHANNEL_CAAM 0x80
#define QMAN_CHANNEL_POOL1_REV3 0x401
#define QMAN_CHANNEL_CAAM_REV3 0x840
extern u16 qm_channel_pool1;
extern u16 qm_channel_caam;

/* Portal processing (interrupt) sources */
#define QM_PIRQ_CSCI	0x00100000	/* Congestion State Change */
#define QM_PIRQ_EQCI	0x00080000	/* Enqueue Command Committed */
#define QM_PIRQ_EQRI	0x00040000	/* EQCR Ring (below threshold) */
#define QM_PIRQ_DQRI	0x00020000	/* DQRR Ring (non-empty) */
#define QM_PIRQ_MRI	0x00010000	/* MR Ring (non-empty) */
/*
 * This mask contains all the interrupt sources that need handling except DQRI,
 * ie. that if present should trigger slow-path processing.
 */
#define QM_PIRQ_SLOW	(QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
			 QM_PIRQ_MRI)

/* For qman_static_dequeue_*** APIs */
#define QM_SDQCR_CHANNELS_POOL_MASK	0x00007fff
/* for n in [1,15] */
#define QM_SDQCR_CHANNELS_POOL(n)	(0x00008000 >> (n))
/* for conversion from n of qm_channel */
static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
{
	return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
}
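
/*
 * For illustration (not part of the original header): the SDQCR bit for a
 * pool channel can be derived either from its pool index or from its channel
 * number; both expressions below evaluate to (0x00008000 >> 3) == 0x00001000
 * for pool channel 3:
 *
 *	u32 sdqcr  = QM_SDQCR_CHANNELS_POOL(3);
 *	u32 sdqcr2 = QM_SDQCR_CHANNELS_POOL_CONV(qm_channel_pool1 + 2);
 */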

/* --- QMan data structures (and associated constants) --- */

/* "Frame Descriptor (FD)" */
struct qm_fd {
	union {
		struct {
			u8 cfg8b_w1;
			u8 bpid;	/* Buffer Pool ID */
			u8 cfg8b_w3;
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			__be32 addr_lo;	/* low 32-bits of 40-bit address */
		} __packed;
		__be64 data;
	};
	__be32 cfg;	/* format, offset, length / congestion */
	union {
		__be32 cmd;
		__be32 status;
	};
} __aligned(8);

#define QM_FD_FORMAT_SG		BIT(31)
#define QM_FD_FORMAT_LONG	BIT(30)
#define QM_FD_FORMAT_COMPOUND	BIT(29)
#define QM_FD_FORMAT_MASK	GENMASK(31, 29)
#define QM_FD_OFF_SHIFT		20
#define QM_FD_OFF_MASK		GENMASK(28, 20)
#define QM_FD_LEN_MASK		GENMASK(19, 0)
#define QM_FD_LEN_BIG_MASK	GENMASK(28, 0)

enum qm_fd_format {
	/*
	 * 'contig' implies a contiguous buffer, whereas 'sg' implies a
	 * scatter-gather table. 'big' implies a 29-bit length with no offset
	 * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
	 * implies a s/g-like table, where each entry itself represents a frame
	 * (contiguous or scatter-gather) and the 29-bit "length" is
	 * interpreted purely for congestion calculations, ie. a "congestion
	 * weight".
	 */
	qm_fd_contig = 0,
	qm_fd_contig_big = QM_FD_FORMAT_LONG,
	qm_fd_sg = QM_FD_FORMAT_SG,
	qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
	qm_fd_compound = QM_FD_FORMAT_COMPOUND
};

static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
{
	return be64_to_cpu(fd->data) & 0xffffffffffLLU;
}

static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
{
	return be64_to_cpu(fd->data) & 0xffffffffffLLU;
}

static inline void qm_fd_addr_set64(struct qm_fd *fd, u64 addr)
{
	fd->addr_hi = upper_32_bits(addr);
	fd->addr_lo = cpu_to_be32(lower_32_bits(addr));
}

/*
 * The 'format' field indicates the interpretation of the remaining
 * 29 bits of the 32-bit word.
 * If 'format' is _contig or _sg, 20b length and 9b offset.
 * If 'format' is _contig_big or _sg_big, 29b length.
 * If 'format' is _compound, 29b "congestion weight".
 */
static inline enum qm_fd_format qm_fd_get_format(const struct qm_fd *fd)
{
	return be32_to_cpu(fd->cfg) & QM_FD_FORMAT_MASK;
}

static inline int qm_fd_get_offset(const struct qm_fd *fd)
{
	return (be32_to_cpu(fd->cfg) & QM_FD_OFF_MASK) >> QM_FD_OFF_SHIFT;
}

static inline int qm_fd_get_length(const struct qm_fd *fd)
{
	return be32_to_cpu(fd->cfg) & QM_FD_LEN_MASK;
}

static inline int qm_fd_get_len_big(const struct qm_fd *fd)
{
	return be32_to_cpu(fd->cfg) & QM_FD_LEN_BIG_MASK;
}

static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt,
				   int off, int len)
{
	fd->cfg = cpu_to_be32(fmt | (len & QM_FD_LEN_BIG_MASK) |
			      ((off << QM_FD_OFF_SHIFT) & QM_FD_OFF_MASK));
}

#define qm_fd_set_contig(fd, off, len) \
	qm_fd_set_param(fd, qm_fd_contig, off, len)
#define qm_fd_set_sg(fd, off, len) qm_fd_set_param(fd, qm_fd_sg, off, len)
#define qm_fd_set_contig_big(fd, len) \
	qm_fd_set_param(fd, qm_fd_contig_big, 0, len)
#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len)
#define qm_fd_set_compound(fd, len) qm_fd_set_param(fd, qm_fd_compound, 0, len)

static inline void qm_fd_clear_fd(struct qm_fd *fd)
{
	fd->data = 0;
	fd->cfg = 0;
	fd->cmd = 0;
}
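
/*
 * A minimal, illustrative sketch (not part of the original header) of how the
 * accessors above are typically combined to describe a single contiguous
 * buffer; 'buf_dma', 'hdr_room' and 'frame_len' are hypothetical values from
 * the caller's own DMA mapping:
 *
 *	struct qm_fd fd;
 *
 *	qm_fd_clear_fd(&fd);
 *	qm_fd_addr_set64(&fd, buf_dma);		// 40-bit buffer address
 *	qm_fd_set_contig(&fd, hdr_room, frame_len); // 9-bit off, 20-bit len
 *	// qm_fd_get_offset(&fd) == hdr_room, qm_fd_get_length(&fd) == frame_len
 */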

/* Scatter/Gather table entry */
struct qm_sg_entry {
	union {
		struct {
			u8 __reserved1[3];
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			__be32 addr_lo;	/* low 32-bits of 40-bit address */
		};
		__be64 data;
	};
	__be32 cfg;	/* E bit, F bit, length */
	u8 __reserved2;
	u8 bpid;
	__be16 offset;	/* 13-bit, _res[13-15] */
} __packed;

#define QM_SG_LEN_MASK	GENMASK(29, 0)
#define QM_SG_OFF_MASK	GENMASK(12, 0)
#define QM_SG_FIN	BIT(30)
#define QM_SG_EXT	BIT(31)

static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
{
	return be64_to_cpu(sg->data) & 0xffffffffffLLU;
}

static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
{
	return be64_to_cpu(sg->data) & 0xffffffffffLLU;
}

static inline void qm_sg_entry_set64(struct qm_sg_entry *sg, u64 addr)
{
	sg->addr_hi = upper_32_bits(addr);
	sg->addr_lo = cpu_to_be32(lower_32_bits(addr));
}

static inline bool qm_sg_entry_is_final(const struct qm_sg_entry *sg)
{
	return be32_to_cpu(sg->cfg) & QM_SG_FIN;
}

static inline bool qm_sg_entry_is_ext(const struct qm_sg_entry *sg)
{
	return be32_to_cpu(sg->cfg) & QM_SG_EXT;
}

static inline int qm_sg_entry_get_len(const struct qm_sg_entry *sg)
{
	return be32_to_cpu(sg->cfg) & QM_SG_LEN_MASK;
}

static inline void qm_sg_entry_set_len(struct qm_sg_entry *sg, int len)
{
	sg->cfg = cpu_to_be32(len & QM_SG_LEN_MASK);
}

static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len)
{
	sg->cfg = cpu_to_be32(QM_SG_FIN | (len & QM_SG_LEN_MASK));
}

static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
{
	return be32_to_cpu(sg->offset) & QM_SG_OFF_MASK;
}
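
/*
 * Illustrative sketch only (not from the original header): a two-buffer
 * scatter/gather frame is described by an FD of format qm_fd_sg pointing at a
 * DMA-able table of qm_sg_entry, with the last entry carrying the F (final)
 * bit. 'sgt', 'sgt_dma', 'buf0_dma'/'len0' and 'buf1_dma'/'len1' are all
 * hypothetical:
 *
 *	// 'sgt' points at a DMA-coherent table of two qm_sg_entry
 *	memset(sgt, 0, 2 * sizeof(*sgt));
 *	qm_sg_entry_set64(&sgt[0], buf0_dma);
 *	qm_sg_entry_set_len(&sgt[0], len0);
 *	qm_sg_entry_set64(&sgt[1], buf1_dma);
 *	qm_sg_entry_set_f(&sgt[1], len1);	// marks the final entry
 *	qm_fd_addr_set64(&fd, sgt_dma);
 *	qm_fd_set_sg(&fd, 0, len0 + len1);
 */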

/* "Frame Dequeue Response" */
struct qm_dqrr_entry {
	u8 verb;
	u8 stat;
	__be16 seqnum;	/* 15-bit */
	u8 tok;
	u8 __reserved2[3];
	__be32 fqid;	/* 24-bit */
	__be32 context_b;
	struct qm_fd fd;
	u8 __reserved4[32];
} __packed __aligned(64);
#define QM_DQRR_VERB_VBIT		0x80
#define QM_DQRR_VERB_MASK		0x7f	/* where the verb contains; */
#define QM_DQRR_VERB_FRAME_DEQUEUE	0x60	/* "this format" */
#define QM_DQRR_STAT_FQ_EMPTY		0x80	/* FQ empty */
#define QM_DQRR_STAT_FQ_HELDACTIVE	0x40	/* FQ held active */
#define QM_DQRR_STAT_FQ_FORCEELIGIBLE	0x20	/* FQ was force-eligible'd */
#define QM_DQRR_STAT_FD_VALID		0x10	/* has a non-NULL FD */
#define QM_DQRR_STAT_UNSCHEDULED	0x02	/* Unscheduled dequeue */
#define QM_DQRR_STAT_DQCR_EXPIRED	0x01	/* VDQCR or PDQCR expired */

/* 'fqid' is a 24-bit field in every h/w descriptor */
#define QM_FQID_MASK	GENMASK(23, 0)
#define qm_fqid_set(p, v) ((p)->fqid = cpu_to_be32((v) & QM_FQID_MASK))
#define qm_fqid_get(p)    (be32_to_cpu((p)->fqid) & QM_FQID_MASK)

/* "ERN Message Response" */
/* "FQ State Change Notification" */
union qm_mr_entry {
	struct {
		u8 verb;
		u8 __reserved[63];
	};
	struct {
		u8 verb;
		u8 dca;
		__be16 seqnum;
		u8 rc;		/* Rej Code: 8-bit */
		u8 __reserved[3];
		__be32 fqid;	/* 24-bit */
		__be32 tag;
		struct qm_fd fd;
		u8 __reserved1[32];
	} __packed __aligned(64) ern;
	struct {
		u8 verb;
		u8 fqs;		/* Frame Queue Status */
		u8 __reserved1[6];
		__be32 fqid;	/* 24-bit */
		__be32 context_b;
		u8 __reserved2[48];
	} __packed fq;		/* FQRN/FQRNI/FQRL/FQPN */
};
#define QM_MR_VERB_VBIT			0x80
/*
 * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
 * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
 * from the other MR types by noting if the 0x20 bit is unset.
 */
#define QM_MR_VERB_TYPE_MASK		0x27
#define QM_MR_VERB_DC_ERN		0x20
#define QM_MR_VERB_FQRN			0x21
#define QM_MR_VERB_FQRNI		0x22
#define QM_MR_VERB_FQRL			0x23
#define QM_MR_VERB_FQPN			0x24
#define QM_MR_RC_MASK			0xf0	/* contains one of; */
#define QM_MR_RC_CGR_TAILDROP		0x00
#define QM_MR_RC_WRED			0x10
#define QM_MR_RC_ERROR			0x20
#define QM_MR_RC_ORPWINDOW_EARLY	0x30
#define QM_MR_RC_ORPWINDOW_LATE		0x40
#define QM_MR_RC_FQ_TAILDROP		0x50
#define QM_MR_RC_ORPWINDOW_RETIRED	0x60
#define QM_MR_RC_ORP_ZERO		0x70
#define QM_MR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */

/*
 * An identical structure of FQD fields is present in the "Init FQ" command and
 * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
 * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
 * latter has two inlines to assist with converting to/from the mant+exp
 * representation.
 */
struct qm_fqd_stashing {
	/* See QM_STASHING_EXCL_<...> */
	u8 exclusive;
	/* Numbers of cachelines */
	u8 cl; /* _res[6-7], as[4-5], ds[2-3], cs[0-1] */
};

struct qm_fqd_oac {
	/* "Overhead Accounting Control", see QM_OAC_<...> */
	u8 oac; /* oac[6-7], _res[0-5] */
	/* Two's-complement value (-128 to +127) */
	s8 oal; /* "Overhead Accounting Length" */
};

struct qm_fqd {
	/* _res[6-7], orprws[3-5], oa[2], olws[0-1] */
	u8 orpc;
	u8 cgid;
	__be16 fq_ctrl;	/* See QM_FQCTRL_<...> */
	__be16 dest_wq;	/* channel[3-15], wq[0-2] */
	__be16 ics_cred; /* 15-bit */
	/*
	 * For "Initialize Frame Queue" commands, the write-enable mask
	 * determines whether 'td' or 'oac_init' is observed. For query
	 * commands, this field is always 'td', and 'oac_query' (below) reflects
	 * the Overhead ACcounting values.
	 */
	union {
		__be16 td; /* "Taildrop": _res[13-15], mant[5-12], exp[0-4] */
		struct qm_fqd_oac oac_init;
	};
	__be32 context_b;
	union {
		/* Treat it as 64-bit opaque */
		__be64 opaque;
		struct {
			__be32 hi;
			__be32 lo;
		};
		/* Treat it as s/w portal stashing config */
		/* see "FQD Context_A field used for [...]" */
		struct {
			struct qm_fqd_stashing stashing;
			/*
			 * 48-bit address of FQ context to
			 * stash, must be cacheline-aligned
			 */
			__be16 context_hi;
			__be32 context_lo;
		} __packed;
	} context_a;
	struct qm_fqd_oac oac_query;
} __packed;

#define QM_FQD_CHAN_OFF		3
#define QM_FQD_WQ_MASK		GENMASK(2, 0)
#define QM_FQD_TD_EXP_MASK	GENMASK(4, 0)
#define QM_FQD_TD_MANT_OFF	5
#define QM_FQD_TD_MANT_MASK	GENMASK(12, 5)
#define QM_FQD_TD_MAX		0xe0000000
#define QM_FQD_TD_MANT_MAX	0xff
#define QM_FQD_OAC_OFF		6
#define QM_FQD_AS_OFF		4
#define QM_FQD_DS_OFF		2
#define QM_FQD_XS_MASK		0x3

/* 64-bit converters for context_hi/lo */
static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
{
	return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
}

static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
{
	return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
}

static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
{
	return qm_fqd_stashing_get64(fqd);
}

static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.context_hi = cpu_to_be16(upper_32_bits(addr));
	fqd->context_a.context_lo = cpu_to_be32(lower_32_bits(addr));
}

static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.hi = cpu_to_be32(upper_32_bits(addr));
	fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr));
}

/* convert a threshold value into mant+exp representation */
static inline int qm_fqd_set_taildrop(struct qm_fqd *fqd, u32 val,
				      int roundup)
{
	u32 e = 0;
	int td, oddbit = 0;

	if (val > QM_FQD_TD_MAX)
		return -ERANGE;

	while (val > QM_FQD_TD_MANT_MAX) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}

	td = (val << QM_FQD_TD_MANT_OFF) & QM_FQD_TD_MANT_MASK;
	td |= (e & QM_FQD_TD_EXP_MASK);
	fqd->td = cpu_to_be16(td);
	return 0;
}
/* and the other direction */
static inline int qm_fqd_get_taildrop(const struct qm_fqd *fqd)
{
	int td = be16_to_cpu(fqd->td);

	return ((td & QM_FQD_TD_MANT_MASK) >> QM_FQD_TD_MANT_OFF)
		<< (td & QM_FQD_TD_EXP_MASK);
}
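
/*
 * Worked example (illustrative only): a requested taildrop threshold of
 * 0x12345 bytes exceeds the 8-bit mantissa, so qm_fqd_set_taildrop() shifts it
 * right until it fits, counting the exponent. With roundup, this ends at
 * mant = 0x92 and exp = 9, so the FQD stores (0x92 << 5) | 9 and
 * qm_fqd_get_taildrop() reads back 0x92 << 9 = 0x12400, i.e. the threshold is
 * quantised to mant * 2^exp, slightly above the requested value.
 */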

static inline void qm_fqd_set_stashing(struct qm_fqd *fqd, u8 as, u8 ds, u8 cs)
{
	struct qm_fqd_stashing *st = &fqd->context_a.stashing;

	st->cl = ((as & QM_FQD_XS_MASK) << QM_FQD_AS_OFF) |
		 ((ds & QM_FQD_XS_MASK) << QM_FQD_DS_OFF) |
		 (cs & QM_FQD_XS_MASK);
}

static inline u8 qm_fqd_get_stashing(const struct qm_fqd *fqd)
{
	return fqd->context_a.stashing.cl;
}
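
/*
 * Illustrative sketch only (not part of the original header): to stash one
 * cacheline each of annotation, frame data and FQ context on dequeue, a
 * driver would typically enable Context-A stashing and fill in the cacheline
 * counts in its INITFQ options; 'opts' is a hypothetical struct qm_mcc_initfq
 * being prepared for qman_init_fq():
 *
 *	opts.we_mask |= cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA);
 *	opts.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CTXASTASHING);
 *	qm_fqd_set_stashing(&opts.fqd, 1, 1, 1); // as, ds, cs cachelines
 */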

static inline void qm_fqd_set_oac(struct qm_fqd *fqd, u8 val)
{
	fqd->oac_init.oac = val << QM_FQD_OAC_OFF;
}

static inline void qm_fqd_set_oal(struct qm_fqd *fqd, s8 val)
{
	fqd->oac_init.oal = val;
}

static inline void qm_fqd_set_destwq(struct qm_fqd *fqd, int ch, int wq)
{
	fqd->dest_wq = cpu_to_be16((ch << QM_FQD_CHAN_OFF) |
				   (wq & QM_FQD_WQ_MASK));
}

static inline int qm_fqd_get_chan(const struct qm_fqd *fqd)
{
	return be16_to_cpu(fqd->dest_wq) >> QM_FQD_CHAN_OFF;
}

static inline int qm_fqd_get_wq(const struct qm_fqd *fqd)
{
	return be16_to_cpu(fqd->dest_wq) & QM_FQD_WQ_MASK;
}

/* See "Frame Queue Descriptor (FQD)" */
/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
#define QM_FQCTRL_MASK		0x07ff	/* 'fq_ctrl' flags; */
#define QM_FQCTRL_CGE		0x0400	/* Congestion Group Enable */
#define QM_FQCTRL_TDE		0x0200	/* Tail-Drop Enable */
#define QM_FQCTRL_CTXASTASHING	0x0080	/* Context-A stashing */
#define QM_FQCTRL_CPCSTASH	0x0040	/* CPC Stash Enable */
#define QM_FQCTRL_FORCESFDR	0x0008	/* High-priority SFDRs */
#define QM_FQCTRL_AVOIDBLOCK	0x0004	/* Don't block active */
#define QM_FQCTRL_HOLDACTIVE	0x0002	/* Hold active in portal */
#define QM_FQCTRL_PREFERINCACHE	0x0001	/* Aggressively cache FQD */
#define QM_FQCTRL_LOCKINCACHE	QM_FQCTRL_PREFERINCACHE /* older naming */

/* See "FQD Context_A field used for [...] */
/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
#define QM_STASHING_EXCL_ANNOTATION	0x04
#define QM_STASHING_EXCL_DATA		0x02
#define QM_STASHING_EXCL_CTX		0x01

/* See "Intra Class Scheduling" */
/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
#define QM_OAC_ICS		0x2 /* Accounting for Intra-Class Scheduling */
#define QM_OAC_CG		0x1 /* Accounting for Congestion Groups */

/*
 * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
 * and associated commands/responses. The WRED parameters are calculated from
 * these fields as follows;
 *   MaxTH = MA * (2 ^ Mn)
 *   Slope = SA / (2 ^ Sn)
 *    MaxP = 4 * (Pn + 1)
 */
struct qm_cgr_wr_parm {
	/* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
	__be32 word;
};
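
/*
 * Worked example (illustrative only, using the formulas above): MA = 64 with
 * Mn = 10 yields MaxTH = 64 * 2^10 = 65536, and Pn = 15 yields
 * MaxP = 4 * (15 + 1) = 64.
 */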
/*
 * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
 * management commands, this is padded to a 16-bit structure field, so that's
 * how we represent it here. The congestion state threshold is calculated from
 * these fields as follows;
 *   CS threshold = TA * (2 ^ Tn)
 */
struct qm_cgr_cs_thres {
	/* _res[13-15], TA[5-12], Tn[0-4] */
	__be16 word;
};
/*
 * This identical structure of CGR fields is present in the "Init/Modify CGR"
 * commands and the "Query CGR" result. It's suctioned out here into its own
 * struct.
 */
struct __qm_mc_cgr {
	struct qm_cgr_wr_parm wr_parm_g;
	struct qm_cgr_wr_parm wr_parm_y;
	struct qm_cgr_wr_parm wr_parm_r;
	u8 wr_en_g;	/* boolean, use QM_CGR_EN */
	u8 wr_en_y;	/* boolean, use QM_CGR_EN */
	u8 wr_en_r;	/* boolean, use QM_CGR_EN */
	u8 cscn_en;	/* boolean, use QM_CGR_EN */
	union {
		struct {
			__be16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_* */
			__be16 cscn_targ_dcp_low;
		};
		__be32 cscn_targ;	/* use QM_CGR_TARG_* */
	};
	u8 cstd_en;	/* boolean, use QM_CGR_EN */
	u8 cs;		/* boolean, only used in query response */
	struct qm_cgr_cs_thres cs_thres; /* use qm_cgr_cs_thres_set64() */
	u8 mode;	/* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
} __packed;
#define QM_CGR_EN		0x01 /* For wr_en_*, cscn_en, cstd_en */
#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT	0x8000 /* value written to portal bit*/
#define QM_CGR_TARG_UDP_CTRL_DCP	0x4000 /* 0: SWP, 1: DCP */
#define QM_CGR_TARG_PORTAL(n)	(0x80000000 >> (n)) /* s/w portal, 0-9 */
#define QM_CGR_TARG_FMAN0	0x00200000 /* direct-connect portal: fman0 */
#define QM_CGR_TARG_FMAN1	0x00100000 /*			   : fman1 */
/* Convert CGR thresholds to/from "cs_thres" format */
static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
{
	int thres = be16_to_cpu(th->word);

	return ((thres >> 5) & 0xff) << (thres & 0x1f);
}

static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
					int roundup)
{
	u32 e = 0;
	int oddbit = 0;

	while (val > 0xff) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}
	th->word = cpu_to_be16(((val & 0xff) << 5) | (e & 0x1f));
	return 0;
}
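
/*
 * Worked example (illustrative only): qm_cgr_cs_thres_set64(&th, 1000000, 1)
 * reduces 1000000 to TA = 245 with Tn = 12, so the stored threshold reads back
 * via qm_cgr_cs_thres_get64() as 245 * 2^12 = 1003520, i.e. the requested
 * value rounded up to the mant+exp granularity.
 */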

/* "Initialize FQ" */
struct qm_mcc_initfq {
	u8 __reserved1[2];
	__be16 we_mask;	/* Write Enable Mask */
	__be32 fqid;	/* 24-bit */
	__be16 count;	/* Initialises 'count+1' FQDs */
	struct qm_fqd fqd; /* the FQD fields go here */
	u8 __reserved2[30];
} __packed;
/* "Initialize/Modify CGR" */
struct qm_mcc_initcgr {
	u8 __reserve1[2];
	__be16 we_mask;	/* Write Enable Mask */
	struct __qm_mc_cgr cgr;	/* CGR fields */
	u8 __reserved2[2];
	u8 cgid;
	u8 __reserved3[32];
} __packed;

/* INITFQ-specific flags */
#define QM_INITFQ_WE_MASK		0x01ff	/* 'Write Enable' flags; */
#define QM_INITFQ_WE_OAC		0x0100
#define QM_INITFQ_WE_ORPC		0x0080
#define QM_INITFQ_WE_CGID		0x0040
#define QM_INITFQ_WE_FQCTRL		0x0020
#define QM_INITFQ_WE_DESTWQ		0x0010
#define QM_INITFQ_WE_ICSCRED		0x0008
#define QM_INITFQ_WE_TDTHRESH		0x0004
#define QM_INITFQ_WE_CONTEXTB		0x0002
#define QM_INITFQ_WE_CONTEXTA		0x0001
/* INITCGR/MODIFYCGR-specific flags */
#define QM_CGR_WE_MASK			0x07ff	/* 'Write Enable Mask'; */
#define QM_CGR_WE_WR_PARM_G		0x0400
#define QM_CGR_WE_WR_PARM_Y		0x0200
#define QM_CGR_WE_WR_PARM_R		0x0100
#define QM_CGR_WE_WR_EN_G		0x0080
#define QM_CGR_WE_WR_EN_Y		0x0040
#define QM_CGR_WE_WR_EN_R		0x0020
#define QM_CGR_WE_CSCN_EN		0x0010
#define QM_CGR_WE_CSCN_TARG		0x0008
#define QM_CGR_WE_CSTD_EN		0x0004
#define QM_CGR_WE_CS_THRES		0x0002
#define QM_CGR_WE_MODE			0x0001

#define QMAN_CGR_FLAG_USE_INIT	     0x00000001
#define QMAN_CGR_MODE_FRAME          0x00000001

	/* Portal and Frame Queues */
/* Represents a managed portal */
struct qman_portal;

/*
 * This object type represents QMan frame queue descriptors (FQD), it is
 * cacheline-aligned, and initialised by qman_create_fq(). The structure is
 * defined further down.
 */
struct qman_fq;

/*
 * This object type represents a QMan congestion group, it is defined further
 * down.
 */
struct qman_cgr;

/*
 * This enum, and the callback type that returns it, are used when handling
 * dequeued frames via DQRR. Note that for "null" callbacks registered with the
 * portal object (for handling dequeues that do not demux because context_b is
 * NULL), the return value *MUST* be qman_cb_dqrr_consume.
 */
enum qman_cb_dqrr_result {
	/* DQRR entry can be consumed */
	qman_cb_dqrr_consume,
	/* Like _consume, but requests parking - FQ must be held-active */
	qman_cb_dqrr_park,
	/* Does not consume, for DCA mode only. */
	qman_cb_dqrr_defer,
	/*
	 * Stop processing without consuming this ring entry. Exits the current
	 * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
	 * an interrupt handler, the callback would typically call
	 * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
	 * otherwise the interrupt will reassert immediately.
	 */
	qman_cb_dqrr_stop,
	/* Like qman_cb_dqrr_stop, but consumes the current entry. */
	qman_cb_dqrr_consume_stop
};
typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
					struct qman_fq *fq,
					const struct qm_dqrr_entry *dqrr,
					bool sched_napi);

/*
 * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
 * are always consumed after the callback returns.
 */
typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
			   const union qm_mr_entry *msg);

/*
 * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
 * held-active + held-suspended are just "sched". Things like "retired" will not
 * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
 * then, to indicate it's completing and to gate attempts to retry the retire
 * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
 * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
 * index rather than the FQ that ring entry corresponds to), so repeated park
 * commands are allowed (if you're silly enough to try) but won't change FQ
 * state, and the resulting park notifications move FQs from "sched" to
 * "parked".
 */
enum qman_fq_state {
	qman_fq_state_oos,
	qman_fq_state_parked,
	qman_fq_state_sched,
	qman_fq_state_retired
};

#define QMAN_FQ_STATE_CHANGING	     0x80000000 /* 'state' is changing */
#define QMAN_FQ_STATE_NE	     0x40000000 /* retired FQ isn't empty */
#define QMAN_FQ_STATE_ORL	     0x20000000 /* retired FQ has ORL */
#define QMAN_FQ_STATE_BLOCKOOS	     0xe0000000 /* if any are set, no OOS */
#define QMAN_FQ_STATE_CGR_EN	     0x10000000 /* CGR enabled */
#define QMAN_FQ_STATE_VDQCR	     0x08000000 /* being volatile dequeued */

/*
 * Frame queue objects (struct qman_fq) are stored within memory passed to
 * qman_create_fq(), as this allows stashing of caller-provided demux callback
 * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
 * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
 * they should;
 *
 * (a) extend the qman_fq structure with their state; eg.
 *
 *     // myfq is allocated and driver_fq callbacks filled in;
 *     struct my_fq {
 *	   struct qman_fq base;
 *	   int an_extra_field;
 *	   [ ... add other fields to be associated with each FQ ...]
 *     } *myfq = some_my_fq_allocator();
 *     struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
 *
 *     // in a dequeue callback, access extra fields from 'fq' via a cast;
 *     struct my_fq *myfq = (struct my_fq *)fq;
 *     do_something_with(myfq->an_extra_field);
 *     [...]
 *
 * (b) when and if configuring the FQ for context stashing, specify how ever
 *     many cachelines are required to stash 'struct my_fq', to accelerate not
 *     only the QMan driver but the callback as well.
 */

struct qman_fq_cb {
	qman_cb_dqrr dqrr;	/* for dequeued frames */
	qman_cb_mr ern;		/* for s/w ERNs */
	qman_cb_mr fqs;		/* frame-queue state changes*/
};

struct qman_fq {
	/* Caller of qman_create_fq() provides these demux callbacks */
	struct qman_fq_cb cb;
	/*
	 * These are internal to the driver, don't touch. In particular, they
	 * may change, be removed, or extended (so you shouldn't rely on
	 * sizeof(qman_fq) being a constant).
	 */
	u32 fqid, idx;
	unsigned long flags;
	enum qman_fq_state state;
	int cgr_groupid;
};

/*
 * This callback type is used when handling congestion group entry/exit.
 * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
 */
typedef void (*qman_cb_cgr)(struct qman_portal *qm,
			    struct qman_cgr *cgr, int congested);

struct qman_cgr {
	/* Set these prior to qman_create_cgr() */
	u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
	qman_cb_cgr cb;
	/* These are private to the driver */
	u16 chan; /* portal channel this object is created on */
	struct list_head node;
};

/* Flags to qman_create_fq() */
#define QMAN_FQ_FLAG_NO_ENQUEUE	     0x00000001 /* can't enqueue */
#define QMAN_FQ_FLAG_NO_MODIFY	     0x00000002 /* can only enqueue */
#define QMAN_FQ_FLAG_TO_DCPORTAL     0x00000004 /* consumed by CAAM/PME/Fman */
#define QMAN_FQ_FLAG_DYNAMIC_FQID    0x00000020 /* (de)allocate fqid */

/* Flags to qman_init_fq() */
#define QMAN_INITFQ_FLAG_SCHED	     0x00000001 /* schedule rather than park */
#define QMAN_INITFQ_FLAG_LOCAL	     0x00000004 /* set dest portal */

/*
 * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
 * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
 * FQID(n) to fill in the frame queue ID.
 */
#define QM_VDQCR_PRECEDENCE_VDQCR	0x0
#define QM_VDQCR_PRECEDENCE_SDQCR	0x80000000
#define QM_VDQCR_EXACT			0x40000000
#define QM_VDQCR_NUMFRAMES_MASK		0x3f000000
#define QM_VDQCR_NUMFRAMES_SET(n)	(((n) & 0x3f) << 24)
#define QM_VDQCR_NUMFRAMES_GET(n)	(((n) >> 24) & 0x3f)
#define QM_VDQCR_NUMFRAMES_TILLEMPTY	QM_VDQCR_NUMFRAMES_SET(0)

#define QMAN_VOLATILE_FLAG_WAIT	     0x00000001 /* wait if VDQCR is in use */
#define QMAN_VOLATILE_FLAG_WAIT_INT  0x00000002 /* if wait, interruptible? */
#define QMAN_VOLATILE_FLAG_FINISH    0x00000004 /* wait till VDQCR completes */

/* "Query FQ Non-Programmable Fields" */
struct qm_mcr_queryfq_np {
	u8 verb;
	u8 result;
	u8 __reserved1;
	u8 state;		/* QM_MCR_NP_STATE_*** */
	u32 fqd_link;		/* 24-bit, _res2[24-31] */
	u16 odp_seq;		/* 14-bit, _res3[14-15] */
	u16 orp_nesn;		/* 14-bit, _res4[14-15] */
	u16 orp_ea_hseq;	/* 15-bit, _res5[15] */
	u16 orp_ea_tseq;	/* 15-bit, _res6[15] */
	u32 orp_ea_hptr;	/* 24-bit, _res7[24-31] */
	u32 orp_ea_tptr;	/* 24-bit, _res8[24-31] */
	u32 pfdr_hptr;		/* 24-bit, _res9[24-31] */
	u32 pfdr_tptr;		/* 24-bit, _res10[24-31] */
	u8 __reserved2[5];
	u8 is;			/* 1-bit, _res12[1-7] */
	u16 ics_surp;
	u32 byte_cnt;
	u32 frm_cnt;		/* 24-bit, _res13[24-31] */
	u32 __reserved3;
	u16 ra1_sfdr;		/* QM_MCR_NP_RA1_*** */
	u16 ra2_sfdr;		/* QM_MCR_NP_RA2_*** */
	u16 __reserved4;
	u16 od1_sfdr;		/* QM_MCR_NP_OD1_*** */
	u16 od2_sfdr;		/* QM_MCR_NP_OD2_*** */
	u16 od3_sfdr;		/* QM_MCR_NP_OD3_*** */
} __packed;

#define QM_MCR_NP_STATE_FE		0x10
#define QM_MCR_NP_STATE_R		0x08
#define QM_MCR_NP_STATE_MASK		0x07	/* Reads FQD::STATE; */
#define QM_MCR_NP_STATE_OOS		0x00
#define QM_MCR_NP_STATE_RETIRED		0x01
#define QM_MCR_NP_STATE_TEN_SCHED	0x02
#define QM_MCR_NP_STATE_TRU_SCHED	0x03
#define QM_MCR_NP_STATE_PARKED		0x04
#define QM_MCR_NP_STATE_ACTIVE		0x05
#define QM_MCR_NP_PTR_MASK		0x07ff	/* for RA[12] & OD[123] */
#define QM_MCR_NP_RA1_NRA(v)		(((v) >> 14) & 0x3)	/* FQD::NRA */
#define QM_MCR_NP_RA2_IT(v)		(((v) >> 14) & 0x1)	/* FQD::IT */
#define QM_MCR_NP_OD1_NOD(v)		(((v) >> 14) & 0x3)	/* FQD::NOD */
#define QM_MCR_NP_OD3_NPC(v)		(((v) >> 14) & 0x3)	/* FQD::NPC */

enum qm_mcr_queryfq_np_masks {
	qm_mcr_fqd_link_mask = BIT(24) - 1,
	qm_mcr_odp_seq_mask = BIT(14) - 1,
	qm_mcr_orp_nesn_mask = BIT(14) - 1,
	qm_mcr_orp_ea_hseq_mask = BIT(15) - 1,
	qm_mcr_orp_ea_tseq_mask = BIT(15) - 1,
	qm_mcr_orp_ea_hptr_mask = BIT(24) - 1,
	qm_mcr_orp_ea_tptr_mask = BIT(24) - 1,
	qm_mcr_pfdr_hptr_mask = BIT(24) - 1,
	qm_mcr_pfdr_tptr_mask = BIT(24) - 1,
	qm_mcr_is_mask = BIT(1) - 1,
	qm_mcr_frm_cnt_mask = BIT(24) - 1,
};

#define qm_mcr_np_get(np, field) \
	((np)->field & (qm_mcr_##field##_mask))

	/* Portal Management */
/**
 * qman_p_irqsource_add - add processing sources to be interrupt-driven
 * @bits: bitmask of QM_PIRQ_**I processing sources
 *
 * Adds processing sources that should be interrupt-driven (rather than
 * processed via qman_poll_***() functions).
 */
void qman_p_irqsource_add(struct qman_portal *p, u32 bits);

/**
 * qman_p_irqsource_remove - remove processing sources from being int-driven
 * @bits: bitmask of QM_PIRQ_**I processing sources
 *
 * Removes processing sources from being interrupt-driven, so that they will
 * instead be processed via qman_poll_***() functions.
 */
void qman_p_irqsource_remove(struct qman_portal *p, u32 bits);

/**
 * qman_affine_cpus - return a mask of cpus that have affine portals
 */
const cpumask_t *qman_affine_cpus(void);

/**
 * qman_affine_channel - return the channel ID of a portal
 * @cpu: the cpu whose affine portal is the subject of the query
 *
 * If @cpu is -1, the affine portal for the current CPU will be used. It is a
 * bug to call this function for any value of @cpu (other than -1) that is not a
 * member of the mask returned from qman_affine_cpus().
 */
u16 qman_affine_channel(int cpu);

/**
 * qman_get_affine_portal - return the portal pointer affine to cpu
 * @cpu: the cpu whose affine portal is the subject of the query
 */
struct qman_portal *qman_get_affine_portal(int cpu);

/**
 * qman_start_using_portal - register a device link for the portal user
 * @p: the portal that will be in use
 * @dev: the device that will use the portal
 *
 * Makes sure that the devices that use the portal are unbound when the
 * portal is unbound
 */
int qman_start_using_portal(struct qman_portal *p, struct device *dev);

/**
 * qman_p_poll_dqrr - process DQRR (fast-path) entries
 * @limit: the maximum number of DQRR entries to process
 *
 * Use of this function requires that DQRR processing not be interrupt-driven.
 * The return value represents the number of DQRR entries processed.
 */
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);

/**
 * qman_p_static_dequeue_add - Add pool channels to the portal SDQCR
 * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
 *
 * Adds a set of pool channels to the portal's static dequeue command register
 * (SDQCR). The requested pools are limited to those the portal has dequeue
 * access to.
 */
void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);

	/* FQ management */
/**
 * qman_create_fq - Allocates a FQ
 * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
 * @flags: bit-mask of QMAN_FQ_FLAG_*** options
 * @fq: memory for storing the 'fq', with callbacks filled in
 *
 * Creates a frame queue object for the given @fqid, unless the
 * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
 * dynamically allocated (or the function fails if none are available). Once
 * created, the caller should not touch the memory at 'fq' except as extended to
 * adjacent memory for user-defined fields (see the definition of "struct
 * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
 * pre-existing frame-queues that aren't to be otherwise interfered with, it
 * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
 * causes the driver to honour any context_b modifications requested in the
 * qm_init_fq() API, as this indicates the frame queue will be consumed by a
 * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
 * software portals, the context_b field is controlled by the driver and can't
 * be modified by the caller.
 */
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);

/**
 * qman_destroy_fq - Deallocates a FQ
 * @fq: the frame queue object to release
 *
 * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
 * not deallocated but the caller regains ownership, to do with as desired. The
 * FQ must be in the 'out-of-service' or in the 'parked' state.
 */
void qman_destroy_fq(struct qman_fq *fq);

/**
 * qman_fq_fqid - Queries the frame queue ID of a FQ object
 * @fq: the frame queue object to query
 */
u32 qman_fq_fqid(struct qman_fq *fq);

/**
 * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
 * @fq: the frame queue object to modify, must be 'parked' or new.
 * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
 * @opts: the FQ-modification settings, as defined in the low-level API
 *
 * The @opts parameter comes from the low-level portal API. Select
 * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
 * rather than parked. NB, @opts can be NULL.
 *
 * Note that some fields and options within @opts may be ignored or overwritten
 * by the driver;
 * 1. the 'count' and 'fqid' fields are always ignored (this operation only
 * affects one frame queue: @fq).
 * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
 * 'fqd' structure's 'context_b' field are sometimes overwritten;
 *   - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
 *     initialised to a value used by the driver for demux.
 *   - if context_b is initialised for demux, so is context_a in case stashing
 *     is requested (see item 4).
 * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
 * objects.)
 * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
 * 'dest::channel' field will be overwritten to match the portal used to issue
 * the command. If the WE_DESTWQ write-enable bit had already been set by the
 * caller, the channel workqueue will be left as-is, otherwise the write-enable
 * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
 * isn't set, the destination channel/workqueue fields and the write-enable bit
 * are left as-is.
 * 4. if the driver overwrites context_a/b for demux, then if
 * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
 * context_a.address fields and will leave the stashing fields provided by the
 * user alone, otherwise it will zero out the context_a.stashing fields.
 */
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
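
/*
 * Illustrative sketch only (not part of the original header): a typical
 * receive-side setup creates an FQ on a dynamically allocated FQID, points it
 * at a pool channel and schedules it. 'rx_dqrr_cb' and 'pool_channel' are
 * hypothetical:
 *
 *	struct qman_fq *fq = kzalloc(sizeof(*fq), GFP_KERNEL);
 *	struct qm_mcc_initfq opts = {};
 *
 *	fq->cb.dqrr = rx_dqrr_cb;
 *	if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq))
 *		goto err;
 *	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL);
 *	qm_fqd_set_destwq(&opts.fqd, pool_channel, 3);
 *	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
 *	if (qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts))
 *		goto err;
 */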

/**
 * qman_schedule_fq - Schedules a FQ
 * @fq: the frame queue object to schedule, must be 'parked'
 *
 * Schedules the frame queue, which must be Parked, which takes it to
 * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
 */
int qman_schedule_fq(struct qman_fq *fq);

/**
 * qman_retire_fq - Retires a FQ
 * @fq: the frame queue object to retire
 * @flags: FQ flags (QMAN_FQ_STATE*) if retirement completes immediately
 *
 * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
 * the retirement was started asynchronously, otherwise it returns negative for
 * failure. When this function returns zero, @flags is set to indicate whether
 * the retired FQ is empty and/or whether it has any ORL fragments (to show up
 * as ERNs). Otherwise the corresponding flags will be known when a subsequent
 * FQRN message shows up on the portal's message ring.
 *
 * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
 * Active state), the completion will be via the message ring as a FQRN - but
 * the corresponding callback may occur before this function returns!! Ie. the
 * caller should be prepared to accept the callback as the function is called,
 * not only once it has returned.
 */
int qman_retire_fq(struct qman_fq *fq, u32 *flags);

/**
 * qman_oos_fq - Puts a FQ "out of service"
 * @fq: the frame queue object to be put out-of-service, must be 'retired'
 *
 * The frame queue must be retired and empty, and if any order restoration list
 * was released as ERNs at the time of retirement, they must all be consumed.
 */
int qman_oos_fq(struct qman_fq *fq);

/*
 * qman_volatile_dequeue - Issue a volatile dequeue command
 * @fq: the frame queue object to dequeue from
 * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
 * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
 *
 * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
 * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
 * the VDQCR is already in use, otherwise returns non-zero for failure. If
 * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
 * the VDQCR command has finished executing (ie. once the callback for the last
 * DQRR entry resulting from the VDQCR command has been called). If not using
 * the FINISH flag, completion can be determined either by detecting the
 * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
 * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
 * for the QMAN_FQ_STATE_VDQCR bit to disappear.
 */
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
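
/*
 * Illustrative sketch only (not part of the original header): draining up to
 * 16 frames from a retired FQ, sleeping if the VDQCR is busy and returning
 * only once the command has fully executed (the frames are still delivered
 * through the FQ's normal dqrr callback):
 *
 *	int err = qman_volatile_dequeue(fq,
 *			QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH,
 *			QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_SET(16));
 */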

/**
 * qman_enqueue - Enqueue a frame to a frame queue
 * @fq: the frame queue object to enqueue to
 * @fd: a descriptor of the frame to be enqueued
 *
 * Fills an entry in the EQCR of portal @qm to enqueue the frame described by
 * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid'
 * field is ignored. The return value is non-zero on error, such as ring full.
 */
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd);

/**
 * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
 * @result: is set by the API to the base FQID of the allocated range
 * @count: the number of FQIDs required
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_alloc_fqid_range(u32 *result, u32 count);
#define qman_alloc_fqid(result) qman_alloc_fqid_range(result, 1)

/**
 * qman_release_fqid - Release the specified frame queue ID
 * @fqid: the FQID to be released back to the resource pool
 *
 * This function can also be used to seed the allocator with
 * FQID ranges that it can subsequently allocate from.
 * Returns 0 on success, or a negative error code.
 */
int qman_release_fqid(u32 fqid);

/**
 * qman_query_fq_np - Queries non-programmable FQD fields
 * @fq: the frame queue object to be queried
 * @np: storage for the queried FQD fields
 */
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
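
/*
 * Illustrative sketch only (not part of the original header): the result
 * fields are narrower than their storage, so they are normally read through
 * the qm_mcr_np_get() helper, e.g. to check how many frames remain enqueued
 * on an FQ:
 *
 *	struct qm_mcr_queryfq_np np;
 *
 *	if (!qman_query_fq_np(fq, &np) && qm_mcr_np_get(&np, frm_cnt))
 *		pr_info("FQ still holds %u frames\n",
 *			qm_mcr_np_get(&np, frm_cnt));
 */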

	/* Pool-channel management */
/**
 * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
 * @result: is set by the API to the base pool-channel ID of the allocated range
 * @count: the number of pool-channel IDs required
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_alloc_pool_range(u32 *result, u32 count);
#define qman_alloc_pool(result) qman_alloc_pool_range(result, 1)

/**
 * qman_release_pool - Release the specified pool-channel ID
 * @id: the pool-chan ID to be released back to the resource pool
 *
 * This function can also be used to seed the allocator with
 * pool-channel ID ranges that it can subsequently allocate from.
 * Returns 0 on success, or a negative error code.
 */
int qman_release_pool(u32 id);

	/* CGR management */
/**
 * qman_create_cgr - Register a congestion group object
 * @cgr: the 'cgr' object, with fields filled in
 * @flags: QMAN_CGR_FLAG_* values
 * @opts: optional state of CGR settings
 *
 * Registers this object to receive congestion entry/exit callbacks on the
 * portal affine to the cpu on which this API is executed. If opts is
 * NULL then only the callback (cgr->cb) function is registered. If @flags
 * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
 * any unspecified parameters) will be used rather than a modify hw command
 * (which only modifies the specified parameters).
 */
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts);
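
/*
 * Illustrative sketch only (not part of the original header): registering a
 * CGR with congestion-state notifications enabled and an entry threshold of
 * roughly 1 MB; 'cgrid' would come from qman_alloc_cgrid() and 'my_cscn_cb'
 * is a hypothetical qman_cb_cgr callback:
 *
 *	struct qman_cgr cgr = { .cgrid = cgrid, .cb = my_cscn_cb };
 *	struct qm_mcc_initcgr opts = {
 *		.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES),
 *		.cgr = { .cscn_en = QM_CGR_EN },
 *	};
 *
 *	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 1024 * 1024, 1);
 *	if (qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts))
 *		goto err;
 */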

/**
 * qman_delete_cgr - Deregisters a congestion group object
 * @cgr: the 'cgr' object to deregister
 *
 * "Unplugs" this CGR object from the portal affine to the cpu on which this API
 * is executed. This must be executed on the same affine portal on which it was
 * created.
 */
int qman_delete_cgr(struct qman_cgr *cgr);

/**
 * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
 * @cgr: the 'cgr' object to deregister
 *
 * This will select the proper CPU and run qman_delete_cgr() there.
 */
void qman_delete_cgr_safe(struct qman_cgr *cgr);

/**
 * qman_query_cgr_congested - Queries CGR's congestion status
 * @cgr: the 'cgr' object to query
 * @result: returns 'cgr's congestion status, 1 (true) if congested
 */
int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result);

/**
 * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
 * @result: is set by the API to the base CGR ID of the allocated range
 * @count: the number of CGR IDs required
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_alloc_cgrid_range(u32 *result, u32 count);
#define qman_alloc_cgrid(result) qman_alloc_cgrid_range(result, 1)

/**
 * qman_release_cgrid - Release the specified CGR ID
 * @id: the CGR ID to be released back to the resource pool
 *
 * This function can also be used to seed the allocator with
 * CGR ID ranges that it can subsequently allocate from.
 * Returns 0 on success, or a negative error code.
 */
int qman_release_cgrid(u32 id);

/**
 * qman_is_probed - Check if qman is probed
 *
 * Returns 1 if the qman driver successfully probed, -1 if the qman driver
 * failed to probe, or 0 if the qman driver has not probed yet.
 */
int qman_is_probed(void);

/**
 * qman_portals_probed - Check if all cpu bound qman portals are probed
 *
 * Returns 1 if all the required cpu bound qman portals successfully probed,
 * -1 if probe errors appeared, or 0 if the qman portals have not yet finished
 * probing.
 */
int qman_portals_probed(void);

/**
 * qman_dqrr_get_ithresh - Get coalesce interrupt threshold
 * @portal: portal to get the value for
 * @ithresh: threshold pointer
 */
void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh);

/**
 * qman_dqrr_set_ithresh - Set coalesce interrupt threshold
 * @portal: portal to set the new value on
 * @ithresh: new threshold value
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh);

/**
 * qman_portal_get_iperiod - Get coalesce interrupt period
 * @portal: portal to get the value for
 * @iperiod: period pointer
 */
void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod);

/**
 * qman_portal_set_iperiod - Set coalesce interrupt period
 * @portal: portal to set the new value on
 * @iperiod: new period value
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod);
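
/*
 * Illustrative sketch only (not part of the original header): interrupt
 * coalescing on a portal is tuned by raising the DQRR threshold (entries
 * accumulated before an interrupt) and the period (how long the portal waits
 * before asserting a pending interrupt). The values below are arbitrary:
 *
 *	// 'p' is a portal obtained earlier, e.g. via qman_get_affine_portal()
 *	if (qman_dqrr_set_ithresh(p, 8) || qman_portal_set_iperiod(p, 2048))
 *		pr_warn("could not update QMan interrupt coalescing\n");
 */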

#endif	/* __FSL_QMAN_H */