/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "dpaa_sys.h"

#include <soc/fsl/qman.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>

#if defined(CONFIG_FSL_PAMU)
#include <asm/fsl_pamu_stash.h>
#endif

struct qm_mcr_querywq {
	u8 verb;
	u8 result;
	u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
	u8 __reserved[28];
	u32 wq_len[8];
} __packed;

static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
{
	return wq->channel_wq >> 3;
}
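
/*
 * Illustrative example (not part of the driver): the low 3 bits of
 * channel_wq are the ignored wq position, so channel_wq == 0x0043
 * makes qm_mcr_querywq_get_chan() return channel 0x8 (0x43 >> 3).
 */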

struct __qm_mcr_querycongestion {
	u32 state[8];
};

/* "Query Congestion Group State" */
struct qm_mcr_querycongestion {
	u8 verb;
	u8 result;
	u8 __reserved[30];
	/* Access this struct using qman_cgrs_get() */
	struct __qm_mcr_querycongestion state;
} __packed;

/* "Query CGR" */
struct qm_mcr_querycgr {
	u8 verb;
	u8 result;
	u16 __reserved1;
	struct __qm_mc_cgr cgr; /* CGR fields */
	u8 __reserved2[6];
	u8 i_bcnt_hi;	/* high 8-bits of 40-bit "Instant" */
	__be32 i_bcnt_lo;	/* low 32-bits of 40-bit */
	u8 __reserved3[3];
	u8 a_bcnt_hi;	/* high 8-bits of 40-bit "Average" */
	__be32 a_bcnt_lo;	/* low 32-bits of 40-bit */
	__be32 cscn_targ_swp[4];
} __packed;

static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
{
	return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
}
static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
{
	return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
}
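
/*
 * Illustrative example (values are hypothetical): the hi/lo pair reassembles
 * the 40-bit byte count, e.g. i_bcnt_hi == 0x01 with i_bcnt_lo == 0x00000000
 * makes qm_mcr_querycgr_i_get64() return 0x100000000 (4 GiB).
 */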

/* Congestion Groups */

/*
 * This wrapper represents a bit-array for the state of the 256 QMan congestion
 * groups. It is also used as a *mask* for congestion groups, e.g. so we ignore
 * those that don't concern us. We harness the structure and accessor details
 * already used in the management command to query congestion groups.
 */
#define CGR_BITS_PER_WORD 5
#define CGR_WORD(x)	((x) >> CGR_BITS_PER_WORD)
#define CGR_BIT(x)	(BIT(31) >> ((x) & 0x1f))
#define CGR_NUM	(sizeof(struct __qm_mcr_querycongestion) << 3)

struct qman_cgrs {
	struct __qm_mcr_querycongestion q;
};

static inline void qman_cgrs_init(struct qman_cgrs *c)
{
	memset(c, 0, sizeof(*c));
}

static inline void qman_cgrs_fill(struct qman_cgrs *c)
{
	memset(c, 0xff, sizeof(*c));
}

static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
{
	return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
}

static inline void qman_cgrs_cp(struct qman_cgrs *dest,
				const struct qman_cgrs *src)
{
	*dest = *src;
}

static inline void qman_cgrs_and(struct qman_cgrs *dest,
			const struct qman_cgrs *a, const struct qman_cgrs *b)
{
	int ret;
	u32 *_d = dest->q.state;
	const u32 *_a = a->q.state;
	const u32 *_b = b->q.state;

	for (ret = 0; ret < 8; ret++)
		*_d++ = *_a++ & *_b++;
}

static inline void qman_cgrs_xor(struct qman_cgrs *dest,
			const struct qman_cgrs *a, const struct qman_cgrs *b)
{
	int ret;
	u32 *_d = dest->q.state;
	const u32 *_a = a->q.state;
	const u32 *_b = b->q.state;

	for (ret = 0; ret < 8; ret++)
		*_d++ = *_a++ ^ *_b++;
}
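
/*
 * Illustrative sketch only, not part of the driver API: shows how the
 * CGR_WORD()/CGR_BIT() macros index the 256-bit state array to build a
 * single-CGR mask, which can then be AND-ed against a queried state via
 * qman_cgrs_and(). The helper name is hypothetical.
 */
static inline void qman_cgrs_example_set_one(struct qman_cgrs *mask, u8 cgr)
{
	/* start from an all-zero mask, then select just this CGR's bit */
	qman_cgrs_init(mask);
	mask->q.state[CGR_WORD(cgr)] |= CGR_BIT(cgr);
}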

void qman_init_cgr_all(void);

struct qm_portal_config {
	/* Portal addresses */
	void *addr_virt_ce;
	void __iomem *addr_virt_ci;
	struct device *dev;
	struct iommu_domain *iommu_domain;
	/* Allow these to be joined in lists */
	struct list_head list;
	/* User-visible portal configuration settings */
	/* portal is affined to this cpu */
	int cpu;
	/* portal interrupt line */
	int irq;
	/*
	 * the portal's dedicated channel id, used when initialising
	 * frame queues to target this portal when scheduled
	 */
	u16 channel;
	/*
	 * mask of pool channels this portal has dequeue access to
	 * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask)
	 */
	u32 pools;
};
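
/*
 * Illustrative sketch only (hypothetical values, not taken from the driver):
 * a portal configuration affined to CPU 0, on IRQ 16, with a hypothetical
 * dedicated channel id of 0 and dequeue access to pool channels 1 and 2,
 * expressed with QM_SDQCR_CHANNELS_POOL() as the 'pools' comment above
 * describes:
 *
 *	struct qm_portal_config example_pcfg = {
 *		.cpu	 = 0,
 *		.irq	 = 16,
 *		.channel = 0,
 *		.pools	 = QM_SDQCR_CHANNELS_POOL(1) |
 *			   QM_SDQCR_CHANNELS_POOL(2),
 *	};
 */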

/* Revision info (for errata and feature handling) */
#define QMAN_REV11 0x0101
#define QMAN_REV12 0x0102
#define QMAN_REV20 0x0200
#define QMAN_REV30 0x0300
#define QMAN_REV31 0x0301
#define QMAN_REV32 0x0302
extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */

#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
extern struct gen_pool *qm_fqalloc; /* FQID allocator */
extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */
extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
u32 qm_get_pools_sdqcr(void);

int qman_wq_alloc(void);
#ifdef CONFIG_FSL_PAMU
#define qman_liodn_fixup __qman_liodn_fixup
#else
static inline void qman_liodn_fixup(u16 channel)
{
}
#endif
void __qman_liodn_fixup(u16 channel);
void qman_set_sdest(u16 channel, unsigned int cpu_idx);

struct qman_portal *qman_create_affine_portal(
			const struct qm_portal_config *config,
			const struct qman_cgrs *cgrs);
const struct qm_portal_config *qman_destroy_affine_portal(void);

/*
 * qman_query_fq - Queries FQD fields (via h/w query command)
 * @fq: the frame queue object to be queried
 * @fqd: storage for the queried FQD fields
 */
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);

int qman_alloc_fq_table(u32 num_fqids);

/*   QMan s/w corenet portal, low-level i/face	 */

/*
 * For qm_dqrr_sdqcr_set(): choose one SOURCE, one COUNT and one dequeue
 * TYPE, and choose an 8-bit TOKEN.
 * If SOURCE == CHANNELS,
 *   choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n);
 *   you can choose DEDICATED_PRECEDENCE if the portal channel should have
 *   priority.
 * If SOURCE == SPECIFICWQ,
 *     either select the work-queue ID with SPECIFICWQ_WQ(), or select the
 *     channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
 *     work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get
 *     the same value.
 */
#define QM_SDQCR_SOURCE_CHANNELS	0x0
#define QM_SDQCR_SOURCE_SPECIFICWQ	0x40000000
#define QM_SDQCR_COUNT_EXACT1		0x0
#define QM_SDQCR_COUNT_UPTO3		0x20000000
#define QM_SDQCR_DEDICATED_PRECEDENCE	0x10000000
#define QM_SDQCR_TYPE_MASK		0x03000000
#define QM_SDQCR_TYPE_NULL		0x0
#define QM_SDQCR_TYPE_PRIO_QOS		0x01000000
#define QM_SDQCR_TYPE_ACTIVE_QOS	0x02000000
#define QM_SDQCR_TYPE_ACTIVE		0x03000000
#define QM_SDQCR_TOKEN_MASK		0x00ff0000
#define QM_SDQCR_TOKEN_SET(v)		(((v) & 0xff) << 16)
#define QM_SDQCR_TOKEN_GET(v)		(((v) >> 16) & 0xff)
#define QM_SDQCR_CHANNELS_DEDICATED	0x00008000
#define QM_SDQCR_SPECIFICWQ_MASK	0x000000f7
#define QM_SDQCR_SPECIFICWQ_DEDICATED	0x00000000
#define QM_SDQCR_SPECIFICWQ_POOL(n)	((n) << 4)
#define QM_SDQCR_SPECIFICWQ_WQ(n)	(n)
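
/*
 * Illustrative sketch only (hypothetical macro, not used by the driver): an
 * SDQCR value composed per the rules above - dedicated-channel source, up to
 * three frames per dequeue, "active" dequeue type, token 0xab.
 */
#define QM_SDQCR_EXAMPLE	(QM_SDQCR_SOURCE_CHANNELS | \
				 QM_SDQCR_COUNT_UPTO3 | \
				 QM_SDQCR_TYPE_ACTIVE | \
				 QM_SDQCR_TOKEN_SET(0xab) | \
				 QM_SDQCR_CHANNELS_DEDICATED)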

/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
#define QM_VDQCR_FQID_MASK		0x00ffffff
#define QM_VDQCR_FQID(n)		((n) & QM_VDQCR_FQID_MASK)

/*
 * Used by all portal interrupt registers except 'inhibit'.
 * Channels with frame availability.
 */
#define QM_PIRQ_DQAVAIL	0x0000ffff

/* The DQAVAIL interrupt fields break down into these bits: */
#define QM_DQAVAIL_PORTAL	0x8000		/* Portal channel */
#define QM_DQAVAIL_POOL(n)	(0x8000 >> (n))	/* Pool channel, n==[1..15] */
#define QM_DQAVAIL_MASK		0xffff
/* This mask contains all the "irqsource" bits visible to API users */
#define QM_PIRQ_VISIBLE	(QM_PIRQ_SLOW | QM_PIRQ_DQRI)

extern struct qman_portal *affine_portals[NR_CPUS];
extern struct qman_portal *qman_dma_portal;
const struct qm_portal_config *qman_get_qm_portal_config(
						struct qman_portal *portal);

unsigned int qm_get_fqid_maxcnt(void);

int qman_shutdown_fq(u32 fqid);

int qman_requires_cleanup(void);
void qman_done_cleanup(void);
void qman_enable_irqs(void);