/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_hw.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_HW_H__
#define __EHEA_HW_H__

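/*
 * Register fields in this file are described with EHEA_BMASK_IBM(from, to)
 * (defined elsewhere in this driver), which uses IBM big-endian bit
 * numbering: bit 0 is the most significant bit of the 64-bit register and
 * bit 63 the least significant.  EHEA_BMASK_IBM(48, 63) therefore names
 * the low-order 16 bits, matching the u16 WQE counts stored through the
 * helpers at the end of this file, and a (0, 0) mask names a single MSB
 * flag.  Values are placed into a field with EHEA_BMASK_SET().
 */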
#define QPX_SQA_VALUE   EHEA_BMASK_IBM(48, 63)
#define QPX_RQ1A_VALUE  EHEA_BMASK_IBM(48, 63)
#define QPX_RQ2A_VALUE  EHEA_BMASK_IBM(48, 63)
#define QPX_RQ3A_VALUE  EHEA_BMASK_IBM(48, 63)

#define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x)

struct ehea_qptemm {
	u64 qpx_hcr;
	u64 qpx_c;
	u64 qpx_herr;
	u64 qpx_aer;
	u64 qpx_sqa;
	u64 qpx_sqc;
	u64 qpx_rq1a;
	u64 qpx_rq1c;
	u64 qpx_st;
	u64 qpx_aerr;
	u64 qpx_tenure;
	u64 qpx_reserved1[(0x098 - 0x058) / 8];
	u64 qpx_portp;
	u64 qpx_reserved2[(0x100 - 0x0A0) / 8];
	u64 qpx_t;
	u64 qpx_sqhp;
	u64 qpx_sqptp;
	u64 qpx_reserved3[(0x140 - 0x118) / 8];
	u64 qpx_sqwsize;
	u64 qpx_reserved4[(0x170 - 0x148) / 8];
	u64 qpx_sqsize;
	u64 qpx_reserved5[(0x1B0 - 0x178) / 8];
	u64 qpx_sigt;
	u64 qpx_wqecnt;
	u64 qpx_rq1hp;
	u64 qpx_rq1ptp;
	u64 qpx_rq1size;
	u64 qpx_reserved6[(0x220 - 0x1D8) / 8];
	u64 qpx_rq1wsize;
	u64 qpx_reserved7[(0x240 - 0x228) / 8];
	u64 qpx_pd;
	u64 qpx_scqn;
	u64 qpx_rcqn;
	u64 qpx_aeqn;
	u64 reserved49;
	u64 qpx_ram;
	u64 qpx_reserved8[(0x300 - 0x270) / 8];
	u64 qpx_rq2a;
	u64 qpx_rq2c;
	u64 qpx_rq2hp;
	u64 qpx_rq2ptp;
	u64 qpx_rq2size;
	u64 qpx_rq2wsize;
	u64 qpx_rq2th;
	u64 qpx_rq3a;
	u64 qpx_rq3c;
	u64 qpx_rq3hp;
	u64 qpx_rq3ptp;
	u64 qpx_rq3size;
	u64 qpx_rq3wsize;
	u64 qpx_rq3th;
	u64 qpx_lpn;
	u64 qpx_reserved9[(0x400 - 0x378) / 8];
	u64 reserved_ext[(0x500 - 0x400) / 8];
	u64 reserved2[(0x1000 - 0x500) / 8];
};
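
/*
 * The reserved arrays above encode absolute byte offsets into the hardware
 * register page, so the struct layout must match the chip exactly.  A
 * minimal compile-time sanity check could look like the sketch below
 * (editor's illustration, not part of the original driver; assumes
 * BUILD_BUG_ON() from <linux/bug.h> is available here):
 */
static inline void ehea_qptemm_layout_check(void)
{
	/* qpx_reserved1 spans 0x058-0x098, so qpx_portp sits at 0x98 */
	BUILD_BUG_ON(QPTEMM_OFFSET(qpx_portp) != 0x98);
	/* qpx_reserved2 spans 0x0A0-0x100, so qpx_t sits at 0x100 */
	BUILD_BUG_ON(QPTEMM_OFFSET(qpx_t) != 0x100);
	/* the trailing reserved arrays pad the map out to one 4 KiB page */
	BUILD_BUG_ON(sizeof(struct ehea_qptemm) != 0x1000);
}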

#define MRx_HCR_LPARID_VALID EHEA_BMASK_IBM(0, 0)

#define MRMWMM_OFFSET(x) offsetof(struct ehea_mrmwmm, x)

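/*
 * Register image for memory regions / memory windows (the mrx_ prefix
 * suggests "memory region"; that reading is an assumption from the
 * naming).  The gap after the first few control registers pads the map
 * to 0x200, where the 64-entry mrx_ctl[] array completes a 0x400-byte
 * region.
 */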
struct ehea_mrmwmm {
	u64 mrx_hcr;
	u64 mrx_c;
	u64 mrx_herr;
	u64 mrx_aer;
	u64 mrx_pp;
	u64 reserved1;
	u64 reserved2;
	u64 reserved3;
	u64 reserved4[(0x200 - 0x40) / 8];
	u64 mrx_ctl[64];
};

#define QPEDMM_OFFSET(x) offsetof(struct ehea_qpedmm, x)

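/*
 * Extended per-QP register image; the leading reserved block places the
 * first register, qpedx_phh, at offset 0x400.  The rrl/rrrkey/rrva
 * register triple repeats four times, with the first three instances
 * padded out to 32 bytes each.
 */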
struct ehea_qpedmm {
	u64 reserved0[(0x400) / 8];
	u64 qpedx_phh;
	u64 qpedx_ppsgp;
	u64 qpedx_ppsgu;
	u64 qpedx_ppdgp;
	u64 qpedx_ppdgu;
	u64 qpedx_aph;
	u64 qpedx_apsgp;
	u64 qpedx_apsgu;
	u64 qpedx_apdgp;
	u64 qpedx_apdgu;
	u64 qpedx_apav;
	u64 qpedx_apsav;
	u64 qpedx_hcr;
	u64 reserved1[4];
	u64 qpedx_rrl0;
	u64 qpedx_rrrkey0;
	u64 qpedx_rrva0;
	u64 reserved2;
	u64 qpedx_rrl1;
	u64 qpedx_rrrkey1;
	u64 qpedx_rrva1;
	u64 reserved3;
	u64 qpedx_rrl2;
	u64 qpedx_rrrkey2;
	u64 qpedx_rrva2;
	u64 reserved4;
	u64 qpedx_rrl3;
	u64 qpedx_rrrkey3;
	u64 qpedx_rrva3;
};

#define CQX_FECADDER EHEA_BMASK_IBM(32, 63)
#define CQX_FEC_CQE_CNT EHEA_BMASK_IBM(32, 63)
#define CQX_N1_GENERATE_COMP_EVENT EHEA_BMASK_IBM(0, 0)
#define CQX_EP_EVENT_PENDING EHEA_BMASK_IBM(0, 0)

#define CQTEMM_OFFSET(x) offsetof(struct ehea_cqtemm, x)

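/*
 * Completion-queue register image.  Judging by the names, cqx_fec holds
 * a CQE count (CQX_FEC_CQE_CNT, the low 32 bits) and cqx_feca acts as
 * its adder (CQX_FECADDER): ehea_update_feca() below posts the number of
 * processed CQEs there.  cqx_n0/cqx_n1 and cqx_ep carry the single-MSB
 * notification and event-pending flags defined above.
 */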
struct ehea_cqtemm {
	u64 cqx_hcr;
	u64 cqx_c;
	u64 cqx_herr;
	u64 cqx_aer;
	u64 cqx_ptp;
	u64 cqx_tp;
	u64 cqx_fec;
	u64 cqx_feca;
	u64 cqx_ep;
	u64 cqx_eq;
	u64 reserved1;
	u64 cqx_n0;
	u64 cqx_n1;
	u64 reserved2[(0x1000 - 0x60) / 8];
};

#define EQTEMM_OFFSET(x) offsetof(struct ehea_eqtemm, x)

struct ehea_eqtemm {
	u64 eqx_hcr;
	u64 eqx_c;
	u64 eqx_herr;
	u64 eqx_aer;
	u64 eqx_ptp;
	u64 eqx_tp;
	u64 eqx_ssba;
	u64 eqx_psba;
	u64 eqx_cec;
	u64 eqx_meql;
	u64 eqx_xisbi;
	u64 eqx_xisc;
	u64 eqx_it;
};

/*
 * These access functions will be changed when the discussion about
 * the new access methods for POWER has settled.
 */

static inline u64 epa_load(struct h_epa epa, u32 offset)
{
	return __raw_readq((void __iomem *)(epa.addr + offset));
}

static inline void epa_store(struct h_epa epa, u32 offset, u64 value)
{
	__raw_writeq(value, (void __iomem *)(epa.addr + offset));
	epa_load(epa, offset);	/* synchronize explicitly to eHEA */
}

static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
{
	__raw_writeq(value, (void __iomem *)(epa.addr + offset));
}
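
/*
 * epa_store() flushes the posted MMIO write by reading the register back
 * before returning, per the "synchronize explicitly" comment above;
 * epa_store_acc() omits the read-back and is the cheaper variant used
 * below for doorbell-style updates where completion of the write need
 * not be observed immediately.
 */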

#define epa_store_cq(epa, offset, value)\
	epa_store(epa, CQTEMM_OFFSET(offset), value)
#define epa_load_cq(epa, offset)\
	epa_load(epa, CQTEMM_OFFSET(offset))
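
/*
 * Wrappers that turn a CQ register name into its byte offset, e.g.
 *
 *	epa_store_cq(epa, cqx_n1, val);
 *
 * expands to
 *
 *	epa_store(epa, CQTEMM_OFFSET(cqx_n1), val);
 *
 * as used by ehea_reset_cq_n1() below.
 */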

static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;
	epa_store_acc(epa, QPTEMM_OFFSET(qpx_sqa),
		      EHEA_BMASK_SET(QPX_SQA_VALUE, nr_wqes));
}

static inline void ehea_update_rq3a(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;
	epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq3a),
		      EHEA_BMASK_SET(QPX_RQ3A_VALUE, nr_wqes));
}

static inline void ehea_update_rq2a(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;
	epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq2a),
		      EHEA_BMASK_SET(QPX_RQ2A_VALUE, nr_wqes));
}

static inline void ehea_update_rq1a(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;
	epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq1a),
		      EHEA_BMASK_SET(QPX_RQ1A_VALUE, nr_wqes));
}
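
/*
 * All four update helpers above share one pattern: mask the 16-bit WQE
 * count into IBM bits 48-63 and post it with the unsynchronized store.
 * The "A" in the SQA/RQ1A/RQ2A/RQ3A register names plausibly stands for
 * "adder", i.e. the written count is added to the queue's available
 * entries rather than replacing them; that reading is inferred from the
 * naming (cf. CQX_FECADDER), not stated in this file.
 */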

static inline void ehea_update_feca(struct ehea_cq *cq, u32 nr_cqes)
{
	struct h_epa epa = cq->epas.kernel;
	epa_store_acc(epa, CQTEMM_OFFSET(cqx_feca),
		      EHEA_BMASK_SET(CQX_FECADDER, nr_cqes));
}

static inline void ehea_reset_cq_n1(struct ehea_cq *cq)
{
	struct h_epa epa = cq->epas.kernel;
	epa_store_cq(epa, cqx_n1,
		     EHEA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, 1));
}

static inline void ehea_reset_cq_ep(struct ehea_cq *my_cq)
{
	struct h_epa epa = my_cq->epas.kernel;
	epa_store_acc(epa, CQTEMM_OFFSET(cqx_ep),
		      EHEA_BMASK_SET(CQX_EP_EVENT_PENDING, 0));
}
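
/*
 * ehea_reset_cq_ep() and ehea_reset_cq_n1() are naturally paired when
 * re-arming completion events after a polling pass, along the lines of
 * the sketch below (editor's illustration; the actual call sites live
 * in the driver proper, outside this header):
 *
 *	ehea_reset_cq_ep(cq);	// acknowledge the pending event
 *	ehea_reset_cq_n1(cq);	// request the next completion event
 */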

#endif	/* __EHEA_HW_H__ */