/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SN Platform GRU Driver
 *
 *              GRU HANDLE DEFINITION
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#ifndef __GRUHANDLES_H__
#define __GRUHANDLES_H__
#include "gru_instructions.h"

/*
 * Manifest constants for GRU Memory Map
 */
#define GRU_GSEG0_BASE		0
#define GRU_MCS_BASE		(64 * 1024 * 1024)
#define GRU_SIZE		(128UL * 1024 * 1024)

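/*
 * Layout note (derived from the constants above and below): each GRU
 * chiplet decodes a 128MB window.  The low 64MB holds the user GSEGs
 * (GRU_NUM_CCH contexts at GRU_GSEG_STRIDE); the upper 64MB starting at
 * GRU_MCS_BASE holds the MCS handles (TFM, TGH, CBE, TFH, CCH) at the
 * offsets listed under "Base addresses of handles".
 */
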
/* Handle & resource counts */
#define GRU_NUM_CB		128
#define GRU_NUM_DSR_BYTES	(32 * 1024)
#define GRU_NUM_TFM		16
#define GRU_NUM_TGH		24
#define GRU_NUM_CBE		128
#define GRU_NUM_TFH		128
#define GRU_NUM_CCH		16

/* Maximum resource counts that can be reserved by user programs */
#define GRU_NUM_USER_CBR	GRU_NUM_CBE
#define GRU_NUM_USER_DSR_BYTES	GRU_NUM_DSR_BYTES

/* Bytes per handle & handle stride. Code assumes all cb, tfh, cbe handles
 * are the same size and stride. */
#define GRU_HANDLE_BYTES	64
#define GRU_HANDLE_STRIDE	256

/* Base addresses of handles */
#define GRU_TFM_BASE		(GRU_MCS_BASE + 0x00000)
#define GRU_TGH_BASE		(GRU_MCS_BASE + 0x08000)
#define GRU_CBE_BASE		(GRU_MCS_BASE + 0x10000)
#define GRU_TFH_BASE		(GRU_MCS_BASE + 0x18000)
#define GRU_CCH_BASE		(GRU_MCS_BASE + 0x20000)

/* User gseg constants */
#define GRU_GSEG_STRIDE		(4 * 1024 * 1024)
#define GSEG_BASE(a)		((a) & ~(GRU_GSEG_PAGESIZE - 1))

/* Data segment constants */
#define GRU_DSR_AU_BYTES	1024
#define GRU_DSR_CL		(GRU_NUM_DSR_BYTES / GRU_CACHE_LINE_BYTES)
#define GRU_DSR_AU_CL		(GRU_DSR_AU_BYTES / GRU_CACHE_LINE_BYTES)
#define GRU_DSR_AU		(GRU_NUM_DSR_BYTES / GRU_DSR_AU_BYTES)

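/*
 * Worked numbers (a sketch; GRU_CACHE_LINE_BYTES comes from
 * gru_instructions.h and is assumed to be 64 here):
 *	GRU_DSR_AU    = 32768 / 1024 = 32 DSR allocation units per chiplet
 *	GRU_DSR_AU_CL = 1024 / 64    = 16 cache lines per allocation unit
 */
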
/* Control block constants */
#define GRU_CBR_AU_SIZE		2
#define GRU_CBR_AU		(GRU_NUM_CBE / GRU_CBR_AU_SIZE)

/* Convert resource counts to the number of AU */
#define GRU_DS_BYTES_TO_AU(n)	DIV_ROUND_UP(n, GRU_DSR_AU_BYTES)
#define GRU_CB_COUNT_TO_AU(n)	DIV_ROUND_UP(n, GRU_CBR_AU_SIZE)

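/*
 * Example (illustrative arithmetic only): a request for 8192 DSR bytes
 * maps to GRU_DS_BYTES_TO_AU(8192) = DIV_ROUND_UP(8192, 1024) = 8 AU,
 * and a request for 17 CBs maps to GRU_CB_COUNT_TO_AU(17) =
 * DIV_ROUND_UP(17, 2) = 9 AU.
 */
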
/* UV limits */
#define GRU_CHIPLETS_PER_HUB	2
#define GRU_HUBS_PER_BLADE	1
#define GRU_CHIPLETS_PER_BLADE	(GRU_HUBS_PER_BLADE * GRU_CHIPLETS_PER_HUB)

/* User GRU Gseg offsets */
#define GRU_CB_BASE		0
#define GRU_CB_LIMIT		(GRU_CB_BASE + GRU_HANDLE_STRIDE * GRU_NUM_CBE)
#define GRU_DS_BASE		0x20000
#define GRU_DS_LIMIT		(GRU_DS_BASE + GRU_NUM_DSR_BYTES)

/* Convert a GRU physical address to the chiplet offset */
#define GSEGPOFF(h)		((h) & (GRU_SIZE - 1))

/* Convert an arbitrary handle address to the beginning of the GRU segment */
#define GRUBASE(h)		((void *)((unsigned long)(h) & ~(GRU_SIZE - 1)))

/* Test a valid handle address to determine the type */
#define TYPE_IS(hn, h)		((h) >= GRU_##hn##_BASE && (h) <	\
		GRU_##hn##_BASE + GRU_NUM_##hn * GRU_HANDLE_STRIDE &&   \
		(((h) & (GRU_HANDLE_STRIDE - 1)) == 0))

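/*
 * Illustrative use of the three macros above (a sketch, not a reference
 * to a specific caller): given a handle address h, GRUBASE(h) recovers
 * the start of the 128MB chiplet window, GSEGPOFF(h) gives the offset of
 * h within that window, and TYPE_IS(CBE, GSEGPOFF(h)) is true only when
 * that offset lands on a stride-aligned CBE handle.
 */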

/* General addressing macros. */
static inline void *get_gseg_base_address(void *base, int ctxnum)
{
	return (void *)(base + GRU_GSEG0_BASE + GRU_GSEG_STRIDE * ctxnum);
}

static inline void *get_gseg_base_address_cb(void *base, int ctxnum, int line)
{
	return (void *)(get_gseg_base_address(base, ctxnum) +
			GRU_CB_BASE + GRU_HANDLE_STRIDE * line);
}

static inline void *get_gseg_base_address_ds(void *base, int ctxnum, int line)
{
	return (void *)(get_gseg_base_address(base, ctxnum) + GRU_DS_BASE +
			GRU_CACHE_LINE_BYTES * line);
}

static inline struct gru_tlb_fault_map *get_tfm(void *base, int ctxnum)
{
	return (struct gru_tlb_fault_map *)(base + GRU_TFM_BASE +
					ctxnum * GRU_HANDLE_STRIDE);
}

static inline struct gru_tlb_global_handle *get_tgh(void *base, int ctxnum)
{
	return (struct gru_tlb_global_handle *)(base + GRU_TGH_BASE +
					ctxnum * GRU_HANDLE_STRIDE);
}

static inline struct gru_control_block_extended *get_cbe(void *base, int ctxnum)
{
	return (struct gru_control_block_extended *)(base + GRU_CBE_BASE +
					ctxnum * GRU_HANDLE_STRIDE);
}

static inline struct gru_tlb_fault_handle *get_tfh(void *base, int ctxnum)
{
	return (struct gru_tlb_fault_handle *)(base + GRU_TFH_BASE +
					ctxnum * GRU_HANDLE_STRIDE);
}

static inline struct gru_context_configuration_handle *get_cch(void *base,
					int ctxnum)
{
	return (struct gru_context_configuration_handle *)(base +
				GRU_CCH_BASE + ctxnum * GRU_HANDLE_STRIDE);
}

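/* Recover the CB index from the address of a CB within a mapped gseg. */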
static inline unsigned long get_cb_number(void *cb)
{
	return (((unsigned long)cb - GRU_CB_BASE) % GRU_GSEG_PAGESIZE) /
					GRU_HANDLE_STRIDE;
}

/* Byte offset to a specific GRU chiplet (pnode = node number, chiplet = 0 or 1). */
static inline unsigned long gru_chiplet_paddr(unsigned long paddr, int pnode,
							int chiplet)
{
	return paddr + GRU_SIZE * (2 * pnode + chiplet);
}

static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet)
{
	return vaddr + GRU_SIZE * (2 * pnode + chiplet);
}
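
/*
 * Worked example of the chiplet addressing above (arithmetic only):
 * with GRU_SIZE = 128MB, pnode 2 / chiplet 1 selects the sixth 128MB
 * window, i.e. an offset of (2 * 2 + 1) * 128MB = 640MB from the base
 * address.
 */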

static inline struct gru_control_block_extended *gru_tfh_to_cbe(
					struct gru_tlb_fault_handle *tfh)
{
	unsigned long cbe;

	cbe = (unsigned long)tfh - GRU_TFH_BASE + GRU_CBE_BASE;
	return (struct gru_control_block_extended *)cbe;
}

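/*
 * The rebasing above works because the TFH and CBE arrays use the same
 * per-CB index and GRU_HANDLE_STRIDE, so shifting an address from
 * GRU_TFH_BASE to GRU_CBE_BASE lands on the CBE of the same CB.
 */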

/*
 * Global TLB Fault Map
 *	Bitmap of outstanding TLB misses needing interrupt/polling service.
 *
 */
struct gru_tlb_fault_map {
	unsigned long fault_bits[BITS_TO_LONGS(GRU_NUM_CBE)];
	unsigned long fill0[2];
	unsigned long done_bits[BITS_TO_LONGS(GRU_NUM_CBE)];
	unsigned long fill1[2];
};

/*
 * TGH - TLB Global Handle
 *	Used for TLB flushing.
 *
 */
struct gru_tlb_global_handle {
	unsigned int cmd:1;		/* DW 0 */
	unsigned int delresp:1;
	unsigned int opc:1;
	unsigned int fill1:5;

	unsigned int fill2:8;

	unsigned int status:2;
	unsigned long fill3:2;
	unsigned int state:3;
	unsigned long fill4:1;

	unsigned int cause:3;
	unsigned long fill5:37;

	unsigned long vaddr:64;		/* DW 1 */

	unsigned int asid:24;		/* DW 2 */
	unsigned int fill6:8;

	unsigned int pagesize:5;
	unsigned int fill7:11;

	unsigned int global:1;
	unsigned int fill8:15;

	unsigned long vaddrmask:39;	/* DW 3 */
	unsigned int fill9:9;
	unsigned int n:10;
	unsigned int fill10:6;

	unsigned int ctxbitmap:16;	/* DW 4 */
	unsigned long fill11[3];
};

enum gru_tgh_cmd {
	TGHCMD_START
};

enum gru_tgh_opc {
	TGHOP_TLBNOP,
	TGHOP_TLBINV
};

enum gru_tgh_status {
	TGHSTATUS_IDLE,
	TGHSTATUS_EXCEPTION,
	TGHSTATUS_ACTIVE
};

enum gru_tgh_state {
	TGHSTATE_IDLE,
	TGHSTATE_PE_INVAL,
	TGHSTATE_INTERRUPT_INVAL,
	TGHSTATE_WAITDONE,
	TGHSTATE_RESTART_CTX,
};

enum gru_tgh_cause {
	TGHCAUSE_RR_ECC,
	TGHCAUSE_TLB_ECC,
	TGHCAUSE_LRU_ECC,
	TGHCAUSE_PS_ECC,
	TGHCAUSE_MUL_ERR,
	TGHCAUSE_DATA_ERR,
	TGHCAUSE_SW_FORCE
};

/*
 * TFH - TLB Fault Handle
 *	Used for TLB dropins into the GRU TLB.
 *
 */
struct gru_tlb_fault_handle {
	unsigned int cmd:1;		/* DW 0 - low 32 */
	unsigned int delresp:1;
	unsigned int fill0:2;
	unsigned int opc:3;
	unsigned int fill1:9;

	unsigned int status:2;
	unsigned int fill2:2;
	unsigned int state:3;
	unsigned int fill3:1;

	unsigned int cause:6;
	unsigned int cb_int:1;
	unsigned int fill4:1;

	unsigned int indexway:12;	/* DW 0 - high 32 */
	unsigned int fill5:4;

	unsigned int ctxnum:4;
	unsigned int fill6:12;

	unsigned long missvaddr:64;	/* DW 1 */

	unsigned int missasid:24;	/* DW 2 */
	unsigned int fill7:8;
	unsigned int fillasid:24;
	unsigned int dirty:1;
	unsigned int gaa:2;
	unsigned long fill8:5;

	unsigned long pfn:41;		/* DW 3 */
	unsigned int fill9:7;
	unsigned int pagesize:5;
	unsigned int fill10:11;

	unsigned long fillvaddr:64;	/* DW 4 */

	unsigned long fill11[3];
};

enum gru_tfh_opc {
	TFHOP_NOOP,
	TFHOP_RESTART,
	TFHOP_WRITE_ONLY,
	TFHOP_WRITE_RESTART,
	TFHOP_EXCEPTION,
	TFHOP_USER_POLLING_MODE = 7,
};

enum tfh_status {
	TFHSTATUS_IDLE,
	TFHSTATUS_EXCEPTION,
	TFHSTATUS_ACTIVE,
};

enum tfh_state {
	TFHSTATE_INACTIVE,
	TFHSTATE_IDLE,
	TFHSTATE_MISS_UPM,
	TFHSTATE_MISS_FMM,
	TFHSTATE_HW_ERR,
	TFHSTATE_WRITE_TLB,
	TFHSTATE_RESTART_CBR,
};

/* TFH cause bits */
enum tfh_cause {
	TFHCAUSE_NONE,
	TFHCAUSE_TLB_MISS,
	TFHCAUSE_TLB_MOD,
	TFHCAUSE_HW_ERROR_RR,
	TFHCAUSE_HW_ERROR_MAIN_ARRAY,
	TFHCAUSE_HW_ERROR_VALID,
	TFHCAUSE_HW_ERROR_PAGESIZE,
	TFHCAUSE_INSTRUCTION_EXCEPTION,
	TFHCAUSE_UNCORRECTIBLE_ERROR,
};

/* GAA values */
#define GAA_RAM				0x0
#define GAA_NCRAM			0x2
#define GAA_MMIO			0x1
#define GAA_REGISTER			0x3

/* GRU paddr shift for pfn. (NOTE: shift is NOT by actual pagesize) */
#define GRU_PADDR_SHIFT			12

/*
 * Context Configuration handle
 *	Used to allocate resources to a GSEG context.
 *
 */
struct gru_context_configuration_handle {
	unsigned int cmd:1;			/* DW0 */
	unsigned int delresp:1;
	unsigned int opc:3;
	unsigned int unmap_enable:1;
	unsigned int req_slice_set_enable:1;
	unsigned int req_slice:2;
	unsigned int cb_int_enable:1;
	unsigned int tlb_int_enable:1;
	unsigned int tfm_fault_bit_enable:1;
	unsigned int tlb_int_select:4;

	unsigned int status:2;
	unsigned int state:2;
	unsigned int reserved2:4;

	unsigned int cause:4;
	unsigned int tfm_done_bit_enable:1;
	unsigned int unused:3;

	unsigned int dsr_allocation_map;

	unsigned long cbr_allocation_map;	/* DW1 */

	unsigned int asid[8];			/* DW 2 - 5 */
	unsigned short sizeavail[8];		/* DW 6 - 7 */
} __attribute__ ((packed));

enum gru_cch_opc {
	CCHOP_START = 1,
	CCHOP_ALLOCATE,
	CCHOP_INTERRUPT,
	CCHOP_DEALLOCATE,
	CCHOP_INTERRUPT_SYNC,
};

enum gru_cch_status {
	CCHSTATUS_IDLE,
	CCHSTATUS_EXCEPTION,
	CCHSTATUS_ACTIVE,
};

enum gru_cch_state {
	CCHSTATE_INACTIVE,
	CCHSTATE_MAPPED,
	CCHSTATE_ACTIVE,
	CCHSTATE_INTERRUPTED,
};

/* CCH Exception cause */
enum gru_cch_cause {
	CCHCAUSE_REGION_REGISTER_WRITE_ERROR = 1,
	CCHCAUSE_ILLEGAL_OPCODE = 2,
	CCHCAUSE_INVALID_START_REQUEST = 3,
	CCHCAUSE_INVALID_ALLOCATION_REQUEST = 4,
	CCHCAUSE_INVALID_DEALLOCATION_REQUEST = 5,
	CCHCAUSE_INVALID_INTERRUPT_REQUEST = 6,
	CCHCAUSE_CCH_BUSY = 7,
	CCHCAUSE_NO_CBRS_TO_ALLOCATE = 8,
	CCHCAUSE_BAD_TFM_CONFIG = 9,
	CCHCAUSE_CBR_RESOURCES_OVERSUBSCRIPED = 10,
	CCHCAUSE_DSR_RESOURCES_OVERSUBSCRIPED = 11,
	CCHCAUSE_CBR_DEALLOCATION_ERROR = 12,
};
/*
 * CBE - Control Block Extended
 *	Maintains internal GRU state for active CBs.
 *
 */
struct gru_control_block_extended {
	unsigned int reserved0:1;	/* DW 0 - low */
	unsigned int imacpy:3;
	unsigned int reserved1:4;
	unsigned int xtypecpy:3;
	unsigned int iaa0cpy:2;
	unsigned int iaa1cpy:2;
	unsigned int reserved2:1;
	unsigned int opccpy:8;
	unsigned int exopccpy:8;

	unsigned int idef2cpy:22;	/* DW 0 - high */
	unsigned int reserved3:10;

	unsigned int idef4cpy:22;	/* DW 1 */
	unsigned int reserved4:10;
	unsigned int idef4upd:22;
	unsigned int reserved5:10;

	unsigned long idef1upd:64;	/* DW 2 */

	unsigned long idef5cpy:64;	/* DW 3 */

	unsigned long idef6cpy:64;	/* DW 4 */

	unsigned long idef3upd:64;	/* DW 5 */

	unsigned long idef5upd:64;	/* DW 6 */

	unsigned int idef2upd:22;	/* DW 7 */
	unsigned int reserved6:10;

	unsigned int ecause:20;
	unsigned int cbrstate:4;
	unsigned int cbrexecstatus:8;
};

/* CBE fields for active BCOPY instructions */
#define cbe_baddr0	idef1upd
#define cbe_baddr1	idef3upd
#define cbe_src_cl	idef6cpy
#define cbe_nelemcur	idef5upd

enum gru_cbr_state {
	CBRSTATE_INACTIVE,
	CBRSTATE_IDLE,
	CBRSTATE_PE_CHECK,
	CBRSTATE_QUEUED,
	CBRSTATE_WAIT_RESPONSE,
	CBRSTATE_INTERRUPTED,
	CBRSTATE_INTERRUPTED_MISS_FMM,
	CBRSTATE_BUSY_INTERRUPT_MISS_FMM,
	CBRSTATE_INTERRUPTED_MISS_UPM,
	CBRSTATE_BUSY_INTERRUPTED_MISS_UPM,
	CBRSTATE_REQUEST_ISSUE,
	CBRSTATE_BUSY_INTERRUPT,
};

/* CBE cbrexecstatus bits - defined in gru_instructions.h */
/* CBE ecause bits - defined in gru_instructions.h */

/*
 * Convert a processor pagesize into the encoded pagesize used by the GRU.
 * The processor pagesize is passed as the log2 of the bytes per page
 * (i.e., PAGE_SHIFT).
 *	pagesize	log pagesize	grupagesize
 *	  4k			12	0
 *	 16k			14	1
 *	 64k			16	2
 *	256k			18	3
 *	  1m			20	4
 *	  2m			21	5
 *	  4m			22	6
 *	 16m			24	7
 *	 64m			26	8
 *	...
 */
#define GRU_PAGESIZE(sh)	((((sh) > 20 ? (sh) + 2 : (sh)) >> 1) - 6)
#define GRU_SIZEAVAIL(sh)	(1UL << GRU_PAGESIZE(sh))
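
/*
 * Worked examples (arithmetic only, matching the table above):
 *	GRU_PAGESIZE(12)  = (12 >> 1) - 6        = 0	(4k)
 *	GRU_PAGESIZE(21)  = ((21 + 2) >> 1) - 6  = 5	(2m)
 *	GRU_SIZEAVAIL(21) = 1UL << 5             = 0x20
 */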

/* minimum TLB purge count to ensure a full purge */
#define GRUMAXINVAL		1024UL

int cch_allocate(struct gru_context_configuration_handle *cch);
int cch_start(struct gru_context_configuration_handle *cch);
int cch_interrupt(struct gru_context_configuration_handle *cch);
int cch_deallocate(struct gru_context_configuration_handle *cch);
int cch_interrupt_sync(struct gru_context_configuration_handle *cch);
int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr,
	unsigned long vaddrmask, int asid, int pagesize, int global, int n,
	unsigned short ctxbitmap);
int tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
	int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
	int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh);
void tfh_exception(struct gru_tlb_fault_handle *tfh);

#endif /* __GRUHANDLES_H__ */