• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * SN Platform GRU Driver
3  *
4  *              GRU HANDLE DEFINITION
5  *
6  *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 2 of the License, or
11  *  (at your option) any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, write to the Free Software
20  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
21  */
22 
23 #ifndef __GRUHANDLES_H__
24 #define __GRUHANDLES_H__
25 #include "gru_instructions.h"
26 
27 /*
28  * Manifest constants for GRU Memory Map
29  */
30 #define GRU_GSEG0_BASE		0
31 #define GRU_MCS_BASE		(64 * 1024 * 1024)
32 #define GRU_SIZE		(128UL * 1024 * 1024)
33 
34 /* Handle & resource counts */
35 #define GRU_NUM_CB		128
36 #define GRU_NUM_DSR_BYTES	(32 * 1024)
37 #define GRU_NUM_TFM		16
38 #define GRU_NUM_TGH		24
39 #define GRU_NUM_CBE		128
40 #define GRU_NUM_TFH		128
41 #define GRU_NUM_CCH		16
42 #define GRU_NUM_GSH		1
43 
44 /* Maximum resource counts that can be reserved by user programs */
45 #define GRU_NUM_USER_CBR	GRU_NUM_CBE
46 #define GRU_NUM_USER_DSR_BYTES	GRU_NUM_DSR_BYTES
47 
48 /* Bytes per handle & handle stride. Code assumes all cb, tfh, cbe handles
49  * are the same */
50 #define GRU_HANDLE_BYTES	64
51 #define GRU_HANDLE_STRIDE	256
52 
/* Base addresses of handles (chiplet offsets within the kernel MCS space) */
#define GRU_TFM_BASE		(GRU_MCS_BASE + 0x00000)
#define GRU_TGH_BASE		(GRU_MCS_BASE + 0x08000)
#define GRU_CBE_BASE		(GRU_MCS_BASE + 0x10000)
#define GRU_TFH_BASE		(GRU_MCS_BASE + 0x18000)
#define GRU_CCH_BASE		(GRU_MCS_BASE + 0x20000)
#define GRU_GSH_BASE		(GRU_MCS_BASE + 0x30000)

/* User gseg constants (GRU_GSEG_PAGESIZE comes from gru_instructions.h) */
#define GRU_GSEG_STRIDE		(4 * 1024 * 1024)
#define GSEG_BASE(a)		((a) & ~(GRU_GSEG_PAGESIZE - 1))

/* Data segment constants */
#define GRU_DSR_AU_BYTES	1024	/* bytes per DSR allocation unit */
#define GRU_DSR_CL		(GRU_NUM_DSR_BYTES / GRU_CACHE_LINE_BYTES)
#define GRU_DSR_AU_CL		(GRU_DSR_AU_BYTES / GRU_CACHE_LINE_BYTES)
#define GRU_DSR_AU		(GRU_NUM_DSR_BYTES / GRU_DSR_AU_BYTES)

/* Control block constants */
#define GRU_CBR_AU_SIZE		2	/* CBRs per allocation unit */
#define GRU_CBR_AU		(GRU_NUM_CBE / GRU_CBR_AU_SIZE)

/* Convert resource counts to the number of AU */
#define GRU_DS_BYTES_TO_AU(n)	DIV_ROUND_UP(n, GRU_DSR_AU_BYTES)
#define GRU_CB_COUNT_TO_AU(n)	DIV_ROUND_UP(n, GRU_CBR_AU_SIZE)

/* UV limits */
#define GRU_CHIPLETS_PER_HUB	2
#define GRU_HUBS_PER_BLADE	1
#define GRU_CHIPLETS_PER_BLADE	(GRU_HUBS_PER_BLADE * GRU_CHIPLETS_PER_HUB)

/* User GRU Gseg offsets */
#define GRU_CB_BASE		0
#define GRU_CB_LIMIT		(GRU_CB_BASE + GRU_HANDLE_STRIDE * GRU_NUM_CBE)
#define GRU_DS_BASE		0x20000
#define GRU_DS_LIMIT		(GRU_DS_BASE + GRU_NUM_DSR_BYTES)

/* Convert a GRU physical address to the chiplet offset */
#define GSEGPOFF(h) 		((h) & (GRU_SIZE - 1))

/* Convert an arbitrary handle address to the beginning of the GRU segment */
#define GRUBASE(h)		((void *)((unsigned long)(h) & ~(GRU_SIZE - 1)))
95 
96 /* General addressing macros. */
get_gseg_base_address(void * base,int ctxnum)97 static inline void *get_gseg_base_address(void *base, int ctxnum)
98 {
99 	return (void *)(base + GRU_GSEG0_BASE + GRU_GSEG_STRIDE * ctxnum);
100 }
101 
get_gseg_base_address_cb(void * base,int ctxnum,int line)102 static inline void *get_gseg_base_address_cb(void *base, int ctxnum, int line)
103 {
104 	return (void *)(get_gseg_base_address(base, ctxnum) +
105 			GRU_CB_BASE + GRU_HANDLE_STRIDE * line);
106 }
107 
get_gseg_base_address_ds(void * base,int ctxnum,int line)108 static inline void *get_gseg_base_address_ds(void *base, int ctxnum, int line)
109 {
110 	return (void *)(get_gseg_base_address(base, ctxnum) + GRU_DS_BASE +
111 			GRU_CACHE_LINE_BYTES * line);
112 }
113 
get_tfm(void * base,int ctxnum)114 static inline struct gru_tlb_fault_map *get_tfm(void *base, int ctxnum)
115 {
116 	return (struct gru_tlb_fault_map *)(base + GRU_TFM_BASE +
117 					ctxnum * GRU_HANDLE_STRIDE);
118 }
119 
get_tgh(void * base,int ctxnum)120 static inline struct gru_tlb_global_handle *get_tgh(void *base, int ctxnum)
121 {
122 	return (struct gru_tlb_global_handle *)(base + GRU_TGH_BASE +
123 					ctxnum * GRU_HANDLE_STRIDE);
124 }
125 
get_cbe(void * base,int ctxnum)126 static inline struct gru_control_block_extended *get_cbe(void *base, int ctxnum)
127 {
128 	return (struct gru_control_block_extended *)(base + GRU_CBE_BASE +
129 					ctxnum * GRU_HANDLE_STRIDE);
130 }
131 
get_tfh(void * base,int ctxnum)132 static inline struct gru_tlb_fault_handle *get_tfh(void *base, int ctxnum)
133 {
134 	return (struct gru_tlb_fault_handle *)(base + GRU_TFH_BASE +
135 					ctxnum * GRU_HANDLE_STRIDE);
136 }
137 
get_cch(void * base,int ctxnum)138 static inline struct gru_context_configuration_handle *get_cch(void *base,
139 					int ctxnum)
140 {
141 	return (struct gru_context_configuration_handle *)(base +
142 				GRU_CCH_BASE + ctxnum * GRU_HANDLE_STRIDE);
143 }
144 
get_cb_number(void * cb)145 static inline unsigned long get_cb_number(void *cb)
146 {
147 	return (((unsigned long)cb - GRU_CB_BASE) % GRU_GSEG_PAGESIZE) /
148 					GRU_HANDLE_STRIDE;
149 }
150 
/* byte offset to a specific GRU chiplet. (p=pnode, c=chiplet (0 or 1)) */
gru_chiplet_paddr(unsigned long paddr,int pnode,int chiplet)152 static inline unsigned long gru_chiplet_paddr(unsigned long paddr, int pnode,
153 							int chiplet)
154 {
155 	return paddr + GRU_SIZE * (2 * pnode  + chiplet);
156 }
157 
gru_chiplet_vaddr(void * vaddr,int pnode,int chiplet)158 static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet)
159 {
160 	return vaddr + GRU_SIZE * (2 * pnode  + chiplet);
161 }
162 
163 
164 
/*
 * Global TLB Fault Map
 * 	Bitmap of outstanding TLB misses needing interrupt/polling service.
 *
 * Hardware-defined layout — do not reorder or resize fields.
 */
struct gru_tlb_fault_map {
	unsigned long fault_bits[BITS_TO_LONGS(GRU_NUM_CBE)];	/* one bit per CBE */
	unsigned long fill0[2];					/* hardware padding */
	unsigned long done_bits[BITS_TO_LONGS(GRU_NUM_CBE)];	/* one bit per CBE */
	unsigned long fill1[2];					/* hardware padding */
};
176 
/*
 * TGH - TLB Global Handle
 * 	Used for TLB flushing.
 *
 * Hardware-defined bitfield layout — field order, widths and fill padding
 * must not be changed.  See tgh_invalidate() for how the operand fields
 * are loaded before a TGHOP_TLBINV command is issued.
 */
struct gru_tlb_global_handle {
	unsigned int cmd:1;		/* DW 0 */	/* set by start_instruction() */
	unsigned int delresp:1;
	unsigned int opc:1;		/* enum gru_tgh_opc */
	unsigned int fill1:5;

	unsigned int fill2:8;

	unsigned int status:2;		/* enum gru_tgh_status */
	unsigned long fill3:2;
	unsigned int state:3;		/* enum gru_tgh_state */
	unsigned long fill4:1;

	unsigned int cause:3;
	unsigned long fill5:37;

	unsigned long vaddr:64;		/* DW 1 */	/* virtual address to invalidate */

	unsigned int asid:24;		/* DW 2 */
	unsigned int fill6:8;

	unsigned int pagesize:5;	/* GRU-encoded pagesize (GRU_PAGESIZE()) */
	unsigned int fill7:11;

	unsigned int global:1;
	unsigned int fill8:15;

	unsigned long vaddrmask:39;	/* DW 3 */
	unsigned int fill9:9;
	unsigned int n:10;		/* number of pages to invalidate */
	unsigned int fill10:6;

	unsigned int ctxbitmap:16;	/* DW4 */	/* contexts to restart after purge */
	unsigned long fill11[3];
};
217 
/* TGH command codes (tgh->cmd) */
enum gru_tgh_cmd {
	TGHCMD_START
};

/* TGH opcodes (tgh->opc) */
enum gru_tgh_opc {
	TGHOP_TLBNOP,
	TGHOP_TLBINV		/* TLB invalidate; see tgh_invalidate() */
};

/* TGH status (tgh->status) */
enum gru_tgh_status {
	TGHSTATUS_IDLE,
	TGHSTATUS_EXCEPTION,
	TGHSTATUS_ACTIVE	/* instruction still in progress */
};

/* TGH internal state (tgh->state) */
enum gru_tgh_state {
	TGHSTATE_IDLE,
	TGHSTATE_PE_INVAL,
	TGHSTATE_INTERRUPT_INVAL,
	TGHSTATE_WAITDONE,
	TGHSTATE_RESTART_CTX,
};
240 
/*
 * TFH - TLB Fault Handle
 * 	Used for TLB dropins into the GRU TLB.
 *
 * Hardware-defined bitfield layout — field order, widths and fill padding
 * must not be changed.  See the tfh_* helpers below for how the fill*
 * operand fields are loaded before a dropin command is issued.
 */
struct gru_tlb_fault_handle {
	unsigned int cmd:1;		/* DW 0 - low 32*/	/* set by start_instruction() */
	unsigned int delresp:1;
	unsigned int fill0:2;
	unsigned int opc:3;		/* enum gru_tfh_opc */
	unsigned int fill1:9;

	unsigned int status:2;		/* enum tfh_status */
	unsigned int fill2:1;
	unsigned int color:1;
	unsigned int state:3;		/* enum tfh_state */
	unsigned int fill3:1;

	unsigned int cause:7;		/* DW 0 - high 32 */	/* enum tfh_cause bits */
	unsigned int fill4:1;

	unsigned int indexway:12;
	unsigned int fill5:4;

	unsigned int ctxnum:4;
	unsigned int fill6:12;

	unsigned long missvaddr:64;	/* DW 1 */	/* faulting virtual address */

	unsigned int missasid:24;	/* DW 2 */
	unsigned int fill7:8;
	unsigned int fillasid:24;	/* ASID for the dropin entry */
	unsigned int dirty:1;
	unsigned int gaa:2;		/* GAA_* address attribute */
	unsigned long fill8:5;

	unsigned long pfn:41;		/* DW 3 */	/* paddr >> GRU_PADDR_SHIFT */
	unsigned int fill9:7;
	unsigned int pagesize:5;	/* GRU-encoded pagesize (GRU_PAGESIZE()) */
	unsigned int fill10:11;

	unsigned long fillvaddr:64;	/* DW 4 */	/* vaddr for the dropin entry */

	unsigned long fill11[3];
};
286 
/* TFH opcodes (tfh->opc) */
enum gru_tfh_opc {
	TFHOP_NOOP,
	TFHOP_RESTART,			/* restart the faulted CB */
	TFHOP_WRITE_ONLY,		/* dropin TLB entry, no restart */
	TFHOP_WRITE_RESTART,		/* dropin TLB entry & restart CB */
	TFHOP_EXCEPTION,		/* convert fault to exception */
	TFHOP_USER_POLLING_MODE = 7,	/* note: gap — 5 and 6 are unused */
};

/* TFH status (tfh->status) */
enum tfh_status {
	TFHSTATUS_IDLE,
	TFHSTATUS_EXCEPTION,
	TFHSTATUS_ACTIVE,
};

/* TFH internal state (tfh->state) */
enum tfh_state {
	TFHSTATE_INACTIVE,
	TFHSTATE_IDLE,
	TFHSTATE_MISS_UPM,		/* miss in user polling mode */
	TFHSTATE_MISS_FMM,
	TFHSTATE_HW_ERR,
	TFHSTATE_WRITE_TLB,
	TFHSTATE_RESTART_CBR,
};

/* TFH cause bits */
enum tfh_cause {
	TFHCAUSE_NONE,
	TFHCAUSE_TLB_MISS,
	TFHCAUSE_TLB_MOD,
	TFHCAUSE_HW_ERROR_RR,
	TFHCAUSE_HW_ERROR_MAIN_ARRAY,
	TFHCAUSE_HW_ERROR_VALID,
	TFHCAUSE_HW_ERROR_PAGESIZE,
	TFHCAUSE_INSTRUCTION_EXCEPTION,
	TFHCAUSE_UNCORRECTIBLE_ERROR,	/* [sic] — identifier spelling kept */
};
324 
/* GAA values — GRU address attribute encodings for tfh->gaa */
#define GAA_RAM				0x0
#define GAA_NCRAM			0x2
#define GAA_MMIO			0x1
#define GAA_REGISTER			0x3

/* GRU paddr shift for pfn. (NOTE: shift is NOT by actual pagesize) */
#define GRU_PADDR_SHIFT			12
333 
/*
 * Context Configuration handle
 * 	Used to allocate resources to a GSEG context.
 *
 * Hardware-defined packed layout — field order and widths must not be
 * changed.  See cch_allocate() for how the allocation fields are loaded
 * before a CCHOP_ALLOCATE is issued.
 */
struct gru_context_configuration_handle {
	unsigned int cmd:1;			/* DW0 */	/* set by start_instruction() */
	unsigned int delresp:1;
	unsigned int opc:3;			/* enum gru_cch_opc */
	unsigned int unmap_enable:1;
	unsigned int req_slice_set_enable:1;
	unsigned int req_slice:2;
	unsigned int cb_int_enable:1;
	unsigned int tlb_int_enable:1;
	unsigned int tfm_fault_bit_enable:1;
	unsigned int tlb_int_select:4;

	unsigned int status:2;			/* enum gru_cch_status */
	unsigned int state:2;			/* enum gru_cch_state */
	unsigned int reserved2:4;

	unsigned int cause:4;			/* enum gru_cch_cause */
	unsigned int tfm_done_bit_enable:1;
	unsigned int unused:3;

	unsigned int dsr_allocation_map;	/* DSR AUs granted to this context */

	unsigned long cbr_allocation_map;	/* DW1 */	/* CBR AUs granted */

	unsigned int asid[8];			/* DW 2 - 5 */	/* per-region ASIDs */
	unsigned short sizeavail[8];		/* DW 6 - 7 */	/* GRU_SIZEAVAIL() masks */
} __attribute__ ((packed));
366 
/* CCH opcodes (cch->opc); note values start at 1 */
enum gru_cch_opc {
	CCHOP_START = 1,
	CCHOP_ALLOCATE,
	CCHOP_INTERRUPT,
	CCHOP_DEALLOCATE,
	CCHOP_INTERRUPT_SYNC,
};

/* CCH status (cch->status) */
enum gru_cch_status {
	CCHSTATUS_IDLE,
	CCHSTATUS_EXCEPTION,
	CCHSTATUS_ACTIVE,	/* instruction still in progress */
};

/* CCH state (cch->state) */
enum gru_cch_state {
	CCHSTATE_INACTIVE,
	CCHSTATE_MAPPED,
	CCHSTATE_ACTIVE,
	CCHSTATE_INTERRUPTED,
};

/* CCH Exception cause (cch->cause); identifier misspellings kept as-is */
enum gru_cch_cause {
	CCHCAUSE_REGION_REGISTER_WRITE_ERROR = 1,
	CCHCAUSE_ILLEGAL_OPCODE = 2,
	CCHCAUSE_INVALID_START_REQUEST = 3,
	CCHCAUSE_INVALID_ALLOCATION_REQUEST = 4,
	CCHCAUSE_INVALID_DEALLOCATION_REQUEST = 5,
	CCHCAUSE_INVALID_INTERRUPT_REQUEST = 6,
	CCHCAUSE_CCH_BUSY = 7,
	CCHCAUSE_NO_CBRS_TO_ALLOCATE = 8,
	CCHCAUSE_BAD_TFM_CONFIG = 9,
	CCHCAUSE_CBR_RESOURCES_OVERSUBSCRIPED = 10,
	CCHCAUSE_DSR_RESOURCES_OVERSUBSCRIPED = 11,
	CCHCAUSE_CBR_DEALLOCATION_ERROR = 12,
};
/*
 * CBE - Control Block Extended
 * 	Maintains internal GRU state for active CBs.
 *
 * Hardware-defined bitfield layout — do not reorder or resize fields.
 * The *cpy/*upd fields appear to be hardware copies of CB instruction
 * operands (naming convention); exact semantics are hardware-defined.
 */
struct gru_control_block_extended {
	unsigned int reserved0:1;	/* DW 0  - low */
	unsigned int imacpy:3;
	unsigned int reserved1:4;
	unsigned int xtypecpy:3;
	unsigned int iaa0cpy:2;
	unsigned int iaa1cpy:2;
	unsigned int reserved2:1;
	unsigned int opccpy:8;
	unsigned int exopccpy:8;

	unsigned int idef2cpy:22;	/* DW 0  - high */
	unsigned int reserved3:10;

	unsigned int idef4cpy:22;	/* DW 1 */
	unsigned int reserved4:10;
	unsigned int idef4upd:22;
	unsigned int reserved5:10;

	unsigned long idef1upd:64;	/* DW 2 */

	unsigned long idef5cpy:64;	/* DW 3 */

	unsigned long idef6cpy:64;	/* DW 4 */

	unsigned long idef3upd:64;	/* DW 5 */

	unsigned long idef5upd:64;	/* DW 6 */

	unsigned int idef2upd:22;	/* DW 7 */
	unsigned int reserved6:10;

	unsigned int ecause:20;		/* exception cause bits (gru_instructions.h) */
	unsigned int cbrstate:4;	/* enum gru_cbr_state */
	unsigned int cbrexecstatus:8;	/* CBR_EXS_* bits */
};
444 
/* CBR states as reported in cbe->cbrstate */
enum gru_cbr_state {
	CBRSTATE_INACTIVE,
	CBRSTATE_IDLE,
	CBRSTATE_PE_CHECK,
	CBRSTATE_QUEUED,
	CBRSTATE_WAIT_RESPONSE,
	CBRSTATE_INTERRUPTED,
	CBRSTATE_INTERRUPTED_MISS_FMM,
	CBRSTATE_BUSY_INTERRUPT_MISS_FMM,
	CBRSTATE_INTERRUPTED_MISS_UPM,
	CBRSTATE_BUSY_INTERRUPTED_MISS_UPM,
	CBRSTATE_REQUEST_ISSUE,
	CBRSTATE_BUSY_INTERRUPT,
};

/* CBE cbrexecstatus bits - bit numbers within cbe->cbrexecstatus */
#define CBR_EXS_ABORT_OCC_BIT			0
#define CBR_EXS_INT_OCC_BIT			1
#define CBR_EXS_PENDING_BIT			2
#define CBR_EXS_QUEUED_BIT			3
#define CBR_EXS_TLBHW_BIT			4
#define CBR_EXS_EXCEPTION_BIT			5

/* Mask forms of the bit numbers above */
#define CBR_EXS_ABORT_OCC			(1 << CBR_EXS_ABORT_OCC_BIT)
#define CBR_EXS_INT_OCC				(1 << CBR_EXS_INT_OCC_BIT)
#define CBR_EXS_PENDING				(1 << CBR_EXS_PENDING_BIT)
#define CBR_EXS_QUEUED				(1 << CBR_EXS_QUEUED_BIT)
#define CBR_EXS_TLBHW				(1 << CBR_EXS_TLBHW_BIT)
#define CBR_EXS_EXCEPTION			(1 << CBR_EXS_EXCEPTION_BIT)
474 
/* CBE ecause bits  - defined in gru_instructions.h */

/*
 * Convert a processor pagesize into the strange encoded pagesize used by the
 * GRU. Processor pagesize is encoded as log of bytes per page. (or PAGE_SHIFT)
 * 	pagesize	log pagesize	grupagesize
 * 	  4k			12	0
 * 	 16k 			14	1
 * 	 64k			16	2
 * 	256k			18	3
 * 	  1m			20	4
 * 	  2m			21	5
 * 	  4m			22	6
 * 	 16m			24	7
 * 	 64m			26	8
 * 	...
 * The "+ 2" for shifts > 20 compensates for the odd-shift (2m/4m) entries.
 */
#define GRU_PAGESIZE(sh)	((((sh) > 20 ? (sh) + 2: (sh)) >> 1) - 6)
#define GRU_SIZEAVAIL(sh)	(1UL << GRU_PAGESIZE(sh))	/* bit mask for cch->sizeavail */

/* minimum TLB purge count to ensure a full purge */
#define GRUMAXINVAL		1024UL


/* Extract the status field from a kernel handle (bits 17:16 of word 0) */
#define GET_MSEG_HANDLE_STATUS(h)	(((*(unsigned long *)(h)) >> 16) & 3)
501 
/*
 * Launch the operation encoded in a kernel MCS handle by setting the CMD
 * bit (bit 0 of the handle's first word).  All operand fields must already
 * have been stored; the wmb() orders those stores before the CMD-bit write,
 * and the cache flush makes the update visible to the GRU.
 */
static inline void start_instruction(void *h)
{
	unsigned long *w0 = h;

	wmb();		/* setting CMD bit must be last */
	*w0 = *w0 | 1;
	gru_flush_cache(h);
}
510 
wait_instruction_complete(void * h)511 static inline int wait_instruction_complete(void *h)
512 {
513 	int status;
514 
515 	do {
516 		cpu_relax();
517 		barrier();
518 		status = GET_MSEG_HANDLE_STATUS(h);
519 	} while (status == CCHSTATUS_ACTIVE);
520 	return status;
521 }
522 
#if defined CONFIG_IA64
/*
 * Load per-region ASIDs & available-pagesize masks into a CCH prior to
 * a CCHOP_ALLOCATE.  IA64 variant: one ASID per region register, up to
 * and including the hugepage region.
 */
static inline void cch_allocate_set_asids(
		  struct gru_context_configuration_handle *cch, int asidval)
{
	int i;

	for (i = 0; i <= RGN_HPAGE; i++) {  /*  assume HPAGE is last region */
		cch->asid[i] = (asidval++);
#if 0
		/* ZZZ hugepages not supported yet */
		if (i == RGN_HPAGE)
			cch->sizeavail[i] = GRU_SIZEAVAIL(hpage_shift);
		else
#endif
			cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT);
	}
}
#elif defined CONFIG_X86_64
/*
 * x86_64 variant: fill all 8 slots; each context may use the base
 * pagesize or 2MB (shift 21) pages.
 */
static inline void cch_allocate_set_asids(
		  struct gru_context_configuration_handle *cch, int asidval)
{
	int i;

	for (i = 0; i < 8; i++) {
		cch->asid[i] = asidval++;
		cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT) |
			GRU_SIZEAVAIL(21);
	}
}
#endif
553 
/*
 * Allocate GRU resources (ASIDs, CBR and DSR allocation-unit maps) to a
 * context.  All operand fields are stored before start_instruction() sets
 * the CMD bit.  Returns the final CCH status (see enum gru_cch_status).
 */
static inline int cch_allocate(struct gru_context_configuration_handle *cch,
			       int asidval, unsigned long cbrmap,
			       unsigned long dsrmap)
{
	cch_allocate_set_asids(cch, asidval);
	cch->dsr_allocation_map = dsrmap;
	cch->cbr_allocation_map = cbrmap;
	cch->opc = CCHOP_ALLOCATE;
	start_instruction(cch);
	return wait_instruction_complete(cch);
}
565 
/* Issue a CCHOP_START on the context; returns the final CCH status. */
static inline int cch_start(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_START;
	start_instruction(cch);
	return wait_instruction_complete(cch);
}
572 
/* Issue a CCHOP_INTERRUPT on the context; returns the final CCH status. */
static inline int cch_interrupt(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_INTERRUPT;
	start_instruction(cch);
	return wait_instruction_complete(cch);
}
579 
/* Issue a CCHOP_DEALLOCATE on the context; returns the final CCH status. */
static inline int cch_deallocate(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_DEALLOCATE;
	start_instruction(cch);
	return wait_instruction_complete(cch);
}
586 
/* Issue a CCHOP_INTERRUPT_SYNC on the context; returns the final CCH status. */
static inline int cch_interrupt_sync(struct gru_context_configuration_handle
				     *cch)
{
	cch->opc = CCHOP_INTERRUPT_SYNC;
	start_instruction(cch);
	return wait_instruction_complete(cch);
}
594 
/*
 * Purge GRU TLB entries via a TGHOP_TLBINV.
 *
 * @vaddr/@vaddrmask: virtual address & mask selecting entries to purge
 * @asid:      address space id of the entries
 * @pagesize:  GRU-encoded pagesize (see GRU_PAGESIZE())
 * @global:    also purge global entries
 * @n:         number of pages to invalidate
 * @ctxbitmap: bitmap of contexts to restart after the purge
 *
 * All operands are stored before start_instruction() sets the CMD bit.
 * Returns the final TGH status (see enum gru_tgh_status).
 */
static inline int tgh_invalidate(struct gru_tlb_global_handle *tgh,
				 unsigned long vaddr, unsigned long vaddrmask,
				 int asid, int pagesize, int global, int n,
				 unsigned short ctxbitmap)
{
	tgh->vaddr = vaddr;
	tgh->asid = asid;
	tgh->pagesize = pagesize;
	tgh->n = n;
	tgh->global = global;
	tgh->vaddrmask = vaddrmask;
	tgh->ctxbitmap = ctxbitmap;
	tgh->opc = TGHOP_TLBINV;
	start_instruction(tgh);
	return wait_instruction_complete(tgh);
}
611 
/*
 * Drop a translation into the GRU TLB without restarting the faulted CB
 * (TFHOP_WRITE_ONLY).  Note: the pfn argument is used as-is — unlike
 * tfh_write_restart(), no GRU_PADDR_SHIFT is applied here.  Fire & forget:
 * does not wait for completion.
 */
static inline void tfh_write_only(struct gru_tlb_fault_handle *tfh,
				  unsigned long pfn, unsigned long vaddr,
				  int asid, int dirty, int pagesize)
{
	tfh->fillasid = asid;
	tfh->fillvaddr = vaddr;
	tfh->pfn = pfn;
	tfh->dirty = dirty;
	tfh->pagesize = pagesize;
	tfh->opc = TFHOP_WRITE_ONLY;
	start_instruction(tfh);
}
624 
/*
 * Drop a translation into the GRU TLB and restart the faulted CB
 * (TFHOP_WRITE_RESTART).  Takes a physical address (converted to a pfn
 * with GRU_PADDR_SHIFT) and a GAA_* address attribute.  Fire & forget:
 * does not wait for completion.
 */
static inline void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
				     unsigned long paddr, int gaa,
				     unsigned long vaddr, int asid, int dirty,
				     int pagesize)
{
	tfh->fillasid = asid;
	tfh->fillvaddr = vaddr;
	tfh->pfn = paddr >> GRU_PADDR_SHIFT;
	tfh->gaa = gaa;
	tfh->dirty = dirty;
	tfh->pagesize = pagesize;
	tfh->opc = TFHOP_WRITE_RESTART;
	start_instruction(tfh);
}
639 
/* Restart the faulted CB without a TLB dropin (TFHOP_RESTART). */
static inline void tfh_restart(struct gru_tlb_fault_handle *tfh)
{
	tfh->opc = TFHOP_RESTART;
	start_instruction(tfh);
}
645 
/* Put the faulted CB into user polling mode (TFHOP_USER_POLLING_MODE). */
static inline void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh)
{
	tfh->opc = TFHOP_USER_POLLING_MODE;
	start_instruction(tfh);
}
651 
/* Convert the fault into an exception on the CB (TFHOP_EXCEPTION). */
static inline void tfh_exception(struct gru_tlb_fault_handle *tfh)
{
	tfh->opc = TFHOP_EXCEPTION;
	start_instruction(tfh);
}
657 
658 #endif /* __GRUHANDLES_H__ */
659