• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * bpf_jit64.h: BPF JIT compiler for PPC64
3  *
4  * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
5  *		  IBM Corporation
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License
9  * as published by the Free Software Foundation; version 2
10  * of the License.
11  */
12 #ifndef _BPF_JIT64_H
13 #define _BPF_JIT64_H
14 
15 #include "bpf_jit.h"
16 
/*
 * Stack layout:
 * Ensure the top half (up to local_tmp_var) stays consistent
 * with our redzone usage.
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 8*8		|
 *		[    tail_call_cnt	] 8		|
 *		[    local_tmp_var	] 8		|
 * fp (r31) -->	[   ebpf stack space	] 512		|
 *		[     frame header	] 32/112	|
 * sp (r1) --->	[    stack pointer	] --------------
 */
30 
/* for gpr non volatile registers BPF_REG_6 to 10, plus skb cache registers */
#define BPF_PPC_STACK_SAVE	(8*8)
/* for bpf JIT code internal usage (tail_call_cnt + local_tmp_var, 8 bytes each) */
#define BPF_PPC_STACK_LOCALS	16
/*
 * Total JITed-program stack frame, matching the layout diagram above:
 * frame header + eBPF stack space + JIT locals + nv gpr save area.
 * Ensure this is quadword aligned.
 */
#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + MAX_BPF_STACK + \
				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
38 
39 #ifndef __ASSEMBLY__
40 
/*
 * BPF register usage: extra JIT-internal register numbers, allocated
 * past MAX_BPF_JIT_REG so they can index the b2p[] mapping below
 * alongside the architected BPF registers.
 */
#define SKB_HLEN_REG	(MAX_BPF_JIT_REG + 0)	/* cached skb length (see SEEN_SKB) */
#define SKB_DATA_REG	(MAX_BPF_JIT_REG + 1)	/* cached skb data pointer */
#define TMP_REG_1	(MAX_BPF_JIT_REG + 2)	/* scratch */
#define TMP_REG_2	(MAX_BPF_JIT_REG + 3)	/* scratch */
46 
/*
 * BPF to ppc register mappings: indexed by BPF register number
 * (BPF_REG_0..BPF_REG_AX plus the JIT-internal registers defined
 * above), yields the ppc GPR number used for it. Callee-saved BPF
 * registers map onto ppc non-volatile GPRs (see BPF_PPC_NVR_MIN).
 */
static const int b2p[] = {
	/* function return value */
	[BPF_REG_0] = 8,
	/* function arguments */
	[BPF_REG_1] = 3,
	[BPF_REG_2] = 4,
	[BPF_REG_3] = 5,
	[BPF_REG_4] = 6,
	[BPF_REG_5] = 7,
	/* non volatile registers */
	[BPF_REG_6] = 27,
	[BPF_REG_7] = 28,
	[BPF_REG_8] = 29,
	[BPF_REG_9] = 30,
	/* frame pointer aka BPF_REG_10 */
	[BPF_REG_FP] = 31,
	/* eBPF jit internal registers */
	/*
	 * NOTE(review): r2 is the TOC pointer under the ppc64 ELF ABIs --
	 * confirm the JIT saves/restores it around any helper call before
	 * relying on it for BPF_REG_AX (constant blinding).
	 */
	[BPF_REG_AX] = 2,
	[SKB_HLEN_REG] = 25,
	[SKB_DATA_REG] = 26,
	[TMP_REG_1] = 9,
	[TMP_REG_2] = 10
};
71 
/* PPC NVR range -- update this if we ever use NVRs below r24 */
#define BPF_PPC_NVR_MIN		24

/*
 * Declare the three entry points of an skb load helper (implemented in
 * assembly elsewhere): the generic variant, plus variants specialized
 * for negative and positive constant offsets. Arguments/results are
 * passed in r3/r4 per the ppc calling convention.
 */
#define DECLARE_LOAD_FUNC(func)	u64 func(u64 r3, u64 r4);			\
				u64 func##_negative_offset(u64 r3, u64 r4);	\
				u64 func##_positive_offset(u64 r3, u64 r4);

DECLARE_LOAD_FUNC(sk_load_word);
DECLARE_LOAD_FUNC(sk_load_half);
DECLARE_LOAD_FUNC(sk_load_byte);
83 
/*
 * Pick the skb load helper for a constant offset 'imm':
 * - imm >= 0: use the _positive_offset variant
 * - SKF_LL_OFF <= imm < 0: link-layer-relative offset, use the
 *   _negative_offset variant
 * - imm < SKF_LL_OFF: fall back to the generic helper
 *
 * Arguments are parenthesized so expression operands (e.g. 'a + b')
 * parse correctly; note 'imm' may be evaluated twice, so it must be
 * side-effect free.
 */
#define CHOOSE_LOAD_FUNC(imm, func)						\
			((imm) < 0 ?						\
			 ((imm) >= SKF_LL_OFF ? func##_negative_offset : func) : \
			 func##_positive_offset)
88 
/*
 * Flags tracked in codegen_context::seen, occupying bits 12-15,
 * alongside the per-register usage bits.
 */
#define SEEN_FUNC	0x1000 /* might call external helpers */
#define SEEN_STACK	0x2000 /* uses BPF stack */
#define SEEN_SKB	0x4000 /* uses sk_buff */
#define SEEN_TAILCALL	0x8000 /* uses tail calls */
93 
/* Per-program state carried through the JIT compilation passes. */
struct codegen_context {
	/*
	 * This is used to track register usage as well
	 * as calls to external helpers.
	 * - register usage is tracked with corresponding
	 *   bits (r3-r10 and r25-r31)
	 * - rest of the bits can be used to track other
	 *   things -- for now, we use bits 12 to 15 for the
	 *   SEEN_* macros above (0x1000..0x8000)
	 */
	unsigned int seen;
	/*
	 * NOTE(review): presumably the index of the next instruction slot
	 * in the generated image -- confirm against bpf_jit_comp64.c.
	 */
	unsigned int idx;
};
107 
108 #endif /* !__ASSEMBLY__ */
109 
110 #endif
111