/*
 * bpf_jit64.h: BPF JIT compiler for PPC64
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#ifndef _BPF_JIT64_H
#define _BPF_JIT64_H

#include "bpf_jit.h"

/*
 * Stack layout:
 * Ensure the top half (up to local_tmp_var) stays consistent
 * with our redzone usage.
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 6*8		|
 *		[    tail_call_cnt	] 8		|
 *		[    local_tmp_var	] 8		|
 * fp (r31) -->	[   ebpf stack space	] up to 512	|
 *		[     frame header	] 32/112	|
 * sp (r1) --->	[    stack pointer	] --------------
 */

/* for gpr non-volatile registers BPF_REG_6 to BPF_REG_10 */
#define BPF_PPC_STACK_SAVE	(6*8)
/* for bpf JIT code internal usage */
#define BPF_PPC_STACK_LOCALS	16
/* stack frame excluding BPF stack, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)

#ifndef __ASSEMBLY__

/* BPF register usage */
#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)

/* BPF to ppc register mappings */
static const int b2p[] = {
	/* function return value */
	[BPF_REG_0] = 8,
	/* function arguments */
	[BPF_REG_1] = 3,
	[BPF_REG_2] = 4,
	[BPF_REG_3] = 5,
	[BPF_REG_4] = 6,
	[BPF_REG_5] = 7,
	/* non-volatile registers */
	[BPF_REG_6] = 27,
	[BPF_REG_7] = 28,
	[BPF_REG_8] = 29,
	[BPF_REG_9] = 30,
	/* frame pointer aka BPF_REG_10 */
	[BPF_REG_FP] = 31,
	/* eBPF jit internal registers */
	[BPF_REG_AX] = 2,
	[TMP_REG_1] = 9,
	[TMP_REG_2] = 10
};

/* PPC NVR range -- update this if we ever use NVRs below r27 */
#define BPF_PPC_NVR_MIN		27

/*
 * WARNING: These can use TMP_REG_2 if the offset is not word aligned
 * (ld/std are DS-form instructions, so their displacement must be a
 * multiple of 4), so ensure that TMP_REG_2 isn't already in use.
 */
#define PPC_BPF_LL(r, base, i) do {					  \
				if ((i) % 4) {				  \
					PPC_LI(b2p[TMP_REG_2], (i));	  \
					PPC_LDX(r, base, b2p[TMP_REG_2]); \
				} else					  \
					PPC_LD(r, base, i);		  \
				} while(0)
#define PPC_BPF_STL(r, base, i) do {					  \
				if ((i) % 4) {				  \
					PPC_LI(b2p[TMP_REG_2], (i));	  \
					PPC_STDX(r, base, b2p[TMP_REG_2]); \
				} else					  \
					PPC_STD(r, base, i);		  \
				} while(0)
#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)

#define SEEN_FUNC	0x1000 /* might call external helpers */
#define SEEN_STACK	0x2000 /* uses BPF stack */
#define SEEN_TAILCALL	0x4000 /* uses tail calls */

struct codegen_context {
	/*
	 * This is used to track register usage as well
	 * as calls to external helpers.
	 * - register usage is tracked with corresponding
	 *   bits (r3-r10 and r27-r31)
	 * - rest of the bits can be used to track other
	 *   things -- for now, we use bits 12 to 14
	 *   encoded in the SEEN_* macros above
	 */
	unsigned int seen;
	unsigned int idx;
	unsigned int stack_size;
};

#endif /* !__ASSEMBLY__ */

#endif
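
/*
 * Illustrative sketch only, kept under #if 0 so it is never compiled:
 * a minimal example of how a JIT prologue could use the definitions
 * above to allocate the stack frame and spill one non-volatile GPR.
 * It assumes the PPC_* emitter macros from bpf_jit.h (which emit into
 * `image` at `ctx->idx`, as in bpf_jit_comp64.c); the function name,
 * the `frame_size` local and the slot arithmetic are made up here for
 * illustration, and the real JIT only builds a frame when one is
 * actually needed.
 */
#if 0
static void example_prologue(u32 *image, struct codegen_context *ctx)
{
	/* Fixed frame (header + locals + NVR save area) plus BPF stack */
	int frame_size = BPF_PPC_STACKFRAME + ctx->stack_size;

	/* Allocate the frame: stdu r1, -frame_size(r1) */
	PPC_BPF_STLU(1, 1, -frame_size);

	/*
	 * Spill BPF_REG_6 (r27) into the nv gpr save area near the top
	 * of the frame; the offset is 8-byte aligned, so PPC_BPF_STL
	 * takes the plain std path and TMP_REG_2 is left untouched.
	 */
	PPC_BPF_STL(b2p[BPF_REG_6], 1,
		    frame_size - 8 * (32 - b2p[BPF_REG_6]));
}
#endif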