1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (c) 2019 Richard Palethorpe <rpalethorpe@suse.com>
4 */
5
6 /*\
7 * [Description]
8 *
9 * Check if eBPF can do arithmetic with 64bits. This targets a specific
* regression which only affects unprivileged users who are subject to extra
11 * pointer arithmetic checks during verification.
12 *
13 * Fixed by kernel commit
14 * 3612af783cf5 ("bpf: fix sanitation rewrite in case of non-pointers")
15 *
16 * https://blog.cloudflare.com/ebpf-cant-count/
17 *
18 * This test is very similar in structure to bpf_prog01 which is better
19 * annotated.
20 */
21
22 #include <limits.h>
23 #include <string.h>
24 #include <stdio.h>
25 #include <inttypes.h>
26
27 #include "config.h"
28 #include "tst_test.h"
29 #include "tst_capability.h"
30 #include "bpf_common.h"
31
/* A value needing the full 64 bits (2^60), so truncation to 32 bits is visible */
#define A64INT (((uint64_t)1) << 60)

const char MSG[] = "Ahoj!";	/* payload copied into the packet buffer */
static char *msg;		/* guarded copy of MSG passed to bpf_run_prog() */

static char *log;		/* verifier log buffer (BUFSIZE bytes) */
static uint32_t *key;		/* scratch map key for bpf_map_array_get() */
static uint64_t *val;		/* scratch map value for bpf_map_array_get() */
static union bpf_attr *attr;	/* bpf(2) attribute block reused across calls */
41
/*
 * Assemble and load the eBPF program under test.
 *
 * The program does two map lookups on the array map referred to by fd:
 * element 0 is set to A64INT + r6 (i.e. A64INT + 1) and element 1 to
 * A64INT - r6 (A64INT - 1). If the verifier/JIT mishandles 64-bit ALU ops
 * (the regression fixed by 3612af783cf5), the stored values are wrong.
 *
 * Note BPF_LD_MAP_FD and BPF_LD_IMM64 each occupy two instruction slots,
 * which is why the numbering in the per-line comments skips values.
 *
 * fd: file descriptor of the 2-element array map the program writes to.
 * Returns whatever bpf_load_prog() returns — presumably the program fd,
 * with failure handled inside the helper (TODO confirm in bpf_common.h).
 */
static int load_prog(int fd)
{
	struct bpf_insn insn[] = {
		BPF_MOV64_IMM(BPF_REG_6, 1),            /* 0: r6 = 1 */

		/* First lookup: element 0 := A64INT + r6 */
		BPF_LD_MAP_FD(BPF_REG_1, fd),           /* 1: r1 = &fd */
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),   /* 3: r2 = fp */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),  /* 4: r2 = r2 - 4 */
		BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),     /* 5: *r2 = 0 */
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),/* 6: map_lookup_elem */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17), /* 7: if(!r0) goto 25 */
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),    /* 8: r3 = r0 */
		BPF_LD_IMM64(BPF_REG_4, A64INT),        /* 9: r4 = 2^60 */
		BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_6),   /* 11: r4 += r6 */
		BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_4, 0),   /* 12: *r3 = r4 */

		/* Second lookup: element 1 := A64INT - r6 */
		BPF_LD_MAP_FD(BPF_REG_1, fd),           /* 13: r1 = &fd */
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),   /* 15: r2 = fp */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),  /* 16: r2 = r2 - 4 */
		BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 1),     /* 17: *r2 = 1 */
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),/* 18: map_lookup_elem */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),  /* 19: if(!r0) goto 25 */
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),    /* 20: r3 = r0 */
		BPF_LD_IMM64(BPF_REG_4, A64INT),        /* 21: r4 = 2^60 */
		BPF_ALU64_REG(BPF_SUB, BPF_REG_4, BPF_REG_6),   /* 23: r4 -= r6 */
		BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_4, 0),   /* 24: *r3 = r4 */

		BPF_MOV64_IMM(BPF_REG_0, 0),            /* 25: r0 = 0 */
		BPF_EXIT_INSN(),                        /* 26: return r0 */
	};

	bpf_init_prog_attr(attr, insn, sizeof(insn), log, BUFSIZE);
	return bpf_load_prog(attr, log);
}
76
setup(void)77 static void setup(void)
78 {
79 rlimit_bump_memlock();
80 memcpy(msg, MSG, sizeof(MSG));
81 }
82
run(void)83 static void run(void)
84 {
85 int map_fd, prog_fd;
86
87 map_fd = bpf_map_array_create(2);
88 prog_fd = load_prog(map_fd);
89 bpf_run_prog(prog_fd, msg, sizeof(MSG));
90 SAFE_CLOSE(prog_fd);
91
92 *key = 0;
93 bpf_map_array_get(map_fd, key, val);
94 if (*val != A64INT + 1) {
95 tst_res(TFAIL,
96 "val = %"PRIu64", but should be val = %"PRIu64" + 1",
97 *val, A64INT);
98 } else {
99 tst_res(TPASS, "val = %"PRIu64" + 1", A64INT);
100 }
101
102 *key = 1;
103 bpf_map_array_get(map_fd, key, val);
104 if (*val != A64INT - 1) {
105 tst_res(TFAIL,
106 "val = %"PRIu64", but should be val = %"PRIu64" - 1",
107 *val, A64INT);
108 } else {
109 tst_res(TPASS, "val = %"PRIu64" - 1", A64INT);
110 }
111
112 SAFE_CLOSE(map_fd);
113 }
114
static struct tst_test test = {
	.setup = setup,
	.test_all = run,
	/* Drop CAP_SYS_ADMIN: the regression only triggers for unprivileged
	 * users, who get extra pointer-arithmetic sanitation in the verifier. */
	.caps = (struct tst_cap []) {
		TST_CAP(TST_CAP_DROP, CAP_SYS_ADMIN),
		{}
	},
	/* Library-allocated (guarded) buffers backing the file-scope pointers */
	.bufs = (struct tst_buffers []) {
		{&key, .size = sizeof(*key)},
		{&val, .size = sizeof(*val)},
		{&log, .size = BUFSIZE},
		{&attr, .size = sizeof(*attr)},
		{&msg, .size = sizeof(MSG)},
		{},
	},
	/* Kernel commit that fixed the tested regression */
	.tags = (const struct tst_tag[]) {
		{"linux-git", "3612af783cf5"},
		{}
	}
};
135