// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
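
/*
 * Check that a BPF program can read fields of kernel map structures by
 * casting its map definitions to mirrored internal types. Each check_*()
 * helper below compares the fields seen through a map pointer against
 * the values the map was created with.
 */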

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define LOOP_BOUND 0xf
#define MAX_ENTRIES 8
#define HALF_ENTRIES (MAX_ENTRIES >> 1)

_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");

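/* On failure these record which map type and which source line was being
 * verified, for the userspace side of the test to inspect. */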
enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;

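/* Record what is being checked in the globals above, then return 0 from
 * the enclosing function as soon as one condition fails. */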
#define VERIFY_TYPE(type, func) ({	\
	g_map_type = type;		\
	if (!func())			\
		return 0;		\
})


#define VERIFY(expr) ({		\
	g_line = __LINE__;	\
	if (!(expr))		\
		return 0;	\
})

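/* Minimal mirrors of kernel-internal structs. The preserve_access_index
 * attribute makes libbpf relocate each field access (BPF CO-RE) against
 * the running kernel's BTF, so only the fields used here are declared. */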
struct bpf_map_memory {
	__u32 pages;
} __attribute__((preserve_access_index));

struct bpf_map {
	enum bpf_map_type map_type;
	__u32 key_size;
	__u32 value_size;
	__u32 max_entries;
	__u32 id;
	struct bpf_map_memory memory;
} __attribute__((preserve_access_index));

static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
				       __u32 value_size, __u32 max_entries)
{
	VERIFY(map->map_type == g_map_type);
	VERIFY(map->key_size == key_size);
	VERIFY(map->value_size == value_size);
	VERIFY(map->max_entries == max_entries);
	VERIFY(map->id > 0);
	VERIFY(map->memory.pages > 0);

	return 1;
}

static inline int check_bpf_map_ptr(struct bpf_map *indirect,
				    struct bpf_map *direct)
{
	VERIFY(indirect->map_type == direct->map_type);
	VERIFY(indirect->key_size == direct->key_size);
	VERIFY(indirect->value_size == direct->value_size);
	VERIFY(indirect->max_entries == direct->max_entries);
	VERIFY(indirect->id == direct->id);
	VERIFY(indirect->memory.pages == direct->memory.pages);

	return 1;
}

static inline int check(struct bpf_map *indirect, struct bpf_map *direct,
			__u32 key_size, __u32 value_size, __u32 max_entries)
{
	VERIFY(check_bpf_map_ptr(indirect, direct));
	VERIFY(check_bpf_map_fields(indirect, key_size, value_size,
				    max_entries));
	return 1;
}

static inline int check_default(struct bpf_map *indirect,
				struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}

static __noinline int
check_default_noinline(struct bpf_map *indirect, struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}

typedef struct {
	int counter;
} atomic_t;

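/* Mirror of the kernel's hash-table map. With BPF_F_NO_PREALLOC, count
 * tracks the number of allocated elements. */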
struct bpf_htab {
	struct bpf_map map;
	atomic_t count;
	__u32 n_buckets;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_hash SEC(".maps");

static inline int check_hash(void)
{
	struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
	struct bpf_map *map = (struct bpf_map *)&m_hash;
	int i;

	VERIFY(check_default_noinline(&hash->map, map));

	VERIFY(hash->n_buckets == MAX_ENTRIES);
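	/* 64 is the expected internal element size for a __u32 key and
	 * value (per-element header plus rounded-up key and value). */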
	VERIFY(hash->elem_size == 64);

	VERIFY(hash->count.counter == 0);
	for (i = 0; i < HALF_ENTRIES; ++i) {
		const __u32 key = i;
		const __u32 val = 1;

		if (bpf_map_update_elem(hash, &key, &val, 0))
			return 0;
	}
	VERIFY(hash->count.counter == HALF_ENTRIES);

	return 1;
}

struct bpf_array {
	struct bpf_map map;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_array SEC(".maps");

static inline int check_array(void)
{
	struct bpf_array *array = (struct bpf_array *)&m_array;
	struct bpf_map *map = (struct bpf_map *)&m_array;
	int i, n_lookups = 0, n_keys = 0;

	VERIFY(check_default(&array->map, map));

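	/* Array elements store only the value, rounded up to 8 bytes. */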
	VERIFY(array->elem_size == 8);

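	/* LOOP_BOUND gives the verifier a constant upper bound; the static
	 * assert above guarantees all MAX_ENTRIES slots are visited. */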
	for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {
		const __u32 key = i;
		__u32 *val = bpf_map_lookup_elem(array, &key);

		++n_lookups;
		if (val)
			++n_keys;
	}

	VERIFY(n_lookups == MAX_ENTRIES);
	VERIFY(n_keys == MAX_ENTRIES);

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_prog_array SEC(".maps");

static inline int check_prog_array(void)
{
	struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array;
	struct bpf_map *map = (struct bpf_map *)&m_prog_array;

	VERIFY(check_default(&prog_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_perf_event_array SEC(".maps");

static inline int check_perf_event_array(void)
{
	struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array;
	struct bpf_map *map = (struct bpf_map *)&m_perf_event_array;

	VERIFY(check_default(&perf_event_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_hash SEC(".maps");

static inline int check_percpu_hash(void)
{
	struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_hash;

	VERIFY(check_default(&percpu_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_array SEC(".maps");

static inline int check_percpu_array(void)
{
	struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_array;

	VERIFY(check_default(&percpu_array->map, map));

	return 1;
}

struct bpf_stack_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u64);
} m_stack_trace SEC(".maps");

static inline int check_stack_trace(void)
{
	struct bpf_stack_map *stack_trace =
		(struct bpf_stack_map *)&m_stack_trace;
	struct bpf_map *map = (struct bpf_map *)&m_stack_trace;

	VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64),
		     MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cgroup_array SEC(".maps");

static inline int check_cgroup_array(void)
{
	struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_array;

	VERIFY(check_default(&cgroup_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_hash SEC(".maps");

static inline int check_lru_hash(void)
{
	struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_hash;

	VERIFY(check_default(&lru_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_percpu_hash SEC(".maps");

static inline int check_lru_percpu_hash(void)
{
	struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash;

	VERIFY(check_default(&lru_percpu_hash->map, map));

	return 1;
}

struct lpm_trie {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct lpm_key {
	struct bpf_lpm_trie_key trie_key;
	__u32 data;
};

struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct lpm_key);
	__type(value, __u32);
} m_lpm_trie SEC(".maps");

static inline int check_lpm_trie(void)
{
	struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie;
	struct bpf_map *map = (struct bpf_map *)&m_lpm_trie;

	VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32),
		     MAX_ENTRIES));

	return 1;
}

struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} inner_map SEC(".maps");

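/* Outer array-of-maps: __array(values, ...) declares the inner map
 * definition, and slot 0 is statically initialized to point at inner_map. */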
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 1);
		__type(key, __u32);
		__type(value, __u32);
	});
} m_array_of_maps SEC(".maps") = {
	.values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 },
};

static inline int check_array_of_maps(void)
{
	struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;

	VERIFY(check_default(&array_of_maps->map, map));

	return 1;
}

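/* For a hash-of-maps the initializer index is taken as the key, so
 * inner_map ends up inserted under key 2 at load time. */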
struct {
	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct inner_map);
} m_hash_of_maps SEC(".maps") = {
	.values = {
		[2] = &inner_map,
	},
};

static inline int check_hash_of_maps(void)
{
	struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;

	VERIFY(check_default(&hash_of_maps->map, map));

	return 1;
}

struct bpf_dtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap SEC(".maps");

static inline int check_devmap(void)
{
	struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap;
	struct bpf_map *map = (struct bpf_map *)&m_devmap;

	VERIFY(check_default(&devmap->map, map));

	return 1;
}

struct bpf_stab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockmap SEC(".maps");

static inline int check_sockmap(void)
{
	struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap;
	struct bpf_map *map = (struct bpf_map *)&m_sockmap;

	VERIFY(check_default(&sockmap->map, map));

	return 1;
}

struct bpf_cpu_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cpumap SEC(".maps");

static inline int check_cpumap(void)
{
	struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap;
	struct bpf_map *map = (struct bpf_map *)&m_cpumap;

	VERIFY(check_default(&cpumap->map, map));

	return 1;
}

struct xsk_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_xskmap SEC(".maps");

static inline int check_xskmap(void)
{
	struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap;
	struct bpf_map *map = (struct bpf_map *)&m_xskmap;

	VERIFY(check_default(&xskmap->map, map));

	return 1;
}

struct bpf_shtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockhash SEC(".maps");

static inline int check_sockhash(void)
{
	struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash;
	struct bpf_map *map = (struct bpf_map *)&m_sockhash;

	VERIFY(check_default(&sockhash->map, map));

	return 1;
}

struct bpf_cgroup_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_cgroup_storage SEC(".maps");

static inline int check_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage;

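	/* Cgroup storage is keyed per cgroup and has no fixed capacity;
	 * the kernel reports max_entries as 0. */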
	VERIFY(check(&cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}

struct reuseport_array {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_reuseport_sockarray SEC(".maps");

static inline int check_reuseport_sockarray(void)
{
	struct reuseport_array *reuseport_sockarray =
		(struct reuseport_array *)&m_reuseport_sockarray;
	struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray;

	VERIFY(check_default(&reuseport_sockarray->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_percpu_cgroup_storage SEC(".maps");

static inline int check_percpu_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *percpu_cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage;

	VERIFY(check(&percpu_cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}

struct bpf_queue_stack {
	struct bpf_map map;
} __attribute__((preserve_access_index));

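/* Queue and stack maps are keyless: only a value type is declared and
 * key_size is expected to be 0. */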
struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_queue SEC(".maps");

static inline int check_queue(void)
{
	struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue;
	struct bpf_map *map = (struct bpf_map *)&m_queue;

	VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_STACK);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_stack SEC(".maps");

static inline int check_stack(void)
{
	struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack;
	struct bpf_map *map = (struct bpf_map *)&m_stack;

	VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct bpf_local_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, __u32);
	__type(value, __u32);
} m_sk_storage SEC(".maps");

static inline int check_sk_storage(void)
{
	struct bpf_local_storage_map *sk_storage =
		(struct bpf_local_storage_map *)&m_sk_storage;
	struct bpf_map *map = (struct bpf_map *)&m_sk_storage;

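	/* Socket-local storage has no fixed capacity: max_entries is 0. */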
	VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap_hash SEC(".maps");

static inline int check_devmap_hash(void)
{
	struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash;
	struct bpf_map *map = (struct bpf_map *)&m_devmap_hash;

	VERIFY(check_default(&devmap_hash->map, map));

	return 1;
}

struct bpf_ringbuf_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

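/* For ring buffers max_entries is the buffer size in bytes and must be a
 * page-aligned power of two; key and value sizes are 0. */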
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 1 << 12);
} m_ringbuf SEC(".maps");

static inline int check_ringbuf(void)
{
	struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
	struct bpf_map *map = (struct bpf_map *)&m_ringbuf;

	VERIFY(check(&ringbuf->map, map, 0, 0, 1 << 12));

	return 1;
}

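/* Entry point: run every check once per map type. Returning 0 reports a
 * failure (g_map_type and g_line say where); returning 1 means all checks
 * passed and lets the packet through on this cgroup_skb/egress hook. */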
SEC("cgroup_skb/egress")
int cg_skb(void *ctx)
{
	VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap);
	VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap);
	VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
		    check_reuseport_sockarray);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
		    check_percpu_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack);
	VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf);

	return 1;
}

__u32 _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";