// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#define pr_fmt(fmt) "kasan test: %s " fmt, __func__

#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "../mm/kasan/kasan.h"

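/*
 * copy_user_test() allocates a kernel buffer that is one KASAN granule
 * short of 128 bytes, maps an anonymous user page, and then copies
 * size + 1 bytes in both directions through the usercopy helpers so
 * that each call triggers an out-of-bounds report on the kernel buffer.
 */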
static noinline void __init copy_user_test(void)
{
	char *kmem;
	char __user *usermem;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	int __maybe_unused unused;

	kmem = kmalloc(size, GFP_KERNEL);
	if (!kmem)
		return;

	usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (IS_ERR(usermem)) {
		pr_err("Failed to allocate user memory\n");
		kfree(kmem);
		return;
	}

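	/*
	 * Presumably here to hide the constant size from the compiler, so
	 * that the size + 1 copies below are not caught (or folded away)
	 * at compile time and KASAN's runtime checks are what fire.
	 */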
	OPTIMIZER_HIDE_VAR(size);

	pr_info("out-of-bounds in copy_from_user()\n");
	unused = copy_from_user(kmem, usermem, size + 1);

	pr_info("out-of-bounds in copy_to_user()\n");
	unused = copy_to_user(usermem, kmem, size + 1);

	pr_info("out-of-bounds in __copy_from_user()\n");
	unused = __copy_from_user(kmem, usermem, size + 1);

	pr_info("out-of-bounds in __copy_to_user()\n");
	unused = __copy_to_user(usermem, kmem, size + 1);

	pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
	unused = __copy_from_user_inatomic(kmem, usermem, size + 1);

	pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
	unused = __copy_to_user_inatomic(usermem, kmem, size + 1);

	pr_info("out-of-bounds in strncpy_from_user()\n");
	unused = strncpy_from_user(kmem, usermem, size + 1);

	vm_munmap((unsigned long)usermem, PAGE_SIZE);
	kfree(kmem);
}

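/*
 * kasan_rcu_uaf() allocates a kasan_rcu_info object and hands it to
 * call_rcu(); the reclaim callback then frees the object and reads the
 * 'i' field back from the freed memory, which KASAN should report as a
 * use-after-free inside an RCU callback.
 */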
static struct kasan_rcu_info {
	int i;
	struct rcu_head rcu;
} *global_rcu_ptr;

static noinline void __init kasan_rcu_reclaim(struct rcu_head *rp)
{
	struct kasan_rcu_info *fp = container_of(rp,
			struct kasan_rcu_info, rcu);

	kfree(fp);
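	/*
	 * The read below is the intentional use-after-free; the volatile
	 * cast keeps the compiler from discarding the otherwise-dead load.
	 */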
	((volatile struct kasan_rcu_info *)fp)->i;
}

static noinline void __init kasan_rcu_uaf(void)
{
	struct kasan_rcu_info *ptr;

	pr_info("use-after-free in kasan_rcu_reclaim\n");
	ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	global_rcu_ptr = rcu_dereference_protected(ptr, NULL);
	call_rcu(&global_rcu_ptr->rcu, kasan_rcu_reclaim);
}

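/*
 * kasan_workqueue_work() is the handler queued by kasan_workqueue_uaf()
 * below: it frees the work item it is handed, setting up the
 * use-after-free that is triggered once the workqueue has been drained.
 */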
static noinline void __init kasan_workqueue_work(struct work_struct *work)
{
	kfree(work);
}

static noinline void __init kasan_workqueue_uaf(void)
{
	struct workqueue_struct *workqueue;
	struct work_struct *work;

	workqueue = create_workqueue("kasan_wq_test");
	if (!workqueue) {
		pr_err("Allocation failed\n");
		return;
	}
	work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
	if (!work) {
		pr_err("Allocation failed\n");
		destroy_workqueue(workqueue);
		return;
	}

	INIT_WORK(work, kasan_workqueue_work);
	queue_work(workqueue, work);
	destroy_workqueue(workqueue);

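	/*
	 * destroy_workqueue() flushes pending work, so kasan_workqueue_work()
	 * has already freed 'work' by the time it is dereferenced below.
	 */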
	pr_info("use-after-free on workqueue\n");
	((volatile struct work_struct *)work)->data;
}

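/*
 * Runs every test once at module load. The init function returns -EAGAIN,
 * presumably so that insmod fails and the module does not remain loaded
 * after the reports have been produced.
 */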
static int __init test_kasan_module_init(void)
{
	/*
	 * Temporarily enable multi-shot mode. Otherwise, KASAN would only
	 * report the first detected bug and panic the kernel if panic_on_warn
	 * is enabled.
	 */
	bool multishot = kasan_save_enable_multi_shot();

	copy_user_test();
	kasan_rcu_uaf();
	kasan_workqueue_uaf();

	kasan_restore_multi_shot(multishot);
	return -EAGAIN;
}

module_init(test_kasan_module_init);
MODULE_LICENSE("GPL");