// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "kcsan: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#include "atomic.h"
#include "encoding.h"
#include "kcsan.h"

static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kcsan."
module_param_named(early_enable, kcsan_early_enable, bool, 0);
module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);

bool kcsan_enabled;

/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
	.disable_count		= 0,
	.atomic_next		= 0,
	.atomic_nest_count	= 0,
	.in_flat_atomic		= false,
	.access_mask		= 0,
	.scoped_accesses	= {LIST_POISON1, NULL},
};

/*
 * Helper macros to index into adjacent slots, starting from address slot
 * itself, followed by the right and left slots.
 *
 * The purpose is 2-fold:
 *
 *	1. if during insertion the address slot is already occupied, check if
 *	   any adjacent slots are free;
 *	2. accesses that straddle a slot boundary due to size that exceeds a
 *	   slot's range may check adjacent slots if any watchpoint matches.
 *
 * Note that accesses with very large size may still miss a watchpoint; however,
 * given this should be rare, this is a reasonable trade-off to make, since this
 * will avoid:
 *
 *	1. excessive contention between watchpoint checks and setup;
 *	2. larger number of simultaneous watchpoints without sacrificing
 *	   performance.
 *
 * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
 *
 *   slot=0:  [ 1,  2,  0]
 *   slot=9:  [10, 11,  9]
 *   slot=63: [64, 65, 63]
 */
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))

/*
 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
 * slot (middle) is fine if we assume that races occur rarely. The set of
 * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
 * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
 */
#define SLOT_IDX_FAST(slot, i) (slot + i)

/*
 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
 * able to safely update and access a watchpoint without introducing locking
 * overhead, we encode each watchpoint as a single atomic long. The initial
 * zero-initialized state matches INVALID_WATCHPOINT.
 *
 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
 * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
 */
static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
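
/*
 * For example (illustrative; the actual numbers depend on the Kconfig values):
 * with CONFIG_KCSAN_NUM_WATCHPOINTS=64 and KCSAN_CHECK_ADJACENT=1 (so
 * NUM_SLOTS == 3), the array above has 66 entries; the 2 extra entries only
 * absorb SLOT_IDX_FAST() overflow and are never used as primary slots.
 */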

/*
 * Per-CPU counter of instructions to skip before setting up the next
 * watchpoint; used in should_watch(). A per-CPU counter avoids excessive
 * contention.
 */
static DEFINE_PER_CPU(long, kcsan_skip);

/* For kcsan_prandom_u32_max(). */
static DEFINE_PER_CPU(u32, kcsan_rand_state);

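/*
 * Return the slot of a watchpoint matching the access at @addr of @size, or
 * NULL if none exists. With @expect_write, only watchpoints set up for write
 * accesses are considered a match (read vs. read cannot race). On success,
 * *encoded_watchpoint holds the encoded value read from the slot, for a later
 * try_consume_watchpoint().
 */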
static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
						      size_t size,
						      bool expect_write,
						      long *encoded_watchpoint)
{
	const int slot = watchpoint_slot(addr);
	const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
	atomic_long_t *watchpoint;
	unsigned long wp_addr_masked;
	size_t wp_size;
	bool is_write;
	int i;

	BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
		*encoded_watchpoint = atomic_long_read(watchpoint);
		if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
				       &wp_size, &is_write))
			continue;

		if (expect_write && !is_write)
			continue;

		/* Check if the watchpoint matches the access. */
		if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
			return watchpoint;
	}

	return NULL;
}

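/*
 * Try to claim one of the NUM_SLOTS candidate slots for a new watchpoint
 * encoding the access at @addr of @size. Returns the claimed slot, or NULL if
 * all candidate slots are already occupied (out of capacity).
 */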
static inline atomic_long_t *
insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{
	const int slot = watchpoint_slot(addr);
	const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
	atomic_long_t *watchpoint;
	int i;

	/* Check slot index logic, ensuring we stay within array bounds. */
	BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
	BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		long expect_val = INVALID_WATCHPOINT;

		/* Try to acquire this slot. */
		watchpoint = &watchpoints[SLOT_IDX(slot, i)];
		if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
			return watchpoint;
	}

	return NULL;
}

/*
 * Return true if watchpoint was successfully consumed, false otherwise.
 *
 * This may return false if:
 *
 *	1. another thread already consumed the watchpoint;
 *	2. the thread that set up the watchpoint already removed it;
 *	3. the watchpoint was removed and then re-used.
 */
static __always_inline bool
try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{
	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
}

/* Return true if watchpoint was not touched, false if already consumed. */
static inline bool consume_watchpoint(atomic_long_t *watchpoint)
{
	return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT;
}

/* Remove the watchpoint -- its slot may be reused after. */
static inline void remove_watchpoint(atomic_long_t *watchpoint)
{
	atomic_long_set(watchpoint, INVALID_WATCHPOINT);
}

static __always_inline struct kcsan_ctx *get_ctx(void)
{
	/*
	 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks that would
	 * also result in calls that generate warnings in uaccess regions.
	 */
	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}

/* Check scoped accesses; never inline because this is a slow-path! */
static noinline void kcsan_check_scoped_accesses(void)
{
	struct kcsan_ctx *ctx = get_ctx();
	struct list_head *prev_save = ctx->scoped_accesses.prev;
	struct kcsan_scoped_access *scoped_access;

	ctx->scoped_accesses.prev = NULL;  /* Avoid recursion. */
	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
		__kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
	ctx->scoped_accesses.prev = prev_save;
}

/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
	if (type & KCSAN_ACCESS_ATOMIC)
		return true;

	/*
	 * Unless explicitly declared atomic, never consider an assertion access
	 * as atomic. This allows using them also in atomic regions, such as
	 * seqlocks, without implicitly changing their semantics.
	 */
	if (type & KCSAN_ACCESS_ASSERT)
		return false;

	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
	    (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
	    !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))
		return true; /* Assume aligned writes up to word size are atomic. */

	if (ctx->atomic_next > 0) {
		/*
		 * Because we do not have separate contexts for nested
		 * interrupts, in case atomic_next is set, we simply assume that
		 * the outer interrupt set atomic_next. In the worst case, we
		 * will conservatively consider operations as atomic. This is a
		 * reasonable trade-off to make, since this case should be
		 * extremely rare; however, even if extremely rare, it could
		 * lead to false positives otherwise.
		 */
		if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
			--ctx->atomic_next; /* in task, or outer interrupt */
		return true;
	}

	return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
}

static __always_inline bool
should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
	/*
	 * Never set up watchpoints when memory operations are atomic.
	 *
	 * Need to check this first, before kcsan_skip check below: (1) atomics
	 * should not count towards skipped instructions, and (2) to actually
	 * decrement kcsan_atomic_next for consecutive instruction stream.
	 */
	if (is_atomic(ptr, size, type, ctx))
		return false;

	if (this_cpu_dec_return(kcsan_skip) >= 0)
		return false;

	/*
	 * NOTE: If we get here, kcsan_skip must always be reset in slow path
	 * via reset_kcsan_skip() to avoid underflow.
	 */

	/* this operation should be watched */
	return true;
}

/*
 * Returns a pseudo-random number in interval [0, ep_ro). Simple linear
 * congruential generator, using constants from "Numerical Recipes".
 */
static u32 kcsan_prandom_u32_max(u32 ep_ro)
{
	u32 state = this_cpu_read(kcsan_rand_state);

	state = 1664525 * state + 1013904223;
	this_cpu_write(kcsan_rand_state, state);

	return state % ep_ro;
}

static inline void reset_kcsan_skip(void)
{
	long skip_count = kcsan_skip_watch -
			  (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
				   kcsan_prandom_u32_max(kcsan_skip_watch) :
				   0);
	this_cpu_write(kcsan_skip, skip_count);
}

static __always_inline bool kcsan_is_enabled(void)
{
	return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
}

/* Introduce delay depending on context and configuration. */
static void delay_access(int type)
{
	unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
	/* For certain access types, skew the random delay to be longer. */
	unsigned int skew_delay_order =
		(type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;

	delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
			       kcsan_prandom_u32_max(delay >> skew_delay_order) :
			       0;
	udelay(delay);
}

void kcsan_save_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	task->kcsan_save_irqtrace = task->irqtrace;
#endif
}

void kcsan_restore_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	task->irqtrace = task->kcsan_save_irqtrace;
#endif
}

/*
 * Pull everything together: check_access() below contains the performance
 * critical operations; the fast-path (including check_access) functions should
 * all be inlinable by the instrumentation functions.
 *
 * The slow-path functions (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
 * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
 * be filtered from the stacktrace, as well as give them unique names for the
 * UACCESS whitelist of objtool. Each function uses user_access_save/restore(),
 * since they do not access any user memory, but instrumentation is still
 * emitted in UACCESS regions.
 */

static noinline void kcsan_found_watchpoint(const volatile void *ptr,
					    size_t size,
					    int type,
					    atomic_long_t *watchpoint,
					    long encoded_watchpoint)
{
	unsigned long flags;
	bool consumed;

	if (!kcsan_is_enabled())
		return;

	/*
	 * The access_mask check relies on value-change comparison. To avoid
	 * reporting a race where e.g. the writer set up the watchpoint, but the
	 * reader has access_mask!=0, we have to ignore the found watchpoint.
	 */
	if (get_ctx()->access_mask != 0)
		return;

	/*
	 * Consume the watchpoint as soon as possible, to minimize the chances
	 * of !consumed. Consuming the watchpoint must always be guarded by the
	 * kcsan_is_enabled() check, as otherwise we might erroneously trigger
	 * reports when disabled.
	 */
	consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);

	/* keep this after try_consume_watchpoint */
	flags = user_access_save();

	if (consumed) {
		kcsan_save_irqtrace(current);
		kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE,
			     KCSAN_REPORT_CONSUMED_WATCHPOINT,
			     watchpoint - watchpoints);
		kcsan_restore_irqtrace(current);
	} else {
		/*
		 * The other thread may not print any diagnostics, as it has
		 * already removed the watchpoint, or another thread consumed
		 * the watchpoint before this thread.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
	}

	if ((type & KCSAN_ACCESS_ASSERT) != 0)
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
	else
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);

	user_access_restore(flags);
}

static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
	atomic_long_t *watchpoint;
	union {
		u8 _1;
		u16 _2;
		u32 _4;
		u64 _8;
	} expect_value;
	unsigned long access_mask;
	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
	unsigned long ua_flags = user_access_save();
	unsigned long irq_flags = 0;

	/*
	 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
	 * should_watch().
	 */
	reset_kcsan_skip();

	if (!kcsan_is_enabled())
		goto out;

	/*
	 * Special atomic rules: unlikely to be true, so we check them here in
	 * the slow-path, and not in the fast-path in is_atomic(). Call after
	 * kcsan_is_enabled(), as we may access memory that is not yet
	 * initialized during early boot.
	 */
	if (!is_assert && kcsan_is_atomic_special(ptr))
		goto out;

	if (!check_encodable((unsigned long)ptr, size)) {
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_UNENCODABLE_ACCESSES]);
		goto out;
	}

	/*
	 * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
	 * runtime is entered for every memory access, and potentially useful
	 * information is lost if dirtied by KCSAN.
	 */
	kcsan_save_irqtrace(current);
	if (!kcsan_interrupt_watcher)
		local_irq_save(irq_flags);

	watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
	if (watchpoint == NULL) {
		/*
		 * Out of capacity: the size of 'watchpoints', and the frequency
		 * with which should_watch() returns true should be tweaked so
		 * that this case happens very rarely.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_NO_CAPACITY]);
		goto out_unlock;
	}

	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_SETUP_WATCHPOINTS]);
	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);

	/*
	 * Read the current value, to later check and infer a race if the data
	 * was modified via a non-instrumented access, e.g. from a device.
	 */
	expect_value._8 = 0;
	switch (size) {
	case 1:
		expect_value._1 = READ_ONCE(*(const u8 *)ptr);
		break;
	case 2:
		expect_value._2 = READ_ONCE(*(const u16 *)ptr);
		break;
	case 4:
		expect_value._4 = READ_ONCE(*(const u32 *)ptr);
		break;
	case 8:
		expect_value._8 = READ_ONCE(*(const u64 *)ptr);
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
		kcsan_disable_current();
		pr_err("watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
		       is_write ? "write" : "read", size, ptr,
		       watchpoint_slot((unsigned long)ptr),
		       encode_watchpoint((unsigned long)ptr, size, is_write));
		kcsan_enable_current();
	}

	/*
	 * Delay this thread, to increase probability of observing a racy
	 * conflicting access.
	 */
	delay_access(type);

	/*
	 * Re-read value, and check if it is as expected; if not, we infer a
	 * racy access.
	 */
	access_mask = get_ctx()->access_mask;
	switch (size) {
	case 1:
		expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
		if (access_mask)
			expect_value._1 &= (u8)access_mask;
		break;
	case 2:
		expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
		if (access_mask)
			expect_value._2 &= (u16)access_mask;
		break;
	case 4:
		expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
		if (access_mask)
			expect_value._4 &= (u32)access_mask;
		break;
	case 8:
		expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
		if (access_mask)
			expect_value._8 &= (u64)access_mask;
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	/* Were we able to observe a value-change? */
	if (expect_value._8 != 0)
		value_change = KCSAN_VALUE_CHANGE_TRUE;

	/* Check if this access raced with another. */
	if (!consume_watchpoint(watchpoint)) {
		/*
		 * Depending on the access type, map a value_change of MAYBE to
		 * TRUE (always report) or FALSE (never report).
		 */
		if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
			if (access_mask != 0) {
				/*
				 * For access with access_mask, we require a
				 * value-change, as it is likely that races on
				 * ~access_mask bits are expected.
				 */
				value_change = KCSAN_VALUE_CHANGE_FALSE;
			} else if (size > 8 || is_assert) {
				/* Always assume a value-change. */
				value_change = KCSAN_VALUE_CHANGE_TRUE;
			}
		}

		/*
		 * No need to increment 'data_races' counter, as the racing
		 * thread already did.
		 *
		 * Count 'assert_failures' for each failed ASSERT access,
		 * therefore both this thread and the racing thread may
		 * increment this counter.
		 */
		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

		kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL,
			     watchpoint - watchpoints);
	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
		/* Inferring a race, since the value should not have changed. */

		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]);
		if (is_assert)
			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
			kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
				     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
				     watchpoint - watchpoints);
	}

	/*
	 * Remove watchpoint; must be after reporting, since the slot may be
	 * reused after this point.
	 */
	remove_watchpoint(watchpoint);
	atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
out_unlock:
	if (!kcsan_interrupt_watcher)
		local_irq_restore(irq_flags);
	kcsan_restore_irqtrace(current);
out:
	user_access_restore(ua_flags);
}

static __always_inline void check_access(const volatile void *ptr, size_t size,
					 int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	atomic_long_t *watchpoint;
	long encoded_watchpoint;

	/*
	 * Do nothing for 0 sized check; this comparison will be optimized out
	 * for constant sized instrumentation (__tsan_{read,write}N).
	 */
	if (unlikely(size == 0))
		return;

	/*
	 * Avoid user_access_save in fast-path: find_watchpoint is safe without
	 * user_access_save, as the address that ptr points to is only used to
	 * check if a watchpoint exists; ptr is never dereferenced.
	 */
	watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
				     &encoded_watchpoint);
	/*
	 * It is safe to check kcsan_is_enabled() after find_watchpoint in the
	 * slow-path, as long as no state changes that cause a race to be
	 * detected and reported have occurred until kcsan_is_enabled() is
	 * checked.
	 */

	if (unlikely(watchpoint != NULL))
		kcsan_found_watchpoint(ptr, size, type, watchpoint,
				       encoded_watchpoint);
	else {
		struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */

		if (unlikely(should_watch(ptr, size, type, ctx)))
			kcsan_setup_watchpoint(ptr, size, type);
		else if (unlikely(ctx->scoped_accesses.prev))
			kcsan_check_scoped_accesses();
	}
}

/* === Public interface ===================================================== */

void __init kcsan_init(void)
{
	int cpu;

	BUG_ON(!in_task());

	for_each_possible_cpu(cpu)
		per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();

	/*
	 * We are in the init task, and no other tasks should be running;
	 * WRITE_ONCE without memory barrier is sufficient.
	 */
	if (kcsan_early_enable) {
		pr_info("enabled early\n");
		WRITE_ONCE(kcsan_enabled, true);
	}
}

/* === Exported interface =================================================== */

void kcsan_disable_current(void)
{
	++get_ctx()->disable_count;
}
EXPORT_SYMBOL(kcsan_disable_current);

void kcsan_enable_current(void)
{
	if (get_ctx()->disable_count-- == 0) {
		/*
		 * Warn if kcsan_enable_current() calls are unbalanced with
		 * kcsan_disable_current() calls, which causes disable_count to
		 * become negative and should not happen.
		 */
		kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_enable_current);

void kcsan_enable_current_nowarn(void)
{
	if (get_ctx()->disable_count-- == 0)
		kcsan_disable_current();
}
EXPORT_SYMBOL(kcsan_enable_current_nowarn);

void kcsan_nestable_atomic_begin(void)
{
	/*
	 * Do *not* check and warn if we are in a flat atomic region: nestable
	 * and flat atomic regions are independent from each other.
	 * See include/linux/kcsan.h: struct kcsan_ctx comments for more
	 * details.
	 */

	++get_ctx()->atomic_nest_count;
}
EXPORT_SYMBOL(kcsan_nestable_atomic_begin);

void kcsan_nestable_atomic_end(void)
{
	if (get_ctx()->atomic_nest_count-- == 0) {
		/*
		 * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
		 * kcsan_nestable_atomic_begin() calls, which causes
		 * atomic_nest_count to become negative and should not happen.
		 */
		kcsan_nestable_atomic_begin(); /* restore to 0 */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_nestable_atomic_end);

void kcsan_flat_atomic_begin(void)
{
	get_ctx()->in_flat_atomic = true;
}
EXPORT_SYMBOL(kcsan_flat_atomic_begin);

void kcsan_flat_atomic_end(void)
{
	get_ctx()->in_flat_atomic = false;
}
EXPORT_SYMBOL(kcsan_flat_atomic_end);

void kcsan_atomic_next(int n)
{
	get_ctx()->atomic_next = n;
}
EXPORT_SYMBOL(kcsan_atomic_next);
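
/*
 * kcsan_atomic_next() usage, a minimal illustrative sketch (variables below
 * are hypothetical): a caller about to perform a bounded sequence of plain
 * accesses that are known to be race-tolerant can mark them with
 *
 *	kcsan_atomic_next(3);	// treat the next 3 checked accesses as atomic
 *	a = x; b = y; c = z;
 *	kcsan_atomic_next(0);	// back to normal checking
 *
 * the seqcount read side uses this mechanism, for example.
 */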

void kcsan_set_access_mask(unsigned long mask)
{
	get_ctx()->access_mask = mask;
}
EXPORT_SYMBOL(kcsan_set_access_mask);
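
/*
 * kcsan_set_access_mask() is what backs helpers such as ASSERT_EXCLUSIVE_BITS()
 * in <linux/kcsan-checks.h>: with a non-zero mask, only value-changes in the
 * masked bits are reported, so races on the remaining ~mask bits are ignored
 * by the comparison in kcsan_setup_watchpoint(). Illustrative use (with
 * hypothetical flag bits):
 *
 *	ASSERT_EXCLUSIVE_BITS(flags, FLAG_FOO | FLAG_BAR);
 */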

struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	__kcsan_check_access(ptr, size, type);

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	INIT_LIST_HEAD(&sa->list);
	sa->ptr = ptr;
	sa->size = size;
	sa->type = type;

	if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
		INIT_LIST_HEAD(&ctx->scoped_accesses);
	list_add(&sa->list, &ctx->scoped_accesses);

	ctx->disable_count--;
	return sa;
}
EXPORT_SYMBOL(kcsan_begin_scoped_access);

void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__))
		return;

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	list_del(&sa->list);
	if (list_empty(&ctx->scoped_accesses))
		/*
		 * Ensure we do not enter kcsan_check_scoped_accesses()
		 * slow-path if unnecessary, and avoid requiring list_empty()
		 * in the fast-path (to avoid a READ_ONCE() and potential
		 * uaccess warning).
		 */
		ctx->scoped_accesses.prev = NULL;

	ctx->disable_count--;

	__kcsan_check_access(sa->ptr, sa->size, sa->type);
}
EXPORT_SYMBOL(kcsan_end_scoped_access);
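
/*
 * Scoped accesses back the *_SCOPED assertion helpers such as
 * ASSERT_EXCLUSIVE_ACCESS_SCOPED(): the registered access is re-checked on
 * every subsequent instrumented access in the current context (via
 * kcsan_check_scoped_accesses()), not only at the point of the assertion.
 * Illustrative use (obj->state is a hypothetical field):
 *
 *	ASSERT_EXCLUSIVE_ACCESS_SCOPED(obj->state);
 *	... // any racing access to obj->state in this scope is reported
 */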

void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
	check_access(ptr, size, type);
}
EXPORT_SYMBOL(__kcsan_check_access);

/*
 * KCSAN uses the same instrumentation that is emitted by supported compilers
 * for ThreadSanitizer (TSAN).
 *
 * When enabled, the compiler emits instrumentation calls (the functions
 * prefixed with "__tsan" below) for all loads and stores that it generated;
 * inline asm is not instrumented.
 *
 * Note that not all supported compiler versions distinguish aligned/unaligned
 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
 * version to the generic version, which can handle both.
 */

#define DEFINE_TSAN_READ_WRITE(size)                                           \
	void __tsan_read##size(void *ptr);                                     \
	void __tsan_read##size(void *ptr)                                      \
	{                                                                      \
		check_access(ptr, size, 0);                                    \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_read##size);                                      \
	void __tsan_unaligned_read##size(void *ptr)                            \
		__alias(__tsan_read##size);                                    \
	EXPORT_SYMBOL(__tsan_unaligned_read##size);                            \
	void __tsan_write##size(void *ptr);                                    \
	void __tsan_write##size(void *ptr)                                     \
	{                                                                      \
		check_access(ptr, size, KCSAN_ACCESS_WRITE);                   \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_write##size);                                     \
	void __tsan_unaligned_write##size(void *ptr)                           \
		__alias(__tsan_write##size);                                   \
	EXPORT_SYMBOL(__tsan_unaligned_write##size);                           \
	void __tsan_read_write##size(void *ptr);                               \
	void __tsan_read_write##size(void *ptr)                                \
	{                                                                      \
		check_access(ptr, size,                                        \
			     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE);      \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_read_write##size);                                \
	void __tsan_unaligned_read_write##size(void *ptr)                      \
		__alias(__tsan_read_write##size);                              \
	EXPORT_SYMBOL(__tsan_unaligned_read_write##size)

DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
DEFINE_TSAN_READ_WRITE(4);
DEFINE_TSAN_READ_WRITE(8);
DEFINE_TSAN_READ_WRITE(16);
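
/*
 * Illustrative sketch of how the above is reached (p->counter is a
 * hypothetical 4-byte field): for a plain store such as
 *
 *	p->counter = 1;
 *
 * an instrumented build emits a call to __tsan_write4(&p->counter) for the
 * store, which funnels into check_access(&p->counter, 4, KCSAN_ACCESS_WRITE).
 */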

void __tsan_read_range(void *ptr, size_t size);
void __tsan_read_range(void *ptr, size_t size)
{
	check_access(ptr, size, 0);
}
EXPORT_SYMBOL(__tsan_read_range);

void __tsan_write_range(void *ptr, size_t size);
void __tsan_write_range(void *ptr, size_t size)
{
	check_access(ptr, size, KCSAN_ACCESS_WRITE);
}
EXPORT_SYMBOL(__tsan_write_range);

/*
 * Use of explicit volatile is generally disallowed [1]; however, volatile is
 * still used in various concurrent contexts, whether in low-level
 * synchronization primitives or for legacy reasons.
 * [1] https://lwn.net/Articles/233479/
 *
 * We only consider volatile accesses atomic if they are aligned and would pass
 * the size-check of compiletime_assert_rwonce_type().
 */
#define DEFINE_TSAN_VOLATILE_READ_WRITE(size)                                  \
	void __tsan_volatile_read##size(void *ptr);                            \
	void __tsan_volatile_read##size(void *ptr)                             \
	{                                                                      \
		const bool is_atomic = size <= sizeof(long long) &&            \
				       IS_ALIGNED((unsigned long)ptr, size);   \
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)      \
			return;                                                \
		check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0);  \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_volatile_read##size);                             \
	void __tsan_unaligned_volatile_read##size(void *ptr)                   \
		__alias(__tsan_volatile_read##size);                           \
	EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size);                   \
	void __tsan_volatile_write##size(void *ptr);                           \
	void __tsan_volatile_write##size(void *ptr)                            \
	{                                                                      \
		const bool is_atomic = size <= sizeof(long long) &&            \
				       IS_ALIGNED((unsigned long)ptr, size);   \
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)      \
			return;                                                \
		check_access(ptr, size,                                        \
			     KCSAN_ACCESS_WRITE |                              \
				     (is_atomic ? KCSAN_ACCESS_ATOMIC : 0));   \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_volatile_write##size);                            \
	void __tsan_unaligned_volatile_write##size(void *ptr)                  \
		__alias(__tsan_volatile_write##size);                          \
	EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)

DEFINE_TSAN_VOLATILE_READ_WRITE(1);
DEFINE_TSAN_VOLATILE_READ_WRITE(2);
DEFINE_TSAN_VOLATILE_READ_WRITE(4);
DEFINE_TSAN_VOLATILE_READ_WRITE(8);
DEFINE_TSAN_VOLATILE_READ_WRITE(16);

/*
 * The below are not required by KCSAN, but can still be emitted by the
 * compiler.
 */
void __tsan_func_entry(void *call_pc);
void __tsan_func_entry(void *call_pc)
{
}
EXPORT_SYMBOL(__tsan_func_entry);
void __tsan_func_exit(void);
void __tsan_func_exit(void)
{
}
EXPORT_SYMBOL(__tsan_func_exit);
void __tsan_init(void);
void __tsan_init(void)
{
}
EXPORT_SYMBOL(__tsan_init);

/*
 * Instrumentation for atomic builtins (__atomic_*, __sync_*).
 *
 * Normal kernel code _should not_ be using them directly, but some
 * architectures may implement some or all atomics using the compilers'
 * builtins.
 *
 * Note: If an architecture decides to fully implement atomics using the
 * builtins, because they are implicitly instrumented by KCSAN (and KASAN,
 * etc.), implementing the ARCH_ATOMIC interface (to get instrumentation via
 * atomic-instrumented) is no longer necessary.
 *
 * TSAN instrumentation replaces atomic accesses with calls to any of the below
 * functions, whose job is to also execute the operation itself.
 */

#define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits)                                                        \
	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder);                      \
	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder)                       \
	{                                                                                          \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                                    \
			check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC);              \
		}                                                                                  \
		return __atomic_load_n(ptr, memorder);                                             \
	}                                                                                          \
	EXPORT_SYMBOL(__tsan_atomic##bits##_load);                                                 \
	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder);                   \
	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder)                    \
	{                                                                                          \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                                    \
			check_access(ptr, bits / BITS_PER_BYTE,                                    \
				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC);                    \
		}                                                                                  \
		__atomic_store_n(ptr, v, memorder);                                                \
	}                                                                                          \
	EXPORT_SYMBOL(__tsan_atomic##bits##_store)

#define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix)                                                   \
	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder);                 \
	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder)                  \
	{                                                                                          \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                                    \
			check_access(ptr, bits / BITS_PER_BYTE,                                    \
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE |                  \
					     KCSAN_ACCESS_ATOMIC);                                 \
		}                                                                                  \
		return __atomic_##op##suffix(ptr, v, memorder);                                    \
	}                                                                                          \
	EXPORT_SYMBOL(__tsan_atomic##bits##_##op)

/*
 * Note: CAS operations are always classified as write, even in case they
 * fail. We cannot perform check_access() after a write, as it might lead to
 * false positives, in cases such as:
 *
 *	T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
 *
 *	T1: if (__atomic_load_n(&p->flag, ...)) {
 *		modify *p;
 *		p->flag = 0;
 *	    }
 *
 * The only downside is that, if there are 3 threads, with one CAS that
 * succeeds, another CAS that fails, and an unmarked racing operation, we may
 * point at the wrong CAS as the source of the race. However, if we assume that
 * all CAS can succeed in some other execution, the data race is still valid.
 */
#define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak)                                           \
	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp,          \
							      u##bits val, int mo, int fail_mo);   \
	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp,          \
							      u##bits val, int mo, int fail_mo)    \
	{                                                                                          \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                                    \
			check_access(ptr, bits / BITS_PER_BYTE,                                    \
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE |                  \
					     KCSAN_ACCESS_ATOMIC);                                 \
		}                                                                                  \
		return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo);              \
	}                                                                                          \
	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_##strength)

#define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)                                                       \
	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
							   int mo, int fail_mo);                   \
	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
							   int mo, int fail_mo)                    \
	{                                                                                          \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                                    \
			check_access(ptr, bits / BITS_PER_BYTE,                                    \
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE |                  \
					     KCSAN_ACCESS_ATOMIC);                                 \
		}                                                                                  \
		__atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo);                       \
		return exp;                                                                        \
	}                                                                                          \
	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_val)

#define DEFINE_TSAN_ATOMIC_OPS(bits)                                                               \
	DEFINE_TSAN_ATOMIC_LOAD_STORE(bits);                                                       \
	DEFINE_TSAN_ATOMIC_RMW(exchange, bits, _n);                                                \
	DEFINE_TSAN_ATOMIC_RMW(fetch_add, bits, );                                                 \
	DEFINE_TSAN_ATOMIC_RMW(fetch_sub, bits, );                                                 \
	DEFINE_TSAN_ATOMIC_RMW(fetch_and, bits, );                                                 \
	DEFINE_TSAN_ATOMIC_RMW(fetch_or, bits, );                                                  \
	DEFINE_TSAN_ATOMIC_RMW(fetch_xor, bits, );                                                 \
	DEFINE_TSAN_ATOMIC_RMW(fetch_nand, bits, );                                                \
	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0);                                               \
	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, weak, 1);                                                 \
	DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)

DEFINE_TSAN_ATOMIC_OPS(8);
DEFINE_TSAN_ATOMIC_OPS(16);
DEFINE_TSAN_ATOMIC_OPS(32);
#ifdef CONFIG_64BIT
DEFINE_TSAN_ATOMIC_OPS(64);
#endif
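
/*
 * Illustrative sketch (x is a hypothetical 32-bit variable): on an
 * architecture whose atomics use the compiler builtins, a call like
 *
 *	__atomic_fetch_add(&x, 1, __ATOMIC_RELAXED);
 *
 * is rewritten by TSAN instrumentation into
 * __tsan_atomic32_fetch_add(&x, 1, __ATOMIC_RELAXED), which performs the same
 * RMW via the builtin after check_access().
 */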

void __tsan_atomic_thread_fence(int memorder);
void __tsan_atomic_thread_fence(int memorder)
{
	__atomic_thread_fence(memorder);
}
EXPORT_SYMBOL(__tsan_atomic_thread_fence);

void __tsan_atomic_signal_fence(int memorder);
void __tsan_atomic_signal_fence(int memorder) { }
EXPORT_SYMBOL(__tsan_atomic_signal_fence);

#ifdef __HAVE_ARCH_MEMSET
void *__tsan_memset(void *s, int c, size_t count);
noinline void *__tsan_memset(void *s, int c, size_t count)
{
	/*
	 * Instead of not setting up watchpoints where accessed size is greater
	 * than MAX_ENCODABLE_SIZE, truncate checked size to MAX_ENCODABLE_SIZE.
	 */
	size_t check_len = min_t(size_t, count, MAX_ENCODABLE_SIZE);

	check_access(s, check_len, KCSAN_ACCESS_WRITE);
	return memset(s, c, count);
}
#else
void *__tsan_memset(void *s, int c, size_t count) __alias(memset);
#endif
EXPORT_SYMBOL(__tsan_memset);

#ifdef __HAVE_ARCH_MEMMOVE
void *__tsan_memmove(void *dst, const void *src, size_t len);
noinline void *__tsan_memmove(void *dst, const void *src, size_t len)
{
	size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE);

	check_access(dst, check_len, KCSAN_ACCESS_WRITE);
	check_access(src, check_len, 0);
	return memmove(dst, src, len);
}
#else
void *__tsan_memmove(void *dst, const void *src, size_t len) __alias(memmove);
#endif
EXPORT_SYMBOL(__tsan_memmove);

#ifdef __HAVE_ARCH_MEMCPY
void *__tsan_memcpy(void *dst, const void *src, size_t len);
noinline void *__tsan_memcpy(void *dst, const void *src, size_t len)
{
	size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE);

	check_access(dst, check_len, KCSAN_ACCESS_WRITE);
	check_access(src, check_len, 0);
	return memcpy(dst, src, len);
}
#else
void *__tsan_memcpy(void *dst, const void *src, size_t len) __alias(memcpy);
#endif
EXPORT_SYMBOL(__tsan_memcpy);
1100