Occurrences of the identifier "ptr", by directory and file. Each hit is shown as: line number, source line, enclosing function; usage tags (argument, local, member) are given in parentheses. Files truncated by the search keep their original "[all …]" marker.

/kernel/kcsan/
core.c
    211  __kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);  in kcsan_check_scoped_accesses()
    217  is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)  in is_atomic() (argument)
    232  !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))  in is_atomic()
    254  should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)  in should_watch() (argument)
    263  if (is_atomic(ptr, size, type, ctx))  in should_watch()
    347  static noinline void kcsan_found_watchpoint(const volatile void *ptr,  in kcsan_found_watchpoint() (argument)
    380  kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE,  in kcsan_found_watchpoint()
    402  kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)  in kcsan_setup_watchpoint() (argument)
    433  if (!is_assert && kcsan_is_atomic_special(ptr))  in kcsan_setup_watchpoint()
    436  if (!check_encodable((unsigned long)ptr, size)) {  in kcsan_setup_watchpoint()
    [all …]
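For orientation: the check at core.c:232 is the heart of is_atomic() here, treating a plain, non-compound access whose address is aligned to its size as if it were atomic. A minimal userspace sketch of that alignment heuristic follows; apart from the IS_ALIGNED() shape, every name below is illustrative, not KCSAN's.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same shape as the kernel's IS_ALIGNED() for power-of-two alignments. */
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    /* Hypothetical stand-in for the predicate behind core.c:232. */
    static bool plain_access_is_atomic(const volatile void *ptr, size_t size)
    {
        /* word-sized or smaller, power of two, naturally aligned */
        if (size > sizeof(long) || (size & (size - 1)) != 0)
            return false;
        return IS_ALIGNED((uintptr_t)ptr, size);
    }

    int main(void)
    {
        long word;

        printf("%d\n", plain_access_is_atomic(&word, sizeof(word)));            /* 1 */
        printf("%d\n", plain_access_is_atomic((char *)&word + 1, sizeof(int))); /* 0 */
        return 0;
    }

Accesses failing this test fall through to the watchpoint machinery in the real code.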
report.c
    24   const volatile void *ptr;  (member)
    389  get_access_type(other_info->ai.access_type), other_info->ai.ptr,  in print_report()
    403  get_access_type(ai->access_type), ai->ptr, ai->size,  in print_report()
    409  get_access_type(ai->access_type), ai->ptr, ai->size,  in print_report()
    501  } while (other_info->ai.size && other_info->ai.ptr == ai->ptr &&  in set_other_info_task_blocking()
    552  …if (WARN_ON(!matching_access((unsigned long)other_info->ai.ptr & WATCHPOINT_ADDR_MASK, other_info-…  in prepare_report_consumer()
    553  (unsigned long)ai->ptr & WATCHPOINT_ADDR_MASK, ai->size)))  in prepare_report_consumer()
    556  if (!matching_access((unsigned long)other_info->ai.ptr, other_info->ai.size,  in prepare_report_consumer()
    557  (unsigned long)ai->ptr, ai->size)) {  in prepare_report_consumer()
    596  void kcsan_report(const volatile void *ptr, size_t size, int access_type,  in kcsan_report() (argument)
    [all …]
/kernel/
cfi.c
    28   static inline void handle_cfi_failure(void *ptr)  in handle_cfi_failure() (argument)
    31   WARN_RATELIMIT(1, "CFI failure (target: %pS):\n", ptr);  in handle_cfi_failure()
    33   panic("CFI failure (target: %pS)\n", ptr);  in handle_cfi_failure()
    71   static inline int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)  in ptr_to_shadow() (argument)
    74   unsigned long page = ptr >> PAGE_SHIFT;  in ptr_to_shadow()
    151  unsigned long ptr;  in add_module_to_shadow() (local)
    163  for (ptr = min_addr; ptr <= max_addr; ptr += PAGE_SIZE) {  in add_module_to_shadow()
    164  int index = ptr_to_shadow(s, ptr);  in add_module_to_shadow()
    177  unsigned long ptr;  in remove_module_from_shadow() (local)
    179  for (ptr = min_addr; ptr <= max_addr; ptr += PAGE_SIZE) {  in remove_module_from_shadow()
    [all …]
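ptr_to_shadow() at cfi.c:71-74 maps an address to a shadow-table slot by its page frame, and the module walkers step through that range one PAGE_SIZE at a time. A hypothetical userspace rendering of just the address-to-page-index arithmetic, assuming 4 KiB pages; function and variable names are invented.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12 /* assumed 4 KiB pages */

    /* Invented analog of cfi.c:74: index an address by its page frame. */
    static long page_index_of(uintptr_t ptr, uintptr_t base)
    {
        uintptr_t page = ptr >> PAGE_SHIFT;

        return (long)(page - (base >> PAGE_SHIFT));
    }

    int main(void)
    {
        uintptr_t base = 0x10000000u;

        printf("%ld\n", page_index_of(base + 0x0000, base)); /* 0 */
        printf("%ld\n", page_index_of(base + 0x1000, base)); /* 1: next page */
        printf("%ld\n", page_index_of(base + 0x1fff, base)); /* 1: same page */
        return 0;
    }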
iomem.c
    142  void **ptr, *addr;  in devm_memremap() (local)
    144  ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,  in devm_memremap()
    146  if (!ptr)  in devm_memremap()
    151  *ptr = addr;  in devm_memremap()
    152  devres_add(dev, ptr);  in devm_memremap()
    154  devres_free(ptr);  in devm_memremap()
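devm_memremap() is the textbook devres shape: allocate a small holder with devres_alloc_node(), do the real work, then either stash the result and devres_add() the holder, or devres_free() it on failure. A self-contained userspace analog of that success/failure split; the cleanup-list machinery below is invented for illustration and is not the devres API.

    #include <stdio.h>
    #include <stdlib.h>

    /* Invented stand-ins for devres_alloc()/devres_add()/devres_free(). */
    struct holder {
        void *res;
        struct holder *next;
    };

    static struct holder *cleanup_list;

    static void holder_add(struct holder *h)
    {
        h->next = cleanup_list;
        cleanup_list = h;
    }

    /* Mirrors the shape of devm_memremap(): holder first, resource second. */
    static void *managed_alloc(size_t len)
    {
        struct holder *ptr = calloc(1, sizeof(*ptr)); /* devres_alloc_node() */
        void *addr;

        if (!ptr)
            return NULL;
        addr = malloc(len);       /* the memremap() step */
        if (addr) {
            ptr->res = addr;      /* *ptr = addr; */
            holder_add(ptr);      /* devres_add(dev, ptr); */
        } else {
            free(ptr);            /* devres_free(ptr); */
        }
        return addr;
    }

    /* What devm_memremap_release() would do, for every registration. */
    static void release_all(void)
    {
        while (cleanup_list) {
            struct holder *h = cleanup_list;

            cleanup_list = h->next;
            free(h->res);
            free(h);
        }
    }

    int main(void)
    {
        void *p = managed_alloc(64);

        printf("%s\n", p ? "mapped" : "failed");
        release_all();
        return 0;
    }

The point of the pattern is that the holder, not the resource, is what gets registered, so teardown order and ownership stay with the device.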
kexec_core.c
    606  #define for_each_kimage_entry(image, ptr, entry) \  (argument)
    607  for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
    608  ptr = (entry & IND_INDIRECTION) ? \
    609  boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
    621  kimage_entry_t *ptr, entry;  in kimage_free() (local)
    633  for_each_kimage_entry(image, ptr, entry) {  in kimage_free()
    668  kimage_entry_t *ptr, entry;  in kimage_dst_used() (local)
    671  for_each_kimage_entry(image, ptr, entry) {  in kimage_dst_used()
    676  return ptr;  in kimage_dst_used()
    805  char *ptr;  in kimage_load_normal_segment() (local)
    [all …]
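for_each_kimage_entry() walks a list of tagged words: a word carrying the indirection tag redirects the cursor to another page of entries, anything else advances the cursor one slot, and a done tag (or a zero entry) ends the walk. A runnable sketch with invented tag values; the kernel's are IND_DONE, IND_INDIRECTION, and friends, applied to page-aligned physical addresses.

    #include <stdint.h>
    #include <stdio.h>

    /* Invented tags; the kernel's also live in the low bits of each entry. */
    #define TAG_DONE  0x1u
    #define TAG_INDIR 0x2u
    #define TAG_MASK  0x3u

    typedef uintptr_t entry_t;

    /* Same control flow as for_each_kimage_entry() at kexec_core.c:606. */
    #define for_each_entry(head, ptr, entry)                          \
        for (ptr = (head); (entry = *ptr) && !(entry & TAG_DONE);     \
             ptr = (entry & TAG_INDIR)                                \
                     ? (entry_t *)(entry & ~(uintptr_t)TAG_MASK)      \
                     : ptr + 1)

    int main(void)
    {
        entry_t second[] = { 0x3000, TAG_DONE };
        entry_t first[]  = { 0x1000, 0x2000, (entry_t)second | TAG_INDIR };
        entry_t *ptr, entry;

        for_each_entry(first, ptr, entry) {
            if (entry & TAG_INDIR)
                continue; /* tag word redirecting the cursor, not payload */
            printf("%#lx\n", (unsigned long)entry); /* 0x1000 0x2000 0x3000 */
        }
        return 0;
    }

As in kimage_free(), the loop body still sees the indirection entries themselves; callers that only want payload must filter on the tag.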
rseq.c
    118  u64 ptr;  in rseq_get_rseq_cs() (local)
    124  if (get_user(ptr, &t->rseq->rseq_cs))  in rseq_get_rseq_cs()
    127  if (copy_from_user(&ptr, &t->rseq->rseq_cs, sizeof(ptr)))  in rseq_get_rseq_cs()
    130  if (!ptr) {  in rseq_get_rseq_cs()
    134  if (ptr >= TASK_SIZE)  in rseq_get_rseq_cs()
    136  urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr;  in rseq_get_rseq_cs()
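rseq_get_rseq_cs() reads rseq_cs as a raw u64, rejects anything at or above TASK_SIZE, and only then casts it to a user pointer. A userspace sketch of that validate-before-cast idiom; the limit constant and names are assumed, and the real code returns -EINVAL where this returns NULL.

    #include <stdint.h>
    #include <stdio.h>

    #define FAKE_TASK_SIZE (1ull << 47) /* assumed user address-space limit */

    struct rseq_cs_like { uint64_t start_ip; }; /* illustrative payload */

    /* Validate the raw u64 before treating it as a pointer (rseq.c:130-136). */
    static struct rseq_cs_like *decode_user_ptr(uint64_t raw)
    {
        if (!raw)
            return NULL; /* no critical section registered */
        if (raw >= FAKE_TASK_SIZE)
            return NULL; /* the kernel returns -EINVAL here instead */
        return (struct rseq_cs_like *)(uintptr_t)raw;
    }

    int main(void)
    {
        struct rseq_cs_like cs = { 42 };
        struct rseq_cs_like *p = decode_user_ptr((uint64_t)(uintptr_t)&cs);

        printf("%lu\n", (unsigned long)(p ? p->start_ip : 0)); /* 42 */
        printf("%p\n", (void *)decode_user_ptr(~0ull));        /* rejected */
        return 0;
    }

Keeping the value as u64 until after the range check is what makes the check meaningful on 32-bit compat tasks.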
compat.c
    279  void __user *ptr;  in compat_alloc_user_space() (local)
    285  ptr = arch_compat_alloc_user_space(len);  in compat_alloc_user_space()
    287  if (unlikely(!access_ok(ptr, len)))  in compat_alloc_user_space()
    290  return ptr;  in compat_alloc_user_space()
extable.c
    168  int func_ptr_is_kernel_text(void *ptr)  in func_ptr_is_kernel_text() (argument)
    171  addr = (unsigned long) dereference_function_descriptor(ptr);  in func_ptr_is_kernel_text()
resource.c
    1414  static void devm_resource_release(struct device *dev, void *ptr)  in devm_resource_release() (argument)
    1416  struct resource **r = ptr;  in devm_resource_release()
    1442  struct resource *conflict, **ptr;  in devm_request_resource() (local)
    1444  ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);  in devm_request_resource()
    1445  if (!ptr)  in devm_request_resource()
    1448  *ptr = new;  in devm_request_resource()
    1454  devres_free(ptr);  in devm_request_resource()
    1458  devres_add(dev, ptr);  in devm_request_resource()
    1465  struct resource **ptr = res;  in devm_resource_match() (local)
    1467  return *ptr == data;  in devm_resource_match()
crash_core.c
    330  void crash_update_vmcoreinfo_safecopy(void *ptr)  in crash_update_vmcoreinfo_safecopy() (argument)
    332  if (ptr)  in crash_update_vmcoreinfo_safecopy()
    333  memcpy(ptr, vmcoreinfo_data, vmcoreinfo_size);  in crash_update_vmcoreinfo_safecopy()
    335  vmcoreinfo_data_safecopy = ptr;  in crash_update_vmcoreinfo_safecopy()
/kernel/debug/
gdbstub.c
    293  int kgdb_hex2long(char **ptr, unsigned long *long_val)  in kgdb_hex2long() (argument)
    301  if (**ptr == '-') {  in kgdb_hex2long()
    303  (*ptr)++;  in kgdb_hex2long()
    305  while (**ptr) {  in kgdb_hex2long()
    306  hex_val = hex_to_bin(**ptr);  in kgdb_hex2long()
    312  (*ptr)++;  in kgdb_hex2long()
    346  char *ptr = (char *)gdb_regs;  in pt_regs_to_gdb_regs() (local)
    349  dbg_get_reg(i, ptr + idx, regs);  in pt_regs_to_gdb_regs()
    358  char *ptr = (char *)gdb_regs;  in gdb_regs_to_pt_regs() (local)
    361  dbg_set_reg(i, ptr + idx, regs);  in gdb_regs_to_pt_regs()
    [all …]
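kgdb_hex2long() consumes an optional leading '-', then accumulates nibbles until the first non-hex character, advancing the caller's cursor as it goes and returning how many hex digits it consumed. A userspace rendering that closely follows the listed lines; hex_to_bin() is reimplemented here because the kernel's lives in lib/.

    #include <ctype.h>
    #include <stdio.h>

    /* Userspace reimplementation of the kernel's hex_to_bin(): -1 on error. */
    static int hex_to_bin(char ch)
    {
        if (ch >= '0' && ch <= '9')
            return ch - '0';
        ch = (char)tolower((unsigned char)ch);
        if (ch >= 'a' && ch <= 'f')
            return ch - 'a' + 10;
        return -1;
    }

    /* Mirrors gdbstub.c:293: parse hex, advance *ptr, return digit count. */
    static int hex2long(char **ptr, unsigned long *long_val)
    {
        int hex_val, num = 0, negate = 0;

        *long_val = 0;
        if (**ptr == '-') {
            negate = 1;
            (*ptr)++;
        }
        while (**ptr) {
            hex_val = hex_to_bin(**ptr);
            if (hex_val < 0)
                break;
            *long_val = (*long_val << 4) | (unsigned long)hex_val;
            num++;
            (*ptr)++;
        }
        if (negate)
            *long_val = -*long_val;
        return num;
    }

    int main(void)
    {
        char buf[] = "-1a,rest";
        char *p = buf;
        unsigned long val;

        /* prints: 2 <two's complement of 0x1a> next=',' */
        printf("%d %lx next='%c'\n", hex2long(&p, &val), val, *p);
        return 0;
    }

The cursor-in, cursor-out convention is what lets the gdb packet parser chain several fields off one pointer.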
/kernel/locking/
lockdep_internals.h
    212  #define __debug_atomic_inc(ptr) \  (argument)
    213  this_cpu_inc(lockdep_stats.ptr);
    215  #define debug_atomic_inc(ptr) { \  (argument)
    217  __this_cpu_inc(lockdep_stats.ptr); \
    220  #define debug_atomic_dec(ptr) { \  (argument)
    222  __this_cpu_dec(lockdep_stats.ptr); \
    225  #define debug_atomic_read(ptr) ({ \  (argument)
    231  __total += __cpu_lockdep_stats->ptr; \
    256  # define __debug_atomic_inc(ptr) do { } while (0)  (argument)
    257  # define debug_atomic_inc(ptr) do { } while (0)  (argument)
    [all …]
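Here ptr is not a pointer at all: the debug_atomic_*() macros use it to name a per-CPU lockdep_stats field. Increments touch only the local CPU's copy, and debug_atomic_read() sums the field across all CPUs. A sketch of that split over a plain array standing in for per-CPU storage; the CPU count and struct are invented, and the ({ ... }) statement expression is the same GNU extension the kernel version uses.

    #include <stdio.h>

    #define NR_FAKE_CPUS 4 /* assumed CPU count for the sketch */

    struct lockdep_stats_like {
        unsigned long redundant_softirqs_on; /* one example counter field */
    };

    static struct lockdep_stats_like stats[NR_FAKE_CPUS]; /* per-CPU analog */

    /* The macro argument names a struct member, as in debug_atomic_inc(ptr). */
    #define stat_inc(cpu, field) (stats[cpu].field++)
    #define stat_read(field)                                       \
        ({                                                         \
            unsigned long __total = 0;                             \
            for (int __cpu = 0; __cpu < NR_FAKE_CPUS; __cpu++)     \
                __total += stats[__cpu].field;                     \
            __total;                                               \
        })

    int main(void)
    {
        stat_inc(0, redundant_softirqs_on);
        stat_inc(3, redundant_softirqs_on);
        printf("%lu\n", stat_read(redundant_softirqs_on)); /* 2 */
        return 0;
    }

Per-CPU writes avoid cache-line contention on the hot path; the full sum is only computed when someone reads the statistics.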
qspinlock_stat.h
    120  static inline void __pv_wait(u8 *ptr, u8 val)  in __pv_wait() (argument)
    125  pv_wait(ptr, val);  in __pv_wait()
lock_events.c
    101  unsigned long *ptr = per_cpu_ptr(lockevents, cpu);  in lockevent_write() (local)
    104  WRITE_ONCE(ptr[i], 0);  in lockevent_write()
/kernel/time/
alarmtimer.c
    567  struct k_itimer *ptr = container_of(alarm, struct k_itimer,  in alarm_handle_timer() (local)
    573  spin_lock_irqsave(&ptr->it_lock, flags);  in alarm_handle_timer()
    575  ptr->it_active = 0;  in alarm_handle_timer()
    576  if (ptr->it_interval)  in alarm_handle_timer()
    577  si_private = ++ptr->it_requeue_pending;  in alarm_handle_timer()
    579  if (posix_timer_event(ptr, si_private) && ptr->it_interval) {  in alarm_handle_timer()
    585  ptr->it_overrun += __alarm_forward_now(alarm, ptr->it_interval, true);  in alarm_handle_timer()
    586  ++ptr->it_requeue_pending;  in alarm_handle_timer()
    587  ptr->it_active = 1;  in alarm_handle_timer()
    590  spin_unlock_irqrestore(&ptr->it_lock, flags);  in alarm_handle_timer()
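alarm_handle_timer() recovers its enclosing struct k_itimer from the embedded alarm via container_of(). A standalone demonstration of that pointer arithmetic, using a simplified container_of() without the kernel's type checking; the struct names are illustrative.

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified container_of(): offsetof-based, as in the kernel. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct alarm_like { int dummy; };

    struct itimer_like {
        int it_active;
        struct alarm_like alarm; /* embedded member, as in struct k_itimer */
    };

    /* The callback only receives the embedded member... */
    static void handle(struct alarm_like *alarm)
    {
        /* ...and steps back to the containing timer, as at line 567. */
        struct itimer_like *ptr = container_of(alarm, struct itimer_like, alarm);

        ptr->it_active = 0;
    }

    int main(void)
    {
        struct itimer_like t = { .it_active = 1 };

        handle(&t.alarm);
        printf("%d\n", t.it_active); /* 0 */
        return 0;
    }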
/kernel/bpf/
map_in_map.c
    103  void bpf_map_fd_put_ptr(void *ptr)  in bpf_map_fd_put_ptr() (argument)
    108  bpf_map_put(ptr);  in bpf_map_fd_put_ptr()
    111  u32 bpf_map_fd_sys_lookup_elem(void *ptr)  in bpf_map_fd_sys_lookup_elem() (argument)
    113  return ((struct bpf_map *)ptr)->id;  in bpf_map_fd_sys_lookup_elem()
arraymap.c
    33   void __percpu *ptr;  in bpf_array_alloc_percpu() (local)
    37   ptr = __alloc_percpu_gfp(array->elem_size, 8,  in bpf_array_alloc_percpu()
    39   if (!ptr) {  in bpf_array_alloc_percpu()
    43   array->pptrs[i] = ptr;  in bpf_array_alloc_percpu()
    721  void **elem, *ptr;  in bpf_fd_array_map_lookup_elem() (local)
    729  if (elem && (ptr = READ_ONCE(*elem)))  in bpf_fd_array_map_lookup_elem()
    730  *value = map->ops->map_fd_sys_lookup_elem(ptr);  in bpf_fd_array_map_lookup_elem()
    814  static void prog_fd_array_put_ptr(void *ptr)  in prog_fd_array_put_ptr() (argument)
    816  bpf_prog_put(ptr);  in prog_fd_array_put_ptr()
    819  static u32 prog_fd_array_sys_lookup_elem(void *ptr)  in prog_fd_array_sys_lookup_elem() (argument)
    [all …]
queue_stack_maps.c
    112  void *ptr;  in __queue_map_get() (local)
    127  ptr = &qs->elements[qs->tail * qs->map.value_size];  in __queue_map_get()
    128  memcpy(value, ptr, qs->map.value_size);  in __queue_map_get()
    146  void *ptr;  in __stack_map_get() (local)
    166  ptr = &qs->elements[index * qs->map.value_size];  in __stack_map_get()
    167  memcpy(value, ptr, qs->map.value_size);  in __stack_map_get()
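Both getters address element i as &elements[i * value_size]: values are fixed-size blobs stored back to back in one flat byte array. A sketch of that addressing inside a small FIFO; the struct layout and ring-buffer bookkeeping are invented, only the indexing mirrors the listed lines.

    #include <stdio.h>
    #include <string.h>

    /* Invented analog of the map's flat element storage. */
    struct flat_queue {
        unsigned int head, tail;
        unsigned int size;       /* number of slots */
        unsigned int value_size; /* bytes per element */
        char elements[64];       /* elements stored back to back */
    };

    static int fq_push(struct flat_queue *qs, const void *value)
    {
        unsigned int next = (qs->head + 1) % qs->size;

        if (next == qs->tail)
            return -1; /* full */
        memcpy(&qs->elements[qs->head * qs->value_size], value, qs->value_size);
        qs->head = next;
        return 0;
    }

    static int fq_pop(struct flat_queue *qs, void *value)
    {
        void *ptr;

        if (qs->head == qs->tail)
            return -1; /* empty */
        /* the scaled indexing from queue_stack_maps.c:127-128 */
        ptr = &qs->elements[qs->tail * qs->value_size];
        memcpy(value, ptr, qs->value_size);
        qs->tail = (qs->tail + 1) % qs->size;
        return 0;
    }

    int main(void)
    {
        struct flat_queue q = { .size = 4, .value_size = sizeof(int) };
        int v;

        fq_push(&q, &(int){ 7 });
        fq_pop(&q, &v);
        printf("%d\n", v); /* 7 */
        return 0;
    }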
map_in_map.h
    16  void bpf_map_fd_put_ptr(void *ptr);
    17  u32 bpf_map_fd_sys_lookup_elem(void *ptr);
helpers.c
    386  void *ptr;  in BPF_CALL_2() (local)
    398  ptr = &READ_ONCE(storage->buf)->data[0];  in BPF_CALL_2()
    400  ptr = this_cpu_ptr(storage->percpu_buf);  in BPF_CALL_2()
    402  return (unsigned long)ptr;  in BPF_CALL_2()
    634  BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)  in BPF_CALL_2() (argument)
    639  return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);  in BPF_CALL_2()
hashtab.c
    785   void *ptr;  in htab_put_fd_value() (local)
    788   ptr = fd_htab_map_get_ptr(map, l);  in htab_put_fd_value()
    789   map->ops->map_fd_put_ptr(ptr);  in htab_put_fd_value()
    2024  void *ptr = fd_htab_map_get_ptr(map, l);  in fd_htab_map_free() (local)
    2026  map->ops->map_fd_put_ptr(ptr);  in fd_htab_map_free()
    2036  void **ptr;  in bpf_fd_htab_map_lookup_elem() (local)
    2043  ptr = htab_map_lookup_elem(map, key);  in bpf_fd_htab_map_lookup_elem()
    2044  if (ptr)  in bpf_fd_htab_map_lookup_elem()
    2045  *value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));  in bpf_fd_htab_map_lookup_elem()
    2057  void *ptr;  in bpf_fd_htab_map_update_elem() (local)
    [all …]
/kernel/trace/
trace_events.c
    2319  static char *eval_replace(char *ptr, struct trace_eval_map *map, int len)  in eval_replace() (argument)
    2325  elen = snprintf(ptr, 0, "%ld", map->eval_value);  in eval_replace()
    2330  snprintf(ptr, elen + 1, "%ld", map->eval_value);  in eval_replace()
    2333  rlen = strlen(ptr + len);  in eval_replace()
    2334  memmove(ptr + elen, ptr + len, rlen);  in eval_replace()
    2336  ptr[elen + rlen] = 0;  in eval_replace()
    2338  return ptr + elen;  in eval_replace()
    2344  char *ptr;  in update_event_printk() (local)
    2348  for (ptr = call->print_fmt; *ptr; ptr++) {  in update_event_printk()
    2349  if (*ptr == '\\') {  in update_event_printk()
    [all …]
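eval_replace() splices a decimal value over a token in place: snprintf(ptr, 0, ...) only measures how many digits are needed, the digits are written over the token's start, and memmove() slides the rest of the string into place before re-terminating. A userspace version of the same splice; it formats into a scratch buffer first (instead of writing the digits in place as the kernel does) so the ordering is safe regardless of digit and token lengths.

    #include <stdio.h>
    #include <string.h>

    /*
     * Replace the len-byte token at ptr with the decimal form of val,
     * in place; return a pointer just past the inserted text. The caller
     * must guarantee the buffer can hold the worst-case result.
     */
    static char *eval_replace_like(char *ptr, long val, int len)
    {
        char num[24]; /* enough for any long */
        int elen, rlen;

        elen = snprintf(num, sizeof(num), "%ld", val); /* measure + format */
        rlen = (int)strlen(ptr + len);            /* tail after the token */
        memmove(ptr + elen, ptr + len, rlen + 1); /* slide tail + its NUL */
        memcpy(ptr, num, elen);                   /* splice in the digits */
        return ptr + elen;
    }

    int main(void)
    {
        char fmt[64] = "state == RUNNING || state == DEAD";
        char *tok = strstr(fmt, "RUNNING");

        eval_replace_like(tok, 0, (int)strlen("RUNNING"));
        printf("%s\n", fmt); /* state == 0 || state == DEAD */
        return 0;
    }

This is the operation update_event_printk() applies to each eval name it finds while scanning a print format string.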
trace_stack.c
    328  unsigned long *ptr = filp->private_data;  in stack_max_size_read() (local)
    332  r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);  in stack_max_size_read()
    342  long *ptr = filp->private_data;  in stack_max_size_write() (local)
    360  *ptr = val;  in stack_max_size_write()
trace.c
    2500  int *ptr = trace_find_tgid_ptr(pid);  in trace_find_tgid() (local)
    2502  return ptr ? *ptr : 0;  in trace_find_tgid()
    2507  int *ptr;  in trace_save_tgid() (local)
    2513  ptr = trace_find_tgid_ptr(tsk->pid);  in trace_save_tgid()
    2514  if (!ptr)  in trace_save_tgid()
    2517  *ptr = tsk->tgid;  in trace_save_tgid()
    5446  unsigned int *ptr = v;  in saved_cmdlines_next() (local)
    5449  ptr++;  in saved_cmdlines_next()
    5453  for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];  in saved_cmdlines_next()
    5454  ptr++) {  in saved_cmdlines_next()
    [all …]
/kernel/sched/
clock.c
    302  u64 *ptr, old_val, val;  in sched_clock_remote() (local)
    342  ptr = &scd->clock;  in sched_clock_remote()
    349  ptr = &my_scd->clock;  in sched_clock_remote()
    354  if (cmpxchg64(ptr, old_val, val) != old_val)  in sched_clock_remote()
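sched_clock_remote() publishes an updated clock value with cmpxchg64() and retries when the compare at line 354 fails. A userspace skeleton of that read-compute-publish loop using C11 atomics; the kernel version also reconciles local and remote clocks (taking the larger), which this sketch omits.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t clock_val;

    /* Lock-free advance: publish only if nobody raced past us meanwhile. */
    static uint64_t advance_clock(uint64_t delta)
    {
        uint64_t old_val, val;

        do {
            old_val = atomic_load(&clock_val);
            val = old_val + delta;
            /* analog of: if (cmpxchg64(ptr, old_val, val) != old_val) retry */
        } while (!atomic_compare_exchange_weak(&clock_val, &old_val, val));
        return val;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)advance_clock(100)); /* 100 */
        printf("%llu\n", (unsigned long long)advance_clock(50));  /* 150 */
        return 0;
    }

The compare-exchange makes concurrent updaters serialize without a lock: a loser simply recomputes against the winner's published value and tries again.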