/kernel/kcsan/ |
D | core.c |
    210  check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);
    223  check_access(scoped_access->ptr, scoped_access->size,  in kcsan_check_scoped_accesses()
    231  is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)  in is_atomic() argument
    246  !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))  in is_atomic()
    268  should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)  in should_watch() argument
    277  if (is_atomic(ctx, ptr, size, type))  in should_watch()
    338  static __always_inline u64 read_instrumented_memory(const volatile void *ptr, size_t size)  in read_instrumented_memory() argument
    350  case 1: return *(const volatile u8 *)ptr;  in read_instrumented_memory()
    351  case 2: return *(const volatile u16 *)ptr;  in read_instrumented_memory()
    352  case 4: return *(const volatile u32 *)ptr;  in read_instrumented_memory()
    [all …]
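The matches above outline KCSAN's fast path: is_atomic() treats a plain, suitably aligned access as if it were atomic, and read_instrumented_memory() re-reads the watched location with a width-dispatched volatile load. A minimal userspace sketch of both patterns, with invented helper names and only a subset of the kernel's conditions:

    #include <stdint.h>
    #include <stddef.h>

    /* Sketch only: mirrors the alignment heuristic from is_atomic()
     * (line 246 above) and the size switch from read_instrumented_memory().
     * The real kernel gates on more state (ctx flags, config options). */
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    static int looks_atomic(const volatile void *ptr, size_t size, int compound)
    {
        return !compound && size <= sizeof(long) &&
               IS_ALIGNED((unsigned long)ptr, size);
    }

    static uint64_t read_sized(const volatile void *ptr, size_t size)
    {
        switch (size) {
        case 1: return *(const volatile uint8_t *)ptr;
        case 2: return *(const volatile uint16_t *)ptr;
        case 4: return *(const volatile uint32_t *)ptr;
        case 8: return *(const volatile uint64_t *)ptr;
        default: return 0; /* unsupported width: caller reports nothing */
        }
    }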
|
D | report.c |
    30   const volatile void *ptr;  member
    446  get_access_type(other_info->ai.access_type), other_info->ai.ptr,  in print_report()
    459  get_access_type(ai->access_type), ai->ptr, ai->size,  in print_report()
    463  get_access_type(ai->access_type), ai->ptr, ai->size,  in print_report()
    565  } while (other_info->ai.size && other_info->ai.ptr == ai->ptr &&  in set_other_info_task_blocking()
    616  …if (WARN_ON(!matching_access((unsigned long)other_info->ai.ptr & WATCHPOINT_ADDR_MASK, other_info-…  in prepare_report_consumer()
    617  (unsigned long)ai->ptr & WATCHPOINT_ADDR_MASK, ai->size)))  in prepare_report_consumer()
    620  if (!matching_access((unsigned long)other_info->ai.ptr, other_info->ai.size,  in prepare_report_consumer()
    621  (unsigned long)ai->ptr, ai->size)) {  in prepare_report_consumer()
    637  static struct access_info prepare_access_info(const volatile void *ptr, size_t size,  in prepare_access_info() argument
    [all …]
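prepare_report_consumer() pairs the two halves of a data race by checking that the recorded accesses actually overlap. A hedged sketch of the kind of predicate the matching_access() calls need: two [addr, addr+size) ranges conflict iff each starts before the other ends. This is a plausible reconstruction, not the kernel's exact definition:

    #include <stdbool.h>
    #include <stddef.h>

    /* Illustrative overlap test for two access ranges. */
    static bool ranges_overlap(unsigned long addr1, size_t size1,
                               unsigned long addr2, size_t size2)
    {
        return addr1 < addr2 + size2 && addr2 < addr1 + size1;
    }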
|
D | kcsan.h |
    123  void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
    131  void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
    139  void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
|
/kernel/debug/ |
D | gdbstub.c |
    290  int kgdb_hex2long(char **ptr, unsigned long *long_val)  in kgdb_hex2long() argument
    298  if (**ptr == '-') {  in kgdb_hex2long()
    300  (*ptr)++;  in kgdb_hex2long()
    302  while (**ptr) {  in kgdb_hex2long()
    303  hex_val = hex_to_bin(**ptr);  in kgdb_hex2long()
    309  (*ptr)++;  in kgdb_hex2long()
    343  char *ptr = (char *)gdb_regs;  in pt_regs_to_gdb_regs() local
    346  dbg_get_reg(i, ptr + idx, regs);  in pt_regs_to_gdb_regs()
    355  char *ptr = (char *)gdb_regs;  in gdb_regs_to_pt_regs() local
    358  dbg_set_reg(i, ptr + idx, regs);  in gdb_regs_to_pt_regs()
    [all …]
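The kgdb_hex2long() matches show a classic advance-the-cursor parser: an optional '-' sign, then hex digits consumed one by one through a double pointer so the caller resumes where parsing stopped. A userspace reconstruction assembled from the snippets above; hex_digit() stands in for the kernel's hex_to_bin():

    /* Parse an optionally negated hex number from *ptr, advance *ptr
     * past the digits consumed, return the digit count. Sketch only. */
    static int hex_digit(char c)
    {
        if (c >= '0' && c <= '9') return c - '0';
        if (c >= 'a' && c <= 'f') return c - 'a' + 10;
        if (c >= 'A' && c <= 'F') return c - 'A' + 10;
        return -1;
    }

    static int hex2long(char **ptr, unsigned long *long_val)
    {
        int num = 0, negate = 0;

        *long_val = 0;
        if (**ptr == '-') {
            negate = 1;
            (*ptr)++;
        }
        while (**ptr) {
            int v = hex_digit(**ptr);

            if (v < 0)
                break;          /* first non-hex char ends the number */
            *long_val = (*long_val << 4) | v;
            num++;
            (*ptr)++;
        }
        if (negate)
            *long_val = -*long_val;
        return num;
    }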
|
/kernel/ |
D | extable.c |
    143  void *dereference_function_descriptor(void *ptr)  in dereference_function_descriptor() argument
    145  func_desc_t *desc = ptr;  in dereference_function_descriptor()
    149  ptr = p;  in dereference_function_descriptor()
    150  return ptr;  in dereference_function_descriptor()
    154  void *dereference_kernel_function_descriptor(void *ptr)  in dereference_kernel_function_descriptor() argument
    156  if (ptr < (void *)__start_opd || ptr >= (void *)__end_opd)  in dereference_kernel_function_descriptor()
    157  return ptr;  in dereference_kernel_function_descriptor()
    159  return dereference_function_descriptor(ptr);  in dereference_kernel_function_descriptor()
    163  int func_ptr_is_kernel_text(void *ptr)  in func_ptr_is_kernel_text() argument
    166  addr = (unsigned long) dereference_function_descriptor(ptr);  in func_ptr_is_kernel_text()
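On ABIs that use function descriptors (e.g. PPC64 ELFv1, ia64), a "function pointer" addresses a small record whose first word is the real entry point; the .opd range check above decides whether a pointer is such a descriptor at all. A sketch of the dereference step, omitting the faulting-read fallback the kernel performs:

    /* Illustrative descriptor layout; real ABIs add TOC/env words. */
    typedef struct { unsigned long addr; } func_desc_t;

    static void *deref_func_desc(void *ptr)
    {
        func_desc_t *desc = ptr;

        return (void *)desc->addr;  /* entry point stored in descriptor */
    }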
|
D | iomem.c |
    139  void **ptr, *addr;  in devm_memremap() local
    141  ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,  in devm_memremap()
    143  if (!ptr)  in devm_memremap()
    148  *ptr = addr;  in devm_memremap()
    149  devres_add(dev, ptr);  in devm_memremap()
    151  devres_free(ptr);  in devm_memremap()
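devm_memremap() is the devres idiom in miniature: allocate a small tracked cell first, then the real resource; on success stash the resource in the cell and register it, on failure free the cell. A compact userspace analogue with invented helpers standing in for devres_alloc_node()/devres_add()/devres_free():

    #include <stdlib.h>

    static void *tracked[64];       /* toy registry of managed cells */
    static int ntracked;

    static void **devres_alloc(void) { return calloc(1, sizeof(void *)); }
    static void devres_add(void **cell) { tracked[ntracked++] = cell; }
    static void devres_free(void **cell) { free(cell); }

    static void *devm_alloc(size_t size)
    {
        void **ptr, *addr;

        ptr = devres_alloc();          /* cell that outlives this call */
        if (!ptr)
            return NULL;

        addr = malloc(size);           /* the real resource */
        if (!addr) {
            devres_free(ptr);          /* failure: drop the cell */
            return NULL;
        }

        *ptr = addr;                   /* remembered for later release */
        devres_add(ptr);               /* now owned by the registry */
        return addr;
    }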
|
D | kexec_core.c |
    600  #define for_each_kimage_entry(image, ptr, entry) \  argument
    601  for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
    602  ptr = (entry & IND_INDIRECTION) ? \
    603  boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
    615  kimage_entry_t *ptr, entry;  in kimage_free() local
    627  for_each_kimage_entry(image, ptr, entry) {  in kimage_free()
    662  kimage_entry_t *ptr, entry;  in kimage_dst_used() local
    665  for_each_kimage_entry(image, ptr, entry) {  in kimage_dst_used()
    670  return ptr;  in kimage_dst_used()
    797  char *ptr;  in kimage_load_normal_segment() local
    [all …]
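The for_each_kimage_entry() macro walks entries that carry flag bits in their low bits: an IND_INDIRECTION entry redirects the cursor to another entry page, IND_DONE terminates. A runnable sketch with invented flag values and an identity phys-to-virt mapping:

    #include <stdio.h>

    typedef unsigned long kimage_entry_t;

    #define IND_DONE        0x1UL
    #define IND_INDIRECTION 0x2UL
    #define ADDR_MASK       (~0xfUL)   /* stands in for PAGE_MASK */

    #define for_each_entry(head, ptr, entry)                            \
        for (ptr = (head); (entry = *ptr) && !(entry & IND_DONE);       \
             ptr = (entry & IND_INDIRECTION) ?                          \
                   (kimage_entry_t *)(entry & ADDR_MASK) : ptr + 1)

    int main(void)
    {
        static kimage_entry_t page2[4] __attribute__((aligned(16)));
        static kimage_entry_t head[4] __attribute__((aligned(16)));
        kimage_entry_t *ptr, entry;

        head[0]  = 0x1000;   /* plain entry (fake destination address) */
        head[1]  = (kimage_entry_t)page2 | IND_INDIRECTION;
        page2[0] = 0x2000;   /* continues on the second "page" */
        page2[1] = IND_DONE;

        for_each_entry(head, ptr, entry)
            if (!(entry & IND_INDIRECTION))
                printf("entry %#lx\n", entry);
        return 0;
    }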
|
D | rseq.c |
    155  u64 ptr;  in rseq_get_rseq_cs() local
    161  if (get_user(ptr, &t->rseq->rseq_cs))  in rseq_get_rseq_cs()
    164  if (copy_from_user(&ptr, &t->rseq->rseq_cs, sizeof(ptr)))  in rseq_get_rseq_cs()
    167  if (!ptr) {  in rseq_get_rseq_cs()
    171  if (ptr >= TASK_SIZE)  in rseq_get_rseq_cs()
    173  urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr;  in rseq_get_rseq_cs()
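Note the ordering in rseq_get_rseq_cs(): the user-supplied 64-bit value is fetched as an integer and only becomes a pointer after a NULL check and a bound check against the top of the user address space. A sketch of that validation; TASK_SIZE_SKETCH is a placeholder, not the kernel constant:

    #include <stdint.h>

    #define TASK_SIZE_SKETCH (1ULL << 47)   /* illustrative user/kernel split */

    static const void *u64_to_user_ptr_checked(uint64_t ptr)
    {
        if (!ptr)
            return NULL;                 /* no critical section registered */
        if (ptr >= TASK_SIZE_SKETCH)
            return NULL;                 /* kernel-range value: reject */
        return (const void *)(unsigned long)ptr;
    }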
|
D | stacktrace.c |
    377  static inline bool in_irqentry_text(unsigned long ptr)  in in_irqentry_text() argument
    379  return (ptr >= (unsigned long)&__irqentry_text_start &&  in in_irqentry_text()
    380  ptr < (unsigned long)&__irqentry_text_end) ||  in in_irqentry_text()
    381  (ptr >= (unsigned long)&__softirqentry_text_start &&  in in_irqentry_text()
    382  ptr < (unsigned long)&__softirqentry_text_end);  in in_irqentry_text()
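in_irqentry_text() is just two half-open range tests against linker-provided section boundary symbols. A generic sketch with the boundaries as parameters instead of __irqentry_text_start and friends:

    /* Half-open [start, end) address range test, as used above. */
    static inline int addr_in_range(unsigned long ptr,
                                    const char *start, const char *end)
    {
        return ptr >= (unsigned long)start && ptr < (unsigned long)end;
    }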
|
D | crash_core.c |
    538  void crash_update_vmcoreinfo_safecopy(void *ptr)  in crash_update_vmcoreinfo_safecopy() argument
    540  if (ptr)  in crash_update_vmcoreinfo_safecopy()
    541  memcpy(ptr, vmcoreinfo_data, vmcoreinfo_size);  in crash_update_vmcoreinfo_safecopy()
    543  vmcoreinfo_data_safecopy = ptr;  in crash_update_vmcoreinfo_safecopy()
    830  unsigned char *ptr;  in crash_handle_hotplug_event() local
    835  ptr = kmap_local_page(pfn_to_page(mem >> PAGE_SHIFT));  in crash_handle_hotplug_event()
    836  if (ptr) {  in crash_handle_hotplug_event()
    838  if (memcmp(ptr, ELFMAG, SELFMAG) == 0)  in crash_handle_hotplug_event()
    840  kunmap_local(ptr);  in crash_handle_hotplug_event()
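crash_handle_hotplug_event() identifies the ELF-header segment by mapping its first page and comparing the leading bytes against the ELF magic. The comparison itself is standard; a sketch with the mapping step elided:

    #include <elf.h>     /* ELFMAG "\177ELF", SELFMAG 4 */
    #include <string.h>

    /* True iff the mapped bytes start with the ELF magic number. */
    static int is_elf_header(const unsigned char *ptr)
    {
        return memcmp(ptr, ELFMAG, SELFMAG) == 0;
    }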
|
/kernel/locking/ |
D | lockdep_internals.h |
    212  #define __debug_atomic_inc(ptr) \  argument
    213  this_cpu_inc(lockdep_stats.ptr);
    215  #define debug_atomic_inc(ptr) { \  argument
    217  __this_cpu_inc(lockdep_stats.ptr); \
    220  #define debug_atomic_dec(ptr) { \  argument
    222  __this_cpu_dec(lockdep_stats.ptr); \
    225  #define debug_atomic_read(ptr) ({ \  argument
    231  __total += __cpu_lockdep_stats->ptr; \
    256  # define __debug_atomic_inc(ptr) do { } while (0)  argument
    257  # define debug_atomic_inc(ptr) do { } while (0)  argument
    [all …]
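Here ptr is not a pointer at all: the macro argument is a *field name* that gets token-pasted into a per-CPU structure access, and the read side sums the field across CPUs (with do-nothing fallbacks when stats are compiled out). A sketch of the shape, using the same GCC statement-expression extension the kernel relies on; NR_CPUS and the fields are invented:

    #define NR_CPUS 4

    struct lockdep_stats_sketch { long hardirqs_on; long softirqs_on; };

    static struct lockdep_stats_sketch stats[NR_CPUS];
    static int this_cpu;    /* stand-in for smp_processor_id() */

    #define debug_atomic_inc(field)  do { stats[this_cpu].field++; } while (0)

    #define debug_atomic_read(field) ({                      \
        long __total = 0;                                    \
        int __cpu;                                           \
        for (__cpu = 0; __cpu < NR_CPUS; __cpu++)            \
            __total += stats[__cpu].field;                   \
        __total;                                             \
    })

Usage mirrors the kernel's: debug_atomic_inc(hardirqs_on) on the hot path, debug_atomic_read(hardirqs_on) when printing /proc/lockdep_stats-style totals.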
|
/kernel/bpf/ |
D | helpers.c |
    711   BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)  in BPF_CALL_2() argument
    716   return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);  in BPF_CALL_2()
    1484  BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)  in BPF_CALL_2() argument
    1488  return xchg(kptr, (unsigned long)ptr);  in BPF_CALL_2()
    1513  static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)  in __bpf_dynptr_is_rdonly() argument
    1515  return ptr->size & DYNPTR_RDONLY_BIT;  in __bpf_dynptr_is_rdonly()
    1518  void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)  in bpf_dynptr_set_rdonly() argument
    1520  ptr->size |= DYNPTR_RDONLY_BIT;  in bpf_dynptr_set_rdonly()
    1523  static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)  in bpf_dynptr_set_type() argument
    1525  ptr->size |= type << DYNPTR_TYPE_SHIFT;  in bpf_dynptr_set_type()
    [all …]
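The dynptr helpers pack a read-only flag and a type code into the high bits of the size field so one u32 carries all three. A sketch of that packing; the exact bit positions here are invented, only the technique matches the snippets:

    #include <stdint.h>

    #define DYNPTR_RDONLY_BIT  (1u << 31)       /* illustrative positions */
    #define DYNPTR_TYPE_SHIFT  28               /* type in bits 28..30 */
    #define DYNPTR_SIZE_MASK   0x0fffffffu      /* real size in bits 0..27 */

    struct dynptr_sketch { void *data; uint32_t size; };

    static int dynptr_is_rdonly(const struct dynptr_sketch *ptr)
    {
        return !!(ptr->size & DYNPTR_RDONLY_BIT);
    }

    static void dynptr_set_rdonly(struct dynptr_sketch *ptr)
    {
        ptr->size |= DYNPTR_RDONLY_BIT;
    }

    static void dynptr_set_type(struct dynptr_sketch *ptr, uint32_t type)
    {
        ptr->size |= type << DYNPTR_TYPE_SHIFT;
    }

    static uint32_t dynptr_size(const struct dynptr_sketch *ptr)
    {
        return ptr->size & DYNPTR_SIZE_MASK;    /* strip the flag bits */
    }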
|
D | memalloc.c |
    758  static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)  in unit_free() argument
    760  struct llist_node *llnode = ptr - LLIST_NODE_SZ;  in unit_free()
    793  static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr)  in unit_free_rcu() argument
    795  struct llist_node *llnode = ptr - LLIST_NODE_SZ;  in unit_free_rcu()
    833  void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)  in bpf_mem_free() argument
    838  if (!ptr)  in bpf_mem_free()
    841  c = *(void **)(ptr - LLIST_NODE_SZ);  in bpf_mem_free()
    846  unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);  in bpf_mem_free()
    849  void notrace bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr)  in bpf_mem_free_rcu() argument
    854  if (!ptr)  in bpf_mem_free_rcu()
    [all …]
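The `ptr - LLIST_NODE_SZ` arithmetic is the hidden-header trick: each object is preceded in memory by bookkeeping (here, a word identifying the owning cache), so free can step back from the user pointer to recover it. A userspace sketch with invented sizes and names:

    #include <stdlib.h>

    #define NODE_SZ sizeof(void *)   /* stand-in for LLIST_NODE_SZ */

    struct cache { int id; /* freelist head etc. elided */ };

    static void *unit_alloc(struct cache *c, size_t size)
    {
        void **hdr = malloc(NODE_SZ + size);

        if (!hdr)
            return NULL;
        *hdr = c;                       /* remember the owning cache */
        return (char *)hdr + NODE_SZ;   /* hand out memory past the header */
    }

    static void unit_free(void *ptr)
    {
        void **hdr = (void **)((char *)ptr - NODE_SZ);
        struct cache *c = *hdr;         /* recovered, like bpf_mem_free() */

        (void)c;                        /* would push onto c's freelist */
        free(hdr);
    }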
|
D | ringbuf.c |
    599  struct bpf_dynptr_kern *, ptr)  in BPF_CALL_4()
    606  bpf_dynptr_set_null(ptr);  in BPF_CALL_4()
    612  bpf_dynptr_set_null(ptr);  in BPF_CALL_4()
    620  bpf_dynptr_set_null(ptr);  in BPF_CALL_4()
    624  bpf_dynptr_init(ptr, sample, BPF_DYNPTR_TYPE_RINGBUF, 0, size);  in BPF_CALL_4()
    638  BPF_CALL_2(bpf_ringbuf_submit_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)  in BPF_CALL_2() argument
    640  if (!ptr->data)  in BPF_CALL_2()
    643  bpf_ringbuf_commit(ptr->data, flags, false /* discard */);  in BPF_CALL_2()
    645  bpf_dynptr_set_null(ptr);  in BPF_CALL_2()
    657  BPF_CALL_2(bpf_ringbuf_discard_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)  in BPF_CALL_2() argument
    [all …]
|
D | queue_stack_maps.c |
    99   void *ptr;  in __queue_map_get() local
    114  ptr = &qs->elements[qs->tail * qs->map.value_size];  in __queue_map_get()
    115  memcpy(value, ptr, qs->map.value_size);  in __queue_map_get()
    133  void *ptr;  in __stack_map_get() local
    153  ptr = &qs->elements[index * qs->map.value_size];  in __stack_map_get()
    154  memcpy(value, ptr, qs->map.value_size);  in __stack_map_get()
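Both getters address elements the same way: values of value_size bytes live in one flat byte array, so slot i starts at &elements[i * value_size]. A trimmed-down sketch of the queue-side get; the empty-check and wrap-around convention here are simplified relative to the kernel:

    #include <string.h>

    struct queue_sketch {
        unsigned int head, tail, size;  /* ring indices, capacity */
        unsigned int value_size;        /* bytes per element */
        char elements[];                /* size * value_size bytes */
    };

    static int queue_get(struct queue_sketch *qs, void *value)
    {
        void *ptr;

        if (qs->head == qs->tail)       /* empty (simplified test) */
            return -1;

        ptr = &qs->elements[qs->tail * qs->value_size];
        memcpy(value, ptr, qs->value_size);
        qs->tail = (qs->tail + 1) % qs->size;  /* consume the slot */
        return 0;
    }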
|
D | map_in_map.c |
    130  void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)  in bpf_map_fd_put_ptr() argument
    132  struct bpf_map *inner_map = ptr;  in bpf_map_fd_put_ptr()
    143  u32 bpf_map_fd_sys_lookup_elem(void *ptr)  in bpf_map_fd_sys_lookup_elem() argument
    145  return ((struct bpf_map *)ptr)->id;  in bpf_map_fd_sys_lookup_elem()
|
D | arraymap.c |
    34   void __percpu *ptr;  in bpf_array_alloc_percpu() local
    38   ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,  in bpf_array_alloc_percpu()
    40   if (!ptr) {  in bpf_array_alloc_percpu()
    44   array->pptrs[i] = ptr;  in bpf_array_alloc_percpu()
    824  void **elem, *ptr;  in bpf_fd_array_map_lookup_elem() local
    832  if (elem && (ptr = READ_ONCE(*elem)))  in bpf_fd_array_map_lookup_elem()
    833  *value = map->ops->map_fd_sys_lookup_elem(ptr);  in bpf_fd_array_map_lookup_elem()
    921  static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)  in prog_fd_array_put_ptr() argument
    924  bpf_prog_put(ptr);  in prog_fd_array_put_ptr()
    927  static u32 prog_fd_array_sys_lookup_elem(void *ptr)  in prog_fd_array_sys_lookup_elem() argument
    [all …]
|
D | core.c |
    76   u8 *ptr = NULL;  in bpf_internal_load_pointer_neg_helper() local
    79   ptr = skb_network_header(skb) + k - SKF_NET_OFF;  in bpf_internal_load_pointer_neg_helper()
    83   ptr = skb_mac_header(skb) + k - SKF_LL_OFF;  in bpf_internal_load_pointer_neg_helper()
    85   if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))  in bpf_internal_load_pointer_neg_helper()
    86   return ptr;  in bpf_internal_load_pointer_neg_helper()
    848  void *ptr;  member
    886  pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);  in alloc_new_pack()
    887  if (!pack->ptr) {  in alloc_new_pack()
    891  bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);  in alloc_new_pack()
    895  set_vm_flush_reset_perms(pack->ptr);  in alloc_new_pack()
    [all …]
|
D | map_in_map.h |
    16  void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer);
    17  u32 bpf_map_fd_sys_lookup_elem(void *ptr);
|
/kernel/trace/rv/ |
D | rv_reactors.c |
    196  char *ptr;  in monitor_reactors_write() local
    208  ptr = strim(buff);  in monitor_reactors_write()
    210  len = strlen(ptr);  in monitor_reactors_write()
    225  if (strcmp(ptr, rdef->reactor->name) != 0)  in monitor_reactors_write()
    331  struct rv_reactor_def *ptr, *next;  in rv_unregister_reactor() local
    336  list_for_each_entry_safe(ptr, next, &rv_reactors_list, list) {  in rv_unregister_reactor()
    337  if (strcmp(reactor->name, ptr->reactor->name) == 0) {  in rv_unregister_reactor()
    339  if (!ptr->counter) {  in rv_unregister_reactor()
    340  list_del(&ptr->list);  in rv_unregister_reactor()
    344  ptr->reactor->name, ptr->counter);  in rv_unregister_reactor()
    [all …]
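rv_unregister_reactor() shows the unregister-by-name walk: scan the registered list with a deletion-safe iterator, refuse to remove an entry that is still in use (counter != 0), otherwise unlink and free it. A sketch over a plain singly linked list instead of the kernel's list_head:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct reactor_def {
        const char *name;
        int counter;                 /* monitors still using this reactor */
        struct reactor_def *next;
    };

    static struct reactor_def *reactors;

    static int unregister_reactor(const char *name)
    {
        struct reactor_def **pp, *p;

        for (pp = &reactors; (p = *pp); pp = &p->next) {
            if (strcmp(name, p->name) != 0)
                continue;
            if (p->counter) {
                fprintf(stderr, "reactor %s busy (%d users)\n",
                        p->name, p->counter);
                return -1;           /* still referenced: refuse */
            }
            *pp = p->next;           /* unlink, safe during traversal */
            free(p);
            return 0;
        }
        return -1;                   /* not found */
    }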
|
D | rv.c |
    519  char *ptr;  in enabled_monitors_write() local
    531  ptr = strim(buff);  in enabled_monitors_write()
    533  if (ptr[0] == '!') {  in enabled_monitors_write()
    535  ptr++;  in enabled_monitors_write()
    538  len = strlen(ptr);  in enabled_monitors_write()
    547  if (strcmp(ptr, mdef->monitor->name) != 0)  in enabled_monitors_write()
    744  struct rv_monitor_def *ptr, *next;  in rv_unregister_monitor() local
    748  list_for_each_entry_safe(ptr, next, &rv_monitors_list, list) {  in rv_unregister_monitor()
    749  if (strcmp(monitor->name, ptr->monitor->name) == 0) {  in rv_unregister_monitor()
    750  rv_disable_monitor(ptr);  in rv_unregister_monitor()
    [all …]
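enabled_monitors_write() parses the same kind of control string, with one twist: a leading '!' flips the request to "disable" before the name comparison. A sketch of the parse; trim() is a minimal stand-in for the kernel's strim():

    #include <ctype.h>
    #include <string.h>

    static char *trim(char *s)
    {
        char *end;

        while (isspace((unsigned char)*s))
            s++;
        end = s + strlen(s);
        while (end > s && isspace((unsigned char)end[-1]))
            *--end = '\0';
        return s;
    }

    /* Returns 1 if buff names monitor_name; *enable reports the '!' flag. */
    static int parse_monitor_cmd(char *buff, const char *monitor_name,
                                 int *enable)
    {
        char *ptr = trim(buff);

        *enable = 1;
        if (ptr[0] == '!') {         /* "!name" means disable */
            *enable = 0;
            ptr++;
        }
        return strcmp(ptr, monitor_name) == 0;
    }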
|
/kernel/sched/ |
D | core_sched.c |
    25  struct sched_core_cookie *ptr = (void *)cookie;  in sched_core_put_cookie() local
    27  if (ptr && refcount_dec_and_test(&ptr->refcnt)) {  in sched_core_put_cookie()
    28  kfree(ptr);  in sched_core_put_cookie()
    35  struct sched_core_cookie *ptr = (void *)cookie;  in sched_core_get_cookie() local
    37  if (ptr)  in sched_core_get_cookie()
    38  refcount_inc(&ptr->refcnt);  in sched_core_get_cookie()
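The cookie handed around as an unsigned long is really a pointer to a refcounted object: get bumps the count, put drops it and frees on zero. A userspace sketch using C11 atomics in place of the kernel's refcount_t (assume the object is created with refcnt = 1):

    #include <stdlib.h>
    #include <stdatomic.h>

    struct cookie_sketch { atomic_uint refcnt; };

    static unsigned long cookie_get(unsigned long cookie)
    {
        struct cookie_sketch *ptr = (void *)cookie;

        if (ptr)
            atomic_fetch_add(&ptr->refcnt, 1);
        return cookie;
    }

    static void cookie_put(unsigned long cookie)
    {
        struct cookie_sketch *ptr = (void *)cookie;

        /* fetch_sub returns the old value; 1 means we dropped to zero */
        if (ptr && atomic_fetch_sub(&ptr->refcnt, 1) == 1)
            free(ptr);
    }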
|
/kernel/trace/ |
D | trace_events.c |
    2690  static char *eval_replace(char *ptr, struct trace_eval_map *map, int len)  in eval_replace() argument
    2696  elen = snprintf(ptr, 0, "%ld", map->eval_value);  in eval_replace()
    2701  snprintf(ptr, elen + 1, "%ld", map->eval_value);  in eval_replace()
    2704  rlen = strlen(ptr + len);  in eval_replace()
    2705  memmove(ptr + elen, ptr + len, rlen);  in eval_replace()
    2707  ptr[elen + rlen] = 0;  in eval_replace()
    2709  return ptr + elen;  in eval_replace()
    2715  char *ptr;  in update_event_printk() local
    2719  for (ptr = call->print_fmt; *ptr; ptr++) {  in update_event_printk()
    2720  if (*ptr == '\\') {  in update_event_printk()
    [all …]
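eval_replace() rewrites an enum token in place with its numeric value, then memmoves the tail of the string to close the gap. A userspace sketch of the same technique; this variant formats into a scratch buffer and moves the tail before writing the digits, so the in-place write can never touch the tail:

    #include <stdio.h>
    #include <string.h>

    /* Replace the len-byte token at ptr with the decimal value, shrink
     * the string, and return a pointer just past the inserted digits. */
    static char *replace_token_with_number(char *ptr, long value, int len)
    {
        char buf[24];                           /* enough for any long */
        int elen = snprintf(buf, sizeof(buf), "%ld", value);
        size_t rlen;

        if (elen > len)
            return NULL;                        /* number wouldn't fit */

        rlen = strlen(ptr + len);               /* tail after the token */
        memmove(ptr + elen, ptr + len, rlen);   /* close the gap first */
        memcpy(ptr, buf, elen);                 /* then drop in digits */
        ptr[elen + rlen] = 0;
        return ptr + elen;
    }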
|
D | trace_printk.c |
    256  const char **ptr = __start___tracepoint_str;  in trace_is_tracepoint_string() local
    258  for (ptr = __start___tracepoint_str; ptr < __stop___tracepoint_str; ptr++) {  in trace_is_tracepoint_string()
    259  if (str == *ptr) {  in trace_is_tracepoint_string()
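trace_is_tracepoint_string() walks an array of string pointers (a linker section in the kernel) and compares addresses, not contents, so only the exact registered pointer matches. A sketch with an ordinary array standing in for the section:

    static const char *tp_strs[] = { "sched", "timer", "irq" };
    #define TP_STRS_END (tp_strs + sizeof(tp_strs) / sizeof(tp_strs[0]))

    static int is_tracepoint_string(const char *str)
    {
        const char **ptr;

        for (ptr = tp_strs; ptr < TP_STRS_END; ptr++)
            if (str == *ptr)    /* pointer identity, not strcmp() */
                return 1;
        return 0;
    }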
|
/kernel/time/ |
D | alarmtimer.c |
    562  struct k_itimer *ptr = container_of(alarm, struct k_itimer,  in alarm_handle_timer() local
    568  spin_lock_irqsave(&ptr->it_lock, flags);  in alarm_handle_timer()
    570  ptr->it_active = 0;  in alarm_handle_timer()
    571  if (ptr->it_interval)  in alarm_handle_timer()
    572  si_private = ++ptr->it_requeue_pending;  in alarm_handle_timer()
    574  if (posix_timer_event(ptr, si_private) && ptr->it_interval) {  in alarm_handle_timer()
    580  ptr->it_overrun += __alarm_forward_now(alarm, ptr->it_interval, true);  in alarm_handle_timer()
    581  ++ptr->it_requeue_pending;  in alarm_handle_timer()
    582  ptr->it_active = 1;  in alarm_handle_timer()
    585  spin_unlock_irqrestore(&ptr->it_lock, flags);  in alarm_handle_timer()
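The first match is the container_of() step: the callback only receives a pointer to the embedded struct alarm, and recovers the enclosing k_itimer by subtracting the member's offset. A self-contained sketch of that recovery:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct alarm_sketch { int dummy; };

    struct itimer_sketch {
        int it_active;
        struct alarm_sketch alarm;  /* embedded member */
    };

    /* Given &x->alarm, return x. */
    static struct itimer_sketch *timer_of(struct alarm_sketch *a)
    {
        return container_of(a, struct itimer_sketch, alarm);
    }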
|