/kernel/kcsan/
core.c
  210  check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);
  223  check_access(scoped_access->ptr, scoped_access->size,    in kcsan_check_scoped_accesses()
  231  is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)    in is_atomic() argument
  246  !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))    in is_atomic()
  268  should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)    in should_watch() argument
  277  if (is_atomic(ctx, ptr, size, type))    in should_watch()
  338  static __always_inline u64 read_instrumented_memory(const volatile void *ptr, size_t size)    in read_instrumented_memory() argument
  350  case 1: return *(const volatile u8 *)ptr;    in read_instrumented_memory()
  351  case 2: return *(const volatile u16 *)ptr;    in read_instrumented_memory()
  352  case 4: return *(const volatile u32 *)ptr;    in read_instrumented_memory()
  [all …]
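The read_instrumented_memory() hits (lines 338-352) show the size-dispatch idiom KCSAN uses to widen a 1-, 2-, 4- or 8-byte load into a single u64. A minimal userspace sketch of the same idiom; the name read_sized and the surrounding scaffolding are illustrative, not kernel API:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Widen a 1/2/4/8-byte load into a uint64_t, as in the hits above. */
    static uint64_t read_sized(const volatile void *ptr, size_t size)
    {
            switch (size) {
            case 1: return *(const volatile uint8_t *)ptr;
            case 2: return *(const volatile uint16_t *)ptr;
            case 4: return *(const volatile uint32_t *)ptr;
            case 8: return *(const volatile uint64_t *)ptr;
            default: return 0; /* unexpected sizes are ignored */
            }
    }

    int main(void)
    {
            uint32_t x = 0xdeadbeef;
            printf("%#llx\n", (unsigned long long)read_sized(&x, sizeof(x)));
            return 0;
    }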
report.c
  30   const volatile void *ptr;    member
  446  get_access_type(other_info->ai.access_type), other_info->ai.ptr,    in print_report()
  459  get_access_type(ai->access_type), ai->ptr, ai->size,    in print_report()
  463  get_access_type(ai->access_type), ai->ptr, ai->size,    in print_report()
  565  } while (other_info->ai.size && other_info->ai.ptr == ai->ptr &&    in set_other_info_task_blocking()
  616  …if (WARN_ON(!matching_access((unsigned long)other_info->ai.ptr & WATCHPOINT_ADDR_MASK, other_info-…    in prepare_report_consumer()
  617  (unsigned long)ai->ptr & WATCHPOINT_ADDR_MASK, ai->size)))    in prepare_report_consumer()
  620  if (!matching_access((unsigned long)other_info->ai.ptr, other_info->ai.size,    in prepare_report_consumer()
  621  (unsigned long)ai->ptr, ai->size)) {    in prepare_report_consumer()
  637  static struct access_info prepare_access_info(const volatile void *ptr, size_t size,    in prepare_access_info() argument
  [all …]
kcsan.h
  123  void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
  131  void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
  139  void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
/kernel/debug/
gdbstub.c
  290  int kgdb_hex2long(char **ptr, unsigned long *long_val)    in kgdb_hex2long() argument
  298  if (**ptr == '-') {    in kgdb_hex2long()
  300  (*ptr)++;    in kgdb_hex2long()
  302  while (**ptr) {    in kgdb_hex2long()
  303  hex_val = hex_to_bin(**ptr);    in kgdb_hex2long()
  309  (*ptr)++;    in kgdb_hex2long()
  343  char *ptr = (char *)gdb_regs;    in pt_regs_to_gdb_regs() local
  346  dbg_get_reg(i, ptr + idx, regs);    in pt_regs_to_gdb_regs()
  355  char *ptr = (char *)gdb_regs;    in gdb_regs_to_pt_regs() local
  358  dbg_set_reg(i, ptr + idx, regs);    in gdb_regs_to_pt_regs()
  [all …]
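kgdb_hex2long() parses an optionally negative hex number from the GDB packet buffer and leaves *ptr at the first non-hex character. A userspace analogue of the same parse; hex_to_bin() is reimplemented here (the kernel's lives in lib/hexdump.c), and the return value is the count of hex digits consumed:

    #include <stdio.h>

    static int hex_to_bin(char ch)
    {
            if (ch >= '0' && ch <= '9') return ch - '0';
            if (ch >= 'a' && ch <= 'f') return ch - 'a' + 10;
            if (ch >= 'A' && ch <= 'F') return ch - 'A' + 10;
            return -1;
    }

    static int hex2long(char **ptr, unsigned long *long_val)
    {
            int hex_val, num = 0, negate = 0;
            unsigned long val = 0;

            if (**ptr == '-') {
                    negate = 1;
                    (*ptr)++;
            }
            while (**ptr) {
                    hex_val = hex_to_bin(**ptr);
                    if (hex_val < 0)
                            break;
                    val = (val << 4) | hex_val;
                    num++;
                    (*ptr)++;
            }
            if (negate)
                    val = -val; /* wraps modulo 2^N, as with unsigned long */
            *long_val = val;
            return num; /* 0 means no number was found */
    }

    int main(void)
    {
            char buf[] = "-1f,rest";
            char *p = buf;
            unsigned long v;
            int n = hex2long(&p, &v);
            printf("digits=%d val=%#lx next=%s\n", n, v, p);
            return 0;
    }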
/kernel/
extable.c
  143  void *dereference_function_descriptor(void *ptr)    in dereference_function_descriptor() argument
  145  func_desc_t *desc = ptr;    in dereference_function_descriptor()
  149  ptr = p;    in dereference_function_descriptor()
  150  return ptr;    in dereference_function_descriptor()
  154  void *dereference_kernel_function_descriptor(void *ptr)    in dereference_kernel_function_descriptor() argument
  156  if (ptr < (void *)__start_opd || ptr >= (void *)__end_opd)    in dereference_kernel_function_descriptor()
  157  return ptr;    in dereference_kernel_function_descriptor()
  159  return dereference_function_descriptor(ptr);    in dereference_kernel_function_descriptor()
  163  int func_ptr_is_kernel_text(void *ptr)    in func_ptr_is_kernel_text() argument
  166  addr = (unsigned long) dereference_function_descriptor(ptr);    in func_ptr_is_kernel_text()
kexec_core.c
  602  #define for_each_kimage_entry(image, ptr, entry) \    argument
  603  for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
  604  ptr = (entry & IND_INDIRECTION) ? \
  605  boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
  617  kimage_entry_t *ptr, entry;    in kimage_free() local
  629  for_each_kimage_entry(image, ptr, entry) {    in kimage_free()
  664  kimage_entry_t *ptr, entry;    in kimage_dst_used() local
  667  for_each_kimage_entry(image, ptr, entry) {    in kimage_dst_used()
  672  return ptr;    in kimage_dst_used()
  799  char *ptr;    in kimage_load_normal_segment() local
  [all …]
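The for_each_kimage_entry() macro (lines 602-605) walks pages of tagged words: a plain entry advances the cursor, IND_INDIRECTION redirects it to another page, and IND_DONE terminates. A simplified userspace sketch of that walk, with made-up tag values and ordinary virtual addresses instead of the kernel's phys/virt translation:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    #define IND_DONE        0x1
    #define IND_INDIRECTION 0x2
    #define TAG_MASK        0x3

    static void walk(uintptr_t *head)
    {
            uintptr_t *ptr, entry;

            /* Indirection entries are visited by the body too, then the
             * update expression jumps the cursor to the page they name. */
            for (ptr = head; (entry = *ptr) && !(entry & IND_DONE);
                 ptr = (entry & IND_INDIRECTION) ?
                       (uintptr_t *)(entry & ~(uintptr_t)TAG_MASK) : ptr + 1)
                    printf("entry %#" PRIxPTR " tag %#" PRIxPTR "\n",
                           entry & ~(uintptr_t)TAG_MASK, entry & TAG_MASK);
    }

    int main(void)
    {
            static uintptr_t page2[] = { 0x3000, IND_DONE };
            uintptr_t page1[] = { 0x1000, 0x2000,
                                  (uintptr_t)page2 | IND_INDIRECTION };
            walk(page1);
            return 0;
    }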
iomem.c
  142  void **ptr, *addr;    in devm_memremap() local
  144  ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,    in devm_memremap()
  146  if (!ptr)    in devm_memremap()
  151  *ptr = addr;    in devm_memremap()
  152  devres_add(dev, ptr);    in devm_memremap()
  154  devres_free(ptr);    in devm_memremap()
rseq.c
  126  u64 ptr;    in rseq_get_rseq_cs() local
  132  if (get_user(ptr, &t->rseq->rseq_cs))    in rseq_get_rseq_cs()
  135  if (copy_from_user(&ptr, &t->rseq->rseq_cs, sizeof(ptr)))    in rseq_get_rseq_cs()
  138  if (!ptr) {    in rseq_get_rseq_cs()
  142  if (ptr >= TASK_SIZE)    in rseq_get_rseq_cs()
  144  urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr;    in rseq_get_rseq_cs()
stacktrace.c
  377  static inline bool in_irqentry_text(unsigned long ptr)    in in_irqentry_text() argument
  379  return (ptr >= (unsigned long)&__irqentry_text_start &&    in in_irqentry_text()
  380  ptr < (unsigned long)&__irqentry_text_end) ||    in in_irqentry_text()
  381  (ptr >= (unsigned long)&__softirqentry_text_start &&    in in_irqentry_text()
  382  ptr < (unsigned long)&__softirqentry_text_end);    in in_irqentry_text()
/kernel/locking/
lockdep_internals.h
  212  #define __debug_atomic_inc(ptr) \    argument
  213  this_cpu_inc(lockdep_stats.ptr);
  215  #define debug_atomic_inc(ptr) { \    argument
  217  __this_cpu_inc(lockdep_stats.ptr); \
  220  #define debug_atomic_dec(ptr) { \    argument
  222  __this_cpu_dec(lockdep_stats.ptr); \
  225  #define debug_atomic_read(ptr) ({ \    argument
  231  __total += __cpu_lockdep_stats->ptr; \
  256  # define __debug_atomic_inc(ptr) do { } while (0)    argument
  257  # define debug_atomic_inc(ptr) do { } while (0)    argument
  [all …]
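These lockdep macros show a per-CPU statistics pattern: debug_atomic_inc() bumps only the local CPU's counter, and debug_atomic_read() sums every CPU's copy. A userspace approximation that uses explicit slot indices in place of the this_cpu accessors (slot count and names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NR_SLOTS 4

    static atomic_long nr_hits[NR_SLOTS]; /* one counter per "CPU" */

    static void stat_inc(int slot)
    {
            /* writers touch only their own slot, no shared cacheline */
            atomic_fetch_add_explicit(&nr_hits[slot], 1, memory_order_relaxed);
    }

    static long stat_read(void)
    {
            long total = 0;

            /* the rare reader pays the cost of summing every slot */
            for (int i = 0; i < NR_SLOTS; i++)
                    total += atomic_load_explicit(&nr_hits[i], memory_order_relaxed);
            return total;
    }

    int main(void)
    {
            stat_inc(0);
            stat_inc(2);
            stat_inc(2);
            printf("total=%ld\n", stat_read()); /* 3 */
            return 0;
    }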
/kernel/trace/rv/
rv_reactors.c
  196  char *ptr;    in monitor_reactors_write() local
  208  ptr = strim(buff);    in monitor_reactors_write()
  210  len = strlen(ptr);    in monitor_reactors_write()
  225  if (strcmp(ptr, rdef->reactor->name) != 0)    in monitor_reactors_write()
  331  struct rv_reactor_def *ptr, *next;    in rv_unregister_reactor() local
  336  list_for_each_entry_safe(ptr, next, &rv_reactors_list, list) {    in rv_unregister_reactor()
  337  if (strcmp(reactor->name, ptr->reactor->name) == 0) {    in rv_unregister_reactor()
  339  if (!ptr->counter) {    in rv_unregister_reactor()
  340  list_del(&ptr->list);    in rv_unregister_reactor()
  344  ptr->reactor->name, ptr->counter);    in rv_unregister_reactor()
  [all …]
rv.c
  519  char *ptr = buff;    in enabled_monitors_write() local
  531  ptr = strim(buff);    in enabled_monitors_write()
  533  if (ptr[0] == '!') {    in enabled_monitors_write()
  535  ptr++;    in enabled_monitors_write()
  538  len = strlen(ptr);    in enabled_monitors_write()
  547  if (strcmp(ptr, mdef->monitor->name) != 0)    in enabled_monitors_write()
  744  struct rv_monitor_def *ptr, *next;    in rv_unregister_monitor() local
  748  list_for_each_entry_safe(ptr, next, &rv_monitors_list, list) {    in rv_unregister_monitor()
  749  if (strcmp(monitor->name, ptr->monitor->name) == 0) {    in rv_unregister_monitor()
  750  rv_disable_monitor(ptr);    in rv_unregister_monitor()
  [all …]
/kernel/bpf/
helpers.c
  702   BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)    in BPF_CALL_2() argument
  707   return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);    in BPF_CALL_2()
  1396  BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)    in BPF_CALL_2() argument
  1400  return xchg(kptr, (unsigned long)ptr);    in BPF_CALL_2()
  1425  static bool bpf_dynptr_is_rdonly(struct bpf_dynptr_kern *ptr)    in bpf_dynptr_is_rdonly() argument
  1427  return ptr->size & DYNPTR_RDONLY_BIT;    in bpf_dynptr_is_rdonly()
  1430  static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)    in bpf_dynptr_set_type() argument
  1432  ptr->size |= type << DYNPTR_TYPE_SHIFT;    in bpf_dynptr_set_type()
  1435  u32 bpf_dynptr_get_size(struct bpf_dynptr_kern *ptr)    in bpf_dynptr_get_size() argument
  1437  return ptr->size & DYNPTR_SIZE_MASK;    in bpf_dynptr_get_size()
  [all …]
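Lines 1425-1437 suggest that bpf_dynptr_kern packs a read-only flag and a type field into the high bits of its size word. A sketch of that kind of packing; the bit positions and masks below are invented for illustration and need not match the kernel's actual layout:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define DYNPTR_SIZE_MASK   0x00ffffffu  /* low bits: the real size */
    #define DYNPTR_TYPE_SHIFT  28           /* bits 28..30: type field */
    #define DYNPTR_RDONLY_BIT  (1u << 31)   /* top bit: read-only flag */

    struct dynptr { uint32_t size; };

    static bool dynptr_is_rdonly(const struct dynptr *p)
    {
            return p->size & DYNPTR_RDONLY_BIT;
    }

    static void dynptr_set_type(struct dynptr *p, uint32_t type)
    {
            p->size |= type << DYNPTR_TYPE_SHIFT;
    }

    static uint32_t dynptr_get_size(const struct dynptr *p)
    {
            return p->size & DYNPTR_SIZE_MASK;
    }

    int main(void)
    {
            struct dynptr p = { .size = 4096 };

            dynptr_set_type(&p, 1);
            p.size |= DYNPTR_RDONLY_BIT;
            printf("size=%u type=%u rdonly=%d\n",
                   (unsigned)dynptr_get_size(&p),
                   (unsigned)((p.size >> DYNPTR_TYPE_SHIFT) & 0x7),
                   dynptr_is_rdonly(&p) ? 1 : 0);
            return 0;
    }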
ringbuf.c
  566  struct bpf_dynptr_kern *, ptr)    in BPF_CALL_4()
  573  bpf_dynptr_set_null(ptr);    in BPF_CALL_4()
  579  bpf_dynptr_set_null(ptr);    in BPF_CALL_4()
  587  bpf_dynptr_set_null(ptr);    in BPF_CALL_4()
  591  bpf_dynptr_init(ptr, sample, BPF_DYNPTR_TYPE_RINGBUF, 0, size);    in BPF_CALL_4()
  605  BPF_CALL_2(bpf_ringbuf_submit_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)    in BPF_CALL_2() argument
  607  if (!ptr->data)    in BPF_CALL_2()
  610  bpf_ringbuf_commit(ptr->data, flags, false /* discard */);    in BPF_CALL_2()
  612  bpf_dynptr_set_null(ptr);    in BPF_CALL_2()
  624  BPF_CALL_2(bpf_ringbuf_discard_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)    in BPF_CALL_2() argument
  [all …]
queue_stack_maps.c
  103  void *ptr;    in __queue_map_get() local
  118  ptr = &qs->elements[qs->tail * qs->map.value_size];    in __queue_map_get()
  119  memcpy(value, ptr, qs->map.value_size);    in __queue_map_get()
  137  void *ptr;    in __stack_map_get() local
  157  ptr = &qs->elements[index * qs->map.value_size];    in __stack_map_get()
  158  memcpy(value, ptr, qs->map.value_size);    in __stack_map_get()
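Both getters address elements as offsets into one flat byte array: the element at logical index i starts at i * value_size. A self-contained sketch of the same layout (struct and function names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct flat_map {
            size_t value_size;
            unsigned char elements[]; /* nr_slots * value_size bytes */
    };

    static void slot_read(const struct flat_map *m, size_t i, void *value)
    {
            const unsigned char *ptr = &m->elements[i * m->value_size];

            memcpy(value, ptr, m->value_size);
    }

    int main(void)
    {
            struct flat_map *m = malloc(sizeof(*m) + 3 * sizeof(int));
            int v;

            m->value_size = sizeof(int);
            for (int i = 0; i < 3; i++)
                    memcpy(&m->elements[i * m->value_size],
                           &(int){ i * 10 }, sizeof(int));
            slot_read(m, 2, &v);
            printf("%d\n", v); /* 20 */
            free(m);
            return 0;
    }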
memalloc.c
  573  static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)    in unit_free() argument
  575  struct llist_node *llnode = ptr - LLIST_NODE_SZ;    in unit_free()
  621  void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)    in bpf_mem_free() argument
  625  if (!ptr)    in bpf_mem_free()
  628  idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));    in bpf_mem_free()
  632  unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);    in bpf_mem_free()
  643  void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)    in bpf_mem_cache_free() argument
  645  if (!ptr)    in bpf_mem_cache_free()
  648  unit_free(this_cpu_ptr(ma->cache), ptr);    in bpf_mem_cache_free()
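unit_free() recovers a list-node header that the allocator hid immediately before the user-visible pointer, by stepping back LLIST_NODE_SZ bytes. A userspace sketch of that hidden-header layout; the real code pushes the recovered node onto a per-CPU free list rather than calling free():

    #include <stdlib.h>
    #include <stdio.h>

    struct node {              /* stand-in for struct llist_node */
            struct node *next;
    };
    #define NODE_SZ sizeof(struct node)

    static void *unit_alloc(size_t size)
    {
            /* allocate header + payload, hand out only the payload */
            struct node *n = malloc(NODE_SZ + size);

            return n ? (void *)((char *)n + NODE_SZ) : NULL;
    }

    static void unit_free(void *ptr)
    {
            /* step back over the header to recover the node */
            struct node *n = (struct node *)((char *)ptr - NODE_SZ);

            free(n);
    }

    int main(void)
    {
            int *p = unit_alloc(sizeof(*p));

            *p = 42;
            printf("%d\n", *p);
            unit_free(p);
            return 0;
    }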
map_in_map.c
  118  void bpf_map_fd_put_ptr(void *ptr)    in bpf_map_fd_put_ptr() argument
  123  bpf_map_put(ptr);    in bpf_map_fd_put_ptr()
  126  u32 bpf_map_fd_sys_lookup_elem(void *ptr)    in bpf_map_fd_sys_lookup_elem() argument
  128  return ((struct bpf_map *)ptr)->id;    in bpf_map_fd_sys_lookup_elem()
arraymap.c
  34   void __percpu *ptr;    in bpf_array_alloc_percpu() local
  38   ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,    in bpf_array_alloc_percpu()
  40   if (!ptr) {    in bpf_array_alloc_percpu()
  44   array->pptrs[i] = ptr;    in bpf_array_alloc_percpu()
  809  void **elem, *ptr;    in bpf_fd_array_map_lookup_elem() local
  817  if (elem && (ptr = READ_ONCE(*elem)))    in bpf_fd_array_map_lookup_elem()
  818  *value = map->ops->map_fd_sys_lookup_elem(ptr);    in bpf_fd_array_map_lookup_elem()
  901  static void prog_fd_array_put_ptr(void *ptr)    in prog_fd_array_put_ptr() argument
  903  bpf_prog_put(ptr);    in prog_fd_array_put_ptr()
  906  static u32 prog_fd_array_sys_lookup_elem(void *ptr)    in prog_fd_array_sys_lookup_elem() argument
  [all …]
core.c
  70   u8 *ptr = NULL;    in bpf_internal_load_pointer_neg_helper() local
  73   ptr = skb_network_header(skb) + k - SKF_NET_OFF;    in bpf_internal_load_pointer_neg_helper()
  77   ptr = skb_mac_header(skb) + k - SKF_LL_OFF;    in bpf_internal_load_pointer_neg_helper()
  79   if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))    in bpf_internal_load_pointer_neg_helper()
  80   return ptr;    in bpf_internal_load_pointer_neg_helper()
  829  void *ptr;    member
  862  pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);    in alloc_new_pack()
  863  if (!pack->ptr) {    in alloc_new_pack()
  867  bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);    in alloc_new_pack()
  871  set_vm_flush_reset_perms(pack->ptr);    in alloc_new_pack()
  [all …]
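Line 79 is the guard that makes the negative-offset helper safe: the derived pointer is used only if the whole access [ptr, ptr + size) stays inside the buffer. The same check in a generic form (load_window is a hypothetical name):

    #include <stddef.h>
    #include <stdio.h>

    /* Return ptr only if the size-byte access fits within [head, tail). */
    static void *load_window(unsigned char *head, unsigned char *tail,
                             unsigned char *ptr, size_t size)
    {
            if (ptr >= head && ptr + size <= tail)
                    return ptr;
            return NULL; /* out of bounds: caller must handle the miss */
    }

    int main(void)
    {
            unsigned char pkt[64];

            printf("%d\n", load_window(pkt, pkt + 64, pkt + 60, 4) != NULL); /* 1 */
            printf("%d\n", load_window(pkt, pkt + 64, pkt + 62, 4) != NULL); /* 0 */
            return 0;
    }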
map_in_map.h
  16   void bpf_map_fd_put_ptr(void *ptr);
  17   u32 bpf_map_fd_sys_lookup_elem(void *ptr);
/kernel/sched/
core_sched.c
  25   struct sched_core_cookie *ptr = (void *)cookie;    in sched_core_put_cookie() local
  27   if (ptr && refcount_dec_and_test(&ptr->refcnt)) {    in sched_core_put_cookie()
  28   kfree(ptr);    in sched_core_put_cookie()
  35   struct sched_core_cookie *ptr = (void *)cookie;    in sched_core_get_cookie() local
  37   if (ptr)    in sched_core_get_cookie()
  38   refcount_inc(&ptr->refcnt);    in sched_core_get_cookie()
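The cookie here is an opaque unsigned long that is really a pointer to a refcounted object, freed on the final put. A sketch of the get/put pairing with a C11 atomic standing in for refcount_t:

    #include <stdatomic.h>
    #include <stdlib.h>
    #include <stdio.h>

    struct cookie {
            atomic_int refcnt;
    };

    static void put_cookie(unsigned long cookie)
    {
            struct cookie *ptr = (struct cookie *)cookie;

            /* fetch_sub returning 1 means the count just hit zero */
            if (ptr && atomic_fetch_sub(&ptr->refcnt, 1) == 1)
                    free(ptr);
    }

    static unsigned long get_cookie(unsigned long cookie)
    {
            struct cookie *ptr = (struct cookie *)cookie;

            if (ptr)
                    atomic_fetch_add(&ptr->refcnt, 1);
            return cookie;
    }

    int main(void)
    {
            struct cookie *c = malloc(sizeof(*c));
            unsigned long cookie = (unsigned long)c;

            atomic_init(&c->refcnt, 1);
            get_cookie(cookie);  /* refcnt: 2 */
            put_cookie(cookie);  /* refcnt: 1 */
            put_cookie(cookie);  /* refcnt hits 0: freed */
            puts("done");
            return 0;
    }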
clock.c
  300  u64 *ptr, old_val, val;    in sched_clock_remote() local
  340  ptr = &scd->clock;    in sched_clock_remote()
  347  ptr = &my_scd->clock;    in sched_clock_remote()
  352  if (!try_cmpxchg64(ptr, &old_val, val))    in sched_clock_remote()
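Line 352 is a compare-and-swap retry loop: the new clock value is published only if the slot still holds the value that was read, and on failure try_cmpxchg64() refreshes old_val so the computation can be redone. C11 compare_exchange has the same update-expected-on-failure contract; a sketch:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t clock_slot;

    static uint64_t advance_clock(uint64_t delta)
    {
            uint64_t old_val = atomic_load(&clock_slot);
            uint64_t val;

            do {
                    val = old_val + delta; /* recomputed from the fresh old_val */
            } while (!atomic_compare_exchange_weak(&clock_slot, &old_val, val));
            return val;
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)advance_clock(5));
            printf("%llu\n", (unsigned long long)advance_clock(7)); /* 12 */
            return 0;
    }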
/kernel/trace/
trace_events.c
  2574  static char *eval_replace(char *ptr, struct trace_eval_map *map, int len)    in eval_replace() argument
  2580  elen = snprintf(ptr, 0, "%ld", map->eval_value);    in eval_replace()
  2585  snprintf(ptr, elen + 1, "%ld", map->eval_value);    in eval_replace()
  2588  rlen = strlen(ptr + len);    in eval_replace()
  2589  memmove(ptr + elen, ptr + len, rlen);    in eval_replace()
  2591  ptr[elen + rlen] = 0;    in eval_replace()
  2593  return ptr + elen;    in eval_replace()
  2599  char *ptr;    in update_event_printk() local
  2603  for (ptr = call->print_fmt; *ptr; ptr++) {    in update_event_printk()
  2604  if (*ptr == '\\') {    in update_event_printk()
  [all …]
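eval_replace() first measures the rendered value with snprintf(ptr, 0, ...), then shifts the tail of the string with memmove() and prints the value into the hole. A userspace rendering of the same splice that stages the digits in a scratch buffer, so the terminating NUL cannot clobber the shifted tail (the buffer is assumed large enough if the replacement grows):

    #include <stdio.h>
    #include <string.h>

    /* Replace the len-byte token at ptr with the decimal form of val;
     * returns a pointer just past the inserted digits. */
    static char *splice_value(char *ptr, int len, long val)
    {
            char num[24];
            int elen = snprintf(num, sizeof(num), "%ld", val);
            size_t rlen = strlen(ptr + len);

            memmove(ptr + elen, ptr + len, rlen + 1); /* move tail + NUL */
            memcpy(ptr, num, elen);
            return ptr + elen;
    }

    int main(void)
    {
            char fmt[64] = "state == TASK_RUNNING, next";

            splice_value(strstr(fmt, "TASK_RUNNING"),
                         strlen("TASK_RUNNING"), 0);
            puts(fmt); /* state == 0, next */
            return 0;
    }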
trace_printk.c
  256  const char **ptr = __start___tracepoint_str;    in trace_is_tracepoint_string() local
  258  for (ptr = __start___tracepoint_str; ptr < __stop___tracepoint_str; ptr++) {    in trace_is_tracepoint_string()
  259  if (str == *ptr)    in trace_is_tracepoint_string()
/kernel/time/
alarmtimer.c
  563  struct k_itimer *ptr = container_of(alarm, struct k_itimer,    in alarm_handle_timer() local
  569  spin_lock_irqsave(&ptr->it_lock, flags);    in alarm_handle_timer()
  571  ptr->it_active = 0;    in alarm_handle_timer()
  572  if (ptr->it_interval)    in alarm_handle_timer()
  573  si_private = ++ptr->it_requeue_pending;    in alarm_handle_timer()
  575  if (posix_timer_event(ptr, si_private) && ptr->it_interval) {    in alarm_handle_timer()
  581  ptr->it_overrun += __alarm_forward_now(alarm, ptr->it_interval, true);    in alarm_handle_timer()
  582  ++ptr->it_requeue_pending;    in alarm_handle_timer()
  583  ptr->it_active = 1;    in alarm_handle_timer()
  586  spin_unlock_irqrestore(&ptr->it_lock, flags);    in alarm_handle_timer()
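The hit at line 563 is the standard container_of() step: from a pointer to an embedded member, recover the enclosing structure by subtracting the member's offset. A self-contained sketch with a stand-in structure (k_itimer_like is hypothetical):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct alarm { int dummy; };

    struct k_itimer_like {          /* illustrative stand-in */
            int it_active;
            struct alarm it_alarm;  /* embedded member */
    };

    int main(void)
    {
            struct k_itimer_like t = { .it_active = 1 };
            struct alarm *a = &t.it_alarm;
            struct k_itimer_like *back =
                    container_of(a, struct k_itimer_like, it_alarm);

            printf("%d\n", back->it_active); /* prints 1 */
            return 0;
    }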