/kernel/ |
D | iomem.c |
      9  __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)  in ioremap_cache() argument
     11  return ioremap(offset, size);  in ioremap_cache()
     16  static void *arch_memremap_wb(resource_size_t offset, unsigned long size)  in arch_memremap_wb() argument
     18  return (__force void *)ioremap_cache(offset, size);  in arch_memremap_wb()
     23  static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,  in arch_memremap_can_ram_remap() argument
     30  static void *try_ram_remap(resource_size_t offset, size_t size,  in try_ram_remap() argument
     33  unsigned long pfn = PHYS_PFN(offset);  in try_ram_remap()
     37  arch_memremap_can_ram_remap(offset, size, flags))  in try_ram_remap()
     38  return __va(offset);  in try_ram_remap()
     71  void *memremap(resource_size_t offset, size_t size, unsigned long flags)  in memremap() argument
  [all …]
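The memremap() hits above are the generic fallback path: a cacheable ioremap when the region is not ordinary RAM, a direct __va() alias when it is. A minimal caller sketch, assuming a hypothetical platform region (MY_PHYS_BASE, MY_REGION_SIZE and the my_* helpers are illustrative, not kernel symbols):

```c
#include <linux/errno.h>
#include <linux/io.h>

#define MY_PHYS_BASE   0x80000000UL	/* hypothetical RAM-like region */
#define MY_REGION_SIZE 0x1000UL

static void *shared_mem;

static int my_map_region(void)
{
	/* Ask for a write-back mapping; memremap() returns NULL on failure. */
	shared_mem = memremap(MY_PHYS_BASE, MY_REGION_SIZE, MEMREMAP_WB);
	if (!shared_mem)
		return -ENOMEM;
	return 0;
}

static void my_unmap_region(void)
{
	memunmap(shared_mem);
}
```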
|
D | kallsyms.c |
    326  unsigned long *offset)  in get_symbol_pos() argument
    378  if (offset)  in get_symbol_pos()
    379  *offset = addr - symbol_start;  in get_symbol_pos()
    388  unsigned long *offset)  in kallsyms_lookup_size_offset() argument
    393  get_symbol_pos(addr, symbolsize, offset);  in kallsyms_lookup_size_offset()
    396  return !!module_address_lookup(addr, symbolsize, offset, NULL, NULL, namebuf) ||  in kallsyms_lookup_size_offset()
    397  !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);  in kallsyms_lookup_size_offset()
    402  unsigned long *offset, char **modname,  in kallsyms_lookup_buildid() argument
    413  pos = get_symbol_pos(addr, symbolsize, offset);  in kallsyms_lookup_buildid()
    427  ret = module_address_lookup(addr, symbolsize, offset,  in kallsyms_lookup_buildid()
  [all …]
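get_symbol_pos() is the core lookup that turns an address into a (symbol start, size, offset) triple; kallsyms_lookup_size_offset() wraps it and falls back to the module and BPF symbol tables. A sketch of a diagnostic caller (report_addr() is a hypothetical helper):

```c
#include <linux/kallsyms.h>
#include <linux/printk.h>

static void report_addr(unsigned long addr)
{
	unsigned long size, offset;

	/* Returns nonzero when a symbol covering addr was found. */
	if (kallsyms_lookup_size_offset(addr, &size, &offset))
		pr_info("addr %px: %lu bytes into a %lu-byte symbol\n",
			(void *)addr, offset, size);
	else
		pr_info("addr %px: no symbol found\n", (void *)addr);
}
```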
|
D | context_tracking.c |
     81  static noinstr void ct_kernel_exit_state(int offset)  in ct_kernel_exit_state() argument
     91  seq = ct_state_inc(offset);  in ct_kernel_exit_state()
    101  static noinstr void ct_kernel_enter_state(int offset)  in ct_kernel_enter_state() argument
    110  seq = ct_state_inc(offset);  in ct_kernel_enter_state()
    124  static void noinstr ct_kernel_exit(bool user, int offset)  in ct_kernel_exit() argument
    150  ct_kernel_exit_state(offset);  in ct_kernel_exit()
    163  static void noinstr ct_kernel_enter(bool user, int offset)  in ct_kernel_enter() argument
    178  ct_kernel_enter_state(offset);  in ct_kernel_enter()
    426  static __always_inline void ct_kernel_exit(bool user, int offset) { }  in ct_kernel_exit() argument
    427  static __always_inline void ct_kernel_enter(bool user, int offset) { }  in ct_kernel_enter() argument
|
D | relay.c |
    300  buf->offset = 0;  in __relay_reset()
    667  if (buf->offset != buf->chan->subbuf_size + 1) {  in relay_switch_subbuf()
    668  buf->prev_padding = buf->chan->subbuf_size - buf->offset;  in relay_switch_subbuf()
    694  buf->offset = 0;  in relay_switch_subbuf()
    696  buf->offset = buf->chan->subbuf_size + 1;  in relay_switch_subbuf()
    702  if (unlikely(length + buf->offset > buf->chan->subbuf_size))  in relay_switch_subbuf()
    887  buf->offset == buf->bytes_consumed)  in relay_file_read_consume()
    902  (buf->offset == subbuf_size))  in relay_file_read_consume()
    923  if (unlikely(buf->offset > subbuf_size)) {  in relay_file_read_avail()
    935  produced = (produced % n_subbufs) * subbuf_size + buf->offset;  in relay_file_read_avail()
  [all …]
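relay_switch_subbuf() uses buf->offset as the write cursor inside the current sub-buffer, with subbuf_size + 1 serving as an "unswitchable" sentinel. A sketch of the client side of the API, assuming debugfs-backed buffers (the my_* names are illustrative):

```c
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/relay.h>

static struct dentry *my_create_buf_file(const char *filename,
					 struct dentry *parent, umode_t mode,
					 struct rchan_buf *buf, int *is_global)
{
	/* Expose each per-CPU buffer as a debugfs file. */
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static int my_remove_buf_file(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static const struct rchan_callbacks my_relay_cbs = {
	.create_buf_file = my_create_buf_file,
	.remove_buf_file = my_remove_buf_file,
};

static struct rchan *my_chan;

static int my_relay_init(struct dentry *dir)
{
	/* Eight 64 KiB sub-buffers; relay_write() advances buf->offset and
	 * relay_switch_subbuf() pads and rotates when a record won't fit. */
	my_chan = relay_open("trace", dir, 65536, 8, &my_relay_cbs, NULL);
	return my_chan ? 0 : -ENOMEM;
}
```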
|
/kernel/trace/ |
D | trace_probe.c |
    165  void __trace_probe_log_err(int offset, int err_type)  in __trace_probe_log_err() argument
    190  offset = 0;  in __trace_probe_log_err()
    204  trace_probe_err_text, err_type, pos + offset);  in __trace_probe_log_err()
    210  int traceprobe_split_symbol_offset(char *symbol, long *offset)  in traceprobe_split_symbol_offset() argument
    215  if (!offset)  in traceprobe_split_symbol_offset()
    220  ret = kstrtol(tmp, 0, offset);  in traceprobe_split_symbol_offset()
    225  *offset = 0;  in traceprobe_split_symbol_offset()
    232  char *buf, int offset)  in traceprobe_parse_event_name() argument
    243  trace_probe_log_err(offset, NO_GROUP_NAME);  in traceprobe_parse_event_name()
    247  trace_probe_log_err(offset, GROUP_TOO_LONG);  in traceprobe_parse_event_name()
  [all …]
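traceprobe_split_symbol_offset() splits probe specs like "do_sys_open+0x10" into a symbol and a signed offset. A standalone userspace analogue of that parse, with strtol standing in for the kernel's kstrtol (a sketch, not the kernel code):

```c
#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int split_symbol_offset(char *symbol, long *offset)
{
	char *tmp, *end;

	if (!offset)
		return -EINVAL;

	tmp = strpbrk(symbol, "+-");	/* first '+' or '-' starts the offset */
	if (tmp) {
		*offset = strtol(tmp, &end, 0);	/* sign is part of the number */
		if (*end != '\0')
			return -EINVAL;
		*tmp = '\0';			/* terminate the symbol part */
	} else {
		*offset = 0;
	}
	return 0;
}
```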
|
D | trace_probe_tmpl.h |
    116  ret = probe_mem_read(&val, (void *)val + code->offset,  in process_fetch_insn_bottom()
    121  (void *)val + code->offset, sizeof(val));  in process_fetch_insn_bottom()
    135  ret = fetch_store_strlen(val + code->offset);  in process_fetch_insn_bottom()
    139  ret = fetch_store_strlen_user(val + code->offset);  in process_fetch_insn_bottom()
    143  ret = fetch_store_symstrlen(val + code->offset);  in process_fetch_insn_bottom()
    156  probe_mem_read(dest, (void *)val + code->offset, code->size);  in process_fetch_insn_bottom()
    159  probe_mem_read_user(dest, (void *)val + code->offset, code->size);  in process_fetch_insn_bottom()
    163  ret = fetch_store_string(val + code->offset, dest, base);  in process_fetch_insn_bottom()
    167  ret = fetch_store_string_user(val + code->offset, dest, base);  in process_fetch_insn_bottom()
    171  ret = fetch_store_symstring(val + code->offset, dest, base);  in process_fetch_insn_bottom()
  [all …]
|
D | trace_events_inject.c |
    146  if (field->size + field->offset > size)  in trace_get_entry_size()
    147  size = field->size + field->offset;  in trace_get_entry_size()
    177  str_loc -= field->offset + field->size;  in trace_alloc_entry()
    179  str_item = (u32 *)(entry + field->offset);  in trace_alloc_entry()
    184  paddr = (char **)(entry + field->offset);  in trace_alloc_entry()
    220  strlcpy(entry + field->offset, addr, field->size);  in parse_entry()
    236  str_item = (u32 *)(entry + field->offset);  in parse_entry()
    238  str_loc -= field->offset + field->size;  in parse_entry()
    243  paddr = (char **)(entry + field->offset);  in parse_entry()
    251  memcpy(entry + field->offset, &tmp, 1);  in parse_entry()
  [all …]
|
D | trace_probe.h |
     72  u32 offset = get_loc_offs(loc);  in update_data_loc() local
     74  return make_data_loc(maxlen - consumed, offset + consumed);  in update_data_loc()
    117  int offset;  member
    223  unsigned int offset; /* Offset from argument entry */  member
    369  extern int traceprobe_split_symbol_offset(char *symbol, long *offset);
    371  char *buf, int offset);
    393  size_t offset, struct trace_probe *tp);
    476  void __trace_probe_log_err(int offset, int err);
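The make_data_loc()/get_loc_offs() pair above (and the str_loc arithmetic in trace_events_inject.c) implement ftrace's dynamic-field encoding: one u32 holding the length in the high 16 bits and the offset from the start of the trace entry in the low 16. An illustrative restatement (my_* names are local copies, not the kernel's):

```c
#include <linux/types.h>

static inline u32 my_make_data_loc(int len, int offs)
{
	return ((u32)len << 16) | (offs & 0xffff);
}

static inline int my_get_loc_len(u32 loc)
{
	return loc >> 16;
}

static inline int my_get_loc_offs(u32 loc)
{
	return loc & 0xffff;
}

/* A dynamic string is then read relative to the raw entry: */
static inline const char *my_get_loc_data(void *entry, u32 loc)
{
	return (const char *)entry + my_get_loc_offs(loc);
}
```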
|
D | trace_uprobe.c |
     63  unsigned long offset;  member
    213  base_addr = udd->bp_addr - udd->tu->offset;  in translate_user_vaddr()
    299  (int)(sizeof(void *) * 2), tu->offset);  in trace_uprobe_match_command_head()
    302  (int)(sizeof(void *) * 2), tu->offset,  in trace_uprobe_match_command_head()
    421  comp->offset != orig->offset)  in trace_uprobe_has_same_uprobe()
    483  new->offset == tmp->offset &&  in validate_ref_ctr_offset()
    549  unsigned long offset, ref_ctr_offset;  in __trace_uprobe_create() local
    642  ret = kstrtoul(arg, 0, &offset);  in __trace_uprobe_create()
    671  snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);  in __trace_uprobe_create()
    686  tu->offset = offset;  in __trace_uprobe_create()
  [all …]
|
/kernel/entry/ |
D | syscall_user_dispatch.c |
     39  if (likely(instruction_pointer(regs) - sd->offset < sd->len))  in syscall_user_dispatch()
     71  int set_syscall_user_dispatch(unsigned long mode, unsigned long offset,  in set_syscall_user_dispatch() argument
     76  if (offset || len || selector)  in set_syscall_user_dispatch()
     86  if (offset && offset + len <= offset)  in set_syscall_user_dispatch()
     98  current->syscall_dispatch.offset = offset;  in set_syscall_user_dispatch()
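set_syscall_user_dispatch() backs the PR_SET_SYSCALL_USER_DISPATCH prctl: syscalls issued from outside [offset, offset + len) raise SIGSYS whenever the userspace selector byte is SYSCALL_DISPATCH_FILTER_BLOCK, and the `offset + len <= offset` test above rejects wrap-around. A userspace sketch with illustrative region bounds:

```c
#include <sys/prctl.h>
#include <unistd.h>

#ifndef PR_SET_SYSCALL_USER_DISPATCH	/* uapi values, for older headers */
#define PR_SET_SYSCALL_USER_DISPATCH	59
#define PR_SYS_DISPATCH_OFF		0
#define PR_SYS_DISPATCH_ON		1
#define SYSCALL_DISPATCH_FILTER_ALLOW	0
#define SYSCALL_DISPATCH_FILTER_BLOCK	1
#endif

/* Flip this to ..._BLOCK to start intercepting out-of-region syscalls. */
static volatile char selector = SYSCALL_DISPATCH_FILTER_ALLOW;

static int enable_dispatch(unsigned long offset, unsigned long len)
{
	/* [offset, offset + len) is the always-allowed region, typically
	 * covering trampoline code that must keep making real syscalls. */
	return prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON,
		     offset, len, &selector);
}
```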
|
/kernel/time/ |
D | namespace.c |
     26  ktime_t offset;  in do_timens_ktime_to_host() local
     30  offset = timespec64_to_ktime(ns_offsets->monotonic);  in do_timens_ktime_to_host()
     34  offset = timespec64_to_ktime(ns_offsets->boottime);  in do_timens_ktime_to_host()
     44  if (tim < offset) {  in do_timens_ktime_to_host()
     51  tim = ktime_sub(tim, offset);  in do_timens_ktime_to_host()
    182  struct timens_offset *offset = vdata->offset;  in timens_setup_vdso_data() local
    188  offset[CLOCK_MONOTONIC] = monotonic;  in timens_setup_vdso_data()
    189  offset[CLOCK_MONOTONIC_RAW] = monotonic;  in timens_setup_vdso_data()
    190  offset[CLOCK_MONOTONIC_COARSE] = monotonic;  in timens_setup_vdso_data()
    191  offset[CLOCK_BOOTTIME] = boottime;  in timens_setup_vdso_data()
  [all …]
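do_timens_ktime_to_host() subtracts the per-namespace clock offsets when a namespaced timeout is handed to the host clocks. From userspace, those offsets are written to /proc/PID/timens_offsets before the first task enters the namespace; a sketch (the one-day shift is illustrative, and CLONE_NEWTIME/the offsets file need kernel 5.6+ and suitable privileges):

```c
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

#ifndef CLONE_NEWTIME
#define CLONE_NEWTIME 0x00000080	/* uapi value, for older headers */
#endif

int main(void)
{
	FILE *f;

	/* Create a time namespace; it only takes effect for children. */
	if (unshare(CLONE_NEWTIME))
		return 1;

	/* "<clock> <secs> <nsecs>": shift CLOCK_BOOTTIME forward one day.
	 * Only writable while the namespace has no tasks in it. */
	f = fopen("/proc/self/timens_offsets", "w");
	if (!f)
		return 1;
	fprintf(f, "boottime 86400 0\n");
	fclose(f);

	if (fork() == 0)	/* the child enters the new namespace */
		execlp("cat", "cat", "/proc/uptime", (char *)NULL);
	return 0;
}
```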
|
D | ntp.c |
    125  static inline s64 ntp_offset_chunk(s64 offset)  in ntp_offset_chunk() argument
    128  return offset;  in ntp_offset_chunk()
    130  return shift_right(offset, SHIFT_PLL + time_constant);  in ntp_offset_chunk()
    210  static inline s64 ntp_offset_chunk(s64 offset)  in ntp_offset_chunk() argument
    212  return shift_right(offset, SHIFT_PLL + time_constant);  in ntp_offset_chunk()
    296  static void ntp_update_offset(long offset)  in ntp_update_offset() argument
    307  offset = clamp(offset, -USEC_PER_SEC, USEC_PER_SEC);  in ntp_update_offset()
    308  offset *= NSEC_PER_USEC;  in ntp_update_offset()
    315  offset = clamp(offset, -MAXPHASE, MAXPHASE);  in ntp_update_offset()
    327  offset64 = offset;  in ntp_update_offset()
  [all …]
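ntp_update_offset() is fed from the adjtimex() syscall; the clamps above bound the injected phase error to ±MAXPHASE (500 ms). A userspace sketch of injecting a 1 ms offset (needs CAP_SYS_TIME, and STA_PLL must be set or the kernel silently ignores the offset):

```c
#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = {
		.modes  = ADJ_STATUS | ADJ_OFFSET | ADJ_NANO,
		.status = STA_PLL,	/* ntp_update_offset() bails without it */
		.offset = 1000000,	/* +1 ms, in nanoseconds with ADJ_NANO */
	};

	if (adjtimex(&tx) == -1) {
		perror("adjtimex");
		return 1;
	}
	printf("remaining offset: %ld ns\n", tx.offset);
	return 0;
}
```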
|
D | timer_list.c |
    109  (unsigned long long) ktime_to_ns(base->offset));  in print_base()
    112  print_active_timers(m, base, now + ktime_to_ns(base->offset));  in print_base()
    301  static void *move_iter(struct timer_list_iter *iter, loff_t offset)  in move_iter() argument
    303  for (; offset; offset--) {  in move_iter()
    320  static void *timer_list_start(struct seq_file *file, loff_t *offset)  in timer_list_start() argument
    324  if (!*offset)  in timer_list_start()
    328  return move_iter(iter, *offset);  in timer_list_start()
    331  static void *timer_list_next(struct seq_file *file, void *v, loff_t *offset)  in timer_list_next() argument
    334  ++*offset;  in timer_list_next()
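timer_list_start()/timer_list_next() follow the standard seq_file contract: *offset is the iterator position, ->start() maps it to an element and ->next() increments it. A minimal sketch of the same pattern over a hypothetical fixed table (the demo_* names are illustrative):

```c
#include <linux/seq_file.h>

#define NR_ITEMS 16
static int items[NR_ITEMS];

static void *demo_start(struct seq_file *m, loff_t *offset)
{
	return *offset < NR_ITEMS ? &items[*offset] : NULL;
}

static void *demo_next(struct seq_file *m, void *v, loff_t *offset)
{
	++*offset;			/* advance, then re-resolve */
	return demo_start(m, offset);
}

static void demo_stop(struct seq_file *m, void *v)
{
}

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", *(int *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_start,
	.next  = demo_next,
	.stop  = demo_stop,
	.show  = demo_show,
};
```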
|
D | timekeeping.c |
    201  static void timekeeping_check_update(struct timekeeper *tk, u64 offset)  in timekeeping_check_update() argument
    207  if (offset > max_cycles) {  in timekeeping_check_update()
    209  offset, name, max_cycles);  in timekeeping_check_update()
    212  if (offset > (max_cycles >> 1)) {  in timekeeping_check_update()
    214  offset, name, max_cycles >> 1);  in timekeeping_check_update()
    281  static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)  in timekeeping_check_update() argument
    883  ktime_t base, *offset = offsets[offs];  in ktime_get_with_offset() local
    890  base = ktime_add(tk->tkr_mono.base, *offset);  in ktime_get_with_offset()
    904  ktime_t base, *offset = offsets[offs];  in ktime_get_coarse_with_offset() local
    911  base = ktime_add(tk->tkr_mono.base, *offset);  in ktime_get_coarse_with_offset()
  [all …]
|
/kernel/events/ |
D | uprobes.c |
     63  loff_t offset;  member
    131  static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)  in offset_to_vaddr() argument
    133  return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);  in offset_to_vaddr()
    410  (unsigned long long) uprobe->offset,  in update_ref_ctr_warn()
    624  if (l_offset < r->offset)  in uprobe_cmp()
    627  if (l_offset > r->offset)  in uprobe_cmp()
    638  loff_t offset;  member
    644  return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));  in __uprobe_cmp_key()
    650  return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));  in __uprobe_cmp()
    653  static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)  in __find_uprobe() argument
  [all …]
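A uprobe is keyed by (inode, file offset), so offset_to_vaddr() above translates that file offset into a virtual address within a given mapping. The inverse direction uses the same arithmetic; this mirrors the kernel's own vaddr_to_offset() helper in the same file (the my_ prefix marks it as a local copy):

```c
#include <linux/mm.h>

static inline loff_t my_vaddr_to_offset(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	/* vm_pgoff is the file offset of vm_start, in pages. */
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
```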
|
/kernel/power/ |
D | user.c |
    209  compat_loff_t offset;  member
    216  sector_t offset;  in snapshot_set_swap_area() local
    228  offset = swap_area.offset;  in snapshot_set_swap_area()
    235  offset = swap_area.offset;  in snapshot_set_swap_area()
    242  data->swap = swap_type_of(swdev, offset);  in snapshot_set_swap_area()
    255  sector_t offset;  in snapshot_ioctl() local
    369  offset = alloc_swapdev_block(data->swap);  in snapshot_ioctl()
    370  if (offset) {  in snapshot_ioctl()
    371  offset <<= PAGE_SHIFT;  in snapshot_ioctl()
    372  error = put_user(offset, (loff_t __user *)arg);  in snapshot_ioctl()
|
D | swap.c |
    181  unsigned long offset;  in alloc_swapdev_block() local
    183  offset = swp_offset(get_swap_page_of_type(swap));  in alloc_swapdev_block()
    184  if (offset) {  in alloc_swapdev_block()
    185  if (swsusp_extents_insert(offset))  in alloc_swapdev_block()
    186  swap_free(swp_entry(swap, offset));  in alloc_swapdev_block()
    188  return swapdev_block(swap, offset);  in alloc_swapdev_block()
    206  unsigned long offset;  in free_all_swap_pages() local
    210  for (offset = ext->start; offset <= ext->end; offset++)  in free_all_swap_pages()
    211  swap_free(swp_entry(swap, offset));  in free_all_swap_pages()
    380  static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)  in write_page() argument
  [all …]
|
/kernel/locking/ |
D | qspinlock_paravirt.h |
    207  #define for_each_hash_entry(he, offset, hash) \  argument
    208      for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0; \
    209           offset < (1 << pv_lock_hash_bits); \
    210           offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])
    214  unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);  in pv_hash() local
    218  for_each_hash_entry(he, offset, hash) {  in pv_hash()
    241  unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);  in pv_unhash() local
    245  for_each_hash_entry(he, offset, hash) {  in pv_unhash()
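for_each_hash_entry() is an open-addressed, linear-probing walk: it rounds the hash down to a cache-line group of PV_HE_PER_LINE entries, then wraps around the power-of-two table using offset as the probe distance. A generic sketch of the same lookup scheme (the my_* table, and its terminate-on-empty rule, are illustrative; the pv code keeps probing):

```c
#include <linux/hash.h>

#define MY_HASH_BITS	8
#define MY_HASH_SIZE	(1 << MY_HASH_BITS)

struct my_entry {
	void *key;
	void *value;
};

static struct my_entry my_table[MY_HASH_SIZE];

static struct my_entry *my_hash_find(void *key)
{
	unsigned long hash = hash_ptr(key, MY_HASH_BITS);
	unsigned long offset;

	for (offset = 0; offset < MY_HASH_SIZE; offset++) {
		struct my_entry *he =
			&my_table[(hash + offset) & (MY_HASH_SIZE - 1)];

		if (he->key == key)
			return he;
		if (!he->key)		/* empty slot: key cannot be present */
			return NULL;
	}
	return NULL;
}
```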
|
/kernel/dma/ |
D | debug.c |
     76  size_t offset;  member
    393  return __pfn_to_phys(entry->pfn) + entry->offset;  in phys_addr()
    395  return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;  in phys_addr()
    458  (entry->offset >> L1_CACHE_SHIFT);  in to_cacheline_number()
   1050  struct page *page, size_t offset)  in check_for_stack() argument
   1059  addr = page_address(page) + offset;  in check_for_stack()
   1070  addr = (u8 *)current->stack + i * PAGE_SIZE + offset;  in check_for_stack()
   1202  void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,  in debug_dma_map_page() argument
   1221  entry->offset = offset;  in debug_dma_map_page()
   1227  check_for_stack(dev, page, offset);  in debug_dma_map_page()
  [all …]
|
D | swiotlb.c |
    593  unsigned int offset = orig_addr & ~PAGE_MASK;  in swiotlb_bounce() local
    599  sz = min_t(size_t, PAGE_SIZE - offset, size);  in swiotlb_bounce()
    604  memcpy_from_page(vaddr, page, offset, sz);  in swiotlb_bounce()
    606  memcpy_to_page(page, offset, vaddr, sz);  in swiotlb_bounce()
    612  offset = 0;  in swiotlb_bounce()
    661  unsigned int offset = swiotlb_align_offset(dev, orig_addr);  in swiotlb_do_find_slots() local
    717  mem->slots[i].alloc_size = alloc_size - (offset +  in swiotlb_do_find_slots()
    772  unsigned int offset = swiotlb_align_offset(dev, orig_addr);  in swiotlb_tbl_map_single() local
    793  alloc_size + offset, alloc_align_mask);  in swiotlb_tbl_map_single()
    807  for (i = 0; i < nr_slots(alloc_size + offset);  i++)  in swiotlb_tbl_map_single()
  [all …]
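swiotlb_bounce() copies through the bounce buffer page by page because the original buffer may live in non-contiguous highmem pages: the first chunk is bounded by PAGE_SIZE - offset, and every later page starts at offset 0. A sketch of that loop in isolation (bounce_from_phys() is a hypothetical helper):

```c
#include <linux/highmem.h>
#include <linux/minmax.h>
#include <linux/pfn.h>

static void bounce_from_phys(char *vaddr, phys_addr_t orig_addr, size_t size)
{
	unsigned int offset = orig_addr & ~PAGE_MASK;

	while (size) {
		size_t sz = min_t(size_t, PAGE_SIZE - offset, size);
		struct page *page = pfn_to_page(PFN_DOWN(orig_addr));

		/* kmap-based copy, safe for highmem pages */
		memcpy_from_page(vaddr, page, offset, sz);

		size -= sz;
		orig_addr += sz;
		vaddr += sz;
		offset = 0;	/* later pages start at their beginning */
	}
}
```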
|
/kernel/sched/ |
D | stats.c |
    186  static void *schedstat_start(struct seq_file *file, loff_t *offset)  in schedstat_start() argument
    188  unsigned long n = *offset;  in schedstat_start()
    200  *offset = n + 1;  in schedstat_start()
    208  static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)  in schedstat_next() argument
    210  (*offset)++;  in schedstat_next()
    212  return schedstat_start(file, offset);  in schedstat_next()
|
D | loadavg.c |
     71  void get_avenrun(unsigned long *loads, unsigned long offset, int shift)  in get_avenrun() argument
     73  loads[0] = (avenrun[0] + offset) << shift;  in get_avenrun()
     74  loads[1] = (avenrun[1] + offset) << shift;  in get_avenrun()
     75  loads[2] = (avenrun[2] + offset) << shift;  in get_avenrun()
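get_avenrun() hands out the raw fixed-point load averages (FSHIFT = 11 fractional bits); the offset/shift arguments let callers pre-round and scale. A sketch of decoding them the way fs/proc/loadavg.c does, with local copies of its LOAD_INT/LOAD_FRAC macros:

```c
#include <linux/printk.h>
#include <linux/sched/loadavg.h>

#define MY_LOAD_INT(x)  ((x) >> FSHIFT)
#define MY_LOAD_FRAC(x) MY_LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

static void print_loadavg(void)
{
	unsigned long avnrun[3];

	/* FIXED_1/200 rounds to two decimal places, as /proc/loadavg does. */
	get_avenrun(avnrun, FIXED_1 / 200, 0);

	pr_info("load: %lu.%02lu %lu.%02lu %lu.%02lu\n",
		MY_LOAD_INT(avnrun[0]), MY_LOAD_FRAC(avnrun[0]),
		MY_LOAD_INT(avnrun[1]), MY_LOAD_FRAC(avnrun[1]),
		MY_LOAD_INT(avnrun[2]), MY_LOAD_FRAC(avnrun[2]));
}
```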
|
/kernel/irq/ |
D | irq_sim.c |
    103  unsigned int offset = 0;  in irq_sim_handle_irq() local
    109  offset = find_next_bit(work_ctx->pending,  in irq_sim_handle_irq()
    110  work_ctx->irq_count, offset);  in irq_sim_handle_irq()
    111  clear_bit(offset, work_ctx->pending);  in irq_sim_handle_irq()
    112  irqnum = irq_find_mapping(work_ctx->domain, offset);  in irq_sim_handle_irq()
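irq_sim_handle_irq() rescans the pending bitmap with find_next_bit() because bits may be set concurrently while it runs. When a single snapshot pass is enough, the for_each_set_bit() helper expresses the same offset walk more compactly (a simplified sketch, not equivalent under concurrent setters):

```c
#include <linux/bitops.h>

static void handle_pending(unsigned long *pending, unsigned int count)
{
	unsigned int offset;

	for_each_set_bit(offset, pending, count) {
		clear_bit(offset, pending);
		/* ... look up and dispatch the interrupt at this offset ... */
	}
}
```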
|
D | ipi.c |
     26  unsigned int nr_irqs, offset;  in irq_reserve_ipi() local
     54  offset = 0;  in irq_reserve_ipi()
     64  offset = cpumask_first(dest);  in irq_reserve_ipi()
     69  next = cpumask_next_zero(offset, dest);  in irq_reserve_ipi()
     95  data->common->ipi_offset = offset;  in irq_reserve_ipi()
|
/kernel/rcu/ |
D | tiny.c |
     88  unsigned long offset = (unsigned long)head->func;  in rcu_reclaim_tiny() local
     91  if (__is_kvfree_rcu_offset(offset)) {  in rcu_reclaim_tiny()
     92  trace_rcu_invoke_kvfree_callback("", head, offset);  in rcu_reclaim_tiny()
     93  kvfree((void *)head - offset);  in rcu_reclaim_tiny()
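The offset here is kvfree_rcu()'s encoding trick: kfree_rcu(ptr, field) passes offsetof(typeof(*ptr), field) in the callback-function slot, and any "function pointer" below 4096 (__is_kvfree_rcu_offset()) is decoded as an offset back to the start of the enclosing object. A sketch of the caller side (struct my_obj is illustrative):

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	int data;
	struct rcu_head rcu;	/* offsetof(struct my_obj, rcu) < 4096 */
};

static void my_free(struct my_obj *p)
{
	/* Queues p->rcu; after a grace period the core recovers
	 * (void *)&p->rcu - offsetof(struct my_obj, rcu) == p and kvfree()s it. */
	kfree_rcu(p, rcu);
}
```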
|