/kernel/ |
D | iomem.c |
      9  __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)  in ioremap_cache() argument
     11  return ioremap(offset, size);  in ioremap_cache()
     16  static void *arch_memremap_wb(resource_size_t offset, unsigned long size)  in arch_memremap_wb() argument
     18  return (__force void *)ioremap_cache(offset, size);  in arch_memremap_wb()
     23  static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,  in arch_memremap_can_ram_remap() argument
     30  static void *try_ram_remap(resource_size_t offset, size_t size,  in try_ram_remap() argument
     33  unsigned long pfn = PHYS_PFN(offset);  in try_ram_remap()
     37  arch_memremap_can_ram_remap(offset, size, flags))  in try_ram_remap()
     38  return __va(offset);  in try_ram_remap()
     71  void *memremap(resource_size_t offset, size_t size, unsigned long flags)  in memremap() argument
         [all …]
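The hits above are the generic memremap() fallback chain: a write-back request goes through arch_memremap_wb()/ioremap_cache(), and plain System RAM is handed back directly from the linear map via __va(offset). A minimal sketch of a caller, with a hypothetical driver and made-up region; not taken from the file:

    #include <linux/errno.h>
    #include <linux/io.h>

    static void *fw_table;  /* hypothetical mapping handle */

    static int example_map_region(resource_size_t base, size_t size)
    {
            /* MEMREMAP_WB asks for a cacheable mapping; for System RAM
             * this can be satisfied straight from the linear map. */
            fw_table = memremap(base, size, MEMREMAP_WB);
            if (!fw_table)
                    return -ENOMEM;
            return 0;
    }

    static void example_unmap_region(void)
    {
            memunmap(fw_table);
    }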
|
D | kallsyms.c |
    231  unsigned long *offset)  in get_symbol_pos() argument
    283  if (offset)  in get_symbol_pos()
    284  *offset = addr - symbol_start;  in get_symbol_pos()
    293  unsigned long *offset)  in kallsyms_lookup_size_offset() argument
    298  get_symbol_pos(addr, symbolsize, offset);  in kallsyms_lookup_size_offset()
    301  return !!module_address_lookup(addr, symbolsize, offset, NULL, NULL, namebuf) ||  in kallsyms_lookup_size_offset()
    302  !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);  in kallsyms_lookup_size_offset()
    307  unsigned long *offset, char **modname,  in kallsyms_lookup_buildid() argument
    318  pos = get_symbol_pos(addr, symbolsize, offset);  in kallsyms_lookup_buildid()
    332  ret = module_address_lookup(addr, symbolsize, offset,  in kallsyms_lookup_buildid()
         [all …]
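kallsyms_lookup_size_offset() resolves an address into its containing symbol's size and the offset within that symbol, falling back to module and BPF symbol tables as the hits above show. A minimal sketch of a caller; example_report_addr() is a hypothetical helper, not kernel code:

    #include <linux/kallsyms.h>
    #include <linux/printk.h>

    static void example_report_addr(unsigned long addr)
    {
            unsigned long size, offset;

            if (kallsyms_lookup_size_offset(addr, &size, &offset))
                    pr_info("%pS: offset %#lx within a %#lx-byte symbol\n",
                            (void *)addr, offset, size);
    }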
|
D | relay.c |
    303  buf->offset = 0;  in __relay_reset()
    670  if (buf->offset != buf->chan->subbuf_size + 1) {  in relay_switch_subbuf()
    671  buf->prev_padding = buf->chan->subbuf_size - buf->offset;  in relay_switch_subbuf()
    697  buf->offset = 0;  in relay_switch_subbuf()
    699  buf->offset = buf->chan->subbuf_size + 1;  in relay_switch_subbuf()
    705  if (unlikely(length + buf->offset > buf->chan->subbuf_size))  in relay_switch_subbuf()
    890  buf->offset == buf->bytes_consumed)  in relay_file_read_consume()
    905  (buf->offset == subbuf_size))  in relay_file_read_consume()
    926  if (unlikely(buf->offset > subbuf_size)) {  in relay_file_read_avail()
    938  produced = (produced % n_subbufs) * subbuf_size + buf->offset;  in relay_file_read_avail()
         [all …]
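In relay, buf->offset is the write position inside the current sub-buffer; the value subbuf_size + 1 serves as a "sub-buffer already switched" sentinel, and the unused tail is recorded in prev_padding so readers can skip it. A user-space sketch of just that padding arithmetic, with names mirroring the hits above; an assumption-level simplification, not the relay implementation:

    #include <stddef.h>

    struct subbuf_state {
            size_t subbuf_size;
            size_t offset;          /* current write position */
            size_t prev_padding;    /* unused tail of the previous sub-buffer */
    };

    static void switch_subbuf(struct subbuf_state *buf)
    {
            if (buf->offset != buf->subbuf_size + 1)  /* not yet switched */
                    buf->prev_padding = buf->subbuf_size - buf->offset;
            buf->offset = 0;        /* start of the next sub-buffer */
    }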
|
/kernel/trace/ |
D | trace_probe.c |
    165  void __trace_probe_log_err(int offset, int err_type)  in __trace_probe_log_err() argument
    190  offset = 0;  in __trace_probe_log_err()
    204  trace_probe_err_text, err_type, pos + offset);  in __trace_probe_log_err()
    210  int traceprobe_split_symbol_offset(char *symbol, long *offset)  in traceprobe_split_symbol_offset() argument
    215  if (!offset)  in traceprobe_split_symbol_offset()
    220  ret = kstrtol(tmp, 0, offset);  in traceprobe_split_symbol_offset()
    225  *offset = 0;  in traceprobe_split_symbol_offset()
    232  char *buf, int offset)  in traceprobe_parse_event_name() argument
    243  trace_probe_log_err(offset, NO_GROUP_NAME);  in traceprobe_parse_event_name()
    247  trace_probe_log_err(offset, GROUP_TOO_LONG);  in traceprobe_parse_event_name()
         [all …]
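traceprobe_split_symbol_offset() splits a probe location of the form "symbol+offset" (or "symbol-offset") into its two parts. A user-space re-implementation of that split, assuming the same '+'/'-' syntax; a sketch, not the kernel code itself:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /* split "vfs_read+0x10" into "vfs_read" and 0x10 */
    static int split_symbol_offset(char *symbol, long *offset)
    {
            char *tmp;

            if (!offset)
                    return -EINVAL;
            tmp = strpbrk(symbol, "+-");
            if (!tmp) {
                    *offset = 0;    /* no offset given */
                    return 0;
            }
            *offset = strtol(tmp, NULL, 0); /* the sign is part of the number */
            *tmp = '\0';                    /* terminate the symbol name */
            return 0;
    }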
|
D | trace_probe_tmpl.h |
    116  ret = probe_mem_read(&val, (void *)val + code->offset,  in process_fetch_insn_bottom()
    121  (void *)val + code->offset, sizeof(val));  in process_fetch_insn_bottom()
    135  ret = fetch_store_strlen(val + code->offset);  in process_fetch_insn_bottom()
    139  ret = fetch_store_strlen_user(val + code->offset);  in process_fetch_insn_bottom()
    143  ret = fetch_store_symstrlen(val + code->offset);  in process_fetch_insn_bottom()
    156  probe_mem_read(dest, (void *)val + code->offset, code->size);  in process_fetch_insn_bottom()
    159  probe_mem_read_user(dest, (void *)val + code->offset, code->size);  in process_fetch_insn_bottom()
    163  ret = fetch_store_string(val + code->offset, dest, base);  in process_fetch_insn_bottom()
    167  ret = fetch_store_string_user(val + code->offset, dest, base);  in process_fetch_insn_bottom()
    171  ret = fetch_store_symstring(val + code->offset, dest, base);  in process_fetch_insn_bottom()
         [all …]
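Every fetch opcode above applies code->offset to the value produced by the previous step; that is how argument specs like "+8(+0(%di))" walk pointer chains. A simplified user-space sketch of one dereference step, standing in for probe_mem_read(); the struct is a stand-in, not the kernel's fetch_insn:

    #include <string.h>

    struct fetch_insn {
            long offset;    /* displacement applied to the fetched pointer */
    };

    /* one FETCH_OP_DEREF-style step: val = *(void **)(val + code->offset) */
    static void deref_step(void **val, const struct fetch_insn *code)
    {
            void *next;

            memcpy(&next, (char *)*val + code->offset, sizeof(next));
            *val = next;
    }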
|
D | trace_events_inject.c |
    146  if (field->size + field->offset > size)  in trace_get_entry_size()
    147  size = field->size + field->offset;  in trace_get_entry_size()
    175  str_item = (u32 *)(entry + field->offset);  in trace_alloc_entry()
    180  paddr = (char **)(entry + field->offset);  in trace_alloc_entry()
    216  strlcpy(entry + field->offset, addr, field->size);  in parse_entry()
    231  str_item = (u32 *)(entry + field->offset);  in parse_entry()
    236  paddr = (char **)(entry + field->offset);  in parse_entry()
    244  memcpy(entry + field->offset, &tmp, 1);  in parse_entry()
    250  memcpy(entry + field->offset, &tmp, 2);  in parse_entry()
    256  memcpy(entry + field->offset, &tmp, 4);  in parse_entry()
         [all …]
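parse_entry() writes each parsed value into the synthesized record at field->offset, dispatching on field->size, as the memcpy hits above show. A user-space sketch of that store, using per-size temporaries so it stays endian-safe; an illustration, not the file's code:

    #include <stdint.h>
    #include <string.h>

    static void store_field(void *entry, size_t offset, int size, uint64_t val)
    {
            char *dst = (char *)entry + offset;

            switch (size) {
            case 1: { uint8_t  t = val; memcpy(dst, &t, 1); break; }
            case 2: { uint16_t t = val; memcpy(dst, &t, 2); break; }
            case 4: { uint32_t t = val; memcpy(dst, &t, 4); break; }
            case 8: { uint64_t t = val; memcpy(dst, &t, 8); break; }
            }
    }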
|
D | trace_probe.h |
     73  u32 offset = get_loc_offs(loc);  in update_data_loc() local
     75  return make_data_loc(maxlen - consumed, offset + consumed);  in update_data_loc()
    118  int offset;  member
    224  unsigned int offset; /* Offset from argument entry */  member
    370  extern int traceprobe_split_symbol_offset(char *symbol, long *offset);
    372  char *buf, int offset);
    394  size_t offset, struct trace_probe *tp);
    473  void __trace_probe_log_err(int offset, int err);
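make_data_loc()/get_loc_offs() manipulate the trace-event __data_loc encoding: one u32 with the length in the high 16 bits and the buffer offset in the low 16. update_data_loc() above shifts the offset forward and shrinks the length as data is consumed. The encoding, sketched as standalone helpers:

    #include <stdint.h>

    /* __data_loc word: length << 16 | offset */
    static inline uint32_t make_data_loc(int len, int offs)
    {
            return ((uint32_t)len << 16) | ((uint32_t)offs & 0xffff);
    }

    static inline uint32_t get_loc_offs(uint32_t loc)
    {
            return loc & 0xffff;
    }

    /* what update_data_loc() computes after 'consumed' bytes are used */
    static inline uint32_t update_data_loc(uint32_t loc, int maxlen, int consumed)
    {
            return make_data_loc(maxlen - consumed, get_loc_offs(loc) + consumed);
    }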
|
D | trace_uprobe.c |
     61  unsigned long offset;  member
    211  base_addr = udd->bp_addr - udd->tu->offset;  in translate_user_vaddr()
    297  (int)(sizeof(void *) * 2), tu->offset);  in trace_uprobe_match_command_head()
    300  (int)(sizeof(void *) * 2), tu->offset,  in trace_uprobe_match_command_head()
    420  comp->offset != orig->offset)  in trace_uprobe_has_same_uprobe()
    482  new->offset == tmp->offset &&  in validate_ref_ctr_offset()
    547  unsigned long offset, ref_ctr_offset;  in __trace_uprobe_create() local
    641  ret = kstrtoul(arg, 0, &offset);  in __trace_uprobe_create()
    668  snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);  in __trace_uprobe_create()
    683  tu->offset = offset;  in __trace_uprobe_create()
         [all …]
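A uprobe is keyed by (inode, file offset), so at hit time translate_user_vaddr() recovers the mapping's load base by subtracting tu->offset from the breakpoint address; any other file offset can then be turned into a runtime address. A sketch of that arithmetic, with hypothetical parameter names:

    /* at hit time: bp_addr == load base + probe_offset, therefore ... */
    static unsigned long file_offset_to_vaddr(unsigned long bp_addr,
                                              unsigned long probe_offset,
                                              unsigned long file_offset)
    {
            unsigned long base_addr = bp_addr - probe_offset; /* load base */

            return base_addr + file_offset;
    }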
|
D | trace_kprobe.c |
     96  return tk->rp.kp.offset;  in trace_kprobe_offset()
    149  else if (tk->rp.kp.offset)  in trace_kprobe_match_command_head()
    151  trace_kprobe_symbol(tk), tk->rp.kp.offset);  in trace_kprobe_match_command_head()
    198  addr += tk->rp.kp.offset;  in trace_kprobe_address()
    223  tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;  in trace_kprobe_on_func_entry()
    278  tk->rp.kp.offset = offs;  in alloc_trace_kprobe()
    438  unsigned long offset, size;  in __within_notrace_func() local
    440  if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))  in __within_notrace_func()
    444  addr -= offset;  in __within_notrace_func()
    776  long offset = 0;  in __trace_kprobe_create() local
         [all …]
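The offsets here run in both directions: trace_kprobe_address() adds the user's instruction offset to the resolved symbol address ("sym+offs" notation), while __within_notrace_func() subtracts the in-symbol offset reported by kallsyms to find the function start. A sketch of the latter direction; example_func_start() is a hypothetical helper:

    #include <linux/kallsyms.h>

    /* return the start address of the function containing addr, or 0 */
    static unsigned long example_func_start(unsigned long addr)
    {
            unsigned long size, offset;

            if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
                    return 0;
            return addr - offset;
    }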
|
/kernel/entry/ |
D | syscall_user_dispatch.c |
     39  if (likely(instruction_pointer(regs) - sd->offset < sd->len))  in syscall_user_dispatch()
     71  int set_syscall_user_dispatch(unsigned long mode, unsigned long offset,  in set_syscall_user_dispatch() argument
     76  if (offset || len || selector)  in set_syscall_user_dispatch()
     86  if (offset && offset + len <= offset)  in set_syscall_user_dispatch()
     98  current->syscall_dispatch.offset = offset;  in set_syscall_user_dispatch()
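Two idioms are visible above. The fast path tests "ip - offset < len", which with unsigned arithmetic is equivalent to "offset <= ip && ip < offset + len" in a single compare; setup rejects ranges whose end wraps around the address space. Sketched in isolation:

    #include <stdbool.h>

    /* single-compare range test: offset <= ip < offset + len */
    static bool in_allowed_range(unsigned long ip, unsigned long offset,
                                 unsigned long len)
    {
            return ip - offset < len;
    }

    /* the overflow check from set_syscall_user_dispatch() */
    static bool range_wraps(unsigned long offset, unsigned long len)
    {
            return offset && offset + len <= offset;
    }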
|
/kernel/time/ |
D | namespace.c |
     26  ktime_t offset;  in do_timens_ktime_to_host() local
     30  offset = timespec64_to_ktime(ns_offsets->monotonic);  in do_timens_ktime_to_host()
     34  offset = timespec64_to_ktime(ns_offsets->boottime);  in do_timens_ktime_to_host()
     44  if (tim < offset) {  in do_timens_ktime_to_host()
     51  tim = ktime_sub(tim, offset);  in do_timens_ktime_to_host()
    182  struct timens_offset *offset = vdata->offset;  in timens_setup_vdso_data() local
    188  offset[CLOCK_MONOTONIC] = monotonic;  in timens_setup_vdso_data()
    189  offset[CLOCK_MONOTONIC_RAW] = monotonic;  in timens_setup_vdso_data()
    190  offset[CLOCK_MONOTONIC_COARSE] = monotonic;  in timens_setup_vdso_data()
    191  offset[CLOCK_BOOTTIME] = boottime;  in timens_setup_vdso_data()
         [all …]
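do_timens_ktime_to_host() converts a namespace-relative expiry to host time by subtracting the per-clock offset; a value earlier than the offset has already passed on the host, so it is clamped rather than allowed to go negative. A stripped-down sketch of that clamp-and-subtract:

    #include <stdint.h>

    typedef int64_t ktime_t;        /* stand-in for the kernel type */

    static ktime_t timens_ktime_to_host(ktime_t tim, ktime_t offset)
    {
            if (tim < offset)
                    return 0;       /* already expired on the host */
            return tim - offset;
    }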
|
D | ntp.c |
    125  static inline s64 ntp_offset_chunk(s64 offset)  in ntp_offset_chunk() argument
    128  return offset;  in ntp_offset_chunk()
    130  return shift_right(offset, SHIFT_PLL + time_constant);  in ntp_offset_chunk()
    210  static inline s64 ntp_offset_chunk(s64 offset)  in ntp_offset_chunk() argument
    212  return shift_right(offset, SHIFT_PLL + time_constant);  in ntp_offset_chunk()
    296  static void ntp_update_offset(long offset)  in ntp_update_offset() argument
    307  offset = clamp(offset, -USEC_PER_SEC, USEC_PER_SEC);  in ntp_update_offset()
    308  offset *= NSEC_PER_USEC;  in ntp_update_offset()
    315  offset = clamp(offset, -MAXPHASE, MAXPHASE);  in ntp_update_offset()
    327  offset64 = offset;  in ntp_update_offset()
         [all …]
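When the adjtimex caller supplies the phase offset in microseconds (no STA_NANO), ntp_update_offset() clamps it to one second, converts to nanoseconds, and clamps again to MAXPHASE (500 ms) before feeding the PLL. A user-space sketch of that conversion path, with the constants copied from the kernel's definitions:

    #include <stdint.h>

    #define USEC_PER_SEC  1000000L
    #define NSEC_PER_USEC 1000L
    #define MAXPHASE      500000000L        /* max phase error, ns */

    static int64_t clamp64(int64_t v, int64_t lo, int64_t hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    /* the unit conversion and clamping at the top of ntp_update_offset() */
    static int64_t ntp_phase_ns(long offset_usec)
    {
            int64_t ns;

            ns = clamp64(offset_usec, -USEC_PER_SEC, USEC_PER_SEC) * NSEC_PER_USEC;
            return clamp64(ns, -MAXPHASE, MAXPHASE);
    }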
|
D | timer_list.c |
    109  (unsigned long long) ktime_to_ns(base->offset));  in print_base()
    112  print_active_timers(m, base, now + ktime_to_ns(base->offset));  in print_base()
    301  static void *move_iter(struct timer_list_iter *iter, loff_t offset)  in move_iter() argument
    303  for (; offset; offset--) {  in move_iter()
    320  static void *timer_list_start(struct seq_file *file, loff_t *offset)  in timer_list_start() argument
    324  if (!*offset)  in timer_list_start()
    328  return move_iter(iter, *offset);  in timer_list_start()
    331  static void *timer_list_next(struct seq_file *file, void *v, loff_t *offset)  in timer_list_next() argument
    334  ++*offset;  in timer_list_next()
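timer_list_start()/timer_list_next() follow the standard seq_file contract: *offset counts records already produced, ->next increments it, and ->start rewinds and replays the walk to that position (move_iter() above). A generic sketch of the contract over a hypothetical fixed array; not the timer_list iterator itself:

    #include <linux/kernel.h>
    #include <linux/seq_file.h>

    static int example_records[16];         /* hypothetical record store */

    static void *example_start(struct seq_file *m, loff_t *offset)
    {
            if (*offset >= ARRAY_SIZE(example_records))
                    return NULL;            /* end of sequence */
            return &example_records[*offset];
    }

    static void *example_next(struct seq_file *m, void *v, loff_t *offset)
    {
            ++*offset;                      /* one record consumed */
            return example_start(m, offset);
    }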
|
D | timekeeping.c |
    202  static void timekeeping_check_update(struct timekeeper *tk, u64 offset)  in timekeeping_check_update() argument
    208  if (offset > max_cycles) {  in timekeeping_check_update()
    210  offset, name, max_cycles);  in timekeeping_check_update()
    213  if (offset > (max_cycles >> 1)) {  in timekeeping_check_update()
    215  offset, name, max_cycles >> 1);  in timekeeping_check_update()
    282  static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)  in timekeeping_check_update() argument
    867  ktime_t base, *offset = offsets[offs];  in ktime_get_with_offset() local
    874  base = ktime_add(tk->tkr_mono.base, *offset);  in ktime_get_with_offset()
    888  ktime_t base, *offset = offsets[offs];  in ktime_get_coarse_with_offset() local
    895  base = ktime_add(tk->tkr_mono.base, *offset);  in ktime_get_coarse_with_offset()
         [all …]
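ktime_get_with_offset() derives CLOCK_REALTIME, CLOCK_BOOTTIME, and CLOCK_TAI from the single monotonic timekeeper by adding a per-clock constant, indexed by the TK_OFFS_* enum. A stripped-down sketch of that indexing, omitting the seqcount retry loop and the clocksource delta the real function also folds in:

    #include <stdint.h>

    typedef int64_t ktime_t;        /* stand-in for the kernel type */

    enum tk_offsets { TK_OFFS_REAL, TK_OFFS_BOOT, TK_OFFS_TAI, TK_OFFS_MAX };

    static ktime_t tkr_mono_base;           /* monotonic base time */
    static ktime_t offs[TK_OFFS_MAX];       /* realtime/boottime/tai offsets */

    static ktime_t get_with_offset(enum tk_offsets which)
    {
            return tkr_mono_base + offs[which];
    }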
|
/kernel/events/ |
D | uprobes.c |
     63  loff_t offset;  member
    131  static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)  in offset_to_vaddr() argument
    133  return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);  in offset_to_vaddr()
    413  (unsigned long long) uprobe->offset,  in update_ref_ctr_warn()
    627  if (l_offset < r->offset)  in uprobe_cmp()
    630  if (l_offset > r->offset)  in uprobe_cmp()
    641  loff_t offset;  member
    647  return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));  in __uprobe_cmp_key()
    653  return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));  in __uprobe_cmp()
    656  static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)  in __find_uprobe() argument
         [all …]
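offset_to_vaddr() is the core translation here: vm_pgoff records, in pages, the file offset at which the VMA begins, so a probe's file offset maps to vm_start + offset - (vm_pgoff << PAGE_SHIFT). The same formula as a standalone sketch, assuming 4 KiB pages:

    #include <stdint.h>

    #define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

    static unsigned long offset_to_vaddr(unsigned long vm_start,
                                         unsigned long vm_pgoff,
                                         int64_t offset)
    {
            /* widen before shifting, as the kernel's loff_t cast does */
            return vm_start + offset - ((int64_t)vm_pgoff << PAGE_SHIFT);
    }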
|
/kernel/power/ |
D | user.c |
    205  compat_loff_t offset;  member
    212  sector_t offset;  in snapshot_set_swap_area() local
    224  offset = swap_area.offset;  in snapshot_set_swap_area()
    231  offset = swap_area.offset;  in snapshot_set_swap_area()
    238  data->swap = swap_type_of(swdev, offset);  in snapshot_set_swap_area()
    251  sector_t offset;  in snapshot_ioctl() local
    365  offset = alloc_swapdev_block(data->swap);  in snapshot_ioctl()
    366  if (offset) {  in snapshot_ioctl()
    367  offset <<= PAGE_SHIFT;  in snapshot_ioctl()
    368  error = put_user(offset, (loff_t __user *)arg);  in snapshot_ioctl()
|
D | swap.c |
    178  unsigned long offset;  in alloc_swapdev_block() local
    180  offset = swp_offset(get_swap_page_of_type(swap));  in alloc_swapdev_block()
    181  if (offset) {  in alloc_swapdev_block()
    182  if (swsusp_extents_insert(offset))  in alloc_swapdev_block()
    183  swap_free(swp_entry(swap, offset));  in alloc_swapdev_block()
    185  return swapdev_block(swap, offset);  in alloc_swapdev_block()
    202  unsigned long offset;  in free_all_swap_pages() local
    206  for (offset = ext->start; offset <= ext->end; offset++)  in free_all_swap_pages()
    207  swap_free(swp_entry(swap, offset));  in free_all_swap_pages()
    376  static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)  in write_page() argument
         [all …]
|
/kernel/locking/ |
D | qspinlock_paravirt.h |
    207  #define for_each_hash_entry(he, offset, hash) \  argument
    208  for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0; \
    209  offset < (1 << pv_lock_hash_bits); \
    210  offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])
    214  unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);  in pv_hash() local
    218  for_each_hash_entry(he, offset, hash) {  in pv_hash()
    241  unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);  in pv_unhash() local
    245  for_each_hash_entry(he, offset, hash) {  in pv_unhash()
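for_each_hash_entry() is open addressing with linear probing: the starting bucket is rounded down to a cache line of PV_HE_PER_LINE entries, then the probe visits every slot of the power-of-two table, wrapping with a mask. A user-space sketch of the same probe sequence; the table size and entries-per-line values are assumptions:

    #include <stddef.h>

    #define HASH_BITS 8                     /* assumption */
    #define HASH_SIZE (1u << HASH_BITS)
    #define PER_LINE  4                     /* entries per cache line, assumed */

    struct entry { void *lock; };
    static struct entry table[HASH_SIZE];

    static struct entry *probe(unsigned int hash, void *lock)
    {
            unsigned int offset, slot;

            hash &= ~(PER_LINE - 1);        /* align start to a cache line */
            for (offset = 0; offset < HASH_SIZE; offset++) {
                    slot = (hash + offset) & (HASH_SIZE - 1);   /* wrap */
                    if (!table[slot].lock || table[slot].lock == lock)
                            return &table[slot];
            }
            return NULL;    /* table full (the kernel treats this as a bug) */
    }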
|
/kernel/sched/ |
D | stats.c |
     83  static void *schedstat_start(struct seq_file *file, loff_t *offset)  in schedstat_start() argument
     85  unsigned long n = *offset;  in schedstat_start()
     97  *offset = n + 1;  in schedstat_start()
    105  static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)  in schedstat_next() argument
    107  (*offset)++;  in schedstat_next()
    109  return schedstat_start(file, offset);  in schedstat_next()
|
D | loadavg.c |
     72  void get_avenrun(unsigned long *loads, unsigned long offset, int shift)  in get_avenrun() argument
     74  loads[0] = (avenrun[0] + offset) << shift;  in get_avenrun()
     75  loads[1] = (avenrun[1] + offset) << shift;  in get_avenrun()
     76  loads[2] = (avenrun[2] + offset) << shift;  in get_avenrun()
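get_avenrun() hands out the load averages in FSHIFT = 11 fixed point; offset is a rounding bias (the /proc/loadavg reader passes FIXED_1/200) and shift rescales the result. A user-space sketch that decodes values the way /proc/loadavg does, reusing the kernel's LOAD_INT/LOAD_FRAC macros; the sample numbers are made up:

    #include <stdio.h>

    #define FSHIFT   11
    #define FIXED_1  (1 << FSHIFT)
    #define LOAD_INT(x)  ((x) >> FSHIFT)
    #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

    static unsigned long avenrun[3] = { 2252, 1126, 563 }; /* made-up samples */

    int main(void)
    {
            unsigned long loads[3];
            int i;

            /* what get_avenrun(loads, FIXED_1/200, 0) computes, inlined */
            for (i = 0; i < 3; i++)
                    loads[i] = (avenrun[i] + FIXED_1 / 200) << 0;

            printf("%lu.%02lu %lu.%02lu %lu.%02lu\n",
                   LOAD_INT(loads[0]), LOAD_FRAC(loads[0]),
                   LOAD_INT(loads[1]), LOAD_FRAC(loads[1]),
                   LOAD_INT(loads[2]), LOAD_FRAC(loads[2]));
            return 0;
    }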
|
/kernel/dma/ |
D | debug.c |
     76  size_t offset;  member
    395  return __pfn_to_phys(entry->pfn) + entry->offset;  in phys_addr()
    397  return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;  in phys_addr()
    460  (entry->offset >> L1_CACHE_SHIFT);  in to_cacheline_number()
   1052  struct page *page, size_t offset)  in check_for_stack() argument
   1061  addr = page_address(page) + offset;  in check_for_stack()
   1072  addr = (u8 *)current->stack + i * PAGE_SIZE + offset;  in check_for_stack()
   1203  void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,  in debug_dma_map_page() argument
   1222  entry->offset = offset;  in debug_dma_map_page()
   1228  check_for_stack(dev, page, offset);  in debug_dma_map_page()
         [all …]
|
D | swiotlb.c |
    409  unsigned int offset = orig_addr & ~PAGE_MASK;  in swiotlb_bounce() local
    415  sz = min_t(size_t, PAGE_SIZE - offset, size);  in swiotlb_bounce()
    420  memcpy(vaddr, buffer + offset, sz);  in swiotlb_bounce()
    422  memcpy(buffer + offset, vaddr, sz);  in swiotlb_bounce()
    429  offset = 0;  in swiotlb_bounce()
    476  unsigned int offset = swiotlb_align_offset(dev, orig_addr);  in swiotlb_find_slots() local
    526  alloc_size - (offset + ((i - index) << IO_TLB_SHIFT));  in swiotlb_find_slots()
    552  unsigned int offset = swiotlb_align_offset(dev, orig_addr);  in swiotlb_tbl_map_single() local
    570  alloc_size + offset, alloc_align_mask);  in swiotlb_tbl_map_single()
    584  for (i = 0; i < nr_slots(alloc_size + offset); i++)  in swiotlb_tbl_map_single()
         [all …]
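swiotlb_bounce() copies between the original buffer and the bounce buffer one page at a time because the original may live in highmem; only the first chunk starts at a sub-page offset (orig_addr & ~PAGE_MASK), after which offset resets to 0. A flat-memory sketch of the chunking (the kernel maps each page before its memcpy; one copy direction shown):

    #include <stddef.h>
    #include <string.h>

    #define PAGE_SIZE 4096u /* assumption */

    /* copy 'size' bytes starting at sub-page 'offset' into 'bounce' */
    static void bounce_to_device(char *bounce, const char *orig_page,
                                 size_t offset, size_t size)
    {
            while (size) {
                    size_t sz = PAGE_SIZE - offset;

                    if (sz > size)
                            sz = size;
                    memcpy(bounce, orig_page + offset, sz);
                    size -= sz;
                    bounce += sz;
                    orig_page += PAGE_SIZE; /* next source page */
                    offset = 0;             /* later chunks are page-aligned */
            }
    }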
|
/kernel/irq/ |
D | irq_sim.c |
    103  unsigned int offset = 0;  in irq_sim_handle_irq() local
    109  offset = find_next_bit(work_ctx->pending,  in irq_sim_handle_irq()
    110  work_ctx->irq_count, offset);  in irq_sim_handle_irq()
    111  clear_bit(offset, work_ctx->pending);  in irq_sim_handle_irq()
    112  irqnum = irq_find_mapping(work_ctx->domain, offset);  in irq_sim_handle_irq()
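irq_sim_handle_irq() drains the pending bitmap: find the next set bit, clear it, map the bit position to a Linux IRQ number, and handle it. A user-space sketch of the same drain loop over a single word; 'fire' stands in for the irq_find_mapping()-and-handle step:

    #include <stdint.h>

    /* dispatch every set bit in *pending, lowest offset first */
    static void drain_pending(uint64_t *pending, void (*fire)(unsigned int))
    {
            while (*pending) {
                    unsigned int offset = __builtin_ctzll(*pending);

                    *pending &= ~(1ULL << offset);  /* clear_bit() */
                    fire(offset);                   /* map and handle */
            }
    }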
|
D | ipi.c |
     26  unsigned int nr_irqs, offset;  in irq_reserve_ipi() local
     54  offset = 0;  in irq_reserve_ipi()
     64  offset = cpumask_first(dest);  in irq_reserve_ipi()
     69  next = cpumask_next_zero(offset, dest);  in irq_reserve_ipi()
     95  data->common->ipi_offset = offset;  in irq_reserve_ipi()
|
/kernel/rcu/ |
D | tiny.c |
     88  unsigned long offset = (unsigned long)head->func;  in rcu_reclaim_tiny() local
     91  if (__is_kvfree_rcu_offset(offset)) {  in rcu_reclaim_tiny()
     92  trace_rcu_invoke_kvfree_callback("", head, offset);  in rcu_reclaim_tiny()
     93  kvfree((void *)head - offset);  in rcu_reclaim_tiny()
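rcu_reclaim_tiny() decodes the kvfree_rcu() trick: instead of a callback pointer, head->func may hold the rcu_head's byte offset inside its enclosing object (__is_kvfree_rcu_offset() treats any value below 4096 that way), so the whole object is freed at head minus that offset. A user-space sketch of the decode:

    #include <stdlib.h>

    struct rcu_head {
            void (*func)(struct rcu_head *head);
    };

    /* mirrors the kernel's __is_kvfree_rcu_offset() test */
    #define IS_KVFREE_OFFSET(off)   ((off) < 4096UL)

    static void reclaim(struct rcu_head *head)
    {
            unsigned long offset = (unsigned long)head->func;

            if (IS_KVFREE_OFFSET(offset))
                    free((char *)head - offset);    /* free enclosing object */
            else
                    head->func(head);               /* ordinary RCU callback */
    }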
|