/kernel/
resource.c:
    141  static void free_resource(struct resource *res)  in free_resource() argument
    149  if (res && PageSlab(virt_to_head_page(res)))  in free_resource()
    150  kfree(res);  in free_resource()
    315  struct resource *res)  in find_next_iomem_res() argument
    319  if (!res)  in find_next_iomem_res()
    349  *res = (struct resource) {  in find_next_iomem_res()
    367  struct resource res;  in __walk_iomem_res_desc() local
    371  !find_next_iomem_res(start, end, flags, desc, &res)) {  in __walk_iomem_res_desc()
    372  ret = (*func)(&res, arg);  in __walk_iomem_res_desc()
    376  start = res.end + 1;  in __walk_iomem_res_desc()
    [all …]
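The __walk_iomem_res_desc() hits above show the standard resource-walk shape: find the next matching range at or after start, hand it to the callback, then resume the search just past its end (start = res.end + 1, line 376). Below is a minimal userspace sketch of that loop; range, find_next() and visit() are hypothetical stand-ins for struct resource, find_next_iomem_res() and the func callback.

    #include <stdio.h>

    struct range { unsigned long long start, end; };  /* inclusive bounds, like struct resource */

    /* Hypothetical stand-in for find_next_iomem_res(): fill *out with the next
     * range overlapping [start, end], or return -1 when no match remains. */
    static int find_next(unsigned long long start, unsigned long long end,
                         struct range *out)
    {
        static const struct range table[] = { { 0x1000, 0x1fff }, { 0x3000, 0x3fff } };

        for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
            if (table[i].end >= start && table[i].start <= end) {
                *out = table[i];
                return 0;
            }
        }
        return -1;
    }

    /* Hypothetical callback; a non-zero return stops the walk, exactly as in
     * __walk_iomem_res_desc(). */
    static int visit(struct range *r, void *arg)
    {
        printf("range [%#llx-%#llx]\n", r->start, r->end);
        return 0;
    }

    int main(void)
    {
        struct range res;
        unsigned long long start = 0x0, end = ~0ULL;

        while (start < end && !find_next(start, end, &res)) {
            if (visit(&res, NULL))
                break;
            start = res.end + 1;  /* resume just past the range we handled */
        }
        return 0;
    }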
regset.c:
    12   int res;  in __regset_get() local
    23   res = regset->regset_get(target, regset,  in __regset_get()
    25   if (res < 0) {  in __regset_get()
    27   return res;  in __regset_get()
    30   return size - res;  in __regset_get()
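__regset_get() follows the usual return convention: a negative res is an errno propagated unchanged, while a non-negative res is the number of bytes the ->regset_get() hook left unfilled, so size - res (line 30) is the amount actually produced. A small sketch of the same convention, with a hypothetical get_bytes() standing in for the hook:

    #include <errno.h>
    #include <string.h>

    /* Hypothetical hook in the regset style: fills buf and returns the number
     * of bytes it did NOT fill, or a negative errno on failure. */
    static int get_bytes(char *buf, int size)
    {
        static const char data[] = "payload";

        if (size < (int)sizeof(data))
            return -EINVAL;
        memcpy(buf, data, sizeof(data));
        return size - (int)sizeof(data);  /* unfilled remainder */
    }

    /* Caller-side conversion, mirroring "return size - res;" in __regset_get(). */
    static int read_all(char *buf, int size)
    {
        int res = get_bytes(buf, size);

        if (res < 0)
            return res;        /* propagate -errno unchanged */
        return size - res;     /* bytes actually written */
    }

    int main(void)
    {
        char buf[16];

        return read_all(buf, sizeof(buf)) < 0;  /* returns 8 bytes produced */
    }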
kallsyms.c:
    176  char *res;  in cleanup_symbol_name() local
    178  res = strrchr(s, '$');  in cleanup_symbol_name()
    179  if (res)  in cleanup_symbol_name()
    180  *res = '\0';  in cleanup_symbol_name()
    182  return res != NULL;  in cleanup_symbol_name()
    365  int res;  in lookup_symbol_name() local
    380  res = lookup_module_symbol_name(addr, symname);  in lookup_symbol_name()
    381  if (res)  in lookup_symbol_name()
    382  return res;  in lookup_symbol_name()
    392  int res;  in lookup_symbol_attrs() local
    [all …]
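cleanup_symbol_name() is self-contained enough to lift out whole: it truncates the symbol at the last '$' and reports whether a suffix was stripped. The same logic as a standalone program:

    #include <stdio.h>
    #include <string.h>

    /* Truncate at the last '$' and report whether anything was stripped,
     * matching the kallsyms.c excerpt above. */
    static int cleanup_symbol_name(char *s)
    {
        char *res = strrchr(s, '$');

        if (res)
            *res = '\0';
        return res != NULL;
    }

    int main(void)
    {
        char sym[] = "my_func$constprop";  /* hypothetical mangled symbol */

        if (cleanup_symbol_name(sym))
            printf("stripped to: %s\n", sym);  /* prints "my_func" */
        return 0;
    }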
acct.c:
    146  struct bsd_acct_struct *res;  in acct_get() local
    150  res = to_acct(READ_ONCE(ns->bacct));  in acct_get()
    151  if (!res) {  in acct_get()
    155  if (!atomic_long_inc_not_zero(&res->count)) {  in acct_get()
    161  mutex_lock(&res->lock);  in acct_get()
    162  if (res != to_acct(READ_ONCE(ns->bacct))) {  in acct_get()
    163  mutex_unlock(&res->lock);  in acct_get()
    164  acct_put(res);  in acct_get()
    167  return res;  in acct_get()
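acct_get() combines two classic lockless-lookup steps: atomic_long_inc_not_zero() (line 155) takes a reference only while the count is still non-zero, so a dying object is never resurrected, and the recheck against READ_ONCE(ns->bacct) under the mutex (line 162) catches the published pointer changing in the meantime. A userspace C11 sketch of the inc-not-zero half, with a hypothetical struct obj:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct obj {
        atomic_long count;  /* refcount; 0 means the object is being torn down */
    };

    /* Equivalent of atomic_long_inc_not_zero(): increment only from a non-zero
     * value, retrying on contention, and report whether a reference was taken. */
    static bool get_ref_if_live(struct obj *o)
    {
        long c = atomic_load(&o->count);

        while (c != 0) {
            /* on failure the CAS reloads c, so the loop re-tests for zero */
            if (atomic_compare_exchange_weak(&o->count, &c, c + 1))
                return true;
        }
        return false;
    }

    int main(void)
    {
        struct obj o;

        atomic_init(&o.count, 1);
        return get_ref_if_live(&o) ? 0 : 1;  /* takes the ref; count becomes 2 */
    }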
iomem.c:
    129  static void devm_memremap_release(struct device *dev, void *res)  in devm_memremap_release() argument
    131  memunmap(*(void **)res);  in devm_memremap_release()
    134  static int devm_memremap_match(struct device *dev, void *res, void *match_data)  in devm_memremap_match() argument
    136  return *(void **)res == match_data;  in devm_memremap_match()
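These two callbacks are the whole devres contract: the release callback undoes the resource when the device is torn down, and the match callback lets an explicit devm_memunmap() locate the right record. A sketch of how they plug into devres_alloc()/devres_add(), close to what kernel/iomem.c does around these lines (error handling simplified):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/io.h>

    static void devm_memremap_release(struct device *dev, void *res)
    {
        memunmap(*(void **)res);
    }

    /* Managed memremap(): the devres record stores just the mapped pointer,
     * and devres_add() arranges for the release callback at detach time. */
    void *devm_memremap_sketch(struct device *dev, resource_size_t offset,
                               size_t size, unsigned long flags)
    {
        void **ptr;
        void *addr;

        ptr = devres_alloc(devm_memremap_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
            return ERR_PTR(-ENOMEM);

        addr = memremap(offset, size, flags);
        if (!addr) {
            devres_free(ptr);
            return ERR_PTR(-ENXIO);
        }

        *ptr = addr;
        devres_add(dev, ptr);  /* unmapped automatically on driver detach */
        return addr;
    }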
kexec_file.c:
    495  static int locate_mem_hole_callback(struct resource *res, void *arg)  in locate_mem_hole_callback() argument
    498  u64 start = res->start, end = res->end;  in locate_mem_hole_callback()
    504  if (res->flags & IORESOURCE_SYSRAM_DRIVER_MANAGED)  in locate_mem_hole_callback()
    529  struct resource res = { };  in kexec_walk_memblock() local
    542  res.start = mstart;  in kexec_walk_memblock()
    543  res.end = mend - 1;  in kexec_walk_memblock()
    544  ret = func(&res, kbuf);  in kexec_walk_memblock()
    556  res.start = mstart;  in kexec_walk_memblock()
    557  res.end = mend - 1;  in kexec_walk_memblock()
    558  ret = func(&res, kbuf);  in kexec_walk_memblock()
reboot.c:
    114  static void devm_unregister_reboot_notifier(struct device *dev, void *res)  in devm_unregister_reboot_notifier() argument
    116  WARN_ON(unregister_reboot_notifier(*(struct notifier_block **)res));  in devm_unregister_reboot_notifier()
    759  bool res;  in force_store() local
    764  if (kstrtobool(buf, &res))  in force_store()
    768  reboot_force = res;  in force_store()
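force_store() (lines 759-768) is the usual sysfs-store shape: parse with kstrtobool() first, commit to the global only on success, and return the full count so the write is consumed. A minimal sketch of such a handler; the attribute wiring is omitted and the names are hypothetical:

    #include <linux/kobject.h>
    #include <linux/kstrtox.h>  /* kstrtobool(); older kernels got it via string.h */
    #include <linux/sysfs.h>

    static bool force_flag;

    static ssize_t force_store_sketch(struct kobject *kobj,
                                      struct kobj_attribute *attr,
                                      const char *buf, size_t count)
    {
        bool res;

        if (kstrtobool(buf, &res))   /* rejects anything but 0/1/y/n/on/off */
            return -EINVAL;

        force_flag = res;
        return count;                /* consume the whole write */
    }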
kcov.c:
    458  int res = 0;  in kcov_mmap() local
    473  res = -EINVAL;  in kcov_mmap()
    490  return res;  in kcov_mmap()
    688  int res;  in kcov_ioctl() local
    714  res = kcov_ioctl_locked(kcov, cmd, arg);  in kcov_ioctl()
    719  return res;  in kcov_ioctl()
/kernel/power/
user.c:
    136  ssize_t res;  in snapshot_read() local
    143  res = -ENODATA;  in snapshot_read()
    147  res = snapshot_read_next(&data->handle);  in snapshot_read()
    148  if (res <= 0)  in snapshot_read()
    151  res = PAGE_SIZE - pg_offp;  in snapshot_read()
    154  res = simple_read_from_buffer(buf, count, &pg_offp,  in snapshot_read()
    155  data_of(data->handle), res);  in snapshot_read()
    156  if (res > 0)  in snapshot_read()
    157  *offp += res;  in snapshot_read()
    162  return res;  in snapshot_read()
    [all …]
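The snapshot_read() tail is a common read() idiom: simple_read_from_buffer() copies at most the available bytes to userspace, advances the position it is handed, and returns the byte count (or a negative errno), which the caller then folds into its own file offset (lines 156-157). A reduced sketch with hypothetical names:

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/uaccess.h>

    /* Copy one kernel page to userspace starting at page_off, mirroring the
     * "res = simple_read_from_buffer(...); if (res > 0) *offp += res;" shape. */
    static ssize_t read_page_sketch(char __user *buf, size_t count, loff_t *offp,
                                    const void *page, loff_t page_off)
    {
        loff_t pg_offp = page_off;
        ssize_t res;

        res = simple_read_from_buffer(buf, count, &pg_offp, page, PAGE_SIZE);
        if (res > 0)
            *offp += res;  /* mirror the progress into the real file offset */
        return res;
    }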
swap.c:
    347  int res;  in swsusp_swap_check() local
    350  res = swap_type_of(swsusp_resume_device, swsusp_resume_block);  in swsusp_swap_check()
    352  res = find_first_swap(&swsusp_resume_device);  in swsusp_swap_check()
    353  if (res < 0)  in swsusp_swap_check()
    354  return res;  in swsusp_swap_check()
    355  root_swap = res;  in swsusp_swap_check()
    362  res = set_blocksize(hib_resume_bdev, PAGE_SIZE);  in swsusp_swap_check()
    363  if (res < 0)  in swsusp_swap_check()
    366  return res;  in swsusp_swap_check()
/kernel/cgroup/
misc.c:
    80   return atomic_long_read(&root_cg.res[type].usage);  in misc_cg_res_total_usage()
    119  WARN_ONCE(atomic_long_add_negative(-amount, &cg->res[type].usage),  in misc_cg_cancel_charge()
    145  struct misc_res *res;  in misc_cg_try_charge() local
    155  res = &i->res[type];  in misc_cg_try_charge()
    157  new_usage = atomic_long_add_return(amount, &res->usage);  in misc_cg_try_charge()
    158  if (new_usage > READ_ONCE(res->max) ||  in misc_cg_try_charge()
    160  if (!res->failed) {  in misc_cg_try_charge()
    165  res->failed = true;  in misc_cg_try_charge()
    218  max = READ_ONCE(cg->res[i].max);  in misc_cg_max_show()
    284  WRITE_ONCE(cg->res[type].max, max);  in misc_cg_max_write()
    [all …]
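misc_cg_try_charge() uses the optimistic-charge pattern visible at lines 157-158: add the amount first with atomic_long_add_return(), compare the new total against the limit, and cancel the charge on failure, so the common under-limit path is a single atomic operation. A C11 sketch of the same pattern on a hypothetical counter type:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct counter {
        atomic_long usage;
        long max;
    };

    /* Optimistically charge, then roll back if the limit was exceeded. */
    static bool try_charge(struct counter *c, long amount)
    {
        /* fetch_add returns the old value; +amount gives the post-charge
         * total, like atomic_long_add_return() in the excerpt */
        long new_usage = atomic_fetch_add(&c->usage, amount) + amount;

        if (new_usage > c->max) {
            atomic_fetch_sub(&c->usage, amount);  /* cancel the charge */
            return false;
        }
        return true;
    }

    int main(void)
    {
        struct counter c = { .max = 100 };

        atomic_init(&c.usage, 0);
        return try_charge(&c, 42) ? 0 : 1;  /* succeeds: 42 <= 100 */
    }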
/kernel/irq/
devres.c:
    18   static void devm_irq_release(struct device *dev, void *res)  in devm_irq_release() argument
    20   struct irq_devres *this = res;  in devm_irq_release()
    25   static int devm_irq_match(struct device *dev, void *res, void *data)  in devm_irq_match() argument
    27   struct irq_devres *this = res, *match = data;  in devm_irq_match()
    154  static void devm_irq_desc_release(struct device *dev, void *res)  in devm_irq_desc_release() argument
    156  struct irq_desc_devres *this = res;  in devm_irq_desc_release()
    240  static void devm_irq_remove_generic_chip(struct device *dev, void *res)  in devm_irq_remove_generic_chip() argument
    242  struct irq_generic_chip_devres *this = res;  in devm_irq_remove_generic_chip()
handle.c:
    146  irqreturn_t res;  in __handle_irq_event_percpu() local
    156  res = action->handler(irq, action->dev_id);  in __handle_irq_event_percpu()
    157  trace_irq_handler_exit(irq, action, res);  in __handle_irq_event_percpu()
    163  switch (res) {  in __handle_irq_event_percpu()
    185  retval |= res;  in __handle_irq_event_percpu()
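The retval |= res at line 185 is how results from handlers sharing one interrupt line combine: each reports IRQ_NONE or IRQ_HANDLED, and OR-ing them tells the core whether anyone claimed the interrupt. A toy model (the real irqreturn_t also carries an IRQ_WAKE_THREAD bit, omitted here):

    /* Toy model of the shared-IRQ accumulation in __handle_irq_event_percpu(). */
    enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1 };

    typedef enum irqreturn (*handler_fn)(int irq);

    static enum irqreturn run_handlers(handler_fn *handlers, int n, int irq)
    {
        enum irqreturn retval = IRQ_NONE;

        for (int i = 0; i < n; i++)
            retval = retval | handlers[i](irq);  /* any IRQ_HANDLED wins */

        return retval;
    }

    static enum irqreturn h_none(int irq) { (void)irq; return IRQ_NONE; }
    static enum irqreturn h_hit(int irq)  { (void)irq; return IRQ_HANDLED; }

    int main(void)
    {
        handler_fn handlers[] = { h_none, h_hit };

        return run_handlers(handlers, 2, 10) == IRQ_HANDLED ? 0 : 1;
    }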
chip.c:
    762  irqreturn_t res;  in handle_fasteoi_nmi() local
    770  res = action->handler(irq, action->dev_id);  in handle_fasteoi_nmi()
    771  trace_irq_handler_exit(irq, action, res);  in handle_fasteoi_nmi()
    935  irqreturn_t res;  in handle_percpu_devid_irq() local
    948  res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));  in handle_percpu_devid_irq()
    949  trace_irq_handler_exit(irq, action, res);  in handle_percpu_devid_irq()
    978  irqreturn_t res;  in handle_percpu_devid_fasteoi_nmi() local
    983  res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));  in handle_percpu_devid_fasteoi_nmi()
    984  trace_irq_handler_exit(irq, action, res);  in handle_percpu_devid_fasteoi_nmi()
/kernel/time/
sched_clock.c:
    85   u64 cyc, res;  in sched_clock() local
    94   res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);  in sched_clock()
    97   return res;  in sched_clock()
    155  u64 res, wrap, new_mask, new_epoch, cyc, ns;  in sched_clock_register() local
    214  res = cyc_to_ns(1ULL, new_mult, new_shift);  in sched_clock_register()
    217  bits, r, r_unit, res, wrap);  in sched_clock_register()
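sched_clock() converts cycles to nanoseconds with a fixed-point multiply-and-shift, ns = (cyc * mult) >> shift, so the fast path needs no division; line 214 evaluates the same helper with cyc = 1 to report the clock's resolution. A standalone illustration, with mult/shift chosen for a hypothetical 24 MHz counter:

    #include <stdint.h>
    #include <stdio.h>

    /* ns = (cyc * mult) >> shift: mult / 2^shift approximates ns-per-cycle. */
    static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
    {
        return (cyc * mult) >> shift;
    }

    int main(void)
    {
        /* 24 MHz counter: 1e9 / 24e6 = 41.666... ns per cycle, encoded as
         * mult = round(41.666... * 2^24) = 699050667 with shift = 24. */
        uint32_t mult = 699050667, shift = 24;

        /* one second's worth of cycles comes out as ~1e9 ns */
        printf("%llu ns\n",
               (unsigned long long)cyc_to_ns(24000000ULL, mult, shift));
        return 0;
    }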
clockevents.c:
    29   int res;  member
    403  int res;  in __clockevents_unbind() local
    406  res = __clockevents_try_unbind(cu->ce, smp_processor_id());  in __clockevents_unbind()
    407  if (res == -EAGAIN)  in __clockevents_unbind()
    408  res = clockevents_replace(cu->ce);  in __clockevents_unbind()
    409  cu->res = res;  in __clockevents_unbind()
    419  struct ce_unbind cu = { .ce = ced, .res = -ENODEV };  in clockevents_unbind()
    422  return cu.res;  in clockevents_unbind()
time.c:
    769  struct timespec64 res;  in timespec64_add_safe() local
    771  set_normalized_timespec64(&res, (timeu64_t) lhs.tv_sec + rhs.tv_sec,  in timespec64_add_safe()
    774  if (unlikely(res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)) {  in timespec64_add_safe()
    775  res.tv_sec = TIME64_MAX;  in timespec64_add_safe()
    776  res.tv_nsec = 0;  in timespec64_add_safe()
    779  return res;  in timespec64_add_safe()
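timespec64_add_safe() adds the seconds as unsigned (the timeu64_t cast on line 771), then detects wraparound with the res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec test and saturates to TIME64_MAX rather than returning a wrapped time; the test assumes both inputs are non-negative. A userspace sketch of the same saturating add:

    #include <stdint.h>

    #define TIME64_MAX INT64_MAX  /* same value as the kernel constant */

    struct ts64 { int64_t tv_sec; long tv_nsec; };

    /* Saturating add in the style of timespec64_add_safe(); both inputs are
     * assumed normalized and non-negative, as in the kernel helper. */
    static struct ts64 ts64_add_safe(struct ts64 lhs, struct ts64 rhs)
    {
        struct ts64 res;

        /* add as unsigned so overflow wraps instead of being undefined */
        res.tv_sec = (int64_t)((uint64_t)lhs.tv_sec + (uint64_t)rhs.tv_sec);
        res.tv_nsec = lhs.tv_nsec + rhs.tv_nsec;
        if (res.tv_nsec >= 1000000000L) {  /* carry from the nanosecond field */
            res.tv_sec++;
            res.tv_nsec -= 1000000000L;
        }

        if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec) {
            res.tv_sec = TIME64_MAX;  /* saturate instead of wrapping */
            res.tv_nsec = 0;
        }
        return res;
    }

    int main(void)
    {
        struct ts64 a = { INT64_MAX - 1, 0 }, b = { 5, 0 };
        struct ts64 r = ts64_add_safe(a, b);

        return r.tv_sec == TIME64_MAX ? 0 : 1;  /* overflow saturates */
    }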
ntp.c:
    631  int res = -EAGAIN;  in sync_hw_clock() local
    651  res = update_persistent_clock64(to_set);  in sync_hw_clock()
    652  if (res != -ENODEV)  in sync_hw_clock()
    656  res = update_rtc(&to_set, &offset_nsec);  in sync_hw_clock()
    657  if (res == -ENODEV)  in sync_hw_clock()
    660  sched_sync_hw_clock(offset_nsec, res != 0);  in sync_hw_clock()
/kernel/debug/kdb/
kdb_support.c:
    267  int kdb_getarea_size(void *res, unsigned long addr, size_t size)  in kdb_getarea_size() argument
    269  int ret = copy_from_kernel_nofault((char *)res, (char *)addr, size);  in kdb_getarea_size()
    292  int kdb_putarea_size(unsigned long addr, void *res, size_t size)  in kdb_putarea_size() argument
    294  int ret = copy_to_kernel_nofault((char *)addr, (char *)res, size);  in kdb_putarea_size()
    318  static int kdb_getphys(void *res, unsigned long addr, size_t size)  in kdb_getphys() argument
    329  memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size);  in kdb_getphys()
/kernel/locking/
rtmutex.c:
    1126 int chain_walk = 0, res;  in task_blocks_on_rt_mutex() local
    1161 res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx);  in task_blocks_on_rt_mutex()
    1162 if (res) {  in task_blocks_on_rt_mutex()
    1167 return res;  in task_blocks_on_rt_mutex()
    1207 res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,  in task_blocks_on_rt_mutex()
    1212 return res;  in task_blocks_on_rt_mutex()
    1392 bool res = true;  in rtmutex_spin_on_owner() local
    1417 res = false;  in rtmutex_spin_on_owner()
    1423 return res;  in rtmutex_spin_on_owner()
    1560 static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,  in rt_mutex_handle_deadlock() argument
    [all …]
/kernel/bpf/
offload.c:
    302  int res;  in bpf_prog_offload_info_fill() local
    305  res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);  in bpf_prog_offload_info_fill()
    306  if (res) {  in bpf_prog_offload_info_fill()
    309  return res;  in bpf_prog_offload_info_fill()
    526  int res;  in bpf_map_offload_info_fill() local
    528  res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);  in bpf_map_offload_info_fill()
    529  if (res) {  in bpf_map_offload_info_fill()
    532  return res;  in bpf_map_offload_info_fill()
helpers.c:
    442  unsigned long long *res, bool *is_negative)  in __bpf_strtoull() argument
    451  if (!buf || !buf_len || !res || !is_negative)  in __bpf_strtoull()
    478  val_len = _parse_integer(cur_buf, base, res);  in __bpf_strtoull()
    493  long long *res)  in __bpf_strtoll() argument
    505  *res = -_res;  in __bpf_strtoll()
    509  *res = _res;  in __bpf_strtoll()
    515  long *, res)  in BPF_CALL_4() argument
    525  *res = _res;  in BPF_CALL_4()
    540  unsigned long *, res)  in BPF_CALL_4() argument
    553  *res = _res;  in BPF_CALL_4()
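__bpf_strtoll() layers sign handling over the unsigned parser: __bpf_strtoull() reports the magnitude plus an is_negative flag, and the caller range-checks before committing *res = -_res or *res = _res (lines 505 and 509). A standalone sketch of that layering on top of the C library's strtoull():

    #include <errno.h>
    #include <limits.h>
    #include <stdbool.h>
    #include <stdlib.h>

    /* Parse a signed value by parsing the magnitude unsigned and applying the
     * sign afterwards, with explicit range checks, as __bpf_strtoll() does. */
    static int strtoll_sketch(const char *buf, int base, long long *res)
    {
        bool is_negative = (buf[0] == '-');
        const char *p = is_negative ? buf + 1 : buf;
        unsigned long long _res;
        char *end;

        errno = 0;
        _res = strtoull(p, &end, base);
        if (errno || end == p)
            return -EINVAL;

        if (is_negative) {
            if (_res > (unsigned long long)LLONG_MAX + 1)
                return -ERANGE;  /* below LLONG_MIN */
            if (_res == (unsigned long long)LLONG_MAX + 1)
                *res = LLONG_MIN;            /* exact LLONG_MIN case */
            else
                *res = -(long long)_res;
        } else {
            if (_res > (unsigned long long)LLONG_MAX)
                return -ERANGE;
            *res = (long long)_res;
        }
        return 0;
    }

    int main(void)
    {
        long long v;

        return strtoll_sketch("-9223372036854775808", 10, &v) == 0 &&
               v == LLONG_MIN ? 0 : 1;
    }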
disasm.c:
    28   const char *res;  in __func_get_name() local
    30   res = cbs->cb_call(cbs->private_data, insn);  in __func_get_name()
    31   if (res)  in __func_get_name()
    32   return res;  in __func_get_name()
bpf_local_storage.c:
    458  u16 i, res = 0;  in bpf_local_storage_cache_idx_get() local
    465  res = i;  in bpf_local_storage_cache_idx_get()
    472  cache->idx_usage_counts[res]++;  in bpf_local_storage_cache_idx_get()
    476  return res;  in bpf_local_storage_cache_idx_get()
/kernel/sched/
psi.c:
    1089 int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)  in psi_show() argument
    1111 if (!(group == &psi_system && res == PSI_CPU && full)) {  in psi_show()
    1113 avg[w] = group->avg[res * 2 + full][w];  in psi_show()
    1114 total = div_u64(group->total[PSI_AVGS][res * 2 + full],  in psi_show()
    1130 char *buf, size_t nbytes, enum psi_res res)  in psi_trigger_create() argument
    1141 state = PSI_IO_SOME + res * 2;  in psi_trigger_create()
    1143 state = PSI_IO_FULL + res * 2;  in psi_trigger_create()
    1321 size_t nbytes, enum psi_res res)  in psi_write() argument
    1351 new = psi_trigger_create(&psi_system, buf, nbytes, res);  in psi_write()