/kernel/ |
D | compat.c |
  447  struct rlimit r;  in COMPAT_SYSCALL_DEFINE2() local
  450  __get_user(r.rlim_cur, &rlim->rlim_cur) ||  in COMPAT_SYSCALL_DEFINE2()
  451  __get_user(r.rlim_max, &rlim->rlim_max))  in COMPAT_SYSCALL_DEFINE2()
  454  if (r.rlim_cur == COMPAT_RLIM_INFINITY)  in COMPAT_SYSCALL_DEFINE2()
  455  r.rlim_cur = RLIM_INFINITY;  in COMPAT_SYSCALL_DEFINE2()
  456  if (r.rlim_max == COMPAT_RLIM_INFINITY)  in COMPAT_SYSCALL_DEFINE2()
  457  r.rlim_max = RLIM_INFINITY;  in COMPAT_SYSCALL_DEFINE2()
  458  return do_prlimit(current, resource, &r, NULL);  in COMPAT_SYSCALL_DEFINE2()
  466  struct rlimit r;  in COMPAT_SYSCALL_DEFINE2() local
  471  ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);  in COMPAT_SYSCALL_DEFINE2()
  [all …]
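
The rows above show the compat setrlimit path widening a 32-bit rlimit into the native struct rlimit, translating the 32-bit infinity sentinel rather than copying it verbatim. A minimal userspace sketch of that widening step, assuming invented compat32_rlimit/native_rlimit types and sentinel values:

    #include <stdint.h>

    #define COMPAT32_RLIM_INFINITY UINT32_MAX   /* hypothetical 32-bit "no limit" sentinel */
    #define NATIVE_RLIM_INFINITY   UINT64_MAX   /* hypothetical native "no limit" sentinel */

    struct compat32_rlimit { uint32_t rlim_cur, rlim_max; };
    struct native_rlimit   { uint64_t rlim_cur, rlim_max; };

    /* Widen a 32-bit limit; the infinity sentinel must be translated, not
     * zero-extended, so "unlimited" stays "unlimited" in the wider type. */
    static void widen_rlimit(const struct compat32_rlimit *in, struct native_rlimit *out)
    {
            out->rlim_cur = (in->rlim_cur == COMPAT32_RLIM_INFINITY)
                            ? NATIVE_RLIM_INFINITY : in->rlim_cur;
            out->rlim_max = (in->rlim_max == COMPAT32_RLIM_INFINITY)
                            ? NATIVE_RLIM_INFINITY : in->rlim_max;
    }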
|
D | audit_watch.c |
  238  static void audit_watch_log_rule_change(struct audit_krule *r, struct audit_watch *w, char *op)  in audit_watch_log_rule_change() argument
  251  audit_log_key(ab, r->filterkey);  in audit_watch_log_rule_change()
  252  audit_log_format(ab, " list=%d res=1", r->listnr);  in audit_watch_log_rule_change()
  263  struct audit_krule *r, *nextr;  in audit_update_watch() local
  290  list_for_each_entry_safe(r, nextr, &owatch->rules, rlist) {  in audit_update_watch()
  292  oentry = container_of(r, struct audit_entry, rule);  in audit_update_watch()
  338  struct audit_krule *r, *nextr;  in audit_remove_parent_watches() local
  343  list_for_each_entry_safe(r, nextr, &w->rules, rlist) {  in audit_remove_parent_watches()
  344  e = container_of(r, struct audit_entry, rule);  in audit_remove_parent_watches()
  345  audit_watch_log_rule_change(r, w, "remove_rule");  in audit_remove_parent_watches()
  [all …]
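
audit_update_watch() and audit_remove_parent_watches() walk an intrusive rule list with list_for_each_entry_safe() and recover the enclosing audit_entry through container_of(). A standalone sketch of that idiom, with a hand-rolled container_of and an invented rule type:

    #include <stddef.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct node { struct node *next; };           /* minimal intrusive link */
    struct rule { int id; struct node link; };    /* link embedded in a larger object */

    /* Free every rule on the list; the successor is saved before the current
     * element is destroyed, which is exactly what the *_safe iterators buy. */
    static void remove_all(struct node **head)
    {
            struct node *cur = *head, *next;

            while (cur) {
                    next = cur->next;
                    free(container_of(cur, struct rule, link));
                    cur = next;
            }
            *head = NULL;
    }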
|
D | sys.c |
  1534  static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)  in accumulate_thread_rusage() argument
  1536  r->ru_nvcsw += t->nvcsw;  in accumulate_thread_rusage()
  1537  r->ru_nivcsw += t->nivcsw;  in accumulate_thread_rusage()
  1538  r->ru_minflt += t->min_flt;  in accumulate_thread_rusage()
  1539  r->ru_majflt += t->maj_flt;  in accumulate_thread_rusage()
  1540  r->ru_inblock += task_io_get_inblock(t);  in accumulate_thread_rusage()
  1541  r->ru_oublock += task_io_get_oublock(t);  in accumulate_thread_rusage()
  1544  static void k_getrusage(struct task_struct *p, int who, struct rusage *r)  in k_getrusage() argument
  1551  memset((char *)r, 0, sizeof (*r));  in k_getrusage()
  1556  accumulate_thread_rusage(p, r);  in k_getrusage()
  [all …]
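
k_getrusage() zeroes the caller's struct rusage and then folds each thread's counters into it via accumulate_thread_rusage(). A userspace sketch of the same zero-then-accumulate pattern, with made-up counter types:

    #include <string.h>

    struct thread_stats { long nvcsw, nivcsw, minflt, majflt; };  /* hypothetical per-thread counters */
    struct usage_totals { long nvcsw, nivcsw, minflt, majflt; };

    static void accumulate_one(const struct thread_stats *t, struct usage_totals *r)
    {
            r->nvcsw  += t->nvcsw;
            r->nivcsw += t->nivcsw;
            r->minflt += t->minflt;
            r->majflt += t->majflt;
    }

    /* Zero the totals first, then add each thread in turn. */
    static void sum_threads(const struct thread_stats *threads, int n, struct usage_totals *r)
    {
            memset(r, 0, sizeof(*r));
            for (int i = 0; i < n; i++)
                    accumulate_one(&threads[i], r);
    }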
|
D | auditfilter.c |
  1044  struct audit_krule *r;  in audit_list_rules() local
  1050  list_for_each_entry(r, &audit_rules_list[i], list) {  in audit_list_rules()
  1053  data = audit_krule_to_data(r);  in audit_list_rules()
  1411  static int update_lsm_rule(struct audit_krule *r)  in update_lsm_rule() argument
  1413  struct audit_entry *entry = container_of(r, struct audit_entry, rule);  in update_lsm_rule()
  1417  if (!security_audit_rule_known(r))  in update_lsm_rule()
  1420  nentry = audit_dupe_rule(r);  in update_lsm_rule()
  1428  if (r->watch)  in update_lsm_rule()
  1429  list_del(&r->rlist);  in update_lsm_rule()
  1431  list_del(&r->list);  in update_lsm_rule()
  [all …]
|
D | utsname_sysctl.c |
  39  int r;  in proc_do_uts_string() local
  54  r = proc_dostring(&uts_table, write, buffer, lenp, ppos);  in proc_do_uts_string()
  69  return r;  in proc_do_uts_string()
|
D | resource.c |
  107  struct resource *r = v, *p;  in r_show() local
  112  for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)  in r_show()
  117  start = r->start;  in r_show()
  118  end = r->end;  in r_show()
  127  r->name ? r->name : "<BAD>");  in r_show()
  264  static void __release_child_resources(struct resource *r)  in __release_child_resources() argument
  269  p = r->child;  in __release_child_resources()
  270  r->child = NULL;  in __release_child_resources()
  287  void release_child_resources(struct resource *r)  in release_child_resources() argument
  290  __release_child_resources(r);  in release_child_resources()
  [all …]
|
D | kexec_core.c |
  1331  size_t r;  in vmcoreinfo_append_str() local
  1334  r = vscnprintf(buf, sizeof(buf), fmt, args);  in vmcoreinfo_append_str()
  1337  r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);  in vmcoreinfo_append_str()
  1339  memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);  in vmcoreinfo_append_str()
  1341  vmcoreinfo_size += r;  in vmcoreinfo_append_str()
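
vmcoreinfo_append_str() formats into a small stack buffer, clamps the result to the space left in the destination, and only then copies. A userspace sketch of that bounded-append pattern (buffer names and sizes here are invented):

    #include <stdarg.h>
    #include <stdio.h>
    #include <string.h>

    #define INFO_MAX 4096

    static char   info_data[INFO_MAX];
    static size_t info_size;            /* bytes already used */

    /* Append formatted text, silently truncating once info_data is full. */
    static void info_append(const char *fmt, ...)
    {
            char buf[128];
            size_t r, left = INFO_MAX - info_size;
            va_list args;
            int n;

            va_start(args, fmt);
            n = vsnprintf(buf, sizeof(buf), fmt, args);
            va_end(args);
            if (n < 0)
                    return;               /* formatting error: append nothing */

            r = (size_t)n;
            if (r > sizeof(buf) - 1)
                    r = sizeof(buf) - 1;  /* clamp to what actually landed in buf */
            if (r > left)
                    r = left;             /* clamp to what fits in the destination */

            memcpy(&info_data[info_size], buf, r);
            info_size += r;
    }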
|
D | sysctl.c |
  2789  int r;  in proc_do_cad_pid() local
  2793  r = __do_proc_dointvec(&tmp, table, write, buffer,  in proc_do_cad_pid()
  2795  if (r || !write)  in proc_do_cad_pid()
  2796  return r;  in proc_do_cad_pid()
|
/kernel/time/ |
D | sched_clock.c |
  172  unsigned long r;  in sched_clock_register() local
  213  r = rate;  in sched_clock_register()
  214  if (r >= 4000000) {  in sched_clock_register()
  215  r /= 1000000;  in sched_clock_register()
  218  if (r >= 1000) {  in sched_clock_register()
  219  r /= 1000;  in sched_clock_register()
  230  bits, r, r_unit, res, wrap);  in sched_clock_register()
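
The divides above only exist to report the registered rate in a human-friendly unit in the log line at 230. A sketch of that cascading scale-for-logging step, with thresholds taken from the excerpt (anything at or above 4 MHz is reported in MHz):

    #include <stdio.h>

    /* Reduce a rate in Hz to a value plus unit suffix, for logging only. */
    static unsigned long scale_rate(unsigned long rate_hz, char *unit)
    {
            unsigned long r = rate_hz;

            if (r >= 4000000) {
                    r /= 1000000;
                    *unit = 'M';
            } else if (r >= 1000) {
                    r /= 1000;
                    *unit = 'k';
            } else {
                    *unit = ' ';
            }
            return r;
    }

    int main(void)
    {
            char u;
            unsigned long v = scale_rate(19200000, &u);

            printf("clock runs at %lu%cHz\n", v, u);   /* -> "clock runs at 19MHz" */
            return 0;
    }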
|
/kernel/irq/ |
D | cpuhotplug.c |
  41  int r = irq_do_set_affinity(d, affinity, false);  in migrate_one_irq() local
  42  if (r)  in migrate_one_irq()
  44  d->irq, r);  in migrate_one_irq()
|
/kernel/sched/ |
D | cpudeadline.c |
  46  int l, r, largest;  in cpudl_heapify() local
  51  r = right_child(idx);  in cpudl_heapify()
  57  if ((r < cp->size) && dl_time_before(cp->elements[largest].dl,  in cpudl_heapify()
  58  cp->elements[r].dl))  in cpudl_heapify()
  59  largest = r;  in cpudl_heapify()
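
cpudl_heapify() is a textbook max-heap sift-down: compare a node with its two children and swap toward the larger one (the kernel compares deadlines with dl_time_before() rather than plain integers). A generic sketch over an int array:

    static void swap_int(int *a, int *b) { int t = *a; *a = *b; *b = t; }

    static int left_child(int i)  { return 2 * i + 1; }
    static int right_child(int i) { return 2 * i + 2; }

    /* Restore the max-heap property below index idx. */
    static void heapify(int *heap, int size, int idx)
    {
            for (;;) {
                    int l = left_child(idx);
                    int r = right_child(idx);
                    int largest = idx;

                    if (l < size && heap[l] > heap[largest])
                            largest = l;
                    if (r < size && heap[r] > heap[largest])
                            largest = r;
                    if (largest == idx)
                            break;                  /* heap property holds */

                    swap_int(&heap[idx], &heap[largest]);
                    idx = largest;                  /* keep sifting down */
            }
    }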
|
D | fair.c |
  3185  s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);  in update_cfs_rq_load_avg() local
  3186  sub_positive(&sa->load_avg, r);  in update_cfs_rq_load_avg()
  3187  sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);  in update_cfs_rq_load_avg()
  3193  long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);  in update_cfs_rq_load_avg() local
  3194  sub_positive(&sa->util_avg, r);  in update_cfs_rq_load_avg()
  3195  sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);  in update_cfs_rq_load_avg()
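
update_cfs_rq_load_avg() drains a "removed" accumulator with an atomic exchange to zero, then subtracts it with sub_positive(), which clamps at zero instead of letting the average underflow. A C11 sketch of both halves (the LOAD_AVG_MAX scaling is left out; names are invented):

    #include <stdatomic.h>

    static atomic_long removed_load;    /* contributions queued for removal */

    /* Subtract with a floor of zero, so a stale subtrahend cannot wrap. */
    static void sub_positive(unsigned long *val, unsigned long sub)
    {
            *val = (*val > sub) ? *val - sub : 0;
    }

    static void drain_removed(unsigned long *load_avg)
    {
            /* Take the whole pending amount and reset it in one atomic step,
             * so concurrent adders never see a half-consumed value. */
            long r = atomic_exchange(&removed_load, 0);

            if (r)
                    sub_positive(load_avg, (unsigned long)r);
    }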
|
/kernel/trace/ |
D | blktrace.c |
  1006  struct blk_io_trace_remap r;  in blk_add_trace_bio_remap() local
  1015  r.device_from = cpu_to_be32(dev);  in blk_add_trace_bio_remap()
  1016  r.device_to = cpu_to_be32(bio->bi_bdev->bd_dev);  in blk_add_trace_bio_remap()
  1017  r.sector_from = cpu_to_be64(from);  in blk_add_trace_bio_remap()
  1021  sizeof(r), &r);  in blk_add_trace_bio_remap()
  1044  struct blk_io_trace_remap r;  in blk_add_trace_rq_remap() local
  1053  r.device_from = cpu_to_be32(dev);  in blk_add_trace_rq_remap()
  1054  r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));  in blk_add_trace_rq_remap()
  1055  r.sector_from = cpu_to_be64(from);  in blk_add_trace_rq_remap()
  1059  sizeof(r), &r);  in blk_add_trace_rq_remap()
  [all …]
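
Both remap hooks fill a fixed-layout record whose fields are stored big-endian via cpu_to_be32()/cpu_to_be64() before handing it to the trace buffer as an opaque blob. A userspace sketch using the glibc <endian.h> helpers; the record layout and emit function here are invented:

    #include <endian.h>
    #include <stdint.h>

    /* On-the-wire remap record: every field stored big-endian. */
    struct remap_record {
            uint32_t device_from;
            uint32_t device_to;
            uint64_t sector_from;
    };

    static void fill_remap(struct remap_record *r, uint32_t dev_from,
                           uint32_t dev_to, uint64_t sector)
    {
            r->device_from = htobe32(dev_from);    /* host -> big-endian */
            r->device_to   = htobe32(dev_to);
            r->sector_from = htobe64(sector);
    }

    /* The caller would then emit the record whole, e.g.
     * emit_record(..., sizeof(*r), r);  (emit_record is hypothetical) */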
|
D | trace_events_filter.c |
  316  static int regex_match_full(char *str, struct regex *r, int len)  in regex_match_full() argument
  318  if (strncmp(str, r->pattern, len) == 0)  in regex_match_full()
  323  static int regex_match_front(char *str, struct regex *r, int len)  in regex_match_front() argument
  325  if (len < r->len)  in regex_match_front()
  328  if (strncmp(str, r->pattern, r->len) == 0)  in regex_match_front()
  333  static int regex_match_middle(char *str, struct regex *r, int len)  in regex_match_middle() argument
  335  if (strnstr(str, r->pattern, len))  in regex_match_middle()
  340  static int regex_match_end(char *str, struct regex *r, int len)  in regex_match_end() argument
  344  if (strlen >= r->len &&  in regex_match_end()
  345  memcmp(str + strlen - r->len, r->pattern, r->len) == 0)  in regex_match_end()
  [all …]
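
The four regex_match_* helpers implement exact, prefix, substring, and suffix matching against a fixed pattern. A standalone sketch with plain libc calls (strstr in place of the kernel's length-bounded strnstr), using an invented struct pattern:

    #include <string.h>

    struct pattern { const char *text; size_t len; };

    static int match_full(const char *str, const struct pattern *p)
    {
            return strcmp(str, p->text) == 0;               /* whole string */
    }

    static int match_front(const char *str, const struct pattern *p)
    {
            return strncmp(str, p->text, p->len) == 0;      /* prefix */
    }

    static int match_middle(const char *str, const struct pattern *p)
    {
            return strstr(str, p->text) != NULL;            /* substring */
    }

    static int match_end(const char *str, const struct pattern *p)
    {
            size_t slen = strlen(str);

            return slen >= p->len &&
                   memcmp(str + slen - p->len, p->text, p->len) == 0;  /* suffix */
    }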
|
D | trace_stack.c |
  241  int r;  in stack_max_size_read() local
  243  r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);  in stack_max_size_read()
  244  if (r > sizeof(buf))  in stack_max_size_read()
  245  r = sizeof(buf);  in stack_max_size_read()
  246  return simple_read_from_buffer(ubuf, count, ppos, buf, r);  in stack_max_size_read()
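
stack_max_size_read(), like most of the small read handlers listed below, formats one value into a stack buffer and lets simple_read_from_buffer() handle offset and length bookkeeping. A userspace model of roughly what that helper does (the real one copies to user space and can return -EFAULT):

    #include <string.h>
    #include <sys/types.h>

    /* Serve a read of up to 'count' bytes from 'from'/'available',
     * starting at *ppos and advancing it past what was copied. */
    static ssize_t read_from_buffer(void *to, size_t count, off_t *ppos,
                                    const void *from, size_t available)
    {
            off_t pos = *ppos;

            if (pos < 0)
                    return -1;                          /* stands in for -EINVAL */
            if ((size_t)pos >= available || count == 0)
                    return 0;                           /* EOF */
            if (count > available - (size_t)pos)
                    count = available - (size_t)pos;    /* clamp to what is left */

            memcpy(to, (const char *)from + pos, count);
            *ppos = pos + (off_t)count;
            return (ssize_t)count;
    }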
|
D | trace.c |
  3991  int r;  in tracing_saved_cmdlines_size_read() local
  3994  r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);  in tracing_saved_cmdlines_size_read()
  3997  return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);  in tracing_saved_cmdlines_size_read()
  4253  int r;  in tracing_saved_tgids_read() local
  4260  r = sprintf(buf, "%d %d\n", pid, tgid);  in tracing_saved_tgids_read()
  4261  buf += r;  in tracing_saved_tgids_read()
  4262  len += r;  in tracing_saved_tgids_read()
  4285  int r;  in tracing_set_trace_read() local
  4288  r = sprintf(buf, "%s\n", tr->current_trace->name);  in tracing_set_trace_read()
  4291  return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);  in tracing_set_trace_read()
  [all …]
|
D | trace_events.c |
  1308  int r = -ENODEV;  in event_filter_read() local
  1327  r = simple_read_from_buffer(ubuf, cnt, ppos,  in event_filter_read()
  1332  return r;  in event_filter_read()
  1482  int r;  in subsystem_filter_read() local
  1494  r = simple_read_from_buffer(ubuf, cnt, ppos,  in subsystem_filter_read()
  1499  return r;  in subsystem_filter_read()
  1538  int r;  in show_header() local
  1550  r = simple_read_from_buffer(ubuf, cnt, ppos,  in show_header()
  1555  return r;  in show_header()
|
D | trace.h |
  1085  typedef int (*regex_match_func)(char *str, struct regex *r, int len);
|
D | ring_buffer.c |
  1574  struct list_head *head_page, *prev_page, *r;  in rb_insert_pages() local
  1592  r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);  in rb_insert_pages()
  1594  if (r == head_page_with_bit) {  in rb_insert_pages()
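
rb_insert_pages() splices new pages into the ring by cmpxchg()ing the predecessor's next pointer and treating "the returned old value is the one we expected" as success. A C11 sketch of that compare-and-swap splice on a singly linked list (the node layout is invented):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct node { _Atomic(struct node *) next; int v; };

    /* Try to insert the chain first..last right after prev. Fails if another
     * thread changed prev->next since 'expected' was read, which is the same
     * success test as comparing cmpxchg()'s return value. */
    static bool splice_after(struct node *prev, struct node *expected,
                             struct node *first, struct node *last)
    {
            atomic_store(&last->next, expected);    /* chain the tail to the old successor */

            return atomic_compare_exchange_strong(&prev->next, &expected, first);
    }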
|
D | ftrace.c |
  1029  int r;  in ftrace_profile_read() local
  1031  r = sprintf(buf, "%u\n", ftrace_profile_enabled);  in ftrace_profile_read()
  1032  return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);  in ftrace_profile_read()
|
/kernel/printk/ |
D | printk.c |
  1863  int r;  in printk_emit() local
  1866  r = vprintk_emit(facility, level, dict, dictlen, fmt, args);  in printk_emit()
  1869  return r;  in printk_emit()
  1875  int r;  in vprintk_default() local
  1879  r = vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);  in vprintk_default()
  1880  return r;  in vprintk_default()
  1883  r = vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);  in vprintk_default()
  1885  return r;  in vprintk_default()
  1922  int r;  in printk() local
  1933  r = vprintk_func(fmt, args);  in printk()
  [all …]
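
printk(), printk_emit(), and vprintk_default() are all thin va_list front ends around a vprintk_emit() workhorse. The usual shape of such a pair, sketched here with vfprintf() standing in for the backend and invented function names:

    #include <stdarg.h>
    #include <stdio.h>

    /* The v* variant does the real work and takes an already-started va_list. */
    static int vlog_msg(const char *fmt, va_list args)
    {
            return vfprintf(stderr, fmt, args);
    }

    /* The variadic front end only starts/ends the va_list and forwards it. */
    static int log_msg(const char *fmt, ...)
    {
            va_list args;
            int r;

            va_start(args, fmt);
            r = vlog_msg(fmt, args);
            va_end(args);

            return r;
    }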
|
/kernel/debug/kdb/ |
D | kdb_io.c |
  881  int r;  in kdb_printf() local
  884  r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap);  in kdb_printf()
  887  return r;  in kdb_printf()
|
/kernel/events/ |
D | uprobes.c |
  374  static int match_uprobe(struct uprobe *l, struct uprobe *r)  in match_uprobe() argument
  376  if (l->inode < r->inode)  in match_uprobe()
  379  if (l->inode > r->inode)  in match_uprobe()
  382  if (l->offset < r->offset)  in match_uprobe()
  385  if (l->offset > r->offset)  in match_uprobe()
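
match_uprobe() orders probes by (inode, offset): compare the primary key first and only fall back to the secondary on a tie, the same shape as lock_stat_cmp() further down. A qsort-compatible sketch with an invented probe struct:

    #include <stdint.h>
    #include <stdlib.h>

    struct probe { uint64_t inode; uint64_t offset; };

    /* Three-way compare: negative / zero / positive, primary key first. */
    static int probe_cmp(const void *a, const void *b)
    {
            const struct probe *l = a, *r = b;

            if (l->inode != r->inode)
                    return l->inode < r->inode ? -1 : 1;
            if (l->offset != r->offset)
                    return l->offset < r->offset ? -1 : 1;
            return 0;
    }

    /* Usage: qsort(probes, n, sizeof(*probes), probe_cmp); */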
|
/kernel/locking/ |
D | lockdep_proc.c |
  380  static int lock_stat_cmp(const void *l, const void *r)  in lock_stat_cmp() argument
  382  const struct lock_stat_data *dl = l, *dr = r;  in lock_stat_cmp()
|
/kernel/rcu/ |
D | update.c |
  830  static void test_callback(struct rcu_head *r)  in test_callback() argument
|