/kernel/

cred.c
     38  static struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
     44  .usage = ATOMIC_INIT(4),
    104  atomic_read(&cred->usage) != 0 ||    in put_cred_rcu()
    109  atomic_read(&cred->usage),    in put_cred_rcu()
    112  if (atomic_read(&cred->usage) != 0)    in put_cred_rcu()
    114  cred, atomic_read(&cred->usage));    in put_cred_rcu()
    140  atomic_read(&cred->usage),    in __put_cred()
    143  BUG_ON(atomic_read(&cred->usage) != 0);    in __put_cred()
    167  atomic_read(&tsk->cred->usage),    in exit_creds()
    227  atomic_set(&new->usage, 1);    in cred_alloc_blank()
    [all …]
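The cred.c hits are the classic refcounting use of the field: the count is seeded at allocation (atomic_set(&new->usage, 1)), statically initialised for the boot-time objects, and the put paths insist it has really reached zero before the object is freed. Below is a minimal userspace sketch of that pattern using C11 atomics; struct my_cred and the helper names are invented for illustration and are not the kernel's struct cred API.

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct cred: only the refcount matters here. */
struct my_cred {
    atomic_int usage;
    int uid;
};

static struct my_cred *cred_alloc(int uid)
{
    struct my_cred *cred = malloc(sizeof(*cred));

    if (!cred)
        return NULL;
    atomic_init(&cred->usage, 1);    /* like atomic_set(&new->usage, 1) */
    cred->uid = uid;
    return cred;
}

static struct my_cred *get_cred(struct my_cred *cred)
{
    atomic_fetch_add(&cred->usage, 1);
    return cred;
}

static void put_cred(struct my_cred *cred)
{
    /* Free only when the last reference goes away. */
    if (atomic_fetch_sub(&cred->usage, 1) == 1) {
        /* Mirrors the BUG_ON(atomic_read(&cred->usage) != 0) sanity check. */
        assert(atomic_load(&cred->usage) == 0);
        free(cred);
    }
}

int main(void)
{
    struct my_cred *cred = cred_alloc(1000);

    get_cred(cred);    /* usage: 2 */
    put_cred(cred);    /* usage: 1 */
    put_cred(cred);    /* usage: 0, freed */
    printf("done\n");
    return 0;
}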
watch_queue.c
    396  container_of(kref, struct watch_queue, usage);    in __put_watch_queue()
    417  kref_put(&wqueue->usage, __put_watch_queue);    in put_watch_queue()
    433  struct watch *watch = container_of(kref, struct watch, usage);    in __put_watch()
    443  kref_put(&watch->usage, __put_watch);    in put_watch()
    455  kref_init(&watch->usage);    in init_watch()
    481  kref_get(&wqueue->usage);    in add_one_watch()
    482  kref_get(&watch->usage);    in add_one_watch()
    677  kref_get(&wqueue->usage);    in get_watch_queue()
    698  kref_init(&wqueue->usage);    in watch_queue_init()
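watch_queue.c does the same job through the kref API: kref_init() at creation, kref_get() per additional reference, and kref_put() with a release callback that recovers the containing object via container_of(..., usage). The sketch below re-creates a tiny kref lookalike in userspace to show the shape of that pattern; my_kref, watch_like and the helper names are invented, not the kernel interfaces.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Tiny stand-in for struct kref. */
struct my_kref {
    atomic_int refcount;
};

static void my_kref_init(struct my_kref *k) { atomic_init(&k->refcount, 1); }
static void my_kref_get(struct my_kref *k)  { atomic_fetch_add(&k->refcount, 1); }

static void my_kref_put(struct my_kref *k, void (*release)(struct my_kref *))
{
    if (atomic_fetch_sub(&k->refcount, 1) == 1)
        release(k);    /* last reference dropped */
}

/* Object embedding the refcount, like struct watch embeds 'usage'. */
struct watch_like {
    struct my_kref usage;
    int id;
};

static void release_watch(struct my_kref *kref)
{
    /* Same recovery step as __put_watch(): container_of(kref, ..., usage). */
    struct watch_like *w = container_of(kref, struct watch_like, usage);

    printf("releasing watch %d\n", w->id);
    free(w);
}

int main(void)
{
    struct watch_like *w = malloc(sizeof(*w));

    w->id = 42;
    my_kref_init(&w->usage);                  /* like init_watch() */
    my_kref_get(&w->usage);                   /* like add_one_watch() */
    my_kref_put(&w->usage, release_watch);    /* drop one reference */
    my_kref_put(&w->usage, release_watch);    /* last put frees it */
    return 0;
}

Embedding the refcount and recovering the outer structure with container_of() is what lets one generic put/release helper serve many different object types.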
groups.c
    22  atomic_set(&gi->usage, 1);    in groups_alloc()
Kconfig.preempt
    118  which is the likely usage by Linux distributions, there should
fork.c
     796  WARN_ON(refcount_read(&tsk->usage));    in __put_task_struct()
    1004  refcount_set(&tsk->usage, 1);    in dup_task_struct()
/kernel/cgroup/

misc.c
     80  return atomic_long_read(&root_cg.res[type].usage);    in misc_cg_res_total_usage()
    119  WARN_ONCE(atomic_long_add_negative(-amount, &cg->res[type].usage),    in misc_cg_cancel_charge()
    157  new_usage = atomic_long_add_return(amount, &res->usage);    in misc_cg_try_charge()
    302  unsigned long usage;    in misc_cg_current_show() local
    306  usage = atomic_long_read(&cg->res[i].usage);    in misc_cg_current_show()
    307  if (READ_ONCE(misc_res_capacity[i]) || usage)    in misc_cg_current_show()
    308  seq_printf(sf, "%s %lu\n", misc_res_name[i], usage);    in misc_cg_current_show()
    384  atomic_long_set(&cg->res[i].usage, 0);    in misc_cg_alloc()
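The misc controller treats usage as a charged quantity rather than a refcount: misc_cg_try_charge() adds optimistically with atomic_long_add_return(), the result is checked against the resource capacity, and misc_cg_cancel_charge() backs the charge out (warning if the counter would go negative). A self-contained userspace sketch of that charge/rollback idea, with hypothetical names rather than the kernel's functions:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* One accounted resource: current usage charged against a fixed capacity. */
struct misc_res_like {
    atomic_long capacity;
    atomic_long usage;
};

static void res_cancel_charge(struct misc_res_like *res, long amount)
{
    /* Undo a charge; a negative result would indicate an accounting bug. */
    long left = atomic_fetch_sub(&res->usage, amount) - amount;

    if (left < 0)
        fprintf(stderr, "warning: usage went negative (%ld)\n", left);
}

static bool res_try_charge(struct misc_res_like *res, long amount)
{
    /* Charge optimistically, then compare the new total to the capacity. */
    long new_usage = atomic_fetch_add(&res->usage, amount) + amount;

    if (new_usage > atomic_load(&res->capacity)) {
        res_cancel_charge(res, amount);    /* roll the charge back on failure */
        return false;
    }
    return true;
}

int main(void)
{
    struct misc_res_like res;

    atomic_init(&res.capacity, 100);
    atomic_init(&res.usage, 0);

    printf("charge 80: %s\n", res_try_charge(&res, 80) ? "ok" : "over capacity");
    printf("charge 30: %s\n", res_try_charge(&res, 30) ? "ok" : "over capacity");
    printf("usage %ld of capacity %ld\n",
           atomic_load(&res.usage), atomic_load(&res.capacity));
    return 0;
}

Checking the limit after the add keeps the fast path lock-free; the occasional rollback on failure is the price for not serialising every charge.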
rdma.c
     45  int usage;    member
    180  rpool->resources[index].usage--;    in uncharge_cg_locked()
    186  WARN_ON_ONCE(rpool->resources[index].usage < 0);    in uncharge_cg_locked()
    284  new = rpool->resources[index].usage + 1;    in rdmacg_try_charge()
    289  rpool->resources[index].usage = new;    in rdmacg_try_charge()
    511  value = rpool->resources[i].usage;    in print_rpool_values()
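rdma.c keeps usage as a plain int updated under a lock instead of an atomic: charging computes usage + 1, checks it against the configured limit before storing it back, and uncharging warns if the count ever goes negative. A rough userspace equivalent using a pthread mutex (all names here are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct rpool_res_like {
    pthread_mutex_t lock;
    int usage;    /* current count, only touched with the lock held */
    int max;      /* configured limit */
};

static bool rpool_try_charge(struct rpool_res_like *res)
{
    bool ok = false;

    pthread_mutex_lock(&res->lock);
    int new = res->usage + 1;    /* like rdmacg_try_charge() */
    if (new <= res->max) {
        res->usage = new;
        ok = true;
    }
    pthread_mutex_unlock(&res->lock);
    return ok;
}

static void rpool_uncharge(struct rpool_res_like *res)
{
    pthread_mutex_lock(&res->lock);
    res->usage--;                /* like uncharge_cg_locked() */
    if (res->usage < 0)          /* stand-in for WARN_ON_ONCE() */
        fprintf(stderr, "warning: usage dropped below zero\n");
    pthread_mutex_unlock(&res->lock);
}

int main(void)
{
    struct rpool_res_like res = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .usage = 0,
        .max = 2,
    };
    bool a = rpool_try_charge(&res);
    bool b = rpool_try_charge(&res);
    bool c = rpool_try_charge(&res);    /* third charge exceeds max and fails */

    printf("%d %d %d\n", a, b, c);
    rpool_uncharge(&res);
    return 0;
}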
rstat.c
    442  u64 usage, utime, stime;    in cgroup_base_stat_cputime_show() local
    447  usage = cgrp->bstat.cputime.sum_exec_runtime;    in cgroup_base_stat_cputime_show()
    453  usage = cputime.sum_exec_runtime;    in cgroup_base_stat_cputime_show()
    458  do_div(usage, NSEC_PER_USEC);    in cgroup_base_stat_cputime_show()
    465  usage, utime, stime);    in cgroup_base_stat_cputime_show()
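In rstat.c, usage is the cgroup's total CPU time: sum_exec_runtime in nanoseconds, which is divided by NSEC_PER_USEC alongside utime and stime before being reported. A small sketch of just that conversion step; the key names follow cgroup v2's cpu.stat, but treat the exact output format as illustrative rather than a copy of the kernel's seq_printf():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

struct cputime_like {
    uint64_t sum_exec_runtime;    /* total CPU time, in nanoseconds */
    uint64_t utime;               /* user time, in nanoseconds */
    uint64_t stime;               /* system time, in nanoseconds */
};

static void show_cputime(const struct cputime_like *ct)
{
    uint64_t usage = ct->sum_exec_runtime;
    uint64_t utime = ct->utime;
    uint64_t stime = ct->stime;

    /* Same scaling as do_div(usage, NSEC_PER_USEC): report microseconds. */
    usage /= NSEC_PER_USEC;
    utime /= NSEC_PER_USEC;
    stime /= NSEC_PER_USEC;

    printf("usage_usec %" PRIu64 "\n", usage);
    printf("user_usec %" PRIu64 "\n", utime);
    printf("system_usec %" PRIu64 "\n", stime);
}

int main(void)
{
    struct cputime_like ct = {
        .sum_exec_runtime = 1234567890ULL,
        .utime = 900000000ULL,
        .stime = 334567890ULL,
    };

    show_cputime(&ct);
    return 0;
}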
/kernel/debug/kdb/

kdb_main.c
     769  kp->name, kp->usage, kp->help);    in kdb_defcmd()
     796  mp->usage = kdb_strdup(argv[2], GFP_KDB);    in kdb_defcmd()
     797  if (!mp->usage)    in kdb_defcmd()
     802  if (mp->usage[0] == '"') {    in kdb_defcmd()
     803  strcpy(mp->usage, argv[2]+1);    in kdb_defcmd()
     804  mp->usage[strlen(mp->usage)-1] = '\0';    in kdb_defcmd()
     815  kfree(mp->usage);    in kdb_defcmd()
    2477  if (strlen(kt->usage) > 20)    in kdb_help()
    2480  kt->usage, space, kt->help);    in kdb_help()
    2730  .usage = "<vaddr>",
    [all …]
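In kdb, .usage is simply the help/usage string attached to each command table entry. kdb_defcmd() duplicates the user-supplied usage text and, if it was given quoted, strips the surrounding double quotes in place; kdb_help() later aligns its output based on that string's length. A standalone sketch of the quote-stripping step, with a hypothetical helper name:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Duplicate a usage string and, as kdb_defcmd() does, drop a pair of
 * surrounding double quotes if the argument was given quoted.
 */
static char *dup_usage(const char *arg)
{
    char *usage = strdup(arg);

    if (!usage)
        return NULL;

    if (usage[0] == '"') {
        /* Copy from one past the opening quote, then cut the closing one. */
        memmove(usage, arg + 1, strlen(arg + 1) + 1);
        usage[strlen(usage) - 1] = '\0';
    }
    return usage;
}

int main(void)
{
    char *u1 = dup_usage("\"<vaddr> [<length>]\"");
    char *u2 = dup_usage("<bpnum>");

    printf("'%s'\n'%s'\n", u1, u2);
    free(u1);
    free(u2);
    return 0;
}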
kdb_bp.c
    528  .usage = "[<vaddr>]",
    534  .usage = "[<vaddr>]",
    540  .usage = "<bpnum>",
    546  .usage = "<bpnum>",
    552  .usage = "<bpnum>",
    558  .usage = "",
    568  .usage = "[<vaddr>]",
/kernel/sched/

cputime.c
     928  enum cpu_usage_stat usage,    in kcpustat_field_vtime() argument
     943  *val = cpustat[usage];    in kcpustat_field_vtime()
     952  switch (usage) {    in kcpustat_field_vtime()
     982  enum cpu_usage_stat usage, int cpu)    in kcpustat_field() argument
     985  u64 val = cpustat[usage];    in kcpustat_field()
    1001  return cpustat[usage];    in kcpustat_field()
    1004  err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);    in kcpustat_field()
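cputime.c uses usage in yet another sense: it is an enum cpu_usage_stat selector, and kcpustat_field() simply indexes the per-CPU cpustat array with it (kcpustat_field_vtime() adds special handling when vtime accounting is active). A minimal sketch of an enum-indexed accounting array, with an invented subset of the enumerators:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative subset of the kernel's enum cpu_usage_stat. */
enum cpu_usage_stat_like {
    STAT_USER,
    STAT_SYSTEM,
    STAT_IDLE,
    NR_STATS,
};

struct kernel_cpustat_like {
    uint64_t cpustat[NR_STATS];    /* cumulative time per category, in ns */
};

/* Like kcpustat_field(): pick one accounting bucket by its enum value. */
static uint64_t cpustat_field(const struct kernel_cpustat_like *kc,
                              enum cpu_usage_stat_like usage)
{
    return kc->cpustat[usage];
}

int main(void)
{
    struct kernel_cpustat_like kc = { .cpustat = { 500, 200, 9300 } };

    printf("user=%" PRIu64 " system=%" PRIu64 " idle=%" PRIu64 "\n",
           cpustat_field(&kc, STAT_USER),
           cpustat_field(&kc, STAT_SYSTEM),
           cpustat_field(&kc, STAT_IDLE));
    return 0;
}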
/kernel/locking/

lockdep_proc.c
    81  char usage[LOCK_USAGE_CHARS];    in l_show() local
    98  get_usage_chars(class, usage);    in l_show()
    99  seq_printf(m, " %s", usage);    in l_show()
lockdep_internals.h
    129  char usage[LOCK_USAGE_CHARS]);
lockdep.c
    668  void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])    in get_usage_chars()
    673  usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \    in get_usage_chars()
    674  usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);    in get_usage_chars()
    678  usage[i] = '\0';    in get_usage_chars()
    701  char usage[LOCK_USAGE_CHARS];    in print_lock_name() local
    703  get_usage_chars(class, usage);    in print_lock_name()
    707  printk(KERN_CONT "){%s}-{%d:%d}", usage,    in print_lock_name()
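For lockdep, usage is a short character buffer: get_usage_chars() writes one character per tracked lock state (and its _READ variant) into a LOCK_USAGE_CHARS-sized array, NUL-terminates it, and print_lock_name() embeds it in the "){%s}-{%d:%d}" output. A toy sketch of building such a state string from bit flags; the states and characters below are invented, and the real lockdep encoding distinguishes more cases:

#include <stdio.h>

/* Invented lock-state bits, standing in for LOCK_USED_IN_* and friends. */
#define USED_IN_HARDIRQ       (1U << 0)
#define USED_IN_HARDIRQ_READ  (1U << 1)
#define USED_IN_SOFTIRQ       (1U << 2)
#define USED_IN_SOFTIRQ_READ  (1U << 3)

#define USAGE_CHARS 5    /* four state characters plus the terminating NUL */

static char usage_char(unsigned int flags, unsigned int bit)
{
    return (flags & bit) ? '+' : '.';
}

/* Like get_usage_chars(): one character per state, then '\0'. */
static void get_usage_string(unsigned int flags, char usage[USAGE_CHARS])
{
    int i = 0;

    usage[i++] = usage_char(flags, USED_IN_HARDIRQ);
    usage[i++] = usage_char(flags, USED_IN_HARDIRQ_READ);
    usage[i++] = usage_char(flags, USED_IN_SOFTIRQ);
    usage[i++] = usage_char(flags, USED_IN_SOFTIRQ_READ);
    usage[i] = '\0';
}

int main(void)
{
    char usage[USAGE_CHARS];

    get_usage_string(USED_IN_HARDIRQ | USED_IN_SOFTIRQ_READ, usage);
    printf("{%s}\n", usage);    /* prints {+..+} */
    return 0;
}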
/kernel/trace/

trace_kdb.c
    153  .usage = "[skip_#entries] [cpu]",
/kernel/dma/

Kconfig
    212  bool "Enable debugging of DMA-API usage"
    226  bool "Debug DMA scatter-gather usage"
/kernel/bpf/

bpf_task_storage.c
    246  if (refcount_read(&task->usage) &&    in BPF_CALL_4()
/kernel/rcu/

Kconfig.debug
    129  when looking for certain types of RCU usage bugs, for example,
/kernel/power/

Kconfig
    303  lower power usage at the cost of small performance overhead.
    331  The exact usage of the energy model is subsystem-dependent.