/kernel/
  cred.c
      41  static struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
      47  .usage = ATOMIC_INIT(4),
      76  if (atomic_long_read(&cred->usage) != 0)  in put_cred_rcu()
      78  cred, atomic_long_read(&cred->usage));  in put_cred_rcu()
      103  atomic_long_read(&cred->usage));  in __put_cred()
      105  BUG_ON(atomic_long_read(&cred->usage) != 0);  in __put_cred()
      124  atomic_long_read(&tsk->cred->usage));  in exit_creds()
      179  atomic_long_set(&new->usage, 1);  in cred_alloc_blank()
      220  atomic_long_set(&new->usage, 1);  in prepare_creds()
      306  p->cred, atomic_long_read(&p->cred->usage));  in copy_creds()
      [all …]
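All of the cred.c hits are one pattern: an atomic_long_t counter named usage that starts at 1 (or a static bias for init_groups), is read for sanity checks, and must have reached zero by the time the object is destroyed. A minimal userspace C11 sketch of that get/put scheme, with illustrative names rather than the kernel API:

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative stand-in for struct cred; only the counter matters here. */
struct cred_like {
    atomic_long usage;
};

static struct cred_like *cred_alloc(void)
{
    struct cred_like *c = calloc(1, sizeof(*c));
    if (c)
        atomic_init(&c->usage, 1);      /* one reference for the caller */
    return c;
}

static void cred_get(struct cred_like *c)
{
    atomic_fetch_add(&c->usage, 1);     /* take an extra reference */
}

static void cred_put(struct cred_like *c)
{
    /* drop a reference; the last put frees, mirroring __put_cred()'s
     * expectation that usage is exactly zero at destruction time */
    if (atomic_fetch_sub(&c->usage, 1) == 1) {
        assert(atomic_load(&c->usage) == 0);
        free(c);
    }
}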
  watch_queue.c
      389  container_of(kref, struct watch_queue, usage);  in __put_watch_queue()
      410  kref_put(&wqueue->usage, __put_watch_queue);  in put_watch_queue()
      426  struct watch *watch = container_of(kref, struct watch, usage);  in __put_watch()
      436  kref_put(&watch->usage, __put_watch);  in put_watch()
      448  kref_init(&watch->usage);  in init_watch()
      474  kref_get(&wqueue->usage);  in add_one_watch()
      475  kref_get(&watch->usage);  in add_one_watch()
      673  kref_get(&wqueue->usage);  in get_watch_queue()
      694  kref_init(&wqueue->usage);  in watch_queue_init()
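watch_queue.c uses the kref form of the same idea: kref_init()/kref_get() take references on the embedded usage member, and kref_put() runs a release callback that recovers the outer object with container_of(). A self-contained sketch of that idiom, with a toy kref standing in for the kernel one and made-up struct names:

#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

/* Toy stand-in for the kernel's struct kref and its helpers. */
struct kref { atomic_int refcount; };

static void kref_init(struct kref *k) { atomic_init(&k->refcount, 1); }
static void kref_get(struct kref *k)  { atomic_fetch_add(&k->refcount, 1); }

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
    if (atomic_fetch_sub(&k->refcount, 1) == 1)
        release(k);                     /* last reference gone */
}

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct watch_queue_like {
    struct kref usage;                  /* embedded refcount, as in watch_queue.c */
    /* ... pipe, list of watches, ... */
};

static void __put_watch_queue_like(struct kref *kref)
{
    /* recover the containing object from its embedded kref and free it */
    struct watch_queue_like *wq = container_of(kref, struct watch_queue_like, usage);
    free(wq);
}

int main(void)
{
    struct watch_queue_like *wq = calloc(1, sizeof(*wq));

    if (!wq)
        return 1;
    kref_init(&wq->usage);                          /* as in watch_queue_init() */
    kref_get(&wq->usage);                           /* as in get_watch_queue() */
    kref_put(&wq->usage, __put_watch_queue_like);   /* as in put_watch_queue() */
    kref_put(&wq->usage, __put_watch_queue_like);   /* last put runs the release */
    return 0;
}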
  groups.c
      22  atomic_set(&gi->usage, 1);  in groups_alloc()
/kernel/debug/kdb/
  kdb_main.c
      768  kp->name, kp->usage, kp->help);  in kdb_defcmd()
      795  mp->usage = kdb_strdup(argv[2], GFP_KDB);  in kdb_defcmd()
      796  if (!mp->usage)  in kdb_defcmd()
      801  if (mp->usage[0] == '"') {  in kdb_defcmd()
      802  strcpy(mp->usage, argv[2]+1);  in kdb_defcmd()
      803  mp->usage[strlen(mp->usage)-1] = '\0';  in kdb_defcmd()
      814  kfree(mp->usage);  in kdb_defcmd()
      2428  if (strlen(kt->usage) > 20)  in kdb_help()
      2431  kt->usage, space, kt->help);  in kdb_help()
      2681  .usage = "<vaddr>",
      [all …]
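The kdb_defcmd() hits duplicate argv[2] into mp->usage and, when the argument was quoted, strip the surrounding quotes in place. A small userspace sketch of that quote-stripping step (the helper name is made up; the kernel operates on its kdb_strdup() copy):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Duplicate a usage string, dropping one pair of surrounding quotes if
 * present, roughly what kdb_defcmd() does with argv[2]. */
static char *dup_usage(const char *arg)
{
    char *usage = strdup(arg);

    if (!usage)
        return NULL;
    if (usage[0] == '"') {
        size_t len;

        memmove(usage, usage + 1, strlen(usage));   /* drop leading quote */
        len = strlen(usage);
        if (len && usage[len - 1] == '"')
            usage[len - 1] = '\0';                  /* drop trailing quote */
    }
    return usage;
}

int main(void)
{
    char *u = dup_usage("\"<vaddr> [<count>]\"");

    printf("%s\n", u ? u : "(alloc failed)");       /* prints: <vaddr> [<count>] */
    free(u);
    return 0;
}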
  kdb_bp.c
      528  .usage = "[<vaddr>]",
      534  .usage = "[<vaddr>]",
      540  .usage = "<bpnum>",
      546  .usage = "<bpnum>",
      552  .usage = "<bpnum>",
      558  .usage = "",
      568  .usage = "[<vaddr>]",
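kdb_bp.c only fills in .usage strings for its command table entries; kdb_help() (in kdb_main.c above) later prints them in name/usage/help columns. A toy sketch of such a table and printout (the struct layout and help texts here are illustrative, not the kernel's kdbtab_t):

#include <stdio.h>

struct cmd_like {
    const char *name;
    const char *usage;      /* argument summary, e.g. "[<vaddr>]" */
    const char *help;
};

static const struct cmd_like cmds[] = {
    { "bp",   "[<vaddr>]", "set or display a breakpoint" },
    { "bc",   "<bpnum>",   "clear a breakpoint" },
    { "demo", "",          "command with no arguments" },
};

int main(void)
{
    /* roughly what kdb_help() does: name, usage and help in columns */
    for (size_t i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++)
        printf("%-15s %-20s %s\n", cmds[i].name, cmds[i].usage, cmds[i].help);
    return 0;
}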
/kernel/cgroup/
  misc.c
      80  return atomic64_read(&root_cg.res[type].usage);  in misc_cg_res_total_usage()
      119  WARN_ONCE(atomic64_add_negative(-amount, &cg->res[type].usage),  in misc_cg_cancel_charge()
      156  new_usage = atomic64_add_return(amount, &res->usage);  in misc_cg_try_charge()
      298  u64 usage;  in misc_cg_current_show() (local)
      302  usage = atomic64_read(&cg->res[i].usage);  in misc_cg_current_show()
      303  if (READ_ONCE(misc_res_capacity[i]) || usage)  in misc_cg_current_show()
      304  seq_printf(sf, "%s %llu\n", misc_res_name[i], usage);  in misc_cg_current_show()
      399  atomic64_set(&cg->res[i].usage, 0);  in misc_cg_alloc()
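The misc controller's accounting is lock-free: misc_cg_try_charge() adds with atomic64_add_return() and compares the result against the capacity, and misc_cg_cancel_charge() subtracts, warning if the counter would go negative. A hedged userspace sketch of that try-charge/cancel shape (plain C11 atomics, no cgroup hierarchy, invented names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct misc_res_like {
    atomic_llong usage;        /* current charge */
    long long    max;          /* capacity; charging beyond this fails */
};

/* Charge @amount, backing the charge out if it would exceed the capacity. */
static bool try_charge(struct misc_res_like *res, long long amount)
{
    long long new_usage = atomic_fetch_add(&res->usage, amount) + amount;

    if (new_usage > res->max) {
        atomic_fetch_sub(&res->usage, amount);          /* cancel the charge */
        return false;
    }
    return true;
}

static void uncharge(struct misc_res_like *res, long long amount)
{
    long long old = atomic_fetch_sub(&res->usage, amount);

    if (old - amount < 0)
        fprintf(stderr, "misc usage went negative\n");  /* like WARN_ONCE() */
}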
  rdma.c
      45  int usage;  (member)
      180  rpool->resources[index].usage--;  in uncharge_cg_locked()
      186  WARN_ON_ONCE(rpool->resources[index].usage < 0);  in uncharge_cg_locked()
      286  new = rpool->resources[index].usage + 1;  in rdmacg_try_charge()
      291  rpool->resources[index].usage = new;  in rdmacg_try_charge()
      513  value = rpool->resources[i].usage;  in print_rpool_values()
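The RDMA controller, by contrast, keeps usage as a plain int protected by the pool locking, with WARN_ON_ONCE() catching underflow on uncharge. A rough pthread-mutex analogue of that locked counter (types and the limit are invented for the sketch):

#include <assert.h>
#include <pthread.h>

struct rdma_res_like {
    pthread_mutex_t lock;
    int usage;                  /* protected by ->lock */
    int max;
};

static struct rdma_res_like res = {
    .lock = PTHREAD_MUTEX_INITIALIZER,
    .max  = 8,
};

static int charge_one(struct rdma_res_like *r)
{
    int ret = 0;

    pthread_mutex_lock(&r->lock);
    if (r->usage + 1 > r->max)
        ret = -1;               /* over the configured limit, reject */
    else
        r->usage++;
    pthread_mutex_unlock(&r->lock);
    return ret;
}

static void uncharge_one(struct rdma_res_like *r)
{
    pthread_mutex_lock(&r->lock);
    r->usage--;
    assert(r->usage >= 0);      /* mirrors WARN_ON_ONCE(usage < 0) */
    pthread_mutex_unlock(&r->lock);
}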
  rstat.c
      525  u64 usage, utime, stime;  in cgroup_base_stat_cputime_show() (local)
      533  usage = cgrp->bstat.cputime.sum_exec_runtime;  in cgroup_base_stat_cputime_show()
      542  usage = bstat.cputime.sum_exec_runtime;  in cgroup_base_stat_cputime_show()
      550  do_div(usage, NSEC_PER_USEC);  in cgroup_base_stat_cputime_show()
      560  usage, utime, stime);  in cgroup_base_stat_cputime_show()
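The rstat hits copy sum_exec_runtime (in nanoseconds) into usage and scale it to microseconds with do_div() before printing the cpu.stat fields. A plain C equivalent of that conversion (do_div() is a kernel helper; ordinary 64-bit division does the same job here, and the input value is made up):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

int main(void)
{
    uint64_t usage = 1234567890ULL;     /* e.g. sum_exec_runtime, nanoseconds */

    usage /= NSEC_PER_USEC;             /* what do_div(usage, NSEC_PER_USEC) computes */
    printf("usage_usec %" PRIu64 "\n", usage);
    return 0;
}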
/kernel/bpf/
  cpumask.c
      27  refcount_t usage;  (member)
      63  refcount_set(&cpumask->usage, 1);  in bpf_cpumask_create()
      79  refcount_inc(&cpumask->usage);  in bpf_cpumask_acquire()
      93  if (!refcount_dec_and_test(&cpumask->usage))  in bpf_cpumask_release()
  queue_stack_maps.c
      262  u64 usage = sizeof(struct bpf_queue_stack);  in queue_stack_map_mem_usage() (local)
      264  usage += ((u64)map->max_entries + 1) * map->value_size;  in queue_stack_map_mem_usage()
      265  return usage;  in queue_stack_map_mem_usage()
  bpf_struct_ops.c
      706  u64 usage;  in bpf_struct_ops_map_mem_usage() (local)
      708  usage = sizeof(*st_map) +  in bpf_struct_ops_map_mem_usage()
      710  usage += vt->size;  in bpf_struct_ops_map_mem_usage()
      711  usage += btf_type_vlen(vt) * sizeof(struct bpf_links *);  in bpf_struct_ops_map_mem_usage()
      712  usage += PAGE_SIZE;  in bpf_struct_ops_map_mem_usage()
      713  return usage;  in bpf_struct_ops_map_mem_usage()
  arraymap.c
      730  u64 usage = sizeof(*array);  in array_map_mem_usage() (local)
      733  usage += entries * sizeof(void *);  in array_map_mem_usage()
      734  usage += entries * elem_size * num_possible_cpus();  in array_map_mem_usage()
      737  usage = PAGE_ALIGN(usage);  in array_map_mem_usage()
      738  usage += PAGE_ALIGN(entries * elem_size);  in array_map_mem_usage()
      740  usage += entries * elem_size;  in array_map_mem_usage()
      743  return usage;  in array_map_mem_usage()
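The array map entry is the most involved of the *_mem_usage() callbacks listed here: the base struct, plus a per-entry pointer array and per-CPU value storage for percpu maps, or a flat value area (page-aligned when the map is mmap-able) otherwise. A rough, self-contained C analogue of that arithmetic (the base size, parameter names and helper are illustrative, not the kernel function):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Rough analogue of array_map_mem_usage(); 320 stands in for sizeof(*array). */
static uint64_t array_map_usage(uint64_t entries, uint64_t elem_size,
                                uint64_t ncpus, int percpu)
{
    uint64_t usage = 320;

    if (percpu) {
        usage += entries * sizeof(void *);      /* per-entry percpu pointers */
        usage += entries * elem_size * ncpus;   /* the per-CPU values themselves */
    } else {
        usage += entries * elem_size;           /* flat value area */
        /* the mmap-able case would additionally PAGE_ALIGN() these sizes */
    }
    return usage;
}

int main(void)
{
    /* e.g. 1024 entries of 64-byte values on an 8-CPU machine */
    printf("percpu: %" PRIu64 " bytes\n", array_map_usage(1024, 64, 8, 1));
    printf("plain:  %" PRIu64 " bytes\n", array_map_usage(1024, 64, 8, 0));
    return 0;
}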
  ringbuf.c
      344  u64 usage = sizeof(struct bpf_ringbuf_map);  in ringbuf_map_mem_usage() (local)
      347  usage += (u64)rb->nr_pages << PAGE_SHIFT;  in ringbuf_map_mem_usage()
      350  usage += (nr_meta_pages + 2 * nr_data_pages) * sizeof(struct page *);  in ringbuf_map_mem_usage()
      351  return usage;  in ringbuf_map_mem_usage()
  stackmap.c
      672  u64 usage = sizeof(*smap);  in stack_map_mem_usage() (local)
      674  usage += n_buckets * sizeof(struct stack_map_bucket *);  in stack_map_mem_usage()
      675  usage += enties * (sizeof(struct stack_map_bucket) + value_size);  in stack_map_mem_usage()
      676  return usage;  in stack_map_mem_usage()
  devmap.c
      1022  u64 usage = sizeof(struct bpf_dtab);  in dev_map_mem_usage() (local)
      1025  usage += (u64)dtab->n_buckets * sizeof(struct hlist_head);  in dev_map_mem_usage()
      1027  usage += (u64)map->max_entries * sizeof(struct bpf_dtab_netdev *);  in dev_map_mem_usage()
      1028  usage += atomic_read((atomic_t *)&dtab->items) *  in dev_map_mem_usage()
      1030  return usage;  in dev_map_mem_usage()
  hashtab.c
      2224  u64 usage = sizeof(struct bpf_htab);  in htab_map_mem_usage() (local)
      2226  usage += sizeof(struct bucket) * htab->n_buckets;  in htab_map_mem_usage()
      2227  usage += sizeof(int) * num_possible_cpus() * HASHTAB_MAP_LOCK_COUNT;  in htab_map_mem_usage()
      2233  usage += htab->elem_size * num_entries;  in htab_map_mem_usage()
      2236  usage += value_size * num_possible_cpus() * num_entries;  in htab_map_mem_usage()
      2238  usage += sizeof(struct htab_elem *) * num_possible_cpus();  in htab_map_mem_usage()
      2245  usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries;  in htab_map_mem_usage()
      2247  usage += (LLIST_NODE_SZ + sizeof(void *)) * num_entries;  in htab_map_mem_usage()
      2248  usage += value_size * num_possible_cpus() * num_entries;  in htab_map_mem_usage()
      2251  return usage;  in htab_map_mem_usage()
  cpumap.c
      646  u64 usage = sizeof(struct bpf_cpu_map);  in cpu_map_mem_usage() (local)
      649  usage += (u64)map->max_entries * sizeof(struct bpf_cpu_map_entry *);  in cpu_map_mem_usage()
      650  return usage;  in cpu_map_mem_usage()
  bpf_local_storage.c
      774  u64 usage = sizeof(*smap);  in bpf_local_storage_map_mem_usage() (local)
      777  usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);  in bpf_local_storage_map_mem_usage()
      778  return usage;  in bpf_local_storage_map_mem_usage()
  bpf_task_storage.c
      218  if (refcount_read(&task->usage) &&  in __bpf_task_storage_get()
/kernel/sched/
  cputime.c
      957  enum cpu_usage_stat usage,  in kcpustat_field_vtime() (argument)
      972  *val = cpustat[usage];  in kcpustat_field_vtime()
      981  switch (usage) {  in kcpustat_field_vtime()
      1011  enum cpu_usage_stat usage, int cpu)  in kcpustat_field() (argument)
      1014  u64 val = cpustat[usage];  in kcpustat_field()
      1030  return cpustat[usage];  in kcpustat_field()
      1033  err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);  in kcpustat_field()
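In cputime.c, usage is not a counter at all: it is an enum cpu_usage_stat value used to index the per-CPU cpustat[] array. A small sketch of that array-indexed-by-enum shape (the enum values, struct name and numbers are invented for illustration):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the shape of enum cpu_usage_stat and kernel_cpustat.cpustat[]. */
enum cpu_usage_stat_like { STAT_USER, STAT_SYSTEM, STAT_IDLE, NR_STATS };

struct kcpustat_like {
    uint64_t cpustat[NR_STATS];
};

/* Analogue of kcpustat_field(): return one field of one CPU's stats. */
static uint64_t kcpustat_field_like(const struct kcpustat_like *kc,
                                    enum cpu_usage_stat_like usage)
{
    return kc->cpustat[usage];
}

int main(void)
{
    struct kcpustat_like kc = {
        .cpustat = { [STAT_USER] = 123, [STAT_IDLE] = 456 },
    };

    printf("user=%" PRIu64 " idle=%" PRIu64 "\n",
           kcpustat_field_like(&kc, STAT_USER),
           kcpustat_field_like(&kc, STAT_IDLE));
    return 0;
}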
/kernel/trace/rv/
  Kconfig
      35  the usage of per-cpu monitors, and one limitation of the
      47  sample monitor that illustrates the usage of per-task monitor.
/kernel/locking/
  lockdep_proc.c
      81  char usage[LOCK_USAGE_CHARS];  in l_show() (local)
      98  get_usage_chars(class, usage);  in l_show()
      99  seq_printf(m, " %s", usage);  in l_show()
  lockdep_internals.h
      129  char usage[LOCK_USAGE_CHARS]);
/kernel/trace/
  trace_kdb.c
      153  .usage = "[skip_#entries] [cpu]",
/kernel/dma/
  Kconfig
      235  bool "Enable debugging of DMA-API usage"
      249  bool "Debug DMA scatter-gather usage"