/fs/proc/ |
D | stat.c |
      18  #define arch_irq_stat_cpu(cpu) 0
    in get_idle_time():
      26  static u64 get_idle_time(struct kernel_cpustat *kcs, int cpu)
      31          if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
      32                  idle += arch_idle_time(cpu);
    in get_iowait_time():
      36  static u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu)
      41          if (cpu_online(cpu) && nr_iowait_cpu(cpu))
      42                  iowait += arch_idle_time(cpu);
    in get_idle_time():
      48  static u64 get_idle_time(struct kernel_cpustat *kcs, int cpu)
      52          if (cpu_online(cpu))
      53                  idle_usecs = get_cpu_idle_time_us(cpu, NULL);
    [all …]
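The two get_idle_time() variants above are the two sides of an #ifdef in fs/proc/stat.c: one uses an arch-provided arch_idle_time(), the other prefers the precise nohz bookkeeping and falls back to the tick-based cputime counter. A minimal sketch of the fallback variant, reconstructed from the excerpt (the surrounding #ifdef is elided):

    #include <linux/kernel_stat.h>
    #include <linux/tick.h>

    static u64 get_idle_time(struct kernel_cpustat *kcs, int cpu)
    {
            u64 idle, idle_usecs = -1ULL;

            /* precise idle accounting is only maintained for online CPUs */
            if (cpu_online(cpu))
                    idle_usecs = get_cpu_idle_time_us(cpu, NULL);

            if (idle_usecs == -1ULL)
                    /* !NO_HZ or offline CPU: use the tick-based counter */
                    idle = kcs->cpustat[CPUTIME_IDLE];
            else
                    idle = idle_usecs * NSEC_PER_USEC;

            return idle;
    }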
|
/fs/squashfs/ |
D | decompressor_multi_percpu.c |
    in squashfs_decompressor_create():
      31          int err, cpu;
      37          for_each_possible_cpu(cpu) {
      38                  stream = per_cpu_ptr(percpu, cpu);
      50          for_each_possible_cpu(cpu) {
      51                  stream = per_cpu_ptr(percpu, cpu);
    in squashfs_decompressor_destroy():
      64          int cpu;
      67          for_each_possible_cpu(cpu) {
      68                  stream = per_cpu_ptr(percpu, cpu);
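The squashfs entry shows the canonical create/destroy shape for dynamically allocated per-CPU objects: alloc_percpu() reserves a zeroed copy for every possible CPU, each copy is initialized through per_cpu_ptr(), and a mid-loop failure unwinds whatever was already set up. A self-contained sketch of that shape; struct my_stream and its init/free helpers are hypothetical stand-ins:

    #include <linux/percpu.h>
    #include <linux/slab.h>

    struct my_stream {
            void *state;
    };

    static int my_stream_init(struct my_stream *s)
    {
            s->state = kzalloc(64, GFP_KERNEL);     /* placeholder per-CPU state */
            return s->state ? 0 : -ENOMEM;
    }

    static void my_stream_free(struct my_stream *s)
    {
            kfree(s->state);                        /* kfree(NULL) is a no-op */
    }

    static struct my_stream __percpu *create_streams(void)
    {
            struct my_stream __percpu *percpu;
            struct my_stream *stream;
            int err, cpu;

            percpu = alloc_percpu(struct my_stream);
            if (!percpu)
                    return NULL;

            for_each_possible_cpu(cpu) {
                    stream = per_cpu_ptr(percpu, cpu);
                    err = my_stream_init(stream);
                    if (err)
                            goto fail;
            }
            return percpu;

    fail:
            /* copies never reached are still zeroed, so freeing them is safe */
            for_each_possible_cpu(cpu) {
                    stream = per_cpu_ptr(percpu, cpu);
                    my_stream_free(stream);
            }
            free_percpu(percpu);
            return NULL;
    }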
|
/fs/btrfs/ |
D | ctree.h |
    in btrfs_disk_key_to_cpu():
    1838  static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu,
    1841          cpu->offset = le64_to_cpu(disk->offset);
    1842          cpu->type = disk->type;
    1843          cpu->objectid = le64_to_cpu(disk->objectid);
    in btrfs_cpu_key_to_disk():
    1847                                           const struct btrfs_key *cpu)
    1849          disk->offset = cpu_to_le64(cpu->offset);
    1850          disk->type = cpu->type;
    1851          disk->objectid = cpu_to_le64(cpu->objectid);
    in btrfs_disk_balance_args_to_cpu():
    2075  btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
    2078          memset(cpu, 0, sizeof(*cpu));
    [all …]
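btrfs keeps keys in two layouts: a packed little-endian btrfs_disk_key on disk and a native-endian btrfs_key in memory, converted field by field so the on-disk format is stable across architectures. The same pattern applied to a hypothetical record type:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct my_disk_rec {                    /* on-disk: fixed little-endian */
            __le64 ino;
            __le32 gen;
            __u8   type;
    } __attribute__((packed));

    struct my_cpu_rec {                     /* in-core: native endianness */
            u64 ino;
            u32 gen;
            u8  type;
    };

    static inline void my_disk_rec_to_cpu(struct my_cpu_rec *cpu,
                                          const struct my_disk_rec *disk)
    {
            cpu->ino  = le64_to_cpu(disk->ino);
            cpu->gen  = le32_to_cpu(disk->gen);
            cpu->type = disk->type;         /* single byte: no swap needed */
    }

    static inline void my_cpu_rec_to_disk(struct my_disk_rec *disk,
                                          const struct my_cpu_rec *cpu)
    {
            disk->ino  = cpu_to_le64(cpu->ino);
            disk->gen  = cpu_to_le32(cpu->gen);
            disk->type = cpu->type;
    }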
|
/fs/ |
D | seq_file.c |
    in seq_hlist_start_percpu():
    1063  seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t pos)
    1067          for_each_possible_cpu(*cpu) {
    1068                  hlist_for_each(node, per_cpu_ptr(head, *cpu)) {
    in seq_hlist_next_percpu():
    1088                                           int *cpu, loff_t *pos)
    1097          for (*cpu = cpumask_next(*cpu, cpu_possible_mask); *cpu < nr_cpu_ids;
    1098               *cpu = cpumask_next(*cpu, cpu_possible_mask)) {
    1099                  struct hlist_head *bucket = per_cpu_ptr(head, *cpu);
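These two helpers let a seq_file iterate one hlist bucket per CPU: ->start locates the node at position pos starting from the first possible CPU, and ->next advances within the current bucket before moving on to the next possible CPU via cpumask_next(). A sketch of the wiring; the my_* names are hypothetical, and real users keep the cpu cursor in the seq_file's private data rather than a static, which is not safe with concurrent readers:

    #include <linux/seq_file.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(struct hlist_head, my_heads);
    static int my_iter_cpu;         /* cursor carried between ->start/->next */

    static void *my_seq_start(struct seq_file *m, loff_t *pos)
    {
            /* take whatever lock protects the lists here (omitted) */
            return seq_hlist_start_percpu(&my_heads, &my_iter_cpu, *pos);
    }

    static void *my_seq_next(struct seq_file *m, void *v, loff_t *pos)
    {
            return seq_hlist_next_percpu(v, &my_heads, &my_iter_cpu, pos);
    }

    static void my_seq_stop(struct seq_file *m, void *v)
    {
            /* drop the lock taken in ->start (omitted) */
    }

    static int my_seq_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "node %p on cpu %d\n", v, my_iter_cpu);
            return 0;
    }

    static const struct seq_operations my_seq_ops = {
            .start = my_seq_start,
            .next  = my_seq_next,
            .stop  = my_seq_stop,
            .show  = my_seq_show,
    };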
|
D | aio.c |
     105          struct __percpu kioctx_cpu *cpu;
    in free_ioctx():
     589          free_percpu(ctx->cpu);
    in ioctx_alloc():
     756          ctx->cpu = alloc_percpu(struct kioctx_cpu);
     757          if (!ctx->cpu)
     803          free_percpu(ctx->cpu);
    in put_reqs_available():
     908          kcpu = this_cpu_ptr(ctx->cpu);
    in __get_reqs_available():
     926          kcpu = this_cpu_ptr(ctx->cpu);
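aio splits request accounting in two: a shared atomic pool in the kioctx plus a small per-CPU batch (struct kioctx_cpu) reached with this_cpu_ptr(), so the hot path usually touches only CPU-local memory. Interrupts are disabled around the per-CPU access because the counter is also updated from completion context. A sketch of the batching with hypothetical names (aio itself sizes the batch per context as req_batch):

    #include <linux/percpu.h>
    #include <linux/atomic.h>
    #include <linux/irqflags.h>

    #define MY_BATCH 32

    struct my_cpu_cache {
            unsigned int reqs_available;    /* CPU-local reserve */
    };

    struct my_ctx {
            struct my_cpu_cache __percpu *cpu;
            atomic_t reqs_available;        /* shared pool */
    };

    static bool my_get_req(struct my_ctx *ctx)
    {
            struct my_cpu_cache *kcpu;
            unsigned long flags;
            bool ret = false;

            local_irq_save(flags);
            kcpu = this_cpu_ptr(ctx->cpu);
            if (!kcpu->reqs_available) {
                    /* local batch empty: try to refill from the shared pool */
                    int avail = atomic_read(&ctx->reqs_available);

                    do {
                            if (avail < MY_BATCH)
                                    goto out;       /* pool exhausted */
                    } while (!atomic_try_cmpxchg(&ctx->reqs_available,
                                                 &avail, avail - MY_BATCH));
                    kcpu->reqs_available += MY_BATCH;
            }
            kcpu->reqs_available--;
            ret = true;
    out:
            local_irq_restore(flags);
            return ret;
    }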
|
D | buffer.c |
    in has_bh_in_lru():
    1382  static bool has_bh_in_lru(int cpu, void *dummy)
    1384          struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
    in buffer_exit_cpu_dead():
    3371  static int buffer_exit_cpu_dead(unsigned int cpu)
    3374          struct bh_lru *b = &per_cpu(bh_lrus, cpu);
    3380          this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
    3381          per_cpu(bh_accounting, cpu).nr = 0;
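buffer.c pairs its per-CPU buffer-head LRU with a CPU-hotplug "dead" callback: once a CPU is fully offline, a surviving CPU invalidates its LRU and folds its bh_accounting count into its own. A sketch of registering such a callback; buffer.c uses the fixed CPUHP_FS_BUFF_DEAD slot, while the sketch takes a dynamic one, and the my_* names are hypothetical:

    #include <linux/cpuhotplug.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(void *, my_slot);
    static DEFINE_PER_CPU(long, my_counter);

    /* runs on a surviving CPU after @cpu has gone fully offline */
    static int my_exit_cpu_dead(unsigned int cpu)
    {
            per_cpu(my_slot, cpu) = NULL;   /* real code would put/free the entry */

            /* fold the dead CPU's count into the current CPU's counter */
            this_cpu_add(my_counter, per_cpu(my_counter, cpu));
            per_cpu(my_counter, cpu) = 0;
            return 0;
    }

    static int __init my_hotplug_init(void)
    {
            int ret;

            /* NULL startup callback: nothing to do when a CPU comes up */
            ret = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
                                            "fs/mybuf:dead", NULL,
                                            my_exit_cpu_dead);
            return ret < 0 ? ret : 0;       /* dynamic states return a slot number */
    }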
|
D | namespace.c |
    in mnt_get_count():
     163          int cpu;
     165          for_each_possible_cpu(cpu) {
     166                  count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
    in mnt_get_writers():
     275          int cpu;
     277          for_each_possible_cpu(cpu) {
     278                  count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
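mnt_get_count() and mnt_get_writers() are the read side of a distributed counter: writers only increment their own CPU's slot, and readers sum the slots of every possible CPU. The total is exact only while writers are excluded; namespace.c reads under mount_lock. A sketch with a hypothetical my_pcp structure:

    #include <linux/percpu.h>

    struct my_pcp {
            int count;
    };

    /* my_pcp = alloc_percpu(struct my_pcp) at init time (not shown) */
    static struct my_pcp __percpu *my_pcp;

    static void my_inc(void)
    {
            this_cpu_inc(my_pcp->count);    /* no shared cacheline on the fast path */
    }

    static int my_get_count(void)
    {
            int count = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    count += per_cpu_ptr(my_pcp, cpu)->count;

            return count;
    }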
|
D | io_uring.c |
    in io_sq_offload_start():
    3208          int cpu = p->sq_thread_cpu;
    3211          if (cpu >= nr_cpu_ids)
    3213          if (!cpu_online(cpu))
    3217                          ctx, cpu,
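io_sq_offload_start() treats the CPU number from userspace as untrusted input: it must be range-checked against nr_cpu_ids and confirmed online before a kthread is pinned to it. A sketch under those assumptions; my_ctx and my_thread_fn are hypothetical, and the excerpt's surrounding setup-flags check is elided:

    #include <linux/cpumask.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/err.h>

    struct my_ctx {
            int unused;                     /* placeholder context */
    };

    static int my_thread_fn(void *data)
    {
            /* placeholder loop; a real worker would process ctx work here */
            while (!kthread_should_stop())
                    schedule_timeout_interruptible(HZ);
            return 0;
    }

    static int my_start_worker(struct my_ctx *ctx, int cpu)
    {
            struct task_struct *tsk;

            if (cpu < 0 || cpu >= nr_cpu_ids)
                    return -EINVAL;         /* out of range for cpumask ops */
            if (!cpu_online(cpu))
                    return -EINVAL;         /* cannot pin to an offline CPU */

            tsk = kthread_create_on_cpu(my_thread_fn, ctx, cpu, "my-worker/%u");
            if (IS_ERR(tsk))
                    return PTR_ERR(tsk);

            wake_up_process(tsk);
            return 0;
    }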
|
/fs/fscache/ |
D | main.c |
    in fscache_init():
     103          unsigned int cpu;
     126          for_each_possible_cpu(cpu)
     127                  init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu));
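fscache declares its congestion waitqueues statically per CPU, and DEFINE_PER_CPU only provides storage; objects that need constructor-style setup (a waitqueue head contains a spinlock and a list head) must still be initialized once per possible CPU at startup. Sketch:

    #include <linux/percpu.h>
    #include <linux/wait.h>

    static DEFINE_PER_CPU(wait_queue_head_t, my_cong_wait);

    static int __init my_init(void)
    {
            unsigned int cpu;

            /* storage is zeroed, but each copy still needs runtime init */
            for_each_possible_cpu(cpu)
                    init_waitqueue_head(&per_cpu(my_cong_wait, cpu));

            return 0;
    }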
|
/fs/xfs/ |
D | xfs_stats.c |
    in counter_val():
      12          int val = 0, cpu;
      14          for_each_possible_cpu(cpu)
      15                  val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));
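counter_val() sums one statistic across CPUs by treating the per-CPU stats struct as a flat array of __u32 and indexing it, which stays correct only while every field in the struct really is a __u32. The same idiom with a hypothetical layout:

    #include <linux/percpu.h>
    #include <linux/types.h>

    struct my_stats {                       /* must contain only __u32 fields */
            __u32 lookups;
            __u32 hits;
            __u32 misses;
    };

    static int counter_val(struct my_stats __percpu *stats, int idx)
    {
            int val = 0, cpu;

            for_each_possible_cpu(cpu)
                    val += *((__u32 *)per_cpu_ptr(stats, cpu) + idx);

            return val;
    }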
|
/fs/gfs2/ |
D | rgrp.c |
    in gfs2_rgrp_congested():
    1927          int cpu, nonzero = 0;
    1930          for_each_present_cpu(cpu) {
    1931                  st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
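Unlike the other entries, gfs2 walks for_each_present_cpu(): only CPUs that are physically present can have recorded lock statistics, so possible-but-absent slots are skipped. A simplified sketch of the congestion average; the my_* names and the threshold test are hypothetical reductions of gfs2_rgrp_congested():

    #include <linux/percpu.h>
    #include <linux/math64.h>

    struct my_lkstats {
            s64 srtt;                       /* smoothed RTT for this lock type */
    };

    static bool my_congested(struct my_lkstats __percpu *stats, s64 threshold)
    {
            const struct my_lkstats *st;
            int cpu, nonzero = 0;
            s64 sum = 0;

            for_each_present_cpu(cpu) {
                    st = per_cpu_ptr(stats, cpu);
                    if (st->srtt) {
                            sum += st->srtt;
                            nonzero++;
                    }
            }

            /* average only over CPUs that actually recorded a sample */
            return nonzero && div_s64(sum, nonzero) > threshold;
    }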
|
/fs/nfs/ |
D | super.c |
    in nfs_show_stats():
     824          int i, cpu;
     875          for_each_possible_cpu(cpu) {
     879                  stats = per_cpu_ptr(nfss->io_stats, cpu);
|