/kernel/ |
D | cfi.c |
     63  unsigned long index;                                        in ptr_to_shadow() local
     69  index = page - s->r.min_page;                               in ptr_to_shadow()
     71  if (index >= SHADOW_SIZE)                                   in ptr_to_shadow()
     74  return (int)index;                                          in ptr_to_shadow()
     78  int index)                                                  in shadow_to_ptr() argument
     80  if (unlikely(index < 0 || index >= SHADOW_SIZE))            in shadow_to_ptr()
     83  if (unlikely(s->shadow[index] == SHADOW_INVALID))           in shadow_to_ptr()
     86  return (s->r.min_page + s->shadow[index]) << PAGE_SHIFT;    in shadow_to_ptr()
     90  int index)                                                  in shadow_to_page() argument
     92  if (unlikely(index < 0 || index >= SHADOW_SIZE))            in shadow_to_page()
    [all …]
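The cfi.c hits above are the two halves of a guarded translation between a page address and a shadow-table slot: clamp on the way in, validate on the way out. A minimal userspace sketch of the same pattern; PAGE_SHIFT, SHADOW_SIZE, SHADOW_INVALID and the struct layout are illustrative assumptions, not the kernel's definitions.

    #include <stdint.h>

    #define PAGE_SHIFT      12
    #define SHADOW_SIZE     4096
    #define SHADOW_INVALID  0xffffu

    struct shadow_range { unsigned long min_page; };
    struct cfi_shadow {
        struct shadow_range r;
        uint16_t shadow[SHADOW_SIZE];
    };

    /* Address in, table slot out: the unsigned subtraction wraps to a
     * huge value when page < min_page, so one range test rejects both
     * underflow and overflow. */
    static int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)
    {
        unsigned long page = ptr >> PAGE_SHIFT;
        unsigned long index = page - s->r.min_page;

        if (index >= SHADOW_SIZE)
            return -1;
        return (int)index;
    }

    /* Table slot in, address out: reject bad indices and unpopulated
     * entries before rebuilding the page address. */
    static unsigned long shadow_to_ptr(const struct cfi_shadow *s, int index)
    {
        if (index < 0 || index >= SHADOW_SIZE)
            return 0;
        if (s->shadow[index] == SHADOW_INVALID)
            return 0;
        return (s->r.min_page + s->shadow[index]) << PAGE_SHIFT;
    }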
|
D | audit_tree.c |
     36  unsigned index; /* index; upper bit indicates 'will prune' */    member
    205  chunk->owners[i].index = i;                                      in alloc_chunk()
    276  int index = p->index & ~(1U<<31);                                in find_chunk() local
    277  p -= index;                                                      in find_chunk()
    311  new->owners[i].index = old->owners[j].index - j + i;             in replace_chunk()
    434  chunk->owners[0].index = (1U << 31);                             in create_chunk()
    506  p->index = (chunk->count - 1) | (1U<<31);                        in tag_chunk()
    581  if (tagged && !(p->index & (1U<<31)))                            in prune_tree_chunks()
    623  if (node->index & (1U<<31)) {                                    in trim_marked()
    711  node->index |= 1U<<31;                                           in audit_trim_trees()
    [all …]
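audit_tree.c packs two things into one word: the low 31 bits of .index hold the owner slot (find_chunk() steps back by it), and the top bit marks the node 'will prune'. A small sketch of that encoding, with hypothetical helper names:

    #include <stdbool.h>

    #define PRUNE_BIT (1U << 31)

    struct audit_node {
        unsigned index;   /* low 31 bits: owner slot; top bit: 'will prune' */
    };

    static unsigned node_slot(const struct audit_node *p)
    {
        return p->index & ~PRUNE_BIT;   /* strip the flag, keep the slot */
    }

    static void node_mark_prune(struct audit_node *p)
    {
        p->index |= PRUNE_BIT;          /* set the flag, keep the slot */
    }

    static bool node_will_prune(const struct audit_node *p)
    {
        return p->index & PRUNE_BIT;
    }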
|
D | module.c |
    644  Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];              in percpu_modalloc()
    741  if (info->sechdrs[info->index.pcpu].sh_size != 0)                  in percpu_modalloc()
   1300  unsigned int versindex = info->index.vers;                         in check_version()
   2128  symndx = info->index.sym;                                          in copy_module_elf()
   2298  Elf_Shdr *symsec = &info->sechdrs[info->index.sym];                in simplify_symbols()
   2351  if (sym[i].st_shndx == info->index.pcpu)                           in simplify_symbols()
   2386  info->index.sym, i, mod);                                          in apply_relocations()
   2389  info->index.sym, i, mod);                                          in apply_relocations()
   2546  Elf_Shdr *infosec = &info->sechdrs[info->index.info];              in get_next_modinfo()
   2630  if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu) in elf_type()
    [all …]
|
D | module-internal.h |
     28  } index;                                                           member
|
/kernel/trace/ |
D | fgraph.c |
     63  int index;                                                 in ftrace_push_return_trace() local
     85  index = ++current->curr_ret_stack;                         in ftrace_push_return_trace()
     87  current->ret_stack[index].ret = ret;                       in ftrace_push_return_trace()
     88  current->ret_stack[index].func = func;                     in ftrace_push_return_trace()
     89  current->ret_stack[index].calltime = calltime;             in ftrace_push_return_trace()
     91  current->ret_stack[index].fp = frame_pointer;              in ftrace_push_return_trace()
     94  current->ret_stack[index].retp = retp;                     in ftrace_push_return_trace()
    127  int index;                                                 in ftrace_pop_return_trace() local
    129  index = current->curr_ret_stack;                           in ftrace_pop_return_trace()
    131  if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) { in ftrace_pop_return_trace()
    [all …]
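fgraph.c pushes one record per traced call onto a fixed-depth per-task return stack and pops it on function exit, refusing overflow on the way in and a corrupted depth counter on the way out. A simplified single-task sketch, assuming a trimmed-down entry layout:

    #include <stdio.h>

    #define FTRACE_RETFUNC_DEPTH 50

    struct ret_entry {
        unsigned long ret;    /* saved return address */
        unsigned long func;   /* traced function */
    };

    static struct ret_entry ret_stack[FTRACE_RETFUNC_DEPTH];
    static int curr_ret_stack = -1;   /* -1 means empty */

    static int push_return(unsigned long ret, unsigned long func)
    {
        int index;

        if (curr_ret_stack >= FTRACE_RETFUNC_DEPTH - 1)
            return -1;                 /* full: refuse instead of overflowing */

        index = ++curr_ret_stack;      /* claim the slot, then fill it */
        ret_stack[index].ret = ret;
        ret_stack[index].func = func;
        return 0;
    }

    static unsigned long pop_return(void)
    {
        int index = curr_ret_stack;

        if (index < 0 || index >= FTRACE_RETFUNC_DEPTH) {
            fprintf(stderr, "bad ret_stack index %d\n", index);
            return 0;                  /* depth counter is corrupt: bail */
        }
        curr_ret_stack--;
        return ret_stack[index].ret;
    }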
|
D | ftrace.c |
    401  unsigned long index;                                               member
    441  if ((void *)rec >= (void *)&pg->records[pg->index]) {              in function_stat_next()
    580  pg->index = 0;                                                     in ftrace_profile_reset()
    743  if (stat->pages->index == PROFILES_PER_PAGE) {                     in ftrace_profile_alloc()
    749  rec = &stat->pages->records[stat->pages->index++];                 in ftrace_profile_alloc()
   1102  int index;                                                         member
   1517  for (_____i = 0; _____i < pg->index; _____i++) { \
   1560  start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))       in ftrace_location_range()
   1562  rec = bsearch(&key, pg->records, pg->index,                        in ftrace_location_range()
   2426  int index;                                                         member
    [all …]
|
D | bpf_trace.c |
    352  u64 index = flags & BPF_F_INDEX_MASK;              in get_map_perf_counter() local
    357  if (index == BPF_F_CURRENT_CPU)                    in get_map_perf_counter()
    358  index = cpu;                                       in get_map_perf_counter()
    359  if (unlikely(index >= array->map.max_entries))     in get_map_perf_counter()
    362  ee = READ_ONCE(array->ptrs[index]);                in get_map_perf_counter()
    425  u64 index = flags & BPF_F_INDEX_MASK;              in __bpf_perf_event_output() local
    429  if (index == BPF_F_CURRENT_CPU)                    in __bpf_perf_event_output()
    430  index = cpu;                                       in __bpf_perf_event_output()
    431  if (unlikely(index >= array->map.max_entries))     in __bpf_perf_event_output()
    434  ee = READ_ONCE(array->ptrs[index]);                in __bpf_perf_event_output()
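Both bpf_trace.c helpers decode their perf-event slot identically: the low 32 bits of the flags word carry the index, the all-ones value BPF_F_CURRENT_CPU means 'the CPU I am on', and anything past the array bound is rejected before the entry is read. A sketch of just the decoding step (the error value stands in for the kernel's -E2BIG):

    #include <stdint.h>

    #define BPF_F_INDEX_MASK   0xffffffffULL
    #define BPF_F_CURRENT_CPU  BPF_F_INDEX_MASK   /* all-ones: "this CPU" */

    /* Returns the resolved array slot, or -1 when it is out of range. */
    static long resolve_perf_index(uint64_t flags, uint32_t cpu,
                                   uint32_t max_entries)
    {
        uint64_t index = flags & BPF_F_INDEX_MASK;

        if (index == BPF_F_CURRENT_CPU)
            index = cpu;
        if (index >= max_entries)
            return -1;
        return (long)index;
    }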
|
D | trace_probe.c |
    150  trace_probe_log.index = 0;                             in trace_probe_log_init()
    158  void trace_probe_log_set_index(int index)              in trace_probe_log_set_index() argument
    160  trace_probe_log.index = index;                         in trace_probe_log_set_index()
    173  if (i == trace_probe_log.index)                        in __trace_probe_log_err()
    181  if (trace_probe_log.index >= trace_probe_log.argc) {   in __trace_probe_log_err()
|
/kernel/dma/ |
D | swiotlb.c |
    455  unsigned int nslots, stride, index, wrap;              in swiotlb_tbl_map_single() local
    508  index = ALIGN(io_tlb_index, stride);                   in swiotlb_tbl_map_single()
    509  if (index >= io_tlb_nslabs)                            in swiotlb_tbl_map_single()
    510  index = 0;                                             in swiotlb_tbl_map_single()
    511  wrap = index;                                          in swiotlb_tbl_map_single()
    514  while (iommu_is_span_boundary(index, nslots, offset_slots,    in swiotlb_tbl_map_single()
    516  index += stride;                                       in swiotlb_tbl_map_single()
    517  if (index >= io_tlb_nslabs)                            in swiotlb_tbl_map_single()
    518  index = 0;                                             in swiotlb_tbl_map_single()
    519  if (index == wrap)                                     in swiotlb_tbl_map_single()
    [all …]
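swiotlb_tbl_map_single() scans its slot table as a ring: start at a stride-aligned index, wrap at io_tlb_nslabs, and give up once the scan returns to where it began. A sketch of the wrap-and-detect loop, with the allocator's suitability tests abstracted behind a callback:

    /* 'unusable' stands in for the span-boundary and occupancy tests of
     * the real allocator. Assumes 'start' is stride-aligned and nslabs
     * is a multiple of stride (the kernel arranges both), so the scan
     * provably comes back to 'wrap' and terminates. */
    static int find_slot(unsigned int start, unsigned int stride,
                         unsigned int nslabs,
                         int (*unusable)(unsigned int index))
    {
        unsigned int index = start, wrap;

        if (index >= nslabs)
            index = 0;
        wrap = index;                  /* remember where this lap began */

        do {
            if (!unusable(index))
                return (int)index;     /* found a candidate slot */
            index += stride;
            if (index >= nslabs)
                index = 0;             /* wrap around the table */
        } while (index != wrap);       /* one full lap: give up */

        return -1;
    }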
|
/kernel/sched/ |
D | cpuacct.c |
     99  enum cpuacct_stat_index index)                           in cpuacct_cpuusage_read() argument
    108  BUG_ON(index > CPUACCT_STAT_NSTATS);                     in cpuacct_cpuusage_read()
    117  if (index == CPUACCT_STAT_NSTATS) {                      in cpuacct_cpuusage_read()
    124  data = cpuusage->usages[index];                          in cpuacct_cpuusage_read()
    156  enum cpuacct_stat_index index)                           in __cpuusage_read() argument
    163  totalcpuusage += cpuacct_cpuusage_read(ca, i, index);    in __cpuusage_read()
    204  enum cpuacct_stat_index index)                           in __cpuacct_percpu_seq_show() argument
    211  percpu = cpuacct_cpuusage_read(ca, i, index);            in __cpuacct_percpu_seq_show()
    236  int index;                                               in cpuacct_all_seq_show() local
    240  for (index = 0; index < CPUACCT_STAT_NSTATS; index++)    in cpuacct_all_seq_show()
    [all …]
|
D | cputime.c |
     99  static inline void task_group_account_field(struct task_struct *p, int index,    in task_group_account_field() argument
    108  __this_cpu_add(kernel_cpustat.cpustat[index], tmp);      in task_group_account_field()
    110  cgroup_account_cputime_field(p, index, tmp);             in task_group_account_field()
    120  int index;                                               in account_user_time() local
    126  index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;    in account_user_time()
    129  task_group_account_field(p, index, cputime);             in account_user_time()
    169  u64 cputime, enum cpu_usage_stat index)                  in account_system_index_time() argument
    176  task_group_account_field(p, index, cputime);             in account_system_index_time()
    193  int index;                                               in account_system_time() local
    201  index = CPUTIME_IRQ;                                     in account_system_time()
    [all …]
|
/kernel/irq/ |
D | timings.c |
    384  int index, i, period_max, count, start, min = INT_MAX;                 in __irq_timings_next_event() local
    422  int index = (start + i) & IRQ_TIMINGS_MASK;                            in __irq_timings_next_event() local
    424  irqs->timings[i] = irqs->circ_timings[index];                          in __irq_timings_next_event()
    428  index = irq_timings_next_event_index(irqs->timings, count, period_max); in __irq_timings_next_event()
    429  if (index < 0)                                                         in __irq_timings_next_event()
    432  return irqs->last_ts + irqs->ema_time[index];                          in __irq_timings_next_event()
    449  int index;                                                             in __irq_timings_store() local
    454  index = irq_timings_interval_index(interval);                          in __irq_timings_store()
    460  irqs->circ_timings[irqs->count & IRQ_TIMINGS_MASK] = index;            in __irq_timings_store()
    462  irqs->ema_time[index] = irq_timings_ema_new(interval,                  in __irq_timings_store()
    [all …]
|
/kernel/cgroup/ |
D | rdma.c |
     83  int index, int new_max)                                in set_resource_limit() argument
     86  if (rpool->resources[index].max != S32_MAX)            in set_resource_limit()
     89  if (rpool->resources[index].max == S32_MAX)            in set_resource_limit()
     92  rpool->resources[index].max = new_max;                 in set_resource_limit()
    164  enum rdmacg_resource_type index)                       in uncharge_cg_locked() argument
    180  rpool->resources[index].usage--;                       in uncharge_cg_locked()
    186  WARN_ON_ONCE(rpool->resources[index].usage < 0);       in uncharge_cg_locked()
    208  enum rdmacg_resource_type index)                       in rdmacg_uncharge_hierarchy() argument
    215  uncharge_cg_locked(p, device, index);                  in rdmacg_uncharge_hierarchy()
    229  enum rdmacg_resource_type index)                       in rdmacg_uncharge() argument
    [all …]
|
D | cgroup-v1.c |
    404  int index = 0, pid = *pos;                             in cgroup_pidlist_start() local
    433  while (index < end) {                                  in cgroup_pidlist_start()
    434  int mid = (index + end) / 2;                           in cgroup_pidlist_start()
    436  index = mid;                                           in cgroup_pidlist_start()
    439  index = mid + 1;                                       in cgroup_pidlist_start()
    445  if (index >= l->length)                                in cgroup_pidlist_start()
    448  iter = l->list + index;                                in cgroup_pidlist_start()
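cgroup_pidlist_start() resumes a seq_file walk by binary-searching the sorted pid array for the saved position. A simplified lower-bound variant of the same search (the kernel version also breaks out early on an exact match):

    /* Find the first entry >= pid in a sorted array; returns 'length'
     * if every entry is smaller. */
    static int pidlist_lower_bound(const int *list, int length, int pid)
    {
        int index = 0, end = length;

        while (index < end) {
            int mid = (index + end) / 2;

            if (list[mid] >= pid)
                end = mid;             /* mid may be the answer: keep it */
            else
                index = mid + 1;       /* entries up to mid are too small */
        }
        return index;
    }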
|
/kernel/bpf/ |
D | reuseport_array.c |
     52  u32 index = *(u32 *)key;                               in reuseport_array_lookup_elem() local
     54  if (unlikely(index >= array->map.max_entries))         in reuseport_array_lookup_elem()
     57  return rcu_dereference(array->ptrs[index]);            in reuseport_array_lookup_elem()
     64  u32 index = *(u32 *)key;                               in reuseport_array_delete_elem() local
     68  if (index >= map->max_entries)                         in reuseport_array_delete_elem()
     71  if (!rcu_access_pointer(array->ptrs[index]))           in reuseport_array_delete_elem()
     76  sk = rcu_dereference_protected(array->ptrs[index],     in reuseport_array_delete_elem()
     81  RCU_INIT_POINTER(array->ptrs[index], NULL);            in reuseport_array_delete_elem()
    254  u32 index = *(u32 *)key;                               in bpf_fd_reuseport_array_update_elem() local
    261  if (index >= map->max_entries)                         in bpf_fd_reuseport_array_update_elem()
    [all …]
|
D | arraymap.c |
    146  u32 index = *(u32 *)key;                                               in array_map_lookup_elem() local
    148  if (unlikely(index >= array->map.max_entries))                         in array_map_lookup_elem()
    151  return array->value + array->elem_size * (index & array->index_mask);  in array_map_lookup_elem()
    192  const int index = BPF_REG_2;                                           in array_map_gen_lookup() local
    195  *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);                           in array_map_gen_lookup()
    218  u32 index = *(u32 *)key;                                               in percpu_array_map_lookup_elem() local
    220  if (unlikely(index >= array->map.max_entries))                         in percpu_array_map_lookup_elem()
    223  return this_cpu_ptr(array->pptrs[index & array->index_mask]);          in percpu_array_map_lookup_elem()
    229  u32 index = *(u32 *)key;                                               in bpf_percpu_array_copy() local
    234  if (unlikely(index >= array->map.max_entries))                         in bpf_percpu_array_copy()
    [all …]
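arraymap.c shows why the bounds check alone is not the whole story: after the architectural index >= max_entries test, the index is additionally ANDed with index_mask so that even a speculatively executed lookup stays inside the rounded-up allocation (the Spectre v1 mitigation). A sketch with an assumed struct layout:

    #include <stdint.h>
    #include <stddef.h>

    struct bpf_array_sketch {
        uint32_t max_entries;
        uint32_t index_mask;   /* max_entries rounded up to a power of
                                * two, minus one */
        uint32_t elem_size;
        char *value;           /* rounded-up max_entries * elem_size bytes */
    };

    static void *array_lookup(const struct bpf_array_sketch *a, uint32_t index)
    {
        if (index >= a->max_entries)
            return NULL;       /* the architectural bounds check */

        /* The mask keeps even a mispredicted, speculatively executed
         * load inside the allocation. */
        return a->value + (size_t)a->elem_size * (index & a->index_mask);
    }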
|
D | queue_stack_maps.c |
    149  u32 index;                                             in __stack_map_get() local
    159  index = qs->head - 1;                                  in __stack_map_get()
    160  if (unlikely(index >= qs->size))                       in __stack_map_get()
    161  index = qs->size - 1;                                  in __stack_map_get()
    163  ptr = &qs->elements[index * qs->map.value_size];       in __stack_map_get()
    167  qs->head = index;                                      in __stack_map_get()
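__stack_map_get() locates the top of the stack with one unsigned trick: head - 1 underflows to UINT32_MAX when head is 0, and the single >= size test catches both that wrap and any ordinary out-of-range value. The index computation in isolation:

    #include <stdint.h>

    /* 'head' points one past the most recently pushed element of a
     * circular buffer with 'size' slots. */
    static uint32_t stack_top_index(uint32_t head, uint32_t size)
    {
        uint32_t index = head - 1;     /* head == 0 wraps to UINT32_MAX */

        if (index >= size)
            index = size - 1;          /* wrapped: top is in the last slot */
        return index;
    }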
|
D | xskmap.c |
    149  u32 index = key ? *(u32 *)key : U32_MAX;               in xsk_map_get_next_key() local
    152  if (index >= m->map.max_entries) {                     in xsk_map_get_next_key()
    157  if (index == m->map.max_entries - 1)                   in xsk_map_get_next_key()
    159  *next = index + 1;                                     in xsk_map_get_next_key()
|
D | cpumap.c |
    578  u32 index = key ? *(u32 *)key : U32_MAX;               in cpu_map_get_next_key() local
    581  if (index >= cmap->map.max_entries) {                  in cpu_map_get_next_key()
    586  if (index == cmap->map.max_entries - 1)                in cpu_map_get_next_key()
    588  *next = index + 1;                                     in cpu_map_get_next_key()
|
D | devmap.c |
    276  u32 index = key ? *(u32 *)key : U32_MAX;               in dev_map_get_next_key() local
    279  if (index >= dtab->map.max_entries) {                  in dev_map_get_next_key()
    284  if (index == dtab->map.max_entries - 1)                in dev_map_get_next_key()
    286  *next = index + 1;                                     in dev_map_get_next_key()
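xskmap, cpumap and devmap above share this get_next_key() idiom verbatim. A sketch of the common contract: an absent or out-of-range key restarts the walk at slot 0, the last slot reports -ENOENT, and everything else yields index + 1.

    #include <stdint.h>
    #include <errno.h>

    static int map_get_next_key(const uint32_t *key, uint32_t max_entries,
                                uint32_t *next)
    {
        uint32_t index = key ? *key : UINT32_MAX;

        if (index >= max_entries) {
            *next = 0;                 /* absent or bogus key: restart at 0 */
            return 0;
        }
        if (index == max_entries - 1)
            return -ENOENT;            /* already at the last slot */
        *next = index + 1;
        return 0;
    }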
|
/kernel/power/ |
D | main.c |
    300  int index;                                                       in last_failed_dev_show() local
    303  index = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;      in last_failed_dev_show()
    304  index %= REC_FAILED_NUM;                                         in last_failed_dev_show()
    305  last_failed_dev = suspend_stats.failed_devs[index];              in last_failed_dev_show()
    314  int index;                                                       in last_failed_errno_show() local
    317  index = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;    in last_failed_errno_show()
    318  index %= REC_FAILED_NUM;                                         in last_failed_errno_show()
    319  last_failed_errno = suspend_stats.errno[index];                  in last_failed_errno_show()
    328  int index;                                                       in last_failed_step_show() local
    332  index = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;     in last_failed_step_show()
    [all …]
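All three last_failed_* attributes read the newest entry of a small circular record: the counter doubles as the slot the next failure will overwrite, so the most recent record sits one behind it. The add-then-modulo step, with REC_FAILED_NUM assumed to be the kernel's value of 2:

    #define REC_FAILED_NUM 2

    /* Adding REC_FAILED_NUM before the modulo keeps the sum
     * non-negative when the counter is still zero. */
    static int most_recent_slot(int last_failed)
    {
        return (last_failed + REC_FAILED_NUM - 1) % REC_FAILED_NUM;
    }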
|
D | snapshot.c |
    466  int index;                                                 in add_rtree_block() local
    476  index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);      in add_rtree_block()
    477  index &= BM_RTREE_LEVEL_MASK;                              in add_rtree_block()
    478  dst = (struct rtree_node **)&((*dst)->data[index]);        in add_rtree_block()
    752  int index;                                                 in memory_bm_find_bit() local
    754  index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);      in memory_bm_find_bit()
    755  index &= BM_RTREE_LEVEL_MASK;                              in memory_bm_find_bit()
    756  BUG_ON(node->data[index] == 0);                            in memory_bm_find_bit()
    757  node = (struct rtree_node *)node->data[index];             in memory_bm_find_bit()
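Both snapshot.c walks peel the radix-tree slot for a block number out of the same bit field: level i of the tree, counting up from the leaves, owns the i-th group of BM_RTREE_LEVEL_SHIFT bits. The extraction in isolation, with an assumed shift width (the kernel derives it from the page size):

    #define BM_RTREE_LEVEL_SHIFT  9   /* assumed: 512 slots per node */
    #define BM_RTREE_LEVEL_MASK   ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)

    /* Slot for 'block_nr' at tree level i (i >= 1). */
    static unsigned long rtree_slot(unsigned long block_nr, int i)
    {
        return (block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT))
                & BM_RTREE_LEVEL_MASK;
    }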
|
/kernel/printk/ |
D | printk.c |
   2130  if (strcmp(c->name, name) == 0 && c->index == idx) {               in __add_preferred_console()
   2144  c->index = idx;                                                    in __add_preferred_console()
   2579  struct tty_driver *console_device(int *index)                      in console_device() argument
   2588  driver = c->device(c, index);                                      in console_device()
   2660  bcon->name, bcon->index))                                          in register_console()
   2672  newcon->name, newcon->index);                                      in register_console()
   2690  if (newcon->index < 0)                                             in register_console()
   2691  newcon->index = 0;                                                 in register_console()
   2710  newcon->match(newcon, c->name, c->index, c->options) != 0) {       in register_console()
   2715  if (newcon->index >= 0 &&                                          in register_console()
    [all …]
|
D | console_cmdline.h |
      8  int index;  /* Minor dev. to use */                                member
|
/kernel/time/ |
D | hrtimer.c |
     74  .index = HRTIMER_BASE_MONOTONIC,
     79  .index = HRTIMER_BASE_REALTIME,
     84  .index = HRTIMER_BASE_BOOTTIME,
     89  .index = HRTIMER_BASE_TAI,
     94  .index = HRTIMER_BASE_MONOTONIC_SOFT,
     99  .index = HRTIMER_BASE_REALTIME_SOFT,
    104  .index = HRTIMER_BASE_BOOTTIME_SOFT,
    109  .index = HRTIMER_BASE_TAI_SOFT,
    226  int basenum = base->index;                             in switch_hrtimer_base()
    967  base->cpu_base->active_bases |= 1 << base->index;      in enqueue_hrtimer()
    [all …]
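Each hrtimer clock base carries a fixed .index (the eight initializers above), and enqueue_hrtimer() sets the matching bit in a per-CPU active_bases word so the expiry path can skip bases with no armed timers. A sketch of that bitmap bookkeeping, with hypothetical names:

    #include <stdbool.h>

    struct cpu_base_sketch {
        unsigned int active_bases;   /* one bit per clock base */
    };

    static void mark_base_active(struct cpu_base_sketch *cb, int index)
    {
        cb->active_bases |= 1U << index;   /* as enqueue_hrtimer() does */
    }

    static bool base_has_timers(const struct cpu_base_sketch *cb, int index)
    {
        return cb->active_bases & (1U << index);
    }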
|