/kernel/trace/ |
D | trace_recursion_record.c |
     27  int index = 0;  in ftrace_record_recursion() local
     58  if (index < i)  in ftrace_record_recursion()
     59  index = i;  in ftrace_record_recursion()
     60  if (index >= CONFIG_FTRACE_RECORD_RECURSION_SIZE)  in ftrace_record_recursion()
     63  for (i = index - 1; i >= 0; i--) {  in ftrace_record_recursion()
     78  old = cmpxchg(&recursed_functions[index].ip, 0, ip);  in ftrace_record_recursion()
     84  index++;  in ftrace_record_recursion()
     88  recursed_functions[index].parent_ip = parent_ip;  in ftrace_record_recursion()
    112  cmpxchg(&recursed_functions[index].ip, ip, 0);  in ftrace_record_recursion()
    113  else if (i <= index)  in ftrace_record_recursion()
    [all …]
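A minimal user-space sketch of the slot-claiming idiom visible in the ftrace_record_recursion() hits above: cmpxchg(&recursed_functions[index].ip, 0, ip) installs ip only if the slot still holds 0, and the returned old value tells the caller whether it won the race against other CPUs. The sketch uses C11 atomics and illustrative names, not the kernel's cmpxchg() or its data structures.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_SLOTS 16

/* illustrative stand-in for the recursed_functions[] table */
static _Atomic uintptr_t slots[NR_SLOTS];

/* Try to claim slots[index] for 'ip'; return true if we won the race. */
static bool claim_slot(unsigned int index, uintptr_t ip)
{
	uintptr_t expected = 0;

	/* compare-and-swap 0 -> ip, as in cmpxchg(&...ip, 0, ip) */
	return atomic_compare_exchange_strong(&slots[index], &expected, ip);
}

int main(void)
{
	printf("first claim: %d\n", claim_slot(3, 0xdeadbeef));   /* 1 */
	printf("second claim: %d\n", claim_slot(3, 0xcafef00d));  /* 0: already taken */
	return 0;
}
```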
|
D | fgraph.c |
     72  int index;  in ftrace_push_return_trace() local
     94  index = ++current->curr_ret_stack;  in ftrace_push_return_trace()
     96  current->ret_stack[index].ret = ret;  in ftrace_push_return_trace()
     97  current->ret_stack[index].func = func;  in ftrace_push_return_trace()
     98  current->ret_stack[index].calltime = calltime;  in ftrace_push_return_trace()
    100  current->ret_stack[index].fp = frame_pointer;  in ftrace_push_return_trace()
    103  current->ret_stack[index].retp = retp;  in ftrace_push_return_trace()
    161  int index;  in ftrace_pop_return_trace() local
    163  index = current->curr_ret_stack;  in ftrace_pop_return_trace()
    165  if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {  in ftrace_pop_return_trace()
    [all …]
|
D | trace_events_user.c |
      62  #define MAP_STATUS_BYTE(index) ((index) >> 3)  argument
      63  #define MAP_STATUS_MASK(index) BIT((index) & 7)  argument
     114  int index;  member
     277  int i = user->index;  in user_event_register_set()
     285  int i = user->index;  in user_event_register_clear()
     837  clear_bit(user->index, user->group->page_bitmap);  in destroy_user_event()
    1277  int index;  in user_event_parse() local
    1296  index = find_first_zero_bit(group->page_bitmap, MAX_EVENTS);  in user_event_parse()
    1298  if (index == MAX_EVENTS)  in user_event_parse()
    1346  user->index = index;  in user_event_parse()
    [all …]
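A small sketch of the byte/bit split performed by MAP_STATUS_BYTE() and MAP_STATUS_MASK() above: an event index selects a byte of the status map with index >> 3 and a bit inside it with index & 7. The macro and variable names below are illustrative stand-ins, not the kernel's.

```c
#include <stdio.h>

#define STATUS_BYTE(index) ((index) >> 3)          /* which byte holds the bit   */
#define STATUS_MASK(index) (1u << ((index) & 7))   /* which bit within that byte */

int main(void)
{
	unsigned char status[4] = { 0 };   /* room for 32 event indexes */
	unsigned int index = 13;

	status[STATUS_BYTE(index)] |= STATUS_MASK(index);     /* set   */
	printf("byte %u, mask 0x%02x, set=%d\n",
	       STATUS_BYTE(index), STATUS_MASK(index),
	       !!(status[STATUS_BYTE(index)] & STATUS_MASK(index)));
	status[STATUS_BYTE(index)] &= ~STATUS_MASK(index);    /* clear */
	return 0;
}
```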
|
D | ftrace.c |
     394  unsigned long index;  member
     434  if ((void *)rec >= (void *)&pg->records[pg->index]) {  in function_stat_next()
     573  pg->index = 0;  in ftrace_profile_reset()
     736  if (stat->pages->index == PROFILES_PER_PAGE) {  in ftrace_profile_alloc()
     742  rec = &stat->pages->records[stat->pages->index++];  in ftrace_profile_alloc()
    1087  int index;  member
    1510  for (_____i = 0; _____i < pg->index; _____i++) { \
    1540  if (pg->index == 0 ||  in lookup_rec()
    1542  start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))  in lookup_rec()
    1544  rec = bsearch(&key, pg->records, pg->index,  in lookup_rec()
    [all …]
|
/kernel/irq/ |
D | timings.c |
    384  int index, i, period_max, count, start, min = INT_MAX;  in __irq_timings_next_event() local
    422  int index = (start + i) & IRQ_TIMINGS_MASK;  in __irq_timings_next_event() local
    424  irqs->timings[i] = irqs->circ_timings[index];  in __irq_timings_next_event()
    428  index = irq_timings_next_event_index(irqs->timings, count, period_max);  in __irq_timings_next_event()
    429  if (index < 0)  in __irq_timings_next_event()
    432  return irqs->last_ts + irqs->ema_time[index];  in __irq_timings_next_event()
    449  int index;  in __irq_timings_store() local
    454  index = irq_timings_interval_index(interval);  in __irq_timings_store()
    456  if (index > PREDICTION_BUFFER_SIZE - 1) {  in __irq_timings_store()
    465  irqs->circ_timings[irqs->count & IRQ_TIMINGS_MASK] = index;  in __irq_timings_store()
    [all …]
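A brief sketch of the power-of-two circular-buffer indexing seen in the timings.c hits: a free-running counter is masked with count & IRQ_TIMINGS_MASK to pick the slot to write, and the oldest-to-newest readout walks (start + i) & mask. The buffer size and names below are illustrative.

```c
#include <stdio.h>

#define RING_SIZE 8                      /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

static int ring[RING_SIZE];
static unsigned int count;               /* free-running write counter */

static void ring_store(int value)
{
	ring[count & RING_MASK] = value;     /* like circ_timings[count & IRQ_TIMINGS_MASK] */
	count++;
}

int main(void)
{
	for (int v = 1; v <= 11; v++)
		ring_store(v);

	/* read the last RING_SIZE entries oldest first: (start + i) & mask */
	unsigned int start = count;          /* count points one past the newest slot */
	for (unsigned int i = 0; i < RING_SIZE; i++)
		printf("%d ", ring[(start + i) & RING_MASK]);
	printf("\n");                        /* 4 5 6 7 8 9 10 11 */
	return 0;
}
```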
|
D | msi.c |
     61  static int msi_insert_desc(struct msi_device_data *md, struct msi_desc *desc, unsigned int index)  in msi_insert_desc() argument
     65  desc->msi_index = index;  in msi_insert_desc()
     66  ret = xa_insert(&md->__store, index, desc, GFP_KERNEL);  in msi_insert_desc()
    102  static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsigned int ndesc)  in msi_add_simple_msi_descs() argument
    104  unsigned int idx, last = index + ndesc - 1;  in msi_add_simple_msi_descs()
    110  for (idx = index; idx <= last; idx++) {  in msi_add_simple_msi_descs()
    123  msi_free_msi_descs_range(dev, MSI_DESC_NOTASSOCIATED, index, last);  in msi_add_simple_msi_descs()
    317  unsigned int msi_get_virq(struct device *dev, unsigned int index)  in msi_get_virq() argument
    329  desc = xa_load(&dev->msi.data->__store, pcimsi ? 0 : index);  in msi_get_virq()
    337  if (index < desc->nvec_used)  in msi_get_virq()
    [all …]
|
/kernel/bpf/ |
D | reuseport_array.c |
     53  u32 index = *(u32 *)key;  in reuseport_array_lookup_elem() local
     55  if (unlikely(index >= array->map.max_entries))  in reuseport_array_lookup_elem()
     58  return rcu_dereference(array->ptrs[index]);  in reuseport_array_lookup_elem()
     65  u32 index = *(u32 *)key;  in reuseport_array_delete_elem() local
     69  if (index >= map->max_entries)  in reuseport_array_delete_elem()
     72  if (!rcu_access_pointer(array->ptrs[index]))  in reuseport_array_delete_elem()
     77  sk = rcu_dereference_protected(array->ptrs[index],  in reuseport_array_delete_elem()
     82  RCU_INIT_POINTER(array->ptrs[index], NULL);  in reuseport_array_delete_elem()
    241  u32 index = *(u32 *)key;  in bpf_fd_reuseport_array_update_elem() local
    249  if (index >= map->max_entries)  in bpf_fd_reuseport_array_update_elem()
    [all …]
|
D | arraymap.c |
    157  static void *array_map_elem_ptr(struct bpf_array* array, u32 index)  in array_map_elem_ptr() argument
    159  return array->value + (u64)array->elem_size * index;  in array_map_elem_ptr()
    166  u32 index = *(u32 *)key;  in array_map_lookup_elem() local
    168  if (unlikely(index >= array->map.max_entries))  in array_map_lookup_elem()
    171  return array->value + (u64)array->elem_size * (index & array->index_mask);  in array_map_lookup_elem()
    212  const int index = BPF_REG_2;  in array_map_gen_lookup() local
    218  *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);  in array_map_gen_lookup()
    241  u32 index = *(u32 *)key;  in percpu_array_map_lookup_elem() local
    243  if (unlikely(index >= array->map.max_entries))  in percpu_array_map_lookup_elem()
    246  return this_cpu_ptr(array->pptrs[index & array->index_mask]);  in percpu_array_map_lookup_elem()
    [all …]
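A sketch of the masked-lookup idiom in array_map_lookup_elem() above: the index is bounds-checked and then additionally AND-ed with a power-of-two index_mask before forming the element address, so even a mispredicted out-of-range index stays inside the allocation (a speculative-execution hardening). The struct below is a simplified stand-in, not the kernel's bpf_array.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* simplified stand-in for struct bpf_array */
struct toy_array {
	uint32_t max_entries;
	uint32_t index_mask;   /* (next power of two >= max_entries) - 1 */
	uint32_t elem_size;
	char *value;           /* (index_mask + 1) * elem_size bytes */
};

static void *toy_lookup(const struct toy_array *a, uint32_t index)
{
	if (index >= a->max_entries)
		return NULL;
	/* the mask keeps even an out-of-range (speculated) index in bounds */
	return a->value + (uint64_t)a->elem_size * (index & a->index_mask);
}

int main(void)
{
	struct toy_array a = { .max_entries = 5, .index_mask = 7, .elem_size = 4 };

	a.value = calloc(a.index_mask + 1, a.elem_size);
	memcpy(toy_lookup(&a, 2), &(uint32_t){ 42 }, sizeof(uint32_t));
	printf("elem 2 = %u, elem 9 = %s\n",
	       (unsigned)*(uint32_t *)toy_lookup(&a, 2),
	       toy_lookup(&a, 9) ? "valid" : "NULL");
	free(a.value);
	return 0;
}
```

Masking only works because the backing allocation covers the full mask range, which is why the sketch allocates index_mask + 1 elements rather than max_entries.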
|
D | queue_stack_maps.c |
    138  u32 index;  in __stack_map_get() local
    153  index = qs->head - 1;  in __stack_map_get()
    154  if (unlikely(index >= qs->size))  in __stack_map_get()
    155  index = qs->size - 1;  in __stack_map_get()
    157  ptr = &qs->elements[index * qs->map.value_size];  in __stack_map_get()
    161  qs->head = index;  in __stack_map_get()
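A short sketch of the unsigned-wrap decrement in __stack_map_get() above: head - 1 underflows to a huge u32 when head is 0, the single index >= size test catches that, and the index is clamped back to the last slot of the circular buffer. Names are illustrative.

```c
#include <stdint.h>
#include <stdio.h>

/* Wrap-around decrement for a circular-buffer index, as in __stack_map_get():
 * when head is 0, head - 1 underflows to UINT32_MAX, the >= size test fires,
 * and the index is wrapped to the last slot. */
static uint32_t wrap_dec(uint32_t head, uint32_t size)
{
	uint32_t index = head - 1;

	if (index >= size)
		index = size - 1;
	return index;
}

int main(void)
{
	printf("%u %u\n", wrap_dec(3, 8), wrap_dec(0, 8));  /* 2 7 */
	return 0;
}
```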
|
D | bloom_filter.c |
    31  u32 value_size, u32 index)  in hash() argument
    37  bloom->hash_seed + index);  in hash()
    39  h = jhash(value, value_size, bloom->hash_seed + index);  in hash()
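The bloom_filter.c hits show the usual way of deriving the i-th hash function of a Bloom filter from one seeded hash: the function index is mixed into the seed (hash_seed + index). Below is a user-space sketch with a toy seeded hash standing in for the kernel's jhash(); all names and constants are illustrative.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* toy seeded hash (FNV-1a folded with a seed); stands in for jhash() */
static uint32_t toy_hash(const void *data, uint32_t len, uint32_t seed)
{
	const unsigned char *p = data;
	uint32_t h = 2166136261u ^ seed;

	for (uint32_t i = 0; i < len; i++) {
		h ^= p[i];
		h *= 16777619u;
	}
	return h;
}

#define NR_HASH_FUNCS 3
#define NR_BITS       64u

static uint64_t bloom_bits;
static const uint32_t hash_seed = 0x1234;

static void bloom_add(const char *value)
{
	for (uint32_t index = 0; index < NR_HASH_FUNCS; index++) {
		/* i-th hash function = same hash, seed + index */
		uint32_t h = toy_hash(value, strlen(value), hash_seed + index);

		bloom_bits |= 1ull << (h % NR_BITS);
	}
}

static int bloom_maybe_contains(const char *value)
{
	for (uint32_t index = 0; index < NR_HASH_FUNCS; index++) {
		uint32_t h = toy_hash(value, strlen(value), hash_seed + index);

		if (!(bloom_bits & (1ull << (h % NR_BITS))))
			return 0;   /* definitely not present */
	}
	return 1;                   /* possibly present */
}

int main(void)
{
	bloom_add("alpha");
	printf("alpha: %d, beta: %d\n",
	       bloom_maybe_contains("alpha"), bloom_maybe_contains("beta"));
	return 0;
}
```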
|
D | bpf_iter.c |
    128  p = seq->op->start(seq, &seq->index);  in bpf_seq_read()
    155  loff_t pos = seq->index;  in bpf_seq_read()
    159  p = seq->op->next(seq, p, &seq->index);  in bpf_seq_read()
    160  if (pos == seq->index) {  in bpf_seq_read()
    164  seq->index++;  in bpf_seq_read()
|
D | cpumap.c |
    672  u32 index = key ? *(u32 *)key : U32_MAX;  in cpu_map_get_next_key() local
    675  if (index >= cmap->map.max_entries) {  in cpu_map_get_next_key()
    680  if (index == cmap->map.max_entries - 1)  in cpu_map_get_next_key()
    682  *next = index + 1;  in cpu_map_get_next_key()
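The cpu_map_get_next_key() fragment follows the iteration contract common to bpf maps: a missing key (encoded here as U32_MAX) or an out-of-range key restarts iteration at 0, the last valid key ends iteration with -ENOENT, and otherwise the next key is simply index + 1. A minimal sketch with illustrative names:

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the get_next_key() contract: NULL/out-of-range key restarts
 * at 0, the last key ends iteration, otherwise next = index + 1. */
static int toy_get_next_key(uint32_t max_entries, const uint32_t *key, uint32_t *next)
{
	uint32_t index = key ? *key : UINT32_MAX;

	if (index >= max_entries) {
		*next = 0;
		return 0;
	}
	if (index == max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

int main(void)
{
	uint32_t key, *prev = NULL;

	while (!toy_get_next_key(4, prev, &key)) {  /* prints 0 1 2 3 */
		printf("%u ", key);
		prev = &key;
	}
	printf("\n");
	return 0;
}
```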
|
/kernel/cgroup/ |
D | rdma.c |
     83  int index, int new_max)  in set_resource_limit() argument
     86  if (rpool->resources[index].max != S32_MAX)  in set_resource_limit()
     89  if (rpool->resources[index].max == S32_MAX)  in set_resource_limit()
     92  rpool->resources[index].max = new_max;  in set_resource_limit()
    164  enum rdmacg_resource_type index)  in uncharge_cg_locked() argument
    180  rpool->resources[index].usage--;  in uncharge_cg_locked()
    186  WARN_ON_ONCE(rpool->resources[index].usage < 0);  in uncharge_cg_locked()
    208  enum rdmacg_resource_type index)  in rdmacg_uncharge_hierarchy() argument
    215  uncharge_cg_locked(p, device, index);  in rdmacg_uncharge_hierarchy()
    229  enum rdmacg_resource_type index)  in rdmacg_uncharge() argument
    [all …]
|
D | cgroup-v1.c |
    401  int index = 0, pid = *pos;  in cgroup_pidlist_start() local
    429  while (index < end) {  in cgroup_pidlist_start()
    430  int mid = (index + end) / 2;  in cgroup_pidlist_start()
    432  index = mid;  in cgroup_pidlist_start()
    435  index = mid + 1;  in cgroup_pidlist_start()
    441  if (index >= l->length)  in cgroup_pidlist_start()
    444  iter = l->list + index;  in cgroup_pidlist_start()
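The cgroup_pidlist_start() fragments are a binary search over a sorted pid array: an exact hit stops at the match, otherwise the search converges on the first entry greater than the target, so a seq_file read can resume even if the saved pid has gone away. A compact sketch of that search with illustrative names:

```c
#include <stdio.h>

/* Return the position of pid in the sorted array, or of the first
 * element greater than pid; len if every element is smaller. */
static int pidlist_find(const int *list, int len, int pid)
{
	int index = 0, end = len;

	while (index < end) {
		int mid = (index + end) / 2;

		if (list[mid] == pid) {
			index = mid;
			break;
		} else if (list[mid] <= pid) {
			index = mid + 1;
		} else {
			end = mid;
		}
	}
	return index;
}

int main(void)
{
	int pids[] = { 3, 8, 15, 42, 99 };

	/* exact hit -> position 3; missing pid 9 resumes at 15 -> position 2 */
	printf("%d %d\n", pidlist_find(pids, 5, 42), pidlist_find(pids, 5, 9));
	return 0;
}
```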
|
/kernel/dma/ |
D | swiotlb.c |
     93  unsigned int index;  member
    305  mem->areas[i].index = 0;  in swiotlb_init_io_tlb_mem()
    555  int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;  in swiotlb_bounce() local
    556  phys_addr_t orig_addr = mem->slots[index].orig_addr;  in swiotlb_bounce()
    557  size_t alloc_size = mem->slots[index].alloc_size;  in swiotlb_bounce()
    636  static unsigned int wrap_area_index(struct io_tlb_mem *mem, unsigned int index)  in wrap_area_index() argument
    638  if (index >= mem->area_nslabs)  in wrap_area_index()
    640  return index;  in wrap_area_index()
    660  unsigned int index, wrap, count = 0, i;  in swiotlb_do_find_slots() local
    684  index = wrap = wrap_area_index(mem, ALIGN(area->index, stride));  in swiotlb_do_find_slots()
    [all …]
|
/kernel/sched/ |
D | cpuacct.c |
     95  enum cpuacct_stat_index index)  in cpuacct_cpuusage_read() argument
    105  if (WARN_ON_ONCE(index > CPUACCT_STAT_NSTATS))  in cpuacct_cpuusage_read()
    115  switch (index) {  in cpuacct_cpuusage_read()
    162  enum cpuacct_stat_index index)  in __cpuusage_read() argument
    169  totalcpuusage += cpuacct_cpuusage_read(ca, i, index);  in __cpuusage_read()
    210  enum cpuacct_stat_index index)  in __cpuacct_percpu_seq_show() argument
    217  percpu = cpuacct_cpuusage_read(ca, i, index);  in __cpuacct_percpu_seq_show()
    242  int index;  in cpuacct_all_seq_show() local
    246  for (index = 0; index < CPUACCT_STAT_NSTATS; index++)  in cpuacct_all_seq_show()
    247  seq_printf(m, " %s", cpuacct_stat_desc[index]);  in cpuacct_all_seq_show()
    [all …]
|
D | cputime.c |
    107  static inline void task_group_account_field(struct task_struct *p, int index,  in task_group_account_field() argument
    116  __this_cpu_add(kernel_cpustat.cpustat[index], tmp);  in task_group_account_field()
    118  cgroup_account_cputime_field(p, index, tmp);  in task_group_account_field()
    128  int index;  in account_user_time() local
    134  index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;  in account_user_time()
    137  task_group_account_field(p, index, cputime);  in account_user_time()
    177  u64 cputime, enum cpu_usage_stat index)  in account_system_index_time() argument
    184  task_group_account_field(p, index, cputime);  in account_system_index_time()
    201  int index;  in account_system_time() local
    209  index = CPUTIME_IRQ;  in account_system_time()
    [all …]
|
/kernel/power/ |
D | main.c |
    339  int index;  in last_failed_dev_show() local
    342  index = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;  in last_failed_dev_show()
    343  index %= REC_FAILED_NUM;  in last_failed_dev_show()
    344  last_failed_dev = suspend_stats.failed_devs[index];  in last_failed_dev_show()
    353  int index;  in last_failed_errno_show() local
    356  index = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;  in last_failed_errno_show()
    357  index %= REC_FAILED_NUM;  in last_failed_errno_show()
    358  last_failed_errno = suspend_stats.errno[index];  in last_failed_errno_show()
    367  int index;  in last_failed_step_show() local
    371  index = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;  in last_failed_step_show()
    [all …]
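The last_failed_*_show() fragments all compute the same thing: the failure history is a small ring, the last_failed_* counter points at the next slot to overwrite, and the most recent entry therefore lives at (counter + REC_FAILED_NUM - 1) % REC_FAILED_NUM. A tiny sketch of that arithmetic; the ring size of 2 and the names are purely illustrative.

```c
#include <stdio.h>

#define REC_FAILED_NUM 2

static int failed_errnos[REC_FAILED_NUM];
static int last_failed_errno;            /* next slot to overwrite */

static void record_failure(int err)
{
	failed_errnos[last_failed_errno] = err;
	last_failed_errno = (last_failed_errno + 1) % REC_FAILED_NUM;
}

static int most_recent_failure(void)
{
	/* step one slot back from the write pointer, wrapping around */
	int index = (last_failed_errno + REC_FAILED_NUM - 1) % REC_FAILED_NUM;

	return failed_errnos[index];
}

int main(void)
{
	record_failure(-16);   /* -EBUSY  */
	record_failure(-5);    /* -EIO    */
	record_failure(-22);   /* -EINVAL */
	printf("%d\n", most_recent_failure());   /* -22 */
	return 0;
}
```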
|
/kernel/ |
D | audit_tree.c |
     36  unsigned index;		/* index; upper bit indicates 'will prune' */  member
    203  chunk->owners[i].index = i;  in alloc_chunk()
    274  int index = p->index & ~(1U<<31);  in find_chunk() local
    275  p -= index;  in find_chunk()
    309  new->owners[i].index = old->owners[j].index - j + i;  in replace_chunk()
    432  chunk->owners[0].index = (1U << 31);  in create_chunk()
    504  p->index = (chunk->count - 1) | (1U<<31);  in tag_chunk()
    579  if (tagged && !(p->index & (1U<<31)))  in prune_tree_chunks()
    621  if (node->index & (1U<<31)) {  in trim_marked()
    708  node->index |= 1U<<31;  in audit_trim_trees()
    [all …]
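Two idioms show up in the audit_tree.c hits: the owner's index doubles as a back-offset from an owner entry to the start of the chunk's owner array (p -= index), and its top bit (1U << 31) is borrowed as a 'will prune' flag that is masked off before the index is used. A user-space sketch of packing a one-bit flag into the top bit of an index; names are illustrative.

```c
#include <stdio.h>

#define PRUNE_FLAG (1U << 31)

/* pack/unpack a small array index and a one-bit flag into one u32 */
static unsigned int pack(unsigned int index, int will_prune)
{
	return index | (will_prune ? PRUNE_FLAG : 0);
}

static unsigned int unpack_index(unsigned int packed)
{
	return packed & ~PRUNE_FLAG;       /* as in: p->index & ~(1U<<31) */
}

static int unpack_prune(unsigned int packed)
{
	return !!(packed & PRUNE_FLAG);    /* as in: node->index & (1U<<31) */
}

int main(void)
{
	unsigned int v = pack(5, 1);

	printf("index=%u prune=%d\n", unpack_index(v), unpack_prune(v));  /* 5 1 */
	return 0;
}
```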
|
/kernel/module/ |
D | kallsyms.c |
     49  if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu)  in elf_type()
    112  Elf_Shdr *symsect = info->sechdrs + info->index.sym;  in layout_symtab()
    113  Elf_Shdr *strsect = info->sechdrs + info->index.str;  in layout_symtab()
    120  info->index.sym) | INIT_OFFSET_MASK;  in layout_symtab()
    130  info->index.pcpu)) {  in layout_symtab()
    148  info->index.str) | INIT_OFFSET_MASK;  in layout_symtab()
    172  Elf_Shdr *symsec = &info->sechdrs[info->index.sym];  in add_kallsyms()
    185  (void *)info->sechdrs[info->index.str].sh_addr;  in add_kallsyms()
    201  info->index.pcpu)) {  in add_kallsyms()
|
D | main.c |
     376  Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];  in percpu_modalloc()
     472  if (info->sechdrs[info->index.pcpu].sh_size != 0)  in percpu_modalloc()
    1318  Elf_Shdr *symsec = &info->sechdrs[info->index.sym];  in simplify_symbols()
    1381  if (sym[i].st_shndx == info->index.pcpu)  in simplify_symbols()
    1414  info->index.sym, i,  in apply_relocations()
    1418  info->index.sym, i, mod);  in apply_relocations()
    1421  info->index.sym, i, mod);  in apply_relocations()
    1593  Elf_Shdr *infosec = &info->sechdrs[info->index.info];  in get_next_modinfo()
    1929  info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;  in rewrite_section_headers()
    1930  info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;  in rewrite_section_headers()
    [all …]
|
/kernel/time/ |
D | hrtimer.c |
      74  .index = HRTIMER_BASE_MONOTONIC,
      79  .index = HRTIMER_BASE_REALTIME,
      84  .index = HRTIMER_BASE_BOOTTIME,
      89  .index = HRTIMER_BASE_TAI,
      94  .index = HRTIMER_BASE_MONOTONIC_SOFT,
      99  .index = HRTIMER_BASE_REALTIME_SOFT,
     104  .index = HRTIMER_BASE_BOOTTIME_SOFT,
     109  .index = HRTIMER_BASE_TAI_SOFT,
     230  int basenum = base->index;  in switch_hrtimer_base()
    1086  base->cpu_base->active_bases |= 1 << base->index;  in enqueue_hrtimer()
    [all …]
|
/kernel/printk/ |
D | printk.c |
    2391  if (strcmp(c->name, name) == 0 && c->index == idx) {  in __add_preferred_console()
    2407  c->index = idx;  in __add_preferred_console()
    2977  struct tty_driver *console_device(int *index)  in console_device() argument
    2986  driver = c->device(c, index);  in console_device()
    3050  newcon->match(newcon, c->name, c->index, c->options) != 0) {  in try_enable_preferred_console()
    3055  if (newcon->index >= 0 &&  in try_enable_preferred_console()
    3056  newcon->index != c->index)  in try_enable_preferred_console()
    3058  if (newcon->index < 0)  in try_enable_preferred_console()
    3059  newcon->index = c->index;  in try_enable_preferred_console()
    3088  if (newcon->index < 0)  in try_enable_default_console()
    [all …]
|
D | console_cmdline.h |
    8  int index;			/* Minor dev. to use */  member
|
D | Makefile |
    5  obj-$(CONFIG_PRINTK_INDEX) += index.o
|