Searched refs:idx (Results 1 – 25 of 64) sorted by relevance

/kernel/events/
hw_breakpoint_test.c
31 static struct perf_event *register_test_bp(int cpu, struct task_struct *tsk, int idx) in register_test_bp() argument
35 if (WARN_ON(idx < 0 || idx >= MAX_TEST_BREAKPOINTS)) in register_test_bp()
39 attr.bp_addr = (unsigned long)&break_vars[idx]; in register_test_bp()
124 int idx = 0; in test_one_cpu() local
126 fill_bp_slots(test, &idx, get_test_cpu(0), NULL, 0); in test_one_cpu()
127 TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx)); in test_one_cpu()
128 TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx)); in test_one_cpu()
133 int idx = 0; in test_many_cpus() local
138 bool do_continue = fill_bp_slots(test, &idx, cpu, NULL, 0); in test_many_cpus()
140 TEST_EXPECT_NOSPC(register_test_bp(cpu, NULL, idx)); in test_many_cpus()
[all …]
/kernel/sched/
cpudeadline.c
25 static void cpudl_heapify_down(struct cpudl *cp, int idx) in cpudl_heapify_down() argument
29 int orig_cpu = cp->elements[idx].cpu; in cpudl_heapify_down()
30 u64 orig_dl = cp->elements[idx].dl; in cpudl_heapify_down()
32 if (left_child(idx) >= cp->size) in cpudl_heapify_down()
39 l = left_child(idx); in cpudl_heapify_down()
40 r = right_child(idx); in cpudl_heapify_down()
41 largest = idx; in cpudl_heapify_down()
53 if (largest == idx) in cpudl_heapify_down()
57 cp->elements[idx].cpu = cp->elements[largest].cpu; in cpudl_heapify_down()
58 cp->elements[idx].dl = cp->elements[largest].dl; in cpudl_heapify_down()
[all …]
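
The cpudeadline.c hits are a textbook binary-heap sift-down: idx walks from a node through left_child()/right_child() until the max-heap property holds again, and the displaced element is written back only once at the end. A minimal userspace sketch of the same pattern over a plain int array (names here are illustrative, not the kernel's):

#include <stddef.h>

static size_t left_child(size_t idx)  { return 2 * idx + 1; }
static size_t right_child(size_t idx) { return 2 * idx + 2; }

static void heapify_down(int *heap, size_t size, size_t idx)
{
	int orig = heap[idx];           /* value being sifted, cf. orig_cpu/orig_dl */

	for (;;) {
		size_t l = left_child(idx), r = right_child(idx), largest = idx;

		if (l < size && heap[l] > orig)
			largest = l;
		if (r < size && heap[r] > (largest == idx ? orig : heap[largest]))
			largest = r;
		if (largest == idx)
			break;

		/* pull the larger child up; write the sifted value once at the end */
		heap[idx] = heap[largest];
		idx = largest;
	}
	heap[idx] = orig;
}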
loadavg.c
210 int idx = calc_load_idx; in calc_load_write_idx() local
223 idx++; in calc_load_write_idx()
225 return idx & 1; in calc_load_write_idx()
239 int idx = calc_load_write_idx(); in calc_load_nohz_fold() local
241 atomic_long_add(delta, &calc_load_nohz[idx]); in calc_load_nohz_fold()
285 int idx = calc_load_read_idx(); in calc_load_nohz_read() local
288 if (atomic_long_read(&calc_load_nohz[idx])) in calc_load_nohz_read()
289 delta = atomic_long_xchg(&calc_load_nohz[idx], 0); in calc_load_nohz_read()
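
The loadavg.c hits show a two-slot double buffer: the write and read paths reduce a sequence counter to a 0/1 slot with "idx & 1", and the reader drains its slot with an atomic exchange. A hedged userspace sketch of that slot selection follows; the kernel's choice of which side sees the "other" slot depends on where it is in the LOAD_FREQ window, which this glosses over, and the names are invented.

#include <stdatomic.h>

static atomic_long slot[2];   /* two accumulation buckets              */
static atomic_uint seq;       /* bumped when a sample window completes */

/* Writer: fold a delta into the bucket selected by the low bit of seq. */
static void fold_delta(long delta)
{
	unsigned int idx = atomic_load(&seq) & 1;   /* cf. "return idx & 1" */

	atomic_fetch_add(&slot[idx], delta);
}

/* Reader: drain the other bucket with an exchange, cf. atomic_long_xchg(). */
static long read_and_clear(void)
{
	unsigned int idx = (atomic_load(&seq) & 1) ^ 1;

	return atomic_exchange(&slot[idx], 0);
}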
cpupri.c
68 struct cpumask *lowest_mask, int idx) in __cpupri_find() argument
70 struct cpupri_vec *vec = &cp->pri_to_cpu[idx]; in __cpupri_find()
149 int idx, cpu; in cpupri_find_fitness() local
153 for (idx = 0; idx < task_pri; idx++) { in cpupri_find_fitness()
155 if (!__cpupri_find(cp, p, lowest_mask, idx)) in cpupri_find_fitness()
autogroup.c
236 int err, idx; in proc_sched_autogroup_set_nice() local
255 idx = array_index_nospec(nice + 20, 40); in proc_sched_autogroup_set_nice()
256 shares = scale_load(sched_prio_to_weight[idx]); in proc_sched_autogroup_set_nice()
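
The autogroup.c hit maps a user-supplied nice value in [-20, 19] onto a table index in [0, 39] and hardens it with array_index_nospec() before indexing sched_prio_to_weight[]. A rough userspace sketch of the same bounds-check-then-index shape (table contents elided, names hypothetical; the speculation barrier itself is kernel-only and omitted here):

static const unsigned int prio_to_weight[40] = { 0 /* 40 weight entries elided */ };

static int nice_to_weight(int nice, unsigned int *weight)
{
	if (nice < -20 || nice > 19)
		return -1;                              /* cf. -ERANGE in the kernel */

	unsigned int idx = (unsigned int)(nice + 20);   /* 0..39 */

	*weight = prio_to_weight[idx];
	return 0;
}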
/kernel/rcu/
srcutree.c
412 static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx) in srcu_readers_lock_idx() argument
420 sum += READ_ONCE(cpuc->srcu_lock_count[idx]); in srcu_readers_lock_idx()
429 static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx) in srcu_readers_unlock_idx() argument
437 sum += READ_ONCE(cpuc->srcu_unlock_count[idx]); in srcu_readers_unlock_idx()
446 static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx) in srcu_readers_active_idx_check() argument
450 unlocks = srcu_readers_unlock_idx(ssp, idx); in srcu_readers_active_idx_check()
486 return srcu_readers_lock_idx(ssp, idx) == unlocks; in srcu_readers_active_idx_check()
636 int idx; in __srcu_read_lock() local
638 idx = READ_ONCE(ssp->srcu_idx) & 0x1; in __srcu_read_lock()
639 this_cpu_inc(ssp->sda->srcu_lock_count[idx]); in __srcu_read_lock()
[all …]
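
The srcutree.c hits revolve around the SRCU flip bit: readers sample srcu_idx & 0x1, bump a lock counter for that slot, and later bump the matching unlock counter, while the grace-period machinery compares the two sums for the retired slot. A simplified, non-per-CPU userspace sketch of that counter-pair idea (this is not SRCU, just its indexing scheme):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong lock_count[2], unlock_count[2];
static atomic_uint flip;               /* cf. ssp->srcu_idx */

static int sketch_read_lock(void)
{
	int idx = atomic_load(&flip) & 0x1;

	atomic_fetch_add(&lock_count[idx], 1);
	return idx;                        /* reader must hand this back */
}

static void sketch_read_unlock(int idx)
{
	atomic_fetch_add(&unlock_count[idx], 1);
}

/* Writer side: after flipping, wait until the old slot's counts agree,
 * cf. srcu_readers_active_idx_check(). */
static bool old_slot_quiescent(int idx)
{
	unsigned long unlocks = atomic_load(&unlock_count[idx]);
	unsigned long locks   = atomic_load(&lock_count[idx]);

	return locks == unlocks;
}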
srcutiny.c
97 void __srcu_read_unlock(struct srcu_struct *ssp, int idx) in __srcu_read_unlock() argument
99 int newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1; in __srcu_read_unlock()
101 WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval); in __srcu_read_unlock()
114 int idx; in srcu_drive_gp() local
130 idx = (ssp->srcu_idx & 0x2) / 2; in srcu_drive_gp()
133 swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx])); in srcu_drive_gp()
rcuscale.c
133 void (*readunlock)(int idx);
156 static void rcu_scale_read_unlock(int idx) __releases(RCU) in rcu_scale_read_unlock() argument
197 static void srcu_scale_read_unlock(int idx) __releases(srcu_ctlp) in srcu_scale_read_unlock() argument
199 srcu_read_unlock(srcu_ctlp, idx); in srcu_scale_read_unlock()
282 static void tasks_scale_read_unlock(int idx) in tasks_scale_read_unlock() argument
320 static void tasks_trace_scale_read_unlock(int idx) in tasks_trace_scale_read_unlock() argument
376 int idx; in rcu_scale_reader() local
386 idx = cur_ops->readlock(); in rcu_scale_reader()
387 cur_ops->readunlock(idx); in rcu_scale_reader()
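
The rcuscale.c hits use an ops table in which readlock() may return a token (SRCU's idx) that readunlock(idx) later consumes; flavours that need no token simply ignore it. A small sketch of that interface shape, with made-up names:

struct scale_ops_sketch {
	int  (*readlock)(void);
	void (*readunlock)(int idx);
};

static int  dummy_readlock(void)      { return 0; }
static void dummy_readunlock(int idx) { (void)idx; }

static const struct scale_ops_sketch dummy_ops = {
	.readlock   = dummy_readlock,
	.readunlock = dummy_readunlock,
};

static void reader_step(const struct scale_ops_sketch *ops)
{
	int idx = ops->readlock();   /* cf. cur_ops->readlock() */
	/* ... read-side critical section ... */
	ops->readunlock(idx);        /* cf. cur_ops->readunlock(idx) */
}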
/kernel/locking/
qspinlock.c
116 static inline __pure u32 encode_tail(int cpu, int idx) in encode_tail() argument
121 tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */ in encode_tail()
129 int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET; in decode_tail() local
131 return per_cpu_ptr(&qnodes[idx].mcs, cpu); in decode_tail()
135 struct mcs_spinlock *grab_mcs_node(struct mcs_spinlock *base, int idx) in grab_mcs_node() argument
137 return &((struct qnode *)base + idx)->mcs; in grab_mcs_node()
320 int idx; in queued_spin_lock_slowpath() local
402 idx = node->count++; in queued_spin_lock_slowpath()
403 tail = encode_tail(smp_processor_id(), idx); in queued_spin_lock_slowpath()
416 if (unlikely(idx >= MAX_NODES)) { in queued_spin_lock_slowpath()
[all …]
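
The qspinlock.c hits pack a CPU number and a per-CPU MCS node index (0..3) into one 32-bit tail word so the queue tail can be published atomically. A userspace sketch of that encode/decode round trip; the bit offsets below are illustrative rather than the kernel's _Q_* layout:

#include <stdint.h>

#define TAIL_IDX_BITS   2u
#define TAIL_IDX_MASK   ((1u << TAIL_IDX_BITS) - 1)

static uint32_t encode_tail_sketch(unsigned int cpu, unsigned int idx)
{
	/* cpu is stored +1 so that tail == 0 means "no tail queued" */
	return ((cpu + 1) << TAIL_IDX_BITS) | (idx & TAIL_IDX_MASK);
}

static void decode_tail_sketch(uint32_t tail, unsigned int *cpu, unsigned int *idx)
{
	*idx = tail & TAIL_IDX_MASK;
	*cpu = (tail >> TAIL_IDX_BITS) - 1;
}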
lockdep_proc.c
34 #define iterate_lock_classes(idx, class) \ argument
35 for (idx = 0, class = lock_classes; idx <= max_lock_class_idx; \
36 idx++, class++)
49 unsigned long idx = *pos; in l_start() local
51 if (idx > max_lock_class_idx) in l_start()
53 return lock_classes + idx; in l_start()
82 int idx = class - lock_classes; in l_show() local
87 if (!test_bit(idx, lock_classes_in_use)) in l_show()
244 unsigned long idx; in lockdep_stats_show() local
246 iterate_lock_classes(idx, class) { in lockdep_stats_show()
[all …]
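
The lockdep_proc.c hits centre on iterate_lock_classes(), a for-each macro that advances an index and an element pointer over a static table in lockstep. A compact sketch of the same macro shape with hypothetical names:

struct klass { int in_use; };

#define MAX_IDX 8
static struct klass classes[MAX_IDX + 1];

#define for_each_class(idx, cls) \
	for ((idx) = 0, (cls) = classes; (idx) <= MAX_IDX; (idx)++, (cls)++)

static int count_in_use(void)
{
	unsigned long idx;
	struct klass *cls;
	int n = 0;

	for_each_class(idx, cls)
		if (cls->in_use)   /* cf. test_bit(idx, lock_classes_in_use) */
			n++;
	return n;
}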
lockdep_internals.h
238 int idx; in debug_class_ops_inc() local
240 idx = class - lock_classes; in debug_class_ops_inc()
241 __debug_atomic_inc(lock_class_ops[idx]); in debug_class_ops_inc()
246 int idx, cpu; in debug_class_ops_read() local
249 idx = class - lock_classes; in debug_class_ops_read()
251 ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu); in debug_class_ops_read()
/kernel/
user_namespace.c
296 unsigned idx; in map_id_range_down_base() local
302 for (idx = 0; idx < extents; idx++) { in map_id_range_down_base()
303 first = map->extent[idx].first; in map_id_range_down_base()
304 last = first + map->extent[idx].count - 1; in map_id_range_down_base()
307 return &map->extent[idx]; in map_id_range_down_base()
345 unsigned idx; in map_id_up_base() local
349 for (idx = 0; idx < extents; idx++) { in map_id_up_base()
350 first = map->extent[idx].lower_first; in map_id_up_base()
351 last = first + map->extent[idx].count - 1; in map_id_up_base()
353 return &map->extent[idx]; in map_id_up_base()
[all …]
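
The user_namespace.c hits are a linear walk over ID-map extents, returning the extent whose [first, first + count - 1] range contains the ID being mapped. A simplified userspace sketch with flattened types:

#include <stddef.h>

struct extent_sketch { unsigned int first, lower_first, count; };

static struct extent_sketch *
find_extent(struct extent_sketch *map, unsigned int nr, unsigned int id)
{
	for (unsigned int idx = 0; idx < nr; idx++) {
		unsigned int first = map[idx].first;
		unsigned int last  = first + map[idx].count - 1;

		if (id >= first && id <= last)
			return &map[idx];
	}
	return NULL;   /* no extent covers this id */
}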
/kernel/time/
timer.c
511 static inline void timer_set_idx(struct timer_list *timer, unsigned int idx) in timer_set_idx() argument
514 idx << TIMER_ARRAYSHIFT; in timer_set_idx()
543 unsigned int idx; in calc_wheel_index() local
546 idx = calc_index(expires, 0, bucket_expiry); in calc_wheel_index()
548 idx = calc_index(expires, 1, bucket_expiry); in calc_wheel_index()
550 idx = calc_index(expires, 2, bucket_expiry); in calc_wheel_index()
552 idx = calc_index(expires, 3, bucket_expiry); in calc_wheel_index()
554 idx = calc_index(expires, 4, bucket_expiry); in calc_wheel_index()
556 idx = calc_index(expires, 5, bucket_expiry); in calc_wheel_index()
558 idx = calc_index(expires, 6, bucket_expiry); in calc_wheel_index()
[all …]
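
The timer.c hits select a wheel level for an expiry: the further out the expiry, the coarser the level, and calc_index() then derives the bucket idx within that level. A rough sketch of the level-selection idea only; the cutoffs below are invented, the kernel derives its own from the LVL_* constants:

#include <stdint.h>

static unsigned int wheel_level_sketch(uint64_t delta)
{
	/* each level is assumed 8x coarser than the previous one (illustrative) */
	uint64_t cutoff = 64;
	unsigned int lvl = 0;

	while (lvl < 8 && delta >= cutoff) {
		cutoff *= 8;
		lvl++;
	}
	return lvl;   /* caller then computes idx = calc_index(expires, lvl, ...) */
}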
/kernel/trace/
tracing_map.h
173 #define TRACING_MAP_ARRAY_ELT(array, idx) \ argument
174 (array->pages[idx >> array->entry_shift] + \
175 ((idx & array->entry_mask) << array->entry_size_shift))
177 #define TRACING_MAP_ENTRY(array, idx) \ argument
178 ((struct tracing_map_entry *)TRACING_MAP_ARRAY_ELT(array, idx))
180 #define TRACING_MAP_ELT(array, idx) \ argument
181 ((struct tracing_map_elt **)TRACING_MAP_ARRAY_ELT(array, idx))
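
The tracing_map.h macros address elements of a paged array: the high bits of idx pick a page, the low bits pick the slot within it, and a size shift turns the slot into a byte offset. A userspace sketch of that addressing, with hypothetical field names:

struct paged_array_sketch {
	void **pages;                  /* array of page pointers        */
	unsigned int entry_shift;      /* log2(entries per page)        */
	unsigned int entry_mask;       /* entries_per_page - 1          */
	unsigned int entry_size_shift; /* log2(entry size in bytes)     */
};

static void *paged_array_elt(struct paged_array_sketch *a, unsigned int idx)
{
	char *page = a->pages[idx >> a->entry_shift];

	return page + ((idx & a->entry_mask) << a->entry_size_shift);
}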
fgraph.c
281 ftrace_graph_get_ret_stack(struct task_struct *task, int idx) in ftrace_graph_get_ret_stack() argument
283 idx = task->curr_ret_stack - idx; in ftrace_graph_get_ret_stack()
285 if (idx >= 0 && idx <= task->curr_ret_stack) in ftrace_graph_get_ret_stack()
286 return &task->ret_stack[idx]; in ftrace_graph_get_ret_stack()
307 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, in ftrace_graph_ret_addr() argument
326 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, in ftrace_graph_ret_addr() argument
336 if (!task->ret_stack || task_idx < *idx) in ftrace_graph_ret_addr()
339 task_idx -= *idx; in ftrace_graph_ret_addr()
340 (*idx)++; in ftrace_graph_ret_addr()
tracing_map.c
275 int idx = tracing_map_add_field(map, cmp_fn); in tracing_map_add_key_field() local
277 if (idx < 0) in tracing_map_add_key_field()
278 return idx; in tracing_map_add_key_field()
280 map->fields[idx].offset = offset; in tracing_map_add_key_field()
282 map->key_idx[map->n_keys++] = idx; in tracing_map_add_key_field()
284 return idx; in tracing_map_add_key_field()
455 int idx; in get_free_elt() local
457 idx = atomic_inc_return(&map->next_elt); in get_free_elt()
458 if (idx < map->max_elts) { in get_free_elt()
459 elt = *(TRACING_MAP_ELT(map->elts, idx)); in get_free_elt()
[all …]
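
The tracing_map.c get_free_elt() hit claims slots by atomically bumping a shared counter and using the result as the element index, so concurrent inserters never collide. A userspace sketch of the same idea (the kernel uses atomic_inc_return() on a counter starting at -1; this version uses a zero-based fetch-add):

#include <stdatomic.h>
#include <stddef.h>

#define MAX_ELTS 1024
static void *elts[MAX_ELTS];
static atomic_uint next_elt;   /* zero-initialized */

static void *claim_free_elt(void)
{
	unsigned int idx = atomic_fetch_add(&next_elt, 1);   /* old value = my slot */

	if (idx < MAX_ELTS)
		return elts[idx];
	return NULL;                                         /* map is full */
}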
preemptirq_delay_test.c
68 static void execute_preemptirqtest(int idx) in execute_preemptirqtest() argument
75 if (idx % 2 == 0) in execute_preemptirqtest()
83 static void preemptirqtest_##POSTFIX(int idx) \
85 execute_preemptirqtest(idx); \
trace_events_hist.c
152 unsigned int idx; member
910 if (hist_field && hist_field->var.idx == var_idx && in check_field_for_var_ref()
999 if (find_any_var_ref(hist_data, field->var.idx)) { in check_var_refs()
1292 var_idx = hist_field->var.idx; in resolve_var_refs()
2082 ref_field->var.idx = var_field->var.idx; in init_var_ref()
2143 if (ref_field->var.idx == var_field->var.idx && in find_var_ref_idx()
2177 if (ref_field->var.idx == var_field->var.idx && in create_var_ref()
3110 var_idx = var->var.idx; in __update_field_vars()
3150 int idx; in create_var() local
3163 idx = tracing_map_add_var(hist_data->map); in create_var()
[all …]
/kernel/bpf/
devmap.c
71 unsigned int idx; member
106 int idx) in dev_map_index_hash() argument
108 return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)]; in dev_map_index_hash()
275 if (dev->idx == key) in __dev_map_hash_lookup_elem()
285 u32 idx, *next = next_key; in dev_map_hash_get_next_key() local
293 idx = *(u32 *)key; in dev_map_hash_get_next_key()
295 dev = __dev_map_hash_lookup_elem(map, idx); in dev_map_hash_get_next_key()
303 *next = next_dev->idx; in dev_map_hash_get_next_key()
307 i = idx & (dtab->n_buckets - 1); in dev_map_hash_get_next_key()
318 *next = next_dev->idx; in dev_map_hash_get_next_key()
[all …]
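
The devmap.c hits pick a hash bucket with idx & (n_buckets - 1), the usual power-of-two modulo trick, and then walk that bucket comparing stored idx keys. A minimal sketch of the bucket selection with invented structures:

struct node_sketch   { unsigned int idx; struct node_sketch *next; };
struct bucket_sketch { struct node_sketch *head; };

static struct bucket_sketch *
index_hash(struct bucket_sketch *buckets, unsigned int n_buckets, unsigned int idx)
{
	/* n_buckets must be a power of two for the mask to act as a modulo */
	return &buckets[idx & (n_buckets - 1)];
}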
memalloc.c
607 int idx; in bpf_mem_alloc() local
613 idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ); in bpf_mem_alloc()
614 if (idx < 0) in bpf_mem_alloc()
617 ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx); in bpf_mem_alloc()
623 int idx; in bpf_mem_free() local
628 idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ)); in bpf_mem_free()
629 if (idx < 0) in bpf_mem_free()
632 unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr); in bpf_mem_free()
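
The memalloc.c hits translate an allocation size into a size-class index that selects a per-CPU cache; freeing recomputes the index from ksize() of the object. A hedged sketch of such a size-to-bucket lookup; the bucket sizes here are invented for illustration:

static const unsigned int bucket_size[] = { 16, 32, 64, 96, 128, 192, 256 };
#define NUM_BUCKETS (sizeof(bucket_size) / sizeof(bucket_size[0]))

static int mem_cache_idx_sketch(unsigned long size)
{
	for (unsigned int idx = 0; idx < NUM_BUCKETS; idx++)
		if (size <= bucket_size[idx])
			return (int)idx;
	return -1;   /* too large for any size-class bucket */
}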
net_namespace.c
109 int cnt, idx; in bpf_netns_link_release() local
125 idx = link_index(net, type, net_link); in bpf_netns_link_release()
138 WARN_ON(bpf_prog_array_delete_safe_at(old_array, idx)); in bpf_netns_link_release()
173 int idx, ret; in bpf_netns_link_update_prog() local
191 idx = link_index(net, type, net_link); in bpf_netns_link_update_prog()
192 ret = bpf_prog_array_update_at(run_array, idx, new_prog); in bpf_netns_link_update_prog()
/kernel/irq/
generic-chip.c
350 int idx; in __irq_get_domain_generic_chip() local
354 idx = hw_irq / dgc->irqs_per_chip; in __irq_get_domain_generic_chip()
355 if (idx >= dgc->num_chips) in __irq_get_domain_generic_chip()
357 return dgc->gc[idx]; in __irq_get_domain_generic_chip()
393 int idx; in irq_map_generic_chip() local
399 idx = hw_irq % dgc->irqs_per_chip; in irq_map_generic_chip()
401 if (test_bit(idx, &gc->unused)) in irq_map_generic_chip()
404 if (test_bit(idx, &gc->installed)) in irq_map_generic_chip()
418 set_bit(idx, &gc->installed); in irq_map_generic_chip()
427 data->mask = 1 << idx; in irq_map_generic_chip()
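
The generic-chip.c hits split a hardware IRQ number two ways: the quotient hw_irq / irqs_per_chip selects the generic chip, and the remainder selects the line within it (and is reused for data->mask = 1 << idx). A plain-arithmetic sketch with hypothetical types:

#include <stddef.h>

struct chip_sketch { unsigned long installed; };

static struct chip_sketch *
hwirq_to_chip(struct chip_sketch *chips, unsigned int num_chips,
	      unsigned int irqs_per_chip, unsigned int hw_irq,
	      unsigned int *bit)
{
	unsigned int idx = hw_irq / irqs_per_chip;   /* which chip instance */

	if (idx >= num_chips)
		return NULL;                         /* hw_irq out of range */

	*bit = hw_irq % irqs_per_chip;               /* which line within it */
	return &chips[idx];
}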
timings.c
345 int idx = period; in irq_timings_next_event_index() local
354 while (!memcmp(buffer, &buffer[idx], size * sizeof(int))) { in irq_timings_next_event_index()
359 idx += size; in irq_timings_next_event_index()
366 if (idx == len) in irq_timings_next_event_index()
374 if (len - idx < period) in irq_timings_next_event_index()
375 size = len - idx; in irq_timings_next_event_index()
msi.c
104 unsigned int idx, last = index + ndesc - 1; in msi_add_simple_msi_descs() local
110 for (idx = index; idx <= last; idx++) { in msi_add_simple_msi_descs()
114 ret = msi_insert_desc(dev->msi.data, desc, idx); in msi_add_simple_msi_descs()
153 unsigned long idx; in msi_free_msi_descs_range() local
157 xa_for_each_range(xa, idx, desc, first_index, last_index) { in msi_free_msi_descs_range()
159 xa_erase(xa, idx); in msi_free_msi_descs_range()
/kernel/dma/
debug.c
250 __acquires(&dma_entry_hash[idx].lock) in get_hash_bucket()
252 int idx = hash_fn(entry); in get_hash_bucket() local
255 spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags); in get_hash_bucket()
257 return &dma_entry_hash[idx]; in get_hash_bucket()
403 int idx; in debug_dma_dump_mappings() local
405 for (idx = 0; idx < HASH_SIZE; idx++) { in debug_dma_dump_mappings()
406 struct hash_bucket *bucket = &dma_entry_hash[idx]; in debug_dma_dump_mappings()
416 type2name[entry->type], idx, in debug_dma_dump_mappings()
779 int idx; in dump_show() local
781 for (idx = 0; idx < HASH_SIZE; idx++) { in dump_show()
[all …]
