
Searched refs:idx (Results 1 – 25 of 59) sorted by relevance

/kernel/sched/
cpudeadline.c
26 static void cpudl_heapify_down(struct cpudl *cp, int idx) in cpudl_heapify_down() argument
30 int orig_cpu = cp->elements[idx].cpu; in cpudl_heapify_down()
31 u64 orig_dl = cp->elements[idx].dl; in cpudl_heapify_down()
33 if (left_child(idx) >= cp->size) in cpudl_heapify_down()
40 l = left_child(idx); in cpudl_heapify_down()
41 r = right_child(idx); in cpudl_heapify_down()
42 largest = idx; in cpudl_heapify_down()
54 if (largest == idx) in cpudl_heapify_down()
58 cp->elements[idx].cpu = cp->elements[largest].cpu; in cpudl_heapify_down()
59 cp->elements[idx].dl = cp->elements[largest].dl; in cpudl_heapify_down()
[all …]
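
The cpudeadline.c hits above are the sift-down step of a max-heap keyed by deadline. A minimal standalone sketch of the same idea follows; the element layout and helper names are simplifications for illustration, not the kernel's actual types:

/* Toy max-heap sift-down keyed by deadline, mirroring the shape of
 * cpudl_heapify_down(): hold the original element, pull larger
 * children up into the hole, then drop the element into place. */
#include <stddef.h>

struct dl_elem { int cpu; unsigned long long dl; };

static size_t left_child(size_t i)  { return 2 * i + 1; }
static size_t right_child(size_t i) { return 2 * i + 2; }

static void heapify_down(struct dl_elem *e, size_t size, size_t idx)
{
	struct dl_elem orig = e[idx];

	for (;;) {
		size_t l = left_child(idx);
		size_t r = right_child(idx);
		size_t largest = idx;
		unsigned long long largest_dl = orig.dl;

		if (l < size && e[l].dl > largest_dl) {
			largest = l;
			largest_dl = e[l].dl;
		}
		if (r < size && e[r].dl > largest_dl)
			largest = r;
		if (largest == idx)
			break;

		e[idx] = e[largest];	/* pull the larger child up into the hole */
		idx = largest;
	}
	e[idx] = orig;			/* place the sifted element in its final slot */
}
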
loadavg.c
211 int idx = calc_load_idx; in calc_load_write_idx() local
224 idx++; in calc_load_write_idx()
226 return idx & 1; in calc_load_write_idx()
240 int idx = calc_load_write_idx(); in calc_load_nohz_fold() local
242 atomic_long_add(delta, &calc_load_nohz[idx]); in calc_load_nohz_fold()
286 int idx = calc_load_read_idx(); in calc_load_nohz_read() local
289 if (atomic_long_read(&calc_load_nohz[idx])) in calc_load_nohz_read()
290 delta = atomic_long_xchg(&calc_load_nohz[idx], 0); in calc_load_nohz_read()
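
The loadavg.c hits show a two-slot (double-buffered) accumulator: deltas are folded into the slot selected by idx & 1 and the reader drains a slot with an atomic exchange. A condensed sketch of the pattern with C11 atomics; the window logic that decides which sequence number a writer uses is omitted and the names are illustrative:

#include <stdatomic.h>

static atomic_long nohz_delta[2];	/* two accumulation slots */

/* Writers fold idle deltas into the slot for the sample window they
 * belong to; seq & 1 flips between the slots, so folding for the next
 * window never races with the reader draining the current one. */
static void fold_delta(long delta, int seq)
{
	atomic_fetch_add(&nohz_delta[seq & 1], delta);
}

static long drain_delta(int seq)
{
	int idx = seq & 1;
	long delta = 0;

	/* exchange only when non-zero, like calc_load_nohz_read() above */
	if (atomic_load(&nohz_delta[idx]))
		delta = atomic_exchange(&nohz_delta[idx], 0);
	return delta;
}
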
cpupri.c
89 struct cpumask *lowest_mask, int idx, in __cpupri_find() argument
92 struct cpupri_vec *vec = &cp->pri_to_cpu[idx]; in __cpupri_find()
176 int idx, cpu; in cpupri_find_fitness() local
184 for (idx = 0; idx < task_pri; idx++) { in cpupri_find_fitness()
186 if (!__cpupri_find(cp, p, lowest_mask, idx, drop_nopreempts)) in cpupri_find_fitness()
autogroup.c
213 int err, idx; in proc_sched_autogroup_set_nice() local
232 idx = array_index_nospec(nice + 20, 40); in proc_sched_autogroup_set_nice()
233 shares = scale_load(sched_prio_to_weight[idx]); in proc_sched_autogroup_set_nice()
/kernel/rcu/
srcutree.c
244 static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx) in srcu_readers_lock_idx() argument
252 sum += READ_ONCE(cpuc->srcu_lock_count[idx]); in srcu_readers_lock_idx()
261 static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx) in srcu_readers_unlock_idx() argument
269 sum += READ_ONCE(cpuc->srcu_unlock_count[idx]); in srcu_readers_unlock_idx()
278 static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx) in srcu_readers_active_idx_check() argument
282 unlocks = srcu_readers_unlock_idx(ssp, idx); in srcu_readers_active_idx_check()
318 return srcu_readers_lock_idx(ssp, idx) == unlocks; in srcu_readers_active_idx_check()
402 int idx; in __srcu_read_lock() local
404 idx = READ_ONCE(ssp->srcu_idx) & 0x1; in __srcu_read_lock()
405 this_cpu_inc(ssp->sda->srcu_lock_count[idx]); in __srcu_read_lock()
[all …]
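
The srcutree.c excerpts show SRCU's flip-flop index: a reader samples srcu_idx & 0x1, bumps a lock counter for that index, and the grace-period code later checks that the lock and unlock counts for the retired index match. A toy, single-threaded illustration of the counting; all per-CPU machinery and memory ordering are left out and the names are invented for the sketch:

#include <stdbool.h>

struct toy_srcu {
	int srcu_idx;			/* flipped by the updater */
	unsigned long lock_count[2];
	unsigned long unlock_count[2];
};

static int toy_read_lock(struct toy_srcu *sp)
{
	int idx = sp->srcu_idx & 0x1;	/* snapshot the current index */

	sp->lock_count[idx]++;
	return idx;			/* reader must unlock the same index */
}

static void toy_read_unlock(struct toy_srcu *sp, int idx)
{
	sp->unlock_count[idx]++;
}

/* Grace-period side: after flipping srcu_idx, the old index is quiescent
 * once every lock taken on it has a matching unlock. */
static bool toy_readers_drained(struct toy_srcu *sp, int idx)
{
	return sp->lock_count[idx] == sp->unlock_count[idx];
}
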
srcutiny.c
97 void __srcu_read_unlock(struct srcu_struct *ssp, int idx) in __srcu_read_unlock() argument
99 int newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1; in __srcu_read_unlock()
101 WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval); in __srcu_read_unlock()
114 int idx; in srcu_drive_gp() local
130 idx = (ssp->srcu_idx & 0x2) / 2; in srcu_drive_gp()
133 swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx])); in srcu_drive_gp()
rcuscale.c
133 void (*readunlock)(int idx);
156 static void rcu_scale_read_unlock(int idx) __releases(RCU) in rcu_scale_read_unlock() argument
197 static void srcu_scale_read_unlock(int idx) __releases(srcu_ctlp) in srcu_scale_read_unlock() argument
199 srcu_read_unlock(srcu_ctlp, idx); in srcu_scale_read_unlock()
280 static void tasks_scale_read_unlock(int idx) in tasks_scale_read_unlock() argument
308 static void tasks_trace_scale_read_unlock(int idx) in tasks_trace_scale_read_unlock() argument
356 int idx; in rcu_scale_reader() local
366 idx = cur_ops->readlock(); in rcu_scale_reader()
367 cur_ops->readunlock(idx); in rcu_scale_reader()
refscale.c
181 int idx; in srcu_ref_scale_read_section() local
184 idx = srcu_read_lock(srcu_ctlp); in srcu_ref_scale_read_section()
185 srcu_read_unlock(srcu_ctlp, idx); in srcu_ref_scale_read_section()
192 int idx; in srcu_ref_scale_delay_section() local
195 idx = srcu_read_lock(srcu_ctlp); in srcu_ref_scale_delay_section()
197 srcu_read_unlock(srcu_ctlp, idx); in srcu_ref_scale_delay_section()
rcutorture.c
330 void (*readunlock)(int idx);
408 static void rcu_torture_read_unlock(int idx) __releases(RCU) in rcu_torture_read_unlock() argument
594 static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp) in srcu_torture_read_unlock() argument
596 srcu_read_unlock(srcu_ctlp, idx); in srcu_torture_read_unlock()
739 static void tasks_torture_read_unlock(int idx) in tasks_torture_read_unlock() argument
794 static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU) in rcu_torture_read_unlock_trivial() argument
853 static void tasks_tracing_torture_read_unlock(int idx) in tasks_tracing_torture_read_unlock() argument
1155 int idx; in rcu_torture_writer() local
1201 idx = cur_ops->readlock(); in rcu_torture_writer()
1210 cur_ops->readunlock(idx); in rcu_torture_writer()
[all …]
/kernel/locking/
qspinlock.c
115 static inline __pure u32 encode_tail(int cpu, int idx) in encode_tail() argument
120 tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */ in encode_tail()
128 int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET; in decode_tail() local
130 return per_cpu_ptr(&qnodes[idx].mcs, cpu); in decode_tail()
134 struct mcs_spinlock *grab_mcs_node(struct mcs_spinlock *base, int idx) in grab_mcs_node() argument
136 return &((struct qnode *)base + idx)->mcs; in grab_mcs_node()
319 int idx; in queued_spin_lock_slowpath() local
401 idx = node->count++; in queued_spin_lock_slowpath()
402 tail = encode_tail(smp_processor_id(), idx); in queued_spin_lock_slowpath()
413 if (unlikely(idx >= MAX_NODES)) { in queued_spin_lock_slowpath()
[all …]
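
encode_tail() and decode_tail() pack a CPU number and a per-CPU MCS node index into one tail word, biasing the CPU by one so an all-zero tail means "no tail". A standalone sketch of that bit packing; the offsets and masks here are illustrative stand-ins for the _Q_TAIL_* constants:

#include <stdint.h>

/* Illustrative layout: bits [1:0] = node index (at most 4 nesting
 * contexts), bits above = CPU number + 1 so that tail == 0 is "empty". */
#define TAIL_IDX_OFFSET	0
#define TAIL_IDX_MASK	0x3U
#define TAIL_CPU_OFFSET	2

static uint32_t encode_tail(int cpu, int idx)
{
	uint32_t tail;

	tail  = (uint32_t)(cpu + 1) << TAIL_CPU_OFFSET;
	tail |= (uint32_t)idx << TAIL_IDX_OFFSET;	/* assume idx < 4 */
	return tail;
}

static void decode_tail(uint32_t tail, int *cpu, int *idx)
{
	*cpu = (int)(tail >> TAIL_CPU_OFFSET) - 1;
	*idx = (int)((tail & TAIL_IDX_MASK) >> TAIL_IDX_OFFSET);
}
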
lockdep_proc.c
34 #define iterate_lock_classes(idx, class) \ argument
35 for (idx = 0, class = lock_classes; idx <= max_lock_class_idx; \
36 idx++, class++)
49 unsigned long idx = *pos; in l_start() local
51 if (idx > max_lock_class_idx) in l_start()
53 return lock_classes + idx; in l_start()
82 int idx = class - lock_classes; in l_show() local
87 if (!test_bit(idx, lock_classes_in_use)) in l_show()
244 unsigned long idx; in lockdep_stats_show() local
246 iterate_lock_classes(idx, class) { in lockdep_stats_show()
[all …]
lockdep_internals.h
238 int idx; in debug_class_ops_inc() local
240 idx = class - lock_classes; in debug_class_ops_inc()
241 __debug_atomic_inc(lock_class_ops[idx]); in debug_class_ops_inc()
246 int idx, cpu; in debug_class_ops_read() local
249 idx = class - lock_classes; in debug_class_ops_read()
251 ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu); in debug_class_ops_read()
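
debug_class_ops_inc() and debug_class_ops_read() derive an index by pointer subtraction: every lock_class lives in the static lock_classes[] array, so class - lock_classes gives its slot, which then indexes a parallel statistics array. A small illustration of the pattern, with generic names and the per-CPU summation reduced to a plain array:

#include <stddef.h>

#define MAX_CLASSES 8192

struct lock_class_stub { int dummy; };

static struct lock_class_stub classes[MAX_CLASSES];
static unsigned long class_ops[MAX_CLASSES];	/* parallel stats array */

static void class_ops_inc(struct lock_class_stub *class)
{
	ptrdiff_t idx = class - classes;	/* slot in the static array */

	class_ops[idx]++;
}
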
/kernel/
user_namespace.c
291 unsigned idx; in map_id_range_down_base() local
297 for (idx = 0; idx < extents; idx++) { in map_id_range_down_base()
298 first = map->extent[idx].first; in map_id_range_down_base()
299 last = first + map->extent[idx].count - 1; in map_id_range_down_base()
302 return &map->extent[idx]; in map_id_range_down_base()
340 unsigned idx; in map_id_up_base() local
344 for (idx = 0; idx < extents; idx++) { in map_id_up_base()
345 first = map->extent[idx].lower_first; in map_id_up_base()
346 last = first + map->extent[idx].count - 1; in map_id_up_base()
348 return &map->extent[idx]; in map_id_up_base()
[all …]
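
map_id_range_down_base() and map_id_up_base() are plain linear scans over the mapping extents, returning the extent whose [first, first + count - 1] range contains the id. A condensed sketch of the scan, here only for the "up" direction and with a trimmed-down extent structure:

#include <stddef.h>

struct toy_extent {
	unsigned int first;		/* start of the id range in this ns */
	unsigned int lower_first;	/* corresponding start in the parent ns */
	unsigned int count;
};

static struct toy_extent *find_extent_up(struct toy_extent *extent,
					 unsigned int nr_extents,
					 unsigned int id)
{
	unsigned int idx;

	for (idx = 0; idx < nr_extents; idx++) {
		unsigned int first = extent[idx].lower_first;
		unsigned int last = first + extent[idx].count - 1;

		if (id >= first && id <= last)
			return &extent[idx];
	}
	return NULL;			/* id is not mapped */
}
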
kallsyms.c
150 static unsigned long kallsyms_sym_address(int idx) in kallsyms_sym_address() argument
153 return kallsyms_addresses[idx]; in kallsyms_sym_address()
157 return kallsyms_relative_base + (u32)kallsyms_offsets[idx]; in kallsyms_sym_address()
160 if (kallsyms_offsets[idx] >= 0) in kallsyms_sym_address()
161 return kallsyms_offsets[idx]; in kallsyms_sym_address()
164 return kallsyms_relative_base - 1 - kallsyms_offsets[idx]; in kallsyms_sym_address()
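
kallsyms_sym_address() reconstructs a symbol address either from a full address table or from compact 32-bit offsets relative to kallsyms_relative_base, where negative entries encode base-relative values when some (per-CPU) symbols must stay absolute. A condensed sketch of that decode, with the config switches replaced by a plain flag:

#include <stdint.h>

static unsigned long relative_base;	/* stand-in for kallsyms_relative_base */
static const int32_t *offsets;		/* stand-in for kallsyms_offsets[] */

static unsigned long sym_address(int idx, int absolute_percpu)
{
	if (!absolute_percpu)
		/* every entry is an unsigned offset from the base */
		return relative_base + (uint32_t)offsets[idx];

	/* non-negative entries are absolute addresses (per-CPU symbols),
	 * negative ones encode base-relative offsets */
	if (offsets[idx] >= 0)
		return offsets[idx];
	return relative_base - 1 - offsets[idx];
}
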
/kernel/time/
timer.c
488 static inline void timer_set_idx(struct timer_list *timer, unsigned int idx) in timer_set_idx() argument
491 idx << TIMER_ARRAYSHIFT; in timer_set_idx()
520 unsigned int idx; in calc_wheel_index() local
523 idx = calc_index(expires, 0, bucket_expiry); in calc_wheel_index()
525 idx = calc_index(expires, 1, bucket_expiry); in calc_wheel_index()
527 idx = calc_index(expires, 2, bucket_expiry); in calc_wheel_index()
529 idx = calc_index(expires, 3, bucket_expiry); in calc_wheel_index()
531 idx = calc_index(expires, 4, bucket_expiry); in calc_wheel_index()
533 idx = calc_index(expires, 5, bucket_expiry); in calc_wheel_index()
535 idx = calc_index(expires, 6, bucket_expiry); in calc_wheel_index()
[all …]
/kernel/trace/
tracing_map.h
173 #define TRACING_MAP_ARRAY_ELT(array, idx) \ argument
174 (array->pages[idx >> array->entry_shift] + \
175 ((idx & array->entry_mask) << array->entry_size_shift))
177 #define TRACING_MAP_ENTRY(array, idx) \ argument
178 ((struct tracing_map_entry *)TRACING_MAP_ARRAY_ELT(array, idx))
180 #define TRACING_MAP_ELT(array, idx) \ argument
181 ((struct tracing_map_elt **)TRACING_MAP_ARRAY_ELT(array, idx))
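
TRACING_MAP_ARRAY_ELT() addresses one element of an array spread across fixed-size pages: the high bits of idx select the page and the low bits select the slot within it. A plain-C sketch of the same addressing; the field names mirror the macro and the allocation side is omitted:

#include <stddef.h>

struct paged_array {
	void **pages;			/* one allocation per page */
	unsigned int entry_shift;	/* log2(entries per page) */
	unsigned int entry_mask;	/* entries per page - 1 */
	unsigned int entry_size_shift;	/* log2(bytes per entry) */
};

static void *paged_array_elt(struct paged_array *a, unsigned int idx)
{
	char *page = a->pages[idx >> a->entry_shift];
	size_t offset = (size_t)(idx & a->entry_mask) << a->entry_size_shift;

	return page + offset;
}
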
fgraph.c
270 ftrace_graph_get_ret_stack(struct task_struct *task, int idx) in ftrace_graph_get_ret_stack() argument
272 idx = task->curr_ret_stack - idx; in ftrace_graph_get_ret_stack()
274 if (idx >= 0 && idx <= task->curr_ret_stack) in ftrace_graph_get_ret_stack()
275 return &task->ret_stack[idx]; in ftrace_graph_get_ret_stack()
296 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, in ftrace_graph_ret_addr() argument
315 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, in ftrace_graph_ret_addr() argument
325 if (!task->ret_stack || task_idx < *idx) in ftrace_graph_ret_addr()
328 task_idx -= *idx; in ftrace_graph_ret_addr()
329 (*idx)++; in ftrace_graph_ret_addr()
tracing_map.c
275 int idx = tracing_map_add_field(map, cmp_fn); in tracing_map_add_key_field() local
277 if (idx < 0) in tracing_map_add_key_field()
278 return idx; in tracing_map_add_key_field()
280 map->fields[idx].offset = offset; in tracing_map_add_key_field()
282 map->key_idx[map->n_keys++] = idx; in tracing_map_add_key_field()
284 return idx; in tracing_map_add_key_field()
455 int idx; in get_free_elt() local
457 idx = atomic_inc_return(&map->next_elt); in get_free_elt()
458 if (idx < map->max_elts) { in get_free_elt()
459 elt = *(TRACING_MAP_ELT(map->elts, idx)); in get_free_elt()
[all …]
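
get_free_elt() claims preallocated elements without a lock: an atomic increment hands each caller a unique index, and an index at or beyond max_elts means the pool is exhausted. A sketch of that claim step using C11 atomics, with the paged element lookup replaced by a flat array:

#include <stdatomic.h>
#include <stddef.h>

struct elt { int in_use; };

struct elt_pool {
	struct elt *elts;
	unsigned int max_elts;
	atomic_uint next_elt;		/* next index to hand out */
};

static struct elt *claim_free_elt(struct elt_pool *pool)
{
	unsigned int idx = atomic_fetch_add(&pool->next_elt, 1);

	if (idx >= pool->max_elts)
		return NULL;		/* pool exhausted */
	return &pool->elts[idx];
}
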
preemptirq_delay_test.c
68 static void execute_preemptirqtest(int idx) in execute_preemptirqtest() argument
75 if (idx % 2 == 0) in execute_preemptirqtest()
83 static void preemptirqtest_##POSTFIX(int idx) \
85 execute_preemptirqtest(idx); \
trace_events_hist.c
120 unsigned int idx; member
846 if (hist_field && hist_field->var.idx == var_idx && in check_field_for_var_ref()
935 if (find_any_var_ref(hist_data, field->var.idx)) { in check_var_refs()
1228 var_idx = hist_field->var.idx; in resolve_var_refs()
2018 ref_field->var.idx = var_field->var.idx; in init_var_ref()
2079 if (ref_field->var.idx == var_field->var.idx && in find_var_ref_idx()
2113 if (ref_field->var.idx == var_field->var.idx && in create_var_ref()
3036 var_idx = var->var.idx; in __update_field_vars()
3076 int idx; in create_var() local
3089 idx = tracing_map_add_var(hist_data->map); in create_var()
[all …]
/kernel/bpf/
devmap.c
70 unsigned int idx; member
105 int idx) in dev_map_index_hash() argument
107 return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)]; in dev_map_index_hash()
274 if (dev->idx == key) in __dev_map_hash_lookup_elem()
284 u32 idx, *next = next_key; in dev_map_hash_get_next_key() local
292 idx = *(u32 *)key; in dev_map_hash_get_next_key()
294 dev = __dev_map_hash_lookup_elem(map, idx); in dev_map_hash_get_next_key()
302 *next = next_dev->idx; in dev_map_hash_get_next_key()
306 i = idx & (dtab->n_buckets - 1); in dev_map_hash_get_next_key()
317 *next = next_dev->idx; in dev_map_hash_get_next_key()
[all …]
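
dev_map_index_hash() relies on n_buckets being a power of two, so idx & (n_buckets - 1) is a cheap modulo that selects the bucket for a device index. A minimal sketch of the bucket selection and a lookup, using simple singly linked chains instead of the kernel's hlist:

#include <stddef.h>

struct dev_entry {
	unsigned int idx;		/* device index, the hash key */
	struct dev_entry *next;
};

struct dev_table {
	struct dev_entry **buckets;
	unsigned int n_buckets;		/* must be a power of two */
};

static struct dev_entry **index_bucket(struct dev_table *t, unsigned int idx)
{
	return &t->buckets[idx & (t->n_buckets - 1)];	/* mask == modulo */
}

static struct dev_entry *hash_lookup(struct dev_table *t, unsigned int idx)
{
	struct dev_entry *e;

	for (e = *index_bucket(t, idx); e; e = e->next)
		if (e->idx == idx)
			return e;
	return NULL;
}
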
net_namespace.c
108 int cnt, idx; in bpf_netns_link_release() local
124 idx = link_index(net, type, net_link); in bpf_netns_link_release()
137 WARN_ON(bpf_prog_array_delete_safe_at(old_array, idx)); in bpf_netns_link_release()
172 int idx, ret; in bpf_netns_link_update_prog() local
190 idx = link_index(net, type, net_link); in bpf_netns_link_update_prog()
191 ret = bpf_prog_array_update_at(run_array, idx, new_prog); in bpf_netns_link_update_prog()
/kernel/irq/
generic-chip.c
347 int idx; in __irq_get_domain_generic_chip() local
351 idx = hw_irq / dgc->irqs_per_chip; in __irq_get_domain_generic_chip()
352 if (idx >= dgc->num_chips) in __irq_get_domain_generic_chip()
354 return dgc->gc[idx]; in __irq_get_domain_generic_chip()
390 int idx; in irq_map_generic_chip() local
396 idx = hw_irq % dgc->irqs_per_chip; in irq_map_generic_chip()
398 if (test_bit(idx, &gc->unused)) in irq_map_generic_chip()
401 if (test_bit(idx, &gc->installed)) in irq_map_generic_chip()
415 set_bit(idx, &gc->installed); in irq_map_generic_chip()
424 data->mask = 1 << idx; in irq_map_generic_chip()
timings.c
345 int idx = period; in irq_timings_next_event_index() local
354 while (!memcmp(buffer, &buffer[idx], size * sizeof(int))) { in irq_timings_next_event_index()
359 idx += size; in irq_timings_next_event_index()
366 if (idx == len) in irq_timings_next_event_index()
374 if (len - idx < period) in irq_timings_next_event_index()
375 size = len - idx; in irq_timings_next_event_index()
/kernel/dma/
debug.c
250 __acquires(&dma_entry_hash[idx].lock) in get_hash_bucket()
252 int idx = hash_fn(entry); in get_hash_bucket() local
255 spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags); in get_hash_bucket()
257 return &dma_entry_hash[idx]; in get_hash_bucket()
405 int idx; in debug_dma_dump_mappings() local
407 for (idx = 0; idx < HASH_SIZE; idx++) { in debug_dma_dump_mappings()
408 struct hash_bucket *bucket = &dma_entry_hash[idx]; in debug_dma_dump_mappings()
418 type2name[entry->type], idx, in debug_dma_dump_mappings()
781 int idx; in dump_show() local
783 for (idx = 0; idx < HASH_SIZE; idx++) { in dump_show()
[all …]
