/kernel/locking/
qspinlock.c
    116  u32 tail;    in encode_tail()  (local)
    118  tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET;    in encode_tail()
    119  tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */    in encode_tail()
    121  return tail;    in encode_tail()
    124  static inline __pure struct mcs_spinlock *decode_tail(u32 tail)    in decode_tail()  (argument)
    126  int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;    in decode_tail()
    127  int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;    in decode_tail()
    175  static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)    in xchg_tail()  (argument)
    181  return (u32)xchg_relaxed(&lock->tail,    in xchg_tail()
    182  tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;    in xchg_tail()
    [all …]
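The hits above show how the qspinlock code packs a CPU number and a per-CPU node index into the tail portion of the lock word. Below is a minimal standalone sketch of the same bit-packing idea; the field offsets, widths, and macro names are illustrative placeholders, not the kernel's _Q_TAIL_* values.

/* Standalone sketch of the tail packing done by encode_tail()/decode_tail()
 * above.  Field layout is assumed for illustration only. */
#include <stdint.h>
#include <stdio.h>

#define TAIL_IDX_OFFSET  16                                  /* 2-bit node index */
#define TAIL_IDX_BITS    2
#define TAIL_CPU_OFFSET  (TAIL_IDX_OFFSET + TAIL_IDX_BITS)   /* then CPU + 1 */
#define TAIL_IDX_MASK    (((1u << TAIL_IDX_BITS) - 1) << TAIL_IDX_OFFSET)

static uint32_t encode_tail(int cpu, int idx)
{
        uint32_t tail;

        /* cpu + 1 so that a tail of 0 can mean "no queued waiters" */
        tail  = (uint32_t)(cpu + 1) << TAIL_CPU_OFFSET;
        tail |= (uint32_t)idx << TAIL_IDX_OFFSET;       /* idx assumed < 4 */
        return tail;
}

static void decode_tail(uint32_t tail, int *cpu, int *idx)
{
        *cpu = (int)(tail >> TAIL_CPU_OFFSET) - 1;
        *idx = (int)((tail & TAIL_IDX_MASK) >> TAIL_IDX_OFFSET);
}

int main(void)
{
        int cpu, idx;

        decode_tail(encode_tail(5, 2), &cpu, &idx);
        printf("cpu=%d idx=%d\n", cpu, idx);            /* cpu=5 idx=2 */
        return 0;
}

The +1 on the CPU number is what lets a tail value of zero stand for "no queued waiters".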
osq_lock.c
    58   if (atomic_read(&lock->tail) == curr &&    in osq_wait_next()
    59   atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {    in osq_wait_next()
    107  old = atomic_xchg(&lock->tail, curr);    in osq_lock()
    214  if (likely(atomic_cmpxchg_release(&lock->tail, curr,    in osq_unlock()
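osq_lock() publishes a new queue node by atomically swapping it into lock->tail, and osq_unlock() retires it with a release cmpxchg. A rough userspace analogue of the enqueue step, loosely modelled on these hits, might look like the sketch below; C11 atomics stand in for the kernel's atomic_*() helpers, and the queue/node types are invented for the sketch.

/* MCS-style enqueue via an atomic tail swap (illustrative only). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct qnode {
        _Atomic(struct qnode *) next;
        atomic_bool locked;
};

struct queue {
        _Atomic(struct qnode *) tail;   /* NULL when nobody is queued */
};

static void mcs_lock(struct queue *q, struct qnode *node)
{
        struct qnode *prev;

        atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
        atomic_store_explicit(&node->locked, false, memory_order_relaxed);

        /* Swap ourselves in as the new tail; prev is the old tail, if any. */
        prev = atomic_exchange_explicit(&q->tail, node, memory_order_acq_rel);
        if (!prev)
                return;         /* queue was empty: we own the lock */

        /* Queue behind prev and wait for it to hand the lock over. */
        atomic_store_explicit(&prev->next, node, memory_order_release);
        while (!atomic_load_explicit(&node->locked, memory_order_acquire))
                ;               /* real code would cpu_relax()/back off here */
}

The real osq_lock() additionally supports aborting and unlinking a waiter, which is what osq_wait_next() handles; this sketch leaves that part out.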
/kernel/
softirq.c
    465  struct tasklet_struct **tail;    (member)
    481  *head->tail = t;    in __tasklet_schedule_common()
    482  head->tail = &(t->next);    in __tasklet_schedule_common()
    510  tl_head->tail = &tl_head->head;    in tasklet_action_common()
    532  *tl_head->tail = t;    in tasklet_action_common()
    533  tl_head->tail = &t->next;    in tasklet_action_common()
    580  per_cpu(tasklet_vec, cpu).tail =    in softirq_init()
    582  per_cpu(tasklet_hi_vec, cpu).tail =    in softirq_init()
    637  per_cpu(tasklet_vec, cpu).tail = i;    in tasklet_kill_immediate()
    650  if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {    in takeover_tasklets()
    [all …]
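The tasklet_vec hits show the classic "head pointer plus pointer-to-last-next" list: tail points at the next field of the last element (or at head when the list is empty), so appending is O(1) and needs no NULL check. A minimal sketch of that pattern, with a generic node type in place of tasklet_struct:

/* "Head + pointer-to-last-next" singly linked list, as used by tasklet_vec. */
#include <stddef.h>

struct node {
        struct node *next;
        int data;
};

struct list {
        struct node *head;
        struct node **tail;     /* &head when empty, &last->next otherwise */
};

static void list_init(struct list *l)
{
        l->head = NULL;
        l->tail = &l->head;
}

static void list_append(struct list *l, struct node *n)
{
        n->next = NULL;
        *l->tail = n;           /* link after the current last element */
        l->tail = &n->next;     /* remember where the next link goes */
}

static struct node *list_take_all(struct list *l)
{
        struct node *all = l->head;

        list_init(l);           /* detach everything, leave the list empty */
        return all;
}

The reset at line 510 above is this same take-all-and-reinitialise step, done before the detached tasklets are run.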
kallsyms.c
    86   goto tail;    in kallsyms_expand_symbol()
    96   tail:    in kallsyms_expand_symbol()
futex.c
    531  struct page *page, *tail;    in get_futex_key()  (local)
    600  tail = page;    in get_futex_key()
    725  key->shared.pgoff = basepage_index(tail);    in get_futex_key()
/kernel/rcu/
rcu_segcblist.c
    21   rclp->tail = &rclp->head;    in rcu_cblist_init()
    34   *rclp->tail = rhp;    in rcu_cblist_enqueue()
    35   rclp->tail = &rhp->next;    in rcu_cblist_enqueue()
    53   drclp->tail = srclp->tail;    in rcu_cblist_flush_enqueue()
    55   drclp->tail = &drclp->head;    in rcu_cblist_flush_enqueue()
    63   srclp->tail = &rhp->next;    in rcu_cblist_flush_enqueue()
    86   rclp->tail = &rclp->head;    in rcu_cblist_dequeue()
    327  *rclp->tail = rsclp->head;    in rcu_segcblist_extract_done_cbs()
    330  rclp->tail = rsclp->tails[RCU_DONE_TAIL];    in rcu_segcblist_extract_done_cbs()
    350  *rclp->tail = *rsclp->tails[RCU_DONE_TAIL];    in rcu_segcblist_extract_pend_cbs()
    [all …]
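rcu_cblist uses the same tail-pointer layout, and rcu_cblist_flush_enqueue() adds a splice: the destination adopts the source's callbacks wholesale and the source is reinitialised. A hedged sketch of that flush half, with simplified stand-ins for struct rcu_cblist and struct rcu_head:

/* Flush-and-adopt in the spirit of rcu_cblist_flush_enqueue() (flush part
 * only; types simplified for the sketch). */
#include <stddef.h>

struct cb {
        struct cb *next;
        void (*func)(struct cb *);
};

struct cblist {
        struct cb *head;
        struct cb **tail;       /* &head when empty */
        long len;
};

static void cblist_init(struct cblist *l)
{
        l->head = NULL;
        l->tail = &l->head;
        l->len = 0;
}

/* Move every callback from src to dst (dst assumed empty), then reset src. */
static void cblist_flush(struct cblist *dst, struct cblist *src)
{
        dst->head = src->head;
        dst->tail = src->head ? src->tail : &dst->head;
        dst->len  = src->len;
        cblist_init(src);
}

The segmented variant keeps an array of tails (rsclp->tails[]) so whole segments can be spliced the same way, as the extract_*_cbs() hits above suggest.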
/kernel/bpf/
queue_stack_maps.c
    19   u32 head, tail;    (member)
    32   return qs->head == qs->tail;    in queue_stack_map_is_empty()
    42   return head == qs->tail;    in queue_stack_map_is_full()
    129  ptr = &qs->elements[qs->tail * qs->map.value_size];    in __queue_map_get()
    133  if (unlikely(++qs->tail >= qs->size))    in __queue_map_get()
    134  qs->tail = 0;    in __queue_map_get()
    224  if (unlikely(++qs->tail >= qs->size))    in queue_stack_map_push_elem()
    225  qs->tail = 0;    in queue_stack_map_push_elem()
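The BPF queue map keeps head and tail as plain indices into a flat element array and wraps them by hand: empty when head == tail, full when advancing head would land on tail. A compact userspace sketch of that index arithmetic, with a fixed element type instead of value_size-sized blobs:

/* Index-based circular queue in the style of the BPF queue map above.
 * One slot is sacrificed so "empty" and "full" are distinguishable. */
#include <stdbool.h>
#include <stdint.h>

#define QSIZE 8

struct queue {
        uint32_t head, tail;
        int elements[QSIZE];
};

static bool queue_is_empty(const struct queue *q)
{
        return q->head == q->tail;
}

static bool queue_is_full(const struct queue *q)
{
        uint32_t head = q->head + 1;

        if (head >= QSIZE)
                head = 0;
        return head == q->tail;
}

static bool queue_push(struct queue *q, int val)
{
        if (queue_is_full(q))
                return false;
        q->elements[q->head] = val;
        if (++q->head >= QSIZE)
                q->head = 0;
        return true;
}

static bool queue_pop(struct queue *q, int *val)
{
        if (queue_is_empty(q))
                return false;
        *val = q->elements[q->tail];
        if (++q->tail >= QSIZE)         /* same wrap the kernel code does */
                q->tail = 0;
        return true;
}

The ++qs->tail in queue_stack_map_push_elem() above is the overwrite path: when the map allows it, a push into a full queue drops the oldest element by advancing tail instead of failing.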
/kernel/events/
ring_buffer.c
    138  ring_buffer_has_space(unsigned long head, unsigned long tail,    in ring_buffer_has_space()  (argument)
    143  return CIRC_SPACE(head, tail, data_size) >= size;    in ring_buffer_has_space()
    145  return CIRC_SPACE(tail, head, data_size) >= size;    in ring_buffer_has_space()
    154  unsigned long tail, offset, head;    in __perf_output_begin()  (local)
    192  tail = READ_ONCE(rb->user_page->data_tail);    in __perf_output_begin()
    195  if (unlikely(!ring_buffer_has_space(head, tail,    in __perf_output_begin()
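__perf_output_begin() reads the user-visible data_tail and checks free space with CIRC_SPACE() before reserving room at head. A minimal sketch of that space check for a power-of-two buffer follows; circ_space() here is a local paraphrase of the circ_buf arithmetic, not the kernel macro, and the backward-writing case is omitted.

/* Free-space check for a power-of-two ring buffer (forward writes only). */
#include <stdbool.h>
#include <stddef.h>

/* Bytes the producer may write before catching up with the consumer.
 * Requires size to be a power of two; one byte is kept free so that
 * head == tail always means "empty". */
static size_t circ_space(size_t head, size_t tail, size_t size)
{
        return (tail - head - 1) & (size - 1);
}

/* data_size is the buffer capacity; size is the record being reserved. */
static bool ring_buffer_has_space(size_t head, size_t tail,
                                  size_t data_size, size_t size)
{
        return circ_space(head, tail, data_size) >= size;
}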
/kernel/trace/
trace_probe.c
    514  char *tail;    in __parse_bitfield_probe_arg()  (local)
    519  bw = simple_strtoul(bf + 1, &tail, 0); /* Use simple one */    in __parse_bitfield_probe_arg()
    521  if (bw == 0 || *tail != '@')    in __parse_bitfield_probe_arg()
    524  bf = tail + 1;    in __parse_bitfield_probe_arg()
    525  bo = simple_strtoul(bf, &tail, 0);    in __parse_bitfield_probe_arg()
    527  if (tail == bf || *tail != '/')    in __parse_bitfield_probe_arg()
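__parse_bitfield_probe_arg() parses a "b<width>@<offset>/..." suffix by leaning on the end pointer the number parser returns: after each number, *tail must be the expected delimiter. A userspace sketch of the same endptr-driven parsing, with strtoul in place of simple_strtoul:

/* End-pointer driven parsing in the style of __parse_bitfield_probe_arg().
 * Format assumed here: "b<width>@<offset>/...". */
#include <stdbool.h>
#include <stdlib.h>

static bool parse_bitfield(const char *bf,
                           unsigned long *width, unsigned long *offset)
{
        char *tail;

        if (bf[0] != 'b')
                return false;

        *width = strtoul(bf + 1, &tail, 0);
        if (*width == 0 || *tail != '@')        /* must stop exactly at '@' */
                return false;

        bf = tail + 1;
        *offset = strtoul(bf, &tail, 0);
        if (tail == bf || *tail != '/')         /* no digits, or bad delimiter */
                return false;

        return true;
}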
ring_buffer.c
    2130  unsigned long tail, struct rb_event_info *info)    in rb_reset_tail()  (argument)
    2140  if (tail >= BUF_PAGE_SIZE) {    in rb_reset_tail()
    2146  if (tail == BUF_PAGE_SIZE)    in rb_reset_tail()
    2153  event = __rb_page_index(tail_page, tail);    in rb_reset_tail()
    2156  local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);    in rb_reset_tail()
    2163  tail_page->real_end = tail;    in rb_reset_tail()
    2176  if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {    in rb_reset_tail()
    2188  event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;    in rb_reset_tail()
    2194  length = (tail + length) - BUF_PAGE_SIZE;    in rb_reset_tail()
    2205  unsigned long tail, struct rb_event_info *info)    in rb_move_tail()  (argument)
    [all …]
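rb_reset_tail() deals with a reservation that ran past the end of the current buffer page: if the old tail still leaves room for a minimal event, the leftover bytes are turned into one padding event; otherwise the page is simply closed at the old tail. A heavily simplified sketch of that padding computation follows; the page size, header size, and padding record format are invented for the sketch, only the arithmetic mirrors the hits above.

/* "Pad out the rest of the page" step, simplified from rb_reset_tail(). */
#include <stdint.h>
#include <string.h>

#define PAGE_DATA_SIZE  4096u
#define EVENT_HDR_SIZE  4u
#define EVENT_MIN_SIZE  8u

struct pad_event {
        uint32_t type_len;      /* 0 means "padding" in this sketch */
        uint32_t length;        /* bytes covered, not counting the header */
};

/* Called when the new event would not fit between 'tail' and page end. */
static void reset_tail(uint8_t *page, uint32_t tail)
{
        struct pad_event pad;

        if (tail + EVENT_MIN_SIZE > PAGE_DATA_SIZE) {
                /* Not even room for a padding header: just stop the page
                 * at 'tail' and let the reader ignore the rest. */
                return;
        }

        /* Turn the unused remainder of the page into one padding event. */
        pad.type_len = 0;
        pad.length = (PAGE_DATA_SIZE - tail) - EVENT_HDR_SIZE;
        memcpy(page + tail, &pad, sizeof(pad));
}

The real function also accounts the discarded bytes and records the page's real_end, as the local_add() and real_end hits show.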
trace_uprobe.c
    650  char *tail;    in trace_uprobe_create()  (local)
    653  tail = kstrdup(kbasename(filename), GFP_KERNEL);    in trace_uprobe_create()
    654  if (!tail) {    in trace_uprobe_create()
    659  ptr = strpbrk(tail, ".-_");    in trace_uprobe_create()
    663  snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);    in trace_uprobe_create()
    665  kfree(tail);    in trace_uprobe_create()
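When no explicit event name is given, trace_uprobe_create() builds one from the basename of the probed file, cut at the first of '.', '-' or '_', plus the probe offset. A userspace sketch of that naming scheme, with strrchr()/strdup() standing in for kbasename()/kstrdup() and an assumed name-length limit:

/* Automatic event-name generation in the style of trace_uprobe_create(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_EVENT_NAME_LEN 64   /* assumed limit, for the sketch only */

static char *make_event_name(const char *filename, unsigned long offset)
{
        const char *base = strrchr(filename, '/');
        char *tail, *ptr, *buf;

        base = base ? base + 1 : filename;      /* poor man's kbasename() */
        tail = strdup(base);
        if (!tail)
                return NULL;

        ptr = strpbrk(tail, ".-_");
        if (ptr)
                *ptr = '\0';                    /* "libfoo.so" -> "libfoo" */

        buf = malloc(MAX_EVENT_NAME_LEN);
        if (buf)
                snprintf(buf, MAX_EVENT_NAME_LEN, "p_%s_0x%lx", tail, offset);
        free(tail);
        return buf;
}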
trace_events.c
    206  struct ftrace_event_field *tail;    in trace_event_get_offsets()  (local)
    214  tail = list_first_entry(head, struct ftrace_event_field, link);    in trace_event_get_offsets()
    215  return tail->offset + tail->size;    in trace_event_get_offsets()
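trace_event_get_offsets() returns an event's payload size as offset + size of its last field; fields are prepended to the list as they are defined, so the first list entry is the highest-offset one. The same idea in miniature, with an array standing in for the kernel's field list:

/* Record size as "last field's offset plus its size". */
#include <stddef.h>

struct field {
        size_t offset;
        size_t size;
};

static size_t record_size(const struct field *fields, size_t nr_fields)
{
        const struct field *last;

        if (nr_fields == 0)
                return 0;
        last = &fields[nr_fields - 1];  /* highest-offset field */
        return last->offset + last->size;
}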
trace.c
    156   struct trace_eval_map_tail tail;    (member)
    5257  if (ptr->tail.next) {    in update_eval_map()
    5258  ptr = ptr->tail.next;    in update_eval_map()
    5384  if (!ptr->tail.next)    in trace_insert_eval_map_file()
    5386  ptr = ptr->tail.next;    in trace_insert_eval_map_file()
    5389  ptr->tail.next = map_array;    in trace_insert_eval_map_file()
    8726  last = &map->tail.next;    in trace_module_remove_evals()
    8727  map = map->tail.next;    in trace_module_remove_evals()
    8732  *last = trace_eval_jmp_to_tail(map)->tail.next;    in trace_module_remove_evals()
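These hits show eval maps saved as arrays whose last element is a trace_eval_map_tail; its next pointer chains one saved array to the following one, so the update and removal paths walk plain array entries and then hop through the tail link. A generic sketch of that "chunks chained through a reserved tail slot" layout, with an invented entry type and a fixed chunk size:

/* Array chunks linked through their last slot (illustrative types only). */
#include <stddef.h>

#define CHUNK 4                         /* payload slots per chunk */

union entry {
        int value;                      /* payload slot */
        union entry *next;              /* tail slot: link to the next chunk */
};

/* Visit every payload slot across all chained chunks. */
static long sum_entries(union entry *chunk)
{
        long sum = 0;

        while (chunk) {
                for (int i = 0; i < CHUNK; i++)
                        sum += chunk[i].value;
                chunk = chunk[CHUNK].next;      /* follow the tail link */
        }
        return sum;
}

Walking code only needs the tail-slot hop to move from one saved array to the next, which is what update_eval_map() and trace_module_remove_evals() do above.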
trace_functions_graph.c
    60   { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },