Lines Matching +full:t +full:- +full:head in kernel/softirq.c

1 // SPDX-License-Identifier: GPL-2.0-only
7 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
33 - No shared variables, all the data are CPU local.
34 - If a softirq needs serialization, let it serialize itself
36 - Even if softirq is serialized, only local cpu is marked for
42 - NET RX softirq. It is multithreaded and does not require
44 - NET TX softirq. It kicks software netdevice queues, hence
47 - Tasklets: serialized wrt itself.
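The notes above sketch the design: all state is per-CPU, and any cross-CPU serialization is left to the individual handler. For orientation, a built-in subsystem wires a vector with open_softirq() and marks it pending with raise_softirq(); the sketch below is illustrative only (MY_SOFTIRQ is hypothetical, since the vector enum is fixed at build time and drivers use tasklets instead), with the handler signature matching the h->action(h) call visible further down.

#include <linux/interrupt.h>

/* Hypothetical vector: would need a slot in the fixed softirq enum. */
static void my_softirq_action(struct softirq_action *h)
{
	/* Runs with bottom halves active, on the CPU that raised it. */
}

static int __init my_subsys_init(void)
{
	open_softirq(MY_SOFTIRQ, my_softirq_action);
	return 0;
}

/* Typically called from a hardirq handler: */
static void my_subsys_kick(void)
{
	raise_softirq(MY_SOFTIRQ);	/* set the pending bit on this CPU */
}
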
66 * but we also don't want to introduce a worst case 1/HZ latency
75 if (tsk && tsk->state != TASK_RUNNING) in wakeup_softirqd()
91 return tsk && (tsk->state == TASK_RUNNING) && in ksoftirqd_running()
97 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
99 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
106 * This one is for softirq.c-internal use,
126 * is set and before current->softirq_enabled is cleared. in __local_bh_disable_ip()
140 current->preempt_disable_ip = get_lock_parent_ip(); in __local_bh_disable_ip()
162 * Special-case - softirqs can safely be enabled by __do_softirq(),
163 * without processing still-pending softirqs:
188 preempt_count_sub(cnt - 1); in __local_bh_enable_ip()
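The __local_bh_disable_ip()/__local_bh_enable_ip() fragments above back the local_bh_disable()/local_bh_enable() pair: the first adds SOFTIRQ_DISABLE_OFFSET to preempt_count, the second subtracts it and may run whatever became pending on the way out. A minimal usage sketch, assuming a hypothetical per-CPU counter that a softirq handler also updates:

#include <linux/bottom_half.h>
#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(u64, my_counter);	/* hypothetical per-CPU stat */

/* Process context: keep softirqs off this CPU while touching the value. */
static void my_counter_add(u64 delta)
{
	local_bh_disable();		/* preempt_count += SOFTIRQ_DISABLE_OFFSET */
	__this_cpu_add(my_counter, delta);
	local_bh_enable();		/* pending softirqs may run here */
}
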
215 * The two things to balance is latency against fairness -
226 * not miss-qualify lock contexts and miss possible deadlocks.
258 unsigned long old_flags = current->flags; in __do_softirq()
270 current->flags &= ~PF_MEMALLOC; in __do_softirq()
290 h += softirq_bit - 1; in __do_softirq()
292 vec_nr = h - softirq_vec; in __do_softirq()
298 h->action(h); in __do_softirq()
302 vec_nr, softirq_to_name[vec_nr], h->action, in __do_softirq()
317 --max_restart) in __do_softirq()
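Stitched together, the matched lines above come from the central dispatch loop of __do_softirq(). Paraphrased (stats, tracing, lockdep, and the time-budget/need_resched() checks are omitted), it scans the pending bitmask with ffs() and restarts a bounded number of times before handing off to ksoftirqd:

u32 pending = local_softirq_pending();
struct softirq_action *h;
int max_restart = MAX_SOFTIRQ_RESTART;
unsigned int vec_nr, softirq_bit;

set_softirq_pending(0);			/* clear the mask before running handlers */
restart:
h = softirq_vec;
while ((softirq_bit = ffs(pending))) {
	h += softirq_bit - 1;		/* step to the lowest pending vector */
	vec_nr = h - softirq_vec;	/* in the real code: fed to kstat/trace */
	h->action(h);			/* run the handler */
	h++;
	pending >>= softirq_bit;	/* retire the serviced bit */
}

pending = local_softirq_pending();	/* handlers may have re-raised */
if (pending) {
	if (--max_restart)
		goto restart;
	wakeup_softirqd();		/* hand the remainder to ksoftirqd */
}
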
349 * irq_enter_rcu - Enter an interrupt context with RCU watching
366 * irq_enter - Enter an interrupt context including RCU update
429 * irq_exit_rcu() - Exit an interrupt context without updating RCU
441 * irq_exit - Exit an interrupt context, update RCU and lockdep
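irq_enter()/irq_exit() bracket every hardirq. Architecture interrupt entry code uses them in roughly this shape (a generic sketch of the classic pattern, not a line from this file):

irq_enter();			/* RCU watching, accounting, hardirq context on */
generic_handle_irq(irq);	/* run the handler registered for this irq */
irq_exit();			/* may invoke softirqs that became pending */
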
462 * (this also catches softirq-disabled code). We will in raise_softirq_irqoff()
498 struct tasklet_struct *head;
505 static void __tasklet_schedule_common(struct tasklet_struct *t, in __tasklet_schedule_common()
509 struct tasklet_head *head; in __tasklet_schedule_common()
513 head = this_cpu_ptr(headp); in __tasklet_schedule_common()
514 t->next = NULL; in __tasklet_schedule_common()
515 *head->tail = t; in __tasklet_schedule_common()
516 head->tail = &(t->next); in __tasklet_schedule_common()
521 void __tasklet_schedule(struct tasklet_struct *t) in __tasklet_schedule()
523 __tasklet_schedule_common(t, &tasklet_vec, in __tasklet_schedule()
528 void __tasklet_hi_schedule(struct tasklet_struct *t) in __tasklet_hi_schedule()
530 __tasklet_schedule_common(t, &tasklet_hi_vec, in __tasklet_hi_schedule()
542 list = tl_head->head; in tasklet_action_common()
543 tl_head->head = NULL; in tasklet_action_common()
544 tl_head->tail = &tl_head->head; in tasklet_action_common()
548 struct tasklet_struct *t = list; in tasklet_action_common()
550 list = list->next; in tasklet_action_common()
552 if (tasklet_trylock(t)) { in tasklet_action_common()
553 if (!atomic_read(&t->count)) { in tasklet_action_common()
555 &t->state)) in tasklet_action_common()
557 if (t->use_callback) in tasklet_action_common()
558 t->callback(t); in tasklet_action_common()
560 t->func(t->data); in tasklet_action_common()
561 tasklet_unlock(t); in tasklet_action_common()
564 tasklet_unlock(t); in tasklet_action_common()
568 t->next = NULL; in tasklet_action_common()
569 *tl_head->tail = t; in tasklet_action_common()
570 tl_head->tail = &t->next; in tasklet_action_common()
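The recurring pattern here (*head->tail = t; head->tail = &t->next;) is a singly linked queue that keeps a pointer to the last next pointer: append is O(1), and tasklet_action_common() detaches the whole pending list in O(1) before walking it. The same splice shows up again in takeover_tasklets() below. A standalone, userspace rendering of the idiom:

#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
	int val;
};

struct queue {
	struct node *head;
	struct node **tail;	/* &head when empty, else &last->next */
};

static void queue_init(struct queue *q)
{
	q->head = NULL;
	q->tail = &q->head;	/* same shape as softirq_init() below */
}

static void queue_append(struct queue *q, struct node *n)
{
	n->next = NULL;
	*q->tail = n;		/* mirrors *head->tail = t; */
	q->tail = &n->next;	/* mirrors head->tail = &t->next; */
}

static struct node *queue_take_all(struct queue *q)	/* O(1) detach */
{
	struct node *list = q->head;

	q->head = NULL;
	q->tail = &q->head;
	return list;
}

int main(void)
{
	struct queue q;
	struct node a = { .val = 1 }, b = { .val = 2 };

	queue_init(&q);
	queue_append(&q, &a);
	queue_append(&q, &b);
	for (struct node *n = queue_take_all(&q); n; n = n->next)
		printf("%d\n", n->val);
	return 0;
}
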
586 void tasklet_setup(struct tasklet_struct *t, in tasklet_setup()
589 t->next = NULL; in tasklet_setup()
590 t->state = 0; in tasklet_setup()
591 atomic_set(&t->count, 0); in tasklet_setup()
592 t->callback = callback; in tasklet_setup()
593 t->use_callback = true; in tasklet_setup()
594 t->data = 0; in tasklet_setup()
598 void tasklet_init(struct tasklet_struct *t, in tasklet_init()
601 t->next = NULL; in tasklet_init()
602 t->state = 0; in tasklet_init()
603 atomic_set(&t->count, 0); in tasklet_init()
604 t->func = func; in tasklet_init()
605 t->use_callback = false; in tasklet_init()
606 t->data = data; in tasklet_init()
610 void tasklet_kill(struct tasklet_struct *t) in tasklet_kill()
615 while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { in tasklet_kill()
618 } while (test_bit(TASKLET_STATE_SCHED, &t->state)); in tasklet_kill()
620 tasklet_unlock_wait(t); in tasklet_kill()
621 clear_bit(TASKLET_STATE_SCHED, &t->state); in tasklet_kill()
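Taken together, tasklet_setup(), tasklet_schedule() and tasklet_kill() give the usual driver lifecycle. A hedged sketch with hypothetical names (my_dev, my_dev_irq, and so on); new code would often prefer threaded IRQs or workqueues, but the shape is:

#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct task;
	/* ... device state ... */
};

static void my_dev_tasklet(struct tasklet_struct *t)
{
	struct my_dev *dev = from_tasklet(dev, t, task);

	/* Deferred half: softirq context, serialized wrt itself. */
}

static irqreturn_t my_dev_irq(int irq, void *data)
{
	struct my_dev *dev = data;

	tasklet_schedule(&dev->task);	/* sets SCHED, queues on this CPU */
	return IRQ_HANDLED;
}

static void my_dev_setup(struct my_dev *dev)
{
	tasklet_setup(&dev->task, my_dev_tasklet);
}

static void my_dev_teardown(struct my_dev *dev)
{
	tasklet_kill(&dev->task);	/* waits out SCHED/RUN before freeing */
}
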
631 &per_cpu(tasklet_vec, cpu).head; in softirq_init()
633 &per_cpu(tasklet_hi_vec, cpu).head; in softirq_init()
671 void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu) in tasklet_kill_immediate()
676 BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state)); in tasklet_kill_immediate()
678 if (!test_bit(TASKLET_STATE_SCHED, &t->state)) in tasklet_kill_immediate()
682 for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) { in tasklet_kill_immediate()
683 if (*i == t) { in tasklet_kill_immediate()
684 *i = t->next; in tasklet_kill_immediate()
700 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) { in takeover_tasklets()
701 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head; in takeover_tasklets()
703 per_cpu(tasklet_vec, cpu).head = NULL; in takeover_tasklets()
704 per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head; in takeover_tasklets()
708 if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) { in takeover_tasklets()
709 *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head; in takeover_tasklets()
711 per_cpu(tasklet_hi_vec, cpu).head = NULL; in takeover_tasklets()
712 per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head; in takeover_tasklets()
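takeover_tasklets() splices a dead CPU's pending tasklets onto the current CPU using exactly the head/tail idiom shown earlier. For reference, it is hooked into CPU hotplug roughly as follows (hedged from memory; in mainline the call sits in spawn_ksoftirqd() in this same file):

/* Run takeover_tasklets() as the teardown step when a CPU goes dead. */
cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead",
			  NULL, takeover_tasklets);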