Lines Matching defs:rcu_data
157 struct rcu_data {
159 unsigned long gp_seq; /* Track rsp->gp_seq counter. */
160 unsigned long gp_seq_needed; /* Track furthest future GP request. */
161 union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */
162 bool core_needs_qs; /* Core waits for quiescent state. */
163 bool beenonline; /* CPU online at least once. */
164 bool gpwrap; /* Possible ->gp_seq wrap. */
165 bool exp_deferred_qs; /* This CPU awaiting a deferred QS? */
166 bool cpu_started; /* RCU watching this onlining CPU. */
167 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
168 unsigned long grpmask; /* Mask to apply to leaf qsmask. */
169 unsigned long ticks_this_gp; /* Scheduling-clock ticks this CPU has handled during the current GP. */
173 struct irq_work defer_qs_iw; /* Obtain later scheduler attention. */
174 bool defer_qs_iw_pending; /* Scheduler attention pending? */
175 struct work_struct strict_work; /* Schedule readers for strict GPs. */
178 struct rcu_segcblist cblist; /* Segmented callback list, with callbacks grouped by the GP they await. */
181 long qlen_last_fqs_check;
183 unsigned long n_cbs_invoked; /* # callbacks invoked since boot. */
184 unsigned long n_force_qs_snap;
186 long blimit; /* Upper limit on a processed batch */
189 int dynticks_snap; /* Per-GP tracking for dynticks. */
190 long dynticks_nesting; /* Track process nesting level. */
191 long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */
192 atomic_t dynticks; /* Even value for idle, else odd. */
193 bool rcu_need_heavy_qs; /* GP old, so heavy quiescent state! */
194 bool rcu_urgent_qs; /* GP old; need light quiescent state. */
195 bool rcu_forced_tick; /* Forced tick to provide QS. */
196 bool rcu_forced_tick_exp; /* ... provide QS to expedited GP. */
198 unsigned long last_accelerate; /* Last jiffy CBs were accelerated. */
199 unsigned long last_advance_all; /* Last jiffy CBs were all advanced. */
200 int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
204 struct rcu_head barrier_head;
205 int exp_dynticks_snap; /* Double-check need for IPI. */
209 struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
210 struct swait_queue_head nocb_state_wq; /* For offloading state changes */
211 struct task_struct *nocb_gp_kthread;
235 struct rcu_data *nocb_next_cb_rdp;
239 struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
264 /* Values for nocb_defer_wakeup field in struct rcu_data. */
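The comment on ->dynticks above ("Even value for idle, else odd") is what lets a grace period judge a remote CPU without disturbing it: an even snapshot means the CPU was idle, and an odd snapshot that has since changed means the CPU passed through idle at some point. Below is a minimal user-space sketch of just that even/odd convention; it is not kernel code, and every name in it is hypothetical.

/* Hedged sketch of the even/odd ->dynticks convention; hypothetical names, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct cpu_counter {
	unsigned long dynticks;		/* even while idle, odd while non-idle */
};

static void go_busy(struct cpu_counter *c) { c->dynticks++; }	/* even -> odd */
static void go_idle(struct cpu_counter *c) { c->dynticks++; }	/* odd -> even */

/*
 * Quiescent if the CPU was idle at snapshot time (even snapshot), or if the
 * counter has changed since (any increment from odd passes through even/idle).
 */
static bool quiescent_since(unsigned long snap, unsigned long now)
{
	return !(snap & 1) || snap != now;
}

int main(void)
{
	struct cpu_counter c = { .dynticks = 0 };	/* starts idle (even) */

	go_busy(&c);
	unsigned long snap = c.dynticks;		/* odd: busy at snapshot time */
	go_idle(&c);					/* counter moves through idle */

	printf("quiescent since snapshot: %s\n",
	       quiescent_since(snap, c.dynticks) ? "yes" : "no");
	return 0;
}

In the listing above, ->dynticks_snap and ->exp_dynticks_snap hold this kind of per-GP snapshot, per their comments ("Per-GP tracking for dynticks" and "Double-check need for IPI").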