
Searched refs:rhp (Results 1 – 18 of 18) sorted by relevance

/kernel/rcu/
rcu_segcblist.c
28 void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp) in rcu_cblist_enqueue() argument
30 *rclp->tail = rhp; in rcu_cblist_enqueue()
31 rclp->tail = &rhp->next; in rcu_cblist_enqueue()
45 struct rcu_head *rhp) in rcu_cblist_flush_enqueue() argument
53 if (!rhp) { in rcu_cblist_flush_enqueue()
56 rhp->next = NULL; in rcu_cblist_flush_enqueue()
57 srclp->head = rhp; in rcu_cblist_flush_enqueue()
58 srclp->tail = &rhp->next; in rcu_cblist_flush_enqueue()
69 struct rcu_head *rhp; in rcu_cblist_dequeue() local
71 rhp = rclp->head; in rcu_cblist_dequeue()
[all …]
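
The rcu_segcblist.c hits above all revolve around the same tail-pointer queue idiom: enqueue writes the new rcu_head through *rclp->tail and then advances tail to &rhp->next, so appends stay O(1) without walking the list. Below is a minimal stand-alone C sketch of that idiom; the cb_head/cb_list names and the demo in main() are illustrative stand-ins, not the kernel's types.

#include <stddef.h>
#include <stdio.h>

struct cb_head {
        struct cb_head *next;
        void (*func)(struct cb_head *);
};

struct cb_list {
        struct cb_head *head;
        struct cb_head **tail;  /* address of the last ->next slot (or of head) */
        long len;
};

static void cb_list_init(struct cb_list *l)
{
        l->head = NULL;
        l->tail = &l->head;
        l->len = 0;
}

/* Append in O(1): write through the current tail slot, then advance it. */
static void cb_list_enqueue(struct cb_list *l, struct cb_head *h)
{
        h->next = NULL;
        *l->tail = h;
        l->tail = &h->next;
        l->len++;
}

/* Pop from the front; reset the tail slot when the list drains. */
static struct cb_head *cb_list_dequeue(struct cb_list *l)
{
        struct cb_head *h = l->head;

        if (!h)
                return NULL;
        l->head = h->next;
        if (!l->head)
                l->tail = &l->head;
        l->len--;
        return h;
}

int main(void)
{
        struct cb_list l;
        struct cb_head a, b;

        cb_list_init(&l);
        cb_list_enqueue(&l, &a);
        cb_list_enqueue(&l, &b);
        printf("FIFO order ok: %d\n",
               cb_list_dequeue(&l) == &a && cb_list_dequeue(&l) == &b);
        return 0;
}
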
srcutiny.c
116 struct rcu_head *rhp; in srcu_drive_gp() local
139 rhp = lh; in srcu_drive_gp()
142 rhp->func(rhp); in srcu_drive_gp()
178 void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, in call_srcu() argument
183 rhp->func = func; in call_srcu()
184 rhp->next = NULL; in call_srcu()
186 *ssp->srcu_cb_tail = rhp; in call_srcu()
187 ssp->srcu_cb_tail = &rhp->next; in call_srcu()
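
A hedged sketch of the Tiny SRCU flow visible above: call_srcu() stamps ->func and appends the rcu_head to a tail-pointer list, and srcu_drive_gp() later detaches that list and invokes each callback, reading ->next before ->func() can free the element. The my_* names below are invented for the example, and the actual grace-period wait is omitted entirely.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct my_head {
        struct my_head *next;
        void (*func)(struct my_head *);
};

struct my_srcu {
        struct my_head *cb_head;
        struct my_head **cb_tail;
};

/* Registration: record the callback and append at the tail. */
static void my_call_srcu(struct my_srcu *ssp, struct my_head *rhp,
                         void (*func)(struct my_head *))
{
        rhp->func = func;
        rhp->next = NULL;
        *ssp->cb_tail = rhp;
        ssp->cb_tail = &rhp->next;
}

/* "Grace-period driver": detach the pending list, then invoke each
 * callback.  ->next is read before ->func(), which may free the node. */
static void my_drive_gp(struct my_srcu *ssp)
{
        struct my_head *lh = ssp->cb_head;
        struct my_head *rhp;

        ssp->cb_head = NULL;
        ssp->cb_tail = &ssp->cb_head;
        while (lh) {
                rhp = lh;
                lh = lh->next;
                rhp->func(rhp);
        }
}

static void free_cb(struct my_head *rhp)
{
        printf("callback ran for %p\n", (void *)rhp);
        free(rhp);
}

int main(void)
{
        struct my_srcu ssp = { .cb_head = NULL, .cb_tail = &ssp.cb_head };
        struct my_head *rhp = malloc(sizeof(*rhp));

        if (rhp)
                my_call_srcu(&ssp, rhp, free_cb);
        my_drive_gp(&ssp);
        return 0;
}
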
rcutorture.c
1351 static void rcu_torture_timer_cb(struct rcu_head *rhp) in rcu_torture_timer_cb() argument
1353 kfree(rhp); in rcu_torture_timer_cb()
1663 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); in rcu_torture_timer() local
1665 if (rhp) in rcu_torture_timer()
1666 cur_ops->call(rhp, rcu_torture_timer_cb); in rcu_torture_timer()
1904 struct rcu_head *rhp; in rcu_torture_mem_dump_obj() local
1909 rhp = kmem_cache_alloc(kcp, GFP_KERNEL); in rcu_torture_mem_dump_obj()
1910 …slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); in rcu_torture_mem_dump_obj()
1915 pr_alert("mem_dump_obj(%px):", &rhp); in rcu_torture_mem_dump_obj()
1916 mem_dump_obj(&rhp); in rcu_torture_mem_dump_obj()
[all …]
update.c
469 void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp, in do_trace_rcu_torture_read() argument
473 trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c); in do_trace_rcu_torture_read()
477 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ argument
537 struct early_boot_kfree_rcu *rhp; in early_boot_test_call_rcu() local
544 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); in early_boot_test_call_rcu()
545 if (!WARN_ON_ONCE(!rhp)) in early_boot_test_call_rcu()
546 kfree_rcu(rhp, rh); in early_boot_test_call_rcu()
srcutree.c
801 static void srcu_leak_callback(struct rcu_head *rhp) in srcu_leak_callback() argument
809 struct rcu_head *rhp, bool do_norm) in srcu_gp_start_if_needed() argument
822 if (rhp) in srcu_gp_start_if_needed()
823 rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp); in srcu_gp_start_if_needed()
873 static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, in __call_srcu() argument
876 if (debug_rcu_head_queue(rhp)) { in __call_srcu()
878 WRITE_ONCE(rhp->func, srcu_leak_callback); in __call_srcu()
882 rhp->func = func; in __call_srcu()
883 (void)srcu_gp_start_if_needed(ssp, rhp, do_norm); in __call_srcu()
903 void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, in call_srcu() argument
[all …]
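
__call_srcu() above guards against an rcu_head being queued twice: if the debug check trips, it swaps ->func to srcu_leak_callback, a marker that is never supposed to run, instead of enqueueing the head a second time. A rough stand-alone model of that guard, with a plain queued flag standing in for the kernel's debug-objects machinery; all names here are illustrative.

#include <stddef.h>
#include <stdio.h>

struct my_head {
        struct my_head *next;
        void (*func)(struct my_head *);
        int queued;             /* stand-in for debug-objects state */
};

static void leak_callback(struct my_head *rhp)
{
        /* Never meant to run; its presence just marks a misuse. */
        fprintf(stderr, "leaked callback %p\n", (void *)rhp);
}

static void real_cb(struct my_head *rhp)
{
        printf("real callback for %p\n", (void *)rhp);
}

/* Returns nonzero if rhp is already pending, mimicking the shape of
 * debug_rcu_head_queue() in the snippet above. */
static int debug_head_queue(struct my_head *rhp)
{
        if (rhp->queued)
                return 1;
        rhp->queued = 1;
        return 0;
}

static void my_call(struct my_head *rhp, void (*func)(struct my_head *))
{
        if (debug_head_queue(rhp)) {
                /* Double queue: warn and park the head on the marker callback. */
                fprintf(stderr, "double call on %p\n", (void *)rhp);
                rhp->func = leak_callback;
                return;
        }
        rhp->func = func;
        /* ...the real code would enqueue rhp and start a grace period here... */
}

int main(void)
{
        struct my_head h = { 0 };

        my_call(&h, real_cb);   /* accepted */
        my_call(&h, real_cb);   /* rejected: already queued */
        return 0;
}
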
tasks.h
152 static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, in call_rcu_tasks_generic() argument
158 rhp->next = NULL; in call_rcu_tasks_generic()
159 rhp->func = func; in call_rcu_tasks_generic()
162 WRITE_ONCE(*rtp->cbs_tail, rhp); in call_rcu_tasks_generic()
163 rtp->cbs_tail = &rhp->next; in call_rcu_tasks_generic()
544 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
565 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func) in call_rcu_tasks() argument
567 call_rcu_tasks_generic(rhp, func, &rcu_tasks); in call_rcu_tasks()
696 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
718 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func) in call_rcu_tasks_rude() argument
[all …]
rcuscale.c
378 static void rcu_scale_async_cb(struct rcu_head *rhp) in rcu_scale_async_cb() argument
381 kfree(rhp); in rcu_scale_async_cb()
393 struct rcu_head *rhp = NULL; in rcu_scale_writer() local
433 if (!rhp) in rcu_scale_writer()
434 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); in rcu_scale_writer()
435 if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) { in rcu_scale_writer()
437 cur_ops->async(rhp, rcu_scale_async_cb); in rcu_scale_writer()
438 rhp = NULL; in rcu_scale_writer()
443 kfree(rhp); /* Because we are stopping. */ in rcu_scale_writer()
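
rcu_scale_writer() above throttles itself: it only posts a new asynchronous callback while n_async_inflight stays below gp_async_max, hands ownership of the rcu_head to the callback, and frees any spare head when stopping. A simplified single-threaded sketch of that flow control; post_async() is a stand-in for cur_ops->async() and "completes" immediately, so the counter round-trips to zero.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct my_head {
        struct my_head *next;
        void (*func)(struct my_head *);
};

static int n_async_inflight;            /* callbacks posted but not yet run */
static const int gp_async_max = 4;      /* illustrative limit */

static void async_cb(struct my_head *rhp)
{
        n_async_inflight--;             /* completion side of the throttle */
        free(rhp);
}

/* Stand-in for the async post: here the callback runs immediately. */
static void post_async(struct my_head *rhp, void (*func)(struct my_head *))
{
        rhp->func = func;
        rhp->func(rhp);
}

int main(void)
{
        struct my_head *rhp = NULL;
        int i;

        for (i = 0; i < 10; i++) {
                if (!rhp)
                        rhp = malloc(sizeof(*rhp));
                if (rhp && n_async_inflight < gp_async_max) {
                        n_async_inflight++;
                        post_async(rhp, async_cb);
                        rhp = NULL;     /* ownership passed to the callback */
                }
        }
        free(rhp);                      /* spare head left over at shutdown */
        printf("inflight at exit: %d\n", n_async_inflight);
        return 0;
}
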
rcu_segcblist.h
22 void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp);
25 struct rcu_head *rhp);
135 struct rcu_head *rhp);
137 struct rcu_head *rhp);
sync.c
43 static void rcu_sync_func(struct rcu_head *rhp);
73 static void rcu_sync_func(struct rcu_head *rhp) in rcu_sync_func() argument
75 struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head); in rcu_sync_func()
tree_nocb.h
303 static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, in rcu_nocb_do_flush_bypass() argument
311 if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) { in rcu_nocb_do_flush_bypass()
316 if (rhp) in rcu_nocb_do_flush_bypass()
318 rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp); in rcu_nocb_do_flush_bypass()
333 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, in rcu_nocb_flush_bypass() argument
340 return rcu_nocb_do_flush_bypass(rdp, rhp, j); in rcu_nocb_flush_bypass()
374 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, in rcu_nocb_try_bypass() argument
441 if (!rcu_nocb_flush_bypass(rdp, rhp, j)) { in rcu_nocb_try_bypass()
464 rcu_cblist_enqueue(&rdp->nocb_bypass, rhp); in rcu_nocb_try_bypass()
1452 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, in rcu_nocb_flush_bypass() argument
[all …]
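
The bypass path above leans on rcu_cblist_flush_enqueue() (shown in the rcu_segcblist.c result): the source list is handed over wholesale to the destination, and the source is restarted holding just the newly arrived rhp, or left empty when rhp is NULL. A compact stand-alone model of that hand-over, reusing illustrative cb_head/cb_list types rather than the kernel's.

#include <stddef.h>
#include <stdio.h>

struct cb_head {
        struct cb_head *next;
};

struct cb_list {
        struct cb_head *head;
        struct cb_head **tail;
        long len;
};

static void cb_list_init(struct cb_list *l)
{
        l->head = NULL;
        l->tail = &l->head;
        l->len = 0;
}

static void cb_list_enqueue(struct cb_list *l, struct cb_head *h)
{
        h->next = NULL;
        *l->tail = h;
        l->tail = &h->next;
        l->len++;
}

/* Move every callback from src to dst, then restart src with just rhp
 * (or empty if rhp is NULL) -- modeled on the snippet above. */
static void cb_list_flush_enqueue(struct cb_list *dst, struct cb_list *src,
                                  struct cb_head *rhp)
{
        dst->head = src->head;
        dst->tail = dst->head ? src->tail : &dst->head;
        dst->len = src->len;
        if (!rhp) {
                cb_list_init(src);
        } else {
                rhp->next = NULL;
                src->head = rhp;
                src->tail = &rhp->next;
                src->len = 1;
        }
}

int main(void)
{
        struct cb_list bypass, drain;
        struct cb_head a, b, fresh;

        cb_list_init(&bypass);
        cb_list_init(&drain);
        cb_list_enqueue(&bypass, &a);
        cb_list_enqueue(&bypass, &b);

        cb_list_flush_enqueue(&drain, &bypass, &fresh);
        printf("drained %ld, bypass now holds %ld\n", drain.len, bypass.len);
        return 0;
}
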
rcu.h
466 struct rcu_head *rhp,
480 struct rcu_head *rhp,
485 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ argument
tree.h
440 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
442 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
tree.c
2465 struct rcu_head *rhp; in rcu_do_batch() local
2510 rhp = rcu_cblist_dequeue(&rcl); in rcu_do_batch()
2512 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) { in rcu_do_batch()
2516 debug_rcu_head_unqueue(rhp); in rcu_do_batch()
2519 trace_rcu_invoke_callback(rcu_state.name, rhp); in rcu_do_batch()
2521 f = rhp->func; in rcu_do_batch()
2522 WRITE_ONCE(rhp->func, (rcu_callback_t)0L); in rcu_do_batch()
2523 f(rhp); in rcu_do_batch()
2933 static void rcu_leak_callback(struct rcu_head *rhp) in rcu_leak_callback() argument
3987 static void rcu_barrier_callback(struct rcu_head *rhp) in rcu_barrier_callback() argument
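
rcu_do_batch() above drains a detached list and invokes each callback, with one notable detail: it snapshots ->func, clears the field, and only then calls the snapshot, so a buggy double invocation of the same rcu_head fails loudly instead of running twice. A small illustrative loop in the same shape; do_batch() and the cb_head type are stand-ins for the example.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct cb_head {
        struct cb_head *next;
        void (*func)(struct cb_head *);
};

static void free_cb(struct cb_head *rhp)
{
        free(rhp);
}

/* Invoke every callback on a detached list: snapshot ->func, zero it so
 * a second invocation would trap, then call the snapshot (which may free rhp). */
static void do_batch(struct cb_head *list)
{
        struct cb_head *rhp;
        void (*f)(struct cb_head *);

        while ((rhp = list) != NULL) {
                list = rhp->next;
                f = rhp->func;
                rhp->func = NULL;
                f(rhp);
        }
}

int main(void)
{
        struct cb_head *a = malloc(sizeof(*a));
        struct cb_head *b = malloc(sizeof(*b));

        if (!a || !b)
                return 1;
        a->func = free_cb; a->next = b;
        b->func = free_cb; b->next = NULL;
        do_batch(a);
        puts("batch done");
        return 0;
}
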
/kernel/
pid.c
120 static void delayed_put_pid(struct rcu_head *rhp) in delayed_put_pid() argument
122 struct pid *pid = container_of(rhp, struct pid, rcu); in delayed_put_pid()
fork.c
813 void __put_task_struct_rcu_cb(struct rcu_head *rhp) in __put_task_struct_rcu_cb() argument
815 struct task_struct *task = container_of(rhp, struct task_struct, rcu); in __put_task_struct_rcu_cb()
1956 static void __delayed_free_task(struct rcu_head *rhp) in __delayed_free_task() argument
1958 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); in __delayed_free_task()
exit.c
216 static void delayed_put_task_struct(struct rcu_head *rhp) in delayed_put_task_struct() argument
218 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); in delayed_put_task_struct()
/kernel/irq/
irqdesc.c
435 static void delayed_free_desc(struct rcu_head *rhp) in delayed_free_desc() argument
437 struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu); in delayed_free_desc()
/kernel/sched/
core.c
10049 static void sched_unregister_group_rcu(struct rcu_head *rhp) in sched_unregister_group_rcu() argument
10052 sched_unregister_group(container_of(rhp, struct task_group, rcu)); in sched_unregister_group_rcu()
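
The hits outside kernel/rcu/ (delayed_put_pid(), __put_task_struct_rcu_cb(), delayed_put_task_struct(), delayed_free_desc(), sched_unregister_group_rcu()) all share one shape: the rcu_head is embedded in a larger object, and the callback uses container_of() to get back to that object and release it. A self-contained user-space illustration of that recovery step; struct my_pid and delayed_put() are invented for the example, and the container_of() here is a minimal version in the same spirit as the kernel macro.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal container_of(): step back from a member to its enclosing struct. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct my_head {
        struct my_head *next;
        void (*func)(struct my_head *);
};

/* The rcu_head lives inside the object it will eventually release. */
struct my_pid {
        int nr;
        struct my_head rcu;
};

/* RCU-callback shape: recover the enclosing object, then drop it. */
static void delayed_put(struct my_head *rhp)
{
        struct my_pid *pid = container_of(rhp, struct my_pid, rcu);

        printf("releasing pid %d\n", pid->nr);
        free(pid);
}

int main(void)
{
        struct my_pid *pid = malloc(sizeof(*pid));

        if (!pid)
                return 1;
        pid->nr = 42;
        /* In the kernel this would be call_rcu(&pid->rcu, delayed_put);
         * here the callback is simply invoked directly. */
        pid->rcu.func = delayed_put;
        pid->rcu.func(&pid->rcu);
        return 0;
}
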