
Lines Matching refs: pd (kernel/padata.c)

36 static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) in padata_index_to_cpu() argument
40 target_cpu = cpumask_first(pd->cpumask.pcpu); in padata_index_to_cpu()
42 target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu); in padata_index_to_cpu()
47 static int padata_cpu_hash(struct parallel_data *pd) in padata_cpu_hash() argument
56 spin_lock(&pd->seq_lock); in padata_cpu_hash()
57 cpu_index = pd->seq_nr % cpumask_weight(pd->cpumask.pcpu); in padata_cpu_hash()
58 pd->seq_nr++; in padata_cpu_hash()
59 spin_unlock(&pd->seq_lock); in padata_cpu_hash()
61 return padata_index_to_cpu(pd, cpu_index); in padata_cpu_hash()
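
The two helpers above implement padata's round-robin job placement: padata_cpu_hash() turns a monotonically increasing sequence number (bumped under pd->seq_lock) into an index modulo the weight of the parallel cpumask, and padata_index_to_cpu() walks the mask to that set bit. A minimal userspace model of the mapping, with a plain array standing in for pd->cpumask.pcpu (illustrative only, not kernel code):

    #include <stdio.h>

    int main(void)
    {
        const int pcpu_mask[] = { 0, 2, 5 };  /* CPUs set in the pcpu mask */
        const int weight = sizeof(pcpu_mask) / sizeof(pcpu_mask[0]);
        unsigned int seq_nr = 0;  /* pd->seq_nr, bumped under pd->seq_lock */
        int job;

        for (job = 0; job < 7; job++) {
            int cpu_index = seq_nr % weight;  /* padata_cpu_hash() */
            seq_nr++;

            /* padata_index_to_cpu(): pick the cpu_index-th set bit */
            printf("job %d -> cpu %d\n", job, pcpu_mask[cpu_index]);
        }
        return 0;
    }

Jobs 0..6 land on CPUs 0, 2, 5, 0, 2, 5, 0: submissions spread evenly over the parallel mask, while the sequence number keeps enough information to restore order on the serial side.
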
67 struct parallel_data *pd; in padata_parallel_worker() local
74 pd = pqueue->pd; in padata_parallel_worker()
75 pinst = pd->pinst; in padata_parallel_worker()
112 struct parallel_data *pd; in padata_do_parallel() local
116 pd = rcu_dereference(pinst->pd); in padata_do_parallel()
122 if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu)) in padata_do_parallel()
129 if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM) in padata_do_parallel()
133 atomic_inc(&pd->refcnt); in padata_do_parallel()
134 padata->pd = pd; in padata_do_parallel()
137 target_cpu = padata_cpu_hash(pd); in padata_do_parallel()
138 queue = per_cpu_ptr(pd->pqueue, target_cpu); in padata_do_parallel()
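
padata_do_parallel() is the submission path seen above: it picks up the instance's current parallel_data under RCU (line 116), rejects callback CPUs outside the cbcpu mask (line 122), throttles against MAX_OBJ_NUM via pd->refcnt (lines 129/133), and hashes the object onto a parallel CPU's pqueue (lines 137/138). A minimal sketch of a caller, assuming the 4.x-era API of this file where cb_cpu is passed by value (pcrypt is the in-tree user); the my_* names are illustrative:

    #include <linux/kernel.h>
    #include <linux/padata.h>
    #include <linux/slab.h>

    struct my_request {
        struct padata_priv padata;
        u32 in, out;                    /* illustrative payload */
    };

    /* Runs on some CPU from pd->cpumask.pcpu, possibly out of order. */
    static void my_parallel(struct padata_priv *padata)
    {
        struct my_request *req = container_of(padata, struct my_request,
                                              padata);

        req->out = req->in * req->in;   /* stand-in for the heavy work */

        padata_do_serial(padata);       /* hand off to the reorder machinery */
    }

    /* Runs on cb_cpu, strictly in submission order. */
    static void my_serial(struct padata_priv *padata)
    {
        struct my_request *req = container_of(padata, struct my_request,
                                              padata);

        pr_info("result %u delivered in order\n", req->out);
        kfree(req);     /* sketch assumes the request was kmalloc()ed */
    }

    /* Submit one request; cb_cpu picks where my_serial() will run. */
    static int my_submit(struct padata_instance *pinst,
                         struct my_request *req, int cb_cpu)
    {
        req->padata.parallel = my_parallel;
        req->padata.serial = my_serial;

        /*
         * Returns -EINVAL if cb_cpu is not in the cbcpu mask (line 122
         * above) and -EBUSY once pd->refcnt reaches MAX_OBJ_NUM (line 129).
         */
        return padata_do_parallel(pinst, &req->padata, cb_cpu);
    }
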
170 static struct padata_priv *padata_get_next(struct parallel_data *pd) in padata_get_next() argument
178 num_cpus = cpumask_weight(pd->cpumask.pcpu); in padata_get_next()
184 next_nr = pd->processed; in padata_get_next()
186 cpu = padata_index_to_cpu(pd, next_index); in padata_get_next()
187 next_queue = per_cpu_ptr(pd->pqueue, cpu); in padata_get_next()
199 atomic_dec(&pd->reorder_objects); in padata_get_next()
202 pd->processed++; in padata_get_next()
207 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) { in padata_get_next()
217 static void padata_reorder(struct parallel_data *pd) in padata_reorder() argument
222 struct padata_instance *pinst = pd->pinst; in padata_reorder()
234 if (!spin_trylock_bh(&pd->lock)) in padata_reorder()
238 padata = padata_get_next(pd); in padata_reorder()
255 del_timer(&pd->timer); in padata_reorder()
256 spin_unlock_bh(&pd->lock); in padata_reorder()
261 squeue = per_cpu_ptr(pd->squeue, cb_cpu); in padata_reorder()
270 spin_unlock_bh(&pd->lock); in padata_reorder()
277 if (atomic_read(&pd->reorder_objects) in padata_reorder()
279 mod_timer(&pd->timer, jiffies + HZ); in padata_reorder()
281 del_timer(&pd->timer); in padata_reorder()
288 struct parallel_data *pd = (struct parallel_data *)arg; in padata_reorder_timer() local
290 padata_reorder(pd); in padata_reorder_timer()
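
padata_get_next() and padata_reorder() rebuild submission order on the way out: pd->processed is the sequence number of the next object to release, padata_index_to_cpu() names the per-CPU reorder queue it must show up on, and the timer (lines 279/281) re-arms while objects are outstanding but the expected one has not arrived yet. A userspace model of that in-order release discipline (names and sizes illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define NOBJ 5

    int main(void)
    {
        /* completion order of seq numbers 0..4, deliberately scrambled */
        const int completed[NOBJ] = { 2, 0, 3, 1, 4 };
        bool ready[NOBJ] = { false };
        int processed = 0;      /* pd->processed: next seq to release */
        int i;

        for (i = 0; i < NOBJ; i++) {
            ready[completed[i]] = true;         /* padata_do_serial() */

            /* padata_reorder(): drain the in-order prefix */
            while (processed < NOBJ && ready[processed])
                printf("serialize seq %d\n", processed++);
        }
        return 0;
    }

Object 2 waits until 0 and 1 have both completed; the real code parks such objects on the reorder queue of the CPU they are expected from, and the timer retries in case no later completion kicks padata_reorder() again.
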
296 struct parallel_data *pd; in padata_serial_worker() local
301 pd = squeue->pd; in padata_serial_worker()
316 atomic_dec(&pd->refcnt); in padata_serial_worker()
333 struct parallel_data *pd; in padata_do_serial() local
335 pd = padata->pd; in padata_do_serial()
338 pqueue = per_cpu_ptr(pd->pqueue, cpu); in padata_do_serial()
341 atomic_inc(&pd->reorder_objects); in padata_do_serial()
347 padata_reorder(pd); in padata_do_serial()
351 static int padata_setup_cpumasks(struct parallel_data *pd, in padata_setup_cpumasks() argument
355 if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) in padata_setup_cpumasks()
358 cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask); in padata_setup_cpumasks()
359 if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) { in padata_setup_cpumasks()
360 free_cpumask_var(pd->cpumask.pcpu); in padata_setup_cpumasks()
364 cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask); in padata_setup_cpumasks()
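
Reconstructed from the matched lines, padata_setup_cpumasks() allocates both masks and restricts each to the CPUs currently online; when the second allocation fails, the error path must free pd->cpumask.pcpu, the mask that was already allocated. A sketch of the whole function under that assumption (the unmatched lines are filled in):

    static int padata_setup_cpumasks(struct parallel_data *pd,
                                     const struct cpumask *pcpumask,
                                     const struct cpumask *cbcpumask)
    {
        if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
            return -ENOMEM;
        cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);

        if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
            /* release the pcpu mask allocated above */
            free_cpumask_var(pd->cpumask.pcpu);
            return -ENOMEM;
        }
        cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);

        return 0;
    }
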
375 static void padata_init_squeues(struct parallel_data *pd) in padata_init_squeues() argument
380 for_each_cpu(cpu, pd->cpumask.cbcpu) { in padata_init_squeues()
381 squeue = per_cpu_ptr(pd->squeue, cpu); in padata_init_squeues()
382 squeue->pd = pd; in padata_init_squeues()
389 static void padata_init_pqueues(struct parallel_data *pd) in padata_init_pqueues() argument
395 for_each_cpu(cpu, pd->cpumask.pcpu) { in padata_init_pqueues()
396 pqueue = per_cpu_ptr(pd->pqueue, cpu); in padata_init_pqueues()
397 pqueue->pd = pd; in padata_init_pqueues()
413 struct parallel_data *pd; in padata_alloc_pd() local
415 pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); in padata_alloc_pd()
416 if (!pd) in padata_alloc_pd()
419 pd->pqueue = alloc_percpu(struct padata_parallel_queue); in padata_alloc_pd()
420 if (!pd->pqueue) in padata_alloc_pd()
423 pd->squeue = alloc_percpu(struct padata_serial_queue); in padata_alloc_pd()
424 if (!pd->squeue) in padata_alloc_pd()
426 if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0) in padata_alloc_pd()
429 padata_init_pqueues(pd); in padata_alloc_pd()
430 padata_init_squeues(pd); in padata_alloc_pd()
431 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); in padata_alloc_pd()
432 pd->seq_nr = 0; in padata_alloc_pd()
433 atomic_set(&pd->reorder_objects, 0); in padata_alloc_pd()
434 atomic_set(&pd->refcnt, 0); in padata_alloc_pd()
435 pd->pinst = pinst; in padata_alloc_pd()
436 spin_lock_init(&pd->lock); in padata_alloc_pd()
438 return pd; in padata_alloc_pd()
441 free_percpu(pd->squeue); in padata_alloc_pd()
443 free_percpu(pd->pqueue); in padata_alloc_pd()
445 kfree(pd); in padata_alloc_pd()
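
padata_alloc_pd()'s failure handling follows the usual kernel goto-unwind pattern: each label releases everything allocated before it, in reverse order, and success falls through to return the fully initialized pd. A reconstruction from the matched lines; the label names are assumptions:

    static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
                                                 const struct cpumask *pcpumask,
                                                 const struct cpumask *cbcpumask)
    {
        struct parallel_data *pd;

        pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
        if (!pd)
            goto err;

        pd->pqueue = alloc_percpu(struct padata_parallel_queue);
        if (!pd->pqueue)
            goto err_free_pd;

        pd->squeue = alloc_percpu(struct padata_serial_queue);
        if (!pd->squeue)
            goto err_free_pqueue;

        if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
            goto err_free_squeue;

        padata_init_pqueues(pd);
        padata_init_squeues(pd);
        setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
        pd->seq_nr = 0;
        atomic_set(&pd->reorder_objects, 0);
        atomic_set(&pd->refcnt, 0);
        pd->pinst = pinst;
        spin_lock_init(&pd->lock);

        return pd;

    err_free_squeue:
        free_percpu(pd->squeue);
    err_free_pqueue:
        free_percpu(pd->pqueue);
    err_free_pd:
        kfree(pd);
    err:
        return NULL;
    }

padata_free_pd() below is the success-path mirror image: it releases the cpumasks, both percpu queue arrays, and finally the pd itself.
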
450 static void padata_free_pd(struct parallel_data *pd) in padata_free_pd() argument
452 free_cpumask_var(pd->cpumask.pcpu); in padata_free_pd()
453 free_cpumask_var(pd->cpumask.cbcpu); in padata_free_pd()
454 free_percpu(pd->pqueue); in padata_free_pd()
455 free_percpu(pd->squeue); in padata_free_pd()
456 kfree(pd); in padata_free_pd()
460 static void padata_flush_queues(struct parallel_data *pd) in padata_flush_queues() argument
466 for_each_cpu(cpu, pd->cpumask.pcpu) { in padata_flush_queues()
467 pqueue = per_cpu_ptr(pd->pqueue, cpu); in padata_flush_queues()
471 del_timer_sync(&pd->timer); in padata_flush_queues()
473 if (atomic_read(&pd->reorder_objects)) in padata_flush_queues()
474 padata_reorder(pd); in padata_flush_queues()
476 for_each_cpu(cpu, pd->cpumask.cbcpu) { in padata_flush_queues()
477 squeue = per_cpu_ptr(pd->squeue, cpu); in padata_flush_queues()
481 BUG_ON(atomic_read(&pd->refcnt) != 0); in padata_flush_queues()
499 padata_flush_queues(pinst->pd); in __padata_stop()
507 struct parallel_data *pd_old = pinst->pd; in padata_replace()
512 rcu_assign_pointer(pinst->pd, pd_new); in padata_replace()
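
padata_replace() swaps the instance's parallel_data under RCU: submitters dereference pinst->pd inside a read-side section (line 116), so the updater publishes the new pd with rcu_assign_pointer() and may only drain and free the old one after a grace period, once its refcnt has fallen to zero. A condensed sketch of the two sides; the pd_get/pd_swap names are hypothetical and the PADATA_RESET flag handling is omitted:

    /* reader side, condensed from padata_do_parallel() */
    static struct parallel_data *pd_get(struct padata_instance *pinst)
    {
        struct parallel_data *pd;

        rcu_read_lock();
        pd = rcu_dereference(pinst->pd);
        atomic_inc(&pd->refcnt);        /* pin pd for the in-flight object */
        rcu_read_unlock();

        return pd;
    }

    /* updater side, condensed from padata_replace() */
    static void pd_swap(struct padata_instance *pinst,
                        struct parallel_data *pd_new)
    {
        struct parallel_data *pd_old = pinst->pd;

        rcu_assign_pointer(pinst->pd, pd_new);
        synchronize_rcu();              /* no submitter still sees pd_old */
        padata_flush_queues(pd_old);    /* drain until refcnt hits zero */
        padata_free_pd(pd_old);
    }
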
582 struct parallel_data *pd; in __padata_set_cpumasks() local
595 pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); in __padata_set_cpumasks()
596 if (!pd) in __padata_set_cpumasks()
602 padata_replace(pinst, pd); in __padata_set_cpumasks()
680 struct parallel_data *pd; in __padata_add_cpu() local
683 pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, in __padata_add_cpu()
685 if (!pd) in __padata_add_cpu()
688 padata_replace(pinst, pd); in __padata_add_cpu()
736 struct parallel_data *pd = NULL; in __padata_remove_cpu() local
744 pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, in __padata_remove_cpu()
746 if (!pd) in __padata_remove_cpu()
749 padata_replace(pinst, pd); in __padata_remove_cpu()
751 cpumask_clear_cpu(cpu, pd->cpumask.cbcpu); in __padata_remove_cpu()
752 cpumask_clear_cpu(cpu, pd->cpumask.pcpu); in __padata_remove_cpu()
897 padata_free_pd(pinst->pd); in __padata_free()
1059 struct parallel_data *pd = NULL; in padata_alloc() local
1076 pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); in padata_alloc()
1077 if (!pd) in padata_alloc()
1080 rcu_assign_pointer(pinst->pd, pd); in padata_alloc()
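
Instance creation ties it together: padata_alloc() builds the first pd for the requested cpumasks and publishes it with rcu_assign_pointer() before the instance goes live. A minimal bring-up/tear-down sketch for this era of the API, where the instance wraps a caller-provided workqueue as pcrypt does; the my_* names are illustrative:

    #include <linux/module.h>
    #include <linux/padata.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *my_wq;
    static struct padata_instance *my_pinst;

    static int __init my_init(void)
    {
        int err = -ENOMEM;

        my_wq = alloc_workqueue("my_padata",
                                WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
        if (!my_wq)
            return err;

        /* pcpu and cbcpu masks default to cpu_possible_mask */
        my_pinst = padata_alloc_possible(my_wq);
        if (!my_pinst)
            goto err_destroy_wq;

        err = padata_start(my_pinst);
        if (err)
            goto err_free_padata;

        return 0;

    err_free_padata:
        padata_free(my_pinst);
    err_destroy_wq:
        destroy_workqueue(my_wq);
        return err;
    }

    static void __exit my_exit(void)
    {
        padata_stop(my_pinst);  /* flushes the queues via __padata_stop() */
        padata_free(my_pinst);
        destroy_workqueue(my_wq);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");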