Lines Matching full:reorder (kernel/padata.c)
248 * serialization, if present in one of the percpu reorder queues.
251 * the cpu's reorder queue.
257 struct padata_list *reorder; in padata_find_next() local
260 reorder = per_cpu_ptr(pd->reorder_list, cpu); in padata_find_next()
262 spin_lock(&reorder->lock); in padata_find_next()
263 if (list_empty(&reorder->list)) { in padata_find_next()
264 spin_unlock(&reorder->lock); in padata_find_next()
268 padata = list_entry(reorder->list.next, struct padata_priv, list); in padata_find_next()
275 spin_unlock(&reorder->lock); in padata_find_next()
285 spin_unlock(&reorder->lock); in padata_find_next()
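Read together, the padata_find_next() matches above (lines 257-285) outline a peek-under-lock pattern: take the current CPU's reorder list lock, return NULL if the list is empty or its head is not the next object in sequence, otherwise optionally dequeue it. A hedged reconstruction of the function around the quoted lines follows; the seq_nr/processed bookkeeping and the cpumask_next_wrap() call are assumptions about the surrounding padata internals, not lines shown in the matches.

    static struct padata_priv *padata_find_next(struct parallel_data *pd,
                                                bool remove_object)
    {
            struct padata_priv *padata;
            struct padata_list *reorder;
            int cpu = pd->cpu;

            reorder = per_cpu_ptr(pd->reorder_list, cpu);

            spin_lock(&reorder->lock);
            if (list_empty(&reorder->list)) {
                    spin_unlock(&reorder->lock);
                    return NULL;
            }

            padata = list_entry(reorder->list.next, struct padata_priv, list);

            /*
             * Several jobs can hash to the same CPU, so the head of this
             * queue is not necessarily the next object to serialize.
             * (Assumed bookkeeping, not shown in the matches above.)
             */
            if (padata->seq_nr != pd->processed) {
                    spin_unlock(&reorder->lock);
                    return NULL;
            }

            if (remove_object) {
                    list_del_init(&padata->list);
                    ++pd->processed;
                    /* cpumask_next_wrap() signature varies across kernels. */
                    pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
            }

            spin_unlock(&reorder->lock);
            return padata;
    }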
295 struct padata_list *reorder; in padata_reorder() local
299 * the reorder queue at a time. Calculating in which percpu reorder in padata_reorder()
302 * the objects arrive at the reorder queues. So a cpu could wait to in padata_reorder()
316 * cpu's reorder queue, nothing to do for now. in padata_reorder()
335 * the reorder queues in the meantime. in padata_reorder()
337 * Ensure reorder queue is read after pd->lock is dropped so we see in padata_reorder()
343 reorder = per_cpu_ptr(pd->reorder_list, pd->cpu); in padata_reorder()
344 if (!list_empty(&reorder->list) && padata_find_next(pd, false)) in padata_reorder()
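The padata_reorder() matches show two phases: a drain of every next-in-sequence object, guarded so only one CPU dequeues at a time, and then a re-check of the current CPU's reorder queue after pd->lock is dropped, ordered by a barrier that pairs with one in padata_do_serial(). Below is a sketch under those assumptions; the trylock, the squeue handling, and the serial_wq and reorder_work names come from padata internals rather than the matches above.

    static void padata_reorder(struct parallel_data *pd)
    {
            struct padata_instance *pinst = pd->ps->pinst;
            struct padata_serial_queue *squeue;
            struct padata_priv *padata;
            struct padata_list *reorder;

            /*
             * A trylock keeps the drain single-threaded without making
             * other CPUs spin; the lock holder serves everything that is
             * enqueued while it holds the lock.
             */
            if (!spin_trylock_bh(&pd->lock))
                    return;

            while ((padata = padata_find_next(pd, true))) {
                    /* Hand the object to its serial callback CPU. */
                    squeue = per_cpu_ptr(pd->squeue, padata->cb_cpu);

                    spin_lock(&squeue->serial.lock);
                    list_add_tail(&padata->list, &squeue->serial.list);
                    spin_unlock(&squeue->serial.lock);

                    queue_work_on(padata->cb_cpu, pinst->serial_wq,
                                  &squeue->work);
            }

            spin_unlock_bh(&pd->lock);

            /*
             * An object may have arrived while the lock was held; re-check
             * the current CPU's queue.  Pairs with the smp_mb() in
             * padata_do_serial().
             */
            smp_mb();

            reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
            if (!list_empty(&reorder->list) && padata_find_next(pd, false))
                    queue_work(pinst->serial_wq, &pd->reorder_work);
    }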
404 struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu); in padata_do_serial() local
408 spin_lock(&reorder->lock); in padata_do_serial()
410 list_for_each_prev(pos, &reorder->list) { in padata_do_serial()
416 spin_unlock(&reorder->lock); in padata_do_serial()
419 * Ensure the addition to the reorder list is ordered correctly in padata_do_serial()
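The padata_do_serial() matches (lines 404-419) show a reverse walk of the hashed CPU's reorder list, i.e. a sorted insert that keeps the list in ascending sequence-number order; walking backwards is cheap because a newly finished object usually belongs near the tail. A hedged sketch follows; padata_cpu_hash() and the wrap-safe comparison are reconstructions from padata internals, not lines in the matches.

    void padata_do_serial(struct padata_priv *padata)
    {
            struct parallel_data *pd = padata->pd;
            int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
            struct padata_list *reorder =
                    per_cpu_ptr(pd->reorder_list, hashed_cpu);
            struct padata_priv *cur;
            struct list_head *pos;

            spin_lock(&reorder->lock);
            /* Keep the list sorted in ascending seq_nr order. */
            list_for_each_prev(pos, &reorder->list) {
                    cur = list_entry(pos, struct padata_priv, list);
                    /*
                     * Subtract and test the sign so that seq_nr integer
                     * wraparound still compares correctly.
                     */
                    if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
                            break;
            }
            list_add(&padata->list, pos);
            spin_unlock(&reorder->lock);

            /*
             * Make the list addition visible before padata_reorder()
             * takes (or fails to take) pd->lock.  Pairs with the smp_mb()
             * in padata_reorder().
             */
            smp_mb();

            padata_reorder(pd);
    }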
558 /* Initialize per-CPU reorder lists */
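The last match is the setup side. Here is a minimal sketch of per-CPU reorder-list initialization, assuming pd->reorder_list was allocated with alloc_percpu(struct padata_list) and that a small helper initializes each list head and lock; the helper bodies are illustrative reconstructions rather than quoted source.

    static void __padata_list_init(struct padata_list *pd_list)
    {
            INIT_LIST_HEAD(&pd_list->list);
            spin_lock_init(&pd_list->lock);
    }

    /* Initialize per-CPU reorder lists */
    static void padata_init_reorder_list(struct parallel_data *pd)
    {
            int cpu;
            struct padata_list *list;

            for_each_cpu(cpu, pd->cpumask.pcpu) {
                    list = per_cpu_ptr(pd->reorder_list, cpu);
                    __padata_list_init(list);
            }
    }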