Lines Matching +full:rpc +full:- +full:if
4 * Scheduling for synchronous and asynchronous RPC requests.
27 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
35 * RPC slabs and memory pools
50 * RPC tasks sit here while waiting for conditions to improve.
55 * rpciod-related stuff
61 * Disable the timer for a given RPC task. Should be called with
62 * queue->lock and bh_disabled in order to avoid races within
68 if (task->tk_timeout == 0) in __rpc_disable_timer()
70 dprintk("RPC: %5u disabling timer\n", task->tk_pid); in __rpc_disable_timer()
71 task->tk_timeout = 0; in __rpc_disable_timer()
72 list_del(&task->u.tk_wait.timer_list); in __rpc_disable_timer()
73 if (list_empty(&queue->timer_list.list)) in __rpc_disable_timer()
74 del_timer(&queue->timer_list.timer); in __rpc_disable_timer()
80 queue->timer_list.expires = expires; in rpc_set_queue_timer()
81 mod_timer(&queue->timer_list.timer, expires); in rpc_set_queue_timer()
90 if (!task->tk_timeout) in __rpc_add_timer()
93 dprintk("RPC: %5u setting alarm for %u ms\n", in __rpc_add_timer()
94 task->tk_pid, jiffies_to_msecs(task->tk_timeout)); in __rpc_add_timer()
96 task->u.tk_wait.expires = jiffies + task->tk_timeout; in __rpc_add_timer()
97 if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires)) in __rpc_add_timer()
98 rpc_set_queue_timer(queue, task->u.tk_wait.expires); in __rpc_add_timer()
99 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); in __rpc_add_timer()
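The __rpc_add_timer()/__rpc_disable_timer() fragments above show the pattern of keeping a single timer per wait queue and re-arming it only when a newly queued task expires earlier than the currently programmed expiry. A minimal userspace sketch of that idea, using plain arrays and flags instead of the kernel's list_head and mod_timer() (all names below are hypothetical, not from sched.c):

#include <stdio.h>
#include <stdbool.h>

#define MAX_TASKS 8

struct toy_task {
    unsigned long expires;   /* absolute "jiffies" at which the task times out */
    bool queued;             /* is the task on the timer list? */
};

struct toy_queue {
    struct toy_task tasks[MAX_TASKS];
    unsigned long timer_expires; /* when the single queue timer fires */
    bool timer_armed;
};

/* Analogue of __rpc_add_timer(): re-arm the one queue timer only if the
 * new task expires earlier than the currently programmed expiry. */
static void toy_add_timer(struct toy_queue *q, int i, unsigned long now,
                          unsigned long timeout)
{
    q->tasks[i].expires = now + timeout;
    q->tasks[i].queued = true;
    if (!q->timer_armed || q->tasks[i].expires < q->timer_expires) {
        q->timer_expires = q->tasks[i].expires;
        q->timer_armed = true;
    }
}

/* Analogue of __rpc_disable_timer(): drop the task and disarm the timer
 * only when no timed tasks remain on the queue. */
static void toy_disable_timer(struct toy_queue *q, int i)
{
    q->tasks[i].queued = false;
    for (int j = 0; j < MAX_TASKS; j++)
        if (q->tasks[j].queued)
            return;
    q->timer_armed = false;
}

int main(void)
{
    struct toy_queue q = { 0 };

    toy_add_timer(&q, 0, 100, 50);   /* expires at 150 */
    toy_add_timer(&q, 1, 100, 20);   /* expires at 120 -> timer re-armed earlier */
    printf("timer programmed for %lu\n", q.timer_expires); /* 120 */
    toy_disable_timer(&q, 1);
    toy_disable_timer(&q, 0);
    printf("timer armed: %d\n", q.timer_armed);             /* 0 */
    return 0;
}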
104 if (queue->priority != priority) { in rpc_set_waitqueue_priority()
105 queue->priority = priority; in rpc_set_waitqueue_priority()
106 queue->nr = 1U << priority; in rpc_set_waitqueue_priority()
112 rpc_set_waitqueue_priority(queue, queue->maxpriority); in rpc_reset_waitqueue_priority()
124 if (t->tk_owner == task->tk_owner) { in __rpc_list_enqueue_task()
125 list_add_tail(&task->u.tk_wait.links, in __rpc_list_enqueue_task()
126 &t->u.tk_wait.links); in __rpc_list_enqueue_task()
127 /* Cache the queue head in task->u.tk_wait.list */ in __rpc_list_enqueue_task()
128 task->u.tk_wait.list.next = q; in __rpc_list_enqueue_task()
129 task->u.tk_wait.list.prev = NULL; in __rpc_list_enqueue_task()
133 INIT_LIST_HEAD(&task->u.tk_wait.links); in __rpc_list_enqueue_task()
134 list_add_tail(&task->u.tk_wait.list, q); in __rpc_list_enqueue_task()
146 if (task->u.tk_wait.list.prev == NULL) { in __rpc_list_dequeue_task()
147 list_del(&task->u.tk_wait.links); in __rpc_list_dequeue_task()
150 if (!list_empty(&task->u.tk_wait.links)) { in __rpc_list_dequeue_task()
151 t = list_first_entry(&task->u.tk_wait.links, in __rpc_list_dequeue_task()
155 q = t->u.tk_wait.list.next; in __rpc_list_dequeue_task()
156 list_add_tail(&t->u.tk_wait.list, q); in __rpc_list_dequeue_task()
157 list_del(&task->u.tk_wait.links); in __rpc_list_dequeue_task()
159 list_del(&task->u.tk_wait.list); in __rpc_list_dequeue_task()
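The __rpc_list_enqueue_task()/__rpc_list_dequeue_task() pair keeps each priority list fair across task owners: only one task per owner sits on the list proper, later tasks from the same owner chain behind it, and when the front task leaves, its same-owner successor is appended to the tail of the list. A simplified userspace sketch of that behaviour using index arrays instead of list_head (all names hypothetical):

#include <stdio.h>

#define NTASKS 16

struct toy_task {
    int owner;
    int next_same_owner;   /* next queued task of the same owner, -1 if none */
};

static struct toy_task tasks[NTASKS];
static int main_q[NTASKS]; /* holds at most one task index per owner */
static int main_len;

/* Analogue of __rpc_list_enqueue_task(): if a task of the same owner is
 * already on the main queue, chain behind it instead of taking a new slot. */
static void toy_enqueue(int t)
{
    tasks[t].next_same_owner = -1;
    for (int i = 0; i < main_len; i++) {
        int h = main_q[i];
        if (tasks[h].owner == tasks[t].owner) {
            while (tasks[h].next_same_owner != -1)
                h = tasks[h].next_same_owner;
            tasks[h].next_same_owner = t;
            return;
        }
    }
    main_q[main_len++] = t;
}

/* Analogue of __rpc_list_dequeue_task(): remove the head task, and if it had
 * a same-owner successor, append that successor to the tail of the queue. */
static int toy_dequeue(void)
{
    int t = main_q[0];

    for (int i = 1; i < main_len; i++)
        main_q[i - 1] = main_q[i];
    main_len--;
    if (tasks[t].next_same_owner != -1)
        main_q[main_len++] = tasks[t].next_same_owner;
    return t;
}

int main(void)
{
    tasks[0].owner = 1; tasks[1].owner = 1; tasks[2].owner = 2;
    toy_enqueue(0); toy_enqueue(1); toy_enqueue(2);
    /* Owner 1's second task does not jump ahead of owner 2's task. */
    while (main_len) {
        int t = toy_dequeue();
        printf("task %d (owner %d)\n", t, tasks[t].owner);
    }
    return 0;
}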
169 if (unlikely(queue_priority > queue->maxpriority)) in __rpc_add_wait_queue_priority()
170 queue_priority = queue->maxpriority; in __rpc_add_wait_queue_priority()
171 __rpc_list_enqueue_task(&queue->tasks[queue_priority], task); in __rpc_add_wait_queue_priority()
187 if (RPC_IS_QUEUED(task)) in __rpc_add_wait_queue()
190 if (RPC_IS_PRIORITY(queue)) in __rpc_add_wait_queue()
192 else if (RPC_IS_SWAPPER(task)) in __rpc_add_wait_queue()
193 list_add(&task->u.tk_wait.list, &queue->tasks[0]); in __rpc_add_wait_queue()
195 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); in __rpc_add_wait_queue()
196 task->tk_waitqueue = queue; in __rpc_add_wait_queue()
197 queue->qlen++; in __rpc_add_wait_queue()
202 dprintk("RPC: %5u added to queue %p \"%s\"\n", in __rpc_add_wait_queue()
203 task->tk_pid, queue, rpc_qname(queue)); in __rpc_add_wait_queue()
221 if (RPC_IS_PRIORITY(queue)) in __rpc_remove_wait_queue()
224 list_del(&task->u.tk_wait.list); in __rpc_remove_wait_queue()
225 queue->qlen--; in __rpc_remove_wait_queue()
226 dprintk("RPC: %5u removed from queue %p \"%s\"\n", in __rpc_remove_wait_queue()
227 task->tk_pid, queue, rpc_qname(queue)); in __rpc_remove_wait_queue()
234 spin_lock_init(&queue->lock); in __rpc_init_priority_wait_queue()
235 for (i = 0; i < ARRAY_SIZE(queue->tasks); i++) in __rpc_init_priority_wait_queue()
236 INIT_LIST_HEAD(&queue->tasks[i]); in __rpc_init_priority_wait_queue()
237 queue->maxpriority = nr_queues - 1; in __rpc_init_priority_wait_queue()
239 queue->qlen = 0; in __rpc_init_priority_wait_queue()
240 timer_setup(&queue->timer_list.timer, __rpc_queue_timer_fn, 0); in __rpc_init_priority_wait_queue()
241 INIT_LIST_HEAD(&queue->timer_list.list); in __rpc_init_priority_wait_queue()
259 del_timer_sync(&queue->timer_list.timer); in rpc_destroy_wait_queue()
266 if (signal_pending_state(mode, current)) in rpc_wait_bit_killable()
267 return -ERESTARTSYS; in rpc_wait_bit_killable()
271 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
276 task->tk_pid = atomic_inc_return(&rpc_pid); in rpc_task_set_debuginfo()
287 set_bit(RPC_TASK_ACTIVE, &task->tk_runstate); in rpc_set_active()
292 * Mark an RPC call as having completed by clearing the 'active' bit
297 void *m = &task->tk_runstate; in rpc_complete_task()
305 spin_lock_irqsave(&wq->lock, flags); in rpc_complete_task()
306 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); in rpc_complete_task()
307 ret = atomic_dec_and_test(&task->tk_count); in rpc_complete_task()
308 if (waitqueue_active(wq)) in rpc_complete_task()
310 spin_unlock_irqrestore(&wq->lock, flags); in rpc_complete_task()
315 * Allow callers to wait for completion of an RPC call
318 * to enforce taking of the wq->lock and hence avoid races with
323 if (action == NULL) in __rpc_wait_for_completion_task()
325 return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, in __rpc_wait_for_completion_task()
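rpc_complete_task() and __rpc_wait_for_completion_task() build a completion primitive out of the RPC_TASK_ACTIVE bit plus a bit waitqueue: the waker clears the bit under wq->lock and then wakes any waiters, while the waiter sleeps until the bit is clear. A rough userspace analogue using pthreads (hypothetical names; the kernel uses wait_on_bit()/wake_up_bit(), not condition variables):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct toy_task {
    pthread_mutex_t lock;
    pthread_cond_t done;
    int active;              /* stands in for RPC_TASK_ACTIVE */
};

/* Analogue of rpc_complete_task(): clear "active" under the lock, then wake. */
static void toy_complete(struct toy_task *t)
{
    pthread_mutex_lock(&t->lock);
    t->active = 0;
    pthread_cond_broadcast(&t->done);
    pthread_mutex_unlock(&t->lock);
}

/* Analogue of __rpc_wait_for_completion_task(): sleep until "active" clears. */
static void toy_wait_for_completion(struct toy_task *t)
{
    pthread_mutex_lock(&t->lock);
    while (t->active)
        pthread_cond_wait(&t->done, &t->lock);
    pthread_mutex_unlock(&t->lock);
}

static void *worker(void *arg)
{
    sleep(1);                /* pretend to run the RPC state machine */
    toy_complete(arg);
    return NULL;
}

int main(void)
{
    struct toy_task t = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1
    };
    pthread_t thr;

    pthread_create(&thr, NULL, worker, &t);
    toy_wait_for_completion(&t);   /* returns once the worker completed the task */
    pthread_join(thr, NULL);
    printf("task completed\n");
    return 0;
}

Build with -pthread; the design point mirrored here is that the state change and the wakeup happen under the same lock the waiter checks, which is what closes the race noted in the sched.c comment.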
331 * Make an RPC task runnable.
333 * Note: If the task is ASYNC, and is being made runnable after sitting on an
347 if (!need_wakeup) in rpc_make_runnable()
349 if (RPC_IS_ASYNC(task)) { in rpc_make_runnable()
350 INIT_WORK(&task->u.tk_work, rpc_async_schedule); in rpc_make_runnable()
351 queue_work(wq, &task->u.tk_work); in rpc_make_runnable()
353 wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED); in rpc_make_runnable()
359 * NB: An RPC task will only receive interrupt-driven events as long
367 dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n", in __rpc_sleep_on_priority()
368 task->tk_pid, rpc_qname(q), jiffies); in __rpc_sleep_on_priority()
374 WARN_ON_ONCE(task->tk_callback != NULL); in __rpc_sleep_on_priority()
375 task->tk_callback = action; in __rpc_sleep_on_priority()
384 if (!RPC_IS_ACTIVATED(task)) { in rpc_sleep_on()
385 task->tk_status = -EIO; in rpc_sleep_on()
393 spin_lock_bh(&q->lock); in rpc_sleep_on()
394 __rpc_sleep_on_priority(q, task, action, task->tk_priority); in rpc_sleep_on()
395 spin_unlock_bh(&q->lock); in rpc_sleep_on()
404 if (!RPC_IS_ACTIVATED(task)) { in rpc_sleep_on_priority()
405 task->tk_status = -EIO; in rpc_sleep_on_priority()
413 spin_lock_bh(&q->lock); in rpc_sleep_on_priority()
414 __rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW); in rpc_sleep_on_priority()
415 spin_unlock_bh(&q->lock); in rpc_sleep_on_priority()
420 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
425 * Caller must hold queue->lock, and have cleared the task queued flag.
431 dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n", in __rpc_do_wake_up_task_on_wq()
432 task->tk_pid, jiffies); in __rpc_do_wake_up_task_on_wq()
434 /* Has the task been executed yet? If not, we cannot wake it up! */ in __rpc_do_wake_up_task_on_wq()
435 if (!RPC_IS_ACTIVATED(task)) { in __rpc_do_wake_up_task_on_wq()
436 printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task); in __rpc_do_wake_up_task_on_wq()
446 dprintk("RPC: __rpc_wake_up_task done\n"); in __rpc_do_wake_up_task_on_wq()
455 if (RPC_IS_QUEUED(task)) { in rpc_wake_up_task_on_wq_queue_locked()
457 if (task->tk_waitqueue == queue) in rpc_wake_up_task_on_wq_queue_locked()
477 spin_lock_bh(&queue->lock); in rpc_wake_up_queued_task_on_wq()
479 spin_unlock_bh(&queue->lock); in rpc_wake_up_queued_task_on_wq()
487 spin_lock_bh(&queue->lock); in rpc_wake_up_queued_task()
489 spin_unlock_bh(&queue->lock); in rpc_wake_up_queued_task()
504 q = &queue->tasks[queue->priority]; in __rpc_find_next_queued_priority()
505 if (!list_empty(q) && --queue->nr) { in __rpc_find_next_queued_priority()
514 if (q == &queue->tasks[0]) in __rpc_find_next_queued_priority()
515 q = &queue->tasks[queue->maxpriority]; in __rpc_find_next_queued_priority()
517 q = q - 1; in __rpc_find_next_queued_priority()
518 if (!list_empty(q)) { in __rpc_find_next_queued_priority()
522 } while (q != &queue->tasks[queue->priority]); in __rpc_find_next_queued_priority()
528 rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0])); in __rpc_find_next_queued_priority()
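__rpc_find_next_queued_priority() services up to queue->nr tasks (1 << priority) from the current level before rotating: once the quota runs out or the level is empty, it scans the other levels with wraparound and resets the quota for whichever non-empty level it settles on. A small userspace sketch of that selection logic (array-based, hypothetical names; the quota is reset on every rotation here, whereas rpc_set_waitqueue_priority() only resets it when the level actually changes):

#include <stdio.h>

#define NR_PRIO 3

struct toy_queue {
    int count[NR_PRIO];      /* pending tasks at each priority level */
    unsigned int priority;   /* level currently being serviced */
    unsigned int nr;         /* tasks left to serve at this level before rotating */
};

static void toy_set_priority(struct toy_queue *q, unsigned int prio)
{
    q->priority = prio;
    q->nr = 1u << prio;      /* bigger batch for higher levels, as in sched.c */
}

/* Analogue of __rpc_find_next_queued_priority(): returns the priority level
 * to take the next task from, or -1 if every level is empty. */
static int toy_pick_level(struct toy_queue *q)
{
    unsigned int p = q->priority;

    /* Keep draining the current level while its quota lasts. */
    if (q->count[p] && --q->nr)
        return (int)p;

    /* Quota exhausted or level empty: scan downward with wraparound. */
    unsigned int scan = NR_PRIO;
    do {
        p = p ? p - 1 : NR_PRIO - 1;
        if (q->count[p]) {
            toy_set_priority(q, p);
            return (int)p;
        }
    } while (--scan);
    return -1;
}

int main(void)
{
    struct toy_queue q = { .count = { 5, 5, 5 } };

    toy_set_priority(&q, NR_PRIO - 1);
    for (int i = 0; i < 12; i++) {
        int lvl = toy_pick_level(&q);
        if (lvl < 0)
            break;
        q.count[lvl]--;
        printf("serve level %d\n", lvl);
    }
    return 0;
}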
535 if (RPC_IS_PRIORITY(queue)) in __rpc_find_next_queued()
537 if (!list_empty(&queue->tasks[0])) in __rpc_find_next_queued()
538 return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list); in __rpc_find_next_queued()
551 dprintk("RPC: wake_up_first(%p \"%s\")\n", in rpc_wake_up_first_on_wq()
553 spin_lock_bh(&queue->lock); in rpc_wake_up_first_on_wq()
555 if (task != NULL) { in rpc_wake_up_first_on_wq()
556 if (func(task, data)) in rpc_wake_up_first_on_wq()
561 spin_unlock_bh(&queue->lock); in rpc_wake_up_first_on_wq()
591 * rpc_wake_up - wake up all rpc_tasks
594 * Grabs queue->lock
600 spin_lock_bh(&queue->lock); in rpc_wake_up()
601 head = &queue->tasks[queue->maxpriority]; in rpc_wake_up()
610 if (head == &queue->tasks[0]) in rpc_wake_up()
612 head--; in rpc_wake_up()
614 spin_unlock_bh(&queue->lock); in rpc_wake_up()
619 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
623 * Grabs queue->lock
629 spin_lock_bh(&queue->lock); in rpc_wake_up_status()
630 head = &queue->tasks[queue->maxpriority]; in rpc_wake_up_status()
637 task->tk_status = status; in rpc_wake_up_status()
640 if (head == &queue->tasks[0]) in rpc_wake_up_status()
642 head--; in rpc_wake_up_status()
644 spin_unlock_bh(&queue->lock); in rpc_wake_up_status()
654 spin_lock(&queue->lock); in __rpc_queue_timer_fn()
656 list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) { in __rpc_queue_timer_fn()
657 timeo = task->u.tk_wait.expires; in __rpc_queue_timer_fn()
658 if (time_after_eq(now, timeo)) { in __rpc_queue_timer_fn()
659 dprintk("RPC: %5u timeout\n", task->tk_pid); in __rpc_queue_timer_fn()
660 task->tk_status = -ETIMEDOUT; in __rpc_queue_timer_fn()
664 if (expires == now || time_after(expires, timeo)) in __rpc_queue_timer_fn()
667 if (!list_empty(&queue->timer_list.list)) in __rpc_queue_timer_fn()
669 spin_unlock(&queue->lock); in __rpc_queue_timer_fn()
674 if (task->tk_status == -ETIMEDOUT) in __rpc_atrun()
675 task->tk_status = 0; in __rpc_atrun()
683 task->tk_timeout = delay; in rpc_delay()
689 * Helper to call task->tk_ops->rpc_call_prepare
693 task->tk_ops->rpc_call_prepare(task, task->tk_calldata); in rpc_prepare_task()
700 task->tk_garb_retry = 2; in rpc_init_task_statistics()
701 task->tk_cred_retry = 2; in rpc_init_task_statistics()
702 task->tk_rebind_retry = 2; in rpc_init_task_statistics()
705 task->tk_start = ktime_get(); in rpc_init_task_statistics()
711 task->tk_timeouts = 0; in rpc_reset_task_statistics()
712 task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT); in rpc_reset_task_statistics()
718 * Helper that calls task->tk_ops->rpc_call_done if it exists
722 task->tk_action = NULL; in rpc_exit_task()
723 if (task->tk_ops->rpc_call_done != NULL) { in rpc_exit_task()
724 task->tk_ops->rpc_call_done(task, task->tk_calldata); in rpc_exit_task()
725 if (task->tk_action != NULL) { in rpc_exit_task()
727 /* Always release the RPC slot and buffer memory */ in rpc_exit_task()
736 task->tk_status = status; in rpc_exit()
737 task->tk_action = rpc_exit_task; in rpc_exit()
738 if (RPC_IS_QUEUED(task)) in rpc_exit()
739 rpc_wake_up_queued_task(task->tk_waitqueue, task); in rpc_exit()
745 if (ops->rpc_release != NULL) in rpc_release_calldata()
746 ops->rpc_release(calldata); in rpc_release_calldata()
750 * This is the RPC `scheduler' (or rather, the finite state machine).
758 dprintk("RPC: %5u __rpc_execute flags=0x%x\n", in __rpc_execute()
759 task->tk_pid, task->tk_flags); in __rpc_execute()
762 if (RPC_IS_QUEUED(task)) in __rpc_execute()
771 * tk_action may be NULL if the task has been killed. in __rpc_execute()
775 do_action = task->tk_action; in __rpc_execute()
776 if (task->tk_callback) { in __rpc_execute()
777 do_action = task->tk_callback; in __rpc_execute()
778 task->tk_callback = NULL; in __rpc_execute()
780 if (!do_action) in __rpc_execute()
788 if (!RPC_IS_QUEUED(task)) in __rpc_execute()
791 * The queue->lock protects against races with in __rpc_execute()
799 queue = task->tk_waitqueue; in __rpc_execute()
800 spin_lock_bh(&queue->lock); in __rpc_execute()
801 if (!RPC_IS_QUEUED(task)) { in __rpc_execute()
802 spin_unlock_bh(&queue->lock); in __rpc_execute()
806 spin_unlock_bh(&queue->lock); in __rpc_execute()
807 if (task_is_async) in __rpc_execute()
811 dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid); in __rpc_execute()
812 status = out_of_line_wait_on_bit(&task->tk_runstate, in __rpc_execute()
815 if (status == -ERESTARTSYS) { in __rpc_execute()
818 * -ERESTARTSYS. In order to catch any callbacks that in __rpc_execute()
822 dprintk("RPC: %5u got signal\n", task->tk_pid); in __rpc_execute()
823 task->tk_flags |= RPC_TASK_KILLED; in __rpc_execute()
824 rpc_exit(task, -ERESTARTSYS); in __rpc_execute()
826 dprintk("RPC: %5u sync task resuming\n", task->tk_pid); in __rpc_execute()
829 dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status, in __rpc_execute()
830 task->tk_status); in __rpc_execute()
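__rpc_execute() is at heart a loop over function pointers: each iteration runs tk_callback if one is pending, otherwise tk_action, and stops when the task has no action left or has queued itself to wait. A stripped-down userspace sketch of that dispatch loop (hypothetical task structure, no wait queues or locking):

#include <stdio.h>
#include <stddef.h>

struct toy_task;
typedef void (*toy_action_t)(struct toy_task *);

struct toy_task {
    toy_action_t action;     /* analogue of tk_action */
    toy_action_t callback;   /* analogue of tk_callback, runs once if set */
    int status;
};

static void toy_call_done(struct toy_task *t)
{
    printf("done, status %d\n", t->status);
    t->action = NULL;        /* nothing left to do: the loop will stop */
}

static void toy_call_transmit(struct toy_task *t)
{
    printf("transmit\n");
    t->status = 0;
    t->action = toy_call_done;
}

static void toy_call_start(struct toy_task *t)
{
    printf("start\n");
    t->action = toy_call_transmit;
}

/* Analogue of the core of __rpc_execute(): prefer a pending callback,
 * otherwise run the current action; stop when there is nothing to run. */
static void toy_execute(struct toy_task *t)
{
    for (;;) {
        toy_action_t do_action = t->action;

        if (t->callback) {
            do_action = t->callback;
            t->callback = NULL;
        }
        if (!do_action)
            break;
        do_action(t);
    }
}

int main(void)
{
    struct toy_task t = { .action = toy_call_start };

    toy_execute(&t);
    return 0;
}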
836 * User-visible entry point to the scheduler.
838 * This may be called recursively if e.g. an async NFS task updates
850 if (!is_async) in rpc_execute()
860 * rpc_malloc - allocate RPC buffer resources
861 * @task: RPC task
864 * RPC call and RPC reply that this task is being used for. When
865 * this RPC is retired, the memory is released by calling rpc_free.
868 * returning -ENOMEM and suppressing warning if the request cannot
881 struct rpc_rqst *rqst = task->tk_rqstp; in rpc_malloc()
882 size_t size = rqst->rq_callsize + rqst->rq_rcvsize; in rpc_malloc()
886 if (RPC_IS_SWAPPER(task)) in rpc_malloc()
890 if (size <= RPC_BUFFER_MAXSIZE) in rpc_malloc()
895 if (!buf) in rpc_malloc()
896 return -ENOMEM; in rpc_malloc()
898 buf->len = size; in rpc_malloc()
899 dprintk("RPC: %5u allocated buffer of size %zu at %p\n", in rpc_malloc()
900 task->tk_pid, size, buf); in rpc_malloc()
901 rqst->rq_buffer = buf->data; in rpc_malloc()
902 rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize; in rpc_malloc()
908 * rpc_free - free RPC buffer resources allocated via rpc_malloc
909 * @task: RPC task
914 void *buffer = task->tk_rqstp->rq_buffer; in rpc_free()
919 size = buf->len; in rpc_free()
921 dprintk("RPC: freeing buffer of size %zu at %p\n", in rpc_free()
924 if (size <= RPC_BUFFER_MAXSIZE) in rpc_free()
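rpc_malloc() allocates the call and reply areas as one chunk, records the total size in a struct rpc_buffer header so rpc_free() later knows how the memory was obtained, and hands back a pointer just past the header; rq_rbuffer then starts rq_callsize bytes into that region. A userspace sketch of the same layout trick with plain malloc (hypothetical names, no mempool or slab fallback):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Length header placed in front of the caller-visible buffer,
 * mirroring struct rpc_buffer in sched.c. */
struct toy_buffer {
    size_t len;
    char data[];
};

struct toy_rqst {
    size_t callsize, rcvsize;
    void *buffer;            /* analogue of rq_buffer  */
    void *rbuffer;           /* analogue of rq_rbuffer */
};

static int toy_malloc(struct toy_rqst *rqst)
{
    size_t size = rqst->callsize + rqst->rcvsize;
    struct toy_buffer *buf = malloc(sizeof(*buf) + size);

    if (!buf)
        return -1;
    buf->len = size;
    rqst->buffer = buf->data;
    /* The reply buffer starts right after the call buffer. */
    rqst->rbuffer = (char *)rqst->buffer + rqst->callsize;
    return 0;
}

static void toy_free(struct toy_rqst *rqst)
{
    /* Walk back from the data pointer to the header, as rpc_free() does
     * with container_of(), then release the whole chunk. */
    struct toy_buffer *buf = (struct toy_buffer *)
        ((char *)rqst->buffer - offsetof(struct toy_buffer, data));

    printf("freeing %zu bytes\n", buf->len);
    free(buf);
}

int main(void)
{
    struct toy_rqst rqst = { .callsize = 256, .rcvsize = 1024 };

    if (toy_malloc(&rqst))
        return 1;
    printf("call buffer %p, reply buffer %p\n", rqst.buffer, rqst.rbuffer);
    toy_free(&rqst);
    return 0;
}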
932 * Creation and deletion of RPC task structures
937 atomic_set(&task->tk_count, 1); in rpc_init_task()
938 task->tk_flags = task_setup_data->flags; in rpc_init_task()
939 task->tk_ops = task_setup_data->callback_ops; in rpc_init_task()
940 task->tk_calldata = task_setup_data->callback_data; in rpc_init_task()
941 INIT_LIST_HEAD(&task->tk_task); in rpc_init_task()
943 task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW; in rpc_init_task()
944 task->tk_owner = current->tgid; in rpc_init_task()
947 task->tk_workqueue = task_setup_data->workqueue; in rpc_init_task()
949 task->tk_xprt = xprt_get(task_setup_data->rpc_xprt); in rpc_init_task()
951 if (task->tk_ops->rpc_call_prepare != NULL) in rpc_init_task()
952 task->tk_action = rpc_prepare_task; in rpc_init_task()
956 dprintk("RPC: new task initialized, procpid %u\n", in rpc_init_task()
971 struct rpc_task *task = setup_data->task; in rpc_new_task()
974 if (task == NULL) { in rpc_new_task()
980 task->tk_flags |= flags; in rpc_new_task()
981 dprintk("RPC: allocated task %p\n", task); in rpc_new_task()
986 * rpc_free_task - release rpc task and perform cleanups
992 * "Workqueue currently considers two work items to be the same if they're
993 * on the same address and won't execute them concurrently - ie. it
997 * If a work function frees the work item, and then waits for an event
1006 unsigned short tk_flags = task->tk_flags; in rpc_free_task()
1008 rpc_release_calldata(task->tk_ops, task->tk_calldata); in rpc_free_task()
1010 if (tk_flags & RPC_TASK_DYNAMIC) { in rpc_free_task()
1011 dprintk("RPC: %5u freeing task\n", task->tk_pid); in rpc_free_task()
1024 if (task->tk_msg.rpc_cred) { in rpc_release_resources_task()
1025 put_rpccred(task->tk_msg.rpc_cred); in rpc_release_resources_task()
1026 task->tk_msg.rpc_cred = NULL; in rpc_release_resources_task()
1034 if (q != NULL) { in rpc_final_put_task()
1035 INIT_WORK(&task->u.tk_work, rpc_async_release); in rpc_final_put_task()
1036 queue_work(q, &task->u.tk_work); in rpc_final_put_task()
1043 if (atomic_dec_and_test(&task->tk_count)) { in rpc_do_put_task()
1057 rpc_do_put_task(task, task->tk_workqueue); in rpc_put_task_async()
1063 dprintk("RPC: %5u release task\n", task->tk_pid); in rpc_release_task()
1070 * Note: at this point we have been removed from rpc_clnt->cl_tasks, in rpc_release_task()
1071 * so it should be safe to use task->tk_count as a test for whether in rpc_release_task()
1074 if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) { in rpc_release_task()
1076 if (!rpc_complete_task(task)) in rpc_release_task()
1079 if (!atomic_dec_and_test(&task->tk_count)) in rpc_release_task()
1082 rpc_final_put_task(task, task->tk_workqueue); in rpc_release_task()
1087 return try_module_get(THIS_MODULE) ? 0 : -EINVAL; in rpciod_up()
1105 dprintk("RPC: creating workqueue rpciod\n"); in rpciod_start()
1107 if (!wq) in rpciod_start()
1112 if (!wq) in rpciod_start()
1128 if (rpciod_workqueue == NULL) in rpciod_stop()
1130 dprintk("RPC: destroying workqueue rpciod\n"); in rpciod_stop()
1159 if (!rpciod_start()) in rpc_init_mempool()
1166 if (!rpc_task_slabp) in rpc_init_mempool()
1172 if (!rpc_buffer_slabp) in rpc_init_mempool()
1176 if (!rpc_task_mempool) in rpc_init_mempool()
1180 if (!rpc_buffer_mempool) in rpc_init_mempool()
1185 return -ENOMEM; in rpc_init_mempool()
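The rpc_init_mempool() fragments follow the common kernel idiom of allocating a series of resources and returning -ENOMEM on the first failure, unwinding whatever was already set up. A minimal userspace sketch of that error-unwinding shape (userspace stand-ins; the kernel uses kmem_cache_create() and mempool_create(), not malloc):

#include <stdio.h>
#include <stdlib.h>

static void *task_pool, *buffer_pool;

static int toy_init_pools(void)
{
    task_pool = malloc(4096);
    if (!task_pool)
        goto err;
    buffer_pool = malloc(4096);
    if (!buffer_pool)
        goto err;
    return 0;
err:
    free(task_pool);     /* free(NULL) is a no-op, so partial setups unwind cleanly */
    task_pool = NULL;
    return -1;           /* rpc_init_mempool() returns -ENOMEM at this point */
}

int main(void)
{
    if (toy_init_pools()) {
        fprintf(stderr, "init failed\n");
        return 1;
    }
    puts("pools ready");
    free(buffer_pool);
    free(task_pool);
    return 0;
}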