Lines Matching full:call

1 /* RxRPC individual remote procedure call handling
50 struct rxrpc_call *call = from_timer(call, t, timer); in rxrpc_call_timer_expired() local
52 _enter("%d", call->debug_id); in rxrpc_call_timer_expired()
54 if (call->state < RXRPC_CALL_COMPLETE) { in rxrpc_call_timer_expired()
55 trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies); in rxrpc_call_timer_expired()
56 rxrpc_queue_call(call); in rxrpc_call_timer_expired()
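
The lines above show the call's expiry handler recovering its containing rxrpc_call with from_timer() and deferring the real work to the call's work processor. A minimal sketch of that timer_setup()/from_timer() pattern, using hypothetical "my_obj" names rather than the rxrpc structures:

    #include <linux/timer.h>
    #include <linux/workqueue.h>

    struct my_obj {
            struct timer_list  timer;      /* fires in softirq context */
            struct work_struct processor;  /* process-context work */
    };

    static void my_obj_timer_expired(struct timer_list *t)
    {
            /* Recover the object that embeds the timer_list. */
            struct my_obj *obj = from_timer(obj, t, timer);

            /* Softirq context: just kick the work item. */
            queue_work(system_wq, &obj->processor);
    }

    static void my_obj_init(struct my_obj *obj, work_func_t fn)
    {
            timer_setup(&obj->timer, my_obj_timer_expired, 0);
            INIT_WORK(&obj->processor, fn);
    }
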
63 * find an extant server call
69 struct rxrpc_call *call; in rxrpc_find_call_by_user_ID() local
78 call = rb_entry(p, struct rxrpc_call, sock_node); in rxrpc_find_call_by_user_ID()
80 if (user_call_ID < call->user_call_ID) in rxrpc_find_call_by_user_ID()
82 else if (user_call_ID > call->user_call_ID) in rxrpc_find_call_by_user_ID()
93 rxrpc_get_call(call, rxrpc_call_got); in rxrpc_find_call_by_user_ID()
95 _leave(" = %p [%d]", call, atomic_read(&call->usage)); in rxrpc_find_call_by_user_ID()
96 return call; in rxrpc_find_call_by_user_ID()
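
Only the rb_entry() step and the two key comparisons of rxrpc_find_call_by_user_ID() appear among the matching lines; the tree descent itself does not. As a hedged reconstruction, an ID-keyed rb-tree lookup of this shape usually looks like the following (the "my_call" type is illustrative, not the rxrpc definition):

    #include <linux/rbtree.h>

    struct my_call {
            struct rb_node  sock_node;
            unsigned long   user_call_ID;
    };

    static struct my_call *my_find_call(struct rb_root *root,
                                        unsigned long user_call_ID)
    {
            struct rb_node *p = root->rb_node;

            while (p) {
                    struct my_call *call = rb_entry(p, struct my_call, sock_node);

                    if (user_call_ID < call->user_call_ID)
                            p = p->rb_left;
                    else if (user_call_ID > call->user_call_ID)
                            p = p->rb_right;
                    else
                            return call;    /* exact match on the user call ID */
            }
            return NULL;
    }
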
100 * allocate a new call
105 struct rxrpc_call *call; in rxrpc_alloc_call() local
108 call = kmem_cache_zalloc(rxrpc_call_jar, gfp); in rxrpc_alloc_call()
109 if (!call) in rxrpc_alloc_call()
112 call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE, in rxrpc_alloc_call()
115 if (!call->rxtx_buffer) in rxrpc_alloc_call()
118 call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp); in rxrpc_alloc_call()
119 if (!call->rxtx_annotations) in rxrpc_alloc_call()
122 mutex_init(&call->user_mutex); in rxrpc_alloc_call()
128 lockdep_set_class(&call->user_mutex, in rxrpc_alloc_call()
131 timer_setup(&call->timer, rxrpc_call_timer_expired, 0); in rxrpc_alloc_call()
132 INIT_WORK(&call->processor, &rxrpc_process_call); in rxrpc_alloc_call()
133 INIT_LIST_HEAD(&call->link); in rxrpc_alloc_call()
134 INIT_LIST_HEAD(&call->chan_wait_link); in rxrpc_alloc_call()
135 INIT_LIST_HEAD(&call->accept_link); in rxrpc_alloc_call()
136 INIT_LIST_HEAD(&call->recvmsg_link); in rxrpc_alloc_call()
137 INIT_LIST_HEAD(&call->sock_link); in rxrpc_alloc_call()
138 init_waitqueue_head(&call->waitq); in rxrpc_alloc_call()
139 spin_lock_init(&call->lock); in rxrpc_alloc_call()
140 spin_lock_init(&call->notify_lock); in rxrpc_alloc_call()
141 spin_lock_init(&call->input_lock); in rxrpc_alloc_call()
142 rwlock_init(&call->state_lock); in rxrpc_alloc_call()
143 atomic_set(&call->usage, 1); in rxrpc_alloc_call()
144 call->debug_id = debug_id; in rxrpc_alloc_call()
145 call->tx_total_len = -1; in rxrpc_alloc_call()
146 call->next_rx_timo = 20 * HZ; in rxrpc_alloc_call()
147 call->next_req_timo = 1 * HZ; in rxrpc_alloc_call()
149 memset(&call->sock_node, 0xed, sizeof(call->sock_node)); in rxrpc_alloc_call()
152 call->rx_winsize = rxrpc_rx_window_size; in rxrpc_alloc_call()
153 call->tx_winsize = 16; in rxrpc_alloc_call()
154 call->rx_expect_next = 1; in rxrpc_alloc_call()
156 call->cong_cwnd = 2; in rxrpc_alloc_call()
157 call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1; in rxrpc_alloc_call()
159 call->rxnet = rxnet; in rxrpc_alloc_call()
161 return call; in rxrpc_alloc_call()
164 kfree(call->rxtx_buffer); in rxrpc_alloc_call()
166 kmem_cache_free(rxrpc_call_jar, call); in rxrpc_alloc_call()
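
rxrpc_alloc_call() takes the call from a slab cache and then allocates two kcalloc()'d arrays, with the visible tail freeing them in reverse order on failure. A sketch of that allocate-then-unwind shape; the goto labels and the rxtx_buffer element size are assumptions, since those lines are not among the matches:

    call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
    if (!call)
            return NULL;

    call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
                                sizeof(struct sk_buff *), gfp);
    if (!call->rxtx_buffer)
            goto nomem;

    call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
    if (!call->rxtx_annotations)
            goto nomem_2;

    /* ... timer, work item, list and lock initialisation as above ... */
    return call;

    nomem_2:
            kfree(call->rxtx_buffer);
    nomem:
            kmem_cache_free(rxrpc_call_jar, call);
            return NULL;
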
171 * Allocate a new client call.
178 struct rxrpc_call *call; in rxrpc_alloc_client_call() local
183 call = rxrpc_alloc_call(rx, gfp, debug_id); in rxrpc_alloc_client_call()
184 if (!call) in rxrpc_alloc_client_call()
186 call->state = RXRPC_CALL_CLIENT_AWAIT_CONN; in rxrpc_alloc_client_call()
187 call->service_id = srx->srx_service; in rxrpc_alloc_client_call()
188 call->tx_phase = true; in rxrpc_alloc_client_call()
190 call->acks_latest_ts = now; in rxrpc_alloc_client_call()
191 call->cong_tstamp = now; in rxrpc_alloc_client_call()
193 _leave(" = %p", call); in rxrpc_alloc_client_call()
194 return call; in rxrpc_alloc_client_call()
198 * Initiate the call ack/resend/expiry timer.
200 static void rxrpc_start_call_timer(struct rxrpc_call *call) in rxrpc_start_call_timer() argument
205 call->ack_at = j; in rxrpc_start_call_timer()
206 call->ack_lost_at = j; in rxrpc_start_call_timer()
207 call->resend_at = j; in rxrpc_start_call_timer()
208 call->ping_at = j; in rxrpc_start_call_timer()
209 call->expect_rx_by = j; in rxrpc_start_call_timer()
210 call->expect_req_by = j; in rxrpc_start_call_timer()
211 call->expect_term_by = j; in rxrpc_start_call_timer()
212 call->timer.expires = now; in rxrpc_start_call_timer()
216 * Set up a call for the given parameters.
218 * - If it returns a call, the call's lock will need releasing by the caller.
227 __acquires(&call->user_mutex) in rxrpc_new_client_call()
229 struct rxrpc_call *call, *xcall; in rxrpc_new_client_call() local
237 call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id); in rxrpc_new_client_call()
238 if (IS_ERR(call)) { in rxrpc_new_client_call()
240 _leave(" = %ld", PTR_ERR(call)); in rxrpc_new_client_call()
241 return call; in rxrpc_new_client_call()
244 call->tx_total_len = p->tx_total_len; in rxrpc_new_client_call()
245 trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage), in rxrpc_new_client_call()
248 /* We need to protect a partially set up call against the user as we in rxrpc_new_client_call()
251 mutex_lock(&call->user_mutex); in rxrpc_new_client_call()
253 /* Publish the call, even though it is incompletely set up as yet */ in rxrpc_new_client_call()
270 rcu_assign_pointer(call->socket, rx); in rxrpc_new_client_call()
271 call->user_call_ID = p->user_call_ID; in rxrpc_new_client_call()
272 __set_bit(RXRPC_CALL_HAS_USERID, &call->flags); in rxrpc_new_client_call()
273 rxrpc_get_call(call, rxrpc_call_got_userid); in rxrpc_new_client_call()
274 rb_link_node(&call->sock_node, parent, pp); in rxrpc_new_client_call()
275 rb_insert_color(&call->sock_node, &rx->calls); in rxrpc_new_client_call()
276 list_add(&call->sock_link, &rx->sock_calls); in rxrpc_new_client_call()
280 rxnet = call->rxnet; in rxrpc_new_client_call()
282 list_add_tail(&call->link, &rxnet->calls); in rxrpc_new_client_call()
285 /* From this point on, the call is protected by its own lock. */ in rxrpc_new_client_call()
289 * including channel number and call ID. in rxrpc_new_client_call()
291 ret = rxrpc_connect_call(rx, call, cp, srx, gfp); in rxrpc_new_client_call()
295 trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage), in rxrpc_new_client_call()
298 rxrpc_start_call_timer(call); in rxrpc_new_client_call()
300 _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id); in rxrpc_new_client_call()
302 _leave(" = %p [new]", call); in rxrpc_new_client_call()
303 return call; in rxrpc_new_client_call()
313 __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, in rxrpc_new_client_call()
315 trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage), in rxrpc_new_client_call()
317 rxrpc_release_call(rx, call); in rxrpc_new_client_call()
318 mutex_unlock(&call->user_mutex); in rxrpc_new_client_call()
319 rxrpc_put_call(call, rxrpc_call_put); in rxrpc_new_client_call()
323 /* We got an error, but the call is attached to the socket and is in in rxrpc_new_client_call()
325 * completing the call queues it. Return 0 from sys_sendmsg() and in rxrpc_new_client_call()
329 trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage), in rxrpc_new_client_call()
331 set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); in rxrpc_new_client_call()
332 __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, in rxrpc_new_client_call()
334 _leave(" = c=%08x [err]", call->debug_id); in rxrpc_new_client_call()
335 return call; in rxrpc_new_client_call()
339 * Retry a call to a new address. It is expected that the Tx queue of the call
340 * will contain data previously packaged for an old call.
343 struct rxrpc_call *call, in rxrpc_retry_client_call() argument
352 * including channel number and call ID. in rxrpc_retry_client_call()
354 ret = rxrpc_connect_call(rx, call, cp, srx, gfp); in rxrpc_retry_client_call()
358 trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage), in rxrpc_retry_client_call()
361 rxrpc_start_call_timer(call); in rxrpc_retry_client_call()
363 _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id); in rxrpc_retry_client_call()
365 if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events)) in rxrpc_retry_client_call()
366 rxrpc_queue_call(call); in rxrpc_retry_client_call()
372 rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, in rxrpc_retry_client_call()
374 trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage), in rxrpc_retry_client_call()
381 * Set up an incoming call. call->conn points to the connection.
385 struct rxrpc_call *call, in rxrpc_incoming_call() argument
388 struct rxrpc_connection *conn = call->conn; in rxrpc_incoming_call()
392 _enter(",%d", call->conn->debug_id); in rxrpc_incoming_call()
394 rcu_assign_pointer(call->socket, rx); in rxrpc_incoming_call()
395 call->call_id = sp->hdr.callNumber; in rxrpc_incoming_call()
396 call->service_id = sp->hdr.serviceId; in rxrpc_incoming_call()
397 call->cid = sp->hdr.cid; in rxrpc_incoming_call()
398 call->state = RXRPC_CALL_SERVER_ACCEPTING; in rxrpc_incoming_call()
400 call->state = RXRPC_CALL_SERVER_SECURING; in rxrpc_incoming_call()
401 call->cong_tstamp = skb->tstamp; in rxrpc_incoming_call()
403 /* Set the channel for this call. We don't get channel_lock as we're in rxrpc_incoming_call()
407 * call pointer). in rxrpc_incoming_call()
410 conn->channels[chan].call_counter = call->call_id; in rxrpc_incoming_call()
411 conn->channels[chan].call_id = call->call_id; in rxrpc_incoming_call()
412 rcu_assign_pointer(conn->channels[chan].call, call); in rxrpc_incoming_call()
415 hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets); in rxrpc_incoming_call()
418 _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id); in rxrpc_incoming_call()
420 rxrpc_start_call_timer(call); in rxrpc_incoming_call()
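
rxrpc_incoming_call() attaches the call to its connection channel with rcu_assign_pointer(), so the call's fields are fully set up before the pointer becomes visible and lockless readers pair the store with rcu_dereference(). A minimal sketch of that publish/read pairing; "hdr_call_number" and the reader-side placement are illustrative assumptions:

    /* Writer: initialise first, then publish the call on the channel. */
    chan->call_counter = call->call_id;
    chan->call_id = call->call_id;
    rcu_assign_pointer(chan->call, call);

    /* Reader (e.g. on packet input, elsewhere): */
    rcu_read_lock();
    call = rcu_dereference(chan->call);
    if (call && call->call_id == hdr_call_number)
            ;       /* ... deliver the packet to this call ... */
    rcu_read_unlock();
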
425 * Queue a call's work processor, getting a ref to pass to the work queue.
427 bool rxrpc_queue_call(struct rxrpc_call *call) in rxrpc_queue_call() argument
430 int n = atomic_fetch_add_unless(&call->usage, 1, 0); in rxrpc_queue_call()
433 if (rxrpc_queue_work(&call->processor)) in rxrpc_queue_call()
434 trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL); in rxrpc_queue_call()
436 rxrpc_put_call(call, rxrpc_call_put_noqueue); in rxrpc_queue_call()
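
rxrpc_queue_call() shows a common "ref then queue" pattern: atomic_fetch_add_unless() takes a reference only if the usage count has not already reached zero, the reference is handed to the work item, and it is dropped again if queue_work() reports the work was already pending. A sketch with illustrative names ("my_put" stands in for the matching put routine):

    static bool my_queue(struct my_obj *obj)
    {
            /* Take a ref only if the count has not already hit zero. */
            int n = atomic_fetch_add_unless(&obj->usage, 1, 0);

            if (n == 0)
                    return false;   /* object is already being torn down */

            if (!queue_work(system_wq, &obj->processor))
                    my_put(obj);    /* work already pending: drop our ref */

            return true;
    }
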
441 * Queue a call's work processor, passing the caller's ref to the work queue. in __rxrpc_queue_call()
443 bool __rxrpc_queue_call(struct rxrpc_call *call) in __rxrpc_queue_call() argument
446 int n = atomic_read(&call->usage); in __rxrpc_queue_call()
448 if (rxrpc_queue_work(&call->processor)) in __rxrpc_queue_call()
449 trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL); in __rxrpc_queue_call()
451 rxrpc_put_call(call, rxrpc_call_put_noqueue); in __rxrpc_queue_call()
456 * Note the re-emergence of a call.
458 void rxrpc_see_call(struct rxrpc_call *call) in rxrpc_see_call() argument
461 if (call) { in rxrpc_see_call()
462 int n = atomic_read(&call->usage); in rxrpc_see_call()
464 trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL); in rxrpc_see_call()
469 * Note the addition of a ref on a call.
471 void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op) in rxrpc_get_call() argument
474 int n = atomic_inc_return(&call->usage); in rxrpc_get_call()
476 trace_rxrpc_call(call, op, n, here, NULL); in rxrpc_get_call()
480 * Detach a call from its owning socket.
482 void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) in rxrpc_release_call() argument
485 struct rxrpc_connection *conn = call->conn; in rxrpc_release_call()
489 _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage)); in rxrpc_release_call()
491 trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage), in rxrpc_release_call()
492 here, (const void *)call->flags); in rxrpc_release_call()
494 ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); in rxrpc_release_call()
496 spin_lock_bh(&call->lock); in rxrpc_release_call()
497 if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags)) in rxrpc_release_call()
499 spin_unlock_bh(&call->lock); in rxrpc_release_call()
501 del_timer_sync(&call->timer); in rxrpc_release_call()
506 if (!list_empty(&call->recvmsg_link)) { in rxrpc_release_call()
507 _debug("unlinking once-pending call %p { e=%lx f=%lx }", in rxrpc_release_call()
508 call, call->events, call->flags); in rxrpc_release_call()
509 list_del(&call->recvmsg_link); in rxrpc_release_call()
514 call->recvmsg_link.next = NULL; in rxrpc_release_call()
515 call->recvmsg_link.prev = NULL; in rxrpc_release_call()
519 rxrpc_put_call(call, rxrpc_call_put); in rxrpc_release_call()
523 if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) { in rxrpc_release_call()
524 rb_erase(&call->sock_node, &rx->calls); in rxrpc_release_call()
525 memset(&call->sock_node, 0xdd, sizeof(call->sock_node)); in rxrpc_release_call()
526 rxrpc_put_call(call, rxrpc_call_put_userid); in rxrpc_release_call()
529 list_del(&call->sock_link); in rxrpc_release_call()
532 _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn); in rxrpc_release_call()
534 if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) in rxrpc_release_call()
535 rxrpc_disconnect_call(call); in rxrpc_release_call()
538 rxrpc_free_skb(call->rxtx_buffer[i], in rxrpc_release_call()
539 (call->tx_phase ? rxrpc_skb_tx_cleaned : in rxrpc_release_call()
541 call->rxtx_buffer[i] = NULL; in rxrpc_release_call()
548 * Prepare a kernel service call for retry.
550 int rxrpc_prepare_call_for_retry(struct rxrpc_sock *rx, struct rxrpc_call *call) in rxrpc_prepare_call_for_retry() argument
556 _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage)); in rxrpc_prepare_call_for_retry()
558 trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage), in rxrpc_prepare_call_for_retry()
559 here, (const void *)call->flags); in rxrpc_prepare_call_for_retry()
561 ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); in rxrpc_prepare_call_for_retry()
562 ASSERTCMP(call->completion, !=, RXRPC_CALL_REMOTELY_ABORTED); in rxrpc_prepare_call_for_retry()
563 ASSERTCMP(call->completion, !=, RXRPC_CALL_LOCALLY_ABORTED); in rxrpc_prepare_call_for_retry()
564 ASSERT(list_empty(&call->recvmsg_link)); in rxrpc_prepare_call_for_retry()
566 del_timer_sync(&call->timer); in rxrpc_prepare_call_for_retry()
568 _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, call->conn); in rxrpc_prepare_call_for_retry()
570 if (call->conn) in rxrpc_prepare_call_for_retry()
571 rxrpc_disconnect_call(call); in rxrpc_prepare_call_for_retry()
573 if (rxrpc_is_service_call(call) || in rxrpc_prepare_call_for_retry()
574 !call->tx_phase || in rxrpc_prepare_call_for_retry()
575 call->tx_hard_ack != 0 || in rxrpc_prepare_call_for_retry()
576 call->rx_hard_ack != 0 || in rxrpc_prepare_call_for_retry()
577 call->rx_top != 0) in rxrpc_prepare_call_for_retry()
580 call->state = RXRPC_CALL_UNINITIALISED; in rxrpc_prepare_call_for_retry()
581 call->completion = RXRPC_CALL_SUCCEEDED; in rxrpc_prepare_call_for_retry()
582 call->call_id = 0; in rxrpc_prepare_call_for_retry()
583 call->cid = 0; in rxrpc_prepare_call_for_retry()
584 call->cong_cwnd = 0; in rxrpc_prepare_call_for_retry()
585 call->cong_extra = 0; in rxrpc_prepare_call_for_retry()
586 call->cong_ssthresh = 0; in rxrpc_prepare_call_for_retry()
587 call->cong_mode = 0; in rxrpc_prepare_call_for_retry()
588 call->cong_dup_acks = 0; in rxrpc_prepare_call_for_retry()
589 call->cong_cumul_acks = 0; in rxrpc_prepare_call_for_retry()
590 call->acks_lowest_nak = 0; in rxrpc_prepare_call_for_retry()
593 last |= call->rxtx_annotations[i]; in rxrpc_prepare_call_for_retry()
594 call->rxtx_annotations[i] &= RXRPC_TX_ANNO_LAST; in rxrpc_prepare_call_for_retry()
595 call->rxtx_annotations[i] |= RXRPC_TX_ANNO_RETRANS; in rxrpc_prepare_call_for_retry()
607 struct rxrpc_call *call; in rxrpc_release_calls_on_socket() local
612 call = list_entry(rx->to_be_accepted.next, in rxrpc_release_calls_on_socket()
614 list_del(&call->accept_link); in rxrpc_release_calls_on_socket()
615 rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET); in rxrpc_release_calls_on_socket()
616 rxrpc_put_call(call, rxrpc_call_put); in rxrpc_release_calls_on_socket()
620 call = list_entry(rx->sock_calls.next, in rxrpc_release_calls_on_socket()
622 rxrpc_get_call(call, rxrpc_call_got); in rxrpc_release_calls_on_socket()
623 rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET); in rxrpc_release_calls_on_socket()
624 rxrpc_send_abort_packet(call); in rxrpc_release_calls_on_socket()
625 rxrpc_release_call(rx, call); in rxrpc_release_calls_on_socket()
626 rxrpc_put_call(call, rxrpc_call_put); in rxrpc_release_calls_on_socket()
633 * release a call
635 void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op) in rxrpc_put_call() argument
637 struct rxrpc_net *rxnet = call->rxnet; in rxrpc_put_call()
641 ASSERT(call != NULL); in rxrpc_put_call()
643 n = atomic_dec_return(&call->usage); in rxrpc_put_call()
644 trace_rxrpc_call(call, op, n, here, NULL); in rxrpc_put_call()
647 _debug("call %d dead", call->debug_id); in rxrpc_put_call()
648 ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); in rxrpc_put_call()
650 if (!list_empty(&call->link)) { in rxrpc_put_call()
652 list_del_init(&call->link); in rxrpc_put_call()
656 rxrpc_cleanup_call(call); in rxrpc_put_call()
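
rxrpc_put_call() is the dec-and-test side of the same refcounting: when atomic_dec_return() reaches zero, the call is unlinked from the per-network-namespace list and cleaned up. A compressed sketch of that release shape; the lock guarding the list is not visible in the matches, so the one below is illustrative:

    static DEFINE_SPINLOCK(my_calls_lock);  /* guards the global call list */

    static void my_put(struct my_obj *obj)
    {
            int n = atomic_dec_return(&obj->usage);

            WARN_ON(n < 0);
            if (n == 0) {
                    /* Last reference: unlink, then destroy. */
                    spin_lock_bh(&my_calls_lock);
                    if (!list_empty(&obj->link))
                            list_del_init(&obj->link);
                    spin_unlock_bh(&my_calls_lock);

                    my_cleanup(obj);
            }
    }
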
661 * Final call destruction - but must be done in process context.
665 struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor); in rxrpc_destroy_call() local
666 struct rxrpc_net *rxnet = call->rxnet; in rxrpc_destroy_call()
668 rxrpc_put_connection(call->conn); in rxrpc_destroy_call()
669 rxrpc_put_peer(call->peer); in rxrpc_destroy_call()
670 kfree(call->rxtx_buffer); in rxrpc_destroy_call()
671 kfree(call->rxtx_annotations); in rxrpc_destroy_call()
672 kmem_cache_free(rxrpc_call_jar, call); in rxrpc_destroy_call()
678 * Final call destruction under RCU.
682 struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu); in rxrpc_rcu_destroy_call() local
685 INIT_WORK(&call->processor, rxrpc_destroy_call); in rxrpc_rcu_destroy_call()
686 if (!rxrpc_queue_work(&call->processor)) in rxrpc_rcu_destroy_call()
689 rxrpc_destroy_call(&call->processor); in rxrpc_rcu_destroy_call()
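
Final destruction happens in two hops: the cleanup path hands the call to call_rcu(), and the RCU callback, which runs in softirq context, re-purposes the call's work_struct to push the actual freeing out to a workqueue, falling back to destroying inline if the work cannot be queued. A sketch of that RCU-then-workqueue deferral with illustrative names and a hypothetical "my_jar" slab cache:

    static struct kmem_cache *my_jar;

    static void my_destroy(struct work_struct *work)
    {
            struct my_obj *obj = container_of(work, struct my_obj, processor);

            /* Process context: safe to put connections, take mutexes, etc. */
            kfree(obj->rxtx_buffer);
            kfree(obj->rxtx_annotations);
            kmem_cache_free(my_jar, obj);
    }

    static void my_rcu_destroy(struct rcu_head *rcu)
    {
            struct my_obj *obj = container_of(rcu, struct my_obj, rcu);

            /* Softirq context: defer the heavy lifting to a workqueue. */
            INIT_WORK(&obj->processor, my_destroy);
            if (!queue_work(system_wq, &obj->processor))
                    my_destroy(&obj->processor);
    }

    /* The cleanup path then ends with: call_rcu(&obj->rcu, my_rcu_destroy); */
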
694 * clean up a call
696 void rxrpc_cleanup_call(struct rxrpc_call *call) in rxrpc_cleanup_call() argument
700 _net("DESTROY CALL %d", call->debug_id); in rxrpc_cleanup_call()
702 memset(&call->sock_node, 0xcd, sizeof(call->sock_node)); in rxrpc_cleanup_call()
704 del_timer_sync(&call->timer); in rxrpc_cleanup_call()
706 ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); in rxrpc_cleanup_call()
707 ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); in rxrpc_cleanup_call()
711 rxrpc_free_skb(call->rxtx_buffer[i], in rxrpc_cleanup_call()
712 (call->tx_phase ? rxrpc_skb_tx_cleaned : in rxrpc_cleanup_call()
715 rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned); in rxrpc_cleanup_call()
717 call_rcu(&call->rcu, rxrpc_rcu_destroy_call); in rxrpc_cleanup_call()
727 struct rxrpc_call *call; in rxrpc_destroy_all_calls() local
735 call = list_entry(rxnet->calls.next, in rxrpc_destroy_all_calls()
737 _debug("Zapping call %p", call); in rxrpc_destroy_all_calls()
739 rxrpc_see_call(call); in rxrpc_destroy_all_calls()
740 list_del_init(&call->link); in rxrpc_destroy_all_calls()
742 pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n", in rxrpc_destroy_all_calls()
743 call, atomic_read(&call->usage), in rxrpc_destroy_all_calls()
744 rxrpc_call_states[call->state], in rxrpc_destroy_all_calls()
745 call->flags, call->events); in rxrpc_destroy_all_calls()