Lines matching full:rx — whole-word occurrences of the identifier rx (the struct rxrpc_sock pointer) in the rxrpc service-call acceptance code. Each entry gives the kernel source line number, the source line, and the enclosing function; a trailing "argument" or "local" tag marks the lines where rx is declared.
38 static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, in rxrpc_service_prealloc_one() argument
47 struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk)); in rxrpc_service_prealloc_one()
52 max = rx->sk.sk_max_ack_backlog; in rxrpc_service_prealloc_one()
53 tmp = rx->sk.sk_ack_backlog; in rxrpc_service_prealloc_one()
76 struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp); in rxrpc_service_prealloc_one()
103 call = rxrpc_alloc_call(rx, gfp, debug_id); in rxrpc_service_prealloc_one()
113 write_lock(&rx->call_lock); in rxrpc_service_prealloc_one()
119 pp = &rx->calls.rb_node; in rxrpc_service_prealloc_one()
138 rb_insert_color(&call->sock_node, &rx->calls); in rxrpc_service_prealloc_one()
142 list_add(&call->sock_link, &rx->sock_calls); in rxrpc_service_prealloc_one()
144 write_unlock(&rx->call_lock); in rxrpc_service_prealloc_one()
157 write_unlock(&rx->call_lock); in rxrpc_service_prealloc_one()
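
The first block of matches is rxrpc_service_prealloc_one(); the listing appears to come from net/rxrpc/call_accept.c in the Linux kernel. The function preallocates a single service call ahead of demand: it bounds the work against sk_max_ack_backlog and sk_ack_backlog (lines 52-53), allocates a peer and a call, and, under rx->call_lock, inserts the call into the rx->calls rb-tree and onto the rx->sock_calls list (lines 113-144). As a rough userspace model of the bounded-preallocation idea (hypothetical struct backlog and struct call types, not the kernel's), charging one slot of a fixed ring might look like:

#include <stdlib.h>

#define BACKLOG_MAX 4                   /* stands in for sk_max_ack_backlog */

struct call {                           /* toy stand-in for struct rxrpc_call */
        unsigned int debug_id;
        unsigned long user_call_id;
};

struct backlog {                        /* toy stand-in for struct rxrpc_backlog */
        struct call *ring[BACKLOG_MAX];
        unsigned int head, tail;        /* head == tail means empty */
};

/* Preallocate one call slot, refusing once the backlog limit is hit. */
static int prealloc_one(struct backlog *b, unsigned int debug_id)
{
        unsigned int head = b->head;

        /* One slot is sacrificed so a full ring is distinct from empty. */
        if ((head + 1) % BACKLOG_MAX == b->tail)
                return -1;              /* mirrors the sk_ack_backlog check */

        b->ring[head] = calloc(1, sizeof(struct call));
        if (!b->ring[head])
                return -1;
        b->ring[head]->debug_id = debug_id;
        b->head = (head + 1) % BACKLOG_MAX;
        return 0;
}
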
170 int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp) in rxrpc_service_prealloc() argument
172 struct rxrpc_backlog *b = rx->backlog; in rxrpc_service_prealloc()
178 rx->backlog = b; in rxrpc_service_prealloc()
181 if (rx->discard_new_call) in rxrpc_service_prealloc()
184 while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp, in rxrpc_service_prealloc()
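
rxrpc_service_prealloc() is the top-up loop around the above: it installs the backlog buffer on rx->backlog if absent (line 178), returns early for sockets that set discard_new_call (line 181; kernel-API users charge the ring themselves, see rxrpc_kernel_charge_accept() at the end of this listing), and otherwise calls rxrpc_service_prealloc_one() until the ring is full (line 184). Continuing the toy sketch above:

/* Top up the ring until prealloc_one() reports it full. */
static void service_prealloc(struct backlog *b)
{
        static unsigned int next_debug_id;

        while (prealloc_one(b, ++next_debug_id) == 0)
                ;
}
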
194 void rxrpc_discard_prealloc(struct rxrpc_sock *rx) in rxrpc_discard_prealloc() argument
196 struct rxrpc_backlog *b = rx->backlog; in rxrpc_discard_prealloc()
197 struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk)); in rxrpc_discard_prealloc()
202 rx->backlog = NULL; in rxrpc_discard_prealloc()
207 spin_lock_bh(&rx->incoming_lock); in rxrpc_discard_prealloc()
208 spin_unlock_bh(&rx->incoming_lock); in rxrpc_discard_prealloc()
236 rcu_assign_pointer(call->socket, rx); in rxrpc_discard_prealloc()
237 if (rx->discard_new_call) { in rxrpc_discard_prealloc()
239 rx->discard_new_call(call, call->user_call_ID); in rxrpc_discard_prealloc()
245 rxrpc_release_call(rx, call); in rxrpc_discard_prealloc()
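
rxrpc_discard_prealloc() unwinds the ring on socket teardown: it detaches rx->backlog (line 202), and the back-to-back lock/unlock of rx->incoming_lock (lines 207-208) reads as a barrier that waits out any rxrpc_new_incoming_call() still holding the lock before the preallocated calls are handed to discard_new_call, if set (lines 236-239), and released (line 245). The drain step in the toy model:

/* Drain the ring, handing each unused call to an optional hook. */
static void discard_prealloc(struct backlog *b,
                             void (*discard)(struct call *))
{
        while (b->tail != b->head) {
                struct call *call = b->ring[b->tail];

                if (discard)
                        discard(call);  /* rx->discard_new_call analogue */
                free(call);             /* rxrpc_release_call analogue */
                b->tail = (b->tail + 1) % BACKLOG_MAX;
        }
}
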
257 static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, in rxrpc_alloc_incoming_call() argument
263 struct rxrpc_backlog *b = rx->backlog; in rxrpc_alloc_incoming_call()
297 rxrpc_new_incoming_peer(rx, local, peer); in rxrpc_alloc_incoming_call()
308 rxrpc_new_incoming_connection(rx, conn, skb); in rxrpc_alloc_incoming_call()
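
rxrpc_alloc_incoming_call() is the consumer side: it runs in the packet-receive path and, rather than allocating under softirq, pops a preallocated peer, connection and call off rx->backlog, registering whichever of peer/conn is newly minted (lines 297 and 308). The pop half of the toy ring:

/* Pop the next preallocated call (NULL if nothing was charged). */
static struct call *pop_prealloc(struct backlog *b)
{
        struct call *call;

        if (b->tail == b->head)
                return NULL;    /* ring empty: the packet gets a busy reject */
        call = b->ring[b->tail];
        b->tail = (b->tail + 1) % BACKLOG_MAX;
        return call;
}
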
342 struct rxrpc_sock *rx, in rxrpc_new_incoming_call() argument
352 spin_lock(&rx->incoming_lock); in rxrpc_new_incoming_call()
353 if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED || in rxrpc_new_incoming_call()
354 rx->sk.sk_state == RXRPC_CLOSE) { in rxrpc_new_incoming_call()
367 * rx->incoming_lock, so the values should remain stable. in rxrpc_new_incoming_call()
371 call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb); in rxrpc_new_incoming_call()
395 rxrpc_incoming_call(rx, call, skb); in rxrpc_new_incoming_call()
398 if (rx->notify_new_call) in rxrpc_new_incoming_call()
399 rx->notify_new_call(&rx->sk, call, call->user_call_ID); in rxrpc_new_incoming_call()
401 sk_acceptq_added(&rx->sk); in rxrpc_new_incoming_call()
414 if (rx->discard_new_call) in rxrpc_new_incoming_call()
447 spin_unlock(&rx->incoming_lock); in rxrpc_new_incoming_call()
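
rxrpc_new_incoming_call() ties the receive side together under rx->incoming_lock: packets are refused while the socket is not listening (lines 353-354), a call is drawn from the backlog (line 371), set up via rxrpc_incoming_call() (line 395), announced through notify_new_call (lines 398-399), and counted into the accept queue with sk_acceptq_added() (line 401). A condensed model, assuming the helpers sketched above plus a hypothetical socket-state enum:

enum sock_state { LISTENING, LISTEN_DISABLED, CLOSED };

static struct call *new_incoming_call(struct backlog *b,
                                      enum sock_state state,
                                      void (*notify)(struct call *),
                                      unsigned int *acceptq)
{
        struct call *call;

        if (state == LISTEN_DISABLED || state == CLOSED)
                return NULL;    /* mirrors the checks at lines 353-354 */

        call = pop_prealloc(b);
        if (!call)
                return NULL;    /* nothing charged: reject as busy */

        if (notify)
                notify(call);   /* rx->notify_new_call analogue */
        (*acceptq)++;           /* sk_acceptq_added() analogue */
        return call;
}
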
456 struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx, in rxrpc_accept_call() argument
459 __releases(&rx->sk.sk_lock.slock) in rxrpc_accept_call()
470 write_lock(&rx->call_lock); in rxrpc_accept_call()
472 if (list_empty(&rx->to_be_accepted)) { in rxrpc_accept_call()
473 write_unlock(&rx->call_lock); in rxrpc_accept_call()
474 release_sock(&rx->sk); in rxrpc_accept_call()
480 pp = &rx->calls.rb_node; in rxrpc_accept_call()
497 call = list_entry(rx->to_be_accepted.next, in rxrpc_accept_call()
499 write_unlock(&rx->call_lock); in rxrpc_accept_call()
507 release_sock(&rx->sk); in rxrpc_accept_call()
512 write_lock(&rx->call_lock); in rxrpc_accept_call()
514 sk_acceptq_removed(&rx->sk); in rxrpc_accept_call()
518 pp = &rx->calls.rb_node; in rxrpc_accept_call()
549 rb_insert_color(&call->sock_node, &rx->calls); in rxrpc_accept_call()
554 write_unlock(&rx->call_lock); in rxrpc_accept_call()
556 rxrpc_service_prealloc(rx, GFP_KERNEL); in rxrpc_accept_call()
557 release_sock(&rx->sk); in rxrpc_accept_call()
564 write_unlock(&rx->call_lock); in rxrpc_accept_call()
565 rxrpc_release_call(rx, call); in rxrpc_accept_call()
571 write_unlock(&rx->call_lock); in rxrpc_accept_call()
573 rxrpc_service_prealloc(rx, GFP_KERNEL); in rxrpc_accept_call()
574 release_sock(&rx->sk); in rxrpc_accept_call()
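
rxrpc_accept_call() is the userspace accept path: under rx->call_lock it bails out if rx->to_be_accepted is empty (lines 472-474), checks that the caller's user_call_ID is not already present in the rx->calls tree (line 480), pops the oldest pending call, decrements the accept queue (line 514), re-inserts the call into the tree keyed by the new ID (lines 518-549), and finally replenishes the ring via rxrpc_service_prealloc() (line 556). A list-based sketch of the pop-and-rekey step, with a hypothetical pending type standing in for to_be_accepted:

struct pending {                /* toy stand-in for rx->to_be_accepted */
        struct call *call;
        struct pending *next;
};

/* Pop the oldest pending call and key it by the user's chosen ID. */
static struct call *accept_call(struct pending **tba,
                                unsigned long user_call_id,
                                unsigned int *acceptq)
{
        struct pending *p = *tba;
        struct call *call;

        if (!p)
                return NULL;    /* nothing to accept: would block */

        *tba = p->next;
        call = p->call;
        free(p);

        (*acceptq)--;           /* sk_acceptq_removed() analogue */
        call->user_call_id = user_call_id; /* kernel: rb-tree re-insert
                                            * keyed by user_call_ID */
        return call;
}
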
583 int rxrpc_reject_call(struct rxrpc_sock *rx) in rxrpc_reject_call() argument
593 write_lock(&rx->call_lock); in rxrpc_reject_call()
595 if (list_empty(&rx->to_be_accepted)) { in rxrpc_reject_call()
596 write_unlock(&rx->call_lock); in rxrpc_reject_call()
603 call = list_entry(rx->to_be_accepted.next, in rxrpc_reject_call()
606 sk_acceptq_removed(&rx->sk); in rxrpc_reject_call()
624 write_unlock(&rx->call_lock); in rxrpc_reject_call()
627 rxrpc_release_call(rx, call); in rxrpc_reject_call()
630 rxrpc_service_prealloc(rx, GFP_KERNEL); in rxrpc_reject_call()
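
rxrpc_reject_call() mirrors accept but drops the call instead of keying it: pop the head of to_be_accepted, decrement the accept queue (line 606), abort and release the call (line 627), then top the ring back up (line 630). In the toy model only the release differs:

/* Reject the oldest pending call instead of accepting it. */
static int reject_call(struct pending **tba, unsigned int *acceptq)
{
        struct pending *p = *tba;

        if (!p)
                return -1;      /* nothing to reject */

        *tba = p->next;
        (*acceptq)--;           /* sk_acceptq_removed() analogue */
        free(p->call);          /* abort + rxrpc_release_call analogue */
        free(p);
        return 0;
}
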
656 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); in rxrpc_kernel_charge_accept() local
657 struct rxrpc_backlog *b = rx->backlog; in rxrpc_kernel_charge_accept()
662 return rxrpc_service_prealloc_one(rx, b, notify_rx, in rxrpc_kernel_charge_accept()
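
Finally, rxrpc_kernel_charge_accept() is the in-kernel charging entry point: a kernel service (AFS is the in-tree user) pushes one slot into the ring itself via rxrpc_service_prealloc_one(), supplying its own notify_rx hook (line 662), which is why the automatic top-up skips sockets with discard_new_call set. Tying the toy model together, one hypothetical end-to-end run of charge, incoming packet, accept, and teardown:

#include <stdio.h>

int main(void)
{
        struct backlog b = { .head = 0, .tail = 0 };
        unsigned int acceptq = 0;
        struct pending *tba = NULL;
        struct call *call;

        service_prealloc(&b);                   /* charge the ring */

        call = new_incoming_call(&b, LISTENING, NULL, &acceptq);
        if (call) {                             /* queue it for accept */
                struct pending *p = calloc(1, sizeof(*p));

                if (!p)
                        return 1;
                p->call = call;
                tba = p;
        }

        call = accept_call(&tba, 1234, &acceptq);
        printf("accepted call with user ID %lu\n",
               call ? call->user_call_id : 0UL);

        free(call);
        discard_prealloc(&b, NULL);             /* tear down what's left */
        return 0;
}
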