
Lines matching refs: sk (every reference to the socket pointer sk, listed by source line number and enclosing function; "local" marks the line where sk is declared in that function)

41 struct sock *sk; in smc_close_cleanup_listen() local
44 while ((sk = smc_accept_dequeue(parent, NULL))) in smc_close_cleanup_listen()
45 smc_close_non_accepted(sk); in smc_close_cleanup_listen()
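The three lines above cover essentially all of smc_close_cleanup_listen(); reassembled as a sketch (only the declaration and the drain loop are visible in the listing, the comment is paraphrased):

    static void smc_close_cleanup_listen(struct sock *parent)
    {
            struct sock *sk;

            /* Drain every not-yet-accepted child socket from the listening
             * parent and close it (lines 44-45 of the listing). */
            while ((sk = smc_accept_dequeue(parent, NULL)))
                    smc_close_non_accepted(sk);
    }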
52 struct sock *sk = &smc->sk; in smc_close_stream_wait() local
61 add_wait_queue(sk_sleep(sk), &wait); in smc_close_stream_wait()
65 rc = sk_wait_event(sk, &timeout, in smc_close_stream_wait()
67 sk->sk_err == ECONNABORTED || in smc_close_stream_wait()
68 sk->sk_err == ECONNRESET || in smc_close_stream_wait()
74 remove_wait_queue(sk_sleep(sk), &wait); in smc_close_stream_wait()
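Lines 52-74 belong to smc_close_stream_wait(), which parks the closing task on the socket's wait queue until pending sends drain. A hedged sketch of that pattern follows; smc_tx_prepared_sends() and the early-return check are assumptions from the surrounding SMC code, while the sk handling is taken from the listed lines:

    static void smc_close_stream_wait(struct smc_sock *smc, long timeout)
    {
            DEFINE_WAIT_FUNC(wait, woken_wake_function);
            struct sock *sk = &smc->sk;                 /* line 52 */

            if (!timeout)
                    return;

            add_wait_queue(sk_sleep(sk), &wait);        /* line 61 */
            while (!signal_pending(current) && timeout) {
                    int rc;

                    /* Sleep until pending sends drain, the connection errors
                     * out, the timeout expires or a signal arrives
                     * (lines 65-68); smc_tx_prepared_sends() is assumed. */
                    rc = sk_wait_event(sk, &timeout,
                                       !smc_tx_prepared_sends(&smc->conn) ||
                                       sk->sk_err == ECONNABORTED ||
                                       sk->sk_err == ECONNRESET,
                                       &wait);
                    if (rc)
                            break;
            }
            remove_wait_queue(sk_sleep(sk), &wait);     /* line 74 */
    }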
82 smc->sk.sk_state_change(&smc->sk); in smc_close_wake_tx_prepared()
113 struct sock *sk = &smc->sk; in smc_close_cancel_work() local
115 release_sock(sk); in smc_close_cancel_work()
118 lock_sock(sk); in smc_close_cancel_work()
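Lines 115 and 118 show the lock-drop pattern in smc_close_cancel_work(): the socket lock is released before synchronously cancelling work that may itself take lock_sock(), otherwise cancel_work_sync() could deadlock. A minimal sketch under that assumption; the specific work items (close_work, tx_work) are taken from the wider SMC code, not from this listing:

    static void smc_close_cancel_work(struct smc_sock *smc)
    {
            struct sock *sk = &smc->sk;                     /* line 113 */

            release_sock(sk);                               /* line 115 */
            cancel_work_sync(&smc->conn.close_work);        /* assumed work item */
            cancel_delayed_work_sync(&smc->conn.tx_work);   /* assumed work item */
            lock_sock(sk);                                  /* line 118 */
    }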
126 struct sock *sk = &smc->sk; in smc_close_active_abort() local
129 if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) { in smc_close_active_abort()
130 sk->sk_err = ECONNABORTED; in smc_close_active_abort()
131 if (smc->clcsock && smc->clcsock->sk) in smc_close_active_abort()
132 tcp_abort(smc->clcsock->sk, ECONNABORTED); in smc_close_active_abort()
134 switch (sk->sk_state) { in smc_close_active_abort()
138 sk->sk_state = SMC_PEERABORTWAIT; in smc_close_active_abort()
140 if (sk->sk_state != SMC_PEERABORTWAIT) in smc_close_active_abort()
142 sk->sk_state = SMC_CLOSED; in smc_close_active_abort()
143 sock_put(sk); /* (postponed) passive closing */ in smc_close_active_abort()
148 sk->sk_state = SMC_PEERABORTWAIT; in smc_close_active_abort()
150 if (sk->sk_state != SMC_PEERABORTWAIT) in smc_close_active_abort()
152 sk->sk_state = SMC_CLOSED; in smc_close_active_abort()
155 sock_put(sk); /* passive closing */ in smc_close_active_abort()
159 sk->sk_state = SMC_PEERABORTWAIT; in smc_close_active_abort()
161 if (sk->sk_state != SMC_PEERABORTWAIT) in smc_close_active_abort()
163 sk->sk_state = SMC_CLOSED; in smc_close_active_abort()
173 sock_set_flag(sk, SOCK_DEAD); in smc_close_active_abort()
174 sk->sk_state_change(sk); in smc_close_active_abort()
177 release_sock(sk); in smc_close_active_abort()
179 lock_sock(sk); in smc_close_active_abort()
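smc_close_active_abort() is only partially visible here; its entry (lines 129-132) and exit (lines 173-179) can be reassembled directly from the listed lines, while the per-state switch in between is summarised as a comment:

    /* Entry: record the error on the SMC socket and reset the internal
     * TCP socket (clcsock) if one still exists. */
    if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) {
            sk->sk_err = ECONNABORTED;
            if (smc->clcsock && smc->clcsock->sk)
                    tcp_abort(smc->clcsock->sk, ECONNABORTED);
    }

    /* ... switch (sk->sk_state): move through SMC_PEERABORTWAIT to
     * SMC_CLOSED, dropping passive-closing references via sock_put() ... */

    /* Exit: mark the socket dead and notify state waiters; the remaining
     * teardown (lines 177-179) runs with the socket lock dropped. */
    sock_set_flag(sk, SOCK_DEAD);
    sk->sk_state_change(sk);
    release_sock(sk);
    /* further teardown, not visible in this listing */
    lock_sock(sk);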
194 struct sock *sk = &smc->sk; in smc_close_active() local
201 0 : sock_flag(sk, SOCK_LINGER) ? in smc_close_active()
202 sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT; in smc_close_active()
204 old_state = sk->sk_state; in smc_close_active()
206 switch (sk->sk_state) { in smc_close_active()
208 sk->sk_state = SMC_CLOSED; in smc_close_active()
211 sk->sk_state = SMC_CLOSED; in smc_close_active()
212 sk->sk_state_change(sk); /* wake up accept */ in smc_close_active()
213 if (smc->clcsock && smc->clcsock->sk) { in smc_close_active()
214 smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready; in smc_close_active()
215 smc->clcsock->sk->sk_user_data = NULL; in smc_close_active()
218 smc_close_cleanup_listen(sk); in smc_close_active()
219 release_sock(sk); in smc_close_active()
221 lock_sock(sk); in smc_close_active()
225 release_sock(sk); in smc_close_active()
227 lock_sock(sk); in smc_close_active()
228 if (sk->sk_state == SMC_ACTIVE) { in smc_close_active()
231 sk->sk_state = SMC_PEERCLOSEWAIT1; in smc_close_active()
236 if (smc->clcsock && smc->clcsock->sk) { in smc_close_active()
253 sk->sk_state = SMC_CLOSED; in smc_close_active()
259 release_sock(sk); in smc_close_active()
261 lock_sock(sk); in smc_close_active()
262 if (sk->sk_state != SMC_APPCLOSEWAIT1 && in smc_close_active()
263 sk->sk_state != SMC_APPCLOSEWAIT2) in smc_close_active()
269 sk->sk_state = SMC_CLOSED; in smc_close_active()
270 sock_put(sk); /* postponed passive closing */ in smc_close_active()
273 sk->sk_state = SMC_PEERFINCLOSEWAIT; in smc_close_active()
290 sk->sk_state = SMC_CLOSED; in smc_close_active()
293 sk->sk_state = SMC_CLOSED; in smc_close_active()
300 if (old_state != sk->sk_state) in smc_close_active()
301 sk->sk_state_change(sk); in smc_close_active()
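Within smc_close_active(), the two densest clusters of sk references are the linger timeout (lines 201-202) and the listening-socket teardown (lines 211-221). A reassembled sketch; the PF_EXITING test, the SMC_LISTEN case label, and whatever runs between release_sock() and lock_sock() are assumptions from context rather than from the listed lines:

    /* Linger handling: an exiting task does not linger; otherwise SO_LINGER
     * or the SMC maximum stream-wait timeout applies. */
    timeout = current->flags & PF_EXITING ?
                  0 : sock_flag(sk, SOCK_LINGER) ?
                      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;

    /* Listening-socket teardown: close the listener, wake up accept(),
     * restore the clcsock callbacks diverted for SMC, and drain the queue
     * of not-yet-accepted children. */
    case SMC_LISTEN:                                /* case label assumed */
            sk->sk_state = SMC_CLOSED;
            sk->sk_state_change(sk);                /* wake up accept */
            if (smc->clcsock && smc->clcsock->sk) {
                    smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
                    smc->clcsock->sk->sk_user_data = NULL;
            }
            smc_close_cleanup_listen(sk);
            release_sock(sk);
            /* clcsock shutdown / pending work handled without the lock */
            lock_sock(sk);
            break;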
309 struct sock *sk = &smc->sk; in smc_close_passive_abort_received() local
311 switch (sk->sk_state) { in smc_close_passive_abort_received()
315 sk->sk_state = SMC_PROCESSABORT; in smc_close_passive_abort_received()
316 sock_put(sk); /* passive closing */ in smc_close_passive_abort_received()
319 sk->sk_state = SMC_PROCESSABORT; in smc_close_passive_abort_received()
326 sk->sk_state = SMC_PROCESSABORT; in smc_close_passive_abort_received()
328 sk->sk_state = SMC_CLOSED; in smc_close_passive_abort_received()
329 sock_put(sk); /* passive closing */ in smc_close_passive_abort_received()
333 sk->sk_state = SMC_CLOSED; in smc_close_passive_abort_received()
334 sock_put(sk); /* passive closing */ in smc_close_passive_abort_received()
337 sk->sk_state = SMC_CLOSED; in smc_close_passive_abort_received()
357 struct sock *sk = &smc->sk; in smc_close_passive_work() local
360 lock_sock(sk); in smc_close_passive_work()
361 old_state = sk->sk_state; in smc_close_passive_work()
367 release_sock(&smc->sk); in smc_close_passive_work()
369 lock_sock(&smc->sk); in smc_close_passive_work()
373 switch (sk->sk_state) { in smc_close_passive_work()
375 sk->sk_state = SMC_APPCLOSEWAIT1; in smc_close_passive_work()
378 sk->sk_state = SMC_APPCLOSEWAIT1; in smc_close_passive_work()
385 sk->sk_state = SMC_PEERCLOSEWAIT2; in smc_close_passive_work()
391 if (sock_flag(sk, SOCK_DEAD) && in smc_close_passive_work()
394 sk->sk_state = SMC_CLOSED; in smc_close_passive_work()
397 sk->sk_state = SMC_APPFINCLOSEWAIT; in smc_close_passive_work()
399 sock_put(sk); /* passive closing */ in smc_close_passive_work()
403 sk->sk_state = SMC_CLOSED; in smc_close_passive_work()
404 sock_put(sk); /* passive closing */ in smc_close_passive_work()
422 sk->sk_data_ready(sk); /* wakeup blocked rcvbuf consumers */ in smc_close_passive_work()
423 sk->sk_write_space(sk); /* wakeup blocked sndbuf producers */ in smc_close_passive_work()
425 if (old_state != sk->sk_state) { in smc_close_passive_work()
426 sk->sk_state_change(sk); in smc_close_passive_work()
427 if ((sk->sk_state == SMC_CLOSED) && in smc_close_passive_work()
428 (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) { in smc_close_passive_work()
434 release_sock(sk); in smc_close_passive_work()
437 sock_put(sk); /* sock_hold done by schedulers of close_work */ in smc_close_passive_work()
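The tail of smc_close_passive_work() (lines 422-437) is visible almost line for line: blocked readers and writers are woken, state waiters are notified, and the reference taken by whoever scheduled the work is dropped. Reassembled as a sketch; only the teardown inside the CLOSED-and-dead branch is not visible in this listing:

    sk->sk_data_ready(sk);          /* wakeup blocked rcvbuf consumers */
    sk->sk_write_space(sk);         /* wakeup blocked sndbuf producers */

    if (old_state != sk->sk_state) {
            sk->sk_state_change(sk);
            if ((sk->sk_state == SMC_CLOSED) &&
                (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
                    /* connection teardown happens here in the real code;
                     * the helper used is not visible in this listing */
            }
    }
    release_sock(sk);
    sock_put(sk);                   /* sock_hold done by schedulers of close_work */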
443 struct sock *sk = &smc->sk; in smc_close_shutdown_write() local
449 0 : sock_flag(sk, SOCK_LINGER) ? in smc_close_shutdown_write()
450 sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT; in smc_close_shutdown_write()
452 old_state = sk->sk_state; in smc_close_shutdown_write()
454 switch (sk->sk_state) { in smc_close_shutdown_write()
457 release_sock(sk); in smc_close_shutdown_write()
459 lock_sock(sk); in smc_close_shutdown_write()
460 if (sk->sk_state != SMC_ACTIVE) in smc_close_shutdown_write()
464 sk->sk_state = SMC_PEERCLOSEWAIT1; in smc_close_shutdown_write()
470 release_sock(sk); in smc_close_shutdown_write()
472 lock_sock(sk); in smc_close_shutdown_write()
473 if (sk->sk_state != SMC_APPCLOSEWAIT1) in smc_close_shutdown_write()
477 sk->sk_state = SMC_APPCLOSEWAIT2; in smc_close_shutdown_write()
490 if (old_state != sk->sk_state) in smc_close_shutdown_write()
491 sk->sk_state_change(sk); in smc_close_shutdown_write()
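Finally, the smc_close_shutdown_write() references (lines 443-491) trace the write-shutdown state machine: an active socket waits for pending sends (dropping the lock while sleeping) and then moves to SMC_PEERCLOSEWAIT1, while a socket whose peer closed first moves from SMC_APPCLOSEWAIT1 to SMC_APPCLOSEWAIT2. A sketch of that switch; the case labels are inferred from the state checks at lines 460 and 473, and the announce-to-peer steps are summarised as comments:

    switch (sk->sk_state) {
    case SMC_ACTIVE:
            release_sock(sk);                       /* line 457 */
            smc_close_stream_wait(smc, timeout);    /* may sleep */
            lock_sock(sk);                          /* line 459 */
            if (sk->sk_state != SMC_ACTIVE)         /* line 460 */
                    break;
            /* announce write close to the peer */
            sk->sk_state = SMC_PEERCLOSEWAIT1;      /* line 464 */
            break;
    case SMC_APPCLOSEWAIT1:
            /* passive close: peer closed first */
            release_sock(sk);                       /* line 470 */
            smc_close_stream_wait(smc, timeout);
            lock_sock(sk);                          /* line 472 */
            if (sk->sk_state != SMC_APPCLOSEWAIT1)  /* line 473 */
                    break;
            /* confirm the close towards the peer */
            sk->sk_state = SMC_APPCLOSEWAIT2;       /* line 477 */
            break;
    default:
            break;
    }

    if (old_state != sk->sk_state)                  /* lines 490-491 */
            sk->sk_state_change(sk);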