Lines Matching refs:sk

41 struct sock *sk; in smc_close_cleanup_listen() local
44 while ((sk = smc_accept_dequeue(parent, NULL))) in smc_close_cleanup_listen()
45 smc_close_non_accepted(sk); in smc_close_cleanup_listen()
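
The three hits at lines 41-45 cover the listener cleanup path (these functions appear to come from the Linux kernel's net/smc/smc_close.c): the parent socket drains its queue of not-yet-accepted child sockets and closes each one. A minimal sketch of how the fragments fit together, assuming the upstream helpers smc_accept_dequeue() and smc_close_non_accepted(); everything outside the listed lines is reconstruction, not verbatim source:

/* Hedged sketch of smc_close_cleanup_listen(); only the sk-referencing
 * lines (41, 44, 45) are confirmed by the listing above.
 */
static void smc_close_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* close all not-yet-accepted child sockets queued on the listener */
	while ((sk = smc_accept_dequeue(parent, NULL)))
		smc_close_non_accepted(sk);
}
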
52 struct sock *sk = &smc->sk; in smc_close_stream_wait() local
61 add_wait_queue(sk_sleep(sk), &wait); in smc_close_stream_wait()
65 rc = sk_wait_event(sk, &timeout, in smc_close_stream_wait()
67 READ_ONCE(sk->sk_err) == ECONNABORTED || in smc_close_stream_wait()
68 READ_ONCE(sk->sk_err) == ECONNRESET || in smc_close_stream_wait()
74 remove_wait_queue(sk_sleep(sk), &wait); in smc_close_stream_wait()
82 smc->sk.sk_state_change(&smc->sk); in smc_close_wake_tx_prepared()
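
Lines 52-82 belong to the pair smc_close_stream_wait() / smc_close_wake_tx_prepared(): close blocks on the socket wait queue until the prepared sends have drained (or the socket hits ECONNABORTED/ECONNRESET), and the transmit side wakes the waiter through sk_state_change(). A hedged sketch of that wait/wake pair, assuming the usual DEFINE_WAIT_FUNC/woken_wake_function idiom and the wait_close_tx_prepared flag from the upstream file (neither is visible in the hits):

/* Hedged sketch of the close-time send-buffer drain (lines 52-74) and its
 * wake-up counterpart (line 82). Everything outside the listed lines is an
 * assumption based on upstream net/smc/smc_close.c.
 */
static void smc_close_stream_wait(struct smc_sock *smc, long timeout)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct sock *sk = &smc->sk;

	if (!timeout || !smc_tx_prepared_sends(&smc->conn))
		return;

	smc->wait_close_tx_prepared = 1;
	add_wait_queue(sk_sleep(sk), &wait);		/* line 61 */
	while (!signal_pending(current) && timeout) {
		int rc;

		/* stop once the sends are gone or the connection failed */
		rc = sk_wait_event(sk, &timeout,
				   !smc_tx_prepared_sends(&smc->conn) ||
				   READ_ONCE(sk->sk_err) == ECONNABORTED ||
				   READ_ONCE(sk->sk_err) == ECONNRESET ||
				   smc->conn.killed,
				   &wait);
		if (rc)
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);		/* line 74 */
	smc->wait_close_tx_prepared = 0;
}

/* line 82: the tx path kicks the waiter above once the sends are written */
void smc_close_wake_tx_prepared(struct smc_sock *smc)
{
	if (smc->wait_close_tx_prepared)
		smc->sk.sk_state_change(&smc->sk);
}
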
113 struct sock *sk = &smc->sk; in smc_close_cancel_work() local
115 release_sock(sk); in smc_close_cancel_work()
117 sock_put(sk); in smc_close_cancel_work()
119 lock_sock(sk); in smc_close_cancel_work()
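
Lines 113-119 show the lock juggling in smc_close_cancel_work(): the socket lock is dropped so pending work can be cancelled synchronously without deadlocking against workers that take the lock themselves, the reference held by a cancelled close_work is released, and the lock is re-acquired for the caller. Roughly, with the work-item names (conn.close_work, conn.tx_work) assumed from the upstream file:

/* Hedged sketch of smc_close_cancel_work(); only lines 113-119 are
 * confirmed by the listing.
 */
static void smc_close_cancel_work(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;

	release_sock(sk);			/* line 115 */
	if (cancel_work_sync(&smc->conn.close_work))
		sock_put(sk);			/* line 117: drop the work's ref */
	cancel_delayed_work_sync(&smc->conn.tx_work);
	lock_sock(sk);				/* line 119 */
}
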
127 struct sock *sk = &smc->sk; in smc_close_active_abort() local
130 if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) { in smc_close_active_abort()
131 sk->sk_err = ECONNABORTED; in smc_close_active_abort()
132 if (smc->clcsock && smc->clcsock->sk) in smc_close_active_abort()
133 tcp_abort(smc->clcsock->sk, ECONNABORTED); in smc_close_active_abort()
135 switch (sk->sk_state) { in smc_close_active_abort()
139 sk->sk_state = SMC_PEERABORTWAIT; in smc_close_active_abort()
141 if (sk->sk_state != SMC_PEERABORTWAIT) in smc_close_active_abort()
143 sk->sk_state = SMC_CLOSED; in smc_close_active_abort()
144 sock_put(sk); /* (postponed) passive closing */ in smc_close_active_abort()
149 sk->sk_state = SMC_PEERABORTWAIT; in smc_close_active_abort()
151 if (sk->sk_state != SMC_PEERABORTWAIT) in smc_close_active_abort()
153 sk->sk_state = SMC_CLOSED; in smc_close_active_abort()
156 sock_put(sk); /* passive closing */ in smc_close_active_abort()
160 sk->sk_state = SMC_PEERABORTWAIT; in smc_close_active_abort()
162 if (sk->sk_state != SMC_PEERABORTWAIT) in smc_close_active_abort()
164 sk->sk_state = SMC_CLOSED; in smc_close_active_abort()
174 smc_sock_set_flag(sk, SOCK_DEAD); in smc_close_active_abort()
175 sk->sk_state_change(sk); in smc_close_active_abort()
178 release_sock(sk); in smc_close_active_abort()
180 lock_sock(sk); in smc_close_active_abort()
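
Lines 127-180 form the abort path, smc_close_active_abort(): the underlying TCP clcsock is torn down with ECONNABORTED, a switch on sk->sk_state parks the socket in SMC_PEERABORTWAIT while pending work is cancelled, re-checks the state (the cancelled work may have advanced it) before finishing in SMC_CLOSED, then marks the sock SOCK_DEAD and signals the state change. A condensed sketch of that skeleton follows; the case labels, smc_close_cancel_work() calls, smc_conn_free() and the final clcsock release are reconstructed from the upstream file and are not confirmed by the sk-only hits above.

/* Condensed, hedged sketch of smc_close_active_abort() (lines 127-180). */
void smc_close_active_abort(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;
	bool release_clcsock = false;

	if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) {
		sk->sk_err = ECONNABORTED;		/* line 131 */
		if (smc->clcsock && smc->clcsock->sk)
			tcp_abort(smc->clcsock->sk, ECONNABORTED);
	}
	switch (sk->sk_state) {				/* line 135 */
	case SMC_ACTIVE:
	case SMC_APPCLOSEWAIT1:
	case SMC_APPCLOSEWAIT2:
		sk->sk_state = SMC_PEERABORTWAIT;	/* line 139 */
		smc_close_cancel_work(smc);
		if (sk->sk_state != SMC_PEERABORTWAIT)	/* work moved us on */
			break;
		sk->sk_state = SMC_CLOSED;		/* line 143 */
		sock_put(sk); /* (postponed) passive closing */
		break;
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
	case SMC_PEERFINCLOSEWAIT:
		sk->sk_state = SMC_PEERABORTWAIT;	/* line 149 */
		smc_close_cancel_work(smc);
		if (sk->sk_state != SMC_PEERABORTWAIT)
			break;
		sk->sk_state = SMC_CLOSED;		/* line 153 */
		smc_conn_free(&smc->conn);
		release_clcsock = true;
		sock_put(sk); /* passive closing, line 156 */
		break;
	case SMC_PROCESSABORT:
	case SMC_APPFINCLOSEWAIT:
		sk->sk_state = SMC_PEERABORTWAIT;	/* line 160 */
		smc_close_cancel_work(smc);
		if (sk->sk_state != SMC_PEERABORTWAIT)
			break;
		sk->sk_state = SMC_CLOSED;		/* line 164 */
		smc_conn_free(&smc->conn);
		release_clcsock = true;
		break;
	default:	/* SMC_INIT, SMC_PEERABORTWAIT, SMC_CLOSED */
		break;
	}

	smc_sock_set_flag(sk, SOCK_DEAD);		/* line 174 */
	sk->sk_state_change(sk);			/* line 175 */

	if (release_clcsock) {
		release_sock(sk);			/* line 178 */
		smc_clcsock_release(smc);
		lock_sock(sk);				/* line 180 */
	}
}
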
195 struct sock *sk = &smc->sk; in smc_close_active() local
202 0 : sock_flag(sk, SOCK_LINGER) ? in smc_close_active()
203 sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT; in smc_close_active()
205 old_state = sk->sk_state; in smc_close_active()
207 switch (sk->sk_state) { in smc_close_active()
209 sk->sk_state = SMC_CLOSED; in smc_close_active()
212 sk->sk_state = SMC_CLOSED; in smc_close_active()
213 sk->sk_state_change(sk); /* wake up accept */ in smc_close_active()
214 if (smc->clcsock && smc->clcsock->sk) { in smc_close_active()
215 smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready; in smc_close_active()
216 smc->clcsock->sk->sk_user_data = NULL; in smc_close_active()
219 smc_close_cleanup_listen(sk); in smc_close_active()
220 release_sock(sk); in smc_close_active()
222 lock_sock(sk); in smc_close_active()
226 release_sock(sk); in smc_close_active()
228 lock_sock(sk); in smc_close_active()
229 if (sk->sk_state == SMC_ACTIVE) { in smc_close_active()
232 sk->sk_state = SMC_PEERCLOSEWAIT1; in smc_close_active()
237 if (smc->clcsock && smc->clcsock->sk) { in smc_close_active()
254 sk->sk_state = SMC_CLOSED; in smc_close_active()
260 release_sock(sk); in smc_close_active()
262 lock_sock(sk); in smc_close_active()
263 if (sk->sk_state != SMC_APPCLOSEWAIT1 && in smc_close_active()
264 sk->sk_state != SMC_APPCLOSEWAIT2) in smc_close_active()
270 sk->sk_state = SMC_CLOSED; in smc_close_active()
271 sock_put(sk); /* postponed passive closing */ in smc_close_active()
274 sk->sk_state = SMC_PEERFINCLOSEWAIT; in smc_close_active()
291 sk->sk_state = SMC_CLOSED; in smc_close_active()
294 sk->sk_state = SMC_CLOSED; in smc_close_active()
301 if (old_state != sk->sk_state) in smc_close_active()
302 sk->sk_state_change(sk); in smc_close_active()
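
Lines 195-302 are the active close, smc_close_active(): compute a linger timeout, then drive the state machine. A listener tears down its accept queue and restores the clcsock callbacks (lines 212-222); an SMC_ACTIVE socket drains its send buffer, sends the close request and moves to SMC_PEERCLOSEWAIT1 (lines 226-237); APPCLOSEWAIT sockets confirm the peer's close and end in SMC_CLOSED or SMC_PEERFINCLOSEWAIT (lines 260-274). A condensed sketch of that flow, with case labels, smc_close_final(), smc_cdc_rxed_any_close() and the kernel_sock_shutdown() calls assumed from upstream smc_close.c:

/* Condensed, hedged sketch of smc_close_active() (lines 195-302); only the
 * sk-referencing statements are confirmed by the listing.
 */
int smc_close_active(struct smc_sock *smc)
{
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	int old_state, rc = 0;
	long timeout;

	timeout = current->flags & PF_EXITING ?		/* lines 202/203 */
		  0 : sock_flag(sk, SOCK_LINGER) ?
		      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;

	old_state = sk->sk_state;
again:
	switch (sk->sk_state) {				/* line 207 */
	case SMC_INIT:
		sk->sk_state = SMC_CLOSED;		/* line 209 */
		break;
	case SMC_LISTEN:
		sk->sk_state = SMC_CLOSED;		/* line 212 */
		sk->sk_state_change(sk); /* wake up accept */
		if (smc->clcsock && smc->clcsock->sk) {
			/* restore the original TCP callbacks, lines 215/216 */
			smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
			smc->clcsock->sk->sk_user_data = NULL;
			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
		}
		smc_close_cleanup_listen(sk);		/* line 219 */
		release_sock(sk);			/* line 220 */
		flush_work(&smc->tcp_listen_work);
		lock_sock(sk);				/* line 222 */
		break;
	case SMC_ACTIVE:
		smc_close_stream_wait(smc, timeout);
		release_sock(sk);			/* line 226 */
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);				/* line 228 */
		if (sk->sk_state == SMC_ACTIVE) {	/* line 229 */
			rc = smc_close_final(conn);	/* send close request */
			sk->sk_state = SMC_PEERCLOSEWAIT1;	/* line 232 */
			/* shut down clcsock before the peer does, line 237 */
			if (smc->clcsock && smc->clcsock->sk)
				kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
		} else {
			goto again;	/* a peer event changed the state */
		}
		break;
	case SMC_APPCLOSEWAIT1:
	case SMC_APPCLOSEWAIT2:
		smc_close_stream_wait(smc, timeout);
		release_sock(sk);			/* line 260 */
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);				/* line 262 */
		if (sk->sk_state != SMC_APPCLOSEWAIT1 &&	/* lines 263/264 */
		    sk->sk_state != SMC_APPCLOSEWAIT2)
			goto again;
		rc = smc_close_final(conn);	/* confirm close from peer */
		if (smc_cdc_rxed_any_close(conn)) {
			sk->sk_state = SMC_CLOSED;	/* line 270 */
			sock_put(sk); /* postponed passive closing */
		} else {
			sk->sk_state = SMC_PEERFINCLOSEWAIT;	/* line 274 */
		}
		break;
	default:
		/* remaining states (APPFINCLOSEWAIT, PEERCLOSEWAIT*,
		 * PEERFINCLOSEWAIT, PROCESSABORT, PEERABORTWAIT, CLOSED)
		 * either go straight to SMC_CLOSED (lines 254, 291, 294)
		 * or wait for the peer's closing indication.
		 */
		break;
	}

	if (old_state != sk->sk_state)			/* line 301 */
		sk->sk_state_change(sk);		/* line 302 */
	return rc;
}
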
310 struct sock *sk = &smc->sk; in smc_close_passive_abort_received() local
312 switch (sk->sk_state) { in smc_close_passive_abort_received()
316 sk->sk_state = SMC_PROCESSABORT; in smc_close_passive_abort_received()
317 sock_put(sk); /* passive closing */ in smc_close_passive_abort_received()
320 sk->sk_state = SMC_PROCESSABORT; in smc_close_passive_abort_received()
327 sk->sk_state = SMC_PROCESSABORT; in smc_close_passive_abort_received()
329 sk->sk_state = SMC_CLOSED; in smc_close_passive_abort_received()
330 sock_put(sk); /* passive closing */ in smc_close_passive_abort_received()
334 sk->sk_state = SMC_CLOSED; in smc_close_passive_abort_received()
335 sock_put(sk); /* passive closing */ in smc_close_passive_abort_received()
338 sk->sk_state = SMC_CLOSED; in smc_close_passive_abort_received()
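
Lines 310-338 are smc_close_passive_abort_received(), invoked when the peer signals an abnormal close: depending on the local state, the socket either moves to SMC_PROCESSABORT (the abort still needs local processing) or straight to SMC_CLOSED, and each transition out of a passive-closing state drops the corresponding reference. A hedged reconstruction; the case labels and the peer_done_writing test are taken from upstream and are not part of the hits above:

/* Hedged sketch of smc_close_passive_abort_received() (lines 310-338). */
static void smc_close_passive_abort_received(struct smc_sock *smc)
{
	struct smc_cdc_conn_state_flags *txflags =
		&smc->conn.local_tx_ctrl.conn_state_flags;
	struct sock *sk = &smc->sk;

	switch (sk->sk_state) {				/* line 312 */
	case SMC_INIT:
	case SMC_ACTIVE:
	case SMC_APPCLOSEWAIT1:
		sk->sk_state = SMC_PROCESSABORT;	/* line 316 */
		sock_put(sk); /* passive closing */
		break;
	case SMC_APPFINCLOSEWAIT:
		sk->sk_state = SMC_PROCESSABORT;	/* line 320 */
		break;
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
		if (txflags->peer_done_writing &&
		    !smc_close_sent_any_close(&smc->conn))
			/* just shutdown, not yet closed locally */
			sk->sk_state = SMC_PROCESSABORT;	/* line 327 */
		else
			sk->sk_state = SMC_CLOSED;		/* line 329 */
		sock_put(sk); /* passive closing */
		break;
	case SMC_APPCLOSEWAIT2:
	case SMC_PEERFINCLOSEWAIT:
		sk->sk_state = SMC_CLOSED;		/* line 334 */
		sock_put(sk); /* passive closing */
		break;
	case SMC_PEERABORTWAIT:
		sk->sk_state = SMC_CLOSED;		/* line 338 */
		break;
	default:	/* SMC_PROCESSABORT: nothing to do */
		break;
	}
}
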
358 struct sock *sk = &smc->sk; in smc_close_passive_work() local
361 lock_sock(sk); in smc_close_passive_work()
362 old_state = sk->sk_state; in smc_close_passive_work()
368 release_sock(&smc->sk); in smc_close_passive_work()
370 lock_sock(&smc->sk); in smc_close_passive_work()
374 switch (sk->sk_state) { in smc_close_passive_work()
376 sk->sk_state = SMC_APPCLOSEWAIT1; in smc_close_passive_work()
379 sk->sk_state = SMC_APPCLOSEWAIT1; in smc_close_passive_work()
386 sk->sk_state = SMC_PEERCLOSEWAIT2; in smc_close_passive_work()
392 if (sock_flag(sk, SOCK_DEAD) && in smc_close_passive_work()
395 sk->sk_state = SMC_CLOSED; in smc_close_passive_work()
398 sk->sk_state = SMC_APPFINCLOSEWAIT; in smc_close_passive_work()
400 sock_put(sk); /* passive closing */ in smc_close_passive_work()
404 sk->sk_state = SMC_CLOSED; in smc_close_passive_work()
405 sock_put(sk); /* passive closing */ in smc_close_passive_work()
423 sk->sk_data_ready(sk); /* wakeup blocked rcvbuf consumers */ in smc_close_passive_work()
424 sk->sk_write_space(sk); /* wakeup blocked sndbuf producers */ in smc_close_passive_work()
426 if (old_state != sk->sk_state) { in smc_close_passive_work()
427 sk->sk_state_change(sk); in smc_close_passive_work()
428 if ((sk->sk_state == SMC_CLOSED) && in smc_close_passive_work()
429 (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) { in smc_close_passive_work()
435 release_sock(sk); in smc_close_passive_work()
438 sock_put(sk); /* sock_hold done by schedulers of close_work */ in smc_close_passive_work()
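
Lines 358-438 are the passive close worker, smc_close_passive_work(), scheduled when a close or abort indication arrives from the peer: under lock_sock() it first handles a peer abort (lines 368/370 drop and re-take the lock around cancelling tx work), otherwise it advances the state machine (SMC_ACTIVE becomes SMC_APPCLOSEWAIT1, SMC_PEERCLOSEWAIT1/2 end in SMC_CLOSED or SMC_APPFINCLOSEWAIT), wakes blocked readers and writers, frees the connection once fully closed and dead, and finally drops the reference taken by whoever scheduled the work. A condensed, hedged sketch; the container_of plumbing, flag names and helpers outside the listed lines are assumptions from upstream:

/* Condensed, hedged sketch of smc_close_passive_work() (lines 358-438). */
static void smc_close_passive_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(work, struct smc_connection,
						   close_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_cdc_conn_state_flags *rxflags;
	bool release_clcsock = false;
	struct sock *sk = &smc->sk;
	int old_state;

	lock_sock(sk);					/* line 361 */
	old_state = sk->sk_state;			/* line 362 */

	rxflags = &conn->local_rx_ctrl.conn_state_flags;
	if (rxflags->peer_conn_abort) {
		/* peer aborted: lines 368/370 drop the lock around tx work */
		smc_close_passive_abort_received(smc);
		release_sock(&smc->sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(&smc->sk);
		goto wakeup;
	}

	switch (sk->sk_state) {				/* line 374 */
	case SMC_INIT:
	case SMC_ACTIVE:
		sk->sk_state = SMC_APPCLOSEWAIT1;	/* lines 376/379 */
		break;
	case SMC_PEERCLOSEWAIT1:
		if (rxflags->peer_done_writing)
			sk->sk_state = SMC_PEERCLOSEWAIT2;	/* line 386 */
		fallthrough;	/* to check for closing */
	case SMC_PEERCLOSEWAIT2:
		if (!smc_cdc_rxed_any_close(conn))
			break;
		if (sock_flag(sk, SOCK_DEAD) &&			/* line 392 */
		    smc_close_sent_any_close(conn)) {
			/* smc_release() has already run locally */
			sk->sk_state = SMC_CLOSED;		/* line 395 */
		} else {
			/* just shutdown, not yet closed locally */
			sk->sk_state = SMC_APPFINCLOSEWAIT;	/* line 398 */
		}
		sock_put(sk); /* passive closing, line 400 */
		break;
	case SMC_PEERFINCLOSEWAIT:
		if (smc_cdc_rxed_any_close(conn)) {
			sk->sk_state = SMC_CLOSED;		/* line 404 */
			sock_put(sk); /* passive closing */
		}
		break;
	default:	/* APPCLOSEWAIT*, APPFINCLOSEWAIT, aborts, CLOSED */
		break;
	}

wakeup:
	sk->sk_data_ready(sk);	/* wakeup blocked rcvbuf consumers */
	sk->sk_write_space(sk);	/* wakeup blocked sndbuf producers */

	if (old_state != sk->sk_state) {		/* line 426 */
		sk->sk_state_change(sk);
		if ((sk->sk_state == SMC_CLOSED) &&
		    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
			smc_conn_free(conn);
			if (smc->clcsock)
				release_clcsock = true;
		}
	}
	release_sock(sk);				/* line 435 */
	if (release_clcsock)
		smc_clcsock_release(smc);
	sock_put(sk); /* sock_hold done by schedulers of close_work */
}
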
444 struct sock *sk = &smc->sk; in smc_close_shutdown_write() local
450 0 : sock_flag(sk, SOCK_LINGER) ? in smc_close_shutdown_write()
451 sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT; in smc_close_shutdown_write()
453 old_state = sk->sk_state; in smc_close_shutdown_write()
455 switch (sk->sk_state) { in smc_close_shutdown_write()
458 release_sock(sk); in smc_close_shutdown_write()
460 lock_sock(sk); in smc_close_shutdown_write()
461 if (sk->sk_state != SMC_ACTIVE) in smc_close_shutdown_write()
465 sk->sk_state = SMC_PEERCLOSEWAIT1; in smc_close_shutdown_write()
471 release_sock(sk); in smc_close_shutdown_write()
473 lock_sock(sk); in smc_close_shutdown_write()
474 if (sk->sk_state != SMC_APPCLOSEWAIT1) in smc_close_shutdown_write()
478 sk->sk_state = SMC_APPCLOSEWAIT2; in smc_close_shutdown_write()
491 if (old_state != sk->sk_state) in smc_close_shutdown_write()
492 sk->sk_state_change(sk); in smc_close_shutdown_write()
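
Lines 444-492 are smc_close_shutdown_write(), the shutdown(SHUT_WR) counterpart of smc_close_active(): it computes the same linger timeout, drains the send buffer, and then either announces the write shutdown from SMC_ACTIVE (moving to SMC_PEERCLOSEWAIT1, line 465) or confirms the peer's close from SMC_APPCLOSEWAIT1 (moving to SMC_APPCLOSEWAIT2, line 478). A condensed sketch, with smc_close_wr() and the tx_work cancellation assumed from upstream:

/* Condensed, hedged sketch of smc_close_shutdown_write() (lines 444-492). */
int smc_close_shutdown_write(struct smc_sock *smc)
{
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	int old_state, rc = 0;
	long timeout;

	timeout = current->flags & PF_EXITING ?		/* lines 450/451 */
		  0 : sock_flag(sk, SOCK_LINGER) ?
		      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;

	old_state = sk->sk_state;			/* line 453 */
again:
	switch (sk->sk_state) {				/* line 455 */
	case SMC_ACTIVE:
		smc_close_stream_wait(smc, timeout);
		release_sock(sk);			/* line 458 */
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);				/* line 460 */
		if (sk->sk_state != SMC_ACTIVE)		/* line 461 */
			goto again;
		rc = smc_close_wr(conn);	/* send close wr request */
		sk->sk_state = SMC_PEERCLOSEWAIT1;	/* line 465 */
		break;
	case SMC_APPCLOSEWAIT1:
		/* passive close */
		if (!smc_cdc_rxed_any_close(conn))
			smc_close_stream_wait(smc, timeout);
		release_sock(sk);			/* line 471 */
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);				/* line 473 */
		if (sk->sk_state != SMC_APPCLOSEWAIT1)	/* line 474 */
			goto again;
		rc = smc_close_wr(conn);	/* confirm close from peer */
		sk->sk_state = SMC_APPCLOSEWAIT2;	/* line 478 */
		break;
	default:	/* nothing to do in the remaining states */
		break;
	}

	if (old_state != sk->sk_state)			/* line 491 */
		sk->sk_state_change(sk);		/* line 492 */
	return rc;
}
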