Lines matching references to sk in include/net/inet_connection_sock.h:

35 	int	    (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
36 void (*send_check)(struct sock *sk, struct sk_buff *skb);
37 int (*rebuild_header)(struct sock *sk);
38 void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
39 int (*conn_request)(struct sock *sk, struct sk_buff *skb);
40 struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
48 int (*setsockopt)(struct sock *sk, int level, int optname,
50 int (*getsockopt)(struct sock *sk, int level, int optname,
52 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
53 void (*mtu_reduced)(struct sock *sk);
98 void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
100 unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
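
The hooks at lines 35-53 are members of struct inet_connection_sock_af_ops, the per-address-family operations table, while icsk_clean_acked and icsk_sync_mss (lines 98-100) are callbacks held on the connection socket itself. Below is a minimal sketch of how such a table is populated; the my_* names are hypothetical stand-ins for what a real family (TCP over IPv4, say) wires in, e.g. ip_queue_xmit() and tcp_v4_send_check().

#include <net/inet_connection_sock.h>
#include <net/sock.h>
#include <net/flow.h>
#include <linux/skbuff.h>

/* Hypothetical stubs, only to illustrate the hook signatures listed above. */
static int my_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
	kfree_skb(skb);		/* stub: a real hook hands the skb to the IP layer */
	return 0;
}

static void my_send_check(struct sock *sk, struct sk_buff *skb)
{
	/* stub: a real hook fills in the transport checksum */
}

static const struct inet_connection_sock_af_ops my_af_ops = {
	.queue_xmit	= my_queue_xmit,
	.send_check	= my_send_check,
	/* .conn_request, .syn_recv_sock, .setsockopt, ... follow the same pattern */
};

Generic connection-socket code then dispatches through icsk_af_ops without knowing which address family it is driving.
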
148 static inline struct inet_connection_sock *inet_csk(const struct sock *sk) in inet_csk() argument
150 return (struct inet_connection_sock *)sk; in inet_csk()
153 static inline void *inet_csk_ca(const struct sock *sk) in inet_csk_ca() argument
155 return (void *)inet_csk(sk)->icsk_ca_priv; in inet_csk_ca()
158 struct sock *inet_csk_clone_lock(const struct sock *sk,
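
inet_csk() at line 148 is a plain downcast (struct inet_connection_sock embeds the struct sock at its start), and inet_csk_ca() at line 153 hands back the icsk_ca_priv scratch area that congestion-control modules use for per-socket state. A sketch of the usual pattern follows, with a hypothetical my_ca_state standing in for a real module's private struct.

#include <linux/build_bug.h>
#include <net/inet_connection_sock.h>
#include <net/sock.h>

/* Hypothetical per-socket state; it must fit inside icsk_ca_priv[]. */
struct my_ca_state {
	u32 cnt;
	u32 last_max;
};

static void my_ca_init(struct sock *sk)
{
	struct my_ca_state *ca = inet_csk_ca(sk);	/* points into icsk_ca_priv */

	BUILD_BUG_ON(sizeof(*ca) > ICSK_CA_PRIV_SIZE);
	ca->cnt = 0;
	ca->last_max = 0;
}

The in-tree congestion-control modules follow the same pattern in their init hooks.
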
170 void inet_csk_init_xmit_timers(struct sock *sk,
174 void inet_csk_clear_xmit_timers(struct sock *sk);
176 static inline void inet_csk_schedule_ack(struct sock *sk) in inet_csk_schedule_ack() argument
178 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED; in inet_csk_schedule_ack()
181 static inline int inet_csk_ack_scheduled(const struct sock *sk) in inet_csk_ack_scheduled() argument
183 return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED; in inet_csk_ack_scheduled()
186 static inline void inet_csk_delack_init(struct sock *sk) in inet_csk_delack_init() argument
188 memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack)); in inet_csk_delack_init()
191 void inet_csk_delete_keepalive_timer(struct sock *sk);
192 void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
194 static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what) in inet_csk_clear_xmit_timer() argument
196 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_clear_xmit_timer()
201 sk_stop_timer(sk, &icsk->icsk_retransmit_timer); in inet_csk_clear_xmit_timer()
207 sk_stop_timer(sk, &icsk->icsk_delack_timer); in inet_csk_clear_xmit_timer()
217 static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, in inet_csk_reset_xmit_timer() argument
221 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_reset_xmit_timer()
225 sk, what, when, (void *)_THIS_IP_); in inet_csk_reset_xmit_timer()
234 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); in inet_csk_reset_xmit_timer()
238 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); in inet_csk_reset_xmit_timer()
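
Lines 170-238 are the transmit-timer plumbing: inet_csk_reset_xmit_timer() arms either the retransmit or the delayed-ACK timer depending on the what argument, and inet_csk_clear_xmit_timer() stops it. The sketch below shows the usual call pattern for the retransmit case, loosely modelled on the TCP output path; ICSK_TIME_RETRANS, icsk_rto and TCP_RTO_MAX come from the wider ICSK/TCP headers rather than from the lines above.

#include <net/inet_connection_sock.h>
#include <net/tcp.h>

/* Sketch: re-arm the retransmission timer for the current RTO,
 * capped at TCP_RTO_MAX, and the matching teardown. */
static void my_rearm_rto(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, TCP_RTO_MAX);
}

static void my_cancel_rto(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
}

Passing ICSK_TIME_DACK instead selects the delayed-ACK timer.
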
253 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);
255 int inet_csk_get_port(struct sock *sk, unsigned short snum);
257 struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
259 struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
263 struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
266 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
268 struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
272 static inline void inet_csk_reqsk_queue_added(struct sock *sk) in inet_csk_reqsk_queue_added() argument
274 reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue); in inet_csk_reqsk_queue_added()
277 static inline int inet_csk_reqsk_queue_len(const struct sock *sk) in inet_csk_reqsk_queue_len() argument
279 return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue); in inet_csk_reqsk_queue_len()
282 static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) in inet_csk_reqsk_queue_is_full() argument
284 return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog; in inet_csk_reqsk_queue_is_full()
287 bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
288 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
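
Lines 253-288 cover the listener side: inet_csk_accept() pops established children off icsk_accept_queue, the reqsk_queue helpers track pending request socks, and inet_csk_complete_hashdance() hands a newly created child over to the accept queue once the handshake completes. The check below sketches the admission test a SYN handler performs before creating a request sock, using only the helpers listed above; the function name is hypothetical.

#include <net/inet_connection_sock.h>
#include <net/sock.h>

/* Sketch of a listener-side admission check: once the request-sock
 * queue reaches sk_max_ack_backlog, further SYNs are refused (dropped
 * or answered with a SYN cookie) rather than queued. */
static bool my_listener_has_room(const struct sock *sk)
{
	return !inet_csk_reqsk_queue_is_full(sk);
}
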
290 static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk) in inet_csk_prepare_for_destroy_sock() argument
293 sock_set_flag(sk, SOCK_DEAD); in inet_csk_prepare_for_destroy_sock()
294 this_cpu_inc(*sk->sk_prot->orphan_count); in inet_csk_prepare_for_destroy_sock()
297 void inet_csk_destroy_sock(struct sock *sk);
298 void inet_csk_prepare_forced_close(struct sock *sk);
303 static inline __poll_t inet_csk_listen_poll(const struct sock *sk) in inet_csk_listen_poll() argument
305 return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? in inet_csk_listen_poll()
309 int inet_csk_listen_start(struct sock *sk, int backlog);
310 void inet_csk_listen_stop(struct sock *sk);
312 void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
316 struct sock *sk);
318 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
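
The lifecycle helpers around lines 290-318 mark a socket dead and orphaned before teardown (inet_csk_prepare_for_destroy_sock(), inet_csk_destroy_sock()) and bracket the listening state (inet_csk_listen_start(), inet_csk_listen_stop()); inet_csk_update_pmtu() revalidates the cached route after a path-MTU change. A minimal sketch of the listen bracket, with error handling elided and hypothetical my_* wrappers:

#include <net/inet_connection_sock.h>
#include <net/sock.h>

/* Sketch only; inet_listen() and the protocol close paths are the
 * real callers of these helpers. */
static int my_enter_listen(struct sock *sk, int backlog)
{
	/* Sets up icsk_accept_queue and moves the socket to the
	 * listening state; returns a negative errno on failure. */
	return inet_csk_listen_start(sk, backlog);
}

static void my_leave_listen(struct sock *sk)
{
	/* Flushes the accept queue and disposes of pending request socks. */
	inet_csk_listen_stop(sk);
}
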
322 static inline void inet_csk_enter_pingpong_mode(struct sock *sk) in inet_csk_enter_pingpong_mode() argument
324 inet_csk(sk)->icsk_ack.pingpong = TCP_PINGPONG_THRESH; in inet_csk_enter_pingpong_mode()
327 static inline void inet_csk_exit_pingpong_mode(struct sock *sk) in inet_csk_exit_pingpong_mode() argument
329 inet_csk(sk)->icsk_ack.pingpong = 0; in inet_csk_exit_pingpong_mode()
332 static inline bool inet_csk_in_pingpong_mode(struct sock *sk) in inet_csk_in_pingpong_mode() argument
334 return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH; in inet_csk_in_pingpong_mode()
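
The pingpong accessors at lines 322-334 drive the delayed-ACK heuristic: a flow in pingpong mode is treated as interactive, so pure ACKs can be held back and piggybacked on outgoing data. A sketch of how the mode is typically toggled and consulted; the my_* wrappers are illustrative, not in-tree functions.

#include <net/inet_connection_sock.h>
#include <net/sock.h>

/* Sketch: mark the flow interactive when data is sent shortly after
 * data was received, and consult the mode when deciding whether an
 * ACK may be delayed.  Loosely modelled on the TCP output/input paths. */
static void my_note_reply_sent(struct sock *sk)
{
	inet_csk_enter_pingpong_mode(sk);	/* icsk_ack.pingpong = TCP_PINGPONG_THRESH */
}

static bool my_may_delay_ack(struct sock *sk)
{
	return inet_csk_in_pingpong_mode(sk);
}
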
337 static inline bool inet_csk_has_ulp(struct sock *sk) in inet_csk_has_ulp() argument
339 return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops; in inet_csk_has_ulp()
342 static inline void inet_init_csk_locks(struct sock *sk) in inet_init_csk_locks() argument
344 struct inet_connection_sock *icsk = inet_csk(sk); in inet_init_csk_locks()