Lines Matching refs:sk

(Cross-reference listing: each entry shows the line number in the indexed header, the matching source line, and the indexer's context tag, i.e. the enclosing function, or whether sk is an argument, member, or local at that line.)

71 #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \  argument
76 SOCK_DEBUG(struct sock *sk, const char *msg, ...) in SOCK_DEBUG() argument
288 void (*sk_state_change)(struct sock *sk);
289 void (*sk_data_ready)(struct sock *sk, int bytes);
290 void (*sk_write_space)(struct sock *sk);
291 void (*sk_error_report)(struct sock *sk);
292 int (*sk_backlog_rcv)(struct sock *sk,
294 void (*sk_destruct)(struct sock *sk);
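
These function pointers are the socket's wakeup callbacks. In-kernel users (sunrpc is the classic example) override them under sk_callback_lock and chain to the saved originals. A minimal sketch, assuming kernel-internal context; the my_* names are hypothetical:

    static void (*my_saved_data_ready)(struct sock *sk, int bytes);

    static void my_data_ready(struct sock *sk, int bytes)
    {
            /* protocol-private wakeup would go here */
            my_saved_data_ready(sk, bytes);         /* chain to the original */
    }

    static void my_hook_callbacks(struct sock *sk)
    {
            write_lock_bh(&sk->sk_callback_lock);
            my_saved_data_ready = sk->sk_data_ready;
            sk->sk_data_ready = my_data_ready;
            write_unlock_bh(&sk->sk_callback_lock);
    }
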
320 static inline struct sock *sk_next(const struct sock *sk) in sk_next() argument
322 return sk->sk_node.next ? in sk_next()
323 hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL; in sk_next()
326 static inline struct sock *sk_nulls_next(const struct sock *sk) in sk_nulls_next() argument
328 return (!is_a_nulls(sk->sk_nulls_node.next)) ? in sk_nulls_next()
329 hlist_nulls_entry(sk->sk_nulls_node.next, in sk_nulls_next()
334 static inline int sk_unhashed(const struct sock *sk) in sk_unhashed() argument
336 return hlist_unhashed(&sk->sk_node); in sk_unhashed()
339 static inline int sk_hashed(const struct sock *sk) in sk_hashed() argument
341 return !sk_unhashed(sk); in sk_hashed()
354 static __inline__ void __sk_del_node(struct sock *sk) in __sk_del_node() argument
356 __hlist_del(&sk->sk_node); in __sk_del_node()
359 static __inline__ int __sk_del_node_init(struct sock *sk) in __sk_del_node_init() argument
361 if (sk_hashed(sk)) { in __sk_del_node_init()
362 __sk_del_node(sk); in __sk_del_node_init()
363 sk_node_init(&sk->sk_node); in __sk_del_node_init()
375 static inline void sock_hold(struct sock *sk) in sock_hold() argument
377 atomic_inc(&sk->sk_refcnt); in sock_hold()
383 static inline void __sock_put(struct sock *sk) in __sock_put() argument
385 atomic_dec(&sk->sk_refcnt); in __sock_put()
388 static __inline__ int sk_del_node_init(struct sock *sk) in sk_del_node_init() argument
390 int rc = __sk_del_node_init(sk); in sk_del_node_init()
394 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); in sk_del_node_init()
395 __sock_put(sk); in sk_del_node_init()
400 static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk) in __sk_nulls_del_node_init_rcu() argument
402 if (sk_hashed(sk)) { in __sk_nulls_del_node_init_rcu()
403 hlist_nulls_del_init_rcu(&sk->sk_nulls_node); in __sk_nulls_del_node_init_rcu()
409 static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk) in sk_nulls_del_node_init_rcu() argument
411 int rc = __sk_nulls_del_node_init_rcu(sk); in sk_nulls_del_node_init_rcu()
415 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); in sk_nulls_del_node_init_rcu()
416 __sock_put(sk); in sk_nulls_del_node_init_rcu()
421 static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list) in __sk_add_node() argument
423 hlist_add_head(&sk->sk_node, list); in __sk_add_node()
426 static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list) in sk_add_node() argument
428 sock_hold(sk); in sk_add_node()
429 __sk_add_node(sk, list); in sk_add_node()
432 static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) in __sk_nulls_add_node_rcu() argument
434 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); in __sk_nulls_add_node_rcu()
437 static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) in sk_nulls_add_node_rcu() argument
439 sock_hold(sk); in sk_nulls_add_node_rcu()
440 __sk_nulls_add_node_rcu(sk, list); in sk_nulls_add_node_rcu()
443 static __inline__ void __sk_del_bind_node(struct sock *sk) in __sk_del_bind_node() argument
445 __hlist_del(&sk->sk_bind_node); in __sk_del_bind_node()
448 static __inline__ void sk_add_bind_node(struct sock *sk, in sk_add_bind_node() argument
451 hlist_add_head(&sk->sk_bind_node, list); in sk_add_bind_node()
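
The add/del pairs above keep sk_refcnt in step with hash-table membership: sk_add_node() takes a reference via sock_hold() before linking, and sk_del_node_init() drops it with __sock_put() after unlinking (the WARN_ON at line 394 catches a table that would otherwise have held the last reference). A hedged sketch of typical use; my_table_lock is an assumption:

    static void my_hash_sock(struct sock *sk, struct hlist_head *head)
    {
            spin_lock_bh(&my_table_lock);
            sk_add_node(sk, head);          /* link + sock_hold() */
            spin_unlock_bh(&my_table_lock);
    }

    static void my_unhash_sock(struct sock *sk)
    {
            spin_lock_bh(&my_table_lock);
            sk_del_node_init(sk);           /* unlink + __sock_put() if hashed */
            spin_unlock_bh(&my_table_lock);
    }
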
498 static inline void sock_set_flag(struct sock *sk, enum sock_flags flag) in sock_set_flag() argument
500 __set_bit(flag, &sk->sk_flags); in sock_set_flag()
503 static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag) in sock_reset_flag() argument
505 __clear_bit(flag, &sk->sk_flags); in sock_reset_flag()
508 static inline int sock_flag(struct sock *sk, enum sock_flags flag) in sock_flag() argument
510 return test_bit(flag, &sk->sk_flags); in sock_flag()
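
Note that sock_set_flag()/sock_reset_flag() use the non-atomic __set_bit/__clear_bit, so callers are expected to own the socket, e.g. by holding the socket lock. A hedged usage sketch:

    lock_sock(sk);
    sock_set_flag(sk, SOCK_LINGER);         /* safe: we own the socket */
    release_sock(sk);
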
513 static inline void sk_acceptq_removed(struct sock *sk) in sk_acceptq_removed() argument
515 sk->sk_ack_backlog--; in sk_acceptq_removed()
518 static inline void sk_acceptq_added(struct sock *sk) in sk_acceptq_added() argument
520 sk->sk_ack_backlog++; in sk_acceptq_added()
523 static inline int sk_acceptq_is_full(struct sock *sk) in sk_acceptq_is_full() argument
525 return sk->sk_ack_backlog > sk->sk_max_ack_backlog; in sk_acceptq_is_full()
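
sk_acceptq_is_full() compares sk_ack_backlog against the listen() backlog limit; a connection-request handler typically drops when it returns true, roughly as in TCP's request path. A minimal sketch; the drop label is assumed:

    if (sk_acceptq_is_full(sk))
            goto drop;                      /* listener backlog exhausted */
    /* ... create the child socket, then account for it: */
    sk_acceptq_added(sk);                   /* balanced by sk_acceptq_removed()
                                             * when accept() dequeues it */
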
531 static inline int sk_stream_min_wspace(struct sock *sk) in sk_stream_min_wspace() argument
533 return sk->sk_wmem_queued >> 1; in sk_stream_min_wspace()
536 static inline int sk_stream_wspace(struct sock *sk) in sk_stream_wspace() argument
538 return sk->sk_sndbuf - sk->sk_wmem_queued; in sk_stream_wspace()
541 extern void sk_stream_write_space(struct sock *sk);
543 static inline int sk_stream_memory_free(struct sock *sk) in sk_stream_memory_free() argument
545 return sk->sk_wmem_queued < sk->sk_sndbuf; in sk_stream_memory_free()
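
Worked example: with sk_sndbuf = 64 KB and sk_wmem_queued = 48 KB, sk_stream_memory_free() is true, sk_stream_wspace() reports 16 KB, and sk_stream_min_wspace() is 24 KB, so a writer waiting for at least the minimum write space stays blocked until the queue drains below half of sk_sndbuf.
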
549 static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb) in sk_add_backlog() argument
551 if (!sk->sk_backlog.tail) { in sk_add_backlog()
552 sk->sk_backlog.head = sk->sk_backlog.tail = skb; in sk_add_backlog()
554 sk->sk_backlog.tail->next = skb; in sk_add_backlog()
555 sk->sk_backlog.tail = skb; in sk_add_backlog()
560 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) in sk_backlog_rcv() argument
562 return sk->sk_backlog_rcv(sk, skb); in sk_backlog_rcv()
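
sk_add_backlog() appends to a singly linked list that release_sock() later replays through sk_backlog_rcv(); the receive path parks packets there whenever process context owns the lock. The usual softirq-side pattern (cf. sk_receive_skb), sketched:

    bh_lock_sock(sk);
    if (!sock_owned_by_user(sk))
            rc = sk_backlog_rcv(sk, skb);   /* process immediately */
    else
            sk_add_backlog(sk, skb);        /* replayed by release_sock() */
    bh_unlock_sock(sk);
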
577 extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
578 extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
579 extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
580 extern int sk_stream_error(struct sock *sk, int flags, int err);
581 extern void sk_stream_kill_queues(struct sock *sk);
583 extern int sk_wait_data(struct sock *sk, long *timeo);
595 void (*close)(struct sock *sk,
597 int (*connect)(struct sock *sk,
600 int (*disconnect)(struct sock *sk, int flags);
602 struct sock * (*accept) (struct sock *sk, int flags, int *err);
604 int (*ioctl)(struct sock *sk, int cmd,
606 int (*init)(struct sock *sk);
607 void (*destroy)(struct sock *sk);
608 void (*shutdown)(struct sock *sk, int how);
609 int (*setsockopt)(struct sock *sk, int level,
612 int (*getsockopt)(struct sock *sk, int level,
616 int (*compat_setsockopt)(struct sock *sk,
620 int (*compat_getsockopt)(struct sock *sk,
625 int (*sendmsg)(struct kiocb *iocb, struct sock *sk,
627 int (*recvmsg)(struct kiocb *iocb, struct sock *sk,
631 int (*sendpage)(struct sock *sk, struct page *page,
633 int (*bind)(struct sock *sk,
636 int (*backlog_rcv) (struct sock *sk,
640 void (*hash)(struct sock *sk);
641 void (*unhash)(struct sock *sk);
642 int (*get_port)(struct sock *sk, unsigned short snum);
650 void (*enter_memory_pressure)(struct sock *sk);
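
These members belong to struct proto, the per-protocol ops table. A minimal sketch of an initializer for a hypothetical protocol; the my_* names and struct my_sock are assumptions, and only a few members are shown:

    static struct proto my_proto = {
            .name           = "MYPROTO",
            .owner          = THIS_MODULE,
            .close          = my_close,
            .connect        = my_connect,
            .disconnect     = my_disconnect,
            .sendmsg        = my_sendmsg,
            .recvmsg        = my_recvmsg,
            .backlog_rcv    = my_backlog_rcv,
            .hash           = my_hash,
            .unhash         = my_unhash,
            .get_port       = my_get_port,
            .obj_size       = sizeof(struct my_sock),
    };
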
694 static inline void sk_refcnt_debug_inc(struct sock *sk) in sk_refcnt_debug_inc() argument
696 atomic_inc(&sk->sk_prot->socks); in sk_refcnt_debug_inc()
699 static inline void sk_refcnt_debug_dec(struct sock *sk) in sk_refcnt_debug_dec() argument
701 atomic_dec(&sk->sk_prot->socks); in sk_refcnt_debug_dec()
703 sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks)); in sk_refcnt_debug_dec()
706 static inline void sk_refcnt_debug_release(const struct sock *sk) in sk_refcnt_debug_release() argument
708 if (atomic_read(&sk->sk_refcnt) != 1) in sk_refcnt_debug_release()
710 sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt)); in sk_refcnt_debug_release()
713 #define sk_refcnt_debug_inc(sk) do { } while (0) argument
714 #define sk_refcnt_debug_dec(sk) do { } while (0) argument
715 #define sk_refcnt_debug_release(sk) do { } while (0) argument
734 static inline void __sk_prot_rehash(struct sock *sk) in __sk_prot_rehash() argument
736 sk->sk_prot->unhash(sk); in __sk_prot_rehash()
737 sk->sk_prot->hash(sk); in __sk_prot_rehash()
762 struct sock *sk; member
796 extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
797 extern void __sk_mem_reclaim(struct sock *sk);
809 static inline int sk_has_account(struct sock *sk) in sk_has_account() argument
812 return !!sk->sk_prot->memory_allocated; in sk_has_account()
815 static inline int sk_wmem_schedule(struct sock *sk, int size) in sk_wmem_schedule() argument
817 if (!sk_has_account(sk)) in sk_wmem_schedule()
819 return size <= sk->sk_forward_alloc || in sk_wmem_schedule()
820 __sk_mem_schedule(sk, size, SK_MEM_SEND); in sk_wmem_schedule()
823 static inline int sk_rmem_schedule(struct sock *sk, int size) in sk_rmem_schedule() argument
825 if (!sk_has_account(sk)) in sk_rmem_schedule()
827 return size <= sk->sk_forward_alloc || in sk_rmem_schedule()
828 __sk_mem_schedule(sk, size, SK_MEM_RECV); in sk_rmem_schedule()
831 static inline void sk_mem_reclaim(struct sock *sk) in sk_mem_reclaim() argument
833 if (!sk_has_account(sk)) in sk_mem_reclaim()
835 if (sk->sk_forward_alloc >= SK_MEM_QUANTUM) in sk_mem_reclaim()
836 __sk_mem_reclaim(sk); in sk_mem_reclaim()
839 static inline void sk_mem_reclaim_partial(struct sock *sk) in sk_mem_reclaim_partial() argument
841 if (!sk_has_account(sk)) in sk_mem_reclaim_partial()
843 if (sk->sk_forward_alloc > SK_MEM_QUANTUM) in sk_mem_reclaim_partial()
844 __sk_mem_reclaim(sk); in sk_mem_reclaim_partial()
847 static inline void sk_mem_charge(struct sock *sk, int size) in sk_mem_charge() argument
849 if (!sk_has_account(sk)) in sk_mem_charge()
851 sk->sk_forward_alloc -= size; in sk_mem_charge()
854 static inline void sk_mem_uncharge(struct sock *sk, int size) in sk_mem_uncharge() argument
856 if (!sk_has_account(sk)) in sk_mem_uncharge()
858 sk->sk_forward_alloc += size; in sk_mem_uncharge()
861 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) in sk_wmem_free_skb() argument
863 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); in sk_wmem_free_skb()
864 sk->sk_wmem_queued -= skb->truesize; in sk_wmem_free_skb()
865 sk_mem_uncharge(sk, skb->truesize); in sk_wmem_free_skb()
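
The charge/uncharge pair only moves bytes in and out of sk_forward_alloc; the schedule functions are what actually reserve SK_MEM_QUANTUM-sized chunks from the protocol's global memory_allocated pool, and sk_has_account() makes all of this a no-op for protocols without such a pool. A hedged receive-side sketch:

    if (!sk_rmem_schedule(sk, skb->truesize))
            goto drop;                      /* over protocol memory limits */
    skb_set_owner_r(skb, sk);               /* charges skb->truesize
                                             * via sk_mem_charge() */
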
882 #define sock_owned_by_user(sk) ((sk)->sk_lock.owned) argument
891 #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ argument
893 sk->sk_lock.owned = 0; \
894 init_waitqueue_head(&sk->sk_lock.wq); \
895 spin_lock_init(&(sk)->sk_lock.slock); \
896 debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
897 sizeof((sk)->sk_lock)); \
898 lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
900 lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
903 extern void lock_sock_nested(struct sock *sk, int subclass);
905 static inline void lock_sock(struct sock *sk) in lock_sock() argument
907 lock_sock_nested(sk, 0); in lock_sock()
910 extern void release_sock(struct sock *sk);
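
lock_sock()/release_sock() give process context exclusive ownership of the socket; release_sock() also processes anything softirqs queued on sk_backlog in the meantime (see sk_add_backlog above). Typical use:

    lock_sock(sk);
    /* ... update socket state that softirqs must not race with ... */
    release_sock(sk);                       /* also drains the backlog */
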
922 extern void sk_free(struct sock *sk);
923 extern void sk_release_kernel(struct sock *sk);
924 extern struct sock *sk_clone(const struct sock *sk,
927 extern struct sk_buff *sock_wmalloc(struct sock *sk,
930 extern struct sk_buff *sock_rmalloc(struct sock *sk,
943 extern struct sk_buff *sock_alloc_send_skb(struct sock *sk,
947 extern void *sock_kmalloc(struct sock *sk, int size,
949 extern void sock_kfree_s(struct sock *sk, void *mem, int size);
950 extern void sk_send_sigurg(struct sock *sk);
1003 extern void sk_common_release(struct sock *sk);
1010 extern void sock_init_data(struct socket *sock, struct sock *sk);
1025 static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) in sk_filter_uncharge() argument
1029 atomic_sub(size, &sk->sk_omem_alloc); in sk_filter_uncharge()
1033 static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp) in sk_filter_charge() argument
1036 atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc); in sk_filter_charge()
1065 static inline void sock_put(struct sock *sk) in sock_put() argument
1067 if (atomic_dec_and_test(&sk->sk_refcnt)) in sock_put()
1068 sk_free(sk); in sock_put()
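
sock_put() is the drop side of sock_hold(): the final put frees the socket through sk_free(). A lookup routine that hands a socket to a caller conventionally returns it held; a hedged sketch, with my_lookup hypothetical:

    sk = my_lookup(&my_table, key);         /* returns sk with a reference held */
    if (sk) {
            /* ... use the socket ... */
            sock_put(sk);                   /* balance the lookup's sock_hold() */
    }
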
1071 extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
1074 static inline void sk_set_socket(struct sock *sk, struct socket *sock) in sk_set_socket() argument
1076 sk->sk_socket = sock; in sk_set_socket()
1086 static inline void sock_orphan(struct sock *sk) in sock_orphan() argument
1088 write_lock_bh(&sk->sk_callback_lock); in sock_orphan()
1089 sock_set_flag(sk, SOCK_DEAD); in sock_orphan()
1090 sk_set_socket(sk, NULL); in sock_orphan()
1091 sk->sk_sleep = NULL; in sock_orphan()
1092 write_unlock_bh(&sk->sk_callback_lock); in sock_orphan()
1095 static inline void sock_graft(struct sock *sk, struct socket *parent) in sock_graft() argument
1097 write_lock_bh(&sk->sk_callback_lock); in sock_graft()
1098 sk->sk_sleep = &parent->wait; in sock_graft()
1099 parent->sk = sk; in sock_graft()
1100 sk_set_socket(sk, parent); in sock_graft()
1101 security_sock_graft(sk, parent); in sock_graft()
1102 write_unlock_bh(&sk->sk_callback_lock); in sock_graft()
1105 extern int sock_i_uid(struct sock *sk);
1106 extern unsigned long sock_i_ino(struct sock *sk);
1109 __sk_dst_get(struct sock *sk) in __sk_dst_get() argument
1111 return sk->sk_dst_cache; in __sk_dst_get()
1115 sk_dst_get(struct sock *sk) in sk_dst_get() argument
1119 read_lock(&sk->sk_dst_lock); in sk_dst_get()
1120 dst = sk->sk_dst_cache; in sk_dst_get()
1123 read_unlock(&sk->sk_dst_lock); in sk_dst_get()
1128 __sk_dst_set(struct sock *sk, struct dst_entry *dst) in __sk_dst_set() argument
1132 old_dst = sk->sk_dst_cache; in __sk_dst_set()
1133 sk->sk_dst_cache = dst; in __sk_dst_set()
1138 sk_dst_set(struct sock *sk, struct dst_entry *dst) in sk_dst_set() argument
1140 write_lock(&sk->sk_dst_lock); in sk_dst_set()
1141 __sk_dst_set(sk, dst); in sk_dst_set()
1142 write_unlock(&sk->sk_dst_lock); in sk_dst_set()
1146 __sk_dst_reset(struct sock *sk) in __sk_dst_reset() argument
1150 old_dst = sk->sk_dst_cache; in __sk_dst_reset()
1151 sk->sk_dst_cache = NULL; in __sk_dst_reset()
1156 sk_dst_reset(struct sock *sk) in sk_dst_reset() argument
1158 write_lock(&sk->sk_dst_lock); in sk_dst_reset()
1159 __sk_dst_reset(sk); in sk_dst_reset()
1160 write_unlock(&sk->sk_dst_lock); in sk_dst_reset()
1163 extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
1165 extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
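
The __sk_dst_* variants assume the caller already holds sk_dst_lock (or otherwise owns the socket); the plain versions take the lock themselves. sk_dst_check() additionally validates the cached route against a cookie, dropping it if stale. A hedged output-path sketch; my_route_lookup is hypothetical:

    struct dst_entry *dst = sk_dst_check(sk, 0);

    if (dst == NULL) {
            dst = my_route_lookup(sk);      /* protocol-specific re-route */
            if (dst == NULL)
                    return -EHOSTUNREACH;
            sk_dst_set(sk, dst);            /* cache under sk_dst_lock */
    }
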
1167 static inline int sk_can_gso(const struct sock *sk) in sk_can_gso() argument
1169 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); in sk_can_gso()
1172 extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
1174 static inline int skb_copy_to_page(struct sock *sk, char __user *from, in skb_copy_to_page() argument
1192 sk->sk_wmem_queued += copy; in skb_copy_to_page()
1193 sk_mem_charge(sk, copy); in skb_copy_to_page()
1206 static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) in skb_set_owner_w() argument
1208 sock_hold(sk); in skb_set_owner_w()
1209 skb->sk = sk; in skb_set_owner_w()
1211 atomic_add(skb->truesize, &sk->sk_wmem_alloc); in skb_set_owner_w()
1214 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) in skb_set_owner_r() argument
1216 skb->sk = sk; in skb_set_owner_r()
1218 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in skb_set_owner_r()
1219 sk_mem_charge(sk, skb->truesize); in skb_set_owner_r()
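
skb_set_owner_w()/skb_set_owner_r() tie a buffer's truesize to the socket's write/read accounting; the matching skb destructors (sock_wfree/sock_rfree) give the bytes back when the skb is freed. Sketch for a locally built transmit buffer:

    skb = alloc_skb(len, sk->sk_allocation);
    if (skb)
            skb_set_owner_w(skb, sk);       /* holds sk, charges sk_wmem_alloc;
                                             * undone by sock_wfree() */
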
1222 extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
1225 extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
1227 extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
1229 static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_err_skb() argument
1234 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= in sock_queue_err_skb()
1235 (unsigned)sk->sk_rcvbuf) in sock_queue_err_skb()
1237 skb_set_owner_r(skb, sk); in sock_queue_err_skb()
1238 skb_queue_tail(&sk->sk_error_queue, skb); in sock_queue_err_skb()
1239 if (!sock_flag(sk, SOCK_DEAD)) in sock_queue_err_skb()
1240 sk->sk_data_ready(sk, skb->len); in sock_queue_err_skb()
1248 static inline int sock_error(struct sock *sk) in sock_error() argument
1251 if (likely(!sk->sk_err)) in sock_error()
1253 err = xchg(&sk->sk_err, 0); in sock_error()
1257 static inline unsigned long sock_wspace(struct sock *sk) in sock_wspace() argument
1261 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { in sock_wspace()
1262 amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); in sock_wspace()
1269 static inline void sk_wake_async(struct sock *sk, int how, int band) in sk_wake_async() argument
1271 if (sk->sk_socket && sk->sk_socket->fasync_list) in sk_wake_async()
1272 sock_wake_async(sk->sk_socket, how, band); in sk_wake_async()
1278 static inline void sk_stream_moderate_sndbuf(struct sock *sk) in sk_stream_moderate_sndbuf() argument
1280 if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) { in sk_stream_moderate_sndbuf()
1281 sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); in sk_stream_moderate_sndbuf()
1282 sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF); in sk_stream_moderate_sndbuf()
1286 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
1288 static inline struct page *sk_stream_alloc_page(struct sock *sk) in sk_stream_alloc_page() argument
1292 page = alloc_pages(sk->sk_allocation, 0); in sk_stream_alloc_page()
1294 sk->sk_prot->enter_memory_pressure(sk); in sk_stream_alloc_page()
1295 sk_stream_moderate_sndbuf(sk); in sk_stream_alloc_page()
1303 static inline int sock_writeable(const struct sock *sk) in sock_writeable() argument
1305 return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); in sock_writeable()
1313 static inline long sock_rcvtimeo(const struct sock *sk, int noblock) in sock_rcvtimeo() argument
1315 return noblock ? 0 : sk->sk_rcvtimeo; in sock_rcvtimeo()
1318 static inline long sock_sndtimeo(const struct sock *sk, int noblock) in sock_sndtimeo() argument
1320 return noblock ? 0 : sk->sk_sndtimeo; in sock_sndtimeo()
1323 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len) in sock_rcvlowat() argument
1325 return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1; in sock_rcvlowat()
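
These helpers encode the standard recvmsg/sendmsg prologue: MSG_DONTWAIT maps to a zero timeout, and sock_rcvlowat() yields the byte target a reader waits for (at least 1). Sketched as in a typical recvmsg:

    timeo  = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
    target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
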
1336 extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
1340 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) in sock_recv_timestamp() argument
1344 if (sock_flag(sk, SOCK_RCVTSTAMP)) in sock_recv_timestamp()
1345 __sock_recv_timestamp(msg, sk, skb); in sock_recv_timestamp()
1347 sk->sk_stamp = kt; in sock_recv_timestamp()
1360 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early) in sk_eat_skb() argument
1362 __skb_unlink(skb, &sk->sk_receive_queue); in sk_eat_skb()
1366 __skb_queue_tail(&sk->sk_async_wait_queue, skb); in sk_eat_skb()
1369 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early) in sk_eat_skb() argument
1371 __skb_unlink(skb, &sk->sk_receive_queue); in sk_eat_skb()
1377 struct net *sock_net(const struct sock *sk) in sock_net() argument
1380 return sk->sk_net; in sock_net()
1387 void sock_net_set(struct sock *sk, struct net *net) in sock_net_set() argument
1390 sk->sk_net = net; in sock_net_set()
1400 static inline void sk_change_net(struct sock *sk, struct net *net) in sk_change_net() argument
1402 put_net(sock_net(sk)); in sk_change_net()
1403 sock_net_set(sk, hold_net(net)); in sk_change_net()
1408 if (unlikely(skb->sk)) { in skb_steal_sock()
1409 struct sock *sk = skb->sk; in skb_steal_sock() local
1412 skb->sk = NULL; in skb_steal_sock()
1413 return sk; in skb_steal_sock()
1418 extern void sock_enable_timestamp(struct sock *sk);