Lines Matching refs: sk (include/net/sock.h)

84 #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \  argument
89 void SOCK_DEBUG(const struct sock *sk, const char *msg, ...) in SOCK_DEBUG() argument
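
SOCK_DEBUG() only prints when the application has enabled SO_DEBUG on the socket, i.e. when the SOCK_DBG flag tested above is set. A minimal sketch of a protocol receive hook using it (my_proto_rcv is a hypothetical name):

#include <net/sock.h>

static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
        /* Emitted only if SOCK_DBG is set on sk (via SO_DEBUG). */
        SOCK_DEBUG(sk, "my_proto: rx %u bytes, state %d\n",
                   skb->len, sk->sk_state);
        return 0;
}
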
513 void (*sk_state_change)(struct sock *sk);
514 void (*sk_data_ready)(struct sock *sk);
515 void (*sk_write_space)(struct sock *sk);
516 void (*sk_error_report)(struct sock *sk);
517 int (*sk_backlog_rcv)(struct sock *sk,
520 struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
524 void (*sk_destruct)(struct sock *sk);
563 static inline bool sk_user_data_is_nocopy(const struct sock *sk) in sk_user_data_is_nocopy() argument
565 return ((uintptr_t)sk->sk_user_data & SK_USER_DATA_NOCOPY); in sk_user_data_is_nocopy()
568 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) argument
579 __rcu_dereference_sk_user_data_with_flags(const struct sock *sk, in __rcu_dereference_sk_user_data_with_flags() argument
582 uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk)); in __rcu_dereference_sk_user_data_with_flags()
591 #define rcu_dereference_sk_user_data(sk) \ argument
592 __rcu_dereference_sk_user_data_with_flags(sk, 0)
593 #define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags) \ argument
599 rcu_assign_pointer(__sk_user_data((sk)), \
602 #define rcu_assign_sk_user_data(sk, ptr) \ argument
603 __rcu_assign_sk_user_data_with_flags(sk, ptr, 0)
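
sk->sk_user_data is treated here as an RCU-protected pointer whose low bits may carry flags such as SK_USER_DATA_NOCOPY. A sketch of the usual writer/reader pairing, assuming a hypothetical per-socket context struct my_ctx:

#include <linux/rcupdate.h>
#include <net/sock.h>

struct my_ctx {                         /* hypothetical per-socket state */
        int id;
};

static void my_attach(struct sock *sk, struct my_ctx *ctx)
{
        rcu_assign_sk_user_data(sk, ctx);       /* publish */
}

static int my_lookup_id(struct sock *sk)
{
        struct my_ctx *ctx;
        int id = -1;

        rcu_read_lock();
        ctx = rcu_dereference_sk_user_data(sk);
        if (ctx)
                id = ctx->id;
        rcu_read_unlock();
        return id;
}
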
616 int sk_set_peek_off(struct sock *sk, int val);
618 static inline int sk_peek_offset(struct sock *sk, int flags) in sk_peek_offset() argument
621 return READ_ONCE(sk->sk_peek_off); in sk_peek_offset()
627 static inline void sk_peek_offset_bwd(struct sock *sk, int val) in sk_peek_offset_bwd() argument
629 s32 off = READ_ONCE(sk->sk_peek_off); in sk_peek_offset_bwd()
633 WRITE_ONCE(sk->sk_peek_off, off); in sk_peek_offset_bwd()
637 static inline void sk_peek_offset_fwd(struct sock *sk, int val) in sk_peek_offset_fwd() argument
639 sk_peek_offset_bwd(sk, -val); in sk_peek_offset_fwd()
670 static inline struct sock *sk_next(const struct sock *sk) in sk_next() argument
672 return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node); in sk_next()
675 static inline struct sock *sk_nulls_next(const struct sock *sk) in sk_nulls_next() argument
677 return (!is_a_nulls(sk->sk_nulls_node.next)) ? in sk_nulls_next()
678 hlist_nulls_entry(sk->sk_nulls_node.next, in sk_nulls_next()
683 static inline bool sk_unhashed(const struct sock *sk) in sk_unhashed() argument
685 return hlist_unhashed(&sk->sk_node); in sk_unhashed()
688 static inline bool sk_hashed(const struct sock *sk) in sk_hashed() argument
690 return !sk_unhashed(sk); in sk_hashed()
703 static inline void __sk_del_node(struct sock *sk) in __sk_del_node() argument
705 __hlist_del(&sk->sk_node); in __sk_del_node()
709 static inline bool __sk_del_node_init(struct sock *sk) in __sk_del_node_init() argument
711 if (sk_hashed(sk)) { in __sk_del_node_init()
712 __sk_del_node(sk); in __sk_del_node_init()
713 sk_node_init(&sk->sk_node); in __sk_del_node_init()
725 static __always_inline void sock_hold(struct sock *sk) in sock_hold() argument
727 refcount_inc(&sk->sk_refcnt); in sock_hold()
733 static __always_inline void __sock_put(struct sock *sk) in __sock_put() argument
735 refcount_dec(&sk->sk_refcnt); in __sock_put()
738 static inline bool sk_del_node_init(struct sock *sk) in sk_del_node_init() argument
740 bool rc = __sk_del_node_init(sk); in sk_del_node_init()
744 WARN_ON(refcount_read(&sk->sk_refcnt) == 1); in sk_del_node_init()
745 __sock_put(sk); in sk_del_node_init()
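
sock_hold() takes a reference that sock_put() (declared further down, at 1820) drops; __sock_put() is for callers who know theirs cannot be the last reference, which is why sk_del_node_init() warns if the count is already 1. A sketch of keeping a socket alive across deferred work, with hypothetical my_work/my_deferred names:

#include <linux/slab.h>
#include <linux/workqueue.h>
#include <net/sock.h>

struct my_work {
        struct work_struct work;
        struct sock *sk;
};

static void my_deferred(struct work_struct *w)
{
        struct my_work *mw = container_of(w, struct my_work, work);

        /* ... operate on mw->sk ... */
        sock_put(mw->sk);               /* may free the socket if last ref */
        kfree(mw);
}

static void my_defer_on(struct sock *sk)
{
        struct my_work *mw = kmalloc(sizeof(*mw), GFP_ATOMIC);

        if (!mw)
                return;
        sock_hold(sk);                  /* keep sk alive until the work runs */
        mw->sk = sk;
        INIT_WORK(&mw->work, my_deferred);
        schedule_work(&mw->work);
}
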
749 #define sk_del_node_init_rcu(sk) sk_del_node_init(sk) argument
751 static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk) in __sk_nulls_del_node_init_rcu() argument
753 if (sk_hashed(sk)) { in __sk_nulls_del_node_init_rcu()
754 hlist_nulls_del_init_rcu(&sk->sk_nulls_node); in __sk_nulls_del_node_init_rcu()
760 static inline bool sk_nulls_del_node_init_rcu(struct sock *sk) in sk_nulls_del_node_init_rcu() argument
762 bool rc = __sk_nulls_del_node_init_rcu(sk); in sk_nulls_del_node_init_rcu()
766 WARN_ON(refcount_read(&sk->sk_refcnt) == 1); in sk_nulls_del_node_init_rcu()
767 __sock_put(sk); in sk_nulls_del_node_init_rcu()
772 static inline void __sk_add_node(struct sock *sk, struct hlist_head *list) in __sk_add_node() argument
774 hlist_add_head(&sk->sk_node, list); in __sk_add_node()
777 static inline void sk_add_node(struct sock *sk, struct hlist_head *list) in sk_add_node() argument
779 sock_hold(sk); in sk_add_node()
780 __sk_add_node(sk, list); in sk_add_node()
783 static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) in sk_add_node_rcu() argument
785 sock_hold(sk); in sk_add_node_rcu()
786 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && in sk_add_node_rcu()
787 sk->sk_family == AF_INET6) in sk_add_node_rcu()
788 hlist_add_tail_rcu(&sk->sk_node, list); in sk_add_node_rcu()
790 hlist_add_head_rcu(&sk->sk_node, list); in sk_add_node_rcu()
793 static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list) in sk_add_node_tail_rcu() argument
795 sock_hold(sk); in sk_add_node_tail_rcu()
796 hlist_add_tail_rcu(&sk->sk_node, list); in sk_add_node_tail_rcu()
799 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) in __sk_nulls_add_node_rcu() argument
801 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); in __sk_nulls_add_node_rcu()
804 static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list) in __sk_nulls_add_node_tail_rcu() argument
806 hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list); in __sk_nulls_add_node_tail_rcu()
809 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) in sk_nulls_add_node_rcu() argument
811 sock_hold(sk); in sk_nulls_add_node_rcu()
812 __sk_nulls_add_node_rcu(sk, list); in sk_nulls_add_node_rcu()
815 static inline void __sk_del_bind_node(struct sock *sk) in __sk_del_bind_node() argument
817 __hlist_del(&sk->sk_bind_node); in __sk_del_bind_node()
820 static inline void sk_add_bind_node(struct sock *sk, in sk_add_bind_node() argument
823 hlist_add_head(&sk->sk_bind_node, list); in sk_add_bind_node()
858 static inline struct user_namespace *sk_user_ns(struct sock *sk) in sk_user_ns() argument
864 return sk->sk_socket->file->f_cred->user_ns; in sk_user_ns()
908 static inline void sock_set_flag(struct sock *sk, enum sock_flags flag) in sock_set_flag() argument
910 __set_bit(flag, &sk->sk_flags); in sock_set_flag()
913 static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag) in sock_reset_flag() argument
915 __clear_bit(flag, &sk->sk_flags); in sock_reset_flag()
918 static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit, in sock_valbool_flag() argument
922 sock_set_flag(sk, bit); in sock_valbool_flag()
924 sock_reset_flag(sk, bit); in sock_valbool_flag()
927 static inline bool sock_flag(const struct sock *sk, enum sock_flags flag) in sock_flag() argument
929 return test_bit(flag, &sk->sk_flags); in sock_flag()
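
sock_set_flag()/sock_reset_flag() use the non-atomic __set_bit()/__clear_bit(), so writers are expected to serialize, normally under the socket lock; sock_flag() is a plain test_bit() and is safe locklessly. A small sketch (my_set_debug/my_is_dead are hypothetical):

#include <net/sock.h>

static void my_set_debug(struct sock *sk, bool on)
{
        lock_sock(sk);          /* flag writers rely on the socket lock */
        sock_valbool_flag(sk, SOCK_DBG, on);
        release_sock(sk);
}

static bool my_is_dead(const struct sock *sk)
{
        return sock_flag(sk, SOCK_DEAD);        /* lockless read is fine */
}
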
951 static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask) in sk_gfp_mask() argument
953 return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC); in sk_gfp_mask()
956 static inline void sk_acceptq_removed(struct sock *sk) in sk_acceptq_removed() argument
958 WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1); in sk_acceptq_removed()
961 static inline void sk_acceptq_added(struct sock *sk) in sk_acceptq_added() argument
963 WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1); in sk_acceptq_added()
966 static inline bool sk_acceptq_is_full(const struct sock *sk) in sk_acceptq_is_full() argument
968 return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog); in sk_acceptq_is_full()
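
sk_ack_backlog counts children waiting to be accept()ed against sk_max_ack_backlog (the listen() backlog); READ_ONCE()/WRITE_ONCE() are used because the full check runs locklessly. A sketch of how a connection-request handler might apply it (my_conn_request is hypothetical):

#include <net/sock.h>

static int my_conn_request(struct sock *sk, struct sk_buff *skb)
{
        /* Listener overloaded: drop the request; the peer will retry. */
        if (sk_acceptq_is_full(sk))
                return -ENOBUFS;

        /* ... create the child socket, then sk_acceptq_added(sk) ... */
        return 0;
}
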
974 static inline int sk_stream_min_wspace(const struct sock *sk) in sk_stream_min_wspace() argument
976 return READ_ONCE(sk->sk_wmem_queued) >> 1; in sk_stream_min_wspace()
979 static inline int sk_stream_wspace(const struct sock *sk) in sk_stream_wspace() argument
981 return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued); in sk_stream_wspace()
984 static inline void sk_wmem_queued_add(struct sock *sk, int val) in sk_wmem_queued_add() argument
986 WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val); in sk_wmem_queued_add()
989 void sk_stream_write_space(struct sock *sk);
992 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) in __sk_add_backlog() argument
997 if (!sk->sk_backlog.tail) in __sk_add_backlog()
998 WRITE_ONCE(sk->sk_backlog.head, skb); in __sk_add_backlog()
1000 sk->sk_backlog.tail->next = skb; in __sk_add_backlog()
1002 WRITE_ONCE(sk->sk_backlog.tail, skb); in __sk_add_backlog()
1011 static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit) in sk_rcvqueues_full() argument
1013 unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); in sk_rcvqueues_full()
1019 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb, in sk_add_backlog() argument
1022 if (sk_rcvqueues_full(sk, limit)) in sk_add_backlog()
1030 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) in sk_add_backlog()
1033 __sk_add_backlog(sk, skb); in sk_add_backlog()
1034 sk->sk_backlog.len += skb->truesize; in sk_add_backlog()
1038 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
1040 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) in sk_backlog_rcv() argument
1043 return __sk_backlog_rcv(sk, skb); in sk_backlog_rcv()
1045 return sk->sk_backlog_rcv(sk, skb); in sk_backlog_rcv()
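
This is the classic receive-path split: if no process currently owns the socket, handle the packet in softirq context; otherwise park it on the backlog for release_sock() to replay later. A sketch modeled on that pattern, with hypothetical my_proto names and a simple sk_rcvbuf-based limit:

#include <net/sock.h>

static int my_proto_do_rcv(struct sock *sk, struct sk_buff *skb);   /* hypothetical */

static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
        int ret = 0;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                ret = my_proto_do_rcv(sk, skb);         /* process now */
        } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
                kfree_skb(skb);                         /* backlog over limit */
                ret = -ENOBUFS;
        }
        bh_unlock_sock(sk);
        return ret;
}
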
1048 static inline void sk_incoming_cpu_update(struct sock *sk) in sk_incoming_cpu_update() argument
1052 if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu)) in sk_incoming_cpu_update()
1053 WRITE_ONCE(sk->sk_incoming_cpu, cpu); in sk_incoming_cpu_update()
1068 static inline void sock_rps_record_flow(const struct sock *sk) in sock_rps_record_flow() argument
1082 if (sk->sk_state == TCP_ESTABLISHED) in sock_rps_record_flow()
1083 sock_rps_record_flow_hash(sk->sk_rxhash); in sock_rps_record_flow()
1088 static inline void sock_rps_save_rxhash(struct sock *sk, in sock_rps_save_rxhash() argument
1092 if (unlikely(sk->sk_rxhash != skb->hash)) in sock_rps_save_rxhash()
1093 sk->sk_rxhash = skb->hash; in sock_rps_save_rxhash()
1097 static inline void sock_rps_reset_rxhash(struct sock *sk) in sock_rps_reset_rxhash() argument
1100 sk->sk_rxhash = 0; in sock_rps_reset_rxhash()
1119 int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
1120 int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
1121 void sk_stream_wait_close(struct sock *sk, long timeo_p);
1122 int sk_stream_error(struct sock *sk, int flags, int err);
1123 void sk_stream_kill_queues(struct sock *sk);
1124 void sk_set_memalloc(struct sock *sk);
1125 void sk_clear_memalloc(struct sock *sk);
1127 void __sk_flush_backlog(struct sock *sk);
1129 static inline bool sk_flush_backlog(struct sock *sk) in sk_flush_backlog() argument
1131 if (unlikely(READ_ONCE(sk->sk_backlog.tail))) { in sk_flush_backlog()
1132 __sk_flush_backlog(sk); in sk_flush_backlog()
1138 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
1151 static inline void sk_prot_clear_nulls(struct sock *sk, int size) in sk_prot_clear_nulls() argument
1154 memset(sk, 0, offsetof(struct sock, sk_node.next)); in sk_prot_clear_nulls()
1155 memset(&sk->sk_node.pprev, 0, in sk_prot_clear_nulls()
1163 void (*close)(struct sock *sk,
1165 int (*pre_connect)(struct sock *sk,
1168 int (*connect)(struct sock *sk,
1171 int (*disconnect)(struct sock *sk, int flags);
1173 struct sock * (*accept)(struct sock *sk, int flags, int *err,
1176 int (*ioctl)(struct sock *sk, int cmd,
1178 int (*init)(struct sock *sk);
1179 void (*destroy)(struct sock *sk);
1180 void (*shutdown)(struct sock *sk, int how);
1181 int (*setsockopt)(struct sock *sk, int level,
1184 int (*getsockopt)(struct sock *sk, int level,
1187 void (*keepalive)(struct sock *sk, int valbool);
1189 int (*compat_ioctl)(struct sock *sk,
1192 int (*sendmsg)(struct sock *sk, struct msghdr *msg,
1194 int (*recvmsg)(struct sock *sk, struct msghdr *msg,
1197 int (*sendpage)(struct sock *sk, struct page *page,
1199 int (*bind)(struct sock *sk,
1201 int (*bind_add)(struct sock *sk,
1204 int (*backlog_rcv) (struct sock *sk,
1207 void (*release_cb)(struct sock *sk);
1210 int (*hash)(struct sock *sk);
1211 void (*unhash)(struct sock *sk);
1212 void (*rehash)(struct sock *sk);
1213 int (*get_port)(struct sock *sk, unsigned short snum);
1220 bool (*stream_memory_free)(const struct sock *sk, int wake);
1221 bool (*stream_memory_read)(const struct sock *sk);
1223 void (*enter_memory_pressure)(struct sock *sk);
1224 void (*leave_memory_pressure)(struct sock *sk);
1270 int (*diag_destroy)(struct sock *sk, int err);
1278 static inline void sk_refcnt_debug_inc(struct sock *sk) in sk_refcnt_debug_inc() argument
1280 atomic_inc(&sk->sk_prot->socks); in sk_refcnt_debug_inc()
1283 static inline void sk_refcnt_debug_dec(struct sock *sk) in sk_refcnt_debug_dec() argument
1285 atomic_dec(&sk->sk_prot->socks); in sk_refcnt_debug_dec()
1287 sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks)); in sk_refcnt_debug_dec()
1290 static inline void sk_refcnt_debug_release(const struct sock *sk) in sk_refcnt_debug_release() argument
1292 if (refcount_read(&sk->sk_refcnt) != 1) in sk_refcnt_debug_release()
1294 sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt)); in sk_refcnt_debug_release()
1297 #define sk_refcnt_debug_inc(sk) do { } while (0) argument
1298 #define sk_refcnt_debug_dec(sk) do { } while (0) argument
1299 #define sk_refcnt_debug_release(sk) do { } while (0) argument
1302 static inline bool __sk_stream_memory_free(const struct sock *sk, int wake) in __sk_stream_memory_free() argument
1304 if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf)) in __sk_stream_memory_free()
1307 return sk->sk_prot->stream_memory_free ? in __sk_stream_memory_free()
1308 sk->sk_prot->stream_memory_free(sk, wake) : true; in __sk_stream_memory_free()
1311 static inline bool sk_stream_memory_free(const struct sock *sk) in sk_stream_memory_free() argument
1313 return __sk_stream_memory_free(sk, 0); in sk_stream_memory_free()
1316 static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake) in __sk_stream_is_writeable() argument
1318 return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && in __sk_stream_is_writeable()
1319 __sk_stream_memory_free(sk, wake); in __sk_stream_is_writeable()
1322 static inline bool sk_stream_is_writeable(const struct sock *sk) in sk_stream_is_writeable() argument
1324 return __sk_stream_is_writeable(sk, 0); in sk_stream_is_writeable()
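
sk_stream_is_writeable() combines the half-of-queued low-water mark from sk_stream_min_wspace() with the protocol's optional stream_memory_free hook. A typical consumer is a poll handler; a minimal sketch:

#include <linux/poll.h>
#include <net/sock.h>

static __poll_t my_poll_mask(struct sock *sk)
{
        __poll_t mask = 0;

        if (sk_stream_is_writeable(sk))
                mask |= EPOLLOUT | EPOLLWRNORM;
        return mask;
}
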
1327 static inline int sk_under_cgroup_hierarchy(struct sock *sk, in sk_under_cgroup_hierarchy() argument
1331 return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), in sk_under_cgroup_hierarchy()
1338 static inline bool sk_has_memory_pressure(const struct sock *sk) in sk_has_memory_pressure() argument
1340 return sk->sk_prot->memory_pressure != NULL; in sk_has_memory_pressure()
1343 static inline bool sk_under_memory_pressure(const struct sock *sk) in sk_under_memory_pressure() argument
1345 if (!sk->sk_prot->memory_pressure) in sk_under_memory_pressure()
1348 if (mem_cgroup_sockets_enabled && sk->sk_memcg && in sk_under_memory_pressure()
1349 mem_cgroup_under_socket_pressure(sk->sk_memcg)) in sk_under_memory_pressure()
1352 return !!*sk->sk_prot->memory_pressure; in sk_under_memory_pressure()
1356 sk_memory_allocated(const struct sock *sk) in sk_memory_allocated() argument
1358 return atomic_long_read(sk->sk_prot->memory_allocated); in sk_memory_allocated()
1362 sk_memory_allocated_add(struct sock *sk, int amt) in sk_memory_allocated_add() argument
1364 return atomic_long_add_return(amt, sk->sk_prot->memory_allocated); in sk_memory_allocated_add()
1368 sk_memory_allocated_sub(struct sock *sk, int amt) in sk_memory_allocated_sub() argument
1370 atomic_long_sub(amt, sk->sk_prot->memory_allocated); in sk_memory_allocated_sub()
1373 static inline void sk_sockets_allocated_dec(struct sock *sk) in sk_sockets_allocated_dec() argument
1375 percpu_counter_dec(sk->sk_prot->sockets_allocated); in sk_sockets_allocated_dec()
1378 static inline void sk_sockets_allocated_inc(struct sock *sk) in sk_sockets_allocated_inc() argument
1380 percpu_counter_inc(sk->sk_prot->sockets_allocated); in sk_sockets_allocated_inc()
1384 sk_sockets_allocated_read_positive(struct sock *sk) in sk_sockets_allocated_read_positive() argument
1386 return percpu_counter_read_positive(sk->sk_prot->sockets_allocated); in sk_sockets_allocated_read_positive()
1426 static inline int __sk_prot_rehash(struct sock *sk) in __sk_prot_rehash() argument
1428 sk->sk_prot->unhash(sk); in __sk_prot_rehash()
1429 return sk->sk_prot->hash(sk); in __sk_prot_rehash()
1465 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
1466 int __sk_mem_schedule(struct sock *sk, int size, int kind);
1467 void __sk_mem_reduce_allocated(struct sock *sk, int amount);
1468 void __sk_mem_reclaim(struct sock *sk, int amount);
1479 static inline long sk_prot_mem_limits(const struct sock *sk, int index) in sk_prot_mem_limits() argument
1481 long val = READ_ONCE(sk->sk_prot->sysctl_mem[index]); in sk_prot_mem_limits()
1496 static inline bool sk_has_account(struct sock *sk) in sk_has_account() argument
1499 return !!sk->sk_prot->memory_allocated; in sk_has_account()
1502 static inline bool sk_wmem_schedule(struct sock *sk, int size) in sk_wmem_schedule() argument
1506 if (!sk_has_account(sk)) in sk_wmem_schedule()
1508 delta = size - sk->sk_forward_alloc; in sk_wmem_schedule()
1509 return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_SEND); in sk_wmem_schedule()
1513 sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) in sk_rmem_schedule() argument
1517 if (!sk_has_account(sk)) in sk_rmem_schedule()
1519 delta = size - sk->sk_forward_alloc; in sk_rmem_schedule()
1520 return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) || in sk_rmem_schedule()
1524 static inline void sk_mem_reclaim(struct sock *sk) in sk_mem_reclaim() argument
1526 if (!sk_has_account(sk)) in sk_mem_reclaim()
1528 if (sk->sk_forward_alloc >= SK_MEM_QUANTUM) in sk_mem_reclaim()
1529 __sk_mem_reclaim(sk, sk->sk_forward_alloc); in sk_mem_reclaim()
1532 static inline void sk_mem_reclaim_partial(struct sock *sk) in sk_mem_reclaim_partial() argument
1534 if (!sk_has_account(sk)) in sk_mem_reclaim_partial()
1536 if (sk->sk_forward_alloc > SK_MEM_QUANTUM) in sk_mem_reclaim_partial()
1537 __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1); in sk_mem_reclaim_partial()
1540 static inline void sk_mem_charge(struct sock *sk, int size) in sk_mem_charge() argument
1542 if (!sk_has_account(sk)) in sk_mem_charge()
1544 sk->sk_forward_alloc -= size; in sk_mem_charge()
1547 static inline void sk_mem_uncharge(struct sock *sk, int size) in sk_mem_uncharge() argument
1549 if (!sk_has_account(sk)) in sk_mem_uncharge()
1551 sk->sk_forward_alloc += size; in sk_mem_uncharge()
1560 if (unlikely(sk->sk_forward_alloc >= 1 << 21)) in sk_mem_uncharge()
1561 __sk_mem_reclaim(sk, 1 << 20); in sk_mem_uncharge()
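
A send path reserves forward-alloc headroom with sk_wmem_schedule() before accounting an skb against the socket; sk_wmem_free_skb() just below undoes exactly this pairing. A sketch assuming the caller already holds the socket lock (my_queue_for_send is hypothetical):

#include <net/sock.h>

static int my_queue_for_send(struct sock *sk, struct sk_buff *skb)
{
        /* Reserve accounting headroom first; may enter memory pressure. */
        if (!sk_wmem_schedule(sk, skb->truesize))
                return -ENOMEM;

        sk_wmem_queued_add(sk, skb->truesize);
        sk_mem_charge(sk, skb->truesize);
        __skb_queue_tail(&sk->sk_write_queue, skb);
        return 0;
}
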
1565 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) in sk_wmem_free_skb() argument
1567 sk_wmem_queued_add(sk, -skb->truesize); in sk_wmem_free_skb()
1568 sk_mem_uncharge(sk, skb->truesize); in sk_wmem_free_skb()
1570 !sk->sk_tx_skb_cache && !skb_cloned(skb)) { in sk_wmem_free_skb()
1573 sk->sk_tx_skb_cache = skb; in sk_wmem_free_skb()
1579 static inline void sock_release_ownership(struct sock *sk) in sock_release_ownership() argument
1581 if (sk->sk_lock.owned) { in sock_release_ownership()
1582 sk->sk_lock.owned = 0; in sock_release_ownership()
1585 mutex_release(&sk->sk_lock.dep_map, _RET_IP_); in sock_release_ownership()
1596 #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ argument
1598 sk->sk_lock.owned = 0; \
1599 init_waitqueue_head(&sk->sk_lock.wq); \
1600 spin_lock_init(&(sk)->sk_lock.slock); \
1601 debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
1602 sizeof((sk)->sk_lock)); \
1603 lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
1605 lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
1609 static inline bool lockdep_sock_is_held(const struct sock *sk) in lockdep_sock_is_held() argument
1611 return lockdep_is_held(&sk->sk_lock) || in lockdep_sock_is_held()
1612 lockdep_is_held(&sk->sk_lock.slock); in lockdep_sock_is_held()
1616 void lock_sock_nested(struct sock *sk, int subclass);
1618 static inline void lock_sock(struct sock *sk) in lock_sock() argument
1620 lock_sock_nested(sk, 0); in lock_sock()
1623 void __release_sock(struct sock *sk);
1624 void release_sock(struct sock *sk);
1633 bool lock_sock_fast(struct sock *sk);
1642 static inline void unlock_sock_fast(struct sock *sk, bool slow) in unlock_sock_fast() argument
1645 release_sock(sk); in unlock_sock_fast()
1647 spin_unlock_bh(&sk->sk_lock.slock); in unlock_sock_fast()
1664 static inline void sock_owned_by_me(const struct sock *sk) in sock_owned_by_me() argument
1667 WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks); in sock_owned_by_me()
1671 static inline bool sock_owned_by_user(const struct sock *sk) in sock_owned_by_user() argument
1673 sock_owned_by_me(sk); in sock_owned_by_user()
1674 return sk->sk_lock.owned; in sock_owned_by_user()
1677 static inline bool sock_owned_by_user_nocheck(const struct sock *sk) in sock_owned_by_user_nocheck() argument
1679 return sk->sk_lock.owned; in sock_owned_by_user_nocheck()
1685 struct sock *sk = (struct sock *)csk; in sock_allow_reclassification() local
1687 return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock); in sock_allow_reclassification()
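
lock_sock() marks the socket as owned by a process and may sleep; release_sock() then replays anything queued via sk_add_backlog() above. lock_sock_fast() skips the ownership dance when there is no contention, at the cost of a non-sleeping critical section. A sketch of both (hypothetical names):

#include <net/sock.h>

static void my_update_state(struct sock *sk)
{
        lock_sock(sk);          /* may sleep; softirq rx now goes to backlog */
        /* ... modify state that the receive path must not race with ... */
        release_sock(sk);       /* processes the accumulated backlog */
}

static void my_quick_peek(struct sock *sk)
{
        bool slow = lock_sock_fast(sk);

        /* ... short, non-sleeping section ... */
        unlock_sock_fast(sk, slow);
}
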
1692 void sk_free(struct sock *sk);
1693 void sk_destruct(struct sock *sk);
1694 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
1695 void sk_free_unlock_clone(struct sock *sk);
1697 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1701 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
1720 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1722 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1725 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
1726 void sock_kfree_s(struct sock *sk, void *mem, int size);
1727 void sock_kzfree_s(struct sock *sk, void *mem, int size);
1728 void sk_send_sigurg(struct sock *sk);
1737 const struct sock *sk) in sockcm_init() argument
1739 *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags }; in sockcm_init()
1742 int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
1744 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1760 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
1766 ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
1780 void sk_common_release(struct sock *sk);
1787 void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid);
1792 void sock_init_data(struct socket *sock, struct sock *sk);
1820 static inline void sock_put(struct sock *sk) in sock_put() argument
1822 if (refcount_dec_and_test(&sk->sk_refcnt)) in sock_put()
1823 sk_free(sk); in sock_put()
1828 void sock_gen_put(struct sock *sk);
1830 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
1832 static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb, in sk_receive_skb() argument
1835 return __sk_receive_skb(sk, skb, nested, 1, true); in sk_receive_skb()
1838 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) in sk_tx_queue_set() argument
1843 sk->sk_tx_queue_mapping = tx_queue; in sk_tx_queue_set()
1848 static inline void sk_tx_queue_clear(struct sock *sk) in sk_tx_queue_clear() argument
1850 sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING; in sk_tx_queue_clear()
1853 static inline int sk_tx_queue_get(const struct sock *sk) in sk_tx_queue_get() argument
1855 if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING) in sk_tx_queue_get()
1856 return sk->sk_tx_queue_mapping; in sk_tx_queue_get()
1861 static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) in sk_rx_queue_set() argument
1870 sk->sk_rx_queue_mapping = rx_queue; in sk_rx_queue_set()
1875 static inline void sk_rx_queue_clear(struct sock *sk) in sk_rx_queue_clear() argument
1878 sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING; in sk_rx_queue_clear()
1883 static inline int sk_rx_queue_get(const struct sock *sk) in sk_rx_queue_get() argument
1885 if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING) in sk_rx_queue_get()
1886 return sk->sk_rx_queue_mapping; in sk_rx_queue_get()
1892 static inline void sk_set_socket(struct sock *sk, struct socket *sock) in sk_set_socket() argument
1894 sk->sk_socket = sock; in sk_set_socket()
1897 static inline wait_queue_head_t *sk_sleep(struct sock *sk) in sk_sleep() argument
1900 return &rcu_dereference_raw(sk->sk_wq)->wait; in sk_sleep()
1909 static inline void sock_orphan(struct sock *sk) in sock_orphan() argument
1911 write_lock_bh(&sk->sk_callback_lock); in sock_orphan()
1912 sock_set_flag(sk, SOCK_DEAD); in sock_orphan()
1913 sk_set_socket(sk, NULL); in sock_orphan()
1914 sk->sk_wq = NULL; in sock_orphan()
1915 write_unlock_bh(&sk->sk_callback_lock); in sock_orphan()
1918 static inline void sock_graft(struct sock *sk, struct socket *parent) in sock_graft() argument
1920 WARN_ON(parent->sk); in sock_graft()
1921 write_lock_bh(&sk->sk_callback_lock); in sock_graft()
1922 rcu_assign_pointer(sk->sk_wq, &parent->wq); in sock_graft()
1923 parent->sk = sk; in sock_graft()
1924 sk_set_socket(sk, parent); in sock_graft()
1925 sk->sk_uid = SOCK_INODE(parent)->i_uid; in sock_graft()
1926 security_sock_graft(sk, parent); in sock_graft()
1927 write_unlock_bh(&sk->sk_callback_lock); in sock_graft()
1930 kuid_t sock_i_uid(struct sock *sk);
1931 unsigned long sock_i_ino(struct sock *sk);
1933 static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk) in sock_net_uid() argument
1935 return sk ? sk->sk_uid : make_kuid(net->user_ns, 0); in sock_net_uid()
1945 static inline void sk_set_txhash(struct sock *sk) in sk_set_txhash() argument
1948 WRITE_ONCE(sk->sk_txhash, net_tx_rndhash()); in sk_set_txhash()
1951 static inline bool sk_rethink_txhash(struct sock *sk) in sk_rethink_txhash() argument
1953 if (sk->sk_txhash) { in sk_rethink_txhash()
1954 sk_set_txhash(sk); in sk_rethink_txhash()
1961 __sk_dst_get(struct sock *sk) in __sk_dst_get() argument
1963 return rcu_dereference_check(sk->sk_dst_cache, in __sk_dst_get()
1964 lockdep_sock_is_held(sk)); in __sk_dst_get()
1968 sk_dst_get(struct sock *sk) in sk_dst_get() argument
1973 dst = rcu_dereference(sk->sk_dst_cache); in sk_dst_get()
1980 static inline void __dst_negative_advice(struct sock *sk) in __dst_negative_advice() argument
1982 struct dst_entry *ndst, *dst = __sk_dst_get(sk); in __dst_negative_advice()
1988 rcu_assign_pointer(sk->sk_dst_cache, ndst); in __dst_negative_advice()
1989 sk_tx_queue_clear(sk); in __dst_negative_advice()
1990 sk->sk_dst_pending_confirm = 0; in __dst_negative_advice()
1995 static inline void dst_negative_advice(struct sock *sk) in dst_negative_advice() argument
1997 sk_rethink_txhash(sk); in dst_negative_advice()
1998 __dst_negative_advice(sk); in dst_negative_advice()
2002 __sk_dst_set(struct sock *sk, struct dst_entry *dst) in __sk_dst_set() argument
2006 sk_tx_queue_clear(sk); in __sk_dst_set()
2007 sk->sk_dst_pending_confirm = 0; in __sk_dst_set()
2008 old_dst = rcu_dereference_protected(sk->sk_dst_cache, in __sk_dst_set()
2009 lockdep_sock_is_held(sk)); in __sk_dst_set()
2010 rcu_assign_pointer(sk->sk_dst_cache, dst); in __sk_dst_set()
2015 sk_dst_set(struct sock *sk, struct dst_entry *dst) in sk_dst_set() argument
2019 sk_tx_queue_clear(sk); in sk_dst_set()
2020 sk->sk_dst_pending_confirm = 0; in sk_dst_set()
2021 old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst); in sk_dst_set()
2026 __sk_dst_reset(struct sock *sk) in __sk_dst_reset() argument
2028 __sk_dst_set(sk, NULL); in __sk_dst_reset()
2032 sk_dst_reset(struct sock *sk) in sk_dst_reset() argument
2034 sk_dst_set(sk, NULL); in sk_dst_reset()
2037 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
2039 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
2041 static inline void sk_dst_confirm(struct sock *sk) in sk_dst_confirm() argument
2043 if (!READ_ONCE(sk->sk_dst_pending_confirm)) in sk_dst_confirm()
2044 WRITE_ONCE(sk->sk_dst_pending_confirm, 1); in sk_dst_confirm()
2050 struct sock *sk = skb->sk; in sock_confirm_neigh() local
2056 if (sk && READ_ONCE(sk->sk_dst_pending_confirm)) in sock_confirm_neigh()
2057 WRITE_ONCE(sk->sk_dst_pending_confirm, 0); in sock_confirm_neigh()
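
The dst-cache pattern: sk_dst_check() returns the cached route only while its cookie is valid, releasing it and returning NULL otherwise, after which the output path re-routes and re-installs via sk_dst_set(). A sketch (the cookie value and the re-routing step are protocol specific):

#include <net/dst.h>
#include <net/sock.h>

static struct dst_entry *my_route_output(struct sock *sk)
{
        struct dst_entry *dst = sk_dst_check(sk, 0);    /* NULL if stale */

        if (!dst) {
                /* ... perform a fresh route lookup (protocol specific) ... */
                /* sk_dst_set(sk, dst); */
        }
        return dst;
}
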
2061 bool sk_mc_loop(struct sock *sk);
2063 static inline bool sk_can_gso(const struct sock *sk) in sk_can_gso() argument
2065 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); in sk_can_gso()
2068 void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
2070 static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags) in sk_nocaps_add() argument
2072 sk->sk_route_nocaps |= flags; in sk_nocaps_add()
2073 sk->sk_route_caps &= ~flags; in sk_nocaps_add()
2076 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb, in skb_do_copy_data_nocache() argument
2085 } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) { in skb_do_copy_data_nocache()
2094 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb, in skb_add_data_nocache() argument
2099 err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy), in skb_add_data_nocache()
2107 static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from, in skb_copy_to_page_nocache() argument
2114 err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off, in skb_copy_to_page_nocache()
2122 sk_wmem_queued_add(sk, copy); in skb_copy_to_page_nocache()
2123 sk_mem_charge(sk, copy); in skb_copy_to_page_nocache()
2133 static inline int sk_wmem_alloc_get(const struct sock *sk) in sk_wmem_alloc_get() argument
2135 return refcount_read(&sk->sk_wmem_alloc) - 1; in sk_wmem_alloc_get()
2144 static inline int sk_rmem_alloc_get(const struct sock *sk) in sk_rmem_alloc_get() argument
2146 return atomic_read(&sk->sk_rmem_alloc); in sk_rmem_alloc_get()
2155 static inline bool sk_has_allocations(const struct sock *sk) in sk_has_allocations() argument
2157 return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk); in sk_has_allocations()
2218 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk) in skb_set_hash_from_sk() argument
2221 u32 txhash = READ_ONCE(sk->sk_txhash); in skb_set_hash_from_sk()
2229 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
2239 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) in skb_set_owner_r() argument
2242 skb->sk = sk; in skb_set_owner_r()
2244 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in skb_set_owner_r()
2245 sk_mem_charge(sk, skb->truesize); in skb_set_owner_r()
2248 static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk) in skb_set_owner_sk_safe() argument
2250 if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) { in skb_set_owner_sk_safe()
2253 skb->sk = sk; in skb_set_owner_sk_safe()
2259 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
2262 void sk_stop_timer(struct sock *sk, struct timer_list *timer);
2264 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer);
2266 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
2268 void (*destructor)(struct sock *sk,
2270 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2271 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2273 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
2274 struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
2280 static inline int sock_error(struct sock *sk) in sock_error() argument
2287 if (likely(data_race(!sk->sk_err))) in sock_error()
2290 err = xchg(&sk->sk_err, 0); in sock_error()
2294 static inline unsigned long sock_wspace(struct sock *sk) in sock_wspace() argument
2298 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { in sock_wspace()
2299 amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc); in sock_wspace()
2310 static inline void sk_set_bit(int nr, struct sock *sk) in sk_set_bit() argument
2313 !sock_flag(sk, SOCK_FASYNC)) in sk_set_bit()
2316 set_bit(nr, &sk->sk_wq_raw->flags); in sk_set_bit()
2319 static inline void sk_clear_bit(int nr, struct sock *sk) in sk_clear_bit() argument
2322 !sock_flag(sk, SOCK_FASYNC)) in sk_clear_bit()
2325 clear_bit(nr, &sk->sk_wq_raw->flags); in sk_clear_bit()
2328 static inline void sk_wake_async(const struct sock *sk, int how, int band) in sk_wake_async() argument
2330 if (sock_flag(sk, SOCK_FASYNC)) { in sk_wake_async()
2332 sock_wake_async(rcu_dereference(sk->sk_wq), how, band); in sk_wake_async()
2347 static inline void sk_stream_moderate_sndbuf(struct sock *sk) in sk_stream_moderate_sndbuf() argument
2351 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) in sk_stream_moderate_sndbuf()
2354 val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); in sk_stream_moderate_sndbuf()
2356 WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF)); in sk_stream_moderate_sndbuf()
2359 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
2379 static inline struct page_frag *sk_page_frag(struct sock *sk) in sk_page_frag() argument
2381 if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) == in sk_page_frag()
2385 return &sk->sk_frag; in sk_page_frag()
2388 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
2393 static inline bool sock_writeable(const struct sock *sk) in sock_writeable() argument
2395 return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1); in sock_writeable()
2403 static inline long sock_rcvtimeo(const struct sock *sk, bool noblock) in sock_rcvtimeo() argument
2405 return noblock ? 0 : sk->sk_rcvtimeo; in sock_rcvtimeo()
2408 static inline long sock_sndtimeo(const struct sock *sk, bool noblock) in sock_sndtimeo() argument
2410 return noblock ? 0 : sk->sk_sndtimeo; in sock_sndtimeo()
2413 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len) in sock_rcvlowat() argument
2415 int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len); in sock_rcvlowat()
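
These helpers turn MSG_DONTWAIT into a zero timeout for the wait functions declared earlier (sk_stream_wait_memory() and friends). A sketch of a typical sendmsg() prologue (my_sendmsg is hypothetical):

#include <net/sock.h>

static int my_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
        long timeo;
        int err;

        lock_sock(sk);
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

        err = sock_error(sk);           /* consume any pending async error */
        if (err)
                goto out;

        while (!sk_stream_memory_free(sk)) {
                err = sk_stream_wait_memory(sk, &timeo);
                if (err)
                        goto out;
        }
        /* ... copy and queue data ... */
out:
        release_sock(sk);
        return err;
}
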
2446 sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb) in sock_skb_set_dropcount() argument
2448 SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ? in sock_skb_set_dropcount()
2449 atomic_read(&sk->sk_drops) : 0; in sock_skb_set_dropcount()
2452 static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb) in sk_drops_add() argument
2456 atomic_add(segs, &sk->sk_drops); in sk_drops_add()
2459 static inline ktime_t sock_read_timestamp(struct sock *sk) in sock_read_timestamp() argument
2466 seq = read_seqbegin(&sk->sk_stamp_seq); in sock_read_timestamp()
2467 kt = sk->sk_stamp; in sock_read_timestamp()
2468 } while (read_seqretry(&sk->sk_stamp_seq, seq)); in sock_read_timestamp()
2472 return READ_ONCE(sk->sk_stamp); in sock_read_timestamp()
2476 static inline void sock_write_timestamp(struct sock *sk, ktime_t kt) in sock_write_timestamp() argument
2479 write_seqlock(&sk->sk_stamp_seq); in sock_write_timestamp()
2480 sk->sk_stamp = kt; in sock_write_timestamp()
2481 write_sequnlock(&sk->sk_stamp_seq); in sock_write_timestamp()
2483 WRITE_ONCE(sk->sk_stamp, kt); in sock_write_timestamp()
2487 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
2489 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
2493 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) in sock_recv_timestamp() argument
2504 if (sock_flag(sk, SOCK_RCVTSTAMP) || in sock_recv_timestamp()
2505 (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) || in sock_recv_timestamp()
2506 (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) || in sock_recv_timestamp()
2508 (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE))) in sock_recv_timestamp()
2509 __sock_recv_timestamp(msg, sk, skb); in sock_recv_timestamp()
2511 sock_write_timestamp(sk, kt); in sock_recv_timestamp()
2513 if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid) in sock_recv_timestamp()
2514 __sock_recv_wifi_status(msg, sk, skb); in sock_recv_timestamp()
2517 void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2521 static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, in sock_recv_ts_and_drops() argument
2529 if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY) in sock_recv_ts_and_drops()
2530 __sock_recv_ts_and_drops(msg, sk, skb); in sock_recv_ts_and_drops()
2531 else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP))) in sock_recv_ts_and_drops()
2532 sock_write_timestamp(sk, skb->tstamp); in sock_recv_ts_and_drops()
2533 else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP)) in sock_recv_ts_and_drops()
2534 sock_write_timestamp(sk, 0); in sock_recv_ts_and_drops()
2548 static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags, in _sock_tx_timestamp() argument
2555 *tskey = sk->sk_tskey++; in _sock_tx_timestamp()
2557 if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) in _sock_tx_timestamp()
2561 static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags, in sock_tx_timestamp() argument
2564 _sock_tx_timestamp(sk, tsflags, tx_flags, NULL); in sock_tx_timestamp()
2569 _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags, in skb_setup_tx_timestamp()
2582 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) in sk_eat_skb() argument
2584 __skb_unlink(skb, &sk->sk_receive_queue); in sk_eat_skb()
2586 !sk->sk_rx_skb_cache) { in sk_eat_skb()
2587 sk->sk_rx_skb_cache = skb; in sk_eat_skb()
2595 struct net *sock_net(const struct sock *sk) in sock_net() argument
2597 return read_pnet(&sk->sk_net); in sock_net()
2601 void sock_net_set(struct sock *sk, struct net *net) in sock_net_set() argument
2603 write_pnet(&sk->sk_net, net); in sock_net_set()
2619 static inline bool sk_fullsock(const struct sock *sk) in sk_fullsock() argument
2621 return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV); in sk_fullsock()
2625 sk_is_refcounted(struct sock *sk) in sk_is_refcounted() argument
2628 return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE); in sk_is_refcounted()
2639 if (skb->sk) { in skb_steal_sock()
2640 struct sock *sk = skb->sk; in skb_steal_sock() local
2644 *refcounted = sk_is_refcounted(sk); in skb_steal_sock()
2646 skb->sk = NULL; in skb_steal_sock()
2647 return sk; in skb_steal_sock()
2661 struct sock *sk = skb->sk; in sk_validate_xmit_skb() local
2663 if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) { in sk_validate_xmit_skb()
2664 skb = sk->sk_validate_xmit_skb(sk, dev, skb); in sk_validate_xmit_skb()
2680 static inline bool sk_listener(const struct sock *sk) in sk_listener() argument
2682 return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); in sk_listener()
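
sk_fullsock() excludes TCP request and timewait minisockets, which share only the common header with struct sock; code handed skb->sk must check before touching full-socket fields. A sketch in the style of a classification hook (my_skb_mark is hypothetical):

#include <net/sock.h>

static u32 my_skb_mark(const struct sk_buff *skb)
{
        struct sock *sk = skb->sk;

        /* Minisockets lack sk_mark and most other struct sock fields. */
        if (sk && sk_fullsock(sk))
                return READ_ONCE(sk->sk_mark);
        return 0;
}
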
2685 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag);
2686 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
2689 bool sk_ns_capable(const struct sock *sk,
2691 bool sk_capable(const struct sock *sk, int cap);
2692 bool sk_net_capable(const struct sock *sk, int cap);
2694 void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
2718 static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto) in sk_get_wmem0() argument
2722 return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset)); in sk_get_wmem0()
2727 static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto) in sk_get_rmem0() argument
2731 return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset)); in sk_get_rmem0()
2740 static inline void sk_pacing_shift_update(struct sock *sk, int val) in sk_pacing_shift_update() argument
2742 if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val) in sk_pacing_shift_update()
2744 WRITE_ONCE(sk->sk_pacing_shift, val); in sk_pacing_shift_update()
2752 static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif) in sk_dev_equal_l3scope() argument
2756 if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif) in sk_dev_equal_l3scope()
2759 mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif); in sk_dev_equal_l3scope()
2760 if (mdif && mdif == sk->sk_bound_dev_if) in sk_dev_equal_l3scope()
2766 void sock_def_readable(struct sock *sk);
2768 int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
2769 void sock_enable_timestamps(struct sock *sk);
2770 void sock_no_linger(struct sock *sk);
2771 void sock_set_keepalive(struct sock *sk);
2772 void sock_set_priority(struct sock *sk, u32 priority);
2773 void sock_set_rcvbuf(struct sock *sk, int val);
2774 void sock_set_mark(struct sock *sk, u32 val);
2775 void sock_set_reuseaddr(struct sock *sk);
2776 void sock_set_reuseport(struct sock *sk);
2777 void sock_set_sndtimeo(struct sock *sk, s64 secs);
2779 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
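
These setters exist so in-kernel users can apply common socket options without going through setsockopt(). A sketch of creating and tuning a kernel TCP socket with them (my_kernel_socket_setup is hypothetical):

#include <linux/in.h>
#include <linux/net.h>
#include <net/sock.h>

static int my_kernel_socket_setup(struct net *net, struct socket **res)
{
        struct socket *sock;
        int err;

        err = sock_create_kern(net, AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
        if (err)
                return err;

        /* In-kernel equivalents of common setsockopt() calls. */
        sock_set_reuseaddr(sock->sk);
        sock_set_keepalive(sock->sk);
        sock_set_sndtimeo(sock->sk, 5);         /* seconds */

        *res = sock;
        return 0;
}
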