Searched refs:base (Results 1 – 25 of 71) sorted by relevance

/net/ipv4/
inetpeer.c
196 struct inet_peer_base *base) in lookup_rcu() argument
198 struct inet_peer *u = rcu_dereference(base->root); in lookup_rcu()
222 #define lookup_rightempty(start, base) \ argument
228 for (u = rcu_deref_locked(*v, base); \
232 u = rcu_deref_locked(*v, base); \
243 struct inet_peer_base *base) in peer_avl_rebalance() argument
251 node = rcu_deref_locked(*nodep, base); in peer_avl_rebalance()
252 l = rcu_deref_locked(node->avl_left, base); in peer_avl_rebalance()
253 r = rcu_deref_locked(node->avl_right, base); in peer_avl_rebalance()
259 ll = rcu_deref_locked(l->avl_left, base); in peer_avl_rebalance()
[all …]
esp4.c
95 static void esp_output_done(struct crypto_async_request *base, int err) in esp_output_done() argument
97 struct sk_buff *skb = base->data; in esp_output_done()
123 static void esp_output_done_esn(struct crypto_async_request *base, int err) in esp_output_done_esn() argument
125 struct sk_buff *skb = base->data; in esp_output_done_esn()
128 esp_output_done(base, err); in esp_output_done_esn()
392 static void esp_input_done(struct crypto_async_request *base, int err) in esp_input_done() argument
394 struct sk_buff *skb = base->data; in esp_input_done()
405 static void esp_input_done_esn(struct crypto_async_request *base, int err) in esp_input_done_esn() argument
407 struct sk_buff *skb = base->data; in esp_input_done_esn()
410 esp_input_done(base, err); in esp_input_done_esn()
/net/netfilter/
nf_conntrack_h323_asn1.c
114 static int decode_nul(bitstr_t *bs, const struct field_t *f, char *base, int level);
115 static int decode_bool(bitstr_t *bs, const struct field_t *f, char *base, int level);
116 static int decode_oid(bitstr_t *bs, const struct field_t *f, char *base, int level);
117 static int decode_int(bitstr_t *bs, const struct field_t *f, char *base, int level);
118 static int decode_enum(bitstr_t *bs, const struct field_t *f, char *base, int level);
119 static int decode_bitstr(bitstr_t *bs, const struct field_t *f, char *base, int level);
120 static int decode_numstr(bitstr_t *bs, const struct field_t *f, char *base, int level);
121 static int decode_octstr(bitstr_t *bs, const struct field_t *f, char *base, int level);
122 static int decode_bmpstr(bitstr_t *bs, const struct field_t *f, char *base, int level);
123 static int decode_seq(bitstr_t *bs, const struct field_t *f, char *base, int level);
[all …]
nft_payload.c
78 switch (priv->base) { in nft_payload_eval()
127 priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE])); in nft_payload_init()
141 nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) || in nft_payload_dump()
179 switch (priv->base) { in nft_payload_set_eval()
201 (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER || in nft_payload_set_eval()
233 priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE])); in nft_payload_set_init()
261 nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) || in nft_payload_set_dump()
286 enum nft_payload_bases base; in nft_payload_select_ops() local
294 base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE])); in nft_payload_select_ops()
295 switch (base) { in nft_payload_select_ops()
[all …]
/net/sctp/
endpointola.c
113 ep->base.type = SCTP_EP_TYPE_SOCKET; in sctp_endpoint_init()
116 atomic_set(&ep->base.refcnt, 1); in sctp_endpoint_init()
117 ep->base.dead = false; in sctp_endpoint_init()
120 sctp_inq_init(&ep->base.inqueue); in sctp_endpoint_init()
123 sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv); in sctp_endpoint_init()
126 sctp_bind_addr_init(&ep->base.bind_addr, 0); in sctp_endpoint_init()
129 ep->base.sk = sk; in sctp_endpoint_init()
130 sock_hold(ep->base.sk); in sctp_endpoint_init()
209 struct sock *sk = ep->base.sk; in sctp_endpoint_add_asoc()
231 ep->base.dead = true; in sctp_endpoint_free()
[all …]
associola.c
83 asoc->base.sk = (struct sock *)sk; in sctp_association_init()
86 sock_hold(asoc->base.sk); in sctp_association_init()
89 asoc->base.type = SCTP_EP_TYPE_ASSOCIATION; in sctp_association_init()
92 atomic_set(&asoc->base.refcnt, 1); in sctp_association_init()
95 sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port); in sctp_association_init()
188 asoc->c.my_port = ep->base.bind_addr.port; in sctp_association_init()
240 sctp_inq_init(&asoc->base.inqueue); in sctp_association_init()
241 sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv); in sctp_association_init()
253 if (asoc->base.sk->sk_family == PF_INET6) in sctp_association_init()
290 sock_put(asoc->base.sk); in sctp_association_init()
[all …]
input.c
176 rcvr = asoc ? &asoc->base : &ep->base; in sctp_rcv()
195 rcvr = &ep->base; in sctp_rcv()
503 sk = asoc->base.sk; in sctp_err_lookup()
719 struct net *net = sock_net(ep->base.sk); in __sctp_hash_endpoint()
723 epb = &ep->base; in __sctp_hash_endpoint()
744 struct net *net = sock_net(ep->base.sk); in __sctp_unhash_endpoint()
748 epb = &ep->base; in __sctp_unhash_endpoint()
815 if (!net_eq(sock_net(asoc->base.sk), x->net)) in sctp_hash_cmp()
821 if (x->laddr->v4.sin_port != htons(asoc->base.bind_addr.port)) in sctp_hash_cmp()
823 if (!sctp_bind_addr_match(&asoc->base.bind_addr, in sctp_hash_cmp()
[all …]
sctp_diag.c
18 laddr = list_entry(asoc->base.bind_addr.address_list.next, in inet_diag_msg_sctpasoc_fill()
24 r->id.idiag_sport = htons(asoc->base.bind_addr.port); in inet_diag_msg_sctpasoc_fill()
184 addr_list = asoc ? &asoc->base.bind_addr.address_list in inet_sctp_diag_fill()
185 : &ep->base.bind_addr.address_list; in inet_sctp_diag_fill()
219 list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list, in inet_assoc_attr_size()
238 struct sock *sk = tsp->asoc->base.sk; in sctp_tsp_dump_one()
257 if (sk != assoc->base.sk) { in sctp_tsp_dump_one()
259 sk = assoc->base.sk; in sctp_tsp_dump_one()
297 if (r->id.idiag_sport != htons(assoc->base.bind_addr.port) && in sctp_sock_dump()
342 struct sock *sk = ep->base.sk; in sctp_get_sock()
[all …]
ulpqueue.c
174 struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk); in sctp_ulpq_set_pd()
185 return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc); in sctp_ulpq_clear_pd()
193 struct sock *sk = ulpq->asoc->base.sk; in sctp_ulpq_tail_event()
494 if (!sctp_sk(asoc->base.sk)->frag_interleave && in sctp_ulpq_retrieve_reassembled()
495 atomic_read(&sctp_sk(asoc->base.sk)->pd_mode)) in sctp_ulpq_retrieve_reassembled()
499 pd_point = sctp_sk(asoc->base.sk)->pd_point; in sctp_ulpq_retrieve_reassembled()
501 retval = sctp_make_reassembled_event(sock_net(asoc->base.sk), in sctp_ulpq_retrieve_reassembled()
512 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), in sctp_ulpq_retrieve_reassembled()
578 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), in sctp_ulpq_retrieve_partial()
679 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), in sctp_ulpq_retrieve_first()
[all …]
sm_sideeffect.c
249 struct sock *sk = asoc->base.sk; in sctp_generate_t3_rtx_event()
285 struct sock *sk = asoc->base.sk; in sctp_generate_timeout_event()
303 if (asoc->base.dead) in sctp_generate_timeout_event()
366 struct sock *sk = asoc->base.sk; in sctp_generate_heartbeat_event()
410 struct sock *sk = asoc->base.sk; in sctp_generate_proto_unreach_event()
427 if (asoc->base.dead) in sctp_generate_proto_unreach_event()
481 struct net *net = sock_net(asoc->base.sk); in sctp_do_8_2_transport_strike()
757 struct net *net = sock_net(asoc->base.sk); in sctp_cmd_process_sack()
794 struct sock *sk = asoc->base.sk; in sctp_cmd_new_state()
849 struct sock *sk = asoc->base.sk; in sctp_cmd_delete_tcb()
[all …]
ulpevent.c
104 sctp_skb_set_owner_r(skb, asoc->base.sk); in sctp_ulpevent_set_owner()
106 chunk->head_skb->sk = asoc->base.sk; in sctp_ulpevent_set_owner()
347 sctp_get_pf_specific(asoc->base.sk->sk_family)->addr_to_user( in sctp_ulpevent_make_peer_addr_change()
348 sctp_sk(asoc->base.sk), in sctp_ulpevent_make_peer_addr_change()
655 rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); in sctp_ulpevent_make_rcvmsg()
657 if (rx_count >= asoc->base.sk->sk_rcvbuf) { in sctp_ulpevent_make_rcvmsg()
659 if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) || in sctp_ulpevent_make_rcvmsg()
660 (!sk_rmem_schedule(asoc->base.sk, chunk->skb, in sctp_ulpevent_make_rcvmsg()
sm_make_chunk.c
104 skb->sk = asoc ? asoc->base.sk : NULL; in sctp_control_set_owner_w()
218 struct net *net = sock_net(asoc->base.sk); in sctp_make_init()
252 sp = sctp_sk(asoc->base.sk); in sctp_make_init()
402 addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, gfp); in sctp_make_init_ack()
421 sp = sctp_sk(asoc->base.sk); in sctp_make_init_ack()
1393 sk = asoc ? asoc->base.sk : NULL; in _sctp_make_chunk()
1591 asoc = sctp_association_new(ep, ep->base.sk, scope, gfp); in sctp_make_temp_asoc()
1669 if (sctp_sk(ep->base.sk)->hmac) { in sctp_pack_cookie()
1670 SHASH_DESC_ON_STACK(desc, sctp_sk(ep->base.sk)->hmac); in sctp_pack_cookie()
1674 desc->tfm = sctp_sk(ep->base.sk)->hmac; in sctp_pack_cookie()
[all …]
/net/sunrpc/
socklib.c
72 ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *de… in xdr_partial_copy_from_skb() argument
80 if (base < len) { in xdr_partial_copy_from_skb()
81 len -= base; in xdr_partial_copy_from_skb()
82 ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len); in xdr_partial_copy_from_skb()
86 base = 0; in xdr_partial_copy_from_skb()
88 base -= len; in xdr_partial_copy_from_skb()
92 if (unlikely(base >= pglen)) { in xdr_partial_copy_from_skb()
93 base -= pglen; in xdr_partial_copy_from_skb()
96 if (base || xdr->page_base) { in xdr_partial_copy_from_skb()
97 pglen -= base; in xdr_partial_copy_from_skb()
[all …]
xdr.c
133 struct page **pages, unsigned int base, unsigned int len) in xdr_inline_pages() argument
143 xdr->page_base = base; in xdr_inline_pages()
691 void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base, in xdr_write_pages() argument
697 buf->page_base = base; in xdr_write_pages()
730 unsigned int base, unsigned int len) in xdr_set_page_base() argument
739 if (base >= maxlen) in xdr_set_page_base()
741 maxlen -= base; in xdr_set_page_base()
745 base += xdr->buf->page_base; in xdr_set_page_base()
747 pgnr = base >> PAGE_SHIFT; in xdr_set_page_base()
751 pgoff = base & ~PAGE_MASK; in xdr_set_page_base()
[all …]
auth_unix.c
154 __be32 *base, *hold; in unx_marshal() local
158 base = p++; in unx_marshal()
172 *base = htonl((p - base - 1) << 2); /* cred length */ in unx_marshal()
xprtsock.c
325 …ct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more) in xs_send_kvec() argument
333 .iov_base = vec->iov_base + base, in xs_send_kvec()
334 .iov_len = vec->iov_len - base, in xs_send_kvec()
342 static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, … in xs_send_pagedata() argument
350 remainder = xdr->page_len - base; in xs_send_pagedata()
351 base += xdr->page_base; in xs_send_pagedata()
352 ppage = xdr->pages + (base >> PAGE_SHIFT); in xs_send_pagedata()
353 base &= ~PAGE_MASK; in xs_send_pagedata()
358 unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder); in xs_send_pagedata()
366 err = do_sendpage(sock, *ppage, base, len, flags); in xs_send_pagedata()
[all …]
svcsock.c
180 size_t base = xdr->page_base; in svc_send_common() local
200 size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen; in svc_send_common()
204 result = kernel_sendpage(sock, *ppage, base, size, flags); in svc_send_common()
212 base = 0; in svc_send_common()
343 int buflen, unsigned int base) in svc_partial_recvfrom() argument
350 if (base == 0) in svc_partial_recvfrom()
354 if (iov[i].iov_len > base) in svc_partial_recvfrom()
356 base -= iov[i].iov_len; in svc_partial_recvfrom()
360 iov[i].iov_len -= base; in svc_partial_recvfrom()
361 iov[i].iov_base += base; in svc_partial_recvfrom()
[all …]
/net/core/
ethtool.c
457 link_ksettings->base.speed in convert_legacy_settings_to_link_ksettings()
459 link_ksettings->base.duplex in convert_legacy_settings_to_link_ksettings()
461 link_ksettings->base.port in convert_legacy_settings_to_link_ksettings()
463 link_ksettings->base.phy_address in convert_legacy_settings_to_link_ksettings()
465 link_ksettings->base.autoneg in convert_legacy_settings_to_link_ksettings()
467 link_ksettings->base.mdio_support in convert_legacy_settings_to_link_ksettings()
469 link_ksettings->base.eth_tp_mdix in convert_legacy_settings_to_link_ksettings()
471 link_ksettings->base.eth_tp_mdix_ctrl in convert_legacy_settings_to_link_ksettings()
502 ethtool_cmd_speed_set(legacy_settings, link_ksettings->base.speed); in convert_link_ksettings_to_legacy_settings()
504 = link_ksettings->base.duplex; in convert_link_ksettings_to_legacy_settings()
[all …]
/net/ipv6/
esp6.c
117 static void esp_output_done(struct crypto_async_request *base, int err) in esp_output_done() argument
119 struct sk_buff *skb = base->data; in esp_output_done()
141 static void esp_output_done_esn(struct crypto_async_request *base, int err) in esp_output_done_esn() argument
143 struct sk_buff *skb = base->data; in esp_output_done_esn()
146 esp_output_done(base, err); in esp_output_done_esn()
335 static void esp_input_done(struct crypto_async_request *base, int err) in esp_input_done() argument
337 struct sk_buff *skb = base->data; in esp_input_done()
348 static void esp_input_done_esn(struct crypto_async_request *base, int err) in esp_input_done_esn() argument
350 struct sk_buff *skb = base->data; in esp_input_done_esn()
353 esp_input_done(base, err); in esp_input_done_esn()
ip6_output.c
1268 cork->base.dst = &rt->dst; in ip6_setup_cork()
1284 cork->base.fragsize = mtu; in ip6_setup_cork()
1286 cork->base.flags |= IPCORK_ALLFRAG; in ip6_setup_cork()
1287 cork->base.length = 0; in ip6_setup_cork()
1643 return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base, in ip6_append_data()
1661 if (cork->base.dst) { in ip6_cork_release()
1662 dst_release(cork->base.dst); in ip6_cork_release()
1663 cork->base.dst = NULL; in ip6_cork_release()
1664 cork->base.flags &= ~IPCORK_ALLFRAG; in ip6_cork_release()
1681 struct rt6_info *rt = (struct rt6_info *)cork->base.dst; in __ip6_make_skb()
[all …]
/net/sched/
cls_matchall.c
116 unsigned long base, struct nlattr **tb, in mall_set_parms() argument
129 tcf_bind_filter(tp, &head->res, base); in mall_set_parms()
138 struct tcf_proto *tp, unsigned long base, in mall_change() argument
177 err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr); in mall_change()
cls_basic.c
129 struct basic_filter *f, unsigned long base, in basic_set_parms() argument
150 tcf_bind_filter(tp, &f->res, base); in basic_set_parms()
164 struct tcf_proto *tp, unsigned long base, u32 handle, in basic_change() argument
214 err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr); in basic_change()
cls_fw.c
191 struct nlattr **tb, struct nlattr **tca, unsigned long base, in fw_change_attrs() argument
208 tcf_bind_filter(tp, &f->res, base); in fw_change_attrs()
240 struct tcf_proto *tp, unsigned long base, in fw_change() argument
281 err = fw_change_attrs(net, tp, fnew, tb, tca, base, ovr); in fw_change()
329 err = fw_change_attrs(net, tp, f, tb, tca, base, ovr); in fw_change()
/net/bridge/netfilter/
ebtables.c
193 const char *base; in ebt_do_table() local
217 base = private->entries; in ebt_do_table()
290 chaininfo = (struct ebt_entries *) (base + verdict); in ebt_do_table()
769 unsigned int udc_cnt, unsigned int hooknr, char *base) in check_chainloops() argument
804 (struct ebt_entries *)(base + verdict); in check_chainloops()
1354 const char *base, char __user *ubase) in ebt_make_matchname() argument
1356 char __user *hlp = ubase + ((char *)m - base); in ebt_make_matchname()
1369 const char *base, char __user *ubase) in ebt_make_watchername() argument
1371 char __user *hlp = ubase + ((char *)w - base); in ebt_make_watchername()
1380 static inline int ebt_make_names(struct ebt_entry *e, const char *base, in ebt_make_names() argument
[all …]
/net/ipv4/netfilter/
arp_tables.c
170 get_entry(const void *base, unsigned int offset) in get_entry() argument
172 return (struct arpt_entry *)(base + offset); in get_entry()
470 const unsigned char *base, in check_entry_size_and_hooks() argument
500 if ((unsigned char *)e - base == hook_entries[h]) in check_entry_size_and_hooks()
502 if ((unsigned char *)e - base == underflows[h]) { in check_entry_size_and_hooks()
745 const void *base, struct xt_table_info *newinfo) in compat_calc_entry() argument
752 entry_offset = (void *)e - base; in compat_calc_entry()
763 (e < (struct arpt_entry *)(base + info->hook_entry[i]))) in compat_calc_entry()
766 (e < (struct arpt_entry *)(base + info->underflow[i]))) in compat_calc_entry()
1083 const unsigned char *base, in check_compat_entry_size_and_hooks() argument
[all …]
