Searched refs: base (Results 1 – 25 of 85) sorted by relevance

/net/sctp/
endpointola.c
65 ep->base.type = SCTP_EP_TYPE_SOCKET; in sctp_endpoint_init()
68 refcount_set(&ep->base.refcnt, 1); in sctp_endpoint_init()
69 ep->base.dead = false; in sctp_endpoint_init()
72 sctp_inq_init(&ep->base.inqueue); in sctp_endpoint_init()
75 sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv); in sctp_endpoint_init()
78 sctp_bind_addr_init(&ep->base.bind_addr, 0); in sctp_endpoint_init()
112 ep->base.sk = sk; in sctp_endpoint_init()
113 ep->base.net = sock_net(sk); in sctp_endpoint_init()
114 sock_hold(ep->base.sk); in sctp_endpoint_init()
154 struct sock *sk = ep->base.sk; in sctp_endpoint_add_asoc()
[all …]
associola.c
66 asoc->base.sk = (struct sock *)sk; in sctp_association_init()
67 asoc->base.net = sock_net(sk); in sctp_association_init()
70 sock_hold(asoc->base.sk); in sctp_association_init()
73 asoc->base.type = SCTP_EP_TYPE_ASSOCIATION; in sctp_association_init()
76 refcount_set(&asoc->base.refcnt, 1); in sctp_association_init()
79 sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port); in sctp_association_init()
173 asoc->c.my_port = ep->base.bind_addr.port; in sctp_association_init()
218 sctp_inq_init(&asoc->base.inqueue); in sctp_association_init()
219 sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv); in sctp_association_init()
238 if (asoc->base.sk->sk_family == PF_INET6) in sctp_association_init()
[all …]
input.c
166 rcvr = asoc ? &asoc->base : &ep->base; in sctp_rcv()
186 rcvr = &ep->base; in sctp_rcv()
505 sk = asoc->base.sk; in sctp_err_lookup()
722 struct sock *sk = ep->base.sk; in __sctp_hash_endpoint()
727 epb = &ep->base; in __sctp_hash_endpoint()
737 list_for_each(list, &ep->base.bind_addr.address_list) in __sctp_hash_endpoint()
788 struct sock *sk = ep->base.sk; in __sctp_unhash_endpoint()
792 epb = &ep->base; in __sctp_unhash_endpoint()
854 sk = ep->base.sk; in __sctp_rcv_lookup_endpoint()
887 if (!net_eq(t->asoc->base.net, x->net)) in sctp_hash_cmp()
[all …]
diag.c
34 laddr = list_entry(asoc->base.bind_addr.address_list.next, in inet_diag_msg_sctpasoc_fill()
40 r->id.idiag_sport = htons(asoc->base.bind_addr.port); in inet_diag_msg_sctpasoc_fill()
197 addr_list = asoc ? &asoc->base.bind_addr.address_list in inet_sctp_diag_fill()
198 : &ep->base.bind_addr.address_list; in inet_sctp_diag_fill()
232 list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list, in inet_assoc_attr_size()
248 struct sock *sk = tsp->asoc->base.sk; in sctp_tsp_dump_one()
267 if (sk != assoc->base.sk) { in sctp_tsp_dump_one()
269 sk = assoc->base.sk; in sctp_tsp_dump_one()
295 struct sock *sk = ep->base.sk; in sctp_sock_dump()
309 if (r->id.idiag_sport != htons(assoc->base.bind_addr.port) && in sctp_sock_dump()
[all …]
ulpqueue.c
168 struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk); in sctp_ulpq_set_pd()
179 return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc); in sctp_ulpq_clear_pd()
184 struct sock *sk = ulpq->asoc->base.sk; in sctp_ulpq_tail_event()
482 if (!sctp_sk(asoc->base.sk)->frag_interleave && in sctp_ulpq_retrieve_reassembled()
483 atomic_read(&sctp_sk(asoc->base.sk)->pd_mode)) in sctp_ulpq_retrieve_reassembled()
487 pd_point = sctp_sk(asoc->base.sk)->pd_point; in sctp_ulpq_retrieve_reassembled()
489 retval = sctp_make_reassembled_event(sock_net(asoc->base.sk), in sctp_ulpq_retrieve_reassembled()
500 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), in sctp_ulpq_retrieve_reassembled()
566 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), in sctp_ulpq_retrieve_partial()
667 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), in sctp_ulpq_retrieve_first()
[all …]
stream_interleave.c
244 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), in sctp_intl_retrieve_partial()
327 pd_point = sctp_sk(asoc->base.sk)->pd_point; in sctp_intl_retrieve_reassembled()
329 retval = sctp_make_reassembled_event(sock_net(asoc->base.sk), in sctp_intl_retrieve_reassembled()
340 retval = sctp_make_reassembled_event(sock_net(asoc->base.sk), in sctp_intl_retrieve_reassembled()
474 struct sock *sk = ulpq->asoc->base.sk; in sctp_enqueue_event()
633 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), in sctp_intl_retrieve_partial_uo()
717 pd_point = sctp_sk(asoc->base.sk)->pd_point; in sctp_intl_retrieve_reassembled_uo()
719 retval = sctp_make_reassembled_event(sock_net(asoc->base.sk), in sctp_intl_retrieve_reassembled_uo()
730 retval = sctp_make_reassembled_event(sock_net(asoc->base.sk), in sctp_intl_retrieve_reassembled_uo()
817 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), in sctp_intl_retrieve_first_uo()
[all …]
sm_sideeffect.c
236 struct sock *sk = asoc->base.sk; in sctp_generate_t3_rtx_event()
273 struct sock *sk = asoc->base.sk; in sctp_generate_timeout_event()
291 if (asoc->base.dead) in sctp_generate_timeout_event()
366 struct sock *sk = asoc->base.sk; in sctp_generate_heartbeat_event()
412 struct sock *sk = asoc->base.sk; in sctp_generate_proto_unreach_event()
429 if (asoc->base.dead) in sctp_generate_proto_unreach_event()
447 struct sock *sk = asoc->base.sk; in sctp_generate_reconf_event()
523 struct net *net = sock_net(asoc->base.sk); in sctp_do_8_2_transport_strike()
800 struct net *net = sock_net(asoc->base.sk); in sctp_cmd_process_sack()
836 struct net *net = sock_net(asoc->base.sk); in sctp_cmd_assoc_update()
[all …]
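
Note on the /net/sctp/ results: every hit above goes through the same embedded header. Both struct sctp_endpoint and struct sctp_association begin with a shared struct sctp_ep_common member named base (type tag, refcount, owning socket, inqueue, bind address), which is why code such as sctp_rcv() can pick either &asoc->base or &ep->base and continue through a single pointer type. A minimal userspace sketch of that embed-a-common-header pattern follows; the struct layout and field names are illustrative, not the kernel's exact definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct sctp_ep_common: the state an endpoint
 * and an association share (type tag, refcount, liveness, bound port). */
enum ep_type { EP_TYPE_SOCKET, EP_TYPE_ASSOCIATION };

struct ep_common {
	enum ep_type type;
	int refcnt;
	bool dead;
	int port;                /* stands in for base.bind_addr.port */
};

/* Both heavier objects start with the common header, just as the kernel's
 * struct sctp_endpoint and struct sctp_association start with .base. */
struct endpoint    { struct ep_common base; /* endpoint-only state would follow */ };
struct association { struct ep_common base; /* association-only state would follow */ };

/* A receive path can work through the shared header no matter which object
 * matched, in the spirit of sctp_rcv(): rcvr = asoc ? &asoc->base : &ep->base. */
static void deliver(struct ep_common *rcvr)
{
	printf("delivering to %s on port %d (refcnt %d)\n",
	       rcvr->type == EP_TYPE_ASSOCIATION ? "association" : "endpoint",
	       rcvr->port, rcvr->refcnt);
}

int main(void)
{
	struct endpoint ep = { .base = { EP_TYPE_SOCKET, 1, false, 8080 } };
	struct association asoc = { .base = { EP_TYPE_ASSOCIATION, 1, false, 8080 } };
	bool have_asoc = true;   /* pretend the lookup found an association */

	deliver(have_asoc ? &asoc.base : &ep.base);
	return 0;
}

Keeping those shared fields at a fixed offset is also what lets the hashing and diag paths above (epb = &ep->base, asoc->base.bind_addr.port) treat endpoints and associations uniformly.
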
/net/netfilter/
nf_conntrack_h323_asn1.c
110 static int decode_nul(struct bitstr *bs, const struct field_t *f, char *base, int level);
111 static int decode_bool(struct bitstr *bs, const struct field_t *f, char *base, int level);
112 static int decode_oid(struct bitstr *bs, const struct field_t *f, char *base, int level);
113 static int decode_int(struct bitstr *bs, const struct field_t *f, char *base, int level);
114 static int decode_enum(struct bitstr *bs, const struct field_t *f, char *base, int level);
115 static int decode_bitstr(struct bitstr *bs, const struct field_t *f, char *base, int level);
116 static int decode_numstr(struct bitstr *bs, const struct field_t *f, char *base, int level);
117 static int decode_octstr(struct bitstr *bs, const struct field_t *f, char *base, int level);
118 static int decode_bmpstr(struct bitstr *bs, const struct field_t *f, char *base, int level);
119 static int decode_seq(struct bitstr *bs, const struct field_t *f, char *base, int level);
[all …]
nft_payload.c
85 switch (priv->base) { in nft_payload_eval()
135 priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE])); in nft_payload_init()
149 nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) || in nft_payload_dump()
366 switch (priv->base) { in nft_payload_offload()
511 switch (priv->base) { in nft_payload_set_eval()
533 (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER || in nft_payload_set_eval()
564 priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE])); in nft_payload_set_init()
606 nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) || in nft_payload_set_dump()
632 enum nft_payload_bases base; in nft_payload_select_ops() local
641 base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE])); in nft_payload_select_ops()
[all …]
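
Note on the nft_payload.c results: here base is an enum nft_payload_bases value taken from the NFTA_PAYLOAD_BASE attribute, naming which packet header the expression's offset is measured from, and the eval, offload and set-eval paths all switch on it. The sketch below shows the same "offset relative to a chosen base header" idea in plain userspace C; the enum, struct and header offsets are simplified stand-ins, not the nftables definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Roughly mirrors the role of enum nft_payload_bases: which header the
 * payload offset counts from. */
enum payload_base {
	PAYLOAD_LL_HEADER,
	PAYLOAD_NETWORK_HEADER,
	PAYLOAD_TRANSPORT_HEADER,
};

/* A toy "packet": raw bytes plus the offsets where each header starts. */
struct packet {
	const uint8_t *data;
	size_t len;
	size_t mac_off, net_off, trans_off;
};

/* Copy @len bytes found @offset bytes past the chosen base header, the way
 * nft_payload_eval() resolves priv->base before copying data out. */
static int payload_copy(const struct packet *pkt, enum payload_base base,
			size_t offset, size_t len, uint8_t *dst)
{
	size_t start;

	switch (base) {
	case PAYLOAD_LL_HEADER:        start = pkt->mac_off;   break;
	case PAYLOAD_NETWORK_HEADER:   start = pkt->net_off;   break;
	case PAYLOAD_TRANSPORT_HEADER: start = pkt->trans_off; break;
	default:                       return -1;
	}
	if (start + offset + len > pkt->len)
		return -1;                       /* would read past the packet */
	memcpy(dst, pkt->data + start + offset, len);
	return 0;
}

int main(void)
{
	uint8_t frame[64] = { [14 + 9] = 0x11 };  /* pretend IPv4 protocol byte = UDP */
	struct packet pkt = { frame, sizeof(frame), 0, 14, 34 };
	uint8_t proto;

	if (!payload_copy(&pkt, PAYLOAD_NETWORK_HEADER, 9, 1, &proto))
		printf("ip protocol byte: 0x%02x\n", proto);
	return 0;
}
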
/net/sunrpc/
socklib.c
74 xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_… in xdr_partial_copy_from_skb() argument
82 if (base < len) { in xdr_partial_copy_from_skb()
83 len -= base; in xdr_partial_copy_from_skb()
84 ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len); in xdr_partial_copy_from_skb()
88 base = 0; in xdr_partial_copy_from_skb()
90 base -= len; in xdr_partial_copy_from_skb()
94 if (unlikely(base >= pglen)) { in xdr_partial_copy_from_skb()
95 base -= pglen; in xdr_partial_copy_from_skb()
98 if (base || xdr->page_base) { in xdr_partial_copy_from_skb()
99 pglen -= base; in xdr_partial_copy_from_skb()
[all …]
xdr.c
178 struct page **pages, unsigned int base, unsigned int len) in xdr_inline_pages() argument
188 xdr->page_base = base; in xdr_inline_pages()
761 void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base, in xdr_write_pages() argument
767 buf->page_base = base; in xdr_write_pages()
800 unsigned int base, unsigned int len) in xdr_set_page_base() argument
809 if (base >= maxlen) in xdr_set_page_base()
811 maxlen -= base; in xdr_set_page_base()
815 base += xdr->buf->page_base; in xdr_set_page_base()
817 pgnr = base >> PAGE_SHIFT; in xdr_set_page_base()
821 pgoff = base & ~PAGE_MASK; in xdr_set_page_base()
[all …]
svcsock.c
187 size_t base = xdr->page_base; in svc_send_common() local
207 size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen; in svc_send_common()
211 result = kernel_sendpage(sock, *ppage, base, size, flags); in svc_send_common()
219 base = 0; in svc_send_common()
329 unsigned int nr, size_t buflen, unsigned int base) in svc_recvfrom() argument
340 if (base != 0) { in svc_recvfrom()
341 iov_iter_advance(&msg.msg_iter, base); in svc_recvfrom()
342 buflen -= base; in svc_recvfrom()
1045 unsigned int want, base; in svc_tcp_recvfrom() local
1059 base = svc_tcp_restore_pages(svsk, rqstp); in svc_tcp_recvfrom()
[all …]
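
Note on the /net/sunrpc/ results: base here is a running byte offset into an xdr_buf, consumed section by section, first against the head iovec, then the page array, then the tail. xdr_partial_copy_from_skb() and xdr_set_page_base() both do this subtract-and-fall-through walk, including the shift and mask that turn the remaining offset into a page number and page offset. A simplified, userspace-only sketch of that arithmetic follows; the segment sizes and struct are invented.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 8   /* tiny "page" so the example stays readable */

/* A cut-down xdr_buf: a head buffer, an array of fixed-size pages, a tail. */
struct seg_buf {
	char head[16];           size_t head_len;
	char pages[4][PAGE_SZ];  size_t page_len;   /* total bytes held in pages */
	char tail[16];           size_t tail_len;
};

/* Read the byte at logical offset @base, resolving which segment it lands in,
 * in the same spirit as xdr_partial_copy_from_skb() / xdr_set_page_base(). */
static int seg_read_byte(const struct seg_buf *b, size_t base, char *out)
{
	if (base < b->head_len) {              /* still inside the head */
		*out = b->head[base];
		return 0;
	}
	base -= b->head_len;
	if (base < b->page_len) {              /* inside the page array */
		size_t pgnr  = base / PAGE_SZ; /* cf. pgnr = base >> PAGE_SHIFT */
		size_t pgoff = base % PAGE_SZ; /* cf. pgoff = base & ~PAGE_MASK */
		*out = b->pages[pgnr][pgoff];
		return 0;
	}
	base -= b->page_len;
	if (base < b->tail_len) {              /* finally, the tail */
		*out = b->tail[base];
		return 0;
	}
	return -1;                             /* offset runs past the buffer */
}

int main(void)
{
	struct seg_buf b = { .head = "HEAD", .head_len = 4,
			     .page_len = 2 * PAGE_SZ,
			     .tail = "TAIL", .tail_len = 4 };
	char c;

	memcpy(b.pages[1], "pagedata", PAGE_SZ);
	if (!seg_read_byte(&b, 4 + PAGE_SZ + 3, &c))   /* 4th byte of page 1: 'e' */
		printf("byte: %c\n", c);
	return 0;
}
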
/net/ipv4/
inetpeer.c
99 struct inet_peer_base *base, in lookup() argument
109 pp = &base->rb_root.rb_node; in lookup()
128 } else if (unlikely(read_seqretry(&base->lock, seq))) { in lookup()
147 static void inet_peer_gc(struct inet_peer_base *base, in inet_peer_gc() argument
160 if (base->total >= peer_threshold) in inet_peer_gc()
164 base->total / peer_threshold * HZ; in inet_peer_gc()
179 rb_erase(&p->rb_node, &base->rb_root); in inet_peer_gc()
180 base->total--; in inet_peer_gc()
186 struct inet_peer *inet_getpeer(struct inet_peer_base *base, in inet_getpeer() argument
199 seq = read_seqbegin(&base->lock); in inet_getpeer()
[all …]
esp4.c
120 static void esp_output_done(struct crypto_async_request *base, int err) in esp_output_done() argument
122 struct sk_buff *skb = base->data; in esp_output_done()
204 static void esp_output_done_esn(struct crypto_async_request *base, int err) in esp_output_done_esn() argument
206 struct sk_buff *skb = base->data; in esp_output_done_esn()
209 esp_output_done(base, err); in esp_output_done_esn()
654 static void esp_input_done(struct crypto_async_request *base, int err) in esp_input_done() argument
656 struct sk_buff *skb = base->data; in esp_input_done()
684 static void esp_input_done_esn(struct crypto_async_request *base, int err) in esp_input_done_esn() argument
686 struct sk_buff *skb = base->data; in esp_input_done_esn()
689 esp_input_done(base, err); in esp_input_done_esn()
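
Note on the esp4.c results (the esp6.c hits further down are the same pattern): base is the crypto layer's asynchronous request handle, and the submitter stashed the skb in base->data, so the completion callbacks (esp_output_done(), esp_input_done() and their ESN variants) simply fish it back out. A toy version of that callback-plus-context pattern; the struct and function names are invented stand-ins, not the kernel crypto API.

#include <stdio.h>

/* Minimal stand-in for the async request handle: the only part the ESP
 * callbacks use here is ->data, which points back at the caller's context. */
struct async_request {
	void *data;
	void (*complete)(struct async_request *base, int err);
};

struct fake_skb { int len; };

/* Completion handler in the style of esp_output_done(): recover the skb
 * (here a fake one) from base->data and finish the transmit path. */
static void output_done(struct async_request *base, int err)
{
	struct fake_skb *skb = base->data;

	printf("crypto finished for a %d-byte skb, err=%d\n", skb->len, err);
}

/* Stand-in for the crypto core invoking the callback once the work is done. */
static void fake_crypto_run(struct async_request *req)
{
	req->complete(req, 0);
}

int main(void)
{
	struct fake_skb skb = { .len = 1500 };
	struct async_request req = { .data = &skb, .complete = output_done };

	fake_crypto_run(&req);
	return 0;
}

The inetpeer.c hits in this group use base differently: there it is the per-namespace peer tree (struct inet_peer_base) that lookup() and inet_peer_gc() walk.
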
/net/rxrpc/
peer_event.c
303 time64_t base, in rxrpc_peer_keepalive_dispatch() argument
325 slot = keepalive_at - base; in rxrpc_peer_keepalive_dispatch()
329 if (keepalive_at <= base || in rxrpc_peer_keepalive_dispatch()
330 keepalive_at > base + RXRPC_KEEPALIVE_TIME) { in rxrpc_peer_keepalive_dispatch()
360 time64_t base, now, delay; in rxrpc_peer_keepalive_worker() local
365 base = rxnet->peer_keepalive_base; in rxrpc_peer_keepalive_worker()
367 _enter("%lld,%u", base - now, cursor); in rxrpc_peer_keepalive_worker()
383 while (base <= now && (s8)(cursor - stop) < 0) { in rxrpc_peer_keepalive_worker()
386 base++; in rxrpc_peer_keepalive_worker()
390 base = now; in rxrpc_peer_keepalive_worker()
[all …]
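
Note on the peer_event.c results: base is the time origin of rxrpc's ring of keepalive buckets. A peer's next keepalive time minus base selects a slot, values outside the window are clamped (the kernel checks against RXRPC_KEEPALIVE_TIME), and the worker advances base toward "now" as it drains buckets. A rough sketch of just the slot arithmetic, with an invented ring size rather than the real constants:

#include <stdio.h>
#include <time.h>

#define RING_SLOTS 10   /* illustrative only; not RXRPC_KEEPALIVE_TIME */

/* Pick the bucket for a peer whose next keepalive fires at @keepalive_at,
 * relative to the ring's time origin @base, clamping times that are already
 * due or that fall outside the window, in the spirit of
 * rxrpc_peer_keepalive_dispatch()'s slot = keepalive_at - base. */
static int keepalive_slot(time_t keepalive_at, time_t base)
{
	long slot = (long)(keepalive_at - base);

	if (slot <= 0)
		return 0;                  /* already due: service immediately */
	if (slot >= RING_SLOTS)
		return RING_SLOTS - 1;     /* too far out: park in the last bucket */
	return (int)slot;
}

int main(void)
{
	time_t base = time(NULL);

	printf("overdue      -> slot %d\n", keepalive_slot(base - 5, base));
	printf("in 3 seconds -> slot %d\n", keepalive_slot(base + 3, base));
	printf("far future   -> slot %d\n", keepalive_slot(base + 60, base));
	return 0;
}
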
/net/sched/
cls_basic.c
146 struct basic_filter *f, unsigned long base, in basic_set_parms() argument
163 tcf_bind_filter(tp, &f->res, base); in basic_set_parms()
171 struct tcf_proto *tp, unsigned long base, u32 handle, in basic_change() argument
219 err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr, in basic_change()
267 unsigned long base) in basic_bind_class() argument
273 __tcf_bind_filter(q, &f->res, base); in basic_bind_class()
cls_fw.c
201 struct nlattr **tca, unsigned long base, bool ovr, in fw_set_parms() argument
231 tcf_bind_filter(tp, &f->res, base); in fw_set_parms()
238 struct tcf_proto *tp, unsigned long base, in fw_change() argument
279 err = fw_set_parms(net, tp, fnew, tb, tca, base, ovr, extack); in fw_change()
328 err = fw_set_parms(net, tp, f, tb, tca, base, ovr, extack); in fw_change()
422 unsigned long base) in fw_bind_class() argument
428 __tcf_bind_filter(q, &f->res, base); in fw_bind_class()
cls_matchall.c
165 unsigned long base, struct nlattr **tb, in mall_set_parms() argument
178 tcf_bind_filter(tp, &head->res, base); in mall_set_parms()
184 struct tcf_proto *tp, unsigned long base, in mall_change() argument
230 err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr, in mall_change()
398 unsigned long base) in mall_bind_class() argument
404 __tcf_bind_filter(q, &head->res, base); in mall_bind_class()
/net/core/
ethtool.c
465 link_ksettings->base.speed in convert_legacy_settings_to_link_ksettings()
467 link_ksettings->base.duplex in convert_legacy_settings_to_link_ksettings()
469 link_ksettings->base.port in convert_legacy_settings_to_link_ksettings()
471 link_ksettings->base.phy_address in convert_legacy_settings_to_link_ksettings()
473 link_ksettings->base.autoneg in convert_legacy_settings_to_link_ksettings()
475 link_ksettings->base.mdio_support in convert_legacy_settings_to_link_ksettings()
477 link_ksettings->base.eth_tp_mdix in convert_legacy_settings_to_link_ksettings()
479 link_ksettings->base.eth_tp_mdix_ctrl in convert_legacy_settings_to_link_ksettings()
510 ethtool_cmd_speed_set(legacy_settings, link_ksettings->base.speed); in convert_link_ksettings_to_legacy_settings()
512 = link_ksettings->base.duplex; in convert_link_ksettings_to_legacy_settings()
[all …]
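
Note on the ethtool.c results: base is the fixed, scalar portion of struct ethtool_link_ksettings (speed, duplex, port, autoneg and so on), and the two converters shown simply copy each legacy ethtool_cmd field into or out of it, leaving the link-mode bitmaps to the rest of the structure. A trimmed-down sketch of that layout and conversion; the structs below are invented miniatures, not the real UAPI definitions.

#include <stdint.h>
#include <stdio.h>

/* Flat legacy command, as the old ioctl presented it. */
struct legacy_cmd {
	uint32_t speed;
	uint8_t  duplex;
	uint8_t  port;
	uint8_t  autoneg;
};

/* New-style settings: the fixed scalars live in an embedded .base, and the
 * real struct keeps its link-mode bitmaps alongside it. */
struct link_ksettings {
	struct {
		uint32_t speed;
		uint8_t  duplex;
		uint8_t  port;
		uint8_t  autoneg;
	} base;
};

/* Same shape as convert_legacy_settings_to_link_ksettings(): copy each legacy
 * field into the corresponding ->base member. */
static void legacy_to_ksettings(struct link_ksettings *ks,
				const struct legacy_cmd *cmd)
{
	ks->base.speed   = cmd->speed;
	ks->base.duplex  = cmd->duplex;
	ks->base.port    = cmd->port;
	ks->base.autoneg = cmd->autoneg;
}

int main(void)
{
	struct legacy_cmd cmd = { .speed = 1000, .duplex = 1, .port = 0, .autoneg = 1 };
	struct link_ksettings ks;

	legacy_to_ksettings(&ks, &cmd);
	printf("speed %u Mb/s, duplex %u, autoneg %u\n",
	       (unsigned)ks.base.speed, (unsigned)ks.base.duplex,
	       (unsigned)ks.base.autoneg);
	return 0;
}
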
/net/ipv6/
ip6_output.c
1361 cork->base.dst = &rt->dst; in ip6_setup_cork()
1375 cork->base.fragsize = mtu; in ip6_setup_cork()
1376 cork->base.gso_size = ipc6->gso_size; in ip6_setup_cork()
1377 cork->base.tx_flags = 0; in ip6_setup_cork()
1378 cork->base.mark = ipc6->sockc.mark; in ip6_setup_cork()
1379 sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags); in ip6_setup_cork()
1382 cork->base.flags |= IPCORK_ALLFRAG; in ip6_setup_cork()
1383 cork->base.length = 0; in ip6_setup_cork()
1385 cork->base.transmit_time = ipc6->sockc.transmit_time; in ip6_setup_cork()
1772 return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base, in ip6_append_data()
[all …]
esp6.c
127 static void esp_output_done(struct crypto_async_request *base, int err) in esp_output_done() argument
129 struct sk_buff *skb = base->data; in esp_output_done()
202 static void esp_output_done_esn(struct crypto_async_request *base, int err) in esp_output_done_esn() argument
204 struct sk_buff *skb = base->data; in esp_output_done_esn()
207 esp_output_done(base, err); in esp_output_done_esn()
554 static void esp_input_done(struct crypto_async_request *base, int err) in esp_input_done() argument
556 struct sk_buff *skb = base->data; in esp_input_done()
584 static void esp_input_done_esn(struct crypto_async_request *base, int err) in esp_input_done_esn() argument
586 struct sk_buff *skb = base->data; in esp_input_done_esn()
589 esp_input_done(base, err); in esp_input_done_esn()
/net/sunrpc/xprtrdma/
svc_rdma_sendto.c
513 unsigned char *base, in svc_rdma_dma_map_buf() argument
516 return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base), in svc_rdma_dma_map_buf()
517 offset_in_page(base), len); in svc_rdma_dma_map_buf()
650 unsigned char *base; in svc_rdma_map_reply_msg() local
670 base = xdr->tail[0].iov_base; in svc_rdma_map_reply_msg()
675 base += xdr_pad; in svc_rdma_map_reply_msg()
698 base = xdr->tail[0].iov_base; in svc_rdma_map_reply_msg()
703 ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len); in svc_rdma_map_reply_msg()
/net/bridge/netfilter/
ebtables.c
191 const char *base; in ebt_do_table() local
211 base = private->entries; in ebt_do_table()
278 chaininfo = (struct ebt_entries *) (base + verdict); in ebt_do_table()
762 unsigned int udc_cnt, unsigned int hooknr, char *base) in check_chainloops() argument
795 (struct ebt_entries *)(base + verdict); in check_chainloops()
1350 const char *base, char __user *ubase) in ebt_match_to_user() argument
1352 return ebt_obj_to_user(ubase + ((char *)m - base), in ebt_match_to_user()
1359 const char *base, char __user *ubase) in ebt_watcher_to_user() argument
1361 return ebt_obj_to_user(ubase + ((char *)w - base), in ebt_watcher_to_user()
1367 static inline int ebt_entry_to_user(struct ebt_entry *e, const char *base, in ebt_entry_to_user() argument
[all …]
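
Note on the ebtables.c results: base is the start of the kernel's flat copy of the rule blob, so rules reference one another by byte offset, as in (struct ebt_entries *)(base + verdict), and when dumping back to userspace a kernel pointer is turned into an offset and re-applied to the user buffer, ubase + ((char *)m - base). A tiny sketch of that pointer-to-offset translation between two copies of one blob (the blob contents here are invented):

#include <stdio.h>
#include <string.h>

/* Turn a pointer into the first copy of a blob into the matching pointer in
 * a second copy, via the byte offset, the same trick as
 * ebt_obj_to_user(ubase + ((char *)m - base), ...). */
static char *translate(const char *obj, const char *base, char *ubase)
{
	return ubase + (obj - base);
}

int main(void)
{
	char kernel_blob[32] = "rule-A\0rule-B\0rule-C";   /* "kernel" copy */
	char user_blob[32];                                 /* copy headed to userspace */

	memcpy(user_blob, kernel_blob, sizeof(kernel_blob));

	const char *rule_b = kernel_blob + 7;   /* pointer into the kernel copy */
	char *user_rule_b = translate(rule_b, kernel_blob, user_blob);

	printf("same rule in the user copy: %s\n", user_rule_b);   /* rule-B */
	return 0;
}

The arp_tables.c results below use the same idea in the other direction: get_entry(base, offset) turns a stored byte offset back into an entry pointer.
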
/net/ipv4/netfilter/
arp_tables.c
171 get_entry(const void *base, unsigned int offset) in get_entry() argument
173 return (struct arpt_entry *)(base + offset); in get_entry()
453 const unsigned char *base, in check_entry_size_and_hooks() argument
483 if ((unsigned char *)e - base == hook_entries[h]) in check_entry_size_and_hooks()
485 if ((unsigned char *)e - base == underflows[h]) { in check_entry_size_and_hooks()
737 const void *base, struct xt_table_info *newinfo) in compat_calc_entry() argument
744 entry_offset = (void *)e - base; in compat_calc_entry()
755 (e < (struct arpt_entry *)(base + info->hook_entry[i]))) in compat_calc_entry()
758 (e < (struct arpt_entry *)(base + info->underflow[i]))) in compat_calc_entry()
1075 const unsigned char *base, in check_compat_entry_size_and_hooks() argument
[all …]
/net/openvswitch/
flow_netlink.c
1299 const struct ovs_nsh_key_base *base = nla_data(a); in nsh_hdr_from_nlattr() local
1301 flags = base->flags; in nsh_hdr_from_nlattr()
1302 ttl = base->ttl; in nsh_hdr_from_nlattr()
1303 nh->np = base->np; in nsh_hdr_from_nlattr()
1304 nh->mdtype = base->mdtype; in nsh_hdr_from_nlattr()
1305 nh->path_hdr = base->path_hdr; in nsh_hdr_from_nlattr()
1347 const struct ovs_nsh_key_base *base = nla_data(a); in nsh_key_from_nlattr() local
1348 const struct ovs_nsh_key_base *base_mask = base + 1; in nsh_key_from_nlattr()
1350 nsh->base = *base; in nsh_key_from_nlattr()
1351 nsh_mask->base = *base_mask; in nsh_key_from_nlattr()
[all …]
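
Note on the flow_netlink.c results: the NSH netlink attribute packs the key immediately followed by its mask, so once base points at the key the mask is reached as base + 1 on the same struct pointer (nsh_key_from_nlattr()). A short sketch of that key-then-mask layout; the struct fields below are invented, not ovs_nsh_key_base:

#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for the key structure carried in the attribute. */
struct nsh_key_base {
	uint8_t flags;
	uint8_t ttl;
};

int main(void)
{
	/* One attribute payload holding the key followed directly by its mask,
	 * so the mask is simply base + 1, as nsh_key_from_nlattr() does. */
	struct nsh_key_base attr_payload[2] = {
		{ .flags = 0x01, .ttl = 63 },    /* key  */
		{ .flags = 0xff, .ttl = 0xff },  /* mask */
	};
	const struct nsh_key_base *base = attr_payload;
	const struct nsh_key_base *base_mask = base + 1;

	printf("key ttl %u, mask ttl 0x%02x\n",
	       (unsigned)base->ttl, (unsigned)base_mask->ttl);
	return 0;
}
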
