/net/mac80211/

D | chan.c
      16  struct ieee80211_chanctx *ctx)  in ieee80211_chanctx_num_assigned() argument
      23  list_for_each_entry(link, &ctx->assigned_links, assigned_chanctx_list)  in ieee80211_chanctx_num_assigned()
      30  struct ieee80211_chanctx *ctx)  in ieee80211_chanctx_num_reserved() argument
      37  list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list)  in ieee80211_chanctx_num_reserved()
      44  struct ieee80211_chanctx *ctx)  in ieee80211_chanctx_refcount() argument
      46  return ieee80211_chanctx_num_assigned(local, ctx) +  in ieee80211_chanctx_refcount()
      47  ieee80211_chanctx_num_reserved(local, ctx);  in ieee80211_chanctx_refcount()
      52  struct ieee80211_chanctx *ctx;  in ieee80211_num_chanctx() local
      57  list_for_each_entry(ctx, &local->chanctx_list, list)  in ieee80211_num_chanctx()
      85  struct ieee80211_chanctx *ctx,  in ieee80211_chanctx_reserved_chandef() argument
    [all …]

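The chanctx entry above shows a pattern worth noting: the context's reference count is never stored, it is recomputed on demand by walking the assigned and reserved link lists and summing the two counts. A minimal userspace sketch of that idiom, with a simplified singly linked list standing in for the kernel's struct list_head:

    /* "Refcount by list walk", as in ieee80211_chanctx_refcount(): the
     * count is derived from two membership lists each time it is needed.
     * Types are simplified stand-ins, not the kernel's structures. */
    #include <stdio.h>

    struct link {
        struct link *next;   /* simplified: kernel uses struct list_head */
    };

    struct chanctx {
        struct link *assigned_links;
        struct link *reserved_links;
    };

    int count_links(const struct link *head)
    {
        int n = 0;

        for (const struct link *l = head; l; l = l->next)
            n++;
        return n;
    }

    /* The sum of both lists plays the role of the reference count. */
    int chanctx_refcount(const struct chanctx *ctx)
    {
        return count_links(ctx->assigned_links) +
               count_links(ctx->reserved_links);
    }

    int main(void)
    {
        struct link a = { NULL }, b = { &a }, r = { NULL };
        struct chanctx ctx = { .assigned_links = &b, .reserved_links = &r };

        printf("refcount = %d\n", chanctx_refcount(&ctx));   /* prints 3 */
        return 0;
    }
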
D | tkip.c
      82  static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx,  in tkip_mixing_phase1() argument
      86  u16 *p1k = ctx->p1k;  in tkip_mixing_phase1()
     102  ctx->state = TKIP_STATE_PHASE1_DONE;  in tkip_mixing_phase1()
     103  ctx->p1k_iv32 = tsc_IV32;  in tkip_mixing_phase1()
     106  static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,  in tkip_mixing_phase2() argument
     110  const u16 *p1k = ctx->p1k;  in tkip_mixing_phase2()
     155  struct tkip_ctx *ctx = &key->u.tkip.tx;  in ieee80211_compute_tkip_p1k() local
     167  if (ctx->p1k_iv32 != iv32 || ctx->state == TKIP_STATE_NOT_INIT)  in ieee80211_compute_tkip_p1k()
     168  tkip_mixing_phase1(tk, ctx, sdata->vif.addr, iv32);  in ieee80211_compute_tkip_p1k()
     176  struct tkip_ctx *ctx = &key->u.tkip.tx;  in ieee80211_get_tkip_p1k_iv() local
    [all …]

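ieee80211_compute_tkip_p1k() reruns the expensive phase-1 mixing only when the cached p1k_iv32 no longer matches the packet's IV32, or when the context was never initialized. A sketch of that cache-invalidation test; the mixing function here is an empty placeholder, not the real TKIP S-box computation:

    /* Phase-1 caching idiom: recompute only when the high 32 bits of
     * the TSC change.  tkip_mix_phase1() is a placeholder body. */
    #include <stdint.h>
    #include <string.h>

    enum tkip_state { TKIP_STATE_NOT_INIT, TKIP_STATE_PHASE1_DONE };

    struct tkip_ctx {
        enum tkip_state state;
        uint32_t p1k_iv32;   /* IV32 the cached p1k was computed for */
        uint16_t p1k[5];     /* cached phase-1 output */
    };

    void tkip_mix_phase1(struct tkip_ctx *ctx, const uint8_t *tk,
                         const uint8_t *ta, uint32_t iv32)
    {
        /* placeholder: real code runs the TKIP phase-1 mixing here */
        (void)tk; (void)ta;
        memset(ctx->p1k, 0, sizeof(ctx->p1k));
        ctx->state = TKIP_STATE_PHASE1_DONE;
        ctx->p1k_iv32 = iv32;
    }

    void compute_p1k(struct tkip_ctx *ctx, const uint8_t *tk,
                     const uint8_t *ta, uint32_t iv32)
    {
        /* Recompute only on IV32 rollover or first use -- the same
         * test ieee80211_compute_tkip_p1k() applies. */
        if (ctx->p1k_iv32 != iv32 || ctx->state == TKIP_STATE_NOT_INIT)
            tkip_mix_phase1(ctx, tk, ta, iv32);
    }
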
/net/sunrpc/auth_gss/

D | gss_krb5_mech.c
     297  gss_krb5_import_ctx_v2(struct krb5_ctx *ctx, gfp_t gfp_mask)  in gss_krb5_import_ctx_v2() argument
     300  .len = ctx->gk5e->keylength,  in gss_krb5_import_ctx_v2()
     301  .data = ctx->Ksess,  in gss_krb5_import_ctx_v2()
     311  keyout.len = ctx->gk5e->Ke_length;  in gss_krb5_import_ctx_v2()
     312  if (krb5_derive_key(ctx, &keyin, &keyout, KG_USAGE_INITIATOR_SEAL,  in gss_krb5_import_ctx_v2()
     315  ctx->initiator_enc = gss_krb5_alloc_cipher_v2(ctx->gk5e->encrypt_name,  in gss_krb5_import_ctx_v2()
     317  if (ctx->initiator_enc == NULL)  in gss_krb5_import_ctx_v2()
     319  if (ctx->gk5e->aux_cipher) {  in gss_krb5_import_ctx_v2()
     320  ctx->initiator_enc_aux =  in gss_krb5_import_ctx_v2()
     321  gss_krb5_alloc_cipher_v2(ctx->gk5e->aux_cipher,  in gss_krb5_import_ctx_v2()
    [all …]

D | auth_gss.c
     114  gss_get_ctx(struct gss_cl_ctx *ctx)  in gss_get_ctx() argument
     116  refcount_inc(&ctx->count);  in gss_get_ctx()
     117  return ctx;  in gss_get_ctx()
     121  gss_put_ctx(struct gss_cl_ctx *ctx)  in gss_put_ctx() argument
     123  if (refcount_dec_and_test(&ctx->count))  in gss_put_ctx()
     124  gss_free_ctx(ctx);  in gss_put_ctx()
     133  gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)  in gss_cred_set_ctx() argument
     139  gss_get_ctx(ctx);  in gss_cred_set_ctx()
     140  rcu_assign_pointer(gss_cred->gc_ctx, ctx);  in gss_cred_set_ctx()
     150  struct gss_cl_ctx *ctx = NULL;  in gss_cred_get_ctx() local
    [all …]

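gss_get_ctx()/gss_put_ctx() are the usual acquire/release pair: every holder bumps the count, and the last put frees the context. A userspace equivalent using C11 atomics in place of refcount_t, with free() standing in for gss_free_ctx():

    /* Get/put refcount lifecycle, as in auth_gss.c. */
    #include <stdatomic.h>
    #include <stdlib.h>

    struct cl_ctx {
        atomic_int count;
        /* ... context payload ... */
    };

    struct cl_ctx *ctx_get(struct cl_ctx *ctx)
    {
        atomic_fetch_add(&ctx->count, 1);
        return ctx;
    }

    void ctx_put(struct cl_ctx *ctx)
    {
        /* fetch_sub returns the old value; 1 means we were the last
         * holder, mirroring refcount_dec_and_test(). */
        if (atomic_fetch_sub(&ctx->count, 1) == 1)
            free(ctx);
    }

    int main(void)
    {
        struct cl_ctx *ctx = calloc(1, sizeof(*ctx));

        atomic_init(&ctx->count, 1);   /* creation reference */
        ctx_get(ctx);                  /* a second holder */
        ctx_put(ctx);
        ctx_put(ctx);                  /* last put frees */
        return 0;
    }
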
D | gss_krb5_seal.c
      75  setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token)  in setup_token_v2() argument
      81  if ((ctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)  in setup_token_v2()
      83  if (ctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)  in setup_token_v2()
     100  token->len = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength;  in setup_token_v2()
     105  gss_krb5_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,  in gss_krb5_get_mic_v2() argument
     108  struct crypto_ahash *tfm = ctx->initiate ?  in gss_krb5_get_mic_v2()
     109  ctx->initiator_sign : ctx->acceptor_sign;  in gss_krb5_get_mic_v2()
     111  .len = ctx->gk5e->cksumlength,  in gss_krb5_get_mic_v2()
     119  krb5_hdr = setup_token_v2(ctx, token);  in gss_krb5_get_mic_v2()
     123  seq_send_be64 = cpu_to_be64(atomic64_fetch_inc(&ctx->seq_send64));  in gss_krb5_get_mic_v2()
    [all …]

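gss_krb5_get_mic_v2() stamps each MIC token with a value drawn from a shared 64-bit sequence counter, fetched and incremented in one atomic step and stored big-endian. A sketch of just that step; byte offset 8 follows the RFC 4121 MIC token layout (SND_SEQ within the 16-byte header), and glibc's htobe64() stands in for cpu_to_be64():

    /* Per-token sequence numbering, as in gss_krb5_get_mic_v2(). */
    #include <endian.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <string.h>

    atomic_uint_fast64_t seq_send64;   /* shared per-context counter */

    void stamp_token_seq(uint8_t *hdr /* >= 16 bytes */)
    {
        uint64_t seq = atomic_fetch_add(&seq_send64, 1);
        uint64_t be = htobe64(seq);

        /* SND_SEQ sits at offset 8 of the RFC 4121 MIC token header. */
        memcpy(hdr + 8, &be, sizeof(be));
    }
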
/net/tls/

D | tls_main.c
     130  void update_sk_prot(struct sock *sk, struct tls_context *ctx)  in update_sk_prot() argument
     135  &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);  in update_sk_prot()
     137  &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);  in update_sk_prot()
     170  struct tls_context *ctx,  in tls_push_sg() argument
     187  ctx->splicing_pages = true;  in tls_push_sg()
     206  ctx->partially_sent_offset = offset;  in tls_push_sg()
     207  ctx->partially_sent_record = (void *)sg;  in tls_push_sg()
     208  ctx->splicing_pages = false;  in tls_push_sg()
     222  ctx->splicing_pages = false;  in tls_push_sg()
     229  struct tls_context *ctx = tls_get_ctx(sk);  in tls_handle_open_record() local
    [all …]

D | tls_device.c
      57  static void tls_device_free_ctx(struct tls_context *ctx)  in tls_device_free_ctx() argument
      59  if (ctx->tx_conf == TLS_HW) {  in tls_device_free_ctx()
      60  kfree(tls_offload_ctx_tx(ctx));  in tls_device_free_ctx()
      61  kfree(ctx->tx.rec_seq);  in tls_device_free_ctx()
      62  kfree(ctx->tx.iv);  in tls_device_free_ctx()
      65  if (ctx->rx_conf == TLS_HW)  in tls_device_free_ctx()
      66  kfree(tls_offload_ctx_rx(ctx));  in tls_device_free_ctx()
      68  tls_ctx_free(NULL, ctx);  in tls_device_free_ctx()
      75  struct tls_context *ctx = offload_ctx->ctx;  in tls_device_tx_del_task() local
      81  netdev = rcu_dereference_protected(ctx->netdev,  in tls_device_tx_del_task()
    [all …]

D | tls_sw.c
     192  struct tls_sw_context_rx *ctx;  in tls_decrypt_done() local
     217  ctx = tls_sw_ctx_rx(tls_ctx);  in tls_decrypt_done()
     223  ctx->async_wait.err = err;  in tls_decrypt_done()
     239  if (atomic_dec_and_test(&ctx->decrypt_pending))  in tls_decrypt_done()
     240  complete(&ctx->async_wait.completion);  in tls_decrypt_done()
     243  static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)  in tls_decrypt_async_wait() argument
     245  if (!atomic_dec_and_test(&ctx->decrypt_pending))  in tls_decrypt_async_wait()
     246  crypto_wait_req(-EINPROGRESS, &ctx->async_wait);  in tls_decrypt_async_wait()
     247  atomic_inc(&ctx->decrypt_pending);  in tls_decrypt_async_wait()
     249  return ctx->async_wait.err;  in tls_decrypt_async_wait()
    [all …]

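The decrypt_pending counter in tls_sw.c is kept biased by one so that in-flight completions cannot fire the wakeup while the submitter is still queuing work; tls_decrypt_async_wait() drops the bias, waits if anything is still pending, then re-arms it. A pthread-based userspace analog of that protocol, with a mutex and condition variable standing in for the atomics and struct completion:

    /* Biased pending-counter wait, as in tls_decrypt_async_wait(). */
    #include <pthread.h>

    struct async_ctx {
        pthread_mutex_t lock;   /* init with PTHREAD_MUTEX_INITIALIZER */
        pthread_cond_t done;    /* init with PTHREAD_COND_INITIALIZER */
        int pending;            /* starts at 1: the bias reference */
        int err;
    };

    /* The submitter does ctx->pending++ under the lock for every
     * operation it queues (not shown). */

    /* Completion callback: the tls_decrypt_done() analog. */
    void op_done(struct async_ctx *ctx, int err)
    {
        pthread_mutex_lock(&ctx->lock);
        if (err)
            ctx->err = err;
        if (--ctx->pending == 0)
            pthread_cond_signal(&ctx->done);
        pthread_mutex_unlock(&ctx->lock);
    }

    /* Submitter, once all ops are queued. */
    int wait_all(struct async_ctx *ctx)
    {
        pthread_mutex_lock(&ctx->lock);
        if (--ctx->pending != 0)           /* drop the bias ... */
            while (ctx->pending != 0)      /* ... wait for stragglers */
                pthread_cond_wait(&ctx->done, &ctx->lock);
        ctx->pending = 1;                  /* re-arm for the next batch */
        pthread_mutex_unlock(&ctx->lock);
        return ctx->err;
    }
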
D | tls.h
     139  void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
     140  void update_sk_prot(struct sock *sk, struct tls_context *ctx);
     145  int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
     147  void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
     170  void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
     171  void tls_device_write_space(struct sock *sk, struct tls_context *ctx);
     196  int tls_strp_msg_cow(struct tls_sw_context_rx *ctx);
     197  struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx);
     207  static inline struct sk_buff *tls_strp_msg(struct tls_sw_context_rx *ctx)  in tls_strp_msg() argument
     209  DEBUG_NET_WARN_ON_ONCE(!ctx->strp.msg_ready || !ctx->strp.anchor->len);  in tls_strp_msg()
    [all …]

D | tls_toe.c
      49  struct tls_context *ctx = tls_get_ctx(sk);  in tls_toe_sk_destruct() local
      51  ctx->sk_destruct(sk);  in tls_toe_sk_destruct()
      54  tls_ctx_free(sk, ctx);  in tls_toe_sk_destruct()
      60  struct tls_context *ctx;  in tls_toe_bypass() local
      66  ctx = tls_ctx_create(sk);  in tls_toe_bypass()
      67  if (!ctx)  in tls_toe_bypass()
      70  ctx->sk_destruct = sk->sk_destruct;  in tls_toe_bypass()
      72  ctx->rx_conf = TLS_HW_RECORD;  in tls_toe_bypass()
      73  ctx->tx_conf = TLS_HW_RECORD;  in tls_toe_bypass()
      74  update_sk_prot(sk, ctx);  in tls_toe_bypass()
    [all …]

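tls_toe_bypass() saves the socket's original sk_destruct and interposes its own, which chains to the saved hook before freeing the context. The same interposition pattern in plain C, with a reduced stand-in for struct sock:

    /* Destructor chaining, as in tls_toe.c. */
    #include <stdio.h>
    #include <stdlib.h>

    struct sock;
    typedef void (*destruct_fn)(struct sock *);

    struct sock {
        destruct_fn sk_destruct;
        void *ctx;               /* stand-in for tls_get_ctx(sk) */
    };

    struct toe_ctx {
        destruct_fn saved_destruct;
    };

    static void toe_sk_destruct(struct sock *sk)
    {
        struct toe_ctx *ctx = sk->ctx;

        ctx->saved_destruct(sk);  /* run the original teardown first */
        free(ctx);                /* then release our own state */
    }

    static void toe_attach(struct sock *sk)
    {
        struct toe_ctx *ctx = malloc(sizeof(*ctx));

        ctx->saved_destruct = sk->sk_destruct;  /* stash the old hook */
        sk->ctx = ctx;
        sk->sk_destruct = toe_sk_destruct;      /* interpose ourselves */
    }

    static void orig_destruct(struct sock *sk)
    {
        (void)sk;
        printf("original destructor ran\n");
    }

    int main(void)
    {
        struct sock sk = { .sk_destruct = orig_destruct };

        toe_attach(&sk);
        sk.sk_destruct(&sk);   /* chains through toe_sk_destruct() */
        return 0;
    }
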
/net/6lowpan/

D | debugfs.c
      19  struct lowpan_iphc_ctx *ctx = data;  in lowpan_ctx_flag_active_set() local
      25  set_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE, &ctx->flags);  in lowpan_ctx_flag_active_set()
      27  clear_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE, &ctx->flags);  in lowpan_ctx_flag_active_set()
      44  struct lowpan_iphc_ctx *ctx = data;  in lowpan_ctx_flag_c_set() local
      50  set_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags);  in lowpan_ctx_flag_c_set()
      52  clear_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags);  in lowpan_ctx_flag_c_set()
      68  struct lowpan_iphc_ctx *ctx = data;  in lowpan_ctx_plen_set() local
      70  container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);  in lowpan_ctx_plen_set()
      76  ctx->plen = val;  in lowpan_ctx_plen_set()
      84  struct lowpan_iphc_ctx *ctx = data;  in lowpan_ctx_plen_get() local
    [all …]

/net/xfrm/

D | espintcp.c
      14  static void handle_nonesp(struct espintcp_ctx *ctx, struct sk_buff *skb,  in handle_nonesp() argument
      27  skb_queue_tail(&ctx->ike_queue, skb);  in handle_nonesp()
      28  ctx->saved_data_ready(sk);  in handle_nonesp()
      55  struct espintcp_ctx *ctx = container_of(strp, struct espintcp_ctx,  in espintcp_rcv() local
     108  handle_nonesp(ctx, skb, strp->sk);  in espintcp_rcv()
     137  struct espintcp_ctx *ctx = espintcp_getctx(sk);  in espintcp_recvmsg() local
     143  skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, &off, &err);  in espintcp_recvmsg()
     170  struct espintcp_ctx *ctx = espintcp_getctx(sk);  in espintcp_queue_out() local
     172  if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog))  in espintcp_queue_out()
     175  __skb_queue_tail(&ctx->out_queue, skb);  in espintcp_queue_out()
    [all …]

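espintcp_queue_out() applies simple tail-drop backpressure: once the out_queue reaches the backlog limit, the packet is refused rather than queued. A sketch of that guard; the limit constant here is assumed, standing in for netdev_max_backlog:

    /* Bounded-queue tail drop, as in espintcp_queue_out(). */
    #include <errno.h>

    #define MAX_BACKLOG 1000   /* assumed value; kernel reads netdev_max_backlog */

    struct out_queue {
        int len;
        /* ... list of buffers ... */
    };

    int queue_out(struct out_queue *q /*, buffer */)
    {
        if (q->len >= MAX_BACKLOG)
            return -ENOBUFS;   /* caller drops the packet */

        /* ... append buffer to the tail ... */
        q->len++;
        return 0;
    }
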
/net/netfilter/

D | nfnetlink_osf.c
      67  struct nf_osf_hdr_ctx *ctx)  in nf_osf_match_one() argument
      69  const __u8 *optpinit = ctx->optp;  in nf_osf_match_one()
      75  if (ctx->totlen != f->ss || !nf_osf_ttl(skb, ttl_check, f->ttl))  in nf_osf_match_one()
      91  ctx->optsize > MAX_IPOPTLEN ||  in nf_osf_match_one()
      92  ctx->optsize != foptsize)  in nf_osf_match_one()
      98  if (f->opt[optnum].kind == *ctx->optp) {  in nf_osf_match_one()
     100  const __u8 *optend = ctx->optp + len;  in nf_osf_match_one()
     104  switch (*ctx->optp) {  in nf_osf_match_one()
     106  mss = ctx->optp[3];  in nf_osf_match_one()
     108  mss |= ctx->optp[2];  in nf_osf_match_one()
    [all …]

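nf_osf_match_one() walks the TCP options area byte by byte, dispatching on the option kind and, for MSS, reassembling the 16-bit value from the option payload. A simplified standalone walk in the same spirit (not the OSF matcher itself), returning the MSS if present:

    /* Generic TCP options walk: each option is kind(1) [len(1) data...];
     * kinds 0 (EOL) and 1 (NOP) take a single byte. */
    #include <stddef.h>
    #include <stdint.h>

    uint16_t tcp_opts_find_mss(const uint8_t *optp, size_t optsize)
    {
        const uint8_t *end = optp + optsize;

        while (optp < end) {
            uint8_t kind = *optp;

            if (kind == 0)                 /* TCPOPT_EOL: done */
                break;
            if (kind == 1) {               /* TCPOPT_NOP: one byte */
                optp++;
                continue;
            }
            if (optp + 2 > end || optp[1] < 2 || optp + optp[1] > end)
                break;                     /* malformed option */
            if (kind == 2 && optp[1] == 4) /* TCPOPT_MSS, length 4 */
                return (uint16_t)(optp[2] << 8 | optp[3]);
            optp += optp[1];               /* skip to the next option */
        }
        return 0;
    }
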
D | nf_tables_api.c
     130  static void nft_ctx_init(struct nft_ctx *ctx,  in nft_ctx_init() argument
     139  ctx->net = net;  in nft_ctx_init()
     140  ctx->family = family;  in nft_ctx_init()
     141  ctx->level = 0;  in nft_ctx_init()
     142  ctx->table = table;  in nft_ctx_init()
     143  ctx->chain = chain;  in nft_ctx_init()
     144  ctx->nla = nla;  in nft_ctx_init()
     145  ctx->portid = NETLINK_CB(skb).portid;  in nft_ctx_init()
     146  ctx->report = nlmsg_report(nlh);  in nft_ctx_init()
     147  ctx->flags = nlh->nlmsg_flags;  in nft_ctx_init()
    [all …]

D | nft_synproxy.c
     152  static int nft_synproxy_do_init(const struct nft_ctx *ctx,  in nft_synproxy_do_init() argument
     156  struct synproxy_net *snet = synproxy_pernet(ctx->net);  in nft_synproxy_do_init()
     171  err = nf_ct_netns_get(ctx->net, ctx->family);  in nft_synproxy_do_init()
     175  switch (ctx->family) {  in nft_synproxy_do_init()
     177  err = nf_synproxy_ipv4_init(snet, ctx->net);  in nft_synproxy_do_init()
     183  err = nf_synproxy_ipv6_init(snet, ctx->net);  in nft_synproxy_do_init()
     189  err = nf_synproxy_ipv4_init(snet, ctx->net);  in nft_synproxy_do_init()
     192  err = nf_synproxy_ipv6_init(snet, ctx->net);  in nft_synproxy_do_init()
     194  nf_synproxy_ipv4_fini(snet, ctx->net);  in nft_synproxy_do_init()
     203  nf_ct_netns_put(ctx->net, ctx->family);  in nft_synproxy_do_init()
    [all …]

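For the dual-family case, nft_synproxy_do_init() must bring up both the IPv4 and IPv6 hooks, and a failure in the second tears the first back down before returning. The unwinding shape, with placeholder init/fini functions standing in for nf_synproxy_ipv4_init() and friends:

    /* Paired init with error unwinding, as in nft_synproxy_do_init(). */
    int ipv4_init(void);    /* placeholder */
    void ipv4_fini(void);   /* placeholder */
    int ipv6_init(void);    /* placeholder */

    int both_init(void)
    {
        int err;

        err = ipv4_init();
        if (err)
            return err;

        err = ipv6_init();
        if (err) {
            ipv4_fini();    /* unwind the half-done setup */
            return err;
        }
        return 0;
    }
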
D | nft_inner.c
      53  struct nft_inner_tun_ctx *ctx, u32 off)  in nft_inner_parse_l2l3() argument
      86  ctx->inner_lloff = off;  in nft_inner_parse_l2l3()
      87  ctx->flags |= NFT_PAYLOAD_CTX_INNER_LL;  in nft_inner_parse_l2l3()
     109  ctx->llproto = llproto;  in nft_inner_parse_l2l3()
     126  ctx->inner_nhoff = nhoff;  in nft_inner_parse_l2l3()
     127  ctx->flags |= NFT_PAYLOAD_CTX_INNER_NH;  in nft_inner_parse_l2l3()
     131  ctx->flags |= NFT_PAYLOAD_CTX_INNER_TH;  in nft_inner_parse_l2l3()
     132  ctx->inner_thoff = thoff;  in nft_inner_parse_l2l3()
     133  ctx->l4proto = iph->protocol;  in nft_inner_parse_l2l3()
     150  ctx->inner_nhoff = nhoff;  in nft_inner_parse_l2l3()
    [all …]

D | nft_compat.c
      31  static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,  in nft_compat_chain_validate_dependency() argument
      35  const struct nft_chain *chain = ctx->chain;  in nft_compat_chain_validate_dependency()
      44  if (ctx->family != NFPROTO_BRIDGE)  in nft_compat_chain_validate_dependency()
     144  const struct nft_ctx *ctx,  in nft_target_set_tgchk_param() argument
     148  par->net = ctx->net;  in nft_target_set_tgchk_param()
     149  par->table = ctx->table->name;  in nft_target_set_tgchk_param()
     150  switch (ctx->family) {  in nft_target_set_tgchk_param()
     172  if (nft_is_base_chain(ctx->chain)) {  in nft_target_set_tgchk_param()
     174  nft_base_chain(ctx->chain);  in nft_target_set_tgchk_param()
     181  par->family = ctx->family;  in nft_target_set_tgchk_param()
    [all …]

D | nft_immediate.c
      46  static int nft_immediate_init(const struct nft_ctx *ctx,  in nft_immediate_init() argument
      61  err = nft_data_init(ctx, &priv->data, &desc, tb[NFTA_IMMEDIATE_DATA]);  in nft_immediate_init()
      67  err = nft_parse_register_store(ctx, tb[NFTA_IMMEDIATE_DREG],  in nft_immediate_init()
      79  err = nf_tables_bind_chain(ctx, chain);  in nft_immediate_init()
      95  static void nft_immediate_activate(const struct nft_ctx *ctx,  in nft_immediate_activate() argument
     112  chain_ctx = *ctx;  in nft_immediate_activate()
     118  nft_clear(ctx->net, chain);  in nft_immediate_activate()
     128  static void nft_immediate_chain_deactivate(const struct nft_ctx *ctx,  in nft_immediate_chain_deactivate() argument
     135  chain_ctx = *ctx;  in nft_immediate_chain_deactivate()
     142  static void nft_immediate_deactivate(const struct nft_ctx *ctx,  in nft_immediate_deactivate() argument
    [all …]

D | nf_tables_offload.c
      53  static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,  in nft_flow_rule_transfer_vlan() argument
      91  struct nft_offload_ctx *ctx;  in nft_flow_rule_create() local
     114  ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);  in nft_flow_rule_create()
     115  if (!ctx) {  in nft_flow_rule_create()
     119  ctx->net = net;  in nft_flow_rule_create()
     120  ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;  in nft_flow_rule_create()
     127  err = expr->ops->offload(ctx, flow, expr);  in nft_flow_rule_create()
     133  nft_flow_rule_transfer_vlan(ctx, flow);  in nft_flow_rule_create()
     135  flow->proto = ctx->dep.l3num;  in nft_flow_rule_create()
     136  kfree(ctx);  in nft_flow_rule_create()
    [all …]

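nft_flow_rule_create() allocates a scratch nft_offload_ctx, lets each expression's ->offload() hook contribute to the flow rule, and frees the ctx on both the success and failure paths. A reduced sketch of that loop over simplified stand-in types:

    /* Scratch-ctx translation loop, as in nft_flow_rule_create(). */
    #include <errno.h>
    #include <stdlib.h>

    struct flow_rule;   /* opaque result being built */
    struct offload_ctx { int l3num; /* ... dependency state ... */ };

    struct expr {
        struct expr *next;
        int (*offload)(struct offload_ctx *, struct flow_rule *,
                       const struct expr *);
    };

    int flow_rule_build(struct expr *head, struct flow_rule *flow)
    {
        struct offload_ctx *ctx = calloc(1, sizeof(*ctx));
        int err = 0;

        if (!ctx)
            return -ENOMEM;

        for (struct expr *e = head; e; e = e->next) {
            if (!e->offload) {        /* expression not offloadable */
                err = -EOPNOTSUPP;
                break;
            }
            err = e->offload(ctx, flow, e);
            if (err)
                break;
        }

        free(ctx);   /* scratch state lives only during translation */
        return err;
    }
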
D | nft_objref.c
      25  static int nft_objref_init(const struct nft_ctx *ctx,  in nft_objref_init() argument
      30  u8 genmask = nft_genmask_next(ctx->net);  in nft_objref_init()
      38  obj = nft_obj_lookup(ctx->net, ctx->table,  in nft_objref_init()
      68  static void nft_objref_deactivate(const struct nft_ctx *ctx,  in nft_objref_deactivate() argument
      80  static void nft_objref_activate(const struct nft_ctx *ctx,  in nft_objref_activate() argument
     128  static int nft_objref_map_init(const struct nft_ctx *ctx,  in nft_objref_map_init() argument
     133  u8 genmask = nft_genmask_next(ctx->net);  in nft_objref_map_init()
     137  set = nft_set_lookup_global(ctx->net, ctx->table,  in nft_objref_map_init()
     153  err = nf_tables_bind_set(ctx, set, &priv->binding);  in nft_objref_map_init()
     176  static void nft_objref_map_deactivate(const struct nft_ctx *ctx,  in nft_objref_map_deactivate() argument
    [all …]

/net/ipv4/netfilter/

D | nf_nat_snmp_basic_main.c
      67  static void fast_csum(struct snmp_ctx *ctx, unsigned char offset)  in fast_csum() argument
      73  memcpy(&s[1], &ctx->from, 4);  in fast_csum()
      74  memcpy(&s[7], &ctx->to, 4);  in fast_csum()
      83  memcpy(&s[0], &ctx->from, 4);  in fast_csum()
      84  memcpy(&s[4], &ctx->to, 4);  in fast_csum()
      91  *ctx->check = csum_fold(csum_partial(s, size,  in fast_csum()
      92  ~csum_unfold(*ctx->check)));  in fast_csum()
     108  struct snmp_ctx *ctx = (struct snmp_ctx *)context;  in snmp_helper() local
     114  if (*pdata == ctx->from) {  in snmp_helper()
     116  (void *)&ctx->from, (void *)&ctx->to);  in snmp_helper()
    [all …]

/net/sctp/

D | outqueue.c
     800  static void sctp_outq_select_transport(struct sctp_flush_ctx *ctx,  in sctp_outq_select_transport() argument
     816  if (ctx->transport && sctp_cmp_addr_exact(&chunk->dest,  in sctp_outq_select_transport()
     817  &ctx->transport->ipaddr))  in sctp_outq_select_transport()
     818  new_transport = ctx->transport;  in sctp_outq_select_transport()
     820  new_transport = sctp_assoc_lookup_paddr(ctx->asoc,  in sctp_outq_select_transport()
     828  new_transport = ctx->asoc->peer.active_path;  in sctp_outq_select_transport()
     853  new_transport = ctx->asoc->peer.active_path;  in sctp_outq_select_transport()
     861  if (new_transport != ctx->transport) {  in sctp_outq_select_transport()
     862  ctx->transport = new_transport;  in sctp_outq_select_transport()
     863  ctx->packet = &ctx->transport->packet;  in sctp_outq_select_transport()
    [all …]

/net/8021q/

D | vlan_dev.c
     805  static int vlan_dev_fill_forward_path(struct net_device_path_ctx *ctx,  in vlan_dev_fill_forward_path() argument
     808  struct vlan_dev_priv *vlan = vlan_dev_priv(ctx->dev);  in vlan_dev_fill_forward_path()
     813  path->dev = ctx->dev;  in vlan_dev_fill_forward_path()
     814  ctx->dev = vlan->real_dev;  in vlan_dev_fill_forward_path()
     815  if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan))  in vlan_dev_fill_forward_path()
     818  ctx->vlan[ctx->num_vlans].id = vlan->vlan_id;  in vlan_dev_fill_forward_path()
     819  ctx->vlan[ctx->num_vlans].proto = vlan->vlan_proto;  in vlan_dev_fill_forward_path()
     820  ctx->num_vlans++;  in vlan_dev_fill_forward_path()
     827  static const struct macsec_ops *vlan_get_macsec_ops(const struct macsec_context *ctx)  in vlan_get_macsec_ops() argument
     829  return vlan_dev_priv(ctx->netdev)->real_dev->macsec_ops;  in vlan_get_macsec_ops()
    [all …]

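vlan_dev_fill_forward_path() appends the device's (id, proto) pair to a fixed-size array in the path ctx after a bounds check, then points the walk at the lower device. The same push in a reduced form; the array size and the string device handle are assumptions of the sketch:

    /* Bounds-checked VLAN tag push, as in vlan_dev_fill_forward_path(). */
    #include <stdint.h>

    #define MAX_NESTED_VLANS 2   /* assumed; the kernel array is small too */

    struct path_ctx {
        const char *dev;                 /* current device in the walk */
        unsigned int num_vlans;
        struct { uint16_t id, proto; } vlan[MAX_NESTED_VLANS];
    };

    int vlan_fill_forward_path(struct path_ctx *ctx, const char *real_dev,
                               uint16_t vlan_id, uint16_t vlan_proto)
    {
        ctx->dev = real_dev;             /* continue on the lower device */

        if (ctx->num_vlans >= MAX_NESTED_VLANS)
            return -1;                   /* no room for another tag */

        ctx->vlan[ctx->num_vlans].id = vlan_id;
        ctx->vlan[ctx->num_vlans].proto = vlan_proto;
        ctx->num_vlans++;
        return 0;
    }
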
/net/ipv4/

D | tcp_fastopen.c
      32  struct tcp_fastopen_context *ctx =  in tcp_fastopen_ctx_free() local
      35  kfree_sensitive(ctx);  in tcp_fastopen_ctx_free()
      40  struct tcp_fastopen_context *ctx;  in tcp_fastopen_destroy_cipher() local
      42  ctx = rcu_dereference_protected(  in tcp_fastopen_destroy_cipher()
      43  inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);  in tcp_fastopen_destroy_cipher()
      44  if (ctx)  in tcp_fastopen_destroy_cipher()
      45  call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);  in tcp_fastopen_destroy_cipher()
      61  struct tcp_fastopen_context *ctx, *octx;  in tcp_fastopen_reset_cipher() local
      65  ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);  in tcp_fastopen_reset_cipher()
      66  if (!ctx) {  in tcp_fastopen_reset_cipher()
    [all …]

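tcp_fastopen_ctx_free() is invoked by RCU with a pointer to the rcu_head embedded in the context, and recovers the enclosing structure via container_of() before freeing it. A userspace demonstration of that pointer arithmetic with a dummy rcu_head; the kernel additionally uses kfree_sensitive() to scrub the key material:

    /* container_of() recovery from an embedded callback handle. */
    #include <stddef.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head { struct rcu_head *next; };   /* layout stand-in */

    struct fastopen_context {
        struct rcu_head rcu;    /* embedded callback handle */
        unsigned char key[32];
    };

    /* Signature matches what call_rcu() would invoke. */
    void fastopen_ctx_free(struct rcu_head *head)
    {
        struct fastopen_context *ctx =
            container_of(head, struct fastopen_context, rcu);

        free(ctx);   /* kernel: kfree_sensitive() to wipe the keys */
    }

    int main(void)
    {
        struct fastopen_context *ctx = calloc(1, sizeof(*ctx));

        fastopen_ctx_free(&ctx->rcu);   /* as the RCU callback would */
        return 0;
    }
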
/net/bpf/

D | test_run.c
     102  struct xdp_buff ctx;  member
     144  new_ctx = &head->ctx;  in xdp_test_run_init_page()
     233  return head->orig_ctx.data != head->ctx.data ||  in ctx_was_changed()
     234  head->orig_ctx.data_meta != head->ctx.data_meta ||  in ctx_was_changed()
     235  head->orig_ctx.data_end != head->ctx.data_end;  in ctx_was_changed()
     243  head->ctx.data = head->orig_ctx.data;  in reset_ctx()
     244  head->ctx.data_meta = head->orig_ctx.data_meta;  in reset_ctx()
     245  head->ctx.data_end = head->orig_ctx.data_end;  in reset_ctx()
     246  xdp_update_frame_from_buff(&head->ctx, head->frame);  in reset_ctx()
     290  struct xdp_buff *ctx;  in xdp_test_run_batch() local
    [all …]

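test_run.c keeps a pristine orig_ctx next to the live ctx so that repeated XDP test runs can detect whether the program moved the packet pointers, and restore them between iterations. The snapshot/compare/reset trio, reduced to the three fields the original compares:

    /* Snapshot, compare, reset between runs, as in bpf/test_run.c. */
    #include <stdbool.h>

    struct xdp_ctx {
        void *data, *data_meta, *data_end;
    };

    struct test_head {
        struct xdp_ctx ctx;        /* live, handed to the program */
        struct xdp_ctx orig_ctx;   /* pristine snapshot */
    };

    bool ctx_was_changed(const struct test_head *h)
    {
        return h->orig_ctx.data != h->ctx.data ||
               h->orig_ctx.data_meta != h->ctx.data_meta ||
               h->orig_ctx.data_end != h->ctx.data_end;
    }

    void reset_ctx(struct test_head *h)
    {
        h->ctx = h->orig_ctx;   /* field-by-field in the kernel version */
    }
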