/net/sunrpc/auth_gss/

D | gss_krb5_mech.c
    198  struct krb5_ctx *ctx, struct crypto_sync_skcipher **res)  [in get_key(), argument]
    226  *res = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0);  [in get_key()]
    229  "crypto algorithm %s\n", ctx->gk5e->encrypt_name);  [in get_key()]
    235  "crypto algorithm %s\n", ctx->gk5e->encrypt_name);  [in get_key()]
    252  gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)  [in gss_import_v1_context(), argument]
    257  p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));  [in gss_import_v1_context()]
    262  ctx->enctype = ENCTYPE_DES_CBC_RAW;  [in gss_import_v1_context()]
    264  ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);  [in gss_import_v1_context()]
    265  if (ctx->gk5e == NULL) {  [in gss_import_v1_context()]
    293  p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));  [in gss_import_v1_context()]
    [all …]
D | gss_krb5_seal.c
    73   setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token)  [in setup_token(), argument]
    77   int body_size = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength;  [in setup_token()]
    79   token->len = g_token_size(&ctx->mech_used, body_size);  [in setup_token()]
    82   g_make_token_header(&ctx->mech_used, body_size, (unsigned char **)&ptr);  [in setup_token()]
    91   *ptr++ = (__force u16)cpu_to_le16(ctx->gk5e->signalg);  [in setup_token()]
    99   setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token)  [in setup_token_v2(), argument]
    105  if ((ctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)  [in setup_token_v2()]
    107  if (ctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)  [in setup_token_v2()]
    122  token->len = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength;  [in setup_token_v2()]
    127  gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,  [in gss_get_mic_v1(), argument]
    [all …]
D | gss_krb5_unseal.c
    74   gss_verify_mic_v1(struct krb5_ctx *ctx,  [in gss_verify_mic_v1(), argument]
    91   if (g_verify_token_header(&ctx->mech_used, &bodysize, &ptr,  [in gss_verify_mic_v1()]
    102  if (signalg != ctx->gk5e->signalg)  [in gss_verify_mic_v1()]
    112  if (ctx->gk5e->keyed_cksum)  [in gss_verify_mic_v1()]
    113  cksumkey = ctx->cksum;  [in gss_verify_mic_v1()]
    117  if (make_checksum(ctx, ptr, 8, message_buffer, 0,  [in gss_verify_mic_v1()]
    122  ctx->gk5e->cksumlength))  [in gss_verify_mic_v1()]
    129  if (now > ctx->endtime)  [in gss_verify_mic_v1()]
    134  if (krb5_get_seq_num(ctx, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,  [in gss_verify_mic_v1()]
    138  if ((ctx->initiate && direction != 0xff) ||  [in gss_verify_mic_v1()]
    [all …]
D | auth_gss.c
    95   gss_get_ctx(struct gss_cl_ctx *ctx)  [in gss_get_ctx(), argument]
    97   refcount_inc(&ctx->count);  [in gss_get_ctx()]
    98   return ctx;  [in gss_get_ctx()]
    102  gss_put_ctx(struct gss_cl_ctx *ctx)  [in gss_put_ctx(), argument]
    104  if (refcount_dec_and_test(&ctx->count))  [in gss_put_ctx()]
    105  gss_free_ctx(ctx);  [in gss_put_ctx()]
    114  gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)  [in gss_cred_set_ctx(), argument]
    120  gss_get_ctx(ctx);  [in gss_cred_set_ctx()]
    121  rcu_assign_pointer(gss_cred->gc_ctx, ctx);  [in gss_cred_set_ctx()]
    160  struct gss_cl_ctx *ctx = NULL;  [in gss_cred_get_ctx(), local]
    [all …]
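The auth_gss.c hits above are the classic refcounted-context idiom: gss_get_ctx() takes a reference with refcount_inc(), gss_put_ctx() drops one and frees the context on the final put, and gss_cred_set_ctx() grabs a reference before publishing the pointer to RCU readers with rcu_assign_pointer(). A minimal sketch of that idiom, pieced together from the truncated lines; the struct layouts are cut down to the members the listing shows, and gss_cred_set_ctx() is simplified here to take the gss_cred directly instead of the enclosing rpc_cred.

#include <linux/refcount.h>
#include <linux/rcupdate.h>

/* Reduced stand-ins: only the members visible in the listing are kept. */
struct gss_cl_ctx {
        refcount_t count;
};

struct gss_cred {
        struct gss_cl_ctx __rcu *gc_ctx;
};

static void gss_free_ctx(struct gss_cl_ctx *ctx);  /* defined elsewhere in the file */

static struct gss_cl_ctx *gss_get_ctx(struct gss_cl_ctx *ctx)
{
        refcount_inc(&ctx->count);              /* caller now holds a reference */
        return ctx;
}

static void gss_put_ctx(struct gss_cl_ctx *ctx)
{
        if (refcount_dec_and_test(&ctx->count))
                gss_free_ctx(ctx);              /* last reference dropped: free */
}

static void gss_cred_set_ctx(struct gss_cred *gss_cred, struct gss_cl_ctx *ctx)
{
        gss_get_ctx(ctx);                          /* reference owned by the cred */
        rcu_assign_pointer(gss_cred->gc_ctx, ctx); /* publish to RCU readers */
}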
/net/tls/

D | tls_main.c
    68   static void update_sk_prot(struct sock *sk, struct tls_context *ctx)  [in update_sk_prot(), argument]
    72   sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];  [in update_sk_prot()]
    100  struct tls_context *ctx,  [in tls_push_sg(), argument]
    114  ctx->in_tcp_sendpages = true;  [in tls_push_sg()]
    133  ctx->partially_sent_offset = offset;  [in tls_push_sg()]
    134  ctx->partially_sent_record = (void *)sg;  [in tls_push_sg()]
    135  ctx->in_tcp_sendpages = false;  [in tls_push_sg()]
    149  ctx->in_tcp_sendpages = false;  [in tls_push_sg()]
    156  struct tls_context *ctx = tls_get_ctx(sk);  [in tls_handle_open_record(), local]
    158  if (tls_is_pending_open_record(ctx))  [in tls_handle_open_record()]
    [all …]
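The tls_main.c lines hint at how the TLS context steers socket behaviour: update_sk_prot() swaps sk->sk_prot to an entry of a prebuilt proto table indexed by IP version and by the context's tx_conf/rx_conf, while tls_push_sg() stashes partially sent state in the context so transmission can resume later. A rough sketch of the table-selection part; only the indexing expression comes from the listing, and the table dimensions, the TLSV4/TLSV6 constants and the ip_ver computation are assumptions here.

#include <net/sock.h>
#include <net/tls.h>

/* Assumed shape: one struct proto per (IP version, TX conf, RX conf)
 * combination, filled in at module init with the matching send/recv ops. */
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];

static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
        /* Assumption: the socket family picks the first dimension. */
        int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

        sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
}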
D | tls_device.c
    53   static void tls_device_free_ctx(struct tls_context *ctx)  [in tls_device_free_ctx(), argument]
    55   if (ctx->tx_conf == TLS_HW) {  [in tls_device_free_ctx()]
    56   kfree(tls_offload_ctx_tx(ctx));  [in tls_device_free_ctx()]
    57   kfree(ctx->tx.rec_seq);  [in tls_device_free_ctx()]
    58   kfree(ctx->tx.iv);  [in tls_device_free_ctx()]
    61   if (ctx->rx_conf == TLS_HW)  [in tls_device_free_ctx()]
    62   kfree(tls_offload_ctx_rx(ctx));  [in tls_device_free_ctx()]
    64   tls_ctx_free(NULL, ctx);  [in tls_device_free_ctx()]
    69   struct tls_context *ctx, *tmp;  [in tls_device_gc_task(), local]
    77   list_for_each_entry_safe(ctx, tmp, &gc_list, list) {  [in tls_device_gc_task()]
    [all …]
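tls_device_free_ctx() in the listing frees the direction-specific offload state only for the directions that were configured for hardware offload (tx_conf/rx_conf == TLS_HW) before dropping the shared tls_context. A condensed sketch reconstructed from the visible lines; the braces and ordering between those lines are inferred, and the surrounding gc-list machinery is omitted.

static void tls_device_free_ctx(struct tls_context *ctx)
{
        if (ctx->tx_conf == TLS_HW) {
                kfree(tls_offload_ctx_tx(ctx));   /* TX offload state */
                kfree(ctx->tx.rec_seq);           /* per-direction record sequence */
                kfree(ctx->tx.iv);                /* per-direction IV */
        }

        if (ctx->rx_conf == TLS_HW)
                kfree(tls_offload_ctx_rx(ctx));   /* RX offload state */

        tls_ctx_free(NULL, ctx);                  /* finally drop the shared context */
}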
D | tls_sw.c
    121  static int padding_length(struct tls_sw_context_rx *ctx,  [in padding_length(), argument]
    146  ctx->control = content_type;  [in padding_length()]
    156  struct tls_sw_context_rx *ctx;  [in tls_decrypt_done(), local]
    166  ctx = tls_sw_ctx_rx(tls_ctx);  [in tls_decrypt_done()]
    171  ctx->async_wait.err = err;  [in tls_decrypt_done()]
    177  pad = padding_length(ctx, prot, skb);  [in tls_decrypt_done()]
    179  ctx->async_wait.err = pad;  [in tls_decrypt_done()]
    206  pending = atomic_dec_return(&ctx->decrypt_pending);  [in tls_decrypt_done()]
    208  if (!pending && READ_ONCE(ctx->async_notify))  [in tls_decrypt_done()]
    209  complete(&ctx->async_wait.completion);  [in tls_decrypt_done()]
    [all …]
/net/mac80211/

D | chan.c
    14   struct ieee80211_chanctx *ctx)  [in ieee80211_chanctx_num_assigned(), argument]
    21   list_for_each_entry(sdata, &ctx->assigned_vifs, assigned_chanctx_list)  [in ieee80211_chanctx_num_assigned()]
    28   struct ieee80211_chanctx *ctx)  [in ieee80211_chanctx_num_reserved(), argument]
    35   list_for_each_entry(sdata, &ctx->reserved_vifs, reserved_chanctx_list)  [in ieee80211_chanctx_num_reserved()]
    42   struct ieee80211_chanctx *ctx)  [in ieee80211_chanctx_refcount(), argument]
    44   return ieee80211_chanctx_num_assigned(local, ctx) +  [in ieee80211_chanctx_refcount()]
    45   ieee80211_chanctx_num_reserved(local, ctx);  [in ieee80211_chanctx_refcount()]
    50   struct ieee80211_chanctx *ctx;  [in ieee80211_num_chanctx(), local]
    55   list_for_each_entry(ctx, &local->chanctx_list, list)  [in ieee80211_num_chanctx()]
    83   struct ieee80211_chanctx *ctx,  [in ieee80211_chanctx_reserved_chandef(), argument]
    [all …]
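The chan.c hits show that a channel context's "refcount" is not a stored counter: it is computed on demand as the number of interfaces on the context's assigned list plus the number on its reserved list. A sketch of that computation; the list heads and field names come from the listing, while the struct definitions (from mac80211's private headers) and the locking are assumed and omitted.

static int ieee80211_chanctx_num_assigned(struct ieee80211_local *local,
                                          struct ieee80211_chanctx *ctx)
{
        struct ieee80211_sub_if_data *sdata;
        int num = 0;

        list_for_each_entry(sdata, &ctx->assigned_vifs, assigned_chanctx_list)
                num++;

        return num;
}

static int ieee80211_chanctx_num_reserved(struct ieee80211_local *local,
                                          struct ieee80211_chanctx *ctx)
{
        struct ieee80211_sub_if_data *sdata;
        int num = 0;

        list_for_each_entry(sdata, &ctx->reserved_vifs, reserved_chanctx_list)
                num++;

        return num;
}

/* A context is droppable once neither list has any users left. */
static int ieee80211_chanctx_refcount(struct ieee80211_local *local,
                                      struct ieee80211_chanctx *ctx)
{
        return ieee80211_chanctx_num_assigned(local, ctx) +
               ieee80211_chanctx_num_reserved(local, ctx);
}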
D | tkip.c
    82   static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx,  [in tkip_mixing_phase1(), argument]
    86   u16 *p1k = ctx->p1k;  [in tkip_mixing_phase1()]
    102  ctx->state = TKIP_STATE_PHASE1_DONE;  [in tkip_mixing_phase1()]
    103  ctx->p1k_iv32 = tsc_IV32;  [in tkip_mixing_phase1()]
    106  static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,  [in tkip_mixing_phase2(), argument]
    110  const u16 *p1k = ctx->p1k;  [in tkip_mixing_phase2()]
    155  struct tkip_ctx *ctx = &key->u.tkip.tx;  [in ieee80211_compute_tkip_p1k(), local]
    167  if (ctx->p1k_iv32 != iv32 || ctx->state == TKIP_STATE_NOT_INIT)  [in ieee80211_compute_tkip_p1k()]
    168  tkip_mixing_phase1(tk, ctx, sdata->vif.addr, iv32);  [in ieee80211_compute_tkip_p1k()]
    176  struct tkip_ctx *ctx = &key->u.tkip.tx;  [in ieee80211_get_tkip_p1k_iv(), local]
    [all …]
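The tkip.c hits show the per-key TKIP mixing cache: the phase-1 output (p1k) lives in the tx context together with the IV32 it was derived from, and the phase-1 mix is rerun only when IV32 changes or the context was never initialised. A condensed sketch of just that caching check; the parameter list is rearranged for brevity and the actual mixing math stays in tkip_mixing_phase1().

/* Condensed from the ieee80211_compute_tkip_p1k() hits; the key material and
 * sdata are passed explicitly instead of being pulled out of the key struct. */
static void compute_tkip_p1k_cached(struct ieee80211_key *key,
                                    struct ieee80211_sub_if_data *sdata,
                                    const u8 *tk, u32 iv32)
{
        struct tkip_ctx *ctx = &key->u.tkip.tx;

        /* Phase 1 depends only on the transmitter address and IV32, so the
         * cached p1k stays valid until IV32 moves on. */
        if (ctx->p1k_iv32 != iv32 || ctx->state == TKIP_STATE_NOT_INIT)
                tkip_mixing_phase1(tk, ctx, sdata->vif.addr, iv32);
}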
/net/6lowpan/

D | debugfs.c
    19   struct lowpan_iphc_ctx *ctx = data;  [in lowpan_ctx_flag_active_set(), local]
    25   set_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE, &ctx->flags);  [in lowpan_ctx_flag_active_set()]
    27   clear_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE, &ctx->flags);  [in lowpan_ctx_flag_active_set()]
    44   struct lowpan_iphc_ctx *ctx = data;  [in lowpan_ctx_flag_c_set(), local]
    50   set_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags);  [in lowpan_ctx_flag_c_set()]
    52   clear_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags);  [in lowpan_ctx_flag_c_set()]
    68   struct lowpan_iphc_ctx *ctx = data;  [in lowpan_ctx_plen_set(), local]
    70   container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);  [in lowpan_ctx_plen_set()]
    76   ctx->plen = val;  [in lowpan_ctx_plen_set()]
    84   struct lowpan_iphc_ctx *ctx = data;  [in lowpan_ctx_plen_get(), local]
    [all …]
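The debugfs.c hits repeat one accessor pattern per attribute: the debugfs callback receives the lowpan_iphc_ctx as its data pointer plus a u64 value, and maps 0/1 onto clear_bit()/set_bit() of a flag in ctx->flags. A sketch of one such setter, reconstructed from the visible lines; the DEFINE_DEBUGFS_ATTRIBUTE() wiring and the lock protecting the context table are not shown in the listing and are left out here.

#include <linux/bitops.h>

static int lowpan_ctx_flag_active_set(void *data, u64 val)
{
        struct lowpan_iphc_ctx *ctx = data;

        if (val != 0 && val != 1)
                return -EINVAL;        /* the attribute is a plain boolean */

        if (val)
                set_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE, &ctx->flags);
        else
                clear_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE, &ctx->flags);

        return 0;
}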
D | iphc.c
    192  struct lowpan_iphc_ctx *ret = &lowpan_dev(dev)->ctx.table[id];  [in lowpan_iphc_ctx_get_by_id()]
    204  struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table;  [in lowpan_iphc_ctx_get_by_addr()]
    248  struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table;  [in lowpan_iphc_ctx_get_by_mcast_addr()]
    365  const struct lowpan_iphc_ctx *ctx,  [in lowpan_iphc_uncompress_ctx_addr(), argument]
    384  ipv6_addr_prefix_copy(ipaddr, &ctx->pfx, ctx->plen);  [in lowpan_iphc_uncompress_ctx_addr()]
    391  ipv6_addr_prefix_copy(ipaddr, &ctx->pfx, ctx->plen);  [in lowpan_iphc_uncompress_ctx_addr()]
    404  ipv6_addr_prefix_copy(ipaddr, &ctx->pfx, ctx->plen);  [in lowpan_iphc_uncompress_ctx_addr()]
    480  struct lowpan_iphc_ctx *ctx,  [in lowpan_uncompress_multicast_ctx_daddr(), argument]
    494  ipaddr->s6_addr[3] = ctx->plen;  [in lowpan_uncompress_multicast_ctx_daddr()]
    496  ipv6_addr_prefix(&network_pfx, &ctx->pfx, ctx->plen);  [in lowpan_uncompress_multicast_ctx_daddr()]
    [all …]
/net/netfilter/

D | nf_tables_api.c
    86   static void nft_ctx_init(struct nft_ctx *ctx,  [in nft_ctx_init(), argument]
    95   ctx->net = net;  [in nft_ctx_init()]
    96   ctx->family = family;  [in nft_ctx_init()]
    97   ctx->level = 0;  [in nft_ctx_init()]
    98   ctx->table = table;  [in nft_ctx_init()]
    99   ctx->chain = chain;  [in nft_ctx_init()]
    100  ctx->nla = nla;  [in nft_ctx_init()]
    101  ctx->portid = NETLINK_CB(skb).portid;  [in nft_ctx_init()]
    102  ctx->report = nlmsg_report(nlh);  [in nft_ctx_init()]
    103  ctx->flags = nlh->nlmsg_flags;  [in nft_ctx_init()]
    [all …]
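nft_ctx_init() in the nf_tables_api.c hits is a plain aggregator: it bundles the netns, family, table, chain, netlink attributes and request metadata into one struct nft_ctx that the rest of nf_tables passes around instead of a long parameter list. A sketch matching the visible assignments; the parameter list is reconstructed and may not match the file exactly.

static void nft_ctx_init(struct nft_ctx *ctx,
                         struct net *net,
                         const struct sk_buff *skb,
                         const struct nlmsghdr *nlh,
                         u8 family,
                         struct nft_table *table,
                         struct nft_chain *chain,
                         const struct nlattr * const *nla)
{
        ctx->net    = net;
        ctx->family = family;
        ctx->level  = 0;
        ctx->table  = table;
        ctx->chain  = chain;
        ctx->nla    = nla;
        ctx->portid = NETLINK_CB(skb).portid;   /* requesting socket */
        ctx->report = nlmsg_report(nlh);        /* caller asked for an echo */
        ctx->flags  = nlh->nlmsg_flags;
}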
D | nfnetlink_osf.c
    67   struct nf_osf_hdr_ctx *ctx)  [in nf_osf_match_one(), argument]
    69   const __u8 *optpinit = ctx->optp;  [in nf_osf_match_one()]
    75   if (ctx->totlen != f->ss || !nf_osf_ttl(skb, ttl_check, f->ttl))  [in nf_osf_match_one()]
    91   ctx->optsize > MAX_IPOPTLEN ||  [in nf_osf_match_one()]
    92   ctx->optsize != foptsize)  [in nf_osf_match_one()]
    98   if (f->opt[optnum].kind == *ctx->optp) {  [in nf_osf_match_one()]
    100  const __u8 *optend = ctx->optp + len;  [in nf_osf_match_one()]
    104  switch (*ctx->optp) {  [in nf_osf_match_one()]
    106  mss = ctx->optp[3];  [in nf_osf_match_one()]
    108  mss |= ctx->optp[2];  [in nf_osf_match_one()]
    [all …]
D | nft_synproxy.c
    152  static int nft_synproxy_do_init(const struct nft_ctx *ctx,  [in nft_synproxy_do_init(), argument]
    156  struct synproxy_net *snet = synproxy_pernet(ctx->net);  [in nft_synproxy_do_init()]
    171  err = nf_ct_netns_get(ctx->net, ctx->family);  [in nft_synproxy_do_init()]
    175  switch (ctx->family) {  [in nft_synproxy_do_init()]
    177  err = nf_synproxy_ipv4_init(snet, ctx->net);  [in nft_synproxy_do_init()]
    183  err = nf_synproxy_ipv6_init(snet, ctx->net);  [in nft_synproxy_do_init()]
    190  err = nf_synproxy_ipv4_init(snet, ctx->net);  [in nft_synproxy_do_init()]
    193  err = nf_synproxy_ipv6_init(snet, ctx->net);  [in nft_synproxy_do_init()]
    202  nf_ct_netns_put(ctx->net, ctx->family);  [in nft_synproxy_do_init()]
    206  static void nft_synproxy_do_destroy(const struct nft_ctx *ctx)  [in nft_synproxy_do_destroy(), argument]
    [all …]
D | nft_compat.c
    30   static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,  [in nft_compat_chain_validate_dependency(), argument]
    34   const struct nft_chain *chain = ctx->chain;  [in nft_compat_chain_validate_dependency()]
    43   if (ctx->family != NFPROTO_BRIDGE)  [in nft_compat_chain_validate_dependency()]
    136  const struct nft_ctx *ctx,  [in nft_target_set_tgchk_param(), argument]
    140  par->net = ctx->net;  [in nft_target_set_tgchk_param()]
    141  par->table = ctx->table->name;  [in nft_target_set_tgchk_param()]
    142  switch (ctx->family) {  [in nft_target_set_tgchk_param()]
    164  if (nft_is_base_chain(ctx->chain)) {  [in nft_target_set_tgchk_param()]
    166  nft_base_chain(ctx->chain);  [in nft_target_set_tgchk_param()]
    173  par->family = ctx->family;  [in nft_target_set_tgchk_param()]
    [all …]
D | nf_tables_offload.c
    34   struct nft_offload_ctx *ctx;  [in nft_flow_rule_create(), local]
    56   ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);  [in nft_flow_rule_create()]
    57   if (!ctx) {  [in nft_flow_rule_create()]
    61   ctx->net = net;  [in nft_flow_rule_create()]
    62   ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;  [in nft_flow_rule_create()]
    69   err = expr->ops->offload(ctx, flow, expr);  [in nft_flow_rule_create()]
    75   flow->proto = ctx->dep.l3num;  [in nft_flow_rule_create()]
    76   kfree(ctx);  [in nft_flow_rule_create()]
    80   kfree(ctx);  [in nft_flow_rule_create()]
    105  void nft_offload_set_dependency(struct nft_offload_ctx *ctx,  [in nft_offload_set_dependency(), argument]
    [all …]
D | nft_lookup.c
    57   static int nft_lookup_init(const struct nft_ctx *ctx,  [in nft_lookup_init(), argument]
    62   u8 genmask = nft_genmask_next(ctx->net);  [in nft_lookup_init()]
    71   set = nft_set_lookup_global(ctx->net, ctx->table, tb[NFTA_LOOKUP_SET],  [in nft_lookup_init()]
    101  err = nft_validate_register_store(ctx, priv->dreg, NULL,  [in nft_lookup_init()]
    110  err = nf_tables_bind_set(ctx, set, &priv->binding);  [in nft_lookup_init()]
    118  static void nft_lookup_deactivate(const struct nft_ctx *ctx,  [in nft_lookup_deactivate(), argument]
    124  nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);  [in nft_lookup_deactivate()]
    127  static void nft_lookup_activate(const struct nft_ctx *ctx,  [in nft_lookup_activate(), argument]
    135  static void nft_lookup_destroy(const struct nft_ctx *ctx,  [in nft_lookup_destroy(), argument]
    140  nf_tables_destroy_set(ctx, priv->set);  [in nft_lookup_destroy()]
    [all …]
D | nft_objref.c
    25   static int nft_objref_init(const struct nft_ctx *ctx,  [in nft_objref_init(), argument]
    30   u8 genmask = nft_genmask_next(ctx->net);  [in nft_objref_init()]
    38   obj = nft_obj_lookup(ctx->net, ctx->table,  [in nft_objref_init()]
    65   static void nft_objref_deactivate(const struct nft_ctx *ctx,  [in nft_objref_deactivate(), argument]
    77   static void nft_objref_activate(const struct nft_ctx *ctx,  [in nft_objref_activate(), argument]
    122  static int nft_objref_map_init(const struct nft_ctx *ctx,  [in nft_objref_map_init(), argument]
    127  u8 genmask = nft_genmask_next(ctx->net);  [in nft_objref_map_init()]
    131  set = nft_set_lookup_global(ctx->net, ctx->table,  [in nft_objref_map_init()]
    147  err = nf_tables_bind_set(ctx, set, &priv->binding);  [in nft_objref_map_init()]
    169  static void nft_objref_map_deactivate(const struct nft_ctx *ctx,  [in nft_objref_map_deactivate(), argument]
    [all …]
D | nft_immediate.c
    32   static int nft_immediate_init(const struct nft_ctx *ctx,  [in nft_immediate_init(), argument]
    44   err = nft_data_init(ctx, &priv->data, sizeof(priv->data), &desc,  [in nft_immediate_init()]
    52   err = nft_validate_register_store(ctx, priv->dreg, &priv->data,  [in nft_immediate_init()]
    64   static void nft_immediate_activate(const struct nft_ctx *ctx,  [in nft_immediate_activate(), argument]
    72   static void nft_immediate_deactivate(const struct nft_ctx *ctx,  [in nft_immediate_deactivate(), argument]
    98   static int nft_immediate_validate(const struct nft_ctx *ctx,  [in nft_immediate_validate(), argument]
    103  struct nft_ctx *pctx = (struct nft_ctx *)ctx;  [in nft_immediate_validate()]
    116  err = nft_chain_validate(ctx, data->verdict.chain);  [in nft_immediate_validate()]
    128  static int nft_immediate_offload_verdict(struct nft_offload_ctx *ctx,  [in nft_immediate_offload_verdict(), argument]
    135  entry = &flow->rule->action.entries[ctx->num_actions++];  [in nft_immediate_offload_verdict()]
    [all …]
D | nft_connlimit.c
    59   static int nft_connlimit_do_init(const struct nft_ctx *ctx,  [in nft_connlimit_do_init(), argument]
    83   return nf_ct_netns_get(ctx->net, ctx->family);  [in nft_connlimit_do_init()]
    86   static void nft_connlimit_do_destroy(const struct nft_ctx *ctx,  [in nft_connlimit_do_destroy(), argument]
    89   nf_ct_netns_put(ctx->net, ctx->family);  [in nft_connlimit_do_destroy()]
    117  static int nft_connlimit_obj_init(const struct nft_ctx *ctx,  [in nft_connlimit_obj_init(), argument]
    123  return nft_connlimit_do_init(ctx, tb, priv);  [in nft_connlimit_obj_init()]
    126  static void nft_connlimit_obj_destroy(const struct nft_ctx *ctx,  [in nft_connlimit_obj_destroy(), argument]
    131  nft_connlimit_do_destroy(ctx, priv);  [in nft_connlimit_obj_destroy()]
    181  static int nft_connlimit_init(const struct nft_ctx *ctx,  [in nft_connlimit_init(), argument]
    187  return nft_connlimit_do_init(ctx, tb, priv);  [in nft_connlimit_init()]
    [all …]
D | nft_payload.c
    126  static int nft_payload_init(const struct nft_ctx *ctx,  [in nft_payload_init(), argument]
    137  return nft_validate_register_store(ctx, priv->dreg, NULL,  [in nft_payload_init()]
    156  static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,  [in nft_payload_offload_ll(), argument]
    160  struct nft_offload_reg *reg = &ctx->regs[priv->dreg];  [in nft_payload_offload_ll()]
    184  static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,  [in nft_payload_offload_ip(), argument]
    188  struct nft_offload_reg *reg = &ctx->regs[priv->dreg];  [in nft_payload_offload_ip()]
    211  nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);  [in nft_payload_offload_ip()]
    220  static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,  [in nft_payload_offload_ip6(), argument]
    224  struct nft_offload_reg *reg = &ctx->regs[priv->dreg];  [in nft_payload_offload_ip6()]
    247  nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);  [in nft_payload_offload_ip6()]
    [all …]
D | nft_meta.c
    317  int nft_meta_get_init(const struct nft_ctx *ctx,  [in nft_meta_get_init(), argument]
    384  return nft_validate_register_store(ctx, priv->dreg, NULL,  [in nft_meta_get_init()]
    389  static int nft_meta_get_validate(const struct nft_ctx *ctx,  [in nft_meta_get_validate(), argument]
    400  switch (ctx->family) {  [in nft_meta_get_validate()]
    415  return nft_chain_validate_hooks(ctx->chain, hooks);  [in nft_meta_get_validate()]
    421  int nft_meta_set_validate(const struct nft_ctx *ctx,  [in nft_meta_set_validate(), argument]
    431  switch (ctx->family) {  [in nft_meta_set_validate()]
    447  return nft_chain_validate_hooks(ctx->chain, hooks);  [in nft_meta_set_validate()]
    451  int nft_meta_set_init(const struct nft_ctx *ctx,  [in nft_meta_set_init(), argument]
    522  void nft_meta_set_destroy(const struct nft_ctx *ctx,  [in nft_meta_set_destroy(), argument]
    [all …]
/net/ipv4/netfilter/

D | nf_nat_snmp_basic_main.c
    67   static void fast_csum(struct snmp_ctx *ctx, unsigned char offset)  [in fast_csum(), argument]
    73   memcpy(&s[1], &ctx->from, 4);  [in fast_csum()]
    74   memcpy(&s[7], &ctx->to, 4);  [in fast_csum()]
    83   memcpy(&s[0], &ctx->from, 4);  [in fast_csum()]
    84   memcpy(&s[4], &ctx->to, 4);  [in fast_csum()]
    91   *ctx->check = csum_fold(csum_partial(s, size,  [in fast_csum()]
    92   ~csum_unfold(*ctx->check)));  [in fast_csum()]
    108  struct snmp_ctx *ctx = (struct snmp_ctx *)context;  [in snmp_helper(), local]
    114  if (*pdata == ctx->from) {  [in snmp_helper()]
    116  (void *)&ctx->from, (void *)&ctx->to);  [in snmp_helper()]
    [all …]
/net/sctp/

D | outqueue.c
    789  static void sctp_outq_select_transport(struct sctp_flush_ctx *ctx,  [in sctp_outq_select_transport(), argument]
    805  if (ctx->transport && sctp_cmp_addr_exact(&chunk->dest,  [in sctp_outq_select_transport()]
    806  &ctx->transport->ipaddr))  [in sctp_outq_select_transport()]
    807  new_transport = ctx->transport;  [in sctp_outq_select_transport()]
    809  new_transport = sctp_assoc_lookup_paddr(ctx->asoc,  [in sctp_outq_select_transport()]
    817  new_transport = ctx->asoc->peer.active_path;  [in sctp_outq_select_transport()]
    842  new_transport = ctx->asoc->peer.active_path;  [in sctp_outq_select_transport()]
    850  if (new_transport != ctx->transport) {  [in sctp_outq_select_transport()]
    851  ctx->transport = new_transport;  [in sctp_outq_select_transport()]
    852  ctx->packet = &ctx->transport->packet;  [in sctp_outq_select_transport()]
    [all …]
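sctp_outq_select_transport() in the outqueue.c hits caches the chosen transport in the flush context: reuse ctx->transport when the chunk's destination matches it, otherwise look the peer address up on the association, fall back to the active path, and re-point ctx->transport and ctx->packet only when the choice actually changed. A condensed sketch of that flow; the special-casing of control chunks and unreachable transports in the real function is omitted, and the packet reconfiguration is reduced to a comment.

static void select_transport_sketch(struct sctp_flush_ctx *ctx,
                                    struct sctp_chunk *chunk)
{
        struct sctp_transport *new_transport = NULL;

        if (ctx->transport && sctp_cmp_addr_exact(&chunk->dest,
                                                  &ctx->transport->ipaddr))
                new_transport = ctx->transport;        /* keep the cached one */
        else
                new_transport = sctp_assoc_lookup_paddr(ctx->asoc,
                                                        &chunk->dest);

        if (!new_transport)
                new_transport = ctx->asoc->peer.active_path;

        if (new_transport != ctx->transport) {
                ctx->transport = new_transport;
                ctx->packet = &ctx->transport->packet;
                /* the real function also (re)configures ctx->packet here */
        }
}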
/net/ipv4/

D | tcp_fastopen.c
    38   struct tcp_fastopen_context *ctx =  [in tcp_fastopen_ctx_free(), local]
    41   kzfree(ctx);  [in tcp_fastopen_ctx_free()]
    46   struct tcp_fastopen_context *ctx;  [in tcp_fastopen_destroy_cipher(), local]
    48   ctx = rcu_dereference_protected(  [in tcp_fastopen_destroy_cipher()]
    49   inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);  [in tcp_fastopen_destroy_cipher()]
    50   if (ctx)  [in tcp_fastopen_destroy_cipher()]
    51   call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);  [in tcp_fastopen_destroy_cipher()]
    72   struct tcp_fastopen_context *ctx, *octx;  [in tcp_fastopen_reset_cipher(), local]
    76   ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);  [in tcp_fastopen_reset_cipher()]
    77   if (!ctx) {  [in tcp_fastopen_reset_cipher()]
    [all …]
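The tcp_fastopen.c hits show the RCU lifecycle of the TCP Fast Open key context: the current context is fetched with rcu_dereference_protected(), replacements are kmalloc()ed and swapped in, and the old context is released through call_rcu() into tcp_fastopen_ctx_free(), which kzfree()s it so the key material is wiped rather than merely freed. A condensed sketch of the destroy side; the container_of() in the callback is inferred, since the listing truncates that line.

static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
        struct tcp_fastopen_context *ctx =
                container_of(head, struct tcp_fastopen_context, rcu);

        kzfree(ctx);        /* zero the TFO keys before freeing */
}

void tcp_fastopen_destroy_cipher(struct sock *sk)
{
        struct tcp_fastopen_context *ctx;

        /* Update-side access: the caller guarantees exclusion, hence the
         * rcu_dereference_protected(..., 1) seen in the listing. */
        ctx = rcu_dereference_protected(
                inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
        if (ctx)
                call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}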