
Searched for refs:new (results 1 – 25 of 115), sorted by relevance


/net/sched/
act_police.c  60 struct tcf_police_params *new; in tcf_police_init() local
145 new = kzalloc(sizeof(*new), GFP_KERNEL); in tcf_police_init()
146 if (unlikely(!new)) { in tcf_police_init()
152 new->tcfp_result = tcfp_result; in tcf_police_init()
153 new->tcfp_mtu = parm->mtu; in tcf_police_init()
154 if (!new->tcfp_mtu) { in tcf_police_init()
155 new->tcfp_mtu = ~0; in tcf_police_init()
157 new->tcfp_mtu = 255 << R_tab->rate.cell_log; in tcf_police_init()
160 new->rate_present = true; in tcf_police_init()
163 psched_ratecfg_precompute(&new->rate, &R_tab->rate, rate64); in tcf_police_init()
[all …]
cls_matchall.c  191 struct cls_mall_head *new; in mall_change() local
212 new = kzalloc(sizeof(*new), GFP_KERNEL); in mall_change()
213 if (!new) in mall_change()
216 err = tcf_exts_init(&new->exts, net, TCA_MATCHALL_ACT, 0); in mall_change()
222 new->handle = handle; in mall_change()
223 new->flags = flags; in mall_change()
224 new->pf = alloc_percpu(struct tc_matchall_pcnt); in mall_change()
225 if (!new->pf) { in mall_change()
230 err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr, in mall_change()
235 if (!tc_skip_hw(new->flags)) { in mall_change()
[all …]
cls_cgroup.c  84 struct cls_cgroup_head *new; in cls_cgroup_change() local
96 new = kzalloc(sizeof(*head), GFP_KERNEL); in cls_cgroup_change()
97 if (!new) in cls_cgroup_change()
100 err = tcf_exts_init(&new->exts, net, TCA_CGROUP_ACT, TCA_CGROUP_POLICE); in cls_cgroup_change()
103 new->handle = handle; in cls_cgroup_change()
104 new->tp = tp; in cls_cgroup_change()
111 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &new->exts, ovr, in cls_cgroup_change()
116 err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &new->ematches); in cls_cgroup_change()
120 rcu_assign_pointer(tp->root, new); in cls_cgroup_change()
127 tcf_exts_destroy(&new->exts); in cls_cgroup_change()
[all …]
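The cls_cgroup excerpt shows the usual replace-and-publish shape: the new head is allocated and fully initialized, and only then handed to rcu_assign_pointer(), so readers traversing tp->root never see a half-built object. Below is a minimal userspace sketch of that ordering using C11 atomics in place of RCU; struct head, head_root and head_replace() are invented for illustration, and the immediate free() of the old head stands in for the deferred reclamation RCU would provide.

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

struct head {
	uint32_t handle;
	uint32_t flags;
};

static _Atomic(struct head *) head_root;

static int head_replace(uint32_t handle, uint32_t flags)
{
	struct head *new = calloc(1, sizeof(*new));
	struct head *old;

	if (!new)
		return -1;

	/* Fill in every field before the pointer becomes visible. */
	new->handle = handle;
	new->flags = flags;

	/* Release semantics: a reader that sees 'new' also sees its contents. */
	old = atomic_exchange_explicit(&head_root, new, memory_order_acq_rel);

	/* The kernel defers this through RCU; freeing immediately is only
	 * safe here because the sketch has no concurrent readers. */
	free(old);
	return 0;
}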
sch_api.c  841 struct Qdisc *new, struct Qdisc *old, in qdisc_offload_graft_helper() argument
854 if (!err || !new || new == &noop_qdisc) in qdisc_offload_graft_helper()
860 any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED; in qdisc_offload_graft_helper()
870 struct Qdisc *new, struct Qdisc *old, in qdisc_offload_graft_root() argument
875 .handle = new ? new->handle : 0, in qdisc_offload_graft_root()
876 .ingress = (new && new->flags & TCQ_F_INGRESS) || in qdisc_offload_graft_root()
880 qdisc_offload_graft_helper(dev, NULL, new, old, in qdisc_offload_graft_root()
975 struct Qdisc *old, struct Qdisc *new) in qdisc_notify() argument
989 if (new && !tc_qdisc_dump_ignore(new, false)) { in qdisc_notify()
990 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq, in qdisc_notify()
[all …]
cls_u32.c  803 struct tc_u_knode *new; in u32_init_knode() local
805 new = kzalloc(struct_size(new, sel.keys, s->nkeys), GFP_KERNEL); in u32_init_knode()
806 if (!new) in u32_init_knode()
809 RCU_INIT_POINTER(new->next, n->next); in u32_init_knode()
810 new->handle = n->handle; in u32_init_knode()
811 RCU_INIT_POINTER(new->ht_up, n->ht_up); in u32_init_knode()
813 new->ifindex = n->ifindex; in u32_init_knode()
814 new->fshift = n->fshift; in u32_init_knode()
815 new->flags = n->flags; in u32_init_knode()
816 RCU_INIT_POINTER(new->ht_down, ht); in u32_init_knode()
[all …]
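u32_init_knode() sizes the copy with struct_size(new, sel.keys, s->nkeys), i.e. the fixed header plus a flexible array of key entries, then copies the scalar fields across. A compilable sketch of that flexible-array duplication, with struct knode and struct key as stand-ins for the real tc_u_knode layout:

#include <stdlib.h>
#include <string.h>

struct key {
	unsigned int mask, val, off;
};

struct knode {
	unsigned int handle;
	unsigned int flags;
	size_t nkeys;
	struct key keys[];	/* flexible array member */
};

static struct knode *knode_dup(const struct knode *n)
{
	/* Header plus nkeys trailing elements; struct_size() does this with
	 * overflow checking in the kernel. */
	struct knode *new = calloc(1, sizeof(*new) + n->nkeys * sizeof(n->keys[0]));

	if (!new)
		return NULL;

	new->handle = n->handle;
	new->flags = n->flags;
	new->nkeys = n->nkeys;
	memcpy(new->keys, n->keys, n->nkeys * sizeof(n->keys[0]));
	return new;
}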
/net/netfilter/
nf_conntrack_extend.c  45 struct nf_ct_ext *new; in nf_ct_ext_add() local
58 oldlen = sizeof(*new); in nf_ct_ext_add()
73 new = krealloc(ct->ext, alloc, gfp); in nf_ct_ext_add()
74 if (!new) in nf_ct_ext_add()
78 memset(new->offset, 0, sizeof(new->offset)); in nf_ct_ext_add()
80 new->offset[id] = newoff; in nf_ct_ext_add()
81 new->len = newlen; in nf_ct_ext_add()
82 memset((void *)new + newoff, 0, newlen - newoff); in nf_ct_ext_add()
84 ct->ext = new; in nf_ct_ext_add()
85 return (void *)new + newoff; in nf_ct_ext_add()
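nf_ct_ext_add() keeps all extensions in one block: the block is krealloc()'d to make room, the new extension's offset and the total length are recorded in the header, and only the freshly added tail is zeroed. A simplified userspace sketch of that bookkeeping (the real code also aligns newoff and derives sizes per extension type; EXT_MAX and struct ext here are invented):

#include <stdlib.h>
#include <string.h>

#define EXT_MAX 4

struct ext {
	unsigned short offset[EXT_MAX];	/* where each extension starts */
	unsigned short len;		/* total length of the block   */
	/* extension data follows */
};

static void *ext_add(struct ext **pext, unsigned int id, size_t size)
{
	struct ext *ext = *pext;
	size_t newoff = ext ? ext->len : sizeof(*ext);
	size_t newlen = newoff + size;
	struct ext *new;

	if (id >= EXT_MAX)
		return NULL;

	new = realloc(ext, newlen);
	if (!new)
		return NULL;

	if (!ext)	/* first extension: start with a clean offset table */
		memset(new->offset, 0, sizeof(new->offset));

	new->offset[id] = newoff;
	new->len = newlen;
	memset((char *)new + newoff, 0, newlen - newoff);	/* zero only the new tail */

	*pext = new;
	return (char *)new + newoff;
}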
core.c  110 struct nf_hook_entries *new; in nf_hook_entries_grow() local
128 new = allocate_hook_entries_size(alloc_entries); in nf_hook_entries_grow()
129 if (!new) in nf_hook_entries_grow()
132 new_ops = nf_hook_entries_get_hook_ops(new); in nf_hook_entries_grow()
144 new->hooks[nhooks] = old->hooks[i]; in nf_hook_entries_grow()
148 new->hooks[nhooks].hook = reg->hook; in nf_hook_entries_grow()
149 new->hooks[nhooks].priv = reg->priv; in nf_hook_entries_grow()
157 new->hooks[nhooks].hook = reg->hook; in nf_hook_entries_grow()
158 new->hooks[nhooks].priv = reg->priv; in nf_hook_entries_grow()
161 return new; in nf_hook_entries_grow()
[all …]
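nf_hook_entries_grow() never resizes a live hook vector; it allocates a larger one, copies the existing hooks in order and splices the new registration in at its priority position. A small sketch of that copy-and-insert, with struct hook_entry and struct hook_vec standing in for nf_hook_entry and nf_hook_entries:

#include <stdlib.h>

struct hook_entry {
	int priority;
	void (*hook)(void *priv);
	void *priv;
};

struct hook_vec {
	size_t num;
	struct hook_entry hooks[];
};

static struct hook_vec *hook_vec_grow(const struct hook_vec *old,
				      const struct hook_entry *reg)
{
	size_t n = old ? old->num : 0;
	struct hook_vec *new = malloc(sizeof(*new) + (n + 1) * sizeof(new->hooks[0]));
	size_t i, nhooks = 0;
	int inserted = 0;

	if (!new)
		return NULL;

	for (i = 0; i < n; i++) {
		if (!inserted && reg->priority < old->hooks[i].priority) {
			new->hooks[nhooks++] = *reg;	/* splice in by priority */
			inserted = 1;
		}
		new->hooks[nhooks++] = old->hooks[i];	/* keep existing order */
	}
	if (!inserted)
		new->hooks[nhooks++] = *reg;		/* lowest priority: append */

	new->num = nhooks;
	return new;
}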
nft_set_rbtree.c  311 struct nft_rbtree_elem *new, in __nft_rbtree_insert() argument
330 d = nft_rbtree_cmp(set, rbe, new); in __nft_rbtree_insert()
380 d = nft_rbtree_cmp(set, rbe, new); in __nft_rbtree_insert()
397 if (nft_rbtree_cmp(set, rbe_ge, new) != 0) { in __nft_rbtree_insert()
406 if ((nft_rbtree_interval_start(new) && in __nft_rbtree_insert()
408 (nft_rbtree_interval_end(new) && in __nft_rbtree_insert()
427 if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) && in __nft_rbtree_insert()
428 nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) { in __nft_rbtree_insert()
436 if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) && in __nft_rbtree_insert()
437 nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) { in __nft_rbtree_insert()
[all …]
nf_conntrack_ecache.c  274 struct nf_ct_event_notifier *new) in nf_conntrack_register_notifier() argument
286 rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new); in nf_conntrack_register_notifier()
296 struct nf_ct_event_notifier *new) in nf_conntrack_unregister_notifier() argument
303 BUG_ON(notify != new); in nf_conntrack_unregister_notifier()
311 struct nf_exp_event_notifier *new) in nf_ct_expect_register_notifier() argument
323 rcu_assign_pointer(net->ct.nf_expect_event_cb, new); in nf_ct_expect_register_notifier()
333 struct nf_exp_event_notifier *new) in nf_ct_expect_unregister_notifier() argument
340 BUG_ON(notify != new); in nf_ct_expect_unregister_notifier()
/net/smc/
smc_cdc.h  151 union smc_host_cursor *new) in smc_curs_diff() argument
153 if (old->wrap != new->wrap) in smc_curs_diff()
155 ((size - old->count) + new->count)); in smc_curs_diff()
157 return max_t(int, 0, (new->count - old->count)); in smc_curs_diff()
165 union smc_host_cursor *new) in smc_curs_comp() argument
167 if (old->wrap > new->wrap || in smc_curs_comp()
168 (old->wrap == new->wrap && old->count > new->count)) in smc_curs_comp()
169 return -smc_curs_diff(size, new, old); in smc_curs_comp()
170 return smc_curs_diff(size, old, new); in smc_curs_comp()
178 union smc_host_cursor *new) in smc_curs_diff_large() argument
[all …]
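smc_curs_diff() treats a cursor as a (wrap, count) position in a ring of 'size' bytes: when the wrap counters differ, the distance is the tail of the old lap plus the head of the new one (capped at size); otherwise it is a plain difference clamped at zero, and smc_curs_comp() simply flips the sign when old is ahead. A self-contained sketch of that arithmetic, with a simplified struct cursor in place of union smc_host_cursor:

struct cursor {
	unsigned int wrap;	/* number of times the ring wrapped */
	unsigned int count;	/* byte offset within the ring      */
};

static int curs_diff(unsigned int size, const struct cursor *old,
		     const struct cursor *new)
{
	int d;

	if (old->wrap != new->wrap) {
		/* Crossed the ring boundary: rest of old lap + start of new lap. */
		d = (int)(size - old->count) + (int)new->count;
		return d < (int)size ? d : (int)size;
	}
	/* Same lap: plain difference, clamped at zero. */
	d = (int)new->count - (int)old->count;
	return d > 0 ? d : 0;
}

/* Signed variant mirroring smc_curs_comp(): negative when 'new' is behind 'old'. */
static int curs_comp(unsigned int size, const struct cursor *old,
		     const struct cursor *new)
{
	if (old->wrap > new->wrap ||
	    (old->wrap == new->wrap && old->count > new->count))
		return -curs_diff(size, new, old);
	return curs_diff(size, old, new);
}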
/net/openvswitch/
flow_table.c  219 struct mask_array *new; in tbl_mask_array_alloc() local
222 new = kzalloc(sizeof(struct mask_array) + in tbl_mask_array_alloc()
225 if (!new) in tbl_mask_array_alloc()
228 new->masks_usage_zero_cntr = (u64 *)((u8 *)new + in tbl_mask_array_alloc()
233 new->masks_usage_stats = __alloc_percpu(sizeof(struct mask_array_stats) + in tbl_mask_array_alloc()
236 if (!new->masks_usage_stats) { in tbl_mask_array_alloc()
237 kfree(new); in tbl_mask_array_alloc()
241 new->count = 0; in tbl_mask_array_alloc()
242 new->max = size; in tbl_mask_array_alloc()
244 return new; in tbl_mask_array_alloc()
[all …]
/net/sunrpc/
auth.c  293 struct rpc_cred_cache *new; in rpcauth_init_credcache() local
296 new = kmalloc(sizeof(*new), GFP_KERNEL); in rpcauth_init_credcache()
297 if (!new) in rpcauth_init_credcache()
299 new->hashbits = auth_hashbits; in rpcauth_init_credcache()
300 hashsize = 1U << new->hashbits; in rpcauth_init_credcache()
301 new->hashtable = kcalloc(hashsize, sizeof(new->hashtable[0]), GFP_KERNEL); in rpcauth_init_credcache()
302 if (!new->hashtable) in rpcauth_init_credcache()
304 spin_lock_init(&new->lock); in rpcauth_init_credcache()
305 auth->au_credcache = new; in rpcauth_init_credcache()
308 kfree(new); in rpcauth_init_credcache()
[all …]
svcauth_unix.c  58 struct unix_domain *new = NULL; in unix_domain_find() local
63 if (new && rv != &new->h) in unix_domain_find()
64 svcauth_unix_domain_release(&new->h); in unix_domain_find()
73 new = kmalloc(sizeof(*new), GFP_KERNEL); in unix_domain_find()
74 if (new == NULL) in unix_domain_find()
76 kref_init(&new->h.ref); in unix_domain_find()
77 new->h.name = kstrdup(name, GFP_KERNEL); in unix_domain_find()
78 if (new->h.name == NULL) { in unix_domain_find()
79 kfree(new); in unix_domain_find()
82 new->h.flavour = &svcauth_unix; in unix_domain_find()
[all …]
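unix_domain_find() is the classic find-or-create loop: look up under the lock, and if the entry is missing, allocate a candidate outside the lock, look up again, and either insert the candidate or throw it away when another caller won the race (the "if (new && rv != &new->h)" release in the excerpt). A userspace sketch of the same dance with a pthread mutex and a plain linked list, which are stand-ins for the sunrpc auth-domain cache:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct domain {
	struct domain *next;
	char *name;
};

static struct domain *domains;
static pthread_mutex_t domains_lock = PTHREAD_MUTEX_INITIALIZER;

static struct domain *lookup_locked(const char *name)
{
	struct domain *d;

	for (d = domains; d; d = d->next)
		if (strcmp(d->name, name) == 0)
			return d;
	return NULL;
}

static struct domain *domain_find(const char *name)
{
	struct domain *found, *new = NULL;

	for (;;) {
		pthread_mutex_lock(&domains_lock);
		found = lookup_locked(name);
		if (!found && new) {
			new->next = domains;	/* insert our candidate */
			domains = new;
			found = new;
			new = NULL;
		}
		pthread_mutex_unlock(&domains_lock);

		if (found) {
			if (new) {		/* lost the race: drop the spare */
				free(new->name);
				free(new);
			}
			return found;
		}

		/* Not present yet: allocate outside the lock and retry. */
		new = malloc(sizeof(*new));
		if (!new)
			return NULL;
		new->name = strdup(name);
		if (!new->name) {
			free(new);
			return NULL;
		}
	}
}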
backchannel_rqst.c  252 struct rpc_rqst *new) in xprt_get_bc_request() argument
258 if (!new) in xprt_get_bc_request()
262 list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list); in xprt_get_bc_request()
341 struct rpc_rqst *req, *new = NULL; in xprt_lookup_bc_request() local
351 req = xprt_get_bc_request(xprt, xid, new); in xprt_lookup_bc_request()
354 if (new) { in xprt_lookup_bc_request()
355 if (req != new) in xprt_lookup_bc_request()
356 xprt_free_allocation(new); in xprt_lookup_bc_request()
360 new = xprt_alloc_bc_req(xprt, GFP_KERNEL); in xprt_lookup_bc_request()
361 } while (new); in xprt_lookup_bc_request()
/net/ipv6/
calipso.c  913 struct ipv6_opt_hdr *new; in calipso_opt_insert() local
928 new = kzalloc(buf_len, GFP_ATOMIC); in calipso_opt_insert()
929 if (!new) in calipso_opt_insert()
933 memcpy(new, hop, start); in calipso_opt_insert()
934 ret_val = calipso_genopt((unsigned char *)new, start, buf_len, doi_def, in calipso_opt_insert()
937 kfree(new); in calipso_opt_insert()
944 calipso_pad_write((unsigned char *)new, buf_len, pad); in calipso_opt_insert()
948 memcpy((char *)new + buf_len, (char *)hop + end, hop_len - end); in calipso_opt_insert()
951 new->nexthdr = 0; in calipso_opt_insert()
952 new->hdrlen = buf_len / 8 - 1; in calipso_opt_insert()
[all …]
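The tail of calipso_opt_insert() shows the IPv6 extension-header length rule: the rebuilt hop-by-hop header must be a multiple of 8 bytes, and hdrlen counts 8-byte units excluding the first, hence new->hdrlen = buf_len / 8 - 1 after padding. A tiny worked example of that arithmetic (calipso_opt_len is a made-up input; the real function derives start/end from the existing header it splices around):

#include <stdio.h>

int main(void)
{
	unsigned int start = 2;			/* nexthdr + hdrlen bytes       */
	unsigned int calipso_opt_len = 12;	/* e.g. a 12-byte CALIPSO TLV   */
	unsigned int used = start + calipso_opt_len;
	unsigned int buf_len = (used + 7) & ~7U;	/* pad up to an 8-byte multiple */
	unsigned int pad = buf_len - used;
	unsigned int hdrlen = buf_len / 8 - 1;	/* 8-byte units, first one free */

	printf("buf_len=%u pad=%u hdrlen=%u\n", buf_len, pad, hdrlen);
	return 0;	/* prints buf_len=16 pad=2 hdrlen=1 */
}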
/net/mac80211/
key.c  289 struct ieee80211_key *new) in ieee80211_pairwise_rekey() argument
291 struct ieee80211_local *local = new->local; in ieee80211_pairwise_rekey()
292 struct sta_info *sta = new->sta; in ieee80211_pairwise_rekey()
297 if (new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX) { in ieee80211_pairwise_rekey()
431 struct ieee80211_key *new) in ieee80211_key_replace() argument
438 if (WARN_ON(!new && !old)) in ieee80211_key_replace()
441 if (new) in ieee80211_key_replace()
442 list_add_tail_rcu(&new->list, &sdata->key_list); in ieee80211_key_replace()
444 WARN_ON(new && old && new->conf.keyidx != old->conf.keyidx); in ieee80211_key_replace()
446 if (new && sta && pairwise) { in ieee80211_key_replace()
[all …]
/net/dccp/ccids/lib/
loss_interval.c  138 struct tfrc_loss_interval *cur = tfrc_lh_peek(lh), *new; in tfrc_lh_interval_add() local
143 new = tfrc_lh_demand_next(lh); in tfrc_lh_interval_add()
144 if (unlikely(new == NULL)) { in tfrc_lh_interval_add()
149 new->li_seqno = tfrc_rx_hist_loss_prev(rh)->tfrchrx_seqno; in tfrc_lh_interval_add()
150 new->li_ccval = tfrc_rx_hist_loss_prev(rh)->tfrchrx_ccval; in tfrc_lh_interval_add()
151 new->li_is_closed = 0; in tfrc_lh_interval_add()
154 lh->i_mean = new->li_length = (*calc_first_li)(sk); in tfrc_lh_interval_add()
156 cur->li_length = dccp_delta_seqno(cur->li_seqno, new->li_seqno); in tfrc_lh_interval_add()
157 new->li_length = dccp_delta_seqno(new->li_seqno, in tfrc_lh_interval_add()
/net/x25/
x25_facilities.c  266 struct x25_facilities *new, struct x25_dte_facilities *dte) in x25_negotiate_facilities() argument
274 memcpy(new, ours, sizeof(*new)); in x25_negotiate_facilities()
289 new->reverse = theirs.reverse; in x25_negotiate_facilities()
298 new->throughput = (new->throughput & 0xf0) | theirs_in; in x25_negotiate_facilities()
303 new->throughput = (new->throughput & 0x0f) | theirs_out; in x25_negotiate_facilities()
310 new->pacsize_in = theirs.pacsize_in; in x25_negotiate_facilities()
314 new->pacsize_out = theirs.pacsize_out; in x25_negotiate_facilities()
321 new->winsize_in = theirs.winsize_in; in x25_negotiate_facilities()
325 new->winsize_out = theirs.winsize_out; in x25_negotiate_facilities()
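x25_negotiate_facilities() starts from a copy of our own facilities and then, field by field, accepts the peer's value only when it is more restrictive; the throughput byte is special because the two directions live in its two nibbles. A sketch of just that nibble negotiation (a helper invented for illustration, not the full facility set the real function handles):

static unsigned char negotiate_throughput(unsigned char ours, unsigned char theirs)
{
	unsigned char new = ours;			/* start from our proposal */
	unsigned char theirs_in  = theirs & 0x0f;	/* low nibble              */
	unsigned char theirs_out = theirs & 0xf0;	/* high nibble             */

	if (theirs_in < (ours & 0x0f))
		new = (new & 0xf0) | theirs_in;		/* take the smaller value  */
	if (theirs_out < (ours & 0xf0))
		new = (new & 0x0f) | theirs_out;	/* take the smaller value  */

	return new;
}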
/net/sunrpc/auth_gss/
svcauth_gss.c  82 static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old);
138 struct rsi *new = container_of(cnew, struct rsi, h); in rsi_init() local
141 new->out_handle.data = NULL; in rsi_init()
142 new->out_handle.len = 0; in rsi_init()
143 new->out_token.data = NULL; in rsi_init()
144 new->out_token.len = 0; in rsi_init()
145 new->in_handle.len = item->in_handle.len; in rsi_init()
147 new->in_token.len = item->in_token.len; in rsi_init()
149 new->in_handle.data = item->in_handle.data; in rsi_init()
151 new->in_token.data = item->in_token.data; in rsi_init()
[all …]
/net/6lowpan/
nhc.c  21 struct rb_node **new = &rb_root.rb_node, *parent = NULL; in lowpan_nhc_insert() local
24 while (*new) { in lowpan_nhc_insert()
25 struct lowpan_nhc *this = rb_entry(*new, struct lowpan_nhc, in lowpan_nhc_insert()
40 parent = *new; in lowpan_nhc_insert()
42 new = &((*new)->rb_left); in lowpan_nhc_insert()
44 new = &((*new)->rb_right); in lowpan_nhc_insert()
50 rb_link_node(&nhc->node, parent, new); in lowpan_nhc_insert()
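lowpan_nhc_insert() uses the standard kernel rbtree idiom: walk down the tree while remembering the parent and the address of the child link that will hold the new node, then hand both to rb_link_node() before rb_insert_color() rebalances. The sketch below shows the same pointer-to-link walk on a plain, unbalanced BST keyed by an int, so the rebalancing step is deliberately left out; the duplicate check stands in for the excerpt's overlap test.

#include <stddef.h>

struct node {
	int key;
	struct node *left, *right, *parent;
};

static int tree_insert(struct node **root, struct node *n)
{
	struct node **link = root, *parent = NULL;

	while (*link) {
		struct node *this = *link;

		parent = this;			/* remember where we came from */
		if (n->key < this->key)
			link = &this->left;
		else if (n->key > this->key)
			link = &this->right;
		else
			return -1;		/* already present: reject */
	}

	/* The "rb_link_node" step: attach n at the remembered link. */
	n->left = n->right = NULL;
	n->parent = parent;
	*link = n;
	return 0;
}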
/net/dccp/
feat.c  392 struct dccp_feat_entry *new; in dccp_feat_clone_entry() local
398 new = kmemdup(original, sizeof(struct dccp_feat_entry), gfp_any()); in dccp_feat_clone_entry()
399 if (new == NULL) in dccp_feat_clone_entry()
402 if (type == FEAT_SP && dccp_feat_clone_sp_val(&new->val, in dccp_feat_clone_entry()
405 kfree(new); in dccp_feat_clone_entry()
408 return new; in dccp_feat_clone_entry()
484 struct dccp_feat_entry *new = dccp_feat_entry_new(fn_list, feat, local); in dccp_feat_push_change() local
486 if (new == NULL) in dccp_feat_push_change()
489 new->feat_num = feat; in dccp_feat_push_change()
490 new->is_local = local; in dccp_feat_push_change()
[all …]
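dccp_feat_clone_entry() duplicates the entry with a flat kmemdup() and then deep-copies the server-priority value so the clone does not alias the original's buffer, freeing the half-built copy on failure. A userspace sketch of that two-step clone; struct feat_entry and its sp_vec/sp_len members are simplified stand-ins:

#include <stdlib.h>
#include <string.h>

struct feat_entry {
	int feat_num;
	size_t sp_len;
	unsigned char *sp_vec;	/* server-priority list, owned by the entry */
};

static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

static struct feat_entry *feat_clone_entry(const struct feat_entry *original)
{
	struct feat_entry *new = memdup(original, sizeof(*original));

	if (!new)
		return NULL;

	if (original->sp_vec) {
		/* Deep-copy the list so the clone owns its own buffer. */
		new->sp_vec = memdup(original->sp_vec, original->sp_len);
		if (!new->sp_vec) {
			free(new);
			return NULL;
		}
	}
	return new;
}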
/net/sctp/
auth.c  81 struct sctp_shared_key *new; in sctp_auth_shkey_create() local
84 new = kzalloc(sizeof(struct sctp_shared_key), gfp); in sctp_auth_shkey_create()
85 if (!new) in sctp_auth_shkey_create()
88 INIT_LIST_HEAD(&new->key_list); in sctp_auth_shkey_create()
89 refcount_set(&new->refcnt, 1); in sctp_auth_shkey_create()
90 new->key_id = key_id; in sctp_auth_shkey_create()
92 return new; in sctp_auth_shkey_create()
190 struct sctp_auth_bytes *new; in sctp_auth_make_key_vector() local
202 new = sctp_auth_create_key(len, gfp); in sctp_auth_make_key_vector()
203 if (!new) in sctp_auth_make_key_vector()
[all …]
/net/wireless/
scan.c  1567 struct cfg80211_internal_bss *new) in cfg80211_combine_bsses() argument
1576 ies = rcu_access_pointer(new->pub.beacon_ies); in cfg80211_combine_bsses()
1604 if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid)) in cfg80211_combine_bsses()
1606 if (bss->pub.channel != new->pub.channel) in cfg80211_combine_bsses()
1608 if (bss->pub.scan_width != new->pub.scan_width) in cfg80211_combine_bsses()
1625 list_add(&bss->hidden_list, &new->hidden_list); in cfg80211_combine_bsses()
1626 bss->pub.hidden_beacon_bss = &new->pub; in cfg80211_combine_bsses()
1627 new->refcount += bss->refcount; in cfg80211_combine_bsses()
1629 new->pub.beacon_ies); in cfg80211_combine_bsses()
1665 struct cfg80211_internal_bss *new, in cfg80211_update_known_bss() argument
[all …]
/net/core/
netprio_cgroup.c  43 struct netprio_map *old, *new; in extend_netdev_table() local
59 sizeof(new->priomap[0]); in extend_netdev_table()
69 new = kzalloc(new_sz, GFP_KERNEL); in extend_netdev_table()
70 if (!new) in extend_netdev_table()
74 memcpy(new->priomap, old->priomap, in extend_netdev_table()
77 new->priomap_len = new_len; in extend_netdev_table()
80 rcu_assign_pointer(dev->priomap, new); in extend_netdev_table()
/net/vmw_vsock/
hyperv_transport.c  292 struct sock *sk, *new = NULL; in hvs_open_connection() local
320 new = vsock_create_connected(sk); in hvs_open_connection()
321 if (!new) in hvs_open_connection()
324 new->sk_state = TCP_SYN_SENT; in hvs_open_connection()
325 vnew = vsock_sk(new); in hvs_open_connection()
338 sock_put(new); in hvs_open_connection()
376 conn_from_host ? new : sk); in hvs_open_connection()
380 sock_put(new); in hvs_open_connection()
387 set_per_channel_state(chan, conn_from_host ? new : sk); in hvs_open_connection()
390 sock_hold(conn_from_host ? new : sk); in hvs_open_connection()
[all …]
