
Searched refs:tn (Results 1 – 25 of 42) sorted by relevance


/net/tipc/
core.c:55 struct tipc_net *tn = net_generic(net, tipc_net_id); in tipc_init_net() local
58 tn->net_id = 4711; in tipc_init_net()
59 tn->node_addr = 0; in tipc_init_net()
60 tn->trial_addr = 0; in tipc_init_net()
61 tn->addr_trial_end = 0; in tipc_init_net()
62 tn->capabilities = TIPC_NODE_CAPABILITIES; in tipc_init_net()
63 INIT_WORK(&tn->work, tipc_net_finalize_work); in tipc_init_net()
64 memset(tn->node_id, 0, sizeof(tn->node_id)); in tipc_init_net()
65 memset(tn->node_id_string, 0, sizeof(tn->node_id_string)); in tipc_init_net()
66 tn->mon_threshold = TIPC_DEF_MON_THRESHOLD; in tipc_init_net()
[all …]
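
The core.c hits above show the standard per-netns storage pattern: struct tipc_net is allocated by the pernet core and fetched with net_generic(net, tipc_net_id). A minimal sketch of that pattern, using hypothetical foo_net/foo_net_id names rather than the TIPC ones:

#include <net/net_namespace.h>
#include <net/netns/generic.h>

static unsigned int foo_net_id;

struct foo_net {
	u32 net_id;			/* per-namespace state lives here */
};

static int __net_init foo_init_net(struct net *net)
{
	/* storage was allocated by the pernet core, sized by .size below */
	struct foo_net *fn = net_generic(net, foo_net_id);

	fn->net_id = 4711;		/* default it, as tipc_init_net() does */
	return 0;
}

static struct pernet_operations foo_net_ops = {
	.init = foo_init_net,
	.id   = &foo_net_id,
	.size = sizeof(struct foo_net),
};

/* registered once at module init with register_pernet_subsys(&foo_net_ops) */

The tipc_net(net) helper seen in the other TIPC hits is just a wrapper around the same net_generic() call.
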
discover.c:84 struct tipc_net *tn = tipc_net(net); in tipc_disc_init_msg() local
89 tipc_msg_init(tn->trial_addr, hdr, LINK_CONFIG, mtyp, in tipc_disc_init_msg()
93 msg_set_node_sig(hdr, tn->random); in tipc_disc_init_msg()
96 msg_set_bc_netid(hdr, tn->net_id); in tipc_disc_init_msg()
98 msg_set_peer_net_hash(hdr, tipc_net_hash_mixes(net, tn->random)); in tipc_disc_init_msg()
150 struct tipc_net *tn = tipc_net(net); in tipc_disc_addr_trial_msg() local
152 bool trial = time_before(jiffies, tn->addr_trial_end) && !self; in tipc_disc_addr_trial_msg()
159 if (dst != tn->trial_addr) in tipc_disc_addr_trial_msg()
163 tn->trial_addr = sugg_addr; in tipc_disc_addr_trial_msg()
165 tn->addr_trial_end = jiffies + msecs_to_jiffies(1000); in tipc_disc_addr_trial_msg()
[all …]
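
The discover.c hits centre on the address-trial window kept in tn->trial_addr and tn->addr_trial_end; the timing part is ordinary jiffies arithmetic. A tiny illustrative sketch (not TIPC code):

#include <linux/types.h>
#include <linux/jiffies.h>

static unsigned long trial_end;

/* open a 1000 ms trial window, as tipc_disc_addr_trial_msg() does */
static void trial_start(void)
{
	trial_end = jiffies + msecs_to_jiffies(1000);
}

/* time_before() copes with jiffies wrap-around */
static bool trial_running(void)
{
	return time_before(jiffies, trial_end);
}
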
addr.c:58 struct tipc_net *tn = tipc_net(net); in tipc_set_node_id() local
60 memcpy(tn->node_id, id, NODE_ID_LEN); in tipc_set_node_id()
61 tipc_nodeid2string(tn->node_id_string, id); in tipc_set_node_id()
62 tn->trial_addr = hash128to32(id); in tipc_set_node_id()
64 tipc_own_id_string(net), tn->net_id); in tipc_set_node_id()
69 struct tipc_net *tn = tipc_net(net); in tipc_set_node_addr() local
72 tn->node_addr = addr; in tipc_set_node_addr()
77 tn->trial_addr = addr; in tipc_set_node_addr()
78 tn->addr_trial_end = jiffies; in tipc_set_node_addr()
net.c:128 struct tipc_net *tn = tipc_net(net); in tipc_net_finalize() local
135 if (cmpxchg(&tn->node_addr, 0, addr)) in tipc_net_finalize()
146 struct tipc_net *tn = container_of(work, struct tipc_net, work); in tipc_net_finalize_work() local
148 tipc_net_finalize(tipc_link_net(tn->bcl), tn->trial_addr); in tipc_net_finalize_work()
166 struct tipc_net *tn = net_generic(net, tipc_net_id); in __tipc_nl_add_net() local
167 u64 *w0 = (u64 *)&tn->node_id[0]; in __tipc_nl_add_net()
168 u64 *w1 = (u64 *)&tn->node_id[8]; in __tipc_nl_add_net()
181 if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tn->net_id)) in __tipc_nl_add_net()
229 struct tipc_net *tn = tipc_net(net); in __tipc_nl_net_set() local
253 tn->net_id = val; in __tipc_nl_net_set()
[all …]
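
The interesting hit in net.c is line 135: cmpxchg(&tn->node_addr, 0, addr) latches the node address exactly once, no matter how many paths race to finalize it. The idiom, reduced to its core (hypothetical helper):

#include <linux/types.h>
#include <linux/atomic.h>

/* cmpxchg() returns the old value: zero means we won the race and
 * installed addr; non-zero means someone already finalized the slot,
 * which is exactly the check tipc_net_finalize() makes */
static bool claim_once(u32 *slot, u32 addr)
{
	return cmpxchg(slot, 0, addr) == 0;
}
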
name_distr.c:209 struct tipc_net *tn = tipc_net(net); in tipc_named_node_up() local
214 spin_lock_bh(&tn->nametbl_lock); in tipc_named_node_up()
218 spin_unlock_bh(&tn->nametbl_lock); in tipc_named_node_up()
237 struct tipc_net *tn = tipc_net(net); in tipc_publ_purge() local
243 spin_lock_bh(&tn->nametbl_lock); in tipc_publ_purge()
247 spin_unlock_bh(&tn->nametbl_lock); in tipc_publ_purge()
256 struct tipc_net *tn = tipc_net(net); in tipc_publ_notify() local
262 spin_lock_bh(&tn->nametbl_lock); in tipc_publ_notify()
265 spin_unlock_bh(&tn->nametbl_lock); in tipc_publ_notify()
367 struct tipc_net *tn = tipc_net(net); in tipc_named_rcv() local
[all …]
node.c:334 struct tipc_net *tn = tipc_net(net); in tipc_node_find() local
339 hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) { in tipc_node_find()
356 struct tipc_net *tn = tipc_net(net); in tipc_node_find_by_id() local
361 list_for_each_entry_rcu(n, &tn->node_list, list) { in tipc_node_find_by_id()
474 struct tipc_net *tn = net_generic(net, tipc_net_id); in tipc_node_create() local
481 spin_lock_bh(&tn->node_list_lock); in tipc_node_create()
505 &tn->node_htable[tipc_hashfn(addr)]); in tipc_node_create()
507 list_for_each_entry_rcu(temp_node, &tn->node_list, list) { in tipc_node_create()
530 tn->capabilities = TIPC_NODE_CAPABILITIES; in tipc_node_create()
531 list_for_each_entry_rcu(temp_node, &tn->node_list, list) { in tipc_node_create()
[all …]
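
node.c uses tn as the per-net anchor for an RCU-protected node table: lookups walk tn->node_htable[] or tn->node_list under rcu_read_lock(). The lookup shape, with hypothetical foo_* names (a real caller, like tipc_node_find(), also takes a reference before dropping the read lock):

#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>

struct foo_node {
	struct hlist_node hash;
	u32 addr;
};

#define FOO_HTABLE_SIZE 1024	/* must stay a power of two for the mask */
static struct hlist_head foo_htable[FOO_HTABLE_SIZE];

static bool foo_addr_known(u32 addr)
{
	struct foo_node *n;
	bool found = false;

	rcu_read_lock();
	hlist_for_each_entry_rcu(n, &foo_htable[addr & (FOO_HTABLE_SIZE - 1)], hash) {
		if (n->addr == addr) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}
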
bearer.c:64 struct tipc_net *tn = tipc_net(net); in bearer_get() local
66 return rcu_dereference(tn->bearer_list[bearer_id]); in bearer_get()
179 struct tipc_net *tn = tipc_net(net); in tipc_bearer_find() local
184 b = rtnl_dereference(tn->bearer_list[i]); in tipc_bearer_find()
198 struct tipc_net *tn = tipc_net(net); in tipc_bearer_get_name() local
204 b = rtnl_dereference(tn->bearer_list[bearer_id]); in tipc_bearer_get_name()
248 struct tipc_net *tn = tipc_net(net); in tipc_enable_bearer() local
284 b = rtnl_dereference(tn->bearer_list[i]); in tipc_enable_bearer()
360 rcu_assign_pointer(tn->bearer_list[bearer_id], b); in tipc_enable_bearer()
405 struct tipc_net *tn = tipc_net(net); in bearer_disable() local
[all …]
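
bearer.c shows all three RCU accessors applied to the tn->bearer_list[] pointer array: rcu_dereference() on the data path, rtnl_dereference() on control paths that hold RTNL, and rcu_assign_pointer() to publish a new entry. Condensed sketch (struct foo and foo_list are placeholders):

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct foo;				/* opaque payload for the sketch */

#define MAX_FOO 8
static struct foo __rcu *foo_list[MAX_FOO];

/* data path: caller is inside rcu_read_lock(), like bearer_get() */
static struct foo *foo_get(int i)
{
	return rcu_dereference(foo_list[i]);
}

/* control path: caller holds RTNL, like tipc_bearer_find() */
static struct foo *foo_find_rtnl(int i)
{
	return rtnl_dereference(foo_list[i]);
}

/* publish, like tipc_enable_bearer() installing a new bearer */
static void foo_install(int i, struct foo *f)
{
	rcu_assign_pointer(foo_list[i], f);
}
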
name_table.c:573 struct tipc_net *tn = tipc_net(net); in tipc_nametbl_lookup_anycast() local
574 bool legacy = tn->legacy_addr_format; in tipc_nametbl_lookup_anycast()
764 struct tipc_net *tn = tipc_net(net); in tipc_nametbl_publish() local
769 spin_lock_bh(&tn->nametbl_lock); in tipc_nametbl_publish()
783 spin_unlock_bh(&tn->nametbl_lock); in tipc_nametbl_publish()
802 struct tipc_net *tn = tipc_net(net); in tipc_nametbl_withdraw() local
807 spin_lock_bh(&tn->nametbl_lock); in tipc_nametbl_withdraw()
817 spin_unlock_bh(&tn->nametbl_lock); in tipc_nametbl_withdraw()
829 struct tipc_net *tn = tipc_net(sub->net); in tipc_nametbl_subscribe() local
837 spin_lock_bh(&tn->nametbl_lock); in tipc_nametbl_subscribe()
[all …]
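
Both name_distr.c and name_table.c serialize name-table updates with spin_lock_bh(&tn->nametbl_lock); the _bh flavour is used because the same lock is also taken from packet-receive (softirq) context. The shape, stripped down:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_tbl_lock);

/* same shape as tipc_nametbl_publish() / tipc_named_rcv() */
static void foo_tbl_update(void)
{
	spin_lock_bh(&foo_tbl_lock);
	/* ... mutate the shared table ... */
	spin_unlock_bh(&foo_tbl_lock);
}
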
monitor.c:204 struct tipc_net *tn = tipc_net(net); in tipc_mon_is_active() local
206 return mon->peer_cnt > tn->mon_threshold; in tipc_mon_is_active()
649 struct tipc_net *tn = tipc_net(net); in tipc_mon_create() local
654 if (tn->monitors[bearer_id]) in tipc_mon_create()
666 tn->monitors[bearer_id] = mon; in tipc_mon_create()
677 mon->timer_intv = msecs_to_jiffies(MON_TIMEOUT + (tn->random & 0xffff)); in tipc_mon_create()
684 struct tipc_net *tn = tipc_net(net); in tipc_mon_delete() local
694 tn->monitors[bearer_id] = NULL; in tipc_mon_delete()
726 struct tipc_net *tn = tipc_net(net); in tipc_nl_monitor_set_threshold() local
731 tn->mon_threshold = cluster_size; in tipc_nl_monitor_set_threshold()
[all …]
addr.h:98 struct tipc_net *tn = tipc_net(net); in tipc_own_id() local
100 if (!strlen(tn->node_id_string)) in tipc_own_id()
102 return tn->node_id; in tipc_own_id()
/net/ipv4/
fib_trie.c:173 static struct key_vector *resize(struct trie *t, struct key_vector *tn);
194 #define node_parent(tn) rtnl_dereference(tn_info(tn)->parent) argument
195 #define get_child(tn, i) rtnl_dereference((tn)->tnode[i]) argument
198 #define node_parent_rcu(tn) rcu_dereference_rtnl(tn_info(tn)->parent) argument
199 #define get_child_rcu(tn, i) rcu_dereference_rtnl((tn)->tnode[i]) argument
213 static inline unsigned long child_length(const struct key_vector *tn) in child_length() argument
215 return (1ul << tn->bits) & ~(1ul); in child_length()
379 struct key_vector *tn; in tnode_new() local
397 tn = tnode->kv; in tnode_new()
398 tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0; in tnode_new()
[all …]
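
The fib_trie.c matches are unrelated to the per-net uses above: here tn is a trie node (struct key_vector), and the accessor macros pair the parent/child pointers with rtnl_dereference()/rcu_dereference_rtnl(). The child_length() hit is worth a gloss; an annotated restatement of the same expression, taking the bits field directly:

/* a leaf has bits == 0, so (1ul << 0) & ~1ul == 0 children;
 * an internal node has bits >= 1, so the mask is a no-op and the
 * result is simply 2^bits child slots */
static inline unsigned long child_length(unsigned char bits)
{
	return (1ul << bits) & ~(1ul);
}
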
/net/netfilter/
xt_TEE.c:65 struct tee_net *tn = net_generic(net, tee_net_id); in tee_netdev_event() local
68 mutex_lock(&tn->lock); in tee_netdev_event()
69 list_for_each_entry(priv, &tn->priv_list, list) { in tee_netdev_event()
87 mutex_unlock(&tn->lock); in tee_netdev_event()
94 struct tee_net *tn = net_generic(par->net, tee_net_id); in tee_tg_check() local
122 mutex_lock(&tn->lock); in tee_tg_check()
123 list_add(&priv->list, &tn->priv_list); in tee_tg_check()
124 mutex_unlock(&tn->lock); in tee_tg_check()
134 struct tee_net *tn = net_generic(par->net, tee_net_id); in tee_tg_destroy() local
138 mutex_lock(&tn->lock); in tee_tg_destroy()
[all …]
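
xt_TEE.c keeps its per-net state (tee_net) as a plain list of target instances guarded by a mutex, walked from both the netdevice notifier and the target's check/destroy hooks. A minimal version of that pattern with hypothetical foo_* names:

#include <linux/list.h>
#include <linux/mutex.h>

struct foo_priv {
	struct list_head list;
};

struct foo_net {
	struct list_head priv_list;
	struct mutex	 lock;
};

static void foo_net_setup(struct foo_net *fn)
{
	INIT_LIST_HEAD(&fn->priv_list);
	mutex_init(&fn->lock);
}

/* register an instance, as tee_tg_check() does */
static void foo_add(struct foo_net *fn, struct foo_priv *priv)
{
	mutex_lock(&fn->lock);
	list_add(&priv->list, &fn->priv_list);
	mutex_unlock(&fn->lock);
}

/* walk all instances from a notifier, as tee_netdev_event() does */
static void foo_for_each(struct foo_net *fn, void (*cb)(struct foo_priv *))
{
	struct foo_priv *priv;

	mutex_lock(&fn->lock);
	list_for_each_entry(priv, &fn->priv_list, list)
		cb(priv);
	mutex_unlock(&fn->lock);
}
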
nf_conntrack_proto_tcp.c:491 const struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct)); in nf_tcp_log_invalid() local
496 be_liberal = sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL || tn->tcp_be_liberal; in nf_tcp_log_invalid()
729 const struct nf_tcp_net *tn; in nf_tcp_handle_invalid() local
760 tn = nf_tcp_pernet(nf_ct_net(ct)); in nf_tcp_handle_invalid()
763 timeouts = tn->timeouts; in nf_tcp_handle_invalid()
844 const struct nf_tcp_net *tn = nf_tcp_pernet(net); in tcp_new() local
868 } else if (tn->tcp_loose == 0) { in tcp_new()
937 const struct nf_tcp_net *tn; in nf_conntrack_tcp_set_closing() local
939 tn = nf_tcp_pernet(nf_ct_net(ct)); in nf_conntrack_tcp_set_closing()
940 timeouts = tn->timeouts; in nf_conntrack_tcp_set_closing()
[all …]
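
The conntrack hits all reach per-namespace TCP tuning through nf_tcp_pernet(net): tn->timeouts[] indexed by conntrack state, plus the tcp_loose and tcp_be_liberal knobs. A read-only sketch, taking nf_tcp_pernet() and the TCP_CONNTRACK_* states as given from the hits above (the exact header providing nf_tcp_pernet() is not pinned down here):

#include <net/netns/conntrack.h>	/* struct nf_tcp_net (assumed location) */

/* mirrors how tcp_new() and nf_conntrack_tcp_set_closing() read tn->timeouts[] */
static unsigned int foo_tcp_established_timeout(struct net *net)
{
	const struct nf_tcp_net *tn = nf_tcp_pernet(net);

	return tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
}
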
nf_conntrack_standalone.c:962 struct nf_tcp_net *tn = nf_tcp_pernet(net); in nf_conntrack_standalone_init_tcp_sysctl() local
964 #define XASSIGN(XNAME, tn) \ in nf_conntrack_standalone_init_tcp_sysctl() argument
966 &(tn)->timeouts[TCP_CONNTRACK_ ## XNAME] in nf_conntrack_standalone_init_tcp_sysctl()
968 XASSIGN(SYN_SENT, tn); in nf_conntrack_standalone_init_tcp_sysctl()
969 XASSIGN(SYN_RECV, tn); in nf_conntrack_standalone_init_tcp_sysctl()
970 XASSIGN(ESTABLISHED, tn); in nf_conntrack_standalone_init_tcp_sysctl()
971 XASSIGN(FIN_WAIT, tn); in nf_conntrack_standalone_init_tcp_sysctl()
972 XASSIGN(CLOSE_WAIT, tn); in nf_conntrack_standalone_init_tcp_sysctl()
973 XASSIGN(LAST_ACK, tn); in nf_conntrack_standalone_init_tcp_sysctl()
974 XASSIGN(TIME_WAIT, tn); in nf_conntrack_standalone_init_tcp_sysctl()
[all …]
nf_flow_table_core.c:181 struct nf_tcp_net *tn = nf_tcp_pernet(net); in flow_offload_fixup_ct() local
185 timeout = tn->timeouts[ct->proto.tcp.state]; in flow_offload_fixup_ct()
186 timeout -= tn->offload_timeout; in flow_offload_fixup_ct()
188 struct nf_udp_net *tn = nf_udp_pernet(net); in flow_offload_fixup_ct() local
193 timeout = tn->timeouts[state]; in flow_offload_fixup_ct()
194 timeout -= tn->offload_timeout; in flow_offload_fixup_ct()
267 struct nf_tcp_net *tn = nf_tcp_pernet(net); in flow_offload_get_timeout() local
269 timeout = tn->offload_timeout; in flow_offload_get_timeout()
271 struct nf_udp_net *tn = nf_udp_pernet(net); in flow_offload_get_timeout() local
273 timeout = tn->offload_timeout; in flow_offload_get_timeout()
/net/sched/
act_vlan.c:121 struct tc_action_net *tn = net_generic(net, act_vlan_ops.net_id); in tcf_vlan_init() local
149 err = tcf_idr_check_alloc(tn, &index, a, bind); in tcf_vlan_init()
165 tcf_idr_cleanup(tn, index); in tcf_vlan_init()
173 tcf_idr_cleanup(tn, index); in tcf_vlan_init()
187 tcf_idr_cleanup(tn, index); in tcf_vlan_init()
205 tcf_idr_cleanup(tn, index); in tcf_vlan_init()
213 tcf_idr_cleanup(tn, index); in tcf_vlan_init()
219 ret = tcf_idr_create_from_flags(tn, index, est, a, in tcf_vlan_init()
222 tcf_idr_cleanup(tn, index); in tcf_vlan_init()
433 struct tc_action_net *tn = net_generic(net, act_vlan_ops.net_id); in vlan_init_net() local
[all …]
act_simple.c:93 struct tc_action_net *tn = net_generic(net, act_simp_ops.net_id); in tcf_simp_init() local
116 err = tcf_idr_check_alloc(tn, &index, a, bind); in tcf_simp_init()
127 tcf_idr_cleanup(tn, index); in tcf_simp_init()
132 ret = tcf_idr_create(tn, index, est, a, in tcf_simp_init()
135 tcf_idr_cleanup(tn, index); in tcf_simp_init()
215 struct tc_action_net *tn = net_generic(net, act_simp_ops.net_id); in simp_init_net() local
217 return tc_action_net_init(net, tn, &act_simp_ops); in simp_init_net()
act_ipt.c:100 struct tc_action_net *tn = net_generic(net, id); in __tcf_ipt_init() local
122 err = tcf_idr_check_alloc(tn, &index, a, bind); in __tcf_ipt_init()
133 tcf_idr_cleanup(tn, index); in __tcf_ipt_init()
142 tcf_idr_cleanup(tn, index); in __tcf_ipt_init()
147 ret = tcf_idr_create(tn, index, est, a, ops, bind, in __tcf_ipt_init()
150 tcf_idr_cleanup(tn, index); in __tcf_ipt_init()
387 struct tc_action_net *tn = net_generic(net, act_ipt_ops.net_id); in ipt_init_net() local
389 return tc_action_net_init(net, tn, &act_ipt_ops); in ipt_init_net()
417 struct tc_action_net *tn = net_generic(net, act_xt_ops.net_id); in xt_init_net() local
419 return tc_action_net_init(net, tn, &act_xt_ops); in xt_init_net()
act_skbmod.c:107 struct tc_action_net *tn = net_generic(net, act_skbmod_ops.net_id); in tcf_skbmod_init() local
155 err = tcf_idr_check_alloc(tn, &index, a, bind); in tcf_skbmod_init()
166 tcf_idr_cleanup(tn, index); in tcf_skbmod_init()
171 ret = tcf_idr_create(tn, index, est, a, in tcf_skbmod_init()
174 tcf_idr_cleanup(tn, index); in tcf_skbmod_init()
293 struct tc_action_net *tn = net_generic(net, act_skbmod_ops.net_id); in skbmod_init_net() local
295 return tc_action_net_init(net, tn, &act_skbmod_ops); in skbmod_init_net()
act_connmark.c:103 struct tc_action_net *tn = net_generic(net, act_connmark_ops.net_id); in tcf_connmark_init() local
130 ret = tcf_idr_check_alloc(tn, &index, a, bind); in tcf_connmark_init()
132 ret = tcf_idr_create_from_flags(tn, index, est, a, in tcf_connmark_init()
135 tcf_idr_cleanup(tn, index); in tcf_connmark_init()
248 struct tc_action_net *tn = net_generic(net, act_connmark_ops.net_id); in connmark_init_net() local
250 return tc_action_net_init(net, tn, &act_connmark_ops); in connmark_init_net()
act_gact.c:58 struct tc_action_net *tn = net_generic(net, act_gact_ops.net_id); in tcf_gact_init() local
100 err = tcf_idr_check_alloc(tn, &index, a, bind); in tcf_gact_init()
102 ret = tcf_idr_create_from_flags(tn, index, est, a, in tcf_gact_init()
105 tcf_idr_cleanup(tn, index); in tcf_gact_init()
302 struct tc_action_net *tn = net_generic(net, act_gact_ops.net_id); in gact_init_net() local
304 return tc_action_net_init(net, tn, &act_gact_ops); in gact_init_net()
act_mirred.c:97 struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id); in tcf_mirred_init() local
122 err = tcf_idr_check_alloc(tn, &index, a, bind); in tcf_mirred_init()
139 tcf_idr_cleanup(tn, index); in tcf_mirred_init()
146 tcf_idr_cleanup(tn, index); in tcf_mirred_init()
150 ret = tcf_idr_create_from_flags(tn, index, est, a, in tcf_mirred_init()
153 tcf_idr_cleanup(tn, index); in tcf_mirred_init()
520 struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id); in mirred_init_net() local
522 return tc_action_net_init(net, tn, &act_mirred_ops); in mirred_init_net()
act_nat.c:39 struct tc_action_net *tn = net_generic(net, act_nat_ops.net_id); in tcf_nat_init() local
61 err = tcf_idr_check_alloc(tn, &index, a, bind); in tcf_nat_init()
63 ret = tcf_idr_create_from_flags(tn, index, est, a, &act_nat_ops, in tcf_nat_init()
66 tcf_idr_cleanup(tn, index); in tcf_nat_init()
330 struct tc_action_net *tn = net_generic(net, act_nat_ops.net_id); in nat_init_net() local
332 return tc_action_net_init(net, tn, &act_nat_ops); in nat_init_net()
act_sample.c:41 struct tc_action_net *tn = net_generic(net, act_sample_ops.net_id); in tcf_sample_init() local
64 err = tcf_idr_check_alloc(tn, &index, a, bind); in tcf_sample_init()
72 ret = tcf_idr_create(tn, index, est, a, in tcf_sample_init()
75 tcf_idr_cleanup(tn, index); in tcf_sample_init()
322 struct tc_action_net *tn = net_generic(net, act_sample_ops.net_id); in sample_init_net() local
324 return tc_action_net_init(net, tn, &act_sample_ops); in sample_init_net()
act_ctinfo.c:161 struct tc_action_net *tn = net_generic(net, act_ctinfo_ops.net_id); in tcf_ctinfo_init() local
213 err = tcf_idr_check_alloc(tn, &index, a, bind); in tcf_ctinfo_init()
215 ret = tcf_idr_create_from_flags(tn, index, est, a, in tcf_ctinfo_init()
218 tcf_idr_cleanup(tn, index); in tcf_ctinfo_init()
369 struct tc_action_net *tn = net_generic(net, act_ctinfo_ops.net_id); in ctinfo_init_net() local
371 return tc_action_net_init(net, tn, &act_ctinfo_ops); in ctinfo_init_net()
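
Every act_*.c hit above follows the same template: the per-net action table is a struct tc_action_net fetched with net_generic(net, <ops>.net_id); the init path reserves or looks up an index with tcf_idr_check_alloc(), creates the action with tcf_idr_create()/tcf_idr_create_from_flags(), and releases the reservation with tcf_idr_cleanup() on error; the pernet init is a one-liner around tc_action_net_init(). A condensed, hedged sketch of that flow, with a hypothetical act_foo_ops (real code parses index and flags from netlink attributes, and the tcf_idr_create*() argument lists have shifted across kernel versions):

#include <net/netlink.h>
#include <net/netns/generic.h>
#include <net/act_api.h>

static struct tc_action_ops act_foo_ops;	/* hypothetical action ops */

static int tcf_foo_init(struct net *net, struct nlattr *est,
			struct tc_action **a, u32 index, int bind, u32 flags)
{
	struct tc_action_net *tn = net_generic(net, act_foo_ops.net_id);
	bool exists;
	int err, ret;

	err = tcf_idr_check_alloc(tn, &index, a, bind);	/* look up or reserve */
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;			/* already bound, nothing to do */

	if (!exists) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_foo_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);	/* drop the reservation */
			return ret;
		}
	}
	/* the "exists && !bind" replace path is omitted for brevity */
	return 0;
}

static __net_init int foo_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_foo_ops.net_id);

	return tc_action_net_init(net, tn, &act_foo_ops);
}
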
