
Searched refs:n (results 1 – 25 of 249), sorted by relevance and grouped by path.


/net/tipc/
node.c
176 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
179 static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
181 static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
184 static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
187 static bool node_is_up(struct tipc_node *n);
197 static struct tipc_link *node_active_link(struct tipc_node *n, int sel) in node_active_link() argument
199 int bearer_id = n->active_links[sel & 1]; in node_active_link()
204 return n->links[bearer_id].link; in node_active_link()
209 struct tipc_node *n; in tipc_node_get_mtu() local
213 n = tipc_node_find(net, addr); in tipc_node_get_mtu()
[all …]
msg.h (a sketch of the bit-field setter pattern follows this block)
260 static inline void msg_set_user(struct tipc_msg *m, u32 n) in msg_set_user() argument
262 msg_set_bits(m, 0, 25, 0xf, n); in msg_set_user()
270 static inline void msg_set_hdr_sz(struct tipc_msg *m, u32 n) in msg_set_hdr_sz() argument
272 msg_set_bits(m, 0, 21, 0xf, n>>2); in msg_set_hdr_sz()
295 static inline void msg_set_non_seq(struct tipc_msg *m, u32 n) in msg_set_non_seq() argument
297 msg_set_bits(m, 0, 20, 1, n); in msg_set_non_seq()
393 static inline void msg_set_type(struct tipc_msg *m, u32 n) in msg_set_type() argument
395 msg_set_bits(m, 1, 29, 0x7, n); in msg_set_type()
493 static inline void msg_set_lookup_scope(struct tipc_msg *m, u32 n) in msg_set_lookup_scope() argument
495 msg_set_bits(m, 1, 19, 0x3, n); in msg_set_lookup_scope()
[all …]
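The msg_set_*() helpers above all funnel into a masked read-modify-write on one 32-bit header word. A minimal, self-contained sketch of that pattern in plain C; set_word_bits() is a hypothetical name, and the real TIPC helper additionally selects the header word and handles any byte-order conversion the on-wire format needs:

#include <stdint.h>

/* Clear the field of width 'mask' at bit offset 'pos', then OR in the
 * new value. Hypothetical helper, for illustration only. */
static inline void set_word_bits(uint32_t *w, uint32_t pos, uint32_t mask,
                                 uint32_t val)
{
	*w &= ~(mask << pos);
	*w |= (val & mask) << pos;
}

/* Example: the msg_set_bits(m, 0, 25, 0xf, n) call above corresponds to
 * a 4-bit field at bit 25 of word 0, i.e. set_word_bits(&word0, 25, 0xf, n). */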
name_table.c
128 static struct service_range *service_range_match_first(struct rb_node *n,
135 if (!n || service_range_entry(n)->max < start)
138 while (n) {
139 l = n->rb_left;
145 n = l;
152 sr = service_range_entry(n);
157 r = n->rb_right;
160 n = r;
178 static struct service_range *service_range_match_next(struct rb_node *n, in service_range_match_next() argument
184 while (n) { in service_range_match_next()
[all …]
group.c (an rbtree lookup/walk sketch follows this block)
235 struct rb_node *n = grp->members.rb_node; in tipc_group_find_member() local
239 while (n) { in tipc_group_find_member()
240 m = container_of(n, struct tipc_member, tree_node); in tipc_group_find_member()
243 n = n->rb_left; in tipc_group_find_member()
245 n = n->rb_right; in tipc_group_find_member()
267 struct rb_node *n; in tipc_group_find_node() local
269 for (n = rb_first(&grp->members); n; n = rb_next(n)) { in tipc_group_find_node()
270 m = container_of(n, struct tipc_member, tree_node); in tipc_group_find_node()
281 struct rb_node **n, *parent = NULL; in tipc_group_add_to_tree() local
284 n = &grp->members.rb_node; in tipc_group_add_to_tree()
[all …]
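The group.c hits above show the two standard kernel rbtree idioms: a keyed descent that uses container_of() to get from the embedded rb_node back to the enclosing struct, and an in-order walk with rb_first()/rb_next(). A minimal sketch under an invented demo_member type (not the TIPC struct, and keyed on a made-up 'instance' field):

#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/types.h>

/* Hypothetical node type used only for this sketch. */
struct demo_member {
	u32 instance;
	struct rb_node tree_node;
};

/* Keyed descent, as in tipc_group_find_member(): go left for smaller
 * keys, right for larger, stop on an exact match. */
static struct demo_member *demo_find(struct rb_root *root, u32 instance)
{
	struct rb_node *n = root->rb_node;
	struct demo_member *m;

	while (n) {
		m = container_of(n, struct demo_member, tree_node);
		if (instance < m->instance)
			n = n->rb_left;
		else if (instance > m->instance)
			n = n->rb_right;
		else
			return m;
	}
	return NULL;
}

/* In-order walk, as in tipc_group_find_node(): rb_first()/rb_next()
 * visit every node in ascending key order. */
static void demo_walk(struct rb_root *root)
{
	struct rb_node *n;

	for (n = rb_first(root); n; n = rb_next(n)) {
		struct demo_member *m =
			container_of(n, struct demo_member, tree_node);
		(void)m; /* inspect m here */
	}
}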
/net/sched/
cls_u32.c
113 struct tc_u_knode *n; in u32_classify() local
123 n = rcu_dereference_bh(ht->ht[sel]); in u32_classify()
126 if (n) { in u32_classify()
127 struct tc_u32_key *key = n->sel.keys; in u32_classify()
130 __this_cpu_inc(n->pf->rcnt); in u32_classify()
134 if (tc_skip_sw(n->flags)) { in u32_classify()
135 n = rcu_dereference_bh(n->next); in u32_classify()
140 if ((skb->mark & n->mask) != n->val) { in u32_classify()
141 n = rcu_dereference_bh(n->next); in u32_classify()
144 __this_cpu_inc(*n->pcpu_success); in u32_classify()
[all …]
sch_api.c
660 static struct hlist_head *qdisc_class_hash_alloc(unsigned int n) in qdisc_class_hash_alloc() argument
665 h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL); in qdisc_class_hash_alloc()
668 for (i = 0; i < n; i++) in qdisc_class_hash_alloc()
770 void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len) in qdisc_tree_reduce_backlog() argument
779 if (n == 0 && len == 0) in qdisc_tree_reduce_backlog()
781 drops = max_t(int, n, 0); in qdisc_tree_reduce_backlog()
798 notify = !sch->q.qlen && !WARN_ON_ONCE(!n && in qdisc_tree_reduce_backlog()
811 sch->q.qlen -= n; in qdisc_tree_reduce_backlog()
974 struct nlmsghdr *n, u32 clid, in qdisc_notify() argument
985 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq, in qdisc_notify()
[all …]
/net/ceph/
debugfs.c
60 struct rb_node *n; in osdmap_show() local
69 for (n = rb_first(&map->pg_pools); n; n = rb_next(n)) { in osdmap_show()
71 rb_entry(n, struct ceph_pg_pool_info, node); in osdmap_show()
92 for (n = rb_first(&map->pg_temp); n; n = rb_next(n)) { in osdmap_show()
94 rb_entry(n, struct ceph_pg_mapping, node); in osdmap_show()
103 for (n = rb_first(&map->primary_temp); n; n = rb_next(n)) { in osdmap_show()
105 rb_entry(n, struct ceph_pg_mapping, node); in osdmap_show()
110 for (n = rb_first(&map->pg_upmap); n; n = rb_next(n)) { in osdmap_show()
112 rb_entry(n, struct ceph_pg_mapping, node); in osdmap_show()
121 for (n = rb_first(&map->pg_upmap_items); n; n = rb_next(n)) { in osdmap_show()
[all …]
/net/ipv4/
fib_trie.c
115 #define IS_TRIE(n) ((n)->pos >= KEYLENGTH) argument
116 #define IS_TNODE(n) ((n)->bits) argument
117 #define IS_LEAF(n) (!(n)->bits) argument
141 #define TNODE_SIZE(n) offsetof(struct tnode, kv[0].tnode[n]) argument
201 static inline void node_set_parent(struct key_vector *n, struct key_vector *tp) in node_set_parent() argument
203 if (n) in node_set_parent()
204 rcu_assign_pointer(tn_info(n)->parent, tp); in node_set_parent()
207 #define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER(tn_info(n)->parent, p) argument
309 struct tnode *n = container_of(head, struct tnode, rcu); in __node_free_rcu() local
311 if (!n->tn_bits) in __node_free_rcu()
[all …]
arp.c
123 static bool arp_key_eq(const struct neighbour *n, const void *pkey);
689 struct neighbour *n; in arp_process() local
828 n = neigh_event_ns(&arp_tbl, sha, &sip, dev); in arp_process()
829 if (n) { in arp_process()
834 neigh_release(n); in arp_process()
844 n = neigh_event_ns(&arp_tbl, sha, &sip, dev); in arp_process()
845 if (n) in arp_process()
846 neigh_release(n); in arp_process()
867 n = __neigh_lookup(&arp_tbl, &sip, dev, 0); in arp_process()
870 if (n || IN_DEV_ARP_ACCEPT(in_dev)) { in arp_process()
[all …]
/net/core/
neighbour.c
54 static void __neigh_notify(struct neighbour *n, int type, int flags,
118 static void neigh_mark_dead(struct neighbour *n) in neigh_mark_dead() argument
120 n->dead = 1; in neigh_mark_dead()
121 if (!list_empty(&n->gc_list)) { in neigh_mark_dead()
122 list_del_init(&n->gc_list); in neigh_mark_dead()
123 atomic_dec(&n->tbl->gc_entries); in neigh_mark_dead()
127 static void neigh_update_gc_list(struct neighbour *n) in neigh_update_gc_list() argument
131 write_lock_bh(&n->tbl->lock); in neigh_update_gc_list()
132 write_lock(&n->lock); in neigh_update_gc_list()
134 if (n->dead) in neigh_update_gc_list()
[all …]
/net/netfilter/ipset/
ip_set_hash_gen.h
65 #define ahash_region(n, htable_bits) \ argument
66 ((n) % ahash_numof_locks(htable_bits))
91 #define ext_size(n, dsize) \ argument
92 (sizeof(struct hbucket) + (n) * (dsize))
313 mtype_add_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n) in mtype_add_cidr() argument
319 for (i = 0, j = -1; i < NLEN && h->nets[i].cidr[n]; i++) { in mtype_add_cidr()
322 } else if (h->nets[i].cidr[n] < cidr) { in mtype_add_cidr()
324 } else if (h->nets[i].cidr[n] == cidr) { in mtype_add_cidr()
325 h->nets[CIDR_POS(cidr)].nets[n]++; in mtype_add_cidr()
331 h->nets[i].cidr[n] = h->nets[i - 1].cidr[n]; in mtype_add_cidr()
[all …]
ip_set_list_set.c (a list_for_each_entry_safe sketch follows this block)
177 struct set_elem *e, *n; in set_cleanup_entries() local
179 list_for_each_entry_safe(e, n, &map->members, list) in set_cleanup_entries()
237 struct set_elem *e, *n, *prev, *next; in list_set_uadd() local
241 n = prev = next = NULL; in list_set_uadd()
247 n = e; in list_set_uadd()
262 if (n) { in list_set_uadd()
266 ip_set_ext_destroy(set, n); in list_set_uadd()
267 list_set_init_extensions(set, ext, n); in list_set_uadd()
276 n = list_empty(&map->members) ? NULL : in list_set_uadd()
281 n = list_next_entry(next, list); in list_set_uadd()
[all …]
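The 'e, *n' pair in set_cleanup_entries() above is the usual list_for_each_entry_safe() idiom: 'n' caches the next entry so the current one can be unlinked and freed without breaking the traversal. A minimal sketch with an invented demo_elem type:

#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical element type used only for this sketch. */
struct demo_elem {
	struct list_head list;
};

static void demo_flush(struct list_head *members)
{
	struct demo_elem *e, *n;

	/* Safe against removal of the current entry: 'n' already points
	 * at the next element before 'e' is deleted and freed. */
	list_for_each_entry_safe(e, n, members, list) {
		list_del(&e->list);
		kfree(e);
	}
}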
/net/ipv6/
rpl.c
32 size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri, in ipv6_rpl_srh_size() argument
35 return sizeof(struct ipv6_rpl_sr_hdr) + (n * IPV6_PFXTAIL_LEN(cmpri)) + in ipv6_rpl_srh_size()
41 const struct in6_addr *daddr, unsigned char n) in ipv6_rpl_srh_decompress() argument
46 outhdr->hdrlen = (((n + 1) * sizeof(struct in6_addr)) >> 3); in ipv6_rpl_srh_decompress()
53 for (i = 0; i < n; i++) in ipv6_rpl_srh_decompress()
58 ipv6_rpl_addr_decompress(&outhdr->rpl_segaddr[n], daddr, in ipv6_rpl_srh_decompress()
59 ipv6_rpl_segdata_pos(inhdr, n), in ipv6_rpl_srh_decompress()
65 unsigned char n) in ipv6_rpl_srh_calc_cmpri() argument
71 for (i = 0; i < n; i++) { in ipv6_rpl_srh_calc_cmpri()
96 const struct in6_addr *daddr, unsigned char n) in ipv6_rpl_srh_compress() argument
[all …]
/net/rxrpc/
skbuff.c
26 int n = atomic_inc_return(select_skb_count(skb)); in rxrpc_new_skb() local
27 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, in rxrpc_new_skb()
38 int n = atomic_read(select_skb_count(skb)); in rxrpc_see_skb() local
39 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, in rxrpc_see_skb()
50 int n = atomic_inc_return(select_skb_count(skb)); in rxrpc_get_skb() local
51 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, in rxrpc_get_skb()
62 int n = atomic_inc_return(&rxrpc_n_rx_skbs); in rxrpc_eaten_skb() local
63 trace_rxrpc_skb(skb, op, 0, n, 0, here); in rxrpc_eaten_skb()
73 int n; in rxrpc_free_skb() local
74 n = atomic_dec_return(select_skb_count(skb)); in rxrpc_free_skb()
[all …]
proc.c
250 unsigned int bucket, n; in rxrpc_peer_seq_start() local
259 n = *_pos & ((1U << shift) - 1); in rxrpc_peer_seq_start()
266 if (n == 0) { in rxrpc_peer_seq_start()
270 n++; in rxrpc_peer_seq_start()
273 p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1); in rxrpc_peer_seq_start()
277 n = 1; in rxrpc_peer_seq_start()
278 *_pos = (bucket << shift) | n; in rxrpc_peer_seq_start()
285 unsigned int bucket, n; in rxrpc_peer_seq_next() local
300 n = 1; in rxrpc_peer_seq_next()
301 *_pos = (bucket << shift) | n; in rxrpc_peer_seq_next()
[all …]
/net/xfrm/
xfrm_hash.c (an allocation/free sketch follows this block)
18 struct hlist_head *n; in xfrm_hash_alloc() local
21 n = kzalloc(sz, GFP_KERNEL); in xfrm_hash_alloc()
23 n = vzalloc(sz); in xfrm_hash_alloc()
25 n = (struct hlist_head *) in xfrm_hash_alloc()
29 return n; in xfrm_hash_alloc()
32 void xfrm_hash_free(struct hlist_head *n, unsigned int sz) in xfrm_hash_free() argument
35 kfree(n); in xfrm_hash_free()
37 vfree(n); in xfrm_hash_free()
39 free_pages((unsigned long)n, get_order(sz)); in xfrm_hash_free()
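xfrm_hash_alloc()/xfrm_hash_free() above choose an allocator based on the table size, and the free path must mirror the allocation path exactly. A simplified sketch keeping only the slab and vmalloc tiers (the snippet above also falls back to the page allocator; kvzalloc()/kvfree() are the modern shorthand for this pattern):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

/* Small tables come from the slab, larger ones from vmalloc. */
static struct hlist_head *demo_hash_alloc(unsigned int sz)
{
	if (sz <= PAGE_SIZE)
		return kzalloc(sz, GFP_KERNEL);
	return vzalloc(sz);
}

/* Freeing must repeat the same size test so kfree()/vfree() matches
 * whichever allocator produced the table. */
static void demo_hash_free(struct hlist_head *n, unsigned int sz)
{
	if (sz <= PAGE_SIZE)
		kfree(n);
	else
		vfree(n);
}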
/net/ax25/
ax25_addr.c
49 int n; in ax2asc() local
51 for (n = 0, s = buf; n < 6; n++) { in ax2asc()
52 c = (a->ax25_call[n] >> 1) & 0x7F; in ax2asc()
59 if ((n = ((a->ax25_call[6] >> 1) & 0x0F)) > 9) { in ax2asc()
61 n -= 10; in ax2asc()
64 *s++ = n + '0'; in ax2asc()
82 int n; in asc2ax() local
84 for (s = callsign, n = 0; n < 6; n++) { in asc2ax()
86 addr->ax25_call[n] = *s++; in asc2ax()
88 addr->ax25_call[n] = ' '; in asc2ax()
[all …]
/net/netlabel/
netlabel_addrlist.h
62 struct netlbl_af4list *n = __af4list_entry(s); in __af4list_valid() local
63 while (i != h && !n->valid) { in __af4list_valid()
65 n = __af4list_entry(i); in __af4list_valid()
67 return n; in __af4list_valid()
74 struct netlbl_af4list *n = __af4list_entry(s); in __af4list_valid_rcu() local
75 while (i != h && !n->valid) { in __af4list_valid_rcu()
77 n = __af4list_entry(i); in __af4list_valid_rcu()
79 return n; in __af4list_valid_rcu()
129 struct netlbl_af6list *n = __af6list_entry(s); in __af6list_valid() local
130 while (i != h && !n->valid) { in __af6list_valid()
[all …]
/net/bridge/
br_arp_nd_proxy.c (a neighbour lookup/release sketch follows this block)
130 struct neighbour *n; in br_do_proxy_suppress_arp() local
191 n = neigh_lookup(&arp_tbl, &tip, vlandev); in br_do_proxy_suppress_arp()
192 if (n) { in br_do_proxy_suppress_arp()
195 if (!(n->nud_state & NUD_VALID)) { in br_do_proxy_suppress_arp()
196 neigh_release(n); in br_do_proxy_suppress_arp()
200 f = br_fdb_find_rcu(br, n->ha, vid); in br_do_proxy_suppress_arp()
209 sha, n->ha, sha, 0, 0); in br_do_proxy_suppress_arp()
212 sha, n->ha, sha, in br_do_proxy_suppress_arp()
226 neigh_release(n); in br_do_proxy_suppress_arp()
250 struct sk_buff *request, struct neighbour *n, in br_nd_send() argument
[all …]
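The br_do_proxy_suppress_arp() hits above follow the neighbour-cache reference discipline: a successful neigh_lookup() takes a reference that must be dropped with neigh_release(), and the entry is only trusted while its NUD state is valid. A minimal sketch of that lookup/check/release sequence (demo_neigh_is_valid() is hypothetical, and locking around nud_state is elided):

#include <linux/netdevice.h>
#include <net/arp.h>
#include <net/neighbour.h>

/* Returns true if an ARP neighbour entry for 'tip' on 'dev' exists and
 * is currently in a valid NUD state. */
static bool demo_neigh_is_valid(struct net_device *dev, __be32 tip)
{
	struct neighbour *n;
	bool valid;

	n = neigh_lookup(&arp_tbl, &tip, dev);
	if (!n)
		return false;

	valid = !!(n->nud_state & NUD_VALID);
	neigh_release(n);	/* drop the ref taken by neigh_lookup() */
	return valid;
}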
/net/bpfilter/
main.c
36 int n; in loop() local
38 n = read(0, &req, sizeof(req)); in loop()
39 if (n != sizeof(req)) { in loop()
40 fprintf(debug_f, "invalid request %d\n", n); in loop()
48 n = write(1, &reply, sizeof(reply)); in loop()
49 if (n != sizeof(reply)) { in loop()
50 fprintf(debug_f, "reply failed %d\n", n); in loop()
/net/rose/
af_rose.c
703 int n; in rose_bind() local
743 for (n = 0 ; n < addr->srose_ndigis ; n++) in rose_bind()
744 rose->source_digis[n] = full_addr->srose_digis[n]; in rose_bind()
765 int n, err = 0; in rose_connect() local
851 for (n = 0 ; n < addr->srose_ndigis ; n++) in rose_connect()
852 rose->dest_digis[n] = full_addr->srose_digis[n]; in rose_connect()
989 int n; in rose_getname() local
999 for (n = 0; n < rose->dest_ndigis; n++) in rose_getname()
1000 srose->srose_digis[n] = rose->dest_digis[n]; in rose_getname()
1006 for (n = 0; n < rose->source_ndigis; n++) in rose_getname()
[all …]
/net/appletalk/
aarp.c
252 static void __aarp_expire_timer(struct aarp_entry **n) in __aarp_expire_timer() argument
256 while (*n) in __aarp_expire_timer()
258 if (time_after(jiffies, (*n)->expires_at)) { in __aarp_expire_timer()
259 t = *n; in __aarp_expire_timer()
260 *n = (*n)->next; in __aarp_expire_timer()
263 n = &((*n)->next); in __aarp_expire_timer()
271 static void __aarp_kick(struct aarp_entry **n) in __aarp_kick() argument
275 while (*n) in __aarp_kick()
277 if ((*n)->xmit_count >= sysctl_aarp_retransmit_limit) { in __aarp_kick()
278 t = *n; in __aarp_kick()
[all …]
/net/6lowpan/
ndisc.c
56 static void lowpan_ndisc_802154_update(struct neighbour *n, u32 flags, in lowpan_ndisc_802154_update() argument
60 struct lowpan_802154_neigh *neigh = lowpan_802154_neigh(neighbour_priv(n)); in lowpan_ndisc_802154_update()
93 write_lock_bh(&n->lock); in lowpan_ndisc_802154_update()
99 write_unlock_bh(&n->lock); in lowpan_ndisc_802154_update()
103 struct neighbour *n, u32 flags, u8 icmp6_type, in lowpan_ndisc_update() argument
111 lowpan_ndisc_802154_update(n, flags, icmp6_type, ndopts); in lowpan_ndisc_update()
118 struct lowpan_802154_neigh *n; in lowpan_ndisc_opt_addr_space() local
127 n = lowpan_802154_neigh(neighbour_priv(neigh)); in lowpan_ndisc_opt_addr_space()
130 if (lowpan_802154_is_valid_src_short_addr(n->short_addr)) { in lowpan_ndisc_opt_addr_space()
131 memcpy(ha_buf, &n->short_addr, in lowpan_ndisc_opt_addr_space()
/net/9p/
trans_virtio.c
323 int n; in p9_get_mapped_pages() local
334 n = iov_iter_get_pages_alloc(data, pages, count, offs); in p9_get_mapped_pages()
335 if (n < 0) in p9_get_mapped_pages()
336 return n; in p9_get_mapped_pages()
338 nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE); in p9_get_mapped_pages()
340 return n; in p9_get_mapped_pages()
410 int n = p9_get_mapped_pages(chan, &out_pages, uodata, in p9_virtio_zc_request() local
412 if (n < 0) { in p9_virtio_zc_request()
413 err = n; in p9_virtio_zc_request()
416 out_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE); in p9_virtio_zc_request()
[all …]
/net/bluetooth/bnep/
core.c
105 int n; in bnep_ctrl_set_netfilter() local
110 n = get_unaligned_be16(data); in bnep_ctrl_set_netfilter()
114 if (len < n) in bnep_ctrl_set_netfilter()
117 BT_DBG("filter len %d", n); in bnep_ctrl_set_netfilter()
120 n /= 4; in bnep_ctrl_set_netfilter()
121 if (n <= BNEP_MAX_PROTO_FILTERS) { in bnep_ctrl_set_netfilter()
125 for (i = 0; i < n; i++) { in bnep_ctrl_set_netfilter()
136 if (n == 0) in bnep_ctrl_set_netfilter()
151 int n; in bnep_ctrl_set_mcfilter() local
156 n = get_unaligned_be16(data); in bnep_ctrl_set_mcfilter()
[all …]
