/net/bridge/

D | br_forward.c |
  114  static int deliver_clone(const struct net_bridge_port *prev,  in deliver_clone() argument
  125  __br_forward(prev, skb, local_orig);  in deliver_clone()
  169  struct net_bridge_port *prev, struct net_bridge_port *p,  in maybe_deliver() argument
  176  return prev;  in maybe_deliver()
  178  if (!prev)  in maybe_deliver()
  181  err = deliver_clone(prev, skb, local_orig);  in maybe_deliver()
  194  struct net_bridge_port *prev = NULL;  in br_flood() local
  223  prev = maybe_deliver(prev, p, skb, local_orig);  in br_flood()
  224  if (IS_ERR(prev))  in br_flood()
  228  if (!prev)  in br_flood()
  [all …]

/net/netfilter/

D | xt_limit.c |
  19   unsigned long prev;  member
  72   priv->credit += (now - xchg(&priv->prev, now)) * CREDITS_PER_JIFFY;  in limit_mt()
  119  priv->prev = jiffies;  in limit_mt_check()
  142  compat_ulong_t prev;  member
  157  .prev = cm->prev | (unsigned long)cm->master << 32,  in limit_mt_compat_from_user()
  171  .prev = m->prev,  in limit_mt_compat_to_user()
  175  .master = m->prev >> 32,  in limit_mt_compat_to_user()
  194  .usersize = offsetof(struct xt_rateinfo, prev),
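The xt_limit hits above are a jiffies-based token bucket: each invocation atomically swaps the current time into prev with xchg() and converts the elapsed interval into credits, so no separate refill timer is needed. Below is a minimal single-threaded user-space sketch of the same idea; the names (bucket, CREDITS_PER_TICK, limit_ok) and the 10 ms tick are invented for the sketch, with C11 atomic_exchange() standing in for the kernel's xchg().

    /* Token-bucket limiter in the style of limit_mt(); illustrative only. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    #define CREDITS_PER_TICK 10   /* credits earned per elapsed tick */
    #define CREDIT_CAP       100  /* burst ceiling */
    #define COST_PER_EVENT   25   /* credits one event consumes */

    struct bucket {
        _Atomic unsigned long prev;  /* tick of the last refill */
        unsigned long credit;        /* accumulated credits */
    };

    static unsigned long now_ticks(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 100 + ts.tv_nsec / 10000000;  /* 10 ms ticks */
    }

    /* Returns true if the event is within the configured rate. */
    static bool limit_ok(struct bucket *b)
    {
        unsigned long now = now_ticks();

        /* Mirrors: credit += (now - xchg(&prev, now)) * CREDITS_PER_JIFFY */
        b->credit += (now - atomic_exchange(&b->prev, now)) * CREDITS_PER_TICK;
        if (b->credit > CREDIT_CAP)
            b->credit = CREDIT_CAP;

        if (b->credit >= COST_PER_EVENT) {
            b->credit -= COST_PER_EVENT;
            return true;   /* pass */
        }
        return false;      /* rate-limited */
    }

    int main(void)
    {
        struct bucket b = { .prev = now_ticks(), .credit = CREDIT_CAP };
        for (int i = 0; i < 8; i++)
            printf("event %d: %s\n", i, limit_ok(&b) ? "pass" : "drop");
        return 0;
    }

Only the timestamp swap is atomic here; the sketch stays single-threaded, so the plain credit arithmetic is safe in this toy.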
D | nft_set_hash.c |
  127  struct nft_rhash_elem *he, *prev;  in nft_rhash_update() local
  142  prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,  in nft_rhash_update()
  144  if (IS_ERR(prev))  in nft_rhash_update()
  148  if (prev) {  in nft_rhash_update()
  151  he = prev;  in nft_rhash_update()
  176  struct nft_rhash_elem *prev;  in nft_rhash_insert() local
  178  prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,  in nft_rhash_insert()
  180  if (IS_ERR(prev))  in nft_rhash_insert()
  181  return PTR_ERR(prev);  in nft_rhash_insert()
  182  if (prev) {  in nft_rhash_insert()
  [all …]
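Both call sites follow the rhashtable insert-or-get contract: rhashtable_lookup_get_insert_key() returns NULL when the new element went in, an ERR_PTR() on failure, and the already-present element otherwise, in which case the caller drops its own copy and reuses prev. A toy chained hash table showing the same contract (every name here is invented; this is not the rhashtable implementation):

    /* insert_or_get(): NULL on insert, existing element on duplicate key. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NBUCKETS 16

    struct elem {
        struct elem *next;
        char key[32];
        int val;
    };

    static struct elem *table[NBUCKETS];

    static unsigned int hash(const char *key)
    {
        unsigned int h = 5381;
        while (*key)
            h = h * 33 + (unsigned char)*key++;
        return h % NBUCKETS;
    }

    static struct elem *insert_or_get(struct elem *e)
    {
        struct elem **head = &table[hash(e->key)];

        for (struct elem *it = *head; it; it = it->next)
            if (strcmp(it->key, e->key) == 0)
                return it;          /* key exists: hand back the old element */

        e->next = *head;            /* not found: link in the new one */
        *head = e;
        return NULL;
    }

    int main(void)
    {
        struct elem *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));
        strcpy(a->key, "10.0.0.1"); a->val = 1;
        strcpy(b->key, "10.0.0.1"); b->val = 2;

        insert_or_get(a);
        struct elem *prev = insert_or_get(b);
        if (prev) {                 /* mirror nft_rhash_update(): reuse prev */
            free(b);
            printf("key existed, val=%d\n", prev->val);
        }
        return 0;
    }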
D | nft_set_rbtree.c |
  243  struct rb_node *prev = rb_prev(&rbe->node);  in nft_rbtree_gc_elem() local
  256  while (prev) {  in nft_rbtree_gc_elem()
  257  rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);  in nft_rbtree_gc_elem()
  262  prev = rb_prev(prev);  in nft_rbtree_gc_elem()
  265  if (prev) {  in nft_rbtree_gc_elem()
  266  rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);  in nft_rbtree_gc_elem()

/net/can/j1939/

D | address-claim.c |
  129  struct j1939_ecu *ecu, *prev;  in j1939_ac_process() local
  227  prev = j1939_ecu_get_by_addr_locked(priv, skcb->addr.sa);  in j1939_ac_process()
  228  if (prev) {  in j1939_ac_process()
  229  if (ecu->name > prev->name) {  in j1939_ac_process()
  231  j1939_ecu_put(prev);  in j1939_ac_process()
  235  j1939_ecu_unmap_locked(prev);  in j1939_ac_process()
  236  j1939_ecu_put(prev);  in j1939_ac_process()

/net/core/

D | dst.c |
  205  unsigned long prev, new;  in dst_cow_metrics_generic() local
  211  prev = cmpxchg(&dst->_metrics, old, new);  in dst_cow_metrics_generic()
  213  if (prev != old) {  in dst_cow_metrics_generic()
  215  p = (struct dst_metrics *)__DST_METRICS_PTR(prev);  in dst_cow_metrics_generic()
  216  if (prev & DST_METRICS_READ_ONLY)  in dst_cow_metrics_generic()
  218  } else if (prev & DST_METRICS_REFCOUNTED) {  in dst_cow_metrics_generic()
  231  unsigned long prev, new;  in __dst_destroy_metrics_generic() local
  234  prev = cmpxchg(&dst->_metrics, old, new);  in __dst_destroy_metrics_generic()
  235  if (prev == old)  in __dst_destroy_metrics_generic()
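dst_cow_metrics_generic() is the standard cmpxchg copy-on-write loop: build a private copy, try to publish it atomically, and if prev != old another CPU got there first, so the local copy is discarded and the winner's pointer is used. A simplified user-space rendering with C11 atomics in place of cmpxchg(); the struct and function names are invented, and the kernel's READ_ONLY/REFCOUNTED flag handling is deliberately omitted.

    /* cmpxchg-style copy-on-write of a shared pointer; sketch only. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct metrics { int rtt; int mtu; };

    static _Atomic(struct metrics *) shared_metrics;

    static struct metrics *cow_metrics(void)
    {
        struct metrics *old = atomic_load(&shared_metrics);
        struct metrics *new = malloc(sizeof(*new));

        if (!new)
            return NULL;
        memcpy(new, old, sizeof(*new));      /* private writable copy */

        /* Publish: succeeds only if nobody replaced 'old' meanwhile. */
        struct metrics *expected = old;
        if (!atomic_compare_exchange_strong(&shared_metrics, &expected, new)) {
            free(new);                       /* lost the race ... */
            return expected;                 /* ... use the winner's copy */
        }
        return new;
    }

    int main(void)
    {
        static struct metrics ro = { .rtt = 100, .mtu = 1500 };
        atomic_store(&shared_metrics, &ro);

        struct metrics *m = cow_metrics();
        m->rtt = 42;                         /* safe: we own this copy */
        printf("rtt=%d mtu=%d\n", m->rtt, m->mtu);
        return 0;
    }

The design point is that readers never see a half-built table: the swap installs a fully initialized copy or nothing at all.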
D | datagram.c |
  101  if (READ_ONCE(sk->sk_receive_queue.prev) != skb)  in __skb_wait_for_more_packets()
  152  skb->prev->next = nskb;  in skb_set_peeked()
  153  skb->next->prev = nskb;  in skb_set_peeked()
  154  nskb->prev = skb->prev;  in skb_set_peeked()
  183  *last = queue->prev;  in __skb_try_recv_from_queue()
  282  } while (READ_ONCE(sk->sk_receive_queue.prev) != *last);  in __skb_try_recv_datagram()
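The skb_set_peeked() lines splice a clone into the receive queue in place of the original with four pointer writes and no list walk (the READ_ONCE hits elsewhere just peek at the queue tail via head.prev). The same splice on a plain doubly-linked node type; struct node is a stand-in for struct sk_buff and replace_node() is an invented name.

    /* In-place replacement of a node in a doubly-linked queue. */
    #include <stdio.h>

    struct node {
        struct node *prev, *next;
        int data;
    };

    /* Replace 'old' with 'new' without walking the list. Assumes both
     * neighbours exist, as they do inside a circular queue. */
    static void replace_node(struct node *old, struct node *new)
    {
        old->prev->next = new;         /* left neighbour now points at new */
        old->next->prev = new;         /* right neighbour points back at new */
        new->prev = old->prev;         /* new inherits old's links */
        new->next = old->next;
        old->prev = old->next = NULL;  /* detach old defensively */
    }

    int main(void)
    {
        struct node head = { &head, &head, 0 };  /* circular sentinel */
        struct node a = { 0 }, b = { 0 };
        a.data = 1; b.data = 2;

        /* insert a as the only element */
        a.prev = &head; a.next = &head;
        head.next = &a; head.prev = &a;

        replace_node(&a, &b);
        printf("head.next->data = %d\n", head.next->data);  /* 2 */
        return 0;
    }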
/net/netfilter/ipset/

D | ip_set_list_set.c |
  190  struct set_elem *e, *next, *prev = NULL;  in list_set_utest() local
  198  prev = e;  in list_set_utest()
  209  ret = prev && prev->id == d->refid;  in list_set_utest()
  237  struct set_elem *e, *n, *prev, *next;  in list_set_uadd() local
  241  n = prev = next = NULL;  in list_set_uadd()
  253  prev = e;  in list_set_uadd()
  258  (d->before < 0 && !prev))  in list_set_uadd()
  284  if (prev->list.prev != &map->members)  in list_set_uadd()
  285  n = list_prev_entry(prev, list);  in list_set_uadd()
  304  else if (prev)  in list_set_uadd()
  [all …]

/net/ipv4/

D | inet_fragment.c |
  298  struct inet_frag_queue **prev)  in inet_frag_create() argument
  305  *prev = ERR_PTR(-ENOMEM);  in inet_frag_create()
  310  *prev = rhashtable_lookup_get_insert_key(&fqdir->rhashtable, &q->key,  in inet_frag_create()
  312  if (*prev) {  in inet_frag_create()
  326  struct inet_frag_queue *fq = NULL, *prev;  in inet_frag_find() local
  333  prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params);  in inet_frag_find()
  334  if (!prev)  in inet_frag_find()
  335  fq = inet_frag_create(fqdir, key, &prev);  in inet_frag_find()
  336  if (!IS_ERR_OR_NULL(prev)) {  in inet_frag_find()
  337  fq = prev;  in inet_frag_find()
  [all …]

D | tcp_cong.c |
  223  const struct tcp_congestion_ops *prev;  in tcp_set_default_congestion_control() local
  237  prev = xchg(&net->ipv4.tcp_congestion_control, ca);  in tcp_set_default_congestion_control()
  238  if (prev)  in tcp_set_default_congestion_control()
  239  module_put(prev->owner);  in tcp_set_default_congestion_control()
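tcp_set_default_congestion_control() swaps the per-netns default in a single xchg() and only then drops the reference pinned by the old pointer, so there is no window in which the slot is empty or the old ops can vanish while still installed. A compact sketch of that swap-then-release shape; ca_ops, ca_put() and the refcount scheme are simplified inventions, not the kernel's module refcounting.

    /* Atomic pointer swap followed by release of the previous holder. */
    #include <stdatomic.h>
    #include <stdio.h>

    struct ca_ops {
        const char *name;
        _Atomic int refcnt;
    };

    static _Atomic(struct ca_ops *) default_ca;

    static void ca_put(struct ca_ops *ca)
    {
        if (atomic_fetch_sub(&ca->refcnt, 1) == 1)
            printf("%s: last reference dropped\n", ca->name);
    }

    static void set_default_ca(struct ca_ops *ca)
    {
        atomic_fetch_add(&ca->refcnt, 1);           /* pin the new default */

        /* Swap in one shot; 'prev' is whatever was installed before. */
        struct ca_ops *prev = atomic_exchange(&default_ca, ca);
        if (prev)
            ca_put(prev);                           /* release the old default */
    }

    int main(void)
    {
        static struct ca_ops cubic = { "cubic", 0 };
        static struct ca_ops bbr   = { "bbr", 0 };

        set_default_ca(&cubic);
        set_default_ca(&bbr);   /* prints that cubic's reference is gone */
        return 0;
    }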
D | tcp_input.c |
  1297  static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,  in tcp_shifted_skb() argument
  1323  TCP_SKB_CB(prev)->end_seq += shifted;  in tcp_shifted_skb()
  1326  tcp_skb_pcount_add(prev, pcount);  in tcp_shifted_skb()
  1335  if (!TCP_SKB_CB(prev)->tcp_gso_size)  in tcp_shifted_skb()
  1336  TCP_SKB_CB(prev)->tcp_gso_size = mss;  in tcp_shifted_skb()
  1343  TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);  in tcp_shifted_skb()
  1354  tp->retransmit_skb_hint = prev;  in tcp_shifted_skb()
  1356  tp->lost_skb_hint = prev;  in tcp_shifted_skb()
  1357  tp->lost_cnt_hint -= tcp_skb_pcount(prev);  in tcp_shifted_skb()
  1360  TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;  in tcp_shifted_skb()
  [all …]

/net/rds/

D | ib_send.c |
  490  struct rds_ib_send_work *prev;  in rds_ib_xmit() local
  620  prev = NULL;  in rds_ib_xmit()
  686  if (prev)  in rds_ib_xmit()
  687  prev->s_wr.next = &send->s_wr;  in rds_ib_xmit()
  688  prev = send;  in rds_ib_xmit()
  704  prev->s_op = ic->i_data_op;  in rds_ib_xmit()
  705  prev->s_wr.send_flags |= IB_SEND_SOLICITED;  in rds_ib_xmit()
  706  if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED))  in rds_ib_xmit()
  707  nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);  in rds_ib_xmit()
  733  if (prev->s_op) {  in rds_ib_xmit()
  [all …]

/net/atm/

D | mpoa_caches.c |
  113  entry->prev = NULL;  in in_cache_add_entry()
  115  client->in_cache->prev = entry;  in in_cache_add_entry()
  200  if (entry->prev != NULL)  in in_cache_remove_entry()
  201  entry->prev->next = entry->next;  in in_cache_remove_entry()
  205  entry->next->prev = entry->prev;  in in_cache_remove_entry()
  433  if (entry->prev != NULL)  in eg_cache_remove_entry()
  434  entry->prev->next = entry->next;  in eg_cache_remove_entry()
  438  entry->next->prev = entry->prev;  in eg_cache_remove_entry()
  473  entry->prev = NULL;  in eg_cache_add_entry()
  475  client->eg_cache->prev = entry;  in eg_cache_add_entry()
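Unlike most of the tree, mpoa_caches.c keeps its two caches as hand-rolled NULL-terminated doubly-linked lists rather than list_head: entries are pushed at the head, and removal must patch the head pointer when the victim has no predecessor. The same add/remove pair on an invented type (cache_head stands in for client->in_cache):

    /* NULL-terminated doubly-linked list with an external head pointer. */
    #include <stdio.h>

    struct entry {
        struct entry *prev, *next;
        int id;
    };

    static struct entry *cache_head;

    static void cache_add(struct entry *e)
    {
        e->prev = NULL;              /* new head has no predecessor */
        e->next = cache_head;
        if (cache_head)
            cache_head->prev = e;    /* old head points back at us */
        cache_head = e;
    }

    static void cache_remove(struct entry *e)
    {
        if (e->prev)
            e->prev->next = e->next; /* bypass e from the left */
        else
            cache_head = e->next;    /* e was the head */
        if (e->next)
            e->next->prev = e->prev; /* bypass e from the right */
    }

    int main(void)
    {
        struct entry a = { .id = 1 }, b = { .id = 2 };
        cache_add(&a);
        cache_add(&b);
        cache_remove(&b);            /* removes the current head */
        printf("head id = %d\n", cache_head->id);  /* 1 */
        return 0;
    }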
D | mpoa_caches.h |
  19  struct in_cache_entry *prev;  member
  56  struct eg_cache_entry *prev;  member

/net/ceph/

D | pagelist.c |
  32   struct page *page = list_entry(pl->head.prev, struct page, lru);  in ceph_pagelist_unmap_tail()
  140  c->page_lru = pl->head.prev;  in ceph_pagelist_set_cursor()
  158  while (pl->head.prev != c->page_lru) {  in ceph_pagelist_truncate()
  159  page = list_entry(pl->head.prev, struct page, lru);  in ceph_pagelist_truncate()
  166  page = list_entry(pl->head.prev, struct page, lru);  in ceph_pagelist_truncate()
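Every pagelist.c hit is the same list_head idiom: in the kernel's circular doubly-linked lists the tail is simply head.prev, and list_entry() (container_of) recovers the enclosing structure from the embedded link. A self-contained re-implementation of just enough of that machinery to show it; list_add_tail() here is a simplified rewrite for the sketch, not the kernel header.

    /* Minimal list_head: tail access in O(1) via head.prev. */
    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *prev, *next; };

    #define LIST_INIT(name) { &(name), &(name) }
    #define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
    }

    struct page { int id; struct list_head lru; };

    int main(void)
    {
        struct list_head head = LIST_INIT(head);
        struct page p1 = { .id = 1 }, p2 = { .id = 2 };

        list_add_tail(&p1.lru, &head);
        list_add_tail(&p2.lru, &head);

        /* Tail in O(1), as ceph_pagelist_unmap_tail() does with pl->head.prev. */
        struct page *tail = list_entry(head.prev, struct page, lru);
        printf("tail id = %d\n", tail->id);   /* 2 */
        return 0;
    }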
/net/bridge/netfilter/

D | ebt_limit.c |
  42   info->credit += (now - xchg(&info->prev, now)) * CREDITS_PER_JIFFY;  in ebt_limit_mt()
  82   info->prev = jiffies;  in ebt_limit_mt_check()
  97   compat_ulong_t prev;  member
  109  .usersize = offsetof(struct ebt_limit_info, prev),

/net/ipv6/ila/

D | ila_xlat.c |
  243  struct ila_map *tila = head, *prev = NULL;  in ila_add_mapping() local
  254  prev = tila;  in ila_add_mapping()
  259  if (prev) {  in ila_add_mapping()
  262  rcu_assign_pointer(prev->next, ila);  in ila_add_mapping()
  286  struct ila_map *ila, *head, *prev;  in ila_del_mapping() local
  296  prev = NULL;  in ila_del_mapping()
  300  prev = ila;  in ila_del_mapping()
  308  if (prev) {  in ila_del_mapping()
  310  rcu_assign_pointer(prev->next, ila->next);  in ila_del_mapping()

/net/sunrpc/xprtrdma/

D | frwr_ops.c |
  528  struct ib_send_wr *first, **prev, *last;  in frwr_unmap_sync() local
  540  prev = &first;  in frwr_unmap_sync()
  557  *prev = last;  in frwr_unmap_sync()
  558  prev = &last->next;  in frwr_unmap_sync()
  633  struct ib_send_wr *first, *last, **prev;  in frwr_unmap_async() local
  643  prev = &first;  in frwr_unmap_async()
  660  *prev = last;  in frwr_unmap_async()
  661  prev = &last->next;  in frwr_unmap_async()
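Here prev is not a node but a pointer to the slot that should receive the next node (initially &first, then &last->next), which lets frwr_unmap_sync() and frwr_unmap_async() wire up a work-request chain in one pass with no special case for the head. The idiom reduced to its essentials; struct send_wr is a stand-in for struct ib_send_wr.

    /* Single-pass chain building with a pointer-to-pointer tail. */
    #include <stdio.h>

    struct send_wr {
        struct send_wr *next;
        int id;
    };

    int main(void)
    {
        struct send_wr wrs[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
        struct send_wr *first = NULL, **prev = &first;

        for (int i = 0; i < 3; i++) {
            wrs[i].next = NULL;
            *prev = &wrs[i];        /* link into whatever slot prev points at */
            prev = &wrs[i].next;    /* the next node goes into this .next */
        }

        for (struct send_wr *wr = first; wr; wr = wr->next)
            printf("wr %d\n", wr->id);
        return 0;
    }

The payoff of the **prev form is the absence of an "is this the first element?" branch inside the loop: the head assignment and every tail append are the same store.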
/net/sched/

D | sch_sfq.c |
  97   sfq_index prev;  member
  213  slot->dep.prev = p;  in sfq_link()
  216  sfq_dep_head(q, n)->prev = x;  in sfq_link()
  222  p = q->slots[x].dep.prev; \
  224  sfq_dep_head(q, n)->prev = p; \
  261  slot->skblist_prev = skb->prev;  in slot_dequeue_tail()
  262  skb->prev->next = (struct sk_buff *)slot;  in slot_dequeue_tail()
  263  skb->next = skb->prev = NULL;  in slot_dequeue_tail()
  273  skb->next->prev = (struct sk_buff *)slot;  in slot_dequeue_head()
  274  skb->next = skb->prev = NULL;  in slot_dequeue_head()
  [all …]

D | sch_teql.c |
  133  struct Qdisc *q, *prev;  in teql_destroy() local
  140  prev = master->slaves;  in teql_destroy()
  141  if (prev) {  in teql_destroy()
  143  q = NEXT_SLAVE(prev);  in teql_destroy()
  145  NEXT_SLAVE(prev) = NEXT_SLAVE(q);  in teql_destroy()
  165  } while ((prev = q) != master->slaves);  in teql_destroy()

D | sch_hhf.c |
  191  u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;  in seek_list() local
  193  if (hhf_time_before(prev, now)) {  in seek_list()
  221  u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;  in alloc_new_hh() local
  223  if (hhf_time_before(prev, now))  in alloc_new_hh()
  255  u32 prev;  in hhf_classify() local
  259  prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout;  in hhf_classify()
  260  if (hhf_time_before(prev, now)) {  in hhf_classify()
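In sch_hhf.c, prev is a deadline (last hit plus timeout) compared against now with hhf_time_before(), a wrap-safe test in the style of the kernel's time_before(): the difference is cast to a signed type, so the comparison stays correct when the u32 counter wraps around. The test in isolation, under an invented name:

    /* Wrap-safe comparison of free-running 32-bit timestamps. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* true if a is before b, even if the counter has wrapped */
    static bool time_before32(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) < 0;
    }

    int main(void)
    {
        uint32_t prev = 0xFFFFFFF0u;     /* just before wraparound */
        uint32_t now  = 0x00000010u;     /* just after */

        /* a naive 'prev < now' would say false here; the signed test is right */
        printf("%d\n", time_before32(prev, now));   /* 1 */
        return 0;
    }

As in the kernel, this relies on two's-complement wraparound of unsigned subtraction, and it is only valid while the two timestamps are less than half the counter range apart.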
/net/tipc/

D | monitor.c |
  326  struct tipc_peer *peer, *prev, *head;  in tipc_mon_remove_peer() local
  332  prev = peer_prev(peer);  in tipc_mon_remove_peer()
  338  head = peer_head(prev);  in tipc_mon_remove_peer()
  341  mon_update_neighbors(mon, prev);  in tipc_mon_remove_peer()
  360  struct tipc_peer *cur, *prev, *p;  in tipc_mon_add_peer() local
  373  prev = self;  in tipc_mon_add_peer()
  375  if ((addr > prev->addr) && (addr < cur->addr))  in tipc_mon_add_peer()
  377  if (((addr < cur->addr) || (addr > prev->addr)) &&  in tipc_mon_add_peer()
  378  (prev->addr > cur->addr))  in tipc_mon_add_peer()
  380  prev = cur;  in tipc_mon_add_peer()

/net/netlabel/

D | netlabel_kapi.c |
  560  struct netlbl_lsm_catmap *prev = NULL;  in _netlbl_catmap_getnode() local
  567  prev = iter;  in _netlbl_catmap_getnode()
  587  if (prev == NULL) {  in _netlbl_catmap_getnode()
  591  iter->next = prev->next;  in _netlbl_catmap_getnode()
  592  prev->next = iter;  in _netlbl_catmap_getnode()
  666  struct netlbl_lsm_catmap *prev = NULL;  in netlbl_catmap_walkrng() local
  692  if (prev && idx == 0 && bit == 0)  in netlbl_catmap_walkrng()
  693  return prev->startbit + NETLBL_CATMAP_SIZE - 1;  in netlbl_catmap_walkrng()
  700  prev = iter;  in netlbl_catmap_walkrng()

D | netlabel_addrlist.c |
  165  iter->list.prev,  in netlbl_af4list_add()
  203  iter->list.prev,  in netlbl_af6list_add()

/net/rfkill/

D | core.c |
  311  bool prev, curr;  in rfkill_set_block() local
  326  prev = rfkill->state & RFKILL_BLOCK_SW;  in rfkill_set_block()
  328  if (prev)  in rfkill_set_block()
  363  if (prev != curr)  in rfkill_set_block()
  528  bool ret, prev;  in rfkill_set_hw_state() local
  533  prev = !!(rfkill->state & RFKILL_BLOCK_HW);  in rfkill_set_hw_state()
  544  if (rfkill->registered && prev != blocked)  in rfkill_set_hw_state()
  568  bool prev, hwblock;  in rfkill_set_sw_state() local
  573  prev = !!(rfkill->state & RFKILL_BLOCK_SW);  in rfkill_set_sw_state()
  582  if (prev != blocked && !hwblock)  in rfkill_set_sw_state()