/net/sunrpc/xprtrdma/ |
D | frwr_ops.c |
    528 struct ib_send_wr *first, **prev, *last; in frwr_unmap_sync() local
    548 last = &frwr->fr_invwr; in frwr_unmap_sync()
    549 last->next = NULL; in frwr_unmap_sync()
    550 last->wr_cqe = &frwr->fr_cqe; in frwr_unmap_sync()
    551 last->sg_list = NULL; in frwr_unmap_sync()
    552 last->num_sge = 0; in frwr_unmap_sync()
    553 last->opcode = IB_WR_LOCAL_INV; in frwr_unmap_sync()
    554 last->send_flags = IB_SEND_SIGNALED; in frwr_unmap_sync()
    555 last->ex.invalidate_rkey = mr->mr_handle; in frwr_unmap_sync()
    557 *prev = last; in frwr_unmap_sync()
    [all …]
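The frwr_unmap_sync() hits above build the final LOCAL_INV work request and splice it into a send chain through a pointer-to-pointer cursor (*prev = last). A minimal userspace sketch of that chaining idiom, with a made-up struct req standing in for ib_send_wr:

#include <stdio.h>
#include <stdlib.h>

struct req {
        struct req *next;
        int opcode;             /* stand-in for the work-request opcode */
};

int main(void)
{
        struct req *head = NULL, **prev = &head, *last;

        for (int i = 0; i < 3; i++) {
                last = calloc(1, sizeof(*last));
                if (!last)
                        return 1;
                last->next = NULL;
                last->opcode = i;
                *prev = last;           /* link behind the previous request */
                prev = &last->next;     /* remember where the next link goes */
        }
        for (struct req *r = head; r; r = r->next)
                printf("request with opcode %d\n", r->opcode);
        while (head) {
                struct req *next = head->next;
                free(head);
                head = next;
        }
        return 0;
}

Keeping prev aimed at the previous node's next field means the empty-chain case needs no special handling.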
|
/net/netfilter/ipvs/ |
D | ip_vs_wrr.c |
    163 struct ip_vs_dest *dest, *last, *stop = NULL; in ip_vs_wrr_schedule() local
    174 last = dest; in ip_vs_wrr_schedule()
    196 &last->n_list == &svc->destinations) in ip_vs_wrr_schedule()
    202 &last->n_list != &svc->destinations) { in ip_vs_wrr_schedule()
    207 stop = last; in ip_vs_wrr_schedule()
|
D | ip_vs_rr.c |
    57 struct ip_vs_dest *dest, *last; in ip_vs_rr_schedule() local
    64 last = dest = list_entry(p, struct ip_vs_dest, n_list); in ip_vs_rr_schedule()
    74 if (dest == last) in ip_vs_rr_schedule()
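ip_vs_rr_schedule() remembers where the scan started ("last") and gives up once a full pass over the destination list comes back to it without finding anything usable. A small sketch of that round-robin termination check over a plain array; the table and the usable flag are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct dest { const char *name; bool usable; };

static struct dest *rr_schedule(struct dest *tbl, int n, int *cursor)
{
        int last = *cursor;                 /* where this pass started */
        int i = last;

        do {
                i = (i + 1) % n;            /* advance the round-robin position */
                if (tbl[i].usable) {
                        *cursor = i;        /* resume here next time */
                        return &tbl[i];
                }
        } while (i != last);                /* full circle: nothing usable */
        return NULL;
}

int main(void)
{
        struct dest tbl[] = {
                { "10.0.0.1", true }, { "10.0.0.2", false }, { "10.0.0.3", true },
        };
        int cursor = 0;

        for (int k = 0; k < 4; k++) {
                struct dest *d = rr_schedule(tbl, 3, &cursor);
                printf("%s\n", d ? d->name : "(none)");
        }
        return 0;
}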
|
/net/netfilter/ipset/ |
D | pfxlen.c |
    174 u32 last; in ip_set_range_to_cidr() local
    180 last = from | ~ip_set_hostmask(i); in ip_set_range_to_cidr()
    181 if (!after(last, to)) { in ip_set_range_to_cidr()
    183 return last; in ip_set_range_to_cidr()
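ip_set_range_to_cidr() picks the largest CIDR block that starts at "from" and whose last address does not pass "to". A userspace sketch of that step; the hostmask() helper, the alignment test, and the host-order u32 addresses are illustrative, not the kernel's ip_set_hostmask():

#include <stdint.h>
#include <stdio.h>

static uint32_t hostmask(uint8_t cidr)      /* host bits for a prefix length */
{
        return cidr ? (cidr == 32 ? 0 : (1u << (32 - cidr)) - 1) : 0xffffffffu;
}

/* Returns the last address of the chosen block and stores its prefix length. */
static uint32_t range_to_cidr(uint32_t from, uint32_t to, uint8_t *cidr)
{
        for (uint8_t i = 0; i <= 32; i++) {
                uint32_t last = from | hostmask(i);
                /* the block must start exactly at "from" and stay within "to" */
                if ((from & ~hostmask(i)) == from && last <= to) {
                        *cidr = i;
                        return last;
                }
        }
        *cidr = 32;
        return from;
}

int main(void)
{
        uint8_t cidr;
        uint32_t from = 0x0a000000, to = 0x0a0000ff;    /* 10.0.0.0 - 10.0.0.255 */
        uint32_t last = range_to_cidr(from, to, &cidr);

        printf("block /%u, last 0x%08x\n", cidr, last); /* expect /24 */
        return 0;
}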
|
/net/openvswitch/ |
D | actions.c |
    157 bool last, bool clone_flow_key);
    1008 bool last) in sample() argument
    1023 if (last) in sample()
    1029 return clone_execute(dp, skb, key, 0, actions, rem, last, in sample()
    1039 bool last) in clone() argument
    1051 return clone_execute(dp, skb, key, 0, actions, rem, last, in clone()
    1167 const struct nlattr *a, bool last) in execute_recirc() argument
    1181 return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true); in execute_recirc()
    1186 const struct nlattr *attr, bool last) in execute_check_pkt_len() argument
    1220 nla_len(actions), last, clone_flow_key); in execute_check_pkt_len()
    [all …]
|
/net/sched/ |
D | sch_cbs.c |
    73 s64 last; /* timestamp in ns */ member
    124 q->last = ktime_get_ns(); in cbs_enqueue_soft()
    185 if (now < q->last) { in cbs_dequeue_soft()
    186 qdisc_watchdog_schedule_ns(&q->watchdog, q->last); in cbs_dequeue_soft()
    190 credits = timediff_to_credits(now - q->last, q->idleslope); in cbs_dequeue_soft()
    201 q->last = now; in cbs_dequeue_soft()
    222 q->last = now; in cbs_dequeue_soft()
    224 q->last = now + div64_s64(len * NSEC_PER_SEC, in cbs_dequeue_soft()
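The cbs_dequeue_soft() hits show the credit-based shaper growing its credit with idleslope over the interval since q->last and only releasing a packet once the credit is non-negative. A simplified sketch of that bookkeeping, with slopes in plain bytes per second rather than the qdisc's scaled units:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

struct cbs {
        int64_t credits;        /* current credit, in bytes */
        int64_t idleslope;      /* replenish rate, bytes/sec */
        int64_t hicredit;       /* upper bound on accumulated credit */
        int64_t last;           /* timestamp of the previous update, ns */
};

/* Advance the credit counter to "now"; returns 1 if a packet may be sent. */
static int cbs_update(struct cbs *q, int64_t now)
{
        int64_t credits = q->credits +
                          (now - q->last) * q->idleslope / NSEC_PER_SEC;

        if (credits > q->hicredit)
                credits = q->hicredit;      /* never bank more than hicredit */
        q->credits = credits;
        q->last = now;
        return q->credits >= 0;
}

int main(void)
{
        struct cbs q = { .credits = -1500, .idleslope = 125000, /* ~1 Mbit/s */
                         .hicredit = 3000, .last = 0 };

        /* 10 ms earns 1250 bytes of credit: still short, then above zero. */
        printf("t=10ms: %s\n", cbs_update(&q, 10 * 1000000LL) ? "send" : "wait");
        printf("t=20ms: %s\n", cbs_update(&q, 20 * 1000000LL) ? "send" : "wait");
        return 0;
}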
|
D | sch_netem.c |
    103 u32 last; member
    174 state->last = prandom_u32(); in init_crandom()
    191 answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32; in get_crandom()
    192 state->last = answer; in get_crandom()
    542 struct netem_skb_cb *last = NULL; in netem_enqueue() local
    545 last = netem_skb_cb(sch->q.tail); in netem_enqueue()
    552 if (!last || in netem_enqueue()
    553 t_last->time_to_send > last->time_to_send) in netem_enqueue()
    554 last = t_last; in netem_enqueue()
    560 if (!last || in netem_enqueue()
    [all …]
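get_crandom() produces correlated random values: each output is a fixed-point blend of a fresh draw and the previous output (state->last), weighted by rho/2^32. A userspace sketch of the same mixing step, with libc rand() standing in for the kernel PRNG:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct crndstate {
        uint32_t last;          /* previous output */
        uint32_t rho;           /* correlation, fixed point: rho / 2^32 */
};

static uint32_t rand_u32(void)
{
        return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

static uint32_t get_crandom(struct crndstate *s)
{
        uint64_t value = rand_u32();
        uint64_t answer;

        if (!s->rho)                            /* no correlation requested */
                return (uint32_t)value;
        answer = (value * ((1ull << 32) - s->rho) +
                  (uint64_t)s->last * s->rho) >> 32;
        s->last = (uint32_t)answer;
        return (uint32_t)answer;
}

int main(void)
{
        struct crndstate s = { .last = rand_u32(), .rho = 0xc0000000u }; /* ~75% */

        for (int i = 0; i < 5; i++)
                printf("%u\n", get_crandom(&s));  /* successive values drift slowly */
        return 0;
}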
|
D | sch_etf.c |
    34 ktime_t last; /* The txtime of the last skb sent to the netdevice. */ member
    102 if (ktime_before(txtime, now) || ktime_before(txtime, q->last)) in is_packet_valid()
    248 q->last = skb->tstamp; in timesortedlist_remove()
    451 q->last = 0; in etf_reset()
|
D | cls_fw.c |
    154 static int fw_delete(struct tcf_proto *tp, void *arg, bool *last, in fw_delete() argument
    181 *last = true; in fw_delete()
    184 *last = false; in fw_delete()
|
/net/sctp/ |
D | ulpqueue.c |
    326 struct sk_buff *pnext, *last; in sctp_make_reassembled_event() local
    336 for (last = list; list; last = list, list = list->next) in sctp_make_reassembled_event()
    342 if (last) in sctp_make_reassembled_event()
    343 last->next = pos; in sctp_make_reassembled_event()
    970 struct sk_buff *skb, *flist, *last; in sctp_ulpq_renege_list() local
    990 for (last = flist; flist; flist = flist->next) { in sctp_ulpq_renege_list()
    991 last = flist; in sctp_ulpq_renege_list()
    992 freed += skb_headlen(last); in sctp_ulpq_renege_list()
    994 if (last) in sctp_ulpq_renege_list()
    995 last_tsn = sctp_skb2event(last)->tsn; in sctp_ulpq_renege_list()
|
D | sm_make_chunk.c |
    3847 __be16 last = 0; in sctp_verify_reconf() local
    3860 (last && last != SCTP_PARAM_RESET_RESPONSE && in sctp_verify_reconf()
    3861 last != SCTP_PARAM_RESET_IN_REQUEST)) in sctp_verify_reconf()
    3866 (last && last != SCTP_PARAM_RESET_OUT_REQUEST)) in sctp_verify_reconf()
    3872 (last && last != SCTP_PARAM_RESET_RESPONSE && in sctp_verify_reconf()
    3873 last != SCTP_PARAM_RESET_OUT_REQUEST)) in sctp_verify_reconf()
    3878 sizeof(struct sctp_strreset_tsnreq) || last) in sctp_verify_reconf()
    3883 (last && last != SCTP_PARAM_RESET_ADD_OUT_STREAMS)) in sctp_verify_reconf()
    3888 (last && last != SCTP_PARAM_RESET_ADD_IN_STREAMS)) in sctp_verify_reconf()
    3895 last = param.p->type; in sctp_verify_reconf()
|
/net/ipv4/ |
D | inet_fragment.c |
    349 struct sk_buff *last = q->fragments_tail; in inet_frag_queue_insert() local
    360 if (!last) in inet_frag_queue_insert()
    362 else if (last->ip_defrag_offset + last->len < end) { in inet_frag_queue_insert()
    365 if (offset < last->ip_defrag_offset + last->len) in inet_frag_queue_insert()
    367 if (offset == last->ip_defrag_offset + last->len) in inet_frag_queue_insert()
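inet_frag_queue_insert() first tries the queue tail: a fragment that starts exactly at last->ip_defrag_offset + last->len can be appended directly, while one that starts earlier overlaps the tail. A toy classification of those tail checks; struct frag and the result codes are illustrative, not the kernel's skb bookkeeping:

#include <stdio.h>

struct frag { unsigned int offset, len; };

enum insert_result { INSERT_FIRST, INSERT_APPEND, INSERT_OVERLAP, INSERT_SEARCH };

static enum insert_result classify(const struct frag *last,
                                   unsigned int offset, unsigned int end)
{
        if (!last)
                return INSERT_FIRST;                    /* empty queue */
        if (last->offset + last->len < end) {           /* reaches past the tail */
                if (offset < last->offset + last->len)
                        return INSERT_OVERLAP;          /* partially covers the tail */
                if (offset == last->offset + last->len)
                        return INSERT_APPEND;           /* contiguous: fast append */
        }
        return INSERT_SEARCH;                           /* fall back to a full lookup */
}

int main(void)
{
        struct frag last = { .offset = 0, .len = 1200 };

        printf("%d %d %d\n",
               classify(&last, 1200, 2400),     /* APPEND  */
               classify(&last, 1000, 2400),     /* OVERLAP */
               classify(&last, 1200, 1200));    /* SEARCH (does not extend the tail) */
        return 0;
}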
|
D | ip_input.c |
    152 struct sock *last = NULL; in ip_call_ra_chain() local
    169 if (last) { in ip_call_ra_chain()
    172 raw_rcv(last, skb2); in ip_call_ra_chain()
    174 last = sk; in ip_call_ra_chain()
    178 if (last) { in ip_call_ra_chain()
    179 raw_rcv(last, skb); in ip_call_ra_chain()
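ip_call_ra_chain() hands a clone to each previously matched router-alert socket and the original skb to the final match, so nothing is copied when there is only one listener. A sketch of that clone-for-all-but-last pattern with illustrative deliver() and copy_of() helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *copy_of(const char *buf) { return strdup(buf); }

static void deliver(int sock_id, char *buf, int is_copy)
{
        printf("sock %d gets \"%s\" (%s)\n", sock_id, buf,
               is_copy ? "copy" : "original");
        if (is_copy)
                free(buf);
}

static void deliver_to_all(const int *socks, int n, char *buf)
{
        int last = -1;                          /* previously matched socket */

        for (int i = 0; i < n; i++) {
                if (last >= 0)
                        deliver(last, copy_of(buf), 1);   /* earlier match: copy */
                last = socks[i];
        }
        if (last >= 0)
                deliver(last, buf, 0);          /* final match: the original */
}

int main(void)
{
        int socks[] = { 3, 7, 9 };
        char buf[] = "router-alert packet";

        deliver_to_all(socks, 3, buf);
        return 0;
}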
|
/net/core/ |
D | datagram.c |
    172 struct sk_buff **last) in __skb_try_recv_from_queue() argument
    183 *last = queue->prev; in __skb_try_recv_from_queue()
    249 struct sk_buff **last) in __skb_try_recv_datagram() argument
    271 off, &error, last); in __skb_try_recv_datagram()
    282 } while (READ_ONCE(sk->sk_receive_queue.prev) != *last); in __skb_try_recv_datagram()
    297 struct sk_buff *skb, *last; in __skb_recv_datagram() local
    304 &last); in __skb_recv_datagram()
    311 !__skb_wait_for_more_packets(sk, err, &timeo, last)); in __skb_recv_datagram()
|
/net/rds/ |
D | bind.c |
    98 u16 rover, last; in rds_add_bound() local
    105 last = rover; in rds_add_bound()
    108 last = rover - 1; in rds_add_bound()
    143 } while (rover++ != last); in rds_add_bound()
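rds_add_bound() walks the 16-bit port space with a rover and stops when it wraps back around to "last" (the value just before the starting point). A sketch of that loop shape; try_bind() is an illustrative predicate, not an RDS function:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool try_bind(uint16_t port)
{
        return port == 4500;            /* pretend only this port is free */
}

static int pick_port(uint16_t start, uint16_t *bound)
{
        uint16_t rover = start;
        uint16_t last = rover - 1;      /* stop point: one full wrap-around */

        do {
                if (rover && try_bind(rover)) {   /* port 0 is never used */
                        *bound = rover;
                        return 0;
                }
        } while (rover++ != last);
        return -1;                      /* port space exhausted */
}

int main(void)
{
        uint16_t port;

        if (pick_port(4000, &port) == 0)
                printf("bound to %u\n", port);
        return 0;
}

The unsigned rover wraps naturally past 65535, so every candidate is tried exactly once before the loop gives up.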
|
/net/ax25/ |
D | ax25_out.c |
    244 int last = 1; in ax25_kick() local
    288 last = (next == end); in ax25_kick()
    298 ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF); in ax25_kick()
    315 } while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL); in ax25_kick()
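ax25_kick() computes last = (next == end) while draining the queue and sets the poll bit only on the final frame. A sketch of the flag-the-final-fragment pattern, with an illustrative fragment size and send function:

#include <stdio.h>
#include <string.h>

#define FRAG_LEN 8              /* illustrative fragment size */

static void send_frame(const char *data, int len, int poll)
{
        printf("frame \"%.*s\" poll=%d\n", len, data, poll);
}

static void send_all(const char *payload)
{
        int total = (int)strlen(payload);
        int off = 0;
        int last;

        do {
                int len = total - off;

                if (len > FRAG_LEN)
                        len = FRAG_LEN;
                last = (off + len == total);    /* is this the final slice? */
                send_frame(payload + off, len, last);
                off += len;
        } while (!last);
}

int main(void)
{
        send_all("a payload split into frames");
        return 0;
}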
|
/net/netfilter/ |
D | nfnetlink_acct.c |
    186 struct nf_acct *cur, *last; in nfnl_acct_dump() local
    192 last = (struct nf_acct *)cb->args[1]; in nfnl_acct_dump()
    198 if (last) { in nfnl_acct_dump()
    199 if (cur != last) in nfnl_acct_dump()
    202 last = NULL; in nfnl_acct_dump()
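nfnl_acct_dump() (like the cttimeout and conntrack dumpers further down) resumes a multi-part netlink dump by stashing the entry that did not fit as a cursor in cb->args[1], then skipping entries on the next pass until that cursor is seen again. A generic sketch of the resume loop over an illustrative item table:

#include <stdio.h>

struct item { const char *name; };

/* Returns the cursor to store for the next round, or NULL when done. */
static const struct item *dump_round(const struct item *items, int n,
                                     const struct item *last, int budget)
{
        for (int i = 0; i < n; i++) {
                const struct item *cur = &items[i];

                if (last) {
                        if (cur != last)        /* still before the cursor */
                                continue;
                        last = NULL;            /* found it; cur was not emitted yet */
                }
                if (budget-- <= 0)
                        return cur;             /* out of room: remember this entry */
                printf("emit %s\n", cur->name);
        }
        return NULL;                            /* whole table dumped */
}

int main(void)
{
        struct item tbl[] = { {"a"}, {"b"}, {"c"}, {"d"}, {"e"} };
        const struct item *cursor = NULL;

        do {
                cursor = dump_round(tbl, 5, cursor, 2);   /* 2 entries per round */
        } while (cursor);
        return 0;
}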
|
D | nft_limit.c |
    19 u64 last; member
    35 tokens = limit->tokens + now - limit->last; in nft_limit_eval()
    39 limit->last = now; in nft_limit_eval()
    98 limit->last = ktime_get_ns(); in nft_limit_init()
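nft_limit_eval() keeps its token bucket in units of time: it refills by the interval since limit->last, clamps at the bucket depth, and lets a packet through if its cost still fits. A simplified userspace sketch of that arithmetic; the constants and per-packet cost are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct limit {
        int64_t tokens;         /* remaining credit, ns */
        int64_t tokens_max;     /* bucket depth, ns */
        int64_t last;           /* timestamp of the previous evaluation, ns */
        int64_t cost;           /* credit consumed per packet, ns */
};

static bool limit_eval(struct limit *l, int64_t now)
{
        int64_t tokens = l->tokens + (now - l->last);   /* refill since last time */

        if (tokens > l->tokens_max)
                tokens = l->tokens_max;                  /* cap at the bucket depth */
        l->last = now;
        if (tokens >= l->cost) {
                l->tokens = tokens - l->cost;            /* pay for this packet */
                return true;
        }
        l->tokens = tokens;
        return false;                                    /* over the limit */
}

int main(void)
{
        /* 10 packets/second: each packet costs 100 ms of credit, bucket holds 1 s */
        struct limit l = { .tokens = 1000000000LL, .tokens_max = 1000000000LL,
                           .last = 0, .cost = 100000000LL };
        int passed = 0;

        for (int i = 0; i < 50; i++)            /* 50 back-to-back packets at t=0 */
                passed += limit_eval(&l, 0);
        printf("passed %d of 50\n", passed);    /* expect 10 */
        return 0;
}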
|
D | nfnetlink_cttimeout.c |
    205 struct ctnl_timeout *cur, *last; in ctnl_timeout_dump() local
    210 last = (struct ctnl_timeout *)cb->args[1]; in ctnl_timeout_dump()
    216 if (last) { in ctnl_timeout_dump()
    217 if (cur != last) in ctnl_timeout_dump()
    220 last = NULL; in ctnl_timeout_dump()
|
D | nf_conntrack_netlink.c |
    900 struct nf_conn *ct, *last; in ctnetlink_dump_table() local
    907 last = (struct nf_conn *)cb->args[1]; in ctnetlink_dump_table()
    942 if (ct != last) in ctnetlink_dump_table()
    971 if (last) { in ctnetlink_dump_table()
    973 if ((struct nf_conn *)cb->args[1] == last) in ctnetlink_dump_table()
    976 nf_ct_put(last); in ctnetlink_dump_table()
    1385 struct nf_conn *ct, *last; in ctnetlink_dump_list() local
    1398 last = (struct nf_conn *)cb->args[1]; in ctnetlink_dump_list()
    1415 if (ct != last) in ctnetlink_dump_list()
    1442 if (last) in ctnetlink_dump_list()
    [all …]
|
/net/rxrpc/ |
D | recvmsg.c |
    180 bool last = false; in rxrpc_rotate_rx_window() local
    201 last = true; in rxrpc_rotate_rx_window()
    211 if (last) { in rxrpc_rotate_rx_window()
    274 bool last = false; in rxrpc_locate_data() local
    285 last = true; in rxrpc_locate_data()
    296 *_last = last; in rxrpc_locate_data()
|
D | sendmsg.c |
    195 struct sk_buff *skb, bool last, in rxrpc_queue_packet() argument
    208 if (last) in rxrpc_queue_packet()
    222 if (last) in rxrpc_queue_packet()
    227 if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) { in rxrpc_queue_packet()
    242 if (!last) in rxrpc_queue_packet()
|
/net/ipv6/ |
D | addrlabel.c |
    206 struct ip6addrlbl_entry *last = NULL, *p = NULL; in __ip6addrlbl_add() local
    229 last = p; in __ip6addrlbl_add()
    231 if (last) in __ip6addrlbl_add()
    232 hlist_add_behind_rcu(&newp->list, &last->list); in __ip6addrlbl_add()
|
/net/rfkill/ |
D | input.c |
    140 static unsigned long rfkill_ratelimit(const unsigned long last) in rfkill_ratelimit() argument
    143 return time_after(jiffies, last + delay) ? 0 : delay; in rfkill_ratelimit()
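rfkill_ratelimit() returns 0 when enough time has passed since "last" and the debounce delay otherwise. A trivial sketch with millisecond counters standing in for jiffies (no wrap-around handling, and the interval is illustrative):

#include <stdio.h>

#define RATELIMIT_MS 200        /* illustrative debounce interval */

static unsigned long ratelimit(unsigned long now_ms, unsigned long last_ms)
{
        return (now_ms > last_ms + RATELIMIT_MS) ? 0 : RATELIMIT_MS;
}

int main(void)
{
        printf("%lu\n", ratelimit(1000, 900));   /* within the window: wait 200 ms */
        printf("%lu\n", ratelimit(1300, 900));   /* window passed: fire now (0)    */
        return 0;
}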
|
/net/unix/ |
D | af_unix.c |
    2122 struct sk_buff *skb, *last; in unix_dgram_recvmsg() local
    2138 &last); in unix_dgram_recvmsg()
    2147 !__skb_wait_for_more_packets(sk, &err, &timeo, last)); in unix_dgram_recvmsg()
    2223 struct sk_buff *last, unsigned int last_len, in unix_stream_data_wait() argument
    2235 if (tail != last || in unix_stream_data_wait()
    2321 struct sk_buff *skb, *last; in unix_stream_read_generic() local
    2329 last = skb = skb_peek(&sk->sk_receive_queue); in unix_stream_read_generic()
    2330 last_len = last ? last->len : 0; in unix_stream_read_generic()
    2354 timeo = unix_stream_data_wait(sk, timeo, last, in unix_stream_read_generic()
    2372 last = skb; in unix_stream_read_generic()
    [all …]
|