/net/atm/ |
D | clip.c |
    790 struct clip_vcc *curr) in clip_seq_next_vcc() argument
    792 if (!curr) { in clip_seq_next_vcc()
    793 curr = e->vccs; in clip_seq_next_vcc()
    794 if (!curr) in clip_seq_next_vcc()
    796 return curr; in clip_seq_next_vcc()
    798 if (curr == SEQ_NO_VCC_TOKEN) in clip_seq_next_vcc()
    801 curr = curr->next; in clip_seq_next_vcc()
    803 return curr; in clip_seq_next_vcc()
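The clip.c hits trace clip_seq_next_vcc(), a seq_file-style cursor that returns the first VCC on the first call and uses SEQ_NO_VCC_TOKEN as a sentinel for an entry with no VCCs. A rough standalone sketch of that cursor convention, with placeholder vcc/entry types and sentinel value rather than the real ATM clip structures:

    #include <stddef.h>

    struct vcc { struct vcc *next; };
    struct entry { struct vcc *vccs; };

    /* Sentinel meaning "this entry has no VCCs", so the iterator can
     * still emit one row for it (placeholder for SEQ_NO_VCC_TOKEN). */
    #define NO_VCC_TOKEN ((struct vcc *)(~0UL))

    static struct vcc *next_vcc(struct entry *e, struct vcc *curr)
    {
            if (!curr) {                    /* first call for this entry */
                    curr = e->vccs;
                    if (!curr)
                            return NO_VCC_TOKEN;
                    return curr;
            }
            if (curr == NO_VCC_TOKEN)       /* sentinel has no successor */
                    return NULL;
            return curr->next;
    }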
|
D | mpc.c |
    221 struct atm_mpoa_qos *curr; in atm_mpoa_delete_qos() local
    231 curr = qos_head; in atm_mpoa_delete_qos()
    232 while (curr != NULL) { in atm_mpoa_delete_qos()
    233 if (curr->next == entry) { in atm_mpoa_delete_qos()
    234 curr->next = entry->next; in atm_mpoa_delete_qos()
    238 curr = curr->next; in atm_mpoa_delete_qos()
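The mpc.c fragment is the classic walk-with-curr unlink from a singly linked list: advance curr until curr->next is the node to delete, then splice it out. A minimal sketch of the same pattern, using a hypothetical qos_entry type instead of struct atm_mpoa_qos:

    #include <stddef.h>

    struct qos_entry {
            int value;
            struct qos_entry *next;
    };

    /* Unlink 'entry' from the list headed by *head, mirroring the
     * curr->next == entry walk seen in atm_mpoa_delete_qos(). */
    static int qos_list_remove(struct qos_entry **head, struct qos_entry *entry)
    {
            struct qos_entry *curr;

            if (*head == entry) {           /* entry is the first node */
                    *head = entry->next;
                    return 0;
            }

            for (curr = *head; curr != NULL; curr = curr->next) {
                    if (curr->next == entry) {
                            curr->next = entry->next;   /* splice it out */
                            return 0;
                    }
            }
            return -1;                      /* entry was not on the list */
    }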
|
/net/ipv4/ |
D | inet_fragment.c |
    410 struct sk_buff *curr; in inet_frag_queue_insert() local
    414 curr = rb_to_skb(parent); in inet_frag_queue_insert()
    415 curr_run_end = curr->ip_defrag_offset + in inet_frag_queue_insert()
    416 FRAG_CB(curr)->frag_run_len; in inet_frag_queue_insert()
    417 if (end <= curr->ip_defrag_offset) in inet_frag_queue_insert()
    421 else if (offset >= curr->ip_defrag_offset && in inet_frag_queue_insert()
|
/net/netfilter/ |
D | x_tables.c |
    1569 struct list_head *head, *curr; member
    1597 trav->head = trav->curr = is_target ? in xt_mttg_seq_next()
    1601 trav->curr = trav->curr->next; in xt_mttg_seq_next()
    1602 if (trav->curr != trav->head) in xt_mttg_seq_next()
    1606 trav->head = trav->curr = is_target ? in xt_mttg_seq_next()
    1611 trav->curr = trav->curr->next; in xt_mttg_seq_next()
    1612 if (trav->curr != trav->head) in xt_mttg_seq_next()
    1667 if (trav->curr == trav->head) in xt_match_seq_show()
    1669 match = list_entry(trav->curr, struct xt_match, list); in xt_match_seq_show()
    1701 if (trav->curr == trav->head) in xt_target_seq_show()
    [all …]
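The x_tables.c traversal keeps a head/curr pair of list_head pointers into a circular doubly linked list: stepping is curr = curr->next, and curr == head means the walk has wrapped. A simplified userspace sketch of that idiom, with a local list_head and a list_entry() lookalike standing in for the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    /* Same trick as the kernel's list_entry(): recover the container
     * from a pointer to its embedded list_head. */
    #define list_entry(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct match { const char *name; struct list_head list; };

    /* Walk a circular list with a head/curr pair, as xt_mttg_seq_next()
     * does: advance curr and stop once it wraps back to head. */
    static void show_all(struct list_head *head)
    {
            struct list_head *curr;

            for (curr = head->next; curr != head; curr = curr->next)
                    printf("%s\n", list_entry(curr, struct match, list)->name);
    }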
|
/net/batman-adv/ |
D | main.c |
    561 int (*curr)(struct sk_buff *skb, in batadv_recv_handler_register() local
    563 curr = batadv_rx_handler[packet_type]; in batadv_recv_handler_register()
    565 if (curr != batadv_recv_unhandled_packet && in batadv_recv_handler_register()
    566 curr != batadv_recv_unhandled_unicast_packet) in batadv_recv_handler_register()
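batadv_recv_handler_register() reads the current function pointer for a packet type and only installs a new handler if the slot still holds an "unhandled" placeholder. A hedged sketch of that guarded registration, with made-up table size, handler type, and names:

    #include <errno.h>
    #include <stddef.h>

    struct sk_buff;                         /* opaque for this sketch */

    typedef int (*rx_handler_t)(struct sk_buff *skb);

    static int recv_unhandled(struct sk_buff *skb) { return 0; }

    #define NUM_PACKET_TYPES 256
    static rx_handler_t rx_handler[NUM_PACKET_TYPES];

    /* Install 'handler' for 'packet_type' unless something real is
     * already registered there. */
    static int recv_handler_register(unsigned char packet_type,
                                     rx_handler_t handler)
    {
            rx_handler_t curr = rx_handler[packet_type];

            if (curr != NULL && curr != recv_unhandled)
                    return -EBUSY;          /* slot already taken */

            rx_handler[packet_type] = handler;
            return 0;
    }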
|
D | bridge_loop_avoidance.c |
    1611 int i, curr; in batadv_bla_check_duplist() local
    1620 curr = (bat_priv->bla.bcast_duplist_curr + i); in batadv_bla_check_duplist()
    1621 curr %= BATADV_DUPLIST_SIZE; in batadv_bla_check_duplist()
    1622 entry = &bat_priv->bla.bcast_duplist[curr]; in batadv_bla_check_duplist()
    1659 curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1); in batadv_bla_check_duplist()
    1660 curr %= BATADV_DUPLIST_SIZE; in batadv_bla_check_duplist()
    1661 entry = &bat_priv->bla.bcast_duplist[curr]; in batadv_bla_check_duplist()
    1672 bat_priv->bla.bcast_duplist_curr = curr; in batadv_bla_check_duplist()
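batadv_bla_check_duplist() addresses a small duplicate-detection ring buffer by adding an offset to the current position and reducing it modulo the ring size. A minimal sketch of that wraparound indexing, with placeholder sizes and entry fields rather than batman-adv's real ones:

    #include <stddef.h>

    #define DUPLIST_SIZE 16                 /* placeholder for BATADV_DUPLIST_SIZE */

    struct dup_entry { unsigned int crc; };

    struct duplist {
            struct dup_entry slots[DUPLIST_SIZE];
            int curr_idx;                   /* index of the newest entry */
    };

    /* Scan the ring starting at the newest slot, wrapping with '%',
     * the same index arithmetic batadv_bla_check_duplist() uses. */
    static struct dup_entry *duplist_find(struct duplist *d, unsigned int crc)
    {
            int i, curr;

            for (i = 0; i < DUPLIST_SIZE; i++) {
                    curr = (d->curr_idx + i) % DUPLIST_SIZE;
                    if (d->slots[curr].crc == crc)
                            return &d->slots[curr];
            }
            return NULL;                    /* no duplicate recorded */
    }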
|
/net/rds/ |
D | threads.c |
    74 void rds_connect_path_complete(struct rds_conn_path *cp, int curr) in rds_connect_path_complete() argument
    76 if (!rds_conn_path_transition(cp, curr, RDS_CONN_UP)) { in rds_connect_path_complete()
|
D | rds.h |
    1002 void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
|
/net/core/ |
D | skmsg.c |
    286 if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length) in sk_msg_trim()
    299 msg->sg.curr = msg->sg.start; in sk_msg_trim()
    301 } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >= in sk_msg_trim()
    304 msg->sg.curr = i; in sk_msg_trim()
    356 msg->sg.curr = msg->sg.end; in sk_msg_zerocopy_from_iter()
    371 int ret = -ENOSPC, i = msg->sg.curr; in sk_msg_memcopy_from_iter()
    406 msg->sg.curr = i; in sk_msg_memcopy_from_iter()
|
D | filter.c |
    2591 msg->sg.curr = i; in sk_msg_reset_curr()
|
/net/sctp/ |
D | associola.c |
    1253 static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr, in sctp_trans_elect_best() argument
    1258 if (best == NULL || curr == best) in sctp_trans_elect_best()
    1259 return curr; in sctp_trans_elect_best()
    1261 score_curr = sctp_trans_score(curr); in sctp_trans_elect_best()
    1269 return curr; in sctp_trans_elect_best()
    1271 return sctp_trans_elect_tie(best, curr); in sctp_trans_elect_best()
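sctp_trans_elect_best() scores the current candidate against the best so far, keeps the higher score, and defers ties to a separate helper. A generic sketch of that election shape; score() and elect_tie() below are stand-ins for sctp_trans_score() and sctp_trans_elect_tie(), not their real logic:

    #include <stddef.h>

    struct transport { int error_count; };

    /* Placeholder metric: fewer errors scores higher. */
    static int score(const struct transport *t)
    {
            return -t->error_count;
    }

    /* Placeholder tie-breaker: prefer the newer candidate. */
    static struct transport *elect_tie(struct transport *best, struct transport *curr)
    {
            return curr ? curr : best;
    }

    /* Pick the preferred of 'curr' and 'best': score both, keep the
     * higher score, defer ties to the tie-breaker. */
    static struct transport *elect_best(struct transport *curr, struct transport *best)
    {
            int score_curr, score_best;

            if (best == NULL || curr == best)
                    return curr;

            score_curr = score(curr);
            score_best = score(best);

            if (score_curr > score_best)
                    return curr;
            if (score_curr == score_best)
                    return elect_tie(best, curr);
            return best;
    }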
|
/net/ethtool/ |
D | ioctl.c |
    1779 struct ethtool_channels channels, curr = { .cmd = ETHTOOL_GCHANNELS }; in ethtool_set_channels() local
    1791 dev->ethtool_ops->get_channels(dev, &curr); in ethtool_set_channels()
    1793 if (channels.rx_count == curr.rx_count && in ethtool_set_channels()
    1794 channels.tx_count == curr.tx_count && in ethtool_set_channels()
    1795 channels.combined_count == curr.combined_count && in ethtool_set_channels()
    1796 channels.other_count == curr.other_count) in ethtool_set_channels()
    1800 if (channels.rx_count > curr.max_rx || in ethtool_set_channels()
    1801 channels.tx_count > curr.max_tx || in ethtool_set_channels()
    1802 channels.combined_count > curr.max_combined || in ethtool_set_channels()
    1803 channels.other_count > curr.max_other) in ethtool_set_channels()
    [all …]
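ethtool_set_channels() reads the device's current channel counts, returns early when the request changes nothing, and rejects counts above the advertised maxima. A simplified sketch of that validate-against-current/max flow, with a flattened struct in place of struct ethtool_channels:

    #include <errno.h>

    struct channels {
            unsigned int rx, tx, combined, other;
            unsigned int max_rx, max_tx, max_combined, max_other;
    };

    /* Returns 1 when the request matches the current settings (nothing
     * to do), -EINVAL when it exceeds the maxima, 0 when the caller
     * may go ahead and apply it. */
    static int validate_channels(const struct channels *curr,
                                 const struct channels *req)
    {
            if (req->rx == curr->rx && req->tx == curr->tx &&
                req->combined == curr->combined && req->other == curr->other)
                    return 1;               /* no change requested */

            if (req->rx > curr->max_rx || req->tx > curr->max_tx ||
                req->combined > curr->max_combined || req->other > curr->max_other)
                    return -EINVAL;         /* beyond device limits */

            return 0;
    }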
|
/net/packet/ |
D | af_packet.c |
    1000 static void prb_fill_curr_block(char *curr, in prb_fill_curr_block() argument
    1008 ppd = (struct tpacket3_hdr *)curr; in prb_fill_curr_block()
    1010 pkc->prev = curr; in prb_fill_curr_block()
    1026 char *curr, *end; in __packet_lookup_frame_in_block() local
    1052 curr = pkc->nxt_offset; in __packet_lookup_frame_in_block()
    1057 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) { in __packet_lookup_frame_in_block()
    1058 prb_fill_curr_block(curr, pkc, pbd, len); in __packet_lookup_frame_in_block()
    1059 return (void *)curr; in __packet_lookup_frame_in_block()
    1066 curr = (char *)prb_dispatch_next_block(pkc, po); in __packet_lookup_frame_in_block()
    1067 if (curr) { in __packet_lookup_frame_in_block()
    [all …]
|
/net/tipc/ |
D | msg.c |
    205 int accounted, total, curr; in tipc_msg_append() local
    228 curr = msg_blocks(hdr); in tipc_msg_append()
    236 total += msg_blocks(hdr) - curr; in tipc_msg_append()
|
/net/tls/ |
D | tls_sw.c |
    421 &msg_en->sg.data[msg_en->sg.curr], in tls_tx_records()
    457 sge = sk_msg_elem(msg_en, msg_en->sg.curr); in tls_encrypt_done()
    528 msg_en->sg.curr = start; in tls_do_encryption()
    611 msg_opl->sg.curr = i; in tls_split_open_record()
    641 msg_npl->sg.curr = j; in tls_split_open_record()
    670 msg_opl->sg.curr = orig_end; in tls_merge_open_record()
    1225 msg_pl->sg.curr = msg_pl->sg.end; in tls_sw_do_sendpage()
|
/net/rfkill/ |
D | core.c |
    315 bool prev, curr; in rfkill_set_block() local
    361 curr = rfkill->state & RFKILL_BLOCK_SW; in rfkill_set_block()
    367 if (prev != curr) in rfkill_set_block()
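rfkill_set_block() samples the software-block bit before and after the update and only notifies when the value actually changed. A tiny sketch of that prev/curr change-detection idiom; the flag value and notification hook are placeholders:

    #include <stdbool.h>

    #define BLOCK_SW 0x1U                   /* placeholder for RFKILL_BLOCK_SW */

    struct dev_state { unsigned int state; };

    static void notify_changed(struct dev_state *dev)
    {
            /* e.g. emit an event to userspace */
    }

    static void set_block(struct dev_state *dev, bool blocked)
    {
            bool prev, curr;

            prev = dev->state & BLOCK_SW;   /* value before the update */

            if (blocked)
                    dev->state |= BLOCK_SW;
            else
                    dev->state &= ~BLOCK_SW;

            curr = dev->state & BLOCK_SW;   /* value after the update */

            if (prev != curr)
                    notify_changed(dev);    /* only report real changes */
    }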
|