/net/netfilter/xt_IDLETIMER.c
    58: bool active;  (member)
    95: state = timer->active;  in check_for_delayed_trigger()
    211: timer->active = false;  in idletimer_tg_expired()
    238: if (!timer->active) {  in idletimer_resume()
    254: timer->active = false;  in idletimer_resume()
    317: info->timer->active = true;  in idletimer_tg_create()
    356: timer_prev = timer->active;  in reset_timer()
    357: timer->active = true;  in reset_timer()
    398: info->timer->active = true;  in idletimer_tg_target()

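The xt_IDLETIMER.c hits above all revolve around a single bool active member that records whether the idle timer is armed: it is set when the rule is created or the timer is reset, cleared when the timer fires and in one branch of idletimer_resume(), and sampled before re-arming. A minimal userspace sketch of that flag lifecycle, with the struct and function names invented for illustration rather than taken from the kernel, could look like this:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Illustrative stand-in for the xt_IDLETIMER object; not the kernel struct. */
struct idle_timer {
	bool   active;    /* is the timer currently armed?     */
	time_t expires;   /* absolute expiry time (seconds)    */
	int    timeout;   /* configured idle timeout (seconds) */
};

/* First arming; mirrors the transition seen in idletimer_tg_create(). */
static void idle_timer_arm(struct idle_timer *t, int timeout)
{
	t->timeout = timeout;
	t->expires = time(NULL) + timeout;
	t->active  = true;
}

/* Expiry callback; mirrors the transition seen in idletimer_tg_expired(). */
static void idle_timer_expired(struct idle_timer *t)
{
	t->active = false;
}

/* Re-arm on traffic, reporting the previous state, as reset_timer() does. */
static bool idle_timer_reset(struct idle_timer *t)
{
	bool was_active = t->active;

	t->expires = time(NULL) + t->timeout;
	t->active  = true;
	return was_active;
}

int main(void)
{
	struct idle_timer t = { 0 };

	idle_timer_arm(&t, 60);
	printf("armed: %d\n", t.active);
	idle_timer_expired(&t);
	printf("after expiry: %d\n", t.active);
	printf("was still armed before reset: %d\n", idle_timer_reset(&t));
	return 0;
}
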
/net/sched/sch_cbq.c
    140: struct cbq_class *active[TC_CBQ_MAXPRIO + 1]; /* List of all classes  (member)
    297: cl_tail = q->active[prio];  in cbq_activate_class()
    298: q->active[prio] = cl;  in cbq_activate_class()
    320: struct cbq_class *cl_prev = q->active[prio];  in cbq_deactivate_class()
    328: if (cl == q->active[prio]) {  in cbq_deactivate_class()
    329: q->active[prio] = cl_prev;  in cbq_deactivate_class()
    330: if (cl == q->active[prio]) {  in cbq_deactivate_class()
    331: q->active[prio] = NULL;  in cbq_deactivate_class()
    338: } while ((cl_prev = cl) != q->active[prio]);  in cbq_deactivate_class()
    448: struct cbq_class *cl_prev = q->active[prio];  in cbq_undelay_prio()
    [all …]

/net/sched/sch_drr.c
    33: struct list_head active;  (member)
    368: list_add_tail(&cl->alist, &q->active);  in drr_enqueue()
    384: if (list_empty(&q->active))  in drr_dequeue()
    387: cl = list_first_entry(&q->active, struct drr_class, alist);  in drr_dequeue()
    411: list_move_tail(&cl->alist, &q->active);  in drr_dequeue()
    429: INIT_LIST_HEAD(&q->active);  in drr_init_qdisc()

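The sch_drr.c hits trace the classic active-list shape of deficit round robin: the list is initialised in drr_init_qdisc(), a class is put on it from drr_enqueue(), and the dequeue path always serves the head, moving a class whose credit is exhausted back to the tail of the round. The sketch below reproduces that rotation in plain userspace C with hand-rolled stand-ins for the kernel's list helpers; every name in it is illustrative, not the kernel's actual implementation:

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's <linux/list.h> primitives. */
struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = n;
}

static void list_move_tail(struct list_head *n, struct list_head *h)
{
	list_del(n);
	list_add_tail(n, h);
}

static int list_empty(const struct list_head *h) { return h->next == h; }

#define list_first_entry(head, type, member) \
	((type *)((char *)((head)->next) - offsetof(type, member)))

/* Illustrative class; not the kernel's struct drr_class. */
struct drr_class_sketch {
	const char      *name;
	int              quantum;  /* credit added per round (bytes)         */
	int              deficit;  /* credit left in the current round       */
	int              backlog;  /* bytes still queued for this class      */
	struct list_head alist;    /* linkage on the scheduler's active list */
};

/* Serve the head of the active list, rotating it when its credit runs out. */
static struct drr_class_sketch *drr_pick(struct list_head *active, int pkt_len)
{
	struct drr_class_sketch *cl;

	if (list_empty(active))
		return NULL;

	cl = list_first_entry(active, struct drr_class_sketch, alist);
	if (cl->deficit < pkt_len) {
		cl->deficit += cl->quantum;            /* refill ...           */
		list_move_tail(&cl->alist, active);    /* ... and end its turn */
		return NULL;                           /* caller retries       */
	}

	cl->deficit -= pkt_len;
	cl->backlog -= pkt_len;
	if (cl->backlog <= 0)
		list_del(&cl->alist);                  /* class went idle      */
	return cl;
}

int main(void)
{
	struct list_head active;
	struct drr_class_sketch a = { "A", 1500, 1500, 3000 };
	struct drr_class_sketch b = { "B",  500,  500, 1000 };
	int i;

	INIT_LIST_HEAD(&active);
	list_add_tail(&a.alist, &active);   /* goes active on first enqueue */
	list_add_tail(&b.alist, &active);

	for (i = 0; i < 10 && !list_empty(&active); i++) {
		struct drr_class_sketch *cl = drr_pick(&active, 500);

		if (cl)
			printf("sent 500 bytes from class %s\n", cl->name);
	}
	return 0;
}

Broadly the same serve-the-head, rotate-to-the-tail idea also underlies the sch_cbq.c, sch_qfq.c, sctp stream_sched_prio.c and tipc group.c hits in this listing, each of which keeps its own list (or array of lists) of currently active entries.
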
/net/sched/sch_qfq.c
    163: struct list_head active; /* DRR queue of active classes. */  (member)
    256: INIT_LIST_HEAD(&agg->active);  in qfq_init_agg()
    319: list_add_tail(&cl->alist, &agg->active);  in qfq_add_to_agg()
    320: if (list_first_entry(&agg->active, struct qfq_class, alist) ==  in qfq_add_to_agg()
    347: if (list_empty(&agg->active)) /* agg is now inactive */  in qfq_deactivate_class()
    985: list_move_tail(&cl->alist, &agg->active);  in agg_dequeue()
    995: *cl = list_first_entry(&agg->active, struct qfq_class, alist);  in qfq_peek_skb()
    1089: if (!list_empty(&in_serv_agg->active))  in qfq_dequeue()
    1104: if (!list_empty(&in_serv_agg->active)) {  in qfq_dequeue()
    1247: list_first_entry(&agg->active, struct qfq_class, alist)  in qfq_enqueue()
    [all …]

/net/sctp/stream_sched_prio.c
    38: INIT_LIST_HEAD(&p->active);  in sctp_sched_prio_new_head()
    85: if (pos == &p->active)  in sctp_sched_prio_next_stream()
    107: if (list_empty(&prio_head->active)) {  in sctp_sched_prio_unsched()
    137: list_add(&soute->prio_list, &prio_head->active);  in sctp_sched_prio_sched()
    317: list_for_each_entry_safe(soute, souttmp, &p->active, prio_list)  in sctp_sched_prio_unsched_all()

/net/sctp/associola.c
    545: struct sctp_transport *active = asoc->peer.active_path;  in sctp_assoc_rm_peer()  (local)
    555: &active->transmitted);  in sctp_assoc_rm_peer()
    561: if (!timer_pending(&active->T3_rtx_timer))  in sctp_assoc_rm_peer()
    562: if (!mod_timer(&active->T3_rtx_timer,  in sctp_assoc_rm_peer()
    563: jiffies + active->rto))  in sctp_assoc_rm_peer()
    564: sctp_transport_hold(active);  in sctp_assoc_rm_peer()
    923: struct sctp_transport *active;  in sctp_assoc_lookup_tsn()  (local)
    946: active = asoc->peer.active_path;  in sctp_assoc_lookup_tsn()
    948: list_for_each_entry(chunk, &active->transmitted,  in sctp_assoc_lookup_tsn()
    952: match = active;  in sctp_assoc_lookup_tsn()
    [all …]

/net/tipc/group.c
    83: struct list_head active;  (member)
    176: INIT_LIST_HEAD(&grp->active);  in tipc_group_create()
    584: struct list_head *active = &grp->active;  in tipc_group_update_rcv_win()  (local)
    601: list_add_tail(&m->list, active);  in tipc_group_update_rcv_win()
    613: if (!list_empty(active)) {  in tipc_group_update_rcv_win()
    614: rm = list_first_entry(active, struct tipc_member, list);  in tipc_group_update_rcv_win()
    627: if (!list_is_last(&m->list, &grp->active))  in tipc_group_update_rcv_win()
    628: list_move_tail(&m->list, &grp->active);  in tipc_group_update_rcv_win()
    649: list_move_tail(&pm->list, &grp->active);  in tipc_group_update_rcv_win()
    836: list_move_tail(&pm->list, &grp->active);  in tipc_group_proto_rcv()

/net/tipc/link.h
    104: void tipc_link_set_active(struct tipc_link *l, bool active);

/net/tipc/link.c
    140: bool active;  (member)
    300: void tipc_link_set_active(struct tipc_link *l, bool active)  in tipc_link_set_active()  (argument)
    302: l->active = active;  in tipc_link_set_active()
    2466: if (link->active)  in __tipc_nl_add_link()

/net/mac80211/led.c
    369: tpt_trig->active &= ~types_off;  in ieee80211_mod_tpt_led_trig()
    370: tpt_trig->active |= types_on;  in ieee80211_mod_tpt_led_trig()
    378: allowed = tpt_trig->active & IEEE80211_TPT_LEDTRIG_FL_RADIO;  in ieee80211_mod_tpt_led_trig()
    380: if (!allowed || !(tpt_trig->active & tpt_trig->want))  in ieee80211_mod_tpt_led_trig()

/net/mac80211/iface.c
    108: bool working, scanning, active;  in __ieee80211_recalc_idle()  (local)
    113: active = force_active ||  in __ieee80211_recalc_idle()
    128: if (active)  in __ieee80211_recalc_idle()
    135: if (working || scanning || active)  in __ieee80211_recalc_idle()

/net/mac80211/ibss.c
    663: int active = 0;  in ieee80211_sta_active_ibss()  (local)
    676: active++;  in ieee80211_sta_active_ibss()
    683: return active;  in ieee80211_sta_active_ibss()

/net/mac80211/ieee80211_i.h
    1077: unsigned int active, want;  (member)

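In the led.c hits, together with the unsigned int active, want member just above, active is not a flag but a bitmask of throughput-trigger types: the update path clears the bits being switched off, sets the ones switched on, and the trigger only runs while the radio bit and at least one wanted type remain set. A small standalone illustration of that mask handling, with flag values invented for the example:

#include <stdio.h>

/* Invented flag values for the sketch; not mac80211's definitions. */
#define TRIG_FL_RADIO  0x1u
#define TRIG_TX        0x2u
#define TRIG_RX        0x4u

struct tpt_trig_sketch {
	unsigned int active;   /* trigger types currently enabled    */
	unsigned int want;     /* trigger types the driver asked for */
};

/* Mirrors the mask update pattern in ieee80211_mod_tpt_led_trig(). */
static int tpt_trig_update(struct tpt_trig_sketch *t,
			   unsigned int types_on, unsigned int types_off)
{
	t->active &= ~types_off;
	t->active |= types_on;

	/* Only run while the radio is on and something wanted is active. */
	return (t->active & TRIG_FL_RADIO) && (t->active & t->want);
}

int main(void)
{
	struct tpt_trig_sketch t = { .active = 0, .want = TRIG_TX | TRIG_RX };

	printf("%d\n", tpt_trig_update(&t, TRIG_FL_RADIO | TRIG_TX, 0)); /* 1 */
	printf("%d\n", tpt_trig_update(&t, 0, TRIG_TX | TRIG_RX));       /* 0 */
	return 0;
}
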
/net/can/j1939/transport.c
    1015: bool active = false;  in j1939_session_deactivate_locked()  (local)
    1021: active = true;  in j1939_session_deactivate_locked()
    1028: return active;  in j1939_session_deactivate_locked()
    1033: bool active;  in j1939_session_deactivate()  (local)
    1036: active = j1939_session_deactivate_locked(session);  in j1939_session_deactivate()
    1039: return active;  in j1939_session_deactivate()
    1484: struct j1939_session *active = NULL;  in j1939_session_activate()  (local)
    1489: active = j1939_session_get_by_addr_locked(priv,  in j1939_session_activate()
    1493: if (active) {  in j1939_session_activate()
    1494: j1939_session_put(active);  in j1939_session_activate()

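The transport.c hits split deactivation into a _locked helper that performs the state change and reports whether the session really was active, plus a wrapper that takes and drops the lock around it; activation conversely looks up an already-active session for the same address and, if one exists, drops the reference it just took instead of activating a second one. A hedged pthread sketch of the "change state under the lock, return the previous truth" half of that pattern (types and names are invented; build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented session type for the sketch; not the j1939 structures. */
struct session_sketch {
	pthread_mutex_t lock;
	bool            active;
};

/* Caller must already hold s->lock (shape of j1939_session_deactivate_locked()). */
static bool session_deactivate_locked(struct session_sketch *s)
{
	bool was_active = s->active;

	s->active = false;
	return was_active;
}

/* Lock-taking wrapper (shape of j1939_session_deactivate()). */
static bool session_deactivate(struct session_sketch *s)
{
	bool was_active;

	pthread_mutex_lock(&s->lock);
	was_active = session_deactivate_locked(s);
	pthread_mutex_unlock(&s->lock);
	return was_active;
}

int main(void)
{
	struct session_sketch s = {
		.lock   = PTHREAD_MUTEX_INITIALIZER,
		.active = true,
	};

	printf("first deactivate:  %d\n", session_deactivate(&s));  /* 1 */
	printf("second deactivate: %d\n", session_deactivate(&s));  /* 0 */
	return 0;
}
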
/net/wireless/chan.c
    599: bool active = false;  in cfg80211_beaconing_iface_active()  (local)
    609: active = wdev->beacon_interval != 0;  in cfg80211_beaconing_iface_active()
    612: active = wdev->ssid_len != 0;  in cfg80211_beaconing_iface_active()
    615: active = wdev->mesh_id_len != 0;  in cfg80211_beaconing_iface_active()
    632: return active;  in cfg80211_beaconing_iface_active()

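chan.c's cfg80211_beaconing_iface_active() decides whether an interface is actively beaconing by switching on the interface type and testing whichever field matters for that mode: beacon interval for AP-style interfaces, SSID length for IBSS, mesh ID length for mesh points. A reduced sketch of that dispatch, with the enum and struct invented for the illustration:

#include <stdbool.h>
#include <stdio.h>

enum iface_type { IFTYPE_AP, IFTYPE_ADHOC, IFTYPE_MESH, IFTYPE_STATION };

struct wdev_sketch {
	enum iface_type type;
	int beacon_interval;   /* nonzero while an AP beacon is scheduled */
	int ssid_len;          /* nonzero while joined to an IBSS         */
	int mesh_id_len;       /* nonzero while part of a mesh            */
};

/* Is this interface currently the source of beacons? */
static bool beaconing_iface_active(const struct wdev_sketch *w)
{
	bool active = false;

	switch (w->type) {
	case IFTYPE_AP:
		active = w->beacon_interval != 0;
		break;
	case IFTYPE_ADHOC:
		active = w->ssid_len != 0;
		break;
	case IFTYPE_MESH:
		active = w->mesh_id_len != 0;
		break;
	default:          /* station/client modes never beacon */
		break;
	}
	return active;
}

int main(void)
{
	struct wdev_sketch ap  = { IFTYPE_AP, 100, 0, 0 };
	struct wdev_sketch sta = { IFTYPE_STATION, 0, 0, 0 };

	printf("AP active: %d, STA active: %d\n",
	       beaconing_iface_active(&ap), beaconing_iface_active(&sta));
	return 0;
}
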
/net/dccp/output.c
    667: void dccp_send_close(struct sock *sk, const int active)  in dccp_send_close()  (argument)
    671: const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;  in dccp_send_close()
    684: if (active) {  in dccp_send_close()

/net/dccp/dccp.h
    323: void dccp_send_close(struct sock *sk, const int active);

/net/ncsi/ncsi-manage.c
    1731: struct ncsi_channel *nc, *active, *tmp;  in ncsi_reset_dev()  (local)
    1776: active = NULL;  in ncsi_reset_dev()
    1782: active = nc;  in ncsi_reset_dev()
    1791: if (active)  in ncsi_reset_dev()
    1795: if (!active) {  in ncsi_reset_dev()
    1805: ndp->active_channel = active;  in ncsi_reset_dev()
    1806: ndp->active_package = active->package;  in ncsi_reset_dev()

/net/netfilter/ipvs/Kconfig
    141: connections to the server with the least number of active
    151: connections to the server with the least active connections
    172: currently available and overflows to the next when active
    185: overloaded (its active connection numbers is larger than its weight)

/net/core/dev.c
    2104: bool active = false;  in remove_xps_queue_cpu()  (local)
    2115: active |= i < 0;  in remove_xps_queue_cpu()
    2118: return active;  in remove_xps_queue_cpu()
    2139: bool active = false;  in clean_xps_maps()  (local)
    2144: active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,  in clean_xps_maps()
    2146: if (!active)  in clean_xps_maps()
    2249: bool active = false;  in __netif_set_xps_queue()  (local)
    2385: active = true;  in __netif_set_xps_queue()
    2402: active |= remove_xps_queue(dev_maps, tci, index);  in __netif_set_xps_queue()
    2405: active |= remove_xps_queue(dev_maps, tci, index);  in __netif_set_xps_queue()
    [all …]

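In the dev.c hits, active is an OR-accumulator: each per-CPU (or per-traffic-class) removal reports whether its slot still maps something, the results are folded together with |=, and the XPS map is torn down only if the accumulated value stays false. The following sketch shows that accumulate-then-reap shape with a deliberately simplified data layout that is not the kernel's:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS_SKETCH 4   /* invented size; the kernel iterates real CPUs/TCs */

struct xps_map_sketch {
	int queue[NR_CPUS_SKETCH];   /* queue index steered to per CPU, -1 = none */
};

/* Drop 'index' from one CPU slot; report whether the slot still maps anything. */
static bool remove_queue_cpu(struct xps_map_sketch *m, int cpu, int index)
{
	if (m->queue[cpu] == index)
		m->queue[cpu] = -1;
	return m->queue[cpu] >= 0;
}

/* Remove 'index' everywhere; reap the whole map once nothing is left active. */
static struct xps_map_sketch *clean_map(struct xps_map_sketch *m, int index)
{
	bool active = false;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS_SKETCH; cpu++)
		active |= remove_queue_cpu(m, cpu, index);

	if (!active) {
		free(m);
		return NULL;
	}
	return m;
}

int main(void)
{
	struct xps_map_sketch *m = malloc(sizeof(*m));
	int cpu;

	if (!m)
		return 1;
	for (cpu = 0; cpu < NR_CPUS_SKETCH; cpu++)
		m->queue[cpu] = 7;   /* every CPU steers queue index 7 */

	m = clean_map(m, 7);
	printf("map %s\n", m ? "kept" : "freed");   /* freed */
	return 0;
}
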
/net/core/ethtool.c
    156: features[i].active = (u32)(dev->features >> (32 * i));  in ethtool_get_features()

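ethtool.c's ethtool_get_features() exports the 64-bit netdev feature mask as an array of 32-bit blocks, so each block's active word is simply the matching 32-bit slice of dev->features obtained with a shift and a truncating cast. A standalone version of just that slicing (struct and constant names are illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define FEATURE_WORDS 2   /* 64 feature bits exported as two 32-bit blocks */

struct feature_block_sketch {
	uint32_t active;      /* features currently enabled in this block */
};

static void get_features(uint64_t dev_features,
			 struct feature_block_sketch blocks[FEATURE_WORDS])
{
	int i;

	for (i = 0; i < FEATURE_WORDS; i++)
		blocks[i].active = (uint32_t)(dev_features >> (32 * i));
}

int main(void)
{
	struct feature_block_sketch blocks[FEATURE_WORDS];

	get_features(0x00000001deadbeefULL, blocks);
	printf("block0=0x%08" PRIx32 " block1=0x%08" PRIx32 "\n",
	       blocks[0].active, blocks[1].active);
	/* block0=0xdeadbeef block1=0x00000001 */
	return 0;
}
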
/net/ipv6/Kconfig
    285: processing of SRv6 packets based on their active segment.

/net/netfilter/ipset/ip_set_core.c
    453: const void *e, bool active)  in ip_set_put_extensions()  (argument)
    459: htonl(active ? ip_set_timeout_get(timeout)  in ip_set_put_extensions()

/net/Kconfig
    325: backlog reaches netdev_max_backlog. If a few out of many active flows