/net/tipc/ |
D | name_table.c |
  110  #define service_range_foreach_match(sr, sc, start, end) \   in RB_DECLARE_CALLBACKS_MAX() argument
  111  for (sr = service_range_match_first((sc)->ranges.rb_node, \   in RB_DECLARE_CALLBACKS_MAX()
  282  static struct service_range *tipc_service_find_range(struct tipc_service *sc,   in tipc_service_find_range() argument
  287  service_range_foreach_match(sr, sc, ua->sr.lower, ua->sr.upper) {   in tipc_service_find_range()
  296  static struct service_range *tipc_service_create_range(struct tipc_service *sc,   in tipc_service_create_range() argument
  304  n = &sc->ranges.rb_node;   in tipc_service_create_range()
  326  rb_insert_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks);   in tipc_service_create_range()
  331  struct tipc_service *sc,   in tipc_service_insert_publ() argument
  342  spin_lock_bh(&sc->lock);   in tipc_service_insert_publ()
  343  sr = tipc_service_create_range(sc, p);   in tipc_service_insert_publ()
  [all …]
|
D | addr.h |
  120  static inline int tipc_scope2node(struct net *net, int sc)   in tipc_scope2node() argument
  122  return sc != TIPC_NODE_SCOPE ? 0 : tipc_own_addr(net);   in tipc_scope2node()
|
/net/ipv6/ |
D | ioam6.c |
  26   static void ioam6_sc_release(struct ioam6_schema *sc)   in ioam6_sc_release() argument
  28   kfree_rcu(sc, rcu);   in ioam6_sc_release()
  41   struct ioam6_schema *sc = (struct ioam6_schema *)ptr;   in ioam6_free_sc() local
  43   if (sc)   in ioam6_free_sc()
  44   ioam6_sc_release(sc);   in ioam6_free_sc()
  56   const struct ioam6_schema *sc = obj;   in ioam6_sc_cmpfn() local
  58   return (sc->id != *(u32 *)arg->key);   in ioam6_sc_cmpfn()
  163  struct ioam6_schema *sc;   in ioam6_genl_delns() local
  181  sc = rcu_dereference_protected(ns->schema,   in ioam6_genl_delns()
  189  if (sc)   in ioam6_genl_delns()
  [all …]
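ioam6_sc_cmpfn() above follows the rhashtable compare convention: return 0 when the candidate object matches the lookup key, nonzero otherwise. Below is a small sketch of that convention against a plain array standing in for the kernel's resizable hash table; the struct and helper names are invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct schema {
    uint32_t id;
    const char *data;
};

/* return 0 if obj matches key, nonzero otherwise (rhashtable convention) */
static int schema_cmpfn(const void *obj, const void *key)
{
    const struct schema *sc = obj;

    return sc->id != *(const uint32_t *)key;
}

static const struct schema *schema_lookup(const struct schema *tbl, size_t n,
                                          uint32_t id)
{
    for (size_t i = 0; i < n; i++)
        if (!schema_cmpfn(&tbl[i], &id))
            return &tbl[i];
    return NULL;
}

int main(void)
{
    const struct schema schemas[] = { { 1, "alpha" }, { 7, "beta" } };
    const struct schema *sc = schema_lookup(schemas, 2, 7);

    printf("found: %s\n", sc ? sc->data : "(none)");
    return 0;
}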
|
/net/ceph/ |
D | snapshot.c |
  46   struct ceph_snap_context *ceph_get_snap_context(struct ceph_snap_context *sc)   in ceph_get_snap_context() argument
  48   if (sc)   in ceph_get_snap_context()
  49   refcount_inc(&sc->nref);   in ceph_get_snap_context()
  50   return sc;   in ceph_get_snap_context()
  54   void ceph_put_snap_context(struct ceph_snap_context *sc)   in ceph_put_snap_context() argument
  56   if (!sc)   in ceph_put_snap_context()
  58   if (refcount_dec_and_test(&sc->nref)) {   in ceph_put_snap_context()
  60   kfree(sc);   in ceph_put_snap_context()
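The ceph_get_snap_context()/ceph_put_snap_context() pair above is a standard reference-count get/put: get tolerates NULL and bumps the count, put frees the object when the last reference drops. A minimal userspace sketch of the same pattern, using C11 atomics in place of the kernel's refcount_t and an invented snap_context layout (not the real ceph struct):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct snap_context {
    atomic_uint nref;
    unsigned long long seq;   /* placeholder payload */
};

static struct snap_context *snap_context_get(struct snap_context *sc)
{
    if (sc)
        atomic_fetch_add(&sc->nref, 1);
    return sc;
}

static void snap_context_put(struct snap_context *sc)
{
    if (!sc)
        return;
    /* Free only when the last reference is dropped. */
    if (atomic_fetch_sub(&sc->nref, 1) == 1)
        free(sc);
}

int main(void)
{
    struct snap_context *sc = calloc(1, sizeof(*sc));

    atomic_init(&sc->nref, 1);       /* creator holds one reference */
    struct snap_context *extra = snap_context_get(sc);
    snap_context_put(extra);         /* drop the borrowed reference */
    snap_context_put(sc);            /* drop the last one: frees */
    printf("done\n");
    return 0;
}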
|
D | auth_x.c |
  680  struct ceph_x_server_challenge *sc = buf;   in ceph_x_handle_reply() local
  682  if (len != sizeof(*sc))   in ceph_x_handle_reply()
  684  xi->server_challenge = le64_to_cpu(sc->server_challenge);   in ceph_x_handle_reply()
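ceph_x_handle_reply() above checks the reply length against the wire struct before byte-swapping the little-endian challenge. A hedged userspace sketch of the same length-check-then-decode step, assuming an 8-byte wire format (the helper names are invented, not the kernel's):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define SERVER_CHALLENGE_LEN 8   /* one little-endian 64-bit value on the wire */

static uint64_t get_le64(const uint8_t *p)
{
    uint64_t v = 0;
    for (int i = 7; i >= 0; i--)
        v = (v << 8) | p[i];
    return v;
}

static int handle_reply(const uint8_t *buf, size_t len, uint64_t *out)
{
    if (len != SERVER_CHALLENGE_LEN)   /* reject short or oversized replies */
        return -1;
    *out = get_le64(buf);
    return 0;
}

int main(void)
{
    const uint8_t wire[8] = { 0xef, 0xbe, 0xad, 0xde, 0, 0, 0, 0 };
    uint64_t challenge;

    if (!handle_reply(wire, sizeof(wire), &challenge))
        printf("server challenge = 0x%llx\n", (unsigned long long)challenge);
    return 0;
}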
|
/net/sunrpc/xprtrdma/ |
D | verbs.c |
  80   struct rpcrdma_sendctx *sc);
  155  struct rpcrdma_sendctx *sc =   in rpcrdma_wc_send() local
  160  trace_xprtrdma_wc_send(wc, &sc->sc_cid);   in rpcrdma_wc_send()
  161  rpcrdma_sendctx_put_locked(r_xprt, sc);   in rpcrdma_wc_send()
  630  struct rpcrdma_sendctx *sc;   in rpcrdma_sendctx_create() local
  632  sc = kzalloc(struct_size(sc, sc_sges, ep->re_attr.cap.max_send_sge),   in rpcrdma_sendctx_create()
  634  if (!sc)   in rpcrdma_sendctx_create()
  637  sc->sc_cqe.done = rpcrdma_wc_send;   in rpcrdma_sendctx_create()
  638  sc->sc_cid.ci_queue_id = ep->re_attr.send_cq->res.id;   in rpcrdma_sendctx_create()
  639  sc->sc_cid.ci_completion_id =   in rpcrdma_sendctx_create()
  [all …]
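rpcrdma_sendctx_create() above sizes a single allocation for the send context plus max_send_sge trailing SGEs via struct_size(). A minimal sketch of that flexible-array-member allocation pattern, with stand-in types rather than the real rpcrdma/ib_sge definitions:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct sge {                  /* placeholder for struct ib_sge */
    uint64_t addr;
    uint32_t length;
};

struct sendctx {
    unsigned int sc_unmap_count;
    struct sge sc_sges[];     /* flexible array member */
};

static struct sendctx *sendctx_create(unsigned int max_send_sge)
{
    struct sendctx *sc;

    /* userspace equivalent of kzalloc(struct_size(sc, sc_sges, max_send_sge), ...) */
    sc = calloc(1, sizeof(*sc) + max_send_sge * sizeof(sc->sc_sges[0]));
    if (!sc)
        return NULL;
    sc->sc_unmap_count = 0;
    return sc;
}

int main(void)
{
    struct sendctx *sc = sendctx_create(4);

    if (!sc)
        return 1;
    sc->sc_sges[3].length = 1024;    /* last of the 4 trailing entries */
    printf("sge[3].length = %u\n", sc->sc_sges[3].length);
    free(sc);
    return 0;
}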
|
D | rpc_rdma.c |
  533  void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)   in rpcrdma_sendctx_unmap() argument
  535  struct rpcrdma_regbuf *rb = sc->sc_req->rl_sendbuf;   in rpcrdma_sendctx_unmap()
  538  if (!sc->sc_unmap_count)   in rpcrdma_sendctx_unmap()
  545  for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;   in rpcrdma_sendctx_unmap()
  546  ++sge, --sc->sc_unmap_count)   in rpcrdma_sendctx_unmap()
  550  kref_put(&sc->sc_req->rl_kref, rpcrdma_sendctx_done);   in rpcrdma_sendctx_unmap()
  558  struct rpcrdma_sendctx *sc = req->rl_sendctx;   in rpcrdma_prepare_hdr_sge() local
  560  struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];   in rpcrdma_prepare_hdr_sge()
  576  struct rpcrdma_sendctx *sc = req->rl_sendctx;   in rpcrdma_prepare_head_iov() local
  577  struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];   in rpcrdma_prepare_head_iov()
  [all …]
|
D | xprt_rdma.h | 557 void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc);
|
/net/sched/ |
D | sch_hfsc.c |
  474   sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)   in sc2isc() argument
  476   isc->sm1 = m2sm(sc->m1);   in sc2isc()
  477   isc->ism1 = m2ism(sc->m1);   in sc2isc()
  478   isc->dx = d2dx(sc->d);   in sc2isc()
  480   isc->sm2 = m2sm(sc->m2);   in sc2isc()
  481   isc->ism2 = m2ism(sc->m2);   in sc2isc()
  1273  hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)   in hfsc_dump_sc() argument
  1277  tsc.m1 = sm2m(sc->sm1);   in hfsc_dump_sc()
  1278  tsc.d = dx2d(sc->dx);   in hfsc_dump_sc()
  1279  tsc.m2 = sm2m(sc->sm2);   in hfsc_dump_sc()
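sc2isc() and hfsc_dump_sc() above convert a two-slope service curve (m1, d, m2) into a scaled internal form and back. Below is a rough sketch of such a fixed-point round trip; the shift width and the 1:1 delay handling are assumptions for illustration, not the real m2sm()/m2ism()/d2dx() math, which also converts bytes/sec and usec into scheduler units.

#include <stdint.h>
#include <stdio.h>

#define SM_SHIFT 24          /* assumed fixed-point fraction bits */

struct service_curve { uint32_t m1, d, m2; };       /* user-visible form */
struct internal_sc   { uint64_t sm1, dx, sm2; };    /* scaled internal form */

static void sc2isc(const struct service_curve *sc, struct internal_sc *isc)
{
    isc->sm1 = (uint64_t)sc->m1 << SM_SHIFT;
    isc->dx  = sc->d;            /* delay kept 1:1 in this sketch */
    isc->sm2 = (uint64_t)sc->m2 << SM_SHIFT;
}

static void isc2sc(const struct internal_sc *isc, struct service_curve *sc)
{
    sc->m1 = (uint32_t)(isc->sm1 >> SM_SHIFT);
    sc->d  = (uint32_t)isc->dx;
    sc->m2 = (uint32_t)(isc->sm2 >> SM_SHIFT);
}

int main(void)
{
    struct service_curve in = { .m1 = 100000, .d = 5000, .m2 = 50000 };
    struct internal_sc isc;
    struct service_curve out;

    sc2isc(&in, &isc);
    isc2sc(&isc, &out);
    printf("roundtrip: m1=%u d=%u m2=%u\n", out.m1, out.d, out.m2);
    return 0;
}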
|
/net/netfilter/ipvs/ |
D | ip_vs_sync.c |
  789  ip_vs_conn_fill_param_sync(struct netns_ipvs *ipvs, int af, union ip_vs_sync_conn *sc,   in ip_vs_conn_fill_param_sync() argument
  796  ip_vs_conn_fill_param(ipvs, af, sc->v6.protocol,   in ip_vs_conn_fill_param_sync()
  797  (const union nf_inet_addr *)&sc->v6.caddr,   in ip_vs_conn_fill_param_sync()
  798  sc->v6.cport,   in ip_vs_conn_fill_param_sync()
  799  (const union nf_inet_addr *)&sc->v6.vaddr,   in ip_vs_conn_fill_param_sync()
  800  sc->v6.vport, p);   in ip_vs_conn_fill_param_sync()
  803  ip_vs_conn_fill_param(ipvs, af, sc->v4.protocol,   in ip_vs_conn_fill_param_sync()
  804  (const union nf_inet_addr *)&sc->v4.caddr,   in ip_vs_conn_fill_param_sync()
  805  sc->v4.cport,   in ip_vs_conn_fill_param_sync()
  806  (const union nf_inet_addr *)&sc->v4.vaddr,   in ip_vs_conn_fill_param_sync()
  [all …]
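ip_vs_conn_fill_param_sync() above reads either the v4 or the v6 member of a synced-connection union depending on the address family. A simplified sketch of that dispatch; the field layouts are invented stand-ins for the real ip_vs_sync_conn, and the addresses are omitted to keep it short.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

struct sync_v4 { uint8_t protocol; uint16_t cport, vport; struct in_addr  caddr, vaddr; };
struct sync_v6 { uint8_t protocol; uint16_t cport, vport; struct in6_addr caddr, vaddr; };

union sync_conn {
    struct sync_v4 v4;
    struct sync_v6 v6;
};

struct conn_param { int af; uint8_t protocol; uint16_t cport, vport; };

static void fill_param_sync(int af, const union sync_conn *sc, struct conn_param *p)
{
    p->af = af;
    if (af == AF_INET6) {               /* read the v6 view of the union */
        p->protocol = sc->v6.protocol;
        p->cport = sc->v6.cport;
        p->vport = sc->v6.vport;
    } else {                            /* default to the v4 view */
        p->protocol = sc->v4.protocol;
        p->cport = sc->v4.cport;
        p->vport = sc->v4.vport;
    }
}

int main(void)
{
    union sync_conn sc;
    struct conn_param p;

    memset(&sc, 0, sizeof(sc));
    sc.v4.protocol = 6;                 /* TCP */
    sc.v4.cport = 12345;
    fill_param_sync(AF_INET, &sc, &p);
    printf("af=%d proto=%u cport=%u\n", p.af, p.protocol, p.cport);
    return 0;
}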
|
/net/sunrpc/ |
D | auth.c |
  478  rpcauth_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)   in rpcauth_cache_shrink_scan() argument
  481  if ((sc->gfp_mask & GFP_KERNEL) != GFP_KERNEL)   in rpcauth_cache_shrink_scan()
  488  return rpcauth_cache_do_shrink(sc->nr_to_scan);   in rpcauth_cache_shrink_scan()
  492  rpcauth_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)   in rpcauth_cache_shrink_count() argument
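The rpcauth shrinker above is split into a cheap count callback and a scan callback that frees at most sc->nr_to_scan entries (and bails out when sc->gfp_mask does not allow full GFP_KERNEL allocation). A plain userspace analogue of that count/scan split, with an invented cred_cache structure:

#include <stdio.h>
#include <stdlib.h>

struct cred_cache {
    size_t nr_entries;
};

static unsigned long cache_shrink_count(const struct cred_cache *cache)
{
    return cache->nr_entries;            /* cheap, lock-free estimate */
}

static unsigned long cache_shrink_scan(struct cred_cache *cache,
                                       unsigned long nr_to_scan)
{
    unsigned long freed = 0;

    while (nr_to_scan-- && cache->nr_entries) {
        cache->nr_entries--;             /* stand-in for freeing one entry */
        freed++;
    }
    return freed;
}

int main(void)
{
    struct cred_cache cache = { .nr_entries = 10 };

    printf("reclaimable: %lu\n", cache_shrink_count(&cache));
    printf("freed: %lu\n", cache_shrink_scan(&cache, 4));
    printf("remaining: %zu\n", cache.nr_entries);
    return 0;
}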
|
/net/ieee802154/ |
D | header_ops.c |
  198  static int ieee802154_hdr_sechdr_len(u8 sc)   in ieee802154_hdr_sechdr_len() argument
  200  return ieee802154_sechdr_lengths[IEEE802154_SCF_KEY_ID_MODE(sc)];   in ieee802154_hdr_sechdr_len()
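ieee802154_hdr_sechdr_len() above is a table lookup keyed by the key-identifier-mode bits of the security control octet. A sketch of that lookup; the bit positions and the length table are written out here as assumptions for illustration rather than copied from the kernel headers.

#include <stdint.h>
#include <stdio.h>

#define SCF_KEY_ID_MODE(sc)  (((sc) >> 3) & 0x3)   /* 2-bit key id mode, assumed at bits 3-4 */

/* assumed: security control + frame counter, plus an optional key identifier */
static const int sechdr_lengths[4] = { 5, 6, 10, 14 };

static int hdr_sechdr_len(uint8_t sc)
{
    return sechdr_lengths[SCF_KEY_ID_MODE(sc)];
}

int main(void)
{
    uint8_t sc = 0x08;   /* key id mode 1 */

    printf("aux security header length: %d bytes\n", hdr_sechdr_len(sc));
    return 0;
}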
|
/net/mac80211/ |
D | rx.c |
  1268  u16 sc = le16_to_cpu(hdr->seq_ctrl);   in ieee80211_sta_manage_reorder_buf() local
  1269  u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;   in ieee80211_sta_manage_reorder_buf()
  1368  u16 sc;   in ieee80211_rx_reorder_ampdu() local
  1413  sc = le16_to_cpu(hdr->seq_ctrl);   in ieee80211_rx_reorder_ampdu()
  1414  if (sc & IEEE80211_SCTL_FRAG) {   in ieee80211_rx_reorder_ampdu()
  2246  u16 sc;   in ieee80211_rx_h_defragment() local
  2259  sc = le16_to_cpu(hdr->seq_ctrl);   in ieee80211_rx_h_defragment()
  2260  frag = sc & IEEE80211_SCTL_FRAG;   in ieee80211_rx_h_defragment()
  2282  seq = (sc & IEEE80211_SCTL_SEQ) >> 4;   in ieee80211_rx_h_defragment()
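The mac80211 receive path above repeatedly splits the little-endian seq_ctrl field into a 4-bit fragment number and a 12-bit sequence number using IEEE80211_SCTL_FRAG and IEEE80211_SCTL_SEQ. A self-contained sketch of that decode, with local mask definitions mirroring those constants:

#include <stdint.h>
#include <stdio.h>

#define SCTL_FRAG 0x000Fu    /* low 4 bits: fragment number */
#define SCTL_SEQ  0xFFF0u    /* upper 12 bits: sequence number */

static uint16_t get_le16(const uint8_t *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
    /* seq_ctrl bytes as they appear in a received frame header */
    const uint8_t hdr_seq_ctrl[2] = { 0x32, 0x01 };   /* value 0x0132 */
    uint16_t sc = get_le16(hdr_seq_ctrl);
    uint16_t frag = sc & SCTL_FRAG;          /* 2: third fragment */
    uint16_t seq  = (sc & SCTL_SEQ) >> 4;    /* 19: MPDU sequence number */

    printf("seq=%u frag=%u\n", seq, frag);
    return 0;
}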
|
/net/nfc/ |
D | digital_core.c | 293 params->sc = DIGITAL_SENSF_FELICA_SC; in digital_tg_listen_mdaa()
|