/net/ceph/crush/

mapper.c
   415  int item, int x)   in is_out() argument
   417  if (item >= weight_max)   in is_out()
   419  if (weight[item] >= 0x10000)   in is_out()
   421  if (weight[item] == 0)   in is_out()
   423  if ((crush_hash32_2(CRUSH_HASH_RJENKINS1, x, item) & 0xffff)   in is_out()
   424  < weight[item])   in is_out()
   473  int item = 0;   in crush_choose_firstn() local
   509  item = bucket_perm_choose(   in crush_choose_firstn()
   513  item = crush_bucket_choose(   in crush_choose_firstn()
   519  if (item >= map->max_devices) {   in crush_choose_firstn()
  [all …]
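The mapper.c hits above are the body of the CRUSH is_out() test: weights are 16.16 fixed point, and an item with partial weight is rejected probabilistically based on a hash of the input x and the item id. A minimal userspace sketch of that test follows; the helper toy_hash32_2() is a stand-in for crush_hash32_2(CRUSH_HASH_RJENKINS1, ...), not the kernel's hash.

#include <stdbool.h>
#include <stdint.h>

/* Stand-in mixer; any well-distributed 32-bit hash of the two inputs
 * serves the illustration. */
static uint32_t toy_hash32_2(uint32_t x, uint32_t item)
{
        uint32_t h = x * 2654435761u ^ item * 2246822519u;

        h ^= h >> 15;
        return h * 2654435761u;
}

/*
 * Weight is 16.16 fixed point: 0x10000 means "fully in", 0 means
 * "always out", and anything in between keeps the item with probability
 * weight/0x10000, decided by comparing the low 16 hash bits against it.
 */
static bool is_out_sketch(const uint32_t *weight, int weight_max,
                          int item, int x)
{
        if (item >= weight_max)
                return true;
        if (weight[item] >= 0x10000)
                return false;
        if (weight[item] == 0)
                return true;
        return (toy_hash32_2(x, item) & 0xffff) >= weight[item];
}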
/net/netfilter/

nf_conntrack_ecache.c
   130  const struct nf_ct_event *item)   in __nf_conntrack_eventmask_report() argument
   132  struct net *net = nf_ct_net(item->ct);   in __nf_conntrack_eventmask_report()
   148  ret = notify->ct_event(events | missed, item);   in __nf_conntrack_eventmask_report()
   169  struct nf_ct_event item;   in nf_conntrack_eventmask_report() local
   180  memset(&item, 0, sizeof(item));   in nf_conntrack_eventmask_report()
   182  item.ct = ct;   in nf_conntrack_eventmask_report()
   183  item.portid = e->portid ? e->portid : portid;   in nf_conntrack_eventmask_report()
   184  item.report = report;   in nf_conntrack_eventmask_report()
   189  ret = __nf_conntrack_eventmask_report(e, events, missed, &item);   in nf_conntrack_eventmask_report()
   207  struct nf_ct_event item;   in nf_ct_deliver_cached_events() local
  [all …]

nf_conntrack_h323_main.c
   856  &setup->fastStart.item[i]);   in process_setup()
   888  &callproc->fastStart.item[i]);   in process_callproceeding()
   919  &connect->fastStart.item[i]);   in process_connect()
   950  &alert->fastStart.item[i]);   in process_alerting()
   990  &facility->fastStart.item[i]);   in process_facility()
  1021  &progress->fastStart.item[i]);   in process_progress()
  1079  &pdu->h245Control.item[i]);   in process_q931()
  1333  rrq->callSignalAddress.item,   in process_rrq()
  1342  rrq->rasAddress.item,   in process_rrq()
  1374  rcf->callSignalAddress.item,   in process_rcf()
  [all …]

nf_conntrack_netlink.c
   726  ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)   in ctnetlink_conntrack_event() argument
   732  struct nf_conn *ct = item->ct;   in ctnetlink_conntrack_event()
   752  if (!item->report && !nfnetlink_has_listeners(net, group))   in ctnetlink_conntrack_event()
   760  nlh = nfnl_msg_put(skb, item->portid, 0, type, flags, nf_ct_l3num(ct),   in ctnetlink_conntrack_event()
   844  err = nfnetlink_send(skb, net, item->portid, group, item->report,   in ctnetlink_conntrack_event()
  3100  ctnetlink_expect_event(unsigned int events, const struct nf_exp_event *item)   in ctnetlink_expect_event() argument
  3102  struct nf_conntrack_expect *exp = item->exp;   in ctnetlink_expect_event()
  3119  if (!item->report && !nfnetlink_has_listeners(net, group))   in ctnetlink_expect_event()
  3127  nlh = nfnl_msg_put(skb, item->portid, 0, type, flags,   in ctnetlink_expect_event()
  3136  nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC);   in ctnetlink_expect_event()
/net/tipc/

name_distr.c
    91  struct distr_item *item;   in tipc_named_publish() local
   108  item = (struct distr_item *)msg_data(buf_msg(skb));   in tipc_named_publish()
   109  publ_to_item(item, p);   in tipc_named_publish()
   121  struct distr_item *item;   in tipc_named_withdraw() local
   137  item = (struct distr_item *)msg_data(buf_msg(skb));   in tipc_named_withdraw()
   138  publ_to_item(item, p);   in tipc_named_withdraw()
   155  struct distr_item *item = NULL;   in named_distribute() local
   174  item = (struct distr_item *)msg_data(hdr);   in named_distribute()
   178  publ_to_item(item, publ);   in named_distribute()
   179  item++;   in named_distribute()
  [all …]
/net/bridge/

br_switchdev.c
   128  struct switchdev_notifier_fdb_info *item,   in br_switchdev_fdb_populate() argument
   134  item->addr = fdb->key.addr.addr;   in br_switchdev_fdb_populate()
   135  item->vid = fdb->key.vlan_id;   in br_switchdev_fdb_populate()
   136  item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);   in br_switchdev_fdb_populate()
   137  item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);   in br_switchdev_fdb_populate()
   138  item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);   in br_switchdev_fdb_populate()
   139  item->info.dev = (!p || item->is_local) ? br->dev : p->dev;   in br_switchdev_fdb_populate()
   140  item->info.ctx = ctx;   in br_switchdev_fdb_populate()
   147  struct switchdev_notifier_fdb_info item;   in br_switchdev_fdb_notify() local
   160  br_switchdev_fdb_populate(br, &item, fdb, NULL);   in br_switchdev_fdb_notify()
  [all …]
/net/sunrpc/

cache.c
    42  static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
    43  static void cache_revisit_request(struct cache_head *item);
   576  #define DFR_HASH(item) ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)   argument
   594  static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)   in __hash_deferred_req() argument
   596  int hash = DFR_HASH(item);   in __hash_deferred_req()
   603  struct cache_head *item,   in setup_deferral() argument
   607  dreq->item = item;   in setup_deferral()
   611  __hash_deferred_req(dreq, item);   in setup_deferral()
   634  static void cache_wait_req(struct cache_req *req, struct cache_head *item)   in cache_wait_req()
   642  setup_deferral(dreq, item, 0);   in cache_wait_req()
  [all …]

svcauth_unix.c
   107  struct cache_head *item = container_of(kref, struct cache_head, ref);   in ip_map_put() local
   108  struct ip_map *im = container_of(item, struct ip_map,h);   in ip_map_put()
   110  if (test_bit(CACHE_VALID, &item->flags) &&   in ip_map_put()
   111  !test_bit(CACHE_NEGATIVE, &item->flags))   in ip_map_put()
   130  struct ip_map *item = container_of(citem, struct ip_map, h);   in ip_map_init() local
   132  strcpy(new->m_class, item->m_class);   in ip_map_init()
   133  new->m_addr = item->m_addr;   in ip_map_init()
   138  struct ip_map *item = container_of(citem, struct ip_map, h);   in update() local
   140  kref_get(&item->m_client->h.ref);   in update()
   141  new->m_client = item->m_client;   in update()
  [all …]
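The ip_map_put() lines above show the usual two-step container_of() walk from an embedded kref back out to the enclosing cache entry. Below is a minimal sketch of that release pattern; the struct names and the trimmed-down field set are placeholders, not the real svcauth_unix layout.

#include <linux/kref.h>
#include <linux/slab.h>

struct cache_head_sketch {
        struct kref ref;
        unsigned long flags;
};

struct ip_map_sketch {
        struct cache_head_sketch h;     /* embedded cache head */
        char m_class[8];
};

/* Release callback: walk from the kref to the cache head, then from the
 * cache head to the enclosing ip_map-style object, and free it. */
static void ip_map_release_sketch(struct kref *kref)
{
        struct cache_head_sketch *item =
                container_of(kref, struct cache_head_sketch, ref);
        struct ip_map_sketch *im =
                container_of(item, struct ip_map_sketch, h);

        kfree(im);
}

/* Drop one reference; kref_put() invokes the release callback at zero. */
static void ip_map_put_sketch(struct ip_map_sketch *im)
{
        kref_put(&im->h.ref, ip_map_release_sketch);
}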
/net/sunrpc/auth_gss/

svcauth_gss.c
    83  static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item);
   108  static inline int rsi_hash(struct rsi *item)   in rsi_hash() argument
   110  return hash_mem(item->in_handle.data, item->in_handle.len, RSI_HASHBITS)   in rsi_hash()
   111  ^ hash_mem(item->in_token.data, item->in_token.len, RSI_HASHBITS);   in rsi_hash()
   116  struct rsi *item = container_of(a, struct rsi, h);   in rsi_match() local
   118  return netobj_equal(&item->in_handle, &tmp->in_handle) &&   in rsi_match()
   119  netobj_equal(&item->in_token, &tmp->in_token);   in rsi_match()
   139  struct rsi *item = container_of(citem, struct rsi, h);   in rsi_init() local
   145  new->in_handle.len = item->in_handle.len;   in rsi_init()
   146  item->in_handle.len = 0;   in rsi_init()
  [all …]
/net/sched/

cls_api.c
   384  static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,   in tcf_chain_head_change_item() argument
   387  if (item->chain_head_change)   in tcf_chain_head_change_item()
   388  item->chain_head_change(tp_head, item->chain_head_change_priv);   in tcf_chain_head_change_item()
   394  struct tcf_filter_chain_list_item *item;   in tcf_chain0_head_change() local
   401  list_for_each_entry(item, &block->chain0.filter_chain_list, list)   in tcf_chain0_head_change()
   402  tcf_chain_head_change_item(item, tp_head);   in tcf_chain0_head_change()
   784  struct tcf_filter_chain_list_item *item;   in tcf_chain0_head_change_cb_add() local
   787  item = kmalloc(sizeof(*item), GFP_KERNEL);   in tcf_chain0_head_change_cb_add()
   788  if (!item) {   in tcf_chain0_head_change_cb_add()
   792  item->chain_head_change = ei->chain_head_change;   in tcf_chain0_head_change_cb_add()
  [all …]

act_gate.c
   471  struct nlattr *item;   in dumping_entry() local
   473  item = nla_nest_start_noflag(skb, TCA_GATE_ONE_ENTRY);   in dumping_entry()
   474  if (!item)   in dumping_entry()
   492  return nla_nest_end(skb, item);   in dumping_entry()
   495  nla_nest_cancel(skb, item);   in dumping_entry()

sch_taprio.c
  1898  struct nlattr *item;   in dump_entry() local
  1900  item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);   in dump_entry()
  1901  if (!item)   in dump_entry()
  1918  return nla_nest_end(msg, item);   in dump_entry()
  1921  nla_nest_cancel(msg, item);   in dump_entry()
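The act_gate.c and sch_taprio.c hits show the same nested-attribute dump shape: open a nest, emit the entry's attributes, close the nest on success, cancel it on failure. A minimal sketch of that pattern follows; TCA_EXAMPLE_ENTRY, TCA_EXAMPLE_INTERVAL, and the function name are placeholders invented for the sketch, not attributes of either scheduler.

#include <net/netlink.h>

/* Hypothetical attribute numbers, used only in this sketch. */
enum {
        TCA_EXAMPLE_UNSPEC,
        TCA_EXAMPLE_ENTRY,
        TCA_EXAMPLE_INTERVAL,
};

static int dump_one_entry_sketch(struct sk_buff *skb, u32 interval)
{
        struct nlattr *item;

        /* Everything added until nla_nest_end() is wrapped inside the
         * TCA_EXAMPLE_ENTRY attribute. */
        item = nla_nest_start_noflag(skb, TCA_EXAMPLE_ENTRY);
        if (!item)
                return -EMSGSIZE;

        if (nla_put_u32(skb, TCA_EXAMPLE_INTERVAL, interval))
                goto nla_put_failure;

        /* Closes the nest and returns the updated message length. */
        return nla_nest_end(skb, item);

nla_put_failure:
        /* Trims everything added since nla_nest_start_noflag(). */
        nla_nest_cancel(skb, item);
        return -EMSGSIZE;
}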
/net/ceph/

mon_client.c
   368  le64_to_cpu(monc->subs[i].item.start),   in __send_subscribe()
   369  monc->subs[i].item.flags);   in __send_subscribe()
   371  memcpy(p, &monc->subs[i].item, sizeof(monc->subs[i].item));   in __send_subscribe()
   372  p += sizeof(monc->subs[i].item);   in __send_subscribe()
   430  monc->subs[sub].item.start == start &&   in __ceph_monc_want_map()
   431  monc->subs[sub].item.flags == flags)   in __ceph_monc_want_map()
   434  monc->subs[sub].item.start = start;   in __ceph_monc_want_map()
   435  monc->subs[sub].item.flags = flags;   in __ceph_monc_want_map()
   465  if (monc->subs[sub].item.flags & CEPH_SUBSCRIBE_ONETIME)   in __ceph_monc_got_map()
   468  monc->subs[sub].item.start = cpu_to_le64(epoch + 1);   in __ceph_monc_got_map()

debugfs.c
   153  le64_to_cpu(monc->subs[i].item.start),   in monc_show()
   154  (monc->subs[i].item.flags &   in monc_show()

osdmap.c
  1003  INIT_LIST_HEAD(&work->item);   in alloc_workspace()
  1010  WARN_ON(!list_empty(&work->item));   in free_workspace()
  1028  list_add(&work->item, &wsm->idle_ws);   in add_initial_workspace()
  1039  item);   in cleanup_workspace_manager()
  1040  list_del_init(&work->item);   in cleanup_workspace_manager()
  1061  item);   in get_workspace()
  1062  list_del_init(&work->item);   in get_workspace()
  1106  list_add(&work->item, &wsm->idle_ws);   in put_workspace()

osd_client.c
  4952  static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)   in decode_watcher() argument
  4964  ceph_decode_copy_safe(p, end, &item->name, sizeof(item->name), bad);   in decode_watcher()
  4965  ceph_decode_64_safe(p, end, item->cookie, bad);   in decode_watcher()
  4969  ret = ceph_decode_entity_addr(p, end, &item->addr);   in decode_watcher()
  4977  ENTITY_NAME(item->name), item->cookie,   in decode_watcher()
  4978  ceph_pr_addr(&item->addr));   in decode_watcher()
/net/bpf/

test_run.c
   375  struct bpf_prog_array_item item = {.prog = prog};   in bpf_test_run() local
   383  item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);   in bpf_test_run()
   384  if (IS_ERR(item.cgroup_storage[stype])) {   in bpf_test_run()
   385  item.cgroup_storage[stype] = NULL;   in bpf_test_run()
   387  bpf_cgroup_storage_free(item.cgroup_storage[stype]);   in bpf_test_run()
   398  run_ctx.prog_item = &item;   in bpf_test_run()
   408  bpf_cgroup_storage_free(item.cgroup_storage[stype]);   in bpf_test_run()
/net/devlink/

leftover.c
  6801  struct devlink_fmsg_item *item, *tmp;   in devlink_fmsg_free() local
  6803  list_for_each_entry_safe(item, tmp, &fmsg->item_list, list) {   in devlink_fmsg_free()
  6804  list_del(&item->list);   in devlink_fmsg_free()
  6805  kfree(item);   in devlink_fmsg_free()
  6813  struct devlink_fmsg_item *item;   in devlink_fmsg_nest_common() local
  6815  item = kzalloc(sizeof(*item), GFP_KERNEL);   in devlink_fmsg_nest_common()
  6816  if (!item)   in devlink_fmsg_nest_common()
  6819  item->attrtype = attrtype;   in devlink_fmsg_nest_common()
  6820  list_add_tail(&item->list, &fmsg->item_list);   in devlink_fmsg_nest_common()
  6855  struct devlink_fmsg_item *item;   in devlink_fmsg_put_name() local
  [all …]
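devlink_fmsg_nest_common() and devlink_fmsg_free() above are a plain allocate-and-append / iterate-and-free pair over a list_head-linked item list. A minimal sketch of that pair; the struct names and field set are placeholders, not the real devlink_fmsg layout.

#include <linux/list.h>
#include <linux/slab.h>

struct fmsg_item_sketch {
        int attrtype;
        struct list_head list;          /* links the item into item_list */
};

struct fmsg_sketch {
        struct list_head item_list;
};

/* Append one item, mirroring the kzalloc() + list_add_tail() hits above. */
static int fmsg_append_sketch(struct fmsg_sketch *fmsg, int attrtype)
{
        struct fmsg_item_sketch *item;

        item = kzalloc(sizeof(*item), GFP_KERNEL);
        if (!item)
                return -ENOMEM;

        item->attrtype = attrtype;
        list_add_tail(&item->list, &fmsg->item_list);
        return 0;
}

/* Tear down the list; the _safe iterator permits list_del() + kfree()
 * of the current entry while walking, as devlink_fmsg_free() does
 * (the real function also frees the fmsg itself). */
static void fmsg_free_sketch(struct fmsg_sketch *fmsg)
{
        struct fmsg_item_sketch *item, *tmp;

        list_for_each_entry_safe(item, tmp, &fmsg->item_list, list) {
                list_del(&item->list);
                kfree(item);
        }
}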
/net/qrtr/

af_qrtr.c
   134  struct list_head item;   member
   184  list_del(&node->item);   in __qrtr_node_release()
   603  list_add(&node->item, &qrtr_all_nodes);   in qrtr_endpoint_register()
   881  list_for_each_entry(node, &qrtr_all_nodes, item) {   in qrtr_bcast_enqueue()
/net/sunrpc/xprtrdma/

verbs.c
   654  unsigned long item)   in rpcrdma_sendctx_next() argument
   656  return likely(item < buf->rb_sc_last) ? item + 1 : 0;   in rpcrdma_sendctx_next()
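The verbs.c hit is a circular-queue index step: advance until the last valid slot, then wrap back to 0. A trivial standalone restatement of that step, assuming last_slot (rb_sc_last in the hit) is the highest valid index, so the ring holds last_slot + 1 send contexts:

/* Wrap-around successor of a ring-buffer slot index. */
static unsigned long sendctx_next_sketch(unsigned long last_slot,
                                         unsigned long item)
{
        return item < last_slot ? item + 1 : 0;
}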