/net/ceph/crush/

crush.c
  crush_get_bucket_item_weight()  [b: argument]
     27  int crush_get_bucket_item_weight(const struct crush_bucket *b, int p)
     29      if ((__u32)p >= b->size)
     32      switch (b->alg) {
     34          return ((struct crush_bucket_uniform *)b)->item_weight;
     36          return ((struct crush_bucket_list *)b)->item_weights[p];
     38          return ((struct crush_bucket_tree *)b)->node_weights[crush_calc_tree_node(p)];
     40          return ((struct crush_bucket_straw *)b)->item_weights[p];
     42          return ((struct crush_bucket_straw2 *)b)->item_weights[p];
  crush_destroy_bucket_uniform()  [b: argument]
     47  void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b)
     49      kfree(b->h.items);
  [all …]

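Note: the crush_get_bucket_item_weight() matches above show the CRUSH idiom of a common struct crush_bucket header whose alg field selects which algorithm-specific bucket struct to cast to. A minimal userspace sketch of that pattern follows; the struct layouts, enum values and the get_item_weight() name are simplified stand-ins, not the kernel's real definitions.

    /* Standalone sketch of the "common header + alg-specific body" pattern.
     * The types here are illustrative stand-ins, not the kernel's
     * struct crush_bucket family. */
    #include <stdint.h>
    #include <stdio.h>

    enum bucket_alg { ALG_UNIFORM = 1, ALG_LIST = 2 };

    struct bucket {             /* common header, like struct crush_bucket */
        uint8_t  alg;
        uint32_t size;
    };

    struct bucket_uniform {     /* every item shares one weight */
        struct bucket h;
        uint32_t item_weight;
    };

    struct bucket_list {        /* per-item weights */
        struct bucket h;
        uint32_t *item_weights;
    };

    static int get_item_weight(const struct bucket *b, int p)
    {
        if ((uint32_t)p >= b->size)
            return 0;
        switch (b->alg) {
        case ALG_UNIFORM:
            return ((const struct bucket_uniform *)b)->item_weight;
        case ALG_LIST:
            return ((const struct bucket_list *)b)->item_weights[p];
        }
        return 0;
    }

    int main(void)
    {
        uint32_t w[2] = { 3, 7 };
        struct bucket_list bl = { .h = { .alg = ALG_LIST, .size = 2 },
                                  .item_weights = w };
        struct bucket_uniform bu = { .h = { .alg = ALG_UNIFORM, .size = 4 },
                                     .item_weight = 5 };

        /* prints "5 7": the caller only ever holds the generic header */
        printf("%d %d\n", get_item_weight(&bu.h, 2), get_item_weight(&bl.h, 1));
        return 0;
    }
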
hash.c
  crush_hashmix macro  [b: macro argument]
     13  #define crush_hashmix(a, b, c) do { \
     14      a = a-b; a = a-c; a = a^(c>>13); \
     15      b = b-c; b = b-a; b = b^(a<<8); \
     16      c = c-a; c = c-b; c = c^(b>>13); \
     17      a = a-b; a = a-c; a = a^(c>>12); \
     18      b = b-c; b = b-a; b = b^(a<<16); \
     19      c = c-a; c = c-b; c = c^(b>>5); \
     20      a = a-b; a = a-c; a = a^(c>>3); \
     21      b = b-c; b = b-a; b = b^(a<<10); \
     22      c = c-a; c = c-b; c = c^(b>>15); \
  [all …]

/net/ceph/

ceph_hash.c
  mix macro  [b: macro argument]
     10  #define mix(a, b, c) \
     12      a = a - b; a = a - c; a = a ^ (c >> 13); \
     13      b = b - c; b = b - a; b = b ^ (a << 8); \
     14      c = c - a; c = c - b; c = c ^ (b >> 13); \
     15      a = a - b; a = a - c; a = a ^ (c >> 12); \
     16      b = b - c; b = b - a; b = b ^ (a << 16); \
     17      c = c - a; c = c - b; c = c ^ (b >> 5); \
     18      a = a - b; a = a - c; a = a ^ (c >> 3); \
     19      b = b - c; b = b - a; b = b ^ (a << 10); \
     20      c = c - a; c = c - b; c = c ^ (b >> 15); \
  [all …]

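Note: crush_hashmix() and mix() above are the same Jenkins-style three-word mixing step. The standalone program below reproduces just that step on uint32_t values; the surrounding rjenkins hash (seed constants, length folding, tail handling) is elided, and the inputs chosen in main() are arbitrary.

    /* Standalone copy of the three-word mixing step shown above, on plain
     * uint32_t.  Unsigned wraparound is intentional and well defined. */
    #include <stdint.h>
    #include <stdio.h>

    #define mix(a, b, c) do {                              \
        a = a - b; a = a - c; a = a ^ (c >> 13);           \
        b = b - c; b = b - a; b = b ^ (a << 8);            \
        c = c - a; c = c - b; c = c ^ (b >> 13);           \
        a = a - b; a = a - c; a = a ^ (c >> 12);           \
        b = b - c; b = b - a; b = b ^ (a << 16);           \
        c = c - a; c = c - b; c = c ^ (b >> 5);            \
        a = a - b; a = a - c; a = a ^ (c >> 3);            \
        b = b - c; b = b - a; b = b ^ (a << 10);           \
        c = c - a; c = c - b; c = c ^ (b >> 15);           \
    } while (0)

    int main(void)
    {
        uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 12345;

        mix(a, b, c);           /* c ends up carrying most of the entropy */
        printf("%08x %08x %08x\n", a, b, c);
        return 0;
    }
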
buffer.c
  ceph_buffer_new()  [b: local]
     14      struct ceph_buffer *b;
     16      b = kmalloc(sizeof(*b), gfp);
     17      if (!b)
     20      b->vec.iov_base = ceph_kvmalloc(len, gfp);
     21      if (!b->vec.iov_base) {
     22          kfree(b);
     26      kref_init(&b->kref);
     27      b->alloc_len = len;
     28      b->vec.iov_len = len;
     29      dout("buffer_new %p\n", b);
  [all …]

armor.c
  ceph_armor()  [b: local]
     43      unsigned char a, b, c;
     48      b = *src++;
     49      *dst++ = encode_bits(((a & 3) << 4) | (b >> 4));
     52      *dst++ = encode_bits(((b & 15) << 2) |
     56      *dst++ = encode_bits((b & 15) << 2);
  ceph_unarmor()  [b: local]
     80      int a, b, c, d;
     89      b = decode_bits(src[1]);
     92      if (a < 0 || b < 0 || c < 0 || d < 0)
     95      *dst++ = (a << 2) | (b >> 4);
     98      *dst++ = ((b & 15) << 4) | (c >> 2);

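Note: the ceph_armor()/ceph_unarmor() matches are the 6-bit repacking of base64-style armoring. The sketch below shows the same bit arithmetic for one 3-byte group; encode_bits(), its alphabet and the armor3() helper are illustrative stand-ins, since the excerpt does not show Ceph's actual table, padding or line-wrapping rules.

    /* Sketch of the 6-bit repacking shown above for one 3-byte group. */
    #include <stdio.h>

    static char encode_bits(int v)
    {
        static const char tbl[] =
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
        return tbl[v & 63];
    }

    /* Pack three input bytes into four 6-bit symbols. */
    static void armor3(const unsigned char *src, char *dst)
    {
        unsigned char a = src[0], b = src[1], c = src[2];

        dst[0] = encode_bits(a >> 2);
        dst[1] = encode_bits(((a & 3) << 4) | (b >> 4));
        dst[2] = encode_bits(((b & 15) << 2) | (c >> 6));
        dst[3] = encode_bits(c & 63);
    }

    int main(void)
    {
        char out[5] = { 0 };

        armor3((const unsigned char *)"Man", out);
        printf("%s\n", out);    /* "TWFu" with the base64 alphabet above */
        return 0;
    }
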
osdmap.c
  calc_bits_of()  [b: local]
     35      int b = 0;
     38      b++;
     40      return b;
  crush_decode_uniform_bucket()  [b: argument]
     56  struct crush_bucket_uniform *b)
     59      ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
     60      b->item_weight = ceph_decode_32(p);
  crush_decode_list_bucket()  [b: argument]
     67  struct crush_bucket_list *b)
     71      b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
     72      if (b->item_weights == NULL)
     74      b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
  [all …]

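Note: the calc_bits_of() matches only show the counter; the loop between lines 35 and 40 is elided, so the version below fills it in with the obvious reconstruction (shift until the value is exhausted), purely as an assumption.

    /* Minimal version of the bit-counting helper excerpted above:
     * how many bits are needed to represent v. */
    #include <stdio.h>

    static int calc_bits_of(unsigned int v)
    {
        int b = 0;

        while (v) {             /* loop body is a reconstruction */
            b++;
            v = v >> 1;
        }
        return b;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               calc_bits_of(0), calc_bits_of(1), calc_bits_of(100)); /* 0 1 7 */
        return 0;
    }
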
/net/tipc/

bearer.c
  file scope
     69  static void bearer_disable(struct net *net, struct tipc_bearer *b);
  tipc_bearer_find()  [b: local]
    176      struct tipc_bearer *b;
    180      b = rtnl_dereference(tn->bearer_list[i]);
    181      if (b && (!strcmp(b->name, name)))
    182      return b;
  tipc_bearer_get_name()  [b: local]
    195      struct tipc_bearer *b;
    200      b = rtnl_dereference(tn->bearer_list[bearer_id]);
    201      if (!b)
    204      strcpy(name, b->name);
  tipc_bearer_add_dest()  [b: local]
    211      struct tipc_bearer *b;
  [all …]

discover.c
  tipc_disc_init_msg()  [b: argument]
     81  u32 mtyp, struct tipc_bearer *b)
     84      u32 dest_domain = b->domain;
     96      b->media->addr2msg(msg_media_addr(hdr), &b->addr);
  tipc_disc_msg_xmit()  [b: argument]
    104  struct tipc_bearer *b)
    113      tipc_disc_init_msg(net, skb, mtyp, b);
    116      tipc_bearer_xmit_skb(net, b->identity, skb, maddr);
  disc_dupl_alert()  [b: argument]
    125  static void disc_dupl_alert(struct tipc_bearer *b, u32 node_addr,
    133      media_addr_str, b->name);
  tipc_disc_addr_trial_msg()  [b: argument]
    142  struct tipc_bearer *b,
    182      self, sugg_addr, maddr, b);
  [all …]

udp_media.c
  tipc_udp_msg2addr()  [b: argument]
    138  static int tipc_udp_msg2addr(struct tipc_bearer *b, struct tipc_media_addr *a,
  tipc_udp_send_msg()  [b: argument]
    226  struct tipc_bearer *b,
    229      struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
    242      ub = rcu_dereference(b->media_ptr);
  tipc_udp_is_known_peer()  [b: argument]
    273  static bool tipc_udp_is_known_peer(struct tipc_bearer *b,
    279      ub = rcu_dereference_rtnl(b->media_ptr);
  tipc_udp_rcast_add()  [b: argument]
    293  static int tipc_udp_rcast_add(struct tipc_bearer *b,
    299      ub = rcu_dereference_rtnl(b->media_ptr);
    320      b->bcast_addr.broadcast = TIPC_REPLICAST_SUPPORT;
  tipc_udp_rcast_disc()  [b: argument]
    325  static int tipc_udp_rcast_disc(struct tipc_bearer *b, struct sk_buff *skb)
  [all …]

bearer.h
    106  struct tipc_bearer *b,
    108  int (*enable_media)(struct net *net, struct tipc_bearer *b,
    110  void (*disable_media)(struct tipc_bearer *b);
    115  int (*msg2addr)(struct tipc_bearer *b,
    118  int (*raw2addr)(struct tipc_bearer *b,
    186  void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b);
    218  int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
    220  bool tipc_bearer_hold(struct tipc_bearer *b);
    221  void tipc_bearer_put(struct tipc_bearer *b);
    222  void tipc_disable_l2_media(struct tipc_bearer *b);
  [all …]

/net/rxrpc/

call_accept.c
  rxrpc_service_prealloc_one()  [b: argument]
     35  struct rxrpc_backlog *b,
     61      call_head = b->call_backlog_head;
     62      call_tail = READ_ONCE(b->call_backlog_tail);
     70      head = b->peer_backlog_head;
     71      tail = READ_ONCE(b->peer_backlog_tail);
     76      b->peer_backlog[head] = peer;
     77      smp_store_release(&b->peer_backlog_head,
     81      head = b->conn_backlog_head;
     82      tail = READ_ONCE(b->conn_backlog_tail);
     89      b->conn_backlog[head] = conn;
  [all …]

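Note: rxrpc_service_prealloc_one() above reads the consumer's tail with READ_ONCE(), writes the slot, then publishes the new head with smp_store_release(). The userspace analogue below shows the same publish pattern with C11 atomics; the ring type, size and wrap handling are placeholders rather than rxrpc's actual backlog layout.

    /* Single-producer push: fill the slot, then release-store the new head
     * so a consumer that acquire-loads the head also sees the slot data. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define RING_SIZE 8                 /* power of two, for the & mask */

    struct ring {
        void *slot[RING_SIZE];
        atomic_uint head;               /* written by the producer */
        atomic_uint tail;               /* written by the consumer */
    };

    static int ring_push(struct ring *r, void *item)
    {
        unsigned int head = atomic_load_explicit(&r->head, memory_order_relaxed);
        unsigned int tail = atomic_load_explicit(&r->tail, memory_order_acquire);

        if (head - tail >= RING_SIZE)
            return -1;                  /* full */

        r->slot[head & (RING_SIZE - 1)] = item;
        /* userspace counterpart of smp_store_release() on the head index */
        atomic_store_explicit(&r->head, head + 1, memory_order_release);
        return 0;
    }

    int main(void)
    {
        static struct ring r;
        int x = 42;

        ring_push(&r, &x);
        printf("head=%u\n", atomic_load(&r.head));
        return 0;
    }
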
/net/sched/

sch_cake.c
  struct member
    146  u16 t:3, b:10;
  cake_heap_swap()
   1404      q->tins[ii.t].overflow_idx[ii.b] = j;
   1405      q->tins[jj.t].overflow_idx[jj.b] = i;
  cake_heap_get_backlog()
   1412      return q->tins[ii.t].backlogs[ii.b];
  cake_advance_shaper()  [b: argument]
   1469  struct cake_tin_data *b,
   1479      u64 tin_dur = (len * b->tin_rate_ns) >> b->tin_rate_shft;
   1483      if (ktime_before(b->time_next_packet, now))
   1484      b->time_next_packet = ktime_add_ns(b->time_next_packet,
   1487      else if (ktime_before(b->time_next_packet,
   1489      b->time_next_packet = ktime_add_ns(now, tin_dur);
  [all …]

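Note: the `u16 t:3, b:10;` member packs a tin number and a per-tin index into one 16-bit heap entry, which cake_heap_swap()/cake_heap_get_backlog() then use to address q->tins[t].backlogs[b]. A standalone sketch of that packed-index idea follows, with made-up array dimensions.

    /* Packed (tin, bucket) index: 3 bits of tin plus 10 bits of per-tin
     * slot in a single 16-bit struct.  Array sizes are arbitrary here. */
    #include <stdint.h>
    #include <stdio.h>

    struct heap_idx {
        uint16_t t:3, b:10;
    };

    #define NTINS  8                    /* fits the 3-bit tin field */
    #define NFLOWS 1024                 /* fits the 10-bit bucket field */

    static uint32_t backlogs[NTINS][NFLOWS];

    static uint32_t heap_get_backlog(struct heap_idx ii)
    {
        return backlogs[ii.t][ii.b];
    }

    int main(void)
    {
        struct heap_idx ii = { .t = 2, .b = 513 };

        backlogs[2][513] = 1500;
        printf("%u\n", heap_get_backlog(ii));   /* 1500 */
        return 0;
    }
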
cls_route.c
  route4_classify()  [b: local]
    129      struct route4_bucket *b;
    162      b = rcu_dereference_bh(head->table[h]);
    163      if (b) {
    164      for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
    170      for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
    176      for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
  route4_get()  [b: local]
    218      struct route4_bucket *b;
    230      b = rtnl_dereference(head->table[h1]);
    231      if (b) {
    232      for (f = rtnl_dereference(b->ht[h2]);
  [all …]

sch_sfb.c
  increment_one_qlen()  [b: local]
    126      struct sfb_bucket *b = &q->bins[slot].bins[0][0];
    132      if (b[hash].qlen < 0xFFFF)
    133      b[hash].qlen++;
    134      b += SFB_NUMBUCKETS; /* next level */
  decrement_one_qlen()  [b: local]
    155      struct sfb_bucket *b = &q->bins[slot].bins[0][0];
    161      if (b[hash].qlen > 0)
    162      b[hash].qlen--;
    163      b += SFB_NUMBUCKETS; /* next level */
  decrement_prob()  [b: argument]
    180  static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
    182      b->p_mark = prob_minus(b->p_mark, q->decrement);
  [all …]

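Note: increment_one_qlen()/decrement_one_qlen() touch one bucket per SFB level, stepping the pointer by SFB_NUMBUCKETS each iteration. The sketch below shows that stride walk; the LEVELS/NUMBUCKETS constants and the per-level hashes are assumptions, since the real definitions live elsewhere in sch_sfb.c and are not part of this excerpt.

    /* Per-level bucket walk: bins are stored as LEVELS * NUMBUCKETS entries
     * in one flat array, and each level contributes one bucket chosen by
     * that level's hash.  Constants and hashes are stand-ins. */
    #include <stdint.h>
    #include <stdio.h>

    #define LEVELS     4
    #define NUMBUCKETS 16

    struct bucket {
        uint16_t qlen;
    };

    static struct bucket bins[LEVELS * NUMBUCKETS];

    static void increment_one_qlen(const uint32_t hashes[LEVELS])
    {
        struct bucket *b = &bins[0];
        int i;

        for (i = 0; i < LEVELS; i++) {
            uint32_t hash = hashes[i] % NUMBUCKETS;

            if (b[hash].qlen < 0xFFFF)
                b[hash].qlen++;
            b += NUMBUCKETS;            /* next level */
        }
    }

    int main(void)
    {
        uint32_t h[LEVELS] = { 3, 7, 7, 12 };

        increment_one_qlen(h);
        printf("%u %u\n", bins[3].qlen, bins[NUMBUCKETS + 7].qlen);  /* 1 1 */
        return 0;
    }
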
/net/netfilter/

nft_set_pipapo_avx2.c
  NFT_PIPAPO_AVX2_AND macro  [b: macro argument]
     55  #define NFT_PIPAPO_AVX2_AND(dst, a, b) \
     56      asm volatile("vpand %ymm" #a ", %ymm" #b ", %ymm" #dst)
  nft_pipapo_avx2_lookup_4b_2()  [b: local]
    219      int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
    243      b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
    245      return b;
    248      ret = b / XSAVE_YMM_SIZE;
  nft_pipapo_avx2_lookup_4b_4()  [b: local]
    281      int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
    319      b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
    321      return b;
    324      ret = b / XSAVE_YMM_SIZE;
  [all …]

nft_set_pipapo.c
  nft_pipapo_lookup()  [b: local]
    439      int b;
    460      b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
    462      if (b < 0) {
    470      *ext = &f->mt[b].e->ext;
  pipapo_get()  [b: local]
    541      int b;
    563      b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
    565      if (b < 0)
    569      if (nft_set_elem_expired(&f->mt[b].e->ext))
    572      !nft_set_elem_active(&f->mt[b].e->ext, genmask)))
    575      ret = f->mt[b].e;
  [all …]

/net/bridge/

br_ioctl.c
  old_dev_ioctl()  [b: local]
    126      struct __bridge_info b;
    128      memset(&b, 0, sizeof(struct __bridge_info));
    130      memcpy(&b.designated_root, &br->designated_root, 8);
    131      memcpy(&b.bridge_id, &br->bridge_id, 8);
    132      b.root_path_cost = br->root_path_cost;
    133      b.max_age = jiffies_to_clock_t(br->max_age);
    134      b.hello_time = jiffies_to_clock_t(br->hello_time);
    135      b.forward_delay = br->forward_delay;
    136      b.bridge_max_age = br->bridge_max_age;
    137      b.bridge_hello_time = br->bridge_hello_time;
  [all …]

/net/core/

gen_estimator.c
  est_fetch_counters()  [b: argument]
     63  struct gnet_stats_basic_packed *b)
     65      memset(b, 0, sizeof(*b));
     69      __gnet_stats_copy_basic(e->running, b, e->cpu_bstats, e->bstats);
  est_timer()  [b: local]
     79      struct gnet_stats_basic_packed b;
     82      est_fetch_counters(est, &b);
     83      brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
     86      rate = (b.packets - est->last_packets) << (10 - est->intvl_log);
     94      est->last_bytes = b.bytes;
     95      est->last_packets = b.packets;
  gen_new_estimator()  [b: local]
    133      struct gnet_stats_basic_packed b;
  [all …]

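Note: est_timer() turns the byte/packet delta into a fixed-point rate with a left shift and, in lines elided above, folds it into an exponentially weighted moving average. The sketch below shows that general scheme only; SCALE_SHIFT, EWMA_LOG and the per-interval sampling are illustrative and do not reproduce the kernel's intvl_log/ewma_log semantics.

    /* Generic fixed-point EWMA of a byte counter: scale the per-interval
     * delta with a shift, then move the running average 1/2^EWMA_LOG of the
     * way toward it.  Constants are illustrative. */
    #include <stdint.h>
    #include <stdio.h>

    #define SCALE_SHIFT 10      /* keep 10 fractional bits in the stored rate */
    #define EWMA_LOG     3      /* smoothing factor 1/8 */

    static uint64_t last_bytes;
    static uint64_t avg_rate;   /* bytes per interval, << SCALE_SHIFT */

    static void sample(uint64_t bytes_now)
    {
        uint64_t rate = (bytes_now - last_bytes) << SCALE_SHIFT;
        int64_t diff = (int64_t)rate - (int64_t)avg_rate;

        last_bytes = bytes_now;
        avg_rate += diff / (1 << EWMA_LOG);
    }

    int main(void)
    {
        uint64_t total = 0;
        int i;

        for (i = 0; i < 32; i++) {
            total += 1500;              /* 1500 bytes every interval */
            sample(total);
        }
        /* prints a value approaching 1500 as the average settles */
        printf("%llu\n", (unsigned long long)(avg_rate >> SCALE_SHIFT));
        return 0;
    }
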
gen_stats.c
  __gnet_stats_copy_basic()  [b: argument]
    143  struct gnet_stats_basic_packed *b)
    154      bstats->bytes = b->bytes;
    155      bstats->packets = b->packets;
  ___gnet_stats_copy_basic()  [b: argument]
    164  struct gnet_stats_basic_packed *b,
    169      __gnet_stats_copy_basic(running, &bstats, cpu, b);
  gnet_stats_copy_basic()  [b: argument]
    210  struct gnet_stats_basic_packed *b)
    212      return ___gnet_stats_copy_basic(running, d, cpu, b,
  gnet_stats_copy_basic_hw()  [b: argument]
    234  struct gnet_stats_basic_packed *b)
    236      return ___gnet_stats_copy_basic(running, d, cpu, b,

/net/ipv4/

ipconfig.c
  ic_bootp_send_if()  [b: local]
    800      struct bootp_pkt *b;
    811      b = skb_put_zero(skb, sizeof(struct bootp_pkt));
    826      b->udph.source = htons(68);
    827      b->udph.dest = htons(67);
    828      b->udph.len = htons(sizeof(struct bootp_pkt) - sizeof(struct iphdr));
    832      b->op = BOOTP_REQUEST;
    834      b->htype = dev->type;
    836      b->htype = ARPHRD_ETHER;
    840      b->htype = dev->type; /* can cause undefined behavior */
    844      b->hlen = dev->addr_len;
  [all …]

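Note: ic_bootp_send_if() fills the UDP header by hand with the well-known BOOTP ports, 68 for the client and 67 for the server, in network byte order. The userspace sketch below repeats that fill with a local stand-in struct; it is not the kernel's struct bootp_pkt, and the payload size is arbitrary.

    /* Filling a UDP header for a BOOTP request: client port 68, server
     * port 67, length in network byte order.  The struct is a stand-in. */
    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct udp_hdr {
        uint16_t source;
        uint16_t dest;
        uint16_t len;
        uint16_t check;
    };

    int main(void)
    {
        struct udp_hdr udph;
        size_t payload = 300;           /* BOOTP body size, illustrative */

        memset(&udph, 0, sizeof(udph));
        udph.source = htons(68);        /* bootpc */
        udph.dest   = htons(67);        /* bootps */
        udph.len    = htons((uint16_t)(sizeof(udph) + payload));

        printf("src=%u dst=%u len=%u\n",
               ntohs(udph.source), ntohs(udph.dest), ntohs(udph.len));
        return 0;
    }
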
/net/netfilter/ipvs/

ip_vs_dh.c
  ip_vs_dh_reassign()  [b: local]
    101      struct ip_vs_dh_bucket *b;
    106      b = &s->buckets[0];
    110      dest = rcu_dereference_protected(b->dest, 1);
    114      RCU_INIT_POINTER(b->dest, NULL);
    121      RCU_INIT_POINTER(b->dest, dest);
    125      b++;
  ip_vs_dh_flush()  [b: local]
    137      struct ip_vs_dh_bucket *b;
    140      b = &s->buckets[0];
    142      dest = rcu_dereference_protected(b->dest, 1);
    145      RCU_INIT_POINTER(b->dest, NULL);
  [all …]

ip_vs_sh.c
  ip_vs_sh_reassign()  [b: local]
    166      struct ip_vs_sh_bucket *b;
    172      b = &s->buckets[0];
    177      dest = rcu_dereference_protected(b->dest, 1);
    181      RCU_INIT_POINTER(b->dest, NULL);
    188      RCU_INIT_POINTER(b->dest, dest);
    201      b++;
  ip_vs_sh_flush()  [b: local]
    213      struct ip_vs_sh_bucket *b;
    216      b = &s->buckets[0];
    218      dest = rcu_dereference_protected(b->dest, 1);
    221      RCU_INIT_POINTER(b->dest, NULL);
  [all …]

/net/dccp/ccids/lib/

tfrc.h
  scaled_div()  [b: argument]
     27  static inline u64 scaled_div(u64 a, u64 b)
     29      BUG_ON(b == 0);
     30      return div64_u64(a * 1000000, b);
  scaled_div32()  [b: argument]
     33  static inline u32 scaled_div32(u64 a, u64 b)
     35      u64 result = scaled_div(a, b);
     39      (unsigned long long)a, (unsigned long long)b);

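Note: scaled_div() multiplies by 10^6 before dividing so small ratios keep six decimal digits, and scaled_div32() narrows the result to 32 bits; its elided lines appear to report overflow (line 39 looks like printf-style arguments). The userspace version below mirrors the scaling and simply clamps on overflow, which is an assumption about the elided lines.

    /* Fixed-point division scaled by 10^6, with a clamped 32-bit variant. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t scaled_div(uint64_t a, uint64_t b)
    {
        assert(b != 0);                 /* the kernel uses BUG_ON(b == 0) */
        return a * 1000000 / b;
    }

    static uint32_t scaled_div32(uint64_t a, uint64_t b)
    {
        uint64_t result = scaled_div(a, b);

        return result > UINT32_MAX ? UINT32_MAX : (uint32_t)result;
    }

    int main(void)
    {
        /* 3/8 as a scaled integer: 375000 == 0.375 * 10^6 */
        printf("%llu\n", (unsigned long long)scaled_div(3, 8));
        printf("%u\n", scaled_div32(1ULL << 40, 1));   /* clamps to 2^32 - 1 */
        return 0;
    }
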
/net/sunrpc/

stats.c
  _add_rpc_iostats()  [b: argument]
    213  static void _add_rpc_iostats(struct rpc_iostats *a, struct rpc_iostats *b)
    215      a->om_ops += b->om_ops;
    216      a->om_ntrans += b->om_ntrans;
    217      a->om_timeouts += b->om_timeouts;
    218      a->om_bytes_sent += b->om_bytes_sent;
    219      a->om_bytes_recv += b->om_bytes_recv;
    220      a->om_queue = ktime_add(a->om_queue, b->om_queue);
    221      a->om_rtt = ktime_add(a->om_rtt, b->om_rtt);
    222      a->om_execute = ktime_add(a->om_execute, b->om_execute);
    223      a->om_error_status += b->om_error_status;

/net/ipv6/

seg6_local.c
  cmp_nla_srh()  [b: argument]
    701  static int cmp_nla_srh(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
    705      if (len != ((b->srh->hdrlen + 1) << 3))
    708      return memcmp(a->srh, b->srh, len);
  cmp_nla_table()  [b: argument]
    726  static int cmp_nla_table(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
    728      if (a->table != b->table)
  cmp_nla_nh4()  [b: argument]
    755  static int cmp_nla_nh4(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
    757      return memcmp(&a->nh4, &b->nh4, sizeof(struct in_addr));
  cmp_nla_nh6()  [b: argument]
    781  static int cmp_nla_nh6(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
    783      return memcmp(&a->nh6, &b->nh6, sizeof(struct in6_addr));
  cmp_nla_iif()  [b: argument]
    801  static int cmp_nla_iif(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
  [all …]
