/net/ceph/crush/

D | crush.c |
    28  int crush_get_bucket_item_weight(const struct crush_bucket *b, int p)  (argument)
    30  if ((__u32)p >= b->size)
    33  switch (b->alg) {
    35  return ((struct crush_bucket_uniform *)b)->item_weight;
    37  return ((struct crush_bucket_list *)b)->item_weights[p];
    39  return ((struct crush_bucket_tree *)b)->node_weights[crush_calc_tree_node(p)];
    41  return ((struct crush_bucket_straw *)b)->item_weights[p];
    43  return ((struct crush_bucket_straw2 *)b)->item_weights[p];
    48  void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b)  (argument)
    50  kfree(b->h.items);
    [all …]

D | hash.c |
    13  #define crush_hashmix(a, b, c) do { \  (argument)
    14  a = a-b; a = a-c; a = a^(c>>13); \
    15  b = b-c; b = b-a; b = b^(a<<8); \
    16  c = c-a; c = c-b; c = c^(b>>13); \
    17  a = a-b; a = a-c; a = a^(c>>12); \
    18  b = b-c; b = b-a; b = b^(a<<16); \
    19  c = c-a; c = c-b; c = c^(b>>5); \
    20  a = a-b; a = a-c; a = a^(c>>3); \
    21  b = b-c; b = b-a; b = b^(a<<10); \
    22  c = c-a; c = c-b; c = c^(b>>15); \
    [all …]
/net/ceph/

D | ceph_hash.c |
    10  #define mix(a, b, c) \  (argument)
    12  a = a - b; a = a - c; a = a ^ (c >> 13); \
    13  b = b - c; b = b - a; b = b ^ (a << 8); \
    14  c = c - a; c = c - b; c = c ^ (b >> 13); \
    15  a = a - b; a = a - c; a = a ^ (c >> 12); \
    16  b = b - c; b = b - a; b = b ^ (a << 16); \
    17  c = c - a; c = c - b; c = c ^ (b >> 5); \
    18  a = a - b; a = a - c; a = a ^ (c >> 3); \
    19  b = b - c; b = b - a; b = b ^ (a << 10); \
    20  c = c - a; c = c - b; c = c ^ (b >> 15); \
    [all …]
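The mix() rounds above (and crush_hashmix() in crush/hash.c) are a Jenkins-style three-word integer mixer. As a hedged illustration of how such a mixer is typically driven, here is a standalone userspace sketch; the toy_hash3() function and its golden-ratio seeding are assumptions for the example, not kernel code:

    /* Illustrative only: a userspace sketch of a Jenkins-style mixer in use. */
    #include <stdint.h>

    #define mix(a, b, c) do {                       \
            a -= b; a -= c; a ^= (c >> 13);         \
            b -= c; b -= a; b ^= (a << 8);          \
            c -= a; c -= b; c ^= (b >> 13);         \
            a -= b; a -= c; a ^= (c >> 12);         \
            b -= c; b -= a; b ^= (a << 16);         \
            c -= a; c -= b; c ^= (b >> 5);          \
            a -= b; a -= c; a ^= (c >> 3);          \
            b -= c; b -= a; b ^= (a << 10);         \
            c -= a; c -= b; c ^= (b >> 15);         \
    } while (0)

    /* Hash two 32-bit inputs plus a seed into one 32-bit value. */
    static uint32_t toy_hash3(uint32_t x, uint32_t y, uint32_t seed)
    {
        uint32_t a = 0x9e3779b9 + x;  /* golden-ratio constant as seed material */
        uint32_t b = 0x9e3779b9 + y;
        uint32_t c = seed;

        mix(a, b, c);
        return c;                     /* c carries the most-mixed state */
    }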
D | buffer.c |
    14  struct ceph_buffer *b;  (local in ceph_buffer_new())
    16  b = kmalloc(sizeof(*b), gfp);
    17  if (!b)
    20  b->vec.iov_base = ceph_kvmalloc(len, gfp);
    21  if (!b->vec.iov_base) {
    22  kfree(b);
    26  kref_init(&b->kref);
    27  b->alloc_len = len;
    28  b->vec.iov_len = len;
    29  dout("buffer_new %p\n", b);
    [all …]
D | armor.c |
    43  unsigned char a, b, c;  (local in ceph_armor())
    48  b = *src++;
    49  *dst++ = encode_bits(((a & 3) << 4) | (b >> 4));
    52  *dst++ = encode_bits(((b & 15) << 2) |
    56  *dst++ = encode_bits((b & 15) << 2);
    80  int a, b, c, d;  (local in ceph_unarmor())
    89  b = decode_bits(src[1]);
    92  if (a < 0 || b < 0 || c < 0 || d < 0)
    95  *dst++ = (a << 2) | (b >> 4);
    98  *dst++ = ((b & 15) << 4) | (c >> 2);
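ceph_armor()/ceph_unarmor() do base64-style armoring: three input bytes are split into four 6-bit groups on encode and reassembled on decode, which is exactly what the shifts above suggest. A minimal standalone sketch of that bit packing follows; the helper names and the raw 0..63 value range are illustrative assumptions, not the ceph alphabet or API:

    #include <stdint.h>

    /* Pack 3 bytes into four 6-bit values (each 0..63); illustrative only. */
    static void pack_sextets(const uint8_t in[3], uint8_t out[4])
    {
        out[0] = in[0] >> 2;
        out[1] = ((in[0] & 3) << 4) | (in[1] >> 4);
        out[2] = ((in[1] & 15) << 2) | (in[2] >> 6);
        out[3] = in[2] & 63;
    }

    /* Reverse step: rebuild 3 bytes from four 6-bit values. */
    static void unpack_sextets(const uint8_t in[4], uint8_t out[3])
    {
        out[0] = (in[0] << 2) | (in[1] >> 4);
        out[1] = ((in[1] & 15) << 4) | (in[2] >> 2);
        out[2] = ((in[2] & 3) << 6) | in[3];
    }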
D | osdmap.c |
    36  int b = 0;  (local in calc_bits_of())
    39  b++;
    41  return b;
    57  struct crush_bucket_uniform *b)  (argument in crush_decode_uniform_bucket())
    60  ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
    61  b->item_weight = ceph_decode_32(p);
    68  struct crush_bucket_list *b)  (argument in crush_decode_list_bucket())
    72  b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
    73  if (b->item_weights == NULL)
    75  b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
    [all …]
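calc_bits_of() appears to count how many bits are needed to represent a value; the loop between the lines shown is elided here. A standalone sketch of that counting idiom, written as an assumption about the omitted loop rather than a copy of it:

    #include <stdint.h>

    /* Number of significant bits in v: 0 for 0, 1 for 1, 4 for 0x9, etc.
     * Sketch of the idea; not copied from osdmap.c. */
    static int bits_of(uint32_t v)
    {
        int b = 0;

        while (v) {
            v >>= 1;
            b++;
        }
        return b;
    }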
/net/tipc/

D | bearer.c |
    67   static void bearer_disable(struct net *net, struct tipc_bearer *b);
    173  struct tipc_bearer *b;  (local in tipc_bearer_find())
    177  b = rtnl_dereference(tn->bearer_list[i]);
    178  if (b && (!strcmp(b->name, name)))
    179  return b;
    192  struct tipc_bearer *b;  (local in tipc_bearer_get_name())
    197  b = rtnl_dereference(tn->bearer_list[bearer_id]);
    198  if (!b)
    201  strcpy(name, b->name);
    208  struct tipc_bearer *b;  (local in tipc_bearer_add_dest())
    [all …]
D | udp_media.c |
    134  static int tipc_udp_msg2addr(struct tipc_bearer *b, struct tipc_media_addr *a,  (argument)
    208  struct tipc_bearer *b,  (argument in tipc_udp_send_msg())
    211  struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
    224  ub = rcu_dereference_rtnl(b->media_ptr);
    253  static bool tipc_udp_is_known_peer(struct tipc_bearer *b,  (argument)
    259  ub = rcu_dereference_rtnl(b->media_ptr);
    273  static int tipc_udp_rcast_add(struct tipc_bearer *b,  (argument)
    279  ub = rcu_dereference_rtnl(b->media_ptr);
    295  b->bcast_addr.broadcast = TIPC_REPLICAST_SUPPORT;
    300  static int tipc_udp_rcast_disc(struct tipc_bearer *b, struct sk_buff *skb)  (argument)
    [all …]
D | discover.c |
    81   struct tipc_bearer *b)  (argument in tipc_disc_init_msg())
    85   u32 dest_domain = b->domain;
    95   b->media->addr2msg(msg_media_addr(msg), &b->addr);
    104  static void disc_dupl_alert(struct tipc_bearer *b, u32 node_addr,  (argument)
    114  media_addr_str, b->name);
    273  int tipc_disc_create(struct net *net, struct tipc_bearer *b,  (argument)
    287  tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b);
    290  req->bearer_id = b->identity;
    291  req->domain = b->domain;
    297  b->link_req = req;
    [all …]
D | bearer.h |
    103  struct tipc_bearer *b,
    105  int (*enable_media)(struct net *net, struct tipc_bearer *b,
    107  void (*disable_media)(struct tipc_bearer *b);
    112  int (*msg2addr)(struct tipc_bearer *b,
    115  int (*raw2addr)(struct tipc_bearer *b,
    176  void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b);
    204  int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
    206  void tipc_disable_l2_media(struct tipc_bearer *b);
    208  struct tipc_bearer *b, struct tipc_media_addr *dest);
D | ib_media.c |
    66  static int tipc_ib_raw2addr(struct tipc_bearer *b,  (argument)
    73  addr->broadcast = !memcmp(msg, b->bcast_addr.value,
    79  static int tipc_ib_msg2addr(struct tipc_bearer *b,  (argument)
    83  return tipc_ib_raw2addr(b, addr, msg);
/net/rxrpc/

D | call_accept.c |
    34  struct rxrpc_backlog *b,  (argument in rxrpc_service_prealloc_one())
    58  call_head = b->call_backlog_head;
    59  call_tail = READ_ONCE(b->call_backlog_tail);
    67  head = b->peer_backlog_head;
    68  tail = READ_ONCE(b->peer_backlog_tail);
    73  b->peer_backlog[head] = peer;
    74  smp_store_release(&b->peer_backlog_head,
    78  head = b->conn_backlog_head;
    79  tail = READ_ONCE(b->conn_backlog_tail);
    86  b->conn_backlog[head] = conn;
    [all …]
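The backlog fill above follows the familiar single-producer ring pattern: the producer reads the consumer's tail with READ_ONCE(), writes the slot, then publishes the new head with smp_store_release(). A userspace sketch of that pattern using C11 atomics; the ring type, size and item type are assumptions for illustration, not the rxrpc structures:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define RING_SIZE 16                    /* power of two, illustrative */

    struct spsc_ring {
        void *slot[RING_SIZE];
        _Atomic unsigned int head;          /* written by producer only */
        _Atomic unsigned int tail;          /* written by consumer only */
    };

    /* Producer side: returns false when the ring is full. */
    static bool ring_push(struct spsc_ring *r, void *item)
    {
        unsigned int head = atomic_load_explicit(&r->head, memory_order_relaxed);
        unsigned int tail = atomic_load_explicit(&r->tail, memory_order_acquire);

        if (((head + 1) & (RING_SIZE - 1)) == tail)
            return false;                   /* full */

        r->slot[head] = item;
        /* Release store: a consumer that sees the new head also sees the slot. */
        atomic_store_explicit(&r->head, (head + 1) & (RING_SIZE - 1),
                              memory_order_release);
        return true;
    }

    /* Consumer side: returns NULL when the ring is empty. */
    static void *ring_pop(struct spsc_ring *r)
    {
        unsigned int tail = atomic_load_explicit(&r->tail, memory_order_relaxed);
        unsigned int head = atomic_load_explicit(&r->head, memory_order_acquire);
        void *item;

        if (tail == head)
            return NULL;                    /* empty */

        item = r->slot[tail];
        atomic_store_explicit(&r->tail, (tail + 1) & (RING_SIZE - 1),
                              memory_order_release);
        return item;
    }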
/net/sched/

D | cls_route.c |
    136  struct route4_bucket *b;  (local in route4_classify())
    169  b = rcu_dereference_bh(head->table[h]);
    170  if (b) {
    171  for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
    177  for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
    183  for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
    225  struct route4_bucket *b;  (local in route4_get())
    237  b = rtnl_dereference(head->table[h1]);
    238  if (b) {
    239  for (f = rtnl_dereference(b->ht[h2]);
    [all …]
D | sch_sfb.c |
    130  struct sfb_bucket *b = &q->bins[slot].bins[0][0];  (local in increment_one_qlen())
    136  if (b[hash].qlen < 0xFFFF)
    137  b[hash].qlen++;
    138  b += SFB_NUMBUCKETS; /* next level */
    159  struct sfb_bucket *b = &q->bins[slot].bins[0][0];  (local in decrement_one_qlen())
    165  if (b[hash].qlen > 0)
    166  b[hash].qlen--;
    167  b += SFB_NUMBUCKETS; /* next level */
    184  static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)  (argument)
    186  b->p_mark = prob_minus(b->p_mark, q->decrement);
    [all …]
/net/bridge/

D | br_ioctl.c |
    130  struct __bridge_info b;  (local in old_dev_ioctl())
    132  memset(&b, 0, sizeof(struct __bridge_info));
    134  memcpy(&b.designated_root, &br->designated_root, 8);
    135  memcpy(&b.bridge_id, &br->bridge_id, 8);
    136  b.root_path_cost = br->root_path_cost;
    137  b.max_age = jiffies_to_clock_t(br->max_age);
    138  b.hello_time = jiffies_to_clock_t(br->hello_time);
    139  b.forward_delay = br->forward_delay;
    140  b.bridge_max_age = br->bridge_max_age;
    141  b.bridge_hello_time = br->bridge_hello_time;
    [all …]
/net/core/

D | gen_estimator.c |
    67   struct gnet_stats_basic_packed *b)  (argument in est_fetch_counters())
    69   memset(b, 0, sizeof(*b));
    73   __gnet_stats_copy_basic(e->running, b, e->cpu_bstats, e->bstats);
    83   struct gnet_stats_basic_packed b;  (local in est_timer())
    86   est_fetch_counters(est, &b);
    87   brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
    90   rate = (u64)(b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
    98   est->last_bytes = b.bytes;
    99   est->last_packets = b.packets;
    137  struct gnet_stats_basic_packed b;  (local in gen_new_estimator())
    [all …]
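est_timer() turns the per-interval byte and packet deltas into fixed-point rates (hence the shift by 10 - ewma_log - intvl_log) and then feeds them into a smoothed average; the update step itself is elided above. A hedged userspace sketch of a shift-based exponentially weighted moving average of that general kind; the structure and field names below are illustrative assumptions, not the gen_estimator internals:

    #include <stdint.h>

    /* Shift-based EWMA: avg <- avg*(1 - 1/2^k) + sample*(1/2^k), k = ewma_log.
     * Pure integer arithmetic, so no floating point is needed. */
    struct rate_est {
        uint64_t avg;               /* smoothed rate, fixed point */
        unsigned int ewma_log;      /* smoothing factor exponent */
    };

    static void rate_est_update(struct rate_est *e, uint64_t sample)
    {
        e->avg = e->avg - (e->avg >> e->ewma_log) + (sample >> e->ewma_log);
    }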
/net/ipv4/

D | ipconfig.c |
    811  struct bootp_pkt *b;  (local in ic_bootp_send_if())
    822  b = skb_put_zero(skb, sizeof(struct bootp_pkt));
    837  b->udph.source = htons(68);
    838  b->udph.dest = htons(67);
    839  b->udph.len = htons(sizeof(struct bootp_pkt) - sizeof(struct iphdr));
    843  b->op = BOOTP_REQUEST;
    845  b->htype = dev->type;
    847  b->htype = ARPHRD_ETHER;
    851  b->htype = dev->type; /* can cause undefined behavior */
    855  b->hlen = dev->addr_len;
    [all …]
/net/netfilter/ipvs/

D | ip_vs_dh.c |
    105  struct ip_vs_dh_bucket *b;  (local in ip_vs_dh_reassign())
    110  b = &s->buckets[0];
    114  dest = rcu_dereference_protected(b->dest, 1);
    118  RCU_INIT_POINTER(b->dest, NULL);
    125  RCU_INIT_POINTER(b->dest, dest);
    129  b++;
    141  struct ip_vs_dh_bucket *b;  (local in ip_vs_dh_flush())
    144  b = &s->buckets[0];
    146  dest = rcu_dereference_protected(b->dest, 1);
    149  RCU_INIT_POINTER(b->dest, NULL);
    [all …]
D | ip_vs_sh.c |
    170  struct ip_vs_sh_bucket *b;  (local in ip_vs_sh_reassign())
    176  b = &s->buckets[0];
    181  dest = rcu_dereference_protected(b->dest, 1);
    185  RCU_INIT_POINTER(b->dest, NULL);
    192  RCU_INIT_POINTER(b->dest, dest);
    205  b++;
    217  struct ip_vs_sh_bucket *b;  (local in ip_vs_sh_flush())
    220  b = &s->buckets[0];
    222  dest = rcu_dereference_protected(b->dest, 1);
    225  RCU_INIT_POINTER(b->dest, NULL);
    [all …]
/net/dccp/ccids/lib/

D | tfrc.h |
    31  static inline u64 scaled_div(u64 a, u64 b)  (argument)
    33  BUG_ON(b == 0);
    34  return div64_u64(a * 1000000, b);
    37  static inline u32 scaled_div32(u64 a, u64 b)  (argument)
    39  u64 result = scaled_div(a, b);
    43  (unsigned long long)a, (unsigned long long)b);
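scaled_div() performs fixed-point division by scaling the numerator by 10^6 before dividing, and scaled_div32() narrows the result to 32 bits; the lines elided above handle the case where the result does not fit. A small userspace sketch of the same arithmetic; the saturate-to-UINT32_MAX behaviour is an assumption about the elided code, not a quotation of it:

    #include <stdint.h>

    /* a/b in fixed point with six decimal digits of fraction: (a * 10^6) / b. */
    static uint64_t scaled_div_sketch(uint64_t a, uint64_t b)
    {
        /* b == 0 would be a caller bug; the kernel version BUG()s on it. */
        return (a * 1000000u) / b;
    }

    /* Narrow to 32 bits, saturating instead of silently truncating (assumed). */
    static uint32_t scaled_div32_sketch(uint64_t a, uint64_t b)
    {
        uint64_t result = scaled_div_sketch(a, b);

        return result > UINT32_MAX ? UINT32_MAX : (uint32_t)result;
    }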
/net/wireless/

D | lib80211_crypt_ccmp.c |
    104  static inline void xor_block(u8 * b, u8 * a, size_t len)  (argument)
    108  b[i] ^= a[i];
    225  u8 *b = key->tx_b;  (local in lib80211_ccmp_encrypt())
    239  ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
    247  xor_block(b, pos, len);
    248  lib80211_ccmp_aes_encrypt(key->tfm, b, b);
    259  mic[i] = b[i] ^ s0[i];
    291  u8 *b = key->rx_b;  (local in lib80211_ccmp_decrypt())
    344  ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b);
    345  xor_block(mic, b, CCMP_MIC_LEN);
    [all …]
/net/ipv6/

D | seg6_local.c |
    589  static int cmp_nla_srh(struct seg6_local_lwt *a, struct seg6_local_lwt *b)  (argument)
    593  if (len != ((b->srh->hdrlen + 1) << 3))
    596  return memcmp(a->srh, b->srh, len);
    614  static int cmp_nla_table(struct seg6_local_lwt *a, struct seg6_local_lwt *b)  (argument)
    616  if (a->table != b->table)
    643  static int cmp_nla_nh4(struct seg6_local_lwt *a, struct seg6_local_lwt *b)  (argument)
    645  return memcmp(&a->nh4, &b->nh4, sizeof(struct in_addr));
    669  static int cmp_nla_nh6(struct seg6_local_lwt *a, struct seg6_local_lwt *b)  (argument)
    671  return memcmp(&a->nh6, &b->nh6, sizeof(struct in6_addr));
    689  static int cmp_nla_iif(struct seg6_local_lwt *a, struct seg6_local_lwt *b)  (argument)
    [all …]
/net/netfilter/

D | nf_conntrack_h323_asn1.c |
    104  #define INC_BITS(bs,b) if(((bs)->bit+=(b))>7){(bs)->cur+=(bs)->bit>>3;(bs)->bit&=7;}  (argument)
    109  static unsigned int get_bits(bitstr_t *bs, unsigned int b);
    110  static unsigned int get_bitmap(bitstr_t *bs, unsigned int b);
    111  static unsigned int get_uint(bitstr_t *bs, int b);
    171  unsigned int b = (*bs->cur) & (0x80 >> bs->bit);  (local in get_bit())
    175  return b;
    180  static unsigned int get_bits(bitstr_t *bs, unsigned int b)  (argument)
    185  l = b + bs->bit;
    206  static unsigned int get_bitmap(bitstr_t *bs, unsigned int b)  (argument)
    210  if (!b)
    [all …]
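The ASN.1 PER decoder walks the packet as an MSB-first bitstream: get_bit() masks the current byte with 0x80 >> bit, and INC_BITS() advances the cursor, carrying whole bytes once the bit offset passes 7. A self-contained sketch of such a reader; the bitreader type and the read_bits() loop are illustrative assumptions, not the decoder's internals:

    #include <stdint.h>

    struct bitreader {
        const uint8_t *cur;     /* current byte */
        unsigned int bit;       /* next bit within *cur, 0 = MSB */
    };

    /* Read one bit, MSB first. */
    static unsigned int read_bit(struct bitreader *bs)
    {
        unsigned int b = (*bs->cur >> (7 - bs->bit)) & 1;

        if (++bs->bit > 7) {    /* crossed a byte boundary */
            bs->cur++;
            bs->bit = 0;
        }
        return b;
    }

    /* Read n bits (n <= 32) into the low bits of the result, MSB first. */
    static uint32_t read_bits(struct bitreader *bs, unsigned int n)
    {
        uint32_t v = 0;

        while (n--)
            v = (v << 1) | read_bit(bs);
        return v;
    }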
D | nf_conntrack_expect.c |
    233  const struct nf_conntrack_expect *b)  (argument in expect_clash())
    240  intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
    244  a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
    247  return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
    248  net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
    249  nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
    253  const struct nf_conntrack_expect *b)  (argument in expect_matches())
    255  return a->master == b->master && a->class == b->class &&
    256  nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
    257  nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
    [all …]
/net/hsr/

D | hsr_framereg.c |
    45  static bool seq_nr_after(u16 a, u16 b)  (argument)
    50  if ((int) b - a == 32768)
    53  return (((s16) (b - a)) < 0);
    55  #define seq_nr_before(a, b) seq_nr_after((b), (a))  (argument)
    56  #define seq_nr_after_or_eq(a, b) (!seq_nr_before((a), (b)))  (argument)
    57  #define seq_nr_before_or_eq(a, b) (!seq_nr_after((a), (b)))  (argument)
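seq_nr_after() is the usual wraparound-safe comparison for 16-bit sequence numbers: cast the difference to a signed 16-bit value and test its sign, with the exactly-opposite case (difference of 32768) broken deterministically since it is otherwise ambiguous. A standalone sketch of the core idea; the tie-break branch below is an illustrative choice, since the corresponding return in hsr_framereg.c is elided above:

    #include <stdint.h>
    #include <stdbool.h>

    /* True if 16-bit sequence number a is "after" b on a circular counter.
     * The half-way case is resolved one fixed way (here: "not after") so that
     * after/before stay mutually exclusive; this choice is an assumption. */
    static bool seq16_after(uint16_t a, uint16_t b)
    {
        if ((int)b - (int)a == 32768)
            return false;

        return (int16_t)(b - a) < 0;
    }

    /* Examples: seq16_after(5, 3) == true; seq16_after(3, 5) == false;
     * seq16_after(2, 65535) == true because the counter wrapped. */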