/net/bridge/
br_stp.c
    29  void br_set_state(struct net_bridge_port *p, unsigned int state)    in br_set_state() argument
    32  .orig_dev = p->dev,    in br_set_state()
    42  if (p->flags & BR_MRP_AWARE)    in br_set_state()
    45  p->state = state;    in br_set_state()
    46  if (br_opt_get(p->br, BROPT_MST_ENABLED)) {    in br_set_state()
    47  err = br_mst_set_state(p, 0, state, NULL);    in br_set_state()
    49  br_warn(p->br, "error setting MST state on port %u(%s)\n",    in br_set_state()
    50  p->port_no, netdev_name(p->dev));    in br_set_state()
    52  err = switchdev_port_attr_set(p->dev, &attr, NULL);    in br_set_state()
    54  br_warn(p->br, "error setting offload STP state on port %u(%s)\n",    in br_set_state()
    [all …]

br_if.c
    73  void br_port_carrier_check(struct net_bridge_port *p, bool *notified)    in br_port_carrier_check() argument
    75  struct net_device *dev = p->dev;    in br_port_carrier_check()
    76  struct net_bridge *br = p->br;    in br_port_carrier_check()
    78  if (!(p->flags & BR_ADMIN_COST) &&    in br_port_carrier_check()
    80  p->path_cost = port_cost(dev);    in br_port_carrier_check()
    88  if (p->state == BR_STATE_DISABLED) {    in br_port_carrier_check()
    89  br_stp_enable_port(p);    in br_port_carrier_check()
    93  if (p->state != BR_STATE_DISABLED) {    in br_port_carrier_check()
    94  br_stp_disable_port(p);    in br_port_carrier_check()
   101  static void br_port_set_promisc(struct net_bridge_port *p)    in br_port_set_promisc() argument
    [all …]

br_sysfs_if.c
    48  static ssize_t show_##_name(struct net_bridge_port *p, char *buf) \
    50  return sprintf(buf, "%d\n", !!(p->flags & _mask)); \
    52  static int store_##_name(struct net_bridge_port *p, unsigned long v) \
    54  return store_flag(p, v, _mask); \
    59  static int store_flag(struct net_bridge_port *p, unsigned long v,    in store_flag() argument
    63  unsigned long flags = p->flags;    in store_flag()
    71  if (flags != p->flags) {    in store_flag()
    72  err = br_switchdev_set_port_flag(p, flags, mask, &extack);    in store_flag()
    74  netdev_err(p->dev, "%s\n", extack._msg);    in store_flag()
    78  p->flags = flags;    in store_flag()
    [all …]

br_stp_if.c
    33  void br_init_port(struct net_bridge_port *p)    in br_init_port() argument
    37  p->port_id = br_make_port_id(p->priority, p->port_no);    in br_init_port()
    38  br_become_designated_port(p);    in br_init_port()
    39  br_set_state(p, BR_STATE_BLOCKING);    in br_init_port()
    40  p->topology_change_ack = 0;    in br_init_port()
    41  p->config_pending = 0;    in br_init_port()
    43  err = __set_ageing_time(p->dev, p->br->ageing_time);    in br_init_port()
    45  netdev_err(p->dev, "failed to offload ageing time\n");    in br_init_port()
    51  struct net_bridge_port *p;    in br_stp_enable_bridge() local
    60  list_for_each_entry(p, &br->port_list, list) {    in br_stp_enable_bridge()
    [all …]

br_mrp.c
     9  static int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb);
    90  struct net_bridge_port *p;    in br_mrp_unique_ifindex() local
    92  p = rtnl_dereference(mrp->p_port);    in br_mrp_unique_ifindex()
    93  if (p && p->dev->ifindex == ifindex)    in br_mrp_unique_ifindex()
    96  p = rtnl_dereference(mrp->s_port);    in br_mrp_unique_ifindex()
    97  if (p && p->dev->ifindex == ifindex)    in br_mrp_unique_ifindex()
   100  p = rtnl_dereference(mrp->i_port);    in br_mrp_unique_ifindex()
   101  if (p && p->dev->ifindex == ifindex)    in br_mrp_unique_ifindex()
   109  struct net_bridge_port *p)    in br_mrp_find_port() argument
   116  if (rcu_access_pointer(mrp->p_port) == p ||    in br_mrp_find_port()
    [all …]

br_stp_timer.c
    19  struct net_bridge_port *p;    in br_is_designated_for_some_port() local
    21  list_for_each_entry(p, &br->port_list, list) {    in br_is_designated_for_some_port()
    22  if (p->state != BR_STATE_DISABLED &&    in br_is_designated_for_some_port()
    23  !memcmp(&p->designated_bridge, &br->bridge_id, 8))    in br_is_designated_for_some_port()
    48  struct net_bridge_port *p = from_timer(p, t, message_age_timer);    in br_message_age_timer_expired() local
    49  struct net_bridge *br = p->br;    in br_message_age_timer_expired()
    50  const bridge_id *id = &p->designated_bridge;    in br_message_age_timer_expired()
    53  if (p->state == BR_STATE_DISABLED)    in br_message_age_timer_expired()
    57  (unsigned int) p->port_no, p->dev->name,    in br_message_age_timer_expired()
    66  if (p->state == BR_STATE_DISABLED)    in br_message_age_timer_expired()
    [all …]

br_input.c
    78  struct net_bridge_port *p = br_port_get_rcu(skb->dev);    in br_handle_frame_finish() local
    91  if (!p)    in br_handle_frame_finish()
    94  br = p->br;    in br_handle_frame_finish()
    99  if (p->state == BR_STATE_DISABLED)    in br_handle_frame_finish()
   102  state = p->state;    in br_handle_frame_finish()
   105  brmctx = &p->br->multicast_ctx;    in br_handle_frame_finish()
   106  pmctx = &p->multicast_ctx;    in br_handle_frame_finish()
   107  if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid,    in br_handle_frame_finish()
   111  if (p->flags & BR_PORT_LOCKED) {    in br_handle_frame_finish()
   119  if (p->flags & BR_PORT_MAB)    in br_handle_frame_finish()
    [all …]

br_switchdev.c
    14  static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,    in nbp_switchdev_can_offload_tx_fwd() argument
    20  return (p->flags & BR_TX_FWD_OFFLOAD) &&    in nbp_switchdev_can_offload_tx_fwd()
    21  (p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);    in nbp_switchdev_can_offload_tx_fwd()
    38  void nbp_switchdev_frame_mark_tx_fwd_offload(const struct net_bridge_port *p,    in nbp_switchdev_frame_mark_tx_fwd_offload() argument
    41  if (nbp_switchdev_can_offload_tx_fwd(p, skb))    in nbp_switchdev_frame_mark_tx_fwd_offload()
    50  void nbp_switchdev_frame_mark_tx_fwd_to_hwdom(const struct net_bridge_port *p,    in nbp_switchdev_frame_mark_tx_fwd_to_hwdom() argument
    53  if (nbp_switchdev_can_offload_tx_fwd(p, skb))    in nbp_switchdev_frame_mark_tx_fwd_to_hwdom()
    54  set_bit(p->hwdom, &BR_INPUT_SKB_CB(skb)->fwd_hwdoms);    in nbp_switchdev_frame_mark_tx_fwd_to_hwdom()
    57  void nbp_switchdev_frame_mark(const struct net_bridge_port *p,    in nbp_switchdev_frame_mark() argument
    60  if (p->hwdom)    in nbp_switchdev_frame_mark()
    [all …]

br_forward.c
    21  static inline int should_deliver(const struct net_bridge_port *p,    in should_deliver() argument
    26  vg = nbp_vlan_group_rcu(p);    in should_deliver()
    27  return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&    in should_deliver()
    28  (br_mst_is_enabled(p->br) || p->state == BR_STATE_FORWARDING) &&    in should_deliver()
    29  br_allowed_egress(vg, skb) && nbp_switchdev_allowed_egress(p, skb) &&    in should_deliver()
    30  !br_skb_isolated(p, skb);    in should_deliver()
   176  struct net_bridge_port *prev, struct net_bridge_port *p,    in maybe_deliver() argument
   182  if (!should_deliver(p, skb))    in maybe_deliver()
   185  nbp_switchdev_frame_mark_tx_fwd_to_hwdom(p, skb);    in maybe_deliver()
   194  br_multicast_count(p->br, p, skb, igmp_type, BR_MCAST_DIR_TX);    in maybe_deliver()
    [all …]

br_netlink.c
    97  struct net_bridge_port *p = NULL;    in br_get_link_af_size_filtered() local
   106  p = br_port_get_check_rcu(dev);    in br_get_link_af_size_filtered()
   107  if (p)    in br_get_link_af_size_filtered()
   108  vg = nbp_vlan_group_rcu(p);    in br_get_link_af_size_filtered()
   116  if (p && (p->flags & BR_VLAN_TUNNEL))    in br_get_link_af_size_filtered()
   122  if (p && vg && (filter_mask & RTEXT_FILTER_MST))    in br_get_link_af_size_filtered()
   234  const struct net_bridge_port *p)    in br_port_fill_attrs() argument
   236  u8 mode = !!(p->flags & BR_HAIRPIN_MODE);    in br_port_fill_attrs()
   240  if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||    in br_port_fill_attrs()
   241  nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||    in br_port_fill_attrs()
    [all …]

/net/mac80211/
rc80211_minstrel_ht_debugfs.c
    48  minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)    in minstrel_ht_stats_dump() argument
    57  return p;    in minstrel_ht_stats_dump()
    78  p += sprintf(p, "HT%c0 ", htmode);    in minstrel_ht_stats_dump()
    79  p += sprintf(p, "%cGI ", gimode);    in minstrel_ht_stats_dump()
    80  p += sprintf(p, "%d ", mg->streams);    in minstrel_ht_stats_dump()
    82  p += sprintf(p, "VHT%c0 ", htmode);    in minstrel_ht_stats_dump()
    83  p += sprintf(p, "%cGI ", gimode);    in minstrel_ht_stats_dump()
    84  p += sprintf(p, "%d ", mg->streams);    in minstrel_ht_stats_dump()
    86  p += sprintf(p, "OFDM ");    in minstrel_ht_stats_dump()
    87  p += sprintf(p, "1 ");    in minstrel_ht_stats_dump()
    [all …]

debugfs_sta.c
   111  char buf[17*IEEE80211_NUM_ACS], *p = buf;    in sta_num_ps_buf_frames_read() local
   115  p += scnprintf(p, sizeof(buf)+buf-p, "AC%d: %d\n", ac,    in sta_num_ps_buf_frames_read()
   118  return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);    in sta_num_ps_buf_frames_read()
   125  char buf[15*IEEE80211_NUM_TIDS], *p = buf;    in sta_last_seq_ctrl_read() local
   129  p += scnprintf(p, sizeof(buf)+buf-p, "%x ",    in sta_last_seq_ctrl_read()
   131  p += scnprintf(p, sizeof(buf)+buf-p, "\n");    in sta_last_seq_ctrl_read()
   132  return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);    in sta_last_seq_ctrl_read()
   144  char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;    in sta_aqm_read() local
   155  p += scnprintf(p,    in sta_aqm_read()
   156  bufsz + buf - p,    in sta_aqm_read()
    [all …]

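
Both mac80211 debugfs excerpts above build their text output the same way: keep a cursor p into a fixed buffer and advance it by the return value of each print call, passing the space still left (sizeof(buf) + buf - p). With scnprintf(), which returns the number of characters actually stored, the cursor can never run past the buffer; the minstrel dump's plain sprintf() relies on the buffer being sized generously instead. A minimal userspace sketch of the same accumulation pattern, emulating scnprintf() with snprintf() (helper names here are made up):

#include <stdarg.h>
#include <stdio.h>

/*
 * scnprintf() returns the number of characters actually written (excluding
 * the NUL), never more than size - 1, so "p += scnprintf(p, remaining, ...)"
 * cannot walk past the buffer.  Plain snprintf() returns the would-be
 * length, so clamp it here.
 */
static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (size == 0)
		return 0;
	va_start(ap, fmt);
	n = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (n < 0)
		return 0;
	return (size_t)n >= size ? (int)(size - 1) : n;
}

int main(void)
{
	char buf[64], *p = buf;
	int ac;

	/* Same shape as sta_num_ps_buf_frames_read(): one line per queue. */
	for (ac = 0; ac < 4; ac++)
		p += scnprintf_like(p, sizeof(buf) + buf - p, "AC%d: %d\n", ac, ac * 10);

	fwrite(buf, 1, p - buf, stdout);
	return 0;
}
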
/net/x25/
x25_facilities.c
    44  unsigned char *p;    in x25_parse_facilities() local
    68  p = skb->data + 1;    in x25_parse_facilities()
    71  switch (*p & X25_FAC_CLASS_MASK) {    in x25_parse_facilities()
    75  switch (*p) {    in x25_parse_facilities()
    77  if((p[1] & 0x81) == 0x81) {    in x25_parse_facilities()
    78  facilities->reverse = p[1] & 0x81;    in x25_parse_facilities()
    83  if((p[1] & 0x01) == 0x01) {    in x25_parse_facilities()
    84  facilities->reverse = p[1] & 0x01;    in x25_parse_facilities()
    89  if((p[1] & 0x80) == 0x80) {    in x25_parse_facilities()
    90  facilities->reverse = p[1] & 0x80;    in x25_parse_facilities()
    [all …]

/net/rose/
rose_subr.c
   239  static int rose_parse_national(unsigned char *p, struct rose_facilities_struct *facilities, int len)    in rose_parse_national() argument
   246  switch (*p & 0xC0) {    in rose_parse_national()
   250  p += 2;    in rose_parse_national()
   258  if (*p == FAC_NATIONAL_RAND)    in rose_parse_national()
   259  facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF);    in rose_parse_national()
   260  p += 3;    in rose_parse_national()
   268  p += 4;    in rose_parse_national()
   276  l = p[1];    in rose_parse_national()
   279  if (*p == FAC_NATIONAL_DEST_DIGI) {    in rose_parse_national()
   283  memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN);    in rose_parse_national()
    [all …]

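
x25_parse_facilities() and rose_parse_national() both walk a facilities block in which the top two bits of each facility code select a class, and the class fixes how many octets the facility occupies: one, two or three parameter octets for the first three classes, and a length octet followed by that many octets for the last (the p += 2 / 3 / 4 and l = p[1] steps above). A simplified standalone sketch of that walk, with made-up constant names and no claim to match either protocol's exact facility codes:

#include <stdio.h>
#include <stddef.h>

#define FAC_CLASS_MASK 0xC0	/* top two bits of the facility code */
#define FAC_CLASS_A    0x00	/* code + 1 parameter octet  */
#define FAC_CLASS_B    0x40	/* code + 2 parameter octets */
#define FAC_CLASS_C    0x80	/* code + 3 parameter octets */
#define FAC_CLASS_D    0xC0	/* code + length octet + N octets */

/* Walk a facilities block; returns 0 if a facility runs past the buffer. */
static int parse_facilities(const unsigned char *p, size_t len)
{
	while (len > 0) {
		size_t need;

		switch (*p & FAC_CLASS_MASK) {
		case FAC_CLASS_A: need = 2; break;
		case FAC_CLASS_B: need = 3; break;
		case FAC_CLASS_C: need = 4; break;
		default:          /* FAC_CLASS_D: variable length */
			if (len < 2)
				return 0;
			need = 2 + p[1];
			break;
		}
		if (need > len)
			return 0;	/* truncated facility */
		printf("facility 0x%02x, %zu octets total\n", *p, need);
		p += need;
		len -= need;
	}
	return 1;
}

int main(void)
{
	/* One class-A facility (code 0x01) and one class-D facility with 2 octets. */
	const unsigned char buf[] = { 0x01, 0x81, 0xC9, 0x02, 0xAA, 0xBB };

	return parse_facilities(buf, sizeof(buf)) ? 0 : 1;
}
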
/net/sunrpc/auth_gss/
gss_rpc_xdr.c
    13  __be32 *p;    in gssx_enc_bool() local
    15  p = xdr_reserve_space(xdr, 4);    in gssx_enc_bool()
    16  if (unlikely(p == NULL))    in gssx_enc_bool()
    18  *p = v ? xdr_one : xdr_zero;    in gssx_enc_bool()
    24  __be32 *p;    in gssx_dec_bool() local
    26  p = xdr_inline_decode(xdr, 4);    in gssx_dec_bool()
    27  if (unlikely(p == NULL))    in gssx_dec_bool()
    29  *v = be32_to_cpu(*p);    in gssx_dec_bool()
    36  __be32 *p;    in gssx_enc_buffer() local
    38  p = xdr_reserve_space(xdr, sizeof(u32) + buf->len);    in gssx_enc_buffer()
    [all …]

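
gssx_enc_bool()/gssx_dec_bool() above follow the basic XDR rule: every item occupies a multiple of 4 octets, stored big-endian, and a boolean is one full 32-bit word holding 0 or 1; xdr_reserve_space() and xdr_inline_decode() hand back a pointer to those 4 bytes or NULL when the stream has no room. A standalone sketch of the same wire format against a plain byte buffer (hypothetical helpers, not the sunrpc xdr_stream API):

#include <stdint.h>
#include <string.h>
#include <assert.h>

/* Encode a boolean as a 4-byte big-endian XDR word; returns bytes written or 0. */
static size_t xdr_put_bool(uint8_t *buf, size_t space, int v)
{
	uint32_t word = v ? 1 : 0;

	if (space < 4)
		return 0;		/* mirrors xdr_reserve_space() failing */
	buf[0] = word >> 24;
	buf[1] = word >> 16;
	buf[2] = word >> 8;
	buf[3] = word;
	return 4;
}

/* Decode the 4-byte word back into an int; returns bytes consumed or 0. */
static size_t xdr_get_bool(const uint8_t *buf, size_t space, int *v)
{
	uint32_t word;

	if (space < 4)
		return 0;		/* mirrors xdr_inline_decode() returning NULL */
	word = (uint32_t)buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3];
	*v = (word != 0);
	return 4;
}

int main(void)
{
	uint8_t buf[4];
	int v = 0;

	assert(xdr_put_bool(buf, sizeof(buf), 1) == 4);
	assert(xdr_get_bool(buf, sizeof(buf), &v) == 4 && v == 1);
	return 0;
}
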
/net/ceph/
osdmap.c
    71  static int crush_decode_uniform_bucket(void **p, void *end,    in crush_decode_uniform_bucket() argument
    74  dout("crush_decode_uniform_bucket %p to %p\n", *p, end);    in crush_decode_uniform_bucket()
    75  ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);    in crush_decode_uniform_bucket()
    76  b->item_weight = ceph_decode_32(p);    in crush_decode_uniform_bucket()
    82  static int crush_decode_list_bucket(void **p, void *end,    in crush_decode_list_bucket() argument
    86  dout("crush_decode_list_bucket %p to %p\n", *p, end);    in crush_decode_list_bucket()
    93  ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);    in crush_decode_list_bucket()
    95  b->item_weights[j] = ceph_decode_32(p);    in crush_decode_list_bucket()
    96  b->sum_weights[j] = ceph_decode_32(p);    in crush_decode_list_bucket()
   103  static int crush_decode_tree_bucket(void **p, void *end,    in crush_decode_tree_bucket() argument
    [all …]

cls_lock_client.c
    36  void *p, *end;    in ceph_cls_lock() local
    56  p = page_address(lock_op_page);    in ceph_cls_lock()
    57  end = p + lock_op_buf_size;    in ceph_cls_lock()
    60  ceph_start_encoding(&p, 1, 1,    in ceph_cls_lock()
    62  ceph_encode_string(&p, end, lock_name, name_len);    in ceph_cls_lock()
    63  ceph_encode_8(&p, type);    in ceph_cls_lock()
    64  ceph_encode_string(&p, end, cookie, cookie_len);    in ceph_cls_lock()
    65  ceph_encode_string(&p, end, tag, tag_len);    in ceph_cls_lock()
    66  ceph_encode_string(&p, end, desc, desc_len);    in ceph_cls_lock()
    69  ceph_encode_timespec64(p, &mtime);    in ceph_cls_lock()
    [all …]

auth.c
   105  int ceph_auth_entity_name_encode(const char *name, void **p, void *end)    in ceph_auth_entity_name_encode() argument
   109  if (*p + 2*sizeof(u32) + len > end)    in ceph_auth_entity_name_encode()
   111  ceph_encode_32(p, CEPH_ENTITY_TYPE_CLIENT);    in ceph_auth_entity_name_encode()
   112  ceph_encode_32(p, len);    in ceph_auth_entity_name_encode()
   113  ceph_encode_copy(p, name, len);    in ceph_auth_entity_name_encode()
   124  void *p = monhdr + 1, *end = buf + len, *lenp;    in ceph_auth_build_hello() local
   134  ceph_encode_32(&p, CEPH_AUTH_UNKNOWN); /* no protocol, yet */    in ceph_auth_build_hello()
   136  lenp = p;    in ceph_auth_build_hello()
   137  p += sizeof(u32);    in ceph_auth_build_hello()
   139  ceph_decode_need(&p, end, 1 + sizeof(u32), bad);    in ceph_auth_build_hello()
    [all …]

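
ceph_auth_build_hello() shows a common encode-side trick: note where a length word belongs (lenp = p), skip over it (p += sizeof(u32)), encode the payload, and presumably backfill the length once the payload size is known. A minimal sketch of that pattern with a plain byte cursor; the helpers, values and payload are invented for the illustration, and only the little-endian integer layout mirrors what the ceph_encode_* helpers produce:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Write a 32-bit value little-endian and advance the cursor. */
static void put_le32(uint8_t **p, uint32_t v)
{
	(*p)[0] = v;
	(*p)[1] = v >> 8;
	(*p)[2] = v >> 16;
	(*p)[3] = v >> 24;
	*p += 4;
}

int main(void)
{
	uint8_t buf[64], *p = buf;	/* plenty of room for this fixed payload */
	uint8_t *lenp;
	const char payload[] = "client.admin";

	put_le32(&p, 1);			/* some header word before the body */

	lenp = p;				/* remember where the length goes */
	p += 4;					/* ...and skip it for now */

	put_le32(&p, (uint32_t)strlen(payload));	/* length-prefixed string */
	memcpy(p, payload, strlen(payload));
	p += strlen(payload);

	put_le32(&lenp, (uint32_t)(p - (lenp + 4)));	/* backfill the body length */

	printf("encoded %zu bytes\n", (size_t)(p - buf));
	return 0;
}
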
decode.c
    10  ceph_decode_entity_addr_versioned(void **p, void *end,    in ceph_decode_entity_addr_versioned() argument
    18  ret = ceph_start_decoding(p, end, 1, "entity_addr_t", &struct_v,    in ceph_decode_entity_addr_versioned()
    24  struct_end = *p + struct_len;    in ceph_decode_entity_addr_versioned()
    26  ceph_decode_copy_safe(p, end, &addr->type, sizeof(addr->type), bad);    in ceph_decode_entity_addr_versioned()
    28  ceph_decode_copy_safe(p, end, &addr->nonce, sizeof(addr->nonce), bad);    in ceph_decode_entity_addr_versioned()
    30  ceph_decode_32_safe(p, end, addr_len, bad);    in ceph_decode_entity_addr_versioned()
    36  ceph_decode_copy_safe(p, end, &addr->in_addr, addr_len, bad);    in ceph_decode_entity_addr_versioned()
    43  *p = struct_end;    in ceph_decode_entity_addr_versioned()
    50  ceph_decode_entity_addr_legacy(void **p, void *end,    in ceph_decode_entity_addr_legacy() argument
    56  ceph_decode_skip_n(p, end, 3, bad);    in ceph_decode_entity_addr_legacy()
    [all …]

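
The ceph decoders above share one calling convention: p is a cursor that every helper advances, end bounds the buffer, and the *_safe / ceph_decode_need() variants jump to a bad label when fewer bytes remain than requested. A standalone sketch of that convention (hypothetical helpers and message layout; ceph stores integers little-endian on the wire):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Fail-fast check: is there room for "need" more bytes between *p and end? */
static int decode_need(const uint8_t **p, const uint8_t *end, size_t need)
{
	return (size_t)(end - *p) >= need;
}

/* Consume a little-endian u32 from the cursor. */
static uint32_t decode_32(const uint8_t **p)
{
	uint32_t v = (*p)[0] | (*p)[1] << 8 | (*p)[2] << 16 | (uint32_t)(*p)[3] << 24;

	*p += 4;
	return v;
}

static int parse_addr(const uint8_t *buf, size_t len)
{
	const uint8_t *p = buf, *end = buf + len;
	uint32_t type, nonce, addr_len;

	if (!decode_need(&p, end, 8))
		goto bad;
	type = decode_32(&p);
	nonce = decode_32(&p);

	if (!decode_need(&p, end, 4))
		goto bad;
	addr_len = decode_32(&p);
	if (!decode_need(&p, end, addr_len))
		goto bad;
	p += addr_len;			/* skip the variable-length address blob */

	printf("type %u nonce %u addr_len %u\n",
	       (unsigned)type, (unsigned)nonce, (unsigned)addr_len);
	return 0;
bad:
	return -1;			/* truncated or malformed input */
}

int main(void)
{
	const uint8_t msg[] = { 1,0,0,0, 42,0,0,0, 4,0,0,0, 127,0,0,1 };

	return parse_addr(msg, sizeof(msg));
}
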
/net/ipv4/
inetpeer.c
   100  struct inet_peer *p;    in lookup() local
   111  p = rb_entry(parent, struct inet_peer, rb_node);    in lookup()
   112  cmp = inetpeer_addr_cmp(daddr, &p->daddr);    in lookup()
   114  if (!refcount_inc_not_zero(&p->refcnt))    in lookup()
   116  return p;    in lookup()
   120  gc_stack[(*gc_cnt)++] = p;    in lookup()
   145  struct inet_peer *p;    in inet_peer_gc() local
   159  p = gc_stack[i];    in inet_peer_gc()
   164  delta = (__u32)jiffies - READ_ONCE(p->dtime);    in inet_peer_gc()
   166  if (delta < ttl || !refcount_dec_if_one(&p->refcnt))    in inet_peer_gc()
    [all …]

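
inetpeer's lookup() above only returns an entry when refcount_inc_not_zero() succeeds: an entry whose reference count has already dropped to zero is being torn down and must be treated as absent. The same "take a reference unless the object is already dying" primitive can be sketched in userspace with a compare-and-swap loop (hypothetical helper, not the kernel refcount API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Increment *ref only if it is currently non-zero; returns true on success. */
static bool ref_get_unless_zero(atomic_uint *ref)
{
	unsigned int old = atomic_load(ref);

	while (old != 0) {
		/* On failure, old is reloaded with the current value; retry. */
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
	}
	return false;	/* object already dying: caller must not use it */
}

int main(void)
{
	atomic_uint live = 2, dying = 0;

	printf("live:  %s\n", ref_get_unless_zero(&live) ? "got ref" : "skipped");
	printf("dying: %s\n", ref_get_unless_zero(&dying) ? "got ref" : "skipped");
	return 0;
}
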
/net/tipc/
name_table.c
   182  struct rb_node *p, *r;    in service_range_match_next() local
   197  while ((p = rb_parent(n)) && n == p->rb_right)    in service_range_match_next()
   198  n = p;    in service_range_match_next()
   199  if (!p)    in service_range_match_next()
   203  sr = service_range_entry(p);    in service_range_match_next()
   209  n = p;    in service_range_match_next()
   233  struct publication *p = kzalloc(sizeof(*p), GFP_ATOMIC);    in tipc_publ_create() local
   235  if (!p)    in tipc_publ_create()
   238  p->sr = ua->sr;    in tipc_publ_create()
   239  p->sk = *sk;    in tipc_publ_create()
    [all …]

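
The climb in service_range_match_next() above (while the node is its parent's right child, keep going up) is the second half of the textbook in-order successor algorithm for a tree with parent pointers. A self-contained sketch of the full algorithm on a plain binary search tree, not the kernel rbtree API:

#include <stddef.h>
#include <stdio.h>

struct node {
	int key;
	struct node *left, *right, *parent;
};

/*
 * In-order successor: either the leftmost node of the right subtree, or the
 * first ancestor reached from its left side (the climb seen in
 * service_range_match_next()).  Returns NULL for the largest key.
 */
static struct node *inorder_next(struct node *n)
{
	struct node *p;

	if (n->right) {
		n = n->right;
		while (n->left)
			n = n->left;
		return n;
	}
	while ((p = n->parent) && n == p->right)
		n = p;
	return p;
}

int main(void)
{
	struct node a = { .key = 1 }, b = { .key = 2 }, c = { .key = 3 };

	/* Tree: b is the root, a its left child, c its right child. */
	b.left = &a;  a.parent = &b;
	b.right = &c; c.parent = &b;

	for (struct node *n = &a; n; n = inorder_next(n))
		printf("%d\n", n->key);	/* prints 1 2 3 */
	return 0;
}
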
/net/sunrpc/
auth_unix.c
   115  __be32 *p, *cred_len, *gidarr_len;    in unx_marshal() local
   123  p = xdr_reserve_space(xdr, 3 * sizeof(*p));    in unx_marshal()
   124  if (!p)    in unx_marshal()
   126  *p++ = rpc_auth_unix;    in unx_marshal()
   127  cred_len = p++;    in unx_marshal()
   128  *p++ = xdr_zero; /* stamp */    in unx_marshal()
   132  p = xdr_reserve_space(xdr, 3 * sizeof(*p));    in unx_marshal()
   133  if (!p)    in unx_marshal()
   135  *p++ = cpu_to_be32(from_kuid_munged(userns, cred->cr_cred->fsuid));    in unx_marshal()
   136  *p++ = cpu_to_be32(from_kgid_munged(userns, cred->cr_cred->fsgid));    in unx_marshal()
    [all …]

/net/netfilter/ipvs/
ip_vs_conn.c
   121  static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,    in ip_vs_conn_hashkey_param() argument
   127  if (p->pe_data && p->pe->hashkey_raw)    in ip_vs_conn_hashkey_param()
   128  return p->pe->hashkey_raw(p, ip_vs_conn_rnd, inverse) &    in ip_vs_conn_hashkey_param()
   132  addr = p->caddr;    in ip_vs_conn_hashkey_param()
   133  port = p->cport;    in ip_vs_conn_hashkey_param()
   135  addr = p->vaddr;    in ip_vs_conn_hashkey_param()
   136  port = p->vport;    in ip_vs_conn_hashkey_param()
   139  return ip_vs_conn_hashkey(p->ipvs, p->af, p->protocol, addr, port);    in ip_vs_conn_hashkey_param()
   144  struct ip_vs_conn_param p;    in ip_vs_conn_hashkey_conn() local
   147  &cp->caddr, cp->cport, NULL, 0, &p);    in ip_vs_conn_hashkey_conn()
    [all …]

ip_vs_pe_sip.c
    67  ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)    in ip_vs_sip_fill_param() argument
    74  retc = ip_vs_fill_iph_skb(p->af, skb, false, &iph);    in ip_vs_sip_fill_param()
    98  p->pe_data = kmemdup(dptr + matchoff, matchlen, GFP_ATOMIC);    in ip_vs_sip_fill_param()
    99  if (!p->pe_data)    in ip_vs_sip_fill_param()
   102  p->pe_data_len = matchlen;    in ip_vs_sip_fill_param()
   107  static bool ip_vs_sip_ct_match(const struct ip_vs_conn_param *p,    in ip_vs_sip_ct_match() argument
   113  if (ct->af == p->af &&    in ip_vs_sip_ct_match()
   114  ip_vs_addr_equal(p->af, p->caddr, &ct->caddr) &&    in ip_vs_sip_ct_match()
   117  ip_vs_addr_equal(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,    in ip_vs_sip_ct_match()
   118  p->vaddr, &ct->vaddr) &&    in ip_vs_sip_ct_match()
    [all …]

/net/ipv6/ila/
ila_common.c
    16  void ila_init_saved_csum(struct ila_params *p)    in ila_init_saved_csum() argument
    18  if (!p->locator_match.v64)    in ila_init_saved_csum()
    21  p->csum_diff = compute_csum_diff8(    in ila_init_saved_csum()
    22  (__be32 *)&p->locator,    in ila_init_saved_csum()
    23  (__be32 *)&p->locator_match);    in ila_init_saved_csum()
    26  static __wsum get_csum_diff_iaddr(struct ila_addr *iaddr, struct ila_params *p)    in get_csum_diff_iaddr() argument
    28  if (p->locator_match.v64)    in get_csum_diff_iaddr()
    29  return p->csum_diff;    in get_csum_diff_iaddr()
    31  return compute_csum_diff8((__be32 *)&p->locator,    in get_csum_diff_iaddr()
    35  static __wsum get_csum_diff(struct ipv6hdr *ip6h, struct ila_params *p)    in get_csum_diff() argument
    [all …]

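
ila_init_saved_csum() above caches a checksum difference between the two 8-byte locators so that checksums covering the address can later be fixed up with a single addition per packet instead of a full recompute. The arithmetic behind such deltas is the RFC 1624 incremental update of the one's-complement Internet checksum; a simplified standalone sketch (not the kernel's compute_csum_diff8(), with byte-order and folding details reduced to the basics):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* One's-complement accumulate of big-endian 16-bit words, folding carries. */
static uint32_t csum_add(uint32_t sum, const uint8_t *data, size_t len)
{
	for (size_t i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)data[i] << 8 | data[i + 1];
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/*
 * RFC 1624, eqn. 3: HC' = ~(~HC + ~m + m').  Here m is the old 8-byte
 * locator and m' the new one, mirroring what ila caches as csum_diff.
 */
static uint16_t checksum_after_rewrite(uint16_t old_csum,
				       const uint8_t old_loc[8],
				       const uint8_t new_loc[8])
{
	uint8_t not_old[8];
	uint32_t sum = (uint16_t)~old_csum;

	for (int i = 0; i < 8; i++)
		not_old[i] = (uint8_t)~old_loc[i];

	sum = csum_add(sum, not_old, 8);
	sum = csum_add(sum, new_loc, 8);
	return (uint16_t)~sum;
}

int main(void)
{
	const uint8_t oldloc[8] = { 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 1 };
	const uint8_t newloc[8] = { 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 2 };

	printf("0x%04x\n", checksum_after_rewrite(0x1234, oldloc, newloc));
	return 0;
}
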