/net/netfilter/ |
D | nft_exthdr.c |
    21   u8 offset;  member
    29   static unsigned int optlen(const u8 *opt, unsigned int offset)  in optlen() argument
    32   if (opt[offset] <= TCPOPT_NOP || opt[offset + 1] == 0)  in optlen()
    35   return opt[offset + 1];  in optlen()
    38   static int nft_skb_copy_to_reg(const struct sk_buff *skb, int offset, u32 *dest, unsigned int len)  in nft_skb_copy_to_reg() argument
    43   return skb_copy_bits(skb, offset, dest, len);  in nft_skb_copy_to_reg()
    52   unsigned int offset = 0;  in nft_exthdr_ipv6_eval() local
    58   err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);  in nft_exthdr_ipv6_eval()
    65   offset += priv->offset;  in nft_exthdr_ipv6_eval()
    67   if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0)  in nft_exthdr_ipv6_eval()
    [all …]
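The optlen() helper above is the guard that keeps a TCP option walk from stalling: EOL and NOP options are a single byte, and a corrupt zero length byte is treated the same way. A minimal userspace sketch of that walk (the TCPOPT_* values match the kernel's definitions; the sample option block is invented for the demo):

#include <stdio.h>

#define TCPOPT_EOL 0
#define TCPOPT_NOP 1

static unsigned int optlen(const unsigned char *opt, unsigned int offset)
{
    /* EOL and NOP are 1 byte; a zero length byte would stall the walk */
    if (opt[offset] <= TCPOPT_NOP || opt[offset + 1] == 0)
        return 1;
    return opt[offset + 1];
}

int main(void)
{
    /* MSS(4 bytes), NOP, NOP, SACK-permitted(2) — a typical SYN block */
    const unsigned char opts[] = { 2, 4, 0x05, 0xb4, 1, 1, 4, 2 };
    unsigned int off = 0;

    while (off < sizeof(opts)) {
        printf("kind %u at offset %u, len %u\n",
               opts[off], off, optlen(opts, off));
        off += optlen(opts, off);
    }
    return 0;
}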
|
D | nft_payload.c |
    43   nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)  in nft_payload_copy_vlan() argument
    52   offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)  in nft_payload_copy_vlan()
    56   if (offset < VLAN_ETH_HLEN + vlan_hlen) {  in nft_payload_copy_vlan()
    65   if (offset + len > VLAN_ETH_HLEN + vlan_hlen)  in nft_payload_copy_vlan()
    66   ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;  in nft_payload_copy_vlan()
    68   memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);  in nft_payload_copy_vlan()
    75   offset = ETH_HLEN + vlan_hlen;  in nft_payload_copy_vlan()
    77   offset -= VLAN_HLEN + vlan_hlen;  in nft_payload_copy_vlan()
    80   return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;  in nft_payload_copy_vlan()
    129  int offset;  in nft_payload_eval() local
    [all …]
|
D | nft_set_pipapo_avx2.c |
    101  int offset = start % BITS_PER_LONG;  in nft_pipapo_avx2_fill() local
    107  *data |= BIT(offset);  in nft_pipapo_avx2_fill()
    111  if (likely(len < BITS_PER_LONG || offset)) {  in nft_pipapo_avx2_fill()
    112  if (likely(len + offset <= BITS_PER_LONG)) {  in nft_pipapo_avx2_fill()
    113  *data |= GENMASK(len - 1 + offset, offset);  in nft_pipapo_avx2_fill()
    117  *data |= ~0UL << offset;  in nft_pipapo_avx2_fill()
    118  len -= BITS_PER_LONG - offset;  in nft_pipapo_avx2_fill()
    152  static int nft_pipapo_avx2_refill(int offset, unsigned long *map,  in nft_pipapo_avx2_refill() argument
    162  int i = (offset + (x)) * BITS_PER_LONG + r; \  in nft_pipapo_avx2_refill()
    215  struct nft_pipapo_field *f, int offset,  in nft_pipapo_avx2_lookup_4b_2() argument
    [all …]
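nft_pipapo_avx2_fill() above sets a run of bits that may straddle word boundaries, using GENMASK() for the partial words. A simplified standalone version of that fill logic, with local stand-ins for the kernel's BITS_PER_LONG and GENMASK macros (same head/body/tail split, minus the kernel's likely() fast-path hints):

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
/* Userspace stand-in for the kernel's GENMASK(h, l) */
#define GENMASK(h, l) \
    (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

/* Set `len` bits starting at bit `start` in a word array. */
static void fill_bits(unsigned long *map, unsigned int start, unsigned int len)
{
    unsigned long *data = map + start / BITS_PER_LONG;
    unsigned int offset = start % BITS_PER_LONG;

    if (offset + len <= BITS_PER_LONG) {
        /* Run fits inside a single word */
        *data |= GENMASK(offset + len - 1, offset);
        return;
    }
    /* Head: from `offset` to the end of the first word */
    *data++ |= ~0UL << offset;
    len -= BITS_PER_LONG - offset;
    /* Body: whole words */
    while (len >= BITS_PER_LONG) {
        *data++ = ~0UL;
        len -= BITS_PER_LONG;
    }
    /* Tail: remaining low bits of the last word */
    if (len)
        *data |= GENMASK(len - 1, 0);
}

int main(void)
{
    unsigned long map[2] = { 0, 0 };

    fill_bits(map, 60, 8);  /* crosses the word boundary */
    printf("%lx %lx\n", map[0], map[1]);
    return 0;
}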
|
D | nft_numgen.c |
    21   u32 offset;  member
    33   return nval + priv->offset;  in nft_ng_inc_gen()
    59   priv->offset = ntohl(nla_get_be32(tb[NFTA_NG_OFFSET]));  in nft_ng_inc_init()
    65   if (priv->offset + priv->modulus - 1 < priv->offset)  in nft_ng_inc_init()
    75   u32 modulus, enum nft_ng_types type, u32 offset)  in nft_ng_dump() argument
    83   if (nla_put_be32(skb, NFTA_NG_OFFSET, htonl(offset)))  in nft_ng_dump()
    97   priv->offset);  in nft_ng_inc_dump()
    103  u32 offset;  member
    108  return reciprocal_scale(get_random_u32(), priv->modulus) + priv->offset;  in nft_ng_random_gen()
    127  priv->offset = ntohl(nla_get_be32(tb[NFTA_NG_OFFSET]));  in nft_ng_random_init()
    [all …]
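Two patterns recur in the nft_numgen.c matches: the u32 wraparound guard `offset + modulus - 1 < offset`, and reciprocal_scale(), which maps a random 32-bit value into [0, modulus) with a multiply and shift instead of a division. A sketch of both together (the sample offset and modulus values are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's reciprocal_scale(): maps a
 * uniform 32-bit value into [0, ep_ro) without a division. */
static inline uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
    return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
    uint32_t offset = 1000, modulus = 16;

    /* Same guard as nft_ng_inc_init(): generated values live in
     * [offset, offset + modulus - 1], so reject ranges that wrap
     * around u32 (e.g. offset = 0xffffff00, modulus = 0x200). */
    if (offset + modulus - 1 < offset) {
        fprintf(stderr, "offset + modulus overflows u32\n");
        return 1;
    }
    printf("sample: %u\n", offset + reciprocal_scale(0xdeadbeefu, modulus));
    return 0;
}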
|
D | nft_hash.c |
    23   u32 offset;  member
    37   regs->data[priv->dreg] = h + priv->offset;  in nft_jhash_eval()
    43   u32 offset;  member
    56   regs->data[priv->dreg] = h + priv->offset;  in nft_symhash_eval()
    84   priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET]));  in nft_jhash_init()
    102  if (priv->offset + priv->modulus - 1 < priv->offset)  in nft_jhash_init()
    127  priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET]));  in nft_symhash_init()
    133  if (priv->offset + priv->modulus - 1 < priv->offset)  in nft_symhash_init()
    157  if (priv->offset != 0)  in nft_jhash_dump()
    158  if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))  in nft_jhash_dump()
    [all …]
|
D | nf_flow_table_ip.c |
    168  u32 offset)  in nf_flow_tuple_ip() argument
    174  if (!pskb_may_pull(skb, sizeof(*iph) + offset))  in nf_flow_tuple_ip()
    177  iph = (struct iphdr *)(skb_network_header(skb) + offset);  in nf_flow_tuple_ip()
    184  thoff += offset;  in nf_flow_tuple_ip()
    203  iph = (struct iphdr *)(skb_network_header(skb) + offset);  in nf_flow_tuple_ip()
    250  u32 *offset)  in nf_flow_skb_encap_protocol() argument
    258  *offset += VLAN_HLEN;  in nf_flow_skb_encap_protocol()
    264  *offset += PPPOE_SES_HLEN;  in nf_flow_skb_encap_protocol()
    328  u32 hdrsize, offset = 0;  in nf_flow_offload_ip_hook() local
    336  !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &offset))  in nf_flow_offload_ip_hook()
    [all …]
|
D | nf_flow_table_offload.c |
    30   (__match)->dissector.offset[__type] = \
    194  enum flow_action_mangle_base htype, u32 offset,  in flow_offload_mangle() argument
    199  entry->mangle.offset = offset;  in flow_offload_mangle()
    324  u32 offset;  in flow_offload_ipv4_snat() local
    329  offset = offsetof(struct iphdr, saddr);  in flow_offload_ipv4_snat()
    333  offset = offsetof(struct iphdr, daddr);  in flow_offload_ipv4_snat()
    339  flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,  in flow_offload_ipv4_snat()
    351  u32 offset;  in flow_offload_ipv4_dnat() local
    356  offset = offsetof(struct iphdr, daddr);  in flow_offload_ipv4_dnat()
    360  offset = offsetof(struct iphdr, saddr);  in flow_offload_ipv4_dnat()
    [all …]
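flow_offload_ipv4_snat() and _dnat() above pick the byte offset to mangle with offsetof() on struct iphdr, choosing saddr or daddr by flow direction. A standalone illustration of that idiom, using a simplified header layout (field order follows RFC 791; the real struct iphdr packs version/ihl into bitfields):

#include <stddef.h>
#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for struct iphdr (no bitfields, IPv4 only) */
struct iphdr_s {
    uint8_t  version_ihl;
    uint8_t  tos;
    uint16_t tot_len;
    uint16_t id;
    uint16_t frag_off;
    uint8_t  ttl;
    uint8_t  protocol;
    uint16_t check;
    uint32_t saddr;
    uint32_t daddr;
};

int main(void)
{
    /* Direction decides which address a NAT mangle rewrites; the byte
     * offset handed to the hardware comes straight from offsetof(). */
    int original_dir = 1;
    size_t off = original_dir ? offsetof(struct iphdr_s, saddr)
                              : offsetof(struct iphdr_s, daddr);

    printf("mangle offset: %zu\n", off);  /* 12 for saddr, 16 for daddr */
    return 0;
}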
|
/net/sunrpc/auth_gss/ |
D | gss_krb5_wrap.c |
    49   gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)  in gss_krb5_add_padding() argument
    51   int padding = gss_krb5_padding(blocksize, buf->len - offset);  in gss_krb5_add_padding()
    83   unsigned int offset = (buf->page_base + len - 1)  in gss_krb5_remove_padding() local
    86   pad = *(ptr + offset);  in gss_krb5_remove_padding()
    158  gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,  in gss_wrap_kerberos_v1() argument
    178  gss_krb5_add_padding(buf, offset, blocksize);  in gss_wrap_kerberos_v1()
    179  BUG_ON((buf->len - offset) % blocksize);  in gss_wrap_kerberos_v1()
    180  plainlen = conflen + buf->len - offset;  in gss_wrap_kerberos_v1()
    184  (buf->len - offset);  in gss_wrap_kerberos_v1()
    186  ptr = buf->head[0].iov_base + offset;  in gss_wrap_kerberos_v1()
    [all …]
|
D | gss_krb5_crypto.c |
    355  sg->offset);  in encryptor()
    357  sg->offset);  in encryptor()
    383  sg->offset + sg->length - fraglen);  in encryptor()
    397  int offset, struct page **pages)  in gss_encrypt_xdr_buf() argument
    403  BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);  in gss_encrypt_xdr_buf()
    410  desc.pos = offset;  in gss_encrypt_xdr_buf()
    419  ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);  in gss_encrypt_xdr_buf()
    445  sg->offset);  in decryptor()
    468  sg->offset + sg->length - fraglen);  in decryptor()
    480  int offset)  in gss_decrypt_xdr_buf() argument
    [all …]
|
/net/rds/ |
D | info.c |
    67   unsigned long offset;  member
    75   int offset = optname - RDS_INFO_FIRST;  in rds_info_register_func() local
    80   BUG_ON(rds_info_funcs[offset]);  in rds_info_register_func()
    81   rds_info_funcs[offset] = func;  in rds_info_register_func()
    88   int offset = optname - RDS_INFO_FIRST;  in rds_info_deregister_func() local
    93   BUG_ON(rds_info_funcs[offset] != func);  in rds_info_deregister_func()
    94   rds_info_funcs[offset] = NULL;  in rds_info_deregister_func()
    124  this = min(bytes, PAGE_SIZE - iter->offset);  in rds_info_copy()
    128  iter->offset, this, data, bytes);  in rds_info_copy()
    130  memcpy(iter->addr + iter->offset, data, this);  in rds_info_copy()
    [all …]
|
/net/core/ |
D | datagram.c |
    412  static int __skb_datagram_iter(const struct sk_buff *skb, int offset,  in __skb_datagram_iter() argument
    418  int i, copy = start - offset, start_off = offset, n;  in __skb_datagram_iter()
    426  skb->data + offset, copy, data, to);  in __skb_datagram_iter()
    427  offset += n;  in __skb_datagram_iter()
    439  WARN_ON(start > offset + len);  in __skb_datagram_iter()
    442  if ((copy = end - offset) > 0) {  in __skb_datagram_iter()
    449  vaddr + skb_frag_off(frag) + offset - start,  in __skb_datagram_iter()
    452  offset += n;  in __skb_datagram_iter()
    464  WARN_ON(start > offset + len);  in __skb_datagram_iter()
    467  if ((copy = end - offset) > 0) {  in __skb_datagram_iter()
    [all …]
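__skb_datagram_iter() walks the skb head and its page fragments with the classic start/end/offset arithmetic: each region contributes `end - offset` bytes once the running offset falls inside it. The same arithmetic over plain memory segments (segment contents invented for the demo):

#include <stdio.h>
#include <string.h>

struct seg {
    const char *data;
    int len;
};

/* Copy `len` bytes starting at logical `offset` out of a segment list —
 * the start/end walk __skb_datagram_iter() does over head and frags. */
static int seg_copy(const struct seg *segs, int nsegs,
                    int offset, char *to, int len)
{
    int start = 0;

    for (int i = 0; i < nsegs; i++) {
        int end = start + segs[i].len;
        int copy = end - offset;

        if (copy > 0) {
            if (copy > len)
                copy = len;
            memcpy(to, segs[i].data + offset - start, copy);
            to += copy;
            offset += copy;
            len -= copy;
            if (!len)
                return 0;
        }
        start = end;
    }
    return -1;  /* ran off the end */
}

int main(void)
{
    const struct seg segs[] = { { "hello ", 6 }, { "fragmented ", 11 },
                                { "world", 5 } };
    char out[16] = { 0 };

    if (!seg_copy(segs, 3, 4, out, 12))
        printf("'%s'\n", out);  /* 'o fragmented' */
    return 0;
}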
|
D | skbuff.c |
    2110  int offset = skb_headlen(skb);  in ___pskb_trim() local
    2120  if (offset >= len)  in ___pskb_trim()
    2124  int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);  in ___pskb_trim()
    2127  offset = end;  in ___pskb_trim()
    2131  skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);  in ___pskb_trim()
    2146  int end = offset + frag->len;  in ___pskb_trim()
    2162  offset = end;  in ___pskb_trim()
    2167  unlikely((err = pskb_trim(frag, len - offset))))  in ___pskb_trim()
    2203  int offset = skb_checksum_start_offset(skb) + skb->csum_offset;  in pskb_trim_rcsum_slow() local
    2205  if (offset + sizeof(__sum16) > hdlen)  in pskb_trim_rcsum_slow()
    [all …]
|
D | flow_dissector.c |
    55   BUG_ON(key->offset > USHRT_MAX);  in skb_flow_dissector_init()
    60   flow_dissector->offset[key->key_id] = key->offset;  in skb_flow_dissector_init()
    534  int offset = 0;  in __skb_flow_dissect_gre() local
    558  offset += sizeof(struct gre_base_hdr);  in __skb_flow_dissect_gre()
    561  offset += sizeof_field(struct gre_full_hdr, csum) +  in __skb_flow_dissect_gre()
    568  keyid = __skb_header_pointer(skb, *p_nhoff + offset,  in __skb_flow_dissect_gre()
    584  offset += sizeof_field(struct gre_full_hdr, key);  in __skb_flow_dissect_gre()
    588  offset += sizeof_field(struct pptp_gre_header, seq);  in __skb_flow_dissect_gre()
    595  eth = __skb_header_pointer(skb, *p_nhoff + offset,  in __skb_flow_dissect_gre()
    601  offset += sizeof(*eth);  in __skb_flow_dissect_gre()
    [all …]
|
/net/ipv6/ |
D | exthdrs_core.c |
    116  int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type)  in ipv6_find_tlv() argument
    123  if (offset + 2 > packet_len)  in ipv6_find_tlv()
    125  hdr = (struct ipv6_opt_hdr *)(nh + offset);  in ipv6_find_tlv()
    128  if (offset + len > packet_len)  in ipv6_find_tlv()
    131  offset += 2;  in ipv6_find_tlv()
    135  int opttype = nh[offset];  in ipv6_find_tlv()
    139  return offset;  in ipv6_find_tlv()
    148  optlen = nh[offset + 1] + 2;  in ipv6_find_tlv()
    153  offset += optlen;  in ipv6_find_tlv()
    188  int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,  in ipv6_find_hdr() argument
    [all …]
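ipv6_find_tlv() above scans the option area one TLV at a time: Pad1 is a lone type byte, everything else carries a length byte, and every advance is bounds-checked. A condensed userspace version of that scan (IPV6_TLV_* values as in the kernel headers; the sample option bytes are made up):

#include <stdio.h>

#define IPV6_TLV_PAD1 0
#define IPV6_TLV_PADN 1

/* Find a TLV of `type` inside an options area, or return -1.
 * Mirrors ipv6_find_tlv(): Pad1 is a single byte, everything
 * else is type, length, then `length` bytes of data. */
static int find_tlv(const unsigned char *opts, int len, int off, int type)
{
    while (off < len) {
        int opttype = opts[off];
        int optlen;

        if (opttype == type)
            return off;
        if (opttype == IPV6_TLV_PAD1) {
            optlen = 1;
        } else {
            if (off + 1 >= len)
                return -1;
            optlen = opts[off + 1] + 2;
        }
        if (off + optlen > len)
            return -1;
        off += optlen;
    }
    return -1;
}

int main(void)
{
    /* Pad1, PadN (2 bytes total), then a type-5 (router alert) TLV */
    const unsigned char opts[] = { 0, 1, 0, 5, 2, 0, 0 };

    printf("type 5 at offset %d\n", find_tlv(opts, sizeof(opts), 0, 5));
    return 0;
}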
|
D | mcast_snoop.c |
    18   unsigned int offset = skb_network_offset(skb) + sizeof(*ip6h);  in ipv6_mc_check_ip6hdr() local
    20   if (!pskb_may_pull(skb, offset))  in ipv6_mc_check_ip6hdr()
    28   len = offset + ntohs(ip6h->payload_len);  in ipv6_mc_check_ip6hdr()
    29   if (skb->len < len || len <= offset)  in ipv6_mc_check_ip6hdr()
    32   skb_set_transport_header(skb, offset);  in ipv6_mc_check_ip6hdr()
    40   int offset;  in ipv6_mc_check_exthdrs() local
    50   offset = skb_network_offset(skb) + sizeof(*ip6h);  in ipv6_mc_check_exthdrs()
    51   offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);  in ipv6_mc_check_exthdrs()
    53   if (offset < 0)  in ipv6_mc_check_exthdrs()
    59   skb_set_transport_header(skb, offset);  in ipv6_mc_check_exthdrs()
|
D | calipso.c |
    687  static int calipso_pad_write(unsigned char *buf, unsigned int offset,  in calipso_pad_write() argument
    697  buf[offset] = IPV6_TLV_PAD1;  in calipso_pad_write()
    700  buf[offset] = IPV6_TLV_PADN;  in calipso_pad_write()
    701  buf[offset + 1] = count - 2;  in calipso_pad_write()
    703  memset(buf + offset + 2, 0, count - 2);  in calipso_pad_write()
    808  static int calipso_tlv_len(struct ipv6_opt_hdr *opt, unsigned int offset)  in calipso_tlv_len() argument
    813  if (offset < sizeof(*opt) || offset >= opt_len)  in calipso_tlv_len()
    815  if (tlv[offset] == IPV6_TLV_PAD1)  in calipso_tlv_len()
    817  if (offset + 1 >= opt_len)  in calipso_tlv_len()
    819  tlv_len = tlv[offset + 1] + 2;  in calipso_tlv_len()
    [all …]
|
D | output_core.c |
    66   unsigned int offset = sizeof(struct ipv6hdr);  in ip6_find_1stfragopt() local
    72   while (offset <= packet_len) {  in ip6_find_1stfragopt()
    84   if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)  in ip6_find_1stfragopt()
    88   return offset;  in ip6_find_1stfragopt()
    91   return offset;  in ip6_find_1stfragopt()
    94   if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)  in ip6_find_1stfragopt()
    98   offset);  in ip6_find_1stfragopt()
    99   offset += ipv6_optlen(exthdr);  in ip6_find_1stfragopt()
    100  if (offset > IPV6_MAXPLEN)  in ip6_find_1stfragopt()
|
/net/ethtool/ |
D | eeprom.c |
    10   u32 offset;  member
    33   u32 offset = request->offset;  in fallback_set_params() local
    37   offset = request->page * ETH_MODULE_EEPROM_PAGE_LEN + offset;  in fallback_set_params()
    41   offset += ETH_MODULE_EEPROM_PAGE_LEN * 2;  in fallback_set_params()
    43   if (offset >= modinfo->eeprom_len)  in fallback_set_params()
    48   eeprom->offset = offset;  in fallback_set_params()
    114  page_data.offset = request->offset;  in eeprom_prepare_data()
    159  request->offset = nla_get_u32(tb[ETHTOOL_A_MODULE_EEPROM_OFFSET]);  in eeprom_parse_request()
    169  if (request->page && request->offset < ETH_MODULE_EEPROM_PAGE_LEN) {  in eeprom_parse_request()
    175  if (request->offset < ETH_MODULE_EEPROM_PAGE_LEN &&  in eeprom_parse_request()
    [all …]
|
/net/wireless/ |
D | debugfs.c |
    41   char *buf, int buf_size, int offset)  in ht_print_chan() argument
    43   if (WARN_ON(offset > buf_size))  in ht_print_chan()
    47   return scnprintf(buf + offset,  in ht_print_chan()
    48   buf_size - offset,  in ht_print_chan()
    52   return scnprintf(buf + offset,  in ht_print_chan()
    53   buf_size - offset,  in ht_print_chan()
    68   unsigned int offset = 0, buf_size = PAGE_SIZE, i;  in ht40allow_map_read() local
    82   offset += ht_print_chan(&sband->channels[i],  in ht40allow_map_read()
    83   buf, buf_size, offset);  in ht40allow_map_read()
    86   r = simple_read_from_buffer(user_buf, count, ppos, buf, offset);  in ht40allow_map_read()
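ht40allow_map_read() builds its output with the offset-accumulating scnprintf() idiom: each call appends at buf + offset and can never write past the buffer, because scnprintf() returns the bytes actually written. A userspace sketch of the same loop, with a small stand-in for scnprintf() built on vsnprintf():

#include <stdarg.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's scnprintf(): returns the number
 * of bytes actually written, not the would-be length like snprintf(). */
static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
    va_list args;
    int n;

    if (!size)
        return 0;
    va_start(args, fmt);
    n = vsnprintf(buf, size, fmt, args);
    va_end(args);
    if (n < 0)
        return 0;
    return n < (int)size ? n : (int)size - 1;
}

int main(void)
{
    char buf[32];
    unsigned int offset = 0;

    /* Same append pattern as ht40allow_map_read(): each call writes
     * at buf + offset and shrinks the space that is left, so the
     * output silently truncates instead of overflowing. */
    for (int ch = 1; ch <= 14; ch++)
        offset += scnprintf(buf + offset, sizeof(buf) - offset,
                            "ch%d ", ch);
    printf("%u bytes: %s\n", offset, buf);
    return 0;
}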
|
/net/netlabel/ |
D | netlabel_kapi.c |
    555  u32 offset,  in _netlbl_catmap_getnode() argument
    564  if (offset < iter->startbit)  in _netlbl_catmap_getnode()
    566  while (iter && offset >= (iter->startbit + NETLBL_CATMAP_SIZE)) {  in _netlbl_catmap_getnode()
    570  if (iter == NULL || offset < iter->startbit)  in _netlbl_catmap_getnode()
    585  iter->startbit = offset & ~(NETLBL_CATMAP_SIZE - 1);  in _netlbl_catmap_getnode()
    608  int netlbl_catmap_walk(struct netlbl_lsm_catmap *catmap, u32 offset)  in netlbl_catmap_walk() argument
    615  iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);  in netlbl_catmap_walk()
    618  if (offset > iter->startbit) {  in netlbl_catmap_walk()
    619  offset -= iter->startbit;  in netlbl_catmap_walk()
    620  idx = offset / NETLBL_CATMAP_MAPSIZE;  in netlbl_catmap_walk()
    [all …]
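_netlbl_catmap_getnode() locates the node covering a category bit by aligning the offset down with `offset & ~(NETLBL_CATMAP_SIZE - 1)`, which only works because the node size is a power of two. The trick in isolation (CATMAP_SIZE here is a local guess at the real constant's value; the masking idiom is the point):

#include <stdio.h>
#include <stdint.h>

/* Power-of-two size of one catmap node, standing in for
 * NETLBL_CATMAP_SIZE (words-per-node times bits-per-word). */
#define CATMAP_SIZE 256u

int main(void)
{
    uint32_t offset = 700;

    /* Clearing the low bits aligns the offset down to the start of
     * its node; valid only because CATMAP_SIZE is a power of two. */
    uint32_t startbit = offset & ~(CATMAP_SIZE - 1);

    printf("offset %u lives in the node starting at bit %u\n",
           offset, startbit);  /* 700 -> 512 */
    return 0;
}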
|
/net/xfrm/ |
D | espintcp.c |
    65   err = skb_copy_bits(skb, rxm->offset + 2, &data, 1);  in espintcp_rcv()
    85   err = skb_copy_bits(skb, rxm->offset + 2, &nonesp_marker,  in espintcp_rcv()
    94   if (!__pskb_pull(skb, rxm->offset + 2)) {  in espintcp_rcv()
    119  if (skb->len < rxm->offset + 2)  in espintcp_parse()
    122  err = skb_copy_bits(skb, rxm->offset, &blen, sizeof(blen));  in espintcp_parse()
    192  emsg->offset, emsg->len);  in espintcp_sendskb_locked()
    197  emsg->offset += ret;  in espintcp_sendskb_locked()
    217  size_t size = sg->length - emsg->offset;  in espintcp_sendskmsg_locked()
    218  int offset = sg->offset + emsg->offset;  in espintcp_sendskmsg_locked() local
    221  emsg->offset = 0;  in espintcp_sendskmsg_locked()
    [all …]
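espintcp_parse() reads a 2-byte length prefix at rxm->offset to delimit each message in the TCP stream (RFC 8229 encapsulation), and espintcp_rcv() then pulls those two bytes off before handing the payload on. A toy parser for that framing; whether the length field counts the prefix itself is an assumption here, so verify against the RFC before relying on the convention:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Parse one frame out of `buf`: 2-byte big-endian length, then payload.
 * The length is assumed to cover the whole frame, prefix included. */
static int parse_frame(const uint8_t *buf, size_t avail,
                       const uint8_t **payload, size_t *plen)
{
    uint16_t blen;

    if (avail < 2)
        return -1;                              /* need more bytes */
    blen = (uint16_t)((buf[0] << 8) | buf[1]);  /* network byte order */
    if (blen < 2 || blen > avail)
        return -1;                              /* bogus or short frame */
    *payload = buf + 2;
    *plen = blen - 2;
    return 0;
}

int main(void)
{
    const uint8_t stream[] = { 0x00, 0x06, 'e', 's', 'p', '!' };
    const uint8_t *p;
    size_t n;

    if (!parse_frame(stream, sizeof(stream), &p, &n))
        printf("payload of %zu bytes: %.*s\n", n, (int)n, p);
    return 0;
}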
|
/net/sunrpc/ |
D | socklib.c |
    28   unsigned int offset;  member
    50   if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))  in xdr_skb_read_bits()
    53   desc->offset += len;  in xdr_skb_read_bits()
    72   pos = desc->offset;  in xdr_skb_read_and_csum_bits()
    76   desc->offset += len;  in xdr_skb_read_and_csum_bits()
    174  desc.offset = 0;  in csum_partial_copy_to_xdr()
    175  desc.count = skb->len - desc.offset;  in csum_partial_copy_to_xdr()
    180  desc.csum = csum_partial(skb->data, desc.offset, skb->csum);  in csum_partial_copy_to_xdr()
    183  if (desc.offset != skb->len) {  in csum_partial_copy_to_xdr()
    185  csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);  in csum_partial_copy_to_xdr()
    [all …]
|
/net/sunrpc/xprtrdma/ |
D | svc_rdma_pcl.c |
    70   u32 handle, u32 length, u64 offset)  in pcl_set_read_segment() argument
    77   segment->rs_offset = offset;  in pcl_set_read_segment()
    111  u64 offset;  in pcl_alloc_call() local
    115  &length, &offset);  in pcl_alloc_call()
    130  pcl_set_read_segment(rctxt, chunk, handle, length, offset);  in pcl_alloc_call()
    165  u64 offset;  in pcl_alloc_read() local
    169  &length, &offset);  in pcl_alloc_read()
    181  pcl_set_read_segment(rctxt, chunk, handle, length, offset);  in pcl_alloc_read()
    233  unsigned int offset, unsigned int length,  in pcl_process_region() argument
    241  if (xdr_buf_subsegment(xdr, &subbuf, offset, length))  in pcl_process_region()
|
/net/802/ |
D | mrp.c |
    634  static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)  in mrp_pdu_parse_end_mark() argument
    638  if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)  in mrp_pdu_parse_end_mark()
    641  *offset += sizeof(endmark);  in mrp_pdu_parse_end_mark()
    687  struct sk_buff *skb, int *offset)  in mrp_pdu_parse_vecattr() argument
    693  mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),  in mrp_pdu_parse_vecattr()
    697  *offset += sizeof(_vah);  in mrp_pdu_parse_vecattr()
    714  if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,  in mrp_pdu_parse_vecattr()
    717  *offset += mrp_cb(skb)->mh->attrlen;  in mrp_pdu_parse_vecattr()
    723  if (skb_copy_bits(skb, *offset, &vaevents,  in mrp_pdu_parse_vecattr()
    726  *offset += sizeof(vaevents);  in mrp_pdu_parse_vecattr()
    [all …]
|
/net/netfilter/ipvs/ |
D | ip_vs_core.c |
    692  __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)  in ip_vs_checksum_complete() argument
    694  return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));  in ip_vs_checksum_complete()
    860  unsigned int offset, unsigned int ihl,  in handle_response_icmp() argument
    878  offset += 2 * sizeof(__u16);  in handle_response_icmp()
    879  if (skb_ensure_writable(skb, offset))  in handle_response_icmp()
    923  unsigned int offset, ihl;  in ip_vs_out_icmp() local
    935  offset = ihl = iph->ihl * 4;  in ip_vs_out_icmp()
    936  ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);  in ip_vs_out_icmp()
    959  offset += sizeof(_icmph);  in ip_vs_out_icmp()
    960  cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);  in ip_vs_out_icmp()
    [all …]
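ip_vs_checksum_complete() is the standard Internet checksum finish: sum 16-bit words from `offset` to the end of the packet, fold the carries back into 16 bits, invert. For linear data the computation reduces to the following sketch (the kernel's skb_checksum() also walks non-linear fragments; the sample bytes are an arbitrary header fragment):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* One's-complement sum over buf[offset..len), folded and inverted —
 * what csum_fold(skb_checksum(skb, offset, len - offset, 0)) computes
 * for purely linear data. */
static uint16_t csum_complete(const uint8_t *buf, size_t len, size_t offset)
{
    uint32_t sum = 0;

    for (size_t i = offset; i < len; i += 2) {
        uint16_t word = buf[i] << 8;
        if (i + 1 < len)
            word |= buf[i + 1];     /* pad a trailing odd byte with 0 */
        sum += word;
    }
    /* Fold the carries back into 16 bits */
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    const uint8_t pkt[] = { 0x45, 0x00, 0x00, 0x1c, 0x00, 0x00 };

    /* A packet whose embedded checksum field is correct returns 0;
     * here we just checksum a few raw header bytes. */
    printf("csum: 0x%04x\n", csum_complete(pkt, sizeof(pkt), 0));
    return 0;
}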
|