/net/netfilter/
D | nft_exthdr.c
     19: u8 offset;  (member)
     27: static unsigned int optlen(const u8 *opt, unsigned int offset)  (in optlen(), argument)
     30: if (opt[offset] <= TCPOPT_NOP || opt[offset + 1] == 0)  (in optlen())
     33: return opt[offset + 1];  (in optlen())
     42: unsigned int offset = 0;  (in nft_exthdr_ipv6_eval(), local)
     45: err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);  (in nft_exthdr_ipv6_eval())
     52: offset += priv->offset;  (in nft_exthdr_ipv6_eval())
     55: if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)  (in nft_exthdr_ipv6_eval())
     71: unsigned int *offset, int target)  (in ipv4_find_option(), argument)
    109: *offset = opt->srr + start;  (in ipv4_find_option())
    [all …]
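The optlen() helper above encodes the TCP option format: EOL and NOP are single bytes with no length field, every other option carries its total length in the second byte, and a zero length byte is treated as a one-byte step to guarantee forward progress. A minimal userspace sketch of the same walk (the sample buffer is illustrative, not taken from the file):

#include <stdio.h>

#define TCPOPT_EOL 0
#define TCPOPT_NOP 1

/* Length of the option starting at opt[offset], as in nft_exthdr:
 * one-byte options advance by 1, anything else by its length byte.
 * A length byte of 0 is treated as 1 so the walk always advances. */
static unsigned int optlen(const unsigned char *opt, unsigned int offset)
{
	if (opt[offset] <= TCPOPT_NOP || opt[offset + 1] == 0)
		return 1;
	return opt[offset + 1];
}

int main(void)
{
	/* NOP, NOP, MSS (kind 2, len 4, value 0x05b4), EOL */
	unsigned char opts[] = { 1, 1, 2, 4, 0x05, 0xb4, 0 };
	unsigned int off = 0;

	while (off < sizeof(opts) && opts[off] != TCPOPT_EOL) {
		printf("option kind %u at offset %u\n", opts[off], off);
		off += optlen(opts, off);
	}
	return 0;
}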
D | nft_payload.c
     28: nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)  (in nft_payload_copy_vlan(), argument)
     35: if (offset < ETH_HLEN) {  (in nft_payload_copy_vlan())
     36: u8 ethlen = min_t(u8, len, ETH_HLEN - offset);  (in nft_payload_copy_vlan())
     43: memcpy(dst_u8, vlanh + offset, ethlen);  (in nft_payload_copy_vlan())
     50: offset = ETH_HLEN;  (in nft_payload_copy_vlan())
     51: } else if (offset >= VLAN_ETH_HLEN) {  (in nft_payload_copy_vlan())
     52: offset -= VLAN_HLEN;  (in nft_payload_copy_vlan())
     59: vlanh += offset;  (in nft_payload_copy_vlan())
     61: vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);  (in nft_payload_copy_vlan())
     70: return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;  (in nft_payload_copy_vlan())
    [all …]
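nft_payload_copy_vlan() serves reads from a frame whose 802.1Q tag the NIC stripped into skb metadata: offsets below VLAN_ETH_HLEN come from a rebuilt header, while later offsets are pulled back by VLAN_HLEN before the ordinary skb_copy_bits() path at line 70. A userspace sketch of that case split, assuming the stripped tag (TPID plus TCI) is available out of band:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_HLEN      14
#define VLAN_HLEN      4
#define VLAN_ETH_HLEN (ETH_HLEN + VLAN_HLEN)

/*
 * Copy `len` bytes starting at `offset` of the frame as it looked on
 * the wire, given a buffer `frame` whose VLAN tag was stripped and a
 * 4-byte `tag` saved out of band. Bytes below VLAN_ETH_HLEN come
 * from a rebuilt header; the rest sit VLAN_HLEN earlier in `frame`.
 */
static void copy_untagged(uint8_t *dst, const uint8_t *frame,
			  const uint8_t *tag, unsigned int offset,
			  unsigned int len)
{
	uint8_t hdr[VLAN_ETH_HLEN];

	memcpy(hdr, frame, ETH_HLEN - 2);		/* both MAC addresses */
	memcpy(hdr + ETH_HLEN - 2, tag, VLAN_HLEN);	/* re-insert the tag  */
	memcpy(hdr + ETH_HLEN + 2, frame + ETH_HLEN - 2, 2);	/* ethertype  */

	while (len && offset < VLAN_ETH_HLEN) {
		*dst++ = hdr[offset++];
		len--;
	}
	if (len)	/* remainder is VLAN_HLEN earlier in the raw frame */
		memcpy(dst, frame + offset - VLAN_HLEN, len);
}

int main(void)
{
	/* frame as received: tag already stripped, 14-byte header */
	uint8_t frame[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
			    0x08, 0x00, 0x45 };
	uint8_t tag[] = { 0x81, 0x00, 0x00, 0x2a };	/* TPID 0x8100, VID 42 */
	uint8_t out[VLAN_ETH_HLEN];

	copy_untagged(out, frame, tag, 0, sizeof(out));
	printf("%02x%02x at the TPID slot\n", out[12], out[13]);	/* 8100 */
	return 0;
}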
D | nft_numgen.c
     22: u32 offset;  (member)
     34: return nval + priv->offset;  (in nft_ng_inc_gen())
     60: priv->offset = ntohl(nla_get_be32(tb[NFTA_NG_OFFSET]));  (in nft_ng_inc_init())
     66: if (priv->offset + priv->modulus - 1 < priv->offset)  (in nft_ng_inc_init())
     77: u32 modulus, enum nft_ng_types type, u32 offset)  (in nft_ng_dump(), argument)
     85: if (nla_put_be32(skb, NFTA_NG_OFFSET, htonl(offset)))  (in nft_ng_dump())
     99: priv->offset);  (in nft_ng_inc_dump())
    105: u32 offset;  (member)
    113: priv->offset;  (in nft_ng_random_gen())
    132: priv->offset = ntohl(nla_get_be32(tb[NFTA_NG_OFFSET]));  (in nft_ng_random_init())
    [all …]
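The check at line 66 is the idiomatic unsigned wrap test: the generator returns values in [offset, offset + modulus - 1], and init refuses any range whose upper bound would overflow u32 (after an unsigned overflow the sum is smaller than either operand). A standalone restatement, with a hypothetical function name:

#include <stdint.h>
#include <stdio.h>

/*
 * Accept a range [offset, offset + modulus - 1] only when its upper
 * bound is representable in u32, using the same wrap test as
 * nft_ng_inc_init().
 */
static int range_is_valid(uint32_t offset, uint32_t modulus)
{
	if (modulus == 0)
		return 0;				/* empty range */
	return offset + modulus - 1 >= offset;		/* no u32 wrap */
}

int main(void)
{
	printf("%d\n", range_is_valid(0xfffffff0u, 0x10));  /* 1: ends at 0xffffffff */
	printf("%d\n", range_is_valid(0xfffffff0u, 0x11));  /* 0: wraps past u32 max */
	return 0;
}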
D | nft_hash.c
     23: u32 offset;  (member)
     37: regs->data[priv->dreg] = h + priv->offset;  (in nft_jhash_eval())
     43: u32 offset;  (member)
     56: regs->data[priv->dreg] = h + priv->offset;  (in nft_symhash_eval())
     84: priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET]));  (in nft_jhash_init())
    101: if (priv->offset + priv->modulus - 1 < priv->offset)  (in nft_jhash_init())
    127: priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET]));  (in nft_symhash_init())
    135: if (priv->offset + priv->modulus - 1 < priv->offset)  (in nft_symhash_init())
    158: if (priv->offset != 0)  (in nft_jhash_dump())
    159: if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))  (in nft_jhash_dump())
    [all …]
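nft_hash reduces a packet hash into the same [offset, offset + modulus - 1] shape and repeats the wrap check from nft_numgen at lines 101 and 135. A sketch of the mapping, using plain modulo as a stand-in for the kernel's jhash-plus-reciprocal_scale combination:

#include <stdint.h>
#include <stdio.h>

/*
 * Map an arbitrary 32-bit hash into [offset, offset + modulus - 1],
 * the shape of nft_jhash_eval()'s result.
 */
static uint32_t hash_to_range(uint32_t h, uint32_t modulus, uint32_t offset)
{
	return (h % modulus) + offset;
}

int main(void)
{
	/* e.g. spread flows across backend IDs 100..103 */
	for (uint32_t h = 0; h < 8; h++)
		printf("%u -> %u\n", h, hash_to_range(h, 4, 100));
	return 0;
}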
/net/sunrpc/auth_gss/
D | gss_krb5_wrap.c
     49: gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)  (in gss_krb5_add_padding(), argument)
     51: int padding = gss_krb5_padding(blocksize, buf->len - offset);  (in gss_krb5_add_padding())
     83: unsigned int offset = (buf->page_base + len - 1)  (in gss_krb5_remove_padding(), local)
     86: pad = *(ptr + offset);  (in gss_krb5_remove_padding())
    158: gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,  (in gss_wrap_kerberos_v1(), argument)
    178: gss_krb5_add_padding(buf, offset, blocksize);  (in gss_wrap_kerberos_v1())
    179: BUG_ON((buf->len - offset) % blocksize);  (in gss_wrap_kerberos_v1())
    180: plainlen = conflen + buf->len - offset;  (in gss_wrap_kerberos_v1())
    184: (buf->len - offset);  (in gss_wrap_kerberos_v1())
    186: ptr = buf->head[0].iov_base + offset;  (in gss_wrap_kerberos_v1())
    [all …]
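gss_wrap_kerberos_v1() pads everything past `offset` out to the cipher blocksize and then asserts divisibility at line 179. A userspace sketch of the padding rule, assuming the usual krb5 convention that each pad byte stores the pad count, which is what lets gss_krb5_remove_padding() read the length back from the final byte:

#include <stdio.h>
#include <string.h>

/*
 * Pad length needed to round `len` up to whole cipher blocks.
 * The result is always in 1..blocksize: even an aligned buffer gets
 * a full block of padding, so the count is recoverable on unwrap.
 */
static int krb5_padding(int blocksize, int len)
{
	return blocksize - (len % blocksize);
}

int main(void)
{
	unsigned char buf[32] = "hello";
	int len = 5, blocksize = 8;
	int pad = krb5_padding(blocksize, len);

	memset(buf + len, pad, pad);	/* each pad byte stores the count */
	len += pad;
	printf("padded to %d bytes, final byte %d\n", len, buf[len - 1]);
	return 0;
}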
D | gss_krb5_crypto.c
    489: sg->offset);  (in encryptor())
    491: sg->offset);  (in encryptor())
    517: sg->offset + sg->length - fraglen);  (in encryptor())
    531: int offset, struct page **pages)  (in gss_encrypt_xdr_buf(), argument)
    537: BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);  (in gss_encrypt_xdr_buf())
    544: desc.pos = offset;  (in gss_encrypt_xdr_buf())
    553: ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);  (in gss_encrypt_xdr_buf())
    579: sg->offset);  (in decryptor())
    602: sg->offset + sg->length - fraglen);  (in decryptor())
    614: int offset)  (in gss_decrypt_xdr_buf(), argument)
    [all …]
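gss_encrypt_xdr_buf() never sees a flat buffer: xdr_process_buf() walks the discontiguous xdr_buf from `offset` onward and feeds each contiguous piece to the encryptor() callback with a running position. A simplified sketch of that callback-driven walk over a plain array of regions (names and types are illustrative, not the kernel API):

#include <stddef.h>
#include <stdio.h>

/* One contiguous region of a larger, possibly discontiguous buffer. */
struct region {
	unsigned char *data;
	size_t len;
};

/*
 * Walk `len` bytes starting `offset` bytes into a vector of regions,
 * handing each contiguous piece to `actor`. Returns the actor's
 * first nonzero result, mirroring the shape of xdr_process_buf().
 */
static int process_buf(struct region *regs, size_t nregs,
		       size_t offset, size_t len,
		       int (*actor)(unsigned char *, size_t, void *),
		       void *priv)
{
	for (size_t i = 0; i < nregs && len; i++) {
		if (offset >= regs[i].len) {
			offset -= regs[i].len;	/* region skipped entirely */
			continue;
		}
		size_t chunk = regs[i].len - offset;
		if (chunk > len)
			chunk = len;
		int err = actor(regs[i].data + offset, chunk, priv);
		if (err)
			return err;
		offset = 0;
		len -= chunk;
	}
	return 0;
}

static int print_chunk(unsigned char *p, size_t len, void *priv)
{
	(void)priv;
	printf("chunk of %zu bytes at %p\n", len, (void *)p);
	return 0;
}

int main(void)
{
	unsigned char a[8], b[16];
	struct region regs[] = { { a, sizeof(a) }, { b, sizeof(b) } };

	return process_buf(regs, 2, 4, 12, print_chunk, NULL);
}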
/net/core/
D | datagram.c
    410: static int __skb_datagram_iter(const struct sk_buff *skb, int offset,  (in __skb_datagram_iter(), argument)
    416: int i, copy = start - offset, start_off = offset, n;  (in __skb_datagram_iter())
    423: n = cb(skb->data + offset, copy, data, to);  (in __skb_datagram_iter())
    424: offset += n;  (in __skb_datagram_iter())
    436: WARN_ON(start > offset + len);  (in __skb_datagram_iter())
    439: if ((copy = end - offset) > 0) {  (in __skb_datagram_iter())
    445: n = cb(vaddr + skb_frag_off(frag) + offset - start,  (in __skb_datagram_iter())
    448: offset += n;  (in __skb_datagram_iter())
    460: WARN_ON(start > offset + len);  (in __skb_datagram_iter())
    463: if ((copy = end - offset) > 0) {  (in __skb_datagram_iter())
    [all …]
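__skb_datagram_iter() walks an skb whose bytes live in a linear area plus page frags: `start` and `end` track the byte range of the current segment, and a segment contributes `copy = end - offset` bytes whenever that is positive. The same bookkeeping over a plain array of segments, as a userspace sketch:

#include <stdio.h>
#include <string.h>

struct seg { const char *data; int len; };

/*
 * Copy `len` bytes at `offset` from a buffer split across segments,
 * using the start/end bookkeeping of __skb_datagram_iter(): `start`
 * is where the current segment begins in the overall byte stream.
 */
static int seg_copy_bits(const struct seg *segs, int nsegs,
			 int offset, char *to, int len)
{
	int start = 0;

	for (int i = 0; i < nsegs && len > 0; i++) {
		int end = start + segs[i].len;
		int copy = end - offset;

		if (copy > 0) {
			if (copy > len)
				copy = len;
			memcpy(to, segs[i].data + offset - start, copy);
			to += copy;
			offset += copy;
			len -= copy;
		}
		start = end;
	}
	return len ? -1 : 0;	/* -1: range ran past the last segment */
}

int main(void)
{
	struct seg segs[] = { { "hello ", 6 }, { "world", 5 } };
	char out[8] = { 0 };

	if (!seg_copy_bits(segs, 2, 4, out, 4))
		printf("%s\n", out);	/* "o wo": spans both segments */
	return 0;
}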
D | skbuff.c
    1929: int offset = skb_headlen(skb);  (in ___pskb_trim(), local)
    1939: if (offset >= len)  (in ___pskb_trim())
    1943: int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);  (in ___pskb_trim())
    1946: offset = end;  (in ___pskb_trim())
    1950: skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);  (in ___pskb_trim())
    1965: int end = offset + frag->len;  (in ___pskb_trim())
    1981: offset = end;  (in ___pskb_trim())
    1986: unlikely((err = pskb_trim(frag, len - offset))))  (in ___pskb_trim())
    2188: int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)  (in skb_copy_bits(), argument)
    2194: if (offset > (int)skb->len - len)  (in skb_copy_bits())
    [all …]
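The entry check in skb_copy_bits() at line 2194 is worth reading twice: the cast keeps the comparison signed, so a `len` larger than the whole skb produces a negative right-hand side instead of a huge unsigned value. Reduced to plain integers:

#include <stdio.h>

/*
 * The skb_copy_bits() bounds check in isolation. The signed cast
 * matters: with unsigned arithmetic, len > buflen would make
 * (buflen - len) wrap to a huge value and the check would pass.
 */
static int copy_allowed(unsigned int buflen, int offset, int len)
{
	return !(offset > (int)buflen - len);
}

int main(void)
{
	printf("%d\n", copy_allowed(100, 90, 10));  /* 1: exactly fits  */
	printf("%d\n", copy_allowed(100, 91, 10));  /* 0: one past end  */
	printf("%d\n", copy_allowed(100, 0, 200));  /* 0: len > buflen  */
	return 0;
}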
D | flow_dissector.c
     55: BUG_ON(key->offset > USHRT_MAX);  (in skb_flow_dissector_init())
     60: flow_dissector->offset[key->key_id] = key->offset;  (in skb_flow_dissector_init())
    499: int offset = 0;  (in __skb_flow_dissect_gre(), local)
    523: offset += sizeof(struct gre_base_hdr);  (in __skb_flow_dissect_gre())
    526: offset += FIELD_SIZEOF(struct gre_full_hdr, csum) +  (in __skb_flow_dissect_gre())
    533: keyid = __skb_header_pointer(skb, *p_nhoff + offset,  (in __skb_flow_dissect_gre())
    549: offset += FIELD_SIZEOF(struct gre_full_hdr, key);  (in __skb_flow_dissect_gre())
    553: offset += FIELD_SIZEOF(struct pptp_gre_header, seq);  (in __skb_flow_dissect_gre())
    560: eth = __skb_header_pointer(skb, *p_nhoff + offset,  (in __skb_flow_dissect_gre())
    566: offset += sizeof(*eth);  (in __skb_flow_dissect_gre())
    [all …]
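GRE's optional checksum, key, and sequence fields are present only when their flag bits are set, so the dissector accumulates `offset` past each field it finds. A sketch of the accumulation with host-order flag constants (the kernel uses big-endian GRE_* flags and FIELD_SIZEOF on the real header structs; this is the RFC 2784/2890 layout, simplified):

#include <stdint.h>
#include <stdio.h>

#define GRE_CSUM 0x8000
#define GRE_KEY  0x2000
#define GRE_SEQ  0x1000

/*
 * Offset of the encapsulated payload past a GRE base header: each
 * optional field occupies 4 bytes and appears only when its flag is
 * set, the accumulation pattern of __skb_flow_dissect_gre().
 */
static unsigned int gre_payload_offset(uint16_t flags)
{
	unsigned int offset = 4;	/* base header: flags + protocol */

	if (flags & GRE_CSUM)
		offset += 4;		/* checksum + reserved */
	if (flags & GRE_KEY)
		offset += 4;		/* key ID */
	if (flags & GRE_SEQ)
		offset += 4;		/* sequence number */
	return offset;
}

int main(void)
{
	printf("%u\n", gre_payload_offset(GRE_KEY));            /* 8  */
	printf("%u\n", gre_payload_offset(GRE_CSUM | GRE_SEQ)); /* 12 */
	return 0;
}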
/net/rds/
D | info.c
     67: unsigned long offset;  (member)
     75: int offset = optname - RDS_INFO_FIRST;  (in rds_info_register_func(), local)
     80: BUG_ON(rds_info_funcs[offset]);  (in rds_info_register_func())
     81: rds_info_funcs[offset] = func;  (in rds_info_register_func())
     88: int offset = optname - RDS_INFO_FIRST;  (in rds_info_deregister_func())
     93: BUG_ON(rds_info_funcs[offset] != func);  (in rds_info_deregister_func())
     94: rds_info_funcs[offset] = NULL;  (in rds_info_deregister_func())
    124: this = min(bytes, PAGE_SIZE - iter->offset);  (in rds_info_copy())
    128: iter->offset, this, data, bytes);  (in rds_info_copy())
    130: memcpy(iter->addr + iter->offset, data, this);  (in rds_info_copy())
    [all …]
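Two unrelated offsets share this file: an index into the rds_info_funcs table (optname - RDS_INFO_FIRST) and a byte position inside the current output page, where line 124 clamps every copy to `PAGE_SIZE - offset` so it never crosses a page boundary. The clamp, as a standalone sketch:

#include <stdio.h>

#define PAGE_SIZE 4096

/*
 * Clamp a copy so it never crosses the end of the current page: the
 * min(bytes, PAGE_SIZE - iter->offset) step in rds_info_copy().
 * The caller advances to the next page when the current one fills.
 */
static unsigned long page_chunk(unsigned long bytes, unsigned long offset)
{
	unsigned long room = PAGE_SIZE - offset;

	return bytes < room ? bytes : room;
}

int main(void)
{
	unsigned long off = 4000, want = 200;

	while (want) {
		unsigned long this = page_chunk(want, off);

		printf("copy %lu at page offset %lu\n", this, off);
		want -= this;
		off = (off + this) % PAGE_SIZE;	/* wrap to the next page */
	}
	return 0;
}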
/net/ipv6/
D | exthdrs_core.c
    116: int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type)  (in ipv6_find_tlv(), argument)
    123: if (offset + 2 > packet_len)  (in ipv6_find_tlv())
    125: hdr = (struct ipv6_opt_hdr *)(nh + offset);  (in ipv6_find_tlv())
    128: if (offset + len > packet_len)  (in ipv6_find_tlv())
    131: offset += 2;  (in ipv6_find_tlv())
    135: int opttype = nh[offset];  (in ipv6_find_tlv())
    139: return offset;  (in ipv6_find_tlv())
    146: optlen = nh[offset + 1] + 2;  (in ipv6_find_tlv())
    151: offset += optlen;  (in ipv6_find_tlv())
    186: int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,  (in ipv6_find_hdr(), argument)
    [all …]
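IPv6 option areas are TLV sequences in which PAD1 is a single zero byte and every other option is type, length, value. ipv6_find_tlv() bounds-checks before each read and returns the offset of the first option of the requested type. A userspace sketch of the walk (unlike the kernel routine it does not re-verify that the whole option body fits):

#include <stdio.h>

#define IPV6_TLV_PAD1 0
#define IPV6_TLV_PADN 1

/*
 * Find a TLV of `type` inside an options area of `len` bytes:
 * PAD1 advances by one byte, everything else by its length byte
 * plus the 2-byte type/length header. Returns the offset or -1.
 */
static int find_tlv(const unsigned char *opts, int len, int type)
{
	int offset = 0;

	while (offset < len) {
		if (opts[offset] == IPV6_TLV_PAD1) {
			offset++;
			continue;
		}
		if (offset + 2 > len)
			return -1;	/* truncated TLV header */
		if (opts[offset] == type)
			return offset;
		offset += opts[offset + 1] + 2;
	}
	return -1;
}

int main(void)
{
	/* PAD1, PADN(len 1), then HAO (type 0xc9, len 16) */
	unsigned char opts[22] = { 0, 1, 1, 0, 0xc9, 16 };

	printf("%d\n", find_tlv(opts, sizeof(opts), 0xc9));	/* 4 */
	return 0;
}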
D | mip6.c
    204: int offset;  (in mip6_destopt_reject(), local)
    213: offset = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);  (in mip6_destopt_reject())
    214: if (likely(offset >= 0))  (in mip6_destopt_reject())
    216: (skb_network_header(skb) + offset);  (in mip6_destopt_reject())
    253: u16 offset = sizeof(struct ipv6hdr);  (in mip6_destopt_offset(), local)
    263: while (offset + 1 <= packet_len) {  (in mip6_destopt_offset())
    277: if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) {  (in mip6_destopt_offset())
    279: return offset;  (in mip6_destopt_offset())
    283: return offset;  (in mip6_destopt_offset())
    287: return offset;  (in mip6_destopt_offset())
    [all …]
D | mcast_snoop.c
     18: unsigned int offset = skb_network_offset(skb) + sizeof(*ip6h);  (in ipv6_mc_check_ip6hdr(), local)
     20: if (!pskb_may_pull(skb, offset))  (in ipv6_mc_check_ip6hdr())
     28: len = offset + ntohs(ip6h->payload_len);  (in ipv6_mc_check_ip6hdr())
     29: if (skb->len < len || len <= offset)  (in ipv6_mc_check_ip6hdr())
     32: skb_set_transport_header(skb, offset);  (in ipv6_mc_check_ip6hdr())
     40: int offset;  (in ipv6_mc_check_exthdrs(), local)
     50: offset = skb_network_offset(skb) + sizeof(*ip6h);  (in ipv6_mc_check_exthdrs())
     51: offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);  (in ipv6_mc_check_exthdrs())
     53: if (offset < 0)  (in ipv6_mc_check_exthdrs())
     59: skb_set_transport_header(skb, offset);  (in ipv6_mc_check_exthdrs())
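Lines 28 and 29 validate the IPv6 payload_len field both ways: the claimed end must not run past the bytes actually held, and it must land strictly after the fixed header (a zero payload carries nothing to snoop). As a standalone predicate:

#include <stdio.h>

/*
 * Validate a claimed payload length against the buffer we hold, as
 * ipv6_mc_check_ip6hdr() does.
 */
static int payload_len_ok(unsigned int skb_len, unsigned int offset,
			  unsigned int payload_len)
{
	unsigned int len = offset + payload_len;

	return !(skb_len < len || len <= offset);
}

int main(void)
{
	/* 40-byte IPv6 header at offset 0: payload starts at 40 */
	printf("%d\n", payload_len_ok(100, 40, 60));  /* 1: exact fit   */
	printf("%d\n", payload_len_ok(100, 40, 61));  /* 0: truncated   */
	printf("%d\n", payload_len_ok(100, 40, 0));   /* 0: empty claim */
	return 0;
}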
D | calipso.c
    691: static int calipso_pad_write(unsigned char *buf, unsigned int offset,  (in calipso_pad_write(), argument)
    701: buf[offset] = IPV6_TLV_PAD1;  (in calipso_pad_write())
    704: buf[offset] = IPV6_TLV_PADN;  (in calipso_pad_write())
    705: buf[offset + 1] = count - 2;  (in calipso_pad_write())
    707: memset(buf + offset + 2, 0, count - 2);  (in calipso_pad_write())
    812: static int calipso_tlv_len(struct ipv6_opt_hdr *opt, unsigned int offset)  (in calipso_tlv_len(), argument)
    817: if (offset < sizeof(*opt) || offset >= opt_len)  (in calipso_tlv_len())
    821: if (tlv[offset] == IPV6_TLV_PAD1)  (in calipso_tlv_len())
    823: if (offset + 1 >= opt_len)  (in calipso_tlv_len())
    825: tlv_len = tlv[offset + 1] + 2;  (in calipso_tlv_len())
    [all …]
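calipso_pad_write() is the writer side of the TLV padding rules used by the walks above: one byte of padding is PAD1, anything longer becomes a PADN option whose length byte excludes the two-byte type/length header. A sketch (the kernel version also validates `count` before writing):

#include <stdio.h>
#include <string.h>

#define IPV6_TLV_PAD1 0
#define IPV6_TLV_PADN 1

/* Emit `count` bytes of IPv6 option padding at buf[offset]. */
static void pad_write(unsigned char *buf, unsigned int offset,
		      unsigned int count)
{
	if (count == 0)
		return;
	if (count == 1) {
		buf[offset] = IPV6_TLV_PAD1;
		return;
	}
	buf[offset] = IPV6_TLV_PADN;
	buf[offset + 1] = count - 2;	/* length of the zero payload */
	memset(buf + offset + 2, 0, count - 2);
}

int main(void)
{
	unsigned char buf[8] = { 0xff, 0xff, 0xff, 0xff };

	pad_write(buf, 0, 4);	/* PADN, len 2, two zero bytes */
	printf("%u %u %u %u\n", buf[0], buf[1], buf[2], buf[3]);  /* 1 2 0 0 */
	return 0;
}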
D | output_core.c
     84: unsigned int offset = sizeof(struct ipv6hdr);  (in ip6_find_1stfragopt(), local)
     90: while (offset <= packet_len) {  (in ip6_find_1stfragopt())
    102: if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)  (in ip6_find_1stfragopt())
    106: return offset;  (in ip6_find_1stfragopt())
    109: return offset;  (in ip6_find_1stfragopt())
    112: if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)  (in ip6_find_1stfragopt())
    116: offset);  (in ip6_find_1stfragopt())
    117: offset += ipv6_optlen(exthdr);  (in ip6_find_1stfragopt())
    118: if (offset > IPV6_MAXPLEN)  (in ip6_find_1stfragopt())
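ip6_find_1stfragopt() carries two guards: the pre-read bounds check at line 112 and the IPV6_MAXPLEN ceiling at line 118, which bounds the walk even when a crafted chain of maximum-length extension headers would otherwise wrap a narrow offset accumulator. A hypothetical demonstration of why the ceiling, and a type wider than u16, matters:

#include <stdint.h>
#include <stdio.h>

#define IPV6_MAXPLEN 65535

int main(void)
{
	uint16_t off16 = 40;		/* too narrow: can wrap silently  */
	unsigned int off = 40;		/* wide enough to detect overrun  */

	for (int i = 0; i < 40; i++) {
		off16 += 2048;		/* max ipv6_optlen(): (255+1) * 8 */
		off += 2048;
	}
	printf("u16 offset: %u (wrapped back into range)\n", (unsigned)off16);
	printf("uint offset: %u, over IPV6_MAXPLEN: %s\n",
	       off, off > IPV6_MAXPLEN ? "yes, walk aborted" : "no");
	return 0;
}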
/net/wireless/
D | debugfs.c
     41: char *buf, int buf_size, int offset)  (in ht_print_chan(), argument)
     43: if (WARN_ON(offset > buf_size))  (in ht_print_chan())
     47: return scnprintf(buf + offset,  (in ht_print_chan())
     48: buf_size - offset,  (in ht_print_chan())
     52: return scnprintf(buf + offset,  (in ht_print_chan())
     53: buf_size - offset,  (in ht_print_chan())
     68: unsigned int offset = 0, buf_size = PAGE_SIZE, i, r;  (in ht40allow_map_read(), local)
     83: offset += ht_print_chan(&sband->channels[i],  (in ht40allow_map_read())
     84: buf, buf_size, offset);  (in ht40allow_map_read())
     89: r = simple_read_from_buffer(user_buf, count, ppos, buf, offset);  (in ht40allow_map_read())
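ht40allow_map_read() relies on scnprintf() semantics: unlike snprintf(), it returns the number of bytes actually written, so `offset += scnprintf(buf + offset, buf_size - offset, ...)` can never step past the buffer. A userspace sketch with a shim standing in for the kernel helper:

#include <stdarg.h>
#include <stdio.h>

/*
 * Userspace stand-in for the kernel's scnprintf(): returns bytes
 * actually written. snprintf() returns what *would* have been
 * written, so `offset += snprintf(...)` overshoots on truncation.
 */
static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);
	if (n < 0)
		return 0;
	return (size_t)n < size ? n : (size ? (int)size - 1 : 0);
}

int main(void)
{
	char buf[32];
	unsigned int offset = 0;

	for (int ch = 1; ch <= 11; ch++)
		offset += scnprintf(buf + offset, sizeof(buf) - offset,
				    "ch%d ", ch);
	printf("%u bytes: %s\n", offset, buf);	/* stops at the buffer edge */
	return 0;
}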
/net/sunrpc/
D | socklib.c
     35: if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))  (in xdr_skb_read_bits())
     38: desc->offset += len;  (in xdr_skb_read_bits())
     57: pos = desc->offset;  (in xdr_skb_read_and_csum_bits())
     61: desc->offset += len;  (in xdr_skb_read_and_csum_bits())
    159: desc.offset = 0;  (in csum_partial_copy_to_xdr())
    160: desc.count = skb->len - desc.offset;  (in csum_partial_copy_to_xdr())
    165: desc.csum = csum_partial(skb->data, desc.offset, skb->csum);  (in csum_partial_copy_to_xdr())
    168: if (desc.offset != skb->len) {  (in csum_partial_copy_to_xdr())
    170: csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);  (in csum_partial_copy_to_xdr())
    171: desc.csum = csum_block_add(desc.csum, csum2, desc.offset);  (in csum_partial_copy_to_xdr())
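csum_partial_copy_to_xdr() checksums the copied part and the remainder separately, then merges them with csum_block_add() at line 171, which must account for the second block's byte offset: the Internet checksum is 16-bit word based, so a block starting on an odd byte contributes byte-swapped. A userspace restatement of that merge, mirroring my reading of the include/net/checksum.h helpers:

#include <stdint.h>
#include <stdio.h>

/* One's-complement add of two 32-bit checksum accumulators. */
static uint32_t csum_add(uint32_t csum, uint32_t addend)
{
	uint32_t res = csum + addend;

	return res + (res < addend);	/* fold the carry back in */
}

/*
 * Combine a sub-block's checksum computed independently at byte
 * `offset` of the whole message: odd offsets contribute with their
 * bytes rotated, the csum_block_add() rule.
 */
static uint32_t csum_block_add(uint32_t csum, uint32_t csum2, int offset)
{
	if (offset & 1)
		csum2 = (csum2 >> 8) | (csum2 << 24);	/* ror32(csum2, 8) */
	return csum_add(csum, csum2);
}

int main(void)
{
	/* checksum of "ab" then "cd" equals checksum of "abcd" */
	uint32_t whole = 0x6162 + 0x6364;
	uint32_t part = csum_block_add(0x6162, 0x6364, 2);

	printf("%08x %08x\n", whole, part);	/* identical accumulators */
	return 0;
}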
D | xdr.c
    177: xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,  (in xdr_inline_pages(), argument)
    185: head->iov_len = offset;  (in xdr_inline_pages())
    191: tail->iov_base = buf + offset;  (in xdr_inline_pages())
    192: tail->iov_len = buflen - offset;  (in xdr_inline_pages())
    988: unsigned int copied, offset;  (in xdr_align_pages(), local)
    996: offset = iov->iov_len - cur;  (in xdr_align_pages())
    997: copied = xdr_shrink_bufhead(buf, offset);  (in xdr_align_pages())
    998: trace_rpc_xdr_alignment(xdr, offset, copied);  (in xdr_align_pages())
   1010: offset = buf->page_len - len;  (in xdr_align_pages())
   1011: copied = xdr_shrink_pagelen(buf, offset);  (in xdr_align_pages())
    [all …]
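xdr_inline_pages() splits one flat buffer around a page-backed middle: the head keeps the first `offset` bytes and the tail picks up everything after them, with page data slotted in between. The split itself, sketched with a local kvec type:

#include <stdio.h>

struct kvec { void *iov_base; size_t iov_len; };

/*
 * Split a flat XDR buffer at `offset`, the way xdr_inline_pages()
 * sets up head and tail around the page section (not shown here).
 */
static void split_head_tail(struct kvec *head, struct kvec *tail,
			    char *buf, size_t buflen, size_t offset)
{
	head->iov_base = buf;
	head->iov_len = offset;
	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;
}

int main(void)
{
	char buf[256];
	struct kvec head, tail;

	split_head_tail(&head, &tail, buf, sizeof(buf), 64);
	printf("head %zu bytes, tail %zu bytes\n",
	       head.iov_len, tail.iov_len);	/* 64 and 192 */
	return 0;
}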
/net/netlabel/
D | netlabel_kapi.c
    555: u32 offset,  (in _netlbl_catmap_getnode(), argument)
    564: if (offset < iter->startbit)  (in _netlbl_catmap_getnode())
    566: while (iter && offset >= (iter->startbit + NETLBL_CATMAP_SIZE)) {  (in _netlbl_catmap_getnode())
    570: if (iter == NULL || offset < iter->startbit)  (in _netlbl_catmap_getnode())
    585: iter->startbit = offset & ~(NETLBL_CATMAP_SIZE - 1);  (in _netlbl_catmap_getnode())
    608: int netlbl_catmap_walk(struct netlbl_lsm_catmap *catmap, u32 offset)  (in netlbl_catmap_walk(), argument)
    615: iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);  (in netlbl_catmap_walk())
    618: if (offset > iter->startbit) {  (in netlbl_catmap_walk())
    619: offset -= iter->startbit;  (in netlbl_catmap_walk())
    620: idx = offset / NETLBL_CATMAP_MAPSIZE;  (in netlbl_catmap_walk())
    [all …]
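The NetLabel catmap is a linked list of fixed-size bitmap nodes, so locating a category bit means rounding the offset down to a node boundary (the power-of-two mask at line 585) and splitting the remainder into a word index and a bit index. Sketched with illustrative constants matching the kernel's 4 x 64-bit node layout:

#include <stdint.h>
#include <stdio.h>

#define CATMAP_MAPSIZE (sizeof(uint64_t) * 8)	/* bits per word */
#define CATMAP_SIZE    (CATMAP_MAPSIZE * 4)	/* bits per node */

/*
 * Decompose a global bit offset: node start (power-of-two round
 * down), word index inside the node, bit index inside the word.
 * CATMAP_SIZE must be a power of two for the mask trick to work.
 */
int main(void)
{
	uint32_t offset = 1000;
	uint32_t startbit = offset & ~(CATMAP_SIZE - 1);
	uint32_t idx = (offset - startbit) / CATMAP_MAPSIZE;
	uint32_t bit = (offset - startbit) % CATMAP_MAPSIZE;

	/* 1000 = 768 + 3*64 + 40 -> node@768, word 3, bit 40 */
	printf("node@%u word %u bit %u\n", startbit, idx, bit);
	return 0;
}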
/net/802/
D | mrp.c
    617: static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)  (in mrp_pdu_parse_end_mark(), argument)
    621: if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)  (in mrp_pdu_parse_end_mark())
    624: *offset += sizeof(endmark);  (in mrp_pdu_parse_end_mark())
    670: struct sk_buff *skb, int *offset)  (in mrp_pdu_parse_vecattr(), argument)
    676: mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),  (in mrp_pdu_parse_vecattr())
    680: *offset += sizeof(_vah);  (in mrp_pdu_parse_vecattr())
    697: if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,  (in mrp_pdu_parse_vecattr())
    700: *offset += mrp_cb(skb)->mh->attrlen;  (in mrp_pdu_parse_vecattr())
    706: if (skb_copy_bits(skb, *offset, &vaevents,  (in mrp_pdu_parse_vecattr())
    709: *offset += sizeof(vaevents);  (in mrp_pdu_parse_vecattr())
    [all …]
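The MRP parsers share one cursor through `int *offset`: each reads its field at the current position, advances the cursor, and fails cleanly when the buffer runs short, so parsers compose. A minimal cursor-style reader in that spirit (the u16 field type is just for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read one field at *offset and advance past it; -1 on short buffer. */
static int parse_u16(const uint8_t *buf, int buflen, int *offset,
		     uint16_t *out)
{
	if (*offset + (int)sizeof(*out) > buflen)
		return -1;
	memcpy(out, buf + *offset, sizeof(*out));
	*offset += sizeof(*out);
	return 0;
}

int main(void)
{
	uint8_t pdu[] = { 0x12, 0x34, 0x56, 0x78 };
	int offset = 0;
	uint16_t a, b;

	if (!parse_u16(pdu, sizeof(pdu), &offset, &a) &&
	    !parse_u16(pdu, sizeof(pdu), &offset, &b))
		printf("fields ok, cursor at %d\n", offset);	/* 4 */
	return 0;
}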
/net/netfilter/ipvs/
D | ip_vs_core.c
    710: __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)  (in ip_vs_checksum_complete(), argument)
    712: return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));  (in ip_vs_checksum_complete())
    878: unsigned int offset, unsigned int ihl,  (in handle_response_icmp(), argument)
    896: offset += 2 * sizeof(__u16);  (in handle_response_icmp())
    897: if (skb_ensure_writable(skb, offset))  (in handle_response_icmp())
    942: unsigned int offset, ihl;  (in ip_vs_out_icmp(), local)
    954: offset = ihl = iph->ihl * 4;  (in ip_vs_out_icmp())
    955: ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);  (in ip_vs_out_icmp())
    978: offset += sizeof(_icmph);  (in ip_vs_out_icmp())
    979: cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);  (in ip_vs_out_icmp())
    [all …]
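ip_vs_checksum_complete() is csum_fold() applied to a running 32-bit accumulator over the rest of the packet. The fold step, restated in userspace; two rounds suffice because each can carry at most one bit out:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator into the final 16-bit Internet checksum. */
static uint16_t csum_fold(uint32_t csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	csum = (csum & 0xffff) + (csum >> 16);
	return (uint16_t)~csum;
}

int main(void)
{
	/* toy accumulator: a sum of 16-bit words with carries stacked up */
	printf("0x%04x\n", csum_fold(0x00014544u));	/* 0xbaba */
	return 0;
}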
D | ip_vs_proto.c
    217: int offset,  (in ip_vs_tcpudp_debug_packet_v4(), argument)
    223: ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);  (in ip_vs_tcpudp_debug_packet_v4())
    231: pptr = skb_header_pointer(skb, offset + ih->ihl*4,  (in ip_vs_tcpudp_debug_packet_v4())
    249: int offset,  (in ip_vs_tcpudp_debug_packet_v6(), argument)
    255: ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);  (in ip_vs_tcpudp_debug_packet_v6())
    263: pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr),  (in ip_vs_tcpudp_debug_packet_v6())
    282: int offset,  (in ip_vs_tcpudp_debug_packet(), argument)
    287: ip_vs_tcpudp_debug_packet_v6(pp, skb, offset, msg);  (in ip_vs_tcpudp_debug_packet())
    290: ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg);  (in ip_vs_tcpudp_debug_packet())
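Every lookup here goes through skb_header_pointer() with a stack backup (_iph, _icmph): it returns a direct pointer when the requested header is contiguous in the linear area and otherwise gathers the bytes into the caller's buffer. A sketch reduced to a head-plus-one-fragment buffer (the real skb walk spans many fragments):

#include <stdio.h>

/* A buffer split into a linear head and one fragment. */
struct buf {
	const unsigned char *head; unsigned int head_len;
	const unsigned char *frag; unsigned int frag_len;
};

/*
 * Return a direct pointer when [offset, offset + len) lies fully in
 * the head, otherwise gather into the caller's backup storage.
 */
static const void *header_pointer(const struct buf *b, unsigned int offset,
				  unsigned int len, unsigned char *backup)
{
	if (offset + len <= b->head_len)
		return b->head + offset;		/* zero-copy path */
	if (offset + len > b->head_len + b->frag_len)
		return NULL;				/* out of bounds  */
	for (unsigned int i = 0; i < len; i++) {
		unsigned int pos = offset + i;

		backup[i] = pos < b->head_len ?
			    b->head[pos] : b->frag[pos - b->head_len];
	}
	return backup;
}

int main(void)
{
	unsigned char head[4] = { 1, 2, 3, 4 }, frag[4] = { 5, 6, 7, 8 };
	struct buf b = { head, sizeof(head), frag, sizeof(frag) };
	unsigned char backup[4];
	const unsigned char *p = header_pointer(&b, 2, 4, backup);

	printf("%u %u %u %u\n", p[0], p[1], p[2], p[3]);	/* 3 4 5 6 */
	return 0;
}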
/net/atm/
D | lec.c
    1357: int i, j, offset;  (in dump_arp_table(), local)
    1363: offset = 0;  (in dump_arp_table())
    1364: offset += sprintf(buf, "%d: %p\n", i, rulla);  (in dump_arp_table())
    1365: offset += sprintf(buf + offset, "Mac: %pM",  (in dump_arp_table())
    1367: offset += sprintf(buf + offset, " Atm:");  (in dump_arp_table())
    1369: offset += sprintf(buf + offset,  (in dump_arp_table())
    1373: offset += sprintf(buf + offset,  (in dump_arp_table())
    1382: offset +=  (in dump_arp_table())
    1383: sprintf(buf + offset,  (in dump_arp_table())
    1394: offset = 0;  (in dump_arp_table())
    [all …]
/net/dsa/
D | tag_brcm.c
     63: unsigned int offset)  (in brcm_tag_xmit_ll(), argument)
     86: if (offset)  (in brcm_tag_xmit_ll())
     87: memmove(skb->data, skb->data + BRCM_TAG_LEN, offset);  (in brcm_tag_xmit_ll())
     89: brcm_tag = skb->data + offset;  (in brcm_tag_xmit_ll())
    113: unsigned int offset)  (in brcm_tag_rcv_ll(), argument)
    121: brcm_tag = skb->data - offset;  (in brcm_tag_rcv_ll())
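brcm_tag_xmit_ll() opens a gap for the switch tag in the middle of the frame: after skb_push() claims BRCM_TAG_LEN bytes of headroom, line 87 slides the first `offset` bytes down over it, leaving a hole at `data + offset`. A userspace sketch with an illustrative 4-byte tag and offset:

#include <stdio.h>
#include <string.h>

#define TAG_LEN 4

/*
 * Open a TAG_LEN gap `offset` bytes into a frame that already has
 * TAG_LEN bytes of headroom before `data`: slide the leading bytes
 * down over the headroom, then return where the tag belongs.
 */
static unsigned char *open_gap(unsigned char *data, unsigned int offset)
{
	data -= TAG_LEN;			/* skb_push(): claim headroom */
	if (offset)
		memmove(data, data + TAG_LEN, offset);
	return data + offset;			/* the gap for the tag */
}

int main(void)
{
	unsigned char frame[32] = "....ABCDEFpayload";	/* 4B of headroom */
	unsigned char *tag = open_gap(frame + TAG_LEN, 6);

	memcpy(tag, "TAG!", TAG_LEN);
	printf("%.13s\n", frame);	/* ABCDEFTAG!pay */
	return 0;
}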
/net/tls/
D | tls_sw.c
     45: static int __skb_nsg(struct sk_buff *skb, int offset, int len,  (in __skb_nsg(), argument)
     49: int i, chunk = start - offset;  (in __skb_nsg())
     63: offset += chunk;  (in __skb_nsg())
     69: WARN_ON(start > offset + len);  (in __skb_nsg())
     72: chunk = end - offset;  (in __skb_nsg())
     80: offset += chunk;  (in __skb_nsg())
     89: WARN_ON(start > offset + len);  (in __skb_nsg())
     92: chunk = end - offset;  (in __skb_nsg())
     96: ret = __skb_nsg(frag_iter, offset - start, chunk,  (in __skb_nsg())
    104: offset += chunk;  (in __skb_nsg())
    [all …]
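__skb_nsg() applies the same start/end bookkeeping as __skb_datagram_iter() above, but only counts how many scatterlist entries the byte range [offset, offset + len) will need, recursing into the frag list at line 96. A flat sketch without the recursion:

#include <stdio.h>

struct seg { int len; };

/* Count how many segments the range [offset, offset + len) touches. */
static int nsg(const struct seg *segs, int nsegs, int offset, int len)
{
	int start = 0, n = 0;

	for (int i = 0; i < nsegs && len > 0; i++) {
		int end = start + segs[i].len;
		int chunk = end - offset;

		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			n++;
			offset += chunk;
			len -= chunk;
		}
		start = end;
	}
	return len ? -1 : n;	/* -1: range ran past the buffer */
}

int main(void)
{
	struct seg segs[] = { { 100 }, { 50 }, { 200 } };

	/* 3: tail of seg 0, all of seg 1, head of seg 2 */
	printf("%d\n", nsg(segs, 3, 90, 70));
	return 0;
}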