/net/ipv6/ |
D | calipso.c |
    723  static int calipso_genopt(unsigned char *buf, u32 start, u32 buf_len,  in calipso_genopt() argument
    734  pad = padding[start & 3];  in calipso_genopt()
    735  if (buf_len <= start + pad + CALIPSO_HDR_LEN)  in calipso_genopt()
    746  buf + start + pad + len,  in calipso_genopt()
    747  buf_len - start - pad - len);  in calipso_genopt()
    753  calipso_pad_write(buf, start, pad);  in calipso_genopt()
    754  calipso = buf + start + pad;  in calipso_genopt()
    846  static int calipso_opt_find(struct ipv6_opt_hdr *hop, unsigned int *start,  in calipso_opt_find() argument
    882  *start = offset_s + calipso_tlv_len(hop, offset_s);  in calipso_opt_find()
    884  *start = sizeof(*hop);  in calipso_opt_find()
    [all …]
|
D | exthdrs_core.c |
    72   int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,  in ipv6_skip_exthdr() argument
    85   hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);  in ipv6_skip_exthdr()
    91   start+offsetof(struct frag_hdr,  in ipv6_skip_exthdr()
    108  start += hdrlen;  in ipv6_skip_exthdr()
    112  return start;  in ipv6_skip_exthdr()
    191  unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);  in ipv6_find_hdr() local
    204  start = *offset + sizeof(struct ipv6hdr);  in ipv6_find_hdr()
    219  hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);  in ipv6_find_hdr()
    226  rh = skb_header_pointer(skb, start, sizeof(_rh),  in ipv6_find_hdr()
    243  start+offsetof(struct frag_hdr,  in ipv6_find_hdr()
    [all …]
|
/net/core/ |
D | datagram.c |
    417  int start = skb_headlen(skb);  in __skb_datagram_iter() local
    418  int i, copy = start - offset, start_off = offset, n;  in __skb_datagram_iter()
    439  WARN_ON(start > offset + len);  in __skb_datagram_iter()
    441  end = start + skb_frag_size(frag);  in __skb_datagram_iter()
    449  vaddr + skb_frag_off(frag) + offset - start,  in __skb_datagram_iter()
    458  start = end;  in __skb_datagram_iter()
    464  WARN_ON(start > offset + len);  in __skb_datagram_iter()
    466  end = start + frag_iter->len;  in __skb_datagram_iter()
    470  if (__skb_datagram_iter(frag_iter, offset - start,  in __skb_datagram_iter()
    477  start = end;  in __skb_datagram_iter()
    [all …]
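
The __skb_datagram_iter() hits above show the kernel's standard skb walk: a running start/end window is advanced over the linear head and then over each page fragment, and the requested [offset, offset + len) byte range is intersected with every piece. Below is a minimal userspace sketch of the same bookkeeping, assuming plain byte arrays in place of skb fragments; struct fragment and msg_copy_range() are illustrative names, not kernel API.

    #include <stddef.h>
    #include <string.h>

    struct fragment {
        const unsigned char *data;
        size_t len;
    };

    /*
     * Copy `len` bytes starting at byte `offset` of a message made of a
     * linear head plus `nfrags` fragments into `to`.  The start/end window
     * mirrors the bookkeeping in __skb_datagram_iter(): `start` is the
     * number of bytes covered by the pieces already walked, `end` is
     * `start` plus the size of the current piece.
     */
    static int msg_copy_range(const unsigned char *head, size_t headlen,
                              const struct fragment *frags, size_t nfrags,
                              size_t offset, unsigned char *to, size_t len)
    {
        size_t start = headlen;            /* bytes covered so far */
        size_t copy, i;

        if (offset < headlen) {            /* linear head first */
            copy = headlen - offset;
            if (copy > len)
                copy = len;
            memcpy(to, head + offset, copy);
            to += copy;
            offset += copy;
            len -= copy;
        }

        for (i = 0; i < nfrags && len; i++) {
            size_t end = start + frags[i].len;

            if (offset < end) {            /* range intersects this fragment */
                copy = end - offset;
                if (copy > len)
                    copy = len;
                memcpy(to, frags[i].data + (offset - start), copy);
                to += copy;
                offset += copy;
                len -= copy;
            }
            start = end;                   /* advance the window */
        }
        return len ? -1 : 0;               /* -1: range ran past the message */
    }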
|
D | skmsg.c |
    14   if (msg->sg.end > msg->sg.start &&  in sk_msg_try_coalesce_ok()
    18   if (msg->sg.end < msg->sg.start &&  in sk_msg_try_coalesce_ok()
    19   (elem_first_coalesce > msg->sg.start ||  in sk_msg_try_coalesce_ok()
    89   int i = src->sg.start;  in sk_msg_clone()
    139  int i = msg->sg.start;  in sk_msg_return_zero()
    157  msg->sg.start = i;  in sk_msg_return_zero()
    163  int i = msg->sg.start;  in sk_msg_return()
    212  return __sk_msg_free(sk, msg, msg->sg.start, false);  in sk_msg_free_nocharge()
    218  return __sk_msg_free(sk, msg, msg->sg.start, true);  in sk_msg_free()
    226  u32 i = msg->sg.start;  in __sk_msg_free_partial()
    [all …]
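
sk_msg stores its scatterlist as a fixed-size ring indexed by sg.start and sg.end, which is why sk_msg_try_coalesce_ok() needs separate branches for end > start and end < start. A minimal sketch of the generic membership test behind that kind of check, assuming a ring whose live region is [start, end) with possible wraparound; ring_index_live() is an illustrative helper, not the kernel's exact coalescing condition.

    #include <stdbool.h>

    /*
     * True if slot `i` lies inside the live region [start, end) of a ring
     * buffer whose end index may have wrapped around past the last slot --
     * the same situation the sg.end < sg.start comparisons above handle.
     */
    static bool ring_index_live(unsigned int start, unsigned int end,
                                unsigned int i)
    {
        if (end >= start)                    /* no wrap: one contiguous region */
            return i >= start && i < end;
        return i >= start || i < end;        /* wrapped: two pieces */
    }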
|
/net/sctp/ |
D | tsnmap.c |
    31   __u16 len, __u16 *start, __u16 *end);
    146  iter->start = map->cumulative_tsn_ack_point + 1;  in sctp_tsnmap_iter_init()
    154  __u16 *start, __u16 *end)  in sctp_tsnmap_next_gap_ack() argument
    160  if (TSN_lte(map->max_tsn_seen, iter->start))  in sctp_tsnmap_next_gap_ack()
    163  offset = iter->start - map->base_tsn;  in sctp_tsnmap_next_gap_ack()
    178  *start = start_ + 1;  in sctp_tsnmap_next_gap_ack()
    182  iter->start = map->cumulative_tsn_ack_point + *end + 1;  in sctp_tsnmap_next_gap_ack()
    273  __u16 len, __u16 *start, __u16 *end)  in sctp_tsnmap_find_gap_ack() argument
    286  *start = i;  in sctp_tsnmap_find_gap_ack()
    289  if (*start) {  in sctp_tsnmap_find_gap_ack()
    [all …]
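
sctp_tsnmap_next_gap_ack() walks the TSN map and reports "gap ack blocks": runs of TSNs received beyond the cumulative ack point, expressed as 1-based start/end offsets for the SACK chunk. A simplified standalone version over a flat received/missing byte map; next_gap_ack() and its arguments are illustrative, the kernel iterates a bitmap through an iterator struct.

    /*
     * Find the next gap ack block at or after offset `off` in a map of
     * `len` entries, where map[i] is non-zero if the TSN at 1-based
     * offset i + 1 from the cumulative ack point was received.  Returns
     * 1 and fills *start/*end (1-based, inclusive) when a block exists.
     */
    static int next_gap_ack(const unsigned char *map, unsigned int len,
                            unsigned int off, unsigned short *start,
                            unsigned short *end)
    {
        unsigned int i = off;

        while (i < len && !map[i])    /* skip the gap (missing TSNs) */
            i++;
        if (i == len)
            return 0;
        *start = i + 1;               /* first received TSN of the block */
        while (i < len && map[i])     /* extend over the received run */
            i++;
        *end = i;                     /* last received TSN of the block */
        return 1;
    }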
|
/net/rds/ |
D | info.c |
    164  unsigned long start;  in rds_info_getsockopt() local
    177  start = (unsigned long)optval;  in rds_info_getsockopt()
    178  if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {  in rds_info_getsockopt()
    187  nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK))  in rds_info_getsockopt()
    195  ret = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);  in rds_info_getsockopt()
    216  iter.offset = start & (PAGE_SIZE - 1);  in rds_info_getsockopt()
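
rds_info_getsockopt() pins the user buffer with pin_user_pages_fast(), so it must first reject a wrapping start + len and then work out how many pages the byte range [start, start + len) touches. The same arithmetic as a standalone sketch, assuming a fixed 4 KiB page size purely for illustration (the kernel additionally caps len so that PAGE_ALIGN() cannot overflow).

    #include <stdio.h>

    #define PAGE_SIZE      4096UL
    #define PAGE_MASK      (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    /*
     * Number of pages spanned by the user range [start, start + len),
     * or 0 if the range wraps around the end of the address space --
     * the same start + len < start check seen in rds_info_getsockopt().
     */
    static unsigned long pages_spanned(unsigned long start, unsigned long len)
    {
        if (start + len < start)        /* overflow: reject */
            return 0;
        return (PAGE_ALIGN(start + len) - (start & PAGE_MASK)) / PAGE_SIZE;
    }

    int main(void)
    {
        /* 100 bytes straddling a page boundary touch two pages */
        printf("%lu\n", pages_spanned(PAGE_SIZE - 50, 100));   /* prints 2 */
        return 0;
    }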
|
/net/sched/ |
D | act_tunnel_key.c |
    572  struct nlattr *start;  in tunnel_key_geneve_opts_dump() local
    574  start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);  in tunnel_key_geneve_opts_dump()
    575  if (!start)  in tunnel_key_geneve_opts_dump()
    587  nla_nest_cancel(skb, start);  in tunnel_key_geneve_opts_dump()
    595  nla_nest_end(skb, start);  in tunnel_key_geneve_opts_dump()
    603  struct nlattr *start;  in tunnel_key_vxlan_opts_dump() local
    605  start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_VXLAN);  in tunnel_key_vxlan_opts_dump()
    606  if (!start)  in tunnel_key_vxlan_opts_dump()
    610  nla_nest_cancel(skb, start);  in tunnel_key_vxlan_opts_dump()
    614  nla_nest_end(skb, start);  in tunnel_key_vxlan_opts_dump()
    [all …]
|
D | act_gate.c |
    35   static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)  in gate_get_start_time() argument
    45   *start = base;  in gate_get_start_time()
    52   *start = ktime_add_ns(base, (n + 1) * cycle);  in gate_get_start_time()
    55   static void gate_start_timer(struct tcf_gate *gact, ktime_t start)  in gate_start_timer() argument
    63   start = min_t(ktime_t, start, expires);  in gate_start_timer()
    65   hrtimer_start(&gact->hitimer, start, HRTIMER_MODE_ABS_SOFT);  in gate_start_timer()
    314  ktime_t start;  in tcf_gate_init() local
    423  gate_get_start_time(gact, &start);  in tcf_gate_init()
    425  gact->current_close_time = start;  in tcf_gate_init()
    433  gate_start_timer(gact, start);  in tcf_gate_init()
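
gate_get_start_time() picks the first cycle boundary that has not yet passed: if the configured base time is still in the future it is used as-is, otherwise the start is pushed forward by a whole number of cycle periods, which is what the (n + 1) * cycle line above does. A plain-C sketch of that computation over nanosecond timestamps; next_cycle_start() is an illustrative stand-in for the ktime_t helpers.

    #include <stdint.h>

    /*
     * First cycle start after `now` (or `base` itself if the schedule
     * has not begun yet), for a schedule starting at `base` and
     * repeating every `cycle` nanoseconds.  `cycle` must be non-zero.
     */
    static uint64_t next_cycle_start(uint64_t base, uint64_t cycle, uint64_t now)
    {
        uint64_t n;

        if (base > now)                  /* schedule starts in the future */
            return base;
        n = (now - base) / cycle;        /* full cycles already elapsed */
        return base + (n + 1) * cycle;
    }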
|
/net/netfilter/ |
D | nf_conntrack_amanda.c |
    96   unsigned int dataoff, start, stop, off, i;  in amanda_help() local
    118  start = skb_find_text(skb, dataoff, skb->len,  in amanda_help()
    120  if (start == UINT_MAX)  in amanda_help()
    122  start += dataoff + search[SEARCH_CONNECT].len;  in amanda_help()
    124  stop = skb_find_text(skb, start, skb->len,  in amanda_help()
    128  stop += start;  in amanda_help()
    131  off = skb_find_text(skb, start, stop, search[i].ts);  in amanda_help()
    134  off += start + search[i].len;  in amanda_help()
|
D | nf_conntrack_sip.c |
    208  const char *start = dptr;  in skp_epaddr_len() local
    224  dptr = start;  in skp_epaddr_len()
    242  const char *start = dptr, *limit = dptr + datalen, *end;  in ct_sip_parse_request() local
    281  *matchoff = dptr - start;  in ct_sip_parse_request()
    373  const char *start = dptr, *limit = dptr + datalen;  in ct_sip_get_header() local
    415  *matchoff = dptr - start;  in ct_sip_get_header()
    427  *matchoff = dptr - start + shift;  in ct_sip_get_header()
    441  const char *start = dptr, *limit = dptr + datalen;  in ct_sip_next_header() local
    455  *matchoff = dptr - start;  in ct_sip_next_header()
    544  const char *start;  in ct_sip_parse_param() local
    [all …]
|
D | nft_set_pipapo.c |
    1040  const u8 *start, const u8 *end, int len)  argument
    1045  memcpy(base, start, bytes);
    1195  const u8 *start = (const u8 *)elem->key.val.data, *end;  local
    1207  end = start;
    1209  dup = pipapo_get(net, set, start, genmask);
    1220  if (!memcmp(start, dup_key->data, sizeof(*dup_key->data)) &&
    1242  start_p = start;
    1266  ret = memcmp(start, end,
    1269  ret = pipapo_insert(f, start, f->groups * f->bb);
    1271  ret = pipapo_expand(f, start, end, f->groups * f->bb);
    [all …]
|
/net/openvswitch/ |
D | flow_netlink.c |
    96    size_t start = rounddown(offset, sizeof(long));  in update_range() local
    104   if (range->start == range->end) {  in update_range()
    105   range->start = start;  in update_range()
    110   if (range->start > start)  in update_range()
    111   range->start = start;  in update_range()
    1963  struct nlattr *start;  in nsh_key_to_nlattr() local
    1965  start = nla_nest_start_noflag(skb, OVS_KEY_ATTR_NSH);  in nsh_key_to_nlattr()
    1966  if (!start)  in nsh_key_to_nlattr()
    1980  nla_nest_end(skb, start);  in nsh_key_to_nlattr()
    2531  int rem, start, err;  in validate_and_copy_sample() local
    [all …]
|
D | flow_table.c |
    50   return range->end - range->start;  in range_n_bytes()
    56   int start = full ? 0 : mask->range.start;  in ovs_flow_mask_key() local
    58   const long *m = (const long *)((const u8 *)&mask->key + start);  in ovs_flow_mask_key()
    59   const long *s = (const long *)((const u8 *)src + start);  in ovs_flow_mask_key()
    60   long *d = (long *)((u8 *)dst + start);  in ovs_flow_mask_key()
    203  unsigned int start;  in tbl_mask_array_reset_counters() local
    208  start = u64_stats_fetch_begin_irq(&stats->syncp);  in tbl_mask_array_reset_counters()
    210  } while (u64_stats_fetch_retry_irq(&stats->syncp, start));  in tbl_mask_array_reset_counters()
    650  const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);  in flow_hash()
    686  return cmp_key(&flow->key, key, range->start, range->end);  in flow_cmp_masked_key()
    [all …]
|
/net/netfilter/ipvs/ |
D | ip_vs_ftp.c |
    100  __u16 af, char **start, char **end)  in ip_vs_ftp_get_addrport() argument
    160  *start = s;  in ip_vs_ftp_get_addrport()
    168  *start = s;  in ip_vs_ftp_get_addrport()
    253  char *start, *end;  in ip_vs_ftp_out() local
    285  &start, &end) != 1)  in ip_vs_ftp_out()
    306  &start, &end) != 1)  in ip_vs_ftp_out()
    376  start - data,  in ip_vs_ftp_out()
    377  end - start,  in ip_vs_ftp_out()
    423  char *start, *end;  in ip_vs_ftp_in() local
    497  &start, &end) == 1) {  in ip_vs_ftp_in()
    [all …]
|
D | ip_vs_est.c |
    64   unsigned int start;  in ip_vs_read_cpu_stats() local
    69   start = u64_stats_fetch_begin(&s->syncp);  in ip_vs_read_cpu_stats()
    75   } while (u64_stats_fetch_retry(&s->syncp, start));  in ip_vs_read_cpu_stats()
    84   start = u64_stats_fetch_begin(&s->syncp);  in ip_vs_read_cpu_stats()
    90   } while (u64_stats_fetch_retry(&s->syncp, start));  in ip_vs_read_cpu_stats()
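
Both the flow_table.c and ip_vs_est.c hits use the u64_stats_fetch_begin()/u64_stats_fetch_retry() idiom: per-CPU 64-bit counters are copied in a loop that retries until the sequence count read before and after the copy matches and is even. The sketch below models that retry loop with C11 atomics as a rough, single-writer illustration; the memory ordering and the plain counter fields are deliberate simplifications, and the kernel's u64_stats_sync handles this properly.

    #include <stdatomic.h>
    #include <stdint.h>

    struct cpu_stats {
        atomic_uint seq;          /* odd while a writer is mid-update */
        uint64_t packets;
        uint64_t bytes;
    };

    /* writer: make the sequence odd, update, make it even again */
    static void stats_add(struct cpu_stats *s, uint64_t pkts, uint64_t len)
    {
        atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel);
        s->packets += pkts;
        s->bytes += len;
        atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel);
    }

    /* reader: the do/while retry loop seen in ip_vs_read_cpu_stats() */
    static void stats_snapshot(struct cpu_stats *s,
                               uint64_t *pkts, uint64_t *bytes)
    {
        unsigned int start;

        do {
            start = atomic_load_explicit(&s->seq, memory_order_acquire);
            *pkts = s->packets;
            *bytes = s->bytes;
        } while ((start & 1) ||
                 start != atomic_load_explicit(&s->seq, memory_order_acquire));
    }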
|
/net/mptcp/ |
D | diag.c |
    19   struct nlattr *start;  in subflow_get_info() local
    23   start = nla_nest_start_noflag(skb, INET_ULP_INFO_MPTCP);  in subflow_get_info()
    24   if (!start)  in subflow_get_info()
    72   nla_nest_end(skb, start);  in subflow_get_info()
    77   nla_nest_cancel(skb, start);  in subflow_get_info()
|
/net/ethtool/ |
D | netlink.c |
    736  .start = ethnl_default_start,
    745  .start = ethnl_default_start,
    761  .start = ethnl_default_start,
    777  .start = ethnl_default_start,
    786  .start = ethnl_default_start,
    803  .start = ethnl_default_start,
    819  .start = ethnl_default_start,
    835  .start = ethnl_default_start,
    851  .start = ethnl_default_start,
    867  .start = ethnl_default_start,
    [all …]
|
D | bitset.c |
    34   static void ethnl_bitmap32_clear(u32 *dst, unsigned int start, unsigned int end,  in ethnl_bitmap32_clear() argument
    37   unsigned int start_word = start / 32;  in ethnl_bitmap32_clear()
    42   if (end <= start)  in ethnl_bitmap32_clear()
    45   if (start % 32) {  in ethnl_bitmap32_clear()
    46   mask = ethnl_upper_bits(start);  in ethnl_bitmap32_clear()
    86   static bool ethnl_bitmap32_not_zero(const u32 *map, unsigned int start,  in ethnl_bitmap32_not_zero() argument
    89   unsigned int start_word = start / 32;  in ethnl_bitmap32_not_zero()
    93   if (end <= start)  in ethnl_bitmap32_not_zero()
    96   if (start % 32) {  in ethnl_bitmap32_not_zero()
    97   mask = ethnl_upper_bits(start);  in ethnl_bitmap32_not_zero()
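
ethnl_bitmap32_clear() zeroes the bit range [start, end) of a bitmap stored as 32-bit words: a partial mask for the first word, full words in the middle, and a partial mask for the last word. Below is a self-contained version of the same shape; bitmap32_clear_range() is illustrative and does not claim to reproduce the kernel's ethnl_upper_bits()/ethnl_lower_bits() helpers bit for bit.

    #include <stdint.h>

    /*
     * Clear bits [start, end) in a bitmap held as an array of 32-bit
     * words, bit 0 being the least significant bit of word 0.
     */
    static void bitmap32_clear_range(uint32_t *map, unsigned int start,
                                     unsigned int end)
    {
        unsigned int word = start / 32;

        if (end <= start)
            return;

        if (start % 32) {                      /* partial first word */
            uint32_t mask = ~0u << (start % 32);

            if (end < (word + 1) * 32)         /* range ends in same word */
                mask &= ~0u >> (32 - end % 32);
            map[word] &= ~mask;
            if (end <= (word + 1) * 32)
                return;
            word++;
        }
        while ((word + 1) * 32 <= end)         /* full words in the middle */
            map[word++] = 0;
        if (end % 32)                          /* partial last word */
            map[word] &= ~0u << (end % 32);
    }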
|
/net/sunrpc/xprtrdma/ |
D | svc_rdma_pcl.c |
    271  unsigned int start;  in pcl_process_nonpayloads() local
    290  start = pcl_chunk_end_offset(chunk);  in pcl_process_nonpayloads()
    291  ret = pcl_process_region(xdr, start, next->ch_position - start,  in pcl_process_nonpayloads()
    300  start = pcl_chunk_end_offset(chunk);  in pcl_process_nonpayloads()
    301  ret = pcl_process_region(xdr, start, xdr->len - start, actor, data);  in pcl_process_nonpayloads()
|
D | svc_rdma_rw.c |
    834   unsigned int start, length;  in svc_rdma_read_multiple_chunks() local
    837   start = 0;  in svc_rdma_read_multiple_chunks()
    840   ret = svc_rdma_copy_inline_range(info, start, length);  in svc_rdma_read_multiple_chunks()
    853   start += length;  in svc_rdma_read_multiple_chunks()
    855   ret = svc_rdma_copy_inline_range(info, start, length);  in svc_rdma_read_multiple_chunks()
    860   start += length;  in svc_rdma_read_multiple_chunks()
    861   length = head->rc_byte_len - start;  in svc_rdma_read_multiple_chunks()
    862   ret = svc_rdma_copy_inline_range(info, start, length);  in svc_rdma_read_multiple_chunks()
    997   unsigned int start, length;  in svc_rdma_read_call_chunk() local
    1003  start = 0;  in svc_rdma_read_call_chunk()
    [all …]
|
/net/rose/ |
D | rose_out.c |
    49   unsigned short start, end;  in rose_kick() local
    60   start = (skb_peek(&rose->ack_queue) == NULL) ? rose->va : rose->vs;  in rose_kick()
    63   if (start == end)  in rose_kick()
    66   rose->vs = start;  in rose_kick()
|
/net/9p/ |
D | trans_virtio.c |
    171  static int pack_sg_list(struct scatterlist *sg, int start,  in pack_sg_list() argument
    175  int index = start;  in pack_sg_list()
    188  if (index-start)  in pack_sg_list()
    190  return index-start;  in pack_sg_list()
    218  pack_sg_list_p(struct scatterlist *sg, int start, int limit,  in pack_sg_list_p() argument
    223  int index = start;  in pack_sg_list_p()
    225  BUG_ON(nr_pages > (limit - start));  in pack_sg_list_p()
    243  if (index-start)  in pack_sg_list_p()
    245  return index - start;  in pack_sg_list_p()
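
pack_sg_list() in the 9p virtio transport slices a linear buffer into scatterlist entries that never cross a page boundary and returns how many slots past `start` were consumed (the index - start value above). A self-contained approximation of that slicing, assuming struct sg_slot and pack_buffer() as illustrative stand-ins for struct scatterlist and sg_set_buf().

    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL

    struct sg_slot {
        const char *addr;
        size_t len;
    };

    /*
     * Describe `count` bytes at `data` as page-bounded slots, starting
     * at slot index `start` and never going past `limit`.  Returns the
     * number of slots used, or -1 if the slot array is too small.
     */
    static int pack_buffer(struct sg_slot *sg, int start, int limit,
                           const char *data, size_t count)
    {
        int index = start;

        while (count) {
            /* bytes left on the page `data` currently points into */
            size_t s = PAGE_SIZE - ((uintptr_t)data & (PAGE_SIZE - 1));

            if (s > count)
                s = count;
            if (index >= limit)
                return -1;
            sg[index].addr = data;
            sg[index].len = s;
            data += s;
            count -= s;
            index++;
        }
        return index - start;
    }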
|
/net/x25/ |
D | x25_out.c |
    139  unsigned short start, end;  in x25_kick() local
    164  start = skb_peek(&x25->ack_queue) ? x25->vs : x25->va;  in x25_kick()
    167  if (start == end)  in x25_kick()
    170  x25->vs = start;  in x25_kick()
|
/net/lapb/ |
D | lapb_out.c |
    69   unsigned short modulus, start, end;  in lapb_kick() local
    72   start = !skb_peek(&lapb->ack_queue) ? lapb->va : lapb->vs;  in lapb_kick()
    76   start != end && skb_peek(&lapb->write_queue)) {  in lapb_kick()
    77   lapb->vs = start;  in lapb_kick()
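
rose_kick(), x25_kick() and lapb_kick() all start from the same decision: the next frame to transmit is the oldest unacknowledged one (va) when nothing is sitting on the ack queue, otherwise the current send state (vs), and nothing is sent once that index reaches the window edge. A compact illustration of just that selection, assuming the caller has already computed `end` modulo the protocol's sequence space; window_next_to_send() is an illustrative helper, not LAPB/X.25 state handling.

    /*
     * Decide where transmission resumes.  `va` is the oldest
     * unacknowledged frame, `vs` the next new frame, `ack_pending` is
     * non-zero when frames already sit on the ack queue, `end` is the
     * upper window edge.  Returns non-zero when there is room to send,
     * mirroring the start/end checks in the *_kick() functions above.
     */
    static int window_next_to_send(unsigned short va, unsigned short vs,
                                   int ack_pending, unsigned short end,
                                   unsigned short *start)
    {
        *start = ack_pending ? vs : va;
        return *start != end;
    }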
|
/net/tipc/ |
D | name_table.c |
    99   #define service_range_overlap(sr, start, end) \  in RB_DECLARE_CALLBACKS_MAX() argument
    100  ((sr)->lower <= (end) && (sr)->upper >= (start))  in RB_DECLARE_CALLBACKS_MAX()
    110  #define service_range_foreach_match(sr, sc, start, end) \  in RB_DECLARE_CALLBACKS_MAX() argument
    112  start, \  in RB_DECLARE_CALLBACKS_MAX()
    116  start, \
    129  u32 start, u32 end)
    135  if (!n || service_range_entry(n)->max < start)
    140  if (l && service_range_entry(l)->max >= start) {
    153  if (service_range_overlap(sr, start, end))
    159  r && service_range_entry(r)->max >= start) {
    [all …]
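
The TIPC name table keeps service ranges in an rbtree augmented with a max upper bound, and the service_range_overlap() macro above is the standard closed-interval overlap test. Stated as a standalone helper with explicit types; ranges_overlap() is an illustrative name.

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Two closed ranges [a_lo, a_hi] and [b_lo, b_hi] overlap iff each
     * one starts no later than the other ends -- the same test as
     * service_range_overlap() with sr->lower/sr->upper as one range
     * and start/end as the other.
     */
    static bool ranges_overlap(uint32_t a_lo, uint32_t a_hi,
                               uint32_t b_lo, uint32_t b_hi)
    {
        return a_lo <= b_hi && a_hi >= b_lo;
    }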
|