
Searched refs:tail (Results 1 – 25 of 71) sorted by relevance


/net/sunrpc/
xdr.c
179 const struct kvec *tail = xdr->tail; in xdr_buf_to_bvec() local
204 if (tail->iov_len) { in xdr_buf_to_bvec()
205 bvec_set_virt(bvec, tail->iov_base, tail->iov_len); in xdr_buf_to_bvec()
231 struct kvec *tail = xdr->tail; in xdr_inline_pages() local
241 tail->iov_base = buf + offset; in xdr_inline_pages()
242 tail->iov_len = buflen - offset; in xdr_inline_pages()
501 xdr_buf_iov_zero(buf->tail, pgbase - buf->page_len, len); in xdr_buf_pages_zero()
505 xdr_buf_iov_zero(buf->tail, 0, pgbase + len - buf->page_len); in xdr_buf_pages_zero()
561 struct kvec *tail = buf->tail; in xdr_buf_try_expand() local
562 unsigned int sum = head->iov_len + buf->page_len + tail->iov_len; in xdr_buf_try_expand()
[all …]
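
The xdr.c matches above all deal with the sunrpc xdr_buf layout, where a message is described by a head kvec, a page array, and a tail kvec; xdr_inline_pages() points the tail at whatever part of the original flat buffer lies beyond the chosen offset. A minimal userspace sketch of that split, assuming an illustrative xbuf type and split_at helper (not the kernel API):

    #include <stddef.h>
    #include <sys/uio.h>   /* struct iovec, standing in for the kernel's kvec */

    /* Illustrative three-part buffer: head kvec, page area (length only), tail kvec. */
    struct xbuf {
        struct iovec head;
        struct iovec tail;
        size_t page_len;
    };

    /*
     * Roughly what xdr_inline_pages() does with the tail: the head keeps the
     * first 'offset' bytes, 'page_len' bytes of page data are logically
     * inserted in the middle, and the tail kvec describes the bytes of the
     * original buffer that sit past 'offset'.
     */
    static void split_at(struct xbuf *b, char *buf, size_t buflen,
                         size_t offset, size_t page_len)
    {
        b->head.iov_base = buf;
        b->head.iov_len  = offset;
        b->page_len      = page_len;
        b->tail.iov_base = buf + offset;
        b->tail.iov_len  = buflen - offset;
    }
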
socklib.c
155 len = xdr->tail[0].iov_len; in xdr_partial_copy_from_skb()
157 copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base); in xdr_partial_copy_from_skb()
314 if (base >= xdr->tail[0].iov_len) in xprt_sock_sendmsg()
317 err = xprt_send_kvec(sock, msg, &xdr->tail[0], base); in xprt_sock_sendmsg()
/net/sched/
sch_choke.c
67 unsigned int tail; member
77 return (q->tail - q->head) & q->tab_mask; in choke_len()
97 if (q->head == q->tail) in choke_zap_head_holes()
106 q->tail = (q->tail - 1) & q->tab_mask; in choke_zap_tail_holes()
107 if (q->head == q->tail) in choke_zap_tail_holes()
109 } while (q->tab[q->tail] == NULL); in choke_zap_tail_holes()
123 if (idx == q->tail) in choke_drop_by_idx()
205 if (q->head == q->tail) in choke_match_random()
268 q->tab[q->tail] = skb; in choke_enqueue()
269 q->tail = (q->tail + 1) & q->tab_mask; in choke_enqueue()
[all …]
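
sch_choke stores queued packets in a power-of-two table and lets head and tail run freely, masking with tab_mask on every access, so the occupied length is simply (tail - head) & tab_mask. A standalone sketch of the same indexing; the ring type and size below are assumptions, only the masking arithmetic mirrors the scheduler:

    #include <stddef.h>

    #define RING_ORDER 7                    /* 128 slots; must be a power of two */
    #define RING_SIZE  (1u << RING_ORDER)
    #define RING_MASK  (RING_SIZE - 1)

    struct ring {
        void *tab[RING_SIZE];
        unsigned int head;                  /* oldest occupied slot */
        unsigned int tail;                  /* next free slot */
    };

    /* Same arithmetic as choke_len(): distance from head to tail, modulo size. */
    static unsigned int ring_len(const struct ring *q)
    {
        return (q->tail - q->head) & RING_MASK;
    }

    /* Mirrors choke_enqueue(): store at tail, then advance it under the mask. */
    static int ring_push(struct ring *q, void *item)
    {
        if (ring_len(q) >= RING_SIZE - 1)
            return -1;                      /* one slot stays empty so full != empty */
        q->tab[q->tail] = item;
        q->tail = (q->tail + 1) & RING_MASK;
        return 0;
    }
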
sch_sfq.c
131 struct sfq_slot *tail; /* current slot in round */ member
318 x = q->tail->next; in sfq_drop()
320 q->tail->next = slot->next; in sfq_drop()
447 if (q->tail == NULL) { /* It is the first flow */ in sfq_enqueue()
450 slot->next = q->tail->next; in sfq_enqueue()
451 q->tail->next = x; in sfq_enqueue()
457 q->tail = slot; in sfq_enqueue()
488 if (q->tail == NULL) in sfq_dequeue()
492 a = q->tail->next; in sfq_dequeue()
495 q->tail = slot; in sfq_dequeue()
[all …]
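
sch_sfq keeps active flows on a circular singly linked list: q->tail is the slot most recently added or served and q->tail->next is the head of the round, so a new flow is spliced in right after the tail and then becomes the tail itself. A sketch of that splice with invented slot/rr_add names:

    #include <stddef.h>

    struct slot {
        struct slot *next;
        /* per-flow queue state would live here */
    };

    /*
     * Circular singly linked list in the style of sfq_enqueue():
     * 'tail' is the most recently added slot, tail->next is the head of
     * the round.  Returns the new tail for the caller to store.
     */
    static struct slot *rr_add(struct slot *tail, struct slot *slot)
    {
        if (!tail) {
            slot->next = slot;              /* first flow: points at itself */
        } else {
            slot->next = tail->next;        /* new slot becomes the head... */
            tail->next = slot;              /* ...linked in after the old tail */
        }
        return slot;
    }
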
ematch.c
440 u8 *tail; in tcf_em_tree_dump() local
455 tail = skb_tail_pointer(skb); in tcf_em_tree_dump()
457 struct nlattr *match_start = (struct nlattr *)tail; in tcf_em_tree_dump()
477 tail = skb_tail_pointer(skb); in tcf_em_tree_dump()
478 match_start->nla_len = tail - (u8 *)match_start; in tcf_em_tree_dump()
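
tcf_em_tree_dump() uses the usual netlink trick of remembering where an attribute starts, emitting its payload, then re-reading skb_tail_pointer() and back-patching nla_len with the difference. The same record-then-patch pattern on a plain byte buffer, with made-up tlv_* helpers:

    #include <stdint.h>
    #include <string.h>

    struct tlv_hdr {
        uint16_t len;                       /* total length including this header */
        uint16_t type;
    };

    /* Start a TLV: reserve the header, return its offset so it can be patched later. */
    static size_t tlv_begin(uint8_t *buf, size_t *tail, uint16_t type)
    {
        size_t start = *tail;
        struct tlv_hdr hdr = { .len = 0, .type = type };

        memcpy(buf + *tail, &hdr, sizeof(hdr));
        *tail += sizeof(hdr);
        return start;
    }

    /* Finish a TLV: like 'match_start->nla_len = tail - (u8 *)match_start'. */
    static void tlv_end(uint8_t *buf, size_t tail, size_t start)
    {
        uint16_t len = (uint16_t)(tail - start);

        memcpy(buf + start, &len, sizeof(len));     /* patch the length field */
    }
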
sch_fq_pie.c
51 struct sk_buff *tail; member
124 flow->tail->next = skb; in flow_queue_add()
125 flow->tail = skb; in flow_queue_add()
534 rtnl_kfree_skbs(flow->head, flow->tail); in fq_pie_reset()
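
flow_queue_add() in fq_pie is the textbook O(1) tail append on a singly linked queue: link the new skb after tail if one exists, otherwise make it the head, and either way it becomes the new tail. The same shape in plain C (node/queue names are illustrative):

    #include <stddef.h>

    struct node {
        struct node *next;
    };

    struct queue {
        struct node *head;
        struct node *tail;
    };

    /* O(1) append, as in flow_queue_add(): link after tail, then move tail. */
    static void queue_add(struct queue *q, struct node *n)
    {
        n->next = NULL;
        if (q->tail)
            q->tail->next = n;
        else
            q->head = n;
        q->tail = n;
    }
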
/net/rxrpc/
call_accept.c
46 unsigned int head, tail, call_head, call_tail; in rxrpc_service_prealloc_one() local
70 tail = READ_ONCE(b->peer_backlog_tail); in rxrpc_service_prealloc_one()
71 if (CIRC_CNT(head, tail, size) < max) { in rxrpc_service_prealloc_one()
83 tail = READ_ONCE(b->conn_backlog_tail); in rxrpc_service_prealloc_one()
84 if (CIRC_CNT(head, tail, size) < max) { in rxrpc_service_prealloc_one()
182 unsigned int size = RXRPC_BACKLOG_MAX, head, tail; in rxrpc_discard_prealloc() local
195 tail = b->peer_backlog_tail; in rxrpc_discard_prealloc()
196 while (CIRC_CNT(head, tail, size) > 0) { in rxrpc_discard_prealloc()
197 struct rxrpc_peer *peer = b->peer_backlog[tail]; in rxrpc_discard_prealloc()
200 tail = (tail + 1) & (size - 1); in rxrpc_discard_prealloc()
[all …]
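
rxrpc's preallocation backlog is a power-of-two ring managed with the CIRC_CNT() helper from include/linux/circ_buf.h: the element count is the head/tail difference under the size mask, and draining advances tail with that same mask. A userspace rendering of the bookkeeping; the backlog struct and release callback are stand-ins:

    /* Same definitions as include/linux/circ_buf.h. */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    #define BACKLOG_MAX 32                  /* must be a power of two */

    struct backlog {
        void *entries[BACKLOG_MAX];
        unsigned int head;                  /* producer index */
        unsigned int tail;                  /* consumer index */
    };

    /* Drain loop in the style of rxrpc_discard_prealloc(). */
    static void backlog_drain(struct backlog *b, void (*release)(void *))
    {
        unsigned int head = b->head, tail = b->tail;

        while (CIRC_CNT(head, tail, BACKLOG_MAX) > 0) {
            release(b->entries[tail]);
            tail = (tail + 1) & (BACKLOG_MAX - 1);
        }
        b->tail = tail;
    }
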
af_rxrpc.c
80 unsigned int tail; in rxrpc_validate_address() local
103 tail = offsetof(struct sockaddr_rxrpc, transport.sin.__pad); in rxrpc_validate_address()
112 tail = offsetof(struct sockaddr_rxrpc, transport) + in rxrpc_validate_address()
121 if (tail < len) in rxrpc_validate_address()
122 memset((void *)srx + tail, 0, len - tail); in rxrpc_validate_address()
/net/core/
gen_stats.c
67 d->tail = (struct nlattr *)skb_tail_pointer(skb); in gnet_stats_start_copy_compat()
76 if (d->tail) { in gnet_stats_start_copy_compat()
85 if (ret == 0 && d->tail->nla_type == padattr) in gnet_stats_start_copy_compat()
86 d->tail = (struct nlattr *)((char *)d->tail + in gnet_stats_start_copy_compat()
87 NLA_ALIGN(d->tail->nla_len)); in gnet_stats_start_copy_compat()
225 if (d->tail) { in ___gnet_stats_copy_basic()
326 if (d->tail) { in gnet_stats_copy_rate_est()
403 if (d->tail) in gnet_stats_copy_queue()
435 if (d->tail) in gnet_stats_copy_app()
464 if (d->tail) in gnet_stats_finish_copy()
[all …]
skbuff.c
192 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
403 memset(skb, 0, offsetof(struct sk_buff, tail)); in slab_build_skb()
458 memset(skb, 0, offsetof(struct sk_buff, tail)); in __build_skb()
519 memset(skb, 0, offsetof(struct sk_buff, tail)); in __napi_build_skb()
670 memset(skb, 0, offsetof(struct sk_buff, tail)); in __alloc_skb()
1446 C(tail); in __skb_clone()
1655 struct sk_buff *tail, *skb = skb_from_uarg(uarg); in __msg_zerocopy_callback() local
1688 tail = skb_peek_tail(q); in __msg_zerocopy_callback()
1689 if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || in __msg_zerocopy_callback()
1690 !skb_zerocopy_notify_extend(tail, lo, len)) { in __msg_zerocopy_callback()
[all …]
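
Several of the skbuff constructors above clear only the leading part of the struct with memset(skb, 0, offsetof(struct sk_buff, tail)), i.e. everything up to but not including the tail member, leaving the fields that are about to be assigned explicitly untouched. The trick is plain C; a sketch with an invented packet struct:

    #include <stddef.h>
    #include <string.h>

    struct packet {
        int proto;
        int flags;
        /* everything above 'tail' is zeroed on (re)init */
        unsigned char *tail;
        unsigned char *end;
    };

    /* Clear only the fields before 'tail', as __build_skb() does for sk_buff. */
    static void packet_init(struct packet *p, unsigned char *buf, size_t size)
    {
        memset(p, 0, offsetof(struct packet, tail));
        p->tail = buf;
        p->end  = buf + size;
    }
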
/net/bluetooth/
Dcoredump.c91 hdev->dump.tail = NULL; in hci_devcd_reset()
116 hdev->dump.tail = hdev->dump.head; in hci_devcd_alloc()
127 if (hdev->dump.tail + size > hdev->dump.end) in hci_devcd_copy()
130 memcpy(hdev->dump.tail, buf, size); in hci_devcd_copy()
131 hdev->dump.tail += size; in hci_devcd_copy()
139 if (hdev->dump.tail + len > hdev->dump.end) in hci_devcd_memset()
142 memset(hdev->dump.tail, pattern, len); in hci_devcd_memset()
143 hdev->dump.tail += len; in hci_devcd_memset()
254 dump_size = hdev->dump.tail - hdev->dump.head; in hci_devcd_handle_pkt_complete()
273 dump_size = hdev->dump.tail - hdev->dump.head; in hci_devcd_handle_pkt_abort()
[all …]
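
The Bluetooth devcoredump code treats dump.tail as a write cursor between dump.head and dump.end: each copy checks that tail + size stays within end, memcpy()s, and advances tail, and the finished dump size is simply tail - head. A bounds-checked cursor of the same shape, under assumed names:

    #include <stddef.h>
    #include <string.h>

    struct dump_buf {
        unsigned char *head;                /* start of the dump area */
        unsigned char *tail;                /* current write position */
        unsigned char *end;                 /* one past the last usable byte */
    };

    /* Append like hci_devcd_copy(): refuse anything that would run past end. */
    static int dump_copy(struct dump_buf *d, const void *buf, size_t size)
    {
        if (d->tail + size > d->end)
            return -1;                      /* would overflow the reserved area */
        memcpy(d->tail, buf, size);
        d->tail += size;
        return 0;
    }

    /* Bytes written so far, as computed in hci_devcd_handle_pkt_complete(). */
    static size_t dump_size(const struct dump_buf *d)
    {
        return (size_t)(d->tail - d->head);
    }
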
/net/sunrpc/auth_gss/
gss_krb5_crypto.c
810 if (buf->tail[0].iov_base != NULL) { in gss_krb5_aes_encrypt()
811 ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len; in gss_krb5_aes_encrypt()
813 buf->tail[0].iov_base = buf->head[0].iov_base in gss_krb5_aes_encrypt()
815 buf->tail[0].iov_len = 0; in gss_krb5_aes_encrypt()
816 ecptr = buf->tail[0].iov_base; in gss_krb5_aes_encrypt()
821 buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN; in gss_krb5_aes_encrypt()
825 hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len; in gss_krb5_aes_encrypt()
850 buf->tail[0].iov_len += kctx->gk5e->cksumlength; in gss_krb5_aes_encrypt()
1039 if (buf->tail[0].iov_base) { in krb5_etm_encrypt()
1040 ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len; in krb5_etm_encrypt()
[all …]
svcauth_gss.c
1861 struct kvec *tail = buf->tail; in svcauth_gss_wrap_priv() local
1884 if (tail->iov_base) { in svcauth_gss_wrap_priv()
1885 if (tail->iov_base >= head->iov_base + PAGE_SIZE) in svcauth_gss_wrap_priv()
1887 if (tail->iov_base < head->iov_base) in svcauth_gss_wrap_priv()
1889 if (tail->iov_len + head->iov_len in svcauth_gss_wrap_priv()
1892 memmove(tail->iov_base + RPC_MAX_AUTH_SIZE, tail->iov_base, in svcauth_gss_wrap_priv()
1893 tail->iov_len); in svcauth_gss_wrap_priv()
1894 tail->iov_base += RPC_MAX_AUTH_SIZE; in svcauth_gss_wrap_priv()
1903 if (!tail->iov_base) { in svcauth_gss_wrap_priv()
1906 tail->iov_base = head->iov_base in svcauth_gss_wrap_priv()
[all …]
/net/ipv4/
tcp_cdg.c
85 u8 tail; member
197 ca->gsum.min += gmin - ca->gradients[ca->tail].min; in tcp_cdg_grad()
198 ca->gsum.max += gmax - ca->gradients[ca->tail].max; in tcp_cdg_grad()
199 ca->gradients[ca->tail].min = gmin; in tcp_cdg_grad()
200 ca->gradients[ca->tail].max = gmax; in tcp_cdg_grad()
201 ca->tail = (ca->tail + 1) & (window - 1); in tcp_cdg_grad()
218 else if (ca->tail == 0) in tcp_cdg_grad()
221 grad = (grad * window) / (int)ca->tail; in tcp_cdg_grad()
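
tcp_cdg maintains a small power-of-two window of RTT gradients together with a running sum: the new sample replaces whatever sits at the tail index, the sum is adjusted by the difference, and tail advances under the window mask. The same moving-sum ring reduced to plain integers (window size and names are assumptions):

    #define WINDOW 8                        /* must be a power of two */

    struct moving_sum {
        int samples[WINDOW];
        int sum;
        unsigned int tail;                  /* slot holding the oldest sample */
    };

    /*
     * Mirrors the bookkeeping in tcp_cdg_grad(): subtract the sample being
     * overwritten, add the new one, store it, advance the tail.
     */
    static int moving_sum_add(struct moving_sum *m, int sample)
    {
        m->sum += sample - m->samples[m->tail];
        m->samples[m->tail] = sample;
        m->tail = (m->tail + 1) & (WINDOW - 1);
        return m->sum;
    }
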
tcp_ipv4.c
1829 struct sk_buff *tail; in tcp_add_backlog() local
1863 tail = sk->sk_backlog.tail; in tcp_add_backlog()
1864 if (!tail) in tcp_add_backlog()
1866 thtail = (struct tcphdr *)tail->data; in tcp_add_backlog()
1868 if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq || in tcp_add_backlog()
1869 TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield || in tcp_add_backlog()
1870 ((TCP_SKB_CB(tail)->tcp_flags | in tcp_add_backlog()
1872 !((TCP_SKB_CB(tail)->tcp_flags & in tcp_add_backlog()
1874 ((TCP_SKB_CB(tail)->tcp_flags ^ in tcp_add_backlog()
1877 tail->decrypted != skb->decrypted || in tcp_add_backlog()
[all …]
esp4.c
443 u8 *tail; in esp_output_head() local
466 tail = skb_tail_pointer(trailer); in esp_output_head()
489 tail = page_address(page) + pfrag->offset; in esp_output_head()
491 esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto); in esp_output_head()
519 tail = skb_tail_pointer(trailer); in esp_output_head()
523 esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto); in esp_output_head()
/net/sunrpc/xprtrdma/
rpc_rdma.c
174 return (buf->head[0].iov_len + buf->tail[0].iov_len) < in rpcrdma_nonpayload_inline()
257 if (xdrbuf->tail[0].iov_len) in rpcrdma_convert_iovs()
258 rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n); in rpcrdma_convert_iovs()
643 struct page *page = virt_to_page(xdr->tail[0].iov_base); in rpcrdma_prepare_tail_iov()
670 memmove(dst, xdr->tail[0].iov_base, xdr->tail[0].iov_len); in rpcrdma_pullup_tail_iov()
671 r_xprt->rx_stats.pullup_copy_count += xdr->tail[0].iov_len; in rpcrdma_pullup_tail_iov()
716 if (unlikely(xdr->tail[0].iov_len)) in rpcrdma_prepare_noch_pullup()
730 struct kvec *tail = &xdr->tail[0]; in rpcrdma_prepare_noch_mapped() local
737 if (tail->iov_len) in rpcrdma_prepare_noch_mapped()
739 offset_in_page(tail->iov_base), in rpcrdma_prepare_noch_mapped()
[all …]
svc_rdma_sendto.c
606 ret = svc_rdma_iov_dma_map(data, &xdr->tail[0]); in svc_rdma_xb_dma_map()
645 if (xdr->tail[0].iov_len) in svc_rdma_xb_count_sges()
718 if (xdr->tail[0].iov_len) { in svc_rdma_xb_linearize()
719 memcpy(args->pd_dest, xdr->tail[0].iov_base, xdr->tail[0].iov_len); in svc_rdma_xb_linearize()
720 args->pd_dest += xdr->tail[0].iov_len; in svc_rdma_xb_linearize()
/net/mac80211/
wpa.c
38 int tail; in ieee80211_tx_h_michael_mic_add() local
66 tail = MICHAEL_MIC_LEN; in ieee80211_tx_h_michael_mic_add()
68 tail += IEEE80211_TKIP_ICV_LEN; in ieee80211_tx_h_michael_mic_add()
70 if (WARN(skb_tailroom(skb) < tail || in ieee80211_tx_h_michael_mic_add()
74 skb_tailroom(skb), tail)) in ieee80211_tx_h_michael_mic_add()
200 int len, tail; in tkip_encrypt_skb() local
215 tail = 0; in tkip_encrypt_skb()
217 tail = IEEE80211_TKIP_ICV_LEN; in tkip_encrypt_skb()
219 if (WARN_ON(skb_tailroom(skb) < tail || in tkip_encrypt_skb()
424 int hdrlen, len, tail; in ccmp_encrypt_skb() local
[all …]
/net/bluetooth/cmtp/
core.c
213 unsigned int size, tail; in cmtp_process_transmit() local
226 tail = session->mtu - nskb->len; in cmtp_process_transmit()
227 if (tail < 5) { in cmtp_process_transmit()
230 tail = session->mtu; in cmtp_process_transmit()
233 size = min_t(uint, ((tail < 258) ? (tail - 2) : (tail - 3)), skb->len); in cmtp_process_transmit()
/net/vmw_vsock/
vmci_transport_notify.c
185 u64 tail; in send_waiting_read() local
201 vmci_qpair_get_consume_indexes(vmci_trans(vsk)->qpair, &tail, &head); in send_waiting_read()
227 u64 tail; in send_waiting_write() local
237 vmci_qpair_get_produce_indexes(vmci_trans(vsk)->qpair, &tail, &head); in send_waiting_write()
238 room_left = vmci_trans(vsk)->produce_size - tail; in send_waiting_write()
244 waiting_info.offset = tail + room_needed + 1; in send_waiting_write()
/net/tipc/
msg.c
128 struct sk_buff *tail = NULL; in tipc_buf_append() local
152 TIPC_SKB_CB(head)->tail = NULL; in tipc_buf_append()
167 tail = TIPC_SKB_CB(head)->tail; in tipc_buf_append()
171 tail->next = frag; in tipc_buf_append()
175 TIPC_SKB_CB(head)->tail = frag; in tipc_buf_append()
183 TIPC_SKB_CB(head)->tail = NULL; in tipc_buf_append()
/net/dccp/
options.c
406 const unsigned char *tail, *from; in dccp_insert_option_ackvec() local
432 tail = av->av_buf + DCCPAV_MAX_ACKVEC_LEN; in dccp_insert_option_ackvec()
450 if (from + copylen > tail) { in dccp_insert_option_ackvec()
451 const u16 tailsize = tail - from; in dccp_insert_option_ackvec()
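
dccp_insert_option_ackvec() copies runs out of a circular ack vector and splits any copy that would cross the physical end of the buffer, using tailsize = tail - from for the first chunk and continuing from the start. A sketch of that wraparound split on a generic ring; the function and parameter names are invented:

    #include <stddef.h>
    #include <string.h>

    /*
     * Copy 'copylen' bytes out of a circular buffer of 'size' bytes starting
     * at offset 'from', splitting at the wrap point the way
     * dccp_insert_option_ackvec() does with 'tailsize'.
     */
    static void ring_copy_out(unsigned char *dst, const unsigned char *ring,
                              size_t size, size_t from, size_t copylen)
    {
        const unsigned char *tail = ring + size;        /* one past the end */
        size_t tailsize = (size_t)(tail - (ring + from));

        if (copylen > tailsize) {
            memcpy(dst, ring + from, tailsize);         /* up to the wrap point */
            memcpy(dst + tailsize, ring, copylen - tailsize);
        } else {
            memcpy(dst, ring + from, copylen);
        }
    }
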
/net/hsr/
hsr_main.h
237 unsigned char *tail = skb_tail_pointer(skb) - HSR_HLEN; in skb_get_PRP_rct() local
239 struct prp_rct *rct = (struct prp_rct *)tail; in skb_get_PRP_rct()
/net/ipv6/
esp6.c
479 u8 *tail; in esp6_output_head() local
501 tail = skb_tail_pointer(trailer); in esp6_output_head()
524 tail = page_address(page) + pfrag->offset; in esp6_output_head()
526 esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto); in esp6_output_head()
556 tail = skb_tail_pointer(trailer); in esp6_output_head()
560 esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto); in esp6_output_head()
