
Searched refs:sp (Results 1 – 25 of 44) sorted by relevance

/net/rxrpc/
ar-input.c  46 struct rxrpc_skb_priv *sp; in rxrpc_queue_rcv_skb() local
55 sp = rxrpc_skb(skb); in rxrpc_queue_rcv_skb()
56 ASSERTCMP(sp->call, ==, call); in rxrpc_queue_rcv_skb()
64 sp->call = NULL; in rxrpc_queue_rcv_skb()
129 sp->call = NULL; in rxrpc_queue_rcv_skb()
145 struct rxrpc_skb_priv *sp; in rxrpc_fast_process_data() local
151 sp = rxrpc_skb(skb); in rxrpc_fast_process_data()
152 ASSERTCMP(sp->call, ==, NULL); in rxrpc_fast_process_data()
213 sp->call = call; in rxrpc_fast_process_data()
215 terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) && in rxrpc_fast_process_data()
[all …]
ar-recvmsg.c  48 struct rxrpc_skb_priv *sp; in rxrpc_recvmsg() local
111 sp = rxrpc_skb(skb); in rxrpc_recvmsg()
112 call = sp->call; in rxrpc_recvmsg()
115 _debug("next pkt %s", rxrpc_pkts[sp->hdr.type]); in rxrpc_recvmsg()
157 ntohl(sp->hdr.seq), skb->len, sp->offset); in rxrpc_recvmsg()
168 ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv); in rxrpc_recvmsg()
169 ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1); in rxrpc_recvmsg()
170 call->rx_data_recv = ntohl(sp->hdr.seq); in rxrpc_recvmsg()
172 ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten); in rxrpc_recvmsg()
174 offset = sp->offset; in rxrpc_recvmsg()
[all …]
ar-ack.c  168 struct rxrpc_skb_priv *sp; in rxrpc_resend() local
194 sp = rxrpc_skb(txb); in rxrpc_resend()
196 if (sp->need_resend) { in rxrpc_resend()
197 sp->need_resend = 0; in rxrpc_resend()
200 sp->hdr.serial = in rxrpc_resend()
204 hdr->serial = sp->hdr.serial; in rxrpc_resend()
207 ntohl(sp->hdr.serial), ntohl(sp->hdr.seq)); in rxrpc_resend()
210 sp->resend_at = jiffies + 3; in rxrpc_resend()
212 sp->resend_at = in rxrpc_resend()
217 if (time_after_eq(jiffies + 1, sp->resend_at)) { in rxrpc_resend()
[all …]
ar-skbuff.c  55 struct rxrpc_skb_priv *sp) in rxrpc_hard_ACK_data() argument
62 _debug("hard ACK #%u", ntohl(sp->hdr.seq)); in rxrpc_hard_ACK_data()
70 seq = ntohl(sp->hdr.seq); in rxrpc_hard_ACK_data()
82 if (sp->hdr.flags & RXRPC_LAST_PACKET) { in rxrpc_hard_ACK_data()
87 __rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, sp->hdr.serial, in rxrpc_hard_ACK_data()
102 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); in rxrpc_packet_destructor() local
103 struct rxrpc_call *call = sp->call; in rxrpc_packet_destructor()
109 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) in rxrpc_packet_destructor()
110 rxrpc_hard_ACK_data(call, sp); in rxrpc_packet_destructor()
112 sp->call = NULL; in rxrpc_packet_destructor()
ar-output.c  454 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); in rxrpc_queue_packet() local
485 ntohl(sp->hdr.serial), ntohl(sp->hdr.seq)); in rxrpc_queue_packet()
487 sp->need_resend = 0; in rxrpc_queue_packet()
488 sp->resend_at = jiffies + rxrpc_resend_timeout * HZ; in rxrpc_queue_packet()
491 call->resend_timer.expires = sp->resend_at; in rxrpc_queue_packet()
509 sp->need_resend = 1; in rxrpc_queue_packet()
526 struct rxrpc_skb_priv *sp; in rxrpc_send_data() local
611 sp = rxrpc_skb(skb); in rxrpc_send_data()
612 sp->remain = chunk; in rxrpc_send_data()
613 if (sp->remain > skb_tailroom(skb)) in rxrpc_send_data()
[all …]
rxkad.c  156 struct rxrpc_skb_priv *sp; in rxkad_secure_packet_auth() local
166 sp = rxrpc_skb(skb); in rxkad_secure_packet_auth()
170 check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber); in rxkad_secure_packet_auth()
203 struct rxrpc_skb_priv *sp; in rxkad_secure_packet_encrypt() local
212 sp = rxrpc_skb(skb); in rxkad_secure_packet_encrypt()
216 check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber); in rxkad_secure_packet_encrypt()
256 struct rxrpc_skb_priv *sp; in rxkad_secure_packet() local
267 sp = rxrpc_skb(skb); in rxkad_secure_packet()
270 call->debug_id, key_serial(call->conn->key), ntohl(sp->hdr.seq), in rxkad_secure_packet()
288 x |= sp->hdr.seq & cpu_to_be32(0x3fffffff); in rxkad_secure_packet()
[all …]
ar-connevent.c  148 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); in rxrpc_process_event() local
156 serial = ntohl(sp->hdr.serial); in rxrpc_process_event()
158 switch (sp->hdr.type) { in rxrpc_process_event()
257 struct rxrpc_skb_priv *sp; in rxrpc_process_connection() local
274 sp = rxrpc_skb(skb); in rxrpc_process_connection()
335 struct rxrpc_skb_priv *sp; in rxrpc_reject_packets() local
375 sp = rxrpc_skb(skb); in rxrpc_reject_packets()
382 hdr.epoch = sp->hdr.epoch; in rxrpc_reject_packets()
383 hdr.cid = sp->hdr.cid; in rxrpc_reject_packets()
384 hdr.callNumber = sp->hdr.callNumber; in rxrpc_reject_packets()
[all …]
ar-accept.c  79 struct rxrpc_skb_priv *sp, *nsp; in rxrpc_accept_incoming_call() local
87 sp = rxrpc_skb(skb); in rxrpc_accept_incoming_call()
109 conn = rxrpc_incoming_connection(trans, &sp->hdr, GFP_NOIO); in rxrpc_accept_incoming_call()
117 call = rxrpc_incoming_call(rx, conn, &sp->hdr, GFP_NOIO); in rxrpc_accept_incoming_call()
135 if (sp->hdr.securityIndex > 0 && in rxrpc_accept_incoming_call()
204 struct rxrpc_skb_priv *sp; in rxrpc_accept_incoming_calls() local
234 sp = rxrpc_skb(skb); in rxrpc_accept_incoming_calls()
252 service_id = sp->hdr.serviceId; in rxrpc_accept_incoming_calls()
292 rxrpc_busy(local, &srx, &sp->hdr); in rxrpc_accept_incoming_calls()
ar-call.c  503 struct rxrpc_skb_priv *sp; in rxrpc_release_call() local
511 sp = rxrpc_skb(skb); in rxrpc_release_call()
512 if (sp->call) { in rxrpc_release_call()
513 ASSERTCMP(sp->call, ==, call); in rxrpc_release_call()
515 sp->call = NULL; in rxrpc_release_call()
521 rxrpc_pkts[sp->hdr.type], in rxrpc_release_call()
522 ntohl(sp->hdr.serial), in rxrpc_release_call()
523 ntohl(sp->hdr.seq)); in rxrpc_release_call()
674 struct rxrpc_skb_priv *sp; in rxrpc_cleanup_call() local
678 sp = rxrpc_skb((struct sk_buff *) _skb); in rxrpc_cleanup_call()
[all …]
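Note on the rxrpc matches above: every "sp = rxrpc_skb(skb)" hit resolves the per-packet private area that AF_RXRPC keeps in the skb's control buffer (skb->cb). The sketch below only illustrates that idiom; the field list is abbreviated and the *_sketch names are made up, not the definitions in ar-internal.h.

#include <linux/skbuff.h>
#include <linux/types.h>

struct rxrpc_call;				/* opaque for this sketch */

struct rxrpc_hdr_sketch {			/* wire-header fields the hits touch */
	__be32	epoch, cid, callNumber, seq, serial;
	u8	type, flags, securityIndex;
	__be16	serviceId;
};

struct rxrpc_skb_priv_sketch {
	struct rxrpc_call	*call;		/* owning call, or NULL once released */
	unsigned long		resend_at;	/* jiffies deadline used by ar-ack.c */
	u8			need_resend;	/* Tx packet still awaiting an ACK */
	struct rxrpc_hdr_sketch	hdr;		/* copy of the RxRPC packet header */
};

/* "sp = rxrpc_skb(skb)" is just a typed view of skb->cb, so the real
 * structure has to fit in sizeof(skb->cb) (48 bytes). */
#define rxrpc_skb_sketch(skb)	((struct rxrpc_skb_priv_sketch *)&(skb)->cb)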
/net/xfrm/
xfrm_input.c  19 void __secpath_destroy(struct sec_path *sp) in __secpath_destroy() argument
22 for (i = 0; i < sp->len; i++) in __secpath_destroy()
23 xfrm_state_put(sp->xvec[i]); in __secpath_destroy()
24 kmem_cache_free(secpath_cachep, sp); in __secpath_destroy()
30 struct sec_path *sp; in secpath_dup() local
32 sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC); in secpath_dup()
33 if (!sp) in secpath_dup()
36 sp->len = 0; in secpath_dup()
40 memcpy(sp, src, sizeof(*sp)); in secpath_dup()
41 for (i = 0; i < sp->len; i++) in secpath_dup()
[all …]
/net/core/
skb_dma_map.c  14 struct skb_shared_info *sp = skb_shinfo(skb); in skb_dma_map() local
23 sp->dma_maps[0] = map; in skb_dma_map()
24 for (i = 0; i < sp->nr_frags; i++) { in skb_dma_map()
25 skb_frag_t *fp = &sp->frags[i]; in skb_dma_map()
31 sp->dma_maps[i + 1] = map; in skb_dma_map()
33 sp->num_dma_maps = i + 1; in skb_dma_map()
39 skb_frag_t *fp = &sp->frags[i]; in skb_dma_map()
41 dma_unmap_page(dev, sp->dma_maps[i + 1], in skb_dma_map()
44 dma_unmap_single(dev, sp->dma_maps[0], in skb_dma_map()
54 struct skb_shared_info *sp = skb_shinfo(skb); in skb_dma_unmap() local
[all …]
/net/ipv6/
xfrm6_input.c  66 if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { in xfrm6_input_addr()
67 struct sec_path *sp; in xfrm6_input_addr() local
69 sp = secpath_dup(skb->sp); in xfrm6_input_addr()
70 if (!sp) { in xfrm6_input_addr()
74 if (skb->sp) in xfrm6_input_addr()
75 secpath_put(skb->sp); in xfrm6_input_addr()
76 skb->sp = sp; in xfrm6_input_addr()
79 if (1 + skb->sp->len == XFRM_MAX_DEPTH) { in xfrm6_input_addr()
131 skb->sp->xvec[skb->sp->len++] = x; in xfrm6_input_addr()
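Note on the /net/xfrm/ and /net/ipv6/ matches: xfrm6_input_addr() shows the copy-before-write idiom for the security path, duplicating it with secpath_dup() whenever it is absent or shared before appending another xfrm_state. A condensed, hypothetical sketch of that step (assuming the caller already holds a reference on the state it attaches):

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/xfrm.h>

/* Attach state x to skb's security path without disturbing other users. */
static int secpath_attach_sketch(struct sk_buff *skb, struct xfrm_state *x)
{
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		/* shared (or absent): take a private copy first */
		struct sec_path *sp = secpath_dup(skb->sp);

		if (!sp)
			return -ENOMEM;
		if (skb->sp)
			secpath_put(skb->sp);	/* drop the shared reference */
		skb->sp = sp;
	}
	if (skb->sp->len + 1 >= XFRM_MAX_DEPTH)	/* no room for another state */
		return -ENOBUFS;
	skb->sp->xvec[skb->sp->len++] = x;	/* caller's reference moves in */
	return 0;
}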
/net/sctp/
socket.c  334 struct sctp_sock *sp = sctp_sk(sk); in sctp_do_bind() local
335 struct sctp_endpoint *ep = sp->ep; in sctp_do_bind()
342 af = sctp_sockaddr_af(sp, addr, len); in sctp_do_bind()
359 if (!sp->pf->bind_verify(sp, addr)) in sctp_do_bind()
383 if (sctp_bind_addr_match(bp, addr, sp)) in sctp_do_bind()
514 struct sctp_sock *sp; in sctp_send_asconf_add_ip() local
531 sp = sctp_sk(sk); in sctp_send_asconf_add_ip()
532 ep = sp->ep; in sctp_send_asconf_add_ip()
622 struct sctp_sock *sp = sctp_sk(sk); in sctp_bindx_rem() local
623 struct sctp_endpoint *ep = sp->ep; in sctp_bindx_rem()
[all …]
associola.c  76 struct sctp_sock *sp; in sctp_association_init() local
82 sp = sctp_sk((struct sock *)sk); in sctp_association_init()
111 asoc->cookie_life.tv_sec = sp->assocparams.sasoc_cookie_life / 1000; in sctp_association_init()
112 asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000) in sctp_association_init()
119 asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt; in sctp_association_init()
120 asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial); in sctp_association_init()
121 asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max); in sctp_association_init()
122 asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min); in sctp_association_init()
129 asoc->hbinterval = msecs_to_jiffies(sp->hbinterval); in sctp_association_init()
132 asoc->pathmaxrxt = sp->pathmaxrxt; in sctp_association_init()
[all …]
ulpqueue.c  144 struct sctp_sock *sp = sctp_sk(sk); in sctp_clear_pd() local
146 if (atomic_dec_and_test(&sp->pd_mode)) { in sctp_clear_pd()
150 if (!skb_queue_empty(&sp->pd_lobby)) { in sctp_clear_pd()
152 sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue); in sctp_clear_pd()
163 if (!skb_queue_empty(&sp->pd_lobby) && asoc) { in sctp_clear_pd()
167 sctp_skb_for_each(skb, &sp->pd_lobby, tmp) { in sctp_clear_pd()
170 __skb_unlink(skb, &sp->pd_lobby); in sctp_clear_pd()
184 struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk); in sctp_ulpq_set_pd() local
186 atomic_inc(&sp->pd_mode); in sctp_ulpq_set_pd()
1004 struct sctp_sock *sp; in sctp_ulpq_partial_delivery() local
[all …]
ipv6.c  543 static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp) in sctp_v6_available() argument
552 if (sp && !sp->v4mapped) in sctp_v6_available()
554 if (sp && ipv6_only_sock(sctp_opt2sk(sp))) in sctp_v6_available()
557 return sctp_get_af_specific(AF_INET)->available(addr, sp); in sctp_v6_available()
573 struct sctp_sock *sp, in sctp_v6_addr_valid() argument
583 if (!sp || !sp->v4mapped) in sctp_v6_addr_valid()
585 if (sp && ipv6_only_sock(sctp_opt2sk(sp))) in sctp_v6_addr_valid()
588 return sctp_get_af_specific(AF_INET)->addr_valid(addr, sp, skb); in sctp_v6_addr_valid()
701 static void sctp_v6_addr_v4map(struct sctp_sock *sp, union sctp_addr *addr) in sctp_v6_addr_v4map() argument
703 if (sp->v4mapped && AF_INET == addr->sa.sa_family) in sctp_v6_addr_v4map()
[all …]
bind_addr.c  361 struct sctp_sock *sp; in sctp_bind_addr_conflict() local
368 sp = bp_sp; in sctp_bind_addr_conflict()
370 sp = addr_sp; in sctp_bind_addr_conflict()
372 sp = bp_sp; in sctp_bind_addr_conflict()
379 conflict = sp->pf->cmp_addr(&laddr->a, addr, sp); in sctp_bind_addr_conflict()
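Note on the sctp matches: "sp = sctp_sk(sk)" works because the protocol-private socket structure embeds the generic struct sock (via struct inet_sock) at offset zero, so the two pointers coincide. A self-contained illustration of that layout trick; the *_sketch types are stand-ins, not the kernel's struct sctp_sock:

/* Generic socket followed by protocol-private state, as one allocation. */
struct sock_sketch {
	int		sk_protocol;	/* ... generic fields ... */
};

struct sctp_sock_sketch {
	struct sock_sketch	base;		/* must stay the first member */
	void			*ep;		/* stands in for struct sctp_endpoint * */
	unsigned int		pathmaxrxt;	/* per-socket default copied in associola.c */
};

static inline struct sctp_sock_sketch *sctp_sk_sketch(struct sock_sketch *sk)
{
	/* Valid only because 'base' sits at offset 0 of sctp_sock_sketch. */
	return (struct sctp_sock_sketch *)sk;
}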
/net/dccp/
feat.c  211 if (fval->sp.vec == NULL) { in __dccp_feat_activate()
220 val = fval->sp.vec[0]; in __dccp_feat_activate()
242 fval->sp.len = len; in dccp_feat_clone_sp_val()
243 if (fval->sp.len > 0) { in dccp_feat_clone_sp_val()
244 fval->sp.vec = kmemdup(val, len, gfp_any()); in dccp_feat_clone_sp_val()
245 if (fval->sp.vec == NULL) { in dccp_feat_clone_sp_val()
246 fval->sp.len = 0; in dccp_feat_clone_sp_val()
258 kfree(val->sp.vec); in dccp_feat_val_destructor()
276 original->val.sp.vec, in dccp_feat_clone_entry()
277 original->val.sp.len)) { in dccp_feat_clone_entry()
[all …]
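Note on the dccp matches: dccp_feat_clone_sp_val() duplicates a server-priority ("SP") preference vector and falls back to an empty value when the allocation fails. A minimal sketch of that clone-or-reset step, with assumed *_sketch names and GFP_KERNEL standing in for the hit's gfp_any():

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct dccp_feat_sp_sketch {
	u8	*vec;		/* ordered list of acceptable values */
	u8	len;		/* number of entries in vec */
};

static int clone_sp_val_sketch(struct dccp_feat_sp_sketch *fval,
			       const u8 *val, u8 len)
{
	fval->len = len;
	if (fval->len > 0) {
		fval->vec = kmemdup(val, len, GFP_KERNEL);
		if (fval->vec == NULL) {
			fval->len = 0;	/* leave a consistent empty value */
			return -ENOMEM;
		}
	}
	return 0;
}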
/net/sched/
cls_rsvp.h  294 struct rsvp_session **sp; in rsvp_delete() local
312 for (sp = &((struct rsvp_head*)tp->root)->ht[h&0xFF]; in rsvp_delete()
313 *sp; sp = &(*sp)->next) { in rsvp_delete()
314 if (*sp == s) { in rsvp_delete()
316 *sp = s->next; in rsvp_delete()
416 struct rsvp_session *s, **sp; in rsvp_change() local
493 for (sp = &data->ht[h1]; (s=*sp) != NULL; sp = &s->next) { in rsvp_change()
540 for (sp = &data->ht[h1]; *sp; sp = &(*sp)->next) { in rsvp_change()
541 if (((*sp)->dpi.mask&s->dpi.mask) != s->dpi.mask) in rsvp_change()
544 s->next = *sp; in rsvp_change()
[all …]
sch_htb.c  728 } stk[TC_HTB_MAXDEPTH], *sp = stk; in htb_lookup_leaf() local
731 sp->root = tree->rb_node; in htb_lookup_leaf()
732 sp->pptr = pptr; in htb_lookup_leaf()
733 sp->pid = pid; in htb_lookup_leaf()
736 if (!*sp->pptr && *sp->pid) { in htb_lookup_leaf()
739 *sp->pptr = in htb_lookup_leaf()
740 htb_id_find_next_upper(prio, sp->root, *sp->pid); in htb_lookup_leaf()
742 *sp->pid = 0; /* ptr is valid now so that remove this hint as it in htb_lookup_leaf()
744 if (!*sp->pptr) { /* we are at right end; rewind & go up */ in htb_lookup_leaf()
745 *sp->pptr = sp->root; in htb_lookup_leaf()
[all …]
/net/netfilter/
xt_policy.c  59 const struct sec_path *sp = skb->sp; in match_policy_in() local
63 if (sp == NULL) in match_policy_in()
65 if (strict && info->len != sp->len) in match_policy_in()
68 for (i = sp->len - 1; i >= 0; i--) { in match_policy_in()
69 pos = strict ? i - sp->len + 1 : 0; in match_policy_in()
74 if (match_xfrm_state(sp->xvec[i], e, family)) { in match_policy_in()
/net/decnet/
dn_neigh.c  277 struct dn_short_packet *sp; in dn_short_output() local
298 sp = (struct dn_short_packet *)(data+2); in dn_short_output()
300 sp->msgflg = DN_RT_PKT_SHORT|(cb->rt_flags&(DN_RT_F_RQR|DN_RT_F_RTS)); in dn_short_output()
301 sp->dstnode = cb->dst; in dn_short_output()
302 sp->srcnode = cb->src; in dn_short_output()
303 sp->forward = cb->hops & 0x3f; in dn_short_output()
320 struct dn_short_packet *sp; in dn_phase3_output() local
340 sp = (struct dn_short_packet *)(data + 2); in dn_phase3_output()
342 sp->msgflg = DN_RT_PKT_SHORT|(cb->rt_flags&(DN_RT_F_RQR|DN_RT_F_RTS)); in dn_phase3_output()
343 sp->dstnode = cb->dst & cpu_to_le16(0x03ff); in dn_phase3_output()
[all …]
/net/bridge/netfilter/
ebt_stp.c  126 const struct stp_header *sp; in ebt_stp_mt() local
130 sp = skb_header_pointer(skb, 0, sizeof(_stph), &_stph); in ebt_stp_mt()
131 if (sp == NULL) in ebt_stp_mt()
135 if (memcmp(sp, header, sizeof(header))) in ebt_stp_mt()
139 && FWINV(info->type != sp->type, EBT_STP_TYPE)) in ebt_stp_mt()
142 if (sp->type == BPDU_TYPE_CONFIG && in ebt_stp_mt()
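Note on the ebt_stp match: skb_header_pointer() is the usual way to read a header that may not be linear in the skb; it returns a pointer into the packet data when possible, copies into the caller's buffer otherwise, and returns NULL if the packet is too short. A small usage sketch (the STP header layout itself is omitted; the check is an arbitrary example):

#include <linux/skbuff.h>
#include <linux/types.h>

static bool peek_front_bytes_sketch(const struct sk_buff *skb)
{
	u8 _buf[4];
	const u8 *p;

	/* Either a direct pointer into skb data or a copy into _buf. */
	p = skb_header_pointer(skb, 0, sizeof(_buf), _buf);
	if (p == NULL)
		return false;		/* packet shorter than requested */

	return p[0] == 0x42;		/* arbitrary example check on the header */
}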
/net/rfkill/
rfkill-input.c  116 const bool sp, const bool s, const bool c) in __rfkill_handle_normal_op() argument
120 if (sp) in __rfkill_handle_normal_op()
175 bool sp, s; in rfkill_task_handler() local
176 sp = test_and_clear_bit(i, in rfkill_task_handler()
185 __rfkill_handle_normal_op(i, sp, s, c); in rfkill_task_handler()
/net/ipv4/
tcp_input.c  1202 struct tcp_sack_block_wire *sp, int num_sacks, in tcp_check_dsack() argument
1206 u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq); in tcp_check_dsack()
1207 u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq); in tcp_check_dsack()
1215 u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq); in tcp_check_dsack()
1216 u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq); in tcp_check_dsack()
1720 struct tcp_sack_block sp[TCP_NUM_SACKS]; in tcp_sacktag_write_queue() local
1759 sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq); in tcp_sacktag_write_queue()
1760 sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq); in tcp_sacktag_write_queue()
1763 sp[used_sacks].start_seq, in tcp_sacktag_write_queue()
1764 sp[used_sacks].end_seq)) { in tcp_sacktag_write_queue()
[all …]
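Note on the tcp_input.c matches: the wire-format SACK blocks sit in the TCP option space, so they are big-endian and not guaranteed to be 4-byte aligned, which is why every access goes through get_unaligned_be32(). A sketch of that copy-in step with assumed *_sketch struct names:

#include <asm/unaligned.h>
#include <linux/types.h>

struct tcp_sack_block_wire_sketch {	/* as carried in the TCP options */
	__be32 start_seq;
	__be32 end_seq;
};

struct tcp_sack_block_sketch {		/* host-order working copy */
	u32 start_seq;
	u32 end_seq;
};

static void copy_sack_blocks_sketch(struct tcp_sack_block_sketch *sp,
				    const struct tcp_sack_block_wire_sketch *wire,
				    int num_sacks)
{
	int i;

	for (i = 0; i < num_sacks; i++) {
		sp[i].start_seq = get_unaligned_be32(&wire[i].start_seq);
		sp[i].end_seq   = get_unaligned_be32(&wire[i].end_seq);
	}
}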
