Searched refs:chunk (Results 1 – 25 of 39) sorted by relevance

/net/sctp/
inqueue.c
42 static inline void sctp_inq_chunk_free(struct sctp_chunk *chunk) in sctp_inq_chunk_free() argument
44 if (chunk->head_skb) in sctp_inq_chunk_free()
45 chunk->skb = chunk->head_skb; in sctp_inq_chunk_free()
46 sctp_chunk_free(chunk); in sctp_inq_chunk_free()
52 struct sctp_chunk *chunk, *tmp; in sctp_inq_free() local
55 list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) { in sctp_inq_free()
56 list_del_init(&chunk->list); in sctp_inq_free()
57 sctp_chunk_free(chunk); in sctp_inq_free()
72 void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk) in sctp_inq_push() argument
75 if (chunk->rcvr->dead) { in sctp_inq_push()
[all …]
output.c
46 struct sctp_chunk *chunk);
48 struct sctp_chunk *chunk);
50 struct sctp_chunk *chunk);
52 struct sctp_chunk *chunk,
122 struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc); in sctp_packet_config() local
124 if (chunk) in sctp_packet_config()
125 sctp_packet_append_chunk(packet, chunk); in sctp_packet_config()
163 struct sctp_chunk *chunk, *tmp; in sctp_packet_free() local
167 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { in sctp_packet_free()
168 list_del_init(&chunk->list); in sctp_packet_free()
[all …]
chunk.c
60 struct sctp_chunk *chunk; in sctp_datamsg_free() local
65 list_for_each_entry(chunk, &msg->chunks, frag_list) in sctp_datamsg_free()
66 sctp_chunk_free(chunk); in sctp_datamsg_free()
76 struct sctp_chunk *chunk; in sctp_datamsg_destroy() local
83 chunk = list_entry(pos, struct sctp_chunk, frag_list); in sctp_datamsg_destroy()
86 sctp_chunk_put(chunk); in sctp_datamsg_destroy()
90 asoc = chunk->asoc; in sctp_datamsg_destroy()
92 sent = chunk->has_tsn ? SCTP_DATA_SENT : SCTP_DATA_UNSENT; in sctp_datamsg_destroy()
96 ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent, in sctp_datamsg_destroy()
104 ev = sctp_ulpevent_make_send_failed_event(asoc, chunk, in sctp_datamsg_destroy()
[all …]
outqueue.c
210 struct sctp_chunk *chunk, *tmp; in __sctp_outq_teardown() local
216 chunk = list_entry(lchunk, struct sctp_chunk, in __sctp_outq_teardown()
219 sctp_chunk_fail(chunk, q->error); in __sctp_outq_teardown()
220 sctp_chunk_free(chunk); in __sctp_outq_teardown()
227 chunk = list_entry(lchunk, struct sctp_chunk, in __sctp_outq_teardown()
229 sctp_chunk_fail(chunk, q->error); in __sctp_outq_teardown()
230 sctp_chunk_free(chunk); in __sctp_outq_teardown()
236 chunk = list_entry(lchunk, struct sctp_chunk, in __sctp_outq_teardown()
238 sctp_chunk_fail(chunk, q->error); in __sctp_outq_teardown()
239 sctp_chunk_free(chunk); in __sctp_outq_teardown()
[all …]
sm_statefuns.c
54 struct sctp_chunk *chunk,
57 struct sctp_chunk *chunk,
62 const struct sctp_chunk *chunk);
66 const struct sctp_chunk *chunk,
97 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
149 struct sctp_chunk *chunk);
172 static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk, in sctp_chunk_length_valid() argument
175 __u16 chunk_length = ntohs(chunk->chunk_hdr->length); in sctp_chunk_length_valid()
178 if (unlikely(chunk->pdiscard)) in sctp_chunk_length_valid()
187 static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk) in sctp_err_chunk_valid() argument
[all …]
sm_make_chunk.c
67 static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
73 struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg; in sctp_control_release_owner() local
75 if (chunk->shkey) { in sctp_control_release_owner()
76 struct sctp_shared_key *shkey = chunk->shkey; in sctp_control_release_owner()
77 struct sctp_association *asoc = chunk->asoc; in sctp_control_release_owner()
93 sctp_auth_shkey_release(chunk->shkey); in sctp_control_release_owner()
97 static void sctp_control_set_owner_w(struct sctp_chunk *chunk) in sctp_control_set_owner_w() argument
99 struct sctp_association *asoc = chunk->asoc; in sctp_control_set_owner_w()
100 struct sk_buff *skb = chunk->skb; in sctp_control_set_owner_w()
109 if (chunk->auth) { in sctp_control_set_owner_w()
[all …]
ulpevent.c
79 struct sctp_chunk *chunk = event->chunk; in sctp_ulpevent_set_owner() local
90 if (chunk && chunk->head_skb && !chunk->head_skb->sk) in sctp_ulpevent_set_owner()
91 chunk->head_skb->sk = asoc->base.sk; in sctp_ulpevent_set_owner()
117 __u16 inbound, struct sctp_chunk *chunk, gfp_t gfp) in sctp_ulpevent_make_assoc_change() argument
126 if (chunk) { in sctp_ulpevent_make_assoc_change()
130 skb = skb_copy_expand(chunk->skb, in sctp_ulpevent_make_assoc_change()
145 ntohs(chunk->chunk_hdr->length) - in sctp_ulpevent_make_assoc_change()
375 struct sctp_chunk *chunk, __u16 flags, in sctp_ulpevent_make_remote_error() argument
385 ch = (struct sctp_errhdr *)(chunk->skb->data); in sctp_ulpevent_make_remote_error()
390 skb_pull(chunk->skb, sizeof(*ch)); in sctp_ulpevent_make_remote_error()
[all …]
sm_sideeffect.c
87 struct sctp_chunk *chunk) in sctp_do_ecn_ecne_work() argument
119 repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk); in sctp_do_ecn_ecne_work()
652 struct sctp_chunk *chunk, in sctp_cmd_assoc_failed() argument
661 if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT) in sctp_cmd_assoc_failed()
663 (__u16)error, 0, 0, chunk, in sctp_cmd_assoc_failed()
674 abort = sctp_make_violation_max_retrans(asoc, chunk); in sctp_cmd_assoc_failed()
695 struct sctp_chunk *chunk, in sctp_cmd_process_init() argument
706 if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp)) in sctp_cmd_process_init()
760 struct sctp_chunk *chunk) in sctp_cmd_transport_on() argument
816 hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data; in sctp_cmd_transport_on()
[all …]
endpointola.c
334 struct sctp_chunk *chunk; in sctp_endpoint_bh_rcv() local
349 while (NULL != (chunk = sctp_inq_pop(inqueue))) { in sctp_endpoint_bh_rcv()
350 subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); in sctp_endpoint_bh_rcv()
355 if (first_time && (subtype.chunk == SCTP_CID_AUTH)) { in sctp_endpoint_bh_rcv()
368 chunk->auth_chunk = skb_clone(chunk->skb, in sctp_endpoint_bh_rcv()
370 chunk->auth = 1; in sctp_endpoint_bh_rcv()
381 if (NULL == chunk->asoc) { in sctp_endpoint_bh_rcv()
383 sctp_source(chunk), in sctp_endpoint_bh_rcv()
385 chunk->asoc = asoc; in sctp_endpoint_bh_rcv()
386 chunk->transport = transport; in sctp_endpoint_bh_rcv()
[all …]
stream_interleave.c
48 static void sctp_chunk_assign_mid(struct sctp_chunk *chunk) in sctp_chunk_assign_mid() argument
55 if (chunk->has_mid) in sctp_chunk_assign_mid()
58 sid = sctp_chunk_stream_no(chunk); in sctp_chunk_assign_mid()
59 stream = &chunk->asoc->stream; in sctp_chunk_assign_mid()
61 list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) { in sctp_chunk_assign_mid()
87 static bool sctp_validate_data(struct sctp_chunk *chunk) in sctp_validate_data() argument
92 if (chunk->chunk_hdr->type != SCTP_CID_DATA) in sctp_validate_data()
95 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) in sctp_validate_data()
98 stream = &chunk->asoc->stream; in sctp_validate_data()
99 sid = sctp_chunk_stream_no(chunk); in sctp_validate_data()
[all …]
stream.c
230 struct sctp_chunk *chunk) in sctp_send_reconf() argument
234 retval = sctp_primitive_RECONF(asoc->base.net, asoc, chunk); in sctp_send_reconf()
236 sctp_chunk_free(chunk); in sctp_send_reconf()
270 struct sctp_chunk *chunk; in sctp_send_reset_streams() local
334 chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in); in sctp_send_reset_streams()
338 if (!chunk) { in sctp_send_reset_streams()
353 asoc->strreset_chunk = chunk; in sctp_send_reset_streams()
356 retval = sctp_send_reconf(asoc, chunk); in sctp_send_reset_streams()
383 struct sctp_chunk *chunk = NULL; in sctp_send_reset_assoc() local
397 chunk = sctp_make_strreset_tsnreq(asoc); in sctp_send_reset_assoc()
[all …]
input.c
95 struct sctp_chunk *chunk; in sctp_rcv() local
197 chunk = sctp_chunkify(skb, asoc, sk, GFP_ATOMIC); in sctp_rcv()
198 if (!chunk) in sctp_rcv()
200 SCTP_INPUT_CB(skb)->chunk = chunk; in sctp_rcv()
203 chunk->rcvr = rcvr; in sctp_rcv()
206 chunk->sctp_hdr = sctp_hdr(skb); in sctp_rcv()
209 sctp_init_addrs(chunk, &src, &dest); in sctp_rcv()
212 chunk->transport = transport; in sctp_rcv()
236 sctp_chunk_free(chunk); in sctp_rcv()
243 sctp_inq_push(&chunk->rcvr->inqueue, chunk); in sctp_rcv()
[all …]
associola.c
928 struct sctp_chunk *chunk; in sctp_assoc_lookup_tsn() local
950 list_for_each_entry(chunk, &active->transmitted, in sctp_assoc_lookup_tsn()
953 if (key == chunk->subh.data_hdr->tsn) { in sctp_assoc_lookup_tsn()
965 list_for_each_entry(chunk, &transport->transmitted, in sctp_assoc_lookup_tsn()
967 if (key == chunk->subh.data_hdr->tsn) { in sctp_assoc_lookup_tsn()
986 struct sctp_chunk *chunk; in sctp_assoc_bh_rcv() local
997 while (NULL != (chunk = sctp_inq_pop(inqueue))) { in sctp_assoc_bh_rcv()
999 subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); in sctp_assoc_bh_rcv()
1004 if (first_time && subtype.chunk == SCTP_CID_AUTH) { in sctp_assoc_bh_rcv()
1017 chunk->auth_chunk = skb_clone(chunk->skb, in sctp_assoc_bh_rcv()
[all …]
auth.c
387 struct sctp_chunk *chunk; in sctp_auth_asoc_init_active_key() local
414 list_for_each_entry(chunk, &asoc->outqueue.out_chunk_list, list) { in sctp_auth_asoc_init_active_key()
415 if (sctp_auth_send_cid(chunk->chunk_hdr->type, asoc)) { in sctp_auth_asoc_init_active_key()
416 chunk->auth = 1; in sctp_auth_asoc_init_active_key()
417 if (!chunk->shkey) { in sctp_auth_asoc_init_active_key()
418 chunk->shkey = asoc->shkey; in sctp_auth_asoc_init_active_key()
419 sctp_auth_shkey_hold(chunk->shkey); in sctp_auth_asoc_init_active_key()
638 static int __sctp_auth_cid(enum sctp_cid chunk, struct sctp_chunks_param *param) in __sctp_auth_cid() argument
664 if (param->chunks[i] == chunk) in __sctp_auth_cid()
674 int sctp_auth_send_cid(enum sctp_cid chunk, const struct sctp_association *asoc) in sctp_auth_send_cid() argument
[all …]
debug.c
50 if (cid.chunk <= SCTP_CID_BASE_MAX) in sctp_cname()
51 return sctp_cid_tbl[cid.chunk]; in sctp_cname()
53 switch (cid.chunk) { in sctp_cname()
ulpqueue.c
82 int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, in sctp_ulpq_tail_data() argument
90 event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); in sctp_ulpq_tail_data()
94 event->ssn = ntohs(chunk->subh.data_hdr->ssn); in sctp_ulpq_tail_data()
95 event->ppid = chunk->subh.data_hdr->ppid; in sctp_ulpq_tail_data()
1069 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, in sctp_ulpq_renege() argument
1076 needed = ntohs(chunk->chunk_hdr->length) - in sctp_ulpq_renege()
1085 if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) && in sctp_ulpq_renege()
1087 int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp); in sctp_ulpq_renege()
objcnt.c
37 SCTP_DBG_OBJCNT(chunk);
50 SCTP_DBG_OBJCNT_ENTRY(chunk),
socket.c
88 struct sctp_chunk *chunk);
124 static inline void sctp_set_owner_w(struct sctp_chunk *chunk) in sctp_set_owner_w() argument
126 struct sctp_association *asoc = chunk->asoc; in sctp_set_owner_w()
132 if (chunk->shkey) in sctp_set_owner_w()
133 sctp_auth_shkey_hold(chunk->shkey); in sctp_set_owner_w()
135 skb_set_owner_w(chunk->skb, sk); in sctp_set_owner_w()
137 chunk->skb->destructor = sctp_wfree; in sctp_set_owner_w()
139 skb_shinfo(chunk->skb)->destructor_arg = chunk; in sctp_set_owner_w()
142 asoc->sndbuf_used += chunk->skb->truesize + sizeof(struct sctp_chunk); in sctp_set_owner_w()
143 sk_wmem_queued_add(sk, chunk->skb->truesize + sizeof(struct sctp_chunk)); in sctp_set_owner_w()
[all …]
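
The /net/sctp hits above (inqueue.c, output.c, chunk.c, outqueue.c) all show the same teardown idiom: walk a chunk list with list_for_each_entry_safe(), unlink each entry, then free it. The extra "tmp" cursor is what makes freeing the current node safe mid-iteration. Below is a minimal userspace sketch of that pattern using a hand-rolled singly linked list instead of <linux/list.h>; the struct and function names are illustrative, not the kernel's.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct sctp_chunk sitting on a queue's chunk list. */
struct chunk {
	int id;
	struct chunk *next;
};

/* Walk the list with a saved 'tmp' pointer so the current node can be freed
 * mid-iteration -- the same reason the kernel code uses
 * list_for_each_entry_safe() instead of list_for_each_entry(). */
static void queue_free(struct chunk **head)
{
	struct chunk *cur, *tmp;

	for (cur = *head; cur != NULL; cur = tmp) {
		tmp = cur->next;	/* remember the successor before freeing */
		printf("freeing chunk %d\n", cur->id);
		free(cur);
	}
	*head = NULL;
}

int main(void)
{
	struct chunk *head = NULL;

	/* Build a short list: 2 -> 1 -> 0. */
	for (int i = 0; i < 3; i++) {
		struct chunk *c = malloc(sizeof(*c));
		if (!c)
			return 1;
		c->id = i;
		c->next = head;
		head = c;
	}
	queue_free(&head);
	return 0;
}
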
/net/sunrpc/xprtrdma/
svc_rdma_pcl.c
20 struct svc_rdma_chunk *chunk; in pcl_free() local
22 chunk = pcl_first_chunk(pcl); in pcl_free()
23 list_del(&chunk->ch_list); in pcl_free()
24 kfree(chunk); in pcl_free()
30 struct svc_rdma_chunk *chunk; in pcl_alloc_chunk() local
32 chunk = kmalloc(struct_size(chunk, ch_segments, segcount), GFP_KERNEL); in pcl_alloc_chunk()
33 if (!chunk) in pcl_alloc_chunk()
36 chunk->ch_position = position; in pcl_alloc_chunk()
37 chunk->ch_length = 0; in pcl_alloc_chunk()
38 chunk->ch_payload_length = 0; in pcl_alloc_chunk()
[all …]
svc_rdma_rw.c
234 const struct svc_rdma_chunk *chunk) in svc_rdma_write_info_alloc() argument
243 info->wi_chunk = chunk; in svc_rdma_write_info_alloc()
619 const struct svc_rdma_chunk *chunk, in svc_rdma_send_write_chunk() argument
626 info = svc_rdma_write_info_alloc(rdma, chunk); in svc_rdma_send_write_chunk()
665 struct svc_rdma_chunk *chunk; in svc_rdma_send_reply_chunk() local
671 chunk = pcl_first_chunk(&rctxt->rc_reply_pcl); in svc_rdma_send_reply_chunk()
672 info = svc_rdma_write_info_alloc(rdma, chunk); in svc_rdma_send_reply_chunk()
775 const struct svc_rdma_chunk *chunk) in svc_rdma_build_read_chunk() argument
781 pcl_for_each_segment(segment, chunk) { in svc_rdma_build_read_chunk()
859 struct svc_rdma_chunk *chunk, *next; in svc_rdma_read_multiple_chunks() local
[all …]
svc_rdma_sendto.c
381 const struct svc_rdma_chunk *chunk, in svc_rdma_encode_write_segment() argument
384 const struct svc_rdma_segment *segment = &chunk->ch_segments[segno]; in svc_rdma_encode_write_segment()
417 const struct svc_rdma_chunk *chunk) in svc_rdma_encode_write_chunk() argument
419 u32 remaining = chunk->ch_payload_length; in svc_rdma_encode_write_chunk()
429 ret = xdr_stream_encode_u32(&sctxt->sc_stream, chunk->ch_segcount); in svc_rdma_encode_write_chunk()
434 for (segno = 0; segno < chunk->ch_segcount; segno++) { in svc_rdma_encode_write_chunk()
435 ret = svc_rdma_encode_write_segment(sctxt, chunk, &remaining, segno); in svc_rdma_encode_write_chunk()
457 struct svc_rdma_chunk *chunk; in svc_rdma_encode_write_list() local
461 pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) { in svc_rdma_encode_write_list()
462 ret = svc_rdma_encode_write_chunk(sctxt, chunk); in svc_rdma_encode_write_list()
[all …]
svc_rdma_recvfrom.c
587 struct svc_rdma_chunk *chunk; in svc_rdma_get_inv_rkey() local
596 pcl_for_each_chunk(chunk, &ctxt->rc_call_pcl) { in svc_rdma_get_inv_rkey()
597 pcl_for_each_segment(segment, chunk) { in svc_rdma_get_inv_rkey()
604 pcl_for_each_chunk(chunk, &ctxt->rc_read_pcl) { in svc_rdma_get_inv_rkey()
605 pcl_for_each_segment(segment, chunk) { in svc_rdma_get_inv_rkey()
612 pcl_for_each_chunk(chunk, &ctxt->rc_write_pcl) { in svc_rdma_get_inv_rkey()
613 pcl_for_each_segment(segment, chunk) { in svc_rdma_get_inv_rkey()
620 pcl_for_each_chunk(chunk, &ctxt->rc_reply_pcl) { in svc_rdma_get_inv_rkey()
621 pcl_for_each_segment(segment, chunk) { in svc_rdma_get_inv_rkey()
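
The svc_rdma_pcl.c hit at line 32 allocates a chunk with struct_size(chunk, ch_segments, segcount): one allocation covering the fixed header plus a trailing flexible array of segments, with overflow-safe size arithmetic. A hedged userspace approximation of that layout follows; the field and type names are invented for illustration, and the kernel's struct_size() handles overflow by saturating to SIZE_MAX rather than returning failure as done here.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for svc_rdma_segment / svc_rdma_chunk. */
struct segment {
	uint32_t handle;
	uint32_t length;
	uint64_t offset;
};

struct chunk {
	size_t position;
	size_t length;
	size_t segcount;
	struct segment segments[];	/* flexible array member */
};

/* Rough equivalent of struct_size(chunk, segments, n): the fixed header
 * plus n trailing array elements, with the multiplication checked for
 * overflow before it happens. */
static struct chunk *chunk_alloc(size_t segcount)
{
	size_t bytes;

	if (segcount > (SIZE_MAX - sizeof(struct chunk)) / sizeof(struct segment))
		return NULL;		/* size computation would overflow */
	bytes = sizeof(struct chunk) + segcount * sizeof(struct segment);

	struct chunk *c = calloc(1, bytes);
	if (c)
		c->segcount = segcount;
	return c;
}

int main(void)
{
	struct chunk *c = chunk_alloc(4);
	if (!c)
		return 1;
	printf("allocated chunk with %zu segments\n", c->segcount);
	free(c);
	return 0;
}
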
/net/tls/
tls_strp.c
162 int chunk, len, offset; in tls_strp_msg_hold() local
174 chunk = iter->len - offset; in tls_strp_msg_hold()
182 len -= chunk; in tls_strp_msg_hold()
213 size_t len, chunk; in tls_strp_copyin_frag() local
223 chunk = min_t(size_t, len, PAGE_SIZE - skb_frag_size(frag)); in tls_strp_copyin_frag()
227 chunk)); in tls_strp_copyin_frag()
229 skb->len += chunk; in tls_strp_copyin_frag()
230 skb->data_len += chunk; in tls_strp_copyin_frag()
231 skb_frag_size_add(frag, chunk); in tls_strp_copyin_frag()
241 WARN_ON_ONCE(over > chunk); in tls_strp_copyin_frag()
[all …]
tls_sw.c
85 int i, chunk = start - offset; in __skb_nsg() local
92 if (chunk > 0) { in __skb_nsg()
93 if (chunk > len) in __skb_nsg()
94 chunk = len; in __skb_nsg()
96 len -= chunk; in __skb_nsg()
99 offset += chunk; in __skb_nsg()
108 chunk = end - offset; in __skb_nsg()
109 if (chunk > 0) { in __skb_nsg()
110 if (chunk > len) in __skb_nsg()
111 chunk = len; in __skb_nsg()
[all …]
/net/bluetooth/
af_bluetooth.c
402 int chunk; in bt_sock_stream_recvmsg() local
428 chunk = min_t(unsigned int, skb->len, size); in bt_sock_stream_recvmsg()
429 if (skb_copy_datagram_msg(skb, 0, msg, chunk)) { in bt_sock_stream_recvmsg()
435 copied += chunk; in bt_sock_stream_recvmsg()
436 size -= chunk; in bt_sock_stream_recvmsg()
443 if (chunk <= skb_len) { in bt_sock_stream_recvmsg()
444 __skb_pull(skb, chunk); in bt_sock_stream_recvmsg()
449 chunk -= skb_len; in bt_sock_stream_recvmsg()
452 if (chunk <= frag->len) { in bt_sock_stream_recvmsg()
454 skb->len -= chunk; in bt_sock_stream_recvmsg()
[all …]
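
In the /net/tls and /net/bluetooth hits, "chunk" is a plain byte count rather than an SCTP or RDMA structure: each loop clamps it to the smaller of what the current buffer holds and what the caller still wants, then advances the copied/remaining counters by that amount (see bt_sock_stream_recvmsg() and __skb_nsg() above). A small self-contained sketch of that accounting, with made-up buffer names:

#include <stdio.h>
#include <string.h>

/* Copy from a sequence of fixed-size "skb"-like buffers into a user buffer,
 * taking min(buffer length, bytes still wanted) per step -- the same
 * clamping seen in bt_sock_stream_recvmsg() and __skb_nsg(). */
static size_t recv_stream(const char *bufs[], const size_t lens[], size_t nbufs,
			  char *out, size_t size)
{
	size_t copied = 0;

	for (size_t i = 0; i < nbufs && size > 0; i++) {
		size_t chunk = lens[i];
		if (chunk > size)
			chunk = size;	/* don't copy more than the caller asked for */
		memcpy(out + copied, bufs[i], chunk);
		copied += chunk;
		size -= chunk;
	}
	return copied;
}

int main(void)
{
	const char *bufs[] = { "hello ", "world", "!!" };
	const size_t lens[] = { 6, 5, 2 };
	char out[8];

	size_t n = recv_stream(bufs, lens, 3, out, sizeof(out) - 1);
	out[n] = '\0';
	printf("copied %zu bytes: \"%s\"\n", n, out);	/* copied 7 bytes: "hello w" */
	return 0;
}
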
