/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * SPDX-License-Identifier: curl
 *
 ***************************************************************************/

#include "curl_setup.h"
#include "bufq.h"

/* The last 3 #include files should be in this order */
#include "curl_printf.h"
#include "curl_memory.h"
#include "memdebug.h"

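/*
 * An illustrative usage sketch (not part of this file's code, sizes are
 * arbitrary, error handling omitted): a `bufq` is initialized with a chunk
 * size and a maximum chunk count, written to, read from and finally freed:
 *
 *   struct bufq q;
 *   CURLcode err;
 *   unsigned char out[128];
 *   ssize_t n;
 *
 *   Curl_bufq_init(&q, 1024, 4);
 *   n = Curl_bufq_write(&q, (const unsigned char *)"hello", 5, &err);
 *   n = Curl_bufq_read(&q, out, sizeof(out), &err);
 *   Curl_bufq_free(&q);
 *
 * Writes fill the tail chunk and add chunks up to `max_chunks`; reads
 * drain from the head chunk. A full queue on write or an empty queue on
 * read makes the call return -1 with the error set to CURLE_AGAIN.
 */
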
static bool chunk_is_empty(const struct buf_chunk *chunk)
{
  return chunk->r_offset >= chunk->w_offset;
}

static bool chunk_is_full(const struct buf_chunk *chunk)
{
  return chunk->w_offset >= chunk->dlen;
}

static size_t chunk_len(const struct buf_chunk *chunk)
{
  return chunk->w_offset - chunk->r_offset;
}

static void chunk_reset(struct buf_chunk *chunk)
{
  chunk->next = NULL;
  chunk->r_offset = chunk->w_offset = 0;
}

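/* Append up to `len` bytes from `buf` into the chunk's free space.
 * Returns the number of bytes copied, 0 if the chunk is full. */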
static size_t chunk_append(struct buf_chunk *chunk,
                           const unsigned char *buf, size_t len)
{
  unsigned char *p = &chunk->x.data[chunk->w_offset];
  size_t n = chunk->dlen - chunk->w_offset;
  DEBUGASSERT(chunk->dlen >= chunk->w_offset);
  if(n) {
    n = CURLMIN(n, len);
    memcpy(p, buf, n);
    chunk->w_offset += n;
  }
  return n;
}

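/* Copy up to `len` bytes of unread data from the chunk into `buf`.
 * Resets the chunk offsets when it is fully drained.
 * Returns the number of bytes copied. */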
static size_t chunk_read(struct buf_chunk *chunk,
                         unsigned char *buf, size_t len)
{
  unsigned char *p = &chunk->x.data[chunk->r_offset];
  size_t n = chunk->w_offset - chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  if(!n) {
    return 0;
  }
  else if(n <= len) {
    memcpy(buf, p, n);
    chunk->r_offset = chunk->w_offset = 0;
    return n;
  }
  else {
    memcpy(buf, p, len);
    chunk->r_offset += len;
    return len;
  }
}

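/* Take back up to `len` of the bytes last written to the chunk.
 * Returns the number of bytes removed. */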
static size_t chunk_unwrite(struct buf_chunk *chunk, size_t len)
{
  size_t n = chunk->w_offset - chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  if(!n) {
    return 0;
  }
  else if(n <= len) {
    chunk->r_offset = chunk->w_offset = 0;
    return n;
  }
  else {
    chunk->w_offset -= len;
    return len;
  }
}

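/* Let `reader` fill the chunk's free space, at most `max_len` bytes when
 * `max_len` is non-zero. Returns the reader's result; -1 with CURLE_AGAIN
 * in `err` when the chunk has no room left. */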
static ssize_t chunk_slurpn(struct buf_chunk *chunk, size_t max_len,
                            Curl_bufq_reader *reader,
                            void *reader_ctx, CURLcode *err)
{
  unsigned char *p = &chunk->x.data[chunk->w_offset];
  size_t n = chunk->dlen - chunk->w_offset; /* free amount */
  ssize_t nread;

  DEBUGASSERT(chunk->dlen >= chunk->w_offset);
  if(!n) {
    *err = CURLE_AGAIN;
    return -1;
  }
  if(max_len && n > max_len)
    n = max_len;
  nread = reader(reader_ctx, p, n, err);
  if(nread > 0) {
    DEBUGASSERT((size_t)nread <= n);
    chunk->w_offset += nread;
  }
  return nread;
}

static void chunk_peek(const struct buf_chunk *chunk,
                       const unsigned char **pbuf, size_t *plen)
{
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  *pbuf = &chunk->x.data[chunk->r_offset];
  *plen = chunk->w_offset - chunk->r_offset;
}

static void chunk_peek_at(const struct buf_chunk *chunk, size_t offset,
                          const unsigned char **pbuf, size_t *plen)
{
  offset += chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= offset);
  *pbuf = &chunk->x.data[offset];
  *plen = chunk->w_offset - offset;
}

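/* Mark up to `amount` unread bytes in the chunk as consumed.
 * Returns the number of bytes skipped. */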
static size_t chunk_skip(struct buf_chunk *chunk, size_t amount)
{
  size_t n = chunk->w_offset - chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  if(n) {
    n = CURLMIN(n, amount);
    chunk->r_offset += n;
    if(chunk->r_offset == chunk->w_offset)
      chunk->r_offset = chunk->w_offset = 0;
  }
  return n;
}

static void chunk_list_free(struct buf_chunk **anchor)
{
  struct buf_chunk *chunk;
  while(*anchor) {
    chunk = *anchor;
    *anchor = chunk->next;
    free(chunk);
  }
}


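/* Initialize a chunk pool that hands out chunks of `chunk_size` bytes and
 * keeps at most `spare_max` returned chunks around for reuse. */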
void Curl_bufcp_init(struct bufc_pool *pool,
                     size_t chunk_size, size_t spare_max)
{
  DEBUGASSERT(chunk_size > 0);
  DEBUGASSERT(spare_max > 0);
  memset(pool, 0, sizeof(*pool));
  pool->chunk_size = chunk_size;
  pool->spare_max = spare_max;
}

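/* Take a chunk from the pool's spare list or allocate a new one. */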
static CURLcode bufcp_take(struct bufc_pool *pool,
                           struct buf_chunk **pchunk)
{
  struct buf_chunk *chunk = NULL;

  if(pool->spare) {
    chunk = pool->spare;
    pool->spare = chunk->next;
    --pool->spare_count;
    chunk_reset(chunk);
    *pchunk = chunk;
    return CURLE_OK;
  }

  chunk = calloc(1, sizeof(*chunk) + pool->chunk_size);
  if(!chunk) {
    *pchunk = NULL;
    return CURLE_OUT_OF_MEMORY;
  }
  chunk->dlen = pool->chunk_size;
  *pchunk = chunk;
  return CURLE_OK;
}

static void bufcp_put(struct bufc_pool *pool,
                      struct buf_chunk *chunk)
{
  if(pool->spare_count >= pool->spare_max) {
    free(chunk);
  }
  else {
    chunk_reset(chunk);
    chunk->next = pool->spare;
    pool->spare = chunk;
    ++pool->spare_count;
  }
}

void Curl_bufcp_free(struct bufc_pool *pool)
{
  chunk_list_free(&pool->spare);
  pool->spare_count = 0;
}

static void bufq_init(struct bufq *q, struct bufc_pool *pool,
                      size_t chunk_size, size_t max_chunks, int opts)
{
  DEBUGASSERT(chunk_size > 0);
  DEBUGASSERT(max_chunks > 0);
  memset(q, 0, sizeof(*q));
  q->chunk_size = chunk_size;
  q->max_chunks = max_chunks;
  q->pool = pool;
  q->opts = opts;
}

void Curl_bufq_init2(struct bufq *q, size_t chunk_size, size_t max_chunks,
                     int opts)
{
  bufq_init(q, NULL, chunk_size, max_chunks, opts);
}

void Curl_bufq_init(struct bufq *q, size_t chunk_size, size_t max_chunks)
{
  bufq_init(q, NULL, chunk_size, max_chunks, BUFQ_OPT_NONE);
}

void Curl_bufq_initp(struct bufq *q, struct bufc_pool *pool,
                     size_t max_chunks, int opts)
{
  bufq_init(q, pool, pool->chunk_size, max_chunks, opts);
}

void Curl_bufq_free(struct bufq *q)
{
  chunk_list_free(&q->head);
  chunk_list_free(&q->spare);
  q->tail = NULL;
  q->chunk_count = 0;
}

void Curl_bufq_reset(struct bufq *q)
{
  struct buf_chunk *chunk;
  while(q->head) {
    chunk = q->head;
    q->head = chunk->next;
    chunk->next = q->spare;
    q->spare = chunk;
  }
  q->tail = NULL;
}

size_t Curl_bufq_len(const struct bufq *q)
{
  const struct buf_chunk *chunk = q->head;
  size_t len = 0;
  while(chunk) {
    len += chunk_len(chunk);
    chunk = chunk->next;
  }
  return len;
}

bool Curl_bufq_is_empty(const struct bufq *q)
{
  return !q->head || chunk_is_empty(q->head);
}

bool Curl_bufq_is_full(const struct bufq *q)
{
  if(!q->tail || q->spare)
    return FALSE;
  if(q->chunk_count < q->max_chunks)
    return FALSE;
  if(q->chunk_count > q->max_chunks)
    return TRUE;
  /* we have no spares and cannot make more, is the tail full? */
  return chunk_is_full(q->tail);
}

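/* Get a fresh chunk for the queue: reuse a spare, take one from the pool
 * or allocate one. Returns NULL when the chunk limit has been reached
 * (unless BUFQ_OPT_SOFT_LIMIT is set) or on allocation failure. */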
static struct buf_chunk *get_spare(struct bufq *q)
{
  struct buf_chunk *chunk = NULL;

  if(q->spare) {
    chunk = q->spare;
    q->spare = chunk->next;
    chunk_reset(chunk);
    return chunk;
  }

  if(q->chunk_count >= q->max_chunks && (!(q->opts & BUFQ_OPT_SOFT_LIMIT)))
    return NULL;

  if(q->pool) {
    if(bufcp_take(q->pool, &chunk))
      return NULL;
    ++q->chunk_count;
    return chunk;
  }
  else {
    chunk = calloc(1, sizeof(*chunk) + q->chunk_size);
    if(!chunk)
      return NULL;
    chunk->dlen = q->chunk_size;
    ++q->chunk_count;
    return chunk;
  }
}

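/* Remove empty chunks from the head of the queue, returning them to the
 * pool or the spare list, or freeing them. */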
static void prune_head(struct bufq *q)
{
  struct buf_chunk *chunk;

  while(q->head && chunk_is_empty(q->head)) {
    chunk = q->head;
    q->head = chunk->next;
    if(q->tail == chunk)
      q->tail = q->head;
    if(q->pool) {
      bufcp_put(q->pool, chunk);
      --q->chunk_count;
    }
    else if((q->chunk_count > q->max_chunks) ||
            (q->opts & BUFQ_OPT_NO_SPARES)) {
      /* SOFT_LIMIT allowed us more than max. free spares until
       * we are at max again. Or free them if we are configured
       * to not use spares. */
      free(chunk);
      --q->chunk_count;
    }
    else {
      chunk->next = q->spare;
      q->spare = chunk;
    }
  }
}

static struct buf_chunk *chunk_prev(struct buf_chunk *head,
                                    struct buf_chunk *chunk)
{
  while(head) {
    if(head == chunk)
      return NULL;
    if(head->next == chunk)
      return head;
    head = head->next;
  }
  return NULL;
}

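/* Remove empty chunks from the tail of the queue, returning them to the
 * pool or the spare list, or freeing them. */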
static void prune_tail(struct bufq *q)
{
  struct buf_chunk *chunk;

  while(q->tail && chunk_is_empty(q->tail)) {
    chunk = q->tail;
    q->tail = chunk_prev(q->head, chunk);
    if(q->tail)
      q->tail->next = NULL;
    if(q->head == chunk)
      q->head = q->tail;
    if(q->pool) {
      bufcp_put(q->pool, chunk);
      --q->chunk_count;
    }
    else if((q->chunk_count > q->max_chunks) ||
            (q->opts & BUFQ_OPT_NO_SPARES)) {
      /* SOFT_LIMIT allowed us more than max. free spares until
       * we are at max again. Or free them if we are configured
       * to not use spares. */
      free(chunk);
      --q->chunk_count;
    }
    else {
      chunk->next = q->spare;
      q->spare = chunk;
    }
  }
}

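/* Return the tail chunk if it still has room, otherwise append a fresh
 * chunk as the new tail. Returns NULL if no chunk could be obtained. */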
static struct buf_chunk *get_non_full_tail(struct bufq *q)
{
  struct buf_chunk *chunk;

  if(q->tail && !chunk_is_full(q->tail))
    return q->tail;
  chunk = get_spare(q);
  if(chunk) {
    /* new tail, and possibly new head */
    if(q->tail) {
      q->tail->next = chunk;
      q->tail = chunk;
    }
    else {
      DEBUGASSERT(!q->head);
      q->head = q->tail = chunk;
    }
  }
  return chunk;
}

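/* Append `len` bytes from `buf` to the queue, adding chunks as permitted.
 * Returns the number of bytes added, or -1 with `*err` set: CURLE_AGAIN
 * when the queue is full, CURLE_OUT_OF_MEMORY on allocation failure. */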
ssize_t Curl_bufq_write(struct bufq *q,
                        const unsigned char *buf, size_t len,
                        CURLcode *err)
{
  struct buf_chunk *tail;
  ssize_t nwritten = 0;
  size_t n;

  DEBUGASSERT(q->max_chunks > 0);
  while(len) {
    tail = get_non_full_tail(q);
    if(!tail) {
      if((q->chunk_count < q->max_chunks) || (q->opts & BUFQ_OPT_SOFT_LIMIT)) {
        *err = CURLE_OUT_OF_MEMORY;
        return -1;
      }
      break;
    }
    n = chunk_append(tail, buf, len);
    if(!n)
      break;
    nwritten += n;
    buf += n;
    len -= n;
  }
  if(nwritten == 0 && len) {
    *err = CURLE_AGAIN;
    return -1;
  }
  *err = CURLE_OK;
  return nwritten;
}

CURLcode Curl_bufq_cwrite(struct bufq *q,
                          const char *buf, size_t len,
                          size_t *pnwritten)
{
  ssize_t n;
  CURLcode result;
  n = Curl_bufq_write(q, (const unsigned char *)buf, len, &result);
  *pnwritten = (n < 0) ? 0 : (size_t)n;
  return result;
}

CURLcode Curl_bufq_unwrite(struct bufq *q, size_t len)
{
  while(len && q->tail) {
    len -= chunk_unwrite(q->tail, len);
    prune_tail(q);
  }
  return len ? CURLE_AGAIN : CURLE_OK;
}

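/* Read up to `len` bytes from the queue into `buf`, pruning chunks as
 * they are drained. Returns the number of bytes read, or -1 with
 * CURLE_AGAIN in `*err` when the queue is empty. */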
ssize_t Curl_bufq_read(struct bufq *q, unsigned char *buf, size_t len,
                       CURLcode *err)
{
  ssize_t nread = 0;
  size_t n;

  *err = CURLE_OK;
  while(len && q->head) {
    n = chunk_read(q->head, buf, len);
    if(n) {
      nread += n;
      buf += n;
      len -= n;
    }
    prune_head(q);
  }
  if(nread == 0) {
    *err = CURLE_AGAIN;
    return -1;
  }
  return nread;
}

CURLcode Curl_bufq_cread(struct bufq *q, char *buf, size_t len,
                         size_t *pnread)
{
  ssize_t n;
  CURLcode result;
  n = Curl_bufq_read(q, (unsigned char *)buf, len, &result);
  *pnread = (n < 0) ? 0 : (size_t)n;
  return result;
}

bool Curl_bufq_peek(struct bufq *q,
                    const unsigned char **pbuf, size_t *plen)
{
  if(q->head && chunk_is_empty(q->head)) {
    prune_head(q);
  }
  if(q->head && !chunk_is_empty(q->head)) {
    chunk_peek(q->head, pbuf, plen);
    return TRUE;
  }
  *pbuf = NULL;
  *plen = 0;
  return FALSE;
}

bool Curl_bufq_peek_at(struct bufq *q, size_t offset,
                       const unsigned char **pbuf, size_t *plen)
{
  struct buf_chunk *c = q->head;
  size_t clen;

  while(c) {
    clen = chunk_len(c);
    if(!clen)
      break;
    if(offset >= clen) {
      offset -= clen;
      c = c->next;
      continue;
    }
    chunk_peek_at(c, offset, pbuf, plen);
    return TRUE;
  }
  *pbuf = NULL;
  *plen = 0;
  return FALSE;
}

void Curl_bufq_skip(struct bufq *q, size_t amount)
{
  size_t n;

  while(amount && q->head) {
    n = chunk_skip(q->head, amount);
    amount -= n;
    prune_head(q);
  }
}

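/* Pass the queue's unread bytes to `writer` and remove what it accepts.
 * Returns the number of bytes passed on, or -1 with `*err` set when the
 * writer failed or blocked before anything was written. */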
ssize_t Curl_bufq_pass(struct bufq *q, Curl_bufq_writer *writer,
                       void *writer_ctx, CURLcode *err)
{
  const unsigned char *buf;
  size_t blen;
  ssize_t nwritten = 0;

  while(Curl_bufq_peek(q, &buf, &blen)) {
    ssize_t chunk_written;

    chunk_written = writer(writer_ctx, buf, blen, err);
    if(chunk_written < 0) {
      if(!nwritten || *err != CURLE_AGAIN) {
        /* blocked on first write or real error, fail */
        nwritten = -1;
      }
      break;
    }
    if(!chunk_written) {
      if(!nwritten) {
        /* treat as blocked */
        *err = CURLE_AGAIN;
        nwritten = -1;
      }
      break;
    }
    Curl_bufq_skip(q, (size_t)chunk_written);
    nwritten += chunk_written;
  }
  return nwritten;
}

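/* Add `buf` to the queue, first trying to pass queued data to `writer`
 * whenever the queue is full. Returns the number of bytes of `buf` that
 * were added, or -1 with `*err` set on a real error or when nothing
 * could be added. */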
ssize_t Curl_bufq_write_pass(struct bufq *q,
                             const unsigned char *buf, size_t len,
                             Curl_bufq_writer *writer, void *writer_ctx,
                             CURLcode *err)
{
  ssize_t nwritten = 0, n;

  *err = CURLE_OK;
  while(len) {
    if(Curl_bufq_is_full(q)) {
      /* try to make room in case we are full */
      n = Curl_bufq_pass(q, writer, writer_ctx, err);
      if(n < 0) {
        if(*err != CURLE_AGAIN) {
          /* real error, fail */
          return -1;
        }
        /* would block, bufq is full, give up */
        break;
      }
    }

    /* Add whatever is remaining now to bufq */
    n = Curl_bufq_write(q, buf, len, err);
    if(n < 0) {
      if(*err != CURLE_AGAIN) {
        /* real error, fail */
        return -1;
      }
      /* no room in bufq */
      break;
    }
    /* edge case of writer returning 0 (and len is >0)
     * break or we might enter an infinite loop here */
    if(n == 0)
      break;

    /* Maybe only part of `data` has been added, continue to loop */
    buf += (size_t)n;
    len -= (size_t)n;
    nwritten += (size_t)n;
  }

  if(!nwritten && len) {
    *err = CURLE_AGAIN;
    return -1;
  }
  *err = CURLE_OK;
  return nwritten;
}

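/* Let `reader` add data to the queue, filling at most one chunk with up
 * to `max_len` bytes (no limit when 0). Returns the number of bytes read,
 * 0 on EOF, or -1 with `*err` set. */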
ssize_t Curl_bufq_sipn(struct bufq *q, size_t max_len,
                       Curl_bufq_reader *reader, void *reader_ctx,
                       CURLcode *err)
{
  struct buf_chunk *tail = NULL;
  ssize_t nread;

  *err = CURLE_AGAIN;
  tail = get_non_full_tail(q);
  if(!tail) {
    if(q->chunk_count < q->max_chunks) {
      *err = CURLE_OUT_OF_MEMORY;
      return -1;
    }
    /* full, blocked */
    *err = CURLE_AGAIN;
    return -1;
  }

  nread = chunk_slurpn(tail, max_len, reader, reader_ctx, err);
  if(nread < 0) {
    return -1;
  }
  else if(nread == 0) {
    /* eof */
    *err = CURLE_OK;
  }
  return nread;
}

/**
 * Read up to `max_len` bytes and append them to the end of the buffer
 * queue. If `max_len` is 0, no limit is imposed and the call behaves
 * exactly the same as `Curl_bufq_slurp()`.
 * Returns the total amount of bytes read (may be 0) or -1 on other
 * reader errors.
 * Note that even in the case of -1, chunks may have been read and
 * the buffer queue may have a different length than before.
 */
static ssize_t bufq_slurpn(struct bufq *q, size_t max_len,
                           Curl_bufq_reader *reader, void *reader_ctx,
                           CURLcode *err)
{
  ssize_t nread = 0, n;

  *err = CURLE_AGAIN;
  while(1) {

    n = Curl_bufq_sipn(q, max_len, reader, reader_ctx, err);
    if(n < 0) {
      if(!nread || *err != CURLE_AGAIN) {
        /* blocked on first read or real error, fail */
        nread = -1;
      }
      else
        *err = CURLE_OK;
      break;
    }
    else if(n == 0) {
      /* eof */
      *err = CURLE_OK;
      break;
    }
    nread += (size_t)n;
    if(max_len) {
      DEBUGASSERT((size_t)n <= max_len);
      max_len -= (size_t)n;
      if(!max_len)
        break;
    }
    /* give up slurping when we get less bytes than we asked for */
    if(q->tail && !chunk_is_full(q->tail))
      break;
  }
  return nread;
}

ssize_t Curl_bufq_slurp(struct bufq *q, Curl_bufq_reader *reader,
                        void *reader_ctx, CURLcode *err)
{
  return bufq_slurpn(q, 0, reader, reader_ctx, err);
}