/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * SPDX-License-Identifier: curl
 *
 ***************************************************************************/

#include "curl_setup.h"
#include "bufq.h"

/* The last 3 #include files should be in this order */
#include "curl_printf.h"
#include "curl_memory.h"
#include "memdebug.h"

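/* A bufq is a FIFO byte queue kept in a list of fixed-size chunks. Writes
 * append to the tail chunk, reads consume from the head chunk, and drained
 * chunks are recycled or returned to an optional bufc_pool.
 *
 * A minimal usage sketch (illustrative only; error handling omitted):
 *
 *   struct bufq q;
 *   unsigned char out[128];
 *   CURLcode result;
 *   ssize_t n;
 *
 *   Curl_bufq_init(&q, 1024, 4);   /- up to 4 chunks of 1024 bytes each -/
 *   n = Curl_bufq_write(&q, (const unsigned char *)"hello", 5, &result);
 *   n = Curl_bufq_read(&q, out, sizeof(out), &result);
 *   Curl_bufq_free(&q);
 *
 * The chunk_* helpers below operate on a single `buf_chunk`: a buffer of
 * `dlen` bytes where the valid data lives in [r_offset, w_offset). */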
static bool chunk_is_empty(const struct buf_chunk *chunk)
{
  return chunk->r_offset >= chunk->w_offset;
}

static bool chunk_is_full(const struct buf_chunk *chunk)
{
  return chunk->w_offset >= chunk->dlen;
}

static size_t chunk_len(const struct buf_chunk *chunk)
{
  return chunk->w_offset - chunk->r_offset;
}

static size_t chunk_space(const struct buf_chunk *chunk)
{
  return chunk->dlen - chunk->w_offset;
}

static void chunk_reset(struct buf_chunk *chunk)
{
  chunk->next = NULL;
  chunk->r_offset = chunk->w_offset = 0;
}

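/* Append up to `len` bytes from `buf` into the chunk's free space.
 * Returns the number of bytes copied, 0 when the chunk is full. */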
static size_t chunk_append(struct buf_chunk *chunk,
                           const unsigned char *buf, size_t len)
{
  unsigned char *p = &chunk->x.data[chunk->w_offset];
  size_t n = chunk->dlen - chunk->w_offset;
  DEBUGASSERT(chunk->dlen >= chunk->w_offset);
  if(n) {
    n = CURLMIN(n, len);
    memcpy(p, buf, n);
    chunk->w_offset += n;
  }
  return n;
}

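/* Copy up to `len` bytes from the chunk into `buf`, advancing the read
 * offset. A completely drained chunk has its offsets reset to 0. */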
static size_t chunk_read(struct buf_chunk *chunk,
                         unsigned char *buf, size_t len)
{
  unsigned char *p = &chunk->x.data[chunk->r_offset];
  size_t n = chunk->w_offset - chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  if(!n) {
    return 0;
  }
  else if(n <= len) {
    memcpy(buf, p, n);
    chunk->r_offset = chunk->w_offset = 0;
    return n;
  }
  else {
    memcpy(buf, p, len);
    chunk->r_offset += len;
    return len;
  }
}

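/* Let `reader` fill the chunk's free space directly, at most `max_len`
 * bytes (no limit when 0). Returns the reader's result; the write offset
 * is advanced by the amount read. Fails with CURLE_AGAIN when the chunk
 * is already full. */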
static ssize_t chunk_slurpn(struct buf_chunk *chunk, size_t max_len,
                            Curl_bufq_reader *reader,
                            void *reader_ctx, CURLcode *err)
{
  unsigned char *p = &chunk->x.data[chunk->w_offset];
  size_t n = chunk->dlen - chunk->w_offset; /* free amount */
  ssize_t nread;

  DEBUGASSERT(chunk->dlen >= chunk->w_offset);
  if(!n) {
    *err = CURLE_AGAIN;
    return -1;
  }
  if(max_len && n > max_len)
    n = max_len;
  nread = reader(reader_ctx, p, n, err);
  if(nread > 0) {
    DEBUGASSERT((size_t)nread <= n);
    chunk->w_offset += nread;
  }
  return nread;
}

static void chunk_peek(const struct buf_chunk *chunk,
                       const unsigned char **pbuf, size_t *plen)
{
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  *pbuf = &chunk->x.data[chunk->r_offset];
  *plen = chunk->w_offset - chunk->r_offset;
}

static void chunk_peek_at(const struct buf_chunk *chunk, size_t offset,
                          const unsigned char **pbuf, size_t *plen)
{
  offset += chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= offset);
  *pbuf = &chunk->x.data[offset];
  *plen = chunk->w_offset - offset;
}

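/* Discard up to `amount` bytes from the front of the chunk. Returns the
 * number of bytes actually skipped. */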
static size_t chunk_skip(struct buf_chunk *chunk, size_t amount)
{
  size_t n = chunk->w_offset - chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  if(n) {
    n = CURLMIN(n, amount);
    chunk->r_offset += n;
    if(chunk->r_offset == chunk->w_offset)
      chunk->r_offset = chunk->w_offset = 0;
  }
  return n;
}

static void chunk_list_free(struct buf_chunk **anchor)
{
  struct buf_chunk *chunk;
  while(*anchor) {
    chunk = *anchor;
    *anchor = chunk->next;
    free(chunk);
  }
}


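/* A bufc_pool hands out chunks of a fixed `chunk_size` and keeps up to
 * `spare_max` returned chunks around for reuse. */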
void Curl_bufcp_init(struct bufc_pool *pool,
                     size_t chunk_size, size_t spare_max)
{
  DEBUGASSERT(chunk_size > 0);
  DEBUGASSERT(spare_max > 0);
  memset(pool, 0, sizeof(*pool));
  pool->chunk_size = chunk_size;
  pool->spare_max = spare_max;
}

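/* Take a chunk from the pool: reuse a spare if available, otherwise
 * allocate a new one of `chunk_size` bytes. */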
static CURLcode bufcp_take(struct bufc_pool *pool,
                           struct buf_chunk **pchunk)
{
  struct buf_chunk *chunk = NULL;

  if(pool->spare) {
    chunk = pool->spare;
    pool->spare = chunk->next;
    --pool->spare_count;
    chunk_reset(chunk);
    *pchunk = chunk;
    return CURLE_OK;
  }

  chunk = calloc(1, sizeof(*chunk) + pool->chunk_size);
  if(!chunk) {
    *pchunk = NULL;
    return CURLE_OUT_OF_MEMORY;
  }
  chunk->dlen = pool->chunk_size;
  *pchunk = chunk;
  return CURLE_OK;
}

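/* Return a chunk to the pool: kept as a spare up to `spare_max`,
 * otherwise freed. */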
static void bufcp_put(struct bufc_pool *pool,
                      struct buf_chunk *chunk)
{
  if(pool->spare_count >= pool->spare_max) {
    free(chunk);
  }
  else {
    chunk_reset(chunk);
    chunk->next = pool->spare;
    pool->spare = chunk;
    ++pool->spare_count;
  }
}

void Curl_bufcp_free(struct bufc_pool *pool)
{
  chunk_list_free(&pool->spare);
  pool->spare_count = 0;
}

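/* Set up a bufq for at most `max_chunks` chunks of `chunk_size` bytes.
 * When initialized via Curl_bufq_initp(), chunks are taken from and
 * returned to the given pool instead of being allocated individually. */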
static void bufq_init(struct bufq *q, struct bufc_pool *pool,
                      size_t chunk_size, size_t max_chunks, int opts)
{
  DEBUGASSERT(chunk_size > 0);
  DEBUGASSERT(max_chunks > 0);
  memset(q, 0, sizeof(*q));
  q->chunk_size = chunk_size;
  q->max_chunks = max_chunks;
  q->pool = pool;
  q->opts = opts;
}

void Curl_bufq_init2(struct bufq *q, size_t chunk_size, size_t max_chunks,
                     int opts)
{
  bufq_init(q, NULL, chunk_size, max_chunks, opts);
}

void Curl_bufq_init(struct bufq *q, size_t chunk_size, size_t max_chunks)
{
  bufq_init(q, NULL, chunk_size, max_chunks, BUFQ_OPT_NONE);
}

void Curl_bufq_initp(struct bufq *q, struct bufc_pool *pool,
                     size_t max_chunks, int opts)
{
  bufq_init(q, pool, pool->chunk_size, max_chunks, opts);
}

void Curl_bufq_free(struct bufq *q)
{
  chunk_list_free(&q->head);
  chunk_list_free(&q->spare);
  q->tail = NULL;
  q->chunk_count = 0;
}

void Curl_bufq_reset(struct bufq *q)
{
  struct buf_chunk *chunk;
  while(q->head) {
    chunk = q->head;
    q->head = chunk->next;
    chunk->next = q->spare;
    q->spare = chunk;
  }
  q->tail = NULL;
}

size_t Curl_bufq_len(const struct bufq *q)
{
  const struct buf_chunk *chunk = q->head;
  size_t len = 0;
  while(chunk) {
    len += chunk_len(chunk);
    chunk = chunk->next;
  }
  return len;
}

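/* Return the number of bytes that can still be written to `q`: free space
 * in the tail chunk, the capacity of spare chunks, plus the capacity of
 * chunks that may still be created before `max_chunks` is reached. */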
size_t Curl_bufq_space(const struct bufq *q)
{
  size_t space = 0;
  if(q->tail)
    space += chunk_space(q->tail);
  if(q->spare) {
    struct buf_chunk *chunk = q->spare;
    while(chunk) {
      space += chunk->dlen;
      chunk = chunk->next;
    }
  }
  if(q->chunk_count < q->max_chunks) {
    space += (q->max_chunks - q->chunk_count) * q->chunk_size;
  }
  return space;
}

bool Curl_bufq_is_empty(const struct bufq *q)
{
  return !q->head || chunk_is_empty(q->head);
}

bool Curl_bufq_is_full(const struct bufq *q)
{
  if(!q->tail || q->spare)
    return FALSE;
  if(q->chunk_count < q->max_chunks)
    return FALSE;
  if(q->chunk_count > q->max_chunks)
    return TRUE;
  /* we have no spares and cannot make more, is the tail full? */
  return chunk_is_full(q->tail);
}

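/* Get an empty chunk to write into: reuse a spare if one exists, otherwise
 * allocate a new chunk (from the pool when set). Returns NULL when the
 * queue is at `max_chunks` and BUFQ_OPT_SOFT_LIMIT is not set, or when
 * allocation fails. */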
static struct buf_chunk *get_spare(struct bufq *q)
{
  struct buf_chunk *chunk = NULL;

  if(q->spare) {
    chunk = q->spare;
    q->spare = chunk->next;
    chunk_reset(chunk);
    return chunk;
  }

  if(q->chunk_count >= q->max_chunks && (!(q->opts & BUFQ_OPT_SOFT_LIMIT)))
    return NULL;

  if(q->pool) {
    if(bufcp_take(q->pool, &chunk))
      return NULL;
    ++q->chunk_count;
    return chunk;
  }
  else {
    chunk = calloc(1, sizeof(*chunk) + q->chunk_size);
    if(!chunk)
      return NULL;
    chunk->dlen = q->chunk_size;
    ++q->chunk_count;
    return chunk;
  }
}

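/* Remove empty chunks from the head of the queue. Depending on the
 * configuration they are returned to the pool, kept as spares or freed. */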
static void prune_head(struct bufq *q)
{
  struct buf_chunk *chunk;

  while(q->head && chunk_is_empty(q->head)) {
    chunk = q->head;
    q->head = chunk->next;
    if(q->tail == chunk)
      q->tail = q->head;
    if(q->pool) {
      bufcp_put(q->pool, chunk);
      --q->chunk_count;
    }
    else if((q->chunk_count > q->max_chunks) ||
            (q->opts & BUFQ_OPT_NO_SPARES)) {
      /* SOFT_LIMIT allowed us more than max. free spares until
       * we are at max again. Or free them if we are configured
       * to not use spares. */
      free(chunk);
      --q->chunk_count;
    }
    else {
      chunk->next = q->spare;
      q->spare = chunk;
    }
  }
}

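/* Return the tail chunk if it still has room, otherwise append a new
 * chunk and return that. Returns NULL if no chunk could be added. */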
static struct buf_chunk *get_non_full_tail(struct bufq *q)
{
  struct buf_chunk *chunk;

  if(q->tail && !chunk_is_full(q->tail))
    return q->tail;
  chunk = get_spare(q);
  if(chunk) {
    /* new tail, and possibly new head */
    if(q->tail) {
      q->tail->next = chunk;
      q->tail = chunk;
    }
    else {
      DEBUGASSERT(!q->head);
      q->head = q->tail = chunk;
    }
  }
  return chunk;
}

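/* Append up to `len` bytes to the queue, adding chunks as needed. Returns
 * the number of bytes written (possibly less than `len` when the queue
 * fills up) or -1 with `*err` set, e.g. CURLE_AGAIN when nothing fit. */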
ssize_t Curl_bufq_write(struct bufq *q,
                        const unsigned char *buf, size_t len,
                        CURLcode *err)
{
  struct buf_chunk *tail;
  ssize_t nwritten = 0;
  size_t n;

  DEBUGASSERT(q->max_chunks > 0);
  while(len) {
    tail = get_non_full_tail(q);
    if(!tail) {
      if(q->chunk_count < q->max_chunks) {
        *err = CURLE_OUT_OF_MEMORY;
        return -1;
      }
      break;
    }
    n = chunk_append(tail, buf, len);
    if(!n)
      break;
    nwritten += n;
    buf += n;
    len -= n;
  }
  if(nwritten == 0 && len) {
    *err = CURLE_AGAIN;
    return -1;
  }
  *err = CURLE_OK;
  return nwritten;
}

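/* Copy up to `len` bytes from the queue into `buf`, pruning chunks as they
 * drain. Returns the number of bytes read or -1 with `*err` set to
 * CURLE_AGAIN when the queue is empty. */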
ssize_t Curl_bufq_read(struct bufq *q, unsigned char *buf, size_t len,
                       CURLcode *err)
{
  ssize_t nread = 0;
  size_t n;

  *err = CURLE_OK;
  while(len && q->head) {
    n = chunk_read(q->head, buf, len);
    if(n) {
      nread += n;
      buf += n;
      len -= n;
    }
    prune_head(q);
  }
  if(nread == 0) {
    *err = CURLE_AGAIN;
    return -1;
  }
  return nread;
}

bool Curl_bufq_peek(struct bufq *q,
                    const unsigned char **pbuf, size_t *plen)
{
  if(q->head && chunk_is_empty(q->head)) {
    prune_head(q);
  }
  if(q->head && !chunk_is_empty(q->head)) {
    chunk_peek(q->head, pbuf, plen);
    return TRUE;
  }
  *pbuf = NULL;
  *plen = 0;
  return FALSE;
}

bool Curl_bufq_peek_at(struct bufq *q, size_t offset,
                       const unsigned char **pbuf, size_t *plen)
{
  struct buf_chunk *c = q->head;
  size_t clen;

  while(c) {
    clen = chunk_len(c);
    if(!clen)
      break;
    if(offset >= clen) {
      offset -= clen;
      c = c->next;
      continue;
    }
    chunk_peek_at(c, offset, pbuf, plen);
    return TRUE;
  }
  *pbuf = NULL;
  *plen = 0;
  return FALSE;
}

void Curl_bufq_skip(struct bufq *q, size_t amount)
{
  size_t n;

  while(amount && q->head) {
    n = chunk_skip(q->head, amount);
    amount -= n;
    prune_head(q);
  }
}

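/* Pass buffered data to `writer` until the queue is empty, the writer
 * blocks or an error occurs. Bytes accepted by the writer are removed
 * from the queue. */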
ssize_t Curl_bufq_pass(struct bufq *q, Curl_bufq_writer *writer,
                       void *writer_ctx, CURLcode *err)
{
  const unsigned char *buf;
  size_t blen;
  ssize_t nwritten = 0;

  while(Curl_bufq_peek(q, &buf, &blen)) {
    ssize_t chunk_written;

    chunk_written = writer(writer_ctx, buf, blen, err);
    if(chunk_written < 0) {
      if(!nwritten || *err != CURLE_AGAIN) {
        /* blocked on first write or real error, fail */
        nwritten = -1;
      }
      break;
    }
    if(!chunk_written) {
      if(!nwritten) {
        /* treat as blocked */
        *err = CURLE_AGAIN;
        nwritten = -1;
      }
      break;
    }
    Curl_bufq_skip(q, (size_t)chunk_written);
    nwritten += chunk_written;
  }
  return nwritten;
}

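/* Add `buf` to the queue, flushing buffered data to `writer` whenever the
 * queue runs full. Returns the number of bytes added or -1 with `*err`
 * set (CURLE_AGAIN when nothing could be added without blocking). */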
ssize_t Curl_bufq_write_pass(struct bufq *q,
                             const unsigned char *buf, size_t len,
                             Curl_bufq_writer *writer, void *writer_ctx,
                             CURLcode *err)
{
  ssize_t nwritten = 0, n;

  *err = CURLE_OK;
  while(len) {
    if(Curl_bufq_is_full(q)) {
      /* try to make room in case we are full */
      n = Curl_bufq_pass(q, writer, writer_ctx, err);
      if(n < 0) {
        if(*err != CURLE_AGAIN) {
          /* real error, fail */
          return -1;
        }
        /* would block, bufq is full, give up */
        break;
      }
    }

    /* Add whatever is remaining now to bufq */
    n = Curl_bufq_write(q, buf, len, err);
    if(n < 0) {
      if(*err != CURLE_AGAIN) {
        /* real error, fail */
        return -1;
      }
      /* no room in bufq */
      break;
    }
    /* Edge case of Curl_bufq_write() returning 0 while len is > 0:
     * break, or we might enter an infinite loop here */
    if(n == 0)
      break;

    /* Maybe only part of `buf` has been added, continue to loop */
    buf += (size_t)n;
    len -= (size_t)n;
    nwritten += (size_t)n;
  }

  if(!nwritten && len) {
    *err = CURLE_AGAIN;
    return -1;
  }
  *err = CURLE_OK;
  return nwritten;
}

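/* Let `reader` add data directly into the queue's tail chunk, at most
 * `max_len` bytes (no limit when 0) and at most one chunk's worth per
 * call. Returns the bytes read, 0 on EOF, or -1 with `*err` set. */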
ssize_t Curl_bufq_sipn(struct bufq *q, size_t max_len,
                       Curl_bufq_reader *reader, void *reader_ctx,
                       CURLcode *err)
{
  struct buf_chunk *tail = NULL;
  ssize_t nread;

  *err = CURLE_AGAIN;
  tail = get_non_full_tail(q);
  if(!tail) {
    if(q->chunk_count < q->max_chunks) {
      *err = CURLE_OUT_OF_MEMORY;
      return -1;
    }
    /* full, blocked */
    *err = CURLE_AGAIN;
    return -1;
  }

  nread = chunk_slurpn(tail, max_len, reader, reader_ctx, err);
  if(nread < 0) {
    return -1;
  }
  else if(nread == 0) {
    /* eof */
    *err = CURLE_OK;
  }
  return nread;
}

/**
 * Read up to `max_len` bytes and append them to the end of the buffer
 * queue. If `max_len` is 0, no limit is imposed and the call behaves
 * exactly the same as `Curl_bufq_slurp()`.
 * Returns the total amount of data read (may be 0) or -1 on other
 * reader errors.
 * Note that even in the case of -1, chunks may have been read and
 * the buffer queue will have a different length than before.
 */
static ssize_t bufq_slurpn(struct bufq *q, size_t max_len,
                           Curl_bufq_reader *reader, void *reader_ctx,
                           CURLcode *err)
{
  ssize_t nread = 0, n;

  *err = CURLE_AGAIN;
  while(1) {

    n = Curl_bufq_sipn(q, max_len, reader, reader_ctx, err);
    if(n < 0) {
      if(!nread || *err != CURLE_AGAIN) {
        /* blocked on first read or real error, fail */
        nread = -1;
      }
      else
        *err = CURLE_OK;
      break;
    }
    else if(n == 0) {
      /* eof */
      *err = CURLE_OK;
      break;
    }
    nread += (size_t)n;
    if(max_len) {
      DEBUGASSERT((size_t)n <= max_len);
      max_len -= (size_t)n;
      if(!max_len)
        break;
    }
    /* give up slurping when we get fewer bytes than we asked for */
    if(q->tail && !chunk_is_full(q->tail))
      break;
  }
  return nread;
}

ssize_t Curl_bufq_slurp(struct bufq *q, Curl_bufq_reader *reader,
                        void *reader_ctx, CURLcode *err)
{
  return bufq_slurpn(q, 0, reader, reader_ctx, err);
}