1 /***************************************************************************
2 * _ _ ____ _
3 * Project ___| | | | _ \| |
4 * / __| | | | |_) | |
5 * | (__| |_| | _ <| |___
6 * \___|\___/|_| \_\_____|
7 *
8 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
9 *
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at https://curl.se/docs/copyright.html.
13 *
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
17 *
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
20 *
21 * SPDX-License-Identifier: curl
22 *
23 ***************************************************************************/
24
25 #include "curl_setup.h"
26 #include "strtoofft.h"
27
28 #ifdef HAVE_NETINET_IN_H
29 #include <netinet/in.h>
30 #endif
31 #ifdef HAVE_NETDB_H
32 #include <netdb.h>
33 #endif
34 #ifdef HAVE_ARPA_INET_H
35 #include <arpa/inet.h>
36 #endif
37 #ifdef HAVE_NET_IF_H
38 #include <net/if.h>
39 #endif
40 #ifdef HAVE_SYS_IOCTL_H
41 #include <sys/ioctl.h>
42 #endif
43 #ifdef HAVE_SIGNAL_H
44 #include <signal.h>
45 #endif
46
47 #ifdef HAVE_SYS_PARAM_H
48 #include <sys/param.h>
49 #endif
50
51 #ifdef HAVE_SYS_SELECT_H
52 #include <sys/select.h>
53 #elif defined(HAVE_UNISTD_H)
54 #include <unistd.h>
55 #endif
56
57 #ifndef HAVE_SOCKET
58 #error "We can't compile without socket() support!"
59 #endif
60
61 #include "urldata.h"
62 #include <curl/curl.h>
63 #include "netrc.h"
64
65 #include "content_encoding.h"
66 #include "hostip.h"
67 #include "cfilters.h"
68 #include "transfer.h"
69 #include "sendf.h"
70 #include "speedcheck.h"
71 #include "progress.h"
72 #include "http.h"
73 #include "url.h"
74 #include "getinfo.h"
75 #include "vtls/vtls.h"
76 #include "vquic/vquic.h"
77 #include "select.h"
78 #include "multiif.h"
79 #include "connect.h"
80 #include "http2.h"
81 #include "mime.h"
82 #include "strcase.h"
83 #include "urlapi-int.h"
84 #include "hsts.h"
85 #include "setopt.h"
86 #include "headers.h"
87
88 /* The last 3 #include files should be in this order */
89 #include "curl_printf.h"
90 #include "curl_memory.h"
91 #include "memdebug.h"
92
93 #if !defined(CURL_DISABLE_HTTP) || !defined(CURL_DISABLE_SMTP) || \
94 !defined(CURL_DISABLE_IMAP)
95 /*
96  * Curl_checkheaders() checks the linked list of custom headers for a
97  * particular header (prefix). Provide the prefix without the trailing colon!
98 *
99 * Returns a pointer to the first matching header or NULL if none matched.
100 */
101 char *Curl_checkheaders(const struct Curl_easy *data,
102 const char *thisheader,
103 const size_t thislen)
104 {
105 struct curl_slist *head;
106 DEBUGASSERT(thislen);
107 DEBUGASSERT(thisheader[thislen-1] != ':');
108
109 for(head = data->set.headers; head; head = head->next) {
110 if(strncasecompare(head->data, thisheader, thislen) &&
111 Curl_headersep(head->data[thislen]) )
112 return head->data;
113 }
114
115 return NULL;
116 }
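/* Usage sketch (illustrative only): a typical caller asks whether the
   application already provided a given header through CURLOPT_HTTPHEADER by
   passing the header name and its length, for example:

     if(!Curl_checkheaders(data, "Host", 4))
       generate_our_own_host_header();

   "Host" and generate_our_own_host_header() are made-up names used purely to
   show how the prefix/length pair is meant to be passed. */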
117 #endif
118
119 CURLcode Curl_get_upload_buffer(struct Curl_easy *data)
120 {
121 if(!data->state.ulbuf) {
122 data->state.ulbuf = malloc(data->set.upload_buffer_size);
123 if(!data->state.ulbuf)
124 return CURLE_OUT_OF_MEMORY;
125 }
126 return CURLE_OK;
127 }
128
129 #ifndef CURL_DISABLE_HTTP
130 /*
131 * This function will be called to loop through the trailers buffer
132 * until no more data is available for sending.
133 */
134 static size_t trailers_read(char *buffer, size_t size, size_t nitems,
135 void *raw)
136 {
137 struct Curl_easy *data = (struct Curl_easy *)raw;
138 struct dynbuf *trailers_buf = &data->state.trailers_buf;
139 size_t bytes_left = Curl_dyn_len(trailers_buf) -
140 data->state.trailers_bytes_sent;
141 size_t to_copy = (size*nitems < bytes_left) ? size*nitems : bytes_left;
142 if(to_copy) {
143 memcpy(buffer,
144 Curl_dyn_ptr(trailers_buf) + data->state.trailers_bytes_sent,
145 to_copy);
146 data->state.trailers_bytes_sent += to_copy;
147 }
148 return to_copy;
149 }
150
151 static size_t trailers_left(void *raw)
152 {
153 struct Curl_easy *data = (struct Curl_easy *)raw;
154 struct dynbuf *trailers_buf = &data->state.trailers_buf;
155 return Curl_dyn_len(trailers_buf) - data->state.trailers_bytes_sent;
156 }
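/* For reference, a rough sketch of the application side that feeds this
   machinery: the app registers a trailer callback with CURLOPT_TRAILERFUNCTION
   that appends "Name: value" entries to the provided list; the compiled result
   is then streamed out by trailers_read()/trailers_left() above. Illustrative
   only, not part of this translation unit:

     static int trailer_cb(struct curl_slist **list, void *userdata)
     {
       (void)userdata;
       *list = curl_slist_append(*list, "My-Checksum: abc123");
       return CURL_TRAILERFUNC_OK;
     }

     curl_easy_setopt(easy, CURLOPT_TRAILERFUNCTION, trailer_cb);
     curl_easy_setopt(easy, CURLOPT_TRAILERDATA, NULL);

   "My-Checksum" and 'easy' are made-up example names. */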
157 #endif
158
159 /*
160 * This function will call the read callback to fill our buffer with data
161 * to upload.
162 */
163 CURLcode Curl_fillreadbuffer(struct Curl_easy *data, size_t bytes,
164 size_t *nreadp)
165 {
166 size_t buffersize = bytes;
167 size_t nread;
168
169 curl_read_callback readfunc = NULL;
170 void *extra_data = NULL;
171
172 #ifndef CURL_DISABLE_HTTP
173 if(data->state.trailers_state == TRAILERS_INITIALIZED) {
174 struct curl_slist *trailers = NULL;
175 CURLcode result;
176 int trailers_ret_code;
177
178 /* at this point we already verified that the callback exists
179 so we compile and store the trailers buffer, then proceed */
180 infof(data,
181 "Moving trailers state machine from initialized to sending.");
182 data->state.trailers_state = TRAILERS_SENDING;
183 Curl_dyn_init(&data->state.trailers_buf, DYN_TRAILERS);
184
185 data->state.trailers_bytes_sent = 0;
186 Curl_set_in_callback(data, true);
187 trailers_ret_code = data->set.trailer_callback(&trailers,
188 data->set.trailer_data);
189 Curl_set_in_callback(data, false);
190 if(trailers_ret_code == CURL_TRAILERFUNC_OK) {
191 result = Curl_http_compile_trailers(trailers, &data->state.trailers_buf,
192 data);
193 }
194 else {
195 failf(data, "operation aborted by trailing headers callback");
196 *nreadp = 0;
197 result = CURLE_ABORTED_BY_CALLBACK;
198 }
199 if(result) {
200 Curl_dyn_free(&data->state.trailers_buf);
201 curl_slist_free_all(trailers);
202 return result;
203 }
204 infof(data, "Successfully compiled trailers.");
205 curl_slist_free_all(trailers);
206 }
207 #endif
208
209 #ifndef CURL_DISABLE_HTTP
210 /* if we are transmitting trailing data, we don't need to write
211 a chunk size so we skip this */
212 if(data->req.upload_chunky &&
213 data->state.trailers_state == TRAILERS_NONE) {
214 /* if chunked Transfer-Encoding */
215 buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
216 data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
217 }
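  /* Worked example of the reservation above: a chunk is framed on the wire as

       <hex length> CRLF <data> CRLF        e.g.  "1a\r\n" + 26 bytes + "\r\n"

     The length prefix is at most 8 hex digits (a 32-bit size) plus CRLF, so
     the usable buffer shrinks by 8+2+2 (prefix plus trailing CRLF) and the
     fill pointer is moved forward by 8+2 so that the prefix can later be
     written in front of the data without moving the data itself. */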
218
219 if(data->state.trailers_state == TRAILERS_SENDING) {
220     /* if we're here, it means we already sent the final zero-size chunk
221        ("0" CRLF) but not the terminating CRLF that ends the body. We then
222        start pulling trailing data until there is no more, at which point
223        we simply return to the previous point in the state machine as if
224        nothing happened.
225     */
226 readfunc = trailers_read;
227 extra_data = (void *)data;
228 }
229 else
230 #endif
231 {
232 readfunc = data->state.fread_func;
233 extra_data = data->state.in;
234 }
235
236 Curl_set_in_callback(data, true);
237 nread = readfunc(data->req.upload_fromhere, 1,
238 buffersize, extra_data);
239 Curl_set_in_callback(data, false);
240
241 if(nread == CURL_READFUNC_ABORT) {
242 failf(data, "operation aborted by callback");
243 *nreadp = 0;
244 return CURLE_ABORTED_BY_CALLBACK;
245 }
246 if(nread == CURL_READFUNC_PAUSE) {
247 struct SingleRequest *k = &data->req;
248
249 if(data->conn->handler->flags & PROTOPT_NONETWORK) {
250 /* protocols that work without network cannot be paused. This is
251 actually only FILE:// just now, and it can't pause since the transfer
252 isn't done using the "normal" procedure. */
253 failf(data, "Read callback asked for PAUSE when not supported");
254 return CURLE_READ_ERROR;
255 }
256
257 /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
258 k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
259 if(data->req.upload_chunky) {
260 /* Back out the preallocation done above */
261 data->req.upload_fromhere -= (8 + 2);
262 }
263 *nreadp = 0;
264
265 return CURLE_OK; /* nothing was read */
266 }
267 else if(nread > buffersize) {
268 /* the read function returned a too large value */
269 *nreadp = 0;
270 failf(data, "read function returned funny value");
271 return CURLE_READ_ERROR;
272 }
273
274 #ifndef CURL_DISABLE_HTTP
275 if(!data->req.forbidchunk && data->req.upload_chunky) {
276 /* if chunked Transfer-Encoding
277 * build chunk:
278 *
279 * <HEX SIZE> CRLF
280 * <DATA> CRLF
281 */
282 /* On non-ASCII platforms the <DATA> may or may not be
283 translated based on state.prefer_ascii while the protocol
284 portion must always be translated to the network encoding.
285 To further complicate matters, line end conversion might be
286 done later on, so we need to prevent CRLFs from becoming
287 CRCRLFs if that's the case. To do this we use bare LFs
288 here, knowing they'll become CRLFs later on.
289 */
290
291 bool added_crlf = FALSE;
292 int hexlen = 0;
293 const char *endofline_native;
294 const char *endofline_network;
295
296 if(
297 #ifdef CURL_DO_LINEEND_CONV
298 (data->state.prefer_ascii) ||
299 #endif
300 (data->set.crlf)) {
301 /* \n will become \r\n later on */
302 endofline_native = "\n";
303 endofline_network = "\x0a";
304 }
305 else {
306 endofline_native = "\r\n";
307 endofline_network = "\x0d\x0a";
308 }
309
310 /* if we're not handling trailing data, proceed as usual */
311 if(data->state.trailers_state != TRAILERS_SENDING) {
312 char hexbuffer[11] = "";
313 hexlen = msnprintf(hexbuffer, sizeof(hexbuffer),
314 "%zx%s", nread, endofline_native);
315
316 /* move buffer pointer */
317 data->req.upload_fromhere -= hexlen;
318 nread += hexlen;
319
320 /* copy the prefix to the buffer, leaving out the NUL */
321 memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
322
323 /* always append ASCII CRLF to the data unless
324 we have a valid trailer callback */
325 if((nread-hexlen) == 0 &&
326 data->set.trailer_callback != NULL &&
327 data->state.trailers_state == TRAILERS_NONE) {
328 data->state.trailers_state = TRAILERS_INITIALIZED;
329 }
330 else {
331 memcpy(data->req.upload_fromhere + nread,
332 endofline_network,
333 strlen(endofline_network));
334 added_crlf = TRUE;
335 }
336 }
337
338 if(data->state.trailers_state == TRAILERS_SENDING &&
339 !trailers_left(data)) {
340 Curl_dyn_free(&data->state.trailers_buf);
341 data->state.trailers_state = TRAILERS_DONE;
342 data->set.trailer_data = NULL;
343 data->set.trailer_callback = NULL;
344 /* mark the transfer as done */
345 data->req.upload_done = TRUE;
346 infof(data, "Signaling end of chunked upload after trailers.");
347 }
348 else
349 if((nread - hexlen) == 0 &&
350 data->state.trailers_state != TRAILERS_INITIALIZED) {
351 /* mark this as done once this chunk is transferred */
352 data->req.upload_done = TRUE;
353 infof(data,
354 "Signaling end of chunked upload via terminating chunk.");
355 }
356
357 if(added_crlf)
358 nread += strlen(endofline_network); /* for the added end of line */
359 }
360 #endif
361
362 *nreadp = nread;
363
364 return CURLE_OK;
365 }
366
367 static int data_pending(struct Curl_easy *data)
368 {
369 struct connectdata *conn = data->conn;
370
371 if(conn->handler->protocol&PROTO_FAMILY_FTP)
372 return Curl_conn_data_pending(data, SECONDARYSOCKET);
373
374 /* in the case of libssh2, we can never be really sure that we have emptied
375 its internal buffers so we MUST always try until we get EAGAIN back */
376 return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
377 Curl_conn_data_pending(data, FIRSTSOCKET);
378 }
379
380 /*
381 * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
382 * remote document with the time provided by CURLOPT_TIMEVAL
383 */
384 bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc)
385 {
386 if((timeofdoc == 0) || (data->set.timevalue == 0))
387 return TRUE;
388
389 switch(data->set.timecondition) {
390 case CURL_TIMECOND_IFMODSINCE:
391 default:
392 if(timeofdoc <= data->set.timevalue) {
393 infof(data,
394 "The requested document is not new enough");
395 data->info.timecond = TRUE;
396 return FALSE;
397 }
398 break;
399 case CURL_TIMECOND_IFUNMODSINCE:
400 if(timeofdoc >= data->set.timevalue) {
401 infof(data,
402 "The requested document is not old enough");
403 data->info.timecond = TRUE;
404 return FALSE;
405 }
406 break;
407 }
408
409 return TRUE;
410 }
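/* Example: with CURLOPT_TIMECONDITION set to CURL_TIMECOND_IFMODSINCE and
   CURLOPT_TIMEVALUE set to 2020-01-01, a document whose reported modification
   time is in 2019 is "not new enough": info.timecond gets set, FALSE is
   returned and the caller skips the download. The dates are illustrative. */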
411
412 /*
413 * Go ahead and do a read if we have a readable socket or if
414 * the stream was rewound (in which case we have data in a
415 * buffer)
416 *
417 * return '*comeback' TRUE if we didn't properly drain the socket so this
418 * function should get called again without select() or similar in between!
419 */
420 static CURLcode readwrite_data(struct Curl_easy *data,
421 struct connectdata *conn,
422 struct SingleRequest *k,
423 int *didwhat, bool *done,
424 bool *comeback)
425 {
426 CURLcode result = CURLE_OK;
427 ssize_t nread; /* number of bytes read */
428 size_t excess = 0; /* excess bytes read */
429 bool readmore = FALSE; /* used by RTP to signal for more data */
430 int maxloops = 100;
431 char *buf = data->state.buffer;
432 DEBUGASSERT(buf);
433
434 *done = FALSE;
435 *comeback = FALSE;
436
437 /* This is where we loop until we have read everything there is to
438 read or we get a CURLE_AGAIN */
439 do {
440 bool is_empty_data = FALSE;
441 size_t buffersize = data->set.buffer_size;
442 size_t bytestoread = buffersize;
443 /* For HTTP/2 and HTTP/3, read data without caring about the content
444 length. This is safe because body in HTTP/2 is always segmented
445 thanks to its framing layer. Meanwhile, we have to call Curl_read
446 to ensure that http2_handle_stream_close is called when we read all
447 incoming bytes for a particular stream. */
448 bool is_http3 = Curl_conn_is_http3(data, conn, FIRSTSOCKET);
449 bool data_eof_handled = is_http3
450 || Curl_conn_is_http2(data, conn, FIRSTSOCKET);
451
452 if(!data_eof_handled && k->size != -1 && !k->header) {
453 /* make sure we don't read too much */
454 curl_off_t totalleft = k->size - k->bytecount;
455 if(totalleft < (curl_off_t)bytestoread)
456 bytestoread = (size_t)totalleft;
457 }
458
459 if(bytestoread) {
460 /* receive data from the network! */
461 result = Curl_read(data, conn->sockfd, buf, bytestoread, &nread);
462
463 /* read would've blocked */
464 if(CURLE_AGAIN == result) {
465 result = CURLE_OK;
466 break; /* get out of loop */
467 }
468
469 if(result>0)
470 goto out;
471 }
472 else {
473 /* read nothing but since we wanted nothing we consider this an OK
474 situation to proceed from */
475 DEBUGF(infof(data, DMSG(data, "readwrite_data: we're done")));
476 nread = 0;
477 }
478
479 if(!k->bytecount) {
480 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
481 if(k->exp100 > EXP100_SEND_DATA)
482 /* set time stamp to compare with when waiting for the 100 */
483 k->start100 = Curl_now();
484 }
485
486 *didwhat |= KEEP_RECV;
487 /* indicates data of zero size, i.e. empty file */
488 is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
489
490 if(0 < nread || is_empty_data) {
491 buf[nread] = 0;
492 }
493 else {
494 /* if we receive 0 or less here, either the data transfer is done or the
495 server closed the connection and we bail out from this! */
496 if(data_eof_handled)
497 DEBUGF(infof(data, "nread == 0, stream closed, bailing"));
498 else
499 DEBUGF(infof(data, "nread <= 0, server closed connection, bailing"));
500 k->keepon &= ~KEEP_RECV;
501 break;
502 }
503
504 /* Default buffer to use when we write the buffer, it may be changed
505 in the flow below before the actual storing is done. */
506 k->str = buf;
507
508 if(conn->handler->readwrite) {
509 result = conn->handler->readwrite(data, conn, &nread, &readmore);
510 if(result)
511 goto out;
512 if(readmore)
513 break;
514 }
515
516 #ifndef CURL_DISABLE_HTTP
517 /* Since this is a two-state thing, we check if we are parsing
518 headers at the moment or not. */
519 if(k->header) {
520 /* we are in parse-the-header-mode */
521 bool stop_reading = FALSE;
522 result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
523 if(result)
524 goto out;
525
526 if(conn->handler->readwrite &&
527 (k->maxdownload <= 0 && nread > 0)) {
528 result = conn->handler->readwrite(data, conn, &nread, &readmore);
529 if(result)
530 goto out;
531 if(readmore)
532 break;
533 }
534
535 if(stop_reading) {
536 /* We've stopped dealing with input, get out of the do-while loop */
537
538 if(nread > 0) {
539 infof(data,
540 "Excess found:"
541 " excess = %zd"
542 " url = %s (zero-length body)",
543 nread, data->state.up.path);
544 }
545
546 break;
547 }
548 }
549 #endif /* CURL_DISABLE_HTTP */
550
551
552 /* This is not an 'else if' since it may be a rest from the header
553 parsing, where the beginning of the buffer is headers and the end
554 is non-headers. */
555 if(!k->header && (nread > 0 || is_empty_data)) {
556
557 if(data->req.no_body) {
558 /* data arrives although we want none, bail out */
559 streamclose(conn, "ignoring body");
560 *done = TRUE;
561 result = CURLE_WEIRD_SERVER_REPLY;
562 goto out;
563 }
564
565 #ifndef CURL_DISABLE_HTTP
566 if(0 == k->bodywrites && !is_empty_data) {
567 /* These checks are only made the first time we are about to
568 write a piece of the body */
569 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
570 /* HTTP-only checks */
571 result = Curl_http_firstwrite(data, conn, done);
572 if(result || *done)
573 goto out;
574 }
575 } /* this is the first time we write a body part */
576 #endif /* CURL_DISABLE_HTTP */
577
578 k->bodywrites++;
579
580 /* pass data to the debug function before it gets "dechunked" */
581 if(data->set.verbose) {
582 if(k->badheader) {
583 Curl_debug(data, CURLINFO_DATA_IN,
584 Curl_dyn_ptr(&data->state.headerb),
585 Curl_dyn_len(&data->state.headerb));
586 if(k->badheader == HEADER_PARTHEADER)
587 Curl_debug(data, CURLINFO_DATA_IN,
588 k->str, (size_t)nread);
589 }
590 else
591 Curl_debug(data, CURLINFO_DATA_IN,
592 k->str, (size_t)nread);
593 }
594
595 #ifndef CURL_DISABLE_HTTP
596 if(k->chunk) {
597 /*
598 * Here comes a chunked transfer flying and we need to decode this
599 * properly. While the name says read, this function both reads
600 * and writes away the data. The returned 'nread' holds the number
601 * of actual data it wrote to the client.
602 */
603 CURLcode extra;
604 CHUNKcode res =
605 Curl_httpchunk_read(data, k->str, nread, &nread, &extra);
606
607 if(CHUNKE_OK < res) {
608 if(CHUNKE_PASSTHRU_ERROR == res) {
609 failf(data, "Failed reading the chunked-encoded stream");
610 result = extra;
611 goto out;
612 }
613 failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
614 result = CURLE_RECV_ERROR;
615 goto out;
616 }
617 if(CHUNKE_STOP == res) {
618 /* we're done reading chunks! */
619 k->keepon &= ~KEEP_RECV; /* read no more */
620
621 /* N number of bytes at the end of the str buffer that weren't
622 written to the client. */
623 if(conn->chunk.datasize) {
624           infof(data, "Leftovers after chunking: %"
625                 CURL_FORMAT_CURL_OFF_T " bytes",
626                 conn->chunk.datasize);
627 }
628 }
629 /* If it returned OK, we just keep going */
630 }
631 #endif /* CURL_DISABLE_HTTP */
632
633 /* Account for body content stored in the header buffer */
634 if((k->badheader == HEADER_PARTHEADER) && !k->ignorebody) {
635 size_t headlen = Curl_dyn_len(&data->state.headerb);
636 DEBUGF(infof(data, "Increasing bytecount by %zu", headlen));
637 k->bytecount += headlen;
638 }
639
640 if((-1 != k->maxdownload) &&
641 (k->bytecount + nread >= k->maxdownload)) {
642
643 excess = (size_t)(k->bytecount + nread - k->maxdownload);
644 if(excess > 0 && !k->ignorebody) {
645 infof(data,
646 "Excess found in a read:"
647 " excess = %zu"
648 ", size = %" CURL_FORMAT_CURL_OFF_T
649 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
650 ", bytecount = %" CURL_FORMAT_CURL_OFF_T,
651 excess, k->size, k->maxdownload, k->bytecount);
652 connclose(conn, "excess found in a read");
653 }
654
655 nread = (ssize_t) (k->maxdownload - k->bytecount);
656 if(nread < 0) /* this should be unusual */
657 nread = 0;
658
659 /* HTTP/3 over QUIC should keep reading until QUIC connection
660 is closed. In contrast to HTTP/2 which can stop reading
661 from TCP connection, HTTP/3 over QUIC needs ACK from server
662 to ensure stream closure. It should keep reading. */
663 if(!is_http3) {
664 k->keepon &= ~KEEP_RECV; /* we're done reading */
665 }
666 }
667
668 k->bytecount += nread;
669
670 Curl_pgrsSetDownloadCounter(data, k->bytecount);
671
672 if(!k->chunk && (nread || k->badheader || is_empty_data)) {
673 /* If this is chunky transfer, it was already written */
674
675 if(k->badheader && !k->ignorebody) {
676 /* we parsed a piece of data wrongly assuming it was a header
677 and now we output it as body instead */
678 size_t headlen = Curl_dyn_len(&data->state.headerb);
679
680 /* Don't let excess data pollute body writes */
681 if(k->maxdownload == -1 || (curl_off_t)headlen <= k->maxdownload)
682 result = Curl_client_write(data, CLIENTWRITE_BODY,
683 Curl_dyn_ptr(&data->state.headerb),
684 headlen);
685 else
686 result = Curl_client_write(data, CLIENTWRITE_BODY,
687 Curl_dyn_ptr(&data->state.headerb),
688 (size_t)k->maxdownload);
689
690 if(result)
691 goto out;
692 }
693 if(k->badheader < HEADER_ALLBAD) {
694 /* This switch handles various content encodings. If there's an
695 error here, be sure to check over the almost identical code
696 in http_chunks.c.
697 Make sure that ALL_CONTENT_ENCODINGS contains all the
698 encodings handled here. */
699 if(data->set.http_ce_skip || !k->writer_stack) {
700 if(!k->ignorebody && nread) {
701 #ifndef CURL_DISABLE_POP3
702 if(conn->handler->protocol & PROTO_FAMILY_POP3)
703 result = Curl_pop3_write(data, k->str, nread);
704 else
705 #endif /* CURL_DISABLE_POP3 */
706 result = Curl_client_write(data, CLIENTWRITE_BODY, k->str,
707 nread);
708 }
709 }
710 else if(!k->ignorebody && nread)
711 result = Curl_unencode_write(data, k->writer_stack, k->str, nread);
712 }
713 k->badheader = HEADER_NORMAL; /* taken care of now */
714
715 if(result)
716 goto out;
717 }
718
719 } /* if(!header and data to read) */
720
721 if(conn->handler->readwrite && excess) {
722 /* Parse the excess data */
723 k->str += nread;
724
725 if(&k->str[excess] > &buf[data->set.buffer_size]) {
726 /* the excess amount was too excessive(!), make sure
727 it doesn't read out of buffer */
728 excess = &buf[data->set.buffer_size] - k->str;
729 }
730 nread = (ssize_t)excess;
731
732 result = conn->handler->readwrite(data, conn, &nread, &readmore);
733 if(result)
734 goto out;
735
736 if(readmore)
737 k->keepon |= KEEP_RECV; /* we're not done reading */
738 break;
739 }
740
741 if(is_empty_data) {
742 /* if we received nothing, the server closed the connection and we
743 are done */
744 k->keepon &= ~KEEP_RECV;
745 }
746
747 if((k->keepon & KEEP_RECV_PAUSE) || !(k->keepon & KEEP_RECV)) {
748 /* this is a paused or stopped transfer */
749 break;
750 }
751
752 } while(data_pending(data) && maxloops--);
753
754 if(maxloops <= 0) {
755 /* we mark it as read-again-please */
756 conn->cselect_bits = CURL_CSELECT_IN;
757 *comeback = TRUE;
758 }
759
760 if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
761 conn->bits.close) {
762 /* When we've read the entire thing and the close bit is set, the server
763 may now close the connection. If there's now any kind of sending going
764 on from our side, we need to stop that immediately. */
765 infof(data, "we are done reading and this is set to close, stop send");
766 k->keepon &= ~KEEP_SEND; /* no writing anymore either */
767 }
768
769 out:
770 if(result)
771 DEBUGF(infof(data, DMSG(data, "readwrite_data() -> %d"), result));
772 return result;
773 }
774
775 CURLcode Curl_done_sending(struct Curl_easy *data,
776 struct SingleRequest *k)
777 {
778 k->keepon &= ~KEEP_SEND; /* we're done writing */
779
780 /* These functions should be moved into the handler struct! */
781 Curl_conn_ev_data_done_send(data);
782
783 return CURLE_OK;
784 }
785
786 #if defined(WIN32) && defined(USE_WINSOCK)
787 #ifndef SIO_IDEAL_SEND_BACKLOG_QUERY
788 #define SIO_IDEAL_SEND_BACKLOG_QUERY 0x4004747B
789 #endif
790
791 static void win_update_buffer_size(curl_socket_t sockfd)
792 {
793 int result;
794 ULONG ideal;
795 DWORD ideallen;
796 result = WSAIoctl(sockfd, SIO_IDEAL_SEND_BACKLOG_QUERY, 0, 0,
797 &ideal, sizeof(ideal), &ideallen, 0, 0);
798 if(result == 0) {
799 setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF,
800 (const char *)&ideal, sizeof(ideal));
801 }
802 }
803 #else
804 #define win_update_buffer_size(x)
805 #endif
806
807 #define curl_upload_refill_watermark(data) \
808 ((ssize_t)((data)->set.upload_buffer_size >> 5))
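/* Example: with the default 64 kB upload buffer (CURLOPT_UPLOAD_BUFFERSIZE
   left unset), the watermark is 65536 >> 5 = 2048 bytes, so the code below
   only tops up the buffer once fewer than 2 kB remain pending. */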
809
810 /*
811 * Send data to upload to the server, when the socket is writable.
812 */
813 static CURLcode readwrite_upload(struct Curl_easy *data,
814 struct connectdata *conn,
815 int *didwhat)
816 {
817 ssize_t i, si;
818 ssize_t bytes_written;
819 CURLcode result;
820 ssize_t nread; /* number of bytes read */
821 bool sending_http_headers = FALSE;
822 struct SingleRequest *k = &data->req;
823
824 if((k->bytecount == 0) && (k->writebytecount == 0))
825 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
826
827 *didwhat |= KEEP_SEND;
828
829 do {
830 curl_off_t nbody;
831 ssize_t offset = 0;
832
833 if(0 != k->upload_present &&
834 k->upload_present < curl_upload_refill_watermark(data) &&
835 !k->upload_chunky &&/*(variable sized chunked header; append not safe)*/
836 !k->upload_done && /*!(k->upload_done once k->upload_present sent)*/
837 !(k->writebytecount + k->upload_present - k->pendingheader ==
838 data->state.infilesize)) {
839 offset = k->upload_present;
840 }
841
842 /* only read more data if there's no upload data already
843 present in the upload buffer, or if appending to upload buffer */
844 if(0 == k->upload_present || offset) {
845 result = Curl_get_upload_buffer(data);
846 if(result)
847 return result;
848 if(offset && k->upload_fromhere != data->state.ulbuf)
849 memmove(data->state.ulbuf, k->upload_fromhere, offset);
850 /* init the "upload from here" pointer */
851 k->upload_fromhere = data->state.ulbuf;
852
853 if(!k->upload_done) {
854 /* HTTP pollution, this should be written nicer to become more
855 protocol agnostic. */
856 size_t fillcount;
857 struct HTTP *http = k->p.http;
858
859 if((k->exp100 == EXP100_SENDING_REQUEST) &&
860 (http->sending == HTTPSEND_BODY)) {
861 /* If this call is to send body data, we must take some action:
862 We have sent off the full HTTP 1.1 request, and we shall now
863 go into the Expect: 100 state and await such a header */
864 k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
865 k->keepon &= ~KEEP_SEND; /* disable writing */
866 k->start100 = Curl_now(); /* timeout count starts now */
867 *didwhat &= ~KEEP_SEND; /* we didn't write anything actually */
868 /* set a timeout for the multi interface */
869 Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
870 break;
871 }
872
873 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
874 if(http->sending == HTTPSEND_REQUEST)
875 /* We're sending the HTTP request headers, not the data.
876 Remember that so we don't change the line endings. */
877 sending_http_headers = TRUE;
878 else
879 sending_http_headers = FALSE;
880 }
881
882 k->upload_fromhere += offset;
883 result = Curl_fillreadbuffer(data, data->set.upload_buffer_size-offset,
884 &fillcount);
885 k->upload_fromhere -= offset;
886 if(result)
887 return result;
888
889 nread = offset + fillcount;
890 }
891 else
892 nread = 0; /* we're done uploading/reading */
893
894 if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
895 /* this is a paused transfer */
896 break;
897 }
898 if(nread <= 0) {
899 result = Curl_done_sending(data, k);
900 if(result)
901 return result;
902 break;
903 }
904
905 /* store number of bytes available for upload */
906 k->upload_present = nread;
907
908 /* convert LF to CRLF if so asked */
909 if((!sending_http_headers) && (
910 #ifdef CURL_DO_LINEEND_CONV
911 /* always convert if we're FTPing in ASCII mode */
912 (data->state.prefer_ascii) ||
913 #endif
914 (data->set.crlf))) {
915 /* Do we need to allocate a scratch buffer? */
916 if(!data->state.scratch) {
917 data->state.scratch = malloc(2 * data->set.upload_buffer_size);
918 if(!data->state.scratch) {
919 failf(data, "Failed to alloc scratch buffer");
920
921 return CURLE_OUT_OF_MEMORY;
922 }
923 }
924
925 /*
926 * ASCII/EBCDIC Note: This is presumably a text (not binary)
927 * transfer so the data should already be in ASCII.
928 * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
929 * must be used instead of the escape sequences \r & \n.
930 */
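        /* Illustration of the conversion below (illustrative input): the bytes

             "line1\nline2\n"      (LF, 0x0a, line ends)

           are copied into the scratch buffer as

             "line1\r\nline2\r\n"  (CRLF, 0x0d 0x0a, line ends)

           growing the payload by one byte per converted LF, which is why
           infilesize may be bumped for FTP ASCII transfers in the loop that
           follows. */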
931 if(offset)
932 memcpy(data->state.scratch, k->upload_fromhere, offset);
933 for(i = offset, si = offset; i < nread; i++, si++) {
934 if(k->upload_fromhere[i] == 0x0a) {
935 data->state.scratch[si++] = 0x0d;
936 data->state.scratch[si] = 0x0a;
937 if(!data->set.crlf) {
938 /* we're here only because FTP is in ASCII mode...
939                  bump infilesize for the CR we just added */
940 if(data->state.infilesize != -1)
941 data->state.infilesize++;
942 }
943 }
944 else
945 data->state.scratch[si] = k->upload_fromhere[i];
946 }
947
948 if(si != nread) {
949 /* only perform the special operation if we really did replace
950 anything */
951 nread = si;
952
953 /* upload from the new (replaced) buffer instead */
954 k->upload_fromhere = data->state.scratch;
955
956 /* set the new amount too */
957 k->upload_present = nread;
958 }
959 }
960
961 #ifndef CURL_DISABLE_SMTP
962 if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
963 result = Curl_smtp_escape_eob(data, nread, offset);
964 if(result)
965 return result;
966 }
967 #endif /* CURL_DISABLE_SMTP */
968 } /* if 0 == k->upload_present or appended to upload buffer */
969 else {
970 /* We have a partial buffer left from a previous "round". Use
971 that instead of reading more data */
972 }
973
974 /* write to socket (send away data) */
975 result = Curl_write(data,
976 conn->writesockfd, /* socket to send to */
977 k->upload_fromhere, /* buffer pointer */
978 k->upload_present, /* buffer size */
979 &bytes_written); /* actually sent */
980 if(result)
981 return result;
982
983 #if defined(WIN32) && defined(USE_WINSOCK)
984 {
985 struct curltime n = Curl_now();
986 if(Curl_timediff(n, k->last_sndbuf_update) > 1000) {
987 win_update_buffer_size(conn->writesockfd);
988 k->last_sndbuf_update = n;
989 }
990 }
991 #endif
992
993 if(k->pendingheader) {
994 /* parts of what was sent was header */
995 curl_off_t n = CURLMIN(k->pendingheader, bytes_written);
996 /* show the data before we change the pointer upload_fromhere */
997 Curl_debug(data, CURLINFO_HEADER_OUT, k->upload_fromhere, (size_t)n);
998 k->pendingheader -= n;
999 nbody = bytes_written - n; /* size of the written body part */
1000 }
1001 else
1002 nbody = bytes_written;
1003
1004 if(nbody) {
1005 /* show the data before we change the pointer upload_fromhere */
1006 Curl_debug(data, CURLINFO_DATA_OUT,
1007 &k->upload_fromhere[bytes_written - nbody],
1008 (size_t)nbody);
1009
1010 k->writebytecount += nbody;
1011 Curl_pgrsSetUploadCounter(data, k->writebytecount);
1012 }
1013
1014 if((!k->upload_chunky || k->forbidchunk) &&
1015 (k->writebytecount == data->state.infilesize)) {
1016 /* we have sent all data we were supposed to */
1017 k->upload_done = TRUE;
1018 infof(data, "We are completely uploaded and fine");
1019 }
1020
1021 if(k->upload_present != bytes_written) {
1022 /* we only wrote a part of the buffer (if anything), deal with it! */
1023
1024 /* store the amount of bytes left in the buffer to write */
1025 k->upload_present -= bytes_written;
1026
1027 /* advance the pointer where to find the buffer when the next send
1028 is to happen */
1029 k->upload_fromhere += bytes_written;
1030 }
1031 else {
1032 /* we've uploaded that buffer now */
1033 result = Curl_get_upload_buffer(data);
1034 if(result)
1035 return result;
1036 k->upload_fromhere = data->state.ulbuf;
1037 k->upload_present = 0; /* no more bytes left */
1038
1039 if(k->upload_done) {
1040 result = Curl_done_sending(data, k);
1041 if(result)
1042 return result;
1043 }
1044 }
1045
1046
1047 } while(0); /* just to break out from! */
1048
1049 return CURLE_OK;
1050 }
1051
1052 /*
1053 * Curl_readwrite() is the low-level function to be called when data is to
1054 * be read and written to/from the connection.
1055 *
1056 * return '*comeback' TRUE if we didn't properly drain the socket so this
1057 * function should get called again without select() or similar in between!
1058 */
1059 CURLcode Curl_readwrite(struct connectdata *conn,
1060 struct Curl_easy *data,
1061 bool *done,
1062 bool *comeback)
1063 {
1064 struct SingleRequest *k = &data->req;
1065 CURLcode result;
1066 struct curltime now;
1067 int didwhat = 0;
1068
1069 curl_socket_t fd_read;
1070 curl_socket_t fd_write;
1071 int select_res = conn->cselect_bits;
1072
1073 conn->cselect_bits = 0;
1074
1075 /* only use the proper socket if the *_HOLD bit is not set simultaneously as
1076 then we are in rate limiting state in that transfer direction */
1077
1078 if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
1079 fd_read = conn->sockfd;
1080 else
1081 fd_read = CURL_SOCKET_BAD;
1082
1083 if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
1084 fd_write = conn->writesockfd;
1085 else
1086 fd_write = CURL_SOCKET_BAD;
1087
1088 #if defined(USE_HTTP2) || defined(USE_HTTP3)
1089 if(data->state.drain) {
1090 select_res |= CURL_CSELECT_IN;
1091 DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data"));
1092 if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
1093 select_res |= CURL_CSELECT_OUT;
1094 }
1095 #endif
1096
1097   if(!select_res) /* Call for select()/poll() only if the read/write/error
1098                      status is not known. */
1099 select_res = Curl_socket_check(fd_read, CURL_SOCKET_BAD, fd_write, 0);
1100
1101 if(select_res == CURL_CSELECT_ERR) {
1102 failf(data, "select/poll returned error");
1103 result = CURLE_SEND_ERROR;
1104 goto out;
1105 }
1106
1107 #ifdef USE_HYPER
1108 if(conn->datastream) {
1109 result = conn->datastream(data, conn, &didwhat, done, select_res);
1110 if(result || *done)
1111 goto out;
1112 }
1113 else {
1114 #endif
1115 /* We go ahead and do a read if we have a readable socket or if
1116 the stream was rewound (in which case we have data in a
1117 buffer) */
1118 if((k->keepon & KEEP_RECV) && (select_res & CURL_CSELECT_IN)) {
1119 result = readwrite_data(data, conn, k, &didwhat, done, comeback);
1120 if(result || *done)
1121 goto out;
1122 }
1123
1124 /* If we still have writing to do, we check if we have a writable socket. */
1125 if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
1126 /* write */
1127
1128 result = readwrite_upload(data, conn, &didwhat);
1129 if(result)
1130 goto out;
1131 }
1132 #ifdef USE_HYPER
1133 }
1134 #endif
1135
1136 now = Curl_now();
1137 if(!didwhat) {
1138 /* no read no write, this is a timeout? */
1139 if(k->exp100 == EXP100_AWAITING_CONTINUE) {
1140 /* This should allow some time for the header to arrive, but only a
1141 very short time as otherwise it'll be too much wasted time too
1142 often. */
1143
1144 /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1145
1146 Therefore, when a client sends this header field to an origin server
1147 (possibly via a proxy) from which it has never seen a 100 (Continue)
1148 status, the client SHOULD NOT wait for an indefinite period before
1149 sending the request body.
1150
1151 */
1152
1153 timediff_t ms = Curl_timediff(now, k->start100);
1154 if(ms >= data->set.expect_100_timeout) {
1155 /* we've waited long enough, continue anyway */
1156 k->exp100 = EXP100_SEND_DATA;
1157 k->keepon |= KEEP_SEND;
1158 Curl_expire_done(data, EXPIRE_100_TIMEOUT);
1159 infof(data, "Done waiting for 100-continue");
1160 }
1161 }
1162
1163 result = Curl_conn_ev_data_idle(data);
1164 if(result)
1165 goto out;
1166 }
1167
1168 if(Curl_pgrsUpdate(data))
1169 result = CURLE_ABORTED_BY_CALLBACK;
1170 else
1171 result = Curl_speedcheck(data, now);
1172 if(result)
1173 goto out;
1174
1175 if(k->keepon) {
1176 if(0 > Curl_timeleft(data, &now, FALSE)) {
1177 if(k->size != -1) {
1178 failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
1179 " milliseconds with %" CURL_FORMAT_CURL_OFF_T " out of %"
1180 CURL_FORMAT_CURL_OFF_T " bytes received",
1181 Curl_timediff(now, data->progress.t_startsingle),
1182 k->bytecount, k->size);
1183 }
1184 else {
1185 failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
1186 " milliseconds with %" CURL_FORMAT_CURL_OFF_T " bytes received",
1187 Curl_timediff(now, data->progress.t_startsingle),
1188 k->bytecount);
1189 }
1190 result = CURLE_OPERATION_TIMEDOUT;
1191 goto out;
1192 }
1193 }
1194 else {
1195 /*
1196 * The transfer has been performed. Just make some general checks before
1197 * returning.
1198 */
1199
1200 if(!(data->req.no_body) && (k->size != -1) &&
1201 (k->bytecount != k->size) &&
1202 #ifdef CURL_DO_LINEEND_CONV
1203 /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1204 so we'll check to see if the discrepancy can be explained
1205 by the number of CRLFs we've changed to LFs.
1206 */
1207 (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1208 #endif /* CURL_DO_LINEEND_CONV */
1209 !k->newurl) {
1210 failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
1211 " bytes remaining to read", k->size - k->bytecount);
1212 result = CURLE_PARTIAL_FILE;
1213 goto out;
1214 }
1215 if(!(data->req.no_body) && k->chunk &&
1216 (conn->chunk.state != CHUNK_STOP)) {
1217 /*
1218 * In chunked mode, return an error if the connection is closed prior to
1219 * the empty (terminating) chunk is read.
1220 *
1221 * The condition above used to check for
1222 * conn->proto.http->chunk.datasize != 0 which is true after reading
1223 * *any* chunk, not just the empty chunk.
1224 *
1225 */
1226 failf(data, "transfer closed with outstanding read data remaining");
1227 result = CURLE_PARTIAL_FILE;
1228 goto out;
1229 }
1230 if(Curl_pgrsUpdate(data)) {
1231 result = CURLE_ABORTED_BY_CALLBACK;
1232 goto out;
1233 }
1234 }
1235
1236 /* Now update the "done" boolean we return */
1237 *done = (0 == (k->keepon&(KEEP_RECVBITS|KEEP_SENDBITS))) ? TRUE : FALSE;
1238 result = CURLE_OK;
1239 out:
1240 if(result)
1241 DEBUGF(infof(data, DMSG(data, "Curl_readwrite() -> %d"), result));
1242 return result;
1243 }
1244
1245 /*
1246 * Curl_single_getsock() gets called by the multi interface code when the app
1247 * has requested to get the sockets for the current connection. This function
1248 * will then be called once for every connection that the multi interface
1249 * keeps track of. This function will only be called for connections that are
1250 * in the proper state to have this information available.
1251 */
1252 int Curl_single_getsock(struct Curl_easy *data,
1253 struct connectdata *conn,
1254 curl_socket_t *sock)
1255 {
1256 int bitmap = GETSOCK_BLANK;
1257 unsigned sockindex = 0;
1258
1259 if(conn->handler->perform_getsock)
1260 return conn->handler->perform_getsock(data, conn, sock);
1261
1262 /* don't include HOLD and PAUSE connections */
1263 if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {
1264
1265 DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
1266
1267 bitmap |= GETSOCK_READSOCK(sockindex);
1268 sock[sockindex] = conn->sockfd;
1269 }
1270
1271 /* don't include HOLD and PAUSE connections */
1272 if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
1273 if((conn->sockfd != conn->writesockfd) ||
1274 bitmap == GETSOCK_BLANK) {
1275 /* only if they are not the same socket and we have a readable
1276 one, we increase index */
1277 if(bitmap != GETSOCK_BLANK)
1278 sockindex++; /* increase index if we need two entries */
1279
1280 DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
1281
1282 sock[sockindex] = conn->writesockfd;
1283 }
1284
1285 bitmap |= GETSOCK_WRITESOCK(sockindex);
1286 }
1287
1288 return bitmap;
1289 }
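/* Example of the returned bitmap: a transfer that both receives and sends on
   two different sockets gets sock[0] = conn->sockfd with GETSOCK_READSOCK(0)
   and sock[1] = conn->writesockfd with GETSOCK_WRITESOCK(1). If both
   directions use the same socket, index 0 carries both bits instead. */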
1290
1291 /* Curl_init_CONNECT() gets called each time the handle switches to CONNECT
1292 which means this gets called once for each subsequent redirect etc */
1293 void Curl_init_CONNECT(struct Curl_easy *data)
1294 {
1295 data->state.fread_func = data->set.fread_func_set;
1296 data->state.in = data->set.in_set;
1297 }
1298
1299 /*
1300 * Curl_pretransfer() is called immediately before a transfer starts, and only
1301 * once for one transfer no matter if it has redirects or do multi-pass
1302 * authentication etc.
1303 */
1304 CURLcode Curl_pretransfer(struct Curl_easy *data)
1305 {
1306 CURLcode result;
1307
1308 if(!data->state.url && !data->set.uh) {
1309 /* we can't do anything without URL */
1310 failf(data, "No URL set");
1311 return CURLE_URL_MALFORMAT;
1312 }
1313
1314 /* since the URL may have been redirected in a previous use of this handle */
1315 if(data->state.url_alloc) {
1316 /* the already set URL is allocated, free it first! */
1317 Curl_safefree(data->state.url);
1318 data->state.url_alloc = FALSE;
1319 }
1320
1321 if(!data->state.url && data->set.uh) {
1322 CURLUcode uc;
1323 free(data->set.str[STRING_SET_URL]);
1324 uc = curl_url_get(data->set.uh,
1325 CURLUPART_URL, &data->set.str[STRING_SET_URL], 0);
1326 if(uc) {
1327 failf(data, "No URL set");
1328 return CURLE_URL_MALFORMAT;
1329 }
1330 }
1331
1332 data->state.prefer_ascii = data->set.prefer_ascii;
1333 data->state.list_only = data->set.list_only;
1334 data->state.httpreq = data->set.method;
1335 data->state.url = data->set.str[STRING_SET_URL];
1336
1337 /* Init the SSL session ID cache here. We do it here since we want to do it
1338 after the *_setopt() calls (that could specify the size of the cache) but
1339 before any transfer takes place. */
1340 result = Curl_ssl_initsessions(data, data->set.general_ssl.max_ssl_sessions);
1341 if(result)
1342 return result;
1343
1344 data->state.requests = 0;
1345 data->state.followlocation = 0; /* reset the location-follow counter */
1346 data->state.this_is_a_follow = FALSE; /* reset this */
1347 data->state.errorbuf = FALSE; /* no error has occurred */
1348 data->state.httpwant = data->set.httpwant;
1349 data->state.httpversion = 0;
1350 data->state.authproblem = FALSE;
1351 data->state.authhost.want = data->set.httpauth;
1352 data->state.authproxy.want = data->set.proxyauth;
1353 Curl_safefree(data->info.wouldredirect);
1354 Curl_data_priority_clear_state(data);
1355
1356 if(data->state.httpreq == HTTPREQ_PUT)
1357 data->state.infilesize = data->set.filesize;
1358 else if((data->state.httpreq != HTTPREQ_GET) &&
1359 (data->state.httpreq != HTTPREQ_HEAD)) {
1360 data->state.infilesize = data->set.postfieldsize;
1361 if(data->set.postfields && (data->state.infilesize == -1))
1362 data->state.infilesize = (curl_off_t)strlen(data->set.postfields);
1363 }
1364 else
1365 data->state.infilesize = 0;
1366
1367 /* If there is a list of cookie files to read, do it now! */
1368 Curl_cookie_loadfiles(data);
1369
1370 /* If there is a list of host pairs to deal with */
1371 if(data->state.resolve)
1372 result = Curl_loadhostpairs(data);
1373
1374 /* If there is a list of hsts files to read */
1375 Curl_hsts_loadfiles(data);
1376
1377 if(!result) {
1378 /* Allow data->set.use_port to set which port to use. This needs to be
1379 * disabled for example when we follow Location: headers to URLs using
1380 * different ports! */
1381 data->state.allow_port = TRUE;
1382
1383 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1384 /*************************************************************
1385 * Tell signal handler to ignore SIGPIPE
1386 *************************************************************/
1387 if(!data->set.no_signal)
1388 data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1389 #endif
1390
1391 Curl_initinfo(data); /* reset session-specific information "variables" */
1392 Curl_pgrsResetTransferSizes(data);
1393 Curl_pgrsStartNow(data);
1394
1395 /* In case the handle is re-used and an authentication method was picked
1396 in the session we need to make sure we only use the one(s) we now
1397 consider to be fine */
1398 data->state.authhost.picked &= data->state.authhost.want;
1399 data->state.authproxy.picked &= data->state.authproxy.want;
1400
1401 #ifndef CURL_DISABLE_FTP
1402 data->state.wildcardmatch = data->set.wildcard_enabled;
1403 if(data->state.wildcardmatch) {
1404 struct WildcardData *wc;
1405 if(!data->wildcard) {
1406 data->wildcard = calloc(1, sizeof(struct WildcardData));
1407 if(!data->wildcard)
1408 return CURLE_OUT_OF_MEMORY;
1409 }
1410 wc = data->wildcard;
1411 if(wc->state < CURLWC_INIT) {
1412 result = Curl_wildcard_init(wc); /* init wildcard structures */
1413 if(result)
1414 return CURLE_OUT_OF_MEMORY;
1415 }
1416 }
1417 #endif
1418 result = Curl_hsts_loadcb(data, data->hsts);
1419 }
1420
1421 /*
1422 * Set user-agent. Used for HTTP, but since we can attempt to tunnel
1423 * basically anything through an HTTP proxy we can't limit this based on
1424 * protocol.
1425 */
1426 if(data->set.str[STRING_USERAGENT]) {
1427 Curl_safefree(data->state.aptr.uagent);
1428 data->state.aptr.uagent =
1429 aprintf("User-Agent: %s\r\n", data->set.str[STRING_USERAGENT]);
1430 if(!data->state.aptr.uagent)
1431 return CURLE_OUT_OF_MEMORY;
1432 }
1433
1434 if(!result)
1435 result = Curl_setstropt(&data->state.aptr.user,
1436 data->set.str[STRING_USERNAME]);
1437 if(!result)
1438 result = Curl_setstropt(&data->state.aptr.passwd,
1439 data->set.str[STRING_PASSWORD]);
1440 if(!result)
1441 result = Curl_setstropt(&data->state.aptr.proxyuser,
1442 data->set.str[STRING_PROXYUSERNAME]);
1443 if(!result)
1444 result = Curl_setstropt(&data->state.aptr.proxypasswd,
1445 data->set.str[STRING_PROXYPASSWORD]);
1446
1447 data->req.headerbytecount = 0;
1448 Curl_headers_cleanup(data);
1449 return result;
1450 }
1451
1452 /*
1453 * Curl_posttransfer() is called immediately after a transfer ends
1454 */
1455 CURLcode Curl_posttransfer(struct Curl_easy *data)
1456 {
1457 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1458 /* restore the signal handler for SIGPIPE before we get back */
1459 if(!data->set.no_signal)
1460 signal(SIGPIPE, data->state.prev_signal);
1461 #else
1462 (void)data; /* unused parameter */
1463 #endif
1464
1465 return CURLE_OK;
1466 }
1467
1468 /*
1469 * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1470 * as given by the remote server and set up the new URL to request.
1471 *
1472 * This function DOES NOT FREE the given url.
1473 */
1474 CURLcode Curl_follow(struct Curl_easy *data,
1475 char *newurl, /* the Location: string */
1476 followtype type) /* see transfer.h */
1477 {
1478 #ifdef CURL_DISABLE_HTTP
1479 (void)data;
1480 (void)newurl;
1481 (void)type;
1482 /* Location: following will not happen when HTTP is disabled */
1483 return CURLE_TOO_MANY_REDIRECTS;
1484 #else
1485
1486 /* Location: redirect */
1487 bool disallowport = FALSE;
1488 bool reachedmax = FALSE;
1489 CURLUcode uc;
1490
1491 DEBUGASSERT(type != FOLLOW_NONE);
1492
1493 if(type != FOLLOW_FAKE)
1494 data->state.requests++; /* count all real follows */
1495 if(type == FOLLOW_REDIR) {
1496 if((data->set.maxredirs != -1) &&
1497 (data->state.followlocation >= data->set.maxredirs)) {
1498 reachedmax = TRUE;
1499 type = FOLLOW_FAKE; /* switch to fake to store the would-be-redirected
1500 to URL */
1501 }
1502 else {
1503 data->state.followlocation++; /* count redirect-followings, including
1504 auth reloads */
1505
1506 if(data->set.http_auto_referer) {
1507 CURLU *u;
1508 char *referer = NULL;
1509
1510 /* We are asked to automatically set the previous URL as the referer
1511 when we get the next URL. We pick the ->url field, which may or may
1512 not be 100% correct */
1513
1514 if(data->state.referer_alloc) {
1515 Curl_safefree(data->state.referer);
1516 data->state.referer_alloc = FALSE;
1517 }
1518
1519 /* Make a copy of the URL without credentials and fragment */
1520 u = curl_url();
1521 if(!u)
1522 return CURLE_OUT_OF_MEMORY;
1523
1524 uc = curl_url_set(u, CURLUPART_URL, data->state.url, 0);
1525 if(!uc)
1526 uc = curl_url_set(u, CURLUPART_FRAGMENT, NULL, 0);
1527 if(!uc)
1528 uc = curl_url_set(u, CURLUPART_USER, NULL, 0);
1529 if(!uc)
1530 uc = curl_url_set(u, CURLUPART_PASSWORD, NULL, 0);
1531 if(!uc)
1532 uc = curl_url_get(u, CURLUPART_URL, &referer, 0);
1533
1534 curl_url_cleanup(u);
1535
1536 if(uc || !referer)
1537 return CURLE_OUT_OF_MEMORY;
1538
1539 data->state.referer = referer;
1540 data->state.referer_alloc = TRUE; /* yes, free this later */
1541 }
1542 }
1543 }
1544
1545 if((type != FOLLOW_RETRY) &&
1546 (data->req.httpcode != 401) && (data->req.httpcode != 407) &&
1547 Curl_is_absolute_url(newurl, NULL, 0, FALSE))
1548 /* If this is not redirect due to a 401 or 407 response and an absolute
1549 URL: don't allow a custom port number */
1550 disallowport = TRUE;
1551
1552 DEBUGASSERT(data->state.uh);
1553 uc = curl_url_set(data->state.uh, CURLUPART_URL, newurl,
1554 (type == FOLLOW_FAKE) ? CURLU_NON_SUPPORT_SCHEME :
1555 ((type == FOLLOW_REDIR) ? CURLU_URLENCODE : 0) |
1556 CURLU_ALLOW_SPACE |
1557 (data->set.path_as_is ? CURLU_PATH_AS_IS : 0));
1558 if(uc) {
1559 if(type != FOLLOW_FAKE) {
1560 failf(data, "The redirect target URL could not be parsed: %s",
1561 curl_url_strerror(uc));
1562 return Curl_uc_to_curlcode(uc);
1563 }
1564
1565 /* the URL could not be parsed for some reason, but since this is FAKE
1566 mode, just duplicate the field as-is */
1567 newurl = strdup(newurl);
1568 if(!newurl)
1569 return CURLE_OUT_OF_MEMORY;
1570 }
1571 else {
1572 uc = curl_url_get(data->state.uh, CURLUPART_URL, &newurl, 0);
1573 if(uc)
1574 return Curl_uc_to_curlcode(uc);
1575
1576 /* Clear auth if this redirects to a different port number or protocol,
1577 unless permitted */
1578 if(!data->set.allow_auth_to_other_hosts && (type != FOLLOW_FAKE)) {
1579 char *portnum;
1580 int port;
1581 bool clear = FALSE;
1582
1583 if(data->set.use_port && data->state.allow_port)
1584 /* a custom port is used */
1585 port = (int)data->set.use_port;
1586 else {
1587 uc = curl_url_get(data->state.uh, CURLUPART_PORT, &portnum,
1588 CURLU_DEFAULT_PORT);
1589 if(uc) {
1590 free(newurl);
1591 return Curl_uc_to_curlcode(uc);
1592 }
1593 port = atoi(portnum);
1594 free(portnum);
1595 }
1596 if(port != data->info.conn_remote_port) {
1597 infof(data, "Clear auth, redirects to port from %u to %u",
1598 data->info.conn_remote_port, port);
1599 clear = TRUE;
1600 }
1601 else {
1602 char *scheme;
1603 const struct Curl_handler *p;
1604 uc = curl_url_get(data->state.uh, CURLUPART_SCHEME, &scheme, 0);
1605 if(uc) {
1606 free(newurl);
1607 return Curl_uc_to_curlcode(uc);
1608 }
1609
1610 p = Curl_builtin_scheme(scheme, CURL_ZERO_TERMINATED);
1611 if(p && (p->protocol != data->info.conn_protocol)) {
1612 infof(data, "Clear auth, redirects scheme from %s to %s",
1613 data->info.conn_scheme, scheme);
1614 clear = TRUE;
1615 }
1616 free(scheme);
1617 }
1618 if(clear) {
1619 Curl_safefree(data->state.aptr.user);
1620 Curl_safefree(data->state.aptr.passwd);
1621 }
1622 }
1623 }
1624
1625 if(type == FOLLOW_FAKE) {
1626 /* we're only figuring out the new url if we would've followed locations
1627 but now we're done so we can get out! */
1628 data->info.wouldredirect = newurl;
1629
1630 if(reachedmax) {
1631 failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
1632 return CURLE_TOO_MANY_REDIRECTS;
1633 }
1634 return CURLE_OK;
1635 }
1636
1637 if(disallowport)
1638 data->state.allow_port = FALSE;
1639
1640 if(data->state.url_alloc)
1641 Curl_safefree(data->state.url);
1642
1643 data->state.url = newurl;
1644 data->state.url_alloc = TRUE;
1645
1646 infof(data, "Issue another request to this URL: '%s'", data->state.url);
1647
1648 /*
1649 * We get here when the HTTP code is 300-399 (and 401). We need to perform
1650 * differently based on exactly what return code there was.
1651 *
1652 * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
1653 * an HTTP (proxy-) authentication scheme other than Basic.
1654 */
1655 switch(data->info.httpcode) {
1656 /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
1657 Authorization: XXXX header in the HTTP request code snippet */
1658 /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
1659 Proxy-Authorization: XXXX header in the HTTP request code snippet */
1660 /* 300 - Multiple Choices */
1661 /* 306 - Not used */
1662 /* 307 - Temporary Redirect */
1663 default: /* for all above (and the unknown ones) */
1664 /* Some codes are explicitly mentioned since I've checked RFC2616 and they
1665 * seem to be OK to POST to.
1666 */
1667 break;
1668 case 301: /* Moved Permanently */
1669 /* (quote from RFC7231, section 6.4.2)
1670 *
1671 * Note: For historical reasons, a user agent MAY change the request
1672 * method from POST to GET for the subsequent request. If this
1673 * behavior is undesired, the 307 (Temporary Redirect) status code
1674 * can be used instead.
1675 *
1676 * ----
1677 *
1678    * Many webservers expect this, so these servers often answer a POST
1679 * request with an error page. To be sure that libcurl gets the page that
1680 * most user agents would get, libcurl has to force GET.
1681 *
1682 * This behavior is forbidden by RFC1945 and the obsolete RFC2616, and
1683 * can be overridden with CURLOPT_POSTREDIR.
1684 */
1685 if((data->state.httpreq == HTTPREQ_POST
1686 || data->state.httpreq == HTTPREQ_POST_FORM
1687 || data->state.httpreq == HTTPREQ_POST_MIME)
1688 && !(data->set.keep_post & CURL_REDIR_POST_301)) {
1689 infof(data, "Switch from POST to GET");
1690 data->state.httpreq = HTTPREQ_GET;
1691 }
1692 break;
1693 case 302: /* Found */
1694 /* (quote from RFC7231, section 6.4.3)
1695 *
1696 * Note: For historical reasons, a user agent MAY change the request
1697 * method from POST to GET for the subsequent request. If this
1698 * behavior is undesired, the 307 (Temporary Redirect) status code
1699 * can be used instead.
1700 *
1701 * ----
1702 *
1703    * Many webservers expect this, so these servers often answer a POST
1704 * request with an error page. To be sure that libcurl gets the page that
1705 * most user agents would get, libcurl has to force GET.
1706 *
1707 * This behavior is forbidden by RFC1945 and the obsolete RFC2616, and
1708 * can be overridden with CURLOPT_POSTREDIR.
1709 */
1710 if((data->state.httpreq == HTTPREQ_POST
1711 || data->state.httpreq == HTTPREQ_POST_FORM
1712 || data->state.httpreq == HTTPREQ_POST_MIME)
1713 && !(data->set.keep_post & CURL_REDIR_POST_302)) {
1714 infof(data, "Switch from POST to GET");
1715 data->state.httpreq = HTTPREQ_GET;
1716 }
1717 break;
1718
1719 case 303: /* See Other */
1720 /* 'See Other' location is not the resource but a substitute for the
1721 * resource. In this case we switch the method to GET/HEAD, unless the
1722 * method is POST and the user specified to keep it as POST.
1723 * https://github.com/curl/curl/issues/5237#issuecomment-614641049
1724 */
1725 if(data->state.httpreq != HTTPREQ_GET &&
1726 ((data->state.httpreq != HTTPREQ_POST &&
1727 data->state.httpreq != HTTPREQ_POST_FORM &&
1728 data->state.httpreq != HTTPREQ_POST_MIME) ||
1729 !(data->set.keep_post & CURL_REDIR_POST_303))) {
1730 data->state.httpreq = HTTPREQ_GET;
1731 data->set.upload = false;
1732 infof(data, "Switch to %s",
1733 data->req.no_body?"HEAD":"GET");
1734 }
1735 break;
1736 case 304: /* Not Modified */
1737 /* 304 means we did a conditional request and it was "Not modified".
1738 * We shouldn't get any Location: header in this response!
1739 */
1740 break;
1741 case 305: /* Use Proxy */
1742 /* (quote from RFC2616, section 10.3.6):
1743 * "The requested resource MUST be accessed through the proxy given
1744 * by the Location field. The Location field gives the URI of the
1745 * proxy. The recipient is expected to repeat this single request
1746 * via the proxy. 305 responses MUST only be generated by origin
1747 * servers."
1748 */
1749 break;
1750 }
1751 Curl_pgrsTime(data, TIMER_REDIRECT);
1752 Curl_pgrsResetTransferSizes(data);
1753
1754 return CURLE_OK;
1755 #endif /* CURL_DISABLE_HTTP */
1756 }
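/* Example of the method rewriting above: a POST answered with
   "301 Moved Permanently" plus a Location: header is, by default, reissued as
   a GET to the new URL; setting CURL_REDIR_POST_301 in CURLOPT_POSTREDIR keeps
   it a POST. A 303 response switches any other method to GET (or HEAD for
   no-body requests), except a POST that has CURL_REDIR_POST_303 set. */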
1757
1758 /* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.
1759
1760 NOTE: that the *url is malloc()ed. */
1761 CURLcode Curl_retry_request(struct Curl_easy *data, char **url)
1762 {
1763 struct connectdata *conn = data->conn;
1764 bool retry = FALSE;
1765 *url = NULL;
1766
1767   /* if we're uploading, we can't do the checks below, unless the protocol
1768      is HTTP, since an upload over HTTP still gets a response */
1769 if(data->set.upload &&
1770 !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
1771 return CURLE_OK;
1772
1773 if((data->req.bytecount + data->req.headerbytecount == 0) &&
1774 conn->bits.reuse &&
1775 (!data->req.no_body || (conn->handler->protocol & PROTO_FAMILY_HTTP))
1776 #ifndef CURL_DISABLE_RTSP
1777 && (data->set.rtspreq != RTSPREQ_RECEIVE)
1778 #endif
1779 )
1780 /* We got no data, we attempted to re-use a connection. For HTTP this
1781        can be a retry so we try again regardless of whether we expected a body.
1782        For other protocols we only try again if we expected a body.
1783
1784 This might happen if the connection was left alive when we were
1785 done using it before, but that was closed when we wanted to read from
1786 it again. Bad luck. Retry the same request on a fresh connect! */
1787 retry = TRUE;
1788 else if(data->state.refused_stream &&
1789 (data->req.bytecount + data->req.headerbytecount == 0) ) {
1790 /* This was sent on a refused stream, safe to rerun. A refused stream
1791 error can typically only happen on HTTP/2 level if the stream is safe
1792 to issue again, but the nghttp2 API can deliver the message to other
1793        streams as well, which is why this also checks the data
1794        counters. */
1795 infof(data, "REFUSED_STREAM, retrying a fresh connect");
1796 data->state.refused_stream = FALSE; /* clear again */
1797 retry = TRUE;
1798 }
1799 if(retry) {
1800 #define CONN_MAX_RETRIES 5
1801 if(data->state.retrycount++ >= CONN_MAX_RETRIES) {
1802 failf(data, "Connection died, tried %d times before giving up",
1803 CONN_MAX_RETRIES);
1804 data->state.retrycount = 0;
1805 return CURLE_SEND_ERROR;
1806 }
1807 infof(data, "Connection died, retrying a fresh connect (retry count: %d)",
1808 data->state.retrycount);
1809 *url = strdup(data->state.url);
1810 if(!*url)
1811 return CURLE_OUT_OF_MEMORY;
1812
1813 connclose(conn, "retry"); /* close this connection */
1814 conn->bits.retry = TRUE; /* mark this as a connection we're about
1815 to retry. Marking it this way should
1816                                      prevent e.g. HTTP transfers from returning an
1817                                      error just because nothing has been
1818 transferred! */
1819
1820
1821 if((conn->handler->protocol&PROTO_FAMILY_HTTP) &&
1822 data->req.writebytecount) {
1823 data->state.rewindbeforesend = TRUE;
1824 infof(data, "state.rewindbeforesend = TRUE");
1825 }
1826 }
1827 return CURLE_OK;
1828 }
1829
1830 /*
1831 * Curl_setup_transfer() is called to setup some basic properties for the
1832 * upcoming transfer.
1833 */
1834 void
1835 Curl_setup_transfer(
1836 struct Curl_easy *data, /* transfer */
1837 int sockindex, /* socket index to read from or -1 */
1838 curl_off_t size, /* -1 if unknown at this point */
1839 bool getheader, /* TRUE if header parsing is wanted */
1840 int writesockindex /* socket index to write to, it may very well be
1841 the same we read from. -1 disables */
1842 )
1843 {
1844 struct SingleRequest *k = &data->req;
1845 struct connectdata *conn = data->conn;
1846 struct HTTP *http = data->req.p.http;
1847 bool httpsending;
1848
1849 DEBUGASSERT(conn != NULL);
1850 DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
1851
1852 httpsending = ((conn->handler->protocol&PROTO_FAMILY_HTTP) &&
1853 (http->sending == HTTPSEND_REQUEST));
1854
1855 if(conn->bits.multiplex || conn->httpversion >= 20 || httpsending) {
1856 /* when multiplexing, the read/write sockets need to be the same! */
1857 conn->sockfd = sockindex == -1 ?
1858 ((writesockindex == -1 ? CURL_SOCKET_BAD : conn->sock[writesockindex])) :
1859 conn->sock[sockindex];
1860 conn->writesockfd = conn->sockfd;
1861 if(httpsending)
1862 /* special and very HTTP-specific */
1863 writesockindex = FIRSTSOCKET;
1864 }
1865 else {
1866 conn->sockfd = sockindex == -1 ?
1867 CURL_SOCKET_BAD : conn->sock[sockindex];
1868 conn->writesockfd = writesockindex == -1 ?
1869 CURL_SOCKET_BAD:conn->sock[writesockindex];
1870 }
1871 k->getheader = getheader;
1872
1873 k->size = size;
1874
1875 /* The code sequence below is placed in this function just because all
1876 necessary input is not always known in do_complete() as this function may
1877 be called after that */
1878
1879 if(!k->getheader) {
1880 k->header = FALSE;
1881 if(size > 0)
1882 Curl_pgrsSetDownloadSize(data, size);
1883 }
1884 /* we want header and/or body, if neither then don't do this! */
1885 if(k->getheader || !data->req.no_body) {
1886
1887 if(sockindex != -1)
1888 k->keepon |= KEEP_RECV;
1889
1890 if(writesockindex != -1) {
1891 /* HTTP 1.1 magic:
1892
1893 Even if we require a 100-return code before uploading data, we might
1894 need to write data before that since the REQUEST may not have been
1895          fully sent off just yet.
1896
1897 Thus, we must check if the request has been sent before we set the
1898 state info where we wait for the 100-return code
1899 */
1900 if((data->state.expect100header) &&
1901 (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
1902 (http->sending == HTTPSEND_BODY)) {
1903 /* wait with write until we either got 100-continue or a timeout */
1904 k->exp100 = EXP100_AWAITING_CONTINUE;
1905 k->start100 = Curl_now();
1906
1907 /* Set a timeout for the multi interface. Add the inaccuracy margin so
1908 that we don't fire slightly too early and get denied to run. */
1909 Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
1910 }
1911 else {
1912 if(data->state.expect100header)
1913 /* when we've sent off the rest of the headers, we must await a
1914 100-continue but first finish sending the request */
1915 k->exp100 = EXP100_SENDING_REQUEST;
1916
1917 /* enable the write bit when we're not waiting for continue */
1918 k->keepon |= KEEP_SEND;
1919 }
1920 } /* if(writesockindex != -1) */
1921 } /* if(k->getheader || !data->req.no_body) */
1922
1923 }
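/* Usage sketch (assumed, illustrative values): a protocol handler that wants
   to download a response of unknown size from the first socket, with header
   parsing enabled and nothing to upload, might call

     Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, -1);

   while an upload-only step would instead pass -1 for sockindex and
   FIRSTSOCKET as writesockindex. */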
1924