1 /***************************************************************************
2 * _ _ ____ _
3 * Project ___| | | | _ \| |
4 * / __| | | | |_) | |
5 * | (__| |_| | _ <| |___
6 * \___|\___/|_| \_\_____|
7 *
8 * Copyright (C) 1998 - 2020, Daniel Stenberg, <daniel@haxx.se>, et al.
9 *
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at https://curl.haxx.se/docs/copyright.html.
13 *
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
17 *
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
20 *
21 ***************************************************************************/
22
23 #include "curl_setup.h"
24 #include "strtoofft.h"
25
26 #ifdef HAVE_NETINET_IN_H
27 #include <netinet/in.h>
28 #endif
29 #ifdef HAVE_NETDB_H
30 #include <netdb.h>
31 #endif
32 #ifdef HAVE_ARPA_INET_H
33 #include <arpa/inet.h>
34 #endif
35 #ifdef HAVE_NET_IF_H
36 #include <net/if.h>
37 #endif
38 #ifdef HAVE_SYS_IOCTL_H
39 #include <sys/ioctl.h>
40 #endif
41 #ifdef HAVE_SIGNAL_H
42 #include <signal.h>
43 #endif
44
45 #ifdef HAVE_SYS_PARAM_H
46 #include <sys/param.h>
47 #endif
48
49 #ifdef HAVE_SYS_SELECT_H
50 #include <sys/select.h>
51 #elif defined(HAVE_UNISTD_H)
52 #include <unistd.h>
53 #endif
54
55 #ifndef HAVE_SOCKET
56 #error "We can't compile without socket() support!"
57 #endif
58
59 #include "urldata.h"
60 #include <curl/curl.h>
61 #include "netrc.h"
62
63 #include "content_encoding.h"
64 #include "hostip.h"
65 #include "transfer.h"
66 #include "sendf.h"
67 #include "speedcheck.h"
68 #include "progress.h"
69 #include "http.h"
70 #include "url.h"
71 #include "getinfo.h"
72 #include "vtls/vtls.h"
73 #include "select.h"
74 #include "multiif.h"
75 #include "connect.h"
76 #include "non-ascii.h"
77 #include "http2.h"
78 #include "mime.h"
79 #include "strcase.h"
80 #include "urlapi-int.h"
81
82 /* The last 3 #include files should be in this order */
83 #include "curl_printf.h"
84 #include "curl_memory.h"
85 #include "memdebug.h"
86
87 #if !defined(CURL_DISABLE_HTTP) || !defined(CURL_DISABLE_SMTP) || \
88 !defined(CURL_DISABLE_IMAP)
89 /*
90 * checkheaders() checks the linked list of custom headers for a
91 * particular header (prefix). Provide the prefix without colon!
92 *
93 * Returns a pointer to the first matching header or NULL if none matched.
94 */
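/* Illustrative, hypothetical caller sketch: a protocol handler that only
   wants to add a default Accept: header when the application did not
   supply one might do

     if(!Curl_checkheaders(conn, "Accept"))
       ... add the default header ...

   Note again: the header name is passed without the trailing colon. */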
95 char *Curl_checkheaders(const struct connectdata *conn,
96 const char *thisheader)
97 {
98 struct curl_slist *head;
99 size_t thislen = strlen(thisheader);
100 struct Curl_easy *data = conn->data;
101
102 for(head = data->set.headers; head; head = head->next) {
103 if(strncasecompare(head->data, thisheader, thislen) &&
104 Curl_headersep(head->data[thislen]) )
105 return head->data;
106 }
107
108 return NULL;
109 }
110 #endif
111
112 CURLcode Curl_get_upload_buffer(struct Curl_easy *data)
113 {
114 if(!data->state.ulbuf) {
115 data->state.ulbuf = malloc(data->set.upload_buffer_size);
116 if(!data->state.ulbuf)
117 return CURLE_OUT_OF_MEMORY;
118 }
119 return CURLE_OK;
120 }
121
122 #ifndef CURL_DISABLE_HTTP
123 /*
124 * This function will be called to loop through the trailers buffer
125 * until no more data is available for sending.
126 */
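/* Note: this function deliberately matches the curl_read_callback signature
   so that Curl_fillreadbuffer() below can plug it in in place of the
   application's read callback while the trailers are being sent. */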
127 static size_t Curl_trailers_read(char *buffer, size_t size, size_t nitems,
128 void *raw)
129 {
130 struct Curl_easy *data = (struct Curl_easy *)raw;
131 struct dynbuf *trailers_buf = &data->state.trailers_buf;
132 size_t bytes_left = Curl_dyn_len(trailers_buf) -
133 data->state.trailers_bytes_sent;
134 size_t to_copy = (size*nitems < bytes_left) ? size*nitems : bytes_left;
135 if(to_copy) {
136 memcpy(buffer,
137 Curl_dyn_ptr(trailers_buf) + data->state.trailers_bytes_sent,
138 to_copy);
139 data->state.trailers_bytes_sent += to_copy;
140 }
141 return to_copy;
142 }
143
144 static size_t Curl_trailers_left(void *raw)
145 {
146 struct Curl_easy *data = (struct Curl_easy *)raw;
147 struct dynbuf *trailers_buf = &data->state.trailers_buf;
148 return Curl_dyn_len(trailers_buf) - data->state.trailers_bytes_sent;
149 }
150 #endif
151
152 /*
153 * This function will call the read callback to fill our buffer with data
154 * to upload.
155 */
156 CURLcode Curl_fillreadbuffer(struct connectdata *conn, size_t bytes,
157 size_t *nreadp)
158 {
159 struct Curl_easy *data = conn->data;
160 size_t buffersize = bytes;
161 size_t nread;
162
163 curl_read_callback readfunc = NULL;
164 void *extra_data = NULL;
165
166 #ifdef CURL_DOES_CONVERSIONS
167 bool sending_http_headers = FALSE;
168
169 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
170 const struct HTTP *http = data->req.protop;
171
172 if(http->sending == HTTPSEND_REQUEST)
173 /* We're sending the HTTP request headers, not the data.
174 Remember that so we don't re-translate them into garbage. */
175 sending_http_headers = TRUE;
176 }
177 #endif
178
179 #ifndef CURL_DISABLE_HTTP
180 if(data->state.trailers_state == TRAILERS_INITIALIZED) {
181 struct curl_slist *trailers = NULL;
182 CURLcode result;
183 int trailers_ret_code;
184
185 /* at this point we already verified that the callback exists
186 so we compile and store the trailers buffer, then proceed */
187 infof(data,
188 "Moving trailers state machine from initialized to sending.\n");
189 data->state.trailers_state = TRAILERS_SENDING;
190 Curl_dyn_init(&data->state.trailers_buf, DYN_TRAILERS);
191
192 data->state.trailers_bytes_sent = 0;
193 Curl_set_in_callback(data, true);
194 trailers_ret_code = data->set.trailer_callback(&trailers,
195 data->set.trailer_data);
196 Curl_set_in_callback(data, false);
197 if(trailers_ret_code == CURL_TRAILERFUNC_OK) {
198 result = Curl_http_compile_trailers(trailers, &data->state.trailers_buf,
199 data);
200 }
201 else {
202 failf(data, "operation aborted by trailing headers callback");
203 *nreadp = 0;
204 result = CURLE_ABORTED_BY_CALLBACK;
205 }
206 if(result) {
207 Curl_dyn_free(&data->state.trailers_buf);
208 curl_slist_free_all(trailers);
209 return result;
210 }
211 infof(data, "Successfully compiled trailers.\r\n");
212 curl_slist_free_all(trailers);
213 }
214 #endif
215
216 /* if we are transmitting trailing data, we don't need to write
217 a chunk size so we skip this */
218 if(data->req.upload_chunky &&
219 data->state.trailers_state == TRAILERS_NONE) {
220 /* if chunked Transfer-Encoding */
221 buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
222 data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
223 }
224
225 #ifndef CURL_DISABLE_HTTP
226 if(data->state.trailers_state == TRAILERS_SENDING) {
227 /* if we're here it means we have already sent the terminating zero-size chunk
228 line ("0" CR LF) but not the final CR LF that ends the request. We then start
229 pulling trailing data until we have no more at which point we
230 simply return to the previous point in the state machine as if
231 nothing happened.
232 */
233 readfunc = Curl_trailers_read;
234 extra_data = (void *)data;
235 }
236 else
237 #endif
238 {
239 readfunc = data->state.fread_func;
240 extra_data = data->state.in;
241 }
242
243 Curl_set_in_callback(data, true);
244 nread = readfunc(data->req.upload_fromhere, 1,
245 buffersize, extra_data);
246 Curl_set_in_callback(data, false);
247
248 if(nread == CURL_READFUNC_ABORT) {
249 failf(data, "operation aborted by callback");
250 *nreadp = 0;
251 return CURLE_ABORTED_BY_CALLBACK;
252 }
253 if(nread == CURL_READFUNC_PAUSE) {
254 struct SingleRequest *k = &data->req;
255
256 if(conn->handler->flags & PROTOPT_NONETWORK) {
257 /* protocols that work without network cannot be paused. This is
258 actually only FILE:// just now, and it can't pause since the transfer
259 isn't done using the "normal" procedure. */
260 failf(data, "Read callback asked for PAUSE when not supported!");
261 return CURLE_READ_ERROR;
262 }
263
264 /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
265 k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
266 if(data->req.upload_chunky) {
267 /* Back out the preallocation done above */
268 data->req.upload_fromhere -= (8 + 2);
269 }
270 *nreadp = 0;
271
272 return CURLE_OK; /* nothing was read */
273 }
274 else if(nread > buffersize) {
275 /* the read function returned a too large value */
276 *nreadp = 0;
277 failf(data, "read function returned funny value");
278 return CURLE_READ_ERROR;
279 }
280
281 if(!data->req.forbidchunk && data->req.upload_chunky) {
282 /* if chunked Transfer-Encoding
283 * build chunk:
284 *
285 * <HEX SIZE> CRLF
286 * <DATA> CRLF
287 */
288 /* On non-ASCII platforms the <DATA> may or may not be
289 translated based on set.prefer_ascii while the protocol
290 portion must always be translated to the network encoding.
291 To further complicate matters, line end conversion might be
292 done later on, so we need to prevent CRLFs from becoming
293 CRCRLFs if that's the case. To do this we use bare LFs
294 here, knowing they'll become CRLFs later on.
295 */
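/* Illustrative sketch of the framing built below (assuming plain ASCII and
   no trailer callback): a 5-byte read of "hello" ends up in the buffer as

     5 CRLF
     hello CRLF

   and a read that returns 0 bytes produces the terminating chunk

     0 CRLF
     CRLF

   with any trailer headers emitted between the "0" line and the final CRLF
   when a trailer callback is set. */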
296
297 bool added_crlf = FALSE;
298 int hexlen = 0;
299 const char *endofline_native;
300 const char *endofline_network;
301
302 if(
303 #ifdef CURL_DO_LINEEND_CONV
304 (data->set.prefer_ascii) ||
305 #endif
306 (data->set.crlf)) {
307 /* \n will become \r\n later on */
308 endofline_native = "\n";
309 endofline_network = "\x0a";
310 }
311 else {
312 endofline_native = "\r\n";
313 endofline_network = "\x0d\x0a";
314 }
315
316 /* if we're not handling trailing data, proceed as usual */
317 if(data->state.trailers_state != TRAILERS_SENDING) {
318 char hexbuffer[11] = "";
319 hexlen = msnprintf(hexbuffer, sizeof(hexbuffer),
320 "%zx%s", nread, endofline_native);
321
322 /* move buffer pointer */
323 data->req.upload_fromhere -= hexlen;
324 nread += hexlen;
325
326 /* copy the prefix to the buffer, leaving out the NUL */
327 memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
328
329 /* always append ASCII CRLF to the data unless
330 we have a valid trailer callback */
331 #ifndef CURL_DISABLE_HTTP
332 if((nread-hexlen) == 0 &&
333 data->set.trailer_callback != NULL &&
334 data->state.trailers_state == TRAILERS_NONE) {
335 data->state.trailers_state = TRAILERS_INITIALIZED;
336 }
337 else
338 #endif
339 {
340 memcpy(data->req.upload_fromhere + nread,
341 endofline_network,
342 strlen(endofline_network));
343 added_crlf = TRUE;
344 }
345 }
346
347 #ifdef CURL_DOES_CONVERSIONS
348 {
349 CURLcode result;
350 size_t length;
351 if(data->set.prefer_ascii)
352 /* translate the protocol and data */
353 length = nread;
354 else
355 /* just translate the protocol portion */
356 length = hexlen;
357 if(length) {
358 result = Curl_convert_to_network(data, data->req.upload_fromhere,
359 length);
360 /* Curl_convert_to_network calls failf if unsuccessful */
361 if(result)
362 return result;
363 }
364 }
365 #endif /* CURL_DOES_CONVERSIONS */
366
367 #ifndef CURL_DISABLE_HTTP
368 if(data->state.trailers_state == TRAILERS_SENDING &&
369 !Curl_trailers_left(data)) {
370 Curl_dyn_free(&data->state.trailers_buf);
371 data->state.trailers_state = TRAILERS_DONE;
372 data->set.trailer_data = NULL;
373 data->set.trailer_callback = NULL;
374 /* mark the transfer as done */
375 data->req.upload_done = TRUE;
376 infof(data, "Signaling end of chunked upload after trailers.\n");
377 }
378 else
379 #endif
380 if((nread - hexlen) == 0 &&
381 data->state.trailers_state != TRAILERS_INITIALIZED) {
382 /* mark this as done once this chunk is transferred */
383 data->req.upload_done = TRUE;
384 infof(data,
385 "Signaling end of chunked upload via terminating chunk.\n");
386 }
387
388 if(added_crlf)
389 nread += strlen(endofline_network); /* for the added end of line */
390 }
391 #ifdef CURL_DOES_CONVERSIONS
392 else if((data->set.prefer_ascii) && (!sending_http_headers)) {
393 CURLcode result;
394 result = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
395 /* Curl_convert_to_network calls failf if unsuccessful */
396 if(result)
397 return result;
398 }
399 #endif /* CURL_DOES_CONVERSIONS */
400
401 *nreadp = nread;
402
403 return CURLE_OK;
404 }
405
406
407 /*
408 * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
409 * POST/PUT with multi-pass authentication when a sending was denied and a
410 * resend is necessary.
411 */
412 CURLcode Curl_readrewind(struct connectdata *conn)
413 {
414 struct Curl_easy *data = conn->data;
415 curl_mimepart *mimepart = &data->set.mimepost;
416
417 conn->bits.rewindaftersend = FALSE; /* we rewind now */
418
419 /* explicitly switch off sending data on this connection now since we are
420 about to restart a new transfer and thus we want to avoid inadvertently
421 sending more data on the existing connection until the next transfer
422 starts */
423 data->req.keepon &= ~KEEP_SEND;
424
425 /* We have sent away data. If not using CURLOPT_POSTFIELDS or
426 CURLOPT_HTTPPOST, call app to rewind
427 */
428 if(conn->handler->protocol & PROTO_FAMILY_HTTP) {
429 struct HTTP *http = data->req.protop;
430
431 if(http->sendit)
432 mimepart = http->sendit;
433 }
434 if(data->set.postfields)
435 ; /* do nothing */
436 else if(data->state.httpreq == HTTPREQ_POST_MIME ||
437 data->state.httpreq == HTTPREQ_POST_FORM) {
438 if(Curl_mime_rewind(mimepart)) {
439 failf(data, "Cannot rewind mime/post data");
440 return CURLE_SEND_FAIL_REWIND;
441 }
442 }
443 else {
444 if(data->set.seek_func) {
445 int err;
446
447 Curl_set_in_callback(data, true);
448 err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
449 Curl_set_in_callback(data, false);
450 if(err) {
451 failf(data, "seek callback returned error %d", (int)err);
452 return CURLE_SEND_FAIL_REWIND;
453 }
454 }
455 else if(data->set.ioctl_func) {
456 curlioerr err;
457
458 Curl_set_in_callback(data, true);
459 err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
460 data->set.ioctl_client);
461 Curl_set_in_callback(data, false);
462 infof(data, "the ioctl callback returned %d\n", (int)err);
463
464 if(err) {
465 failf(data, "ioctl callback returned error %d", (int)err);
466 return CURLE_SEND_FAIL_REWIND;
467 }
468 }
469 else {
470 /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
471 given FILE * stream and we can actually attempt to rewind that
472 ourselves with fseek() */
473 if(data->state.fread_func == (curl_read_callback)fread) {
474 if(-1 != fseek(data->state.in, 0, SEEK_SET))
475 /* successful rewind */
476 return CURLE_OK;
477 }
478
479 /* no callback set or failure above, makes us fail at once */
480 failf(data, "necessary data rewind wasn't possible");
481 return CURLE_SEND_FAIL_REWIND;
482 }
483 }
484 return CURLE_OK;
485 }
486
487 static int data_pending(const struct Curl_easy *data)
488 {
489 struct connectdata *conn = data->conn;
490
491 #ifdef ENABLE_QUIC
492 if(conn->transport == TRNSPRT_QUIC)
493 return Curl_quic_data_pending(data);
494 #endif
495
496 /* in the case of libssh2, we can never be really sure that we have emptied
497 its internal buffers so we MUST always try until we get EAGAIN back */
498 return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
499 #if defined(USE_NGHTTP2)
500 Curl_ssl_data_pending(conn, FIRSTSOCKET) ||
501 /* For HTTP/2, we may read up everything including response body
502 with header fields in Curl_http_readwrite_headers. If no
503 content-length is provided, curl waits for the connection
504 close, which we emulate using conn->proto.httpc.closed =
505 TRUE. The thing is if we read everything, then http2_recv won't
506 be called and we cannot signal the HTTP/2 stream has closed. As
507 a workaround, we return nonzero here to call http2_recv. */
508 ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion >= 20);
509 #else
510 Curl_ssl_data_pending(conn, FIRSTSOCKET);
511 #endif
512 }
513
514 /*
515 * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
516 * remote document with the time provided by CURLOPT_TIMEVAL
517 */
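/* Example: with CURLOPT_TIMECONDITION set to CURL_TIMECOND_IFMODSINCE and
   CURLOPT_TIMEVALUE set to 2020-01-01, a document whose timeofdoc says it
   was last modified during 2019 does not meet the condition: the function
   returns FALSE and data->info.timecond is set to TRUE. */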
518 bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc)
519 {
520 if((timeofdoc == 0) || (data->set.timevalue == 0))
521 return TRUE;
522
523 switch(data->set.timecondition) {
524 case CURL_TIMECOND_IFMODSINCE:
525 default:
526 if(timeofdoc <= data->set.timevalue) {
527 infof(data,
528 "The requested document is not new enough\n");
529 data->info.timecond = TRUE;
530 return FALSE;
531 }
532 break;
533 case CURL_TIMECOND_IFUNMODSINCE:
534 if(timeofdoc >= data->set.timevalue) {
535 infof(data,
536 "The requested document is not old enough\n");
537 data->info.timecond = TRUE;
538 return FALSE;
539 }
540 break;
541 }
542
543 return TRUE;
544 }
545
546 /*
547 * Go ahead and do a read if we have a readable socket or if
548 * the stream was rewound (in which case we have data in a
549 * buffer)
550 *
551 * return '*comeback' TRUE if we didn't properly drain the socket so this
552 * function should get called again without select() or similar in between!
553 */
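/* Rough flow, descriptive only: read up to data->set.buffer_size bytes into
   data->state.buffer, let the protocol handler and/or the HTTP header parser
   consume what they need, then hand the remaining body bytes to the client
   write machinery. The loop repeats while data_pending() reports buffered
   data, bounded by 'maxloops'; when the bound is hit, *comeback is set so
   the caller knows to call again without waiting for the socket. */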
554 static CURLcode readwrite_data(struct Curl_easy *data,
555 struct connectdata *conn,
556 struct SingleRequest *k,
557 int *didwhat, bool *done,
558 bool *comeback)
559 {
560 CURLcode result = CURLE_OK;
561 ssize_t nread; /* number of bytes read */
562 size_t excess = 0; /* excess bytes read */
563 bool readmore = FALSE; /* used by RTP to signal for more data */
564 int maxloops = 100;
565 char *buf = data->state.buffer;
566 DEBUGASSERT(buf);
567
568 *done = FALSE;
569 *comeback = FALSE;
570
571 /* This is where we loop until we have read everything there is to
572 read or we get a CURLE_AGAIN */
573 do {
574 bool is_empty_data = FALSE;
575 size_t buffersize = data->set.buffer_size;
576 size_t bytestoread = buffersize;
577 #ifdef USE_NGHTTP2
578 bool is_http2 = ((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
579 (conn->httpversion == 20));
580 #endif
581
582 if(
583 #ifdef USE_NGHTTP2
584 /* For HTTP/2, read data without caring about the content
585 length. This is safe because body in HTTP/2 is always
586 segmented thanks to its framing layer. Meanwhile, we have to
587 call Curl_read to ensure that http2_handle_stream_close is
588 called when we read all incoming bytes for a particular
589 stream. */
590 !is_http2 &&
591 #endif
592 k->size != -1 && !k->header) {
593 /* make sure we don't read too much */
594 curl_off_t totalleft = k->size - k->bytecount;
595 if(totalleft < (curl_off_t)bytestoread)
596 bytestoread = (size_t)totalleft;
597 }
598
599 if(bytestoread) {
600 /* receive data from the network! */
601 result = Curl_read(conn, conn->sockfd, buf, bytestoread, &nread);
602
603 /* read would've blocked */
604 if(CURLE_AGAIN == result)
605 break; /* get out of loop */
606
607 if(result>0)
608 return result;
609 }
610 else {
611 /* read nothing but since we wanted nothing we consider this an OK
612 situation to proceed from */
613 DEBUGF(infof(data, "readwrite_data: we're done!\n"));
614 nread = 0;
615 }
616
617 if(!k->bytecount) {
618 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
619 if(k->exp100 > EXP100_SEND_DATA)
620 /* set time stamp to compare with when waiting for the 100 */
621 k->start100 = Curl_now();
622 }
623
624 *didwhat |= KEEP_RECV;
625 /* indicates data of zero size, i.e. empty file */
626 is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
627
628 if(0 < nread || is_empty_data) {
629 buf[nread] = 0;
630 }
631 else {
632 /* if we receive 0 or less here, either the http2 stream is closed or the
633 server closed the connection and we bail out from this! */
634 #ifdef USE_NGHTTP2
635 if(is_http2 && !nread)
636 DEBUGF(infof(data, "nread == 0, stream closed, bailing\n"));
637 else
638 #endif
639 DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
640 k->keepon &= ~KEEP_RECV;
641 break;
642 }
643
644 /* Default buffer to use when we write the buffer, it may be changed
645 in the flow below before the actual storing is done. */
646 k->str = buf;
647
648 if(conn->handler->readwrite) {
649 result = conn->handler->readwrite(data, conn, &nread, &readmore);
650 if(result)
651 return result;
652 if(readmore)
653 break;
654 }
655
656 #ifndef CURL_DISABLE_HTTP
657 /* Since this is a two-state thing, we check if we are parsing
658 headers at the moment or not. */
659 if(k->header) {
660 /* we are in parse-the-header-mode */
661 bool stop_reading = FALSE;
662 result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
663 if(result)
664 return result;
665
666 if(conn->handler->readwrite &&
667 (k->maxdownload <= 0 && nread > 0)) {
668 result = conn->handler->readwrite(data, conn, &nread, &readmore);
669 if(result)
670 return result;
671 if(readmore)
672 break;
673 }
674
675 if(stop_reading) {
676 /* We've stopped dealing with input, get out of the do-while loop */
677
678 if(nread > 0) {
679 infof(data,
680 "Excess found:"
681 " excess = %zd"
682 " url = %s (zero-length body)\n",
683 nread, data->state.up.path);
684 }
685
686 break;
687 }
688 }
689 #endif /* CURL_DISABLE_HTTP */
690
691
692 /* This is not an 'else if' since it may be a rest from the header
693 parsing, where the beginning of the buffer is headers and the end
694 is non-headers. */
695 if(!k->header && (nread > 0 || is_empty_data)) {
696
697 if(data->set.opt_no_body) {
698 /* data arrives although we want none, bail out */
699 streamclose(conn, "ignoring body");
700 *done = TRUE;
701 return CURLE_WEIRD_SERVER_REPLY;
702 }
703
704 #ifndef CURL_DISABLE_HTTP
705 if(0 == k->bodywrites && !is_empty_data) {
706 /* These checks are only made the first time we are about to
707 write a piece of the body */
708 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
709 /* HTTP-only checks */
710
711 if(data->req.newurl) {
712 if(conn->bits.close) {
713 /* Abort after the headers if "follow Location" is set
714 and we're set to close anyway. */
715 k->keepon &= ~KEEP_RECV;
716 *done = TRUE;
717 return CURLE_OK;
718 }
719 /* We have a new url to load, but since we want to be able
720 to re-use this connection properly, we read the full
721 response in "ignore body" mode */
722 k->ignorebody = TRUE;
723 infof(data, "Ignoring the response-body\n");
724 }
725 if(data->state.resume_from && !k->content_range &&
726 (data->state.httpreq == HTTPREQ_GET) &&
727 !k->ignorebody) {
728
729 if(k->size == data->state.resume_from) {
730 /* The resume point is at the end of file, consider this fine
731 even if it doesn't allow resume from here. */
732 infof(data, "The entire document is already downloaded");
733 connclose(conn, "already downloaded");
734 /* Abort download */
735 k->keepon &= ~KEEP_RECV;
736 *done = TRUE;
737 return CURLE_OK;
738 }
739
740 /* we wanted to resume a download, although the server doesn't
741 * seem to support this and we did this with a GET (if it
742 * wasn't a GET we did a POST or PUT resume) */
743 failf(data, "HTTP server doesn't seem to support "
744 "byte ranges. Cannot resume.");
745 return CURLE_RANGE_ERROR;
746 }
747
748 if(data->set.timecondition && !data->state.range) {
749 /* A time condition has been set AND no ranges have been
750 requested. This seems to be what chapter 13.3.4 of
751 RFC 2616 defines to be the correct action for an
752 HTTP/1.1 client */
753
754 if(!Curl_meets_timecondition(data, k->timeofdoc)) {
755 *done = TRUE;
756 /* We're simulating an HTTP 304 from the server so we return
757 what should have been returned from the server */
758 data->info.httpcode = 304;
759 infof(data, "Simulate a HTTP 304 response!\n");
760 /* we abort the transfer before it is completed == we ruin the
761 re-use ability. Close the connection */
762 connclose(conn, "Simulated 304 handling");
763 return CURLE_OK;
764 }
765 } /* we have a time condition */
766
767 } /* this is HTTP or RTSP */
768 } /* this is the first time we write a body part */
769 #endif /* CURL_DISABLE_HTTP */
770
771 k->bodywrites++;
772
773 /* pass data to the debug function before it gets "dechunked" */
774 if(data->set.verbose) {
775 if(k->badheader) {
776 Curl_debug(data, CURLINFO_DATA_IN,
777 Curl_dyn_ptr(&data->state.headerb),
778 Curl_dyn_len(&data->state.headerb));
779 if(k->badheader == HEADER_PARTHEADER)
780 Curl_debug(data, CURLINFO_DATA_IN,
781 k->str, (size_t)nread);
782 }
783 else
784 Curl_debug(data, CURLINFO_DATA_IN,
785 k->str, (size_t)nread);
786 }
787
788 #ifndef CURL_DISABLE_HTTP
789 if(k->chunk) {
790 /*
791 * Here comes a chunked transfer flying and we need to decode this
792 * properly. While the name says read, this function both reads
793 * and writes away the data. The returned 'nread' holds the number
794 * of actual data it wrote to the client.
795 */
796 CURLcode extra;
797 CHUNKcode res =
798 Curl_httpchunk_read(conn, k->str, nread, &nread, &extra);
799
800 if(CHUNKE_OK < res) {
801 if(CHUNKE_PASSTHRU_ERROR == res) {
802 failf(data, "Failed reading the chunked-encoded stream");
803 return extra;
804 }
805 failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
806 return CURLE_RECV_ERROR;
807 }
808 if(CHUNKE_STOP == res) {
809 size_t dataleft;
810 /* we're done reading chunks! */
811 k->keepon &= ~KEEP_RECV; /* read no more */
812
813 /* There are now possibly N bytes at the end of the
814 str buffer that weren't written to the client.
815 Push it back to be read on the next pass. */
816
817 dataleft = conn->chunk.dataleft;
818 if(dataleft != 0) {
819 infof(conn->data, "Leftovers after chunking: %zu bytes\n",
820 dataleft);
821 }
822 }
823 /* If it returned OK, we just keep going */
824 }
825 #endif /* CURL_DISABLE_HTTP */
826
827 /* Account for body content stored in the header buffer */
828 if((k->badheader == HEADER_PARTHEADER) && !k->ignorebody) {
829 size_t headlen = Curl_dyn_len(&data->state.headerb);
830 DEBUGF(infof(data, "Increasing bytecount by %zu\n", headlen));
831 k->bytecount += headlen;
832 }
833
834 if((-1 != k->maxdownload) &&
835 (k->bytecount + nread >= k->maxdownload)) {
836
837 excess = (size_t)(k->bytecount + nread - k->maxdownload);
838 if(excess > 0 && !k->ignorebody) {
839 infof(data,
840 "Excess found in a read:"
841 " excess = %zu"
842 ", size = %" CURL_FORMAT_CURL_OFF_T
843 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
844 ", bytecount = %" CURL_FORMAT_CURL_OFF_T "\n",
845 excess, k->size, k->maxdownload, k->bytecount);
846 connclose(conn, "excess found in a read");
847 }
848
849 nread = (ssize_t) (k->maxdownload - k->bytecount);
850 if(nread < 0) /* this should be unusual */
851 nread = 0;
852
853 k->keepon &= ~KEEP_RECV; /* we're done reading */
854 }
855
856 k->bytecount += nread;
857
858 Curl_pgrsSetDownloadCounter(data, k->bytecount);
859
860 if(!k->chunk && (nread || k->badheader || is_empty_data)) {
861 /* If this is chunky transfer, it was already written */
862
863 if(k->badheader && !k->ignorebody) {
864 /* we parsed a piece of data wrongly assuming it was a header
865 and now we output it as body instead */
866 size_t headlen = Curl_dyn_len(&data->state.headerb);
867
868 /* Don't let excess data pollute body writes */
869 if(k->maxdownload == -1 || (curl_off_t)headlen <= k->maxdownload)
870 result = Curl_client_write(conn, CLIENTWRITE_BODY,
871 Curl_dyn_ptr(&data->state.headerb),
872 headlen);
873 else
874 result = Curl_client_write(conn, CLIENTWRITE_BODY,
875 Curl_dyn_ptr(&data->state.headerb),
876 (size_t)k->maxdownload);
877
878 if(result)
879 return result;
880 }
881 if(k->badheader < HEADER_ALLBAD) {
882 /* This switch handles various content encodings. If there's an
883 error here, be sure to check over the almost identical code
884 in http_chunks.c.
885 Make sure that ALL_CONTENT_ENCODINGS contains all the
886 encodings handled here. */
887 if(conn->data->set.http_ce_skip || !k->writer_stack) {
888 if(!k->ignorebody) {
889 #ifndef CURL_DISABLE_POP3
890 if(conn->handler->protocol & PROTO_FAMILY_POP3)
891 result = Curl_pop3_write(conn, k->str, nread);
892 else
893 #endif /* CURL_DISABLE_POP3 */
894 result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
895 nread);
896 }
897 }
898 else if(!k->ignorebody)
899 result = Curl_unencode_write(conn, k->writer_stack, k->str, nread);
900 }
901 k->badheader = HEADER_NORMAL; /* taken care of now */
902
903 if(result)
904 return result;
905 }
906
907 } /* if(!header and data to read) */
908
909 if(conn->handler->readwrite && excess) {
910 /* Parse the excess data */
911 k->str += nread;
912
913 if(&k->str[excess] > &buf[data->set.buffer_size]) {
914 /* the excess amount was too excessive(!), make sure
915 it doesn't read out of buffer */
916 excess = &buf[data->set.buffer_size] - k->str;
917 }
918 nread = (ssize_t)excess;
919
920 result = conn->handler->readwrite(data, conn, &nread, &readmore);
921 if(result)
922 return result;
923
924 if(readmore)
925 k->keepon |= KEEP_RECV; /* we're not done reading */
926 break;
927 }
928
929 if(is_empty_data) {
930 /* if we received nothing, the server closed the connection and we
931 are done */
932 k->keepon &= ~KEEP_RECV;
933 }
934
935 if(k->keepon & KEEP_RECV_PAUSE) {
936 /* this is a paused transfer */
937 break;
938 }
939
940 } while(data_pending(data) && maxloops--);
941
942 if(maxloops <= 0) {
943 /* we mark it as read-again-please */
944 conn->cselect_bits = CURL_CSELECT_IN;
945 *comeback = TRUE;
946 }
947
948 if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
949 conn->bits.close) {
950 /* When we've read the entire thing and the close bit is set, the server
951 may now close the connection. If there's now any kind of sending going
952 on from our side, we need to stop that immediately. */
953 infof(data, "we are done reading and this is set to close, stop send\n");
954 k->keepon &= ~KEEP_SEND; /* no writing anymore either */
955 }
956
957 return CURLE_OK;
958 }
959
960 CURLcode Curl_done_sending(struct connectdata *conn,
961 struct SingleRequest *k)
962 {
963 k->keepon &= ~KEEP_SEND; /* we're done writing */
964
965 /* These functions should be moved into the handler struct! */
966 Curl_http2_done_sending(conn);
967 Curl_quic_done_sending(conn);
968
969 if(conn->bits.rewindaftersend) {
970 CURLcode result = Curl_readrewind(conn);
971 if(result)
972 return result;
973 }
974 return CURLE_OK;
975 }
976
977 #if defined(WIN32) && !defined(USE_LWIPSOCK)
978 #ifndef SIO_IDEAL_SEND_BACKLOG_QUERY
979 #define SIO_IDEAL_SEND_BACKLOG_QUERY 0x4004747B
980 #endif
981
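/* Query the socket's "ideal send backlog" on Windows and grow SO_SNDBUF to
   match, so that large uploads are not throttled by a send buffer smaller
   than what the stack could actually make use of. */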
982 static void win_update_buffer_size(curl_socket_t sockfd)
983 {
984 int result;
985 ULONG ideal;
986 DWORD ideallen;
987 result = WSAIoctl(sockfd, SIO_IDEAL_SEND_BACKLOG_QUERY, 0, 0,
988 &ideal, sizeof(ideal), &ideallen, 0, 0);
989 if(result == 0) {
990 setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF,
991 (const char *)&ideal, sizeof(ideal));
992 }
993 }
994 #else
995 #define win_update_buffer_size(x)
996 #endif
997
998 /*
999 * Send data to upload to the server, when the socket is writable.
1000 */
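/* Rough flow, descriptive only: when the upload buffer is empty, refill it
   via Curl_fillreadbuffer() (honoring Expect: 100-continue and pause
   states), optionally convert LF to CRLF into the scratch buffer, then send
   with Curl_write(). A partial send is remembered in k->upload_present and
   k->upload_fromhere so the next call continues where this one stopped. */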
1001 static CURLcode readwrite_upload(struct Curl_easy *data,
1002 struct connectdata *conn,
1003 int *didwhat)
1004 {
1005 ssize_t i, si;
1006 ssize_t bytes_written;
1007 CURLcode result;
1008 ssize_t nread; /* number of bytes read */
1009 bool sending_http_headers = FALSE;
1010 struct SingleRequest *k = &data->req;
1011
1012 if((k->bytecount == 0) && (k->writebytecount == 0))
1013 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
1014
1015 *didwhat |= KEEP_SEND;
1016
1017 do {
1018 /* only read more data if there's no upload data already
1019 present in the upload buffer */
1020 if(0 == k->upload_present) {
1021 result = Curl_get_upload_buffer(data);
1022 if(result)
1023 return result;
1024 /* init the "upload from here" pointer */
1025 k->upload_fromhere = data->state.ulbuf;
1026
1027 if(!k->upload_done) {
1028 /* HTTP pollution, this should be written nicer to become more
1029 protocol agnostic. */
1030 size_t fillcount;
1031 struct HTTP *http = k->protop;
1032
1033 if((k->exp100 == EXP100_SENDING_REQUEST) &&
1034 (http->sending == HTTPSEND_BODY)) {
1035 /* If this call is to send body data, we must take some action:
1036 We have sent off the full HTTP 1.1 request, and we shall now
1037 go into the Expect: 100 state and await such a header */
1038 k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
1039 k->keepon &= ~KEEP_SEND; /* disable writing */
1040 k->start100 = Curl_now(); /* timeout count starts now */
1041 *didwhat &= ~KEEP_SEND; /* we didn't write anything actually */
1042 /* set a timeout for the multi interface */
1043 Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
1044 break;
1045 }
1046
1047 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
1048 if(http->sending == HTTPSEND_REQUEST)
1049 /* We're sending the HTTP request headers, not the data.
1050 Remember that so we don't change the line endings. */
1051 sending_http_headers = TRUE;
1052 else
1053 sending_http_headers = FALSE;
1054 }
1055
1056 result = Curl_fillreadbuffer(conn, data->set.upload_buffer_size,
1057 &fillcount);
1058 if(result)
1059 return result;
1060
1061 nread = fillcount;
1062 }
1063 else
1064 nread = 0; /* we're done uploading/reading */
1065
1066 if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
1067 /* this is a paused transfer */
1068 break;
1069 }
1070 if(nread <= 0) {
1071 result = Curl_done_sending(conn, k);
1072 if(result)
1073 return result;
1074 break;
1075 }
1076
1077 /* store number of bytes available for upload */
1078 k->upload_present = nread;
1079
1080 /* convert LF to CRLF if so asked */
1081 if((!sending_http_headers) && (
1082 #ifdef CURL_DO_LINEEND_CONV
1083 /* always convert if we're FTPing in ASCII mode */
1084 (data->set.prefer_ascii) ||
1085 #endif
1086 (data->set.crlf))) {
1087 /* Do we need to allocate a scratch buffer? */
1088 if(!data->state.scratch) {
1089 data->state.scratch = malloc(2 * data->set.upload_buffer_size);
1090 if(!data->state.scratch) {
1091 failf(data, "Failed to alloc scratch buffer!");
1092
1093 return CURLE_OUT_OF_MEMORY;
1094 }
1095 }
1096
1097 /*
1098 * ASCII/EBCDIC Note: This is presumably a text (not binary)
1099 * transfer so the data should already be in ASCII.
1100 * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
1101 * must be used instead of the escape sequences \r & \n.
1102 */
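/* Illustrative example: a 4-byte read of "a", 0x0a, "b", 0x0a is rewritten
   into the scratch buffer as "a", 0x0d, 0x0a, "b", 0x0d, 0x0a and nread is
   bumped from 4 to 6 further down (when si != nread). */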
1103 for(i = 0, si = 0; i < nread; i++, si++) {
1104 if(k->upload_fromhere[i] == 0x0a) {
1105 data->state.scratch[si++] = 0x0d;
1106 data->state.scratch[si] = 0x0a;
1107 if(!data->set.crlf) {
1108 /* we're here only because FTP is in ASCII mode...
1109 bump infilesize for the LF we just added */
1110 if(data->state.infilesize != -1)
1111 data->state.infilesize++;
1112 }
1113 }
1114 else
1115 data->state.scratch[si] = k->upload_fromhere[i];
1116 }
1117
1118 if(si != nread) {
1119 /* only perform the special operation if we really did replace
1120 anything */
1121 nread = si;
1122
1123 /* upload from the new (replaced) buffer instead */
1124 k->upload_fromhere = data->state.scratch;
1125
1126 /* set the new amount too */
1127 k->upload_present = nread;
1128 }
1129 }
1130
1131 #ifndef CURL_DISABLE_SMTP
1132 if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
1133 result = Curl_smtp_escape_eob(conn, nread);
1134 if(result)
1135 return result;
1136 }
1137 #endif /* CURL_DISABLE_SMTP */
1138 } /* if 0 == k->upload_present */
1139 else {
1140 /* We have a partial buffer left from a previous "round". Use
1141 that instead of reading more data */
1142 }
1143
1144 /* write to socket (send away data) */
1145 result = Curl_write(conn,
1146 conn->writesockfd, /* socket to send to */
1147 k->upload_fromhere, /* buffer pointer */
1148 k->upload_present, /* buffer size */
1149 &bytes_written); /* actually sent */
1150 if(result)
1151 return result;
1152
1153 win_update_buffer_size(conn->writesockfd);
1154
1155 if(data->set.verbose)
1156 /* show the data before we change the pointer upload_fromhere */
1157 Curl_debug(data, CURLINFO_DATA_OUT, k->upload_fromhere,
1158 (size_t)bytes_written);
1159
1160 k->writebytecount += bytes_written;
1161 Curl_pgrsSetUploadCounter(data, k->writebytecount);
1162
1163 if((!k->upload_chunky || k->forbidchunk) &&
1164 (k->writebytecount == data->state.infilesize)) {
1165 /* we have sent all data we were supposed to */
1166 k->upload_done = TRUE;
1167 infof(data, "We are completely uploaded and fine\n");
1168 }
1169
1170 if(k->upload_present != bytes_written) {
1171 /* we only wrote a part of the buffer (if anything), deal with it! */
1172
1173 /* store the amount of bytes left in the buffer to write */
1174 k->upload_present -= bytes_written;
1175
1176 /* advance the pointer where to find the buffer when the next send
1177 is to happen */
1178 k->upload_fromhere += bytes_written;
1179 }
1180 else {
1181 /* we've uploaded that buffer now */
1182 result = Curl_get_upload_buffer(data);
1183 if(result)
1184 return result;
1185 k->upload_fromhere = data->state.ulbuf;
1186 k->upload_present = 0; /* no more bytes left */
1187
1188 if(k->upload_done) {
1189 result = Curl_done_sending(conn, k);
1190 if(result)
1191 return result;
1192 }
1193 }
1194
1195
1196 } while(0); /* just to break out from! */
1197
1198 return CURLE_OK;
1199 }
1200
1201 /*
1202 * Curl_readwrite() is the low-level function to be called when data is to
1203 * be read and written to/from the connection.
1204 *
1205 * return '*comeback' TRUE if we didn't properly drain the socket so this
1206 * function should get called again without select() or similar in between!
1207 */
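/* In short: figure out which directions are currently allowed (not paused
   or rate-limited), poll the sockets if their state is not already known,
   dispatch to readwrite_data() and/or readwrite_upload(), deal with the
   Expect: 100-continue timeout, and finally run the progress, speed and
   timeout checks. */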
1208 CURLcode Curl_readwrite(struct connectdata *conn,
1209 struct Curl_easy *data,
1210 bool *done,
1211 bool *comeback)
1212 {
1213 struct SingleRequest *k = &data->req;
1214 CURLcode result;
1215 int didwhat = 0;
1216
1217 curl_socket_t fd_read;
1218 curl_socket_t fd_write;
1219 int select_res = conn->cselect_bits;
1220
1221 conn->cselect_bits = 0;
1222
1223 /* only use the proper socket if the *_HOLD bit is not set simultaneously as
1224 then we are in rate limiting state in that transfer direction */
1225
1226 if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
1227 fd_read = conn->sockfd;
1228 else
1229 fd_read = CURL_SOCKET_BAD;
1230
1231 if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
1232 fd_write = conn->writesockfd;
1233 else
1234 fd_write = CURL_SOCKET_BAD;
1235
1236 if(conn->data->state.drain) {
1237 select_res |= CURL_CSELECT_IN;
1238 DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data\n"));
1239 }
1240
1241 if(!select_res) /* Call for select()/poll() only, if read/write/error
1242 status is not known. */
1243 select_res = Curl_socket_check(fd_read, CURL_SOCKET_BAD, fd_write, 0);
1244
1245 if(select_res == CURL_CSELECT_ERR) {
1246 failf(data, "select/poll returned error");
1247 return CURLE_SEND_ERROR;
1248 }
1249
1250 /* We go ahead and do a read if we have a readable socket or if
1251 the stream was rewound (in which case we have data in a
1252 buffer) */
1253 if((k->keepon & KEEP_RECV) && (select_res & CURL_CSELECT_IN)) {
1254 result = readwrite_data(data, conn, k, &didwhat, done, comeback);
1255 if(result || *done)
1256 return result;
1257 }
1258
1259 /* If we still have writing to do, we check if we have a writable socket. */
1260 if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
1261 /* write */
1262
1263 result = readwrite_upload(data, conn, &didwhat);
1264 if(result)
1265 return result;
1266 }
1267
1268 k->now = Curl_now();
1269 if(didwhat) {
1270 ;
1271 }
1272 else {
1273 /* no read no write, this is a timeout? */
1274 if(k->exp100 == EXP100_AWAITING_CONTINUE) {
1275 /* This should allow some time for the header to arrive, but only a
1276 very short time as otherwise it'll be too much wasted time too
1277 often. */
1278
1279 /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1280
1281 Therefore, when a client sends this header field to an origin server
1282 (possibly via a proxy) from which it has never seen a 100 (Continue)
1283 status, the client SHOULD NOT wait for an indefinite period before
1284 sending the request body.
1285
1286 */
1287
1288 timediff_t ms = Curl_timediff(k->now, k->start100);
1289 if(ms >= data->set.expect_100_timeout) {
1290 /* we've waited long enough, continue anyway */
1291 k->exp100 = EXP100_SEND_DATA;
1292 k->keepon |= KEEP_SEND;
1293 Curl_expire_done(data, EXPIRE_100_TIMEOUT);
1294 infof(data, "Done waiting for 100-continue\n");
1295 }
1296 }
1297 }
1298
1299 if(Curl_pgrsUpdate(conn))
1300 result = CURLE_ABORTED_BY_CALLBACK;
1301 else
1302 result = Curl_speedcheck(data, k->now);
1303 if(result)
1304 return result;
1305
1306 if(k->keepon) {
1307 if(0 > Curl_timeleft(data, &k->now, FALSE)) {
1308 if(k->size != -1) {
1309 failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
1310 " milliseconds with %" CURL_FORMAT_CURL_OFF_T " out of %"
1311 CURL_FORMAT_CURL_OFF_T " bytes received",
1312 Curl_timediff(k->now, data->progress.t_startsingle),
1313 k->bytecount, k->size);
1314 }
1315 else {
1316 failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
1317 " milliseconds with %" CURL_FORMAT_CURL_OFF_T " bytes received",
1318 Curl_timediff(k->now, data->progress.t_startsingle),
1319 k->bytecount);
1320 }
1321 return CURLE_OPERATION_TIMEDOUT;
1322 }
1323 }
1324 else {
1325 /*
1326 * The transfer has been performed. Just make some general checks before
1327 * returning.
1328 */
1329
1330 if(!(data->set.opt_no_body) && (k->size != -1) &&
1331 (k->bytecount != k->size) &&
1332 #ifdef CURL_DO_LINEEND_CONV
1333 /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1334 so we'll check to see if the discrepancy can be explained
1335 by the number of CRLFs we've changed to LFs.
1336 */
1337 (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1338 #endif /* CURL_DO_LINEEND_CONV */
1339 !k->newurl) {
1340 failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
1341 " bytes remaining to read", k->size - k->bytecount);
1342 return CURLE_PARTIAL_FILE;
1343 }
1344 if(!(data->set.opt_no_body) && k->chunk &&
1345 (conn->chunk.state != CHUNK_STOP)) {
1346 /*
1347 * In chunked mode, return an error if the connection is closed prior to
1348 * the empty (terminating) chunk is read.
1349 *
1350 * The condition above used to check for
1351 * conn->proto.http->chunk.datasize != 0 which is true after reading
1352 * *any* chunk, not just the empty chunk.
1353 *
1354 */
1355 failf(data, "transfer closed with outstanding read data remaining");
1356 return CURLE_PARTIAL_FILE;
1357 }
1358 if(Curl_pgrsUpdate(conn))
1359 return CURLE_ABORTED_BY_CALLBACK;
1360 }
1361
1362 /* Now update the "done" boolean we return */
1363 *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
1364 KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;
1365
1366 return CURLE_OK;
1367 }
1368
1369 /*
1370 * Curl_single_getsock() gets called by the multi interface code when the app
1371 * has requested to get the sockets for the current connection. This function
1372 * will then be called once for every connection that the multi interface
1373 * keeps track of. This function will only be called for connections that are
1374 * in the proper state to have this information available.
1375 */
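/* The returned bitmap uses GETSOCK_READSOCK(i) / GETSOCK_WRITESOCK(i) bits
   that refer to positions in the sock[] array filled in here; a connection
   reading and writing on two different sockets therefore occupies two
   entries. */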
1376 int Curl_single_getsock(const struct connectdata *conn,
1377 curl_socket_t *sock)
1378 {
1379 const struct Curl_easy *data = conn->data;
1380 int bitmap = GETSOCK_BLANK;
1381 unsigned sockindex = 0;
1382
1383 if(conn->handler->perform_getsock)
1384 return conn->handler->perform_getsock(conn, sock);
1385
1386 /* don't include HOLD and PAUSE connections */
1387 if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {
1388
1389 DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
1390
1391 bitmap |= GETSOCK_READSOCK(sockindex);
1392 sock[sockindex] = conn->sockfd;
1393 }
1394
1395 /* don't include HOLD and PAUSE connections */
1396 if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
1397
1398 if((conn->sockfd != conn->writesockfd) ||
1399 bitmap == GETSOCK_BLANK) {
1400 /* only if they are not the same socket and we have a readable
1401 one, we increase index */
1402 if(bitmap != GETSOCK_BLANK)
1403 sockindex++; /* increase index if we need two entries */
1404
1405 DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
1406
1407 sock[sockindex] = conn->writesockfd;
1408 }
1409
1410 bitmap |= GETSOCK_WRITESOCK(sockindex);
1411 }
1412
1413 return bitmap;
1414 }
1415
1416 /* Curl_init_CONNECT() gets called each time the handle switches to CONNECT
1417 which means this gets called once for each subsequent redirect etc */
1418 void Curl_init_CONNECT(struct Curl_easy *data)
1419 {
1420 data->state.fread_func = data->set.fread_func_set;
1421 data->state.in = data->set.in_set;
1422 }
1423
1424 /*
1425 * Curl_pretransfer() is called immediately before a transfer starts, and only
1426 * once for one transfer no matter if it has redirects or do multi-pass
1427 * authentication etc.
1428 */
1429 CURLcode Curl_pretransfer(struct Curl_easy *data)
1430 {
1431 CURLcode result;
1432
1433 if(!data->change.url && !data->set.uh) {
1434 /* we can't do anything without URL */
1435 failf(data, "No URL set!");
1436 return CURLE_URL_MALFORMAT;
1437 }
1438
1439 /* since the URL may have been redirected in a previous use of this handle */
1440 if(data->change.url_alloc) {
1441 /* the already set URL is allocated, free it first! */
1442 Curl_safefree(data->change.url);
1443 data->change.url_alloc = FALSE;
1444 }
1445
1446 if(!data->change.url && data->set.uh) {
1447 CURLUcode uc;
1448 free(data->set.str[STRING_SET_URL]);
1449 uc = curl_url_get(data->set.uh,
1450 CURLUPART_URL, &data->set.str[STRING_SET_URL], 0);
1451 if(uc) {
1452 failf(data, "No URL set!");
1453 return CURLE_URL_MALFORMAT;
1454 }
1455 }
1456
1457 data->state.httpreq = data->set.method;
1458 data->change.url = data->set.str[STRING_SET_URL];
1459
1460 /* Init the SSL session ID cache here. We do it here since we want to do it
1461 after the *_setopt() calls (that could specify the size of the cache) but
1462 before any transfer takes place. */
1463 result = Curl_ssl_initsessions(data, data->set.general_ssl.max_ssl_sessions);
1464 if(result)
1465 return result;
1466
1467 data->state.wildcardmatch = data->set.wildcard_enabled;
1468 data->set.followlocation = 0; /* reset the location-follow counter */
1469 data->state.this_is_a_follow = FALSE; /* reset this */
1470 data->state.errorbuf = FALSE; /* no error has occurred */
1471 data->state.httpversion = 0; /* don't assume any particular server version */
1472
1473 data->state.authproblem = FALSE;
1474 data->state.authhost.want = data->set.httpauth;
1475 data->state.authproxy.want = data->set.proxyauth;
1476 Curl_safefree(data->info.wouldredirect);
1477
1478 if(data->state.httpreq == HTTPREQ_PUT)
1479 data->state.infilesize = data->set.filesize;
1480 else if((data->state.httpreq != HTTPREQ_GET) &&
1481 (data->state.httpreq != HTTPREQ_HEAD)) {
1482 data->state.infilesize = data->set.postfieldsize;
1483 if(data->set.postfields && (data->state.infilesize == -1))
1484 data->state.infilesize = (curl_off_t)strlen(data->set.postfields);
1485 }
1486 else
1487 data->state.infilesize = 0;
1488
1489 /* If there is a list of cookie files to read, do it now! */
1490 if(data->change.cookielist)
1491 Curl_cookie_loadfiles(data);
1492
1493 /* If there is a list of host pairs to deal with */
1494 if(data->change.resolve)
1495 result = Curl_loadhostpairs(data);
1496
1497 if(!result) {
1498 /* Allow data->set.use_port to set which port to use. This needs to be
1499 * disabled for example when we follow Location: headers to URLs using
1500 * different ports! */
1501 data->state.allow_port = TRUE;
1502
1503 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1504 /*************************************************************
1505 * Tell signal handler to ignore SIGPIPE
1506 *************************************************************/
1507 if(!data->set.no_signal)
1508 data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1509 #endif
1510
1511 Curl_initinfo(data); /* reset session-specific information "variables" */
1512 Curl_pgrsResetTransferSizes(data);
1513 Curl_pgrsStartNow(data);
1514
1515 /* In case the handle is re-used and an authentication method was picked
1516 in the session we need to make sure we only use the one(s) we now
1517 consider to be fine */
1518 data->state.authhost.picked &= data->state.authhost.want;
1519 data->state.authproxy.picked &= data->state.authproxy.want;
1520
1521 #ifndef CURL_DISABLE_FTP
1522 if(data->state.wildcardmatch) {
1523 struct WildcardData *wc = &data->wildcard;
1524 if(wc->state < CURLWC_INIT) {
1525 result = Curl_wildcard_init(wc); /* init wildcard structures */
1526 if(result)
1527 return CURLE_OUT_OF_MEMORY;
1528 }
1529 }
1530 #endif
1531 Curl_http2_init_state(&data->state);
1532 }
1533
1534 return result;
1535 }
1536
1537 /*
1538 * Curl_posttransfer() is called immediately after a transfer ends
1539 */
1540 CURLcode Curl_posttransfer(struct Curl_easy *data)
1541 {
1542 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1543 /* restore the signal handler for SIGPIPE before we get back */
1544 if(!data->set.no_signal)
1545 signal(SIGPIPE, data->state.prev_signal);
1546 #else
1547 (void)data; /* unused parameter */
1548 #endif
1549
1550 return CURLE_OK;
1551 }
1552
1553 /*
1554 * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1555 * as given by the remote server and set up the new URL to request.
1556 *
1557 * This function DOES NOT FREE the given url.
1558 */
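/* In short: enforce CURLOPT_MAXREDIRS, optionally set the Referer: header
   for the next request, resolve 'newurl' (possibly relative) against the
   current URL using the URL API, then adjust the request method for
   301/302/303 responses as modified by CURLOPT_POSTREDIR. */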
1559 CURLcode Curl_follow(struct Curl_easy *data,
1560 char *newurl, /* the Location: string */
1561 followtype type) /* see transfer.h */
1562 {
1563 #ifdef CURL_DISABLE_HTTP
1564 (void)data;
1565 (void)newurl;
1566 (void)type;
1567 /* Location: following will not happen when HTTP is disabled */
1568 return CURLE_TOO_MANY_REDIRECTS;
1569 #else
1570
1571 /* Location: redirect */
1572 bool disallowport = FALSE;
1573 bool reachedmax = FALSE;
1574 CURLUcode uc;
1575
1576 if(type == FOLLOW_REDIR) {
1577 if((data->set.maxredirs != -1) &&
1578 (data->set.followlocation >= data->set.maxredirs)) {
1579 reachedmax = TRUE;
1580 type = FOLLOW_FAKE; /* switch to fake to store the would-be-redirected
1581 to URL */
1582 }
1583 else {
1584 /* mark the next request as a followed location: */
1585 data->state.this_is_a_follow = TRUE;
1586
1587 data->set.followlocation++; /* count location-followers */
1588
1589 if(data->set.http_auto_referer) {
1590 /* We are asked to automatically set the previous URL as the referer
1591 when we get the next URL. We pick the ->url field, which may or may
1592 not be 100% correct */
1593
1594 if(data->change.referer_alloc) {
1595 Curl_safefree(data->change.referer);
1596 data->change.referer_alloc = FALSE;
1597 }
1598
1599 data->change.referer = strdup(data->change.url);
1600 if(!data->change.referer)
1601 return CURLE_OUT_OF_MEMORY;
1602 data->change.referer_alloc = TRUE; /* yes, free this later */
1603 }
1604 }
1605 }
1606
1607 if(Curl_is_absolute_url(newurl, NULL, MAX_SCHEME_LEN))
1608 /* This is an absolute URL, don't allow the custom port number */
1609 disallowport = TRUE;
1610
1611 DEBUGASSERT(data->state.uh);
1612 uc = curl_url_set(data->state.uh, CURLUPART_URL, newurl,
1613 (type == FOLLOW_FAKE) ? CURLU_NON_SUPPORT_SCHEME :
1614 ((type == FOLLOW_REDIR) ? CURLU_URLENCODE : 0) );
1615 if(uc) {
1616 if(type != FOLLOW_FAKE)
1617 return Curl_uc_to_curlcode(uc);
1618
1619 /* the URL could not be parsed for some reason, but since this is FAKE
1620 mode, just duplicate the field as-is */
1621 newurl = strdup(newurl);
1622 if(!newurl)
1623 return CURLE_OUT_OF_MEMORY;
1624 }
1625 else {
1626
1627 uc = curl_url_get(data->state.uh, CURLUPART_URL, &newurl, 0);
1628 if(uc)
1629 return Curl_uc_to_curlcode(uc);
1630 }
1631
1632 if(type == FOLLOW_FAKE) {
1633 /* we're only figuring out the new url if we would've followed locations
1634 but now we're done so we can get out! */
1635 data->info.wouldredirect = newurl;
1636
1637 if(reachedmax) {
1638 failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
1639 return CURLE_TOO_MANY_REDIRECTS;
1640 }
1641 return CURLE_OK;
1642 }
1643
1644 if(disallowport)
1645 data->state.allow_port = FALSE;
1646
1647 if(data->change.url_alloc)
1648 Curl_safefree(data->change.url);
1649
1650 data->change.url = newurl;
1651 data->change.url_alloc = TRUE;
1652
1653 infof(data, "Issue another request to this URL: '%s'\n", data->change.url);
1654
1655 /*
1656 * We get here when the HTTP code is 300-399 (and 401). We need to perform
1657 * differently based on exactly what return code there was.
1658 *
1659 * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
1660 * an HTTP (proxy-) authentication scheme other than Basic.
1661 */
1662 switch(data->info.httpcode) {
1663 /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
1664 Authorization: XXXX header in the HTTP request code snippet */
1665 /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
1666 Proxy-Authorization: XXXX header in the HTTP request code snippet */
1667 /* 300 - Multiple Choices */
1668 /* 306 - Not used */
1669 /* 307 - Temporary Redirect */
1670 default: /* for all above (and the unknown ones) */
1671 /* Some codes are explicitly mentioned since I've checked RFC2616 and they
1672 * seem to be OK to POST to.
1673 */
1674 break;
1675 case 301: /* Moved Permanently */
1676 /* (quote from RFC7231, section 6.4.2)
1677 *
1678 * Note: For historical reasons, a user agent MAY change the request
1679 * method from POST to GET for the subsequent request. If this
1680 * behavior is undesired, the 307 (Temporary Redirect) status code
1681 * can be used instead.
1682 *
1683 * ----
1684 *
1685 * Many webservers expect this, so these servers often answer a POST
1686 * request with an error page. To be sure that libcurl gets the page that
1687 * most user agents would get, libcurl has to force GET.
1688 *
1689 * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1690 * can be overridden with CURLOPT_POSTREDIR.
1691 */
1692 if((data->state.httpreq == HTTPREQ_POST
1693 || data->state.httpreq == HTTPREQ_POST_FORM
1694 || data->state.httpreq == HTTPREQ_POST_MIME)
1695 && !(data->set.keep_post & CURL_REDIR_POST_301)) {
1696 infof(data, "Switch from POST to GET\n");
1697 data->state.httpreq = HTTPREQ_GET;
1698 }
1699 break;
1700 case 302: /* Found */
1701 /* (quote from RFC7231, section 6.4.3)
1702 *
1703 * Note: For historical reasons, a user agent MAY change the request
1704 * method from POST to GET for the subsequent request. If this
1705 * behavior is undesired, the 307 (Temporary Redirect) status code
1706 * can be used instead.
1707 *
1708 * ----
1709 *
1710 * Many webservers expect this, so these servers often answer a POST
1711 * request with an error page. To be sure that libcurl gets the page that
1712 * most user agents would get, libcurl has to force GET.
1713 *
1714 * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1715 * can be overridden with CURLOPT_POSTREDIR.
1716 */
1717 if((data->state.httpreq == HTTPREQ_POST
1718 || data->state.httpreq == HTTPREQ_POST_FORM
1719 || data->state.httpreq == HTTPREQ_POST_MIME)
1720 && !(data->set.keep_post & CURL_REDIR_POST_302)) {
1721 infof(data, "Switch from POST to GET\n");
1722 data->state.httpreq = HTTPREQ_GET;
1723 }
1724 break;
1725
1726 case 303: /* See Other */
1727 /* 'See Other' location is not the resource but a substitute for the
1728 * resource. In this case we switch the method to GET/HEAD, unless the
1729 * method is POST and the user specified to keep it as POST.
1730 * https://github.com/curl/curl/issues/5237#issuecomment-614641049
1731 */
1732 if(data->state.httpreq != HTTPREQ_GET &&
1733 ((data->state.httpreq != HTTPREQ_POST &&
1734 data->state.httpreq != HTTPREQ_POST_FORM &&
1735 data->state.httpreq != HTTPREQ_POST_MIME) ||
1736 !(data->set.keep_post & CURL_REDIR_POST_303))) {
1737 data->state.httpreq = HTTPREQ_GET;
1738 data->set.upload = false;
1739 infof(data, "Switch to %s\n",
1740 data->set.opt_no_body?"HEAD":"GET");
1741 }
1742 break;
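  /* Illustrative sketch (application side, not libcurl internals): a program
   * that wants to keep the POST method across 301/302/303 redirects can set
   * CURLOPT_POSTREDIR before the transfer; the CURL_REDIR_POST_* bits map to
   * the data->set.keep_post checks above. The URL and POST body below are
   * placeholders.
   *
   *   #include <curl/curl.h>
   *
   *   static void post_with_redirects(void)
   *   {
   *     CURL *curl = curl_easy_init();
   *     if(curl) {
   *       curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/form");
   *       curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "name=value");
   *       curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
   *       curl_easy_setopt(curl, CURLOPT_POSTREDIR, CURL_REDIR_POST_ALL);
   *       curl_easy_perform(curl);
   *       curl_easy_cleanup(curl);
   *     }
   *   }
   */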
1743 case 304: /* Not Modified */
1744 /* 304 means we did a conditional request and it was "Not modified".
1745 * We shouldn't get any Location: header in this response!
1746 */
1747 break;
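  /* Illustrative sketch (application side): a conditional request that can
   * end up in this 304 case is typically issued with a time condition, and
   * whether the condition was unmet can be read back afterwards with
   * CURLINFO_CONDITION_UNMET. The URL and 'cached_mtime' are placeholders.
   *
   *   curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/doc.html");
   *   curl_easy_setopt(curl, CURLOPT_TIMECONDITION,
   *                    (long)CURL_TIMECOND_IFMODSINCE);
   *   curl_easy_setopt(curl, CURLOPT_TIMEVALUE, (long)cached_mtime);
   *   if(curl_easy_perform(curl) == CURLE_OK) {
   *     long unmet = 0;
   *     curl_easy_getinfo(curl, CURLINFO_CONDITION_UNMET, &unmet);
   *     if(unmet)
   *       (serve the locally cached copy instead)
   *   }
   */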
1748 case 305: /* Use Proxy */
1749 /* (quote from RFC2616, section 10.3.6):
1750 * "The requested resource MUST be accessed through the proxy given
1751 * by the Location field. The Location field gives the URI of the
1752 * proxy. The recipient is expected to repeat this single request
1753 * via the proxy. 305 responses MUST only be generated by origin
1754 * servers."
1755 */
1756 break;
1757 }
1758 Curl_pgrsTime(data, TIMER_REDIRECT);
1759 Curl_pgrsResetTransferSizes(data);
1760
1761 return CURLE_OK;
1762 #endif /* CURL_DISABLE_HTTP */
1763 }
1764
1765 /* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.
1766
1767    NOTE: the *url is malloc()ed and the caller is responsible for freeing it. */
1768 CURLcode Curl_retry_request(struct connectdata *conn,
1769 char **url)
1770 {
1771 struct Curl_easy *data = conn->data;
1772 bool retry = FALSE;
1773 *url = NULL;
1774
1775   /* if we're uploading, we can't do the checks below unless the protocol is
1776      HTTP, since when uploading over HTTP we will still get a response */
1777 if(data->set.upload &&
1778 !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
1779 return CURLE_OK;
1780
1781 if((data->req.bytecount + data->req.headerbytecount == 0) &&
1782 conn->bits.reuse &&
1783 (!data->set.opt_no_body
1784 || (conn->handler->protocol & PROTO_FAMILY_HTTP)) &&
1785 (data->set.rtspreq != RTSPREQ_RECEIVE))
1786     /* We got no data and we attempted to re-use a connection. For HTTP this
1787        can be a retry, so we try again regardless of whether we expected a body.
1788        For other protocols we only try again if we expected a body.
1789
1790 This might happen if the connection was left alive when we were
1791 done using it before, but that was closed when we wanted to read from
1792 it again. Bad luck. Retry the same request on a fresh connect! */
1793 retry = TRUE;
1794 else if(data->state.refused_stream &&
1795 (data->req.bytecount + data->req.headerbytecount == 0) ) {
1796 /* This was sent on a refused stream, safe to rerun. A refused stream
1797 error can typically only happen on HTTP/2 level if the stream is safe
1798 to issue again, but the nghttp2 API can deliver the message to other
1799        streams as well, which is why this also checks the data
1800        counters. */
1801 infof(conn->data, "REFUSED_STREAM, retrying a fresh connect\n");
1802 data->state.refused_stream = FALSE; /* clear again */
1803 retry = TRUE;
1804 }
1805 if(retry) {
1806 #define CONN_MAX_RETRIES 5
1807 if(data->state.retrycount++ >= CONN_MAX_RETRIES) {
1808 failf(data, "Connection died, tried %d times before giving up",
1809 CONN_MAX_RETRIES);
1810 data->state.retrycount = 0;
1811 return CURLE_SEND_ERROR;
1812 }
1813     infof(conn->data, "Connection died, retrying a fresh connect "
1814           "(retry count: %d)\n", data->state.retrycount);
1815 *url = strdup(conn->data->change.url);
1816 if(!*url)
1817 return CURLE_OUT_OF_MEMORY;
1818
1819 connclose(conn, "retry"); /* close this connection */
1820 conn->bits.retry = TRUE; /* mark this as a connection we're about
1821 to retry. Marking it this way should
1822                                          prevent e.g. HTTP transfers from
1823                                          returning an error just because
1824                                          nothing has been transferred! */
1825
1826
1827 if(conn->handler->protocol&PROTO_FAMILY_HTTP) {
1828 if(data->req.writebytecount) {
1829 CURLcode result = Curl_readrewind(conn);
1830 if(result) {
1831 Curl_safefree(*url);
1832 return result;
1833 }
1834 }
1835 }
1836 }
1837 return CURLE_OK;
1838 }
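/* A simplified sketch of the caller-side contract above (the real retry
 * handling lives in the multi machinery; 'issue_request_again' is a purely
 * hypothetical helper used only for illustration):
 *
 *   char *newurl = NULL;
 *   CURLcode result = Curl_retry_request(conn, &newurl);
 *   if(result)
 *     return result;                      (a real error, no retry wanted)
 *   if(newurl) {
 *     result = issue_request_again(data, newurl);   (re-run on a fresh connect)
 *     free(newurl);                       (caller owns the malloc()ed copy)
 *   }
 */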
1839
1840 /*
1841 * Curl_setup_transfer() is called to setup some basic properties for the
1842 * upcoming transfer.
1843 */
1844 void
1845 Curl_setup_transfer(
1846 struct Curl_easy *data, /* transfer */
1847 int sockindex, /* socket index to read from or -1 */
1848 curl_off_t size, /* -1 if unknown at this point */
1849 bool getheader, /* TRUE if header parsing is wanted */
1850 int writesockindex /* socket index to write to, it may very well be
1851 the same we read from. -1 disables */
1852 )
1853 {
1854 struct SingleRequest *k = &data->req;
1855 struct connectdata *conn = data->conn;
1856 struct HTTP *http = data->req.protop;
1857 bool httpsending = ((conn->handler->protocol&PROTO_FAMILY_HTTP) &&
1858 (http->sending == HTTPSEND_REQUEST));
1859 DEBUGASSERT(conn != NULL);
1860 DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
1861
1862 if(conn->bits.multiplex || conn->httpversion == 20 || httpsending) {
1863 /* when multiplexing, the read/write sockets need to be the same! */
1864 conn->sockfd = sockindex == -1 ?
1865 ((writesockindex == -1 ? CURL_SOCKET_BAD : conn->sock[writesockindex])) :
1866 conn->sock[sockindex];
1867 conn->writesockfd = conn->sockfd;
1868 if(httpsending)
1869 /* special and very HTTP-specific */
1870 writesockindex = FIRSTSOCKET;
1871 }
1872 else {
1873 conn->sockfd = sockindex == -1 ?
1874 CURL_SOCKET_BAD : conn->sock[sockindex];
1875 conn->writesockfd = writesockindex == -1 ?
1876 CURL_SOCKET_BAD:conn->sock[writesockindex];
1877 }
1878 k->getheader = getheader;
1879
1880 k->size = size;
1881
1882   /* The code sequence below is placed in this function just because all
1883      the necessary input is not always known in do_complete(), as this
1884      function may be called after that */
1885
1886 if(!k->getheader) {
1887 k->header = FALSE;
1888 if(size > 0)
1889 Curl_pgrsSetDownloadSize(data, size);
1890 }
1891 /* we want header and/or body, if neither then don't do this! */
1892 if(k->getheader || !data->set.opt_no_body) {
1893
1894 if(sockindex != -1)
1895 k->keepon |= KEEP_RECV;
1896
1897 if(writesockindex != -1) {
1898       /* HTTP 1.1 magic:
1899
1900          Even if we require a 100-continue response before uploading data, we
1901          might need to write data before that since the request may not have
1902          been completely sent off just yet.
1903
1904          Thus, we must check if the request has been sent before we set the
1905          state info where we wait for the 100-continue response.
1906       */
1907 if((data->state.expect100header) &&
1908 (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
1909 (http->sending == HTTPSEND_BODY)) {
1910 /* wait with write until we either got 100-continue or a timeout */
1911 k->exp100 = EXP100_AWAITING_CONTINUE;
1912 k->start100 = Curl_now();
1913
1914         /* Set a timeout for the multi interface. Add the inaccuracy margin so
1915            that we don't fire slightly too early and get denied the chance to run. */
1916 Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
1917 }
1918 else {
1919 if(data->state.expect100header)
1920 /* when we've sent off the rest of the headers, we must await a
1921 100-continue but first finish sending the request */
1922 k->exp100 = EXP100_SENDING_REQUEST;
1923
1924 /* enable the write bit when we're not waiting for continue */
1925 k->keepon |= KEEP_SEND;
1926 }
1927 } /* if(writesockindex != -1) */
1928 } /* if(k->getheader || !data->set.opt_no_body) */
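  /* Note (application side): the 100-continue timeout used above is set with
   * CURLOPT_EXPECT_100_TIMEOUT_MS. A minimal sketch of tuning it, assuming a
   * plain easy handle named 'curl':
   *
   *   curl_easy_setopt(curl, CURLOPT_EXPECT_100_TIMEOUT_MS, 2000L);
   *
   * An application can also suppress the Expect: 100-continue mechanism
   * entirely by supplying an empty "Expect:" header via CURLOPT_HTTPHEADER.
   */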
1929
1930 }
1931