1 /***************************************************************************
2  *                                  _   _ ____  _
3  *  Project                     ___| | | |  _ \| |
4  *                             / __| | | | |_) | |
5  *                            | (__| |_| |  _ <| |___
6  *                             \___|\___/|_| \_\_____|
7  *
8  * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
9  *
10  * This software is licensed as described in the file COPYING, which
11  * you should have received as part of this distribution. The terms
12  * are also available at https://curl.haxx.se/docs/copyright.html.
13  *
14  * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15  * copies of the Software, and permit persons to whom the Software is
16  * furnished to do so, under the terms of the COPYING file.
17  *
18  * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19  * KIND, either express or implied.
20  *
21  ***************************************************************************/
22 
23 #include "curl_setup.h"
24 #include "strtoofft.h"
25 
26 #ifdef HAVE_NETINET_IN_H
27 #include <netinet/in.h>
28 #endif
29 #ifdef HAVE_NETDB_H
30 #include <netdb.h>
31 #endif
32 #ifdef HAVE_ARPA_INET_H
33 #include <arpa/inet.h>
34 #endif
35 #ifdef HAVE_NET_IF_H
36 #include <net/if.h>
37 #endif
38 #ifdef HAVE_SYS_IOCTL_H
39 #include <sys/ioctl.h>
40 #endif
41 #ifdef HAVE_SIGNAL_H
42 #include <signal.h>
43 #endif
44 
45 #ifdef HAVE_SYS_PARAM_H
46 #include <sys/param.h>
47 #endif
48 
49 #ifdef HAVE_SYS_SELECT_H
50 #include <sys/select.h>
51 #endif
52 
53 #ifndef HAVE_SOCKET
54 #error "We can't compile without socket() support!"
55 #endif
56 
57 #include "urldata.h"
58 #include <curl/curl.h>
59 #include "netrc.h"
60 
61 #include "content_encoding.h"
62 #include "hostip.h"
63 #include "transfer.h"
64 #include "sendf.h"
65 #include "speedcheck.h"
66 #include "progress.h"
67 #include "http.h"
68 #include "url.h"
69 #include "getinfo.h"
70 #include "vtls/vtls.h"
71 #include "select.h"
72 #include "multiif.h"
73 #include "connect.h"
74 #include "non-ascii.h"
75 #include "http2.h"
76 #include "mime.h"
77 #include "strcase.h"
78 #include "urlapi-int.h"
79 
80 /* The last 3 #include files should be in this order */
81 #include "curl_printf.h"
82 #include "curl_memory.h"
83 #include "memdebug.h"
84 
85 #if !defined(CURL_DISABLE_HTTP) || !defined(CURL_DISABLE_SMTP) || \
86     !defined(CURL_DISABLE_IMAP)
87 /*
88  * checkheaders() checks the linked list of custom headers for a
89  * particular header (prefix). Provide the prefix without colon!
90  *
91  * Returns a pointer to the first matching header or NULL if none matched.
92  */
93 char *Curl_checkheaders(const struct connectdata *conn,
94                         const char *thisheader)
95 {
96   struct curl_slist *head;
97   size_t thislen = strlen(thisheader);
98   struct Curl_easy *data = conn->data;
99 
100   for(head = data->set.headers; head; head = head->next) {
101     if(strncasecompare(head->data, thisheader, thislen) &&
102        Curl_headersep(head->data[thislen]) )
103       return head->data;
104   }
105 
106   return NULL;
107 }
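/* Usage sketch (hypothetical caller, for illustration only): a protocol
   handler that must not duplicate an application-supplied header can probe
   the custom-header list first, e.g.

     if(!Curl_checkheaders(conn, "Accept"))
       result = add_our_own_accept_header(req);

   where add_our_own_accept_header() stands in for whatever the handler does.
   The prefix is matched case-insensitively and without the trailing colon,
   as noted above. */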
108 #endif
109 
110 CURLcode Curl_get_upload_buffer(struct Curl_easy *data)
111 {
112   if(!data->state.ulbuf) {
113     data->state.ulbuf = malloc(data->set.upload_buffer_size);
114     if(!data->state.ulbuf)
115       return CURLE_OUT_OF_MEMORY;
116   }
117   return CURLE_OK;
118 }
119 
120 #ifndef CURL_DISABLE_HTTP
121 /*
122  * This function will be called to loop through the trailers buffer
123  * until no more data is available for sending.
124  */
125 static size_t Curl_trailers_read(char *buffer, size_t size, size_t nitems,
126                                  void *raw)
127 {
128   struct Curl_easy *data = (struct Curl_easy *)raw;
129   Curl_send_buffer *trailers_buf = data->state.trailers_buf;
130   size_t bytes_left = trailers_buf->size_used-data->state.trailers_bytes_sent;
131   size_t to_copy = (size*nitems < bytes_left) ? size*nitems : bytes_left;
132   if(to_copy) {
133     memcpy(buffer,
134            &trailers_buf->buffer[data->state.trailers_bytes_sent],
135            to_copy);
136     data->state.trailers_bytes_sent += to_copy;
137   }
138   return to_copy;
139 }
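/* Note: Curl_trailers_read() deliberately mimics a user read callback; it
   copies at most size*nitems bytes of the compiled trailers per call and
   returns 0 once the buffer is exhausted. Curl_trailers_left() reports how
   many bytes are still pending. */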
140 
141 static size_t Curl_trailers_left(void *raw)
142 {
143   struct Curl_easy *data = (struct Curl_easy *)raw;
144   Curl_send_buffer *trailers_buf = data->state.trailers_buf;
145   return trailers_buf->size_used - data->state.trailers_bytes_sent;
146 }
147 #endif
148 
149 /*
150  * This function will call the read callback to fill our buffer with data
151  * to upload.
152  */
153 CURLcode Curl_fillreadbuffer(struct connectdata *conn, size_t bytes,
154                              size_t *nreadp)
155 {
156   struct Curl_easy *data = conn->data;
157   size_t buffersize = bytes;
158   size_t nread;
159 
160   curl_read_callback readfunc = NULL;
161   void *extra_data = NULL;
162 
163 #ifdef CURL_DOES_CONVERSIONS
164   bool sending_http_headers = FALSE;
165 
166   if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
167     const struct HTTP *http = data->req.protop;
168 
169     if(http->sending == HTTPSEND_REQUEST)
170       /* We're sending the HTTP request headers, not the data.
171          Remember that so we don't re-translate them into garbage. */
172       sending_http_headers = TRUE;
173   }
174 #endif
175 
176 #ifndef CURL_DISABLE_HTTP
177   if(data->state.trailers_state == TRAILERS_INITIALIZED) {
178     struct curl_slist *trailers = NULL;
179     CURLcode result;
180     int trailers_ret_code;
181 
182     /* at this point we already verified that the callback exists
183        so we compile and store the trailers buffer, then proceed */
184     infof(data,
185           "Moving trailers state machine from initialized to sending.\n");
186     data->state.trailers_state = TRAILERS_SENDING;
187     data->state.trailers_buf = Curl_add_buffer_init();
188     if(!data->state.trailers_buf) {
189       failf(data, "Unable to allocate trailing headers buffer !");
190       return CURLE_OUT_OF_MEMORY;
191     }
192     data->state.trailers_bytes_sent = 0;
193     Curl_set_in_callback(data, true);
194     trailers_ret_code = data->set.trailer_callback(&trailers,
195                                                    data->set.trailer_data);
196     Curl_set_in_callback(data, false);
197     if(trailers_ret_code == CURL_TRAILERFUNC_OK) {
198       result = Curl_http_compile_trailers(trailers, &data->state.trailers_buf,
199                                           data);
200     }
201     else {
202       failf(data, "operation aborted by trailing headers callback");
203       *nreadp = 0;
204       result = CURLE_ABORTED_BY_CALLBACK;
205     }
206     if(result) {
207       Curl_add_buffer_free(&data->state.trailers_buf);
208       curl_slist_free_all(trailers);
209       return result;
210     }
211     infof(data, "Successfully compiled trailers.\r\n");
212     curl_slist_free_all(trailers);
213   }
214 #endif
215 
216   /* if we are transmitting trailing data, we don't need to write
217      a chunk size so we skip this */
218   if(data->req.upload_chunky &&
219      data->state.trailers_state == TRAILERS_NONE) {
220     /* if chunked Transfer-Encoding */
221     buffersize -= (8 + 2 + 2);   /* 32bit hex + CRLF + CRLF */
222     data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
223   }
224 
225 #ifndef CURL_DISABLE_HTTP
226   if(data->state.trailers_state == TRAILERS_SENDING) {
227     /* If we're here, it means we have already sent the final, empty chunk
228        ("0" CRLF) but not the terminating CRLF that ends the request body.
229        We now pull trailing data until there is no more, at which point
230        we simply return to the previous point in the state machine as if
231        nothing had happened.
232        */
233     readfunc = Curl_trailers_read;
234     extra_data = (void *)data;
235   }
236   else
237 #endif
238   {
239     readfunc = data->state.fread_func;
240     extra_data = data->state.in;
241   }
242 
243   Curl_set_in_callback(data, true);
244   nread = readfunc(data->req.upload_fromhere, 1,
245                    buffersize, extra_data);
246   Curl_set_in_callback(data, false);
247 
248   if(nread == CURL_READFUNC_ABORT) {
249     failf(data, "operation aborted by callback");
250     *nreadp = 0;
251     return CURLE_ABORTED_BY_CALLBACK;
252   }
253   if(nread == CURL_READFUNC_PAUSE) {
254     struct SingleRequest *k = &data->req;
255 
256     if(conn->handler->flags & PROTOPT_NONETWORK) {
257       /* protocols that work without network cannot be paused. This is
258          actually only FILE:// just now, and it can't pause since the transfer
259          isn't done using the "normal" procedure. */
260       failf(data, "Read callback asked for PAUSE when not supported!");
261       return CURLE_READ_ERROR;
262     }
263 
264     /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
265     k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
266     if(data->req.upload_chunky) {
267         /* Back out the preallocation done above */
268       data->req.upload_fromhere -= (8 + 2);
269     }
270     *nreadp = 0;
271 
272     return CURLE_OK; /* nothing was read */
273   }
274   else if(nread > buffersize) {
275     /* the read function returned a too large value */
276     *nreadp = 0;
277     failf(data, "read function returned funny value");
278     return CURLE_READ_ERROR;
279   }
280 
281   if(!data->req.forbidchunk && data->req.upload_chunky) {
282     /* if chunked Transfer-Encoding
283      *    build chunk:
284      *
285      *        <HEX SIZE> CRLF
286      *        <DATA> CRLF
287      */
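    /* Example: a 26-byte read is framed as "1a" CRLF, the 26 payload bytes,
       then CRLF. The upload ends with the terminating chunk "0" CRLF CRLF,
       or "0" CRLF + trailers + CRLF when a trailer callback is set. */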
288     /* On non-ASCII platforms the <DATA> may or may not be
289        translated based on set.prefer_ascii while the protocol
290        portion must always be translated to the network encoding.
291        To further complicate matters, line end conversion might be
292        done later on, so we need to prevent CRLFs from becoming
293        CRCRLFs if that's the case.  To do this we use bare LFs
294        here, knowing they'll become CRLFs later on.
295      */
296 
297     bool added_crlf = FALSE;
298     int hexlen = 0;
299     const char *endofline_native;
300     const char *endofline_network;
301 
302     if(
303 #ifdef CURL_DO_LINEEND_CONV
304        (data->set.prefer_ascii) ||
305 #endif
306        (data->set.crlf)) {
307       /* \n will become \r\n later on */
308       endofline_native  = "\n";
309       endofline_network = "\x0a";
310     }
311     else {
312       endofline_native  = "\r\n";
313       endofline_network = "\x0d\x0a";
314     }
315 
316     /* if we're not handling trailing data, proceed as usual */
317     if(data->state.trailers_state != TRAILERS_SENDING) {
318       char hexbuffer[11] = "";
319       hexlen = msnprintf(hexbuffer, sizeof(hexbuffer),
320                          "%zx%s", nread, endofline_native);
321 
322       /* move buffer pointer */
323       data->req.upload_fromhere -= hexlen;
324       nread += hexlen;
325 
326       /* copy the prefix to the buffer, leaving out the NUL */
327       memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
328 
329       /* always append ASCII CRLF to the data unless
330          we have a valid trailer callback */
331 #ifndef CURL_DISABLE_HTTP
332       if((nread-hexlen) == 0 &&
333           data->set.trailer_callback != NULL &&
334           data->state.trailers_state == TRAILERS_NONE) {
335         data->state.trailers_state = TRAILERS_INITIALIZED;
336       }
337       else
338 #endif
339       {
340         memcpy(data->req.upload_fromhere + nread,
341                endofline_network,
342                strlen(endofline_network));
343         added_crlf = TRUE;
344       }
345     }
346 
347 #ifdef CURL_DOES_CONVERSIONS
348     {
349       CURLcode result;
350       size_t length;
351       if(data->set.prefer_ascii)
352         /* translate the protocol and data */
353         length = nread;
354       else
355         /* just translate the protocol portion */
356         length = hexlen;
357       if(length) {
358         result = Curl_convert_to_network(data, data->req.upload_fromhere,
359                                          length);
360         /* Curl_convert_to_network calls failf if unsuccessful */
361         if(result)
362           return result;
363       }
364     }
365 #endif /* CURL_DOES_CONVERSIONS */
366 
367 #ifndef CURL_DISABLE_HTTP
368     if(data->state.trailers_state == TRAILERS_SENDING &&
369        !Curl_trailers_left(data)) {
370       Curl_add_buffer_free(&data->state.trailers_buf);
371       data->state.trailers_state = TRAILERS_DONE;
372       data->set.trailer_data = NULL;
373       data->set.trailer_callback = NULL;
374       /* mark the transfer as done */
375       data->req.upload_done = TRUE;
376       infof(data, "Signaling end of chunked upload after trailers.\n");
377     }
378     else
379 #endif
380       if((nread - hexlen) == 0 &&
381          data->state.trailers_state != TRAILERS_INITIALIZED) {
382         /* mark this as done once this chunk is transferred */
383         data->req.upload_done = TRUE;
384         infof(data,
385               "Signaling end of chunked upload via terminating chunk.\n");
386       }
387 
388     if(added_crlf)
389       nread += strlen(endofline_network); /* for the added end of line */
390   }
391 #ifdef CURL_DOES_CONVERSIONS
392   else if((data->set.prefer_ascii) && (!sending_http_headers)) {
393     CURLcode result;
394     result = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
395     /* Curl_convert_to_network calls failf if unsuccessful */
396     if(result)
397       return result;
398   }
399 #endif /* CURL_DOES_CONVERSIONS */
400 
401   *nreadp = nread;
402 
403   return CURLE_OK;
404 }
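/* Note: the chunk-size prefix above is made to fit by first reserving
   8 + 2 bytes in front of upload_fromhere before the read, then moving
   upload_fromhere back by the actual hex length afterwards, so that prefix,
   payload and line ending end up contiguous in a single buffer. */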
405 
406 
407 /*
408  * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
409  * POST/PUT with multi-pass authentication when a sending was denied and a
410  * resend is necessary.
411  */
412 CURLcode Curl_readrewind(struct connectdata *conn)
413 {
414   struct Curl_easy *data = conn->data;
415   curl_mimepart *mimepart = &data->set.mimepost;
416 
417   conn->bits.rewindaftersend = FALSE; /* we rewind now */
418 
419   /* explicitly switch off sending data on this connection now since we are
420      about to restart a new transfer and thus we want to avoid inadvertently
421      sending more data on the existing connection until the next transfer
422      starts */
423   data->req.keepon &= ~KEEP_SEND;
424 
425   /* We have sent away data. If not using CURLOPT_POSTFIELDS or
426      CURLOPT_HTTPPOST, call app to rewind
427   */
428   if(conn->handler->protocol & PROTO_FAMILY_HTTP) {
429     struct HTTP *http = data->req.protop;
430 
431     if(http->sendit)
432       mimepart = http->sendit;
433   }
434   if(data->set.postfields)
435     ; /* do nothing */
436   else if(data->set.httpreq == HTTPREQ_POST_MIME ||
437           data->set.httpreq == HTTPREQ_POST_FORM) {
438     if(Curl_mime_rewind(mimepart)) {
439       failf(data, "Cannot rewind mime/post data");
440       return CURLE_SEND_FAIL_REWIND;
441     }
442   }
443   else {
444     if(data->set.seek_func) {
445       int err;
446 
447       Curl_set_in_callback(data, true);
448       err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
449       Curl_set_in_callback(data, false);
450       if(err) {
451         failf(data, "seek callback returned error %d", (int)err);
452         return CURLE_SEND_FAIL_REWIND;
453       }
454     }
455     else if(data->set.ioctl_func) {
456       curlioerr err;
457 
458       Curl_set_in_callback(data, true);
459       err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
460                                    data->set.ioctl_client);
461       Curl_set_in_callback(data, false);
462       infof(data, "the ioctl callback returned %d\n", (int)err);
463 
464       if(err) {
465         failf(data, "ioctl callback returned error %d", (int)err);
466         return CURLE_SEND_FAIL_REWIND;
467       }
468     }
469     else {
470       /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
471          given FILE * stream and we can actually attempt to rewind that
472          ourselves with fseek() */
473       if(data->state.fread_func == (curl_read_callback)fread) {
474         if(-1 != fseek(data->state.in, 0, SEEK_SET))
475           /* successful rewind */
476           return CURLE_OK;
477       }
478 
479       /* no callback set or failure above, makes us fail at once */
480       failf(data, "necessary data rewind wasn't possible");
481       return CURLE_SEND_FAIL_REWIND;
482     }
483   }
484   return CURLE_OK;
485 }
486 
487 static int data_pending(const struct connectdata *conn)
488 {
489   /* in the case of libssh2, we can never be really sure that we have emptied
490      its internal buffers so we MUST always try until we get EAGAIN back */
491   return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
492 #if defined(USE_NGHTTP2)
493     Curl_ssl_data_pending(conn, FIRSTSOCKET) ||
494     /* For HTTP/2, we may read up everything including response body
495        with header fields in Curl_http_readwrite_headers. If no
496        content-length is provided, curl waits for the connection
497        close, which we emulate using conn->proto.httpc.closed =
498        TRUE. The thing is if we read everything, then http2_recv won't
499        be called and we cannot signal the HTTP/2 stream has closed. As
500        a workaround, we return nonzero here to call http2_recv. */
501     ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion >= 20);
502 #else
503     Curl_ssl_data_pending(conn, FIRSTSOCKET);
504 #endif
505 }
506 
507 /*
508  * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
509  * remote document with the time provided by CURLOPT_TIMEVAL
510  */
511 bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc)
512 {
513   if((timeofdoc == 0) || (data->set.timevalue == 0))
514     return TRUE;
515 
516   switch(data->set.timecondition) {
517   case CURL_TIMECOND_IFMODSINCE:
518   default:
519     if(timeofdoc <= data->set.timevalue) {
520       infof(data,
521             "The requested document is not new enough\n");
522       data->info.timecond = TRUE;
523       return FALSE;
524     }
525     break;
526   case CURL_TIMECOND_IFUNMODSINCE:
527     if(timeofdoc >= data->set.timevalue) {
528       infof(data,
529             "The requested document is not old enough\n");
530       data->info.timecond = TRUE;
531       return FALSE;
532     }
533     break;
534   }
535 
536   return TRUE;
537 }
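/* Example: with CURLOPT_TIMECONDITION set to CURL_TIMECOND_IFMODSINCE and
   CURLOPT_TIMEVALUE set to 1546300800 (2019-01-01 00:00:00 UTC), a document
   dated at or before that moment makes this function return FALSE, and the
   caller in readwrite_data() then simulates a 304 response instead of
   downloading the body. */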
538 
539 /*
540  * Go ahead and do a read if we have a readable socket or if
541  * the stream was rewound (in which case we have data in a
542  * buffer)
543  *
544  * return '*comeback' TRUE if we didn't properly drain the socket so this
545  * function should get called again without select() or similar in between!
546  */
547 static CURLcode readwrite_data(struct Curl_easy *data,
548                                struct connectdata *conn,
549                                struct SingleRequest *k,
550                                int *didwhat, bool *done,
551                                bool *comeback)
552 {
553   CURLcode result = CURLE_OK;
554   ssize_t nread; /* number of bytes read */
555   size_t excess = 0; /* excess bytes read */
556   bool readmore = FALSE; /* used by RTP to signal for more data */
557   int maxloops = 100;
558 
559   *done = FALSE;
560   *comeback = FALSE;
561 
562   /* This is where we loop until we have read everything there is to
563      read or we get a CURLE_AGAIN */
564   do {
565     bool is_empty_data = FALSE;
566     size_t buffersize = data->set.buffer_size;
567     size_t bytestoread = buffersize;
568 
569     if(
570 #if defined(USE_NGHTTP2)
571        /* For HTTP/2, read data without caring about the content
572           length. This is safe because body in HTTP/2 is always
573           segmented thanks to its framing layer. Meanwhile, we have to
574           call Curl_read to ensure that http2_handle_stream_close is
575           called when we read all incoming bytes for a particular
576           stream. */
577        !((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
578          conn->httpversion == 20) &&
579 #endif
580        k->size != -1 && !k->header) {
581       /* make sure we don't read too much */
582       curl_off_t totalleft = k->size - k->bytecount;
583       if(totalleft < (curl_off_t)bytestoread)
584         bytestoread = (size_t)totalleft;
585     }
586 
587     if(bytestoread) {
588       /* receive data from the network! */
589       result = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
590 
591       /* read would've blocked */
592       if(CURLE_AGAIN == result)
593         break; /* get out of loop */
594 
595       if(result>0)
596         return result;
597     }
598     else {
599       /* read nothing but since we wanted nothing we consider this an OK
600          situation to proceed from */
601       DEBUGF(infof(data, "readwrite_data: we're done!\n"));
602       nread = 0;
603     }
604 
605     if(!k->bytecount) {
606       Curl_pgrsTime(data, TIMER_STARTTRANSFER);
607       if(k->exp100 > EXP100_SEND_DATA)
608         /* set time stamp to compare with when waiting for the 100 */
609         k->start100 = Curl_now();
610     }
611 
612     *didwhat |= KEEP_RECV;
613     /* indicates data of zero size, i.e. empty file */
614     is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
615 
616     /* NUL terminate, allowing string ops to be used */
617     if(0 < nread || is_empty_data) {
618       k->buf[nread] = 0;
619     }
620     else {
621       /* if we receive 0 or less here, the server closed the connection
622          and we bail out from this! */
623       DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
624       k->keepon &= ~KEEP_RECV;
625       break;
626     }
627 
628     /* Default buffer to use when we write the buffer, it may be changed
629        in the flow below before the actual storing is done. */
630     k->str = k->buf;
631 
632     if(conn->handler->readwrite) {
633       result = conn->handler->readwrite(data, conn, &nread, &readmore);
634       if(result)
635         return result;
636       if(readmore)
637         break;
638     }
639 
640 #ifndef CURL_DISABLE_HTTP
641     /* Since this is a two-state thing, we check if we are parsing
642        headers at the moment or not. */
643     if(k->header) {
644       /* we are in parse-the-header-mode */
645       bool stop_reading = FALSE;
646       result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
647       if(result)
648         return result;
649 
650       if(conn->handler->readwrite &&
651          (k->maxdownload <= 0 && nread > 0)) {
652         result = conn->handler->readwrite(data, conn, &nread, &readmore);
653         if(result)
654           return result;
655         if(readmore)
656           break;
657       }
658 
659       if(stop_reading) {
660         /* We've stopped dealing with input, get out of the do-while loop */
661 
662         if(nread > 0) {
663           infof(data,
664                 "Excess found:"
665                 " excess = %zd"
666                 " url = %s (zero-length body)\n",
667                 nread, data->state.up.path);
668         }
669 
670         break;
671       }
672     }
673 #endif /* CURL_DISABLE_HTTP */
674 
675 
676     /* This is not an 'else if' since it may be a remainder from the header
677        parsing, where the beginning of the buffer is headers and the end
678        is non-headers. */
679     if(k->str && !k->header && (nread > 0 || is_empty_data)) {
680 
681       if(data->set.opt_no_body) {
682         /* data arrives although we want none, bail out */
683         streamclose(conn, "ignoring body");
684         *done = TRUE;
685         return CURLE_WEIRD_SERVER_REPLY;
686       }
687 
688 #ifndef CURL_DISABLE_HTTP
689       if(0 == k->bodywrites && !is_empty_data) {
690         /* These checks are only made the first time we are about to
691            write a piece of the body */
692         if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
693           /* HTTP-only checks */
694 
695           if(data->req.newurl) {
696             if(conn->bits.close) {
697               /* Abort after the headers if "follow Location" is set
698                  and we're set to close anyway. */
699               k->keepon &= ~KEEP_RECV;
700               *done = TRUE;
701               return CURLE_OK;
702             }
703             /* We have a new url to load, but since we want to be able
704                to re-use this connection properly, we read the full
705                response in "ignore mode" */
706             k->ignorebody = TRUE;
707             infof(data, "Ignoring the response-body\n");
708           }
709           if(data->state.resume_from && !k->content_range &&
710              (data->set.httpreq == HTTPREQ_GET) &&
711              !k->ignorebody) {
712 
713             if(k->size == data->state.resume_from) {
714               /* The resume point is at the end of file, consider this fine
715                  even if it doesn't allow resume from here. */
716               infof(data, "The entire document is already downloaded");
717               connclose(conn, "already downloaded");
718               /* Abort download */
719               k->keepon &= ~KEEP_RECV;
720               *done = TRUE;
721               return CURLE_OK;
722             }
723 
724             /* we wanted to resume a download, although the server doesn't
725              * seem to support this and we did this with a GET (if it
726              * wasn't a GET we did a POST or PUT resume) */
727             failf(data, "HTTP server doesn't seem to support "
728                   "byte ranges. Cannot resume.");
729             return CURLE_RANGE_ERROR;
730           }
731 
732           if(data->set.timecondition && !data->state.range) {
733             /* A time condition has been set AND no ranges have been
734                requested. This seems to be what section 13.3.4 of
735                RFC 2616 defines to be the correct action for an
736                HTTP/1.1 client */
737 
738             if(!Curl_meets_timecondition(data, k->timeofdoc)) {
739               *done = TRUE;
740               /* We're simulating an HTTP 304 from the server so we return
741                  what should have been returned from the server */
742               data->info.httpcode = 304;
743               infof(data, "Simulate a HTTP 304 response!\n");
744               /* we abort the transfer before it is completed == we ruin the
745                  re-use ability. Close the connection */
746               connclose(conn, "Simulated 304 handling");
747               return CURLE_OK;
748             }
749           } /* we have a time condition */
750 
751         } /* this is HTTP or RTSP */
752       } /* this is the first time we write a body part */
753 #endif /* CURL_DISABLE_HTTP */
754 
755       k->bodywrites++;
756 
757       /* pass data to the debug function before it gets "dechunked" */
758       if(data->set.verbose) {
759         if(k->badheader) {
760           Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
761                      (size_t)k->hbuflen);
762           if(k->badheader == HEADER_PARTHEADER)
763             Curl_debug(data, CURLINFO_DATA_IN,
764                        k->str, (size_t)nread);
765         }
766         else
767           Curl_debug(data, CURLINFO_DATA_IN,
768                      k->str, (size_t)nread);
769       }
770 
771 #ifndef CURL_DISABLE_HTTP
772       if(k->chunk) {
773         /*
774          * Here comes a chunked transfer flying and we need to decode this
775          * properly.  While the name says read, this function both reads
776          * and writes away the data. The returned 'nread' holds the number
777          * of actual data it wrote to the client.
778          */
779         CURLcode extra;
780         CHUNKcode res =
781           Curl_httpchunk_read(conn, k->str, nread, &nread, &extra);
782 
783         if(CHUNKE_OK < res) {
784           if(CHUNKE_PASSTHRU_ERROR == res) {
785             failf(data, "Failed reading the chunked-encoded stream");
786             return extra;
787           }
788           failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
789           return CURLE_RECV_ERROR;
790         }
791         if(CHUNKE_STOP == res) {
792           size_t dataleft;
793           /* we're done reading chunks! */
794           k->keepon &= ~KEEP_RECV; /* read no more */
795 
796           /* There may now be a number of bytes at the end of the str
797              buffer that weren't written to the client.
798              Push them back to be read on the next pass. */
799 
800           dataleft = conn->chunk.dataleft;
801           if(dataleft != 0) {
802             infof(conn->data, "Leftovers after chunking: %zu bytes\n",
803                   dataleft);
804           }
805         }
806         /* If it returned OK, we just keep going */
807       }
808 #endif   /* CURL_DISABLE_HTTP */
809 
810       /* Account for body content stored in the header buffer */
811       if((k->badheader == HEADER_PARTHEADER) && !k->ignorebody) {
812         DEBUGF(infof(data, "Increasing bytecount by %zu from hbuflen\n",
813                      k->hbuflen));
814         k->bytecount += k->hbuflen;
815       }
816 
817       if((-1 != k->maxdownload) &&
818          (k->bytecount + nread >= k->maxdownload)) {
819 
820         excess = (size_t)(k->bytecount + nread - k->maxdownload);
821         if(excess > 0 && !k->ignorebody) {
822           infof(data,
823                 "Excess found in a read:"
824                 " excess = %zu"
825                 ", size = %" CURL_FORMAT_CURL_OFF_T
826                 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
827                 ", bytecount = %" CURL_FORMAT_CURL_OFF_T "\n",
828                 excess, k->size, k->maxdownload, k->bytecount);
829         }
830 
831         nread = (ssize_t) (k->maxdownload - k->bytecount);
832         if(nread < 0) /* this should be unusual */
833           nread = 0;
834 
835         k->keepon &= ~KEEP_RECV; /* we're done reading */
836       }
837 
838       k->bytecount += nread;
839 
840       Curl_pgrsSetDownloadCounter(data, k->bytecount);
841 
842       if(!k->chunk && (nread || k->badheader || is_empty_data)) {
843         /* If this is chunky transfer, it was already written */
844 
845         if(k->badheader && !k->ignorebody) {
846           /* we parsed a piece of data wrongly assuming it was a header
847              and now we output it as body instead */
848 
849           /* Don't let excess data pollute body writes */
850           if(k->maxdownload == -1 || (curl_off_t)k->hbuflen <= k->maxdownload)
851             result = Curl_client_write(conn, CLIENTWRITE_BODY,
852                                        data->state.headerbuff,
853                                        k->hbuflen);
854           else
855             result = Curl_client_write(conn, CLIENTWRITE_BODY,
856                                        data->state.headerbuff,
857                                        (size_t)k->maxdownload);
858 
859           if(result)
860             return result;
861         }
862         if(k->badheader < HEADER_ALLBAD) {
863           /* This block handles various content encodings. If there's an
864              error here, be sure to check over the almost identical code
865              in http_chunks.c.
866              Make sure that ALL_CONTENT_ENCODINGS contains all the
867              encodings handled here. */
868           if(conn->data->set.http_ce_skip || !k->writer_stack) {
869             if(!k->ignorebody) {
870 #ifndef CURL_DISABLE_POP3
871               if(conn->handler->protocol & PROTO_FAMILY_POP3)
872                 result = Curl_pop3_write(conn, k->str, nread);
873               else
874 #endif /* CURL_DISABLE_POP3 */
875                 result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
876                                            nread);
877             }
878           }
879           else if(!k->ignorebody)
880             result = Curl_unencode_write(conn, k->writer_stack, k->str, nread);
881         }
882         k->badheader = HEADER_NORMAL; /* taken care of now */
883 
884         if(result)
885           return result;
886       }
887 
888     } /* if(!header and data to read) */
889 
890     if(conn->handler->readwrite && excess && !conn->bits.stream_was_rewound) {
891       /* Parse the excess data */
892       k->str += nread;
893 
894       if(&k->str[excess] > &k->buf[data->set.buffer_size]) {
895         /* the excess amount was too excessive(!), make sure
896            it doesn't read out of buffer */
897         excess = &k->buf[data->set.buffer_size] - k->str;
898       }
899       nread = (ssize_t)excess;
900 
901       result = conn->handler->readwrite(data, conn, &nread, &readmore);
902       if(result)
903         return result;
904 
905       if(readmore)
906         k->keepon |= KEEP_RECV; /* we're not done reading */
907       break;
908     }
909 
910     if(is_empty_data) {
911       /* if we received nothing, the server closed the connection and we
912          are done */
913       k->keepon &= ~KEEP_RECV;
914     }
915 
916     if(k->keepon & KEEP_RECV_PAUSE) {
917       /* this is a paused transfer */
918       break;
919     }
920 
921   } while(data_pending(conn) && maxloops--);
922 
923   if(maxloops <= 0) {
924     /* we mark it as read-again-please */
925     conn->cselect_bits = CURL_CSELECT_IN;
926     *comeback = TRUE;
927   }
928 
929   if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
930      conn->bits.close) {
931     /* When we've read the entire thing and the close bit is set, the server
932        may now close the connection. If there's now any kind of sending going
933        on from our side, we need to stop that immediately. */
934     infof(data, "we are done reading and this is set to close, stop send\n");
935     k->keepon &= ~KEEP_SEND; /* no writing anymore either */
936   }
937 
938   return CURLE_OK;
939 }
940 
941 CURLcode Curl_done_sending(struct connectdata *conn,
942                            struct SingleRequest *k)
943 {
944   k->keepon &= ~KEEP_SEND; /* we're done writing */
945 
946   /* These functions should be moved into the handler struct! */
947   Curl_http2_done_sending(conn);
948   Curl_quic_done_sending(conn);
949 
950   if(conn->bits.rewindaftersend) {
951     CURLcode result = Curl_readrewind(conn);
952     if(result)
953       return result;
954   }
955   return CURLE_OK;
956 }
957 
958 #if defined(WIN32) && !defined(USE_LWIPSOCK)
959 #ifndef SIO_IDEAL_SEND_BACKLOG_QUERY
960 #define SIO_IDEAL_SEND_BACKLOG_QUERY 0x4004747B
961 #endif
962 
963 static void win_update_buffer_size(curl_socket_t sockfd)
964 {
965   int result;
966   ULONG ideal;
967   DWORD ideallen;
968   result = WSAIoctl(sockfd, SIO_IDEAL_SEND_BACKLOG_QUERY, 0, 0,
969                     &ideal, sizeof(ideal), &ideallen, 0, 0);
970   if(result == 0) {
971     setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF,
972                (const char *)&ideal, sizeof(ideal));
973   }
974 }
975 #else
976 #define win_update_buffer_size(x)
977 #endif
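/* Note: SIO_IDEAL_SEND_BACKLOG_QUERY asks Winsock for the "ideal" send
   backlog of the connection, a value Windows adapts to the connection's
   throughput and round-trip time; copying it into SO_SNDBUF is meant to keep
   large uploads from being throttled by a too-small send buffer. */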
978 
979 /*
980  * Send data to upload to the server, when the socket is writable.
981  */
982 static CURLcode readwrite_upload(struct Curl_easy *data,
983                                  struct connectdata *conn,
984                                  int *didwhat)
985 {
986   ssize_t i, si;
987   ssize_t bytes_written;
988   CURLcode result;
989   ssize_t nread; /* number of bytes read */
990   bool sending_http_headers = FALSE;
991   struct SingleRequest *k = &data->req;
992 
993   if((k->bytecount == 0) && (k->writebytecount == 0))
994     Curl_pgrsTime(data, TIMER_STARTTRANSFER);
995 
996   *didwhat |= KEEP_SEND;
997 
998   do {
999     /* only read more data if there's no upload data already
1000        present in the upload buffer */
1001     if(0 == k->upload_present) {
1002       result = Curl_get_upload_buffer(data);
1003       if(result)
1004         return result;
1005       /* init the "upload from here" pointer */
1006       k->upload_fromhere = data->state.ulbuf;
1007 
1008       if(!k->upload_done) {
1009         /* HTTP pollution, this should be written nicer to become more
1010            protocol agnostic. */
1011         size_t fillcount;
1012         struct HTTP *http = k->protop;
1013 
1014         if((k->exp100 == EXP100_SENDING_REQUEST) &&
1015            (http->sending == HTTPSEND_BODY)) {
1016           /* If this call is to send body data, we must take some action:
1017              We have sent off the full HTTP 1.1 request, and we shall now
1018              go into the Expect: 100 state and await such a header */
1019           k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
1020           k->keepon &= ~KEEP_SEND;         /* disable writing */
1021           k->start100 = Curl_now();       /* timeout count starts now */
1022           *didwhat &= ~KEEP_SEND;  /* we didn't write anything actually */
1023           /* set a timeout for the multi interface */
1024           Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
1025           break;
1026         }
1027 
1028         if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
1029           if(http->sending == HTTPSEND_REQUEST)
1030             /* We're sending the HTTP request headers, not the data.
1031                Remember that so we don't change the line endings. */
1032             sending_http_headers = TRUE;
1033           else
1034             sending_http_headers = FALSE;
1035         }
1036 
1037         result = Curl_fillreadbuffer(conn, data->set.upload_buffer_size,
1038                                      &fillcount);
1039         if(result)
1040           return result;
1041 
1042         nread = fillcount;
1043       }
1044       else
1045         nread = 0; /* we're done uploading/reading */
1046 
1047       if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
1048         /* this is a paused transfer */
1049         break;
1050       }
1051       if(nread <= 0) {
1052         result = Curl_done_sending(conn, k);
1053         if(result)
1054           return result;
1055         break;
1056       }
1057 
1058       /* store number of bytes available for upload */
1059       k->upload_present = nread;
1060 
1061       /* convert LF to CRLF if so asked */
1062       if((!sending_http_headers) && (
1063 #ifdef CURL_DO_LINEEND_CONV
1064          /* always convert if we're FTPing in ASCII mode */
1065          (data->set.prefer_ascii) ||
1066 #endif
1067          (data->set.crlf))) {
1068         /* Do we need to allocate a scratch buffer? */
1069         if(!data->state.scratch) {
1070           data->state.scratch = malloc(2 * data->set.upload_buffer_size);
1071           if(!data->state.scratch) {
1072             failf(data, "Failed to alloc scratch buffer!");
1073 
1074             return CURLE_OUT_OF_MEMORY;
1075           }
1076         }
1077 
1078         /*
1079          * ASCII/EBCDIC Note: This is presumably a text (not binary)
1080          * transfer so the data should already be in ASCII.
1081          * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
1082          * must be used instead of the escape sequences \r & \n.
1083          */
1084         for(i = 0, si = 0; i < nread; i++, si++) {
1085           if(k->upload_fromhere[i] == 0x0a) {
1086             data->state.scratch[si++] = 0x0d;
1087             data->state.scratch[si] = 0x0a;
1088             if(!data->set.crlf) {
1089               /* we're here only because FTP is in ASCII mode...
1090                  bump infilesize for the LF we just added */
1091               if(data->state.infilesize != -1)
1092                 data->state.infilesize++;
1093             }
1094           }
1095           else
1096             data->state.scratch[si] = k->upload_fromhere[i];
1097         }
1098 
1099         if(si != nread) {
1100           /* only perform the special operation if we really did replace
1101              anything */
1102           nread = si;
1103 
1104           /* upload from the new (replaced) buffer instead */
1105           k->upload_fromhere = data->state.scratch;
1106 
1107           /* set the new amount too */
1108           k->upload_present = nread;
1109         }
1110       }
1111 
1112 #ifndef CURL_DISABLE_SMTP
1113       if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
1114         result = Curl_smtp_escape_eob(conn, nread);
1115         if(result)
1116           return result;
1117       }
1118 #endif /* CURL_DISABLE_SMTP */
1119     } /* if 0 == k->upload_present */
1120     else {
1121       /* We have a partial buffer left from a previous "round". Use
1122          that instead of reading more data */
1123     }
1124 
1125     /* write to socket (send away data) */
1126     result = Curl_write(conn,
1127                         conn->writesockfd,  /* socket to send to */
1128                         k->upload_fromhere, /* buffer pointer */
1129                         k->upload_present,  /* buffer size */
1130                         &bytes_written);    /* actually sent */
1131     if(result)
1132       return result;
1133 
1134     win_update_buffer_size(conn->writesockfd);
1135 
1136     if(data->set.verbose)
1137       /* show the data before we change the pointer upload_fromhere */
1138       Curl_debug(data, CURLINFO_DATA_OUT, k->upload_fromhere,
1139                  (size_t)bytes_written);
1140 
1141     k->writebytecount += bytes_written;
1142     Curl_pgrsSetUploadCounter(data, k->writebytecount);
1143 
1144     if((!k->upload_chunky || k->forbidchunk) &&
1145        (k->writebytecount == data->state.infilesize)) {
1146       /* we have sent all data we were supposed to */
1147       k->upload_done = TRUE;
1148       infof(data, "We are completely uploaded and fine\n");
1149     }
1150 
1151     if(k->upload_present != bytes_written) {
1152       /* we only wrote a part of the buffer (if anything), deal with it! */
1153 
1154       /* store the amount of bytes left in the buffer to write */
1155       k->upload_present -= bytes_written;
1156 
1157       /* advance the pointer where to find the buffer when the next send
1158          is to happen */
1159       k->upload_fromhere += bytes_written;
1160     }
1161     else {
1162       /* we've uploaded that buffer now */
1163       result = Curl_get_upload_buffer(data);
1164       if(result)
1165         return result;
1166       k->upload_fromhere = data->state.ulbuf;
1167       k->upload_present = 0; /* no more bytes left */
1168 
1169       if(k->upload_done) {
1170         result = Curl_done_sending(conn, k);
1171         if(result)
1172           return result;
1173       }
1174     }
1175 
1176 
1177   } WHILE_FALSE; /* just to break out from! */
1178 
1179   return CURLE_OK;
1180 }
1181 
1182 /*
1183  * Curl_readwrite() is the low-level function to be called when data is to
1184  * be read and written to/from the connection.
1185  *
1186  * return '*comeback' TRUE if we didn't properly drain the socket so this
1187  * function should get called again without select() or similar in between!
1188  */
1189 CURLcode Curl_readwrite(struct connectdata *conn,
1190                         struct Curl_easy *data,
1191                         bool *done,
1192                         bool *comeback)
1193 {
1194   struct SingleRequest *k = &data->req;
1195   CURLcode result;
1196   int didwhat = 0;
1197 
1198   curl_socket_t fd_read;
1199   curl_socket_t fd_write;
1200   int select_res = conn->cselect_bits;
1201 
1202   conn->cselect_bits = 0;
1203 
1204   /* only use the proper socket if the *_HOLD bit is not also set, as that
1205      means we are in a rate-limited state in that transfer direction */
1206 
1207   if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
1208     fd_read = conn->sockfd;
1209   else
1210     fd_read = CURL_SOCKET_BAD;
1211 
1212   if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
1213     fd_write = conn->writesockfd;
1214   else
1215     fd_write = CURL_SOCKET_BAD;
1216 
1217   if(conn->data->state.drain) {
1218     select_res |= CURL_CSELECT_IN;
1219     DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data\n"));
1220   }
1221 
1222   if(!select_res) /* Call for select()/poll() only, if read/write/error
1223                      status is not known. */
1224     select_res = Curl_socket_check(fd_read, CURL_SOCKET_BAD, fd_write, 0);
1225 
1226   if(select_res == CURL_CSELECT_ERR) {
1227     failf(data, "select/poll returned error");
1228     return CURLE_SEND_ERROR;
1229   }
1230 
1231   /* We go ahead and do a read if we have a readable socket or if
1232      the stream was rewound (in which case we have data in a
1233      buffer) */
1234   if((k->keepon & KEEP_RECV) &&
1235      ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {
1236 
1237     result = readwrite_data(data, conn, k, &didwhat, done, comeback);
1238     if(result || *done)
1239       return result;
1240   }
1241 
1242   /* If we still have writing to do, we check if we have a writable socket. */
1243   if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
1244     /* write */
1245 
1246     result = readwrite_upload(data, conn, &didwhat);
1247     if(result)
1248       return result;
1249   }
1250 
1251   k->now = Curl_now();
1252   if(didwhat) {
1253     ;
1254   }
1255   else {
1256     /* no read no write, this is a timeout? */
1257     if(k->exp100 == EXP100_AWAITING_CONTINUE) {
1258       /* This should allow some time for the header to arrive, but only a
1259          very short time, as otherwise too much time would be wasted too
1260          often. */
1261 
1262       /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1263 
1264          Therefore, when a client sends this header field to an origin server
1265          (possibly via a proxy) from which it has never seen a 100 (Continue)
1266          status, the client SHOULD NOT wait for an indefinite period before
1267          sending the request body.
1268 
1269       */
1270 
1271       timediff_t ms = Curl_timediff(k->now, k->start100);
1272       if(ms >= data->set.expect_100_timeout) {
1273         /* we've waited long enough, continue anyway */
1274         k->exp100 = EXP100_SEND_DATA;
1275         k->keepon |= KEEP_SEND;
1276         Curl_expire_done(data, EXPIRE_100_TIMEOUT);
1277         infof(data, "Done waiting for 100-continue\n");
1278       }
1279     }
1280   }
1281 
1282   if(Curl_pgrsUpdate(conn))
1283     result = CURLE_ABORTED_BY_CALLBACK;
1284   else
1285     result = Curl_speedcheck(data, k->now);
1286   if(result)
1287     return result;
1288 
1289   if(k->keepon) {
1290     if(0 > Curl_timeleft(data, &k->now, FALSE)) {
1291       if(k->size != -1) {
1292         failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
1293               " milliseconds with %" CURL_FORMAT_CURL_OFF_T " out of %"
1294               CURL_FORMAT_CURL_OFF_T " bytes received",
1295               Curl_timediff(k->now, data->progress.t_startsingle),
1296               k->bytecount, k->size);
1297       }
1298       else {
1299         failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
1300               " milliseconds with %" CURL_FORMAT_CURL_OFF_T " bytes received",
1301               Curl_timediff(k->now, data->progress.t_startsingle),
1302               k->bytecount);
1303       }
1304       return CURLE_OPERATION_TIMEDOUT;
1305     }
1306   }
1307   else {
1308     /*
1309      * The transfer has been performed. Just make some general checks before
1310      * returning.
1311      */
1312 
1313     if(!(data->set.opt_no_body) && (k->size != -1) &&
1314        (k->bytecount != k->size) &&
1315 #ifdef CURL_DO_LINEEND_CONV
1316        /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1317           so we'll check to see if the discrepancy can be explained
1318           by the number of CRLFs we've changed to LFs.
1319        */
1320        (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1321 #endif /* CURL_DO_LINEEND_CONV */
1322        !k->newurl) {
1323       failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
1324             " bytes remaining to read", k->size - k->bytecount);
1325       return CURLE_PARTIAL_FILE;
1326     }
1327     if(!(data->set.opt_no_body) && k->chunk &&
1328        (conn->chunk.state != CHUNK_STOP)) {
1329       /*
1330        * In chunked mode, return an error if the connection is closed before
1331        * the empty (terminating) chunk is read.
1332        *
1333        * The condition above used to check for
1334        * conn->proto.http->chunk.datasize != 0 which is true after reading
1335        * *any* chunk, not just the empty chunk.
1336        *
1337        */
1338       failf(data, "transfer closed with outstanding read data remaining");
1339       return CURLE_PARTIAL_FILE;
1340     }
1341     if(Curl_pgrsUpdate(conn))
1342       return CURLE_ABORTED_BY_CALLBACK;
1343   }
1344 
1345   /* Now update the "done" boolean we return */
1346   *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
1347                             KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;
1348 
1349   return CURLE_OK;
1350 }
1351 
1352 /*
1353  * Curl_single_getsock() gets called by the multi interface code when the app
1354  * has requested to get the sockets for the current connection. This function
1355  * will then be called once for every connection that the multi interface
1356  * keeps track of. This function will only be called for connections that are
1357  * in the proper state to have this information available.
1358  */
1359 int Curl_single_getsock(const struct connectdata *conn,
1360                         curl_socket_t *sock)
1361 {
1362   const struct Curl_easy *data = conn->data;
1363   int bitmap = GETSOCK_BLANK;
1364   unsigned sockindex = 0;
1365 
1366   if(conn->handler->perform_getsock)
1367     return conn->handler->perform_getsock(conn, sock);
1368 
1369   /* don't include HOLD and PAUSE connections */
1370   if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {
1371 
1372     DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
1373 
1374     bitmap |= GETSOCK_READSOCK(sockindex);
1375     sock[sockindex] = conn->sockfd;
1376   }
1377 
1378   /* don't include HOLD and PAUSE connections */
1379   if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
1380 
1381     if((conn->sockfd != conn->writesockfd) ||
1382        bitmap == GETSOCK_BLANK) {
1383       /* only if they are not the same socket, and we already have a
1384          readable one, do we increase the index */
1385       if(bitmap != GETSOCK_BLANK)
1386         sockindex++; /* increase index if we need two entries */
1387 
1388       DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
1389 
1390       sock[sockindex] = conn->writesockfd;
1391     }
1392 
1393     bitmap |= GETSOCK_WRITESOCK(sockindex);
1394   }
1395 
1396   return bitmap;
1397 }
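/* Usage sketch: the returned bitmap is interpreted together with the sock[]
   array, so GETSOCK_READSOCK(0) | GETSOCK_WRITESOCK(1) means "poll sock[0]
   for reading and sock[1] for writing", while GETSOCK_BLANK means there is
   currently nothing to wait for on this transfer. */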
1398 
1399 /* Curl_init_CONNECT() gets called each time the handle switches to CONNECT
1400    which means this gets called once for each subsequent redirect etc */
1401 void Curl_init_CONNECT(struct Curl_easy *data)
1402 {
1403   data->state.fread_func = data->set.fread_func_set;
1404   data->state.in = data->set.in_set;
1405 }
1406 
1407 /*
1408  * Curl_pretransfer() is called immediately before a transfer starts, and only
1409  * once for one transfer no matter if it has redirects or do multi-pass
1410  * authentication etc.
1411  */
1412 CURLcode Curl_pretransfer(struct Curl_easy *data)
1413 {
1414   CURLcode result;
1415 
1416   if(!data->change.url && !data->set.uh) {
1417     /* we can't do anything without URL */
1418     failf(data, "No URL set!");
1419     return CURLE_URL_MALFORMAT;
1420   }
1421 
1422   /* since the URL may have been redirected in a previous use of this handle */
1423   if(data->change.url_alloc) {
1424     /* the already set URL is allocated, free it first! */
1425     Curl_safefree(data->change.url);
1426     data->change.url_alloc = FALSE;
1427   }
1428 
1429   if(!data->change.url && data->set.uh) {
1430     CURLUcode uc;
1431     uc = curl_url_get(data->set.uh,
1432                         CURLUPART_URL, &data->set.str[STRING_SET_URL], 0);
1433     if(uc) {
1434       failf(data, "No URL set!");
1435       return CURLE_URL_MALFORMAT;
1436     }
1437   }
1438 
1439   data->change.url = data->set.str[STRING_SET_URL];
1440 
1441   /* Init the SSL session ID cache here. We do it here since we want to do it
1442      after the *_setopt() calls (that could specify the size of the cache) but
1443      before any transfer takes place. */
1444   result = Curl_ssl_initsessions(data, data->set.general_ssl.max_ssl_sessions);
1445   if(result)
1446     return result;
1447 
1448   data->state.wildcardmatch = data->set.wildcard_enabled;
1449   data->set.followlocation = 0; /* reset the location-follow counter */
1450   data->state.this_is_a_follow = FALSE; /* reset this */
1451   data->state.errorbuf = FALSE; /* no error has occurred */
1452   data->state.httpversion = 0; /* don't assume any particular server version */
1453 
1454   data->state.authproblem = FALSE;
1455   data->state.authhost.want = data->set.httpauth;
1456   data->state.authproxy.want = data->set.proxyauth;
1457   Curl_safefree(data->info.wouldredirect);
1458   data->info.wouldredirect = NULL;
1459 
1460   if(data->set.httpreq == HTTPREQ_PUT)
1461     data->state.infilesize = data->set.filesize;
1462   else if((data->set.httpreq != HTTPREQ_GET) &&
1463           (data->set.httpreq != HTTPREQ_HEAD)) {
1464     data->state.infilesize = data->set.postfieldsize;
1465     if(data->set.postfields && (data->state.infilesize == -1))
1466       data->state.infilesize = (curl_off_t)strlen(data->set.postfields);
1467   }
1468   else
1469     data->state.infilesize = 0;
1470 
1471   /* If there is a list of cookie files to read, do it now! */
1472   if(data->change.cookielist)
1473     Curl_cookie_loadfiles(data);
1474 
1475   /* If there is a list of host pairs to deal with */
1476   if(data->change.resolve)
1477     result = Curl_loadhostpairs(data);
1478 
1479   if(!result) {
1480     /* Allow data->set.use_port to set which port to use. This needs to be
1481      * disabled for example when we follow Location: headers to URLs using
1482      * different ports! */
1483     data->state.allow_port = TRUE;
1484 
1485 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1486     /*************************************************************
1487      * Tell signal handler to ignore SIGPIPE
1488      *************************************************************/
1489     if(!data->set.no_signal)
1490       data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1491 #endif
1492 
1493     Curl_initinfo(data); /* reset session-specific information "variables" */
1494     Curl_pgrsResetTransferSizes(data);
1495     Curl_pgrsStartNow(data);
1496 
1497     /* In case the handle is re-used and an authentication method was picked
1498        in the session we need to make sure we only use the one(s) we now
1499        consider to be fine */
1500     data->state.authhost.picked &= data->state.authhost.want;
1501     data->state.authproxy.picked &= data->state.authproxy.want;
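
    /* Illustration only (not part of libcurl): the 'want' bitmasks narrowed
       above come straight from the application's auth options, e.g.:

         curl_easy_setopt(h, CURLOPT_HTTPAUTH, CURLAUTH_BASIC | CURLAUTH_DIGEST);
         curl_easy_setopt(h, CURLOPT_PROXYAUTH, CURLAUTH_ANY);

       On handle reuse, a method picked in an earlier transfer is kept only
       if it is still in the currently wanted set. */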
1502 
1503 #ifndef CURL_DISABLE_FTP
1504     if(data->state.wildcardmatch) {
1505       struct WildcardData *wc = &data->wildcard;
1506       if(wc->state < CURLWC_INIT) {
1507         result = Curl_wildcard_init(wc); /* init wildcard structures */
1508         if(result)
1509           return CURLE_OUT_OF_MEMORY;
1510       }
1511     }
1512 #endif
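
    /* Illustration only (not part of libcurl): wildcard matching is an FTP
       feature the application opts into, for instance:

         curl_easy_setopt(h, CURLOPT_WILDCARDMATCH, 1L);
         curl_easy_setopt(h, CURLOPT_URL, "ftp://example.com/logs/*.gz");

       Only then is data->state.wildcardmatch TRUE and the init above
       performed. */
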
1513     Curl_http2_init_state(&data->state);
1514   }
1515 
1516   return result;
1517 }
1518 
1519 /*
1520  * Curl_posttransfer() is called immediately after a transfer ends
1521  */
1522 CURLcode Curl_posttransfer(struct Curl_easy *data)
1523 {
1524 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1525   /* restore the signal handler for SIGPIPE before we get back */
1526   if(!data->set.no_signal)
1527     signal(SIGPIPE, data->state.prev_signal);
1528 #else
1529   (void)data; /* unused parameter */
1530 #endif
1531 
1532   return CURLE_OK;
1533 }
1534 
1535 /*
1536  * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1537  * as given by the remote server and set up the new URL to request.
1538  *
1539  * This function DOES NOT FREE the given url.
1540  */
1541 CURLcode Curl_follow(struct Curl_easy *data,
1542                      char *newurl,    /* the Location: string */
1543                      followtype type) /* see transfer.h */
1544 {
1545 #ifdef CURL_DISABLE_HTTP
1546   (void)data;
1547   (void)newurl;
1548   (void)type;
1549   /* Location: following will not happen when HTTP is disabled */
1550   return CURLE_TOO_MANY_REDIRECTS;
1551 #else
1552 
1553   /* Location: redirect */
1554   bool disallowport = FALSE;
1555   bool reachedmax = FALSE;
1556   CURLUcode uc;
1557 
1558   if(type == FOLLOW_REDIR) {
1559     if((data->set.maxredirs != -1) &&
1560        (data->set.followlocation >= data->set.maxredirs)) {
1561       reachedmax = TRUE;
1562       type = FOLLOW_FAKE; /* switch to fake to store the would-be-redirected
1563                              to URL */
1564     }
1565     else {
1566       /* mark the next request as a followed location: */
1567       data->state.this_is_a_follow = TRUE;
1568 
1569       data->set.followlocation++; /* count location-followers */
1570 
1571       if(data->set.http_auto_referer) {
1572         /* We are asked to automatically set the previous URL as the referer
1573            when we get the next URL. We pick the ->url field, which may or may
1574            not be 100% correct */
1575 
1576         if(data->change.referer_alloc) {
1577           Curl_safefree(data->change.referer);
1578           data->change.referer_alloc = FALSE;
1579         }
1580 
1581         data->change.referer = strdup(data->change.url);
1582         if(!data->change.referer)
1583           return CURLE_OUT_OF_MEMORY;
1584         data->change.referer_alloc = TRUE; /* yes, free this later */
1585       }
1586     }
1587   }
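
  /* Illustration only (not part of libcurl): the redirect counter and the
     referer handling above correspond to these application-level options
     (the values are examples):

       curl_easy_setopt(h, CURLOPT_FOLLOWLOCATION, 1L);
       curl_easy_setopt(h, CURLOPT_MAXREDIRS, 5L);
       curl_easy_setopt(h, CURLOPT_AUTOREFERER, 1L);

     Setting CURLOPT_MAXREDIRS to -1 allows an unlimited number of
     redirects. */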
1588 
1589   if(Curl_is_absolute_url(newurl, NULL, MAX_SCHEME_LEN))
1590     /* This is an absolute URL, don't allow the custom port number */
1591     disallowport = TRUE;
1592 
1593   DEBUGASSERT(data->state.uh);
1594   uc = curl_url_set(data->state.uh, CURLUPART_URL, newurl,
1595                     (type == FOLLOW_FAKE) ? CURLU_NON_SUPPORT_SCHEME :
1596                     ((type == FOLLOW_REDIR) ? CURLU_URLENCODE : 0) );
1597   if(uc) {
1598     if(type != FOLLOW_FAKE)
1599       return Curl_uc_to_curlcode(uc);
1600 
1601     /* the URL could not be parsed for some reason, but since this is FAKE
1602        mode, just duplicate the field as-is */
1603     newurl = strdup(newurl);
1604     if(!newurl)
1605       return CURLE_OUT_OF_MEMORY;
1606   }
1607   else {
1608 
1609     uc = curl_url_get(data->state.uh, CURLUPART_URL, &newurl, 0);
1610     if(uc)
1611       return Curl_uc_to_curlcode(uc);
1612   }
1613 
1614   if(type == FOLLOW_FAKE) {
1615     /* we were only asked to figure out the URL that a redirect would have
1616        gone to; store it and get out without actually following it */
1617     data->info.wouldredirect = newurl;
1618 
1619     if(reachedmax) {
1620       failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
1621       return CURLE_TOO_MANY_REDIRECTS;
1622     }
1623     return CURLE_OK;
1624   }
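
  /* Illustration only (not part of libcurl): when redirects are not being
     followed, the URL stored in wouldredirect above is what an application
     reads back afterwards, e.g.:

       char *location = NULL;
       curl_easy_getinfo(h, CURLINFO_REDIRECT_URL, &location);
       if(location)
         printf("would redirect to %s\n", location);

     The returned string belongs to the handle and must not be freed by the
     caller. */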
1625 
1626   if(disallowport)
1627     data->state.allow_port = FALSE;
1628 
1629   if(data->change.url_alloc)
1630     Curl_safefree(data->change.url);
1631 
1632   data->change.url = newurl;
1633   data->change.url_alloc = TRUE;
1634 
1635   infof(data, "Issue another request to this URL: '%s'\n", data->change.url);
1636 
1637   /*
1638    * We get here when the HTTP code is 300-399 (and 401). We need to act
1639    * differently based on exactly which return code it was.
1640    *
1641    * News from 7.10.6: we can also get here on a 401 or 407, in case we act
1642    * on an HTTP (proxy-) authentication scheme other than Basic.
1643    */
1644   switch(data->info.httpcode) {
1645     /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
1646        Authorization: XXXX header in the HTTP request code snippet */
1647     /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
1648        Proxy-Authorization: XXXX header in the HTTP request code snippet */
1649     /* 300 - Multiple Choices */
1650     /* 306 - Not used */
1651     /* 307 - Temporary Redirect */
1652   default:  /* for all above (and the unknown ones) */
1653     /* Some codes are explicitly mentioned since I've checked RFC2616 and they
1654      * seem to be OK to POST to.
1655      */
1656     break;
1657   case 301: /* Moved Permanently */
1658     /* (quote from RFC7231, section 6.4.2)
1659      *
1660      * Note: For historical reasons, a user agent MAY change the request
1661      * method from POST to GET for the subsequent request.  If this
1662      * behavior is undesired, the 307 (Temporary Redirect) status code
1663      * can be used instead.
1664      *
1665      * ----
1666      *
1667      * Many webservers expect this, so these servers often answer a POST
1668      * request with an error page. To be sure that libcurl gets the page that
1669      * most user agents would get, libcurl has to force GET.
1670      *
1671      * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1672      * can be overridden with CURLOPT_POSTREDIR.
1673      */
1674     if((data->set.httpreq == HTTPREQ_POST
1675         || data->set.httpreq == HTTPREQ_POST_FORM
1676         || data->set.httpreq == HTTPREQ_POST_MIME)
1677        && !(data->set.keep_post & CURL_REDIR_POST_301)) {
1678       infof(data, "Switch from POST to GET\n");
1679       data->set.httpreq = HTTPREQ_GET;
1680     }
1681     break;
1682   case 302: /* Found */
1683     /* (quote from RFC7231, section 6.4.3)
1684      *
1685      * Note: For historical reasons, a user agent MAY change the request
1686      * method from POST to GET for the subsequent request.  If this
1687      * behavior is undesired, the 307 (Temporary Redirect) status code
1688      * can be used instead.
1689      *
1690      * ----
1691      *
1692      * Many webservers expect this, so these servers often answer a POST
1693      * request with an error page. To be sure that libcurl gets the page that
1694      * most user agents would get, libcurl has to force GET.
1695      *
1696      * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1697      * can be overridden with CURLOPT_POSTREDIR.
1698      */
1699     if((data->set.httpreq == HTTPREQ_POST
1700         || data->set.httpreq == HTTPREQ_POST_FORM
1701         || data->set.httpreq == HTTPREQ_POST_MIME)
1702        && !(data->set.keep_post & CURL_REDIR_POST_302)) {
1703       infof(data, "Switch from POST to GET\n");
1704       data->set.httpreq = HTTPREQ_GET;
1705     }
1706     break;
1707 
1708   case 303: /* See Other */
1709     /* Disable both types of POSTs, unless the user explicitly
1710        asks for POST after POST */
1711     if(data->set.httpreq != HTTPREQ_GET
1712       && !(data->set.keep_post & CURL_REDIR_POST_303)) {
1713       data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
1714       infof(data, "Disables POST, goes with %s\n",
1715             data->set.opt_no_body?"HEAD":"GET");
1716     }
1717     break;
1718   case 304: /* Not Modified */
1719     /* 304 means we did a conditional request and it was "Not modified".
1720      * We shouldn't get any Location: header in this response!
1721      */
1722     break;
1723   case 305: /* Use Proxy */
1724     /* (quote from RFC2616, section 10.3.6):
1725      * "The requested resource MUST be accessed through the proxy given
1726      * by the Location field. The Location field gives the URI of the
1727      * proxy.  The recipient is expected to repeat this single request
1728      * via the proxy. 305 responses MUST only be generated by origin
1729      * servers."
1730      */
1731     break;
1732   }
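
  /* Illustration only (not part of libcurl): an application that wants to
     keep the method as POST across 301/302/303 responses overrides the
     switch above with CURLOPT_POSTREDIR, e.g.:

       curl_easy_setopt(h, CURLOPT_POSTREDIR,
                        CURL_REDIR_POST_301 | CURL_REDIR_POST_302);

     or CURL_REDIR_POST_ALL to cover all three codes. */
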
1733   Curl_pgrsTime(data, TIMER_REDIRECT);
1734   Curl_pgrsResetTransferSizes(data);
1735 
1736   return CURLE_OK;
1737 #endif /* CURL_DISABLE_HTTP */
1738 }
1739 
1740 /* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.
1741 
1742    NOTE: the returned *url is malloc()ed. */
1743 CURLcode Curl_retry_request(struct connectdata *conn,
1744                             char **url)
1745 {
1746   struct Curl_easy *data = conn->data;
1747   bool retry = FALSE;
1748   *url = NULL;
1749 
1750   /* if this is an upload, we can't do the checks below, unless the protocol
1751      is HTTP, since an upload over HTTP will still get a response */
1752   if(data->set.upload &&
1753      !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
1754     return CURLE_OK;
1755 
1756   if((data->req.bytecount + data->req.headerbytecount == 0) &&
1757       conn->bits.reuse &&
1758       (!data->set.opt_no_body
1759         || (conn->handler->protocol & PROTO_FAMILY_HTTP)) &&
1760       (data->set.rtspreq != RTSPREQ_RECEIVE))
1761     /* We got no data and we attempted to re-use a connection. For HTTP this
1762        can be a retry, so we try again regardless of whether we expected a
1763        body. For other protocols we try again only if we expected a body.
1764 
1765        This might happen if the connection was left alive when we were
1766        done using it before, but that was closed when we wanted to read from
1767        it again. Bad luck. Retry the same request on a fresh connect! */
1768     retry = TRUE;
1769   else if(data->state.refused_stream &&
1770           (data->req.bytecount + data->req.headerbytecount == 0) ) {
1771     /* This was sent on a refused stream, safe to rerun. A refused stream
1772        error can typically only happen on HTTP/2 level if the stream is safe
1773        to issue again, but the nghttp2 API can deliver the message to other
1774        streams as well, which is why this checks the data counters
1775        too. */
1776     infof(conn->data, "REFUSED_STREAM, retrying a fresh connect\n");
1777     data->state.refused_stream = FALSE; /* clear again */
1778     retry = TRUE;
1779   }
1780   if(retry) {
1781     infof(conn->data, "Connection died, retrying a fresh connect\n");
1782     *url = strdup(conn->data->change.url);
1783     if(!*url)
1784       return CURLE_OUT_OF_MEMORY;
1785 
1786     connclose(conn, "retry"); /* close this connection */
1787     conn->bits.retry = TRUE; /* mark this as a connection we're about
1788                                 to retry. Marking it this way should
1789                                 prevent e.g. HTTP transfers from
1790                                 returning an error just because nothing
1791                                 has been transferred! */
1792 
1793 
1794     if(conn->handler->protocol&PROTO_FAMILY_HTTP) {
1795       if(data->req.writebytecount) {
1796         CURLcode result = Curl_readrewind(conn);
1797         if(result) {
1798           Curl_safefree(*url);
1799           return result;
1800         }
1801       }
1802     }
1803   }
1804   return CURLE_OK;
1805 }
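
/* Illustration only (not part of libcurl): when a retried request has
   already sent part of an upload body, Curl_readrewind() above needs a way
   to seek back to the start. An application feeding data through a read
   callback would typically also provide a seek callback, roughly like this
   (names are hypothetical):

     static int my_seek(void *userp, curl_off_t offset, int origin)
     {
       FILE *f = (FILE *)userp;
       return fseek(f, (long)offset, origin) ?
         CURL_SEEKFUNC_FAIL : CURL_SEEKFUNC_OK;
     }

     curl_easy_setopt(h, CURLOPT_SEEKFUNCTION, my_seek);
     curl_easy_setopt(h, CURLOPT_SEEKDATA, f);
*/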
1806 
1807 /*
1808  * Curl_setup_transfer() is called to setup some basic properties for the
1809  * upcoming transfer.
1810  */
1811 void
1812 Curl_setup_transfer(
1813   struct Curl_easy *data,   /* transfer */
1814   int sockindex,            /* socket index to read from or -1 */
1815   curl_off_t size,          /* -1 if unknown at this point */
1816   bool getheader,           /* TRUE if header parsing is wanted */
1817   int writesockindex        /* socket index to write to, it may very well be
1818                                the same we read from. -1 disables */
1819   )
1820 {
1821   struct SingleRequest *k = &data->req;
1822   struct connectdata *conn = data->conn;
1823   DEBUGASSERT(conn != NULL);
1824   DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
1825 
1826   if(conn->bits.multiplex || conn->httpversion == 20) {
1827     /* when multiplexing, the read/write sockets need to be the same! */
1828     conn->sockfd = sockindex == -1 ?
1829       ((writesockindex == -1 ? CURL_SOCKET_BAD : conn->sock[writesockindex])) :
1830       conn->sock[sockindex];
1831     conn->writesockfd = conn->sockfd;
1832   }
1833   else {
1834     conn->sockfd = sockindex == -1 ?
1835       CURL_SOCKET_BAD : conn->sock[sockindex];
1836     conn->writesockfd = writesockindex == -1 ?
1837       CURL_SOCKET_BAD:conn->sock[writesockindex];
1838   }
1839   k->getheader = getheader;
1840 
1841   k->size = size;
1842 
1843   /* The code sequence below is placed in this function just because not all
1844      the necessary input is known in do_complete(), as this function may be
1845      called after it */
1846 
1847   if(!k->getheader) {
1848     k->header = FALSE;
1849     if(size > 0)
1850       Curl_pgrsSetDownloadSize(data, size);
1851   }
1852   /* we want header and/or body, if neither then don't do this! */
1853   if(k->getheader || !data->set.opt_no_body) {
1854 
1855     if(sockindex != -1)
1856       k->keepon |= KEEP_RECV;
1857 
1858     if(writesockindex != -1) {
1859       struct HTTP *http = data->req.protop;
1860       /* HTTP 1.1 magic:
1861 
1862          Even if we require a 100-return code before uploading data, we might
1863          need to write data before that since the request may not have been
1864          completely sent off just yet.
1865 
1866          Thus, we must check if the request has been sent before we set the
1867          state info where we wait for the 100-return code
1868       */
1869       if((data->state.expect100header) &&
1870          (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
1871          (http->sending == HTTPSEND_BODY)) {
1872         /* wait with write until we either got 100-continue or a timeout */
1873         k->exp100 = EXP100_AWAITING_CONTINUE;
1874         k->start100 = Curl_now();
1875 
1876         /* Set a timeout for the multi interface. Add the inaccuracy margin so
1877            that we don't fire slightly too early and get denied a chance to run. */
1878         Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
1879       }
1880       else {
1881         if(data->state.expect100header)
1882           /* when we've sent off the rest of the headers, we must await a
1883              100-continue but first finish sending the request */
1884           k->exp100 = EXP100_SENDING_REQUEST;
1885 
1886         /* enable the write bit when we're not waiting for continue */
1887         k->keepon |= KEEP_SEND;
1888       }
1889     } /* if(writesockindex != -1) */
1890   } /* if(k->getheader || !data->set.opt_no_body) */
1891 
1892 }
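
/* Illustration only (not part of libcurl): the Expect: 100-continue state
   machine set up above can be tuned, or avoided entirely, from the
   application side:

     curl_easy_setopt(h, CURLOPT_EXPECT_100_TIMEOUT_MS, 2000L);

     struct curl_slist *hdrs = curl_slist_append(NULL, "Expect:");
     curl_easy_setopt(h, CURLOPT_HTTPHEADER, hdrs);

   Passing an "Expect:" header with no value removes the header, so
   expect100header is never set and the waiting logic above is skipped. */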
1893