• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /***************************************************************************
2  *                                  _   _ ____  _
3  *  Project                     ___| | | |  _ \| |
4  *                             / __| | | | |_) | |
5  *                            | (__| |_| |  _ <| |___
6  *                             \___|\___/|_| \_\_____|
7  *
8  * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
9  *
10  * This software is licensed as described in the file COPYING, which
11  * you should have received as part of this distribution. The terms
12  * are also available at https://curl.haxx.se/docs/copyright.html.
13  *
14  * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15  * copies of the Software, and permit persons to whom the Software is
16  * furnished to do so, under the terms of the COPYING file.
17  *
18  * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19  * KIND, either express or implied.
20  *
21  ***************************************************************************/
22 
23 #include "curl_setup.h"
24 #include "strtoofft.h"
25 
26 #ifdef HAVE_NETINET_IN_H
27 #include <netinet/in.h>
28 #endif
29 #ifdef HAVE_NETDB_H
30 #include <netdb.h>
31 #endif
32 #ifdef HAVE_ARPA_INET_H
33 #include <arpa/inet.h>
34 #endif
35 #ifdef HAVE_NET_IF_H
36 #include <net/if.h>
37 #endif
38 #ifdef HAVE_SYS_IOCTL_H
39 #include <sys/ioctl.h>
40 #endif
41 #ifdef HAVE_SIGNAL_H
42 #include <signal.h>
43 #endif
44 
45 #ifdef HAVE_SYS_PARAM_H
46 #include <sys/param.h>
47 #endif
48 
49 #ifdef HAVE_SYS_SELECT_H
50 #include <sys/select.h>
51 #endif
52 
53 #ifndef HAVE_SOCKET
54 #error "We can't compile without socket() support!"
55 #endif
56 
57 #include "urldata.h"
58 #include <curl/curl.h>
59 #include "netrc.h"
60 
61 #include "content_encoding.h"
62 #include "hostip.h"
63 #include "transfer.h"
64 #include "sendf.h"
65 #include "speedcheck.h"
66 #include "progress.h"
67 #include "http.h"
68 #include "url.h"
69 #include "getinfo.h"
70 #include "vtls/vtls.h"
71 #include "select.h"
72 #include "multiif.h"
73 #include "connect.h"
74 #include "non-ascii.h"
75 #include "http2.h"
76 #include "mime.h"
77 #include "strcase.h"
78 #include "urlapi-int.h"
79 
80 /* The last 3 #include files should be in this order */
81 #include "curl_printf.h"
82 #include "curl_memory.h"
83 #include "memdebug.h"
84 
85 #if !defined(CURL_DISABLE_HTTP) || !defined(CURL_DISABLE_SMTP) || \
86     !defined(CURL_DISABLE_IMAP)
87 /*
88  * checkheaders() checks the linked list of custom headers for a
89  * particular header (prefix). Provide the prefix without colon!
90  *
91  * Returns a pointer to the first matching header or NULL if none matched.
92  */
Curl_checkheaders(const struct connectdata * conn,const char * thisheader)93 char *Curl_checkheaders(const struct connectdata *conn,
94                         const char *thisheader)
95 {
96   struct curl_slist *head;
97   size_t thislen = strlen(thisheader);
98   struct Curl_easy *data = conn->data;
99 
100   for(head = data->set.headers; head; head = head->next) {
101     if(strncasecompare(head->data, thisheader, thislen) &&
102        Curl_headersep(head->data[thislen]) )
103       return head->data;
104   }
105 
106   return NULL;
107 }
108 #endif
109 
Curl_get_upload_buffer(struct Curl_easy * data)110 CURLcode Curl_get_upload_buffer(struct Curl_easy *data)
111 {
112   if(!data->state.ulbuf) {
113     data->state.ulbuf = malloc(data->set.upload_buffer_size);
114     if(!data->state.ulbuf)
115       return CURLE_OUT_OF_MEMORY;
116   }
117   return CURLE_OK;
118 }
119 
120 #ifndef CURL_DISABLE_HTTP
121 /*
122  * This function will be called to loop through the trailers buffer
123  * until no more data is available for sending.
124  */
Curl_trailers_read(char * buffer,size_t size,size_t nitems,void * raw)125 static size_t Curl_trailers_read(char *buffer, size_t size, size_t nitems,
126                                  void *raw)
127 {
128   struct Curl_easy *data = (struct Curl_easy *)raw;
129   Curl_send_buffer *trailers_buf = data->state.trailers_buf;
130   size_t bytes_left = trailers_buf->size_used-data->state.trailers_bytes_sent;
131   size_t to_copy = (size*nitems < bytes_left) ? size*nitems : bytes_left;
132   if(to_copy) {
133     memcpy(buffer,
134            &trailers_buf->buffer[data->state.trailers_bytes_sent],
135            to_copy);
136     data->state.trailers_bytes_sent += to_copy;
137   }
138   return to_copy;
139 }
140 
Curl_trailers_left(void * raw)141 static size_t Curl_trailers_left(void *raw)
142 {
143   struct Curl_easy *data = (struct Curl_easy *)raw;
144   Curl_send_buffer *trailers_buf = data->state.trailers_buf;
145   return trailers_buf->size_used - data->state.trailers_bytes_sent;
146 }
147 #endif
148 
149 /*
150  * This function will call the read callback to fill our buffer with data
151  * to upload.
152  */
Curl_fillreadbuffer(struct connectdata * conn,size_t bytes,size_t * nreadp)153 CURLcode Curl_fillreadbuffer(struct connectdata *conn, size_t bytes,
154                              size_t *nreadp)
155 {
156   struct Curl_easy *data = conn->data;
157   size_t buffersize = bytes;
158   size_t nread;
159 
160 #ifndef CURL_DISABLE_HTTP
161   struct curl_slist *trailers = NULL;
162   CURLcode c;
163   int trailers_ret_code;
164 #endif
165 
166   curl_read_callback readfunc = NULL;
167   void *extra_data = NULL;
168   bool added_crlf = FALSE;
169 
170 #ifdef CURL_DOES_CONVERSIONS
171   bool sending_http_headers = FALSE;
172 
173   if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
174     const struct HTTP *http = data->req.protop;
175 
176     if(http->sending == HTTPSEND_REQUEST)
177       /* We're sending the HTTP request headers, not the data.
178          Remember that so we don't re-translate them into garbage. */
179       sending_http_headers = TRUE;
180   }
181 #endif
182 
183 #ifndef CURL_DISABLE_HTTP
184   if(data->state.trailers_state == TRAILERS_INITIALIZED) {
185     /* at this point we already verified that the callback exists
186        so we compile and store the trailers buffer, then proceed */
187     infof(data,
188           "Moving trailers state machine from initialized to sending.\n");
189     data->state.trailers_state = TRAILERS_SENDING;
190     data->state.trailers_buf = Curl_add_buffer_init();
191     if(!data->state.trailers_buf) {
192       failf(data, "Unable to allocate trailing headers buffer !");
193       return CURLE_OUT_OF_MEMORY;
194     }
195     data->state.trailers_bytes_sent = 0;
196     Curl_set_in_callback(data, true);
197     trailers_ret_code = data->set.trailer_callback(&trailers,
198                                                    data->set.trailer_data);
199     Curl_set_in_callback(data, false);
200     if(trailers_ret_code == CURL_TRAILERFUNC_OK) {
201       c = Curl_http_compile_trailers(trailers, data->state.trailers_buf, data);
202     }
203     else {
204       failf(data, "operation aborted by trailing headers callback");
205       *nreadp = 0;
206       c = CURLE_ABORTED_BY_CALLBACK;
207     }
208     if(c != CURLE_OK) {
209       Curl_add_buffer_free(&data->state.trailers_buf);
210       curl_slist_free_all(trailers);
211       return c;
212     }
213     infof(data, "Successfully compiled trailers.\r\n");
214     curl_slist_free_all(trailers);
215   }
216 #endif
217 
218   /* if we are transmitting trailing data, we don't need to write
219      a chunk size so we skip this */
220   if(data->req.upload_chunky &&
221      data->state.trailers_state == TRAILERS_NONE) {
222     /* if chunked Transfer-Encoding */
223     buffersize -= (8 + 2 + 2);   /* 32bit hex + CRLF + CRLF */
224     data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
225   }
226 
227 #ifndef CURL_DISABLE_HTTP
228   if(data->state.trailers_state == TRAILERS_SENDING) {
229     /* if we're here then that means that we already sent the last empty chunk
230        but we didn't send a final CR LF, so we sent 0 CR LF. We then start
231        pulling trailing data until we ²have no more at which point we
232        simply return to the previous point in the state machine as if
233        nothing happened.
234        */
235     readfunc = Curl_trailers_read;
236     extra_data = (void *)data;
237   }
238   else
239 #endif
240   {
241     readfunc = data->state.fread_func;
242     extra_data = data->state.in;
243   }
244 
245   Curl_set_in_callback(data, true);
246   nread = readfunc(data->req.upload_fromhere, 1,
247                    buffersize, extra_data);
248   Curl_set_in_callback(data, false);
249 
250   if(nread == CURL_READFUNC_ABORT) {
251     failf(data, "operation aborted by callback");
252     *nreadp = 0;
253     return CURLE_ABORTED_BY_CALLBACK;
254   }
255   if(nread == CURL_READFUNC_PAUSE) {
256     struct SingleRequest *k = &data->req;
257 
258     if(conn->handler->flags & PROTOPT_NONETWORK) {
259       /* protocols that work without network cannot be paused. This is
260          actually only FILE:// just now, and it can't pause since the transfer
261          isn't done using the "normal" procedure. */
262       failf(data, "Read callback asked for PAUSE when not supported!");
263       return CURLE_READ_ERROR;
264     }
265 
266     /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
267     k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
268     if(data->req.upload_chunky) {
269         /* Back out the preallocation done above */
270       data->req.upload_fromhere -= (8 + 2);
271     }
272     *nreadp = 0;
273 
274     return CURLE_OK; /* nothing was read */
275   }
276   else if(nread > buffersize) {
277     /* the read function returned a too large value */
278     *nreadp = 0;
279     failf(data, "read function returned funny value");
280     return CURLE_READ_ERROR;
281   }
282 
283   if(!data->req.forbidchunk && data->req.upload_chunky) {
284     /* if chunked Transfer-Encoding
285      *    build chunk:
286      *
287      *        <HEX SIZE> CRLF
288      *        <DATA> CRLF
289      */
290     /* On non-ASCII platforms the <DATA> may or may not be
291        translated based on set.prefer_ascii while the protocol
292        portion must always be translated to the network encoding.
293        To further complicate matters, line end conversion might be
294        done later on, so we need to prevent CRLFs from becoming
295        CRCRLFs if that's the case.  To do this we use bare LFs
296        here, knowing they'll become CRLFs later on.
297      */
298 
299     char hexbuffer[11] = "";
300     int hexlen = 0;
301     const char *endofline_native;
302     const char *endofline_network;
303 
304     if(
305 #ifdef CURL_DO_LINEEND_CONV
306        (data->set.prefer_ascii) ||
307 #endif
308        (data->set.crlf)) {
309       /* \n will become \r\n later on */
310       endofline_native  = "\n";
311       endofline_network = "\x0a";
312     }
313     else {
314       endofline_native  = "\r\n";
315       endofline_network = "\x0d\x0a";
316     }
317 
318     /* if we're not handling trailing data, proceed as usual */
319     if(data->state.trailers_state != TRAILERS_SENDING) {
320       hexlen = msnprintf(hexbuffer, sizeof(hexbuffer),
321                          "%zx%s", nread, endofline_native);
322 
323       /* move buffer pointer */
324       data->req.upload_fromhere -= hexlen;
325       nread += hexlen;
326 
327       /* copy the prefix to the buffer, leaving out the NUL */
328       memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
329 
330       /* always append ASCII CRLF to the data unless
331          we have a valid trailer callback */
332 #ifndef CURL_DISABLE_HTTP
333       if((nread-hexlen) == 0 &&
334           data->set.trailer_callback != NULL &&
335           data->state.trailers_state == TRAILERS_NONE) {
336         data->state.trailers_state = TRAILERS_INITIALIZED;
337       }
338       else
339 #endif
340       {
341         memcpy(data->req.upload_fromhere + nread,
342                endofline_network,
343                strlen(endofline_network));
344         added_crlf = TRUE;
345       }
346     }
347 
348 #ifdef CURL_DOES_CONVERSIONS
349     {
350       CURLcode result;
351       size_t length;
352       if(data->set.prefer_ascii)
353         /* translate the protocol and data */
354         length = nread;
355       else
356         /* just translate the protocol portion */
357         length = hexlen;
358       if(length) {
359         result = Curl_convert_to_network(data, data->req.upload_fromhere,
360                                          length);
361         /* Curl_convert_to_network calls failf if unsuccessful */
362         if(result)
363           return result;
364       }
365     }
366 #endif /* CURL_DOES_CONVERSIONS */
367 
368 #ifndef CURL_DISABLE_HTTP
369     if(data->state.trailers_state == TRAILERS_SENDING &&
370        !Curl_trailers_left(data)) {
371       Curl_add_buffer_free(&data->state.trailers_buf);
372       data->state.trailers_state = TRAILERS_DONE;
373       data->set.trailer_data = NULL;
374       data->set.trailer_callback = NULL;
375       /* mark the transfer as done */
376       data->req.upload_done = TRUE;
377       infof(data, "Signaling end of chunked upload after trailers.\n");
378     }
379     else
380 #endif
381       if((nread - hexlen) == 0 &&
382          data->state.trailers_state != TRAILERS_INITIALIZED) {
383         /* mark this as done once this chunk is transferred */
384         data->req.upload_done = TRUE;
385         infof(data,
386               "Signaling end of chunked upload via terminating chunk.\n");
387       }
388 
389     if(added_crlf)
390       nread += strlen(endofline_network); /* for the added end of line */
391   }
392 #ifdef CURL_DOES_CONVERSIONS
393   else if((data->set.prefer_ascii) && (!sending_http_headers)) {
394     CURLcode result;
395     result = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
396     /* Curl_convert_to_network calls failf if unsuccessful */
397     if(result)
398       return result;
399   }
400 #endif /* CURL_DOES_CONVERSIONS */
401 
402   *nreadp = nread;
403 
404   return CURLE_OK;
405 }
406 
407 
408 /*
409  * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
410  * POST/PUT with multi-pass authentication when a sending was denied and a
411  * resend is necessary.
412  */
Curl_readrewind(struct connectdata * conn)413 CURLcode Curl_readrewind(struct connectdata *conn)
414 {
415   struct Curl_easy *data = conn->data;
416   curl_mimepart *mimepart = &data->set.mimepost;
417 
418   conn->bits.rewindaftersend = FALSE; /* we rewind now */
419 
420   /* explicitly switch off sending data on this connection now since we are
421      about to restart a new transfer and thus we want to avoid inadvertently
422      sending more data on the existing connection until the next transfer
423      starts */
424   data->req.keepon &= ~KEEP_SEND;
425 
426   /* We have sent away data. If not using CURLOPT_POSTFIELDS or
427      CURLOPT_HTTPPOST, call app to rewind
428   */
429   if(conn->handler->protocol & PROTO_FAMILY_HTTP) {
430     struct HTTP *http = data->req.protop;
431 
432     if(http->sendit)
433       mimepart = http->sendit;
434   }
435   if(data->set.postfields)
436     ; /* do nothing */
437   else if(data->set.httpreq == HTTPREQ_POST_MIME ||
438           data->set.httpreq == HTTPREQ_POST_FORM) {
439     if(Curl_mime_rewind(mimepart)) {
440       failf(data, "Cannot rewind mime/post data");
441       return CURLE_SEND_FAIL_REWIND;
442     }
443   }
444   else {
445     if(data->set.seek_func) {
446       int err;
447 
448       Curl_set_in_callback(data, true);
449       err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
450       Curl_set_in_callback(data, false);
451       if(err) {
452         failf(data, "seek callback returned error %d", (int)err);
453         return CURLE_SEND_FAIL_REWIND;
454       }
455     }
456     else if(data->set.ioctl_func) {
457       curlioerr err;
458 
459       Curl_set_in_callback(data, true);
460       err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
461                                    data->set.ioctl_client);
462       Curl_set_in_callback(data, false);
463       infof(data, "the ioctl callback returned %d\n", (int)err);
464 
465       if(err) {
466         /* FIXME: convert to a human readable error message */
467         failf(data, "ioctl callback returned error %d", (int)err);
468         return CURLE_SEND_FAIL_REWIND;
469       }
470     }
471     else {
472       /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
473          given FILE * stream and we can actually attempt to rewind that
474          ourselves with fseek() */
475       if(data->state.fread_func == (curl_read_callback)fread) {
476         if(-1 != fseek(data->state.in, 0, SEEK_SET))
477           /* successful rewind */
478           return CURLE_OK;
479       }
480 
481       /* no callback set or failure above, makes us fail at once */
482       failf(data, "necessary data rewind wasn't possible");
483       return CURLE_SEND_FAIL_REWIND;
484     }
485   }
486   return CURLE_OK;
487 }
488 
data_pending(const struct connectdata * conn)489 static int data_pending(const struct connectdata *conn)
490 {
491   /* in the case of libssh2, we can never be really sure that we have emptied
492      its internal buffers so we MUST always try until we get EAGAIN back */
493   return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
494 #if defined(USE_NGHTTP2)
495     Curl_ssl_data_pending(conn, FIRSTSOCKET) ||
496     /* For HTTP/2, we may read up everything including response body
497        with header fields in Curl_http_readwrite_headers. If no
498        content-length is provided, curl waits for the connection
499        close, which we emulate it using conn->proto.httpc.closed =
500        TRUE. The thing is if we read everything, then http2_recv won't
501        be called and we cannot signal the HTTP/2 stream has closed. As
502        a workaround, we return nonzero here to call http2_recv. */
503     ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion == 20);
504 #else
505     Curl_ssl_data_pending(conn, FIRSTSOCKET);
506 #endif
507 }
508 
read_rewind(struct connectdata * conn,size_t thismuch)509 static void read_rewind(struct connectdata *conn,
510                         size_t thismuch)
511 {
512   DEBUGASSERT(conn->read_pos >= thismuch);
513 
514   conn->read_pos -= thismuch;
515   conn->bits.stream_was_rewound = TRUE;
516 
517 #ifdef DEBUGBUILD
518   {
519     char buf[512 + 1];
520     size_t show;
521 
522     show = CURLMIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
523     if(conn->master_buffer) {
524       memcpy(buf, conn->master_buffer + conn->read_pos, show);
525       buf[show] = '\0';
526     }
527     else {
528       buf[0] = '\0';
529     }
530 
531     DEBUGF(infof(conn->data,
532                  "Buffer after stream rewind (read_pos = %zu): [%s]\n",
533                  conn->read_pos, buf));
534   }
535 #endif
536 }
537 
538 /*
539  * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
540  * remote document with the time provided by CURLOPT_TIMEVAL
541  */
Curl_meets_timecondition(struct Curl_easy * data,time_t timeofdoc)542 bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc)
543 {
544   if((timeofdoc == 0) || (data->set.timevalue == 0))
545     return TRUE;
546 
547   switch(data->set.timecondition) {
548   case CURL_TIMECOND_IFMODSINCE:
549   default:
550     if(timeofdoc <= data->set.timevalue) {
551       infof(data,
552             "The requested document is not new enough\n");
553       data->info.timecond = TRUE;
554       return FALSE;
555     }
556     break;
557   case CURL_TIMECOND_IFUNMODSINCE:
558     if(timeofdoc >= data->set.timevalue) {
559       infof(data,
560             "The requested document is not old enough\n");
561       data->info.timecond = TRUE;
562       return FALSE;
563     }
564     break;
565   }
566 
567   return TRUE;
568 }
569 
570 /*
571  * Go ahead and do a read if we have a readable socket or if
572  * the stream was rewound (in which case we have data in a
573  * buffer)
574  *
575  * return '*comeback' TRUE if we didn't properly drain the socket so this
576  * function should get called again without select() or similar in between!
577  */
readwrite_data(struct Curl_easy * data,struct connectdata * conn,struct SingleRequest * k,int * didwhat,bool * done,bool * comeback)578 static CURLcode readwrite_data(struct Curl_easy *data,
579                                struct connectdata *conn,
580                                struct SingleRequest *k,
581                                int *didwhat, bool *done,
582                                bool *comeback)
583 {
584   CURLcode result = CURLE_OK;
585   ssize_t nread; /* number of bytes read */
586   size_t excess = 0; /* excess bytes read */
587   bool readmore = FALSE; /* used by RTP to signal for more data */
588   int maxloops = 100;
589 
590   *done = FALSE;
591   *comeback = FALSE;
592 
593   /* This is where we loop until we have read everything there is to
594      read or we get a CURLE_AGAIN */
595   do {
596     bool is_empty_data = FALSE;
597     size_t buffersize = data->set.buffer_size;
598     size_t bytestoread = buffersize;
599 
600     if(
601 #if defined(USE_NGHTTP2)
602        /* For HTTP/2, read data without caring about the content
603           length. This is safe because body in HTTP/2 is always
604           segmented thanks to its framing layer. Meanwhile, we have to
605           call Curl_read to ensure that http2_handle_stream_close is
606           called when we read all incoming bytes for a particular
607           stream. */
608        !((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
609          conn->httpversion == 20) &&
610 #endif
611        k->size != -1 && !k->header) {
612       /* make sure we don't read "too much" if we can help it since we
613          might be pipelining and then someone else might want to read what
614          follows! */
615       curl_off_t totalleft = k->size - k->bytecount;
616       if(totalleft < (curl_off_t)bytestoread)
617         bytestoread = (size_t)totalleft;
618     }
619 
620     if(bytestoread) {
621       /* receive data from the network! */
622       result = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
623 
624       /* read would've blocked */
625       if(CURLE_AGAIN == result)
626         break; /* get out of loop */
627 
628       if(result>0)
629         return result;
630     }
631     else {
632       /* read nothing but since we wanted nothing we consider this an OK
633          situation to proceed from */
634       DEBUGF(infof(data, "readwrite_data: we're done!\n"));
635       nread = 0;
636     }
637 
638     if((k->bytecount == 0) && (k->writebytecount == 0)) {
639       Curl_pgrsTime(data, TIMER_STARTTRANSFER);
640       if(k->exp100 > EXP100_SEND_DATA)
641         /* set time stamp to compare with when waiting for the 100 */
642         k->start100 = Curl_now();
643     }
644 
645     *didwhat |= KEEP_RECV;
646     /* indicates data of zero size, i.e. empty file */
647     is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
648 
649     /* NUL terminate, allowing string ops to be used */
650     if(0 < nread || is_empty_data) {
651       k->buf[nread] = 0;
652     }
653     else if(0 >= nread) {
654       /* if we receive 0 or less here, the server closed the connection
655          and we bail out from this! */
656       DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
657       k->keepon &= ~KEEP_RECV;
658       break;
659     }
660 
661     /* Default buffer to use when we write the buffer, it may be changed
662        in the flow below before the actual storing is done. */
663     k->str = k->buf;
664 
665     if(conn->handler->readwrite) {
666       result = conn->handler->readwrite(data, conn, &nread, &readmore);
667       if(result)
668         return result;
669       if(readmore)
670         break;
671     }
672 
673 #ifndef CURL_DISABLE_HTTP
674     /* Since this is a two-state thing, we check if we are parsing
675        headers at the moment or not. */
676     if(k->header) {
677       /* we are in parse-the-header-mode */
678       bool stop_reading = FALSE;
679       result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
680       if(result)
681         return result;
682 
683       if(conn->handler->readwrite &&
684          (k->maxdownload <= 0 && nread > 0)) {
685         result = conn->handler->readwrite(data, conn, &nread, &readmore);
686         if(result)
687           return result;
688         if(readmore)
689           break;
690       }
691 
692       if(stop_reading) {
693         /* We've stopped dealing with input, get out of the do-while loop */
694 
695         if(nread > 0) {
696           if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
697             infof(data,
698                   "Rewinding stream by : %zd"
699                   " bytes on url %s (zero-length body)\n",
700                   nread, data->state.up.path);
701             read_rewind(conn, (size_t)nread);
702           }
703           else {
704             infof(data,
705                   "Excess found in a non pipelined read:"
706                   " excess = %zd"
707                   " url = %s (zero-length body)\n",
708                   nread, data->state.up.path);
709           }
710         }
711 
712         break;
713       }
714     }
715 #endif /* CURL_DISABLE_HTTP */
716 
717 
718     /* This is not an 'else if' since it may be a rest from the header
719        parsing, where the beginning of the buffer is headers and the end
720        is non-headers. */
721     if(k->str && !k->header && (nread > 0 || is_empty_data)) {
722 
723       if(data->set.opt_no_body) {
724         /* data arrives although we want none, bail out */
725         streamclose(conn, "ignoring body");
726         *done = TRUE;
727         return CURLE_WEIRD_SERVER_REPLY;
728       }
729 
730 #ifndef CURL_DISABLE_HTTP
731       if(0 == k->bodywrites && !is_empty_data) {
732         /* These checks are only made the first time we are about to
733            write a piece of the body */
734         if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
735           /* HTTP-only checks */
736 
737           if(data->req.newurl) {
738             if(conn->bits.close) {
739               /* Abort after the headers if "follow Location" is set
740                  and we're set to close anyway. */
741               k->keepon &= ~KEEP_RECV;
742               *done = TRUE;
743               return CURLE_OK;
744             }
745             /* We have a new url to load, but since we want to be able
746                to re-use this connection properly, we read the full
747                response in "ignore more" */
748             k->ignorebody = TRUE;
749             infof(data, "Ignoring the response-body\n");
750           }
751           if(data->state.resume_from && !k->content_range &&
752              (data->set.httpreq == HTTPREQ_GET) &&
753              !k->ignorebody) {
754 
755             if(k->size == data->state.resume_from) {
756               /* The resume point is at the end of file, consider this fine
757                  even if it doesn't allow resume from here. */
758               infof(data, "The entire document is already downloaded");
759               connclose(conn, "already downloaded");
760               /* Abort download */
761               k->keepon &= ~KEEP_RECV;
762               *done = TRUE;
763               return CURLE_OK;
764             }
765 
766             /* we wanted to resume a download, although the server doesn't
767              * seem to support this and we did this with a GET (if it
768              * wasn't a GET we did a POST or PUT resume) */
769             failf(data, "HTTP server doesn't seem to support "
770                   "byte ranges. Cannot resume.");
771             return CURLE_RANGE_ERROR;
772           }
773 
774           if(data->set.timecondition && !data->state.range) {
775             /* A time condition has been set AND no ranges have been
776                requested. This seems to be what chapter 13.3.4 of
777                RFC 2616 defines to be the correct action for a
778                HTTP/1.1 client */
779 
780             if(!Curl_meets_timecondition(data, k->timeofdoc)) {
781               *done = TRUE;
782               /* We're simulating a http 304 from server so we return
783                  what should have been returned from the server */
784               data->info.httpcode = 304;
785               infof(data, "Simulate a HTTP 304 response!\n");
786               /* we abort the transfer before it is completed == we ruin the
787                  re-use ability. Close the connection */
788               connclose(conn, "Simulated 304 handling");
789               return CURLE_OK;
790             }
791           } /* we have a time condition */
792 
793         } /* this is HTTP or RTSP */
794       } /* this is the first time we write a body part */
795 #endif /* CURL_DISABLE_HTTP */
796 
797       k->bodywrites++;
798 
799       /* pass data to the debug function before it gets "dechunked" */
800       if(data->set.verbose) {
801         if(k->badheader) {
802           Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
803                      (size_t)k->hbuflen);
804           if(k->badheader == HEADER_PARTHEADER)
805             Curl_debug(data, CURLINFO_DATA_IN,
806                        k->str, (size_t)nread);
807         }
808         else
809           Curl_debug(data, CURLINFO_DATA_IN,
810                      k->str, (size_t)nread);
811       }
812 
813 #ifndef CURL_DISABLE_HTTP
814       if(k->chunk) {
815         /*
816          * Here comes a chunked transfer flying and we need to decode this
817          * properly.  While the name says read, this function both reads
818          * and writes away the data. The returned 'nread' holds the number
819          * of actual data it wrote to the client.
820          */
821 
822         CHUNKcode res =
823           Curl_httpchunk_read(conn, k->str, nread, &nread);
824 
825         if(CHUNKE_OK < res) {
826           if(CHUNKE_WRITE_ERROR == res) {
827             failf(data, "Failed writing data");
828             return CURLE_WRITE_ERROR;
829           }
830           failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
831           return CURLE_RECV_ERROR;
832         }
833         if(CHUNKE_STOP == res) {
834           size_t dataleft;
835           /* we're done reading chunks! */
836           k->keepon &= ~KEEP_RECV; /* read no more */
837 
838           /* There are now possibly N number of bytes at the end of the
839              str buffer that weren't written to the client.
840 
841              We DO care about this data if we are pipelining.
842              Push it back to be read on the next pass. */
843 
844           dataleft = conn->chunk.dataleft;
845           if(dataleft != 0) {
846             infof(conn->data, "Leftovers after chunking: %zu bytes\n",
847                   dataleft);
848             if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
849               /* only attempt the rewind if we truly are pipelining */
850               infof(conn->data, "Rewinding %zu bytes\n",dataleft);
851               read_rewind(conn, dataleft);
852             }
853           }
854         }
855         /* If it returned OK, we just keep going */
856       }
857 #endif   /* CURL_DISABLE_HTTP */
858 
859       /* Account for body content stored in the header buffer */
860       if((k->badheader == HEADER_PARTHEADER) && !k->ignorebody) {
861         DEBUGF(infof(data, "Increasing bytecount by %zu from hbuflen\n",
862                      k->hbuflen));
863         k->bytecount += k->hbuflen;
864       }
865 
866       if((-1 != k->maxdownload) &&
867          (k->bytecount + nread >= k->maxdownload)) {
868 
869         excess = (size_t)(k->bytecount + nread - k->maxdownload);
870         if(excess > 0 && !k->ignorebody) {
871           if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
872             infof(data,
873                   "Rewinding stream by : %zu"
874                   " bytes on url %s (size = %" CURL_FORMAT_CURL_OFF_T
875                   ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
876                   ", bytecount = %" CURL_FORMAT_CURL_OFF_T ", nread = %zd)\n",
877                   excess, data->state.up.path,
878                   k->size, k->maxdownload, k->bytecount, nread);
879             read_rewind(conn, excess);
880           }
881           else {
882             infof(data,
883                   "Excess found in a non pipelined read:"
884                   " excess = %zu"
885                   ", size = %" CURL_FORMAT_CURL_OFF_T
886                   ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
887                   ", bytecount = %" CURL_FORMAT_CURL_OFF_T "\n",
888                   excess, k->size, k->maxdownload, k->bytecount);
889           }
890         }
891 
892         nread = (ssize_t) (k->maxdownload - k->bytecount);
893         if(nread < 0) /* this should be unusual */
894           nread = 0;
895 
896         k->keepon &= ~KEEP_RECV; /* we're done reading */
897       }
898 
899       k->bytecount += nread;
900 
901       Curl_pgrsSetDownloadCounter(data, k->bytecount);
902 
903       if(!k->chunk && (nread || k->badheader || is_empty_data)) {
904         /* If this is chunky transfer, it was already written */
905 
906         if(k->badheader && !k->ignorebody) {
907           /* we parsed a piece of data wrongly assuming it was a header
908              and now we output it as body instead */
909 
910           /* Don't let excess data pollute body writes */
911           if(k->maxdownload == -1 || (curl_off_t)k->hbuflen <= k->maxdownload)
912             result = Curl_client_write(conn, CLIENTWRITE_BODY,
913                                        data->state.headerbuff,
914                                        k->hbuflen);
915           else
916             result = Curl_client_write(conn, CLIENTWRITE_BODY,
917                                        data->state.headerbuff,
918                                        (size_t)k->maxdownload);
919 
920           if(result)
921             return result;
922         }
923         if(k->badheader < HEADER_ALLBAD) {
924           /* This switch handles various content encodings. If there's an
925              error here, be sure to check over the almost identical code
926              in http_chunks.c.
927              Make sure that ALL_CONTENT_ENCODINGS contains all the
928              encodings handled here. */
929           if(conn->data->set.http_ce_skip || !k->writer_stack) {
930             if(!k->ignorebody) {
931 #ifndef CURL_DISABLE_POP3
932               if(conn->handler->protocol & PROTO_FAMILY_POP3)
933                 result = Curl_pop3_write(conn, k->str, nread);
934               else
935 #endif /* CURL_DISABLE_POP3 */
936                 result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
937                                            nread);
938             }
939           }
940           else if(!k->ignorebody)
941             result = Curl_unencode_write(conn, k->writer_stack, k->str, nread);
942         }
943         k->badheader = HEADER_NORMAL; /* taken care of now */
944 
945         if(result)
946           return result;
947       }
948 
949     } /* if(!header and data to read) */
950 
951     if(conn->handler->readwrite && excess && !conn->bits.stream_was_rewound) {
952       /* Parse the excess data */
953       k->str += nread;
954 
955       if(&k->str[excess] > &k->buf[data->set.buffer_size]) {
956         /* the excess amount was too excessive(!), make sure
957            it doesn't read out of buffer */
958         excess = &k->buf[data->set.buffer_size] - k->str;
959       }
960       nread = (ssize_t)excess;
961 
962       result = conn->handler->readwrite(data, conn, &nread, &readmore);
963       if(result)
964         return result;
965 
966       if(readmore)
967         k->keepon |= KEEP_RECV; /* we're not done reading */
968       break;
969     }
970 
971     if(is_empty_data) {
972       /* if we received nothing, the server closed the connection and we
973          are done */
974       k->keepon &= ~KEEP_RECV;
975     }
976 
977     if(k->keepon & KEEP_RECV_PAUSE) {
978       /* this is a paused transfer */
979       break;
980     }
981 
982   } while(data_pending(conn) && maxloops--);
983 
984   if(maxloops <= 0) {
985     /* we mark it as read-again-please */
986     conn->cselect_bits = CURL_CSELECT_IN;
987     *comeback = TRUE;
988   }
989 
990   if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
991      conn->bits.close) {
992     /* When we've read the entire thing and the close bit is set, the server
993        may now close the connection. If there's now any kind of sending going
994        on from our side, we need to stop that immediately. */
995     infof(data, "we are done reading and this is set to close, stop send\n");
996     k->keepon &= ~KEEP_SEND; /* no writing anymore either */
997   }
998 
999   return CURLE_OK;
1000 }
1001 
done_sending(struct connectdata * conn,struct SingleRequest * k)1002 static CURLcode done_sending(struct connectdata *conn,
1003                              struct SingleRequest *k)
1004 {
1005   k->keepon &= ~KEEP_SEND; /* we're done writing */
1006 
1007   Curl_http2_done_sending(conn);
1008 
1009   if(conn->bits.rewindaftersend) {
1010     CURLcode result = Curl_readrewind(conn);
1011     if(result)
1012       return result;
1013   }
1014   return CURLE_OK;
1015 }
1016 
1017 #if defined(WIN32) && !defined(USE_LWIPSOCK)
1018 #ifndef SIO_IDEAL_SEND_BACKLOG_QUERY
1019 #define SIO_IDEAL_SEND_BACKLOG_QUERY 0x4004747B
1020 #endif
1021 
win_update_buffer_size(curl_socket_t sockfd)1022 static void win_update_buffer_size(curl_socket_t sockfd)
1023 {
1024   int result;
1025   ULONG ideal;
1026   DWORD ideallen;
1027   result = WSAIoctl(sockfd, SIO_IDEAL_SEND_BACKLOG_QUERY, 0, 0,
1028                     &ideal, sizeof(ideal), &ideallen, 0, 0);
1029   if(result == 0) {
1030     setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF,
1031                (const char *)&ideal, sizeof(ideal));
1032   }
1033 }
1034 #else
1035 #define win_update_buffer_size(x)
1036 #endif
1037 
1038 /*
1039  * Send data to upload to the server, when the socket is writable.
1040  */
readwrite_upload(struct Curl_easy * data,struct connectdata * conn,int * didwhat)1041 static CURLcode readwrite_upload(struct Curl_easy *data,
1042                                  struct connectdata *conn,
1043                                  int *didwhat)
1044 {
1045   ssize_t i, si;
1046   ssize_t bytes_written;
1047   CURLcode result;
1048   ssize_t nread; /* number of bytes read */
1049   bool sending_http_headers = FALSE;
1050   struct SingleRequest *k = &data->req;
1051 
1052   if((k->bytecount == 0) && (k->writebytecount == 0))
1053     Curl_pgrsTime(data, TIMER_STARTTRANSFER);
1054 
1055   *didwhat |= KEEP_SEND;
1056 
1057   do {
1058     /* only read more data if there's no upload data already
1059        present in the upload buffer */
1060     if(0 == k->upload_present) {
1061       result = Curl_get_upload_buffer(data);
1062       if(result)
1063         return result;
1064       /* init the "upload from here" pointer */
1065       k->upload_fromhere = data->state.ulbuf;
1066 
1067       if(!k->upload_done) {
1068         /* HTTP pollution, this should be written nicer to become more
1069            protocol agnostic. */
1070         size_t fillcount;
1071         struct HTTP *http = k->protop;
1072 
1073         if((k->exp100 == EXP100_SENDING_REQUEST) &&
1074            (http->sending == HTTPSEND_BODY)) {
1075           /* If this call is to send body data, we must take some action:
1076              We have sent off the full HTTP 1.1 request, and we shall now
1077              go into the Expect: 100 state and await such a header */
1078           k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
1079           k->keepon &= ~KEEP_SEND;         /* disable writing */
1080           k->start100 = Curl_now();       /* timeout count starts now */
1081           *didwhat &= ~KEEP_SEND;  /* we didn't write anything actually */
1082           /* set a timeout for the multi interface */
1083           Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
1084           break;
1085         }
1086 
1087         if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
1088           if(http->sending == HTTPSEND_REQUEST)
1089             /* We're sending the HTTP request headers, not the data.
1090                Remember that so we don't change the line endings. */
1091             sending_http_headers = TRUE;
1092           else
1093             sending_http_headers = FALSE;
1094         }
1095 
1096         result = Curl_fillreadbuffer(conn, data->set.upload_buffer_size,
1097                                      &fillcount);
1098         if(result)
1099           return result;
1100 
1101         nread = fillcount;
1102       }
1103       else
1104         nread = 0; /* we're done uploading/reading */
1105 
1106       if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
1107         /* this is a paused transfer */
1108         break;
1109       }
1110       if(nread <= 0) {
1111         result = done_sending(conn, k);
1112         if(result)
1113           return result;
1114         break;
1115       }
1116 
1117       /* store number of bytes available for upload */
1118       k->upload_present = nread;
1119 
1120       /* convert LF to CRLF if so asked */
1121       if((!sending_http_headers) && (
1122 #ifdef CURL_DO_LINEEND_CONV
1123          /* always convert if we're FTPing in ASCII mode */
1124          (data->set.prefer_ascii) ||
1125 #endif
1126          (data->set.crlf))) {
1127         /* Do we need to allocate a scratch buffer? */
1128         if(!data->state.scratch) {
1129           data->state.scratch = malloc(2 * data->set.upload_buffer_size);
1130           if(!data->state.scratch) {
1131             failf(data, "Failed to alloc scratch buffer!");
1132 
1133             return CURLE_OUT_OF_MEMORY;
1134           }
1135         }
1136 
1137         /*
1138          * ASCII/EBCDIC Note: This is presumably a text (not binary)
1139          * transfer so the data should already be in ASCII.
1140          * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
1141          * must be used instead of the escape sequences \r & \n.
1142          */
1143         for(i = 0, si = 0; i < nread; i++, si++) {
1144           if(k->upload_fromhere[i] == 0x0a) {
1145             data->state.scratch[si++] = 0x0d;
1146             data->state.scratch[si] = 0x0a;
1147             if(!data->set.crlf) {
1148               /* we're here only because FTP is in ASCII mode...
1149                  bump infilesize for the LF we just added */
1150               if(data->state.infilesize != -1)
1151                 data->state.infilesize++;
1152             }
1153           }
1154           else
1155             data->state.scratch[si] = k->upload_fromhere[i];
1156         }
1157 
1158         if(si != nread) {
1159           /* only perform the special operation if we really did replace
1160              anything */
1161           nread = si;
1162 
1163           /* upload from the new (replaced) buffer instead */
1164           k->upload_fromhere = data->state.scratch;
1165 
1166           /* set the new amount too */
1167           k->upload_present = nread;
1168         }
1169       }
1170 
1171 #ifndef CURL_DISABLE_SMTP
1172       if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
1173         result = Curl_smtp_escape_eob(conn, nread);
1174         if(result)
1175           return result;
1176       }
1177 #endif /* CURL_DISABLE_SMTP */
1178     } /* if 0 == k->upload_present */
1179     else {
1180       /* We have a partial buffer left from a previous "round". Use
1181          that instead of reading more data */
1182     }
1183 
1184     /* write to socket (send away data) */
1185     result = Curl_write(conn,
1186                         conn->writesockfd,  /* socket to send to */
1187                         k->upload_fromhere, /* buffer pointer */
1188                         k->upload_present,  /* buffer size */
1189                         &bytes_written);    /* actually sent */
1190     if(result)
1191       return result;
1192 
1193     win_update_buffer_size(conn->writesockfd);
1194 
1195     if(data->set.verbose)
1196       /* show the data before we change the pointer upload_fromhere */
1197       Curl_debug(data, CURLINFO_DATA_OUT, k->upload_fromhere,
1198                  (size_t)bytes_written);
1199 
1200     k->writebytecount += bytes_written;
1201     Curl_pgrsSetUploadCounter(data, k->writebytecount);
1202 
1203     if((!k->upload_chunky || k->forbidchunk) &&
1204        (k->writebytecount == data->state.infilesize)) {
1205       /* we have sent all data we were supposed to */
1206       k->upload_done = TRUE;
1207       infof(data, "We are completely uploaded and fine\n");
1208     }
1209 
1210     if(k->upload_present != bytes_written) {
1211       /* we only wrote a part of the buffer (if anything), deal with it! */
1212 
1213       /* store the amount of bytes left in the buffer to write */
1214       k->upload_present -= bytes_written;
1215 
1216       /* advance the pointer where to find the buffer when the next send
1217          is to happen */
1218       k->upload_fromhere += bytes_written;
1219     }
1220     else {
1221       /* we've uploaded that buffer now */
1222       result = Curl_get_upload_buffer(data);
1223       if(result)
1224         return result;
1225       k->upload_fromhere = data->state.ulbuf;
1226       k->upload_present = 0; /* no more bytes left */
1227 
1228       if(k->upload_done) {
1229         result = done_sending(conn, k);
1230         if(result)
1231           return result;
1232       }
1233     }
1234 
1235 
1236   } WHILE_FALSE; /* just to break out from! */
1237 
1238   return CURLE_OK;
1239 }
1240 
1241 /*
1242  * Curl_readwrite() is the low-level function to be called when data is to
1243  * be read and written to/from the connection.
1244  *
1245  * return '*comeback' TRUE if we didn't properly drain the socket so this
1246  * function should get called again without select() or similar in between!
1247  */
Curl_readwrite(struct connectdata * conn,struct Curl_easy * data,bool * done,bool * comeback)1248 CURLcode Curl_readwrite(struct connectdata *conn,
1249                         struct Curl_easy *data,
1250                         bool *done,
1251                         bool *comeback)
1252 {
1253   struct SingleRequest *k = &data->req;
1254   CURLcode result;
1255   int didwhat = 0;
1256 
1257   curl_socket_t fd_read;
1258   curl_socket_t fd_write;
1259   int select_res = conn->cselect_bits;
1260 
1261   conn->cselect_bits = 0;
1262 
1263   /* only use the proper socket if the *_HOLD bit is not set simultaneously as
1264      then we are in rate limiting state in that transfer direction */
1265 
1266   if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
1267     fd_read = conn->sockfd;
1268   else
1269     fd_read = CURL_SOCKET_BAD;
1270 
1271   if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
1272     fd_write = conn->writesockfd;
1273   else
1274     fd_write = CURL_SOCKET_BAD;
1275 
1276   if(conn->data->state.drain) {
1277     select_res |= CURL_CSELECT_IN;
1278     DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data\n"));
1279   }
1280 
1281   if(!select_res) /* Call for select()/poll() only, if read/write/error
1282                      status is not known. */
1283     select_res = Curl_socket_check(fd_read, CURL_SOCKET_BAD, fd_write, 0);
1284 
1285   if(select_res == CURL_CSELECT_ERR) {
1286     failf(data, "select/poll returned error");
1287     return CURLE_SEND_ERROR;
1288   }
1289 
1290   /* We go ahead and do a read if we have a readable socket or if
1291      the stream was rewound (in which case we have data in a
1292      buffer) */
1293   if((k->keepon & KEEP_RECV) &&
1294      ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {
1295 
1296     result = readwrite_data(data, conn, k, &didwhat, done, comeback);
1297     if(result || *done)
1298       return result;
1299   }
1300 
1301   /* If we still have writing to do, we check if we have a writable socket. */
1302   if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
1303     /* write */
1304 
1305     result = readwrite_upload(data, conn, &didwhat);
1306     if(result)
1307       return result;
1308   }
1309 
1310   k->now = Curl_now();
1311   if(didwhat) {
1312     ;
1313   }
1314   else {
1315     /* no read no write, this is a timeout? */
1316     if(k->exp100 == EXP100_AWAITING_CONTINUE) {
1317       /* This should allow some time for the header to arrive, but only a
1318          very short time as otherwise it'll be too much wasted time too
1319          often. */
1320 
1321       /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1322 
1323          Therefore, when a client sends this header field to an origin server
1324          (possibly via a proxy) from which it has never seen a 100 (Continue)
1325          status, the client SHOULD NOT wait for an indefinite period before
1326          sending the request body.
1327 
1328       */
1329 
1330       timediff_t ms = Curl_timediff(k->now, k->start100);
1331       if(ms >= data->set.expect_100_timeout) {
1332         /* we've waited long enough, continue anyway */
1333         k->exp100 = EXP100_SEND_DATA;
1334         k->keepon |= KEEP_SEND;
1335         Curl_expire_done(data, EXPIRE_100_TIMEOUT);
1336         infof(data, "Done waiting for 100-continue\n");
1337       }
1338     }
1339   }
1340 
1341   if(Curl_pgrsUpdate(conn))
1342     result = CURLE_ABORTED_BY_CALLBACK;
1343   else
1344     result = Curl_speedcheck(data, k->now);
1345   if(result)
1346     return result;
1347 
1348   if(k->keepon) {
1349     if(0 > Curl_timeleft(data, &k->now, FALSE)) {
1350       if(k->size != -1) {
1351         failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
1352               " milliseconds with %" CURL_FORMAT_CURL_OFF_T " out of %"
1353               CURL_FORMAT_CURL_OFF_T " bytes received",
1354               Curl_timediff(k->now, data->progress.t_startsingle),
1355               k->bytecount, k->size);
1356       }
1357       else {
1358         failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
1359               " milliseconds with %" CURL_FORMAT_CURL_OFF_T " bytes received",
1360               Curl_timediff(k->now, data->progress.t_startsingle),
1361               k->bytecount);
1362       }
1363       return CURLE_OPERATION_TIMEDOUT;
1364     }
1365   }
1366   else {
1367     /*
1368      * The transfer has been performed. Just make some general checks before
1369      * returning.
1370      */
1371 
1372     if(!(data->set.opt_no_body) && (k->size != -1) &&
1373        (k->bytecount != k->size) &&
1374 #ifdef CURL_DO_LINEEND_CONV
1375        /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1376           so we'll check to see if the discrepancy can be explained
1377           by the number of CRLFs we've changed to LFs.
1378        */
1379        (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1380 #endif /* CURL_DO_LINEEND_CONV */
1381        !k->newurl) {
1382       failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
1383             " bytes remaining to read", k->size - k->bytecount);
1384       return CURLE_PARTIAL_FILE;
1385     }
1386     if(!(data->set.opt_no_body) && k->chunk &&
1387        (conn->chunk.state != CHUNK_STOP)) {
1388       /*
1389        * In chunked mode, return an error if the connection is closed prior to
1390        * the empty (terminating) chunk is read.
1391        *
1392        * The condition above used to check for
1393        * conn->proto.http->chunk.datasize != 0 which is true after reading
1394        * *any* chunk, not just the empty chunk.
1395        *
1396        */
1397       failf(data, "transfer closed with outstanding read data remaining");
1398       return CURLE_PARTIAL_FILE;
1399     }
1400     if(Curl_pgrsUpdate(conn))
1401       return CURLE_ABORTED_BY_CALLBACK;
1402   }
1403 
1404   /* Now update the "done" boolean we return */
1405   *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
1406                             KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;
1407 
1408   return CURLE_OK;
1409 }
1410 
1411 /*
1412  * Curl_single_getsock() gets called by the multi interface code when the app
1413  * has requested to get the sockets for the current connection. This function
1414  * will then be called once for every connection that the multi interface
1415  * keeps track of. This function will only be called for connections that are
1416  * in the proper state to have this information available.
1417  */
Curl_single_getsock(const struct connectdata * conn,curl_socket_t * sock,int numsocks)1418 int Curl_single_getsock(const struct connectdata *conn,
1419                         curl_socket_t *sock, /* points to numsocks number
1420                                                 of sockets */
1421                         int numsocks)
1422 {
1423   const struct Curl_easy *data = conn->data;
1424   int bitmap = GETSOCK_BLANK;
1425   unsigned sockindex = 0;
1426 
1427   if(conn->handler->perform_getsock)
1428     return conn->handler->perform_getsock(conn, sock, numsocks);
1429 
1430   if(numsocks < 2)
1431     /* simple check but we might need two slots */
1432     return GETSOCK_BLANK;
1433 
1434   /* don't include HOLD and PAUSE connections */
1435   if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {
1436 
1437     DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
1438 
1439     bitmap |= GETSOCK_READSOCK(sockindex);
1440     sock[sockindex] = conn->sockfd;
1441   }
1442 
1443   /* don't include HOLD and PAUSE connections */
1444   if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
1445 
1446     if((conn->sockfd != conn->writesockfd) ||
1447        bitmap == GETSOCK_BLANK) {
1448       /* only if they are not the same socket and we have a readable
1449          one, we increase index */
1450       if(bitmap != GETSOCK_BLANK)
1451         sockindex++; /* increase index if we need two entries */
1452 
1453       DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
1454 
1455       sock[sockindex] = conn->writesockfd;
1456     }
1457 
1458     bitmap |= GETSOCK_WRITESOCK(sockindex);
1459   }
1460 
1461   return bitmap;
1462 }
1463 
1464 /* Curl_init_CONNECT() gets called each time the handle switches to CONNECT
1465    which means this gets called once for each subsequent redirect etc */
Curl_init_CONNECT(struct Curl_easy * data)1466 void Curl_init_CONNECT(struct Curl_easy *data)
1467 {
1468   data->state.fread_func = data->set.fread_func_set;
1469   data->state.in = data->set.in_set;
1470 }
1471 
1472 /*
1473  * Curl_pretransfer() is called immediately before a transfer starts, and only
1474  * once for one transfer no matter if it has redirects or do multi-pass
1475  * authentication etc.
1476  */
Curl_pretransfer(struct Curl_easy * data)1477 CURLcode Curl_pretransfer(struct Curl_easy *data)
1478 {
1479   CURLcode result;
1480 
1481   if(!data->change.url && !data->set.uh) {
1482     /* we can't do anything without URL */
1483     failf(data, "No URL set!");
1484     return CURLE_URL_MALFORMAT;
1485   }
1486 
1487   /* since the URL may have been redirected in a previous use of this handle */
1488   if(data->change.url_alloc) {
1489     /* the already set URL is allocated, free it first! */
1490     Curl_safefree(data->change.url);
1491     data->change.url_alloc = FALSE;
1492   }
1493 
1494   if(!data->change.url && data->set.uh) {
1495     CURLUcode uc;
1496     uc = curl_url_get(data->set.uh,
1497                         CURLUPART_URL, &data->set.str[STRING_SET_URL], 0);
1498     if(uc) {
1499       failf(data, "No URL set!");
1500       return CURLE_URL_MALFORMAT;
1501     }
1502   }
1503 
1504   data->change.url = data->set.str[STRING_SET_URL];
1505 
1506   /* Init the SSL session ID cache here. We do it here since we want to do it
1507      after the *_setopt() calls (that could specify the size of the cache) but
1508      before any transfer takes place. */
1509   result = Curl_ssl_initsessions(data, data->set.general_ssl.max_ssl_sessions);
1510   if(result)
1511     return result;
1512 
1513   data->state.wildcardmatch = data->set.wildcard_enabled;
1514   data->set.followlocation = 0; /* reset the location-follow counter */
1515   data->state.this_is_a_follow = FALSE; /* reset this */
1516   data->state.errorbuf = FALSE; /* no error has occurred */
1517   data->state.httpversion = 0; /* don't assume any particular server version */
1518 
1519   data->state.authproblem = FALSE;
1520   data->state.authhost.want = data->set.httpauth;
1521   data->state.authproxy.want = data->set.proxyauth;
1522   Curl_safefree(data->info.wouldredirect);
1523   data->info.wouldredirect = NULL;
1524 
1525   if(data->set.httpreq == HTTPREQ_PUT)
1526     data->state.infilesize = data->set.filesize;
1527   else if((data->set.httpreq != HTTPREQ_GET) &&
1528           (data->set.httpreq != HTTPREQ_HEAD)) {
1529     data->state.infilesize = data->set.postfieldsize;
1530     if(data->set.postfields && (data->state.infilesize == -1))
1531       data->state.infilesize = (curl_off_t)strlen(data->set.postfields);
1532   }
1533   else
1534     data->state.infilesize = 0;
1535 
1536   /* If there is a list of cookie files to read, do it now! */
1537   if(data->change.cookielist)
1538     Curl_cookie_loadfiles(data);
1539 
1540   /* If there is a list of host pairs to deal with */
1541   if(data->change.resolve)
1542     result = Curl_loadhostpairs(data);
1543 
1544   if(!result) {
1545     /* Allow data->set.use_port to set which port to use. This needs to be
1546      * disabled for example when we follow Location: headers to URLs using
1547      * different ports! */
1548     data->state.allow_port = TRUE;
1549 
1550 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1551     /*************************************************************
1552      * Tell signal handler to ignore SIGPIPE
1553      *************************************************************/
1554     if(!data->set.no_signal)
1555       data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1556 #endif
1557 
1558     Curl_initinfo(data); /* reset session-specific information "variables" */
1559     Curl_pgrsResetTransferSizes(data);
1560     Curl_pgrsStartNow(data);
1561 
1562     /* In case the handle is re-used and an authentication method was picked
1563        in the session we need to make sure we only use the one(s) we now
1564        consider to be fine */
1565     data->state.authhost.picked &= data->state.authhost.want;
1566     data->state.authproxy.picked &= data->state.authproxy.want;
1567 
1568     if(data->state.wildcardmatch) {
1569       struct WildcardData *wc = &data->wildcard;
1570       if(wc->state < CURLWC_INIT) {
1571         result = Curl_wildcard_init(wc); /* init wildcard structures */
1572         if(result)
1573           return CURLE_OUT_OF_MEMORY;
1574       }
1575     }
1576   }
1577 
1578   return result;
1579 }
1580 
1581 /*
1582  * Curl_posttransfer() is called immediately after a transfer ends
1583  */
Curl_posttransfer(struct Curl_easy * data)1584 CURLcode Curl_posttransfer(struct Curl_easy *data)
1585 {
1586 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1587   /* restore the signal handler for SIGPIPE before we get back */
1588   if(!data->set.no_signal)
1589     signal(SIGPIPE, data->state.prev_signal);
1590 #else
1591   (void)data; /* unused parameter */
1592 #endif
1593 
1594   return CURLE_OK;
1595 }
1596 
1597 /*
1598  * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1599  * as given by the remote server and set up the new URL to request.
1600  *
1601  * This function DOES NOT FREE the given url.
1602  */
Curl_follow(struct Curl_easy * data,char * newurl,followtype type)1603 CURLcode Curl_follow(struct Curl_easy *data,
1604                      char *newurl,    /* the Location: string */
1605                      followtype type) /* see transfer.h */
1606 {
1607 #ifdef CURL_DISABLE_HTTP
1608   (void)data;
1609   (void)newurl;
1610   (void)type;
1611   /* Location: following will not happen when HTTP is disabled */
1612   return CURLE_TOO_MANY_REDIRECTS;
1613 #else
1614 
1615   /* Location: redirect */
1616   bool disallowport = FALSE;
1617   bool reachedmax = FALSE;
1618   CURLUcode uc;
1619 
1620   if(type == FOLLOW_REDIR) {
1621     if((data->set.maxredirs != -1) &&
1622        (data->set.followlocation >= data->set.maxredirs)) {
1623       reachedmax = TRUE;
1624       type = FOLLOW_FAKE; /* switch to fake to store the would-be-redirected
1625                              to URL */
1626     }
1627     else {
1628       /* mark the next request as a followed location: */
1629       data->state.this_is_a_follow = TRUE;
1630 
1631       data->set.followlocation++; /* count location-followers */
1632 
1633       if(data->set.http_auto_referer) {
1634         /* We are asked to automatically set the previous URL as the referer
1635            when we get the next URL. We pick the ->url field, which may or may
1636            not be 100% correct */
1637 
1638         if(data->change.referer_alloc) {
1639           Curl_safefree(data->change.referer);
1640           data->change.referer_alloc = FALSE;
1641         }
1642 
1643         data->change.referer = strdup(data->change.url);
1644         if(!data->change.referer)
1645           return CURLE_OUT_OF_MEMORY;
1646         data->change.referer_alloc = TRUE; /* yes, free this later */
1647       }
1648     }
1649   }
1650 
1651   if(Curl_is_absolute_url(newurl, NULL, MAX_SCHEME_LEN))
1652     /* This is an absolute URL, don't allow the custom port number */
1653     disallowport = TRUE;
1654 
1655   DEBUGASSERT(data->state.uh);
1656   uc = curl_url_set(data->state.uh, CURLUPART_URL, newurl,
1657                     (type == FOLLOW_FAKE) ? CURLU_NON_SUPPORT_SCHEME : 0);
1658   if(uc) {
1659     if(type != FOLLOW_FAKE)
1660       return Curl_uc_to_curlcode(uc);
1661 
1662     /* the URL could not be parsed for some reason, but since this is FAKE
1663        mode, just duplicate the field as-is */
1664     newurl = strdup(newurl);
1665     if(!newurl)
1666       return CURLE_OUT_OF_MEMORY;
1667   }
1668   else {
1669 
1670     uc = curl_url_get(data->state.uh, CURLUPART_URL, &newurl, 0);
1671     if(uc)
1672       return Curl_uc_to_curlcode(uc);
1673   }
1674 
1675   if(type == FOLLOW_FAKE) {
1676     /* we're only figuring out the new url if we would've followed locations
1677        but now we're done so we can get out! */
1678     data->info.wouldredirect = newurl;
1679 
1680     if(reachedmax) {
1681       failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
1682       return CURLE_TOO_MANY_REDIRECTS;
1683     }
1684     return CURLE_OK;
1685   }
1686 
1687   if(disallowport)
1688     data->state.allow_port = FALSE;
1689 
1690   if(data->change.url_alloc)
1691     Curl_safefree(data->change.url);
1692 
1693   data->change.url = newurl;
1694   data->change.url_alloc = TRUE;
1695 
1696   infof(data, "Issue another request to this URL: '%s'\n", data->change.url);
1697 
1698   /*
1699    * We get here when the HTTP code is 300-399 (and 401). We need to perform
1700    * differently based on exactly what return code there was.
1701    *
1702    * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
1703    * a HTTP (proxy-) authentication scheme other than Basic.
1704    */
1705   switch(data->info.httpcode) {
1706     /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
1707        Authorization: XXXX header in the HTTP request code snippet */
1708     /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
1709        Proxy-Authorization: XXXX header in the HTTP request code snippet */
1710     /* 300 - Multiple Choices */
1711     /* 306 - Not used */
1712     /* 307 - Temporary Redirect */
1713   default:  /* for all above (and the unknown ones) */
1714     /* Some codes are explicitly mentioned since I've checked RFC2616 and they
1715      * seem to be OK to POST to.
1716      */
1717     break;
1718   case 301: /* Moved Permanently */
1719     /* (quote from RFC7231, section 6.4.2)
1720      *
1721      * Note: For historical reasons, a user agent MAY change the request
1722      * method from POST to GET for the subsequent request.  If this
1723      * behavior is undesired, the 307 (Temporary Redirect) status code
1724      * can be used instead.
1725      *
1726      * ----
1727      *
1728      * Many webservers expect this, so these servers often answers to a POST
1729      * request with an error page. To be sure that libcurl gets the page that
1730      * most user agents would get, libcurl has to force GET.
1731      *
1732      * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1733      * can be overridden with CURLOPT_POSTREDIR.
1734      */
1735     if((data->set.httpreq == HTTPREQ_POST
1736         || data->set.httpreq == HTTPREQ_POST_FORM
1737         || data->set.httpreq == HTTPREQ_POST_MIME)
1738        && !(data->set.keep_post & CURL_REDIR_POST_301)) {
1739       infof(data, "Switch from POST to GET\n");
1740       data->set.httpreq = HTTPREQ_GET;
1741     }
1742     break;
1743   case 302: /* Found */
1744     /* (quote from RFC7231, section 6.4.3)
1745      *
1746      * Note: For historical reasons, a user agent MAY change the request
1747      * method from POST to GET for the subsequent request.  If this
1748      * behavior is undesired, the 307 (Temporary Redirect) status code
1749      * can be used instead.
1750      *
1751      * ----
1752      *
1753      * Many webservers expect this, so these servers often answers to a POST
1754      * request with an error page. To be sure that libcurl gets the page that
1755      * most user agents would get, libcurl has to force GET.
1756      *
1757      * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1758      * can be overridden with CURLOPT_POSTREDIR.
1759      */
1760     if((data->set.httpreq == HTTPREQ_POST
1761         || data->set.httpreq == HTTPREQ_POST_FORM
1762         || data->set.httpreq == HTTPREQ_POST_MIME)
1763        && !(data->set.keep_post & CURL_REDIR_POST_302)) {
1764       infof(data, "Switch from POST to GET\n");
1765       data->set.httpreq = HTTPREQ_GET;
1766     }
1767     break;
1768 
1769   case 303: /* See Other */
1770     /* Disable both types of POSTs, unless the user explicitly
1771        asks for POST after POST */
1772     if(data->set.httpreq != HTTPREQ_GET
1773       && !(data->set.keep_post & CURL_REDIR_POST_303)) {
1774       data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
1775       infof(data, "Disables POST, goes with %s\n",
1776             data->set.opt_no_body?"HEAD":"GET");
1777     }
1778     break;
1779   case 304: /* Not Modified */
1780     /* 304 means we did a conditional request and it was "Not modified".
1781      * We shouldn't get any Location: header in this response!
1782      */
1783     break;
1784   case 305: /* Use Proxy */
1785     /* (quote from RFC2616, section 10.3.6):
1786      * "The requested resource MUST be accessed through the proxy given
1787      * by the Location field. The Location field gives the URI of the
1788      * proxy.  The recipient is expected to repeat this single request
1789      * via the proxy. 305 responses MUST only be generated by origin
1790      * servers."
1791      */
1792     break;
1793   }
1794   Curl_pgrsTime(data, TIMER_REDIRECT);
1795   Curl_pgrsResetTransferSizes(data);
1796 
1797   return CURLE_OK;
1798 #endif /* CURL_DISABLE_HTTP */
1799 }
1800 
1801 /* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.
1802 
1803    NOTE: that the *url is malloc()ed. */
Curl_retry_request(struct connectdata * conn,char ** url)1804 CURLcode Curl_retry_request(struct connectdata *conn,
1805                             char **url)
1806 {
1807   struct Curl_easy *data = conn->data;
1808   bool retry = FALSE;
1809   *url = NULL;
1810 
1811   /* if we're talking upload, we can't do the checks below, unless the protocol
1812      is HTTP as when uploading over HTTP we will still get a response */
1813   if(data->set.upload &&
1814      !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
1815     return CURLE_OK;
1816 
1817   if((data->req.bytecount + data->req.headerbytecount == 0) &&
1818       conn->bits.reuse &&
1819       (!data->set.opt_no_body
1820         || (conn->handler->protocol & PROTO_FAMILY_HTTP)) &&
1821       (data->set.rtspreq != RTSPREQ_RECEIVE))
1822     /* We got no data, we attempted to re-use a connection. For HTTP this
1823        can be a retry so we try again regardless if we expected a body.
1824        For other protocols we only try again only if we expected a body.
1825 
1826        This might happen if the connection was left alive when we were
1827        done using it before, but that was closed when we wanted to read from
1828        it again. Bad luck. Retry the same request on a fresh connect! */
1829     retry = TRUE;
1830   else if(data->state.refused_stream &&
1831           (data->req.bytecount + data->req.headerbytecount == 0) ) {
1832     /* This was sent on a refused stream, safe to rerun. A refused stream
1833        error can typically only happen on HTTP/2 level if the stream is safe
1834        to issue again, but the nghttp2 API can deliver the message to other
1835        streams as well, which is why this adds the check the data counters
1836        too. */
1837     infof(conn->data, "REFUSED_STREAM, retrying a fresh connect\n");
1838     data->state.refused_stream = FALSE; /* clear again */
1839     retry = TRUE;
1840   }
1841   if(retry) {
1842     infof(conn->data, "Connection died, retrying a fresh connect\n");
1843     *url = strdup(conn->data->change.url);
1844     if(!*url)
1845       return CURLE_OUT_OF_MEMORY;
1846 
1847     connclose(conn, "retry"); /* close this connection */
1848     conn->bits.retry = TRUE; /* mark this as a connection we're about
1849                                 to retry. Marking it this way should
1850                                 prevent i.e HTTP transfers to return
1851                                 error just because nothing has been
1852                                 transferred! */
1853 
1854 
1855     if(conn->handler->protocol&PROTO_FAMILY_HTTP) {
1856       if(data->req.writebytecount) {
1857         CURLcode result = Curl_readrewind(conn);
1858         if(result) {
1859           Curl_safefree(*url);
1860           return result;
1861         }
1862       }
1863     }
1864   }
1865   return CURLE_OK;
1866 }
1867 
1868 /*
1869  * Curl_setup_transfer() is called to setup some basic properties for the
1870  * upcoming transfer.
1871  */
1872 void
Curl_setup_transfer(struct Curl_easy * data,int sockindex,curl_off_t size,bool getheader,int writesockindex)1873 Curl_setup_transfer(
1874   struct Curl_easy *data,   /* transfer */
1875   int sockindex,            /* socket index to read from or -1 */
1876   curl_off_t size,          /* -1 if unknown at this point */
1877   bool getheader,           /* TRUE if header parsing is wanted */
1878   int writesockindex        /* socket index to write to, it may very well be
1879                                the same we read from. -1 disables */
1880   )
1881 {
1882   struct SingleRequest *k = &data->req;
1883   struct connectdata *conn = data->conn;
1884   DEBUGASSERT(conn != NULL);
1885   DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
1886 
1887   if(conn->bits.multiplex || conn->httpversion == 20) {
1888     /* when multiplexing, the read/write sockets need to be the same! */
1889     conn->sockfd = sockindex == -1 ?
1890       ((writesockindex == -1 ? CURL_SOCKET_BAD : conn->sock[writesockindex])) :
1891       conn->sock[sockindex];
1892     conn->writesockfd = conn->sockfd;
1893   }
1894   else {
1895     conn->sockfd = sockindex == -1 ?
1896       CURL_SOCKET_BAD : conn->sock[sockindex];
1897     conn->writesockfd = writesockindex == -1 ?
1898       CURL_SOCKET_BAD:conn->sock[writesockindex];
1899   }
1900   k->getheader = getheader;
1901 
1902   k->size = size;
1903 
1904   /* The code sequence below is placed in this function just because all
1905      necessary input is not always known in do_complete() as this function may
1906      be called after that */
1907 
1908   if(!k->getheader) {
1909     k->header = FALSE;
1910     if(size > 0)
1911       Curl_pgrsSetDownloadSize(data, size);
1912   }
1913   /* we want header and/or body, if neither then don't do this! */
1914   if(k->getheader || !data->set.opt_no_body) {
1915 
1916     if(sockindex != -1)
1917       k->keepon |= KEEP_RECV;
1918 
1919     if(writesockindex != -1) {
1920       struct HTTP *http = data->req.protop;
1921       /* HTTP 1.1 magic:
1922 
1923          Even if we require a 100-return code before uploading data, we might
1924          need to write data before that since the REQUEST may not have been
1925          finished sent off just yet.
1926 
1927          Thus, we must check if the request has been sent before we set the
1928          state info where we wait for the 100-return code
1929       */
1930       if((data->state.expect100header) &&
1931          (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
1932          (http->sending == HTTPSEND_BODY)) {
1933         /* wait with write until we either got 100-continue or a timeout */
1934         k->exp100 = EXP100_AWAITING_CONTINUE;
1935         k->start100 = Curl_now();
1936 
1937         /* Set a timeout for the multi interface. Add the inaccuracy margin so
1938            that we don't fire slightly too early and get denied to run. */
1939         Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
1940       }
1941       else {
1942         if(data->state.expect100header)
1943           /* when we've sent off the rest of the headers, we must await a
1944              100-continue but first finish sending the request */
1945           k->exp100 = EXP100_SENDING_REQUEST;
1946 
1947         /* enable the write bit when we're not waiting for continue */
1948         k->keepon |= KEEP_SEND;
1949       }
1950     } /* if(writesockindex != -1) */
1951   } /* if(k->getheader || !data->set.opt_no_body) */
1952 
1953 }
1954