1 /***************************************************************************
3 * Project ___| | | | _ \| |
5 * | (__| |_| | _ <| |___
6 * \___|\___/|_| \_\_____|
8 * Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at https://curl.haxx.se/docs/copyright.html.
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
21 ***************************************************************************/
23 #include "curl_setup.h"
24 #include "strtoofft.h"
26 #ifdef HAVE_NETINET_IN_H
27 #include <netinet/in.h>
32 #ifdef HAVE_ARPA_INET_H
33 #include <arpa/inet.h>
38 #ifdef HAVE_SYS_IOCTL_H
39 #include <sys/ioctl.h>
45 #ifdef HAVE_SYS_PARAM_H
46 #include <sys/param.h>
49 #ifdef HAVE_SYS_SELECT_H
50 #include <sys/select.h>
54 #error "We can't compile without socket() support!"
58 #include <curl/curl.h>
61 #include "content_encoding.h"
65 #include "speedcheck.h"
70 #include "vtls/vtls.h"
74 #include "non-ascii.h"
79 /* The last 3 #include files should be in this order */
80 #include "curl_printf.h"
81 #include "curl_memory.h"
84 #if !defined(CURL_DISABLE_HTTP) || !defined(CURL_DISABLE_SMTP) || \
85 !defined(CURL_DISABLE_IMAP)
87 * checkheaders() checks the linked list of custom headers for a
88 * particular header (prefix).
90 * Returns a pointer to the first matching header or NULL if none matched.
92 char *Curl_checkheaders(const struct connectdata *conn,
93 const char *thisheader)
95 struct curl_slist *head;
96 size_t thislen = strlen(thisheader);
97 struct Curl_easy *data = conn->data;
99 for(head = data->set.headers; head; head = head->next) {
100 if(strncasecompare(head->data, thisheader, thislen))
109 * This function will call the read callback to fill our buffer with data
/*
 * Curl_fillreadbuffer() calls the application's read callback
 * (data->state.fread_func) to fill the upload buffer with at most 'bytes'
 * bytes, handles the CURL_READFUNC_ABORT / CURL_READFUNC_PAUSE return
 * codes, adds chunked Transfer-Encoding framing when requested, and (on
 * CURL_DOES_CONVERSIONS platforms) translates the data to the network
 * encoding.  *nreadp receives the number of bytes made ready to send.
 *
 * NOTE(review): the original file's line numbers are embedded at the start
 * of every line below; gaps in that numbering mean lines were elided by the
 * extraction.  Text is preserved verbatim — do not treat it as compilable.
 */
112 CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
114 struct Curl_easy *data = conn->data;
115 size_t buffersize = (size_t)bytes;
117 #ifdef CURL_DOES_CONVERSIONS
118 bool sending_http_headers = FALSE;
120 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
121 const struct HTTP *http = data->req.protop;
123 if(http->sending == HTTPSEND_REQUEST)
124 /* We're sending the HTTP request headers, not the data.
125 Remember that so we don't re-translate them into garbage. */
126 sending_http_headers = TRUE;
/* For chunked uploads, reserve room in the buffer for the hex length
   prefix and the trailing CRLFs so the payload can be framed in place. */
130 if(data->req.upload_chunky) {
131 /* if chunked Transfer-Encoding */
132 buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
133 data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
/* Invoke the application's read callback to produce upload data. */
136 /* this function returns a size_t, so we typecast to int to prevent warnings
137 with picky compilers */
138 nread = (int)data->state.fread_func(data->req.upload_fromhere, 1,
139 buffersize, data->state.in);
141 if(nread == CURL_READFUNC_ABORT) {
142 failf(data, "operation aborted by callback")
144 return CURLE_ABORTED_BY_CALLBACK;
146 if(nread == CURL_READFUNC_PAUSE) {
147 struct SingleRequest *k = &data->req;
149 if(conn->handler->flags & PROTOPT_NONETWORK) {
150 /* protocols that work without network cannot be paused. This is
151 actually only FILE:// just now, and it can't pause since the transfer
152 isn't done using the "normal" procedure. */
153 failf(data, "Read callback asked for PAUSE when not supported!");
154 return CURLE_READ_ERROR;
157 /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
158 k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
159 if(data->req.upload_chunky) {
160 /* Back out the preallocation done above */
161 data->req.upload_fromhere -= (8 + 2);
165 return CURLE_OK; /* nothing was read */
167 else if((size_t)nread > buffersize) {
168 /* the read function returned a too large value */
170 failf(data, "read function returned funny value");
171 return CURLE_READ_ERROR;
/* Chunked framing: prepend "<hexlen>CRLF" and append CRLF around the data
   just read (elided lines presumably hold the hexbuffer declaration and
   the closing of these branches — TODO confirm against upstream). */
174 if(!data->req.forbidchunk && data->req.upload_chunky) {
175 /* if chunked Transfer-Encoding
181 /* On non-ASCII platforms the <DATA> may or may not be
182 translated based on set.prefer_ascii while the protocol
183 portion must always be translated to the network encoding.
184 To further complicate matters, line end conversion might be
185 done later on, so we need to prevent CRLFs from becoming
186 CRCRLFs if that's the case. To do this we use bare LFs
187 here, knowing they'll become CRLFs later on.
191 const char *endofline_native;
192 const char *endofline_network;
196 #ifdef CURL_DO_LINEEND_CONV
197 (data->set.prefer_ascii) ||
200 /* \n will become \r\n later on */
201 endofline_native = "\n";
202 endofline_network = "\x0a";
205 endofline_native = "\r\n";
206 endofline_network = "\x0d\x0a";
208 hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
209 "%x%s", nread, endofline_native);
211 /* move buffer pointer */
212 data->req.upload_fromhere -= hexlen;
215 /* copy the prefix to the buffer, leaving out the NUL */
216 memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
218 /* always append ASCII CRLF to the data */
219 memcpy(data->req.upload_fromhere + nread,
221 strlen(endofline_network));
223 #ifdef CURL_DOES_CONVERSIONS
227 if(data->set.prefer_ascii)
228 /* translate the protocol and data */
231 /* just translate the protocol portion */
232 length = (int)strlen(hexbuffer);
233 result = Curl_convert_to_network(data, data->req.upload_fromhere,
235 /* Curl_convert_to_network calls failf if unsuccessful */
239 #endif /* CURL_DOES_CONVERSIONS */
/* A zero-length chunk is the terminating chunk: the upload is finished
   once this buffer has been sent. */
241 if((nread - hexlen) == 0)
242 /* mark this as done once this chunk is transferred */
243 data->req.upload_done = TRUE;
245 nread += (int)strlen(endofline_native); /* for the added end of line */
247 #ifdef CURL_DOES_CONVERSIONS
248 else if((data->set.prefer_ascii) && (!sending_http_headers)) {
250 result = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
251 /* Curl_convert_to_network calls failf if unsuccessful */
255 #endif /* CURL_DOES_CONVERSIONS */
264 * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
265 * POST/PUT with multi-pass authentication when a sending was denied and a
266 * resend is necessary.
/*
 * Curl_readrewind() rewinds the read stream.  Typically used for HTTP
 * POST/PUT with multi-pass authentication when a send was denied and a
 * resend is necessary.  Tries, in order: mime rewind for mime/form posts,
 * the app's seek callback, the app's ioctl callback, and finally a plain
 * fseek() when the default fread() callback is in use; fails with
 * CURLE_SEND_FAIL_REWIND when none of those succeeds.
 *
 * NOTE(review): original line numbers are embedded at the start of each
 * line; numbering gaps mark lines elided by extraction.  Verbatim text.
 */
268 CURLcode Curl_readrewind(struct connectdata *conn)
270 struct Curl_easy *data = conn->data;
271 curl_mimepart *mimepart = &data->set.mimepost;
273 conn->bits.rewindaftersend = FALSE; /* we rewind now */
275 /* explicitly switch off sending data on this connection now since we are
276 about to restart a new transfer and thus we want to avoid inadvertently
277 sending more data on the existing connection until the next transfer
279 data->req.keepon &= ~KEEP_SEND;
281 /* We have sent away data. If not using CURLOPT_POSTFIELDS or
282 CURLOPT_HTTPPOST, call app to rewind
284 if(conn->handler->protocol & PROTO_FAMILY_HTTP) {
285 struct HTTP *http = data->req.protop;
/* prefer the per-request mime part over the easy-handle default */
288 mimepart = http->sendit;
/* CURLOPT_POSTFIELDS data needs no rewind — libcurl holds the buffer */
290 if(data->set.postfields)
292 else if(data->set.httpreq == HTTPREQ_POST_MIME ||
293 data->set.httpreq == HTTPREQ_POST_FORM) {
294 if(Curl_mime_rewind(mimepart)) {
295 failf(data, "Cannot rewind mime/post data");
296 return CURLE_SEND_FAIL_REWIND;
/* no mime post: fall back to the application-provided callbacks */
300 if(data->set.seek_func) {
303 err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
305 failf(data, "seek callback returned error %d", (int)err);
306 return CURLE_SEND_FAIL_REWIND;
309 else if(data->set.ioctl_func) {
312 err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
313 data->set.ioctl_client);
314 infof(data, "the ioctl callback returned %d\n", (int)err);
317 /* FIXME: convert to a human readable error message */
318 failf(data, "ioctl callback returned error %d", (int)err);
319 return CURLE_SEND_FAIL_REWIND;
323 /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
324 given FILE * stream and we can actually attempt to rewind that
325 ourselves with fseek() */
326 if(data->state.fread_func == (curl_read_callback)fread) {
327 if(-1 != fseek(data->state.in, 0, SEEK_SET))
328 /* successful rewind */
332 /* no callback set or failure above, makes us fail at once */
333 failf(data, "necessary data rewind wasn't possible");
334 return CURLE_SEND_FAIL_REWIND;
/*
 * data_pending() returns nonzero when more received data may be waiting to
 * be read without the socket signalling readable: SCP/SFTP (libssh2 buffers
 * internally), TLS-layer buffered bytes, and — with nghttp2 — any HTTP/2
 * connection, so stream-close can be signalled even after all bytes were
 * consumed.
 *
 * NOTE(review): the #else/#endif lines of the USE_NGHTTP2 conditional were
 * elided by extraction (numbering gap between 354 and 356); line 356 below
 * presumably belongs to the non-nghttp2 branch — confirm upstream.
 */
340 static int data_pending(const struct connectdata *conn)
342 /* in the case of libssh2, we can never be really sure that we have emptied
343 its internal buffers so we MUST always try until we get EAGAIN back */
344 return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
345 #if defined(USE_NGHTTP2)
346 Curl_ssl_data_pending(conn, FIRSTSOCKET) ||
347 /* For HTTP/2, we may read up everything including responde body
348 with header fields in Curl_http_readwrite_headers. If no
349 content-length is provided, curl waits for the connection
350 close, which we emulate it using conn->proto.httpc.closed =
351 TRUE. The thing is if we read everything, then http2_recv won't
352 be called and we cannot signal the HTTP/2 stream has closed. As
353 a workaround, we return nonzero here to call http2_recv. */
354 ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion == 20);
356 Curl_ssl_data_pending(conn, FIRSTSOCKET);
/*
 * read_rewind() pushes 'thismuch' already-consumed bytes back into the
 * connection's master buffer by moving read_pos backwards, so the next
 * read pass sees them again (used when pipelining over-reads).  Sets
 * bits.stream_was_rewound so callers know buffered data awaits.
 *
 * NOTE(review): the second parameter line (presumably "size_t thismuch")
 * and the debug-build #ifdef scaffolding around the buffer dump were
 * elided by extraction — confirm upstream.
 */
360 static void read_rewind(struct connectdata *conn,
363 DEBUGASSERT(conn->read_pos >= thismuch);
365 conn->read_pos -= thismuch;
366 conn->bits.stream_was_rewound = TRUE;
/* debug-only: show what now sits unread in the master buffer */
373 show = CURLMIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
374 if(conn->master_buffer) {
375 memcpy(buf, conn->master_buffer + conn->read_pos, show);
382 DEBUGF(infof(conn->data,
383 "Buffer after stream rewind (read_pos = %zu): [%s]\n",
384 conn->read_pos, buf));
390 * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
391 * remote document with the time provided by CURLOPT_TIMEVAL
393 bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc)
395 if((timeofdoc == 0) || (data->set.timevalue == 0))
398 switch(data->set.timecondition) {
399 case CURL_TIMECOND_IFMODSINCE:
401 if(timeofdoc <= data->set.timevalue) {
403 "The requested document is not new enough\n");
404 data->info.timecond = TRUE;
408 case CURL_TIMECOND_IFUNMODSINCE:
409 if(timeofdoc >= data->set.timevalue) {
411 "The requested document is not old enough\n");
412 data->info.timecond = TRUE;
422 * Go ahead and do a read if we have a readable socket or if
423 * the stream was rewound (in which case we have data in a
426 * return '*comeback' TRUE if we didn't properly drain the socket so this
427 * function should get called again without select() or similar in between!
/*
 * readwrite_data() — the receive side of a transfer: loops reading from
 * the socket while data is pending, parses headers, de-chunks, enforces
 * maxdownload (rewinding excess when pipelining), applies content
 * decoding, and hands body bytes to the client write callback.
 *
 * NOTE(review): original line numbers are embedded at the start of each
 * line; gaps in the numbering mark lines elided by extraction (loop
 * braces, declarations, else branches).  Text preserved verbatim — not
 * compilable as shown.
 */
429 static CURLcode readwrite_data(struct Curl_easy *data,
430 struct connectdata *conn,
431 struct SingleRequest *k,
432 int *didwhat, bool *done,
435 CURLcode result = CURLE_OK;
436 ssize_t nread; /* number of bytes read */
437 size_t excess = 0; /* excess bytes read */
438 bool is_empty_data = FALSE;
439 bool readmore = FALSE; /* used by RTP to signal for more data */
445 /* This is where we loop until we have read everything there is to
446 read or we get a CURLE_AGAIN */
448 size_t buffersize = data->set.buffer_size;
449 size_t bytestoread = buffersize;
452 #if defined(USE_NGHTTP2)
453 /* For HTTP/2, read data without caring about the content
454 length. This is safe because body in HTTP/2 is always
455 segmented thanks to its framing layer. Meanwhile, we have to
456 call Curl_read to ensure that http2_handle_stream_close is
457 called when we read all incoming bytes for a particular
459 !((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
460 conn->httpversion == 20) &&
462 k->size != -1 && !k->header) {
463 /* make sure we don't read "too much" if we can help it since we
464 might be pipelining and then someone else might want to read what
466 curl_off_t totalleft = k->size - k->bytecount;
467 if(totalleft < (curl_off_t)bytestoread)
468 bytestoread = (size_t)totalleft;
472 /* receive data from the network! */
473 result = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
475 /* read would've blocked */
476 if(CURLE_AGAIN == result)
477 break; /* get out of loop */
483 /* read nothing but since we wanted nothing we consider this an OK
484 situation to proceed from */
485 DEBUGF(infof(data, "readwrite_data: we're done!\n"));
/* first bytes of the transfer: stamp the start-transfer timer */
489 if((k->bytecount == 0) && (k->writebytecount == 0)) {
490 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
491 if(k->exp100 > EXP100_SEND_DATA)
492 /* set time stamp to compare with when waiting for the 100 */
493 k->start100 = Curl_tvnow();
496 *didwhat |= KEEP_RECV;
497 /* indicates data of zero size, i.e. empty file */
498 is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
500 /* NUL terminate, allowing string ops to be used */
501 if(0 < nread || is_empty_data) {
504 else if(0 >= nread) {
505 /* if we receive 0 or less here, the server closed the connection
506 and we bail out from this! */
507 DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
508 k->keepon &= ~KEEP_RECV;
512 /* Default buffer to use when we write the buffer, it may be changed
513 in the flow below before the actual storing is done. */
/* per-protocol pre-processing hook (e.g. RTSP interleave) */
516 if(conn->handler->readwrite) {
517 result = conn->handler->readwrite(data, conn, &nread, &readmore);
524 #ifndef CURL_DISABLE_HTTP
525 /* Since this is a two-state thing, we check if we are parsing
526 headers at the moment or not. */
528 /* we are in parse-the-header-mode */
529 bool stop_reading = FALSE;
530 result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
534 if(conn->handler->readwrite &&
535 (k->maxdownload <= 0 && nread > 0)) {
536 result = conn->handler->readwrite(data, conn, &nread, &readmore);
544 /* We've stopped dealing with input, get out of the do-while loop */
/* zero-length body with leftover bytes: rewind only when pipelining */
547 if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
549 "Rewinding stream by : %zd"
550 " bytes on url %s (zero-length body)\n",
551 nread, data->state.path);
552 read_rewind(conn, (size_t)nread);
556 "Excess found in a non pipelined read:"
558 " url = %s (zero-length body)\n",
559 nread, data->state.path);
566 #endif /* CURL_DISABLE_HTTP */
569 /* This is not an 'else if' since it may be a rest from the header
570 parsing, where the beginning of the buffer is headers and the end
572 if(k->str && !k->header && (nread > 0 || is_empty_data)) {
574 if(data->set.opt_no_body) {
575 /* data arrives although we want none, bail out */
576 streamclose(conn, "ignoring body");
578 return CURLE_WEIRD_SERVER_REPLY;
581 #ifndef CURL_DISABLE_HTTP
582 if(0 == k->bodywrites && !is_empty_data) {
583 /* These checks are only made the first time we are about to
584 write a piece of the body */
585 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
586 /* HTTP-only checks */
588 if(data->req.newurl) {
589 if(conn->bits.close) {
590 /* Abort after the headers if "follow Location" is set
591 and we're set to close anyway. */
592 k->keepon &= ~KEEP_RECV;
596 /* We have a new url to load, but since we want to be able
597 to re-use this connection properly, we read the full
598 response in "ignore more" */
599 k->ignorebody = TRUE;
600 infof(data, "Ignoring the response-body\n");
602 if(data->state.resume_from && !k->content_range &&
603 (data->set.httpreq == HTTPREQ_GET) &&
606 if(k->size == data->state.resume_from) {
607 /* The resume point is at the end of file, consider this fine
608 even if it doesn't allow resume from here. */
609 infof(data, "The entire document is already downloaded");
610 connclose(conn, "already downloaded");
612 k->keepon &= ~KEEP_RECV;
617 /* we wanted to resume a download, although the server doesn't
618 * seem to support this and we did this with a GET (if it
619 * wasn't a GET we did a POST or PUT resume) */
620 failf(data, "HTTP server doesn't seem to support "
621 "byte ranges. Cannot resume.");
622 return CURLE_RANGE_ERROR;
625 if(data->set.timecondition && !data->state.range) {
626 /* A time condition has been set AND no ranges have been
627 requested. This seems to be what chapter 13.3.4 of
628 RFC 2616 defines to be the correct action for a
631 if(!Curl_meets_timecondition(data, k->timeofdoc)) {
633 /* We're simulating a http 304 from server so we return
634 what should have been returned from the server */
635 data->info.httpcode = 304;
636 infof(data, "Simulate a HTTP 304 response!\n");
637 /* we abort the transfer before it is completed == we ruin the
638 re-use ability. Close the connection */
639 connclose(conn, "Simulated 304 handling");
642 } /* we have a time condition */
644 } /* this is HTTP or RTSP */
645 } /* this is the first time we write a body part */
646 #endif /* CURL_DISABLE_HTTP */
650 /* pass data to the debug function before it gets "dechunked" */
651 if(data->set.verbose) {
653 Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
654 (size_t)k->hbuflen, conn);
655 if(k->badheader == HEADER_PARTHEADER)
656 Curl_debug(data, CURLINFO_DATA_IN,
657 k->str, (size_t)nread, conn);
660 Curl_debug(data, CURLINFO_DATA_IN,
661 k->str, (size_t)nread, conn);
664 #ifndef CURL_DISABLE_HTTP
667 * Here comes a chunked transfer flying and we need to decode this
668 * properly. While the name says read, this function both reads
669 * and writes away the data. The returned 'nread' holds the number
670 * of actual data it wrote to the client.
674 Curl_httpchunk_read(conn, k->str, nread, &nread);
676 if(CHUNKE_OK < res) {
677 if(CHUNKE_WRITE_ERROR == res) {
678 failf(data, "Failed writing data");
679 return CURLE_WRITE_ERROR;
681 failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
682 return CURLE_RECV_ERROR;
684 if(CHUNKE_STOP == res) {
686 /* we're done reading chunks! */
687 k->keepon &= ~KEEP_RECV; /* read no more */
689 /* There are now possibly N number of bytes at the end of the
690 str buffer that weren't written to the client.
692 We DO care about this data if we are pipelining.
693 Push it back to be read on the next pass. */
695 dataleft = conn->chunk.dataleft;
697 infof(conn->data, "Leftovers after chunking: %zu bytes\n",
699 if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
700 /* only attempt the rewind if we truly are pipelining */
701 infof(conn->data, "Rewinding %zu bytes\n",dataleft);
702 read_rewind(conn, dataleft);
706 /* If it returned OK, we just keep going */
708 #endif /* CURL_DISABLE_HTTP */
710 /* Account for body content stored in the header buffer */
711 if(k->badheader && !k->ignorebody) {
712 DEBUGF(infof(data, "Increasing bytecount by %zu from hbuflen\n",
714 k->bytecount += k->hbuflen;
/* would this read take us past maxdownload?  Trim nread and, when
   pipelining, push the surplus back for the next response. */
717 if((-1 != k->maxdownload) &&
718 (k->bytecount + nread >= k->maxdownload)) {
720 excess = (size_t)(k->bytecount + nread - k->maxdownload);
721 if(excess > 0 && !k->ignorebody) {
722 if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
724 "Rewinding stream by : %zu"
725 " bytes on url %s (size = %" CURL_FORMAT_CURL_OFF_T
726 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
727 ", bytecount = %" CURL_FORMAT_CURL_OFF_T ", nread = %zd)\n",
728 excess, data->state.path,
729 k->size, k->maxdownload, k->bytecount, nread);
730 read_rewind(conn, excess);
734 "Excess found in a non pipelined read:"
736 ", size = %" CURL_FORMAT_CURL_OFF_T
737 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
738 ", bytecount = %" CURL_FORMAT_CURL_OFF_T "\n",
739 excess, k->size, k->maxdownload, k->bytecount);
743 nread = (ssize_t) (k->maxdownload - k->bytecount);
744 if(nread < 0) /* this should be unusual */
747 k->keepon &= ~KEEP_RECV; /* we're done reading */
750 k->bytecount += nread;
752 Curl_pgrsSetDownloadCounter(data, k->bytecount);
754 if(!k->chunk && (nread || k->badheader || is_empty_data)) {
755 /* If this is chunky transfer, it was already written */
757 if(k->badheader && !k->ignorebody) {
758 /* we parsed a piece of data wrongly assuming it was a header
759 and now we output it as body instead */
761 /* Don't let excess data pollute body writes */
762 if(k->maxdownload == -1 || (curl_off_t)k->hbuflen <= k->maxdownload)
763 result = Curl_client_write(conn, CLIENTWRITE_BODY,
764 data->state.headerbuff,
767 result = Curl_client_write(conn, CLIENTWRITE_BODY,
768 data->state.headerbuff,
769 (size_t)k->maxdownload);
774 if(k->badheader < HEADER_ALLBAD) {
775 /* This switch handles various content encodings. If there's an
776 error here, be sure to check over the almost identical code
778 Make sure that ALL_CONTENT_ENCODINGS contains all the
779 encodings handled here. */
781 switch(conn->data->set.http_ce_skip ?
782 IDENTITY : k->auto_decoding) {
785 /* This is the default when the server sends no
786 Content-Encoding header. See Curl_readwrite_init; the
787 memset() call initializes k->auto_decoding to zero. */
790 #ifndef CURL_DISABLE_POP3
791 if(conn->handler->protocol&PROTO_FAMILY_POP3)
792 result = Curl_pop3_write(conn, k->str, nread);
794 #endif /* CURL_DISABLE_POP3 */
796 result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
803 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
805 result = Curl_unencode_deflate_write(conn, k, nread);
809 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
811 result = Curl_unencode_gzip_write(conn, k, nread);
815 failf(data, "Unrecognized content encoding type. "
816 "libcurl understands `identity', `deflate' and `gzip' "
817 "content encodings.");
818 result = CURLE_BAD_CONTENT_ENCODING;
823 k->badheader = HEADER_NORMAL; /* taken care of now */
829 } /* if(!header and data to read) */
/* hand any non-rewound excess to the protocol handler (e.g. RTSP) */
831 if(conn->handler->readwrite &&
832 (excess > 0 && !conn->bits.stream_was_rewound)) {
833 /* Parse the excess data */
835 nread = (ssize_t)excess;
837 result = conn->handler->readwrite(data, conn, &nread, &readmore);
842 k->keepon |= KEEP_RECV; /* we're not done reading */
847 /* if we received nothing, the server closed the connection and we
849 k->keepon &= ~KEEP_RECV;
852 } while(data_pending(conn) && maxloops--);
/* loop budget exhausted while data remained: ask to be called again */
855 /* we mark it as read-again-please */
856 conn->cselect_bits = CURL_CSELECT_IN;
860 if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
862 /* When we've read the entire thing and the close bit is set, the server
863 may now close the connection. If there's now any kind of sending going
864 on from our side, we need to stop that immediately. */
865 infof(data, "we are done reading and this is set to close, stop send\n");
866 k->keepon &= ~KEEP_SEND; /* no writing anymore either */
872 static CURLcode done_sending(struct connectdata *conn,
873 struct SingleRequest *k)
875 k->keepon &= ~KEEP_SEND; /* we're done writing */
877 Curl_http2_done_sending(conn);
879 if(conn->bits.rewindaftersend) {
880 CURLcode result = Curl_readrewind(conn);
889 * Send data to upload to the server, when the socket is writable.
/*
 * readwrite_upload() — the send side of a transfer: fills the upload
 * buffer via Curl_fillreadbuffer() when it is empty, handles the
 * Expect: 100-continue state machine, performs optional LF->CRLF
 * conversion into a scratch buffer, SMTP dot-stuffing, then writes the
 * buffer to the socket and tracks partial writes.
 *
 * NOTE(review): original line numbers are embedded at the start of each
 * line; numbering gaps mark lines elided by extraction (braces, else
 * branches, declarations).  Text preserved verbatim — not compilable.
 */
891 static CURLcode readwrite_upload(struct Curl_easy *data,
892 struct connectdata *conn,
896 ssize_t bytes_written;
898 ssize_t nread; /* number of bytes read */
899 bool sending_http_headers = FALSE;
900 struct SingleRequest *k = &data->req;
902 if((k->bytecount == 0) && (k->writebytecount == 0))
903 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
905 *didwhat |= KEEP_SEND;
909 /* only read more data if there's no upload data already
910 present in the upload buffer */
911 if(0 == k->upload_present) {
912 /* init the "upload from here" pointer */
913 k->upload_fromhere = data->state.uploadbuffer;
915 if(!k->upload_done) {
916 /* HTTP pollution, this should be written nicer to become more
917 protocol agnostic. */
919 struct HTTP *http = k->protop;
921 if((k->exp100 == EXP100_SENDING_REQUEST) &&
922 (http->sending == HTTPSEND_BODY)) {
923 /* If this call is to send body data, we must take some action:
924 We have sent off the full HTTP 1.1 request, and we shall now
925 go into the Expect: 100 state and await such a header */
926 k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
927 k->keepon &= ~KEEP_SEND; /* disable writing */
928 k->start100 = Curl_tvnow(); /* timeout count starts now */
929 *didwhat &= ~KEEP_SEND; /* we didn't write anything actually */
931 /* set a timeout for the multi interface */
932 Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
936 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
937 if(http->sending == HTTPSEND_REQUEST)
938 /* We're sending the HTTP request headers, not the data.
939 Remember that so we don't change the line endings. */
940 sending_http_headers = TRUE;
942 sending_http_headers = FALSE;
/* pull up to UPLOAD_BUFSIZE fresh bytes from the read callback */
945 result = Curl_fillreadbuffer(conn, UPLOAD_BUFSIZE, &fillcount);
949 nread = (ssize_t)fillcount;
952 nread = 0; /* we're done uploading/reading */
954 if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
955 /* this is a paused transfer */
959 result = done_sending(conn, k);
965 /* store number of bytes available for upload */
966 k->upload_present = nread;
968 /* convert LF to CRLF if so asked */
969 if((!sending_http_headers) && (
970 #ifdef CURL_DO_LINEEND_CONV
971 /* always convert if we're FTPing in ASCII mode */
972 (data->set.prefer_ascii) ||
975 /* Do we need to allocate a scratch buffer? */
976 if(!data->state.scratch) {
977 data->state.scratch = malloc(2 * data->set.buffer_size);
978 if(!data->state.scratch) {
979 failf(data, "Failed to alloc scratch buffer!");
981 return CURLE_OUT_OF_MEMORY;
986 * ASCII/EBCDIC Note: This is presumably a text (not binary)
987 * transfer so the data should already be in ASCII.
988 * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
989 * must be used instead of the escape sequences \r & \n.
991 for(i = 0, si = 0; i < nread; i++, si++) {
992 if(k->upload_fromhere[i] == 0x0a) {
993 data->state.scratch[si++] = 0x0d;
994 data->state.scratch[si] = 0x0a;
995 if(!data->set.crlf) {
996 /* we're here only because FTP is in ASCII mode...
997 bump infilesize for the LF we just added */
998 if(data->state.infilesize != -1)
999 data->state.infilesize++;
1003 data->state.scratch[si] = k->upload_fromhere[i];
1007 /* only perform the special operation if we really did replace
1011 /* upload from the new (replaced) buffer instead */
1012 k->upload_fromhere = data->state.scratch;
1014 /* set the new amount too */
1015 k->upload_present = nread;
1019 #ifndef CURL_DISABLE_SMTP
1020 if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
1021 result = Curl_smtp_escape_eob(conn, nread);
1025 #endif /* CURL_DISABLE_SMTP */
1026 } /* if 0 == k->upload_present */
1028 /* We have a partial buffer left from a previous "round". Use
1029 that instead of reading more data */
1032 /* write to socket (send away data) */
1033 result = Curl_write(conn,
1034 conn->writesockfd, /* socket to send to */
1035 k->upload_fromhere, /* buffer pointer */
1036 k->upload_present, /* buffer size */
1037 &bytes_written); /* actually sent */
1042 if(data->set.verbose)
1043 /* show the data before we change the pointer upload_fromhere */
1044 Curl_debug(data, CURLINFO_DATA_OUT, k->upload_fromhere,
1045 (size_t)bytes_written, conn);
1047 k->writebytecount += bytes_written;
1049 if(k->writebytecount == data->state.infilesize) {
1050 /* we have sent all data we were supposed to */
1051 k->upload_done = TRUE;
1052 infof(data, "We are completely uploaded and fine\n");
1055 if(k->upload_present != bytes_written) {
1056 /* we only wrote a part of the buffer (if anything), deal with it! */
1058 /* store the amount of bytes left in the buffer to write */
1059 k->upload_present -= bytes_written;
1061 /* advance the pointer where to find the buffer when the next send
1063 k->upload_fromhere += bytes_written;
1066 /* we've uploaded that buffer now */
1067 k->upload_fromhere = data->state.uploadbuffer;
1068 k->upload_present = 0; /* no more bytes left */
1070 if(k->upload_done) {
1071 result = done_sending(conn, k);
1077 Curl_pgrsSetUploadCounter(data, k->writebytecount);
1079 } WHILE_FALSE; /* just to break out from! */
1085 * Curl_readwrite() is the low-level function to be called when data is to
1086 * be read and written to/from the connection.
1088 * return '*comeback' TRUE if we didn't properly drain the socket so this
1089 * function should get called again without select() or similar in between!
/*
 * Curl_readwrite() is the low-level function called when data is to be
 * read from / written to the connection: it checks socket readiness,
 * dispatches to readwrite_data()/readwrite_upload(), updates byte
 * counters and progress, handles the 100-continue wait timeout and
 * overall operation timeout, and finally performs end-of-transfer sanity
 * checks (partial file, unterminated chunked body) before setting *done.
 *
 * NOTE(review): original line numbers are embedded at the start of each
 * line; numbering gaps mark lines elided by extraction.  Text preserved
 * verbatim — not compilable as shown.
 */
1091 CURLcode Curl_readwrite(struct connectdata *conn,
1092 struct Curl_easy *data,
1096 struct SingleRequest *k = &data->req;
1100 curl_socket_t fd_read;
1101 curl_socket_t fd_write;
1102 int select_res = conn->cselect_bits;
1104 conn->cselect_bits = 0;
1106 /* only use the proper socket if the *_HOLD bit is not set simultaneously as
1107 then we are in rate limiting state in that transfer direction */
1109 if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
1110 fd_read = conn->sockfd;
1112 fd_read = CURL_SOCKET_BAD;
1114 if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
1115 fd_write = conn->writesockfd;
1117 fd_write = CURL_SOCKET_BAD;
/* pending buffered data forces a read regardless of socket state */
1119 if(conn->data->state.drain) {
1120 select_res |= CURL_CSELECT_IN;
1121 DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data\n"));
1124 if(!select_res) /* Call for select()/poll() only, if read/write/error
1125 status is not known. */
1126 select_res = Curl_socket_check(fd_read, CURL_SOCKET_BAD, fd_write, 0);
1128 if(select_res == CURL_CSELECT_ERR) {
1129 failf(data, "select/poll returned error");
1130 return CURLE_SEND_ERROR;
1133 /* We go ahead and do a read if we have a readable socket or if
1134 the stream was rewound (in which case we have data in a
1136 if((k->keepon & KEEP_RECV) &&
1137 ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {
1139 result = readwrite_data(data, conn, k, &didwhat, done, comeback);
1144 /* If we still have writing to do, we check if we have a writable socket. */
1145 if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
1148 result = readwrite_upload(data, conn, &didwhat);
1153 k->now = Curl_tvnow();
1155 /* Update read/write counters */
1157 *k->bytecountp = k->bytecount; /* read count */
1158 if(k->writebytecountp)
1159 *k->writebytecountp = k->writebytecount; /* write count */
1162 /* no read no write, this is a timeout? */
1163 if(k->exp100 == EXP100_AWAITING_CONTINUE) {
1164 /* This should allow some time for the header to arrive, but only a
1165 very short time as otherwise it'll be too much wasted time too
1168 /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1170 Therefore, when a client sends this header field to an origin server
1171 (possibly via a proxy) from which it has never seen a 100 (Continue)
1172 status, the client SHOULD NOT wait for an indefinite period before
1173 sending the request body.
1177 time_t ms = Curl_tvdiff(k->now, k->start100);
1178 if(ms >= data->set.expect_100_timeout) {
1179 /* we've waited long enough, continue anyway */
1180 k->exp100 = EXP100_SEND_DATA;
1181 k->keepon |= KEEP_SEND;
1182 Curl_expire_done(data, EXPIRE_100_TIMEOUT);
1183 infof(data, "Done waiting for 100-continue\n");
1188 if(Curl_pgrsUpdate(conn))
1189 result = CURLE_ABORTED_BY_CALLBACK;
1191 result = Curl_speedcheck(data, k->now);
/* overall operation timeout — message differs depending on whether the
   expected total size is known (elided branch presumably checks k->size
   != -1 — TODO confirm upstream) */
1196 if(0 > Curl_timeleft(data, &k->now, FALSE)) {
1198 failf(data, "Operation timed out after %ld milliseconds with %"
1199 CURL_FORMAT_CURL_OFF_T " out of %"
1200 CURL_FORMAT_CURL_OFF_T " bytes received",
1201 Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount,
1205 failf(data, "Operation timed out after %ld milliseconds with %"
1206 CURL_FORMAT_CURL_OFF_T " bytes received",
1207 Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount);
1209 return CURLE_OPERATION_TIMEDOUT;
1214 * The transfer has been performed. Just make some general checks before
1218 if(!(data->set.opt_no_body) && (k->size != -1) &&
1219 (k->bytecount != k->size) &&
1220 #ifdef CURL_DO_LINEEND_CONV
1221 /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1222 so we'll check to see if the discrepancy can be explained
1223 by the number of CRLFs we've changed to LFs.
1225 (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1226 #endif /* CURL_DO_LINEEND_CONV */
1228 failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
1229 " bytes remaining to read", k->size - k->bytecount);
1230 return CURLE_PARTIAL_FILE;
1232 if(!(data->set.opt_no_body) && k->chunk &&
1233 (conn->chunk.state != CHUNK_STOP)) {
1235 * In chunked mode, return an error if the connection is closed prior to
1236 * the empty (terminating) chunk is read.
1238 * The condition above used to check for
1239 * conn->proto.http->chunk.datasize != 0 which is true after reading
1240 * *any* chunk, not just the empty chunk.
1243 failf(data, "transfer closed with outstanding read data remaining");
1244 return CURLE_PARTIAL_FILE;
1246 if(Curl_pgrsUpdate(conn))
1247 return CURLE_ABORTED_BY_CALLBACK;
1250 /* Now update the "done" boolean we return */
1251 *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
1252 KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;
1258 * Curl_single_getsock() gets called by the multi interface code when the app
1259 * has requested to get the sockets for the current connection. This function
1260 * will then be called once for every connection that the multi interface
1261 * keeps track of. This function will only be called for connections that are
1262 * in the proper state to have this information available.
/* NOTE(review): this extract is missing intermediate source lines (the jumps
   in the embedded line numbers); comments below describe only the visible
   code, e.g. the condition guarding the early GETSOCK_BLANK return at 1278
   is elided -- confirm against the full source. */
1264 int Curl_single_getsock(const struct connectdata *conn,
1265 curl_socket_t *sock, /* points to numsocks number
1269 const struct Curl_easy *data = conn->data;
1270 int bitmap = GETSOCK_BLANK;
1271 unsigned sockindex = 0;
/* a protocol handler can supply its own socket-reporting function; if it
   does, delegate entirely to it */
1273 if(conn->handler->perform_getsock)
1274 return conn->handler->perform_getsock(conn, sock, numsocks);
1277 /* simple check but we might need two slots */
1278 return GETSOCK_BLANK;
1280 /* don't include HOLD and PAUSE connections */
1281 if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {
1283 DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
/* expose the read socket in the current (first) slot */
1285 bitmap |= GETSOCK_READSOCK(sockindex);
1286 sock[sockindex] = conn->sockfd;
1289 /* don't include HOLD and PAUSE connections */
1290 if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
1292 if((conn->sockfd != conn->writesockfd) ||
1293 bitmap == GETSOCK_BLANK) {
1294 /* only if they are not the same socket and we have a readable
1295 one, we increase index */
1296 if(bitmap != GETSOCK_BLANK)
1297 sockindex++; /* increase index if we need two entries */
1299 DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
1301 sock[sockindex] = conn->writesockfd;
/* flag the chosen slot as writable; when read and write share one socket
   this reuses slot 0 */
1304 bitmap |= GETSOCK_WRITESOCK(sockindex);
1310 /* Curl_init_CONNECT() gets called each time the handle switches to CONNECT
1311    which means this gets called once for each subsequent redirect etc */
1312 void Curl_init_CONNECT(struct Curl_easy *data)
/* copy the application-provided read callback and input object from the
   user-set fields into the active request state for this (re)connect */
1314 data->state.fread_func = data->set.fread_func_set;
1315 data->state.in = data->set.in_set;
1319 * Curl_pretransfer() is called immediately before a transfer starts, and only
1320 * once for one transfer no matter if it has redirects or does multi-pass
1321 * authentication etc.
/* NOTE(review): intermediate source lines are elided in this extract (gaps in
   the embedded numbering); the visible code resets per-transfer state,
   loads cookies/resolve lists, arms timeouts and inits wildcard matching. */
1323 CURLcode Curl_pretransfer(struct Curl_easy *data)
/* a transfer is impossible without a URL; fail early */
1326 if(!data->change.url) {
1327 /* we can't do anything without URL */
1328 failf(data, "No URL set!");
1329 return CURLE_URL_MALFORMAT;
1331 /* since the URL may have been redirected in a previous use of this handle */
1332 if(data->change.url_alloc) {
1333 /* the already set URL is allocated, free it first! */
1334 Curl_safefree(data->change.url);
1335 data->change.url_alloc = FALSE;
/* start this transfer from the URL the application set, not a leftover
   redirect target */
1337 data->change.url = data->set.str[STRING_SET_URL];
1339 /* Init the SSL session ID cache here. We do it here since we want to do it
1340 after the *_setopt() calls (that could specify the size of the cache) but
1341 before any transfer takes place. */
1342 result = Curl_ssl_initsessions(data, data->set.general_ssl.max_ssl_sessions);
1346 data->set.followlocation = 0; /* reset the location-follow counter */
1347 data->state.this_is_a_follow = FALSE; /* reset this */
1348 data->state.errorbuf = FALSE; /* no error has occurred */
1349 data->state.httpversion = 0; /* don't assume any particular server version */
1351 data->state.authproblem = FALSE;
1352 data->state.authhost.want = data->set.httpauth;
1353 data->state.authproxy.want = data->set.proxyauth;
1354 Curl_safefree(data->info.wouldredirect);
1355 data->info.wouldredirect = NULL;
/* pre-compute the upload size: PUT uses the explicit file size, otherwise
   (elided branch, presumably POST) the postfield size; fall back to
   strlen() of the postfields when no size was given */
1357 if(data->set.httpreq == HTTPREQ_PUT)
1358 data->state.infilesize = data->set.filesize;
1360 data->state.infilesize = data->set.postfieldsize;
1361 if(data->set.postfields && (data->state.infilesize == -1))
1362 data->state.infilesize = (curl_off_t)strlen(data->set.postfields);
1365 /* If there is a list of cookie files to read, do it now! */
1366 if(data->change.cookielist)
1367 Curl_cookie_loadfiles(data);
1369 /* If there is a list of host pairs to deal with */
1370 if(data->change.resolve)
1371 result = Curl_loadhostpairs(data);
1374 /* Allow data->set.use_port to set which port to use. This needs to be
1375 * disabled for example when we follow Location: headers to URLs using
1376 * different ports! */
1377 data->state.allow_port = TRUE;
1379 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1380 /*************************************************************
1381 * Tell signal handler to ignore SIGPIPE
1382 *************************************************************/
1383 if(!data->set.no_signal)
1384 data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1387 Curl_initinfo(data); /* reset session-specific information "variables" */
1388 Curl_pgrsResetTransferSizes(data);
1389 Curl_pgrsStartNow(data);
/* arm the overall and connect timeouts (values are in milliseconds set via
   the corresponding options) so the expire machinery can enforce them */
1391 if(data->set.timeout)
1392 Curl_expire(data, data->set.timeout, EXPIRE_TIMEOUT);
1394 if(data->set.connecttimeout)
1395 Curl_expire(data, data->set.connecttimeout, EXPIRE_CONNECTTIMEOUT);
1397 /* In case the handle is re-used and an authentication method was picked
1398 in the session we need to make sure we only use the one(s) we now
1399 consider to be fine */
1400 data->state.authhost.picked &= data->state.authhost.want;
1401 data->state.authproxy.picked &= data->state.authproxy.want;
1403 if(data->set.wildcardmatch) {
1404 struct WildcardData *wc = &data->wildcard;
/* lazily initialize wildcard state the first time it is needed */
1405 if(wc->state < CURLWC_INIT) {
1406 result = Curl_wildcard_init(wc); /* init wildcard structures */
1408 return CURLE_OUT_OF_MEMORY;
1417 * Curl_posttransfer() is called immediately after a transfer ends
1419 CURLcode Curl_posttransfer(struct Curl_easy *data)
1421 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1422 /* restore the signal handler for SIGPIPE before we get back */
1423 if(!data->set.no_signal)
1424 signal(SIGPIPE, data->state.prev_signal);
/* the line below presumably sits in the (elided) #else branch, where 'data'
   is otherwise unreferenced -- confirm against the full source */
1426 (void)data; /* unused parameter */
1432 #ifndef CURL_DISABLE_HTTP
1434 * Find the separator at the end of the host name, or the '?' in cases like
1435 * http://www.url.com?id=2380
/* Returns a pointer into 'url' at whichever comes first: the '/' ending the
   host name or a '?' that starts the query. NOTE(review): the NULL-check
   branches between the visible lines are elided in this extract; the
   fallbacks to url+strlen(url) at 1453/1456 cover the not-found cases. */
1437 static const char *find_host_sep(const char *url)
1442 /* Find the start of the hostname */
1443 sep = strstr(url, "//");
/* search for query and path separators only after the "//" scheme marker */
1449 query = strchr(sep, '?');
1450 sep = strchr(sep, '/');
1453 sep = url + strlen(url);
1456 query = url + strlen(url);
/* whichever separator appears first delimits the host */
1458 return sep < query ? sep : query;
1462 * strlen_url() returns the length of the given URL if the spaces within the
1463 * URL were properly URL encoded.
1464 * URL encoding should be skipped for host names, otherwise IDN resolution
/* 'relative' tells whether 'url' is a relative reference; when it is not,
   host_sep is moved past the host so the host part is never counted as
   needing encoding. NOTE(review): the per-character counting body of the
   loop is elided in this extract. */
1467 static size_t strlen_url(const char *url, bool relative)
1469 const unsigned char *ptr;
1471 bool left = TRUE; /* left side of the ? */
1472 const unsigned char *host_sep = (const unsigned char *) url;
1475 host_sep = (const unsigned char *) find_host_sep(url);
1477 for(ptr = (unsigned char *)url; *ptr; ptr++) {
/* characters before the host separator are never URL-encoded */
1479 if(ptr < host_sep) {
1504 /* strcpy_url() copies a url to a output buffer and URL-encodes the spaces in
1505 * the source URL accordingly.
1506 * URL encoding should be skipped for host names, otherwise IDN resolution
/* Companion of strlen_url(): 'output' must be at least strlen_url(url,
   relative)+1 bytes. NOTE(review): several loop-control and branch lines are
   elided in this extract; comments describe only the visible code. */
1509 static void strcpy_url(char *output, const char *url, bool relative)
1511 /* we must add this with whitespace-replacing */
1513 const unsigned char *iptr;
1514 char *optr = output;
1515 const unsigned char *host_sep = (const unsigned char *) url;
1518 host_sep = (const unsigned char *) find_host_sep(url);
1520 for(iptr = (unsigned char *)url; /* read from here */
1521 *iptr; /* until zero byte */
/* bytes before the host separator are copied verbatim (host must stay
   unencoded for IDN) */
1524 if(iptr < host_sep) {
/* percent-encode the current byte as "%XX" (3 chars + NUL) */
1535 snprintf(optr, 4, "%%%02x", *iptr);
/* a space left of the '?' becomes "%20" ... */
1543 *optr++='%'; /* add a '%' */
1544 *optr++='2'; /* add a '2' */
1545 *optr++='0'; /* add a '0' */
/* ... while a space right of the '?' becomes '+' */
1548 *optr++='+'; /* add a '+' here */
1552 *optr = 0; /* zero terminate output buffer */
1557 * Returns true if the given URL is absolute (as opposed to relative)
1559 static bool is_absolute_url(const char *url)
1561 char prot[16]; /* URL protocol string storage */
1562 char letter; /* used for a silly sscanf */
/* absolute means: up to 15 scheme chars (none of "?&/:"), then "://",
   then at least one more character */
1564 return (2 == sscanf(url, "%15[^?&/:]://%c", prot, &letter)) ? TRUE : FALSE;
1568 * Concatenate a relative URL to a base URL making it absolute.
1569 * URL-encodes any spaces.
1570 * The returned pointer must be freed by the caller unless NULL
1571 * (returns NULL on out of memory).
/* NOTE(review): many intermediate lines (loop closers, else branches,
   pointer-cut statements) are elided in this extract; comments describe only
   the visible code. */
1573 static char *concat_url(const char *base, const char *relurl)
1576 TRY to append this new path to the old URL
1577 to the right of the host part. Oh crap, this is doomed to cause
1578 problems in the future...
1584 bool host_changed = FALSE;
1586 const char *useurl = relurl;
1589 /* we must make our own copy of the URL to play with, as it may
1590 point to read-only data */
1591 char *url_clone = strdup(base);
1594 return NULL; /* skip out of this NOW */
1596 /* protsep points to the start of the host name */
1597 protsep = strstr(url_clone, "//");
1599 protsep = url_clone;
1601 protsep += 2; /* pass the slashes */
/* relative reference that does not start with '/': resolve it against the
   base URL's directory */
1603 if('/' != relurl[0]) {
1606 /* First we need to find out if there's a ?-letter in the URL,
1607 and cut it and the right-side of that off */
1608 pathsep = strchr(protsep, '?');
1612 /* we have a relative path to append to the last slash if there's one
1613 available, or if the new URL is just a query string (starts with a
1614 '?') we append the new one at the end of the entire currently worked
1616 if(useurl[0] != '?') {
1617 pathsep = strrchr(protsep, '/');
1622 /* Check if there's any slash after the host name, and if so, remember
1623 that position instead */
1624 pathsep = strchr(protsep, '/');
1626 protsep = pathsep + 1;
1630 /* now deal with one "./" or any amount of "../" in the newurl
1631 and act accordingly */
1633 if((useurl[0] == '.') && (useurl[1] == '/'))
1634 useurl += 2; /* just skip the "./" */
/* each "../" consumed from the new URL removes one path level from the
   base (see the strrchr cut at 1646) */
1636 while((useurl[0] == '.') &&
1637 (useurl[1] == '.') &&
1638 (useurl[2] == '/')) {
1640 useurl += 3; /* pass the "../" */
1645 /* cut off one more level from the right of the original URL */
1646 pathsep = strrchr(protsep, '/');
1657 /* We got a new absolute path for this server */
/* protocol-relative URL ("//host/..."): keep only the scheme of the base */
1659 if((relurl[0] == '/') && (relurl[1] == '/')) {
1660 /* the new URL starts with //, just keep the protocol part from the
1663 useurl = &relurl[2]; /* we keep the slashes from the original, so we
1664 skip the new ones */
1665 host_changed = TRUE;
1668 /* cut off the original URL from the first slash, or deal with URLs
1670 pathsep = strchr(protsep, '/');
1672 /* When people use badly formatted URLs, such as
1673 "http://www.url.com?dir=/home/daniel" we must not use the first
1674 slash, if there's a ?-letter before it! */
1675 char *sep = strchr(protsep, '?');
1676 if(sep && (sep < pathsep))
1681 /* There was no slash. Now, since we might be operating on a badly
1682 formatted URL, such as "http://www.url.com?id=2380" which doesn't
1683 use a slash separator as it is supposed to, we need to check for a
1684 ?-letter as well! */
1685 pathsep = strchr(protsep, '?');
1692 /* If the new part contains a space, this is a mighty stupid redirect
1693 but we still make an effort to do "right". To the left of a '?'
1694 letter we replace each space with %20 while it is replaced with '+'
1695 on the right side of the '?' letter.
/* host encoding is skipped only when the host did not change (see
   strlen_url()/strcpy_url()'s 'relative' parameter) */
1697 newlen = strlen_url(useurl, !host_changed);
1699 urllen = strlen(url_clone);
1701 newest = malloc(urllen + 1 + /* possible slash */
1702 newlen + 1 /* zero byte */);
/* OOM: release the clone before bailing out */
1705 free(url_clone); /* don't leak this */
1709 /* copy over the root url part */
1710 memcpy(newest, url_clone, urllen);
1712 /* check if we need to append a slash */
1713 if(('/' == useurl[0]) || (protsep && !*protsep) || ('?' == useurl[0]))
1716 newest[urllen++]='/';
1718 /* then append the new piece on the right side */
1719 strcpy_url(&newest[urllen], useurl, !host_changed);
1725 #endif /* CURL_DISABLE_HTTP */
1728 * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1729 * as given by the remote server and set up the new URL to request.
/* NOTE(review): intermediate lines are elided in this extract (gaps in the
   embedded numbering); comments below describe only the visible code. */
1731 CURLcode Curl_follow(struct Curl_easy *data,
1732 char *newurl, /* the Location: string */
1733 followtype type) /* see transfer.h */
1735 #ifdef CURL_DISABLE_HTTP
1739 /* Location: following will not happen when HTTP is disabled */
1740 return CURLE_TOO_MANY_REDIRECTS;
1743 /* Location: redirect */
1744 bool disallowport = FALSE;
1745 bool reachedmax = FALSE;
/* enforce CURLOPT_MAXREDIRS: once the counter reaches the limit, switch to
   FOLLOW_FAKE so the would-be target is still recorded */
1747 if(type == FOLLOW_REDIR) {
1748 if((data->set.maxredirs != -1) &&
1749 (data->set.followlocation >= data->set.maxredirs)) {
1751 type = FOLLOW_FAKE; /* switch to fake to store the would-be-redirected
1755 /* mark the next request as a followed location: */
1756 data->state.this_is_a_follow = TRUE;
1758 data->set.followlocation++; /* count location-followers */
1760 if(data->set.http_auto_referer) {
1761 /* We are asked to automatically set the previous URL as the referer
1762 when we get the next URL. We pick the ->url field, which may or may
1763 not be 100% correct */
1765 if(data->change.referer_alloc) {
1766 Curl_safefree(data->change.referer);
1767 data->change.referer_alloc = FALSE;
1770 data->change.referer = strdup(data->change.url);
1771 if(!data->change.referer)
1772 return CURLE_OUT_OF_MEMORY;
1773 data->change.referer_alloc = TRUE; /* yes, free this later */
1778 if(!is_absolute_url(newurl)) {
1780 *DANG* this is an RFC 2068 violation. The URL is supposed
1781 to be absolute and this doesn't seem to be that!
1783 char *absolute = concat_url(data->change.url, newurl);
1785 return CURLE_OUT_OF_MEMORY;
1789 /* The new URL MAY contain space or high byte values, that means a mighty
1790 stupid redirect URL but we still make an effort to do "right". */
1792 size_t newlen = strlen_url(newurl, FALSE);
1794 /* This is an absolute URL, don't allow the custom port number */
1795 disallowport = TRUE;
1797 newest = malloc(newlen + 1); /* get memory for this */
1799 return CURLE_OUT_OF_MEMORY;
1801 strcpy_url(newest, newurl, FALSE); /* create a space-free URL */
1802 newurl = newest; /* use this instead now */
1806 if(type == FOLLOW_FAKE) {
1807 /* we're only figuring out the new url if we would've followed locations
1808 but now we're done so we can get out! */
1809 data->info.wouldredirect = newurl;
1812 failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
1813 return CURLE_TOO_MANY_REDIRECTS;
/* an absolute redirect target must not inherit the custom port */
1819 data->state.allow_port = FALSE;
1821 if(data->change.url_alloc) {
1822 Curl_safefree(data->change.url);
1823 data->change.url_alloc = FALSE;
/* hand ownership of 'newurl' to the handle; freed with the handle or on the
   next redirect */
1826 data->change.url = newurl;
1827 data->change.url_alloc = TRUE;
1829 infof(data, "Issue another request to this URL: '%s'\n", data->change.url);
1832 * We get here when the HTTP code is 300-399 (and 401). We need to perform
1833 * differently based on exactly what return code there was.
1835 * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
1836 * a HTTP (proxy-) authentication scheme other than Basic.
1838 switch(data->info.httpcode) {
1839 /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
1840 Authorization: XXXX header in the HTTP request code snippet */
1841 /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
1842 Proxy-Authorization: XXXX header in the HTTP request code snippet */
1843 /* 300 - Multiple Choices */
1844 /* 306 - Not used */
1845 /* 307 - Temporary Redirect */
1846 default: /* for all above (and the unknown ones) */
1847 /* Some codes are explicitly mentioned since I've checked RFC2616 and they
1848 * seem to be OK to POST to.
1851 case 301: /* Moved Permanently */
1852 /* (quote from RFC7231, section 6.4.2)
1854 * Note: For historical reasons, a user agent MAY change the request
1855 * method from POST to GET for the subsequent request. If this
1856 * behavior is undesired, the 307 (Temporary Redirect) status code
1857 * can be used instead.
1861 * Many webservers expect this, so these servers often answer to a POST
1862 * request with an error page. To be sure that libcurl gets the page that
1863 * most user agents would get, libcurl has to force GET.
1865 * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1866 * can be overridden with CURLOPT_POSTREDIR.
/* convert any flavor of POST into GET unless the app asked to keep POST on
   301 via CURLOPT_POSTREDIR */
1868 if((data->set.httpreq == HTTPREQ_POST
1869 || data->set.httpreq == HTTPREQ_POST_FORM
1870 || data->set.httpreq == HTTPREQ_POST_MIME)
1871 && !(data->set.keep_post & CURL_REDIR_POST_301)) {
1872 infof(data, "Switch from POST to GET\n");
1873 data->set.httpreq = HTTPREQ_GET;
1876 case 302: /* Found */
1877 /* (quote from RFC7231, section 6.4.3)
1879 * Note: For historical reasons, a user agent MAY change the request
1880 * method from POST to GET for the subsequent request. If this
1881 * behavior is undesired, the 307 (Temporary Redirect) status code
1882 * can be used instead.
1886 * Many webservers expect this, so these servers often answer to a POST
1887 * request with an error page. To be sure that libcurl gets the page that
1888 * most user agents would get, libcurl has to force GET.
1890 * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1891 * can be overridden with CURLOPT_POSTREDIR.
/* same POST-to-GET conversion as for 301, gated by CURL_REDIR_POST_302 */
1893 if((data->set.httpreq == HTTPREQ_POST
1894 || data->set.httpreq == HTTPREQ_POST_FORM
1895 || data->set.httpreq == HTTPREQ_POST_MIME)
1896 && !(data->set.keep_post & CURL_REDIR_POST_302)) {
1897 infof(data, "Switch from POST to GET\n");
1898 data->set.httpreq = HTTPREQ_GET;
1902 case 303: /* See Other */
1903 /* Disable both types of POSTs, unless the user explicitly
1904 asks for POST after POST */
1905 if(data->set.httpreq != HTTPREQ_GET
1906 && !(data->set.keep_post & CURL_REDIR_POST_303)) {
1907 data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
1908 infof(data, "Disables POST, goes with %s\n",
1909 data->set.opt_no_body?"HEAD":"GET");
1912 case 304: /* Not Modified */
1913 /* 304 means we did a conditional request and it was "Not modified".
1914 * We shouldn't get any Location: header in this response!
1917 case 305: /* Use Proxy */
1918 /* (quote from RFC2616, section 10.3.6):
1919 * "The requested resource MUST be accessed through the proxy given
1920 * by the Location field. The Location field gives the URI of the
1921 * proxy. The recipient is expected to repeat this single request
1922 * via the proxy. 305 responses MUST only be generated by origin
/* restart redirect timing and transfer-size bookkeeping for the new
   request */
1927 Curl_pgrsTime(data, TIMER_REDIRECT);
1928 Curl_pgrsResetTransferSizes(data);
1931 #endif /* CURL_DISABLE_HTTP */
1934 /* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.
1936 NOTE: that the *url is malloc()ed. */
/* Decides whether a dead re-used connection warrants retrying the request on
   a fresh connect. NOTE(review): some lines are elided in this extract, e.g.
   the early-return between 1947 and 1950 and the retry-count condition at
   ~1951 -- confirm against the full source. */
1937 CURLcode Curl_retry_request(struct connectdata *conn,
1940 struct Curl_easy *data = conn->data;
1944 /* if we're talking upload, we can't do the checks below, unless the protocol
1945 is HTTP as when uploading over HTTP we will still get a response */
1946 if(data->set.upload &&
1947 !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
/* zero bytes received (neither body nor headers) on a re-used connection
   suggests the server closed it while idle -> retry */
1950 if((data->req.bytecount + data->req.headerbytecount == 0) &&
1952 (!data->set.opt_no_body
1953 || (conn->handler->protocol & PROTO_FAMILY_HTTP)) &&
1954 (data->set.rtspreq != RTSPREQ_RECEIVE)) {
1955 /* We got no data, we attempted to re-use a connection. For HTTP this
1956 can be a retry so we try again regardless if we expected a body.
1957 For other protocols we only try again if we expected a body.
1959 This might happen if the connection was left alive when we were
1960 done using it before, but that was closed when we wanted to read from
1961 it again. Bad luck. Retry the same request on a fresh connect! */
1962 infof(conn->data, "Connection died, retrying a fresh connect\n");
/* caller owns the strdup()ed URL */
1963 *url = strdup(conn->data->change.url);
1965 return CURLE_OUT_OF_MEMORY;
1967 connclose(conn, "retry"); /* close this connection */
1968 conn->bits.retry = TRUE; /* mark this as a connection we're about
1969 to retry. Marking it this way should
1970 prevent i.e HTTP transfers to return
1971 error just because nothing has been
/* when part of an HTTP request body was already sent, rewind the read
   stream so the retry can resend it from the start */
1975 if(conn->handler->protocol&PROTO_FAMILY_HTTP) {
1976 struct HTTP *http = data->req.protop;
1977 if(http->writebytecount)
1978 return Curl_readrewind(conn);
1985 * Curl_setup_transfer() is called to setup some basic properties for the
1986 * upcoming transfer.
/* NOTE(review): intermediate lines are elided in this extract and the
   function's tail (past line 2068) is not visible here. */
1989 Curl_setup_transfer(
1990 struct connectdata *conn, /* connection data */
1991 int sockindex, /* socket index to read from or -1 */
1992 curl_off_t size, /* -1 if unknown at this point */
1993 bool getheader, /* TRUE if header parsing is wanted */
1994 curl_off_t *bytecountp, /* return number of bytes read or NULL */
1995 int writesockindex, /* socket index to write to, it may very well be
1996 the same we read from. -1 disables */
1997 curl_off_t *writecountp /* return number of bytes written or NULL */
2000 struct Curl_easy *data;
2001 struct SingleRequest *k;
2003 DEBUGASSERT(conn != NULL);
2008 DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
2010 /* now copy all input parameters */
/* -1 disables the respective direction by storing CURL_SOCKET_BAD */
2011 conn->sockfd = sockindex == -1 ?
2012 CURL_SOCKET_BAD : conn->sock[sockindex];
2013 conn->writesockfd = writesockindex == -1 ?
2014 CURL_SOCKET_BAD:conn->sock[writesockindex];
2015 k->getheader = getheader;
2018 k->bytecountp = bytecountp;
2019 k->writebytecountp = writecountp;
2021 /* The code sequence below is placed in this function just because all
2022 necessary input is not always known in do_complete() as this function may
2023 be called after that */
2028 Curl_pgrsSetDownloadSize(data, size);
2030 /* we want header and/or body, if neither then don't do this! */
2031 if(k->getheader || !data->set.opt_no_body) {
/* only arm receiving when a valid read socket was supplied */
2033 if(conn->sockfd != CURL_SOCKET_BAD)
2034 k->keepon |= KEEP_RECV;
2036 if(conn->writesockfd != CURL_SOCKET_BAD) {
2037 struct HTTP *http = data->req.protop;
2040 Even if we require a 100-return code before uploading data, we might
2041 need to write data before that since the REQUEST may not have been
2042 finished sent off just yet.
2044 Thus, we must check if the request has been sent before we set the
2045 state info where we wait for the 100-return code
2047 if((data->state.expect100header) &&
2048 (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
2049 (http->sending == HTTPSEND_BODY)) {
2050 /* wait with write until we either got 100-continue or a timeout */
2051 k->exp100 = EXP100_AWAITING_CONTINUE;
2052 k->start100 = Curl_tvnow();
2054 /* Set a timeout for the multi interface. Add the inaccuracy margin so
2055 that we don't fire slightly too early and get denied to run. */
2056 Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
/* headers still being sent: remember to await 100-continue once the
   request has gone out (elided else-branch -- confirm in full source) */
2059 if(data->state.expect100header)
2060 /* when we've sent off the rest of the headers, we must await a
2061 100-continue but first finish sending the request */
2062 k->exp100 = EXP100_SENDING_REQUEST;
2064 /* enable the write bit when we're not waiting for continue */
2065 k->keepon |= KEEP_SEND;
2067 } /* if(conn->writesockfd != CURL_SOCKET_BAD) */
2068 } /* if(k->getheader || !data->set.opt_no_body) */