VirtualBox

source: vbox/trunk/src/libs/curl-7.87.0/lib/transfer.c @ 98704

Last change on this file since 98704 was 98326, checked in by vboxsync, 2 years ago

curl-7.87.0: Applied and adjusted our curl changes to 7.83.1. bugref:10356

  • Property svn:eol-style set to native
File size: 63.1 KB
 
1/***************************************************************************
2 * _ _ ____ _
3 * Project ___| | | | _ \| |
4 * / __| | | | |_) | |
5 * | (__| |_| | _ <| |___
6 * \___|\___/|_| \_\_____|
7 *
8 * Copyright (C) 1998 - 2022, Daniel Stenberg, <[email protected]>, et al.
9 *
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at https://curl.se/docs/copyright.html.
13 *
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
17 *
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
20 *
21 * SPDX-License-Identifier: curl
22 *
23 ***************************************************************************/
24
25#include "curl_setup.h"
26#include "strtoofft.h"
27
28#ifdef HAVE_NETINET_IN_H
29#include <netinet/in.h>
30#endif
31#ifdef HAVE_NETDB_H
32#include <netdb.h>
33#endif
34#ifdef HAVE_ARPA_INET_H
35#include <arpa/inet.h>
36#endif
37#ifdef HAVE_NET_IF_H
38#include <net/if.h>
39#endif
40#ifdef HAVE_SYS_IOCTL_H
41#include <sys/ioctl.h>
42#endif
43#ifdef HAVE_SIGNAL_H
44#include <signal.h>
45#endif
46
47#ifdef HAVE_SYS_PARAM_H
48#include <sys/param.h>
49#endif
50
51#ifdef HAVE_SYS_SELECT_H
52#include <sys/select.h>
53#elif defined(HAVE_UNISTD_H)
54#include <unistd.h>
55#endif
56
57#ifndef HAVE_SOCKET
58#error "We can't compile without socket() support!"
59#endif
60
61#include "urldata.h"
62#include <curl/curl.h>
63#include "netrc.h"
64
65#include "content_encoding.h"
66#include "hostip.h"
67#include "cfilters.h"
68#include "transfer.h"
69#include "sendf.h"
70#include "speedcheck.h"
71#include "progress.h"
72#include "http.h"
73#include "url.h"
74#include "getinfo.h"
75#include "vtls/vtls.h"
76#include "select.h"
77#include "multiif.h"
78#include "connect.h"
79#include "http2.h"
80#include "mime.h"
81#include "strcase.h"
82#include "urlapi-int.h"
83#include "hsts.h"
84#include "setopt.h"
85#include "headers.h"
86
87/* The last 3 #include files should be in this order */
88#include "curl_printf.h"
89#include "curl_memory.h"
90#include "memdebug.h"
91
92#if !defined(CURL_DISABLE_HTTP) || !defined(CURL_DISABLE_SMTP) || \
93 !defined(CURL_DISABLE_IMAP)
94/*
95 * checkheaders() checks the linked list of custom headers for a
96 * particular header (prefix). Provide the prefix without colon!
97 *
98 * Returns a pointer to the first matching header or NULL if none matched.
99 */
100char *Curl_checkheaders(const struct Curl_easy *data,
101 const char *thisheader,
102 const size_t thislen)
103{
104 struct curl_slist *head;
105 DEBUGASSERT(thislen);
106 DEBUGASSERT(thisheader[thislen-1] != ':');
107
108 for(head = data->set.headers; head; head = head->next) {
109 if(strncasecompare(head->data, thisheader, thislen) &&
110 Curl_headersep(head->data[thislen]) )
111 return head->data;
112 }
113
114 return NULL;
115}
116#endif
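/* Editorial sketch, not part of the upstream file: how callers inside
   libcurl typically use Curl_checkheaders() to see whether the application
   already supplied a given header. The STRCONST() helper (string literal
   plus its length) is assumed to be available, as elsewhere in this code
   base; the wrapper name is illustrative only. */
#if 0
static bool app_supplied_content_type(struct Curl_easy *data)
{
  /* the header name is passed without the trailing colon */
  return Curl_checkheaders(data, STRCONST("Content-Type")) != NULL;
}
#endif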
117
118CURLcode Curl_get_upload_buffer(struct Curl_easy *data)
119{
120 if(!data->state.ulbuf) {
121 data->state.ulbuf = malloc(data->set.upload_buffer_size);
122 if(!data->state.ulbuf)
123 return CURLE_OUT_OF_MEMORY;
124 }
125 return CURLE_OK;
126}
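/* Editorial sketch, not part of the upstream file: the size allocated by
   Curl_get_upload_buffer() comes from the public CURLOPT_UPLOAD_BUFFERSIZE
   option (bytes, clamped by libcurl to its documented minimum and maximum);
   the helper name below is illustrative only. */
#if 0
static void app_request_bigger_upload_buffer(CURL *easy)
{
  curl_easy_setopt(easy, CURLOPT_UPLOAD_BUFFERSIZE, 512L * 1024L);
}
#endif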
127
128#ifndef CURL_DISABLE_HTTP
129/*
130 * This function will be called to loop through the trailers buffer
131 * until no more data is available for sending.
132 */
133static size_t trailers_read(char *buffer, size_t size, size_t nitems,
134 void *raw)
135{
136 struct Curl_easy *data = (struct Curl_easy *)raw;
137 struct dynbuf *trailers_buf = &data->state.trailers_buf;
138 size_t bytes_left = Curl_dyn_len(trailers_buf) -
139 data->state.trailers_bytes_sent;
140 size_t to_copy = (size*nitems < bytes_left) ? size*nitems : bytes_left;
141 if(to_copy) {
142 memcpy(buffer,
143 Curl_dyn_ptr(trailers_buf) + data->state.trailers_bytes_sent,
144 to_copy);
145 data->state.trailers_bytes_sent += to_copy;
146 }
147 return to_copy;
148}
149
150static size_t trailers_left(void *raw)
151{
152 struct Curl_easy *data = (struct Curl_easy *)raw;
153 struct dynbuf *trailers_buf = &data->state.trailers_buf;
154 return Curl_dyn_len(trailers_buf) - data->state.trailers_bytes_sent;
155}
156#endif
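/* Editorial sketch, not part of the upstream file: the public API that
   feeds this trailer machinery. An application registers a callback with
   CURLOPT_TRAILERFUNCTION/CURLOPT_TRAILERDATA and hands back the extra
   headers as a curl_slist; the callback name and header value below are
   illustrative only. */
#if 0
static int app_trailer_cb(struct curl_slist **trailers, void *userdata)
{
  (void)userdata;
  *trailers = curl_slist_append(*trailers, "X-Checksum: 1a2b3c");
  return *trailers ? CURL_TRAILERFUNC_OK : CURL_TRAILERFUNC_ABORT;
}
/* on an easy handle doing a chunked upload:
   curl_easy_setopt(easy, CURLOPT_TRAILERFUNCTION, app_trailer_cb);
   curl_easy_setopt(easy, CURLOPT_TRAILERDATA, NULL); */
#endif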
157
158/*
159 * This function will call the read callback to fill our buffer with data
160 * to upload.
161 */
162CURLcode Curl_fillreadbuffer(struct Curl_easy *data, size_t bytes,
163 size_t *nreadp)
164{
165 size_t buffersize = bytes;
166 size_t nread;
167
168 curl_read_callback readfunc = NULL;
169 void *extra_data = NULL;
170
171#ifndef CURL_DISABLE_HTTP
172 if(data->state.trailers_state == TRAILERS_INITIALIZED) {
173 struct curl_slist *trailers = NULL;
174 CURLcode result;
175 int trailers_ret_code;
176
177 /* at this point we already verified that the callback exists
178 so we compile and store the trailers buffer, then proceed */
179 infof(data,
180 "Moving trailers state machine from initialized to sending.");
181 data->state.trailers_state = TRAILERS_SENDING;
182 Curl_dyn_init(&data->state.trailers_buf, DYN_TRAILERS);
183
184 data->state.trailers_bytes_sent = 0;
185 Curl_set_in_callback(data, true);
186 trailers_ret_code = data->set.trailer_callback(&trailers,
187 data->set.trailer_data);
188 Curl_set_in_callback(data, false);
189 if(trailers_ret_code == CURL_TRAILERFUNC_OK) {
190 result = Curl_http_compile_trailers(trailers, &data->state.trailers_buf,
191 data);
192 }
193 else {
194 failf(data, "operation aborted by trailing headers callback");
195 *nreadp = 0;
196 result = CURLE_ABORTED_BY_CALLBACK;
197 }
198 if(result) {
199 Curl_dyn_free(&data->state.trailers_buf);
200 curl_slist_free_all(trailers);
201 return result;
202 }
203 infof(data, "Successfully compiled trailers.");
204 curl_slist_free_all(trailers);
205 }
206#endif
207
208#ifndef CURL_DISABLE_HTTP
209 /* if we are transmitting trailing data, we don't need to write
210 a chunk size so we skip this */
211 if(data->req.upload_chunky &&
212 data->state.trailers_state == TRAILERS_NONE) {
213 /* if chunked Transfer-Encoding */
214 buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
215 data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
216 }
217
218 if(data->state.trailers_state == TRAILERS_SENDING) {
219 /* if we're here then that means that we already sent the last empty chunk
220 but we didn't send a final CR LF, so we sent 0 CR LF. We then start
221 pulling trailing data until we have no more at which point we
222 simply return to the previous point in the state machine as if
223 nothing happened.
224 */
225 readfunc = trailers_read;
226 extra_data = (void *)data;
227 }
228 else
229#endif
230 {
231 readfunc = data->state.fread_func;
232 extra_data = data->state.in;
233 }
234
235 Curl_set_in_callback(data, true);
236 nread = readfunc(data->req.upload_fromhere, 1,
237 buffersize, extra_data);
238 Curl_set_in_callback(data, false);
239
240 if(nread == CURL_READFUNC_ABORT) {
241 failf(data, "operation aborted by callback");
242 *nreadp = 0;
243 return CURLE_ABORTED_BY_CALLBACK;
244 }
245 if(nread == CURL_READFUNC_PAUSE) {
246 struct SingleRequest *k = &data->req;
247
248 if(data->conn->handler->flags & PROTOPT_NONETWORK) {
249 /* protocols that work without network cannot be paused. This is
250 actually only FILE:// just now, and it can't pause since the transfer
251 isn't done using the "normal" procedure. */
252 failf(data, "Read callback asked for PAUSE when not supported");
253 return CURLE_READ_ERROR;
254 }
255
256 /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
257 k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
258 if(data->req.upload_chunky) {
259 /* Back out the preallocation done above */
260 data->req.upload_fromhere -= (8 + 2);
261 }
262 *nreadp = 0;
263
264 return CURLE_OK; /* nothing was read */
265 }
266 else if(nread > buffersize) {
267 /* the read function returned a too large value */
268 *nreadp = 0;
269 failf(data, "read function returned funny value");
270 return CURLE_READ_ERROR;
271 }
272
273#ifndef CURL_DISABLE_HTTP
274 if(!data->req.forbidchunk && data->req.upload_chunky) {
275 /* if chunked Transfer-Encoding
276 * build chunk:
277 *
278 * <HEX SIZE> CRLF
279 * <DATA> CRLF
280 */
281 /* On non-ASCII platforms the <DATA> may or may not be
282 translated based on state.prefer_ascii while the protocol
283 portion must always be translated to the network encoding.
284 To further complicate matters, line end conversion might be
285 done later on, so we need to prevent CRLFs from becoming
286 CRCRLFs if that's the case. To do this we use bare LFs
287 here, knowing they'll become CRLFs later on.
288 */
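      /* Editorial illustration, not part of the upstream file: uploading the
         four bytes "Wiki" as one chunk followed by the terminating chunk
         puts this on the wire:

             4\r\nWiki\r\n0\r\n\r\n

         i.e. hex length, CRLF, data, CRLF, then a zero-length chunk and a
         final CRLF (with any trailer section inserted before that last
         CRLF). */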
289
290 bool added_crlf = FALSE;
291 int hexlen = 0;
292 const char *endofline_native;
293 const char *endofline_network;
294
295 if(
296#ifdef CURL_DO_LINEEND_CONV
297 (data->state.prefer_ascii) ||
298#endif
299 (data->set.crlf)) {
300 /* \n will become \r\n later on */
301 endofline_native = "\n";
302 endofline_network = "\x0a";
303 }
304 else {
305 endofline_native = "\r\n";
306 endofline_network = "\x0d\x0a";
307 }
308
309 /* if we're not handling trailing data, proceed as usual */
310 if(data->state.trailers_state != TRAILERS_SENDING) {
311 char hexbuffer[11] = "";
312 hexlen = msnprintf(hexbuffer, sizeof(hexbuffer),
313 "%zx%s", nread, endofline_native);
314
315 /* move buffer pointer */
316 data->req.upload_fromhere -= hexlen;
317 nread += hexlen;
318
319 /* copy the prefix to the buffer, leaving out the NUL */
320 memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
321
322 /* always append ASCII CRLF to the data unless
323 we have a valid trailer callback */
324 if((nread-hexlen) == 0 &&
325 data->set.trailer_callback != NULL &&
326 data->state.trailers_state == TRAILERS_NONE) {
327 data->state.trailers_state = TRAILERS_INITIALIZED;
328 }
329 else {
330 memcpy(data->req.upload_fromhere + nread,
331 endofline_network,
332 strlen(endofline_network));
333 added_crlf = TRUE;
334 }
335 }
336
337 if(data->state.trailers_state == TRAILERS_SENDING &&
338 !trailers_left(data)) {
339 Curl_dyn_free(&data->state.trailers_buf);
340 data->state.trailers_state = TRAILERS_DONE;
341 data->set.trailer_data = NULL;
342 data->set.trailer_callback = NULL;
343 /* mark the transfer as done */
344 data->req.upload_done = TRUE;
345 infof(data, "Signaling end of chunked upload after trailers.");
346 }
347 else
348 if((nread - hexlen) == 0 &&
349 data->state.trailers_state != TRAILERS_INITIALIZED) {
350 /* mark this as done once this chunk is transferred */
351 data->req.upload_done = TRUE;
352 infof(data,
353 "Signaling end of chunked upload via terminating chunk.");
354 }
355
356 if(added_crlf)
357 nread += strlen(endofline_network); /* for the added end of line */
358 }
359#endif
360
361 *nreadp = nread;
362
363 return CURLE_OK;
364}
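/* Editorial sketch, not part of the upstream file: the shape of the
   application read callback whose return values Curl_fillreadbuffer()
   interprets above. Reading from a plain FILE* is an assumption made for
   illustration only. */
#if 0
static size_t app_read_cb(char *buffer, size_t size, size_t nitems,
                          void *userp)
{
  FILE *in = (FILE *)userp;
  size_t n = fread(buffer, size, nitems, in);
  if(ferror(in))
    return CURL_READFUNC_ABORT;  /* surfaces as CURLE_ABORTED_BY_CALLBACK */
  /* returning CURL_READFUNC_PAUSE instead would set KEEP_SEND_PAUSE */
  return n;                      /* 0 ends the upload */
}
#endif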
365
366static int data_pending(struct Curl_easy *data)
367{
368 struct connectdata *conn = data->conn;
369
370#ifdef ENABLE_QUIC
371 if(conn->transport == TRNSPRT_QUIC)
372 return Curl_quic_data_pending(data);
373#endif
374
375 if(conn->handler->protocol&PROTO_FAMILY_FTP)
376 return Curl_conn_data_pending(data, SECONDARYSOCKET);
377
378 /* in the case of libssh2, we can never be really sure that we have emptied
379 its internal buffers so we MUST always try until we get EAGAIN back */
380 return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
381#ifdef USE_NGHTTP2
382 /* For HTTP/2, we may read up everything including response body
383 with header fields in Curl_http_readwrite_headers. If no
384 content-length is provided, curl waits for the connection
385 close, which we emulate it using conn->proto.httpc.closed =
386 TRUE. The thing is if we read everything, then http2_recv won't
387 be called and we cannot signal the HTTP/2 stream has closed. As
388 a workaround, we return nonzero here to call http2_recv. */
389 ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion >= 20) ||
390#endif
391 Curl_conn_data_pending(data, FIRSTSOCKET);
392}
393
394/*
395 * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
396 * remote document with the time provided by CURLOPT_TIMEVAL
397 */
398bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc)
399{
400 if((timeofdoc == 0) || (data->set.timevalue == 0))
401 return TRUE;
402
403 switch(data->set.timecondition) {
404 case CURL_TIMECOND_IFMODSINCE:
405 default:
406 if(timeofdoc <= data->set.timevalue) {
407 infof(data,
408 "The requested document is not new enough");
409 data->info.timecond = TRUE;
410 return FALSE;
411 }
412 break;
413 case CURL_TIMECOND_IFUNMODSINCE:
414 if(timeofdoc >= data->set.timevalue) {
415 infof(data,
416 "The requested document is not old enough");
417 data->info.timecond = TRUE;
418 return FALSE;
419 }
420 break;
421 }
422
423 return TRUE;
424}
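/* Editorial sketch, not part of the upstream file: the options an
   application sets so that Curl_meets_timecondition() has something to
   compare against; the helper name is illustrative only. */
#if 0
static void app_set_timecondition(CURL *easy)
{
  /* only transfer if the resource changed after Jan 1 2023 00:00:00 UTC */
  curl_easy_setopt(easy, CURLOPT_TIMECONDITION, (long)CURL_TIMECOND_IFMODSINCE);
  curl_easy_setopt(easy, CURLOPT_TIMEVALUE, (long)1672531200);
}
#endif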
425
426/*
427 * Go ahead and do a read if we have a readable socket or if
428 * the stream was rewound (in which case we have data in a
429 * buffer)
430 *
431 * return '*comeback' TRUE if we didn't properly drain the socket so this
432 * function should get called again without select() or similar in between!
433 */
434static CURLcode readwrite_data(struct Curl_easy *data,
435 struct connectdata *conn,
436 struct SingleRequest *k,
437 int *didwhat, bool *done,
438 bool *comeback)
439{
440 CURLcode result = CURLE_OK;
441 ssize_t nread; /* number of bytes read */
442 size_t excess = 0; /* excess bytes read */
443 bool readmore = FALSE; /* used by RTP to signal for more data */
444 int maxloops = 100;
445 char *buf = data->state.buffer;
446 DEBUGASSERT(buf);
447
448 *done = FALSE;
449 *comeback = FALSE;
450
451 /* This is where we loop until we have read everything there is to
452 read or we get a CURLE_AGAIN */
453 do {
454 bool is_empty_data = FALSE;
455 size_t buffersize = data->set.buffer_size;
456 size_t bytestoread = buffersize;
457#ifdef USE_NGHTTP2
458 bool is_http2 = ((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
459 (conn->httpversion == 20));
460#endif
461 bool is_http3 =
462#ifdef ENABLE_QUIC
463 ((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
464 (conn->httpversion == 30));
465#else
466 FALSE;
467#endif
468
469 if(
470#ifdef USE_NGHTTP2
471 /* For HTTP/2, read data without caring about the content length. This
472 is safe because body in HTTP/2 is always segmented thanks to its
473 framing layer. Meanwhile, we have to call Curl_read to ensure that
474 http2_handle_stream_close is called when we read all incoming bytes
475 for a particular stream. */
476 !is_http2 &&
477#endif
478 !is_http3 && /* Same reason mentioned above. */
479 k->size != -1 && !k->header) {
480 /* make sure we don't read too much */
481 curl_off_t totalleft = k->size - k->bytecount;
482 if(totalleft < (curl_off_t)bytestoread)
483 bytestoread = (size_t)totalleft;
484 }
485
486 if(bytestoread) {
487 /* receive data from the network! */
488 result = Curl_read(data, conn->sockfd, buf, bytestoread, &nread);
489
490 /* read would've blocked */
491 if(CURLE_AGAIN == result) {
492 result = CURLE_OK;
493 break; /* get out of loop */
494 }
495
496 if(result>0)
497 goto out;
498 }
499 else {
500 /* read nothing but since we wanted nothing we consider this an OK
501 situation to proceed from */
502 DEBUGF(infof(data, "readwrite_data: we're done"));
503 nread = 0;
504 }
505
506 if(!k->bytecount) {
507 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
508 if(k->exp100 > EXP100_SEND_DATA)
509 /* set time stamp to compare with when waiting for the 100 */
510 k->start100 = Curl_now();
511 }
512
513 *didwhat |= KEEP_RECV;
514 /* indicates data of zero size, i.e. empty file */
515 is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
516
517 if(0 < nread || is_empty_data) {
518 buf[nread] = 0;
519 }
520 else {
521 /* if we receive 0 or less here, either the http2 stream is closed or the
522 server closed the connection and we bail out from this! */
523#ifdef USE_NGHTTP2
524 if(is_http2 && !nread)
525 DEBUGF(infof(data, "nread == 0, stream closed, bailing"));
526 else
527#endif
528 if(is_http3 && !nread)
529 DEBUGF(infof(data, "nread == 0, stream closed, bailing"));
530 else
531 DEBUGF(infof(data, "nread <= 0, server closed connection, bailing"));
532 k->keepon &= ~KEEP_RECV;
533 break;
534 }
535
536 /* Default buffer to use when we write the buffer, it may be changed
537 in the flow below before the actual storing is done. */
538 k->str = buf;
539
540 if(conn->handler->readwrite) {
541 result = conn->handler->readwrite(data, conn, &nread, &readmore);
542 if(result)
543 goto out;
544 if(readmore)
545 break;
546 }
547
548#ifndef CURL_DISABLE_HTTP
549 /* Since this is a two-state thing, we check if we are parsing
550 headers at the moment or not. */
551 if(k->header) {
552 /* we are in parse-the-header-mode */
553 bool stop_reading = FALSE;
554 result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
555 if(result)
556 goto out;
557
558 if(conn->handler->readwrite &&
559 (k->maxdownload <= 0 && nread > 0)) {
560 result = conn->handler->readwrite(data, conn, &nread, &readmore);
561 if(result)
562 goto out;
563 if(readmore)
564 break;
565 }
566
567 if(stop_reading) {
568 /* We've stopped dealing with input, get out of the do-while loop */
569
570 if(nread > 0) {
571 infof(data,
572 "Excess found:"
573 " excess = %zd"
574 " url = %s (zero-length body)",
575 nread, data->state.up.path);
576 }
577
578 break;
579 }
580 }
581#endif /* CURL_DISABLE_HTTP */
582
583
584 /* This is not an 'else if' since it may be a rest from the header
585 parsing, where the beginning of the buffer is headers and the end
586 is non-headers. */
587 if(!k->header && (nread > 0 || is_empty_data)) {
588
589 if(data->req.no_body) {
590 /* data arrives although we want none, bail out */
591 streamclose(conn, "ignoring body");
592 *done = TRUE;
593 result = CURLE_WEIRD_SERVER_REPLY;
594 goto out;
595 }
596
597#ifndef CURL_DISABLE_HTTP
598 if(0 == k->bodywrites && !is_empty_data) {
599 /* These checks are only made the first time we are about to
600 write a piece of the body */
601 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
602 /* HTTP-only checks */
603 result = Curl_http_firstwrite(data, conn, done);
604 if(result || *done)
605 goto out;
606 }
607 } /* this is the first time we write a body part */
608#endif /* CURL_DISABLE_HTTP */
609
610 k->bodywrites++;
611
612 /* pass data to the debug function before it gets "dechunked" */
613 if(data->set.verbose) {
614 if(k->badheader) {
615 Curl_debug(data, CURLINFO_DATA_IN,
616 Curl_dyn_ptr(&data->state.headerb),
617 Curl_dyn_len(&data->state.headerb));
618 if(k->badheader == HEADER_PARTHEADER)
619 Curl_debug(data, CURLINFO_DATA_IN,
620 k->str, (size_t)nread);
621 }
622 else
623 Curl_debug(data, CURLINFO_DATA_IN,
624 k->str, (size_t)nread);
625 }
626
627#ifndef CURL_DISABLE_HTTP
628 if(k->chunk) {
629 /*
630 * Here comes a chunked transfer flying and we need to decode this
631 * properly. While the name says read, this function both reads
632 * and writes away the data. The returned 'nread' holds the number
633 * of actual data it wrote to the client.
634 */
635 CURLcode extra;
636 CHUNKcode res =
637 Curl_httpchunk_read(data, k->str, nread, &nread, &extra);
638
639 if(CHUNKE_OK < res) {
640 if(CHUNKE_PASSTHRU_ERROR == res) {
641 failf(data, "Failed reading the chunked-encoded stream");
642 result = extra;
643 goto out;
644 }
645 failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
646 result = CURLE_RECV_ERROR;
647 goto out;
648 }
649 if(CHUNKE_STOP == res) {
650 /* we're done reading chunks! */
651 k->keepon &= ~KEEP_RECV; /* read no more */
652
653 /* N number of bytes at the end of the str buffer that weren't
654 written to the client. */
655 if(conn->chunk.datasize) {
656 infof(data, "Leftovers after chunking: % "
657 CURL_FORMAT_CURL_OFF_T "u bytes",
658 conn->chunk.datasize);
659 }
660 }
661 /* If it returned OK, we just keep going */
662 }
663#endif /* CURL_DISABLE_HTTP */
664
665 /* Account for body content stored in the header buffer */
666 if((k->badheader == HEADER_PARTHEADER) && !k->ignorebody) {
667 size_t headlen = Curl_dyn_len(&data->state.headerb);
668 DEBUGF(infof(data, "Increasing bytecount by %zu", headlen));
669 k->bytecount += headlen;
670 }
671
672 if((-1 != k->maxdownload) &&
673 (k->bytecount + nread >= k->maxdownload)) {
674
675 excess = (size_t)(k->bytecount + nread - k->maxdownload);
676 if(excess > 0 && !k->ignorebody) {
677 infof(data,
678 "Excess found in a read:"
679 " excess = %zu"
680 ", size = %" CURL_FORMAT_CURL_OFF_T
681 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
682 ", bytecount = %" CURL_FORMAT_CURL_OFF_T,
683 excess, k->size, k->maxdownload, k->bytecount);
684 connclose(conn, "excess found in a read");
685 }
686
687 nread = (ssize_t) (k->maxdownload - k->bytecount);
688 if(nread < 0) /* this should be unusual */
689 nread = 0;
690
691 /* HTTP/3 over QUIC should keep reading until QUIC connection
692 is closed. In contrast to HTTP/2 which can stop reading
693 from TCP connection, HTTP/3 over QUIC needs ACK from server
694 to ensure stream closure. It should keep reading. */
695 if(!is_http3) {
696 k->keepon &= ~KEEP_RECV; /* we're done reading */
697 }
698 }
699
700 k->bytecount += nread;
701
702 Curl_pgrsSetDownloadCounter(data, k->bytecount);
703
704 if(!k->chunk && (nread || k->badheader || is_empty_data)) {
705 /* If this is chunky transfer, it was already written */
706
707 if(k->badheader && !k->ignorebody) {
708 /* we parsed a piece of data wrongly assuming it was a header
709 and now we output it as body instead */
710 size_t headlen = Curl_dyn_len(&data->state.headerb);
711
712 /* Don't let excess data pollute body writes */
713 if(k->maxdownload == -1 || (curl_off_t)headlen <= k->maxdownload)
714 result = Curl_client_write(data, CLIENTWRITE_BODY,
715 Curl_dyn_ptr(&data->state.headerb),
716 headlen);
717 else
718 result = Curl_client_write(data, CLIENTWRITE_BODY,
719 Curl_dyn_ptr(&data->state.headerb),
720 (size_t)k->maxdownload);
721
722 if(result)
723 goto out;
724 }
725 if(k->badheader < HEADER_ALLBAD) {
726 /* This switch handles various content encodings. If there's an
727 error here, be sure to check over the almost identical code
728 in http_chunks.c.
729 Make sure that ALL_CONTENT_ENCODINGS contains all the
730 encodings handled here. */
731 if(data->set.http_ce_skip || !k->writer_stack) {
732 if(!k->ignorebody && nread) {
733#ifndef CURL_DISABLE_POP3
734 if(conn->handler->protocol & PROTO_FAMILY_POP3)
735 result = Curl_pop3_write(data, k->str, nread);
736 else
737#endif /* CURL_DISABLE_POP3 */
738 result = Curl_client_write(data, CLIENTWRITE_BODY, k->str,
739 nread);
740 }
741 }
742 else if(!k->ignorebody && nread)
743 result = Curl_unencode_write(data, k->writer_stack, k->str, nread);
744 }
745 k->badheader = HEADER_NORMAL; /* taken care of now */
746
747 if(result)
748 goto out;
749 }
750
751 } /* if(!header and data to read) */
752
753 if(conn->handler->readwrite && excess) {
754 /* Parse the excess data */
755 k->str += nread;
756
757 if(&k->str[excess] > &buf[data->set.buffer_size]) {
758 /* the excess amount was too excessive(!), make sure
759 it doesn't read out of buffer */
760 excess = &buf[data->set.buffer_size] - k->str;
761 }
762 nread = (ssize_t)excess;
763
764 result = conn->handler->readwrite(data, conn, &nread, &readmore);
765 if(result)
766 goto out;
767
768 if(readmore)
769 k->keepon |= KEEP_RECV; /* we're not done reading */
770 break;
771 }
772
773 if(is_empty_data) {
774 /* if we received nothing, the server closed the connection and we
775 are done */
776 k->keepon &= ~KEEP_RECV;
777 }
778
779 if(k->keepon & KEEP_RECV_PAUSE) {
780 /* this is a paused transfer */
781 break;
782 }
783
784 } while(data_pending(data) && maxloops--);
785
786 if(maxloops <= 0) {
787 /* we mark it as read-again-please */
788 conn->cselect_bits = CURL_CSELECT_IN;
789 *comeback = TRUE;
790 }
791
792 if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
793 conn->bits.close) {
794 /* When we've read the entire thing and the close bit is set, the server
795 may now close the connection. If there's now any kind of sending going
796 on from our side, we need to stop that immediately. */
797 infof(data, "we are done reading and this is set to close, stop send");
798 k->keepon &= ~KEEP_SEND; /* no writing anymore either */
799 }
800
801out:
802 DEBUGF(infof(data, "readwrite_data(handle=%p) -> %d", data, result));
803 return result;
804}
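/* Editorial sketch, not part of the upstream file: data->set.buffer_size
   used by the receive loop above is the public CURLOPT_BUFFERSIZE option,
   which roughly bounds how much body data reaches the write callback per
   invocation; the helper name is illustrative only. */
#if 0
static void app_request_bigger_receive_buffer(CURL *easy)
{
  curl_easy_setopt(easy, CURLOPT_BUFFERSIZE, 256L * 1024L);
}
#endif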
805
806CURLcode Curl_done_sending(struct Curl_easy *data,
807 struct SingleRequest *k)
808{
809 struct connectdata *conn = data->conn;
810 k->keepon &= ~KEEP_SEND; /* we're done writing */
811
812 /* These functions should be moved into the handler struct! */
813 Curl_http2_done_sending(data, conn);
814 Curl_quic_done_sending(data);
815
816 return CURLE_OK;
817}
818
819#if defined(WIN32) && defined(USE_WINSOCK)
820#ifndef SIO_IDEAL_SEND_BACKLOG_QUERY
821#define SIO_IDEAL_SEND_BACKLOG_QUERY 0x4004747B
822#endif
823
824static void win_update_buffer_size(curl_socket_t sockfd)
825{
826 int result;
827 ULONG ideal;
828 DWORD ideallen;
829 result = WSAIoctl(sockfd, SIO_IDEAL_SEND_BACKLOG_QUERY, 0, 0,
830 &ideal, sizeof(ideal), &ideallen, 0, 0);
831 if(result == 0) {
832 setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF,
833 (const char *)&ideal, sizeof(ideal));
834 }
835}
836#else
837#define win_update_buffer_size(x)
838#endif
839
840#define curl_upload_refill_watermark(data) \
841 ((ssize_t)((data)->set.upload_buffer_size >> 5))
842
843/*
844 * Send data to upload to the server, when the socket is writable.
845 */
846static CURLcode readwrite_upload(struct Curl_easy *data,
847 struct connectdata *conn,
848 int *didwhat)
849{
850 ssize_t i, si;
851 ssize_t bytes_written;
852 CURLcode result;
853 ssize_t nread; /* number of bytes read */
854 bool sending_http_headers = FALSE;
855 struct SingleRequest *k = &data->req;
856
857 if((k->bytecount == 0) && (k->writebytecount == 0))
858 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
859
860 *didwhat |= KEEP_SEND;
861
862 do {
863 curl_off_t nbody;
864 ssize_t offset = 0;
865
866 if(0 != k->upload_present &&
867 k->upload_present < curl_upload_refill_watermark(data) &&
868 !k->upload_chunky &&/*(variable sized chunked header; append not safe)*/
869 !k->upload_done && /*!(k->upload_done once k->upload_present sent)*/
870 !(k->writebytecount + k->upload_present - k->pendingheader ==
871 data->state.infilesize)) {
872 offset = k->upload_present;
873 }
874
875 /* only read more data if there's no upload data already
876 present in the upload buffer, or if appending to upload buffer */
877 if(0 == k->upload_present || offset) {
878 result = Curl_get_upload_buffer(data);
879 if(result)
880 return result;
881 if(offset && k->upload_fromhere != data->state.ulbuf)
882 memmove(data->state.ulbuf, k->upload_fromhere, offset);
883 /* init the "upload from here" pointer */
884 k->upload_fromhere = data->state.ulbuf;
885
886 if(!k->upload_done) {
887 /* HTTP pollution, this should be written nicer to become more
888 protocol agnostic. */
889 size_t fillcount;
890 struct HTTP *http = k->p.http;
891
892 if((k->exp100 == EXP100_SENDING_REQUEST) &&
893 (http->sending == HTTPSEND_BODY)) {
894 /* If this call is to send body data, we must take some action:
895 We have sent off the full HTTP 1.1 request, and we shall now
896 go into the Expect: 100 state and await such a header */
897 k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
898 k->keepon &= ~KEEP_SEND; /* disable writing */
899 k->start100 = Curl_now(); /* timeout count starts now */
900 *didwhat &= ~KEEP_SEND; /* we didn't write anything actually */
901 /* set a timeout for the multi interface */
902 Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
903 break;
904 }
905
906 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
907 if(http->sending == HTTPSEND_REQUEST)
908 /* We're sending the HTTP request headers, not the data.
909 Remember that so we don't change the line endings. */
910 sending_http_headers = TRUE;
911 else
912 sending_http_headers = FALSE;
913 }
914
915 k->upload_fromhere += offset;
916 result = Curl_fillreadbuffer(data, data->set.upload_buffer_size-offset,
917 &fillcount);
918 k->upload_fromhere -= offset;
919 if(result)
920 return result;
921
922 nread = offset + fillcount;
923 }
924 else
925 nread = 0; /* we're done uploading/reading */
926
927 if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
928 /* this is a paused transfer */
929 break;
930 }
931 if(nread <= 0) {
932 result = Curl_done_sending(data, k);
933 if(result)
934 return result;
935 break;
936 }
937
938 /* store number of bytes available for upload */
939 k->upload_present = nread;
940
941 /* convert LF to CRLF if so asked */
942 if((!sending_http_headers) && (
943#ifdef CURL_DO_LINEEND_CONV
944 /* always convert if we're FTPing in ASCII mode */
945 (data->state.prefer_ascii) ||
946#endif
947 (data->set.crlf))) {
948 /* Do we need to allocate a scratch buffer? */
949 if(!data->state.scratch) {
950 data->state.scratch = malloc(2 * data->set.upload_buffer_size);
951 if(!data->state.scratch) {
952 failf(data, "Failed to alloc scratch buffer");
953
954 return CURLE_OUT_OF_MEMORY;
955 }
956 }
957
958 /*
959 * ASCII/EBCDIC Note: This is presumably a text (not binary)
960 * transfer so the data should already be in ASCII.
961 * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
962 * must be used instead of the escape sequences \r & \n.
963 */
964 if(offset)
965 memcpy(data->state.scratch, k->upload_fromhere, offset);
966 for(i = offset, si = offset; i < nread; i++, si++) {
967 if(k->upload_fromhere[i] == 0x0a) {
968 data->state.scratch[si++] = 0x0d;
969 data->state.scratch[si] = 0x0a;
970 if(!data->set.crlf) {
971 /* we're here only because FTP is in ASCII mode...
972 bump infilesize for the LF we just added */
973 if(data->state.infilesize != -1)
974 data->state.infilesize++;
975 }
976 }
977 else
978 data->state.scratch[si] = k->upload_fromhere[i];
979 }
980
981 if(si != nread) {
982 /* only perform the special operation if we really did replace
983 anything */
984 nread = si;
985
986 /* upload from the new (replaced) buffer instead */
987 k->upload_fromhere = data->state.scratch;
988
989 /* set the new amount too */
990 k->upload_present = nread;
991 }
992 }
993
994#ifndef CURL_DISABLE_SMTP
995 if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
996 result = Curl_smtp_escape_eob(data, nread, offset);
997 if(result)
998 return result;
999 }
1000#endif /* CURL_DISABLE_SMTP */
1001 } /* if 0 == k->upload_present or appended to upload buffer */
1002 else {
1003 /* We have a partial buffer left from a previous "round". Use
1004 that instead of reading more data */
1005 }
1006
1007 /* write to socket (send away data) */
1008 result = Curl_write(data,
1009 conn->writesockfd, /* socket to send to */
1010 k->upload_fromhere, /* buffer pointer */
1011 k->upload_present, /* buffer size */
1012 &bytes_written); /* actually sent */
1013 if(result)
1014 return result;
1015
1016 win_update_buffer_size(conn->writesockfd);
1017
1018 if(k->pendingheader) {
1019 /* parts of what was sent was header */
1020 curl_off_t n = CURLMIN(k->pendingheader, bytes_written);
1021 /* show the data before we change the pointer upload_fromhere */
1022 Curl_debug(data, CURLINFO_HEADER_OUT, k->upload_fromhere, (size_t)n);
1023 k->pendingheader -= n;
1024 nbody = bytes_written - n; /* size of the written body part */
1025 }
1026 else
1027 nbody = bytes_written;
1028
1029 if(nbody) {
1030 /* show the data before we change the pointer upload_fromhere */
1031 Curl_debug(data, CURLINFO_DATA_OUT,
1032 &k->upload_fromhere[bytes_written - nbody],
1033 (size_t)nbody);
1034
1035 k->writebytecount += nbody;
1036 Curl_pgrsSetUploadCounter(data, k->writebytecount);
1037 }
1038
1039 if((!k->upload_chunky || k->forbidchunk) &&
1040 (k->writebytecount == data->state.infilesize)) {
1041 /* we have sent all data we were supposed to */
1042 k->upload_done = TRUE;
1043 infof(data, "We are completely uploaded and fine");
1044 }
1045
1046 if(k->upload_present != bytes_written) {
1047 /* we only wrote a part of the buffer (if anything), deal with it! */
1048
1049 /* store the amount of bytes left in the buffer to write */
1050 k->upload_present -= bytes_written;
1051
1052 /* advance the pointer where to find the buffer when the next send
1053 is to happen */
1054 k->upload_fromhere += bytes_written;
1055 }
1056 else {
1057 /* we've uploaded that buffer now */
1058 result = Curl_get_upload_buffer(data);
1059 if(result)
1060 return result;
1061 k->upload_fromhere = data->state.ulbuf;
1062 k->upload_present = 0; /* no more bytes left */
1063
1064 if(k->upload_done) {
1065 result = Curl_done_sending(data, k);
1066 if(result)
1067 return result;
1068 }
1069 }
1070
1071
1072 } while(0); /* just to break out from! */
1073
1074 return CURLE_OK;
1075}
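/* Editorial sketch, not part of the upstream file: the application-side
   switches that send readwrite_upload() down the scratch-buffer LF->CRLF
   path above. CURLOPT_CRLF requests the conversion for any upload, while
   CURLOPT_TRANSFERTEXT asks for ASCII (text) mode, which triggers it on
   builds with CURL_DO_LINEEND_CONV; the helper name is illustrative only. */
#if 0
static void app_request_text_upload(CURL *easy)
{
  curl_easy_setopt(easy, CURLOPT_CRLF, 1L);          /* convert LF to CRLF */
  curl_easy_setopt(easy, CURLOPT_TRANSFERTEXT, 1L);  /* ASCII/text mode */
}
#endif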
1076
1077/*
1078 * Curl_readwrite() is the low-level function to be called when data is to
1079 * be read and written to/from the connection.
1080 *
1081 * return '*comeback' TRUE if we didn't properly drain the socket so this
1082 * function should get called again without select() or similar in between!
1083 */
1084CURLcode Curl_readwrite(struct connectdata *conn,
1085 struct Curl_easy *data,
1086 bool *done,
1087 bool *comeback)
1088{
1089 struct SingleRequest *k = &data->req;
1090 CURLcode result;
1091 int didwhat = 0;
1092
1093 curl_socket_t fd_read;
1094 curl_socket_t fd_write;
1095 int select_res = conn->cselect_bits;
1096
1097 conn->cselect_bits = 0;
1098
1099 /* only use the proper socket if the *_HOLD bit is not set simultaneously as
1100 then we are in rate limiting state in that transfer direction */
1101
1102 if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
1103 fd_read = conn->sockfd;
1104 else
1105 fd_read = CURL_SOCKET_BAD;
1106
1107 if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
1108 fd_write = conn->writesockfd;
1109 else
1110 fd_write = CURL_SOCKET_BAD;
1111
1112#if defined(USE_HTTP2) || defined(USE_HTTP3)
1113 if(data->state.drain) {
1114 select_res |= CURL_CSELECT_IN;
1115 DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data"));
1116 }
1117#endif
1118
1119 if(!select_res) /* Call for select()/poll() only, if read/write/error
1120 status is not known. */
1121 select_res = Curl_socket_check(fd_read, CURL_SOCKET_BAD, fd_write, 0);
1122
1123 if(select_res == CURL_CSELECT_ERR) {
1124 failf(data, "select/poll returned error");
1125 result = CURLE_SEND_ERROR;
1126 goto out;
1127 }
1128
1129#ifdef USE_HYPER
1130 if(conn->datastream) {
1131 result = conn->datastream(data, conn, &didwhat, done, select_res);
1132 if(result || *done)
1133 goto out;
1134 }
1135 else {
1136#endif
1137 /* We go ahead and do a read if we have a readable socket or if
1138 the stream was rewound (in which case we have data in a
1139 buffer) */
1140 if((k->keepon & KEEP_RECV) && (select_res & CURL_CSELECT_IN)) {
1141 result = readwrite_data(data, conn, k, &didwhat, done, comeback);
1142 if(result || *done)
1143 goto out;
1144 }
1145
1146 /* If we still have writing to do, we check if we have a writable socket. */
1147 if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
1148 /* write */
1149
1150 result = readwrite_upload(data, conn, &didwhat);
1151 if(result)
1152 goto out;
1153 }
1154#ifdef USE_HYPER
1155 }
1156#endif
1157
1158 k->now = Curl_now();
1159 if(!didwhat) {
1160 /* no read no write, this is a timeout? */
1161 if(k->exp100 == EXP100_AWAITING_CONTINUE) {
1162 /* This should allow some time for the header to arrive, but only a
1163 very short time as otherwise it'll be too much wasted time too
1164 often. */
1165
1166 /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1167
1168 Therefore, when a client sends this header field to an origin server
1169 (possibly via a proxy) from which it has never seen a 100 (Continue)
1170 status, the client SHOULD NOT wait for an indefinite period before
1171 sending the request body.
1172
1173 */
1174
1175 timediff_t ms = Curl_timediff(k->now, k->start100);
1176 if(ms >= data->set.expect_100_timeout) {
1177 /* we've waited long enough, continue anyway */
1178 k->exp100 = EXP100_SEND_DATA;
1179 k->keepon |= KEEP_SEND;
1180 Curl_expire_done(data, EXPIRE_100_TIMEOUT);
1181 infof(data, "Done waiting for 100-continue");
1182 }
1183 }
1184
1185#ifdef ENABLE_QUIC
1186 if(conn->transport == TRNSPRT_QUIC) {
1187 result = Curl_quic_idle(data);
1188 if(result)
1189 goto out;
1190 }
1191#endif
1192 }
1193
1194 if(Curl_pgrsUpdate(data))
1195 result = CURLE_ABORTED_BY_CALLBACK;
1196 else
1197 result = Curl_speedcheck(data, k->now);
1198 if(result)
1199 goto out;
1200
1201 if(k->keepon) {
1202 if(0 > Curl_timeleft(data, &k->now, FALSE)) {
1203 if(k->size != -1) {
1204 failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
1205 " milliseconds with %" CURL_FORMAT_CURL_OFF_T " out of %"
1206 CURL_FORMAT_CURL_OFF_T " bytes received",
1207 Curl_timediff(k->now, data->progress.t_startsingle),
1208 k->bytecount, k->size);
1209 }
1210 else {
1211 failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
1212 " milliseconds with %" CURL_FORMAT_CURL_OFF_T " bytes received",
1213 Curl_timediff(k->now, data->progress.t_startsingle),
1214 k->bytecount);
1215 }
1216 result = CURLE_OPERATION_TIMEDOUT;
1217 goto out;
1218 }
1219 }
1220 else {
1221 /*
1222 * The transfer has been performed. Just make some general checks before
1223 * returning.
1224 */
1225
1226 if(!(data->req.no_body) && (k->size != -1) &&
1227 (k->bytecount != k->size) &&
1228#ifdef CURL_DO_LINEEND_CONV
1229 /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1230 so we'll check to see if the discrepancy can be explained
1231 by the number of CRLFs we've changed to LFs.
1232 */
1233 (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1234#endif /* CURL_DO_LINEEND_CONV */
1235 !k->newurl) {
1236 failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
1237 " bytes remaining to read", k->size - k->bytecount);
1238 result = CURLE_PARTIAL_FILE;
1239 goto out;
1240 }
1241 if(!(data->req.no_body) && k->chunk &&
1242 (conn->chunk.state != CHUNK_STOP)) {
1243 /*
1244 * In chunked mode, return an error if the connection is closed prior to
1245 * the empty (terminating) chunk is read.
1246 *
1247 * The condition above used to check for
1248 * conn->proto.http->chunk.datasize != 0 which is true after reading
1249 * *any* chunk, not just the empty chunk.
1250 *
1251 */
1252 failf(data, "transfer closed with outstanding read data remaining");
1253 result = CURLE_PARTIAL_FILE;
1254 goto out;
1255 }
1256 if(Curl_pgrsUpdate(data)) {
1257 result = CURLE_ABORTED_BY_CALLBACK;
1258 goto out;
1259 }
1260 }
1261
1262 /* Now update the "done" boolean we return */
1263 *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
1264 KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;
1265 result = CURLE_OK;
1266out:
1267 DEBUGF(infof(data, "Curl_readwrite(handle=%p) -> %d", data, result));
1268 return result;
1269}
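/* Editorial sketch, not part of the upstream file: the 100-continue wait in
   Curl_readwrite() is bounded by the public CURLOPT_EXPECT_100_TIMEOUT_MS
   option, which an application can shorten if its servers never send the
   interim response; the helper name is illustrative only. */
#if 0
static void app_shorten_expect_wait(CURL *easy)
{
  /* give the server at most 500 ms to answer with "100 Continue" */
  curl_easy_setopt(easy, CURLOPT_EXPECT_100_TIMEOUT_MS, 500L);
}
#endif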
1270
1271/*
1272 * Curl_single_getsock() gets called by the multi interface code when the app
1273 * has requested to get the sockets for the current connection. This function
1274 * will then be called once for every connection that the multi interface
1275 * keeps track of. This function will only be called for connections that are
1276 * in the proper state to have this information available.
1277 */
1278int Curl_single_getsock(struct Curl_easy *data,
1279 struct connectdata *conn,
1280 curl_socket_t *sock)
1281{
1282 int bitmap = GETSOCK_BLANK;
1283 unsigned sockindex = 0;
1284
1285 if(conn->handler->perform_getsock)
1286 return conn->handler->perform_getsock(data, conn, sock);
1287
1288 /* don't include HOLD and PAUSE connections */
1289 if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {
1290
1291 DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
1292
1293 bitmap |= GETSOCK_READSOCK(sockindex);
1294 sock[sockindex] = conn->sockfd;
1295 }
1296
1297 /* don't include HOLD and PAUSE connections */
1298 if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
1299 if((conn->sockfd != conn->writesockfd) ||
1300 bitmap == GETSOCK_BLANK) {
1301 /* only if they are not the same socket and we have a readable
1302 one, we increase index */
1303 if(bitmap != GETSOCK_BLANK)
1304 sockindex++; /* increase index if we need two entries */
1305
1306 DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
1307
1308 sock[sockindex] = conn->writesockfd;
1309 }
1310
1311 bitmap |= GETSOCK_WRITESOCK(sockindex);
1312 }
1313
1314 return bitmap;
1315}
1316
1317/* Curl_init_CONNECT() gets called each time the handle switches to CONNECT
1318 which means this gets called once for each subsequent redirect etc */
1319void Curl_init_CONNECT(struct Curl_easy *data)
1320{
1321 data->state.fread_func = data->set.fread_func_set;
1322 data->state.in = data->set.in_set;
1323}
1324
1325/*
1326 * Curl_pretransfer() is called immediately before a transfer starts, and only
1327 * once for one transfer no matter if it has redirects or do multi-pass
1328 * authentication etc.
1329 */
1330CURLcode Curl_pretransfer(struct Curl_easy *data)
1331{
1332 CURLcode result;
1333
1334 if(!data->state.url && !data->set.uh) {
1335 /* we can't do anything without URL */
1336 failf(data, "No URL set");
1337 return CURLE_URL_MALFORMAT;
1338 }
1339
1340 /* since the URL may have been redirected in a previous use of this handle */
1341 if(data->state.url_alloc) {
1342 /* the already set URL is allocated, free it first! */
1343 Curl_safefree(data->state.url);
1344 data->state.url_alloc = FALSE;
1345 }
1346
1347 if(!data->state.url && data->set.uh) {
1348 CURLUcode uc;
1349 free(data->set.str[STRING_SET_URL]);
1350 uc = curl_url_get(data->set.uh,
1351 CURLUPART_URL, &data->set.str[STRING_SET_URL], 0);
1352 if(uc) {
1353 failf(data, "No URL set");
1354 return CURLE_URL_MALFORMAT;
1355 }
1356 }
1357
1358 data->state.prefer_ascii = data->set.prefer_ascii;
1359 data->state.list_only = data->set.list_only;
1360 data->state.httpreq = data->set.method;
1361 data->state.url = data->set.str[STRING_SET_URL];
1362
1363 /* Init the SSL session ID cache here. We do it here since we want to do it
1364 after the *_setopt() calls (that could specify the size of the cache) but
1365 before any transfer takes place. */
1366 result = Curl_ssl_initsessions(data, data->set.general_ssl.max_ssl_sessions);
1367 if(result)
1368 return result;
1369
1370 data->state.requests = 0;
1371 data->state.followlocation = 0; /* reset the location-follow counter */
1372 data->state.this_is_a_follow = FALSE; /* reset this */
1373 data->state.errorbuf = FALSE; /* no error has occurred */
1374 data->state.httpwant = data->set.httpwant;
1375 data->state.httpversion = 0;
1376 data->state.authproblem = FALSE;
1377 data->state.authhost.want = data->set.httpauth;
1378 data->state.authproxy.want = data->set.proxyauth;
1379 Curl_safefree(data->info.wouldredirect);
1380
1381 if(data->state.httpreq == HTTPREQ_PUT)
1382 data->state.infilesize = data->set.filesize;
1383 else if((data->state.httpreq != HTTPREQ_GET) &&
1384 (data->state.httpreq != HTTPREQ_HEAD)) {
1385 data->state.infilesize = data->set.postfieldsize;
1386 if(data->set.postfields && (data->state.infilesize == -1))
1387 data->state.infilesize = (curl_off_t)strlen(data->set.postfields);
1388 }
1389 else
1390 data->state.infilesize = 0;
1391
1392#ifndef CURL_DISABLE_COOKIES
1393 /* If there is a list of cookie files to read, do it now! */
1394 if(data->state.cookielist)
1395 Curl_cookie_loadfiles(data);
1396#endif
1397 /* If there is a list of host pairs to deal with */
1398 if(data->state.resolve)
1399 result = Curl_loadhostpairs(data);
1400
1401 if(!result) {
1402 /* Allow data->set.use_port to set which port to use. This needs to be
1403 * disabled for example when we follow Location: headers to URLs using
1404 * different ports! */
1405 data->state.allow_port = TRUE;
1406
1407#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1408 /*************************************************************
1409 * Tell signal handler to ignore SIGPIPE
1410 *************************************************************/
1411 if(!data->set.no_signal)
1412 data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1413#endif
1414
1415 Curl_initinfo(data); /* reset session-specific information "variables" */
1416 Curl_pgrsResetTransferSizes(data);
1417 Curl_pgrsStartNow(data);
1418
1419 /* In case the handle is re-used and an authentication method was picked
1420 in the session we need to make sure we only use the one(s) we now
1421 consider to be fine */
1422 data->state.authhost.picked &= data->state.authhost.want;
1423 data->state.authproxy.picked &= data->state.authproxy.want;
1424
1425#ifndef CURL_DISABLE_FTP
1426 data->state.wildcardmatch = data->set.wildcard_enabled;
1427 if(data->state.wildcardmatch) {
1428 struct WildcardData *wc = &data->wildcard;
1429 if(wc->state < CURLWC_INIT) {
1430 result = Curl_wildcard_init(wc); /* init wildcard structures */
1431 if(result)
1432 return CURLE_OUT_OF_MEMORY;
1433 }
1434 }
1435#endif
1436 Curl_http2_init_state(&data->state);
1437 result = Curl_hsts_loadcb(data, data->hsts);
1438 }
1439
1440 /*
1441 * Set user-agent. Used for HTTP, but since we can attempt to tunnel
1442 * basically anything through an HTTP proxy we can't limit this based on
1443 * protocol.
1444 */
1445 if(data->set.str[STRING_USERAGENT]) {
1446 Curl_safefree(data->state.aptr.uagent);
1447 data->state.aptr.uagent =
1448 aprintf("User-Agent: %s\r\n", data->set.str[STRING_USERAGENT]);
1449 if(!data->state.aptr.uagent)
1450 return CURLE_OUT_OF_MEMORY;
1451 }
1452
1453 if(!result)
1454 result = Curl_setstropt(&data->state.aptr.user,
1455 data->set.str[STRING_USERNAME]);
1456 if(!result)
1457 result = Curl_setstropt(&data->state.aptr.passwd,
1458 data->set.str[STRING_PASSWORD]);
1459 if(!result)
1460 result = Curl_setstropt(&data->state.aptr.proxyuser,
1461 data->set.str[STRING_PROXYUSERNAME]);
1462 if(!result)
1463 result = Curl_setstropt(&data->state.aptr.proxypasswd,
1464 data->set.str[STRING_PROXYPASSWORD]);
1465
1466 data->req.headerbytecount = 0;
1467 Curl_headers_cleanup(data);
1468 return result;
1469}
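/* Editorial sketch, not part of the upstream file: Curl_pretransfer() fails
   with CURLE_URL_MALFORMAT unless the application supplied a target either
   as a string (CURLOPT_URL) or as a pre-parsed handle (CURLOPT_CURLU); the
   helper name and URL below are illustrative only. */
#if 0
static CURLcode app_set_target(CURL *easy, CURLU *uh)
{
  if(uh)
    return curl_easy_setopt(easy, CURLOPT_CURLU, uh);
  return curl_easy_setopt(easy, CURLOPT_URL, "https://example.com/");
}
#endif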
1470
1471/*
1472 * Curl_posttransfer() is called immediately after a transfer ends
1473 */
1474CURLcode Curl_posttransfer(struct Curl_easy *data)
1475{
1476#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1477 /* restore the signal handler for SIGPIPE before we get back */
1478 if(!data->set.no_signal)
1479 signal(SIGPIPE, data->state.prev_signal);
1480#else
1481 (void)data; /* unused parameter */
1482#endif
1483
1484 return CURLE_OK;
1485}
1486
1487/*
1488 * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1489 * as given by the remote server and set up the new URL to request.
1490 *
1491 * This function DOES NOT FREE the given url.
1492 */
1493CURLcode Curl_follow(struct Curl_easy *data,
1494 char *newurl, /* the Location: string */
1495 followtype type) /* see transfer.h */
1496{
1497#ifdef CURL_DISABLE_HTTP
1498 (void)data;
1499 (void)newurl;
1500 (void)type;
1501 /* Location: following will not happen when HTTP is disabled */
1502 return CURLE_TOO_MANY_REDIRECTS;
1503#else
1504
1505 /* Location: redirect */
1506 bool disallowport = FALSE;
1507 bool reachedmax = FALSE;
1508 CURLUcode uc;
1509
1510 DEBUGASSERT(type != FOLLOW_NONE);
1511
1512 if(type != FOLLOW_FAKE)
1513 data->state.requests++; /* count all real follows */
1514 if(type == FOLLOW_REDIR) {
1515 if((data->set.maxredirs != -1) &&
1516 (data->state.followlocation >= data->set.maxredirs)) {
1517 reachedmax = TRUE;
1518 type = FOLLOW_FAKE; /* switch to fake to store the would-be-redirected
1519 to URL */
1520 }
1521 else {
1522 data->state.followlocation++; /* count redirect-followings, including
1523 auth reloads */
1524
1525 if(data->set.http_auto_referer) {
1526 CURLU *u;
1527 char *referer = NULL;
1528
1529 /* We are asked to automatically set the previous URL as the referer
1530 when we get the next URL. We pick the ->url field, which may or may
1531 not be 100% correct */
1532
1533 if(data->state.referer_alloc) {
1534 Curl_safefree(data->state.referer);
1535 data->state.referer_alloc = FALSE;
1536 }
1537
1538 /* Make a copy of the URL without credentials and fragment */
1539 u = curl_url();
1540 if(!u)
1541 return CURLE_OUT_OF_MEMORY;
1542
1543 uc = curl_url_set(u, CURLUPART_URL, data->state.url, 0);
1544 if(!uc)
1545 uc = curl_url_set(u, CURLUPART_FRAGMENT, NULL, 0);
1546 if(!uc)
1547 uc = curl_url_set(u, CURLUPART_USER, NULL, 0);
1548 if(!uc)
1549 uc = curl_url_set(u, CURLUPART_PASSWORD, NULL, 0);
1550 if(!uc)
1551 uc = curl_url_get(u, CURLUPART_URL, &referer, 0);
1552
1553 curl_url_cleanup(u);
1554
1555 if(uc || !referer)
1556 return CURLE_OUT_OF_MEMORY;
1557
1558 data->state.referer = referer;
1559 data->state.referer_alloc = TRUE; /* yes, free this later */
1560 }
1561 }
1562 }
1563
1564 if((type != FOLLOW_RETRY) &&
1565 (data->req.httpcode != 401) && (data->req.httpcode != 407) &&
1566 Curl_is_absolute_url(newurl, NULL, 0, FALSE))
1567 /* If this is not redirect due to a 401 or 407 response and an absolute
1568 URL: don't allow a custom port number */
1569 disallowport = TRUE;
1570
1571 DEBUGASSERT(data->state.uh);
1572 uc = curl_url_set(data->state.uh, CURLUPART_URL, newurl,
1573 (type == FOLLOW_FAKE) ? CURLU_NON_SUPPORT_SCHEME :
1574 ((type == FOLLOW_REDIR) ? CURLU_URLENCODE : 0) |
1575 CURLU_ALLOW_SPACE |
1576 (data->set.path_as_is ? CURLU_PATH_AS_IS : 0));
1577 if(uc) {
1578 if(type != FOLLOW_FAKE) {
1579 failf(data, "The redirect target URL could not be parsed: %s",
1580 curl_url_strerror(uc));
1581 return Curl_uc_to_curlcode(uc);
1582 }
1583
1584 /* the URL could not be parsed for some reason, but since this is FAKE
1585 mode, just duplicate the field as-is */
1586 newurl = strdup(newurl);
1587 if(!newurl)
1588 return CURLE_OUT_OF_MEMORY;
1589 }
1590 else {
1591 uc = curl_url_get(data->state.uh, CURLUPART_URL, &newurl, 0);
1592 if(uc)
1593 return Curl_uc_to_curlcode(uc);
1594
1595 /* Clear auth if this redirects to a different port number or protocol,
1596 unless permitted */
1597 if(!data->set.allow_auth_to_other_hosts && (type != FOLLOW_FAKE)) {
1598 char *portnum;
1599 int port;
1600 bool clear = FALSE;
1601
1602 if(data->set.use_port && data->state.allow_port)
1603 /* a custom port is used */
1604 port = (int)data->set.use_port;
1605 else {
1606 uc = curl_url_get(data->state.uh, CURLUPART_PORT, &portnum,
1607 CURLU_DEFAULT_PORT);
1608 if(uc) {
1609 free(newurl);
1610 return Curl_uc_to_curlcode(uc);
1611 }
1612 port = atoi(portnum);
1613 free(portnum);
1614 }
1615 if(port != data->info.conn_remote_port) {
1616 infof(data, "Clear auth, redirects to port from %u to %u",
1617 data->info.conn_remote_port, port);
1618 clear = TRUE;
1619 }
1620 else {
1621 char *scheme;
1622 const struct Curl_handler *p;
1623 uc = curl_url_get(data->state.uh, CURLUPART_SCHEME, &scheme, 0);
1624 if(uc) {
1625 free(newurl);
1626 return Curl_uc_to_curlcode(uc);
1627 }
1628
1629 p = Curl_builtin_scheme(scheme, CURL_ZERO_TERMINATED);
1630 if(p && (p->protocol != data->info.conn_protocol)) {
1631 infof(data, "Clear auth, redirects scheme from %s to %s",
1632 data->info.conn_scheme, scheme);
1633 clear = TRUE;
1634 }
1635 free(scheme);
1636 }
1637 if(clear) {
1638 Curl_safefree(data->state.aptr.user);
1639 Curl_safefree(data->state.aptr.passwd);
1640 }
1641 }
1642 }
1643
1644 if(type == FOLLOW_FAKE) {
1645 /* we're only figuring out the new url if we would've followed locations
1646 but now we're done so we can get out! */
1647 data->info.wouldredirect = newurl;
1648
1649 if(reachedmax) {
1650 failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
1651 return CURLE_TOO_MANY_REDIRECTS;
1652 }
1653 return CURLE_OK;
1654 }
1655
1656 if(disallowport)
1657 data->state.allow_port = FALSE;
1658
1659 if(data->state.url_alloc)
1660 Curl_safefree(data->state.url);
1661
1662 data->state.url = newurl;
1663 data->state.url_alloc = TRUE;
1664
1665 infof(data, "Issue another request to this URL: '%s'", data->state.url);
1666
1667 /*
1668 * We get here when the HTTP code is 300-399 (and 401). We need to perform
1669 * differently based on exactly what return code there was.
1670 *
1671 * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
1672 * an HTTP (proxy-) authentication scheme other than Basic.
1673 */
1674 switch(data->info.httpcode) {
1675 /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
1676 Authorization: XXXX header in the HTTP request code snippet */
1677 /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
1678 Proxy-Authorization: XXXX header in the HTTP request code snippet */
1679 /* 300 - Multiple Choices */
1680 /* 306 - Not used */
1681 /* 307 - Temporary Redirect */
1682 default: /* for all above (and the unknown ones) */
1683 /* Some codes are explicitly mentioned since I've checked RFC2616 and they
1684 * seem to be OK to POST to.
1685 */
1686 break;
1687 case 301: /* Moved Permanently */
1688 /* (quote from RFC7231, section 6.4.2)
1689 *
1690 * Note: For historical reasons, a user agent MAY change the request
1691 * method from POST to GET for the subsequent request. If this
1692 * behavior is undesired, the 307 (Temporary Redirect) status code
1693 * can be used instead.
1694 *
1695 * ----
1696 *
1697 * Many webservers expect this, so these servers often answers to a POST
1698 * request with an error page. To be sure that libcurl gets the page that
1699 * most user agents would get, libcurl has to force GET.
1700 *
1701 * This behavior is forbidden by RFC1945 and the obsolete RFC2616, and
1702 * can be overridden with CURLOPT_POSTREDIR.
1703 */
1704 if((data->state.httpreq == HTTPREQ_POST
1705 || data->state.httpreq == HTTPREQ_POST_FORM
1706 || data->state.httpreq == HTTPREQ_POST_MIME)
1707 && !(data->set.keep_post & CURL_REDIR_POST_301)) {
1708 infof(data, "Switch from POST to GET");
1709 data->state.httpreq = HTTPREQ_GET;
1710 }
1711 break;
1712 case 302: /* Found */
1713 /* (quote from RFC7231, section 6.4.3)
1714 *
1715 * Note: For historical reasons, a user agent MAY change the request
1716 * method from POST to GET for the subsequent request. If this
1717 * behavior is undesired, the 307 (Temporary Redirect) status code
1718 * can be used instead.
1719 *
1720 * ----
1721 *
1722 * Many webservers expect this, so these servers often answers to a POST
1723 * request with an error page. To be sure that libcurl gets the page that
1724 * most user agents would get, libcurl has to force GET.
1725 *
1726 * This behavior is forbidden by RFC1945 and the obsolete RFC2616, and
1727 * can be overridden with CURLOPT_POSTREDIR.
1728 */
1729 if((data->state.httpreq == HTTPREQ_POST
1730 || data->state.httpreq == HTTPREQ_POST_FORM
1731 || data->state.httpreq == HTTPREQ_POST_MIME)
1732 && !(data->set.keep_post & CURL_REDIR_POST_302)) {
1733 infof(data, "Switch from POST to GET");
1734 data->state.httpreq = HTTPREQ_GET;
1735 }
1736 break;
1737
1738 case 303: /* See Other */
1739 /* 'See Other' location is not the resource but a substitute for the
1740 * resource. In this case we switch the method to GET/HEAD, unless the
1741 * method is POST and the user specified to keep it as POST.
1742 * https://github.com/curl/curl/issues/5237#issuecomment-614641049
1743 */
1744 if(data->state.httpreq != HTTPREQ_GET &&
1745 ((data->state.httpreq != HTTPREQ_POST &&
1746 data->state.httpreq != HTTPREQ_POST_FORM &&
1747 data->state.httpreq != HTTPREQ_POST_MIME) ||
1748 !(data->set.keep_post & CURL_REDIR_POST_303))) {
1749 data->state.httpreq = HTTPREQ_GET;
1750 data->set.upload = false;
1751 infof(data, "Switch to %s",
1752 data->req.no_body?"HEAD":"GET");
1753 }
1754 break;
1755 case 304: /* Not Modified */
1756 /* 304 means we did a conditional request and it was "Not modified".
1757 * We shouldn't get any Location: header in this response!
1758 */
1759 break;
1760 case 305: /* Use Proxy */
1761 /* (quote from RFC2616, section 10.3.6):
1762 * "The requested resource MUST be accessed through the proxy given
1763 * by the Location field. The Location field gives the URI of the
1764 * proxy. The recipient is expected to repeat this single request
1765 * via the proxy. 305 responses MUST only be generated by origin
1766 * servers."
1767 */
1768 break;
1769 }
1770 Curl_pgrsTime(data, TIMER_REDIRECT);
1771 Curl_pgrsResetTransferSizes(data);
1772
1773 return CURLE_OK;
1774#endif /* CURL_DISABLE_HTTP */
1775}
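/* Editorial sketch, not part of the upstream file: the application-side
   knobs behind the 301/302/303 cases in Curl_follow(). CURLOPT_FOLLOWLOCATION
   enables following at all, CURLOPT_MAXREDIRS bounds it, and
   CURLOPT_POSTREDIR keeps the method as POST across selected redirects
   instead of the historical POST-to-GET switch; the helper name is
   illustrative only. */
#if 0
static void app_follow_setup(CURL *easy)
{
  curl_easy_setopt(easy, CURLOPT_FOLLOWLOCATION, 1L);
  curl_easy_setopt(easy, CURLOPT_MAXREDIRS, 10L);
  /* keep POST on 301, 302 and 303 responses */
  curl_easy_setopt(easy, CURLOPT_POSTREDIR, (long)CURL_REDIR_POST_ALL);
}
#endif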
1776
1777/* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.
1778
1779 NOTE: that the *url is malloc()ed. */
1780CURLcode Curl_retry_request(struct Curl_easy *data, char **url)
1781{
1782 struct connectdata *conn = data->conn;
1783 bool retry = FALSE;
1784 *url = NULL;
1785
1786 /* if we're talking upload, we can't do the checks below, unless the protocol
1787 is HTTP as when uploading over HTTP we will still get a response */
1788 if(data->set.upload &&
1789 !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
1790 return CURLE_OK;
1791
1792 if((data->req.bytecount + data->req.headerbytecount == 0) &&
1793 conn->bits.reuse &&
1794 (!data->req.no_body || (conn->handler->protocol & PROTO_FAMILY_HTTP))
1795#ifndef CURL_DISABLE_RTSP
1796 && (data->set.rtspreq != RTSPREQ_RECEIVE)
1797#endif
1798 )
1799 /* We got no data, we attempted to re-use a connection. For HTTP this
1800 can be a retry so we try again regardless if we expected a body.
1801 For other protocols we only try again only if we expected a body.
1802
1803 This might happen if the connection was left alive when we were
1804 done using it before, but that was closed when we wanted to read from
1805 it again. Bad luck. Retry the same request on a fresh connect! */
1806 retry = TRUE;
1807 else if(data->state.refused_stream &&
1808 (data->req.bytecount + data->req.headerbytecount == 0) ) {
1809 /* This was sent on a refused stream, safe to rerun. A refused stream
1810 error can typically only happen on HTTP/2 level if the stream is safe
1811 to issue again, but the nghttp2 API can deliver the message to other
1812 streams as well, which is why this adds the check the data counters
1813 too. */
1814 infof(data, "REFUSED_STREAM, retrying a fresh connect");
1815 data->state.refused_stream = FALSE; /* clear again */
1816 retry = TRUE;
1817 }
1818 if(retry) {
1819#define CONN_MAX_RETRIES 5
1820 if(data->state.retrycount++ >= CONN_MAX_RETRIES) {
1821 failf(data, "Connection died, tried %d times before giving up",
1822 CONN_MAX_RETRIES);
1823 data->state.retrycount = 0;
1824 return CURLE_SEND_ERROR;
1825 }
1826 infof(data, "Connection died, retrying a fresh connect (retry count: %d)",
1827 data->state.retrycount);
1828 *url = strdup(data->state.url);
1829 if(!*url)
1830 return CURLE_OUT_OF_MEMORY;
1831
1832 connclose(conn, "retry"); /* close this connection */
1833 conn->bits.retry = TRUE; /* mark this as a connection we're about
1834 to retry. Marking it this way should
1835 prevent i.e HTTP transfers to return
1836 error just because nothing has been
1837 transferred! */
1838
1839
1840 if((conn->handler->protocol&PROTO_FAMILY_HTTP) &&
1841 data->req.writebytecount) {
1842 data->state.rewindbeforesend = TRUE;
1843 infof(data, "state.rewindbeforesend = TRUE");
1844 }
1845 }
1846 return CURLE_OK;
1847}
1848
1849/*
1850 * Curl_setup_transfer() is called to setup some basic properties for the
1851 * upcoming transfer.
1852 */
1853void
1854Curl_setup_transfer(
1855 struct Curl_easy *data, /* transfer */
1856 int sockindex, /* socket index to read from or -1 */
1857 curl_off_t size, /* -1 if unknown at this point */
1858 bool getheader, /* TRUE if header parsing is wanted */
1859 int writesockindex /* socket index to write to, it may very well be
1860 the same we read from. -1 disables */
1861 )
1862{
1863 struct SingleRequest *k = &data->req;
1864 struct connectdata *conn = data->conn;
1865 struct HTTP *http = data->req.p.http;
1866 bool httpsending;
1867
1868 DEBUGASSERT(conn != NULL);
1869 DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
1870
1871 httpsending = ((conn->handler->protocol&PROTO_FAMILY_HTTP) &&
1872 (http->sending == HTTPSEND_REQUEST));
1873
1874 if(conn->bits.multiplex || conn->httpversion == 20 || httpsending) {
1875 /* when multiplexing, the read/write sockets need to be the same! */
1876 conn->sockfd = sockindex == -1 ?
1877 ((writesockindex == -1 ? CURL_SOCKET_BAD : conn->sock[writesockindex])) :
1878 conn->sock[sockindex];
1879 conn->writesockfd = conn->sockfd;
1880 if(httpsending)
1881 /* special and very HTTP-specific */
1882 writesockindex = FIRSTSOCKET;
1883 }
1884 else {
1885 conn->sockfd = sockindex == -1 ?
1886 CURL_SOCKET_BAD : conn->sock[sockindex];
1887 conn->writesockfd = writesockindex == -1 ?
1888 CURL_SOCKET_BAD:conn->sock[writesockindex];
1889 }
1890 k->getheader = getheader;
1891
1892 k->size = size;
1893
1894 /* The code sequence below is placed in this function just because all
1895 necessary input is not always known in do_complete() as this function may
1896 be called after that */
1897
1898 if(!k->getheader) {
1899 k->header = FALSE;
1900 if(size > 0)
1901 Curl_pgrsSetDownloadSize(data, size);
1902 }
1903 /* we want header and/or body, if neither then don't do this! */
1904 if(k->getheader || !data->req.no_body) {
1905
1906 if(sockindex != -1)
1907 k->keepon |= KEEP_RECV;
1908
1909 if(writesockindex != -1) {
1910 /* HTTP 1.1 magic:
1911
1912 Even if we require a 100-return code before uploading data, we might
1913 need to write data before that since the REQUEST may not have been
1914 finished sent off just yet.
1915
1916 Thus, we must check if the request has been sent before we set the
1917 state info where we wait for the 100-return code
1918 */
1919 if((data->state.expect100header) &&
1920 (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
1921 (http->sending == HTTPSEND_BODY)) {
1922 /* wait with write until we either got 100-continue or a timeout */
1923 k->exp100 = EXP100_AWAITING_CONTINUE;
1924 k->start100 = Curl_now();
1925
1926 /* Set a timeout for the multi interface. Add the inaccuracy margin so
1927 that we don't fire slightly too early and get denied to run. */
1928 Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
1929 }
1930 else {
1931 if(data->state.expect100header)
1932 /* when we've sent off the rest of the headers, we must await a
1933 100-continue but first finish sending the request */
1934 k->exp100 = EXP100_SENDING_REQUEST;
1935
1936 /* enable the write bit when we're not waiting for continue */
1937 k->keepon |= KEEP_SEND;
1938 }
1939 } /* if(writesockindex != -1) */
1940 } /* if(k->getheader || !data->req.no_body) */
1941
1942}