Annotation of embedaddon/curl/lib/http.c, revision 1.1.1.1
1.1 misho 1: /***************************************************************************
2: * _ _ ____ _
3: * Project ___| | | | _ \| |
4: * / __| | | | |_) | |
5: * | (__| |_| | _ <| |___
6: * \___|\___/|_| \_\_____|
7: *
8: * Copyright (C) 1998 - 2020, Daniel Stenberg, <daniel@haxx.se>, et al.
9: *
10: * This software is licensed as described in the file COPYING, which
11: * you should have received as part of this distribution. The terms
12: * are also available at https://curl.haxx.se/docs/copyright.html.
13: *
14: * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15: * copies of the Software, and permit persons to whom the Software is
16: * furnished to do so, under the terms of the COPYING file.
17: *
18: * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19: * KIND, either express or implied.
20: *
21: ***************************************************************************/
22:
23: #include "curl_setup.h"
24:
25: #ifndef CURL_DISABLE_HTTP
26:
27: #ifdef HAVE_NETINET_IN_H
28: #include <netinet/in.h>
29: #endif
30:
31: #ifdef HAVE_NETDB_H
32: #include <netdb.h>
33: #endif
34: #ifdef HAVE_ARPA_INET_H
35: #include <arpa/inet.h>
36: #endif
37: #ifdef HAVE_NET_IF_H
38: #include <net/if.h>
39: #endif
40: #ifdef HAVE_SYS_IOCTL_H
41: #include <sys/ioctl.h>
42: #endif
43:
44: #ifdef HAVE_SYS_PARAM_H
45: #include <sys/param.h>
46: #endif
47:
48: #include "urldata.h"
49: #include <curl/curl.h>
50: #include "transfer.h"
51: #include "sendf.h"
52: #include "formdata.h"
53: #include "mime.h"
54: #include "progress.h"
55: #include "curl_base64.h"
56: #include "cookie.h"
57: #include "vauth/vauth.h"
58: #include "vtls/vtls.h"
59: #include "http_digest.h"
60: #include "http_ntlm.h"
61: #include "curl_ntlm_wb.h"
62: #include "http_negotiate.h"
63: #include "url.h"
64: #include "share.h"
65: #include "hostip.h"
66: #include "http.h"
67: #include "select.h"
68: #include "parsedate.h" /* for the week day and month names */
69: #include "strtoofft.h"
70: #include "multiif.h"
71: #include "strcase.h"
72: #include "content_encoding.h"
73: #include "http_proxy.h"
74: #include "warnless.h"
75: #include "non-ascii.h"
76: #include "http2.h"
77: #include "connect.h"
78: #include "strdup.h"
79: #include "altsvc.h"
80:
81: /* The last 3 #include files should be in this order */
82: #include "curl_printf.h"
83: #include "curl_memory.h"
84: #include "memdebug.h"
85:
86: /*
87: * Forward declarations.
88: */
89:
90: static int http_getsock_do(struct connectdata *conn,
91: curl_socket_t *socks);
92: static int http_should_fail(struct connectdata *conn);
93:
94: #ifndef CURL_DISABLE_PROXY
95: static CURLcode add_haproxy_protocol_header(struct connectdata *conn);
96: #endif
97:
98: #ifdef USE_SSL
99: static CURLcode https_connecting(struct connectdata *conn, bool *done);
100: static int https_getsock(struct connectdata *conn,
101: curl_socket_t *socks);
102: #else
103: #define https_connecting(x,y) CURLE_COULDNT_CONNECT
104: #endif
105: static CURLcode http_setup_conn(struct connectdata *conn);
106:
107: /*
108: * HTTP handler interface.
109: */
110: const struct Curl_handler Curl_handler_http = {
111: "HTTP", /* scheme */
112: http_setup_conn, /* setup_connection */
113: Curl_http, /* do_it */
114: Curl_http_done, /* done */
115: ZERO_NULL, /* do_more */
116: Curl_http_connect, /* connect_it */
117: ZERO_NULL, /* connecting */
118: ZERO_NULL, /* doing */
119: ZERO_NULL, /* proto_getsock */
120: http_getsock_do, /* doing_getsock */
121: ZERO_NULL, /* domore_getsock */
122: ZERO_NULL, /* perform_getsock */
123: ZERO_NULL, /* disconnect */
124: ZERO_NULL, /* readwrite */
125: ZERO_NULL, /* connection_check */
126: PORT_HTTP, /* defport */
127: CURLPROTO_HTTP, /* protocol */
128: PROTOPT_CREDSPERREQUEST /* flags */
129: };
130:
131: #ifdef USE_SSL
132: /*
133: * HTTPS handler interface.
134: */
135: const struct Curl_handler Curl_handler_https = {
136: "HTTPS", /* scheme */
137: http_setup_conn, /* setup_connection */
138: Curl_http, /* do_it */
139: Curl_http_done, /* done */
140: ZERO_NULL, /* do_more */
141: Curl_http_connect, /* connect_it */
142: https_connecting, /* connecting */
143: ZERO_NULL, /* doing */
144: https_getsock, /* proto_getsock */
145: http_getsock_do, /* doing_getsock */
146: ZERO_NULL, /* domore_getsock */
147: ZERO_NULL, /* perform_getsock */
148: ZERO_NULL, /* disconnect */
149: ZERO_NULL, /* readwrite */
150: ZERO_NULL, /* connection_check */
151: PORT_HTTPS, /* defport */
152: CURLPROTO_HTTPS, /* protocol */
153: PROTOPT_SSL | PROTOPT_CREDSPERREQUEST | PROTOPT_ALPN_NPN /* flags */
154: };
155: #endif
156:
157: static CURLcode http_setup_conn(struct connectdata *conn)
158: {
159: /* allocate the HTTP-specific struct for the Curl_easy, only to survive
160: during this request */
161: struct HTTP *http;
162: struct Curl_easy *data = conn->data;
163: DEBUGASSERT(data->req.protop == NULL);
164:
165: http = calloc(1, sizeof(struct HTTP));
166: if(!http)
167: return CURLE_OUT_OF_MEMORY;
168:
169: Curl_mime_initpart(&http->form, conn->data);
170: data->req.protop = http;
171:
172: if(data->set.httpversion == CURL_HTTP_VERSION_3) {
173: if(conn->handler->flags & PROTOPT_SSL)
174: /* Only go HTTP/3 directly on HTTPS URLs. It needs a UDP socket and does
175: the QUIC dance. */
176: conn->transport = TRNSPRT_QUIC;
177: else {
178: failf(data, "HTTP/3 requested for non-HTTPS URL");
179: return CURLE_URL_MALFORMAT;
180: }
181: }
182: else {
183: if(!CONN_INUSE(conn))
184: /* if not already multi-using, setup connection details */
185: Curl_http2_setup_conn(conn);
186: Curl_http2_setup_req(data);
187: }
188: return CURLE_OK;
189: }
190:
191: #ifndef CURL_DISABLE_PROXY
192: /*
193: * Curl_checkProxyheaders() checks the linked list of custom proxy headers
194: * for a matching header. If this is not a proxy request, or if separate
195: * proxy headers are not enabled, the regular HTTP header list is searched.
196: *
197: * It takes a connectdata struct as input rather than the Curl_easy simply to
198: * know if this is a proxy request or not, as it then might check a different
199: * header list. Provide the header prefix without the colon!
200: */
201: char *Curl_checkProxyheaders(const struct connectdata *conn,
202: const char *thisheader)
203: {
204: struct curl_slist *head;
205: size_t thislen = strlen(thisheader);
206: struct Curl_easy *data = conn->data;
207:
208: for(head = (conn->bits.proxy && data->set.sep_headers) ?
209: data->set.proxyheaders : data->set.headers;
210: head; head = head->next) {
211: if(strncasecompare(head->data, thisheader, thislen) &&
212: Curl_headersep(head->data[thislen]))
213: return head->data;
214: }
215:
216: return NULL;
217: }
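/*
 * Usage sketch (editorial addition, not part of the original source):
 * callers pass the header name without the trailing colon, as the Basic
 * proxy-auth code further down in this file does. Matching is
 * case-insensitive and only compares the name part, so a list entry such
 * as "Proxy-Authorization: Basic xxxx" makes the call below return
 * non-NULL and libcurl then skips generating its own header:
 *
 *   if(!Curl_checkProxyheaders(conn, "Proxy-authorization"))
 *     result = http_output_basic(conn, TRUE);
 */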
218: #else
219: /* disabled */
220: #define Curl_checkProxyheaders(x,y) NULL
221: #endif
222:
223: /*
224: * Strip off leading and trailing whitespace from the value in the
225: * given HTTP header line and return a strdupped copy. Returns NULL in
226: * case of allocation failure. Returns an empty string if the header value
227: * consists entirely of whitespace.
228: */
229: char *Curl_copy_header_value(const char *header)
230: {
231: const char *start;
232: const char *end;
233: char *value;
234: size_t len;
235:
236: /* Find the end of the header name */
237: while(*header && (*header != ':'))
238: ++header;
239:
240: if(*header)
241: /* Skip over colon */
242: ++header;
243:
244: /* Find the first non-space letter */
245: start = header;
246: while(*start && ISSPACE(*start))
247: start++;
248:
249: /* data is in the host encoding so
250: use '\r' and '\n' instead of 0x0d and 0x0a */
251: end = strchr(start, '\r');
252: if(!end)
253: end = strchr(start, '\n');
254: if(!end)
255: end = strchr(start, '\0');
256: if(!end)
257: return NULL;
258:
259: /* skip all trailing space letters */
260: while((end > start) && ISSPACE(*end))
261: end--;
262:
263: /* get length of the type */
264: len = end - start + 1;
265:
266: value = malloc(len + 1);
267: if(!value)
268: return NULL;
269:
270: memcpy(value, start, len);
271: value[len] = 0; /* zero terminate */
272:
273: return value;
274: }
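/*
 * Example (editorial sketch, not part of the original source): for the
 * header line "Content-Type:   text/html \r\n" the function returns a
 * freshly allocated string holding "text/html"; the caller owns it and
 * must free it.
 *
 *   char *v = Curl_copy_header_value("Content-Type:   text/html \r\n");
 *   printf("%s\n", v ? v : "(out of memory)");   => prints "text/html"
 *   free(v);
 */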
275:
276: #ifndef CURL_DISABLE_HTTP_AUTH
277: /*
278: * http_output_basic() sets up an Authorization: header (or the proxy version)
279: * for HTTP Basic authentication.
280: *
281: * Returns CURLcode.
282: */
283: static CURLcode http_output_basic(struct connectdata *conn, bool proxy)
284: {
285: size_t size = 0;
286: char *authorization = NULL;
287: struct Curl_easy *data = conn->data;
288: char **userp;
289: const char *user;
290: const char *pwd;
291: CURLcode result;
292: char *out;
293:
294: if(proxy) {
295: userp = &conn->allocptr.proxyuserpwd;
296: user = conn->http_proxy.user;
297: pwd = conn->http_proxy.passwd;
298: }
299: else {
300: userp = &conn->allocptr.userpwd;
301: user = conn->user;
302: pwd = conn->passwd;
303: }
304:
305: out = aprintf("%s:%s", user, pwd);
306: if(!out)
307: return CURLE_OUT_OF_MEMORY;
308:
309: result = Curl_base64_encode(data, out, strlen(out), &authorization, &size);
310: if(result)
311: goto fail;
312:
313: if(!authorization) {
314: result = CURLE_REMOTE_ACCESS_DENIED;
315: goto fail;
316: }
317:
318: free(*userp);
319: *userp = aprintf("%sAuthorization: Basic %s\r\n",
320: proxy ? "Proxy-" : "",
321: authorization);
322: free(authorization);
323: if(!*userp) {
324: result = CURLE_OUT_OF_MEMORY;
325: goto fail;
326: }
327:
328: fail:
329: free(out);
330: return result;
331: }
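/*
 * Example of the result (editorial sketch): with user "Aladdin" and
 * password "open sesame", the "user:passwd" string base64-encodes to the
 * well-known RFC 7617 value, so the stored header line becomes
 *
 *   Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==\r\n
 *
 * and for a proxy the same value is prefixed with "Proxy-".
 */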
332:
333: /*
334: * http_output_bearer() sets up an Authorization: header
335: * for HTTP Bearer authentication.
336: *
337: * Returns CURLcode.
338: */
339: static CURLcode http_output_bearer(struct connectdata *conn)
340: {
341: char **userp;
342: CURLcode result = CURLE_OK;
343:
344: userp = &conn->allocptr.userpwd;
345: free(*userp);
346: *userp = aprintf("Authorization: Bearer %s\r\n",
347: conn->data->set.str[STRING_BEARER]);
348:
349: if(!*userp) {
350: result = CURLE_OUT_OF_MEMORY;
351: goto fail;
352: }
353:
354: fail:
355: return result;
356: }
357:
358: #endif
359:
360: /* pickoneauth() selects the most favourable authentication method from the
361: * ones available and the ones we want.
362: *
363: * return TRUE if one was picked
364: */
365: static bool pickoneauth(struct auth *pick, unsigned long mask)
366: {
367: bool picked;
368: /* only deal with authentication we want */
369: unsigned long avail = pick->avail & pick->want & mask;
370: picked = TRUE;
371:
372: /* The order of these checks is highly relevant, as this will be the order
373: of preference in case of the existence of multiple accepted types. */
374: if(avail & CURLAUTH_NEGOTIATE)
375: pick->picked = CURLAUTH_NEGOTIATE;
376: else if(avail & CURLAUTH_BEARER)
377: pick->picked = CURLAUTH_BEARER;
378: else if(avail & CURLAUTH_DIGEST)
379: pick->picked = CURLAUTH_DIGEST;
380: else if(avail & CURLAUTH_NTLM)
381: pick->picked = CURLAUTH_NTLM;
382: else if(avail & CURLAUTH_NTLM_WB)
383: pick->picked = CURLAUTH_NTLM_WB;
384: else if(avail & CURLAUTH_BASIC)
385: pick->picked = CURLAUTH_BASIC;
386: else {
387: pick->picked = CURLAUTH_PICKNONE; /* we select to use nothing */
388: picked = FALSE;
389: }
390: pick->avail = CURLAUTH_NONE; /* clear it here */
391:
392: return picked;
393: }
394:
395: /*
396: * http_perhapsrewind()
397: *
398: * If we are doing POST or PUT {
399: * If we have more data to send {
400: * If we are doing NTLM {
401: * Keep sending since we must not disconnect
402: * }
403: * else {
404: * If there is more than just a little data left to send, close
405: * the current connection by force.
406: * }
407: * }
408: * If we have sent any data {
409: * If we don't have track of all the data {
410: * call app to tell it to rewind
411: * }
412: * else {
413: * rewind internally so that the operation can restart fine
414: * }
415: * }
416: * }
417: */
418: static CURLcode http_perhapsrewind(struct connectdata *conn)
419: {
420: struct Curl_easy *data = conn->data;
421: struct HTTP *http = data->req.protop;
422: curl_off_t bytessent;
423: curl_off_t expectsend = -1; /* default is unknown */
424:
425: if(!http)
426: /* If this is still NULL, we have not reached very far and we can safely
427: skip this rewinding stuff */
428: return CURLE_OK;
429:
430: switch(data->set.httpreq) {
431: case HTTPREQ_GET:
432: case HTTPREQ_HEAD:
433: return CURLE_OK;
434: default:
435: break;
436: }
437:
438: bytessent = data->req.writebytecount;
439:
440: if(conn->bits.authneg) {
441: /* This is a state where we are known to be negotiating and we don't send
442: any data then. */
443: expectsend = 0;
444: }
445: else if(!conn->bits.protoconnstart) {
446: /* HTTP CONNECT in progress: there is no body */
447: expectsend = 0;
448: }
449: else {
450: /* figure out how much data we are expected to send */
451: switch(data->set.httpreq) {
452: case HTTPREQ_POST:
453: case HTTPREQ_PUT:
454: if(data->state.infilesize != -1)
455: expectsend = data->state.infilesize;
456: break;
457: case HTTPREQ_POST_FORM:
458: case HTTPREQ_POST_MIME:
459: expectsend = http->postsize;
460: break;
461: default:
462: break;
463: }
464: }
465:
466: conn->bits.rewindaftersend = FALSE; /* default */
467:
468: if((expectsend == -1) || (expectsend > bytessent)) {
469: #if defined(USE_NTLM)
470: /* There is still data left to send */
471: if((data->state.authproxy.picked == CURLAUTH_NTLM) ||
472: (data->state.authhost.picked == CURLAUTH_NTLM) ||
473: (data->state.authproxy.picked == CURLAUTH_NTLM_WB) ||
474: (data->state.authhost.picked == CURLAUTH_NTLM_WB)) {
475: if(((expectsend - bytessent) < 2000) ||
476: (conn->http_ntlm_state != NTLMSTATE_NONE) ||
477: (conn->proxy_ntlm_state != NTLMSTATE_NONE)) {
478: /* The NTLM-negotiation has started *OR* there is just a little (<2K)
479: data left to send, keep on sending. */
480:
481: /* rewind data when completely done sending! */
482: if(!conn->bits.authneg && (conn->writesockfd != CURL_SOCKET_BAD)) {
483: conn->bits.rewindaftersend = TRUE;
484: infof(data, "Rewind stream after send\n");
485: }
486:
487: return CURLE_OK;
488: }
489:
490: if(conn->bits.close)
491: /* this is already marked to get closed */
492: return CURLE_OK;
493:
494: infof(data, "NTLM send, close instead of sending %"
495: CURL_FORMAT_CURL_OFF_T " bytes\n",
496: (curl_off_t)(expectsend - bytessent));
497: }
498: #endif
499: #if defined(USE_SPNEGO)
500: /* There is still data left to send */
501: if((data->state.authproxy.picked == CURLAUTH_NEGOTIATE) ||
502: (data->state.authhost.picked == CURLAUTH_NEGOTIATE)) {
503: if(((expectsend - bytessent) < 2000) ||
504: (conn->http_negotiate_state != GSS_AUTHNONE) ||
505: (conn->proxy_negotiate_state != GSS_AUTHNONE)) {
506: /* The NEGOTIATE-negotiation has started *OR*
507: there is just a little (<2K) data left to send, keep on sending. */
508:
509: /* rewind data when completely done sending! */
510: if(!conn->bits.authneg && (conn->writesockfd != CURL_SOCKET_BAD)) {
511: conn->bits.rewindaftersend = TRUE;
512: infof(data, "Rewind stream after send\n");
513: }
514:
515: return CURLE_OK;
516: }
517:
518: if(conn->bits.close)
519: /* this is already marked to get closed */
520: return CURLE_OK;
521:
522: infof(data, "NEGOTIATE send, close instead of sending %"
523: CURL_FORMAT_CURL_OFF_T " bytes\n",
524: (curl_off_t)(expectsend - bytessent));
525: }
526: #endif
527:
528: /* Not NEGOTIATE/NTLM, or there is too much data left to send: close */
529: streamclose(conn, "Mid-auth HTTP and much data left to send");
530: data->req.size = 0; /* don't download any more than 0 bytes */
531:
532: /* There still is data left to send, but this connection is marked for
533: closure so we can safely do the rewind right now */
534: }
535:
536: if(bytessent)
537: /* we rewind now at once since we already sent something */
538: return Curl_readrewind(conn);
539:
540: return CURLE_OK;
541: }
542:
543: /*
544: * Curl_http_auth_act() gets called when all HTTP headers have been received
545: * and it checks which authentication methods are available and decides
546: * which one (if any) to use. It will set 'newurl' if an auth method was
547: * picked.
548: */
549:
550: CURLcode Curl_http_auth_act(struct connectdata *conn)
551: {
552: struct Curl_easy *data = conn->data;
553: bool pickhost = FALSE;
554: bool pickproxy = FALSE;
555: CURLcode result = CURLE_OK;
556: unsigned long authmask = ~0ul;
557:
558: if(!data->set.str[STRING_BEARER])
559: authmask &= (unsigned long)~CURLAUTH_BEARER;
560:
561: if(100 <= data->req.httpcode && 199 >= data->req.httpcode)
562: /* this is a transient response code, ignore */
563: return CURLE_OK;
564:
565: if(data->state.authproblem)
566: return data->set.http_fail_on_error?CURLE_HTTP_RETURNED_ERROR:CURLE_OK;
567:
568: if((conn->bits.user_passwd || data->set.str[STRING_BEARER]) &&
569: ((data->req.httpcode == 401) ||
570: (conn->bits.authneg && data->req.httpcode < 300))) {
571: pickhost = pickoneauth(&data->state.authhost, authmask);
572: if(!pickhost)
573: data->state.authproblem = TRUE;
574: if(data->state.authhost.picked == CURLAUTH_NTLM &&
575: conn->httpversion > 11) {
576: infof(data, "Forcing HTTP/1.1 for NTLM\n");
577: connclose(conn, "Force HTTP/1.1 connection");
578: conn->data->set.httpversion = CURL_HTTP_VERSION_1_1;
579: }
580: }
581: if(conn->bits.proxy_user_passwd &&
582: ((data->req.httpcode == 407) ||
583: (conn->bits.authneg && data->req.httpcode < 300))) {
584: pickproxy = pickoneauth(&data->state.authproxy,
585: authmask & ~CURLAUTH_BEARER);
586: if(!pickproxy)
587: data->state.authproblem = TRUE;
588: }
589:
590: if(pickhost || pickproxy) {
591: if((data->set.httpreq != HTTPREQ_GET) &&
592: (data->set.httpreq != HTTPREQ_HEAD) &&
593: !conn->bits.rewindaftersend) {
594: result = http_perhapsrewind(conn);
595: if(result)
596: return result;
597: }
598: /* In case this is GSS auth, the newurl field is already allocated so
599: we must make sure to free it before allocating a new one. As figured
600: out in bug #2284386 */
601: Curl_safefree(data->req.newurl);
602: data->req.newurl = strdup(data->change.url); /* clone URL */
603: if(!data->req.newurl)
604: return CURLE_OUT_OF_MEMORY;
605: }
606: else if((data->req.httpcode < 300) &&
607: (!data->state.authhost.done) &&
608: conn->bits.authneg) {
609: /* no (known) authentication available,
610: authentication is not "done" yet and
611: no authentication seems to be required and
612: we didn't try HEAD or GET */
613: if((data->set.httpreq != HTTPREQ_GET) &&
614: (data->set.httpreq != HTTPREQ_HEAD)) {
615: data->req.newurl = strdup(data->change.url); /* clone URL */
616: if(!data->req.newurl)
617: return CURLE_OUT_OF_MEMORY;
618: data->state.authhost.done = TRUE;
619: }
620: }
621: if(http_should_fail(conn)) {
622: failf(data, "The requested URL returned error: %d",
623: data->req.httpcode);
624: result = CURLE_HTTP_RETURNED_ERROR;
625: }
626:
627: return result;
628: }
629:
630: #ifndef CURL_DISABLE_HTTP_AUTH
631: /*
632: * Output the correct authentication header depending on the auth type
633: * and whether or not it is to a proxy.
634: */
635: static CURLcode
636: output_auth_headers(struct connectdata *conn,
637: struct auth *authstatus,
638: const char *request,
639: const char *path,
640: bool proxy)
641: {
642: const char *auth = NULL;
643: CURLcode result = CURLE_OK;
644: struct Curl_easy *data = conn->data;
645:
646: #ifdef CURL_DISABLE_CRYPTO_AUTH
647: (void)request;
648: (void)path;
649: #endif
650:
651: #ifdef USE_SPNEGO
652: if(authstatus->picked == CURLAUTH_NEGOTIATE) {
653: auth = "Negotiate";
654: result = Curl_output_negotiate(conn, proxy);
655: if(result)
656: return result;
657: }
658: else
659: #endif
660: #ifdef USE_NTLM
661: if(authstatus->picked == CURLAUTH_NTLM) {
662: auth = "NTLM";
663: result = Curl_output_ntlm(conn, proxy);
664: if(result)
665: return result;
666: }
667: else
668: #endif
669: #if defined(USE_NTLM) && defined(NTLM_WB_ENABLED)
670: if(authstatus->picked == CURLAUTH_NTLM_WB) {
671: auth = "NTLM_WB";
672: result = Curl_output_ntlm_wb(conn, proxy);
673: if(result)
674: return result;
675: }
676: else
677: #endif
678: #ifndef CURL_DISABLE_CRYPTO_AUTH
679: if(authstatus->picked == CURLAUTH_DIGEST) {
680: auth = "Digest";
681: result = Curl_output_digest(conn,
682: proxy,
683: (const unsigned char *)request,
684: (const unsigned char *)path);
685: if(result)
686: return result;
687: }
688: else
689: #endif
690: if(authstatus->picked == CURLAUTH_BASIC) {
691: /* Basic */
692: if((proxy && conn->bits.proxy_user_passwd &&
693: !Curl_checkProxyheaders(conn, "Proxy-authorization")) ||
694: (!proxy && conn->bits.user_passwd &&
695: !Curl_checkheaders(conn, "Authorization"))) {
696: auth = "Basic";
697: result = http_output_basic(conn, proxy);
698: if(result)
699: return result;
700: }
701:
702: /* NOTE: this function should set 'done' TRUE, as the other auth
703: functions work that way */
704: authstatus->done = TRUE;
705: }
706: if(authstatus->picked == CURLAUTH_BEARER) {
707: /* Bearer */
708: if((!proxy && data->set.str[STRING_BEARER] &&
709: !Curl_checkheaders(conn, "Authorization:"))) {
710: auth = "Bearer";
711: result = http_output_bearer(conn);
712: if(result)
713: return result;
714: }
715:
716: /* NOTE: this function should set 'done' TRUE, as the other auth
717: functions work that way */
718: authstatus->done = TRUE;
719: }
720:
721: if(auth) {
722: infof(data, "%s auth using %s with user '%s'\n",
723: proxy ? "Proxy" : "Server", auth,
724: proxy ? (conn->http_proxy.user ? conn->http_proxy.user : "") :
725: (conn->user ? conn->user : ""));
726: authstatus->multipass = (!authstatus->done) ? TRUE : FALSE;
727: }
728: else
729: authstatus->multipass = FALSE;
730:
731: return CURLE_OK;
732: }
733:
734: /**
735: * Curl_http_output_auth() sets up the authentication headers for the
736: * host/proxy and the correct authentication
737: * method. conn->data->state.authdone is set to TRUE when authentication is
738: * done.
739: *
740: * @param conn all information about the current connection
741: * @param request pointer to the request keyword
742: * @param path pointer to the requested path; should include query part
743: * @param proxytunnel boolean if this is the request setting up a "proxy
744: * tunnel"
745: *
746: * @returns CURLcode
747: */
748: CURLcode
749: Curl_http_output_auth(struct connectdata *conn,
750: const char *request,
751: const char *path,
752: bool proxytunnel) /* TRUE if this is the request setting
753: up the proxy tunnel */
754: {
755: CURLcode result = CURLE_OK;
756: struct Curl_easy *data = conn->data;
757: struct auth *authhost;
758: struct auth *authproxy;
759:
760: DEBUGASSERT(data);
761:
762: authhost = &data->state.authhost;
763: authproxy = &data->state.authproxy;
764:
765: if((conn->bits.httpproxy && conn->bits.proxy_user_passwd) ||
766: conn->bits.user_passwd || data->set.str[STRING_BEARER])
767: /* continue please */;
768: else {
769: authhost->done = TRUE;
770: authproxy->done = TRUE;
771: return CURLE_OK; /* no authentication with no user or password */
772: }
773:
774: if(authhost->want && !authhost->picked)
775: /* The app has selected one or more methods, but none has been picked
776: so far by a server round-trip. Then we set the picked one to the
777: want one, and if this is one single bit it'll be used instantly. */
778: authhost->picked = authhost->want;
779:
780: if(authproxy->want && !authproxy->picked)
781: /* The app has selected one or more methods, but none has been picked so
782: far by a proxy round-trip. Then we set the picked one to the want one,
783: and if this is one single bit it'll be used instantly. */
784: authproxy->picked = authproxy->want;
785:
786: #ifndef CURL_DISABLE_PROXY
787: /* Send proxy authentication header if needed */
788: if(conn->bits.httpproxy &&
789: (conn->bits.tunnel_proxy == (bit)proxytunnel)) {
790: result = output_auth_headers(conn, authproxy, request, path, TRUE);
791: if(result)
792: return result;
793: }
794: else
795: #else
796: (void)proxytunnel;
797: #endif /* CURL_DISABLE_PROXY */
798: /* we have no proxy so let's pretend we're done authenticating
799: with it */
800: authproxy->done = TRUE;
801:
802: /* To prevent the user+password from being sent to a host other than the
803: original one due to a location-follow, we do some weirdo checks here */
804: if(!data->state.this_is_a_follow ||
805: conn->bits.netrc ||
806: !data->state.first_host ||
807: data->set.allow_auth_to_other_hosts ||
808: strcasecompare(data->state.first_host, conn->host.name)) {
809: result = output_auth_headers(conn, authhost, request, path, FALSE);
810: }
811: else
812: authhost->done = TRUE;
813:
814: return result;
815: }
816:
817: #else
818: /* when disabled */
819: CURLcode
820: Curl_http_output_auth(struct connectdata *conn,
821: const char *request,
822: const char *path,
823: bool proxytunnel)
824: {
825: (void)conn;
826: (void)request;
827: (void)path;
828: (void)proxytunnel;
829: return CURLE_OK;
830: }
831: #endif
832:
833: /*
834: * Curl_http_input_auth() deals with Proxy-Authenticate: and WWW-Authenticate:
835: * headers. They are dealt with both in the transfer.c main loop and in the
836: * proxy CONNECT loop.
837: */
838:
839: CURLcode Curl_http_input_auth(struct connectdata *conn, bool proxy,
840: const char *auth) /* the first non-space */
841: {
842: /*
843: * This resource requires authentication
844: */
845: struct Curl_easy *data = conn->data;
846:
847: #ifdef USE_SPNEGO
848: curlnegotiate *negstate = proxy ? &conn->proxy_negotiate_state :
849: &conn->http_negotiate_state;
850: #endif
851: unsigned long *availp;
852: struct auth *authp;
853:
854: if(proxy) {
855: availp = &data->info.proxyauthavail;
856: authp = &data->state.authproxy;
857: }
858: else {
859: availp = &data->info.httpauthavail;
860: authp = &data->state.authhost;
861: }
862:
863: /*
864: * Here we check if we want the specific single authentication (using ==) and
865: * if we do, we initiate usage of it.
866: *
867: * If the provided authentication is wanted as one out of several accepted
868: * types (using &), we OR this authentication type to the authavail
869: * variable.
870: *
871: * Note:
872: *
873: * ->picked is first set to the 'want' value (one or more bits) before the
874: * request is sent, and then it is again set _after_ all response 401/407
875: * headers have been received but then only to a single preferred method
876: * (bit).
877: */
878:
879: while(*auth) {
880: #ifdef USE_SPNEGO
881: if(checkprefix("Negotiate", auth)) {
882: if((authp->avail & CURLAUTH_NEGOTIATE) ||
883: Curl_auth_is_spnego_supported()) {
884: *availp |= CURLAUTH_NEGOTIATE;
885: authp->avail |= CURLAUTH_NEGOTIATE;
886:
887: if(authp->picked == CURLAUTH_NEGOTIATE) {
888: CURLcode result = Curl_input_negotiate(conn, proxy, auth);
889: if(!result) {
890: DEBUGASSERT(!data->req.newurl);
891: data->req.newurl = strdup(data->change.url);
892: if(!data->req.newurl)
893: return CURLE_OUT_OF_MEMORY;
894: data->state.authproblem = FALSE;
895: /* we received a GSS auth token and we dealt with it fine */
896: *negstate = GSS_AUTHRECV;
897: }
898: else
899: data->state.authproblem = TRUE;
900: }
901: }
902: }
903: else
904: #endif
905: #ifdef USE_NTLM
906: /* NTLM support requires the SSL crypto libs */
907: if(checkprefix("NTLM", auth)) {
908: if((authp->avail & CURLAUTH_NTLM) ||
909: (authp->avail & CURLAUTH_NTLM_WB) ||
910: Curl_auth_is_ntlm_supported()) {
911: *availp |= CURLAUTH_NTLM;
912: authp->avail |= CURLAUTH_NTLM;
913:
914: if(authp->picked == CURLAUTH_NTLM ||
915: authp->picked == CURLAUTH_NTLM_WB) {
916: /* NTLM authentication is picked and activated */
917: CURLcode result = Curl_input_ntlm(conn, proxy, auth);
918: if(!result) {
919: data->state.authproblem = FALSE;
920: #ifdef NTLM_WB_ENABLED
921: if(authp->picked == CURLAUTH_NTLM_WB) {
922: *availp &= ~CURLAUTH_NTLM;
923: authp->avail &= ~CURLAUTH_NTLM;
924: *availp |= CURLAUTH_NTLM_WB;
925: authp->avail |= CURLAUTH_NTLM_WB;
926:
927: result = Curl_input_ntlm_wb(conn, proxy, auth);
928: if(result) {
929: infof(data, "Authentication problem. Ignoring this.\n");
930: data->state.authproblem = TRUE;
931: }
932: }
933: #endif
934: }
935: else {
936: infof(data, "Authentication problem. Ignoring this.\n");
937: data->state.authproblem = TRUE;
938: }
939: }
940: }
941: }
942: else
943: #endif
944: #ifndef CURL_DISABLE_CRYPTO_AUTH
945: if(checkprefix("Digest", auth)) {
946: if((authp->avail & CURLAUTH_DIGEST) != 0)
947: infof(data, "Ignoring duplicate digest auth header.\n");
948: else if(Curl_auth_is_digest_supported()) {
949: CURLcode result;
950:
951: *availp |= CURLAUTH_DIGEST;
952: authp->avail |= CURLAUTH_DIGEST;
953:
954: /* We call this function on input Digest headers even if Digest
955: * authentication isn't activated yet, as we need to store the
956: * incoming data from this header in case we are going to use
957: * Digest */
958: result = Curl_input_digest(conn, proxy, auth);
959: if(result) {
960: infof(data, "Authentication problem. Ignoring this.\n");
961: data->state.authproblem = TRUE;
962: }
963: }
964: }
965: else
966: #endif
967: if(checkprefix("Basic", auth)) {
968: *availp |= CURLAUTH_BASIC;
969: authp->avail |= CURLAUTH_BASIC;
970: if(authp->picked == CURLAUTH_BASIC) {
971: /* We asked for Basic authentication but got a 40X back
972: anyway, which basically means our name+password isn't
973: valid. */
974: authp->avail = CURLAUTH_NONE;
975: infof(data, "Authentication problem. Ignoring this.\n");
976: data->state.authproblem = TRUE;
977: }
978: }
979: else
980: if(checkprefix("Bearer", auth)) {
981: *availp |= CURLAUTH_BEARER;
982: authp->avail |= CURLAUTH_BEARER;
983: if(authp->picked == CURLAUTH_BEARER) {
984: /* We asked for Bearer authentication but got a 40X back
985: anyway, which basically means our token isn't valid. */
986: authp->avail = CURLAUTH_NONE;
987: infof(data, "Authentication problem. Ignoring this.\n");
988: data->state.authproblem = TRUE;
989: }
990: }
991:
992: /* there may be multiple methods on one line, so keep reading */
993: while(*auth && *auth != ',') /* read up to the next comma */
994: auth++;
995: if(*auth == ',') /* if we're on a comma, skip it */
996: auth++;
997: while(*auth && ISSPACE(*auth))
998: auth++;
999: }
1000:
1001: return CURLE_OK;
1002: }
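/*
 * Parsing example (editorial sketch, header value invented): a response
 * carrying
 *
 *   WWW-Authenticate: Negotiate, NTLM, Basic realm="example"
 *
 * makes the loop above mark CURLAUTH_NEGOTIATE, CURLAUTH_NTLM and
 * CURLAUTH_BASIC as available (the first two only when the corresponding
 * backends are compiled in); the comma and whitespace skipping at the end
 * of the loop is what advances from one offered method to the next on the
 * same header line.
 */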
1003:
1004: /**
1005: * http_should_fail() determines whether an HTTP response has gotten us
1006: * into an error state or not.
1007: *
1008: * @param conn all information about the current connection
1009: *
1010: * @retval 0 communications should continue
1011: *
1012: * @retval 1 communications should not continue
1013: */
1014: static int http_should_fail(struct connectdata *conn)
1015: {
1016: struct Curl_easy *data;
1017: int httpcode;
1018:
1019: DEBUGASSERT(conn);
1020: data = conn->data;
1021: DEBUGASSERT(data);
1022:
1023: httpcode = data->req.httpcode;
1024:
1025: /*
1026: ** If we haven't been asked to fail on error,
1027: ** don't fail.
1028: */
1029: if(!data->set.http_fail_on_error)
1030: return 0;
1031:
1032: /*
1033: ** Any code < 400 is never terminal.
1034: */
1035: if(httpcode < 400)
1036: return 0;
1037:
1038: /*
1039: ** Any code >= 400 that's not 401 or 407 is always
1040: ** a terminal error
1041: */
1042: if((httpcode != 401) && (httpcode != 407))
1043: return 1;
1044:
1045: /*
1046: ** All we have left to deal with is 401 and 407
1047: */
1048: DEBUGASSERT((httpcode == 401) || (httpcode == 407));
1049:
1050: /*
1051: ** Examine the current authentication state to see if this
1052: ** is an error. The idea is for this function to get
1053: ** called after processing all the headers in a response
1054: ** message. So, if we've been asked to authenticate a
1055: ** particular stage, and we've done it, we're OK. But, if
1056: ** we're already completely authenticated, it's not OK to
1057: ** get another 401 or 407.
1058: **
1059: ** It is possible for authentication to go stale such that
1060: ** the client needs to reauthenticate. Once that info is
1061: ** available, use it here.
1062: */
1063:
1064: /*
1065: ** Either we're not authenticating, or we're supposed to
1066: ** be authenticating something else. This is an error.
1067: */
1068: if((httpcode == 401) && !conn->bits.user_passwd)
1069: return TRUE;
1070: if((httpcode == 407) && !conn->bits.proxy_user_passwd)
1071: return TRUE;
1072:
1073: return data->state.authproblem;
1074: }
1075:
1076: /*
1077: * readmoredata() is a "fread() emulation" to provide POST and/or request
1078: * data. It is used when a huge POST is to be made and the entire chunk wasn't
1079: * sent in the first send(). This function will then be called from the
1080: * transfer.c loop when more data is to be sent to the peer.
1081: *
1082: * Returns the number of bytes it filled the buffer with.
1083: */
1084: static size_t readmoredata(char *buffer,
1085: size_t size,
1086: size_t nitems,
1087: void *userp)
1088: {
1089: struct connectdata *conn = (struct connectdata *)userp;
1090: struct HTTP *http = conn->data->req.protop;
1091: size_t fullsize = size * nitems;
1092:
1093: if(!http->postsize)
1094: /* nothing to return */
1095: return 0;
1096:
1097: /* make sure that an HTTP request is never sent away chunked! */
1098: conn->data->req.forbidchunk = (http->sending == HTTPSEND_REQUEST)?TRUE:FALSE;
1099:
1100: if(http->postsize <= (curl_off_t)fullsize) {
1101: memcpy(buffer, http->postdata, (size_t)http->postsize);
1102: fullsize = (size_t)http->postsize;
1103:
1104: if(http->backup.postsize) {
1105: /* move backup data into focus and continue on that */
1106: http->postdata = http->backup.postdata;
1107: http->postsize = http->backup.postsize;
1108: conn->data->state.fread_func = http->backup.fread_func;
1109: conn->data->state.in = http->backup.fread_in;
1110:
1111: http->sending++; /* move one step up */
1112:
1113: http->backup.postsize = 0;
1114: }
1115: else
1116: http->postsize = 0;
1117:
1118: return fullsize;
1119: }
1120:
1121: memcpy(buffer, http->postdata, fullsize);
1122: http->postdata += fullsize;
1123: http->postsize -= fullsize;
1124:
1125: return fullsize;
1126: }
1127:
1128: /* ------------------------------------------------------------------------- */
1129: /* add_buffer functions */
1130:
1131: /*
1132: * Curl_add_buffer_init() sets up and returns a fine buffer struct
1133: */
1134: Curl_send_buffer *Curl_add_buffer_init(void)
1135: {
1136: return calloc(1, sizeof(Curl_send_buffer));
1137: }
1138:
1139: /*
1140: * Curl_add_buffer_free() frees all associated resources.
1141: */
1142: void Curl_add_buffer_free(Curl_send_buffer **inp)
1143: {
1144: Curl_send_buffer *in;
1145: if(!inp)
1146: return;
1147: in = *inp;
1148: if(in) { /* deal with NULL input */
1149: free(in->buffer);
1150: free(in);
1151: }
1152: *inp = NULL;
1153: }
1154:
1155: /*
1156: * Curl_add_buffer_send() sends a header buffer and frees all associated
1157: * memory. Body data may be appended to the header data if desired.
1158: *
1159: * Returns CURLcode
1160: */
1161: CURLcode Curl_add_buffer_send(Curl_send_buffer **inp,
1162: struct connectdata *conn,
1163:
1164: /* add the number of sent bytes to this
1165: counter */
1166: curl_off_t *bytes_written,
1167:
1168: /* how much of the buffer contains body data */
1169: size_t included_body_bytes,
1170: int socketindex)
1171: {
1172: ssize_t amount;
1173: CURLcode result;
1174: char *ptr;
1175: size_t size;
1176: struct Curl_easy *data = conn->data;
1177: struct HTTP *http = data->req.protop;
1178: size_t sendsize;
1179: curl_socket_t sockfd;
1180: size_t headersize;
1181: Curl_send_buffer *in = *inp;
1182:
1183: DEBUGASSERT(socketindex <= SECONDARYSOCKET);
1184:
1185: sockfd = conn->sock[socketindex];
1186:
1187: /* The looping below is required since we use non-blocking sockets, but due
1188: to the circumstances we will just loop and try again and again etc */
1189:
1190: ptr = in->buffer;
1191: size = in->size_used;
1192:
1193: headersize = size - included_body_bytes; /* the initial part that isn't body
1194: is header */
1195:
1196: DEBUGASSERT(size > included_body_bytes);
1197:
1198: result = Curl_convert_to_network(data, ptr, headersize);
1199: /* Curl_convert_to_network calls failf if unsuccessful */
1200: if(result) {
1201: /* conversion failed, free memory and return to the caller */
1202: Curl_add_buffer_free(inp);
1203: return result;
1204: }
1205:
1206: if((conn->handler->flags & PROTOPT_SSL ||
1207: conn->http_proxy.proxytype == CURLPROXY_HTTPS)
1208: && conn->httpversion != 20) {
1209: /* We never send more than CURL_MAX_WRITE_SIZE bytes in one single chunk
1210: when we speak HTTPS, as if only a fraction of it is sent now, this data
1211: needs to fit into the normal read-callback buffer later on and that
1212: buffer is using this size.
1213: */
1214:
1215: sendsize = CURLMIN(size, CURL_MAX_WRITE_SIZE);
1216:
1217: /* OpenSSL is very picky and we must send the SAME buffer pointer to the
1218: library when we attempt to re-send this buffer. Sending the same data
1219: is not enough, we must use the exact same address. For this reason, we
1220: must copy the data to the uploadbuffer first, since that is the buffer
1221: we will be using if this send is retried later.
1222: */
1223: result = Curl_get_upload_buffer(data);
1224: if(result) {
1225: /* malloc failed, free memory and return to the caller */
1226: Curl_add_buffer_free(&in);
1227: return result;
1228: }
1229: memcpy(data->state.ulbuf, ptr, sendsize);
1230: ptr = data->state.ulbuf;
1231: }
1232: else {
1233: #ifdef CURLDEBUG
1234: /* Allow debug builds to override this logic and force short initial sends */
1235: char *p = getenv("CURL_SMALLREQSEND");
1236: if(p) {
1237: size_t altsize = (size_t)strtoul(p, NULL, 10);
1238: if(altsize)
1239: sendsize = CURLMIN(size, altsize);
1240: else
1241: sendsize = size;
1242: }
1243: else
1244: #endif
1245: sendsize = size;
1246: }
1247:
1248: result = Curl_write(conn, sockfd, ptr, sendsize, &amount);
1249:
1250: if(!result) {
1251: /*
1252: * Note that we may not send the entire chunk at once, and we have a set
1253: * number of data bytes at the end of the big buffer (out of which we may
1254: * only send away a part).
1255: */
1256: /* how much of the header that was sent */
1257: size_t headlen = (size_t)amount>headersize ? headersize : (size_t)amount;
1258: size_t bodylen = amount - headlen;
1259:
1260: if(data->set.verbose) {
1261: /* this data _may_ contain binary stuff */
1262: Curl_debug(data, CURLINFO_HEADER_OUT, ptr, headlen);
1263: if(bodylen) {
1264: /* there was body data sent beyond the initial header part, pass that
1265: on to the debug callback too */
1266: Curl_debug(data, CURLINFO_DATA_OUT,
1267: ptr + headlen, bodylen);
1268: }
1269: }
1270:
1271: /* 'amount' can never be a very large value here so typecasting it to a
1272: signed 31 bit value should not cause problems even if ssize_t is
1273: 64bit */
1274: *bytes_written += (long)amount;
1275:
1276: if(http) {
1277: /* if we sent a piece of the body here, up the byte counter for it
1278: accordingly */
1279: data->req.writebytecount += bodylen;
1280: Curl_pgrsSetUploadCounter(data, data->req.writebytecount);
1281:
1282: if((size_t)amount != size) {
1283: /* The whole request could not be sent in one system call. We must
1284: queue it up and send it later when we get the chance. We must not
1285: loop here and wait until it might work again. */
1286:
1287: size -= amount;
1288:
1289: ptr = in->buffer + amount;
1290:
1291: /* backup the currently set pointers */
1292: http->backup.fread_func = data->state.fread_func;
1293: http->backup.fread_in = data->state.in;
1294: http->backup.postdata = http->postdata;
1295: http->backup.postsize = http->postsize;
1296:
1297: /* set the new pointers for the request-sending */
1298: data->state.fread_func = (curl_read_callback)readmoredata;
1299: data->state.in = (void *)conn;
1300: http->postdata = ptr;
1301: http->postsize = (curl_off_t)size;
1302:
1303: http->send_buffer = in;
1304: http->sending = HTTPSEND_REQUEST;
1305:
1306: return CURLE_OK;
1307: }
1308: http->sending = HTTPSEND_BODY;
1309: /* the full buffer was sent, clean up and return */
1310: }
1311: else {
1312: if((size_t)amount != size)
1313: /* We have no continue-send mechanism now, fail. This can only happen
1314: when this function is used from the CONNECT sending function. We
1315: currently (stupidly) assume that the whole request is always sent
1316: away in the first single chunk.
1317:
1318: This needs FIXing.
1319: */
1320: return CURLE_SEND_ERROR;
1321: }
1322: }
1323: Curl_add_buffer_free(&in);
1324:
1325: return result;
1326: }
1327:
1328:
1329: /*
1330: * Curl_add_bufferf() appends the formatted input to the buffer.
1331: */
1332: CURLcode Curl_add_bufferf(Curl_send_buffer **inp, const char *fmt, ...)
1333: {
1334: char *s;
1335: va_list ap;
1336: va_start(ap, fmt);
1337: s = vaprintf(fmt, ap); /* this allocs a new string to append */
1338: va_end(ap);
1339:
1340: if(s) {
1341: CURLcode result = Curl_add_buffer(inp, s, strlen(s));
1342: free(s);
1343: return result;
1344: }
1345: /* If we failed, we cleanup the whole buffer and return error */
1346: Curl_add_buffer_free(inp);
1347: return CURLE_OUT_OF_MEMORY;
1348: }
1349:
1350: /*
1351: * Curl_add_buffer() appends a memory chunk to the existing buffer
1352: */
1353: CURLcode Curl_add_buffer(Curl_send_buffer **inp, const void *inptr,
1354: size_t size)
1355: {
1356: char *new_rb;
1357: Curl_send_buffer *in = *inp;
1358:
1359: if(~size < in->size_used) {
1360: /* If resulting used size of send buffer would wrap size_t, cleanup
1361: the whole buffer and return error. Otherwise the required buffer
1362: size will fit into a single allocatable memory chunk */
1363: Curl_add_buffer_free(inp);
1364: return CURLE_OUT_OF_MEMORY;
1365: }
1366:
1367: if(!in->buffer ||
1368: ((in->size_used + size) > (in->size_max - 1))) {
1369: /* If current buffer size isn't enough to hold the result, use a
1370: buffer size that doubles the required size. If this new size
1371: would wrap size_t, then just use the largest possible one */
1372: size_t new_size;
1373:
1374: if((size > (size_t)-1 / 2) || (in->size_used > (size_t)-1 / 2) ||
1375: (~(size * 2) < (in->size_used * 2)))
1376: new_size = (size_t)-1;
1377: else
1378: new_size = (in->size_used + size) * 2;
1379:
1380: if(in->buffer)
1381: /* we have a buffer, enlarge the existing one */
1382: new_rb = Curl_saferealloc(in->buffer, new_size);
1383: else
1384: /* create a new buffer */
1385: new_rb = malloc(new_size);
1386:
1387: if(!new_rb) {
1388: /* If we failed, we cleanup the whole buffer and return error */
1389: free(in);
1390: *inp = NULL;
1391: return CURLE_OUT_OF_MEMORY;
1392: }
1393:
1394: in->buffer = new_rb;
1395: in->size_max = new_size;
1396: }
1397: memcpy(&in->buffer[in->size_used], inptr, size);
1398:
1399: in->size_used += size;
1400:
1401: return CURLE_OK;
1402: }
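/*
 * Typical lifecycle of these helpers (editorial sketch, mirroring how
 * add_haproxy_protocol_header() below uses them; 'line', 'conn' and 'data'
 * stand in for whatever the caller has at hand): allocate, append, send.
 * Curl_add_buffer_send() frees the buffer when it is done with it, and the
 * append functions free it themselves on failure, so no extra cleanup is
 * needed on the error paths shown here.
 *
 *   Curl_send_buffer *req = Curl_add_buffer_init();
 *   if(!req)
 *     return CURLE_OUT_OF_MEMORY;
 *   result = Curl_add_bufferf(&req, "%s\r\n", line);
 *   if(!result)
 *     result = Curl_add_buffer_send(&req, conn, &data->info.request_size,
 *                                   0, FIRSTSOCKET);
 */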
1403:
1404: /* end of the add_buffer functions */
1405: /* ------------------------------------------------------------------------- */
1406:
1407:
1408:
1409: /*
1410: * Curl_compareheader()
1411: *
1412: * Returns TRUE if 'headerline' contains the 'header' with given 'content'.
1413: * Pass headers WITH the colon.
1414: */
1415: bool
1416: Curl_compareheader(const char *headerline, /* line to check */
1417: const char *header, /* header keyword _with_ colon */
1418: const char *content) /* content string to find */
1419: {
1420: /* RFC2616, section 4.2 says: "Each header field consists of a name followed
1421: * by a colon (":") and the field value. Field names are case-insensitive.
1422: * The field value MAY be preceded by any amount of LWS, though a single SP
1423: * is preferred." */
1424:
1425: size_t hlen = strlen(header);
1426: size_t clen;
1427: size_t len;
1428: const char *start;
1429: const char *end;
1430:
1431: if(!strncasecompare(headerline, header, hlen))
1432: return FALSE; /* doesn't start with header */
1433:
1434: /* pass the header */
1435: start = &headerline[hlen];
1436:
1437: /* pass all white spaces */
1438: while(*start && ISSPACE(*start))
1439: start++;
1440:
1441: /* find the end of the header line */
1442: end = strchr(start, '\r'); /* lines end with CRLF */
1443: if(!end) {
1444: /* in case there's a non-standard compliant line here */
1445: end = strchr(start, '\n');
1446:
1447: if(!end)
1448: /* hm, there's no line ending here, use the zero byte! */
1449: end = strchr(start, '\0');
1450: }
1451:
1452: len = end-start; /* length of the content part of the input line */
1453: clen = strlen(content); /* length of the word to find */
1454:
1455: /* find the content string in the rest of the line */
1456: for(; len >= clen; len--, start++) {
1457: if(strncasecompare(start, content, clen))
1458: return TRUE; /* match! */
1459: }
1460:
1461: return FALSE; /* no match */
1462: }
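/*
 * Example calls (editorial sketch): this is how expect100() further down
 * uses it to detect a user-supplied Expect header; the comparison is
 * case-insensitive and tolerates extra whitespace before the value.
 *
 *   Curl_compareheader("Expect: 100-continue\r\n", "Expect:", "100-continue")
 *     => TRUE
 *   Curl_compareheader("Expect: whatever\r\n", "Expect:", "100-continue")
 *     => FALSE
 */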
1463:
1464: /*
1465: * Curl_http_connect() performs HTTP stuff to do at connect-time, called from
1466: * the generic Curl_connect().
1467: */
1468: CURLcode Curl_http_connect(struct connectdata *conn, bool *done)
1469: {
1470: CURLcode result;
1471:
1472: /* We default to persistent connections. We set this already in this connect
1473: function so that the re-use checks can properly check this bit. */
1474: connkeep(conn, "HTTP default");
1475:
1476: /* the CONNECT procedure might not have been completed */
1477: result = Curl_proxy_connect(conn, FIRSTSOCKET);
1478: if(result)
1479: return result;
1480:
1481: if(conn->bits.proxy_connect_closed)
1482: /* this is not an error, just part of the connection negotiation */
1483: return CURLE_OK;
1484:
1485: if(CONNECT_FIRSTSOCKET_PROXY_SSL())
1486: return CURLE_OK; /* wait for HTTPS proxy SSL initialization to complete */
1487:
1488: if(Curl_connect_ongoing(conn))
1489: /* nothing else to do except wait right now - we're not done here. */
1490: return CURLE_OK;
1491:
1492: #ifndef CURL_DISABLE_PROXY
1493: if(conn->data->set.haproxyprotocol) {
1494: /* add HAProxy PROXY protocol header */
1495: result = add_haproxy_protocol_header(conn);
1496: if(result)
1497: return result;
1498: }
1499: #endif
1500:
1501: if(conn->given->protocol & CURLPROTO_HTTPS) {
1502: /* perform SSL initialization */
1503: result = https_connecting(conn, done);
1504: if(result)
1505: return result;
1506: }
1507: else
1508: *done = TRUE;
1509:
1510: return CURLE_OK;
1511: }
1512:
1513: /* this returns the socket to wait for in the DO and DOING state for the multi
1514: interface; there we're always _sending_ a request and thus we only wait for
1515: the single socket to become writable */
1516: static int http_getsock_do(struct connectdata *conn,
1517: curl_socket_t *socks)
1518: {
1519: /* write mode */
1520: socks[0] = conn->sock[FIRSTSOCKET];
1521: return GETSOCK_WRITESOCK(0);
1522: }
1523:
1524: #ifndef CURL_DISABLE_PROXY
1525: static CURLcode add_haproxy_protocol_header(struct connectdata *conn)
1526: {
1527: char proxy_header[128];
1528: Curl_send_buffer *req_buffer;
1529: CURLcode result;
1530: char tcp_version[5];
1531:
1532: /* Emit the correct prefix for IPv6 */
1533: if(conn->bits.ipv6) {
1534: strcpy(tcp_version, "TCP6");
1535: }
1536: else {
1537: strcpy(tcp_version, "TCP4");
1538: }
1539:
1540: msnprintf(proxy_header,
1541: sizeof(proxy_header),
1542: "PROXY %s %s %s %li %li\r\n",
1543: tcp_version,
1544: conn->data->info.conn_local_ip,
1545: conn->data->info.conn_primary_ip,
1546: conn->data->info.conn_local_port,
1547: conn->data->info.conn_primary_port);
1548:
1549: req_buffer = Curl_add_buffer_init();
1550: if(!req_buffer)
1551: return CURLE_OUT_OF_MEMORY;
1552:
1553: result = Curl_add_bufferf(&req_buffer, proxy_header);
1554: if(result)
1555: return result;
1556:
1557: result = Curl_add_buffer_send(&req_buffer,
1558: conn,
1559: &conn->data->info.request_size,
1560: 0,
1561: FIRSTSOCKET);
1562:
1563: return result;
1564: }
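/*
 * The line built above follows version 1 of HAProxy's PROXY protocol:
 * "PROXY", the address family, then source address, destination address,
 * source port and destination port, terminated by CRLF. An illustrative
 * header (made-up documentation addresses):
 *
 *   PROXY TCP4 192.0.2.10 198.51.100.1 51234 80\r\n
 */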
1565: #endif
1566:
1567: #ifdef USE_SSL
1568: static CURLcode https_connecting(struct connectdata *conn, bool *done)
1569: {
1570: CURLcode result;
1571: DEBUGASSERT((conn) && (conn->handler->flags & PROTOPT_SSL));
1572:
1573: #ifdef ENABLE_QUIC
1574: if(conn->transport == TRNSPRT_QUIC) {
1575: *done = TRUE;
1576: return CURLE_OK;
1577: }
1578: #endif
1579:
1580: /* perform SSL initialization for this socket */
1581: result = Curl_ssl_connect_nonblocking(conn, FIRSTSOCKET, done);
1582: if(result)
1583: connclose(conn, "Failed HTTPS connection");
1584:
1585: return result;
1586: }
1587:
1588: static int https_getsock(struct connectdata *conn,
1589: curl_socket_t *socks)
1590: {
1591: if(conn->handler->flags & PROTOPT_SSL)
1592: return Curl_ssl_getsock(conn, socks);
1593: return GETSOCK_BLANK;
1594: }
1595: #endif /* USE_SSL */
1596:
1597: /*
1598: * Curl_http_done() gets called after a single HTTP request has been
1599: * performed.
1600: */
1601:
1602: CURLcode Curl_http_done(struct connectdata *conn,
1603: CURLcode status, bool premature)
1604: {
1605: struct Curl_easy *data = conn->data;
1606: struct HTTP *http = data->req.protop;
1607:
1608: /* Clear multipass flag. If authentication isn't done yet, then it will get
1609: * a chance to be set back to true when we output the next auth header */
1610: data->state.authhost.multipass = FALSE;
1611: data->state.authproxy.multipass = FALSE;
1612:
1613: Curl_unencode_cleanup(conn);
1614:
1615: /* set the proper values (possibly modified on POST) */
1616: conn->seek_func = data->set.seek_func; /* restore */
1617: conn->seek_client = data->set.seek_client; /* restore */
1618:
1619: if(!http)
1620: return CURLE_OK;
1621:
1622: if(http->send_buffer) {
1623: Curl_add_buffer_free(&http->send_buffer);
1624: }
1625:
1626: Curl_http2_done(data, premature);
1627: Curl_quic_done(data, premature);
1628:
1629: Curl_mime_cleanpart(&http->form);
1630:
1631: if(status)
1632: return status;
1633:
1634: if(!premature && /* this check is pointless when DONE is called before the
1635: entire operation is complete */
1636: !conn->bits.retry &&
1637: !data->set.connect_only &&
1638: (data->req.bytecount +
1639: data->req.headerbytecount -
1640: data->req.deductheadercount) <= 0) {
1641: /* If this connection isn't simply closed to be retried, AND nothing was
1642: read from the HTTP server (that counts), this can't be right so we
1643: return an error here */
1644: failf(data, "Empty reply from server");
1645: return CURLE_GOT_NOTHING;
1646: }
1647:
1648: return CURLE_OK;
1649: }
1650:
1651: /*
1652: * Determine if we should use HTTP 1.1 (OR BETTER) for this request. Reasons
1653: * to avoid it include:
1654: *
1655: * - if the user specifically requested HTTP 1.0
1656: * - if the server we are connected to only supports 1.0
1657: * - if any server previously contacted to handle this request only supports
1658: * 1.0.
1659: */
1660: static bool use_http_1_1plus(const struct Curl_easy *data,
1661: const struct connectdata *conn)
1662: {
1663: if((data->state.httpversion == 10) || (conn->httpversion == 10))
1664: return FALSE;
1665: if((data->set.httpversion == CURL_HTTP_VERSION_1_0) &&
1666: (conn->httpversion <= 10))
1667: return FALSE;
1668: return ((data->set.httpversion == CURL_HTTP_VERSION_NONE) ||
1669: (data->set.httpversion >= CURL_HTTP_VERSION_1_1));
1670: }
1671:
1672: static const char *get_http_string(const struct Curl_easy *data,
1673: const struct connectdata *conn)
1674: {
1675: #ifdef ENABLE_QUIC
1676: if((data->set.httpversion == CURL_HTTP_VERSION_3) ||
1677: (conn->httpversion == 30))
1678: return "3";
1679: #endif
1680:
1681: #ifdef USE_NGHTTP2
1682: if(conn->proto.httpc.h2)
1683: return "2";
1684: #endif
1685:
1686: if(use_http_1_1plus(data, conn))
1687: return "1.1";
1688:
1689: return "1.0";
1690: }
1691:
1692: /* check and possibly add an Expect: header */
1693: static CURLcode expect100(struct Curl_easy *data,
1694: struct connectdata *conn,
1695: Curl_send_buffer *req_buffer)
1696: {
1697: CURLcode result = CURLE_OK;
1698: data->state.expect100header = FALSE; /* default to false unless it is set
1699: to TRUE below */
1700: if(!data->state.disableexpect && use_http_1_1plus(data, conn) &&
1701: (conn->httpversion < 20)) {
1702: /* if not doing HTTP 1.0 or version 2, or disabled explicitly, we add an
1703: Expect: 100-continue to the headers which actually speeds up post
1704: operations (as there is one packet coming back from the web server) */
1705: const char *ptr = Curl_checkheaders(conn, "Expect");
1706: if(ptr) {
1707: data->state.expect100header =
1708: Curl_compareheader(ptr, "Expect:", "100-continue");
1709: }
1710: else {
1711: result = Curl_add_bufferf(&req_buffer,
1712: "Expect: 100-continue\r\n");
1713: if(!result)
1714: data->state.expect100header = TRUE;
1715: }
1716: }
1717:
1718: return result;
1719: }
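/*
 * Net effect (editorial sketch): unless the feature is disabled or the
 * application supplies its own Expect header, a body-carrying HTTP/1.1
 * request gets the extra line
 *
 *   Expect: 100-continue\r\n
 *
 * and libcurl then waits a short while for the server's "100 Continue"
 * interim response before it starts sending the request body.
 */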
1720:
1721: enum proxy_use {
1722: HEADER_SERVER, /* direct to server */
1723: HEADER_PROXY, /* regular request to proxy */
1724: HEADER_CONNECT /* sending CONNECT to a proxy */
1725: };
1726:
1727: /* used to compile the provided trailers into one buffer;
1728: returns an error code if one of the headers is
1729: not formatted correctly */
1730: CURLcode Curl_http_compile_trailers(struct curl_slist *trailers,
1731: Curl_send_buffer **buffer,
1732: struct Curl_easy *handle)
1733: {
1734: char *ptr = NULL;
1735: CURLcode result = CURLE_OK;
1736: const char *endofline_native = NULL;
1737: const char *endofline_network = NULL;
1738:
1739: if(
1740: #ifdef CURL_DO_LINEEND_CONV
1741: (handle->set.prefer_ascii) ||
1742: #endif
1743: (handle->set.crlf)) {
1744: /* \n will become \r\n later on */
1745: endofline_native = "\n";
1746: endofline_network = "\x0a";
1747: }
1748: else {
1749: endofline_native = "\r\n";
1750: endofline_network = "\x0d\x0a";
1751: }
1752:
1753: while(trailers) {
1754: /* only add correctly formatted trailers */
1755: ptr = strchr(trailers->data, ':');
1756: if(ptr && *(ptr + 1) == ' ') {
1757: result = Curl_add_bufferf(buffer, "%s%s", trailers->data,
1758: endofline_native);
1759: if(result)
1760: return result;
1761: }
1762: else
1763: infof(handle, "Malformed trailing header, skipping trailer\n");
1764: trailers = trailers->next;
1765: }
1766: result = Curl_add_buffer(buffer, endofline_network,
1767: strlen(endofline_network));
1768: return result;
1769: }
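/*
 * Trailer formatting example (editorial sketch, header names invented):
 * each list entry must look like a regular header, a name followed by a
 * colon and a space, or it is skipped with the warning above.
 *
 *   struct curl_slist *tr = NULL;
 *   tr = curl_slist_append(tr, "X-Checksum: 0011223344");   accepted
 *   tr = curl_slist_append(tr, "X-Broken-no-colon");        skipped
 *
 * The compiled buffer ends with an extra CRLF, which is what terminates
 * the trailer section of a chunked request body.
 */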
1770:
1771: CURLcode Curl_add_custom_headers(struct connectdata *conn,
1772: bool is_connect,
1773: Curl_send_buffer *req_buffer)
1774: {
1775: char *ptr;
1776: struct curl_slist *h[2];
1777: struct curl_slist *headers;
1778: int numlists = 1; /* by default */
1779: struct Curl_easy *data = conn->data;
1780: int i;
1781:
1782: enum proxy_use proxy;
1783:
1784: if(is_connect)
1785: proxy = HEADER_CONNECT;
1786: else
1787: proxy = conn->bits.httpproxy && !conn->bits.tunnel_proxy?
1788: HEADER_PROXY:HEADER_SERVER;
1789:
1790: switch(proxy) {
1791: case HEADER_SERVER:
1792: h[0] = data->set.headers;
1793: break;
1794: case HEADER_PROXY:
1795: h[0] = data->set.headers;
1796: if(data->set.sep_headers) {
1797: h[1] = data->set.proxyheaders;
1798: numlists++;
1799: }
1800: break;
1801: case HEADER_CONNECT:
1802: if(data->set.sep_headers)
1803: h[0] = data->set.proxyheaders;
1804: else
1805: h[0] = data->set.headers;
1806: break;
1807: }
1808:
1809: /* loop through one or two lists */
1810: for(i = 0; i < numlists; i++) {
1811: headers = h[i];
1812:
1813: while(headers) {
1814: char *semicolonp = NULL;
1815: ptr = strchr(headers->data, ':');
1816: if(!ptr) {
1817: char *optr;
1818: /* no colon, semicolon? */
1819: ptr = strchr(headers->data, ';');
1820: if(ptr) {
1821: optr = ptr;
1822: ptr++; /* pass the semicolon */
1823: while(*ptr && ISSPACE(*ptr))
1824: ptr++;
1825:
1826: if(*ptr) {
1827: /* this may be used for something else in the future */
1828: optr = NULL;
1829: }
1830: else {
1831: if(*(--ptr) == ';') {
1832: /* copy the source */
1833: semicolonp = strdup(headers->data);
1834: if(!semicolonp) {
1835: Curl_add_buffer_free(&req_buffer);
1836: return CURLE_OUT_OF_MEMORY;
1837: }
1838: /* put a colon where the semicolon is */
1839: semicolonp[ptr - headers->data] = ':';
1840: /* point at the colon */
1841: optr = &semicolonp [ptr - headers->data];
1842: }
1843: }
1844: ptr = optr;
1845: }
1846: }
1847: if(ptr) {
1848: /* we require a colon for this to be a true header */
1849:
1850: ptr++; /* pass the colon */
1851: while(*ptr && ISSPACE(*ptr))
1852: ptr++;
1853:
1854: if(*ptr || semicolonp) {
1855: /* only send this if the content was non-blank or handled specially */
1856: CURLcode result = CURLE_OK;
1857: char *compare = semicolonp ? semicolonp : headers->data;
1858:
1859: if(conn->allocptr.host &&
1860: /* a Host: header was sent already, don't pass on any custom Host:
1861: header as that will produce *two* in the same request! */
1862: checkprefix("Host:", compare))
1863: ;
1864: else if(data->set.httpreq == HTTPREQ_POST_FORM &&
1865: /* this header (extended by formdata.c) is sent later */
1866: checkprefix("Content-Type:", compare))
1867: ;
1868: else if(data->set.httpreq == HTTPREQ_POST_MIME &&
1869: /* this header is sent later */
1870: checkprefix("Content-Type:", compare))
1871: ;
1872: else if(conn->bits.authneg &&
1873: /* while doing auth neg, don't allow the custom length since
1874: we will force length zero then */
1875: checkprefix("Content-Length:", compare))
1876: ;
1877: else if(conn->allocptr.te &&
1878: /* when asking for Transfer-Encoding, don't pass on a custom
1879: Connection: */
1880: checkprefix("Connection:", compare))
1881: ;
1882: else if((conn->httpversion >= 20) &&
1883: checkprefix("Transfer-Encoding:", compare))
1884: /* HTTP/2 doesn't support chunked requests */
1885: ;
1886: else if((checkprefix("Authorization:", compare) ||
1887: checkprefix("Cookie:", compare)) &&
1888: /* be careful of sending this potentially sensitive header to
1889: other hosts */
1890: (data->state.this_is_a_follow &&
1891: data->state.first_host &&
1892: !data->set.allow_auth_to_other_hosts &&
1893: !strcasecompare(data->state.first_host, conn->host.name)))
1894: ;
1895: else {
1896: result = Curl_add_bufferf(&req_buffer, "%s\r\n", compare);
1897: }
1898: if(semicolonp)
1899: free(semicolonp);
1900: if(result)
1901: return result;
1902: }
1903: }
1904: headers = headers->next;
1905: }
1906: }
1907:
1908: return CURLE_OK;
1909: }
1910:
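/*
 * Curl_add_timecondition() adds an If-Modified-Since, If-Unmodified-Since or
 * Last-Modified header built from CURLOPT_TIMECONDITION and CURLOPT_TIMEVALUE,
 * unless no condition was requested or the user already supplied that header.
 */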
1911: #ifndef CURL_DISABLE_PARSEDATE
1912: CURLcode Curl_add_timecondition(const struct connectdata *conn,
1913: Curl_send_buffer *req_buffer)
1914: {
1915: struct Curl_easy *data = conn->data;
1916: const struct tm *tm;
1917: struct tm keeptime;
1918: CURLcode result;
1919: char datestr[80];
1920: const char *condp;
1921:
1922: if(data->set.timecondition == CURL_TIMECOND_NONE)
1923: /* no condition was asked for */
1924: return CURLE_OK;
1925:
1926: result = Curl_gmtime(data->set.timevalue, &keeptime);
1927: if(result) {
1928: failf(data, "Invalid TIMEVALUE");
1929: return result;
1930: }
1931: tm = &keeptime;
1932:
1933: switch(data->set.timecondition) {
1934: default:
1935: return CURLE_BAD_FUNCTION_ARGUMENT;
1936:
1937: case CURL_TIMECOND_IFMODSINCE:
1938: condp = "If-Modified-Since";
1939: break;
1940: case CURL_TIMECOND_IFUNMODSINCE:
1941: condp = "If-Unmodified-Since";
1942: break;
1943: case CURL_TIMECOND_LASTMOD:
1944: condp = "Last-Modified";
1945: break;
1946: }
1947:
1948: if(Curl_checkheaders(conn, condp)) {
1949: /* A custom header was specified; it will be sent instead. */
1950: return CURLE_OK;
1951: }
1952:
1953: /* The If-Modified-Since header family should have their times set in
1954: * GMT as RFC2616 defines: "All HTTP date/time stamps MUST be
1955: * represented in Greenwich Mean Time (GMT), without exception. For the
1956: * purposes of HTTP, GMT is exactly equal to UTC (Coordinated Universal
1957: * Time)." (see page 20 of RFC2616).
1958: */
1959:
1960: /* format: "Tue, 15 Nov 1994 12:45:26 GMT" */
1961: msnprintf(datestr, sizeof(datestr),
1962: "%s: %s, %02d %s %4d %02d:%02d:%02d GMT\r\n",
1963: condp,
1964: Curl_wkday[tm->tm_wday?tm->tm_wday-1:6],
1965: tm->tm_mday,
1966: Curl_month[tm->tm_mon],
1967: tm->tm_year + 1900,
1968: tm->tm_hour,
1969: tm->tm_min,
1970: tm->tm_sec);
1971:
1972: result = Curl_add_buffer(&req_buffer, datestr, strlen(datestr));
1973:
1974: return result;
1975: }
1976: #else
1977: /* disabled */
1978: CURLcode Curl_add_timecondition(const struct connectdata *conn,
1979: Curl_send_buffer *req_buffer)
1980: {
1981: (void)conn;
1982: (void)req_buffer;
1983: return CURLE_OK;
1984: }
1985: #endif
1986:
1987: /*
1988: * Curl_http() gets called from the generic multi_do() function when an HTTP
1989: * request is to be performed. This creates and sends a properly constructed
1990: * HTTP request.
1991: */
1992: CURLcode Curl_http(struct connectdata *conn, bool *done)
1993: {
1994: struct Curl_easy *data = conn->data;
1995: CURLcode result = CURLE_OK;
1996: struct HTTP *http;
1997: const char *path = data->state.up.path;
1998: const char *query = data->state.up.query;
1999: bool paste_ftp_userpwd = FALSE;
2000: char ftp_typecode[sizeof("/;type=?")] = "";
2001: const char *host = conn->host.name;
2002: const char *te = ""; /* transfer-encoding */
2003: const char *ptr;
2004: const char *request;
2005: Curl_HttpReq httpreq = data->set.httpreq;
2006: #if !defined(CURL_DISABLE_COOKIES)
2007: char *addcookies = NULL;
2008: #endif
2009: curl_off_t included_body = 0;
2010: const char *httpstring;
2011: Curl_send_buffer *req_buffer;
2012: curl_off_t postsize = 0; /* curl_off_t to handle large file sizes */
2013: char *altused = NULL;
2014:
2015: /* Always consider the DO phase done after this function call, even if there
2016: may be parts of the request that are not yet sent, since we can deal with
2017: the rest of the request in the PERFORM phase. */
2018: *done = TRUE;
2019:
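/* Settle the HTTP version for this request. This block is skipped for QUIC
   transports; otherwise we switch to HTTP/2 when it was negotiated via
   ALPN/NPN, when the user asked for prior knowledge h2 over clear TCP, or
   we prepare the already-switched h2 connection for another request. */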
2020: if(conn->transport != TRNSPRT_QUIC) {
2021: if(conn->httpversion < 20) { /* unless the connection is re-used and
2022: already http2 */
2023: switch(conn->negnpn) {
2024: case CURL_HTTP_VERSION_2:
2025: conn->httpversion = 20; /* we know we're on HTTP/2 now */
2026:
2027: result = Curl_http2_switched(conn, NULL, 0);
2028: if(result)
2029: return result;
2030: break;
2031: case CURL_HTTP_VERSION_1_1:
2032: /* continue with HTTP/1.1 when explicitly requested */
2033: break;
2034: default:
2035: /* Check if the user wants to use HTTP/2 with clear TCP */
2036: #ifdef USE_NGHTTP2
2037: if(conn->data->set.httpversion ==
2038: CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE) {
2039: if(conn->bits.httpproxy && !conn->bits.tunnel_proxy) {
2040: /* We don't support HTTP/2 proxies yet. Also it's debatable
2041: whether or not this setting should apply to HTTP/2 proxies. */
2042: infof(data, "Ignoring HTTP/2 prior knowledge due to proxy\n");
2043: break;
2044: }
2045:
2046: DEBUGF(infof(data, "HTTP/2 over clean TCP\n"));
2047: conn->httpversion = 20;
2048:
2049: result = Curl_http2_switched(conn, NULL, 0);
2050: if(result)
2051: return result;
2052: }
2053: #endif
2054: break;
2055: }
2056: }
2057: else {
2058: /* prepare for a http2 request */
2059: result = Curl_http2_setup(conn);
2060: if(result)
2061: return result;
2062: }
2063: }
2064: http = data->req.protop;
2065: DEBUGASSERT(http);
2066:
2067: if(!data->state.this_is_a_follow) {
2068: /* Free to avoid leaking memory on multiple requests */
2069: free(data->state.first_host);
2070:
2071: data->state.first_host = strdup(conn->host.name);
2072: if(!data->state.first_host)
2073: return CURLE_OUT_OF_MEMORY;
2074:
2075: data->state.first_remote_port = conn->remote_port;
2076: }
2077:
2078: if((conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_FTP)) &&
2079: data->set.upload) {
2080: httpreq = HTTPREQ_PUT;
2081: }
2082:
2083: /* Now set the 'request' pointer to the proper request string */
2084: if(data->set.str[STRING_CUSTOMREQUEST])
2085: request = data->set.str[STRING_CUSTOMREQUEST];
2086: else {
2087: if(data->set.opt_no_body)
2088: request = "HEAD";
2089: else {
2090: DEBUGASSERT((httpreq > HTTPREQ_NONE) && (httpreq < HTTPREQ_LAST));
2091: switch(httpreq) {
2092: case HTTPREQ_POST:
2093: case HTTPREQ_POST_FORM:
2094: case HTTPREQ_POST_MIME:
2095: request = "POST";
2096: break;
2097: case HTTPREQ_PUT:
2098: request = "PUT";
2099: break;
2100: case HTTPREQ_OPTIONS:
2101: request = "OPTIONS";
2102: break;
2103: default: /* this should never happen */
2104: case HTTPREQ_GET:
2105: request = "GET";
2106: break;
2107: case HTTPREQ_HEAD:
2108: request = "HEAD";
2109: break;
2110: }
2111: }
2112: }
2113:
2114: /* The User-Agent string might have been allocated in url.c already, because
2115: it might have been used in the proxy connect, but if we have got a header
2116: with the user-agent string specified, we erase the previously made string
2117: here. */
2118: if(Curl_checkheaders(conn, "User-Agent")) {
2119: free(conn->allocptr.uagent);
2120: conn->allocptr.uagent = NULL;
2121: }
2122:
2123: /* setup the authentication headers */
2124: {
2125: char *pq = NULL;
2126: if(query && *query) {
2127: pq = aprintf("%s?%s", path, query);
2128: if(!pq)
2129: return CURLE_OUT_OF_MEMORY;
2130: }
2131: result = Curl_http_output_auth(conn, request, (pq ? pq : path), FALSE);
2132: free(pq);
2133: if(result)
2134: return result;
2135: }
2136:
2137: if(((data->state.authhost.multipass && !data->state.authhost.done)
2138: || (data->state.authproxy.multipass && !data->state.authproxy.done)) &&
2139: (httpreq != HTTPREQ_GET) &&
2140: (httpreq != HTTPREQ_HEAD)) {
2141: /* Auth is required and we are not authenticated yet. Make a PUT or POST
2142: with content-length zero as a "probe". */
2143: conn->bits.authneg = TRUE;
2144: }
2145: else
2146: conn->bits.authneg = FALSE;
2147:
2148: Curl_safefree(conn->allocptr.ref);
2149: if(data->change.referer && !Curl_checkheaders(conn, "Referer")) {
2150: conn->allocptr.ref = aprintf("Referer: %s\r\n", data->change.referer);
2151: if(!conn->allocptr.ref)
2152: return CURLE_OUT_OF_MEMORY;
2153: }
2154: else
2155: conn->allocptr.ref = NULL;
2156:
2157: #if !defined(CURL_DISABLE_COOKIES)
2158: if(data->set.str[STRING_COOKIE] && !Curl_checkheaders(conn, "Cookie"))
2159: addcookies = data->set.str[STRING_COOKIE];
2160: #endif
2161:
2162: if(!Curl_checkheaders(conn, "Accept-Encoding") &&
2163: data->set.str[STRING_ENCODING]) {
2164: Curl_safefree(conn->allocptr.accept_encoding);
2165: conn->allocptr.accept_encoding =
2166: aprintf("Accept-Encoding: %s\r\n", data->set.str[STRING_ENCODING]);
2167: if(!conn->allocptr.accept_encoding)
2168: return CURLE_OUT_OF_MEMORY;
2169: }
2170: else {
2171: Curl_safefree(conn->allocptr.accept_encoding);
2172: conn->allocptr.accept_encoding = NULL;
2173: }
2174:
2175: #ifdef HAVE_LIBZ
2176: /* we only consider transfer-encoding magic if libz support is built-in */
2177:
2178: if(!Curl_checkheaders(conn, "TE") &&
2179: data->set.http_transfer_encoding) {
2180: /* When we are to insert a TE: header in the request, we must also insert
2181: TE in a Connection: header, so we need to merge the custom provided
2182: Connection: header and prevent the original from being sent. Note that if
2183: the user has inserted their own TE: header we don't do this magic
2184: but instead assume that the user will handle it all! */
2185: char *cptr = Curl_checkheaders(conn, "Connection");
2186: #define TE_HEADER "TE: gzip\r\n"
2187:
2188: Curl_safefree(conn->allocptr.te);
2189:
2190: if(cptr) {
2191: cptr = Curl_copy_header_value(cptr);
2192: if(!cptr)
2193: return CURLE_OUT_OF_MEMORY;
2194: }
2195:
2196: /* Create the (updated) Connection: header */
2197: conn->allocptr.te = aprintf("Connection: %s%sTE\r\n" TE_HEADER,
2198: cptr ? cptr : "", (cptr && *cptr) ? ", ":"");
2199:
2200: free(cptr);
2201: if(!conn->allocptr.te)
2202: return CURLE_OUT_OF_MEMORY;
2203: }
2204: #endif
2205:
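/* Select the body source for the coming request: MIME and form posts send a
   prepared mime structure, the other methods have no mime body attached. */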
2206: switch(httpreq) {
2207: case HTTPREQ_POST_MIME:
2208: http->sendit = &data->set.mimepost;
2209: break;
2210: case HTTPREQ_POST_FORM:
2211: /* Convert the form structure into a mime structure. */
2212: Curl_mime_cleanpart(&http->form);
2213: result = Curl_getformdata(data, &http->form, data->set.httppost,
2214: data->state.fread_func);
2215: if(result)
2216: return result;
2217: http->sendit = &http->form;
2218: break;
2219: default:
2220: http->sendit = NULL;
2221: }
2222:
2223: #ifndef CURL_DISABLE_MIME
2224: if(http->sendit) {
2225: const char *cthdr = Curl_checkheaders(conn, "Content-Type");
2226:
2227: /* Read and seek body only. */
2228: http->sendit->flags |= MIME_BODY_ONLY;
2229:
2230: /* Prepare the mime structure headers & set content type. */
2231:
2232: if(cthdr)
2233: for(cthdr += 13; *cthdr == ' '; cthdr++)
2234: ;
2235: else if(http->sendit->kind == MIMEKIND_MULTIPART)
2236: cthdr = "multipart/form-data";
2237:
2238: curl_mime_headers(http->sendit, data->set.headers, 0);
2239: result = Curl_mime_prepare_headers(http->sendit, cthdr,
2240: NULL, MIMESTRATEGY_FORM);
2241: curl_mime_headers(http->sendit, NULL, 0);
2242: if(!result)
2243: result = Curl_mime_rewind(http->sendit);
2244: if(result)
2245: return result;
2246: http->postsize = Curl_mime_size(http->sendit);
2247: }
2248: #endif
2249:
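/* Decide whether the upload must be chunked: either the user asked for it
   with a Transfer-Encoding header, or the body size is unknown, which needs
   chunked transfer on HTTP/1.1 and is refused on HTTP/1.0. */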
2250: ptr = Curl_checkheaders(conn, "Transfer-Encoding");
2251: if(ptr) {
2252: /* Some kind of TE is requested, check if 'chunked' is chosen */
2253: data->req.upload_chunky =
2254: Curl_compareheader(ptr, "Transfer-Encoding:", "chunked");
2255: }
2256: else {
2257: if((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
2258: (((httpreq == HTTPREQ_POST_MIME || httpreq == HTTPREQ_POST_FORM) &&
2259: http->postsize < 0) ||
2260: ((data->set.upload || httpreq == HTTPREQ_POST) &&
2261: data->state.infilesize == -1))) {
2262: if(conn->bits.authneg)
2263: /* don't enable chunked during auth neg */
2264: ;
2265: else if(use_http_1_1plus(data, conn)) {
2266: if(conn->httpversion < 20)
2267: /* HTTP, upload, unknown file size and not HTTP 1.0 */
2268: data->req.upload_chunky = TRUE;
2269: }
2270: else {
2271: failf(data, "Chunky upload is not supported by HTTP 1.0");
2272: return CURLE_UPLOAD_FAILED;
2273: }
2274: }
2275: else {
2276: /* else, no chunky upload */
2277: data->req.upload_chunky = FALSE;
2278: }
2279:
2280: if(data->req.upload_chunky)
2281: te = "Transfer-Encoding: chunked\r\n";
2282: }
2283:
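/* Build the Host: header. A user-provided Host: is honored (and also mined
   for the cookie host) unless this is a redirect to a different host name;
   otherwise the header is generated from the connection's host and port. */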
2284: Curl_safefree(conn->allocptr.host);
2285:
2286: ptr = Curl_checkheaders(conn, "Host");
2287: if(ptr && (!data->state.this_is_a_follow ||
2288: strcasecompare(data->state.first_host, conn->host.name))) {
2289: #if !defined(CURL_DISABLE_COOKIES)
2290: /* If we have a given custom Host: header, we extract the host name in
2291: order to possibly use it for cookie reasons later on. We only allow the
2292: custom Host: header if this is NOT a redirect, as setting Host: in the
2293: redirected request is walking on thin ice. Except if the host name
2294: is the same as the first one! */
2295: char *cookiehost = Curl_copy_header_value(ptr);
2296: if(!cookiehost)
2297: return CURLE_OUT_OF_MEMORY;
2298: if(!*cookiehost)
2299: /* ignore empty data */
2300: free(cookiehost);
2301: else {
2302: /* If the host begins with '[', we start searching for the port after
2303: the bracket has been closed */
2304: if(*cookiehost == '[') {
2305: char *closingbracket;
2306: /* since the 'cookiehost' is an allocated memory area that will be
2307: freed later we cannot simply increment the pointer */
2308: memmove(cookiehost, cookiehost + 1, strlen(cookiehost) - 1);
2309: closingbracket = strchr(cookiehost, ']');
2310: if(closingbracket)
2311: *closingbracket = 0;
2312: }
2313: else {
2314: int startsearch = 0;
2315: char *colon = strchr(cookiehost + startsearch, ':');
2316: if(colon)
2317: *colon = 0; /* The host must not include an embedded port number */
2318: }
2319: Curl_safefree(conn->allocptr.cookiehost);
2320: conn->allocptr.cookiehost = cookiehost;
2321: }
2322: #endif
2323:
2324: if(strcmp("Host:", ptr)) {
2325: conn->allocptr.host = aprintf("Host:%s\r\n", &ptr[5]);
2326: if(!conn->allocptr.host)
2327: return CURLE_OUT_OF_MEMORY;
2328: }
2329: else
2330: /* when clearing the header */
2331: conn->allocptr.host = NULL;
2332: }
2333: else {
2334: /* When building Host: headers, we must put the host name within
2335: [brackets] if the host name is a plain IPv6-address. RFC2732-style. */
2336:
2337: if(((conn->given->protocol&CURLPROTO_HTTPS) &&
2338: (conn->remote_port == PORT_HTTPS)) ||
2339: ((conn->given->protocol&CURLPROTO_HTTP) &&
2340: (conn->remote_port == PORT_HTTP)) )
2341: /* if(HTTPS on port 443) OR (HTTP on port 80) then don't include
2342: the port number in the host string */
2343: conn->allocptr.host = aprintf("Host: %s%s%s\r\n",
2344: conn->bits.ipv6_ip?"[":"",
2345: host,
2346: conn->bits.ipv6_ip?"]":"");
2347: else
2348: conn->allocptr.host = aprintf("Host: %s%s%s:%d\r\n",
2349: conn->bits.ipv6_ip?"[":"",
2350: host,
2351: conn->bits.ipv6_ip?"]":"",
2352: conn->remote_port);
2353:
2354: if(!conn->allocptr.host)
2355: /* without Host: we can't make a nice request */
2356: return CURLE_OUT_OF_MEMORY;
2357: }
2358:
2359: #ifndef CURL_DISABLE_PROXY
2360: if(conn->bits.httpproxy && !conn->bits.tunnel_proxy) {
2361: /* Using a proxy but not tunneling through it */
2362:
2363: /* The path sent to the proxy is in fact the entire URL. But if the remote
2364: host is an IDN name, we must make sure that the request we produce only
2365: uses the encoded host name! */
2366:
2367: /* and no fragment part */
2368: CURLUcode uc;
2369: CURLU *h = curl_url_dup(data->state.uh);
2370: if(!h)
2371: return CURLE_OUT_OF_MEMORY;
2372:
2373: if(conn->host.dispname != conn->host.name) {
2374: uc = curl_url_set(h, CURLUPART_HOST, conn->host.name, 0);
2375: if(uc) {
2376: curl_url_cleanup(h);
2377: return CURLE_OUT_OF_MEMORY;
2378: }
2379: }
2380: uc = curl_url_set(h, CURLUPART_FRAGMENT, NULL, 0);
2381: if(uc) {
2382: curl_url_cleanup(h);
2383: return CURLE_OUT_OF_MEMORY;
2384: }
2385:
2386: if(strcasecompare("http", data->state.up.scheme)) {
2387: /* when getting HTTP, we don't want the userinfo in the URL */
2388: uc = curl_url_set(h, CURLUPART_USER, NULL, 0);
2389: if(uc) {
2390: curl_url_cleanup(h);
2391: return CURLE_OUT_OF_MEMORY;
2392: }
2393: uc = curl_url_set(h, CURLUPART_PASSWORD, NULL, 0);
2394: if(uc) {
2395: curl_url_cleanup(h);
2396: return CURLE_OUT_OF_MEMORY;
2397: }
2398: }
2399: /* Extract the URL to use in the request. Store in STRING_TEMP_URL for
2400: clean-up reasons if the function returns before the free() further
2401: down. */
2402: uc = curl_url_get(h, CURLUPART_URL, &data->set.str[STRING_TEMP_URL], 0);
2403: if(uc) {
2404: curl_url_cleanup(h);
2405: return CURLE_OUT_OF_MEMORY;
2406: }
2407:
2408: curl_url_cleanup(h);
2409:
2410: if(strcasecompare("ftp", data->state.up.scheme)) {
2411: if(data->set.proxy_transfer_mode) {
2412: /* when doing ftp, append ;type=<a|i> if not present */
2413: char *type = strstr(path, ";type=");
2414: if(type && type[6] && type[7] == 0) {
2415: switch(Curl_raw_toupper(type[6])) {
2416: case 'A':
2417: case 'D':
2418: case 'I':
2419: break;
2420: default:
2421: type = NULL;
2422: }
2423: }
2424: if(!type) {
2425: char *p = ftp_typecode;
2426: /* avoid sending invalid URLs like ftp://example.com;type=i if the
2427: * user specified ftp://example.com without the slash */
2428: if(!*data->state.up.path && path[strlen(path) - 1] != '/') {
2429: *p++ = '/';
2430: }
2431: msnprintf(p, sizeof(ftp_typecode) - 1, ";type=%c",
2432: data->set.prefer_ascii ? 'a' : 'i');
2433: }
2434: }
2435: if(conn->bits.user_passwd && !conn->bits.userpwd_in_url)
2436: paste_ftp_userpwd = TRUE;
2437: }
2438: }
2439: #endif /* CURL_DISABLE_PROXY */
2440:
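/* Offer a default wildcard Accept header unless the user supplied one. */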
2441: http->p_accept = Curl_checkheaders(conn, "Accept")?NULL:"Accept: */*\r\n";
2442:
2443: if((HTTPREQ_POST == httpreq || HTTPREQ_PUT == httpreq) &&
2444: data->state.resume_from) {
2445: /**********************************************************************
2446: * Resuming upload in HTTP means that we PUT or POST and that we have
2447: * got a resume_from value set. The resume value has already created
2448: * a Range: header that will be passed along. We need to "fast forward"
2449: * the file the given number of bytes and decrease the assumed upload
2450: * file size before we continue this venture in the dark lands of HTTP.
2451: * Resuming mime/form posting at an offset > 0 makes no sense and is ignored.
2452: *********************************************************************/
2453:
2454: if(data->state.resume_from < 0) {
2455: /*
2456: * This is meant to get the size of the present remote-file by itself.
2457: * We don't support this now. Bail out!
2458: */
2459: data->state.resume_from = 0;
2460: }
2461:
2462: if(data->state.resume_from && !data->state.this_is_a_follow) {
2463: /* are we still in the game? */
2464:
2465: /* Now, let's read off the proper amount of bytes from the
2466: input. */
2467: int seekerr = CURL_SEEKFUNC_CANTSEEK;
2468: if(conn->seek_func) {
2469: Curl_set_in_callback(data, true);
2470: seekerr = conn->seek_func(conn->seek_client, data->state.resume_from,
2471: SEEK_SET);
2472: Curl_set_in_callback(data, false);
2473: }
2474:
2475: if(seekerr != CURL_SEEKFUNC_OK) {
2476: curl_off_t passed = 0;
2477:
2478: if(seekerr != CURL_SEEKFUNC_CANTSEEK) {
2479: failf(data, "Could not seek stream");
2480: return CURLE_READ_ERROR;
2481: }
2482: /* when seekerr == CURL_SEEKFUNC_CANTSEEK (can't seek to offset) */
2483: do {
2484: size_t readthisamountnow =
2485: (data->state.resume_from - passed > data->set.buffer_size) ?
2486: (size_t)data->set.buffer_size :
2487: curlx_sotouz(data->state.resume_from - passed);
2488:
2489: size_t actuallyread =
2490: data->state.fread_func(data->state.buffer, 1, readthisamountnow,
2491: data->state.in);
2492:
2493: passed += actuallyread;
2494: if((actuallyread == 0) || (actuallyread > readthisamountnow)) {
2495: /* this checks for greater-than only to make sure that the
2496: CURL_READFUNC_ABORT return code still aborts */
2497: failf(data, "Could only read %" CURL_FORMAT_CURL_OFF_T
2498: " bytes from the input", passed);
2499: return CURLE_READ_ERROR;
2500: }
2501: } while(passed < data->state.resume_from);
2502: }
2503:
2504: /* now, decrease the size of the read */
2505: if(data->state.infilesize>0) {
2506: data->state.infilesize -= data->state.resume_from;
2507:
2508: if(data->state.infilesize <= 0) {
2509: failf(data, "File already completely uploaded");
2510: return CURLE_PARTIAL_FILE;
2511: }
2512: }
2513: /* we've passed, proceed as normal */
2514: }
2515: }
2516: if(data->state.use_range) {
2517: /*
2518: * A range is selected. We use different headers depending on whether we're downloading
2519: * or uploading and we always let customized headers override our internal
2520: * ones if any such are specified.
2521: */
2522: if(((httpreq == HTTPREQ_GET) || (httpreq == HTTPREQ_HEAD)) &&
2523: !Curl_checkheaders(conn, "Range")) {
2524: /* if a line like this was already allocated, free the previous one */
2525: free(conn->allocptr.rangeline);
2526: conn->allocptr.rangeline = aprintf("Range: bytes=%s\r\n",
2527: data->state.range);
2528: }
2529: else if((httpreq == HTTPREQ_POST || httpreq == HTTPREQ_PUT) &&
2530: !Curl_checkheaders(conn, "Content-Range")) {
2531:
2532: /* if a line like this was already allocated, free the previous one */
2533: free(conn->allocptr.rangeline);
2534:
2535: if(data->set.set_resume_from < 0) {
2536: /* Upload resume was asked for, but we don't know the size of the
2537: remote part so we tell the server (and act accordingly) that we
2538: upload the whole file (again) */
2539: conn->allocptr.rangeline =
2540: aprintf("Content-Range: bytes 0-%" CURL_FORMAT_CURL_OFF_T
2541: "/%" CURL_FORMAT_CURL_OFF_T "\r\n",
2542: data->state.infilesize - 1, data->state.infilesize);
2543:
2544: }
2545: else if(data->state.resume_from) {
2546: /* This is because "resume" was selected */
2547: curl_off_t total_expected_size =
2548: data->state.resume_from + data->state.infilesize;
2549: conn->allocptr.rangeline =
2550: aprintf("Content-Range: bytes %s%" CURL_FORMAT_CURL_OFF_T
2551: "/%" CURL_FORMAT_CURL_OFF_T "\r\n",
2552: data->state.range, total_expected_size-1,
2553: total_expected_size);
2554: }
2555: else {
2556: /* Range was selected and then we just pass the incoming range and
2557: append total size */
2558: conn->allocptr.rangeline =
2559: aprintf("Content-Range: bytes %s/%" CURL_FORMAT_CURL_OFF_T "\r\n",
2560: data->state.range, data->state.infilesize);
2561: }
2562: if(!conn->allocptr.rangeline)
2563: return CURLE_OUT_OF_MEMORY;
2564: }
2565: }
2566:
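/* All header decisions are made: assemble the request line and headers into
   a dynamically growing send buffer, to be handed off to
   Curl_add_buffer_send() further down. */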
2567: httpstring = get_http_string(data, conn);
2568:
2569: /* initialize a dynamic send-buffer */
2570: req_buffer = Curl_add_buffer_init();
2571:
2572: if(!req_buffer)
2573: return CURLE_OUT_OF_MEMORY;
2574:
2575: /* add the main request stuff */
2576: /* GET/HEAD/POST/PUT */
2577: result = Curl_add_bufferf(&req_buffer, "%s ", request);
2578: if(result)
2579: return result;
2580:
2581: if(data->set.str[STRING_TARGET]) {
2582: path = data->set.str[STRING_TARGET];
2583: query = NULL;
2584: }
2585:
2586: #ifndef CURL_DISABLE_PROXY
2587: /* url */
2588: if(conn->bits.httpproxy && !conn->bits.tunnel_proxy) {
2589: char *url = data->set.str[STRING_TEMP_URL];
2590: result = Curl_add_buffer(&req_buffer, url, strlen(url));
2591: Curl_safefree(data->set.str[STRING_TEMP_URL]);
2592: }
2593: else
2594: #endif
2595: if(paste_ftp_userpwd)
2596: result = Curl_add_bufferf(&req_buffer, "ftp://%s:%s@%s",
2597: conn->user, conn->passwd,
2598: path + sizeof("ftp://") - 1);
2599: else {
2600: result = Curl_add_buffer(&req_buffer, path, strlen(path));
2601: if(result)
2602: return result;
2603: if(query)
2604: result = Curl_add_bufferf(&req_buffer, "?%s", query);
2605: }
2606: if(result)
2607: return result;
2608:
2609: #ifdef USE_ALTSVC
2610: if(conn->bits.altused && !Curl_checkheaders(conn, "Alt-Used")) {
2611: altused = aprintf("Alt-Used: %s:%d\r\n",
2612: conn->conn_to_host.name, conn->conn_to_port);
2613: if(!altused) {
2614: Curl_add_buffer_free(&req_buffer);
2615: return CURLE_OUT_OF_MEMORY;
2616: }
2617: }
2618: #endif
2619: result =
2620: Curl_add_bufferf(&req_buffer,
2621: "%s" /* ftp typecode (;type=x) */
2622: " HTTP/%s\r\n" /* HTTP version */
2623: "%s" /* host */
2624: "%s" /* proxyuserpwd */
2625: "%s" /* userpwd */
2626: "%s" /* range */
2627: "%s" /* user agent */
2628: "%s" /* accept */
2629: "%s" /* TE: */
2630: "%s" /* accept-encoding */
2631: "%s" /* referer */
2632: "%s" /* Proxy-Connection */
2633: "%s" /* transfer-encoding */
2634: "%s",/* Alt-Used */
2635:
2636: ftp_typecode,
2637: httpstring,
2638: (conn->allocptr.host?conn->allocptr.host:""),
2639: conn->allocptr.proxyuserpwd?
2640: conn->allocptr.proxyuserpwd:"",
2641: conn->allocptr.userpwd?conn->allocptr.userpwd:"",
2642: (data->state.use_range && conn->allocptr.rangeline)?
2643: conn->allocptr.rangeline:"",
2644: (data->set.str[STRING_USERAGENT] &&
2645: *data->set.str[STRING_USERAGENT] &&
2646: conn->allocptr.uagent)?
2647: conn->allocptr.uagent:"",
2648: http->p_accept?http->p_accept:"",
2649: conn->allocptr.te?conn->allocptr.te:"",
2650: (data->set.str[STRING_ENCODING] &&
2651: *data->set.str[STRING_ENCODING] &&
2652: conn->allocptr.accept_encoding)?
2653: conn->allocptr.accept_encoding:"",
2654: (data->change.referer && conn->allocptr.ref)?
2655: conn->allocptr.ref:"" /* Referer: <data> */,
2656: (conn->bits.httpproxy &&
2657: !conn->bits.tunnel_proxy &&
2658: !Curl_checkProxyheaders(conn, "Proxy-Connection"))?
2659: "Proxy-Connection: Keep-Alive\r\n":"",
2660: te,
2661: altused ? altused : ""
2662: );
2663:
2664: /* clear userpwd and proxyuserpwd to avoid re-using old credentials
2665: * from re-used connections */
2666: Curl_safefree(conn->allocptr.userpwd);
2667: Curl_safefree(conn->allocptr.proxyuserpwd);
2668: free(altused);
2669:
2670: if(result)
2671: return result;
2672:
2673: if(!(conn->handler->flags&PROTOPT_SSL) &&
2674: conn->httpversion != 20 &&
2675: (data->set.httpversion == CURL_HTTP_VERSION_2)) {
2676: /* append HTTP2 upgrade magic stuff to the HTTP request if it isn't done
2677: over SSL */
2678: result = Curl_http2_request_upgrade(req_buffer, conn);
2679: if(result)
2680: return result;
2681: }
2682:
2683: #if !defined(CURL_DISABLE_COOKIES)
2684: if(data->cookies || addcookies) {
2685: struct Cookie *co = NULL; /* no cookies from start */
2686: int count = 0;
2687:
2688: if(data->cookies && data->state.cookie_engine) {
2689: Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
2690: co = Curl_cookie_getlist(data->cookies,
2691: conn->allocptr.cookiehost?
2692: conn->allocptr.cookiehost:host,
2693: data->state.up.path,
2694: (conn->handler->protocol&CURLPROTO_HTTPS)?
2695: TRUE:FALSE);
2696: Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
2697: }
2698: if(co) {
2699: struct Cookie *store = co;
2700: /* now loop through all cookies that matched */
2701: while(co) {
2702: if(co->value) {
2703: if(0 == count) {
2704: result = Curl_add_bufferf(&req_buffer, "Cookie: ");
2705: if(result)
2706: break;
2707: }
2708: result = Curl_add_bufferf(&req_buffer,
2709: "%s%s=%s", count?"; ":"",
2710: co->name, co->value);
2711: if(result)
2712: break;
2713: count++;
2714: }
2715: co = co->next; /* next cookie please */
2716: }
2717: Curl_cookie_freelist(store);
2718: }
2719: if(addcookies && !result) {
2720: if(!count)
2721: result = Curl_add_bufferf(&req_buffer, "Cookie: ");
2722: if(!result) {
2723: result = Curl_add_bufferf(&req_buffer, "%s%s", count?"; ":"",
2724: addcookies);
2725: count++;
2726: }
2727: }
2728: if(count && !result)
2729: result = Curl_add_buffer(&req_buffer, "\r\n", 2);
2730:
2731: if(result)
2732: return result;
2733: }
2734: #endif
2735:
2736: result = Curl_add_timecondition(conn, req_buffer);
2737: if(result)
2738: return result;
2739:
2740: result = Curl_add_custom_headers(conn, FALSE, req_buffer);
2741: if(result)
2742: return result;
2743:
2744: http->postdata = NULL; /* nothing to post at this point */
2745: Curl_pgrsSetUploadSize(data, -1); /* upload size is unknown atm */
2746:
2747: /* If we are still negotiating auth (conn->bits.authneg is TRUE), we must not
2748: set the write socket index in the Curl_setup_transfer() calls below, as
2749: we're not ready to actually upload any data yet. */
2750:
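/* Finish the request per method: each branch below adds its body-related
   headers, terminates the header block with a final CRLF, sends the buffer
   and sets up the transfer of any remaining request body. */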
2751: switch(httpreq) {
2752:
2753: case HTTPREQ_PUT: /* Let's PUT the data to the server! */
2754:
2755: if(conn->bits.authneg)
2756: postsize = 0;
2757: else
2758: postsize = data->state.infilesize;
2759:
2760: if((postsize != -1) && !data->req.upload_chunky &&
2761: (conn->bits.authneg || !Curl_checkheaders(conn, "Content-Length"))) {
2762: /* only add Content-Length if not uploading chunked */
2763: result = Curl_add_bufferf(&req_buffer,
2764: "Content-Length: %" CURL_FORMAT_CURL_OFF_T
2765: "\r\n", postsize);
2766: if(result)
2767: return result;
2768: }
2769:
2770: if(postsize != 0) {
2771: result = expect100(data, conn, req_buffer);
2772: if(result)
2773: return result;
2774: }
2775:
2776: result = Curl_add_buffer(&req_buffer, "\r\n", 2); /* end of headers */
2777: if(result)
2778: return result;
2779:
2780: /* set the upload size to the progress meter */
2781: Curl_pgrsSetUploadSize(data, postsize);
2782:
2783: /* this sends the buffer and frees all the buffer resources */
2784: result = Curl_add_buffer_send(&req_buffer, conn,
2785: &data->info.request_size, 0, FIRSTSOCKET);
2786: if(result)
2787: failf(data, "Failed sending PUT request");
2788: else
2789: /* prepare for transfer */
2790: Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE,
2791: postsize?FIRSTSOCKET:-1);
2792: if(result)
2793: return result;
2794: break;
2795:
2796: case HTTPREQ_POST_FORM:
2797: case HTTPREQ_POST_MIME:
2798: /* This is form posting using mime data. */
2799: if(conn->bits.authneg) {
2800: /* nothing to post! */
2801: result = Curl_add_bufferf(&req_buffer, "Content-Length: 0\r\n\r\n");
2802: if(result)
2803: return result;
2804:
2805: result = Curl_add_buffer_send(&req_buffer, conn,
2806: &data->info.request_size, 0, FIRSTSOCKET);
2807: if(result)
2808: failf(data, "Failed sending POST request");
2809: else
2810: /* setup variables for the upcoming transfer */
2811: Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, -1);
2812: break;
2813: }
2814:
2815: data->state.infilesize = postsize = http->postsize;
2816:
2817: /* We only set Content-Length and allow a custom Content-Length if
2818: we don't upload data chunked, as RFC2616 forbids us to set both
2819: kinds of headers (Transfer-Encoding: chunked and Content-Length) */
2820: if(postsize != -1 && !data->req.upload_chunky &&
2821: (conn->bits.authneg || !Curl_checkheaders(conn, "Content-Length"))) {
2822: /* we allow replacing this header if not during auth negotiation,
2823: although it isn't very wise to actually set your own */
2824: result = Curl_add_bufferf(&req_buffer,
2825: "Content-Length: %" CURL_FORMAT_CURL_OFF_T
2826: "\r\n", postsize);
2827: if(result)
2828: return result;
2829: }
2830:
2831: #ifndef CURL_DISABLE_MIME
2832: /* Output mime-generated headers. */
2833: {
2834: struct curl_slist *hdr;
2835:
2836: for(hdr = http->sendit->curlheaders; hdr; hdr = hdr->next) {
2837: result = Curl_add_bufferf(&req_buffer, "%s\r\n", hdr->data);
2838: if(result)
2839: return result;
2840: }
2841: }
2842: #endif
2843:
2844: /* For really small posts we don't use Expect: headers at all, and for
2845: the somewhat bigger ones we allow the app to disable it. Just make
2846: sure that the expect100header is always set to the preferred value
2847: here. */
2848: ptr = Curl_checkheaders(conn, "Expect");
2849: if(ptr) {
2850: data->state.expect100header =
2851: Curl_compareheader(ptr, "Expect:", "100-continue");
2852: }
2853: else if(postsize > EXPECT_100_THRESHOLD || postsize < 0) {
2854: result = expect100(data, conn, req_buffer);
2855: if(result)
2856: return result;
2857: }
2858: else
2859: data->state.expect100header = FALSE;
2860:
2861: /* make the request end in a true CRLF */
2862: result = Curl_add_buffer(&req_buffer, "\r\n", 2);
2863: if(result)
2864: return result;
2865:
2866: /* set the upload size to the progress meter */
2867: Curl_pgrsSetUploadSize(data, postsize);
2868:
2869: /* Read from mime structure. */
2870: data->state.fread_func = (curl_read_callback) Curl_mime_read;
2871: data->state.in = (void *) http->sendit;
2872: http->sending = HTTPSEND_BODY;
2873:
2874: /* this sends the buffer and frees all the buffer resources */
2875: result = Curl_add_buffer_send(&req_buffer, conn,
2876: &data->info.request_size, 0, FIRSTSOCKET);
2877: if(result)
2878: failf(data, "Failed sending POST request");
2879: else
2880: /* prepare for transfer */
2881: Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE,
2882: postsize?FIRSTSOCKET:-1);
2883: if(result)
2884: return result;
2885:
2886: break;
2887:
2888: case HTTPREQ_POST:
2889: /* this is the simple POST, using x-www-form-urlencoded style */
2890:
2891: if(conn->bits.authneg)
2892: postsize = 0;
2893: else
2894: /* the size of the post body */
2895: postsize = data->state.infilesize;
2896:
2897: /* We only set Content-Length and allow a custom Content-Length if
2898: we don't upload data chunked, as RFC2616 forbids us to set both
2899: kinds of headers (Transfer-Encoding: chunked and Content-Length) */
2900: if((postsize != -1) && !data->req.upload_chunky &&
2901: (conn->bits.authneg || !Curl_checkheaders(conn, "Content-Length"))) {
2902: /* we allow replacing this header if not during auth negotiation,
2903: although it isn't very wise to actually set your own */
2904: result = Curl_add_bufferf(&req_buffer,
2905: "Content-Length: %" CURL_FORMAT_CURL_OFF_T
2906: "\r\n", postsize);
2907: if(result)
2908: return result;
2909: }
2910:
2911: if(!Curl_checkheaders(conn, "Content-Type")) {
2912: result = Curl_add_bufferf(&req_buffer,
2913: "Content-Type: application/"
2914: "x-www-form-urlencoded\r\n");
2915: if(result)
2916: return result;
2917: }
2918:
2919: /* For really small posts we don't use Expect: headers at all, and for
2920: the somewhat bigger ones we allow the app to disable it. Just make
2921: sure that the expect100header is always set to the preferred value
2922: here. */
2923: ptr = Curl_checkheaders(conn, "Expect");
2924: if(ptr) {
2925: data->state.expect100header =
2926: Curl_compareheader(ptr, "Expect:", "100-continue");
2927: }
2928: else if(postsize > EXPECT_100_THRESHOLD || postsize < 0) {
2929: result = expect100(data, conn, req_buffer);
2930: if(result)
2931: return result;
2932: }
2933: else
2934: data->state.expect100header = FALSE;
2935:
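/* With CURLOPT_POSTFIELDS the body is already in memory: small bodies are
   appended to the request buffer and sent in one go, while h2, Expect:
   100-continue and large bodies are streamed separately via readmoredata(). */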
2936: if(data->set.postfields) {
2937:
2938: /* In HTTP2, we send the request body in DATA frames regardless of
2939: its size. */
2940: if(conn->httpversion != 20 &&
2941: !data->state.expect100header &&
2942: (postsize < MAX_INITIAL_POST_SIZE)) {
2943: /* if we don't use expect: 100 AND
2944: postsize is less than MAX_INITIAL_POST_SIZE
2945:
2946: then append the post data to the HTTP request header. This limit
2947: is no magic limit but only set to prevent really huge POSTs from
2948: getting the data duplicated with malloc() and family. */
2949:
2950: result = Curl_add_buffer(&req_buffer, "\r\n", 2); /* end of headers! */
2951: if(result)
2952: return result;
2953:
2954: if(!data->req.upload_chunky) {
2955: /* We're not sending it 'chunked', append it to the request
2956: already now to reduce the number of send() calls */
2957: result = Curl_add_buffer(&req_buffer, data->set.postfields,
2958: (size_t)postsize);
2959: included_body = postsize;
2960: }
2961: else {
2962: if(postsize) {
2963: /* Append the POST data chunky-style */
2964: result = Curl_add_bufferf(&req_buffer, "%x\r\n", (int)postsize);
2965: if(!result) {
2966: result = Curl_add_buffer(&req_buffer, data->set.postfields,
2967: (size_t)postsize);
2968: if(!result)
2969: result = Curl_add_buffer(&req_buffer, "\r\n", 2);
2970: included_body = postsize + 2;
2971: }
2972: }
2973: if(!result)
2974: result = Curl_add_buffer(&req_buffer, "\x30\x0d\x0a\x0d\x0a", 5);
2975: /* 0 CR LF CR LF */
2976: included_body += 5;
2977: }
2978: if(result)
2979: return result;
2980: /* Make sure the progress information is accurate */
2981: Curl_pgrsSetUploadSize(data, postsize);
2982: }
2983: else {
2984: /* A huge POST coming up, do data separate from the request */
2985: http->postsize = postsize;
2986: http->postdata = data->set.postfields;
2987:
2988: http->sending = HTTPSEND_BODY;
2989:
2990: data->state.fread_func = (curl_read_callback)readmoredata;
2991: data->state.in = (void *)conn;
2992:
2993: /* set the upload size to the progress meter */
2994: Curl_pgrsSetUploadSize(data, http->postsize);
2995:
2996: result = Curl_add_buffer(&req_buffer, "\r\n", 2); /* end of headers! */
2997: if(result)
2998: return result;
2999: }
3000: }
3001: else {
3002: result = Curl_add_buffer(&req_buffer, "\r\n", 2); /* end of headers! */
3003: if(result)
3004: return result;
3005:
3006: if(data->req.upload_chunky && conn->bits.authneg) {
3007: /* Chunky upload is selected and we're negotiating auth still, send
3008: end-of-data only */
3009: result = Curl_add_buffer(&req_buffer,
3010: "\x30\x0d\x0a\x0d\x0a", 5);
3011: /* 0 CR LF CR LF */
3012: if(result)
3013: return result;
3014: }
3015:
3016: else if(data->state.infilesize) {
3017: /* set the upload size to the progress meter */
3018: Curl_pgrsSetUploadSize(data, postsize?postsize:-1);
3019:
3020: /* set the pointer to mark that we will send the post body using the
3021: read callback, but only if we're not in authentication
3022: negotiation */
3023: if(!conn->bits.authneg) {
3024: http->postdata = (char *)&http->postdata;
3025: http->postsize = postsize;
3026: }
3027: }
3028: }
3029: /* issue the request */
3030: result = Curl_add_buffer_send(&req_buffer, conn, &data->info.request_size,
3031: (size_t)included_body, FIRSTSOCKET);
3032:
3033: if(result)
3034: failf(data, "Failed sending HTTP POST request");
3035: else
3036: Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE,
3037: http->postdata?FIRSTSOCKET:-1);
3038: break;
3039:
3040: default:
3041: result = Curl_add_buffer(&req_buffer, "\r\n", 2);
3042: if(result)
3043: return result;
3044:
3045: /* issue the request */
3046: result = Curl_add_buffer_send(&req_buffer, conn,
3047: &data->info.request_size, 0, FIRSTSOCKET);
3048:
3049: if(result)
3050: failf(data, "Failed sending HTTP request");
3051: else
3052: /* HTTP GET/HEAD download: */
3053: Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, -1);
3054: }
3055: if(result)
3056: return result;
3057: if(!postsize && (http->sending != HTTPSEND_REQUEST))
3058: data->req.upload_done = TRUE;
3059:
3060: if(data->req.writebytecount) {
3061: /* if a request-body has been sent off, we make sure this progress is noted
3062: properly */
3063: Curl_pgrsSetUploadCounter(data, data->req.writebytecount);
3064: if(Curl_pgrsUpdate(conn))
3065: result = CURLE_ABORTED_BY_CALLBACK;
3066:
3067: if(data->req.writebytecount >= postsize) {
3068: /* already sent the entire request body, mark the "upload" as
3069: complete */
3070: infof(data, "upload completely sent off: %" CURL_FORMAT_CURL_OFF_T
3071: " out of %" CURL_FORMAT_CURL_OFF_T " bytes\n",
3072: data->req.writebytecount, postsize);
3073: data->req.upload_done = TRUE;
3074: data->req.keepon &= ~KEEP_SEND; /* we're done writing */
3075: data->req.exp100 = EXP100_SEND_DATA; /* already sent */
3076: Curl_expire_done(data, EXPIRE_100_TIMEOUT);
3077: }
3078: }
3079:
3080: if((conn->httpversion == 20) && data->req.upload_chunky)
3081: /* upload_chunky was set above to set up the request in a chunky fashion,
3082: but is disabled here again to avoid the chunked-encoded version being
3083: used when sending the request body over h2 */
3084: data->req.upload_chunky = FALSE;
3085: return result;
3086: }
3087:
3088: typedef enum {
3089: STATUS_UNKNOWN, /* not enough data to tell yet */
3090: STATUS_DONE, /* a status line was read */
3091: STATUS_BAD /* not a status line */
3092: } statusline;
3093:
3094:
3095: /* Check a string for a prefix. Check no more than 'len' bytes */
3096: static bool checkprefixmax(const char *prefix, const char *buffer, size_t len)
3097: {
3098: size_t ch = CURLMIN(strlen(prefix), len);
3099: return curl_strnequal(prefix, buffer, ch);
3100: }
3101:
3102: /*
3103: * checkhttpprefix()
3104: *
3105: * Returns a match when the string is prefixed by "HTTP/" or by a member of the http200aliases list
3106: */
3107: static statusline
3108: checkhttpprefix(struct Curl_easy *data,
3109: const char *s, size_t len)
3110: {
3111: struct curl_slist *head = data->set.http200aliases;
3112: statusline rc = STATUS_BAD;
3113: statusline onmatch = len >= 5? STATUS_DONE : STATUS_UNKNOWN;
3114: #ifdef CURL_DOES_CONVERSIONS
3115: /* convert from the network encoding using a scratch area */
3116: char *scratch = strdup(s);
3117: if(NULL == scratch) {
3118: failf(data, "Failed to allocate memory for conversion!");
3119: return FALSE; /* can't return CURLE_OUT_OF_MEMORY so return FALSE */
3120: }
3121: if(CURLE_OK != Curl_convert_from_network(data, scratch, strlen(s) + 1)) {
3122: /* Curl_convert_from_network calls failf if unsuccessful */
3123: free(scratch);
3124: return FALSE; /* can't return CURLE_foobar so return FALSE */
3125: }
3126: s = scratch;
3127: #endif /* CURL_DOES_CONVERSIONS */
3128:
3129: while(head) {
3130: if(checkprefixmax(head->data, s, len)) {
3131: rc = onmatch;
3132: break;
3133: }
3134: head = head->next;
3135: }
3136:
3137: if((rc != STATUS_DONE) && (checkprefixmax("HTTP/", s, len)))
3138: rc = onmatch;
3139:
3140: #ifdef CURL_DOES_CONVERSIONS
3141: free(scratch);
3142: #endif /* CURL_DOES_CONVERSIONS */
3143: return rc;
3144: }
3145:
3146: #ifndef CURL_DISABLE_RTSP
3147: static statusline
3148: checkrtspprefix(struct Curl_easy *data,
3149: const char *s, size_t len)
3150: {
3151: statusline result = STATUS_BAD;
3152: statusline onmatch = len >= 5? STATUS_DONE : STATUS_UNKNOWN;
3153:
3154: #ifdef CURL_DOES_CONVERSIONS
3155: /* convert from the network encoding using a scratch area */
3156: char *scratch = strdup(s);
3157: if(NULL == scratch) {
3158: failf(data, "Failed to allocate memory for conversion!");
3159: return FALSE; /* can't return CURLE_OUT_OF_MEMORY so return FALSE */
3160: }
3161: if(CURLE_OK != Curl_convert_from_network(data, scratch, strlen(s) + 1)) {
3162: /* Curl_convert_from_network calls failf if unsuccessful */
3163: result = FALSE; /* can't return CURLE_foobar so return FALSE */
3164: }
3165: else if(checkprefixmax("RTSP/", scratch, len))
3166: result = onmatch;
3167: free(scratch);
3168: #else
3169: (void)data; /* unused */
3170: if(checkprefixmax("RTSP/", s, len))
3171: result = onmatch;
3172: #endif /* CURL_DOES_CONVERSIONS */
3173:
3174: return result;
3175: }
3176: #endif /* CURL_DISABLE_RTSP */
3177:
3178: static statusline
3179: checkprotoprefix(struct Curl_easy *data, struct connectdata *conn,
3180: const char *s, size_t len)
3181: {
3182: #ifndef CURL_DISABLE_RTSP
3183: if(conn->handler->protocol & CURLPROTO_RTSP)
3184: return checkrtspprefix(data, s, len);
3185: #else
3186: (void)conn;
3187: #endif /* CURL_DISABLE_RTSP */
3188:
3189: return checkhttpprefix(data, s, len);
3190: }
3191:
3192: /*
3193: * header_append() copies a chunk of data to the end of the already received
3194: * header. We make sure that the full string fits in the allocated header
3195: * buffer, or else we enlarge it.
3196: */
3197: static CURLcode header_append(struct Curl_easy *data,
3198: struct SingleRequest *k,
3199: size_t length)
3200: {
3201: /* length is at most the size of a full read buffer, for which the upper
3202: bound is CURL_MAX_READ_SIZE. There is thus no chance of overflow in this
3203: calculation. */
3204: size_t newsize = k->hbuflen + length;
3205: if(newsize > CURL_MAX_HTTP_HEADER) {
3206: /* The reason to have a max limit for this is to avoid the risk of a bad
3207: server feeding libcurl with a never-ending header that would cause
3208: infinite reallocs */
3209: failf(data, "Rejected %zu bytes header (max is %d)!", newsize,
3210: CURL_MAX_HTTP_HEADER);
3211: return CURLE_OUT_OF_MEMORY;
3212: }
3213: if(newsize >= data->state.headersize) {
3214: /* We enlarge the header buffer as it is too small */
3215: char *newbuff;
3216: size_t hbufp_index;
3217:
3218: newsize = CURLMAX((k->hbuflen + length) * 3 / 2, data->state.headersize*2);
3219: hbufp_index = k->hbufp - data->state.headerbuff;
3220: newbuff = realloc(data->state.headerbuff, newsize);
3221: if(!newbuff) {
3222: failf(data, "Failed to alloc memory for big header!");
3223: return CURLE_OUT_OF_MEMORY;
3224: }
3225: data->state.headersize = newsize;
3226: data->state.headerbuff = newbuff;
3227: k->hbufp = data->state.headerbuff + hbufp_index;
3228: }
3229: memcpy(k->hbufp, k->str_start, length);
3230: k->hbufp += length;
3231: k->hbuflen += length;
3232: *k->hbufp = 0;
3233:
3234: return CURLE_OK;
3235: }
3236:
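/*
 * print_http_error() produces the failure message for a rejected response: it
 * reports the received status line when it can be found in the header buffer
 * and otherwise falls back to the numeric HTTP status code.
 */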
3237: static void print_http_error(struct Curl_easy *data)
3238: {
3239: struct SingleRequest *k = &data->req;
3240: char *beg = k->p;
3241:
3242: /* make sure that data->req.p points to the HTTP status line */
3243: if(!strncmp(beg, "HTTP", 4)) {
3244:
3245: /* skip to HTTP status code */
3246: beg = strchr(beg, ' ');
3247: if(beg && *++beg) {
3248:
3249: /* find trailing CR */
3250: char end_char = '\r';
3251: char *end = strchr(beg, end_char);
3252: if(!end) {
3253: /* try to find LF (workaround for non-compliant HTTP servers) */
3254: end_char = '\n';
3255: end = strchr(beg, end_char);
3256: }
3257:
3258: if(end) {
3259: /* temporarily replace CR or LF by NUL and print the error message */
3260: *end = '\0';
3261: failf(data, "The requested URL returned error: %s", beg);
3262:
3263: /* restore the previously replaced CR or LF */
3264: *end = end_char;
3265: return;
3266: }
3267: }
3268: }
3269:
3270: /* fall-back to printing the HTTP status code only */
3271: failf(data, "The requested URL returned error: %d", k->httpcode);
3272: }
3273:
3274: /*
3275: * Read any HTTP header lines from the server and pass them to the client app.
3276: */
3277: CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
3278: struct connectdata *conn,
3279: ssize_t *nread,
3280: bool *stop_reading)
3281: {
3282: CURLcode result;
3283: struct SingleRequest *k = &data->req;
3284: ssize_t onread = *nread;
3285: char *ostr = k->str;
3286:
3287: /* header line within buffer loop */
3288: do {
3289: size_t rest_length;
3290: size_t full_length;
3291: int writetype;
3292:
3293: /* str_start is start of line within buf */
3294: k->str_start = k->str;
3295:
3296: /* data is in network encoding so use 0x0a instead of '\n' */
3297: k->end_ptr = memchr(k->str_start, 0x0a, *nread);
3298:
3299: if(!k->end_ptr) {
3300: /* Not a complete header line within buffer, append the data to
3301: the end of the headerbuff. */
3302: result = header_append(data, k, *nread);
3303: if(result)
3304: return result;
3305:
3306: if(!k->headerline) {
3307: /* check if this looks like a protocol header */
3308: statusline st = checkprotoprefix(data, conn, data->state.headerbuff,
3309: k->hbuflen);
3310: if(st == STATUS_BAD) {
3311: /* this is not the beginning of a protocol first header line */
3312: k->header = FALSE;
3313: k->badheader = HEADER_ALLBAD;
3314: streamclose(conn, "bad HTTP: No end-of-message indicator");
3315: if(!data->set.http09_allowed) {
3316: failf(data, "Received HTTP/0.9 when not allowed\n");
3317: return CURLE_UNSUPPORTED_PROTOCOL;
3318: }
3319: break;
3320: }
3321: }
3322:
3323: break; /* read more and try again */
3324: }
3325:
3326: /* decrease the size of the remaining (supposed) header line */
3327: rest_length = (k->end_ptr - k->str) + 1;
3328: *nread -= (ssize_t)rest_length;
3329:
3330: k->str = k->end_ptr + 1; /* move past new line */
3331:
3332: full_length = k->str - k->str_start;
3333:
3334: result = header_append(data, k, full_length);
3335: if(result)
3336: return result;
3337:
3338: k->end_ptr = k->hbufp;
3339: k->p = data->state.headerbuff;
3340:
3341: /****
3342: * We now have a FULL header line that p points to
3343: *****/
3344:
3345: if(!k->headerline) {
3346: /* the first read header */
3347: statusline st = checkprotoprefix(data, conn, data->state.headerbuff,
3348: k->hbuflen);
3349: if(st == STATUS_BAD) {
3350: streamclose(conn, "bad HTTP: No end-of-message indicator");
3351: /* this is not the beginning of a protocol first header line */
3352: if(!data->set.http09_allowed) {
3353: failf(data, "Received HTTP/0.9 when not allowed\n");
3354: return CURLE_UNSUPPORTED_PROTOCOL;
3355: }
3356: k->header = FALSE;
3357: if(*nread)
3358: /* since there's more, this is a partial bad header */
3359: k->badheader = HEADER_PARTHEADER;
3360: else {
3361: /* this was all we read so it's all a bad header */
3362: k->badheader = HEADER_ALLBAD;
3363: *nread = onread;
3364: k->str = ostr;
3365: return CURLE_OK;
3366: }
3367: break;
3368: }
3369: }
3370:
3371: /* headers are in network encoding so
3372: use 0x0a and 0x0d instead of '\n' and '\r' */
3373: if((0x0a == *k->p) || (0x0d == *k->p)) {
3374: size_t headerlen;
3375: /* Zero-length header line means end of headers! */
3376:
3377: #ifdef CURL_DOES_CONVERSIONS
3378: if(0x0d == *k->p) {
3379: *k->p = '\r'; /* replace with CR in host encoding */
3380: k->p++; /* pass the CR byte */
3381: }
3382: if(0x0a == *k->p) {
3383: *k->p = '\n'; /* replace with LF in host encoding */
3384: k->p++; /* pass the LF byte */
3385: }
3386: #else
3387: if('\r' == *k->p)
3388: k->p++; /* pass the \r byte */
3389: if('\n' == *k->p)
3390: k->p++; /* pass the \n byte */
3391: #endif /* CURL_DOES_CONVERSIONS */
3392:
3393: if(100 <= k->httpcode && 199 >= k->httpcode) {
3394: /* "A user agent MAY ignore unexpected 1xx status responses." */
3395: switch(k->httpcode) {
3396: case 100:
3397: /*
3398: * We have made an HTTP PUT or POST and this is 1.1-lingo
3399: * that tells us that the server is OK with this and ready
3400: * to receive the data.
3401: * However, we'll get more headers now so we must get
3402: * back into the header-parsing state!
3403: */
3404: k->header = TRUE;
3405: k->headerline = 0; /* restart the header line counter */
3406:
3407: /* if we were waiting for this, enable write now! */
3408: if(k->exp100 > EXP100_SEND_DATA) {
3409: k->exp100 = EXP100_SEND_DATA;
3410: k->keepon |= KEEP_SEND;
3411: Curl_expire_done(data, EXPIRE_100_TIMEOUT);
3412: }
3413: break;
3414: case 101:
3415: /* Switching Protocols */
3416: if(k->upgr101 == UPGR101_REQUESTED) {
3417: /* Switching to HTTP/2 */
3418: infof(data, "Received 101\n");
3419: k->upgr101 = UPGR101_RECEIVED;
3420:
3421: /* we'll get more headers (HTTP/2 response) */
3422: k->header = TRUE;
3423: k->headerline = 0; /* restart the header line counter */
3424:
3425: /* switch to http2 now. The bytes after response headers
3426: are also processed here, otherwise they are lost. */
3427: result = Curl_http2_switched(conn, k->str, *nread);
3428: if(result)
3429: return result;
3430: *nread = 0;
3431: }
3432: else {
3433: /* Switching to another protocol (e.g. WebSocket) */
3434: k->header = FALSE; /* no more header to parse! */
3435: }
3436: break;
3437: default:
3438: /* the status code 1xx indicates a provisional response, so
3439: we'll get another set of headers */
3440: k->header = TRUE;
3441: k->headerline = 0; /* restart the header line counter */
3442: break;
3443: }
3444: }
3445: else {
3446: k->header = FALSE; /* no more header to parse! */
3447:
3448: if((k->size == -1) && !k->chunk && !conn->bits.close &&
3449: (conn->httpversion == 11) &&
3450: !(conn->handler->protocol & CURLPROTO_RTSP) &&
3451: data->set.httpreq != HTTPREQ_HEAD) {
3452: /* On HTTP 1.1, when connection is not to get closed, but no
3453: Content-Length nor Transfer-Encoding chunked have been
3454: received, according to RFC2616 section 4.4 point 5, we
3455: assume that the server will close the connection to
3456: signal the end of the document. */
3457: infof(data, "no chunk, no close, no size. Assume close to "
3458: "signal end\n");
3459: streamclose(conn, "HTTP: No end-of-message indicator");
3460: }
3461: }
3462:
3463: /* At this point we have some idea about the fate of the connection.
3464: If we are closing the connection it may result in an auth failure. */
3465: #if defined(USE_NTLM)
3466: if(conn->bits.close &&
3467: (((data->req.httpcode == 401) &&
3468: (conn->http_ntlm_state == NTLMSTATE_TYPE2)) ||
3469: ((data->req.httpcode == 407) &&
3470: (conn->proxy_ntlm_state == NTLMSTATE_TYPE2)))) {
3471: infof(data, "Connection closure while negotiating auth (HTTP 1.0?)\n");
3472: data->state.authproblem = TRUE;
3473: }
3474: #endif
3475: #if defined(USE_SPNEGO)
3476: if(conn->bits.close &&
3477: (((data->req.httpcode == 401) &&
3478: (conn->http_negotiate_state == GSS_AUTHRECV)) ||
3479: ((data->req.httpcode == 407) &&
3480: (conn->proxy_negotiate_state == GSS_AUTHRECV)))) {
3481: infof(data, "Connection closure while negotiating auth (HTTP 1.0?)\n");
3482: data->state.authproblem = TRUE;
3483: }
3484: if((conn->http_negotiate_state == GSS_AUTHDONE) &&
3485: (data->req.httpcode != 401)) {
3486: conn->http_negotiate_state = GSS_AUTHSUCC;
3487: }
3488: if((conn->proxy_negotiate_state == GSS_AUTHDONE) &&
3489: (data->req.httpcode != 407)) {
3490: conn->proxy_negotiate_state = GSS_AUTHSUCC;
3491: }
3492: #endif
3493: /*
3494: * When all the headers have been parsed, see if we should give
3495: * up and return an error.
3496: */
3497: if(http_should_fail(conn)) {
3498: failf(data, "The requested URL returned error: %d",
3499: k->httpcode);
3500: return CURLE_HTTP_RETURNED_ERROR;
3501: }
3502:
3503: /* pass the collected header block to the client app; when the user asked
3504: for headers to be included in the body output, flag it as body too: */
3505: writetype = CLIENTWRITE_HEADER;
3506: if(data->set.include_header)
3507: writetype |= CLIENTWRITE_BODY;
3508:
3509: headerlen = k->p - data->state.headerbuff;
3510:
3511: result = Curl_client_write(conn, writetype,
3512: data->state.headerbuff,
3513: headerlen);
3514: if(result)
3515: return result;
3516:
3517: data->info.header_size += (long)headerlen;
3518: data->req.headerbytecount += (long)headerlen;
3519:
3520: data->req.deductheadercount =
3521: (100 <= k->httpcode && 199 >= k->httpcode)?data->req.headerbytecount:0;
3522:
3523: /* Curl_http_auth_act() checks what authentication methods
3524: * are available and decides which one (if any) to
3525: * use. It will set 'newurl' if an auth method was picked. */
3526: result = Curl_http_auth_act(conn);
3527:
3528: if(result)
3529: return result;
3530:
3531: if(k->httpcode >= 300) {
3532: if((!conn->bits.authneg) && !conn->bits.close &&
3533: !conn->bits.rewindaftersend) {
3534: /*
3535: * General treatment of errors when about to send data, including:
3536: * "417 Expectation Failed", while waiting for 100-continue.
3537: *
3538: * The check for close above is done simply because if something
3539: * else has already deemed the connection to get closed, then that
3540: * something should've considered the big picture and we
3541: * avoid this check here.
3542: *
3543: * rewindaftersend indicates that something has told libcurl to
3544: * continue sending even if it gets discarded
3545: */
3546:
3547: switch(data->set.httpreq) {
3548: case HTTPREQ_PUT:
3549: case HTTPREQ_POST:
3550: case HTTPREQ_POST_FORM:
3551: case HTTPREQ_POST_MIME:
3552: /* We got an error response. If this happened before the whole
3553: * request body has been sent we stop sending and mark the
3554: * connection for closure after we've read the entire response.
3555: */
3556: Curl_expire_done(data, EXPIRE_100_TIMEOUT);
3557: if(!k->upload_done) {
3558: if((k->httpcode == 417) && data->state.expect100header) {
3559: /* 417 Expectation Failed - try again without the Expect
3560: header */
3561: infof(data, "Got 417 while waiting for a 100\n");
3562: data->state.disableexpect = TRUE;
3563: DEBUGASSERT(!data->req.newurl);
3564: data->req.newurl = strdup(conn->data->change.url);
3565: Curl_done_sending(conn, k);
3566: }
3567: else if(data->set.http_keep_sending_on_error) {
3568: infof(data, "HTTP error before end of send, keep sending\n");
3569: if(k->exp100 > EXP100_SEND_DATA) {
3570: k->exp100 = EXP100_SEND_DATA;
3571: k->keepon |= KEEP_SEND;
3572: }
3573: }
3574: else {
3575: infof(data, "HTTP error before end of send, stop sending\n");
3576: streamclose(conn, "Stop sending data before everything sent");
3577: result = Curl_done_sending(conn, k);
3578: if(result)
3579: return result;
3580: k->upload_done = TRUE;
3581: if(data->state.expect100header)
3582: k->exp100 = EXP100_FAILED;
3583: }
3584: }
3585: break;
3586:
3587: default: /* default label present to avoid compiler warnings */
3588: break;
3589: }
3590: }
3591:
3592: if(conn->bits.rewindaftersend) {
3593:       /* We rewind after a complete send, so we continue
3594: sending now */
3595: infof(data, "Keep sending data to get tossed away!\n");
3596: k->keepon |= KEEP_SEND;
3597: }
3598: }
3599:
3600: if(!k->header) {
3601: /*
3602: * really end-of-headers.
3603: *
3604: * If we requested a "no body", this is a good time to get
3605: * out and return home.
3606: */
3607: if(data->set.opt_no_body)
3608: *stop_reading = TRUE;
3609: #ifndef CURL_DISABLE_RTSP
3610: else if((conn->handler->protocol & CURLPROTO_RTSP) &&
3611: (data->set.rtspreq == RTSPREQ_DESCRIBE) &&
3612: (k->size <= -1))
3613: /* Respect section 4.4 of rfc2326: If the Content-Length header is
3614: absent, a length 0 must be assumed. It will prevent libcurl from
3615:            hanging on a DESCRIBE request that was refused for whatever
3616: reason */
3617: *stop_reading = TRUE;
3618: #endif
3619: else {
3620: /* If we know the expected size of this document, we set the
3621:          maximum download size to the size of the expected document;
3622:          otherwise we won't know when to stop reading!
3623:
3624: Note that we set the download maximum even if we read a
3625: "Connection: close" header, to make sure that
3626: "Content-Length: 0" still prevents us from attempting to
3627: read the (missing) response-body.
3628: */
3629: /* According to RFC2616 section 4.4, we MUST ignore
3630: Content-Length: headers if we are now receiving data
3631: using chunked Transfer-Encoding.
3632: */
3633: if(k->chunk)
3634: k->maxdownload = k->size = -1;
3635: }
3636: if(-1 != k->size) {
3637: /* We do this operation even if no_body is true, since this
3638: data might be retrieved later with curl_easy_getinfo()
3639: and its CURLINFO_CONTENT_LENGTH_DOWNLOAD option. */
3640:
3641: Curl_pgrsSetDownloadSize(data, k->size);
3642: k->maxdownload = k->size;
3643: }
3644:
3645: /* If max download size is *zero* (nothing) we already have
3646: nothing and can safely return ok now! But for HTTP/2, we'd
3647: like to call http2_handle_stream_close to properly close a
3648: stream. In order to do this, we keep reading until we
3649: close the stream. */
3650: if(0 == k->maxdownload
3651: #if defined(USE_NGHTTP2)
3652: && !((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
3653: conn->httpversion == 20)
3654: #endif
3655: )
3656: *stop_reading = TRUE;
3657:
3658: if(*stop_reading) {
3659: /* we make sure that this socket isn't read more now */
3660: k->keepon &= ~KEEP_RECV;
3661: }
3662:
3663: if(data->set.verbose)
3664: Curl_debug(data, CURLINFO_HEADER_IN,
3665: k->str_start, headerlen);
3666: break; /* exit header line loop */
3667: }
3668:
3669: /* We continue reading headers, so reset the line-based
3670: header parsing variables hbufp && hbuflen */
3671: k->hbufp = data->state.headerbuff;
3672: k->hbuflen = 0;
3673: continue;
3674: }
3675:
3676: /*
3677: * Checks for special headers coming up.
3678: */
3679:
3680: if(!k->headerline++) {
3681:       /* This is the first header; it MUST be the status line
3682:          or else we consider this to be the body right away! */
3683: int httpversion_major;
3684: int rtspversion_major;
3685: int nc = 0;
3686: #ifdef CURL_DOES_CONVERSIONS
3687: #define HEADER1 scratch
3688: #define SCRATCHSIZE 21
3689: CURLcode res;
3690: char scratch[SCRATCHSIZE + 1]; /* "HTTP/major.minor 123" */
3691: /* We can't really convert this yet because we
3692: don't know if it's the 1st header line or the body.
3693: So we do a partial conversion into a scratch area,
3694: leaving the data at k->p as-is.
3695: */
3696: strncpy(&scratch[0], k->p, SCRATCHSIZE);
3697: scratch[SCRATCHSIZE] = 0; /* null terminate */
3698: res = Curl_convert_from_network(data,
3699: &scratch[0],
3700: SCRATCHSIZE);
3701: if(res)
3702: /* Curl_convert_from_network calls failf if unsuccessful */
3703: return res;
3704: #else
3705: #define HEADER1 k->p /* no conversion needed, just use k->p */
3706: #endif /* CURL_DOES_CONVERSIONS */
3707:
3708: if(conn->handler->protocol & PROTO_FAMILY_HTTP) {
3709: /*
3710: * https://tools.ietf.org/html/rfc7230#section-3.1.2
3711: *
3712: * The response code is always a three-digit number in HTTP as the spec
3713: * says. We try to allow any number here, but we cannot make
3714: * guarantees on future behaviors since it isn't within the protocol.
3715: */
3716: char separator;
3717: char twoorthree[2];
3718: nc = sscanf(HEADER1,
3719: " HTTP/%1d.%1d%c%3d",
3720: &httpversion_major,
3721: &conn->httpversion,
3722: &separator,
3723: &k->httpcode);
3724:
3725: if(nc == 1 && httpversion_major >= 2 &&
3726: 2 == sscanf(HEADER1, " HTTP/%1[23] %d", twoorthree, &k->httpcode)) {
3727: conn->httpversion = 0;
3728: nc = 4;
3729: separator = ' ';
3730: }
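        /* Worked examples of the two scans above, for illustration:
             "HTTP/1.1 200 OK" -> nc == 4, major 1, minor 1, separator ' ',
                                   code 200; httpversion becomes 11 below.
             "HTTP/2 200"      -> the first sscanf stops at the missing '.',
                                   the single-digit fallback matches and
                                   httpversion becomes 20 below. */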
3731:
3732: if((nc == 4) && (' ' == separator)) {
3733: conn->httpversion += 10 * httpversion_major;
3734:
3735: if(k->upgr101 == UPGR101_RECEIVED) {
3736: /* supposedly upgraded to http2 now */
3737: if(conn->httpversion != 20)
3738: infof(data, "Lying server, not serving HTTP/2\n");
3739: }
3740: if(conn->httpversion < 20) {
3741: conn->bundle->multiuse = BUNDLE_NO_MULTIUSE;
3742: infof(data, "Mark bundle as not supporting multiuse\n");
3743: }
3744: }
3745: else if(!nc) {
3746: /* this is the real world, not a Nirvana
3747: NCSA 1.5.x returns this crap when asked for HTTP/1.1
3748: */
3749: nc = sscanf(HEADER1, " HTTP %3d", &k->httpcode);
3750: conn->httpversion = 10;
3751:
3752: /* If user has set option HTTP200ALIASES,
3753: compare header line against list of aliases
3754: */
3755: if(!nc) {
3756: if(checkhttpprefix(data, k->p, k->hbuflen) == STATUS_DONE) {
3757: nc = 1;
3758: k->httpcode = 200;
3759: conn->httpversion = 10;
3760: }
3761: }
3762: }
3763: else {
3764: failf(data, "Unsupported HTTP version in response");
3765: return CURLE_UNSUPPORTED_PROTOCOL;
3766: }
3767: }
3768: else if(conn->handler->protocol & CURLPROTO_RTSP) {
3769: char separator;
3770: nc = sscanf(HEADER1,
3771: " RTSP/%1d.%1d%c%3d",
3772: &rtspversion_major,
3773: &conn->rtspversion,
3774: &separator,
3775: &k->httpcode);
3776: if((nc == 4) && (' ' == separator)) {
3777: conn->rtspversion += 10 * rtspversion_major;
3778: conn->httpversion = 11; /* For us, RTSP acts like HTTP 1.1 */
3779: }
3780: else {
3781: nc = 0;
3782: }
3783: }
3784:
3785: if(nc) {
3786: data->info.httpcode = k->httpcode;
3787:
3788: data->info.httpversion = conn->httpversion;
3789: if(!data->state.httpversion ||
3790: data->state.httpversion > conn->httpversion)
3791: /* store the lowest server version we encounter */
3792: data->state.httpversion = conn->httpversion;
3793:
3794: /*
3795: * This code executes as part of processing the header. As a
3796: * result, it's not totally clear how to interpret the
3797: * response code yet as that depends on what other headers may
3798: * be present. 401 and 407 may be errors, but may be OK
3799: * depending on how authentication is working. Other codes
3800: * are definitely errors, so give up here.
3801: */
3802: if(data->state.resume_from && data->set.httpreq == HTTPREQ_GET &&
3803: k->httpcode == 416) {
3804: /* "Requested Range Not Satisfiable", just proceed and
3805: pretend this is no error */
3806: k->ignorebody = TRUE; /* Avoid appending error msg to good data. */
3807: }
3808: else if(data->set.http_fail_on_error && (k->httpcode >= 400) &&
3809: ((k->httpcode != 401) || !conn->bits.user_passwd) &&
3810: ((k->httpcode != 407) || !conn->bits.proxy_user_passwd) ) {
3811: /* serious error, go home! */
3812: print_http_error(data);
3813: return CURLE_HTTP_RETURNED_ERROR;
3814: }
3815:
3816: if(conn->httpversion == 10) {
3817: /* Default action for HTTP/1.0 must be to close, unless
3818: we get one of those fancy headers that tell us the
3819: server keeps it open for us! */
3820: infof(data, "HTTP 1.0, assume close after body\n");
3821: connclose(conn, "HTTP/1.0 close after body");
3822: }
3823: else if(conn->httpversion == 20 ||
3824: (k->upgr101 == UPGR101_REQUESTED && k->httpcode == 101)) {
3825: DEBUGF(infof(data, "HTTP/2 found, allow multiplexing\n"));
3826:
3827: /* HTTP/2 cannot blacklist multiplexing since it is a core
3828: functionality of the protocol */
3829: conn->bundle->multiuse = BUNDLE_MULTIPLEX;
3830: }
3831: else if(conn->httpversion >= 11 &&
3832: !conn->bits.close) {
3833: /* If HTTP version is >= 1.1 and connection is persistent */
3834: DEBUGF(infof(data,
3835: "HTTP 1.1 or later with persistent connection\n"));
3836: }
3837:
3838: k->http_bodyless = k->httpcode >= 100 && k->httpcode < 200;
3839: switch(k->httpcode) {
3840: case 304:
3841: /* (quote from RFC2616, section 10.3.5): The 304 response
3842: * MUST NOT contain a message-body, and thus is always
3843: * terminated by the first empty line after the header
3844: * fields. */
3845: if(data->set.timecondition)
3846: data->info.timecond = TRUE;
3847: /* FALLTHROUGH */
3848: case 204:
3849: /* (quote from RFC2616, section 10.2.5): The server has
3850: * fulfilled the request but does not need to return an
3851: * entity-body ... The 204 response MUST NOT include a
3852: * message-body, and thus is always terminated by the first
3853: * empty line after the header fields. */
3854: k->size = 0;
3855: k->maxdownload = 0;
3856: k->http_bodyless = TRUE;
3857: break;
3858: default:
3859: break;
3860: }
3861: }
3862: else {
3863: k->header = FALSE; /* this is not a header line */
3864: break;
3865: }
3866: }
3867:
3868: result = Curl_convert_from_network(data, k->p, strlen(k->p));
3869: /* Curl_convert_from_network calls failf if unsuccessful */
3870: if(result)
3871: return result;
3872:
3873: /* Check for Content-Length: header lines to get size */
3874: if(!k->http_bodyless &&
3875: !data->set.ignorecl && checkprefix("Content-Length:", k->p)) {
3876: curl_off_t contentlength;
3877: CURLofft offt = curlx_strtoofft(k->p + 15, NULL, 10, &contentlength);
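      /* k->p + 15 points just past the "Content-Length:" prefix. Example:
         "Content-Length: 1234" yields contentlength == 1234 and
         CURL_OFFT_OK; an overflowing number yields CURL_OFFT_FLOW and
         anything else (negative or garbage) falls into the error branch
         below. */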
3878:
3879: if(offt == CURL_OFFT_OK) {
3880: if(data->set.max_filesize &&
3881: contentlength > data->set.max_filesize) {
3882: failf(data, "Maximum file size exceeded");
3883: return CURLE_FILESIZE_EXCEEDED;
3884: }
3885: k->size = contentlength;
3886: k->maxdownload = k->size;
3887: /* we set the progress download size already at this point
3888: just to make it easier for apps/callbacks to extract this
3889: info as soon as possible */
3890: Curl_pgrsSetDownloadSize(data, k->size);
3891: }
3892: else if(offt == CURL_OFFT_FLOW) {
3893: /* out of range */
3894: if(data->set.max_filesize) {
3895: failf(data, "Maximum file size exceeded");
3896: return CURLE_FILESIZE_EXCEEDED;
3897: }
3898: streamclose(conn, "overflow content-length");
3899: infof(data, "Overflow Content-Length: value!\n");
3900: }
3901: else {
3902: /* negative or just rubbish - bad HTTP */
3903: failf(data, "Invalid Content-Length: value");
3904: return CURLE_WEIRD_SERVER_REPLY;
3905: }
3906: }
3907: /* check for Content-Type: header lines to get the MIME-type */
3908: else if(checkprefix("Content-Type:", k->p)) {
3909: char *contenttype = Curl_copy_header_value(k->p);
3910: if(!contenttype)
3911: return CURLE_OUT_OF_MEMORY;
3912: if(!*contenttype)
3913: /* ignore empty data */
3914: free(contenttype);
3915: else {
3916: Curl_safefree(data->info.contenttype);
3917: data->info.contenttype = contenttype;
3918: }
3919: }
3920: else if((conn->httpversion == 10) &&
3921: conn->bits.httpproxy &&
3922: Curl_compareheader(k->p,
3923: "Proxy-Connection:", "keep-alive")) {
3924: /*
3925:        * When an HTTP/1.0 reply arrives via a proxy, the
3926: * 'Proxy-Connection: keep-alive' line tells us the
3927: * connection will be kept alive for our pleasure.
3928: * Default action for 1.0 is to close.
3929: */
3930: connkeep(conn, "Proxy-Connection keep-alive"); /* don't close */
3931: infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
3932: }
3933: else if((conn->httpversion == 11) &&
3934: conn->bits.httpproxy &&
3935: Curl_compareheader(k->p,
3936: "Proxy-Connection:", "close")) {
3937: /*
3938: * We get a HTTP/1.1 response from a proxy and it says it'll
3939: * close down after this transfer.
3940: */
3941: connclose(conn, "Proxy-Connection: asked to close after done");
3942: infof(data, "HTTP/1.1 proxy connection set close!\n");
3943: }
3944: else if((conn->httpversion == 10) &&
3945: Curl_compareheader(k->p, "Connection:", "keep-alive")) {
3946: /*
3947:        * An HTTP/1.0 reply with the 'Connection: keep-alive' line
3948: * tells us the connection will be kept alive for our
3949: * pleasure. Default action for 1.0 is to close.
3950: *
3951: * [RFC2068, section 19.7.1] */
3952: connkeep(conn, "Connection keep-alive");
3953: infof(data, "HTTP/1.0 connection set to keep alive!\n");
3954: }
3955: else if(Curl_compareheader(k->p, "Connection:", "close")) {
3956: /*
3957: * [RFC 2616, section 8.1.2.1]
3958: * "Connection: close" is HTTP/1.1 language and means that
3959: * the connection will close when this request has been
3960: * served.
3961: */
3962: streamclose(conn, "Connection: close used");
3963: }
3964: else if(!k->http_bodyless && checkprefix("Transfer-Encoding:", k->p)) {
3965: /* One or more encodings. We check for chunked and/or a compression
3966: algorithm. */
3967: /*
3968: * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
3969: * means that the server will send a series of "chunks". Each
3970: * chunk starts with line with info (including size of the
3971: * coming block) (terminated with CRLF), then a block of data
3972: * with the previously mentioned size. There can be any amount
3973: * of chunks, and a chunk-data set to zero signals the
3974: * end-of-chunks. */
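       /* A minimal chunked body on the wire looks like this (sizes in hex):
            4\r\n
            Wiki\r\n
            0\r\n
            \r\n
          one four-byte chunk followed by the terminating zero-size chunk. */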
3975:
3976: result = Curl_build_unencoding_stack(conn, k->p + 18, TRUE);
3977: if(result)
3978: return result;
3979: }
3980: else if(!k->http_bodyless && checkprefix("Content-Encoding:", k->p) &&
3981: data->set.str[STRING_ENCODING]) {
3982: /*
3983: * Process Content-Encoding. Look for the values: identity,
3984: * gzip, deflate, compress, x-gzip and x-compress. x-gzip and
3985: * x-compress are the same as gzip and compress. (Sec 3.5 RFC
3986: * 2616). zlib cannot handle compress. However, errors are
3987: * handled further down when the response body is processed
3988: */
3989: result = Curl_build_unencoding_stack(conn, k->p + 17, FALSE);
3990: if(result)
3991: return result;
3992: }
3993: else if(checkprefix("Retry-After:", k->p)) {
3994: /* Retry-After = HTTP-date / delay-seconds */
3995: curl_off_t retry_after = 0; /* zero for unknown or "now" */
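      /* Both forms end up as a number of seconds, for example:
           "Retry-After: 120"                           -> 120
           "Retry-After: Fri, 31 Dec 1999 23:59:59 GMT" -> date minus now
         An unparsable value leaves retry_after at 0 (unknown/"now"). */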
3996: time_t date = Curl_getdate_capped(&k->p[12]);
3997: if(-1 == date) {
3998: /* not a date, try it as a decimal number */
3999: (void)curlx_strtoofft(&k->p[12], NULL, 10, &retry_after);
4000: }
4001: else
4002: /* convert date to number of seconds into the future */
4003: retry_after = date - time(NULL);
4004: data->info.retry_after = retry_after; /* store it */
4005: }
4006: else if(!k->http_bodyless && checkprefix("Content-Range:", k->p)) {
4007: /* Content-Range: bytes [num]-
4008: Content-Range: bytes: [num]-
4009: Content-Range: [num]-
4010: Content-Range: [asterisk]/[total]
4011:
4012:          The second format was added since Sun's webserver
4013:          JavaWebServer/1.1.1 obviously sends the header this way!
4014:          The third was added since some servers use that!
4015:          The fourth means the requested range was unsatisfied.
4016: */
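      /* Example: a transfer resumed with resume_from == 500 that gets
         "Content-Range: bytes 500-999/1234" back parses k->offset as 500
         and sets content_range, confirming the server honored the range. */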
4017:
4018: char *ptr = k->p + 14;
4019:
4020: /* Move forward until first digit or asterisk */
4021: while(*ptr && !ISDIGIT(*ptr) && *ptr != '*')
4022: ptr++;
4023:
4024: /* if it truly stopped on a digit */
4025: if(ISDIGIT(*ptr)) {
4026: if(!curlx_strtoofft(ptr, NULL, 10, &k->offset)) {
4027: if(data->state.resume_from == k->offset)
4028: /* we asked for a resume and we got it */
4029: k->content_range = TRUE;
4030: }
4031: }
4032: else
4033: data->state.resume_from = 0; /* get everything */
4034: }
4035: #if !defined(CURL_DISABLE_COOKIES)
4036: else if(data->cookies && data->state.cookie_engine &&
4037: checkprefix("Set-Cookie:", k->p)) {
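      /* Example: "Set-Cookie: session=abc123; Path=/; Secure" is handed to
         the cookie engine together with the request's host and path so it
         can decide whether and how to store the cookie. */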
4038: Curl_share_lock(data, CURL_LOCK_DATA_COOKIE,
4039: CURL_LOCK_ACCESS_SINGLE);
4040: Curl_cookie_add(data,
4041: data->cookies, TRUE, FALSE, k->p + 11,
4042: /* If there is a custom-set Host: name, use it
4043: here, or else use real peer host name. */
4044: conn->allocptr.cookiehost?
4045: conn->allocptr.cookiehost:conn->host.name,
4046: data->state.up.path,
4047: (conn->handler->protocol&CURLPROTO_HTTPS)?
4048: TRUE:FALSE);
4049: Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
4050: }
4051: #endif
4052: else if(!k->http_bodyless && checkprefix("Last-Modified:", k->p) &&
4053: (data->set.timecondition || data->set.get_filetime) ) {
4054: k->timeofdoc = Curl_getdate_capped(k->p + strlen("Last-Modified:"));
4055: if(data->set.get_filetime)
4056: data->info.filetime = k->timeofdoc;
4057: }
4058: else if((checkprefix("WWW-Authenticate:", k->p) &&
4059: (401 == k->httpcode)) ||
4060: (checkprefix("Proxy-authenticate:", k->p) &&
4061: (407 == k->httpcode))) {
4062:
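      /* Typical triggers: a 401 carrying "WWW-Authenticate: Basic realm=..."
         or a 407 carrying "Proxy-Authenticate: Digest ...". The header value
         is handed to Curl_http_input_auth(), which records the
         authentication scheme(s) the server offers. */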
4063: bool proxy = (k->httpcode == 407) ? TRUE : FALSE;
4064: char *auth = Curl_copy_header_value(k->p);
4065: if(!auth)
4066: return CURLE_OUT_OF_MEMORY;
4067:
4068: result = Curl_http_input_auth(conn, proxy, auth);
4069:
4070: free(auth);
4071:
4072: if(result)
4073: return result;
4074: }
4075: #ifdef USE_SPNEGO
4076: else if(checkprefix("Persistent-Auth", k->p)) {
4077: struct negotiatedata *negdata = &conn->negotiate;
4078: struct auth *authp = &data->state.authhost;
4079: if(authp->picked == CURLAUTH_NEGOTIATE) {
4080: char *persistentauth = Curl_copy_header_value(k->p);
4081: if(!persistentauth)
4082: return CURLE_OUT_OF_MEMORY;
4083: negdata->noauthpersist = checkprefix("false", persistentauth)?
4084: TRUE:FALSE;
4085: negdata->havenoauthpersist = TRUE;
4086: infof(data, "Negotiate: noauthpersist -> %d, header part: %s",
4087: negdata->noauthpersist, persistentauth);
4088: free(persistentauth);
4089: }
4090: }
4091: #endif
4092: else if((k->httpcode >= 300 && k->httpcode < 400) &&
4093: checkprefix("Location:", k->p) &&
4094: !data->req.location) {
4095: /* this is the URL that the server advises us to use instead */
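      /* Example: a 301/302 with "Location: https://example.com/new" stores
         that URL in data->req.location and, when redirect-following is
         enabled, clones it into data->req.newurl for the next request. */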
4096: char *location = Curl_copy_header_value(k->p);
4097: if(!location)
4098: return CURLE_OUT_OF_MEMORY;
4099: if(!*location)
4100: /* ignore empty data */
4101: free(location);
4102: else {
4103: data->req.location = location;
4104:
4105: if(data->set.http_follow_location) {
4106: DEBUGASSERT(!data->req.newurl);
4107: data->req.newurl = strdup(data->req.location); /* clone */
4108: if(!data->req.newurl)
4109: return CURLE_OUT_OF_MEMORY;
4110:
4111: /* some cases of POST and PUT etc needs to rewind the data
4112: stream at this point */
4113: result = http_perhapsrewind(conn);
4114: if(result)
4115: return result;
4116: }
4117: }
4118: }
4119: #ifdef USE_ALTSVC
4120: /* If enabled, the header is incoming and this is over HTTPS */
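    /* Example of a header this accepts (RFC 7838 syntax):
         Alt-Svc: h2=":443"; ma=2592000
       announcing an HTTP/2 alternative service on port 443 for 30 days. */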
4121: else if(data->asi && checkprefix("Alt-Svc:", k->p) &&
4122: ((conn->handler->flags & PROTOPT_SSL) ||
4123: #ifdef CURLDEBUG
4124: /* allow debug builds to circumvent the HTTPS restriction */
4125: getenv("CURL_ALTSVC_HTTP")
4126: #else
4127: 0
4128: #endif
4129: )) {
4130: /* the ALPN of the current request */
4131: enum alpnid id = (conn->httpversion == 20) ? ALPN_h2 : ALPN_h1;
4132: result = Curl_altsvc_parse(data, data->asi,
4133: &k->p[ strlen("Alt-Svc:") ],
4134: id, conn->host.name,
4135: curlx_uitous(conn->remote_port));
4136: if(result)
4137: return result;
4138: }
4139: #endif
4140: else if(conn->handler->protocol & CURLPROTO_RTSP) {
4141: result = Curl_rtsp_parseheader(conn, k->p);
4142: if(result)
4143: return result;
4144: }
4145:
4146: /*
4147: * End of header-checks. Write them to the client.
4148: */
4149:
4150: writetype = CLIENTWRITE_HEADER;
4151: if(data->set.include_header)
4152: writetype |= CLIENTWRITE_BODY;
4153:
4154: if(data->set.verbose)
4155: Curl_debug(data, CURLINFO_HEADER_IN, k->p, (size_t)k->hbuflen);
4156:
4157: result = Curl_client_write(conn, writetype, k->p, k->hbuflen);
4158: if(result)
4159: return result;
4160:
4161: data->info.header_size += (long)k->hbuflen;
4162: data->req.headerbytecount += (long)k->hbuflen;
4163:
4164: /* reset hbufp pointer && hbuflen */
4165: k->hbufp = data->state.headerbuff;
4166: k->hbuflen = 0;
4167: }
4168: while(*k->str); /* header line within buffer */
4169:
4170: /* We might have reached the end of the header part here, but
4171: there might be a non-header part left in the end of the read
4172: buffer. */
4173:
4174: return CURLE_OK;
4175: }
4176:
4177: #endif /* CURL_DISABLE_HTTP */