/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2000-2014 Dag-Erling Smørgrav
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * The following copyright applies to the base64 code:
 *
 *-
 * Copyright 1997 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/socket.h>
#include <sys/time.h>

#include <ctype.h>
#include <err.h>
#include <errno.h>
#include <locale.h>
#include <netdb.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

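/*
 * When built with SSL support, use OpenSSL's MD5 implementation through
 * the macros below; otherwise use the MD5Init()/MD5Update()/MD5Final()
 * interface from the system's <md5.h>.
 */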
#ifdef WITH_SSL
#include <openssl/md5.h>
#define MD5Init(c) MD5_Init(c)
#define MD5Update(c, data, len) MD5_Update(c, data, len)
#define MD5Final(md, c) MD5_Final(md, c)
#else
#include <md5.h>
#endif

#include <netinet/in.h>
#include <netinet/tcp.h>

#include "fetch.h"
#include "common.h"
#include "httperr.h"

/* Maximum number of redirects to follow */
#define MAX_REDIRECT 20

/* Symbolic names for reply codes we care about */
#define HTTP_OK			200
#define HTTP_PARTIAL		206
#define HTTP_MOVED_PERM		301
#define HTTP_MOVED_TEMP		302
#define HTTP_SEE_OTHER		303
#define HTTP_NOT_MODIFIED	304
#define HTTP_USE_PROXY		305
#define HTTP_TEMP_REDIRECT	307
#define HTTP_PERM_REDIRECT	308
#define HTTP_NEED_AUTH		401
#define HTTP_NEED_PROXY_AUTH	407
#define HTTP_BAD_RANGE		416
#define HTTP_PROTOCOL_ERROR	999

#define HTTP_REDIRECT(xyz) ((xyz) == HTTP_MOVED_PERM \
			    || (xyz) == HTTP_MOVED_TEMP \
			    || (xyz) == HTTP_TEMP_REDIRECT \
			    || (xyz) == HTTP_PERM_REDIRECT \
			    || (xyz) == HTTP_USE_PROXY \
			    || (xyz) == HTTP_SEE_OTHER)

#define HTTP_ERROR(xyz) ((xyz) >= 400 && (xyz) <= 599)


/*****************************************************************************
 * I/O functions for decoding chunked streams
 */

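/*
 * Cookie passed to funopen(); tracks the connection and the state of
 * chunked transfer decoding for the stream handed back to the caller.
 */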
struct httpio
{
	conn_t		*conn;		/* connection */
	int		 chunked;	/* chunked mode */
	char		*buf;		/* chunk buffer */
	size_t		 bufsize;	/* size of chunk buffer */
	size_t		 buflen;	/* amount of data currently in buffer */
	size_t		 bufpos;	/* current read offset in buffer */
	int		 eof;		/* end-of-file flag */
	int		 error;		/* error flag */
	size_t		 chunksize;	/* remaining size of current chunk */
#ifndef NDEBUG
	size_t		 total;
#endif
};

/*
 * Get next chunk header
 */
static int
http_new_chunk(struct httpio *io)
{
	char *p;

	if (fetch_getln(io->conn) == -1)
		return (-1);

	if (io->conn->buflen < 2 || !isxdigit((unsigned char)*io->conn->buf))
		return (-1);

	for (p = io->conn->buf; *p && !isspace((unsigned char)*p); ++p) {
		if (*p == ';')
			break;
		if (!isxdigit((unsigned char)*p))
			return (-1);
		if (isdigit((unsigned char)*p)) {
			io->chunksize = io->chunksize * 16 +
			    *p - '0';
		} else {
			io->chunksize = io->chunksize * 16 +
			    10 + tolower((unsigned char)*p) - 'a';
		}
	}

#ifndef NDEBUG
	if (fetchDebug) {
		io->total += io->chunksize;
		if (io->chunksize == 0)
			fprintf(stderr, "%s(): end of last chunk\n", __func__);
		else
			fprintf(stderr, "%s(): new chunk: %lu (%lu)\n",
			    __func__, (unsigned long)io->chunksize,
			    (unsigned long)io->total);
	}
#endif

	return (io->chunksize);
}

/*
 * Grow the input buffer to at least len bytes
 */
static inline int
http_growbuf(struct httpio *io, size_t len)
{
	char *tmp;

	if (io->bufsize >= len)
		return (0);

	if ((tmp = realloc(io->buf, len)) == NULL)
		return (-1);
	io->buf = tmp;
	io->bufsize = len;
	return (0);
}

/*
 * Fill the input buffer, do chunk decoding on the fly
 */
static ssize_t
http_fillbuf(struct httpio *io, size_t len)
{
	ssize_t nbytes;
	char ch;

	if (io->error)
		return (-1);
	if (io->eof)
		return (0);

	/* not chunked: just fetch the requested amount */
	if (io->chunked == 0) {
		if (http_growbuf(io, len) == -1)
			return (-1);
		if ((nbytes = fetch_read(io->conn, io->buf, len)) == -1) {
			io->error = errno;
			return (-1);
		}
		io->buflen = nbytes;
		io->bufpos = 0;
		return (io->buflen);
	}

	/* chunked, but we ran out: get the next chunk header */
	if (io->chunksize == 0) {
		switch (http_new_chunk(io)) {
		case -1:
			io->error = EPROTO;
			return (-1);
		case 0:
			io->eof = 1;
			return (0);
		}
	}

	/* fetch the requested amount, but no more than the current chunk */
	if (len > io->chunksize)
		len = io->chunksize;
	if (http_growbuf(io, len) == -1)
		return (-1);
	if ((nbytes = fetch_read(io->conn, io->buf, len)) == -1) {
		io->error = errno;
		return (-1);
	}
	io->bufpos = 0;
	io->buflen = nbytes;
	io->chunksize -= nbytes;

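	/* the current chunk is used up: consume the CRLF that terminates it */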
	if (io->chunksize == 0) {
		if (fetch_read(io->conn, &ch, 1) != 1 || ch != '\r' ||
		    fetch_read(io->conn, &ch, 1) != 1 || ch != '\n')
			return (-1);
	}

	return (io->buflen);
}

/*
 * Read function
 */
static int
http_readfn(void *v, char *buf, int len)
{
	struct httpio *io = (struct httpio *)v;
	int rlen;

	if (io->error)
		return (-1);
	if (io->eof)
		return (0);

	/* empty buffer */
	if (!io->buf || io->bufpos == io->buflen) {
		if ((rlen = http_fillbuf(io, len)) < 0) {
			if ((errno = io->error) == EINTR)
				io->error = 0;
			return (-1);
		} else if (rlen == 0) {
			return (0);
		}
	}

	rlen = io->buflen - io->bufpos;
	if (len < rlen)
		rlen = len;
	memcpy(buf, io->buf + io->bufpos, rlen);
	io->bufpos += rlen;
	return (rlen);
}

/*
 * Write function
 */
static int
http_writefn(void *v, const char *buf, int len)
{
	struct httpio *io = (struct httpio *)v;

	return (fetch_write(io->conn, buf, len));
}

/*
 * Close function
 */
static int
http_closefn(void *v)
{
	struct httpio *io = (struct httpio *)v;
	int r;

	r = fetch_close(io->conn);
	if (io->buf)
		free(io->buf);
	free(io);
	return (r);
}

/*
 * Wrap a file descriptor up
 */
static FILE *
http_funopen(conn_t *conn, int chunked)
{
	struct httpio *io;
	FILE *f;

	if ((io = calloc(1, sizeof(*io))) == NULL) {
		fetch_syserr();
		return (NULL);
	}
	io->conn = conn;
	io->chunked = chunked;
	f = funopen(io, http_readfn, http_writefn, NULL, http_closefn);
	if (f == NULL) {
		fetch_syserr();
		free(io);
		return (NULL);
	}
	return (f);
}


/*****************************************************************************
 * Helper functions for talking to the server and parsing its replies
 */

/* Header types */
typedef enum {
	hdr_syserror = -2,
	hdr_error = -1,
	hdr_end = 0,
	hdr_unknown = 1,
	hdr_content_length,
	hdr_content_range,
	hdr_last_modified,
	hdr_location,
	hdr_transfer_encoding,
	hdr_www_authenticate,
	hdr_proxy_authenticate,
} hdr_t;

/* Names of interesting headers */
static struct {
	hdr_t		 num;
	const char	*name;
} hdr_names[] = {
	{ hdr_content_length, "Content-Length" },
	{ hdr_content_range, "Content-Range" },
	{ hdr_last_modified, "Last-Modified" },
	{ hdr_location, "Location" },
	{ hdr_transfer_encoding, "Transfer-Encoding" },
	{ hdr_www_authenticate, "WWW-Authenticate" },
	{ hdr_proxy_authenticate, "Proxy-Authenticate" },
	{ hdr_unknown, NULL },
};

/*
 * Send a formatted line; optionally echo to terminal
 */
static int
http_cmd(conn_t *conn, const char *fmt, ...)
{
	va_list ap;
	size_t len;
	char *msg;
	int r;

	va_start(ap, fmt);
	len = vasprintf(&msg, fmt, ap);
	va_end(ap);

	if (msg == NULL) {
		errno = ENOMEM;
		fetch_syserr();
		return (-1);
	}

	r = fetch_putln(conn, msg, len);
	free(msg);

	if (r == -1) {
		fetch_syserr();
		return (-1);
	}

	return (0);
}

/*
 * Get and parse status line
 */
static int
http_get_reply(conn_t *conn)
{
	char *p;

	if (fetch_getln(conn) == -1)
		return (-1);
	/*
	 * A valid status line looks like "HTTP/m.n xyz reason" where m
	 * and n are the major and minor protocol version numbers and xyz
	 * is the reply code.
	 * Unfortunately, there are servers out there (NCSA 1.5.1, to name
	 * just one) that do not send a version number, so we can't rely
	 * on finding one, but if we do, insist on it being 1.0 or 1.1.
	 * We don't care about the reason phrase.
	 */
	if (strncmp(conn->buf, "HTTP", 4) != 0)
		return (HTTP_PROTOCOL_ERROR);
	p = conn->buf + 4;
	if (*p == '/') {
		if (p[1] != '1' || p[2] != '.' || (p[3] != '0' && p[3] != '1'))
			return (HTTP_PROTOCOL_ERROR);
		p += 4;
	}
	if (*p != ' ' ||
	    !isdigit((unsigned char)p[1]) ||
	    !isdigit((unsigned char)p[2]) ||
	    !isdigit((unsigned char)p[3]))
		return (HTTP_PROTOCOL_ERROR);

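	/* assemble the three-digit reply code from its ASCII digits */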
	conn->err = (p[1] - '0') * 100 + (p[2] - '0') * 10 + (p[3] - '0');
	return (conn->err);
}

/*
 * Check a header; if the type matches the given string, return a pointer
 * to the beginning of the value.
 */
static const char *
http_match(const char *str, const char *hdr)
{
	while (*str && *hdr &&
	    tolower((unsigned char)*str++) == tolower((unsigned char)*hdr++))
		/* nothing */;
	if (*str || *hdr != ':')
		return (NULL);
	while (*hdr && isspace((unsigned char)*++hdr))
		/* nothing */;
	return (hdr);
}


/*
 * Get the next header and return the appropriate symbolic code.  We
 * need to read one line ahead for checking for a continuation line
 * belonging to the current header (continuation lines start with
 * white space).
 *
 * We get called with a fresh line already in the conn buffer, either
 * from the previous http_next_header() invocation, or, the first
 * time, from a fetch_getln() performed by our caller.
 *
 * This stops when we encounter an empty line (we don't read beyond the
 * header area).
 *
 * Note that the "headerbuf" is just a place to return the result.  Its
 * contents are not used for the next call.  This means that no cleanup
 * is needed when, e.g., doing another connection; just call the cleanup
 * when fully done to deallocate memory.
 */

/* Limit the max number of continuation lines to some reasonable value */
#define HTTP_MAX_CONT_LINES 10

/* Place into which to build a header from one or several lines */
typedef struct {
	char	*buf;		/* buffer */
	size_t	 bufsize;	/* buffer size */
	size_t	 buflen;	/* length of buffer contents */
} http_headerbuf_t;

static void
init_http_headerbuf(http_headerbuf_t *buf)
{
	buf->buf = NULL;
	buf->bufsize = 0;
	buf->buflen = 0;
}

static void
clean_http_headerbuf(http_headerbuf_t *buf)
{
	if (buf->buf)
		free(buf->buf);
	init_http_headerbuf(buf);
}

/* Remove whitespace at the end of the buffer */
static void
http_conn_trimright(conn_t *conn)
{
	while (conn->buflen &&
	    isspace((unsigned char)conn->buf[conn->buflen - 1]))
		conn->buflen--;
	conn->buf[conn->buflen] = '\0';
}

static hdr_t
http_next_header(conn_t *conn, http_headerbuf_t *hbuf, const char **p)
{
	unsigned int i, len;

	/*
	 * Have to do the stripping here because of the first line.  So
	 * it's done twice for the subsequent lines.  No big deal
	 */
	http_conn_trimright(conn);
	if (conn->buflen == 0)
		return (hdr_end);

	/* Copy the line to the headerbuf */
	if (hbuf->bufsize < conn->buflen + 1) {
		if ((hbuf->buf = realloc(hbuf->buf, conn->buflen + 1)) == NULL)
			return (hdr_syserror);
		hbuf->bufsize = conn->buflen + 1;
	}
	strcpy(hbuf->buf, conn->buf);
	hbuf->buflen = conn->buflen;

	/*
	 * Fetch possible continuation lines.  Stop at 1st non-continuation
	 * and leave it in the conn buffer
	 */
	for (i = 0; i < HTTP_MAX_CONT_LINES; i++) {
		if (fetch_getln(conn) == -1)
			return (hdr_syserror);

		/*
		 * Note: we carry on the idea from the previous version
		 * that a pure whitespace line is equivalent to an empty
		 * one (so it's not continuation and will be handled when
		 * we are called next)
		 */
		http_conn_trimright(conn);
		if (conn->buf[0] != ' ' && conn->buf[0] != '\t')
			break;

		/* Got a continuation line.  Concatenate to previous */
		len = hbuf->buflen + conn->buflen;
		if (hbuf->bufsize < len + 1) {
			len *= 2;
			if ((hbuf->buf = realloc(hbuf->buf, len + 1)) == NULL)
				return (hdr_syserror);
			hbuf->bufsize = len + 1;
		}
		strcpy(hbuf->buf + hbuf->buflen, conn->buf);
		hbuf->buflen += conn->buflen;
	}

	/*
	 * We could check for malformed headers but we don't really care.
	 * A valid header starts with a token immediately followed by a
	 * colon; a token is any sequence of non-control, non-whitespace
	 * characters except "()<>@,;:\\\"{}".
	 */
	for (i = 0; hdr_names[i].num != hdr_unknown; i++)
		if ((*p = http_match(hdr_names[i].name, hbuf->buf)) != NULL)
			return (hdr_names[i].num);

	return (hdr_unknown);
}

/**************************
 * [Proxy-]Authenticate header parsing
 */

/*
 * Read doublequote-delimited string into output buffer obuf (allocated
 * by caller, whose responsibility it is to ensure that it's big enough)
 * cp points to the first char after the initial '"'
 * Handles \ quoting
 * Returns pointer to the first char after the terminating double quote, or
 * NULL for error.
 */
static const char *
http_parse_headerstring(const char *cp, char *obuf)
{
	for (;;) {
		switch (*cp) {
		case 0: /* Unterminated string */
			*obuf = 0;
			return (NULL);
		case '"': /* Ending quote */
			*obuf = 0;
			return (++cp);
		case '\\':
			if (*++cp == 0) {
				*obuf = 0;
				return (NULL);
			}
			/* FALLTHROUGH */
		default:
			*obuf++ = *cp++;
		}
	}
}

/* HTTP auth challenge schemes */
typedef enum {HTTPAS_UNKNOWN, HTTPAS_BASIC, HTTPAS_DIGEST} http_auth_schemes_t;

/* Data holder for a Basic or Digest challenge. */
typedef struct {
	http_auth_schemes_t scheme;
	char	*realm;
	char	*qop;
	char	*nonce;
	char	*opaque;
	char	*algo;
	int	 stale;
	int	 nc; /* Nonce count */
} http_auth_challenge_t;

static void
init_http_auth_challenge(http_auth_challenge_t *b)
{
	b->scheme = HTTPAS_UNKNOWN;
	b->realm = b->qop = b->nonce = b->opaque = b->algo = NULL;
	b->stale = b->nc = 0;
}

static void
clean_http_auth_challenge(http_auth_challenge_t *b)
{
	if (b->realm)
		free(b->realm);
	if (b->qop)
		free(b->qop);
	if (b->nonce)
		free(b->nonce);
	if (b->opaque)
		free(b->opaque);
	if (b->algo)
		free(b->algo);
	init_http_auth_challenge(b);
}

/* Data holder for an array of challenges offered in an http response. */
#define MAX_CHALLENGES 10
typedef struct {
	http_auth_challenge_t *challenges[MAX_CHALLENGES];
	int	 count; /* Number of parsed challenges in the array */
	int	 valid; /* We did parse an authenticate header */
} http_auth_challenges_t;

static void
init_http_auth_challenges(http_auth_challenges_t *cs)
{
	int i;
	for (i = 0; i < MAX_CHALLENGES; i++)
		cs->challenges[i] = NULL;
	cs->count = cs->valid = 0;
}

static void
clean_http_auth_challenges(http_auth_challenges_t *cs)
{
	int i;
	/* We rely on non-zero pointers being allocated, not on the count */
	for (i = 0; i < MAX_CHALLENGES; i++) {
		if (cs->challenges[i] != NULL) {
			clean_http_auth_challenge(cs->challenges[i]);
			free(cs->challenges[i]);
		}
	}
	init_http_auth_challenges(cs);
}

/*
 * Enumeration for lexical elements.  Separators will be returned as their own
 * ASCII value
 */
typedef enum {HTTPHL_WORD=256, HTTPHL_STRING=257, HTTPHL_END=258,
	      HTTPHL_ERROR = 259} http_header_lex_t;

/*
 * Determine what kind of token comes next and return possible value
 * in buf, which is supposed to have been allocated big enough by
 * caller.  Advance input pointer and return element type.
 */
static int
http_header_lex(const char **cpp, char *buf)
{
	size_t l;
	/* Eat initial whitespace */
	*cpp += strspn(*cpp, " \t");
	if (**cpp == 0)
		return (HTTPHL_END);

	/* Separator ? */
	if (**cpp == ',' || **cpp == '=')
		return (*((*cpp)++));

	/* String ? */
	if (**cpp == '"') {
		*cpp = http_parse_headerstring(++*cpp, buf);
		if (*cpp == NULL)
			return (HTTPHL_ERROR);
		return (HTTPHL_STRING);
	}

	/* Read other token, until separator or whitespace */
	l = strcspn(*cpp, " \t,=");
	memcpy(buf, *cpp, l);
	buf[l] = 0;
	*cpp += l;
	return (HTTPHL_WORD);
}

/*
 * Read challenges from http xxx-authenticate header and accumulate them
 * in the challenges list structure.
 *
 * Headers with multiple challenges are specified by rfc2617, but
 * servers (i.e., squid) often send them in separate headers instead,
 * which in turn is forbidden by the http spec (multiple headers with
 * the same name are only allowed for pure comma-separated lists, see
 * rfc2616 sec 4.2).
 *
 * We support both approaches anyway
 */
static int
http_parse_authenticate(const char *cp, http_auth_challenges_t *cs)
{
	int ret = -1;
	http_header_lex_t lex;
	char *key = malloc(strlen(cp) + 1);
	char *value = malloc(strlen(cp) + 1);
	char *buf = malloc(strlen(cp) + 1);

	if (key == NULL || value == NULL || buf == NULL) {
		fetch_syserr();
		goto out;
	}

	/* In any case we've seen the header and we set the valid bit */
	cs->valid = 1;

	/* Need word first */
	lex = http_header_lex(&cp, key);
	if (lex != HTTPHL_WORD)
		goto out;

	/* Loop on challenges */
	for (; cs->count < MAX_CHALLENGES; cs->count++) {
		cs->challenges[cs->count] =
		    malloc(sizeof(http_auth_challenge_t));
		if (cs->challenges[cs->count] == NULL) {
			fetch_syserr();
			goto out;
		}
		init_http_auth_challenge(cs->challenges[cs->count]);
		if (strcasecmp(key, "basic") == 0) {
			cs->challenges[cs->count]->scheme = HTTPAS_BASIC;
		} else if (strcasecmp(key, "digest") == 0) {
			cs->challenges[cs->count]->scheme = HTTPAS_DIGEST;
		} else {
			cs->challenges[cs->count]->scheme = HTTPAS_UNKNOWN;
			/*
			 * Continue parsing as basic or digest may
			 * follow, and the syntax is the same for
			 * all.  We'll just ignore this one when
			 * looking at the list
			 */
		}

		/* Loop on attributes */
		for (;;) {
			/* Key */
			lex = http_header_lex(&cp, key);
			if (lex != HTTPHL_WORD)
				goto out;

			/* Equal sign */
			lex = http_header_lex(&cp, buf);
			if (lex != '=')
				goto out;

			/* Value */
			lex = http_header_lex(&cp, value);
			if (lex != HTTPHL_WORD && lex != HTTPHL_STRING)
				goto out;

			if (strcasecmp(key, "realm") == 0) {
				cs->challenges[cs->count]->realm =
				    strdup(value);
			} else if (strcasecmp(key, "qop") == 0) {
				cs->challenges[cs->count]->qop =
				    strdup(value);
			} else if (strcasecmp(key, "nonce") == 0) {
				cs->challenges[cs->count]->nonce =
				    strdup(value);
			} else if (strcasecmp(key, "opaque") == 0) {
				cs->challenges[cs->count]->opaque =
				    strdup(value);
			} else if (strcasecmp(key, "algorithm") == 0) {
				cs->challenges[cs->count]->algo =
				    strdup(value);
			} else if (strcasecmp(key, "stale") == 0) {
				cs->challenges[cs->count]->stale =
				    strcasecmp(value, "no");
			} else {
				/* ignore unknown attributes */
			}

			/* Comma or Next challenge or End */
			lex = http_header_lex(&cp, key);
			/*
			 * If we get a word here, this is the beginning of the
			 * next challenge.  Break the attributes loop
			 */
			if (lex == HTTPHL_WORD)
				break;

			if (lex == HTTPHL_END) {
				/* End while looking for ',' is normal exit */
				cs->count++;
				ret = 0;
				goto out;
			}
			/* Anything else is an error */
			if (lex != ',')
				goto out;

		} /* End attributes loop */
	} /* End challenge loop */

	/*
	 * Challenges max count exceeded.  This really can't happen
	 * with normal data, something's fishy -> error
	 */

out:
	if (key)
		free(key);
	if (value)
		free(value);
	if (buf)
		free(buf);
	return (ret);
}


/*
 * Parse a last-modified header
 */
static int
http_parse_mtime(const char *p, time_t *mtime)
{
	char locale[64], *r;
	struct tm tm;

	strlcpy(locale, setlocale(LC_TIME, NULL), sizeof(locale));
	setlocale(LC_TIME, "C");
	r = strptime(p, "%a, %d %b %Y %H:%M:%S GMT", &tm);
	/*
	 * Some proxies use UTC in response, but it should still be
	 * parsed. RFC2616 states GMT and UTC are exactly equal for HTTP.
	 */
	if (r == NULL)
		r = strptime(p, "%a, %d %b %Y %H:%M:%S UTC", &tm);
	/* XXX should add support for date-2 and date-3 */
	setlocale(LC_TIME, locale);
	if (r == NULL)
		return (-1);
	DEBUGF("last modified: [%04d-%02d-%02d %02d:%02d:%02d]\n",
	    tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	    tm.tm_hour, tm.tm_min, tm.tm_sec);
	*mtime = timegm(&tm);
	return (0);
}

/*
 * Parse a content-length header
 */
static int
http_parse_length(const char *p, off_t *length)
{
	off_t len;

	for (len = 0; *p && isdigit((unsigned char)*p); ++p)
		len = len * 10 + (*p - '0');
	if (*p)
		return (-1);
	DEBUGF("content length: [%lld]\n", (long long)len);
	*length = len;
	return (0);
}

/*
 * Parse a content-range header
 */
static int
http_parse_range(const char *p, off_t *offset, off_t *length, off_t *size)
{
	off_t first, last, len;

	if (strncasecmp(p, "bytes ", 6) != 0)
		return (-1);
	p += 6;
	if (*p == '*') {
		first = last = -1;
		++p;
	} else {
		for (first = 0; *p && isdigit((unsigned char)*p); ++p)
			first = first * 10 + *p - '0';
		if (*p != '-')
			return (-1);
		for (last = 0, ++p; *p && isdigit((unsigned char)*p); ++p)
			last = last * 10 + *p - '0';
	}
	if (first > last || *p != '/')
		return (-1);
	for (len = 0, ++p; *p && isdigit((unsigned char)*p); ++p)
		len = len * 10 + *p - '0';
	if (*p || len < last - first + 1)
		return (-1);
	if (first == -1) {
		DEBUGF("content range: [*/%lld]\n", (long long)len);
		*length = 0;
	} else {
		DEBUGF("content range: [%lld-%lld/%lld]\n",
		    (long long)first, (long long)last, (long long)len);
		*length = last - first + 1;
	}
	*offset = first;
	*size = len;
	return (0);
}


/*****************************************************************************
 * Helper functions for authorization
 */

/*
 * Base64 encoding
 */
static char *
http_base64(const char *src)
{
	static const char base64[] =
	    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
	    "abcdefghijklmnopqrstuvwxyz"
	    "0123456789+/";
	char *str, *dst;
	size_t l;
	int t, r;

	l = strlen(src);
	if ((str = malloc(((l + 2) / 3) * 4 + 1)) == NULL)
		return (NULL);
	dst = str;
	r = 0;

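	/* each full 3-byte group becomes four 6-bit output characters */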
	while (l >= 3) {
		t = (src[0] << 16) | (src[1] << 8) | src[2];
		dst[0] = base64[(t >> 18) & 0x3f];
		dst[1] = base64[(t >> 12) & 0x3f];
		dst[2] = base64[(t >> 6) & 0x3f];
		dst[3] = base64[(t >> 0) & 0x3f];
		src += 3; l -= 3;
		dst += 4; r += 4;
	}

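	/* any remaining 1 or 2 bytes are padded out with '=' */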
	switch (l) {
	case 2:
		t = (src[0] << 16) | (src[1] << 8);
		dst[0] = base64[(t >> 18) & 0x3f];
		dst[1] = base64[(t >> 12) & 0x3f];
		dst[2] = base64[(t >> 6) & 0x3f];
		dst[3] = '=';
		dst += 4;
		r += 4;
		break;
	case 1:
		t = src[0] << 16;
		dst[0] = base64[(t >> 18) & 0x3f];
		dst[1] = base64[(t >> 12) & 0x3f];
		dst[2] = dst[3] = '=';
		dst += 4;
		r += 4;
		break;
	case 0:
		break;
	}

	*dst = 0;
	return (str);
}


/*
 * Extract authorization parameters from environment value.
 * The value is like scheme:realm:user:pass
 */
typedef struct {
	char	*scheme;
	char	*realm;
	char	*user;
	char	*password;
} http_auth_params_t;

static void
init_http_auth_params(http_auth_params_t *s)
{
	s->scheme = s->realm = s->user = s->password = NULL;
}

static void
clean_http_auth_params(http_auth_params_t *s)
{
	if (s->scheme)
		free(s->scheme);
	if (s->realm)
		free(s->realm);
	if (s->user)
		free(s->user);
	if (s->password)
		free(s->password);
	init_http_auth_params(s);
}

static int
http_authfromenv(const char *p, http_auth_params_t *parms)
{
	int ret = -1;
	char *v, *ve;
	char *str = strdup(p);

	if (str == NULL) {
		fetch_syserr();
		return (-1);
	}
	v = str;

	if ((ve = strchr(v, ':')) == NULL)
		goto out;

	*ve = 0;
	if ((parms->scheme = strdup(v)) == NULL) {
		fetch_syserr();
		goto out;
	}
	v = ve + 1;

	if ((ve = strchr(v, ':')) == NULL)
		goto out;

	*ve = 0;
	if ((parms->realm = strdup(v)) == NULL) {
		fetch_syserr();
		goto out;
	}
	v = ve + 1;

	if ((ve = strchr(v, ':')) == NULL)
		goto out;

	*ve = 0;
	if ((parms->user = strdup(v)) == NULL) {
		fetch_syserr();
		goto out;
	}
	v = ve + 1;

	if ((parms->password = strdup(v)) == NULL) {
		fetch_syserr();
		goto out;
	}
	ret = 0;
out:
	if (ret == -1)
		clean_http_auth_params(parms);
	if (str)
		free(str);
	return (ret);
}


/*
 * Digest response: the code to compute the digest is taken from the
 * sample implementation in RFC2617
 */
#define IN const
#define OUT

#define HASHLEN 16
typedef char HASH[HASHLEN];
#define HASHHEXLEN 32
typedef char HASHHEX[HASHHEXLEN+1];

static const char *hexchars = "0123456789abcdef";
static void
CvtHex(IN HASH Bin, OUT HASHHEX Hex)
{
	unsigned short i;
	unsigned char j;

	for (i = 0; i < HASHLEN; i++) {
		j = (Bin[i] >> 4) & 0xf;
		Hex[i*2] = hexchars[j];
		j = Bin[i] & 0xf;
		Hex[i*2+1] = hexchars[j];
	}
	Hex[HASHHEXLEN] = '\0';
}

/* calculate H(A1) as per spec */
static void
DigestCalcHA1(
	IN char * pszAlg,
	IN char * pszUserName,
	IN char * pszRealm,
	IN char * pszPassword,
	IN char * pszNonce,
	IN char * pszCNonce,
	OUT HASHHEX SessionKey
	)
{
	MD5_CTX Md5Ctx;
	HASH HA1;

	MD5Init(&Md5Ctx);
	MD5Update(&Md5Ctx, pszUserName, strlen(pszUserName));
	MD5Update(&Md5Ctx, ":", 1);
	MD5Update(&Md5Ctx, pszRealm, strlen(pszRealm));
	MD5Update(&Md5Ctx, ":", 1);
	MD5Update(&Md5Ctx, pszPassword, strlen(pszPassword));
	MD5Final(HA1, &Md5Ctx);
	if (strcasecmp(pszAlg, "md5-sess") == 0) {
		MD5Init(&Md5Ctx);
		MD5Update(&Md5Ctx, HA1, HASHLEN);
		MD5Update(&Md5Ctx, ":", 1);
		MD5Update(&Md5Ctx, pszNonce, strlen(pszNonce));
		MD5Update(&Md5Ctx, ":", 1);
		MD5Update(&Md5Ctx, pszCNonce, strlen(pszCNonce));
		MD5Final(HA1, &Md5Ctx);
	}
	CvtHex(HA1, SessionKey);
}

/* calculate request-digest/response-digest as per HTTP Digest spec */
static void
DigestCalcResponse(
	IN HASHHEX HA1,			/* H(A1) */
	IN char * pszNonce,		/* nonce from server */
	IN char * pszNonceCount,	/* 8 hex digits */
	IN char * pszCNonce,		/* client nonce */
	IN char * pszQop,		/* qop-value: "", "auth", "auth-int" */
	IN char * pszMethod,		/* method from the request */
	IN char * pszDigestUri,		/* requested URL */
	IN HASHHEX HEntity,		/* H(entity body) if qop="auth-int" */
	OUT HASHHEX Response		/* request-digest or response-digest */
	)
{
#if 0
	DEBUGF("Calc: HA1[%s] Nonce[%s] qop[%s] method[%s] URI[%s]\n",
	    HA1, pszNonce, pszQop, pszMethod, pszDigestUri);
#endif
	MD5_CTX Md5Ctx;
	HASH HA2;
	HASH RespHash;
	HASHHEX HA2Hex;

	/* calculate H(A2) */
	MD5Init(&Md5Ctx);
	MD5Update(&Md5Ctx, pszMethod, strlen(pszMethod));
	MD5Update(&Md5Ctx, ":", 1);
	MD5Update(&Md5Ctx, pszDigestUri, strlen(pszDigestUri));
	if (strcasecmp(pszQop, "auth-int") == 0) {
		MD5Update(&Md5Ctx, ":", 1);
		MD5Update(&Md5Ctx, HEntity, HASHHEXLEN);
	}
	MD5Final(HA2, &Md5Ctx);
	CvtHex(HA2, HA2Hex);

	/* calculate response */
	MD5Init(&Md5Ctx);
	MD5Update(&Md5Ctx, HA1, HASHHEXLEN);
	MD5Update(&Md5Ctx, ":", 1);
	MD5Update(&Md5Ctx, pszNonce, strlen(pszNonce));
	MD5Update(&Md5Ctx, ":", 1);
	if (*pszQop) {
		MD5Update(&Md5Ctx, pszNonceCount, strlen(pszNonceCount));
		MD5Update(&Md5Ctx, ":", 1);
		MD5Update(&Md5Ctx, pszCNonce, strlen(pszCNonce));
		MD5Update(&Md5Ctx, ":", 1);
		MD5Update(&Md5Ctx, pszQop, strlen(pszQop));
		MD5Update(&Md5Ctx, ":", 1);
	}
	MD5Update(&Md5Ctx, HA2Hex, HASHHEXLEN);
	MD5Final(RespHash, &Md5Ctx);
	CvtHex(RespHash, Response);
}

/*
 * Generate/Send a Digest authorization header
 * This looks like: [Proxy-]Authorization: credentials
 *
 * credentials = "Digest" digest-response
 * digest-response = 1#( username | realm | nonce | digest-uri
 *                     | response | [ algorithm ] | [cnonce] |
 *                     [opaque] | [message-qop] |
 *                     [nonce-count] | [auth-param] )
 * username = "username" "=" username-value
 * username-value = quoted-string
 * digest-uri = "uri" "=" digest-uri-value
 * digest-uri-value = request-uri   ; As specified by HTTP/1.1
 * message-qop = "qop" "=" qop-value
 * cnonce = "cnonce" "=" cnonce-value
 * cnonce-value = nonce-value
 * nonce-count = "nc" "=" nc-value
 * nc-value = 8LHEX
 * response = "response" "=" request-digest
 * request-digest = <"> 32LHEX <">
 */
static int
http_digest_auth(conn_t *conn, const char *hdr, http_auth_challenge_t *c,
    http_auth_params_t *parms, struct url *url)
{
	int r;
	char noncecount[10];
	char cnonce[40];
	char *options = NULL;

	if (!c->realm || !c->nonce) {
		DEBUGF("realm/nonce not set in challenge\n");
		return (-1);
	}
	if (!c->algo)
		c->algo = strdup("");

	if (asprintf(&options, "%s%s%s%s",
	    *c->algo? ",algorithm=" : "", c->algo,
	    c->opaque? ",opaque=" : "", c->opaque?c->opaque:"") < 0)
		return (-1);

	if (!c->qop) {
		c->qop = strdup("");
		*noncecount = 0;
		*cnonce = 0;
	} else {
		c->nc++;
		sprintf(noncecount, "%08x", c->nc);
		/* We don't try very hard with the cnonce ... */
		sprintf(cnonce, "%x%lx", getpid(), (unsigned long)time(0));
	}

	HASHHEX HA1;
	DigestCalcHA1(c->algo, parms->user, c->realm,
	    parms->password, c->nonce, cnonce, HA1);
	DEBUGF("HA1: [%s]\n", HA1);
	HASHHEX digest;
	DigestCalcResponse(HA1, c->nonce, noncecount, cnonce, c->qop,
	    "GET", url->doc, "", digest);

	if (c->qop[0]) {
		r = http_cmd(conn, "%s: Digest username=\"%s\",realm=\"%s\","
		    "nonce=\"%s\",uri=\"%s\",response=\"%s\","
		    "qop=\"auth\", cnonce=\"%s\", nc=%s%s",
		    hdr, parms->user, c->realm,
		    c->nonce, url->doc, digest,
		    cnonce, noncecount, options);
	} else {
		r = http_cmd(conn, "%s: Digest username=\"%s\",realm=\"%s\","
		    "nonce=\"%s\",uri=\"%s\",response=\"%s\"%s",
		    hdr, parms->user, c->realm,
		    c->nonce, url->doc, digest, options);
	}
	if (options)
		free(options);
	return (r);
}

/*
 * Encode username and password
 */
static int
http_basic_auth(conn_t *conn, const char *hdr, const char *usr, const char *pwd)
{
	char *upw, *auth;
	int r;

	DEBUGF("basic: usr: [%s]\n", usr);
	DEBUGF("basic: pwd: [%s]\n", pwd);
	if (asprintf(&upw, "%s:%s", usr, pwd) == -1)
		return (-1);
	auth = http_base64(upw);
	free(upw);
	if (auth == NULL)
		return (-1);
	r = http_cmd(conn, "%s: Basic %s", hdr, auth);
	free(auth);
	return (r);
}

/*
 * Choose the challenge to answer and call the appropriate routine to
 * produce the header.
 */
static int
http_authorize(conn_t *conn, const char *hdr, http_auth_challenges_t *cs,
    http_auth_params_t *parms, struct url *url)
{
	http_auth_challenge_t *digest = NULL;
	int i;

	/* If user or pass are null we're not happy */
	if (!parms->user || !parms->password) {
		DEBUGF("NULL usr or pass\n");
		return (-1);
	}

	/* Look for a Digest */
	for (i = 0; i < cs->count; i++) {
		if (cs->challenges[i]->scheme == HTTPAS_DIGEST)
			digest = cs->challenges[i];
	}

	/* Error if "Digest" was specified and there is no Digest challenge */
	if (!digest &&
	    (parms->scheme && strcasecmp(parms->scheme, "digest") == 0)) {
		DEBUGF("Digest auth in env, not supported by peer\n");
		return (-1);
	}
	/*
	 * If "basic" was specified in the environment, or there is no Digest
	 * challenge, do the basic thing.  Don't need a challenge for this,
	 * so no need to check basic != NULL
	 */
	if (!digest ||
	    (parms->scheme && strcasecmp(parms->scheme, "basic") == 0))
		return (http_basic_auth(conn, hdr, parms->user, parms->password));

	/* Else, prefer digest.  We just checked that it's not NULL */
	return (http_digest_auth(conn, hdr, digest, parms, url));
}

/*****************************************************************************
 * Helper functions for connecting to a server or proxy
 */

/*
 * Connect to the correct HTTP server or proxy.
 */
static conn_t *
http_connect(struct url *URL, struct url *purl, const char *flags)
{
	struct url *curl;
	conn_t *conn;
	hdr_t h;
	http_headerbuf_t headerbuf;
	const char *p;
	int verbose;
	int af, val;
	int serrno;

#ifdef INET6
	af = AF_UNSPEC;
#else
	af = AF_INET;
#endif

	verbose = CHECK_FLAG('v');
	if (CHECK_FLAG('4'))
		af = AF_INET;
#ifdef INET6
	else if (CHECK_FLAG('6'))
		af = AF_INET6;
#endif

	curl = (purl != NULL) ? purl : URL;

	if ((conn = fetch_connect(curl->host, curl->port, af, verbose)) == NULL)
		/* fetch_connect() has already set an error code */
		return (NULL);
	init_http_headerbuf(&headerbuf);
|
2018-11-27 11:22:19 +00:00
|
|
|
if (strcmp(URL->scheme, SCHEME_HTTPS) == 0 && purl) {
|
2013-04-12 22:05:15 +00:00
|
|
|
http_cmd(conn, "CONNECT %s:%d HTTP/1.1",
|
|
|
|
URL->host, URL->port);
|
2013-08-22 07:43:36 +00:00
|
|
|
http_cmd(conn, "Host: %s:%d",
|
|
|
|
URL->host, URL->port);
|
2013-04-12 22:05:15 +00:00
|
|
|
http_cmd(conn, "");
|
|
|
|
if (http_get_reply(conn) != HTTP_OK) {
|
2015-10-16 12:21:44 +00:00
|
|
|
http_seterr(conn->err);
|
|
|
|
goto ouch;
|
|
|
|
}
|
|
|
|
/* Read and discard the rest of the proxy response */
|
|
|
|
if (fetch_getln(conn) < 0) {
|
|
|
|
fetch_syserr();
|
|
|
|
goto ouch;
|
2013-04-12 22:05:15 +00:00
|
|
|
}
|
2015-10-16 12:21:44 +00:00
|
|
|
do {
|
|
|
|
switch ((h = http_next_header(conn, &headerbuf, &p))) {
|
|
|
|
case hdr_syserror:
|
|
|
|
fetch_syserr();
|
|
|
|
goto ouch;
|
|
|
|
case hdr_error:
|
|
|
|
http_seterr(HTTP_PROTOCOL_ERROR);
|
|
|
|
goto ouch;
|
|
|
|
default:
|
|
|
|
/* ignore */ ;
|
|
|
|
}
|
2016-12-30 14:54:54 +00:00
|
|
|
} while (h > hdr_end);
|
2013-04-12 22:05:15 +00:00
|
|
|
}
|
2018-11-27 11:22:19 +00:00
|
|
|
if (strcmp(URL->scheme, SCHEME_HTTPS) == 0 &&
|
2013-07-26 15:53:43 +00:00
|
|
|
fetch_ssl(conn, URL, verbose) == -1) {
|
2002-06-05 21:35:35 +00:00
|
|
|
/* grrr */
|
|
|
|
errno = EAUTH;
|
2007-12-14 10:26:58 +00:00
|
|
|
fetch_syserr();
|
2015-10-16 12:21:44 +00:00
|
|
|
goto ouch;
|
2002-06-05 12:46:36 +00:00
|
|
|
}
|
2005-02-16 00:22:20 +00:00
|
|
|
|
|
|
|
val = 1;
|
|
|
|
setsockopt(conn->sd, IPPROTO_TCP, TCP_NOPUSH, &val, sizeof(val));
|
|
|
|
|
2015-10-16 12:21:44 +00:00
|
|
|
clean_http_headerbuf(&headerbuf);
|
2002-06-05 10:05:03 +00:00
|
|
|
return (conn);
|
2015-10-16 12:21:44 +00:00
|
|
|
ouch:
|
|
|
|
serrno = errno;
|
|
|
|
clean_http_headerbuf(&headerbuf);
|
|
|
|
fetch_close(conn);
|
|
|
|
errno = serrno;
|
|
|
|
return (NULL);
|
2000-10-12 22:10:26 +00:00
|
|
|
}
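/*
 * Illustrative only: for an HTTPS URL reached through a proxy, the
 * tunnel set up above looks roughly like this on the wire before the
 * TLS handshake starts (host and port are made up):
 *
 *      CONNECT www.example.com:443 HTTP/1.1
 *      Host: www.example.com:443
 *
 *      HTTP/1.1 200 Connection established
 */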
|
|
|
|
|
|
|
|
static struct url *
|
2007-12-18 11:03:07 +00:00
|
|
|
http_get_proxy(struct url * url, const char *flags)
|
2000-10-12 22:10:26 +00:00
|
|
|
{
|
2002-02-05 22:13:51 +00:00
|
|
|
struct url *purl;
|
|
|
|
char *p;
|
|
|
|
|
2003-03-29 15:15:38 +00:00
|
|
|
if (flags != NULL && strchr(flags, 'd') != NULL)
|
2003-03-11 08:20:58 +00:00
|
|
|
return (NULL);
|
2007-12-18 11:03:07 +00:00
|
|
|
if (fetch_no_proxy_match(url->host))
|
|
|
|
return (NULL);
|
2002-02-05 22:13:51 +00:00
|
|
|
if (((p = getenv("HTTP_PROXY")) || (p = getenv("http_proxy"))) &&
|
2005-08-24 12:28:05 +00:00
|
|
|
*p && (purl = fetchParseURL(p))) {
|
2002-02-05 22:13:51 +00:00
|
|
|
if (!*purl->scheme)
|
|
|
|
strcpy(purl->scheme, SCHEME_HTTP);
|
|
|
|
if (!purl->port)
|
2007-12-14 10:26:58 +00:00
|
|
|
purl->port = fetch_default_proxy_port(purl->scheme);
|
2018-11-27 11:22:19 +00:00
|
|
|
if (strcmp(purl->scheme, SCHEME_HTTP) == 0)
|
2002-02-05 22:13:51 +00:00
|
|
|
return (purl);
|
|
|
|
fetchFreeURL(purl);
|
|
|
|
}
|
|
|
|
return (NULL);
|
2000-07-12 10:39:56 +00:00
|
|
|
}
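/*
 * Illustrative only: with the environment set to, e.g.,
 *
 *      HTTP_PROXY=http://proxy.example.com:3128
 *
 * this returns a struct url for host "proxy.example.com", port 3128,
 * scheme "http".  A bare "HTTP_PROXY=proxy.example.com" gets the
 * scheme and the default proxy port filled in above before it is
 * returned.
 */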
|
|
|
|
|
2002-01-01 16:25:29 +00:00
|
|
|
static void
|
2007-12-14 10:26:58 +00:00
|
|
|
http_print_html(FILE *out, FILE *in)
|
2002-01-01 16:25:29 +00:00
|
|
|
{
|
2002-02-05 22:13:51 +00:00
|
|
|
size_t len;
|
|
|
|
char *line, *p, *q;
|
|
|
|
int comment, tag;
|
|
|
|
|
|
|
|
comment = tag = 0;
|
|
|
|
while ((line = fgetln(in, &len)) != NULL) {
|
2007-12-19 00:26:36 +00:00
|
|
|
while (len && isspace((unsigned char)line[len - 1]))
|
2002-02-05 22:13:51 +00:00
|
|
|
--len;
|
|
|
|
for (p = q = line; q < line + len; ++q) {
|
|
|
|
if (comment && *q == '-') {
|
|
|
|
if (q + 2 < line + len &&
|
|
|
|
strcmp(q, "-->") == 0) {
|
|
|
|
tag = comment = 0;
|
|
|
|
q += 2;
|
|
|
|
}
|
|
|
|
} else if (tag && !comment && *q == '>') {
|
|
|
|
p = q + 1;
|
|
|
|
tag = 0;
|
|
|
|
} else if (!tag && *q == '<') {
|
|
|
|
if (q > p)
|
|
|
|
fwrite(p, q - p, 1, out);
|
|
|
|
tag = 1;
|
|
|
|
if (q + 3 < line + len &&
|
|
|
|
strcmp(q, "<!--") == 0) {
|
|
|
|
comment = 1;
|
|
|
|
q += 3;
|
|
|
|
}
|
|
|
|
}
|
2002-01-01 16:25:29 +00:00
|
|
|
}
|
2002-02-05 22:13:51 +00:00
|
|
|
if (!tag && q > p)
|
|
|
|
fwrite(p, q - p, 1, out);
|
|
|
|
fputc('\n', out);
|
2002-01-01 16:25:29 +00:00
|
|
|
}
|
|
|
|
}
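/*
 * Illustrative only: given an error document such as
 *
 *      <html><body><h1>404 Not Found</h1></body></html>
 *
 * the filter above writes just "404 Not Found", so the server's
 * message can be shown on stderr without the markup.
 */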
|
|
|
|
|
2002-02-05 22:13:51 +00:00
|
|
|
|
2000-07-12 10:39:56 +00:00
|
|
|
/*****************************************************************************
|
|
|
|
* Core
|
|
|
|
*/
|
|
|
|
|
2014-06-05 22:16:26 +00:00
|
|
|
FILE *
|
|
|
|
http_request(struct url *URL, const char *op, struct url_stat *us,
|
|
|
|
struct url *purl, const char *flags)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (http_request_body(URL, op, us, purl, flags, NULL, NULL));
|
|
|
|
}
|
|
|
|
|
2000-07-12 10:39:56 +00:00
|
|
|
/*
|
|
|
|
* Send a request and process the reply
|
2002-06-05 12:19:08 +00:00
|
|
|
*
|
|
|
|
* XXX This function is way too long, the do..while loop should be split
|
|
|
|
* XXX off into a separate function.
|
2000-07-12 10:39:56 +00:00
|
|
|
*/
|
2000-10-12 22:10:26 +00:00
|
|
|
FILE *
|
2014-06-05 22:16:26 +00:00
|
|
|
http_request_body(struct url *URL, const char *op, struct url_stat *us,
|
|
|
|
struct url *purl, const char *flags, const char *content_type,
|
|
|
|
const char *body)
|
2000-07-12 10:39:56 +00:00
|
|
|
{
|
2008-12-15 08:27:44 +00:00
|
|
|
char timebuf[80];
|
|
|
|
char hbuf[MAXHOSTNAMELEN + 7], *host;
|
2002-06-05 10:05:03 +00:00
|
|
|
conn_t *conn;
|
2002-02-05 22:13:51 +00:00
|
|
|
struct url *url, *new;
|
2010-01-19 10:19:55 +00:00
|
|
|
int chunked, direct, ims, noredirect, verbose;
|
2005-03-02 19:09:28 +00:00
|
|
|
int e, i, n, val;
|
2002-02-05 22:13:51 +00:00
|
|
|
off_t offset, clength, length, size;
|
|
|
|
time_t mtime;
|
|
|
|
const char *p;
|
|
|
|
FILE *f;
|
|
|
|
hdr_t h;
|
2008-12-15 08:27:44 +00:00
|
|
|
struct tm *timestruct;
|
2010-01-19 10:19:55 +00:00
|
|
|
http_headerbuf_t headerbuf;
|
|
|
|
http_auth_challenges_t server_challenges;
|
|
|
|
http_auth_challenges_t proxy_challenges;
|
2014-06-05 22:16:26 +00:00
|
|
|
size_t body_len;
|
2010-01-19 10:19:55 +00:00
|
|
|
|
|
|
|
/* The following calls don't allocate anything */
|
2011-05-12 21:18:55 +00:00
|
|
|
init_http_headerbuf(&headerbuf);
|
2010-01-19 10:19:55 +00:00
|
|
|
init_http_auth_challenges(&server_challenges);
|
|
|
|
init_http_auth_challenges(&proxy_challenges);
|
1998-07-09 16:52:44 +00:00
|
|
|
|
2002-02-05 22:13:51 +00:00
|
|
|
direct = CHECK_FLAG('d');
|
|
|
|
noredirect = CHECK_FLAG('A');
|
|
|
|
verbose = CHECK_FLAG('v');
|
2008-12-15 08:27:44 +00:00
|
|
|
ims = CHECK_FLAG('i');
|
2002-02-05 22:13:51 +00:00
|
|
|
|
|
|
|
if (direct && purl) {
|
|
|
|
fetchFreeURL(purl);
|
|
|
|
purl = NULL;
|
2001-12-04 01:12:51 +00:00
|
|
|
}
|
2000-07-12 10:39:56 +00:00
|
|
|
|
2002-02-05 22:13:51 +00:00
|
|
|
/* try the provided URL first */
|
|
|
|
url = URL;
|
|
|
|
|
2012-10-22 03:00:10 +00:00
|
|
|
n = MAX_REDIRECT;
|
2002-02-05 22:13:51 +00:00
|
|
|
i = 0;
|
|
|
|
|
2002-06-19 08:36:00 +00:00
|
|
|
e = HTTP_PROTOCOL_ERROR;
|
2002-02-05 22:13:51 +00:00
|
|
|
do {
|
|
|
|
new = NULL;
|
|
|
|
chunked = 0;
|
|
|
|
offset = 0;
|
|
|
|
clength = -1;
|
|
|
|
length = -1;
|
|
|
|
size = -1;
|
|
|
|
mtime = 0;
|
|
|
|
|
|
|
|
/* check port */
|
|
|
|
if (!url->port)
|
2007-12-14 10:26:58 +00:00
|
|
|
url->port = fetch_default_port(url->scheme);
|
2002-02-05 22:13:51 +00:00
|
|
|
|
|
|
|
/* were we redirected to an FTP URL? */
|
|
|
|
if (purl == NULL && strcmp(url->scheme, SCHEME_FTP) == 0) {
|
|
|
|
if (strcmp(op, "GET") == 0)
|
2007-12-14 10:26:58 +00:00
|
|
|
return (ftp_request(url, "RETR", us, purl, flags));
|
2002-02-05 22:13:51 +00:00
|
|
|
else if (strcmp(op, "HEAD") == 0)
|
2007-12-14 10:26:58 +00:00
|
|
|
return (ftp_request(url, "STAT", us, purl, flags));
|
2002-02-05 22:13:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* connect to server or proxy */
|
2007-12-14 10:26:58 +00:00
|
|
|
if ((conn = http_connect(url, purl, flags)) == NULL)
|
2002-02-05 22:13:51 +00:00
|
|
|
goto ouch;
|
|
|
|
|
2017-03-17 14:18:52 +00:00
|
|
|
/* append port number only if necessary */
|
2002-02-05 22:13:51 +00:00
|
|
|
host = url->host;
|
2007-12-14 10:26:58 +00:00
|
|
|
if (url->port != fetch_default_port(url->scheme)) {
|
2017-03-17 14:18:52 +00:00
|
|
|
snprintf(hbuf, sizeof(hbuf), "%s:%d", host, url->port);
|
|
|
|
host = hbuf;
|
2002-11-28 12:07:15 +00:00
|
|
|
}
|
2000-07-12 10:39:56 +00:00
|
|
|
|
2002-02-05 22:13:51 +00:00
|
|
|
/* send request */
|
|
|
|
if (verbose)
|
2007-12-14 10:26:58 +00:00
|
|
|
fetch_info("requesting %s://%s%s",
|
2002-11-28 12:07:15 +00:00
|
|
|
url->scheme, host, url->doc);
|
2018-11-27 16:23:17 +00:00
|
|
|
if (purl && strcmp(url->scheme, SCHEME_HTTPS) != 0) {
|
2007-12-14 10:26:58 +00:00
|
|
|
http_cmd(conn, "%s %s://%s%s HTTP/1.1",
|
2002-11-28 12:07:15 +00:00
|
|
|
op, url->scheme, host, url->doc);
|
2002-02-05 22:13:51 +00:00
|
|
|
} else {
|
2007-12-14 10:26:58 +00:00
|
|
|
http_cmd(conn, "%s %s HTTP/1.1",
|
2002-02-05 22:13:51 +00:00
|
|
|
op, url->doc);
|
|
|
|
}
|
2000-07-12 10:39:56 +00:00
|
|
|
|
2008-12-15 08:27:44 +00:00
|
|
|
if (ims && url->ims_time) {
|
|
|
|
timestruct = gmtime((time_t *)&url->ims_time);
|
|
|
|
(void)strftime(timebuf, 80, "%a, %d %b %Y %T GMT",
|
|
|
|
timestruct);
|
|
|
|
if (verbose)
|
|
|
|
fetch_info("If-Modified-Since: %s", timebuf);
|
|
|
|
http_cmd(conn, "If-Modified-Since: %s", timebuf);
|
|
|
|
}
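/*
 * Illustrative only: the line sent above has the form
 *
 *      If-Modified-Since: Sun, 06 Nov 1994 08:49:37 GMT
 *
 * and a 304 reply is handled as HTTP_NOT_MODIFIED further down.
 */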
|
2002-02-05 22:13:51 +00:00
|
|
|
/* virtual host */
|
2007-12-14 10:26:58 +00:00
|
|
|
http_cmd(conn, "Host: %s", host);
|
2002-02-05 22:13:51 +00:00
|
|
|
|
2011-05-12 21:18:55 +00:00
|
|
|
/*
|
|
|
|
* Proxy authorization: we only send auth after we received
|
|
|
|
* a 407 error. We do not first try basic anyway (changed
|
|
|
|
* when support was added for digest-auth)
|
|
|
|
*/
|
2010-01-19 10:19:55 +00:00
|
|
|
if (purl && proxy_challenges.valid) {
|
|
|
|
http_auth_params_t aparams;
|
|
|
|
init_http_auth_params(&aparams);
|
|
|
|
if (*purl->user || *purl->pwd) {
|
2015-06-13 19:26:48 +00:00
|
|
|
aparams.user = strdup(purl->user);
|
|
|
|
aparams.password = strdup(purl->pwd);
|
2011-05-12 21:18:55 +00:00
|
|
|
} else if ((p = getenv("HTTP_PROXY_AUTH")) != NULL &&
|
2010-01-19 10:19:55 +00:00
|
|
|
*p != '\0') {
|
|
|
|
if (http_authfromenv(p, &aparams) < 0) {
|
|
|
|
http_seterr(HTTP_NEED_PROXY_AUTH);
|
|
|
|
goto ouch;
|
|
|
|
}
|
2015-11-29 14:26:59 +00:00
|
|
|
} else if (fetch_netrc_auth(purl) == 0) {
|
|
|
|
aparams.user = strdup(purl->user);
|
|
|
|
aparams.password = strdup(purl->pwd);
|
2010-01-19 10:19:55 +00:00
|
|
|
}
|
2011-05-12 21:18:55 +00:00
|
|
|
http_authorize(conn, "Proxy-Authorization",
|
2010-01-19 10:19:55 +00:00
|
|
|
&proxy_challenges, &aparams, url);
|
|
|
|
clean_http_auth_params(&aparams);
|
2002-02-05 22:13:51 +00:00
|
|
|
}
|
|
|
|
|
2011-05-12 21:18:55 +00:00
|
|
|
/*
|
|
|
|
* Server authorization: we never send "a priori"
|
2010-01-19 10:19:55 +00:00
|
|
|
* Basic auth, which used to be done if user/pass were
|
|
|
|
* set in the url. This would be weird because we'd send the
|
2011-05-12 21:18:55 +00:00
|
|
|
* password in the clear even if Digest is finally to be
|
2010-01-19 10:19:55 +00:00
|
|
|
* used (it would have made more sense for the
|
2011-05-12 21:18:55 +00:00
|
|
|
* pre-digest version to do this when Basic was specified
|
|
|
|
* in the environment)
|
|
|
|
*/
|
2010-01-19 10:19:55 +00:00
|
|
|
if (server_challenges.valid) {
|
|
|
|
http_auth_params_t aparams;
|
|
|
|
init_http_auth_params(&aparams);
|
|
|
|
if (*url->user || *url->pwd) {
|
2015-06-13 19:26:48 +00:00
|
|
|
aparams.user = strdup(url->user);
|
|
|
|
aparams.password = strdup(url->pwd);
|
2011-05-12 21:18:55 +00:00
|
|
|
} else if ((p = getenv("HTTP_AUTH")) != NULL &&
|
2010-01-19 10:19:55 +00:00
|
|
|
*p != '\0') {
|
|
|
|
if (http_authfromenv(p, &aparams) < 0) {
|
|
|
|
http_seterr(HTTP_NEED_AUTH);
|
|
|
|
goto ouch;
|
|
|
|
}
|
2015-11-29 14:26:59 +00:00
|
|
|
} else if (fetch_netrc_auth(url) == 0) {
|
2015-11-29 22:37:48 +00:00
|
|
|
aparams.user = strdup(url->user);
|
|
|
|
aparams.password = strdup(url->pwd);
|
2011-05-12 21:18:55 +00:00
|
|
|
} else if (fetchAuthMethod &&
|
2010-01-19 10:19:55 +00:00
|
|
|
fetchAuthMethod(url) == 0) {
|
2015-06-13 19:26:48 +00:00
|
|
|
aparams.user = strdup(url->user);
|
|
|
|
aparams.password = strdup(url->pwd);
|
2002-02-05 22:13:51 +00:00
|
|
|
} else {
|
2007-12-14 10:26:58 +00:00
|
|
|
http_seterr(HTTP_NEED_AUTH);
|
2002-02-05 22:13:51 +00:00
|
|
|
goto ouch;
|
|
|
|
}
|
2011-05-12 21:18:55 +00:00
|
|
|
http_authorize(conn, "Authorization",
|
2010-01-19 10:19:55 +00:00
|
|
|
&server_challenges, &aparams, url);
|
|
|
|
clean_http_auth_params(&aparams);
|
2002-02-05 22:13:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* other headers */
|
2013-07-30 13:07:55 +00:00
|
|
|
if ((p = getenv("HTTP_ACCEPT")) != NULL) {
|
|
|
|
if (*p != '\0')
|
|
|
|
http_cmd(conn, "Accept: %s", p);
|
|
|
|
} else {
|
|
|
|
http_cmd(conn, "Accept: */*");
|
|
|
|
}
|
2002-11-28 12:07:15 +00:00
|
|
|
if ((p = getenv("HTTP_REFERER")) != NULL && *p != '\0') {
|
|
|
|
if (strcasecmp(p, "auto") == 0)
|
2007-12-14 10:26:58 +00:00
|
|
|
http_cmd(conn, "Referer: %s://%s%s",
|
2002-11-28 12:07:15 +00:00
|
|
|
url->scheme, host, url->doc);
|
|
|
|
else
|
2007-12-14 10:26:58 +00:00
|
|
|
http_cmd(conn, "Referer: %s", p);
|
2002-11-28 12:07:15 +00:00
|
|
|
}
|
2014-06-05 20:27:16 +00:00
|
|
|
if ((p = getenv("HTTP_USER_AGENT")) != NULL) {
|
|
|
|
/* no User-Agent if defined but empty */
|
|
|
|
if (*p != '\0')
|
|
|
|
http_cmd(conn, "User-Agent: %s", p);
|
|
|
|
} else {
|
|
|
|
/* default User-Agent */
|
|
|
|
http_cmd(conn, "User-Agent: %s " _LIBFETCH_VER,
|
|
|
|
getprogname());
|
|
|
|
}
|
2003-01-22 17:52:27 +00:00
|
|
|
if (url->offset > 0)
|
2007-12-14 10:26:58 +00:00
|
|
|
http_cmd(conn, "Range: bytes=%lld-", (long long)url->offset);
|
|
|
|
http_cmd(conn, "Connection: close");
|
2014-06-05 22:16:26 +00:00
|
|
|
|
|
|
|
if (body) {
|
|
|
|
body_len = strlen(body);
|
|
|
|
http_cmd(conn, "Content-Length: %zu", body_len);
|
|
|
|
if (content_type != NULL)
|
|
|
|
http_cmd(conn, "Content-Type: %s", content_type);
|
|
|
|
}
|
|
|
|
|
2007-12-14 10:26:58 +00:00
|
|
|
http_cmd(conn, "");
|
2005-03-02 19:09:28 +00:00
|
|
|
|
2014-06-05 22:16:26 +00:00
|
|
|
if (body)
|
|
|
|
fetch_write(conn, body, body_len);
|
|
|
|
|
2005-03-02 19:09:28 +00:00
|
|
|
/*
|
|
|
|
* Force the queued request to be dispatched. Normally, one
|
|
|
|
* would do this with shutdown(2) but squid proxies can be
|
|
|
|
* configured to disallow such half-closed connections. To
|
|
|
|
* be compatible with such configurations, fiddle with socket
|
|
|
|
* options to force the pending data to be written.
|
|
|
|
*/
|
|
|
|
val = 0;
|
|
|
|
setsockopt(conn->sd, IPPROTO_TCP, TCP_NOPUSH, &val,
|
|
|
|
sizeof(val));
|
|
|
|
val = 1;
|
|
|
|
setsockopt(conn->sd, IPPROTO_TCP, TCP_NODELAY, &val,
|
|
|
|
sizeof(val));
|
2002-02-05 22:13:51 +00:00
|
|
|
|
|
|
|
/* get reply */
|
2007-12-14 10:26:58 +00:00
|
|
|
switch (http_get_reply(conn)) {
|
2002-02-05 22:13:51 +00:00
|
|
|
case HTTP_OK:
|
|
|
|
case HTTP_PARTIAL:
|
2008-12-15 08:27:44 +00:00
|
|
|
case HTTP_NOT_MODIFIED:
|
2002-02-05 22:13:51 +00:00
|
|
|
/* fine */
|
|
|
|
break;
|
|
|
|
case HTTP_MOVED_PERM:
|
|
|
|
case HTTP_MOVED_TEMP:
|
2016-05-31 08:27:39 +00:00
|
|
|
case HTTP_TEMP_REDIRECT:
|
|
|
|
case HTTP_PERM_REDIRECT:
|
2002-02-05 22:13:51 +00:00
|
|
|
case HTTP_SEE_OTHER:
|
2012-10-22 03:00:15 +00:00
|
|
|
case HTTP_USE_PROXY:
|
2002-02-05 22:13:51 +00:00
|
|
|
/*
|
2004-02-11 09:23:35 +00:00
|
|
|
* Not so fine, but we still have to read the
|
|
|
|
* headers to get the new location.
|
2002-02-05 22:13:51 +00:00
|
|
|
*/
|
|
|
|
break;
|
|
|
|
case HTTP_NEED_AUTH:
|
2010-01-19 10:19:55 +00:00
|
|
|
if (server_challenges.valid) {
|
2002-02-05 22:13:51 +00:00
|
|
|
/*
|
2004-02-11 09:23:35 +00:00
|
|
|
* We already sent our authorization code,
|
|
|
|
* so there's nothing more we can do.
|
2002-02-05 22:13:51 +00:00
|
|
|
*/
|
2007-12-14 10:26:58 +00:00
|
|
|
http_seterr(conn->err);
|
2002-02-05 22:13:51 +00:00
|
|
|
goto ouch;
|
|
|
|
}
|
|
|
|
/* try again, but send the password this time */
|
|
|
|
if (verbose)
|
2007-12-14 10:26:58 +00:00
|
|
|
fetch_info("server requires authorization");
|
2002-02-05 22:13:51 +00:00
|
|
|
break;
|
|
|
|
case HTTP_NEED_PROXY_AUTH:
|
2010-01-19 10:19:55 +00:00
|
|
|
if (proxy_challenges.valid) {
|
|
|
|
/*
|
|
|
|
* We already sent our proxy
|
|
|
|
* authorization code, so there's
|
|
|
|
* nothing more we can do. */
|
|
|
|
http_seterr(conn->err);
|
|
|
|
goto ouch;
|
|
|
|
}
|
|
|
|
/* try again, but send the password this time */
|
|
|
|
if (verbose)
|
|
|
|
fetch_info("proxy requires authorization");
|
|
|
|
break;
|
2004-02-11 09:31:39 +00:00
|
|
|
case HTTP_BAD_RANGE:
|
|
|
|
/*
|
|
|
|
* This can happen if we ask for 0 bytes because
|
|
|
|
* we already have the whole file. Consider this
|
|
|
|
* a success for now, and check sizes later.
|
|
|
|
*/
|
|
|
|
break;
|
2002-02-05 22:13:51 +00:00
|
|
|
case HTTP_PROTOCOL_ERROR:
|
|
|
|
/* fall through */
|
|
|
|
case -1:
|
2007-12-14 10:26:58 +00:00
|
|
|
fetch_syserr();
|
2002-02-05 22:13:51 +00:00
|
|
|
goto ouch;
|
|
|
|
default:
|
2007-12-14 10:26:58 +00:00
|
|
|
http_seterr(conn->err);
|
2002-02-05 22:13:51 +00:00
|
|
|
if (!verbose)
|
|
|
|
goto ouch;
|
|
|
|
/* fall through so we can get the full error message */
|
|
|
|
}
|
|
|
|
|
2010-01-19 10:19:55 +00:00
|
|
|
/* get headers. http_next_header expects one line readahead */
|
|
|
|
if (fetch_getln(conn) == -1) {
|
2012-11-16 12:31:43 +00:00
|
|
|
fetch_syserr();
|
|
|
|
goto ouch;
|
2010-01-19 10:19:55 +00:00
|
|
|
}
|
2002-02-05 22:13:51 +00:00
|
|
|
do {
|
2012-11-16 12:31:43 +00:00
|
|
|
switch ((h = http_next_header(conn, &headerbuf, &p))) {
|
2002-02-05 22:13:51 +00:00
|
|
|
case hdr_syserror:
|
2007-12-14 10:26:58 +00:00
|
|
|
fetch_syserr();
|
2002-02-05 22:13:51 +00:00
|
|
|
goto ouch;
|
|
|
|
case hdr_error:
|
2007-12-14 10:26:58 +00:00
|
|
|
http_seterr(HTTP_PROTOCOL_ERROR);
|
2002-02-05 22:13:51 +00:00
|
|
|
goto ouch;
|
|
|
|
case hdr_content_length:
|
2007-12-14 10:26:58 +00:00
|
|
|
http_parse_length(p, &clength);
|
2002-02-05 22:13:51 +00:00
|
|
|
break;
|
|
|
|
case hdr_content_range:
|
2007-12-14 10:26:58 +00:00
|
|
|
http_parse_range(p, &offset, &length, &size);
|
2002-02-05 22:13:51 +00:00
|
|
|
break;
|
|
|
|
case hdr_last_modified:
|
2007-12-14 10:26:58 +00:00
|
|
|
http_parse_mtime(p, &mtime);
|
2002-02-05 22:13:51 +00:00
|
|
|
break;
|
|
|
|
case hdr_location:
|
2002-06-05 10:05:03 +00:00
|
|
|
if (!HTTP_REDIRECT(conn->err))
|
2002-02-05 22:13:51 +00:00
|
|
|
break;
|
2012-10-22 03:00:10 +00:00
|
|
|
/*
|
|
|
|
* if the A flag is set, we don't follow
|
|
|
|
* temporary redirects.
|
|
|
|
*/
|
|
|
|
if (noredirect &&
|
|
|
|
conn->err != HTTP_MOVED_PERM &&
|
2012-10-22 03:00:15 +00:00
|
|
|
conn->err != HTTP_PERM_REDIRECT &&
|
|
|
|
conn->err != HTTP_USE_PROXY) {
|
2012-10-22 03:00:10 +00:00
|
|
|
n = 1;
|
|
|
|
break;
|
2012-11-16 12:31:43 +00:00
|
|
|
}
|
2002-02-05 22:13:51 +00:00
|
|
|
if (new)
|
|
|
|
free(new);
|
|
|
|
if (verbose)
|
2018-05-29 10:29:43 +00:00
|
|
|
fetch_info("%d redirect to %s",
|
|
|
|
conn->err, p);
|
2002-02-05 22:13:51 +00:00
|
|
|
if (*p == '/')
|
|
|
|
/* absolute path */
|
2018-05-29 10:29:43 +00:00
|
|
|
new = fetchMakeURL(url->scheme, url->host,
|
|
|
|
url->port, p, url->user, url->pwd);
|
2002-02-05 22:13:51 +00:00
|
|
|
else
|
|
|
|
new = fetchParseURL(p);
|
|
|
|
if (new == NULL) {
|
|
|
|
/* XXX should set an error code */
|
2018-05-29 10:28:20 +00:00
|
|
|
DEBUGF("failed to parse new URL\n");
|
2002-02-05 22:13:51 +00:00
|
|
|
goto ouch;
|
|
|
|
}
|
2012-04-30 12:12:48 +00:00
|
|
|
|
|
|
|
/* Only copy credentials if the host matches */
|
2018-05-29 10:29:43 +00:00
|
|
|
if (strcmp(new->host, url->host) == 0 &&
|
|
|
|
!*new->user && !*new->pwd) {
|
2002-02-05 22:13:51 +00:00
|
|
|
strcpy(new->user, url->user);
|
|
|
|
strcpy(new->pwd, url->pwd);
|
|
|
|
}
|
|
|
|
new->offset = url->offset;
|
|
|
|
new->length = url->length;
|
2018-05-12 17:02:27 +00:00
|
|
|
new->ims_time = url->ims_time;
|
2002-02-05 22:13:51 +00:00
|
|
|
break;
|
|
|
|
case hdr_transfer_encoding:
|
|
|
|
/* XXX weak test */
|
|
|
|
chunked = (strcasecmp(p, "chunked") == 0);
|
|
|
|
break;
|
|
|
|
case hdr_www_authenticate:
|
2002-06-05 10:05:03 +00:00
|
|
|
if (conn->err != HTTP_NEED_AUTH)
|
2002-02-05 22:13:51 +00:00
|
|
|
break;
|
2010-07-28 15:29:18 +00:00
|
|
|
if (http_parse_authenticate(p, &server_challenges) == 0)
|
2010-07-01 17:44:33 +00:00
|
|
|
++n;
|
2010-01-19 10:19:55 +00:00
|
|
|
break;
|
|
|
|
case hdr_proxy_authenticate:
|
|
|
|
if (conn->err != HTTP_NEED_PROXY_AUTH)
|
|
|
|
break;
|
2010-07-28 15:29:18 +00:00
|
|
|
if (http_parse_authenticate(p, &proxy_challenges) == 0)
|
2010-07-01 17:44:33 +00:00
|
|
|
++n;
|
2002-02-05 22:13:51 +00:00
|
|
|
break;
|
|
|
|
case hdr_end:
|
|
|
|
/* fall through */
|
|
|
|
case hdr_unknown:
|
|
|
|
/* ignore */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} while (h > hdr_end);
|
|
|
|
|
|
|
|
/* we need to provide authentication */
|
2011-05-12 21:18:55 +00:00
|
|
|
if (conn->err == HTTP_NEED_AUTH ||
|
2010-01-19 10:19:55 +00:00
|
|
|
conn->err == HTTP_NEED_PROXY_AUTH) {
|
2002-06-19 08:36:00 +00:00
|
|
|
e = conn->err;
|
2011-05-12 21:18:55 +00:00
|
|
|
if ((conn->err == HTTP_NEED_AUTH &&
|
|
|
|
!server_challenges.valid) ||
|
|
|
|
(conn->err == HTTP_NEED_PROXY_AUTH &&
|
2010-01-19 10:19:55 +00:00
|
|
|
!proxy_challenges.valid)) {
|
|
|
|
/* 401/7 but no www/proxy-authenticate ?? */
|
2018-05-29 10:28:20 +00:00
|
|
|
DEBUGF("%03d without auth header\n", conn->err);
|
2010-01-19 10:19:55 +00:00
|
|
|
goto ouch;
|
|
|
|
}
|
2007-12-14 10:26:58 +00:00
|
|
|
fetch_close(conn);
|
2002-06-05 10:05:03 +00:00
|
|
|
conn = NULL;
|
2002-02-05 22:13:51 +00:00
|
|
|
continue;
|
|
|
|
}
|
2000-07-12 10:39:56 +00:00
|
|
|
|
2004-02-11 09:31:39 +00:00
|
|
|
/* requested range not satisfiable */
|
|
|
|
if (conn->err == HTTP_BAD_RANGE) {
|
2017-03-05 12:06:45 +00:00
|
|
|
if (url->offset > 0 && url->length == 0) {
|
2004-02-11 09:31:39 +00:00
|
|
|
/* asked for 0 bytes; fake it */
|
|
|
|
offset = url->offset;
|
2008-10-24 07:56:01 +00:00
|
|
|
clength = -1;
|
2004-02-11 09:31:39 +00:00
|
|
|
conn->err = HTTP_OK;
|
|
|
|
break;
|
|
|
|
} else {
|
2007-12-14 10:26:58 +00:00
|
|
|
http_seterr(conn->err);
|
2004-02-11 09:31:39 +00:00
|
|
|
goto ouch;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2002-10-03 10:42:19 +00:00
|
|
|
/* we have a hit or an error */
|
2008-12-15 08:27:44 +00:00
|
|
|
if (conn->err == HTTP_OK
|
|
|
|
|| conn->err == HTTP_NOT_MODIFIED
|
|
|
|
|| conn->err == HTTP_PARTIAL
|
|
|
|
|| HTTP_ERROR(conn->err))
|
2002-10-03 10:42:19 +00:00
|
|
|
break;
|
|
|
|
|
2002-02-05 22:13:51 +00:00
|
|
|
/* all other cases: we got a redirect */
|
2002-06-19 08:36:00 +00:00
|
|
|
e = conn->err;
|
2010-01-19 10:19:55 +00:00
|
|
|
clean_http_auth_challenges(&server_challenges);
|
2007-12-14 10:26:58 +00:00
|
|
|
fetch_close(conn);
|
2002-06-05 10:05:03 +00:00
|
|
|
conn = NULL;
|
2002-02-05 22:13:51 +00:00
|
|
|
if (!new) {
|
2018-05-29 10:28:20 +00:00
|
|
|
DEBUGF("redirect with no new location\n");
|
2002-02-05 22:13:51 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (url != URL)
|
|
|
|
fetchFreeURL(url);
|
|
|
|
url = new;
|
|
|
|
} while (++i < n);
|
|
|
|
|
|
|
|
/* we failed, or ran out of retries */
|
2002-06-05 10:05:03 +00:00
|
|
|
if (conn == NULL) {
|
2007-12-14 10:26:58 +00:00
|
|
|
http_seterr(e);
|
2000-07-12 10:39:56 +00:00
|
|
|
goto ouch;
|
2002-02-05 22:13:51 +00:00
|
|
|
}
|
|
|
|
|
2018-05-29 10:28:20 +00:00
|
|
|
DEBUGF("offset %lld, length %lld, size %lld, clength %lld\n",
|
|
|
|
(long long)offset, (long long)length,
|
|
|
|
(long long)size, (long long)clength);
|
2002-02-05 22:13:51 +00:00
|
|
|
|
2008-12-15 08:27:44 +00:00
|
|
|
if (conn->err == HTTP_NOT_MODIFIED) {
|
|
|
|
http_seterr(HTTP_NOT_MODIFIED);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
2002-02-05 22:13:51 +00:00
|
|
|
/* check for inconsistencies */
|
|
|
|
if (clength != -1 && length != -1 && clength != length) {
|
2007-12-14 10:26:58 +00:00
|
|
|
http_seterr(HTTP_PROTOCOL_ERROR);
|
2002-01-01 16:25:29 +00:00
|
|
|
goto ouch;
|
2000-05-07 20:01:55 +00:00
|
|
|
}
|
2002-02-05 22:13:51 +00:00
|
|
|
if (clength == -1)
|
|
|
|
clength = length;
|
|
|
|
if (clength != -1)
|
|
|
|
length = offset + clength;
|
|
|
|
if (length != -1 && size != -1 && length != size) {
|
2007-12-14 10:26:58 +00:00
|
|
|
http_seterr(HTTP_PROTOCOL_ERROR);
|
2000-07-12 10:39:56 +00:00
|
|
|
goto ouch;
|
2002-02-05 22:13:51 +00:00
|
|
|
}
|
|
|
|
if (size == -1)
|
|
|
|
size = length;
|
|
|
|
|
|
|
|
/* fill in stats */
|
|
|
|
if (us) {
|
|
|
|
us->size = size;
|
|
|
|
us->atime = us->mtime = mtime;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* too far? */
|
2003-01-22 17:52:27 +00:00
|
|
|
if (URL->offset > 0 && offset > URL->offset) {
|
2007-12-14 10:26:58 +00:00
|
|
|
http_seterr(HTTP_PROTOCOL_ERROR);
|
2000-07-12 10:39:56 +00:00
|
|
|
goto ouch;
|
2001-05-26 19:37:15 +00:00
|
|
|
}
|
2002-02-05 22:13:51 +00:00
|
|
|
|
|
|
|
/* report back real offset and size */
|
|
|
|
URL->offset = offset;
|
|
|
|
URL->length = clength;
|
|
|
|
|
|
|
|
/* wrap it up in a FILE */
|
2007-12-14 10:26:58 +00:00
|
|
|
if ((f = http_funopen(conn, chunked)) == NULL) {
|
|
|
|
fetch_syserr();
|
2002-02-05 22:13:51 +00:00
|
|
|
goto ouch;
|
2001-05-26 19:37:15 +00:00
|
|
|
}
|
2002-02-05 22:13:51 +00:00
|
|
|
|
2000-07-13 09:13:58 +00:00
|
|
|
if (url != URL)
|
2002-02-05 22:13:51 +00:00
|
|
|
fetchFreeURL(url);
|
|
|
|
if (purl)
|
|
|
|
fetchFreeURL(purl);
|
|
|
|
|
2002-06-05 10:05:03 +00:00
|
|
|
if (HTTP_ERROR(conn->err)) {
|
2007-12-14 10:26:58 +00:00
|
|
|
http_print_html(stderr, f);
|
2002-02-05 22:13:51 +00:00
|
|
|
fclose(f);
|
|
|
|
f = NULL;
|
|
|
|
}
|
2010-01-19 10:19:55 +00:00
|
|
|
clean_http_headerbuf(&headerbuf);
|
|
|
|
clean_http_auth_challenges(&server_challenges);
|
|
|
|
clean_http_auth_challenges(&proxy_challenges);
|
2002-02-05 22:13:51 +00:00
|
|
|
return (f);
|
1998-07-09 16:52:44 +00:00
|
|
|
|
2002-02-05 22:13:51 +00:00
|
|
|
ouch:
|
|
|
|
if (url != URL)
|
|
|
|
fetchFreeURL(url);
|
|
|
|
if (purl)
|
|
|
|
fetchFreeURL(purl);
|
2002-06-05 10:05:03 +00:00
|
|
|
if (conn != NULL)
|
2007-12-14 10:26:58 +00:00
|
|
|
fetch_close(conn);
|
2010-01-19 10:19:55 +00:00
|
|
|
clean_http_headerbuf(&headerbuf);
|
|
|
|
clean_http_auth_challenges(&server_challenges);
|
|
|
|
clean_http_auth_challenges(&proxy_challenges);
|
2002-02-05 22:13:51 +00:00
|
|
|
return (NULL);
|
2000-05-11 13:31:02 +00:00
|
|
|
}
|
|
|
|
|
2002-02-05 22:13:51 +00:00
|
|
|
|
2000-07-12 10:39:56 +00:00
|
|
|
/*****************************************************************************
|
|
|
|
* Entry points
|
2000-05-11 13:31:02 +00:00
|
|
|
*/
|
|
|
|
|
2000-07-17 21:25:00 +00:00
|
|
|
/*
|
|
|
|
* Retrieve and stat a file by HTTP
|
|
|
|
*/
|
|
|
|
FILE *
|
2001-04-24 00:06:21 +00:00
|
|
|
fetchXGetHTTP(struct url *URL, struct url_stat *us, const char *flags)
|
2000-07-17 21:25:00 +00:00
|
|
|
{
|
2007-12-18 11:03:07 +00:00
|
|
|
return (http_request(URL, "GET", us, http_get_proxy(URL, flags), flags));
|
2000-07-17 21:25:00 +00:00
|
|
|
}
|
|
|
|
|
2000-05-11 13:31:02 +00:00
|
|
|
/*
|
|
|
|
* Retrieve a file by HTTP
|
|
|
|
*/
|
|
|
|
FILE *
|
2001-04-24 00:06:21 +00:00
|
|
|
fetchGetHTTP(struct url *URL, const char *flags)
|
2000-05-11 13:31:02 +00:00
|
|
|
{
|
2002-02-05 22:13:51 +00:00
|
|
|
return (fetchXGetHTTP(URL, NULL, flags));
|
1998-07-09 16:52:44 +00:00
|
|
|
}
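/*
 * Example (illustrative, not part of the library): a minimal caller
 * that fetches a document over HTTP and copies it to stdout.  The URL
 * is made up; link with -lfetch.
 *
 *      #include <stdio.h>
 *      #include <fetch.h>
 *
 *      int
 *      main(void)
 *      {
 *              struct url *u;
 *              FILE *f;
 *              char buf[4096];
 *              size_t n;
 *
 *              if ((u = fetchParseURL("http://www.example.com/")) == NULL)
 *                      return (1);
 *              if ((f = fetchGetHTTP(u, "")) == NULL) {
 *                      fetchFreeURL(u);
 *                      return (1);
 *              }
 *              while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
 *                      fwrite(buf, 1, n, stdout);
 *              fclose(f);
 *              fetchFreeURL(u);
 *              return (0);
 *      }
 */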
|
|
|
|
|
2000-07-17 21:25:00 +00:00
|
|
|
/*
|
|
|
|
* Store a file by HTTP
|
|
|
|
*/
|
1998-07-09 16:52:44 +00:00
|
|
|
FILE *
|
2001-10-18 08:29:26 +00:00
|
|
|
fetchPutHTTP(struct url *URL __unused, const char *flags __unused)
|
1998-07-09 16:52:44 +00:00
|
|
|
{
|
2002-02-05 22:13:51 +00:00
|
|
|
warnx("fetchPutHTTP(): not implemented");
|
|
|
|
return (NULL);
|
1998-07-09 16:52:44 +00:00
|
|
|
}
|
Second of a series of cleanups to libfetch.
This commit introduces the following features:
a) the fetchStat*() functions, which return meta-information for a
document, such as size, modification time, etc.
b) the use of the com_err(3) facilities to report errors.
It also fixes a bunch of style bugs and a few logic bugs and somewhat
improves the man page.
Changed files, in alphabetical order:
Makefile:
Don't generate macros in {ftp,http}err.c.
Generate category fields for the error message lists.
Compile the error table.
Install fetch_err.h along with fetch.h.
common.c:
Remove the _netdb_errstring() macro, and add FETCH_ERR_NETDB to the
error code in the _netdb_seterr() macro.
Add categories to the _netdb_errlist table.
Report errors through the Common Error library.
common.h:
Add the DEBUG macros.
Add prototype for fetchConnect().
Remove the prototype for _fetch_errstring(), which is local to common.c
Add a category field to struct fetcherr, and define constants for
error categories.
Define macros for _{url,netdb,ftp,http}_seterr().
errors.et: (new file)
List error categories.
fetch.3:
Document the fetchStat*() functions.
Move the "unimplemented functionality" comments from NOTES to BUGS.
Document that applications which use libfetch must also use
libcom_err, and list existing error codes.
Undocument fetchLastErr{Code,String}.
Remove the (empty) DIAGNOSTICS section.
Mention Eugene Skepner in the AUTHORS section.
fetch.c:
Move the DEBUG macros to common.c
Add fetchStat() and fetchStatURL().
Generate error messages for URL parser errors, and fix a minor bug
in the parser.
Use 'struct url' instead of 'url_t'.
Remove fetchLastErr{Code,String}.
fetch.h:
Use 'struct url' instead of 'url_t', and remove the typedef.
Define struct url_stat (used by fetchStat()).
Add prototypes for fetchStat*().
Remove the declarations for fetchLastErr{Code,String}.
Include fetch_err.h.
fetch_err.et: (new file)
Error table for libfetch.
file.c:
Add fetchStatFile().
Use 'struct url' instead of 'url_t'.
ftp.c:
Add fetchStatFTP().
Use 'struct url' instead of 'url_t'.
Don't use fetchLastErrCode.
ftp.errors:
Add categories to all error messages.
http.c:
Add fetchStatHTTP().
Use 'struct url' instead of 'url_t'.
Don't use fetchLastErr{Code,Text}.
http.errors:
Add categories to all error messages.
Prompted by: jkh and Eugene Skepner
Numerous suggestions from: Garrett Wollman and Eugene Skepner
1998-11-06 22:14:08 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Get an HTTP document's metadata
|
|
|
|
*/
|
|
|
|
int
|
2001-04-24 00:06:21 +00:00
|
|
|
fetchStatHTTP(struct url *URL, struct url_stat *us, const char *flags)
|
1998-11-06 22:14:08 +00:00
|
|
|
{
|
2002-02-05 22:13:51 +00:00
|
|
|
FILE *f;
|
|
|
|
|
2007-12-18 11:03:07 +00:00
|
|
|
f = http_request(URL, "HEAD", us, http_get_proxy(URL, flags), flags);
|
2003-03-11 08:20:58 +00:00
|
|
|
if (f == NULL)
|
2002-02-05 22:13:51 +00:00
|
|
|
return (-1);
|
|
|
|
fclose(f);
|
|
|
|
return (0);
|
1998-11-06 22:14:08 +00:00
|
|
|
}
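/*
 * Example (illustrative, not part of the library): using
 * fetchStatHTTP() to get a document's size and modification time
 * without fetching the body; the URL is made up.
 *
 *      struct url *u = fetchParseURL("http://www.example.com/file.txt");
 *      struct url_stat us;
 *
 *      if (u != NULL && fetchStatHTTP(u, &us, "") == 0)
 *              printf("%lld bytes, mtime %lld\n",
 *                  (long long)us.size, (long long)us.mtime);
 *      if (u != NULL)
 *              fetchFreeURL(u);
 */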
|
1998-12-21 19:41:50 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* List a directory
|
|
|
|
*/
|
|
|
|
struct url_ent *
|
2001-10-18 08:29:26 +00:00
|
|
|
fetchListHTTP(struct url *url __unused, const char *flags __unused)
|
1998-12-21 19:41:50 +00:00
|
|
|
{
|
2002-02-05 22:13:51 +00:00
|
|
|
warnx("fetchListHTTP(): not implemented");
|
|
|
|
return (NULL);
|
1998-12-21 19:41:50 +00:00
|
|
|
}
|
2014-06-05 22:16:26 +00:00
|
|
|
|
|
|
|
FILE *
|
|
|
|
fetchReqHTTP(struct url *URL, const char *method, const char *flags,
|
|
|
|
const char *content_type, const char *body)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (http_request_body(URL, method, NULL, http_get_proxy(URL, flags),
|
|
|
|
flags, content_type, body));
|
|
|
|
}
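/*
 * Example (illustrative, not part of the library): issuing a POST
 * with a body through fetchReqHTTP(); the endpoint and payload are
 * made up.  The returned stream carries the server's response body.
 *
 *      struct url *u = fetchParseURL("http://api.example.com/submit");
 *      FILE *f;
 *
 *      if (u != NULL && (f = fetchReqHTTP(u, "POST", "",
 *          "application/x-www-form-urlencoded", "name=value")) != NULL) {
 *              ... read the response from f, then ...
 *              fclose(f);
 *      }
 *      if (u != NULL)
 *              fetchFreeURL(u);
 */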
|