Update serf-1.3.0 -> 1.3.4 - fixes multiple issues (see the CHANGES file)
including an SSL issue that turned up in the cluster with svn-1.8.8.
This commit is contained in:
commit
562a345c5d
@ -1,4 +1,59 @@
|
||||
Serf 1.3.0 [2013-07-23, from /tags/1.3.0]
|
||||
Serf 1.3.4 [2014-02-08, from /tags/1.3.4, rxxxx]
|
||||
Fix issue #119: Endless loop during ssl tunnel setup with Negotiate authn
|
||||
Fix issue #123: Can't setup ssl tunnel which sends Connection close header
|
||||
Fix a race condition when initializing OpenSSL from multiple threads (r2263)
|
||||
Fix issue #138: Incorrect pkg-config file when GSSAPI isn't configured
|
||||
|
||||
|
||||
Serf 1.3.3 [2013-12-09, from /tags/1.3.3, r2242]
|
||||
Fix issue 129: Try more addresses of multihomed servers
|
||||
Handle X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE correctly (r2225)
|
||||
Return APR_TIMEUP from poll() to enable detecting connection timeouts (r2183)
|
||||
|
||||
|
||||
Serf 1.3.2 [2013-10-04, from /tags/1.3.2, r2195]
|
||||
Fix issue 130: HTTP headers should be treated case-insensitively
|
||||
Fix issue 126: Compilation breaks with Codewarrior compiler
|
||||
Fix crash during cleanup of SSL buckets in apr_terminate() (r2145)
|
||||
Fix Windows build: Also export functions with capital letters in .def file
|
||||
Fix host header when url contains a username or password (r2170)
|
||||
Ensure less TCP package fragmentation on Windows (r2145)
|
||||
Handle authentication for responses to HEAD requests (r2178,-9)
|
||||
Improve serf_get: add option to add request headers, allow url with query,
|
||||
allow HEAD requests (r2143,r2175,-6)
|
||||
Improve RFC conformance: don't expect body for certain responses (r2011,-2)
|
||||
Do not invoke progress callback when no data was received (r2144)
|
||||
And more test suite fixes and build warning cleanups
|
||||
SCons-related fixes:
|
||||
Fix build when GSSAPI not in default include path (2155)
|
||||
Fix OpenBSD build: always map all LIBPATH entries into RPATH (r2156)
|
||||
Checksum generation in Windows shared libraries for release builds (2162)
|
||||
Mac OS X: Use MAJOR version only in dylib install name (r2161)
|
||||
Use both MAJOR and MINOR version for the shared library name (2163)
|
||||
Fix the .pc file when installing serf in a non-default LIBDIR (r2191)
|
||||
|
||||
|
||||
Serf 1.3.1 [2013-08-15, from /tags/1.3.1, r2138]
|
||||
Fix issue 77: Endless loop if server doesn't accept Negotiate authentication.
|
||||
Fix issue 114: ssl/tls renegotiation fails
|
||||
Fix issue 120: error with ssl tunnel over proxy with KeepAlive off and
|
||||
Basic authentication.
|
||||
Fixed bugs with authentication (r2057,2115,2118)
|
||||
SCons-related fixes:
|
||||
Fix issue 111: add flag to set custom library path
|
||||
Fix issue 112: add soname
|
||||
Fix issue 113: add gssapi libs in the serf pc file
|
||||
Fix issue 115: Setting RPATH on Solaris broken in SConstruct
|
||||
Fix issue 116: scons check should return non-zero exit status
|
||||
Fix issue 121: make CFLAGS, LIBS, LINKFLAGS and CPPFLAGS take a space-
|
||||
separated list of flags.
|
||||
Fix issue 122: make scons PREFIX create the folder if it doesn't exist
|
||||
Mac OS X: Fix scons --install-sandbox
|
||||
Solaris: Fix build with cc, don't use unsupported compiler flags
|
||||
Require SCons version 2.3.0 or higher now (for the soname support).
|
||||
|
||||
|
||||
Serf 1.3.0 [2013-07-23, from /tags/1.3.0, r2074]
|
||||
Fix issue 83: use PATH rather than URI within an ssltunnel (r1952)
|
||||
Fix issue 108: improved error reporting from the underlying socket (r1951)
|
||||
NEW: Switch to the SCons build system; retire serfmake, serf.mak, autotools
|
||||
|
@ -18,14 +18,14 @@ kept to a minimum to provide high performance operation.
|
||||
|
||||
1.1. SCons build system
|
||||
|
||||
serf uses SCons 2.x for its build system. If it is not installed on
|
||||
serf uses SCons 2.3 for its build system. If it is not installed on
|
||||
your system, then you can install it onto your system. If you do not
|
||||
have permissions, then you can download and install the "local"
|
||||
version into your home directory. When installed privately, simply
|
||||
create a symlink for 'scons' in your PATH to /path/to/scons/scons.py.
|
||||
|
||||
Fetch the scons-local package:
|
||||
http://prdownloads.sourceforge.net/scons/scons-local-2.0.1.tar.gz
|
||||
http://prdownloads.sourceforge.net/scons/scons-local-2.3.0.tar.gz
|
||||
|
||||
|
||||
1.2 Building serf
|
||||
@ -54,6 +54,12 @@ distinct directory from the source), you can use:
|
||||
|
||||
$ scons -Y /path/to/serf/source
|
||||
|
||||
If you plan to install the library on a system that uses different
|
||||
paths for architecture dependent files, specify LIBDIR. LIBDIR defaults
|
||||
to /usr/local/lib otherwise. Example for a 64 bit GNU/Linux system:
|
||||
|
||||
$ scons PREFIX=/usr/ LIBDIR=/usr/lib64
|
||||
|
||||
At any point, the current settings can be examined:
|
||||
|
||||
$ scons --help
|
||||
@ -74,6 +80,13 @@ specified on the install command line:
|
||||
|
||||
$ scons PREFIX=/some/path install
|
||||
|
||||
Distribution package maintainers regularly install to a buildroot, and
|
||||
would normally use something like below in their build systems, with
|
||||
placeholders for the specific paths:
|
||||
|
||||
$ scons PREFIX=/usr/ LIBDIR=/usr/lib64
|
||||
$ scons install --install-sandbox=/path/to/buildroot
|
||||
|
||||
|
||||
1.4 Cleaning up the build
|
||||
|
||||
|
@ -19,6 +19,8 @@ import sys
|
||||
import os
|
||||
import re
|
||||
|
||||
EnsureSConsVersion(2,3,0)
|
||||
|
||||
HEADER_FILES = ['serf.h',
|
||||
'serf_bucket_types.h',
|
||||
'serf_bucket_util.h',
|
||||
@ -34,23 +36,35 @@ def _converter(val):
|
||||
if val == 'none':
|
||||
val = []
|
||||
else:
|
||||
val = val.split(',')
|
||||
val = val.split(' ')
|
||||
return val
|
||||
|
||||
def RawListVariable(key, help, default):
|
||||
"""
|
||||
The input parameters describe a 'raw string list' option. This class
|
||||
accepts a comma separated list and converts it to a space separated
|
||||
list.
|
||||
accepts a space-separated string and converts it to a list.
|
||||
"""
|
||||
return (key, '%s' % (help), default, None, lambda val: _converter(val))
|
||||
|
||||
# Custom path validator, creates directory when a specified option is set.
# To be used to ensure a PREFIX directory is only created when installing.
def createPathIsDirCreateWithTarget(target):
  # Return a PathVariable validator closed over `target`: the directory is
  # only created when `target` is one of the requested build targets;
  # otherwise the path is accepted as-is without touching the filesystem.
  def my_validator(key, val, env):
    requested = [str(t) for t in BUILD_TARGETS]
    if target in requested:
      return PathVariable.PathIsDirCreate(key, val, env)
    return PathVariable.PathAccept(key, val, env)
  return my_validator
|
||||
|
||||
# default directories
|
||||
if sys.platform == 'win32':
|
||||
default_incdir='..'
|
||||
default_libdir='..'
|
||||
default_prefix='Debug'
|
||||
else:
|
||||
default_libdir='/usr'
|
||||
default_incdir='/usr'
|
||||
default_libdir='$PREFIX/lib'
|
||||
default_prefix='/usr/local'
|
||||
|
||||
opts = Variables(files=[SAVED_CONFIG])
|
||||
@ -58,22 +72,26 @@ opts.AddVariables(
|
||||
PathVariable('PREFIX',
|
||||
'Directory to install under',
|
||||
default_prefix,
|
||||
PathVariable.PathIsDir),
|
||||
createPathIsDirCreateWithTarget('install')),
|
||||
PathVariable('LIBDIR',
|
||||
'Directory to install architecture dependent libraries under',
|
||||
default_libdir,
|
||||
createPathIsDirCreateWithTarget('install')),
|
||||
PathVariable('APR',
|
||||
"Path to apr-1-config, or to APR's install area",
|
||||
default_libdir,
|
||||
default_incdir,
|
||||
PathVariable.PathAccept),
|
||||
PathVariable('APU',
|
||||
"Path to apu-1-config, or to APU's install area",
|
||||
default_libdir,
|
||||
default_incdir,
|
||||
PathVariable.PathAccept),
|
||||
PathVariable('OPENSSL',
|
||||
"Path to OpenSSL's install area",
|
||||
default_libdir,
|
||||
default_incdir,
|
||||
PathVariable.PathIsDir),
|
||||
PathVariable('ZLIB',
|
||||
"Path to zlib's install area",
|
||||
default_libdir,
|
||||
default_incdir,
|
||||
PathVariable.PathIsDir),
|
||||
PathVariable('GSSAPI',
|
||||
"Path to GSSAPI's install area",
|
||||
@ -86,14 +104,14 @@ opts.AddVariables(
|
||||
"Enable using a static compiled APR",
|
||||
False),
|
||||
RawListVariable('CC', "Command name or path of the C compiler", None),
|
||||
RawListVariable('CFLAGS', "Extra flags for the C compiler (comma separated)",
|
||||
RawListVariable('CFLAGS', "Extra flags for the C compiler (space-separated)",
|
||||
None),
|
||||
RawListVariable('LIBS', "Extra libraries passed to the linker, "
|
||||
"e.g. -l<library> (comma separated)", None),
|
||||
RawListVariable('LINKFLAGS', "Extra flags for the linker (comma separated)",
|
||||
"e.g. \"-l<library1> -l<library2>\" (space separated)", None),
|
||||
RawListVariable('LINKFLAGS', "Extra flags for the linker (space-separated)",
|
||||
None),
|
||||
RawListVariable('CPPFLAGS', "Extra flags for the C preprocessor "
|
||||
"(comma separated)", None),
|
||||
"(space separated)", None),
|
||||
)
|
||||
|
||||
if sys.platform == 'win32':
|
||||
@ -146,6 +164,8 @@ match = re.search('SERF_MAJOR_VERSION ([0-9]+).*'
|
||||
re.DOTALL)
|
||||
MAJOR, MINOR, PATCH = [int(x) for x in match.groups()]
|
||||
env.Append(MAJOR=str(MAJOR))
|
||||
env.Append(MINOR=str(MINOR))
|
||||
env.Append(PATCH=str(PATCH))
|
||||
|
||||
# Calling external programs is okay if we're not cleaning or printing help.
|
||||
# (cleaning: no sense in fetching information; help: we may not know where
|
||||
@ -181,10 +201,18 @@ opts.Save(SAVED_CONFIG, env)
|
||||
# PLATFORM-SPECIFIC BUILD TWEAKS
|
||||
|
||||
thisdir = os.getcwd()
|
||||
libdir = '$PREFIX/lib'
|
||||
libdir = '$LIBDIR'
|
||||
incdir = '$PREFIX/include/serf-$MAJOR'
|
||||
|
||||
LIBNAME = 'libserf-${MAJOR}'
|
||||
# This version string is used in the dynamic library name, and for Mac OS X also
|
||||
# for the current_version and compatibility_version options in the .dylib
|
||||
#
|
||||
# Unfortunately we can't set the .dylib compatibility_version option separately
|
||||
# from current_version, so don't use the PATCH level to avoid that build and
|
||||
# runtime patch levels have to be identical.
|
||||
env['SHLIBVERSION'] = '%d.%d.%d' % (MAJOR, MINOR, 0)
|
||||
|
||||
LIBNAME = 'libserf-%d' % (MAJOR,)
|
||||
if sys.platform != 'win32':
|
||||
LIBNAMESTATIC = LIBNAME
|
||||
else:
|
||||
@ -196,23 +224,17 @@ env.Append(RPATH=libdir,
|
||||
if sys.platform == 'darwin':
|
||||
# linkflags.append('-Wl,-install_name,@executable_path/%s.dylib' % (LIBNAME,))
|
||||
env.Append(LINKFLAGS='-Wl,-install_name,%s/%s.dylib' % (thisdir, LIBNAME,))
|
||||
# 'man ld' says positive non-zero for the first number, so we add one.
|
||||
# Mac's interpretation of compatibility is the same as our MINOR version.
|
||||
env.Append(LINKFLAGS='-Wl,-compatibility_version,%d' % (MINOR+1,))
|
||||
env.Append(LINKFLAGS='-Wl,-current_version,%d.%d' % (MINOR+1, PATCH,))
|
||||
|
||||
if sys.platform != 'win32':
|
||||
### gcc only. figure out appropriate test / better way to check these
|
||||
### flags, and check for gcc.
|
||||
env.Append(CFLAGS='-std=c89')
|
||||
env.Append(CCFLAGS=[
|
||||
'-Wdeclaration-after-statement',
|
||||
'-Wmissing-prototypes',
|
||||
])
|
||||
|
||||
### -Wall is not available on Solaris
|
||||
### These warnings are not available on Solaris
|
||||
if sys.platform != 'sunos5':
|
||||
env.Append(CCFLAGS='-Wall')
|
||||
env.Append(CCFLAGS=['-Wdeclaration-after-statement',
|
||||
'-Wmissing-prototypes',
|
||||
'-Wall'])
|
||||
|
||||
if debug:
|
||||
env.Append(CCFLAGS='-g')
|
||||
@ -239,6 +261,7 @@ else:
|
||||
# Optimize for speed, use DLL runtime
|
||||
env.Append(CCFLAGS=['/O2', '/MD'])
|
||||
env.Append(CPPDEFINES='NDEBUG')
|
||||
env.Append(LINKFLAGS='/RELEASE')
|
||||
|
||||
# PLAN THE BUILD
|
||||
SHARED_SOURCES = []
|
||||
@ -334,28 +357,32 @@ else:
|
||||
|
||||
# If build with gssapi, get its information and define SERF_HAVE_GSSAPI
|
||||
if gssapi and CALLOUT_OKAY:
|
||||
env.ParseConfig('$GSSAPI --libs gssapi')
|
||||
env.ParseConfig('$GSSAPI --cflags gssapi')
|
||||
def parse_libs(env, cmd, unique=1):
|
||||
env['GSSAPI_LIBS'] = cmd.strip()
|
||||
return env.MergeFlags(cmd, unique)
|
||||
env.ParseConfig('$GSSAPI --libs gssapi', parse_libs)
|
||||
env.Append(CPPDEFINES='SERF_HAVE_GSSAPI')
|
||||
if sys.platform == 'win32':
|
||||
env.Append(CPPDEFINES=['SERF_HAVE_SSPI'])
|
||||
|
||||
# On Solaris, the -R values that APR describes never make it into actual
|
||||
# On some systems, the -R values that APR describes never make it into actual
|
||||
# RPATH flags. We'll manually map all directories in LIBPATH into new
|
||||
# flags to set RPATH values.
|
||||
if sys.platform == 'sunos5':
|
||||
for d in env['LIBPATH']:
|
||||
env.Append(RPATH=d)
|
||||
for d in env['LIBPATH']:
|
||||
env.Append(RPATH=':'+d)
|
||||
|
||||
# Set up the construction of serf-*.pc
|
||||
# TODO: add gssapi libs
|
||||
pkgconfig = env.Textfile('serf-%d.pc' % (MAJOR,),
|
||||
env.File('build/serf.pc.in'),
|
||||
SUBST_DICT = {
|
||||
'@MAJOR@': str(MAJOR),
|
||||
'@PREFIX@': '$PREFIX',
|
||||
'@LIBDIR@': '$LIBDIR',
|
||||
'@INCLUDE_SUBDIR@': 'serf-%d' % (MAJOR,),
|
||||
'@VERSION@': '%d.%d.%d' % (MAJOR, MINOR, PATCH),
|
||||
'@LIBS@': '%s %s -lz' % (apu_libs, apr_libs),
|
||||
'@LIBS@': '%s %s %s -lz' % (apu_libs, apr_libs,
|
||||
env.get('GSSAPI_LIBS', '')),
|
||||
})
|
||||
|
||||
env.Default(lib_static, lib_shared, pkgconfig)
|
||||
@ -371,16 +398,22 @@ if CALLOUT_OKAY:
|
||||
# INSTALLATION STUFF
|
||||
|
||||
install_static = env.Install(libdir, lib_static)
|
||||
install_shared = env.Install(libdir, lib_shared)
|
||||
install_shared = env.InstallVersionedLib(libdir, lib_shared)
|
||||
|
||||
if sys.platform == 'darwin':
|
||||
# Change the shared library install name (id) to its final name and location.
|
||||
# Notes:
|
||||
# If --install-sandbox=<path> is specified, install_shared_path will point
|
||||
# to a path in the sandbox. We can't use that path because the sandbox is
|
||||
# only a temporary location. The id should be the final target path.
|
||||
# Also, we shouldn't use the complete version number for id, as that'll
|
||||
# make applications depend on the exact major.minor.patch version of serf.
|
||||
|
||||
install_shared_path = install_shared[0].abspath
|
||||
target_install_shared_path = os.path.join(libdir, '%s.dylib' % LIBNAME)
|
||||
env.AddPostAction(install_shared, ('install_name_tool -id %s %s'
|
||||
% (install_shared_path,
|
||||
% (target_install_shared_path,
|
||||
install_shared_path)))
|
||||
### construct shared lib symlinks. this also means install the lib
|
||||
### as libserf-2.1.0.0.dylib, then add the symlinks.
|
||||
### note: see InstallAs
|
||||
|
||||
env.Alias('install-lib', [install_static, install_shared,
|
||||
])
|
||||
|
@ -23,7 +23,8 @@
|
||||
#include <apr_lib.h>
|
||||
|
||||
static apr_status_t
|
||||
default_auth_response_handler(peer_t peer,
|
||||
default_auth_response_handler(const serf__authn_scheme_t *scheme,
|
||||
peer_t peer,
|
||||
int code,
|
||||
serf_connection_t *conn,
|
||||
serf_request_t *request,
|
||||
@ -151,6 +152,17 @@ static int handle_auth_headers(int code,
|
||||
if (!auth_hdr)
|
||||
continue;
|
||||
|
||||
if (code == 401) {
|
||||
authn_info = serf__get_authn_info_for_server(conn);
|
||||
} else {
|
||||
authn_info = &ctx->proxy_authn_info;
|
||||
}
|
||||
|
||||
if (authn_info->failed_authn_types & scheme->type) {
|
||||
/* Skip this authn type since we already tried it before. */
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Found a matching scheme */
|
||||
status = APR_SUCCESS;
|
||||
|
||||
@ -159,11 +171,6 @@ static int handle_auth_headers(int code,
|
||||
serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
|
||||
"... matched: %s\n", scheme->name);
|
||||
|
||||
if (code == 401) {
|
||||
authn_info = serf__get_authn_info_for_server(conn);
|
||||
} else {
|
||||
authn_info = &ctx->proxy_authn_info;
|
||||
}
|
||||
/* If this is the first time we use this scheme on this context and/or
|
||||
this connection, make sure to initialize the authentication handler
|
||||
first. */
|
||||
@ -198,6 +205,12 @@ static int handle_auth_headers(int code,
|
||||
*/
|
||||
serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
|
||||
"%s authentication failed.\n", scheme->name);
|
||||
|
||||
/* Clear per-request auth_baton when switching to next auth scheme. */
|
||||
request->auth_baton = NULL;
|
||||
|
||||
/* Remember failed auth types to skip in future. */
|
||||
authn_info->failed_authn_types |= scheme->type;
|
||||
}
|
||||
|
||||
return status;
|
||||
@ -221,7 +234,7 @@ static int store_header_in_dict(void *baton,
|
||||
char *auth_name, *c;
|
||||
|
||||
/* We're only interested in xxxx-Authenticate headers. */
|
||||
if (strcmp(key, ab->header) != 0)
|
||||
if (strcasecmp(key, ab->header) != 0)
|
||||
return 0;
|
||||
|
||||
/* Extract the authentication scheme name. */
|
||||
@ -378,16 +391,16 @@ apr_status_t serf__handle_auth_response(int *consumed_response,
|
||||
authn_info = serf__get_authn_info_for_server(conn);
|
||||
if (authn_info->scheme) {
|
||||
validate_resp = authn_info->scheme->validate_response_func;
|
||||
resp_status = validate_resp(HOST, sl.code, conn, request, response,
|
||||
pool);
|
||||
resp_status = validate_resp(authn_info->scheme, HOST, sl.code,
|
||||
conn, request, response, pool);
|
||||
}
|
||||
|
||||
/* Validate the response proxy authn headers. */
|
||||
authn_info = &ctx->proxy_authn_info;
|
||||
if (!resp_status && authn_info->scheme) {
|
||||
validate_resp = authn_info->scheme->validate_response_func;
|
||||
resp_status = validate_resp(PROXY, sl.code, conn, request, response,
|
||||
pool);
|
||||
resp_status = validate_resp(authn_info->scheme, PROXY, sl.code,
|
||||
conn, request, response, pool);
|
||||
}
|
||||
|
||||
if (resp_status) {
|
||||
|
@ -78,7 +78,8 @@ apr_status_t serf__setup_request_digest_auth(peer_t peer,
|
||||
const char *method,
|
||||
const char *uri,
|
||||
serf_bucket_t *hdrs_bkt);
|
||||
apr_status_t serf__validate_response_digest_auth(peer_t peer,
|
||||
apr_status_t serf__validate_response_digest_auth(const serf__authn_scheme_t *scheme,
|
||||
peer_t peer,
|
||||
int code,
|
||||
serf_connection_t *conn,
|
||||
serf_request_t *request,
|
||||
@ -108,7 +109,8 @@ apr_status_t serf__setup_request_spnego_auth(peer_t peer,
|
||||
const char *method,
|
||||
const char *uri,
|
||||
serf_bucket_t *hdrs_bkt);
|
||||
apr_status_t serf__validate_response_spnego_auth(peer_t peer,
|
||||
apr_status_t serf__validate_response_spnego_auth(const serf__authn_scheme_t *scheme,
|
||||
peer_t peer,
|
||||
int code,
|
||||
serf_connection_t *conn,
|
||||
serf_request_t *request,
|
||||
|
@ -48,7 +48,7 @@ serf__handle_basic_auth(int code,
|
||||
apr_status_t status;
|
||||
apr_pool_t *cred_pool;
|
||||
char *username, *password, *realm_name;
|
||||
const char *eq, *realm;
|
||||
const char *eq, *realm = NULL;
|
||||
|
||||
/* Can't do Basic authentication if there's no callback to get
|
||||
username & password. */
|
||||
|
@ -96,8 +96,9 @@ random_cnonce(apr_pool_t *pool)
|
||||
return hex_encode((unsigned char*)buf, pool);
|
||||
}
|
||||
|
||||
static const char *
|
||||
build_digest_ha1(const char *username,
|
||||
static apr_status_t
|
||||
build_digest_ha1(const char **out_ha1,
|
||||
const char *username,
|
||||
const char *password,
|
||||
const char *realm_name,
|
||||
apr_pool_t *pool)
|
||||
@ -113,12 +114,17 @@ build_digest_ha1(const char *username,
|
||||
realm_name,
|
||||
password);
|
||||
status = apr_md5(ha1, tmp, strlen(tmp));
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
return hex_encode(ha1, pool);
|
||||
*out_ha1 = hex_encode(ha1, pool);
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
static const char *
|
||||
build_digest_ha2(const char *uri,
|
||||
static apr_status_t
|
||||
build_digest_ha2(const char **out_ha2,
|
||||
const char *uri,
|
||||
const char *method,
|
||||
const char *qop,
|
||||
apr_pool_t *pool)
|
||||
@ -134,17 +140,21 @@ build_digest_ha2(const char *uri,
|
||||
method,
|
||||
uri);
|
||||
status = apr_md5(ha2, tmp, strlen(tmp));
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
return hex_encode(ha2, pool);
|
||||
*out_ha2 = hex_encode(ha2, pool);
|
||||
|
||||
return APR_SUCCESS;
|
||||
} else {
|
||||
/* TODO: auth-int isn't supported! */
|
||||
return APR_ENOTIMPL;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static const char *
|
||||
build_auth_header(digest_authn_info_t *digest_info,
|
||||
static apr_status_t
|
||||
build_auth_header(const char **out_header,
|
||||
digest_authn_info_t *digest_info,
|
||||
const char *path,
|
||||
const char *method,
|
||||
apr_pool_t *pool)
|
||||
@ -156,7 +166,9 @@ build_auth_header(digest_authn_info_t *digest_info,
|
||||
const char *response_hdr_hex;
|
||||
apr_status_t status;
|
||||
|
||||
ha2 = build_digest_ha2(path, method, digest_info->qop, pool);
|
||||
status = build_digest_ha2(&ha2, path, method, digest_info->qop, pool);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
hdr = apr_psprintf(pool,
|
||||
"Digest realm=\"%s\","
|
||||
@ -194,6 +206,9 @@ build_auth_header(digest_authn_info_t *digest_info,
|
||||
}
|
||||
|
||||
status = apr_md5(response_hdr, response, strlen(response));
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
response_hdr_hex = hex_encode(response_hdr, pool);
|
||||
|
||||
hdr = apr_psprintf(pool, "%s, response=\"%s\"", hdr, response_hdr_hex);
|
||||
@ -207,7 +222,9 @@ build_auth_header(digest_authn_info_t *digest_info,
|
||||
digest_info->algorithm);
|
||||
}
|
||||
|
||||
return hdr;
|
||||
*out_header = hdr;
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
apr_status_t
|
||||
@ -330,8 +347,8 @@ serf__handle_digest_auth(int code,
|
||||
digest_info->username = apr_pstrdup(digest_info->pool, username);
|
||||
digest_info->digest_nc++;
|
||||
|
||||
digest_info->ha1 = build_digest_ha1(username, password, digest_info->realm,
|
||||
digest_info->pool);
|
||||
status = build_digest_ha1(&digest_info->ha1, username, password,
|
||||
digest_info->realm, digest_info->pool);
|
||||
|
||||
apr_pool_destroy(cred_pool);
|
||||
|
||||
@ -339,7 +356,7 @@ serf__handle_digest_auth(int code,
|
||||
likes. */
|
||||
serf_connection_set_max_outstanding_requests(conn, 0);
|
||||
|
||||
return APR_SUCCESS;
|
||||
return status;
|
||||
}
|
||||
|
||||
apr_status_t
|
||||
@ -387,7 +404,7 @@ serf__setup_request_digest_auth(peer_t peer,
|
||||
serf_context_t *ctx = conn->ctx;
|
||||
serf__authn_info_t *authn_info;
|
||||
digest_authn_info_t *digest_info;
|
||||
apr_status_t status = APR_SUCCESS;
|
||||
apr_status_t status;
|
||||
|
||||
if (peer == HOST) {
|
||||
authn_info = serf__get_authn_info_for_server(conn);
|
||||
@ -421,8 +438,10 @@ serf__setup_request_digest_auth(peer_t peer,
|
||||
/* Build a new Authorization header. */
|
||||
digest_info->header = (peer == HOST) ? "Authorization" :
|
||||
"Proxy-Authorization";
|
||||
value = build_auth_header(digest_info, path, method,
|
||||
conn->pool);
|
||||
status = build_auth_header(&value, digest_info, path, method,
|
||||
conn->pool);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
serf_bucket_headers_setn(hdrs_bkt, digest_info->header,
|
||||
value);
|
||||
@ -431,14 +450,15 @@ serf__setup_request_digest_auth(peer_t peer,
|
||||
/* Store the uri of this request on the serf_request_t object, to make
|
||||
it available when validating the Authentication-Info header of the
|
||||
matching response. */
|
||||
request->auth_baton = path;
|
||||
request->auth_baton = (void *)path;
|
||||
}
|
||||
|
||||
return status;
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
apr_status_t
|
||||
serf__validate_response_digest_auth(peer_t peer,
|
||||
serf__validate_response_digest_auth(const serf__authn_scheme_t *scheme,
|
||||
peer_t peer,
|
||||
int code,
|
||||
serf_connection_t *conn,
|
||||
serf_request_t *request,
|
||||
@ -453,6 +473,7 @@ serf__validate_response_digest_auth(peer_t peer,
|
||||
const char *nc_str = NULL;
|
||||
serf_bucket_t *hdrs;
|
||||
serf_context_t *ctx = conn->ctx;
|
||||
apr_status_t status;
|
||||
|
||||
hdrs = serf_bucket_response_get_headers(response);
|
||||
|
||||
@ -516,7 +537,10 @@ serf__validate_response_digest_auth(peer_t peer,
|
||||
}
|
||||
digest_info = authn_info->baton;
|
||||
|
||||
ha2 = build_digest_ha2(req_uri, "", qop, pool);
|
||||
status = build_digest_ha2(&ha2, req_uri, "", qop, pool);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
tmp = apr_psprintf(pool, "%s:%s:%s:%s:%s:%s",
|
||||
digest_info->ha1, digest_info->nonce, nc_str,
|
||||
digest_info->cnonce, digest_info->qop, ha2);
|
||||
|
@ -181,7 +181,8 @@ typedef struct
|
||||
claim to be. The session key can only be used with the HTTP service
|
||||
on the target host. */
|
||||
static apr_status_t
|
||||
gss_api_get_credentials(char *token, apr_size_t token_len,
|
||||
gss_api_get_credentials(serf_connection_t *conn,
|
||||
char *token, apr_size_t token_len,
|
||||
const char *hostname,
|
||||
const char **buf, apr_size_t *buf_len,
|
||||
gss_authn_info_t *gss_info)
|
||||
@ -202,6 +203,7 @@ gss_api_get_credentials(char *token, apr_size_t token_len,
|
||||
|
||||
/* Establish a security context to the server. */
|
||||
status = serf__spnego_init_sec_context(
|
||||
conn,
|
||||
gss_info->gss_ctx,
|
||||
KRB_HTTP_SERVICE, hostname,
|
||||
&input_buf,
|
||||
@ -212,7 +214,11 @@ gss_api_get_credentials(char *token, apr_size_t token_len,
|
||||
|
||||
switch(status) {
|
||||
case APR_SUCCESS:
|
||||
gss_info->state = gss_api_auth_completed;
|
||||
if (output_buf.length == 0) {
|
||||
gss_info->state = gss_api_auth_completed;
|
||||
} else {
|
||||
gss_info->state = gss_api_auth_in_progress;
|
||||
}
|
||||
break;
|
||||
case APR_EAGAIN:
|
||||
gss_info->state = gss_api_auth_in_progress;
|
||||
@ -242,6 +248,7 @@ do_auth(peer_t peer,
|
||||
int code,
|
||||
gss_authn_info_t *gss_info,
|
||||
serf_connection_t *conn,
|
||||
serf_request_t *request,
|
||||
const char *auth_hdr,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
@ -306,6 +313,14 @@ do_auth(peer_t peer,
|
||||
break;
|
||||
}
|
||||
|
||||
if (request->auth_baton && !token) {
|
||||
/* We provided token with this request, but server responded with empty
|
||||
authentication header. This means server rejected our credentials.
|
||||
XXX: Probably we need separate error code for this case like
|
||||
SERF_ERROR_AUTHN_CREDS_REJECTED? */
|
||||
return SERF_ERROR_AUTHN_FAILED;
|
||||
}
|
||||
|
||||
/* If the server didn't provide us with a token, start with a new initial
|
||||
step in the SPNEGO authentication. */
|
||||
if (!token) {
|
||||
@ -314,14 +329,16 @@ do_auth(peer_t peer,
|
||||
}
|
||||
|
||||
if (peer == HOST) {
|
||||
status = gss_api_get_credentials(token, token_len,
|
||||
status = gss_api_get_credentials(conn,
|
||||
token, token_len,
|
||||
conn->host_info.hostname,
|
||||
&tmp, &tmp_len,
|
||||
gss_info);
|
||||
} else {
|
||||
char *proxy_host;
|
||||
apr_getnameinfo(&proxy_host, conn->ctx->proxy_address, 0);
|
||||
status = gss_api_get_credentials(token, token_len, proxy_host,
|
||||
status = gss_api_get_credentials(conn,
|
||||
token, token_len, proxy_host,
|
||||
&tmp, &tmp_len,
|
||||
gss_info);
|
||||
}
|
||||
@ -357,24 +374,32 @@ serf__init_spnego_connection(const serf__authn_scheme_t *scheme,
|
||||
serf_connection_t *conn,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
gss_authn_info_t *gss_info;
|
||||
apr_status_t status;
|
||||
|
||||
gss_info = apr_pcalloc(conn->pool, sizeof(*gss_info));
|
||||
gss_info->pool = conn->pool;
|
||||
gss_info->state = gss_api_auth_not_started;
|
||||
gss_info->pstate = pstate_init;
|
||||
status = serf__spnego_create_sec_context(&gss_info->gss_ctx, scheme,
|
||||
gss_info->pool, pool);
|
||||
|
||||
if (status) {
|
||||
return status;
|
||||
}
|
||||
serf_context_t *ctx = conn->ctx;
|
||||
serf__authn_info_t *authn_info;
|
||||
gss_authn_info_t *gss_info = NULL;
|
||||
|
||||
/* For proxy authentication, reuse the gss context for all connections.
|
||||
For server authentication, create a new gss context per connection. */
|
||||
if (code == 401) {
|
||||
conn->authn_baton = gss_info;
|
||||
authn_info = &conn->authn_info;
|
||||
} else {
|
||||
conn->proxy_authn_baton = gss_info;
|
||||
authn_info = &ctx->proxy_authn_info;
|
||||
}
|
||||
gss_info = authn_info->baton;
|
||||
|
||||
if (!gss_info) {
|
||||
apr_status_t status;
|
||||
|
||||
gss_info = apr_pcalloc(conn->pool, sizeof(*gss_info));
|
||||
gss_info->pool = conn->pool;
|
||||
gss_info->state = gss_api_auth_not_started;
|
||||
gss_info->pstate = pstate_init;
|
||||
status = serf__spnego_create_sec_context(&gss_info->gss_ctx, scheme,
|
||||
gss_info->pool, pool);
|
||||
if (status) {
|
||||
return status;
|
||||
}
|
||||
authn_info->baton = gss_info;
|
||||
}
|
||||
|
||||
/* Make serf send the initial requests one by one */
|
||||
@ -397,13 +422,15 @@ serf__handle_spnego_auth(int code,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
serf_connection_t *conn = request->conn;
|
||||
gss_authn_info_t *gss_info = (code == 401) ? conn->authn_baton :
|
||||
conn->proxy_authn_baton;
|
||||
serf_context_t *ctx = conn->ctx;
|
||||
gss_authn_info_t *gss_info = (code == 401) ? conn->authn_info.baton :
|
||||
ctx->proxy_authn_info.baton;
|
||||
|
||||
return do_auth(code == 401 ? HOST : PROXY,
|
||||
code,
|
||||
gss_info,
|
||||
request->conn,
|
||||
request,
|
||||
auth_hdr,
|
||||
pool);
|
||||
}
|
||||
@ -418,8 +445,9 @@ serf__setup_request_spnego_auth(peer_t peer,
|
||||
const char *uri,
|
||||
serf_bucket_t *hdrs_bkt)
|
||||
{
|
||||
gss_authn_info_t *gss_info = (peer == HOST) ? conn->authn_baton :
|
||||
conn->proxy_authn_baton;
|
||||
serf_context_t *ctx = conn->ctx;
|
||||
gss_authn_info_t *gss_info = (peer == HOST) ? conn->authn_info.baton :
|
||||
ctx->proxy_authn_info.baton;
|
||||
|
||||
/* If we have an ongoing authentication handshake, the handler of the
|
||||
previous response will have created the authn headers for this request
|
||||
@ -431,6 +459,10 @@ serf__setup_request_spnego_auth(peer_t peer,
|
||||
serf_bucket_headers_setn(hdrs_bkt, gss_info->header,
|
||||
gss_info->value);
|
||||
|
||||
/* Remember that we're using this request for authentication
|
||||
handshake. */
|
||||
request->auth_baton = (void*) TRUE;
|
||||
|
||||
/* We should send each token only once. */
|
||||
gss_info->header = NULL;
|
||||
gss_info->value = NULL;
|
||||
@ -469,6 +501,7 @@ serf__setup_request_spnego_auth(peer_t peer,
|
||||
code,
|
||||
gss_info,
|
||||
conn,
|
||||
request,
|
||||
0l, /* no response authn header */
|
||||
conn->pool);
|
||||
if (status)
|
||||
@ -476,6 +509,11 @@ serf__setup_request_spnego_auth(peer_t peer,
|
||||
|
||||
serf_bucket_headers_setn(hdrs_bkt, gss_info->header,
|
||||
gss_info->value);
|
||||
|
||||
/* Remember that we're using this request for authentication
|
||||
handshake. */
|
||||
request->auth_baton = (void*) TRUE;
|
||||
|
||||
/* We should send each token only once. */
|
||||
gss_info->header = NULL;
|
||||
gss_info->value = NULL;
|
||||
@ -486,19 +524,70 @@ serf__setup_request_spnego_auth(peer_t peer,
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* Baton passed to the get_auth_header callback function.
|
||||
*/
|
||||
typedef struct {
|
||||
const char *hdr_name;
|
||||
const char *auth_name;
|
||||
const char *hdr_value;
|
||||
apr_pool_t *pool;
|
||||
} get_auth_header_baton_t;
|
||||
|
||||
static int
|
||||
get_auth_header_cb(void *baton,
|
||||
const char *key,
|
||||
const char *header)
|
||||
{
|
||||
get_auth_header_baton_t *b = baton;
|
||||
|
||||
/* We're only interested in xxxx-Authenticate headers. */
|
||||
if (strcasecmp(key, b->hdr_name) != 0)
|
||||
return 0;
|
||||
|
||||
/* Check if header value starts with interesting auth name. */
|
||||
if (strncmp(header, b->auth_name, strlen(b->auth_name)) == 0) {
|
||||
/* Save interesting header value and stop iteration. */
|
||||
b->hdr_value = apr_pstrdup(b->pool, header);
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const char *
|
||||
get_auth_header(serf_bucket_t *hdrs,
|
||||
const char *hdr_name,
|
||||
const char *auth_name,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
get_auth_header_baton_t b;
|
||||
|
||||
b.auth_name = hdr_name;
|
||||
b.hdr_name = auth_name;
|
||||
b.hdr_value = NULL;
|
||||
b.pool = pool;
|
||||
|
||||
serf_bucket_headers_do(hdrs, get_auth_header_cb, &b);
|
||||
|
||||
return b.hdr_value;
|
||||
}
|
||||
|
||||
/* Function is called when 2xx responses are received. Normally we don't
|
||||
* have to do anything, except for the first response after the
|
||||
* authentication handshake. This specific response includes authentication
|
||||
* data which should be validated by the client (mutual authentication).
|
||||
*/
|
||||
apr_status_t
|
||||
serf__validate_response_spnego_auth(peer_t peer,
|
||||
serf__validate_response_spnego_auth(const serf__authn_scheme_t *scheme,
|
||||
peer_t peer,
|
||||
int code,
|
||||
serf_connection_t *conn,
|
||||
serf_request_t *request,
|
||||
serf_bucket_t *response,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
serf_context_t *ctx = conn->ctx;
|
||||
gss_authn_info_t *gss_info;
|
||||
const char *auth_hdr_name;
|
||||
|
||||
@ -511,10 +600,10 @@ serf__validate_response_spnego_auth(peer_t peer,
|
||||
"Validate Negotiate response header.\n");
|
||||
|
||||
if (peer == HOST) {
|
||||
gss_info = conn->authn_baton;
|
||||
gss_info = conn->authn_info.baton;
|
||||
auth_hdr_name = "WWW-Authenticate";
|
||||
} else {
|
||||
gss_info = conn->proxy_authn_baton;
|
||||
gss_info = ctx->proxy_authn_info.baton;
|
||||
auth_hdr_name = "Proxy-Authenticate";
|
||||
}
|
||||
|
||||
@ -524,11 +613,23 @@ serf__validate_response_spnego_auth(peer_t peer,
|
||||
apr_status_t status;
|
||||
|
||||
hdrs = serf_bucket_response_get_headers(response);
|
||||
auth_hdr_val = serf_bucket_headers_get(hdrs, auth_hdr_name);
|
||||
auth_hdr_val = get_auth_header(hdrs, auth_hdr_name, scheme->name,
|
||||
pool);
|
||||
|
||||
status = do_auth(peer, code, gss_info, conn, auth_hdr_val, pool);
|
||||
if (status)
|
||||
return status;
|
||||
if (auth_hdr_val) {
|
||||
status = do_auth(peer, code, gss_info, conn, request, auth_hdr_val,
|
||||
pool);
|
||||
if (status) {
|
||||
return status;
|
||||
}
|
||||
} else {
|
||||
/* No Authenticate headers, nothing to validate: authentication
|
||||
completed.*/
|
||||
gss_info->state = gss_api_auth_completed;
|
||||
|
||||
serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
|
||||
"SPNEGO handshake completed.\n");
|
||||
}
|
||||
}
|
||||
|
||||
if (gss_info->state == gss_api_auth_completed) {
|
||||
|
@ -88,14 +88,15 @@ serf__spnego_create_sec_context(serf__spnego_context_t **ctx_p,
|
||||
* Other returns values indicates error.
|
||||
*/
|
||||
apr_status_t
|
||||
serf__spnego_init_sec_context(serf__spnego_context_t *ctx,
|
||||
const char *service,
|
||||
const char *hostname,
|
||||
serf__spnego_buffer_t *input_buf,
|
||||
serf__spnego_buffer_t *output_buf,
|
||||
apr_pool_t *result_pool,
|
||||
apr_pool_t *scratch_pool
|
||||
);
|
||||
serf__spnego_init_sec_context(serf_connection_t *conn,
|
||||
serf__spnego_context_t *ctx,
|
||||
const char *service,
|
||||
const char *hostname,
|
||||
serf__spnego_buffer_t *input_buf,
|
||||
serf__spnego_buffer_t *output_buf,
|
||||
apr_pool_t *result_pool,
|
||||
apr_pool_t *scratch_pool
|
||||
);
|
||||
|
||||
/*
|
||||
* Reset a previously created security context so we can start with a new one.
|
||||
|
@ -43,7 +43,7 @@ struct serf__spnego_context_t
|
||||
};
|
||||
|
||||
static void
|
||||
log_error(int verbose_flag, const char *filename,
|
||||
log_error(int verbose_flag, apr_socket_t *skt,
|
||||
serf__spnego_context_t *ctx,
|
||||
OM_uint32 err_maj_stat,
|
||||
OM_uint32 err_min_stat,
|
||||
@ -70,7 +70,7 @@ log_error(int verbose_flag, const char *filename,
|
||||
&stat_buff);
|
||||
}
|
||||
|
||||
serf__log(verbose_flag, filename,
|
||||
serf__log_skt(verbose_flag, __FILE__, skt,
|
||||
"%s (%x,%d): %s\n", msg,
|
||||
err_maj_stat, err_min_stat, stat_buff.value);
|
||||
}
|
||||
@ -89,7 +89,7 @@ cleanup_ctx(void *data)
|
||||
gss_maj_stat = gss_delete_sec_context(&gss_min_stat, &ctx->gss_ctx,
|
||||
GSS_C_NO_BUFFER);
|
||||
if(GSS_ERROR(gss_maj_stat)) {
|
||||
log_error(AUTH_VERBOSE, __FILE__, ctx,
|
||||
log_error(AUTH_VERBOSE, NULL, ctx,
|
||||
gss_maj_stat, gss_min_stat,
|
||||
"Error cleaning up GSS security context");
|
||||
return SERF_ERROR_AUTHN_FAILED;
|
||||
@ -146,7 +146,8 @@ serf__spnego_reset_sec_context(serf__spnego_context_t *ctx)
|
||||
}
|
||||
|
||||
apr_status_t
|
||||
serf__spnego_init_sec_context(serf__spnego_context_t *ctx,
|
||||
serf__spnego_init_sec_context(serf_connection_t *conn,
|
||||
serf__spnego_context_t *ctx,
|
||||
const char *service,
|
||||
const char *hostname,
|
||||
serf__spnego_buffer_t *input_buf,
|
||||
@ -166,12 +167,13 @@ serf__spnego_init_sec_context(serf__spnego_context_t *ctx,
|
||||
/* TODO: should be shared between multiple requests. */
|
||||
bufdesc.value = apr_pstrcat(scratch_pool, service, "@", hostname, NULL);
|
||||
bufdesc.length = strlen(bufdesc.value);
|
||||
serf__log(AUTH_VERBOSE, __FILE__, "Get principal for %s\n", bufdesc.value);
|
||||
serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
|
||||
"Get principal for %s\n", bufdesc.value);
|
||||
gss_maj_stat = gss_import_name (&gss_min_stat, &bufdesc,
|
||||
GSS_C_NT_HOSTBASED_SERVICE,
|
||||
&host_gss_name);
|
||||
if(GSS_ERROR(gss_maj_stat)) {
|
||||
log_error(AUTH_VERBOSE, __FILE__, ctx,
|
||||
log_error(AUTH_VERBOSE, conn->skt, ctx,
|
||||
gss_maj_stat, gss_min_stat,
|
||||
"Error converting principal name to GSS internal format ");
|
||||
return SERF_ERROR_AUTHN_FAILED;
|
||||
@ -214,7 +216,7 @@ serf__spnego_init_sec_context(serf__spnego_context_t *ctx,
|
||||
case GSS_S_CONTINUE_NEEDED:
|
||||
return APR_EAGAIN;
|
||||
default:
|
||||
log_error(AUTH_VERBOSE, __FILE__, ctx,
|
||||
log_error(AUTH_VERBOSE, conn->skt, ctx,
|
||||
gss_maj_stat, gss_min_stat,
|
||||
"Error during Kerberos handshake");
|
||||
return SERF_ERROR_AUTHN_FAILED;
|
||||
|
@ -192,7 +192,8 @@ serf__spnego_reset_sec_context(serf__spnego_context_t *ctx)
|
||||
}
|
||||
|
||||
apr_status_t
|
||||
serf__spnego_init_sec_context(serf__spnego_context_t *ctx,
|
||||
serf__spnego_init_sec_context(serf_connection_t *conn,
|
||||
serf__spnego_context_t *ctx,
|
||||
const char *service,
|
||||
const char *hostname,
|
||||
serf__spnego_buffer_t *input_buf,
|
||||
@ -219,8 +220,8 @@ serf__spnego_init_sec_context(serf__spnego_context_t *ctx,
|
||||
ctx->target_name = apr_pstrcat(scratch_pool, service, "/", canonname,
|
||||
NULL);
|
||||
|
||||
serf__log(AUTH_VERBOSE, __FILE__,
|
||||
"Using SPN '%s' for '%s'\n", ctx->target_name, hostname);
|
||||
serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
|
||||
"Using SPN '%s' for '%s'\n", ctx->target_name, hostname);
|
||||
}
|
||||
else if (ctx->authn_type == SERF_AUTHN_NTLM)
|
||||
{
|
||||
|
@ -20,6 +20,8 @@
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
#include "serf_private.h" /* for serf__bucket_headers_remove */
|
||||
|
||||
|
||||
typedef struct header_list {
|
||||
const char *header;
|
||||
@ -37,6 +39,7 @@ typedef struct header_list {
|
||||
|
||||
typedef struct {
|
||||
header_list_t *list;
|
||||
header_list_t *last;
|
||||
|
||||
header_list_t *cur_read;
|
||||
enum {
|
||||
@ -60,6 +63,7 @@ serf_bucket_t *serf_bucket_headers_create(
|
||||
|
||||
ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
|
||||
ctx->list = NULL;
|
||||
ctx->last = NULL;
|
||||
ctx->state = READ_START;
|
||||
|
||||
return serf_bucket_create(&serf_bucket_type_headers, allocator, ctx);
|
||||
@ -71,7 +75,6 @@ void serf_bucket_headers_setx(
|
||||
const char *value, apr_size_t value_size, int value_copy)
|
||||
{
|
||||
headers_context_t *ctx = bkt->data;
|
||||
header_list_t *iter = ctx->list;
|
||||
header_list_t *hdr;
|
||||
|
||||
#if 0
|
||||
@ -105,13 +108,12 @@ void serf_bucket_headers_setx(
|
||||
}
|
||||
|
||||
/* Add the new header at the end of the list. */
|
||||
while (iter && iter->next) {
|
||||
iter = iter->next;
|
||||
}
|
||||
if (iter)
|
||||
iter->next = hdr;
|
||||
if (ctx->last)
|
||||
ctx->last->next = hdr;
|
||||
else
|
||||
ctx->list = hdr;
|
||||
|
||||
ctx->last = hdr;
|
||||
}
|
||||
|
||||
void serf_bucket_headers_set(
|
||||
@ -191,6 +193,29 @@ const char *serf_bucket_headers_get(
|
||||
return val;
|
||||
}
|
||||
|
||||
void serf__bucket_headers_remove(serf_bucket_t *bucket, const char *header)
|
||||
{
|
||||
headers_context_t *ctx = bucket->data;
|
||||
header_list_t *scan = ctx->list, *prev = NULL;
|
||||
|
||||
/* Find and delete all items with the same header (case insensitive) */
|
||||
while (scan) {
|
||||
if (strcasecmp(scan->header, header) == 0) {
|
||||
if (prev) {
|
||||
prev->next = scan->next;
|
||||
} else {
|
||||
ctx->list = scan->next;
|
||||
}
|
||||
if (ctx->last == scan) {
|
||||
ctx->last = NULL;
|
||||
}
|
||||
} else {
|
||||
prev = scan;
|
||||
}
|
||||
scan = scan->next;
|
||||
}
|
||||
}
|
||||
|
||||
void serf_bucket_headers_do(
|
||||
serf_bucket_t *headers_bucket,
|
||||
serf_bucket_headers_do_callback_fn_t func,
|
||||
|
@ -43,6 +43,29 @@ typedef struct {
|
||||
int head_req; /* Was this a HEAD request? */
|
||||
} response_context_t;
|
||||
|
||||
/* Returns 1 if according to RFC2626 this response can have a body, 0 if it
|
||||
must not have a body. */
|
||||
static int expect_body(response_context_t *ctx)
|
||||
{
|
||||
if (ctx->head_req)
|
||||
return 0;
|
||||
|
||||
/* 100 Continue and 101 Switching Protocols */
|
||||
if (ctx->sl.code >= 100 && ctx->sl.code < 200)
|
||||
return 0;
|
||||
|
||||
/* 204 No Content */
|
||||
if (ctx->sl.code == 204)
|
||||
return 0;
|
||||
|
||||
/* 205? */
|
||||
|
||||
/* 304 Not Modified */
|
||||
if (ctx->sl.code == 304)
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
serf_bucket_t *serf_bucket_response_create(
|
||||
serf_bucket_t *stream,
|
||||
@ -238,6 +261,15 @@ static apr_status_t run_machine(serf_bucket_t *bkt, response_context_t *ctx)
|
||||
/* Advance the state. */
|
||||
ctx->state = STATE_BODY;
|
||||
|
||||
/* If this is a response to a HEAD request, or code == 1xx,204 or304
|
||||
then we don't receive a real body. */
|
||||
if (!expect_body(ctx)) {
|
||||
ctx->body = serf_bucket_simple_create(NULL, 0, NULL, NULL,
|
||||
bkt->allocator);
|
||||
ctx->state = STATE_BODY;
|
||||
break;
|
||||
}
|
||||
|
||||
ctx->body =
|
||||
serf_bucket_barrier_create(ctx->stream, bkt->allocator);
|
||||
|
||||
@ -261,10 +293,6 @@ static apr_status_t run_machine(serf_bucket_t *bkt, response_context_t *ctx)
|
||||
ctx->body = serf_bucket_dechunk_create(ctx->body,
|
||||
bkt->allocator);
|
||||
}
|
||||
|
||||
if (!v && (ctx->sl.code == 204 || ctx->sl.code == 304)) {
|
||||
ctx->state = STATE_DONE;
|
||||
}
|
||||
}
|
||||
v = serf_bucket_headers_get(ctx->headers, "Content-Encoding");
|
||||
if (v) {
|
||||
@ -280,10 +308,6 @@ static apr_status_t run_machine(serf_bucket_t *bkt, response_context_t *ctx)
|
||||
SERF_DEFLATE_DEFLATE);
|
||||
}
|
||||
}
|
||||
/* If we're a HEAD request, we don't receive a body. */
|
||||
if (ctx->head_req) {
|
||||
ctx->state = STATE_DONE;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case STATE_BODY:
|
||||
|
@ -50,7 +50,7 @@ static apr_status_t socket_reader(void *baton, apr_size_t bufsize,
|
||||
"--- socket_recv:\n%.*s\n-(%d)-\n",
|
||||
*len, buf, *len);
|
||||
|
||||
if (ctx->progress_func)
|
||||
if (ctx->progress_func && *len)
|
||||
ctx->progress_func(ctx->progress_baton, *len, 0);
|
||||
|
||||
return status;
|
||||
|
@ -463,6 +463,7 @@ validate_server_certificate(int cert_valid, X509_STORE_CTX *store_ctx)
|
||||
case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT:
|
||||
case X509_V_ERR_CERT_UNTRUSTED:
|
||||
case X509_V_ERR_INVALID_CA:
|
||||
case X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE:
|
||||
failures |= SERF_SSL_CERT_UNKNOWNCA;
|
||||
break;
|
||||
case X509_V_ERR_CERT_REVOKED:
|
||||
@ -958,16 +959,24 @@ static apr_status_t cleanup_ssl(void *data)
|
||||
|
||||
#endif
|
||||
|
||||
static apr_uint32_t have_init_ssl = 0;
|
||||
#if !APR_VERSION_AT_LEAST(1,0,0)
|
||||
#define apr_atomic_cas32(mem, with, cmp) apr_atomic_cas(mem, with, cmp)
|
||||
#endif
|
||||
|
||||
enum ssl_init_e
|
||||
{
|
||||
INIT_UNINITIALIZED = 0,
|
||||
INIT_BUSY = 1,
|
||||
INIT_DONE = 2
|
||||
};
|
||||
|
||||
static volatile apr_uint32_t have_init_ssl = INIT_UNINITIALIZED;
|
||||
|
||||
static void init_ssl_libraries(void)
|
||||
{
|
||||
apr_uint32_t val;
|
||||
#if APR_VERSION_AT_LEAST(1,0,0)
|
||||
val = apr_atomic_xchg32(&have_init_ssl, 1);
|
||||
#else
|
||||
val = apr_atomic_cas(&have_init_ssl, 1, 0);
|
||||
#endif
|
||||
|
||||
val = apr_atomic_cas32(&have_init_ssl, INIT_BUSY, INIT_UNINITIALIZED);
|
||||
|
||||
if (!val) {
|
||||
#if APR_HAS_THREADS
|
||||
@ -1015,6 +1024,19 @@ static void init_ssl_libraries(void)
|
||||
|
||||
apr_pool_cleanup_register(ssl_pool, NULL, cleanup_ssl, cleanup_ssl);
|
||||
#endif
|
||||
apr_atomic_cas32(&have_init_ssl, INIT_DONE, INIT_BUSY);
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Make sure we don't continue before the initialization in another
|
||||
thread has completed */
|
||||
while (val != INIT_DONE) {
|
||||
apr_sleep(APR_USEC_PER_SEC / 1000);
|
||||
|
||||
val = apr_atomic_cas32(&have_init_ssl,
|
||||
INIT_UNINITIALIZED,
|
||||
INIT_UNINITIALIZED);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -1198,21 +1220,16 @@ void serf_ssl_server_cert_chain_callback_set(
|
||||
context->server_cert_userdata = data;
|
||||
}
|
||||
|
||||
static serf_ssl_context_t *ssl_init_context(void)
|
||||
static serf_ssl_context_t *ssl_init_context(serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
serf_ssl_context_t *ssl_ctx;
|
||||
apr_pool_t *pool;
|
||||
serf_bucket_alloc_t *allocator;
|
||||
|
||||
init_ssl_libraries();
|
||||
|
||||
apr_pool_create(&pool, NULL);
|
||||
allocator = serf_bucket_allocator_create(pool, NULL, NULL);
|
||||
|
||||
ssl_ctx = serf_bucket_mem_alloc(allocator, sizeof(*ssl_ctx));
|
||||
|
||||
ssl_ctx->refcount = 0;
|
||||
ssl_ctx->pool = pool;
|
||||
ssl_ctx->pool = serf_bucket_allocator_get_pool(allocator);
|
||||
ssl_ctx->allocator = allocator;
|
||||
|
||||
ssl_ctx->ctx = SSL_CTX_new(SSLv23_client_method());
|
||||
@ -1269,8 +1286,6 @@ static serf_ssl_context_t *ssl_init_context(void)
|
||||
static apr_status_t ssl_free_context(
|
||||
serf_ssl_context_t *ssl_ctx)
|
||||
{
|
||||
apr_pool_t *p;
|
||||
|
||||
/* If never had the pending buckets, don't try to free them. */
|
||||
if (ssl_ctx->decrypt.pending != NULL) {
|
||||
serf_bucket_destroy(ssl_ctx->decrypt.pending);
|
||||
@ -1283,10 +1298,7 @@ static apr_status_t ssl_free_context(
|
||||
SSL_free(ssl_ctx->ssl);
|
||||
SSL_CTX_free(ssl_ctx->ctx);
|
||||
|
||||
p = ssl_ctx->pool;
|
||||
|
||||
serf_bucket_mem_free(ssl_ctx->allocator, ssl_ctx);
|
||||
apr_pool_destroy(p);
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
@ -1300,7 +1312,7 @@ static serf_bucket_t * serf_bucket_ssl_create(
|
||||
|
||||
ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
|
||||
if (!ssl_ctx) {
|
||||
ctx->ssl_ctx = ssl_init_context();
|
||||
ctx->ssl_ctx = ssl_init_context(allocator);
|
||||
}
|
||||
else {
|
||||
ctx->ssl_ctx = ssl_ctx;
|
||||
|
@ -48,9 +48,11 @@
|
||||
subprocess.check_call([SERF_RESPONSE_EXE, case])
|
||||
except subprocess.CalledProcessError:
|
||||
print "ERROR: test case %s failed" % (case)
|
||||
sys.exit(1)
|
||||
|
||||
print "== Running the unit tests =="
|
||||
try:
|
||||
subprocess.check_call(TEST_ALL_EXE)
|
||||
except subprocess.CalledProcessError:
|
||||
print "ERROR: test(s) failed in test_all"
|
||||
sys.exit(1)
|
||||
|
@ -38,7 +38,7 @@
|
||||
# a more complicated example might be:
|
||||
# const type * const * serf_func3(...
|
||||
#
|
||||
_funcs = re.compile(r'^(?:(?:\w+|\*) )+\*?(serf_[a-z][a-z_0-9]*)\(',
|
||||
_funcs = re.compile(r'^(?:(?:\w+|\*) )+\*?(serf_[a-z][a-zA-Z_0-9]*)\(',
|
||||
re.MULTILINE)
|
||||
|
||||
# This regex parses the bucket type definitions which look like:
|
||||
|
@ -1,7 +1,7 @@
|
||||
SERF_MAJOR_VERSION=@MAJOR@
|
||||
prefix=@PREFIX@
|
||||
exec_prefix=${prefix}
|
||||
libdir=${exec_prefix}/lib
|
||||
libdir=@LIBDIR@
|
||||
includedir=${prefix}/include/@INCLUDE_SUBDIR@
|
||||
|
||||
Name: serf
|
||||
|
@ -285,6 +285,12 @@ apr_status_t serf_context_run(
|
||||
### look at the potential return codes. map to our defined
|
||||
### return values? ...
|
||||
*/
|
||||
|
||||
/* Use the strict documented error for poll timeouts, to allow proper
|
||||
handling of the other timeout types when returned from
|
||||
serf_event_trigger */
|
||||
if (APR_STATUS_IS_TIMEUP(status))
|
||||
return APR_TIMEUP; /* Return the documented error */
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@ -81,6 +81,46 @@ static apr_status_t clean_conn(void *data)
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
/* Check if there is data waiting to be sent over the socket. This can happen
|
||||
in two situations:
|
||||
- The connection queue has atleast one request with unwritten data.
|
||||
- All requests are written and the ssl layer wrote some data while reading
|
||||
the response. This can happen when the server triggers a renegotiation,
|
||||
e.g. after the first and only request on that connection was received.
|
||||
Returns 1 if data is pending on CONN, NULL if not.
|
||||
If NEXT_REQ is not NULL, it will be filled in with the next available request
|
||||
with unwritten data. */
|
||||
static int
|
||||
request_or_data_pending(serf_request_t **next_req, serf_connection_t *conn)
|
||||
{
|
||||
serf_request_t *request = conn->requests;
|
||||
|
||||
while (request != NULL && request->req_bkt == NULL &&
|
||||
request->writing_started)
|
||||
request = request->next;
|
||||
|
||||
if (next_req)
|
||||
*next_req = request;
|
||||
|
||||
if (request != NULL) {
|
||||
return 1;
|
||||
} else if (conn->ostream_head) {
|
||||
const char *dummy;
|
||||
apr_size_t len;
|
||||
apr_status_t status;
|
||||
|
||||
status = serf_bucket_peek(conn->ostream_head, &dummy,
|
||||
&len);
|
||||
if (!SERF_BUCKET_READ_ERROR(status) && len) {
|
||||
serf__log_skt(CONN_VERBOSE, __FILE__, conn->skt,
|
||||
"All requests written but still data pending.\n");
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Update the pollset for this connection. We tweak the pollset based on
|
||||
* whether we want to read and/or write, given conditions within the
|
||||
* connection. If the connection is not (yet) in the pollset, then it
|
||||
@ -126,7 +166,6 @@ apr_status_t serf__conn_update_pollset(serf_connection_t *conn)
|
||||
conn->state != SERF_CONN_CLOSING)
|
||||
desc.reqevents |= APR_POLLOUT;
|
||||
else {
|
||||
serf_request_t *request = conn->requests;
|
||||
|
||||
if ((conn->probable_keepalive_limit &&
|
||||
conn->completed_requests > conn->probable_keepalive_limit) ||
|
||||
@ -134,13 +173,9 @@ apr_status_t serf__conn_update_pollset(serf_connection_t *conn)
|
||||
conn->completed_requests - conn->completed_responses >=
|
||||
conn->max_outstanding_requests)) {
|
||||
/* we wouldn't try to write any way right now. */
|
||||
}
|
||||
else {
|
||||
while (request != NULL && request->req_bkt == NULL &&
|
||||
request->written)
|
||||
request = request->next;
|
||||
if (request != NULL)
|
||||
desc.reqevents |= APR_POLLOUT;
|
||||
}
|
||||
else if (request_or_data_pending(NULL, conn)) {
|
||||
desc.reqevents |= APR_POLLOUT;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -393,13 +428,12 @@ apr_status_t serf__open_connections(serf_context_t *ctx)
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
static apr_status_t no_more_writes(serf_connection_t *conn,
|
||||
serf_request_t *request)
|
||||
static apr_status_t no_more_writes(serf_connection_t *conn)
|
||||
{
|
||||
/* Note that we should hold new requests until we open our new socket. */
|
||||
conn->state = SERF_CONN_CLOSING;
|
||||
serf__log(CONN_VERBOSE, __FILE__, "stop writing on conn 0x%x\n",
|
||||
conn);
|
||||
serf__log_skt(CONN_VERBOSE, __FILE__, conn->skt,
|
||||
"stop writing on conn 0x%x\n", conn);
|
||||
|
||||
/* Clear our iovec. */
|
||||
conn->vec_len = 0;
|
||||
@ -544,8 +578,12 @@ static apr_status_t reset_connection(serf_connection_t *conn,
|
||||
while (old_reqs) {
|
||||
/* If we haven't started to write the connection, bring it over
|
||||
* unchanged to our new socket.
|
||||
* Do not copy a CONNECT request to the new connection, the ssl tunnel
|
||||
* setup code will create a new CONNECT request already.
|
||||
*/
|
||||
if (requeue_requests && !old_reqs->written) {
|
||||
if (requeue_requests && !old_reqs->writing_started &&
|
||||
!old_reqs->ssltunnel) {
|
||||
|
||||
serf_request_t *req = old_reqs;
|
||||
old_reqs = old_reqs->next;
|
||||
req->next = NULL;
|
||||
@ -672,8 +710,6 @@ static apr_status_t setup_request(serf_request_t *request)
|
||||
/* write data out to the connection */
|
||||
static apr_status_t write_to_connection(serf_connection_t *conn)
|
||||
{
|
||||
serf_request_t *request = conn->requests;
|
||||
|
||||
if (conn->probable_keepalive_limit &&
|
||||
conn->completed_requests > conn->probable_keepalive_limit) {
|
||||
|
||||
@ -684,21 +720,16 @@ static apr_status_t write_to_connection(serf_connection_t *conn)
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
/* Find a request that has data which needs to be delivered. */
|
||||
while (request != NULL &&
|
||||
request->req_bkt == NULL && request->written)
|
||||
request = request->next;
|
||||
|
||||
/* assert: request != NULL || conn->vec_len */
|
||||
|
||||
/* Keep reading and sending until we run out of stuff to read, or
|
||||
* writing would block.
|
||||
*/
|
||||
while (1) {
|
||||
serf_request_t *request;
|
||||
int stop_reading = 0;
|
||||
apr_status_t status;
|
||||
apr_status_t read_status;
|
||||
serf_bucket_t *ostreamt, *ostreamh;
|
||||
serf_bucket_t *ostreamt;
|
||||
serf_bucket_t *ostreamh;
|
||||
int max_outstanding_requests = conn->max_outstanding_requests;
|
||||
|
||||
/* If we're setting up an ssl tunnel, we can't send real requests
|
||||
@ -727,7 +758,7 @@ static apr_status_t write_to_connection(serf_connection_t *conn)
|
||||
if (APR_STATUS_IS_EPIPE(status) ||
|
||||
APR_STATUS_IS_ECONNRESET(status) ||
|
||||
APR_STATUS_IS_ECONNABORTED(status))
|
||||
return no_more_writes(conn, request);
|
||||
return no_more_writes(conn);
|
||||
if (status)
|
||||
return status;
|
||||
}
|
||||
@ -738,14 +769,11 @@ static apr_status_t write_to_connection(serf_connection_t *conn)
|
||||
/* We may need to move forward to a request which has something
|
||||
* to write.
|
||||
*/
|
||||
while (request != NULL &&
|
||||
request->req_bkt == NULL && request->written)
|
||||
request = request->next;
|
||||
|
||||
if (request == NULL) {
|
||||
if (!request_or_data_pending(&request, conn)) {
|
||||
/* No more requests (with data) are registered with the
|
||||
* connection. Let's update the pollset so that we don't
|
||||
* try to write to this socket again.
|
||||
* connection, and no data is pending on the outgoing stream.
|
||||
* Let's update the pollset so that we don't try to write to this
|
||||
* socket again.
|
||||
*/
|
||||
conn->dirty_conn = 1;
|
||||
conn->ctx->dirty_pollset = 1;
|
||||
@ -757,17 +785,19 @@ static apr_status_t write_to_connection(serf_connection_t *conn)
|
||||
return status;
|
||||
}
|
||||
|
||||
if (request->req_bkt == NULL) {
|
||||
read_status = setup_request(request);
|
||||
if (read_status) {
|
||||
/* Something bad happened. Propagate any errors. */
|
||||
return read_status;
|
||||
if (request) {
|
||||
if (request->req_bkt == NULL) {
|
||||
read_status = setup_request(request);
|
||||
if (read_status) {
|
||||
/* Something bad happened. Propagate any errors. */
|
||||
return read_status;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!request->written) {
|
||||
request->written = 1;
|
||||
serf_bucket_aggregate_append(ostreamt, request->req_bkt);
|
||||
if (!request->writing_started) {
|
||||
request->writing_started = 1;
|
||||
serf_bucket_aggregate_append(ostreamt, request->req_bkt);
|
||||
}
|
||||
}
|
||||
|
||||
/* ### optimize at some point by using read_for_sendfile */
|
||||
@ -818,10 +848,10 @@ static apr_status_t write_to_connection(serf_connection_t *conn)
|
||||
if (APR_STATUS_IS_EAGAIN(status))
|
||||
return APR_SUCCESS;
|
||||
if (APR_STATUS_IS_EPIPE(status))
|
||||
return no_more_writes(conn, request);
|
||||
return no_more_writes(conn);
|
||||
if (APR_STATUS_IS_ECONNRESET(status) ||
|
||||
APR_STATUS_IS_ECONNABORTED(status)) {
|
||||
return no_more_writes(conn, request);
|
||||
return no_more_writes(conn);
|
||||
}
|
||||
if (status)
|
||||
return status;
|
||||
@ -833,7 +863,8 @@ static apr_status_t write_to_connection(serf_connection_t *conn)
|
||||
conn->dirty_conn = 1;
|
||||
conn->ctx->dirty_pollset = 1;
|
||||
}
|
||||
else if (read_status && conn->hit_eof && conn->vec_len == 0) {
|
||||
else if (request && read_status && conn->hit_eof &&
|
||||
conn->vec_len == 0) {
|
||||
/* If we hit the end of the request bucket and all of its data has
|
||||
* been written, then clear it out to signify that we're done
|
||||
* sending the request. On the next iteration through this loop:
|
||||
@ -897,8 +928,7 @@ static apr_status_t handle_response(serf_request_t *request,
|
||||
|
||||
If the authentication was tried, but failed, pass the response
|
||||
to the application, maybe it can do better. */
|
||||
if (APR_STATUS_IS_EOF(status) ||
|
||||
APR_STATUS_IS_EAGAIN(status)) {
|
||||
if (status) {
|
||||
return status;
|
||||
}
|
||||
}
|
||||
@ -1060,7 +1090,7 @@ static apr_status_t read_from_connection(serf_connection_t *conn)
|
||||
* sending the SSL 'close notify' shutdown alert), we'll reset the
|
||||
* connection and open a new one.
|
||||
*/
|
||||
if (request->req_bkt || !request->written) {
|
||||
if (request->req_bkt || !request->writing_started) {
|
||||
const char *data;
|
||||
apr_size_t len;
|
||||
|
||||
@ -1118,6 +1148,14 @@ static apr_status_t read_from_connection(serf_connection_t *conn)
|
||||
* treat that as a success.
|
||||
*/
|
||||
if (APR_STATUS_IS_EAGAIN(status)) {
|
||||
/* It is possible that while reading the response, the ssl layer
|
||||
has prepared some data to send. If this was the last request,
|
||||
serf will not check for socket writability, so force this here.
|
||||
*/
|
||||
if (request_or_data_pending(&request, conn) && !request) {
|
||||
conn->dirty_conn = 1;
|
||||
conn->ctx->dirty_pollset = 1;
|
||||
}
|
||||
status = APR_SUCCESS;
|
||||
goto error;
|
||||
}
|
||||
@ -1182,7 +1220,7 @@ static apr_status_t read_from_connection(serf_connection_t *conn)
|
||||
* update the pollset. We don't want to read from this socket any
|
||||
* more. We are definitely done with this loop, too.
|
||||
*/
|
||||
if (request == NULL || !request->written) {
|
||||
if (request == NULL || !request->writing_started) {
|
||||
conn->dirty_conn = 1;
|
||||
conn->ctx->dirty_pollset = 1;
|
||||
status = APR_SUCCESS;
|
||||
@ -1247,8 +1285,29 @@ apr_status_t serf__process_connection(serf_connection_t *conn,
|
||||
int error;
|
||||
apr_socklen_t l = sizeof(error);
|
||||
|
||||
if (!getsockopt(osskt, SOL_SOCKET, SO_ERROR, (char*)&error, &l))
|
||||
return APR_FROM_OS_ERROR(error);
|
||||
if (!getsockopt(osskt, SOL_SOCKET, SO_ERROR, (char*)&error,
|
||||
&l)) {
|
||||
status = APR_FROM_OS_ERROR(error);
|
||||
|
||||
/* Handle fallback for multi-homed servers.
|
||||
|
||||
### Improve algorithm to find better than just 'next'?
|
||||
|
||||
Current Windows versions already handle re-ordering for
|
||||
api users by using statistics on the recently failed
|
||||
connections to order the list of addresses. */
|
||||
if (conn->completed_requests == 0
|
||||
&& conn->address->next != NULL
|
||||
&& (APR_STATUS_IS_ECONNREFUSED(status)
|
||||
|| APR_STATUS_IS_TIMEUP(status)
|
||||
|| APR_STATUS_IS_ENETUNREACH(status))) {
|
||||
|
||||
conn->address = conn->address->next;
|
||||
return reset_connection(conn, 1);
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@ -1342,7 +1401,8 @@ apr_status_t serf_connection_create2(
|
||||
/* We're not interested in the path following the hostname. */
|
||||
c->host_url = apr_uri_unparse(c->pool,
|
||||
&host_info,
|
||||
APR_URI_UNP_OMITPATHINFO);
|
||||
APR_URI_UNP_OMITPATHINFO |
|
||||
APR_URI_UNP_OMITUSERINFO);
|
||||
|
||||
/* Store the host info without the path on the connection. */
|
||||
(void)apr_uri_parse(c->pool, c->host_url, &(c->host_info));
|
||||
@ -1469,9 +1529,10 @@ create_request(serf_connection_t *conn,
|
||||
request->req_bkt = NULL;
|
||||
request->resp_bkt = NULL;
|
||||
request->priority = priority;
|
||||
request->written = 0;
|
||||
request->writing_started = 0;
|
||||
request->ssltunnel = ssltunnel;
|
||||
request->next = NULL;
|
||||
request->auth_baton = NULL;
|
||||
|
||||
return request;
|
||||
}
|
||||
@ -1515,7 +1576,7 @@ priority_request_create(serf_connection_t *conn,
|
||||
prev = NULL;
|
||||
|
||||
/* Find a request that has data which needs to be delivered. */
|
||||
while (iter != NULL && iter->req_bkt == NULL && iter->written) {
|
||||
while (iter != NULL && iter->req_bkt == NULL && iter->writing_started) {
|
||||
prev = iter;
|
||||
iter = iter->next;
|
||||
}
|
||||
@ -1574,7 +1635,7 @@ apr_status_t serf_request_cancel(serf_request_t *request)
|
||||
|
||||
apr_status_t serf_request_is_written(serf_request_t *request)
|
||||
{
|
||||
if (request->written && !request->req_bkt)
|
||||
if (request->writing_started && !request->req_bkt)
|
||||
return APR_SUCCESS;
|
||||
|
||||
return APR_EBUSY;
|
||||
|
@ -1062,7 +1062,7 @@ void serf_debug__bucket_alloc_check(
|
||||
/* Version info */
|
||||
#define SERF_MAJOR_VERSION 1
|
||||
#define SERF_MINOR_VERSION 3
|
||||
#define SERF_PATCH_VERSION 0
|
||||
#define SERF_PATCH_VERSION 4
|
||||
|
||||
/* Version number string */
|
||||
#define SERF_VERSION_STRING APR_STRINGIFY(SERF_MAJOR_VERSION) "." \
|
||||
|
@ -23,7 +23,9 @@
|
||||
|
||||
/* Windows does not define IOV_MAX, so we need to ensure it is defined. */
|
||||
#ifndef IOV_MAX
|
||||
#define IOV_MAX 16
|
||||
/* There is no limit for iovec count on Windows, but apr_socket_sendv
|
||||
allocates WSABUF structures on stack if vecs_count <= 50. */
|
||||
#define IOV_MAX 50
|
||||
#endif
|
||||
|
||||
/* Older versions of APR do not have this macro. */
|
||||
@ -93,7 +95,7 @@ struct serf_request_t {
|
||||
|
||||
serf_bucket_t *resp_bkt;
|
||||
|
||||
int written;
|
||||
int writing_started;
|
||||
int priority;
|
||||
/* 1 if this is a request to setup a SSL tunnel, 0 for normal requests. */
|
||||
int ssltunnel;
|
||||
@ -117,6 +119,8 @@ typedef struct serf__authn_info_t {
|
||||
const serf__authn_scheme_t *scheme;
|
||||
|
||||
void *baton;
|
||||
|
||||
int failed_authn_types;
|
||||
} serf__authn_info_t;
|
||||
|
||||
struct serf_context_t {
|
||||
@ -266,9 +270,8 @@ struct serf_connection_t {
|
||||
port values are filled in. */
|
||||
apr_uri_t host_info;
|
||||
|
||||
/* connection and authentication scheme specific information */
|
||||
void *authn_baton;
|
||||
void *proxy_authn_baton;
|
||||
/* authentication info for this connection. */
|
||||
serf__authn_info_t authn_info;
|
||||
|
||||
/* Time marker when connection begins. */
|
||||
apr_time_t connect_time;
|
||||
@ -292,6 +295,12 @@ struct serf_connection_t {
|
||||
*/
|
||||
apr_status_t serf_response_full_become_aggregate(serf_bucket_t *bucket);
|
||||
|
||||
/**
|
||||
* Remove the header from the list, do nothing if the header wasn't added.
|
||||
*/
|
||||
void serf__bucket_headers_remove(serf_bucket_t *headers_bucket,
|
||||
const char *header);
|
||||
|
||||
/*** Authentication handler declarations ***/
|
||||
|
||||
typedef enum { PROXY, HOST } peer_t;
|
||||
@ -352,7 +361,8 @@ typedef apr_status_t
|
||||
* (if needed).
|
||||
*/
|
||||
typedef apr_status_t
|
||||
(*serf__validate_response_func_t)(peer_t peer,
|
||||
(*serf__validate_response_func_t)(const serf__authn_scheme_t *scheme,
|
||||
peer_t peer,
|
||||
int code,
|
||||
serf_connection_t *conn,
|
||||
serf_request_t *request,
|
||||
|
@ -68,9 +68,10 @@ static apr_status_t handle_response(serf_request_t *request,
|
||||
apr_status_t status;
|
||||
serf_status_line sl;
|
||||
req_ctx_t *ctx = handler_baton;
|
||||
serf_connection_t *conn = request->conn;
|
||||
|
||||
if (! response) {
|
||||
serf_connection_request_create(request->conn,
|
||||
serf_connection_request_create(conn,
|
||||
setup_request,
|
||||
ctx);
|
||||
return APR_SUCCESS;
|
||||
@ -97,17 +98,34 @@ static apr_status_t handle_response(serf_request_t *request,
|
||||
connection.
|
||||
*/
|
||||
if (sl.code >= 200 && sl.code < 300) {
|
||||
request->conn->state = SERF_CONN_CONNECTED;
|
||||
serf_bucket_t *hdrs;
|
||||
const char *val;
|
||||
|
||||
conn->state = SERF_CONN_CONNECTED;
|
||||
|
||||
/* Body is supposed to be empty. */
|
||||
apr_pool_destroy(ctx->pool);
|
||||
serf_bucket_destroy(request->conn->ssltunnel_ostream);
|
||||
request->conn->stream = NULL;
|
||||
serf_bucket_destroy(conn->ssltunnel_ostream);
|
||||
serf_bucket_destroy(conn->stream);
|
||||
conn->stream = NULL;
|
||||
ctx = NULL;
|
||||
|
||||
serf__log(CONN_VERBOSE, __FILE__,
|
||||
"successfully set up ssl tunnel on connection 0x%x\n",
|
||||
request->conn);
|
||||
serf__log_skt(CONN_VERBOSE, __FILE__, conn->skt,
|
||||
"successfully set up ssl tunnel.\n");
|
||||
|
||||
/* Fix for issue #123: ignore the "Connection: close" header here,
|
||||
leaving the header in place would make the serf's main context
|
||||
loop close this connection immediately after reading the 200 OK
|
||||
response. */
|
||||
|
||||
hdrs = serf_bucket_response_get_headers(response);
|
||||
val = serf_bucket_headers_get(hdrs, "Connection");
|
||||
if (val && strcasecmp("close", val) == 0) {
|
||||
serf__log_skt(CONN_VERBOSE, __FILE__, conn->skt,
|
||||
"Ignore Connection: close header on this reponse, don't "
|
||||
"close the connection now that the tunnel is set up.\n");
|
||||
serf__bucket_headers_remove(hdrs, "Connection");
|
||||
}
|
||||
|
||||
return APR_EOF;
|
||||
}
|
||||
@ -171,8 +189,8 @@ apr_status_t serf__ssltunnel_connect(serf_connection_t *conn)
|
||||
ctx);
|
||||
|
||||
conn->state = SERF_CONN_SETUP_SSLTUNNEL;
|
||||
serf__log(CONN_VERBOSE, __FILE__,
|
||||
"setting up ssl tunnel on connection 0x%x\n", conn);
|
||||
serf__log_skt(CONN_VERBOSE, __FILE__, conn->skt,
|
||||
"setting up ssl tunnel on connection.\n");
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user