Import serf-1.2.1
This commit is contained in:
commit
4e6fa1a552
198
CHANGES
Normal file
198
CHANGES
Normal file
@ -0,0 +1,198 @@
|
||||
Serf 1.2.1 [2013-06-03, from /tags/1.2.1]
|
||||
Fix issue 95: add gssapi switches to configure (r1864, r1900)
|
||||
Fix issue 97: skip mmap bucket if APR_HAS_MMAP is undefined (r1877)
|
||||
Fix issue 100: building against an old Windows Platform SDK (r1881)
|
||||
Fix issue 102: digest authentication failures (r1885)
|
||||
Improve error return values in SSPI authentication (r1804)
|
||||
Ensure serf-1.pc is constructed by serfmake (r1865)
|
||||
Optimize SPNego authentication processing (r1868)
|
||||
Reject certs that application does not like (r1794)
|
||||
Fix possible endless loop in serf_linebuf_fetch() (r1816)
|
||||
Windows build: dereference INTDIR in serf.mak (r1882)
|
||||
|
||||
|
||||
Serf 1.2.0 [2013-02-22, from /tags/1.2.0, r1726]
|
||||
Fixed issue 94: Serf can enter an infinite loop when server aborts conn.
|
||||
Fixed issue 91: Serf doesn't handle an incoming 408 Timeout Request
|
||||
Fixed issue 80: Serf is not handling Negotiate authentication correctly
|
||||
Fixed issue 77: Endless loop if server doesn't accept Negotiate authn
|
||||
Fixed issue 93: cleanup-after-fork interferes with parent (r1714)
|
||||
Fixed most of issue 89: Support REAL SPNEGO authentication
|
||||
Enable Negotiate/Kerberos support for proxy servers.
|
||||
Return error when C-L, chunked, gzip encoded response bodies where
|
||||
truncated (due to aborted connection) (r1688)
|
||||
Add a logging mechanism that can be enabled at compile-time.
|
||||
Don't lookup server address if a proxy was configured. (r1706)
|
||||
Fix an off-by-one in buffer sizing (r1695)
|
||||
Disable SSL compression by default + API to enable it (r1692)
|
||||
New serf_connection_get_latency() for estimated network latency (r1689)
|
||||
New error code and RFC compliance for the HTTPS tunnel (r1701, r1644)
|
||||
Handle EINTR when a user suspends and then backgrounds the app (r1708)
|
||||
Minor fixes and test suite improvements.
|
||||
|
||||
|
||||
Serf 1.1.1 [2012-10-04, from /tags/1.1.1, r1657]
|
||||
Fixed issue 86: ensure requeued requests are correctly handled.
|
||||
This fixes:
|
||||
- infinite loop with multiple connection resets or SIGPIPE errors
|
||||
- "connection" hang where we would not re-queue requests that are
|
||||
held after we re-connect
|
||||
Fixed issue 74: test_all goes in an endless loop
|
||||
Fix memleak when conn. is closed explicitly/due to pool cleanups (r1623)
|
||||
Windows: Fix https connection aborts (r1628..-30,-33,-34,-37)
|
||||
Add new error codes for the SSL bucket
|
||||
|
||||
|
||||
Serf 1.1.0 [2012-06-07, from /tags/1.1.0, r1617]
|
||||
New: serf_bucket_request_set_CL() for C-L based, non-chunked requests
|
||||
New: serf_ssl_server_cert_chain_callback_set() for full-chain validation
|
||||
|
||||
|
||||
Serf 1.0.3 [2012-03-20, from /tags/1.0.3, r1586]
|
||||
Map more OpenSSL errors into SERF_SSL_CERT_UNKNOWNCA (r1573)
|
||||
|
||||
|
||||
Serf 1.0.2
|
||||
Not released.
|
||||
|
||||
|
||||
Serf 1.0.1 [2012-02-15, from /tags/1.0.1, r1569]
|
||||
FreeBSD fixes in the test suite (r1560, r1565)
|
||||
Minor build fixes
|
||||
|
||||
|
||||
Serf 1.0.0 [2011-07-15, from /tags/1.0.0, r1540]
|
||||
Fixed issue 38: enable builds using non-GNU make
|
||||
Fixed issue 49: support SSL tunnels for HTTPS via a proxy
|
||||
Fixed issue 56: allow Subject Alternative Name, and enable SNI
|
||||
Fixed issue 61: include order dependencies
|
||||
Fixed issue 66: improved error reporting when creating install dirs
|
||||
Fixed issue 71: handle ECONNREFUSED on Windows
|
||||
Fixed issue 79: destroy the APR allocator, if we create one
|
||||
Fixed issue 81: build failed on APR 0.9.x
|
||||
Major performance improvements and bug fixes for SSL buckets/handling (r1462)
|
||||
Add a new "iovec" bucket type (r1434)
|
||||
Minimize network packet writes based on ra_serf analysis (r1467, r1471)
|
||||
Fix out of order issue with multiple priority requests (r1469)
|
||||
Work around broken WSAPoll() impl on Windows introduced in APR 1.4.0 (r1506)
|
||||
Fix 100% CPU usage with many pipelined requests (r1456)
|
||||
Corrected contents of build/serf.def; it now includes bucket types (r1512)
|
||||
Removed "snapshot" feature from buckets (r1503)
|
||||
Various improvements to the test system
|
||||
Various memory leak fixes
|
||||
|
||||
|
||||
Serf 0.7.2 [2011-03-12, from /tags/0.7.2, r1452]
|
||||
Actually disable Nagle when creating a connection (r1441)
|
||||
Return error when app asks for HTTPS over proxy connection (r1433)
|
||||
|
||||
|
||||
Serf 0.7.1 [2011-01-25, from /tags/0.7.1, r1432]
|
||||
Fix memory leak when using SSL (r1408, r1416)
|
||||
Fix build for blank apr-util directory (r1421)
|
||||
|
||||
|
||||
Serf 0.7.0 [2010-08-25, from /tags/0.7.0, r1407]
|
||||
Fix double free abort when destroying request buckets
|
||||
Fix test server in unit test framework to avoid random test failures
|
||||
Allow older Serf programs which don't use the new authn framework to still
|
||||
handle authn without forcing them to switch to the new framework. (r1401)
|
||||
Remove the SERF_DECLARE macros, preferring a .DEF file for Windows
|
||||
Barrier buckets now pass read_iovec to their wrapped bucket
|
||||
Fix HTTP header parsing to allow for empty header values
|
||||
|
||||
|
||||
Serf 0.6.1 [2010-05-14, from /tags/0.6.1, r1370]
|
||||
Generally: this release fixes problems with the 0.4.0 packaging
|
||||
Small compilation fix in outgoing.c for Windows builds
|
||||
|
||||
|
||||
Serf 0.6.0
|
||||
Not released.
|
||||
|
||||
|
||||
Serf 0.5.0
|
||||
Not released.
|
||||
|
||||
|
||||
Serf 0.4.0
|
||||
WITHDRAWN: this release misstated itself as 0.5.0; use a later release
|
||||
|
||||
Provide authn framework, supporting Basic, Digest, Kerberos (SSPI, GSS),
|
||||
along with proxy authn using Basic or Digest
|
||||
Added experimental listener framework, along with test_server.c
|
||||
Improvements and fixes to SSL support, including connection setup changes
|
||||
Experimental support for unrequested, arriving ("async") responses
|
||||
Experimental BWTP support using the async arrival feature
|
||||
Headers are combined on read (not write), to ease certain classes of parsing
|
||||
Experimental feature on aggregate buckets for a callback-on-empty
|
||||
Fix the bucket allocator for when APR is using its pool debugging features
|
||||
Proxy support in the serf_get testing utility
|
||||
Fix to include the port number in the Host header
|
||||
serf_get propagates errors from the response, instead of aborting (Issue 52)
|
||||
Added serf_lib_version() for runtime version tests
|
||||
|
||||
|
||||
Serf 0.3.1 [2010-02-14, from /tags/0.3.1, r1322]
|
||||
Fix loss of error on request->setup() callback. (Issue 47)
|
||||
Support APR 2.x. (Issue 48)
|
||||
Fixed slowdown in aggregate bucket with millions of child buckets
|
||||
Avoid hang in apr_pollset_poll() by unclosed connections after fork()
|
||||
|
||||
|
||||
Serf 0.3.0 [2009-01-26, from /tags/0.3.0, r1217]
|
||||
Support LTFLAGS override as a config-time env. variable (Issue 44)
|
||||
Fix CUTest test harness compilation on Solaris (Issue 43)
|
||||
Fix small race condition in OpenSSL initialization (Issue 39)
|
||||
Handle content streams larger than 4GB on 32-bit OSes (Issue 41)
|
||||
Fix test_ssl.c compilation with mingw+msys
|
||||
Fix conn close segfault by explicitly closing conn when pool is destroyed
|
||||
Expose the depth of the SSL certificate so the validator can use that info
|
||||
Fix socket address family issue when opening a connection to a proxy
|
||||
Provide new API to take snapshots of buckets
|
||||
Implement snapshot API for simple and aggregate buckets
|
||||
Build with bundled apr and apr-util VPATH builds
|
||||
Build with bundled OpenSSL builds
|
||||
|
||||
|
||||
Serf 0.2.0 [2008-06-06, from /tags/0.2.0, r1189]
|
||||
Enable use of external event loop: serf_create_context_ex
|
||||
Enable adding new requests at the beginning of the request queue
|
||||
Handle 'Connection:close' headers
|
||||
Enable limiting the number of outstanding requests
|
||||
Add readline function to simple buckets
|
||||
Concatenate repeated headers using comma as separator, as per RFC 2616,
|
||||
section 4.2. (Issue 29)
|
||||
Add proxy server support
|
||||
Add progress feedback support. (Issue 11)
|
||||
Provide new API to simplify use of proxy and progress feedback support
|
||||
Add callback to validate SSL server certificates. (Issue 31)
|
||||
Add new test framework
|
||||
Send current version string in the test programs (Issue 21)
|
||||
Bugfixes:
|
||||
Fix segfault with epoll when removing a NULL socket
|
||||
Reset OpenSSL thread-safety callbacks when apr_terminate() called
|
||||
Do not remove the socket from the pollset on pool cleanup
|
||||
Do not issue double close on skt w/second one being close(-1) (Issue 33)
|
||||
|
||||
|
||||
Serf 0.1.2 [2007-06-18, from /tags/0.1.2, r1115]
|
||||
Enable thread-safety with OpenSSL (Issue 19)
|
||||
Teach serfmake to install headers into include/serf-0
|
||||
Be more tolerant when servers close the connection without telling us
|
||||
Do not open the connection until we have requests to deliver
|
||||
Fix serfmake to produce the library that corresponds to the minor version
|
||||
Fix a memory leak with the socket bucket (Issue 14)
|
||||
Fix uninitialized branch in serf_spider (Issue 15)
|
||||
|
||||
|
||||
Serf 0.1.1 [2007-05-12, from /tags/0.1.1, r1105]
|
||||
Add SSL client certificate support
|
||||
Implement optimized iovec reads for header buckets
|
||||
Fix up 'make clean' and 'make distclean' (Issues 9, 10)
|
||||
Add SERF_VERSION_AT_LEAST macro
|
||||
Remove abort() calls (Issue 13)
|
||||
|
||||
|
||||
Serf 0.1.0 [2006-12-14, from /tags/0.1.0, r1087]
|
||||
Initial packaged release
|
201
LICENSE
Normal file
201
LICENSE
Normal file
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
171
Makefile.in
Normal file
171
Makefile.in
Normal file
@ -0,0 +1,171 @@
|
||||
#
|
||||
# Makefile for Serf
|
||||
#
|
||||
srcdir = @srcdir@
|
||||
VPATH = @srcdir@
|
||||
|
||||
SERF_MAJOR_VERSION=@SERF_MAJOR_VERSION@
|
||||
SERF_DOTTED_VERSION=@SERF_DOTTED_VERSION@
|
||||
|
||||
OBJECTS = buckets/aggregate_buckets.lo buckets/request_buckets.lo context.lo \
|
||||
buckets/buckets.lo buckets/simple_buckets.lo buckets/file_buckets.lo \
|
||||
buckets/mmap_buckets.lo buckets/socket_buckets.lo \
|
||||
buckets/response_body_buckets.lo buckets/response_buckets.lo \
|
||||
buckets/headers_buckets.lo \
|
||||
buckets/allocator.lo buckets/dechunk_buckets.lo \
|
||||
buckets/deflate_buckets.lo buckets/limit_buckets.lo \
|
||||
buckets/ssl_buckets.lo buckets/barrier_buckets.lo \
|
||||
buckets/chunk_buckets.lo buckets/bwtp_buckets.lo \
|
||||
buckets/iovec_buckets.lo \
|
||||
incoming.lo outgoing.lo ssltunnel.lo \
|
||||
auth/auth.lo auth/auth_basic.lo auth/auth_digest.lo \
|
||||
auth/auth_kerb.lo auth/auth_kerb_gss.lo
|
||||
|
||||
TARGET_LIB=libserf-$(SERF_MAJOR_VERSION).la
|
||||
|
||||
TEST_OBJECTS = test/serf_get.lo test/serf_response.lo test/serf_request.lo \
|
||||
test/serf_spider.lo test/serf_server.lo test/serf_bwtp.lo
|
||||
|
||||
TEST_SUITE_OBJECTS = test/CuTest.lo test/test_all.lo test/test_util.lo \
|
||||
test/test_buckets.lo test/test_context.lo \
|
||||
test/test_ssl.lo test/server/test_server.lo \
|
||||
test/server/test_sslserver.lo
|
||||
|
||||
PROGRAMS = $(TEST_OBJECTS:.lo=) test/test_all
|
||||
|
||||
TESTCASES = test/testcases/simple.response \
|
||||
test/testcases/chunked-empty.response test/testcases/chunked.response \
|
||||
test/testcases/chunked-trailers.response \
|
||||
test/testcases/deflate.response
|
||||
|
||||
HEADERS = serf.h serf_bucket_types.h serf_bucket_util.h
|
||||
|
||||
prefix=@prefix@
|
||||
exec_prefix=@exec_prefix@
|
||||
libdir=@libdir@
|
||||
includedir=@includedir@
|
||||
pkgconfigdir=$(libdir)/pkgconfig
|
||||
|
||||
LIBTOOL = @APR_LIBTOOL@
|
||||
LTFLAGS = @LTFLAGS@ --tag=CC
|
||||
CC = @CC@
|
||||
CFLAGS = @EXTRA_CFLAGS@ @CFLAGS@
|
||||
CPPFLAGS = @EXTRA_CPPFLAGS@ @CPPFLAGS@
|
||||
INCLUDES = -I$(srcdir) @APR_INCLUDES@ @APU_INCLUDES@ @EXTRA_INCLUDES@
|
||||
MKDIR = @mkdir_p@
|
||||
INSTALL = @INSTALL@
|
||||
|
||||
LDFLAGS = @EXTRA_LDFLAGS@ @LDFLAGS@
|
||||
LIBS = @EXTRA_LIBS@ @SERF_LIBS@ -lz -lssl -lcrypto
|
||||
|
||||
all: $(TARGET_LIB) $(PROGRAMS)
|
||||
|
||||
context.lo: context.c $(HEADERS)
|
||||
incoming.lo: incoming.c $(HEADERS)
|
||||
outgoing.lo: outgoing.c $(HEADERS)
|
||||
ssltunnel.lo: ssltunnel.c $(HEADERS)
|
||||
buckets/aggregate_buckets.lo: buckets/aggregate_buckets.c $(HEADERS)
|
||||
buckets/request_buckets.lo: buckets/request_buckets.c $(HEADERS)
|
||||
buckets/buckets.lo: buckets/buckets.c $(HEADERS)
|
||||
buckets/simple_buckets.lo: buckets/simple_buckets.c $(HEADERS)
|
||||
buckets/file_buckets.lo: buckets/file_buckets.c $(HEADERS)
|
||||
buckets/mmap_buckets.lo: buckets/mmap_buckets.c $(HEADERS)
|
||||
buckets/socket_buckets.lo: buckets/socket_buckets.c $(HEADERS)
|
||||
buckets/response_body_buckets.lo: buckets/response_body_buckets.c $(HEADERS)
|
||||
buckets/response_buckets.lo: buckets/response_buckets.c $(HEADERS)
|
||||
buckets/headers_buckets.lo: buckets/headers_buckets.c $(HEADERS)
|
||||
buckets/allocator.lo: buckets/allocator.c $(HEADERS)
|
||||
buckets/dechunk_buckets.lo: buckets/dechunk_buckets.c $(HEADERS)
|
||||
buckets/deflate_buckets.lo: buckets/deflate_buckets.c $(HEADERS)
|
||||
buckets/limit_buckets.lo: buckets/limit_buckets.c $(HEADERS)
|
||||
buckets/ssl_buckets.lo: buckets/ssl_buckets.c $(HEADERS)
|
||||
buckets/barrier_buckets.lo: buckets/barrier_buckets.c $(HEADERS)
|
||||
buckets/chunk_buckets.lo: buckets/chunk_buckets.c $(HEADERS)
|
||||
buckets/bwtp_buckets.lo: buckets/bwtp_buckets.c $(HEADERS)
|
||||
buckets/iovec_buckets.lo: buckets/iovec_buckets.c $(HEADERS)
|
||||
|
||||
test/serf_get.lo: test/serf_get.c $(HEADERS)
|
||||
test/serf_response.lo: test/serf_response.c $(HEADERS)
|
||||
test/serf_request.lo: test/serf_request.c $(HEADERS)
|
||||
test/serf_server.lo: test/serf_server.c $(HEADERS)
|
||||
test/serf_spider.lo: test/serf_spider.c $(HEADERS)
|
||||
test/serf_bwtp.lo: test/serf_bwtp.c $(HEADERS)
|
||||
|
||||
test/CuTest.lo: test/CuTest.c $(HEADERS)
|
||||
test/test_all.lo: test/test_all.c $(HEADERS)
|
||||
test/test_util.lo: test/test_util.c $(HEADERS)
|
||||
test/test_buckets.lo: test/test_buckets.c $(HEADERS)
|
||||
test/test_context.lo: test/test_context.c $(HEADERS)
|
||||
test/test_ssl.lo: test/test_ssl.c $(HEADERS)
|
||||
|
||||
$(TARGET_LIB): $(OBJECTS)
|
||||
$(LIBTOOL) $(LTFLAGS) --mode=link $(CC) $(LDFLAGS) -rpath $(libdir) -o $@ $(OBJECTS) $(LIBS)
|
||||
|
||||
test/serf_get: $(TARGET_LIB) test/serf_get.lo
|
||||
$(LIBTOOL) $(LTFLAGS) --mode=link $(CC) $(LDFLAGS) -static -o $@ $(TARGET_LIB) test/serf_get.lo $(LIBS)
|
||||
|
||||
test/serf_response: $(TARGET_LIB) test/serf_response.lo
|
||||
$(LIBTOOL) $(LTFLAGS) --mode=link $(CC) $(LDFLAGS) -static -o $@ $(TARGET_LIB) test/serf_response.lo $(LIBS)
|
||||
|
||||
test/serf_request: $(TARGET_LIB) test/serf_request.lo
|
||||
$(LIBTOOL) $(LTFLAGS) --mode=link $(CC) $(LDFLAGS) -static -o $@ $(TARGET_LIB) test/serf_request.lo $(LIBS)
|
||||
|
||||
test/serf_server: $(TARGET_LIB) test/serf_server.lo
|
||||
$(LIBTOOL) $(LTFLAGS) --mode=link $(CC) $(LDFLAGS) -static -o $@ $(TARGET_LIB) test/serf_server.lo $(LIBS)
|
||||
|
||||
test/serf_spider: $(TARGET_LIB) test/serf_spider.lo
|
||||
$(LIBTOOL) $(LTFLAGS) --mode=link $(CC) $(LDFLAGS) -static -o $@ $(TARGET_LIB) test/serf_spider.lo $(LIBS)
|
||||
|
||||
test/serf_bwtp: $(TARGET_LIB) test/serf_bwtp.lo
|
||||
$(LIBTOOL) $(LTFLAGS) --mode=link $(CC) $(LDFLAGS) -static -o $@ $(TARGET_LIB) test/serf_bwtp.lo $(LIBS)
|
||||
|
||||
test/test_all: $(TARGET_LIB) $(TEST_SUITE_OBJECTS)
|
||||
$(LIBTOOL) $(LTFLAGS) --mode=link $(CC) $(LDFLAGS) -static -o $@ $(TARGET_LIB) $(TEST_SUITE_OBJECTS) $(LIBS)
|
||||
|
||||
check: test/serf_response test/test_all
|
||||
@for i in $(TESTCASES); \
|
||||
do echo "== Testing $$i =="; \
|
||||
./test/serf_response $(srcdir)/$$i; \
|
||||
done;
|
||||
@echo "== Running test_all ==";
|
||||
@./test/test_all
|
||||
|
||||
install: $(TARGET_LIB)
|
||||
$(MKDIR) $(DESTDIR)$(libdir) $(DESTDIR)$(includedir) $(DESTDIR)$(pkgconfigdir)
|
||||
$(LIBTOOL) $(LTFLAGS) --mode=install $(INSTALL) -m 644 $(TARGET_LIB) $(DESTDIR)$(libdir)
|
||||
for i in $(HEADERS); do \
|
||||
$(INSTALL) -m 644 $(srcdir)/$$i $(DESTDIR)$(includedir); \
|
||||
done
|
||||
$(INSTALL) -m 644 serf.pc $(DESTDIR)$(pkgconfigdir)/serf-$(SERF_MAJOR_VERSION).pc
|
||||
|
||||
clean:
|
||||
rm -f $(TARGET_LIB) $(OBJECTS) $(OBJECTS:.lo=.o) $(PROGRAMS) $(TEST_OBJECTS) $(TEST_OBJECTS:.lo=.o) $(TEST_SUITE_OBJECTS) $(TEST_SUITE_OBJECTS:.lo=.o)
|
||||
for subdir in . buckets test; do \
|
||||
(cd $$subdir && rm -rf .libs) ; \
|
||||
done
|
||||
|
||||
distclean: clean
|
||||
rm -f Makefile serf.pc config.log config.status
|
||||
|
||||
.SUFFIXES:
|
||||
.SUFFIXES: .c .lo .o
|
||||
|
||||
mkdir-vpath:
|
||||
@if [ ! -d auth ]; then \
|
||||
$(MKDIR) auth; \
|
||||
fi;
|
||||
@if [ ! -d buckets ]; then \
|
||||
$(MKDIR) buckets; \
|
||||
fi;
|
||||
@if [ ! -d test ]; then \
|
||||
$(MKDIR) test; \
|
||||
fi;
|
||||
@if [ ! -d test/server ]; then \
|
||||
$(MKDIR) test/server; \
|
||||
fi;
|
||||
@if [ ! -r test/serftestca.pem ]; then \
|
||||
cp -f $(srcdir)/test/serftestca.pem test/; \
|
||||
fi;
|
||||
|
||||
.c.lo:
|
||||
$(LIBTOOL) $(LTFLAGS) --mode=compile $(CC) $(INCLUDES) $(CFLAGS) $(CPPFLAGS) -c -o $@ $< && touch $@
|
2
NOTICE
Normal file
2
NOTICE
Normal file
@ -0,0 +1,2 @@
|
||||
This product includes software developed by
|
||||
The Apache Software Foundation (http://www.apache.org/).
|
38
README
Normal file
38
README
Normal file
@ -0,0 +1,38 @@
|
||||
Welcome to serf, a high-performance asynchronous HTTP client library.
|
||||
|
||||
The serf library is a C-based HTTP client library built upon the Apache
|
||||
Portable Runtime (APR) library. It multiplexes connections, running the
|
||||
read/write communication asynchronously. Memory copies and transformations are
|
||||
kept to a minimum to provide high performance operation.
|
||||
|
||||
* Status: http://code.google.com/p/serf/wiki/
|
||||
* Site: http://code.google.com/p/serf/
|
||||
* Code: http://serf.googlecode.com/svn/
|
||||
* Issues: http://code.google.com/p/serf/issues/list
|
||||
* Mail: serf-dev@googlegroups.com
|
||||
* People: Justin Erenkrantz, Greg Stein
|
||||
|
||||
----
|
||||
|
||||
Quick guide for the impatient
|
||||
|
||||
(Unix)
|
||||
% ./configure
|
||||
% make
|
||||
% make install
|
||||
|
||||
----
|
||||
|
||||
Building serf from a Subversion checkout (non-packaged releases)
|
||||
|
||||
We suggest that you try out 'serfmake'.
|
||||
|
||||
% ./serfmake --prefix=/usr/local/serf --with-apr=/usr/local/apr install
|
||||
|
||||
If you want to use the autoconf build system and are using a Subversion
|
||||
checkout, you need to run buildconf and have APR and APR-util sources handy.
|
||||
|
||||
% ./buildconf --with-apr=/path/to/apr --with-apr-util=/path/to/apr-util
|
||||
(By default, buildconf will look in . and ../ for apr and apr-util.)
|
||||
|
||||
Then, you can use ./configure, make, etc.
|
421
auth/auth.c
Normal file
421
auth/auth.c
Normal file
@ -0,0 +1,421 @@
|
||||
/* Copyright 2009 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_private.h"
|
||||
#include "auth.h"
|
||||
|
||||
#include <apr.h>
|
||||
#include <apr_base64.h>
|
||||
#include <apr_strings.h>
|
||||
|
||||
static apr_status_t
|
||||
default_auth_response_handler(peer_t peer,
|
||||
int code,
|
||||
serf_connection_t *conn,
|
||||
serf_request_t *request,
|
||||
serf_bucket_t *response,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
/* Dispatch table of the authentication schemes serf supports, tried in
   array order (so earlier entries are preferred).  Each scheme appears
   twice: once keyed on HTTP 401 (server auth) and once on 407 (proxy
   auth).  The table is terminated by a sentinel entry with code 0. */
static const serf__authn_scheme_t serf_authn_schemes[] = {
    {
        401,
        "Basic",
        SERF_AUTHN_BASIC,
        serf__init_basic,
        serf__init_basic_connection,
        serf__handle_basic_auth,
        serf__setup_request_basic_auth,
        default_auth_response_handler,
    },
    {
        407,
        "Basic",
        SERF_AUTHN_BASIC,
        serf__init_basic,
        serf__init_basic_connection,
        serf__handle_basic_auth,
        serf__setup_request_basic_auth,
        default_auth_response_handler,
    },
    {
        401,
        "Digest",
        SERF_AUTHN_DIGEST,
        serf__init_digest,
        serf__init_digest_connection,
        serf__handle_digest_auth,
        serf__setup_request_digest_auth,
        serf__validate_response_digest_auth,
    },
    {
        407,
        "Digest",
        SERF_AUTHN_DIGEST,
        serf__init_digest,
        serf__init_digest_connection,
        serf__handle_digest_auth,
        serf__setup_request_digest_auth,
        serf__validate_response_digest_auth,
    },
#ifdef SERF_HAVE_KERB
    {
        401,
        "Negotiate",
        SERF_AUTHN_NEGOTIATE,
        serf__init_kerb,
        serf__init_kerb_connection,
        serf__handle_kerb_auth,
        serf__setup_request_kerb_auth,
        serf__validate_response_kerb_auth,
    },
    {
        407,
        "Negotiate",
        SERF_AUTHN_NEGOTIATE,
        serf__init_kerb,
        serf__init_kerb_connection,
        serf__handle_kerb_auth,
        serf__setup_request_kerb_auth,
        serf__validate_response_kerb_auth,
    },
#endif
    /* ADD NEW AUTHENTICATION IMPLEMENTATIONS HERE (as they're written) */

    /* sentinel */
    { 0 }
};
|
||||
|
||||
|
||||
/**
 * Baton passed to the response header callback function
 * (handle_auth_header).  Collects state while the *-Authenticate
 * headers of one response are scanned.
 */
typedef struct {
    int code;                 /* 401 (server auth) or 407 (proxy auth) */
    apr_status_t status;      /* result of the matched scheme's handler */
    const char *header;       /* "WWW-Authenticate" or "Proxy-Authenticate" */
    serf_request_t *request;
    serf_bucket_t *response;
    void *baton;              /* application baton, forwarded untouched */
    apr_pool_t *pool;
    const serf__authn_scheme_t *scheme;  /* scheme that matched, if any */
    const char *last_scheme_name;        /* scheme named by the last header seen */
} auth_baton_t;
|
||||
|
||||
/* Reads and discards all bytes in the response body. */
|
||||
static apr_status_t discard_body(serf_bucket_t *response)
|
||||
{
|
||||
apr_status_t status;
|
||||
const char *data;
|
||||
apr_size_t len;
|
||||
|
||||
while (1) {
|
||||
status = serf_bucket_read(response, SERF_READ_ALL_AVAIL, &data, &len);
|
||||
|
||||
if (status) {
|
||||
return status;
|
||||
}
|
||||
|
||||
/* feed me */
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * handle_auth_header is called for each header in the response. It filters
 * out the Authenticate headers (WWW or Proxy depending on what's needed) and
 * tries to find a matching scheme handler.
 *
 * Returns a non-0 value if a matching handler was found.
 */
static int handle_auth_header(void *baton,
                              const char *key,
                              const char *header)
{
    auth_baton_t *ab = baton;
    int scheme_found = FALSE;
    const char *auth_name;
    const char *auth_attr;
    const serf__authn_scheme_t *scheme = NULL;
    serf_connection_t *conn = ab->request->conn;
    serf_context_t *ctx = conn->ctx;

    /* We're only interested in xxxx-Authenticate headers. */
    if (strcmp(key, ab->header) != 0)
        return 0;

    /* Extract the authentication scheme name (token before the first
       space), and prepare for reading the attributes after it. */
    auth_attr = strchr(header, ' ');
    if (auth_attr) {
        auth_name = apr_pstrmemdup(ab->pool, header, auth_attr - header);
        ++auth_attr;
    }
    else
        auth_name = header;

    ab->last_scheme_name = auth_name;

    /* Find the matching authentication handler.
       Note that we don't reuse the auth scheme stored in the context,
       as that may have changed. (ex. fallback from ntlm to basic.) */
    for (scheme = serf_authn_schemes; scheme->code != 0; ++scheme) {
        /* Skip entries for the wrong status code (401 vs 407) or for
           schemes the application did not enable. */
        if (! (ab->code == scheme->code &&
               ctx->authn_types & scheme->type))
            continue;

        serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
                      "Client supports: %s\n", scheme->name);
        if (strcmp(auth_name, scheme->name) == 0) {
            serf__auth_handler_func_t handler = scheme->handle_func;
            apr_status_t status = 0;

            serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
                          "... matched: %s\n", scheme->name);
            /* If this is the first time we use this scheme on this connection,
               make sure to initialize the authentication handler first. */
            if (ab->code == 401 && ctx->authn_info.scheme != scheme) {
                status = scheme->init_ctx_func(ab->code, ctx, ctx->pool);
                if (!status) {
                    status = scheme->init_conn_func(ab->code, conn, conn->pool);

                    /* Only remember the scheme when both init steps worked. */
                    if (!status)
                        ctx->authn_info.scheme = scheme;
                    else
                        ctx->authn_info.scheme = NULL;
                }
            }
            else if (ab->code == 407 && ctx->proxy_authn_info.scheme != scheme) {
                status = scheme->init_ctx_func(ab->code, ctx, ctx->pool);
                if (!status) {
                    status = scheme->init_conn_func(ab->code, conn, conn->pool);

                    if (!status)
                        ctx->proxy_authn_info.scheme = scheme;
                    else
                        ctx->proxy_authn_info.scheme = NULL;
                }
            }

            /* Initialization succeeded (or was already done): let the
               scheme parse the challenge attributes. */
            if (!status) {
                scheme_found = TRUE;
                ab->scheme = scheme;
                status = handler(ab->code, ab->request, ab->response,
                                 header, auth_attr, ab->baton, ctx->pool);
            }

            /* If the authentication fails, cache the error for now. Try the
               next available scheme. If there's none raise the error. */
            if (status) {
                scheme_found = FALSE;
                scheme = NULL;
            }
            /* Let the caller know if the authentication setup was successful
               or not. */
            ab->status = status;

            break;
        }
    }

    /* If a matching scheme handler was found, we can stop iterating
       over the response headers - so return a non-0 value. */
    return scheme_found;
}
|
||||
|
||||
/* Dispatch authentication handling. This function matches the possible
|
||||
authentication mechanisms with those available. Server and proxy
|
||||
authentication are evaluated separately. */
|
||||
static apr_status_t dispatch_auth(int code,
|
||||
serf_request_t *request,
|
||||
serf_bucket_t *response,
|
||||
void *baton,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
serf_bucket_t *hdrs;
|
||||
|
||||
if (code == 401 || code == 407) {
|
||||
auth_baton_t ab = { 0 };
|
||||
const char *auth_hdr;
|
||||
|
||||
ab.code = code;
|
||||
ab.status = APR_SUCCESS;
|
||||
ab.request = request;
|
||||
ab.response = response;
|
||||
ab.baton = baton;
|
||||
ab.pool = pool;
|
||||
|
||||
/* Before iterating over all authn headers, check if there are any. */
|
||||
if (code == 401)
|
||||
ab.header = "WWW-Authenticate";
|
||||
else
|
||||
ab.header = "Proxy-Authenticate";
|
||||
|
||||
hdrs = serf_bucket_response_get_headers(response);
|
||||
auth_hdr = serf_bucket_headers_get(hdrs, ab.header);
|
||||
|
||||
if (!auth_hdr) {
|
||||
return SERF_ERROR_AUTHN_FAILED;
|
||||
}
|
||||
serf__log_skt(AUTH_VERBOSE, __FILE__, request->conn->skt,
|
||||
"%s authz required. Response header(s): %s\n",
|
||||
code == 401 ? "Server" : "Proxy", auth_hdr);
|
||||
|
||||
/* Iterate over all headers. Try to find a matching authentication scheme
|
||||
handler.
|
||||
|
||||
Note: it is possible to have multiple Authentication: headers. We do
|
||||
not want to combine them (per normal header combination rules) as that
|
||||
would make it hard to parse. Instead, we want to individually parse
|
||||
and handle each header in the response, looking for one that we can
|
||||
work with.
|
||||
*/
|
||||
serf_bucket_headers_do(hdrs,
|
||||
handle_auth_header,
|
||||
&ab);
|
||||
if (ab.status != APR_SUCCESS)
|
||||
return ab.status;
|
||||
|
||||
if (!ab.scheme || ab.scheme->name == NULL) {
|
||||
/* No matching authentication found. */
|
||||
return SERF_ERROR_AUTHN_NOT_SUPPORTED;
|
||||
}
|
||||
}
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
/* Read the headers of the response and try the available
   handlers if authentication or validation is needed.

   On a 401/407 the body is discarded, the matching scheme is dispatched
   and the request is requeued with auth headers; returns APR_EOF in that
   case.  On other responses the scheme's validate_response_func (if any)
   is given a chance to inspect the reply.  *CONSUMED_RESPONSE is set to
   1 whenever this function drained the response body itself. */
apr_status_t serf__handle_auth_response(int *consumed_response,
                                        serf_request_t *request,
                                        serf_bucket_t *response,
                                        void *baton,
                                        apr_pool_t *pool)
{
    apr_status_t status;
    serf_status_line sl;

    *consumed_response = 0;

    /* TODO: the response bucket was created by the application, not at all
       guaranteed that this is of type response_bucket!! */
    status = serf_bucket_response_status(response, &sl);
    if (SERF_BUCKET_READ_ERROR(status)) {
        return status;
    }
    /* No status line parsed yet: wait for more data. */
    if (!sl.version && (APR_STATUS_IS_EOF(status) ||
                        APR_STATUS_IS_EAGAIN(status))) {
        return status;
    }

    status = serf_bucket_response_wait_for_headers(response);
    if (status) {
        if (!APR_STATUS_IS_EOF(status)) {
            return status;
        }

        /* If status is APR_EOF, there were no headers to read.
           This can be ok in some situations, and it definitely
           means there's no authentication requested now. */
        return APR_SUCCESS;
    }

    if (sl.code == 401 || sl.code == 407) {
        /* Authentication requested. */

        /* Don't bother handling the authentication request if the response
           wasn't received completely yet. Serf will call serf__handle_auth_response
           again when more data is received. */
        status = discard_body(response);
        *consumed_response = 1;

        /* Discard all response body before processing authentication. */
        if (!APR_STATUS_IS_EOF(status)) {
            return status;
        }

        status = dispatch_auth(sl.code, request, response, baton, pool);
        if (status != APR_SUCCESS) {
            return status;
        }

        /* Requeue the request with the necessary auth headers. */
        /* ### Application doesn't know about this request! */
        serf_connection_priority_request_create(request->conn,
                                                request->setup,
                                                request->setup_baton);

        return APR_EOF;
    } else {
        /* Validate the response authn headers if needed. */
        serf__validate_response_func_t validate_resp;
        serf_connection_t *conn = request->conn;
        serf_context_t *ctx = conn->ctx;
        apr_status_t resp_status = APR_SUCCESS;

        if (ctx->authn_info.scheme) {
            validate_resp = ctx->authn_info.scheme->validate_response_func;
            resp_status = validate_resp(HOST, sl.code, conn, request, response,
                                        pool);
        }
        if (!resp_status && ctx->proxy_authn_info.scheme) {
            validate_resp = ctx->proxy_authn_info.scheme->validate_response_func;
            resp_status = validate_resp(PROXY, sl.code, conn, request, response,
                                        pool);
        }
        if (resp_status) {
            /* If there was an error in the final step of the authentication,
               consider the response body as invalid and discard it. */
            status = discard_body(response);
            *consumed_response = 1;
            if (!APR_STATUS_IS_EOF(status)) {
                return status;
            }
            /* The whole body was discarded, now return our error. */
            return resp_status;
        }
    }

    return APR_SUCCESS;
}
|
||||
|
||||
/**
|
||||
* base64 encode the authentication data and build an authentication
|
||||
* header in this format:
|
||||
* [SCHEME] [BASE64 of auth DATA]
|
||||
*/
|
||||
void serf__encode_auth_header(const char **header,
|
||||
const char *scheme,
|
||||
const char *data, apr_size_t data_len,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
apr_size_t encoded_len, scheme_len;
|
||||
char *ptr;
|
||||
|
||||
encoded_len = apr_base64_encode_len(data_len);
|
||||
scheme_len = strlen(scheme);
|
||||
|
||||
ptr = apr_palloc(pool, encoded_len + scheme_len + 1);
|
||||
*header = ptr;
|
||||
|
||||
apr_cpystrn(ptr, scheme, scheme_len + 1);
|
||||
ptr += scheme_len;
|
||||
*ptr++ = ' ';
|
||||
|
||||
apr_base64_encode(ptr, data, data_len);
|
||||
}
|
113
auth/auth.h
Normal file
113
auth/auth.h
Normal file
@ -0,0 +1,113 @@
|
||||
/* Copyright 2009 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef AUTH_H
#define AUTH_H

#include "auth_kerb.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Build an authentication header value of the form
   "<PROTOCOL> <base64(DATA)>" in *HEADER, allocated from POOL. */
void serf__encode_auth_header(const char **header, const char *protocol,
                              const char *data, apr_size_t data_len,
                              apr_pool_t *pool);

/** Basic authentication **/
apr_status_t serf__init_basic(int code,
                              serf_context_t *ctx,
                              apr_pool_t *pool);
apr_status_t serf__init_basic_connection(int code,
                                         serf_connection_t *conn,
                                         apr_pool_t *pool);
apr_status_t serf__handle_basic_auth(int code,
                                     serf_request_t *request,
                                     serf_bucket_t *response,
                                     const char *auth_hdr,
                                     const char *auth_attr,
                                     void *baton,
                                     apr_pool_t *pool);
apr_status_t serf__setup_request_basic_auth(peer_t peer,
                                            int code,
                                            serf_connection_t *conn,
                                            serf_request_t *request,
                                            const char *method,
                                            const char *uri,
                                            serf_bucket_t *hdrs_bkt);

/** Digest authentication **/
apr_status_t serf__init_digest(int code,
                               serf_context_t *ctx,
                               apr_pool_t *pool);
apr_status_t serf__init_digest_connection(int code,
                                          serf_connection_t *conn,
                                          apr_pool_t *pool);
apr_status_t serf__handle_digest_auth(int code,
                                      serf_request_t *request,
                                      serf_bucket_t *response,
                                      const char *auth_hdr,
                                      const char *auth_attr,
                                      void *baton,
                                      apr_pool_t *pool);
apr_status_t serf__setup_request_digest_auth(peer_t peer,
                                             int code,
                                             serf_connection_t *conn,
                                             serf_request_t *request,
                                             const char *method,
                                             const char *uri,
                                             serf_bucket_t *hdrs_bkt);
apr_status_t serf__validate_response_digest_auth(peer_t peer,
                                                 int code,
                                                 serf_connection_t *conn,
                                                 serf_request_t *request,
                                                 serf_bucket_t *response,
                                                 apr_pool_t *pool);

#ifdef SERF_HAVE_KERB
/** Kerberos authentication **/
apr_status_t serf__init_kerb(int code,
                             serf_context_t *ctx,
                             apr_pool_t *pool);
apr_status_t serf__init_kerb_connection(int code,
                                        serf_connection_t *conn,
                                        apr_pool_t *pool);
apr_status_t serf__handle_kerb_auth(int code,
                                    serf_request_t *request,
                                    serf_bucket_t *response,
                                    const char *auth_hdr,
                                    const char *auth_attr,
                                    void *baton,
                                    apr_pool_t *pool);
apr_status_t serf__setup_request_kerb_auth(peer_t peer,
                                           int code,
                                           serf_connection_t *conn,
                                           serf_request_t *request,
                                           const char *method,
                                           const char *uri,
                                           serf_bucket_t *hdrs_bkt);
apr_status_t serf__validate_response_kerb_auth(peer_t peer,
                                               int code,
                                               serf_connection_t *conn,
                                               serf_request_t *request,
                                               serf_bucket_t *response,
                                               apr_pool_t *pool);
#endif /* SERF_HAVE_KERB */

#ifdef __cplusplus
}
#endif

#endif /* !AUTH_H */
|
157
auth/auth_basic.c
Normal file
157
auth/auth_basic.c
Normal file
@ -0,0 +1,157 @@
|
||||
/* Copyright 2009 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/*** Basic authentication ***/
|
||||
|
||||
#include <serf.h>
|
||||
#include <serf_private.h>
|
||||
#include <auth/auth.h>
|
||||
|
||||
#include <apr.h>
|
||||
#include <apr_base64.h>
|
||||
#include <apr_strings.h>
|
||||
|
||||
/* Cached Basic authentication state: the request header to send and its
   precomputed "Basic <base64>" value.  Stored in the context's authn
   baton, so it is shared by all connections in the context. */
typedef struct basic_authn_info_t {
    const char *header;   /* "Authorization" or "Proxy-Authorization" */
    const char *value;    /* encoded credentials, NULL until a challenge
                             was answered */
} basic_authn_info_t;
|
||||
|
||||
/* Handle a Basic authentication challenge: parse the realm from
   AUTH_ATTR, ask the application for credentials and cache the encoded
   "user:password" header value for later requests.  Returns
   SERF_ERROR_AUTHN_FAILED when no credential callback is set,
   SERF_ERROR_AUTHN_MISSING_ATTRIBUTE when no realm is present, or the
   credential callback's error. */
apr_status_t
serf__handle_basic_auth(int code,
                        serf_request_t *request,
                        serf_bucket_t *response,
                        const char *auth_hdr,
                        const char *auth_attr,
                        void *baton,
                        apr_pool_t *pool)
{
    const char *tmp;
    apr_size_t tmp_len;
    serf_connection_t *conn = request->conn;
    serf_context_t *ctx = conn->ctx;
    serf__authn_info_t *authn_info = (code == 401) ? &ctx->authn_info :
                                                     &ctx->proxy_authn_info;
    basic_authn_info_t *basic_info = authn_info->baton;
    apr_status_t status;
    apr_pool_t *cred_pool;
    char *username, *password;

    /* Can't do Basic authentication if there's no callback to get
       username & password. */
    if (!ctx->cred_cb) {
        return SERF_ERROR_AUTHN_FAILED;
    }

    if (!authn_info->realm) {
        char *realm_name = NULL;
        const char *eq = strchr(auth_attr, '=');

        /* Only the (first) realm="..." attribute is recognized here. */
        if (eq && strncasecmp(auth_attr, "realm", 5) == 0) {
            realm_name = apr_pstrdup(pool, eq + 1);
            /* Strip surrounding double quotes, if both are present. */
            if (realm_name[0] == '\"') {
                apr_size_t realm_len;

                realm_len = strlen(realm_name);
                if (realm_name[realm_len - 1] == '\"') {
                    realm_name[realm_len - 1] = '\0';
                    realm_name++;
                }
            }
        }

        if (!realm_name) {
            return SERF_ERROR_AUTHN_MISSING_ATTRIBUTE;
        }

        /* Canonical realm key: "<scheme://host:port> realm". */
        authn_info->realm = apr_psprintf(conn->pool, "<%s://%s:%d> %s",
                                         conn->host_info.scheme,
                                         conn->host_info.hostname,
                                         conn->host_info.port,
                                         realm_name);
    }

    /* Ask the application for credentials */
    apr_pool_create(&cred_pool, pool);
    status = (*ctx->cred_cb)(&username, &password, request, baton,
                             code, authn_info->scheme->name,
                             authn_info->realm, cred_pool);
    if (status) {
        apr_pool_destroy(cred_pool);
        return status;
    }

    /* NOTE(review): the plaintext "user:password" string is allocated in
       the long-lived connection pool, so it stays in memory for the
       connection's lifetime — confirm whether this is intentional. */
    tmp = apr_pstrcat(conn->pool, username, ":", password, NULL);
    tmp_len = strlen(tmp);
    apr_pool_destroy(cred_pool);

    serf__encode_auth_header(&basic_info->value,
                             authn_info->scheme->name,
                             tmp, tmp_len, pool);
    basic_info->header = (code == 401) ? "Authorization" : "Proxy-Authorization";

    return APR_SUCCESS;
}
|
||||
|
||||
/* For Basic authentication we expect all authn info to be the same for all
|
||||
connections in the context (same realm, username, password). Therefore we
|
||||
can keep the header value in the context instead of per connection. */
|
||||
apr_status_t
|
||||
serf__init_basic(int code,
|
||||
serf_context_t *ctx,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
if (code == 401) {
|
||||
ctx->authn_info.baton = apr_pcalloc(pool, sizeof(basic_authn_info_t));
|
||||
} else {
|
||||
ctx->proxy_authn_info.baton = apr_pcalloc(pool, sizeof(basic_authn_info_t));
|
||||
}
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
apr_status_t
|
||||
serf__init_basic_connection(int code,
|
||||
serf_connection_t *conn,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
apr_status_t
|
||||
serf__setup_request_basic_auth(peer_t peer,
|
||||
int code,
|
||||
serf_connection_t *conn,
|
||||
serf_request_t *request,
|
||||
const char *method,
|
||||
const char *uri,
|
||||
serf_bucket_t *hdrs_bkt)
|
||||
{
|
||||
serf_context_t *ctx = conn->ctx;
|
||||
basic_authn_info_t *authn_info;
|
||||
|
||||
if (peer == HOST) {
|
||||
authn_info = ctx->authn_info.baton;
|
||||
} else {
|
||||
authn_info = ctx->proxy_authn_info.baton;
|
||||
}
|
||||
|
||||
if (authn_info && authn_info->header && authn_info->value) {
|
||||
serf_bucket_headers_setn(hdrs_bkt, authn_info->header,
|
||||
authn_info->value);
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
return SERF_ERROR_AUTHN_FAILED;
|
||||
}
|
486
auth/auth_digest.c
Normal file
486
auth/auth_digest.c
Normal file
@ -0,0 +1,486 @@
|
||||
/* Copyright 2009 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/*** Digest authentication ***/
|
||||
|
||||
#include <serf.h>
|
||||
#include <serf_private.h>
|
||||
#include <auth/auth.h>
|
||||
|
||||
#include <apr.h>
|
||||
#include <apr_base64.h>
|
||||
#include <apr_strings.h>
|
||||
#include <apr_uuid.h>
|
||||
#include <apr_md5.h>
|
||||
|
||||
/** Digest authentication, implements RFC 2617. **/

/* Stores the context information related to Digest authentication.
   The context is per connection. */
typedef struct digest_authn_info_t {
    /* nonce-count for digest authentication; incremented for every
       request that carries a Digest Authorization header */
    unsigned int digest_nc;

    const char *header;     /* "Authorization" or "Proxy-Authorization" */

    const char *ha1;        /* hex MD5 of "user:realm:password" */

    /* Challenge parameters, copied out of the *-Authenticate header. */
    const char *realm;
    const char *cnonce;     /* client nonce, generated lazily */
    const char *nonce;      /* server nonce */
    const char *opaque;
    const char *algorithm;
    const char *qop;        /* quality of protection, e.g. "auth" */
    const char *username;

    apr_pool_t *pool;       /* connection pool these strings live in */
} digest_authn_info_t;
|
||||
|
||||
/* Map V (expected range 0..15) to its lower-case hexadecimal digit. */
static char
int_to_hex(int v)
{
    if (v < 10)
        return (char)('0' + v);
    return (char)('a' + (v - 10));
}
|
||||
|
||||
/**
 * Convert a string of ASCII characters HASHVAL to its hexadecimal
 * representation.  Always reads exactly APR_MD5_DIGESTSIZE (16) bytes
 * from HASHVAL, producing 32 hex characters.
 *
 * The returned string will be allocated in the POOL and be null-terminated.
 */
static const char *
hex_encode(const unsigned char *hashval,
           apr_pool_t *pool)
{
    int i;
    char *hexval = apr_palloc(pool, (APR_MD5_DIGESTSIZE * 2) + 1);
    for (i = 0; i < APR_MD5_DIGESTSIZE; i++) {
        hexval[2 * i] = int_to_hex((hashval[i] >> 4) & 0xf);
        hexval[2 * i + 1] = int_to_hex(hashval[i] & 0xf);
    }
    hexval[APR_MD5_DIGESTSIZE * 2] = '\0';
    return hexval;
}
|
||||
|
||||
/**
 * Return a random client nonce, based on a freshly generated UUID
 * (formatted as 00112233-4455-6677-8899-AABBCCDDEEFF).  Note that
 * hex_encode() only consumes the first APR_MD5_DIGESTSIZE (16) bytes of
 * the 36-character UUID string, so the result is 32 hex characters.
 *
 * The returned string will be allocated in the POOL and be null-terminated.
 */
static const char *
random_cnonce(apr_pool_t *pool)
{
    apr_uuid_t uuid;
    char *buf = apr_palloc(pool, APR_UUID_FORMATTED_LENGTH + 1);

    apr_uuid_get(&uuid);
    apr_uuid_format(buf, &uuid);

    return hex_encode((unsigned char*)buf, pool);
}
|
||||
|
||||
/* Compute the Digest HA1 value: the hex-encoded MD5 hash of
   "USERNAME:REALM_NAME:PASSWORD" (RFC 2617, section 3.2.2.2).
   The result is allocated from POOL. */
static const char *
build_digest_ha1(const char *username,
                 const char *password,
                 const char *realm_name,
                 apr_pool_t *pool)
{
    const char *tmp;
    unsigned char ha1[APR_MD5_DIGESTSIZE];
    apr_status_t status;

    /* calculate ha1:
       MD5 hash of the combined user name, authentication realm and password */
    tmp = apr_psprintf(pool, "%s:%s:%s",
                       username,
                       realm_name,
                       password);
    /* NOTE(review): the apr_md5() status is captured but never checked. */
    status = apr_md5(ha1, tmp, strlen(tmp));

    return hex_encode(ha1, pool);
}
|
||||
|
||||
/* Compute the Digest HA2 value: the hex-encoded MD5 hash of
   "METHOD:URI" (RFC 2617, section 3.2.2.3), allocated from POOL.

   NOTE: returns NULL when QOP is set to anything other than "auth"
   (i.e. "auth-int" is not supported); callers must cope with that. */
static const char *
build_digest_ha2(const char *uri,
                 const char *method,
                 const char *qop,
                 apr_pool_t *pool)
{
    if (!qop || strcmp(qop, "auth") == 0) {
        const char *tmp;
        unsigned char ha2[APR_MD5_DIGESTSIZE];
        apr_status_t status;

        /* calculate ha2:
           MD5 hash of the combined method and URI */
        tmp = apr_psprintf(pool, "%s:%s",
                           method,
                           uri);
        /* NOTE(review): the apr_md5() status is captured but never checked. */
        status = apr_md5(ha2, tmp, strlen(tmp));

        return hex_encode(ha2, pool);
    } else {
        /* TODO: auth-int isn't supported! */
    }

    return NULL;
}
|
||||
|
||||
/* Build the value of a Digest (Proxy-)Authorization header for a request
   with method METHOD on PATH, from the challenge state in DIGEST_INFO.
   The header string is allocated from POOL.

   NOTE(review): when the qop is "auth-int", build_digest_ha2() returns
   NULL and that NULL flows into the "%s" formats below — confirm how
   apr_psprintf handles this; later serf versions report an error instead. */
static const char *
build_auth_header(digest_authn_info_t *digest_info,
                  const char *path,
                  const char *method,
                  apr_pool_t *pool)
{
    char *hdr;
    const char *ha2;
    const char *response;
    unsigned char response_hdr[APR_MD5_DIGESTSIZE];
    const char *response_hdr_hex;
    apr_status_t status;

    ha2 = build_digest_ha2(path, method, digest_info->qop, pool);

    hdr = apr_psprintf(pool,
                       "Digest realm=\"%s\","
                       " username=\"%s\","
                       " nonce=\"%s\","
                       " uri=\"%s\"",
                       digest_info->realm, digest_info->username,
                       digest_info->nonce,
                       path);

    if (digest_info->qop) {
        /* Generate the client nonce lazily, once per connection. */
        if (! digest_info->cnonce)
            digest_info->cnonce = random_cnonce(digest_info->pool);

        hdr = apr_psprintf(pool, "%s, nc=%08x, cnonce=\"%s\", qop=\"%s\"",
                           hdr,
                           digest_info->digest_nc,
                           digest_info->cnonce,
                           digest_info->qop);

        /* Build the response header:
           MD5 hash of the combined HA1 result, server nonce (nonce),
           request counter (nc), client nonce (cnonce),
           quality of protection code (qop) and HA2 result. */
        response = apr_psprintf(pool, "%s:%s:%08x:%s:%s:%s",
                                digest_info->ha1, digest_info->nonce,
                                digest_info->digest_nc,
                                digest_info->cnonce, digest_info->qop, ha2);
    } else {
        /* Build the response header:
           MD5 hash of the combined HA1 result, server nonce (nonce)
           and HA2 result. */
        response = apr_psprintf(pool, "%s:%s:%s",
                                digest_info->ha1, digest_info->nonce, ha2);
    }

    /* NOTE(review): the apr_md5() status is captured but never checked. */
    status = apr_md5(response_hdr, response, strlen(response));
    response_hdr_hex = hex_encode(response_hdr, pool);

    hdr = apr_psprintf(pool, "%s, response=\"%s\"", hdr, response_hdr_hex);

    if (digest_info->opaque) {
        hdr = apr_psprintf(pool, "%s, opaque=\"%s\"", hdr,
                           digest_info->opaque);
    }
    if (digest_info->algorithm) {
        hdr = apr_psprintf(pool, "%s, algorithm=\"%s\"", hdr,
                           digest_info->algorithm);
    }

    return hdr;
}
|
||||
|
||||
/* Handle a Digest authentication challenge: parse the key=value
   attribute list in AUTH_ATTR (realm, nonce, algorithm, qop, opaque),
   ask the application for credentials, and store the parameters plus
   the computed HA1 in the per-connection digest state for use when
   setting up subsequent requests. */
apr_status_t
serf__handle_digest_auth(int code,
                         serf_request_t *request,
                         serf_bucket_t *response,
                         const char *auth_hdr,
                         const char *auth_attr,
                         void *baton,
                         apr_pool_t *pool)
{
    char *attrs;
    char *nextkv;
    const char *realm_name = NULL;
    const char *nonce = NULL;
    const char *algorithm = NULL;
    const char *qop = NULL;
    const char *opaque = NULL;
    const char *key;
    serf_connection_t *conn = request->conn;
    serf_context_t *ctx = conn->ctx;
    serf__authn_info_t *authn_info = (code == 401) ? &ctx->authn_info :
                                                     &ctx->proxy_authn_info;
    digest_authn_info_t *digest_info = (code == 401) ? conn->authn_baton :
                                                       conn->proxy_authn_baton;
    apr_status_t status;
    apr_pool_t *cred_pool;
    char *username, *password;

    /* Can't do Digest authentication if there's no callback to get
       username & password. */
    if (!ctx->cred_cb) {
        return SERF_ERROR_AUTHN_FAILED;
    }

    /* Need a copy cuz we're going to write NUL characters into the string. */
    attrs = apr_pstrdup(pool, auth_attr);

    /* We're expecting a list of key=value pairs, separated by a comma.
       Ex. realm="SVN Digest",
       nonce="f+zTl/leBAA=e371bd3070adfb47b21f5fc64ad8cc21adc371a5",
       algorithm=MD5, qop="auth" */
    for ( ; (key = apr_strtok(attrs, ",", &nextkv)) != NULL; attrs = NULL) {
        char *val;

        val = strchr(key, '=');
        if (val == NULL)
            continue;
        *val++ = '\0';

        /* skip leading spaces */
        while (*key && *key == ' ')
            key++;

        /* If the value is quoted, then remove the quotes. */
        if (*val == '"') {
            apr_size_t last = strlen(val) - 1;

            if (val[last] == '"') {
                val[last] = '\0';
                val++;
            }
        }

        if (strcmp(key, "realm") == 0)
            realm_name = val;
        else if (strcmp(key, "nonce") == 0)
            nonce = val;
        else if (strcmp(key, "algorithm") == 0)
            algorithm = val;
        else if (strcmp(key, "qop") == 0)
            qop = val;
        else if (strcmp(key, "opaque") == 0)
            opaque = val;

        /* Ignore all unsupported attributes. */
    }

    if (!realm_name) {
        return SERF_ERROR_AUTHN_MISSING_ATTRIBUTE;
    }

    /* Canonical realm key: "<scheme://host:port> realm". */
    authn_info->realm = apr_psprintf(conn->pool, "<%s://%s:%d> %s",
                                     conn->host_info.scheme,
                                     conn->host_info.hostname,
                                     conn->host_info.port,
                                     realm_name);

    /* Ask the application for credentials */
    apr_pool_create(&cred_pool, pool);
    status = (*ctx->cred_cb)(&username, &password, request, baton,
                             code, authn_info->scheme->name,
                             authn_info->realm, cred_pool);
    if (status) {
        apr_pool_destroy(cred_pool);
        return status;
    }

    digest_info->header = (code == 401) ? "Authorization" :
                                          "Proxy-Authorization";

    /* Store the digest authentication parameters in the context relative
       to this connection, so we can use it to create the Authorization header
       when setting up requests. */
    digest_info->pool = conn->pool;
    digest_info->qop = apr_pstrdup(digest_info->pool, qop);
    digest_info->nonce = apr_pstrdup(digest_info->pool, nonce);
    digest_info->cnonce = NULL;
    digest_info->opaque = apr_pstrdup(digest_info->pool, opaque);
    digest_info->algorithm = apr_pstrdup(digest_info->pool, algorithm);
    digest_info->realm = apr_pstrdup(digest_info->pool, realm_name);
    digest_info->username = apr_pstrdup(digest_info->pool, username);
    digest_info->digest_nc++;

    digest_info->ha1 = build_digest_ha1(username, password, digest_info->realm,
                                        digest_info->pool);

    apr_pool_destroy(cred_pool);

    /* If the handshake is finished tell serf it can send as many requests
       as it likes. */
    serf_connection_set_max_outstanding_requests(conn, 0);

    return APR_SUCCESS;
}
|
||||
|
||||
apr_status_t
|
||||
serf__init_digest(int code,
|
||||
serf_context_t *ctx,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
apr_status_t
|
||||
serf__init_digest_connection(int code,
|
||||
serf_connection_t *conn,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
/* Digest authentication is done per connection, so keep all progress
|
||||
information per connection. */
|
||||
if (code == 401) {
|
||||
conn->authn_baton = apr_pcalloc(pool, sizeof(digest_authn_info_t));
|
||||
} else {
|
||||
conn->proxy_authn_baton = apr_pcalloc(pool, sizeof(digest_authn_info_t));
|
||||
}
|
||||
|
||||
/* Make serf send the initial requests one by one */
|
||||
serf_connection_set_max_outstanding_requests(conn, 1);
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
apr_status_t
|
||||
serf__setup_request_digest_auth(peer_t peer,
|
||||
int code,
|
||||
serf_connection_t *conn,
|
||||
serf_request_t *request,
|
||||
const char *method,
|
||||
const char *uri,
|
||||
serf_bucket_t *hdrs_bkt)
|
||||
{
|
||||
digest_authn_info_t *digest_info = (peer == HOST) ? conn->authn_baton :
|
||||
conn->proxy_authn_baton;
|
||||
apr_status_t status = APR_SUCCESS;
|
||||
|
||||
if (digest_info && digest_info->realm) {
|
||||
const char *value;
|
||||
apr_uri_t parsed_uri;
|
||||
|
||||
/* TODO: per request pool? */
|
||||
|
||||
/* Extract path from uri. */
|
||||
status = apr_uri_parse(conn->pool, uri, &parsed_uri);
|
||||
|
||||
/* Build a new Authorization header. */
|
||||
digest_info->header = (peer == HOST) ? "Authorization" :
|
||||
"Proxy-Authorization";
|
||||
value = build_auth_header(digest_info, parsed_uri.path, method,
|
||||
conn->pool);
|
||||
|
||||
serf_bucket_headers_setn(hdrs_bkt, digest_info->header,
|
||||
value);
|
||||
digest_info->digest_nc++;
|
||||
|
||||
/* Store the uri of this request on the serf_request_t object, to make
|
||||
it available when validating the Authentication-Info header of the
|
||||
matching response. */
|
||||
request->auth_baton = parsed_uri.path;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Validate the (Proxy-)Authentication-Info header of a response against the
   Digest state stored on the connection (mutual authentication, RFC 2617
   section 3.2.3).  Returns APR_SUCCESS when the header is absent or its
   rspauth digest matches; SERF_ERROR_AUTHN_FAILED on a mismatch. */
apr_status_t
serf__validate_response_digest_auth(peer_t peer,
                                    int code,
                                    serf_connection_t *conn,
                                    serf_request_t *request,
                                    serf_bucket_t *response,
                                    apr_pool_t *pool)
{
    const char *key;
    char *auth_attr;
    char *nextkv;
    const char *rspauth = NULL;
    const char *qop = NULL;
    const char *nc_str = NULL;
    serf_bucket_t *hdrs;
    digest_authn_info_t *digest_info = (peer == HOST) ? conn->authn_baton :
        conn->proxy_authn_baton;

    hdrs = serf_bucket_response_get_headers(response);

    /* Need a copy cuz we're going to write NUL characters into the string. */
    if (peer == HOST)
        auth_attr = apr_pstrdup(pool,
            serf_bucket_headers_get(hdrs, "Authentication-Info"));
    else
        auth_attr = apr_pstrdup(pool,
            serf_bucket_headers_get(hdrs, "Proxy-Authentication-Info"));

    /* If there's no Authentication-Info header there's nothing to validate. */
    if (! auth_attr)
        return APR_SUCCESS;

    /* We're expecting a list of key=value pairs, separated by a comma.
       Ex. rspauth="8a4b8451084b082be6b105e2b7975087",
           cnonce="346531653132652d303033392d3435", nc=00000007,
           qop=auth */
    /* apr_strtok keeps its position in nextkv; auth_attr is set to NULL after
       the first call so iteration continues over the same string. */
    for ( ; (key = apr_strtok(auth_attr, ",", &nextkv)) != NULL; auth_attr = NULL) {
        char *val;

        /* Split "key=value"; entries without '=' are silently skipped. */
        val = strchr(key, '=');
        if (val == NULL)
            continue;
        *val++ = '\0';

        /* skip leading spaces */
        while (*key && *key == ' ')
            key++;

        /* If the value is quoted, then remove the quotes. */
        if (*val == '"') {
            apr_size_t last = strlen(val) - 1;

            if (val[last] == '"') {
                val[last] = '\0';
                val++;
            }
        }

        if (strcmp(key, "rspauth") == 0)
            rspauth = val;
        else if (strcmp(key, "qop") == 0)
            qop = val;
        else if (strcmp(key, "nc") == 0)
            nc_str = val;
    }

    if (rspauth) {
        const char *ha2, *tmp, *resp_hdr_hex;
        unsigned char resp_hdr[APR_MD5_DIGESTSIZE];
        /* The request path was stored here by serf__setup_request_digest_auth. */
        const char *req_uri = request->auth_baton;

        /* Recompute the expected rspauth: HA2 uses an empty method per
           RFC 2617, then MD5(HA1:nonce:nc:cnonce:qop:HA2). */
        /* NOTE(review): qop, nc_str and digest_info->cnonce may be NULL here
           if the server omitted them — apr_psprintf would then format a null
           string; verify the server always sends all three with rspauth. */
        ha2 = build_digest_ha2(req_uri, "", qop, pool);
        tmp = apr_psprintf(pool, "%s:%s:%s:%s:%s:%s",
                           digest_info->ha1, digest_info->nonce, nc_str,
                           digest_info->cnonce, digest_info->qop, ha2);
        apr_md5(resp_hdr, tmp, strlen(tmp));
        resp_hdr_hex = hex_encode(resp_hdr, pool);

        /* Incorrect response-digest in Authentication-Info header. */
        if (strcmp(rspauth, resp_hdr_hex) != 0) {
            return SERF_ERROR_AUTHN_FAILED;
        }
    }

    return APR_SUCCESS;
}
|
552
auth/auth_kerb.c
Normal file
552
auth/auth_kerb.c
Normal file
@ -0,0 +1,552 @@
|
||||
/* Copyright 2009 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "auth_kerb.h"
|
||||
|
||||
#ifdef SERF_HAVE_KERB
|
||||
|
||||
/** These functions implement SPNEGO-based Kerberos and NTLM authentication,
|
||||
* using either GSS-API (RFC 2743) or SSPI on Windows.
|
||||
* The HTTP message exchange is documented in RFC 4559.
|
||||
**/
|
||||
|
||||
#include <serf.h>
|
||||
#include <serf_private.h>
|
||||
#include <auth/auth.h>
|
||||
|
||||
#include <apr.h>
|
||||
#include <apr_base64.h>
|
||||
#include <apr_strings.h>
|
||||
|
||||
/** TODO:
|
||||
** - This implements the SPNEGO mechanism, not Kerberos directly. Adapt
|
||||
** filename, functions & comments.
|
||||
** - send session key directly on new connections where we already know
|
||||
** the server requires Kerberos authn.
|
||||
** - Add a way for serf to give detailed error information back to the
|
||||
** application.
|
||||
**/
|
||||
|
||||
/* Authentication over HTTP using Kerberos
|
||||
*
|
||||
* Kerberos involves three servers:
|
||||
* - Authentication Server (AS): verifies users during login
|
||||
* - Ticket-Granting Server (TGS): issues proof of identity tickets
|
||||
* - HTTP server (S)
|
||||
*
|
||||
* Steps:
|
||||
* 0. User logs in to the AS and receives a TGS ticket. On workstations
|
||||
* where the login program doesn't support Kerberos, the user can use
|
||||
* 'kinit'.
|
||||
*
|
||||
* 1. C --> S: GET
|
||||
*
|
||||
* C <-- S: 401 Authentication Required
|
||||
* WWW-Authenticate: Negotiate
|
||||
*
|
||||
* -> app contacts the TGS to request a session key for the HTTP service
|
||||
* @ target host. The returned session key is encrypted with the HTTP
|
||||
* service's secret key, so we can safely send it to the server.
|
||||
*
|
||||
* 2. C --> S: GET
|
||||
* Authorization: Negotiate <Base64 encoded session key>
|
||||
* gss_api_ctx->state = gss_api_auth_in_progress;
|
||||
*
|
||||
* C <-- S: 200 OK
|
||||
* WWW-Authenticate: Negotiate <Base64 encoded server
|
||||
* authentication data>
|
||||
*
|
||||
* -> The server returned an (optional) key to proof itself to us. We check this
|
||||
* key with the TGS again. If it checks out, we can return the response
|
||||
* body to the application.
|
||||
*
|
||||
* Note: It's possible that the server returns 401 again in step 2, if the
|
||||
* Kerberos context isn't complete yet. This means there is 3rd step
|
||||
* where we'll send a request with an Authorization header to the
|
||||
* server. Some (simple) tests with mod_auth_kerb and MIT Kerberos 5 show
|
||||
* this never happens.
|
||||
*
|
||||
* Depending on the type of HTTP server, this handshake is required for either
|
||||
* every new connection, or for every new request! For more info see the next
|
||||
* comment on authn_persistence_state_t.
|
||||
*
|
||||
* Note: Step 1 of the handshake will only happen on the first connection, once
|
||||
* we know the server requires Kerberos authentication, the initial requests
|
||||
* on the other connections will include a session key, so we start at
|
||||
* step 2 in the handshake.
|
||||
* ### TODO: Not implemented yet!
|
||||
*/
|
||||
|
||||
/* Current state of the authentication of the current request. */
typedef enum {
    /* No (or a reset) security context; set before the first round trip and
       after serf__kerb_reset_sec_context. */
    gss_api_auth_not_started,
    /* serf__kerb_init_sec_context returned APR_EAGAIN: a further token
       exchange with the server is required. */
    gss_api_auth_in_progress,
    /* serf__kerb_init_sec_context returned APR_SUCCESS: handshake done. */
    gss_api_auth_completed,
} gss_api_auth_state;
|
||||
|
||||
/**
|
||||
authn_persistence_state_t: state that indicates if we are talking with a
|
||||
server that requires authentication only of the first request (stateful),
|
||||
or of each request (stateless).
|
||||
|
||||
INIT: Begin state. Authenticating the first request on this connection.
|
||||
UNDECIDED: we haven't identified the server yet, assume STATEFUL for now.
|
||||
Pipeline mode disabled, requests are sent only after the response off the
|
||||
previous request arrived.
|
||||
STATELESS: we know the server requires authentication for each request.
|
||||
On all new requests add the Authorization header with an initial SPNEGO
|
||||
token (created per request).
|
||||
To keep things simple, keep the connection in one by one mode.
|
||||
(otherwise we'd have to keep a queue of gssapi context objects to match
|
||||
the Negotiate header of the response with the session initiated by the
|
||||
mathing request).
|
||||
This state is an final state.
|
||||
STATEFUL: alright, we have authenticated the connection and for the server
|
||||
that is enough. Don't add an Authorization header to new requests.
|
||||
Serf will switch to pipelined mode.
|
||||
This state is not a final state, although in practical scenario's it will
|
||||
be. When we receive a 40x response from the server switch to STATELESS
|
||||
mode.
|
||||
|
||||
We start in state init for the first request until it is authenticated.
|
||||
|
||||
The rest of the state machine starts with the arrival of the response to the
|
||||
second request, and then goes on with each response:
|
||||
|
||||
--------
|
||||
| INIT | C --> S: GET request in response to 40x of the server
|
||||
-------- add [Proxy]-Authorization header
|
||||
|
|
||||
|
|
||||
------------
|
||||
| UNDECIDED| C --> S: GET request, assume stateful,
|
||||
------------ no [Proxy]-Authorization header
|
||||
|
|
||||
|
|
||||
|------------------------------------------------
|
||||
| |
|
||||
| C <-- S: 40x Authentication | C <-- S: 200 OK
|
||||
| Required |
|
||||
| |
|
||||
v v
|
||||
------------- ------------
|
||||
->| STATELESS |<------------------------------| STATEFUL |<--
|
||||
| ------------- C <-- S: 40x ------------ |
|
||||
* | | Authentication | | 200 OK
|
||||
| / Required | |
|
||||
----- -----/
|
||||
|
||||
**/
|
||||
/* Persistence of the server's authentication requirement; see the state
   diagram in the comment above for the transitions. */
typedef enum {
    /* Authenticating the first request on this connection. */
    pstate_init,
    /* Server type unknown yet; assume stateful, pipelining disabled. */
    pstate_undecided,
    /* Server authenticates every request; final state. */
    pstate_stateless,
    /* Connection-level authentication suffices; pipelining allowed. */
    pstate_stateful,
} authn_persistence_state_t;
|
||||
|
||||
|
||||
/* HTTP Service name, used to get the session key. */
|
||||
#define KRB_HTTP_SERVICE "HTTP"
|
||||
|
||||
/* Stores the context information related to Kerberos authentication. */
typedef struct
{
    /* Pool the security context and its tokens are allocated in
       (set to conn->pool in serf__init_kerb_connection). */
    apr_pool_t *pool;

    /* GSSAPI context */
    serf__kerb_context_t *gss_ctx;

    /* Current state of the authentication cycle. */
    gss_api_auth_state state;

    /* Current persistence state. */
    authn_persistence_state_t pstate;

    /* Pending header name/value ("[Proxy-]Authorization" + encoded token) to
       send on the next request; both are cleared after being sent once. */
    const char *header;
    const char *value;
} gss_authn_info_t;
|
||||
|
||||
/* On the initial 401 response of the server, request a session key from
   the Kerberos KDC to pass to the server, proving that we are who we
   claim to be. The session key can only be used with the HTTP service
   on the target host.

   TOKEN/TOKEN_LEN is the server-supplied token (may be NULL on the first
   round).  On success *BUF/*BUF_LEN receive the token to send to the server
   (allocated in gss_info->pool) and gss_info->state is advanced to
   in_progress (more rounds needed) or completed. */
static apr_status_t
gss_api_get_credentials(char *token, apr_size_t token_len,
                        const char *hostname,
                        const char **buf, apr_size_t *buf_len,
                        gss_authn_info_t *gss_info)
{
    serf__kerb_buffer_t input_buf;
    serf__kerb_buffer_t output_buf;
    apr_status_t status = APR_SUCCESS;

    /* If the server sent us a token, pass it to gss_init_sec_token for
       validation. */
    if (token) {
        input_buf.value = token;
        input_buf.length = token_len;
    } else {
        input_buf.value = 0;
        input_buf.length = 0;
    }

    /* Establish a security context to the server. */
    status = serf__kerb_init_sec_context
        (gss_info->gss_ctx,
         KRB_HTTP_SERVICE, hostname,
         &input_buf,
         &output_buf,
         gss_info->pool,
         gss_info->pool
        );

    switch(status) {
    case APR_SUCCESS:
        gss_info->state = gss_api_auth_completed;
        break;
    case APR_EAGAIN:
        /* Not an error: the handshake simply needs another round trip. */
        gss_info->state = gss_api_auth_in_progress;
        status = APR_SUCCESS;
        break;
    default:
        /* NOTE(review): output_buf is not filled on this path, so we must
           not touch *buf / *buf_len here. */
        return status;
    }

    /* Return the session key to our caller. */
    *buf = output_buf.value;
    *buf_len = output_buf.length;

    return status;
}
|
||||
|
||||
/* do_auth is invoked in two situations:
|
||||
- when a response from a server is received that contains an authn header
|
||||
(either from a 40x or 2xx response)
|
||||
- when a request is prepared on a connection with stateless authentication.
|
||||
|
||||
Read the header sent by the server (if any), invoke the gssapi authn
|
||||
code and use the resulting Server Ticket on the next request to the
|
||||
server. */
|
||||
static apr_status_t
|
||||
do_auth(peer_t peer,
|
||||
int code,
|
||||
gss_authn_info_t *gss_info,
|
||||
serf_connection_t *conn,
|
||||
const char *auth_hdr,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
serf_context_t *ctx = conn->ctx;
|
||||
serf__authn_info_t *authn_info = (peer == HOST) ? &ctx->authn_info :
|
||||
&ctx->proxy_authn_info;
|
||||
const char *tmp = NULL;
|
||||
char *token = NULL;
|
||||
apr_size_t tmp_len = 0, token_len = 0;
|
||||
apr_status_t status;
|
||||
|
||||
/* Is this a response from a host/proxy? auth_hdr should always be set. */
|
||||
if (code && auth_hdr) {
|
||||
const char *space = NULL;
|
||||
/* The server will return a token as attribute to the Negotiate key.
|
||||
Negotiate YGwGCSqGSIb3EgECAgIAb10wW6ADAgEFoQMCAQ+iTzBNoAMCARCiRgREa6
|
||||
mouMBAMFqKVdTGtfpZNXKzyw4Yo1paphJdIA3VOgncaoIlXxZLnkHiIHS2v65pVvrp
|
||||
bRIyjF8xve9HxpnNIucCY9c=
|
||||
|
||||
Read this base64 value, decode it and validate it so we're sure the
|
||||
server is who we expect it to be. */
|
||||
space = strchr(auth_hdr, ' ');
|
||||
|
||||
if (space) {
|
||||
token = apr_palloc(pool, apr_base64_decode_len(space + 1));
|
||||
token_len = apr_base64_decode(token, space + 1);
|
||||
}
|
||||
} else {
|
||||
/* This is a new request, not a retry in response to a 40x of the
|
||||
host/proxy.
|
||||
Only add the Authorization header if we know the server requires
|
||||
per-request authentication (stateless). */
|
||||
if (gss_info->pstate != pstate_stateless)
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
switch(gss_info->pstate) {
|
||||
case pstate_init:
|
||||
/* Nothing to do here */
|
||||
break;
|
||||
case pstate_undecided: /* Fall through */
|
||||
case pstate_stateful:
|
||||
{
|
||||
/* Switch to stateless mode, from now on handle authentication
|
||||
of each request with a new gss context. This is easiest to
|
||||
manage when sending requests one by one. */
|
||||
serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
|
||||
"Server requires per-request SPNEGO authn, "
|
||||
"switching to stateless mode.\n");
|
||||
|
||||
gss_info->pstate = pstate_stateless;
|
||||
serf_connection_set_max_outstanding_requests(conn, 1);
|
||||
break;
|
||||
}
|
||||
case pstate_stateless:
|
||||
/* Nothing to do here */
|
||||
break;
|
||||
}
|
||||
|
||||
/* If the server didn't provide us with a token, start with a new initial
|
||||
step in the SPNEGO authentication. */
|
||||
if (!token) {
|
||||
serf__kerb_reset_sec_context(gss_info->gss_ctx);
|
||||
gss_info->state = gss_api_auth_not_started;
|
||||
}
|
||||
|
||||
if (peer == HOST) {
|
||||
status = gss_api_get_credentials(token, token_len,
|
||||
conn->host_info.hostname,
|
||||
&tmp, &tmp_len,
|
||||
gss_info);
|
||||
} else {
|
||||
char *proxy_host;
|
||||
apr_getnameinfo(&proxy_host, conn->ctx->proxy_address, 0);
|
||||
status = gss_api_get_credentials(token, token_len, proxy_host,
|
||||
&tmp, &tmp_len,
|
||||
gss_info);
|
||||
}
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
/* On the next request, add an Authorization header. */
|
||||
if (tmp_len) {
|
||||
serf__encode_auth_header(&gss_info->value, authn_info->scheme->name,
|
||||
tmp,
|
||||
tmp_len,
|
||||
pool);
|
||||
gss_info->header = (peer == HOST) ?
|
||||
"Authorization" : "Proxy-Authorization";
|
||||
}
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
/* Context-wide initializer for the Negotiate/Kerberos scheme.  All state is
   kept per connection (see serf__init_kerb_connection), so nothing to do. */
apr_status_t
serf__init_kerb(int code,
                serf_context_t *ctx,
                apr_pool_t *pool)
{
    return APR_SUCCESS;
}
|
||||
|
||||
/* A new connection is created to a server that's known to use
|
||||
Kerberos. */
|
||||
apr_status_t
|
||||
serf__init_kerb_connection(int code,
|
||||
serf_connection_t *conn,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
gss_authn_info_t *gss_info;
|
||||
apr_status_t status;
|
||||
|
||||
gss_info = apr_pcalloc(pool, sizeof(*gss_info));
|
||||
gss_info->pool = conn->pool;
|
||||
gss_info->state = gss_api_auth_not_started;
|
||||
gss_info->pstate = pstate_init;
|
||||
status = serf__kerb_create_sec_context(&gss_info->gss_ctx, pool,
|
||||
gss_info->pool);
|
||||
|
||||
if (status) {
|
||||
return status;
|
||||
}
|
||||
|
||||
if (code == 401) {
|
||||
conn->authn_baton = gss_info;
|
||||
} else {
|
||||
conn->proxy_authn_baton = gss_info;
|
||||
}
|
||||
|
||||
/* Make serf send the initial requests one by one */
|
||||
serf_connection_set_max_outstanding_requests(conn, 1);
|
||||
|
||||
serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
|
||||
"Initialized Kerberos context for this connection.\n");
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
/* A 40x response was received, handle the authentication. */
|
||||
apr_status_t
|
||||
serf__handle_kerb_auth(int code,
|
||||
serf_request_t *request,
|
||||
serf_bucket_t *response,
|
||||
const char *auth_hdr,
|
||||
const char *auth_attr,
|
||||
void *baton,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
serf_connection_t *conn = request->conn;
|
||||
gss_authn_info_t *gss_info = (code == 401) ? conn->authn_baton :
|
||||
conn->proxy_authn_baton;
|
||||
|
||||
return do_auth(code == 401 ? HOST : PROXY,
|
||||
code,
|
||||
gss_info,
|
||||
request->conn,
|
||||
auth_hdr,
|
||||
pool);
|
||||
}
|
||||
|
||||
/* Setup the authn headers on this request message. */
|
||||
apr_status_t
|
||||
serf__setup_request_kerb_auth(peer_t peer,
|
||||
int code,
|
||||
serf_connection_t *conn,
|
||||
serf_request_t *request,
|
||||
const char *method,
|
||||
const char *uri,
|
||||
serf_bucket_t *hdrs_bkt)
|
||||
{
|
||||
gss_authn_info_t *gss_info = (peer == HOST) ? conn->authn_baton :
|
||||
conn->proxy_authn_baton;
|
||||
|
||||
/* If we have an ongoing authentication handshake, the handler of the
|
||||
previous response will have created the authn headers for this request
|
||||
already. */
|
||||
if (gss_info && gss_info->header && gss_info->value) {
|
||||
serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
|
||||
"Set Negotiate authn header on retried request.\n");
|
||||
|
||||
serf_bucket_headers_setn(hdrs_bkt, gss_info->header,
|
||||
gss_info->value);
|
||||
|
||||
/* We should send each token only once. */
|
||||
gss_info->header = NULL;
|
||||
gss_info->value = NULL;
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
switch (gss_info->pstate) {
|
||||
case pstate_init:
|
||||
/* We shouldn't normally arrive here, do nothing. */
|
||||
break;
|
||||
case pstate_undecided: /* fall through */
|
||||
serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
|
||||
"Assume for now that the server supports persistent "
|
||||
"SPNEGO authentication.\n");
|
||||
/* Nothing to do here. */
|
||||
break;
|
||||
case pstate_stateful:
|
||||
serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
|
||||
"SPNEGO on this connection is persistent, "
|
||||
"don't set authn header on next request.\n");
|
||||
/* Nothing to do here. */
|
||||
break;
|
||||
case pstate_stateless:
|
||||
{
|
||||
apr_status_t status;
|
||||
|
||||
/* Authentication on this connection is known to be stateless.
|
||||
Add an initial Negotiate token for the server, to bypass the
|
||||
40x response we know we'll otherwise receive.
|
||||
(RFC 4559 section 4.2) */
|
||||
serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
|
||||
"Add initial Negotiate header to request.\n");
|
||||
|
||||
status = do_auth(peer,
|
||||
code,
|
||||
gss_info,
|
||||
conn,
|
||||
0l, /* no response authn header */
|
||||
conn->pool);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
serf_bucket_headers_setn(hdrs_bkt, gss_info->header,
|
||||
gss_info->value);
|
||||
/* We should send each token only once. */
|
||||
gss_info->header = NULL;
|
||||
gss_info->value = NULL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
/* Function is called when 2xx responses are received. Normally we don't
 * have to do anything, except for the first response after the
 * authentication handshake. This specific response includes authentication
 * data which should be validated by the client (mutual authentication).
 */
apr_status_t
serf__validate_response_kerb_auth(peer_t peer,
                                  int code,
                                  serf_connection_t *conn,
                                  serf_request_t *request,
                                  serf_bucket_t *response,
                                  apr_pool_t *pool)
{
    gss_authn_info_t *gss_info;
    const char *auth_hdr_name;

    /* TODO: currently this function is only called when a response includes
       an Authenticate header. This header is optional. If the server does
       not provide this header on the first 2xx response, we will not promote
       the connection from undecided to stateful. This won't break anything,
       but means we stay in non-pipelining mode. */
    serf__log_skt(AUTH_VERBOSE, __FILE__, conn->skt,
                  "Validate Negotiate response header.\n");

    /* Pick the baton and response-header name for host vs. proxy. */
    if (peer == HOST) {
        gss_info = conn->authn_baton;
        auth_hdr_name = "WWW-Authenticate";
    } else {
        gss_info = conn->proxy_authn_baton;
        auth_hdr_name = "Proxy-Authenticate";
    }

    /* Handshake not finished yet: feed the server's token (if any) back
       into the GSS exchange. */
    if (gss_info->state != gss_api_auth_completed) {
        serf_bucket_t *hdrs;
        const char *auth_hdr_val;
        apr_status_t status;

        hdrs = serf_bucket_response_get_headers(response);
        auth_hdr_val = serf_bucket_headers_get(hdrs, auth_hdr_name);

        status = do_auth(peer, code, gss_info, conn, auth_hdr_val, pool);
        if (status)
            return status;
    }

    /* Advance the persistence state machine (see the diagram above). */
    if (gss_info->state == gss_api_auth_completed) {
        switch(gss_info->pstate) {
            case pstate_init:
                /* Authentication of the first request is done. */
                gss_info->pstate = pstate_undecided;
                break;
            case pstate_undecided:
                /* The server didn't request for authentication even though
                   we didn't add an Authorization header to previous
                   request. That means it supports persistent authentication. */
                gss_info->pstate = pstate_stateful;
                /* Re-enable unlimited pipelining on this connection. */
                serf_connection_set_max_outstanding_requests(conn, 0);
                break;
            default:
                /* Nothing to do here. */
                break;
        }
    }

    return APR_SUCCESS;
}
|
||||
|
||||
#endif /* SERF_HAVE_KERB */
|
112
auth/auth_kerb.h
Normal file
112
auth/auth_kerb.h
Normal file
@ -0,0 +1,112 @@
|
||||
/* Copyright 2010 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef AUTH_KERB_H
|
||||
#define AUTH_KERB_H
|
||||
|
||||
#include <apr.h>
|
||||
#include <apr_pools.h>
|
||||
|
||||
#if defined(SERF_HAVE_SSPI)
|
||||
#define SERF_HAVE_KERB
|
||||
#define SERF_USE_SSPI
|
||||
#elif defined(SERF_HAVE_GSSAPI)
|
||||
#define SERF_HAVE_KERB
|
||||
#define SERF_USE_GSSAPI
|
||||
#endif
|
||||
|
||||
#ifdef SERF_HAVE_KERB
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct serf__kerb_context_t serf__kerb_context_t;
|
||||
|
||||
/* Backend-neutral token buffer passed to/from serf__kerb_init_sec_context
   (mirrors a GSS-API gss_buffer_desc / SSPI SecBuffer). */
typedef struct serf__kerb_buffer_t {
    apr_size_t length;  /* Number of bytes in VALUE. */
    void *value;        /* Token bytes; ownership per the function contract. */
} serf__kerb_buffer_t;
|
||||
|
||||
/* Create outbound security context.
|
||||
*
|
||||
* All temporary allocations will be performed in SCRATCH_POOL, while security
|
||||
* context will be allocated in result_pool and will be destroyed automatically
|
||||
* on RESULT_POOL cleanup.
|
||||
*
|
||||
*/
|
||||
apr_status_t
|
||||
serf__kerb_create_sec_context(serf__kerb_context_t **ctx_p,
|
||||
apr_pool_t *scratch_pool,
|
||||
apr_pool_t *result_pool);
|
||||
|
||||
/* Initialize outbound security context.
|
||||
*
|
||||
* The function is used to build a security context between the client
|
||||
* application and a remote peer.
|
||||
*
|
||||
* CTX is pointer to existing context created using
|
||||
* serf__kerb_create_sec_context() function.
|
||||
*
|
||||
* SERVICE is name of Kerberos service name. Usually 'HTTP'. HOSTNAME is
|
||||
* canonical name of destination server. Caller should resolve server's alias
|
||||
* to canonical name.
|
||||
*
|
||||
* INPUT_BUF is pointer structure describing input token if any. Should be
|
||||
* zero length on first call.
|
||||
*
|
||||
* OUTPUT_BUF will be populated with pointer to output data that should send
|
||||
* to destination server. This buffer will be automatically freed on
|
||||
* RESULT_POOL cleanup.
|
||||
*
|
||||
* All temporary allocations will be performed in SCRATCH_POOL.
|
||||
*
|
||||
* Return value:
|
||||
* - APR_EAGAIN The client must send the output token to the server and wait
|
||||
* for a return token.
|
||||
*
|
||||
* - APR_SUCCESS The security context was successfully initialized. There is no
|
||||
* need for another serf__kerb_init_sec_context call. If the function returns
|
||||
* an output token, that is, if the OUTPUT_BUF is of nonzero length, that
|
||||
* token must be sent to the server.
|
||||
*
|
||||
* Other returns values indicates error.
|
||||
*/
|
||||
apr_status_t
|
||||
serf__kerb_init_sec_context(serf__kerb_context_t *ctx,
|
||||
const char *service,
|
||||
const char *hostname,
|
||||
serf__kerb_buffer_t *input_buf,
|
||||
serf__kerb_buffer_t *output_buf,
|
||||
apr_pool_t *scratch_pool,
|
||||
apr_pool_t *result_pool
|
||||
);
|
||||
|
||||
/*
|
||||
* Reset a previously created security context so we can start with a new one.
|
||||
*
|
||||
* This is triggered when the server requires per-request authentication,
|
||||
* where each request requires a new security context.
|
||||
*/
|
||||
apr_status_t
|
||||
serf__kerb_reset_sec_context(serf__kerb_context_t *ctx);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* SERF_HAVE_KERB */
|
||||
|
||||
#endif /* !AUTH_KERB_H */
|
214
auth/auth_kerb_gss.c
Normal file
214
auth/auth_kerb_gss.c
Normal file
@ -0,0 +1,214 @@
|
||||
/* Copyright 2009 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_private.h"
|
||||
#include "auth_kerb.h"
|
||||
|
||||
#ifdef SERF_USE_GSSAPI
|
||||
#include <apr_strings.h>
|
||||
#include <gssapi/gssapi.h>
|
||||
|
||||
|
||||
/* This module can support all authentication mechanisms as provided by
|
||||
the GSS-API implementation, but for now it only supports SPNEGO for
|
||||
Negotiate.
|
||||
SPNEGO can delegate authentication to Kerberos if supported by the
|
||||
host. */
|
||||
|
||||
#ifndef GSS_SPNEGO_MECHANISM
|
||||
static gss_OID_desc spnego_mech_oid = { 6, "\x2b\x06\x01\x05\x05\x02" };
|
||||
#define GSS_SPNEGO_MECHANISM &spnego_mech_oid
|
||||
#endif
|
||||
|
||||
/* GSS-API implementation of the opaque serf__kerb_context_t handle
   declared in auth_kerb.h. */
struct serf__kerb_context_t
{
    /* GSSAPI context */
    gss_ctx_id_t gss_ctx;

    /* Mechanism used to authenticate. */
    gss_OID gss_mech;
};
|
||||
|
||||
static void
|
||||
log_error(int verbose_flag, const char *filename,
|
||||
serf__kerb_context_t *ctx,
|
||||
OM_uint32 err_maj_stat,
|
||||
OM_uint32 err_min_stat,
|
||||
const char *msg)
|
||||
{
|
||||
OM_uint32 maj_stat, min_stat;
|
||||
gss_buffer_desc stat_buff;
|
||||
OM_uint32 msg_ctx = 0;
|
||||
|
||||
if (verbose_flag) {
|
||||
maj_stat = gss_display_status(&min_stat,
|
||||
err_maj_stat,
|
||||
GSS_C_GSS_CODE,
|
||||
ctx->gss_mech,
|
||||
&msg_ctx,
|
||||
&stat_buff);
|
||||
if (maj_stat == GSS_S_COMPLETE ||
|
||||
maj_stat == GSS_S_FAILURE) {
|
||||
maj_stat = gss_display_status(&min_stat,
|
||||
err_min_stat,
|
||||
GSS_C_MECH_CODE,
|
||||
ctx->gss_mech,
|
||||
&msg_ctx,
|
||||
&stat_buff);
|
||||
}
|
||||
|
||||
serf__log(verbose_flag, filename,
|
||||
"%s (%x,%d): %s\n", msg,
|
||||
err_maj_stat, err_min_stat, stat_buff.value);
|
||||
}
|
||||
}
|
||||
|
||||
/* Cleans the GSS context object, when the pool used to create it gets
   cleared or destroyed.  Registered via apr_pool_cleanup_register in
   serf__kerb_create_sec_context; DATA is the serf__kerb_context_t. */
static apr_status_t
cleanup_ctx(void *data)
{
    OM_uint32 min_stat;
    serf__kerb_context_t *ctx = data;

    if (ctx->gss_ctx != GSS_C_NO_CONTEXT) {
        /* Report APR_EGENERAL if the GSS layer refuses to delete the
           context; pool cleanup has no better recovery available. */
        if (gss_delete_sec_context(&min_stat, &ctx->gss_ctx,
                                   GSS_C_NO_BUFFER) == GSS_S_FAILURE)
            return APR_EGENERAL;
    }

    return APR_SUCCESS;
}
|
||||
|
||||
static apr_status_t
|
||||
cleanup_sec_buffer(void *data)
|
||||
{
|
||||
OM_uint32 min_stat;
|
||||
gss_buffer_desc *gss_buf = data;
|
||||
|
||||
gss_release_buffer(&min_stat, gss_buf);
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
apr_status_t
|
||||
serf__kerb_create_sec_context(serf__kerb_context_t **ctx_p,
|
||||
apr_pool_t *scratch_pool,
|
||||
apr_pool_t *result_pool)
|
||||
{
|
||||
serf__kerb_context_t *ctx;
|
||||
|
||||
ctx = apr_pcalloc(result_pool, sizeof(*ctx));
|
||||
|
||||
ctx->gss_ctx = GSS_C_NO_CONTEXT;
|
||||
ctx->gss_mech = GSS_SPNEGO_MECHANISM;
|
||||
|
||||
apr_pool_cleanup_register(result_pool, ctx,
|
||||
cleanup_ctx,
|
||||
apr_pool_cleanup_null);
|
||||
|
||||
*ctx_p = ctx;
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
apr_status_t
|
||||
serf__kerb_reset_sec_context(serf__kerb_context_t *ctx)
|
||||
{
|
||||
OM_uint32 dummy_stat;
|
||||
|
||||
if (ctx->gss_ctx)
|
||||
(void)gss_delete_sec_context(&dummy_stat, &ctx->gss_ctx,
|
||||
GSS_C_NO_BUFFER);
|
||||
ctx->gss_ctx = GSS_C_NO_CONTEXT;
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
apr_status_t
|
||||
serf__kerb_init_sec_context(serf__kerb_context_t *ctx,
|
||||
const char *service,
|
||||
const char *hostname,
|
||||
serf__kerb_buffer_t *input_buf,
|
||||
serf__kerb_buffer_t *output_buf,
|
||||
apr_pool_t *scratch_pool,
|
||||
apr_pool_t *result_pool
|
||||
)
|
||||
{
|
||||
gss_buffer_desc gss_input_buf = GSS_C_EMPTY_BUFFER;
|
||||
gss_buffer_desc *gss_output_buf_p;
|
||||
OM_uint32 gss_min_stat, gss_maj_stat;
|
||||
gss_name_t host_gss_name;
|
||||
gss_buffer_desc bufdesc;
|
||||
gss_OID dummy; /* unused */
|
||||
|
||||
/* Get the name for the HTTP service at the target host. */
|
||||
/* TODO: should be shared between multiple requests. */
|
||||
bufdesc.value = apr_pstrcat(scratch_pool, service, "@", hostname, NULL);
|
||||
bufdesc.length = strlen(bufdesc.value);
|
||||
serf__log(AUTH_VERBOSE, __FILE__, "Get principal for %s\n", bufdesc.value);
|
||||
gss_maj_stat = gss_import_name (&gss_min_stat, &bufdesc,
|
||||
GSS_C_NT_HOSTBASED_SERVICE,
|
||||
&host_gss_name);
|
||||
if(GSS_ERROR(gss_maj_stat)) {
|
||||
return APR_EGENERAL;
|
||||
}
|
||||
|
||||
/* If the server sent us a token, pass it to gss_init_sec_token for
|
||||
validation. */
|
||||
gss_input_buf.value = input_buf->value;
|
||||
gss_input_buf.length = input_buf->length;
|
||||
|
||||
gss_output_buf_p = apr_pcalloc(result_pool, sizeof(*gss_output_buf_p));
|
||||
|
||||
/* Establish a security context to the server. */
|
||||
gss_maj_stat = gss_init_sec_context
|
||||
(&gss_min_stat, /* minor_status */
|
||||
GSS_C_NO_CREDENTIAL, /* XXXXX claimant_cred_handle */
|
||||
&ctx->gss_ctx, /* gssapi context handle */
|
||||
host_gss_name, /* HTTP@server name */
|
||||
ctx->gss_mech, /* mech_type (SPNEGO) */
|
||||
GSS_C_MUTUAL_FLAG, /* ensure the peer authenticates itself */
|
||||
0, /* default validity period */
|
||||
GSS_C_NO_CHANNEL_BINDINGS, /* do not use channel bindings */
|
||||
&gss_input_buf, /* server token, initially empty */
|
||||
&dummy, /* actual mech type */
|
||||
gss_output_buf_p, /* output_token */
|
||||
NULL, /* ret_flags */
|
||||
NULL /* not interested in remaining validity */
|
||||
);
|
||||
|
||||
apr_pool_cleanup_register(result_pool, gss_output_buf_p,
|
||||
cleanup_sec_buffer,
|
||||
apr_pool_cleanup_null);
|
||||
|
||||
output_buf->value = gss_output_buf_p->value;
|
||||
output_buf->length = gss_output_buf_p->length;
|
||||
|
||||
switch(gss_maj_stat) {
|
||||
case GSS_S_COMPLETE:
|
||||
return APR_SUCCESS;
|
||||
case GSS_S_CONTINUE_NEEDED:
|
||||
return APR_EAGAIN;
|
||||
default:
|
||||
log_error(AUTH_VERBOSE, __FILE__, ctx,
|
||||
gss_maj_stat, gss_min_stat,
|
||||
"Error during Kerberos handshake");
|
||||
return APR_EGENERAL;
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* SERF_USE_GSSAPI */
|
268
auth/auth_kerb_sspi.c
Normal file
268
auth/auth_kerb_sspi.c
Normal file
@ -0,0 +1,268 @@
|
||||
/* Copyright 2010 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "auth_kerb.h"
|
||||
#include "serf.h"
|
||||
|
||||
#ifdef SERF_USE_SSPI
|
||||
#include <apr.h>
|
||||
#include <apr_strings.h>
|
||||
|
||||
#define SECURITY_WIN32
|
||||
#include <sspi.h>
|
||||
|
||||
/* SEC_E_MUTUAL_AUTH_FAILED is not defined in Windows Platform SDK 5.0. */
|
||||
#ifndef SEC_E_MUTUAL_AUTH_FAILED
|
||||
#define SEC_E_MUTUAL_AUTH_FAILED _HRESULT_TYPEDEF_(0x80090363L)
|
||||
#endif
|
||||
|
||||
struct serf__kerb_context_t
|
||||
{
|
||||
CredHandle sspi_credentials;
|
||||
CtxtHandle sspi_context;
|
||||
BOOL initalized;
|
||||
};
|
||||
|
||||
/* Map SECURITY_STATUS from SSPI to APR error code. Some error codes mapped
 * to our own codes and some to Win32 error codes:
 * http://support.microsoft.com/kb/113996
 *
 * Any status without an explicit mapping collapses to the generic
 * SERF_ERROR_AUTHN_FAILED.
 */
static apr_status_t
map_sspi_status(SECURITY_STATUS sspi_status)
{
    switch(sspi_status)
    {
    case SEC_E_INSUFFICIENT_MEMORY:
        return APR_FROM_OS_ERROR(ERROR_NO_SYSTEM_RESOURCES);
    case SEC_E_INVALID_HANDLE:
        return APR_FROM_OS_ERROR(ERROR_INVALID_HANDLE);
    case SEC_E_UNSUPPORTED_FUNCTION:
        return APR_FROM_OS_ERROR(ERROR_INVALID_FUNCTION);
    case SEC_E_TARGET_UNKNOWN:
        return APR_FROM_OS_ERROR(ERROR_BAD_NETPATH);
    case SEC_E_INTERNAL_ERROR:
        return APR_FROM_OS_ERROR(ERROR_INTERNAL_ERROR);
    case SEC_E_SECPKG_NOT_FOUND:
    case SEC_E_BAD_PKGID:
        return APR_FROM_OS_ERROR(ERROR_NO_SUCH_PACKAGE);
    case SEC_E_NO_IMPERSONATION:
        return APR_FROM_OS_ERROR(ERROR_CANNOT_IMPERSONATE);
    case SEC_E_NO_AUTHENTICATING_AUTHORITY:
        return APR_FROM_OS_ERROR(ERROR_NO_LOGON_SERVERS);
    case SEC_E_UNTRUSTED_ROOT:
        return APR_FROM_OS_ERROR(ERROR_TRUST_FAILURE);
    case SEC_E_WRONG_PRINCIPAL:
        return APR_FROM_OS_ERROR(ERROR_WRONG_TARGET_NAME);
    case SEC_E_MUTUAL_AUTH_FAILED:
        return APR_FROM_OS_ERROR(ERROR_MUTUAL_AUTH_FAILED);
    case SEC_E_TIME_SKEW:
        return APR_FROM_OS_ERROR(ERROR_TIME_SKEW);
    default:
        return SERF_ERROR_AUTHN_FAILED;
    }
}
|
||||
|
||||
/* Cleans the SSPI context object, when the pool used to create it gets
|
||||
cleared or destroyed. */
|
||||
static apr_status_t
|
||||
cleanup_ctx(void *data)
|
||||
{
|
||||
serf__kerb_context_t *ctx = data;
|
||||
|
||||
if (SecIsValidHandle(&ctx->sspi_context)) {
|
||||
DeleteSecurityContext(&ctx->sspi_context);
|
||||
SecInvalidateHandle(&ctx->sspi_context);
|
||||
}
|
||||
|
||||
if (SecIsValidHandle(&ctx->sspi_credentials)) {
|
||||
FreeCredentialsHandle(&ctx->sspi_context);
|
||||
SecInvalidateHandle(&ctx->sspi_context);
|
||||
}
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
/* Pool cleanup handler: return an SSPI-allocated output token buffer to
   the security package via FreeContextBuffer. */
static apr_status_t
cleanup_sec_buffer(void *data)
{
    FreeContextBuffer(data);

    return APR_SUCCESS;
}
|
||||
|
||||
apr_status_t
|
||||
serf__kerb_create_sec_context(serf__kerb_context_t **ctx_p,
|
||||
apr_pool_t *scratch_pool,
|
||||
apr_pool_t *result_pool)
|
||||
{
|
||||
SECURITY_STATUS sspi_status;
|
||||
serf__kerb_context_t *ctx;
|
||||
|
||||
ctx = apr_pcalloc(result_pool, sizeof(*ctx));
|
||||
|
||||
SecInvalidateHandle(&ctx->sspi_context);
|
||||
SecInvalidateHandle(&ctx->sspi_credentials);
|
||||
ctx->initalized = FALSE;
|
||||
|
||||
apr_pool_cleanup_register(result_pool, ctx,
|
||||
cleanup_ctx,
|
||||
apr_pool_cleanup_null);
|
||||
|
||||
sspi_status = AcquireCredentialsHandle(
|
||||
NULL, "Negotiate", SECPKG_CRED_OUTBOUND,
|
||||
NULL, NULL, NULL, NULL,
|
||||
&ctx->sspi_credentials, NULL);
|
||||
|
||||
if (FAILED(sspi_status)) {
|
||||
return map_sspi_status(sspi_status);
|
||||
}
|
||||
|
||||
*ctx_p = ctx;
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
static apr_status_t
|
||||
get_canonical_hostname(const char **canonname,
|
||||
const char *hostname,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
struct addrinfo hints;
|
||||
struct addrinfo *addrinfo;
|
||||
|
||||
ZeroMemory(&hints, sizeof(hints));
|
||||
hints.ai_flags = AI_CANONNAME;
|
||||
|
||||
if (getaddrinfo(hostname, NULL, &hints, &addrinfo)) {
|
||||
return apr_get_netos_error();
|
||||
}
|
||||
|
||||
if (addrinfo) {
|
||||
*canonname = apr_pstrdup(pool, addrinfo->ai_canonname);
|
||||
}
|
||||
else {
|
||||
*canonname = apr_pstrdup(pool, hostname);
|
||||
}
|
||||
|
||||
freeaddrinfo(addrinfo);
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
/* Drop any in-progress SSPI security context and mark the context as
   uninitialized so the next serf__kerb_init_sec_context call starts a
   brand-new handshake.  The credentials handle is kept for reuse. */
apr_status_t
serf__kerb_reset_sec_context(serf__kerb_context_t *ctx)
{
    if (SecIsValidHandle(&ctx->sspi_context)) {
        DeleteSecurityContext(&ctx->sspi_context);
        SecInvalidateHandle(&ctx->sspi_context);
    }

    ctx->initalized = FALSE;

    return APR_SUCCESS;
}
|
||||
|
||||
/* Run one step of the SSPI (Negotiate) handshake against SERVICE/HOSTNAME.
 *
 * INPUT_BUF holds the server's token (empty on the first step); the token
 * to send back is returned in OUTPUT_BUF.  The output token is allocated
 * by SSPI (ISC_REQ_ALLOCATE_MEMORY) and released via a cleanup registered
 * on RESULT_POOL.  Returns APR_SUCCESS when the context is established,
 * APR_EAGAIN when another round trip is needed, otherwise a mapped error.
 */
apr_status_t
serf__kerb_init_sec_context(serf__kerb_context_t *ctx,
                            const char *service,
                            const char *hostname,
                            serf__kerb_buffer_t *input_buf,
                            serf__kerb_buffer_t *output_buf,
                            apr_pool_t *scratch_pool,
                            apr_pool_t *result_pool
                            )
{
    SECURITY_STATUS status;
    ULONG actual_attr;
    SecBuffer sspi_in_buffer;
    SecBufferDesc sspi_in_buffer_desc;
    SecBuffer sspi_out_buffer;
    SecBufferDesc sspi_out_buffer_desc;
    char *target_name;
    apr_status_t apr_status;
    const char *canonname;

    /* Build the target SPN as "service/canonical-host". */
    apr_status = get_canonical_hostname(&canonname, hostname, scratch_pool);
    if (apr_status) {
        return apr_status;
    }
    target_name = apr_pstrcat(scratch_pool, service, "/", canonname, NULL);

    /* Prepare input buffer description. */
    sspi_in_buffer.BufferType = SECBUFFER_TOKEN;
    sspi_in_buffer.pvBuffer = input_buf->value;
    sspi_in_buffer.cbBuffer = input_buf->length;

    sspi_in_buffer_desc.cBuffers = 1;
    sspi_in_buffer_desc.pBuffers = &sspi_in_buffer;
    sspi_in_buffer_desc.ulVersion = SECBUFFER_VERSION;

    /* Output buffers. Output buffer will be allocated by system. */
    sspi_out_buffer.BufferType = SECBUFFER_TOKEN;
    sspi_out_buffer.pvBuffer = NULL;
    sspi_out_buffer.cbBuffer = 0;

    sspi_out_buffer_desc.cBuffers = 1;
    sspi_out_buffer_desc.pBuffers = &sspi_out_buffer;
    sspi_out_buffer_desc.ulVersion = SECBUFFER_VERSION;

    /* First call passes NULL for the existing context; subsequent calls
       continue the partially established one. */
    status = InitializeSecurityContext(
        &ctx->sspi_credentials,
        ctx->initalized ? &ctx->sspi_context : NULL,
        target_name,
        ISC_REQ_ALLOCATE_MEMORY
        | ISC_REQ_MUTUAL_AUTH
        | ISC_REQ_CONFIDENTIALITY,
        0,                          /* Reserved1 */
        SECURITY_NETWORK_DREP,
        &sspi_in_buffer_desc,
        0,                          /* Reserved2 */
        &ctx->sspi_context,
        &sspi_out_buffer_desc,
        &actual_attr,
        NULL);

    /* Ensure the SSPI-allocated token is freed with the result pool. */
    if (sspi_out_buffer.cbBuffer > 0) {
        apr_pool_cleanup_register(result_pool, sspi_out_buffer.pvBuffer,
                                  cleanup_sec_buffer,
                                  apr_pool_cleanup_null);
    }

    ctx->initalized = TRUE;

    /* Finish authentication if SSPI requires so. */
    if (status == SEC_I_COMPLETE_NEEDED
        || status == SEC_I_COMPLETE_AND_CONTINUE)
    {
        CompleteAuthToken(&ctx->sspi_context, &sspi_out_buffer_desc);
    }

    output_buf->value = sspi_out_buffer.pvBuffer;
    output_buf->length = sspi_out_buffer.cbBuffer;

    switch(status) {
    case SEC_I_COMPLETE_AND_CONTINUE:
    case SEC_I_CONTINUE_NEEDED:
        return APR_EAGAIN;

    case SEC_I_COMPLETE_NEEDED:
    case SEC_E_OK:
        return APR_SUCCESS;

    default:
        return map_sspi_status(status);
    }
}
|
||||
|
||||
#endif /* SERF_USE_SSPI */
|
400
buckets/aggregate_buckets.c
Normal file
400
buckets/aggregate_buckets.c
Normal file
@ -0,0 +1,400 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
|
||||
/* Should be an APR_RING? */
|
||||
typedef struct bucket_list {
|
||||
serf_bucket_t *bucket;
|
||||
struct bucket_list *next;
|
||||
} bucket_list_t;
|
||||
|
||||
typedef struct {
|
||||
bucket_list_t *list; /* active buckets */
|
||||
bucket_list_t *last; /* last bucket of the list */
|
||||
bucket_list_t *done; /* we finished reading this; now pending a destroy */
|
||||
|
||||
serf_bucket_aggregate_eof_t hold_open;
|
||||
void *hold_open_baton;
|
||||
|
||||
/* Does this bucket own its children? !0 if yes, 0 if not. */
|
||||
int bucket_owner;
|
||||
} aggregate_context_t;
|
||||
|
||||
|
||||
static void cleanup_aggregate(aggregate_context_t *ctx,
|
||||
serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
bucket_list_t *next_list;
|
||||
|
||||
/* If we finished reading a bucket during the previous read, then
|
||||
* we can now toss that bucket.
|
||||
*/
|
||||
while (ctx->done != NULL) {
|
||||
next_list = ctx->done->next;
|
||||
|
||||
if (ctx->bucket_owner) {
|
||||
serf_bucket_destroy(ctx->done->bucket);
|
||||
}
|
||||
serf_bucket_mem_free(allocator, ctx->done);
|
||||
|
||||
ctx->done = next_list;
|
||||
}
|
||||
}
|
||||
|
||||
void serf_bucket_aggregate_cleanup(
|
||||
serf_bucket_t *bucket, serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
aggregate_context_t *ctx = bucket->data;
|
||||
|
||||
cleanup_aggregate(ctx, allocator);
|
||||
}
|
||||
|
||||
/* Allocate and zero-initialize a fresh aggregate context.  By default the
   aggregate owns (and will destroy) its child buckets. */
static aggregate_context_t *create_aggregate(serf_bucket_alloc_t *allocator)
{
    aggregate_context_t *ctx =
        serf_bucket_mem_alloc(allocator, sizeof(*ctx));

    ctx->list = NULL;
    ctx->last = NULL;
    ctx->done = NULL;
    ctx->hold_open = NULL;
    ctx->hold_open_baton = NULL;
    ctx->bucket_owner = 1;

    return ctx;
}
|
||||
|
||||
serf_bucket_t *serf_bucket_aggregate_create(
|
||||
serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
aggregate_context_t *ctx;
|
||||
|
||||
ctx = create_aggregate(allocator);
|
||||
|
||||
return serf_bucket_create(&serf_bucket_type_aggregate, allocator, ctx);
|
||||
}
|
||||
|
||||
serf_bucket_t *serf__bucket_stream_create(
|
||||
serf_bucket_alloc_t *allocator,
|
||||
serf_bucket_aggregate_eof_t fn,
|
||||
void *baton)
|
||||
{
|
||||
serf_bucket_t *bucket = serf_bucket_aggregate_create(allocator);
|
||||
aggregate_context_t *ctx = bucket->data;
|
||||
|
||||
serf_bucket_aggregate_hold_open(bucket, fn, baton);
|
||||
|
||||
ctx->bucket_owner = 0;
|
||||
|
||||
return bucket;
|
||||
}
|
||||
|
||||
|
||||
/* Destroy the aggregate bucket: tear down all still-active children (when
   owned), then the deferred-destroy list, then the bucket itself. */
static void serf_aggregate_destroy_and_data(serf_bucket_t *bucket)
{
    aggregate_context_t *ctx = bucket->data;

    while (ctx->list) {
        bucket_list_t *cur = ctx->list;

        if (ctx->bucket_owner) {
            serf_bucket_destroy(cur->bucket);
        }
        ctx->list = cur->next;
        serf_bucket_mem_free(bucket->allocator, cur);
    }
    cleanup_aggregate(ctx, bucket->allocator);

    serf_default_destroy_and_data(bucket);
}
|
||||
|
||||
/* Morph an existing bucket in place into an empty aggregate bucket.
   NOTE(review): the bucket's previous data is not freed here — presumably
   callers transfer or release it beforehand; confirm at call sites. */
void serf_bucket_aggregate_become(serf_bucket_t *bucket)
{
    bucket->type = &serf_bucket_type_aggregate;
    bucket->data = create_aggregate(bucket->allocator);

    /* The allocator remains the same. */
}
|
||||
|
||||
|
||||
void serf_bucket_aggregate_prepend(
|
||||
serf_bucket_t *aggregate_bucket,
|
||||
serf_bucket_t *prepend_bucket)
|
||||
{
|
||||
aggregate_context_t *ctx = aggregate_bucket->data;
|
||||
bucket_list_t *new_list;
|
||||
|
||||
new_list = serf_bucket_mem_alloc(aggregate_bucket->allocator,
|
||||
sizeof(*new_list));
|
||||
new_list->bucket = prepend_bucket;
|
||||
new_list->next = ctx->list;
|
||||
|
||||
ctx->list = new_list;
|
||||
}
|
||||
|
||||
void serf_bucket_aggregate_append(
|
||||
serf_bucket_t *aggregate_bucket,
|
||||
serf_bucket_t *append_bucket)
|
||||
{
|
||||
aggregate_context_t *ctx = aggregate_bucket->data;
|
||||
bucket_list_t *new_list;
|
||||
|
||||
new_list = serf_bucket_mem_alloc(aggregate_bucket->allocator,
|
||||
sizeof(*new_list));
|
||||
new_list->bucket = append_bucket;
|
||||
new_list->next = NULL;
|
||||
|
||||
/* If we use APR_RING, this is trivial. So, wait.
|
||||
new_list->next = ctx->list;
|
||||
ctx->list = new_list;
|
||||
*/
|
||||
if (ctx->list == NULL) {
|
||||
ctx->list = new_list;
|
||||
ctx->last = new_list;
|
||||
}
|
||||
else {
|
||||
ctx->last->next = new_list;
|
||||
ctx->last = ctx->last->next;
|
||||
}
|
||||
}
|
||||
|
||||
void serf_bucket_aggregate_hold_open(serf_bucket_t *aggregate_bucket,
|
||||
serf_bucket_aggregate_eof_t fn,
|
||||
void *baton)
|
||||
{
|
||||
aggregate_context_t *ctx = aggregate_bucket->data;
|
||||
ctx->hold_open = fn;
|
||||
ctx->hold_open_baton = baton;
|
||||
}
|
||||
|
||||
void serf_bucket_aggregate_prepend_iovec(
|
||||
serf_bucket_t *aggregate_bucket,
|
||||
struct iovec *vecs,
|
||||
int vecs_count)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* Add in reverse order. */
|
||||
for (i = vecs_count - 1; i >= 0; i--) {
|
||||
serf_bucket_t *new_bucket;
|
||||
|
||||
new_bucket = serf_bucket_simple_create(vecs[i].iov_base,
|
||||
vecs[i].iov_len,
|
||||
NULL, NULL,
|
||||
aggregate_bucket->allocator);
|
||||
|
||||
serf_bucket_aggregate_prepend(aggregate_bucket, new_bucket);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
void serf_bucket_aggregate_append_iovec(
|
||||
serf_bucket_t *aggregate_bucket,
|
||||
struct iovec *vecs,
|
||||
int vecs_count)
|
||||
{
|
||||
serf_bucket_t *new_bucket;
|
||||
|
||||
new_bucket = serf_bucket_iovec_create(vecs, vecs_count,
|
||||
aggregate_bucket->allocator);
|
||||
|
||||
serf_bucket_aggregate_append(aggregate_bucket, new_bucket);
|
||||
}
|
||||
|
||||
/* Core read loop shared by serf_aggregate_read and
 * serf_aggregate_read_iovec: fill VECS (up to VECS_SIZE entries) with up to
 * REQUESTED bytes drawn from the child buckets in order.
 *
 * Children that report EOF are moved to ctx->done for deferred destruction
 * (their data must stay valid until the caller returns to us).  When the
 * list empties, the hold_open callback (if any) decides the final status,
 * otherwise APR_EOF.
 *
 * NOTE(review): if a child returns APR_SUCCESS with zero vecs, the loop
 * spins on that child — presumably children never do that; confirm
 * (cf. CHANGES issue 94, "infinite loop when server aborts conn").
 */
static apr_status_t read_aggregate(serf_bucket_t *bucket,
                                   apr_size_t requested,
                                   int vecs_size, struct iovec *vecs,
                                   int *vecs_used)
{
    aggregate_context_t *ctx = bucket->data;
    int cur_vecs_used;
    apr_status_t status;

    *vecs_used = 0;

    if (!ctx->list) {
        if (ctx->hold_open) {
            return ctx->hold_open(ctx->hold_open_baton, bucket);
        }
        else {
            return APR_EOF;
        }
    }

    status = APR_SUCCESS;
    while (requested) {
        serf_bucket_t *head = ctx->list->bucket;

        status = serf_bucket_read_iovec(head, requested, vecs_size, vecs,
                                        &cur_vecs_used);

        if (SERF_BUCKET_READ_ERROR(status))
            return status;

        /* Add the number of vecs we read to our running total. */
        *vecs_used += cur_vecs_used;

        if (cur_vecs_used > 0 || status) {
            bucket_list_t *next_list;

            /* If we got SUCCESS (w/bytes) or EAGAIN, we want to return now
             * as it isn't safe to read more without returning to our caller.
             */
            if (!status || APR_STATUS_IS_EAGAIN(status) || status == SERF_ERROR_WAIT_CONN) {
                return status;
            }

            /* However, if we read EOF, we can stash this bucket in a
             * to-be-freed list and move on to the next bucket. This ensures
             * that the bucket stays alive (so as not to violate our read
             * semantics). We'll destroy this list of buckets the next time
             * we are asked to perform a read operation - thus ensuring the
             * proper read lifetime.
             */
            next_list = ctx->list->next;
            ctx->list->next = ctx->done;
            ctx->done = ctx->list;
            ctx->list = next_list;

            /* If we have no more in our list, return EOF. */
            if (!ctx->list) {
                if (ctx->hold_open) {
                    return ctx->hold_open(ctx->hold_open_baton, bucket);
                }
                else {
                    return APR_EOF;
                }
            }

            /* At this point, it safe to read the next bucket - if we can. */

            /* If the caller doesn't want ALL_AVAIL, decrement the size
             * of the items we just read from the list.
             */
            if (requested != SERF_READ_ALL_AVAIL) {
                int i;

                for (i = 0; i < cur_vecs_used; i++)
                    requested -= vecs[i].iov_len;
            }

            /* Adjust our vecs to account for what we just read. */
            vecs_size -= cur_vecs_used;
            vecs += cur_vecs_used;

            /* We reached our max. Oh well. */
            if (!requested || !vecs_size) {
                return APR_SUCCESS;
            }
        }
    }

    return status;
}
|
||||
|
||||
/* Single-pointer read: delegate to read_aggregate with a one-entry iovec.
   When nothing was read, *len is zeroed and *data is left untouched. */
static apr_status_t serf_aggregate_read(serf_bucket_t *bucket,
                                        apr_size_t requested,
                                        const char **data, apr_size_t *len)
{
    aggregate_context_t *ctx = bucket->data;
    struct iovec one_vec;
    int used;
    apr_status_t status;

    /* Release children that finished on the previous pass. */
    cleanup_aggregate(ctx, bucket->allocator);

    status = read_aggregate(bucket, requested, 1, &one_vec, &used);

    if (used) {
        *data = one_vec.iov_base;
        *len = one_vec.iov_len;
    }
    else {
        *len = 0;
    }

    return status;
}
|
||||
|
||||
static apr_status_t serf_aggregate_read_iovec(serf_bucket_t *bucket,
|
||||
apr_size_t requested,
|
||||
int vecs_size,
|
||||
struct iovec *vecs,
|
||||
int *vecs_used)
|
||||
{
|
||||
aggregate_context_t *ctx = bucket->data;
|
||||
|
||||
cleanup_aggregate(ctx, bucket->allocator);
|
||||
|
||||
return read_aggregate(bucket, requested, vecs_size, vecs, vecs_used);
|
||||
}
|
||||
|
||||
/* Line-oriented read is not implemented for aggregate buckets. */
static apr_status_t serf_aggregate_readline(serf_bucket_t *bucket,
                                            int acceptable, int *found,
                                            const char **data, apr_size_t *len)
{
    /* Follow pattern from serf_aggregate_read. */
    return APR_ENOTIMPL;
}
|
||||
|
||||
/* Peek is not implemented for aggregate buckets. */
static apr_status_t serf_aggregate_peek(serf_bucket_t *bucket,
                                        const char **data,
                                        apr_size_t *len)
{
    /* Follow pattern from serf_aggregate_read. */
    return APR_ENOTIMPL;
}
|
||||
|
||||
/* If the first child bucket is of TYPE, detach and return it; otherwise
 * forward the request to that first child.
 *
 * NOTE(review): when the head bucket is consumed, its bucket_list_t node is
 * not freed and ctx->last is not adjusted if it pointed at that node —
 * presumably acceptable for how this is used; confirm against callers.
 */
static serf_bucket_t * serf_aggregate_read_bucket(
    serf_bucket_t *bucket,
    const serf_bucket_type_t *type)
{
    aggregate_context_t *ctx = bucket->data;
    serf_bucket_t *found_bucket;

    if (!ctx->list) {
        return NULL;
    }

    if (ctx->list->bucket->type == type) {
        /* Got the bucket. Consume it from our list. */
        found_bucket = ctx->list->bucket;
        ctx->list = ctx->list->next;
        return found_bucket;
    }

    /* Call read_bucket on first one in our list. */
    return serf_bucket_read_bucket(ctx->list->bucket, type);
}
|
||||
|
||||
|
||||
/* vtable for the aggregate bucket type; readline and peek are ENOTIMPL,
   read_for_sendfile uses the library default. */
const serf_bucket_type_t serf_bucket_type_aggregate = {
    "AGGREGATE",
    serf_aggregate_read,
    serf_aggregate_readline,
    serf_aggregate_read_iovec,
    serf_default_read_for_sendfile,
    serf_aggregate_read_bucket,
    serf_aggregate_peek,
    serf_aggregate_destroy_and_data,
};
|
434
buckets/allocator.c
Normal file
434
buckets/allocator.c
Normal file
@ -0,0 +1,434 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
|
||||
#include <apr_pools.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
|
||||
typedef struct node_header_t {
|
||||
apr_size_t size;
|
||||
union {
|
||||
struct node_header_t *next; /* if size == 0 (freed/inactive) */
|
||||
/* no data if size == STANDARD_NODE_SIZE */
|
||||
apr_memnode_t *memnode; /* if size > STANDARD_NODE_SIZE */
|
||||
} u;
|
||||
} node_header_t;
|
||||
|
||||
/* The size of a node_header_t, properly aligned. Note that (normally)
|
||||
* this macro will round the size to a multiple of 8 bytes. Keep this in
|
||||
* mind when altering the node_header_t structure. Also, keep in mind that
|
||||
* node_header_t is an overhead for every allocation performed through
|
||||
* the serf_bucket_mem_alloc() function.
|
||||
*/
|
||||
#define SIZEOF_NODE_HEADER_T APR_ALIGN_DEFAULT(sizeof(node_header_t))
|
||||
|
||||
|
||||
/* STANDARD_NODE_SIZE is manually set to an allocation size that will
|
||||
* capture most allocators performed via this API. It must be "large
|
||||
* enough" to avoid lots of spillage to allocating directly from the
|
||||
* apr_allocator associated with the bucket allocator. The apr_allocator
|
||||
* has a minimum size of 8k, which can be expensive if you missed the
|
||||
* STANDARD_NODE_SIZE by just a few bytes.
|
||||
*/
|
||||
/* ### we should define some rules or ways to determine how to derive
|
||||
* ### a "good" value for this. probably log some stats on allocs, then
|
||||
* ### analyze them for size "misses". then find the balance point between
|
||||
* ### wasted space due to min-size allocator, and wasted-space due to
|
||||
* ### size-spill to the 8k minimum.
|
||||
*/
|
||||
#define STANDARD_NODE_SIZE 128
|
||||
|
||||
/* When allocating a block of memory from the allocator, we should go for
|
||||
* an 8k block, minus the overhead that the allocator needs.
|
||||
*/
|
||||
#define ALLOC_AMT (8192 - APR_MEMNODE_T_SIZE)
|
||||
|
||||
/* Define DEBUG_DOUBLE_FREE if you're interested in debugging double-free
|
||||
* calls to serf_bucket_mem_free().
|
||||
*/
|
||||
#define DEBUG_DOUBLE_FREE
|
||||
|
||||
|
||||
typedef struct {
|
||||
const serf_bucket_t *bucket;
|
||||
apr_status_t last;
|
||||
} read_status_t;
|
||||
|
||||
#define TRACK_BUCKET_COUNT 100 /* track N buckets' status */
|
||||
|
||||
typedef struct {
|
||||
int next_index; /* info[] is a ring. next bucket goes at this idx. */
|
||||
int num_used;
|
||||
|
||||
read_status_t info[TRACK_BUCKET_COUNT];
|
||||
} track_state_t;
|
||||
|
||||
|
||||
struct serf_bucket_alloc_t {
|
||||
apr_pool_t *pool;
|
||||
apr_allocator_t *allocator;
|
||||
int own_allocator;
|
||||
|
||||
serf_unfreed_func_t unfreed;
|
||||
void *unfreed_baton;
|
||||
|
||||
apr_uint32_t num_alloc;
|
||||
|
||||
node_header_t *freelist; /* free STANDARD_NODE_SIZE blocks */
|
||||
apr_memnode_t *blocks; /* blocks we allocated for subdividing */
|
||||
|
||||
track_state_t *track;
|
||||
};
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
static apr_status_t allocator_cleanup(void *data)
|
||||
{
|
||||
serf_bucket_alloc_t *allocator = data;
|
||||
|
||||
/* If we allocated anything, give it back. */
|
||||
if (allocator->blocks) {
|
||||
apr_allocator_free(allocator->allocator, allocator->blocks);
|
||||
}
|
||||
|
||||
/* If we allocated our own allocator (?!), destroy it here. */
|
||||
if (allocator->own_allocator) {
|
||||
apr_allocator_destroy(allocator->allocator);
|
||||
}
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
/* Create a bucket allocator bound to POOL.  UNFREED (may be NULL) is
 * called at teardown for any allocation that was never freed, with
 * UNFREED_BATON.  If POOL has no apr_allocator (debug pools), a private
 * one is created and destroyed with the pool.
 *
 * NOTE(review): the return value of apr_allocator_create is not checked —
 * presumably it only fails on OOM; confirm whether that should abort.
 */
serf_bucket_alloc_t *serf_bucket_allocator_create(
    apr_pool_t *pool,
    serf_unfreed_func_t unfreed,
    void *unfreed_baton)
{
    serf_bucket_alloc_t *allocator = apr_pcalloc(pool, sizeof(*allocator));

    allocator->pool = pool;
    allocator->allocator = apr_pool_allocator_get(pool);
    if (allocator->allocator == NULL) {
        /* This most likely means pools are running in debug mode, create our
         * own allocator to deal with memory ourselves */
        apr_allocator_create(&allocator->allocator);
        allocator->own_allocator = 1;
    }
    allocator->unfreed = unfreed;
    allocator->unfreed_baton = unfreed_baton;

#ifdef SERF_DEBUG_BUCKET_USE
    {
        track_state_t *track;

        track = allocator->track = apr_palloc(pool, sizeof(*allocator->track));
        track->next_index = 0;
        track->num_used = 0;
    }
#endif

    /* NOTE: On a fork/exec, the child won't bother cleaning up memory.
       This is just fine... the memory will go away at exec.

       NOTE: If the child will NOT perform an exec, then the parent or
       the child will need to decide who to clean up any
       outstanding connection/buckets (as appropriate). */
    apr_pool_cleanup_register(pool, allocator,
                              allocator_cleanup, apr_pool_cleanup_null);

    return allocator;
}
|
||||
|
||||
apr_pool_t *serf_bucket_allocator_get_pool(
|
||||
const serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
return allocator->pool;
|
||||
}
|
||||
|
||||
|
||||
/* Allocate SIZE bytes from ALLOCATOR.  Requests that fit (after adding the
 * node header) in STANDARD_NODE_SIZE are served from the freelist, or by
 * carving fixed-size nodes out of 8k blocks; larger requests get their own
 * apr_memnode_t.  Returns NULL if the underlying apr_allocator has no
 * memory.  Pair with serf_bucket_mem_free() on the same allocator.
 */
void *serf_bucket_mem_alloc(
    serf_bucket_alloc_t *allocator,
    apr_size_t size)
{
    node_header_t *node;

    ++allocator->num_alloc;

    size += SIZEOF_NODE_HEADER_T;
    if (size <= STANDARD_NODE_SIZE) {
        if (allocator->freelist) {
            /* just pull a node off our freelist */
            node = allocator->freelist;
            allocator->freelist = node->u.next;
#ifdef DEBUG_DOUBLE_FREE
            /* When we free an item, we set its size to zero. Thus, when
             * we return it to the caller, we must ensure the size is set
             * properly.
             */
            node->size = STANDARD_NODE_SIZE;
#endif
        }
        else {
            apr_memnode_t *active = allocator->blocks;

            if (active == NULL
                || active->first_avail + STANDARD_NODE_SIZE >= active->endp) {
                apr_memnode_t *head = allocator->blocks;

                /* ran out of room. grab another block. */
                active = apr_allocator_alloc(allocator->allocator, ALLOC_AMT);

                /* System couldn't provide us with memory. */
                if (active == NULL)
                    return NULL;

                /* link the block into our tracking list */
                allocator->blocks = active;
                active->next = head;
            }

            node = (node_header_t *)active->first_avail;
            node->size = STANDARD_NODE_SIZE;
            active->first_avail += STANDARD_NODE_SIZE;
        }
    }
    else {
        /* Oversized request: dedicated memnode, remembered in the header
         * so serf_bucket_mem_free can hand it straight back. */
        apr_memnode_t *memnode = apr_allocator_alloc(allocator->allocator,
                                                     size);

        if (memnode == NULL)
            return NULL;

        node = (node_header_t *)memnode->first_avail;
        node->u.memnode = memnode;
        node->size = size;
    }

    /* Caller's usable memory starts just past the header. */
    return ((char *)node) + SIZEOF_NODE_HEADER_T;
}
|
||||
|
||||
|
||||
void *serf_bucket_mem_calloc(
|
||||
serf_bucket_alloc_t *allocator,
|
||||
apr_size_t size)
|
||||
{
|
||||
void *mem;
|
||||
mem = serf_bucket_mem_alloc(allocator, size);
|
||||
if (mem == NULL)
|
||||
return NULL;
|
||||
memset(mem, 0, size);
|
||||
return mem;
|
||||
}
|
||||
|
||||
|
||||
/* Return BLOCK (obtained from serf_bucket_mem_alloc/calloc on the same
 * ALLOCATOR) to the allocator.  Standard-size nodes go back on the
 * freelist; oversized nodes go back to the apr_allocator.  With
 * DEBUG_DOUBLE_FREE defined, a freed node's size is zeroed so a second
 * free of the same block abort()s.  Note the #ifdef deliberately brackets
 * the `else if` double-free branch — do not re-indent/restructure.
 */
void serf_bucket_mem_free(
    serf_bucket_alloc_t *allocator,
    void *block)
{
    node_header_t *node;

    --allocator->num_alloc;

    /* Recover the header that sits just before the caller's pointer. */
    node = (node_header_t *)((char *)block - SIZEOF_NODE_HEADER_T);

    if (node->size == STANDARD_NODE_SIZE) {
        /* put the node onto our free list */
        node->u.next = allocator->freelist;
        allocator->freelist = node;

#ifdef DEBUG_DOUBLE_FREE
        /* note that this thing was freed. */
        node->size = 0;
    }
    else if (node->size == 0) {
        /* damn thing was freed already. */
        abort();
#endif
    }
    else {
#ifdef DEBUG_DOUBLE_FREE
        /* note that this thing was freed. */
        node->size = 0;
#endif

        /* now free it */
        apr_allocator_free(allocator->allocator, node->u.memnode);
    }
}
|
||||
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
#ifdef SERF_DEBUG_BUCKET_USE
|
||||
|
||||
/* Look up the read_status_t tracking BUCKET in TRACK's fixed-size ring.
 *
 * The ring (track->info, TRACK_BUCKET_COUNT entries) is searched backwards
 * from the most recently written slot, since the bucket just read is very
 * likely the one most recently recorded. If not found and CREATE_RS is
 * non-zero, a new entry is claimed at track->next_index (overwriting the
 * oldest entry once the ring is full). Returns NULL only when the bucket
 * is absent and CREATE_RS is zero.
 */
static read_status_t *find_read_status(
    track_state_t *track,
    const serf_bucket_t *bucket,
    int create_rs)
{
    read_status_t *rs;

    if (track->num_used) {
        int count = track->num_used;
        int idx = track->next_index;

        /* Search backwards. In all likelihood, the bucket which just got
         * read was read very recently.
         */
        while (count-- > 0) {
            if (!idx--) {
                /* assert: track->num_used == TRACK_BUCKET_COUNT */
                idx = track->num_used - 1;
            }
            if ((rs = &track->info[idx])->bucket == bucket) {
                return rs;
            }
        }
    }

    /* Only create a new read_status_t when asked. */
    if (!create_rs)
        return NULL;

    if (track->num_used < TRACK_BUCKET_COUNT) {
        /* We're still filling up the ring. */
        ++track->num_used;
    }

    rs = &track->info[track->next_index];
    rs->bucket = bucket;
    rs->last = APR_SUCCESS; /* ### the right initial value? */

    /* Advance the write cursor, wrapping at the end of the ring. */
    if (++track->next_index == TRACK_BUCKET_COUNT)
        track->next_index = 0;

    return rs;
}
|
||||
|
||||
#endif /* SERF_DEBUG_BUCKET_USE */
|
||||
|
||||
|
||||
/* Debug hook: record that BUCKET was just read with result STATUS.
 *
 * Aborts if the previous recorded status for this bucket was EAGAIN, i.e.
 * somebody read the bucket again when they were told to wait. Always
 * returns STATUS unchanged so callers can use it transparently as a
 * pass-through wrapper. A no-op pass-through when SERF_DEBUG_BUCKET_USE
 * is not defined.
 */
apr_status_t serf_debug__record_read(
    const serf_bucket_t *bucket,
    apr_status_t status)
{
#ifndef SERF_DEBUG_BUCKET_USE
    return status;
#else

    track_state_t *track = bucket->allocator->track;
    read_status_t *rs = find_read_status(track, bucket, 1);

    /* Validate that the previous status value allowed for another read. */
    if (APR_STATUS_IS_EAGAIN(rs->last) /* ### or APR_EOF? */) {
        /* Somebody read when they weren't supposed to. Bail. */
        abort();
    }

    /* Save the current status for later. */
    rs->last = status;

    return status;
#endif
}
|
||||
|
||||
|
||||
/* Debug hook: called when the serf event loop is (re-)entered.
 *
 * Verifies that no tracked bucket was left with a plain APR_SUCCESS as its
 * last read status — such a bucket had more data available and should have
 * been read again before returning to the loop; abort() if one is found.
 * Drains the tracking ring as a side effect. No-op unless
 * SERF_DEBUG_BUCKET_USE is defined.
 */
void serf_debug__entered_loop(serf_bucket_alloc_t *allocator)
{
#ifdef SERF_DEBUG_BUCKET_USE

    track_state_t *track = allocator->track;
    read_status_t *rs = &track->info[0];

    for ( ; track->num_used; --track->num_used, ++rs ) {
        if (rs->last == APR_SUCCESS) {
            /* Somebody should have read this bucket again. */
            abort();
        }

        /* ### other status values? */
    }

    /* num_used was reset. also need to reset the next index. */
    track->next_index = 0;

#endif
}
|
||||
|
||||
|
||||
/* Debug hook: a connection owning ALLOCATOR was closed; discard all
 * tracked read-state so stale entries are never examined again.
 * No-op unless SERF_DEBUG_BUCKET_USE is defined.
 */
void serf_debug__closed_conn(serf_bucket_alloc_t *allocator)
{
#ifdef SERF_DEBUG_BUCKET_USE
    track_state_t *track = allocator->track;

    /* Empty the ring and rewind the write cursor. */
    track->num_used = 0;
    track->next_index = 0;
#endif
}
|
||||
|
||||
|
||||
/* Debug hook: BUCKET is being destroyed.
 *
 * If the bucket was tracked and its last read status was not APR_EOF, it
 * is being destroyed before being read to completion — abort(), except for
 * bucket types that legitimately never reach EOF while a connection stays
 * open (socket, SSL encrypt/decrypt, barrier). No-op unless
 * SERF_DEBUG_BUCKET_USE is defined.
 */
void serf_debug__bucket_destroy(const serf_bucket_t *bucket)
{
#ifdef SERF_DEBUG_BUCKET_USE

    track_state_t *track = bucket->allocator->track;
    read_status_t *rs = find_read_status(track, bucket, 0);

    if (rs != NULL && rs->last != APR_EOF) {
        /* The bucket was destroyed before it was read to completion. */

        /* Special exception for socket buckets. If a connection remains
         * open, they are not read to completion.
         */
        if (SERF_BUCKET_IS_SOCKET(bucket))
            return;

        /* Ditto for SSL Decrypt buckets. */
        if (SERF_BUCKET_IS_SSL_DECRYPT(bucket))
            return;

        /* Ditto for SSL Encrypt buckets. */
        if (SERF_BUCKET_IS_SSL_ENCRYPT(bucket))
            return;

        /* Ditto for barrier buckets. */
        if (SERF_BUCKET_IS_BARRIER(bucket))
            return;


        abort();
    }

#endif
}
|
||||
|
||||
|
||||
void serf_debug__bucket_alloc_check(
|
||||
serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
#ifdef SERF_DEBUG_BUCKET_USE
|
||||
if (allocator->num_alloc != 0) {
|
||||
abort();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
97
buckets/barrier_buckets.c
Normal file
97
buckets/barrier_buckets.c
Normal file
@ -0,0 +1,97 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <apr_pools.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
|
||||
/* Private context for a barrier bucket: just the wrapped stream.
 * The barrier forwards all reads to STREAM but does not destroy it when
 * the barrier itself is destroyed. */
typedef struct {
    serf_bucket_t *stream;
} barrier_context_t;
|
||||
|
||||
|
||||
serf_bucket_t *serf_bucket_barrier_create(
|
||||
serf_bucket_t *stream,
|
||||
serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
barrier_context_t *ctx;
|
||||
|
||||
ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
|
||||
ctx->stream = stream;
|
||||
|
||||
return serf_bucket_create(&serf_bucket_type_barrier, allocator, ctx);
|
||||
}
|
||||
|
||||
/* Barrier read: delegate straight to the wrapped stream. */
static apr_status_t serf_barrier_read(serf_bucket_t *bucket,
                                      apr_size_t requested,
                                      const char **data, apr_size_t *len)
{
    barrier_context_t *ctx = bucket->data;
    return serf_bucket_read(ctx->stream, requested, data, len);
}
|
||||
|
||||
static apr_status_t serf_barrier_read_iovec(serf_bucket_t *bucket,
|
||||
apr_size_t requested,
|
||||
int vecs_size, struct iovec *vecs,
|
||||
int *vecs_used)
|
||||
{
|
||||
barrier_context_t *ctx = bucket->data;
|
||||
|
||||
return serf_bucket_read_iovec(ctx->stream, requested, vecs_size, vecs,
|
||||
vecs_used);
|
||||
}
|
||||
|
||||
/* Barrier readline: delegate straight to the wrapped stream. */
static apr_status_t serf_barrier_readline(serf_bucket_t *bucket,
                                          int acceptable, int *found,
                                          const char **data, apr_size_t *len)
{
    barrier_context_t *ctx = bucket->data;
    return serf_bucket_readline(ctx->stream, acceptable, found, data, len);
}
|
||||
|
||||
static apr_status_t serf_barrier_peek(serf_bucket_t *bucket,
|
||||
const char **data,
|
||||
apr_size_t *len)
|
||||
{
|
||||
barrier_context_t *ctx = bucket->data;
|
||||
|
||||
return serf_bucket_peek(ctx->stream, data, len);
|
||||
}
|
||||
|
||||
/* Barrier destroy: free the barrier bucket and its context, but
 * deliberately leave the wrapped stream alive.
 */
static void serf_barrier_destroy(serf_bucket_t *bucket)
{
    /* The intent of this bucket is not to let our wrapped buckets be
     * destroyed. */

    /* The option is for us to go ahead and 'eat' this bucket now,
     * or just ignore the deletion entirely.
     */
    /* serf_default_destroy_and_data frees only bucket->data (our ctx)
     * and the bucket itself — it never touches ctx->stream. */
    serf_default_destroy_and_data(bucket);
}
|
||||
|
||||
/* vtable for barrier buckets: read/readline/read_iovec/peek delegate to
 * the wrapped stream; read_for_sendfile and read_bucket use the generic
 * defaults; destroy frees only the barrier's own context. */
const serf_bucket_type_t serf_bucket_type_barrier = {
    "BARRIER",
    serf_barrier_read,
    serf_barrier_readline,
    serf_barrier_read_iovec,
    serf_default_read_for_sendfile,
    serf_default_read_bucket,
    serf_barrier_peek,
    serf_barrier_destroy,
};
|
614
buckets/buckets.c
Normal file
614
buckets/buckets.c
Normal file
@ -0,0 +1,614 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <apr_pools.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
|
||||
serf_bucket_t *serf_bucket_create(
|
||||
const serf_bucket_type_t *type,
|
||||
serf_bucket_alloc_t *allocator,
|
||||
void *data)
|
||||
{
|
||||
serf_bucket_t *bkt = serf_bucket_mem_alloc(allocator, sizeof(*bkt));
|
||||
|
||||
bkt->type = type;
|
||||
bkt->data = data;
|
||||
bkt->allocator = allocator;
|
||||
|
||||
return bkt;
|
||||
}
|
||||
|
||||
|
||||
/* Default read_iovec implementation: satisfy the iovec read with a single
 * plain read, returned as one iovec (or zero iovecs when no data arrived).
 * Returns the status of the underlying read.
 */
apr_status_t serf_default_read_iovec(
    serf_bucket_t *bucket,
    apr_size_t requested,
    int vecs_size,
    struct iovec *vecs,
    int *vecs_used)
{
    const char *data;
    apr_size_t len;

    /* Read some data from the bucket.
     *
     * Because we're an internal 'helper' to the bucket, we can't call the
     * normal serf_bucket_read() call because the debug allocator tracker will
     * end up marking the bucket as read *twice* - once for us and once for
     * our caller - which is reading the same bucket. This leads to premature
     * abort()s if we ever see EAGAIN. Instead, we'll go directly to the
     * vtable and bypass the debug tracker.
     */
    apr_status_t status = bucket->type->read(bucket, requested, &data, &len);

    /* assert that vecs_size >= 1 ? */

    /* Return that data as a single iovec. */
    if (len) {
        vecs[0].iov_base = (void *)data; /* loses the 'const' */
        vecs[0].iov_len = len;
        *vecs_used = 1;
    }
    else {
        *vecs_used = 0;
    }

    return status;
}
|
||||
|
||||
|
||||
/* Default read_for_sendfile implementation: buckets with no backing file
 * put everything into the header iovecs, report no file and no trailers.
 * Returns the status of the underlying iovec read.
 */
apr_status_t serf_default_read_for_sendfile(
    serf_bucket_t *bucket,
    apr_size_t requested,
    apr_hdtr_t *hdtr,
    apr_file_t **file,
    apr_off_t *offset,
    apr_size_t *len)
{
    /* Read a bunch of stuff into the headers.
     *
     * See serf_default_read_iovec as to why we call into the vtable
     * directly.
     */
    apr_status_t status = bucket->type->read_iovec(bucket, requested,
                                                   hdtr->numheaders,
                                                   hdtr->headers,
                                                   &hdtr->numheaders);

    /* There isn't a file, and there are no trailers. */
    *file = NULL;
    hdtr->numtrailers = 0;

    return status;
}
|
||||
|
||||
|
||||
/* Default read_bucket implementation: this bucket cannot expose a nested
 * bucket of TYPE, so always answer "no such bucket". */
serf_bucket_t *serf_default_read_bucket(
    serf_bucket_t *bucket,
    const serf_bucket_type_t *type)
{
    return NULL;
}
|
||||
|
||||
|
||||
/* Default destroy implementation: notify the debug tracker (when enabled)
 * and release the bucket structure itself. Does NOT free bucket->data;
 * see serf_default_destroy_and_data for that.
 */
void serf_default_destroy(serf_bucket_t *bucket)
{
#ifdef SERF_DEBUG_BUCKET_USE
    serf_debug__bucket_destroy(bucket);
#endif

    serf_bucket_mem_free(bucket->allocator, bucket);
}
|
||||
|
||||
|
||||
/* Destroy BUCKET and also free its private data pointer. Suitable for
 * buckets whose data is a single allocation from the same allocator. */
void serf_default_destroy_and_data(serf_bucket_t *bucket)
{
    serf_bucket_mem_free(bucket->allocator, bucket->data);
    serf_default_destroy(bucket);
}
|
||||
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
char *serf_bstrmemdup(serf_bucket_alloc_t *allocator,
|
||||
const char *str,
|
||||
apr_size_t size)
|
||||
{
|
||||
char *newstr = serf_bucket_mem_alloc(allocator, size + 1);
|
||||
memcpy(newstr, str, size);
|
||||
newstr[size] = '\0';
|
||||
return newstr;
|
||||
}
|
||||
|
||||
|
||||
void *serf_bmemdup(serf_bucket_alloc_t *allocator,
|
||||
const void *mem,
|
||||
apr_size_t size)
|
||||
{
|
||||
void *newmem = serf_bucket_mem_alloc(allocator, size);
|
||||
memcpy(newmem, mem, size);
|
||||
return newmem;
|
||||
}
|
||||
|
||||
|
||||
/* Duplicate NUL-terminated STR into a fresh string allocated from
 * ALLOCATOR. Caller frees with serf_bucket_mem_free.
 */
char *serf_bstrdup(serf_bucket_alloc_t *allocator,
                   const char *str)
{
    /* Copy the terminator along with the characters. */
    apr_size_t bytes = strlen(str) + 1;
    char *copy = serf_bucket_mem_alloc(allocator, bytes);

    memcpy(copy, str, bytes);
    return copy;
}
|
||||
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
/* Scan *DATA (length *LEN) for the first CRLF pair.
 *
 * On return, *DATA/*LEN are advanced past everything consumed and *FOUND
 * is one of: SERF_NEWLINE_CRLF (full pair consumed), SERF_NEWLINE_CRLF_SPLIT
 * (buffer ended exactly on the CR; the LF may arrive in the next chunk),
 * or SERF_NEWLINE_NONE (no pair in the buffer; everything consumed).
 * Bare CRs not followed by LF are skipped.
 */
static void find_crlf(const char **data, apr_size_t *len, int *found)
{
    const char *start = *data;
    const char *end = start + *len;

    while (start < end) {
        const char *cr = memchr(start, '\r', *len);

        if (cr == NULL) {
            break;
        }
        /* cr now points just past the CR. */
        ++cr;

        if (cr < end && cr[0] == '\n') {
            /* Full CRLF: consume through the LF. */
            *len -= cr + 1 - start;
            *data = cr + 1;
            *found = SERF_NEWLINE_CRLF;
            return;
        }
        if (cr == end) {
            /* CR was the last byte; the LF may be in the next chunk. */
            *len = 0;
            *data = end;
            *found = SERF_NEWLINE_CRLF_SPLIT;
            return;
        }

        /* It was a bare CR without an LF. Just move past it. */
        *len -= cr - start;
        start = cr;
    }

    /* No newline: consume the whole buffer (*len ends up 0). */
    *data = start + *len;
    *len -= *data - start;
    *found = SERF_NEWLINE_NONE;
}
|
||||
|
||||
|
||||
/* Find the first newline in *DATA (length *LEN) matching the ACCEPTABLE
 * mask (SERF_NEWLINE_CR / _CRLF / _LF, OR-ed together).
 *
 * On return, *DATA/*LEN are advanced past the consumed line (including its
 * terminator) and *FOUND holds which newline form terminated it — or
 * SERF_NEWLINE_NONE (whole buffer consumed, no terminator) or
 * SERF_NEWLINE_CRLF_SPLIT (buffer ended on a CR that may be the first half
 * of a CRLF).
 */
void serf_util_readline(
    const char **data,
    apr_size_t *len,
    int acceptable,
    int *found)
{
    const char *start;
    const char *cr;
    const char *lf;
    int want_cr;
    int want_crlf;
    int want_lf;

    /* If _only_ CRLF is acceptable, then the scanning needs a loop to
     * skip false hits on CR characters. Use a separate function.
     */
    if (acceptable == SERF_NEWLINE_CRLF) {
        find_crlf(data, len, found);
        return;
    }

    start = *data;
    cr = lf = NULL;
    want_cr = acceptable & SERF_NEWLINE_CR;
    want_crlf = acceptable & SERF_NEWLINE_CRLF;
    want_lf = acceptable & SERF_NEWLINE_LF;

    /* Locate the first candidate CR and/or LF in the buffer. */
    if (want_cr || want_crlf) {
        cr = memchr(start, '\r', *len);
    }
    if (want_lf) {
        lf = memchr(start, '\n', *len);
    }

    /* Classify what we found; consumption happens in the switch below. */
    if (cr != NULL) {
        if (lf != NULL) {
            if (cr + 1 == lf)
                *found = want_crlf ? SERF_NEWLINE_CRLF : SERF_NEWLINE_CR;
            else if (want_cr && cr < lf)
                *found = SERF_NEWLINE_CR;
            else
                *found = SERF_NEWLINE_LF;
        }
        else if (cr == start + *len - 1) {
            /* the CR occurred in the last byte of the buffer. this could be
             * a CRLF split across the data boundary.
             * ### FIX THIS LOGIC? does caller need to detect?
             */
            *found = want_crlf ? SERF_NEWLINE_CRLF_SPLIT : SERF_NEWLINE_CR;
        }
        else if (want_cr)
            *found = SERF_NEWLINE_CR;
        else /* want_crlf */
            *found = SERF_NEWLINE_NONE;
    }
    else if (lf != NULL)
        *found = SERF_NEWLINE_LF;
    else
        *found = SERF_NEWLINE_NONE;

    /* Advance *data past whatever was consumed for this line. */
    switch (*found) {
    case SERF_NEWLINE_LF:
        *data = lf + 1;
        break;
    case SERF_NEWLINE_CR:
    case SERF_NEWLINE_CRLF:
    case SERF_NEWLINE_CRLF_SPLIT:
        /* CRLF consumes one extra byte (the LF) beyond the CR. */
        *data = cr + 1 + (*found == SERF_NEWLINE_CRLF);
        break;
    case SERF_NEWLINE_NONE:
        *data += *len;
        break;
    default:
        /* Not reachable */
        return;
    }

    *len -= *data - start;
}
|
||||
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
/* Initialize DATABUF to an empty state: nothing buffered and a status
 * that does not read as EOF.
 */
void serf_databuf_init(serf_databuf_t *databuf)
{
    databuf->remaining = 0;         /* nothing is sitting in the buffer */
    databuf->status = APR_SUCCESS;  /* avoid thinking we have hit EOF */
}
|
||||
|
||||
/* Ensure the buffer is prepared for reading. Will return APR_SUCCESS,
 * APR_EOF, or some failure code. *len is only set for EOF. */
static apr_status_t common_databuf_prep(serf_databuf_t *databuf,
                                        apr_size_t *len)
{
    apr_size_t readlen;
    apr_status_t status;

    /* if there is data in the buffer, then we're happy. */
    if (databuf->remaining > 0)
        return APR_SUCCESS;

    /* if we already hit EOF, then keep returning that. */
    if (APR_STATUS_IS_EOF(databuf->status)) {
        /* *data = NULL;  ?? */
        *len = 0;
        return APR_EOF;
    }

    /* refill the buffer via the databuf's read callback */
    status = (*databuf->read)(databuf->read_baton, sizeof(databuf->buf),
                              databuf->buf, &readlen);
    if (SERF_BUCKET_READ_ERROR(status)) {
        return status;
    }

    /* Stash the refill result; databuf->status (EOF/EAGAIN/SUCCESS) is
     * surfaced later, once the buffered bytes have been consumed. */
    databuf->current = databuf->buf;
    databuf->remaining = readlen;
    databuf->status = status;

    return APR_SUCCESS;
}
|
||||
|
||||
|
||||
/* Read up to REQUESTED bytes (or SERF_READ_ALL_AVAIL) out of DATABUF.
 * Returns a pointer into the internal buffer via *DATA/*LEN; the bytes
 * are consumed. Returns APR_SUCCESS while more data remains buffered,
 * otherwise the status of the last refill (APR_EOF / APR_EAGAIN).
 */
apr_status_t serf_databuf_read(
    serf_databuf_t *databuf,
    apr_size_t requested,
    const char **data,
    apr_size_t *len)
{
    apr_status_t status = common_databuf_prep(databuf, len);
    if (status)
        return status;

    /* peg the requested amount to what we have remaining */
    if (requested == SERF_READ_ALL_AVAIL || requested > databuf->remaining)
        requested = databuf->remaining;

    /* return the values */
    *data = databuf->current;
    *len = requested;

    /* adjust our internal state to note we've consumed some data */
    databuf->current += requested;
    databuf->remaining -= requested;

    /* If we read everything, then we need to return whatever the data
     * read returned to us. This is going to be APR_EOF or APR_EGAIN.
     * If we have NOT read everything, then return APR_SUCCESS to indicate
     * that we're ready to return some more if asked.
     */
    return databuf->remaining ? APR_SUCCESS : databuf->status;
}
|
||||
|
||||
|
||||
/* Read one line (terminated per the ACCEPTABLE newline mask) out of
 * DATABUF. *DATA/*LEN cover the consumed line including its terminator;
 * *FOUND reports which newline form ended it (see serf_util_readline).
 * Return status follows the same rules as serf_databuf_read.
 */
apr_status_t serf_databuf_readline(
    serf_databuf_t *databuf,
    int acceptable,
    int *found,
    const char **data,
    apr_size_t *len)
{
    apr_status_t status = common_databuf_prep(databuf, len);
    if (status)
        return status;

    /* the returned line will start at the current position. */
    *data = databuf->current;

    /* read a line from the buffer, and adjust the various pointers. */
    serf_util_readline(&databuf->current, &databuf->remaining, acceptable,
                       found);

    /* the length matches the amount consumed by the readline */
    *len = databuf->current - *data;

    /* see serf_databuf_read's return condition */
    return databuf->remaining ? APR_SUCCESS : databuf->status;
}
|
||||
|
||||
|
||||
apr_status_t serf_databuf_peek(
|
||||
serf_databuf_t *databuf,
|
||||
const char **data,
|
||||
apr_size_t *len)
|
||||
{
|
||||
apr_status_t status = common_databuf_prep(databuf, len);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
/* return everything we have */
|
||||
*data = databuf->current;
|
||||
*len = databuf->remaining;
|
||||
|
||||
/* If the last read returned EOF, then the peek should return the same.
|
||||
* The other possibility in databuf->status is APR_EAGAIN, which we
|
||||
* should never return. Thus, just return APR_SUCCESS for non-EOF cases.
|
||||
*/
|
||||
if (APR_STATUS_IS_EOF(databuf->status))
|
||||
return APR_EOF;
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
/* Reset LINEBUF: no characters accumulated, no line in progress. */
void serf_linebuf_init(serf_linebuf_t *linebuf)
{
    linebuf->used = 0;
    linebuf->state = SERF_LINEBUF_EMPTY;
}
|
||||
|
||||
|
||||
/* Accumulate one line from BUCKET into LINEBUF, accepting the newline
 * forms in ACCEPTABLE.
 *
 * Drives linebuf->state: EMPTY -> PARTIAL (no terminator yet) ->
 * READY (complete line in linebuf->line, length linebuf->used, terminator
 * stripped), with CRLF_SPLIT handling a CR that ended one read whose LF
 * may begin the next. A READY state on entry is treated as "caller
 * consumed the previous line" and reset. Returns APR_SUCCESS, APR_EAGAIN
 * (call again later), APR_EOF, a read error, or APR_EGENERAL when the
 * line exceeds the fixed buffer.
 */
apr_status_t serf_linebuf_fetch(
    serf_linebuf_t *linebuf,
    serf_bucket_t *bucket,
    int acceptable)
{
    /* If we had a complete line, then assume the caller has used it, so
     * we can now reset the state.
     */
    if (linebuf->state == SERF_LINEBUF_READY) {
        linebuf->state = SERF_LINEBUF_EMPTY;

        /* Reset the line_used, too, so we don't have to test the state
         * before using this value.
         */
        linebuf->used = 0;
    }

    while (1) {
        apr_status_t status;
        const char *data;
        apr_size_t len;

        if (linebuf->state == SERF_LINEBUF_CRLF_SPLIT) {
            /* On the previous read, we received just a CR. The LF might
             * be present, but the bucket couldn't see it. We need to
             * examine a single character to determine how to handle the
             * split CRLF.
             */

            status = serf_bucket_peek(bucket, &data, &len);
            if (SERF_BUCKET_READ_ERROR(status))
                return status;

            if (len > 0) {
                if (*data == '\n') {
                    /* We saw the second part of CRLF. We don't need to
                     * save that character, so do an actual read to suck
                     * up that character.
                     */
                    /* ### check status */
                    (void) serf_bucket_read(bucket, 1, &data, &len);
                }
                /* else:
                 *   We saw the first character of the next line. Thus,
                 *   the current line is terminated by the CR. Just
                 *   ignore whatever we peeked at. The next reader will
                 *   see it and handle it as appropriate.
                 */

                /* Whatever was read, the line is now ready for use. */
                linebuf->state = SERF_LINEBUF_READY;
            } else {
                /* no data available, try again later. */
                return APR_EAGAIN;
            }
        }
        else {
            int found;

            status = serf_bucket_readline(bucket, acceptable, &found,
                                          &data, &len);
            if (SERF_BUCKET_READ_ERROR(status)) {
                return status;
            }
            /* Some bucket types (socket) might need an extra read to find
               out EOF state, so they'll return no data in that read. This
               means we're done reading, return what we got. */
            if (APR_STATUS_IS_EOF(status) && len == 0) {
                return status;
            }
            if (linebuf->used + len > sizeof(linebuf->line)) {
                /* ### need a "line too long" error */
                return APR_EGENERAL;
            }

            /* Note: our logic doesn't change for SERF_LINEBUF_PARTIAL. That
             * only affects how we fill the buffer. It is a communication to
             * our caller on whether the line is ready or not.
             */

            /* If we didn't see a newline, then we should mark the line
             * buffer as partially complete.
             */
            if (found == SERF_NEWLINE_NONE) {
                linebuf->state = SERF_LINEBUF_PARTIAL;
            }
            else if (found == SERF_NEWLINE_CRLF_SPLIT) {
                linebuf->state = SERF_LINEBUF_CRLF_SPLIT;

                /* Toss the partial CR. We won't ever need it. */
                --len;
            }
            else {
                /* We got a newline (of some form). We don't need it
                 * in the line buffer, so back up the length. Then
                 * mark the line as ready.
                 */
                len -= 1 + (found == SERF_NEWLINE_CRLF);

                linebuf->state = SERF_LINEBUF_READY;
            }

            /* ### it would be nice to avoid this copy if at all possible,
               ### and just return the a data/len pair to the caller. we're
               ### keeping it simple for now. */
            memcpy(&linebuf->line[linebuf->used], data, len);
            linebuf->used += len;
        }

        /* If we saw anything besides "success. please read again", then
         * we should return that status. If the line was completed, then
         * we should also return.
         */
        if (status || linebuf->state == SERF_LINEBUF_READY)
            return status;

        /* We got APR_SUCCESS and the line buffer is not complete. Let's
         * loop to read some more data.
         */
    }
    /* NOTREACHED */
}
|
||||
|
||||
/* Logging functions.
|
||||
Use with one of the [COMP]_VERBOSE defines so that the compiler knows to
|
||||
optimize this code out when no logging is needed. */
|
||||
static void log_time()
|
||||
{
|
||||
apr_time_exp_t tm;
|
||||
|
||||
apr_time_exp_lt(&tm, apr_time_now());
|
||||
fprintf(stderr, "[%d-%02d-%02dT%02d:%02d:%02d.%06d%+03d] ",
|
||||
1900 + tm.tm_year, 1 + tm.tm_mon, tm.tm_mday,
|
||||
tm.tm_hour, tm.tm_min, tm.tm_sec, tm.tm_usec,
|
||||
tm.tm_gmtoff/3600);
|
||||
}
|
||||
|
||||
/* Log a printf-style message to stderr, prefixed with a timestamp and
 * (optionally) FILENAME. Entirely suppressed when VERBOSE_FLAG is zero.
 */
void serf__log(int verbose_flag, const char *filename, const char *fmt, ...)
{
    va_list argp;

    if (!verbose_flag)
        return;

    log_time();

    if (filename)
        fprintf(stderr, "%s: ", filename);

    va_start(argp, fmt);
    vfprintf(stderr, fmt, argp);
    va_end(argp);
}
|
||||
|
||||
/* Log a printf-style message to stderr with no timestamp/filename prefix
 * (continuation lines). Suppressed when VERBOSE_FLAG is zero.
 */
void serf__log_nopref(int verbose_flag, const char *fmt, ...)
{
    va_list argp;

    if (!verbose_flag)
        return;

    va_start(argp, fmt);
    vfprintf(stderr, fmt, argp);
    va_end(argp);
}
|
||||
|
||||
void serf__log_skt(int verbose_flag, const char *filename, apr_socket_t *skt,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
va_list argp;
|
||||
|
||||
if (verbose_flag) {
|
||||
apr_sockaddr_t *sa;
|
||||
log_time();
|
||||
|
||||
if (skt) {
|
||||
/* Log local and remote ip address:port */
|
||||
fprintf(stderr, "[l:");
|
||||
if (apr_socket_addr_get(&sa, APR_LOCAL, skt) == APR_SUCCESS) {
|
||||
char buf[32];
|
||||
apr_sockaddr_ip_getbuf(buf, 32, sa);
|
||||
fprintf(stderr, "%s:%d", buf, sa->port);
|
||||
}
|
||||
fprintf(stderr, " r:");
|
||||
if (apr_socket_addr_get(&sa, APR_REMOTE, skt) == APR_SUCCESS) {
|
||||
char buf[32];
|
||||
apr_sockaddr_ip_getbuf(buf, 32, sa);
|
||||
fprintf(stderr, "%s:%d", buf, sa->port);
|
||||
}
|
||||
fprintf(stderr, "] ");
|
||||
}
|
||||
|
||||
if (filename)
|
||||
fprintf(stderr, "%s: ", filename);
|
||||
|
||||
va_start(argp, fmt);
|
||||
vfprintf(stderr, fmt, argp);
|
||||
va_end(argp);
|
||||
}
|
||||
}
|
||||
|
596
buckets/bwtp_buckets.c
Normal file
596
buckets/bwtp_buckets.c
Normal file
@ -0,0 +1,596 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <apr_pools.h>
|
||||
#include <apr_strings.h>
|
||||
#include <apr_lib.h>
|
||||
#include <apr_date.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
#include "serf_bucket_types.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
|
||||
/* This is an implementation of Bidirectional Web Transfer Protocol (BWTP)
|
||||
* See:
|
||||
* http://bwtp.wikidot.com/
|
||||
*/
|
||||
|
||||
/* Private context for an outgoing BWTP frame bucket. The frame is lazily
 * serialized on first read (see serialize_data). */
typedef struct {
    int channel;                  /* BWTP channel number */
    int open;                     /* non-zero: emit the "OPEN " flag */
    int type; /* 0 = header, 1 = message */ /* TODO enum? */
    const char *phrase;           /* URI / reason phrase for the frame line */
    serf_bucket_t *headers;       /* headers to serialize after the line */

    /* Scratch buffer the serialized frame line is built into; it must
     * outlive serialization, so it lives in the context. */
    char req_line[1000];
} frame_context_t;
|
||||
|
||||
/* Private context for an incoming BWTP frame bucket, parsed from STREAM. */
typedef struct {
    serf_bucket_t *stream;
    serf_bucket_t *body; /* Pointer to the stream wrapping the body. */
    serf_bucket_t *headers; /* holds parsed headers */

    /* Parser progress through the incoming frame. */
    enum {
        STATE_STATUS_LINE, /* reading status line */
        STATE_HEADERS, /* reading headers */
        STATE_BODY, /* reading body */
        STATE_DONE /* we've sent EOF */
    } state;

    /* Buffer for accumulating a line from the response. */
    serf_linebuf_t linebuf;

    int type; /* 0 = header, 1 = message */ /* TODO enum? */
    int channel;                  /* BWTP channel number from the frame line */
    char *phrase;                 /* phrase parsed from the frame line */
    apr_size_t length;            /* declared body length */
} incoming_context_t;
|
||||
|
||||
|
||||
serf_bucket_t *serf_bucket_bwtp_channel_close(
|
||||
int channel,
|
||||
serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
frame_context_t *ctx;
|
||||
|
||||
ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
|
||||
ctx->type = 0;
|
||||
ctx->open = 0;
|
||||
ctx->channel = channel;
|
||||
ctx->phrase = "CLOSED";
|
||||
ctx->headers = serf_bucket_headers_create(allocator);
|
||||
|
||||
return serf_bucket_create(&serf_bucket_type_bwtp_frame, allocator, ctx);
|
||||
}
|
||||
|
||||
serf_bucket_t *serf_bucket_bwtp_channel_open(
|
||||
int channel,
|
||||
const char *uri,
|
||||
serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
frame_context_t *ctx;
|
||||
|
||||
ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
|
||||
ctx->type = 0;
|
||||
ctx->open = 1;
|
||||
ctx->channel = channel;
|
||||
ctx->phrase = uri;
|
||||
ctx->headers = serf_bucket_headers_create(allocator);
|
||||
|
||||
return serf_bucket_create(&serf_bucket_type_bwtp_frame, allocator, ctx);
|
||||
}
|
||||
|
||||
serf_bucket_t *serf_bucket_bwtp_header_create(
|
||||
int channel,
|
||||
const char *phrase,
|
||||
serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
frame_context_t *ctx;
|
||||
|
||||
ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
|
||||
ctx->type = 0;
|
||||
ctx->open = 0;
|
||||
ctx->channel = channel;
|
||||
ctx->phrase = phrase;
|
||||
ctx->headers = serf_bucket_headers_create(allocator);
|
||||
|
||||
return serf_bucket_create(&serf_bucket_type_bwtp_frame, allocator, ctx);
|
||||
}
|
||||
|
||||
/* Build an outgoing BWTP message ("BWM") frame for CHANNEL.
 *
 * NOTE(review): the BODY parameter is never stored or read here — the
 * serialized frame contains only the request line and headers. Confirm
 * whether callers expect the body to be appended to the frame (BWTP
 * support appears experimental).
 */
serf_bucket_t *serf_bucket_bwtp_message_create(
    int channel,
    serf_bucket_t *body,
    serf_bucket_alloc_t *allocator)
{
    frame_context_t *ctx;

    ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
    ctx->type = 1;
    ctx->open = 0;
    ctx->channel = channel;
    ctx->phrase = "MESSAGE";
    ctx->headers = serf_bucket_headers_create(allocator);

    return serf_bucket_create(&serf_bucket_type_bwtp_frame, allocator, ctx);
}
|
||||
|
||||
int serf_bucket_bwtp_frame_get_channel(
|
||||
serf_bucket_t *bucket)
|
||||
{
|
||||
if (SERF_BUCKET_IS_BWTP_FRAME(bucket)) {
|
||||
frame_context_t *ctx = bucket->data;
|
||||
|
||||
return ctx->channel;
|
||||
}
|
||||
else if (SERF_BUCKET_IS_BWTP_INCOMING_FRAME(bucket)) {
|
||||
incoming_context_t *ctx = bucket->data;
|
||||
|
||||
return ctx->channel;
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
int serf_bucket_bwtp_frame_get_type(
|
||||
serf_bucket_t *bucket)
|
||||
{
|
||||
if (SERF_BUCKET_IS_BWTP_FRAME(bucket)) {
|
||||
frame_context_t *ctx = bucket->data;
|
||||
|
||||
return ctx->type;
|
||||
}
|
||||
else if (SERF_BUCKET_IS_BWTP_INCOMING_FRAME(bucket)) {
|
||||
incoming_context_t *ctx = bucket->data;
|
||||
|
||||
return ctx->type;
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
const char *serf_bucket_bwtp_frame_get_phrase(
|
||||
serf_bucket_t *bucket)
|
||||
{
|
||||
if (SERF_BUCKET_IS_BWTP_FRAME(bucket)) {
|
||||
frame_context_t *ctx = bucket->data;
|
||||
|
||||
return ctx->phrase;
|
||||
}
|
||||
else if (SERF_BUCKET_IS_BWTP_INCOMING_FRAME(bucket)) {
|
||||
incoming_context_t *ctx = bucket->data;
|
||||
|
||||
return ctx->phrase;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Return the headers bucket of a BWTP frame bucket (outgoing or incoming),
 * or NULL when BUCKET is not a BWTP frame.
 */
serf_bucket_t *serf_bucket_bwtp_frame_get_headers(
    serf_bucket_t *bucket)
{
    if (SERF_BUCKET_IS_BWTP_FRAME(bucket))
        return ((frame_context_t *)bucket->data)->headers;

    if (SERF_BUCKET_IS_BWTP_INCOMING_FRAME(bucket))
        return ((incoming_context_t *)bucket->data)->headers;

    return NULL;
}
|
||||
|
||||
static int count_size(void *baton, const char *key, const char *value)
|
||||
{
|
||||
apr_size_t *c = baton;
|
||||
/* TODO Deal with folding. Yikes. */
|
||||
|
||||
/* Add in ": " and CRLF - so an extra four bytes. */
|
||||
*c += strlen(key) + strlen(value) + 4;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Return the total number of bytes HDRS will occupy once serialized. */
static apr_size_t calc_header_size(serf_bucket_t *hdrs)
{
    apr_size_t total = 0;

    serf_bucket_headers_do(hdrs, count_size, &total);
    return total;
}
|
||||
|
||||
/* Convert an outgoing BWTP frame bucket in place into an aggregate
 * bucket containing the serialized request line followed by the
 * headers bucket.  After this call the private frame context is gone,
 * so this must run exactly once (guarded by the read/peek callers).
 */
static void serialize_data(serf_bucket_t *bucket)
{
    frame_context_t *ctx = bucket->data;
    serf_bucket_t *new_bucket;
    apr_size_t req_len;

    /* Serialize the request-line and headers into one mother string,
     * and wrap a bucket around it.
     *
     * The header-block size is printed with APR_UINT64_T_HEX_FMT, so the
     * apr_size_t result of calc_header_size() must be widened explicitly:
     * passing a narrower integer through the varargs would be undefined
     * behavior on platforms where apr_size_t is not 64 bits wide.
     */
    req_len = apr_snprintf(ctx->req_line, sizeof(ctx->req_line),
                           "%s %d " "%" APR_UINT64_T_HEX_FMT " %s%s\r\n",
                           (ctx->type ? "BWM" : "BWH"),
                           ctx->channel,
                           (apr_uint64_t)calc_header_size(ctx->headers),
                           (ctx->open ? "OPEN " : ""),
                           ctx->phrase);
    new_bucket = serf_bucket_simple_copy_create(ctx->req_line, req_len,
                                                bucket->allocator);

    /* Build up the new bucket structure.
     *
     * Note that self needs to become an aggregate bucket so that a
     * pointer to self still represents the "right" data.
     */
    serf_bucket_aggregate_become(bucket);

    /* Insert the two buckets. */
    serf_bucket_aggregate_append(bucket, new_bucket);
    serf_bucket_aggregate_append(bucket, ctx->headers);

    /* Our private context is no longer needed, and is not referred to by
     * any existing bucket. Toss it.
     */
    serf_bucket_mem_free(bucket->allocator, ctx);
}
|
||||
|
||||
static apr_status_t serf_bwtp_frame_read(serf_bucket_t *bucket,
                                         apr_size_t requested,
                                         const char **data, apr_size_t *len)
{
    /* Morph this bucket into an aggregate holding the serialized
     * frame, then let the aggregate implementation service the read. */
    serialize_data(bucket);

    return serf_bucket_read(bucket, requested, data, len);
}
|
||||
|
||||
static apr_status_t serf_bwtp_frame_readline(serf_bucket_t *bucket,
                                             int acceptable, int *found,
                                             const char **data, apr_size_t *len)
{
    /* Morph this bucket into an aggregate holding the serialized
     * frame, then let the aggregate implementation read the line. */
    serialize_data(bucket);

    return serf_bucket_readline(bucket, acceptable, found, data, len);
}
|
||||
|
||||
static apr_status_t serf_bwtp_frame_read_iovec(serf_bucket_t *bucket,
|
||||
apr_size_t requested,
|
||||
int vecs_size,
|
||||
struct iovec *vecs,
|
||||
int *vecs_used)
|
||||
{
|
||||
/* Seralize our private data into a new aggregate bucket. */
|
||||
serialize_data(bucket);
|
||||
|
||||
/* Delegate to the "new" aggregate bucket to do the read. */
|
||||
return serf_bucket_read_iovec(bucket, requested,
|
||||
vecs_size, vecs, vecs_used);
|
||||
}
|
||||
|
||||
static apr_status_t serf_bwtp_frame_peek(serf_bucket_t *bucket,
|
||||
const char **data,
|
||||
apr_size_t *len)
|
||||
{
|
||||
/* Seralize our private data into a new aggregate bucket. */
|
||||
serialize_data(bucket);
|
||||
|
||||
/* Delegate to the "new" aggregate bucket to do the peek. */
|
||||
return serf_bucket_peek(bucket, data, len);
|
||||
}
|
||||
|
||||
/* vtable for outgoing BWTP frame buckets.  The read/readline/read_iovec
 * and peek entries each serialize the frame on first use (the bucket
 * morphs into an aggregate), after which the aggregate vtable takes
 * over; the remaining entries are the shared defaults. */
const serf_bucket_type_t serf_bucket_type_bwtp_frame = {
    "BWTP-FRAME",
    serf_bwtp_frame_read,
    serf_bwtp_frame_readline,
    serf_bwtp_frame_read_iovec,
    serf_default_read_for_sendfile,
    serf_default_read_bucket,
    serf_bwtp_frame_peek,
    serf_default_destroy_and_data,
};
|
||||
|
||||
|
||||
serf_bucket_t *serf_bucket_bwtp_incoming_frame_create(
|
||||
serf_bucket_t *stream,
|
||||
serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
incoming_context_t *ctx;
|
||||
|
||||
ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
|
||||
ctx->stream = stream;
|
||||
ctx->body = NULL;
|
||||
ctx->headers = serf_bucket_headers_create(allocator);
|
||||
ctx->state = STATE_STATUS_LINE;
|
||||
ctx->length = 0;
|
||||
ctx->channel = -1;
|
||||
ctx->phrase = NULL;
|
||||
|
||||
serf_linebuf_init(&ctx->linebuf);
|
||||
|
||||
return serf_bucket_create(&serf_bucket_type_bwtp_incoming_frame, allocator, ctx);
|
||||
}
|
||||
|
||||
/* Destructor for incoming BWTP frame buckets: release the phrase copy,
 * the wrapped stream, the (optional) body wrapper and the headers
 * bucket, then free the context and the bucket itself. */
static void bwtp_incoming_destroy_and_data(serf_bucket_t *bucket)
{
    incoming_context_t *ctx = bucket->data;

    /* The phrase is only serf_bstrmemdup()'d once parse_status_line()
     * has run, i.e. after we leave STATE_STATUS_LINE. */
    if (ctx->state != STATE_STATUS_LINE && ctx->phrase) {
        serf_bucket_mem_free(bucket->allocator, (void*)ctx->phrase);
    }

    serf_bucket_destroy(ctx->stream);
    /* body is NULL until a non-empty frame reaches STATE_HEADERS/BODY. */
    if (ctx->body != NULL)
        serf_bucket_destroy(ctx->body);
    serf_bucket_destroy(ctx->headers);

    serf_default_destroy_and_data(bucket);
}
|
||||
|
||||
/* Pull the next (possibly partial) line from the incoming stream into
 * ctx->linebuf, accepting the newline styles in ACCEPTABLE. */
static apr_status_t fetch_line(incoming_context_t *ctx, int acceptable)
{
    return serf_linebuf_fetch(&ctx->linebuf, ctx->stream, acceptable);
}
|
||||
|
||||
/* Parse the BWTP status line held in ctx->linebuf, of the form
 *   BW{H|M} <channel-hex> <length-hex> [OPEN ]<phrase>
 * filling in ctx->type, ctx->channel, ctx->length and ctx->phrase.
 * Returns APR_EGENERAL when the line does not begin with "BW".
 *
 * NOTE(review): ctx->linebuf.line is handed to apr_date_checkmask()
 * and apr_strtoi64() without an explicit NUL terminator being written
 * here (compare the dechunk bucket, which terminates before parsing).
 * Presumably serf_linebuf keeps the buffer terminated -- confirm
 * against serf_linebuf_fetch() before relying on this.
 */
static apr_status_t parse_status_line(incoming_context_t *ctx,
                                      serf_bucket_alloc_t *allocator)
{
    int res;
    char *reason; /* ### stupid APR interface makes this non-const */

    /* ctx->linebuf.line should be of form: BW* */
    res = apr_date_checkmask(ctx->linebuf.line, "BW*");
    if (!res) {
        /* Not an BWTP response? Well, at least we won't understand it. */
        return APR_EGENERAL;
    }

    /* Third letter selects the frame flavor: 'H' = header frame,
     * 'M' = message frame; anything else is recorded as unknown (-1). */
    if (ctx->linebuf.line[2] == 'H') {
        ctx->type = 0;
    }
    else if (ctx->linebuf.line[2] == 'M') {
        ctx->type = 1;
    }
    else {
        ctx->type = -1;
    }

    /* Channel number: hex digits immediately after "BWx". */
    ctx->channel = apr_strtoi64(ctx->linebuf.line + 3, &reason, 16);

    /* Skip leading spaces for the reason string. */
    if (apr_isspace(*reason)) {
        reason++;
    }

    /* Payload length: another hex field. */
    ctx->length = apr_strtoi64(reason, &reason, 16);

    /* Skip leading spaces for the reason string. */
    if (reason - ctx->linebuf.line < ctx->linebuf.used) {
        if (apr_isspace(*reason)) {
            reason++;
        }

        /* Copy the remainder of the line so the phrase outlives the
         * line buffer (freed later in bwtp_incoming_destroy_and_data). */
        ctx->phrase = serf_bstrmemdup(allocator, reason,
                                      ctx->linebuf.used
                                      - (reason - ctx->linebuf.line));
    } else {
        ctx->phrase = NULL;
    }

    return APR_SUCCESS;
}
|
||||
|
||||
/* This code should be replaced with header buckets. */

/* Read one header line from the stream and, if a complete non-empty
 * line is available, split it at the first ':' and record it in
 * ctx->headers (both key and value are copied).  An empty line (end of
 * the header block) is left for the caller to detect via ctx->linebuf.
 * BKT is currently unused.
 *
 * NOTE(review): the apr_isspace(*++c) walk below has no explicit bound;
 * it relies on a non-space byte (or terminator) following the spaces.
 * Presumably serf_linebuf terminates the buffer -- confirm, since a
 * line ending in ':' plus spaces would otherwise overrun.
 */
static apr_status_t fetch_headers(serf_bucket_t *bkt, incoming_context_t *ctx)
{
    apr_status_t status;

    /* RFC 2616 says that CRLF is the only line ending, but we can easily
     * accept any kind of line ending.
     */
    status = fetch_line(ctx, SERF_NEWLINE_ANY);
    if (SERF_BUCKET_READ_ERROR(status)) {
        return status;
    }
    /* Something was read. Process it. */

    if (ctx->linebuf.state == SERF_LINEBUF_READY && ctx->linebuf.used) {
        const char *end_key;
        const char *c;

        end_key = c = memchr(ctx->linebuf.line, ':', ctx->linebuf.used);
        if (!c) {
            /* Bad headers? */
            return APR_EGENERAL;
        }

        /* Skip over initial : and spaces. */
        while (apr_isspace(*++c))
            continue;

        /* Always copy the headers (from the linebuf into new mem). */
        /* ### we should be able to optimize some mem copies */
        serf_bucket_headers_setx(
            ctx->headers,
            ctx->linebuf.line, end_key - ctx->linebuf.line, 1,
            c, ctx->linebuf.line + ctx->linebuf.used - c, 1);
    }

    return status;
}
|
||||
|
||||
/* Perform one iteration of the state machine.
 *
 * Will return when one the following conditions occurred:
 *  1) a state change
 *  2) an error
 *  3) the stream is not ready or at EOF
 *  4) APR_SUCCESS, meaning the machine can be run again immediately
 */
static apr_status_t run_machine(serf_bucket_t *bkt, incoming_context_t *ctx)
{
    apr_status_t status = APR_SUCCESS; /* initialize to avoid gcc warnings */

    switch (ctx->state) {
    case STATE_STATUS_LINE:
        /* RFC 2616 says that CRLF is the only line ending, but we can easily
         * accept any kind of line ending.
         */
        status = fetch_line(ctx, SERF_NEWLINE_ANY);
        if (SERF_BUCKET_READ_ERROR(status))
            return status;

        if (ctx->linebuf.state == SERF_LINEBUF_READY && ctx->linebuf.used) {
            /* The Status-Line is in the line buffer. Process it. */
            status = parse_status_line(ctx, bkt->allocator);
            if (status)
                return status;

            if (ctx->length) {
                /* Wrap the stream so the body reads stop exactly at
                 * ctx->length bytes: barrier prevents destroy from
                 * propagating to the shared stream, limit caps length. */
                ctx->body =
                    serf_bucket_barrier_create(ctx->stream, bkt->allocator);
                ctx->body = serf_bucket_limit_create(ctx->body, ctx->length,
                                                    bkt->allocator);
                /* Header frames (type 0, BWH) carry a header block to
                 * parse; message frames (BWM) go straight to the body. */
                if (!ctx->type) {
                    ctx->state = STATE_HEADERS;
                } else {
                    ctx->state = STATE_BODY;
                }
            } else {
                /* Zero-length frame: nothing further to read. */
                ctx->state = STATE_DONE;
            }
        }
        else {
            /* The connection closed before we could get the next
             * response. Treat the request as lost so that our upper
             * end knows the server never tried to give us a response.
             */
            if (APR_STATUS_IS_EOF(status)) {
                return SERF_ERROR_REQUEST_LOST;
            }
        }
        break;
    case STATE_HEADERS:
        status = fetch_headers(ctx->body, ctx);
        if (SERF_BUCKET_READ_ERROR(status))
            return status;

        /* If an empty line was read, then we hit the end of the headers.
         * Move on to the body.
         *
         * NOTE(review): despite the comment above, the code advances to
         * STATE_DONE, not STATE_BODY -- presumably because a BWH frame's
         * payload *is* the header block; confirm against the BWTP spec.
         */
        if (ctx->linebuf.state == SERF_LINEBUF_READY && !ctx->linebuf.used) {
            /* Advance the state. */
            ctx->state = STATE_DONE;
        }
        break;
    case STATE_BODY:
        /* Don't do anything. */
        break;
    case STATE_DONE:
        return APR_EOF;
    default:
        /* Not reachable */
        return APR_EGENERAL;
    }

    return status;
}
|
||||
|
||||
/* Drive the parser state machine until the frame body is reachable
 * (STATE_BODY).  Any non-success result from run_machine() -- EAGAIN,
 * EOF, or a hard error -- is passed straight back, since it means no
 * further progress is possible right now. */
static apr_status_t wait_for_body(serf_bucket_t *bkt, incoming_context_t *ctx)
{
    apr_status_t status;

    for (; ctx->state != STATE_BODY; ) {
        status = run_machine(bkt, ctx);
        if (status != APR_SUCCESS)
            return status;
    }

    /* Reached STATE_BODY. */
    return APR_SUCCESS;
}
|
||||
|
||||
apr_status_t serf_bucket_bwtp_incoming_frame_wait_for_headers(
|
||||
serf_bucket_t *bucket)
|
||||
{
|
||||
incoming_context_t *ctx = bucket->data;
|
||||
|
||||
return wait_for_body(bucket, ctx);
|
||||
}
|
||||
|
||||
/* Read frame body data.  First drives the parser to STATE_BODY, then
 * delegates to the length-limited body bucket; an EOF from the body
 * advances the frame to STATE_DONE. */
static apr_status_t bwtp_incoming_read(serf_bucket_t *bucket,
                                       apr_size_t requested,
                                       const char **data, apr_size_t *len)
{
    incoming_context_t *ctx = bucket->data;
    apr_status_t rv;

    rv = wait_for_body(bucket, ctx);
    if (rv) {
        /* It's not possible to have read anything yet! */
        if (APR_STATUS_IS_EOF(rv) || APR_STATUS_IS_EAGAIN(rv)) {
            *len = 0;
        }
        return rv;
    }

    rv = serf_bucket_read(ctx->body, requested, data, len);
    if (APR_STATUS_IS_EOF(rv)) {
        /* Body exhausted: the whole frame has been consumed. */
        ctx->state = STATE_DONE;
    }
    return rv;
}
|
||||
|
||||
/* Read a line of frame body data, parsing the frame preamble first if
 * that has not happened yet. */
static apr_status_t bwtp_incoming_readline(serf_bucket_t *bucket,
                                           int acceptable, int *found,
                                           const char **data, apr_size_t *len)
{
    incoming_context_t *ctx = bucket->data;
    apr_status_t status;

    /* Make sure the body bucket exists before delegating to it. */
    status = wait_for_body(bucket, ctx);
    if (status)
        return status;

    return serf_bucket_readline(ctx->body, acceptable, found, data, len);
}
|
||||
|
||||
/* ### need to implement */
|
||||
#define bwtp_incoming_peek NULL
|
||||
|
||||
/* vtable for incoming BWTP frame buckets.  peek is NULL (marked "need
 * to implement" above); read/readline lazily parse the frame preamble
 * before touching the body. */
const serf_bucket_type_t serf_bucket_type_bwtp_incoming_frame = {
    "BWTP-INCOMING",
    bwtp_incoming_read,
    bwtp_incoming_readline,
    serf_default_read_iovec,
    serf_default_read_for_sendfile,
    serf_default_read_bucket,
    bwtp_incoming_peek,
    bwtp_incoming_destroy_and_data,
};
|
235
buckets/chunk_buckets.c
Normal file
235
buckets/chunk_buckets.c
Normal file
@ -0,0 +1,235 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <apr_pools.h>
|
||||
#include <apr_strings.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
|
||||
/* Private context for the chunk (HTTP chunked transfer-coding encoder)
 * bucket. */
typedef struct {
    enum {
        STATE_FETCH,    /* we need to fetch more data from the stream */
        STATE_CHUNK,    /* a serialized chunk is available in ctx->chunk */
        STATE_EOF       /* wrapped stream hit EOF; terminator emitted */
    } state;

    /* Result of the most recent read from the wrapped stream, replayed
     * to callers once the corresponding chunk has been drained. */
    apr_status_t last_status;

    serf_bucket_t *chunk;   /* aggregate holding serialized chunks */
    serf_bucket_t *stream;  /* the raw data being chunk-encoded */

    /* Scratch space for one "<hex-size>CRLF" chunk header; copied into
     * a simple bucket per chunk so several can be in flight at once. */
    char chunk_hdr[20];
} chunk_context_t;
|
||||
|
||||
|
||||
/* Wrap STREAM in a bucket that emits its data using the HTTP "chunked"
 * transfer coding, including the terminating 0-length chunk at EOF. */
serf_bucket_t *serf_bucket_chunk_create(
    serf_bucket_t *stream, serf_bucket_alloc_t *allocator)
{
    chunk_context_t *ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));

    ctx->stream = stream;
    ctx->chunk = serf_bucket_aggregate_create(allocator);
    ctx->state = STATE_FETCH;

    return serf_bucket_create(&serf_bucket_type_chunk, allocator, ctx);
}
|
||||
|
||||
#define CRLF "\r\n"
|
||||
|
||||
/* Pull whatever data is available from the wrapped stream and append
 * one serialized chunk ("<hex-len>CRLF <data> CRLF") to ctx->chunk,
 * plus the "0 CRLF CRLF" terminator when the stream reports EOF.
 * No-op unless we are in STATE_FETCH.  The stream's read status is
 * stashed in ctx->last_status for the read functions to replay. */
static apr_status_t create_chunk(serf_bucket_t *bucket)
{
    chunk_context_t *ctx = bucket->data;
    serf_bucket_t *simple_bkt;
    apr_size_t chunk_len;
    apr_size_t stream_len;
    struct iovec vecs[66]; /* 64 + chunk trailer + EOF trailer = 66 */
    int vecs_read;
    int i;

    if (ctx->state != STATE_FETCH) {
        return APR_SUCCESS;
    }

    /* Read into at most 64 vecs, leaving 2 spare slots for trailers. */
    ctx->last_status =
        serf_bucket_read_iovec(ctx->stream, SERF_READ_ALL_AVAIL,
                               64, vecs, &vecs_read);

    if (SERF_BUCKET_READ_ERROR(ctx->last_status)) {
        /* Uh-oh. */
        return ctx->last_status;
    }

    /* Count the length of the data we read. */
    stream_len = 0;
    for (i = 0; i < vecs_read; i++) {
        stream_len += vecs[i].iov_len;
    }

    /* assert: stream_len in hex < sizeof(ctx->chunk_hdr) */

    /* Inserting a 0 byte chunk indicates a terminator, which already happens
     * during the EOF handler below.  Adding another one here will cause the
     * EOF chunk to be interpreted by the server as a new request.  So,
     * we'll only do this if we have something to write.
     */
    if (stream_len) {
        /* Build the chunk header. */
        chunk_len = apr_snprintf(ctx->chunk_hdr, sizeof(ctx->chunk_hdr),
                                 "%" APR_UINT64_T_HEX_FMT CRLF,
                                 (apr_uint64_t)stream_len);

        /* Create a copy of the chunk header so we can have multiple chunks
         * in the pipeline at the same time.
         */
        simple_bkt = serf_bucket_simple_copy_create(ctx->chunk_hdr, chunk_len,
                                                    bucket->allocator);
        serf_bucket_aggregate_append(ctx->chunk, simple_bkt);

        /* Insert the chunk footer. */
        vecs[vecs_read].iov_base = CRLF;
        vecs[vecs_read++].iov_len = sizeof(CRLF) - 1;
    }

    /* We've reached the end of the line for the stream. */
    if (APR_STATUS_IS_EOF(ctx->last_status)) {
        /* Insert the chunk footer. */
        vecs[vecs_read].iov_base = "0" CRLF CRLF;
        vecs[vecs_read++].iov_len = sizeof("0" CRLF CRLF) - 1;

        ctx->state = STATE_EOF;
    }
    else {
        /* Okay, we can return data. */
        ctx->state = STATE_CHUNK;
    }

    /* Hand the data vecs (and any trailers) to the aggregate. */
    serf_bucket_aggregate_append_iovec(ctx->chunk, vecs, vecs_read);

    return APR_SUCCESS;
}
|
||||
|
||||
/* Read serialized chunk data: fetch/serialize a fresh chunk if needed,
 * then drain the staging aggregate.  When the aggregate runs dry while
 * more stream data may follow, replay the stream's own status instead
 * of the aggregate's EOF and re-arm the fetch state. */
static apr_status_t serf_chunk_read(serf_bucket_t *bucket,
                                    apr_size_t requested,
                                    const char **data, apr_size_t *len)
{
    chunk_context_t *ctx = bucket->data;
    apr_status_t status;

    /* Before proceeding, we need to fetch some data from the stream. */
    if (ctx->state == STATE_FETCH) {
        status = create_chunk(bucket);
        if (status) {
            return status;
        }
    }

    status = serf_bucket_read(ctx->chunk, requested, data, len);

    /* Mask EOF from aggregate bucket. */
    if (APR_STATUS_IS_EOF(status) && ctx->state == STATE_CHUNK) {
        status = ctx->last_status;
        ctx->state = STATE_FETCH;
    }

    return status;
}
|
||||
|
||||
/* Read a line of serialized chunk data from the staging aggregate.
 *
 * NOTE(review): unlike serf_chunk_read(), an exhausted aggregate here
 * is reported as APR_EAGAIN rather than ctx->last_status; also note
 * that no create_chunk() is attempted first.  Presumably intentional
 * (callers are expected to read, not readline, fresh data) -- confirm
 * before changing. */
static apr_status_t serf_chunk_readline(serf_bucket_t *bucket,
                                        int acceptable, int *found,
                                        const char **data, apr_size_t *len)
{
    chunk_context_t *ctx = bucket->data;
    apr_status_t status;

    status = serf_bucket_readline(ctx->chunk, acceptable, found, data, len);

    /* Mask EOF from aggregate bucket. */
    if (APR_STATUS_IS_EOF(status) && ctx->state == STATE_CHUNK) {
        status = APR_EAGAIN;
        ctx->state = STATE_FETCH;
    }

    return status;
}
|
||||
|
||||
/* iovec variant of serf_chunk_read(): fetch/serialize a chunk if
 * needed, drain the aggregate into VECS, and mask the aggregate's EOF
 * with the stream's last status while more data may follow. */
static apr_status_t serf_chunk_read_iovec(serf_bucket_t *bucket,
                                          apr_size_t requested,
                                          int vecs_size,
                                          struct iovec *vecs,
                                          int *vecs_used)
{
    chunk_context_t *ctx = bucket->data;
    apr_status_t status;

    /* Before proceeding, we need to fetch some data from the stream. */
    if (ctx->state == STATE_FETCH) {
        status = create_chunk(bucket);
        if (status) {
            return status;
        }
    }

    status = serf_bucket_read_iovec(ctx->chunk, requested, vecs_size, vecs,
                                    vecs_used);

    /* Mask EOF from aggregate bucket. */
    if (APR_STATUS_IS_EOF(status) && ctx->state == STATE_CHUNK) {
        status = ctx->last_status;
        ctx->state = STATE_FETCH;
    }

    return status;
}
|
||||
|
||||
static apr_status_t serf_chunk_peek(serf_bucket_t *bucket,
|
||||
const char **data,
|
||||
apr_size_t *len)
|
||||
{
|
||||
chunk_context_t *ctx = bucket->data;
|
||||
apr_status_t status;
|
||||
|
||||
status = serf_bucket_peek(ctx->chunk, data, len);
|
||||
|
||||
/* Mask EOF from aggregate bucket. */
|
||||
if (APR_STATUS_IS_EOF(status) && ctx->state == STATE_CHUNK) {
|
||||
status = APR_EAGAIN;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Destructor: release the staging aggregate and the wrapped stream,
 * then free the context and the bucket itself. */
static void serf_chunk_destroy(serf_bucket_t *bucket)
{
    chunk_context_t *ctx = bucket->data;

    serf_bucket_destroy(ctx->chunk);
    serf_bucket_destroy(ctx->stream);

    serf_default_destroy_and_data(bucket);
}
|
||||
|
||||
/* vtable for the chunked transfer-coding encoder bucket. */
const serf_bucket_type_t serf_bucket_type_chunk = {
    "CHUNK",
    serf_chunk_read,
    serf_chunk_readline,
    serf_chunk_read_iovec,
    serf_default_read_for_sendfile,
    serf_default_read_bucket,
    serf_chunk_peek,
    serf_chunk_destroy,
};
|
195
buckets/dechunk_buckets.c
Normal file
195
buckets/dechunk_buckets.c
Normal file
@ -0,0 +1,195 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <apr_strings.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
/* Private context for the dechunk (HTTP chunked transfer-coding
 * decoder) bucket. */
typedef struct {
    serf_bucket_t *stream;  /* the chunk-encoded input */

    enum {
        STATE_SIZE,     /* reading the chunk size */
        STATE_CHUNK,    /* reading the chunk */
        STATE_TERM,     /* reading the chunk terminator */
        STATE_DONE      /* body is done; we've returned EOF */
    } state;

    /* Buffer for accumulating a chunk size. */
    serf_linebuf_t linebuf;

    /* How much of the chunk, or the terminator, do we have left to read? */
    apr_int64_t body_left;

} dechunk_context_t;
|
||||
|
||||
|
||||
serf_bucket_t *serf_bucket_dechunk_create(
|
||||
serf_bucket_t *stream,
|
||||
serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
dechunk_context_t *ctx;
|
||||
|
||||
ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
|
||||
ctx->stream = stream;
|
||||
ctx->state = STATE_SIZE;
|
||||
|
||||
serf_linebuf_init(&ctx->linebuf);
|
||||
|
||||
return serf_bucket_create(&serf_bucket_type_dechunk, allocator, ctx);
|
||||
}
|
||||
|
||||
/* Destructor: release the wrapped stream, then free the context and
 * the bucket itself. */
static void serf_dechunk_destroy_and_data(serf_bucket_t *bucket)
{
    dechunk_context_t *ctx = bucket->data;

    serf_bucket_destroy(ctx->stream);

    serf_default_destroy_and_data(bucket);
}
|
||||
|
||||
/* Read decoded chunk payload.  Runs a state machine over the wrapped
 * stream: parse a hex chunk-size line (STATE_SIZE), hand out up to
 * that many payload bytes (STATE_CHUNK), consume the trailing CRLF
 * (STATE_TERM), and report EOF once the 0-length terminator chunk is
 * seen (STATE_DONE).  A stream EOF mid-chunk or mid-terminator is
 * reported as SERF_ERROR_TRUNCATED_HTTP_RESPONSE.
 */
static apr_status_t serf_dechunk_read(serf_bucket_t *bucket,
                                      apr_size_t requested,
                                      const char **data, apr_size_t *len)
{
    dechunk_context_t *ctx = bucket->data;
    apr_status_t status;

    while (1) {
        switch (ctx->state) {
        case STATE_SIZE:

            /* fetch a line terminated by CRLF */
            status = serf_linebuf_fetch(&ctx->linebuf, ctx->stream,
                                        SERF_NEWLINE_CRLF);
            if (SERF_BUCKET_READ_ERROR(status))
                return status;

            /* if a line was read, then parse it. */
            if (ctx->linebuf.state == SERF_LINEBUF_READY) {
                /* NUL-terminate the line. if it filled the entire buffer,
                   then just assume the thing is too large. */
                if (ctx->linebuf.used == sizeof(ctx->linebuf.line))
                    return APR_FROM_OS_ERROR(ERANGE);
                ctx->linebuf.line[ctx->linebuf.used] = '\0';

                /* convert from HEX digits.
                 *
                 * Clear errno first: apr_strtoi64 only *sets* ERANGE on
                 * overflow, so a stale ERANGE left over from an earlier
                 * call would otherwise fail a perfectly valid size. */
                errno = 0;
                ctx->body_left = apr_strtoi64(ctx->linebuf.line, NULL, 16);
                if (errno == ERANGE) {
                    return APR_FROM_OS_ERROR(ERANGE);
                }

                if (ctx->body_left == 0) {
                    /* Just read the last-chunk marker. We're DONE. */
                    ctx->state = STATE_DONE;
                    status = APR_EOF;
                }
                else {
                    /* Got a size, so we'll start reading the chunk now. */
                    ctx->state = STATE_CHUNK;
                }

                /* If we can read more, then go do so. */
                if (!status)
                    continue;
            }
            /* assert: status != 0 */

            /* Note that we didn't actually read anything, so our callers
             * don't get confused.
             */
            *len = 0;

            return status;

        case STATE_CHUNK:

            /* Never hand out more than the remainder of this chunk. */
            if (requested > ctx->body_left) {
                requested = ctx->body_left;
            }

            /* Delegate to the stream bucket to do the read. */
            status = serf_bucket_read(ctx->stream, requested, data, len);
            if (SERF_BUCKET_READ_ERROR(status))
                return status;

            /* Some data was read, so decrement the amount left and see
             * if we're done reading this chunk.
             */
            ctx->body_left -= *len;
            if (!ctx->body_left) {
                ctx->state = STATE_TERM;
                ctx->body_left = 2; /* CRLF */
            }

            /* We need more data but there is no more available. */
            if (ctx->body_left && APR_STATUS_IS_EOF(status)) {
                return SERF_ERROR_TRUNCATED_HTTP_RESPONSE;
            }

            /* Return the data we just read. */
            return status;

        case STATE_TERM:
            /* Delegate to the stream bucket to do the read. */
            status = serf_bucket_read(ctx->stream, ctx->body_left, data, len);
            if (SERF_BUCKET_READ_ERROR(status))
                return status;

            /* Some data was read, so decrement the amount left and see
             * if we're done reading the chunk terminator.
             */
            ctx->body_left -= *len;

            /* We need more data but there is no more available. */
            if (ctx->body_left && APR_STATUS_IS_EOF(status))
                return SERF_ERROR_TRUNCATED_HTTP_RESPONSE;

            if (!ctx->body_left) {
                ctx->state = STATE_SIZE;
            }

            if (status)
                return status;

            break;

        case STATE_DONE:
            /* Just keep returning EOF */
            return APR_EOF;

        default:
            /* Not reachable */
            return APR_EGENERAL;
        }
    }
    /* NOTREACHED */
}
|
||||
|
||||
/* ### need to implement */
|
||||
#define serf_dechunk_readline NULL
|
||||
#define serf_dechunk_peek NULL
|
||||
|
||||
const serf_bucket_type_t serf_bucket_type_dechunk = {
|
||||
"DECHUNK",
|
||||
serf_dechunk_read,
|
||||
serf_dechunk_readline,
|
||||
serf_default_read_iovec,
|
||||
serf_default_read_for_sendfile,
|
||||
serf_default_read_bucket,
|
||||
serf_dechunk_peek,
|
||||
serf_dechunk_destroy_and_data,
|
||||
};
|
384
buckets/deflate_buckets.c
Normal file
384
buckets/deflate_buckets.c
Normal file
@ -0,0 +1,384 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <apr_strings.h>
|
||||
|
||||
#include <zlib.h>
|
||||
|
||||
/* This conditional isn't defined anywhere yet. */
|
||||
#ifdef HAVE_ZUTIL_H
|
||||
#include <zutil.h>
|
||||
#endif
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
/* magic header */
|
||||
static char deflate_magic[2] = { '\037', '\213' };
|
||||
#define DEFLATE_MAGIC_SIZE 10
|
||||
#define DEFLATE_VERIFY_SIZE 8
|
||||
#define DEFLATE_BUFFER_SIZE 8096
|
||||
|
||||
static const int DEFLATE_WINDOW_SIZE = -15;
|
||||
static const int DEFLATE_MEMLEVEL = 9;
|
||||
|
||||
/* Private context for the deflate (gzip / raw deflate decompressor)
 * bucket. */
typedef struct {
    serf_bucket_t *stream;          /* compressed input */
    serf_bucket_t *inflate_stream;  /* aggregate holding inflated output;
                                       set to 0 after being prepended back
                                       into stream (see STATE_FINISH) */

    int format;                /* Are we 'deflate' or 'gzip'? */

    enum {
        STATE_READING_HEADER,  /* reading the gzip header */
        STATE_HEADER,          /* read the gzip header */
        STATE_INIT,            /* init'ing zlib functions */
        STATE_INFLATE,         /* inflating the content now */
        STATE_READING_VERIFY,  /* reading the final gzip CRC */
        STATE_VERIFY,          /* verifying the final gzip CRC */
        STATE_FINISH,          /* clean up after reading body */
        STATE_DONE,            /* body is done; we'll return EOF here */
    } state;

    z_stream zstream;
    /* Scratch buffer reused for both the 10-byte gzip header and the
     * 8-byte CRC/length trailer. */
    char hdr_buffer[DEFLATE_MAGIC_SIZE];
    unsigned char buffer[DEFLATE_BUFFER_SIZE];  /* inflate output space */
    unsigned long crc;         /* running CRC of the inflated data */
    int windowSize;            /* zlib window bits (negative = raw) */
    int memLevel;
    int bufferSize;

    /* How much of the chunk, or the terminator, do we have left to read? */
    apr_size_t stream_left;

    /* How much are we supposed to read? */
    apr_size_t stream_size;

    int stream_status; /* What was the last status we read? */

} deflate_context_t;
|
||||
|
||||
/* Decode a 4-byte little-endian quantity (the layout used by the gzip
 * trailer's CRC32 and length fields) into an unsigned long. */
static unsigned long getLong(unsigned char *string)
{
    unsigned long value = string[0];

    value |= ((unsigned long)string[1]) << 8;
    value |= ((unsigned long)string[2]) << 16;
    value |= ((unsigned long)string[3]) << 24;

    return value;
}
|
||||
|
||||
/* Wrap STREAM in a bucket that inflates its content.  FORMAT selects
 * SERF_DEFLATE_GZIP (expects a gzip header and CRC trailer) or
 * SERF_DEFLATE_DEFLATE (raw deflate, no framing).  Returns NULL for an
 * unknown FORMAT. */
serf_bucket_t *serf_bucket_deflate_create(
    serf_bucket_t *stream,
    serf_bucket_alloc_t *allocator,
    int format)
{
    deflate_context_t *ctx;

    ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
    ctx->stream = stream;
    ctx->stream_status = APR_SUCCESS;
    ctx->inflate_stream = serf_bucket_aggregate_create(allocator);
    ctx->format = format;
    ctx->crc = 0;
    /* zstream must be NULL'd out. */
    memset(&ctx->zstream, 0, sizeof(ctx->zstream));

    switch (ctx->format) {
    case SERF_DEFLATE_GZIP:
        /* gzip starts with a 10-byte header we must consume first. */
        ctx->state = STATE_READING_HEADER;
        break;
    case SERF_DEFLATE_DEFLATE:
        /* deflate doesn't have a header. */
        ctx->state = STATE_INIT;
        break;
    default:
        /* Not reachable */
        return NULL;
    }

    /* Initial size of gzip header. */
    ctx->stream_left = ctx->stream_size = DEFLATE_MAGIC_SIZE;

    ctx->windowSize = DEFLATE_WINDOW_SIZE;
    ctx->memLevel = DEFLATE_MEMLEVEL;
    ctx->bufferSize = DEFLATE_BUFFER_SIZE;

    return serf_bucket_create(&serf_bucket_type_deflate, allocator, ctx);
}
|
||||
|
||||
/* Destructor: shut down zlib if it was initialized, release whichever
 * buckets we still own, then free the context and the bucket. */
static void serf_deflate_destroy_and_data(serf_bucket_t *bucket)
{
    deflate_context_t *ctx = bucket->data;

    /* zlib state is only live between STATE_INIT and STATE_FINISH. */
    if (ctx->state > STATE_INIT &&
        ctx->state <= STATE_FINISH)
        inflateEnd(&ctx->zstream);

    /* We may have appended inflate_stream into the stream bucket.
     * If so, avoid free'ing it twice.
     */
    if (ctx->inflate_stream) {
        serf_bucket_destroy(ctx->inflate_stream);
    }
    serf_bucket_destroy(ctx->stream);

    serf_default_destroy_and_data(bucket);
}
|
||||
|
||||
/* Implements serf_bucket_t.read for the DEFLATE bucket.
 *
 * Drives a state machine: (GZIP format only) read and validate the
 * fixed-size gzip header, initialize zlib, inflate the wrapped stream
 * into ctx->buffer — exposing output through the ctx->inflate_stream
 * aggregate — then (GZIP only) read and verify the CRC/length trailer.
 * Once finished, any bytes following the compressed data are served
 * straight from ctx->stream (STATE_DONE).
 *
 * Returns the usual bucket statuses (APR_SUCCESS / APR_EAGAIN /
 * APR_EOF), or SERF_ERROR_DECOMPRESSION_FAILED on corrupt input.
 * NOTE: state transitions use ctx->state++, so the STATE_* enum
 * declaration order is load-bearing.
 */
static apr_status_t serf_deflate_read(serf_bucket_t *bucket,
                                      apr_size_t requested,
                                      const char **data, apr_size_t *len)
{
    deflate_context_t *ctx = bucket->data;
    unsigned long compCRC, compLen;
    apr_status_t status;
    const char *private_data;
    apr_size_t private_len;
    int zRC;

    while (1) {
        switch (ctx->state) {
        case STATE_READING_HEADER:
        case STATE_READING_VERIFY:
            /* Accumulate exactly stream_size bytes (header or trailer)
               into hdr_buffer, possibly across multiple reads. */
            status = serf_bucket_read(ctx->stream, ctx->stream_left,
                                      &private_data, &private_len);

            if (SERF_BUCKET_READ_ERROR(status)) {
                return status;
            }

            /* Append at the offset already filled by earlier reads. */
            memcpy(ctx->hdr_buffer + (ctx->stream_size - ctx->stream_left),
                   private_data, private_len);

            ctx->stream_left -= private_len;

            if (ctx->stream_left == 0) {
                /* Buffer complete: advance to STATE_HEADER/STATE_VERIFY.
                   Still surface an EAGAIN so the caller retries. */
                ctx->state++;
                if (APR_STATUS_IS_EAGAIN(status)) {
                    *len = 0;
                    return status;
                }
            }
            else if (status) {
                /* Partial buffer and a pending status (e.g. EAGAIN):
                   report it with no data. */
                *len = 0;
                return status;
            }
            break;
        case STATE_HEADER:
            /* Validate the gzip magic bytes. */
            if (ctx->hdr_buffer[0] != deflate_magic[0] ||
                ctx->hdr_buffer[1] != deflate_magic[1]) {
                return SERF_ERROR_DECOMPRESSION_FAILED;
            }
            /* Byte 3 is the gzip FLG byte: any optional header fields
               (FEXTRA/FNAME/FCOMMENT/FHCRC) are unsupported here.
               NOTE(review): byte 2 (compression method) is not
               checked — confirm that is intentional. */
            if (ctx->hdr_buffer[3] != 0) {
                return SERF_ERROR_DECOMPRESSION_FAILED;
            }
            ctx->state++;
            break;
        case STATE_VERIFY:
            /* Do the checksum computation. The gzip trailer holds the
               little-endian CRC32 then the uncompressed length. */
            compCRC = getLong((unsigned char*)ctx->hdr_buffer);
            if (ctx->crc != compCRC) {
                return SERF_ERROR_DECOMPRESSION_FAILED;
            }
            compLen = getLong((unsigned char*)ctx->hdr_buffer + 4);
            if (ctx->zstream.total_out != compLen) {
                return SERF_ERROR_DECOMPRESSION_FAILED;
            }
            ctx->state++;
            break;
        case STATE_INIT:
            /* Set up the zlib stream and point it at our output buffer. */
            zRC = inflateInit2(&ctx->zstream, ctx->windowSize);
            if (zRC != Z_OK) {
                return SERF_ERROR_DECOMPRESSION_FAILED;
            }
            ctx->zstream.next_out = ctx->buffer;
            ctx->zstream.avail_out = ctx->bufferSize;
            ctx->state++;
            break;
        case STATE_FINISH:
            /* Inflation complete: release zlib, then hand any leftover
               inflated data over to ctx->stream. Clearing
               inflate_stream prevents a double destroy (see
               serf_deflate_destroy_and_data). */
            inflateEnd(&ctx->zstream);
            serf_bucket_aggregate_prepend(ctx->stream, ctx->inflate_stream);
            ctx->inflate_stream = 0;
            ctx->state++;
            break;
        case STATE_INFLATE:
            /* Do we have anything already uncompressed to read? */
            status = serf_bucket_read(ctx->inflate_stream, requested, data,
                                      len);
            if (SERF_BUCKET_READ_ERROR(status)) {
                return status;
            }
            /* Hide EOF of the inflate buffer; substitute the wrapped
               stream's last status instead. */
            if (APR_STATUS_IS_EOF(status)) {
                status = ctx->stream_status;
                if (APR_STATUS_IS_EOF(status)) {
                    /* We've read all of the data from our stream, but we
                     * need to continue to iterate until we flush
                     * out the zlib buffer.
                     */
                    status = APR_SUCCESS;
                }
            }
            if (*len != 0) {
                return status;
            }

            /* We tried; but we have nothing buffered. Fetch more. */

            /* It is possible that we maxed out avail_out before
             * exhausting avail_in; therefore, continue using the
             * previous buffer. Otherwise, fetch more data from
             * our stream bucket.
             */
            if (ctx->zstream.avail_in == 0) {
                /* When we empty our inflated stream, we'll return this
                 * status - this allow us to eventually pass up EAGAINs.
                 */
                ctx->stream_status = serf_bucket_read(ctx->stream,
                                                      ctx->bufferSize,
                                                      &private_data,
                                                      &private_len);

                if (SERF_BUCKET_READ_ERROR(ctx->stream_status)) {
                    return ctx->stream_status;
                }

                /* Nothing yet: pass the EAGAIN up once, then reset. */
                if (!private_len && APR_STATUS_IS_EAGAIN(ctx->stream_status)) {
                    *len = 0;
                    status = ctx->stream_status;
                    ctx->stream_status = APR_SUCCESS;
                    return status;
                }

                ctx->zstream.next_in = (unsigned char*)private_data;
                ctx->zstream.avail_in = private_len;
            }
            zRC = Z_OK;
            while (ctx->zstream.avail_in != 0) {
                /* We're full, clear out our buffer, reset, and return. */
                if (ctx->zstream.avail_out == 0) {
                    serf_bucket_t *tmp;
                    ctx->zstream.next_out = ctx->buffer;
                    /* avail_out is 0 here, so this is the full buffer. */
                    private_len = ctx->bufferSize - ctx->zstream.avail_out;

                    /* Track the running CRC for the gzip trailer check. */
                    ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer,
                                     private_len);

                    /* FIXME: There probably needs to be a free func. */
                    tmp = SERF_BUCKET_SIMPLE_STRING_LEN((char *)ctx->buffer,
                                                        private_len,
                                                        bucket->allocator);
                    serf_bucket_aggregate_append(ctx->inflate_stream, tmp);
                    ctx->zstream.avail_out = ctx->bufferSize;
                    break;
                }
                zRC = inflate(&ctx->zstream, Z_NO_FLUSH);

                if (zRC == Z_STREAM_END) {
                    serf_bucket_t *tmp;

                    /* Flush the final partial output buffer. */
                    private_len = ctx->bufferSize - ctx->zstream.avail_out;
                    ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer,
                                     private_len);
                    /* FIXME: There probably needs to be a free func. */
                    tmp = SERF_BUCKET_SIMPLE_STRING_LEN((char *)ctx->buffer,
                                                        private_len,
                                                        bucket->allocator);
                    serf_bucket_aggregate_append(ctx->inflate_stream, tmp);

                    ctx->zstream.avail_out = ctx->bufferSize;

                    /* Push back the remaining data to be read. */
                    tmp = serf_bucket_aggregate_create(bucket->allocator);
                    serf_bucket_aggregate_prepend(tmp, ctx->stream);
                    ctx->stream = tmp;

                    /* We now need to take the remaining avail_in and
                     * throw it in ctx->stream so our next read picks it up.
                     */
                    tmp = SERF_BUCKET_SIMPLE_STRING_LEN(
                        (const char*)ctx->zstream.next_in,
                            ctx->zstream.avail_in,
                            bucket->allocator);
                    serf_bucket_aggregate_prepend(ctx->stream, tmp);

                    switch (ctx->format) {
                    case SERF_DEFLATE_GZIP:
                        /* Next: read the CRC/length trailer. */
                        ctx->stream_left = ctx->stream_size =
                            DEFLATE_VERIFY_SIZE;
                        ctx->state++;
                        break;
                    case SERF_DEFLATE_DEFLATE:
                        /* Deflate does not have a verify footer. */
                        ctx->state = STATE_FINISH;
                        break;
                    default:
                        /* Not reachable */
                        return APR_EGENERAL;
                    }

                    break;
                }
                if (zRC != Z_OK) {
                    return SERF_ERROR_DECOMPRESSION_FAILED;
                }
            }
            /* Okay, we've inflated. Try to read. */
            status = serf_bucket_read(ctx->inflate_stream, requested, data,
                                      len);
            /* Hide EOF. */
            if (APR_STATUS_IS_EOF(status)) {
                status = ctx->stream_status;
                /* If our stream is finished too, return SUCCESS so
                 * we'll iterate one more time.
                 */
                if (APR_STATUS_IS_EOF(status)) {
                    /* No more data to read from the stream, and everything
                       inflated. If all data was received correctly, state
                       should have been advanced to STATE_READING_VERIFY or
                       STATE_FINISH. If not, then the data was incomplete
                       and we have an error. */
                    if (ctx->state != STATE_INFLATE)
                        return APR_SUCCESS;
                    else
                        return SERF_ERROR_DECOMPRESSION_FAILED;
                }
            }
            return status;
        case STATE_DONE:
            /* We're done inflating. Use our finished buffer. */
            return serf_bucket_read(ctx->stream, requested, data, len);
        default:
            /* Not reachable */
            return APR_EGENERAL;
        }
    }

    /* NOTREACHED */
}
|
||||
|
||||
/* ### need to implement */
#define serf_deflate_readline NULL
#define serf_deflate_peek NULL

/* vtable for the DEFLATE bucket; readline and peek are unimplemented
   (NULL), so callers must not invoke them on this bucket type. */
const serf_bucket_type_t serf_bucket_type_deflate = {
    "DEFLATE",
    serf_deflate_read,
    serf_deflate_readline,
    serf_default_read_iovec,
    serf_default_read_for_sendfile,
    serf_default_read_bucket,
    serf_deflate_peek,
    serf_deflate_destroy_and_data,
};
|
117
buckets/file_buckets.c
Normal file
117
buckets/file_buckets.c
Normal file
@ -0,0 +1,117 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <apr_pools.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
/* State for a FILE bucket: reads an apr_file_t through a serf_databuf_t,
   which owns the intermediate read buffer. */
typedef struct {
    /* The underlying APR file handle. NOTE(review): the vtable uses
       serf_default_destroy_and_data, so the bucket apparently never
       closes this handle — confirm ownership with callers. */
    apr_file_t *file;

    /* Buffered-read helper; refilled via file_reader() below. */
    serf_databuf_t databuf;

} file_context_t;
|
||||
|
||||
|
||||
/* serf_databuf_t read callback: fill BUF with up to BUFSIZE bytes from
   the wrapped APR file, reporting the actual count through LEN. */
static apr_status_t file_reader(void *baton, apr_size_t bufsize,
                                char *buf, apr_size_t *len)
{
    file_context_t *file_ctx = baton;

    /* apr_file_read() takes the capacity in *len and overwrites it
       with the number of bytes actually read. */
    *len = bufsize;
    return apr_file_read(file_ctx->file, buf, len);
}
|
||||
|
||||
/* Create a bucket serving the contents of FILE.
 *
 * When APR supports mmap and the file qualifies as an mmap candidate,
 * an MMAP bucket is returned instead; otherwise the file is read
 * incrementally through a databuf.
 */
serf_bucket_t *serf_bucket_file_create(
    apr_file_t *file,
    serf_bucket_alloc_t *allocator)
{
    file_context_t *ctx;
#if APR_HAS_MMAP
    apr_finfo_t finfo;
    const char *file_path;

    /* See if we'd be better off mmap'ing this file instead.
     *
     * Note that there is a failure case here that we purposely fall through:
     * if a file is buffered, apr_mmap will reject it. However, on older
     * versions of APR, we have no way of knowing this - but apr_mmap_create
     * will check for this and return APR_EBADF.
     */
    /* NOTE(review): the return values of apr_file_name_get/apr_stat are
       ignored; on failure finfo.size may be indeterminate when the
       APR_MMAP_CANDIDATE check runs — confirm this is acceptable. */
    apr_file_name_get(&file_path, file);
    apr_stat(&finfo, file_path, APR_FINFO_SIZE,
             serf_bucket_allocator_get_pool(allocator));
    if (APR_MMAP_CANDIDATE(finfo.size)) {
        apr_status_t status;
        apr_mmap_t *file_mmap;
        status = apr_mmap_create(&file_mmap, file, 0, finfo.size,
                                 APR_MMAP_READ,
                                 serf_bucket_allocator_get_pool(allocator));

        if (status == APR_SUCCESS) {
            return serf_bucket_mmap_create(file_mmap, allocator);
        }
    }
#endif

    /* Oh, well. Fall back to plain buffered reads. */
    ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
    ctx->file = file;

    /* Route databuf refills through file_reader(). */
    serf_databuf_init(&ctx->databuf);
    ctx->databuf.read = file_reader;
    ctx->databuf.read_baton = ctx;

    return serf_bucket_create(&serf_bucket_type_file, allocator, ctx);
}
|
||||
|
||||
/* serf_bucket_t.read: hand out up to REQUESTED bytes via the databuf. */
static apr_status_t serf_file_read(serf_bucket_t *bucket,
                                   apr_size_t requested,
                                   const char **data, apr_size_t *len)
{
    file_context_t *file_ctx = bucket->data;

    return serf_databuf_read(&file_ctx->databuf, requested, data, len);
}
|
||||
|
||||
/* serf_bucket_t.readline: delegate line scanning to the databuf. */
static apr_status_t serf_file_readline(serf_bucket_t *bucket,
                                       int acceptable, int *found,
                                       const char **data, apr_size_t *len)
{
    file_context_t *file_ctx = bucket->data;

    return serf_databuf_readline(&file_ctx->databuf, acceptable, found,
                                 data, len);
}
|
||||
|
||||
static apr_status_t serf_file_peek(serf_bucket_t *bucket,
|
||||
const char **data,
|
||||
apr_size_t *len)
|
||||
{
|
||||
file_context_t *ctx = bucket->data;
|
||||
|
||||
return serf_databuf_peek(&ctx->databuf, data, len);
|
||||
}
|
||||
|
||||
/* vtable for the FILE bucket; the default destroy means the wrapped
   apr_file_t is not closed by the bucket. */
const serf_bucket_type_t serf_bucket_type_file = {
    "FILE",
    serf_file_read,
    serf_file_readline,
    serf_default_read_iovec,
    serf_default_read_for_sendfile,
    serf_default_read_bucket,
    serf_file_peek,
    serf_default_destroy_and_data,
};
|
429
buckets/headers_buckets.c
Normal file
429
buckets/headers_buckets.c
Normal file
@ -0,0 +1,429 @@
|
||||
/* Copyright 2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
|
||||
#include <apr_general.h> /* for strcasecmp() */
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
|
||||
/* One header (name/value pair) in a singly-linked list kept in
   insertion order. */
typedef struct header_list {
    const char *header;
    const char *value;

    apr_size_t header_size;
    apr_size_t value_size;

    /* Which of header/value were copied into the bucket's allocator
       (and therefore must be freed on destroy). */
    int alloc_flags;
#define ALLOC_HEADER 0x0001 /* header lives in our allocator */
#define ALLOC_VALUE 0x0002 /* value lives in our allocator */

    struct header_list *next;
} header_list_t;

/* Bucket state: the header list plus a serialization cursor used by
   the read functions to emit "name: value\r\n...\r\n". */
typedef struct {
    header_list_t *list;

    /* Entry currently being serialized. */
    header_list_t *cur_read;
    /* NOTE: consume_chunk() advances with ++state, so these
       enumerators must stay in exactly this declaration order. */
    enum {
        READ_START, /* haven't started reading yet */
        READ_HEADER, /* reading cur_read->header */
        READ_SEP, /* reading ": " */
        READ_VALUE, /* reading cur_read->value */
        READ_CRLF, /* reading "\r\n" */
        READ_TERM, /* reading the final "\r\n" */
        READ_DONE /* no more data to read */
    } state;
    apr_size_t amt_read; /* how much of the current state we've read */

} headers_context_t;
|
||||
|
||||
|
||||
serf_bucket_t *serf_bucket_headers_create(
|
||||
serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
headers_context_t *ctx;
|
||||
|
||||
ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
|
||||
ctx->list = NULL;
|
||||
ctx->state = READ_START;
|
||||
|
||||
return serf_bucket_create(&serf_bucket_type_headers, allocator, ctx);
|
||||
}
|
||||
|
||||
void serf_bucket_headers_setx(
|
||||
serf_bucket_t *bkt,
|
||||
const char *header, apr_size_t header_size, int header_copy,
|
||||
const char *value, apr_size_t value_size, int value_copy)
|
||||
{
|
||||
headers_context_t *ctx = bkt->data;
|
||||
header_list_t *iter = ctx->list;
|
||||
header_list_t *hdr;
|
||||
|
||||
#if 0
|
||||
/* ### include this? */
|
||||
if (ctx->cur_read) {
|
||||
/* we started reading. can't change now. */
|
||||
abort();
|
||||
}
|
||||
#endif
|
||||
|
||||
hdr = serf_bucket_mem_alloc(bkt->allocator, sizeof(*hdr));
|
||||
hdr->header_size = header_size;
|
||||
hdr->value_size = value_size;
|
||||
hdr->alloc_flags = 0;
|
||||
hdr->next = NULL;
|
||||
|
||||
if (header_copy) {
|
||||
hdr->header = serf_bstrmemdup(bkt->allocator, header, header_size);
|
||||
hdr->alloc_flags |= ALLOC_HEADER;
|
||||
}
|
||||
else {
|
||||
hdr->header = header;
|
||||
}
|
||||
|
||||
if (value_copy) {
|
||||
hdr->value = serf_bstrmemdup(bkt->allocator, value, value_size);
|
||||
hdr->alloc_flags |= ALLOC_VALUE;
|
||||
}
|
||||
else {
|
||||
hdr->value = value;
|
||||
}
|
||||
|
||||
/* Add the new header at the end of the list. */
|
||||
while (iter && iter->next) {
|
||||
iter = iter->next;
|
||||
}
|
||||
if (iter)
|
||||
iter->next = hdr;
|
||||
else
|
||||
ctx->list = hdr;
|
||||
}
|
||||
|
||||
/* Set HEADER to VALUE. The header name is used in place (caller must
   keep it alive for the bucket's lifetime); the value is copied. */
void serf_bucket_headers_set(
    serf_bucket_t *headers_bucket,
    const char *header,
    const char *value)
{
    serf_bucket_headers_setx(headers_bucket,
                             header, strlen(header), 0,
                             value, strlen(value), 1);
}
|
||||
|
||||
/* Set HEADER to VALUE, copying both strings into the bucket's
   allocator (safe when the caller's strings are transient). */
void serf_bucket_headers_setc(
    serf_bucket_t *headers_bucket,
    const char *header,
    const char *value)
{
    serf_bucket_headers_setx(headers_bucket,
                             header, strlen(header), 1,
                             value, strlen(value), 1);
}
|
||||
|
||||
/* Set HEADER to VALUE without copying either string; both must
   outlive the bucket. */
void serf_bucket_headers_setn(
    serf_bucket_t *headers_bucket,
    const char *header,
    const char *value)
{
    serf_bucket_headers_setx(headers_bucket,
                             header, strlen(header), 0,
                             value, strlen(value), 0);
}
|
||||
|
||||
const char *serf_bucket_headers_get(
|
||||
serf_bucket_t *headers_bucket,
|
||||
const char *header)
|
||||
{
|
||||
headers_context_t *ctx = headers_bucket->data;
|
||||
header_list_t *found = ctx->list;
|
||||
const char *val = NULL;
|
||||
int value_size = 0;
|
||||
int val_alloc = 0;
|
||||
|
||||
while (found) {
|
||||
if (strcasecmp(found->header, header) == 0) {
|
||||
if (val) {
|
||||
/* The header is already present. RFC 2616, section 4.2
|
||||
indicates that we should append the new value, separated by
|
||||
a comma. Reasoning: for headers whose values are known to
|
||||
be comma-separated, that is clearly the correct behavior;
|
||||
for others, the correct behavior is undefined anyway. */
|
||||
|
||||
/* The "+1" is for the comma; the +1 in the alloc
|
||||
call is for the terminating '\0' */
|
||||
apr_size_t new_size = found->value_size + value_size + 1;
|
||||
char *new_val = serf_bucket_mem_alloc(headers_bucket->allocator,
|
||||
new_size + 1);
|
||||
memcpy(new_val, val, value_size);
|
||||
new_val[value_size] = ',';
|
||||
memcpy(new_val + value_size + 1, found->value,
|
||||
found->value_size);
|
||||
new_val[new_size] = '\0';
|
||||
/* Copy the new value over the already existing value. */
|
||||
if (val_alloc)
|
||||
serf_bucket_mem_free(headers_bucket->allocator, (void*)val);
|
||||
val_alloc |= ALLOC_VALUE;
|
||||
val = new_val;
|
||||
value_size = new_size;
|
||||
}
|
||||
else {
|
||||
val = found->value;
|
||||
value_size = found->value_size;
|
||||
}
|
||||
}
|
||||
found = found->next;
|
||||
}
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
void serf_bucket_headers_do(
|
||||
serf_bucket_t *headers_bucket,
|
||||
serf_bucket_headers_do_callback_fn_t func,
|
||||
void *baton)
|
||||
{
|
||||
headers_context_t *ctx = headers_bucket->data;
|
||||
header_list_t *scan = ctx->list;
|
||||
|
||||
while (scan) {
|
||||
if (func(baton, scan->header, scan->value) != 0) {
|
||||
break;
|
||||
}
|
||||
scan = scan->next;
|
||||
}
|
||||
}
|
||||
|
||||
static void serf_headers_destroy_and_data(serf_bucket_t *bucket)
{
    headers_context_t *ctx = bucket->data;
    header_list_t *entry = ctx->list;

    /* Free every list node, plus any name/value strings that were
       copied into our allocator (tracked per entry in alloc_flags). */
    while (entry != NULL) {
        header_list_t *next = entry->next;

        if (entry->alloc_flags & ALLOC_HEADER) {
            serf_bucket_mem_free(bucket->allocator, (void *)entry->header);
        }
        if (entry->alloc_flags & ALLOC_VALUE) {
            serf_bucket_mem_free(bucket->allocator, (void *)entry->value);
        }
        serf_bucket_mem_free(bucket->allocator, entry);

        entry = next;
    }

    serf_default_destroy_and_data(bucket);
}
|
||||
|
||||
static void select_value(
|
||||
headers_context_t *ctx,
|
||||
const char **value,
|
||||
apr_size_t *len)
|
||||
{
|
||||
const char *v;
|
||||
apr_size_t l;
|
||||
|
||||
if (ctx->state == READ_START) {
|
||||
if (ctx->list == NULL) {
|
||||
/* No headers. Move straight to the TERM state. */
|
||||
ctx->state = READ_TERM;
|
||||
}
|
||||
else {
|
||||
ctx->state = READ_HEADER;
|
||||
ctx->cur_read = ctx->list;
|
||||
}
|
||||
ctx->amt_read = 0;
|
||||
}
|
||||
|
||||
switch (ctx->state) {
|
||||
case READ_HEADER:
|
||||
v = ctx->cur_read->header;
|
||||
l = ctx->cur_read->header_size;
|
||||
break;
|
||||
case READ_SEP:
|
||||
v = ": ";
|
||||
l = 2;
|
||||
break;
|
||||
case READ_VALUE:
|
||||
v = ctx->cur_read->value;
|
||||
l = ctx->cur_read->value_size;
|
||||
break;
|
||||
case READ_CRLF:
|
||||
case READ_TERM:
|
||||
v = "\r\n";
|
||||
l = 2;
|
||||
break;
|
||||
case READ_DONE:
|
||||
*len = 0;
|
||||
return;
|
||||
default:
|
||||
/* Not reachable */
|
||||
return;
|
||||
}
|
||||
|
||||
*value = v + ctx->amt_read;
|
||||
*len = l - ctx->amt_read;
|
||||
}
|
||||
|
||||
/* the current data chunk has been read/consumed. move our internal state. */
|
||||
/* Advance past the chunk just consumed. Relies on the READ_* enum
 * declaration order (++ctx->state steps name -> sep -> value -> crlf).
 * Returns APR_EOF once the final terminator has been emitted,
 * APR_SUCCESS while more data can be read immediately.
 */
static apr_status_t consume_chunk(headers_context_t *ctx)
{
    /* move to the next state, resetting the amount read. */
    ++ctx->state;
    ctx->amt_read = 0;

    /* just sent the terminator and moved to DONE. signal completion. */
    if (ctx->state == READ_DONE)
        return APR_EOF;

    /* end of this header (its CRLF was just consumed). move to the
       next one. */
    if (ctx->state == READ_TERM) {
        ctx->cur_read = ctx->cur_read->next;
        if (ctx->cur_read != NULL) {
            /* We've got another header to send. Reset the read state. */
            ctx->state = READ_HEADER;
        }
        /* else leave in READ_TERM: the final "\r\n" comes next */
    }

    /* there is more data which can be read immediately. */
    return APR_SUCCESS;
}
|
||||
|
||||
static apr_status_t serf_headers_peek(serf_bucket_t *bucket,
|
||||
const char **data,
|
||||
apr_size_t *len)
|
||||
{
|
||||
headers_context_t *ctx = bucket->data;
|
||||
|
||||
select_value(ctx, data, len);
|
||||
|
||||
/* already done or returning the CRLF terminator? return EOF */
|
||||
if (ctx->state == READ_DONE || ctx->state == READ_TERM)
|
||||
return APR_EOF;
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
/* serf_bucket_t.read: serve up to REQUESTED bytes of the serialized
 * header block ("name: value\r\n...\r\n"), one chunk at a time.
 */
static apr_status_t serf_headers_read(serf_bucket_t *bucket,
                                      apr_size_t requested,
                                      const char **data, apr_size_t *len)
{
    headers_context_t *ctx = bucket->data;
    apr_size_t avail;

    select_value(ctx, data, &avail);
    if (ctx->state == READ_DONE) {
        /* Previously *len was left indeterminate on this path; set it
           so callers that check *len before status stay correct. */
        *len = 0;
        return APR_EOF;
    }

    if (requested >= avail) {
        /* return everything from this chunk */
        *len = avail;

        /* we consumed this chunk. advance the state. */
        return consume_chunk(ctx);
    }

    /* return just the amount requested, and advance our pointer */
    *len = requested;
    ctx->amt_read += requested;

    /* there is more that can be read immediately */
    return APR_SUCCESS;
}
|
||||
|
||||
/* serf_bucket_t.readline for the headers bucket. Only CRLF line
 * endings are produced, so ACCEPTABLE must include SERF_NEWLINE_CRLF.
 * Each call consumes exactly one serialization chunk.
 *
 * NOTE(review): *found is derived from the state *after* consuming,
 * so a value chunk (whose bytes contain no newline) can be flagged
 * SERF_NEWLINE_CRLF because the CRLF chunk is merely next — confirm
 * against serf's readline contract.
 */
static apr_status_t serf_headers_readline(serf_bucket_t *bucket,
                                          int acceptable, int *found,
                                          const char **data, apr_size_t *len)
{
    headers_context_t *ctx = bucket->data;
    apr_status_t status;

    /* ### what behavior should we use here? APR_EGENERAL for now */
    if ((acceptable & SERF_NEWLINE_CRLF) == 0)
        return APR_EGENERAL;

    /* get whatever is in this chunk */
    select_value(ctx, data, len);
    if (ctx->state == READ_DONE)
        return APR_EOF;

    /* we consumed this chunk. advance the state. */
    status = consume_chunk(ctx);

    /* the type of newline found is easy... */
    *found = (ctx->state == READ_CRLF || ctx->state == READ_TERM)
        ? SERF_NEWLINE_CRLF : SERF_NEWLINE_NONE;

    return status;
}
|
||||
|
||||
/* serf_bucket_t.read_iovec: fill up to VECS_SIZE iovecs, one
 * serialization chunk per entry, until REQUESTED bytes are gathered,
 * the vec array is full, or read() reports a non-success status.
 */
static apr_status_t serf_headers_read_iovec(serf_bucket_t *bucket,
                                            apr_size_t requested,
                                            int vecs_size,
                                            struct iovec *vecs,
                                            int *vecs_used)
{
    apr_size_t avail = requested;
    int i;

    *vecs_used = 0;

    for (i = 0; i < vecs_size; i++) {
        const char *data;
        apr_size_t len;
        apr_status_t status;

        /* Calling read() would not be a safe opt in the general case, but it
         * is here for the header bucket as it only frees all of the header
         * keys and values when the entire bucket goes away - not on a
         * per-read() basis as is normally the case.
         */
        status = serf_headers_read(bucket, avail, &data, &len);

        if (len) {
            vecs[*vecs_used].iov_base = (char*)data;
            vecs[*vecs_used].iov_len = len;

            (*vecs_used)++;

            /* SERF_READ_ALL_AVAIL is a sentinel, not a count: never
               decrement it. */
            if (avail != SERF_READ_ALL_AVAIL) {
                avail -= len;

                /* If we reach 0, then read()'s status will suffice. */
                if (avail == 0) {
                    return status;
                }
            }
        }

        /* EOF or EAGAIN from read(): stop here and report it. */
        if (status) {
            return status;
        }
    }

    return APR_SUCCESS;
}
|
||||
|
||||
/* vtable for the HEADERS bucket; read_for_sendfile and read_bucket
   fall back to the defaults. */
const serf_bucket_type_t serf_bucket_type_headers = {
    "HEADERS",
    serf_headers_read,
    serf_headers_readline,
    serf_headers_read_iovec,
    serf_default_read_for_sendfile,
    serf_default_read_bucket,
    serf_headers_peek,
    serf_headers_destroy_and_data,
};
|
169
buckets/iovec_buckets.c
Normal file
169
buckets/iovec_buckets.c
Normal file
@ -0,0 +1,169 @@
|
||||
/* Copyright 2011 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <apr_pools.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
|
||||
/* State for an IOVEC bucket: a private copy of the caller's iovec
   array plus a read cursor. The buffers the iovecs point at are NOT
   copied and must outlive the bucket. */
typedef struct {
    struct iovec *vecs;

    /* Total number of buffer stored in the vecs var. */
    int vecs_len;
    /* Points to the first unread buffer. */
    int current_vec;
    /* First buffer offset (bytes of vecs[current_vec] already read).
       NOTE(review): int may be narrower than apr_size_t, so buffers
       larger than INT_MAX would overflow this — confirm callers. */
    int offset;
} iovec_context_t;
|
||||
|
||||
serf_bucket_t *serf_bucket_iovec_create(
|
||||
struct iovec vecs[],
|
||||
int len,
|
||||
serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
iovec_context_t *ctx;
|
||||
int i;
|
||||
|
||||
ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
|
||||
ctx->vecs = serf_bucket_mem_alloc(allocator, len * sizeof(struct iovec));
|
||||
ctx->vecs_len = len;
|
||||
ctx->current_vec = 0;
|
||||
ctx->offset = 0;
|
||||
|
||||
/* copy all buffers to our iovec. */
|
||||
for (i = 0; i < len; i++) {
|
||||
ctx->vecs[i].iov_base = vecs[i].iov_base;
|
||||
ctx->vecs[i].iov_len = vecs[i].iov_len;
|
||||
}
|
||||
|
||||
return serf_bucket_create(&serf_bucket_type_iovec, allocator, ctx);
|
||||
}
|
||||
|
||||
/* readline is not supported for iovec buckets. */
static apr_status_t serf_iovec_readline(serf_bucket_t *bucket,
                                        int acceptable, int *found,
                                        const char **data, apr_size_t *len)
{
    return APR_ENOTIMPL;
}
|
||||
|
||||
static apr_status_t serf_iovec_read_iovec(serf_bucket_t *bucket,
|
||||
apr_size_t requested,
|
||||
int vecs_size,
|
||||
struct iovec *vecs,
|
||||
int *vecs_used)
|
||||
{
|
||||
iovec_context_t *ctx = bucket->data;
|
||||
|
||||
*vecs_used = 0;
|
||||
|
||||
/* copy the requested amount of buffers to the provided iovec. */
|
||||
for (; ctx->current_vec < ctx->vecs_len; ctx->current_vec++) {
|
||||
struct iovec vec = ctx->vecs[ctx->current_vec];
|
||||
apr_size_t remaining;
|
||||
|
||||
if (requested != SERF_READ_ALL_AVAIL && requested <= 0)
|
||||
break;
|
||||
if (*vecs_used >= vecs_size)
|
||||
break;
|
||||
|
||||
vecs[*vecs_used].iov_base = (char*)vec.iov_base + ctx->offset;
|
||||
remaining = vec.iov_len - ctx->offset;
|
||||
|
||||
/* Less bytes requested than remaining in the current buffer. */
|
||||
if (requested != SERF_READ_ALL_AVAIL && requested < remaining) {
|
||||
vecs[*vecs_used].iov_len = requested;
|
||||
ctx->offset += requested;
|
||||
requested = 0;
|
||||
(*vecs_used)++;
|
||||
break;
|
||||
} else {
|
||||
/* Copy the complete buffer. */
|
||||
vecs[*vecs_used].iov_len = remaining;
|
||||
ctx->offset = 0;
|
||||
if (requested != SERF_READ_ALL_AVAIL)
|
||||
requested -= remaining;
|
||||
(*vecs_used)++;
|
||||
}
|
||||
}
|
||||
|
||||
if (ctx->current_vec == ctx->vecs_len && !ctx->offset)
|
||||
return APR_EOF;
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
/* serf_bucket_t.read: delegate to read_iovec with a single iovec and
   unpack the result into data/len. */
static apr_status_t serf_iovec_read(serf_bucket_t *bucket,
                                    apr_size_t requested,
                                    const char **data, apr_size_t *len)
{
    struct iovec single[1];
    int used;
    apr_status_t status;

    status = serf_iovec_read_iovec(bucket, requested, 1, single, &used);

    if (used == 0) {
        *len = 0;
    }
    else {
        *data = single[0].iov_base;
        *len = single[0].iov_len;
    }

    return status;
}
|
||||
|
||||
static apr_status_t serf_iovec_peek(serf_bucket_t *bucket,
|
||||
const char **data,
|
||||
apr_size_t *len)
|
||||
{
|
||||
iovec_context_t *ctx = bucket->data;
|
||||
|
||||
if (ctx->current_vec >= ctx->vecs_len) {
|
||||
*len = 0;
|
||||
return APR_EOF;
|
||||
}
|
||||
|
||||
/* Return the first unread buffer, don't bother combining all
|
||||
remaining data. */
|
||||
*data = ctx->vecs[ctx->current_vec].iov_base;
|
||||
*len = ctx->vecs[ctx->current_vec].iov_len;
|
||||
|
||||
if (ctx->current_vec + 1 == ctx->vecs_len)
|
||||
return APR_EOF;
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
static void serf_iovec_destroy(serf_bucket_t *bucket)
{
    iovec_context_t *ctx = bucket->data;

    /* Free our private copy of the iovec array (the buffers it points
       at belong to the caller), then the context itself. */
    serf_bucket_mem_free(bucket->allocator, ctx->vecs);

    serf_default_destroy_and_data(bucket);
}
|
||||
|
||||
|
||||
/* vtable for the IOVEC bucket; readline returns APR_ENOTIMPL. */
const serf_bucket_type_t serf_bucket_type_iovec = {
    "IOVEC",
    serf_iovec_read,
    serf_iovec_readline,
    serf_iovec_read_iovec,
    serf_default_read_for_sendfile,
    serf_default_read_bucket,
    serf_iovec_peek,
    serf_iovec_destroy,
};
|
134
buckets/limit_buckets.c
Normal file
134
buckets/limit_buckets.c
Normal file
@ -0,0 +1,134 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <apr_pools.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
/* Older versions of APR do not have this macro. */
#ifdef APR_SIZE_MAX
#define REQUESTED_MAX APR_SIZE_MAX
#else
#define REQUESTED_MAX (~((apr_size_t)0))
#endif


/* State for a LIMIT bucket: pass through at most `remaining` more
   bytes of the wrapped stream, then report EOF. */
typedef struct {
    serf_bucket_t *stream;
    /* 64-bit so limits larger than apr_size_t are representable;
       reads clamp requests to REQUESTED_MAX. */
    apr_uint64_t remaining;
} limit_context_t;
|
||||
|
||||
|
||||
/* Wrap STREAM so that at most LEN bytes can be read from it; the
   bucket takes ownership of STREAM and destroys it on teardown. */
serf_bucket_t *serf_bucket_limit_create(
    serf_bucket_t *stream, apr_uint64_t len, serf_bucket_alloc_t *allocator)
{
    limit_context_t *ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));

    ctx->stream = stream;
    ctx->remaining = len;

    return serf_bucket_create(&serf_bucket_type_limit, allocator, ctx);
}
|
||||
|
||||
/* serf_bucket_t.read: delegate to the wrapped stream, never asking
   for (or accounting) more than the limit allows. */
static apr_status_t serf_limit_read(serf_bucket_t *bucket,
                                    apr_size_t requested,
                                    const char **data, apr_size_t *len)
{
    limit_context_t *ctx = bucket->data;
    apr_status_t status;

    if (ctx->remaining == 0) {
        *len = 0;
        return APR_EOF;
    }

    /* Clamp the request to what the limit still allows. `remaining`
       is 64-bit while `requested` is apr_size_t, so cap at
       REQUESTED_MAX when the remainder doesn't fit. */
    if (requested == SERF_READ_ALL_AVAIL || requested > ctx->remaining) {
        requested = (ctx->remaining <= REQUESTED_MAX)
                        ? (apr_size_t) ctx->remaining
                        : REQUESTED_MAX;
    }

    status = serf_bucket_read(ctx->stream, requested, data, len);

    if (!SERF_BUCKET_READ_ERROR(status)) {
        ctx->remaining -= *len;
    }

    /* Limit reached with no pending status: surface EOF now. */
    if (ctx->remaining == 0 && status == APR_SUCCESS) {
        status = APR_EOF;
    }

    return status;
}
|
||||
|
||||
/* serf_bucket_t.readline for the LIMIT bucket.
 *
 * NOTE(review): unlike read(), the line length cannot be clamped
 * before delegating, so a single line straddling the limit may
 * deliver bytes past it — and `remaining -= *len` would then wrap the
 * unsigned counter. Confirm upstream intent before relying on exact
 * limit enforcement here.
 */
static apr_status_t serf_limit_readline(serf_bucket_t *bucket,
                                        int acceptable, int *found,
                                        const char **data, apr_size_t *len)
{
    limit_context_t *ctx = bucket->data;
    apr_status_t status;

    /* Limit already reached: report EOF immediately. */
    if (!ctx->remaining) {
        *len = 0;
        return APR_EOF;
    }

    status = serf_bucket_readline(ctx->stream, acceptable, found, data, len);

    if (!SERF_BUCKET_READ_ERROR(status)) {
        ctx->remaining -= *len;
    }

    /* If we have met our limit and don't have a status, return EOF. */
    if (!ctx->remaining && !status) {
        status = APR_EOF;
    }

    return status;
}
|
||||
|
||||
static apr_status_t serf_limit_peek(serf_bucket_t *bucket,
|
||||
const char **data,
|
||||
apr_size_t *len)
|
||||
{
|
||||
limit_context_t *ctx = bucket->data;
|
||||
|
||||
return serf_bucket_peek(ctx->stream, data, len);
|
||||
}
|
||||
|
||||
/* Destroy the LIMIT bucket. The wrapped stream is owned by this bucket,
 * so it is destroyed too; the context is freed by the default helper. */
static void serf_limit_destroy(serf_bucket_t *bucket)
{
    limit_context_t *ctx = bucket->data;

    serf_bucket_destroy(ctx->stream);

    serf_default_destroy_and_data(bucket);
}
|
||||
|
||||
/* Vtable for the LIMIT bucket type. iovec/sendfile/read_bucket use the
 * library defaults, which are built on the read function above. */
const serf_bucket_type_t serf_bucket_type_limit = {
    "LIMIT",
    serf_limit_read,
    serf_limit_readline,
    serf_default_read_iovec,
    serf_default_read_for_sendfile,
    serf_default_read_bucket,
    serf_limit_peek,
    serf_limit_destroy,
};
|
140
buckets/mmap_buckets.c
Normal file
140
buckets/mmap_buckets.c
Normal file
@ -0,0 +1,140 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <apr_pools.h>
|
||||
#include <apr_mmap.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
#if APR_HAS_MMAP
|
||||
|
||||
/* Private context for an MMAP bucket serving data out of an APR
 * memory-mapped file. */
typedef struct {
    apr_mmap_t *mmap;     /* the underlying mapping (not owned) */
    void *current;        /* NOTE(review): written once (NULL) and never
                             read in this file — apparently vestigial */
    apr_off_t offset;     /* position of the next byte to serve */
    apr_off_t remaining;  /* bytes left between offset and end of mapping */
} mmap_context_t;
|
||||
|
||||
|
||||
serf_bucket_t *serf_bucket_mmap_create(
|
||||
apr_mmap_t *file_mmap,
|
||||
serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
mmap_context_t *ctx;
|
||||
|
||||
ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
|
||||
ctx->mmap = file_mmap;
|
||||
ctx->current = NULL;
|
||||
ctx->offset = 0;
|
||||
ctx->remaining = ctx->mmap->size;
|
||||
|
||||
return serf_bucket_create(&serf_bucket_type_mmap, allocator, ctx);
|
||||
}
|
||||
|
||||
/* Serve up to REQUESTED bytes directly out of the mapping (zero copy).
 * Returns APR_EOF when the last byte has been handed out.
 *
 * Fixes over the previous version: the return value of apr_mmap_offset()
 * was ignored, and an offset was computed even when nothing remained. */
static apr_status_t serf_mmap_read(serf_bucket_t *bucket,
                                   apr_size_t requested,
                                   const char **data, apr_size_t *len)
{
    mmap_context_t *ctx = bucket->data;
    apr_status_t status;

    /* Nothing left: don't ask for an offset one past the mapping. */
    if (ctx->remaining == 0) {
        *len = 0;
        return APR_EOF;
    }

    if (requested == SERF_READ_ALL_AVAIL || requested > ctx->remaining) {
        /* remaining is apr_off_t; make the narrowing explicit. */
        *len = (apr_size_t)ctx->remaining;
    }
    else {
        *len = requested;
    }

    /* ### Would it be faster to call this once and do the offset ourselves? */
    status = apr_mmap_offset((void**)data, ctx->mmap, ctx->offset);
    if (status) {
        *len = 0;
        return status;
    }

    /* For the next read... */
    ctx->offset += *len;
    ctx->remaining -= *len;

    if (ctx->remaining == 0) {
        return APR_EOF;
    }
    return APR_SUCCESS;
}
|
||||
|
||||
/* Read up to one line out of the mapping without copying. *DATA points
 * into the mapping; *LEN covers the line including its terminator when
 * one was found (per serf_util_readline's advance of END). */
static apr_status_t serf_mmap_readline(serf_bucket_t *bucket,
                                       int acceptable, int *found,
                                       const char **data, apr_size_t *len)
{
    mmap_context_t *ctx = bucket->data;
    const char *end;

    /* ### Would it be faster to call this once and do the offset ourselves? */
    apr_mmap_offset((void**)data, ctx->mmap, ctx->offset);
    end = *data;

    /* XXX An overflow is generated if we pass &ctx->remaining to readline.
     * Not real clear why.
     */
    /* Scan at most the rest of the mapping. */
    *len = ctx->remaining;

    serf_util_readline(&end, len, acceptable, found);

    /* END was advanced past the line; the distance is what we consumed. */
    *len = end - *data;

    ctx->offset += *len;
    ctx->remaining -= *len;

    if (ctx->remaining == 0) {
        return APR_EOF;
    }
    return APR_SUCCESS;
}
|
||||
|
||||
/* Peek is not implemented for mmap buckets. */
static apr_status_t serf_mmap_peek(serf_bucket_t *bucket,
                                   const char **data,
                                   apr_size_t *len)
{
    /* Oh, bah. */
    return APR_ENOTIMPL;
}
|
||||
|
||||
/* Vtable for the MMAP bucket type (APR_HAS_MMAP build). The context does
 * not own the mapping, so the default destroy (free context only) is used. */
const serf_bucket_type_t serf_bucket_type_mmap = {
    "MMAP",
    serf_mmap_read,
    serf_mmap_readline,
    serf_default_read_iovec,
    serf_default_read_for_sendfile,
    serf_default_read_bucket,
    serf_mmap_peek,
    serf_default_destroy_and_data,
};
|
||||
|
||||
#else /* !APR_HAS_MMAP */
|
||||
|
||||
/* Stub for platforms where APR was built without mmap support: callers
 * get NULL and must fall back to another bucket type. */
serf_bucket_t *serf_bucket_mmap_create(apr_mmap_t *file_mmap,
                                       serf_bucket_alloc_t *allocator)
{
    return NULL;
}
|
||||
|
||||
/* Placeholder vtable for the !APR_HAS_MMAP build; never used since
 * serf_bucket_mmap_create() returns NULL in this configuration. */
const serf_bucket_type_t serf_bucket_type_mmap = {
    "MMAP",
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
};
|
||||
|
||||
#endif
|
228
buckets/request_buckets.c
Normal file
228
buckets/request_buckets.c
Normal file
@ -0,0 +1,228 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <apr_pools.h>
|
||||
#include <apr_strings.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
|
||||
/* Private context for a REQUEST bucket, alive only until the first
 * read/peek serializes the request (see serialize_data). */
typedef struct {
    const char *method;       /* HTTP method, e.g. "GET" (not copied) */
    const char *uri;          /* request URI (not copied) */
    serf_bucket_t *headers;   /* HEADERS bucket for the request headers */
    serf_bucket_t *body;      /* request body bucket, or NULL */
    apr_int64_t len;          /* body length, or LENGTH_UNKNOWN → chunked */
} request_context_t;
|
||||
|
||||
#define LENGTH_UNKNOWN ((apr_int64_t)-1)
|
||||
|
||||
|
||||
serf_bucket_t *serf_bucket_request_create(
|
||||
const char *method,
|
||||
const char *URI,
|
||||
serf_bucket_t *body,
|
||||
serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
request_context_t *ctx;
|
||||
|
||||
ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
|
||||
ctx->method = method;
|
||||
ctx->uri = URI;
|
||||
ctx->headers = serf_bucket_headers_create(allocator);
|
||||
ctx->body = body;
|
||||
ctx->len = LENGTH_UNKNOWN;
|
||||
|
||||
return serf_bucket_create(&serf_bucket_type_request, allocator, ctx);
|
||||
}
|
||||
|
||||
/* Record the body length for this request. Once set, serialize_data will
 * emit a Content-Length header instead of chunked Transfer-Encoding. */
void serf_bucket_request_set_CL(
    serf_bucket_t *bucket,
    apr_int64_t len)
{
    request_context_t *ctx = (request_context_t *)bucket->data;

    ctx->len = len;
}
|
||||
|
||||
/* Return the HEADERS bucket so the caller can add request headers.
 * Only valid before the request has been serialized. */
serf_bucket_t *serf_bucket_request_get_headers(
    serf_bucket_t *bucket)
{
    return ((request_context_t *)bucket->data)->headers;
}
|
||||
|
||||
void serf_bucket_request_set_root(
|
||||
serf_bucket_t *bucket,
|
||||
const char *root_url)
|
||||
{
|
||||
request_context_t *ctx = (request_context_t *)bucket->data;
|
||||
|
||||
/* If uri is already absolute, don't change it. */
|
||||
if (ctx->uri[0] != '/')
|
||||
return;
|
||||
|
||||
/* If uri is '/' replace it with root_url. */
|
||||
if (ctx->uri[1] == '\0')
|
||||
ctx->uri = root_url;
|
||||
else
|
||||
ctx->uri =
|
||||
apr_pstrcat(serf_bucket_allocator_get_pool(bucket->allocator),
|
||||
root_url,
|
||||
ctx->uri,
|
||||
NULL);
|
||||
}
|
||||
|
||||
/* Turn this REQUEST bucket into an aggregate containing the serialized
 * request-line, the headers bucket, and (optionally) the body. Called
 * lazily on the first read/peek. Frees the private context: after this,
 * the bucket is a plain aggregate.
 *
 * Fix: the Content-Length value is now formatted with the bounded
 * apr_snprintf() rather than sprintf(). */
static void serialize_data(serf_bucket_t *bucket)
{
    request_context_t *ctx = bucket->data;
    serf_bucket_t *new_bucket;
    const char *new_data;
    struct iovec iov[4];
    apr_size_t nbytes;

    /* Serialize the request-line and headers into one mother string,
     * and wrap a bucket around it.
     */
    iov[0].iov_base = (char*)ctx->method;
    iov[0].iov_len = strlen(ctx->method);
    iov[1].iov_base = " ";
    iov[1].iov_len = sizeof(" ") - 1;
    iov[2].iov_base = (char*)ctx->uri;
    iov[2].iov_len = strlen(ctx->uri);
    iov[3].iov_base = " HTTP/1.1\r\n";
    iov[3].iov_len = sizeof(" HTTP/1.1\r\n") - 1;

    /* ### pool allocation! */
    new_data = apr_pstrcatv(serf_bucket_allocator_get_pool(bucket->allocator),
                            iov, 4, &nbytes);

    /* Create a new bucket for this string. A free function isn't needed
     * since the string is residing in a pool.
     */
    new_bucket = SERF_BUCKET_SIMPLE_STRING_LEN(new_data, nbytes,
                                               bucket->allocator);

    /* Build up the new bucket structure.
     *
     * Note that self needs to become an aggregate bucket so that a
     * pointer to self still represents the "right" data.
     */
    serf_bucket_aggregate_become(bucket);

    /* Insert the two buckets. */
    serf_bucket_aggregate_append(bucket, new_bucket);
    serf_bucket_aggregate_append(bucket, ctx->headers);

    /* If we know the length, then use C-L and the raw body. Otherwise,
       use chunked encoding for the request. */
    if (ctx->len != LENGTH_UNKNOWN) {
        char buf[30];
        apr_snprintf(buf, sizeof(buf), "%" APR_INT64_T_FMT, ctx->len);
        serf_bucket_headers_set(ctx->headers, "Content-Length", buf);
        if (ctx->body != NULL)
            serf_bucket_aggregate_append(bucket, ctx->body);
    }
    else if (ctx->body != NULL) {
        /* Morph the body bucket to a chunked encoding bucket for now. */
        serf_bucket_headers_setn(ctx->headers, "Transfer-Encoding", "chunked");
        ctx->body = serf_bucket_chunk_create(ctx->body, bucket->allocator);
        serf_bucket_aggregate_append(bucket, ctx->body);
    }

    /* Our private context is no longer needed, and is not referred to by
     * any existing bucket. Toss it.
     */
    serf_bucket_mem_free(bucket->allocator, ctx);
}
|
||||
|
||||
/* First read on a REQUEST bucket: serialize it into an aggregate, then
 * delegate the read to the (now-aggregate) bucket itself. */
static apr_status_t serf_request_read(serf_bucket_t *bucket,
                                      apr_size_t requested,
                                      const char **data, apr_size_t *len)
{
    /* Seralize our private data into a new aggregate bucket. */
    serialize_data(bucket);

    /* Delegate to the "new" aggregate bucket to do the read. */
    return serf_bucket_read(bucket, requested, data, len);
}
|
||||
|
||||
/* First readline on a REQUEST bucket: serialize, then delegate to the
 * aggregate this bucket has become. */
static apr_status_t serf_request_readline(serf_bucket_t *bucket,
                                          int acceptable, int *found,
                                          const char **data, apr_size_t *len)
{
    /* Seralize our private data into a new aggregate bucket. */
    serialize_data(bucket);

    /* Delegate to the "new" aggregate bucket to do the readline. */
    return serf_bucket_readline(bucket, acceptable, found, data, len);
}
|
||||
|
||||
/* First vectored read on a REQUEST bucket: serialize, then delegate to
 * the aggregate this bucket has become. */
static apr_status_t serf_request_read_iovec(serf_bucket_t *bucket,
                                            apr_size_t requested,
                                            int vecs_size,
                                            struct iovec *vecs,
                                            int *vecs_used)
{
    /* Seralize our private data into a new aggregate bucket. */
    serialize_data(bucket);

    /* Delegate to the "new" aggregate bucket to do the read. */
    return serf_bucket_read_iovec(bucket, requested,
                                  vecs_size, vecs, vecs_used);
}
|
||||
|
||||
/* First peek on a REQUEST bucket: serialize, then delegate to the
 * aggregate this bucket has become. */
static apr_status_t serf_request_peek(serf_bucket_t *bucket,
                                      const char **data,
                                      apr_size_t *len)
{
    /* Seralize our private data into a new aggregate bucket. */
    serialize_data(bucket);

    /* Delegate to the "new" aggregate bucket to do the peek. */
    return serf_bucket_peek(bucket, data, len);
}
|
||||
|
||||
void serf_bucket_request_become(
|
||||
serf_bucket_t *bucket,
|
||||
const char *method,
|
||||
const char *uri,
|
||||
serf_bucket_t *body)
|
||||
{
|
||||
request_context_t *ctx;
|
||||
|
||||
ctx = serf_bucket_mem_alloc(bucket->allocator, sizeof(*ctx));
|
||||
ctx->method = method;
|
||||
ctx->uri = uri;
|
||||
ctx->headers = serf_bucket_headers_create(bucket->allocator);
|
||||
ctx->body = body;
|
||||
|
||||
bucket->type = &serf_bucket_type_request;
|
||||
bucket->data = ctx;
|
||||
|
||||
/* The allocator remains the same. */
|
||||
}
|
||||
|
||||
/* Vtable for the REQUEST bucket type. Read entry points serialize the
 * request on first use; afterwards the bucket behaves as an aggregate. */
const serf_bucket_type_t serf_bucket_type_request = {
    "REQUEST",
    serf_request_read,
    serf_request_readline,
    serf_request_read_iovec,
    serf_default_read_for_sendfile,
    serf_default_read_bucket,
    serf_request_peek,
    serf_default_destroy_and_data,
};
|
||||
|
135
buckets/response_body_buckets.c
Normal file
135
buckets/response_body_buckets.c
Normal file
@ -0,0 +1,135 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <apr_pools.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
/* Older versions of APR do not have this macro. */
|
||||
#ifdef APR_SIZE_MAX
|
||||
#define REQUESTED_MAX APR_SIZE_MAX
|
||||
#else
|
||||
#define REQUESTED_MAX (~((apr_size_t)0))
|
||||
#endif
|
||||
|
||||
|
||||
/* Private context for a RESPONSE_BODY bucket: a Content-Length-bounded
 * view of the wrapped stream that flags premature EOF as truncation. */
typedef struct {
    serf_bucket_t *stream;    /* wrapped source bucket (owned) */
    apr_uint64_t remaining;   /* body bytes the server still owes us */
} body_context_t;
|
||||
|
||||
/* Create a RESPONSE_BODY bucket serving exactly LEN bytes from STREAM.
 * Ownership of STREAM transfers to the new bucket. */
serf_bucket_t *serf_bucket_response_body_create(
    serf_bucket_t *stream, apr_uint64_t len, serf_bucket_alloc_t *allocator)
{
    body_context_t *new_ctx = serf_bucket_mem_alloc(allocator,
                                                    sizeof(*new_ctx));

    new_ctx->stream = stream;
    new_ctx->remaining = len;

    return serf_bucket_create(&serf_bucket_type_response_body, allocator,
                              new_ctx);
}
|
||||
|
||||
static apr_status_t serf_response_body_read(serf_bucket_t *bucket,
|
||||
apr_size_t requested,
|
||||
const char **data,
|
||||
apr_size_t *len)
|
||||
{
|
||||
body_context_t *ctx = bucket->data;
|
||||
apr_status_t status;
|
||||
|
||||
if (!ctx->remaining) {
|
||||
*len = 0;
|
||||
return APR_EOF;
|
||||
}
|
||||
|
||||
if (requested == SERF_READ_ALL_AVAIL || requested > ctx->remaining) {
|
||||
if (ctx->remaining <= REQUESTED_MAX) {
|
||||
requested = (apr_size_t) ctx->remaining;
|
||||
} else {
|
||||
requested = REQUESTED_MAX;
|
||||
}
|
||||
}
|
||||
|
||||
status = serf_bucket_read(ctx->stream, requested, data, len);
|
||||
|
||||
if (!SERF_BUCKET_READ_ERROR(status)) {
|
||||
ctx->remaining -= *len;
|
||||
}
|
||||
|
||||
if (APR_STATUS_IS_EOF(status) && ctx->remaining > 0) {
|
||||
/* The server sent less data than expected. */
|
||||
status = SERF_ERROR_TRUNCATED_HTTP_RESPONSE;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Read one line of the body, accounting it against the declared length.
 * EOF with bytes still owed is reported as truncation.
 *
 * NOTE(review): the readline is not clamped to ctx->remaining, so a line
 * extending past the declared length would wrap the unsigned subtraction
 * below — same caveat as serf_limit_readline. TODO confirm.
 */
static apr_status_t serf_response_body_readline(serf_bucket_t *bucket,
                                                int acceptable, int *found,
                                                const char **data,
                                                apr_size_t *len)
{
    body_context_t *ctx = bucket->data;
    apr_status_t status;

    /* Body fully delivered: signal EOF without reading. */
    if (!ctx->remaining) {
        *len = 0;
        return APR_EOF;
    }

    status = serf_bucket_readline(ctx->stream, acceptable, found, data, len);

    /* Only successful reads consume the declared length. */
    if (!SERF_BUCKET_READ_ERROR(status)) {
        ctx->remaining -= *len;
    }

    if (APR_STATUS_IS_EOF(status) && ctx->remaining > 0) {
        /* The server sent less data than expected. */
        status = SERF_ERROR_TRUNCATED_HTTP_RESPONSE;
    }

    return status;
}
|
||||
|
||||
/* Peek at the wrapped stream. The peek is delegated as-is; the declared
 * length is only enforced by the read paths. */
static apr_status_t serf_response_body_peek(serf_bucket_t *bucket,
                                            const char **data,
                                            apr_size_t *len)
{
    body_context_t *ctx = bucket->data;

    return serf_bucket_peek(ctx->stream, data, len);
}
|
||||
|
||||
/* Destroy the RESPONSE_BODY bucket along with the wrapped stream it owns. */
static void serf_response_body_destroy(serf_bucket_t *bucket)
{
    body_context_t *ctx = bucket->data;

    serf_bucket_destroy(ctx->stream);

    serf_default_destroy_and_data(bucket);
}
|
||||
|
||||
/* Vtable for the RESPONSE_BODY bucket type. */
const serf_bucket_type_t serf_bucket_type_response_body = {
    "RESPONSE_BODY",
    serf_response_body_read,
    serf_response_body_readline,
    serf_default_read_iovec,
    serf_default_read_for_sendfile,
    serf_default_read_bucket,
    serf_response_body_peek,
    serf_response_body_destroy,
};
|
464
buckets/response_buckets.c
Normal file
464
buckets/response_buckets.c
Normal file
@ -0,0 +1,464 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <apr_lib.h>
|
||||
#include <apr_strings.h>
|
||||
#include <apr_date.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
|
||||
/* Private context for a RESPONSE bucket: an incremental HTTP/1.x response
 * parser driven as data arrives on the wrapped stream. */
typedef struct {
    serf_bucket_t *stream;
    serf_bucket_t *body; /* Pointer to the stream wrapping the body. */
    serf_bucket_t *headers; /* holds parsed headers */

    enum {
        STATE_STATUS_LINE, /* reading status line */
        STATE_HEADERS, /* reading headers */
        STATE_BODY, /* reading body */
        STATE_TRAILERS, /* reading trailers */
        STATE_DONE /* we've sent EOF */
    } state;

    /* Buffer for accumulating a line from the response. */
    serf_linebuf_t linebuf;

    /* Parsed status line; sl.reason is allocated from the bucket
     * allocator once the status line has been read. */
    serf_status_line sl;

    int chunked; /* Do we need to read trailers? */
    int head_req; /* Was this a HEAD request? */
} response_context_t;
|
||||
|
||||
|
||||
serf_bucket_t *serf_bucket_response_create(
|
||||
serf_bucket_t *stream,
|
||||
serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
response_context_t *ctx;
|
||||
|
||||
ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
|
||||
ctx->stream = stream;
|
||||
ctx->body = NULL;
|
||||
ctx->headers = serf_bucket_headers_create(allocator);
|
||||
ctx->state = STATE_STATUS_LINE;
|
||||
ctx->chunked = 0;
|
||||
ctx->head_req = 0;
|
||||
|
||||
serf_linebuf_init(&ctx->linebuf);
|
||||
|
||||
return serf_bucket_create(&serf_bucket_type_response, allocator, ctx);
|
||||
}
|
||||
|
||||
/* Mark this response as belonging to a HEAD request, so the parser knows
 * not to expect a body (see run_machine). */
void serf_bucket_response_set_head(
    serf_bucket_t *bucket)
{
    response_context_t *ctx = bucket->data;

    ctx->head_req = 1;
}
|
||||
|
||||
/* Return the HEADERS bucket holding the parsed response headers.
 * Headers accumulate as the parser advances. */
serf_bucket_t *serf_bucket_response_get_headers(
    serf_bucket_t *bucket)
{
    return ((response_context_t *)bucket->data)->headers;
}
|
||||
|
||||
|
||||
/* Destroy the RESPONSE bucket: the stream, the body wrapper (if created)
 * and the headers bucket are all owned here. */
static void serf_response_destroy_and_data(serf_bucket_t *bucket)
{
    response_context_t *ctx = bucket->data;

    /* sl.reason is only allocated once parse_status_line has run, which
     * is exactly when we have left STATE_STATUS_LINE. */
    if (ctx->state != STATE_STATUS_LINE) {
        serf_bucket_mem_free(bucket->allocator, (void*)ctx->sl.reason);
    }

    serf_bucket_destroy(ctx->stream);
    if (ctx->body != NULL)
        serf_bucket_destroy(ctx->body);
    serf_bucket_destroy(ctx->headers);

    serf_default_destroy_and_data(bucket);
}
|
||||
|
||||
/* Pull the next (partial) line from the stream into ctx->linebuf. */
static apr_status_t fetch_line(response_context_t *ctx, int acceptable)
{
    return serf_linebuf_fetch(&ctx->linebuf, ctx->stream, acceptable);
}
|
||||
|
||||
/* Parse ctx->linebuf.line as an HTTP Status-Line into ctx->sl.
 * Returns SERF_ERROR_BAD_HTTP_RESPONSE if the line doesn't match the
 * "HTTP/#.# ###*" shape. The reason phrase is copied out of the line
 * buffer with ALLOCATOR (freed in serf_response_destroy_and_data). */
static apr_status_t parse_status_line(response_context_t *ctx,
                                      serf_bucket_alloc_t *allocator)
{
    int res;
    char *reason; /* ### stupid APR interface makes this non-const */

    /* ctx->linebuf.line should be of form: HTTP/1.1 200 OK */
    res = apr_date_checkmask(ctx->linebuf.line, "HTTP/#.# ###*");
    if (!res) {
        /* Not an HTTP response?  Well, at least we won't understand it. */
        return SERF_ERROR_BAD_HTTP_RESPONSE;
    }

    /* The mask guarantees digits at offsets 5 and 7 ("HTTP/x.y") and the
     * status code starting at offset 8 (after the space). */
    ctx->sl.version = SERF_HTTP_VERSION(ctx->linebuf.line[5] - '0',
                                        ctx->linebuf.line[7] - '0');
    ctx->sl.code = apr_strtoi64(ctx->linebuf.line + 8, &reason, 10);

    /* Skip leading spaces for the reason string. */
    if (apr_isspace(*reason)) {
        reason++;
    }

    /* Copy the reason value out of the line buffer. */
    ctx->sl.reason = serf_bstrmemdup(allocator, reason,
                                     ctx->linebuf.used
                                     - (reason - ctx->linebuf.line));

    return APR_SUCCESS;
}
|
||||
|
||||
/* This code should be replaced with header buckets. */
|
||||
/* Fetch and parse one header line into ctx->headers. An empty line (end
 * of headers) is left for the caller to detect via ctx->linebuf.
 * This code should be replaced with header buckets. */
static apr_status_t fetch_headers(serf_bucket_t *bkt, response_context_t *ctx)
{
    apr_status_t status;

    /* RFC 2616 says that CRLF is the only line ending, but we can easily
     * accept any kind of line ending.
     */
    status = fetch_line(ctx, SERF_NEWLINE_ANY);
    if (SERF_BUCKET_READ_ERROR(status)) {
        return status;
    }
    /* Something was read. Process it. */

    if (ctx->linebuf.state == SERF_LINEBUF_READY && ctx->linebuf.used) {
        const char *end_key;
        const char *c;

        /* Split the line at the first ':'; its absence means a malformed
         * header line. */
        end_key = c = memchr(ctx->linebuf.line, ':', ctx->linebuf.used);
        if (!c) {
            /* Bad headers? */
            return SERF_ERROR_BAD_HTTP_RESPONSE;
        }

        /* Skip over initial ':' */
        c++;

        /* And skip all whitespaces. */
        for(; c < ctx->linebuf.line + ctx->linebuf.used; c++)
        {
            if (!apr_isspace(*c))
            {
                break;
            }
        }

        /* Always copy the headers (from the linebuf into new mem). */
        /* ### we should be able to optimize some mem copies */
        serf_bucket_headers_setx(
            ctx->headers,
            ctx->linebuf.line, end_key - ctx->linebuf.line, 1,
            c, ctx->linebuf.line + ctx->linebuf.used - c, 1);
    }

    return status;
}
|
||||
|
||||
/* Perform one iteration of the state machine.
|
||||
*
|
||||
* Will return when one the following conditions occurred:
|
||||
* 1) a state change
|
||||
* 2) an error
|
||||
* 3) the stream is not ready or at EOF
|
||||
* 4) APR_SUCCESS, meaning the machine can be run again immediately
|
||||
*/
|
||||
/* Perform one iteration of the state machine.
 *
 * Will return when one the following conditions occurred:
 *  1) a state change
 *  2) an error
 *  3) the stream is not ready or at EOF
 *  4) APR_SUCCESS, meaning the machine can be run again immediately
 *
 * Fix: errno is now cleared before apr_strtoi64() when parsing
 * Content-Length — the strtol family only sets errno on failure, so a
 * stale ERANGE from an earlier call previously caused a spurious
 * overflow error. */
static apr_status_t run_machine(serf_bucket_t *bkt, response_context_t *ctx)
{
    apr_status_t status = APR_SUCCESS; /* initialize to avoid gcc warnings */

    switch (ctx->state) {
    case STATE_STATUS_LINE:
        /* RFC 2616 says that CRLF is the only line ending, but we can easily
         * accept any kind of line ending.
         */
        status = fetch_line(ctx, SERF_NEWLINE_ANY);
        if (SERF_BUCKET_READ_ERROR(status))
            return status;

        if (ctx->linebuf.state == SERF_LINEBUF_READY) {
            /* The Status-Line is in the line buffer. Process it. */
            status = parse_status_line(ctx, bkt->allocator);
            if (status)
                return status;

            /* Good times ahead: we're switching protocols! */
            if (ctx->sl.code == 101) {
                ctx->body =
                    serf_bucket_barrier_create(ctx->stream, bkt->allocator);
                ctx->state = STATE_DONE;
                break;
            }

            /* Okay... move on to reading the headers. */
            ctx->state = STATE_HEADERS;
        }
        else {
            /* The connection closed before we could get the next
             * response. Treat the request as lost so that our upper
             * end knows the server never tried to give us a response.
             */
            if (APR_STATUS_IS_EOF(status)) {
                return SERF_ERROR_REQUEST_LOST;
            }
        }
        break;
    case STATE_HEADERS:
        status = fetch_headers(bkt, ctx);
        if (SERF_BUCKET_READ_ERROR(status))
            return status;

        /* If an empty line was read, then we hit the end of the headers.
         * Move on to the body.
         */
        if (ctx->linebuf.state == SERF_LINEBUF_READY && !ctx->linebuf.used) {
            const void *v;

            /* Advance the state. */
            ctx->state = STATE_BODY;

            ctx->body =
                serf_bucket_barrier_create(ctx->stream, bkt->allocator);

            /* Are we C-L, chunked, or conn close? */
            v = serf_bucket_headers_get(ctx->headers, "Content-Length");
            if (v) {
                apr_uint64_t length;

                /* apr_strtoi64 only sets errno on failure; clear it so a
                 * previous call's ERANGE cannot leak into this check. */
                errno = 0;
                length = apr_strtoi64(v, NULL, 10);
                if (errno == ERANGE) {
                    return APR_FROM_OS_ERROR(ERANGE);
                }
                ctx->body = serf_bucket_response_body_create(
                              ctx->body, length, bkt->allocator);
            }
            else {
                v = serf_bucket_headers_get(ctx->headers, "Transfer-Encoding");

                /* Need to handle multiple transfer-encoding. */
                if (v && strcasecmp("chunked", v) == 0) {
                    ctx->chunked = 1;
                    ctx->body = serf_bucket_dechunk_create(ctx->body,
                                                           bkt->allocator);
                }

                if (!v && (ctx->sl.code == 204 || ctx->sl.code == 304)) {
                    ctx->state = STATE_DONE;
                }
            }
            v = serf_bucket_headers_get(ctx->headers, "Content-Encoding");
            if (v) {
                /* Need to handle multiple content-encoding. */
                if (v && strcasecmp("gzip", v) == 0) {
                    ctx->body =
                        serf_bucket_deflate_create(ctx->body, bkt->allocator,
                                                   SERF_DEFLATE_GZIP);
                }
                else if (v && strcasecmp("deflate", v) == 0) {
                    ctx->body =
                        serf_bucket_deflate_create(ctx->body, bkt->allocator,
                                                   SERF_DEFLATE_DEFLATE);
                }
            }
            /* If we're a HEAD request, we don't receive a body. */
            if (ctx->head_req) {
                ctx->state = STATE_DONE;
            }
        }
        break;
    case STATE_BODY:
        /* Don't do anything. */
        break;
    case STATE_TRAILERS:
        status = fetch_headers(bkt, ctx);
        if (SERF_BUCKET_READ_ERROR(status))
            return status;

        /* If an empty line was read, then we're done. */
        if (ctx->linebuf.state == SERF_LINEBUF_READY && !ctx->linebuf.used) {
            ctx->state = STATE_DONE;
            return APR_EOF;
        }
        break;
    case STATE_DONE:
        return APR_EOF;
    default:
        /* Not reachable */
        return APR_EGENERAL;
    }

    return status;
}
|
||||
|
||||
/* Drive the parser until it reaches STATE_BODY. Any non-success status
 * from the machine means we cannot make further progress right now and
 * is returned to the caller. */
static apr_status_t wait_for_body(serf_bucket_t *bkt, response_context_t *ctx)
{
    while (ctx->state != STATE_BODY) {
        apr_status_t status = run_machine(bkt, ctx);

        /* Anything other than APR_SUCCESS means that we cannot immediately
         * read again (for now). */
        if (status != APR_SUCCESS)
            return status;
    }

    /* in STATE_BODY */
    return APR_SUCCESS;
}
|
||||
|
||||
/* Public entry: advance the parser until all headers have been consumed
 * (i.e. the body has been reached), or return why it couldn't. */
apr_status_t serf_bucket_response_wait_for_headers(
    serf_bucket_t *bucket)
{
    response_context_t *ctx = bucket->data;

    return wait_for_body(bucket, ctx);
}
|
||||
|
||||
/* Return the parsed status line in *SLINE if available. If the status
 * line hasn't been read yet, run the machine once; sline->version == 0
 * signals that the information is still unavailable. */
apr_status_t serf_bucket_response_status(
    serf_bucket_t *bkt,
    serf_status_line *sline)
{
    response_context_t *ctx = bkt->data;
    apr_status_t status;

    if (ctx->state != STATE_STATUS_LINE) {
        /* We already read it and moved on. Just return it. */
        *sline = ctx->sl;
        return APR_SUCCESS;
    }

    /* Running the state machine once will advance the machine, or state
     * that the stream isn't ready with enough data. There isn't ever a
     * need to run the machine more than once to try and satisfy this. We
     * have to look at the state to tell whether it advanced, though, as
     * it is quite possible to advance *and* to return APR_EAGAIN.
     */
    status = run_machine(bkt, ctx);
    if (ctx->state == STATE_HEADERS) {
        *sline = ctx->sl;
    }
    else {
        /* Indicate that we don't have the information yet. */
        sline->version = 0;
    }

    return status;
}
|
||||
|
||||
/* Read body data from the response. Transparently drives the parser to
 * the body first; on body EOF, moves to trailer parsing (chunked) or to
 * the terminal state. */
static apr_status_t serf_response_read(serf_bucket_t *bucket,
                                       apr_size_t requested,
                                       const char **data, apr_size_t *len)
{
    response_context_t *ctx = bucket->data;
    apr_status_t rv;

    rv = wait_for_body(bucket, ctx);
    if (rv) {
        /* It's not possible to have read anything yet! */
        if (APR_STATUS_IS_EOF(rv) || APR_STATUS_IS_EAGAIN(rv)) {
            *len = 0;
        }
        return rv;
    }

    rv = serf_bucket_read(ctx->body, requested, data, len);
    if (SERF_BUCKET_READ_ERROR(rv))
        return rv;

    if (APR_STATUS_IS_EOF(rv)) {
        if (ctx->chunked) {
            /* Trailers may follow the chunked body; keep the caller
             * reading by masking the EOF. */
            ctx->state = STATE_TRAILERS;
            /* Mask the result. */
            rv = APR_SUCCESS;
        } else {
            ctx->state = STATE_DONE;
        }
    }
    return rv;
}
|
||||
|
||||
/* Read one line of the response body.
 *
 * NOTE(review): unlike serf_response_read(), an EOF from the body does
 * not advance the state to STATE_TRAILERS/STATE_DONE here — presumably
 * callers mix in serf_response_read() for that; verify against callers. */
static apr_status_t serf_response_readline(serf_bucket_t *bucket,
                                           int acceptable, int *found,
                                           const char **data, apr_size_t *len)
{
    response_context_t *ctx = bucket->data;
    apr_status_t rv;

    rv = wait_for_body(bucket, ctx);
    if (rv) {
        return rv;
    }

    /* Delegate to the stream bucket to do the readline. */
    return serf_bucket_readline(ctx->body, acceptable, found, data, len);
}
|
||||
|
||||
/* Morph this RESPONSE bucket (in place) into an aggregate replaying the
 * raw response: a reconstructed status line, the parsed headers, then the
 * unconsumed stream. The private context is freed; the headers and stream
 * buckets are handed to the aggregate. */
apr_status_t serf_response_full_become_aggregate(serf_bucket_t *bucket)
{
    response_context_t *ctx = bucket->data;
    serf_bucket_t *bkt;
    char buf[256];
    int size;

    serf_bucket_aggregate_become(bucket);

    /* Add reconstructed status line. The simple_copy buckets below copy
     * their input, so the stack buffer and pool-free reason are safe. */
    size = apr_snprintf(buf, 256, "HTTP/%d.%d %d ",
                        SERF_HTTP_VERSION_MAJOR(ctx->sl.version),
                        SERF_HTTP_VERSION_MINOR(ctx->sl.version),
                        ctx->sl.code);
    bkt = serf_bucket_simple_copy_create(buf, size,
                                         bucket->allocator);
    serf_bucket_aggregate_append(bucket, bkt);
    bkt = serf_bucket_simple_copy_create(ctx->sl.reason, strlen(ctx->sl.reason),
                                         bucket->allocator);
    serf_bucket_aggregate_append(bucket, bkt);
    bkt = SERF_BUCKET_SIMPLE_STRING_LEN("\r\n", 2,
                                        bucket->allocator);
    serf_bucket_aggregate_append(bucket, bkt);

    /* Add headers and stream buckets in order. */
    serf_bucket_aggregate_append(bucket, ctx->headers);
    serf_bucket_aggregate_append(bucket, ctx->stream);

    serf_bucket_mem_free(bucket->allocator, ctx);

    return APR_SUCCESS;
}
|
||||
|
||||
/* ### need to implement */
#define serf_response_peek NULL

/* Vtable for the RESPONSE bucket type. Peek is unimplemented (NULL). */
const serf_bucket_type_t serf_bucket_type_response = {
    "RESPONSE",
    serf_response_read,
    serf_response_readline,
    serf_default_read_iovec,
    serf_default_read_for_sendfile,
    serf_default_read_bucket,
    serf_response_peek,
    serf_response_destroy_and_data,
};
|
142
buckets/simple_buckets.c
Normal file
142
buckets/simple_buckets.c
Normal file
@ -0,0 +1,142 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <apr_pools.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
|
||||
/* Private state for SIMPLE buckets: one contiguous memory region that
   is handed out sequentially. */
typedef struct {
    const char *original;   /* start of the region (what gets freed) */
    const char *current;    /* next unread byte */
    apr_size_t remaining;   /* bytes left from CURRENT to the end */

    /* Invoked on destroy with BATON and ORIGINAL; NULL means the data
       is caller-owned and needs no cleanup. */
    serf_simple_freefunc_t freefunc;
    void *baton;

} simple_context_t;
|
||||
|
||||
|
||||
/* Destructor used by serf_bucket_simple_copy_create(): BATON is the
   bucket allocator the copied data was allocated from. */
static void free_copied_data(void *baton, const char *data)
{
    serf_bucket_mem_free(baton, (char*)data);
}
|
||||
|
||||
/* Create a SIMPLE bucket over caller-owned memory.  DATA/LEN are used
 * in place (no copy), so the caller must keep the memory valid for the
 * bucket's lifetime.  FREEFUNC (may be NULL) is invoked with
 * FREEFUNC_BATON and the original DATA pointer when the bucket is
 * destroyed.
 */
serf_bucket_t *serf_bucket_simple_create(
    const char *data,
    apr_size_t len,
    serf_simple_freefunc_t freefunc,
    void *freefunc_baton,
    serf_bucket_alloc_t *allocator)
{
    simple_context_t *ctx;

    ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
    ctx->original = ctx->current = data;
    ctx->remaining = len;
    ctx->freefunc = freefunc;
    ctx->baton = freefunc_baton;

    return serf_bucket_create(&serf_bucket_type_simple, allocator, ctx);
}
|
||||
|
||||
/* Like serf_bucket_simple_create(), but takes a private copy of
 * DATA/LEN so the caller's buffer may be released immediately.  The
 * copy lives in ALLOCATOR memory and is released on destroy via
 * free_copied_data().
 */
serf_bucket_t *serf_bucket_simple_copy_create(
    const char *data, apr_size_t len,
    serf_bucket_alloc_t *allocator)
{
    simple_context_t *ctx;

    ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));

    /* Cast away const only for this one-time initializing copy. */
    ctx->original = ctx->current = serf_bucket_mem_alloc(allocator, len);
    memcpy((char*)ctx->original, data, len);

    ctx->remaining = len;
    ctx->freefunc = free_copied_data;
    ctx->baton = allocator;

    return serf_bucket_create(&serf_bucket_type_simple, allocator, ctx);
}
|
||||
|
||||
/* Hand out the next chunk of the wrapped region without copying. */
static apr_status_t serf_simple_read(serf_bucket_t *bucket,
                                     apr_size_t requested,
                                     const char **data, apr_size_t *len)
{
    simple_context_t *ctx = bucket->data;
    apr_size_t count = requested;

    /* Clamp the request to what is actually left. */
    if (count == SERF_READ_ALL_AVAIL || count > ctx->remaining) {
        count = ctx->remaining;
    }

    *data = ctx->current;
    *len = count;

    /* Consume the returned span. */
    ctx->current += count;
    ctx->remaining -= count;

    return ctx->remaining == 0 ? APR_EOF : APR_SUCCESS;
}
|
||||
|
||||
/* Return one line (or the tail of the buffer) without copying. */
static apr_status_t serf_simple_readline(serf_bucket_t *bucket,
                                         int acceptable, int *found,
                                         const char **data, apr_size_t *len)
{
    simple_context_t *ctx = bucket->data;

    /* Returned data will be from current position. */
    *data = ctx->current;
    /* serf_util_readline() advances ctx->current/ctx->remaining past the
       line and reports the newline kind in *FOUND — NOTE(review): exact
       semantics per serf_util_readline's contract; confirm there. */
    serf_util_readline(&ctx->current, &ctx->remaining, acceptable, found);

    /* See how much ctx->current moved forward. */
    *len = ctx->current - *data;

    /* APR_EOF once the wrapped region is exhausted. */
    return ctx->remaining ? APR_SUCCESS : APR_EOF;
}
|
||||
|
||||
static apr_status_t serf_simple_peek(serf_bucket_t *bucket,
|
||||
const char **data,
|
||||
apr_size_t *len)
|
||||
{
|
||||
simple_context_t *ctx = bucket->data;
|
||||
|
||||
/* return whatever we have left */
|
||||
*data = ctx->current;
|
||||
*len = ctx->remaining;
|
||||
|
||||
/* we returned everything this bucket will ever hold */
|
||||
return APR_EOF;
|
||||
}
|
||||
|
||||
/* Destroy the bucket, releasing the wrapped data if we own it. */
static void serf_simple_destroy(serf_bucket_t *bucket)
{
    simple_context_t *ctx = bucket->data;

    /* FREEFUNC is NULL for caller-owned data that needs no cleanup. */
    if (ctx->freefunc)
        (*ctx->freefunc)(ctx->baton, ctx->original);

    /* Default teardown of the context and the bucket itself. */
    serf_default_destroy_and_data(bucket);
}
|
||||
|
||||
|
||||
/* vtable for SIMPLE buckets; entries follow serf_bucket_type_t order. */
const serf_bucket_type_t serf_bucket_type_simple = {
    "SIMPLE",
    serf_simple_read,
    serf_simple_readline,
    serf_default_read_iovec,            /* default iovec read */
    serf_default_read_for_sendfile,     /* default sendfile read */
    serf_default_read_bucket,
    serf_simple_peek,
    serf_simple_destroy,
};
|
125
buckets/socket_buckets.c
Normal file
125
buckets/socket_buckets.c
Normal file
@ -0,0 +1,125 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <apr_pools.h>
|
||||
#include <apr_network_io.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_private.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
|
||||
/* Private state for SOCKET buckets: a raw socket wrapped by a databuf
   that buffers incoming bytes. */
typedef struct {
    apr_socket_t *skt;

    serf_databuf_t databuf;     /* read buffer; filled by socket_reader() */

    /* Progress callback (optional); invoked after every recv. */
    serf_progress_t progress_func;
    void *progress_baton;
} socket_context_t;
|
||||
|
||||
|
||||
/* Databuf "read" callback: pull up to BUFSIZE bytes off the socket into
 * BUF, setting *LEN to the byte count actually received.  Returns the
 * apr_socket_recv() status; EAGAIN is expected and not logged as an
 * error.  Reports progress (possibly 0 bytes) if a callback is set.
 */
static apr_status_t socket_reader(void *baton, apr_size_t bufsize,
                                  char *buf, apr_size_t *len)
{
    socket_context_t *ctx = baton;
    apr_status_t status;

    *len = bufsize;
    status = apr_socket_recv(ctx->skt, buf, len);

    if (status && !APR_STATUS_IS_EAGAIN(status))
        serf__log_skt(SOCK_VERBOSE, __FILE__, ctx->skt,
                      "socket_recv error %d\n", status);

    if (*len)
        /* Cast to int: the printf-style '*' precision and the %d
           conversion both require an int argument; passing apr_size_t
           (64-bit on LP64) through varargs here is undefined behavior. */
        serf__log_skt(SOCK_MSG_VERBOSE, __FILE__, ctx->skt,
                      "--- socket_recv:\n%.*s\n-(%d)-\n",
                      (int)*len, buf, (int)*len);

    if (ctx->progress_func)
        ctx->progress_func(ctx->progress_baton, *len, 0);

    return status;
}
|
||||
|
||||
/* Create a SOCKET bucket reading from SKT.  The socket is borrowed:
 * nothing in this bucket closes it (the type's destroy is
 * serf_default_destroy_and_data, which only frees the context).
 */
serf_bucket_t *serf_bucket_socket_create(
    apr_socket_t *skt,
    serf_bucket_alloc_t *allocator)
{
    socket_context_t *ctx;

    /* Wire the context's databuf to pull bytes from the socket. */
    ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
    ctx->skt = skt;

    serf_databuf_init(&ctx->databuf);
    ctx->databuf.read = socket_reader;
    ctx->databuf.read_baton = ctx;

    /* No progress reporting until a callback is registered. */
    ctx->progress_func = NULL;
    ctx->progress_baton = NULL;
    return serf_bucket_create(&serf_bucket_type_socket, allocator, ctx);
}
|
||||
|
||||
/* Register (or replace) a read-progress callback.  PROGRESS_FUNC is
 * invoked from socket_reader() with PROGRESS_BATON and the number of
 * bytes received on each recv.
 */
void serf_bucket_socket_set_read_progress_cb(
    serf_bucket_t *bucket,
    const serf_progress_t progress_func,
    void *progress_baton)
{
    socket_context_t *ctx = bucket->data;

    ctx->progress_func = progress_func;
    ctx->progress_baton = progress_baton;
}
|
||||
|
||||
/* Read up to REQUESTED bytes; delegates entirely to the databuf. */
static apr_status_t serf_socket_read(serf_bucket_t *bucket,
                                     apr_size_t requested,
                                     const char **data, apr_size_t *len)
{
    socket_context_t *ctx = bucket->data;

    return serf_databuf_read(&ctx->databuf, requested, data, len);
}
|
||||
|
||||
/* Read one line (terminated per ACCEPTABLE); delegates to the databuf. */
static apr_status_t serf_socket_readline(serf_bucket_t *bucket,
                                         int acceptable, int *found,
                                         const char **data, apr_size_t *len)
{
    socket_context_t *ctx = bucket->data;

    return serf_databuf_readline(&ctx->databuf, acceptable, found, data, len);
}
|
||||
|
||||
/* Peek at buffered data without consuming it; delegates to the databuf. */
static apr_status_t serf_socket_peek(serf_bucket_t *bucket,
                                     const char **data,
                                     apr_size_t *len)
{
    socket_context_t *ctx = bucket->data;

    return serf_databuf_peek(&ctx->databuf, data, len);
}
|
||||
|
||||
/* vtable for SOCKET buckets; entries follow serf_bucket_type_t order. */
const serf_bucket_type_t serf_bucket_type_socket = {
    "SOCKET",
    serf_socket_read,
    serf_socket_readline,
    serf_default_read_iovec,            /* default iovec read */
    serf_default_read_for_sendfile,     /* default sendfile read */
    serf_default_read_bucket,
    serf_socket_peek,
    serf_default_destroy_and_data,      /* note: does not close the socket */
};
|
1752
buckets/ssl_buckets.c
Normal file
1752
buckets/ssl_buckets.c
Normal file
File diff suppressed because it is too large
Load Diff
985
build/apr_common.m4
Normal file
985
build/apr_common.m4
Normal file
@ -0,0 +1,985 @@
|
||||
dnl -------------------------------------------------------- -*- autoconf -*-
|
||||
dnl Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
dnl contributor license agreements. See the NOTICE file distributed with
|
||||
dnl this work for additional information regarding copyright ownership.
|
||||
dnl The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
dnl (the "License"); you may not use this file except in compliance with
|
||||
dnl the License. You may obtain a copy of the License at
|
||||
dnl
|
||||
dnl http://www.apache.org/licenses/LICENSE-2.0
|
||||
dnl
|
||||
dnl Unless required by applicable law or agreed to in writing, software
|
||||
dnl distributed under the License is distributed on an "AS IS" BASIS,
|
||||
dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
dnl See the License for the specific language governing permissions and
|
||||
dnl limitations under the License.
|
||||
|
||||
dnl
|
||||
dnl apr_common.m4: APR's general-purpose autoconf macros
|
||||
dnl
|
||||
|
||||
dnl
|
||||
dnl APR_CONFIG_NICE(filename)
|
||||
dnl
|
||||
dnl Saves a snapshot of the configure command-line for later reuse
|
||||
dnl
|
||||
AC_DEFUN([APR_CONFIG_NICE], [
|
||||
rm -f $1
|
||||
cat >$1<<EOF
|
||||
#! /bin/sh
|
||||
#
|
||||
# Created by configure
|
||||
|
||||
EOF
|
||||
if test -n "$CC"; then
|
||||
echo "CC=\"$CC\"; export CC" >> $1
|
||||
fi
|
||||
if test -n "$CFLAGS"; then
|
||||
echo "CFLAGS=\"$CFLAGS\"; export CFLAGS" >> $1
|
||||
fi
|
||||
if test -n "$CPPFLAGS"; then
|
||||
echo "CPPFLAGS=\"$CPPFLAGS\"; export CPPFLAGS" >> $1
|
||||
fi
|
||||
if test -n "$LDFLAGS"; then
|
||||
echo "LDFLAGS=\"$LDFLAGS\"; export LDFLAGS" >> $1
|
||||
fi
|
||||
if test -n "$LTFLAGS"; then
|
||||
echo "LTFLAGS=\"$LTFLAGS\"; export LTFLAGS" >> $1
|
||||
fi
|
||||
if test -n "$LIBS"; then
|
||||
echo "LIBS=\"$LIBS\"; export LIBS" >> $1
|
||||
fi
|
||||
if test -n "$INCLUDES"; then
|
||||
echo "INCLUDES=\"$INCLUDES\"; export INCLUDES" >> $1
|
||||
fi
|
||||
if test -n "$NOTEST_CFLAGS"; then
|
||||
echo "NOTEST_CFLAGS=\"$NOTEST_CFLAGS\"; export NOTEST_CFLAGS" >> $1
|
||||
fi
|
||||
if test -n "$NOTEST_CPPFLAGS"; then
|
||||
echo "NOTEST_CPPFLAGS=\"$NOTEST_CPPFLAGS\"; export NOTEST_CPPFLAGS" >> $1
|
||||
fi
|
||||
if test -n "$NOTEST_LDFLAGS"; then
|
||||
echo "NOTEST_LDFLAGS=\"$NOTEST_LDFLAGS\"; export NOTEST_LDFLAGS" >> $1
|
||||
fi
|
||||
if test -n "$NOTEST_LIBS"; then
|
||||
echo "NOTEST_LIBS=\"$NOTEST_LIBS\"; export NOTEST_LIBS" >> $1
|
||||
fi
|
||||
|
||||
# Retrieve command-line arguments.
|
||||
eval "set x $[0] $ac_configure_args"
|
||||
shift
|
||||
|
||||
for arg
|
||||
do
|
||||
APR_EXPAND_VAR(arg, $arg)
|
||||
echo "\"[$]arg\" \\" >> $1
|
||||
done
|
||||
echo '"[$]@"' >> $1
|
||||
chmod +x $1
|
||||
])dnl
|
||||
|
||||
dnl APR_MKDIR_P_CHECK(fallback-mkdir-p)
|
||||
dnl checks whether mkdir -p works
|
||||
AC_DEFUN([APR_MKDIR_P_CHECK], [
|
||||
AC_CACHE_CHECK(for working mkdir -p, ac_cv_mkdir_p,[
|
||||
test -d conftestdir && rm -rf conftestdir
|
||||
mkdir -p conftestdir/somedir >/dev/null 2>&1
|
||||
if test -d conftestdir/somedir; then
|
||||
ac_cv_mkdir_p=yes
|
||||
else
|
||||
ac_cv_mkdir_p=no
|
||||
fi
|
||||
rm -rf conftestdir
|
||||
])
|
||||
if test "$ac_cv_mkdir_p" = "yes"; then
|
||||
mkdir_p="mkdir -p"
|
||||
else
|
||||
mkdir_p="$1"
|
||||
fi
|
||||
])
|
||||
|
||||
dnl
|
||||
dnl APR_SUBDIR_CONFIG(dir [, sub-package-cmdline-args, args-to-drop])
|
||||
dnl
|
||||
dnl dir: directory to find configure in
|
||||
dnl sub-package-cmdline-args: arguments to add to the invocation (optional)
|
||||
dnl args-to-drop: arguments to drop from the invocation (optional)
|
||||
dnl
|
||||
dnl Note: This macro relies on ac_configure_args being set properly.
|
||||
dnl
|
||||
dnl The args-to-drop argument is shoved into a case statement, so
|
||||
dnl multiple arguments can be separated with a |.
|
||||
dnl
|
||||
dnl Note: Older versions of autoconf do not single-quote args, while 2.54+
|
||||
dnl places quotes around every argument. So, if you want to drop the
|
||||
dnl argument called --enable-layout, you must pass the third argument as:
|
||||
dnl [--enable-layout=*|\'--enable-layout=*]
|
||||
dnl
|
||||
dnl Trying to optimize this is left as an exercise to the reader who wants
|
||||
dnl to put up with more autoconf craziness. I give up.
|
||||
dnl
|
||||
AC_DEFUN([APR_SUBDIR_CONFIG], [
|
||||
# save our work to this point; this allows the sub-package to use it
|
||||
AC_CACHE_SAVE
|
||||
|
||||
echo "configuring package in $1 now"
|
||||
ac_popdir=`pwd`
|
||||
apr_config_subdirs="$1"
|
||||
test -d $1 || $mkdir_p $1
|
||||
ac_abs_srcdir=`(cd $srcdir/$1 && pwd)`
|
||||
cd $1
|
||||
|
||||
changequote(, )dnl
|
||||
# A "../" for each directory in /$config_subdirs.
|
||||
ac_dots=`echo $apr_config_subdirs|sed -e 's%^\./%%' -e 's%[^/]$%&/%' -e 's%[^/]*/%../%g'`
|
||||
changequote([, ])dnl
|
||||
|
||||
# Make the cache file pathname absolute for the subdirs
|
||||
# required to correctly handle subdirs that might actually
|
||||
# be symlinks
|
||||
case "$cache_file" in
|
||||
/*) # already absolute
|
||||
ac_sub_cache_file=$cache_file ;;
|
||||
*) # Was relative path.
|
||||
ac_sub_cache_file="$ac_popdir/$cache_file" ;;
|
||||
esac
|
||||
|
||||
ifelse($3, [], [apr_configure_args=$ac_configure_args],[
|
||||
apr_configure_args=
|
||||
apr_sep=
|
||||
for apr_configure_arg in $ac_configure_args
|
||||
do
|
||||
case "$apr_configure_arg" in
|
||||
$3)
|
||||
continue ;;
|
||||
esac
|
||||
apr_configure_args="$apr_configure_args$apr_sep'$apr_configure_arg'"
|
||||
apr_sep=" "
|
||||
done
|
||||
])
|
||||
|
||||
dnl autoconf doesn't add --silent to ac_configure_args; explicitly pass it
|
||||
test "x$silent" = "xyes" && apr_configure_args="$apr_configure_args --silent"
|
||||
|
||||
dnl AC_CONFIG_SUBDIRS silences option warnings, emulate this for 2.62
|
||||
apr_configure_args="--disable-option-checking $apr_configure_args"
|
||||
|
||||
dnl The eval makes quoting arguments work - specifically the second argument
|
||||
dnl where the quoting mechanisms used is "" rather than [].
|
||||
dnl
|
||||
dnl We need to execute another shell because some autoconf/shell combinations
|
||||
dnl will choke after doing repeated APR_SUBDIR_CONFIG()s. (Namely Solaris
|
||||
dnl and autoconf-2.54+)
|
||||
if eval $SHELL $ac_abs_srcdir/configure $apr_configure_args --cache-file=$ac_sub_cache_file --srcdir=$ac_abs_srcdir $2
|
||||
then :
|
||||
echo "$1 configured properly"
|
||||
else
|
||||
echo "configure failed for $1"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd $ac_popdir
|
||||
|
||||
# grab any updates from the sub-package
|
||||
AC_CACHE_LOAD
|
||||
])dnl
|
||||
|
||||
dnl
|
||||
dnl APR_SAVE_THE_ENVIRONMENT(variable_name)
|
||||
dnl
|
||||
dnl Stores the variable (usually a Makefile macro) for later restoration
|
||||
dnl
|
||||
AC_DEFUN([APR_SAVE_THE_ENVIRONMENT], [
|
||||
apr_ste_save_$1="$$1"
|
||||
])dnl
|
||||
|
||||
dnl
|
||||
dnl APR_RESTORE_THE_ENVIRONMENT(variable_name, prefix_)
|
||||
dnl
|
||||
dnl Uses the previously saved variable content to figure out what configure
|
||||
dnl has added to the variable, moving the new bits to prefix_variable_name
|
||||
dnl and restoring the original variable contents. This makes it possible
|
||||
dnl for a user to override configure when it does something stupid.
|
||||
dnl
|
||||
AC_DEFUN([APR_RESTORE_THE_ENVIRONMENT], [
|
||||
dnl Check whether $apr_ste_save_$1 is empty or
|
||||
dnl only whitespace. The verbatim "X" is token number 1,
|
||||
dnl the following whitespace will be ignored.
|
||||
set X $apr_ste_save_$1
|
||||
if test ${#} -eq 1; then
|
||||
$2$1="$$1"
|
||||
$1=
|
||||
else
|
||||
if test "x$apr_ste_save_$1" = "x$$1"; then
|
||||
$2$1=
|
||||
else
|
||||
$2$1=`echo "$$1" | sed -e "s%${apr_ste_save_$1}%%"`
|
||||
$1="$apr_ste_save_$1"
|
||||
fi
|
||||
fi
|
||||
if test "x$silent" != "xyes"; then
|
||||
echo " restoring $1 to \"$$1\""
|
||||
echo " setting $2$1 to \"$$2$1\""
|
||||
fi
|
||||
AC_SUBST($2$1)
|
||||
])dnl
|
||||
|
||||
dnl
|
||||
dnl APR_SETIFNULL(variable, value)
|
||||
dnl
|
||||
dnl Set variable iff it's currently null
|
||||
dnl
|
||||
AC_DEFUN([APR_SETIFNULL], [
|
||||
if test -z "$$1"; then
|
||||
test "x$silent" != "xyes" && echo " setting $1 to \"$2\""
|
||||
$1="$2"
|
||||
fi
|
||||
])dnl
|
||||
|
||||
dnl
|
||||
dnl APR_SETVAR(variable, value)
|
||||
dnl
|
||||
dnl Set variable no matter what
|
||||
dnl
|
||||
AC_DEFUN([APR_SETVAR], [
|
||||
test "x$silent" != "xyes" && echo " forcing $1 to \"$2\""
|
||||
$1="$2"
|
||||
])dnl
|
||||
|
||||
dnl
|
||||
dnl APR_ADDTO(variable, value)
|
||||
dnl
|
||||
dnl Add value to variable
|
||||
dnl
|
||||
AC_DEFUN([APR_ADDTO], [
|
||||
if test "x$$1" = "x"; then
|
||||
test "x$silent" != "xyes" && echo " setting $1 to \"$2\""
|
||||
$1="$2"
|
||||
else
|
||||
apr_addto_bugger="$2"
|
||||
for i in $apr_addto_bugger; do
|
||||
apr_addto_duplicate="0"
|
||||
for j in $$1; do
|
||||
if test "x$i" = "x$j"; then
|
||||
apr_addto_duplicate="1"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if test $apr_addto_duplicate = "0"; then
|
||||
test "x$silent" != "xyes" && echo " adding \"$i\" to $1"
|
||||
$1="$$1 $i"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
])dnl
|
||||
|
||||
dnl
|
||||
dnl APR_REMOVEFROM(variable, value)
|
||||
dnl
|
||||
dnl Remove a value from a variable
|
||||
dnl
|
||||
AC_DEFUN([APR_REMOVEFROM], [
|
||||
if test "x$$1" = "x$2"; then
|
||||
test "x$silent" != "xyes" && echo " nulling $1"
|
||||
$1=""
|
||||
else
|
||||
apr_new_bugger=""
|
||||
apr_removed=0
|
||||
for i in $$1; do
|
||||
if test "x$i" != "x$2"; then
|
||||
apr_new_bugger="$apr_new_bugger $i"
|
||||
else
|
||||
apr_removed=1
|
||||
fi
|
||||
done
|
||||
if test $apr_removed = "1"; then
|
||||
test "x$silent" != "xyes" && echo " removed \"$2\" from $1"
|
||||
$1=$apr_new_bugger
|
||||
fi
|
||||
fi
|
||||
]) dnl
|
||||
|
||||
dnl
|
||||
dnl APR_CHECK_DEFINE_FILES( symbol, header_file [header_file ...] )
|
||||
dnl
|
||||
AC_DEFUN([APR_CHECK_DEFINE_FILES], [
|
||||
AC_CACHE_CHECK([for $1 in $2],ac_cv_define_$1,[
|
||||
ac_cv_define_$1=no
|
||||
for curhdr in $2
|
||||
do
|
||||
AC_EGREP_CPP(YES_IS_DEFINED, [
|
||||
#include <$curhdr>
|
||||
#ifdef $1
|
||||
YES_IS_DEFINED
|
||||
#endif
|
||||
], ac_cv_define_$1=yes)
|
||||
done
|
||||
])
|
||||
if test "$ac_cv_define_$1" = "yes"; then
|
||||
AC_DEFINE(HAVE_$1, 1, [Define if $1 is defined])
|
||||
fi
|
||||
])
|
||||
|
||||
|
||||
dnl
|
||||
dnl APR_CHECK_DEFINE(symbol, header_file)
|
||||
dnl
|
||||
AC_DEFUN([APR_CHECK_DEFINE], [
|
||||
AC_CACHE_CHECK([for $1 in $2],ac_cv_define_$1,[
|
||||
AC_EGREP_CPP(YES_IS_DEFINED, [
|
||||
#include <$2>
|
||||
#ifdef $1
|
||||
YES_IS_DEFINED
|
||||
#endif
|
||||
], ac_cv_define_$1=yes, ac_cv_define_$1=no)
|
||||
])
|
||||
if test "$ac_cv_define_$1" = "yes"; then
|
||||
AC_DEFINE(HAVE_$1, 1, [Define if $1 is defined in $2])
|
||||
fi
|
||||
])
|
||||
|
||||
dnl
|
||||
dnl APR_CHECK_APR_DEFINE( symbol )
|
||||
dnl
|
||||
AC_DEFUN([APR_CHECK_APR_DEFINE], [
|
||||
apr_old_cppflags=$CPPFLAGS
|
||||
CPPFLAGS="$CPPFLAGS $INCLUDES"
|
||||
AC_EGREP_CPP(YES_IS_DEFINED, [
|
||||
#include <apr.h>
|
||||
#if $1
|
||||
YES_IS_DEFINED
|
||||
#endif
|
||||
], ac_cv_define_$1=yes, ac_cv_define_$1=no)
|
||||
CPPFLAGS=$apr_old_cppflags
|
||||
])
|
||||
|
||||
dnl APR_CHECK_FILE(filename); set ac_cv_file_filename to
|
||||
dnl "yes" if 'filename' is readable, else "no".
|
||||
dnl @deprecated! - use AC_CHECK_FILE instead
|
||||
AC_DEFUN([APR_CHECK_FILE], [
|
||||
dnl Pick a safe variable name
|
||||
define([apr_cvname], ac_cv_file_[]translit([$1], [./+-], [__p_]))
|
||||
AC_CACHE_CHECK([for $1], [apr_cvname],
|
||||
[if test -r $1; then
|
||||
apr_cvname=yes
|
||||
else
|
||||
apr_cvname=no
|
||||
fi])
|
||||
])
|
||||
|
||||
define(APR_IFALLYES,[dnl
|
||||
ac_rc=yes
|
||||
for ac_spec in $1; do
|
||||
ac_type=`echo "$ac_spec" | sed -e 's/:.*$//'`
|
||||
ac_item=`echo "$ac_spec" | sed -e 's/^.*://'`
|
||||
case $ac_type in
|
||||
header )
|
||||
ac_item=`echo "$ac_item" | sed 'y%./+-%__p_%'`
|
||||
ac_var="ac_cv_header_$ac_item"
|
||||
;;
|
||||
file )
|
||||
ac_item=`echo "$ac_item" | sed 'y%./+-%__p_%'`
|
||||
ac_var="ac_cv_file_$ac_item"
|
||||
;;
|
||||
func ) ac_var="ac_cv_func_$ac_item" ;;
|
||||
struct ) ac_var="ac_cv_struct_$ac_item" ;;
|
||||
define ) ac_var="ac_cv_define_$ac_item" ;;
|
||||
custom ) ac_var="$ac_item" ;;
|
||||
esac
|
||||
eval "ac_val=\$$ac_var"
|
||||
if test ".$ac_val" != .yes; then
|
||||
ac_rc=no
|
||||
break
|
||||
fi
|
||||
done
|
||||
if test ".$ac_rc" = .yes; then
|
||||
:
|
||||
$2
|
||||
else
|
||||
:
|
||||
$3
|
||||
fi
|
||||
])
|
||||
|
||||
|
||||
define(APR_BEGIN_DECISION,[dnl
|
||||
ac_decision_item='$1'
|
||||
ac_decision_msg='FAILED'
|
||||
ac_decision=''
|
||||
])
|
||||
|
||||
|
||||
AC_DEFUN([APR_DECIDE],[dnl
|
||||
dnl Define the flag (or not) in apr_private.h via autoheader
|
||||
AH_TEMPLATE($1, [Define if $2 will be used])
|
||||
ac_decision='$1'
|
||||
ac_decision_msg='$2'
|
||||
ac_decision_$1=yes
|
||||
ac_decision_$1_msg='$2'
|
||||
])
|
||||
|
||||
|
||||
define(APR_DECISION_OVERRIDE,[dnl
|
||||
ac_decision=''
|
||||
for ac_item in $1; do
|
||||
eval "ac_decision_this=\$ac_decision_${ac_item}"
|
||||
if test ".$ac_decision_this" = .yes; then
|
||||
ac_decision=$ac_item
|
||||
eval "ac_decision_msg=\$ac_decision_${ac_item}_msg"
|
||||
fi
|
||||
done
|
||||
])
|
||||
|
||||
|
||||
define(APR_DECISION_FORCE,[dnl
|
||||
ac_decision="$1"
|
||||
eval "ac_decision_msg=\"\$ac_decision_${ac_decision}_msg\""
|
||||
])
|
||||
|
||||
|
||||
define(APR_END_DECISION,[dnl
|
||||
if test ".$ac_decision" = .; then
|
||||
echo "[$]0:Error: decision on $ac_decision_item failed" 1>&2
|
||||
exit 1
|
||||
else
|
||||
if test ".$ac_decision_msg" = .; then
|
||||
ac_decision_msg="$ac_decision"
|
||||
fi
|
||||
AC_DEFINE_UNQUOTED(${ac_decision_item})
|
||||
AC_MSG_RESULT([decision on $ac_decision_item... $ac_decision_msg])
|
||||
fi
|
||||
])
|
||||
|
||||
|
||||
dnl
|
||||
dnl APR_CHECK_SIZEOF_EXTENDED(INCLUDES, TYPE [, CROSS_SIZE])
|
||||
dnl
|
||||
dnl A variant of AC_CHECK_SIZEOF which allows the checking of
|
||||
dnl sizes of non-builtin types
|
||||
dnl
|
||||
AC_DEFUN([APR_CHECK_SIZEOF_EXTENDED],
|
||||
[changequote(<<, >>)dnl
|
||||
dnl The name to #define.
|
||||
define(<<AC_TYPE_NAME>>, translit(sizeof_$2, [a-z *], [A-Z_P]))dnl
|
||||
dnl The cache variable name.
|
||||
define(<<AC_CV_NAME>>, translit(ac_cv_sizeof_$2, [ *], [_p]))dnl
|
||||
changequote([, ])dnl
|
||||
AC_MSG_CHECKING(size of $2)
|
||||
AC_CACHE_VAL(AC_CV_NAME,
|
||||
[AC_TRY_RUN([#include <stdio.h>
|
||||
$1
|
||||
main()
|
||||
{
|
||||
FILE *f=fopen("conftestval", "w");
|
||||
if (!f) exit(1);
|
||||
fprintf(f, "%d\n", sizeof($2));
|
||||
exit(0);
|
||||
}], AC_CV_NAME=`cat conftestval`, AC_CV_NAME=0, ifelse([$3],,,
|
||||
AC_CV_NAME=$3))])dnl
|
||||
AC_MSG_RESULT($AC_CV_NAME)
|
||||
AC_DEFINE_UNQUOTED(AC_TYPE_NAME, $AC_CV_NAME, [The size of ]$2)
|
||||
undefine([AC_TYPE_NAME])dnl
|
||||
undefine([AC_CV_NAME])dnl
|
||||
])
|
||||
|
||||
|
||||
dnl
|
||||
dnl APR_TRY_COMPILE_NO_WARNING(INCLUDES, FUNCTION-BODY,
|
||||
dnl [ACTIONS-IF-NO-WARNINGS], [ACTIONS-IF-WARNINGS])
|
||||
dnl
|
||||
dnl Tries a compile test with warnings activated so that the result
|
||||
dnl is false if the code doesn't compile cleanly. For compilers
|
||||
dnl where it is not known how to activate a "fail-on-error" mode,
|
||||
dnl it is undefined which of the sets of actions will be run.
|
||||
dnl
|
||||
AC_DEFUN([APR_TRY_COMPILE_NO_WARNING],
|
||||
[apr_save_CFLAGS=$CFLAGS
|
||||
CFLAGS="$CFLAGS $CFLAGS_WARN"
|
||||
if test "$ac_cv_prog_gcc" = "yes"; then
|
||||
CFLAGS="$CFLAGS -Werror"
|
||||
fi
|
||||
AC_COMPILE_IFELSE(
|
||||
[AC_LANG_SOURCE(
|
||||
[#include "confdefs.h"
|
||||
]
|
||||
[[$1]]
|
||||
[int main(int argc, const char *const *argv) {]
|
||||
[[$2]]
|
||||
[ return 0; }]
|
||||
)],
|
||||
[$3], [$4])
|
||||
CFLAGS=$apr_save_CFLAGS
|
||||
])
|
||||
|
||||
dnl
|
||||
dnl APR_CHECK_STRERROR_R_RC
|
||||
dnl
|
||||
dnl Decide which style of retcode is used by this system's
|
||||
dnl strerror_r(). It either returns int (0 for success, -1
|
||||
dnl for failure), or it returns a pointer to the error
|
||||
dnl string.
|
||||
dnl
|
||||
dnl
|
||||
AC_DEFUN([APR_CHECK_STRERROR_R_RC], [
|
||||
AC_MSG_CHECKING(for type of return code from strerror_r)
|
||||
AC_TRY_RUN([
|
||||
#include <errno.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
main()
|
||||
{
|
||||
char buf[1024];
|
||||
if (strerror_r(ERANGE, buf, sizeof buf) < 1) {
|
||||
exit(0);
|
||||
}
|
||||
else {
|
||||
exit(1);
|
||||
}
|
||||
}], [
|
||||
ac_cv_strerror_r_rc_int=yes ], [
|
||||
ac_cv_strerror_r_rc_int=no ], [
|
||||
ac_cv_strerror_r_rc_int=no ] )
|
||||
if test "x$ac_cv_strerror_r_rc_int" = xyes; then
|
||||
AC_DEFINE(STRERROR_R_RC_INT, 1, [Define if strerror returns int])
|
||||
msg="int"
|
||||
else
|
||||
msg="pointer"
|
||||
fi
|
||||
AC_MSG_RESULT([$msg])
|
||||
] )
|
||||
|
||||
dnl
|
||||
dnl APR_CHECK_DIRENT_INODE
|
||||
dnl
|
||||
dnl Decide if d_fileno or d_ino are available in the dirent
|
||||
dnl structure on this platform. Single UNIX Spec says d_ino,
|
||||
dnl BSD uses d_fileno. Undef to find the real beast.
|
||||
dnl
|
||||
AC_DEFUN([APR_CHECK_DIRENT_INODE], [
|
||||
AC_CACHE_CHECK([for inode member of struct dirent], apr_cv_dirent_inode, [
|
||||
apr_cv_dirent_inode=no
|
||||
AC_TRY_COMPILE([
|
||||
#include <sys/types.h>
|
||||
#include <dirent.h>
|
||||
],[
|
||||
#ifdef d_ino
|
||||
#undef d_ino
|
||||
#endif
|
||||
struct dirent de; de.d_fileno;
|
||||
], apr_cv_dirent_inode=d_fileno)
|
||||
if test "$apr_cv_dirent_inode" = "no"; then
|
||||
AC_TRY_COMPILE([
|
||||
#include <sys/types.h>
|
||||
#include <dirent.h>
|
||||
],[
|
||||
#ifdef d_fileno
|
||||
#undef d_fileno
|
||||
#endif
|
||||
struct dirent de; de.d_ino;
|
||||
], apr_cv_dirent_inode=d_ino)
|
||||
fi
|
||||
])
|
||||
if test "$apr_cv_dirent_inode" != "no"; then
|
||||
AC_DEFINE_UNQUOTED(DIRENT_INODE, $apr_cv_dirent_inode,
|
||||
[Define if struct dirent has an inode member])
|
||||
fi
|
||||
])
|
||||
|
||||
dnl
|
||||
dnl APR_CHECK_DIRENT_TYPE
|
||||
dnl
|
||||
dnl Decide if d_type is available in the dirent structure
|
||||
dnl on this platform. Not part of the Single UNIX Spec.
|
||||
dnl Note that this is worthless without DT_xxx macros, so
|
||||
dnl look for one while we are at it.
|
||||
dnl
|
||||
AC_DEFUN([APR_CHECK_DIRENT_TYPE], [
|
||||
AC_CACHE_CHECK([for file type member of struct dirent], apr_cv_dirent_type,[
|
||||
apr_cv_dirent_type=no
|
||||
AC_TRY_COMPILE([
|
||||
#include <sys/types.h>
|
||||
#include <dirent.h>
|
||||
],[
|
||||
struct dirent de; de.d_type = DT_REG;
|
||||
], apr_cv_dirent_type=d_type)
|
||||
])
|
||||
if test "$apr_cv_dirent_type" != "no"; then
|
||||
AC_DEFINE_UNQUOTED(DIRENT_TYPE, $apr_cv_dirent_type,
|
||||
[Define if struct dirent has a d_type member])
|
||||
fi
|
||||
])
|
||||
|
||||
dnl the following is a newline, a space, a tab, and a backslash (the
|
||||
dnl backslash is used by the shell to skip newlines, but m4 sees it;
|
||||
dnl treat it like whitespace).
|
||||
dnl WARNING: don't reindent these lines, or the space/tab will be lost!
|
||||
define([apr_whitespace],[
|
||||
\])
|
||||
|
||||
dnl
|
||||
dnl APR_COMMA_ARGS(ARG1 ...)
|
||||
dnl convert the whitespace-separated arguments into comman-separated
|
||||
dnl arguments.
|
||||
dnl
|
||||
dnl APR_FOREACH(CODE-BLOCK, ARG1, ARG2, ...)
|
||||
dnl subsitute CODE-BLOCK for each ARG[i]. "eachval" will be set to ARG[i]
|
||||
dnl within each iteration.
|
||||
dnl
|
||||
changequote({,})
|
||||
define({APR_COMMA_ARGS},{patsubst([$}{1],[[}apr_whitespace{]+],[,])})
|
||||
define({APR_FOREACH},
|
||||
{ifelse($}{2,,,
|
||||
[define([eachval],
|
||||
$}{2)$}{1[]APR_FOREACH([$}{1],
|
||||
builtin([shift],
|
||||
builtin([shift], $}{@)))])})
|
||||
changequote([,])
|
||||
|
||||
dnl APR_FLAG_HEADERS(HEADER-FILE ... [, FLAG-TO-SET ] [, "yes" ])
|
||||
dnl we set FLAG-TO-SET to 1 if we find HEADER-FILE, otherwise we set to 0
|
||||
dnl if FLAG-TO-SET is null, we automagically determine it's name
|
||||
dnl by changing all "/" to "_" in the HEADER-FILE and dropping
|
||||
dnl all "." and "-" chars. If the 3rd parameter is "yes" then instead of
|
||||
dnl setting to 1 or 0, we set FLAG-TO-SET to yes or no.
|
||||
dnl
|
||||
AC_DEFUN([APR_FLAG_HEADERS], [
|
||||
AC_CHECK_HEADERS($1)
|
||||
for aprt_i in $1
|
||||
do
|
||||
ac_safe=`echo "$aprt_i" | sed 'y%./+-%__p_%'`
|
||||
aprt_2=`echo "$aprt_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'`
|
||||
if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then
|
||||
eval "ifelse($2,,$aprt_2,$2)=ifelse($3,yes,yes,1)"
|
||||
else
|
||||
eval "ifelse($2,,$aprt_2,$2)=ifelse($3,yes,no,0)"
|
||||
fi
|
||||
done
|
||||
])
|
||||
|
||||
dnl APR_FLAG_FUNCS(FUNC ... [, FLAG-TO-SET] [, "yes" ])
|
||||
dnl if FLAG-TO-SET is null, we automagically determine it's name
|
||||
dnl prepending "have_" to the function name in FUNC, otherwise
|
||||
dnl we use what's provided as FLAG-TO-SET. If the 3rd parameter
|
||||
dnl is "yes" then instead of setting to 1 or 0, we set FLAG-TO-SET
|
||||
dnl to yes or no.
|
||||
dnl
|
||||
AC_DEFUN([APR_FLAG_FUNCS], [
|
||||
AC_CHECK_FUNCS($1)
|
||||
for aprt_j in $1
|
||||
do
|
||||
aprt_3="have_$aprt_j"
|
||||
if eval "test \"`echo '$ac_cv_func_'$aprt_j`\" = yes"; then
|
||||
eval "ifelse($2,,$aprt_3,$2)=ifelse($3,yes,yes,1)"
|
||||
else
|
||||
eval "ifelse($2,,$aprt_3,$2)=ifelse($3,yes,no,0)"
|
||||
fi
|
||||
done
|
||||
])
|
||||
|
||||
dnl Iteratively interpolate the contents of the second argument
|
||||
dnl until interpolation offers no new result. Then assign the
|
||||
dnl final result to $1.
|
||||
dnl
|
||||
dnl Example:
|
||||
dnl
|
||||
dnl foo=1
|
||||
dnl bar='${foo}/2'
|
||||
dnl baz='${bar}/3'
|
||||
dnl APR_EXPAND_VAR(fraz, $baz)
|
||||
dnl $fraz is now "1/2/3"
|
||||
dnl
|
||||
AC_DEFUN([APR_EXPAND_VAR], [
|
||||
ap_last=
|
||||
ap_cur="$2"
|
||||
while test "x${ap_cur}" != "x${ap_last}";
|
||||
do
|
||||
ap_last="${ap_cur}"
|
||||
ap_cur=`eval "echo ${ap_cur}"`
|
||||
done
|
||||
$1="${ap_cur}"
|
||||
])
|
||||
|
||||
dnl
|
||||
dnl Removes the value of $3 from the string in $2, strips of any leading
|
||||
dnl slashes, and returns the value in $1.
|
||||
dnl
|
||||
dnl Example:
|
||||
dnl orig_path="${prefix}/bar"
|
||||
dnl APR_PATH_RELATIVE(final_path, $orig_path, $prefix)
|
||||
dnl $final_path now contains "bar"
|
||||
AC_DEFUN([APR_PATH_RELATIVE], [
|
||||
ap_stripped=`echo $2 | sed -e "s#^$3##"`
|
||||
# check if the stripping was successful
|
||||
if test "x$2" != "x${ap_stripped}"; then
|
||||
# it was, so strip of any leading slashes
|
||||
$1="`echo ${ap_stripped} | sed -e 's#^/*##'`"
|
||||
else
|
||||
# it wasn't so return the original
|
||||
$1="$2"
|
||||
fi
|
||||
])
|
||||
|
||||
dnl APR_HELP_STRING(LHS, RHS)
|
||||
dnl Autoconf 2.50 can not handle substr correctly. It does have
|
||||
dnl AC_HELP_STRING, so let's try to call it if we can.
|
||||
dnl Note: this define must be on one line so that it can be properly returned
|
||||
dnl as the help string. When using this macro with a multi-line RHS, ensure
|
||||
dnl that you surround the macro invocation with []s
|
||||
AC_DEFUN([APR_HELP_STRING], [ifelse(regexp(AC_ACVERSION, 2\.1), -1, AC_HELP_STRING([$1],[$2]),[ ][$1] substr([ ],len($1))[$2])])
|
||||
|
||||
dnl
|
||||
dnl APR_LAYOUT(configlayout, layoutname [, extravars])
|
||||
dnl
|
||||
AC_DEFUN([APR_LAYOUT], [
|
||||
if test ! -f $srcdir/config.layout; then
|
||||
echo "** Error: Layout file $srcdir/config.layout not found"
|
||||
echo "** Error: Cannot use undefined layout '$LAYOUT'"
|
||||
exit 1
|
||||
fi
|
||||
# Catch layout names including a slash which will otherwise
|
||||
# confuse the heck out of the sed script.
|
||||
case $2 in
|
||||
*/*)
|
||||
echo "** Error: $2 is not a valid layout name"
|
||||
exit 1 ;;
|
||||
esac
|
||||
pldconf=./config.pld
|
||||
changequote({,})
|
||||
sed -e "1s/[ ]*<[lL]ayout[ ]*$2[ ]*>[ ]*//;1t" \
|
||||
-e "1,/[ ]*<[lL]ayout[ ]*$2[ ]*>[ ]*/d" \
|
||||
-e '/[ ]*<\/Layout>[ ]*/,$d' \
|
||||
-e "s/^[ ]*//g" \
|
||||
-e "s/:[ ]*/=\'/g" \
|
||||
-e "s/[ ]*$/'/g" \
|
||||
$1 > $pldconf
|
||||
layout_name=$2
|
||||
if test ! -s $pldconf; then
|
||||
echo "** Error: unable to find layout $layout_name"
|
||||
exit 1
|
||||
fi
|
||||
. $pldconf
|
||||
rm $pldconf
|
||||
for var in prefix exec_prefix bindir sbindir libexecdir mandir \
|
||||
sysconfdir datadir includedir localstatedir runtimedir \
|
||||
logfiledir libdir installbuilddir libsuffix $3; do
|
||||
eval "val=\"\$$var\""
|
||||
case $val in
|
||||
*+)
|
||||
val=`echo $val | sed -e 's;\+$;;'`
|
||||
eval "$var=\"\$val\""
|
||||
autosuffix=yes
|
||||
;;
|
||||
*)
|
||||
autosuffix=no
|
||||
;;
|
||||
esac
|
||||
val=`echo $val | sed -e 's:\(.\)/*$:\1:'`
|
||||
val=`echo $val | sed -e 's:[\$]\([a-z_]*\):${\1}:g'`
|
||||
if test "$autosuffix" = "yes"; then
|
||||
if echo $val | grep apache >/dev/null; then
|
||||
addtarget=no
|
||||
else
|
||||
addtarget=yes
|
||||
fi
|
||||
if test "$addtarget" = "yes"; then
|
||||
val="$val/apache2"
|
||||
fi
|
||||
fi
|
||||
eval "$var='$val'"
|
||||
done
|
||||
changequote([,])
|
||||
])dnl
|
||||
|
||||
dnl
|
||||
dnl APR_ENABLE_LAYOUT(default layout name [, extra vars])
|
||||
dnl
|
||||
AC_DEFUN([APR_ENABLE_LAYOUT], [
|
||||
AC_ARG_ENABLE(layout,
|
||||
[ --enable-layout=LAYOUT],[
|
||||
LAYOUT=$enableval
|
||||
])
|
||||
|
||||
if test -z "$LAYOUT"; then
|
||||
LAYOUT="$1"
|
||||
fi
|
||||
APR_LAYOUT($srcdir/config.layout, $LAYOUT, $2)
|
||||
|
||||
AC_MSG_CHECKING(for chosen layout)
|
||||
AC_MSG_RESULT($layout_name)
|
||||
])
|
||||
|
||||
|
||||
dnl
|
||||
dnl APR_PARSE_ARGUMENTS
|
||||
dnl a reimplementation of autoconf's argument parser,
|
||||
dnl used here to allow us to co-exist layouts and argument based
|
||||
dnl set ups.
|
||||
AC_DEFUN([APR_PARSE_ARGUMENTS], [
|
||||
ac_prev=
|
||||
# Retrieve the command-line arguments. The eval is needed because
|
||||
# the arguments are quoted to preserve accuracy.
|
||||
eval "set x $ac_configure_args"
|
||||
shift
|
||||
for ac_option
|
||||
do
|
||||
# If the previous option needs an argument, assign it.
|
||||
if test -n "$ac_prev"; then
|
||||
eval "$ac_prev=\$ac_option"
|
||||
ac_prev=
|
||||
continue
|
||||
fi
|
||||
|
||||
ac_optarg=`expr "x$ac_option" : 'x[[^=]]*=\(.*\)'`
|
||||
|
||||
case $ac_option in
|
||||
|
||||
-bindir | --bindir | --bindi | --bind | --bin | --bi)
|
||||
ac_prev=bindir ;;
|
||||
-bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
|
||||
bindir="$ac_optarg" ;;
|
||||
|
||||
-datadir | --datadir | --datadi | --datad | --data | --dat | --da)
|
||||
ac_prev=datadir ;;
|
||||
-datadir=* | --datadir=* | --datadi=* | --datad=* | --data=* | --dat=* \
|
||||
| --da=*)
|
||||
datadir="$ac_optarg" ;;
|
||||
|
||||
-exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
|
||||
| --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
|
||||
| --exec | --exe | --ex)
|
||||
ac_prev=exec_prefix ;;
|
||||
-exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
|
||||
| --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
|
||||
| --exec=* | --exe=* | --ex=*)
|
||||
exec_prefix="$ac_optarg" ;;
|
||||
|
||||
-includedir | --includedir | --includedi | --included | --include \
|
||||
| --includ | --inclu | --incl | --inc)
|
||||
ac_prev=includedir ;;
|
||||
-includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
|
||||
| --includ=* | --inclu=* | --incl=* | --inc=*)
|
||||
includedir="$ac_optarg" ;;
|
||||
|
||||
-infodir | --infodir | --infodi | --infod | --info | --inf)
|
||||
ac_prev=infodir ;;
|
||||
-infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
|
||||
infodir="$ac_optarg" ;;
|
||||
|
||||
-libdir | --libdir | --libdi | --libd)
|
||||
ac_prev=libdir ;;
|
||||
-libdir=* | --libdir=* | --libdi=* | --libd=*)
|
||||
libdir="$ac_optarg" ;;
|
||||
|
||||
-libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
|
||||
| --libexe | --libex | --libe)
|
||||
ac_prev=libexecdir ;;
|
||||
-libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
|
||||
| --libexe=* | --libex=* | --libe=*)
|
||||
libexecdir="$ac_optarg" ;;
|
||||
|
||||
-localstatedir | --localstatedir | --localstatedi | --localstated \
|
||||
| --localstate | --localstat | --localsta | --localst \
|
||||
| --locals | --local | --loca | --loc | --lo)
|
||||
ac_prev=localstatedir ;;
|
||||
-localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
|
||||
| --localstate=* | --localstat=* | --localsta=* | --localst=* \
|
||||
| --locals=* | --local=* | --loca=* | --loc=* | --lo=*)
|
||||
localstatedir="$ac_optarg" ;;
|
||||
|
||||
-mandir | --mandir | --mandi | --mand | --man | --ma | --m)
|
||||
ac_prev=mandir ;;
|
||||
-mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
|
||||
mandir="$ac_optarg" ;;
|
||||
|
||||
-prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
|
||||
ac_prev=prefix ;;
|
||||
-prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
|
||||
prefix="$ac_optarg" ;;
|
||||
|
||||
-sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
|
||||
ac_prev=sbindir ;;
|
||||
-sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
|
||||
| --sbi=* | --sb=*)
|
||||
sbindir="$ac_optarg" ;;
|
||||
|
||||
-sharedstatedir | --sharedstatedir | --sharedstatedi \
|
||||
| --sharedstated | --sharedstate | --sharedstat | --sharedsta \
|
||||
| --sharedst | --shareds | --shared | --share | --shar \
|
||||
| --sha | --sh)
|
||||
ac_prev=sharedstatedir ;;
|
||||
-sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
|
||||
| --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
|
||||
| --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
|
||||
| --sha=* | --sh=*)
|
||||
sharedstatedir="$ac_optarg" ;;
|
||||
|
||||
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
|
||||
| --syscon | --sysco | --sysc | --sys | --sy)
|
||||
ac_prev=sysconfdir ;;
|
||||
-sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
|
||||
| --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
|
||||
sysconfdir="$ac_optarg" ;;
|
||||
|
||||
esac
|
||||
done
|
||||
|
||||
# Be sure to have absolute paths.
|
||||
for ac_var in exec_prefix prefix
|
||||
do
|
||||
eval ac_val=$`echo $ac_var`
|
||||
case $ac_val in
|
||||
[[\\/$]]* | ?:[[\\/]]* | NONE | '' ) ;;
|
||||
*) AC_MSG_ERROR([expected an absolute path for --$ac_var: $ac_val]);;
|
||||
esac
|
||||
done
|
||||
|
||||
])dnl
|
||||
|
||||
dnl
|
||||
dnl APR_CHECK_DEPEND
|
||||
dnl
|
||||
dnl Determine what program we can use to generate .deps-style dependencies
|
||||
dnl
|
||||
AC_DEFUN([APR_CHECK_DEPEND], [
|
||||
dnl Try to determine what depend program we can use
|
||||
dnl All GCC-variants should have -MM.
|
||||
dnl If not, then we can check on those, too.
|
||||
if test "$GCC" = "yes"; then
|
||||
MKDEP='$(CC) -MM'
|
||||
else
|
||||
rm -f conftest.c
|
||||
dnl <sys/types.h> should be available everywhere!
|
||||
cat > conftest.c <<EOF
|
||||
#include <sys/types.h>
|
||||
int main() { return 0; }
|
||||
EOF
|
||||
MKDEP="true"
|
||||
for i in "$CC -MM" "$CC -M" "$CPP -MM" "$CPP -M" "cpp -M"; do
|
||||
AC_MSG_CHECKING([if $i can create proper make dependencies])
|
||||
if $i conftest.c 2>/dev/null | grep 'conftest.o: conftest.c' >/dev/null; then
|
||||
MKDEP=$i
|
||||
AC_MSG_RESULT(yes)
|
||||
break;
|
||||
fi
|
||||
AC_MSG_RESULT(no)
|
||||
done
|
||||
rm -f conftest.c
|
||||
fi
|
||||
|
||||
AC_SUBST(MKDEP)
|
||||
])
|
||||
|
||||
dnl
|
||||
dnl APR_CHECK_TYPES_COMPATIBLE(TYPE-1, TYPE-2, [ACTION-IF-TRUE])
|
||||
dnl
|
||||
dnl Try to determine whether two types are the same. Only works
|
||||
dnl for gcc and icc.
|
||||
dnl
|
||||
AC_DEFUN([APR_CHECK_TYPES_COMPATIBLE], [
|
||||
define([apr_cvname], apr_cv_typematch_[]translit([$1], [ ], [_])_[]translit([$2], [ ], [_]))
|
||||
AC_CACHE_CHECK([whether $1 and $2 are the same], apr_cvname, [
|
||||
AC_TRY_COMPILE(AC_INCLUDES_DEFAULT, [
|
||||
int foo[0 - !__builtin_types_compatible_p($1, $2)];
|
||||
], [apr_cvname=yes
|
||||
$3], [apr_cvname=no])])
|
||||
])
|
1544
build/config.guess
vendored
Executable file
1544
build/config.guess
vendored
Executable file
File diff suppressed because it is too large
Load Diff
1788
build/config.sub
vendored
Executable file
1788
build/config.sub
vendored
Executable file
File diff suppressed because it is too large
Load Diff
202
build/find_apr.m4
Normal file
202
build/find_apr.m4
Normal file
@ -0,0 +1,202 @@
|
||||
dnl -------------------------------------------------------- -*- autoconf -*-
|
||||
dnl Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
dnl contributor license agreements. See the NOTICE file distributed with
|
||||
dnl this work for additional information regarding copyright ownership.
|
||||
dnl The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
dnl (the "License"); you may not use this file except in compliance with
|
||||
dnl the License. You may obtain a copy of the License at
|
||||
dnl
|
||||
dnl http://www.apache.org/licenses/LICENSE-2.0
|
||||
dnl
|
||||
dnl Unless required by applicable law or agreed to in writing, software
|
||||
dnl distributed under the License is distributed on an "AS IS" BASIS,
|
||||
dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
dnl See the License for the specific language governing permissions and
|
||||
dnl limitations under the License.
|
||||
|
||||
dnl
|
||||
dnl find_apr.m4 : locate the APR include files and libraries
|
||||
dnl
|
||||
dnl This macro file can be used by applications to find and use the APR
|
||||
dnl library. It provides a standardized mechanism for using APR. It supports
|
||||
dnl embedding APR into the application source, or locating an installed
|
||||
dnl copy of APR.
|
||||
dnl
|
||||
dnl APR_FIND_APR(srcdir, builddir, implicit-install-check, acceptable-majors,
|
||||
dnl detailed-check)
|
||||
dnl
|
||||
dnl where srcdir is the location of the bundled APR source directory, or
|
||||
dnl empty if source is not bundled.
|
||||
dnl
|
||||
dnl where builddir is the location where the bundled APR will will be built,
|
||||
dnl or empty if the build will occur in the srcdir.
|
||||
dnl
|
||||
dnl where implicit-install-check set to 1 indicates if there is no
|
||||
dnl --with-apr option specified, we will look for installed copies.
|
||||
dnl
|
||||
dnl where acceptable-majors is a space separated list of acceptable major
|
||||
dnl version numbers. Often only a single major version will be acceptable.
|
||||
dnl If multiple versions are specified, and --with-apr=PREFIX or the
|
||||
dnl implicit installed search are used, then the first (leftmost) version
|
||||
dnl in the list that is found will be used. Currently defaults to [0 1].
|
||||
dnl
|
||||
dnl where detailed-check is an M4 macro which sets the apr_acceptable to
|
||||
dnl either "yes" or "no". The macro will be invoked for each installed
|
||||
dnl copy of APR found, with the apr_config variable set appropriately.
|
||||
dnl Only installed copies of APR which are considered acceptable by
|
||||
dnl this macro will be considered found. If no installed copies are
|
||||
dnl considered acceptable by this macro, apr_found will be set to either
|
||||
dnl either "no" or "reconfig".
|
||||
dnl
|
||||
dnl Sets the following variables on exit:
|
||||
dnl
|
||||
dnl apr_found : "yes", "no", "reconfig"
|
||||
dnl
|
||||
dnl apr_config : If the apr-config tool exists, this refers to it. If
|
||||
dnl apr_found is "reconfig", then the bundled directory
|
||||
dnl should be reconfigured *before* using apr_config.
|
||||
dnl
|
||||
dnl Note: this macro file assumes that apr-config has been installed; it
|
||||
dnl is normally considered a required part of an APR installation.
|
||||
dnl
|
||||
dnl If a bundled source directory is available and needs to be (re)configured,
|
||||
dnl then apr_found is set to "reconfig". The caller should reconfigure the
|
||||
dnl (passed-in) source directory, placing the result in the build directory,
|
||||
dnl as appropriate.
|
||||
dnl
|
||||
dnl If apr_found is "yes" or "reconfig", then the caller should use the
|
||||
dnl value of apr_config to fetch any necessary build/link information.
|
||||
dnl
|
||||
|
||||
AC_DEFUN([APR_FIND_APR], [
|
||||
apr_found="no"
|
||||
|
||||
if test "$target_os" = "os2-emx"; then
|
||||
# Scripts don't pass test -x on OS/2
|
||||
TEST_X="test -f"
|
||||
else
|
||||
TEST_X="test -x"
|
||||
fi
|
||||
|
||||
ifelse([$4], [], [
|
||||
ifdef(AC_WARNING,AC_WARNING([$0: missing argument 4 (acceptable-majors): Defaulting to APR 0.x then APR 1.x]))
|
||||
acceptable_majors="0 1"],
|
||||
[acceptable_majors="$4"])
|
||||
|
||||
apr_temp_acceptable_apr_config=""
|
||||
for apr_temp_major in $acceptable_majors
|
||||
do
|
||||
case $apr_temp_major in
|
||||
0)
|
||||
apr_temp_acceptable_apr_config="$apr_temp_acceptable_apr_config apr-config"
|
||||
;;
|
||||
*)
|
||||
apr_temp_acceptable_apr_config="$apr_temp_acceptable_apr_config apr-$apr_temp_major-config"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
AC_MSG_CHECKING(for APR)
|
||||
AC_ARG_WITH(apr,
|
||||
[ --with-apr=PATH prefix for installed APR or the full path to
|
||||
apr-config],
|
||||
[
|
||||
if test "$withval" = "no" || test "$withval" = "yes"; then
|
||||
AC_MSG_ERROR([--with-apr requires a directory or file to be provided])
|
||||
fi
|
||||
|
||||
for apr_temp_apr_config_file in $apr_temp_acceptable_apr_config
|
||||
do
|
||||
for lookdir in "$withval/bin" "$withval"
|
||||
do
|
||||
if $TEST_X "$lookdir/$apr_temp_apr_config_file"; then
|
||||
apr_config="$lookdir/$apr_temp_apr_config_file"
|
||||
ifelse([$5], [], [], [
|
||||
apr_acceptable="yes"
|
||||
$5
|
||||
if test "$apr_acceptable" != "yes"; then
|
||||
AC_MSG_WARN([Found APR in $apr_config, but we think it is considered unacceptable])
|
||||
continue
|
||||
fi])
|
||||
apr_found="yes"
|
||||
break 2
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
if test "$apr_found" != "yes" && $TEST_X "$withval" && $withval --help > /dev/null 2>&1 ; then
|
||||
apr_config="$withval"
|
||||
ifelse([$5], [], [apr_found="yes"], [
|
||||
apr_acceptable="yes"
|
||||
$5
|
||||
if test "$apr_acceptable" = "yes"; then
|
||||
apr_found="yes"
|
||||
fi])
|
||||
fi
|
||||
|
||||
dnl if --with-apr is used, it is a fatal error for its argument
|
||||
dnl to be invalid
|
||||
if test "$apr_found" != "yes"; then
|
||||
AC_MSG_ERROR([the --with-apr parameter is incorrect. It must specify an install prefix, a build directory, or an apr-config file.])
|
||||
fi
|
||||
],[
|
||||
dnl If we allow installed copies, check those before using bundled copy.
|
||||
if test -n "$3" && test "$3" = "1"; then
|
||||
for apr_temp_apr_config_file in $apr_temp_acceptable_apr_config
|
||||
do
|
||||
if $apr_temp_apr_config_file --help > /dev/null 2>&1 ; then
|
||||
apr_config="$apr_temp_apr_config_file"
|
||||
ifelse([$5], [], [], [
|
||||
apr_acceptable="yes"
|
||||
$5
|
||||
if test "$apr_acceptable" != "yes"; then
|
||||
AC_MSG_WARN([skipped APR at $apr_config, version not acceptable])
|
||||
continue
|
||||
fi])
|
||||
apr_found="yes"
|
||||
break
|
||||
else
|
||||
dnl look in some standard places
|
||||
for lookdir in /usr /usr/local /usr/local/apr /opt/apr; do
|
||||
if $TEST_X "$lookdir/bin/$apr_temp_apr_config_file"; then
|
||||
apr_config="$lookdir/bin/$apr_temp_apr_config_file"
|
||||
ifelse([$5], [], [], [
|
||||
apr_acceptable="yes"
|
||||
$5
|
||||
if test "$apr_acceptable" != "yes"; then
|
||||
AC_MSG_WARN([skipped APR at $apr_config, version not acceptable])
|
||||
continue
|
||||
fi])
|
||||
apr_found="yes"
|
||||
break 2
|
||||
fi
|
||||
done
|
||||
fi
|
||||
done
|
||||
fi
|
||||
dnl if we have not found anything yet and have bundled source, use that
|
||||
if test "$apr_found" = "no" && test -d "$1"; then
|
||||
apr_temp_abs_srcdir="`cd \"$1\" && pwd`"
|
||||
apr_found="reconfig"
|
||||
apr_bundled_major="`sed -n '/#define.*APR_MAJOR_VERSION/s/^[^0-9]*\([0-9]*\).*$/\1/p' \"$1/include/apr_version.h\"`"
|
||||
case $apr_bundled_major in
|
||||
"")
|
||||
AC_MSG_ERROR([failed to find major version of bundled APR])
|
||||
;;
|
||||
0)
|
||||
apr_temp_apr_config_file="apr-config"
|
||||
;;
|
||||
*)
|
||||
apr_temp_apr_config_file="apr-$apr_bundled_major-config"
|
||||
;;
|
||||
esac
|
||||
if test -n "$2"; then
|
||||
apr_config="$2/$apr_temp_apr_config_file"
|
||||
else
|
||||
apr_config="$1/$apr_temp_apr_config_file"
|
||||
fi
|
||||
fi
|
||||
])
|
||||
|
||||
AC_MSG_RESULT($apr_found)
|
||||
])
|
211
build/find_apu.m4
Normal file
211
build/find_apu.m4
Normal file
@ -0,0 +1,211 @@
|
||||
dnl -------------------------------------------------------- -*- autoconf -*-
|
||||
dnl Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
dnl contributor license agreements. See the NOTICE file distributed with
|
||||
dnl this work for additional information regarding copyright ownership.
|
||||
dnl The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
dnl (the "License"); you may not use this file except in compliance with
|
||||
dnl the License. You may obtain a copy of the License at
|
||||
dnl
|
||||
dnl http://www.apache.org/licenses/LICENSE-2.0
|
||||
dnl
|
||||
dnl Unless required by applicable law or agreed to in writing, software
|
||||
dnl distributed under the License is distributed on an "AS IS" BASIS,
|
||||
dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
dnl See the License for the specific language governing permissions and
|
||||
dnl limitations under the License.
|
||||
|
||||
dnl
|
||||
dnl find_apu.m4 : locate the APR-util (APU) include files and libraries
|
||||
dnl
|
||||
dnl This macro file can be used by applications to find and use the APU
|
||||
dnl library. It provides a standardized mechanism for using APU. It supports
|
||||
dnl embedding APU into the application source, or locating an installed
|
||||
dnl copy of APU.
|
||||
dnl
|
||||
dnl APR_FIND_APU(srcdir, builddir, implicit-install-check, acceptable-majors,
|
||||
dnl detailed-check)
|
||||
dnl
|
||||
dnl where srcdir is the location of the bundled APU source directory, or
|
||||
dnl empty if source is not bundled.
|
||||
dnl
|
||||
dnl where builddir is the location where the bundled APU will be built,
|
||||
dnl or empty if the build will occur in the srcdir.
|
||||
dnl
|
||||
dnl where implicit-install-check set to 1 indicates if there is no
|
||||
dnl --with-apr-util option specified, we will look for installed copies.
|
||||
dnl
|
||||
dnl where acceptable-majors is a space separated list of acceptable major
|
||||
dnl version numbers. Often only a single major version will be acceptable.
|
||||
dnl If multiple versions are specified, and --with-apr-util=PREFIX or the
|
||||
dnl implicit installed search are used, then the first (leftmost) version
|
||||
dnl in the list that is found will be used. Currently defaults to [0 1].
|
||||
dnl
|
||||
dnl where detailed-check is an M4 macro which sets the apu_acceptable to
|
||||
dnl either "yes" or "no". The macro will be invoked for each installed
|
||||
dnl copy of APU found, with the apu_config variable set appropriately.
|
||||
dnl Only installed copies of APU which are considered acceptable by
|
||||
dnl this macro will be considered found. If no installed copies are
|
||||
dnl considered acceptable by this macro, apu_found will be set to either
|
||||
dnl either "no" or "reconfig".
|
||||
dnl
|
||||
dnl Sets the following variables on exit:
|
||||
dnl
|
||||
dnl apu_found : "yes", "no", "reconfig"
|
||||
dnl
|
||||
dnl apu_config : If the apu-config tool exists, this refers to it. If
|
||||
dnl apu_found is "reconfig", then the bundled directory
|
||||
dnl should be reconfigured *before* using apu_config.
|
||||
dnl
|
||||
dnl Note: this macro file assumes that apr-config has been installed; it
|
||||
dnl is normally considered a required part of an APR installation.
|
||||
dnl
|
||||
dnl Note: At this time, we cannot find *both* a source dir and a build dir.
|
||||
dnl If both are available, the build directory should be passed to
|
||||
dnl the --with-apr-util switch.
|
||||
dnl
|
||||
dnl Note: the installation layout is presumed to follow the standard
|
||||
dnl PREFIX/lib and PREFIX/include pattern. If the APU config file
|
||||
dnl is available (and can be found), then non-standard layouts are
|
||||
dnl possible, since it will be described in the config file.
|
||||
dnl
|
||||
dnl If a bundled source directory is available and needs to be (re)configured,
|
||||
dnl then apu_found is set to "reconfig". The caller should reconfigure the
|
||||
dnl (passed-in) source directory, placing the result in the build directory,
|
||||
dnl as appropriate.
|
||||
dnl
|
||||
dnl If apu_found is "yes" or "reconfig", then the caller should use the
|
||||
dnl value of apu_config to fetch any necessary build/link information.
|
||||
dnl
|
||||
|
||||
AC_DEFUN([APR_FIND_APU], [
|
||||
apu_found="no"
|
||||
|
||||
if test "$target_os" = "os2-emx"; then
|
||||
# Scripts don't pass test -x on OS/2
|
||||
TEST_X="test -f"
|
||||
else
|
||||
TEST_X="test -x"
|
||||
fi
|
||||
|
||||
ifelse([$4], [],
|
||||
[
|
||||
ifdef(AC_WARNING,([$0: missing argument 4 (acceptable-majors): Defaulting to APU 0.x then APU 1.x]))
|
||||
acceptable_majors="0 1"
|
||||
], [acceptable_majors="$4"])
|
||||
|
||||
apu_temp_acceptable_apu_config=""
|
||||
for apu_temp_major in $acceptable_majors
|
||||
do
|
||||
case $apu_temp_major in
|
||||
0)
|
||||
apu_temp_acceptable_apu_config="$apu_temp_acceptable_apu_config apu-config"
|
||||
;;
|
||||
*)
|
||||
apu_temp_acceptable_apu_config="$apu_temp_acceptable_apu_config apu-$apu_temp_major-config"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
AC_MSG_CHECKING(for APR-util)
|
||||
AC_ARG_WITH(apr-util,
|
||||
[ --with-apr-util=PATH prefix for installed APU or the full path to
|
||||
apu-config],
|
||||
[
|
||||
if test "$withval" = "no" || test "$withval" = "yes"; then
|
||||
AC_MSG_ERROR([--with-apr-util requires a directory or file to be provided])
|
||||
fi
|
||||
|
||||
for apu_temp_apu_config_file in $apu_temp_acceptable_apu_config
|
||||
do
|
||||
for lookdir in "$withval/bin" "$withval"
|
||||
do
|
||||
if $TEST_X "$lookdir/$apu_temp_apu_config_file"; then
|
||||
apu_config="$lookdir/$apu_temp_apu_config_file"
|
||||
ifelse([$5], [], [], [
|
||||
apu_acceptable="yes"
|
||||
$5
|
||||
if test "$apu_acceptable" != "yes"; then
|
||||
AC_MSG_WARN([Found APU in $apu_config, but it is considered unacceptable])
|
||||
continue
|
||||
fi])
|
||||
apu_found="yes"
|
||||
break 2
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
if test "$apu_found" != "yes" && $TEST_X "$withval" && $withval --help > /dev/null 2>&1 ; then
|
||||
apu_config="$withval"
|
||||
ifelse([$5], [], [apu_found="yes"], [
|
||||
apu_acceptable="yes"
|
||||
$5
|
||||
if test "$apu_acceptable" = "yes"; then
|
||||
apu_found="yes"
|
||||
fi])
|
||||
fi
|
||||
|
||||
dnl if --with-apr-util is used, it is a fatal error for its argument
|
||||
dnl to be invalid
|
||||
if test "$apu_found" != "yes"; then
|
||||
AC_MSG_ERROR([the --with-apr-util parameter is incorrect. It must specify an install prefix, a build directory, or an apu-config file.])
|
||||
fi
|
||||
],[
|
||||
if test -n "$3" && test "$3" = "1"; then
|
||||
for apu_temp_apu_config_file in $apu_temp_acceptable_apu_config
|
||||
do
|
||||
if $apu_temp_apu_config_file --help > /dev/null 2>&1 ; then
|
||||
apu_config="$apu_temp_apu_config_file"
|
||||
ifelse([$5], [], [], [
|
||||
apu_acceptable="yes"
|
||||
$5
|
||||
if test "$apu_acceptable" != "yes"; then
|
||||
AC_MSG_WARN([skipped APR-util at $apu_config, version not acceptable])
|
||||
continue
|
||||
fi])
|
||||
apu_found="yes"
|
||||
break
|
||||
else
|
||||
dnl look in some standard places (apparently not in builtin/default)
|
||||
for lookdir in /usr /usr/local /usr/local/apr /opt/apr; do
|
||||
if $TEST_X "$lookdir/bin/$apu_temp_apu_config_file"; then
|
||||
apu_config="$lookdir/bin/$apu_temp_apu_config_file"
|
||||
ifelse([$5], [], [], [
|
||||
apu_acceptable="yes"
|
||||
$5
|
||||
if test "$apu_acceptable" != "yes"; then
|
||||
AC_MSG_WARN([skipped APR-util at $apu_config, version not acceptable])
|
||||
continue
|
||||
fi])
|
||||
apu_found="yes"
|
||||
break 2
|
||||
fi
|
||||
done
|
||||
fi
|
||||
done
|
||||
fi
|
||||
dnl if we have not found anything yet and have bundled source, use that
|
||||
if test "$apu_found" = "no" && test -d "$1"; then
|
||||
apu_temp_abs_srcdir="`cd \"$1\" && pwd`"
|
||||
apu_found="reconfig"
|
||||
apu_bundled_major="`sed -n '/#define.*APU_MAJOR_VERSION/s/^[^0-9]*\([0-9]*\).*$/\1/p' \"$1/include/apu_version.h\"`"
|
||||
case $apu_bundled_major in
|
||||
"")
|
||||
AC_MSG_ERROR([failed to find major version of bundled APU])
|
||||
;;
|
||||
0)
|
||||
apu_temp_apu_config_file="apu-config"
|
||||
;;
|
||||
*)
|
||||
apu_temp_apu_config_file="apu-$apu_bundled_major-config"
|
||||
;;
|
||||
esac
|
||||
if test -n "$2"; then
|
||||
apu_config="$2/$apu_temp_apu_config_file"
|
||||
else
|
||||
apu_config="$1/$apu_temp_apu_config_file"
|
||||
fi
|
||||
fi
|
||||
])
|
||||
|
||||
AC_MSG_RESULT($apu_found)
|
||||
])
|
68
build/gen_def.py
Executable file
68
build/gen_def.py
Executable file
@ -0,0 +1,68 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# gen_def.py : Generate the .DEF file for Windows builds
|
||||
#
|
||||
# ====================================================================
|
||||
# Copyright 2002-2010 Justin Erenkrantz and Greg Stein
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ====================================================================
|
||||
#
|
||||
#
|
||||
# Typically, this script is used like:
|
||||
#
|
||||
# C:\PATH> python build/gen_def.py serf.h serf_bucket_types.h serf_bucket_util.h > build/serf.def
|
||||
#
|
||||
|
||||
import re
|
||||
import sys
|
||||
|
||||
# This regex parses function declarations that look like:
|
||||
#
|
||||
# return_type serf_func1(...
|
||||
# return_type *serf_func2(...
|
||||
#
|
||||
# Where return_type is a combination of words and "*" each separated by a
|
||||
# SINGLE space. If the function returns a pointer type (like serf_func2),
|
||||
# then a space may exist between the "*" and the function name. Thus,
|
||||
# a more complicated example might be:
|
||||
# const type * const * serf_func3(...
|
||||
#
|
||||
_funcs = re.compile(r'^(?:(?:\w+|\*) )+\*?(serf_[a-z][a-z_0-9]*)\(',
|
||||
re.MULTILINE)
|
||||
|
||||
# This regex parses the bucket type definitions which look like:
|
||||
#
|
||||
# extern const serf_bucket_type_t serf_bucket_type_FOO;
|
||||
#
|
||||
_types = re.compile(r'^extern const serf_bucket_type_t (serf_[a-z_]*);',
|
||||
re.MULTILINE)
|
||||
|
||||
|
||||
def extract_exports(fname):
|
||||
content = open(fname).read()
|
||||
exports = [ ]
|
||||
for name in _funcs.findall(content):
|
||||
exports.append(name)
|
||||
for name in _types.findall(content):
|
||||
exports.append(name)
|
||||
return exports
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
# run the extraction over each file mentioned
|
||||
import sys
|
||||
print("EXPORTS")
|
||||
for fname in sys.argv[1:]:
|
||||
for func in extract_exports(fname):
|
||||
print(func)
|
37
build/get-version.sh
Executable file
37
build/get-version.sh
Executable file
@ -0,0 +1,37 @@
|
||||
#!/bin/sh
|
||||
#
|
||||
# extract version numbers from a header file
|
||||
#
|
||||
# USAGE: get-version.sh CMD VERSION_HEADER PREFIX
|
||||
# where CMD is one of: all, major, libtool
|
||||
# where PREFIX is the prefix to {MAJOR|MINOR|PATCH}_VERSION defines
|
||||
#
|
||||
# get-version.sh all returns a dotted version number
|
||||
# get-version.sh major returns just the major version number
|
||||
# get-version.sh libtool returns a version "libtool -version-info" format
|
||||
#
|
||||
|
||||
if test $# != 3; then
|
||||
echo "USAGE: $0 CMD VERSION_HEADER PREFIX"
|
||||
echo " where CMD is one of: all, major, libtool"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
major_sed="/#define.*$3_MAJOR_VERSION/s/^[^0-9]*\([0-9]*\).*$/\1/p"
|
||||
minor_sed="/#define.*$3_MINOR_VERSION/s/^[^0-9]*\([0-9]*\).*$/\1/p"
|
||||
patch_sed="/#define.*$3_PATCH_VERSION/s/^[^0-9]*\([0-9]*\).*$/\1/p"
|
||||
major="`sed -n $major_sed $2`"
|
||||
minor="`sed -n $minor_sed $2`"
|
||||
patch="`sed -n $patch_sed $2`"
|
||||
|
||||
if test "$1" = "all"; then
|
||||
echo ${major}.${minor}.${patch}
|
||||
elif test "$1" = "major"; then
|
||||
echo ${major}
|
||||
elif test "$1" = "libtool"; then
|
||||
# Yes, ${minor}:${patch}:${minor} is correct due to libtool idiocy.
|
||||
echo ${minor}:${patch}:${minor}
|
||||
else
|
||||
echo "ERROR: unknown version CMD ($1)"
|
||||
exit 1
|
||||
fi
|
112
build/install.sh
Executable file
112
build/install.sh
Executable file
@ -0,0 +1,112 @@
|
||||
#!/bin/sh
|
||||
##
|
||||
## install.sh -- install a program, script or datafile
|
||||
##
|
||||
## Based on `install-sh' from the X Consortium's X11R5 distribution
|
||||
## as of 89/12/18 which is freely available.
|
||||
## Cleaned up for Apache's Autoconf-style Interface (APACI)
|
||||
## by Ralf S. Engelschall <rse@apache.org>
|
||||
##
|
||||
#
|
||||
# This script falls under the Apache License.
|
||||
# See http://www.apache.org/docs/LICENSE
|
||||
|
||||
|
||||
#
|
||||
# put in absolute paths if you don't have them in your path;
|
||||
# or use env. vars.
|
||||
#
|
||||
mvprog="${MVPROG-mv}"
|
||||
cpprog="${CPPROG-cp}"
|
||||
chmodprog="${CHMODPROG-chmod}"
|
||||
chownprog="${CHOWNPROG-chown}"
|
||||
chgrpprog="${CHGRPPROG-chgrp}"
|
||||
stripprog="${STRIPPROG-strip}"
|
||||
rmprog="${RMPROG-rm}"
|
||||
|
||||
#
|
||||
# parse argument line
|
||||
#
|
||||
instcmd="$mvprog"
|
||||
chmodcmd=""
|
||||
chowncmd=""
|
||||
chgrpcmd=""
|
||||
stripcmd=""
|
||||
rmcmd="$rmprog -f"
|
||||
mvcmd="$mvprog"
|
||||
ext=""
|
||||
src=""
|
||||
dst=""
|
||||
while [ "x$1" != "x" ]; do
|
||||
case $1 in
|
||||
-c) instcmd="$cpprog"
|
||||
shift; continue
|
||||
;;
|
||||
-m) chmodcmd="$chmodprog $2"
|
||||
shift; shift; continue
|
||||
;;
|
||||
-o) chowncmd="$chownprog $2"
|
||||
shift; shift; continue
|
||||
;;
|
||||
-g) chgrpcmd="$chgrpprog $2"
|
||||
shift; shift; continue
|
||||
;;
|
||||
-s) stripcmd="$stripprog"
|
||||
shift; continue
|
||||
;;
|
||||
-S) stripcmd="$stripprog $2"
|
||||
shift; shift; continue
|
||||
;;
|
||||
-e) ext="$2"
|
||||
shift; shift; continue
|
||||
;;
|
||||
*) if [ "x$src" = "x" ]; then
|
||||
src=$1
|
||||
else
|
||||
dst=$1
|
||||
fi
|
||||
shift; continue
|
||||
;;
|
||||
esac
|
||||
done
|
||||
if [ "x$src" = "x" ]; then
|
||||
echo "install.sh: no input file specified"
|
||||
exit 1
|
||||
fi
|
||||
if [ "x$dst" = "x" ]; then
|
||||
echo "install.sh: no destination specified"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
#
|
||||
# If destination is a directory, append the input filename; if
|
||||
# your system does not like double slashes in filenames, you may
|
||||
# need to add some logic
|
||||
#
|
||||
if [ -d $dst ]; then
|
||||
dst="$dst/`basename $src`"
|
||||
fi
|
||||
|
||||
# Add a possible extension (such as ".exe") to src and dst
|
||||
src="$src$ext"
|
||||
dst="$dst$ext"
|
||||
|
||||
# Make a temp file name in the proper directory.
|
||||
dstdir=`dirname $dst`
|
||||
dsttmp=$dstdir/#inst.$$#
|
||||
|
||||
# Move or copy the file name to the temp name
|
||||
$instcmd $src $dsttmp
|
||||
|
||||
# And set any options; do chmod last to preserve setuid bits
|
||||
if [ "x$chowncmd" != "x" ]; then $chowncmd $dsttmp; fi
|
||||
if [ "x$chgrpcmd" != "x" ]; then $chgrpcmd $dsttmp; fi
|
||||
if [ "x$stripcmd" != "x" ]; then $stripcmd $dsttmp; fi
|
||||
if [ "x$chmodcmd" != "x" ]; then $chmodcmd $dsttmp; fi
|
||||
|
||||
# Now rename the file to the real destination.
|
||||
$rmcmd $dst
|
||||
$mvcmd $dsttmp $dst
|
||||
|
||||
exit 0
|
||||
|
141
build/serf.def
Normal file
141
build/serf.def
Normal file
@ -0,0 +1,141 @@
|
||||
EXPORTS
|
||||
serf_error_string
|
||||
serf_context_create
|
||||
serf_context_create_ex
|
||||
serf_event_trigger
|
||||
serf_context_run
|
||||
serf_context_prerun
|
||||
serf_context_set_progress_cb
|
||||
serf_connection_create
|
||||
serf_connection_create2
|
||||
serf_listener_create
|
||||
serf_incoming_create
|
||||
serf_connection_reset
|
||||
serf_connection_close
|
||||
serf_connection_set_max_outstanding_requests
|
||||
serf_connection_set_async_responses
|
||||
serf_connection_request_create
|
||||
serf_connection_priority_request_create
|
||||
serf_connection_get_latency
|
||||
serf_request_is_written
|
||||
serf_request_cancel
|
||||
serf_request_get_pool
|
||||
serf_request_get_alloc
|
||||
serf_request_get_conn
|
||||
serf_request_set_handler
|
||||
serf_config_proxy
|
||||
serf_config_authn_types
|
||||
serf_config_credentials_callback
|
||||
serf_context_bucket_socket_create
|
||||
serf_request_bucket_request_create
|
||||
serf_bucket_allocator_create
|
||||
serf_bucket_allocator_get_pool
|
||||
serf_linebuf_init
|
||||
serf_linebuf_fetch
|
||||
serf_debug__record_read
|
||||
serf_debug__entered_loop
|
||||
serf_debug__closed_conn
|
||||
serf_debug__bucket_destroy
|
||||
serf_debug__bucket_alloc_check
|
||||
serf_lib_version
|
||||
serf_bucket_request_create
|
||||
serf_bucket_request_get_headers
|
||||
serf_bucket_request_become
|
||||
serf_bucket_request_set_root
|
||||
serf_bucket_response_create
|
||||
serf_bucket_response_status
|
||||
serf_bucket_response_wait_for_headers
|
||||
serf_bucket_response_get_headers
|
||||
serf_bucket_response_set_head
|
||||
serf_bucket_response_body_create
|
||||
serf_bucket_bwtp_frame_get_channel
|
||||
serf_bucket_bwtp_frame_get_type
|
||||
serf_bucket_bwtp_frame_get_phrase
|
||||
serf_bucket_bwtp_frame_get_headers
|
||||
serf_bucket_bwtp_channel_open
|
||||
serf_bucket_bwtp_channel_close
|
||||
serf_bucket_bwtp_header_create
|
||||
serf_bucket_bwtp_message_create
|
||||
serf_bucket_bwtp_incoming_frame_create
|
||||
serf_bucket_bwtp_incoming_frame_wait_for_headers
|
||||
serf_bucket_aggregate_cleanup
|
||||
serf_bucket_aggregate_create
|
||||
serf_bucket_aggregate_become
|
||||
serf_bucket_aggregate_prepend
|
||||
serf_bucket_aggregate_append
|
||||
serf_bucket_aggregate_hold_open
|
||||
serf_bucket_aggregate_prepend_iovec
|
||||
serf_bucket_aggregate_append_iovec
|
||||
serf_bucket_file_create
|
||||
serf_bucket_socket_create
|
||||
serf_bucket_socket_set_read_progress_cb
|
||||
serf_bucket_simple_create
|
||||
serf_bucket_simple_copy_create
|
||||
serf_bucket_mmap_create
|
||||
serf_bucket_headers_create
|
||||
serf_bucket_headers_set
|
||||
serf_bucket_headers_setc
|
||||
serf_bucket_headers_setn
|
||||
serf_bucket_headers_setx
|
||||
serf_bucket_headers_get
|
||||
serf_bucket_headers_do
|
||||
serf_bucket_chunk_create
|
||||
serf_bucket_dechunk_create
|
||||
serf_bucket_deflate_create
|
||||
serf_bucket_limit_create
|
||||
serf_ssl_client_cert_provider_set
|
||||
serf_ssl_client_cert_password_set
|
||||
serf_ssl_server_cert_callback_set
|
||||
serf_ssl_server_cert_chain_callback_set
|
||||
serf_ssl_use_default_certificates
|
||||
serf_ssl_set_hostname
|
||||
serf_ssl_cert_depth
|
||||
serf_ssl_cert_issuer
|
||||
serf_ssl_cert_subject
|
||||
serf_ssl_cert_certificate
|
||||
serf_ssl_cert_export
|
||||
serf_ssl_load_cert_file
|
||||
serf_ssl_trust_cert
|
||||
serf_ssl_use_compression
|
||||
serf_bucket_ssl_encrypt_create
|
||||
serf_bucket_ssl_encrypt_context_get
|
||||
serf_bucket_ssl_decrypt_create
|
||||
serf_bucket_ssl_decrypt_context_get
|
||||
serf_bucket_barrier_create
|
||||
serf_bucket_iovec_create
|
||||
serf_bucket_type_request
|
||||
serf_bucket_type_response
|
||||
serf_bucket_type_response_body
|
||||
serf_bucket_type_bwtp_frame
|
||||
serf_bucket_type_bwtp_incoming_frame
|
||||
serf_bucket_type_aggregate
|
||||
serf_bucket_type_file
|
||||
serf_bucket_type_socket
|
||||
serf_bucket_type_simple
|
||||
serf_bucket_type_mmap
|
||||
serf_bucket_type_headers
|
||||
serf_bucket_type_chunk
|
||||
serf_bucket_type_dechunk
|
||||
serf_bucket_type_deflate
|
||||
serf_bucket_type_limit
|
||||
serf_bucket_type_ssl_encrypt
|
||||
serf_bucket_type_ssl_decrypt
|
||||
serf_bucket_type_barrier
|
||||
serf_bucket_type_iovec
|
||||
serf_bucket_create
|
||||
serf_default_read_iovec
|
||||
serf_default_read_for_sendfile
|
||||
serf_default_read_bucket
|
||||
serf_default_destroy
|
||||
serf_default_destroy_and_data
|
||||
serf_bucket_mem_alloc
|
||||
serf_bucket_mem_calloc
|
||||
serf_bucket_mem_free
|
||||
serf_bstrmemdup
|
||||
serf_bmemdup
|
||||
serf_bstrdup
|
||||
serf_util_readline
|
||||
serf_databuf_init
|
||||
serf_databuf_read
|
||||
serf_databuf_readline
|
||||
serf_databuf_peek
|
119
buildconf
Executable file
119
buildconf
Executable file
@ -0,0 +1,119 @@
|
||||
#!/bin/sh
|
||||
#
|
||||
# Copyright 2005 Justin Erenkrantz and Greg Stein
|
||||
# Copyright 2005 The Apache Software Foundation or its licensors, as
|
||||
# applicable.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
#
|
||||
# buildconf: Build the support scripts needed to compile from a
|
||||
# checked-out version of the source code.
|
||||
|
||||
# set a couple of defaults for where we should be looking for our support libs.
|
||||
# can be overridden with --with-apr=[dir] and --with-apr-util=[dir]
|
||||
|
||||
apr_src_dir="apr ../apr"
|
||||
apu_src_dir="apr-util ../apr-util"
|
||||
|
||||
while test $# -gt 0
|
||||
do
|
||||
# Normalize
|
||||
case "$1" in
|
||||
-*=*) optarg=`echo "$1" | sed 's/[-_a-zA-Z0-9]*=//'` ;;
|
||||
*) optarg= ;;
|
||||
esac
|
||||
|
||||
case "$1" in
|
||||
--with-apr=*)
|
||||
apr_src_dir=$optarg
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$1" in
|
||||
--with-apr-util=*)
|
||||
apu_src_dir=$optarg
|
||||
;;
|
||||
esac
|
||||
|
||||
shift
|
||||
done
|
||||
|
||||
#
|
||||
# Check to be sure that we have the srclib dependencies checked-out
|
||||
#
|
||||
|
||||
should_exit=0
|
||||
apr_found=0
|
||||
apu_found=0
|
||||
|
||||
for dir in $apr_src_dir
|
||||
do
|
||||
if [ -d "${dir}" -a -f "${dir}/build/apr_common.m4" ]; then
|
||||
echo "found apr source: ${dir}"
|
||||
apr_src_dir=$dir
|
||||
apr_found=1
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ $apr_found -lt 1 ]; then
|
||||
echo ""
|
||||
echo "You don't have a copy of the apr source in srclib/apr. "
|
||||
echo "Please get the source using the following instructions,"
|
||||
echo "or specify the location of the source with "
|
||||
echo "--with-apr=[path to apr] :"
|
||||
echo ""
|
||||
echo " svn co http://svn.apache.org/repos/asf/apr/apr/trunk srclib/apr"
|
||||
echo ""
|
||||
should_exit=1
|
||||
fi
|
||||
|
||||
for dir in $apu_src_dir
|
||||
do
|
||||
if [ -d "${dir}" -a -f "${dir}/Makefile.in" ]; then
|
||||
echo "found apr-util source: ${dir}"
|
||||
apu_src_dir=$dir
|
||||
apu_found=1
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ $apu_found -lt 1 ]; then
|
||||
echo ""
|
||||
echo "APR-util not found. Assuming you are using APR 2.x."
|
||||
echo ""
|
||||
apu_src_dir=
|
||||
fi
|
||||
|
||||
if [ $should_exit -gt 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo copying build files
|
||||
cp $apr_src_dir/build/config.guess $apr_src_dir/build/config.sub \
|
||||
$apr_src_dir/build/install.sh $apr_src_dir/build/apr_common.m4 \
|
||||
$apr_src_dir/build/find_apr.m4 $apr_src_dir/build/get-version.sh build
|
||||
|
||||
if [ -n "$apu_src_dir" -a -d "$apu_src_dir" ] ; then
|
||||
cp $apu_src_dir/build/find_apu.m4 build
|
||||
fi
|
||||
|
||||
echo generating configure
|
||||
${AUTOCONF:-autoconf}
|
||||
|
||||
# Remove autoconf 2.5x's cache directory
|
||||
rm -rf autom4te*.cache
|
||||
|
||||
echo generating serf.def
|
||||
./build/gen_def.py serf.h serf_bucket_*.h > build/serf.def
|
26
config.layout
Normal file
26
config.layout
Normal file
@ -0,0 +1,26 @@
|
||||
##
|
||||
## config.layout -- Pre-defined Installation Path Layouts
|
||||
##
|
||||
## Hints:
|
||||
## - layouts can be loaded with configure's --enable-layout=ID option
|
||||
## - when no --enable-layout option is given, the default layout is `serf'
|
||||
## - a trailing plus character (`+') on paths is replaced with a
|
||||
## `/<target>' suffix where <target> is currently hardcoded to 'serf'.
|
||||
## (This may become a configurable parameter at some point.)
|
||||
##
|
||||
|
||||
<Layout Serf>
|
||||
prefix: /usr/local/serf
|
||||
exec_prefix: ${prefix}
|
||||
bindir: ${exec_prefix}/bin
|
||||
sbindir: ${exec_prefix}/bin
|
||||
libdir: ${exec_prefix}/lib
|
||||
libexecdir: ${exec_prefix}/modules
|
||||
mandir: ${prefix}/man
|
||||
sysconfdir: ${prefix}/conf
|
||||
datadir: ${prefix}
|
||||
installbuilddir: ${datadir}/build-${SERF_MAJOR_VERSION}
|
||||
includedir: ${prefix}/include/serf-${SERF_MAJOR_VERSION}
|
||||
localstatedir: ${prefix}
|
||||
libsuffix: -${SERF_MAJOR_VERSION}
|
||||
</Layout>
|
277
configure.in
Normal file
277
configure.in
Normal file
@ -0,0 +1,277 @@
|
||||
dnl Autoconf file for Serf
|
||||
|
||||
AC_PREREQ(2.50)
|
||||
AC_INIT(context.c)
|
||||
|
||||
AC_CONFIG_AUX_DIR(build)
|
||||
|
||||
sinclude(build/apr_common.m4)
|
||||
sinclude(build/find_apr.m4)
|
||||
|
||||
AC_PREFIX_DEFAULT(/usr/local/serf)
|
||||
|
||||
dnl Get the layout here, so we can pass the required variables to apr
|
||||
APR_ENABLE_LAYOUT(Serf, [])
|
||||
|
||||
dnl reparse the configure arguments.
|
||||
APR_PARSE_ARGUMENTS
|
||||
|
||||
APR_SAVE_THE_ENVIRONMENT(CPPFLAGS)
|
||||
APR_SAVE_THE_ENVIRONMENT(CFLAGS)
|
||||
APR_SAVE_THE_ENVIRONMENT(CXXFLAGS)
|
||||
APR_SAVE_THE_ENVIRONMENT(LDFLAGS)
|
||||
APR_SAVE_THE_ENVIRONMENT(LIBS)
|
||||
APR_SAVE_THE_ENVIRONMENT(INCLUDES)
|
||||
|
||||
APR_CONFIG_NICE(config.nice)
|
||||
|
||||
nl='
|
||||
'
|
||||
dnl Check that mkdir -p works
|
||||
APR_MKDIR_P_CHECK($top_srcdir/build/mkdir.sh)
|
||||
AC_SUBST(mkdir_p)
|
||||
|
||||
dnl ## Run configure for packages Apache uses
|
||||
|
||||
dnl shared library support for these packages doesn't currently
|
||||
dnl work on some platforms
|
||||
|
||||
AC_CANONICAL_SYSTEM
|
||||
|
||||
orig_prefix="$prefix"
|
||||
|
||||
echo $ac_n "${nl}Configuring Apache Portable Runtime library...${nl}"
|
||||
|
||||
APR_FIND_APR("$srcdir/apr", "./apr", 1, 0 1 2)
|
||||
|
||||
if test "$apr_found" = "no"; then
|
||||
AC_MSG_ERROR([APR not found. Please read the documentation.])
|
||||
fi
|
||||
|
||||
if test "$apr_found" = "reconfig"; then
|
||||
APR_SUBDIR_CONFIG(apr,
|
||||
[--prefix=$prefix --exec-prefix=$exec_prefix --libdir=$libdir --includedir=$includedir --bindir=$bindir --datadir=$datadir --with-installbuilddir=$installbuilddir],
|
||||
[--enable-layout=*|\'--enable-layout=*])
|
||||
dnl We must be the first to build and the last to be cleaned
|
||||
SERF_BUILD_SRCLIB_DIRS="apr $SERF_BUILD_SRCLIB_DIRS"
|
||||
SERF_CLEAN_SRCLIB_DIRS="$SERF_CLEAN_SRCLIB_DIRS apr"
|
||||
fi
|
||||
|
||||
APR_SETIFNULL(CC, `$apr_config --cc`)
|
||||
APR_SETIFNULL(CPP, `$apr_config --cpp`)
|
||||
APR_SETIFNULL(APR_LIBTOOL, `$apr_config --apr-libtool`)
|
||||
APR_ADDTO(CFLAGS, `$apr_config --cflags`)
|
||||
APR_ADDTO(CPPFLAGS, `$apr_config --cppflags`)
|
||||
APR_ADDTO(LDFLAGS, `$apr_config --ldflags`)
|
||||
SHLIBPATH_VAR=`$apr_config --shlib-path-var`
|
||||
APR_BINDIR=`$apr_config --bindir`
|
||||
APR_INCLUDES=`$apr_config --includes`
|
||||
APR_VERSION=`$apr_config --version`
|
||||
APR_CONFIG="$apr_config"
|
||||
|
||||
APR_SETIFNULL(LTFLAGS, "--silent")
|
||||
AC_SUBST(LTFLAGS)
|
||||
|
||||
AC_SUBST(APR_LIBTOOL)
|
||||
AC_SUBST(APR_BINDIR)
|
||||
AC_SUBST(APR_INCLUDES)
|
||||
AC_SUBST(APR_VERSION)
|
||||
AC_SUBST(APR_CONFIG)
|
||||
|
||||
APR_VERSION_MAJOR="`echo \"$APR_VERSION\" | sed 's,\..*,,'`"
|
||||
APR_VERSION_NUM="`echo \"$APR_VERSION\" | \
|
||||
sed -e 's/[[^0-9\.]].*$//' \
|
||||
-e 's/\.\([[0-9]]\)$/.0\1/' \
|
||||
-e 's/\.\([[0-9]][[0-9]]\)$/.0\1/' \
|
||||
-e 's/\.\([[0-9]]\)\./0\1/; s/\.//g;'`"
|
||||
|
||||
if test "$APR_VERSION_NUM" -ge "200000"; then
|
||||
|
||||
APU_BINDIR=""
|
||||
APU_INCLUDES=""
|
||||
APU_VERSION=""
|
||||
APU_CONFIG=""
|
||||
|
||||
else
|
||||
sinclude(build/find_apu.m4)
|
||||
|
||||
echo $ac_n "${nl}Configuring Apache Portable Runtime Utility library...${nl}"
|
||||
|
||||
ifdef([APR_FIND_APU], [
|
||||
APR_FIND_APU("$srcdir/apr-util", "./apr-util", 1, $APR_VERSION_MAJOR)
|
||||
], [AC_MSG_ERROR([APR-util required, but find_apu.m4 not present!])])
|
||||
|
||||
if test "$apu_found" = "no"; then
|
||||
AC_MSG_ERROR([APR-util not found. Please read the documentation.])
|
||||
fi
|
||||
|
||||
# Catch some misconfigurations:
|
||||
case ${apr_found}.${apu_found} in
|
||||
reconfig.yes)
|
||||
AC_MSG_ERROR([Cannot use an external APR-util with the bundled APR])
|
||||
;;
|
||||
yes.reconfig)
|
||||
AC_MSG_ERROR([Cannot use an external APR with the bundled APR-util])
|
||||
;;
|
||||
esac
|
||||
|
||||
if test "$apu_found" = "reconfig"; then
|
||||
APR_SUBDIR_CONFIG(apr-util,
|
||||
[--with-apr=../apr --prefix=$prefix --exec-prefix=$exec_prefix --libdir=$libdir --includedir=$includedir --bindir=$bindir],
|
||||
[--enable-layout=*|\'--enable-layout=*])
|
||||
dnl We must be the last to build and the first to be cleaned
|
||||
SERF_BUILD_SRCLIB_DIRS="$SERF_BUILD_SRCLIB_DIRS apr-util"
|
||||
SERF_CLEAN_SRCLIB_DIRS="apr-util $SERF_CLEAN_SRCLIB_DIRS"
|
||||
fi
|
||||
|
||||
APR_ADDTO(LDFLAGS, `$apu_config --ldflags`)
|
||||
APU_BINDIR=`$apu_config --bindir`
|
||||
APU_INCLUDES=`$apu_config --includes`
|
||||
APU_VERSION=`$apu_config --version`
|
||||
APU_CONFIG="$APU_BINDIR/apu-`echo ${APU_VERSION} | sed 's,\..*,,'`-config"
|
||||
fi
|
||||
|
||||
AC_SUBST(APU_BINDIR)
|
||||
AC_SUBST(APU_INCLUDES)
|
||||
AC_SUBST(APU_VERSION)
|
||||
AC_SUBST(APU_CONFIG)
|
||||
|
||||
dnl In case we picked up CC and CPP from APR, get that info into the
|
||||
dnl config cache so that PCRE uses it. Otherwise, CC and CPP used for
|
||||
dnl PCRE and for our config tests will be whatever PCRE determines.
|
||||
AC_PROG_CC
|
||||
AC_PROG_CPP
|
||||
AC_PROG_INSTALL
|
||||
|
||||
if test "x${cache_file}" = "x/dev/null"; then
|
||||
# Likewise, ensure that CC and CPP are passed through to the pcre
|
||||
# configure script iff caching is disabled (the autoconf 2.5x default).
|
||||
export CC; export CPP
|
||||
fi
|
||||
|
||||
echo $ac_n "Configuring Serf...${nl}"
|
||||
|
||||
dnl Absolute source/build directory
|
||||
abs_srcdir=`(cd $srcdir && pwd)`
|
||||
abs_builddir=`pwd`
|
||||
|
||||
dnl get our version information
|
||||
get_version="$abs_srcdir/build/get-version.sh"
|
||||
version_hdr="$abs_srcdir/serf.h"
|
||||
SERF_MAJOR_VERSION="`$get_version major $version_hdr SERF`"
|
||||
SERF_DOTTED_VERSION="`$get_version all $version_hdr SERF`"
|
||||
|
||||
AC_SUBST(SERF_MAJOR_VERSION)
|
||||
AC_SUBST(SERF_DOTTED_VERSION)
|
||||
|
||||
AC_SUBST(SERF_BUILD_SRCLIB_DIRS)
|
||||
AC_SUBST(SERF_CLEAN_SRCLIB_DIRS)
|
||||
|
||||
AC_ARG_WITH(openssl,
|
||||
APR_HELP_STRING([--with-openssl=PATH],[Path to OpenSSL (eg. /usr/local/ssl)]),
|
||||
[
|
||||
if test "$withval" = "yes"; then
|
||||
AC_MSG_ERROR([--with-openssl requires a path])
|
||||
else
|
||||
openssl_prefix=$withval
|
||||
|
||||
if test "x$openssl_prefix" != "x" -a ! -d "$openssl_prefix"; then
|
||||
AC_MSG_ERROR('--with-openssl requires a path to a directory')
|
||||
fi
|
||||
|
||||
APR_ADDTO(CPPFLAGS, "-I${openssl_prefix}/include")
|
||||
if test -e "${openssl_prefix}/Makefile"; then
|
||||
APR_ADDTO(LDFLAGS, "-L${openssl_prefix}")
|
||||
APR_ADDTO(LDFLAGS, "-R${openssl_prefix}")
|
||||
else
|
||||
APR_ADDTO(LDFLAGS, "-L${openssl_prefix}/lib")
|
||||
APR_ADDTO(LDFLAGS, "-R${openssl_prefix}/lib")
|
||||
fi
|
||||
fi
|
||||
])
|
||||
|
||||
dnl Look for OpenSSL
|
||||
AC_CHECK_HEADER([openssl/opensslv.h], [],
|
||||
[AC_MSG_ERROR([We require OpenSSL; try --with-openssl])])
|
||||
|
||||
dnl Look for Kerberos 5 for GSSAPI
|
||||
AC_ARG_WITH(gssapi,
|
||||
APR_HELP_STRING([--with-gssapi=PATH],[build with GSSAPI support; needs krb5-config in PATH/bin (eg. /usr/lib/mit)]),
|
||||
[
|
||||
if test "$withval" = "yes"; then
|
||||
AC_MSG_ERROR([--with-gssapi requires a path])
|
||||
else
|
||||
gssapi_prefix=$withval/
|
||||
|
||||
if test "x$gssapi_prefix" != "x" -a ! -d "$gssapi_prefix"; then
|
||||
AC_MSG_ERROR('--with-gssapi requires a path to a directory')
|
||||
fi
|
||||
AC_MSG_CHECKING([for krb5-config])
|
||||
if test -x "$gssapi_prefix/bin/krb5-config"; then
|
||||
krb5conf=$gssapi_prefix/bin/krb5-config
|
||||
AC_MSG_RESULT([$krb5conf])
|
||||
AC_MSG_CHECKING([for gssapi support in krb5-config])
|
||||
if "$krb5conf" | grep gssapi > /dev/null; then
|
||||
AC_MSG_RESULT([yes])
|
||||
GSSAPI_confopts=gssapi
|
||||
else
|
||||
AC_MSG_RESULT([no])
|
||||
GSSAPI_confopts=
|
||||
fi
|
||||
GSSAPI_cflags=`"$krb5conf" --cflags $GSSAPI_confopts`
|
||||
GSSAPI_libs=`"$krb5conf" --libs $GSSAPI_confopts`
|
||||
if test -z "$GSSAPI_confopts"; then
|
||||
case "${host_os}" in
|
||||
solaris*)
|
||||
GSSAPI_cflags="$GSSAPI_cflags -I/usr/include/gssapi"
|
||||
GSSAPI_libs="$GSSAPI_libs -lgss"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
APR_ADDTO(CFLAGS, "$GSSAPI_cflags")
|
||||
APR_ADDTO(CFLAGS, [-DSERF_HAVE_GSSAPI])
|
||||
APR_ADDTO(LDFLAGS, "$GSSAPI_libs")
|
||||
AC_MSG_CHECKING([if gssapi works])
|
||||
AC_LINK_IFELSE([AC_LANG_SOURCE([[
|
||||
#include <gssapi.h>
|
||||
int main()
|
||||
{gss_init_sec_context(NULL, NULL, NULL, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL);}]])],
|
||||
lib_gssapi="yes", lib_gssapi="no")
|
||||
if test "$lib_gssapi" = "yes"; then
|
||||
AC_MSG_RESULT([yes])
|
||||
else
|
||||
AC_MSG_RESULT([no])
|
||||
AC_MSG_ERROR([cannot find GSSAPI (Kerberos)])
|
||||
fi
|
||||
else
|
||||
AC_MSG_RESULT([no])
|
||||
AC_MSG_WARN([--with-gssapi specified but krb5-config not found])
|
||||
fi
|
||||
|
||||
fi
|
||||
])
|
||||
|
||||
|
||||
dnl CuTest requires libm on Solaris
|
||||
AC_SEARCH_LIBS(fabs, m)
|
||||
|
||||
libs=""
|
||||
if test -n "$apu_config"; then
|
||||
APR_ADDTO(libs, [`$apu_config --link-libtool --libs`])
|
||||
fi
|
||||
APR_ADDTO(libs, [`$apr_config --link-libtool --libs` $LIBS])
|
||||
|
||||
APR_ADDTO(SERF_LIBS, [$libs])
|
||||
AC_SUBST(SERF_LIBS)
|
||||
|
||||
APR_RESTORE_THE_ENVIRONMENT(CPPFLAGS, EXTRA_)
|
||||
APR_RESTORE_THE_ENVIRONMENT(CFLAGS, EXTRA_)
|
||||
APR_RESTORE_THE_ENVIRONMENT(CXXFLAGS, EXTRA_)
|
||||
APR_RESTORE_THE_ENVIRONMENT(LDFLAGS, EXTRA_)
|
||||
APR_RESTORE_THE_ENVIRONMENT(LIBS, EXTRA_)
|
||||
APR_RESTORE_THE_ENVIRONMENT(INCLUDES, EXTRA_)
|
||||
|
||||
AC_CONFIG_FILES([Makefile serf.pc])
|
||||
AC_CONFIG_COMMANDS([mkdir-vpath],[make mkdir-vpath])
|
||||
|
||||
AC_OUTPUT
|
392
context.c
Normal file
392
context.c
Normal file
@ -0,0 +1,392 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <apr_pools.h>
|
||||
#include <apr_poll.h>
|
||||
#include <apr_version.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
#include "serf_private.h"
|
||||
|
||||
/* Older versions of APR do not have the APR_VERSION_AT_LEAST macro. Those
|
||||
implementations are safe.
|
||||
|
||||
If the macro *is* defined, and we're on WIN32, and APR is version 1.4.0,
|
||||
then we have a broken WSAPoll() implementation.
|
||||
|
||||
See serf_context_create_ex() below. */
|
||||
#if defined(APR_VERSION_AT_LEAST) && defined(WIN32)
|
||||
#if APR_VERSION_AT_LEAST(1,4,0)
|
||||
#define BROKEN_WSAPOLL
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
/**
|
||||
* Callback function (implements serf_progress_t). Takes a number of bytes
|
||||
* read @a read and bytes written @a written, adds those to the total for this
|
||||
* context and notifies an interested party (if any).
|
||||
*/
|
||||
void serf__context_progress_delta(
|
||||
void *progress_baton,
|
||||
apr_off_t read,
|
||||
apr_off_t written)
|
||||
{
|
||||
serf_context_t *ctx = progress_baton;
|
||||
|
||||
ctx->progress_read += read;
|
||||
ctx->progress_written += written;
|
||||
|
||||
if (ctx->progress_func)
|
||||
ctx->progress_func(ctx->progress_baton,
|
||||
ctx->progress_read,
|
||||
ctx->progress_written);
|
||||
}
|
||||
|
||||
|
||||
/* Check for dirty connections and update their pollsets accordingly. */
|
||||
static apr_status_t check_dirty_pollsets(serf_context_t *ctx)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* if we're not dirty, return now. */
|
||||
if (!ctx->dirty_pollset) {
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
for (i = ctx->conns->nelts; i--; ) {
|
||||
serf_connection_t *conn = GET_CONN(ctx, i);
|
||||
apr_status_t status;
|
||||
|
||||
/* if this connection isn't dirty, skip it. */
|
||||
if (!conn->dirty_conn) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/* reset this connection's flag before we update. */
|
||||
conn->dirty_conn = 0;
|
||||
|
||||
if ((status = serf__conn_update_pollset(conn)) != APR_SUCCESS)
|
||||
return status;
|
||||
}
|
||||
|
||||
/* reset our context flag now */
|
||||
ctx->dirty_pollset = 0;
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
static apr_status_t pollset_add(void *user_baton,
|
||||
apr_pollfd_t *pfd,
|
||||
void *serf_baton)
|
||||
{
|
||||
serf_pollset_t *s = (serf_pollset_t*)user_baton;
|
||||
pfd->client_data = serf_baton;
|
||||
return apr_pollset_add(s->pollset, pfd);
|
||||
}
|
||||
|
||||
static apr_status_t pollset_rm(void *user_baton,
|
||||
apr_pollfd_t *pfd,
|
||||
void *serf_baton)
|
||||
{
|
||||
serf_pollset_t *s = (serf_pollset_t*)user_baton;
|
||||
pfd->client_data = serf_baton;
|
||||
return apr_pollset_remove(s->pollset, pfd);
|
||||
}
|
||||
|
||||
|
||||
void serf_config_proxy(serf_context_t *ctx,
|
||||
apr_sockaddr_t *address)
|
||||
{
|
||||
ctx->proxy_address = address;
|
||||
}
|
||||
|
||||
|
||||
void serf_config_credentials_callback(serf_context_t *ctx,
|
||||
serf_credentials_callback_t cred_cb)
|
||||
{
|
||||
ctx->cred_cb = cred_cb;
|
||||
}
|
||||
|
||||
|
||||
void serf_config_authn_types(serf_context_t *ctx,
|
||||
int authn_types)
|
||||
{
|
||||
ctx->authn_types = authn_types;
|
||||
}
|
||||
|
||||
|
||||
serf_context_t *serf_context_create_ex(
|
||||
void *user_baton,
|
||||
serf_socket_add_t addf,
|
||||
serf_socket_remove_t rmf,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
serf_context_t *ctx = apr_pcalloc(pool, sizeof(*ctx));
|
||||
|
||||
ctx->pool = pool;
|
||||
|
||||
if (user_baton != NULL) {
|
||||
ctx->pollset_baton = user_baton;
|
||||
ctx->pollset_add = addf;
|
||||
ctx->pollset_rm = rmf;
|
||||
}
|
||||
else {
|
||||
/* build the pollset with a (default) number of connections */
|
||||
serf_pollset_t *ps = apr_pcalloc(pool, sizeof(*ps));
|
||||
|
||||
/* ### TODO: As of APR 1.4.x apr_pollset_create_ex can return a status
|
||||
### other than APR_SUCCESS, so we should handle it.
|
||||
### Probably move creation of the pollset to later when we have
|
||||
### the possibility of returning status to the caller.
|
||||
*/
|
||||
#ifdef BROKEN_WSAPOLL
|
||||
/* APR 1.4.x switched to using WSAPoll() on Win32, but it does not
|
||||
* properly handle errors on a non-blocking sockets (such as
|
||||
* connecting to a server where no listener is active).
|
||||
*
|
||||
* So, sadly, we must force using select() on Win32.
|
||||
*
|
||||
* http://mail-archives.apache.org/mod_mbox/apr-dev/201105.mbox/%3CBANLkTin3rBCecCBRvzUA5B-14u-NWxR_Kg@mail.gmail.com%3E
|
||||
*/
|
||||
(void) apr_pollset_create_ex(&ps->pollset, MAX_CONN, pool, 0,
|
||||
APR_POLLSET_SELECT);
|
||||
#else
|
||||
(void) apr_pollset_create(&ps->pollset, MAX_CONN, pool, 0);
|
||||
#endif
|
||||
ctx->pollset_baton = ps;
|
||||
ctx->pollset_add = pollset_add;
|
||||
ctx->pollset_rm = pollset_rm;
|
||||
}
|
||||
|
||||
/* default to a single connection since that is the typical case */
|
||||
ctx->conns = apr_array_make(pool, 1, sizeof(serf_connection_t *));
|
||||
|
||||
/* Initialize progress status */
|
||||
ctx->progress_read = 0;
|
||||
ctx->progress_written = 0;
|
||||
|
||||
ctx->authn_types = SERF_AUTHN_ALL;
|
||||
|
||||
return ctx;
|
||||
}
|
||||
|
||||
|
||||
serf_context_t *serf_context_create(apr_pool_t *pool)
|
||||
{
|
||||
return serf_context_create_ex(NULL, NULL, NULL, pool);
|
||||
}
|
||||
|
||||
apr_status_t serf_context_prerun(serf_context_t *ctx)
|
||||
{
|
||||
apr_status_t status = APR_SUCCESS;
|
||||
if ((status = serf__open_connections(ctx)) != APR_SUCCESS)
|
||||
return status;
|
||||
|
||||
if ((status = check_dirty_pollsets(ctx)) != APR_SUCCESS)
|
||||
return status;
|
||||
return status;
|
||||
}
|
||||
|
||||
|
||||
/* Dispatch a single pollset event to the I/O object it belongs to.
 *
 * @a serf_baton is the serf_io_baton_t that was registered with the
 * pollset; its type field says whether the event is for an outgoing
 * connection, a listener, or an incoming (server-side) client.
 * @a desc is the descriptor as returned by apr_pollset_poll().
 * (@a s is unused on the connection path: the context is reached via
 * conn->ctx instead.)
 *
 * Returns APR_SUCCESS, or the error produced while processing the
 * event (which, for connections, is also latched into conn->status).
 */
apr_status_t serf_event_trigger(
    serf_context_t *s,
    void *serf_baton,
    const apr_pollfd_t *desc)
{
    apr_pollfd_t tdesc = { 0 };
    apr_status_t status = APR_SUCCESS;
    serf_io_baton_t *io = serf_baton;

    if (io->type == SERF_IO_CONN) {
        serf_connection_t *conn = io->u.conn;
        serf_context_t *ctx = conn->ctx;

        /* If this connection has already failed, return the error again, and try
         * to remove it from the pollset again
         */
        if (conn->status) {
            tdesc.desc_type = APR_POLL_SOCKET;
            tdesc.desc.s = conn->skt;
            tdesc.reqevents = conn->reqevents;
            ctx->pollset_rm(ctx->pollset_baton,
                            &tdesc, conn);
            return conn->status;
        }
        /* apr_pollset_poll() can return a conn multiple times...
         * Skip events already seen this iteration; once a POLLHUP has
         * been recorded for the conn, ignore everything further. */
        if ((conn->seen_in_pollset & desc->rtnevents) != 0 ||
            (conn->seen_in_pollset & APR_POLLHUP) != 0) {
            return APR_SUCCESS;
        }

        /* Remember which events we have now handled for this conn. */
        conn->seen_in_pollset |= desc->rtnevents;

        if ((conn->status = serf__process_connection(conn,
                                                     desc->rtnevents)) != APR_SUCCESS) {

            /* it's possible that the connection was already reset and thus the
               socket cleaned up. */
            if (conn->skt) {
                tdesc.desc_type = APR_POLL_SOCKET;
                tdesc.desc.s = conn->skt;
                tdesc.reqevents = conn->reqevents;
                ctx->pollset_rm(ctx->pollset_baton,
                                &tdesc, conn);
            }
            return conn->status;
        }
    }
    else if (io->type == SERF_IO_LISTENER) {
        serf_listener_t *l = io->u.listener;

        /* Accept one pending connection on the listening socket. */
        status = serf__process_listener(l);

        if (status) {
            return status;
        }
    }
    else if (io->type == SERF_IO_CLIENT) {
        serf_incoming_t *c = io->u.client;

        /* Server-side client connection: read/write as events allow. */
        status = serf__process_client(c, desc->rtnevents);

        if (status) {
            return status;
        }
    }
    return status;
}
|
||||
|
||||
|
||||
apr_status_t serf_context_run(
|
||||
serf_context_t *ctx,
|
||||
apr_short_interval_time_t duration,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
apr_status_t status;
|
||||
apr_int32_t num;
|
||||
const apr_pollfd_t *desc;
|
||||
serf_pollset_t *ps = (serf_pollset_t*)ctx->pollset_baton;
|
||||
|
||||
if ((status = serf_context_prerun(ctx)) != APR_SUCCESS) {
|
||||
return status;
|
||||
}
|
||||
|
||||
if ((status = apr_pollset_poll(ps->pollset, duration, &num,
|
||||
&desc)) != APR_SUCCESS) {
|
||||
/* EINTR indicates a handled signal happened during the poll call,
|
||||
ignore, the application can safely retry. */
|
||||
if (APR_STATUS_IS_EINTR(status))
|
||||
return APR_SUCCESS;
|
||||
|
||||
/* ### do we still need to dispatch stuff here?
|
||||
### look at the potential return codes. map to our defined
|
||||
### return values? ...
|
||||
*/
|
||||
return status;
|
||||
}
|
||||
|
||||
while (num--) {
|
||||
serf_connection_t *conn = desc->client_data;
|
||||
|
||||
status = serf_event_trigger(ctx, conn, desc);
|
||||
if (status) {
|
||||
return status;
|
||||
}
|
||||
|
||||
desc++;
|
||||
}
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
void serf_context_set_progress_cb(
|
||||
serf_context_t *ctx,
|
||||
const serf_progress_t progress_func,
|
||||
void *progress_baton)
|
||||
{
|
||||
ctx->progress_func = progress_func;
|
||||
ctx->progress_baton = progress_baton;
|
||||
}
|
||||
|
||||
|
||||
serf_bucket_t *serf_context_bucket_socket_create(
|
||||
serf_context_t *ctx,
|
||||
apr_socket_t *skt,
|
||||
serf_bucket_alloc_t *allocator)
|
||||
{
|
||||
serf_bucket_t *bucket = serf_bucket_socket_create(skt, allocator);
|
||||
|
||||
/* Use serf's default bytes read/written callback */
|
||||
serf_bucket_socket_set_read_progress_cb(bucket,
|
||||
serf__context_progress_delta,
|
||||
ctx);
|
||||
|
||||
return bucket;
|
||||
}
|
||||
|
||||
|
||||
/* ### this really ought to go somewhere else, but... meh. */
|
||||
void serf_lib_version(int *major, int *minor, int *patch)
|
||||
{
|
||||
*major = SERF_MAJOR_VERSION;
|
||||
*minor = SERF_MINOR_VERSION;
|
||||
*patch = SERF_PATCH_VERSION;
|
||||
}
|
||||
|
||||
|
||||
/* Map a serf-specific error code to a human-readable English message.
 *
 * Returns a pointer to a static string for any SERF_ERROR_* code, or
 * NULL when @a errcode is not serf-specific (callers should then fall
 * back to apr_strerror()).  The returned string must not be freed.
 */
const char *serf_error_string(apr_status_t errcode)
{
    switch (errcode)
    {
    case SERF_ERROR_CLOSING:
        return "The connection is closing";
    case SERF_ERROR_REQUEST_LOST:
        return "A request has been lost";
    case SERF_ERROR_WAIT_CONN:
        return "The connection is blocked, pending further action";
    case SERF_ERROR_DECOMPRESSION_FAILED:
        return "An error occurred during decompression";
    case SERF_ERROR_BAD_HTTP_RESPONSE:
        return "The server sent an improper HTTP response";
    case SERF_ERROR_TRUNCATED_HTTP_RESPONSE:
        return "The server sent a truncated HTTP response body.";
    case SERF_ERROR_ABORTED_CONNECTION:
        return "The server unexpectedly closed the connection.";
    case SERF_ERROR_SSL_COMM_FAILED:
        return "An error occurred during SSL communication";
    case SERF_ERROR_SSL_CERT_FAILED:
        /* Fix: the original message carried a stray trailing space. */
        return "An SSL certificate related error occurred";
    case SERF_ERROR_AUTHN_FAILED:
        return "An error occurred during authentication";
    case SERF_ERROR_AUTHN_NOT_SUPPORTED:
        return "The requested authentication type(s) are not supported";
    case SERF_ERROR_AUTHN_MISSING_ATTRIBUTE:
        return "An authentication attribute is missing";
    case SERF_ERROR_AUTHN_INITALIZATION_FAILED:
        return "Initialization of an authentication type failed";
    case SERF_ERROR_SSLTUNNEL_SETUP_FAILED:
        return "The proxy server returned an error while setting up the "
               "SSL tunnel.";
    default:
        /* Not one of ours. */
        return NULL;
    }

    /* NOTREACHED */
}
|
152
design-guide.txt
Normal file
152
design-guide.txt
Normal file
@ -0,0 +1,152 @@
|
||||
APACHE COMMONS: serf -*-indented-text-*-
|
||||
|
||||
|
||||
TOPICS
|
||||
|
||||
1. Introduction
|
||||
2. Thread Safety
|
||||
3. Pool Usage
|
||||
4. Bucket Read Functions
|
||||
5. Versioning
|
||||
6. Bucket lifetimes
|
||||
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
|
||||
1. INTRODUCTION
|
||||
|
||||
This document details various design choices for the serf library. It
|
||||
is intended to be a guide for serf developers. Of course, these design
|
||||
principles, choices made, etc are a good source of information for
|
||||
users of the serf library, too.
|
||||
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
|
||||
2. THREAD SAFETY
|
||||
|
||||
The serf library should contain no mutable globals, making it safe
|
||||
to use in a multi-threaded environment.
|
||||
|
||||
Each "object" within the system does not need to be used from multiple
|
||||
threads at a time. Thus, they require no internal mutexes, and can
|
||||
disable mutexes within APR objects where applicable (e.g. pools that
|
||||
are created).
|
||||
|
||||
The objects should not have any thread affinity (i.e. don't use
|
||||
thread-local storage). This enables an application to use external
|
||||
mutexes to guard entry to the serf objects, which then allows the
|
||||
objects to be used from multiple threads.
|
||||
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
|
||||
3. POOL USAGE
|
||||
|
||||
For general information on the proper use of pools, please see:
|
||||
|
||||
http://cvs.apache.org/viewcvs/*checkout*/apr/docs/pool-design.html
|
||||
|
||||
Within serf itself, the buckets introduce a significant issue related
|
||||
to pools. Since it is very possible to end up creating *many* buckets
|
||||
within a transaction, and that creation could be proportional to an
|
||||
incoming or outgoing data stream, a lot of care must be taken to avoid
|
||||
tying bucket allocations to pools. If a bucket allocated any internal
|
||||
memory against a pool, and if that bucket is created an unbounded
|
||||
number of times, then the pool memory could be exhausted.
|
||||
|
||||
Thus, buckets are allocated using a custom allocator which allows the
|
||||
memory to be freed when that bucket is no longer needed. This
|
||||
contrasts with pools where the "free" operation occurs over a large
|
||||
set of objects, which is problematic if some are still in use.
|
||||
|
||||
### need more explanation of strategy/solution ...
|
||||
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
|
||||
4. BUCKET READ FUNCTIONS
|
||||
|
||||
The bucket reading and peek functions must not block. Each read
|
||||
function should return (up to) the specified amount of data. If
|
||||
SERF_READ_ALL_AVAIL is passed, then the function should provide
|
||||
whatever is immediately available, without blocking.
|
||||
|
||||
The peek function does not take a requested length because it is
|
||||
non-destructive. It is not possible to "read past" any barrier with a
|
||||
peek function. Thus, peek should operate like SERF_READ_ALL_AVAIL.
|
||||
|
||||
The return values from the read functions should follow this general
|
||||
pattern:
|
||||
|
||||
APR_SUCCESS Some data was returned, and the caller can
|
||||
immediately call the read function again to read
|
||||
more data.
|
||||
|
||||
NOTE: when bucket behavior tracking is enabled,
|
||||
then you must read more data from this bucket
|
||||
before returning to the serf context loop. If a
|
||||
bucket is not completely drained first, then it is
|
||||
possible to deadlock (the server might not read
|
||||
anything until you read everything it has already
|
||||
given to you).
|
||||
|
||||
APR_EAGAIN Some data was returned, but no more is available
|
||||
for now. The caller must "wait for a bit" or wait
|
||||
for some event before attempting to read again
|
||||
(basically, this simply means re-run the serf
|
||||
context loop). Though it shouldn't be done, reading
|
||||
again will, in all likelihood, return zero length
|
||||
data and APR_EAGAIN again.
|
||||
|
||||
NOTE: when bucket behavior tracking is enabled,
|
||||
then it is illegal to immediately read a bucket
|
||||
again after it has returned APR_EAGAIN. You must
|
||||
run the serf context loop again to (potentially)
|
||||
fetch more data for the bucket.
|
||||
|
||||
APR_EOF Some data was returned, and this bucket has no more
|
||||
data available and should not be read again. If you
|
||||
happen to read it again, then it will return zero
|
||||
length data and APR_EOF.
|
||||
|
||||
NOTE: when bucket behavior tracking is enabled,
|
||||
then it is illegal to read this bucket ever again.
|
||||
|
||||
other An error has occurred. No data was returned. The
|
||||
returned length is undefined.
|
||||
|
||||
In the above paragraphs, when it says "some data was returned", note
|
||||
that this could be data of length zero.
|
||||
|
||||
If a length of zero is returned, then the caller should not attempt to
|
||||
dereference the data pointer. It may be invalid. Note that there is no
|
||||
reason to dereference that pointer, since it doesn't point to any
|
||||
valid data.
|
||||
|
||||
Any data returned by the bucket should live as long as the bucket, or
|
||||
until the next read or peek occurs.
|
||||
|
||||
The read_bucket function falls into a very different pattern. See its
|
||||
doc string for more information.
|
||||
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
|
||||
5. VERSIONING
|
||||
|
||||
The serf project uses the APR versioning guidelines described here:
|
||||
|
||||
http://apr.apache.org/versioning.html
|
||||
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
|
||||
6. BUCKET LIFETIMES
|
||||
|
||||
### flesh out. basically: if you hold a bucket pointer, then you own
|
||||
### it. passing a bucket into another transfers ownership. use barrier
|
||||
### buckets to limit destruction of a tree of buckets.
|
||||
|
||||
|
||||
-----------------------------------------------------------------------------
|
176
incoming.c
Normal file
176
incoming.c
Normal file
@ -0,0 +1,176 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <apr_pools.h>
|
||||
#include <apr_poll.h>
|
||||
#include <apr_version.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_bucket_util.h"
|
||||
|
||||
#include "serf_private.h"
|
||||
|
||||
/* Read pending request data from an incoming (server-side) connection.
 * ### stub: serf's server-side support is unfinished, so this always
 * ### reports APR_ENOTIMPL. */
static apr_status_t read_from_client(serf_incoming_t *client)
{
    return APR_ENOTIMPL;
}
|
||||
|
||||
/* Write pending response data to an incoming (server-side) connection.
 * ### stub: serf's server-side support is unfinished, so this always
 * ### reports APR_ENOTIMPL. */
static apr_status_t write_to_client(serf_incoming_t *client)
{
    return APR_ENOTIMPL;
}
|
||||
|
||||
/* Handle the pollset events reported for a server-side client
 * connection: service readable data first, then hangup/error
 * conditions, then writability.  Returns the first failure, or
 * APR_SUCCESS.
 */
apr_status_t serf__process_client(serf_incoming_t *client, apr_int16_t events)
{
    apr_status_t status;

    /* Incoming data takes priority. */
    if (events & APR_POLLIN) {
        status = read_from_client(client);
        if (status)
            return status;
    }

    /* The peer hung up on us. */
    if (events & APR_POLLHUP)
        return APR_ECONNRESET;

    /* Some other socket-level error. */
    if (events & APR_POLLERR)
        return APR_EGENERAL;

    /* The socket became writable again. */
    if (events & APR_POLLOUT) {
        status = write_to_client(client);
        if (status)
            return status;
    }

    return APR_SUCCESS;
}
|
||||
|
||||
/* Accept one pending connection on listener @a l and hand it to the
 * application's accept callback.  The accepted socket lives in a fresh
 * subpool; on any failure that subpool is destroyed before returning,
 * otherwise ownership passes to the accept callback.
 */
apr_status_t serf__process_listener(serf_listener_t *l)
{
    apr_pool_t *p;
    apr_socket_t *in;
    apr_status_t rv;

    /* THIS IS NOT OPTIMAL */
    apr_pool_create(&p, l->pool);

    rv = apr_socket_accept(&in, l->skt, p);
    if (rv)
        goto fail;

    rv = l->accept_func(l->ctx, l, l->accept_baton, in, p);
    if (rv)
        goto fail;

    return rv;

fail:
    apr_pool_destroy(p);
    return rv;
}
|
||||
|
||||
|
||||
apr_status_t serf_incoming_create(
|
||||
serf_incoming_t **client,
|
||||
serf_context_t *ctx,
|
||||
apr_socket_t *insock,
|
||||
void *request_baton,
|
||||
serf_incoming_request_cb_t request,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
apr_status_t rv;
|
||||
serf_incoming_t *ic = apr_palloc(pool, sizeof(*ic));
|
||||
|
||||
ic->ctx = ctx;
|
||||
ic->baton.type = SERF_IO_CLIENT;
|
||||
ic->baton.u.client = ic;
|
||||
ic->request_baton = request_baton;
|
||||
ic->request = request;
|
||||
ic->skt = insock;
|
||||
ic->desc.desc_type = APR_POLL_SOCKET;
|
||||
ic->desc.desc.s = ic->skt;
|
||||
ic->desc.reqevents = APR_POLLIN;
|
||||
|
||||
rv = ctx->pollset_add(ctx->pollset_baton,
|
||||
&ic->desc, &ic->baton);
|
||||
*client = ic;
|
||||
|
||||
return rv;
|
||||
}
|
||||
|
||||
|
||||
apr_status_t serf_listener_create(
|
||||
serf_listener_t **listener,
|
||||
serf_context_t *ctx,
|
||||
const char *host,
|
||||
apr_uint16_t port,
|
||||
void *accept_baton,
|
||||
serf_accept_client_t accept,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
apr_sockaddr_t *sa;
|
||||
apr_status_t rv;
|
||||
serf_listener_t *l = apr_palloc(pool, sizeof(*l));
|
||||
|
||||
l->ctx = ctx;
|
||||
l->baton.type = SERF_IO_LISTENER;
|
||||
l->baton.u.listener = l;
|
||||
l->accept_func = accept;
|
||||
l->accept_baton = accept_baton;
|
||||
|
||||
apr_pool_create(&l->pool, pool);
|
||||
|
||||
rv = apr_sockaddr_info_get(&sa, host, APR_UNSPEC, port, 0, l->pool);
|
||||
if (rv)
|
||||
return rv;
|
||||
|
||||
rv = apr_socket_create(&l->skt, sa->family,
|
||||
SOCK_STREAM,
|
||||
#if APR_MAJOR_VERSION > 0
|
||||
APR_PROTO_TCP,
|
||||
#endif
|
||||
l->pool);
|
||||
if (rv)
|
||||
return rv;
|
||||
|
||||
rv = apr_socket_opt_set(l->skt, APR_SO_REUSEADDR, 1);
|
||||
if (rv)
|
||||
return rv;
|
||||
|
||||
rv = apr_socket_bind(l->skt, sa);
|
||||
if (rv)
|
||||
return rv;
|
||||
|
||||
rv = apr_socket_listen(l->skt, 5);
|
||||
if (rv)
|
||||
return rv;
|
||||
|
||||
l->desc.desc_type = APR_POLL_SOCKET;
|
||||
l->desc.desc.s = l->skt;
|
||||
l->desc.reqevents = APR_POLLIN;
|
||||
|
||||
rv = ctx->pollset_add(ctx->pollset_baton,
|
||||
&l->desc, &l->baton);
|
||||
if (rv)
|
||||
return rv;
|
||||
|
||||
*listener = l;
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
1505
outgoing.c
Normal file
1505
outgoing.c
Normal file
File diff suppressed because it is too large
Load Diff
215
serf.mak
Normal file
215
serf.mak
Normal file
@ -0,0 +1,215 @@
|
||||
#**** serf Win32 -*- Makefile -*- ********************************************
#
# Define DEBUG_BUILD to create a debug version of the library.

!IF "$(OS)" == "Windows_NT"
NULL=
!ELSE
NULL=nul
!ENDIF

CFLAGS = /Zi /W3 /EHsc /I "./"

# Pick Release or Debug output directory and compiler flags.
!IF "$(DEBUG_BUILD)" == ""
INTDIR = Release
CFLAGS = /MD /O2 /D "NDEBUG" $(CFLAGS)
STATIC_LIB = $(INTDIR)\serf-1.lib
!ELSE
INTDIR = Debug
CFLAGS = /MDd /Od /W3 /Gm /D "_DEBUG" $(CFLAGS)
STATIC_LIB = $(INTDIR)\serf-1.lib
!ENDIF

########
# Support for OpenSSL integration
!IF "$(OPENSSL_SRC)" == ""
!ERROR OpenSSL is required. Please define OPENSSL_SRC.
!ELSE
OPENSSL_FLAGS = /I "$(OPENSSL_SRC)\inc32"
!ENDIF

# When building against an httpd source tree, derive APR/APR-Util paths.
!IF "$(HTTPD_SRC)" != ""
!IF "$(APR_SRC)" == ""
APR_SRC=$(HTTPD_SRC)\srclib\apr
!ENDIF

!IF "$(APRUTIL_SRC)" == ""
APRUTIL_SRC=$(HTTPD_SRC)\srclib\apr-util
!ENDIF

!ENDIF

########
# APR
!IF "$(APR_SRC)" == ""
!ERROR APR is required. Please define APR_SRC or HTTPD_SRC.
!ENDIF

APR_FLAGS = /I "$(APR_SRC)\include"
!IF [IF EXIST "$(APR_SRC)\$(INTDIR)\libapr-1.lib" exit 1] == 1
APR_LIBS = "$(APR_SRC)\$(INTDIR)\libapr-1.lib"
!ELSE
APR_LIBS = "$(APR_SRC)\$(INTDIR)\libapr.lib"
!ENDIF

########
# APR Util
!IF "$(APRUTIL_SRC)" == ""
!ERROR APR-Util is required. Please define APRUTIL_SRC or HTTPD_SRC.
!ENDIF

APRUTIL_FLAGS = /I "$(APRUTIL_SRC)\include"
!IF [IF EXIST "$(APRUTIL_SRC)\$(INTDIR)\libaprutil-1.lib" exit 1] == 1
APRUTIL_LIBS = "$(APRUTIL_SRC)\$(INTDIR)\libaprutil-1.lib"
!ELSE
APRUTIL_LIBS = "$(APRUTIL_SRC)\$(INTDIR)\libaprutil.lib"
!ENDIF

########
# Support for zlib integration
!IF "$(ZLIB_SRC)" == ""
!ERROR ZLib is required. Please define ZLIB_SRC.
!ELSE
ZLIB_FLAGS = /I "$(ZLIB_SRC)"
!IF "$(ZLIB_DLL)" == ""
!IF "$(ZLIB_LIBDIR)" == ""
!IF "$(DEBUG_BUILD)" == ""
ZLIB_LIBS = "$(ZLIB_SRC)\zlibstat.lib"
!ELSE
ZLIB_LIBS = "$(ZLIB_SRC)\zlibstatD.lib"
!ENDIF
!ELSE
ZLIB_LIBS = "$(ZLIB_LIBDIR)\x86\ZlibStat$(INTDIR)\zlibstat.lib"
ZLIB_FLAGS = $(ZLIB_FLAGS) /D ZLIB_WINAPI
!ENDIF
!ELSE
ZLIB_FLAGS = $(ZLIB_FLAGS) /D ZLIB_DLL
ZLIB_LIBS = "$(ZLIB_SRC)\zlibdll.lib"
!ENDIF
!ENDIF


# Exclude stuff we don't need from the Win32 headers
WIN32_DEFS = /D WIN32 /D WIN32_LEAN_AND_MEAN /D NOUSER /D NOGDI /D NONLS /D NOCRYPT /D SERF_HAVE_SSPI

CPP=cl.exe
CPP_PROJ = /c /nologo $(CFLAGS) $(WIN32_DEFS) $(APR_FLAGS) $(APRUTIL_FLAGS) $(OPENSSL_FLAGS) $(ZLIB_FLAGS) /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\\"
LIB32=link.exe
LIB32_FLAGS=/nologo

# Object files that make up the static serf library.
LIB32_OBJS= \
	"$(INTDIR)\aggregate_buckets.obj" \
	"$(INTDIR)\auth.obj" \
	"$(INTDIR)\auth_basic.obj" \
	"$(INTDIR)\auth_digest.obj" \
	"$(INTDIR)\auth_kerb.obj" \
	"$(INTDIR)\auth_kerb_gss.obj" \
	"$(INTDIR)\auth_kerb_sspi.obj" \
	"$(INTDIR)\context.obj" \
	"$(INTDIR)\ssltunnel.obj" \
	"$(INTDIR)\allocator.obj" \
	"$(INTDIR)\barrier_buckets.obj" \
	"$(INTDIR)\buckets.obj" \
	"$(INTDIR)\chunk_buckets.obj" \
	"$(INTDIR)\dechunk_buckets.obj" \
	"$(INTDIR)\deflate_buckets.obj" \
	"$(INTDIR)\file_buckets.obj" \
	"$(INTDIR)\headers_buckets.obj" \
	"$(INTDIR)\incoming.obj" \
	"$(INTDIR)\iovec_buckets.obj" \
	"$(INTDIR)\limit_buckets.obj" \
	"$(INTDIR)\mmap_buckets.obj" \
	"$(INTDIR)\outgoing.obj" \
	"$(INTDIR)\request_buckets.obj" \
	"$(INTDIR)\response_buckets.obj" \
	"$(INTDIR)\response_body_buckets.obj" \
	"$(INTDIR)\simple_buckets.obj" \
	"$(INTDIR)\socket_buckets.obj" \
	"$(INTDIR)\ssl_buckets.obj" \

!IFDEF OPENSSL_STATIC
LIB32_OBJS = $(LIB32_OBJS) "$(OPENSSL_SRC)\out32\libeay32.lib" \
             "$(OPENSSL_SRC)\out32\ssleay32.lib"
!ELSE
LIB32_OBJS = $(LIB32_OBJS) "$(OPENSSL_SRC)\out32dll\libeay32.lib" \
             "$(OPENSSL_SRC)\out32dll\ssleay32.lib"
!ENDIF

LIB32_OBJS = $(LIB32_OBJS) $(APR_LIBS) $(APRUTIL_LIBS) $(ZLIB_LIBS)

# secur32.lib provides the SSPI entry points used by auth_kerb_sspi.c.
SYS_LIBS = secur32.lib

TEST_OBJS = \
	"$(INTDIR)\CuTest.obj" \
	"$(INTDIR)\test_all.obj" \
	"$(INTDIR)\test_util.obj" \
	"$(INTDIR)\test_context.obj" \
	"$(INTDIR)\test_buckets.obj" \
	"$(INTDIR)\test_ssl.obj" \
	"$(INTDIR)\test_server.obj" \
	"$(INTDIR)\test_sslserver.obj" \

TEST_LIBS = user32.lib advapi32.lib gdi32.lib ws2_32.lib


ALL: $(INTDIR) $(STATIC_LIB) TESTS

CLEAN:
	-@erase /q "$(INTDIR)" >nul

$(INTDIR):
	-@if not exist "$(INTDIR)/$(NULL)" mkdir "$(INTDIR)"

TESTS: $(STATIC_LIB) $(INTDIR)\serf_response.exe $(INTDIR)\serf_get.exe \
	$(INTDIR)\serf_request.exe $(INTDIR)\test_all.exe

CHECK: $(INTDIR) TESTS
	$(INTDIR)\serf_response.exe test\testcases\simple.response
	$(INTDIR)\serf_response.exe test\testcases\chunked-empty.response
	$(INTDIR)\serf_response.exe test\testcases\chunked.response
	$(INTDIR)\serf_response.exe test\testcases\chunked-trailers.response
	$(INTDIR)\serf_response.exe test\testcases\deflate.response
	$(INTDIR)\test_all.exe

"$(STATIC_LIB)": $(INTDIR) $(LIB32_OBJS)
	$(LIB32) -lib @<<
	$(LIB32_FLAGS) $(LIB32_OBJS) $(SYS_LIBS) /OUT:$@
<<

# Inference rules: compile .c from the various source directories.
.c{$(INTDIR)}.obj:
	$(CPP) @<<
	$(CPP_PROJ) $<
<<

{auth}.c{$(INTDIR)}.obj:
	$(CPP) @<<
	$(CPP_PROJ) $<
<<

{buckets}.c{$(INTDIR)}.obj:
	$(CPP) @<<
	$(CPP_PROJ) $<
<<

{test}.c{$(INTDIR)}.obj:
	$(CPP) @<<
	$(CPP_PROJ) $<
<<

{test\server}.c{$(INTDIR)}.obj:
	$(CPP) @<<
	$(CPP_PROJ) $<
<<

$(INTDIR)\serf_response.exe: $(INTDIR)\serf_response.obj $(STATIC_LIB)
	$(LIB32) /DEBUG /OUT:$@ $** $(LIB32_FLAGS) $(TEST_LIBS)

$(INTDIR)\serf_get.exe: $(INTDIR)\serf_get.obj $(STATIC_LIB)
	$(LIB32) /DEBUG /OUT:$@ $** $(LIB32_FLAGS) $(TEST_LIBS)

$(INTDIR)\serf_request.exe: $(INTDIR)\serf_request.obj $(STATIC_LIB)
	$(LIB32) /DEBUG /OUT:$@ $** $(LIB32_FLAGS) $(TEST_LIBS)

$(INTDIR)\test_all.exe: $(TEST_OBJS) $(STATIC_LIB)
	$(LIB32) /DEBUG /OUT:$@ $** $(LIB32_FLAGS) $(TEST_LIBS)
13
serf.pc.in
Normal file
13
serf.pc.in
Normal file
@ -0,0 +1,13 @@
|
||||
# pkg-config template for serf; @tokens@ are substituted at build time.
SERF_MAJOR_VERSION=@SERF_MAJOR_VERSION@
prefix=@prefix@
exec_prefix=@exec_prefix@
libdir=@libdir@
includedir=@includedir@

Name: serf
Description: HTTP client library
Version: @SERF_DOTTED_VERSION@
Requires.private: libssl libcrypto
Libs: -L${libdir} -lserf-${SERF_MAJOR_VERSION}
Libs.private: @EXTRA_LIBS@ @SERF_LIBS@ -lz
Cflags: -I${includedir}
|
678
serf_bucket_types.h
Normal file
678
serf_bucket_types.h
Normal file
@ -0,0 +1,678 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef SERF_BUCKET_TYPES_H
|
||||
#define SERF_BUCKET_TYPES_H
|
||||
|
||||
#include <apr_mmap.h>
|
||||
#include <apr_hash.h>
|
||||
|
||||
/* this header and serf.h refer to each other, so take a little extra care */
|
||||
#ifndef SERF_H
|
||||
#include "serf.h"
|
||||
#endif
|
||||
|
||||
|
||||
/**
|
||||
* @file serf_bucket_types.h
|
||||
* @brief serf-supported bucket types
|
||||
*/
|
||||
/* ### this whole file needs docco ... */
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_request;
|
||||
#define SERF_BUCKET_IS_REQUEST(b) SERF_BUCKET_CHECK((b), request)
|
||||
|
||||
serf_bucket_t *serf_bucket_request_create(
|
||||
const char *method,
|
||||
const char *URI,
|
||||
serf_bucket_t *body,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
/* Send a Content-Length header with @a len. The @a body bucket should
|
||||
contain precisely that much data. */
|
||||
void serf_bucket_request_set_CL(
|
||||
serf_bucket_t *bucket,
|
||||
apr_int64_t len);
|
||||
|
||||
serf_bucket_t *serf_bucket_request_get_headers(
|
||||
serf_bucket_t *request);
|
||||
|
||||
void serf_bucket_request_become(
|
||||
serf_bucket_t *bucket,
|
||||
const char *method,
|
||||
const char *uri,
|
||||
serf_bucket_t *body);
|
||||
|
||||
/**
|
||||
* Sets the root url of the remote host. If this request contains a relative
|
||||
* url, it will be prefixed with the root url to form an absolute url.
|
||||
* @a bucket is the request bucket. @a root_url is the absolute url of the
|
||||
* root of the remote host, without the closing '/'.
|
||||
*/
|
||||
void serf_bucket_request_set_root(
|
||||
serf_bucket_t *bucket,
|
||||
const char *root_url);
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_response;
|
||||
#define SERF_BUCKET_IS_RESPONSE(b) SERF_BUCKET_CHECK((b), response)
|
||||
|
||||
serf_bucket_t *serf_bucket_response_create(
|
||||
serf_bucket_t *stream,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
#define SERF_HTTP_VERSION(major, minor) ((major) * 1000 + (minor))
|
||||
#define SERF_HTTP_11 SERF_HTTP_VERSION(1, 1)
|
||||
#define SERF_HTTP_10 SERF_HTTP_VERSION(1, 0)
|
||||
#define SERF_HTTP_VERSION_MAJOR(shv) ((int)shv / 1000)
|
||||
#define SERF_HTTP_VERSION_MINOR(shv) ((int)shv % 1000)
|
||||
|
||||
typedef struct {
|
||||
int version;
|
||||
int code;
|
||||
const char *reason;
|
||||
} serf_status_line;
|
||||
|
||||
/**
|
||||
* Return the Status-Line information, if available. This function
|
||||
* works like other bucket read functions: it may return APR_EAGAIN or
|
||||
* APR_EOF to signal the state of the bucket for reading. A return
|
||||
* value of APR_SUCCESS will always indicate that status line
|
||||
* information was returned; for other return values the caller must
|
||||
* check the version field in @a sline. A value of 0 means that the
|
||||
* data is not (yet) present.
|
||||
*/
|
||||
apr_status_t serf_bucket_response_status(
|
||||
serf_bucket_t *bkt,
|
||||
serf_status_line *sline);
|
||||
|
||||
/**
|
||||
* Wait for the HTTP headers to be processed for a @a response.
|
||||
*
|
||||
* If the headers are available, APR_SUCCESS is returned.
|
||||
* If the headers aren't available, APR_EAGAIN is returned.
|
||||
*/
|
||||
apr_status_t serf_bucket_response_wait_for_headers(
|
||||
serf_bucket_t *response);
|
||||
|
||||
/**
|
||||
* Get the headers bucket for @a response.
|
||||
*/
|
||||
serf_bucket_t *serf_bucket_response_get_headers(
|
||||
serf_bucket_t *response);
|
||||
|
||||
/**
|
||||
* Advise the response @a bucket that this was from a HEAD request and
|
||||
* that it should not expect to see a response body.
|
||||
*/
|
||||
void serf_bucket_response_set_head(
|
||||
serf_bucket_t *bucket);
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_response_body;
|
||||
#define SERF_BUCKET_IS_RESPONSE_BODY(b) SERF_BUCKET_CHECK((b), response_body)
|
||||
|
||||
serf_bucket_t *serf_bucket_response_body_create(
|
||||
serf_bucket_t *stream,
|
||||
apr_uint64_t limit,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_bwtp_frame;
|
||||
#define SERF_BUCKET_IS_BWTP_FRAME(b) SERF_BUCKET_CHECK((b), bwtp_frame)
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_bwtp_incoming_frame;
|
||||
#define SERF_BUCKET_IS_BWTP_INCOMING_FRAME(b) SERF_BUCKET_CHECK((b), bwtp_incoming_frame)
|
||||
|
||||
int serf_bucket_bwtp_frame_get_channel(
|
||||
serf_bucket_t *hdr);
|
||||
|
||||
int serf_bucket_bwtp_frame_get_type(
|
||||
serf_bucket_t *hdr);
|
||||
|
||||
const char *serf_bucket_bwtp_frame_get_phrase(
|
||||
serf_bucket_t *hdr);
|
||||
|
||||
serf_bucket_t *serf_bucket_bwtp_frame_get_headers(
|
||||
serf_bucket_t *hdr);
|
||||
|
||||
serf_bucket_t *serf_bucket_bwtp_channel_open(
|
||||
int channel,
|
||||
const char *URI,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
serf_bucket_t *serf_bucket_bwtp_channel_close(
|
||||
int channel,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
serf_bucket_t *serf_bucket_bwtp_header_create(
|
||||
int channel,
|
||||
const char *phrase,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
serf_bucket_t *serf_bucket_bwtp_message_create(
|
||||
int channel,
|
||||
serf_bucket_t *body,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
serf_bucket_t *serf_bucket_bwtp_incoming_frame_create(
|
||||
serf_bucket_t *bkt,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
apr_status_t serf_bucket_bwtp_incoming_frame_wait_for_headers(
|
||||
serf_bucket_t *bkt);
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_aggregate;
|
||||
#define SERF_BUCKET_IS_AGGREGATE(b) SERF_BUCKET_CHECK((b), aggregate)
|
||||
|
||||
typedef apr_status_t (*serf_bucket_aggregate_eof_t)(
|
||||
void *baton,
|
||||
serf_bucket_t *aggregate_bucket);
|
||||
|
||||
/** serf_bucket_aggregate_cleanup will instantly destroy all buckets in
|
||||
the aggregate bucket that have been read completely. Whereas normally,
|
||||
these buckets are destroyed on every read operation. */
|
||||
void serf_bucket_aggregate_cleanup(
|
||||
serf_bucket_t *bucket,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
serf_bucket_t *serf_bucket_aggregate_create(
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
/* Creates a stream bucket.
|
||||
A stream bucket is like an aggregate bucket, but:
|
||||
- it doesn't destroy its child buckets on cleanup
|
||||
- one can always keep adding child buckets, the handler FN should return
|
||||
APR_EOF when no more buckets will be added.
|
||||
|
||||
Note: keep this factory function internal for now. If it turns out this
|
||||
bucket type is useful outside serf, we should make it an actual separate
|
||||
type.
|
||||
*/
|
||||
serf_bucket_t *serf__bucket_stream_create(
|
||||
serf_bucket_alloc_t *allocator,
|
||||
serf_bucket_aggregate_eof_t fn,
|
||||
void *baton);
|
||||
|
||||
/** Transform @a bucket in-place into an aggregate bucket. */
|
||||
void serf_bucket_aggregate_become(
|
||||
serf_bucket_t *bucket);
|
||||
|
||||
void serf_bucket_aggregate_prepend(
|
||||
serf_bucket_t *aggregate_bucket,
|
||||
serf_bucket_t *prepend_bucket);
|
||||
|
||||
void serf_bucket_aggregate_append(
|
||||
serf_bucket_t *aggregate_bucket,
|
||||
serf_bucket_t *append_bucket);
|
||||
|
||||
void serf_bucket_aggregate_hold_open(
|
||||
serf_bucket_t *aggregate_bucket,
|
||||
serf_bucket_aggregate_eof_t fn,
|
||||
void *baton);
|
||||
|
||||
void serf_bucket_aggregate_prepend_iovec(
|
||||
serf_bucket_t *aggregate_bucket,
|
||||
struct iovec *vecs,
|
||||
int vecs_count);
|
||||
|
||||
void serf_bucket_aggregate_append_iovec(
|
||||
serf_bucket_t *aggregate_bucket,
|
||||
struct iovec *vecs,
|
||||
int vecs_count);
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_file;
|
||||
#define SERF_BUCKET_IS_FILE(b) SERF_BUCKET_CHECK((b), file)
|
||||
|
||||
serf_bucket_t *serf_bucket_file_create(
|
||||
apr_file_t *file,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_socket;
|
||||
#define SERF_BUCKET_IS_SOCKET(b) SERF_BUCKET_CHECK((b), socket)
|
||||
|
||||
serf_bucket_t *serf_bucket_socket_create(
|
||||
apr_socket_t *skt,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
/**
|
||||
* Call @a progress_func every time bytes are read from the socket, pass
|
||||
* the number of bytes read.
|
||||
*
|
||||
* When using serf's bytes read & written progress indicator, pass
|
||||
* @a serf_context_progress_delta for progress_func and the serf_context for
|
||||
* progress_baton.
|
||||
*/
|
||||
void serf_bucket_socket_set_read_progress_cb(
|
||||
serf_bucket_t *bucket,
|
||||
const serf_progress_t progress_func,
|
||||
void *progress_baton);
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_simple;
|
||||
#define SERF_BUCKET_IS_SIMPLE(b) SERF_BUCKET_CHECK((b), simple)
|
||||
|
||||
typedef void (*serf_simple_freefunc_t)(
|
||||
void *baton,
|
||||
const char *data);
|
||||
|
||||
serf_bucket_t *serf_bucket_simple_create(
|
||||
const char *data,
|
||||
apr_size_t len,
|
||||
serf_simple_freefunc_t freefunc,
|
||||
void *freefunc_baton,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
/**
|
||||
* Equivalent to serf_bucket_simple_create, except that the bucket takes
|
||||
* ownership of a private copy of the data.
|
||||
*/
|
||||
serf_bucket_t *serf_bucket_simple_copy_create(
|
||||
const char *data,
|
||||
apr_size_t len,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
#define SERF_BUCKET_SIMPLE_STRING(s,a) \
|
||||
serf_bucket_simple_create(s, strlen(s), NULL, NULL, a);
|
||||
|
||||
#define SERF_BUCKET_SIMPLE_STRING_LEN(s,l,a) \
|
||||
serf_bucket_simple_create(s, l, NULL, NULL, a);
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
/* Note: apr_mmap_t is always defined, but if APR doesn't have mmaps, then
|
||||
the caller can never create an apr_mmap_t to pass to this function. */
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_mmap;
|
||||
#define SERF_BUCKET_IS_MMAP(b) SERF_BUCKET_CHECK((b), mmap)
|
||||
|
||||
serf_bucket_t *serf_bucket_mmap_create(
|
||||
apr_mmap_t *mmap,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_headers;
|
||||
#define SERF_BUCKET_IS_HEADERS(b) SERF_BUCKET_CHECK((b), headers)
|
||||
|
||||
serf_bucket_t *serf_bucket_headers_create(
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
/**
|
||||
* Set, default: value copied.
|
||||
*
|
||||
* Set the specified @a header within the bucket, copying the @a value
|
||||
* into space from this bucket's allocator. The header is NOT copied,
|
||||
* so it should remain in scope at least as long as the bucket.
|
||||
*/
|
||||
void serf_bucket_headers_set(
|
||||
serf_bucket_t *headers_bucket,
|
||||
const char *header,
|
||||
const char *value);
|
||||
|
||||
/**
|
||||
* Set, copies: header and value copied.
|
||||
*
|
||||
* Copy the specified @a header and @a value into the bucket, using space
|
||||
* from this bucket's allocator.
|
||||
*/
|
||||
void serf_bucket_headers_setc(
|
||||
serf_bucket_t *headers_bucket,
|
||||
const char *header,
|
||||
const char *value);
|
||||
|
||||
/**
|
||||
* Set, no copies.
|
||||
*
|
||||
* Set the specified @a header and @a value into the bucket, without
|
||||
* copying either attribute. Both attributes should remain in scope at
|
||||
* least as long as the bucket.
|
||||
*
|
||||
* @note In the case where a header already exists this will result
|
||||
* in a reallocation and copy, @see serf_bucket_headers_setn.
|
||||
*/
|
||||
void serf_bucket_headers_setn(
|
||||
serf_bucket_t *headers_bucket,
|
||||
const char *header,
|
||||
const char *value);
|
||||
|
||||
/**
|
||||
* Set, extended: fine grained copy control of header and value.
|
||||
*
|
||||
* Set the specified @a header, with length @a header_size with the
|
||||
* @a value, and length @a value_size, into the bucket. The header will
|
||||
* be copied if @a header_copy is set, and the value is copied if
|
||||
* @a value_copy is set. If the values are not copied, then they should
|
||||
* remain in scope at least as long as the bucket.
|
||||
*
|
||||
* If @a headers_bucket already contains a header with the same name
|
||||
* as @a header, then append @a value to the existing value,
|
||||
* separating with a comma (as per RFC 2616, section 4.2). In this
|
||||
* case, the new value must be allocated and the header re-used, so
|
||||
* behave as if @a value_copy were true and @a header_copy false.
|
||||
*/
|
||||
void serf_bucket_headers_setx(
|
||||
serf_bucket_t *headers_bucket,
|
||||
const char *header,
|
||||
apr_size_t header_size,
|
||||
int header_copy,
|
||||
const char *value,
|
||||
apr_size_t value_size,
|
||||
int value_copy);
|
||||
|
||||
const char *serf_bucket_headers_get(
|
||||
serf_bucket_t *headers_bucket,
|
||||
const char *header);
|
||||
|
||||
/**
|
||||
* @param baton opaque baton as passed to @see serf_bucket_headers_do
|
||||
* @param key The header key from this iteration through the table
|
||||
* @param value The header value from this iteration through the table
|
||||
*/
|
||||
typedef int (serf_bucket_headers_do_callback_fn_t)(
|
||||
void *baton,
|
||||
const char *key,
|
||||
const char *value);
|
||||
|
||||
/**
|
||||
* Iterates over all headers of the message and invokes the callback
|
||||
* function with header key and value. Stop iterating when no more
|
||||
* headers are available or when the callback function returned a
|
||||
* non-0 value.
|
||||
*
|
||||
* @param headers_bucket headers to iterate over
|
||||
* @param func callback routine to invoke for every header in the bucket
|
||||
* @param baton baton to pass on each invocation to func
|
||||
*/
|
||||
void serf_bucket_headers_do(
|
||||
serf_bucket_t *headers_bucket,
|
||||
serf_bucket_headers_do_callback_fn_t func,
|
||||
void *baton);
|
||||
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_chunk;
|
||||
#define SERF_BUCKET_IS_CHUNK(b) SERF_BUCKET_CHECK((b), chunk)
|
||||
|
||||
serf_bucket_t *serf_bucket_chunk_create(
|
||||
serf_bucket_t *stream,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_dechunk;
|
||||
#define SERF_BUCKET_IS_DECHUNK(b) SERF_BUCKET_CHECK((b), dechunk)
|
||||
|
||||
serf_bucket_t *serf_bucket_dechunk_create(
|
||||
serf_bucket_t *stream,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_deflate;
|
||||
#define SERF_BUCKET_IS_DEFLATE(b) SERF_BUCKET_CHECK((b), deflate)
|
||||
|
||||
#define SERF_DEFLATE_GZIP 0
|
||||
#define SERF_DEFLATE_DEFLATE 1
|
||||
|
||||
serf_bucket_t *serf_bucket_deflate_create(
|
||||
serf_bucket_t *stream,
|
||||
serf_bucket_alloc_t *allocator,
|
||||
int format);
|
||||
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_limit;
|
||||
#define SERF_BUCKET_IS_LIMIT(b) SERF_BUCKET_CHECK((b), limit)
|
||||
|
||||
serf_bucket_t *serf_bucket_limit_create(
|
||||
serf_bucket_t *stream,
|
||||
apr_uint64_t limit,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
|
||||
/* ==================================================================== */
|
||||
#define SERF_SSL_CERT_NOTYETVALID 1
|
||||
#define SERF_SSL_CERT_EXPIRED 2
|
||||
#define SERF_SSL_CERT_UNKNOWNCA 4
|
||||
#define SERF_SSL_CERT_SELF_SIGNED 8
|
||||
#define SERF_SSL_CERT_UNKNOWN_FAILURE 16
|
||||
#define SERF_SSL_CERT_REVOKED 32
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_ssl_encrypt;
|
||||
#define SERF_BUCKET_IS_SSL_ENCRYPT(b) SERF_BUCKET_CHECK((b), ssl_encrypt)
|
||||
|
||||
typedef struct serf_ssl_context_t serf_ssl_context_t;
|
||||
typedef struct serf_ssl_certificate_t serf_ssl_certificate_t;
|
||||
|
||||
typedef apr_status_t (*serf_ssl_need_client_cert_t)(
|
||||
void *data,
|
||||
const char **cert_path);
|
||||
|
||||
typedef apr_status_t (*serf_ssl_need_cert_password_t)(
|
||||
void *data,
|
||||
const char *cert_path,
|
||||
const char **password);
|
||||
|
||||
typedef apr_status_t (*serf_ssl_need_server_cert_t)(
|
||||
void *data,
|
||||
int failures,
|
||||
const serf_ssl_certificate_t *cert);
|
||||
|
||||
typedef apr_status_t (*serf_ssl_server_cert_chain_cb_t)(
|
||||
void *data,
|
||||
int failures,
|
||||
int error_depth,
|
||||
const serf_ssl_certificate_t * const * certs,
|
||||
apr_size_t certs_len);
|
||||
|
||||
void serf_ssl_client_cert_provider_set(
|
||||
serf_ssl_context_t *context,
|
||||
serf_ssl_need_client_cert_t callback,
|
||||
void *data,
|
||||
void *cache_pool);
|
||||
|
||||
void serf_ssl_client_cert_password_set(
|
||||
serf_ssl_context_t *context,
|
||||
serf_ssl_need_cert_password_t callback,
|
||||
void *data,
|
||||
void *cache_pool);
|
||||
|
||||
/**
|
||||
* Set a callback to override the default SSL server certificate validation
|
||||
* algorithm.
|
||||
*/
|
||||
void serf_ssl_server_cert_callback_set(
|
||||
serf_ssl_context_t *context,
|
||||
serf_ssl_need_server_cert_t callback,
|
||||
void *data);
|
||||
|
||||
/**
|
||||
* Set callbacks to override the default SSL server certificate validation
|
||||
* algorithm for the current certificate or the entire certificate chain.
|
||||
*/
|
||||
void serf_ssl_server_cert_chain_callback_set(
|
||||
serf_ssl_context_t *context,
|
||||
serf_ssl_need_server_cert_t cert_callback,
|
||||
serf_ssl_server_cert_chain_cb_t cert_chain_callback,
|
||||
void *data);
|
||||
|
||||
/**
|
||||
* Use the default root CA certificates as included with the OpenSSL library.
|
||||
*/
|
||||
apr_status_t serf_ssl_use_default_certificates(
|
||||
serf_ssl_context_t *context);
|
||||
|
||||
/**
|
||||
* Allow SNI indicators to be sent to the server.
|
||||
*/
|
||||
apr_status_t serf_ssl_set_hostname(
|
||||
serf_ssl_context_t *context, const char *hostname);
|
||||
|
||||
/**
|
||||
* Return the depth of the certificate.
|
||||
*/
|
||||
int serf_ssl_cert_depth(
|
||||
const serf_ssl_certificate_t *cert);
|
||||
|
||||
/**
|
||||
* Extract the fields of the issuer in a table with keys (E, CN, OU, O, L,
|
||||
* ST and C). The returned table will be allocated in @a pool.
|
||||
*/
|
||||
apr_hash_t *serf_ssl_cert_issuer(
|
||||
const serf_ssl_certificate_t *cert,
|
||||
apr_pool_t *pool);
|
||||
|
||||
/**
|
||||
* Extract the fields of the subject in a table with keys (E, CN, OU, O, L,
|
||||
* ST and C). The returned table will be allocated in @a pool.
|
||||
*/
|
||||
apr_hash_t *serf_ssl_cert_subject(
|
||||
const serf_ssl_certificate_t *cert,
|
||||
apr_pool_t *pool);
|
||||
|
||||
/**
|
||||
* Extract the fields of the certificate in a table with keys (sha1, notBefore,
|
||||
* notAfter). The returned table will be allocated in @a pool.
|
||||
*/
|
||||
apr_hash_t *serf_ssl_cert_certificate(
|
||||
const serf_ssl_certificate_t *cert,
|
||||
apr_pool_t *pool);
|
||||
|
||||
/**
|
||||
* Export a certificate to base64-encoded, zero-terminated string.
|
||||
* The returned string is allocated in @a pool. Returns NULL on failure.
|
||||
*/
|
||||
const char *serf_ssl_cert_export(
|
||||
const serf_ssl_certificate_t *cert,
|
||||
apr_pool_t *pool);
|
||||
|
||||
/**
|
||||
* Load a CA certificate file from a path @a file_path. If the file was loaded
|
||||
* and parsed correctly, a certificate @a cert will be created and returned.
|
||||
* This certificate object will be alloced in @a pool.
|
||||
*/
|
||||
apr_status_t serf_ssl_load_cert_file(
|
||||
serf_ssl_certificate_t **cert,
|
||||
const char *file_path,
|
||||
apr_pool_t *pool);
|
||||
|
||||
/**
|
||||
* Adds the certificate @a cert to the list of trusted certificates in
|
||||
* @a ssl_ctx that will be used for verification.
|
||||
* See also @a serf_ssl_load_cert_file.
|
||||
*/
|
||||
apr_status_t serf_ssl_trust_cert(
|
||||
serf_ssl_context_t *ssl_ctx,
|
||||
serf_ssl_certificate_t *cert);
|
||||
|
||||
/**
|
||||
* Enable or disable SSL compression on a SSL session.
|
||||
* @a enabled = 1 to enable compression, 0 to disable compression.
|
||||
* Default = disabled.
|
||||
*/
|
||||
apr_status_t serf_ssl_use_compression(
|
||||
serf_ssl_context_t *ssl_ctx,
|
||||
int enabled);
|
||||
|
||||
serf_bucket_t *serf_bucket_ssl_encrypt_create(
|
||||
serf_bucket_t *stream,
|
||||
serf_ssl_context_t *ssl_context,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
serf_ssl_context_t *serf_bucket_ssl_encrypt_context_get(
|
||||
serf_bucket_t *bucket);
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_ssl_decrypt;
|
||||
#define SERF_BUCKET_IS_SSL_DECRYPT(b) SERF_BUCKET_CHECK((b), ssl_decrypt)
|
||||
|
||||
serf_bucket_t *serf_bucket_ssl_decrypt_create(
|
||||
serf_bucket_t *stream,
|
||||
serf_ssl_context_t *ssl_context,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
serf_ssl_context_t *serf_bucket_ssl_decrypt_context_get(
|
||||
serf_bucket_t *bucket);
|
||||
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_barrier;
|
||||
#define SERF_BUCKET_IS_BARRIER(b) SERF_BUCKET_CHECK((b), barrier)
|
||||
|
||||
serf_bucket_t *serf_bucket_barrier_create(
|
||||
serf_bucket_t *stream,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
extern const serf_bucket_type_t serf_bucket_type_iovec;
|
||||
#define SERF_BUCKET_IS_IOVEC(b) SERF_BUCKET_CHECK((b), iovec)
|
||||
|
||||
serf_bucket_t *serf_bucket_iovec_create(
|
||||
struct iovec vecs[],
|
||||
int len,
|
||||
serf_bucket_alloc_t *allocator);
|
||||
|
||||
|
||||
/* ==================================================================== */
|
||||
|
||||
/* ### do we need a PIPE bucket type? they are simple apr_file_t objects */
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* !SERF_BUCKET_TYPES_H */
|
286
serf_bucket_util.h
Normal file
286
serf_bucket_util.h
Normal file
@ -0,0 +1,286 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef SERF_BUCKET_UTIL_H
|
||||
#define SERF_BUCKET_UTIL_H
|
||||
|
||||
/**
|
||||
* @file serf_bucket_util.h
|
||||
* @brief This header defines a set of functions and other utilities
|
||||
* for implementing buckets. It is not needed by users of the bucket
|
||||
* system.
|
||||
*/
|
||||
|
||||
#include "serf.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
|
||||
/**
|
||||
* Basic bucket creation function.
|
||||
*
|
||||
* This function will create a bucket of @a type, allocating the necessary
|
||||
* memory from @a allocator. The @a data bucket-private information will
|
||||
* be stored into the bucket.
|
||||
*/
|
||||
serf_bucket_t *serf_bucket_create(
|
||||
const serf_bucket_type_t *type,
|
||||
serf_bucket_alloc_t *allocator,
|
||||
void *data);
|
||||
|
||||
/**
|
||||
* Default implementation of the @see read_iovec functionality.
|
||||
*
|
||||
* This function will use the @see read function to get a block of memory,
|
||||
* then return it in the iovec.
|
||||
*/
|
||||
apr_status_t serf_default_read_iovec(
|
||||
serf_bucket_t *bucket,
|
||||
apr_size_t requested,
|
||||
int vecs_size,
|
||||
struct iovec *vecs,
|
||||
int *vecs_used);
|
||||
|
||||
/**
|
||||
* Default implementation of the @see read_for_sendfile functionality.
|
||||
*
|
||||
* This function will use the @see read function to get a block of memory,
|
||||
* then return it as a header. No file will be returned.
|
||||
*/
|
||||
apr_status_t serf_default_read_for_sendfile(
|
||||
serf_bucket_t *bucket,
|
||||
apr_size_t requested,
|
||||
apr_hdtr_t *hdtr,
|
||||
apr_file_t **file,
|
||||
apr_off_t *offset,
|
||||
apr_size_t *len);
|
||||
|
||||
/**
|
||||
* Default implementation of the @see read_bucket functionality.
|
||||
*
|
||||
* This function will always return NULL, indicating that the @a type
|
||||
* of bucket cannot be found within @a bucket.
|
||||
*/
|
||||
serf_bucket_t *serf_default_read_bucket(
|
||||
serf_bucket_t *bucket,
|
||||
const serf_bucket_type_t *type);
|
||||
|
||||
/**
|
||||
* Default implementation of the @see destroy functionality.
|
||||
*
|
||||
* This function will return the @a bucket to its allcoator.
|
||||
*/
|
||||
void serf_default_destroy(
|
||||
serf_bucket_t *bucket);
|
||||
|
||||
|
||||
/**
|
||||
* Default implementation of the @see destroy functionality.
|
||||
*
|
||||
* This function will return the @a bucket, and the data member to its
|
||||
* allocator.
|
||||
*/
|
||||
void serf_default_destroy_and_data(
|
||||
serf_bucket_t *bucket);
|
||||
|
||||
|
||||
/**
|
||||
* Allocate @a size bytes of memory using @a allocator.
|
||||
*
|
||||
* Returns NULL of the requested memory size could not be allocated.
|
||||
*/
|
||||
void *serf_bucket_mem_alloc(
|
||||
serf_bucket_alloc_t *allocator,
|
||||
apr_size_t size);
|
||||
|
||||
/**
|
||||
* Allocate @a size bytes of memory using @a allocator and set all of the
|
||||
* memory to 0.
|
||||
*
|
||||
* Returns NULL of the requested memory size could not be allocated.
|
||||
*/
|
||||
void *serf_bucket_mem_calloc(
|
||||
serf_bucket_alloc_t *allocator,
|
||||
apr_size_t size);
|
||||
|
||||
/**
|
||||
* Free the memory at @a block, returning it to @a allocator.
|
||||
*/
|
||||
void serf_bucket_mem_free(
|
||||
serf_bucket_alloc_t *allocator,
|
||||
void *block);
|
||||
|
||||
|
||||
/**
|
||||
* Analogous to apr_pstrmemdup, using a bucket allocator instead.
|
||||
*/
|
||||
char *serf_bstrmemdup(
|
||||
serf_bucket_alloc_t *allocator,
|
||||
const char *str,
|
||||
apr_size_t size);
|
||||
|
||||
/**
|
||||
* Analogous to apr_pmemdup, using a bucket allocator instead.
|
||||
*/
|
||||
void * serf_bmemdup(
|
||||
serf_bucket_alloc_t *allocator,
|
||||
const void *mem,
|
||||
apr_size_t size);
|
||||
|
||||
/**
|
||||
* Analogous to apr_pstrdup, using a bucket allocator instead.
|
||||
*/
|
||||
char * serf_bstrdup(
|
||||
serf_bucket_alloc_t *allocator,
|
||||
const char *str);
|
||||
|
||||
|
||||
/**
|
||||
* Read data up to a newline.
|
||||
*
|
||||
* @a acceptable contains the allowed forms of a newline, and @a found
|
||||
* will return the particular newline type that was found. If a newline
|
||||
* is not found, then SERF_NEWLINE_NONE will be placed in @a found.
|
||||
*
|
||||
* @a data should contain a pointer to the data to be scanned. @a len
|
||||
* should specify the length of that data buffer. On exit, @a data will
|
||||
* be advanced past the newline, and @a len will specify the remaining
|
||||
* amount of data in the buffer.
|
||||
*
|
||||
* Given this pattern of behavior, the caller should store the initial
|
||||
* value of @a data as the line start. The difference between the
|
||||
* returned value of @a data and the saved start is the length of the
|
||||
* line.
|
||||
*
|
||||
* Note that the newline character(s) will remain within the buffer.
|
||||
* This function scans at a byte level for the newline characters. Thus,
|
||||
* the data buffer may contain NUL characters. As a corollary, this
|
||||
* function only works on 8-bit character encodings.
|
||||
*
|
||||
* If the data is fully consumed (@a len gets set to zero) and a CR
|
||||
* character is found at the end and the CRLF sequence is allowed, then
|
||||
* this function may store SERF_NEWLINE_CRLF_SPLIT into @a found. The
|
||||
* caller should take particular consideration for the CRLF sequence
|
||||
* that may be split across data buffer boundaries.
|
||||
*/
|
||||
void serf_util_readline(
|
||||
const char **data,
|
||||
apr_size_t *len,
|
||||
int acceptable,
|
||||
int *found);
|
||||
|
||||
|
||||
/** The buffer size used within @see serf_databuf_t. */
|
||||
#define SERF_DATABUF_BUFSIZE 8000
|
||||
|
||||
/** Callback function which is used to refill the data buffer.
|
||||
*
|
||||
* The function takes @a baton, which is the @see read_baton value
|
||||
* from the serf_databuf_t structure. Data should be placed into
|
||||
* a buffer specified by @a buf, which is @a bufsize bytes long.
|
||||
* The amount of data read should be returned in @a len.
|
||||
*
|
||||
* APR_EOF should be returned if no more data is available. APR_EAGAIN
|
||||
* should be returned, rather than blocking. In both cases, @a buf
|
||||
* should be filled in and @a len set, as appropriate.
|
||||
*/
|
||||
typedef apr_status_t (*serf_databuf_reader_t)(
|
||||
void *baton,
|
||||
apr_size_t bufsize,
|
||||
char *buf,
|
||||
apr_size_t *len);
|
||||
|
||||
/**
|
||||
* This structure is used as an intermediate data buffer for some "external"
|
||||
* source of data. It works as a scratch pad area for incoming data to be
|
||||
* stored, and then returned as a ptr/len pair by the bucket read functions.
|
||||
*
|
||||
* This structure should be initialized by calling @see serf_databuf_init.
|
||||
* Users should not bother to zero the structure beforehand.
|
||||
*/
|
||||
typedef struct {
|
||||
/** The current data position within the buffer. */
|
||||
const char *current;
|
||||
|
||||
/** Amount of data remaining in the buffer. */
|
||||
apr_size_t remaining;
|
||||
|
||||
/** Callback function. */
|
||||
serf_databuf_reader_t read;
|
||||
|
||||
/** A baton to hold context-specific data. */
|
||||
void *read_baton;
|
||||
|
||||
/** Records the status from the last @see read operation. */
|
||||
apr_status_t status;
|
||||
|
||||
/** Holds the data until it can be returned. */
|
||||
char buf[SERF_DATABUF_BUFSIZE];
|
||||
|
||||
} serf_databuf_t;
|
||||
|
||||
/**
|
||||
* Initialize the @see serf_databuf_t structure specified by @a databuf.
|
||||
*/
|
||||
void serf_databuf_init(
|
||||
serf_databuf_t *databuf);
|
||||
|
||||
/**
|
||||
* Implement a bucket-style read function from the @see serf_databuf_t
|
||||
* structure given by @a databuf.
|
||||
*
|
||||
* The @a requested, @a data, and @a len fields are interpreted and used
|
||||
* as in the read function of @see serf_bucket_t.
|
||||
*/
|
||||
apr_status_t serf_databuf_read(
|
||||
serf_databuf_t *databuf,
|
||||
apr_size_t requested,
|
||||
const char **data,
|
||||
apr_size_t *len);
|
||||
|
||||
/**
|
||||
* Implement a bucket-style readline function from the @see serf_databuf_t
|
||||
* structure given by @a databuf.
|
||||
*
|
||||
* The @a acceptable, @a found, @a data, and @a len fields are interpreted
|
||||
* and used as in the read function of @see serf_bucket_t.
|
||||
*/
|
||||
apr_status_t serf_databuf_readline(
|
||||
serf_databuf_t *databuf,
|
||||
int acceptable,
|
||||
int *found,
|
||||
const char **data,
|
||||
apr_size_t *len);
|
||||
|
||||
/**
|
||||
* Implement a bucket-style peek function from the @see serf_databuf_t
|
||||
* structure given by @a databuf.
|
||||
*
|
||||
* The @a data, and @a len fields are interpreted and used as in the
|
||||
* peek function of @see serf_bucket_t.
|
||||
*/
|
||||
apr_status_t serf_databuf_peek(
|
||||
serf_databuf_t *databuf,
|
||||
const char **data,
|
||||
apr_size_t *len);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* !SERF_BUCKET_UTIL_H */
|
403
serf_private.h
Normal file
403
serf_private.h
Normal file
@ -0,0 +1,403 @@
|
||||
/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef _SERF_PRIVATE_H_
|
||||
#define _SERF_PRIVATE_H_
|
||||
|
||||
/* ### what the hell? why does the APR interface have a "size" ??
|
||||
### the implication is that, if we bust this limit, we'd need to
|
||||
### stop, rebuild a pollset, and repopulate it. what suckage. */
|
||||
#define MAX_CONN 16
|
||||
|
||||
/* Windows does not define IOV_MAX, so we need to ensure it is defined. */
|
||||
#ifndef IOV_MAX
|
||||
#define IOV_MAX 16
|
||||
#endif
|
||||
|
||||
#define SERF_IO_CLIENT (1)
|
||||
#define SERF_IO_CONN (2)
|
||||
#define SERF_IO_LISTENER (3)
|
||||
|
||||
/* Internal logging facilities, set flag to 1 to enable console logging for
|
||||
the selected component. */
|
||||
#define SSL_VERBOSE 0
|
||||
#define SSL_MSG_VERBOSE 0 /* logs decrypted requests and responses. */
|
||||
#define SOCK_VERBOSE 0
|
||||
#define SOCK_MSG_VERBOSE 0 /* logs bytes received from or written to a socket. */
|
||||
#define CONN_VERBOSE 0
|
||||
#define AUTH_VERBOSE 0
|
||||
|
||||
|
||||
typedef struct serf__authn_scheme_t serf__authn_scheme_t;
|
||||
|
||||
typedef struct serf_io_baton_t {
|
||||
int type;
|
||||
union {
|
||||
serf_incoming_t *client;
|
||||
serf_connection_t *conn;
|
||||
serf_listener_t *listener;
|
||||
} u;
|
||||
} serf_io_baton_t;
|
||||
|
||||
/* Holds all the information corresponding to a request/response pair. */
|
||||
struct serf_request_t {
|
||||
serf_connection_t *conn;
|
||||
|
||||
apr_pool_t *respool;
|
||||
serf_bucket_alloc_t *allocator;
|
||||
|
||||
/* The bucket corresponding to the request. Will be NULL once the
|
||||
* bucket has been emptied (for delivery into the socket).
|
||||
*/
|
||||
serf_bucket_t *req_bkt;
|
||||
|
||||
serf_request_setup_t setup;
|
||||
void *setup_baton;
|
||||
|
||||
serf_response_acceptor_t acceptor;
|
||||
void *acceptor_baton;
|
||||
|
||||
serf_response_handler_t handler;
|
||||
void *handler_baton;
|
||||
|
||||
serf_bucket_t *resp_bkt;
|
||||
|
||||
int written;
|
||||
int priority;
|
||||
|
||||
/* This baton is currently only used for digest authentication, which
|
||||
needs access to the uri of the request in the response handler.
|
||||
If serf_request_t is replaced by a serf_http_request_t in the future,
|
||||
which knows about uri and method and such, this baton won't be needed
|
||||
anymore. */
|
||||
void *auth_baton;
|
||||
|
||||
struct serf_request_t *next;
|
||||
};
|
||||
|
||||
typedef struct serf_pollset_t {
|
||||
/* the set of connections to poll */
|
||||
apr_pollset_t *pollset;
|
||||
} serf_pollset_t;
|
||||
|
||||
typedef struct serf__authn_info_t {
|
||||
const char *realm;
|
||||
|
||||
const serf__authn_scheme_t *scheme;
|
||||
|
||||
void *baton;
|
||||
} serf__authn_info_t;
|
||||
|
||||
struct serf_context_t {
|
||||
/* the pool used for self and for other allocations */
|
||||
apr_pool_t *pool;
|
||||
|
||||
void *pollset_baton;
|
||||
serf_socket_add_t pollset_add;
|
||||
serf_socket_remove_t pollset_rm;
|
||||
|
||||
/* one of our connections has a dirty pollset state. */
|
||||
int dirty_pollset;
|
||||
|
||||
/* the list of active connections */
|
||||
apr_array_header_t *conns;
|
||||
#define GET_CONN(ctx, i) (((serf_connection_t **)(ctx)->conns->elts)[i])
|
||||
|
||||
/* Proxy server address */
|
||||
apr_sockaddr_t *proxy_address;
|
||||
|
||||
/* Progress callback */
|
||||
serf_progress_t progress_func;
|
||||
void *progress_baton;
|
||||
apr_off_t progress_read;
|
||||
apr_off_t progress_written;
|
||||
|
||||
/* authentication info for this context, shared by all connections. */
|
||||
serf__authn_info_t authn_info;
|
||||
serf__authn_info_t proxy_authn_info;
|
||||
|
||||
/* List of authn types supported by the client.*/
|
||||
int authn_types;
|
||||
/* Callback function used to get credentials for a realm. */
|
||||
serf_credentials_callback_t cred_cb;
|
||||
};
|
||||
|
||||
struct serf_listener_t {
|
||||
serf_context_t *ctx;
|
||||
serf_io_baton_t baton;
|
||||
apr_socket_t *skt;
|
||||
apr_pool_t *pool;
|
||||
apr_pollfd_t desc;
|
||||
void *accept_baton;
|
||||
serf_accept_client_t accept_func;
|
||||
};
|
||||
|
||||
struct serf_incoming_t {
|
||||
serf_context_t *ctx;
|
||||
serf_io_baton_t baton;
|
||||
void *request_baton;
|
||||
serf_incoming_request_cb_t request;
|
||||
apr_socket_t *skt;
|
||||
apr_pollfd_t desc;
|
||||
};
|
||||
|
||||
/* States for the different stages in the lifecyle of a connection. */
|
||||
typedef enum {
|
||||
SERF_CONN_INIT, /* no socket created yet */
|
||||
SERF_CONN_SETUP_SSLTUNNEL, /* ssl tunnel being setup, no requests sent */
|
||||
SERF_CONN_CONNECTED, /* conn is ready to send requests */
|
||||
SERF_CONN_CLOSING, /* conn is closing, no more requests,
|
||||
start a new socket */
|
||||
} serf__connection_state_t;
|
||||
|
||||
/* An outgoing connection: socket state, in-flight requests, output bucket
   chain and per-connection authn/latency bookkeeping. */
struct serf_connection_t {
    serf_context_t *ctx;

    /* NOTE(review): appears to record the most recent error for this
       connection — confirm against outgoing.c */
    apr_status_t status;
    serf_io_baton_t baton;

    apr_pool_t *pool;
    serf_bucket_alloc_t *allocator;

    /* resolved address of the peer we connect to (server or proxy) */
    apr_sockaddr_t *address;

    apr_socket_t *skt;
    /* separate pool for the socket, so it can be cleared per-socket */
    apr_pool_t *skt_pool;

    /* the last reqevents we gave to pollset_add */
    apr_int16_t reqevents;

    /* the events we've seen for this connection in our returned pollset */
    apr_int16_t seen_in_pollset;

    /* are we a dirty connection that needs its poll status updated? */
    int dirty_conn;

    /* number of completed requests we've sent */
    unsigned int completed_requests;

    /* number of completed responses we've got */
    unsigned int completed_responses;

    /* keepalive: expected number of requests the server will service on
       one socket before closing it */
    unsigned int probable_keepalive_limit;

    /* Current state of the connection (whether or not it is connected). */
    serf__connection_state_t state;

    /* This connection may have responses without a request! */
    int async_responses;
    serf_bucket_t *current_async_response;
    serf_response_acceptor_t async_acceptor;
    void *async_acceptor_baton;
    serf_response_handler_t async_handler;
    void *async_handler_baton;

    /* A bucket wrapped around our socket (for reading responses). */
    serf_bucket_t *stream;
    /* A reference to the aggregate bucket that provides the boundary between
     * request level buckets and connection level buckets.
     */
    serf_bucket_t *ostream_head;
    serf_bucket_t *ostream_tail;

    /* Aggregate bucket used to send the CONNECT request. */
    serf_bucket_t *ssltunnel_ostream;

    /* The list of active requests (singly linked, with a tail pointer for
       O(1) append). */
    serf_request_t *requests;
    serf_request_t *requests_tail;

    /* scratch iovec array for vectored socket writes */
    struct iovec vec[IOV_MAX];
    int vec_len;

    serf_connection_setup_t setup;
    void *setup_baton;
    serf_connection_closed_t closed;
    void *closed_baton;

    /* Max. number of outstanding requests. */
    unsigned int max_outstanding_requests;

    /* Set by the ssltunnel stream's EOF callback (detect_eof in
       ssltunnel.c) when the output stream has been drained. */
    int hit_eof;

    /* Host info. */
    const char *host_url;
    apr_uri_t host_info;

    /* connection and authentication scheme specific information */
    void *authn_baton;
    void *proxy_authn_baton;

    /* Time marker when connection begins. */
    apr_time_t connect_time;

    /* Calculated connection latency. Negative value if latency is unknown. */
    apr_interval_time_t latency;
};
|
||||
|
||||
/*** Internal bucket functions ***/

/** Transform a response_bucket in-place into an aggregate bucket. Restore the
    status line and all headers, not just the body.

    This can only be used when we haven't started reading the body of the
    response yet.

    Keep internal for now, probably only useful within serf.
*/
apr_status_t serf_response_full_become_aggregate(serf_bucket_t *bucket);

/*** Authentication handler declarations ***/

/* Which peer issued an authentication challenge: the proxy (407) or the
   origin server (401). */
typedef enum { PROXY, HOST } peer_t;

/**
 * For each authentication scheme we need a handler function of type
 * serf__auth_handler_func_t. This function will be called when an
 * authentication challenge is received in a session.
 */
typedef apr_status_t
(*serf__auth_handler_func_t)(int code,
                             serf_request_t *request,
                             serf_bucket_t *response,
                             const char *auth_hdr,
                             const char *auth_attr,
                             void *baton,
                             apr_pool_t *pool);

/**
 * For each authentication scheme we need an initialization function of type
 * serf__init_context_func_t. This function will be called the first time
 * serf tries a specific authentication scheme handler.
 */
typedef apr_status_t
(*serf__init_context_func_t)(int code,
                             serf_context_t *conn,
                             apr_pool_t *pool);

/**
 * For each authentication scheme we need an initialization function of type
 * serf__init_conn_func_t. This function will be called when a new
 * connection is opened.
 */
typedef apr_status_t
(*serf__init_conn_func_t)(int code,
                          serf_connection_t *conn,
                          apr_pool_t *pool);

/**
 * For each authentication scheme we need a setup_request function of type
 * serf__setup_request_func_t. This function will be called when a
 * new serf_request_t object is created and should fill in the correct
 * authentication headers (if needed).
 */
typedef apr_status_t
(*serf__setup_request_func_t)(peer_t peer,
                              int code,
                              serf_connection_t *conn,
                              serf_request_t *request,
                              const char *method,
                              const char *uri,
                              serf_bucket_t *hdrs_bkt);

/**
 * This function will be called when a response is received, so that the
 * scheme handler can validate the Authentication related response headers
 * (if needed).
 */
typedef apr_status_t
(*serf__validate_response_func_t)(peer_t peer,
                                  int code,
                                  serf_connection_t *conn,
                                  serf_request_t *request,
                                  serf_bucket_t *response,
                                  apr_pool_t *pool);

/**
 * serf__authn_scheme_t: vtable for an authn scheme provider.
 */
struct serf__authn_scheme_t {
    /* The http status code that's handled by this authentication scheme.
       Normal values are 401 for server authentication and 407 for proxy
       authentication */
    int code;

    /* The name of this authentication scheme. This should be a case
       sensitive match of the string sent in the HTTP authentication header. */
    const char *name;

    /* Internal code used for this authn type. */
    int type;

    /* The context initialization function if any; otherwise, NULL */
    serf__init_context_func_t init_ctx_func;

    /* The connection initialization function if any; otherwise, NULL */
    serf__init_conn_func_t init_conn_func;

    /* The authentication handler function */
    serf__auth_handler_func_t handle_func;

    /* Function to set up the authentication header of a request */
    serf__setup_request_func_t setup_request_func;

    /* Function to validate the authentication header of a response */
    serf__validate_response_func_t validate_response_func;
};

/**
 * Handles a 401 or 407 response, tries the different available authentication
 * handlers.
 */
apr_status_t serf__handle_auth_response(int *consumed_response,
                                        serf_request_t *request,
                                        serf_bucket_t *response,
                                        void *baton,
                                        apr_pool_t *pool);

/* from context.c */
void serf__context_progress_delta(void *progress_baton, apr_off_t read,
                                  apr_off_t written);

/* from incoming.c */
apr_status_t serf__process_client(serf_incoming_t *l, apr_int16_t events);
apr_status_t serf__process_listener(serf_listener_t *l);

/* from outgoing.c */
apr_status_t serf__open_connections(serf_context_t *ctx);
apr_status_t serf__process_connection(serf_connection_t *conn,
                                      apr_int16_t events);
apr_status_t serf__conn_update_pollset(serf_connection_t *conn);

/* from ssltunnel.c */
apr_status_t serf__ssltunnel_connect(serf_connection_t *conn);


/** Logging functions. Use one of the [COMP]_VERBOSE flags to enable specific
    logging.
 **/

/* Logs a standard event, with filename & timestamp header */
void serf__log(int verbose_flag, const char *filename, const char *fmt, ...);

/* Logs a standard event, but without prefix. This is useful to build up
   log lines in parts. */
void serf__log_nopref(int verbose_flag, const char *fmt, ...);

/* Logs a socket event, add local and remote ip address:port */
void serf__log_skt(int verbose_flag, const char *filename, apr_socket_t *skt,
                   const char *fmt, ...);

#endif
|
506
serfmake
Executable file
506
serfmake
Executable file
@ -0,0 +1,506 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
import stat
|
||||
import copy
|
||||
|
||||
### use get_version() ?
MAJOR = 1

# Basic defines for our outputs.
LIBNAME = 'libserf-%d' % (MAJOR,)
INCLUDES = 'serf-%d' % (MAJOR,)
PCFILE = 'serf-%d' % (MAJOR,)


# Public headers installed under INCLUDES; (directory, basename) pairs,
# the '.h' extension is added by File().
FILES_HDR = [
  ('.', 'serf'),
  ('.', 'serf_bucket_types'),
  ('.', 'serf_bucket_util'),
  ]

# C sources compiled into the library; (directory, basename) pairs.
LIB_FILES = [
  ('.', 'context'),
  ('.', 'incoming'),
  ('.', 'outgoing'),
  ('.', 'ssltunnel'),

  ('buckets', 'aggregate_buckets'),
  ('buckets', 'request_buckets'),
  ('buckets', 'buckets'),
  ('buckets', 'simple_buckets'),
  ('buckets', 'file_buckets'),
  ('buckets', 'mmap_buckets'),
  ('buckets', 'socket_buckets'),
  ('buckets', 'response_buckets'),
  ('buckets', 'response_body_buckets'),
  ('buckets', 'headers_buckets'),
  ('buckets', 'allocator'),
  ('buckets', 'dechunk_buckets'),
  ('buckets', 'deflate_buckets'),
  ('buckets', 'limit_buckets'),
  ('buckets', 'ssl_buckets'),
  ('buckets', 'barrier_buckets'),
  ('buckets', 'chunk_buckets'),
  ('buckets', 'iovec_buckets'),
  ('auth', 'auth'),
  ('auth', 'auth_basic'),
  ('auth', 'auth_digest'),
  ('auth', 'auth_kerb'),
  ('auth', 'auth_kerb_gss'),
  ]

# Sources the test programs link against (besides the library itself).
TEST_DEPS = [
  ('test', 'CuTest'),
  ('test', 'test_util'),
  ('test', 'test_context'),
  ('test', 'test_buckets'),
  ('test', 'test_ssl'),
  ('test/server', 'test_server'),
  ('test/server', 'test_sslserver'),
  ]

# Headers the test sources depend on.
TEST_HDR_FILES = [
  ('test', 'CuTest'),
  ('test', 'test_serf'),
  ]

# Test programs built by 'serfmake check'.
TEST_FILES = [
  ('test', 'serf_get'),
  ('test', 'serf_response'),
  ('test', 'serf_request'),
  ('test', 'serf_spider'),
  ('test', 'test_all'),
  ]

# Canned HTTP responses fed to test/serf_response during 'check'.
TESTCASES = [
  ('test/testcases', 'simple.response'),
  ('test/testcases', 'chunked-empty.response'),
  ('test/testcases', 'chunked.response'),
  ('test/testcases', 'chunked-trailers.response'),
  ('test/testcases', 'deflate.response'),
  ]
|
||||
|
||||
|
||||
def main(argv):
    """Parse argv into '--opt-name=value' params and command names, then
    run each command in order with the collected params.

    An option's key is the word between its last '-' and the '='
    (e.g. '--with-apr=/p' -> params['apr'] = '/p'). Any non-option
    argument must name a cmd_* function in this module.
    """
    params = {}

    commands = []

    for arg in argv[1:]:
        idx = arg.find('=')
        if idx > 0:
            start = arg.rfind('-', 0, idx)
            if start > 0:
                params[arg[start+1:idx]] = arg[idx+1:].strip()
        else:
            func = globals().get('cmd_' + arg)
            if func:
                commands.append(func)
            else:
                print('ERROR: unknown argument: ' + arg)
                usage()

    if not commands:
        usage()

    for func in commands:
        try:
            func(params)
        except Exception:
            # Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt; catching Exception keeps the same error
            # reporting for real build/test failures while letting
            # Ctrl-C and sys.exit() propagate.
            print('ERROR: exception:')
            print(sys.exc_info()[1])
            print("")
            usage()
|
||||
|
||||
|
||||
def usage():
    """Print the command/option summary and exit with status 1."""
    lines = [
        'serfmake [cmd] [options]',
        'Commands:',
        '\tbuild\tBuilds (default)',
        '\tcheck\tRuns test cases',
        '\tinstall\tInstalls serf into PREFIX',
        '\tclean\tCleans',
        'Options:',
        '\t--with-apr=PATH\t\tprefix for installed APR and APR-util',
        '\t\t\t\t(needs apr-1-config and apu-1-config; will look in PATH)',
        '\t--with-gssapi=PATH\tbuild serf with GSSAPI support',
        '\t\t\t\t(needs krb5-config; will look in PATH/bin)',
        '\t--prefix=PATH\t\tinstall serf into PATH (default: /usr/local)',
        'Quick guide:',
        '\tserfmake --prefix=/usr/local/serf --with-apr=/usr/local/apr install',
    ]
    print('\n'.join(lines))
    sys.exit(1)
|
||||
|
||||
|
||||
def cmd_build(param):
    """Build the shared library and the pkg-config file."""
    builder = Builder(param)
    for target in (File('.', LIBNAME, 'la'), File('.', PCFILE, 'pc')):
        builder.build_target(target, False)
|
||||
|
||||
|
||||
def cmd_install(param):
    """Build if needed, then install the library/headers/pc file into PREFIX."""
    builder = Builder(param)
    pcfile = File('.', PCFILE, 'pc')
    lib = File('.', LIBNAME, 'la')
    builder.build_target(pcfile, False)
    ### should be called .install_all()
    builder.install_target(lib, False)
|
||||
|
||||
|
||||
def cmd_check(param):
    """Build the test programs, run each canned-response test, then run the
    CuTest-based suite.

    Raises TestError naming the testcase (or test program) that failed.
    """
    builder = Builder(param)
    for dirpath, fname in TEST_FILES:
        builder.build_target(File(dirpath, fname, None), False)

    for dirpath, fname in TESTCASES:
        case = os.path.join(dirpath, fname)
        print('== Testing %s ==' % case)
        result = os.system('%s %s' % (os.path.join('test', 'serf_response'), case))
        if result:
            # was: raise TestError("", result) -- which hid the failing case
            raise TestError(case, result)

    # run the test suite based on the CuTest framework
    suite = os.path.join('test', 'test_all')
    result = os.system(suite)
    if result:
        # was: raise TestError(case, result) -- which reported the last
        # response testcase instead of the suite, and raised NameError
        # if TESTCASES was ever empty
        raise TestError(suite, result)
|
||||
|
||||
def cmd_clean(param):
    """Delete every generated object, library, test program and pc file
    that currently exists on disk."""
    targets = [File(d, f, 'o') for d, f in LIB_FILES]
    targets += [File(d, f, 'lo') for d, f in LIB_FILES]
    targets += [File('.', LIBNAME, 'la'),
                File('.', PCFILE, 'pc'),
                ]
    targets += [File(d, f, 'o') for d, f in TEST_FILES]
    targets += [File(d, f, 'lo') for d, f in TEST_FILES]
    targets += [File(d, f, None) for d, f in TEST_FILES]
    targets += [File(d, f, 'o') for d, f in TEST_DEPS]
    targets += [File(d, f, 'lo') for d, f in TEST_DEPS]

    # Only touch files that actually exist (mtime is None otherwise).
    stale = [f for f in targets if f.mtime]
    if stale:
        sys.stdout.write('Cleaning %d files... ' % len(stale))
        for f in stale:
            if f.mtime:
                os.remove(f.fname)
        print('done.')
    else:
        print('Clean.')
|
||||
|
||||
|
||||
class Builder(object):
    # Tiny make-replacement: queries apr/apu/krb5 config scripts for build
    # flags, records a target -> (deps, command) graph, and rebuilds any
    # target whose dependency is newer.

    def __init__(self, params):
        # params: dict parsed from the command line by main()
        # use apr option if set
        if 'apr' in params:
            self.apr = APRConfig(params['apr'])
            self.apu = APUConfig(params['apr'])
        else:
            self.apr = APRConfig(None)
            self.apu = APUConfig(None)

        # build with gssapi if option is set
        if 'gssapi' in params:
            self.gssapi = GSSAPIConfig(params['gssapi'])
        else:
            self.gssapi = None

        try:
            self.prefix = params['prefix']
        except:
            self.prefix = '/usr/local'

        ### no way to tweak these
        self.libdir = os.path.join(self.prefix, 'lib')
        self.pkgconfigdir = os.path.join(self.prefix, 'lib', 'pkgconfig')
        self.includedir = os.path.join(self.prefix, 'include', INCLUDES)

        self.load_vars()
        self.load_deps()

    def load_vars(self):
        # Pull compiler/flags from the apr/apu config scripts; environment
        # variables of the same name override (see ConfigScript.get_value).
        self.CC = self.apr.get_value('CC', '--cc')
        self.CFLAGS = self.apr.get_value('CFLAGS', '--cflags')
        self.CPPFLAGS = self.apr.get_value('CPPFLAGS', '--cppflags')
        self.LIBTOOL = self.apr.get_value('LIBTOOL', '--apr-libtool')
        self.LDFLAGS = self.apr.get_value('LDFLAGS', '--ldflags') \
                       + ' ' + self.apu.get_value('LDFLAGS', '--ldflags')

        self.INCLUDES = '-I%s -I%s -I%s' % (
            '.',
            self.apr.get_value(None, '--includedir'),
            self.apu.get_value(None, '--includedir'),
            )
        if os.getenv('EXTRA_INCLUDES'):
            self.INCLUDES += ' -I' + os.getenv('EXTRA_INCLUDES')

        self.LIBS = self.apu.get_value(None, '--link-libtool') \
                    + ' ' + self.apu.get_value(None, '--libs') \
                    + ' ' + self.apr.get_value(None, '--link-libtool') \
                    + ' ' + self.apr.get_value(None, '--libs') \
                    + ' -lz'
        self.SSL_LIBS = '-lssl -lcrypto'
        if self.gssapi:
            self.LIBS += ' ' + self.gssapi.get_value(None, '--libs gssapi')
            self.CFLAGS += ' ' + self.gssapi.get_value('CFLAGS', '--cflags gssapi')\
                           + ' -DSERF_HAVE_GSSAPI -g'

        # File mode for installed libraries (octal intent, passed to
        # 'install -m' as a literal number).
        self.MODE = 644

    def load_deps(self):
        # Build the full dependency graph: lib objects -> libserf-1.la,
        # test objects -> test programs, and the generated .pc file.
        self.deps = { }

        hdrs = [File(dirpath, fname, 'h') for dirpath, fname in FILES_HDR]
        libfiles = [File(dirpath, fname, 'c') for dirpath, fname in LIB_FILES]
        libobjs = [File(dirpath, fname, 'lo') for dirpath, fname in LIB_FILES]
        for src, obj in zip(libfiles, libobjs):
            self._add_compile(src, obj, hdrs)

        self.hdrs = hdrs

        all_libs = self.LIBS + ' ' + self.SSL_LIBS

        lib = File('.', LIBNAME, 'la')
        cmd = '%s --silent --mode=link %s %s -rpath %s -o %s %s %s' % (
            self.LIBTOOL, self.CC, self.LDFLAGS, self.libdir,
            lib.fname, ' '.join([l.fname for l in libobjs]), all_libs)
        self._add_dep(lib, libobjs, cmd)

        # load the test program dependencies now
        testhdrs = copy.deepcopy(hdrs)
        testhdrs += [File(dirpath, fname, 'h') for dirpath, fname in TEST_HDR_FILES]
        testdeps = [File(dirpath, fname, 'c') for dirpath, fname in TEST_DEPS]
        testobjs = [File(dirpath, fname, 'lo') for dirpath, fname in TEST_DEPS]

        for testsrc, testobj in zip(testdeps, testobjs):
            self._add_compile(testsrc, testobj, testhdrs)

        for dirpath, fname in TEST_FILES:
            src = File(dirpath, fname, 'c')
            obj = File(dirpath, fname, 'lo')
            prog = File(dirpath, fname, None)

            self._add_compile(src, obj, hdrs)

            # test_all requires extra dependencies
            if fname == "test_all":
                cmd = '%s --silent --mode=link %s %s -static -o %s %s %s %s' % (
                    self.LIBTOOL, self.CC, self.LDFLAGS,
                    prog.fname, lib.fname, ' '.join([l.fname for l in [obj] + testobjs]),
                    all_libs)
                self._add_dep(prog, [lib, obj] + testobjs, cmd)
            else:
                cmd = '%s --silent --mode=link %s %s -static -o %s %s %s %s' % (
                    self.LIBTOOL, self.CC, self.LDFLAGS,
                    prog.fname, lib.fname, obj.fname, all_libs)
                self._add_dep(prog, [lib, obj], cmd)

        # create 'serf-1.pc' if it doesn't exist.
        pcfile = File('.', PCFILE, 'pc')
        self._add_dep(pcfile, [], self._write_pcfile)

    def _add_compile(self, src, obj, hdrs):
        # Register a libtool compile step for src -> obj, depending on all
        # public headers (any header change recompiles everything).
        cmd = '%s --silent --mode=compile %s %s %s %s -c -o %s %s' % (
            self.LIBTOOL, self.CC, self.CFLAGS, self.CPPFLAGS, self.INCLUDES,
            obj.fname, src.fname)
        self._add_dep(obj, [src] + hdrs, cmd)

    def _add_dep(self, target, deps, cmd):
        # Register target unless it already exists and is newer than every
        # dependency (and none of its deps needs rebuilding themselves).
        if target.mtime:
            for dep in deps:
                if dep in self.deps or (dep.mtime and dep.mtime > target.mtime):
                    # a dep is newer. this needs to be rebuilt.
                    break
            else:
                # this is up to date. don't add it to the deps[] structure.
                return
        # else non-existent, so it must be rebuilt.

        # Commands that are strings are cmdline invocations. Otherwise, it
        # should be a callable.
        if isinstance(cmd, str):
            cmd = CommandLine(cmd)

        # register the dependency so this will get built
        self.deps[target] = deps, cmd

    def _write_pcfile(self):
        """Generating serf-1.pc ..."""
        # NOTE: the docstring above doubles as the progress line printed by
        # build_target() (cmd.__doc__), so keep it as-is.

        open(PCFILE + '.pc', 'w').write(
"""SERF_MAJOR_VERSION=%d
prefix=%s
exec_prefix=${prefix}
libdir=${exec_prefix}/lib
includedir=${prefix}/include/%s

Name: serf
Description: HTTP client library
Version: %s
Requires.private: libssl libcrypto
Libs: -L${libdir} -lserf-${SERF_MAJOR_VERSION}
Libs.private: %s
Cflags: -I${includedir}
""" % (MAJOR, self.prefix, INCLUDES, get_version(), self.LIBS))

    def build_target(self, target, dry_run):
        # Recursively build target's registered dependencies, then run its
        # command. Raises BuildError on a non-zero command result.
        deps, cmd = self.deps.get(target, (None, None))
        if cmd is None:
            # it's already up to date. all done.
            return

        for f in deps:
            subdep = self.deps.get(f)
            if subdep:
                self.build_target(f, dry_run)

        # build the target now
        print(cmd.__doc__)
        if not dry_run:
            result = cmd()
            if result:
                raise BuildError(cmd.__doc__, result)
            # FALLTHROUGH

        # it's a dry run. pretend we built the target.
        del self.deps[target]
        return 0

    def install_target(self, target, dry_run):
        # Build target, then copy headers, the .pc file and the library
        # itself into the configured prefix directories.
        self.build_target(target, dry_run)

        # install the target now
        if not dry_run:

            for path in (self.libdir, self.pkgconfigdir, self.includedir):
                if not os.path.exists(path):
                    try:
                        os.makedirs(path)
                    except OSError:
                        raise BuildError('os.makedirs',
                                         'can not create install directories')

            for f in self.hdrs:
                print("Installing: %s" % (os.path.basename(f.fname),))
                shutil.copy(f.fname, self.includedir)

            print("Installing: %s.pc" % (PCFILE,))
            shutil.copy(PCFILE + '.pc', self.pkgconfigdir)

            cmd = '%s --silent --mode=install %s -c -m %d %s %s' % (
                self.LIBTOOL, '/usr/bin/install', self.MODE, target.fname,
                self.libdir)

            print("Installing: %s" % (os.path.basename(target.fname),))
            result = os.system(cmd)
            if result:
                raise BuildError(cmd, result)
            # FALLTHROUGH

        return 0
|
||||
|
||||
|
||||
class ConfigScript(object):
    """Locate and query a *-config helper script (apr-1-config etc.).

    Subclasses set script_name. Raises ConfigScriptNotFound if the script
    cannot be found in any candidate directory.
    """
    script_name = None
    locations = [
        '/usr/bin',
        '/usr/local/bin',
        '/usr/local/apache2/bin',
        ]

    def __init__(self, search_dir):
        # An explicit prefix wins over the default locations; check both
        # the directory itself and its bin/ subdirectory.
        if search_dir:
            candidates = [search_dir, os.path.join(search_dir, 'bin')]
        else:
            candidates = self.locations

        for dirname in candidates:
            script = os.path.join(dirname, self.script_name)
            if os.access(script, os.X_OK):
                self.bin = script
                break
        else:
            raise ConfigScriptNotFound(self.script_name)

    def get_value(self, env_name, switch):
        """Return the env var env_name if set, else the script's output
        for the given switch (stripped)."""
        if env_name:
            override = os.getenv(env_name)
            if override:
                return override
        return os.popen('%s %s' % (self.bin, switch), 'r').read().strip()
|
||||
|
||||
|
||||
class APRConfig(ConfigScript):
    # Queries APR build settings via apr-1-config.
    script_name = 'apr-1-config'


class APUConfig(ConfigScript):
    # Queries APR-util build settings via apu-1-config.
    script_name = 'apu-1-config'


class GSSAPIConfig(ConfigScript):
    # Queries Kerberos/GSSAPI build settings via krb5-config.
    script_name = 'krb5-config'
||||
|
||||
|
||||
class CommandLine(object):
    """Wrap a shell command string so it can be invoked like a function."""

    def __init__(self, cmd):
        self.cmd = cmd
        # Mirror the command into __doc__: Builder.build_target() prints
        # cmd.__doc__ before executing each step.
        self.__doc__ = cmd

    def __call__(self):
        # Run through the shell; returns the os.system() status (0 = ok).
        return os.system(self.cmd)
|
||||
|
||||
|
||||
class File:
    """A build artifact identified by its path; mtime is None when the
    file does not exist on disk."""

    def __init__(self, dirpath, fname, ext):
        # ext may be None/empty for extension-less targets (test programs).
        basename = (fname + '.' + ext) if ext else fname
        self.fname = os.path.join(dirpath, basename)

        try:
            self.mtime = os.stat(self.fname)[stat.ST_MTIME]
        except OSError:
            self.mtime = None

    def __eq__(self, other):
        # Identity is the path alone; mtime is deliberately ignored so a
        # File can be used as a dict key across rebuilds.
        return self.fname == other.fname

    def __hash__(self):
        return hash(self.fname)
|
||||
|
||||
|
||||
def get_version():
    """Read 'major.minor.patch' from the SERF_*_VERSION defines in serf.h."""
    header = open('serf.h').read()
    pattern = ('SERF_MAJOR_VERSION ([0-9]+).*'
               'SERF_MINOR_VERSION ([0-9]+).*'
               'SERF_PATCH_VERSION ([0-9]+)')
    found = re.search(pattern, header, re.DOTALL)
    return '.'.join(found.groups())
|
||||
|
||||
|
||||
class BuildError(Exception):
    "An error occurred while building a target."


class TestError(Exception):
    "An error occurred while running a unit test."
|
||||
class ConfigScriptNotFound(Exception):
    """Raised when a *-config helper script is not found in any of the
    searched directories."""

    def __init__(self, value):
        # value is the script's name (e.g. 'apr-1-config').
        self.value = "ERROR: A configuration script was not found: " + value

    def __str__(self):
        return self.value
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main(sys.argv)
|
||||
|
||||
|
||||
###
|
||||
### TODO:
|
||||
### * obey DESTDIR
|
||||
### * arfrever says LDFLAGS is passed twice
|
||||
### * be able to specify libdir and includedir
|
||||
###
|
179
ssltunnel.c
Normal file
179
ssltunnel.c
Normal file
@ -0,0 +1,179 @@
|
||||
/* Copyright 2011 Justin Erenkrantz and Greg Stein
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/*** Setup a SSL tunnel over a HTTP proxy, according to RFC 2817. ***/
|
||||
|
||||
#include <apr_pools.h>
|
||||
#include <apr_strings.h>
|
||||
|
||||
#include "serf.h"
|
||||
#include "serf_private.h"
|
||||
|
||||
|
||||
/* Structure passed around as baton for the CONNECT request and response. */
typedef struct {
    /* subpool holding this baton; destroyed by handle_response() once the
       tunnel is established */
    apr_pool_t *pool;
    /* "host:port" target of the CONNECT request */
    const char *uri;
} req_ctx_t;

/* forward declaration. */
static apr_status_t setup_request(serf_request_t *request,
                                  void *setup_baton,
                                  serf_bucket_t **req_bkt,
                                  serf_response_acceptor_t *acceptor,
                                  void **acceptor_baton,
                                  serf_response_handler_t *handler,
                                  void **handler_baton,
                                  apr_pool_t *pool);
|
||||
|
||||
/* Response acceptor for the CONNECT request: wrap the connection stream
   so the proxy's reply can be parsed as an HTTP response. */
static serf_bucket_t* accept_response(serf_request_t *request,
                                      serf_bucket_t *stream,
                                      void *acceptor_baton,
                                      apr_pool_t *pool)
{
    serf_bucket_t *c;
    serf_bucket_alloc_t *bkt_alloc;
#if 0
    req_ctx_t *ctx = acceptor_baton;
#endif

    /* get the per-request bucket allocator */
    bkt_alloc = serf_request_get_alloc(request);

    /* Create a barrier so the response doesn't eat us! */
    c = serf_bucket_barrier_create(stream, bkt_alloc);

    return serf_bucket_response_create(c, bkt_alloc);
}
|
||||
|
||||
/* If a 200 OK was received for the CONNECT request, consider the connection
   as ready for use.

   Returns APR_EOF once the tunnel is established, an incomplete-data status
   (EAGAIN/EOF) while the proxy's reply is still arriving, or
   SERF_ERROR_SSLTUNNEL_SETUP_FAILED for a non-2xx reply. */
static apr_status_t handle_response(serf_request_t *request,
                                    serf_bucket_t *response,
                                    void *handler_baton,
                                    apr_pool_t *pool)
{
    apr_status_t status;
    serf_status_line sl;
    req_ctx_t *ctx = handler_baton;

    /* A NULL response means the request must be re-sent; queue a fresh
       CONNECT with the same baton. */
    if (! response) {
        serf_connection_request_create(request->conn,
                                       setup_request,
                                       ctx);
        return APR_SUCCESS;
    }

    status = serf_bucket_response_status(response, &sl);
    if (SERF_BUCKET_READ_ERROR(status)) {
        return status;
    }
    /* No status line parsed yet: wait for more data to arrive. */
    if (!sl.version && (APR_STATUS_IS_EOF(status) ||
                        APR_STATUS_IS_EAGAIN(status)))
    {
        return status;
    }

    status = serf_bucket_response_wait_for_headers(response);
    if (status && !APR_STATUS_IS_EOF(status)) {
        return status;
    }

    /* RFC 2817: Any successful (2xx) response to a CONNECT request indicates
       that the proxy has established a connection to the requested host and
       port, and has switched to tunneling the current connection to that server
       connection.
    */
    if (sl.code >= 200 && sl.code < 300) {
        request->conn->state = SERF_CONN_CONNECTED;

        /* Body is supposed to be empty. */
        apr_pool_destroy(ctx->pool);
        serf_bucket_destroy(request->conn->ssltunnel_ostream);
        /* Drop the stream so it is recreated for the tunneled traffic. */
        request->conn->stream = NULL;
        ctx = NULL;   /* allocated from the pool just destroyed */

        serf__log(CONN_VERBOSE, __FILE__,
                  "successfully set up ssl tunnel on connection 0x%x\n",
                  request->conn);

        return APR_EOF;
    }

    /* Authentication failure and 2xx Ok are handled at this point,
       the rest are errors. */
    return SERF_ERROR_SSLTUNNEL_SETUP_FAILED;
}
|
||||
|
||||
/* Prepare the CONNECT request. */
|
||||
static apr_status_t setup_request(serf_request_t *request,
|
||||
void *setup_baton,
|
||||
serf_bucket_t **req_bkt,
|
||||
serf_response_acceptor_t *acceptor,
|
||||
void **acceptor_baton,
|
||||
serf_response_handler_t *handler,
|
||||
void **handler_baton,
|
||||
apr_pool_t *pool)
|
||||
{
|
||||
req_ctx_t *ctx = setup_baton;
|
||||
|
||||
*req_bkt =
|
||||
serf_request_bucket_request_create(request,
|
||||
"CONNECT", ctx->uri,
|
||||
NULL,
|
||||
serf_request_get_alloc(request));
|
||||
*acceptor = accept_response;
|
||||
*acceptor_baton = ctx;
|
||||
*handler = handle_response;
|
||||
*handler_baton = ctx;
|
||||
|
||||
return APR_SUCCESS;
|
||||
}
|
||||
|
||||
/* EOF callback for the tunnel's output stream: record that the stream
   drained, but keep it open by reporting EAGAIN. */
static apr_status_t detect_eof(void *baton, serf_bucket_t *aggregate_bucket)
{
    serf_connection_t *tunnel_conn = baton;

    tunnel_conn->hit_eof = 1;
    return APR_EAGAIN;
}
|
||||
|
||||
/* SSL tunnel is needed, push a CONNECT request on the connection.

   Allocates the request baton from a subpool that handle_response()
   destroys once the tunnel is up, and moves the connection into the
   SERF_CONN_SETUP_SSLTUNNEL state. Always returns APR_SUCCESS. */
apr_status_t serf__ssltunnel_connect(serf_connection_t *conn)
{
    req_ctx_t *ctx;
    apr_pool_t *ssltunnel_pool;

    /* Subpool of the connection pool; freed when the tunnel is ready. */
    apr_pool_create(&ssltunnel_pool, conn->pool);

    ctx = apr_palloc(ssltunnel_pool, sizeof(*ctx));
    ctx->pool = ssltunnel_pool;
    /* CONNECT target is the origin server's "host:port". */
    ctx->uri = apr_psprintf(ctx->pool, "%s:%d", conn->host_info.hostinfo,
                            conn->host_info.port);

    /* Output stream whose EOF callback (detect_eof) flags conn->hit_eof
       instead of closing, so the connection stays writable. */
    conn->ssltunnel_ostream = serf__bucket_stream_create(conn->allocator,
                                                         detect_eof,
                                                         conn);

    /* TODO: should be the first request on the connection. */
    serf_connection_priority_request_create(conn,
                                            setup_request,
                                            ctx);

    conn->state = SERF_CONN_SETUP_SSLTUNNEL;
    serf__log(CONN_VERBOSE, __FILE__,
              "setting up ssl tunnel on connection 0x%x\n", conn);

    return APR_SUCCESS;
}
|
Loading…
Reference in New Issue
Block a user