import unbound 1.5.1

Dag-Erling Smørgrav 2015-01-02 17:35:29 +00:00
parent 7f563e614f
commit 7954be7fa5
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/vendor/unbound/dist/; revision=276544
svn path=/vendor/unbound/1.5.1/; revision=276545; tag=vendor/unbound/1.5.1
43 changed files with 705 additions and 1562 deletions

View File

@ -80,7 +80,7 @@ LINTFLAGS+="-Dsigset_t=long"
# FreeBSD
LINTFLAGS+="-D__uint16_t=uint16_t" "-DEVP_PKEY_ASN1_METHOD=int" "-D_RuneLocale=int" "-D__va_list=va_list"
INSTALL=$(srcdir)/install-sh
INSTALL=$(SHELL) $(srcdir)/install-sh
#pythonmod.c is not here, it is mentioned by itself in its own rules,
#makedepend fails on missing interface.h otherwise.
@ -397,7 +397,7 @@ libunbound/python/libunbound_wrap.c: $(srcdir)/libunbound/python/libunbound.i un
# Pyunbound python unbound wrapper
_unbound.la: libunbound_wrap.lo libunbound.la
$(LIBTOOL) --tag=CC --mode=link $(CC) $(RUNTIME_PATH) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -module -version-info @LIBUNBOUND_CURRENT@:@LIBUNBOUND_REVISION@:@LIBUNBOUND_AGE@ -no-undefined -o $@ libunbound_wrap.lo -rpath $(PYTHON_SITE_PKG) -L. -L.libs -lunbound $(LIBS)
$(LIBTOOL) --tag=CC --mode=link $(CC) $(RUNTIME_PATH) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -module -avoid-version -no-undefined -shared -o $@ libunbound_wrap.lo -rpath $(PYTHON_SITE_PKG) -L. -L.libs -lunbound
util/config_file.c: util/configparser.h
util/configlexer.c: $(srcdir)/util/configlexer.lex util/configparser.h

View File

@ -53,8 +53,10 @@ static int arc4lockinit = 0;
void _ARC4_LOCK(void)
{
if(!arc4lockinit)
if(!arc4lockinit) {
arc4lockinit = 1;
lock_quick_init(&arc4lock);
}
lock_quick_lock(&arc4lock);
}

View File

@ -48,6 +48,7 @@
#include <time.h>
#include <openssl/sha.h>
#include <linux/types.h>
#include <linux/random.h>
#include <linux/sysctl.h>
#ifdef HAVE_GETAUXVAL
@ -77,7 +78,7 @@ extern int main(int, char *argv[]);
#endif
static int gotdata(char *buf, size_t len);
static int getentropy_urandom(void *buf, size_t len);
#ifdef CTL_MAXNAME
#ifdef SYS__sysctl
static int getentropy_sysctl(void *buf, size_t len);
#endif
static int getentropy_fallback(void *buf, size_t len);
@ -102,7 +103,7 @@ getentropy(void *buf, size_t len)
if (ret != -1)
return (ret);
#ifdef CTL_MAXNAME
#ifdef SYS__sysctl
/*
* Try to use sysctl CTL_KERN, KERN_RANDOM, RANDOM_UUID.
* sysctl is a failsafe API, so it guarantees a result. This
@ -124,7 +125,7 @@ getentropy(void *buf, size_t len)
ret = getentropy_sysctl(buf, len);
if (ret != -1)
return (ret);
#endif /* CTL_MAXNAME */
#endif /* SYS__sysctl */
/*
* Entropy collection via /dev/urandom and sysctl have failed.
@ -235,7 +236,7 @@ getentropy_urandom(void *buf, size_t len)
return -1;
}
#ifdef CTL_MAXNAME
#ifdef SYS__sysctl
static int
getentropy_sysctl(void *buf, size_t len)
{
@ -265,7 +266,7 @@ getentropy_sysctl(void *buf, size_t len)
errno = EIO;
return -1;
}
#endif /* CTL_MAXNAME */
#endif /* SYS__sysctl */
static int cl[] = {
CLOCK_REALTIME,

View File

@ -41,9 +41,9 @@ getentropy(void *buf, size_t len)
}
if (CryptAcquireContext(&provider, NULL, NULL, PROV_RSA_FULL,
CRYPT_VERIFYCONTEXT) != 0)
CRYPT_VERIFYCONTEXT) == 0)
goto fail;
if (CryptGenRandom(provider, len, buf) != 0) {
if (CryptGenRandom(provider, len, buf) == 0) {
CryptReleaseContext(provider, 0);
goto fail;
}

View File

@ -1,8 +1,5 @@
/* config.h.in. Generated from configure.ac by autoheader. */
/* define if a library can reference the 'main' symbol */
#undef CAN_REFERENCE_MAIN
/* Directory to chroot to */
#undef CHROOT_DIR

configure (vendored): 1429 changes

File diff suppressed because it is too large

View File

@ -10,7 +10,7 @@ sinclude(dnstap/dnstap.m4)
# must be numbers. ac_defun because of later processing
m4_define([VERSION_MAJOR],[1])
m4_define([VERSION_MINOR],[5])
m4_define([VERSION_MICRO],[0])
m4_define([VERSION_MICRO],[1])
AC_INIT(unbound, m4_defn([VERSION_MAJOR]).m4_defn([VERSION_MINOR]).m4_defn([VERSION_MICRO]), unbound-bugs@nlnetlabs.nl, unbound)
AC_SUBST(UNBOUND_VERSION_MAJOR, [VERSION_MAJOR])
AC_SUBST(UNBOUND_VERSION_MINOR, [VERSION_MINOR])
@ -57,6 +57,7 @@ LIBUNBOUND_AGE=3
# 1.4.21 had 4:1:2
# 1.4.22 had 4:1:2
# 1.5.0 had 5:3:3 # adds ub_ctx_add_ta_autr
# 1.5.1 had 5:4:3
# Current -- the number of the binary API that we're implementing
# Revision -- which iteration of the implementation of the binary
@ -1022,39 +1023,6 @@ if test "$USE_NSS" = "no"; then
AC_SEARCH_LIBS([clock_gettime], [rt])
;;
esac
# generate libtool to test if linking main
# from a dynamic library works.
LT_OUTPUT
AC_MSG_CHECKING([if dynamic lib can refer to main])
cat >tmp.$$.def <<EOF
myfunc
EOF
cat >tmp.$$.c <<EOF
int myfunc(void);
extern int main(int, char *argv[]);
int myfunc(void)
{
return ((int)main) + 1;
}
EOF
mylibtool=./libtool
mylibdir=/usr/local/lib
myok=yes
$mylibtool --quiet --tag=CC --mode=compile $CC $CFLAGS -o tmp.$$.lo -c tmp.$$.c >/dev/null 2>&1
if test $? = 0; then myok=yes; else myok=no; fi
if test "$myok" = "yes"; then
$mylibtool --quiet --tag=CC --mode=link $CC $CFLAGS -version-info 1:0:0 -no-undefined -export-symbols tmp.$$.def -o libtmp$$.la tmp.$$.lo $LDFLAGS -rpath $mylibdir $LIBS >/dev/null 2>&1
if test $? = 0; then myok=yes; else myok=no; fi
fi
if test "$myok" = "yes"; then
AC_MSG_RESULT(yes)
AC_DEFINE(CAN_REFERENCE_MAIN, [1], [define if a library can reference the 'main' symbol])
else
AC_MSG_RESULT(no)
fi
$mylibtool --quiet --mode=clean rm -rf libtmp$$.la tmp.$$.lo
rm -f tmp.$$.def tmp.$$.c libtmp$$.la tmp.$$.lo tmp.$$.o
fi
])
fi

View File

@ -25,4 +25,8 @@ distribution but may be helpful.
* unbound_cache.cmd: windows script to save and load the cache.
* warmup.sh: shell script to warm up DNS cache by your own MRU domains.
* warmup.cmd: windows script to warm up DNS cache by your own MRU domains.
* aaaa-filter-iterator.patch: adds config option aaaa-filter: yes that
works like the BIND feature (removes AAAA records unless AAAA-only domain).
Useful for certain 'broken IPv6 default route' scenarios.
Patch from Stephane Lapie for ASAHI Net.
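
For reference, a minimal sketch of enabling the option this patch adds (editor's example, assuming a build with the patch applied); as the man-page text in the patch notes, it breaks DNSSEC:

    server:
        # from contrib/aaaa-filter-iterator.patch; disabled by default
        aaaa-filter: yes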

View File

@ -0,0 +1,394 @@
--- unbound-1.4.17.orig/doc/unbound.conf.5.in
+++ unbound-1.4.17/doc/unbound.conf.5.in
@@ -519,6 +519,13 @@ authority servers and checks if the repl
Disabled by default.
This feature is an experimental implementation of draft dns\-0x20.
.TP
+.B aaaa\-filter: \fI<yes or no>
+Activate behavior similar to BIND's AAAA-filter.
+This forces the dropping of all AAAA records, unless in the case of
+explicit AAAA queries, when no A records have been confirmed.
+This also causes an additional A query to be sent for each AAAA query.
+This breaks DNSSEC!
+.TP
.B private\-address: \fI<IP address or subnet>
Give IPv4 of IPv6 addresses or classless subnets. These are addresses
on your private network, and are not allowed to be returned for public
--- unbound-1.4.17.orig/util/config_file.c
+++ unbound-1.4.17/util/config_file.c
@@ -160,6 +160,7 @@ config_create(void)
cfg->harden_below_nxdomain = 0;
cfg->harden_referral_path = 0;
cfg->use_caps_bits_for_id = 0;
+ cfg->aaaa_filter = 0; /* ASN: default is disabled */
cfg->private_address = NULL;
cfg->private_domain = NULL;
cfg->unwanted_threshold = 0;
--- unbound-1.4.17.orig/iterator/iter_scrub.c
+++ unbound-1.4.17/iterator/iter_scrub.c
@@ -580,6 +580,32 @@ static int sanitize_nsec_is_overreach(st
}
/**
+ * ASN: Lookup A records from rrset cache.
+ * @param qinfo: the question originally asked.
+ * @param env: module environment with config and cache.
+ * @param ie: iterator environment with private address data.
+ * @return 0 if no A record found, 1 if A record found.
+ */
+static int
+asn_lookup_a_record_from_cache(struct query_info* qinfo,
+ struct module_env* env, struct iter_env* ie)
+{
+ struct ub_packed_rrset_key* akey;
+
+ /* get cached A records for queried name */
+ akey = rrset_cache_lookup(env->rrset_cache, qinfo->qname,
+ qinfo->qname_len, LDNS_RR_TYPE_A, qinfo->qclass,
+ 0, *env->now, 0);
+ if(akey) { /* we had some. */
+ log_rrset_key(VERB_ALGO, "ASN-AAAA-filter: found A record",
+ akey);
+ lock_rw_unlock(&akey->entry.lock);
+ return 1;
+ }
+ return 0;
+}
+
+/**
* Given a response event, remove suspect RRsets from the response.
* "Suspect" rrsets are potentially poison. Note that this routine expects
* the response to be in a "normalized" state -- that is, all "irrelevant"
@@ -598,6 +625,7 @@ scrub_sanitize(ldns_buffer* pkt, struct
struct query_info* qinfo, uint8_t* zonename, struct module_env* env,
struct iter_env* ie)
{
+ int found_a_record = 0; /* ASN: do we have a A record? */
int del_addi = 0; /* if additional-holding rrsets are deleted, we
do not trust the normalized additional-A-AAAA any more */
struct rrset_parse* rrset, *prev;
@@ -633,6 +661,13 @@ scrub_sanitize(ldns_buffer* pkt, struct
rrset = rrset->rrset_all_next;
}
+ /* ASN: Locate any A record we can find */
+ if((ie->aaaa_filter) && (qinfo->qtype == LDNS_RR_TYPE_AAAA)) {
+ found_a_record = asn_lookup_a_record_from_cache(qinfo,
+ env, ie);
+ }
+ /* ASN: End of added code */
+
/* At this point, we brutally remove ALL rrsets that aren't
* children of the originating zone. The idea here is that,
* as far as we know, the server that we contacted is ONLY
@@ -644,6 +679,24 @@ scrub_sanitize(ldns_buffer* pkt, struct
rrset = msg->rrset_first;
while(rrset) {
+ /* ASN: For AAAA records only... */
+ if((ie->aaaa_filter) && (rrset->type == LDNS_RR_TYPE_AAAA)) {
+ /* ASN: If this is not a AAAA query, then remove AAAA
+ * records, no questions asked. If this IS a AAAA query
+ * then remove AAAA records if we have an A record.
+ * Otherwise, leave things be. */
+ if((qinfo->qtype != LDNS_RR_TYPE_AAAA) ||
+ (found_a_record)) {
+ remove_rrset("ASN-AAAA-filter: removing AAAA "
+ "for record", pkt, msg, prev, &rrset);
+ continue;
+ }
+ log_nametypeclass(VERB_ALGO, "ASN-AAAA-filter: "
+ "keep AAAA for", zonename,
+ LDNS_RR_TYPE_AAAA, qinfo->qclass);
+ }
+ /* ASN: End of added code */
+
/* remove private addresses */
if( (rrset->type == LDNS_RR_TYPE_A ||
rrset->type == LDNS_RR_TYPE_AAAA) &&
--- unbound-1.4.17.orig/iterator/iterator.c
+++ unbound-1.4.17/iterator/iterator.c
@@ -1579,6 +1579,53 @@ processDSNSFind(struct module_qstate* qs
return 0;
}
+
+/**
+ * ASN: This event state was added as an intermediary step between
+ * QUERYTARGETS_STATE and the next step, in order to cast a subquery for the
+ * purpose of caching A records for the queried name.
+ *
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param ie: iterator shared global environment.
+ * @param id: module id.
+ * @return true if the event requires more request processing immediately,
+ * false if not. This state only returns true when it is generating
+ * a SERVFAIL response because the query has hit a dead end.
+ */
+static int
+asn_processQueryAAAA(struct module_qstate* qstate, struct iter_qstate* iq,
+ struct iter_env* ie, int id)
+{
+ struct module_qstate* subq = NULL;
+
+ log_assert(iq->fetch_a_for_aaaa == 0);
+
+ /* flag the query properly in order to not loop */
+ iq->fetch_a_for_aaaa = 1;
+
+ /* re-throw same query, but with a different type */
+ if(!generate_sub_request(iq->qchase.qname,
+ iq->qchase.qname_len, LDNS_RR_TYPE_A,
+ iq->qchase.qclass, qstate, id, iq,
+ INIT_REQUEST_STATE, FINISHED_STATE, &subq, 1)) {
+ log_nametypeclass(VERB_ALGO, "ASN-AAAA-filter: failed "
+ "preloading of A record for",
+ iq->qchase.qname, LDNS_RR_TYPE_A,
+ iq->qchase.qclass);
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+ log_nametypeclass(VERB_ALGO, "ASN-AAAA-filter: "
+ "preloading records in cache for",
+ iq->qchase.qname, LDNS_RR_TYPE_A,
+ iq->qchase.qclass);
+
+ /* set this query as waiting */
+ qstate->ext_state[id] = module_wait_subquery;
+ /* at this point break loop */
+ return 0;
+}
+/* ASN: End of added code */
/**
* This is the request event state where the request will be sent to one of
@@ -1626,6 +1673,13 @@ processQueryTargets(struct module_qstate
return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
}
+ /* ASN: If we have a AAAA query, then also query for A records */
+ if((ie->aaaa_filter) && (iq->qchase.qtype == LDNS_RR_TYPE_AAAA) &&
+ (iq->fetch_a_for_aaaa == 0)) {
+ return next_state(iq, ASN_FETCH_A_FOR_AAAA_STATE);
+ }
+ /* ASN: End of added code */
+
/* Make sure we have a delegation point, otherwise priming failed
* or another failure occurred */
if(!iq->dp) {
@@ -2568,6 +2622,62 @@ processFinished(struct module_qstate* qs
return 0;
}
+/**
+ * ASN: Do final processing on responses to A queries originated from AAAA
+ * queries. Events reach this state after the iterative resolution algorithm
+ * terminates.
+ * This is required down the road to decide whether to scrub AAAA records
+ * from the results or not.
+ *
+ * @param qstate: query state.
+ * @param id: module id.
+ * @param forq: super query state.
+ */
+static void
+asn_processAAAAResponse(struct module_qstate* qstate, int id,
+ struct module_qstate* super)
+{
+ struct iter_qstate* iq = (struct iter_qstate*)qstate->minfo[id];
+ struct iter_qstate* super_iq = (struct iter_qstate*)super->minfo[id];
+ struct ub_packed_rrset_key* rrset;
+ struct delegpt_ns* dpns = NULL;
+ int error = (qstate->return_rcode != LDNS_RCODE_NOERROR);
+
+ log_assert(super_iq->fetch_a_for_aaaa > 0);
+
+ /* let super go to evaluation of targets after this */
+ super_iq->state = QUERYTARGETS_STATE;
+
+ log_query_info(VERB_ALGO, "ASN-AAAA-filter: processAAAAResponse",
+ &qstate->qinfo);
+ log_query_info(VERB_ALGO, "ASN-AAAA-filter: processAAAAResponse super",
+ &super->qinfo);
+
+ if(super_iq->dp)
+ dpns = delegpt_find_ns(super_iq->dp,
+ qstate->qinfo.qname, qstate->qinfo.qname_len);
+ if (!dpns) {
+ /* not interested */
+ verbose(VERB_ALGO, "ASN-AAAA-filter: subq: %s, but parent not "
+ "interested%s", (error ? "error, but" : "success"),
+ (super_iq->dp ? "anymore" : " (was reset)"));
+ log_query_info(VERB_ALGO, "ASN-AAAA-filter: superq", &super->qinfo);
+ if(super_iq->dp && error)
+ delegpt_log(VERB_ALGO, super_iq->dp);
+ return;
+ } else if (error) {
+ verbose(VERB_ALGO, "ASN-AAAA-filter: mark as failed, "
+ "and go to target query.");
+ /* see if the failure did get (parent-lame) info */
+ if(!cache_fill_missing(super->env,
+ super_iq->qchase.qclass, super->region,
+ super_iq->dp))
+ log_err("ASN-AAAA-filter: out of memory adding missing");
+ dpns->resolved = 1; /* mark as failed */
+ }
+}
+/* ASN: End of added code */
+
/*
* Return priming query results to interestes super querystates.
*
@@ -2587,6 +2697,9 @@ iter_inform_super(struct module_qstate*
else if(super->qinfo.qtype == LDNS_RR_TYPE_DS && ((struct iter_qstate*)
super->minfo[id])->state == DSNS_FIND_STATE)
processDSNSResponse(qstate, id, super);
+ else if (super->qinfo.qtype == LDNS_RR_TYPE_AAAA && ((struct iter_qstate*)
+ super->minfo[id])->state == ASN_FETCH_A_FOR_AAAA_STATE)
+ asn_processAAAAResponse(qstate, id, super);
else if(qstate->return_rcode != LDNS_RCODE_NOERROR)
error_supers(qstate, id, super);
else if(qstate->is_priming)
@@ -2624,6 +2737,9 @@ iter_handle(struct module_qstate* qstate
case INIT_REQUEST_3_STATE:
cont = processInitRequest3(qstate, iq, id);
break;
+ case ASN_FETCH_A_FOR_AAAA_STATE:
+ cont = asn_processQueryAAAA(qstate, iq, ie, id);
+ break;
case QUERYTARGETS_STATE:
cont = processQueryTargets(qstate, iq, ie, id);
break;
@@ -2863,6 +2979,8 @@ iter_state_to_string(enum iter_state sta
return "INIT REQUEST STATE (stage 2)";
case INIT_REQUEST_3_STATE:
return "INIT REQUEST STATE (stage 3)";
+ case ASN_FETCH_A_FOR_AAAA_STATE:
+ return "ASN_FETCH_A_FOR_AAAA_STATE";
case QUERYTARGETS_STATE :
return "QUERY TARGETS STATE";
case PRIME_RESP_STATE :
@@ -2887,6 +3005,7 @@ iter_state_is_responsestate(enum iter_st
case INIT_REQUEST_STATE :
case INIT_REQUEST_2_STATE :
case INIT_REQUEST_3_STATE :
+ case ASN_FETCH_A_FOR_AAAA_STATE :
case QUERYTARGETS_STATE :
case COLLECT_CLASS_STATE :
return 0;
--- unbound-1.4.17.orig/iterator/iter_utils.c
+++ unbound-1.4.17/iterator/iter_utils.c
@@ -128,6 +128,7 @@ iter_apply_cfg(struct iter_env* iter_env
}
iter_env->supports_ipv6 = cfg->do_ip6;
iter_env->supports_ipv4 = cfg->do_ip4;
+ iter_env->aaaa_filter = cfg->aaaa_filter;
return 1;
}
--- unbound-1.4.17.orig/iterator/iterator.h
+++ unbound-1.4.17/iterator/iterator.h
@@ -110,6 +110,9 @@ struct iter_env {
* array of max_dependency_depth+1 size.
*/
int* target_fetch_policy;
+
+ /** ASN: AAAA-filter flag */
+ int aaaa_filter;
};
/**
@@ -135,6 +138,14 @@ enum iter_state {
INIT_REQUEST_3_STATE,
/**
+ * This state is responsible for intercepting AAAA queries,
+ * and launch a A subquery on the same target, to populate the
+ * cache with A records, so the AAAA filter scrubbing logic can
+ * work.
+ */
+ ASN_FETCH_A_FOR_AAAA_STATE,
+
+ /**
* Each time a delegation point changes for a given query or a
* query times out and/or wakes up, this state is (re)visited.
* This state is reponsible for iterating through a list of
@@ -309,6 +320,13 @@ struct iter_qstate {
*/
int refetch_glue;
+ /**
+ * ASN: This is a flag that, if true, means that this query is
+ * for fetching A records to populate cache and determine if we must
+ * return AAAA records or not.
+ */
+ int fetch_a_for_aaaa;
+
/** list of pending queries to authoritative servers. */
struct outbound_list outlist;
};
--- unbound-1.4.17.orig/util/config_file.h
+++ unbound-1.4.17/util/config_file.h
@@ -169,6 +169,8 @@ struct config_file {
int harden_referral_path;
/** use 0x20 bits in query as random ID bits */
int use_caps_bits_for_id;
+ /** ASN: enable AAAA filter? */
+ int aaaa_filter;
/** strip away these private addrs from answers, no DNS Rebinding */
struct config_strlist* private_address;
/** allow domain (and subdomains) to use private address space */
--- unbound-1.4.17.orig/util/configlexer.lex
+++ unbound-1.4.17/util/configlexer.lex
@@ -177,6 +177,7 @@ harden-below-nxdomain{COLON} { YDVAR(1,
harden-referral-path{COLON} { YDVAR(1, VAR_HARDEN_REFERRAL_PATH) }
use-caps-for-id{COLON} { YDVAR(1, VAR_USE_CAPS_FOR_ID) }
unwanted-reply-threshold{COLON} { YDVAR(1, VAR_UNWANTED_REPLY_THRESHOLD) }
+aaaa-filter{COLON} { YDVAR(1, VAR_AAAA_FILTER) }
private-address{COLON} { YDVAR(1, VAR_PRIVATE_ADDRESS) }
private-domain{COLON} { YDVAR(1, VAR_PRIVATE_DOMAIN) }
prefetch-key{COLON} { YDVAR(1, VAR_PREFETCH_KEY) }
--- unbound-1.4.17.orig/util/configparser.y
+++ unbound-1.4.17/util/configparser.y
@@ -92,6 +92,7 @@ extern struct config_parser_state* cfg_p
%token VAR_STATISTICS_CUMULATIVE VAR_OUTGOING_PORT_PERMIT
%token VAR_OUTGOING_PORT_AVOID VAR_DLV_ANCHOR_FILE VAR_DLV_ANCHOR
%token VAR_NEG_CACHE_SIZE VAR_HARDEN_REFERRAL_PATH VAR_PRIVATE_ADDRESS
+%token VAR_AAAA_FILTER
%token VAR_PRIVATE_DOMAIN VAR_REMOTE_CONTROL VAR_CONTROL_ENABLE
%token VAR_CONTROL_INTERFACE VAR_CONTROL_PORT VAR_SERVER_KEY_FILE
%token VAR_SERVER_CERT_FILE VAR_CONTROL_KEY_FILE VAR_CONTROL_CERT_FILE
@@ -151,6 +152,7 @@ content_server: server_num_threads | ser
server_dlv_anchor_file | server_dlv_anchor | server_neg_cache_size |
server_harden_referral_path | server_private_address |
server_private_domain | server_extended_statistics |
+ server_aaaa_filter |
server_local_data_ptr | server_jostle_timeout |
server_unwanted_reply_threshold | server_log_time_ascii |
server_domain_insecure | server_val_sig_skew_min |
@@ -802,6 +803,15 @@ server_use_caps_for_id: VAR_USE_CAPS_FOR
free($2);
}
;
+server_aaaa_filter: VAR_AAAA_FILTER STRING_ARG
+ {
+ OUTYY(("P(server_aaaa_filter:%s)\n", $2));
+ if(strcmp($2, "yes") != 0 && strcmp($2, "no") != 0)
+ yyerror("expected yes or no.");
+ else cfg_parser->cfg->aaaa_filter = (strcmp($2, "yes")==0);
+ free($2);
+ }
+ ;
server_private_address: VAR_PRIVATE_ADDRESS STRING_ARG
{
OUTYY(("P(server_private_address:%s)\n", $2));
--- unbound-1.4.17.orig/pythonmod/interface.i
+++ unbound-1.4.17/pythonmod/interface.i
@@ -626,6 +626,7 @@ struct config_file {
int harden_dnssec_stripped;
int harden_referral_path;
int use_caps_bits_for_id;
+ int aaaa_filter; /* ASN */
struct config_strlist* private_address;
struct config_strlist* private_domain;
size_t unwanted_threshold;

View File

@ -664,7 +664,7 @@ load_msg(SSL* ssl, sldns_buffer* buf, struct worker* worker)
if(!go_on)
return 1; /* skip this one, not all references satisfied */
if(!dns_cache_store(&worker->env, &qinf, &rep, 0, 0, 0, NULL)) {
if(!dns_cache_store(&worker->env, &qinf, &rep, 0, 0, 0, NULL, flags)) {
log_warn("error out of memory");
return 0;
}

View File

@ -854,7 +854,8 @@ print_ext(SSL* ssl, struct stats_info* s)
/* RCODE */
for(i=0; i<STATS_RCODE_NUM; i++) {
if(inhibit_zero && s->svr.ans_rcode[i] == 0)
/* Always include RCODEs 0-5 */
if(inhibit_zero && i > LDNS_RCODE_REFUSED && s->svr.ans_rcode[i] == 0)
continue;
lt = sldns_lookup_by_id(sldns_rcodes, i);
if(lt && lt->name) {
@ -1094,8 +1095,13 @@ do_cache_remove(struct worker* worker, uint8_t* nm, size_t nmlen,
k.qname_len = nmlen;
k.qtype = t;
k.qclass = c;
h = query_info_hash(&k);
h = query_info_hash(&k, 0);
slabhash_remove(worker->env.msg_cache, h, &k);
if(t == LDNS_RR_TYPE_AAAA) {
/* for AAAA also flush dns64 bit_cd packet */
h = query_info_hash(&k, BIT_CD);
slabhash_remove(worker->env.msg_cache, h, &k);
}
}
/** flush a type */

View File

@ -287,7 +287,7 @@ checkrlimits(struct config_file* cfg)
#ifdef HAVE_SETRLIMIT
}
#endif
log_warn("increased limit(open files) from %u to %u",
verbose(VERB_ALGO, "increased limit(open files) from %u to %u",
(unsigned)avail, (unsigned)total+10);
}
#else

View File

@ -935,7 +935,7 @@ worker_handle_request(struct comm_point* c, void* arg, int error,
&repinfo->addr, repinfo->addrlen);
goto send_reply;
}
h = query_info_hash(&qinfo);
h = query_info_hash(&qinfo, sldns_buffer_read_u16_at(c->buffer, 2));
if((e=slabhash_lookup(worker->env.msg_cache, h, &qinfo, 0))) {
/* answer from cache - we have acquired a readlock on it */
if(answer_from_cache(worker, &qinfo,

View File

@ -399,7 +399,7 @@ handle_ipv6_ptr(struct module_qstate* qstate, int id)
/* Create the new sub-query. */
fptr_ok(fptr_whitelist_modenv_attach_sub(qstate->env->attach_sub));
if(!(*qstate->env->attach_sub)(qstate, &qinfo, qstate->query_flags, 0,
if(!(*qstate->env->attach_sub)(qstate, &qinfo, qstate->query_flags, 0, 0,
&subq))
return module_error;
if (subq) {
@ -451,7 +451,7 @@ generate_type_A_query(struct module_qstate* qstate, int id)
/* Start the sub-query. */
fptr_ok(fptr_whitelist_modenv_attach_sub(qstate->env->attach_sub));
if(!(*qstate->env->attach_sub)(qstate, &qinfo, qstate->query_flags, 0,
&subq))
0, &subq))
{
verbose(VERB_ALGO, "dns64: sub-query creation failed");
return module_error;
@ -520,11 +520,13 @@ handle_event_moddone(struct module_qstate* qstate, int id)
*
* - An internal query.
* - A query for a record type other than AAAA.
* - CD FLAG was set on querier
* - An AAAA query for which an error was returned.
* - A successful AAAA query with an answer.
*/
if ( (enum dns64_qstate)qstate->minfo[id] == DNS64_INTERNAL_QUERY
|| qstate->qinfo.qtype != LDNS_RR_TYPE_AAAA
|| (qstate->query_flags & BIT_CD)
|| qstate->return_rcode != LDNS_RCODE_NOERROR
|| (qstate->return_msg &&
qstate->return_msg->rep &&
@ -813,7 +815,7 @@ dns64_inform_super(struct module_qstate* qstate, int id,
/* Store the generated response in cache. */
if (!dns_cache_store(super->env, &super->qinfo, super->return_msg->rep,
0, 0, 0, NULL))
0, 0, 0, NULL, super->query_flags))
log_err("out of memory");
}

View File

@ -1,9 +1,69 @@
8 December 2014: Wouter
- Fix CVE-2014-8602: denial of service by making resolver chase
endless series of delegations.
1 December 2014: Wouter
- Fix bug#632: unbound fails to build on AArch64, protects
getentropy compat code from calling sysctl if it has been removed.
29 November 2014: Wouter
- Add include to getentropy_linux.c, hopefully fixing debian build.
28 November 2014: Wouter
- Fix makefile for build from noexec source tree.
26 November 2014: Wouter
- Fix libunbound undefined symbol errors for main.
Referencing main does not seem to be possible for libunbound.
24 November 2014: Wouter
- Fix log at high verbosity and memory allocation failure.
- iana portlist update.
21 November 2014: Wouter
- Fix crash on multiple thread random usage on systems without
arc4random.
20 November 2014: Wouter
- fix compat/getentropy_win.c check if CryptGenRandom works and no
immediate exit on windows.
19 November 2014: Wouter
- Fix cdflag dns64 processing.
18 November 2014: Wouter
- Fix that CD flag disables DNS64 processing, returning the DNSSEC
signed AAAA denial.
- iana portlist update.
17 November 2014: Wouter
- Fix #627: SSL_CTX_load_verify_locations return code not properly
checked.
14 November 2014: Wouter
- parser with bison 2.7
13 November 2014: Wouter
- Patch from Stephane Lapie for ASAHI Net that implements aaaa-filter,
added to contrib/aaaa-filter-iterator.patch.
12 November 2014: Wouter
- trunk has 1.5.1 in development.
- Patch from Robert Edmonds to build pyunbound python module
differently. No versioninfo, with -shared and without $(LIBS).
- Patch from Robert Edmonds fixes hyphens in unbound-anchor man page.
- Removed 'increased limit open files' log message that is written
to console. It is only written on verbosity 4 and higher.
This keeps system bootup console cleaner.
- Patch from James Raftery, always print stats for rcodes 0..5.
11 November 2014: Wouter
- iana portlist update.
- Fix bug where forward or stub addresses with same address but
different port number were not tried.
- version number in svn trunk is 1.5.0
- tag 1.5.0rc1
- review fix from Ralph.
7 November 2014: Wouter
- dnstap fixes by Robert Edmonds:

View File

@ -1,4 +1,4 @@
README for Unbound 1.5.0
README for Unbound 1.5.1
Copyright 2007 NLnet Labs
http://unbound.net

View File

@ -1,7 +1,7 @@
#
# Example configuration file.
#
# See unbound.conf(5) man page, version 1.5.0.
# See unbound.conf(5) man page, version 1.5.1.
#
# this is a comment.

View File

@ -1,4 +1,4 @@
.TH "libunbound" "3" "Nov 18, 2014" "NLnet Labs" "unbound 1.5.0"
.TH "libunbound" "3" "Dec 8, 2014" "NLnet Labs" "unbound 1.5.1"
.\"
.\" libunbound.3 -- unbound library functions manual
.\"
@ -42,7 +42,7 @@
.B ub_ctx_zone_remove,
.B ub_ctx_data_add,
.B ub_ctx_data_remove
\- Unbound DNS validating resolver 1.5.0 functions.
\- Unbound DNS validating resolver 1.5.1 functions.
.SH "SYNOPSIS"
.B #include <unbound.h>
.LP

View File

@ -1,4 +1,4 @@
.TH "unbound-anchor" "8" "Nov 18, 2014" "NLnet Labs" "unbound 1.5.0"
.TH "unbound-anchor" "8" "Dec 8, 2014" "NLnet Labs" "unbound 1.5.1"
.\"
.\" unbound-anchor.8 -- unbound anchor maintenance utility manual
.\"
@ -24,14 +24,14 @@ Suggested usage:
.nf
# in the init scripts.
# provide or update the root anchor (if necessary)
unbound-anchor -a "@UNBOUND_ROOTKEY_FILE@"
unbound-anchor \-a "@UNBOUND_ROOTKEY_FILE@"
# Please note usage of this root anchor is at your own risk
# and under the terms of our LICENSE (see source).
#
# start validating resolver
# the unbound.conf contains:
# auto-trust-anchor-file: "@UNBOUND_ROOTKEY_FILE@"
unbound -c unbound.conf
unbound \-c unbound.conf
.fi
.P
This tool provides builtin default contents for the root anchor and root
@ -138,7 +138,7 @@ tracking, or if an error occurred.
.P
You can check the exit value in this manner:
.nf
unbound-anchor -a "root.key" || logger "Please check root.key"
unbound-anchor \-a "root.key" || logger "Please check root.key"
.fi
Or something more suitable for your operational environment.
.SH "TRUST"

View File

@ -1,4 +1,4 @@
.TH "unbound-checkconf" "8" "Nov 18, 2014" "NLnet Labs" "unbound 1.5.0"
.TH "unbound-checkconf" "8" "Dec 8, 2014" "NLnet Labs" "unbound 1.5.1"
.\"
.\" unbound-checkconf.8 -- unbound configuration checker manual
.\"

View File

@ -1,4 +1,4 @@
.TH "unbound-control" "8" "Nov 18, 2014" "NLnet Labs" "unbound 1.5.0"
.TH "unbound-control" "8" "Dec 8, 2014" "NLnet Labs" "unbound 1.5.1"
.\"
.\" unbound-control.8 -- unbound remote control manual
.\"

View File

@ -1,4 +1,4 @@
.TH "unbound\-host" "1" "Nov 18, 2014" "NLnet Labs" "unbound 1.5.0"
.TH "unbound\-host" "1" "Dec 8, 2014" "NLnet Labs" "unbound 1.5.1"
.\"
.\" unbound-host.1 -- unbound DNS lookup utility
.\"

View File

@ -1,4 +1,4 @@
.TH "unbound" "8" "Nov 18, 2014" "NLnet Labs" "unbound 1.5.0"
.TH "unbound" "8" "Dec 8, 2014" "NLnet Labs" "unbound 1.5.1"
.\"
.\" unbound.8 -- unbound manual
.\"
@ -9,7 +9,7 @@
.\"
.SH "NAME"
.B unbound
\- Unbound DNS validating resolver 1.5.0.
\- Unbound DNS validating resolver 1.5.1.
.SH "SYNOPSIS"
.B unbound
.RB [ \-h ]

View File

@ -1,4 +1,4 @@
.TH "unbound.conf" "5" "Nov 18, 2014" "NLnet Labs" "unbound 1.5.0"
.TH "unbound.conf" "5" "Dec 8, 2014" "NLnet Labs" "unbound 1.5.1"
.\"
.\" unbound.conf.5 -- unbound.conf manual
.\"

View File

@ -425,10 +425,10 @@ dns_copy_msg(struct dns_msg* from, struct regional* region)
void
iter_dns_store(struct module_env* env, struct query_info* msgqinf,
struct reply_info* msgrep, int is_referral, time_t leeway, int pside,
struct regional* region)
struct regional* region, uint16_t flags)
{
if(!dns_cache_store(env, msgqinf, msgrep, is_referral, leeway,
pside, region))
pside, region, flags))
log_err("out of memory: cannot store data in cache");
}
@ -457,7 +457,8 @@ causes_cycle(struct module_qstate* qstate, uint8_t* name, size_t namelen,
fptr_ok(fptr_whitelist_modenv_detect_cycle(
qstate->env->detect_cycle));
return (*qstate->env->detect_cycle)(qstate, &qinf,
(uint16_t)(BIT_RD|BIT_CD), qstate->is_priming);
(uint16_t)(BIT_RD|BIT_CD), qstate->is_priming,
qstate->is_valrec);
}
void

View File

@ -124,6 +124,7 @@ struct dns_msg* dns_copy_msg(struct dns_msg* from, struct regional* regional);
* @param pside: true if dp is parentside, thus message is 'fresh' and NS
* can be prefetch-updates.
* @param region: to copy modified (cache is better) rrs back to.
* @param flags: with BIT_CD for dns64 AAAA translated queries.
* @return void, because we are not interested in alloc errors,
* the iterator and validator can operate on the results in their
* scratch space (the qstate.region) and are not dependent on the cache.
@ -132,7 +133,7 @@ struct dns_msg* dns_copy_msg(struct dns_msg* from, struct regional* regional);
*/
void iter_dns_store(struct module_env* env, struct query_info* qinf,
struct reply_info* rep, int is_referral, time_t leeway, int pside,
struct regional* region);
struct regional* region, uint16_t flags);
/**
* Select randomly with n/m probability.

View File

@ -120,6 +120,7 @@ iter_new(struct module_qstate* qstate, int id)
iq->query_restart_count = 0;
iq->referral_count = 0;
iq->sent_count = 0;
iq->target_count = NULL;
iq->wait_priming_stub = 0;
iq->refetch_glue = 0;
iq->dnssec_expected = 0;
@ -257,7 +258,7 @@ error_response_cache(struct module_qstate* qstate, int id, int rcode)
verbose(VERB_ALGO, "error response for prefetch in cache");
/* attempt to adjust the cache entry prefetch */
if(dns_cache_prefetch_adjust(qstate->env, &qstate->qinfo,
NORR_TTL))
NORR_TTL, qstate->query_flags))
return error_response(qstate, id, rcode);
/* if that fails (not in cache), fall through to store err */
}
@ -270,7 +271,8 @@ error_response_cache(struct module_qstate* qstate, int id, int rcode)
/* do not waste time trying to validate this servfail */
err.security = sec_status_indeterminate;
verbose(VERB_ALGO, "store error response in message cache");
iter_dns_store(qstate->env, &qstate->qinfo, &err, 0, 0, 0, NULL);
iter_dns_store(qstate->env, &qstate->qinfo, &err, 0, 0, 0, NULL,
qstate->query_flags);
return error_response(qstate, id, rcode);
}
@ -453,6 +455,26 @@ handle_cname_response(struct module_qstate* qstate, struct iter_qstate* iq,
return 1;
}
/** create target count structure for this query */
static void
target_count_create(struct iter_qstate* iq)
{
if(!iq->target_count) {
iq->target_count = (int*)calloc(2, sizeof(int));
/* if calloc fails we simply do not track this number */
if(iq->target_count)
iq->target_count[0] = 1;
}
}
static void
target_count_increase(struct iter_qstate* iq, int num)
{
target_count_create(iq);
if(iq->target_count)
iq->target_count[1] += num;
}
/**
* Generate a subrequest.
* Generate a local request event. Local events are tied to this module, and
@ -486,6 +508,7 @@ generate_sub_request(uint8_t* qname, size_t qnamelen, uint16_t qtype,
uint16_t qflags = 0; /* OPCODE QUERY, no flags */
struct query_info qinf;
int prime = (finalstate == PRIME_RESP_STATE)?1:0;
int valrec = 0;
qinf.qname = qname;
qinf.qname_len = qnamelen;
qinf.qtype = qtype;
@ -499,12 +522,15 @@ generate_sub_request(uint8_t* qname, size_t qnamelen, uint16_t qtype,
* the resolution chain, which might have a validator. We are
* uninterested in validating things not on the direct resolution
* path. */
if(!v)
if(!v) {
qflags |= BIT_CD;
valrec = 1;
}
/* attach subquery, lookup existing or make a new one */
fptr_ok(fptr_whitelist_modenv_attach_sub(qstate->env->attach_sub));
if(!(*qstate->env->attach_sub)(qstate, &qinf, qflags, prime, &subq)) {
if(!(*qstate->env->attach_sub)(qstate, &qinf, qflags, prime, valrec,
&subq)) {
return 0;
}
*subq_ret = subq;
@ -524,6 +550,10 @@ generate_sub_request(uint8_t* qname, size_t qnamelen, uint16_t qtype,
subiq = (struct iter_qstate*)subq->minfo[id];
memset(subiq, 0, sizeof(*subiq));
subiq->num_target_queries = 0;
target_count_create(iq);
subiq->target_count = iq->target_count;
if(iq->target_count)
iq->target_count[0] ++; /* extra reference */
subiq->num_current_queries = 0;
subiq->depth = iq->depth+1;
outbound_list_init(&subiq->outlist);
@ -938,7 +968,8 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
} else {
msg = dns_cache_lookup(qstate->env, iq->qchase.qname,
iq->qchase.qname_len, iq->qchase.qtype,
iq->qchase.qclass, qstate->region, qstate->env->scratch);
iq->qchase.qclass, qstate->query_flags,
qstate->region, qstate->env->scratch);
if(!msg && qstate->env->neg_cache) {
/* lookup in negative cache; may result in
* NOERROR/NODATA or NXDOMAIN answers that need validation */
@ -1350,6 +1381,12 @@ query_for_targets(struct module_qstate* qstate, struct iter_qstate* iq,
if(iq->depth == ie->max_dependency_depth)
return 0;
if(iq->depth > 0 && iq->target_count &&
iq->target_count[1] > MAX_TARGET_COUNT) {
verbose(VERB_QUERY, "request has exceeded the maximum "
"number of glue fetches %d", iq->target_count[1]);
return 0;
}
iter_mark_cycle_targets(qstate, iq->dp);
missing = (int)delegpt_count_missing_targets(iq->dp);
@ -1532,6 +1569,7 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
}
iq->num_target_queries += qs;
target_count_increase(iq, qs);
if(qs != 0) {
qstate->ext_state[id] = module_wait_subquery;
return 0; /* and wait for them */
@ -1541,6 +1579,12 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
verbose(VERB_QUERY, "maxdepth and need more nameservers, fail");
return error_response_cache(qstate, id, LDNS_RCODE_SERVFAIL);
}
if(iq->depth > 0 && iq->target_count &&
iq->target_count[1] > MAX_TARGET_COUNT) {
verbose(VERB_QUERY, "request has exceeded the maximum "
"number of glue fetches %d", iq->target_count[1]);
return error_response_cache(qstate, id, LDNS_RCODE_SERVFAIL);
}
/* mark cycle targets for parent-side lookups */
iter_mark_pside_cycle_targets(qstate, iq->dp);
/* see if we can issue queries to get nameserver addresses */
@ -1570,6 +1614,7 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
if(query_count != 0) { /* suspend to await results */
verbose(VERB_ALGO, "try parent-side glue lookup");
iq->num_target_queries += query_count;
target_count_increase(iq, query_count);
qstate->ext_state[id] = module_wait_subquery;
return 0;
}
@ -1725,6 +1770,7 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq,
return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
}
iq->num_target_queries += extra;
target_count_increase(iq, extra);
if(iq->num_target_queries > 0) {
/* wait to get all targets, we want to try em */
verbose(VERB_ALGO, "wait for all targets for fallback");
@ -1765,6 +1811,7 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq,
/* errors ignored, these targets are not strictly necessary for
* this result, we do not have to reply with SERVFAIL */
iq->num_target_queries += extra;
target_count_increase(iq, extra);
}
/* Add the current set of unused targets to our queue. */
@ -1810,6 +1857,7 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq,
return 1;
}
iq->num_target_queries += qs;
target_count_increase(iq, qs);
}
/* Since a target query might have been made, we
* need to check again. */
@ -1991,7 +2039,7 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
iter_dns_store(qstate->env, &iq->response->qinfo,
iq->response->rep, 0, qstate->prefetch_leeway,
iq->dp&&iq->dp->has_parent_side_NS,
qstate->region);
qstate->region, qstate->query_flags);
/* close down outstanding requests to be discarded */
outbound_list_clear(&iq->outlist);
iq->num_current_queries = 0;
@ -2029,7 +2077,7 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
/* Store the referral under the current query */
/* no prefetch-leeway, since its not the answer */
iter_dns_store(qstate->env, &iq->response->qinfo,
iq->response->rep, 1, 0, 0, NULL);
iq->response->rep, 1, 0, 0, NULL, 0);
if(iq->store_parent_NS)
iter_store_parentside_NS(qstate->env,
iq->response->rep);
@ -2128,7 +2176,8 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
/* prefetchleeway applied because this updates answer parts */
iter_dns_store(qstate->env, &iq->response->qinfo,
iq->response->rep, 1, qstate->prefetch_leeway,
iq->dp&&iq->dp->has_parent_side_NS, NULL);
iq->dp&&iq->dp->has_parent_side_NS, NULL,
qstate->query_flags);
/* set the current request's qname to the new value. */
iq->qchase.qname = sname;
iq->qchase.qname_len = snamelen;
@ -2209,7 +2258,7 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
}
/**
* Return priming query results to interestes super querystates.
* Return priming query results to interested super querystates.
*
* Sets the delegation point and delegation message (not nonRD queries).
* This is a callback from walk_supers.
@ -2640,7 +2689,7 @@ processFinished(struct module_qstate* qstate, struct iter_qstate* iq,
iter_dns_store(qstate->env, &qstate->qinfo,
iq->response->rep, 0, qstate->prefetch_leeway,
iq->dp&&iq->dp->has_parent_side_NS,
qstate->region);
qstate->region, qstate->query_flags);
}
}
qstate->return_rcode = LDNS_RCODE_NOERROR;
@ -2921,6 +2970,8 @@ iter_clear(struct module_qstate* qstate, int id)
iq = (struct iter_qstate*)qstate->minfo[id];
if(iq) {
outbound_list_clear(&iq->outlist);
if(iq->target_count && --iq->target_count[0] == 0)
free(iq->target_count);
iq->num_current_queries = 0;
}
qstate->minfo[id] = NULL;

View File

@ -52,6 +52,8 @@ struct iter_donotq;
struct iter_prep_list;
struct iter_priv;
/** max number of targets spawned for a query and its subqueries */
#define MAX_TARGET_COUNT 32
/** max number of query restarts. Determines max number of CNAME chain. */
#define MAX_RESTART_COUNT 8
/** max number of referrals. Makes sure resolver does not run away */
@ -251,6 +253,10 @@ struct iter_qstate {
/** number of queries fired off */
int sent_count;
/** number of target queries spawned in [1], for this query and its
* subqueries, the malloced-array is shared, [0] refcount. */
int* target_count;
/**
* The query must store NS records from referrals as parentside RRs

View File

@ -357,7 +357,7 @@ int ub_ctx_add_ta(struct ub_ctx* ctx, const char* ta);
int ub_ctx_add_ta_file(struct ub_ctx* ctx, const char* fname);
/**
* Add trust anchor to the give context that is tracked with RFC5011
* Add trust anchor to the given context that is tracked with RFC5011
* automated trust anchor maintenance. The file is written to when the
* trust anchor is changed.
* Pass the name of a file that was output from eg. unbound-anchor,

View File

@ -67,7 +67,7 @@ int storeQueryInCache(struct module_qstate* qstate, struct query_info* qinfo, st
}
return dns_cache_store(qstate->env, qinfo, msgrep, is_referral,
qstate->prefetch_leeway, 0, NULL);
qstate->prefetch_leeway, 0, NULL, qstate->query_flags);
}
/* Invalidate the message associated with query_info stored in message cache */
@ -78,7 +78,7 @@ void invalidateQueryInCache(struct module_qstate* qstate, struct query_info* qin
struct reply_info *r;
size_t i, j;
h = query_info_hash(qinfo);
h = query_info_hash(qinfo, qstate->query_flags);
if ((e=slabhash_lookup(qstate->env->msg_cache, h, qinfo, 0)))
{
r = (struct reply_info*)(e->data);

services/cache/dns.c (vendored): 34 changes
View File

@ -184,7 +184,7 @@ addr_to_additional(struct ub_packed_rrset_key* rrset, struct regional* region,
/** lookup message in message cache */
static struct msgreply_entry*
msg_cache_lookup(struct module_env* env, uint8_t* qname, size_t qnamelen,
uint16_t qtype, uint16_t qclass, time_t now, int wr)
uint16_t qtype, uint16_t qclass, uint16_t flags, time_t now, int wr)
{
struct lruhash_entry* e;
struct query_info k;
@ -194,7 +194,7 @@ msg_cache_lookup(struct module_env* env, uint8_t* qname, size_t qnamelen,
k.qname_len = qnamelen;
k.qtype = qtype;
k.qclass = qclass;
h = query_info_hash(&k);
h = query_info_hash(&k, flags);
e = slabhash_lookup(env->msg_cache, h, &k, wr);
if(!e) return NULL;
@ -226,8 +226,10 @@ find_add_addrs(struct module_env* env, uint16_t qclass,
addr_to_additional(akey, region, *msg, now);
lock_rw_unlock(&akey->entry.lock);
} else {
/* BIT_CD on false because delegpt lookup does
* not use dns64 translation */
neg = msg_cache_lookup(env, ns->name, ns->namelen,
LDNS_RR_TYPE_A, qclass, now, 0);
LDNS_RR_TYPE_A, qclass, 0, now, 0);
if(neg) {
delegpt_add_neg_msg(dp, neg);
lock_rw_unlock(&neg->entry.lock);
@ -244,8 +246,10 @@ find_add_addrs(struct module_env* env, uint16_t qclass,
addr_to_additional(akey, region, *msg, now);
lock_rw_unlock(&akey->entry.lock);
} else {
/* BIT_CD on false because delegpt lookup does
* not use dns64 translation */
neg = msg_cache_lookup(env, ns->name, ns->namelen,
LDNS_RR_TYPE_AAAA, qclass, now, 0);
LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
if(neg) {
delegpt_add_neg_msg(dp, neg);
lock_rw_unlock(&neg->entry.lock);
@ -276,8 +280,10 @@ cache_fill_missing(struct module_env* env, uint16_t qclass,
ns->name, LDNS_RR_TYPE_A, qclass);
lock_rw_unlock(&akey->entry.lock);
} else {
/* BIT_CD on false because delegpt lookup does
* not use dns64 translation */
neg = msg_cache_lookup(env, ns->name, ns->namelen,
LDNS_RR_TYPE_A, qclass, now, 0);
LDNS_RR_TYPE_A, qclass, 0, now, 0);
if(neg) {
delegpt_add_neg_msg(dp, neg);
lock_rw_unlock(&neg->entry.lock);
@ -294,8 +300,10 @@ cache_fill_missing(struct module_env* env, uint16_t qclass,
ns->name, LDNS_RR_TYPE_AAAA, qclass);
lock_rw_unlock(&akey->entry.lock);
} else {
/* BIT_CD on false because delegpt lookup does
* not use dns64 translation */
neg = msg_cache_lookup(env, ns->name, ns->namelen,
LDNS_RR_TYPE_AAAA, qclass, now, 0);
LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
if(neg) {
delegpt_add_neg_msg(dp, neg);
lock_rw_unlock(&neg->entry.lock);
@ -626,7 +634,7 @@ synth_dname_msg(struct ub_packed_rrset_key* rrset, struct regional* region,
struct dns_msg*
dns_cache_lookup(struct module_env* env,
uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
struct regional* region, struct regional* scratch)
uint16_t flags, struct regional* region, struct regional* scratch)
{
struct lruhash_entry* e;
struct query_info k;
@ -639,7 +647,7 @@ dns_cache_lookup(struct module_env* env,
k.qname_len = qnamelen;
k.qtype = qtype;
k.qclass = qclass;
h = query_info_hash(&k);
h = query_info_hash(&k, flags);
e = slabhash_lookup(env->msg_cache, h, &k, 0);
if(e) {
struct msgreply_entry* key = (struct msgreply_entry*)e->key;
@ -716,7 +724,7 @@ dns_cache_lookup(struct module_env* env,
if(env->cfg->harden_below_nxdomain)
while(!dname_is_root(k.qname)) {
dname_remove_label(&k.qname, &k.qname_len);
h = query_info_hash(&k);
h = query_info_hash(&k, flags);
e = slabhash_lookup(env->msg_cache, h, &k, 0);
if(e) {
struct reply_info* data = (struct reply_info*)e->data;
@ -741,7 +749,7 @@ dns_cache_lookup(struct module_env* env,
int
dns_cache_store(struct module_env* env, struct query_info* msgqinf,
struct reply_info* msgrep, int is_referral, time_t leeway, int pside,
struct regional* region)
struct regional* region, uint16_t flags)
{
struct reply_info* rep = NULL;
/* alloc, malloc properly (not in region, like msg is) */
@ -786,7 +794,7 @@ dns_cache_store(struct module_env* env, struct query_info* msgqinf,
* Not AA from cache. Not CD in cache (depends on client bit). */
rep->flags |= (BIT_RA | BIT_QR);
rep->flags &= ~(BIT_AA | BIT_CD);
h = query_info_hash(&qinf);
h = query_info_hash(&qinf, flags);
dns_cache_store_msg(env, &qinf, h, rep, leeway, pside, msgrep,
region);
/* qname is used inside query_info_entrysetup, and set to
@ -798,11 +806,11 @@ dns_cache_store(struct module_env* env, struct query_info* msgqinf,
int
dns_cache_prefetch_adjust(struct module_env* env, struct query_info* qinfo,
time_t adjust)
time_t adjust, uint16_t flags)
{
struct msgreply_entry* msg;
msg = msg_cache_lookup(env, qinfo->qname, qinfo->qname_len,
qinfo->qtype, qinfo->qclass, *env->now, 1);
qinfo->qtype, qinfo->qclass, flags, *env->now, 1);
if(msg) {
struct reply_info* rep = (struct reply_info*)msg->entry.data;
if(rep) {

View File

@ -79,11 +79,12 @@ struct dns_msg {
* can be updated to full TTL even in prefetch situations.
* @param region: region to allocate better entries from cache into.
* (used when is_referral is false).
* @param flags: flags with BIT_CD for AAAA queries in dns64 translation.
* @return 0 on alloc error (out of memory).
*/
int dns_cache_store(struct module_env* env, struct query_info* qinf,
struct reply_info* rep, int is_referral, time_t leeway, int pside,
struct regional* region);
struct regional* region, uint16_t flags);
/**
* Store message in the cache. Stores in message cache and rrset cache.
@ -132,6 +133,7 @@ struct delegpt* dns_cache_find_delegation(struct module_env* env,
* @param qnamelen: length of qname.
* @param qtype: query type.
* @param qclass: query class.
* @param flags: flags with BIT_CD for AAAA queries in dns64 translation.
* @param region: where to allocate result.
* @param scratch: where to allocate temporary data.
* @return new response message (alloced in region, rrsets do not have IDs).
@ -140,7 +142,7 @@ struct delegpt* dns_cache_find_delegation(struct module_env* env,
*/
struct dns_msg* dns_cache_lookup(struct module_env* env,
uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
struct regional* region, struct regional* scratch);
uint16_t flags, struct regional* region, struct regional* scratch);
/**
* find and add A and AAAA records for missing nameservers in delegpt
@ -186,9 +188,10 @@ int dns_msg_authadd(struct dns_msg* msg, struct regional* region,
* @param env: module environment with caches and time.
* @param qinfo: query info for the query that needs adjustment.
* @param adjust: time in seconds to add to the prefetch_leeway.
* @param flags: flags with BIT_CD for AAAA queries in dns64 translation.
* @return false if not in cache. true if added.
*/
int dns_cache_prefetch_adjust(struct module_env* env, struct query_info* qinfo,
time_t adjust);
time_t adjust, uint16_t flags);
#endif /* SERVICES_CACHE_DNS_H */
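
A usage sketch of the new flags parameter (editor's addition, not part of this commit; include paths and the wrapper name are assumptions). A module-side lookup passes the client's query flags so that dns64-translated AAAA answers, which are stored with BIT_CD, stay in their own cache slot, while delegation-point address lookups pass 0, as the comments in the cache code note:

#include "services/cache/dns.h"
#include "util/module.h"
#include "util/data/msgreply.h"

/* look up a client query in the message cache, honouring the flags split */
static struct dns_msg*
example_cache_lookup(struct module_env* env, struct module_qstate* qstate)
{
	/* qstate->query_flags carries BIT_CD, so a dns64-translated AAAA
	 * answer hashes to a different slot than the plain AAAA answer;
	 * internal delegation-point address lookups pass 0 instead. */
	return dns_cache_lookup(env, qstate->qinfo.qname,
		qstate->qinfo.qname_len, qstate->qinfo.qtype,
		qstate->qinfo.qclass, qstate->query_flags,
		qstate->region, env->scratch);
}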

View File

@ -132,6 +132,11 @@ mesh_state_compare(const void* ap, const void* bp)
if(!a->s.is_priming && b->s.is_priming)
return 1;
if(a->s.is_valrec && !b->s.is_valrec)
return -1;
if(!a->s.is_valrec && b->s.is_valrec)
return 1;
if((a->s.query_flags&BIT_RD) && !(b->s.query_flags&BIT_RD))
return -1;
if(!(a->s.query_flags&BIT_RD) && (b->s.query_flags&BIT_RD))
@ -277,11 +282,7 @@ void mesh_new_client(struct mesh_area* mesh, struct query_info* qinfo,
uint16_t qflags, struct edns_data* edns, struct comm_reply* rep,
uint16_t qid)
{
/* do not use CD flag from user for mesh state, we want the CD-query
* to receive validation anyway, to protect out cache contents and
* avoid bad-data in this cache that a downstream validator cannot
* remove from this cache */
struct mesh_state* s = mesh_area_find(mesh, qinfo, qflags&BIT_RD, 0);
struct mesh_state* s = mesh_area_find(mesh, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0);
int was_detached = 0;
int was_noreply = 0;
int added = 0;
@ -311,7 +312,7 @@ void mesh_new_client(struct mesh_area* mesh, struct query_info* qinfo,
#ifdef UNBOUND_DEBUG
struct rbnode_t* n;
#endif
s = mesh_state_create(mesh->env, qinfo, qflags&BIT_RD, 0);
s = mesh_state_create(mesh->env, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0);
if(!s) {
log_err("mesh_state_create: out of memory; SERVFAIL");
error_encode(rep->c->buffer, LDNS_RCODE_SERVFAIL,
@ -375,7 +376,7 @@ mesh_new_callback(struct mesh_area* mesh, struct query_info* qinfo,
uint16_t qflags, struct edns_data* edns, sldns_buffer* buf,
uint16_t qid, mesh_cb_func_t cb, void* cb_arg)
{
struct mesh_state* s = mesh_area_find(mesh, qinfo, qflags&BIT_RD, 0);
struct mesh_state* s = mesh_area_find(mesh, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0);
int was_detached = 0;
int was_noreply = 0;
int added = 0;
@ -386,7 +387,7 @@ mesh_new_callback(struct mesh_area* mesh, struct query_info* qinfo,
#ifdef UNBOUND_DEBUG
struct rbnode_t* n;
#endif
s = mesh_state_create(mesh->env, qinfo, qflags&BIT_RD, 0);
s = mesh_state_create(mesh->env, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0);
if(!s) {
return 0;
}
@ -428,7 +429,7 @@ mesh_new_callback(struct mesh_area* mesh, struct query_info* qinfo,
void mesh_new_prefetch(struct mesh_area* mesh, struct query_info* qinfo,
uint16_t qflags, time_t leeway)
{
struct mesh_state* s = mesh_area_find(mesh, qinfo, qflags&BIT_RD, 0);
struct mesh_state* s = mesh_area_find(mesh, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0);
#ifdef UNBOUND_DEBUG
struct rbnode_t* n;
#endif
@ -447,7 +448,7 @@ void mesh_new_prefetch(struct mesh_area* mesh, struct query_info* qinfo,
mesh->stats_dropped ++;
return;
}
s = mesh_state_create(mesh->env, qinfo, qflags&BIT_RD, 0);
s = mesh_state_create(mesh->env, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0);
if(!s) {
log_err("prefetch mesh_state_create: out of memory");
return;
@ -496,7 +497,7 @@ void mesh_report_reply(struct mesh_area* mesh, struct outbound_entry* e,
struct mesh_state*
mesh_state_create(struct module_env* env, struct query_info* qinfo,
uint16_t qflags, int prime)
uint16_t qflags, int prime, int valrec)
{
struct regional* region = alloc_reg_obtain(env->alloc);
struct mesh_state* mstate;
@ -533,6 +534,7 @@ mesh_state_create(struct module_env* env, struct query_info* qinfo,
/* remove all weird bits from qflags */
mstate->s.query_flags = (qflags & (BIT_RD|BIT_CD));
mstate->s.is_priming = prime;
mstate->s.is_valrec = valrec;
mstate->s.reply = NULL;
mstate->s.region = region;
mstate->s.curmod = 0;
@ -679,11 +681,12 @@ void mesh_detach_subs(struct module_qstate* qstate)
}
int mesh_attach_sub(struct module_qstate* qstate, struct query_info* qinfo,
uint16_t qflags, int prime, struct module_qstate** newq)
uint16_t qflags, int prime, int valrec, struct module_qstate** newq)
{
/* find it, if not, create it */
struct mesh_area* mesh = qstate->env->mesh;
struct mesh_state* sub = mesh_area_find(mesh, qinfo, qflags, prime);
struct mesh_state* sub = mesh_area_find(mesh, qinfo, qflags, prime,
valrec);
int was_detached;
if(mesh_detect_cycle_found(qstate, sub)) {
verbose(VERB_ALGO, "attach failed, cycle detected");
@ -694,7 +697,8 @@ int mesh_attach_sub(struct module_qstate* qstate, struct query_info* qinfo,
struct rbnode_t* n;
#endif
/* create a new one */
sub = mesh_state_create(qstate->env, qinfo, qflags, prime);
sub = mesh_state_create(qstate->env, qinfo, qflags, prime,
valrec);
if(!sub) {
log_err("mesh_attach_sub: out of memory");
return 0;
@ -941,13 +945,14 @@ void mesh_walk_supers(struct mesh_area* mesh, struct mesh_state* mstate)
}
struct mesh_state* mesh_area_find(struct mesh_area* mesh,
struct query_info* qinfo, uint16_t qflags, int prime)
struct query_info* qinfo, uint16_t qflags, int prime, int valrec)
{
struct mesh_state key;
struct mesh_state* result;
key.node.key = &key;
key.s.is_priming = prime;
key.s.is_valrec = valrec;
key.s.qinfo = *qinfo;
key.s.query_flags = qflags;
@ -1107,8 +1112,9 @@ mesh_log_list(struct mesh_area* mesh)
struct mesh_state* m;
int num = 0;
RBTREE_FOR(m, struct mesh_state*, &mesh->all) {
snprintf(buf, sizeof(buf), "%d%s%s%s%s%s mod%d %s%s",
snprintf(buf, sizeof(buf), "%d%s%s%s%s%s%s mod%d %s%s",
num++, (m->s.is_priming)?"p":"", /* prime */
(m->s.is_valrec)?"v":"", /* prime */
(m->s.query_flags&BIT_RD)?"RD":"",
(m->s.query_flags&BIT_CD)?"CD":"",
(m->super_set.count==0)?"d":"", /* detached */
@ -1178,10 +1184,11 @@ mesh_get_mem(struct mesh_area* mesh)
int
mesh_detect_cycle(struct module_qstate* qstate, struct query_info* qinfo,
uint16_t flags, int prime)
uint16_t flags, int prime, int valrec)
{
struct mesh_area* mesh = qstate->env->mesh;
struct mesh_state* dep_m = mesh_area_find(mesh, qinfo, flags, prime);
struct mesh_state* dep_m = mesh_area_find(mesh, qinfo, flags, prime,
valrec);
return mesh_detect_cycle_found(qstate, dep_m);
}

View File

@ -353,12 +353,13 @@ void mesh_detach_subs(struct module_qstate* qstate);
* @param qinfo: what to query for (copied).
* @param qflags: what flags to use (RD / CD flag or not).
* @param prime: if it is a (stub) priming query.
* @param valrec: if it is a validation recursion query (lookup of key, DS).
* @param newq: If the new subquery needs initialisation, it is returned,
* otherwise NULL is returned.
* @return: false on error, true if success (and init may be needed).
*/
int mesh_attach_sub(struct module_qstate* qstate, struct query_info* qinfo,
uint16_t qflags, int prime, struct module_qstate** newq);
uint16_t qflags, int prime, int valrec, struct module_qstate** newq);
/**
* Query state is done, send messages to reply entries.
@ -406,10 +407,12 @@ void mesh_state_delete(struct module_qstate* qstate);
* @param qinfo: query info that the mesh is for.
* @param qflags: flags for query (RD / CD flag).
* @param prime: if true, it is a priming query, set is_priming on mesh state.
* @param valrec: if true, it is a validation recursion query, and sets
* is_valrec on the mesh state.
* @return: new mesh state or NULL on allocation error.
*/
struct mesh_state* mesh_state_create(struct module_env* env,
struct query_info* qinfo, uint16_t qflags, int prime);
struct query_info* qinfo, uint16_t qflags, int prime, int valrec);
/**
* Cleanup a mesh state and its query state. Does not do rbtree or
@ -432,10 +435,11 @@ void mesh_delete_all(struct mesh_area* mesh);
* @param qinfo: what query
* @param qflags: if RD / CD bit is set or not.
* @param prime: if it is a priming query.
* @param valrec: if it is a validation-recursion query.
* @return: mesh state or NULL if not found.
*/
struct mesh_state* mesh_area_find(struct mesh_area* mesh,
struct query_info* qinfo, uint16_t qflags, int prime);
struct query_info* qinfo, uint16_t qflags, int prime, int valrec);
/**
* Setup attachment super/sub relation between super and sub mesh state.
@ -523,13 +527,14 @@ size_t mesh_get_mem(struct mesh_area* mesh);
* @param qinfo: query info for dependency.
* @param flags: query flags of dependency.
* @param prime: if dependency is a priming query or not.
* @param valrec: if it is a validation recursion query (lookup of key, DS).
* @return true if the name,type,class exists and the given qstate mesh exists
* as a dependency of that name. Thus if qstate becomes dependent on
* name,type,class then a cycle is created, this is return value 1.
* Too large to search is value 2 (also true).
*/
int mesh_detect_cycle(struct module_qstate* qstate, struct query_info* qinfo,
uint16_t flags, int prime);
uint16_t flags, int prime, int valrec);
/** compare two mesh_states */
int mesh_state_compare(const void* ap, const void* bp);

View File

@ -409,7 +409,7 @@ extern int optind;
/** getopt global, in case header files fail to declare it. */
extern char* optarg;
/** Main routine for checkconf */
/** Main routine for unbound-host */
int main(int argc, char* argv[])
{
int c;

View File

@ -576,10 +576,12 @@ reply_info_delete(void* d, void* ATTR_UNUSED(arg))
}
hashvalue_t
query_info_hash(struct query_info *q)
query_info_hash(struct query_info *q, uint16_t flags)
{
hashvalue_t h = 0xab;
h = hashlittle(&q->qtype, sizeof(q->qtype), h);
if(q->qtype == LDNS_RR_TYPE_AAAA && (flags&BIT_CD))
h++;
h = hashlittle(&q->qclass, sizeof(q->qclass), h);
h = dname_query_hash(q->qname, h);
return h;
@ -771,15 +773,14 @@ log_dns_msg(const char* str, struct query_info* qinfo, struct reply_info* rep)
region, 65535, 1)) {
log_info("%s: log_dns_msg: out of memory", str);
} else {
char* str = sldns_wire2str_pkt(sldns_buffer_begin(buf),
char* s = sldns_wire2str_pkt(sldns_buffer_begin(buf),
sldns_buffer_limit(buf));
if(!str) {
if(!s) {
log_info("%s: log_dns_msg: ldns tostr failed", str);
} else {
log_info("%s %s",
str, (char*)sldns_buffer_begin(buf));
log_info("%s %s", str, s);
}
free(str);
free(s);
}
sldns_buffer_free(buf);
regional_destroy(region);
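
An editor's illustration (not from the commit) of what the extra flags argument to query_info_hash changes: only the combination of an AAAA qtype and BIT_CD perturbs the hash, which is what gives dns64-translated answers a separate message-cache slot, and why the do_cache_remove hunk earlier in this commit flushes both variants. Include paths are assumptions.

#include "util/data/msgreply.h"
#include "util/net_help.h"   /* assumed location of the BIT_CD define */

static void
example_hash_variants(struct query_info* k)
{
	hashvalue_t plain = query_info_hash(k, 0);
	hashvalue_t dns64 = query_info_hash(k, BIT_CD);
	/* plain != dns64 only when k->qtype is AAAA; for other types the
	 * two calls return the same value, so nothing else is split */
	(void)plain;
	(void)dns64;
}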

View File

@ -305,8 +305,9 @@ void query_entry_delete(void *q, void* arg);
/** delete reply_info data structure */
void reply_info_delete(void* d, void* arg);
/** calculate hash value of query_info, lowercases the qname */
hashvalue_t query_info_hash(struct query_info *q);
/** calculate hash value of query_info, lowercases the qname,
* uses CD flag for AAAA qtype */
hashvalue_t query_info_hash(struct query_info *q, uint16_t flags);
/**
* Setup query info entry

View File

@ -280,7 +280,7 @@ fptr_whitelist_modenv_detach_subs(void (*fptr)(
int
fptr_whitelist_modenv_attach_sub(int (*fptr)(
struct module_qstate* qstate, struct query_info* qinfo,
uint16_t qflags, int prime, struct module_qstate** newq))
uint16_t qflags, int prime, int valrec, struct module_qstate** newq))
{
if(fptr == &mesh_attach_sub) return 1;
return 0;
@ -296,7 +296,7 @@ fptr_whitelist_modenv_kill_sub(void (*fptr)(struct module_qstate* newq))
int
fptr_whitelist_modenv_detect_cycle(int (*fptr)(
struct module_qstate* qstate, struct query_info* qinfo,
uint16_t flags, int prime))
uint16_t flags, int prime, int valrec))
{
if(fptr == &mesh_detect_cycle) return 1;
return 0;

View File

@ -233,7 +233,7 @@ int fptr_whitelist_modenv_detach_subs(void (*fptr)(
*/
int fptr_whitelist_modenv_attach_sub(int (*fptr)(
struct module_qstate* qstate, struct query_info* qinfo,
uint16_t qflags, int prime, struct module_qstate** newq));
uint16_t qflags, int prime, int valrec, struct module_qstate** newq));
/**
* Check function pointer whitelist for module_env kill_sub callback values.
@ -251,7 +251,7 @@ int fptr_whitelist_modenv_kill_sub(void (*fptr)(struct module_qstate* newq));
*/
int fptr_whitelist_modenv_detect_cycle(int (*fptr)(
struct module_qstate* qstate, struct query_info* qinfo,
uint16_t flags, int prime));
uint16_t flags, int prime, int valrec));
/**
* Check function pointer whitelist for module init call values.

View File

@ -2061,6 +2061,7 @@
2423,
2424,
2425,
2426,
2427,
2428,
2429,
@ -5353,9 +5354,11 @@
35004,
35355,
36001,
36411,
36865,
37475,
37654,
38002,
38201,
38202,
38203,

View File

@ -256,13 +256,14 @@ struct module_env {
* @param qinfo: what to query for (copied).
* @param qflags: what flags to use (RD, CD flag or not).
* @param prime: if it is a (stub) priming query.
* @param valrec: validation lookup recursion, does not need validation
* @param newq: If the new subquery needs initialisation, it is
* returned, otherwise NULL is returned.
* @return: false on error, true if success (and init may be needed).
*/
int (*attach_sub)(struct module_qstate* qstate,
struct query_info* qinfo, uint16_t qflags, int prime,
struct module_qstate** newq);
int valrec, struct module_qstate** newq);
/**
* Kill newly attached sub. If attach_sub returns newq for
@ -280,13 +281,15 @@ struct module_env {
* @param qinfo: query info for dependency.
* @param flags: query flags of dependency, RD/CD flags.
* @param prime: if dependency is a priming query or not.
* @param valrec: validation lookup recursion, does not need validation
* @return true if the name,type,class exists and the given
* qstate mesh exists as a dependency of that name. Thus
* if qstate becomes dependent on name,type,class then a
* cycle is created.
*/
int (*detect_cycle)(struct module_qstate* qstate,
struct query_info* qinfo, uint16_t flags, int prime);
struct query_info* qinfo, uint16_t flags, int prime,
int valrec);
/** region for temporary usage. May be cleared after operate() call. */
struct regional* scratch;
@ -397,6 +400,9 @@ struct module_qstate {
uint16_t query_flags;
/** if this is a (stub or root) priming query (with hints) */
int is_priming;
/** if this is a validation recursion query that does not get
* validation itself */
int is_valrec;
/** comm_reply contains server replies */
struct comm_reply* reply;
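
An editor's sketch (not from the commit) of calling attach_sub with the new valrec argument documented above. The wrapper name is hypothetical; the fptr whitelist check mirrors the existing call sites in this diff, and the include paths are assumptions.

#include "util/module.h"
#include "util/fptr_wlist.h"
#include "util/net_help.h"   /* assumed location of BIT_RD */

static int
example_attach_lookup(struct module_qstate* qstate, struct query_info* ask,
	int valrec)
{
	struct module_qstate* newq = NULL;
	fptr_ok(fptr_whitelist_modenv_attach_sub(qstate->env->attach_sub));
	/* valrec=1 marks a lookup made on behalf of validation (key/DS
	 * fetch) so the validator does not recurse into validating it */
	if(!(*qstate->env->attach_sub)(qstate, ask, (uint16_t)BIT_RD,
		0 /* prime */, valrec, &newq))
		return 0; /* out of memory */
	return 1;
}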

View File

@ -699,7 +699,7 @@ void* connect_sslctx_create(char* key, char* pem, char* verifypem)
}
}
if(verifypem && verifypem[0]) {
if(!SSL_CTX_load_verify_locations(ctx, verifypem, NULL) != 1) {
if(!SSL_CTX_load_verify_locations(ctx, verifypem, NULL)) {
log_crypto_err("error in SSL_CTX verify");
SSL_CTX_free(ctx);
return NULL;

View File

@ -283,12 +283,25 @@ needs_validation(struct module_qstate* qstate, int ret_rc,
{
int rcode;
/* If the CD bit is on in the original request, then we don't bother to
* validate anything.*/
/* If the CD bit is on in the original request, then you could think
* that we don't bother to validate anything.
* But this is signalled internally with the valrec flag.
* User queries are validated with BIT_CD to make our cache clean
* so that bogus messages get retried by the upstream also for
* downstream validators that set BIT_CD.
* For DNS64 bit_cd signals no dns64 processing, but we want to
* provide validation there too */
/*
if(qstate->query_flags & BIT_CD) {
verbose(VERB_ALGO, "not validating response due to CD bit");
return 0;
}
*/
if(qstate->is_valrec) {
verbose(VERB_ALGO, "not validating response, is valrec"
"(validation recursion lookup)");
return 0;
}
if(ret_rc != LDNS_RCODE_NOERROR || !ret_msg)
rcode = ret_rc;
@ -351,14 +364,20 @@ generate_request(struct module_qstate* qstate, int id, uint8_t* name,
struct val_qstate* vq = (struct val_qstate*)qstate->minfo[id];
struct module_qstate* newq;
struct query_info ask;
int valrec;
ask.qname = name;
ask.qname_len = namelen;
ask.qtype = qtype;
ask.qclass = qclass;
log_query_info(VERB_ALGO, "generate request", &ask);
fptr_ok(fptr_whitelist_modenv_attach_sub(qstate->env->attach_sub));
/* enable valrec flag to avoid recursion to the same validation
* routine, this lookup is simply a lookup. DLVs need validation */
if(qtype == LDNS_RR_TYPE_DLV)
valrec = 0;
else valrec = 1;
if(!(*qstate->env->attach_sub)(qstate, &ask,
(uint16_t)(BIT_RD|flags), 0, &newq)){
(uint16_t)(BIT_RD|flags), 0, valrec, &newq)){
log_err("Could not generate request: out of memory");
return 0;
}
@ -2005,14 +2024,16 @@ processFinished(struct module_qstate* qstate, struct val_qstate* vq,
/* if secure, this will override cache anyway, no need
* to check if from parentNS */
if(!dns_cache_store(qstate->env, &vq->orig_msg->qinfo,
vq->orig_msg->rep, 0, qstate->prefetch_leeway, 0, NULL)) {
vq->orig_msg->rep, 0, qstate->prefetch_leeway, 0, NULL,
qstate->query_flags)) {
log_err("out of memory caching validator results");
}
} else {
/* for a referral, store the verified RRsets */
/* and this does not get prefetched, so no leeway */
if(!dns_cache_store(qstate->env, &vq->orig_msg->qinfo,
vq->orig_msg->rep, 1, 0, 0, NULL)) {
vq->orig_msg->rep, 1, 0, 0, NULL,
qstate->query_flags)) {
log_err("out of memory caching validator results");
}
}