/* val_neg.c revision 249141 */
/*
 * validator/val_neg.c - validator aggressive negative caching functions.
 *
 * Copyright (c) 2008, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains helper functions for the validator module.
 * The functions help with aggressive negative caching.
 * This creates new denials of existence, and proofs for absence of types
 * from cached NSEC records.
 */
#include "config.h"
#ifdef HAVE_OPENSSL_SSL_H
#include "openssl/ssl.h"
#define NSEC3_SHA_LEN SHA_DIGEST_LENGTH
#else
#define NSEC3_SHA_LEN 20
#endif
#include "validator/val_neg.h"
#include "validator/val_nsec.h"
#include "validator/val_nsec3.h"
#include "validator/val_utils.h"
#include "util/data/dname.h"
#include "util/data/msgreply.h"
#include "util/log.h"
#include "util/net_help.h"
#include "util/config_file.h"
#include "services/cache/rrset.h"
#include "services/cache/dns.h"

int val_neg_data_compare(const void* a, const void* b)
{
	struct val_neg_data* x = (struct val_neg_data*)a;
	struct val_neg_data* y = (struct val_neg_data*)b;
	int m;
	return dname_canon_lab_cmp(x->name, x->labs, y->name, y->labs, &m);
}

int val_neg_zone_compare(const void* a, const void* b)
{
	struct val_neg_zone* x = (struct val_neg_zone*)a;
	struct val_neg_zone* y = (struct val_neg_zone*)b;
	int m;
	if(x->dclass != y->dclass) {
		if(x->dclass < y->dclass)
			return -1;
		return 1;
	}
	return dname_canon_lab_cmp(x->name, x->labs, y->name, y->labs, &m);
}

struct val_neg_cache* val_neg_create(struct config_file* cfg, size_t maxiter)
{
	struct val_neg_cache* neg = (struct val_neg_cache*)calloc(1,
		sizeof(*neg));
	if(!neg) {
		log_err("Could not create neg cache: out of memory");
		return NULL;
	}
	neg->nsec3_max_iter = maxiter;
	neg->max = 1024*1024; /* 1 M is thousands of entries */
	if(cfg) neg->max = cfg->neg_cache_size;
	rbtree_init(&neg->tree, &val_neg_zone_compare);
	lock_basic_init(&neg->lock);
	lock_protect(&neg->lock, neg, sizeof(*neg));
	return neg;
}

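/*
 * A minimal usage sketch (illustrative only, not part of this module):
 * the negative cache is created once at startup and deleted at exit;
 * the addreply/getmsg entry points below take neg->lock internally.
 * The maxiter value of 150 is only an example.
 *
 *	struct val_neg_cache* neg = val_neg_create(cfg, 150);
 *	if(!neg)
 *		log_err("out of memory");
 *	...
 *	val_neg_addreply(neg, rep);   (cache secure NSECs from a reply)
 *	...
 *	neg_cache_delete(neg);        (frees all zones and data elements)
 */
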
size_t val_neg_get_mem(struct val_neg_cache* neg)
{
	size_t result;
	lock_basic_lock(&neg->lock);
	result = sizeof(*neg) + neg->use;
	lock_basic_unlock(&neg->lock);
	return result;
}

/** clear datas on cache deletion */
static void
neg_clear_datas(rbnode_t* n, void* ATTR_UNUSED(arg))
{
	struct val_neg_data* d = (struct val_neg_data*)n;
	free(d->name);
	free(d);
}

/** clear zones on cache deletion */
static void
neg_clear_zones(rbnode_t* n, void* ATTR_UNUSED(arg))
{
	struct val_neg_zone* z = (struct val_neg_zone*)n;
	/* delete all the rrset entries in the tree */
	traverse_postorder(&z->tree, &neg_clear_datas, NULL);
	free(z->nsec3_salt);
	free(z->name);
	free(z);
}

void neg_cache_delete(struct val_neg_cache* neg)
{
	if(!neg) return;
	lock_basic_destroy(&neg->lock);
	/* delete all the zones in the tree */
	traverse_postorder(&neg->tree, &neg_clear_zones, NULL);
	free(neg);
}

/**
 * Put data element at the front of the LRU list.
 * @param neg: negative cache with LRU start and end.
 * @param data: this data is fronted.
 */
static void neg_lru_front(struct val_neg_cache* neg,
	struct val_neg_data* data)
{
	data->prev = NULL;
	data->next = neg->first;
	if(!neg->first)
		neg->last = data;
	else	neg->first->prev = data;
	neg->first = data;
}

/**
 * Remove data element from LRU list.
 * @param neg: negative cache with LRU start and end.
 * @param data: this data is removed from the list.
 */
static void neg_lru_remove(struct val_neg_cache* neg,
	struct val_neg_data* data)
{
	if(data->prev)
		data->prev->next = data->next;
	else	neg->first = data->next;
	if(data->next)
		data->next->prev = data->prev;
	else	neg->last = data->prev;
}

/**
 * Touch LRU for data element, put it at the start of the LRU list.
 * @param neg: negative cache with LRU start and end.
 * @param data: this data is used.
 */
static void neg_lru_touch(struct val_neg_cache* neg,
	struct val_neg_data* data)
{
	if(data == neg->first)
		return; /* nothing to do */
	/* remove from current lru position */
	neg_lru_remove(neg, data);
	/* add at front */
	neg_lru_front(neg, data);
}

/**
 * Delete a zone element from the negative cache.
 * May delete other zone elements to keep tree coherent, or
 * only mark the element as 'not in use'.
 * @param neg: negative cache.
 * @param z: zone element to delete.
 */
static void neg_delete_zone(struct val_neg_cache* neg, struct val_neg_zone* z)
{
	struct val_neg_zone* p, *np;
	if(!z) return;
	log_assert(z->in_use);
	log_assert(z->count > 0);
	z->in_use = 0;

	/* go up the tree and reduce counts */
	p = z;
	while(p) {
		log_assert(p->count > 0);
		p->count --;
		p = p->parent;
	}

	/* remove zones with zero count */
	p = z;
	while(p && p->count == 0) {
		np = p->parent;
		(void)rbtree_delete(&neg->tree, &p->node);
		neg->use -= p->len + sizeof(*p);
		free(p->nsec3_salt);
		free(p->name);
		free(p);
		p = np;
	}
}

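/*
 * Note on the counts used above and in neg_delete_data below: every tree
 * node keeps in ->count the number of in_use elements in its subtree
 * (itself included), because taking an element into use increments the
 * count of the element and of all its parents.  Deleting an element
 * therefore decrements the whole ancestor chain, and any node whose count
 * drops to zero only existed to connect in_use descendants and can be
 * freed.  For example, if only a.b.example.com. is in use, then
 * b.example.com. and example.com. each have count 1, and deleting the
 * leaf releases the entire chain.
 */
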
void neg_delete_data(struct val_neg_cache* neg, struct val_neg_data* el)
{
	struct val_neg_zone* z;
	struct val_neg_data* p, *np;
	if(!el) return;
	z = el->zone;
	log_assert(el->in_use);
	log_assert(el->count > 0);
	el->in_use = 0;

	/* remove it from the lru list */
	neg_lru_remove(neg, el);

	/* go up the tree and reduce counts */
	p = el;
	while(p) {
		log_assert(p->count > 0);
		p->count --;
		p = p->parent;
	}

	/* delete 0 count items from tree */
	p = el;
	while(p && p->count == 0) {
		np = p->parent;
		(void)rbtree_delete(&z->tree, &p->node);
		neg->use -= p->len + sizeof(*p);
		free(p->name);
		free(p);
		p = np;
	}

	/* check if the zone is now unused */
	if(z->tree.count == 0) {
		neg_delete_zone(neg, z);
	}
}

/**
 * Create more space in negative cache
 * The oldest elements are deleted until enough space is present.
 * Empty zones are deleted.
 * @param neg: negative cache.
 * @param need: how many bytes are needed.
 */
static void neg_make_space(struct val_neg_cache* neg, size_t need)
{
	/* delete elements until enough space or the cache is empty */
	while(neg->last && neg->max < neg->use + need) {
		neg_delete_data(neg, neg->last);
	}
}

struct val_neg_zone* neg_find_zone(struct val_neg_cache* neg,
	uint8_t* nm, size_t len, uint16_t dclass)
{
	struct val_neg_zone lookfor;
	struct val_neg_zone* result;
	lookfor.node.key = &lookfor;
	lookfor.name = nm;
	lookfor.len = len;
	lookfor.labs = dname_count_labels(lookfor.name);
	lookfor.dclass = dclass;

	result = (struct val_neg_zone*)
		rbtree_search(&neg->tree, lookfor.node.key);
	return result;
}

/**
 * Find the given data
 * @param zone: negative zone
 * @param nm: what to look for.
 * @param len: length of nm
 * @param labs: labels in nm
 * @return data or NULL if not found.
 */
static struct val_neg_data* neg_find_data(struct val_neg_zone* zone,
	uint8_t* nm, size_t len, int labs)
{
	struct val_neg_data lookfor;
	struct val_neg_data* result;
	lookfor.node.key = &lookfor;
	lookfor.name = nm;
	lookfor.len = len;
	lookfor.labs = labs;

	result = (struct val_neg_data*)
		rbtree_search(&zone->tree, lookfor.node.key);
	return result;
}

/**
 * Calculate space needed for the data and all its parents
 * @param rep: NSEC entries.
 * @return size.
 */
static size_t calc_data_need(struct reply_info* rep)
{
	uint8_t* d;
	size_t i, len, res = 0;

	for(i=rep->an_numrrsets; i<rep->an_numrrsets+rep->ns_numrrsets; i++) {
		if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC) {
			d = rep->rrsets[i]->rk.dname;
			len = rep->rrsets[i]->rk.dname_len;
			res = sizeof(struct val_neg_data) + len;
			while(!dname_is_root(d)) {
				log_assert(len > 1); /* not root label */
				dname_remove_label(&d, &len);
				res += sizeof(struct val_neg_data) + len;
			}
		}
	}
	return res;
}

/**
 * Calculate space needed for zone and all its parents
 * @param d: name of zone
 * @param len: length of name
 * @return size.
 */
static size_t calc_zone_need(uint8_t* d, size_t len)
{
	size_t res = sizeof(struct val_neg_zone) + len;
	while(!dname_is_root(d)) {
		log_assert(len > 1); /* not root label */
		dname_remove_label(&d, &len);
		res += sizeof(struct val_neg_zone) + len;
	}
	return res;
}

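/*
 * Worked example for calc_zone_need (wire-format name lengths): for
 * "www.example.com." (len 17) the loop accounts for the name itself and
 * for every parent that may have to be created with it:
 * "www.example.com." (17), "example.com." (13), "com." (5) and the root
 * "." (1), so the estimate is 4*sizeof(struct val_neg_zone) + 36 bytes.
 * calc_data_need above does the same per NSEC owner name.
 */
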
/**
 * Find closest existing parent zone of the given name.
 * @param neg: negative cache.
 * @param nm: name to look for
 * @param nm_len: length of nm
 * @param labs: labelcount of nm.
 * @param qclass: class.
 * @return the zone or NULL if none found.
 */
static struct val_neg_zone* neg_closest_zone_parent(struct val_neg_cache* neg,
	uint8_t* nm, size_t nm_len, int labs, uint16_t qclass)
{
	struct val_neg_zone key;
	struct val_neg_zone* result;
	rbnode_t* res = NULL;
	key.node.key = &key;
	key.name = nm;
	key.len = nm_len;
	key.labs = labs;
	key.dclass = qclass;
	if(rbtree_find_less_equal(&neg->tree, &key, &res)) {
		/* exact match */
		result = (struct val_neg_zone*)res;
	} else {
		/* smaller element (or no element) */
		int m;
		result = (struct val_neg_zone*)res;
		if(!result || result->dclass != qclass)
			return NULL;
		/* count number of labels matched */
		(void)dname_lab_cmp(result->name, result->labs, key.name,
			key.labs, &m);
		while(result) { /* go up until qname is subdomain of stub */
			if(result->labs <= m)
				break;
			result = result->parent;
		}
	}
	return result;
}

/**
 * Find closest existing parent data for the given name.
 * @param zone: to look in.
 * @param nm: name to look for
 * @param nm_len: length of nm
 * @param labs: labelcount of nm.
 * @return the data or NULL if none found.
 */
static struct val_neg_data* neg_closest_data_parent(
	struct val_neg_zone* zone, uint8_t* nm, size_t nm_len, int labs)
{
	struct val_neg_data key;
	struct val_neg_data* result;
	rbnode_t* res = NULL;
	key.node.key = &key;
	key.name = nm;
	key.len = nm_len;
	key.labs = labs;
	if(rbtree_find_less_equal(&zone->tree, &key, &res)) {
		/* exact match */
		result = (struct val_neg_data*)res;
	} else {
		/* smaller element (or no element) */
		int m;
		result = (struct val_neg_data*)res;
		if(!result)
			return NULL;
		/* count number of labels matched */
		(void)dname_lab_cmp(result->name, result->labs, key.name,
			key.labs, &m);
		while(result) { /* go up until qname is subdomain of stub */
			if(result->labs <= m)
				break;
			result = result->parent;
		}
	}
	return result;
}

/**
 * Create a single zone node
 * @param nm: name for zone (copied)
 * @param nm_len: length of name
 * @param labs: labels in name.
 * @param dclass: class of zone, host order.
 * @return new zone or NULL on failure
 */
static struct val_neg_zone* neg_setup_zone_node(
	uint8_t* nm, size_t nm_len, int labs, uint16_t dclass)
{
	struct val_neg_zone* zone =
		(struct val_neg_zone*)calloc(1, sizeof(*zone));
	if(!zone) {
		return NULL;
	}
	zone->node.key = zone;
	zone->name = memdup(nm, nm_len);
	if(!zone->name) {
		free(zone);
		return NULL;
	}
	zone->len = nm_len;
	zone->labs = labs;
	zone->dclass = dclass;

	rbtree_init(&zone->tree, &val_neg_data_compare);
	return zone;
}

/**
 * Create a linked list of parent zones, starting at longname ending on
 * the parent (can be NULL, creates to the root).
 * @param nm: name for lowest in chain
 * @param nm_len: length of name
 * @param labs: labels in name.
 * @param dclass: class of zone.
 * @param parent: NULL for to root, else so it fits under here.
 * @return zone; a chain of zones and their parents up to the parent.
 *  	or NULL on malloc failure
 */
static struct val_neg_zone* neg_zone_chain(
	uint8_t* nm, size_t nm_len, int labs, uint16_t dclass,
	struct val_neg_zone* parent)
{
	int i;
	int tolabs = parent?parent->labs:0;
	struct val_neg_zone* zone, *prev = NULL, *first = NULL;

	/* create the new subtree, i is labelcount of current creation */
	/* this creates a 'first' to z->parent=NULL list of zones */
	for(i=labs; i!=tolabs; i--) {
		/* create new item */
		zone = neg_setup_zone_node(nm, nm_len, i, dclass);
		if(!zone) {
			/* need to delete other allocations in this routine!*/
			struct val_neg_zone* p=first, *np;
			while(p) {
				np = p->parent;
				free(p->name);
				free(p);
				p = np;
			}
			return NULL;
		}
		if(i == labs) {
			first = zone;
		} else {
			prev->parent = zone;
		}
		/* prepare for next name */
		prev = zone;
		dname_remove_label(&nm, &nm_len);
	}
	return first;
}

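/*
 * Illustration (assumed names): with nm = "www.example.org." (4 labels)
 * and parent = the existing node for "org." (2 labels), the loop above
 * creates nodes for "www.example.org." and "example.org." only, linked
 * through ->parent; the caller, neg_create_zone, later points the last
 * created node at the existing "org." parent and inserts all new nodes
 * into the tree.
 */
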
void val_neg_zone_take_inuse(struct val_neg_zone* zone)
{
	if(!zone->in_use) {
		struct val_neg_zone* p;
		zone->in_use = 1;
		/* increase usage count of all parents */
		for(p=zone; p; p = p->parent) {
			p->count++;
		}
	}
}

struct val_neg_zone* neg_create_zone(struct val_neg_cache* neg,
	uint8_t* nm, size_t nm_len, uint16_t dclass)
{
	struct val_neg_zone* zone;
	struct val_neg_zone* parent;
	struct val_neg_zone* p, *np;
	int labs = dname_count_labels(nm);

	/* find closest enclosing parent zone that (still) exists */
	parent = neg_closest_zone_parent(neg, nm, nm_len, labs, dclass);
	if(parent && query_dname_compare(parent->name, nm) == 0)
		return parent; /* already exists, weird */
	/* if parent exists, it is in use */
	log_assert(!parent || parent->count > 0);
	zone = neg_zone_chain(nm, nm_len, labs, dclass, parent);
	if(!zone) {
		return NULL;
	}

	/* insert the list of zones into the tree */
	p = zone;
	while(p) {
		np = p->parent;
		/* mem use */
		neg->use += sizeof(struct val_neg_zone) + p->len;
		/* insert in tree */
		(void)rbtree_insert(&neg->tree, &p->node);
		/* last one needs proper parent pointer */
		if(np == NULL)
			p->parent = parent;
		p = np;
	}
	return zone;
}

/** find zone name of message, returns the SOA record */
static struct ub_packed_rrset_key* reply_find_soa(struct reply_info* rep)
{
	size_t i;
	for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
		if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_SOA)
			return rep->rrsets[i];
	}
	return NULL;
}

/** see if the reply has NSEC records worthy of caching */
static int reply_has_nsec(struct reply_info* rep)
{
	size_t i;
	struct packed_rrset_data* d;
	if(rep->security != sec_status_secure)
		return 0;
	for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
		if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC) {
			d = (struct packed_rrset_data*)rep->rrsets[i]->
				entry.data;
			if(d->security == sec_status_secure)
				return 1;
		}
	}
	return 0;
}


/**
 * Create single node of data element.
 * @param nm: name (copied)
 * @param nm_len: length of name
 * @param labs: labels in name.
 * @return element with name nm, or NULL on malloc failure.
 */
static struct val_neg_data* neg_setup_data_node(
	uint8_t* nm, size_t nm_len, int labs)
{
	struct val_neg_data* el;
	el = (struct val_neg_data*)calloc(1, sizeof(*el));
	if(!el) {
		return NULL;
	}
	el->node.key = el;
	el->name = memdup(nm, nm_len);
	if(!el->name) {
		free(el);
		return NULL;
	}
	el->len = nm_len;
	el->labs = labs;
	return el;
}

/**
 * Create chain of data element and parents
 * @param nm: name
 * @param nm_len: length of name
 * @param labs: labels in name.
 * @param parent: up to where to make, if NULL up to root label.
 * @return lowest element with name nm, or NULL on malloc failure.
 */
static struct val_neg_data* neg_data_chain(
	uint8_t* nm, size_t nm_len, int labs, struct val_neg_data* parent)
{
	int i;
	int tolabs = parent?parent->labs:0;
	struct val_neg_data* el, *first = NULL, *prev = NULL;

	/* create the new subtree, i is labelcount of current creation */
	/* this creates a 'first' to el->parent=NULL list of data elements */
	for(i=labs; i!=tolabs; i--) {
		/* create new item */
		el = neg_setup_data_node(nm, nm_len, i);
		if(!el) {
			/* need to delete other allocations in this routine!*/
			struct val_neg_data* p = first, *np;
			while(p) {
				np = p->parent;
				free(p->name);
				free(p);
				p = np;
			}
			return NULL;
		}
		if(i == labs) {
			first = el;
		} else {
			prev->parent = el;
		}

		/* prepare for next name */
		prev = el;
		dname_remove_label(&nm, &nm_len);
	}
	return first;
}

/**
 * Remove NSEC records between start and end points.
 * The tree is walked in canonical order, so the elements between the
 * two points are visited in sorted order.
 * @param neg: negative cache.
 * @param zone: the zone
 * @param el: element to start walking at.
 * @param nsec: the nsec record with the end point
 */
static void wipeout(struct val_neg_cache* neg, struct val_neg_zone* zone,
	struct val_neg_data* el, struct ub_packed_rrset_key* nsec)
{
	struct packed_rrset_data* d = (struct packed_rrset_data*)nsec->
		entry.data;
	uint8_t* end;
	size_t end_len;
	int end_labs, m;
	rbnode_t* walk, *next;
	struct val_neg_data* cur;
	uint8_t buf[257];
	/* get endpoint */
	if(!d || d->count == 0 || d->rr_len[0] < 2+1)
		return;
	if(ntohs(nsec->rk.type) == LDNS_RR_TYPE_NSEC) {
		end = d->rr_data[0]+2;
		end_len = dname_valid(end, d->rr_len[0]-2);
		end_labs = dname_count_labels(end);
	} else {
		/* NSEC3 */
		if(!nsec3_get_nextowner_b32(nsec, 0, buf, sizeof(buf)))
			return;
		end = buf;
		end_labs = dname_count_size_labels(end, &end_len);
	}

	/* sanity check, both owner and end must be below the zone apex */
	if(!dname_subdomain_c(el->name, zone->name) ||
		!dname_subdomain_c(end, zone->name))
		return;

	/* detect end of zone NSEC ; wipe until the end of zone */
	if(query_dname_compare(end, zone->name) == 0) {
		end = NULL;
	}

	walk = rbtree_next(&el->node);
	while(walk && walk != RBTREE_NULL) {
		cur = (struct val_neg_data*)walk;
		/* sanity check: must be larger than start */
		if(dname_canon_lab_cmp(cur->name, cur->labs,
			el->name, el->labs, &m) <= 0) {
			/* r == 0 skip original record. */
			/* r < 0  too small! */
			walk = rbtree_next(walk);
			continue;
		}
		/* stop at endpoint, also data at empty nonterminals must be
		 * removed (no NSECs there) so everything between
		 * start and end */
		if(end && dname_canon_lab_cmp(cur->name, cur->labs,
			end, end_labs, &m) >= 0) {
			break;
		}
		/* this element has to be deleted, but we cannot do it
		 * now, because we are walking the tree still ... */
		/* get the next element: */
		next = rbtree_next(walk);
		/* now delete the original element, this may trigger
		 * rbtree rebalances, but really, the next element is
		 * the one we need.
		 * But it may trigger delete of other data and the
		 * entire zone. However, if that happens, this is done
		 * by deleting the *parents* of the element for deletion,
		 * and maybe also the entire zone if it is empty.
		 * But parents are smaller in canonical compare, thus,
		 * if a larger element exists, then it is not a parent,
		 * it cannot get deleted, the zone cannot get empty.
		 * If the next==NULL, then zone can be empty. */
		if(cur->in_use)
			neg_delete_data(neg, cur);
		walk = next;
	}
}

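/*
 * Example of the wipeout above (assumed names): if the zone tree holds
 * a.example.com., b.example.com. and d.example.com., and an NSEC with
 * owner a.example.com. and next name d.example.com. is inserted, then
 * b.example.com. lies strictly between the two points and is removed;
 * the new NSEC proves that nothing exists in that span, so older cached
 * elements inside it are stale.
 */
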
void neg_insert_data(struct val_neg_cache* neg,
	struct val_neg_zone* zone, struct ub_packed_rrset_key* nsec)
{
	struct packed_rrset_data* d;
	struct val_neg_data* parent;
	struct val_neg_data* el;
	uint8_t* nm = nsec->rk.dname;
	size_t nm_len = nsec->rk.dname_len;
	int labs = dname_count_labels(nsec->rk.dname);

	d = (struct packed_rrset_data*)nsec->entry.data;
	if( !(d->security == sec_status_secure ||
		(d->security == sec_status_unchecked && d->rrsig_count > 0)))
		return;
	log_nametypeclass(VERB_ALGO, "negcache rr",
		nsec->rk.dname, ntohs(nsec->rk.type),
		ntohs(nsec->rk.rrset_class));

	/* find closest enclosing parent data that (still) exists */
	parent = neg_closest_data_parent(zone, nm, nm_len, labs);
	if(parent && query_dname_compare(parent->name, nm) == 0) {
		/* perfect match already exists */
		log_assert(parent->count > 0);
		el = parent;
	} else {
		struct val_neg_data* p, *np;

		/* create subtree for perfect match */
		/* if parent exists, it is in use */
		log_assert(!parent || parent->count > 0);

		el = neg_data_chain(nm, nm_len, labs, parent);
		if(!el) {
			log_err("out of memory inserting NSEC negative cache");
			return;
		}
		el->in_use = 0; /* set on below */

		/* insert the list of data elements into the tree */
		p = el;
		while(p) {
			np = p->parent;
			/* mem use */
			neg->use += sizeof(struct val_neg_data) + p->len;
			/* insert in tree */
			p->zone = zone;
			(void)rbtree_insert(&zone->tree, &p->node);
			/* last one needs proper parent pointer */
			if(np == NULL)
				p->parent = parent;
			p = np;
		}
	}

	if(!el->in_use) {
		struct val_neg_data* p;

		el->in_use = 1;
		/* increase usage count of all parents */
		for(p=el; p; p = p->parent) {
			p->count++;
		}

		neg_lru_front(neg, el);
	} else {
		/* in use, bring to front, lru */
		neg_lru_touch(neg, el);
	}

	/* if nsec3 store last used parameters */
	if(ntohs(nsec->rk.type) == LDNS_RR_TYPE_NSEC3) {
		int h;
		uint8_t* s;
		size_t slen, it;
		if(nsec3_get_params(nsec, 0, &h, &it, &s, &slen) &&
			it <= neg->nsec3_max_iter &&
			(h != zone->nsec3_hash || it != zone->nsec3_iter ||
			slen != zone->nsec3_saltlen ||
			memcmp(zone->nsec3_salt, s, slen) != 0)) {
			uint8_t* sa = memdup(s, slen);
			if(sa) {
				free(zone->nsec3_salt);
				zone->nsec3_salt = sa;
				zone->nsec3_saltlen = slen;
				zone->nsec3_hash = h;
				zone->nsec3_iter = it;
			}
		}
	}

	/* wipe out the cache items between NSEC start and end */
	wipeout(neg, zone, el, nsec);
}

void val_neg_addreply(struct val_neg_cache* neg, struct reply_info* rep)
{
	size_t i, need;
	struct ub_packed_rrset_key* soa;
	struct val_neg_zone* zone;
	/* see if secure nsecs inside */
	if(!reply_has_nsec(rep))
		return;
	/* find the zone name in message */
	soa = reply_find_soa(rep);
	if(!soa)
		return;

	log_nametypeclass(VERB_ALGO, "negcache insert for zone",
		soa->rk.dname, LDNS_RR_TYPE_SOA, ntohs(soa->rk.rrset_class));

	/* ask for enough space to store all of it */
	need = calc_data_need(rep) +
		calc_zone_need(soa->rk.dname, soa->rk.dname_len);
	lock_basic_lock(&neg->lock);
	neg_make_space(neg, need);

	/* find or create the zone entry */
	zone = neg_find_zone(neg, soa->rk.dname, soa->rk.dname_len,
		ntohs(soa->rk.rrset_class));
	if(!zone) {
		if(!(zone = neg_create_zone(neg, soa->rk.dname,
			soa->rk.dname_len, ntohs(soa->rk.rrset_class)))) {
			lock_basic_unlock(&neg->lock);
			log_err("out of memory adding negative zone");
			return;
		}
	}
	val_neg_zone_take_inuse(zone);

	/* insert the NSECs */
	for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
		if(ntohs(rep->rrsets[i]->rk.type) != LDNS_RR_TYPE_NSEC)
			continue;
		if(!dname_subdomain_c(rep->rrsets[i]->rk.dname,
			zone->name)) continue;
		/* insert NSEC into this zone's tree */
		neg_insert_data(neg, zone, rep->rrsets[i]);
	}
	if(zone->tree.count == 0) {
		/* remove empty zone if inserts failed */
		neg_delete_zone(neg, zone);
	}
	lock_basic_unlock(&neg->lock);
}

/**
 * Lookup closest data record. For NSEC denial.
 * @param zone: zone to look in
 * @param qname: name to look for.
 * @param len: length of name
 * @param labs: labels in name
 * @param data: data element, exact or smaller or NULL
 * @return true if exact match.
 */
static int neg_closest_data(struct val_neg_zone* zone,
	uint8_t* qname, size_t len, int labs, struct val_neg_data** data)
{
	struct val_neg_data key;
	rbnode_t* r;
	key.node.key = &key;
	key.name = qname;
	key.len = len;
	key.labs = labs;
	if(rbtree_find_less_equal(&zone->tree, &key, &r)) {
		/* exact match */
		*data = (struct val_neg_data*)r;
		return 1;
	} else {
		/* smaller match */
		*data = (struct val_neg_data*)r;
		return 0;
	}
}

int val_neg_dlvlookup(struct val_neg_cache* neg, uint8_t* qname, size_t len,
        uint16_t qclass, struct rrset_cache* rrset_cache, uint32_t now)
{
	/* lookup closest zone */
	struct val_neg_zone* zone;
	struct val_neg_data* data;
	int labs;
	struct ub_packed_rrset_key* nsec;
	struct packed_rrset_data* d;
	uint32_t flags;
	uint8_t* wc;
	struct query_info qinfo;
	if(!neg) return 0;

	log_nametypeclass(VERB_ALGO, "negcache dlvlookup", qname,
		LDNS_RR_TYPE_DLV, qclass);

	labs = dname_count_labels(qname);
	lock_basic_lock(&neg->lock);
	zone = neg_closest_zone_parent(neg, qname, len, labs, qclass);
	while(zone && !zone->in_use)
		zone = zone->parent;
	if(!zone) {
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	log_nametypeclass(VERB_ALGO, "negcache zone", zone->name, 0,
		zone->dclass);

	/* DLV is defined to use NSEC only */
	if(zone->nsec3_hash) {
		lock_basic_unlock(&neg->lock);
		return 0;
	}

	/* lookup closest data record */
	(void)neg_closest_data(zone, qname, len, labs, &data);
	while(data && !data->in_use)
		data = data->parent;
	if(!data) {
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	log_nametypeclass(VERB_ALGO, "negcache rr", data->name,
		LDNS_RR_TYPE_NSEC, zone->dclass);

	/* lookup rrset in rrset cache */
	flags = 0;
	if(query_dname_compare(data->name, zone->name) == 0)
		flags = PACKED_RRSET_NSEC_AT_APEX;
	nsec = rrset_cache_lookup(rrset_cache, data->name, data->len,
		LDNS_RR_TYPE_NSEC, zone->dclass, flags, now, 0);

	/* check if secure and TTL ok */
	if(!nsec) {
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	d = (struct packed_rrset_data*)nsec->entry.data;
	if(!d || now > d->ttl) {
		lock_rw_unlock(&nsec->entry.lock);
		/* delete data record if expired */
		neg_delete_data(neg, data);
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	if(d->security != sec_status_secure) {
		lock_rw_unlock(&nsec->entry.lock);
		neg_delete_data(neg, data);
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	verbose(VERB_ALGO, "negcache got secure rrset");

	/* check NSEC security */
	/* check if NSEC proves no DLV type exists */
	/* check if NSEC proves NXDOMAIN for qname */
	qinfo.qname = qname;
	qinfo.qtype = LDNS_RR_TYPE_DLV;
	qinfo.qclass = qclass;
	if(!nsec_proves_nodata(nsec, &qinfo, &wc) &&
		!val_nsec_proves_name_error(nsec, qname)) {
		/* the NSEC is not a denial for the DLV */
		lock_rw_unlock(&nsec->entry.lock);
		lock_basic_unlock(&neg->lock);
		verbose(VERB_ALGO, "negcache not proven");
		return 0;
	}
	/* so the NSEC was a NODATA proof, or NXDOMAIN proof. */

	/* no need to check for wildcard NSEC; no wildcards in DLV repos */
	/* no need to lookup SOA record for client; no response message */

	lock_rw_unlock(&nsec->entry.lock);
	/* if OK touch the LRU for neg_data element */
	neg_lru_touch(neg, data);
	lock_basic_unlock(&neg->lock);
	verbose(VERB_ALGO, "negcache DLV denial proven");
	return 1;
}

/** see if the reply has signed NSEC records and return the signer */
static uint8_t* reply_nsec_signer(struct reply_info* rep, size_t* signer_len,
	uint16_t* dclass)
{
	size_t i;
	struct packed_rrset_data* d;
	uint8_t* s;
	for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
		if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC ||
			ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC3) {
			d = (struct packed_rrset_data*)rep->rrsets[i]->
				entry.data;
			/* return first signer name of first NSEC */
			if(d->rrsig_count != 0) {
				val_find_rrset_signer(rep->rrsets[i],
					&s, signer_len);
				if(s && *signer_len) {
					*dclass = ntohs(rep->rrsets[i]->
						rk.rrset_class);
					return s;
				}
			}
		}
	}
	return 0;
}

void val_neg_addreferral(struct val_neg_cache* neg, struct reply_info* rep,
	uint8_t* zone_name)
{
	size_t i, need;
	uint8_t* signer;
	size_t signer_len;
	uint16_t dclass;
	struct val_neg_zone* zone;
	/* no SOA in this message, find RRSIG over NSEC's signer name.
	 * note the NSEC records are maybe not validated yet */
	signer = reply_nsec_signer(rep, &signer_len, &dclass);
	if(!signer)
		return;
	if(!dname_subdomain_c(signer, zone_name)) {
		/* the signer is not in the bailiwick, throw it out */
		return;
	}

	log_nametypeclass(VERB_ALGO, "negcache insert referral ",
		signer, LDNS_RR_TYPE_NS, dclass);

	/* ask for enough space to store all of it */
	need = calc_data_need(rep) + calc_zone_need(signer, signer_len);
	lock_basic_lock(&neg->lock);
	neg_make_space(neg, need);

	/* find or create the zone entry */
	zone = neg_find_zone(neg, signer, signer_len, dclass);
	if(!zone) {
		if(!(zone = neg_create_zone(neg, signer, signer_len,
			dclass))) {
			lock_basic_unlock(&neg->lock);
			log_err("out of memory adding negative zone");
			return;
		}
	}
	val_neg_zone_take_inuse(zone);

	/* insert the NSECs */
	for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
		if(ntohs(rep->rrsets[i]->rk.type) != LDNS_RR_TYPE_NSEC &&
			ntohs(rep->rrsets[i]->rk.type) != LDNS_RR_TYPE_NSEC3)
			continue;
		if(!dname_subdomain_c(rep->rrsets[i]->rk.dname,
			zone->name)) continue;
		/* insert NSEC into this zone's tree */
		neg_insert_data(neg, zone, rep->rrsets[i]);
	}
	if(zone->tree.count == 0) {
		/* remove empty zone if inserts failed */
		neg_delete_zone(neg, zone);
	}
	lock_basic_unlock(&neg->lock);
}

/**
 * Check that an NSEC3 rrset does not have a type set.
 * None of the nsec3s in a hash-collision are allowed to have the type.
 * (Since we do not know which of the colliding nsec3s is the one that was
 * looked up, nor its flags, we ignore the cached item and let it bypass
 * negative caching.)
 * @param k: the nsec3 rrset to check.
 * @param t: type to check
 * @return true if no RRs have the type.
 */
static int nsec3_no_type(struct ub_packed_rrset_key* k, uint16_t t)
{
	int count = (int)((struct packed_rrset_data*)k->entry.data)->count;
	int i;
	for(i=0; i<count; i++)
		if(nsec3_has_type(k, i, t))
			return 0;
	return 1;
}

/**
 * See if rrset exists in rrset cache.
 * If it does, the bit is checked, and if not expired, it is returned
 * allocated in region.
 * @param rrset_cache: rrset cache
 * @param qname: to lookup rrset name
 * @param qname_len: length of qname.
 * @param qtype: type of rrset to lookup, host order
 * @param qclass: class of rrset to lookup, host order
 * @param flags: flags for rrset to lookup
 * @param region: where to alloc result
 * @param checkbit: if true, a bit in the nsec typemap is checked for absence.
 * @param checktype: which bit to check
 * @param now: to check ttl against
 * @return rrset or NULL
 */
static struct ub_packed_rrset_key*
grab_nsec(struct rrset_cache* rrset_cache, uint8_t* qname, size_t qname_len,
	uint16_t qtype, uint16_t qclass, uint32_t flags,
	struct regional* region, int checkbit, uint16_t checktype,
	uint32_t now)
{
	struct ub_packed_rrset_key* r, *k = rrset_cache_lookup(rrset_cache,
		qname, qname_len, qtype, qclass, flags, now, 0);
	struct packed_rrset_data* d;
	if(!k) return NULL;
	d = (struct packed_rrset_data*)k->entry.data;
	if(d->ttl < now) {
		lock_rw_unlock(&k->entry.lock);
		return NULL;
	}
	/* only secure or unchecked records that have signatures. */
	if( ! ( d->security == sec_status_secure ||
		(d->security == sec_status_unchecked &&
		d->rrsig_count > 0) ) ) {
		lock_rw_unlock(&k->entry.lock);
		return NULL;
	}
	/* check if checktype is absent */
	if(checkbit && (
		(qtype == LDNS_RR_TYPE_NSEC && nsec_has_type(k, checktype)) ||
		(qtype == LDNS_RR_TYPE_NSEC3 && !nsec3_no_type(k, checktype))
		)) {
		lock_rw_unlock(&k->entry.lock);
		return NULL;
	}
	/* looks OK! copy to region and return it */
	r = packed_rrset_copy_region(k, region, now);
	/* if it failed, we return the NULL */
	lock_rw_unlock(&k->entry.lock);
	return r;
}

/** find nsec3 closest encloser in neg cache */
static struct val_neg_data*
neg_find_nsec3_ce(struct val_neg_zone* zone, uint8_t* qname, size_t qname_len,
		int qlabs, ldns_buffer* buf, uint8_t* hashnc, size_t* nclen)
{
	struct val_neg_data* data;
	uint8_t hashce[NSEC3_SHA_LEN];
	uint8_t b32[257];
	size_t celen, b32len;

	*nclen = 0;
	while(qlabs > 0) {
		/* hash */
		if(!(celen=nsec3_get_hashed(buf, qname, qname_len,
			zone->nsec3_hash, zone->nsec3_iter, zone->nsec3_salt,
			zone->nsec3_saltlen, hashce, sizeof(hashce))))
			return NULL;
		if(!(b32len=nsec3_hash_to_b32(hashce, celen, zone->name,
			zone->len, b32, sizeof(b32))))
			return NULL;

		/* lookup (exact match only) */
		data = neg_find_data(zone, b32, b32len, zone->labs+1);
		if(data && data->in_use) {
			/* found ce match! */
			return data;
		}

		*nclen = celen;
		memmove(hashnc, hashce, celen);
		dname_remove_label(&qname, &qname_len);
		qlabs --;
	}
	return NULL;
}

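/*
 * Walk-through of the closest encloser search above (assumed names): for
 * qname a.b.example.org. in an NSEC3 zone example.org., the candidates
 * are hashed longest first: a.b.example.org., then b.example.org., then
 * example.org.  The first hash with an exact in_use match in the tree is
 * the closest encloser; hashnc then still holds the hash from the
 * previous, one label longer, iteration, which is the "next closer" name
 * the caller must show covered by an opt-out NSEC3.  If the very first
 * candidate matches, *nclen stays 0 and the caller treats it as an exact
 * match for the query name itself.
 */
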
/** check nsec3 parameters on nsec3 rrset with current zone values */
static int
neg_params_ok(struct val_neg_zone* zone, struct ub_packed_rrset_key* rrset)
{
	int h;
	uint8_t* s;
	size_t slen, it;
	if(!nsec3_get_params(rrset, 0, &h, &it, &s, &slen))
		return 0;
	return (h == zone->nsec3_hash && it == zone->nsec3_iter &&
		slen == zone->nsec3_saltlen &&
		memcmp(zone->nsec3_salt, s, slen) == 0);
}

/** get next closer for nsec3 proof */
static struct ub_packed_rrset_key*
neg_nsec3_getnc(struct val_neg_zone* zone, uint8_t* hashnc, size_t nclen,
	struct rrset_cache* rrset_cache, struct regional* region,
	uint32_t now, uint8_t* b32, size_t maxb32)
{
	struct ub_packed_rrset_key* nc_rrset;
	struct val_neg_data* data;
	size_t b32len;

	if(!(b32len=nsec3_hash_to_b32(hashnc, nclen, zone->name,
		zone->len, b32, maxb32)))
		return NULL;
	(void)neg_closest_data(zone, b32, b32len, zone->labs+1, &data);
	if(!data && zone->tree.count != 0) {
		/* could be before the first entry ; return the last
		 * entry (possibly the rollover nsec3 at end) */
		data = (struct val_neg_data*)rbtree_last(&zone->tree);
	}
	while(data && !data->in_use)
		data = data->parent;
	if(!data)
		return NULL;
	/* got a data element in tree, grab it */
	nc_rrset = grab_nsec(rrset_cache, data->name, data->len,
		LDNS_RR_TYPE_NSEC3, zone->dclass, 0, region, 0, 0, now);
	if(!nc_rrset)
		return NULL;
	if(!neg_params_ok(zone, nc_rrset))
		return NULL;
	return nc_rrset;
}

/** neg cache nsec3 proof procedure */
static struct dns_msg*
neg_nsec3_proof_ds(struct val_neg_zone* zone, uint8_t* qname, size_t qname_len,
		int qlabs, ldns_buffer* buf, struct rrset_cache* rrset_cache,
		struct regional* region, uint32_t now, uint8_t* topname)
{
	struct dns_msg* msg;
	struct val_neg_data* data;
	uint8_t hashnc[NSEC3_SHA_LEN];
	size_t nclen;
	struct ub_packed_rrset_key* ce_rrset, *nc_rrset;
	struct nsec3_cached_hash c;
	uint8_t nc_b32[257];

	/* for NSEC3 ; determine the closest encloser for which we
	 * can find an exact match. Remember the hashed lower name,
	 * since that is the one we need a closest match for.
	 * If we find a match straight away, then it becomes NODATA.
	 * Otherwise, NXDOMAIN or if OPTOUT, an insecure delegation.
	 * Also check that parameters are the same on closest encloser
	 * and on closest match.
	 */
	if(!zone->nsec3_hash)
		return NULL; /* not nsec3 zone */

	if(!(data=neg_find_nsec3_ce(zone, qname, qname_len, qlabs, buf,
		hashnc, &nclen))) {
		return NULL;
	}

	/* grab the ce rrset */
	ce_rrset = grab_nsec(rrset_cache, data->name, data->len,
		LDNS_RR_TYPE_NSEC3, zone->dclass, 0, region, 1,
		LDNS_RR_TYPE_DS, now);
	if(!ce_rrset)
		return NULL;
	if(!neg_params_ok(zone, ce_rrset))
		return NULL;

	if(nclen == 0) {
		/* exact match, just check the type bits */
		/* need: -SOA, -DS, +NS */
		if(nsec3_has_type(ce_rrset, 0, LDNS_RR_TYPE_SOA) ||
			nsec3_has_type(ce_rrset, 0, LDNS_RR_TYPE_DS) ||
			!nsec3_has_type(ce_rrset, 0, LDNS_RR_TYPE_NS))
			return NULL;
		if(!(msg = dns_msg_create(qname, qname_len,
			LDNS_RR_TYPE_DS, zone->dclass, region, 1)))
			return NULL;
		/* TTL reduced in grab_nsec */
		if(!dns_msg_authadd(msg, region, ce_rrset, 0))
			return NULL;
		return msg;
	}

	/* optout is not allowed without knowing the trust-anchor in use,
	 * otherwise the optout could spoof away that anchor */
	if(!topname)
		return NULL;

	/* if there is no exact match, it must be in an optout span
	 * (an existing DS implies an NSEC3 must exist) */
	nc_rrset = neg_nsec3_getnc(zone, hashnc, nclen, rrset_cache,
		region, now, nc_b32, sizeof(nc_b32));
	if(!nc_rrset)
		return NULL;
	if(!neg_params_ok(zone, nc_rrset))
		return NULL;
	if(!nsec3_has_optout(nc_rrset, 0))
		return NULL;
	c.hash = hashnc;
	c.hash_len = nclen;
	c.b32 = nc_b32+1;
	c.b32_len = (size_t)nc_b32[0];
	if(nsec3_covers(zone->name, &c, nc_rrset, 0, buf)) {
		/* nc_rrset covers the next closer name.
		 * ce_rrset equals a closer encloser.
		 * nc_rrset is optout.
		 * No need to check wildcard for type DS */
		/* capacity=3: ce + nc + soa(if needed) */
		if(!(msg = dns_msg_create(qname, qname_len,
			LDNS_RR_TYPE_DS, zone->dclass, region, 3)))
			return NULL;
		/* now=0 because TTL was reduced in grab_nsec */
		if(!dns_msg_authadd(msg, region, ce_rrset, 0))
			return NULL;
		if(!dns_msg_authadd(msg, region, nc_rrset, 0))
			return NULL;
		return msg;
	}
	return NULL;
}

/**
 * Add SOA record for external responses.
 * @param rrset_cache: to look into.
 * @param now: current time.
 * @param region: where to perform the allocation
 * @param msg: current msg with NSEC.
 * @param zone: val_neg_zone if we have one.
 * @return false on lookup or alloc failure.
 */
static int add_soa(struct rrset_cache* rrset_cache, uint32_t now,
	struct regional* region, struct dns_msg* msg, struct val_neg_zone* zone)
{
	struct ub_packed_rrset_key* soa;
	uint8_t* nm;
	size_t nmlen;
	uint16_t dclass;
	if(zone) {
		nm = zone->name;
		nmlen = zone->len;
		dclass = zone->dclass;
	} else {
		/* Assumes the signer is the zone SOA to add */
		nm = reply_nsec_signer(msg->rep, &nmlen, &dclass);
		if(!nm)
			return 0;
	}
	soa = rrset_cache_lookup(rrset_cache, nm, nmlen, LDNS_RR_TYPE_SOA,
		dclass, PACKED_RRSET_SOA_NEG, now, 0);
	if(!soa)
		return 0;
	if(!dns_msg_authadd(msg, region, soa, now)) {
		lock_rw_unlock(&soa->entry.lock);
		return 0;
	}
	lock_rw_unlock(&soa->entry.lock);
	return 1;
}

struct dns_msg*
val_neg_getmsg(struct val_neg_cache* neg, struct query_info* qinfo,
	struct regional* region, struct rrset_cache* rrset_cache,
	ldns_buffer* buf, uint32_t now, int addsoa, uint8_t* topname)
{
	struct dns_msg* msg;
	struct ub_packed_rrset_key* rrset;
	uint8_t* zname;
	size_t zname_len;
	int zname_labs;
	struct val_neg_zone* zone;

	/* only for DS queries */
	if(qinfo->qtype != LDNS_RR_TYPE_DS)
		return NULL;
	log_assert(!topname || dname_subdomain_c(qinfo->qname, topname));

	/* see if info from neg cache is available
	 * For NSEC, because there is no optout, a DS next to a delegation
	 * always has exactly an NSEC for the name itself; check its DS bit.
	 * flags=0 (not the zone apex).
	 */
	rrset = grab_nsec(rrset_cache, qinfo->qname, qinfo->qname_len,
		LDNS_RR_TYPE_NSEC, qinfo->qclass, 0, region, 1,
		qinfo->qtype, now);
	if(rrset) {
		/* return msg with that rrset */
		if(!(msg = dns_msg_create(qinfo->qname, qinfo->qname_len,
			qinfo->qtype, qinfo->qclass, region, 2)))
			return NULL;
		/* TTL already subtracted in grab_nsec */
		if(!dns_msg_authadd(msg, region, rrset, 0))
			return NULL;
		if(addsoa && !add_soa(rrset_cache, now, region, msg, NULL))
			return NULL;
		return msg;
	}

	/* check NSEC3 neg cache for type DS */
	/* need to look one zone higher for DS type */
	zname = qinfo->qname;
	zname_len = qinfo->qname_len;
	dname_remove_label(&zname, &zname_len);
	zname_labs = dname_count_labels(zname);

	/* lookup closest zone */
	lock_basic_lock(&neg->lock);
	zone = neg_closest_zone_parent(neg, zname, zname_len, zname_labs,
		qinfo->qclass);
	while(zone && !zone->in_use)
		zone = zone->parent;
	/* check that the zone is not too high up so that we do not pick data
	 * out of a zone that is above the last-seen key (or trust-anchor). */
	if(zone && topname) {
		if(!dname_subdomain_c(zone->name, topname))
			zone = NULL;
	}
	if(!zone) {
		lock_basic_unlock(&neg->lock);
		return NULL;
	}

	msg = neg_nsec3_proof_ds(zone, qinfo->qname, qinfo->qname_len,
		zname_labs+1, buf, rrset_cache, region, now, topname);
	if(msg && addsoa && !add_soa(rrset_cache, now, region, msg, zone)) {
		lock_basic_unlock(&neg->lock);
		return NULL;
	}
	lock_basic_unlock(&neg->lock);
	return msg;
}
1459