libworker.c revision 249141
/*
 * libunbound/worker.c - worker thread or process that resolves
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains the worker process or thread that performs the DNS
 * resolving and validation. The worker is started from a library call;
 * a background worker keeps running until it is told to exit, while a
 * foreground worker returns from the call when the work is done.
 */
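/*
 * Rough call flow (a sketch, based on the functions below): an application
 * with a ub_ctx either takes the synchronous path, typically ub_resolve(),
 * which runs libworker_fg() and dispatches a private comm_base until
 * libworker_fg_done_cb() fires, or the asynchronous path, typically
 * ub_resolve_async(), which hands serialized queries to the background
 * worker started by libworker_bg() over the ctx->qq_pipe tube and receives
 * answers back over ctx->rr_pipe.
 */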
#include "config.h"
#include <ldns/dname.h>
#include <ldns/wire2host.h>
#ifdef HAVE_SSL
#include <openssl/ssl.h>
#endif
#include "libunbound/libworker.h"
#include "libunbound/context.h"
#include "libunbound/unbound.h"
#include "services/outside_network.h"
#include "services/mesh.h"
#include "services/localzone.h"
#include "services/cache/rrset.h"
#include "services/outbound_list.h"
#include "util/module.h"
#include "util/regional.h"
#include "util/random.h"
#include "util/config_file.h"
#include "util/netevent.h"
#include "util/storage/lookup3.h"
#include "util/storage/slabhash.h"
#include "util/net_help.h"
#include "util/data/dname.h"
#include "util/data/msgreply.h"
#include "util/data/msgencode.h"
#include "util/tube.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_hints.h"

/** handle new query command for bg worker */
static void handle_newq(struct libworker* w, uint8_t* buf, uint32_t len);

/** delete libworker struct */
static void
libworker_delete(struct libworker* w)
{
	if(!w) return;
	if(w->env) {
		outside_network_quit_prepare(w->back);
		mesh_delete(w->env->mesh);
		context_release_alloc(w->ctx, w->env->alloc,
			!w->is_bg || w->is_bg_thread);
		ldns_buffer_free(w->env->scratch_buffer);
		regional_destroy(w->env->scratch);
		forwards_delete(w->env->fwds);
		hints_delete(w->env->hints);
		ub_randfree(w->env->rnd);
		free(w->env);
	}
#ifdef HAVE_SSL
	SSL_CTX_free(w->sslctx);
#endif
	outside_network_delete(w->back);
	comm_base_delete(w->base);
	free(w);
}

/** setup fresh libworker struct */
static struct libworker*
libworker_setup(struct ub_ctx* ctx, int is_bg)
{
	unsigned int seed;
	struct libworker* w = (struct libworker*)calloc(1, sizeof(*w));
	struct config_file* cfg = ctx->env->cfg;
	int* ports;
	int numports;
	if(!w) return NULL;
	w->is_bg = is_bg;
	w->ctx = ctx;
	w->env = (struct module_env*)malloc(sizeof(*w->env));
	if(!w->env) {
		free(w);
		return NULL;
	}
	*w->env = *ctx->env;
	w->env->alloc = context_obtain_alloc(ctx, !w->is_bg || w->is_bg_thread);
	if(!w->env->alloc) {
		libworker_delete(w);
		return NULL;
	}
	w->thread_num = w->env->alloc->thread_num;
	alloc_set_id_cleanup(w->env->alloc, &libworker_alloc_cleanup, w);
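	/* the cfglock is only taken when the context is shared with the
	 * caller, i.e. in the foreground case or when the bg worker is a
	 * thread; a forked bg worker has its own copy of the context */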
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	w->env->scratch = regional_create_custom(cfg->msg_buffer_size);
	w->env->scratch_buffer = ldns_buffer_new(cfg->msg_buffer_size);
	w->env->fwds = forwards_create();
	if(w->env->fwds && !forwards_apply_cfg(w->env->fwds, cfg)) {
		forwards_delete(w->env->fwds);
		w->env->fwds = NULL;
	}
	w->env->hints = hints_create();
	if(w->env->hints && !hints_apply_cfg(w->env->hints, cfg)) {
		hints_delete(w->env->hints);
		w->env->hints = NULL;
	}
	if(cfg->ssl_upstream) {
		w->sslctx = connect_sslctx_create(NULL, NULL, NULL);
		if(!w->sslctx) {
			/* to make the setup fail after unlock */
			hints_delete(w->env->hints);
			w->env->hints = NULL;
		}
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(!w->env->scratch || !w->env->scratch_buffer || !w->env->fwds ||
		!w->env->hints) {
		libworker_delete(w);
		return NULL;
	}
	w->env->worker = (struct worker*)w;
	w->env->probe_timer = NULL;
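	/* mix the time, process id, thread number and the next alloc id so
	 * that concurrent workers get different RNG seeds */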
	seed = (unsigned int)time(NULL) ^ (unsigned int)getpid() ^
		(((unsigned int)w->thread_num)<<17);
	seed ^= (unsigned int)w->env->alloc->next_id;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	if(!(w->env->rnd = ub_initstate(seed, ctx->seed_rnd))) {
		if(!w->is_bg || w->is_bg_thread) {
			lock_basic_unlock(&ctx->cfglock);
		}
		seed = 0;
		libworker_delete(w);
		return NULL;
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(1) {
		/* primitive lockout for threading: if it overwrites another
		 * thread it is like wiping the cache (which is likely empty
		 * at the start) */
		/* note we are holding the ctx lock in normal threaded
		 * cases so that is solved properly, it is only for many ctx
		 * in different threads that this may clash */
		static int done_raninit = 0;
		if(!done_raninit) {
			done_raninit = 1;
			hash_set_raninit((uint32_t)ub_random(w->env->rnd));
		}
	}
	seed = 0;

	w->base = comm_base_create(0);
	if(!w->base) {
		libworker_delete(w);
		return NULL;
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	numports = cfg_condense_ports(cfg, &ports);
	if(numports == 0) {
		libworker_delete(w);
		return NULL;
	}
	w->back = outside_network_create(w->base, cfg->msg_buffer_size,
		(size_t)cfg->outgoing_num_ports, cfg->out_ifs,
		cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6,
		cfg->do_tcp?cfg->outgoing_num_tcp:0,
		w->env->infra_cache, w->env->rnd, cfg->use_caps_bits_for_id,
		ports, numports, cfg->unwanted_threshold,
		&libworker_alloc_cleanup, w, cfg->do_udp, w->sslctx);
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	free(ports);
	if(!w->back) {
		libworker_delete(w);
		return NULL;
	}
	w->env->mesh = mesh_create(&ctx->mods, w->env);
	if(!w->env->mesh) {
		libworker_delete(w);
		return NULL;
	}
	w->env->send_query = &libworker_send_query;
	w->env->detach_subs = &mesh_detach_subs;
	w->env->attach_sub = &mesh_attach_sub;
	w->env->kill_sub = &mesh_state_delete;
	w->env->detect_cycle = &mesh_detect_cycle;
	comm_base_timept(w->base, &w->env->now, &w->env->now_tv);
	return w;
}

/** handle cancel command for bg worker */
static void
handle_cancel(struct libworker* w, uint8_t* buf, uint32_t len)
{
	struct ctx_query* q;
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		q = context_deserialize_cancel(w->ctx, buf, len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		q = context_deserialize_cancel(w->ctx, buf, len);
	}
	if(!q) {
		/* probably simply lookup failed, i.e. the message had been
		 * processed and answered before the cancel arrived */
		return;
	}
	q->cancelled = 1;
	free(buf);
}

/** do control command coming into bg server */
static void
libworker_do_cmd(struct libworker* w, uint8_t* msg, uint32_t len)
{
	switch(context_serial_getcmd(msg, len)) {
		default:
		case UB_LIBCMD_ANSWER:
			log_err("unknown command for bg worker %d",
				(int)context_serial_getcmd(msg, len));
			/* and fall through to quit */
		case UB_LIBCMD_QUIT:
			free(msg);
			comm_base_exit(w->base);
			break;
		case UB_LIBCMD_NEWQUERY:
			handle_newq(w, msg, len);
			break;
		case UB_LIBCMD_CANCEL:
			handle_cancel(w, msg, len);
			break;
	}
}

/** handle control command coming into server */
void
libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* msg, size_t len, int err, void* arg)
{
	struct libworker* w = (struct libworker*)arg;

	if(err != 0) {
		free(msg);
		/* it is of no use to go on, exit */
		comm_base_exit(w->base);
		return;
	}
	libworker_do_cmd(w, msg, len); /* also frees the buf */
}

/** the background thread func */
static void*
libworker_dobg(void* arg)
{
	/* setup */
	uint32_t m;
	struct libworker* w = (struct libworker*)arg;
	struct ub_ctx* ctx;
	if(!w) {
		log_err("libunbound bg worker init failed, nomem");
		return NULL;
	}
	ctx = w->ctx;
	log_thread_set(&w->thread_num);
#ifdef THREADS_DISABLED
	/* we are forked */
	w->is_bg_thread = 0;
	/* close non-used parts of the pipes */
	tube_close_write(ctx->qq_pipe);
	tube_close_read(ctx->rr_pipe);
#endif
	if(!tube_setup_bg_listen(ctx->qq_pipe, w->base,
		libworker_handle_control_cmd, w)) {
		log_err("libunbound bg worker init failed, no bglisten");
		return NULL;
	}
	if(!tube_setup_bg_write(ctx->rr_pipe, w->base)) {
		log_err("libunbound bg worker init failed, no bgwrite");
		return NULL;
	}

	/* do the work */
	comm_base_dispatch(w->base);

	/* cleanup */
	m = UB_LIBCMD_QUIT;
	tube_remove_bg_listen(w->ctx->qq_pipe);
	tube_remove_bg_write(w->ctx->rr_pipe);
	libworker_delete(w);
	(void)tube_write_msg(ctx->rr_pipe, (uint8_t*)&m,
		(uint32_t)sizeof(m), 0);
#ifdef THREADS_DISABLED
	/* close pipes from forked process before exit */
	tube_close_read(ctx->qq_pipe);
	tube_close_write(ctx->rr_pipe);
#endif
	return NULL;
}

int libworker_bg(struct ub_ctx* ctx)
{
	struct libworker* w;
	/* fork or threadcreate */
	lock_basic_lock(&ctx->cfglock);
	if(ctx->dothread) {
		lock_basic_unlock(&ctx->cfglock);
		w = libworker_setup(ctx, 1);
		if(!w) return UB_NOMEM;
		w->is_bg_thread = 1;
#ifdef ENABLE_LOCK_CHECKS
		w->thread_num = 1; /* for nicer DEBUG checklocks */
#endif
		ub_thread_create(&ctx->bg_tid, libworker_dobg, w);
	} else {
		lock_basic_unlock(&ctx->cfglock);
#ifndef HAVE_FORK
		/* no fork on windows */
		return UB_FORKFAIL;
#else /* HAVE_FORK */
		switch((ctx->bg_pid=fork())) {
			case 0:
				w = libworker_setup(ctx, 1);
				if(!w) fatal_exit("out of memory");
				/* close non-used parts of the pipes */
				tube_close_write(ctx->qq_pipe);
				tube_close_read(ctx->rr_pipe);
				(void)libworker_dobg(w);
				exit(0);
				break;
			case -1:
				return UB_FORKFAIL;
			default:
				break;
		}
#endif /* HAVE_FORK */
	}
	return UB_NOERROR;
}

/** get msg reply struct (in temp region) */
static struct reply_info*
parse_reply(ldns_buffer* pkt, struct regional* region, struct query_info* qi)
{
	struct reply_info* rep;
	struct msg_parse* msg;
	if(!(msg = regional_alloc(region, sizeof(*msg)))) {
		return NULL;
	}
	memset(msg, 0, sizeof(*msg));
	ldns_buffer_set_position(pkt, 0);
	if(parse_packet(pkt, msg, region) != 0)
		return 0;
	if(!parse_create_msg(pkt, msg, NULL, qi, &rep, region)) {
		return 0;
	}
	return rep;
}

/** insert canonname */
static int
fill_canon(struct ub_result* res, uint8_t* s)
{
	char buf[255+2];
	dname_str(s, buf);
	res->canonname = strdup(buf);
	return res->canonname != 0;
}

/** fill data into result */
static int
fill_res(struct ub_result* res, struct ub_packed_rrset_key* answer,
	uint8_t* finalcname, struct query_info* rq, struct reply_info* rep)
{
	size_t i;
	struct packed_rrset_data* data;
	res->ttl = 0;
	if(!answer) {
		if(finalcname) {
			if(!fill_canon(res, finalcname))
				return 0; /* out of memory */
		}
		if(rep->rrset_count != 0)
			res->ttl = (int)rep->ttl;
		res->data = (char**)calloc(1, sizeof(char*));
		res->len = (int*)calloc(1, sizeof(int));
		return (res->data && res->len);
	}
	data = (struct packed_rrset_data*)answer->entry.data;
	if(query_dname_compare(rq->qname, answer->rk.dname) != 0) {
		if(!fill_canon(res, answer->rk.dname))
			return 0; /* out of memory */
	} else	res->canonname = NULL;
	res->data = (char**)calloc(data->count+1, sizeof(char*));
	res->len = (int*)calloc(data->count+1, sizeof(int));
	if(!res->data || !res->len)
		return 0; /* out of memory */
	for(i=0; i<data->count; i++) {
		/* remove rdlength from rdata */
		res->len[i] = (int)(data->rr_len[i] - 2);
		res->data[i] = memdup(data->rr_data[i]+2, (size_t)res->len[i]);
		if(!res->data[i])
			return 0; /* out of memory */
	}
	/* ttl for positive answers, from CNAME and answer RRs */
	if(data->count != 0) {
		size_t j;
		res->ttl = (int)data->ttl;
		for(j=0; j<rep->an_numrrsets; j++) {
			struct packed_rrset_data* d =
				(struct packed_rrset_data*)rep->rrsets[j]->
				entry.data;
			if((int)d->ttl < res->ttl)
				res->ttl = (int)d->ttl;
		}
	}
	/* ttl for negative answers */
	if(data->count == 0 && rep->rrset_count != 0)
		res->ttl = (int)rep->ttl;
	res->data[data->count] = NULL;
	res->len[data->count] = 0;
	return 1;
}

/** fill result from parsed message, on error fills servfail */
void
libworker_enter_result(struct ub_result* res, ldns_buffer* buf,
	struct regional* temp, enum sec_status msg_security)
{
	struct query_info rq;
	struct reply_info* rep;
	res->rcode = LDNS_RCODE_SERVFAIL;
	rep = parse_reply(buf, temp, &rq);
	if(!rep) {
		log_err("cannot parse buf");
		return; /* error parsing buf, or out of memory */
	}
	if(!fill_res(res, reply_find_answer_rrset(&rq, rep),
		reply_find_final_cname_target(&rq, rep), &rq, rep))
		return; /* out of memory */
	/* rcode, havedata, nxdomain, secure, bogus */
	res->rcode = (int)FLAGS_GET_RCODE(rep->flags);
	if(res->data && res->data[0])
		res->havedata = 1;
	if(res->rcode == LDNS_RCODE_NXDOMAIN)
		res->nxdomain = 1;
	if(msg_security == sec_status_secure)
		res->secure = 1;
	if(msg_security == sec_status_bogus)
		res->bogus = 1;
}

/** fillup fg results */
static void
libworker_fillup_fg(struct ctx_query* q, int rcode, ldns_buffer* buf,
	enum sec_status s, char* why_bogus)
{
	if(why_bogus)
		q->res->why_bogus = strdup(why_bogus);
	if(rcode != 0) {
		q->res->rcode = rcode;
		q->msg_security = s;
		return;
	}

	q->res->rcode = LDNS_RCODE_SERVFAIL;
	q->msg_security = 0;
	q->msg = memdup(ldns_buffer_begin(buf), ldns_buffer_limit(buf));
	q->msg_len = ldns_buffer_limit(buf);
	if(!q->msg) {
		return; /* the error is in the rcode */
	}

	/* canonname and results */
	q->msg_security = s;
	libworker_enter_result(q->res, buf, q->w->env->scratch, s);
}

void
libworker_fg_done_cb(void* arg, int rcode, ldns_buffer* buf, enum sec_status s,
	char* why_bogus)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	/* fg query is done; exit comm base */
	comm_base_exit(q->w->base);

	libworker_fillup_fg(q, rcode, buf, s, why_bogus);
}

/** setup qinfo and edns */
static int
setup_qinfo_edns(struct libworker* w, struct ctx_query* q,
	struct query_info* qinfo, struct edns_data* edns)
{
	ldns_rdf* rdf;
	qinfo->qtype = (uint16_t)q->res->qtype;
	qinfo->qclass = (uint16_t)q->res->qclass;
	rdf = ldns_dname_new_frm_str(q->res->qname);
	if(!rdf) {
		return 0;
	}
#ifdef UNBOUND_ALLOC_LITE
	qinfo->qname = memdup(ldns_rdf_data(rdf), ldns_rdf_size(rdf));
	qinfo->qname_len = ldns_rdf_size(rdf);
	ldns_rdf_deep_free(rdf);
	rdf = 0;
#else
	qinfo->qname = ldns_rdf_data(rdf);
	qinfo->qname_len = ldns_rdf_size(rdf);
#endif
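	/* attach EDNS with the DO bit set so that DNSSEC records are
	 * returned, and advertise the reply buffer capacity */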
	edns->edns_present = 1;
	edns->ext_rcode = 0;
	edns->edns_version = 0;
	edns->bits = EDNS_DO;
	if(ldns_buffer_capacity(w->back->udp_buff) < 65535)
		edns->udp_size = (uint16_t)ldns_buffer_capacity(
			w->back->udp_buff);
	else	edns->udp_size = 65535;
	ldns_rdf_free(rdf);
	return 1;
}

int libworker_fg(struct ub_ctx* ctx, struct ctx_query* q)
{
	struct libworker* w = libworker_setup(ctx, 0);
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	if(!w)
		return UB_INITFAIL;
	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
		libworker_delete(w);
		return UB_SYNTAX;
	}
	qid = 0;
	qflags = BIT_RD;
	q->w = w;
	/* see if there is a fixed answer */
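	/* the query id and flags are written into the wire buffer up front
	 * so that local_zones_answer() can render a complete reply packet
	 * in it if the name is covered by a local zone */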
	ldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	ldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(ctx->local_zones, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL);
		libworker_delete(w);
		free(qinfo.qname);
		return UB_NOERROR;
	}
	/* process new query */
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_fg_done_cb, q)) {
		free(qinfo.qname);
		return UB_NOMEM;
	}
	free(qinfo.qname);

	/* wait for reply */
	comm_base_dispatch(w->base);

	libworker_delete(w);
	return UB_NOERROR;
}

/** add result to the bg worker result queue */
static void
add_bg_result(struct libworker* w, struct ctx_query* q, ldns_buffer* pkt,
	int err, char* reason)
{
	uint8_t* msg = NULL;
	uint32_t len = 0;

	/* serialize and delete unneeded q */
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		if(reason)
			q->res->why_bogus = strdup(reason);
		if(pkt) {
			q->msg_len = ldns_buffer_remaining(pkt);
			q->msg = memdup(ldns_buffer_begin(pkt), q->msg_len);
			if(!q->msg)
				msg = context_serialize_answer(q, UB_NOMEM,
				NULL, &len);
			else	msg = context_serialize_answer(q, err,
				NULL, &len);
		} else msg = context_serialize_answer(q, err, NULL, &len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		if(reason)
			q->res->why_bogus = strdup(reason);
		msg = context_serialize_answer(q, err, pkt, &len);
		(void)rbtree_delete(&w->ctx->queries, q->node.key);
		w->ctx->num_async--;
		context_query_delete(q);
	}

	if(!msg) {
		log_err("out of memory for async answer");
		return;
	}
	if(!tube_queue_item(w->ctx->rr_pipe, msg, len)) {
		log_err("out of memory for async answer");
		return;
	}
}

void
libworker_bg_done_cb(void* arg, int rcode, ldns_buffer* buf, enum sec_status s,
	char* why_bogus)
{
	struct ctx_query* q = (struct ctx_query*)arg;

	if(q->cancelled) {
		if(q->w->is_bg_thread) {
			/* delete it now */
			struct ub_ctx* ctx = q->w->ctx;
			lock_basic_lock(&ctx->cfglock);
			(void)rbtree_delete(&ctx->queries, q->node.key);
			ctx->num_async--;
			context_query_delete(q);
			lock_basic_unlock(&ctx->cfglock);
		}
		/* cancelled, do not give answer */
		return;
	}
	q->msg_security = s;
	if(!buf)
		buf = q->w->env->scratch_buffer;
	if(rcode != 0) {
		error_encode(buf, rcode, NULL, 0, BIT_RD, NULL);
	}
	add_bg_result(q->w, q, buf, UB_NOERROR, why_bogus);
}


/** handle new query command for bg worker */
static void
handle_newq(struct libworker* w, uint8_t* buf, uint32_t len)
{
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	struct ctx_query* q;
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		q = context_lookup_new_query(w->ctx, buf, len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		q = context_deserialize_new_query(w->ctx, buf, len);
	}
	free(buf);
	if(!q) {
		log_err("failed to deserialize newq");
		return;
	}
	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
		add_bg_result(w, q, NULL, UB_SYNTAX, NULL);
		return;
	}
	qid = 0;
	qflags = BIT_RD;
	/* see if there is a fixed answer */
	ldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	ldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(w->ctx->local_zones, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		q->msg_security = sec_status_insecure;
		add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL);
		free(qinfo.qname);
		return;
	}
	q->w = w;
	/* process new query */
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_bg_done_cb, q)) {
		add_bg_result(w, q, NULL, UB_NOMEM, NULL);
	}
	free(qinfo.qname);
}

void libworker_alloc_cleanup(void* arg)
{
	struct libworker* w = (struct libworker*)arg;
	slabhash_clear(&w->env->rrset_cache->table);
	slabhash_clear(w->env->msg_cache);
}

struct outbound_entry* libworker_send_query(uint8_t* qname, size_t qnamelen,
	uint16_t qtype, uint16_t qclass, uint16_t flags, int dnssec,
	int want_dnssec, struct sockaddr_storage* addr, socklen_t addrlen,
	uint8_t* zone, size_t zonelen, struct module_qstate* q)
{
	struct libworker* w = (struct libworker*)q->env->worker;
	struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
		q->region, sizeof(*e));
	if(!e)
		return NULL;
	e->qstate = q;
	e->qsent = outnet_serviced_query(w->back, qname,
		qnamelen, qtype, qclass, flags, dnssec, want_dnssec,
		q->env->cfg->tcp_upstream, q->env->cfg->ssl_upstream, addr,
		addrlen, zone, zonelen, libworker_handle_service_reply, e,
		w->back->udp_buff);
	if(!e->qsent) {
		return NULL;
	}
	return e;
}

int
libworker_handle_reply(struct comm_point* c, void* arg, int error,
	struct comm_reply* reply_info)
{
	struct module_qstate* q = (struct module_qstate*)arg;
	struct libworker* lw = (struct libworker*)q->env->worker;
	struct outbound_entry e;
	e.qstate = q;
	e.qsent = NULL;

	if(error != 0) {
		mesh_report_reply(lw->env->mesh, &e, reply_info, error);
		return 0;
	}
	/* sanity check. */
	if(!LDNS_QR_WIRE(ldns_buffer_begin(c->buffer))
		|| LDNS_OPCODE_WIRE(ldns_buffer_begin(c->buffer)) !=
			LDNS_PACKET_QUERY
		|| LDNS_QDCOUNT(ldns_buffer_begin(c->buffer)) > 1) {
		/* error becomes timeout for the module as if this reply
		 * never arrived. */
		mesh_report_reply(lw->env->mesh, &e, reply_info,
			NETEVENT_TIMEOUT);
		return 0;
	}
	mesh_report_reply(lw->env->mesh, &e, reply_info, NETEVENT_NOERROR);
	return 0;
}

int
libworker_handle_service_reply(struct comm_point* c, void* arg, int error,
	struct comm_reply* reply_info)
{
	struct outbound_entry* e = (struct outbound_entry*)arg;
	struct libworker* lw = (struct libworker*)e->qstate->env->worker;

	if(error != 0) {
		mesh_report_reply(lw->env->mesh, e, reply_info, error);
		return 0;
	}
	/* sanity check. */
	if(!LDNS_QR_WIRE(ldns_buffer_begin(c->buffer))
		|| LDNS_OPCODE_WIRE(ldns_buffer_begin(c->buffer)) !=
			LDNS_PACKET_QUERY
		|| LDNS_QDCOUNT(ldns_buffer_begin(c->buffer)) > 1) {
		/* error becomes timeout for the module as if this reply
		 * never arrived. */
		mesh_report_reply(lw->env->mesh, e, reply_info,
			NETEVENT_TIMEOUT);
		return 0;
	}
	mesh_report_reply(lw->env->mesh, e, reply_info, NETEVENT_NOERROR);
	return 0;
}

/* --- fake callbacks for fptr_wlist to work --- */
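/* These stubs only exist so that the function pointer whitelist
 * (fptr_wlist) and the linker are satisfied in the library build;
 * they must never be called, hence the assertions. */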
void worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
	int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

int worker_handle_request(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

int worker_handle_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}

int worker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}

int remote_accept_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

int remote_control_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

void worker_sighandler(int ATTR_UNUSED(sig), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

struct outbound_entry* worker_send_query(uint8_t* ATTR_UNUSED(qname),
	size_t ATTR_UNUSED(qnamelen), uint16_t ATTR_UNUSED(qtype),
	uint16_t ATTR_UNUSED(qclass), uint16_t ATTR_UNUSED(flags),
	int ATTR_UNUSED(dnssec), int ATTR_UNUSED(want_dnssec),
	struct sockaddr_storage* ATTR_UNUSED(addr),
	socklen_t ATTR_UNUSED(addrlen), struct module_qstate* ATTR_UNUSED(q))
{
	log_assert(0);
	return 0;
}

void
worker_alloc_cleanup(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_stat_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_probe_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_start_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_stop_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
{
	log_assert(0);
	return 0;
}

int
codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}

int replay_var_compare(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}

void remote_get_opt_ssl(char* ATTR_UNUSED(str), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

#ifdef UB_ON_WINDOWS
void
worker_win_stop_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), void*
	ATTR_UNUSED(arg)) {
	log_assert(0);
}

void
wsvc_cron_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
#endif /* UB_ON_WINDOWS */