/*
 * libunbound/worker.c - worker thread or process that resolves
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains the worker process or thread that performs
 * the DNS resolving and validation. The worker is started by a
 * procedure call; if it runs in the background it continues until
 * exit, and if it runs in the foreground it returns from the
 * procedure when done.
 */
#include "config.h"
#ifdef HAVE_SSL
#include <openssl/ssl.h>
#endif
#include "libunbound/libworker.h"
#include "libunbound/context.h"
#include "libunbound/unbound.h"
#include "libunbound/worker.h"
#include "libunbound/unbound-event.h"
#include "services/outside_network.h"
#include "services/mesh.h"
#include "services/localzone.h"
#include "services/cache/rrset.h"
#include "services/outbound_list.h"
#include "services/authzone.h"
#include "util/fptr_wlist.h"
#include "util/module.h"
#include "util/regional.h"
#include "util/random.h"
#include "util/config_file.h"
#include "util/netevent.h"
#include "util/proxy_protocol.h"
#include "util/storage/lookup3.h"
#include "util/storage/slabhash.h"
#include "util/net_help.h"
#include "util/data/dname.h"
#include "util/data/msgreply.h"
#include "util/data/msgencode.h"
#include "util/tube.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_hints.h"
#include "sldns/sbuffer.h"
#include "sldns/str2wire.h"
#ifdef USE_DNSTAP
#include "dnstap/dtstream.h"
#endif

#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif

#if (defined(TARGET_OS_TV) && TARGET_OS_TV) || (defined(TARGET_OS_WATCH) && TARGET_OS_WATCH)
#undef HAVE_FORK
#endif

/** handle new query command for bg worker */
static void handle_newq(struct libworker* w, uint8_t* buf, uint32_t len);

/** delete libworker env */
static void
libworker_delete_env(struct libworker* w)
{
	if(w->env) {
		outside_network_quit_prepare(w->back);
		mesh_delete(w->env->mesh);
		context_release_alloc(w->ctx, w->env->alloc,
			!w->is_bg || w->is_bg_thread);
		sldns_buffer_free(w->env->scratch_buffer);
		regional_destroy(w->env->scratch);
		forwards_delete(w->env->fwds);
		hints_delete(w->env->hints);
		ub_randfree(w->env->rnd);
		free(w->env);
	}
#ifdef HAVE_SSL
	SSL_CTX_free(w->sslctx);
#endif
	outside_network_delete(w->back);
}

/** delete libworker struct */
static void
libworker_delete(struct libworker* w)
{
	if(!w) return;
	libworker_delete_env(w);
	comm_base_delete(w->base);
	free(w);
}

void
libworker_delete_event(struct libworker* w)
{
	if(!w) return;
	libworker_delete_env(w);
	comm_base_delete_no_base(w->base);
	free(w);
}

/** setup fresh libworker struct */
static struct libworker*
libworker_setup(struct ub_ctx* ctx, int is_bg, struct ub_event_base* eb)
{
	struct libworker* w = (struct libworker*)calloc(1, sizeof(*w));
	struct config_file* cfg = ctx->env->cfg;
	int* ports;
	int numports;
	if(!w) return NULL;
	w->is_bg = is_bg;
	w->ctx = ctx;
	w->env = (struct module_env*)malloc(sizeof(*w->env));
	if(!w->env) {
		free(w);
		return NULL;
	}
	*w->env = *ctx->env;
	w->env->alloc = context_obtain_alloc(ctx, !w->is_bg || w->is_bg_thread);
	if(!w->env->alloc) {
		libworker_delete(w);
		return NULL;
	}
	w->thread_num = w->env->alloc->thread_num;
	alloc_set_id_cleanup(w->env->alloc, &libworker_alloc_cleanup, w);
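	/* The config in ctx->env->cfg is shared with the application and,
	 * in the threaded case, with other workers; a forked background
	 * process has its own copy. Hence the pattern below: take the
	 * context cfglock whenever this is not a forked bg process,
	 * i.e. when (!w->is_bg || w->is_bg_thread). */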
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	w->env->scratch = regional_create_custom(cfg->msg_buffer_size);
	w->env->scratch_buffer = sldns_buffer_new(cfg->msg_buffer_size);
	w->env->fwds = forwards_create();
	if(w->env->fwds && !forwards_apply_cfg(w->env->fwds, cfg)) {
		forwards_delete(w->env->fwds);
		w->env->fwds = NULL;
	}
	w->env->hints = hints_create();
	if(w->env->hints && !hints_apply_cfg(w->env->hints, cfg)) {
		hints_delete(w->env->hints);
		w->env->hints = NULL;
	}
#ifdef HAVE_SSL
	w->sslctx = connect_sslctx_create(NULL, NULL,
		cfg->tls_cert_bundle, cfg->tls_win_cert);
	if(!w->sslctx) {
		/* to make the setup fail after unlock */
		hints_delete(w->env->hints);
		w->env->hints = NULL;
	}
#endif
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(!w->env->scratch || !w->env->scratch_buffer || !w->env->fwds ||
		!w->env->hints) {
		libworker_delete(w);
		return NULL;
	}
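	/* The module_env worker pointer is set to this libworker; it is not
	 * a real daemon worker struct, and the callbacks in this file (such
	 * as libworker_send_query) cast it back to struct libworker*. */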
	w->env->worker = (struct worker*)w;
	w->env->probe_timer = NULL;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	if(!(w->env->rnd = ub_initstate(ctx->seed_rnd))) {
		if(!w->is_bg || w->is_bg_thread) {
			lock_basic_unlock(&ctx->cfglock);
		}
		libworker_delete(w);
		return NULL;
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(1) {
		/* primitive lockout for threading: if it overwrites another
		 * thread it is like wiping the cache (which is likely empty
		 * at the start) */
		/* note we are holding the ctx lock in normal threaded
		 * cases so that is solved properly, it is only for many ctx
		 * in different threads that this may clash */
		static int done_raninit = 0;
		if(!done_raninit) {
			done_raninit = 1;
			hash_set_raninit((uint32_t)ub_random(w->env->rnd));
		}
	}

	if(eb)
		w->base = comm_base_create_event(eb);
	else	w->base = comm_base_create(0);
	if(!w->base) {
		libworker_delete(w);
		return NULL;
	}
	w->env->worker_base = w->base;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	numports = cfg_condense_ports(cfg, &ports);
	if(numports == 0) {
		if(!w->is_bg || w->is_bg_thread) {
			lock_basic_unlock(&ctx->cfglock);
		}
		libworker_delete(w);
		return NULL;
	}
	w->back = outside_network_create(w->base, cfg->msg_buffer_size,
		(size_t)cfg->outgoing_num_ports, cfg->out_ifs,
		cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6,
		cfg->do_tcp?cfg->outgoing_num_tcp:0, cfg->ip_dscp,
		w->env->infra_cache, w->env->rnd, cfg->use_caps_bits_for_id,
		ports, numports, cfg->unwanted_threshold,
		cfg->outgoing_tcp_mss, &libworker_alloc_cleanup, w,
		cfg->do_udp || cfg->udp_upstream_without_downstream, w->sslctx,
		cfg->delay_close, cfg->tls_use_sni, NULL, cfg->udp_connect,
		cfg->max_reuse_tcp_queries, cfg->tcp_reuse_timeout,
		cfg->tcp_auth_query_timeout);
	w->env->outnet = w->back;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	free(ports);
	if(!w->back) {
		libworker_delete(w);
		return NULL;
	}
	w->env->mesh = mesh_create(&ctx->mods, w->env);
	if(!w->env->mesh) {
		libworker_delete(w);
		return NULL;
	}
	w->env->send_query = &libworker_send_query;
	w->env->detach_subs = &mesh_detach_subs;
	w->env->attach_sub = &mesh_attach_sub;
	w->env->add_sub = &mesh_add_sub;
	w->env->kill_sub = &mesh_state_delete;
	w->env->detect_cycle = &mesh_detect_cycle;
	comm_base_timept(w->base, &w->env->now, &w->env->now_tv);
	pp_init(&sldns_write_uint16, &sldns_write_uint32);
	return w;
}

struct libworker* libworker_create_event(struct ub_ctx* ctx,
	struct ub_event_base* eb)
{
	return libworker_setup(ctx, 0, eb);
}

/** handle cancel command for bg worker */
static void
handle_cancel(struct libworker* w, uint8_t* buf, uint32_t len)
{
	struct ctx_query* q;
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		q = context_deserialize_cancel(w->ctx, buf, len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		q = context_deserialize_cancel(w->ctx, buf, len);
	}
	if(!q) {
		/* probably simply lookup failed, i.e. the message had been
		 * processed and answered before the cancel arrived */
		return;
	}
	q->cancelled = 1;
	free(buf);
}

/** do control command coming into bg server */
static void
libworker_do_cmd(struct libworker* w, uint8_t* msg, uint32_t len)
{
	switch(context_serial_getcmd(msg, len)) {
		default:
		case UB_LIBCMD_ANSWER:
			log_err("unknown command for bg worker %d",
				(int)context_serial_getcmd(msg, len));
			/* and fall through to quit */
			/* fallthrough */
		case UB_LIBCMD_QUIT:
			free(msg);
			comm_base_exit(w->base);
			break;
		case UB_LIBCMD_NEWQUERY:
			handle_newq(w, msg, len);
			break;
		case UB_LIBCMD_CANCEL:
			handle_cancel(w, msg, len);
			break;
	}
}

/** handle control command coming into server */
void
libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* msg, size_t len, int err, void* arg)
{
	struct libworker* w = (struct libworker*)arg;

	if(err != 0) {
		free(msg);
		/* it is of no use to go on, exit */
		comm_base_exit(w->base);
		return;
	}
	libworker_do_cmd(w, msg, len); /* also frees the buf */
}

/** the background thread func */
static void*
libworker_dobg(void* arg)
{
	/* setup */
	uint32_t m;
	struct libworker* w = (struct libworker*)arg;
	struct ub_ctx* ctx;
	if(!w) {
		log_err("libunbound bg worker init failed, nomem");
		return NULL;
	}
	ctx = w->ctx;
	log_thread_set(&w->thread_num);
#ifdef THREADS_DISABLED
	/* we are forked */
	w->is_bg_thread = 0;
	/* close non-used parts of the pipes */
	tube_close_write(ctx->qq_pipe);
	tube_close_read(ctx->rr_pipe);
#endif
	if(!tube_setup_bg_listen(ctx->qq_pipe, w->base,
		libworker_handle_control_cmd, w)) {
		log_err("libunbound bg worker init failed, no bglisten");
		return NULL;
	}
	if(!tube_setup_bg_write(ctx->rr_pipe, w->base)) {
		log_err("libunbound bg worker init failed, no bgwrite");
		return NULL;
	}

	/* do the work */
	comm_base_dispatch(w->base);

	/* cleanup */
	m = UB_LIBCMD_QUIT;
	w->want_quit = 1;
	tube_remove_bg_listen(w->ctx->qq_pipe);
	tube_remove_bg_write(w->ctx->rr_pipe);
	libworker_delete(w);
	(void)tube_write_msg(ctx->rr_pipe, (uint8_t*)&m,
		(uint32_t)sizeof(m), 0);
#ifdef THREADS_DISABLED
	/* close pipes from forked process before exit */
	tube_close_read(ctx->qq_pipe);
	tube_close_write(ctx->rr_pipe);
#endif
	return NULL;
}

int libworker_bg(struct ub_ctx* ctx)
{
	struct libworker* w;
	/* fork or threadcreate */
	lock_basic_lock(&ctx->cfglock);
	if(ctx->dothread) {
		lock_basic_unlock(&ctx->cfglock);
		w = libworker_setup(ctx, 1, NULL);
		if(!w) return UB_NOMEM;
		w->is_bg_thread = 1;
		ctx->thread_worker = w;
#ifdef ENABLE_LOCK_CHECKS
		w->thread_num = 1; /* for nicer DEBUG checklocks */
#endif
		ub_thread_create(&ctx->bg_tid, libworker_dobg, w);
	} else {
		lock_basic_unlock(&ctx->cfglock);
#ifndef HAVE_FORK
		/* no fork on windows */
		return UB_FORKFAIL;
#else /* HAVE_FORK */
		switch((ctx->bg_pid=fork())) {
			case 0:
				w = libworker_setup(ctx, 1, NULL);
				if(!w) fatal_exit("out of memory");
				/* close non-used parts of the pipes */
				tube_close_write(ctx->qq_pipe);
				tube_close_read(ctx->rr_pipe);
				(void)libworker_dobg(w);
				exit(0);
				break;
			case -1:
				return UB_FORKFAIL;
			default:
				/* close non-used parts, so that the worker
				 * bgprocess gets 'pipe closed' when the
				 * main process exits */
				tube_close_read(ctx->qq_pipe);
				tube_close_write(ctx->rr_pipe);
				break;
		}
#endif /* HAVE_FORK */
	}
	return UB_NOERROR;
}

/** insert canonname */
static int
fill_canon(struct ub_result* res, uint8_t* s)
{
	char buf[255+2];
	dname_str(s, buf);
	res->canonname = strdup(buf);
	return res->canonname != 0;
}

/** fill data into result */
static int
fill_res(struct ub_result* res, struct ub_packed_rrset_key* answer,
	uint8_t* finalcname, struct query_info* rq, struct reply_info* rep)
{
	size_t i;
	struct packed_rrset_data* data;
	res->ttl = 0;
	if(!answer) {
		if(finalcname) {
			if(!fill_canon(res, finalcname))
				return 0; /* out of memory */
		}
		if(rep->rrset_count != 0)
			res->ttl = (int)rep->ttl;
		res->data = (char**)calloc(1, sizeof(char*));
		if(!res->data)
			return 0; /* out of memory */
		res->len = (int*)calloc(1, sizeof(int));
		if(!res->len) {
			free(res->data);
			res->data = NULL;
			return 0; /* out of memory */
		}
		return 1;
	}
	data = (struct packed_rrset_data*)answer->entry.data;
	if(query_dname_compare(rq->qname, answer->rk.dname) != 0) {
		if(!fill_canon(res, answer->rk.dname))
			return 0; /* out of memory */
	} else	res->canonname = NULL;
	res->data = (char**)calloc(data->count+1, sizeof(char*));
	if(!res->data)
		return 0; /* out of memory */
	res->len = (int*)calloc(data->count+1, sizeof(int));
	if(!res->len) {
		free(res->data);
		res->data = NULL;
		return 0; /* out of memory */
	}
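	/* Each RR in packed_rrset_data is stored in uncompressed wire format
	 * with a 2 byte rdlength prefix at the start of rr_data[i]; strip
	 * that prefix so res->data[i] and res->len[i] describe the bare
	 * rdata. */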
	for(i=0; i<data->count; i++) {
		/* remove rdlength from rdata */
		res->len[i] = (int)(data->rr_len[i] - 2);
		res->data[i] = memdup(data->rr_data[i]+2, (size_t)res->len[i]);
		if(!res->data[i]) {
			size_t j;
			for(j=0; j<i; j++) {
				free(res->data[j]);
				res->data[j] = NULL;
			}
			free(res->data);
			res->data = NULL;
			free(res->len);
			res->len = NULL;
			return 0; /* out of memory */
		}
	}
	/* ttl for positive answers, from CNAME and answer RRs */
	if(data->count != 0) {
		size_t j;
		res->ttl = (int)data->ttl;
		for(j=0; j<rep->an_numrrsets; j++) {
			struct packed_rrset_data* d =
				(struct packed_rrset_data*)rep->rrsets[j]->
				entry.data;
			if((int)d->ttl < res->ttl)
				res->ttl = (int)d->ttl;
		}
	}
	/* ttl for negative answers */
	if(data->count == 0 && rep->rrset_count != 0)
		res->ttl = (int)rep->ttl;
	res->data[data->count] = NULL;
	res->len[data->count] = 0;
	return 1;
}

/** fill result from parsed message, on error fills servfail */
void
libworker_enter_result(struct ub_result* res, sldns_buffer* buf,
	struct regional* temp, enum sec_status msg_security)
{
	struct query_info rq;
	struct reply_info* rep;
	res->rcode = LDNS_RCODE_SERVFAIL;
	rep = parse_reply_in_temp_region(buf, temp, &rq);
	if(!rep) {
		log_err("cannot parse buf");
		return; /* error parsing buf, or out of memory */
	}
	if(!fill_res(res, reply_find_answer_rrset(&rq, rep),
		reply_find_final_cname_target(&rq, rep), &rq, rep))
		return; /* out of memory */
	/* rcode, havedata, nxdomain, secure, bogus */
	res->rcode = (int)FLAGS_GET_RCODE(rep->flags);
	if(res->data && res->data[0])
		res->havedata = 1;
	if(res->rcode == LDNS_RCODE_NXDOMAIN)
		res->nxdomain = 1;
	if(msg_security == sec_status_secure)
		res->secure = 1;
	if(msg_security == sec_status_bogus ||
		msg_security == sec_status_secure_sentinel_fail)
		res->bogus = 1;
}

/** fillup fg results */
static void
libworker_fillup_fg(struct ctx_query* q, int rcode, sldns_buffer* buf,
	enum sec_status s, char* why_bogus, int was_ratelimited)
{
	q->res->was_ratelimited = was_ratelimited;
	if(why_bogus)
		q->res->why_bogus = strdup(why_bogus);
	if(rcode != 0) {
		q->res->rcode = rcode;
		q->msg_security = s;
		return;
	}

	q->res->rcode = LDNS_RCODE_SERVFAIL;
	q->msg_security = sec_status_unchecked;
	q->msg = memdup(sldns_buffer_begin(buf), sldns_buffer_limit(buf));
	q->msg_len = sldns_buffer_limit(buf);
	if(!q->msg) {
		return; /* the error is in the rcode */
	}

	/* canonname and results */
	q->msg_security = s;
	libworker_enter_result(q->res, buf, q->w->env->scratch, s);
}

void
libworker_fg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
	char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	/* fg query is done; exit comm base */
	comm_base_exit(q->w->base);

	libworker_fillup_fg(q, rcode, buf, s, why_bogus, was_ratelimited);
}

/** setup qinfo and edns */
static int
setup_qinfo_edns(struct libworker* w, struct ctx_query* q,
	struct query_info* qinfo, struct edns_data* edns)
{
	qinfo->qtype = (uint16_t)q->res->qtype;
	qinfo->qclass = (uint16_t)q->res->qclass;
	qinfo->local_alias = NULL;
	qinfo->qname = sldns_str2wire_dname(q->res->qname, &qinfo->qname_len);
	if(!qinfo->qname) {
		return 0;
	}
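	/* Fill a fixed EDNS record for the query: the DO bit is set so that
	 * DNSSEC records are requested and validation results can be
	 * reported, and the advertised udp_size is capped below to the
	 * capacity of the outgoing udp buffer. */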
	edns->edns_present = 1;
	edns->ext_rcode = 0;
	edns->edns_version = 0;
	edns->bits = EDNS_DO;
	edns->opt_list_in = NULL;
	edns->opt_list_out = NULL;
	edns->opt_list_inplace_cb_out = NULL;
	edns->padding_block_size = 0;
	edns->cookie_present = 0;
	edns->cookie_valid = 0;
	if(sldns_buffer_capacity(w->back->udp_buff) < 65535)
		edns->udp_size = (uint16_t)sldns_buffer_capacity(
			w->back->udp_buff);
	else	edns->udp_size = 65535;
	return 1;
}

int libworker_fg(struct ub_ctx* ctx, struct ctx_query* q)
{
	struct libworker* w = libworker_setup(ctx, 0, NULL);
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	if(!w)
		return UB_INITFAIL;
	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
		libworker_delete(w);
		return UB_SYNTAX;
	}
	qid = 0;
	qflags = BIT_RD;
	q->w = w;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
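	/* The two writes above place the query ID at offset 0 and the flags
	 * at offset 2 of the udp buffer, a minimal DNS header that
	 * local_zones_answer()/auth_zones_answer() can extend into a
	 * complete reply when they have a fixed answer. */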
	if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		libworker_delete(w);
		free(qinfo.qname);
		return UB_NOERROR;
	}
	if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		libworker_delete(w);
		free(qinfo.qname);
		return UB_NOERROR;
	}
	/* process new query */
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_fg_done_cb, q, 0)) {
		free(qinfo.qname);
		return UB_NOMEM;
	}
	free(qinfo.qname);

	/* wait for reply */
	comm_base_dispatch(w->base);

	libworker_delete(w);
	return UB_NOERROR;
}

void
libworker_event_done_cb(void* arg, int rcode, sldns_buffer* buf,
	enum sec_status s, char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	ub_event_callback_type cb = q->cb_event;
	void* cb_arg = q->cb_arg;
	int cancelled = q->cancelled;

	/* delete it now */
	struct ub_ctx* ctx = q->w->ctx;
	lock_basic_lock(&ctx->cfglock);
	(void)rbtree_delete(&ctx->queries, q->node.key);
	ctx->num_async--;
	context_query_delete(q);
	lock_basic_unlock(&ctx->cfglock);

	if(!cancelled) {
		/* call callback */
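		/* translate sec_status into the event callback's security
		 * value: 2 means the result validated as secure, 1 means
		 * bogus, and 0 is used otherwise (insecure or unchecked) */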
		int sec = 0;
		if(s == sec_status_bogus)
			sec = 1;
		else if(s == sec_status_secure)
			sec = 2;
		(*cb)(cb_arg, rcode, (buf?(void*)sldns_buffer_begin(buf):NULL),
			(buf?(int)sldns_buffer_limit(buf):0), sec, why_bogus, was_ratelimited);
	}
}

int libworker_attach_mesh(struct ub_ctx* ctx, struct ctx_query* q,
	int* async_id)
{
	struct libworker* w = ctx->event_worker;
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	if(!w)
		return UB_INITFAIL;
	if(!setup_qinfo_edns(w, q, &qinfo, &edns))
		return UB_SYNTAX;
	qid = 0;
	qflags = BIT_RD;
	q->w = w;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		free(qinfo.qname);
		libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		return UB_NOERROR;
	}
	if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		free(qinfo.qname);
		libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		return UB_NOERROR;
	}
	/* process new query */
	if(async_id)
		*async_id = q->querynum;
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_event_done_cb, q, 0)) {
		free(qinfo.qname);
		return UB_NOMEM;
	}
	free(qinfo.qname);
	return UB_NOERROR;
}

/** add result to the bg worker result queue */
static void
add_bg_result(struct libworker* w, struct ctx_query* q, sldns_buffer* pkt,
	int err, char* reason, int was_ratelimited)
{
	uint8_t* msg = NULL;
	uint32_t len = 0;

	if(w->want_quit) {
		context_query_delete(q);
		return;
	}
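	/* Serialize the answer for the result pipe. In the bg thread case
	 * the ctx_query is shared with the front end, which removes it from
	 * ctx->queries and frees it when it processes the answer; in the
	 * forked case this process has its own copy and deletes it here. */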
	/* serialize and delete unneeded q */
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		if(reason)
			q->res->why_bogus = strdup(reason);
		q->res->was_ratelimited = was_ratelimited;
		if(pkt) {
			q->msg_len = sldns_buffer_remaining(pkt);
			q->msg = memdup(sldns_buffer_begin(pkt), q->msg_len);
			if(!q->msg) {
				msg = context_serialize_answer(q, UB_NOMEM, NULL, &len);
			} else {
				msg = context_serialize_answer(q, err, NULL, &len);
			}
		} else {
			msg = context_serialize_answer(q, err, NULL, &len);
		}
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		if(reason)
			q->res->why_bogus = strdup(reason);
		q->res->was_ratelimited = was_ratelimited;
		msg = context_serialize_answer(q, err, pkt, &len);
		(void)rbtree_delete(&w->ctx->queries, q->node.key);
		w->ctx->num_async--;
		context_query_delete(q);
	}

	if(!msg) {
		log_err("out of memory for async answer");
		return;
	}
	if(!tube_queue_item(w->ctx->rr_pipe, msg, len)) {
		log_err("out of memory for async answer");
		return;
	}
}

void
libworker_bg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
	char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;

	if(q->cancelled || q->w->back->want_to_quit) {
		if(q->w->is_bg_thread) {
			/* delete it now */
			struct ub_ctx* ctx = q->w->ctx;
			lock_basic_lock(&ctx->cfglock);
			(void)rbtree_delete(&ctx->queries, q->node.key);
			ctx->num_async--;
			context_query_delete(q);
			lock_basic_unlock(&ctx->cfglock);
		}
		/* cancelled, do not give answer */
		return;
	}
	q->msg_security = s;
	if(!buf) {
		buf = q->w->env->scratch_buffer;
	}
	if(rcode != 0) {
		error_encode(buf, rcode, NULL, 0, BIT_RD, NULL);
	}
	add_bg_result(q->w, q, buf, UB_NOERROR, why_bogus, was_ratelimited);
}


/** handle new query command for bg worker */
static void
handle_newq(struct libworker* w, uint8_t* buf, uint32_t len)
{
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	struct ctx_query* q;
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		q = context_lookup_new_query(w->ctx, buf, len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		q = context_deserialize_new_query(w->ctx, buf, len);
	}
	free(buf);
	if(!q) {
		log_err("failed to deserialize newq");
		return;
	}
	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
		add_bg_result(w, q, NULL, UB_SYNTAX, NULL, 0);
		return;
	}
	qid = 0;
	qflags = BIT_RD;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(w->ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		q->msg_security = sec_status_insecure;
		add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
		free(qinfo.qname);
		return;
	}
	if(w->ctx->env->auth_zones && auth_zones_answer(w->ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		q->msg_security = sec_status_insecure;
		add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
		free(qinfo.qname);
		return;
	}
	q->w = w;
	/* process new query */
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_bg_done_cb, q, 0)) {
		add_bg_result(w, q, NULL, UB_NOMEM, NULL, 0);
	}
	free(qinfo.qname);
}

void libworker_alloc_cleanup(void* arg)
{
	struct libworker* w = (struct libworker*)arg;
	slabhash_clear(&w->env->rrset_cache->table);
	slabhash_clear(w->env->msg_cache);
}

struct outbound_entry* libworker_send_query(struct query_info* qinfo,
	uint16_t flags, int dnssec, int want_dnssec, int nocaps,
	int check_ratelimit,
	struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
	size_t zonelen, int tcp_upstream, int ssl_upstream, char* tls_auth_name,
	struct module_qstate* q, int* was_ratelimited)
{
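	/* The outbound_entry is allocated in the query state's region, so it
	 * is freed together with that region and needs no separate free. */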
	struct libworker* w = (struct libworker*)q->env->worker;
	struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
		q->region, sizeof(*e));
	if(!e)
		return NULL;
	e->qstate = q;
	e->qsent = outnet_serviced_query(w->back, qinfo, flags, dnssec,
		want_dnssec, nocaps, check_ratelimit, tcp_upstream, ssl_upstream,
		tls_auth_name, addr, addrlen, zone, zonelen, q,
		libworker_handle_service_reply, e, w->back->udp_buff, q->env,
		was_ratelimited);
	if(!e->qsent) {
		return NULL;
	}
	return e;
}

int
libworker_handle_service_reply(struct comm_point* c, void* arg, int error,
	struct comm_reply* reply_info)
{
	struct outbound_entry* e = (struct outbound_entry*)arg;
	struct libworker* lw = (struct libworker*)e->qstate->env->worker;

	if(error != 0) {
		mesh_report_reply(lw->env->mesh, e, reply_info, error);
		return 0;
	}
	/* sanity check. */
	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
			LDNS_PACKET_QUERY
		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
		/* error becomes timeout for the module as if this reply
		 * never arrived. */
		mesh_report_reply(lw->env->mesh, e, reply_info,
			NETEVENT_TIMEOUT);
		return 0;
	}
	mesh_report_reply(lw->env->mesh, e, reply_info, NETEVENT_NOERROR);
	return 0;
}

/* --- fake callbacks for fptr_wlist to work --- */
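/* The stubs below exist only so that libunbound links: util/fptr_wlist.c
 * whitelists function pointers by comparing them against these daemon
 * callback symbols, so the symbols must be defined, but they are never
 * called from the library (they assert if they are). */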
void worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
	int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

int worker_handle_request(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

int worker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}

int remote_accept_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

int remote_control_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

void worker_sighandler(int ATTR_UNUSED(sig), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

struct outbound_entry* worker_send_query(struct query_info* ATTR_UNUSED(qinfo),
	uint16_t ATTR_UNUSED(flags), int ATTR_UNUSED(dnssec),
	int ATTR_UNUSED(want_dnssec), int ATTR_UNUSED(nocaps),
	int ATTR_UNUSED(check_ratelimit),
	struct sockaddr_storage* ATTR_UNUSED(addr), socklen_t ATTR_UNUSED(addrlen),
	uint8_t* ATTR_UNUSED(zone), size_t ATTR_UNUSED(zonelen), int ATTR_UNUSED(tcp_upstream),
	int ATTR_UNUSED(ssl_upstream), char* ATTR_UNUSED(tls_auth_name),
	struct module_qstate* ATTR_UNUSED(q), int* ATTR_UNUSED(was_ratelimited))
{
	log_assert(0);
	return 0;
}

void
worker_alloc_cleanup(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_stat_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_probe_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_start_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_stop_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
{
	log_assert(0);
	return 0;
}

int
codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}

int replay_var_compare(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}

void remote_get_opt_ssl(char* ATTR_UNUSED(str), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

#ifdef UB_ON_WINDOWS
void
worker_win_stop_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), void*
	ATTR_UNUSED(arg)) {
	log_assert(0);
}

void
wsvc_cron_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
#endif /* UB_ON_WINDOWS */

#ifdef USE_DNSTAP
void dtio_tap_callback(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev),
	void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
#endif

#ifdef USE_DNSTAP
void dtio_mainfdcallback(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev),
	void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
#endif