1/*	$NetBSD: socket.c,v 1.8.4.1 2012/06/05 21:15:23 bouyer Exp $	*/
2
3/*
4 * Copyright (C) 2004-2012  Internet Systems Consortium, Inc. ("ISC")
5 * Copyright (C) 1998-2003  Internet Software Consortium.
6 *
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
12 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
13 * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
14 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
15 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
16 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20/* Id */
21
22/*! \file */
23
24#include <config.h>
25
26#include <sys/param.h>
27#include <sys/types.h>
28#include <sys/socket.h>
29#include <sys/stat.h>
30#include <sys/time.h>
31#include <sys/uio.h>
32
33#include <errno.h>
34#include <fcntl.h>
35#include <stddef.h>
36#include <stdlib.h>
37#include <string.h>
38#include <unistd.h>
39
40#include <isc/buffer.h>
41#include <isc/bufferlist.h>
42#include <isc/condition.h>
43#include <isc/formatcheck.h>
44#include <isc/list.h>
45#include <isc/log.h>
46#include <isc/mem.h>
47#include <isc/msgs.h>
48#include <isc/mutex.h>
49#include <isc/net.h>
50#include <isc/once.h>
51#include <isc/platform.h>
52#include <isc/print.h>
53#include <isc/region.h>
54#include <isc/socket.h>
55#include <isc/stats.h>
56#include <isc/strerror.h>
57#include <isc/task.h>
58#include <isc/thread.h>
59#include <isc/util.h>
60#include <isc/xml.h>
61
62#ifdef ISC_PLATFORM_HAVESYSUNH
63#include <sys/un.h>
64#endif
65#ifdef ISC_PLATFORM_HAVEKQUEUE
66#include <sys/event.h>
67#endif
68#ifdef ISC_PLATFORM_HAVEEPOLL
69#include <sys/epoll.h>
70#endif
71#ifdef ISC_PLATFORM_HAVEDEVPOLL
72#if defined(HAVE_SYS_DEVPOLL_H)
73#include <sys/devpoll.h>
74#elif defined(HAVE_DEVPOLL_H)
75#include <devpoll.h>
76#endif
77#endif
78
79#include "errno2result.h"
80
81/* See task.c about the following definition: */
82#ifdef BIND9
83#ifdef ISC_PLATFORM_USETHREADS
84#define USE_WATCHER_THREAD
85#else
86#define USE_SHARED_MANAGER
87#endif	/* ISC_PLATFORM_USETHREADS */
88#endif	/* BIND9 */
89
90#ifndef USE_WATCHER_THREAD
91#include "socket_p.h"
92#include "../task_p.h"
93#endif /* USE_WATCHER_THREAD */
94
95#if defined(SO_BSDCOMPAT) && defined(__linux__)
96#include <sys/utsname.h>
97#endif
98
99/*%
100 * Choose the most preferable multiplex method.
101 */
102#ifdef ISC_PLATFORM_HAVEKQUEUE
103#define USE_KQUEUE
104#elif defined (ISC_PLATFORM_HAVEEPOLL)
105#define USE_EPOLL
106#elif defined (ISC_PLATFORM_HAVEDEVPOLL)
107#define USE_DEVPOLL
108typedef struct {
109	unsigned int want_read : 1,
110		want_write : 1;
111} pollinfo_t;
112#else
113#define USE_SELECT
114#endif	/* ISC_PLATFORM_HAVEKQUEUE */
115
116#ifndef USE_WATCHER_THREAD
117#if defined(USE_KQUEUE) || defined(USE_EPOLL) || defined(USE_DEVPOLL)
118struct isc_socketwait {
119	int nevents;
120};
121#elif defined (USE_SELECT)
122struct isc_socketwait {
123	fd_set *readset;
124	fd_set *writeset;
125	int nfds;
126	int maxfd;
127};
128#endif	/* USE_KQUEUE */
129#endif /* !USE_WATCHER_THREAD */
130
131/*%
132 * Maximum number of allowable open sockets.  This is also the maximum
133 * allowable socket file descriptor.
134 *
135 * Care should be taken before modifying this value for select():
136 * The API standard doesn't ensure select() accept more than (the system default
137 * of) FD_SETSIZE descriptors, and the default size should in fact be fine in
138 * the vast majority of cases.  This constant should therefore be increased only
139 * when absolutely necessary and possible, i.e., the server is exhausting all
140 * available file descriptors (up to FD_SETSIZE) and the select() function
141 * and FD_xxx macros support larger values than FD_SETSIZE (which may not
142 * always by true, but we keep using some of them to ensure as much
143 * portability as possible).  Note also that overall server performance
144 * may be rather worsened with a larger value of this constant due to
145 * inherent scalability problems of select().
146 *
147 * As a special note, this value shouldn't have to be touched if
148 * this is a build for an authoritative only DNS server.
149 */
150#ifndef ISC_SOCKET_MAXSOCKETS
151#if defined(USE_KQUEUE) || defined(USE_EPOLL) || defined(USE_DEVPOLL)
152#define ISC_SOCKET_MAXSOCKETS 4096
153#elif defined(USE_SELECT)
154#define ISC_SOCKET_MAXSOCKETS FD_SETSIZE
155#endif	/* USE_KQUEUE... */
156#endif	/* ISC_SOCKET_MAXSOCKETS */
157
158#ifdef USE_SELECT
159/*%
160 * Mac OS X needs a special definition to support larger values in select().
161 * We always define this because a larger value can be specified run-time.
162 */
163#ifdef __APPLE__
164#define _DARWIN_UNLIMITED_SELECT
165#endif	/* __APPLE__ */
166#endif	/* USE_SELECT */
167
168#ifdef ISC_SOCKET_USE_POLLWATCH
169/*%
170 * If this macro is defined, enable workaround for a Solaris /dev/poll kernel
171 * bug: DP_POLL ioctl could keep sleeping even if socket I/O is possible for
172 * some of the specified FD.  The idea is based on the observation that it's
173 * likely for a busy server to keep receiving packets.  It specifically works
174 * as follows: the socket watcher is first initialized with the state of
175 * "poll_idle".  While it's in the idle state it keeps sleeping until a socket
176 * event occurs.  When it wakes up for a socket I/O event, it moves to the
177 * poll_active state, and sets the poll timeout to a short period
178 * (ISC_SOCKET_POLLWATCH_TIMEOUT msec).  If timeout occurs in this state, the
179 * watcher goes to the poll_checking state with the same timeout period.
180 * In this state, the watcher tries to detect whether this is a break
181 * during intermittent events or the kernel bug is triggered.  If the next
182 * polling reports an event within the short period, the previous timeout is
183 * likely to be a kernel bug, and so the watcher goes back to the active state.
184 * Otherwise, it moves to the idle state again.
185 *
186 * It's not clear whether this is a thread-related bug, but since we've only
187 * seen this with threads, this workaround is used only when enabling threads.
188 */
189
190typedef enum { poll_idle, poll_active, poll_checking } pollstate_t;
191
192#ifndef ISC_SOCKET_POLLWATCH_TIMEOUT
193#define ISC_SOCKET_POLLWATCH_TIMEOUT 10
194#endif	/* ISC_SOCKET_POLLWATCH_TIMEOUT */
195#endif	/* ISC_SOCKET_USE_POLLWATCH */
196
197/*%
198 * Size of per-FD lock buckets.
199 */
200#ifdef ISC_PLATFORM_USETHREADS
201#define FDLOCK_COUNT		1024
202#define FDLOCK_ID(fd)		((fd) % FDLOCK_COUNT)
203#else
204#define FDLOCK_COUNT		1
205#define FDLOCK_ID(fd)		0
206#endif	/* ISC_PLATFORM_USETHREADS */
207
208/*%
209 * Maximum number of events communicated with the kernel.  There should normally
210 * be no need for having a large number.
211 */
212#if defined(USE_KQUEUE) || defined(USE_EPOLL) || defined(USE_DEVPOLL)
213#ifndef ISC_SOCKET_MAXEVENTS
214#define ISC_SOCKET_MAXEVENTS	64
215#endif
216#endif
217
218/*%
219 * Some systems define the socket length argument as an int, some as size_t,
220 * some as socklen_t.  This is here so it can be easily changed if needed.
221 */
222#ifndef ISC_SOCKADDR_LEN_T
223#define ISC_SOCKADDR_LEN_T unsigned int
224#endif
225
226/*%
227 * Define what the possible "soft" errors can be.  These are non-fatal returns
228 * of various network related functions, like recv() and so on.
229 *
230 * For some reason, BSDI (and perhaps others) will sometimes return <0
231 * from recv() but will have errno==0.  This is broken, but we have to
232 * work around it here.
233 */
234#define SOFT_ERROR(e)	((e) == EAGAIN || \
235			 (e) == EWOULDBLOCK || \
236			 (e) == EINTR || \
237			 (e) == 0)
238
239#define DLVL(x) ISC_LOGCATEGORY_GENERAL, ISC_LOGMODULE_SOCKET, ISC_LOG_DEBUG(x)
240
241/*!<
242 * DLVL(90)  --  Function entry/exit and other tracing.
243 * DLVL(70)  --  Socket "correctness" -- including returning of events, etc.
244 * DLVL(60)  --  Socket data send/receive
245 * DLVL(50)  --  Event tracing, including receiving/sending completion events.
246 * DLVL(20)  --  Socket creation/destruction.
247 */
248#define TRACE_LEVEL		90
249#define CORRECTNESS_LEVEL	70
250#define IOEVENT_LEVEL		60
251#define EVENT_LEVEL		50
252#define CREATION_LEVEL		20
253
254#define TRACE		DLVL(TRACE_LEVEL)
255#define CORRECTNESS	DLVL(CORRECTNESS_LEVEL)
256#define IOEVENT		DLVL(IOEVENT_LEVEL)
257#define EVENT		DLVL(EVENT_LEVEL)
258#define CREATION	DLVL(CREATION_LEVEL)
259
260typedef isc_event_t intev_t;
261
262#define SOCKET_MAGIC		ISC_MAGIC('I', 'O', 'i', 'o')
263#define VALID_SOCKET(s)		ISC_MAGIC_VALID(s, SOCKET_MAGIC)
264
265/*!
266 * IPv6 control information.  If the socket is an IPv6 socket we want
267 * to collect the destination address and interface so the client can
268 * set them on outgoing packets.
269 */
270#ifdef ISC_PLATFORM_HAVEIN6PKTINFO
271#ifndef USE_CMSG
272#define USE_CMSG	1
273#endif
274#endif
275
276/*%
277 * NetBSD and FreeBSD can timestamp packets.  XXXMLG Should we have
278 * a setsockopt() like interface to request timestamps, and if the OS
279 * doesn't do it for us, call gettimeofday() on every UDP receive?
280 */
281#ifdef SO_TIMESTAMP
282#ifndef USE_CMSG
283#define USE_CMSG	1
284#endif
285#endif
286
287/*%
288 * The size to raise the receive buffer to (from BIND 8).
289 */
290#define RCVBUFSIZE (32*1024)
291
292/*%
293 * The number of times a send operation is repeated if the result is EINTR.
294 */
295#define NRETRIES 10
296
297typedef struct isc__socket isc__socket_t;
298typedef struct isc__socketmgr isc__socketmgr_t;
299
300#define NEWCONNSOCK(ev) ((isc__socket_t *)(ev)->newsocket)
301
/*%
 * A single socket managed by an isc__socketmgr_t.  The 'common' member
 * carries the magic number checked by VALID_SOCKET().
 */
struct isc__socket {
	/* Not locked. */
	isc_socket_t		common;		/* generic header/magic */
	isc__socketmgr_t	*manager;	/* owning manager */
	isc_mutex_t		lock;		/* protects the fields below */
	isc_sockettype_t	type;		/* e.g. isc_sockettype_udp */
	const isc_statscounter_t	*statsindex;	/* STATID_* -> counter map */

	/* Locked by socket lock. */
	ISC_LINK(isc__socket_t)	link;		/* entry in manager->socklist */
	unsigned int		references;	/* socket is dead at 0 (SOCK_DEAD) */
	int			fd;		/* OS descriptor */
	int			pf;		/* protocol family (e.g. AF_INET6) */
	char				name[16];
	void *				tag;

	ISC_LIST(isc_socketevent_t)		send_list;
	ISC_LIST(isc_socketevent_t)		recv_list;
	ISC_LIST(isc_socket_newconnev_t)	accept_list;
	isc_socket_connev_t		       *connect_ev;

	/*
	 * Internal events.  Posted when a descriptor is readable or
	 * writable.  These are statically allocated and never freed.
	 * They will be set to non-purgable before use.
	 */
	intev_t			readable_ev;
	intev_t			writable_ev;

	isc_sockaddr_t		peer_address;  /* remote address */

	unsigned int		pending_recv : 1,
				pending_send : 1,
				pending_accept : 1,
				listener : 1, /* listener socket */
				connected : 1,
				connecting : 1, /* connect pending */
				bound : 1, /* bound to local addr */
				dupped : 1;

#ifdef ISC_NET_RECVOVERFLOW
	unsigned char		overflow; /* used for MSG_TRUNC fake */
#endif

	/* Ancillary-data (cmsg) buffers used when USE_CMSG is in effect. */
	char			*recvcmsgbuf;
	ISC_SOCKADDR_LEN_T	recvcmsgbuflen;
	char			*sendcmsgbuf;
	ISC_SOCKADDR_LEN_T	sendcmsgbuflen;

	/* fdwatch-type sockets: user callback, its argument and task. */
	void			*fdwatcharg;
	isc_sockfdwatch_t	fdwatchcb;
	int			fdwatchflags;
	isc_task_t		*fdwatchtask;
};
356
357#define SOCKET_MANAGER_MAGIC	ISC_MAGIC('I', 'O', 'm', 'g')
358#define VALID_MANAGER(m)	ISC_MAGIC_VALID(m, SOCKET_MANAGER_MAGIC)
359
/*%
 * A socket manager: owns the set of sockets and the event-multiplexing
 * state for whichever backend (kqueue/epoll/devpoll/select) was chosen.
 */
struct isc__socketmgr {
	/* Not locked. */
	isc_socketmgr_t		common;		/* generic header/magic */
	isc_mem_t	       *mctx;
	isc_mutex_t		lock;		/* manager-wide lock */
	isc_mutex_t		*fdlock;	/* FDLOCK_COUNT bucket locks, indexed by FDLOCK_ID(fd) */
	isc_stats_t		*stats;		/* may be NULL; see inc_stats() */
#ifdef USE_KQUEUE
	int			kqueue_fd;
	int			nevents;	/* capacity of 'events' */
	struct kevent		*events;
#endif	/* USE_KQUEUE */
#ifdef USE_EPOLL
	int			epoll_fd;
	int			nevents;	/* capacity of 'events' */
	struct epoll_event	*events;
#endif	/* USE_EPOLL */
#ifdef USE_DEVPOLL
	int			devpoll_fd;
	int			nevents;	/* capacity of 'events' */
	struct pollfd		*events;
#endif	/* USE_DEVPOLL */
#ifdef USE_SELECT
	int			fd_bufsize;	/* byte size of each fd_set buffer */
#endif	/* USE_SELECT */
	unsigned int		maxsocks;	/* max FDs (ISC_SOCKET_MAXSOCKETS by default) */
#ifdef ISC_PLATFORM_USETHREADS
	int			pipe_fds[2];	/* watcher wakeup pipe; see select_poke() */
#endif

	/* Locked by fdlock. */
	isc__socket_t	       **fds;		/* fd -> socket map */
	int			*fdstate;	/* fd -> CLOSED/MANAGED/CLOSE_PENDING */
#ifdef USE_DEVPOLL
	pollinfo_t		*fdpollinfo;	/* which events are currently polled per fd */
#endif

	/* Locked by manager lock. */
	ISC_LIST(isc__socket_t)	socklist;
#ifdef USE_SELECT
	fd_set			*read_fds;
	fd_set			*read_fds_copy;
	fd_set			*write_fds;
	fd_set			*write_fds_copy;
	int			maxfd;
#endif	/* USE_SELECT */
	int			reserved;	/* unlocked */
#ifdef USE_WATCHER_THREAD
	isc_thread_t		watcher;
	isc_condition_t		shutdown_ok;
#else /* USE_WATCHER_THREAD */
	unsigned int		refs;
#endif /* USE_WATCHER_THREAD */
	int			maxudp;
};
415
416#ifdef USE_SHARED_MANAGER
417static isc__socketmgr_t *socketmgr = NULL;
418#endif /* USE_SHARED_MANAGER */
419
420#define CLOSED			0	/* this one must be zero */
421#define MANAGED			1
422#define CLOSE_PENDING		2
423
424/*
425 * send() and recv() iovec counts
426 */
427#define MAXSCATTERGATHER_SEND	(ISC_SOCKET_MAXSCATTERGATHER)
428#ifdef ISC_NET_RECVOVERFLOW
429# define MAXSCATTERGATHER_RECV	(ISC_SOCKET_MAXSCATTERGATHER + 1)
430#else
431# define MAXSCATTERGATHER_RECV	(ISC_SOCKET_MAXSCATTERGATHER)
432#endif
433
434static isc_result_t socket_create(isc_socketmgr_t *manager0, int pf,
435				  isc_sockettype_t type,
436				  isc_socket_t **socketp,
437				  isc_socket_t *dup_socket);
438static void send_recvdone_event(isc__socket_t *, isc_socketevent_t **);
439static void send_senddone_event(isc__socket_t *, isc_socketevent_t **);
440static void free_socket(isc__socket_t **);
441static isc_result_t allocate_socket(isc__socketmgr_t *, isc_sockettype_t,
442				    isc__socket_t **);
443static void destroy(isc__socket_t **);
444static void internal_accept(isc_task_t *, isc_event_t *);
445static void internal_connect(isc_task_t *, isc_event_t *);
446static void internal_recv(isc_task_t *, isc_event_t *);
447static void internal_send(isc_task_t *, isc_event_t *);
448static void internal_fdwatch_write(isc_task_t *, isc_event_t *);
449static void internal_fdwatch_read(isc_task_t *, isc_event_t *);
450static void process_cmsg(isc__socket_t *, struct msghdr *, isc_socketevent_t *);
451static void build_msghdr_send(isc__socket_t *, isc_socketevent_t *,
452			      struct msghdr *, struct iovec *, size_t *);
453static void build_msghdr_recv(isc__socket_t *, isc_socketevent_t *,
454			      struct msghdr *, struct iovec *, size_t *);
455#ifdef USE_WATCHER_THREAD
456static isc_boolean_t process_ctlfd(isc__socketmgr_t *manager);
457#endif
458
459/*%
460 * The following can be either static or public, depending on build environment.
461 */
462
463#ifdef BIND9
464#define ISC_SOCKETFUNC_SCOPE
465#else
466#define ISC_SOCKETFUNC_SCOPE static
467#endif
468
469ISC_SOCKETFUNC_SCOPE isc_result_t
470isc__socket_create(isc_socketmgr_t *manager, int pf, isc_sockettype_t type,
471		   isc_socket_t **socketp);
472ISC_SOCKETFUNC_SCOPE void
473isc__socket_attach(isc_socket_t *sock, isc_socket_t **socketp);
474ISC_SOCKETFUNC_SCOPE void
475isc__socket_detach(isc_socket_t **socketp);
476ISC_SOCKETFUNC_SCOPE isc_result_t
477isc__socketmgr_create(isc_mem_t *mctx, isc_socketmgr_t **managerp);
478ISC_SOCKETFUNC_SCOPE isc_result_t
479isc__socketmgr_create2(isc_mem_t *mctx, isc_socketmgr_t **managerp,
480		       unsigned int maxsocks);
481ISC_SOCKETFUNC_SCOPE void
482isc__socketmgr_destroy(isc_socketmgr_t **managerp);
483ISC_SOCKETFUNC_SCOPE isc_result_t
484isc__socket_recvv(isc_socket_t *sock, isc_bufferlist_t *buflist,
485		 unsigned int minimum, isc_task_t *task,
486		  isc_taskaction_t action, const void *arg);
487ISC_SOCKETFUNC_SCOPE isc_result_t
488isc__socket_recv(isc_socket_t *sock, isc_region_t *region,
489		 unsigned int minimum, isc_task_t *task,
490		 isc_taskaction_t action, const void *arg);
491ISC_SOCKETFUNC_SCOPE isc_result_t
492isc__socket_recv2(isc_socket_t *sock, isc_region_t *region,
493		  unsigned int minimum, isc_task_t *task,
494		  isc_socketevent_t *event, unsigned int flags);
495ISC_SOCKETFUNC_SCOPE isc_result_t
496isc__socket_send(isc_socket_t *sock, isc_region_t *region,
497		 isc_task_t *task, isc_taskaction_t action, const void *arg);
498ISC_SOCKETFUNC_SCOPE isc_result_t
499isc__socket_sendto(isc_socket_t *sock, isc_region_t *region,
500		   isc_task_t *task, isc_taskaction_t action, const void *arg,
501		   isc_sockaddr_t *address, struct in6_pktinfo *pktinfo);
502ISC_SOCKETFUNC_SCOPE isc_result_t
503isc__socket_sendv(isc_socket_t *sock, isc_bufferlist_t *buflist,
504		  isc_task_t *task, isc_taskaction_t action, const void *arg);
505ISC_SOCKETFUNC_SCOPE isc_result_t
506isc__socket_sendtov(isc_socket_t *sock, isc_bufferlist_t *buflist,
507		    isc_task_t *task, isc_taskaction_t action, const void *arg,
508		    isc_sockaddr_t *address, struct in6_pktinfo *pktinfo);
509ISC_SOCKETFUNC_SCOPE isc_result_t
510isc__socket_sendto2(isc_socket_t *sock, isc_region_t *region,
511		    isc_task_t *task,
512		    isc_sockaddr_t *address, struct in6_pktinfo *pktinfo,
513		    isc_socketevent_t *event, unsigned int flags);
514ISC_SOCKETFUNC_SCOPE void
515isc__socket_cleanunix(isc_sockaddr_t *sockaddr, isc_boolean_t active);
516ISC_SOCKETFUNC_SCOPE isc_result_t
517isc__socket_permunix(isc_sockaddr_t *sockaddr, isc_uint32_t perm,
518		     isc_uint32_t owner, isc_uint32_t group);
519ISC_SOCKETFUNC_SCOPE isc_result_t
520isc__socket_bind(isc_socket_t *sock, isc_sockaddr_t *sockaddr,
521		 unsigned int options);
522ISC_SOCKETFUNC_SCOPE isc_result_t
523isc__socket_filter(isc_socket_t *sock, const char *filter);
524ISC_SOCKETFUNC_SCOPE isc_result_t
525isc__socket_listen(isc_socket_t *sock, unsigned int backlog);
526ISC_SOCKETFUNC_SCOPE isc_result_t
527isc__socket_accept(isc_socket_t *sock,
528		   isc_task_t *task, isc_taskaction_t action, const void *arg);
529ISC_SOCKETFUNC_SCOPE isc_result_t
530isc__socket_connect(isc_socket_t *sock, isc_sockaddr_t *addr,
531		    isc_task_t *task, isc_taskaction_t action,
532		    const void *arg);
533ISC_SOCKETFUNC_SCOPE isc_result_t
534isc__socket_getpeername(isc_socket_t *sock, isc_sockaddr_t *addressp);
535ISC_SOCKETFUNC_SCOPE isc_result_t
536isc__socket_getsockname(isc_socket_t *sock, isc_sockaddr_t *addressp);
537ISC_SOCKETFUNC_SCOPE void
538isc__socket_cancel(isc_socket_t *sock, isc_task_t *task, unsigned int how);
539ISC_SOCKETFUNC_SCOPE isc_sockettype_t
540isc__socket_gettype(isc_socket_t *sock);
541ISC_SOCKETFUNC_SCOPE isc_boolean_t
542isc__socket_isbound(isc_socket_t *sock);
543ISC_SOCKETFUNC_SCOPE void
544isc__socket_ipv6only(isc_socket_t *sock, isc_boolean_t yes);
545#if defined(HAVE_LIBXML2) && defined(BIND9)
546ISC_SOCKETFUNC_SCOPE void
547isc__socketmgr_renderxml(isc_socketmgr_t *mgr0, xmlTextWriterPtr writer);
548#endif
549
550ISC_SOCKETFUNC_SCOPE isc_result_t
551isc__socket_fdwatchcreate(isc_socketmgr_t *manager, int fd, int flags,
552			  isc_sockfdwatch_t callback, void *cbarg,
553			  isc_task_t *task, isc_socket_t **socketp);
554ISC_SOCKETFUNC_SCOPE isc_result_t
555isc__socket_fdwatchpoke(isc_socket_t *sock, int flags);
556ISC_SOCKETFUNC_SCOPE isc_result_t
557isc__socket_dup(isc_socket_t *sock, isc_socket_t **socketp);
558ISC_SOCKETFUNC_SCOPE int
559isc__socket_getfd(isc_socket_t *sock);
560
/*%
 * Method table binding the generic isc_socket API to the isc__socket_*
 * implementations in this file.
 */
static struct {
	isc_socketmethods_t methods;

	/*%
	 * The following are defined just for avoiding unused static functions.
	 */
#ifndef BIND9
	void *recvv, *send, *sendv, *sendto2, *cleanunix, *permunix, *filter,
		*listen, *accept, *getpeername, *isbound;
#endif
} socketmethods = {
	{
		isc__socket_attach,
		isc__socket_detach,
		isc__socket_bind,
		isc__socket_sendto,
		isc__socket_connect,
		isc__socket_recv,
		isc__socket_cancel,
		isc__socket_getsockname,
		isc__socket_gettype,
		isc__socket_ipv6only,
		isc__socket_fdwatchpoke,
		isc__socket_dup,
		isc__socket_getfd
	}
#ifndef BIND9
	,
	(void *)isc__socket_recvv, (void *)isc__socket_send,
	(void *)isc__socket_sendv, (void *)isc__socket_sendto2,
	(void *)isc__socket_cleanunix, (void *)isc__socket_permunix,
	(void *)isc__socket_filter, (void *)isc__socket_listen,
	(void *)isc__socket_accept, (void *)isc__socket_getpeername,
	(void *)isc__socket_isbound
#endif
};
597
/*%
 * Manager-level method table binding the generic socket-manager API to
 * the implementations in this file.
 */
static isc_socketmgrmethods_t socketmgrmethods = {
	isc__socketmgr_destroy,
	isc__socket_create,
	isc__socket_fdwatchcreate
};
603
604#define SELECT_POKE_SHUTDOWN		(-1)
605#define SELECT_POKE_NOTHING		(-2)
606#define SELECT_POKE_READ		(-3)
607#define SELECT_POKE_ACCEPT		(-3) /*%< Same as _READ */
608#define SELECT_POKE_WRITE		(-4)
609#define SELECT_POKE_CONNECT		(-4) /*%< Same as _WRITE */
610#define SELECT_POKE_CLOSE		(-5)
611
612#define SOCK_DEAD(s)			((s)->references == 0)
613
614/*%
615 * Shortcut index arrays to get access to statistics counters.
616 */
/*
 * Index positions into the per-socket-type statistics tables below.
 * Each socket's 'statsindex' member points at one of these tables;
 * counting then uses inc_stats(stats, statsindex[STATID_xxx]).
 */
enum {
	STATID_OPEN = 0,
	STATID_OPENFAIL = 1,
	STATID_CLOSE = 2,
	STATID_BINDFAIL = 3,
	STATID_CONNECTFAIL = 4,
	STATID_CONNECT = 5,
	STATID_ACCEPTFAIL = 6,
	STATID_ACCEPT = 7,
	STATID_SENDFAIL = 8,
	STATID_RECVFAIL = 9
};
/*
 * A -1 entry marks a counter that does not apply to the socket type
 * (e.g. UDP has no accept).  inc_stats() REQUIREs the id is not -1, so
 * such entries must never be passed to it.
 * NOTE(review): "upd4"/"upd6" look like typos for "udp4"/"udp6", but the
 * names are internal and consistent with their users, so left alone.
 */
static const isc_statscounter_t upd4statsindex[] = {
	isc_sockstatscounter_udp4open,
	isc_sockstatscounter_udp4openfail,
	isc_sockstatscounter_udp4close,
	isc_sockstatscounter_udp4bindfail,
	isc_sockstatscounter_udp4connectfail,
	isc_sockstatscounter_udp4connect,
	-1,
	-1,
	isc_sockstatscounter_udp4sendfail,
	isc_sockstatscounter_udp4recvfail
};
static const isc_statscounter_t upd6statsindex[] = {
	isc_sockstatscounter_udp6open,
	isc_sockstatscounter_udp6openfail,
	isc_sockstatscounter_udp6close,
	isc_sockstatscounter_udp6bindfail,
	isc_sockstatscounter_udp6connectfail,
	isc_sockstatscounter_udp6connect,
	-1,
	-1,
	isc_sockstatscounter_udp6sendfail,
	isc_sockstatscounter_udp6recvfail
};
static const isc_statscounter_t tcp4statsindex[] = {
	isc_sockstatscounter_tcp4open,
	isc_sockstatscounter_tcp4openfail,
	isc_sockstatscounter_tcp4close,
	isc_sockstatscounter_tcp4bindfail,
	isc_sockstatscounter_tcp4connectfail,
	isc_sockstatscounter_tcp4connect,
	isc_sockstatscounter_tcp4acceptfail,
	isc_sockstatscounter_tcp4accept,
	isc_sockstatscounter_tcp4sendfail,
	isc_sockstatscounter_tcp4recvfail
};
static const isc_statscounter_t tcp6statsindex[] = {
	isc_sockstatscounter_tcp6open,
	isc_sockstatscounter_tcp6openfail,
	isc_sockstatscounter_tcp6close,
	isc_sockstatscounter_tcp6bindfail,
	isc_sockstatscounter_tcp6connectfail,
	isc_sockstatscounter_tcp6connect,
	isc_sockstatscounter_tcp6acceptfail,
	isc_sockstatscounter_tcp6accept,
	isc_sockstatscounter_tcp6sendfail,
	isc_sockstatscounter_tcp6recvfail
};
static const isc_statscounter_t unixstatsindex[] = {
	isc_sockstatscounter_unixopen,
	isc_sockstatscounter_unixopenfail,
	isc_sockstatscounter_unixclose,
	isc_sockstatscounter_unixbindfail,
	isc_sockstatscounter_unixconnectfail,
	isc_sockstatscounter_unixconnect,
	isc_sockstatscounter_unixacceptfail,
	isc_sockstatscounter_unixaccept,
	isc_sockstatscounter_unixsendfail,
	isc_sockstatscounter_unixrecvfail
};
static const isc_statscounter_t fdwatchstatsindex[] = {
	-1,
	-1,
	isc_sockstatscounter_fdwatchclose,
	isc_sockstatscounter_fdwatchbindfail,
	isc_sockstatscounter_fdwatchconnectfail,
	isc_sockstatscounter_fdwatchconnect,
	-1,
	-1,
	isc_sockstatscounter_fdwatchsendfail,
	isc_sockstatscounter_fdwatchrecvfail
};
701
702#if defined(USE_KQUEUE) || defined(USE_EPOLL) || defined(USE_DEVPOLL) || \
703    defined(USE_WATCHER_THREAD)
704static void
705manager_log(isc__socketmgr_t *sockmgr,
706	    isc_logcategory_t *category, isc_logmodule_t *module, int level,
707	    const char *fmt, ...) ISC_FORMAT_PRINTF(5, 6);
708static void
709manager_log(isc__socketmgr_t *sockmgr,
710	    isc_logcategory_t *category, isc_logmodule_t *module, int level,
711	    const char *fmt, ...)
712{
713	char msgbuf[2048];
714	va_list ap;
715
716	if (! isc_log_wouldlog(isc_lctx, level))
717		return;
718
719	va_start(ap, fmt);
720	vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
721	va_end(ap);
722
723	isc_log_write(isc_lctx, category, module, level,
724		      "sockmgr %p: %s", sockmgr, msgbuf);
725}
726#endif
727
728static void
729socket_log(isc__socket_t *sock, isc_sockaddr_t *address,
730	   isc_logcategory_t *category, isc_logmodule_t *module, int level,
731	   isc_msgcat_t *msgcat, int msgset, int message,
732	   const char *fmt, ...) ISC_FORMAT_PRINTF(9, 10);
733static void
734socket_log(isc__socket_t *sock, isc_sockaddr_t *address,
735	   isc_logcategory_t *category, isc_logmodule_t *module, int level,
736	   isc_msgcat_t *msgcat, int msgset, int message,
737	   const char *fmt, ...)
738{
739	char msgbuf[2048];
740	char peerbuf[ISC_SOCKADDR_FORMATSIZE];
741	va_list ap;
742
743	if (! isc_log_wouldlog(isc_lctx, level))
744		return;
745
746	va_start(ap, fmt);
747	vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
748	va_end(ap);
749
750	if (address == NULL) {
751		isc_log_iwrite(isc_lctx, category, module, level,
752			       msgcat, msgset, message,
753			       "socket %p: %s", sock, msgbuf);
754	} else {
755		isc_sockaddr_format(address, peerbuf, sizeof(peerbuf));
756		isc_log_iwrite(isc_lctx, category, module, level,
757			       msgcat, msgset, message,
758			       "socket %p %s: %s", sock, peerbuf, msgbuf);
759	}
760}
761
762#if defined(_AIX) && defined(ISC_NET_BSD44MSGHDR) && \
763    defined(USE_CMSG) && defined(IPV6_RECVPKTINFO)
764/*
765 * AIX has a kernel bug where IPV6_RECVPKTINFO gets cleared by
766 * setting IPV6_V6ONLY.
767 */
/*
 * Re-enable IPV6_RECVPKTINFO on 'sock' (see the AIX kernel-bug note
 * above).  Only IPv6 UDP sockets are affected; for anything else this
 * is a no-op.  A setsockopt() failure is logged but otherwise ignored.
 */
static void
FIX_IPV6_RECVPKTINFO(isc__socket_t *sock)
{
	char strbuf[ISC_STRERRORSIZE];
	int on = 1;

	if (sock->pf != AF_INET6 || sock->type != isc_sockettype_udp)
		return;

	if (setsockopt(sock->fd, IPPROTO_IPV6, IPV6_RECVPKTINFO,
		       (void *)&on, sizeof(on)) < 0) {

		isc__strerror(errno, strbuf, sizeof(strbuf));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "setsockopt(%d, IPV6_RECVPKTINFO) "
				 "%s: %s", sock->fd,
				 isc_msgcat_get(isc_msgcat,
						ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED,
						"failed"),
				 strbuf);
	}
}
791#else
792#define FIX_IPV6_RECVPKTINFO(sock) (void)0
793#endif
794
795/*%
796 * Increment socket-related statistics counters.
797 */
798static inline void
799inc_stats(isc_stats_t *stats, isc_statscounter_t counterid) {
800	REQUIRE(counterid != -1);
801
802	if (stats != NULL)
803		isc_stats_increment(stats, counterid);
804}
805
/*
 * Start watching 'fd' for readability (msg == SELECT_POKE_READ) or,
 * for any other msg, writability, using whichever multiplexing backend
 * was selected at build time.  Returns ISC_R_SUCCESS or an error
 * translated from errno.
 */
static inline isc_result_t
watch_fd(isc__socketmgr_t *manager, int fd, int msg) {
	isc_result_t result = ISC_R_SUCCESS;

#ifdef USE_KQUEUE
	struct kevent evchange;

	memset(&evchange, 0, sizeof(evchange));
	if (msg == SELECT_POKE_READ)
		evchange.filter = EVFILT_READ;
	else
		evchange.filter = EVFILT_WRITE;
	evchange.flags = EV_ADD;
	evchange.ident = fd;
	if (kevent(manager->kqueue_fd, &evchange, 1, NULL, 0, NULL) != 0)
		result = isc__errno2result(errno);

	return (result);
#elif defined(USE_EPOLL)
	struct epoll_event event;

	if (msg == SELECT_POKE_READ)
		event.events = EPOLLIN;
	else
		event.events = EPOLLOUT;
	memset(&event.data, 0, sizeof(event.data));
	event.data.fd = fd;
	/*
	 * EEXIST is tolerated: the fd may already be registered.
	 * NOTE(review): in that case the new event bit is NOT added
	 * (that would need EPOLL_CTL_MOD) -- confirm callers never
	 * need read and write interest on the same fd simultaneously.
	 */
	if (epoll_ctl(manager->epoll_fd, EPOLL_CTL_ADD, fd, &event) == -1 &&
	    errno != EEXIST) {
		result = isc__errno2result(errno);
	}

	return (result);
#elif defined(USE_DEVPOLL)
	struct pollfd pfd;
	int lockid = FDLOCK_ID(fd);

	memset(&pfd, 0, sizeof(pfd));
	if (msg == SELECT_POKE_READ)
		pfd.events = POLLIN;
	else
		pfd.events = POLLOUT;
	pfd.fd = fd;
	pfd.revents = 0;
	/* fdlock protects fdpollinfo[], which mirrors the kernel state. */
	LOCK(&manager->fdlock[lockid]);
	if (write(manager->devpoll_fd, &pfd, sizeof(pfd)) == -1)
		result = isc__errno2result(errno);
	else {
		if (msg == SELECT_POKE_READ)
			manager->fdpollinfo[fd].want_read = 1;
		else
			manager->fdpollinfo[fd].want_write = 1;
	}
	UNLOCK(&manager->fdlock[lockid]);

	return (result);
#elif defined(USE_SELECT)
	/* The select() fd_sets live under the manager lock. */
	LOCK(&manager->lock);
	if (msg == SELECT_POKE_READ)
		FD_SET(fd, manager->read_fds);
	if (msg == SELECT_POKE_WRITE)
		FD_SET(fd, manager->write_fds);
	UNLOCK(&manager->lock);

	return (result);
#endif
}
873
/*
 * Stop watching 'fd' for the event selected by 'msg' (SELECT_POKE_READ
 * or SELECT_POKE_WRITE).  Counterpart of watch_fd(); returns
 * ISC_R_SUCCESS or an error translated from errno.
 */
static inline isc_result_t
unwatch_fd(isc__socketmgr_t *manager, int fd, int msg) {
	isc_result_t result = ISC_R_SUCCESS;

#ifdef USE_KQUEUE
	struct kevent evchange;

	memset(&evchange, 0, sizeof(evchange));
	if (msg == SELECT_POKE_READ)
		evchange.filter = EVFILT_READ;
	else
		evchange.filter = EVFILT_WRITE;
	evchange.flags = EV_DELETE;
	evchange.ident = fd;
	if (kevent(manager->kqueue_fd, &evchange, 1, NULL, 0, NULL) != 0)
		result = isc__errno2result(errno);

	return (result);
#elif defined(USE_EPOLL)
	struct epoll_event event;

	if (msg == SELECT_POKE_READ)
		event.events = EPOLLIN;
	else
		event.events = EPOLLOUT;
	memset(&event.data, 0, sizeof(event.data));
	event.data.fd = fd;
	/* ENOENT is tolerated: the fd may already have been removed. */
	if (epoll_ctl(manager->epoll_fd, EPOLL_CTL_DEL, fd, &event) == -1 &&
	    errno != ENOENT) {
		char strbuf[ISC_STRERRORSIZE];
		isc__strerror(errno, strbuf, sizeof(strbuf));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "epoll_ctl(DEL), %d: %s", fd, strbuf);
		result = ISC_R_UNEXPECTED;
	}
	return (result);
#elif defined(USE_DEVPOLL)
	struct pollfd pfds[2];
	size_t writelen = sizeof(pfds[0]);
	int lockid = FDLOCK_ID(fd);

	memset(pfds, 0, sizeof(pfds));
	pfds[0].events = POLLREMOVE;
	pfds[0].fd = fd;

	/*
	 * Canceling read or write polling via /dev/poll is tricky.  Since it
	 * only provides a way of canceling per FD, we may need to re-poll the
	 * socket for the other operation.
	 */
	LOCK(&manager->fdlock[lockid]);
	if (msg == SELECT_POKE_READ &&
	    manager->fdpollinfo[fd].want_write == 1) {
		/* Removing read interest: re-add the still-wanted write. */
		pfds[1].events = POLLOUT;
		pfds[1].fd = fd;
		writelen += sizeof(pfds[1]);
	}
	if (msg == SELECT_POKE_WRITE &&
	    manager->fdpollinfo[fd].want_read == 1) {
		/* Removing write interest: re-add the still-wanted read. */
		pfds[1].events = POLLIN;
		pfds[1].fd = fd;
		writelen += sizeof(pfds[1]);
	}

	/* One write delivers both the POLLREMOVE and any re-add. */
	if (write(manager->devpoll_fd, pfds, writelen) == -1)
		result = isc__errno2result(errno);
	else {
		if (msg == SELECT_POKE_READ)
			manager->fdpollinfo[fd].want_read = 0;
		else
			manager->fdpollinfo[fd].want_write = 0;
	}
	UNLOCK(&manager->fdlock[lockid]);

	return (result);
#elif defined(USE_SELECT)
	LOCK(&manager->lock);
	if (msg == SELECT_POKE_READ)
		FD_CLR(fd, manager->read_fds);
	else if (msg == SELECT_POKE_WRITE)
		FD_CLR(fd, manager->write_fds);
	UNLOCK(&manager->lock);

	return (result);
#endif
}
960
/*
 * Act on a poke message for 'fd': SELECT_POKE_CLOSE finalizes a pending
 * close (unwatch and close the descriptor); SELECT_POKE_READ/_WRITE
 * (re)arm watching, but only while the fd is still in the MANAGED state.
 */
static void
wakeup_socket(isc__socketmgr_t *manager, int fd, int msg) {
	isc_result_t result;
	int lockid = FDLOCK_ID(fd);

	/*
	 * This is a wakeup on a socket.  If the socket is not in the
	 * process of being closed, start watching it for either reads
	 * or writes.
	 */

	INSIST(fd >= 0 && fd < (int)manager->maxsocks);

	if (msg == SELECT_POKE_CLOSE) {
		/* No one should be updating fdstate, so no need to lock it */
		INSIST(manager->fdstate[fd] == CLOSE_PENDING);
		manager->fdstate[fd] = CLOSED;
		/* Errors ignored: the fd is going away regardless. */
		(void)unwatch_fd(manager, fd, SELECT_POKE_READ);
		(void)unwatch_fd(manager, fd, SELECT_POKE_WRITE);
		(void)close(fd);
		return;
	}

	LOCK(&manager->fdlock[lockid]);
	if (manager->fdstate[fd] == CLOSE_PENDING) {
		UNLOCK(&manager->fdlock[lockid]);

		/*
		 * We accept (and ignore) any error from unwatch_fd() as we are
		 * closing the socket, hoping it doesn't leave dangling state in
		 * the kernel.
		 * Note that unwatch_fd() must be called after releasing the
		 * fdlock; otherwise it could cause deadlock due to a lock order
		 * reversal.
		 */
		(void)unwatch_fd(manager, fd, SELECT_POKE_READ);
		(void)unwatch_fd(manager, fd, SELECT_POKE_WRITE);
		return;
	}
	if (manager->fdstate[fd] != MANAGED) {
		UNLOCK(&manager->fdlock[lockid]);
		return;
	}
	UNLOCK(&manager->fdlock[lockid]);

	/*
	 * Set requested bit.
	 */
	result = watch_fd(manager, fd, msg);
	if (result != ISC_R_SUCCESS) {
		/*
		 * XXXJT: what should we do?  Ignoring the failure of watching
		 * a socket will make the application dysfunctional, but there
		 * seems to be no reasonable recovery process.
		 */
		isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
			      ISC_LOGMODULE_SOCKET, ISC_LOG_ERROR,
			      "failed to start watching FD (%d): %s",
			      fd, isc_result_totext(result));
	}
}
1022
1023#ifdef USE_WATCHER_THREAD
1024/*
1025 * Poke the select loop when there is something for us to do.
1026 * The write is required (by POSIX) to complete.  That is, we
1027 * will not get partial writes.
1028 */
static void
select_poke(isc__socketmgr_t *mgr, int fd, int msg) {
	int cc;
	int buf[2];
	char strbuf[ISC_STRERRORSIZE];

	/* The watcher reads back this (fd, msg) pair; see select_readmsg(). */
	buf[0] = fd;
	buf[1] = msg;

	/* Retry the pipe write on soft (transient) errors. */
	do {
		cc = write(mgr->pipe_fds[1], buf, sizeof(buf));
#ifdef ENOSR
		/*
		 * Treat ENOSR as EAGAIN but loop slowly as it is
		 * unlikely to clear fast.
		 */
		if (cc < 0 && errno == ENOSR) {
			sleep(1);
			errno = EAGAIN;
		}
#endif
	} while (cc < 0 && SOFT_ERROR(errno));

	if (cc < 0) {
		isc__strerror(errno, strbuf, sizeof(strbuf));
		FATAL_ERROR(__FILE__, __LINE__,
			    isc_msgcat_get(isc_msgcat, ISC_MSGSET_SOCKET,
					   ISC_MSG_WRITEFAILED,
					   "write() failed "
					   "during watcher poke: %s"),
			    strbuf);
	}

	/* Per the comment above: writes of this size complete atomically. */
	INSIST(cc == sizeof(buf));
}
1064
1065/*
1066 * Read a message on the internal fd.
1067 */
static void
select_readmsg(isc__socketmgr_t *mgr, int *fd, int *msg) {
	int buf[2];
	int cc;
	char strbuf[ISC_STRERRORSIZE];

	/* Counterpart of select_poke(): pull one (fd, msg) pair off the pipe. */
	cc = read(mgr->pipe_fds[0], buf, sizeof(buf));
	if (cc < 0) {
		/* On any read failure report "nothing to do" to the caller. */
		*msg = SELECT_POKE_NOTHING;
		*fd = -1;	/* Silence compiler. */
		if (SOFT_ERROR(errno))
			return;

		isc__strerror(errno, strbuf, sizeof(strbuf));
		FATAL_ERROR(__FILE__, __LINE__,
			    isc_msgcat_get(isc_msgcat, ISC_MSGSET_SOCKET,
					   ISC_MSG_READFAILED,
					   "read() failed "
					   "during watcher poke: %s"),
			    strbuf);

		return;
	}
	/* Like the write side, reads of this size do not come back partial. */
	INSIST(cc == sizeof(buf));

	*fd = buf[0];
	*msg = buf[1];
}
1096#else /* USE_WATCHER_THREAD */
1097/*
1098 * Update the state of the socketmgr when something changes.
1099 */
1100static void
1101select_poke(isc__socketmgr_t *manager, int fd, int msg) {
1102	if (msg == SELECT_POKE_SHUTDOWN)
1103		return;
1104	else if (fd >= 0)
1105		wakeup_socket(manager, fd, msg);
1106	return;
1107}
1108#endif /* USE_WATCHER_THREAD */
1109
1110/*
1111 * Make a fd non-blocking.
1112 */
1113static isc_result_t
1114make_nonblock(int fd) {
1115	int ret;
1116	int flags;
1117	char strbuf[ISC_STRERRORSIZE];
1118#ifdef USE_FIONBIO_IOCTL
1119	int on = 1;
1120
1121	ret = ioctl(fd, FIONBIO, (char *)&on);
1122#else
1123	flags = fcntl(fd, F_GETFL, 0);
1124	flags |= PORT_NONBLOCK;
1125	ret = fcntl(fd, F_SETFL, flags);
1126#endif
1127
1128	if (ret == -1) {
1129		isc__strerror(errno, strbuf, sizeof(strbuf));
1130		UNEXPECTED_ERROR(__FILE__, __LINE__,
1131#ifdef USE_FIONBIO_IOCTL
1132				 "ioctl(%d, FIONBIO, &on): %s", fd,
1133#else
1134				 "fcntl(%d, F_SETFL, %d): %s", fd, flags,
1135#endif
1136				 strbuf);
1137
1138		return (ISC_R_UNEXPECTED);
1139	}
1140
1141	return (ISC_R_SUCCESS);
1142}
1143
1144#ifdef USE_CMSG
1145/*
1146 * Not all OSes support advanced CMSG macros: CMSG_LEN and CMSG_SPACE.
1147 * In order to ensure as much portability as possible, we provide wrapper
1148 * functions of these macros.
1149 * Note that cmsg_space() could run slow on OSes that do not have
1150 * CMSG_SPACE.
1151 */
/*
 * Portable equivalent of CMSG_LEN(): the cmsghdr header size plus the
 * payload length `len'.
 */
static inline ISC_SOCKADDR_LEN_T
cmsg_len(ISC_SOCKADDR_LEN_T len) {
#ifdef CMSG_LEN
	return (CMSG_LEN(len));
#else
	ISC_SOCKADDR_LEN_T hdrlen;

	/*
	 * Cast NULL so that any pointer arithmetic performed by CMSG_DATA
	 * is correct.
	 */
	hdrlen = (ISC_SOCKADDR_LEN_T)CMSG_DATA(((struct cmsghdr *)NULL));
	return (hdrlen + len);
#endif
}
1167
/*
 * Portable equivalent of CMSG_SPACE(): the number of bytes a control
 * message with payload length `len' occupies, including any alignment
 * padding.  The fallback derives the value by walking a dummy msghdr
 * with CMSG_NXTHDR, which is why it can be slow (see the note above).
 */
static inline ISC_SOCKADDR_LEN_T
cmsg_space(ISC_SOCKADDR_LEN_T len) {
#ifdef CMSG_SPACE
	return (CMSG_SPACE(len));
#else
	struct msghdr msg;
	struct cmsghdr *cmsgp;
	/*
	 * XXX: The buffer length is an ad-hoc value, but should be enough
	 * in a practical sense.
	 */
	char dummybuf[sizeof(struct cmsghdr) + 1024];

	memset(&msg, 0, sizeof(msg));
	msg.msg_control = dummybuf;
	msg.msg_controllen = sizeof(dummybuf);

	cmsgp = (struct cmsghdr *)dummybuf;
	cmsgp->cmsg_len = cmsg_len(len);

	/* The offset of the next header equals the space the first uses. */
	cmsgp = CMSG_NXTHDR(&msg, cmsgp);
	if (cmsgp != NULL)
		return ((char *)cmsgp - (char *)msg.msg_control);
	else
		return (0);
#endif
}
1195#endif /* USE_CMSG */
1196
1197/*
1198 * Process control messages received on a socket.
1199 */
/*
 * Walk the control messages attached to a received msghdr and record
 * the interesting ones on the socket event: truncation flags, IPv6
 * packet info (receiving interface / destination address), and the
 * kernel receive timestamp.
 */
static void
process_cmsg(isc__socket_t *sock, struct msghdr *msg, isc_socketevent_t *dev) {
#ifdef USE_CMSG
	struct cmsghdr *cmsgp;
#ifdef ISC_PLATFORM_HAVEIN6PKTINFO
	struct in6_pktinfo *pktinfop;
#endif
#ifdef SO_TIMESTAMP
	struct timeval *timevalp;
#endif
#endif

	/*
	 * sock is used only when ISC_NET_BSD44MSGHDR and USE_CMSG are defined.
	 * msg and dev are used only when ISC_NET_BSD44MSGHDR is defined.
	 * They are all here, outside of the CPP tests, because it is
	 * more consistent with the usual ISC coding style.
	 */
	UNUSED(sock);
	UNUSED(msg);
	UNUSED(dev);

#ifdef ISC_NET_BSD44MSGHDR

	/* Datagram truncated (payload did not fit the supplied buffers). */
#ifdef MSG_TRUNC
	if ((msg->msg_flags & MSG_TRUNC) == MSG_TRUNC)
		dev->attributes |= ISC_SOCKEVENTATTR_TRUNC;
#endif

	/* Control data truncated (cmsg buffer too small). */
#ifdef MSG_CTRUNC
	if ((msg->msg_flags & MSG_CTRUNC) == MSG_CTRUNC)
		dev->attributes |= ISC_SOCKEVENTATTR_CTRUNC;
#endif

#ifndef USE_CMSG
	return;
#else
	if (msg->msg_controllen == 0U || msg->msg_control == NULL)
		return;

#ifdef SO_TIMESTAMP
	timevalp = NULL;
#endif
#ifdef ISC_PLATFORM_HAVEIN6PKTINFO
	pktinfop = NULL;
#endif

	cmsgp = CMSG_FIRSTHDR(msg);
	while (cmsgp != NULL) {
		socket_log(sock, NULL, TRACE,
			   isc_msgcat, ISC_MSGSET_SOCKET, ISC_MSG_PROCESSCMSG,
			   "processing cmsg %p", cmsgp);

#ifdef ISC_PLATFORM_HAVEIN6PKTINFO
		if (cmsgp->cmsg_level == IPPROTO_IPV6
		    && cmsgp->cmsg_type == IPV6_PKTINFO) {

			pktinfop = (struct in6_pktinfo *)CMSG_DATA(cmsgp);
			memcpy(&dev->pktinfo, pktinfop,
			       sizeof(struct in6_pktinfo));
			dev->attributes |= ISC_SOCKEVENTATTR_PKTINFO;
			socket_log(sock, NULL, TRACE,
				   isc_msgcat, ISC_MSGSET_SOCKET,
				   ISC_MSG_IFRECEIVED,
				   "interface received on ifindex %u",
				   dev->pktinfo.ipi6_ifindex);
			if (IN6_IS_ADDR_MULTICAST(&pktinfop->ipi6_addr))
				dev->attributes |= ISC_SOCKEVENTATTR_MULTICAST;
			goto next;
		}
#endif

#ifdef SO_TIMESTAMP
		if (cmsgp->cmsg_level == SOL_SOCKET
		    && cmsgp->cmsg_type == SCM_TIMESTAMP) {
			timevalp = (struct timeval *)CMSG_DATA(cmsgp);
			dev->timestamp.seconds = timevalp->tv_sec;
			dev->timestamp.nanoseconds = timevalp->tv_usec * 1000;
			dev->attributes |= ISC_SOCKEVENTATTR_TIMESTAMP;
			goto next;
		}
#endif

	next:
		cmsgp = CMSG_NXTHDR(msg, cmsgp);
	}
#endif /* USE_CMSG */

#endif /* ISC_NET_BSD44MSGHDR */
}
1290
1291/*
1292 * Construct an iov array and attach it to the msghdr passed in.  This is
1293 * the SEND constructor, which will use the used region of the buffer
1294 * (if using a buffer list) or will use the internal region (if a single
1295 * buffer I/O is requested).
1296 *
1297 * Nothing can be NULL, and the done event must list at least one buffer
1298 * on the buffer linked list for this function to be meaningful.
1299 *
1300 * If write_countp != NULL, *write_countp will hold the number of bytes
1301 * this transaction can send.
1302 */
static void
build_msghdr_send(isc__socket_t *sock, isc_socketevent_t *dev,
		  struct msghdr *msg, struct iovec *iov, size_t *write_countp)
{
	unsigned int iovcount;
	isc_buffer_t *buffer;
	isc_region_t used;
	size_t write_count;
	size_t skip_count;

	memset(msg, 0, sizeof(*msg));

	/* Connected sockets must not supply a destination address. */
	if (!sock->connected) {
		msg->msg_name = (void *)&dev->address.type.sa;
		msg->msg_namelen = dev->address.length;
	} else {
		msg->msg_name = NULL;
		msg->msg_namelen = 0;
	}

	buffer = ISC_LIST_HEAD(dev->bufferlist);
	write_count = 0;
	iovcount = 0;

	/*
	 * Single buffer I/O?  Skip what we've done so far in this region.
	 */
	if (buffer == NULL) {
		/* dev->n bytes of the region were sent by earlier attempts. */
		write_count = dev->region.length - dev->n;
		iov[0].iov_base = (void *)(dev->region.base + dev->n);
		iov[0].iov_len = write_count;
		iovcount = 1;

		goto config;
	}

	/*
	 * Multibuffer I/O.
	 * Skip the data in the buffer list that we have already written.
	 */
	skip_count = dev->n;
	while (buffer != NULL) {
		REQUIRE(ISC_BUFFER_VALID(buffer));
		if (skip_count < isc_buffer_usedlength(buffer))
			break;
		skip_count -= isc_buffer_usedlength(buffer);
		buffer = ISC_LIST_NEXT(buffer, link);
	}

	/* Map each remaining (partially) unsent buffer onto one iovec. */
	while (buffer != NULL) {
		INSIST(iovcount < MAXSCATTERGATHER_SEND);

		isc_buffer_usedregion(buffer, &used);

		if (used.length > 0) {
			/* skip_count is nonzero only for the first buffer. */
			iov[iovcount].iov_base = (void *)(used.base
							  + skip_count);
			iov[iovcount].iov_len = used.length - skip_count;
			write_count += (used.length - skip_count);
			skip_count = 0;
			iovcount++;
		}
		buffer = ISC_LIST_NEXT(buffer, link);
	}

	INSIST(skip_count == 0U);

 config:
	msg->msg_iov = iov;
	msg->msg_iovlen = iovcount;

#ifdef ISC_NET_BSD44MSGHDR
	msg->msg_control = NULL;
	msg->msg_controllen = 0;
	msg->msg_flags = 0;
#if defined(USE_CMSG) && defined(ISC_PLATFORM_HAVEIN6PKTINFO)
	/*
	 * For UDP replies that carry packet info, attach an IPV6_PKTINFO
	 * control message (and, where supported, IPV6_USE_MIN_MTU) in the
	 * socket's preallocated send cmsg buffer.
	 */
	if ((sock->type == isc_sockettype_udp)
	    && ((dev->attributes & ISC_SOCKEVENTATTR_PKTINFO) != 0)) {
#if defined(IPV6_USE_MIN_MTU)
		int use_min_mtu = 1;	/* -1, 0, 1 */
#endif
		struct cmsghdr *cmsgp;
		struct in6_pktinfo *pktinfop;

		socket_log(sock, NULL, TRACE,
			   isc_msgcat, ISC_MSGSET_SOCKET, ISC_MSG_SENDTODATA,
			   "sendto pktinfo data, ifindex %u",
			   dev->pktinfo.ipi6_ifindex);

		/* allocate_socket() sized sendcmsgbuf for exactly this use. */
		msg->msg_controllen = cmsg_space(sizeof(struct in6_pktinfo));
		INSIST(msg->msg_controllen <= sock->sendcmsgbuflen);
		msg->msg_control = (void *)sock->sendcmsgbuf;

		cmsgp = (struct cmsghdr *)sock->sendcmsgbuf;
		cmsgp->cmsg_level = IPPROTO_IPV6;
		cmsgp->cmsg_type = IPV6_PKTINFO;
		cmsgp->cmsg_len = cmsg_len(sizeof(struct in6_pktinfo));
		pktinfop = (struct in6_pktinfo *)CMSG_DATA(cmsgp);
		memcpy(pktinfop, &dev->pktinfo, sizeof(struct in6_pktinfo));
#if defined(IPV6_USE_MIN_MTU)
		/*
		 * Set IPV6_USE_MIN_MTU as a per packet option as FreeBSD
		 * ignores setsockopt(IPV6_USE_MIN_MTU) when IPV6_PKTINFO
		 * is used.
		 */
		cmsgp = (struct cmsghdr *)(sock->sendcmsgbuf +
					   msg->msg_controllen);
		msg->msg_controllen += cmsg_space(sizeof(use_min_mtu));
		INSIST(msg->msg_controllen <= sock->sendcmsgbuflen);

		cmsgp->cmsg_level = IPPROTO_IPV6;
		cmsgp->cmsg_type = IPV6_USE_MIN_MTU;
		cmsgp->cmsg_len = cmsg_len(sizeof(use_min_mtu));
		memcpy(CMSG_DATA(cmsgp), &use_min_mtu, sizeof(use_min_mtu));
#endif
	}
#endif /* USE_CMSG && ISC_PLATFORM_HAVEIN6PKTINFO */
#else /* ISC_NET_BSD44MSGHDR */
	msg->msg_accrights = NULL;
	msg->msg_accrightslen = 0;
#endif /* ISC_NET_BSD44MSGHDR */

	if (write_countp != NULL)
		*write_countp = write_count;
}
1428
1429/*
1430 * Construct an iov array and attach it to the msghdr passed in.  This is
1431 * the RECV constructor, which will use the available region of the buffer
1432 * (if using a buffer list) or will use the internal region (if a single
1433 * buffer I/O is requested).
1434 *
1435 * Nothing can be NULL, and the done event must list at least one buffer
1436 * on the buffer linked list for this function to be meaningful.
1437 *
1438 * If read_countp != NULL, *read_countp will hold the number of bytes
1439 * this transaction can receive.
1440 */
1441static void
1442build_msghdr_recv(isc__socket_t *sock, isc_socketevent_t *dev,
1443		  struct msghdr *msg, struct iovec *iov, size_t *read_countp)
1444{
1445	unsigned int iovcount;
1446	isc_buffer_t *buffer;
1447	isc_region_t available;
1448	size_t read_count;
1449
1450	memset(msg, 0, sizeof(struct msghdr));
1451
1452	if (sock->type == isc_sockettype_udp) {
1453		memset(&dev->address, 0, sizeof(dev->address));
1454#ifdef BROKEN_RECVMSG
1455		if (sock->pf == AF_INET) {
1456			msg->msg_name = (void *)&dev->address.type.sin;
1457			msg->msg_namelen = sizeof(dev->address.type.sin6);
1458		} else if (sock->pf == AF_INET6) {
1459			msg->msg_name = (void *)&dev->address.type.sin6;
1460			msg->msg_namelen = sizeof(dev->address.type.sin6);
1461#ifdef ISC_PLATFORM_HAVESYSUNH
1462		} else if (sock->pf == AF_UNIX) {
1463			msg->msg_name = (void *)&dev->address.type.sunix;
1464			msg->msg_namelen = sizeof(dev->address.type.sunix);
1465#endif
1466		} else {
1467			msg->msg_name = (void *)&dev->address.type.sa;
1468			msg->msg_namelen = sizeof(dev->address.type);
1469		}
1470#else
1471		msg->msg_name = (void *)&dev->address.type.sa;
1472		msg->msg_namelen = sizeof(dev->address.type);
1473#endif
1474#ifdef ISC_NET_RECVOVERFLOW
1475		/* If needed, steal one iovec for overflow detection. */
1476		maxiov--;
1477#endif
1478	} else { /* TCP */
1479		msg->msg_name = NULL;
1480		msg->msg_namelen = 0;
1481		dev->address = sock->peer_address;
1482	}
1483
1484	buffer = ISC_LIST_HEAD(dev->bufferlist);
1485	read_count = 0;
1486
1487	/*
1488	 * Single buffer I/O?  Skip what we've done so far in this region.
1489	 */
1490	if (buffer == NULL) {
1491		read_count = dev->region.length - dev->n;
1492		iov[0].iov_base = (void *)(dev->region.base + dev->n);
1493		iov[0].iov_len = read_count;
1494		iovcount = 1;
1495
1496		goto config;
1497	}
1498
1499	/*
1500	 * Multibuffer I/O.
1501	 * Skip empty buffers.
1502	 */
1503	while (buffer != NULL) {
1504		REQUIRE(ISC_BUFFER_VALID(buffer));
1505		if (isc_buffer_availablelength(buffer) != 0)
1506			break;
1507		buffer = ISC_LIST_NEXT(buffer, link);
1508	}
1509
1510	iovcount = 0;
1511	while (buffer != NULL) {
1512		INSIST(iovcount < MAXSCATTERGATHER_RECV);
1513
1514		isc_buffer_availableregion(buffer, &available);
1515
1516		if (available.length > 0) {
1517			iov[iovcount].iov_base = (void *)(available.base);
1518			iov[iovcount].iov_len = available.length;
1519			read_count += available.length;
1520			iovcount++;
1521		}
1522		buffer = ISC_LIST_NEXT(buffer, link);
1523	}
1524
1525 config:
1526
1527	/*
1528	 * If needed, set up to receive that one extra byte.  Note that
1529	 * we know there is at least one iov left, since we stole it
1530	 * at the top of this function.
1531	 */
1532#ifdef ISC_NET_RECVOVERFLOW
1533	if (sock->type == isc_sockettype_udp) {
1534		iov[iovcount].iov_base = (void *)(&sock->overflow);
1535		iov[iovcount].iov_len = 1;
1536		iovcount++;
1537	}
1538#endif
1539
1540	msg->msg_iov = iov;
1541	msg->msg_iovlen = iovcount;
1542
1543#ifdef ISC_NET_BSD44MSGHDR
1544	msg->msg_control = NULL;
1545	msg->msg_controllen = 0;
1546	msg->msg_flags = 0;
1547#if defined(USE_CMSG)
1548	if (sock->type == isc_sockettype_udp) {
1549		msg->msg_control = sock->recvcmsgbuf;
1550		msg->msg_controllen = sock->recvcmsgbuflen;
1551	}
1552#endif /* USE_CMSG */
1553#else /* ISC_NET_BSD44MSGHDR */
1554	msg->msg_accrights = NULL;
1555	msg->msg_accrightslen = 0;
1556#endif /* ISC_NET_BSD44MSGHDR */
1557
1558	if (read_countp != NULL)
1559		*read_countp = read_count;
1560}
1561
1562static void
1563set_dev_address(isc_sockaddr_t *address, isc__socket_t *sock,
1564		isc_socketevent_t *dev)
1565{
1566	if (sock->type == isc_sockettype_udp) {
1567		if (address != NULL)
1568			dev->address = *address;
1569		else
1570			dev->address = sock->peer_address;
1571	} else if (sock->type == isc_sockettype_tcp) {
1572		INSIST(address == NULL);
1573		dev->address = sock->peer_address;
1574	}
1575}
1576
1577static void
1578destroy_socketevent(isc_event_t *event) {
1579	isc_socketevent_t *ev = (isc_socketevent_t *)event;
1580
1581	INSIST(ISC_LIST_EMPTY(ev->bufferlist));
1582
1583	(ev->destroy)(event);
1584}
1585
1586static isc_socketevent_t *
1587allocate_socketevent(isc__socket_t *sock, isc_eventtype_t eventtype,
1588		     isc_taskaction_t action, const void *arg)
1589{
1590	isc_socketevent_t *ev;
1591
1592	ev = (isc_socketevent_t *)isc_event_allocate(sock->manager->mctx,
1593						     sock, eventtype,
1594						     action, arg,
1595						     sizeof(*ev));
1596
1597	if (ev == NULL)
1598		return (NULL);
1599
1600	ev->result = ISC_R_UNSET;
1601	ISC_LINK_INIT(ev, ev_link);
1602	ISC_LIST_INIT(ev->bufferlist);
1603	ev->region.base = NULL;
1604	ev->n = 0;
1605	ev->offset = 0;
1606	ev->attributes = 0;
1607	ev->destroy = ev->ev_destroy;
1608	ev->ev_destroy = destroy_socketevent;
1609
1610	return (ev);
1611}
1612
1613#if defined(ISC_SOCKET_DEBUG)
/*
 * Debug helper: print the layout of a msghdr (name, iovecs, and, where
 * present, control data) to stdout.
 */
static void
dump_msg(struct msghdr *msg) {
	unsigned int n;
	struct iovec *vec;

	printf("MSGHDR %p\n", msg);
	printf("\tname %p, namelen %ld\n", msg->msg_name,
	       (long) msg->msg_namelen);
	printf("\tiov %p, iovlen %ld\n", msg->msg_iov,
	       (long) msg->msg_iovlen);
	vec = msg->msg_iov;
	for (n = 0; n < (unsigned int)msg->msg_iovlen; n++)
		printf("\t\t%d\tbase %p, len %ld\n", n,
		       vec[n].iov_base,
		       (long) vec[n].iov_len);
#ifdef ISC_NET_BSD44MSGHDR
	printf("\tcontrol %p, controllen %ld\n", msg->msg_control,
	       (long) msg->msg_controllen);
#endif
}
1632#endif
1633
1634#define DOIO_SUCCESS		0	/* i/o ok, event sent */
1635#define DOIO_SOFT		1	/* i/o ok, soft error, no event sent */
1636#define DOIO_HARD		2	/* i/o error, event sent */
1637#define DOIO_EOF		3	/* EOF, no event sent */
1638
/*
 * Attempt a single recvmsg() for the outstanding read request `dev' on
 * `sock', filling the buffer list (or single region) the request
 * describes.
 *
 * Returns one of the DOIO_* codes defined above:
 *	DOIO_SUCCESS	enough data read; dev->result is ISC_R_SUCCESS
 *	DOIO_SOFT	retryable condition; caller should repoll
 *	DOIO_HARD	fatal I/O error; dev->result holds the error
 *	DOIO_EOF	zero-length read on a stream socket
 */
static int
doio_recv(isc__socket_t *sock, isc_socketevent_t *dev) {
	int cc;
	struct iovec iov[MAXSCATTERGATHER_RECV];
	size_t read_count;
	size_t actual_count;
	struct msghdr msghdr;
	isc_buffer_t *buffer;
	int recv_errno;
	char strbuf[ISC_STRERRORSIZE];

	build_msghdr_recv(sock, dev, &msghdr, iov, &read_count);

#if defined(ISC_SOCKET_DEBUG)
	dump_msg(&msghdr);
#endif

	cc = recvmsg(sock->fd, &msghdr, 0);
	/* Save errno immediately; later library calls may clobber it. */
	recv_errno = errno;

#if defined(ISC_SOCKET_DEBUG)
	dump_msg(&msghdr);
#endif

	if (cc < 0) {
		if (SOFT_ERROR(recv_errno))
			return (DOIO_SOFT);

		if (isc_log_wouldlog(isc_lctx, IOEVENT_LEVEL)) {
			isc__strerror(recv_errno, strbuf, sizeof(strbuf));
			socket_log(sock, NULL, IOEVENT,
				   isc_msgcat, ISC_MSGSET_SOCKET,
				   ISC_MSG_DOIORECV,
				  "doio_recv: recvmsg(%d) %d bytes, err %d/%s",
				   sock->fd, cc, recv_errno, strbuf);
		}

/*
 * Classify well-known errno values.  SOFT_OR_HARD: hard error on a
 * connected socket, soft (ignorable) otherwise.  ALWAYS_HARD: hard
 * error regardless of connection state.  Both are #undef'd below.
 */
#define SOFT_OR_HARD(_system, _isc) \
	if (recv_errno == _system) { \
		if (sock->connected) { \
			dev->result = _isc; \
			inc_stats(sock->manager->stats, \
				  sock->statsindex[STATID_RECVFAIL]); \
			return (DOIO_HARD); \
		} \
		return (DOIO_SOFT); \
	}
#define ALWAYS_HARD(_system, _isc) \
	if (recv_errno == _system) { \
		dev->result = _isc; \
		inc_stats(sock->manager->stats, \
			  sock->statsindex[STATID_RECVFAIL]); \
		return (DOIO_HARD); \
	}

		SOFT_OR_HARD(ECONNREFUSED, ISC_R_CONNREFUSED);
		SOFT_OR_HARD(ENETUNREACH, ISC_R_NETUNREACH);
		SOFT_OR_HARD(EHOSTUNREACH, ISC_R_HOSTUNREACH);
		SOFT_OR_HARD(EHOSTDOWN, ISC_R_HOSTDOWN);
		/* HPUX 11.11 can return EADDRNOTAVAIL. */
		SOFT_OR_HARD(EADDRNOTAVAIL, ISC_R_ADDRNOTAVAIL);
		ALWAYS_HARD(ENOBUFS, ISC_R_NORESOURCES);
		/*
		 * HPUX returns EPROTO and EINVAL on receiving some ICMP/ICMPv6
		 * errors.
		 */
#ifdef EPROTO
		SOFT_OR_HARD(EPROTO, ISC_R_HOSTUNREACH);
#endif
		SOFT_OR_HARD(EINVAL, ISC_R_HOSTUNREACH);

#undef SOFT_OR_HARD
#undef ALWAYS_HARD

		/* Anything unclassified is a hard failure. */
		dev->result = isc__errno2result(recv_errno);
		inc_stats(sock->manager->stats,
			  sock->statsindex[STATID_RECVFAIL]);
		return (DOIO_HARD);
	}

	/*
	 * On TCP and UNIX sockets, zero length reads indicate EOF,
	 * while on UDP sockets, zero length reads are perfectly valid,
	 * although strange.
	 */
	switch (sock->type) {
	case isc_sockettype_tcp:
	case isc_sockettype_unix:
		if (cc == 0)
			return (DOIO_EOF);
		break;
	case isc_sockettype_udp:
		break;
	case isc_sockettype_fdwatch:
	default:
		INSIST(0);
	}

	if (sock->type == isc_sockettype_udp) {
		dev->address.length = msghdr.msg_namelen;
		/* Source port 0 is bogus; drop such packets silently. */
		if (isc_sockaddr_getport(&dev->address) == 0) {
			if (isc_log_wouldlog(isc_lctx, IOEVENT_LEVEL)) {
				socket_log(sock, &dev->address, IOEVENT,
					   isc_msgcat, ISC_MSGSET_SOCKET,
					   ISC_MSG_ZEROPORT,
					   "dropping source port zero packet");
			}
			return (DOIO_SOFT);
		}
		/*
		 * Simulate a firewall blocking UDP responses bigger than
		 * 512 bytes.
		 */
		if (sock->manager->maxudp != 0 && cc > sock->manager->maxudp)
			return (DOIO_SOFT);
	}

	socket_log(sock, &dev->address, IOEVENT,
		   isc_msgcat, ISC_MSGSET_SOCKET, ISC_MSG_PKTRECV,
		   "packet received correctly");

	/*
	 * Overflow bit detection.  If we received MORE bytes than we should,
	 * this indicates an overflow situation.  Set the flag in the
	 * dev entry and adjust how much we read by one.
	 */
#ifdef ISC_NET_RECVOVERFLOW
	if ((sock->type == isc_sockettype_udp) && ((size_t)cc > read_count)) {
		dev->attributes |= ISC_SOCKEVENTATTR_TRUNC;
		cc--;
	}
#endif

	/*
	 * If there are control messages attached, run through them and pull
	 * out the interesting bits.
	 */
	if (sock->type == isc_sockettype_udp)
		process_cmsg(sock, &msghdr, dev);

	/*
	 * update the buffers (if any) and the i/o count
	 */
	dev->n += cc;
	actual_count = cc;
	/* Distribute the received bytes over the buffer list in order. */
	buffer = ISC_LIST_HEAD(dev->bufferlist);
	while (buffer != NULL && actual_count > 0U) {
		REQUIRE(ISC_BUFFER_VALID(buffer));
		if (isc_buffer_availablelength(buffer) <= actual_count) {
			actual_count -= isc_buffer_availablelength(buffer);
			isc_buffer_add(buffer,
				       isc_buffer_availablelength(buffer));
		} else {
			isc_buffer_add(buffer, actual_count);
			actual_count = 0;
			POST(actual_count);
			break;
		}
		buffer = ISC_LIST_NEXT(buffer, link);
		if (buffer == NULL) {
			INSIST(actual_count == 0U);
		}
	}

	/*
	 * If we read less than we expected, update counters,
	 * and let the upper layer poke the descriptor.
	 */
	if (((size_t)cc != read_count) && (dev->n < dev->minimum))
		return (DOIO_SOFT);

	/*
	 * Full reads are posted, or partials if partials are ok.
	 */
	dev->result = ISC_R_SUCCESS;
	return (DOIO_SUCCESS);
}
1816
1817/*
1818 * Returns:
1819 *	DOIO_SUCCESS	The operation succeeded.  dev->result contains
1820 *			ISC_R_SUCCESS.
1821 *
1822 *	DOIO_HARD	A hard or unexpected I/O error was encountered.
1823 *			dev->result contains the appropriate error.
1824 *
1825 *	DOIO_SOFT	A soft I/O error was encountered.  No senddone
1826 *			event was sent.  The operation should be retried.
1827 *
1828 *	No other return values are possible.
1829 */
static int
doio_send(isc__socket_t *sock, isc_socketevent_t *dev) {
	int cc;
	struct iovec iov[MAXSCATTERGATHER_SEND];
	size_t write_count;
	struct msghdr msghdr;
	char addrbuf[ISC_SOCKADDR_FORMATSIZE];
	int attempts = 0;
	int send_errno;
	char strbuf[ISC_STRERRORSIZE];

	build_msghdr_send(sock, dev, &msghdr, iov, &write_count);

 resend:
	cc = sendmsg(sock->fd, &msghdr, 0);
	/* Save errno immediately; later library calls may clobber it. */
	send_errno = errno;

	/*
	 * Check for error or block condition.
	 */
	if (cc < 0) {
		/* Retry interrupted sends up to NRETRIES times. */
		if (send_errno == EINTR && ++attempts < NRETRIES)
			goto resend;

		if (SOFT_ERROR(send_errno))
			return (DOIO_SOFT);

/*
 * Classify well-known errno values.  SOFT_OR_HARD: hard error on a
 * connected socket, soft (ignorable) otherwise.  ALWAYS_HARD: hard
 * error regardless of connection state.  Both are #undef'd below.
 */
#define SOFT_OR_HARD(_system, _isc) \
	if (send_errno == _system) { \
		if (sock->connected) { \
			dev->result = _isc; \
			inc_stats(sock->manager->stats, \
				  sock->statsindex[STATID_SENDFAIL]); \
			return (DOIO_HARD); \
		} \
		return (DOIO_SOFT); \
	}
#define ALWAYS_HARD(_system, _isc) \
	if (send_errno == _system) { \
		dev->result = _isc; \
		inc_stats(sock->manager->stats, \
			  sock->statsindex[STATID_SENDFAIL]); \
		return (DOIO_HARD); \
	}

		SOFT_OR_HARD(ECONNREFUSED, ISC_R_CONNREFUSED);
		ALWAYS_HARD(EACCES, ISC_R_NOPERM);
		ALWAYS_HARD(EAFNOSUPPORT, ISC_R_ADDRNOTAVAIL);
		ALWAYS_HARD(EADDRNOTAVAIL, ISC_R_ADDRNOTAVAIL);
		ALWAYS_HARD(EHOSTUNREACH, ISC_R_HOSTUNREACH);
#ifdef EHOSTDOWN
		ALWAYS_HARD(EHOSTDOWN, ISC_R_HOSTUNREACH);
#endif
		ALWAYS_HARD(ENETUNREACH, ISC_R_NETUNREACH);
		ALWAYS_HARD(ENOBUFS, ISC_R_NORESOURCES);
		ALWAYS_HARD(EPERM, ISC_R_HOSTUNREACH);
		ALWAYS_HARD(EPIPE, ISC_R_NOTCONNECTED);
		ALWAYS_HARD(ECONNRESET, ISC_R_CONNECTIONRESET);

#undef SOFT_OR_HARD
#undef ALWAYS_HARD

		/*
		 * The other error types depend on whether or not the
		 * socket is UDP or TCP.  If it is UDP, some errors
		 * that we expect to be fatal under TCP are merely
		 * annoying, and are really soft errors.
		 *
		 * However, these soft errors are still returned as
		 * a status.
		 */
		isc_sockaddr_format(&dev->address, addrbuf, sizeof(addrbuf));
		isc__strerror(send_errno, strbuf, sizeof(strbuf));
		UNEXPECTED_ERROR(__FILE__, __LINE__, "internal_send: %s: %s",
				 addrbuf, strbuf);
		dev->result = isc__errno2result(send_errno);
		inc_stats(sock->manager->stats,
			  sock->statsindex[STATID_SENDFAIL]);
		return (DOIO_HARD);
	}

	/* A zero-byte send is unexpected; log it but fall through. */
	if (cc == 0) {
		inc_stats(sock->manager->stats,
			  sock->statsindex[STATID_SENDFAIL]);
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "doio_send: send() %s 0",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_RETURNED, "returned"));
	}

	/*
	 * If we write less than we expected, update counters, poke.
	 */
	dev->n += cc;
	if ((size_t)cc != write_count)
		return (DOIO_SOFT);

	/*
	 * Exactly what we wanted to write.  We're done with this
	 * entry.  Post its completion event.
	 */
	dev->result = ISC_R_SUCCESS;
	return (DOIO_SUCCESS);
}
1934
1935/*
1936 * Kill.
1937 *
1938 * Caller must ensure that the socket is not locked and no external
1939 * references exist.
1940 */
/*
 * Detach `fd' from the manager and close it.  For fdwatch sockets the
 * descriptor is unwatched synchronously (the caller may reuse the fd
 * immediately); for all other types the close is deferred to the
 * watcher via SELECT_POKE_CLOSE.
 */
static void
closesocket(isc__socketmgr_t *manager, isc__socket_t *sock, int fd) {
	isc_sockettype_t type = sock->type;
	int lockid = FDLOCK_ID(fd);

	/*
	 * No one has this socket open, so the watcher doesn't have to be
	 * poked, and the socket doesn't have to be locked.
	 */
	LOCK(&manager->fdlock[lockid]);
	manager->fds[fd] = NULL;
	if (type == isc_sockettype_fdwatch)
		manager->fdstate[fd] = CLOSED;
	else
		manager->fdstate[fd] = CLOSE_PENDING;
	UNLOCK(&manager->fdlock[lockid]);
	if (type == isc_sockettype_fdwatch) {
		/*
		 * The caller may close the socket once this function returns,
		 * and `fd' may be reassigned for a new socket.  So we do
		 * unwatch_fd() here, rather than defer it via select_poke().
		 * Note: this may complicate data protection among threads and
		 * may reduce performance due to additional locks.  One way to
		 * solve this would be to dup() the watched descriptor, but we
		 * take a simpler approach at this moment.
		 */
		(void)unwatch_fd(manager, fd, SELECT_POKE_READ);
		(void)unwatch_fd(manager, fd, SELECT_POKE_WRITE);
	} else
		select_poke(manager, fd, SELECT_POKE_CLOSE);

	inc_stats(manager->stats, sock->statsindex[STATID_CLOSE]);

	/*
	 * update manager->maxfd here (XXX: this should be implemented more
	 * efficiently)
	 */
#ifdef USE_SELECT
	LOCK(&manager->lock);
	if (manager->maxfd == fd) {
		int i;

		/* Scan downward for the next highest managed descriptor. */
		manager->maxfd = 0;
		for (i = fd - 1; i >= 0; i--) {
			lockid = FDLOCK_ID(i);

			LOCK(&manager->fdlock[lockid]);
			if (manager->fdstate[i] == MANAGED) {
				manager->maxfd = i;
				UNLOCK(&manager->fdlock[lockid]);
				break;
			}
			UNLOCK(&manager->fdlock[lockid]);
		}
		/* The internal poke pipe must stay within the select range. */
#ifdef ISC_PLATFORM_USETHREADS
		if (manager->maxfd < manager->pipe_fds[0])
			manager->maxfd = manager->pipe_fds[0];
#endif
	}
	UNLOCK(&manager->lock);
#endif	/* USE_SELECT */
}
2003
/*
 * Tear down a socket with no remaining references: close its
 * descriptor (if any), unlink it from the manager's socket list, and
 * free it.  The caller must not hold the socket or manager lock.
 */
static void
destroy(isc__socket_t **sockp) {
	int fd;
	isc__socket_t *sock = *sockp;
	isc__socketmgr_t *manager = sock->manager;

	socket_log(sock, NULL, CREATION, isc_msgcat, ISC_MSGSET_SOCKET,
		   ISC_MSG_DESTROYING, "destroying");

	/* No I/O of any kind may still be pending at this point. */
	INSIST(ISC_LIST_EMPTY(sock->accept_list));
	INSIST(ISC_LIST_EMPTY(sock->recv_list));
	INSIST(ISC_LIST_EMPTY(sock->send_list));
	INSIST(sock->connect_ev == NULL);
	REQUIRE(sock->fd == -1 || sock->fd < (int)manager->maxsocks);

	if (sock->fd >= 0) {
		/* Invalidate sock->fd before the descriptor is recycled. */
		fd = sock->fd;
		sock->fd = -1;
		closesocket(manager, sock, fd);
	}

	LOCK(&manager->lock);

	ISC_LIST_UNLINK(manager->socklist, sock, link);

#ifdef USE_WATCHER_THREAD
	/* Let a waiting shutdown proceed once the last socket is gone. */
	if (ISC_LIST_EMPTY(manager->socklist))
		SIGNAL(&manager->shutdown_ok);
#endif /* USE_WATCHER_THREAD */

	/* can't unlock manager as its memory context is still used */
	free_socket(sockp);

	UNLOCK(&manager->lock);
}
2039
/*
 * Allocate and initialize a new isc__socket_t structure (no OS-level
 * descriptor is opened here; see opensocket()).  On success the socket
 * is returned through 'socketp' with its magic numbers set and a zero
 * reference count.  On failure all partially allocated resources are
 * released and an error result is returned.
 */
static isc_result_t
allocate_socket(isc__socketmgr_t *manager, isc_sockettype_t type,
		isc__socket_t **socketp)
{
	isc__socket_t *sock;
	isc_result_t result;
	ISC_SOCKADDR_LEN_T cmsgbuflen;

	sock = isc_mem_get(manager->mctx, sizeof(*sock));

	if (sock == NULL)
		return (ISC_R_NOMEMORY);

	/* Magic numbers stay zero until initialization fully succeeds. */
	sock->common.magic = 0;
	sock->common.impmagic = 0;
	sock->references = 0;

	sock->manager = manager;
	sock->type = type;
	sock->fd = -1;
	sock->dupped = 0;
	sock->statsindex = NULL;

	ISC_LINK_INIT(sock, link);

	/* NULL these first so the error path can free them unconditionally. */
	sock->recvcmsgbuf = NULL;
	sock->sendcmsgbuf = NULL;

	/*
	 * set up cmsg buffers
	 */
	cmsgbuflen = 0;
#if defined(USE_CMSG) && defined(ISC_PLATFORM_HAVEIN6PKTINFO)
	cmsgbuflen += cmsg_space(sizeof(struct in6_pktinfo));
#endif
#if defined(USE_CMSG) && defined(SO_TIMESTAMP)
	cmsgbuflen += cmsg_space(sizeof(struct timeval));
#endif
	sock->recvcmsgbuflen = cmsgbuflen;
	if (sock->recvcmsgbuflen != 0U) {
		sock->recvcmsgbuf = isc_mem_get(manager->mctx, cmsgbuflen);
		if (sock->recvcmsgbuf == NULL) {
			result = ISC_R_NOMEMORY;
			goto error;
		}
	}

	cmsgbuflen = 0;
#if defined(USE_CMSG) && defined(ISC_PLATFORM_HAVEIN6PKTINFO)
	cmsgbuflen += cmsg_space(sizeof(struct in6_pktinfo));
#if defined(IPV6_USE_MIN_MTU)
	/*
	 * Provide space for working around FreeBSD's broken IPV6_USE_MIN_MTU
	 * support.
	 */
	cmsgbuflen += cmsg_space(sizeof(int));
#endif
#endif
	sock->sendcmsgbuflen = cmsgbuflen;
	if (sock->sendcmsgbuflen != 0U) {
		sock->sendcmsgbuf = isc_mem_get(manager->mctx, cmsgbuflen);
		if (sock->sendcmsgbuf == NULL) {
			result = ISC_R_NOMEMORY;
			goto error;
		}
	}

	memset(sock->name, 0, sizeof(sock->name));
	sock->tag = NULL;

	/*
	 * set up list of readers and writers to be initially empty
	 */
	ISC_LIST_INIT(sock->recv_list);
	ISC_LIST_INIT(sock->send_list);
	ISC_LIST_INIT(sock->accept_list);
	sock->connect_ev = NULL;
	sock->pending_recv = 0;
	sock->pending_send = 0;
	sock->pending_accept = 0;
	sock->listener = 0;
	sock->connected = 0;
	sock->connecting = 0;
	sock->bound = 0;

	/*
	 * initialize the lock
	 */
	result = isc_mutex_init(&sock->lock);
	if (result != ISC_R_SUCCESS) {
		sock->common.magic = 0;
		sock->common.impmagic = 0;
		goto error;
	}

	/*
	 * Initialize readable and writable events
	 */
	ISC_EVENT_INIT(&sock->readable_ev, sizeof(intev_t),
		       ISC_EVENTATTR_NOPURGE, NULL, ISC_SOCKEVENT_INTR,
		       NULL, sock, sock, NULL, NULL);
	ISC_EVENT_INIT(&sock->writable_ev, sizeof(intev_t),
		       ISC_EVENTATTR_NOPURGE, NULL, ISC_SOCKEVENT_INTW,
		       NULL, sock, sock, NULL, NULL);

	/* Only now does the socket become "valid" to VALID_SOCKET(). */
	sock->common.magic = ISCAPI_SOCKET_MAGIC;
	sock->common.impmagic = SOCKET_MAGIC;
	*socketp = sock;

	return (ISC_R_SUCCESS);

 error:
	/* Unified cleanup: release whichever buffers were allocated. */
	if (sock->recvcmsgbuf != NULL)
		isc_mem_put(manager->mctx, sock->recvcmsgbuf,
			    sock->recvcmsgbuflen);
	if (sock->sendcmsgbuf != NULL)
		isc_mem_put(manager->mctx, sock->sendcmsgbuf,
			    sock->sendcmsgbuflen);
	isc_mem_put(manager->mctx, sock, sizeof(*sock));

	return (result);
}
2162
2163/*
2164 * This event requires that the various lists be empty, that the reference
2165 * count be 1, and that the magic number is valid.  The other socket bits,
2166 * like the lock, must be initialized as well.  The fd associated must be
2167 * marked as closed, by setting it to -1 on close, or this routine will
2168 * also close the socket.
2169 */
2170static void
2171free_socket(isc__socket_t **socketp) {
2172	isc__socket_t *sock = *socketp;
2173
2174	INSIST(sock->references == 0);
2175	INSIST(VALID_SOCKET(sock));
2176	INSIST(!sock->connecting);
2177	INSIST(!sock->pending_recv);
2178	INSIST(!sock->pending_send);
2179	INSIST(!sock->pending_accept);
2180	INSIST(ISC_LIST_EMPTY(sock->recv_list));
2181	INSIST(ISC_LIST_EMPTY(sock->send_list));
2182	INSIST(ISC_LIST_EMPTY(sock->accept_list));
2183	INSIST(!ISC_LINK_LINKED(sock, link));
2184
2185	if (sock->recvcmsgbuf != NULL)
2186		isc_mem_put(sock->manager->mctx, sock->recvcmsgbuf,
2187			    sock->recvcmsgbuflen);
2188	if (sock->sendcmsgbuf != NULL)
2189		isc_mem_put(sock->manager->mctx, sock->sendcmsgbuf,
2190			    sock->sendcmsgbuflen);
2191
2192	sock->common.magic = 0;
2193	sock->common.impmagic = 0;
2194
2195	DESTROYLOCK(&sock->lock);
2196
2197	isc_mem_put(sock->manager->mctx, sock, sizeof(*sock));
2198
2199	*socketp = NULL;
2200}
2201
2202#ifdef SO_BSDCOMPAT
2203/*
 * This really should not be necessary to do.  We have to work out
 * which kernel version we are on at run time so that we don't cause
 * the kernel to issue a warning about us using a deprecated socket option.
 * Such warnings should *never* be on by default in production kernels.
 *
 * We can't do this at build time because executables are moved between
 * machines and hence kernels.
 *
 * We can't just not set SO_BSDCOMPAT because some kernels require it.
2213 */
2214
/* Ensures clear_bsdcompat() runs at most once, process-wide. */
static isc_once_t         bsdcompat_once = ISC_ONCE_INIT;
/* Whether to set SO_BSDCOMPAT on new sockets; cleared on Linux >= 2.4. */
isc_boolean_t bsdcompat = ISC_TRUE;

/*
 * Decide at run time whether SO_BSDCOMPAT should be used: on Linux
 * kernels 2.4 and later the option is deprecated, so clear the flag.
 * No-op on non-Linux platforms.
 */
static void
clear_bsdcompat(void) {
#ifdef __linux__
	 struct utsname buf;
	 char *endp;
	 long int major;
	 long int minor;

	 uname(&buf);    /* Can only fail if buf is bad in Linux. */

	 /* Paranoia in parsing can be increased, but we trust uname(). */
	 major = strtol(buf.release, &endp, 10);
	 if (*endp == '.') {
		minor = strtol(endp+1, &endp, 10);
		/* Kernel 2.4+ warns about SO_BSDCOMPAT; don't set it there. */
		if ((major > 2) || ((major == 2) && (minor >= 4))) {
			bsdcompat = ISC_FALSE;
		}
	 }
#endif /* __linux__ */
}
2238#endif
2239
/*
 * Open the OS-level descriptor for 'sock' -- either a fresh socket of
 * the appropriate type, or (when 'dup_socket' is non-NULL) a dup() of
 * an existing socket's descriptor -- and apply the socket options this
 * library relies on.  Returns ISC_R_SUCCESS or a result code mapped
 * from errno.  On failure the descriptor (if any) is closed.
 */
static isc_result_t
opensocket(isc__socketmgr_t *manager, isc__socket_t *sock,
	   isc__socket_t *dup_socket)
{
	isc_result_t result;
	char strbuf[ISC_STRERRORSIZE];
	const char *err = "socket";
	int tries = 0;
#if defined(USE_CMSG) || defined(SO_BSDCOMPAT)
	int on = 1;
#endif
#if defined(SO_RCVBUF)
	ISC_SOCKADDR_LEN_T optlen;
	int size;
#endif

	/* Retry socket()/dup() on EINTR, up to an arbitrary limit. */
 again:
	if (dup_socket == NULL) {
		switch (sock->type) {
		case isc_sockettype_udp:
			sock->fd = socket(sock->pf, SOCK_DGRAM, IPPROTO_UDP);
			break;
		case isc_sockettype_tcp:
			sock->fd = socket(sock->pf, SOCK_STREAM, IPPROTO_TCP);
			break;
		case isc_sockettype_unix:
			sock->fd = socket(sock->pf, SOCK_STREAM, 0);
			break;
		case isc_sockettype_fdwatch:
			/*
			 * We should not be called for isc_sockettype_fdwatch
			 * sockets.
			 */
			INSIST(0);
			break;
		}
	} else {
		/* Duplicating: inherit the bound state of the original. */
		sock->fd = dup(dup_socket->fd);
		sock->dupped = 1;
		sock->bound = dup_socket->bound;
	}
	if (sock->fd == -1 && errno == EINTR && tries++ < 42)
		goto again;

#ifdef F_DUPFD
	/*
	 * Leave a space for stdio and TCP to work in.
	 */
	if (manager->reserved != 0 && sock->type == isc_sockettype_udp &&
	    sock->fd >= 0 && sock->fd < manager->reserved) {
		int new, tmp;
		new = fcntl(sock->fd, F_DUPFD, manager->reserved);
		tmp = errno;
		(void)close(sock->fd);
		errno = tmp;
		sock->fd = new;
		err = "isc_socket_create: fcntl/reserved";
	} else if (sock->fd >= 0 && sock->fd < 20) {
		int new, tmp;
		new = fcntl(sock->fd, F_DUPFD, 20);
		tmp = errno;
		(void)close(sock->fd);
		errno = tmp;
		sock->fd = new;
		err = "isc_socket_create: fcntl";
	}
#endif

	/* Refuse descriptors beyond the manager's configured capacity. */
	if (sock->fd >= (int)manager->maxsocks) {
		(void)close(sock->fd);
		isc_log_iwrite(isc_lctx, ISC_LOGCATEGORY_GENERAL,
			       ISC_LOGMODULE_SOCKET, ISC_LOG_ERROR,
			       isc_msgcat, ISC_MSGSET_SOCKET,
			       ISC_MSG_TOOMANYFDS,
			       "socket: file descriptor exceeds limit (%d/%u)",
			       sock->fd, manager->maxsocks);
		return (ISC_R_NORESOURCES);
	}

	/* Map creation errno values to ISC result codes. */
	if (sock->fd < 0) {
		switch (errno) {
		case EMFILE:
		case ENFILE:
			isc__strerror(errno, strbuf, sizeof(strbuf));
			isc_log_iwrite(isc_lctx, ISC_LOGCATEGORY_GENERAL,
				       ISC_LOGMODULE_SOCKET, ISC_LOG_ERROR,
				       isc_msgcat, ISC_MSGSET_SOCKET,
				       ISC_MSG_TOOMANYFDS,
				       "%s: %s", err, strbuf);
			/* fallthrough */
		case ENOBUFS:
			return (ISC_R_NORESOURCES);

		case EPROTONOSUPPORT:
		case EPFNOSUPPORT:
		case EAFNOSUPPORT:
		/*
		 * Linux 2.2 (and maybe others) return EINVAL instead of
		 * EAFNOSUPPORT.
		 */
		case EINVAL:
			return (ISC_R_FAMILYNOSUPPORT);

		default:
			isc__strerror(errno, strbuf, sizeof(strbuf));
			UNEXPECTED_ERROR(__FILE__, __LINE__,
					 "%s() %s: %s", err,
					 isc_msgcat_get(isc_msgcat,
							ISC_MSGSET_GENERAL,
							ISC_MSG_FAILED,
							"failed"),
					 strbuf);
			return (ISC_R_UNEXPECTED);
		}
	}

	/* A dup()ed descriptor already carries the original's options. */
	if (dup_socket != NULL)
		goto setup_done;

	result = make_nonblock(sock->fd);
	if (result != ISC_R_SUCCESS) {
		(void)close(sock->fd);
		return (result);
	}

#ifdef SO_BSDCOMPAT
	RUNTIME_CHECK(isc_once_do(&bsdcompat_once,
				  clear_bsdcompat) == ISC_R_SUCCESS);
	if (sock->type != isc_sockettype_unix && bsdcompat &&
	    setsockopt(sock->fd, SOL_SOCKET, SO_BSDCOMPAT,
		       (void *)&on, sizeof(on)) < 0) {
		isc__strerror(errno, strbuf, sizeof(strbuf));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "setsockopt(%d, SO_BSDCOMPAT) %s: %s",
				 sock->fd,
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"),
				 strbuf);
		/* Press on... */
	}
#endif

#ifdef SO_NOSIGPIPE
	if (setsockopt(sock->fd, SOL_SOCKET, SO_NOSIGPIPE,
		       (void *)&on, sizeof(on)) < 0) {
		isc__strerror(errno, strbuf, sizeof(strbuf));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "setsockopt(%d, SO_NOSIGPIPE) %s: %s",
				 sock->fd,
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"),
				 strbuf);
		/* Press on... */
	}
#endif

	/* UDP-only options: timestamps, pktinfo, MTU handling, buffers. */
#if defined(USE_CMSG) || defined(SO_RCVBUF)
	if (sock->type == isc_sockettype_udp) {

#if defined(USE_CMSG)
#if defined(SO_TIMESTAMP)
		if (setsockopt(sock->fd, SOL_SOCKET, SO_TIMESTAMP,
			       (void *)&on, sizeof(on)) < 0
		    && errno != ENOPROTOOPT) {
			isc__strerror(errno, strbuf, sizeof(strbuf));
			UNEXPECTED_ERROR(__FILE__, __LINE__,
					 "setsockopt(%d, SO_TIMESTAMP) %s: %s",
					 sock->fd,
					 isc_msgcat_get(isc_msgcat,
							ISC_MSGSET_GENERAL,
							ISC_MSG_FAILED,
							"failed"),
					 strbuf);
			/* Press on... */
		}
#endif /* SO_TIMESTAMP */

#if defined(ISC_PLATFORM_HAVEIPV6)
		if (sock->pf == AF_INET6 && sock->recvcmsgbuflen == 0U) {
			/*
			 * Warn explicitly because this anomaly can be hidden
			 * in usual operation (and unexpectedly appear later).
			 */
			UNEXPECTED_ERROR(__FILE__, __LINE__,
					 "No buffer available to receive "
					 "IPv6 destination");
		}
#ifdef ISC_PLATFORM_HAVEIN6PKTINFO
#ifdef IPV6_RECVPKTINFO
		/* RFC 3542 */
		if ((sock->pf == AF_INET6)
		    && (setsockopt(sock->fd, IPPROTO_IPV6, IPV6_RECVPKTINFO,
				   (void *)&on, sizeof(on)) < 0)) {
			isc__strerror(errno, strbuf, sizeof(strbuf));
			UNEXPECTED_ERROR(__FILE__, __LINE__,
					 "setsockopt(%d, IPV6_RECVPKTINFO) "
					 "%s: %s", sock->fd,
					 isc_msgcat_get(isc_msgcat,
							ISC_MSGSET_GENERAL,
							ISC_MSG_FAILED,
							"failed"),
					 strbuf);
		}
#else
		/* RFC 2292 */
		if ((sock->pf == AF_INET6)
		    && (setsockopt(sock->fd, IPPROTO_IPV6, IPV6_PKTINFO,
				   (void *)&on, sizeof(on)) < 0)) {
			isc__strerror(errno, strbuf, sizeof(strbuf));
			UNEXPECTED_ERROR(__FILE__, __LINE__,
					 "setsockopt(%d, IPV6_PKTINFO) %s: %s",
					 sock->fd,
					 isc_msgcat_get(isc_msgcat,
							ISC_MSGSET_GENERAL,
							ISC_MSG_FAILED,
							"failed"),
					 strbuf);
		}
#endif /* IPV6_RECVPKTINFO */
#endif /* ISC_PLATFORM_HAVEIN6PKTINFO */
#ifdef IPV6_USE_MIN_MTU        /* RFC 3542, not too common yet*/
		/* use minimum MTU */
		if (sock->pf == AF_INET6 &&
		    setsockopt(sock->fd, IPPROTO_IPV6, IPV6_USE_MIN_MTU,
			       (void *)&on, sizeof(on)) < 0) {
			isc__strerror(errno, strbuf, sizeof(strbuf));
			UNEXPECTED_ERROR(__FILE__, __LINE__,
					 "setsockopt(%d, IPV6_USE_MIN_MTU) "
					 "%s: %s", sock->fd,
					 isc_msgcat_get(isc_msgcat,
							ISC_MSGSET_GENERAL,
							ISC_MSG_FAILED,
							"failed"),
					 strbuf);
		}
#endif
#if defined(IPV6_MTU)
		/*
		 * Use minimum MTU on IPv6 sockets.
		 */
		if (sock->pf == AF_INET6) {
			int mtu = 1280;
			(void)setsockopt(sock->fd, IPPROTO_IPV6, IPV6_MTU,
					 &mtu, sizeof(mtu));
		}
#endif
#if defined(IPV6_MTU_DISCOVER) && defined(IPV6_PMTUDISC_DONT)
		/*
		 * Turn off Path MTU discovery on IPv6/UDP sockets.
		 */
		if (sock->pf == AF_INET6) {
			int action = IPV6_PMTUDISC_DONT;
			(void)setsockopt(sock->fd, IPPROTO_IPV6,
					 IPV6_MTU_DISCOVER, &action,
					 sizeof(action));
		}
#endif
#endif /* ISC_PLATFORM_HAVEIPV6 */
#endif /* defined(USE_CMSG) */

#if defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
		/*
		 * Turn off Path MTU discovery on IPv4/UDP sockets.
		 */
		if (sock->pf == AF_INET) {
			int action = IP_PMTUDISC_DONT;
			(void)setsockopt(sock->fd, IPPROTO_IP, IP_MTU_DISCOVER,
					 &action, sizeof(action));
		}
#endif
#if defined(IP_DONTFRAG)
		/*
		 * Turn off Path MTU discovery on IPv4/UDP sockets.
		 */
		if (sock->pf == AF_INET) {
			int off = 0;
			(void)setsockopt(sock->fd, IPPROTO_IP, IP_DONTFRAG,
					 &off, sizeof(off));
		}
#endif

#if defined(SO_RCVBUF)
		/* Grow the receive buffer to at least RCVBUFSIZE. */
		optlen = sizeof(size);
		if (getsockopt(sock->fd, SOL_SOCKET, SO_RCVBUF,
			       (void *)&size, &optlen) >= 0 &&
		     size < RCVBUFSIZE) {
			size = RCVBUFSIZE;
			if (setsockopt(sock->fd, SOL_SOCKET, SO_RCVBUF,
				       (void *)&size, sizeof(size)) == -1) {
				isc__strerror(errno, strbuf, sizeof(strbuf));
				UNEXPECTED_ERROR(__FILE__, __LINE__,
					"setsockopt(%d, SO_RCVBUF, %d) %s: %s",
					sock->fd, size,
					isc_msgcat_get(isc_msgcat,
						       ISC_MSGSET_GENERAL,
						       ISC_MSG_FAILED,
						       "failed"),
					strbuf);
			}
		}
#endif
	}
#endif /* defined(USE_CMSG) || defined(SO_RCVBUF) */

setup_done:
	inc_stats(manager->stats, sock->statsindex[STATID_OPEN]);

	return (ISC_R_SUCCESS);
}
2549
2550/*
2551 * Create a 'type' socket or duplicate an existing socket, managed
2552 * by 'manager'.  Events will be posted to 'task' and when dispatched
2553 * 'action' will be called with 'arg' as the arg value.  The new
2554 * socket is returned in 'socketp'.
2555 */
2556static isc_result_t
2557socket_create(isc_socketmgr_t *manager0, int pf, isc_sockettype_t type,
2558	      isc_socket_t **socketp, isc_socket_t *dup_socket)
2559{
2560	isc__socket_t *sock = NULL;
2561	isc__socketmgr_t *manager = (isc__socketmgr_t *)manager0;
2562	isc_result_t result;
2563	int lockid;
2564
2565	REQUIRE(VALID_MANAGER(manager));
2566	REQUIRE(socketp != NULL && *socketp == NULL);
2567	REQUIRE(type != isc_sockettype_fdwatch);
2568
2569	result = allocate_socket(manager, type, &sock);
2570	if (result != ISC_R_SUCCESS)
2571		return (result);
2572
2573	switch (sock->type) {
2574	case isc_sockettype_udp:
2575		sock->statsindex =
2576			(pf == AF_INET) ? upd4statsindex : upd6statsindex;
2577		break;
2578	case isc_sockettype_tcp:
2579		sock->statsindex =
2580			(pf == AF_INET) ? tcp4statsindex : tcp6statsindex;
2581		break;
2582	case isc_sockettype_unix:
2583		sock->statsindex = unixstatsindex;
2584		break;
2585	default:
2586		INSIST(0);
2587	}
2588
2589	sock->pf = pf;
2590
2591	result = opensocket(manager, sock, (isc__socket_t *)dup_socket);
2592	if (result != ISC_R_SUCCESS) {
2593		inc_stats(manager->stats, sock->statsindex[STATID_OPENFAIL]);
2594		free_socket(&sock);
2595		return (result);
2596	}
2597
2598	sock->common.methods = (isc_socketmethods_t *)&socketmethods;
2599	sock->references = 1;
2600	*socketp = (isc_socket_t *)sock;
2601
2602	/*
2603	 * Note we don't have to lock the socket like we normally would because
2604	 * there are no external references to it yet.
2605	 */
2606
2607	lockid = FDLOCK_ID(sock->fd);
2608	LOCK(&manager->fdlock[lockid]);
2609	manager->fds[sock->fd] = sock;
2610	manager->fdstate[sock->fd] = MANAGED;
2611#ifdef USE_DEVPOLL
2612	INSIST(sock->manager->fdpollinfo[sock->fd].want_read == 0 &&
2613	       sock->manager->fdpollinfo[sock->fd].want_write == 0);
2614#endif
2615	UNLOCK(&manager->fdlock[lockid]);
2616
2617	LOCK(&manager->lock);
2618	ISC_LIST_APPEND(manager->socklist, sock, link);
2619#ifdef USE_SELECT
2620	if (manager->maxfd < sock->fd)
2621		manager->maxfd = sock->fd;
2622#endif
2623	UNLOCK(&manager->lock);
2624
2625	socket_log(sock, NULL, CREATION, isc_msgcat, ISC_MSGSET_SOCKET,
2626		   ISC_MSG_CREATED, dup_socket == NULL ? "dupped" : "created");
2627
2628	return (ISC_R_SUCCESS);
2629}
2630
2631/*%
2632 * Create a new 'type' socket managed by 'manager'.  Events
2633 * will be posted to 'task' and when dispatched 'action' will be
2634 * called with 'arg' as the arg value.  The new socket is returned
2635 * in 'socketp'.
2636 */
2637ISC_SOCKETFUNC_SCOPE isc_result_t
2638isc__socket_create(isc_socketmgr_t *manager0, int pf, isc_sockettype_t type,
2639		   isc_socket_t **socketp)
2640{
2641	return (socket_create(manager0, pf, type, socketp, NULL));
2642}
2643
2644/*%
2645 * Duplicate an existing socket.  The new socket is returned
2646 * in 'socketp'.
2647 */
2648ISC_SOCKETFUNC_SCOPE isc_result_t
2649isc__socket_dup(isc_socket_t *sock0, isc_socket_t **socketp) {
2650	isc__socket_t *sock = (isc__socket_t *)sock0;
2651
2652	REQUIRE(VALID_SOCKET(sock));
2653	REQUIRE(socketp != NULL && *socketp == NULL);
2654
2655	return (socket_create((isc_socketmgr_t *) sock->manager,
2656			      sock->pf, sock->type, socketp,
2657			      sock0));
2658}
2659
2660#ifdef BIND9
/*
 * (Re)open the descriptor of an already-allocated, currently closed
 * socket (fd == -1) and register the new fd with the manager.  The
 * caller must hold the only reference to the socket.
 */
ISC_SOCKETFUNC_SCOPE isc_result_t
isc__socket_open(isc_socket_t *sock0) {
	isc_result_t result;
	isc__socket_t *sock = (isc__socket_t *)sock0;

	REQUIRE(VALID_SOCKET(sock));

	LOCK(&sock->lock);
	REQUIRE(sock->references == 1);
	REQUIRE(sock->type != isc_sockettype_fdwatch);
	UNLOCK(&sock->lock);
	/*
	 * We don't need to retain the lock hereafter, since no one else has
	 * this socket.
	 */
	REQUIRE(sock->fd == -1);

	result = opensocket(sock->manager, sock, NULL);
	if (result != ISC_R_SUCCESS)
		sock->fd = -1;

	if (result == ISC_R_SUCCESS) {
		/* Register the new fd in the manager's tables. */
		int lockid = FDLOCK_ID(sock->fd);

		LOCK(&sock->manager->fdlock[lockid]);
		sock->manager->fds[sock->fd] = sock;
		sock->manager->fdstate[sock->fd] = MANAGED;
#ifdef USE_DEVPOLL
		INSIST(sock->manager->fdpollinfo[sock->fd].want_read == 0 &&
		       sock->manager->fdpollinfo[sock->fd].want_write == 0);
#endif
		UNLOCK(&sock->manager->fdlock[lockid]);

#ifdef USE_SELECT
		/* Keep the select() fd range up to date. */
		LOCK(&sock->manager->lock);
		if (sock->manager->maxfd < sock->fd)
			sock->manager->maxfd = sock->fd;
		UNLOCK(&sock->manager->lock);
#endif
	}

	return (result);
}
2704#endif	/* BIND9 */
2705
2706/*
2707 * Create a new 'type' socket managed by 'manager'.  Events
2708 * will be posted to 'task' and when dispatched 'action' will be
2709 * called with 'arg' as the arg value.  The new socket is returned
2710 * in 'socketp'.
2711 */
/*
 * Wrap an externally owned file descriptor 'fd' in an fdwatch socket:
 * 'callback' is invoked on 'task' with 'cbarg' when the watched
 * readable/writable conditions in 'flags' occur.  The new socket is
 * returned in 'socketp'.
 */
ISC_SOCKETFUNC_SCOPE isc_result_t
isc__socket_fdwatchcreate(isc_socketmgr_t *manager0, int fd, int flags,
			  isc_sockfdwatch_t callback, void *cbarg,
			  isc_task_t *task, isc_socket_t **socketp)
{
	isc__socketmgr_t *manager = (isc__socketmgr_t *)manager0;
	isc__socket_t *sock = NULL;
	isc_result_t result;
	int lockid;

	REQUIRE(VALID_MANAGER(manager));
	REQUIRE(socketp != NULL && *socketp == NULL);

	result = allocate_socket(manager, isc_sockettype_fdwatch, &sock);
	if (result != ISC_R_SUCCESS)
		return (result);

	/* Adopt the caller's fd; no descriptor is opened here. */
	sock->fd = fd;
	sock->fdwatcharg = cbarg;
	sock->fdwatchcb = callback;
	sock->fdwatchflags = flags;
	sock->fdwatchtask = task;
	sock->statsindex = fdwatchstatsindex;

	sock->common.methods = (isc_socketmethods_t *)&socketmethods;
	sock->references = 1;
	*socketp = (isc_socket_t *)sock;

	/*
	 * Note we don't have to lock the socket like we normally would because
	 * there are no external references to it yet.
	 */

	lockid = FDLOCK_ID(sock->fd);
	LOCK(&manager->fdlock[lockid]);
	manager->fds[sock->fd] = sock;
	manager->fdstate[sock->fd] = MANAGED;
	UNLOCK(&manager->fdlock[lockid]);

	LOCK(&manager->lock);
	ISC_LIST_APPEND(manager->socklist, sock, link);
#ifdef USE_SELECT
	if (manager->maxfd < sock->fd)
		manager->maxfd = sock->fd;
#endif
	UNLOCK(&manager->lock);

	/* Start watching immediately for whichever conditions were asked. */
	if (flags & ISC_SOCKFDWATCH_READ)
		select_poke(sock->manager, sock->fd, SELECT_POKE_READ);
	if (flags & ISC_SOCKFDWATCH_WRITE)
		select_poke(sock->manager, sock->fd, SELECT_POKE_WRITE);

	socket_log(sock, NULL, CREATION, isc_msgcat, ISC_MSGSET_SOCKET,
		   ISC_MSG_CREATED, "fdwatch-created");

	return (ISC_R_SUCCESS);
}
2769
2770/*
2771 * Indicate to the manager that it should watch the socket again.
2772 * This can be used to restart watching if the previous event handler
2773 * didn't indicate there was more data to be processed.  Primarily
2774 * it is for writing but could be used for reading if desired
2775 */
2776
2777ISC_SOCKETFUNC_SCOPE isc_result_t
2778isc__socket_fdwatchpoke(isc_socket_t *sock0, int flags)
2779{
2780	isc__socket_t *sock = (isc__socket_t *)sock0;
2781
2782	REQUIRE(VALID_SOCKET(sock));
2783
2784	/*
2785	 * We check both flags first to allow us to get the lock
2786	 * once but only if we need it.
2787	 */
2788
2789	if ((flags & (ISC_SOCKFDWATCH_READ | ISC_SOCKFDWATCH_WRITE)) != 0) {
2790		LOCK(&sock->lock);
2791		if (((flags & ISC_SOCKFDWATCH_READ) != 0) &&
2792		    !sock->pending_recv)
2793			select_poke(sock->manager, sock->fd,
2794				    SELECT_POKE_READ);
2795		if (((flags & ISC_SOCKFDWATCH_WRITE) != 0) &&
2796		    !sock->pending_send)
2797			select_poke(sock->manager, sock->fd,
2798				    SELECT_POKE_WRITE);
2799		UNLOCK(&sock->lock);
2800	}
2801
2802	socket_log(sock, NULL, TRACE, isc_msgcat, ISC_MSGSET_SOCKET,
2803		   ISC_MSG_POKED, "fdwatch-poked flags: %d", flags);
2804
2805	return (ISC_R_SUCCESS);
2806}
2807
2808/*
2809 * Attach to a socket.  Caller must explicitly detach when it is done.
2810 */
2811ISC_SOCKETFUNC_SCOPE void
2812isc__socket_attach(isc_socket_t *sock0, isc_socket_t **socketp) {
2813	isc__socket_t *sock = (isc__socket_t *)sock0;
2814
2815	REQUIRE(VALID_SOCKET(sock));
2816	REQUIRE(socketp != NULL && *socketp == NULL);
2817
2818	LOCK(&sock->lock);
2819	sock->references++;
2820	UNLOCK(&sock->lock);
2821
2822	*socketp = (isc_socket_t *)sock;
2823}
2824
2825/*
2826 * Dereference a socket.  If this is the last reference to it, clean things
2827 * up by destroying the socket.
2828 */
2829ISC_SOCKETFUNC_SCOPE void
2830isc__socket_detach(isc_socket_t **socketp) {
2831	isc__socket_t *sock;
2832	isc_boolean_t kill_socket = ISC_FALSE;
2833
2834	REQUIRE(socketp != NULL);
2835	sock = (isc__socket_t *)*socketp;
2836	REQUIRE(VALID_SOCKET(sock));
2837
2838	LOCK(&sock->lock);
2839	REQUIRE(sock->references > 0);
2840	sock->references--;
2841	if (sock->references == 0)
2842		kill_socket = ISC_TRUE;
2843	UNLOCK(&sock->lock);
2844
2845	if (kill_socket)
2846		destroy(&sock);
2847
2848	*socketp = NULL;
2849}
2850
2851#ifdef BIND9
2852ISC_SOCKETFUNC_SCOPE isc_result_t
2853isc__socket_close(isc_socket_t *sock0) {
2854	isc__socket_t *sock = (isc__socket_t *)sock0;
2855	int fd;
2856	isc__socketmgr_t *manager;
2857
2858	fflush(stdout);
2859	REQUIRE(VALID_SOCKET(sock));
2860
2861	LOCK(&sock->lock);
2862
2863	REQUIRE(sock->references == 1);
2864	REQUIRE(sock->type != isc_sockettype_fdwatch);
2865	REQUIRE(sock->fd >= 0 && sock->fd < (int)sock->manager->maxsocks);
2866
2867	INSIST(!sock->connecting);
2868	INSIST(!sock->pending_recv);
2869	INSIST(!sock->pending_send);
2870	INSIST(!sock->pending_accept);
2871	INSIST(ISC_LIST_EMPTY(sock->recv_list));
2872	INSIST(ISC_LIST_EMPTY(sock->send_list));
2873	INSIST(ISC_LIST_EMPTY(sock->accept_list));
2874	INSIST(sock->connect_ev == NULL);
2875
2876	manager = sock->manager;
2877	fd = sock->fd;
2878	sock->fd = -1;
2879	sock->dupped = 0;
2880	memset(sock->name, 0, sizeof(sock->name));
2881	sock->tag = NULL;
2882	sock->listener = 0;
2883	sock->connected = 0;
2884	sock->connecting = 0;
2885	sock->bound = 0;
2886	isc_sockaddr_any(&sock->peer_address);
2887
2888	UNLOCK(&sock->lock);
2889
2890	closesocket(manager, sock, fd);
2891
2892	return (ISC_R_SUCCESS);
2893}
2894#endif	/* BIND9 */
2895
2896/*
2897 * I/O is possible on a given socket.  Schedule an event to this task that
2898 * will call an internal function to do the I/O.  This will charge the
2899 * task with the I/O operation and let our select loop handler get back
2900 * to doing something real as fast as possible.
2901 *
2902 * The socket and manager must be locked before calling this function.
2903 */
static void
dispatch_recv(isc__socket_t *sock) {
	intev_t *iev;
	isc_socketevent_t *ev;
	isc_task_t *sender;

	INSIST(!sock->pending_recv);

	/*
	 * For normal sockets the destination task comes from the first
	 * queued recv event; fdwatch sockets always deliver to their
	 * fixed task.  No queued event means nothing to dispatch.
	 */
	if (sock->type != isc_sockettype_fdwatch) {
		ev = ISC_LIST_HEAD(sock->recv_list);
		if (ev == NULL)
			return;
		socket_log(sock, NULL, EVENT, NULL, 0, 0,
			   "dispatch_recv:  event %p -> task %p",
			   ev, ev->ev_sender);
		sender = ev->ev_sender;
	} else {
		sender = sock->fdwatchtask;
	}

	sock->pending_recv = 1;
	iev = &sock->readable_ev;

	/* Keep the socket alive while the internal event is in flight. */
	sock->references++;
	iev->ev_sender = sock;
	if (sock->type == isc_sockettype_fdwatch)
		iev->ev_action = internal_fdwatch_read;
	else
		iev->ev_action = internal_recv;
	iev->ev_arg = sock;

	isc_task_send(sender, (isc_event_t **)&iev);
}
2937
/*
 * Writable counterpart of dispatch_recv(): queue an internal writable
 * event to the task that should perform the pending send (or to the
 * fdwatch task).  Socket and manager must be locked by the caller.
 */
static void
dispatch_send(isc__socket_t *sock) {
	intev_t *iev;
	isc_socketevent_t *ev;
	isc_task_t *sender;

	INSIST(!sock->pending_send);

	/* Choose the destination task; bail if nothing is queued. */
	if (sock->type != isc_sockettype_fdwatch) {
		ev = ISC_LIST_HEAD(sock->send_list);
		if (ev == NULL)
			return;
		socket_log(sock, NULL, EVENT, NULL, 0, 0,
			   "dispatch_send:  event %p -> task %p",
			   ev, ev->ev_sender);
		sender = ev->ev_sender;
	} else {
		sender = sock->fdwatchtask;
	}

	sock->pending_send = 1;
	iev = &sock->writable_ev;

	/* Keep the socket alive while the internal event is in flight. */
	sock->references++;
	iev->ev_sender = sock;
	if (sock->type == isc_sockettype_fdwatch)
		iev->ev_action = internal_fdwatch_write;
	else
		iev->ev_action = internal_send;
	iev->ev_arg = sock;

	isc_task_send(sender, (isc_event_t **)&iev);
}
2971
2972/*
2973 * Dispatch an internal accept event.
2974 */
static void
dispatch_accept(isc__socket_t *sock) {
	intev_t *iev;
	isc_socket_newconnev_t *ev;

	INSIST(!sock->pending_accept);

	/*
	 * Are there any done events left, or were they all canceled
	 * before the manager got the socket lock?
	 */
	ev = ISC_LIST_HEAD(sock->accept_list);
	if (ev == NULL)
		return;

	sock->pending_accept = 1;
	iev = &sock->readable_ev;

	sock->references++;  /* keep socket around for this internal event */
	iev->ev_sender = sock;
	iev->ev_action = internal_accept;
	iev->ev_arg = sock;

	/* Deliver to the task that queued the first accept request. */
	isc_task_send(ev->ev_sender, (isc_event_t **)&iev);
}
3000
/*
 * Queue an internal writable event signaling that a pending connect()
 * has completed (or failed); internal_connect() will determine which.
 * A connect event must be outstanding and the socket must be in the
 * connecting state.
 */
static void
dispatch_connect(isc__socket_t *sock) {
	intev_t *iev;
	isc_socket_connev_t *ev;

	iev = &sock->writable_ev;

	ev = sock->connect_ev;
	INSIST(ev != NULL); /* XXX */

	INSIST(sock->connecting);

	sock->references++;  /* keep socket around for this internal event */
	iev->ev_sender = sock;
	iev->ev_action = internal_connect;
	iev->ev_arg = sock;

	isc_task_send(ev->ev_sender, (isc_event_t **)&iev);
}
3020
3021/*
3022 * Dequeue an item off the given socket's read queue, set the result code
3023 * in the done event to the one provided, and send it to the task it was
3024 * destined for.
3025 *
3026 * If the event to be sent is on a list, remove it before sending.  If
3027 * asked to, send and detach from the socket as well.
3028 *
3029 * Caller must have the socket locked if the event is attached to the socket.
3030 */
3031static void
3032send_recvdone_event(isc__socket_t *sock, isc_socketevent_t **dev) {
3033	isc_task_t *task;
3034
3035	task = (*dev)->ev_sender;
3036
3037	(*dev)->ev_sender = sock;
3038
3039	if (ISC_LINK_LINKED(*dev, ev_link))
3040		ISC_LIST_DEQUEUE(sock->recv_list, *dev, ev_link);
3041
3042	if (((*dev)->attributes & ISC_SOCKEVENTATTR_ATTACHED)
3043	    == ISC_SOCKEVENTATTR_ATTACHED)
3044		isc_task_sendanddetach(&task, (isc_event_t **)dev);
3045	else
3046		isc_task_send(task, (isc_event_t **)dev);
3047}
3048
3049/*
3050 * See comments for send_recvdone_event() above.
3051 *
3052 * Caller must have the socket locked if the event is attached to the socket.
3053 */
3054static void
3055send_senddone_event(isc__socket_t *sock, isc_socketevent_t **dev) {
3056	isc_task_t *task;
3057
3058	INSIST(dev != NULL && *dev != NULL);
3059
3060	task = (*dev)->ev_sender;
3061	(*dev)->ev_sender = sock;
3062
3063	if (ISC_LINK_LINKED(*dev, ev_link))
3064		ISC_LIST_DEQUEUE(sock->send_list, *dev, ev_link);
3065
3066	if (((*dev)->attributes & ISC_SOCKEVENTATTR_ATTACHED)
3067	    == ISC_SOCKEVENTATTR_ATTACHED)
3068		isc_task_sendanddetach(&task, (isc_event_t **)dev);
3069	else
3070		isc_task_send(task, (isc_event_t **)dev);
3071}
3072
3073/*
3074 * Call accept() on a socket, to get the new file descriptor.  The listen
3075 * socket is used as a prototype to create a new isc_socket_t.  The new
3076 * socket has one outstanding reference.  The task receiving the event
3077 * will be detached from just after the event is delivered.
3078 *
3079 * On entry to this function, the event delivered is the internal
3080 * readable event, and the first item on the accept_list should be
3081 * the done event we want to send.  If the list is empty, this is a no-op,
3082 * so just unlock and return.
3083 */
static void
internal_accept(isc_task_t *me, isc_event_t *ev) {
	isc__socket_t *sock;
	isc__socketmgr_t *manager;
	isc_socket_newconnev_t *dev;
	isc_task_t *task;
	ISC_SOCKADDR_LEN_T addrlen;
	int fd;
	isc_result_t result = ISC_R_SUCCESS;
	char strbuf[ISC_STRERRORSIZE];
	const char *err = "accept";	/* syscall name used in error logs */

	UNUSED(me);

	sock = ev->ev_sender;
	INSIST(VALID_SOCKET(sock));

	LOCK(&sock->lock);
	socket_log(sock, NULL, TRACE,
		   isc_msgcat, ISC_MSGSET_SOCKET, ISC_MSG_ACCEPTLOCK,
		   "internal_accept called, locked socket");

	manager = sock->manager;
	INSIST(VALID_MANAGER(manager));

	INSIST(sock->listener);
	INSIST(sock->pending_accept == 1);
	sock->pending_accept = 0;

	/*
	 * Drop the reference held on behalf of this internal event.  If
	 * it was the last one, the socket is being torn down; destroy it
	 * and bail out.
	 */
	INSIST(sock->references > 0);
	sock->references--;  /* the internal event is done with this socket */
	if (sock->references == 0) {
		UNLOCK(&sock->lock);
		destroy(&sock);
		return;
	}

	/*
	 * Get the first item off the accept list.
	 * If it is empty, unlock the socket and return.
	 */
	dev = ISC_LIST_HEAD(sock->accept_list);
	if (dev == NULL) {
		UNLOCK(&sock->lock);
		return;
	}

	/*
	 * Try to accept the new connection.  If the accept fails with
	 * EAGAIN or EINTR, simply poke the watcher to watch this socket
	 * again.  Also ignore ECONNRESET, which has been reported to
	 * be spuriously returned on Linux 2.2.19 although it is not
	 * a documented error for accept().  ECONNABORTED has been
	 * reported for Solaris 8.  The rest are thrown in not because
	 * we have seen them but because they are ignored by other
	 * daemons such as BIND 8 and Apache.
	 */

	/* The peer address is written directly into the new socket. */
	addrlen = sizeof(NEWCONNSOCK(dev)->peer_address.type);
	memset(&NEWCONNSOCK(dev)->peer_address.type, 0, addrlen);
	fd = accept(sock->fd, &NEWCONNSOCK(dev)->peer_address.type.sa,
		    (void *)&addrlen);

#ifdef F_DUPFD
	/*
	 * Leave a space for stdio to work in: dup the accepted fd to 20
	 * or above and close the low one.  errno from fcntl() is saved
	 * across the close() so the error handling below sees it.
	 */
	if (fd >= 0 && fd < 20) {
		int new, tmp;
		new = fcntl(fd, F_DUPFD, 20);
		tmp = errno;
		(void)close(fd);
		errno = tmp;
		fd = new;
		err = "accept/fcntl";
	}
#endif

	if (fd < 0) {
		if (SOFT_ERROR(errno))
			goto soft_error;
		switch (errno) {
		case ENFILE:
		case EMFILE:
			/* fd exhaustion: log it, then treat as soft. */
			isc_log_iwrite(isc_lctx, ISC_LOGCATEGORY_GENERAL,
				       ISC_LOGMODULE_SOCKET, ISC_LOG_ERROR,
				       isc_msgcat, ISC_MSGSET_SOCKET,
				       ISC_MSG_TOOMANYFDS,
				       "%s: too many open file descriptors",
				       err);
			goto soft_error;

		case ENOBUFS:
		case ENOMEM:
		case ECONNRESET:
		case ECONNABORTED:
		case EHOSTUNREACH:
		case EHOSTDOWN:
		case ENETUNREACH:
		case ENETDOWN:
		case ECONNREFUSED:
#ifdef EPROTO
		case EPROTO:
#endif
#ifdef ENONET
		case ENONET:
#endif
			goto soft_error;
		default:
			break;
		}
		/* Anything else is unexpected: log it and fail the accept. */
		isc__strerror(errno, strbuf, sizeof(strbuf));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "internal_accept: %s() %s: %s", err,
				 isc_msgcat_get(isc_msgcat,
						ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED,
						"failed"),
				 strbuf);
		fd = -1;
		result = ISC_R_UNEXPECTED;
	} else {
		/*
		 * Sanity-check what accept() handed back: a non-empty
		 * peer address, of the family this listener uses, on an
		 * fd within the manager's configured limit.  Any failure
		 * is treated like a soft error after closing the fd.
		 */
		if (addrlen == 0U) {
			UNEXPECTED_ERROR(__FILE__, __LINE__,
					 "internal_accept(): "
					 "accept() failed to return "
					 "remote address");

			(void)close(fd);
			goto soft_error;
		} else if (NEWCONNSOCK(dev)->peer_address.type.sa.sa_family !=
			   sock->pf)
		{
			UNEXPECTED_ERROR(__FILE__, __LINE__,
					 "internal_accept(): "
					 "accept() returned peer address "
					 "family %u (expected %u)",
					 NEWCONNSOCK(dev)->peer_address.
					 type.sa.sa_family,
					 sock->pf);
			(void)close(fd);
			goto soft_error;
		} else if (fd >= (int)manager->maxsocks) {
			isc_log_iwrite(isc_lctx, ISC_LOGCATEGORY_GENERAL,
				       ISC_LOGMODULE_SOCKET, ISC_LOG_ERROR,
				       isc_msgcat, ISC_MSGSET_SOCKET,
				       ISC_MSG_TOOMANYFDS,
				       "accept: "
				       "file descriptor exceeds limit (%d/%u)",
				       fd, manager->maxsocks);
			(void)close(fd);
			goto soft_error;
		}
	}

	if (fd != -1) {
		NEWCONNSOCK(dev)->peer_address.length = addrlen;
		NEWCONNSOCK(dev)->pf = sock->pf;
	}

	/*
	 * Pull off the done event.
	 */
	ISC_LIST_UNLINK(sock->accept_list, dev, ev_link);

	/*
	 * Poke watcher if there are more pending accepts.
	 */
	if (!ISC_LIST_EMPTY(sock->accept_list))
		select_poke(sock->manager, sock->fd, SELECT_POKE_ACCEPT);

	UNLOCK(&sock->lock);

	/* make_nonblock() is done outside the socket lock. */
	if (fd != -1) {
		result = make_nonblock(fd);
		if (result != ISC_R_SUCCESS) {
			(void)close(fd);
			fd = -1;
		}
	}

	/*
	 * -1 means the new socket didn't happen.
	 */
	if (fd != -1) {
		int lockid = FDLOCK_ID(fd);

		/* Register the new fd with the manager's fd tables. */
		LOCK(&manager->fdlock[lockid]);
		manager->fds[fd] = NEWCONNSOCK(dev);
		manager->fdstate[fd] = MANAGED;
		UNLOCK(&manager->fdlock[lockid]);

		LOCK(&manager->lock);
		ISC_LIST_APPEND(manager->socklist, NEWCONNSOCK(dev), link);

		NEWCONNSOCK(dev)->fd = fd;
		NEWCONNSOCK(dev)->bound = 1;
		NEWCONNSOCK(dev)->connected = 1;

		/*
		 * Save away the remote address
		 */
		dev->address = NEWCONNSOCK(dev)->peer_address;

#ifdef USE_SELECT
		if (manager->maxfd < fd)
			manager->maxfd = fd;
#endif

		socket_log(sock, &NEWCONNSOCK(dev)->peer_address, CREATION,
			   isc_msgcat, ISC_MSGSET_SOCKET, ISC_MSG_ACCEPTEDCXN,
			   "accepted connection, new socket %p",
			   dev->newsocket);

		UNLOCK(&manager->lock);

		inc_stats(manager->stats, sock->statsindex[STATID_ACCEPT]);
	} else {
		/* Accept failed: release the pre-created child socket. */
		inc_stats(manager->stats, sock->statsindex[STATID_ACCEPTFAIL]);
		NEWCONNSOCK(dev)->references--;
		free_socket((isc__socket_t **)(void *)&dev->newsocket);
	}

	/*
	 * Fill in the done event details and send it off.
	 */
	dev->result = result;
	task = dev->ev_sender;
	dev->ev_sender = sock;

	isc_task_sendanddetach(&task, ISC_EVENT_PTR(&dev));
	return;

 soft_error:
	/* Transient failure: keep watching and try again later. */
	select_poke(sock->manager, sock->fd, SELECT_POKE_ACCEPT);
	UNLOCK(&sock->lock);

	inc_stats(manager->stats, sock->statsindex[STATID_ACCEPTFAIL]);
	return;
}
3324
3325static void
3326internal_recv(isc_task_t *me, isc_event_t *ev) {
3327	isc_socketevent_t *dev;
3328	isc__socket_t *sock;
3329
3330	INSIST(ev->ev_type == ISC_SOCKEVENT_INTR);
3331
3332	sock = ev->ev_sender;
3333	INSIST(VALID_SOCKET(sock));
3334
3335	LOCK(&sock->lock);
3336	socket_log(sock, NULL, IOEVENT,
3337		   isc_msgcat, ISC_MSGSET_SOCKET, ISC_MSG_INTERNALRECV,
3338		   "internal_recv: task %p got event %p", me, ev);
3339
3340	INSIST(sock->pending_recv == 1);
3341	sock->pending_recv = 0;
3342
3343	INSIST(sock->references > 0);
3344	sock->references--;  /* the internal event is done with this socket */
3345	if (sock->references == 0) {
3346		UNLOCK(&sock->lock);
3347		destroy(&sock);
3348		return;
3349	}
3350
3351	/*
3352	 * Try to do as much I/O as possible on this socket.  There are no
3353	 * limits here, currently.
3354	 */
3355	dev = ISC_LIST_HEAD(sock->recv_list);
3356	while (dev != NULL) {
3357		switch (doio_recv(sock, dev)) {
3358		case DOIO_SOFT:
3359			goto poke;
3360
3361		case DOIO_EOF:
3362			/*
3363			 * read of 0 means the remote end was closed.
3364			 * Run through the event queue and dispatch all
3365			 * the events with an EOF result code.
3366			 */
3367			do {
3368				dev->result = ISC_R_EOF;
3369				send_recvdone_event(sock, &dev);
3370				dev = ISC_LIST_HEAD(sock->recv_list);
3371			} while (dev != NULL);
3372			goto poke;
3373
3374		case DOIO_SUCCESS:
3375		case DOIO_HARD:
3376			send_recvdone_event(sock, &dev);
3377			break;
3378		}
3379
3380		dev = ISC_LIST_HEAD(sock->recv_list);
3381	}
3382
3383 poke:
3384	if (!ISC_LIST_EMPTY(sock->recv_list))
3385		select_poke(sock->manager, sock->fd, SELECT_POKE_READ);
3386
3387	UNLOCK(&sock->lock);
3388}
3389
3390static void
3391internal_send(isc_task_t *me, isc_event_t *ev) {
3392	isc_socketevent_t *dev;
3393	isc__socket_t *sock;
3394
3395	INSIST(ev->ev_type == ISC_SOCKEVENT_INTW);
3396
3397	/*
3398	 * Find out what socket this is and lock it.
3399	 */
3400	sock = (isc__socket_t *)ev->ev_sender;
3401	INSIST(VALID_SOCKET(sock));
3402
3403	LOCK(&sock->lock);
3404	socket_log(sock, NULL, IOEVENT,
3405		   isc_msgcat, ISC_MSGSET_SOCKET, ISC_MSG_INTERNALSEND,
3406		   "internal_send: task %p got event %p", me, ev);
3407
3408	INSIST(sock->pending_send == 1);
3409	sock->pending_send = 0;
3410
3411	INSIST(sock->references > 0);
3412	sock->references--;  /* the internal event is done with this socket */
3413	if (sock->references == 0) {
3414		UNLOCK(&sock->lock);
3415		destroy(&sock);
3416		return;
3417	}
3418
3419	/*
3420	 * Try to do as much I/O as possible on this socket.  There are no
3421	 * limits here, currently.
3422	 */
3423	dev = ISC_LIST_HEAD(sock->send_list);
3424	while (dev != NULL) {
3425		switch (doio_send(sock, dev)) {
3426		case DOIO_SOFT:
3427			goto poke;
3428
3429		case DOIO_HARD:
3430		case DOIO_SUCCESS:
3431			send_senddone_event(sock, &dev);
3432			break;
3433		}
3434
3435		dev = ISC_LIST_HEAD(sock->send_list);
3436	}
3437
3438 poke:
3439	if (!ISC_LIST_EMPTY(sock->send_list))
3440		select_poke(sock->manager, sock->fd, SELECT_POKE_WRITE);
3441
3442	UNLOCK(&sock->lock);
3443}
3444
/*
 * Deliver the internal writable event for an fdwatch-style socket:
 * invoke the user callback and, if it asks for more data, re-arm the
 * watcher for writability.
 */
static void
internal_fdwatch_write(isc_task_t *me, isc_event_t *ev) {
	isc__socket_t *sock;
	int more_data;

	INSIST(ev->ev_type == ISC_SOCKEVENT_INTW);

	/*
	 * Find out what socket this is and lock it.
	 */
	sock = (isc__socket_t *)ev->ev_sender;
	INSIST(VALID_SOCKET(sock));

	LOCK(&sock->lock);
	socket_log(sock, NULL, IOEVENT,
		   isc_msgcat, ISC_MSGSET_SOCKET, ISC_MSG_INTERNALSEND,
		   "internal_fdwatch_write: task %p got event %p", me, ev);

	INSIST(sock->pending_send == 1);

	/*
	 * The callback runs with the socket unlocked.  NOTE(review):
	 * presumably so it may call back into the socket API without
	 * deadlocking -- confirm.  pending_send stays set meanwhile.
	 */
	UNLOCK(&sock->lock);
	more_data = (sock->fdwatchcb)(me, (isc_socket_t *)sock,
				      sock->fdwatcharg, ISC_SOCKFDWATCH_WRITE);
	LOCK(&sock->lock);

	sock->pending_send = 0;

	/* This internal event owned one reference; give it back. */
	INSIST(sock->references > 0);
	sock->references--;  /* the internal event is done with this socket */
	if (sock->references == 0) {
		UNLOCK(&sock->lock);
		destroy(&sock);
		return;
	}

	/* Non-zero from the callback means "keep watching for write". */
	if (more_data)
		select_poke(sock->manager, sock->fd, SELECT_POKE_WRITE);

	UNLOCK(&sock->lock);
}
3485
/*
 * Deliver the internal readable event for an fdwatch-style socket:
 * invoke the user callback and, if it asks for more data, re-arm the
 * watcher for readability.
 */
static void
internal_fdwatch_read(isc_task_t *me, isc_event_t *ev) {
	isc__socket_t *sock;
	int more_data;

	INSIST(ev->ev_type == ISC_SOCKEVENT_INTR);

	/*
	 * Find out what socket this is and lock it.
	 */
	sock = (isc__socket_t *)ev->ev_sender;
	INSIST(VALID_SOCKET(sock));

	LOCK(&sock->lock);
	socket_log(sock, NULL, IOEVENT,
		   isc_msgcat, ISC_MSGSET_SOCKET, ISC_MSG_INTERNALRECV,
		   "internal_fdwatch_read: task %p got event %p", me, ev);

	INSIST(sock->pending_recv == 1);

	/*
	 * The callback runs with the socket unlocked (mirrors
	 * internal_fdwatch_write); pending_recv stays set meanwhile.
	 */
	UNLOCK(&sock->lock);
	more_data = (sock->fdwatchcb)(me, (isc_socket_t *)sock,
				      sock->fdwatcharg, ISC_SOCKFDWATCH_READ);
	LOCK(&sock->lock);

	sock->pending_recv = 0;

	/* This internal event owned one reference; give it back. */
	INSIST(sock->references > 0);
	sock->references--;  /* the internal event is done with this socket */
	if (sock->references == 0) {
		UNLOCK(&sock->lock);
		destroy(&sock);
		return;
	}

	/* Non-zero from the callback means "keep watching for read". */
	if (more_data)
		select_poke(sock->manager, sock->fd, SELECT_POKE_READ);

	UNLOCK(&sock->lock);
}
3526
3527/*
3528 * Process read/writes on each fd here.  Avoid locking
3529 * and unlocking twice if both reads and writes are possible.
3530 */
static void
process_fd(isc__socketmgr_t *manager, int fd, isc_boolean_t readable,
	   isc_boolean_t writeable)
{
	isc__socket_t *sock;
	isc_boolean_t unlock_sock;
	isc_boolean_t unwatch_read = ISC_FALSE, unwatch_write = ISC_FALSE;
	int lockid = FDLOCK_ID(fd);

	/*
	 * If the socket is going to be closed, don't do more I/O.
	 * The fdlock for this fd's slot is held for the rest of the
	 * function (until unlock_fd) to keep manager->fds[fd] stable.
	 */
	LOCK(&manager->fdlock[lockid]);
	if (manager->fdstate[fd] == CLOSE_PENDING) {
		UNLOCK(&manager->fdlock[lockid]);

		(void)unwatch_fd(manager, fd, SELECT_POKE_READ);
		(void)unwatch_fd(manager, fd, SELECT_POKE_WRITE);
		return;
	}

	sock = manager->fds[fd];
	unlock_sock = ISC_FALSE;
	if (readable) {
		/* No socket bound to this fd: just stop watching reads. */
		if (sock == NULL) {
			unwatch_read = ISC_TRUE;
			goto check_write;
		}
		/* Lock the socket once; reused by the write path below. */
		unlock_sock = ISC_TRUE;
		LOCK(&sock->lock);
		if (!SOCK_DEAD(sock)) {
			/* Readable on a listener means a pending accept. */
			if (sock->listener)
				dispatch_accept(sock);
			else
				dispatch_recv(sock);
		}
		unwatch_read = ISC_TRUE;
	}
check_write:
	if (writeable) {
		if (sock == NULL) {
			unwatch_write = ISC_TRUE;
			goto unlock_fd;
		}
		if (!unlock_sock) {
			unlock_sock = ISC_TRUE;
			LOCK(&sock->lock);
		}
		if (!SOCK_DEAD(sock)) {
			/* Writable while connecting completes the connect. */
			if (sock->connecting)
				dispatch_connect(sock);
			else
				dispatch_send(sock);
		}
		unwatch_write = ISC_TRUE;
	}
	if (unlock_sock)
		UNLOCK(&sock->lock);

 unlock_fd:
	UNLOCK(&manager->fdlock[lockid]);
	/* unwatch_fd() is called only after the fdlock is released. */
	if (unwatch_read)
		(void)unwatch_fd(manager, fd, SELECT_POKE_READ);
	if (unwatch_write)
		(void)unwatch_fd(manager, fd, SELECT_POKE_WRITE);

}
3598
3599#ifdef USE_KQUEUE
/*
 * Walk a batch of kevent results, dispatching I/O for each fd.
 * Returns ISC_TRUE when a shutdown message arrived on the control
 * pipe (watcher thread builds only).
 */
static isc_boolean_t
process_fds(isc__socketmgr_t *manager, struct kevent *events, int nevents) {
	int idx;
	isc_boolean_t done = ISC_FALSE;
#ifdef USE_WATCHER_THREAD
	isc_boolean_t ctl_seen = ISC_FALSE;
#endif

	/*
	 * Filling the whole event array is not an error, but it may be
	 * a hint that ISC_SOCKET_MAXEVENTS should be enlarged.
	 */
	if (nevents == manager->nevents) {
		manager_log(manager, ISC_LOGCATEGORY_GENERAL,
			    ISC_LOGMODULE_SOCKET, ISC_LOG_INFO,
			    "maximum number of FD events (%d) received",
			    nevents);
	}

	for (idx = 0; idx < nevents; idx++) {
		REQUIRE(events[idx].ident < manager->maxsocks);
#ifdef USE_WATCHER_THREAD
		/* Defer the control pipe until all sockets are handled. */
		if (events[idx].ident == (uintptr_t)manager->pipe_fds[0]) {
			ctl_seen = ISC_TRUE;
			continue;
		}
#endif
		process_fd(manager, events[idx].ident,
			   ISC_TF(events[idx].filter == EVFILT_READ),
			   ISC_TF(events[idx].filter == EVFILT_WRITE));
	}

#ifdef USE_WATCHER_THREAD
	if (ctl_seen)
		done = process_ctlfd(manager);
#endif

	return (done);
}
3641#elif defined(USE_EPOLL)
3642static isc_boolean_t
3643process_fds(isc__socketmgr_t *manager, struct epoll_event *events, int nevents)
3644{
3645	int i;
3646	isc_boolean_t done = ISC_FALSE;
3647#ifdef USE_WATCHER_THREAD
3648	isc_boolean_t have_ctlevent = ISC_FALSE;
3649#endif
3650
3651	if (nevents == manager->nevents) {
3652		manager_log(manager, ISC_LOGCATEGORY_GENERAL,
3653			    ISC_LOGMODULE_SOCKET, ISC_LOG_INFO,
3654			    "maximum number of FD events (%d) received",
3655			    nevents);
3656	}
3657
3658	for (i = 0; i < nevents; i++) {
3659		REQUIRE(events[i].data.fd < (int)manager->maxsocks);
3660#ifdef USE_WATCHER_THREAD
3661		if (events[i].data.fd == manager->pipe_fds[0]) {
3662			have_ctlevent = ISC_TRUE;
3663			continue;
3664		}
3665#endif
3666		if ((events[i].events & EPOLLERR) != 0 ||
3667		    (events[i].events & EPOLLHUP) != 0) {
3668			/*
3669			 * epoll does not set IN/OUT bits on an erroneous
3670			 * condition, so we need to try both anyway.  This is a
3671			 * bit inefficient, but should be okay for such rare
3672			 * events.  Note also that the read or write attempt
3673			 * won't block because we use non-blocking sockets.
3674			 */
3675			events[i].events |= (EPOLLIN | EPOLLOUT);
3676		}
3677		process_fd(manager, events[i].data.fd,
3678			   (events[i].events & EPOLLIN) != 0,
3679			   (events[i].events & EPOLLOUT) != 0);
3680	}
3681
3682#ifdef USE_WATCHER_THREAD
3683	if (have_ctlevent)
3684		done = process_ctlfd(manager);
3685#endif
3686
3687	return (done);
3688}
3689#elif defined(USE_DEVPOLL)
3690static isc_boolean_t
3691process_fds(isc__socketmgr_t *manager, struct pollfd *events, int nevents) {
3692	int i;
3693	isc_boolean_t done = ISC_FALSE;
3694#ifdef USE_WATCHER_THREAD
3695	isc_boolean_t have_ctlevent = ISC_FALSE;
3696#endif
3697
3698	if (nevents == manager->nevents) {
3699		manager_log(manager, ISC_LOGCATEGORY_GENERAL,
3700			    ISC_LOGMODULE_SOCKET, ISC_LOG_INFO,
3701			    "maximum number of FD events (%d) received",
3702			    nevents);
3703	}
3704
3705	for (i = 0; i < nevents; i++) {
3706		REQUIRE(events[i].fd < (int)manager->maxsocks);
3707#ifdef USE_WATCHER_THREAD
3708		if (events[i].fd == manager->pipe_fds[0]) {
3709			have_ctlevent = ISC_TRUE;
3710			continue;
3711		}
3712#endif
3713		process_fd(manager, events[i].fd,
3714			   (events[i].events & POLLIN) != 0,
3715			   (events[i].events & POLLOUT) != 0);
3716	}
3717
3718#ifdef USE_WATCHER_THREAD
3719	if (have_ctlevent)
3720		done = process_ctlfd(manager);
3721#endif
3722
3723	return (done);
3724}
3725#elif defined(USE_SELECT)
3726static void
3727process_fds(isc__socketmgr_t *manager, int maxfd, fd_set *readfds,
3728	    fd_set *writefds)
3729{
3730	int i;
3731
3732	REQUIRE(maxfd <= (int)manager->maxsocks);
3733
3734	for (i = 0; i < maxfd; i++) {
3735#ifdef USE_WATCHER_THREAD
3736		if (i == manager->pipe_fds[0] || i == manager->pipe_fds[1])
3737			continue;
3738#endif /* USE_WATCHER_THREAD */
3739		process_fd(manager, i, FD_ISSET(i, readfds),
3740			   FD_ISSET(i, writefds));
3741	}
3742}
3743#endif
3744
3745#ifdef USE_WATCHER_THREAD
3746static isc_boolean_t
3747process_ctlfd(isc__socketmgr_t *manager) {
3748	int msg, fd;
3749
3750	for (;;) {
3751		select_readmsg(manager, &fd, &msg);
3752
3753		manager_log(manager, IOEVENT,
3754			    isc_msgcat_get(isc_msgcat, ISC_MSGSET_SOCKET,
3755					   ISC_MSG_WATCHERMSG,
3756					   "watcher got message %d "
3757					   "for socket %d"), msg, fd);
3758
3759		/*
3760		 * Nothing to read?
3761		 */
3762		if (msg == SELECT_POKE_NOTHING)
3763			break;
3764
3765		/*
3766		 * Handle shutdown message.  We really should
3767		 * jump out of this loop right away, but
3768		 * it doesn't matter if we have to do a little
3769		 * more work first.
3770		 */
3771		if (msg == SELECT_POKE_SHUTDOWN)
3772			return (ISC_TRUE);
3773
3774		/*
3775		 * This is a wakeup on a socket.  Look
3776		 * at the event queue for both read and write,
3777		 * and decide if we need to watch on it now
3778		 * or not.
3779		 */
3780		wakeup_socket(manager, fd, msg);
3781	}
3782
3783	return (ISC_FALSE);
3784}
3785
3786/*
3787 * This is the thread that will loop forever, always in a select or poll
3788 * call.
3789 *
3790 * When select returns something to do, track down what thread gets to do
3791 * this I/O and post the event to it.
3792 */
static isc_threadresult_t
watcher(void *uap) {
	isc__socketmgr_t *manager = uap;
	isc_boolean_t done;
	int cc;
#ifdef USE_KQUEUE
	const char *fnname = "kevent()";
#elif defined (USE_EPOLL)
	const char *fnname = "epoll_wait()";
#elif defined(USE_DEVPOLL)
	const char *fnname = "ioctl(DP_POLL)";
	struct dvpoll dvp;
#elif defined (USE_SELECT)
	const char *fnname = "select()";
	int maxfd;
	int ctlfd;
#endif
	char strbuf[ISC_STRERRORSIZE];
#ifdef ISC_SOCKET_USE_POLLWATCH
	pollstate_t pollstate = poll_idle;
#endif

#if defined (USE_SELECT)
	/*
	 * Get the control fd here.  This will never change.
	 */
	ctlfd = manager->pipe_fds[0];
#endif
	done = ISC_FALSE;
	while (!done) {
		/* Block in the poll primitive until something happens. */
		do {
#ifdef USE_KQUEUE
			cc = kevent(manager->kqueue_fd, NULL, 0,
				    manager->events, manager->nevents, NULL);
#elif defined(USE_EPOLL)
			cc = epoll_wait(manager->epoll_fd, manager->events,
					manager->nevents, -1);
#elif defined(USE_DEVPOLL)
			dvp.dp_fds = manager->events;
			dvp.dp_nfds = manager->nevents;
#ifndef ISC_SOCKET_USE_POLLWATCH
			dvp.dp_timeout = -1;
#else
			/*
			 * POLLWATCH mode: poll with a timeout while in
			 * the checking/active states to work around a
			 * reported kernel bug (see below).
			 */
			if (pollstate == poll_idle)
				dvp.dp_timeout = -1;
			else
				dvp.dp_timeout = ISC_SOCKET_POLLWATCH_TIMEOUT;
#endif	/* ISC_SOCKET_USE_POLLWATCH */
			cc = ioctl(manager->devpoll_fd, DP_POLL, &dvp);
#elif defined(USE_SELECT)
			/*
			 * Snapshot the fd sets under the manager lock;
			 * select() operates on the copies so the master
			 * sets can be updated concurrently.
			 */
			LOCK(&manager->lock);
			memcpy(manager->read_fds_copy, manager->read_fds,
			       manager->fd_bufsize);
			memcpy(manager->write_fds_copy, manager->write_fds,
			       manager->fd_bufsize);
			maxfd = manager->maxfd + 1;
			UNLOCK(&manager->lock);

			cc = select(maxfd, manager->read_fds_copy,
				    manager->write_fds_copy, NULL, NULL);
#endif	/* USE_KQUEUE */

			/* Only soft errors (e.g. EINTR) are tolerated. */
			if (cc < 0 && !SOFT_ERROR(errno)) {
				isc__strerror(errno, strbuf, sizeof(strbuf));
				FATAL_ERROR(__FILE__, __LINE__,
					    "%s %s: %s", fnname,
					    isc_msgcat_get(isc_msgcat,
							   ISC_MSGSET_GENERAL,
							   ISC_MSG_FAILED,
							   "failed"), strbuf);
			}

#if defined(USE_DEVPOLL) && defined(ISC_SOCKET_USE_POLLWATCH)
			/*
			 * Track the idle/checking/active state machine:
			 * a timeout (cc == 0) while active suggests the
			 * kernel bug; two in a row return us to idle.
			 */
			if (cc == 0) {
				if (pollstate == poll_active)
					pollstate = poll_checking;
				else if (pollstate == poll_checking)
					pollstate = poll_idle;
			} else if (cc > 0) {
				if (pollstate == poll_checking) {
					/*
					 * XXX: We'd like to use a more
					 * verbose log level as it's actually an
					 * unexpected event, but the kernel bug
					 * reportedly happens pretty frequently
					 * (and it can also be a false positive)
					 * so it would be just too noisy.
					 */
					manager_log(manager,
						    ISC_LOGCATEGORY_GENERAL,
						    ISC_LOGMODULE_SOCKET,
						    ISC_LOG_DEBUG(1),
						    "unexpected POLL timeout");
				}
				pollstate = poll_active;
			}
#endif
		} while (cc < 0);

		/* Dispatch the ready fds; process_fds() reports shutdown. */
#if defined(USE_KQUEUE) || defined (USE_EPOLL) || defined (USE_DEVPOLL)
		done = process_fds(manager, manager->events, cc);
#elif defined(USE_SELECT)
		process_fds(manager, maxfd, manager->read_fds_copy,
			    manager->write_fds_copy);

		/*
		 * Process reads on internal, control fd.
		 */
		if (FD_ISSET(ctlfd, manager->read_fds_copy))
			done = process_ctlfd(manager);
#endif
	}

	manager_log(manager, TRACE, "%s",
		    isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
				   ISC_MSG_EXITING, "watcher exiting"));

	return ((isc_threadresult_t)0);
}
3912#endif /* USE_WATCHER_THREAD */
3913
3914#ifdef BIND9
/*
 * Record the number of file descriptors the application wants held in
 * reserve.  NOTE(review): the consumers of manager->reserved are
 * outside this view -- confirm how the reservation is enforced.
 */
ISC_SOCKETFUNC_SCOPE void
isc__socketmgr_setreserved(isc_socketmgr_t *manager0, isc_uint32_t reserved) {
	isc__socketmgr_t *manager = (isc__socketmgr_t *)manager0;

	REQUIRE(VALID_MANAGER(manager));

	manager->reserved = reserved;
}
3923
/*
 * Store the manager's maximum UDP message size.  NOTE(review): the
 * code that enforces maxudp is outside this view -- confirm its
 * semantics before relying on them.
 */
ISC_SOCKETFUNC_SCOPE void
isc___socketmgr_maxudp(isc_socketmgr_t *manager0, int maxudp) {
	isc__socketmgr_t *manager = (isc__socketmgr_t *)manager0;

	REQUIRE(VALID_MANAGER(manager));

	manager->maxudp = maxudp;
}
3932#endif	/* BIND9 */
3933
3934/*
3935 * Create a new socket manager.
3936 */
3937
/*
 * Allocate and initialize the polling backend (kqueue, epoll,
 * /dev/poll, or select) for a new manager and, on watcher-thread
 * builds, start watching the internal control pipe.  Every failure
 * path releases whatever was acquired before returning.
 */
static isc_result_t
setup_watcher(isc_mem_t *mctx, isc__socketmgr_t *manager) {
	isc_result_t result;
#if defined(USE_KQUEUE) || defined(USE_EPOLL) || defined(USE_DEVPOLL)
	char strbuf[ISC_STRERRORSIZE];
#endif

#ifdef USE_KQUEUE
	manager->nevents = ISC_SOCKET_MAXEVENTS;
	manager->events = isc_mem_get(mctx, sizeof(struct kevent) *
				      manager->nevents);
	if (manager->events == NULL)
		return (ISC_R_NOMEMORY);
	manager->kqueue_fd = kqueue();
	if (manager->kqueue_fd == -1) {
		result = isc__errno2result(errno);
		isc__strerror(errno, strbuf, sizeof(strbuf));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "kqueue %s: %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"),
				 strbuf);
		isc_mem_put(mctx, manager->events,
			    sizeof(struct kevent) * manager->nevents);
		return (result);
	}

#ifdef USE_WATCHER_THREAD
	/* Watch the read end of the control pipe for wakeup messages. */
	result = watch_fd(manager, manager->pipe_fds[0], SELECT_POKE_READ);
	if (result != ISC_R_SUCCESS) {
		close(manager->kqueue_fd);
		isc_mem_put(mctx, manager->events,
			    sizeof(struct kevent) * manager->nevents);
		return (result);
	}
#endif	/* USE_WATCHER_THREAD */
#elif defined(USE_EPOLL)
	manager->nevents = ISC_SOCKET_MAXEVENTS;
	manager->events = isc_mem_get(mctx, sizeof(struct epoll_event) *
				      manager->nevents);
	if (manager->events == NULL)
		return (ISC_R_NOMEMORY);
	manager->epoll_fd = epoll_create(manager->nevents);
	if (manager->epoll_fd == -1) {
		result = isc__errno2result(errno);
		isc__strerror(errno, strbuf, sizeof(strbuf));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "epoll_create %s: %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"),
				 strbuf);
		isc_mem_put(mctx, manager->events,
			    sizeof(struct epoll_event) * manager->nevents);
		return (result);
	}
#ifdef USE_WATCHER_THREAD
	/* Watch the read end of the control pipe for wakeup messages. */
	result = watch_fd(manager, manager->pipe_fds[0], SELECT_POKE_READ);
	if (result != ISC_R_SUCCESS) {
		close(manager->epoll_fd);
		isc_mem_put(mctx, manager->events,
			    sizeof(struct epoll_event) * manager->nevents);
		return (result);
	}
#endif	/* USE_WATCHER_THREAD */
#elif defined(USE_DEVPOLL)
	/*
	 * XXXJT: /dev/poll seems to reject large numbers of events,
	 * so we should be careful about redefining ISC_SOCKET_MAXEVENTS.
	 */
	manager->nevents = ISC_SOCKET_MAXEVENTS;
	manager->events = isc_mem_get(mctx, sizeof(struct pollfd) *
				      manager->nevents);
	if (manager->events == NULL)
		return (ISC_R_NOMEMORY);
	/*
	 * Note: fdpollinfo should be able to support all possible FDs, so
	 * it must have maxsocks entries (not nevents).
	 */
	manager->fdpollinfo = isc_mem_get(mctx, sizeof(pollinfo_t) *
					  manager->maxsocks);
	if (manager->fdpollinfo == NULL) {
		isc_mem_put(mctx, manager->events,
			    sizeof(struct pollfd) * manager->nevents);
		return (ISC_R_NOMEMORY);
	}
	memset(manager->fdpollinfo, 0, sizeof(pollinfo_t) * manager->maxsocks);
	manager->devpoll_fd = open("/dev/poll", O_RDWR);
	if (manager->devpoll_fd == -1) {
		result = isc__errno2result(errno);
		isc__strerror(errno, strbuf, sizeof(strbuf));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "open(/dev/poll) %s: %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"),
				 strbuf);
		isc_mem_put(mctx, manager->events,
			    sizeof(struct pollfd) * manager->nevents);
		isc_mem_put(mctx, manager->fdpollinfo,
			    sizeof(pollinfo_t) * manager->maxsocks);
		return (result);
	}
#ifdef USE_WATCHER_THREAD
	/* Watch the read end of the control pipe for wakeup messages. */
	result = watch_fd(manager, manager->pipe_fds[0], SELECT_POKE_READ);
	if (result != ISC_R_SUCCESS) {
		close(manager->devpoll_fd);
		isc_mem_put(mctx, manager->events,
			    sizeof(struct pollfd) * manager->nevents);
		isc_mem_put(mctx, manager->fdpollinfo,
			    sizeof(pollinfo_t) * manager->maxsocks);
		return (result);
	}
#endif	/* USE_WATCHER_THREAD */
#elif defined(USE_SELECT)
	UNUSED(result);

#if ISC_SOCKET_MAXSOCKETS > FD_SETSIZE
	/*
	 * Note: this code should also cover the case of MAXSOCKETS <=
	 * FD_SETSIZE, but we separate the cases to avoid possible portability
	 * issues regarding howmany() and the actual representation of fd_set.
	 */
	manager->fd_bufsize = howmany(manager->maxsocks, NFDBITS) *
		sizeof(fd_mask);
#else
	manager->fd_bufsize = sizeof(fd_set);
#endif

	manager->read_fds = NULL;
	manager->read_fds_copy = NULL;
	manager->write_fds = NULL;
	manager->write_fds_copy = NULL;

	/*
	 * Allocate the four fd-set buffers in a cascade; each step only
	 * proceeds if the previous allocation succeeded, so the failure
	 * cleanup below can test each pointer individually.
	 */
	manager->read_fds = isc_mem_get(mctx, manager->fd_bufsize);
	if (manager->read_fds != NULL)
		manager->read_fds_copy = isc_mem_get(mctx, manager->fd_bufsize);
	if (manager->read_fds_copy != NULL)
		manager->write_fds = isc_mem_get(mctx, manager->fd_bufsize);
	if (manager->write_fds != NULL) {
		manager->write_fds_copy = isc_mem_get(mctx,
						      manager->fd_bufsize);
	}
	if (manager->write_fds_copy == NULL) {
		if (manager->write_fds != NULL) {
			isc_mem_put(mctx, manager->write_fds,
				    manager->fd_bufsize);
		}
		if (manager->read_fds_copy != NULL) {
			isc_mem_put(mctx, manager->read_fds_copy,
				    manager->fd_bufsize);
		}
		if (manager->read_fds != NULL) {
			isc_mem_put(mctx, manager->read_fds,
				    manager->fd_bufsize);
		}
		return (ISC_R_NOMEMORY);
	}
	memset(manager->read_fds, 0, manager->fd_bufsize);
	memset(manager->write_fds, 0, manager->fd_bufsize);

#ifdef USE_WATCHER_THREAD
	(void)watch_fd(manager, manager->pipe_fds[0], SELECT_POKE_READ);
	manager->maxfd = manager->pipe_fds[0];
#else /* USE_WATCHER_THREAD */
	manager->maxfd = 0;
#endif /* USE_WATCHER_THREAD */
#endif	/* USE_KQUEUE */

	return (ISC_R_SUCCESS);
}
4107
/*
 * Tear down whatever setup_watcher() built for the active polling
 * backend: stop watching the control pipe, close the backend fd, and
 * free the event (and fd-set/pollinfo) buffers.
 */
static void
cleanup_watcher(isc_mem_t *mctx, isc__socketmgr_t *manager) {
#ifdef USE_WATCHER_THREAD
	isc_result_t result;

	result = unwatch_fd(manager, manager->pipe_fds[0], SELECT_POKE_READ);
	if (result != ISC_R_SUCCESS) {
		/*
		 * NOTE(review): this message names epoll_ctl(DEL) even
		 * on kqueue//dev/poll/select builds -- misleading but
		 * harmless; confirm before changing the log text.
		 */
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "epoll_ctl(DEL) %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
	}
#endif	/* USE_WATCHER_THREAD */

#ifdef USE_KQUEUE
	close(manager->kqueue_fd);
	isc_mem_put(mctx, manager->events,
		    sizeof(struct kevent) * manager->nevents);
#elif defined(USE_EPOLL)
	close(manager->epoll_fd);
	isc_mem_put(mctx, manager->events,
		    sizeof(struct epoll_event) * manager->nevents);
#elif defined(USE_DEVPOLL)
	close(manager->devpoll_fd);
	isc_mem_put(mctx, manager->events,
		    sizeof(struct pollfd) * manager->nevents);
	isc_mem_put(mctx, manager->fdpollinfo,
		    sizeof(pollinfo_t) * manager->maxsocks);
#elif defined(USE_SELECT)
	/* The fd-set buffers may be partially allocated; test each. */
	if (manager->read_fds != NULL)
		isc_mem_put(mctx, manager->read_fds, manager->fd_bufsize);
	if (manager->read_fds_copy != NULL)
		isc_mem_put(mctx, manager->read_fds_copy, manager->fd_bufsize);
	if (manager->write_fds != NULL)
		isc_mem_put(mctx, manager->write_fds, manager->fd_bufsize);
	if (manager->write_fds_copy != NULL)
		isc_mem_put(mctx, manager->write_fds_copy, manager->fd_bufsize);
#endif	/* USE_KQUEUE */
}
4147
/*
 * Create a socket manager with the default socket limit; thin wrapper
 * around isc__socketmgr_create2() with maxsocks == 0.
 */
ISC_SOCKETFUNC_SCOPE isc_result_t
isc__socketmgr_create(isc_mem_t *mctx, isc_socketmgr_t **managerp) {
	return (isc__socketmgr_create2(mctx, managerp, 0));
}
4152
4153ISC_SOCKETFUNC_SCOPE isc_result_t
4154isc__socketmgr_create2(isc_mem_t *mctx, isc_socketmgr_t **managerp,
4155		       unsigned int maxsocks)
4156{
4157	int i;
4158	isc__socketmgr_t *manager;
4159#ifdef USE_WATCHER_THREAD
4160	char strbuf[ISC_STRERRORSIZE];
4161#endif
4162	isc_result_t result;
4163
4164	REQUIRE(managerp != NULL && *managerp == NULL);
4165
4166#ifdef USE_SHARED_MANAGER
4167	if (socketmgr != NULL) {
4168		/* Don't allow maxsocks to be updated */
4169		if (maxsocks > 0 && socketmgr->maxsocks != maxsocks)
4170			return (ISC_R_EXISTS);
4171
4172		socketmgr->refs++;
4173		*managerp = (isc_socketmgr_t *)socketmgr;
4174		return (ISC_R_SUCCESS);
4175	}
4176#endif /* USE_SHARED_MANAGER */
4177
4178	if (maxsocks == 0)
4179		maxsocks = ISC_SOCKET_MAXSOCKETS;
4180
4181	manager = isc_mem_get(mctx, sizeof(*manager));
4182	if (manager == NULL)
4183		return (ISC_R_NOMEMORY);
4184
4185	/* zero-clear so that necessary cleanup on failure will be easy */
4186	memset(manager, 0, sizeof(*manager));
4187	manager->maxsocks = maxsocks;
4188	manager->reserved = 0;
4189	manager->maxudp = 0;
4190	manager->fds = isc_mem_get(mctx,
4191				   manager->maxsocks * sizeof(isc__socket_t *));
4192	if (manager->fds == NULL) {
4193		result = ISC_R_NOMEMORY;
4194		goto free_manager;
4195	}
4196	manager->fdstate = isc_mem_get(mctx, manager->maxsocks * sizeof(int));
4197	if (manager->fdstate == NULL) {
4198		result = ISC_R_NOMEMORY;
4199		goto free_manager;
4200	}
4201	manager->stats = NULL;
4202
4203	manager->common.methods = &socketmgrmethods;
4204	manager->common.magic = ISCAPI_SOCKETMGR_MAGIC;
4205	manager->common.impmagic = SOCKET_MANAGER_MAGIC;
4206	manager->mctx = NULL;
4207	memset(manager->fds, 0, manager->maxsocks * sizeof(isc_socket_t *));
4208	ISC_LIST_INIT(manager->socklist);
4209	result = isc_mutex_init(&manager->lock);
4210	if (result != ISC_R_SUCCESS)
4211		goto free_manager;
4212	manager->fdlock = isc_mem_get(mctx, FDLOCK_COUNT * sizeof(isc_mutex_t));
4213	if (manager->fdlock == NULL) {
4214		result = ISC_R_NOMEMORY;
4215		goto cleanup_lock;
4216	}
4217	for (i = 0; i < FDLOCK_COUNT; i++) {
4218		result = isc_mutex_init(&manager->fdlock[i]);
4219		if (result != ISC_R_SUCCESS) {
4220			while (--i >= 0)
4221				DESTROYLOCK(&manager->fdlock[i]);
4222			isc_mem_put(mctx, manager->fdlock,
4223				    FDLOCK_COUNT * sizeof(isc_mutex_t));
4224			manager->fdlock = NULL;
4225			goto cleanup_lock;
4226		}
4227	}
4228
4229#ifdef USE_WATCHER_THREAD
4230	if (isc_condition_init(&manager->shutdown_ok) != ISC_R_SUCCESS) {
4231		UNEXPECTED_ERROR(__FILE__, __LINE__,
4232				 "isc_condition_init() %s",
4233				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
4234						ISC_MSG_FAILED, "failed"));
4235		result = ISC_R_UNEXPECTED;
4236		goto cleanup_lock;
4237	}
4238
4239	/*
4240	 * Create the special fds that will be used to wake up the
4241	 * select/poll loop when something internal needs to be done.
4242	 */
4243	if (pipe(manager->pipe_fds) != 0) {
4244		isc__strerror(errno, strbuf, sizeof(strbuf));
4245		UNEXPECTED_ERROR(__FILE__, __LINE__,
4246				 "pipe() %s: %s",
4247				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
4248						ISC_MSG_FAILED, "failed"),
4249				 strbuf);
4250		result = ISC_R_UNEXPECTED;
4251		goto cleanup_condition;
4252	}
4253
4254	RUNTIME_CHECK(make_nonblock(manager->pipe_fds[0]) == ISC_R_SUCCESS);
4255#if 0
4256	RUNTIME_CHECK(make_nonblock(manager->pipe_fds[1]) == ISC_R_SUCCESS);
4257#endif
4258#endif	/* USE_WATCHER_THREAD */
4259
4260#ifdef USE_SHARED_MANAGER
4261	manager->refs = 1;
4262#endif /* USE_SHARED_MANAGER */
4263
4264	/*
4265	 * Set up initial state for the select loop
4266	 */
4267	result = setup_watcher(mctx, manager);
4268	if (result != ISC_R_SUCCESS)
4269		goto cleanup;
4270	memset(manager->fdstate, 0, manager->maxsocks * sizeof(int));
4271#ifdef USE_WATCHER_THREAD
4272	/*
4273	 * Start up the select/poll thread.
4274	 */
4275	if (isc_thread_create(watcher, manager, &manager->watcher) !=
4276	    ISC_R_SUCCESS) {
4277		UNEXPECTED_ERROR(__FILE__, __LINE__,
4278				 "isc_thread_create() %s",
4279				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
4280						ISC_MSG_FAILED, "failed"));
4281		cleanup_watcher(mctx, manager);
4282		result = ISC_R_UNEXPECTED;
4283		goto cleanup;
4284	}
4285#endif /* USE_WATCHER_THREAD */
4286	isc_mem_attach(mctx, &manager->mctx);
4287
4288#ifdef USE_SHARED_MANAGER
4289	socketmgr = manager;
4290#endif /* USE_SHARED_MANAGER */
4291	*managerp = (isc_socketmgr_t *)manager;
4292
4293	return (ISC_R_SUCCESS);
4294
4295cleanup:
4296#ifdef USE_WATCHER_THREAD
4297	(void)close(manager->pipe_fds[0]);
4298	(void)close(manager->pipe_fds[1]);
4299#endif	/* USE_WATCHER_THREAD */
4300
4301#ifdef USE_WATCHER_THREAD
4302cleanup_condition:
4303	(void)isc_condition_destroy(&manager->shutdown_ok);
4304#endif	/* USE_WATCHER_THREAD */
4305
4306
4307cleanup_lock:
4308	if (manager->fdlock != NULL) {
4309		for (i = 0; i < FDLOCK_COUNT; i++)
4310			DESTROYLOCK(&manager->fdlock[i]);
4311	}
4312	DESTROYLOCK(&manager->lock);
4313
4314free_manager:
4315	if (manager->fdlock != NULL) {
4316		isc_mem_put(mctx, manager->fdlock,
4317			    FDLOCK_COUNT * sizeof(isc_mutex_t));
4318	}
4319	if (manager->fdstate != NULL) {
4320		isc_mem_put(mctx, manager->fdstate,
4321			    manager->maxsocks * sizeof(int));
4322	}
4323	if (manager->fds != NULL) {
4324		isc_mem_put(mctx, manager->fds,
4325			    manager->maxsocks * sizeof(isc_socket_t *));
4326	}
4327	isc_mem_put(mctx, manager, sizeof(*manager));
4328
4329	return (result);
4330}
4331
4332#ifdef BIND9
4333isc_result_t
4334isc__socketmgr_getmaxsockets(isc_socketmgr_t *manager0, unsigned int *nsockp) {
4335	isc__socketmgr_t *manager = (isc__socketmgr_t *)manager0;
4336	REQUIRE(VALID_MANAGER(manager));
4337	REQUIRE(nsockp != NULL);
4338
4339	*nsockp = manager->maxsocks;
4340
4341	return (ISC_R_SUCCESS);
4342}
4343
4344void
4345isc__socketmgr_setstats(isc_socketmgr_t *manager0, isc_stats_t *stats) {
4346	isc__socketmgr_t *manager = (isc__socketmgr_t *)manager0;
4347
4348	REQUIRE(VALID_MANAGER(manager));
4349	REQUIRE(ISC_LIST_EMPTY(manager->socklist));
4350	REQUIRE(manager->stats == NULL);
4351	REQUIRE(isc_stats_ncounters(stats) == isc_sockstatscounter_max);
4352
4353	isc_stats_attach(stats, &manager->stats);
4354}
4355#endif
4356
ISC_SOCKETFUNC_SCOPE void
isc__socketmgr_destroy(isc_socketmgr_t **managerp) {
	isc__socketmgr_t *manager;
	int i;
	isc_mem_t *mctx;

	/*
	 * Destroy a socket manager.
	 *
	 * Blocks until every socket owned by the manager has been
	 * destroyed, then shuts down the watcher (thread), closes any
	 * deferred descriptors, and releases all manager resources.
	 * On return *managerp is NULL.
	 */

	REQUIRE(managerp != NULL);
	manager = (isc__socketmgr_t *)*managerp;
	REQUIRE(VALID_MANAGER(manager));

#ifdef USE_SHARED_MANAGER
	/* Shared manager: only the last detach tears anything down. */
	manager->refs--;
	if (manager->refs > 0) {
		*managerp = NULL;
		return;
	}
	socketmgr = NULL;
#endif /* USE_SHARED_MANAGER */

	LOCK(&manager->lock);

	/*
	 * Wait for all sockets to be destroyed.
	 */
	while (!ISC_LIST_EMPTY(manager->socklist)) {
#ifdef USE_WATCHER_THREAD
		/* Threaded: sleep until socket destruction signals us. */
		manager_log(manager, CREATION, "%s",
			    isc_msgcat_get(isc_msgcat, ISC_MSGSET_SOCKET,
					   ISC_MSG_SOCKETSREMAIN,
					   "sockets exist"));
		WAIT(&manager->shutdown_ok, &manager->lock);
#else /* USE_WATCHER_THREAD */
		/* Non-threaded: drive the task manager ourselves so the
		 * pending socket-destruction events actually run. */
		UNLOCK(&manager->lock);
		isc__taskmgr_dispatch(NULL);
		LOCK(&manager->lock);
#endif /* USE_WATCHER_THREAD */
	}

	UNLOCK(&manager->lock);

	/*
	 * Here, poke our select/poll thread.  Do this by closing the write
	 * half of the pipe, which will send EOF to the read half.
	 * This is currently a no-op in the non-threaded case.
	 */
	select_poke(manager, 0, SELECT_POKE_SHUTDOWN);

#ifdef USE_WATCHER_THREAD
	/*
	 * Wait for thread to exit.
	 */
	if (isc_thread_join(manager->watcher, NULL) != ISC_R_SUCCESS)
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_thread_join() %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
#endif /* USE_WATCHER_THREAD */

	/*
	 * Clean up.
	 */
	cleanup_watcher(manager->mctx, manager);

#ifdef USE_WATCHER_THREAD
	(void)close(manager->pipe_fds[0]);
	(void)close(manager->pipe_fds[1]);
	(void)isc_condition_destroy(&manager->shutdown_ok);
#endif /* USE_WATCHER_THREAD */

	/* Close descriptors whose close was deferred to the watcher. */
	for (i = 0; i < (int)manager->maxsocks; i++)
		if (manager->fdstate[i] == CLOSE_PENDING) /* no need to lock */
			(void)close(i);

	isc_mem_put(manager->mctx, manager->fds,
		    manager->maxsocks * sizeof(isc__socket_t *));
	isc_mem_put(manager->mctx, manager->fdstate,
		    manager->maxsocks * sizeof(int));

	if (manager->stats != NULL)
		isc_stats_detach(&manager->stats);

	if (manager->fdlock != NULL) {
		for (i = 0; i < FDLOCK_COUNT; i++)
			DESTROYLOCK(&manager->fdlock[i]);
		isc_mem_put(manager->mctx, manager->fdlock,
			    FDLOCK_COUNT * sizeof(isc_mutex_t));
	}
	DESTROYLOCK(&manager->lock);
	manager->common.magic = 0;
	manager->common.impmagic = 0;
	/* Save the mctx reference: the manager struct must be returned to
	 * the context before the context itself is detached. */
	mctx= manager->mctx;
	isc_mem_put(mctx, manager, sizeof(*manager));

	isc_mem_detach(&mctx);

	*managerp = NULL;

#ifdef USE_SHARED_MANAGER
	socketmgr = NULL;
#endif
}
4462
/*
 * Common back end for the public recv entry points.
 *
 * Attempts the read immediately for UDP sockets, or for stream sockets
 * whose receive queue is empty; otherwise (DOIO_SOFT) the event is
 * queued on sock->recv_list and the watcher is poked to monitor the fd
 * for readability.
 *
 * Returns ISC_R_SUCCESS normally, or ISC_R_INPROGRESS when the caller
 * passed ISC_SOCKFLAG_IMMEDIATE but the request had to be queued.
 * On completion paths the done event is delivered via
 * send_recvdone_event() unless IMMEDIATE delivery was requested.
 */
static isc_result_t
socket_recv(isc__socket_t *sock, isc_socketevent_t *dev, isc_task_t *task,
	    unsigned int flags)
{
	int io_state;
	isc_boolean_t have_lock = ISC_FALSE;
	isc_task_t *ntask = NULL;
	isc_result_t result = ISC_R_SUCCESS;

	dev->ev_sender = task;

	if (sock->type == isc_sockettype_udp) {
		/* UDP reads are independent; no queue ordering to keep. */
		io_state = doio_recv(sock, dev);
	} else {
		LOCK(&sock->lock);
		have_lock = ISC_TRUE;

		/* Preserve ordering on stream sockets: only read now if
		 * no earlier request is already queued. */
		if (ISC_LIST_EMPTY(sock->recv_list))
			io_state = doio_recv(sock, dev);
		else
			io_state = DOIO_SOFT;
	}

	switch (io_state) {
	case DOIO_SOFT:
		/*
		 * We couldn't read all or part of the request right now, so
		 * queue it.
		 *
		 * Attach to socket and to task
		 */
		isc_task_attach(task, &ntask);
		dev->attributes |= ISC_SOCKEVENTATTR_ATTACHED;

		if (!have_lock) {
			LOCK(&sock->lock);
			have_lock = ISC_TRUE;
		}

		/*
		 * Enqueue the request.  If the socket was previously not being
		 * watched, poke the watcher to start paying attention to it.
		 */
		if (ISC_LIST_EMPTY(sock->recv_list) && !sock->pending_recv)
			select_poke(sock->manager, sock->fd, SELECT_POKE_READ);
		ISC_LIST_ENQUEUE(sock->recv_list, dev, ev_link);

		socket_log(sock, NULL, EVENT, NULL, 0, 0,
			   "socket_recv: event %p -> task %p",
			   dev, ntask);

		if ((flags & ISC_SOCKFLAG_IMMEDIATE) != 0)
			result = ISC_R_INPROGRESS;
		break;

	case DOIO_EOF:
		dev->result = ISC_R_EOF;
		/* fallthrough */

	case DOIO_HARD:
	case DOIO_SUCCESS:
		/* Completed (or failed) right away: deliver the event
		 * unless the caller will consume the result in-line. */
		if ((flags & ISC_SOCKFLAG_IMMEDIATE) == 0)
			send_recvdone_event(sock, &dev);
		break;
	}

	if (have_lock)
		UNLOCK(&sock->lock);

	return (result);
}
4534
4535ISC_SOCKETFUNC_SCOPE isc_result_t
4536isc__socket_recvv(isc_socket_t *sock0, isc_bufferlist_t *buflist,
4537		  unsigned int minimum, isc_task_t *task,
4538		  isc_taskaction_t action, const void *arg)
4539{
4540	isc__socket_t *sock = (isc__socket_t *)sock0;
4541	isc_socketevent_t *dev;
4542	isc__socketmgr_t *manager;
4543	unsigned int iocount;
4544	isc_buffer_t *buffer;
4545
4546	REQUIRE(VALID_SOCKET(sock));
4547	REQUIRE(buflist != NULL);
4548	REQUIRE(!ISC_LIST_EMPTY(*buflist));
4549	REQUIRE(task != NULL);
4550	REQUIRE(action != NULL);
4551
4552	manager = sock->manager;
4553	REQUIRE(VALID_MANAGER(manager));
4554
4555	iocount = isc_bufferlist_availablecount(buflist);
4556	REQUIRE(iocount > 0);
4557
4558	INSIST(sock->bound);
4559
4560	dev = allocate_socketevent(sock, ISC_SOCKEVENT_RECVDONE, action, arg);
4561	if (dev == NULL)
4562		return (ISC_R_NOMEMORY);
4563
4564	/*
4565	 * UDP sockets are always partial read
4566	 */
4567	if (sock->type == isc_sockettype_udp)
4568		dev->minimum = 1;
4569	else {
4570		if (minimum == 0)
4571			dev->minimum = iocount;
4572		else
4573			dev->minimum = minimum;
4574	}
4575
4576	/*
4577	 * Move each buffer from the passed in list to our internal one.
4578	 */
4579	buffer = ISC_LIST_HEAD(*buflist);
4580	while (buffer != NULL) {
4581		ISC_LIST_DEQUEUE(*buflist, buffer, link);
4582		ISC_LIST_ENQUEUE(dev->bufferlist, buffer, link);
4583		buffer = ISC_LIST_HEAD(*buflist);
4584	}
4585
4586	return (socket_recv(sock, dev, task, 0));
4587}
4588
4589ISC_SOCKETFUNC_SCOPE isc_result_t
4590isc__socket_recv(isc_socket_t *sock0, isc_region_t *region,
4591		 unsigned int minimum, isc_task_t *task,
4592		 isc_taskaction_t action, const void *arg)
4593{
4594	isc__socket_t *sock = (isc__socket_t *)sock0;
4595	isc_socketevent_t *dev;
4596	isc__socketmgr_t *manager;
4597
4598	REQUIRE(VALID_SOCKET(sock));
4599	REQUIRE(action != NULL);
4600
4601	manager = sock->manager;
4602	REQUIRE(VALID_MANAGER(manager));
4603
4604	INSIST(sock->bound);
4605
4606	dev = allocate_socketevent(sock, ISC_SOCKEVENT_RECVDONE, action, arg);
4607	if (dev == NULL)
4608		return (ISC_R_NOMEMORY);
4609
4610	return (isc__socket_recv2(sock0, region, minimum, task, dev, 0));
4611}
4612
4613ISC_SOCKETFUNC_SCOPE isc_result_t
4614isc__socket_recv2(isc_socket_t *sock0, isc_region_t *region,
4615		  unsigned int minimum, isc_task_t *task,
4616		  isc_socketevent_t *event, unsigned int flags)
4617{
4618	isc__socket_t *sock = (isc__socket_t *)sock0;
4619
4620	event->ev_sender = sock;
4621	event->result = ISC_R_UNSET;
4622	ISC_LIST_INIT(event->bufferlist);
4623	event->region = *region;
4624	event->n = 0;
4625	event->offset = 0;
4626	event->attributes = 0;
4627
4628	/*
4629	 * UDP sockets are always partial read.
4630	 */
4631	if (sock->type == isc_sockettype_udp)
4632		event->minimum = 1;
4633	else {
4634		if (minimum == 0)
4635			event->minimum = region->length;
4636		else
4637			event->minimum = minimum;
4638	}
4639
4640	return (socket_recv(sock, event, task, flags));
4641}
4642
/*
 * Common back end for the public send entry points.
 *
 * Attempts the write immediately for UDP sockets, or for stream sockets
 * whose send queue is empty; otherwise (DOIO_SOFT) the event is queued
 * on sock->send_list — unless ISC_SOCKFLAG_NORETRY is set, in which
 * case the event is delivered right away with its soft-error result.
 *
 * Returns ISC_R_SUCCESS normally, or ISC_R_INPROGRESS when the caller
 * passed ISC_SOCKFLAG_IMMEDIATE but the request had to be queued.
 */
static isc_result_t
socket_send(isc__socket_t *sock, isc_socketevent_t *dev, isc_task_t *task,
	    isc_sockaddr_t *address, struct in6_pktinfo *pktinfo,
	    unsigned int flags)
{
	int io_state;
	isc_boolean_t have_lock = ISC_FALSE;
	isc_task_t *ntask = NULL;
	isc_result_t result = ISC_R_SUCCESS;

	dev->ev_sender = task;

	set_dev_address(address, sock, dev);
	if (pktinfo != NULL) {
		dev->attributes |= ISC_SOCKEVENTATTR_PKTINFO;
		dev->pktinfo = *pktinfo;

		if (!isc_sockaddr_issitelocal(&dev->address) &&
		    !isc_sockaddr_islinklocal(&dev->address)) {
			socket_log(sock, NULL, TRACE, isc_msgcat,
				   ISC_MSGSET_SOCKET, ISC_MSG_PKTINFOPROVIDED,
				   "pktinfo structure provided, ifindex %u "
				   "(set to 0)", pktinfo->ipi6_ifindex);

			/*
			 * Set the pktinfo index to 0 here, to let the
			 * kernel decide what interface it should send on.
			 */
			dev->pktinfo.ipi6_ifindex = 0;
		}
	}

	if (sock->type == isc_sockettype_udp)
		io_state = doio_send(sock, dev);
	else {
		LOCK(&sock->lock);
		have_lock = ISC_TRUE;

		/* Preserve ordering on stream sockets: only write now if
		 * no earlier request is already queued. */
		if (ISC_LIST_EMPTY(sock->send_list))
			io_state = doio_send(sock, dev);
		else
			io_state = DOIO_SOFT;
	}

	switch (io_state) {
	case DOIO_SOFT:
		/*
		 * We couldn't send all or part of the request right now, so
		 * queue it unless ISC_SOCKFLAG_NORETRY is set.
		 */
		if ((flags & ISC_SOCKFLAG_NORETRY) == 0) {
			isc_task_attach(task, &ntask);
			dev->attributes |= ISC_SOCKEVENTATTR_ATTACHED;

			if (!have_lock) {
				LOCK(&sock->lock);
				have_lock = ISC_TRUE;
			}

			/*
			 * Enqueue the request.  If the socket was previously
			 * not being watched, poke the watcher to start
			 * paying attention to it.
			 */
			if (ISC_LIST_EMPTY(sock->send_list) &&
			    !sock->pending_send)
				select_poke(sock->manager, sock->fd,
					    SELECT_POKE_WRITE);
			ISC_LIST_ENQUEUE(sock->send_list, dev, ev_link);

			socket_log(sock, NULL, EVENT, NULL, 0, 0,
				   "socket_send: event %p -> task %p",
				   dev, ntask);

			if ((flags & ISC_SOCKFLAG_IMMEDIATE) != 0)
				result = ISC_R_INPROGRESS;
			break;
		}

		/* FALLTHROUGH: with ISC_SOCKFLAG_NORETRY set, a soft
		 * error is delivered immediately just like a completed
		 * send — the caller asked us not to queue and retry. */
	case DOIO_HARD:
	case DOIO_SUCCESS:
		if ((flags & ISC_SOCKFLAG_IMMEDIATE) == 0)
			send_senddone_event(sock, &dev);
		break;
	}

	if (have_lock)
		UNLOCK(&sock->lock);

	return (result);
}
4734
4735ISC_SOCKETFUNC_SCOPE isc_result_t
4736isc__socket_send(isc_socket_t *sock, isc_region_t *region,
4737		 isc_task_t *task, isc_taskaction_t action, const void *arg)
4738{
4739	/*
4740	 * REQUIRE() checking is performed in isc_socket_sendto().
4741	 */
4742	return (isc__socket_sendto(sock, region, task, action, arg, NULL,
4743				   NULL));
4744}
4745
4746ISC_SOCKETFUNC_SCOPE isc_result_t
4747isc__socket_sendto(isc_socket_t *sock0, isc_region_t *region,
4748		   isc_task_t *task, isc_taskaction_t action, const void *arg,
4749		   isc_sockaddr_t *address, struct in6_pktinfo *pktinfo)
4750{
4751	isc__socket_t *sock = (isc__socket_t *)sock0;
4752	isc_socketevent_t *dev;
4753	isc__socketmgr_t *manager;
4754
4755	REQUIRE(VALID_SOCKET(sock));
4756	REQUIRE(region != NULL);
4757	REQUIRE(task != NULL);
4758	REQUIRE(action != NULL);
4759
4760	manager = sock->manager;
4761	REQUIRE(VALID_MANAGER(manager));
4762
4763	INSIST(sock->bound);
4764
4765	dev = allocate_socketevent(sock, ISC_SOCKEVENT_SENDDONE, action, arg);
4766	if (dev == NULL)
4767		return (ISC_R_NOMEMORY);
4768
4769	dev->region = *region;
4770
4771	return (socket_send(sock, dev, task, address, pktinfo, 0));
4772}
4773
4774ISC_SOCKETFUNC_SCOPE isc_result_t
4775isc__socket_sendv(isc_socket_t *sock, isc_bufferlist_t *buflist,
4776		  isc_task_t *task, isc_taskaction_t action, const void *arg)
4777{
4778	return (isc__socket_sendtov(sock, buflist, task, action, arg, NULL,
4779				    NULL));
4780}
4781
4782ISC_SOCKETFUNC_SCOPE isc_result_t
4783isc__socket_sendtov(isc_socket_t *sock0, isc_bufferlist_t *buflist,
4784		    isc_task_t *task, isc_taskaction_t action, const void *arg,
4785		    isc_sockaddr_t *address, struct in6_pktinfo *pktinfo)
4786{
4787	isc__socket_t *sock = (isc__socket_t *)sock0;
4788	isc_socketevent_t *dev;
4789	isc__socketmgr_t *manager;
4790	unsigned int iocount;
4791	isc_buffer_t *buffer;
4792
4793	REQUIRE(VALID_SOCKET(sock));
4794	REQUIRE(buflist != NULL);
4795	REQUIRE(!ISC_LIST_EMPTY(*buflist));
4796	REQUIRE(task != NULL);
4797	REQUIRE(action != NULL);
4798
4799	manager = sock->manager;
4800	REQUIRE(VALID_MANAGER(manager));
4801
4802	iocount = isc_bufferlist_usedcount(buflist);
4803	REQUIRE(iocount > 0);
4804
4805	dev = allocate_socketevent(sock, ISC_SOCKEVENT_SENDDONE, action, arg);
4806	if (dev == NULL)
4807		return (ISC_R_NOMEMORY);
4808
4809	/*
4810	 * Move each buffer from the passed in list to our internal one.
4811	 */
4812	buffer = ISC_LIST_HEAD(*buflist);
4813	while (buffer != NULL) {
4814		ISC_LIST_DEQUEUE(*buflist, buffer, link);
4815		ISC_LIST_ENQUEUE(dev->bufferlist, buffer, link);
4816		buffer = ISC_LIST_HEAD(*buflist);
4817	}
4818
4819	return (socket_send(sock, dev, task, address, pktinfo, 0));
4820}
4821
4822ISC_SOCKETFUNC_SCOPE isc_result_t
4823isc__socket_sendto2(isc_socket_t *sock0, isc_region_t *region,
4824		    isc_task_t *task,
4825		    isc_sockaddr_t *address, struct in6_pktinfo *pktinfo,
4826		    isc_socketevent_t *event, unsigned int flags)
4827{
4828	isc__socket_t *sock = (isc__socket_t *)sock0;
4829
4830	REQUIRE(VALID_SOCKET(sock));
4831	REQUIRE((flags & ~(ISC_SOCKFLAG_IMMEDIATE|ISC_SOCKFLAG_NORETRY)) == 0);
4832	if ((flags & ISC_SOCKFLAG_NORETRY) != 0)
4833		REQUIRE(sock->type == isc_sockettype_udp);
4834	event->ev_sender = sock;
4835	event->result = ISC_R_UNSET;
4836	ISC_LIST_INIT(event->bufferlist);
4837	event->region = *region;
4838	event->n = 0;
4839	event->offset = 0;
4840	event->attributes = 0;
4841
4842	return (socket_send(sock, event, task, address, pktinfo, flags));
4843}
4844
/*
 * Remove a stale AF_UNIX socket file.
 *
 * active == ISC_TRUE: we own the path (shutdown case) — verify it is a
 * socket or fifo and unlink it unconditionally.
 *
 * active == ISC_FALSE: we are about to bind() — unlink the path only
 * if connecting to it fails with ECONNREFUSED/ECONNRESET, i.e. no live
 * server is listening behind it.
 *
 * No-op (logs nothing) for non-AF_UNIX addresses, and entirely compiled
 * out on platforms without <sys/un.h>.
 */
ISC_SOCKETFUNC_SCOPE void
isc__socket_cleanunix(isc_sockaddr_t *sockaddr, isc_boolean_t active) {
#ifdef ISC_PLATFORM_HAVESYSUNH
	int s;
	struct stat sb;
	char strbuf[ISC_STRERRORSIZE];

	if (sockaddr->type.sa.sa_family != AF_UNIX)
		return;

/*
 * Portability shims: synthesize S_ISSOCK/S_ISFIFO from the raw mode
 * bits on systems whose <sys/stat.h> lacks the test macros.
 */
#ifndef S_ISSOCK
#if defined(S_IFMT) && defined(S_IFSOCK)
#define S_ISSOCK(mode) ((mode & S_IFMT)==S_IFSOCK)
#elif defined(_S_IFMT) && defined(S_IFSOCK)
#define S_ISSOCK(mode) ((mode & _S_IFMT)==S_IFSOCK)
#endif
#endif

#ifndef S_ISFIFO
#if defined(S_IFMT) && defined(S_IFIFO)
#define S_ISFIFO(mode) ((mode & S_IFMT)==S_IFIFO)
#elif defined(_S_IFMT) && defined(S_IFIFO)
#define S_ISFIFO(mode) ((mode & _S_IFMT)==S_IFIFO)
#endif
#endif

#if !defined(S_ISFIFO) && !defined(S_ISSOCK)
#error You need to define S_ISFIFO and S_ISSOCK as appropriate for your platform.  See <sys/stat.h>.
#endif

/* If only one of the two could be defined, stub out the other. */
#ifndef S_ISFIFO
#define S_ISFIFO(mode) 0
#endif

#ifndef S_ISSOCK
#define S_ISSOCK(mode) 0
#endif

	if (active) {
		/* Shutdown path: sanity-check the path, then remove it. */
		if (stat(sockaddr->type.sunix.sun_path, &sb) < 0) {
			isc__strerror(errno, strbuf, sizeof(strbuf));
			isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
				      ISC_LOGMODULE_SOCKET, ISC_LOG_ERROR,
				      "isc_socket_cleanunix: stat(%s): %s",
				      sockaddr->type.sunix.sun_path, strbuf);
			return;
		}
		if (!(S_ISSOCK(sb.st_mode) || S_ISFIFO(sb.st_mode))) {
			isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
				      ISC_LOGMODULE_SOCKET, ISC_LOG_ERROR,
				      "isc_socket_cleanunix: %s: not a socket",
				      sockaddr->type.sunix.sun_path);
			return;
		}
		if (unlink(sockaddr->type.sunix.sun_path) < 0) {
			isc__strerror(errno, strbuf, sizeof(strbuf));
			isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
				      ISC_LOGMODULE_SOCKET, ISC_LOG_ERROR,
				      "isc_socket_cleanunix: unlink(%s): %s",
				      sockaddr->type.sunix.sun_path, strbuf);
		}
		return;
	}

	/* Startup path: probe the path with a connect() to decide whether
	 * a live server still owns it. */
	s = socket(AF_UNIX, SOCK_STREAM, 0);
	if (s < 0) {
		isc__strerror(errno, strbuf, sizeof(strbuf));
		isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
			      ISC_LOGMODULE_SOCKET, ISC_LOG_WARNING,
			      "isc_socket_cleanunix: socket(%s): %s",
			      sockaddr->type.sunix.sun_path, strbuf);
		return;
	}

	if (stat(sockaddr->type.sunix.sun_path, &sb) < 0) {
		switch (errno) {
		case ENOENT:    /* We exited cleanly last time */
			break;
		default:
			isc__strerror(errno, strbuf, sizeof(strbuf));
			isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
				      ISC_LOGMODULE_SOCKET, ISC_LOG_WARNING,
				      "isc_socket_cleanunix: stat(%s): %s",
				      sockaddr->type.sunix.sun_path, strbuf);
			break;
		}
		goto cleanup;
	}

	if (!(S_ISSOCK(sb.st_mode) || S_ISFIFO(sb.st_mode))) {
		isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
			      ISC_LOGMODULE_SOCKET, ISC_LOG_WARNING,
			      "isc_socket_cleanunix: %s: not a socket",
			      sockaddr->type.sunix.sun_path);
		goto cleanup;
	}

	if (connect(s, (struct sockaddr *)&sockaddr->type.sunix,
		    sizeof(sockaddr->type.sunix)) < 0) {
		switch (errno) {
		case ECONNREFUSED:
		case ECONNRESET:
			/* Nobody listening: the file is stale, remove it. */
			if (unlink(sockaddr->type.sunix.sun_path) < 0) {
				isc__strerror(errno, strbuf, sizeof(strbuf));
				isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
					      ISC_LOGMODULE_SOCKET,
					      ISC_LOG_WARNING,
					      "isc_socket_cleanunix: "
					      "unlink(%s): %s",
					      sockaddr->type.sunix.sun_path,
					      strbuf);
			}
			break;
		default:
			isc__strerror(errno, strbuf, sizeof(strbuf));
			isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
				      ISC_LOGMODULE_SOCKET, ISC_LOG_WARNING,
				      "isc_socket_cleanunix: connect(%s): %s",
				      sockaddr->type.sunix.sun_path, strbuf);
			break;
		}
	}
 cleanup:
	close(s);
#else
	UNUSED(sockaddr);
	UNUSED(active);
#endif
}
4974
/*
 * Apply mode/owner/group to an AF_UNIX socket path.
 *
 * When NEED_SECURE_DIRECTORY is defined the permissions are applied to
 * the containing directory rather than the socket file itself.
 *
 * Returns ISC_R_SUCCESS, ISC_R_FAILURE if chmod() or chown() failed
 * (both are attempted and both failures are logged), or
 * ISC_R_NOTIMPLEMENTED on platforms without unix-domain sockets.
 */
ISC_SOCKETFUNC_SCOPE isc_result_t
isc__socket_permunix(isc_sockaddr_t *sockaddr, isc_uint32_t perm,
		    isc_uint32_t owner, isc_uint32_t group)
{
#ifdef ISC_PLATFORM_HAVESYSUNH
	isc_result_t result = ISC_R_SUCCESS;
	char strbuf[ISC_STRERRORSIZE];
	char path[sizeof(sockaddr->type.sunix.sun_path)];
#ifdef NEED_SECURE_DIRECTORY
	char *slash;
#endif

	REQUIRE(sockaddr->type.sa.sa_family == AF_UNIX);
	/* INSIST guarantees the strcpy below cannot overflow 'path'. */
	INSIST(strlen(sockaddr->type.sunix.sun_path) < sizeof(path));
	strcpy(path, sockaddr->type.sunix.sun_path);

#ifdef NEED_SECURE_DIRECTORY
	/* Strip the final component so we operate on the directory;
	 * a path with no slash means the current directory. */
	slash = strrchr(path, '/');
	if (slash != NULL) {
		if (slash != path)
			*slash = '\0';
		else
			strcpy(path, "/");
	} else
		strcpy(path, ".");
#endif

	if (chmod(path, perm) < 0) {
		isc__strerror(errno, strbuf, sizeof(strbuf));
		isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
			      ISC_LOGMODULE_SOCKET, ISC_LOG_ERROR,
			      "isc_socket_permunix: chmod(%s, %d): %s",
			      path, perm, strbuf);
		result = ISC_R_FAILURE;
	}
	if (chown(path, owner, group) < 0) {
		isc__strerror(errno, strbuf, sizeof(strbuf));
		isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
			      ISC_LOGMODULE_SOCKET, ISC_LOG_ERROR,
			      "isc_socket_permunix: chown(%s, %d, %d): %s",
			      path, owner, group,
			      strbuf);
		result = ISC_R_FAILURE;
	}
	return (result);
#else
	UNUSED(sockaddr);
	UNUSED(perm);
	UNUSED(owner);
	UNUSED(group);
	return (ISC_R_NOTIMPLEMENTED);
#endif
}
5028
/*
 * Bind the socket to 'sockaddr'.
 *
 * SO_REUSEADDR is set only when ISC_SOCKET_REUSEADDRESS is requested
 * and a specific (non-zero) port is being bound; AF_UNIX sockets skip
 * the option entirely.  errno from a failed bind() is mapped onto the
 * closest isc_result code.  The bind-failure statistics counter is
 * bumped on any bind() error.
 */
ISC_SOCKETFUNC_SCOPE isc_result_t
isc__socket_bind(isc_socket_t *sock0, isc_sockaddr_t *sockaddr,
		 unsigned int options) {
	isc__socket_t *sock = (isc__socket_t *)sock0;
	char strbuf[ISC_STRERRORSIZE];
	int on = 1;

	REQUIRE(VALID_SOCKET(sock));

	LOCK(&sock->lock);

	INSIST(!sock->bound);
	INSIST(!sock->dupped);

	if (sock->pf != sockaddr->type.sa.sa_family) {
		UNLOCK(&sock->lock);
		return (ISC_R_FAMILYMISMATCH);
	}

	/*
	 * Only set SO_REUSEADDR when we want a specific port.
	 */
#ifdef AF_UNIX
	if (sock->pf == AF_UNIX)
		goto bind_socket;
#endif
	if ((options & ISC_SOCKET_REUSEADDRESS) != 0 &&
	    isc_sockaddr_getport(sockaddr) != (in_port_t)0 &&
	    setsockopt(sock->fd, SOL_SOCKET, SO_REUSEADDR, (void *)&on,
		       sizeof(on)) < 0) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "setsockopt(%d) %s", sock->fd,
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
		/* Press on... */
	}
#ifdef AF_UNIX
 bind_socket:
#endif
	if (bind(sock->fd, &sockaddr->type.sa, sockaddr->length) < 0) {
		inc_stats(sock->manager->stats,
			  sock->statsindex[STATID_BINDFAIL]);

		UNLOCK(&sock->lock);
		/* Map the errno onto the nearest isc_result code. */
		switch (errno) {
		case EACCES:
			return (ISC_R_NOPERM);
		case EADDRNOTAVAIL:
			return (ISC_R_ADDRNOTAVAIL);
		case EADDRINUSE:
			return (ISC_R_ADDRINUSE);
		case EINVAL:
			return (ISC_R_BOUND);
		default:
			isc__strerror(errno, strbuf, sizeof(strbuf));
			UNEXPECTED_ERROR(__FILE__, __LINE__, "bind: %s",
					 strbuf);
			return (ISC_R_UNEXPECTED);
		}
	}

	socket_log(sock, sockaddr, TRACE,
		   isc_msgcat, ISC_MSGSET_SOCKET, ISC_MSG_BOUND, "bound");
	sock->bound = 1;

	UNLOCK(&sock->lock);
	return (ISC_R_SUCCESS);
}
5097
5098/*
5099 * Enable this only for specific OS versions, and only when they have repaired
 * their problems with it.  Until then, this is broken and needs to be
 * disabled by default.  See RT22589 for details.
5102 */
5103#undef ENABLE_ACCEPTFILTER
5104
/*
 * Install a kernel accept filter (BSD SO_ACCEPTFILTER) named 'filter'
 * on a listening socket.  Currently compiled out via the
 * ENABLE_ACCEPTFILTER #undef above, so this returns
 * ISC_R_NOTIMPLEMENTED on all builds.
 */
ISC_SOCKETFUNC_SCOPE isc_result_t
isc__socket_filter(isc_socket_t *sock0, const char *filter) {
	isc__socket_t *sock = (isc__socket_t *)sock0;
#if defined(SO_ACCEPTFILTER) && defined(ENABLE_ACCEPTFILTER)
	char strbuf[ISC_STRERRORSIZE];
	struct accept_filter_arg afa;
#else
	UNUSED(sock);
	UNUSED(filter);
#endif

	REQUIRE(VALID_SOCKET(sock));

#if defined(SO_ACCEPTFILTER) && defined(ENABLE_ACCEPTFILTER)
	/* NOTE(review): strncpy() leaves af_name unterminated when
	 * 'filter' fills it exactly; the bzero() covers shorter names
	 * only.  Confirm the kernel tolerates an unterminated name
	 * before re-enabling this code. */
	bzero(&afa, sizeof(afa));
	strncpy(afa.af_name, filter, sizeof(afa.af_name));
	if (setsockopt(sock->fd, SOL_SOCKET, SO_ACCEPTFILTER,
			 &afa, sizeof(afa)) == -1) {
		isc__strerror(errno, strbuf, sizeof(strbuf));
		socket_log(sock, NULL, CREATION, isc_msgcat, ISC_MSGSET_SOCKET,
			   ISC_MSG_FILTER, "setsockopt(SO_ACCEPTFILTER): %s",
			   strbuf);
		return (ISC_R_FAILURE);
	}
	return (ISC_R_SUCCESS);
#else
	return (ISC_R_NOTIMPLEMENTED);
#endif
}
5134
5135/*
5136 * Set up to listen on a given socket.  We do this by creating an internal
5137 * event that will be dispatched when the socket has read activity.  The
5138 * watcher will send the internal event to the task when there is a new
5139 * connection.
5140 *
5141 * Unlike in read, we don't preallocate a done event here.  Every time there
5142 * is a new connection we'll have to allocate a new one anyway, so we might
5143 * as well keep things simple rather than having to track them.
5144 */
5145ISC_SOCKETFUNC_SCOPE isc_result_t
5146isc__socket_listen(isc_socket_t *sock0, unsigned int backlog) {
5147	isc__socket_t *sock = (isc__socket_t *)sock0;
5148	char strbuf[ISC_STRERRORSIZE];
5149
5150	REQUIRE(VALID_SOCKET(sock));
5151
5152	LOCK(&sock->lock);
5153
5154	REQUIRE(!sock->listener);
5155	REQUIRE(sock->bound);
5156	REQUIRE(sock->type == isc_sockettype_tcp ||
5157		sock->type == isc_sockettype_unix);
5158
5159	if (backlog == 0)
5160		backlog = SOMAXCONN;
5161
5162	if (listen(sock->fd, (int)backlog) < 0) {
5163		UNLOCK(&sock->lock);
5164		isc__strerror(errno, strbuf, sizeof(strbuf));
5165
5166		UNEXPECTED_ERROR(__FILE__, __LINE__, "listen: %s", strbuf);
5167
5168		return (ISC_R_UNEXPECTED);
5169	}
5170
5171	sock->listener = 1;
5172
5173	UNLOCK(&sock->lock);
5174	return (ISC_R_SUCCESS);
5175}
5176
5177/*
5178 * This should try to do aggressive accept() XXXMLG
5179 */
/*
 * Queue an accept on a listening socket: allocates a NEWCONN event and
 * the socket object for the future connection, enqueues the event on
 * sock->accept_list, and pokes the watcher if the list was previously
 * empty.  'action' runs in 'task' when a connection arrives.
 */
ISC_SOCKETFUNC_SCOPE isc_result_t
isc__socket_accept(isc_socket_t *sock0,
		  isc_task_t *task, isc_taskaction_t action, const void *arg)
{
	isc__socket_t *sock = (isc__socket_t *)sock0;
	isc_socket_newconnev_t *dev;
	isc__socketmgr_t *manager;
	isc_task_t *ntask = NULL;
	isc__socket_t *nsock;
	isc_result_t result;
	isc_boolean_t do_poke = ISC_FALSE;

	REQUIRE(VALID_SOCKET(sock));
	manager = sock->manager;
	REQUIRE(VALID_MANAGER(manager));

	LOCK(&sock->lock);

	REQUIRE(sock->listener);

	/*
	 * Sender field is overloaded here with the task we will be sending
	 * this event to.  Just before the actual event is delivered the
	 * actual ev_sender will be touched up to be the socket.
	 */
	dev = (isc_socket_newconnev_t *)
		isc_event_allocate(manager->mctx, task, ISC_SOCKEVENT_NEWCONN,
				   action, arg, sizeof(*dev));
	if (dev == NULL) {
		UNLOCK(&sock->lock);
		return (ISC_R_NOMEMORY);
	}
	ISC_LINK_INIT(dev, ev_link);

	/* Pre-allocate the socket object the new connection will use. */
	result = allocate_socket(manager, sock->type, &nsock);
	if (result != ISC_R_SUCCESS) {
		isc_event_free(ISC_EVENT_PTR(&dev));
		UNLOCK(&sock->lock);
		return (result);
	}

	/*
	 * Attach to socket and to task.
	 */
	isc_task_attach(task, &ntask);
	if (isc_task_exiting(ntask)) {
		/* Task is shutting down: undo everything allocated above. */
		free_socket(&nsock);
		isc_task_detach(&ntask);
		isc_event_free(ISC_EVENT_PTR(&dev));
		UNLOCK(&sock->lock);
		return (ISC_R_SHUTTINGDOWN);
	}
	nsock->references++;
	/* The accepted socket inherits the listener's statistics bucket. */
	nsock->statsindex = sock->statsindex;

	dev->ev_sender = ntask;
	dev->newsocket = (isc_socket_t *)nsock;

	/*
	 * Poke watcher here.  We still have the socket locked, so there
	 * is no race condition.  We will keep the lock for such a short
	 * bit of time waking it up now or later won't matter all that much.
	 */
	if (ISC_LIST_EMPTY(sock->accept_list))
		do_poke = ISC_TRUE;

	ISC_LIST_ENQUEUE(sock->accept_list, dev, ev_link);

	if (do_poke)
		select_poke(manager, sock->fd, SELECT_POKE_ACCEPT);

	UNLOCK(&sock->lock);
	return (ISC_R_SUCCESS);
}
5254
/*
 * Initiate an asynchronous connect() of 'sock' to 'addr'.  When the
 * connect completes (or fails), an ISC_SOCKEVENT_CONNECT event carrying
 * the result is delivered to 'task', invoking 'action' with 'arg'.
 *
 * The connect is attempted immediately while holding the socket lock.
 * If it completes (or fails hard) the done event is posted right away;
 * if the kernel reports EINPROGRESS or a soft error the request is
 * queued on sock->connect_ev and the watcher is poked, and
 * internal_connect() finishes the job later.
 *
 * Returns ISC_R_SUCCESS when the request was accepted (the outcome of
 * the connect itself is reported only via the event), ISC_R_MULTICAST
 * for a multicast destination, or ISC_R_NOMEMORY if the event could
 * not be allocated.
 */
ISC_SOCKETFUNC_SCOPE isc_result_t
isc__socket_connect(isc_socket_t *sock0, isc_sockaddr_t *addr,
		   isc_task_t *task, isc_taskaction_t action, const void *arg)
{
	isc__socket_t *sock = (isc__socket_t *)sock0;
	isc_socket_connev_t *dev;
	isc_task_t *ntask = NULL;
	isc__socketmgr_t *manager;
	int cc;
	char strbuf[ISC_STRERRORSIZE];
	char addrbuf[ISC_SOCKADDR_FORMATSIZE];

	REQUIRE(VALID_SOCKET(sock));
	REQUIRE(addr != NULL);
	REQUIRE(task != NULL);
	REQUIRE(action != NULL);

	manager = sock->manager;
	REQUIRE(VALID_MANAGER(manager));
	REQUIRE(addr != NULL);	/* NOTE(review): duplicate of the check above */

	if (isc_sockaddr_ismulticast(addr))
		return (ISC_R_MULTICAST);

	LOCK(&sock->lock);

	/* Only one connect may be outstanding on a socket at a time. */
	REQUIRE(!sock->connecting);

	dev = (isc_socket_connev_t *)isc_event_allocate(manager->mctx, sock,
							ISC_SOCKEVENT_CONNECT,
							action,	arg,
							sizeof(*dev));
	if (dev == NULL) {
		UNLOCK(&sock->lock);
		return (ISC_R_NOMEMORY);
	}
	ISC_LINK_INIT(dev, ev_link);

	/*
	 * Try to do the connect right away, as there can be only one
	 * outstanding, and it might happen to complete.
	 */
	sock->peer_address = *addr;
	cc = connect(sock->fd, &addr->type.sa, addr->length);
	if (cc < 0) {
		/*
		 * HP-UX "fails" to connect a UDP socket and sets errno to
		 * EINPROGRESS if it's non-blocking.  We'd rather regard this as
		 * a success and let the user detect it if it's really an error
		 * at the time of sending a packet on the socket.
		 */
		if (sock->type == isc_sockettype_udp && errno == EINPROGRESS) {
			cc = 0;
			goto success;
		}
		/* Soft errors / EINPROGRESS: let the watcher finish it. */
		if (SOFT_ERROR(errno) || errno == EINPROGRESS)
			goto queue;

		/* Map well-known errnos to ISC result codes. */
		switch (errno) {
#define ERROR_MATCH(a, b) case a: dev->result = b; goto err_exit;
			ERROR_MATCH(EACCES, ISC_R_NOPERM);
			ERROR_MATCH(EADDRNOTAVAIL, ISC_R_ADDRNOTAVAIL);
			ERROR_MATCH(EAFNOSUPPORT, ISC_R_ADDRNOTAVAIL);
			ERROR_MATCH(ECONNREFUSED, ISC_R_CONNREFUSED);
			ERROR_MATCH(EHOSTUNREACH, ISC_R_HOSTUNREACH);
#ifdef EHOSTDOWN
			ERROR_MATCH(EHOSTDOWN, ISC_R_HOSTUNREACH);
#endif
			ERROR_MATCH(ENETUNREACH, ISC_R_NETUNREACH);
			ERROR_MATCH(ENOBUFS, ISC_R_NORESOURCES);
			ERROR_MATCH(EPERM, ISC_R_HOSTUNREACH);
			ERROR_MATCH(EPIPE, ISC_R_NOTCONNECTED);
			ERROR_MATCH(ECONNRESET, ISC_R_CONNECTIONRESET);
#undef ERROR_MATCH
		}

		/* Unrecognized errno: log it and fail the call itself. */
		sock->connected = 0;

		isc__strerror(errno, strbuf, sizeof(strbuf));
		isc_sockaddr_format(addr, addrbuf, sizeof(addrbuf));
		UNEXPECTED_ERROR(__FILE__, __LINE__, "connect(%s) %d/%s",
				 addrbuf, errno, strbuf);

		UNLOCK(&sock->lock);
		inc_stats(sock->manager->stats,
			  sock->statsindex[STATID_CONNECTFAIL]);
		isc_event_free(ISC_EVENT_PTR(&dev));
		return (ISC_R_UNEXPECTED);

	err_exit:
		/* Known failure: report it through the event, not the call. */
		sock->connected = 0;
		isc_task_send(task, ISC_EVENT_PTR(&dev));

		UNLOCK(&sock->lock);
		inc_stats(sock->manager->stats,
			  sock->statsindex[STATID_CONNECTFAIL]);
		return (ISC_R_SUCCESS);
	}

	/*
	 * If connect completed, fire off the done event.
	 */
 success:
	if (cc == 0) {
		sock->connected = 1;
		sock->bound = 1;
		dev->result = ISC_R_SUCCESS;
		isc_task_send(task, ISC_EVENT_PTR(&dev));

		UNLOCK(&sock->lock);

		inc_stats(sock->manager->stats,
			  sock->statsindex[STATID_CONNECT]);

		return (ISC_R_SUCCESS);
	}

 queue:

	/*
	 * Attach to task.
	 *
	 * NOTE(review): unlike the accept path, this does not check
	 * isc_task_exiting(ntask) after attaching -- confirm whether a
	 * connect queued against an exiting task is handled elsewhere.
	 */
	isc_task_attach(task, &ntask);

	sock->connecting = 1;

	dev->ev_sender = ntask;

	/*
	 * Poke watcher here.  We still have the socket locked, so there
	 * is no race condition.  We will keep the lock for such a short
	 * bit of time waking it up now or later won't matter all that much.
	 */
	if (sock->connect_ev == NULL)
		select_poke(manager, sock->fd, SELECT_POKE_CONNECT);

	sock->connect_ev = dev;

	UNLOCK(&sock->lock);
	return (ISC_R_SUCCESS);
}
5396
5397/*
5398 * Called when a socket with a pending connect() finishes.
5399 */
5400static void
5401internal_connect(isc_task_t *me, isc_event_t *ev) {
5402	isc__socket_t *sock;
5403	isc_socket_connev_t *dev;
5404	isc_task_t *task;
5405	int cc;
5406	ISC_SOCKADDR_LEN_T optlen;
5407	char strbuf[ISC_STRERRORSIZE];
5408	char peerbuf[ISC_SOCKADDR_FORMATSIZE];
5409
5410	UNUSED(me);
5411	INSIST(ev->ev_type == ISC_SOCKEVENT_INTW);
5412
5413	sock = ev->ev_sender;
5414	INSIST(VALID_SOCKET(sock));
5415
5416	LOCK(&sock->lock);
5417
5418	/*
5419	 * When the internal event was sent the reference count was bumped
5420	 * to keep the socket around for us.  Decrement the count here.
5421	 */
5422	INSIST(sock->references > 0);
5423	sock->references--;
5424	if (sock->references == 0) {
5425		UNLOCK(&sock->lock);
5426		destroy(&sock);
5427		return;
5428	}
5429
5430	/*
5431	 * Has this event been canceled?
5432	 */
5433	dev = sock->connect_ev;
5434	if (dev == NULL) {
5435		INSIST(!sock->connecting);
5436		UNLOCK(&sock->lock);
5437		return;
5438	}
5439
5440	INSIST(sock->connecting);
5441	sock->connecting = 0;
5442
5443	/*
5444	 * Get any possible error status here.
5445	 */
5446	optlen = sizeof(cc);
5447	if (getsockopt(sock->fd, SOL_SOCKET, SO_ERROR,
5448		       (void *)&cc, (void *)&optlen) < 0)
5449		cc = errno;
5450	else
5451		errno = cc;
5452
5453	if (errno != 0) {
5454		/*
5455		 * If the error is EAGAIN, just re-select on this
5456		 * fd and pretend nothing strange happened.
5457		 */
5458		if (SOFT_ERROR(errno) || errno == EINPROGRESS) {
5459			sock->connecting = 1;
5460			select_poke(sock->manager, sock->fd,
5461				    SELECT_POKE_CONNECT);
5462			UNLOCK(&sock->lock);
5463
5464			return;
5465		}
5466
5467		inc_stats(sock->manager->stats,
5468			  sock->statsindex[STATID_CONNECTFAIL]);
5469
5470		/*
5471		 * Translate other errors into ISC_R_* flavors.
5472		 */
5473		switch (errno) {
5474#define ERROR_MATCH(a, b) case a: dev->result = b; break;
5475			ERROR_MATCH(EACCES, ISC_R_NOPERM);
5476			ERROR_MATCH(EADDRNOTAVAIL, ISC_R_ADDRNOTAVAIL);
5477			ERROR_MATCH(EAFNOSUPPORT, ISC_R_ADDRNOTAVAIL);
5478			ERROR_MATCH(ECONNREFUSED, ISC_R_CONNREFUSED);
5479			ERROR_MATCH(EHOSTUNREACH, ISC_R_HOSTUNREACH);
5480#ifdef EHOSTDOWN
5481			ERROR_MATCH(EHOSTDOWN, ISC_R_HOSTUNREACH);
5482#endif
5483			ERROR_MATCH(ENETUNREACH, ISC_R_NETUNREACH);
5484			ERROR_MATCH(ENOBUFS, ISC_R_NORESOURCES);
5485			ERROR_MATCH(EPERM, ISC_R_HOSTUNREACH);
5486			ERROR_MATCH(EPIPE, ISC_R_NOTCONNECTED);
5487			ERROR_MATCH(ETIMEDOUT, ISC_R_TIMEDOUT);
5488			ERROR_MATCH(ECONNRESET, ISC_R_CONNECTIONRESET);
5489#undef ERROR_MATCH
5490		default:
5491			dev->result = ISC_R_UNEXPECTED;
5492			isc_sockaddr_format(&sock->peer_address, peerbuf,
5493					    sizeof(peerbuf));
5494			isc__strerror(errno, strbuf, sizeof(strbuf));
5495			UNEXPECTED_ERROR(__FILE__, __LINE__,
5496					 "internal_connect: connect(%s) %s",
5497					 peerbuf, strbuf);
5498		}
5499	} else {
5500		inc_stats(sock->manager->stats,
5501			  sock->statsindex[STATID_CONNECT]);
5502		dev->result = ISC_R_SUCCESS;
5503		sock->connected = 1;
5504		sock->bound = 1;
5505	}
5506
5507	sock->connect_ev = NULL;
5508
5509	UNLOCK(&sock->lock);
5510
5511	task = dev->ev_sender;
5512	dev->ev_sender = sock;
5513	isc_task_sendanddetach(&task, ISC_EVENT_PTR(&dev));
5514}
5515
5516ISC_SOCKETFUNC_SCOPE isc_result_t
5517isc__socket_getpeername(isc_socket_t *sock0, isc_sockaddr_t *addressp) {
5518	isc__socket_t *sock = (isc__socket_t *)sock0;
5519	isc_result_t result;
5520
5521	REQUIRE(VALID_SOCKET(sock));
5522	REQUIRE(addressp != NULL);
5523
5524	LOCK(&sock->lock);
5525
5526	if (sock->connected) {
5527		*addressp = sock->peer_address;
5528		result = ISC_R_SUCCESS;
5529	} else {
5530		result = ISC_R_NOTCONNECTED;
5531	}
5532
5533	UNLOCK(&sock->lock);
5534
5535	return (result);
5536}
5537
5538ISC_SOCKETFUNC_SCOPE isc_result_t
5539isc__socket_getsockname(isc_socket_t *sock0, isc_sockaddr_t *addressp) {
5540	isc__socket_t *sock = (isc__socket_t *)sock0;
5541	ISC_SOCKADDR_LEN_T len;
5542	isc_result_t result;
5543	char strbuf[ISC_STRERRORSIZE];
5544
5545	REQUIRE(VALID_SOCKET(sock));
5546	REQUIRE(addressp != NULL);
5547
5548	LOCK(&sock->lock);
5549
5550	if (!sock->bound) {
5551		result = ISC_R_NOTBOUND;
5552		goto out;
5553	}
5554
5555	result = ISC_R_SUCCESS;
5556
5557	len = sizeof(addressp->type);
5558	if (getsockname(sock->fd, &addressp->type.sa, (void *)&len) < 0) {
5559		isc__strerror(errno, strbuf, sizeof(strbuf));
5560		UNEXPECTED_ERROR(__FILE__, __LINE__, "getsockname: %s",
5561				 strbuf);
5562		result = ISC_R_UNEXPECTED;
5563		goto out;
5564	}
5565	addressp->length = (unsigned int)len;
5566
5567 out:
5568	UNLOCK(&sock->lock);
5569
5570	return (result);
5571}
5572
5573/*
5574 * Run through the list of events on this socket, and cancel the ones
5575 * queued for task "task" of type "how".  "how" is a bitmask.
5576 */
5577ISC_SOCKETFUNC_SCOPE void
5578isc__socket_cancel(isc_socket_t *sock0, isc_task_t *task, unsigned int how) {
5579	isc__socket_t *sock = (isc__socket_t *)sock0;
5580
5581	REQUIRE(VALID_SOCKET(sock));
5582
5583	/*
5584	 * Quick exit if there is nothing to do.  Don't even bother locking
5585	 * in this case.
5586	 */
5587	if (how == 0)
5588		return;
5589
5590	LOCK(&sock->lock);
5591
5592	/*
5593	 * All of these do the same thing, more or less.
5594	 * Each will:
5595	 *	o If the internal event is marked as "posted" try to
5596	 *	  remove it from the task's queue.  If this fails, mark it
5597	 *	  as canceled instead, and let the task clean it up later.
5598	 *	o For each I/O request for that task of that type, post
5599	 *	  its done event with status of "ISC_R_CANCELED".
5600	 *	o Reset any state needed.
5601	 */
5602	if (((how & ISC_SOCKCANCEL_RECV) == ISC_SOCKCANCEL_RECV)
5603	    && !ISC_LIST_EMPTY(sock->recv_list)) {
5604		isc_socketevent_t      *dev;
5605		isc_socketevent_t      *next;
5606		isc_task_t	       *current_task;
5607
5608		dev = ISC_LIST_HEAD(sock->recv_list);
5609
5610		while (dev != NULL) {
5611			current_task = dev->ev_sender;
5612			next = ISC_LIST_NEXT(dev, ev_link);
5613
5614			if ((task == NULL) || (task == current_task)) {
5615				dev->result = ISC_R_CANCELED;
5616				send_recvdone_event(sock, &dev);
5617			}
5618			dev = next;
5619		}
5620	}
5621
5622	if (((how & ISC_SOCKCANCEL_SEND) == ISC_SOCKCANCEL_SEND)
5623	    && !ISC_LIST_EMPTY(sock->send_list)) {
5624		isc_socketevent_t      *dev;
5625		isc_socketevent_t      *next;
5626		isc_task_t	       *current_task;
5627
5628		dev = ISC_LIST_HEAD(sock->send_list);
5629
5630		while (dev != NULL) {
5631			current_task = dev->ev_sender;
5632			next = ISC_LIST_NEXT(dev, ev_link);
5633
5634			if ((task == NULL) || (task == current_task)) {
5635				dev->result = ISC_R_CANCELED;
5636				send_senddone_event(sock, &dev);
5637			}
5638			dev = next;
5639		}
5640	}
5641
5642	if (((how & ISC_SOCKCANCEL_ACCEPT) == ISC_SOCKCANCEL_ACCEPT)
5643	    && !ISC_LIST_EMPTY(sock->accept_list)) {
5644		isc_socket_newconnev_t *dev;
5645		isc_socket_newconnev_t *next;
5646		isc_task_t	       *current_task;
5647
5648		dev = ISC_LIST_HEAD(sock->accept_list);
5649		while (dev != NULL) {
5650			current_task = dev->ev_sender;
5651			next = ISC_LIST_NEXT(dev, ev_link);
5652
5653			if ((task == NULL) || (task == current_task)) {
5654
5655				ISC_LIST_UNLINK(sock->accept_list, dev,
5656						ev_link);
5657
5658				NEWCONNSOCK(dev)->references--;
5659				free_socket((isc__socket_t **)(void *)&dev->newsocket);
5660
5661				dev->result = ISC_R_CANCELED;
5662				dev->ev_sender = sock;
5663				isc_task_sendanddetach(&current_task,
5664						       ISC_EVENT_PTR(&dev));
5665			}
5666
5667			dev = next;
5668		}
5669	}
5670
5671	/*
5672	 * Connecting is not a list.
5673	 */
5674	if (((how & ISC_SOCKCANCEL_CONNECT) == ISC_SOCKCANCEL_CONNECT)
5675	    && sock->connect_ev != NULL) {
5676		isc_socket_connev_t    *dev;
5677		isc_task_t	       *current_task;
5678
5679		INSIST(sock->connecting);
5680		sock->connecting = 0;
5681
5682		dev = sock->connect_ev;
5683		current_task = dev->ev_sender;
5684
5685		if ((task == NULL) || (task == current_task)) {
5686			sock->connect_ev = NULL;
5687
5688			dev->result = ISC_R_CANCELED;
5689			dev->ev_sender = sock;
5690			isc_task_sendanddetach(&current_task,
5691					       ISC_EVENT_PTR(&dev));
5692		}
5693	}
5694
5695	UNLOCK(&sock->lock);
5696}
5697
5698ISC_SOCKETFUNC_SCOPE isc_sockettype_t
5699isc__socket_gettype(isc_socket_t *sock0) {
5700	isc__socket_t *sock = (isc__socket_t *)sock0;
5701
5702	REQUIRE(VALID_SOCKET(sock));
5703
5704	return (sock->type);
5705}
5706
5707ISC_SOCKETFUNC_SCOPE isc_boolean_t
5708isc__socket_isbound(isc_socket_t *sock0) {
5709	isc__socket_t *sock = (isc__socket_t *)sock0;
5710	isc_boolean_t val;
5711
5712	REQUIRE(VALID_SOCKET(sock));
5713
5714	LOCK(&sock->lock);
5715	val = ((sock->bound) ? ISC_TRUE : ISC_FALSE);
5716	UNLOCK(&sock->lock);
5717
5718	return (val);
5719}
5720
5721ISC_SOCKETFUNC_SCOPE void
5722isc__socket_ipv6only(isc_socket_t *sock0, isc_boolean_t yes) {
5723	isc__socket_t *sock = (isc__socket_t *)sock0;
5724#if defined(IPV6_V6ONLY)
5725	int onoff = yes ? 1 : 0;
5726#else
5727	UNUSED(yes);
5728	UNUSED(sock);
5729#endif
5730
5731	REQUIRE(VALID_SOCKET(sock));
5732	INSIST(!sock->dupped);
5733
5734#ifdef IPV6_V6ONLY
5735	if (sock->pf == AF_INET6) {
5736		if (setsockopt(sock->fd, IPPROTO_IPV6, IPV6_V6ONLY,
5737			       (void *)&onoff, sizeof(int)) < 0) {
5738			char strbuf[ISC_STRERRORSIZE];
5739			isc__strerror(errno, strbuf, sizeof(strbuf));
5740			UNEXPECTED_ERROR(__FILE__, __LINE__,
5741					 "setsockopt(%d, IPV6_V6ONLY) "
5742					 "%s: %s", sock->fd,
5743					 isc_msgcat_get(isc_msgcat,
5744							ISC_MSGSET_GENERAL,
5745							ISC_MSG_FAILED,
5746							"failed"),
5747					 strbuf);
5748		}
5749	}
5750	FIX_IPV6_RECVPKTINFO(sock);	/* AIX */
5751#endif
5752}
5753
5754#ifndef USE_WATCHER_THREAD
5755/*
5756 * In our assumed scenario, we can simply use a single static object.
5757 * XXX: this is not true if the application uses multiple threads with
5758 *      'multi-context' mode.  Fixing this is a future TODO item.
5759 */
5760static isc_socketwait_t swait_private;
5761
/*
 * Wait for socket events on behalf of an application that drives the
 * event loop itself (no watcher thread).  'tvp' is the maximum time to
 * block (NULL = forever).  On return, '*swaitp' points at the shared
 * swait_private object describing the ready descriptors, for later use
 * by isc__socketmgr_dispatch().
 *
 * Returns the raw result of the underlying kevent/epoll_wait/ioctl/
 * select call: the number of ready events, 0 on timeout, or a negative
 * value on error.  Returns 0 if no manager exists.
 */
int
isc__socketmgr_waitevents(isc_socketmgr_t *manager0, struct timeval *tvp,
			  isc_socketwait_t **swaitp)
{
	isc__socketmgr_t *manager = (isc__socketmgr_t *)manager0;


	int n;
#ifdef USE_KQUEUE
	struct timespec ts, *tsp;
#endif
#ifdef USE_EPOLL
	int timeout;
#endif
#ifdef USE_DEVPOLL
	struct dvpoll dvp;
#endif

	REQUIRE(swaitp != NULL && *swaitp == NULL);

#ifdef USE_SHARED_MANAGER
	if (manager == NULL)
		manager = socketmgr;
#endif
	if (manager == NULL)
		return (0);

#ifdef USE_KQUEUE
	/* kqueue wants a timespec; convert from the timeval. */
	if (tvp != NULL) {
		ts.tv_sec = tvp->tv_sec;
		ts.tv_nsec = tvp->tv_usec * 1000;
		tsp = &ts;
	} else
		tsp = NULL;
	swait_private.nevents = kevent(manager->kqueue_fd, NULL, 0,
				       manager->events, manager->nevents,
				       tsp);
	n = swait_private.nevents;
#elif defined(USE_EPOLL)
	/* epoll takes milliseconds; round microseconds up, -1 = forever. */
	if (tvp != NULL)
		timeout = tvp->tv_sec * 1000 + (tvp->tv_usec + 999) / 1000;
	else
		timeout = -1;
	swait_private.nevents = epoll_wait(manager->epoll_fd,
					   manager->events,
					   manager->nevents, timeout);
	n = swait_private.nevents;
#elif defined(USE_DEVPOLL)
	/* /dev/poll also takes milliseconds, rounded up the same way. */
	dvp.dp_fds = manager->events;
	dvp.dp_nfds = manager->nevents;
	if (tvp != NULL) {
		dvp.dp_timeout = tvp->tv_sec * 1000 +
			(tvp->tv_usec + 999) / 1000;
	} else
		dvp.dp_timeout = -1;
	swait_private.nevents = ioctl(manager->devpoll_fd, DP_POLL, &dvp);
	n = swait_private.nevents;
#elif defined(USE_SELECT)
	/* select() mutates its fd_sets, so operate on copies. */
	memcpy(manager->read_fds_copy, manager->read_fds,  manager->fd_bufsize);
	memcpy(manager->write_fds_copy, manager->write_fds,
	       manager->fd_bufsize);

	swait_private.readset = manager->read_fds_copy;
	swait_private.writeset = manager->write_fds_copy;
	swait_private.maxfd = manager->maxfd + 1;

	n = select(swait_private.maxfd, swait_private.readset,
		   swait_private.writeset, NULL, tvp);
#endif

	*swaitp = &swait_private;
	return (n);
}
5835
/*
 * Process the descriptors reported ready by a preceding
 * isc__socketmgr_waitevents() call.  'swait' must be the pointer that
 * call handed back (always &swait_private in this build).
 *
 * Returns ISC_R_SUCCESS after dispatching, or ISC_R_NOTFOUND if there
 * is no socket manager.
 *
 * NOTE(review): if none of USE_KQUEUE/USE_EPOLL/USE_DEVPOLL/USE_SELECT
 * is defined, control falls off the end without a return value --
 * presumably the build guarantees exactly one backend; confirm.
 */
isc_result_t
isc__socketmgr_dispatch(isc_socketmgr_t *manager0, isc_socketwait_t *swait) {
	isc__socketmgr_t *manager = (isc__socketmgr_t *)manager0;

	REQUIRE(swait == &swait_private);

#ifdef USE_SHARED_MANAGER
	if (manager == NULL)
		manager = socketmgr;
#endif
	if (manager == NULL)
		return (ISC_R_NOTFOUND);

#if defined(USE_KQUEUE) || defined(USE_EPOLL) || defined(USE_DEVPOLL)
	(void)process_fds(manager, manager->events, swait->nevents);
	return (ISC_R_SUCCESS);
#elif defined(USE_SELECT)
	process_fds(manager, swait->maxfd, swait->readset, swait->writeset);
	return (ISC_R_SUCCESS);
#endif
}
5857#endif /* USE_WATCHER_THREAD */
5858
5859#ifdef BIND9
5860void
5861isc__socket_setname(isc_socket_t *socket0, const char *name, void *tag) {
5862	isc__socket_t *socket = (isc__socket_t *)socket0;
5863
5864	/*
5865	 * Name 'socket'.
5866	 */
5867
5868	REQUIRE(VALID_SOCKET(socket));
5869
5870	LOCK(&socket->lock);
5871	memset(socket->name, 0, sizeof(socket->name));
5872	strncpy(socket->name, name, sizeof(socket->name) - 1);
5873	socket->tag = tag;
5874	UNLOCK(&socket->lock);
5875}
5876
5877ISC_SOCKETFUNC_SCOPE const char *
5878isc__socket_getname(isc_socket_t *socket0) {
5879	isc__socket_t *socket = (isc__socket_t *)socket0;
5880
5881	return (socket->name);
5882}
5883
5884void *
5885isc__socket_gettag(isc_socket_t *socket0) {
5886	isc__socket_t *socket = (isc__socket_t *)socket0;
5887
5888	return (socket->tag);
5889}
5890#endif	/* BIND9 */
5891
#ifdef USE_SOCKETIMPREGISTER
/*
 * Register this socket implementation as the default one used by
 * isc_socketmgr_createinctx().
 *
 * Note: declared with an explicit (void) prototype; the old empty
 * parameter list was a K&R-style unspecified-arguments declarator.
 */
isc_result_t
isc__socket_register(void) {
	return (isc_socket_register(isc__socketmgr_create));
}
#endif
5898
5899ISC_SOCKETFUNC_SCOPE int
5900isc__socket_getfd(isc_socket_t *socket0) {
5901	isc__socket_t *socket = (isc__socket_t *)socket0;
5902
5903	return ((short) socket->fd);
5904}
5905
5906#if defined(HAVE_LIBXML2) && defined(BIND9)
5907
5908static const char *
5909_socktype(isc_sockettype_t type)
5910{
5911	if (type == isc_sockettype_udp)
5912		return ("udp");
5913	else if (type == isc_sockettype_tcp)
5914		return ("tcp");
5915	else if (type == isc_sockettype_unix)
5916		return ("unix");
5917	else if (type == isc_sockettype_fdwatch)
5918		return ("fdwatch");
5919	else
5920		return ("not-initialized");
5921}
5922
/*
 * Render the socket manager's state as XML into 'writer' for the
 * statistics channel: one <socket> element per socket with its id,
 * optional name, reference count, type, peer/local addresses, and a
 * list of state flags.  Both the manager lock and each per-socket lock
 * are held while that object is being serialized.
 *
 * NOTE(review): xmlTextWriter return codes are not checked here, and
 * the "pending_accept" state string uses an underscore while the other
 * states use hyphens -- consumers may depend on the exact spelling, so
 * it is left as is.
 */
ISC_SOCKETFUNC_SCOPE void
isc_socketmgr_renderxml(isc_socketmgr_t *mgr0, xmlTextWriterPtr writer) {
	isc__socketmgr_t *mgr = (isc__socketmgr_t *)mgr0;
	isc__socket_t *sock;
	char peerbuf[ISC_SOCKADDR_FORMATSIZE];
	isc_sockaddr_t addr;
	ISC_SOCKADDR_LEN_T len;

	LOCK(&mgr->lock);

#ifdef USE_SHARED_MANAGER
	xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
	xmlTextWriterWriteFormatString(writer, "%d", mgr->refs);
	xmlTextWriterEndElement(writer);
#endif	/* USE_SHARED_MANAGER */

	xmlTextWriterStartElement(writer, ISC_XMLCHAR "sockets");
	sock = ISC_LIST_HEAD(mgr->socklist);
	while (sock != NULL) {
		LOCK(&sock->lock);
		xmlTextWriterStartElement(writer, ISC_XMLCHAR "socket");

		/* Pointer value doubles as a unique socket id. */
		xmlTextWriterStartElement(writer, ISC_XMLCHAR "id");
		xmlTextWriterWriteFormatString(writer, "%p", sock);
		xmlTextWriterEndElement(writer);

		if (sock->name[0] != 0) {
			xmlTextWriterStartElement(writer, ISC_XMLCHAR "name");
			xmlTextWriterWriteFormatString(writer, "%s",
						       sock->name);
			xmlTextWriterEndElement(writer); /* name */
		}

		xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
		xmlTextWriterWriteFormatString(writer, "%d", sock->references);
		xmlTextWriterEndElement(writer);

		xmlTextWriterWriteElement(writer, ISC_XMLCHAR "type",
					  ISC_XMLCHAR _socktype(sock->type));

		if (sock->connected) {
			isc_sockaddr_format(&sock->peer_address, peerbuf,
					    sizeof(peerbuf));
			xmlTextWriterWriteElement(writer,
						  ISC_XMLCHAR "peer-address",
						  ISC_XMLCHAR peerbuf);
		}

		/* Local address straight from the kernel; skipped on error. */
		len = sizeof(addr);
		if (getsockname(sock->fd, &addr.type.sa, (void *)&len) == 0) {
			isc_sockaddr_format(&addr, peerbuf, sizeof(peerbuf));
			xmlTextWriterWriteElement(writer,
						  ISC_XMLCHAR "local-address",
						  ISC_XMLCHAR peerbuf);
		}

		xmlTextWriterStartElement(writer, ISC_XMLCHAR "states");
		if (sock->pending_recv)
			xmlTextWriterWriteElement(writer, ISC_XMLCHAR "state",
						ISC_XMLCHAR "pending-receive");
		if (sock->pending_send)
			xmlTextWriterWriteElement(writer, ISC_XMLCHAR "state",
						  ISC_XMLCHAR "pending-send");
		if (sock->pending_accept)
			xmlTextWriterWriteElement(writer, ISC_XMLCHAR "state",
						 ISC_XMLCHAR "pending_accept");
		if (sock->listener)
			xmlTextWriterWriteElement(writer, ISC_XMLCHAR "state",
						  ISC_XMLCHAR "listener");
		if (sock->connected)
			xmlTextWriterWriteElement(writer, ISC_XMLCHAR "state",
						  ISC_XMLCHAR "connected");
		if (sock->connecting)
			xmlTextWriterWriteElement(writer, ISC_XMLCHAR "state",
						  ISC_XMLCHAR "connecting");
		if (sock->bound)
			xmlTextWriterWriteElement(writer, ISC_XMLCHAR "state",
						  ISC_XMLCHAR "bound");

		xmlTextWriterEndElement(writer); /* states */

		xmlTextWriterEndElement(writer); /* socket */

		UNLOCK(&sock->lock);
		sock = ISC_LIST_NEXT(sock, link);
	}
	xmlTextWriterEndElement(writer); /* sockets */

	UNLOCK(&mgr->lock);
}
6013#endif /* HAVE_LIBXML2 */
6014