1/*
2 * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
3 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 *    derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28#include "event2/event-config.h"
29
30#ifdef WIN32
31#include <winsock2.h>
32#include <windows.h>
33#include <io.h>
34#endif
35
36#ifdef _EVENT_HAVE_VASPRINTF
37/* If we have vasprintf, we need to define this before we include stdio.h. */
38#define _GNU_SOURCE
39#endif
40
41#include <sys/types.h>
42
43#ifdef _EVENT_HAVE_SYS_TIME_H
44#include <sys/time.h>
45#endif
46
47#ifdef _EVENT_HAVE_SYS_SOCKET_H
48#include <sys/socket.h>
49#endif
50
51#ifdef _EVENT_HAVE_SYS_UIO_H
52#include <sys/uio.h>
53#endif
54
55#ifdef _EVENT_HAVE_SYS_IOCTL_H
56#include <sys/ioctl.h>
57#endif
58
59#ifdef _EVENT_HAVE_SYS_MMAN_H
60#include <sys/mman.h>
61#endif
62
63#ifdef _EVENT_HAVE_SYS_SENDFILE_H
64#include <sys/sendfile.h>
65#endif
66
67#include <errno.h>
68#include <stdio.h>
69#include <stdlib.h>
70#include <string.h>
71#ifdef _EVENT_HAVE_STDARG_H
72#include <stdarg.h>
73#endif
74#ifdef _EVENT_HAVE_UNISTD_H
75#include <unistd.h>
76#endif
77#include <limits.h>
78
79#include "event2/event.h"
80#include "event2/buffer.h"
81#include "event2/buffer_compat.h"
82#include "event2/bufferevent.h"
83#include "event2/bufferevent_compat.h"
84#include "event2/bufferevent_struct.h"
85#include "event2/thread.h"
86#include "event2/event-config.h"
87#include "log-internal.h"
88#include "mm-internal.h"
89#include "util-internal.h"
90#include "evthread-internal.h"
91#include "evbuffer-internal.h"
92#include "bufferevent-internal.h"
93
94/* some systems do not have MAP_FAILED */
95#ifndef MAP_FAILED
96#define MAP_FAILED	((void *)-1)
97#endif
98
99/* send file support */
100#if defined(_EVENT_HAVE_SYS_SENDFILE_H) && defined(_EVENT_HAVE_SENDFILE) && defined(__linux__)
101#define USE_SENDFILE		1
102#define SENDFILE_IS_LINUX	1
103#elif defined(_EVENT_HAVE_SENDFILE) && defined(__FreeBSD__)
104#define USE_SENDFILE		1
105#define SENDFILE_IS_FREEBSD	1
106#elif defined(_EVENT_HAVE_SENDFILE) && defined(__APPLE__)
107#define USE_SENDFILE		1
108#define SENDFILE_IS_MACOSX	1
109#elif defined(_EVENT_HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
110#define USE_SENDFILE		1
111#define SENDFILE_IS_SOLARIS	1
112#endif
113
114#ifdef USE_SENDFILE
115static int use_sendfile = 1;
116#endif
117#ifdef _EVENT_HAVE_MMAP
118static int use_mmap = 1;
119#endif
120
121
122/* Mask of user-selectable callback flags. */
123#define EVBUFFER_CB_USER_FLAGS	    0xffff
124/* Mask of all internal-use-only flags. */
125#define EVBUFFER_CB_INTERNAL_FLAGS  0xffff0000
126
127/* Flag set if the callback is using the cb_obsolete function pointer  */
128#define EVBUFFER_CB_OBSOLETE	       0x00040000
129
130/* evbuffer_chain support */
131#define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
132#define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
133	    0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
134
135#define CHAIN_PINNED(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
136#define CHAIN_PINNED_R(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)
137
138static void evbuffer_chain_align(struct evbuffer_chain *chain);
139static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
140    size_t datalen);
141static void evbuffer_deferred_callback(struct deferred_cb *cb, void *arg);
142static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
143    const struct evbuffer_ptr *pos, const char *mem, size_t len);
144static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
145    size_t datlen);
146
147#ifdef WIN32
148static int evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd,
149    ev_ssize_t howmuch);
150#else
151#define evbuffer_readfile evbuffer_read
152#endif
153
154static struct evbuffer_chain *
155evbuffer_chain_new(size_t size)
156{
157	struct evbuffer_chain *chain;
158	size_t to_alloc;
159
160	size += EVBUFFER_CHAIN_SIZE;
161
162	/* get the next largest memory that can hold the buffer */
163	to_alloc = MIN_BUFFER_SIZE;
164	while (to_alloc < size)
165		to_alloc <<= 1;
166
167	/* we get everything in one chunk */
168	if ((chain = mm_malloc(to_alloc)) == NULL)
169		return (NULL);
170
171	memset(chain, 0, EVBUFFER_CHAIN_SIZE);
172
173	chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;
174
175	/* this way we can manipulate the buffer to different addresses,
176	 * which is required for mmap for example.
177	 */
178	chain->buffer = EVBUFFER_CHAIN_EXTRA(u_char, chain);
179
180	return (chain);
181}
182
/* Release a chain, honoring pinning and specially-backed memory.
 *
 * A pinned chain (in use by an in-flight operation) is not freed; it is
 * flagged EVBUFFER_DANGLING so _evbuffer_chain_unpin() frees it once the
 * last pin is dropped.  Reference/mmap/sendfile chains run their
 * type-specific cleanup before the chain memory is returned. */
static inline void
evbuffer_chain_free(struct evbuffer_chain *chain)
{
	if (CHAIN_PINNED(chain)) {
		/* Defer the free until the pin is released. */
		chain->flags |= EVBUFFER_DANGLING;
		return;
	}
	if (chain->flags & (EVBUFFER_MMAP|EVBUFFER_SENDFILE|
		EVBUFFER_REFERENCE)) {
		if (chain->flags & EVBUFFER_REFERENCE) {
			/* Caller-supplied memory: invoke its cleanup
			 * callback, if one was registered. */
			struct evbuffer_chain_reference *info =
			    EVBUFFER_CHAIN_EXTRA(
				    struct evbuffer_chain_reference,
				    chain);
			if (info->cleanupfn)
				(*info->cleanupfn)(chain->buffer,
				    chain->buffer_len,
				    info->extra);
		}
#ifdef _EVENT_HAVE_MMAP
		if (chain->flags & EVBUFFER_MMAP) {
			/* Unmap the region, then close the backing fd. */
			struct evbuffer_chain_fd *info =
			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
				chain);
			if (munmap(chain->buffer, chain->buffer_len) == -1)
				event_warn("%s: munmap failed", __func__);
			if (close(info->fd) == -1)
				event_warn("%s: close(%d) failed",
				    __func__, info->fd);
		}
#endif
#ifdef USE_SENDFILE
		if (chain->flags & EVBUFFER_SENDFILE) {
			/* Sendfile chains own no buffer memory; only the
			 * file descriptor needs closing. */
			struct evbuffer_chain_fd *info =
			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
				chain);
			if (close(info->fd) == -1)
				event_warn("%s: close(%d) failed",
				    __func__, info->fd);
		}
#endif
	}

	mm_free(chain);
}
228
229static void
230evbuffer_free_all_chains(struct evbuffer_chain *chain)
231{
232	struct evbuffer_chain *next;
233	for (; chain; chain = next) {
234		next = chain->next;
235		evbuffer_chain_free(chain);
236	}
237}
238
#ifndef NDEBUG
/* Debug helper: true iff every chain from 'chain' to the end of the list
 * holds no data (off == 0).  Used only inside EVUTIL_ASSERT checks. */
static int
evbuffer_chains_all_empty(struct evbuffer_chain *chain)
{
	for (; chain; chain = chain->next) {
		if (chain->off)
			return 0;
	}
	return 1;
}
#else
/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
"unused variable" warnings. */
static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
	return 1;
}
#endif
256
257/* Free all trailing chains in 'buf' that are neither pinned nor empty, prior
258 * to replacing them all with a new chain.  Return a pointer to the place
259 * where the new chain will go.
260 *
261 * Internal; requires lock.  The caller must fix up buf->last and buf->first
262 * as needed; they might have been freed.
263 */
264static struct evbuffer_chain **
265evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
266{
267	struct evbuffer_chain **ch = buf->last_with_datap;
268	/* Find the first victim chain.  It might be *last_with_datap */
269	while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
270		ch = &(*ch)->next;
271	if (*ch) {
272		EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
273		evbuffer_free_all_chains(*ch);
274		*ch = NULL;
275	}
276	return ch;
277}
278
279/* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
280 * chains as necessary.  Requires lock.  Does not schedule callbacks.
281 */
static void
evbuffer_chain_insert(struct evbuffer *buf,
    struct evbuffer_chain *chain)
{
	ASSERT_EVBUFFER_LOCKED(buf);
	if (*buf->last_with_datap == NULL) {
		/* There are no chains data on the buffer at all. */
		EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
		EVUTIL_ASSERT(buf->first == NULL);
		buf->first = buf->last = chain;
	} else {
		struct evbuffer_chain **ch = buf->last_with_datap;
		/* Find the first victim chain.  It might be *last_with_datap */
		while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
			ch = &(*ch)->next;
		if (*ch == NULL) {
			/* There is no victim; just append this new chain. */
			buf->last->next = chain;
			if (chain->off)
				buf->last_with_datap = &buf->last->next;
		} else {
			/* Replace all victim chains with this chain. */
			EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
			evbuffer_free_all_chains(*ch);
			*ch = chain;
		}
		buf->last = chain;
	}
	/* Account for any data the inserted chain already carries. */
	buf->total_len += chain->off;
}
312
313static inline struct evbuffer_chain *
314evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
315{
316	struct evbuffer_chain *chain;
317	if ((chain = evbuffer_chain_new(datlen)) == NULL)
318		return NULL;
319	evbuffer_chain_insert(buf, chain);
320	return chain;
321}
322
/* Set pin bit 'flag' (an EVBUFFER_MEM_PINNED_* value) on 'chain'; the
 * bit must not already be set. */
void
_evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) == 0);
	chain->flags |= flag;
}
329
/* Clear pin bit 'flag' from 'chain'.  If the chain was marked dangling
 * while pinned (see evbuffer_chain_free()), free it now that this pin is
 * gone; evbuffer_chain_free() re-checks any remaining pins. */
void
_evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) != 0);
	chain->flags &= ~flag;
	if (chain->flags & EVBUFFER_DANGLING)
		evbuffer_chain_free(chain);
}
338
339struct evbuffer *
340evbuffer_new(void)
341{
342	struct evbuffer *buffer;
343
344	buffer = mm_calloc(1, sizeof(struct evbuffer));
345	if (buffer == NULL)
346		return (NULL);
347
348	TAILQ_INIT(&buffer->callbacks);
349	buffer->refcnt = 1;
350	buffer->last_with_datap = &buffer->first;
351
352	return (buffer);
353}
354
/* Set the given user-visible flag bits on 'buf'.  Only the low 32 bits
 * of 'flags' are stored.  Always returns 0. */
int
evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags |= (ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}
363
/* Clear the given user-visible flag bits on 'buf'.  Only the low 32 bits
 * of 'flags' are considered.  Always returns 0. */
int
evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags &= ~(ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}
372
/* Take an additional reference on 'buf'. */
void
_evbuffer_incref(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
	EVBUFFER_UNLOCK(buf);
}
380
/* Take a reference on 'buf' and return with its lock still held; pair
 * with _evbuffer_decref_and_unlock(). */
void
_evbuffer_incref_and_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
}
387
/* Make 'buffer' run its callbacks through 'base''s deferred-callback
 * queue instead of invoking them inline.  Always returns 0. */
int
evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
{
	EVBUFFER_LOCK(buffer);
	buffer->cb_queue = event_base_get_deferred_cb_queue(base);
	buffer->deferred_cbs = 1;
	event_deferred_cb_init(&buffer->deferred,
	    evbuffer_deferred_callback, buffer);
	EVBUFFER_UNLOCK(buffer);
	return 0;
}
399
/* Enable locking on 'buf'.  If 'lock' is NULL, allocate a new recursive
 * lock owned (and later freed) by the buffer; otherwise adopt the
 * caller's lock without taking ownership.  Returns 0 on success, -1 if
 * thread support is compiled out, a lock is already installed, or lock
 * allocation fails. */
int
evbuffer_enable_locking(struct evbuffer *buf, void *lock)
{
#ifdef _EVENT_DISABLE_THREAD_SUPPORT
	return -1;
#else
	if (buf->lock)
		return -1;

	if (!lock) {
		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock)
			return -1;
		buf->lock = lock;
		buf->own_lock = 1;
	} else {
		buf->lock = lock;
		buf->own_lock = 0;
	}

	return 0;
#endif
}
423
/* Record the bufferevent that owns 'buf'; the parent is refcounted
 * around deferred callbacks (see evbuffer_invoke_callbacks()). */
void
evbuffer_set_parent(struct evbuffer *buf, struct bufferevent *bev)
{
	EVBUFFER_LOCK(buf);
	buf->parent = bev;
	EVBUFFER_UNLOCK(buf);
}
431
/* Invoke the registered callbacks appropriate to the current mode.
 *
 * With 'running_deferred' set (called from the deferred-cb queue), every
 * enabled callback runs.  Otherwise, if this buffer defers callbacks,
 * only those flagged EVBUFFER_CB_NODEFER run now, and the n_add/n_del
 * counters are left intact for the deferred pass.  In the simple
 * non-deferred case, all enabled callbacks run and the counters are
 * cleared.  Requires the buffer lock. */
static void
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
	struct evbuffer_cb_entry *cbent, *next;
	struct evbuffer_cb_info info;
	size_t new_size;
	ev_uint32_t mask, masked_val;
	int clear = 1;

	if (running_deferred) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	} else if (buffer->deferred_cbs) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		/* Don't zero-out n_add/n_del, since the deferred callbacks
		   will want to see them. */
		clear = 0;
	} else {
		mask = EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	}

	ASSERT_EVBUFFER_LOCKED(buffer);

	if (TAILQ_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}
	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
		return;

	/* Reconstruct the buffer length from before the changes that
	 * triggered this callback run. */
	new_size = buffer->total_len;
	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
	info.n_added = buffer->n_add_for_cb;
	info.n_deleted = buffer->n_del_for_cb;
	if (clear) {
		buffer->n_add_for_cb = 0;
		buffer->n_del_for_cb = 0;
	}
	for (cbent = TAILQ_FIRST(&buffer->callbacks);
	     cbent != TAILQ_END(&buffer->callbacks);
	     cbent = next) {
		/* Get the 'next' pointer now in case this callback decides
		 * to remove itself or something. */
		next = TAILQ_NEXT(cbent, next);

		if ((cbent->flags & mask) != masked_val)
			continue;

		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
			cbent->cb.cb_obsolete(buffer,
			    info.orig_size, new_size, cbent->cbarg);
		else
			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
	}
}
489
/* Run (or schedule) callbacks after the buffer's contents changed.
 * Requires the buffer lock.  If callbacks are deferred and not already
 * queued, take a reference on the buffer (and its parent bufferevent, if
 * any) so both outlive the queue, then schedule the deferred callback.
 * Any EVBUFFER_CB_NODEFER callbacks still run immediately via
 * evbuffer_run_callbacks(). */
void
evbuffer_invoke_callbacks(struct evbuffer *buffer)
{
	if (TAILQ_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}

	if (buffer->deferred_cbs) {
		if (buffer->deferred.queued)
			return;
		_evbuffer_incref_and_lock(buffer);
		if (buffer->parent)
			bufferevent_incref(buffer->parent);
		EVBUFFER_UNLOCK(buffer);
		event_deferred_cb_schedule(buffer->cb_queue, &buffer->deferred);
	}

	evbuffer_run_callbacks(buffer, 0);
}
510
/* Deferred-queue trampoline: run the buffer's deferred callbacks, then
 * drop the buffer and parent references taken when the callback was
 * scheduled in evbuffer_invoke_callbacks(). */
static void
evbuffer_deferred_callback(struct deferred_cb *cb, void *arg)
{
	struct bufferevent *parent = NULL;
	struct evbuffer *buffer = arg;

	/* XXXX It would be better to run these callbacks without holding the
	 * lock */
	EVBUFFER_LOCK(buffer);
	parent = buffer->parent;
	evbuffer_run_callbacks(buffer, 1);
	_evbuffer_decref_and_unlock(buffer);
	if (parent)
		bufferevent_decref(parent);
}
526
527static void
528evbuffer_remove_all_callbacks(struct evbuffer *buffer)
529{
530	struct evbuffer_cb_entry *cbent;
531
532	while ((cbent = TAILQ_FIRST(&buffer->callbacks))) {
533	    TAILQ_REMOVE(&buffer->callbacks, cbent, next);
534	    mm_free(cbent);
535	}
536}
537
/* Drop one reference on 'buffer' (whose lock the caller holds) and
 * unlock it.  On the last reference: free all chains, remove all
 * callbacks, cancel any pending deferred callback, then release the
 * buffer and (if owned) its lock. */
void
_evbuffer_decref_and_unlock(struct evbuffer *buffer)
{
	struct evbuffer_chain *chain, *next;
	ASSERT_EVBUFFER_LOCKED(buffer);

	EVUTIL_ASSERT(buffer->refcnt > 0);

	if (--buffer->refcnt > 0) {
		EVBUFFER_UNLOCK(buffer);
		return;
	}

	for (chain = buffer->first; chain != NULL; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
	evbuffer_remove_all_callbacks(buffer);
	if (buffer->deferred_cbs)
		event_deferred_cb_cancel(buffer->cb_queue, &buffer->deferred);

	/* Unlock before freeing the lock itself; nobody else can hold a
	 * reference at this point. */
	EVBUFFER_UNLOCK(buffer);
	if (buffer->own_lock)
		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(buffer);
}
564
/* Public destructor: drop the caller's reference; the buffer is actually
 * freed once no other references remain. */
void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
	_evbuffer_decref_and_unlock(buffer);
}
571
/* Acquire the buffer's lock (a no-op if locking is not enabled). */
void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}
577
/* Release the buffer's lock (a no-op if locking is not enabled). */
void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}
583
584size_t
585evbuffer_get_length(const struct evbuffer *buffer)
586{
587	size_t result;
588
589	EVBUFFER_LOCK(buffer);
590
591	result = (buffer->total_len);
592
593	EVBUFFER_UNLOCK(buffer);
594
595	return result;
596}
597
598size_t
599evbuffer_get_contiguous_space(const struct evbuffer *buf)
600{
601	struct evbuffer_chain *chain;
602	size_t result;
603
604	EVBUFFER_LOCK(buf);
605	chain = buf->first;
606	result = (chain != NULL ? chain->off : 0);
607	EVBUFFER_UNLOCK(buf);
608
609	return result;
610}
611
/* Reserve at least 'size' bytes of writable space in 'buf', described by
 * up to 'n_vecs' extents written into 'vec'.  With n_vecs == 1 the space
 * is made contiguous in a single chain.  Returns the number of extents
 * used, or -1 if the end of the buffer is frozen, n_vecs < 1, or
 * expansion fails.  Commit the written bytes with
 * evbuffer_commit_space(). */
int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **chainp;
	int n = -1;

	EVBUFFER_LOCK(buf);
	if (buf->freeze_end)
		goto done;
	if (n_vecs < 1)
		goto done;
	if (n_vecs == 1) {
		if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
			goto done;

		/* Hand back all the free space in that chain, which may be
		 * more than 'size'. */
		vec[0].iov_base = CHAIN_SPACE_PTR(chain);
		vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain);
		EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
		n = 1;
	} else {
		if (_evbuffer_expand_fast(buf, size, n_vecs)<0)
			goto done;
		n = _evbuffer_read_setup_vecs(buf, size, vec, n_vecs,
				&chainp, 0);
	}

done:
	EVBUFFER_UNLOCK(buf);
	return n;

}
644
/* Advance buf->last_with_datap forward past any subsequent chains that
 * hold data, restoring the invariant that it points at the link to the
 * last chain with data.  Returns the number of links advanced.  Requires
 * the buffer lock. */
static int
advance_last_with_data(struct evbuffer *buf)
{
	int n = 0;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (!*buf->last_with_datap)
		return 0;

	while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
		buf->last_with_datap = &(*buf->last_with_datap)->next;
		++n;
	}
	return n;
}
660
/* Commit bytes written into space previously handed out by
 * evbuffer_reserve_space().  'vec' must describe prefixes of the same
 * extents that were reserved, in order.  Returns 0 on success, -1 if the
 * end of the buffer is frozen or the extents do not match.  Invokes
 * callbacks on success. */
int
evbuffer_commit_space(struct evbuffer *buf,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **firstchainp, **chainp;
	int result = -1;
	size_t added = 0;
	int i;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end)
		goto done;
	if (n_vecs == 0) {
		result = 0;
		goto done;
	} else if (n_vecs == 1 &&
	    (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) {
		/* The user only got or used one chain; it might not
		 * be the first one with space in it. */
		if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
			goto done;
		buf->last->off += vec[0].iov_len;
		added = vec[0].iov_len;
		if (added)
			advance_last_with_data(buf);
		goto okay;
	}

	/* Advance 'firstchain' to the first chain with space in it. */
	firstchainp = buf->last_with_datap;
	if (!*firstchainp)
		goto done;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* pass 1: make sure that the pointers and lengths of vecs[] are in
	 * bounds before we try to commit anything. */
	for (i=0; i<n_vecs; ++i) {
		if (!chain)
			goto done;
		if (vec[i].iov_base != (void*)CHAIN_SPACE_PTR(chain) ||
		    (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
			goto done;
		chain = chain->next;
	}
	/* pass 2: actually adjust all the chains. */
	chainp = firstchainp;
	for (i=0; i<n_vecs; ++i) {
		(*chainp)->off += vec[i].iov_len;
		added += vec[i].iov_len;
		if (vec[i].iov_len) {
			buf->last_with_datap = chainp;
		}
		chainp = &(*chainp)->next;
	}

okay:
	buf->total_len += added;
	buf->n_add_for_cb += added;
	result = 0;
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
730
731static inline int
732HAS_PINNED_R(struct evbuffer *buf)
733{
734	return (buf->last && CHAIN_PINNED_R(buf->last));
735}
736
737static inline void
738ZERO_CHAIN(struct evbuffer *dst)
739{
740	ASSERT_EVBUFFER_LOCKED(dst);
741	dst->first = NULL;
742	dst->last = NULL;
743	dst->last_with_datap = &(dst)->first;
744	dst->total_len = 0;
745}
746
747/* Prepares the contents of src to be moved to another buffer by removing
748 * read-pinned chains. The first pinned chain is saved in first, and the
749 * last in last. If src has no read-pinned chains, first and last are set
750 * to NULL. */
static int
PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
		struct evbuffer_chain **last)
{
	struct evbuffer_chain *chain, **pinned;

	ASSERT_EVBUFFER_LOCKED(src);

	if (!HAS_PINNED_R(src)) {
		*first = *last = NULL;
		return 0;
	}

	/* Locate the link to the first read-pinned chain; it is either
	 * *last_with_datap or the chain right after it. */
	pinned = src->last_with_datap;
	if (!CHAIN_PINNED_R(*pinned))
		pinned = &(*pinned)->next;
	EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
	chain = *first = *pinned;
	*last = src->last;

	/* If there's data in the first pinned chain, we need to allocate
	 * a new chain and copy the data over. */
	if (chain->off) {
		struct evbuffer_chain *tmp;

		EVUTIL_ASSERT(pinned == src->last_with_datap);
		tmp = evbuffer_chain_new(chain->off);
		if (!tmp)
			return -1;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
			chain->off);
		tmp->off = chain->off;
		/* The copy replaces the pinned chain in src; the pinned
		 * chain itself is emptied (data moved into misalign). */
		*src->last_with_datap = tmp;
		src->last = tmp;
		chain->misalign += chain->off;
		chain->off = 0;
	} else {
		/* No data to save: simply detach the pinned run. */
		src->last = *src->last_with_datap;
		*pinned = NULL;
	}

	return 0;
}
794
/* Re-attach the read-pinned chains saved by PRESERVE_PINNED() as the new
 * contents of 'src' (which has just been emptied into another buffer).
 * The pinned chains hold no data, so total_len becomes 0. */
static inline void
RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
		struct evbuffer_chain *last)
{
	ASSERT_EVBUFFER_LOCKED(src);

	if (!pinned) {
		ZERO_CHAIN(src);
		return;
	}

	src->first = pinned;
	src->last = last;
	src->last_with_datap = &src->first;
	src->total_len = 0;
}
811
/* Make 'dst' refer to exactly src's chain list.  The caller must already
 * have freed dst's old chains (see evbuffer_add_buffer()) and must fix
 * up 'src' afterwards (e.g. via RESTORE_PINNED()). */
static inline void
COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->first = src->first;
	/* Translate last_with_datap: if it pointed at src's first slot it
	 * must now point at dst's first slot. */
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->first;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len = src->total_len;
}
825
/* Append src's chain list after dst's last chain.  dst must be non-empty
 * (dst->last != NULL); the caller fixes up 'src' afterwards. */
static void
APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->last->next = src->first;
	/* Translate last_with_datap into dst's list. */
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->last->next;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len += src->total_len;
}
839
/* Insert src's chain list before dst's first chain.  The caller fixes up
 * 'src' afterwards. */
static void
PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	src->last->next = dst->first;
	dst->first = src->first;
	dst->total_len += src->total_len;
	/* Fix last_with_datap: if dst had no data, the last data chain now
	 * comes from src; if dst's data started at its first chain, it now
	 * starts right after src's last chain. */
	if (*dst->last_with_datap == NULL) {
		if (src->last_with_datap == &(src)->first)
			dst->last_with_datap = &dst->first;
		else
			dst->last_with_datap = src->last_with_datap;
	} else if (dst->last_with_datap == &dst->first) {
		dst->last_with_datap = &src->last->next;
	}
}
857
/* Move all data from 'inbuf' to the end of 'outbuf' without copying
 * (chains are relinked; read-pinned chains stay behind in inbuf).
 * Returns 0 on success, -1 if frozen or if preserving pinned chains
 * fails.  No-op when inbuf is empty or the buffers are the same. */
int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (in_total_len == 0 || outbuf == inbuf)
		goto done;

	if (outbuf->freeze_end || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		APPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
903
/* Move all data from 'inbuf' to the front of 'outbuf' without copying
 * (chains are relinked; read-pinned chains stay behind in inbuf).
 * Returns 0 on success, -1 if frozen or if preserving pinned chains
 * fails.  No-op when inbuf is empty or the buffers are the same. */
int
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);

	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (!in_total_len || inbuf == outbuf)
		goto done;

	if (outbuf->freeze_start || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		PREPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);
done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
949
/* Discard the first 'len' bytes of 'buf' (clamped to the buffer length).
 * Fully-drained chains are freed, except a read-pinned last chain, which
 * is emptied in place.  Returns 0 on success, -1 if the front of the
 * buffer is frozen.  Invokes callbacks. */
int
evbuffer_drain(struct evbuffer *buf, size_t len)
{
	struct evbuffer_chain *chain, *next;
	size_t remaining, old_len;
	int result = 0;

	EVBUFFER_LOCK(buf);
	old_len = buf->total_len;

	if (old_len == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	if (len >= old_len && !HAS_PINNED_R(buf)) {
		/* Fast path: drop everything. */
		len = old_len;
		for (chain = buf->first; chain != NULL; chain = next) {
			next = chain->next;
			evbuffer_chain_free(chain);
		}

		ZERO_CHAIN(buf);
	} else {
		if (len >= old_len)
			len = old_len;

		buf->total_len -= len;
		remaining = len;
		for (chain = buf->first;
		     remaining >= chain->off;
		     chain = next) {
			next = chain->next;
			remaining -= chain->off;

			/* If last_with_datap referred to this chain (or its
			 * next link), reset it; advance_last_with_data-style
			 * fixups are not needed since data now starts at
			 * buf->first. */
			if (chain == *buf->last_with_datap) {
				buf->last_with_datap = &buf->first;
			}
			if (&chain->next == buf->last_with_datap)
				buf->last_with_datap = &buf->first;

			if (CHAIN_PINNED_R(chain)) {
				/* Can't free a read-pinned chain; just empty
				 * it.  It must be the last one drained. */
				EVUTIL_ASSERT(remaining == 0);
				chain->misalign += chain->off;
				chain->off = 0;
				break;
			} else
				evbuffer_chain_free(chain);
		}

		buf->first = chain;
		if (chain) {
			/* Partially-drained chain: skip the drained prefix. */
			chain->misalign += remaining;
			chain->off -= remaining;
		}
	}

	buf->n_del_for_cb += len;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
1018
1019/* Reads data from an event buffer and drains the bytes read */
1020int
1021evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
1022{
1023	ev_ssize_t n;
1024	EVBUFFER_LOCK(buf);
1025	n = evbuffer_copyout(buf, data_out, datlen);
1026	if (n > 0) {
1027		if (evbuffer_drain(buf, n)<0)
1028			n = -1;
1029	}
1030	EVBUFFER_UNLOCK(buf);
1031	return (int)n;
1032}
1033
/* Copy up to 'datlen' bytes from the front of 'buf' into 'data_out'
 * without draining them.  Returns the number of bytes copied, or -1 if
 * the front of the buffer is frozen. */
ev_ssize_t
evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
{
	/*XXX fails badly on sendfile case. */
	struct evbuffer_chain *chain;
	char *data = data_out;
	size_t nread;
	ev_ssize_t result = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (datlen >= buf->total_len)
		datlen = buf->total_len;

	if (datlen == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	nread = datlen;

	/* Copy whole chains while the request spans them... */
	while (datlen && datlen >= chain->off) {
		memcpy(data, chain->buffer + chain->misalign, chain->off);
		data += chain->off;
		datlen -= chain->off;

		chain = chain->next;
		EVUTIL_ASSERT(chain || datlen==0);
	}

	/* ...then the partial tail from the final chain. */
	if (datlen) {
		EVUTIL_ASSERT(chain);
		memcpy(data, chain->buffer + chain->misalign, datlen);
	}

	result = nread;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
1079
1080/* reads data from the src buffer to the dst buffer, avoids memcpy as
1081 * possible. */
1082/*  XXXX should return ev_ssize_t */
/* Move up to 'datlen' bytes from the front of 'src' to the end of 'dst',
 * relinking whole chains where possible and copying only the final
 * partial chain.  Returns the number of bytes moved, or -1 if either
 * buffer is frozen on the relevant side. */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
{
	/*XXX We should have an option to force this to be zero-copy.*/

	/*XXX can fail badly on sendfile case. */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;

	EVBUFFER_LOCK2(src, dst);

	chain = previous = src->first;

	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	}

	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	}

	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = (int)datlen; /*XXXX should return ev_ssize_t*/
		goto done;
	}

	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last with data from src unless we
		 * remove all chains, in which case we would have done the if
		 * block above */
		EVUTIL_ASSERT(chain != *src->last_with_datap);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		if (src->last_with_datap == &chain->next)
			src->last_with_datap = &src->first;
		chain = chain->next;
	}

	if (nread) {
		/* we can remove the chain */
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(dst);

		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			*chp = src->first;
		}
		dst->last = previous;
		previous->next = NULL;
		src->first = chain;
		advance_last_with_data(dst);

		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	}

	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	/* NOTE(review): evbuffer_add() can fail on OOM; its return value is
	 * ignored here, which would under-move 'datlen' bytes while still
	 * draining them from src below — confirm against upstream fix. */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;

	/* You might think we would want to increment dst->n_add_for_cb
	 * here too.  But evbuffer_add above already took care of that.
	 */
	src->total_len -= nread;
	src->n_del_for_cb += nread;

	if (nread) {
		evbuffer_invoke_callbacks(dst);
		evbuffer_invoke_callbacks(src);
	}
	result = (int)nread;/*XXXX should change return type */

done:
	EVBUFFER_UNLOCK2(src, dst);
	return result;
}
1172
/* Make the first 'size' bytes of 'buf' contiguous and return a pointer to
 * them.  size < 0 means "linearize the entire buffer".  Returns NULL when
 * size exceeds the buffer length, when a pinned chain prevents the copy,
 * or on allocation failure. */
unsigned char *
evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
{
	struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
	unsigned char *buffer, *result = NULL;
	ev_ssize_t remaining;
	int removed_last_with_data = 0;
	int removed_last_with_datap = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (size < 0)
		size = buf->total_len;
	/* if size > buf->total_len, we cannot guarantee to the user that she
	 * is going to have a long enough buffer afterwards; so we return
	 * NULL */
	if (size == 0 || (size_t)size > buf->total_len)
		goto done;

	/* No need to pull up anything; the first size bytes are
	 * already here. */
	if (chain->off >= (size_t)size) {
		result = chain->buffer + chain->misalign;
		goto done;
	}

	/* Make sure that none of the chains we need to copy from is pinned. */
	remaining = size - chain->off;
	EVUTIL_ASSERT(remaining >= 0);
	for (tmp=chain->next; tmp; tmp=tmp->next) {
		if (CHAIN_PINNED(tmp))
			goto done;
		if (tmp->off >= (size_t)remaining)
			break;
		remaining -= tmp->off;
	}

	if (CHAIN_PINNED(chain)) {
		/* The first chain may not move, so the pulled-up bytes have
		 * to fit into its free tail space. */
		size_t old_off = chain->off;
		if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
			/* not enough room at end of chunk. */
			goto done;
		}
		buffer = CHAIN_SPACE_PTR(chain);
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
		/* already have enough space in the first chain */
		size_t old_off = chain->off;
		buffer = chain->buffer + chain->misalign + chain->off;
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else {
		/* Allocate a brand-new first chain large enough for 'size'. */
		if ((tmp = evbuffer_chain_new(size)) == NULL) {
			event_warn("%s: out of memory", __func__);
			goto done;
		}
		buffer = tmp->buffer;
		tmp->off = size;
		buf->first = tmp;
	}

	/* TODO(niels): deal with buffers that point to NULL like sendfile */

	/* Copy and free every chunk that will be entirely pulled into tmp */
	last_with_data = *buf->last_with_datap;
	for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
		next = chain->next;

		memcpy(buffer, chain->buffer + chain->misalign, chain->off);
		size -= chain->off;
		buffer += chain->off;
		if (chain == last_with_data)
			removed_last_with_data = 1;
		if (&chain->next == buf->last_with_datap)
			removed_last_with_datap = 1;

		evbuffer_chain_free(chain);
	}

	/* 'chain' (if any) is only partially consumed: copy the prefix we
	 * still need and advance its start past the copied bytes. */
	if (chain != NULL) {
		memcpy(buffer, chain->buffer + chain->misalign, size);
		chain->misalign += size;
		chain->off -= size;
	} else {
		buf->last = tmp;
	}

	tmp->next = chain;

	/* Repair last_with_datap if the chain it pointed at/into was freed. */
	if (removed_last_with_data) {
		buf->last_with_datap = &buf->first;
	} else if (removed_last_with_datap) {
		if (buf->first->next && buf->first->next->off)
			buf->last_with_datap = &buf->first->next;
		else
			buf->last_with_datap = &buf->first;
	}

	result = (tmp->buffer + tmp->misalign);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
1284
1285/*
1286 * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
1287 * The returned buffer needs to be freed by the called.
1288 */
1289char *
1290evbuffer_readline(struct evbuffer *buffer)
1291{
1292	return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
1293}
1294
1295static inline ev_ssize_t
1296evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
1297{
1298	struct evbuffer_chain *chain = it->_internal.chain;
1299	size_t i = it->_internal.pos_in_chain;
1300	while (chain != NULL) {
1301		char *buffer = (char *)chain->buffer + chain->misalign;
1302		char *cp = memchr(buffer+i, chr, chain->off-i);
1303		if (cp) {
1304			it->_internal.chain = chain;
1305			it->_internal.pos_in_chain = cp - buffer;
1306			it->pos += (cp - buffer - i);
1307			return it->pos;
1308		}
1309		it->pos += chain->off - i;
1310		i = 0;
1311		chain = chain->next;
1312	}
1313
1314	return (-1);
1315}
1316
/** Helper: scan the first 'len' bytes of 's' in CHUNK_SZ-byte chunks and
 *  return a pointer to the earliest '\r' or '\n', or NULL if neither
 *  occurs. */
static inline char *
find_eol_char(char *s, size_t len)
{
#define CHUNK_SZ 128
	/* Lots of benchmarking found this approach to be faster in practice
	 * than doing two memchrs over the whole buffer, doing a memchr on each
	 * char of the buffer, or trying to emulate memchr by hand. */
	char *s_end, *cr, *lf;
	s_end = s+len;
	while (s < s_end) {
		/* Compare the remaining length instead of forming s+CHUNK_SZ:
		 * computing a pointer past one-past-the-end of the buffer is
		 * undefined behavior in C. */
		size_t chunk = ((size_t)(s_end - s) > CHUNK_SZ) ?
		    CHUNK_SZ : (size_t)(s_end - s);
		cr = memchr(s, '\r', chunk);
		lf = memchr(s, '\n', chunk);
		if (cr) {
			/* Return whichever EOL character appears first. */
			if (lf && lf < cr)
				return lf;
			return cr;
		} else if (lf) {
			return lf;
		}
		/* Advance by the bytes actually examined, never past s_end. */
		s += chunk;
	}

	return NULL;
#undef CHUNK_SZ
}
1343
1344static ev_ssize_t
1345evbuffer_find_eol_char(struct evbuffer_ptr *it)
1346{
1347	struct evbuffer_chain *chain = it->_internal.chain;
1348	size_t i = it->_internal.pos_in_chain;
1349	while (chain != NULL) {
1350		char *buffer = (char *)chain->buffer + chain->misalign;
1351		char *cp = find_eol_char(buffer+i, chain->off-i);
1352		if (cp) {
1353			it->_internal.chain = chain;
1354			it->_internal.pos_in_chain = cp - buffer;
1355			it->pos += (cp - buffer) - i;
1356			return it->pos;
1357		}
1358		it->pos += chain->off - i;
1359		i = 0;
1360		chain = chain->next;
1361	}
1362
1363	return (-1);
1364}
1365
/* Count how many consecutive bytes at/after 'ptr' are members of the set
 * 'chrset' (a NUL-terminated string), advancing 'ptr' accordingly.
 * Returns the count, or -1 if 'ptr' has no chain. */
static inline int
evbuffer_strspn(
	struct evbuffer_ptr *ptr, const char *chrset)
{
	int count = 0;
	struct evbuffer_chain *chain = ptr->_internal.chain;
	size_t i = ptr->_internal.pos_in_chain;

	if (!chain)
		return -1;

	while (1) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		for (; i < chain->off; ++i) {
			const char *p = chrset;
			while (*p) {
				if (buffer[i] == *p++)
					goto next;
			}
			/* buffer[i] is not in chrset: stop here. */
			ptr->_internal.chain = chain;
			ptr->_internal.pos_in_chain = i;
			ptr->pos += count;
			return count;
		next:
			++count;
		}
		i = 0;

		if (! chain->next) {
			/* Ran off the end of the buffer.
			 * NOTE(review): pos_in_chain is left as 0 even though
			 * this whole chain was consumed; callers in this file
			 * appear to use only ->pos / the count afterwards —
			 * confirm before relying on the chain position. */
			ptr->_internal.chain = chain;
			ptr->_internal.pos_in_chain = i;
			ptr->pos += count;
			return count;
		}

		chain = chain->next;
	}
}
1404
1405
1406static inline char
1407evbuffer_getchr(struct evbuffer_ptr *it)
1408{
1409	struct evbuffer_chain *chain = it->_internal.chain;
1410	size_t off = it->_internal.pos_in_chain;
1411
1412	return chain->buffer[chain->misalign + off];
1413}
1414
/* Locate the end-of-line marker of style 'eol_style' at or after '*start'
 * (or the buffer start if 'start' is NULL).  On success the returned
 * evbuffer_ptr points at the first EOL byte and '*eol_len_out' (if
 * non-NULL) receives the number of EOL bytes to drain; on failure the
 * returned ptr has pos == -1. */
struct evbuffer_ptr
evbuffer_search_eol(struct evbuffer *buffer,
    struct evbuffer_ptr *start, size_t *eol_len_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it, it2;
	size_t extra_drain = 0;
	int ok = 0;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&it, start, sizeof(it));
	} else {
		it.pos = 0;
		it._internal.chain = buffer->first;
		it._internal.pos_in_chain = 0;
	}

	/* the eol_style determines our first stop character and how many
	 * characters we are going to drain afterwards. */
	switch (eol_style) {
	case EVBUFFER_EOL_ANY:
		/* Any run of '\r' and '\n' bytes counts as one EOL. */
		if (evbuffer_find_eol_char(&it) < 0)
			goto done;
		memcpy(&it2, &it, sizeof(it));
		extra_drain = evbuffer_strspn(&it2, "\r\n");
		break;
	case EVBUFFER_EOL_CRLF_STRICT: {
		/* Only a literal "\r\n" terminates the line. */
		it = evbuffer_search(buffer, "\r\n", 2, &it);
		if (it.pos < 0)
			goto done;
		extra_drain = 2;
		break;
	}
	case EVBUFFER_EOL_CRLF:
		/* Accept either "\r\n" or a bare '\n'; a lone '\r' is not
		 * an EOL, so keep scanning past it. */
		while (1) {
			if (evbuffer_find_eol_char(&it) < 0)
				goto done;
			if (evbuffer_getchr(&it) == '\n') {
				extra_drain = 1;
				break;
			} else if (!evbuffer_ptr_memcmp(
				    buffer, &it, "\r\n", 2)) {
				extra_drain = 2;
				break;
			} else {
				if (evbuffer_ptr_set(buffer, &it, 1,
					EVBUFFER_PTR_ADD)<0)
					goto done;
			}
		}
		break;
	case EVBUFFER_EOL_LF:
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		break;
	default:
		goto done;
	}

	ok = 1;
done:
	EVBUFFER_UNLOCK(buffer);

	/* Signal failure through the returned position. */
	if (!ok) {
		it.pos = -1;
	}
	if (eol_len_out)
		*eol_len_out = extra_drain;

	return it;
}
1489
/* Extract one line (without its EOL marker) from 'buffer' as a freshly
 * mm_malloc'd NUL-terminated string; the caller frees it.  '*n_read_out'
 * (if non-NULL) receives the line length.  Returns NULL when no complete
 * line is present, the buffer's start is frozen, or allocation fails. */
char *
evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
		enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it;
	char *line;
	size_t n_to_copy=0, extra_drain=0;
	char *result = NULL;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
	if (it.pos < 0)
		goto done;
	n_to_copy = it.pos;

	if ((line = mm_malloc(n_to_copy+1)) == NULL) {
		event_warn("%s: out of memory", __func__);
		goto done;
	}

	/* Copy the line out, then drop the EOL bytes that followed it.
	 * NOTE(review): evbuffer_remove()'s result is ignored; it should
	 * equal n_to_copy since the lock is held throughout — confirm. */
	evbuffer_remove(buffer, line, n_to_copy);
	line[n_to_copy] = '\0';

	evbuffer_drain(buffer, extra_drain);
	result = line;
done:
	EVBUFFER_UNLOCK(buffer);

	if (n_read_out)
		*n_read_out = result ? n_to_copy : 0;

	return result;
}
1528
1529#define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096
1530
/* Adds data to an event buffer */

/* Append 'datlen' bytes from 'data_in' to the end of 'buf'.
 * Returns 0 on success, -1 on failure (frozen end or OOM). */
int
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	const unsigned char *data = data_in;
	size_t remain, to_alloc;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}

	chain = buf->last;

	/* If there are no chains allocated for this buffer, allocate one
	 * big enough to hold all the data. */
	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		remain = (size_t)(chain->buffer_len - chain->misalign - chain->off);
		if (remain >= datlen) {
			/* there's enough space to hold all the data in the
			 * current last chain */
			memcpy(chain->buffer + chain->misalign + chain->off,
			    data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (!CHAIN_PINNED(chain) &&
		    evbuffer_chain_should_realign(chain, datlen)) {
			/* we can fit the data into the misalignment */
			evbuffer_chain_align(chain);

			memcpy(chain->buffer + chain->off, data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		}
	} else {
		/* we cannot write any data to the last chain */
		remain = 0;
	}

	/* we need to add another chain */
	/* Grow geometrically (double the last chain up to the auto-size cap)
	 * so repeated small adds amortize to few allocations. */
	to_alloc = chain->buffer_len;
	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
		to_alloc <<= 1;
	if (datlen > to_alloc)
		to_alloc = datlen;
	tmp = evbuffer_chain_new(to_alloc);
	if (tmp == NULL)
		goto done;

	/* First fill whatever tail space the old last chain still has... */
	if (remain) {
		memcpy(chain->buffer + chain->misalign + chain->off,
		    data, remain);
		chain->off += remain;
		buf->total_len += remain;
		buf->n_add_for_cb += remain;
	}

	data += remain;
	datlen -= remain;

	/* ...then put the remainder into the new chain. */
	memcpy(tmp->buffer, data, datlen);
	tmp->off = datlen;
	evbuffer_chain_insert(buf, tmp);
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
1618
1619int
1620evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
1621{
1622	struct evbuffer_chain *chain, *tmp;
1623	int result = -1;
1624
1625	EVBUFFER_LOCK(buf);
1626
1627	if (buf->freeze_start) {
1628		goto done;
1629	}
1630
1631	chain = buf->first;
1632
1633	if (chain == NULL) {
1634		chain = evbuffer_chain_new(datlen);
1635		if (!chain)
1636			goto done;
1637		evbuffer_chain_insert(buf, chain);
1638	}
1639
1640	/* we cannot touch immutable buffers */
1641	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
1642		/* If this chain is empty, we can treat it as
1643		 * 'empty at the beginning' rather than 'empty at the end' */
1644		if (chain->off == 0)
1645			chain->misalign = chain->buffer_len;
1646
1647		if ((size_t)chain->misalign >= datlen) {
1648			/* we have enough space to fit everything */
1649			memcpy(chain->buffer + chain->misalign - datlen,
1650			    data, datlen);
1651			chain->off += datlen;
1652			chain->misalign -= datlen;
1653			buf->total_len += datlen;
1654			buf->n_add_for_cb += datlen;
1655			goto out;
1656		} else if (chain->misalign) {
1657			/* we can only fit some of the data. */
1658			memcpy(chain->buffer,
1659			    (char*)data + datlen - chain->misalign,
1660			    (size_t)chain->misalign);
1661			chain->off += (size_t)chain->misalign;
1662			buf->total_len += (size_t)chain->misalign;
1663			buf->n_add_for_cb += (size_t)chain->misalign;
1664			datlen -= (size_t)chain->misalign;
1665			chain->misalign = 0;
1666		}
1667	}
1668
1669	/* we need to add another chain */
1670	if ((tmp = evbuffer_chain_new(datlen)) == NULL)
1671		goto done;
1672	buf->first = tmp;
1673	if (buf->last_with_datap == &buf->first)
1674		buf->last_with_datap = &tmp->next;
1675
1676	tmp->next = chain;
1677
1678	tmp->off = datlen;
1679	tmp->misalign = tmp->buffer_len - datlen;
1680
1681	memcpy(tmp->buffer + tmp->misalign, data, datlen);
1682	buf->total_len += datlen;
1683	buf->n_add_for_cb += (size_t)chain->misalign;
1684
1685out:
1686	evbuffer_invoke_callbacks(buf);
1687	result = 0;
1688done:
1689	EVBUFFER_UNLOCK(buf);
1690	return result;
1691}
1692
/** Helper: realigns the memory in chain->buffer so that misalign is 0. */
static void
evbuffer_chain_align(struct evbuffer_chain *chain)
{
	/* Moving bytes inside an immutable or pinned chain would invalidate
	 * pointers that others may still hold into it. */
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
	/* memmove, not memcpy: source and destination regions may overlap. */
	memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
	chain->misalign = 0;
}
1702
1703#define MAX_TO_COPY_IN_EXPAND 4096
1704#define MAX_TO_REALIGN_IN_EXPAND 2048
1705
1706/** Helper: return true iff we should realign chain to fit datalen bytes of
1707    data in it. */
1708static int
1709evbuffer_chain_should_realign(struct evbuffer_chain *chain,
1710    size_t datlen)
1711{
1712	return chain->buffer_len - chain->off >= datlen &&
1713	    (chain->off < chain->buffer_len / 2) &&
1714	    (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
1715}
1716
/* Expands the available space in the event buffer to at least datlen, all in
 * a single chunk.  Return that chunk, or NULL on allocation failure. */
static struct evbuffer_chain *
evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain, **chainp;
	struct evbuffer_chain *result = NULL;
	ASSERT_EVBUFFER_LOCKED(buf);

	chainp = buf->last_with_datap;

	/* XXX If *chainp is no longer writeable, but has enough space in its
	 * misalign, this might be a bad idea: we could still use *chainp, not
	 * (*chainp)->next. */
	if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
		chainp = &(*chainp)->next;

	/* 'chain' now points to the first chain with writable space (if any)
	 * We will either use it, realign it, replace it, or resize it. */
	chain = *chainp;

	if (chain == NULL ||
	    (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
		/* We can't use the last_with_data chain at all.  Just add a
		 * new one that's big enough. */
		goto insert_new;
	}

	/* If we can fit all the data, then we don't have to do anything */
	if (CHAIN_SPACE_LEN(chain) >= datlen) {
		result = chain;
		goto ok;
	}

	/* If the chain is completely empty, just replace it by adding a new
	 * empty chain. */
	if (chain->off == 0) {
		goto insert_new;
	}

	/* If the misalignment plus the remaining space fulfills our data
	 * needs, we could just force an alignment to happen.  Afterwards, we
	 * have enough space.  But only do this if we're saving a lot of space
	 * and not moving too much data.  Otherwise the space savings are
	 * probably offset by the time lost in copying.
	 */
	if (evbuffer_chain_should_realign(chain, datlen)) {
		evbuffer_chain_align(chain);
		result = chain;
		goto ok;
	}

	/* At this point, we can either resize the last chunk with space in
	 * it, use the next chunk after it, or add a new chunk.  If we add a
	 * new chunk, we waste CHAIN_SPACE_LEN(chain) bytes in the former
	 * last chunk.  If we resize, we have to copy chain->off bytes.
	 */

	/* Would expanding this chunk be affordable and worthwhile? */
	if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
	    chain->off > MAX_TO_COPY_IN_EXPAND) {
		/* It's not worth resizing this chain. Can the next one be
		 * used? */
		if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
			/* Yes, we can just use the next chain (which should
			 * be empty. */
			result = chain->next;
			goto ok;
		} else {
			/* No; append a new chain (which will free all
			 * terminal empty chains.) */
			goto insert_new;
		}
	} else {
		/* Okay, we're going to try to resize this chain: Not doing so
		 * would waste at least 1/8 of its current allocation, and we
		 * can do so without having to copy more than
		 * MAX_TO_COPY_IN_EXPAND bytes. */
		/* figure out how much space we need */
		size_t length = chain->off + datlen;
		struct evbuffer_chain *tmp = evbuffer_chain_new(length);
		if (tmp == NULL)
			goto err;

		/* copy the data over that we had so far */
		tmp->off = chain->off;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
		    chain->off);
		/* fix up the list */
		EVUTIL_ASSERT(*chainp == chain);
		result = *chainp = tmp;

		if (buf->last == chain)
			buf->last = tmp;

		tmp->next = chain->next;
		evbuffer_chain_free(chain);
		goto ok;
	}

insert_new:
	result = evbuffer_chain_insert_new(buf, datlen);
	if (!result)
		goto err;
ok:
	EVUTIL_ASSERT(result);
	EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
err:
	return result;
}
1827
/* Make sure that datlen bytes are available for writing in the last n
 * chains.  Never copies or moves data. */
int
_evbuffer_expand_fast(struct evbuffer *buf, size_t datlen, int n)
{
	struct evbuffer_chain *chain = buf->last, *tmp, *next;
	size_t avail;
	int used;

	ASSERT_EVBUFFER_LOCKED(buf);
	EVUTIL_ASSERT(n >= 2);

	if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
		/* There is no last chunk, or we can't touch the last chunk.
		 * Just add a new chunk. */
		chain = evbuffer_chain_new(datlen);
		if (chain == NULL)
			return (-1);

		evbuffer_chain_insert(buf, chain);
		return (0);
	}

	used = 0; /* number of chains we're using space in. */
	avail = 0; /* how much space they have. */
	/* How many bytes can we stick at the end of buffer as it is?  Iterate
	 * over the chains at the end of the buffer, trying to see how much
	 * space we have in the first n. */
	for (chain = *buf->last_with_datap; chain; chain = chain->next) {
		if (chain->off) {
			size_t space = (size_t) CHAIN_SPACE_LEN(chain);
			/* Only the chain at *last_with_datap may hold data;
			 * everything after it must be empty. */
			EVUTIL_ASSERT(chain == *buf->last_with_datap);
			if (space) {
				avail += space;
				++used;
			}
		} else {
			/* No data in chain; realign it. */
			chain->misalign = 0;
			avail += chain->buffer_len;
			++used;
		}
		if (avail >= datlen) {
			/* There is already enough space.  Just return */
			return (0);
		}
		if (used == n)
			break;
	}

	/* There wasn't enough space in the first n chains with space in
	 * them. Either add a new chain with enough space, or replace all
	 * empty chains with one that has enough space, depending on n. */
	if (used < n) {
		/* The loop ran off the end of the chains before it hit n
		 * chains; we can add another. */
		EVUTIL_ASSERT(chain == NULL);

		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL)
			return (-1);

		buf->last->next = tmp;
		buf->last = tmp;
		/* (we would only set last_with_data if we added the first
		 * chain. But if the buffer had no chains, we would have
		 * just allocated a new chain earlier) */
		return (0);
	} else {
		/* Nuke _all_ the empty chains. */
		int rmv_all = 0; /* True iff we removed last_with_data. */
		chain = *buf->last_with_datap;
		if (!chain->off) {
			EVUTIL_ASSERT(chain == buf->first);
			rmv_all = 1;
			avail = 0;
		} else {
			/* Keep the chain that still holds data; free the
			 * empty ones that follow it. */
			avail = (size_t) CHAIN_SPACE_LEN(chain);
			chain = chain->next;
		}


		for (; chain; chain = next) {
			next = chain->next;
			EVUTIL_ASSERT(chain->off == 0);
			evbuffer_chain_free(chain);
		}
		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL) {
			/* Restore a consistent chain list before failing. */
			if (rmv_all) {
				ZERO_CHAIN(buf);
			} else {
				buf->last = *buf->last_with_datap;
				(*buf->last_with_datap)->next = NULL;
			}
			return (-1);
		}

		if (rmv_all) {
			buf->first = buf->last = tmp;
			buf->last_with_datap = &buf->first;
		} else {
			(*buf->last_with_datap)->next = tmp;
			buf->last = tmp;
		}
		return (0);
	}
}
1936
/* Public wrapper: guarantee that 'buf' can accept 'datlen' more bytes in
 * one contiguous chain.  Returns 0 on success, -1 on failure. */
int
evbuffer_expand(struct evbuffer *buf, size_t datlen)
{
	int rv;

	EVBUFFER_LOCK(buf);
	rv = (evbuffer_expand_singlechain(buf, datlen) != NULL) ? 0 : -1;
	EVBUFFER_UNLOCK(buf);
	return rv;
}
1947
1948/*
1949 * Reads data from a file descriptor into a buffer.
1950 */
1951
1952#if defined(_EVENT_HAVE_SYS_UIO_H) || defined(WIN32)
1953#define USE_IOVEC_IMPL
1954#endif
1955
1956#ifdef USE_IOVEC_IMPL
1957
1958#ifdef _EVENT_HAVE_SYS_UIO_H
1959/* number of iovec we use for writev, fragmentation is going to determine
1960 * how much we end up writing */
1961
1962#define DEFAULT_WRITE_IOVEC 128
1963
1964#if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
1965#define NUM_WRITE_IOVEC UIO_MAXIOV
1966#elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
1967#define NUM_WRITE_IOVEC IOV_MAX
1968#else
1969#define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
1970#endif
1971
1972#define IOV_TYPE struct iovec
1973#define IOV_PTR_FIELD iov_base
1974#define IOV_LEN_FIELD iov_len
1975#define IOV_LEN_TYPE size_t
1976#else
1977#define NUM_WRITE_IOVEC 16
1978#define IOV_TYPE WSABUF
1979#define IOV_PTR_FIELD buf
1980#define IOV_LEN_FIELD len
1981#define IOV_LEN_TYPE unsigned long
1982#endif
1983#endif
1984#define NUM_READ_IOVEC 4
1985
1986#define EVBUFFER_MAX_READ	4096
1987
/** Helper function to figure out which space to use for reading data into
    an evbuffer.  Internal use only.

    @param buf The buffer to read into
    @param howmuch How much we want to read.
    @param vecs An array of two or more iovecs or WSABUFs.
    @param n_vecs_avail The length of vecs
    @param chainp A pointer to a variable to hold the first chain we're
      reading into.
    @param exact Boolean: if true, we do not provide more than 'howmuch'
      space in the vectors, even if more space is available.
    @return The number of buffers we're using.
 */
int
_evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
    struct evbuffer_iovec *vecs, int n_vecs_avail,
    struct evbuffer_chain ***chainp, int exact)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain **firstchainp;
	size_t so_far;
	int i;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (howmuch < 0)
		return -1;

	so_far = 0;
	/* Let firstchain be the first chain with any space on it */
	firstchainp = buf->last_with_datap;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	/* Fill vecs[] with the free extents of successive chains until we
	 * have covered 'howmuch' bytes or run out of vecs. */
	chain = *firstchainp;
	for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
		size_t avail = (size_t) CHAIN_SPACE_LEN(chain);
		if (avail > (howmuch - so_far) && exact)
			avail = howmuch - so_far;
		vecs[i].iov_base = CHAIN_SPACE_PTR(chain);
		vecs[i].iov_len = avail;
		so_far += avail;
		chain = chain->next;
	}

	*chainp = firstchainp;
	return i;
}
2036
/* Ask the OS how many bytes are pending on socket 'fd' via FIONREAD.
 * Returns that count, -1 if the ioctl fails, or EVBUFFER_MAX_READ when
 * FIONREAD is unavailable on this platform. */
static int
get_n_bytes_readable_on_socket(evutil_socket_t fd)
{
#if defined(FIONREAD) && defined(WIN32)
	/* Windows sockets report the count through an unsigned long. */
	unsigned long lng = EVBUFFER_MAX_READ;
	if (ioctlsocket(fd, FIONREAD, &lng) < 0)
		return -1;
	return (int)lng;
#elif defined(FIONREAD)
	int n = EVBUFFER_MAX_READ;
	if (ioctl(fd, FIONREAD, &n) < 0)
		return -1;
	return n;
#else
	return EVBUFFER_MAX_READ;
#endif
}
2054
/* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
 * as howmuch? */
/* Read up to 'howmuch' bytes (or a heuristic amount when howmuch < 0)
 * from socket 'fd' into the end of 'buf'.  Returns bytes read, 0 on EOF,
 * or -1 on error / frozen end. */
int
evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
{
	struct evbuffer_chain **chainp;
	int n;
	int result;

#ifdef USE_IOVEC_IMPL
	int nvecs, i, remaining;
#else
	struct evbuffer_chain *chain;
	unsigned char *p;
#endif

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		result = -1;
		goto done;
	}

	/* Clamp the read size to what the socket claims is pending, capped
	 * at EVBUFFER_MAX_READ. */
	n = get_n_bytes_readable_on_socket(fd);
	if (n <= 0 || n > EVBUFFER_MAX_READ)
		n = EVBUFFER_MAX_READ;
	if (howmuch < 0 || howmuch > n)
		howmuch = n;

#ifdef USE_IOVEC_IMPL
	/* Since we can use iovecs, we're willing to use the last
	 * NUM_READ_IOVEC chains. */
	if (_evbuffer_expand_fast(buf, howmuch, NUM_READ_IOVEC) == -1) {
		result = -1;
		goto done;
	} else {
		IOV_TYPE vecs[NUM_READ_IOVEC];
#ifdef _EVBUFFER_IOVEC_IS_NATIVE
		nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs,
		    NUM_READ_IOVEC, &chainp, 1);
#else
		/* We aren't using the native struct iovec.  Therefore,
		   we are on win32. */
		/* NOTE(review): n_vecs_avail is passed as 2 even though
		 * ev_vecs has NUM_READ_IOVEC entries — confirm whether this
		 * is intentional or should be NUM_READ_IOVEC. */
		struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
		nvecs = _evbuffer_read_setup_vecs(buf, howmuch, ev_vecs, 2,
		    &chainp, 1);

		for (i=0; i < nvecs; ++i)
			WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
#endif

#ifdef WIN32
		{
			DWORD bytesRead;
			DWORD flags=0;
			if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
				/* The read failed. It might be a close,
				 * or it might be an error. */
				if (WSAGetLastError() == WSAECONNABORTED)
					n = 0;
				else
					n = -1;
			} else
				n = bytesRead;
		}
#else
		n = readv(fd, vecs, nvecs);
#endif
	}

#else /*!USE_IOVEC_IMPL*/
	/* If we don't have FIONREAD, we might waste some space here */
	/* XXX we _will_ waste some space here if there is any space left
	 * over on buf->last. */
	if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
		result = -1;
		goto done;
	}

	/* We can append new data at this point */
	p = chain->buffer + chain->misalign + chain->off;

#ifndef WIN32
	n = read(fd, p, howmuch);
#else
	n = recv(fd, p, howmuch, 0);
#endif
#endif /* USE_IOVEC_IMPL */

	if (n == -1) {
		result = -1;
		goto done;
	}
	if (n == 0) {
		result = 0;
		goto done;
	}

#ifdef USE_IOVEC_IMPL
	/* Credit the bytes we read to the chains they landed in, and move
	 * last_with_datap to the final chain that received data. */
	remaining = n;
	for (i=0; i < nvecs; ++i) {
		ev_ssize_t space = (ev_ssize_t) CHAIN_SPACE_LEN(*chainp);
		if (space < remaining) {
			(*chainp)->off += space;
			remaining -= (int)space;
		} else {
			(*chainp)->off += remaining;
			buf->last_with_datap = chainp;
			break;
		}
		chainp = &(*chainp)->next;
	}
#else
	chain->off += n;
	advance_last_with_data(buf);
#endif
	buf->total_len += n;
	buf->n_add_for_cb += n;

	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks(buf);
	result = n;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
2181
2182#ifdef WIN32
/* WIN32-only: read up to 'howmuch' bytes (defaulting to 16384 when
 * negative) from file descriptor 'fd' into 'buf' using the
 * reserve/commit-space API.  Returns bytes read, 0 on EOF, -1 on error. */
static int
evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd, ev_ssize_t howmuch)
{
	int result;
	int nchains, n;
	struct evbuffer_iovec v[2];

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		result = -1;
		goto done;
	}

	if (howmuch < 0)
		howmuch = 16384;


	/* XXX we _will_ waste some space here if there is any space left
	 * over on buf->last. */
	nchains = evbuffer_reserve_space(buf, howmuch, v, 2);
	if (nchains < 1 || nchains > 2) {
		result = -1;
		goto done;
	}
	n = read((int)fd, v[0].iov_base, (unsigned int)v[0].iov_len);
	if (n <= 0) {
		result = n;
		goto done;
	}
	v[0].iov_len = (IOV_LEN_TYPE) n; /* XXXX another problem with big n.*/
	if (nchains > 1) {
		/* Try to fill the second reserved extent as well. */
		n = read((int)fd, v[1].iov_base, (unsigned int)v[1].iov_len);
		if (n <= 0) {
			/* Second read failed: commit only the first extent. */
			result = (unsigned long) v[0].iov_len;
			evbuffer_commit_space(buf, v, 1);
			goto done;
		}
		v[1].iov_len = n;
	}
	evbuffer_commit_space(buf, v, nchains);

	/* NOTE(review): when both extents were filled, only the second
	 * read's byte count is returned, not the total — confirm whether
	 * callers rely on this. */
	result = n;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
2230#endif
2231
2232#ifdef USE_IOVEC_IMPL
/* Write up to 'howmuch' bytes of 'buffer' to 'fd' in one gathered
 * writev()/WSASend() call spanning at most NUM_WRITE_IOVEC chains.
 * Returns bytes written or -1 on error.  Does not drain the buffer. */
static inline int
evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	IOV_TYPE iov[NUM_WRITE_IOVEC];
	struct evbuffer_chain *chain = buffer->first;
	int n, i = 0;

	if (howmuch < 0)
		return -1;

	ASSERT_EVBUFFER_LOCKED(buffer);
	/* XXX make this top out at some maximal data length?  if the
	 * buffer has (say) 1MB in it, split over 128 chains, there's
	 * no way it all gets written in one go. */
	while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
#ifdef USE_SENDFILE
		/* we cannot write the file info via writev */
		if (chain->flags & EVBUFFER_SENDFILE)
			break;
#endif
		iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
		if ((size_t)howmuch >= chain->off) {
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
			howmuch -= chain->off;
		} else {
			/* Last (partial) chain within the byte limit. */
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
			break;
		}
		chain = chain->next;
	}
#ifdef WIN32
	{
		DWORD bytesSent;
		if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
			n = -1;
		else
			n = bytesSent;
	}
#else
	n = writev(fd, iov, i);
#endif
	return (n);
}
2279#endif
2280
2281#ifdef USE_SENDFILE
/* Flush the first chain of 'buffer' (a sendfile-backed chain) to socket
 * 'fd' using the platform's sendfile-style primitive.  Returns bytes
 * sent, 0 on a retriable error with no progress, or -1 on failure. */
static inline int
evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	struct evbuffer_chain *chain = buffer->first;
	struct evbuffer_chain_fd *info =
	    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
	int res;
	off_t len = chain->off;
#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
	ev_ssize_t res;
	off_t offset = chain->misalign;
#endif

	ASSERT_EVBUFFER_LOCKED(buffer);

	/* NOTE(review): 'howmuch' is unused; every branch below sends up to
	 * chain->off bytes — confirm this is intended. */
#if defined(SENDFILE_IS_MACOSX)
	res = sendfile(info->fd, fd, chain->misalign, &len, NULL, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	/* 'len' was updated with the number of bytes actually sent. */
	return (len);
#elif defined(SENDFILE_IS_FREEBSD)
	res = sendfile(info->fd, fd, chain->misalign, chain->off, NULL, &len, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_LINUX)
	/* TODO(niels): implement splice */
	res = sendfile(fd, info->fd, &offset, chain->off);
	if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
		/* if this is EAGAIN or EINTR return 0; otherwise, -1 */
		return (0);
	}
	return (res);
#elif defined(SENDFILE_IS_SOLARIS)
	{
		const off_t offset_orig = offset;
		res = sendfile(fd, info->fd, &offset, chain->off);
		if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
			/* Partial progress before the retriable error:
			 * report the bytes that did go out. */
			if (offset - offset_orig)
				return offset - offset_orig;
			/* if this is EAGAIN or EINTR and no bytes were
			 * written, return 0 */
			return (0);
		}
		return (res);
	}
#endif
}
2334#endif
2335
/* Write up to 'howmuch' bytes from the front of 'buffer' to socket 'fd',
 * draining whatever was successfully written.  A negative 'howmuch' (or
 * one larger than the buffer) means "write everything available".
 * Returns the number of bytes written, or -1 on error or when the
 * buffer's front is frozen. */
int
evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	int n = -1;

	EVBUFFER_LOCK(buffer);

	/* Draining from the front is forbidden while frozen there. */
	if (buffer->freeze_start) {
		goto done;
	}

	/* Clamp the request to the bytes actually present. */
	if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
		howmuch = buffer->total_len;

	if (howmuch > 0) {
#ifdef USE_SENDFILE
		/* A sendfile chain has no in-memory data (buffer == NULL),
		 * so it can only go out through the sendfile path. */
		struct evbuffer_chain *chain = buffer->first;
		if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
			n = evbuffer_write_sendfile(buffer, fd, howmuch);
		else {
#endif
#ifdef USE_IOVEC_IMPL
		n = evbuffer_write_iovec(buffer, fd, howmuch);
#elif defined(WIN32)
		/* XXX(nickm) Don't disable this code until we know if
		 * the WSARecv code above works. */
		void *p = evbuffer_pullup(buffer, howmuch);
		n = send(fd, p, howmuch, 0);
#else
		/* No scatter/gather I/O available: linearize then write. */
		void *p = evbuffer_pullup(buffer, howmuch);
		n = write(fd, p, howmuch);
#endif
#ifdef USE_SENDFILE
		}
#endif
	}

	/* Remove from the buffer only what actually made it out. */
	if (n > 0)
		evbuffer_drain(buffer, n);

done:
	EVBUFFER_UNLOCK(buffer);
	return (n);
}
2381
2382int
2383evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
2384{
2385	return evbuffer_write_atmost(buffer, fd, -1);
2386}
2387
2388unsigned char *
2389evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
2390{
2391	unsigned char *search;
2392	struct evbuffer_ptr ptr;
2393
2394	EVBUFFER_LOCK(buffer);
2395
2396	ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
2397	if (ptr.pos < 0) {
2398		search = NULL;
2399	} else {
2400		search = evbuffer_pullup(buffer, ptr.pos + len);
2401		if (search)
2402			search += ptr.pos;
2403	}
2404	EVBUFFER_UNLOCK(buffer);
2405	return search;
2406}
2407
/* Position 'pos' within 'buf': at absolute offset 'position'
 * (EVBUFFER_PTR_SET) or 'position' bytes past its current location
 * (EVBUFFER_PTR_ADD).  Returns 0 on success; -1 if the target offset
 * lies past the end of the buffer, in which case pos->pos is set to -1. */
int
evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t position, enum evbuffer_ptr_how how)
{
	size_t left = position;
	struct evbuffer_chain *chain = NULL;

	EVBUFFER_LOCK(buf);

	switch (how) {
	case EVBUFFER_PTR_SET:
		/* Absolute: walk from the first chain. */
		chain = buf->first;
		pos->pos = position;
		position = 0;
		break;
	case EVBUFFER_PTR_ADD:
		/* this avoids iterating over all previous chains if
		   we just want to advance the position */
		chain = pos->_internal.chain;
		pos->pos += position;
		position = pos->_internal.pos_in_chain;
		break;
	}

	/* Skip whole chains until the remaining 'left' bytes land inside
	 * the current one.
	 * NOTE(review): an offset exactly equal to the buffer length walks
	 * off the end and yields -1 here — confirm no caller needs a valid
	 * "end of buffer" pointer. */
	while (chain && position + left >= chain->off) {
		left -= chain->off - position;
		chain = chain->next;
		position = 0;
	}
	if (chain) {
		pos->_internal.chain = chain;
		pos->_internal.pos_in_chain = position + left;
	} else {
		/* Ran past the last chain: mark the pointer invalid. */
		pos->_internal.chain = NULL;
		pos->pos = -1;
	}

	EVBUFFER_UNLOCK(buf);

	return chain != NULL ? 0 : -1;
}
2449
2450/**
2451   Compare the bytes in buf at position pos to the len bytes in mem.  Return
2452   less than 0, 0, or greater than 0 as memcmp.
2453 */
2454static int
2455evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
2456    const char *mem, size_t len)
2457{
2458	struct evbuffer_chain *chain;
2459	size_t position;
2460	int r;
2461
2462	ASSERT_EVBUFFER_LOCKED(buf);
2463
2464	if (pos->pos + len > buf->total_len)
2465		return -1;
2466
2467	chain = pos->_internal.chain;
2468	position = pos->_internal.pos_in_chain;
2469	while (len && chain) {
2470		size_t n_comparable;
2471		if (len + position > chain->off)
2472			n_comparable = chain->off - position;
2473		else
2474			n_comparable = len;
2475		r = memcmp(chain->buffer + chain->misalign + position, mem,
2476		    n_comparable);
2477		if (r)
2478			return r;
2479		mem += n_comparable;
2480		len -= n_comparable;
2481		position = 0;
2482		chain = chain->next;
2483	}
2484
2485	return 0;
2486}
2487
2488struct evbuffer_ptr
2489evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
2490{
2491	return evbuffer_search_range(buffer, what, len, start, NULL);
2492}
2493
/* Search 'buffer' for the first occurrence of the 'len'-byte string
 * 'what', scanning from 'start' (NULL = buffer start) up to 'end'
 * (NULL = buffer end).  Returns an evbuffer_ptr whose pos is the match
 * offset, or -1 when no match is found. */
struct evbuffer_ptr
evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
{
	struct evbuffer_ptr pos;
	struct evbuffer_chain *chain, *last_chain = NULL;
	const unsigned char *p;
	char first;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&pos, start, sizeof(pos));
		chain = pos._internal.chain;
	} else {
		pos.pos = 0;
		chain = pos._internal.chain = buffer->first;
		pos._internal.pos_in_chain = 0;
	}

	if (end)
		last_chain = end->_internal.chain;

	/* An empty or absurdly long needle can never match. */
	if (!len || len > EV_SSIZE_MAX)
		goto done;

	first = what[0];

	while (chain) {
		const unsigned char *start_at =
		    chain->buffer + chain->misalign +
		    pos._internal.pos_in_chain;
		/* Fast-scan this chain for the needle's first byte. */
		p = memchr(start_at, first,
		    chain->off - pos._internal.pos_in_chain);
		if (p) {
			pos.pos += p - start_at;
			pos._internal.pos_in_chain += p - start_at;
			/* Full comparison, possibly spanning chains. */
			if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
				/* A match extending past 'end' is rejected. */
				if (end && pos.pos + (ev_ssize_t)len > end->pos)
					goto not_found;
				else
					goto done;
			}
			/* False alarm: step one byte forward and retry. */
			++pos.pos;
			++pos._internal.pos_in_chain;
			if (pos._internal.pos_in_chain == chain->off) {
				chain = pos._internal.chain = chain->next;
				pos._internal.pos_in_chain = 0;
			}
		} else {
			if (chain == last_chain)
				goto not_found;
			/* First byte absent here: skip to the next chain. */
			pos.pos += chain->off - pos._internal.pos_in_chain;
			chain = pos._internal.chain = chain->next;
			pos._internal.pos_in_chain = 0;
		}
	}

not_found:
	pos.pos = -1;
	pos._internal.chain = NULL;
done:
	EVBUFFER_UNLOCK(buffer);
	return pos;
}
2558
/* Fill 'vec' (up to 'n_vec' entries) with pointers into 'buffer' covering
 * the first 'len' bytes, starting at 'start_at' when given.  A negative
 * 'len' means "everything".  No data is copied or drained.  Returns the
 * number of extents needed, which may exceed n_vec. */
int
evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
    struct evbuffer_ptr *start_at,
    struct evbuffer_iovec *vec, int n_vec)
{
	struct evbuffer_chain *chain;
	int idx = 0;
	ev_ssize_t len_so_far = 0;

	EVBUFFER_LOCK(buffer);

	if (start_at) {
		/* First extent is the tail of the chain that holds
		 * 'start_at'. */
		chain = start_at->_internal.chain;
		len_so_far = chain->off
		    - start_at->_internal.pos_in_chain;
		idx = 1;
		if (n_vec > 0) {
			vec[0].iov_base = chain->buffer + chain->misalign
			    + start_at->_internal.pos_in_chain;
			vec[0].iov_len = len_so_far;
		}
		chain = chain->next;
	} else {
		chain = buffer->first;
	}

	if (n_vec == 0 && len < 0) {
		/* If no vectors are provided and they asked for "everything",
		 * pretend they asked for the actual available amount. */
		len = buffer->total_len - len_so_far;
	}

	while (chain) {
		if (len >= 0 && len_so_far >= len)
			break;
		if (idx<n_vec) {
			vec[idx].iov_base = chain->buffer + chain->misalign;
			vec[idx].iov_len = chain->off;
		} else if (len<0) {
			/* Out of vectors while asked for "everything":
			 * stop rather than keep counting. */
			break;
		}
		/* idx keeps counting required extents even past n_vec. */
		++idx;
		len_so_far += chain->off;
		chain = chain->next;
	}

	EVBUFFER_UNLOCK(buffer);

	return idx;
}
2609
2610
/* Append a vsnprintf-formatted string to 'buf'.  Formats into the free
 * space of the last chain; when the output does not fit, grows the
 * buffer and retries.  Returns the number of bytes appended, or -1 on
 * formatting error, allocation failure, or a frozen buffer end. */
int
evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
{
	char *buffer;
	size_t space;
	int sz, result = -1;
	va_list aq;
	struct evbuffer_chain *chain;


	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}

	/* make sure that at least some space is available */
	if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)
		goto done;

	for (;;) {
#if 0
		size_t used = chain->misalign + chain->off;
		buffer = (char *)chain->buffer + chain->misalign + chain->off;
		EVUTIL_ASSERT(chain->buffer_len >= used);
		space = chain->buffer_len - used;
#endif
		buffer = (char*) CHAIN_SPACE_PTR(chain);
		space = (size_t) CHAIN_SPACE_LEN(chain);

#ifndef va_copy
#define	va_copy(dst, src)	memcpy(&(dst), &(src), sizeof(va_list))
#endif
		/* Copy the arg list: we may format more than once, and a
		 * va_list can only be traversed a single time. */
		va_copy(aq, ap);

		sz = evutil_vsnprintf(buffer, space, fmt, aq);

		va_end(aq);

		if (sz < 0)
			goto done;
		if ((size_t)sz < space) {
			/* The output fit: commit the new bytes. */
			chain->off += sz;
			buf->total_len += sz;
			buf->n_add_for_cb += sz;

			advance_last_with_data(buf);
			evbuffer_invoke_callbacks(buf);
			result = sz;
			goto done;
		}
		/* Truncated: make room for sz bytes plus the NUL, retry. */
		if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
			goto done;
	}
	/* NOTREACHED */

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
2671
/* printf-style append: packages the variadic arguments into a va_list
 * and defers to evbuffer_add_vprintf().  Returns bytes appended or -1. */
int
evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
{
	va_list ap;
	int result;

	va_start(ap, fmt);
	result = evbuffer_add_vprintf(buf, fmt, ap);
	va_end(ap);

	return result;
}
2684
2685int
2686evbuffer_add_reference(struct evbuffer *outbuf,
2687    const void *data, size_t datlen,
2688    evbuffer_ref_cleanup_cb cleanupfn, void *extra)
2689{
2690	struct evbuffer_chain *chain;
2691	struct evbuffer_chain_reference *info;
2692	int result = -1;
2693
2694	chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
2695	if (!chain)
2696		return (-1);
2697	chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
2698	chain->buffer = (u_char *)data;
2699	chain->buffer_len = datlen;
2700	chain->off = datlen;
2701
2702	info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
2703	info->cleanupfn = cleanupfn;
2704	info->extra = extra;
2705
2706	EVBUFFER_LOCK(outbuf);
2707	if (outbuf->freeze_end) {
2708		/* don't call chain_free; we do not want to actually invoke
2709		 * the cleanup function */
2710		mm_free(chain);
2711		goto done;
2712	}
2713	evbuffer_chain_insert(outbuf, chain);
2714	outbuf->n_add_for_cb += datlen;
2715
2716	evbuffer_invoke_callbacks(outbuf);
2717
2718	result = 0;
2719done:
2720	EVBUFFER_UNLOCK(outbuf);
2721
2722	return result;
2723}
2724
2725/* TODO(niels): maybe we don't want to own the fd, however, in that
2726 * case, we should dup it - dup is cheap.  Perhaps, we should use a
2727 * callback instead?
2728 */
2729/* TODO(niels): we may want to add to automagically convert to mmap, in
2730 * case evbuffer_remove() or evbuffer_pullup() are being used.
2731 */
2732int
2733evbuffer_add_file(struct evbuffer *outbuf, int fd,
2734    ev_off_t offset, ev_off_t length)
2735{
2736#if defined(USE_SENDFILE) || defined(_EVENT_HAVE_MMAP)
2737	struct evbuffer_chain *chain;
2738	struct evbuffer_chain_fd *info;
2739#endif
2740#if defined(USE_SENDFILE)
2741	int sendfile_okay = 1;
2742#endif
2743	int ok = 1;
2744
2745#if defined(USE_SENDFILE)
2746	if (use_sendfile) {
2747		EVBUFFER_LOCK(outbuf);
2748		sendfile_okay = outbuf->flags & EVBUFFER_FLAG_DRAINS_TO_FD;
2749		EVBUFFER_UNLOCK(outbuf);
2750	}
2751
2752	if (use_sendfile && sendfile_okay) {
2753		chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
2754		if (chain == NULL) {
2755			event_warn("%s: out of memory", __func__);
2756			return (-1);
2757		}
2758
2759		chain->flags |= EVBUFFER_SENDFILE | EVBUFFER_IMMUTABLE;
2760		chain->buffer = NULL;	/* no reading possible */
2761		chain->buffer_len = length + offset;
2762		chain->off = length;
2763		chain->misalign = offset;
2764
2765		info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
2766		info->fd = fd;
2767
2768		EVBUFFER_LOCK(outbuf);
2769		if (outbuf->freeze_end) {
2770			mm_free(chain);
2771			ok = 0;
2772		} else {
2773			outbuf->n_add_for_cb += length;
2774			evbuffer_chain_insert(outbuf, chain);
2775		}
2776	} else
2777#endif
2778#if defined(_EVENT_HAVE_MMAP)
2779	if (use_mmap) {
2780		void *mapped = mmap(NULL, length + offset, PROT_READ,
2781#ifdef MAP_NOCACHE
2782		    MAP_NOCACHE |
2783#endif
2784#ifdef MAP_FILE
2785		    MAP_FILE |
2786#endif
2787		    MAP_PRIVATE,
2788		    fd, 0);
2789		/* some mmap implementations require offset to be a multiple of
2790		 * the page size.  most users of this api, are likely to use 0
2791		 * so mapping everything is not likely to be a problem.
2792		 * TODO(niels): determine page size and round offset to that
2793		 * page size to avoid mapping too much memory.
2794		 */
2795		if (mapped == MAP_FAILED) {
2796			event_warn("%s: mmap(%d, %d, %zu) failed",
2797			    __func__, fd, 0, (size_t)(offset + length));
2798			return (-1);
2799		}
2800		chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
2801		if (chain == NULL) {
2802			event_warn("%s: out of memory", __func__);
2803			munmap(mapped, length);
2804			return (-1);
2805		}
2806
2807		chain->flags |= EVBUFFER_MMAP | EVBUFFER_IMMUTABLE;
2808		chain->buffer = mapped;
2809		chain->buffer_len = length + offset;
2810		chain->off = length + offset;
2811
2812		info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
2813		info->fd = fd;
2814
2815		EVBUFFER_LOCK(outbuf);
2816		if (outbuf->freeze_end) {
2817			info->fd = -1;
2818			evbuffer_chain_free(chain);
2819			ok = 0;
2820		} else {
2821			outbuf->n_add_for_cb += length;
2822
2823			evbuffer_chain_insert(outbuf, chain);
2824
2825			/* we need to subtract whatever we don't need */
2826			evbuffer_drain(outbuf, offset);
2827		}
2828	} else
2829#endif
2830	{
2831		/* the default implementation */
2832		struct evbuffer *tmp = evbuffer_new();
2833		ev_ssize_t read;
2834
2835		if (tmp == NULL)
2836			return (-1);
2837
2838#ifdef WIN32
2839#define lseek _lseeki64
2840#endif
2841		if (lseek(fd, offset, SEEK_SET) == -1) {
2842			evbuffer_free(tmp);
2843			return (-1);
2844		}
2845
2846		/* we add everything to a temporary buffer, so that we
2847		 * can abort without side effects if the read fails.
2848		 */
2849		while (length) {
2850			read = evbuffer_readfile(tmp, fd, (ev_ssize_t)length);
2851			if (read == -1) {
2852				evbuffer_free(tmp);
2853				return (-1);
2854			}
2855
2856			length -= read;
2857		}
2858
2859		EVBUFFER_LOCK(outbuf);
2860		if (outbuf->freeze_end) {
2861			evbuffer_free(tmp);
2862			ok = 0;
2863		} else {
2864			evbuffer_add_buffer(outbuf, tmp);
2865			evbuffer_free(tmp);
2866
2867#ifdef WIN32
2868#define close _close
2869#endif
2870			close(fd);
2871		}
2872	}
2873
2874	if (ok)
2875		evbuffer_invoke_callbacks(outbuf);
2876	EVBUFFER_UNLOCK(outbuf);
2877
2878	return ok ? 0 : -1;
2879}
2880
2881
2882void
2883evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
2884{
2885	EVBUFFER_LOCK(buffer);
2886
2887	if (!TAILQ_EMPTY(&buffer->callbacks))
2888		evbuffer_remove_all_callbacks(buffer);
2889
2890	if (cb) {
2891		struct evbuffer_cb_entry *ent =
2892		    evbuffer_add_cb(buffer, NULL, cbarg);
2893		ent->cb.cb_obsolete = cb;
2894		ent->flags |= EVBUFFER_CB_OBSOLETE;
2895	}
2896	EVBUFFER_UNLOCK(buffer);
2897}
2898
2899struct evbuffer_cb_entry *
2900evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
2901{
2902	struct evbuffer_cb_entry *e;
2903	if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
2904		return NULL;
2905	EVBUFFER_LOCK(buffer);
2906	e->cb.cb_func = cb;
2907	e->cbarg = cbarg;
2908	e->flags = EVBUFFER_CB_ENABLED;
2909	TAILQ_INSERT_HEAD(&buffer->callbacks, e, next);
2910	EVBUFFER_UNLOCK(buffer);
2911	return e;
2912}
2913
2914int
2915evbuffer_remove_cb_entry(struct evbuffer *buffer,
2916			 struct evbuffer_cb_entry *ent)
2917{
2918	EVBUFFER_LOCK(buffer);
2919	TAILQ_REMOVE(&buffer->callbacks, ent, next);
2920	EVBUFFER_UNLOCK(buffer);
2921	mm_free(ent);
2922	return 0;
2923}
2924
2925int
2926evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
2927{
2928	struct evbuffer_cb_entry *cbent;
2929	int result = -1;
2930	EVBUFFER_LOCK(buffer);
2931	TAILQ_FOREACH(cbent, &buffer->callbacks, next) {
2932		if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
2933			result = evbuffer_remove_cb_entry(buffer, cbent);
2934			goto done;
2935		}
2936	}
2937done:
2938	EVBUFFER_UNLOCK(buffer);
2939	return result;
2940}
2941
2942int
2943evbuffer_cb_set_flags(struct evbuffer *buffer,
2944		      struct evbuffer_cb_entry *cb, ev_uint32_t flags)
2945{
2946	/* the user isn't allowed to mess with these. */
2947	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
2948	EVBUFFER_LOCK(buffer);
2949	cb->flags |= flags;
2950	EVBUFFER_UNLOCK(buffer);
2951	return 0;
2952}
2953
2954int
2955evbuffer_cb_clear_flags(struct evbuffer *buffer,
2956		      struct evbuffer_cb_entry *cb, ev_uint32_t flags)
2957{
2958	/* the user isn't allowed to mess with these. */
2959	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
2960	EVBUFFER_LOCK(buffer);
2961	cb->flags &= ~flags;
2962	EVBUFFER_UNLOCK(buffer);
2963	return 0;
2964}
2965
2966int
2967evbuffer_freeze(struct evbuffer *buffer, int start)
2968{
2969	EVBUFFER_LOCK(buffer);
2970	if (start)
2971		buffer->freeze_start = 1;
2972	else
2973		buffer->freeze_end = 1;
2974	EVBUFFER_UNLOCK(buffer);
2975	return 0;
2976}
2977
2978int
2979evbuffer_unfreeze(struct evbuffer *buffer, int start)
2980{
2981	EVBUFFER_LOCK(buffer);
2982	if (start)
2983		buffer->freeze_start = 0;
2984	else
2985		buffer->freeze_end = 0;
2986	EVBUFFER_UNLOCK(buffer);
2987	return 0;
2988}
2989
#if 0
/* Disabled code: per-callback suspend/unsuspend support.  Not compiled;
 * kept for reference only. */
void
evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) {
		/* Remember the length at suspension time so the callback
		 * can be told what changed while it was suspended. */
		cb->size_before_suspend = evbuffer_get_length(buffer);
		cb->flags |= EVBUFFER_CB_SUSPENDED;
	}
}

void
evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if ((cb->flags & EVBUFFER_CB_SUSPENDED)) {
		unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);
		size_t sz = cb->size_before_suspend;
		cb->flags &= ~(EVBUFFER_CB_SUSPENDED|
			       EVBUFFER_CB_CALL_ON_UNSUSPEND);
		cb->size_before_suspend = 0;
		if (call && (cb->flags & EVBUFFER_CB_ENABLED)) {
			/* Deliver the deferred notification now. */
			cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg);
		}
	}
}
#endif
3015
3016/* These hooks are exposed so that the unit tests can temporarily disable
3017 * sendfile support in order to test mmap, or both to test linear
3018 * access. Don't use it; if we need to add a way to disable sendfile support
3019 * in the future, it will probably be via an alternate version of
3020 * evbuffer_add_file() with a 'flags' argument.
3021 */
3022int _evbuffer_testing_use_sendfile(void);
3023int _evbuffer_testing_use_mmap(void);
3024int _evbuffer_testing_use_linear_file_access(void);
3025
/* Test hook: force the sendfile path on (and mmap off).  Returns 1 when
 * sendfile support was compiled in, 0 otherwise. */
int
_evbuffer_testing_use_sendfile(void)
{
	int enabled = 0;
#ifdef USE_SENDFILE
	use_sendfile = 1;
	enabled = 1;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 0;
#endif
	return enabled;
}
/* Test hook: force the mmap path on (and sendfile off).  Returns 1 when
 * mmap support was compiled in, 0 otherwise. */
int
_evbuffer_testing_use_mmap(void)
{
	int enabled = 0;
#ifdef USE_SENDFILE
	use_sendfile = 0;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 1;
	enabled = 1;
#endif
	return enabled;
}
/* Test hook: disable both accelerated file paths so evbuffer_add_file()
 * falls back to plain reads.  Always returns 1. */
int
_evbuffer_testing_use_linear_file_access(void)
{
#ifdef USE_SENDFILE
	use_sendfile = 0;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 0;
#endif
	return 1;
}
3063