/*
 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define __KPI__
//#include <sys/kpi_interface.h>

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <kern/debug.h>
#include <libkern/OSAtomic.h>
#include <kern/kalloc.h>
#include <string.h>
#include <netinet/in.h>
#include "kpi_mbuf_internal.h"

static const mbuf_flags_t mbuf_flags_mask = MBUF_EXT | MBUF_PKTHDR | MBUF_EOR |
				MBUF_BCAST | MBUF_MCAST | MBUF_FRAG | MBUF_FIRSTFRAG |
				MBUF_LASTFRAG | MBUF_PROMISC;

void* mbuf_data(mbuf_t mbuf)
{
	return mbuf->m_data;
}

void* mbuf_datastart(mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT)
		return mbuf->m_ext.ext_buf;
	if (mbuf->m_flags & M_PKTHDR)
		return mbuf->m_pktdat;
	return mbuf->m_dat;
}

errno_t mbuf_setdata(mbuf_t mbuf, void* data, size_t len)
{
	size_t	start = (size_t)((char*)mbuf_datastart(mbuf));
	size_t	maxlen = mbuf_maxlen(mbuf);

	if ((size_t)data < start || ((size_t)data) + len > start + maxlen)
		return EINVAL;
	mbuf->m_data = data;
	mbuf->m_len = len;

	return 0;
}

errno_t mbuf_align_32(mbuf_t mbuf, size_t len)
{
	if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf))
		return ENOTSUP;
	mbuf->m_data = mbuf_datastart(mbuf);
	mbuf->m_data += ((mbuf_trailingspace(mbuf) - len) &~ (sizeof(u_int32_t) - 1));

	return 0;
}

addr64_t mbuf_data_to_physical(void* ptr)
{
	return (addr64_t)(intptr_t)mcl_to_paddr(ptr);
}

errno_t mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_get(how, type);

	return (*mbuf == NULL) ? ENOMEM : 0;
}

errno_t mbuf_gethdr(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_gethdr(how, type);

	return (*mbuf == NULL) ? ENOMEM : 0;
}

errno_t
mbuf_attachcluster(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
    caddr_t extbuf, void (*extfree)(caddr_t , u_int, caddr_t),
    size_t extsize, caddr_t extarg)
{
	if (extbuf == NULL || extfree == NULL || extsize == 0)
		return (EINVAL);

	if ((*mbuf = m_clattach(mbuf != NULL ? *mbuf : NULL, type, extbuf,
	    extfree, extsize, extarg, how)) == NULL)
		return (ENOMEM);

	return (0);
}

errno_t
mbuf_alloccluster(mbuf_how_t how, size_t *size, caddr_t *addr)
{
	if (size == NULL || *size == 0 || addr == NULL)
		return (EINVAL);

	*addr = NULL;

	/* Jumbo cluster pool not available? */
	if (*size > NBPG && njcl == 0)
		return (ENOTSUP);

	if (*size <= MCLBYTES && (*addr = m_mclalloc(how)) != NULL)
		*size = MCLBYTES;
	else if (*size > MCLBYTES && *size <= NBPG &&
	    (*addr = m_bigalloc(how)) != NULL)
		*size = NBPG;
	else if (*size > NBPG && *size <= M16KCLBYTES &&
	    (*addr = m_16kalloc(how)) != NULL)
		*size = M16KCLBYTES;
	else
		*size = 0;

	if (*addr == NULL)
		return (ENOMEM);

	return (0);
}

void
mbuf_freecluster(caddr_t addr, size_t size)
{
	if (size != MCLBYTES && size != NBPG && size != M16KCLBYTES)
		panic("%s: invalid size (%ld) for cluster %p", __func__,
		    size, (void *)addr);

	if (size == MCLBYTES)
		m_mclfree(addr);
	else if (size == NBPG)
		m_bigfree(addr, NBPG, NULL);
	else if (njcl > 0)
		m_16kfree(addr, M16KCLBYTES, NULL);
	else
		panic("%s: freeing jumbo cluster to an empty pool", __func__);
}

errno_t
mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t* mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t	error = 0;
	int	created = 0;

	if (mbuf == NULL)
		return EINVAL;
	if (*mbuf == NULL) {
		*mbuf = m_get(how, type);
		if (*mbuf == NULL)
			return ENOMEM;
		created = 1;
	}
	/*
	 * At the time this code was written, m_{mclget,mbigget,m16kget}
	 * would always return the same value that was passed in to it.
	 */
	if (size == MCLBYTES) {
		*mbuf = m_mclget(*mbuf, how);
	} else if (size == NBPG) {
		*mbuf = m_mbigget(*mbuf, how);
	} else if (size == M16KCLBYTES) {
		if (njcl > 0) {
			*mbuf = m_m16kget(*mbuf, how);
		} else {
			/* Jumbo cluster pool not available? */
			error = ENOTSUP;
			goto out;
		}
	} else {
		error = EINVAL;
		goto out;
	}
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0)
		error = ENOMEM;
out:
	if (created && error != 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	return error;
}

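/*
 * A minimal caller-side sketch of mbuf_getcluster() (not part of this file's
 * build), assuming a kext caller that includes <sys/kpi_mbuf.h>; the
 * MBUF_WAITOK and MBUF_TYPE_DATA constants are the public KPI spellings of
 * the values used above, and 2048 matches the standard MCLBYTES cluster size.
 *
 *	mbuf_t m = NULL;
 *	errno_t err;
 *
 *	// Ask for a single mbuf backed by a standard 2KB cluster.
 *	err = mbuf_getcluster(MBUF_WAITOK, MBUF_TYPE_DATA, 2048, &m);
 *	if (err == 0) {
 *		// ... fill mbuf_data(m) and set the length with mbuf_setlen() ...
 *		mbuf_freem(m);
 *	}
 */
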
errno_t mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t	error = 0;
	int		created = 0;
	if (mbuf == NULL) return EINVAL;
	if (*mbuf == NULL) {
		error = mbuf_get(how, type, mbuf);
		if (error)
			return error;
		created = 1;
	}

	/*
	 * At the time this code was written, m_mclget would always
	 * return the same value that was passed in to it.
	 */
	*mbuf = m_mclget(*mbuf, how);

	if (created && ((*mbuf)->m_flags & M_EXT) == 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0)
		error = ENOMEM;
	return error;
}


errno_t mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t	error = 0;

	*mbuf = m_getpacket_how(how);

	if (*mbuf == NULL) {
		if (how == MBUF_WAITOK)
			error = ENOMEM;
		else
			error = EWOULDBLOCK;
	}

	return error;
}

mbuf_t mbuf_free(mbuf_t mbuf)
{
	return m_free(mbuf);
}

void mbuf_freem(mbuf_t mbuf)
{
	m_freem(mbuf);
}

int	mbuf_freem_list(mbuf_t mbuf)
{
	return m_freem_list(mbuf);
}

size_t mbuf_leadingspace(const mbuf_t mbuf)
{
	return m_leadingspace(mbuf);
}

size_t mbuf_trailingspace(const mbuf_t mbuf)
{
	return m_trailingspace(mbuf);
}

/* Manipulation */
errno_t mbuf_copym(const mbuf_t src, size_t offset, size_t len,
				   mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_copym(src, offset, len, how);

	return (*new_mbuf == NULL) ? ENOMEM : 0;
}

errno_t	mbuf_dup(const mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_dup(src, how);

	return (*new_mbuf == NULL) ? ENOMEM : 0;
}

errno_t mbuf_prepend(mbuf_t *orig, size_t len, mbuf_how_t how)
{
	/* Must set *orig to NULL in failure case */
	*orig = m_prepend_2(*orig, len, how);

	return (*orig == NULL) ? ENOMEM : 0;
}

errno_t mbuf_split(mbuf_t src, size_t offset,
					mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_split(src, offset, how);

	return (*new_mbuf == NULL) ? ENOMEM : 0;
}

errno_t mbuf_pullup(mbuf_t *mbuf, size_t len)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_pullup(*mbuf, len);

	return (*mbuf == NULL) ? ENOMEM : 0;
}

errno_t mbuf_pulldown(mbuf_t src, size_t *offset, size_t len, mbuf_t *location)
{
	/* Must set *location to NULL in failure case */
	int new_offset;
	*location = m_pulldown(src, *offset, len, &new_offset);
	*offset = new_offset;

	return (*location == NULL) ? ENOMEM : 0;
}

void mbuf_adj(mbuf_t mbuf, int len)
{
	m_adj(mbuf, len);
}

errno_t mbuf_adjustlen(mbuf_t m, int amount)
{
	/* Verify m_len will be valid after adding amount */
	if (amount > 0) {
		int		used = (size_t)mbuf_data(m) - (size_t)mbuf_datastart(m) +
					   m->m_len;

		if ((size_t)(amount + used) > mbuf_maxlen(m))
			return EINVAL;
	}
	else if (-amount > m->m_len) {
		return EINVAL;
	}

	m->m_len += amount;
	return 0;
}

errno_t mbuf_copydata(const mbuf_t m0, size_t off, size_t len, void* out_data)
{
	/* Copied from m_copydata, with error handling added (don't just panic) */
	int count;
	mbuf_t	m = m0;

	while (off > 0) {
		if (m == 0)
			return EINVAL;
		if (off < (size_t)m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			return EINVAL;
		count = m->m_len - off > len ? len : m->m_len - off;
		bcopy(mtod(m, caddr_t) + off, out_data, count);
		len -= count;
		out_data = ((char*)out_data) + count;
		off = 0;
		m = m->m_next;
	}

	return 0;
}

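/*
 * A minimal sketch of reading from a chain with mbuf_copydata(), assuming a
 * caller that already holds a packet `m` (for example from a filter hook)
 * and wants a contiguous copy of the IP header; the 14-byte offset is only
 * an illustrative Ethernet-header skip and `struct ip` comes from
 * <netinet/ip.h>.
 *
 *	struct ip iphdr;
 *	errno_t err;
 *
 *	err = mbuf_copydata(m, 14, sizeof(iphdr), &iphdr);
 *	if (err != 0) {
 *		// EINVAL: the chain was shorter than offset + length
 *	}
 */
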
int mbuf_mclhasreference(mbuf_t mbuf)
{
	if ((mbuf->m_flags & M_EXT))
		return m_mclhasreference(mbuf);
	else
		return 0;
}


/* mbuf header */
mbuf_t mbuf_next(const mbuf_t mbuf)
{
	return mbuf->m_next;
}

errno_t mbuf_setnext(mbuf_t mbuf, mbuf_t next)
{
	if (next && ((next)->m_nextpkt != NULL ||
		(next)->m_type == MT_FREE)) return EINVAL;
	mbuf->m_next = next;

	return 0;
}

mbuf_t mbuf_nextpkt(const mbuf_t mbuf)
{
	return mbuf->m_nextpkt;
}

void mbuf_setnextpkt(mbuf_t mbuf, mbuf_t nextpkt)
{
	mbuf->m_nextpkt = nextpkt;
}

size_t mbuf_len(const mbuf_t mbuf)
{
	return mbuf->m_len;
}

void mbuf_setlen(mbuf_t mbuf, size_t len)
{
	mbuf->m_len = len;
}

size_t mbuf_maxlen(const mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT)
		return mbuf->m_ext.ext_size;
	return &mbuf->m_dat[MLEN] - ((char*)mbuf_datastart(mbuf));
}

mbuf_type_t mbuf_type(const mbuf_t mbuf)
{
	return mbuf->m_type;
}

errno_t mbuf_settype(mbuf_t mbuf, mbuf_type_t new_type)
{
	if (new_type == MBUF_TYPE_FREE) return EINVAL;

	m_mchtype(mbuf, new_type);

	return 0;
}

mbuf_flags_t mbuf_flags(const mbuf_t mbuf)
{
	return mbuf->m_flags & mbuf_flags_mask;
}

errno_t mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags)
{
	if ((flags & ~mbuf_flags_mask) != 0) return EINVAL;
	mbuf->m_flags = flags |
		(mbuf->m_flags & ~mbuf_flags_mask);

	return 0;
}

errno_t mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask)
{
	if (((flags | mask) & ~mbuf_flags_mask) != 0) return EINVAL;

	mbuf->m_flags = (flags & mask) | (mbuf->m_flags & ~mask);

	return 0;
}

errno_t mbuf_copy_pkthdr(mbuf_t dest, const mbuf_t src)
{
	if (((src)->m_flags & M_PKTHDR) == 0)
		return EINVAL;

	m_copy_pkthdr(dest, src);

	return 0;
}

size_t mbuf_pkthdr_len(const mbuf_t mbuf)
{
	return mbuf->m_pkthdr.len;
}

void mbuf_pkthdr_setlen(mbuf_t mbuf, size_t len)
{
	mbuf->m_pkthdr.len = len;
}

void mbuf_pkthdr_adjustlen(mbuf_t mbuf, int amount)
{
	mbuf->m_pkthdr.len += amount;
}

ifnet_t mbuf_pkthdr_rcvif(const mbuf_t mbuf)
{
	// If we reference count ifnets, we should take a reference here before returning
	return mbuf->m_pkthdr.rcvif;
}

errno_t mbuf_pkthdr_setrcvif(mbuf_t mbuf, ifnet_t ifnet)
{
	/* May want to walk ifnet list to determine if interface is valid */
	mbuf->m_pkthdr.rcvif = (struct ifnet*)ifnet;
	return 0;
}

void* mbuf_pkthdr_header(const mbuf_t mbuf)
{
	return mbuf->m_pkthdr.header;
}

void mbuf_pkthdr_setheader(mbuf_t mbuf, void *header)
{
	mbuf->m_pkthdr.header = (void*)header;
}

void
mbuf_inbound_modified(mbuf_t mbuf)
{
	/* Invalidate hardware generated checksum flags */
	mbuf->m_pkthdr.csum_flags = 0;
}

extern void in_cksum_offset(struct mbuf* m, size_t ip_offset);
extern void in_delayed_cksum_offset(struct mbuf *m, int ip_offset);

void
mbuf_outbound_finalize(mbuf_t mbuf, u_long protocol_family, size_t protocol_offset)
{
	if ((mbuf->m_pkthdr.csum_flags &
		 (CSUM_DELAY_DATA | CSUM_DELAY_IP | CSUM_TCP_SUM16)) == 0)
		return;

	/* Generate the delayed checksums in software; the client needs them */
	switch (protocol_family) {
		case PF_INET:
			if (mbuf->m_pkthdr.csum_flags & CSUM_TCP_SUM16) {
				/*
				 * If you're wondering where this lovely code comes
				 * from, we're trying to undo what happens in ip_output.
				 * Look for CSUM_TCP_SUM16 in ip_output.
				 */
				u_int16_t	first, second;
				mbuf->m_pkthdr.csum_flags &= ~CSUM_TCP_SUM16;
				mbuf->m_pkthdr.csum_flags |= CSUM_TCP;
				first = mbuf->m_pkthdr.csum_data >> 16;
				second = mbuf->m_pkthdr.csum_data & 0xffff;
				mbuf->m_pkthdr.csum_data = first - second;
			}
			if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
				in_delayed_cksum_offset(mbuf, protocol_offset);
			}

			if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
				in_cksum_offset(mbuf, protocol_offset);
			}

			mbuf->m_pkthdr.csum_flags &= ~(CSUM_DELAY_DATA | CSUM_DELAY_IP);
			break;

		default:
			/*
			 * Not sure what to do here if anything.
			 * Hardware checksum code looked pretty IPv4 specific.
			 */
			if ((mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_IP)) != 0)
				panic("mbuf_outbound_finalize - CSUM flags set for non-IPv4 packet (%lu)!\n", protocol_family);
	}
}

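/*
 * A minimal caller-side sketch of mbuf_outbound_finalize() (not compiled
 * here), assuming an interface filter or driver that is about to hand an
 * outbound IPv4 packet to something that cannot rely on the stack's delayed
 * checksum offload; the 14-byte offset is only an illustrative Ethernet
 * header preceding the IP header.
 *
 *	// Force any deferred IPv4/TCP/UDP checksums to be computed now.
 *	mbuf_outbound_finalize(m, PF_INET, 14);
 */
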
errno_t
mbuf_set_vlan_tag(
	mbuf_t mbuf,
	u_int16_t vlan)
{
	mbuf->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = vlan;

	return 0;
}

errno_t
mbuf_get_vlan_tag(
	mbuf_t mbuf,
	u_int16_t *vlan)
{
	if ((mbuf->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0)
		return ENXIO; // No vlan tag set

	*vlan = mbuf->m_pkthdr.vlan_tag;

	return 0;
}

errno_t
mbuf_clear_vlan_tag(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = 0;

	return 0;
}

static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags =
	MBUF_CSUM_REQ_IP | MBUF_CSUM_REQ_TCP | MBUF_CSUM_REQ_UDP | MBUF_CSUM_REQ_SUM16;

errno_t
mbuf_set_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t request,
	u_int32_t value)
{
	request &= mbuf_valid_csum_request_flags;
	mbuf->m_pkthdr.csum_flags = (mbuf->m_pkthdr.csum_flags & 0xffff0000) | request;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}

errno_t
mbuf_get_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t *request,
	u_int32_t *value)
{
	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_csum_request_flags;
	if (value != NULL) {
		*value = mbuf->m_pkthdr.csum_data;
	}

	return 0;
}

errno_t
mbuf_clear_csum_requested(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}

static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags =
	MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA |
	MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_TCP_SUM16;

errno_t
mbuf_set_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t performed,
	u_int32_t value)
{
	performed &= mbuf_valid_csum_performed_flags;
	mbuf->m_pkthdr.csum_flags = (mbuf->m_pkthdr.csum_flags & 0xffff0000) | performed;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}

errno_t
mbuf_get_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t *performed,
	u_int32_t *value)
{
	*performed = mbuf->m_pkthdr.csum_flags & mbuf_valid_csum_performed_flags;
	*value = mbuf->m_pkthdr.csum_data;

	return 0;
}

errno_t
mbuf_clear_csum_performed(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}

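/*
 * A minimal driver-side sketch of mbuf_set_csum_performed() (not compiled
 * here), assuming a receive path whose hardware has verified the IPv4 header
 * checksum and computed the full TCP/UDP checksum including the pseudo
 * header. The exact flag combination and the 0xffff value are illustrative;
 * a real driver sets only the flags its hardware actually implements.
 *
 *	mbuf_set_csum_performed(m,
 *	    MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD |
 *	    MBUF_CSUM_DID_DATA | MBUF_CSUM_PSEUDO_HDR,
 *	    0xffff);
 */
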
errno_t
mbuf_inet_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
    u_int16_t *csum)
{
	if (mbuf == NULL || length == 0 || csum == NULL ||
	   (u_int32_t)mbuf->m_pkthdr.len < (offset + length))
		return (EINVAL);

	*csum = inet_cksum(mbuf, protocol, offset, length);
	return (0);
}

#if INET6
errno_t
mbuf_inet6_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
    u_int16_t *csum)
{
	if (mbuf == NULL || length == 0 || csum == NULL ||
	   (u_int32_t)mbuf->m_pkthdr.len < (offset + length))
		return (EINVAL);

	*csum = inet6_cksum(mbuf, protocol, offset, length);
	return (0);
}
#else /* INET6 */
errno_t
mbuf_inet6_cksum(__unused mbuf_t mbuf, __unused int protocol,
		__unused u_int32_t offset, __unused u_int32_t length,
		__unused u_int16_t *csum)
{
	panic("mbuf_inet6_cksum() doesn't exist on this platform\n");
	return (0);
}

u_int16_t
inet6_cksum(__unused struct mbuf *m, __unused unsigned int nxt,
		__unused unsigned int off, __unused unsigned int len)
{
	panic("inet6_cksum() doesn't exist on this platform\n");
	return (0);
}

void nd6_lookup_ipv6(void);
void
nd6_lookup_ipv6(void)
{
	panic("nd6_lookup_ipv6() doesn't exist on this platform\n");
}

int
in6addr_local(__unused struct in6_addr *a)
{
	panic("in6addr_local() doesn't exist on this platform\n");
	return (0);
}

void nd6_storelladdr(void);
void
nd6_storelladdr(void)
{
	panic("nd6_storelladdr() doesn't exist on this platform\n");
}
#endif /* INET6 */

/*
 * Mbuf tag KPIs
 */

struct mbuf_tag_id_entry {
	SLIST_ENTRY(mbuf_tag_id_entry)	next;
	mbuf_tag_id_t					id;
	char							string[];
};

#define	MBUF_TAG_ID_ENTRY_SIZE(__str) \
	((size_t)&(((struct mbuf_tag_id_entry*)0)->string[0]) + \
	 strlen(__str) + 1)

#define	MTAG_FIRST_ID					1000
static mbuf_tag_id_t					mtag_id_next = MTAG_FIRST_ID;
static SLIST_HEAD(,mbuf_tag_id_entry)	mtag_id_list = {NULL};
static lck_mtx_t						*mtag_id_lock = NULL;

__private_extern__ void
mbuf_tag_id_first_last(
	mbuf_tag_id_t * first,
	mbuf_tag_id_t * last)
{
	*first = MTAG_FIRST_ID;
	*last = mtag_id_next - 1;
}

__private_extern__ errno_t
mbuf_tag_id_find_internal(
	const char		*string,
	mbuf_tag_id_t	*out_id,
	int				create)
{
	struct mbuf_tag_id_entry			*entry = NULL;


	if (string == NULL || out_id == NULL) {
		return EINVAL;
	}

	*out_id = 0;

	/* Don't bother allocating the lock if we're only doing a lookup */
	if (create == 0 && mtag_id_lock == NULL)
		return ENOENT;

	/* Allocate lock if necessary */
	if (mtag_id_lock == NULL) {
		lck_grp_attr_t	*grp_attrib = NULL;
		lck_attr_t		*lck_attrb = NULL;
		lck_grp_t		*lck_group = NULL;
		lck_mtx_t		*new_lock = NULL;

		grp_attrib = lck_grp_attr_alloc_init();
		lck_group = lck_grp_alloc_init("mbuf_tag_allocate_id", grp_attrib);
		lck_grp_attr_free(grp_attrib);
		lck_attrb = lck_attr_alloc_init();

		new_lock = lck_mtx_alloc_init(lck_group, lck_attrb);
		if (!OSCompareAndSwap((UInt32)0, (UInt32)new_lock, (UInt32*)&mtag_id_lock)) {
			/*
			 * If the atomic swap fails, someone else has already
			 * done this work. We can free the stuff we allocated.
			 */
			lck_mtx_free(new_lock, lck_group);
			lck_grp_free(lck_group);
		}
		lck_attr_free(lck_attrb);
	}

	/* Look for an existing entry */
	lck_mtx_lock(mtag_id_lock);
	SLIST_FOREACH(entry, &mtag_id_list, next) {
		if (strncmp(string, entry->string, strlen(string) + 1) == 0) {
			break;
		}
	}

	if (entry == NULL) {
		if (create == 0) {
			lck_mtx_unlock(mtag_id_lock);
			return ENOENT;
		}

		entry = kalloc(MBUF_TAG_ID_ENTRY_SIZE(string));
		if (entry == NULL) {
			lck_mtx_unlock(mtag_id_lock);
			return ENOMEM;
		}

		strlcpy(entry->string, string, strlen(string)+1);
		entry->id = mtag_id_next;
		mtag_id_next++;
		SLIST_INSERT_HEAD(&mtag_id_list, entry, next);
	}
	lck_mtx_unlock(mtag_id_lock);

	*out_id = entry->id;

	return 0;
}

errno_t
mbuf_tag_id_find(
	const char		*string,
	mbuf_tag_id_t	*out_id)
{
	return mbuf_tag_id_find_internal(string, out_id, 1);
}

errno_t
mbuf_tag_allocate(
	mbuf_t			mbuf,
	mbuf_tag_id_t	id,
	mbuf_tag_type_t	type,
	size_t			length,
	mbuf_how_t		how,
	void**			data_p)
{
	struct m_tag *tag;

	if (data_p != NULL)
		*data_p = NULL;

	/* Sanity check parameters */
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||
		id >= mtag_id_next || length < 1 || (length & 0xffff0000) != 0 ||
		data_p == NULL) {
		return EINVAL;
	}

	/* Make sure this mtag hasn't already been allocated */
	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag != NULL) {
		return EEXIST;
	}

	/* Allocate an mtag */
	tag = m_tag_alloc(id, type, length, how);
	if (tag == NULL) {
		return how == M_WAITOK ? ENOMEM : EWOULDBLOCK;
	}

	/* Attach the mtag and set *data_p */
	m_tag_prepend(mbuf, tag);
	*data_p = tag + 1;

	return 0;
}

errno_t
mbuf_tag_find(
	mbuf_t			mbuf,
	mbuf_tag_id_t	id,
	mbuf_tag_type_t	type,
	size_t*			length,
	void**			data_p)
{
	struct m_tag *tag;

	if (length != NULL)
		*length = 0;
	if (data_p != NULL)
		*data_p = NULL;

	/* Sanity check parameters */
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||
		id >= mtag_id_next || length == NULL || data_p == NULL) {
		return EINVAL;
	}

	/* Locate an mtag */
	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag == NULL) {
		return ENOENT;
	}

	/* Copy out the pointer to the data and the length value */
	*length = tag->m_tag_len;
	*data_p = tag + 1;

	return 0;
}

void
mbuf_tag_free(
	mbuf_t			mbuf,
	mbuf_tag_id_t	id,
	mbuf_tag_type_t	type)
{
	struct m_tag *tag;

	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||
		id >= mtag_id_next)
		return;

	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag == NULL) {
		return;
	}

	m_tag_delete(mbuf, tag);
	return;
}

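/*
 * A minimal sketch of the tag KPIs as a kext might use them on a packet
 * header mbuf `m` (not compiled here); the bundle-style id string, the tag
 * type constant 1 and the payload struct are all hypothetical.
 *
 *	struct my_tag_data { u_int32_t cookie; };
 *	struct my_tag_data *data;
 *	mbuf_tag_id_t my_id;
 *	errno_t err;
 *
 *	err = mbuf_tag_id_find("com.example.filter", &my_id);
 *	if (err == 0)
 *		err = mbuf_tag_allocate(m, my_id, 1, sizeof(*data),
 *		    MBUF_WAITOK, (void**)&data);
 *	if (err == 0)
 *		data->cookie = 0x1234;
 *
 *	// Later, another hook can look the tag up again:
 *	//	size_t len;
 *	//	mbuf_tag_find(m, my_id, 1, &len, (void**)&data);
 *	// and remove it when no longer needed:
 *	//	mbuf_tag_free(m, my_id, 1);
 */
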
/* mbuf stats */
void mbuf_stats(struct mbuf_stat *stats)
{
	stats->mbufs = mbstat.m_mbufs;
	stats->clusters = mbstat.m_clusters;
	stats->clfree = mbstat.m_clfree;
	stats->drops = mbstat.m_drops;
	stats->wait = mbstat.m_wait;
	stats->drain = mbstat.m_drain;
	__builtin_memcpy(stats->mtypes, mbstat.m_mtypes, sizeof(stats->mtypes));
	stats->mcfail = mbstat.m_mcfail;
	stats->mpfail = mbstat.m_mpfail;
	stats->msize = mbstat.m_msize;
	stats->mclbytes = mbstat.m_mclbytes;
	stats->minclsize = mbstat.m_minclsize;
	stats->mlen = mbstat.m_mlen;
	stats->mhlen = mbstat.m_mhlen;
	stats->bigclusters = mbstat.m_bigclusters;
	stats->bigclfree = mbstat.m_bigclfree;
	stats->bigmclbytes = mbstat.m_bigmclbytes;
}

errno_t
mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int *maxchunks, mbuf_t *mbuf)
{
	errno_t error;
	struct mbuf *m;
	unsigned int numpkts = 1;
	unsigned int numchunks = maxchunks ? *maxchunks : 0;

	if (packetlen == 0) {
		error = EINVAL;
		goto out;
	}
	m = m_allocpacket_internal(&numpkts, packetlen, maxchunks ? &numchunks : NULL, how, 1, 0);
	if (m == 0) {
		if (maxchunks && *maxchunks && numchunks > *maxchunks)
			error = ENOBUFS;
		else
			error = ENOMEM;
	} else {
		if (maxchunks)
			*maxchunks = numchunks;
		error = 0;
		*mbuf = m;
	}
out:
	return error;
}

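/*
 * A minimal sketch of mbuf_allocpacket() (not compiled here), assuming a
 * caller that needs a chain large enough for a 1500-byte frame and is
 * willing to accept any number of chunks (maxchunks passed as NULL).
 *
 *	mbuf_t m = NULL;
 *	errno_t err;
 *
 *	err = mbuf_allocpacket(MBUF_WAITOK, 1500, NULL, &m);
 *	if (err == 0) {
 *		// ... copy payload in with mbuf_copyback(), set the pkthdr length ...
 *		mbuf_freem(m);
 *	}
 */
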
errno_t
mbuf_allocpacket_list(unsigned int numpkts, mbuf_how_t how, size_t packetlen, unsigned int *maxchunks, mbuf_t *mbuf)
{
	errno_t error;
	struct mbuf *m;
	unsigned int numchunks = maxchunks ? *maxchunks : 0;

	if (numpkts == 0) {
		error = EINVAL;
		goto out;
	}
	if (packetlen == 0) {
		error = EINVAL;
		goto out;
	}
	m = m_allocpacket_internal(&numpkts, packetlen, maxchunks ? &numchunks : NULL, how, 1, 0);
	if (m == 0) {
		if (maxchunks && *maxchunks && numchunks > *maxchunks)
			error = ENOBUFS;
		else
			error = ENOMEM;
	} else {
		if (maxchunks)
			*maxchunks = numchunks;
		error = 0;
		*mbuf = m;
	}
out:
	return error;
}

/*
 * mbuf_copyback differs from m_copyback in a few ways:
 * 1) mbuf_copyback will allocate clusters for new mbufs we append
 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
 * 3) mbuf_copyback reports whether or not the operation succeeded
 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
 */
errno_t
mbuf_copyback(
	mbuf_t		m,
	size_t		off,
	size_t		len,
	const void	*data,
	mbuf_how_t	how)
{
	size_t	mlen;
	mbuf_t	m_start = m;
	mbuf_t	n;
	int		totlen = 0;
	errno_t		result = 0;
	const char	*cp = data;

	if (m == NULL || len == 0 || data == NULL)
		return EINVAL;

	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(how, m->m_type);
			if (n == 0) {
				result = ENOBUFS;
				goto out;
			}
			n->m_len = MIN(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}

	while (len > 0) {
		mlen = MIN(m->m_len - off, len);
		if (mlen < len && m->m_next == NULL && mbuf_trailingspace(m) > 0) {
			size_t	grow = MIN(mbuf_trailingspace(m), len - mlen);
			mlen += grow;
			m->m_len += grow;
		}
		bcopy(cp, off + (char*)mbuf_data(m), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(how, m->m_type);
			if (n == NULL) {
				result = ENOBUFS;
				goto out;
			}
			if (len > MINCLSIZE) {
				/* cluster allocation failure is okay, we can grow the chain */
				mbuf_mclget(how, m->m_type, &n);
			}
			n->m_len = MIN(mbuf_maxlen(n), len);
			m->m_next = n;
		}
		m = m->m_next;
	}

out:
	if ((m_start->m_flags & M_PKTHDR) && (m_start->m_pkthdr.len < totlen))
		m_start->m_pkthdr.len = totlen;

	return result;
}

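/*
 * A minimal sketch of mbuf_copyback() (not compiled here), assuming a caller
 * that wants to overwrite or append a small header at the front of a chain
 * it owns; `struct my_hdr` and its contents are hypothetical.
 *
 *	struct my_hdr hdr = { 0 };
 *	errno_t err;
 *
 *	err = mbuf_copyback(m, 0, sizeof(hdr), &hdr, MBUF_WAITOK);
 *	if (err != 0) {
 *		// ENOBUFS or EINVAL: the chain could not be extended
 *	}
 */
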
#if !INET6
void inet6_unsupported(void);

void inet6_unsupported(void)
{
	/* Deliberately store through a NULL pointer to force a panic if called */
	*((int *)0) = 0x1;
}
#endif /* !INET6 */