extract.h revision 1.4
/*
 * Copyright (c) 1992, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * @(#) Header: /tcpdump/master/tcpdump/extract.h,v 1.25 2006-01-30 16:20:07 hannes Exp  (LBL)
 */

#ifdef __NetBSD__
#include <string.h>

/*
 * Do it the portable way and let the compiler optimize the code
 */
static inline uint16_t EXTRACT_16BITS(const void *p)
{
	uint16_t t;
	memcpy(&t, p, sizeof(t));
	return ntohs(t);
}

static inline uint32_t EXTRACT_24BITS(const void *p)
{
	uint8_t t[3];
	memcpy(t, p, sizeof(t));
	return
	    ((uint32_t)t[0] << 16) |
	    ((uint32_t)t[1] << 8) |
	    t[2];
}

static inline uint32_t EXTRACT_32BITS(const void *p)
{
	uint32_t t;
	memcpy(&t, p, sizeof(t));
	return ntohl(t);
}

static inline uint64_t EXTRACT_64BITS(const void *p)
{
	uint32_t t[2];
	memcpy(&t[0], p, sizeof(t[0]));
	memcpy(&t[1], (const uint8_t *)p + sizeof(t[0]), sizeof(t[1]));
	return ((uint64_t)ntohl(t[0]) << 32) | ntohl(t[1]);
}

static inline uint8_t EXTRACT_LE_8BITS(const void *p)
{
	uint8_t t[1];
	memcpy(t, p, sizeof(t));
	return t[0];
}

static inline uint16_t EXTRACT_LE_16BITS(const void *p)
{
	uint8_t t[2];
	memcpy(t, p, sizeof(t));
	return
	    ((uint16_t)t[1] << 8) |
	    t[0];
}

static inline uint32_t EXTRACT_LE_24BITS(const void *p)
{
	uint8_t t[3];
	memcpy(t, p, sizeof(t));
	return
	    ((uint32_t)t[2] << 16) |
	    ((uint32_t)t[1] << 8) |
	    t[0];
}

static inline uint32_t EXTRACT_LE_32BITS(const void *p)
{
	uint8_t t[4];
	memcpy(t, p, sizeof(t));
	return
	    ((uint32_t)t[3] << 24) |
	    ((uint32_t)t[2] << 16) |
	    ((uint32_t)t[1] << 8) |
	    t[0];
}

static inline uint64_t EXTRACT_LE_64BITS(const void *p)
{
	uint8_t t[8];
	memcpy(t, p, sizeof(t));
	return
	    ((uint64_t)t[7] << 56) |
	    ((uint64_t)t[6] << 48) |
	    ((uint64_t)t[5] << 40) |
	    ((uint64_t)t[4] << 32) |
	    ((uint64_t)t[3] << 24) |
	    ((uint64_t)t[2] << 16) |
	    ((uint64_t)t[1] << 8) |
	    t[0];
}

#else /* Fast & Loose */
/*
 * Macros to extract possibly-unaligned big-endian integral values.
 */
#ifdef LBL_ALIGN
/*
 * The processor doesn't natively handle unaligned loads.
 */
#ifdef HAVE___ATTRIBUTE__
/*
 * We have __attribute__; we assume that means we have __attribute__((packed)).
 * Declare packed structures containing a u_int16_t and a u_int32_t,
 * cast the pointer to point to one of those, and fetch through it;
 * the GCC manual doesn't appear to explicitly say that
 * __attribute__((packed)) causes the compiler to generate unaligned-safe
 * code, but it appears to do so.
 *
 * We do this in case the compiler can generate, for this instruction set,
 * better code to do an unaligned load and pass the result to "ntohs()" or
 * "ntohl()" than the code to fetch the bytes one at a time and
 * assemble them.  (That might not be the case on a little-endian platform,
 * where "ntohs()" and "ntohl()" might not be done inline.)
 */
typedef struct {
	u_int16_t	val;
} __attribute__((packed)) unaligned_u_int16_t;

typedef struct {
	u_int32_t	val;
} __attribute__((packed)) unaligned_u_int32_t;

static inline u_int16_t
EXTRACT_16BITS(const void *p)
{
	return ((u_int16_t)ntohs(((const unaligned_u_int16_t *)(p))->val));
}

static inline u_int32_t
EXTRACT_32BITS(const void *p)
{
	return ((u_int32_t)ntohl(((const unaligned_u_int32_t *)(p))->val));
}

static inline u_int64_t
EXTRACT_64BITS(const void *p)
{
	return ((u_int64_t)(((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 0)->val)) << 32 |
		((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 1)->val)) << 0));
}

#else /* HAVE___ATTRIBUTE__ */
/*
 * We don't have __attribute__, so do unaligned loads of big-endian
 * quantities the hard way - fetch the bytes one at a time and
 * assemble them.
 */
#define EXTRACT_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 0) << 8 | \
		     (u_int16_t)*((const u_int8_t *)(p) + 1)))
#define EXTRACT_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 24 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 3)))
#define EXTRACT_64BITS(p) \
	((u_int64_t)((u_int64_t)*((const u_int8_t *)(p) + 0) << 56 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 1) << 48 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 2) << 40 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 3) << 32 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 4) << 24 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 5) << 16 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 6) << 8 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 7)))
#endif /* HAVE___ATTRIBUTE__ */
#else /* LBL_ALIGN */
/*
 * The processor natively handles unaligned loads, so we can just
 * cast the pointer and fetch through it.
 */
static inline u_int16_t
EXTRACT_16BITS(const void *p)
{
	return ((u_int16_t)ntohs(*(const u_int16_t *)(p)));
}

static inline u_int32_t
EXTRACT_32BITS(const void *p)
{
	return ((u_int32_t)ntohl(*(const u_int32_t *)(p)));
}

static inline u_int64_t
EXTRACT_64BITS(const void *p)
{
	return ((u_int64_t)(((u_int64_t)ntohl(*((const u_int32_t *)(p) + 0))) << 32 |
		((u_int64_t)ntohl(*((const u_int32_t *)(p) + 1))) << 0));
}

#endif /* LBL_ALIGN */

/*
 * There is no native 24-bit load, so 24-bit values are always assembled
 * a byte at a time.
 */
#define EXTRACT_24BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2)))

/*
 * Macros to extract possibly-unaligned little-endian integral values.
 * XXX - do loads on little-endian machines that support unaligned loads?
 */
#define EXTRACT_LE_8BITS(p) (*(p))
#define EXTRACT_LE_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int16_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 3) << 24 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_24BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 2) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_64BITS(p) \
	((u_int64_t)((u_int64_t)*((const u_int8_t *)(p) + 7) << 56 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 6) << 48 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 5) << 40 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 4) << 32 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 3) << 24 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 2) << 16 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 0)))
#endif /* __NetBSD__ */
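/*
 * Illustrative usage sketch (not part of the original header): a dissector
 * holding a pointer "bp" into the captured, possibly unaligned packet data
 * would typically pull fields out of the buffer like this; the offsets and
 * variable names below are hypothetical.
 *
 *	u_int16_t id    = EXTRACT_16BITS(bp);            big-endian 16-bit field
 *	u_int32_t seq   = EXTRACT_32BITS(bp + 4);        big-endian 32-bit field
 *	u_int32_t flags = EXTRACT_LE_32BITS(bp + 8);     little-endian 32-bit field
 *
 * The EXTRACT_* forms read big-endian (network byte order) values and the
 * EXTRACT_LE_* forms read little-endian values; both are written so that
 * "bp" need not be aligned.
 */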