/* extract.h revision 127668 */
/*
 * Copyright (c) 1992, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * @(#) $Header: /tcpdump/master/tcpdump/extract.h,v 1.19 2002/12/11 07:13:51 guy Exp $ (LBL)
 */

/* Network to host order macros */

#ifdef LBL_ALIGN
/*
 * The processor doesn't natively handle unaligned loads.
 */
#ifdef HAVE___ATTRIBUTE__
/*
 * We have __attribute__; we assume that means we have __attribute__((packed)).
 * Declare packed structures containing a u_int16_t and a u_int32_t,
 * cast the pointer to point to one of those, and fetch through it;
 * the GCC manual doesn't appear to explicitly say that
 * __attribute__((packed)) causes the compiler to generate unaligned-safe
 * code, but it appears to do so.
 *
 * We do this in case the compiler can generate, for this instruction set,
 * better code to do an unaligned load and pass stuff to "ntohs()" or
 * "ntohl()" than the code to fetch the bytes one at a time and
 * assemble them.  (That might not be the case on a little-endian platform,
 * where "ntohs()" and "ntohl()" might not be done inline.)
 */
typedef struct {
	u_int16_t	val;
} __attribute__((packed)) unaligned_u_int16_t;

typedef struct {
	u_int32_t	val;
} __attribute__((packed)) unaligned_u_int32_t;

/* Fetch a big-endian 16-bit/32-bit quantity from possibly-unaligned p. */
#define EXTRACT_16BITS(p) \
	((u_int16_t)ntohs(((const unaligned_u_int16_t *)(p))->val))
#define EXTRACT_32BITS(p) \
	((u_int32_t)ntohl(((const unaligned_u_int32_t *)(p))->val))
#else /* HAVE___ATTRIBUTE__ */
/*
 * We don't have __attribute__, so do unaligned loads of big-endian
 * quantities the hard way - fetch the bytes one at a time and
 * assemble them.
 */
#define EXTRACT_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 0) << 8 | \
		     (u_int16_t)*((const u_int8_t *)(p) + 1)))
#define EXTRACT_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 24 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 3)))
#endif /* HAVE___ATTRIBUTE__ */
#else /* LBL_ALIGN */
/*
 * The processor natively handles unaligned loads, so we can just
 * cast the pointer and fetch through it.
 */
#define EXTRACT_16BITS(p) \
	((u_int16_t)ntohs(*(const u_int16_t *)(p)))
#define EXTRACT_32BITS(p) \
	((u_int32_t)ntohl(*(const u_int32_t *)(p)))
#endif /* LBL_ALIGN */

/*
 * Fetch a big-endian 24-bit quantity from p, byte at a time, so no
 * alignment is required; result is zero-extended into a u_int32_t.
 */
#define EXTRACT_24BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2)))
/* Little endian protocol host order macros */

/*
 * Fetch 8-/16-/32-bit little-endian quantities from p, byte at a time,
 * so no alignment is required and host byte order doesn't matter.
 */
#define EXTRACT_LE_8BITS(p) (*(p))
#define EXTRACT_LE_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int16_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 3) << 24 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 0)))