/*
 * Copyright (c) 1992, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * @(#) $Header: /tcpdump/master/tcpdump/extract.h,v 1.24 2005/01/15 02:06:50 guy Exp $ (LBL)
 */

#ifndef tcpdump_extract_h
#define	tcpdump_extract_h

/*
 * Macros to extract possibly-unaligned big-endian integral values.
 */
#ifdef LBL_ALIGN
/*
 * The processor doesn't natively handle unaligned loads.
 */
#ifdef HAVE___ATTRIBUTE__
/*
 * We have __attribute__; we assume that means we have __attribute__((packed)).
 * Declare packed structures containing a u_int16_t and a u_int32_t,
 * cast the pointer to point to one of those, and fetch through it;
 * the GCC manual doesn't appear to explicitly say that
 * __attribute__((packed)) causes the compiler to generate unaligned-safe
 * code, but it appears to do so.
 *
 * We do this in case the compiler can generate, for this instruction set,
 * better code to do an unaligned load and pass stuff to "ntohs()" or
 * "ntohl()" than the code to fetch the bytes one at a time and
 * assemble them.  (That might not be the case on a little-endian platform,
 * where "ntohs()" and "ntohl()" might not be done inline.)
 */
typedef struct {
	u_int16_t	val;
} __attribute__((packed)) unaligned_u_int16_t;

typedef struct {
	u_int32_t	val;
} __attribute__((packed)) unaligned_u_int32_t;

#define EXTRACT_16BITS(p) \
	((u_int16_t)ntohs(((const unaligned_u_int16_t *)(p))->val))
#define EXTRACT_32BITS(p) \
	((u_int32_t)ntohl(((const unaligned_u_int32_t *)(p))->val))
#define EXTRACT_64BITS(p) \
	((u_int64_t)(((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 0)->val)) << 32 | \
		     ((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 1)->val)) << 0))

#else /* HAVE___ATTRIBUTE__ */
/*
 * We don't have __attribute__, so do unaligned loads of big-endian
 * quantities the hard way - fetch the bytes one at a time and
 * assemble them.
 */
#define EXTRACT_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 0) << 8 | \
		     (u_int16_t)*((const u_int8_t *)(p) + 1)))
#define EXTRACT_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 24 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 3)))
#define EXTRACT_64BITS(p) \
	((u_int64_t)((u_int64_t)*((const u_int8_t *)(p) + 0) << 56 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 1) << 48 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 2) << 40 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 3) << 32 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 4) << 24 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 5) << 16 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 6) << 8 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 7)))
#endif /* HAVE___ATTRIBUTE__ */
#else /* LBL_ALIGN */
/*
 * The processor natively handles unaligned loads, so we can just
 * cast the pointer and fetch through it.
 */
#define EXTRACT_16BITS(p) \
	((u_int16_t)ntohs(*(const u_int16_t *)(p)))
#define EXTRACT_32BITS(p) \
	((u_int32_t)ntohl(*(const u_int32_t *)(p)))
#define EXTRACT_64BITS(p) \
	((u_int64_t)(((u_int64_t)ntohl(*((const u_int32_t *)(p) + 0))) << 32 | \
		     ((u_int64_t)ntohl(*((const u_int32_t *)(p) + 1))) << 0))
#endif /* LBL_ALIGN */

/*
 * 24-bit fetch has no native integer type, so it is always assembled
 * byte-by-byte (and is therefore alignment-safe on every configuration).
 */
#define EXTRACT_24BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2)))

/*
 * Macros to extract possibly-unaligned little-endian integral values.
 * XXX - do loads on little-endian machines that support unaligned loads?
 */
#define EXTRACT_LE_8BITS(p) (*(p))
#define EXTRACT_LE_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int16_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 3) << 24 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_64BITS(p) \
	((u_int64_t)((u_int64_t)*((const u_int8_t *)(p) + 7) << 56 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 6) << 48 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 5) << 40 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 4) << 32 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 3) << 24 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 2) << 16 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 0)))

#endif /* tcpdump_extract_h */