/*
 * Copyright (c) 1992, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.''  Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * @(#) $Header: /tcpdump/master/tcpdump/extract.h,v 1.25 2006-01-30 16:20:07 hannes Exp $ (LBL)
 */

/*
 * Macros to extract possibly-unaligned big-endian integral values.
 */
#ifdef LBL_ALIGN
/*
 * The processor doesn't natively handle unaligned loads.
 */
#ifdef HAVE___ATTRIBUTE__
/*
 * We have __attribute__; we assume that means we have __attribute__((packed)).
 * Declare packed structures containing a u_int16_t and a u_int32_t,
 * cast the pointer to point to one of those, and fetch through it;
 * the GCC manual doesn't appear to explicitly say that
 * __attribute__((packed)) causes the compiler to generate unaligned-safe
 * code, but it appears to do so.
 *
 * We do this in case the compiler can generate, for this instruction set,
 * better code to do an unaligned load and pass stuff to "ntohs()" or
 * "ntohl()" than the code to fetch the bytes one at a time and
 * assemble them.  (That might not be the case on a little-endian platform,
 * where "ntohs()" and "ntohl()" might not be done inline.)
 */
typedef struct {
	u_int16_t	val;
} __attribute__((packed)) unaligned_u_int16_t;

typedef struct {
	u_int32_t	val;
} __attribute__((packed)) unaligned_u_int32_t;

/* Fetch a big-endian 16-bit value via a packed struct (unaligned-safe). */
static inline u_int16_t
EXTRACT_16BITS(const void *p)
{
	return ((u_int16_t)ntohs(((const unaligned_u_int16_t *)(p))->val));
}

/* Fetch a big-endian 32-bit value via a packed struct (unaligned-safe). */
static inline u_int32_t
EXTRACT_32BITS(const void *p)
{
	return ((u_int32_t)ntohl(((const unaligned_u_int32_t *)(p))->val));
}

/*
 * Fetch a big-endian 64-bit value as two 32-bit packed-struct loads;
 * there is no portable 64-bit ntohll(), so assemble the halves by hand.
 */
static inline u_int64_t
EXTRACT_64BITS(const void *p)
{
	return ((u_int64_t)(((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 0)->val)) << 32 |
		((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 1)->val)) << 0));
}

#else /* HAVE___ATTRIBUTE__ */
/*
 * We don't have __attribute__, so do unaligned loads of big-endian
 * quantities the hard way - fetch the bytes one at a time and
 * assemble them.
 */
#define EXTRACT_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 0) << 8 | \
	             (u_int16_t)*((const u_int8_t *)(p) + 1)))
#define EXTRACT_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 24 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 1) << 16 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 2) << 8 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 3)))
#define EXTRACT_64BITS(p) \
	((u_int64_t)((u_int64_t)*((const u_int8_t *)(p) + 0) << 56 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 1) << 48 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 2) << 40 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 3) << 32 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 4) << 24 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 5) << 16 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 6) << 8 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 7)))
#endif /* HAVE___ATTRIBUTE__ */
#else /* LBL_ALIGN */
/*
 * The processor natively handles unaligned loads, so we can just
 * cast the pointer and fetch through it.
 */
static inline u_int16_t
EXTRACT_16BITS(const void *p)
{
	return ((u_int16_t)ntohs(*(const u_int16_t *)(p)));
}

static inline u_int32_t
EXTRACT_32BITS(const void *p)
{
	return ((u_int32_t)ntohl(*(const u_int32_t *)(p)));
}

/* Assemble 64 bits from two 32-bit network-order loads (no ntohll()). */
static inline u_int64_t
EXTRACT_64BITS(const void *p)
{
	return ((u_int64_t)(((u_int64_t)ntohl(*((const u_int32_t *)(p) + 0))) << 32 |
		((u_int64_t)ntohl(*((const u_int32_t *)(p) + 1))) << 0));
}

#endif /* LBL_ALIGN */

/* 24-bit fields have no native type, so always assemble byte by byte. */
#define EXTRACT_24BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 16 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 2)))

/*
 * Macros to extract possibly-unaligned little-endian integral values.
 * XXX - do loads on little-endian machines that support unaligned loads?
 */
#define EXTRACT_LE_8BITS(p) (*(p))
#define EXTRACT_LE_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 1) << 8 | \
	             (u_int16_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 3) << 24 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 2) << 16 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_24BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 2) << 16 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_64BITS(p) \
	((u_int64_t)((u_int64_t)*((const u_int8_t *)(p) + 7) << 56 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 6) << 48 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 5) << 40 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 4) << 32 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 3) << 24 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 2) << 16 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 1) << 8 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 0)))