/*
 * Copyright (c) 1992, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * @(#) $Header: /tcpdump/master/tcpdump/extract.h,v 1.25 2006-01-30 16:20:07 hannes Exp $ (LBL)
 */

/*
 * Macros to extract possibly-unaligned big-endian integral values.
 */
#ifdef LBL_ALIGN
/*
 * The processor doesn't natively handle unaligned loads.
 */
#ifdef HAVE___ATTRIBUTE__
/*
 * We have __attribute__; we assume that means we have __attribute__((packed)).
 * Declare packed structures containing a u_int16_t and a u_int32_t,
 * cast the pointer to point to one of those, and fetch through it;
 * the GCC manual doesn't appear to explicitly say that
 * __attribute__((packed)) causes the compiler to generate unaligned-safe
 * code, but it appears to do so.
 *
 * We do this in case the compiler can generate, for this instruction set,
 * better code to do an unaligned load and pass stuff to "ntohs()" or
 * "ntohl()" than the code to fetch the bytes one at a time and
 * assemble them.  (That might not be the case on a little-endian platform,
 * where "ntohs()" and "ntohl()" might not be done inline.)
 */
typedef struct {
	u_int16_t	val;
} __attribute__((packed)) unaligned_u_int16_t;

typedef struct {
	u_int32_t	val;
} __attribute__((packed)) unaligned_u_int32_t;

#define EXTRACT_16BITS(p) \
	((u_int16_t)ntohs(((const unaligned_u_int16_t *)(p))->val))
#define EXTRACT_32BITS(p) \
	((u_int32_t)ntohl(((const unaligned_u_int32_t *)(p))->val))
#define EXTRACT_64BITS(p) \
	((u_int64_t)(((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 0)->val)) << 32 | \
		     ((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 1)->val)) << 0))
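
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * given a pointer "bp" into raw, possibly-unaligned packet data,
 *
 *	u_int16_t len = EXTRACT_16BITS(bp);	 (2 bytes, network order)
 *	u_int32_t seq = EXTRACT_32BITS(bp + 4);	 (4 bytes at offset 4)
 *
 * The cast to a packed-struct pointer tells the compiler that the value
 * may live at any address, so it emits an unaligned-safe load before
 * ntohs()/ntohl() swap the bytes on little-endian hosts.
 */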

#else /* HAVE___ATTRIBUTE__ */
/*
 * We don't have __attribute__, so do unaligned loads of big-endian
 * quantities the hard way - fetch the bytes one at a time and
 * assemble them.
 */
#define EXTRACT_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 0) << 8 | \
		     (u_int16_t)*((const u_int8_t *)(p) + 1)))
#define EXTRACT_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 24 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 3)))
#define EXTRACT_64BITS(p) \
	((u_int64_t)((u_int64_t)*((const u_int8_t *)(p) + 0) << 56 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 1) << 48 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 2) << 40 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 3) << 32 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 4) << 24 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 5) << 16 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 6) << 8 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 7)))
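
/*
 * Worked example: for two consecutive bytes {0x12, 0x34} in memory,
 * EXTRACT_16BITS(p) computes (0x12 << 8) | 0x34 = 0x1234, regardless
 * of host byte order or pointer alignment, since only byte loads are
 * performed.
 */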
#endif /* HAVE___ATTRIBUTE__ */
#else /* LBL_ALIGN */
/*
 * The processor natively handles unaligned loads, so we can just
 * cast the pointer and fetch through it.
 */
#define EXTRACT_16BITS(p) \
	((u_int16_t)ntohs(*(const u_int16_t *)(p)))
#define EXTRACT_32BITS(p) \
	((u_int32_t)ntohl(*(const u_int32_t *)(p)))
#define EXTRACT_64BITS(p) \
	((u_int64_t)(((u_int64_t)ntohl(*((const u_int32_t *)(p) + 0))) << 32 | \
		     ((u_int64_t)ntohl(*((const u_int32_t *)(p) + 1))) << 0))
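
/*
 * Note that EXTRACT_64BITS is built from two 32-bit loads because there
 * is no standard 64-bit counterpart of ntohl(); the word at the lower
 * address (most significant in network order) is shifted into the high
 * half of the result.
 */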
#endif /* LBL_ALIGN */

#define EXTRACT_24BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2)))
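
/*
 * Worked example: for bytes {0xAB, 0xCD, 0xEF}, EXTRACT_24BITS(p)
 * yields (0xAB << 16) | (0xCD << 8) | 0xEF = 0x00ABCDEF.  24-bit
 * fields are always assembled byte by byte, since no integer type
 * matches their width.
 */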

/*
 * Macros to extract possibly-unaligned little-endian integral values.
 * XXX - do loads on little-endian machines that support unaligned loads?
 */
#define EXTRACT_LE_8BITS(p) (*(p))
#define EXTRACT_LE_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int16_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 3) << 24 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_24BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 2) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_64BITS(p) \
	((u_int64_t)((u_int64_t)*((const u_int8_t *)(p) + 7) << 56 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 6) << 48 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 5) << 40 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 4) << 32 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 3) << 24 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 2) << 16 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 0)))
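
/*
 * Worked example: for bytes {0x78, 0x56, 0x34, 0x12} in memory,
 * EXTRACT_LE_32BITS(p) yields 0x12345678 - the byte at the lowest
 * address becomes the least significant byte of the result.  As with
 * the big-endian fallback macros above, only byte loads are used, so
 * these are safe at any alignment on any host.
 */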