/*
 * net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>

/*
 * 0 - no debugging messages
 * 1 - rare events and bugs (default)
 * 2 - trace mode.
 */
#define RT_CACHE_DEBUG		0

/* Garbage-collection interval bounds for the dst GC timer, in jiffies. */
#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has reference count and sits in some parent list(s).
 * When it is removed from parent list, it is "freed" (dst_free).
 * After this it enters dead state (dst->obsolete > 0) and if its refcnt
 * is zero, it can be destroyed immediately, otherwise it is added
 * to gc list and garbage collector periodically checks the refcnt.
 */

struct sk_buff;

/*
 * A protocol-independent destination cache entry.  Protocol-specific
 * routes (struct rtable, rt6_info, dn_route) embed one of these; the
 * union at the end links entries into per-protocol hash chains.
 */
struct dst_entry {
	struct rcu_head		rcu_head;
	struct dst_entry	*child;		/* next dst in the xfrm/output stack */
	struct net_device	*dev;
	short			error;
	short			obsolete;	/* >0: entry is dead, revalidate via ops->check */
	int			flags;
#define DST_HOST		1
#define DST_NOXFRM		2
#define DST_NOPOLICY		4
#define DST_NOHASH		8
	unsigned long		expires;	/* jiffies; 0 means "never expires" */

	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */

	unsigned int		rate_tokens;
	unsigned long		rate_last;	/* rate limiting for ICMP */

	struct dst_entry	*path;

	struct neighbour	*neighbour;
	struct hh_cache		*hh;		/* cached hardware header */
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	/* keep layout identical when IPsec is compiled out */
	void			*__pad1;
#endif
	int			(*input)(struct sk_buff*);
	int			(*output)(struct sk_buff*);

	struct dst_ops		*ops;

	/* routing metrics, indexed by RTAX_* - 1 (see dst_metric()) */
	u32			metrics[RTAX_MAX];

#ifdef CONFIG_NET_CLS_ROUTE
	__u32			tclassid;
#else
	__u32			__pad2;
#endif


	/*
	 * Align __refcnt to a 64 bytes alignment
	 * (L1_CACHE_SIZE would be too much)
	 */
#ifdef CONFIG_64BIT
	long			__pad_to_align_refcnt[1];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references	*/
	int			__use;		/* dst_use() counter */
	unsigned long		lastuse;	/* jiffies of last dst_use() */
	union {
		struct dst_entry	*next;
		struct rtable		*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route		*dn_next;
	};
};

#ifdef __KERNEL__

/*
 * Read routing metric @metric (an RTAX_* constant, 1-based) from @dst.
 * RTAX_* values are 1-based while the metrics[] array is 0-based,
 * hence the "-1".
 */
static inline u32
dst_metric(const struct dst_entry *dst, int metric)
{
	return dst->metrics[metric-1];
}

/* Test @feature bit(s) in the RTAX_FEATURES metric of @dst. */
static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

/* Path MTU for this destination, from the RTAX_MTU metric. */
static inline u32 dst_mtu(const struct dst_entry *dst)
{
	u32 mtu = dst_metric(dst, RTAX_MTU);
	/*
	 * Compiler barrier: forces the read above to happen here.
	 * Alexey put it here, so ask him about it :)
	 */
	barrier();
	return mtu;
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}

/* Store an RTT metric given in jiffies, converting to ms for the user ABI. */
static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
				      unsigned long rtt)
{
	dst->metrics[metric-1] = jiffies_to_msecs(rtt);
}

/* Nonzero if the "always fragment" feature is set for this destination. */
static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
	/* Yes, _exactly_. This is paranoia.
	 * Compiler barrier, as in dst_mtu() above.
	 */
	barrier();
	return ret;
}

/* Nonzero if metric @metric is locked (RTAX_LOCK bit set) and must not
 * be changed, e.g. by path MTU discovery.
 */
static inline int
dst_metric_locked(struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}

/* Take a client reference on @dst. */
static inline void dst_hold(struct dst_entry * dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}

/* Take a reference and record a use of @dst at time @time (jiffies). */
static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}

/* Record a use of @dst without taking a reference.
 * NOTE(review): presumably the caller guarantees the entry stays alive
 * (e.g. under rcu_read_lock) -- confirm against callers.
 */
static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}

/* NULL-safe reference grab; returns @dst for call chaining. */
static inline
struct dst_entry * dst_clone(struct dst_entry * dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}

extern void dst_release(struct dst_entry *dst);

/* Release the dst reference encoded in an skb's _skb_refdst word,
 * unless the SKB_DST_NOREF bit says no reference was taken.
 */
static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}

/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}

/* Copy @oskb's dst (including the NOREF bit) to @nskb, taking a new
 * reference only if the source held a real (refcounted) one.
 */
static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	nskb->_skb_refdst = oskb->_skb_refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted, let's do it
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		/* noref dsts are only valid under rcu_read_lock */
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}


/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups, and perform accounting.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	skb->rxhash = 0;
	skb_set_queue_mapping(skb, 0);
	skb_dst_drop(skb);
	nf_reset(skb);
}

/* Children define the path of the packet through the
 * Linux networking. Thus, destinations are stackable.
 */

/* Drop the skb's reference on its current dst and return that dst's
 * child (the next hop in a stacked, e.g. xfrm, destination chain).
 */
static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = skb_dst(skb)->child;

	skb_dst_drop(skb);
	return child;
}

extern int dst_discard(struct sk_buff *skb);
extern void * dst_alloc(struct dst_ops * ops);
extern void __dst_free(struct dst_entry * dst);
extern struct dst_entry *dst_destroy(struct dst_entry * dst);

/* Free @dst: destroy it immediately if unreferenced, otherwise hand it
 * to the garbage collector via __dst_free().  See the state-machine
 * comment near the top of this file.
 */
static inline void dst_free(struct dst_entry * dst)
{
	if (dst->obsolete > 1)
		return;		/* already on its way to destruction */
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}

/* RCU callback wrapper around dst_free(), for call_rcu(&dst->rcu_head, ...). */
static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
	dst_free(dst);
}

/* Confirm reachability of the dst's neighbour entry (NULL-safe on @dst). */
static inline void dst_confirm(struct dst_entry *dst)
{
	if (dst)
		neigh_confirm(dst->neighbour);
}

/* Report a link failure for @skb's destination to its protocol handler. */
static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

/* Set/advance @dst's expiry to @timeout jiffies from now, but only if it
 * would expire sooner than the current setting.  0 is reserved to mean
 * "no expiry", so a computed value of 0 is nudged to 1.
 */
static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}

/* Output packet to network from transport. */
static inline int dst_output(struct sk_buff *skb)
{
	return skb_dst(skb)->output(skb);
}

/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}

/* Revalidate a possibly-obsolete dst via its ops->check hook; may return
 * NULL or a replacement entry.
 */
static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}

extern void dst_init(void);

/* Flags for xfrm_lookup flags argument.
 */
enum {
	XFRM_LOOKUP_WAIT = 1 << 0,	/* may sleep waiting for SA resolution */
	XFRM_LOOKUP_ICMP = 1 << 1,
};

struct flowi;
#ifndef CONFIG_XFRM
/* With IPsec compiled out, transformation lookup is a successful no-op:
 * *dst_p is left as the plain routing result.
 */
static inline int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
			      struct flowi *fl, struct sock *sk, int flags)
{
	return 0;
}
static inline int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
				struct flowi *fl, struct sock *sk, int flags)
{
	return 0;
}
#else
extern int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
		       struct flowi *fl, struct sock *sk, int flags);
extern int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
			 struct flowi *fl, struct sock *sk, int flags);
#endif
#endif

#endif /* _NET_DST_H */