/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
27 28***************************************************************************/ 29#ifndef _CHELSIO_L2T_H 30#define _CHELSIO_L2T_H 31 32#include <sys/lock.h> 33 34#define rwlock mtx 35#define rw_wlock(x) mtx_lock((x)) 36#define rw_wunlock(x) mtx_unlock((x)) 37#define rw_rlock(x) mtx_lock((x)) 38#define rw_runlock(x) mtx_unlock((x)) 39#define rw_init(x, str) mtx_init((x), (str), NULL, MTX_DEF) 40#define rw_destroy(x) mtx_destroy((x)) 41 42enum { 43 L2T_STATE_VALID, /* entry is up to date */ 44 L2T_STATE_STALE, /* entry may be used but needs revalidation */ 45 L2T_STATE_RESOLVING, /* entry needs address resolution */ 46 L2T_STATE_UNUSED /* entry not in use */ 47}; 48 49/* 50 * Each L2T entry plays multiple roles. First of all, it keeps state for the 51 * corresponding entry of the HW L2 table and maintains a queue of offload 52 * packets awaiting address resolution. Second, it is a node of a hash table 53 * chain, where the nodes of the chain are linked together through their next 54 * pointer. Finally, each node is a bucket of a hash table, pointing to the 55 * first element in its chain through its first pointer. 
56 */ 57struct l2t_entry { 58 uint16_t state; /* entry state */ 59 uint16_t idx; /* entry index */ 60 uint32_t addr; /* dest IP address */ 61 int ifindex; /* neighbor's net_device's ifindex */ 62 uint16_t smt_idx; /* SMT index */ 63 uint16_t vlan; /* VLAN TCI (id: bits 0-11, prio: 13-15 */ 64 struct rtentry *neigh; /* associated neighbour */ 65 struct l2t_entry *first; /* start of hash chain */ 66 struct l2t_entry *next; /* next l2t_entry on chain */ 67 struct mbuf *arpq_head; /* queue of packets awaiting resolution */ 68 struct mbuf *arpq_tail; 69 struct mtx lock; 70 volatile uint32_t refcnt; /* entry reference count */ 71 uint8_t dmac[6]; /* neighbour's MAC address */ 72#ifndef NETEVENT 73#ifdef CONFIG_CHELSIO_T3_MODULE 74 struct timer_list update_timer; 75#endif 76#endif 77}; 78 79struct l2t_data { 80 unsigned int nentries; /* number of entries */ 81 struct l2t_entry *rover; /* starting point for next allocation */ 82 volatile uint32_t nfree; /* number of free entries */ 83 struct rwlock lock; 84 struct l2t_entry l2tab[0]; 85}; 86 87typedef void (*arp_failure_handler_func)(struct toedev *dev, 88 struct mbuf *m); 89 90/* 91 * Callback stored in an skb to handle address resolution failure. 92 */ 93struct l2t_mbuf_cb { 94 arp_failure_handler_func arp_failure_handler; 95}; 96 97/* 98 * XXX 99 */ 100#define L2T_MBUF_CB(skb) ((struct l2t_mbuf_cb *)(skb)->cb) 101 102 103static __inline void set_arp_failure_handler(struct mbuf *m, 104 arp_failure_handler_func hnd) 105{ 106#if 0 107 L2T_SKB_CB(skb)->arp_failure_handler = hnd; 108#endif 109 panic("implement me"); 110} 111 112/* 113 * Getting to the L2 data from an offload device. 
114 */ 115#define L2DATA(dev) ((dev)->l2opt) 116 117void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e); 118void t3_l2t_update(struct toedev *dev, struct rtentry *ifp); 119struct l2t_entry *t3_l2t_get(struct toedev *dev, struct rtentry *neigh, 120 unsigned int smt_idx); 121int t3_l2t_send_slow(struct toedev *dev, struct mbuf *m, 122 struct l2t_entry *e); 123void t3_l2t_send_event(struct toedev *dev, struct l2t_entry *e); 124struct l2t_data *t3_init_l2t(unsigned int l2t_capacity); 125void t3_free_l2t(struct l2t_data *d); 126 127#ifdef CONFIG_PROC_FS 128int t3_l2t_proc_setup(struct proc_dir_entry *dir, struct l2t_data *d); 129void t3_l2t_proc_free(struct proc_dir_entry *dir); 130#else 131#define l2t_proc_setup(dir, d) 0 132#define l2t_proc_free(dir) 133#endif 134 135int cxgb_ofld_send(struct toedev *dev, struct mbuf *m); 136 137static inline int l2t_send(struct toedev *dev, struct mbuf *m, 138 struct l2t_entry *e) 139{ 140 if (__predict_true(e->state == L2T_STATE_VALID)) 141 return cxgb_ofld_send(dev, m); 142 return t3_l2t_send_slow(dev, m, e); 143} 144 145static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e) 146{ 147 if (atomic_fetchadd_int(&e->refcnt, -1) == 1) 148 t3_l2e_free(d, e); 149} 150 151static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) 152{ 153 if (atomic_fetchadd_int(&e->refcnt, 1) == 1) /* 0 -> 1 transition */ 154 atomic_add_int(&d->nfree, 1); 155} 156 157#endif 158