/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: gemvar.h,v 1.8 2002/05/15 02:36:12 matt Exp
 *
 * $FreeBSD$
 */

#ifndef	_IF_GEMVAR_H
#define	_IF_GEMVAR_H

#include <sys/queue.h>
#include <sys/callout.h>

/*
 * Transmit descriptor ring size - this is arbitrary, but allocate
 * enough descriptors for 64 pending transmissions and 16 segments
 * per packet.  This limit is not actually enforced (packets with
 * more segments can be sent, depending on the busdma backend); it
 * is however used as an estimate for the TX window size.
 */
#define	GEM_NTXSEGS		16

#define	GEM_TXQUEUELEN		64
#define	GEM_NTXDESC		(GEM_TXQUEUELEN * GEM_NTXSEGS)
#define	GEM_MAXTXFREE		(GEM_NTXDESC - 1)
#define	GEM_NTXDESC_MASK	(GEM_NTXDESC - 1)
#define	GEM_NEXTTX(x)		((x + 1) & GEM_NTXDESC_MASK)

/*
 * Receive descriptor ring size - we have one RX buffer per incoming
 * packet, so this logic is a little simpler.
 */
#define	GEM_NRXDESC		256
#define	GEM_NRXDESC_MASK	(GEM_NRXDESC - 1)
#define	GEM_NEXTRX(x)		((x + 1) & GEM_NRXDESC_MASK)
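
/*
 * Both ring sizes are powers of two, so the _MASK values turn the ring
 * index wrap-around in GEM_NEXTTX() and GEM_NEXTRX() into a simple AND.
 * Illustrative sketch only (not used by the driver):
 *
 *	u_int idx;
 *
 *	idx = GEM_NEXTTX(GEM_NTXDESC - 1);	evaluates to 0 (wraps)
 *	idx = GEM_NEXTRX(5);			evaluates to 6
 */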

/*
 * How many ticks to wait before retrying an RX descriptor that is
 * still owned by the hardware.
 */
#define	GEM_RXOWN_TICKS		(hz / 50)

/*
 * Control structures are DMA'd to the chip.  We allocate them
 * in a single clump that maps to a single DMA segment to make
 * several things easier.
 */
struct gem_control_data {
	struct gem_desc gcd_txdescs[GEM_NTXDESC];	/* TX descriptors */
	struct gem_desc gcd_rxdescs[GEM_NRXDESC];	/* RX descriptors */
};

#define	GEM_CDOFF(x)		offsetof(struct gem_control_data, x)
#define	GEM_CDTXOFF(x)		GEM_CDOFF(gcd_txdescs[(x)])
#define	GEM_CDRXOFF(x)		GEM_CDOFF(gcd_rxdescs[(x)])
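
/*
 * GEM_CDTXOFF()/GEM_CDRXOFF() translate a ring index into a byte offset
 * within that single clump; GEM_CDTXADDR()/GEM_CDRXADDR() (defined below)
 * add the clump's DMA base address to obtain the bus address programmed
 * into the chip.  Illustrative sketch only, assuming the usual layout
 * with the RX descriptors immediately following the TX descriptors:
 *
 *	GEM_CDTXOFF(2) == 2 * sizeof(struct gem_desc)
 *	GEM_CDRXOFF(0) == GEM_NTXDESC * sizeof(struct gem_desc)
 *	GEM_CDTXADDR(sc, i) == sc->sc_cddma + GEM_CDTXOFF(i)
 */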

/*
 * software state for transmit job mbufs (may be elements of mbuf chains)
 */
struct gem_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	u_int txs_firstdesc;		/* first descriptor in packet */
	u_int txs_lastdesc;		/* last descriptor in packet */
	u_int txs_ndescs;		/* number of descriptors */
	STAILQ_ENTRY(gem_txsoft) txs_q;
};

STAILQ_HEAD(gem_txsq, gem_txsoft);
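
/*
 * Each txsoft sits on one of two queues in the softc, sc_txfreeq or
 * sc_txdirtyq (see below).  A hypothetical transmit path would move one
 * between them with the standard <sys/queue.h> macros, roughly:
 *
 *	struct gem_txsoft *txs;
 *
 *	txs = STAILQ_FIRST(&sc->sc_txfreeq);
 *	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
 *	(load the mbuf chain, fill in txs_firstdesc/txs_lastdesc)
 *	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
 *
 * This is a sketch only; see if_gem.c for the actual logic.
 */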

/*
 * software state for receive jobs
 */
struct gem_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
	bus_addr_t rxs_paddr;		/* physical address of the segment */
};

/*
 * software state per device
 */
struct gem_softc {
	struct ifnet	*sc_ifp;
	struct mtx	sc_mtx;
	device_t	sc_miibus;
	struct mii_data	*sc_mii;	/* MII media control */
	device_t	sc_dev;		/* generic device information */
	u_char		sc_enaddr[ETHER_ADDR_LEN];
	struct callout	sc_tick_ch;	/* tick callout */
	struct callout	sc_rx_ch;	/* delayed RX callout */
	u_int		sc_wdog_timer;	/* watchdog timer */

	void		*sc_ih;
	struct resource *sc_res[3];
#define	GEM_RES_INTR		0
#define	GEM_RES_BANK1		1
#define	GEM_RES_BANK2		2

	bus_dma_tag_t	sc_pdmatag;	/* parent bus DMA tag */
	bus_dma_tag_t	sc_rdmatag;	/* RX bus DMA tag */
	bus_dma_tag_t	sc_tdmatag;	/* TX bus DMA tag */
	bus_dma_tag_t	sc_cdmatag;	/* control data bus DMA tag */
	bus_dmamap_t	sc_dmamap;	/* bus DMA handle */

	u_int		sc_variant;
#define	GEM_UNKNOWN		0	/* don't know */
#define	GEM_SUN_GEM		1	/* Sun GEM */
#define	GEM_SUN_ERI		2	/* Sun ERI */
#define	GEM_APPLE_GMAC		3	/* Apple GMAC */
#define	GEM_APPLE_K2_GMAC	4	/* Apple K2 GMAC */

#define	GEM_IS_APPLE(sc)						\
	((sc)->sc_variant == GEM_APPLE_GMAC ||				\
	(sc)->sc_variant == GEM_APPLE_K2_GMAC)

	u_int		sc_flags;
#define	GEM_INITED	(1 << 0)	/* reset persistent regs init'ed */
#define	GEM_LINK	(1 << 1)	/* link is up */
#define	GEM_PCI		(1 << 2)	/* PCI busses are little-endian */
#define	GEM_PCI66	(1 << 3)	/* PCI bus runs at 66MHz */
#define	GEM_SERDES	(1 << 4)	/* use the SERDES */

	/*
	 * DMA resources for the control data (descriptor rings)
	 */
	bus_dmamap_t	sc_cddmamap;	/* control data DMA map */
	bus_addr_t	sc_cddma;

	/*
	 * software state for transmit and receive descriptors
	 */
	struct gem_txsoft sc_txsoft[GEM_TXQUEUELEN];
	struct gem_rxsoft sc_rxsoft[GEM_NRXDESC];

	/*
	 * control data structures
	 */
	struct gem_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->gcd_txdescs
#define	sc_rxdescs	sc_control_data->gcd_rxdescs

	u_int		sc_txfree;	/* number of free TX descriptors */
	u_int		sc_txnext;	/* next ready TX descriptor */
	u_int		sc_txwin;	/* TX desc. since last TX intr. */

	struct gem_txsq	sc_txfreeq;	/* free TX descsofts */
	struct gem_txsq	sc_txdirtyq;	/* dirty TX descsofts */

	u_int		sc_rxptr;	/* next ready RX descriptor/state */
	u_int		sc_rxfifosize;	/* RX FIFO size (bytes) */

	uint32_t	sc_mac_rxcfg;	/* RX MAC config without GEM_MAC_RX_ENABLE */

	int		sc_ifflags;
	u_long		sc_csum_features;
};

#define	GEM_BANKN_BARRIER(n, sc, offs, len, flags)			\
	bus_barrier((sc)->sc_res[(n)], (offs), (len), (flags))
#define	GEM_BANK1_BARRIER(sc, offs, len, flags)				\
	GEM_BANKN_BARRIER(GEM_RES_BANK1, (sc), (offs), (len), (flags))
#define	GEM_BANK2_BARRIER(sc, offs, len, flags)				\
	GEM_BANKN_BARRIER(GEM_RES_BANK2, (sc), (offs), (len), (flags))

#define	GEM_BANKN_READ_M(n, m, sc, offs)				\
	bus_read_ ## m((sc)->sc_res[(n)], (offs))
#define	GEM_BANK1_READ_1(sc, offs)					\
	GEM_BANKN_READ_M(GEM_RES_BANK1, 1, (sc), (offs))
#define	GEM_BANK1_READ_2(sc, offs)					\
	GEM_BANKN_READ_M(GEM_RES_BANK1, 2, (sc), (offs))
#define	GEM_BANK1_READ_4(sc, offs)					\
	GEM_BANKN_READ_M(GEM_RES_BANK1, 4, (sc), (offs))
#define	GEM_BANK2_READ_1(sc, offs)					\
	GEM_BANKN_READ_M(GEM_RES_BANK2, 1, (sc), (offs))
#define	GEM_BANK2_READ_2(sc, offs)					\
	GEM_BANKN_READ_M(GEM_RES_BANK2, 2, (sc), (offs))
#define	GEM_BANK2_READ_4(sc, offs)					\
	GEM_BANKN_READ_M(GEM_RES_BANK2, 4, (sc), (offs))

#define	GEM_BANKN_WRITE_M(n, m, sc, offs, v)				\
	bus_write_ ## m((sc)->sc_res[(n)], (offs), (v))
#define	GEM_BANK1_WRITE_1(sc, offs, v)					\
	GEM_BANKN_WRITE_M(GEM_RES_BANK1, 1, (sc), (offs), (v))
#define	GEM_BANK1_WRITE_2(sc, offs, v)					\
	GEM_BANKN_WRITE_M(GEM_RES_BANK1, 2, (sc), (offs), (v))
#define	GEM_BANK1_WRITE_4(sc, offs, v)					\
	GEM_BANKN_WRITE_M(GEM_RES_BANK1, 4, (sc), (offs), (v))
#define	GEM_BANK2_WRITE_1(sc, offs, v)					\
	GEM_BANKN_WRITE_M(GEM_RES_BANK2, 1, (sc), (offs), (v))
#define	GEM_BANK2_WRITE_2(sc, offs, v)					\
	GEM_BANKN_WRITE_M(GEM_RES_BANK2, 2, (sc), (offs), (v))
#define	GEM_BANK2_WRITE_4(sc, offs, v)					\
	GEM_BANKN_WRITE_M(GEM_RES_BANK2, 4, (sc), (offs), (v))
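
/*
 * Illustrative read-modify-write of a bank 1 register (sketch only; the
 * register and bit names are assumed to come from if_gemreg.h):
 *
 *	uint32_t v;
 *
 *	v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);
 *	v &= ~GEM_MAC_RX_ENABLE;
 *	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
 *	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
 *	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 */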

/* XXX this should be handled by bus_dma(9). */
#define	GEM_DMA_READ(sc, v)						\
	((((sc)->sc_flags & GEM_PCI) != 0) ? le64toh(v) : be64toh(v))
#define	GEM_DMA_WRITE(sc, v)						\
	((((sc)->sc_flags & GEM_PCI) != 0) ? htole64(v) : htobe64(v))

#define	GEM_CDTXADDR(sc, x)	((sc)->sc_cddma + GEM_CDTXOFF((x)))
#define	GEM_CDRXADDR(sc, x)	((sc)->sc_cddma + GEM_CDRXOFF((x)))

#define	GEM_CDSYNC(sc, ops)						\
	bus_dmamap_sync((sc)->sc_cdmatag, (sc)->sc_cddmamap, (ops));
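
/*
 * Typical descriptor update sequence (sketch only; GEM_INIT_RXDESC()
 * below is the real in-tree instance of this pattern): convert host-order
 * values with GEM_DMA_WRITE() - little-endian when GEM_PCI is set,
 * big-endian otherwise - store them in the ring, then push the change
 * out to the chip with GEM_CDSYNC():
 *
 *	sc->sc_txdescs[i].gd_addr = GEM_DMA_WRITE(sc, paddr);
 *	sc->sc_txdescs[i].gd_flags = GEM_DMA_WRITE(sc, flags);
 *	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE)
 */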

#define	GEM_INIT_RXDESC(sc, x)						\
do {									\
	struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)];			\
	struct gem_desc *__rxd = &sc->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->gd_addr =						\
	    GEM_DMA_WRITE((sc), __rxs->rxs_paddr);			\
	__rxd->gd_flags = GEM_DMA_WRITE((sc),				\
	    (((__m->m_ext.ext_size) << GEM_RD_BUFSHIFT) &		\
	    GEM_RD_BUFSIZE) | GEM_RD_OWN);				\
} while (0)

#define	GEM_UPDATE_RXDESC(sc, x)					\
do {									\
	struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)];			\
	struct gem_desc *__rxd = &sc->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__rxd->gd_flags = GEM_DMA_WRITE((sc),				\
	    (((__m->m_ext.ext_size) << GEM_RD_BUFSHIFT) &		\
	    GEM_RD_BUFSIZE) | GEM_RD_OWN);				\
} while (0)

#define	GEM_LOCK_INIT(_sc, _name)					\
	mtx_init(&(_sc)->sc_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF)
#define	GEM_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
#define	GEM_UNLOCK(_sc)			mtx_unlock(&(_sc)->sc_mtx)
#define	GEM_LOCK_ASSERT(_sc, _what)	mtx_assert(&(_sc)->sc_mtx, (_what))
#define	GEM_LOCK_DESTROY(_sc)		mtx_destroy(&(_sc)->sc_mtx)
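
/*
 * Softc fields are protected by sc_mtx; a hypothetical caller would
 * bracket accesses like this (sketch only):
 *
 *	GEM_LOCK(sc);
 *	GEM_LOCK_ASSERT(sc, MA_OWNED);
 *	(touch softc state, e.g. sc->sc_flags)
 *	GEM_UNLOCK(sc);
 */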

#ifdef _KERNEL
extern devclass_t gem_devclass;

int	gem_attach(struct gem_softc *sc);
void	gem_detach(struct gem_softc *sc);
void	gem_intr(void *v);
void	gem_resume(struct gem_softc *sc);
void	gem_suspend(struct gem_softc *sc);

int	gem_mediachange(struct ifnet *ifp);
void	gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);

/* MII methods & callbacks */
int	gem_mii_readreg(device_t dev, int phy, int reg);
void	gem_mii_statchg(device_t dev);
int	gem_mii_writereg(device_t dev, int phy, int reg, int val);
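
/*
 * The MII callbacks above are not called directly; a bus front-end
 * (e.g. if_gem_pci.c) typically exports them through its device method
 * table, roughly like this (sketch only):
 *
 *	DEVMETHOD(miibus_readreg,	gem_mii_readreg),
 *	DEVMETHOD(miibus_writereg,	gem_mii_writereg),
 *	DEVMETHOD(miibus_statchg,	gem_mii_statchg),
 */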

#endif /* _KERNEL */

#endif /* _IF_GEMVAR_H */