/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *	from: NetBSD: hmevar.h,v 1.5 2000/06/25 01:10:04 eeh Exp
 *
 * $FreeBSD: stable/11/sys/dev/hme/if_hmevar.h 331722 2018-03-29 02:50:57Z eadler $
 */

#include <sys/callout.h>

/*
 * Number of receive and transmit descriptors. For each receive descriptor,
 * an mbuf cluster is allocated and set up to receive a packet, and a DMA map
 * is created. Therefore, this number should not be set too high, to avoid
 * wasting memory.
 * TX descriptors have no static cost beyond the memory directly allocated
 * for them. TX queue elements (the number of which is fixed by HME_NTXQ) hold
 * the software state for a transmit job; each has a DMA map allocated for it.
 * Multiple descriptors may be allocated to a single queue element.
 * HME_NTXQ and HME_NTXSEGS are completely arbitrary.
 */
#define	HME_NRXDESC	128
#define	HME_NTXDESC	256
#define	HME_NTXQ	64
#define	HME_NTXSEGS	16
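/*
 * Rough static cost implied by the defaults above (illustrative figures
 * only, assuming 2KB mbuf clusters): the RX ring pins HME_NRXDESC (128)
 * clusters, roughly 256KB, plus one DMA map per descriptor.  On the TX
 * side, HME_NTXQ (64) queue elements can map up to 64 * 16 = 1024
 * segments, more than the HME_NTXDESC (256) ring entries, so the ring,
 * not the maps, bounds the number of in-flight segments.
 */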

/* Maximum size of a mapped RX buffer. */
#define	HME_BUFSZ	1600

/*
 * RX DMA descriptor. The descriptors are preallocated; the DMA map is
 * reused.
 */
struct hme_rxdesc {
	struct mbuf	*hrx_m;
	bus_dmamap_t	hrx_dmamap;
};

/* Lazily leave at least one burst size grace space. */
#define	HME_DESC_RXLEN(sc, d)						\
	ulmin(HME_BUFSZ, (d)->hrx_m->m_len - (sc)->sc_burst)
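/*
 * Illustrative sketch, not the driver's actual code ("ri" and "len" are
 * hypothetical locals): when an RX buffer is posted, the length advertised
 * to the chip would be clamped through the macro above, which (per the
 * comment) keeps at least one burst size of slack at the end of the mbuf
 * so a trailing DVMA burst cannot overrun the cluster:
 *
 *	len = HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ri]);
 */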

struct hme_txdesc {
	struct mbuf	*htx_m;
	bus_dmamap_t	htx_dmamap;
	int		htx_lastdesc;
	STAILQ_ENTRY(hme_txdesc) htx_q;
};

STAILQ_HEAD(hme_txdq, hme_txdesc);

struct hme_ring {
	/* Ring Descriptors */
	caddr_t		rb_membase;	/* Packet buffer: CPU address */
	bus_addr_t	rb_dmabase;	/* Packet buffer: DMA address */
	caddr_t		rb_txd;		/* Transmit descriptors */
	bus_addr_t	rb_txddma;	/* DMA address of same */
	caddr_t		rb_rxd;		/* Receive descriptors */
	bus_addr_t	rb_rxddma;	/* DMA address of same */

	/* Ring Descriptor state */
	int		rb_tdhead, rb_tdtail;
	int		rb_rdtail;
	int		rb_td_nbusy;

	/* Descriptors */
	struct hme_rxdesc	rb_rxdesc[HME_NRXDESC];
	struct hme_txdesc	rb_txdesc[HME_NTXQ];

	struct	hme_txdq	rb_txfreeq;
	struct	hme_txdq	rb_txbusyq;

	bus_dmamap_t	rb_spare_dmamap;
};
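/*
 * Ring index convention (a reading aid, not normative): rb_tdhead is where
 * the next TX descriptor is produced, rb_tdtail is where completed ones are
 * reclaimed, and rb_td_nbusy counts descriptors currently handed to the
 * chip; rb_rdtail is the next RX descriptor to examine.  Indices wrap
 * modulo the ring size, e.g. (with a hypothetical local "ri"):
 *
 *	if (++ri == HME_NTXDESC)
 *		ri = 0;
 */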

struct hme_softc {
	struct ifnet	*sc_ifp;
	struct ifmedia	sc_ifmedia;
	device_t	sc_dev;
	device_t	sc_miibus;
	struct mii_data	*sc_mii;	/* MII media control */
	u_char		sc_enaddr[ETHER_ADDR_LEN];
	struct callout	sc_tick_ch;	/* tick callout */
	int		sc_wdog_timer;	/* watchdog timer */

	/* The following bus handles are to be provided by the bus front-end */
	bus_dma_tag_t	sc_pdmatag;	/* bus dma parent tag */
	bus_dma_tag_t	sc_cdmatag;	/* control bus dma tag */
	bus_dmamap_t	sc_cdmamap;	/* control bus dma handle */
	bus_dma_tag_t	sc_rdmatag;	/* RX bus dma tag */
	bus_dma_tag_t	sc_tdmatag;	/* TX bus dma tag */
	bus_space_handle_t sc_sebh;	/* HME Global registers */
	bus_space_handle_t sc_erxh;	/* HME ERX registers */
	bus_space_handle_t sc_etxh;	/* HME ETX registers */
	bus_space_handle_t sc_mach;	/* HME MAC registers */
	bus_space_handle_t sc_mifh;	/* HME MIF registers */
	bus_space_tag_t	sc_sebt;	/* HME Global registers */
	bus_space_tag_t	sc_erxt;	/* HME ERX registers */
	bus_space_tag_t	sc_etxt;	/* HME ETX registers */
	bus_space_tag_t	sc_mact;	/* HME MAC registers */
	bus_space_tag_t	sc_mift;	/* HME MIF registers */
	int		sc_burst;	/* DVMA burst size in effect */
	int		sc_phys[2];	/* MII instance -> PHY map */

	u_int		sc_flags;
#define	HME_LINK	(1 << 0)	/* link is up */
#define	HME_PCI		(1 << 1)	/* PCI busses are little-endian */

	int		sc_ifflags;
	int		sc_csum_features;

	/* Ring descriptor */
	struct hme_ring	sc_rb;

	struct mtx	sc_lock;
};

#define HME_LOCK(_sc)		mtx_lock(&(_sc)->sc_lock)
#define HME_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_lock)
#define HME_LOCK_ASSERT(_sc, _what)	mtx_assert(&(_sc)->sc_lock, (_what))
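/*
 * A minimal usage sketch (the function below is hypothetical, not part of
 * the driver): code paths that touch sc_rb or the chip take the softc
 * mutex through these macros, and helpers assert ownership.
 */
#if 0	/* illustrative only, not compiled */
static void
hme_example_locked(struct hme_softc *sc)
{

	HME_LOCK(sc);
	HME_LOCK_ASSERT(sc, MA_OWNED);
	/* ... touch sc->sc_rb and device registers here ... */
	HME_UNLOCK(sc);
}
#endif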

extern devclass_t hme_devclass;

int	hme_config(struct hme_softc *);
void	hme_detach(struct hme_softc *);
void	hme_suspend(struct hme_softc *);
void	hme_resume(struct hme_softc *);
void	hme_intr(void *);
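/*
 * Hypothetical front-end attach flow (a sketch under assumptions, not a
 * copy of if_hme_pci.c or if_hme_sbus.c): the bus front-end fills in the
 * DMA tags, bus space tags/handles and sc_enaddr noted above, then hands
 * the softc to hme_config() and wires hme_intr() as its interrupt handler.
 */
#if 0	/* illustrative only, not compiled */
static int
hme_example_attach(device_t dev)
{
	struct hme_softc *sc;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	/* ... allocate resources; fill in sc_sebt/sc_sebh, sc_erxt/sc_erxh, */
	/* sc_etxt/sc_etxh, sc_mact/sc_mach, sc_mift/sc_mifh, the DMA tags   */
	/* and sc_enaddr here ...					      */
	if (hme_config(sc) != 0)
		return (ENXIO);
	/* ... bus_setup_intr(..., hme_intr, sc, ...) ...		      */
	return (0);
}
#endif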

/* MII methods & callbacks */
int	hme_mii_readreg(device_t, int, int);
int	hme_mii_writereg(device_t, int, int, int);
void	hme_mii_statchg(device_t);
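
/*
 * Hypothetical miibus glue (a sketch, not a copy of the front-ends): the
 * bus attachment is assumed to export the callbacks above as miibus
 * methods so the PHY driver can reach the MIF, e.g.:
 *
 *	DEVMETHOD(miibus_readreg,  hme_mii_readreg),
 *	DEVMETHOD(miibus_writereg, hme_mii_writereg),
 *	DEVMETHOD(miibus_statchg,  hme_mii_statchg),
 */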