/*-
 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2007-2009 Marvell Semiconductor, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 * $FreeBSD: head/sys/dev/mwl/if_mwlvar.h 193240 2009-06-01 18:07:01Z sam $
 */

/*
 * Definitions for the Marvell 88W8363 Wireless LAN controller.
 */
#ifndef _DEV_MWL_MVVAR_H
#define _DEV_MWL_MVVAR_H

#include <sys/endian.h>			/* htole16 et al. used below */
#include <net80211/ieee80211_radiotap.h>
#include <dev/mwl/mwlhal.h>
#include <dev/mwl/mwlreg.h>
#include <dev/mwl/if_mwlioctl.h>

/*
 * Tunable resource-pool sizes; each may be overridden from the
 * kernel configuration.
 */
#ifndef MWL_TXBUF
#define MWL_TXBUF	256		/* number of TX descriptors/buffers */
#endif
#ifndef MWL_RXDESC
#define MWL_RXDESC	256		/* number of RX descriptors */
#endif
#ifndef MWL_RXBUF
#define MWL_RXBUF	((5*MWL_RXDESC)/2)/* number of RX dma buffers */
#endif
#ifndef MWL_MAXBA
#define	MWL_MAXBA	2		/* max BA streams/sta */
#endif

#ifdef MWL_SGDMA_SUPPORT
#define	MWL_TXDESC	6		/* max tx descriptors/segments */
#else
#define	MWL_TXDESC	1		/* max tx descriptors/segments */
#endif
#ifndef MWL_AGGR_SIZE
#define MWL_AGGR_SIZE	3839		/* max tx aggregation size */
#endif
#define	MWL_AGEINTERVAL	1		/* poke f/w every sec to age q's */
#define	MWL_MAXSTAID	64		/* max of 64 stations */

/*
 * DMA state for tx/rx descriptors.
 */

/*
 * Software backed version of tx/rx descriptors.  We keep
 * the software state out of the h/w descriptor structure
 * so that may be allocated in uncached memory w/o paying
 * performance hit.
 */
struct mwl_txbuf {
	STAILQ_ENTRY(mwl_txbuf) bf_list;	/* linkage on free/active list */
	void 		*bf_desc;	/* h/w descriptor */
	bus_addr_t	bf_daddr;	/* physical addr of desc */
	bus_dmamap_t	bf_dmamap;	/* DMA map for descriptors */
	int		bf_nseg;	/* # valid entries in bf_segs */
	bus_dma_segment_t bf_segs[MWL_TXDESC];	/* dma segments for bf_m */
	struct mbuf	*bf_m;		/* frame data being transmitted */
	struct ieee80211_node *bf_node;	/* destination node reference */
	struct mwl_txq	*bf_txq;		/* backpointer to tx q/ring */
};
typedef STAILQ_HEAD(, mwl_txbuf) mwl_txbufhead;

/*
 * Common "base class" for tx/rx descriptor resources
 * allocated using the bus dma api.
 */
struct mwl_descdma {
	const char*		dd_name;	/* name for debug messages */
	void			*dd_desc;	/* descriptors */
	bus_addr_t		dd_desc_paddr;	/* physical addr of dd_desc */
	bus_size_t		dd_desc_len;	/* size of dd_desc */
	bus_dma_segment_t	dd_dseg;	/* single contiguous segment */
	int			dd_dnseg;	/* number of segments */
	bus_dma_tag_t		dd_dmat;	/* bus DMA tag */
	bus_dmamap_t		dd_dmamap;	/* DMA map for descriptors */
	void			*dd_bufptr;	/* associated buffers */
};

/*
 * TX/RX ring definitions.  There are 4 tx rings, one
 * per AC, and 1 rx ring.  Note carefully that transmit
 * descriptors are treated as a contiguous chunk and the
 * firmware pre-fetches descriptors.  This means that we
 * must preserve order when moving descriptors between
 * the active+free lists; otherwise we may stall transmit.
 */
struct mwl_txq {
	struct mwl_descdma dma;		/* bus dma resources */
	struct mtx	lock;		/* tx q lock */
	char		name[12];	/* e.g. "mwl0_txq4" */
	int		qnum;		/* f/w q number */
	int		txpri;		/* f/w tx priority */
	int		nfree;		/* # buffers on free list */
	mwl_txbufhead	free;		/* queue of free buffers */
	mwl_txbufhead	active;		/* queue of active buffers */
};

/*
 * Per-queue tx lock.  The lock name embeds the device unit and
 * f/w queue number (e.g. "mwl0_txq4") so lock diagnostics are
 * readable.
 */
#define	MWL_TXQ_LOCK_INIT(_sc, _tq) do { \
	snprintf((_tq)->name, sizeof((_tq)->name), "%s_txq%u", \
		device_get_nameunit((_sc)->sc_dev), (_tq)->qnum); \
	mtx_init(&(_tq)->lock, (_tq)->name, NULL, MTX_DEF); \
} while (0)
#define	MWL_TXQ_LOCK_DESTROY(_tq)	mtx_destroy(&(_tq)->lock)
#define	MWL_TXQ_LOCK(_tq)		mtx_lock(&(_tq)->lock)
#define	MWL_TXQ_UNLOCK(_tq)		mtx_unlock(&(_tq)->lock)
#define	MWL_TXQ_LOCK_ASSERT(_tq)	mtx_assert(&(_tq)->lock, MA_OWNED)

/*
 * Sync the tx descriptor ring for the given queue.  NB: the (ds)
 * argument is currently unused; the entire descriptor map is
 * synced in one shot.  (how) is parenthesized per standard macro
 * hygiene so expression arguments expand safely.
 */
#define	MWL_TXDESC_SYNC(txq, ds, how) do { \
	bus_dmamap_sync((txq)->dma.dd_dmat, (txq)->dma.dd_dmamap, (how)); \
} while(0)

/*
 * RX dma buffers that are not in use are kept on a list.
 */
struct mwl_jumbo {
	SLIST_ENTRY(mwl_jumbo) next;	/* free-list linkage */
};
typedef SLIST_HEAD(, mwl_jumbo) mwl_jumbohead;

/*
 * A free rx dma buffer's data area doubles as its free-list
 * header; these macros convert between the two views.
 */
#define	MWL_JUMBO_DATA2BUF(_data)	((struct mwl_jumbo *)(_data))
#define	MWL_JUMBO_BUF2DATA(_buf)		((uint8_t *)(_buf))
/* Byte offset of a data area within the rx buffer pool (sc_rxmem). */
#define	MWL_JUMBO_OFFSET(_sc, _data) \
	(((const uint8_t *)(_data)) - (const uint8_t *)((_sc)->sc_rxmem))
/* Bus/physical address of a data area, derived from its pool offset. */
#define	MWL_JUMBO_DMA_ADDR(_sc, _data) \
	((_sc)->sc_rxmem_paddr + MWL_JUMBO_OFFSET(_sc, _data))

/* Software state for one rx descriptor. */
struct mwl_rxbuf {
	STAILQ_ENTRY(mwl_rxbuf) bf_list;	/* linkage on sc_rxbuf */
	void 		*bf_desc;	/* h/w descriptor */
	bus_addr_t	bf_daddr;	/* physical addr of desc */
	uint8_t		*bf_data;	/* rx data area */
};
typedef STAILQ_HEAD(, mwl_rxbuf) mwl_rxbufhead;

/*
 * Sync the rx descriptor ring.  NB: the (ds) argument is unused;
 * the whole descriptor map is synced.  (how) is parenthesized per
 * standard macro hygiene so expression arguments expand safely.
 */
#define	MWL_RXDESC_SYNC(sc, ds, how) do { \
	bus_dmamap_sync((sc)->sc_rxdma.dd_dmat, (sc)->sc_rxdma.dd_dmamap, (how));\
} while (0)

/*
 * BA stream state.  One of these is setup for each stream
 * allocated/created for use.  We pre-allocate the h/w stream
 * before sending ADDBA request then complete the setup when
 * get ADDBA response (success).  The completed state is setup
 * to optimize the fast path in mwl_txstart--we precalculate
 * the QoS control bits in the outbound frame and use those
 * to identify which BA stream to use (assigning the h/w q to
 * the TxPriority field of the descriptor).
 *
 * NB: Each station may have at most MWL_MAXBA streams at one time.
 */
struct mwl_bastate {
	uint16_t	qos;		/* QoS ctl for BA stream */
	uint8_t		txq;		/* h/w q for BA stream */
	const MWL_HAL_BASTREAM *bastream; /* A-MPDU BA stream */
};

186static __inline__ void
187mwl_bastream_setup(struct mwl_bastate *bas, int ac, int txq)
188{
189	bas->txq = txq;
190	bas->qos = htole16(WME_AC_TO_TID(ac) | IEEE80211_QOS_ACKPOLICY_BA);
191}
192
193static __inline__ void
194mwl_bastream_free(struct mwl_bastate *bas)
195{
196	bas->qos = 0;
197	bas->bastream = NULL;
198	/* NB: don't need to clear txq */
199}
200
201/*
202 * Check the QoS control bits from an outbound frame against the
203 * value calculated when a BA stream is setup (above).  We need
204 * to match the TID and also the ACK policy so we only match AMPDU
205 * frames.  The bits from the frame are assumed in network byte
206 * order, hence the potential byte swap.
207 */
208static __inline__ int
209mwl_bastream_match(const struct mwl_bastate *bas, uint16_t qos)
210{
211	return (qos & htole16(IEEE80211_QOS_TID|IEEE80211_QOS_ACKPOLICY)) ==
212	    bas->qos;
213}
214
/* driver-specific node state */
struct mwl_node {
	struct ieee80211_node mn_node;	/* base class */
	struct mwl_ant_info mn_ai;	/* antenna info */
	uint32_t	mn_avgrssi;	/* average rssi over all rx frames */
	uint16_t	mn_staid;	/* firmware station id */
	struct mwl_bastate mn_ba[MWL_MAXBA];	/* per-station BA streams */
	struct mwl_hal_vap *mn_hvap;	/* hal vap handle */
};
/* Cast helpers; valid because mn_node is the first member. */
#define	MWL_NODE(ni)		((struct mwl_node *)(ni))
#define	MWL_NODE_CONST(ni)	((const struct mwl_node *)(ni))

/*
 * Driver-specific vap state.
 */
struct mwl_vap {
	struct ieee80211vap mv_vap;		/* base class */
	struct mwl_hal_vap *mv_hvap;		/* hal vap handle */
	struct mwl_hal_vap *mv_ap_hvap;		/* ap hal vap handle for wds */
	uint16_t	mv_last_ps_sta;		/* last count of ps sta's */
	uint16_t	mv_eapolformat;		/* fixed tx rate for EAPOL */
	/* NOTE(review): saved net80211 method pointers, presumably
	 * chained by the driver's overrides — confirm in if_mwl.c. */
	int		(*mv_newstate)(struct ieee80211vap *,
				    enum ieee80211_state, int);
	int		(*mv_set_tim)(struct ieee80211_node *, int);
};
/* Cast helpers; valid because mv_vap is the first member. */
#define	MWL_VAP(vap)	((struct mwl_vap *)(vap))
#define	MWL_VAP_CONST(vap)	((const struct mwl_vap *)(vap))

struct mwl_softc {
	struct ifnet		*sc_ifp;	/* interface common */
	struct mwl_stats	sc_stats;	/* interface statistics */
	int			sc_debug;	/* debug msg flags */
	device_t		sc_dev;		/* bus device handle */
	bus_dma_tag_t		sc_dmat;	/* bus DMA tag */
	bus_space_handle_t	sc_io0h;	/* BAR 0 */
	bus_space_tag_t		sc_io0t;
	bus_space_handle_t	sc_io1h;	/* BAR 1 */
	bus_space_tag_t		sc_io1t;
	struct mtx		sc_mtx;		/* master lock (recursive) */
	struct taskqueue	*sc_tq;		/* private task queue */
	unsigned int		sc_invalid : 1,	/* disable hardware accesses */
				sc_recvsetup:1,	/* recv setup */
				sc_csapending:1,/* 11h channel switch pending */
				sc_radarena : 1,/* radar detection enabled */
				sc_rxblocked: 1;/* rx waiting for dma buffers */

	struct mwl_hal		*sc_mh;		/* h/w access layer */
	struct mwl_hal_vap	*sc_hvap;	/* hal vap handle */
	struct mwl_hal_hwspec	sc_hwspecs;	/* h/w capabilities */
	uint32_t		sc_fwrelease;	/* release # of loaded f/w */
	struct mwl_hal_txrxdma	sc_hwdma;	/* h/w dma setup */
	uint32_t		sc_imask;	/* interrupt mask copy */
	enum ieee80211_phymode	sc_curmode;	/* current phy mode */
	u_int16_t		sc_curaid;	/* current association id */
	u_int8_t		sc_curbssid[IEEE80211_ADDR_LEN];
	MWL_HAL_CHANNEL		sc_curchan;	/* current channel */
	MWL_HAL_TXRATE_HANDLING	sc_txratehandling;
	u_int16_t		sc_rxantenna;	/* rx antenna */
	u_int16_t		sc_txantenna;	/* tx antenna */
	uint8_t			sc_napvaps;	/* # ap mode vaps */
	uint8_t			sc_nwdsvaps;	/* # wds mode vaps */
	uint8_t			sc_nstavaps;	/* # sta mode vaps */
	uint8_t			sc_nbssid0;	/* # vap's using base mac */
	uint32_t		sc_bssidmask;	/* bssid mask */

	/* NOTE(review): saved net80211 method pointers, presumably
	 * chained by the driver's overrides — confirm in if_mwl.c. */
	void			(*sc_recv_mgmt)(struct ieee80211com *,
				    struct mbuf *,
				    struct ieee80211_node *,
				    int, int, int, u_int32_t);
	int			(*sc_newstate)(struct ieee80211com *,
				    enum ieee80211_state, int);
	void 			(*sc_node_cleanup)(struct ieee80211_node *);
	void 			(*sc_node_drain)(struct ieee80211_node *);
	void			(*sc_recv_action)(struct ieee80211_node *,
				    const uint8_t *, const uint8_t *);
	int			(*sc_addba_request)(struct ieee80211_node *,
				    struct ieee80211_tx_ampdu *,
				    int dialogtoken, int baparamset,
				    int batimeout);
	int			(*sc_addba_response)(struct ieee80211_node *,
				    struct ieee80211_tx_ampdu *,
				    int status, int baparamset,
				    int batimeout);
	void			(*sc_addba_stop)(struct ieee80211_node *,
				    struct ieee80211_tx_ampdu *);

	struct mwl_descdma	sc_rxdma;	/* rx bus dma resources */
	mwl_rxbufhead		sc_rxbuf;	/* rx buffers */
	struct mwl_rxbuf	*sc_rxnext;	/* next rx buffer to process */
	struct task		sc_rxtask;	/* rx int processing */
	void			*sc_rxmem;	/* rx dma buffer pool */
	bus_dma_tag_t		sc_rxdmat;	/* rx bus DMA tag */
	bus_size_t		sc_rxmemsize;	/* rx dma buffer pool size */
	bus_dmamap_t		sc_rxmap;	/* map for rx dma buffers */
	bus_addr_t		sc_rxmem_paddr;	/* physical addr of sc_rxmem */
	mwl_jumbohead		sc_rxfree;	/* list of free dma buffers */
	int			sc_nrxfree;	/* # buffers on rx free list */
	struct mtx		sc_rxlock;	/* lock on sc_rxfree */

	struct mwl_txq		sc_txq[MWL_NUM_TX_QUEUES];
	struct mwl_txq		*sc_ac2q[5];	/* WME AC -> h/w q map */
	struct mbuf		*sc_aggrq;	/* aggregation q */
	struct task		sc_txtask;	/* tx int processing */
	struct task		sc_bawatchdogtask;/* BA watchdog processing */

	struct task		sc_radartask;	/* radar detect processing */
	struct task		sc_chanswitchtask;/* chan switch processing */

	uint8_t			sc_staid[MWL_MAXSTAID/NBBY];	/* sta id bitmap */
	int			sc_ageinterval;	/* q aging interval, secs */
	struct callout		sc_timer;	/* periodic work */

	struct mwl_tx_radiotap_header sc_tx_th;	/* tx radiotap header */
	struct mwl_rx_radiotap_header sc_rx_th;	/* rx radiotap header */
};

/* Master softc lock; created recursive (MTX_RECURSE). */
#define	MWL_LOCK_INIT(_sc) \
	mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->sc_dev), \
		 NULL, MTX_DEF | MTX_RECURSE)
#define	MWL_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_mtx)
#define	MWL_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	MWL_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	MWL_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_mtx, MA_OWNED)

/* Lock protecting the rx free-buffer list (sc_rxfree/sc_nrxfree). */
#define	MWL_RXFREE_INIT(_sc) \
	mtx_init(&(_sc)->sc_rxlock, device_get_nameunit((_sc)->sc_dev), \
		 NULL, MTX_DEF)
#define	MWL_RXFREE_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_rxlock)
#define	MWL_RXFREE_LOCK(_sc)	mtx_lock(&(_sc)->sc_rxlock)
#define	MWL_RXFREE_UNLOCK(_sc)	mtx_unlock(&(_sc)->sc_rxlock)
#define	MWL_RXFREE_ASSERT(_sc)	mtx_assert(&(_sc)->sc_rxlock, MA_OWNED)

/*
 * Entry points exported to the bus attachment glue.
 */
int	mwl_attach(u_int16_t, struct mwl_softc *);
int	mwl_detach(struct mwl_softc *);
void	mwl_resume(struct mwl_softc *);
void	mwl_suspend(struct mwl_softc *);
void	mwl_shutdown(void *);
void	mwl_intr(void *);

#endif /* _DEV_MWL_MVVAR_H */