/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 * $FreeBSD: head/sys/dev/ath/if_athvar.h 249639 2013-04-19 06:59:10Z adrian $
 */

/*
 * Definitions for the Atheros Wireless LAN controller driver.
 */
#ifndef _DEV_ATH_ATHVAR_H
#define _DEV_ATH_ATHVAR_H

#include <machine/atomic.h>

#include <dev/ath/ath_hal/ah.h>
#include <dev/ath/ath_hal/ah_desc.h>
#include <net80211/ieee80211_radiotap.h>
#include <dev/ath/if_athioctl.h>
#include <dev/ath/if_athrate.h>
#ifdef	ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

#define	ATH_TIMEOUT		1000

/*
 * There is a separate TX ath_buf pool for management frames.
 * This ensures that management frames such as probe responses
 * and BAR frames can be transmitted during periods of high
 * TX activity.
 */
#define	ATH_MGMT_TXBUF		32

/*
 * 802.11n requires more TX and RX buffers to do AMPDU.
 */
#ifdef	ATH_ENABLE_11N
#define	ATH_TXBUF	512
#define	ATH_RXBUF	512
#endif

#ifndef ATH_RXBUF
#define	ATH_RXBUF	40		/* number of RX buffers */
#endif
#ifndef ATH_TXBUF
#define	ATH_TXBUF	200		/* number of TX buffers */
#endif
#define	ATH_BCBUF	4		/* number of beacon buffers */

#define	ATH_TXDESC	10		/* number of descriptors per buffer */
#define	ATH_TXMAXTRY	11		/* max number of transmit attempts */
#define	ATH_TXMGTTRY	4		/* xmit attempts for mgt/ctl frames */
#define	ATH_TXINTR_PERIOD 5		/* max number of batched tx descriptors */

#define	ATH_BEACON_AIFS_DEFAULT	 1	/* default aifs for ap beacon q */
#define	ATH_BEACON_CWMIN_DEFAULT 0	/* default cwmin for ap beacon q */
#define	ATH_BEACON_CWMAX_DEFAULT 0	/* default cwmax for ap beacon q */

/*
 * The key cache is used for h/w cipher state and also for
 * tracking station state such as the current tx antenna.
 * We also setup a mapping table between key cache slot indices
 * and station state to short-circuit node lookups on rx.
 * Different parts have different size key caches.  We handle
 * up to ATH_KEYMAX entries (could dynamically allocate state).
 */
#define	ATH_KEYMAX	128		/* max key cache size we handle */
#define	ATH_KEYBYTES	(ATH_KEYMAX/NBBY)	/* storage space in bytes */
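
/*
 * Example (sketch only): key cache slot usage is tracked in the
 * sc_keymap bitmap below (sized by ATH_KEYBYTES), so the generic
 * setbit()/isset()/clrbit() macros from <sys/param.h> apply, e.g.
 *
 *	if (keyix < ATH_KEYMAX && !isset(sc->sc_keymap, keyix))
 *		setbit(sc->sc_keymap, keyix);
 */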

struct taskqueue;
struct kthread;
struct ath_buf;

#define	ATH_TID_MAX_BUFS	(2 * IEEE80211_AGGR_BAWMAX)

/*
 * Per-TID state
 *
 * Note that the last entry, TID 16, is used for handling non-QoS frames.
 */
struct ath_tid {
	TAILQ_HEAD(,ath_buf)	tid_q;		/* pending buffers */
	struct ath_node		*an;		/* pointer to parent */
	int			tid;		/* tid */
	int			ac;		/* which AC gets this traffic */
	int			hwq_depth;	/* how many buffers are on HW */
	u_int			axq_depth;	/* SW queue depth */

	struct {
		TAILQ_HEAD(,ath_buf)	tid_q;		/* filtered queue */
		u_int			axq_depth;	/* SW queue depth */
	} filtq;

	/*
	 * Entry on the ath_txq; when there's traffic
	 * to send
	 */
	TAILQ_ENTRY(ath_tid)	axq_qelem;
	int			sched;
	int			paused;	/* >0 if the TID has been paused */

	/*
	 * These are flags - perhaps later collapse
	 * down to a single uint32_t ?
	 */
	int			addba_tx_pending;	/* TX ADDBA pending */
	int			bar_wait;	/* waiting for BAR */
	int			bar_tx;		/* BAR TXed */
	int			isfiltered;	/* is this node currently filtered */

	/*
	 * Is the TID being cleaned up after a transition
	 * from aggregation to non-aggregation?
	 * When this is set to 1, this TID will be paused
	 * and no further traffic will be queued until all
	 * the hardware packets pending for this TID have been
	 * TXed/completed; at which point (non-aggregation)
	 * traffic will resume being TXed.
	 */
	int			cleanup_inprogress;
	/*
	 * How many hardware-queued packets are
	 * waiting to be cleaned up.
	 * This is only valid if cleanup_inprogress is 1.
	 */
	int			incomp;

	/*
	 * The following implements a ring representing
	 * the frames in the current BAW.
	 * To avoid copying the array content each time
	 * the BAW is moved, the baw_head/baw_tail point
	 * to the current BAW begin/end; when the BAW is
	 * shifted the head/tail of the array are also
	 * appropriately shifted.
	 */
	/* active tx buffers, beginning at current BAW */
	struct ath_buf		*tx_buf[ATH_TID_MAX_BUFS];
	/* where the baw head is in the array */
	int			baw_head;
	/* where the BAW tail is in the array */
	int			baw_tail;
};
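
/*
 * Indexing sketch (illustrative only): a frame sitting "offset" slots
 * past the start of the BAW lands at
 *
 *	cindex = (tid->baw_head + offset) % ATH_TID_MAX_BUFS;
 *	tid->tx_buf[cindex] = bf;
 *
 * so sliding the BAW only advances baw_head/baw_tail; the array
 * contents are never copied.
 */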

/* driver-specific node state */
struct ath_node {
	struct ieee80211_node an_node;	/* base class */
	u_int8_t	an_mgmtrix;	/* min h/w rate index */
	u_int8_t	an_mcastrix;	/* mcast h/w rate index */
	uint32_t	an_is_powersave;	/* node is sleeping */
	uint32_t	an_stack_psq;		/* net80211 psq isn't empty */
	uint32_t	an_tim_set;		/* TIM has been set */
	struct ath_buf	*an_ff_buf[WME_NUM_AC]; /* ff staging area */
	struct ath_tid	an_tid[IEEE80211_TID_SIZE];	/* per-TID state */
	char		an_name[32];	/* eg "wlan0_a1" */
	struct mtx	an_mtx;		/* protecting the ath_node state */
	uint32_t	an_swq_depth;	/* how many SWQ packets for this
					   node */
	int			clrdmask;	/* has clrdmask been set */
	/* variable-length rate control state follows */
};
#define	ATH_NODE(ni)	((struct ath_node *)(ni))
#define	ATH_NODE_CONST(ni)	((const struct ath_node *)(ni))

#define ATH_RSSI_LPF_LEN	10
#define ATH_RSSI_DUMMY_MARKER	0x127
#define ATH_EP_MUL(x, mul)	((x) * (mul))
#define ATH_RSSI_IN(x)		(ATH_EP_MUL((x), HAL_RSSI_EP_MULTIPLIER))
#define ATH_LPF_RSSI(x, y, len) \
    ((x != ATH_RSSI_DUMMY_MARKER) ? (((x) * ((len) - 1) + (y)) / (len)) : (y))
#define ATH_RSSI_LPF(x, y) do {						\
    if ((y) >= -20)							\
    	x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN);	\
} while (0)
#define	ATH_EP_RND(x,mul) \
	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
#define	ATH_RSSI(x)		ATH_EP_RND(x, HAL_RSSI_EP_MULTIPLIER)
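
/*
 * Worked example (sketch only, assuming HAL_RSSI_EP_MULTIPLIER is 128):
 * RSSI is filtered in "extended precision" units so the running average
 * keeps its fractional part.  With a filtered value of 40 (stored as
 * 5120) and a new sample of 50:
 *
 *	ATH_RSSI_LPF(x, 50);	x = (5120 * 9 + 50 * 128) / 10 = 5248
 *	ATH_RSSI(x);		yields 41
 *
 * Samples below -20 are ignored by ATH_RSSI_LPF.
 */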

typedef enum {
	ATH_BUFTYPE_NORMAL	= 0,
	ATH_BUFTYPE_MGMT	= 1,
} ath_buf_type_t;

struct ath_buf {
	TAILQ_ENTRY(ath_buf)	bf_list;
	struct ath_buf *	bf_next;	/* next buffer in the aggregate */
	int			bf_nseg;
	HAL_STATUS		bf_rxstatus;
	uint16_t		bf_flags;	/* status flags (below) */
	uint16_t		bf_descid;	/* 16 bit descriptor ID */
	struct ath_desc		*bf_desc;	/* virtual addr of desc */
	struct ath_desc_status	bf_status;	/* tx/rx status */
	bus_addr_t		bf_daddr;	/* physical addr of desc */
	bus_dmamap_t		bf_dmamap;	/* DMA map for mbuf chain */
	struct mbuf		*bf_m;		/* mbuf for buf */
	struct ieee80211_node	*bf_node;	/* pointer to the node */
	struct ath_desc		*bf_lastds;	/* last descriptor for comp status */
	struct ath_buf		*bf_last;	/* last buffer in aggregate, or self for non-aggregate */
	bus_size_t		bf_mapsize;
#define	ATH_MAX_SCATTER		ATH_TXDESC	/* max(tx,rx,beacon) desc's */
	bus_dma_segment_t	bf_segs[ATH_MAX_SCATTER];

	/* Completion function to call on TX complete (fail or not) */
	/*
	 * "fail" here is set to 1 if the queue entries were removed
	 * through a call to ath_tx_draintxq().
	 */
	void(* bf_comp) (struct ath_softc *sc, struct ath_buf *bf, int fail);

	/* This state is kept to support software retries and aggregation */
	struct {
		uint16_t bfs_seqno;	/* sequence number of this packet */
		uint16_t bfs_ndelim;	/* number of delims for padding */

		uint8_t bfs_retries;	/* retry count */
		uint8_t bfs_tid;	/* packet TID (or TID_MAX for no QoS) */
		uint8_t bfs_nframes;	/* number of frames in aggregate */
		uint8_t bfs_pri;	/* packet AC priority */
		uint8_t bfs_tx_queue;	/* destination hardware TX queue */

		u_int32_t bfs_aggr:1,		/* part of aggregate? */
		    bfs_aggrburst:1,	/* part of aggregate burst? */
		    bfs_isretried:1,	/* retried frame? */
		    bfs_dobaw:1,	/* actually check against BAW? */
		    bfs_addedbaw:1,	/* has been added to the BAW */
		    bfs_shpream:1,	/* use short preamble */
		    bfs_istxfrag:1,	/* is fragmented */
		    bfs_ismrr:1,	/* do multi-rate TX retry */
		    bfs_doprot:1,	/* do RTS/CTS based protection */
		    bfs_doratelookup:1;	/* do rate lookup before each TX */

		/*
		 * These fields are passed into the
		 * descriptor setup functions.
		 */

		/* Make this an 8 bit value? */
		HAL_PKT_TYPE bfs_atype;	/* packet type */

		uint32_t bfs_pktlen;	/* length of this packet */

		uint16_t bfs_hdrlen;	/* length of this packet header */
		uint16_t bfs_al;	/* length of aggregate */

		uint16_t bfs_txflags;	/* HAL (tx) descriptor flags */
		uint8_t bfs_txrate0;	/* first TX rate */
		uint8_t bfs_try0;		/* first try count */

		uint16_t bfs_txpower;	/* tx power */
		uint8_t bfs_ctsrate0;	/* Non-zero - use this as ctsrate */
		uint8_t bfs_ctsrate;	/* CTS rate */

		/* 16 bit? */
		int32_t bfs_keyix;		/* crypto key index */
		int32_t bfs_txantenna;	/* TX antenna config */

		/* Make this an 8 bit value? */
		enum ieee80211_protmode bfs_protmode;

		/* 16 bit? */
		uint32_t bfs_ctsduration;	/* CTS duration (pre-11n NICs) */
		struct ath_rc_series bfs_rc[ATH_RC_NUM];	/* non-11n TX series */
	} bf_state;
};
typedef TAILQ_HEAD(ath_bufhead_s, ath_buf) ath_bufhead;

#define	ATH_BUF_MGMT	0x00000001	/* (tx) desc is a mgmt desc */
#define	ATH_BUF_BUSY	0x00000002	/* (tx) desc owned by h/w */
#define	ATH_BUF_FIFOEND	0x00000004
#define	ATH_BUF_FIFOPTR	0x00000008

#define	ATH_BUF_FLAGS_CLONE	(ATH_BUF_MGMT)

/*
 * DMA state for tx/rx descriptors.
 */
struct ath_descdma {
	const char*		dd_name;
	struct ath_desc		*dd_desc;	/* descriptors */
	int			dd_descsize;	/* size of single descriptor */
	bus_addr_t		dd_desc_paddr;	/* physical addr of dd_desc */
	bus_size_t		dd_desc_len;	/* size of dd_desc */
	bus_dma_segment_t	dd_dseg;
	bus_dma_tag_t		dd_dmat;	/* bus DMA tag */
	bus_dmamap_t		dd_dmamap;	/* DMA map for descriptors */
	struct ath_buf		*dd_bufptr;	/* associated buffers */
};

/*
 * Data transmit queue state.  One of these exists for each
 * hardware transmit queue.  Packets sent to us from above
 * are assigned to queues based on their priority.  Not all
 * devices support a complete set of hardware transmit queues.
 * For those devices the array sc_ac2q will map multiple
 * priorities to fewer hardware queues (typically all to one
 * hardware queue).
 */
struct ath_txq {
	struct ath_softc	*axq_softc;	/* Needed for scheduling */
	u_int			axq_qnum;	/* hardware q number */
#define	ATH_TXQ_SWQ	(HAL_NUM_TX_QUEUES+1)	/* qnum for s/w only queue */
	u_int			axq_ac;		/* WME AC */
	u_int			axq_flags;
#define	ATH_TXQ_PUTPENDING	0x0001		/* ath_hal_puttxbuf pending */
	u_int			axq_depth;	/* queue depth (stat only) */
	u_int			axq_aggr_depth;	/* how many aggregates are queued */
	u_int			axq_intrcnt;	/* interrupt count */
	u_int32_t		*axq_link;	/* link ptr in last TX desc */
	TAILQ_HEAD(axq_q_s, ath_buf)	axq_q;		/* transmit queue */
	struct mtx		axq_lock;	/* lock on q and link */

	/*
	 * This is the FIFO staging buffer when doing EDMA.
	 *
	 * For legacy chips, we just push the head pointer to
	 * the hardware and we ignore this list.
	 *
	 * For EDMA, the staging buffer is treated as normal;
	 * when it's time to push a list of frames to the hardware
	 * we move that list here and we stamp buffers with
	 * flags to identify the beginning/end of that particular
	 * FIFO entry.
	 */
	struct {
		TAILQ_HEAD(axq_q_f_s, ath_buf)	axq_q;
		u_int				axq_depth;
	} fifo;
	u_int			axq_fifo_depth;	/* depth of FIFO frames */

	/*
	 * XXX the holdingbf field is protected by the TXBUF lock
	 * for now, NOT the TXQ lock.
	 *
	 * Architecturally, it would likely be better to move
	 * the holdingbf field to a separate array in ath_softc
	 * just to highlight that it's not protected by the normal
	 * TX path lock.
	 */
	struct ath_buf		*axq_holdingbf;	/* holding TX buffer */
	char			axq_name[12];	/* e.g. "ath0_txq4" */

	/* Per-TID traffic queue for software -> hardware TX */
	/*
	 * This is protected by the general TX path lock, not (for now)
	 * by the TXQ lock.
	 */
	TAILQ_HEAD(axq_t_s,ath_tid)	axq_tidq;
};
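
/*
 * Mapping sketch (illustrative only): upper layer traffic reaches one
 * of these queues through sc_ac2q[], typically via something like
 *
 *	txq = sc->sc_ac2q[M_WME_GETAC(m)];
 *
 * On parts with fewer hardware queues, several ACs share one ath_txq.
 */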

#define	ATH_TXQ_LOCK_INIT(_sc, _tq) do { \
	    snprintf((_tq)->axq_name, sizeof((_tq)->axq_name), "%s_txq%u", \
	      device_get_nameunit((_sc)->sc_dev), (_tq)->axq_qnum); \
	    mtx_init(&(_tq)->axq_lock, (_tq)->axq_name, NULL, MTX_DEF); \
	} while (0)
#define	ATH_TXQ_LOCK_DESTROY(_tq)	mtx_destroy(&(_tq)->axq_lock)
#define	ATH_TXQ_LOCK(_tq)		mtx_lock(&(_tq)->axq_lock)
#define	ATH_TXQ_UNLOCK(_tq)		mtx_unlock(&(_tq)->axq_lock)
#define	ATH_TXQ_LOCK_ASSERT(_tq)	mtx_assert(&(_tq)->axq_lock, MA_OWNED)


#define	ATH_NODE_LOCK(_an)		mtx_lock(&(_an)->an_mtx)
#define	ATH_NODE_UNLOCK(_an)		mtx_unlock(&(_an)->an_mtx)
#define	ATH_NODE_LOCK_ASSERT(_an)	mtx_assert(&(_an)->an_mtx, MA_OWNED)
#define	ATH_NODE_UNLOCK_ASSERT(_an)	mtx_assert(&(_an)->an_mtx,	\
					    MA_NOTOWNED)

/*
 * These are for the hardware queue.
 */
#define ATH_TXQ_INSERT_HEAD(_tq, _elm, _field) do { \
	TAILQ_INSERT_HEAD(&(_tq)->axq_q, (_elm), _field); \
	(_tq)->axq_depth++; \
} while (0)
#define ATH_TXQ_INSERT_TAIL(_tq, _elm, _field) do { \
	TAILQ_INSERT_TAIL(&(_tq)->axq_q, (_elm), _field); \
	(_tq)->axq_depth++; \
} while (0)
#define ATH_TXQ_REMOVE(_tq, _elm, _field) do { \
	TAILQ_REMOVE(&(_tq)->axq_q, _elm, _field); \
	(_tq)->axq_depth--; \
} while (0)
#define	ATH_TXQ_FIRST(_tq)		TAILQ_FIRST(&(_tq)->axq_q)
#define	ATH_TXQ_LAST(_tq, _field)	TAILQ_LAST(&(_tq)->axq_q, _field)
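
/*
 * Usage sketch (illustrative only): these macros only do the list and
 * depth bookkeeping; the caller is expected to hold the queue lock:
 *
 *	ATH_TXQ_LOCK(txq);
 *	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
 *	ATH_TXQ_UNLOCK(txq);
 */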

/*
 * These are for the TID software queue.
 */
#define ATH_TID_INSERT_HEAD(_tq, _elm, _field) do { \
	TAILQ_INSERT_HEAD(&(_tq)->tid_q, (_elm), _field); \
	(_tq)->axq_depth++; \
	atomic_add_rel_32( &((_tq)->an)->an_swq_depth, 1); \
} while (0)
#define ATH_TID_INSERT_TAIL(_tq, _elm, _field) do { \
	TAILQ_INSERT_TAIL(&(_tq)->tid_q, (_elm), _field); \
	(_tq)->axq_depth++; \
	atomic_add_rel_32( &((_tq)->an)->an_swq_depth, 1); \
} while (0)
#define ATH_TID_REMOVE(_tq, _elm, _field) do { \
	TAILQ_REMOVE(&(_tq)->tid_q, _elm, _field); \
	(_tq)->axq_depth--; \
	atomic_subtract_rel_32( &((_tq)->an)->an_swq_depth, 1); \
} while (0)
#define	ATH_TID_FIRST(_tq)		TAILQ_FIRST(&(_tq)->tid_q)
#define	ATH_TID_LAST(_tq, _field)	TAILQ_LAST(&(_tq)->tid_q, _field)

/*
 * These are for the TID filtered frame queue
 */
#define ATH_TID_FILT_INSERT_HEAD(_tq, _elm, _field) do { \
	TAILQ_INSERT_HEAD(&(_tq)->filtq.tid_q, (_elm), _field); \
	(_tq)->axq_depth++; \
	atomic_add_rel_32( &((_tq)->an)->an_swq_depth, 1); \
} while (0)
#define ATH_TID_FILT_INSERT_TAIL(_tq, _elm, _field) do { \
	TAILQ_INSERT_TAIL(&(_tq)->filtq.tid_q, (_elm), _field); \
	(_tq)->axq_depth++; \
	atomic_add_rel_32( &((_tq)->an)->an_swq_depth, 1); \
} while (0)
#define ATH_TID_FILT_REMOVE(_tq, _elm, _field) do { \
	TAILQ_REMOVE(&(_tq)->filtq.tid_q, _elm, _field); \
	(_tq)->axq_depth--; \
	atomic_subtract_rel_32( &((_tq)->an)->an_swq_depth, 1); \
} while (0)
#define	ATH_TID_FILT_FIRST(_tq)		TAILQ_FIRST(&(_tq)->filtq.tid_q)
#define	ATH_TID_FILT_LAST(_tq, _field)	TAILQ_LAST(&(_tq)->filtq.tid_q,_field)

struct ath_vap {
	struct ieee80211vap av_vap;	/* base class */
	int		av_bslot;	/* beacon slot index */
	struct ath_buf	*av_bcbuf;	/* beacon buffer */
	struct ieee80211_beacon_offsets av_boff;/* dynamic update state */
	struct ath_txq	av_mcastq;	/* buffered mcast s/w queue */

	void		(*av_recv_mgmt)(struct ieee80211_node *,
				struct mbuf *, int, int, int);
	int		(*av_newstate)(struct ieee80211vap *,
				enum ieee80211_state, int);
	void		(*av_bmiss)(struct ieee80211vap *);
	void		(*av_node_ps)(struct ieee80211_node *, int);
	int		(*av_set_tim)(struct ieee80211_node *, int);
};
#define	ATH_VAP(vap)	((struct ath_vap *)(vap))

struct taskqueue;
struct ath_tx99;

/*
 * Whether to reset the TX/RX queue with or without
 * a queue flush.
 */
typedef enum {
	ATH_RESET_DEFAULT = 0,
	ATH_RESET_NOLOSS = 1,
	ATH_RESET_FULL = 2,
} ATH_RESET_TYPE;

struct ath_rx_methods {
	void		(*recv_sched_queue)(struct ath_softc *sc,
			    HAL_RX_QUEUE q, int dosched);
	void		(*recv_sched)(struct ath_softc *sc, int dosched);
	void		(*recv_stop)(struct ath_softc *sc, int dodelay);
	int		(*recv_start)(struct ath_softc *sc);
	void		(*recv_flush)(struct ath_softc *sc);
	void		(*recv_tasklet)(void *arg, int npending);
	int		(*recv_rxbuf_init)(struct ath_softc *sc,
			    struct ath_buf *bf);
	int		(*recv_setup)(struct ath_softc *sc);
	int		(*recv_teardown)(struct ath_softc *sc);
};

/*
 * Represent the current state of the RX FIFO.
 */
struct ath_rx_edma {
	struct ath_buf	**m_fifo;
	int		m_fifolen;
	int		m_fifo_head;
	int		m_fifo_tail;
	int		m_fifo_depth;
	struct mbuf	*m_rxpending;
};
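
/*
 * Bookkeeping sketch (illustrative only): buffers are pushed at
 * m_fifo_tail and completed from m_fifo_head, both wrapping at
 * m_fifolen, e.g.
 *
 *	re->m_fifo[re->m_fifo_tail] = bf;
 *	re->m_fifo_tail = (re->m_fifo_tail + 1) % re->m_fifolen;
 *	re->m_fifo_depth++;
 */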

struct ath_tx_edma_fifo {
	struct ath_buf	**m_fifo;
	int		m_fifolen;
	int		m_fifo_head;
	int		m_fifo_tail;
	int		m_fifo_depth;
};

struct ath_tx_methods {
	int		(*xmit_setup)(struct ath_softc *sc);
	int		(*xmit_teardown)(struct ath_softc *sc);
	void		(*xmit_attach_comp_func)(struct ath_softc *sc);

	void		(*xmit_dma_restart)(struct ath_softc *sc,
			    struct ath_txq *txq);
	void		(*xmit_handoff)(struct ath_softc *sc,
			    struct ath_txq *txq, struct ath_buf *bf);
	void		(*xmit_drain)(struct ath_softc *sc,
			    ATH_RESET_TYPE reset_type);
};

struct ath_softc {
	struct ifnet		*sc_ifp;	/* interface common */
	struct ath_stats	sc_stats;	/* interface statistics */
	struct ath_tx_aggr_stats	sc_aggr_stats;
	struct ath_intr_stats	sc_intr_stats;
	uint64_t		sc_debug;
	uint64_t		sc_ktrdebug;
	int			sc_nvaps;	/* # vaps */
	int			sc_nstavaps;	/* # station vaps */
	int			sc_nmeshvaps;	/* # mbss vaps */
	u_int8_t		sc_hwbssidmask[IEEE80211_ADDR_LEN];
	u_int8_t		sc_nbssid0;	/* # vap's using base mac */
	uint32_t		sc_bssidmask;	/* bssid mask */

	struct ath_rx_methods	sc_rx;
	struct ath_rx_edma	sc_rxedma[HAL_NUM_RX_QUEUES];	/* HP/LP queues */
	ath_bufhead		sc_rx_rxlist[HAL_NUM_RX_QUEUES];	/* deferred RX completion */
	struct ath_tx_methods	sc_tx;
	struct ath_tx_edma_fifo	sc_txedma[HAL_NUM_TX_QUEUES];

	/*
	 * This is (currently) protected by the TX queue lock;
	 * it should migrate to a separate lock later
	 * so as to minimise contention.
	 */
	ath_bufhead		sc_txbuf_list;

	int			sc_rx_statuslen;
	int			sc_tx_desclen;
	int			sc_tx_statuslen;
	int			sc_tx_nmaps;	/* Number of TX maps */
	int			sc_edma_bufsize;

	void 			(*sc_node_cleanup)(struct ieee80211_node *);
	void 			(*sc_node_free)(struct ieee80211_node *);
	device_t		sc_dev;
	HAL_BUS_TAG		sc_st;		/* bus space tag */
	HAL_BUS_HANDLE		sc_sh;		/* bus space handle */
	bus_dma_tag_t		sc_dmat;	/* bus DMA tag */
	struct mtx		sc_mtx;		/* master lock (recursive) */
	struct mtx		sc_pcu_mtx;	/* PCU access mutex */
	char			sc_pcu_mtx_name[32];
	struct mtx		sc_rx_mtx;	/* RX access mutex */
	char			sc_rx_mtx_name[32];
	struct mtx		sc_tx_mtx;	/* TX handling/comp mutex */
	char			sc_tx_mtx_name[32];
	struct mtx		sc_tx_ic_mtx;	/* TX queue mutex */
	char			sc_tx_ic_mtx_name[32];
	struct taskqueue	*sc_tq;		/* private task queue */
	struct ath_hal		*sc_ah;		/* Atheros HAL */
	struct ath_ratectrl	*sc_rc;		/* tx rate control support */
	struct ath_tx99		*sc_tx99;	/* tx99 adjunct state */
	void			(*sc_setdefantenna)(struct ath_softc *, u_int);

	/*
	 * First set of flags.
	 */
	uint32_t		sc_invalid  : 1,/* disable hardware accesses */
				sc_mrretry  : 1,/* multi-rate retry support */
				sc_mrrprot  : 1,/* MRR + protection support */
				sc_softled  : 1,/* enable LED gpio status */
				sc_hardled  : 1,/* enable MAC LED status */
				sc_splitmic : 1,/* split TKIP MIC keys */
				sc_needmib  : 1,/* enable MIB stats intr */
				sc_diversity: 1,/* enable rx diversity */
				sc_hasveol  : 1,/* tx VEOL support */
				sc_ledstate : 1,/* LED on/off state */
				sc_blinking : 1,/* LED blink operation active */
				sc_mcastkey : 1,/* mcast key cache search */
				sc_scanning : 1,/* scanning active */
				sc_syncbeacon:1,/* sync/resync beacon timers */
				sc_hasclrkey: 1,/* CLR key supported */
				sc_xchanmode: 1,/* extended channel mode */
				sc_outdoor  : 1,/* outdoor operation */
				sc_dturbo   : 1,/* dynamic turbo in use */
				sc_hasbmask : 1,/* bssid mask support */
				sc_hasbmatch: 1,/* bssid match disable support*/
				sc_hastsfadd: 1,/* tsf adjust support */
				sc_beacons  : 1,/* beacons running */
				sc_swbmiss  : 1,/* sta mode using sw bmiss */
				sc_stagbeacons:1,/* use staggered beacons */
				sc_wmetkipmic:1,/* can do WME+TKIP MIC */
				sc_resume_up: 1,/* on resume, start all vaps */
				sc_tdma	    : 1,/* TDMA in use */
				sc_setcca   : 1,/* set/clr CCA with TDMA */
				sc_resetcal : 1,/* reset cal state next trip */
				sc_rxslink  : 1,/* do self-linked final descriptor */
				sc_rxtsf32  : 1,/* RX dec TSF is 32 bits */
				sc_isedma   : 1;/* supports EDMA */

	/*
	 * Second set of flags.
	 */
	u_int32_t		sc_use_ent  : 1,
				sc_rx_stbc  : 1,
				sc_tx_stbc  : 1;


	int			sc_cabq_enable;	/* Enable cabq transmission */

	/*
	 * Enterprise mode configuration for AR9380 and later chipsets.
	 */
	uint32_t		sc_ent_cfg;

	uint32_t		sc_eerd;	/* regdomain from EEPROM */
	uint32_t		sc_eecc;	/* country code from EEPROM */
						/* rate tables */
	const HAL_RATE_TABLE	*sc_rates[IEEE80211_MODE_MAX];
	const HAL_RATE_TABLE	*sc_currates;	/* current rate table */
	enum ieee80211_phymode	sc_curmode;	/* current phy mode */
	HAL_OPMODE		sc_opmode;	/* current operating mode */
	u_int16_t		sc_curtxpow;	/* current tx power limit */
	u_int16_t		sc_curaid;	/* current association id */
	struct ieee80211_channel *sc_curchan;	/* current installed channel */
	u_int8_t		sc_curbssid[IEEE80211_ADDR_LEN];
	u_int8_t		sc_rixmap[256];	/* IEEE to h/w rate table ix */
	struct {
		u_int8_t	ieeerate;	/* IEEE rate */
		u_int8_t	rxflags;	/* radiotap rx flags */
		u_int8_t	txflags;	/* radiotap tx flags */
		u_int16_t	ledon;		/* softled on time */
		u_int16_t	ledoff;		/* softled off time */
	} sc_hwmap[32];				/* h/w rate ix mappings */
	u_int8_t		sc_protrix;	/* protection rate index */
	u_int8_t		sc_lastdatarix;	/* last data frame rate index */
	u_int			sc_mcastrate;	/* ieee rate for mcastrateix */
	u_int			sc_fftxqmin;	/* min frames before staging */
	u_int			sc_fftxqmax;	/* max frames before drop */
	u_int			sc_txantenna;	/* tx antenna (fixed or auto) */

	HAL_INT			sc_imask;	/* interrupt mask copy */

	/*
	 * These are modified in the interrupt handler as well as
	 * the task queues and other contexts. Thus these must be
	 * protected by a mutex, or they could clash.
	 *
	 * For now, access to these is behind the ATH_LOCK,
	 * just to save time.
	 */
	uint32_t		sc_txq_active;	/* bitmap of active TXQs */
	uint32_t		sc_kickpcu;	/* whether to kick the PCU */
	uint32_t		sc_rxproc_cnt;	/* In RX processing */
	uint32_t		sc_txproc_cnt;	/* In TX processing */
	uint32_t		sc_txstart_cnt;	/* In TX output (raw/start) */
	uint32_t		sc_inreset_cnt;	/* In active reset/chanchange */
	uint32_t		sc_txrx_cnt;	/* refcount on stop/start'ing TX */
	uint32_t		sc_intr_cnt;	/* refcount on interrupt handling */

	u_int			sc_keymax;	/* size of key cache */
	u_int8_t		sc_keymap[ATH_KEYBYTES];/* key use bit map */

	/*
	 * Software based LED blinking
	 */
	u_int			sc_ledpin;	/* GPIO pin for driving LED */
	u_int			sc_ledon;	/* pin setting for LED on */
	u_int			sc_ledidle;	/* idle polling interval */
	int			sc_ledevent;	/* time of last LED event */
	u_int8_t		sc_txrix;	/* current tx rate for LED */
	u_int16_t		sc_ledoff;	/* off time for current blink */
	struct callout		sc_ledtimer;	/* led off timer */

	/*
	 * Hardware based LED blinking
	 */
	int			sc_led_pwr_pin;	/* MAC power LED GPIO pin */
	int			sc_led_net_pin;	/* MAC network LED GPIO pin */

	u_int			sc_rfsilentpin;	/* GPIO pin for rfkill int */
	u_int			sc_rfsilentpol;	/* pin setting for rfkill on */

	struct ath_descdma	sc_rxdma;	/* RX descriptors */
	ath_bufhead		sc_rxbuf;	/* receive buffer */
	u_int32_t		*sc_rxlink;	/* link ptr in last RX desc */
	struct task		sc_rxtask;	/* rx int processing */
	u_int8_t		sc_defant;	/* current default antenna */
	u_int8_t		sc_rxotherant;	/* rx's on non-default antenna*/
	u_int64_t		sc_lastrx;	/* tsf at last rx'd frame */
	struct ath_rx_status	*sc_lastrs;	/* h/w status of last rx */
	struct ath_rx_radiotap_header sc_rx_th;
	int			sc_rx_th_len;
	u_int			sc_monpass;	/* frames to pass in mon.mode */

	struct ath_descdma	sc_txdma;	/* TX descriptors */
	uint16_t		sc_txbuf_descid;
	ath_bufhead		sc_txbuf;	/* transmit buffer */
	int			sc_txbuf_cnt;	/* how many buffers avail */
	struct ath_descdma	sc_txdma_mgmt;	/* mgmt TX descriptors */
	ath_bufhead		sc_txbuf_mgmt;	/* mgmt transmit buffer */
	struct ath_descdma	sc_txsdma;	/* EDMA TX status desc's */
	struct mtx		sc_txbuflock;	/* txbuf lock */
	char			sc_txname[12];	/* e.g. "ath0_buf" */
	u_int			sc_txqsetup;	/* h/w queues setup */
	u_int			sc_txintrperiod;/* tx interrupt batching */
	struct ath_txq		sc_txq[HAL_NUM_TX_QUEUES];
	struct ath_txq		*sc_ac2q[5];	/* WME AC -> h/w q map */
	struct task		sc_txtask;	/* tx int processing */
	struct task		sc_txqtask;	/* tx proc processing */
	struct task		sc_txpkttask;	/* tx frame processing */

	struct ath_descdma	sc_txcompdma;	/* TX EDMA completion */
	struct mtx		sc_txcomplock;	/* TX EDMA completion lock */
	char			sc_txcompname[12];	/* eg ath0_txcomp */

	int			sc_wd_timer;	/* count down for wd timer */
	struct callout		sc_wd_ch;	/* tx watchdog timer */
	struct ath_tx_radiotap_header sc_tx_th;
	int			sc_tx_th_len;

	struct ath_descdma	sc_bdma;	/* beacon descriptors */
	ath_bufhead		sc_bbuf;	/* beacon buffers */
	u_int			sc_bhalq;	/* HAL q for outgoing beacons */
	u_int			sc_bmisscount;	/* missed beacon transmits */
	u_int32_t		sc_ant_tx[8];	/* recent tx frames/antenna */
	struct ath_txq		*sc_cabq;	/* tx q for cab frames */
	struct task		sc_bmisstask;	/* bmiss int processing */
	struct task		sc_bstucktask;	/* stuck beacon processing */
	struct task		sc_resettask;	/* interface reset task */
	struct task		sc_fataltask;	/* fatal task */
	enum {
		OK,				/* no change needed */
		UPDATE,				/* update pending */
		COMMIT				/* beacon sent, commit change */
	} sc_updateslot;			/* slot time update fsm */
	int			sc_slotupdate;	/* slot to advance fsm */
	struct ieee80211vap	*sc_bslot[ATH_BCBUF];
	int			sc_nbcnvaps;	/* # vaps with beacons */

	struct callout		sc_cal_ch;	/* callout handle for cals */
	int			sc_lastlongcal;	/* last long cal completed */
	int			sc_lastcalreset;/* last cal reset done */
	int			sc_lastani;	/* last ANI poll */
	int			sc_lastshortcal;	/* last short calibration */
	HAL_BOOL		sc_doresetcal;	/* Yes, we're doing a reset cal atm */
	HAL_NODE_STATS		sc_halstats;	/* station-mode rssi stats */
	u_int			sc_tdmadbaprep;	/* TDMA DBA prep time */
	u_int			sc_tdmaswbaprep;/* TDMA SWBA prep time */
	u_int			sc_tdmaswba;	/* TDMA SWBA counter */
	u_int32_t		sc_tdmabintval;	/* TDMA beacon interval (TU) */
	u_int32_t		sc_tdmaguard;	/* TDMA guard time (usec) */
	u_int			sc_tdmaslotlen;	/* TDMA slot length (usec) */
	u_int32_t		sc_avgtsfdeltap;/* TDMA slot adjust (+) */
	u_int32_t		sc_avgtsfdeltam;/* TDMA slot adjust (-) */
	uint16_t		*sc_eepromdata;	/* Local eeprom data, if AR9100 */
	uint32_t		sc_txchainmask;	/* hardware TX chainmask */
	uint32_t		sc_rxchainmask;	/* hardware RX chainmask */
	uint32_t		sc_cur_txchainmask;	/* currently configured TX chainmask */
	uint32_t		sc_cur_rxchainmask;	/* currently configured RX chainmask */
	uint32_t		sc_rts_aggr_limit;	/* TX limit on RTS aggregates */
	int			sc_aggr_limit;	/* TX limit on all aggregates */
	int			sc_delim_min_pad;	/* Minimum delimiter count */

	/* Queue limits */

	/*
	 * To avoid queue starvation in congested conditions,
	 * these parameters tune the maximum number of frames
	 * queued to the data/mcastq before they're dropped.
	 *
	 * This is to prevent:
	 * + a single destination overwhelming everything, including
	 *   management/multicast frames;
	 * + multicast frames overwhelming everything (when the
	 *   air is sufficiently busy that cabq can't drain.)
	 *
	 * These implement:
	 * + data_minfree is the minimum number of TX buffers that must
	 *   remain free for a normal data frame to be allowed.
	 *
	 * + mcastq_maxdepth is the maximum depth allowed of the cabq.
	 */
	int			sc_txq_data_minfree;
	int			sc_txq_mcastq_maxdepth;
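
	/*
	 * Rough effect (illustrative only): a normal data frame is
	 * refused once no more than sc_txq_data_minfree TX buffers
	 * remain free, e.g.
	 *
	 *	if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)
	 *		return (NULL);
	 *
	 * and multicast frames are dropped once the mcast/cab queue
	 * is sc_txq_mcastq_maxdepth entries deep.
	 */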

	/*
	 * Aggregation twiddles
	 *
	 * hwq_limit:	how busy to keep the hardware queue - don't schedule
	 *		further packets to the hardware, regardless of the TID
	 * tid_hwq_lo:	how low the per-TID hwq count has to be before the
	 *		TID will be scheduled again
	 * tid_hwq_hi:	how many frames to queue to the HWQ before the TID
	 *		stops being scheduled.
	 */
	int			sc_hwq_limit;
	int			sc_tid_hwq_lo;
	int			sc_tid_hwq_hi;
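
	/*
	 * Hysteresis sketch (illustrative only; the exact checks live
	 * in the TX scheduler): a TID stops being handed new frames
	 * once its hwq_depth reaches sc_tid_hwq_hi and becomes
	 * eligible again only after it drains below sc_tid_hwq_lo;
	 * sc_hwq_limit similarly caps the hardware queue depth
	 * regardless of TID.
	 */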

	/* DFS related state */
	void			*sc_dfs;	/* Used by an optional DFS module */
	int			sc_dodfs;	/* Whether to enable DFS rx filter bits */
	struct task		sc_dfstask;	/* DFS processing task */

	/* Spectral related state */
	void			*sc_spectral;
	int			sc_dospectral;

	/* ALQ */
#ifdef	ATH_DEBUG_ALQ
	struct if_ath_alq sc_alq;
#endif

	/* TX AMPDU handling */
	int			(*sc_addba_request)(struct ieee80211_node *,
				    struct ieee80211_tx_ampdu *, int, int, int);
	int			(*sc_addba_response)(struct ieee80211_node *,
				    struct ieee80211_tx_ampdu *, int, int, int);
	void			(*sc_addba_stop)(struct ieee80211_node *,
				    struct ieee80211_tx_ampdu *);
	void			(*sc_addba_response_timeout)
				    (struct ieee80211_node *,
				    struct ieee80211_tx_ampdu *);
	void			(*sc_bar_response)(struct ieee80211_node *ni,
				    struct ieee80211_tx_ampdu *tap,
				    int status);
};

#define	ATH_LOCK_INIT(_sc) \
	mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->sc_dev), \
		 NULL, MTX_DEF | MTX_RECURSE)
#define	ATH_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_mtx)
#define	ATH_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	ATH_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	ATH_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
#define	ATH_UNLOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_mtx, MA_NOTOWNED)

/*
 * The TX lock is non-reentrant and serialises the TX frame send
 * and completion operations.
 */
#define	ATH_TX_LOCK_INIT(_sc) do {\
	snprintf((_sc)->sc_tx_mtx_name,				\
	    sizeof((_sc)->sc_tx_mtx_name),				\
	    "%s TX lock",						\
	    device_get_nameunit((_sc)->sc_dev));			\
	mtx_init(&(_sc)->sc_tx_mtx, (_sc)->sc_tx_mtx_name,		\
		 NULL, MTX_DEF);					\
	} while (0)
#define	ATH_TX_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_tx_mtx)
#define	ATH_TX_LOCK(_sc)		mtx_lock(&(_sc)->sc_tx_mtx)
#define	ATH_TX_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_tx_mtx)
#define	ATH_TX_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_tx_mtx,	\
		MA_OWNED)
#define	ATH_TX_UNLOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_tx_mtx,	\
		MA_NOTOWNED)
#define	ATH_TX_TRYLOCK(_sc)	(mtx_owned(&(_sc)->sc_tx_mtx) != 0 &&	\
					mtx_trylock(&(_sc)->sc_tx_mtx))

/*
 * The IC TX lock is non-reentrant and serialises packet queuing from
 * the upper layers.
 */
#define	ATH_TX_IC_LOCK_INIT(_sc) do {\
	snprintf((_sc)->sc_tx_ic_mtx_name,				\
	    sizeof((_sc)->sc_tx_ic_mtx_name),				\
	    "%s IC TX lock",						\
	    device_get_nameunit((_sc)->sc_dev));			\
	mtx_init(&(_sc)->sc_tx_ic_mtx, (_sc)->sc_tx_ic_mtx_name,	\
		 NULL, MTX_DEF);					\
	} while (0)
#define	ATH_TX_IC_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_tx_ic_mtx)
#define	ATH_TX_IC_LOCK(_sc)		mtx_lock(&(_sc)->sc_tx_ic_mtx)
#define	ATH_TX_IC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_tx_ic_mtx)
#define	ATH_TX_IC_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_tx_ic_mtx,	\
		MA_OWNED)
#define	ATH_TX_IC_UNLOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_tx_ic_mtx,	\
		MA_NOTOWNED)

/*
 * The PCU lock is non-recursive and should be treated as a spinlock.
 * Although currently the interrupt code is run in netisr context and
 * doesn't require this, this may change in the future.
 * Please keep this in mind when protecting certain code paths
 * with the PCU lock.
 *
 * The PCU lock is used to serialise access to the PCU so things such
 * as TX, RX, state change (eg channel change), channel reset and updates
 * from interrupt context (eg kickpcu, txqactive bits) do not clash.
 *
 * Although the current single-thread taskqueue mechanism protects the
 * majority of these situations by simply serialising them, there are
 * a few others which occur at the same time. These include the TX path
 * (which only acquires ATH_LOCK when recycling buffers to the free list),
 * ath_set_channel, the channel scanning API and perhaps quite a bit more.
 */
#define	ATH_PCU_LOCK_INIT(_sc) do {\
	snprintf((_sc)->sc_pcu_mtx_name,				\
	    sizeof((_sc)->sc_pcu_mtx_name),				\
	    "%s PCU lock",						\
	    device_get_nameunit((_sc)->sc_dev));			\
	mtx_init(&(_sc)->sc_pcu_mtx, (_sc)->sc_pcu_mtx_name,		\
		 NULL, MTX_DEF);					\
	} while (0)
#define	ATH_PCU_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_pcu_mtx)
#define	ATH_PCU_LOCK(_sc)		mtx_lock(&(_sc)->sc_pcu_mtx)
#define	ATH_PCU_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_pcu_mtx)
#define	ATH_PCU_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_pcu_mtx,	\
		MA_OWNED)
#define	ATH_PCU_UNLOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_pcu_mtx,	\
		MA_NOTOWNED)
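
/*
 * Reset-path sketch (illustrative only): the PCU lock guards the
 * "in reset" accounting rather than the whole operation, e.g.
 *
 *	ATH_PCU_LOCK(sc);
 *	sc->sc_inreset_cnt++;
 *	ath_hal_intrset(sc->sc_ah, 0);
 *	ATH_PCU_UNLOCK(sc);
 *	... perform the reset/channel change ...
 *	ATH_PCU_LOCK(sc);
 *	sc->sc_inreset_cnt--;
 *	ath_hal_intrset(sc->sc_ah, sc->sc_imask);
 *	ATH_PCU_UNLOCK(sc);
 */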

/*
 * The RX lock is primarily a(nother) workaround to ensure that the
 * RX FIFO/list isn't modified by various execution paths.
 * Even though RX occurs in a single context (the ath taskqueue), the
 * RX path can be executed via various reset/channel change paths.
 */
#define	ATH_RX_LOCK_INIT(_sc) do {\
	snprintf((_sc)->sc_rx_mtx_name,					\
	    sizeof((_sc)->sc_rx_mtx_name),				\
	    "%s RX lock",						\
	    device_get_nameunit((_sc)->sc_dev));			\
	mtx_init(&(_sc)->sc_rx_mtx, (_sc)->sc_rx_mtx_name,		\
		 NULL, MTX_DEF);					\
	} while (0)
#define	ATH_RX_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_rx_mtx)
#define	ATH_RX_LOCK(_sc)		mtx_lock(&(_sc)->sc_rx_mtx)
#define	ATH_RX_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_rx_mtx)
#define	ATH_RX_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_rx_mtx,	\
		MA_OWNED)
#define	ATH_RX_UNLOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_rx_mtx,	\
		MA_NOTOWNED)

#define	ATH_TXQ_SETUP(sc, i)	((sc)->sc_txqsetup & (1<<i))

#define	ATH_TXBUF_LOCK_INIT(_sc) do { \
	snprintf((_sc)->sc_txname, sizeof((_sc)->sc_txname), "%s_buf", \
		device_get_nameunit((_sc)->sc_dev)); \
	mtx_init(&(_sc)->sc_txbuflock, (_sc)->sc_txname, NULL, MTX_DEF); \
} while (0)
#define	ATH_TXBUF_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_txbuflock)
#define	ATH_TXBUF_LOCK(_sc)		mtx_lock(&(_sc)->sc_txbuflock)
#define	ATH_TXBUF_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_txbuflock)
#define	ATH_TXBUF_LOCK_ASSERT(_sc) \
	mtx_assert(&(_sc)->sc_txbuflock, MA_OWNED)

#define	ATH_TXSTATUS_LOCK_INIT(_sc) do { \
	snprintf((_sc)->sc_txcompname, sizeof((_sc)->sc_txcompname), \
		"%s_buf", \
		device_get_nameunit((_sc)->sc_dev)); \
	mtx_init(&(_sc)->sc_txcomplock, (_sc)->sc_txcompname, NULL, \
		MTX_DEF); \
} while (0)
#define	ATH_TXSTATUS_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_txcomplock)
#define	ATH_TXSTATUS_LOCK(_sc)		mtx_lock(&(_sc)->sc_txcomplock)
#define	ATH_TXSTATUS_UNLOCK(_sc)	mtx_unlock(&(_sc)->sc_txcomplock)
#define	ATH_TXSTATUS_LOCK_ASSERT(_sc) \
	mtx_assert(&(_sc)->sc_txcomplock, MA_OWNED)

int	ath_attach(u_int16_t, struct ath_softc *);
int	ath_detach(struct ath_softc *);
void	ath_resume(struct ath_softc *);
void	ath_suspend(struct ath_softc *);
void	ath_shutdown(struct ath_softc *);
void	ath_intr(void *);

/*
 * HAL definitions to comply with local coding convention.
 */
#define	ath_hal_detach(_ah) \
	((*(_ah)->ah_detach)((_ah)))
#define	ath_hal_reset(_ah, _opmode, _chan, _outdoor, _pstatus) \
	((*(_ah)->ah_reset)((_ah), (_opmode), (_chan), (_outdoor), (_pstatus)))
#define	ath_hal_macversion(_ah) \
	(((_ah)->ah_macVersion << 4) | ((_ah)->ah_macRev))
#define	ath_hal_getratetable(_ah, _mode) \
	((*(_ah)->ah_getRateTable)((_ah), (_mode)))
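
/*
 * Example (sketch only): each wrapper simply hides the indirect call
 * through the HAL method table, so
 *
 *	rt = ath_hal_getratetable(sc->sc_ah, IEEE80211_MODE_11G);
 *
 * ends up calling (*ah->ah_getRateTable)(ah, IEEE80211_MODE_11G).
 */
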
#define	ath_hal_getmac(_ah, _mac) \
	((*(_ah)->ah_getMacAddress)((_ah), (_mac)))
#define	ath_hal_setmac(_ah, _mac) \
	((*(_ah)->ah_setMacAddress)((_ah), (_mac)))
#define	ath_hal_getbssidmask(_ah, _mask) \
	((*(_ah)->ah_getBssIdMask)((_ah), (_mask)))
#define	ath_hal_setbssidmask(_ah, _mask) \
	((*(_ah)->ah_setBssIdMask)((_ah), (_mask)))
#define	ath_hal_intrset(_ah, _mask) \
	((*(_ah)->ah_setInterrupts)((_ah), (_mask)))
#define	ath_hal_intrget(_ah) \
	((*(_ah)->ah_getInterrupts)((_ah)))
#define	ath_hal_intrpend(_ah) \
	((*(_ah)->ah_isInterruptPending)((_ah)))
#define	ath_hal_getisr(_ah, _pmask) \
	((*(_ah)->ah_getPendingInterrupts)((_ah), (_pmask)))
#define	ath_hal_updatetxtriglevel(_ah, _inc) \
	((*(_ah)->ah_updateTxTrigLevel)((_ah), (_inc)))
#define	ath_hal_setpower(_ah, _mode) \
	((*(_ah)->ah_setPowerMode)((_ah), (_mode), AH_TRUE))
#define	ath_hal_keycachesize(_ah) \
	((*(_ah)->ah_getKeyCacheSize)((_ah)))
#define	ath_hal_keyreset(_ah, _ix) \
	((*(_ah)->ah_resetKeyCacheEntry)((_ah), (_ix)))
#define	ath_hal_keyset(_ah, _ix, _pk, _mac) \
	((*(_ah)->ah_setKeyCacheEntry)((_ah), (_ix), (_pk), (_mac), AH_FALSE))
#define	ath_hal_keyisvalid(_ah, _ix) \
	(((*(_ah)->ah_isKeyCacheEntryValid)((_ah), (_ix))))
#define	ath_hal_keysetmac(_ah, _ix, _mac) \
	((*(_ah)->ah_setKeyCacheEntryMac)((_ah), (_ix), (_mac)))
#define	ath_hal_getrxfilter(_ah) \
	((*(_ah)->ah_getRxFilter)((_ah)))
#define	ath_hal_setrxfilter(_ah, _filter) \
	((*(_ah)->ah_setRxFilter)((_ah), (_filter)))
#define	ath_hal_setmcastfilter(_ah, _mfilt0, _mfilt1) \
	((*(_ah)->ah_setMulticastFilter)((_ah), (_mfilt0), (_mfilt1)))
#define	ath_hal_waitforbeacon(_ah, _bf) \
	((*(_ah)->ah_waitForBeaconDone)((_ah), (_bf)->bf_daddr))
#define	ath_hal_putrxbuf(_ah, _bufaddr, _rxq) \
	((*(_ah)->ah_setRxDP)((_ah), (_bufaddr), (_rxq)))
/* NB: common across all chips */
#define	AR_TSF_L32	0x804c	/* MAC local clock lower 32 bits */
#define	ath_hal_gettsf32(_ah) \
	OS_REG_READ(_ah, AR_TSF_L32)
#define	ath_hal_gettsf64(_ah) \
	((*(_ah)->ah_getTsf64)((_ah)))
#define	ath_hal_settsf64(_ah, _val) \
	((*(_ah)->ah_setTsf64)((_ah), (_val)))
#define	ath_hal_resettsf(_ah) \
	((*(_ah)->ah_resetTsf)((_ah)))
#define	ath_hal_rxena(_ah) \
	((*(_ah)->ah_enableReceive)((_ah)))
#define	ath_hal_puttxbuf(_ah, _q, _bufaddr) \
	((*(_ah)->ah_setTxDP)((_ah), (_q), (_bufaddr)))
#define	ath_hal_gettxbuf(_ah, _q) \
	((*(_ah)->ah_getTxDP)((_ah), (_q)))
#define	ath_hal_numtxpending(_ah, _q) \
	((*(_ah)->ah_numTxPending)((_ah), (_q)))
#define	ath_hal_getrxbuf(_ah, _rxq) \
	((*(_ah)->ah_getRxDP)((_ah), (_rxq)))
#define	ath_hal_txstart(_ah, _q) \
	((*(_ah)->ah_startTxDma)((_ah), (_q)))
#define	ath_hal_setchannel(_ah, _chan) \
	((*(_ah)->ah_setChannel)((_ah), (_chan)))
#define	ath_hal_calibrate(_ah, _chan, _iqcal) \
	((*(_ah)->ah_perCalibration)((_ah), (_chan), (_iqcal)))
#define	ath_hal_calibrateN(_ah, _chan, _lcal, _isdone) \
	((*(_ah)->ah_perCalibrationN)((_ah), (_chan), 0x1, (_lcal), (_isdone)))
#define	ath_hal_calreset(_ah, _chan) \
	((*(_ah)->ah_resetCalValid)((_ah), (_chan)))
#define	ath_hal_setledstate(_ah, _state) \
	((*(_ah)->ah_setLedState)((_ah), (_state)))
#define	ath_hal_beaconinit(_ah, _nextb, _bperiod) \
	((*(_ah)->ah_beaconInit)((_ah), (_nextb), (_bperiod)))
#define	ath_hal_beaconreset(_ah) \
	((*(_ah)->ah_resetStationBeaconTimers)((_ah)))
#define	ath_hal_beaconsettimers(_ah, _bt) \
	((*(_ah)->ah_setBeaconTimers)((_ah), (_bt)))
#define	ath_hal_beacontimers(_ah, _bs) \
	((*(_ah)->ah_setStationBeaconTimers)((_ah), (_bs)))
#define	ath_hal_getnexttbtt(_ah) \
	((*(_ah)->ah_getNextTBTT)((_ah)))
#define	ath_hal_setassocid(_ah, _bss, _associd) \
	((*(_ah)->ah_writeAssocid)((_ah), (_bss), (_associd)))
#define	ath_hal_phydisable(_ah) \
	((*(_ah)->ah_phyDisable)((_ah)))
#define	ath_hal_setopmode(_ah) \
	((*(_ah)->ah_setPCUConfig)((_ah)))
#define	ath_hal_stoptxdma(_ah, _qnum) \
	((*(_ah)->ah_stopTxDma)((_ah), (_qnum)))
#define	ath_hal_stoppcurecv(_ah) \
	((*(_ah)->ah_stopPcuReceive)((_ah)))
#define	ath_hal_startpcurecv(_ah) \
	((*(_ah)->ah_startPcuReceive)((_ah)))
#define	ath_hal_stopdmarecv(_ah) \
	((*(_ah)->ah_stopDmaReceive)((_ah)))
#define	ath_hal_getdiagstate(_ah, _id, _indata, _insize, _outdata, _outsize) \
	((*(_ah)->ah_getDiagState)((_ah), (_id), \
		(_indata), (_insize), (_outdata), (_outsize)))
#define	ath_hal_getfatalstate(_ah, _outdata, _outsize) \
	ath_hal_getdiagstate(_ah, 29, NULL, 0, (_outdata), _outsize)
#define	ath_hal_setuptxqueue(_ah, _type, _irq) \
	((*(_ah)->ah_setupTxQueue)((_ah), (_type), (_irq)))
#define	ath_hal_resettxqueue(_ah, _q) \
	((*(_ah)->ah_resetTxQueue)((_ah), (_q)))
#define	ath_hal_releasetxqueue(_ah, _q) \
	((*(_ah)->ah_releaseTxQueue)((_ah), (_q)))
#define	ath_hal_gettxqueueprops(_ah, _q, _qi) \
	((*(_ah)->ah_getTxQueueProps)((_ah), (_q), (_qi)))
#define	ath_hal_settxqueueprops(_ah, _q, _qi) \
	((*(_ah)->ah_setTxQueueProps)((_ah), (_q), (_qi)))
/* NB: common across all chips */
#define	AR_Q_TXE	0x0840	/* MAC Transmit Queue enable */
#define	ath_hal_txqenabled(_ah, _qnum) \
	(OS_REG_READ(_ah, AR_Q_TXE) & (1<<(_qnum)))
#define	ath_hal_getrfgain(_ah) \
	((*(_ah)->ah_getRfGain)((_ah)))
#define	ath_hal_getdefantenna(_ah) \
	((*(_ah)->ah_getDefAntenna)((_ah)))
#define	ath_hal_setdefantenna(_ah, _ant) \
	((*(_ah)->ah_setDefAntenna)((_ah), (_ant)))
#define	ath_hal_rxmonitor(_ah, _arg, _chan) \
	((*(_ah)->ah_rxMonitor)((_ah), (_arg), (_chan)))
#define	ath_hal_ani_poll(_ah, _chan) \
	((*(_ah)->ah_aniPoll)((_ah), (_chan)))
#define	ath_hal_mibevent(_ah, _stats) \
	((*(_ah)->ah_procMibEvent)((_ah), (_stats)))
#define	ath_hal_setslottime(_ah, _us) \
	((*(_ah)->ah_setSlotTime)((_ah), (_us)))
#define	ath_hal_getslottime(_ah) \
	((*(_ah)->ah_getSlotTime)((_ah)))
#define	ath_hal_setacktimeout(_ah, _us) \
	((*(_ah)->ah_setAckTimeout)((_ah), (_us)))
#define	ath_hal_getacktimeout(_ah) \
	((*(_ah)->ah_getAckTimeout)((_ah)))
#define	ath_hal_setctstimeout(_ah, _us) \
	((*(_ah)->ah_setCTSTimeout)((_ah), (_us)))
#define	ath_hal_getctstimeout(_ah) \
	((*(_ah)->ah_getCTSTimeout)((_ah)))
#define	ath_hal_getcapability(_ah, _cap, _param, _result) \
	((*(_ah)->ah_getCapability)((_ah), (_cap), (_param), (_result)))
#define	ath_hal_setcapability(_ah, _cap, _param, _v, _status) \
	((*(_ah)->ah_setCapability)((_ah), (_cap), (_param), (_v), (_status)))
#define	ath_hal_ciphersupported(_ah, _cipher) \
	(ath_hal_getcapability(_ah, HAL_CAP_CIPHER, _cipher, NULL) == HAL_OK)
#define	ath_hal_getregdomain(_ah, _prd) \
	(ath_hal_getcapability(_ah, HAL_CAP_REG_DMN, 0, (_prd)) == HAL_OK)
#define	ath_hal_setregdomain(_ah, _rd) \
	ath_hal_setcapability(_ah, HAL_CAP_REG_DMN, 0, _rd, NULL)
#define	ath_hal_getcountrycode(_ah, _pcc) \
	(*(_pcc) = (_ah)->ah_countryCode)
#define	ath_hal_gettkipmic(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TKIP_MIC, 1, NULL) == HAL_OK)
#define	ath_hal_settkipmic(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_TKIP_MIC, 1, _v, NULL)
#define	ath_hal_hastkipsplit(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TKIP_SPLIT, 0, NULL) == HAL_OK)
#define	ath_hal_gettkipsplit(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TKIP_SPLIT, 1, NULL) == HAL_OK)
#define	ath_hal_settkipsplit(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_TKIP_SPLIT, 1, _v, NULL)
#define	ath_hal_haswmetkipmic(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_WME_TKIPMIC, 0, NULL) == HAL_OK)
#define	ath_hal_hwphycounters(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_PHYCOUNTERS, 0, NULL) == HAL_OK)
#define	ath_hal_hasdiversity(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_DIVERSITY, 0, NULL) == HAL_OK)
#define	ath_hal_getdiversity(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_DIVERSITY, 1, NULL) == HAL_OK)
#define	ath_hal_setdiversity(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_DIVERSITY, 1, _v, NULL)
#define	ath_hal_getantennaswitch(_ah) \
	((*(_ah)->ah_getAntennaSwitch)((_ah)))
#define	ath_hal_setantennaswitch(_ah, _v) \
	((*(_ah)->ah_setAntennaSwitch)((_ah), (_v)))
#define	ath_hal_getdiag(_ah, _pv) \
	(ath_hal_getcapability(_ah, HAL_CAP_DIAG, 0, _pv) == HAL_OK)
#define	ath_hal_setdiag(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_DIAG, 0, _v, NULL)
#define	ath_hal_getnumtxqueues(_ah, _pv) \
	(ath_hal_getcapability(_ah, HAL_CAP_NUM_TXQUEUES, 0, _pv) == HAL_OK)
#define	ath_hal_hasveol(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_VEOL, 0, NULL) == HAL_OK)
#define	ath_hal_hastxpowlimit(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXPOW, 0, NULL) == HAL_OK)
#define	ath_hal_settxpowlimit(_ah, _pow) \
	((*(_ah)->ah_setTxPowerLimit)((_ah), (_pow)))
#define	ath_hal_gettxpowlimit(_ah, _ppow) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXPOW, 1, _ppow) == HAL_OK)
#define	ath_hal_getmaxtxpow(_ah, _ppow) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXPOW, 2, _ppow) == HAL_OK)
#define	ath_hal_gettpscale(_ah, _scale) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXPOW, 3, _scale) == HAL_OK)
#define	ath_hal_settpscale(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_TXPOW, 3, _v, NULL)
#define	ath_hal_hastpc(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TPC, 0, NULL) == HAL_OK)
#define	ath_hal_gettpc(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TPC, 1, NULL) == HAL_OK)
#define	ath_hal_settpc(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_TPC, 1, _v, NULL)
#define	ath_hal_hasbursting(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_BURST, 0, NULL) == HAL_OK)
#define	ath_hal_setmcastkeysearch(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_MCAST_KEYSRCH, 0, _v, NULL)
#define	ath_hal_hasmcastkeysearch(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_MCAST_KEYSRCH, 0, NULL) == HAL_OK)
#define	ath_hal_getmcastkeysearch(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_MCAST_KEYSRCH, 1, NULL) == HAL_OK)
#define	ath_hal_hasfastframes(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_FASTFRAME, 0, NULL) == HAL_OK)
#define	ath_hal_hasbssidmask(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_BSSIDMASK, 0, NULL) == HAL_OK)
#define	ath_hal_hasbssidmatch(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_BSSIDMATCH, 0, NULL) == HAL_OK)
#define	ath_hal_hastsfadjust(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TSF_ADJUST, 0, NULL) == HAL_OK)
#define	ath_hal_gettsfadjust(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TSF_ADJUST, 1, NULL) == HAL_OK)
#define	ath_hal_settsfadjust(_ah, _onoff) \
	ath_hal_setcapability(_ah, HAL_CAP_TSF_ADJUST, 1, _onoff, NULL)
#define	ath_hal_hasrfsilent(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_RFSILENT, 0, NULL) == HAL_OK)
#define	ath_hal_getrfkill(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_RFSILENT, 1, NULL) == HAL_OK)
#define	ath_hal_setrfkill(_ah, _onoff) \
	ath_hal_setcapability(_ah, HAL_CAP_RFSILENT, 1, _onoff, NULL)
#define	ath_hal_getrfsilent(_ah, _prfsilent) \
	(ath_hal_getcapability(_ah, HAL_CAP_RFSILENT, 2, _prfsilent) == HAL_OK)
#define	ath_hal_setrfsilent(_ah, _rfsilent) \
	ath_hal_setcapability(_ah, HAL_CAP_RFSILENT, 2, _rfsilent, NULL)
#define	ath_hal_gettpack(_ah, _ptpack) \
	(ath_hal_getcapability(_ah, HAL_CAP_TPC_ACK, 0, _ptpack) == HAL_OK)
#define	ath_hal_settpack(_ah, _tpack) \
	ath_hal_setcapability(_ah, HAL_CAP_TPC_ACK, 0, _tpack, NULL)
#define	ath_hal_gettpcts(_ah, _ptpcts) \
	(ath_hal_getcapability(_ah, HAL_CAP_TPC_CTS, 0, _ptpcts) == HAL_OK)
#define	ath_hal_settpcts(_ah, _tpcts) \
	ath_hal_setcapability(_ah, HAL_CAP_TPC_CTS, 0, _tpcts, NULL)
#define	ath_hal_hasintmit(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_INTMIT, \
	HAL_CAP_INTMIT_PRESENT, NULL) == HAL_OK)
#define	ath_hal_getintmit(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_INTMIT, \
	HAL_CAP_INTMIT_ENABLE, NULL) == HAL_OK)
#define	ath_hal_setintmit(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_INTMIT, \
	HAL_CAP_INTMIT_ENABLE, _v, NULL)

/* EDMA definitions */
#define	ath_hal_hasedma(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_ENHANCED_DMA_SUPPORT,	\
	0, NULL) == HAL_OK)
#define	ath_hal_getrxfifodepth(_ah, _qtype, _req) \
	(ath_hal_getcapability(_ah, HAL_CAP_RXFIFODEPTH, _qtype, _req)	\
	== HAL_OK)
#define	ath_hal_getntxmaps(_ah, _req) \
	(ath_hal_getcapability(_ah, HAL_CAP_NUM_TXMAPS, 0, _req)	\
	== HAL_OK)
#define	ath_hal_gettxdesclen(_ah, _req) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXDESCLEN, 0, _req)		\
	== HAL_OK)
#define	ath_hal_gettxstatuslen(_ah, _req) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXSTATUSLEN, 0, _req)	\
	== HAL_OK)
#define	ath_hal_getrxstatuslen(_ah, _req) \
	(ath_hal_getcapability(_ah, HAL_CAP_RXSTATUSLEN, 0, _req)	\
	== HAL_OK)
#define	ath_hal_setrxbufsize(_ah, _req) \
	(ath_hal_setcapability(_ah, HAL_CAP_RXBUFSIZE, 0, _req, NULL)	\
	== HAL_OK)

#define	ath_hal_getchannoise(_ah, _c) \
	((*(_ah)->ah_getChanNoise)((_ah), (_c)))

/* 802.11n HAL methods */
#define	ath_hal_getrxchainmask(_ah, _prxchainmask) \
	(ath_hal_getcapability(_ah, HAL_CAP_RX_CHAINMASK, 0, _prxchainmask))
#define	ath_hal_gettxchainmask(_ah, _ptxchainmask) \
	(ath_hal_getcapability(_ah, HAL_CAP_TX_CHAINMASK, 0, _ptxchainmask))
#define	ath_hal_setrxchainmask(_ah, _rx) \
	(ath_hal_setcapability(_ah, HAL_CAP_RX_CHAINMASK, 1, _rx, NULL))
#define	ath_hal_settxchainmask(_ah, _tx) \
	(ath_hal_setcapability(_ah, HAL_CAP_TX_CHAINMASK, 1, _tx, NULL))
#define	ath_hal_split4ktrans(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_SPLIT_4KB_TRANS, \
	0, NULL) == HAL_OK)
#define	ath_hal_self_linked_final_rxdesc(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_RXDESC_SELFLINK, \
	0, NULL) == HAL_OK)
#define	ath_hal_gtxto_supported(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_GTXTO, 0, NULL) == HAL_OK)
#define	ath_hal_has_long_rxdesc_tsf(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_LONG_RXDESC_TSF, \
	0, NULL) == HAL_OK)
#define	ath_hal_setuprxdesc(_ah, _ds, _size, _intreq) \
	((*(_ah)->ah_setupRxDesc)((_ah), (_ds), (_size), (_intreq)))
#define	ath_hal_rxprocdesc(_ah, _ds, _dspa, _dsnext, _rs) \
	((*(_ah)->ah_procRxDesc)((_ah), (_ds), (_dspa), (_dsnext), 0, (_rs)))
#define	ath_hal_setuptxdesc(_ah, _ds, _plen, _hlen, _atype, _txpow, \
		_txr0, _txtr0, _keyix, _ant, _flags, \
		_rtsrate, _rtsdura) \
	((*(_ah)->ah_setupTxDesc)((_ah), (_ds), (_plen), (_hlen), (_atype), \
		(_txpow), (_txr0), (_txtr0), (_keyix), (_ant), \
		(_flags), (_rtsrate), (_rtsdura), 0, 0, 0))
#define	ath_hal_setupxtxdesc(_ah, _ds, \
		_txr1, _txtr1, _txr2, _txtr2, _txr3, _txtr3) \
	((*(_ah)->ah_setupXTxDesc)((_ah), (_ds), \
		(_txr1), (_txtr1), (_txr2), (_txtr2), (_txr3), (_txtr3)))
#define	ath_hal_filltxdesc(_ah, _ds, _b, _l, _did, _qid, _first, _last, _ds0) \
	((*(_ah)->ah_fillTxDesc)((_ah), (_ds), (_b), (_l), (_did), (_qid), \
		(_first), (_last), (_ds0)))
#define	ath_hal_txprocdesc(_ah, _ds, _ts) \
	((*(_ah)->ah_procTxDesc)((_ah), (_ds), (_ts)))
#define	ath_hal_gettxintrtxqs(_ah, _txqs) \
	((*(_ah)->ah_getTxIntrQueue)((_ah), (_txqs)))
#define ath_hal_gettxcompletionrates(_ah, _ds, _rates, _tries) \
	((*(_ah)->ah_getTxCompletionRates)((_ah), (_ds), (_rates), (_tries)))
#define ath_hal_settxdesclink(_ah, _ds, _link) \
	((*(_ah)->ah_setTxDescLink)((_ah), (_ds), (_link)))
#define ath_hal_gettxdesclink(_ah, _ds, _link) \
	((*(_ah)->ah_getTxDescLink)((_ah), (_ds), (_link)))
#define ath_hal_gettxdesclinkptr(_ah, _ds, _linkptr) \
	((*(_ah)->ah_getTxDescLinkPtr)((_ah), (_ds), (_linkptr)))
#define	ath_hal_setuptxstatusring(_ah, _tsstart, _tspstart, _size) \
	((*(_ah)->ah_setupTxStatusRing)((_ah), (_tsstart), (_tspstart), \
		(_size)))
#define	ath_hal_gettxrawtxdesc(_ah, _txstatus) \
	((*(_ah)->ah_getTxRawTxDesc)((_ah), (_txstatus)))

#define	ath_hal_setupfirsttxdesc(_ah, _ds, _aggrlen, _flags, _txpower, \
		_txr0, _txtr0, _antm, _rcr, _rcd) \
	((*(_ah)->ah_setupFirstTxDesc)((_ah), (_ds), (_aggrlen), (_flags), \
	(_txpower), (_txr0), (_txtr0), (_antm), (_rcr), (_rcd)))
#define	ath_hal_chaintxdesc(_ah, _ds, _bl, _sl, _pktlen, _hdrlen, _type, \
	_keyix, _cipher, _delims, _first, _last, _lastaggr) \
	((*(_ah)->ah_chainTxDesc)((_ah), (_ds), (_bl), (_sl), \
	(_pktlen), (_hdrlen), (_type), (_keyix), (_cipher), (_delims), \
	(_first), (_last), (_lastaggr)))
#define	ath_hal_setuplasttxdesc(_ah, _ds, _ds0) \
	((*(_ah)->ah_setupLastTxDesc)((_ah), (_ds), (_ds0)))

#define	ath_hal_set11nratescenario(_ah, _ds, _dur, _rt, _series, _ns, _flags) \
	((*(_ah)->ah_set11nRateScenario)((_ah), (_ds), (_dur), (_rt), \
	(_series), (_ns), (_flags)))

#define	ath_hal_set11n_aggr_first(_ah, _ds, _len, _num) \
	((*(_ah)->ah_set11nAggrFirst)((_ah), (_ds), (_len), (_num)))
#define	ath_hal_set11n_aggr_middle(_ah, _ds, _num) \
	((*(_ah)->ah_set11nAggrMiddle)((_ah), (_ds), (_num)))
#define	ath_hal_set11n_aggr_last(_ah, _ds) \
	((*(_ah)->ah_set11nAggrLast)((_ah), (_ds)))

#define	ath_hal_set11nburstduration(_ah, _ds, _dur) \
	((*(_ah)->ah_set11nBurstDuration)((_ah), (_ds), (_dur)))
#define	ath_hal_clr11n_aggr(_ah, _ds) \
	((*(_ah)->ah_clr11nAggr)((_ah), (_ds)))
#define	ath_hal_set11n_virtmorefrag(_ah, _ds, _v) \
	((*(_ah)->ah_set11nVirtMoreFrag)((_ah), (_ds), (_v)))

#define	ath_hal_gpioCfgOutput(_ah, _gpio, _type) \
	((*(_ah)->ah_gpioCfgOutput)((_ah), (_gpio), (_type)))
#define	ath_hal_gpioset(_ah, _gpio, _b) \
	((*(_ah)->ah_gpioSet)((_ah), (_gpio), (_b)))
#define	ath_hal_gpioget(_ah, _gpio) \
	((*(_ah)->ah_gpioGet)((_ah), (_gpio)))
#define	ath_hal_gpiosetintr(_ah, _gpio, _b) \
	((*(_ah)->ah_gpioSetIntr)((_ah), (_gpio), (_b)))

/*
 * PCIe suspend/resume/poweron/poweroff related macros
 */
#define	ath_hal_enablepcie(_ah, _restore, _poweroff) \
	((*(_ah)->ah_configPCIE)((_ah), (_restore), (_poweroff)))
#define	ath_hal_disablepcie(_ah) \
	((*(_ah)->ah_disablePCIE)((_ah)))

/*
 * This is badly-named; you need to set the correct parameters
 * to begin to receive useful radar events; and even then
 * it doesn't "enable" DFS. See the ath_dfs/null/ module for
 * more information.
 */
#define	ath_hal_enabledfs(_ah, _param) \
	((*(_ah)->ah_enableDfs)((_ah), (_param)))
#define	ath_hal_getdfsthresh(_ah, _param) \
	((*(_ah)->ah_getDfsThresh)((_ah), (_param)))
#define	ath_hal_getdfsdefaultthresh(_ah, _param) \
	((*(_ah)->ah_getDfsDefaultThresh)((_ah), (_param)))
#define	ath_hal_procradarevent(_ah, _rxs, _fulltsf, _buf, _event) \
	((*(_ah)->ah_procRadarEvent)((_ah), (_rxs), (_fulltsf), \
	(_buf), (_event)))
#define	ath_hal_is_fast_clock_enabled(_ah) \
	((*(_ah)->ah_isFastClockEnabled)((_ah)))
#define	ath_hal_radar_wait(_ah, _chan) \
	((*(_ah)->ah_radarWait)((_ah), (_chan)))
#define	ath_hal_get_mib_cycle_counts(_ah, _sample) \
	((*(_ah)->ah_getMibCycleCounts)((_ah), (_sample)))
#define	ath_hal_get_chan_ext_busy(_ah) \
	((*(_ah)->ah_get11nExtBusy)((_ah)))
#define	ath_hal_setchainmasks(_ah, _txchainmask, _rxchainmask) \
	((*(_ah)->ah_setChainMasks)((_ah), (_txchainmask), (_rxchainmask)))

#define	ath_hal_spectral_supported(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_SPECTRAL_SCAN, 0, NULL) == HAL_OK)
#define	ath_hal_spectral_get_config(_ah, _p) \
	((*(_ah)->ah_spectralGetConfig)((_ah), (_p)))
#define	ath_hal_spectral_configure(_ah, _p) \
	((*(_ah)->ah_spectralConfigure)((_ah), (_p)))
#define	ath_hal_spectral_start(_ah) \
	((*(_ah)->ah_spectralStart)((_ah)))
#define	ath_hal_spectral_stop(_ah) \
	((*(_ah)->ah_spectralStop)((_ah)))

#endif /* _DEV_ATH_ATHVAR_H */