ath_main.c revision 7656:2621e50fdf4a
1277323Sdim/*
2277323Sdim * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
3353358Sdim * Use is subject to license terms.
4353358Sdim */
5353358Sdim
6277323Sdim/*
7277323Sdim * Copyright (c) 2002-2004 Sam Leffler, Errno Consulting
8277323Sdim * All rights reserved.
9277323Sdim *
10277323Sdim * Redistribution and use in source and binary forms, with or without
11277323Sdim * modification, are permitted provided that the following conditions
12341825Sdim * are met:
13277323Sdim * 1. Redistributions of source code must retain the above copyright
14309124Sdim * notice, this list of conditions and the following disclaimer,
15277323Sdim * without modification.
16277323Sdim * 2. Redistributions in binary form must reproduce at minimum a disclaimer
17277323Sdim * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
18341825Sdim * redistribution must be conditioned upon including a substantially
19277323Sdim * similar Disclaimer requirement for further binary redistribution.
20309124Sdim * 3. Neither the names of the above-listed copyright holders nor the names
21309124Sdim * of any contributors may be used to endorse or promote products derived
22344779Sdim * from this software without specific prior written permission.
23344779Sdim *
24309124Sdim * NO WARRANTY
25309124Sdim * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26277323Sdim * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27277323Sdim * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
28277323Sdim * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
29277323Sdim * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
30277323Sdim * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31277323Sdim * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32277323Sdim * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
33296417Sdim * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34327952Sdim * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
35327952Sdim * THE POSSIBILITY OF SUCH DAMAGES.
36327952Sdim *
37353358Sdim */
38353358Sdim
39309124Sdim/*
40309124Sdim * Driver for the Atheros Wireless LAN controller.
41309124Sdim *
42314564Sdim * The Atheros driver calls into net80211 module for IEEE80211 protocol
43314564Sdim * management functionalities. The driver includes a LLD(Low Level Driver)
44314564Sdim * part to implement H/W related operations.
45341825Sdim * The following is the high level structure of ath driver.
46277323Sdim * (The arrows between modules indicate function call direction.)
47341825Sdim *
48277323Sdim *
49277323Sdim *                                                  |
50277323Sdim *                                                  | GLD thread
51277323Sdim *                                                  V
52309124Sdim *         ==================  =========================================
53341825Sdim *         |                |  |[1]                                    |
54309124Sdim *         |                |  |  GLDv3 Callback functions registered  |
55309124Sdim *         |   Net80211     |  =========================       by      |
56341825Sdim *         |    module      |          |               |     driver    |
57309124Sdim *         |                |          V               |               |
58314564Sdim *         |                |========================  |               |
59341825Sdim *         |   Functions exported by net80211       |  |               |
60314564Sdim *         |                                        |  |               |
61314564Sdim *         ==========================================  =================
62341825Sdim *                         |                                  |
63314564Sdim *                         V                                  |
64314564Sdim *         +----------------------------------+               |
65341825Sdim *         |[2]                               |               |
66314564Sdim *         |    Net80211 Callback functions   |               |
67314564Sdim *         |      registered by LLD           |               |
68314564Sdim *         +----------------------------------+               |
69314564Sdim *                         |                                  |
70314564Sdim *                         V                                  v
71277323Sdim *         +-----------------------------------------------------------+
72277323Sdim *         |[3]                                                        |
73277323Sdim *         |                LLD Internal functions                     |
74277323Sdim *         |                                                           |
75 *         +-----------------------------------------------------------+
76 *                                    ^
77 *                                    | Software interrupt thread
78 *                                    |
79 *
80 * The short description of each module is as below:
81 *      Module 1: GLD callback functions, which are intercepting the calls from
82 *                GLD to LLD.
83 *      Module 2: Net80211 callback functions registered by LLD, which
84 *                calls into LLD for H/W related functions needed by net80211.
 *      Module 3: LLD Internal functions, which are responsible for allocating
86 *                descriptor/buffer, handling interrupt and other H/W
87 *                operations.
88 *
89 * All functions are running in 3 types of thread:
90 * 1. GLD callbacks threads, such as ioctl, intr, etc.
 * 2. Clock interrupt thread which is responsible for scan, rate control and
92 *    calibration.
93 * 3. Software Interrupt thread originated in LLD.
94 *
95 * The lock strategy is as below:
 * There are 4 queues for tx; each queue has one asc_txqlock[i] to
97 *      prevent conflicts access to queue resource from different thread.
98 *
99 * All the transmit buffers are contained in asc_txbuf which are
100 *      protected by asc_txbuflock.
101 *
 * All receive buffers are contained in asc_rxbuf, which are protected
103 *      by asc_rxbuflock.
104 *
105 * In ath struct, asc_genlock is a general lock, protecting most other
106 *      operational data in ath_softc struct and HAL accesses.
 *      It is acquired by the interrupt handler and most "mode-ctrl" routines.
108 *
109 * Any of the locks can be acquired singly, but where multiple
110 * locks are acquired, they *must* be in the order:
111 *    asc_genlock >> asc_txqlock[i] >> asc_txbuflock >> asc_rxbuflock
112 */
113
114#include <sys/param.h>
115#include <sys/types.h>
116#include <sys/signal.h>
117#include <sys/stream.h>
118#include <sys/termio.h>
119#include <sys/errno.h>
120#include <sys/file.h>
121#include <sys/cmn_err.h>
122#include <sys/stropts.h>
123#include <sys/strsubr.h>
124#include <sys/strtty.h>
125#include <sys/kbio.h>
126#include <sys/cred.h>
127#include <sys/stat.h>
128#include <sys/consdev.h>
129#include <sys/kmem.h>
130#include <sys/modctl.h>
131#include <sys/ddi.h>
132#include <sys/sunddi.h>
133#include <sys/pci.h>
134#include <sys/errno.h>
135#include <sys/mac.h>
136#include <sys/dlpi.h>
137#include <sys/ethernet.h>
138#include <sys/list.h>
139#include <sys/byteorder.h>
140#include <sys/strsun.h>
141#include <sys/policy.h>
142#include <inet/common.h>
143#include <inet/nd.h>
144#include <inet/mi.h>
145#include <inet/wifi_ioctl.h>
146#include <sys/mac_wifi.h>
147#include "ath_hal.h"
148#include "ath_impl.h"
149#include "ath_aux.h"
150#include "ath_rate.h"
151
152#define	ATH_MAX_RSSI	63	/* max rssi */
153
154extern void ath_halfix_init(void);
155extern void ath_halfix_finit(void);
156extern int32_t ath_getset(ath_t *asc, mblk_t *mp, uint32_t cmd);
157
158/*
159 * PIO access attributes for registers
160 */
static ddi_device_acc_attr_t ath_reg_accattr = {
	DDI_DEVICE_ATTR_V0,	/* attribute structure version */
	DDI_STRUCTURE_LE_ACC,	/* device registers are little-endian */
	DDI_STRICTORDER_ACC	/* accesses must not be reordered/merged */
};
166
167/*
168 * DMA access attributes for descriptors: NOT to be byte swapped.
169 */
static ddi_device_acc_attr_t ath_desc_accattr = {
	DDI_DEVICE_ATTR_V0,	/* attribute structure version */
	DDI_STRUCTURE_LE_ACC,	/* descriptors are little-endian, no swap */
	DDI_STRICTORDER_ACC	/* strict ordering for descriptor updates */
};
175
176/*
177 * Describes the chip's DMA engine
178 */
/* DMA attributes for TX/RX packet buffers: byte-aligned, single cookie */
static ddi_dma_attr_t ath_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffU,		/* high address: 32-bit DMA only */
	0x3ffffU,		/* counter register max */
	1,			/* alignment: packet buffers need none */
	0xFFF,			/* burst sizes (bitmask of supported bursts) */
	1,			/* minimum transfer size */
	0x3ffffU,		/* max transfer size */
	0xffffffffU,		/* address register max */
	1,			/* no scatter-gather: exactly one cookie */
	1,			/* granularity of device */
	0,			/* DMA flags */
};
193
/* DMA attributes for the descriptor area: 4KB-aligned, single cookie */
static ddi_dma_attr_t ath_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffU,		/* high address: 32-bit DMA only */
	0xffffffffU,		/* counter register max */
	0x1000,			/* alignment: descriptor ring on 4KB boundary */
	0xFFF,			/* burst sizes (bitmask of supported bursts) */
	1,			/* minimum transfer size */
	0xffffffffU,		/* max transfer size */
	0xffffffffU,		/* address register max */
	1,			/* no scatter-gather: exactly one cookie */
	1,			/* granularity of device */
	0,			/* DMA flags */
};
208
209static kmutex_t ath_loglock;
210static void *ath_soft_state_p = NULL;
211static int ath_dwelltime = 150;		/* scan interval, ms */
212
213static int	ath_m_stat(void *,  uint_t, uint64_t *);
214static int	ath_m_start(void *);
215static void	ath_m_stop(void *);
216static int	ath_m_promisc(void *, boolean_t);
217static int	ath_m_multicst(void *, boolean_t, const uint8_t *);
218static int	ath_m_unicst(void *, const uint8_t *);
219static mblk_t	*ath_m_tx(void *, mblk_t *);
220static void	ath_m_ioctl(void *, queue_t *, mblk_t *);
/*
 * GLDv3 entry points registered with the MAC layer ("Module 1" in the
 * file-header diagram).  MC_IOCTL advertises that the optional mc_ioctl
 * member is valid; mc_resources and mc_getcapab are not provided.
 */
static mac_callbacks_t ath_m_callbacks = {
	MC_IOCTL,		/* mask of valid optional callbacks */
	ath_m_stat,
	ath_m_start,
	ath_m_stop,
	ath_m_promisc,
	ath_m_multicst,
	ath_m_unicst,
	ath_m_tx,
	NULL,		/* mc_resources; */
	ath_m_ioctl,
	NULL		/* mc_getcapab */
};
234
235/*
236 * Available debug flags:
237 * ATH_DBG_INIT, ATH_DBG_GLD, ATH_DBG_HAL, ATH_DBG_INT, ATH_DBG_ATTACH,
238 * ATH_DBG_DETACH, ATH_DBG_AUX, ATH_DBG_WIFICFG, ATH_DBG_OSDEP
239 */
240uint32_t ath_dbg_flags = 0;
241
242/*
243 * Exception/warning cases not leading to panic.
244 */
void
ath_problem(const int8_t *fmt, ...)
{
	va_list args;

	/* serialize with other log paths so messages don't interleave */
	mutex_enter(&ath_loglock);

	va_start(args, fmt);
	vcmn_err(CE_WARN, fmt, args);	/* warning severity, not a panic */
	va_end(args);

	mutex_exit(&ath_loglock);
}
258
259/*
260 * Normal log information independent of debug.
261 */
void
ath_log(const int8_t *fmt, ...)
{
	va_list args;

	/* serialize with other log paths so messages don't interleave */
	mutex_enter(&ath_loglock);

	va_start(args, fmt);
	vcmn_err(CE_CONT, fmt, args);	/* continuation (informational) */
	va_end(args);

	mutex_exit(&ath_loglock);
}
275
276void
277ath_dbg(uint32_t dbg_flags, const int8_t *fmt, ...)
278{
279	va_list args;
280
281	if (dbg_flags & ath_dbg_flags) {
282		mutex_enter(&ath_loglock);
283		va_start(args, fmt);
284		vcmn_err(CE_CONT, fmt, args);
285		va_end(args);
286		mutex_exit(&ath_loglock);
287	}
288}
289
/*
 * Initialize the RX descriptor for bf and append it to the hardware
 * RX chain.  The descriptor starts out self-linked (ds_link points at
 * its own physical address); ath_rx_handler uses that self-link to
 * detect the end-of-chain entry.
 */
void
ath_setup_desc(ath_t *asc, struct ath_buf *bf)
{
	struct ath_desc *ds;

	ds = bf->bf_desc;
	ds->ds_link = bf->bf_daddr;	/* self-linked until chained below */
	ds->ds_data = bf->bf_dma.cookie.dmac_address;
	ds->ds_vdata = bf->bf_dma.mem_va;
	ATH_HAL_SETUPRXDESC(asc->asc_ah, ds,
	    bf->bf_dma.alength,		/* buffer size */
	    0);

	/* link the previous tail descriptor to this one, then advance tail */
	if (asc->asc_rxlink != NULL)
		*asc->asc_rxlink = bf->bf_daddr;
	asc->asc_rxlink = &ds->ds_link;
}
307
308
309/*
310 * Allocate an area of memory and a DMA handle for accessing it
311 */
312static int
313ath_alloc_dma_mem(dev_info_t *devinfo, ddi_dma_attr_t *dma_attr, size_t memsize,
314    ddi_device_acc_attr_t *attr_p, uint_t alloc_flags,
315    uint_t bind_flags, dma_area_t *dma_p)
316{
317	int err;
318
319	/*
320	 * Allocate handle
321	 */
322	err = ddi_dma_alloc_handle(devinfo, dma_attr,
323	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
324	if (err != DDI_SUCCESS)
325		return (DDI_FAILURE);
326
327	/*
328	 * Allocate memory
329	 */
330	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
331	    alloc_flags, DDI_DMA_SLEEP, NULL, &dma_p->mem_va,
332	    &dma_p->alength, &dma_p->acc_hdl);
333	if (err != DDI_SUCCESS)
334		return (DDI_FAILURE);
335
336	/*
337	 * Bind the two together
338	 */
339	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
340	    dma_p->mem_va, dma_p->alength, bind_flags,
341	    DDI_DMA_SLEEP, NULL, &dma_p->cookie, &dma_p->ncookies);
342	if (err != DDI_DMA_MAPPED)
343		return (DDI_FAILURE);
344
345	dma_p->nslots = ~0U;
346	dma_p->size = ~0U;
347	dma_p->token = ~0U;
348	dma_p->offset = 0;
349	return (DDI_SUCCESS);
350}
351
352/*
353 * Free one allocated area of DMAable memory
354 */
355static void
356ath_free_dma_mem(dma_area_t *dma_p)
357{
358	if (dma_p->dma_hdl != NULL) {
359		(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
360		if (dma_p->acc_hdl != NULL) {
361			ddi_dma_mem_free(&dma_p->acc_hdl);
362			dma_p->acc_hdl = NULL;
363		}
364		ddi_dma_free_handle(&dma_p->dma_hdl);
365		dma_p->ncookies = 0;
366		dma_p->dma_hdl = NULL;
367	}
368}
369
370
371static int
372ath_desc_alloc(dev_info_t *devinfo, ath_t *asc)
373{
374	int i, err;
375	size_t size;
376	struct ath_desc *ds;
377	struct ath_buf *bf;
378
379	size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF);
380
381	err = ath_alloc_dma_mem(devinfo, &ath_desc_dma_attr, size,
382	    &ath_desc_accattr, DDI_DMA_CONSISTENT,
383	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &asc->asc_desc_dma);
384
385	/* virtual address of the first descriptor */
386	asc->asc_desc = (struct ath_desc *)asc->asc_desc_dma.mem_va;
387
388	ds = asc->asc_desc;
389	ATH_DEBUG((ATH_DBG_INIT, "ath: ath_desc_alloc(): DMA map: "
390	    "%p (%d) -> %p\n",
391	    asc->asc_desc, asc->asc_desc_dma.alength,
392	    asc->asc_desc_dma.cookie.dmac_address));
393
394	/* allocate data structures to describe TX/RX DMA buffers */
395	asc->asc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF);
396	bf = (struct ath_buf *)kmem_zalloc(asc->asc_vbuflen, KM_SLEEP);
397	asc->asc_vbufptr = bf;
398
399	/* DMA buffer size for each TX/RX packet */
400	asc->asc_dmabuf_size = roundup(1000 + sizeof (struct ieee80211_frame) +
401	    IEEE80211_MTU + IEEE80211_CRC_LEN +
402	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
403	    IEEE80211_WEP_CRCLEN), asc->asc_cachelsz);
404
405	/* create RX buffer list and allocate DMA memory */
406	list_create(&asc->asc_rxbuf_list, sizeof (struct ath_buf),
407	    offsetof(struct ath_buf, bf_node));
408	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++) {
409		bf->bf_desc = ds;
410		bf->bf_daddr = asc->asc_desc_dma.cookie.dmac_address +
411		    ((uintptr_t)ds - (uintptr_t)asc->asc_desc);
412		list_insert_tail(&asc->asc_rxbuf_list, bf);
413
414		/* alloc DMA memory */
415		err = ath_alloc_dma_mem(devinfo, &ath_dma_attr,
416		    asc->asc_dmabuf_size, &ath_desc_accattr,
417		    DDI_DMA_STREAMING, DDI_DMA_READ | DDI_DMA_STREAMING,
418		    &bf->bf_dma);
419		if (err != DDI_SUCCESS)
420			return (err);
421	}
422
423	/* create TX buffer list and allocate DMA memory */
424	list_create(&asc->asc_txbuf_list, sizeof (struct ath_buf),
425	    offsetof(struct ath_buf, bf_node));
426	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++) {
427		bf->bf_desc = ds;
428		bf->bf_daddr = asc->asc_desc_dma.cookie.dmac_address +
429		    ((uintptr_t)ds - (uintptr_t)asc->asc_desc);
430		list_insert_tail(&asc->asc_txbuf_list, bf);
431
432		/* alloc DMA memory */
433		err = ath_alloc_dma_mem(devinfo, &ath_dma_attr,
434		    asc->asc_dmabuf_size, &ath_desc_accattr,
435		    DDI_DMA_STREAMING, DDI_DMA_STREAMING, &bf->bf_dma);
436		if (err != DDI_SUCCESS)
437			return (err);
438	}
439
440	return (DDI_SUCCESS);
441}
442
443static void
444ath_desc_free(ath_t *asc)
445{
446	struct ath_buf *bf;
447
448	/* Free TX DMA buffer */
449	bf = list_head(&asc->asc_txbuf_list);
450	while (bf != NULL) {
451		ath_free_dma_mem(&bf->bf_dma);
452		list_remove(&asc->asc_txbuf_list, bf);
453		bf = list_head(&asc->asc_txbuf_list);
454	}
455	list_destroy(&asc->asc_txbuf_list);
456
457	/* Free RX DMA uffer */
458	bf = list_head(&asc->asc_rxbuf_list);
459	while (bf != NULL) {
460		ath_free_dma_mem(&bf->bf_dma);
461		list_remove(&asc->asc_rxbuf_list, bf);
462		bf = list_head(&asc->asc_rxbuf_list);
463	}
464	list_destroy(&asc->asc_rxbuf_list);
465
466	/* Free descriptor DMA buffer */
467	ath_free_dma_mem(&asc->asc_desc_dma);
468
469	kmem_free((void *)asc->asc_vbufptr, asc->asc_vbuflen);
470	asc->asc_vbufptr = NULL;
471}
472
/*
 * Debug dump of one RX descriptor.  The trailing character marks the
 * completion state: ' ' not done, '*' done OK, '!' done with error.
 */
static void
ath_printrxbuf(struct ath_buf *bf, int32_t done)
{
	struct ath_desc *ds = bf->bf_desc;

	ATH_DEBUG((ATH_DBG_RECV, "ath: R (%p %p) %08x %08x %08x "
	    "%08x %08x %08x %c\n",
	    ds, bf->bf_daddr,
	    ds->ds_link, ds->ds_data,
	    ds->ds_ctl0, ds->ds_ctl1,
	    ds->ds_hw[0], ds->ds_hw[1],
	    !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!'));
}
486
487static void
488ath_rx_handler(ath_t *asc)
489{
490	ieee80211com_t *ic = (ieee80211com_t *)asc;
491	struct ath_buf *bf;
492	struct ath_hal *ah = asc->asc_ah;
493	struct ath_desc *ds;
494	mblk_t *rx_mp;
495	struct ieee80211_frame *wh;
496	int32_t len, loop = 1;
497	uint8_t phyerr;
498	HAL_STATUS status;
499	HAL_NODE_STATS hal_node_stats;
500	struct ieee80211_node *in;
501
502	do {
503		mutex_enter(&asc->asc_rxbuflock);
504		bf = list_head(&asc->asc_rxbuf_list);
505		if (bf == NULL) {
506			ATH_DEBUG((ATH_DBG_RECV, "ath: ath_rx_handler(): "
507			    "no buffer\n"));
508			mutex_exit(&asc->asc_rxbuflock);
509			break;
510		}
511		ASSERT(bf->bf_dma.cookie.dmac_address != NULL);
512		ds = bf->bf_desc;
513		if (ds->ds_link == bf->bf_daddr) {
514			/*
515			 * Never process the self-linked entry at the end,
516			 * this may be met at heavy load.
517			 */
518			mutex_exit(&asc->asc_rxbuflock);
519			break;
520		}
521
522		status = ATH_HAL_RXPROCDESC(ah, ds,
523		    bf->bf_daddr,
524		    ATH_PA2DESC(asc, ds->ds_link));
525		if (status == HAL_EINPROGRESS) {
526			mutex_exit(&asc->asc_rxbuflock);
527			break;
528		}
529		list_remove(&asc->asc_rxbuf_list, bf);
530		mutex_exit(&asc->asc_rxbuflock);
531
532		if (ds->ds_rxstat.rs_status != 0) {
533			if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC)
534				asc->asc_stats.ast_rx_crcerr++;
535			if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO)
536				asc->asc_stats.ast_rx_fifoerr++;
537			if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT)
538				asc->asc_stats.ast_rx_badcrypt++;
539			if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) {
540				asc->asc_stats.ast_rx_phyerr++;
541				phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
542				asc->asc_stats.ast_rx_phy[phyerr]++;
543			}
544			goto rx_next;
545		}
546		len = ds->ds_rxstat.rs_datalen;
547
548		/* less than sizeof(struct ieee80211_frame) */
549		if (len < 20) {
550			asc->asc_stats.ast_rx_tooshort++;
551			goto rx_next;
552		}
553
554		if ((rx_mp = allocb(asc->asc_dmabuf_size, BPRI_MED)) == NULL) {
555			ath_problem("ath: ath_rx_handler(): "
556			    "allocing mblk buffer failed.\n");
557			return;
558		}
559
560		ATH_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORCPU);
561		bcopy(bf->bf_dma.mem_va, rx_mp->b_rptr, len);
562
563		rx_mp->b_wptr += len;
564		wh = (struct ieee80211_frame *)rx_mp->b_rptr;
565		if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
566		    IEEE80211_FC0_TYPE_CTL) {
567			/*
568			 * Ignore control frame received in promisc mode.
569			 */
570			freemsg(rx_mp);
571			goto rx_next;
572		}
573		/* Remove the CRC at the end of IEEE80211 frame */
574		rx_mp->b_wptr -= IEEE80211_CRC_LEN;
575#ifdef DEBUG
576		ath_printrxbuf(bf, status == HAL_OK);
577#endif /* DEBUG */
578		/*
579		 * Locate the node for sender, track state, and then
580		 * pass the (referenced) node up to the 802.11 layer
581		 * for its use.
582		 */
583		in = ieee80211_find_rxnode(ic, wh);
584
585		/*
586		 * Send frame up for processing.
587		 */
588		(void) ieee80211_input(ic, rx_mp, in,
589		    ds->ds_rxstat.rs_rssi,
590		    ds->ds_rxstat.rs_tstamp);
591
592		ieee80211_free_node(in);
593
594rx_next:
595		mutex_enter(&asc->asc_rxbuflock);
596		list_insert_tail(&asc->asc_rxbuf_list, bf);
597		mutex_exit(&asc->asc_rxbuflock);
598		ath_setup_desc(asc, bf);
599	} while (loop);
600
601	/* rx signal state monitoring */
602	ATH_HAL_RXMONITOR(ah, &hal_node_stats, &asc->asc_curchan);
603}
604
/*
 * Debug dump of one TX descriptor.  The trailing character marks the
 * completion state: ' ' not done, '*' done OK, '!' done with error.
 */
static void
ath_printtxbuf(struct ath_buf *bf, int done)
{
	struct ath_desc *ds = bf->bf_desc;

	ATH_DEBUG((ATH_DBG_SEND, "ath: T(%p %p) %08x %08x %08x %08x %08x"
	    " %08x %08x %08x %c\n",
	    ds, bf->bf_daddr,
	    ds->ds_link, ds->ds_data,
	    ds->ds_ctl0, ds->ds_ctl1,
	    ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3],
	    !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!'));
}
618
619/*
620 * The input parameter mp has following assumption:
621 * For data packets, GLDv3 mac_wifi plugin allocates and fills the
622 * ieee80211 header. For management packets, net80211 allocates and
623 * fills the ieee80211 header. In both cases, enough spaces in the
624 * header are left for encryption option.
625 */
/*
 * Encapsulate (crypto), copy, and queue one frame for transmission on
 * the ath_buf 'bf'.  Returns 0 on success; EIO if crypto encapsulation
 * fails, 1 for an unknown 802.11 frame type (in both failure cases the
 * caller still owns mp and bf).  Holds txq->axq_lock only while
 * linking the descriptor onto the queue.
 */
static int32_t
ath_tx_start(ath_t *asc, struct ieee80211_node *in, struct ath_buf *bf,
    mblk_t *mp)
{
	ieee80211com_t *ic = (ieee80211com_t *)asc;
	struct ieee80211_frame *wh;
	struct ath_hal *ah = asc->asc_ah;
	uint32_t subtype, flags, ctsduration;
	int32_t keyix, iswep, hdrlen, pktlen, mblen, mbslen, try0;
	uint8_t rix, cix, txrate, ctsrate;
	struct ath_desc *ds;
	struct ath_txq *txq;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	caddr_t dest;

	/*
	 * CRC are added by H/W, not encaped by driver,
	 * but we must count it in pkt length.
	 */
	pktlen = IEEE80211_CRC_LEN;

	wh = (struct ieee80211_frame *)mp->b_rptr;
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	keyix = HAL_TXKEYIX_INVALID;
	hdrlen = sizeof (struct ieee80211_frame);
	if (iswep != 0) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ic, mp);
		if (k == NULL) {
			ATH_DEBUG((ATH_DBG_AUX, "crypto_encap failed\n"));
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			return (EIO);
		}
		cip = k->wk_cipher;
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len above will
		 * account for it. Otherwise we need to add it to the
		 * packet length.
		 */
		hdrlen += cip->ic_header;
		pktlen += cip->ic_trailer;
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0)
			pktlen += cip->ic_miclen;
		keyix = k->wk_keyix;

		/* packet header may have moved, reset our local pointer */
		wh = (struct ieee80211_frame *)mp->b_rptr;
	}

	/* flatten the whole mblk chain into the buffer's DMA memory */
	dest = bf->bf_dma.mem_va;
	for (; mp != NULL; mp = mp->b_cont) {
		mblen = MBLKL(mp);
		bcopy(mp->b_rptr, dest, mblen);
		dest += mblen;
	}
	mbslen = (uintptr_t)dest - (uintptr_t)bf->bf_dma.mem_va;
	pktlen += mbslen;

	bf->bf_in = in;

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = asc->asc_currates;
	ASSERT(rt != NULL);

	/*
	 * The 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (in->in_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		shortPreamble = AH_TRUE;
		asc->asc_stats.ast_tx_shortpre++;
	} else {
		shortPreamble = AH_FALSE;
	}

	an = ATH_NODE(in);

	/*
	 * Calculate Atheros packet type from IEEE80211 packet header
	 * and setup for rate calculations.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = HAL_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = HAL_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = HAL_PKT_TYPE_ATIM;
		else
			atype = HAL_PKT_TYPE_NORMAL;
		rix = 0;	/* lowest rate */
		try0 = ATH_TXMAXTRY;
		if (shortPreamble)
			txrate = an->an_tx_mgtratesp;
		else
			txrate = an->an_tx_mgtrate;
		/* force all ctl frames to highest queue */
		txq = asc->asc_ac2q[WME_AC_VO];
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = HAL_PKT_TYPE_PSPOLL;
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		rix = 0;	/* lowest rate */
		try0 = ATH_TXMAXTRY;
		if (shortPreamble)
			txrate = an->an_tx_mgtratesp;
		else
			txrate = an->an_tx_mgtrate;
		/* force all ctl frames to highest queue */
		txq = asc->asc_ac2q[WME_AC_VO];
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = HAL_PKT_TYPE_NORMAL;
		rix = an->an_tx_rix0;
		try0 = an->an_tx_try0;
		if (shortPreamble)
			txrate = an->an_tx_rate0sp;
		else
			txrate = an->an_tx_rate0;
		/* Always use background queue */
		txq = asc->asc_ac2q[WME_AC_BK];
		break;
	default:
		/* Unknown 802.11 frame */
		asc->asc_stats.ast_tx_invalid++;
		return (1);
	}
	/*
	 * Calculate miscellaneous flags.
	 */
	flags = HAL_TXDESC_CLRDMASK;
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
		asc->asc_stats.ast_tx_noack++;
	} else if (pktlen > ic->ic_rtsthreshold) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		asc->asc_stats.ast_tx_rts++;
	}

	/*
	 * Calculate duration.  This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
	    IEEE80211_FC0_TYPE_CTL) {
		uint16_t dur;
		dur = ath_hal_computetxtime(ah, rt, IEEE80211_ACK_SIZE,
		    rix, shortPreamble);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		*(uint16_t *)wh->i_dur = LE_16(dur);
	}

	/*
	 * Calculate RTS/CTS rate and duration if needed.
	 */
	ctsduration = 0;
	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
		/*
		 * CTS transmit rate is derived from the transmit rate
		 * by looking in the h/w rate table.  We must also factor
		 * in whether or not a short preamble is to be used.
		 */
		cix = rt->info[rix].controlRate;
		ctsrate = rt->info[cix].rateCode;
		if (shortPreamble)
			ctsrate |= rt->info[cix].shortPreamble;
		/*
		 * Compute the transmit duration based on the size
		 * of an ACK frame.  We call into the HAL to do the
		 * computation since it depends on the characteristics
		 * of the actual PHY being used.
		 */
		if (flags & HAL_TXDESC_RTSENA) {	/* SIFS + CTS */
			ctsduration += ath_hal_computetxtime(ah,
			    rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
		}
		/* SIFS + data */
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, shortPreamble);
		if ((flags & HAL_TXDESC_NOACK) == 0) {	/* SIFS + ACK */
			ctsduration += ath_hal_computetxtime(ah,
			    rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
		}
	} else
		ctsrate = 0;

	/* request a TX-complete interrupt every ATH_TXINTR_PERIOD frames */
	if (++txq->axq_intrcnt >= ATH_TXINTR_PERIOD) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}

	/*
	 * Formulate first tx descriptor with tx controls.
	 */
	ATH_HAL_SETUPTXDESC(ah, ds,
	    pktlen,			/* packet length */
	    hdrlen,			/* header length */
	    atype,			/* Atheros packet type */
	    MIN(in->in_txpower, 60),	/* txpower */
	    txrate, try0,		/* series 0 rate/tries */
	    keyix,			/* key cache index */
	    an->an_tx_antenna,		/* antenna mode */
	    flags,			/* flags */
	    ctsrate,			/* rts/cts rate */
	    ctsduration);		/* rts/cts duration */
	bf->bf_flags = flags;

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ATH_DEBUG((ATH_DBG_SEND, "ath: ath_xmit(): to %s totlen=%d "
	    "an->an_tx_rate1sp=%d tx_rate2sp=%d tx_rate3sp=%d "
	    "qnum=%d rix=%d sht=%d dur = %d\n",
	    ieee80211_macaddr_sprintf(wh->i_addr1), mbslen, an->an_tx_rate1sp,
	    an->an_tx_rate2sp, an->an_tx_rate3sp,
	    txq->axq_qnum, rix, shortPreamble, *(uint16_t *)wh->i_dur));

	/*
	 * Setup the multi-rate retry state only when we're
	 * going to use it.  This assumes ath_hal_setuptxdesc
	 * initializes the descriptors (so we don't have to)
	 * when the hardware supports multi-rate retry and
	 * we don't use it.
	 */
	if (try0 != ATH_TXMAXTRY)
		ATH_HAL_SETUPXTXDESC(ah, ds,
		    an->an_tx_rate1sp, 2,	/* series 1 */
		    an->an_tx_rate2sp, 2,	/* series 2 */
		    an->an_tx_rate3sp, 2);	/* series 3 */

	/* single-segment frame: one descriptor describes the whole buffer */
	ds->ds_link = 0;
	ds->ds_data = bf->bf_dma.cookie.dmac_address;
	ATH_HAL_FILLTXDESC(ah, ds,
	    mbslen,		/* segment length */
	    AH_TRUE,		/* first segment */
	    AH_TRUE,		/* last segment */
	    ds);		/* first descriptor */

	ATH_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORDEV);

	/* link onto the TX queue: hand to hardware if queue was empty */
	mutex_enter(&txq->axq_lock);
	list_insert_tail(&txq->axq_list, bf);
	if (txq->axq_link == NULL) {
		ATH_HAL_PUTTXBUF(ah, txq->axq_qnum, bf->bf_daddr);
	} else {
		*txq->axq_link = bf->bf_daddr;
	}
	txq->axq_link = &ds->ds_link;
	mutex_exit(&txq->axq_lock);

	ATH_HAL_TXSTART(ah, txq->axq_qnum);

	ic->ic_stats.is_tx_frags++;
	ic->ic_stats.is_tx_bytes += pktlen;

	return (0);
}
905
906/*
907 * Transmit a management frame.  On failure we reclaim the skbuff.
908 * Note that management frames come directly from the 802.11 layer
909 * and do not honor the send queue flow control.  Need to investigate
910 * using priority queueing so management frames can bypass data.
911 */
static int
ath_xmit(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
{
	ath_t *asc = (ath_t *)ic;
	struct ath_hal *ah = asc->asc_ah;
	struct ieee80211_node *in = NULL;
	struct ath_buf *bf = NULL;
	struct ieee80211_frame *wh;
	int error = 0;

	ASSERT(mp->b_next == NULL);

	/*
	 * Interface down: non-data (management) frames are freed here;
	 * data frames are NOT freed — the caller keeps ownership on ENXIO.
	 */
	if (!ATH_IS_RUNNING(asc)) {
		if ((type & IEEE80211_FC0_TYPE_MASK) !=
		    IEEE80211_FC0_TYPE_DATA) {
			freemsg(mp);
		}
		return (ENXIO);
	}

	/* Grab a TX buffer */
	mutex_enter(&asc->asc_txbuflock);
	bf = list_head(&asc->asc_txbuf_list);
	if (bf != NULL)
		list_remove(&asc->asc_txbuf_list, bf);
	if (list_empty(&asc->asc_txbuf_list)) {
		ATH_DEBUG((ATH_DBG_SEND, "ath: ath_mgmt_send(): "
		    "stop queue\n"));
		asc->asc_stats.ast_tx_qstop++;
	}
	mutex_exit(&asc->asc_txbuflock);
	if (bf == NULL) {
		ATH_DEBUG((ATH_DBG_SEND, "ath: ath_mgmt_send(): discard, "
		    "no xmit buf\n"));
		ic->ic_stats.is_tx_nobuf++;
		if ((type & IEEE80211_FC0_TYPE_MASK) ==
		    IEEE80211_FC0_TYPE_DATA) {
			/* ask the softint to reschedule data transmission */
			asc->asc_stats.ast_tx_nobuf++;
			mutex_enter(&asc->asc_resched_lock);
			asc->asc_resched_needed = B_TRUE;
			mutex_exit(&asc->asc_resched_lock);
		} else {
			asc->asc_stats.ast_tx_nobufmgt++;
			freemsg(mp);
		}
		return (ENOMEM);
	}

	wh = (struct ieee80211_frame *)mp->b_rptr;

	/* Locate node */
	in = ieee80211_find_txnode(ic,  wh->i_addr1);
	if (in == NULL) {
		error = EIO;
		goto bad;
	}

	/* mark the node active before encapsulating/stamping the frame */
	in->in_inact = 0;
	switch (type & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_DATA:
		(void) ieee80211_encap(ic, mp, in);
		break;
	default:
		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
			/* fill time stamp */
			uint64_t tsf;
			uint32_t *tstamp;

			tsf = ATH_HAL_GETTSF64(ah);
			/* adjust 100us delay to xmit */
			tsf += 100;
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			tstamp = (uint32_t *)&wh[1];
			tstamp[0] = LE_32(tsf & 0xffffffff);
			tstamp[1] = LE_32(tsf >> 32);
		}
		asc->asc_stats.ast_tx_mgmt++;
		break;
	}

	error = ath_tx_start(asc, in, bf, mp);
	if (error != 0) {
		/*
		 * NOTE: "bad:" is deliberately placed inside this block;
		 * goto bad (node lookup failure above) jumps here and
		 * shares the cleanup with an ath_tx_start() failure.
		 */
bad:
		ic->ic_stats.is_tx_failed++;
		/* give the unused TX buffer back to the free list */
		if (bf != NULL) {
			mutex_enter(&asc->asc_txbuflock);
			list_insert_tail(&asc->asc_txbuf_list, bf);
			mutex_exit(&asc->asc_txbuflock);
		}
	}
	if (in != NULL)
		ieee80211_free_node(in);
	/*
	 * mp was copied into the DMA buffer by ath_tx_start() on success;
	 * failed DATA frames are returned to the caller (not freed) so
	 * they can be requeued.
	 */
	if ((type & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_DATA ||
	    error == 0) {
		freemsg(mp);
	}

	return (error);
}
1012
1013static mblk_t *
1014ath_m_tx(void *arg, mblk_t *mp)
1015{
1016	ath_t *asc = arg;
1017	ieee80211com_t *ic = (ieee80211com_t *)asc;
1018	mblk_t *next;
1019	int error = 0;
1020
1021	/*
1022	 * No data frames go out unless we're associated; this
1023	 * should not happen as the 802.11 layer does not enable
1024	 * the xmit queue until we enter the RUN state.
1025	 */
1026	if (ic->ic_state != IEEE80211_S_RUN) {
1027		ATH_DEBUG((ATH_DBG_SEND, "ath: ath_m_tx(): "
1028		    "discard, state %u\n", ic->ic_state));
1029		asc->asc_stats.ast_tx_discard++;
1030		freemsgchain(mp);
1031		return (NULL);
1032	}
1033
1034	while (mp != NULL) {
1035		next = mp->b_next;
1036		mp->b_next = NULL;
1037		error = ath_xmit(ic, mp, IEEE80211_FC0_TYPE_DATA);
1038		if (error != 0) {
1039			mp->b_next = next;
1040			if (error == ENOMEM) {
1041				break;
1042			} else {
1043				freemsgchain(mp);	/* CR6501759 issues */
1044				return (NULL);
1045			}
1046		}
1047		mp = next;
1048	}
1049
1050	return (mp);
1051
1052}
1053
/*
 * Reap completed transmit descriptors from one hardware queue.
 * For each finished frame: update per-node and driver statistics,
 * return the ath_buf to the free list, and trigger mac_tx_update()
 * if ath_xmit() flagged a stalled send.  Returns the number of
 * acknowledged frames.
 */
static int
ath_tx_processq(ath_t *asc, struct ath_txq *txq)
{
	ieee80211com_t *ic = (ieee80211com_t *)asc;
	struct ath_hal *ah = asc->asc_ah;
	struct ath_buf *bf;
	struct ath_desc *ds;
	struct ieee80211_node *in;
	int32_t sr, lr, nacked = 0;
	HAL_STATUS status;
	struct ath_node *an;

	for (;;) {
		mutex_enter(&txq->axq_lock);
		bf = list_head(&txq->axq_list);
		if (bf == NULL) {
			/* Queue drained; invalidate the hw link pointer. */
			txq->axq_link = NULL;
			mutex_exit(&txq->axq_lock);
			break;
		}
		ds = bf->bf_desc;	/* last decriptor */
		status = ATH_HAL_TXPROCDESC(ah, ds);
#ifdef DEBUG
		ath_printtxbuf(bf, status == HAL_OK);
#endif
		if (status == HAL_EINPROGRESS) {
			/* Hardware still owns this descriptor; stop here. */
			mutex_exit(&txq->axq_lock);
			break;
		}
		list_remove(&txq->axq_list, bf);
		mutex_exit(&txq->axq_lock);
		in = bf->bf_in;
		if (in != NULL) {
			an = ATH_NODE(in);
			/* Successful transmition */
			if (ds->ds_txstat.ts_status == 0) {
				an->an_tx_ok++;
				an->an_tx_antenna =
				    ds->ds_txstat.ts_antenna;
				if (ds->ds_txstat.ts_rate &
				    HAL_TXSTAT_ALTRATE)
					asc->asc_stats.ast_tx_altrate++;
				asc->asc_stats.ast_tx_rssidelta =
				    ds->ds_txstat.ts_rssi -
				    asc->asc_stats.ast_tx_rssi;
				asc->asc_stats.ast_tx_rssi =
				    ds->ds_txstat.ts_rssi;
			} else {
				an->an_tx_err++;
				if (ds->ds_txstat.ts_status &
				    HAL_TXERR_XRETRY)
					asc->asc_stats.
					    ast_tx_xretries++;
				if (ds->ds_txstat.ts_status &
				    HAL_TXERR_FIFO)
					asc->asc_stats.ast_tx_fifoerr++;
				if (ds->ds_txstat.ts_status &
				    HAL_TXERR_FILT)
					asc->asc_stats.
					    ast_tx_filtered++;
				an->an_tx_antenna = 0;	/* invalidate */
			}
			sr = ds->ds_txstat.ts_shortretry;
			lr = ds->ds_txstat.ts_longretry;
			asc->asc_stats.ast_tx_shortretry += sr;
			asc->asc_stats.ast_tx_longretry += lr;
			/*
			 * Hand the descriptor to the rate control algorithm.
			 */
			if ((ds->ds_txstat.ts_status & HAL_TXERR_FILT) == 0 &&
			    (bf->bf_flags & HAL_TXDESC_NOACK) == 0) {
				/*
				 * If frame was ack'd update the last rx time
				 * used to workaround phantom bmiss interrupts.
				 *
				 * NOTE(review): an_tx_ok/an_tx_err were
				 * already bumped above, so acked (and NOACK-
				 * eligible errored) frames are counted twice
				 * in the per-node counters — confirm whether
				 * that double count is intended.
				 */
				if (ds->ds_txstat.ts_status == 0) {
					nacked++;
					an->an_tx_ok++;
				} else {
					an->an_tx_err++;
				}
				an->an_tx_retr += sr + lr;
			}
		}
		/* Detach from the node and recycle the tx buffer. */
		bf->bf_in = NULL;
		mutex_enter(&asc->asc_txbuflock);
		list_insert_tail(&asc->asc_txbuf_list, bf);
		mutex_exit(&asc->asc_txbuflock);
		/*
		 * Reschedule stalled outbound packets
		 */
		mutex_enter(&asc->asc_resched_lock);
		if (asc->asc_resched_needed) {
			asc->asc_resched_needed = B_FALSE;
			mac_tx_update(ic->ic_mach);
		}
		mutex_exit(&asc->asc_resched_lock);
	}
	return (nacked);
}
1154
1155
1156static void
1157ath_tx_handler(ath_t *asc)
1158{
1159	int i;
1160
1161	/*
1162	 * Process each active queue.
1163	 */
1164	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
1165		if (ATH_TXQ_SETUP(asc, i)) {
1166			(void) ath_tx_processq(asc, &asc->asc_txq[i]);
1167		}
1168	}
1169}
1170
1171static struct ieee80211_node *
1172ath_node_alloc(ieee80211com_t *ic)
1173{
1174	struct ath_node *an;
1175	ath_t *asc = (ath_t *)ic;
1176
1177	an = kmem_zalloc(sizeof (struct ath_node), KM_SLEEP);
1178	ath_rate_update(asc, &an->an_node, 0);
1179	return (&an->an_node);
1180}
1181
1182static void
1183ath_node_free(struct ieee80211_node *in)
1184{
1185	ieee80211com_t *ic = in->in_ic;
1186	ath_t *asc = (ath_t *)ic;
1187	struct ath_buf *bf;
1188	struct ath_txq *txq;
1189	int32_t i;
1190
1191	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
1192		if (ATH_TXQ_SETUP(asc, i)) {
1193			txq = &asc->asc_txq[i];
1194			mutex_enter(&txq->axq_lock);
1195			bf = list_head(&txq->axq_list);
1196			while (bf != NULL) {
1197				if (bf->bf_in == in) {
1198					bf->bf_in = NULL;
1199				}
1200				bf = list_next(&txq->axq_list, bf);
1201			}
1202			mutex_exit(&txq->axq_lock);
1203		}
1204	}
1205	ic->ic_node_cleanup(in);
1206	if (in->in_wpa_ie != NULL)
1207		ieee80211_free(in->in_wpa_ie);
1208	kmem_free(in, sizeof (struct ath_node));
1209}
1210
1211static void
1212ath_next_scan(void *arg)
1213{
1214	ieee80211com_t *ic = arg;
1215	ath_t *asc = (ath_t *)ic;
1216
1217	asc->asc_scan_timer = 0;
1218	if (ic->ic_state == IEEE80211_S_SCAN) {
1219		asc->asc_scan_timer = timeout(ath_next_scan, (void *)asc,
1220		    drv_usectohz(ath_dwelltime * 1000));
1221		ieee80211_next_scan(ic);
1222	}
1223}
1224
/*
 * Cancel the pending scan timer.  ath_next_scan() re-arms itself
 * and overwrites asc_scan_timer, so loop until untimeout() has
 * been issued against the id currently stored — this closes the
 * race with a callback that re-armed concurrently.
 */
static void
ath_stop_scantimer(ath_t *asc)
{
	timeout_id_t tmp_id = 0;

	while ((asc->asc_scan_timer != 0) && (tmp_id != asc->asc_scan_timer)) {
		tmp_id = asc->asc_scan_timer;
		(void) untimeout(tmp_id);
	}
	asc->asc_scan_timer = 0;
}
1236
/*
 * 802.11 state change handler, interposed on the net80211 state
 * machine (the saved parent handler asc_newstate is invoked below
 * to complete the transition).  Reprograms the LED, channel, rx
 * filter, BSSID/association id, WEP key MAC bindings and beacon
 * timers to match the new state, then (re)arms the watchdog or
 * scan timer as appropriate.
 */
static int32_t
ath_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
{
	ath_t *asc = (ath_t *)ic;
	struct ath_hal *ah = asc->asc_ah;
	struct ieee80211_node *in;
	int32_t i, error;
	uint8_t *bssid;
	uint32_t rfilt;
	enum ieee80211_state ostate;

	/* LED pattern per target state, indexed by ieee80211_state. */
	static const HAL_LED_STATE leds[] = {
	    HAL_LED_INIT,	/* IEEE80211_S_INIT */
	    HAL_LED_SCAN,	/* IEEE80211_S_SCAN */
	    HAL_LED_AUTH,	/* IEEE80211_S_AUTH */
	    HAL_LED_ASSOC, 	/* IEEE80211_S_ASSOC */
	    HAL_LED_RUN, 	/* IEEE80211_S_RUN */
	};
	if (!ATH_IS_RUNNING(asc))
		return (0);

	ostate = ic->ic_state;
	if (nstate != IEEE80211_S_SCAN)
		ath_stop_scantimer(asc);

	ATH_LOCK(asc);
	ATH_HAL_SETLEDSTATE(ah, leds[nstate]);	/* set LED */

	if (nstate == IEEE80211_S_INIT) {
		/* Going down: mask beacon interrupts, skip hw programming. */
		asc->asc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		ATH_HAL_INTRSET(ah, asc->asc_imask &~ HAL_INT_GLOBAL);
		ATH_UNLOCK(asc);
		goto done;
	}
	in = ic->ic_bss;
	error = ath_chan_set(asc, ic->ic_curchan);
	if (error != 0) {
		if (nstate != IEEE80211_S_SCAN) {
			ATH_UNLOCK(asc);
			ieee80211_reset_chan(ic);
			goto bad;
		}
	}

	rfilt = ath_calcrxfilter(asc);
	/* While scanning, receive relative to our own MAC address. */
	if (nstate == IEEE80211_S_SCAN)
		bssid = ic->ic_macaddr;
	else
		bssid = in->in_bssid;
	ATH_HAL_SETRXFILTER(ah, rfilt);

	if (nstate == IEEE80211_S_RUN && ic->ic_opmode != IEEE80211_M_IBSS)
		ATH_HAL_SETASSOCID(ah, bssid, in->in_associd);
	else
		ATH_HAL_SETASSOCID(ah, bssid, 0);
	if (ic->ic_flags & IEEE80211_F_PRIVACY) {
		/* Re-bind every valid hw key slot to the new BSSID. */
		for (i = 0; i < IEEE80211_WEP_NKID; i++) {
			if (ATH_HAL_KEYISVALID(ah, i))
				ATH_HAL_KEYSETMAC(ah, i, bssid);
		}
	}

	if ((nstate == IEEE80211_S_RUN) &&
	    (ostate != IEEE80211_S_RUN)) {
		/* Configure the beacon and sleep timers. */
		ath_beacon_config(asc);
	} else {
		asc->asc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		ATH_HAL_INTRSET(ah, asc->asc_imask);
	}
	/*
	 * Reset the rate control state.
	 */
	ath_rate_ctl_reset(asc, nstate);

	if (nstate == IEEE80211_S_RUN && (ostate != IEEE80211_S_RUN)) {
		/* Newly entered RUN: announce via a sysevent. */
		nvlist_t *attr_list = NULL;
		sysevent_id_t eid;
		int32_t err = 0;
		char *str_name = "ATH";
		char str_value[256] = {0};

		ATH_DEBUG((ATH_DBG_80211, "ath: ath new state(RUN): "
		    "ic_flags=0x%08x iv=%d"
		    " bssid=%s capinfo=0x%04x chan=%d\n",
		    ic->ic_flags,
		    in->in_intval,
		    ieee80211_macaddr_sprintf(in->in_bssid),
		    in->in_capinfo,
		    ieee80211_chan2ieee(ic, in->in_chan)));

		(void) sprintf(str_value, "%s%s%d", "-i ",
		    ddi_driver_name(asc->asc_dev),
		    ddi_get_instance(asc->asc_dev));
		if (nvlist_alloc(&attr_list,
		    NV_UNIQUE_NAME_TYPE, KM_SLEEP) == 0) {
			err = nvlist_add_string(attr_list,
			    str_name, str_value);
			if (err != DDI_SUCCESS)
				ATH_DEBUG((ATH_DBG_80211, "ath: "
				    "ath_new_state: error log event\n"));
			err = ddi_log_sysevent(asc->asc_dev,
			    DDI_VENDOR_SUNW, "class",
			    "subclass", attr_list,
			    &eid, DDI_NOSLEEP);
			if (err != DDI_SUCCESS)
				ATH_DEBUG((ATH_DBG_80211, "ath: "
				    "ath_new_state(): error log event\n"));
			nvlist_free(attr_list);
		}
	}

	ATH_UNLOCK(asc);
done:
	/*
	 * Invoke the parent method to complete the work.
	 */
	error = asc->asc_newstate(ic, nstate, arg);
	/*
	 * Finally, start any timers.
	 */
	if (nstate == IEEE80211_S_RUN) {
		ieee80211_start_watchdog(ic, 1);
	} else if ((nstate == IEEE80211_S_SCAN) && (ostate != nstate)) {
		/* start ap/neighbor scan timer */
		ASSERT(asc->asc_scan_timer == 0);
		asc->asc_scan_timer = timeout(ath_next_scan, (void *)asc,
		    drv_usectohz(ath_dwelltime * 1000));
	}
bad:
	return (error);
}
1369
1370/*
1371 * Periodically recalibrate the PHY to account
1372 * for temperature/environment changes.
1373 */
static void
ath_calibrate(ath_t *asc)
{
	struct ath_hal *ah = asc->asc_ah;
	HAL_BOOL iqcaldone;

	asc->asc_stats.ast_per_cal++;

	if (ATH_HAL_GETRFGAIN(ah) == HAL_RFGAIN_NEED_CHANGE) {
		/*
		 * Rfgain is out of bounds, reset the chip
		 * to load new gain values.
		 */
		ATH_DEBUG((ATH_DBG_HAL, "ath: ath_calibrate(): "
		    "Need change RFgain\n"));
		asc->asc_stats.ast_per_rfgain++;
		(void) ath_reset(&asc->asc_isc);
	}
	/*
	 * NOTE(review): iqcaldone is filled in by the HAL but never
	 * examined here — confirm whether the I/Q completion flag
	 * should influence the recalibration schedule.
	 */
	if (!ATH_HAL_CALIBRATE(ah, &asc->asc_curchan, &iqcaldone)) {
		ATH_DEBUG((ATH_DBG_HAL, "ath: ath_calibrate(): "
		    "calibration of channel %u failed\n",
		    asc->asc_curchan.channel));
		asc->asc_stats.ast_per_calfail++;
	}
}
1399
1400static void
1401ath_watchdog(void *arg)
1402{
1403	ath_t *asc = arg;
1404	ieee80211com_t *ic = &asc->asc_isc;
1405	int ntimer = 0;
1406
1407	ATH_LOCK(asc);
1408	ic->ic_watchdog_timer = 0;
1409	if (!ATH_IS_RUNNING(asc)) {
1410		ATH_UNLOCK(asc);
1411		return;
1412	}
1413
1414	if (ic->ic_state == IEEE80211_S_RUN) {
1415		/* periodic recalibration */
1416		ath_calibrate(asc);
1417
1418		/*
1419		 * Start the background rate control thread if we
1420		 * are not configured to use a fixed xmit rate.
1421		 */
1422		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1423			asc->asc_stats.ast_rate_calls ++;
1424			if (ic->ic_opmode == IEEE80211_M_STA)
1425				ath_rate_ctl(ic, ic->ic_bss);
1426			else
1427				ieee80211_iterate_nodes(&ic->ic_sta,
1428				    ath_rate_cb, asc);
1429		}
1430
1431		ntimer = 1;
1432	}
1433	ATH_UNLOCK(asc);
1434
1435	ieee80211_watchdog(ic);
1436	if (ntimer != 0)
1437		ieee80211_start_watchdog(ic, ntimer);
1438}
1439
/*
 * Hard interrupt handler.  The IRQ line may be shared, so the
 * interrupt is claimed only when the device is running and the
 * HAL reports a pending condition.  Fatal errors and rx overruns
 * force a chip reset; rx completions are deferred to the soft
 * interrupt while tx completions are reaped inline.
 */
static uint_t
ath_intr(caddr_t arg)
{
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ath_t *asc = (ath_t *)arg;
	struct ath_hal *ah = asc->asc_ah;
	HAL_INT status;
	ieee80211com_t *ic = (ieee80211com_t *)asc;

	ATH_LOCK(asc);

	if (!ATH_IS_RUNNING(asc)) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		ATH_UNLOCK(asc);
		return (DDI_INTR_UNCLAIMED);
	}

	if (!ATH_HAL_INTRPEND(ah)) {	/* shared irq, not for us */
		ATH_UNLOCK(asc);
		return (DDI_INTR_UNCLAIMED);
	}

	ATH_HAL_GETISR(ah, &status);
	/* Only act on causes we actually asked for. */
	status &= asc->asc_imask;
	if (status & HAL_INT_FATAL) {
		asc->asc_stats.ast_hardware++;
		goto reset;
	} else if (status & HAL_INT_RXORN) {
		asc->asc_stats.ast_rxorn++;
		goto reset;
	} else {
		if (status & HAL_INT_RXEOL) {
			/*
			 * Ran out of rx descriptors; clear the link
			 * pointer so the rx path rebuilds the list.
			 */
			asc->asc_stats.ast_rxeol++;
			asc->asc_rxlink = NULL;
		}
		if (status & HAL_INT_TXURN) {
			/* Tx underrun: raise the hardware trigger level. */
			asc->asc_stats.ast_txurn++;
			ATH_HAL_UPDATETXTRIGLEVEL(ah, AH_TRUE);
		}

		if (status & HAL_INT_RX) {
			/* Defer receive processing to the softint. */
			asc->asc_rx_pend = 1;
			ddi_trigger_softintr(asc->asc_softint_id);
		}
		if (status & HAL_INT_TX) {
			ath_tx_handler(asc);
		}
		ATH_UNLOCK(asc);

		if (status & HAL_INT_SWBA) {
			/* This will occur only in Host-AP or Ad-Hoc mode */
			return (DDI_INTR_CLAIMED);
		}
		if (status & HAL_INT_BMISS) {
			/* Beacon miss: drop back and try to re-associate. */
			if (ic->ic_state == IEEE80211_S_RUN) {
				(void) ieee80211_new_state(ic,
				    IEEE80211_S_ASSOC, -1);
			}
		}
	}

	return (DDI_INTR_CLAIMED);
reset:
	(void) ath_reset(ic);
	ATH_UNLOCK(asc);
	return (DDI_INTR_CLAIMED);
}
1510
1511static uint_t
1512ath_softint_handler(caddr_t data)
1513{
1514	/* LINTED E_BAD_PTR_CAST_ALIGN */
1515	ath_t *asc = (ath_t *)data;
1516
1517	/*
1518	 * Check if the soft interrupt is triggered by another
1519	 * driver at the same level.
1520	 */
1521	ATH_LOCK(asc);
1522	if (asc->asc_rx_pend) { /* Soft interrupt for this driver */
1523		asc->asc_rx_pend = 0;
1524		ATH_UNLOCK(asc);
1525		ath_rx_handler(asc);
1526		return (DDI_INTR_CLAIMED);
1527	}
1528	ATH_UNLOCK(asc);
1529	return (DDI_INTR_UNCLAIMED);
1530}
1531
1532/*
1533 * following are gld callback routine
1534 * ath_gld_send, ath_gld_ioctl, ath_gld_gstat
1535 * are listed in other corresponding sections.
1536 * reset the hardware w/o losing operational state.  this is
1537 * basically a more efficient way of doing ath_gld_stop, ath_gld_start,
1538 * followed by state transitions to the current 802.11
1539 * operational state.  used to recover from errors rx overrun
1540 * and to reset the hardware when rf gain settings must be reset.
1541 */
1542
/*
 * Quiesce the device.  Caller holds the main driver lock; it is
 * dropped temporarily around the 802.11 state transition —
 * presumably because the state machine calls back into driver
 * entry points that take the lock themselves (confirm).
 */
static void
ath_stop_locked(ath_t *asc)
{
	ieee80211com_t *ic = (ieee80211com_t *)asc;
	struct ath_hal *ah = asc->asc_ah;

	ATH_LOCK_ASSERT(asc);
	/* Already stopped: nothing to do. */
	if (!asc->asc_isrunning)
		return;

	/*
	 * Shutdown the hardware and driver:
	 *    reset 802.11 state machine
	 *    turn off timers
	 *    disable interrupts
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ATH_UNLOCK(asc);
	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
	ieee80211_stop_watchdog(ic);
	ATH_LOCK(asc);
	ATH_HAL_INTRSET(ah, 0);
	ath_draintxq(asc);
	if (!asc->asc_invalid) {
		ath_stoprecv(asc);
		ATH_HAL_PHYDISABLE(ah);
	} else {
		/* Hardware is gone; just forget the rx DMA link. */
		asc->asc_rxlink = NULL;
	}
	asc->asc_isrunning = 0;
}
1582
/*
 * GLDv3 stop entry point: quiesce the chip, leave it in the
 * AWAKE power state, and mark the softc invalid so subsequent
 * entry points avoid touching the hardware.
 */
static void
ath_m_stop(void *arg)
{
	ath_t *asc = arg;
	struct ath_hal *ah = asc->asc_ah;

	ATH_LOCK(asc);
	ath_stop_locked(asc);
	ATH_HAL_SETPOWER(ah, HAL_PM_AWAKE);
	asc->asc_invalid = 1;
	ATH_UNLOCK(asc);
}
1595
/*
 * Bring the chip up; caller holds the driver lock.  Returns 0 on
 * success or ENOTACTIVE when the HAL reset fails.  On success the
 * receive path is started, interrupts are enabled and the current
 * channel state is programmed; asc_isrunning is set last.
 */
static int
ath_start_locked(ath_t *asc)
{
	ieee80211com_t *ic = (ieee80211com_t *)asc;
	struct ath_hal *ah = asc->asc_ah;
	HAL_STATUS status;

	ATH_LOCK_ASSERT(asc);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	asc->asc_curchan.channel = ic->ic_curchan->ich_freq;
	asc->asc_curchan.channelFlags = ath_chan2flags(ic, ic->ic_curchan);
	if (!ATH_HAL_RESET(ah, (HAL_OPMODE)ic->ic_opmode,
	    &asc->asc_curchan, AH_FALSE, &status)) {
		ATH_DEBUG((ATH_DBG_HAL, "ath: ath_m_start(): "
		    "reset hardware failed: '%s' (HAL status %u)\n",
		    ath_get_hal_status_desc(status), status));
		return (ENOTACTIVE);
	}

	(void) ath_startrecv(asc);

	/*
	 * Enable interrupts.
	 */
	asc->asc_imask = HAL_INT_RX | HAL_INT_TX
	    | HAL_INT_RXEOL | HAL_INT_RXORN
	    | HAL_INT_FATAL | HAL_INT_GLOBAL;
	ATH_HAL_INTRSET(ah, asc->asc_imask);

	/*
	 * The hardware should be ready to go now so it's safe
	 * to kick the 802.11 state machine as it's likely to
	 * immediately call back to us to send mgmt frames.
	 */
	ath_chan_change(asc, ic->ic_curchan);

	asc->asc_isrunning = 1;

	return (0);
}
1643
1644int
1645ath_m_start(void *arg)
1646{
1647	ath_t *asc = arg;
1648	int err;
1649
1650	ATH_LOCK(asc);
1651	/*
1652	 * Stop anything previously setup.  This is safe
1653	 * whether this is the first time through or not.
1654	 */
1655	ath_stop_locked(asc);
1656
1657	if ((err = ath_start_locked(asc)) != 0) {
1658		ATH_UNLOCK(asc);
1659		return (err);
1660	}
1661
1662	asc->asc_invalid = 0;
1663	ATH_UNLOCK(asc);
1664
1665	return (0);
1666}
1667
1668
/*
 * GLDv3 set-unicast-address entry point: latch the new station
 * address into the 802.11 layer and the hardware, then reset the
 * chip to apply it.
 */
static int
ath_m_unicst(void *arg, const uint8_t *macaddr)
{
	ath_t *asc = arg;
	struct ath_hal *ah = asc->asc_ah;

	ATH_DEBUG((ATH_DBG_GLD, "ath: ath_gld_saddr(): "
	    "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
	    macaddr[0], macaddr[1], macaddr[2],
	    macaddr[3], macaddr[4], macaddr[5]));

	ATH_LOCK(asc);
	IEEE80211_ADDR_COPY(asc->asc_isc.ic_macaddr, macaddr);
	ATH_HAL_SETMAC(ah, asc->asc_isc.ic_macaddr);

	(void) ath_reset(&asc->asc_isc);
	ATH_UNLOCK(asc);
	return (0);
}
1688
/*
 * GLDv3 promiscuous-mode entry point: toggle the hardware rx
 * filter's promiscuous bit and remember the administrative
 * setting in the softc.
 */
static int
ath_m_promisc(void *arg, boolean_t on)
{
	ath_t *asc = arg;
	struct ath_hal *ah = asc->asc_ah;
	uint32_t rfilt;

	ATH_LOCK(asc);
	rfilt = ATH_HAL_GETRXFILTER(ah);
	if (on)
		rfilt |= HAL_RX_FILTER_PROM;
	else
		rfilt &= ~HAL_RX_FILTER_PROM;
	asc->asc_promisc = on;
	ATH_HAL_SETRXFILTER(ah, rfilt);
	ATH_UNLOCK(asc);

	return (0);
}
1708
/*
 * GLDv3 multicast entry point: include or exclude address 'mca'
 * in the 64-bit hardware multicast hash filter.
 *
 * The 6-bit bucket index is an XOR fold of eight 6-bit slices
 * taken from two 32-bit loads (the second load starts at byte 3
 * so the upper address bytes are covered).  asc_mcast_refs counts
 * addresses per bucket so a filter bit is cleared only when the
 * last address hashing to it is removed.
 */
static int
ath_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	ath_t *asc = arg;
	struct ath_hal *ah = asc->asc_ah;
	uint32_t val, index, bit;
	uint8_t pos;
	uint32_t *mfilt = asc->asc_mcast_hash;

	ATH_LOCK(asc);
	/* calculate XOR of eight 6bit values */
	val = ATH_LE_READ_4(mca + 0);
	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	val = ATH_LE_READ_4(mca + 3);
	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	pos &= 0x3f;
	/* Which 32-bit filter word, and which bit within it. */
	index = pos / 32;
	bit = 1 << (pos % 32);

	if (add) {	/* enable multicast */
		asc->asc_mcast_refs[pos]++;
		mfilt[index] |= bit;
	} else {	/* disable multicast */
		if (--asc->asc_mcast_refs[pos] == 0)
			mfilt[index] &= ~bit;
	}
	ATH_HAL_SETMCASTFILTER(ah, mfilt[0], mfilt[1]);

	ATH_UNLOCK(asc);
	return (0);
}
1740
1741static void
1742ath_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1743{
1744	ath_t *asc = arg;
1745	int32_t err;
1746
1747	err = ieee80211_ioctl(&asc->asc_isc, wq, mp);
1748	ATH_LOCK(asc);
1749	if (err == ENETRESET) {
1750		if (ATH_IS_RUNNING(asc)) {
1751			ATH_UNLOCK(asc);
1752			(void) ath_m_start(asc);
1753			(void) ieee80211_new_state(&asc->asc_isc,
1754			    IEEE80211_S_SCAN, -1);
1755			ATH_LOCK(asc);
1756		}
1757	}
1758	ATH_UNLOCK(asc);
1759}
1760
/*
 * GLDv3 get-statistic entry point.  Maps MAC and WiFi statistic
 * ids onto the driver's counters; a subset of WiFi stats is
 * delegated to ieee80211_stat(), anything else is ENOTSUP.
 */
static int
ath_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	ath_t *asc = arg;
	ieee80211com_t *ic = (ieee80211com_t *)asc;
	struct ieee80211_node *in = ic->ic_bss;
	struct ieee80211_rateset *rs = &in->in_rates;

	ATH_LOCK(asc);
	switch (stat) {
	case MAC_STAT_IFSPEED:
		/* Current tx rate is in 0.5 Mb/s units; report bits/s. */
		*val = (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) / 2 *
		    1000000ull;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = asc->asc_stats.ast_tx_nobuf +
		    asc->asc_stats.ast_tx_nobufmgt;
		break;
	case MAC_STAT_IERRORS:
		*val = asc->asc_stats.ast_rx_tooshort;
		break;
	case MAC_STAT_RBYTES:
		*val = ic->ic_stats.is_rx_bytes;
		break;
	case MAC_STAT_IPACKETS:
		*val = ic->ic_stats.is_rx_frags;
		break;
	case MAC_STAT_OBYTES:
		*val = ic->ic_stats.is_tx_bytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = ic->ic_stats.is_tx_frags;
		break;
	case MAC_STAT_OERRORS:
	case WIFI_STAT_TX_FAILED:
		*val = asc->asc_stats.ast_tx_fifoerr +
		    asc->asc_stats.ast_tx_xretries +
		    asc->asc_stats.ast_tx_discard;
		break;
	case WIFI_STAT_TX_RETRANS:
		*val = asc->asc_stats.ast_tx_xretries;
		break;
	case WIFI_STAT_FCS_ERRORS:
		*val = asc->asc_stats.ast_rx_crcerr;
		break;
	case WIFI_STAT_WEP_ERRORS:
		*val = asc->asc_stats.ast_rx_badcrypt;
		break;
	case WIFI_STAT_TX_FRAGS:
	case WIFI_STAT_MCAST_TX:
	case WIFI_STAT_RTS_SUCCESS:
	case WIFI_STAT_RTS_FAILURE:
	case WIFI_STAT_ACK_FAILURE:
	case WIFI_STAT_RX_FRAGS:
	case WIFI_STAT_MCAST_RX:
	case WIFI_STAT_RX_DUPS:
		/* Maintained by the shared 802.11 layer, not the driver. */
		ATH_UNLOCK(asc);
		return (ieee80211_stat(ic, stat, val));
	default:
		ATH_UNLOCK(asc);
		return (ENOTSUP);
	}
	ATH_UNLOCK(asc);

	return (0);
}
1827
1828static int
1829ath_pci_setup(ath_t *asc)
1830{
1831	uint16_t command;
1832
1833	/*
1834	 * Enable memory mapping and bus mastering
1835	 */
1836	ASSERT(asc != NULL);
1837	command = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_COMM);
1838	command |= PCI_COMM_MAE | PCI_COMM_ME;
1839	pci_config_put16(asc->asc_cfg_handle, PCI_CONF_COMM, command);
1840	command = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_COMM);
1841	if ((command & PCI_COMM_MAE) == 0) {
1842		ath_problem("ath: ath_pci_setup(): "
1843		    "failed to enable memory mapping\n");
1844		return (EIO);
1845	}
1846	if ((command & PCI_COMM_ME) == 0) {
1847		ath_problem("ath: ath_pci_setup(): "
1848		    "failed to enable bus mastering\n");
1849		return (EIO);
1850	}
1851	ATH_DEBUG((ATH_DBG_INIT, "ath: ath_pci_setup(): "
1852	    "set command reg to 0x%x \n", command));
1853
1854	return (0);
1855}
1856
/*
 * DDI_RESUME handler: re-program the PCI command register (refuse
 * to resume if that fails) and restart the chip unless the softc
 * was marked invalid by ath_m_stop().
 */
static int
ath_resume(dev_info_t *devinfo)
{
	ath_t *asc;
	int ret = DDI_SUCCESS;

	asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));
	if (asc == NULL) {
		ATH_DEBUG((ATH_DBG_SUSPEND, "ath: ath_resume(): "
		    "failed to get soft state\n"));
		return (DDI_FAILURE);
	}

	ATH_LOCK(asc);
	/*
	 * Set up config space command register(s). Refuse
	 * to resume on failure.
	 */
	if (ath_pci_setup(asc) != 0) {
		ATH_DEBUG((ATH_DBG_SUSPEND, "ath: ath_resume(): "
		    "ath_pci_setup() failed\n"));
		ATH_UNLOCK(asc);
		return (DDI_FAILURE);
	}

	if (!asc->asc_invalid)
		ret = ath_start_locked(asc);
	ATH_UNLOCK(asc);

	return (ret);
}
1888
1889static int
1890ath_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
1891{
1892	ath_t *asc;
1893	ieee80211com_t *ic;
1894	struct ath_hal *ah;
1895	uint8_t csz;
1896	HAL_STATUS status;
1897	caddr_t regs;
1898	uint32_t i, val;
1899	uint16_t vendor_id, device_id;
1900	const char *athname;
1901	int32_t ath_countrycode = CTRY_DEFAULT;	/* country code */
1902	int32_t err, ath_regdomain = 0; /* regulatory domain */
1903	char strbuf[32];
1904	int instance;
1905	wifi_data_t wd = { 0 };
1906	mac_register_t *macp;
1907
1908	switch (cmd) {
1909	case DDI_ATTACH:
1910		break;
1911
1912	case DDI_RESUME:
1913		return (ath_resume(devinfo));
1914
1915	default:
1916		return (DDI_FAILURE);
1917	}
1918
1919	instance = ddi_get_instance(devinfo);
1920	if (ddi_soft_state_zalloc(ath_soft_state_p, instance) != DDI_SUCCESS) {
1921		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1922		    "Unable to alloc softstate\n"));
1923		return (DDI_FAILURE);
1924	}
1925
1926	asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));
1927	ic = (ieee80211com_t *)asc;
1928	asc->asc_dev = devinfo;
1929
1930	mutex_init(&asc->asc_genlock, NULL, MUTEX_DRIVER, NULL);
1931	mutex_init(&asc->asc_txbuflock, NULL, MUTEX_DRIVER, NULL);
1932	mutex_init(&asc->asc_rxbuflock, NULL, MUTEX_DRIVER, NULL);
1933	mutex_init(&asc->asc_resched_lock, NULL, MUTEX_DRIVER, NULL);
1934
1935	err = pci_config_setup(devinfo, &asc->asc_cfg_handle);
1936	if (err != DDI_SUCCESS) {
1937		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1938		    "pci_config_setup() failed"));
1939		goto attach_fail0;
1940	}
1941
1942	if (ath_pci_setup(asc) != 0)
1943		goto attach_fail1;
1944
1945	/*
1946	 * Cache line size is used to size and align various
1947	 * structures used to communicate with the hardware.
1948	 */
1949	csz = pci_config_get8(asc->asc_cfg_handle, PCI_CONF_CACHE_LINESZ);
1950	if (csz == 0) {
1951		/*
1952		 * We must have this setup properly for rx buffer
1953		 * DMA to work so force a reasonable value here if it
1954		 * comes up zero.
1955		 */
1956		csz = ATH_DEF_CACHE_BYTES / sizeof (uint32_t);
1957		pci_config_put8(asc->asc_cfg_handle, PCI_CONF_CACHE_LINESZ,
1958		    csz);
1959	}
1960	asc->asc_cachelsz = csz << 2;
1961	vendor_id = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_VENID);
1962	device_id = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_DEVID);
1963	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): vendor 0x%x, "
1964	    "device id 0x%x, cache size %d\n", vendor_id, device_id, csz));
1965
1966	athname = ath_hal_probe(vendor_id, device_id);
1967	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): athname: %s\n",
1968	    athname ? athname : "Atheros ???"));
1969
1970	pci_config_put8(asc->asc_cfg_handle, PCI_CONF_LATENCY_TIMER, 0xa8);
1971	val = pci_config_get32(asc->asc_cfg_handle, 0x40);
1972	if ((val & 0x0000ff00) != 0)
1973		pci_config_put32(asc->asc_cfg_handle, 0x40, val & 0xffff00ff);
1974
1975	err = ddi_regs_map_setup(devinfo, 1,
1976	    &regs, 0, 0, &ath_reg_accattr, &asc->asc_io_handle);
1977	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1978	    "regs map1 = %x err=%d\n", regs, err));
1979	if (err != DDI_SUCCESS) {
1980		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1981		    "ddi_regs_map_setup() failed"));
1982		goto attach_fail1;
1983	}
1984
1985	ah = ath_hal_attach(device_id, asc, 0, regs, &status);
1986	if (ah == NULL) {
1987		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1988		    "unable to attach hw: '%s' (HAL status %u)\n",
1989		    ath_get_hal_status_desc(status), status));
1990		goto attach_fail2;
1991	}
1992	ATH_HAL_INTRSET(ah, 0);
1993	asc->asc_ah = ah;
1994
1995	if (ah->ah_abi != HAL_ABI_VERSION) {
1996		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1997		    "HAL ABI mismatch detected (0x%x != 0x%x)\n",
1998		    ah->ah_abi, HAL_ABI_VERSION));
1999		goto attach_fail3;
2000	}
2001
2002	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2003	    "HAL ABI version 0x%x\n", ah->ah_abi));
2004	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2005	    "HAL mac version %d.%d, phy version %d.%d\n",
2006	    ah->ah_macVersion, ah->ah_macRev,
2007	    ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf));
2008	if (ah->ah_analog5GhzRev)
2009		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2010		    "HAL 5ghz radio version %d.%d\n",
2011		    ah->ah_analog5GhzRev >> 4,
2012		    ah->ah_analog5GhzRev & 0xf));
2013	if (ah->ah_analog2GhzRev)
2014		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2015		    "HAL 2ghz radio version %d.%d\n",
2016		    ah->ah_analog2GhzRev >> 4,
2017		    ah->ah_analog2GhzRev & 0xf));
2018
2019	/*
2020	 * Check if the MAC has multi-rate retry support.
2021	 * We do this by trying to setup a fake extended
2022	 * descriptor.  MAC's that don't have support will
2023	 * return false w/o doing anything.  MAC's that do
2024	 * support it will return true w/o doing anything.
2025	 */
2026	asc->asc_mrretry = ATH_HAL_SETUPXTXDESC(ah, NULL, 0, 0, 0, 0, 0, 0);
2027	ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2028	    "multi rate retry support=%x\n",
2029	    asc->asc_mrretry));
2030
2031	/*
2032	 * Get the hardware key cache size.
2033	 */
2034	asc->asc_keymax = ATH_HAL_KEYCACHESIZE(ah);
2035	if (asc->asc_keymax > sizeof (asc->asc_keymap) * NBBY) {
2036		ATH_DEBUG((ATH_DBG_ATTACH, "ath_attach:"
2037		    " Warning, using only %u entries in %u key cache\n",
2038		    sizeof (asc->asc_keymap) * NBBY, asc->asc_keymax));
2039		asc->asc_keymax = sizeof (asc->asc_keymap) * NBBY;
2040	}
2041	/*
2042	 * Reset the key cache since some parts do not
2043	 * reset the contents on initial power up.
2044	 */
2045	for (i = 0; i < asc->asc_keymax; i++)
2046		ATH_HAL_KEYRESET(ah, i);
2047
2048	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
2049		setbit(asc->asc_keymap, i);
2050		setbit(asc->asc_keymap, i+32);
2051		setbit(asc->asc_keymap, i+64);
2052		setbit(asc->asc_keymap, i+32+64);
2053	}
2054
2055	ATH_HAL_GETREGDOMAIN(ah, (uint32_t *)&ath_regdomain);
2056	ATH_HAL_GETCOUNTRYCODE(ah, &ath_countrycode);
2057	/*
2058	 * Collect the channel list using the default country
2059	 * code and including outdoor channels.  The 802.11 layer
2060	 * is resposible for filtering this list to a set of
2061	 * channels that it considers ok to use.
2062	 */
2063	asc->asc_have11g = 0;
2064
2065	/* enable outdoor use, enable extended channels */
2066	err = ath_getchannels(asc, ath_countrycode, AH_FALSE, AH_TRUE);
2067	if (err != 0)
2068		goto attach_fail3;
2069
2070	/*
2071	 * Setup rate tables for all potential media types.
2072	 */
2073	ath_rate_setup(asc, IEEE80211_MODE_11A);
2074	ath_rate_setup(asc, IEEE80211_MODE_11B);
2075	ath_rate_setup(asc, IEEE80211_MODE_11G);
2076	ath_rate_setup(asc, IEEE80211_MODE_TURBO_A);
2077
2078	/* Setup here so ath_rate_update is happy */
2079	ath_setcurmode(asc, IEEE80211_MODE_11A);
2080
2081	err = ath_desc_alloc(devinfo, asc);
2082	if (err != DDI_SUCCESS) {
2083		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2084		    "failed to allocate descriptors: %d\n", err));
2085		goto attach_fail3;
2086	}
2087
2088	/* Setup transmit queues in the HAL */
2089	if (ath_txq_setup(asc))
2090		goto attach_fail4;
2091
2092	ATH_HAL_GETMAC(ah, ic->ic_macaddr);
2093
2094	/*
2095	 * Initialize pointers to device specific functions which
2096	 * will be used by the generic layer.
2097	 */
2098	/* 11g support is identified when we fetch the channel set */
2099	if (asc->asc_have11g)
2100		ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
2101		    IEEE80211_C_SHSLOT;		/* short slot time */
2102	/*
2103	 * Query the hal to figure out h/w crypto support.
2104	 */
2105	if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_WEP))
2106		ic->ic_caps |= IEEE80211_C_WEP;
2107	if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_AES_OCB))
2108		ic->ic_caps |= IEEE80211_C_AES;
2109	if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_AES_CCM)) {
2110		ATH_DEBUG((ATH_DBG_ATTACH, "Atheros support H/W CCMP\n"));
2111		ic->ic_caps |= IEEE80211_C_AES_CCM;
2112	}
2113	if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_CKIP))
2114		ic->ic_caps |= IEEE80211_C_CKIP;
2115	if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_TKIP)) {
2116		ATH_DEBUG((ATH_DBG_ATTACH, "Atheros support H/W TKIP\n"));
2117		ic->ic_caps |= IEEE80211_C_TKIP;
2118		/*
2119		 * Check if h/w does the MIC and/or whether the
2120		 * separate key cache entries are required to
2121		 * handle both tx+rx MIC keys.
2122		 */
2123		if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_MIC)) {
2124			ATH_DEBUG((ATH_DBG_ATTACH, "Support H/W TKIP MIC\n"));
2125			ic->ic_caps |= IEEE80211_C_TKIPMIC;
2126		}
2127		if (ATH_HAL_TKIPSPLIT(ah))
2128			asc->asc_splitmic = 1;
2129	}
2130	ic->ic_caps |= IEEE80211_C_WPA;	/* Support WPA/WPA2 */
2131
2132	asc->asc_hasclrkey = ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_CLR);
2133	ic->ic_phytype = IEEE80211_T_OFDM;
2134	ic->ic_opmode = IEEE80211_M_STA;
2135	ic->ic_state = IEEE80211_S_INIT;
2136	ic->ic_maxrssi = ATH_MAX_RSSI;
2137	ic->ic_set_shortslot = ath_set_shortslot;
2138	ic->ic_xmit = ath_xmit;
2139	ieee80211_attach(ic);
2140
2141	/* different instance has different WPA door */
2142	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
2143	    ddi_driver_name(devinfo),
2144	    ddi_get_instance(devinfo));
2145
2146	/* Override 80211 default routines */
2147	ic->ic_reset = ath_reset;
2148	asc->asc_newstate = ic->ic_newstate;
2149	ic->ic_newstate = ath_newstate;
2150	ic->ic_watchdog = ath_watchdog;
2151	ic->ic_node_alloc = ath_node_alloc;
2152	ic->ic_node_free = ath_node_free;
2153	ic->ic_crypto.cs_key_alloc = ath_key_alloc;
2154	ic->ic_crypto.cs_key_delete = ath_key_delete;
2155	ic->ic_crypto.cs_key_set = ath_key_set;
2156	ieee80211_media_init(ic);
2157	/*
2158	 * initialize default tx key
2159	 */
2160	ic->ic_def_txkey = 0;
2161
2162	asc->asc_rx_pend = 0;
2163	ATH_HAL_INTRSET(ah, 0);
2164	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW,
2165	    &asc->asc_softint_id, NULL, 0, ath_softint_handler, (caddr_t)asc);
2166	if (err != DDI_SUCCESS) {
2167		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2168		    "ddi_add_softintr() failed\n"));
2169		goto attach_fail5;
2170	}
2171
2172	if (ddi_get_iblock_cookie(devinfo, 0, &asc->asc_iblock)
2173	    != DDI_SUCCESS) {
2174		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2175		    "Can not get iblock cookie for INT\n"));
2176		goto attach_fail6;
2177	}
2178
2179	if (ddi_add_intr(devinfo, 0, NULL, NULL, ath_intr,
2180	    (caddr_t)asc) != DDI_SUCCESS) {
2181		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2182		    "Can not set intr for ATH driver\n"));
2183		goto attach_fail6;
2184	}
2185
2186	/*
2187	 * Provide initial settings for the WiFi plugin; whenever this
2188	 * information changes, we need to call mac_plugindata_update()
2189	 */
2190	wd.wd_opmode = ic->ic_opmode;
2191	wd.wd_secalloc = WIFI_SEC_NONE;
2192	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid);
2193
2194	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
2195		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2196		    "MAC version mismatch\n"));
2197		goto attach_fail7;
2198	}
2199
2200	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
2201	macp->m_driver		= asc;
2202	macp->m_dip		= devinfo;
2203	macp->m_src_addr	= ic->ic_macaddr;
2204	macp->m_callbacks	= &ath_m_callbacks;
2205	macp->m_min_sdu		= 0;
2206	macp->m_max_sdu		= IEEE80211_MTU;
2207	macp->m_pdata		= &wd;
2208	macp->m_pdata_size	= sizeof (wd);
2209
2210	err = mac_register(macp, &ic->ic_mach);
2211	mac_free(macp);
2212	if (err != 0) {
2213		ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2214		    "mac_register err %x\n", err));
2215		goto attach_fail7;
2216	}
2217
2218	/* Create minor node of type DDI_NT_NET_WIFI */
2219	(void) snprintf(strbuf, sizeof (strbuf), "%s%d",
2220	    ATH_NODENAME, instance);
2221	err = ddi_create_minor_node(devinfo, strbuf, S_IFCHR,
2222	    instance + 1, DDI_NT_NET_WIFI, 0);
2223	if (err != DDI_SUCCESS)
2224		ATH_DEBUG((ATH_DBG_ATTACH, "WARN: ath: ath_attach(): "
2225		    "Create minor node failed - %d\n", err));
2226
2227	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
2228	asc->asc_invalid = 1;
2229	asc->asc_isrunning = 0;
2230	asc->asc_promisc = B_FALSE;
2231	bzero(asc->asc_mcast_refs, sizeof (asc->asc_mcast_refs));
2232	bzero(asc->asc_mcast_hash, sizeof (asc->asc_mcast_hash));
2233	return (DDI_SUCCESS);
2234attach_fail7:
2235	ddi_remove_intr(devinfo, 0, asc->asc_iblock);
2236attach_fail6:
2237	ddi_remove_softintr(asc->asc_softint_id);
2238attach_fail5:
2239	(void) ieee80211_detach(ic);
2240attach_fail4:
2241	ath_desc_free(asc);
2242attach_fail3:
2243	ah->ah_detach(asc->asc_ah);
2244attach_fail2:
2245	ddi_regs_map_free(&asc->asc_io_handle);
2246attach_fail1:
2247	pci_config_teardown(&asc->asc_cfg_handle);
2248attach_fail0:
2249	asc->asc_invalid = 1;
2250	mutex_destroy(&asc->asc_txbuflock);
2251	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2252		if (ATH_TXQ_SETUP(asc, i)) {
2253			struct ath_txq *txq = &asc->asc_txq[i];
2254			mutex_destroy(&txq->axq_lock);
2255		}
2256	}
2257	mutex_destroy(&asc->asc_rxbuflock);
2258	mutex_destroy(&asc->asc_genlock);
2259	mutex_destroy(&asc->asc_resched_lock);
2260	ddi_soft_state_free(ath_soft_state_p, instance);
2261
2262	return (DDI_FAILURE);
2263}
2264
2265/*
2266 * Suspend transmit/receive for powerdown
2267 */
2268static int
2269ath_suspend(ath_t *asc)
2270{
2271	ATH_LOCK(asc);
2272	ath_stop_locked(asc);
2273	ATH_UNLOCK(asc);
2274	ATH_DEBUG((ATH_DBG_SUSPEND, "ath: suspended.\n"));
2275
2276	return (DDI_SUCCESS);
2277}
2278
/*
 * detach(9E) entry point.
 *
 * DDI_SUSPEND is delegated to ath_suspend(); DDI_DETACH tears the
 * instance down completely, roughly in the reverse order of
 * ath_attach() (see the NB comment below for the 802.11/hal ordering
 * constraints).  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int32_t
ath_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	ath_t *asc;

	asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));
	ASSERT(asc != NULL);

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (ath_suspend(asc));

	default:
		return (DDI_FAILURE);
	}

	/* Refuse to detach while the MAC layer still has active clients */
	if (mac_disable(asc->asc_isc.ic_mach) != 0)
		return (DDI_FAILURE);

	ath_stop_scantimer(asc);

	/* disable interrupts */
	ATH_HAL_INTRSET(asc->asc_ah, 0);

	/*
	 * Unregister from the MAC layer subsystem
	 */
	(void) mac_unregister(asc->asc_isc.ic_mach);

	/* free interrupt resources (hard interrupt, then soft interrupt) */
	ddi_remove_intr(devinfo, 0, asc->asc_iblock);
	ddi_remove_softintr(asc->asc_softint_id);

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 */
	ieee80211_detach(&asc->asc_isc);
	ath_desc_free(asc);
	ath_txq_cleanup(asc);
	asc->asc_ah->ah_detach(asc->asc_ah);

	/* free io handle */
	ddi_regs_map_free(&asc->asc_io_handle);
	pci_config_teardown(&asc->asc_cfg_handle);

	/*
	 * destroy locks
	 * NOTE(review): asc_txbuflock and the per-queue axq_locks are
	 * not destroyed here -- presumably ath_txq_cleanup() above
	 * handles them; verify, otherwise they leak on detach.
	 */
	mutex_destroy(&asc->asc_rxbuflock);
	mutex_destroy(&asc->asc_genlock);
	mutex_destroy(&asc->asc_resched_lock);

	ddi_remove_minor_node(devinfo, NULL);
	ddi_soft_state_free(ath_soft_state_p, ddi_get_instance(devinfo));

	return (DDI_SUCCESS);
}
2345
/*
 * Device operations vector.  getinfo/identify are nulldev, devo_power
 * is nodev, and there is no quiesce support; attach/detach do all the
 * real work.  mac_init_ops()/mac_fini_ops() in _init()/_fini() hook
 * the MAC framework into this ops vector.
 */
DDI_DEFINE_STREAM_OPS(ath_dev_ops, nulldev, nulldev, ath_attach, ath_detach,
    nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);
2348
/* Loadable-module description for the ath device driver. */
static struct modldrv ath_modldrv = {
	&mod_driverops,		/* type of module: a device driver */
	"ath driver",		/* short description shown by modinfo(1M) */
	&ath_dev_ops		/* driver-specific dev_ops vector above */
};
2354
/* Single-module linkage handed to mod_install/mod_remove/mod_info. */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&ath_modldrv, NULL
};
2358
2359
/*
 * _info(9E): report module information via the common mod_info()
 * helper using the modlinkage defined above.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
2365
2366int
2367_init(void)
2368{
2369	int status;
2370
2371	status = ddi_soft_state_init(&ath_soft_state_p, sizeof (ath_t), 1);
2372	if (status != 0)
2373		return (status);
2374
2375	mutex_init(&ath_loglock, NULL, MUTEX_DRIVER, NULL);
2376	ath_halfix_init();
2377	mac_init_ops(&ath_dev_ops, "ath");
2378	status = mod_install(&modlinkage);
2379	if (status != 0) {
2380		mac_fini_ops(&ath_dev_ops);
2381		ath_halfix_finit();
2382		mutex_destroy(&ath_loglock);
2383		ddi_soft_state_fini(&ath_soft_state_p);
2384	}
2385
2386	return (status);
2387}
2388
2389int
2390_fini(void)
2391{
2392	int status;
2393
2394	status = mod_remove(&modlinkage);
2395	if (status == 0) {
2396		mac_fini_ops(&ath_dev_ops);
2397		ath_halfix_finit();
2398		mutex_destroy(&ath_loglock);
2399		ddi_soft_state_fini(&ath_soft_state_p);
2400	}
2401	return (status);
2402}
2403