1/*-
2 * Copyright (c) 2006,2007
3 *	Damien Bergamini <damien.bergamini@free.fr>
4 *	Benjamin Close <Benjamin.Close@clearchain.com>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <sys/cdefs.h>
20__FBSDID("$FreeBSD: head/sys/dev/wpi/if_wpi.c 289008 2015-10-08 07:18:28Z adrian $");
21
22/*
23 * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters.
24 *
25 * The 3945ABG network adapter doesn't use traditional hardware as
26 * many other adapters do. Instead, at run time the adapter is set into a
27 * known state and told to load boot firmware. The boot firmware loads an
28 * init and a main binary firmware image into SRAM on the card via DMA.
29 * Once the firmware is loaded, the driver and the hardware communicate
30 * by way of circular DMA rings located in SRAM.
31 *
32 * There are 6 memory rings: 1 command ring, 1 rx data ring and 4 tx data
33 * rings. The 4 tx data rings allow for QoS prioritization.
34 *
35 * The rx data ring consists of 32 dma buffers. Two registers are used to
36 * track how far into the ring the driver and the firmware have progressed.
37 * The driver sets the initial read index (reg1) and the initial write index
38 * (reg2); the firmware updates the read index (reg1) on rx of a packet and
39 * fires an interrupt. The driver then processes the buffers starting at
40 * reg1, indicating to the firmware which buffers have been consumed by
41 * updating reg2, and allocates new memory for each processed buffer.
42 *
43 * A similar scheme is used for the tx rings. The difference is that the
44 * firmware stops processing buffers once the queue is full and resumes
45 * only after confirmation of a successful transmission (tx_done).
46 *
47 * The command ring operates in the same manner as the tx queues.
48 *
49 * All communication directly to the card (i.e. eeprom) is classed as
50 * Stage1 communication.
51 *
52 * All communication via the firmware to the card is classed as Stage2.
53 * The firmware consists of 2 parts: a bootstrap firmware and a runtime
54 * firmware. The bootstrap firmware and runtime firmware are loaded
55 * from host memory via DMA to the card and then told to execute. From
56 * this point on the majority of communication between the driver and
57 * the card goes via the firmware.
58 */
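/*
 * Illustrative sketch only (not part of the driver; all names below are
 * hypothetical): the rx handshake described above is a producer/consumer
 * ring in which only the two indices matter:
 *
 *	fw_idx = read_reg(sc, REG1);		// where the firmware stopped
 *	while (drv_idx != fw_idx) {
 *		process_and_refill(ring, drv_idx);
 *		drv_idx = (drv_idx + 1) % NBUF;
 *	}
 *	write_reg(sc, REG2, drv_idx);		// hand buffers back
 */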
59
60#include "opt_wlan.h"
61#include "opt_wpi.h"
62
63#include <sys/param.h>
64#include <sys/sysctl.h>
65#include <sys/sockio.h>
66#include <sys/mbuf.h>
67#include <sys/kernel.h>
68#include <sys/socket.h>
69#include <sys/systm.h>
70#include <sys/malloc.h>
71#include <sys/queue.h>
72#include <sys/taskqueue.h>
73#include <sys/module.h>
74#include <sys/bus.h>
75#include <sys/endian.h>
76#include <sys/linker.h>
77#include <sys/firmware.h>
78
79#include <machine/bus.h>
80#include <machine/resource.h>
81#include <sys/rman.h>
82
83#include <dev/pci/pcireg.h>
84#include <dev/pci/pcivar.h>
85
86#include <net/bpf.h>
87#include <net/if.h>
88#include <net/if_var.h>
89#include <net/if_arp.h>
90#include <net/ethernet.h>
91#include <net/if_dl.h>
92#include <net/if_media.h>
93#include <net/if_types.h>
94
95#include <netinet/in.h>
96#include <netinet/in_systm.h>
97#include <netinet/in_var.h>
98#include <netinet/if_ether.h>
99#include <netinet/ip.h>
100
101#include <net80211/ieee80211_var.h>
102#include <net80211/ieee80211_radiotap.h>
103#include <net80211/ieee80211_regdomain.h>
104#include <net80211/ieee80211_ratectl.h>
105
106#include <dev/wpi/if_wpireg.h>
107#include <dev/wpi/if_wpivar.h>
108#include <dev/wpi/if_wpi_debug.h>
109
110struct wpi_ident {
111	uint16_t	vendor;
112	uint16_t	device;
113	uint16_t	subdevice;
114	const char	*name;
115};
116
117static const struct wpi_ident wpi_ident_table[] = {
118	/* The below entries support ABG regardless of the subid */
119	{ 0x8086, 0x4222,    0x0, "Intel(R) PRO/Wireless 3945ABG" },
120	{ 0x8086, 0x4227,    0x0, "Intel(R) PRO/Wireless 3945ABG" },
121	/* The below entries only support BG */
122	{ 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG"  },
123	{ 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG"  },
124	{ 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG"  },
125	{ 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG"  },
126	{ 0, 0, 0, NULL }
127};
128
129static int	wpi_probe(device_t);
130static int	wpi_attach(device_t);
131static void	wpi_radiotap_attach(struct wpi_softc *);
132static void	wpi_sysctlattach(struct wpi_softc *);
133static void	wpi_init_beacon(struct wpi_vap *);
134static struct ieee80211vap *wpi_vap_create(struct ieee80211com *,
135		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
136		    const uint8_t [IEEE80211_ADDR_LEN],
137		    const uint8_t [IEEE80211_ADDR_LEN]);
138static void	wpi_vap_delete(struct ieee80211vap *);
139static int	wpi_detach(device_t);
140static int	wpi_shutdown(device_t);
141static int	wpi_suspend(device_t);
142static int	wpi_resume(device_t);
143static int	wpi_nic_lock(struct wpi_softc *);
144static int	wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int);
145static void	wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int);
146static int	wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *,
147		    void **, bus_size_t, bus_size_t);
148static void	wpi_dma_contig_free(struct wpi_dma_info *);
149static int	wpi_alloc_shared(struct wpi_softc *);
150static void	wpi_free_shared(struct wpi_softc *);
151static int	wpi_alloc_fwmem(struct wpi_softc *);
152static void	wpi_free_fwmem(struct wpi_softc *);
153static int	wpi_alloc_rx_ring(struct wpi_softc *);
154static void	wpi_update_rx_ring(struct wpi_softc *);
155static void	wpi_update_rx_ring_ps(struct wpi_softc *);
156static void	wpi_reset_rx_ring(struct wpi_softc *);
157static void	wpi_free_rx_ring(struct wpi_softc *);
158static int	wpi_alloc_tx_ring(struct wpi_softc *, struct wpi_tx_ring *,
159		    int);
160static void	wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
161static void	wpi_update_tx_ring_ps(struct wpi_softc *,
162		    struct wpi_tx_ring *);
163static void	wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
164static void	wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
165static int	wpi_read_eeprom(struct wpi_softc *,
166		    uint8_t macaddr[IEEE80211_ADDR_LEN]);
167static uint32_t	wpi_eeprom_channel_flags(struct wpi_eeprom_chan *);
168static void	wpi_read_eeprom_band(struct wpi_softc *, int);
169static int	wpi_read_eeprom_channels(struct wpi_softc *, int);
170static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *,
171		    struct ieee80211_channel *);
172static int	wpi_setregdomain(struct ieee80211com *,
173		    struct ieee80211_regdomain *, int,
174		    struct ieee80211_channel[]);
175static int	wpi_read_eeprom_group(struct wpi_softc *, int);
176static int	wpi_add_node_entry_adhoc(struct wpi_softc *);
177static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *,
178		    const uint8_t mac[IEEE80211_ADDR_LEN]);
179static void	wpi_node_free(struct ieee80211_node *);
180static void	wpi_recv_mgmt(struct ieee80211_node *, struct mbuf *, int,
181		    const struct ieee80211_rx_stats *,
182		    int, int);
183static void	wpi_restore_node(void *, struct ieee80211_node *);
184static void	wpi_restore_node_table(struct wpi_softc *, struct wpi_vap *);
185static int	wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int);
186static void	wpi_calib_timeout(void *);
187static void	wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *,
188		    struct wpi_rx_data *);
189static void	wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *,
190		    struct wpi_rx_data *);
191static void	wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *);
192static void	wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *);
193static void	wpi_notif_intr(struct wpi_softc *);
194static void	wpi_wakeup_intr(struct wpi_softc *);
195#ifdef WPI_DEBUG
196static void	wpi_debug_registers(struct wpi_softc *);
197#endif
198static void	wpi_fatal_intr(struct wpi_softc *);
199static void	wpi_intr(void *);
200static int	wpi_cmd2(struct wpi_softc *, struct wpi_buf *);
201static int	wpi_tx_data(struct wpi_softc *, struct mbuf *,
202		    struct ieee80211_node *);
203static int	wpi_tx_data_raw(struct wpi_softc *, struct mbuf *,
204		    struct ieee80211_node *,
205		    const struct ieee80211_bpf_params *);
206static int	wpi_raw_xmit(struct ieee80211_node *, struct mbuf *,
207		    const struct ieee80211_bpf_params *);
208static int	wpi_transmit(struct ieee80211com *, struct mbuf *);
209static void	wpi_watchdog_rfkill(void *);
210static void	wpi_scan_timeout(void *);
211static void	wpi_tx_timeout(void *);
212static void	wpi_parent(struct ieee80211com *);
213static int	wpi_cmd(struct wpi_softc *, int, const void *, size_t, int);
214static int	wpi_mrr_setup(struct wpi_softc *);
215static int	wpi_add_node(struct wpi_softc *, struct ieee80211_node *);
216static int	wpi_add_broadcast_node(struct wpi_softc *, int);
217static int	wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *);
218static void	wpi_del_node(struct wpi_softc *, struct ieee80211_node *);
219static int	wpi_updateedca(struct ieee80211com *);
220static void	wpi_set_promisc(struct wpi_softc *);
221static void	wpi_update_promisc(struct ieee80211com *);
222static void	wpi_update_mcast(struct ieee80211com *);
223static void	wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t);
224static int	wpi_set_timing(struct wpi_softc *, struct ieee80211_node *);
225static void	wpi_power_calibration(struct wpi_softc *);
226static int	wpi_set_txpower(struct wpi_softc *, int);
227static int	wpi_get_power_index(struct wpi_softc *,
228		    struct wpi_power_group *, uint8_t, int, int);
229static int	wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int);
230static int	wpi_send_btcoex(struct wpi_softc *);
231static int	wpi_send_rxon(struct wpi_softc *, int, int);
232static int	wpi_config(struct wpi_softc *);
233static uint16_t	wpi_get_active_dwell_time(struct wpi_softc *,
234		    struct ieee80211_channel *, uint8_t);
235static uint16_t	wpi_limit_dwell(struct wpi_softc *, uint16_t);
236static uint16_t	wpi_get_passive_dwell_time(struct wpi_softc *,
237		    struct ieee80211_channel *);
238static uint32_t	wpi_get_scan_pause_time(uint32_t, uint16_t);
239static int	wpi_scan(struct wpi_softc *, struct ieee80211_channel *);
240static int	wpi_auth(struct wpi_softc *, struct ieee80211vap *);
241static int	wpi_config_beacon(struct wpi_vap *);
242static int	wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *);
243static void	wpi_update_beacon(struct ieee80211vap *, int);
244static void	wpi_newassoc(struct ieee80211_node *, int);
245static int	wpi_run(struct wpi_softc *, struct ieee80211vap *);
246static int	wpi_load_key(struct ieee80211_node *,
247		    const struct ieee80211_key *);
248static void	wpi_load_key_cb(void *, struct ieee80211_node *);
249static int	wpi_set_global_keys(struct ieee80211_node *);
250static int	wpi_del_key(struct ieee80211_node *,
251		    const struct ieee80211_key *);
252static void	wpi_del_key_cb(void *, struct ieee80211_node *);
253static int	wpi_process_key(struct ieee80211vap *,
254		    const struct ieee80211_key *, int);
255static int	wpi_key_set(struct ieee80211vap *,
256		    const struct ieee80211_key *);
257static int	wpi_key_delete(struct ieee80211vap *,
258		    const struct ieee80211_key *);
259static int	wpi_post_alive(struct wpi_softc *);
260static int	wpi_load_bootcode(struct wpi_softc *, const uint8_t *, int);
261static int	wpi_load_firmware(struct wpi_softc *);
262static int	wpi_read_firmware(struct wpi_softc *);
263static void	wpi_unload_firmware(struct wpi_softc *);
264static int	wpi_clock_wait(struct wpi_softc *);
265static int	wpi_apm_init(struct wpi_softc *);
266static void	wpi_apm_stop_master(struct wpi_softc *);
267static void	wpi_apm_stop(struct wpi_softc *);
268static void	wpi_nic_config(struct wpi_softc *);
269static int	wpi_hw_init(struct wpi_softc *);
270static void	wpi_hw_stop(struct wpi_softc *);
271static void	wpi_radio_on(void *, int);
272static void	wpi_radio_off(void *, int);
273static int	wpi_init(struct wpi_softc *);
274static void	wpi_stop_locked(struct wpi_softc *);
275static void	wpi_stop(struct wpi_softc *);
276static void	wpi_scan_start(struct ieee80211com *);
277static void	wpi_scan_end(struct ieee80211com *);
278static void	wpi_set_channel(struct ieee80211com *);
279static void	wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long);
280static void	wpi_scan_mindwell(struct ieee80211_scan_state *);
281static void	wpi_hw_reset(void *, int);
282
283static device_method_t wpi_methods[] = {
284	/* Device interface */
285	DEVMETHOD(device_probe,		wpi_probe),
286	DEVMETHOD(device_attach,	wpi_attach),
287	DEVMETHOD(device_detach,	wpi_detach),
288	DEVMETHOD(device_shutdown,	wpi_shutdown),
289	DEVMETHOD(device_suspend,	wpi_suspend),
290	DEVMETHOD(device_resume,	wpi_resume),
291
292	DEVMETHOD_END
293};
294
295static driver_t wpi_driver = {
296	"wpi",
297	wpi_methods,
298	sizeof (struct wpi_softc)
299};
300static devclass_t wpi_devclass;
301
302DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL);
303
304MODULE_VERSION(wpi, 1);
305
306MODULE_DEPEND(wpi, pci,  1, 1, 1);
307MODULE_DEPEND(wpi, wlan, 1, 1, 1);
308MODULE_DEPEND(wpi, firmware, 1, 1, 1);
309
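/*
 * Match the PCI vendor/device pair against the table of supported
 * adapters and set the device description.
 */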
310static int
311wpi_probe(device_t dev)
312{
313	const struct wpi_ident *ident;
314
315	for (ident = wpi_ident_table; ident->name != NULL; ident++) {
316		if (pci_get_vendor(dev) == ident->vendor &&
317		    pci_get_device(dev) == ident->device) {
318			device_set_desc(dev, ident->name);
319			return (BUS_PROBE_DEFAULT);
320		}
321	}
322	return ENXIO;
323}
324
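/*
 * Attach the adapter: map registers, allocate interrupt and DMA
 * resources, read the EEPROM, attach to net80211 and install the
 * interrupt handler.
 */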
325static int
326wpi_attach(device_t dev)
327{
328	struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev);
329	struct ieee80211com *ic;
330	int i, error, rid;
331#ifdef WPI_DEBUG
332	int supportsa = 1;
333	const struct wpi_ident *ident;
334#endif
335
336	sc->sc_dev = dev;
337
338#ifdef WPI_DEBUG
339	error = resource_int_value(device_get_name(sc->sc_dev),
340	    device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug));
341	if (error != 0)
342		sc->sc_debug = 0;
343#else
344	sc->sc_debug = 0;
345#endif
346
347	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
348
349	/*
350	 * Get the offset of the PCI Express Capability Structure in PCI
351	 * Configuration Space.
352	 */
353	error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
354	if (error != 0) {
355		device_printf(dev, "PCIe capability structure not found!\n");
356		return error;
357	}
358
359	/*
360	 * Some cards only support 802.11b/g, not 802.11a; check to see if
361	 * this is one such card. A 0x0 in the subdevice table indicates
362	 * the entire subdevice range is to be ignored.
363	 */
364#ifdef WPI_DEBUG
365	for (ident = wpi_ident_table; ident->name != NULL; ident++) {
366		if (ident->subdevice &&
367		    pci_get_subdevice(dev) == ident->subdevice) {
368		    supportsa = 0;
369		    break;
370		}
371	}
372#endif
373
374	/* Clear device-specific "PCI retry timeout" register (41h). */
375	pci_write_config(dev, 0x41, 0, 1);
376
377	/* Enable bus-mastering. */
378	pci_enable_busmaster(dev);
379
380	rid = PCIR_BAR(0);
381	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
382	    RF_ACTIVE);
383	if (sc->mem == NULL) {
384		device_printf(dev, "can't map mem space\n");
385		return ENOMEM;
386	}
387	sc->sc_st = rman_get_bustag(sc->mem);
388	sc->sc_sh = rman_get_bushandle(sc->mem);
389
390	i = 1;
391	rid = 0;
392	if (pci_alloc_msi(dev, &i) == 0)
393		rid = 1;
394	/* Install interrupt handler. */
395	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
396	    (rid != 0 ? 0 : RF_SHAREABLE));
397	if (sc->irq == NULL) {
398		device_printf(dev, "can't map interrupt\n");
399		error = ENOMEM;
400		goto fail;
401	}
402
403	WPI_LOCK_INIT(sc);
404	WPI_TX_LOCK_INIT(sc);
405	WPI_RXON_LOCK_INIT(sc);
406	WPI_NT_LOCK_INIT(sc);
407	WPI_TXQ_LOCK_INIT(sc);
408	WPI_TXQ_STATE_LOCK_INIT(sc);
409
410	/* Allocate DMA memory for firmware transfers. */
411	if ((error = wpi_alloc_fwmem(sc)) != 0) {
412		device_printf(dev,
413		    "could not allocate memory for firmware, error %d\n",
414		    error);
415		goto fail;
416	}
417
418	/* Allocate shared page. */
419	if ((error = wpi_alloc_shared(sc)) != 0) {
420		device_printf(dev, "could not allocate shared page\n");
421		goto fail;
422	}
423
424	/* Allocate TX rings - 4 for QoS purposes, 1 for commands. */
425	for (i = 0; i < WPI_NTXQUEUES; i++) {
426		if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
427			device_printf(dev,
428			    "could not allocate TX ring %d, error %d\n", i,
429			    error);
430			goto fail;
431		}
432	}
433
434	/* Allocate RX ring. */
435	if ((error = wpi_alloc_rx_ring(sc)) != 0) {
436		device_printf(dev, "could not allocate RX ring, error %d\n",
437		    error);
438		goto fail;
439	}
440
441	/* Clear pending interrupts. */
442	WPI_WRITE(sc, WPI_INT, 0xffffffff);
443
444	ic = &sc->sc_ic;
445	ic->ic_softc = sc;
446	ic->ic_name = device_get_nameunit(dev);
447	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
448	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
449
450	/* Set device capabilities. */
451	ic->ic_caps =
452		  IEEE80211_C_STA		/* station mode supported */
453		| IEEE80211_C_IBSS		/* IBSS mode supported */
454		| IEEE80211_C_HOSTAP		/* Host access point mode */
455		| IEEE80211_C_MONITOR		/* monitor mode supported */
456		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
457		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
458		| IEEE80211_C_TXPMGT		/* tx power management */
459		| IEEE80211_C_SHSLOT		/* short slot time supported */
460		| IEEE80211_C_WPA		/* 802.11i */
461		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
462		| IEEE80211_C_WME		/* 802.11e */
463		| IEEE80211_C_PMGT		/* Station-side power mgmt */
464		;
465
466	ic->ic_cryptocaps =
467		  IEEE80211_CRYPTO_AES_CCM;
468
469	/*
470	 * Read in the eeprom and also set up the channels for
471	 * net80211. We don't set the rates as net80211 does this for us.
472	 */
473	if ((error = wpi_read_eeprom(sc, ic->ic_macaddr)) != 0) {
474		device_printf(dev, "could not read EEPROM, error %d\n",
475		    error);
476		goto fail;
477	}
478
479#ifdef WPI_DEBUG
480	if (bootverbose) {
481		device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n",
482		    sc->domain);
483		device_printf(sc->sc_dev, "Hardware Type: %c\n",
484		    sc->type > 1 ? 'B': '?');
485		device_printf(sc->sc_dev, "Hardware Revision: %c\n",
486		    ((sc->rev & 0xf0) == 0xd0) ? 'D': '?');
487		device_printf(sc->sc_dev, "SKU %s support 802.11a\n",
488		    supportsa ? "does" : "does not");
489
490		/* XXX hw_config uses the PCIDEV for the Hardware rev. Must
491		   check what sc->rev really represents - benjsc 20070615 */
492	}
493#endif
494
495	ieee80211_ifattach(ic);
496	ic->ic_vap_create = wpi_vap_create;
497	ic->ic_vap_delete = wpi_vap_delete;
498	ic->ic_parent = wpi_parent;
499	ic->ic_raw_xmit = wpi_raw_xmit;
500	ic->ic_transmit = wpi_transmit;
501	ic->ic_node_alloc = wpi_node_alloc;
502	sc->sc_node_free = ic->ic_node_free;
503	ic->ic_node_free = wpi_node_free;
504	ic->ic_wme.wme_update = wpi_updateedca;
505	ic->ic_update_promisc = wpi_update_promisc;
506	ic->ic_update_mcast = wpi_update_mcast;
507	ic->ic_newassoc = wpi_newassoc;
508	ic->ic_scan_start = wpi_scan_start;
509	ic->ic_scan_end = wpi_scan_end;
510	ic->ic_set_channel = wpi_set_channel;
511	ic->ic_scan_curchan = wpi_scan_curchan;
512	ic->ic_scan_mindwell = wpi_scan_mindwell;
513	ic->ic_setregdomain = wpi_setregdomain;
514
515	sc->sc_update_rx_ring = wpi_update_rx_ring;
516	sc->sc_update_tx_ring = wpi_update_tx_ring;
517
518	wpi_radiotap_attach(sc);
519
520	callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0);
521	callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0);
522	callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0);
523	callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0);
524	TASK_INIT(&sc->sc_reinittask, 0, wpi_hw_reset, sc);
525	TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc);
526	TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc);
527
528	sc->sc_tq = taskqueue_create("wpi_taskq", M_WAITOK,
529	    taskqueue_thread_enqueue, &sc->sc_tq);
530	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "wpi_taskq");
531	if (error != 0) {
532		device_printf(dev, "can't start threads, error %d\n", error);
533		goto fail;
534	}
535
536	wpi_sysctlattach(sc);
537
538	/*
539	 * Hook our interrupt after all initialization is complete.
540	 */
541	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
542	    NULL, wpi_intr, sc, &sc->sc_ih);
543	if (error != 0) {
544		device_printf(dev, "can't establish interrupt, error %d\n",
545		    error);
546		goto fail;
547	}
548
549	if (bootverbose)
550		ieee80211_announce(ic);
551
552#ifdef WPI_DEBUG
553	if (sc->sc_debug & WPI_DEBUG_HW)
554		ieee80211_announce_channels(ic);
555#endif
556
557	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
558	return 0;
559
560fail:	wpi_detach(dev);
561	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
562	return error;
563}
564
565/*
566 * Attach the interface to 802.11 radiotap.
567 */
568static void
569wpi_radiotap_attach(struct wpi_softc *sc)
570{
571	struct wpi_rx_radiotap_header *rxtap = &sc->sc_rxtap;
572	struct wpi_tx_radiotap_header *txtap = &sc->sc_txtap;
573
574	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
575	ieee80211_radiotap_attach(&sc->sc_ic,
576	    &txtap->wt_ihdr, sizeof(*txtap), WPI_TX_RADIOTAP_PRESENT,
577	    &rxtap->wr_ihdr, sizeof(*rxtap), WPI_RX_RADIOTAP_PRESENT);
578	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
579}
580
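/*
 * Attach the "debug" sysctl knob (WPI_DEBUG builds only).
 */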
581static void
582wpi_sysctlattach(struct wpi_softc *sc)
583{
584#ifdef WPI_DEBUG
585	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
586	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
587
588	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
589	    "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug,
590		"control debugging printfs");
591#endif
592}
593
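/*
 * Pre-initialize the constant fields of the SET_BEACON command buffer.
 */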
594static void
595wpi_init_beacon(struct wpi_vap *wvp)
596{
597	struct wpi_buf *bcn = &wvp->wv_bcbuf;
598	struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data;
599
600	cmd->id = WPI_ID_BROADCAST;
601	cmd->ofdm_mask = 0xff;
602	cmd->cck_mask = 0x0f;
603	cmd->lifetime = htole32(WPI_LIFETIME_INFINITE);
604
605	/*
606	 * XXX WPI_TX_AUTO_SEQ seems to be ignored - work around this issue
607	 * XXX by using WPI_TX_NEED_ACK instead (with some side effects).
608	 */
609	cmd->flags = htole32(WPI_TX_NEED_ACK | WPI_TX_INSERT_TSTAMP);
610
611	bcn->code = WPI_CMD_SET_BEACON;
612	bcn->ac = WPI_CMD_QUEUE_NUM;
613	bcn->size = sizeof(struct wpi_cmd_beacon);
614}
615
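/*
 * Create a vap (only one is supported at a time) and override the
 * default net80211 methods with driver-specific versions.
 */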
616static struct ieee80211vap *
617wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
618    enum ieee80211_opmode opmode, int flags,
619    const uint8_t bssid[IEEE80211_ADDR_LEN],
620    const uint8_t mac[IEEE80211_ADDR_LEN])
621{
622	struct wpi_vap *wvp;
623	struct ieee80211vap *vap;
624
625	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
626		return NULL;
627
628	wvp = malloc(sizeof(struct wpi_vap), M_80211_VAP, M_WAITOK | M_ZERO);
629	vap = &wvp->wv_vap;
630	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
631
632	if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) {
633		WPI_VAP_LOCK_INIT(wvp);
634		wpi_init_beacon(wvp);
635	}
636
637	/* Override with driver methods. */
638	vap->iv_key_set = wpi_key_set;
639	vap->iv_key_delete = wpi_key_delete;
640	wvp->wv_recv_mgmt = vap->iv_recv_mgmt;
641	vap->iv_recv_mgmt = wpi_recv_mgmt;
642	wvp->wv_newstate = vap->iv_newstate;
643	vap->iv_newstate = wpi_newstate;
644	vap->iv_update_beacon = wpi_update_beacon;
645	vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1;
646
647	ieee80211_ratectl_init(vap);
648	/* Complete setup. */
649	ieee80211_vap_attach(vap, ieee80211_media_change,
650	    ieee80211_media_status, mac);
651	ic->ic_opmode = opmode;
652	return vap;
653}
654
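/*
 * Destroy a vap: detach it from net80211 and, for IBSS/HOSTAP vaps,
 * release the beacon buffer.
 */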
655static void
656wpi_vap_delete(struct ieee80211vap *vap)
657{
658	struct wpi_vap *wvp = WPI_VAP(vap);
659	struct wpi_buf *bcn = &wvp->wv_bcbuf;
660	enum ieee80211_opmode opmode = vap->iv_opmode;
661
662	ieee80211_ratectl_deinit(vap);
663	ieee80211_vap_detach(vap);
664
665	if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) {
666		if (bcn->m != NULL)
667			m_freem(bcn->m);
668
669		WPI_VAP_LOCK_DESTROY(wvp);
670	}
671
672	free(wvp, M_80211_VAP);
673}
674
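/*
 * Detach the device: stop the hardware, drain tasks and callouts,
 * detach from net80211 and release all DMA and bus resources.
 */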
675static int
676wpi_detach(device_t dev)
677{
678	struct wpi_softc *sc = device_get_softc(dev);
679	struct ieee80211com *ic = &sc->sc_ic;
680	int qid;
681
682	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
683
684	if (ic->ic_vap_create == wpi_vap_create) {
685		ieee80211_draintask(ic, &sc->sc_radioon_task);
686
687		wpi_stop(sc);
688
689		if (sc->sc_tq != NULL) {
690			taskqueue_drain_all(sc->sc_tq);
691			taskqueue_free(sc->sc_tq);
692		}
693
694		callout_drain(&sc->watchdog_rfkill);
695		callout_drain(&sc->tx_timeout);
696		callout_drain(&sc->scan_timeout);
697		callout_drain(&sc->calib_to);
698		ieee80211_ifdetach(ic);
699	}
700
701	/* Uninstall interrupt handler. */
702	if (sc->irq != NULL) {
703		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
704		bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq),
705		    sc->irq);
706		pci_release_msi(dev);
707	}
708
709	if (sc->txq[0].data_dmat) {
710		/* Free DMA resources. */
711		for (qid = 0; qid < WPI_NTXQUEUES; qid++)
712			wpi_free_tx_ring(sc, &sc->txq[qid]);
713
714		wpi_free_rx_ring(sc);
715		wpi_free_shared(sc);
716	}
717
718	if (sc->fw_dma.tag)
719		wpi_free_fwmem(sc);
720
721	if (sc->mem != NULL)
722		bus_release_resource(dev, SYS_RES_MEMORY,
723		    rman_get_rid(sc->mem), sc->mem);
724
725	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
726	WPI_TXQ_STATE_LOCK_DESTROY(sc);
727	WPI_TXQ_LOCK_DESTROY(sc);
728	WPI_NT_LOCK_DESTROY(sc);
729	WPI_RXON_LOCK_DESTROY(sc);
730	WPI_TX_LOCK_DESTROY(sc);
731	WPI_LOCK_DESTROY(sc);
732	return 0;
733}
734
735static int
736wpi_shutdown(device_t dev)
737{
738	struct wpi_softc *sc = device_get_softc(dev);
739
740	wpi_stop(sc);
741	return 0;
742}
743
744static int
745wpi_suspend(device_t dev)
746{
747	struct wpi_softc *sc = device_get_softc(dev);
748	struct ieee80211com *ic = &sc->sc_ic;
749
750	ieee80211_suspend_all(ic);
751	return 0;
752}
753
754static int
755wpi_resume(device_t dev)
756{
757	struct wpi_softc *sc = device_get_softc(dev);
758	struct ieee80211com *ic = &sc->sc_ic;
759
760	/* Clear device-specific "PCI retry timeout" register (41h). */
761	pci_write_config(dev, 0x41, 0, 1);
762
763	ieee80211_resume_all(ic);
764	return 0;
765}
766
767/*
768 * Grab exclusive access to NIC memory.
769 */
770static int
771wpi_nic_lock(struct wpi_softc *sc)
772{
773	int ntries;
774
775	/* Request exclusive access to NIC. */
776	WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
777
778	/* Spin until we actually get the lock. */
779	for (ntries = 0; ntries < 1000; ntries++) {
780		if ((WPI_READ(sc, WPI_GP_CNTRL) &
781		    (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) ==
782		    WPI_GP_CNTRL_MAC_ACCESS_ENA)
783			return 0;
784		DELAY(10);
785	}
786
787	device_printf(sc->sc_dev, "could not lock memory\n");
788
789	return ETIMEDOUT;
790}
791
792/*
793 * Release lock on NIC memory.
794 */
795static __inline void
796wpi_nic_unlock(struct wpi_softc *sc)
797{
798	WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
799}
800
801static __inline uint32_t
802wpi_prph_read(struct wpi_softc *sc, uint32_t addr)
803{
804	WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr);
805	WPI_BARRIER_READ_WRITE(sc);
806	return WPI_READ(sc, WPI_PRPH_RDATA);
807}
808
809static __inline void
810wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data)
811{
812	WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr);
813	WPI_BARRIER_WRITE(sc);
814	WPI_WRITE(sc, WPI_PRPH_WDATA, data);
815}
816
817static __inline void
818wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask)
819{
820	wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask);
821}
822
823static __inline void
824wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask)
825{
826	wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask);
827}
828
829static __inline void
830wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr,
831    const uint32_t *data, int count)
832{
833	for (; count > 0; count--, data++, addr += 4)
834		wpi_prph_write(sc, addr, *data);
835}
836
837static __inline uint32_t
838wpi_mem_read(struct wpi_softc *sc, uint32_t addr)
839{
840	WPI_WRITE(sc, WPI_MEM_RADDR, addr);
841	WPI_BARRIER_READ_WRITE(sc);
842	return WPI_READ(sc, WPI_MEM_RDATA);
843}
844
845static __inline void
846wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data,
847    int count)
848{
849	for (; count > 0; count--, addr += 4)
850		*data++ = wpi_mem_read(sc, addr);
851}
852
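/*
 * Read `count' bytes of data from the EEPROM starting at offset `addr',
 * two bytes at a time, with the NIC locked.
 */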
853static int
854wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count)
855{
856	uint8_t *out = data;
857	uint32_t val;
858	int error, ntries;
859
860	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
861
862	if ((error = wpi_nic_lock(sc)) != 0)
863		return error;
864
865	for (; count > 0; count -= 2, addr++) {
866		WPI_WRITE(sc, WPI_EEPROM, addr << 2);
867		for (ntries = 0; ntries < 10; ntries++) {
868			val = WPI_READ(sc, WPI_EEPROM);
869			if (val & WPI_EEPROM_READ_VALID)
870				break;
871			DELAY(5);
872		}
873		if (ntries == 10) {
874			device_printf(sc->sc_dev,
875			    "timeout reading ROM at 0x%x\n", addr);
876			return ETIMEDOUT;
877		}
878		*out++ = val >> 16;
879		if (count > 1)
880			*out++ = val >> 24;
881	}
882
883	wpi_nic_unlock(sc);
884
885	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
886
887	return 0;
888}
889
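/*
 * bus_dmamap_load() callback: record the physical address of the single
 * DMA segment.
 */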
890static void
891wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
892{
893	if (error != 0)
894		return;
895	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
896	*(bus_addr_t *)arg = segs[0].ds_addr;
897}
898
899/*
900 * Allocates a contiguous block of dma memory of the requested size and
901 * alignment.
902 */
903static int
904wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma,
905    void **kvap, bus_size_t size, bus_size_t alignment)
906{
907	int error;
908
909	dma->tag = NULL;
910	dma->size = size;
911
912	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
913	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
914	    1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
915	if (error != 0)
916		goto fail;
917
918	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
919	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
920	if (error != 0)
921		goto fail;
922
923	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
924	    wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
925	if (error != 0)
926		goto fail;
927
928	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
929
930	if (kvap != NULL)
931		*kvap = dma->vaddr;
932
933	return 0;
934
935fail:	wpi_dma_contig_free(dma);
936	return error;
937}
938
939static void
940wpi_dma_contig_free(struct wpi_dma_info *dma)
941{
942	if (dma->vaddr != NULL) {
943		bus_dmamap_sync(dma->tag, dma->map,
944		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
945		bus_dmamap_unload(dma->tag, dma->map);
946		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
947		dma->vaddr = NULL;
948	}
949	if (dma->tag != NULL) {
950		bus_dma_tag_destroy(dma->tag);
951		dma->tag = NULL;
952	}
953}
954
955/*
956 * Allocate a shared page between host and NIC.
957 */
958static int
959wpi_alloc_shared(struct wpi_softc *sc)
960{
961	/* Shared buffer must be aligned on a 4KB boundary. */
962	return wpi_dma_contig_alloc(sc, &sc->shared_dma,
963	    (void **)&sc->shared, sizeof (struct wpi_shared), 4096);
964}
965
966static void
967wpi_free_shared(struct wpi_softc *sc)
968{
969	wpi_dma_contig_free(&sc->shared_dma);
970}
971
972/*
973 * Allocate DMA-safe memory for firmware transfer.
974 */
975static int
976wpi_alloc_fwmem(struct wpi_softc *sc)
977{
978	/* Must be aligned on a 16-byte boundary. */
979	return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL,
980	    WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16);
981}
982
983static void
984wpi_free_fwmem(struct wpi_softc *sc)
985{
986	wpi_dma_contig_free(&sc->fw_dma);
987}
988
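/*
 * Allocate the RX descriptor ring and populate it with jumbo cluster
 * mbufs.
 */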
989static int
990wpi_alloc_rx_ring(struct wpi_softc *sc)
991{
992	struct wpi_rx_ring *ring = &sc->rxq;
993	bus_size_t size;
994	int i, error;
995
996	ring->cur = 0;
997	ring->update = 0;
998
999	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1000
1001	/* Allocate RX descriptors (16KB aligned.) */
1002	size = WPI_RX_RING_COUNT * sizeof (uint32_t);
1003	error = wpi_dma_contig_alloc(sc, &ring->desc_dma,
1004	    (void **)&ring->desc, size, WPI_RING_DMA_ALIGN);
1005	if (error != 0) {
1006		device_printf(sc->sc_dev,
1007		    "%s: could not allocate RX ring DMA memory, error %d\n",
1008		    __func__, error);
1009		goto fail;
1010	}
1011
1012	/* Create RX buffer DMA tag. */
1013	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1014	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1015	    MJUMPAGESIZE, 1, MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL,
1016	    &ring->data_dmat);
1017	if (error != 0) {
1018		device_printf(sc->sc_dev,
1019		    "%s: could not create RX buf DMA tag, error %d\n",
1020		    __func__, error);
1021		goto fail;
1022	}
1023
1024	/*
1025	 * Allocate and map RX buffers.
1026	 */
1027	for (i = 0; i < WPI_RX_RING_COUNT; i++) {
1028		struct wpi_rx_data *data = &ring->data[i];
1029		bus_addr_t paddr;
1030
1031		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1032		if (error != 0) {
1033			device_printf(sc->sc_dev,
1034			    "%s: could not create RX buf DMA map, error %d\n",
1035			    __func__, error);
1036			goto fail;
1037		}
1038
1039		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
1040		if (data->m == NULL) {
1041			device_printf(sc->sc_dev,
1042			    "%s: could not allocate RX mbuf\n", __func__);
1043			error = ENOBUFS;
1044			goto fail;
1045		}
1046
1047		error = bus_dmamap_load(ring->data_dmat, data->map,
1048		    mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr,
1049		    &paddr, BUS_DMA_NOWAIT);
1050		if (error != 0 && error != EFBIG) {
1051			device_printf(sc->sc_dev,
1052			    "%s: can't map mbuf (error %d)\n", __func__,
1053			    error);
1054			goto fail;
1055		}
1056
1057		/* Set physical address of RX buffer. */
1058		ring->desc[i] = htole32(paddr);
1059	}
1060
1061	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1062	    BUS_DMASYNC_PREWRITE);
1063
1064	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
1065
1066	return 0;
1067
1068fail:	wpi_free_rx_ring(sc);
1069
1070	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1071
1072	return error;
1073}
1074
1075static void
1076wpi_update_rx_ring(struct wpi_softc *sc)
1077{
1078	WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7);
1079}
1080
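/*
 * Power-save aware version of wpi_update_rx_ring(): if the NIC is
 * asleep, request a wakeup and defer the update until INT_WAKEUP.
 */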
1081static void
1082wpi_update_rx_ring_ps(struct wpi_softc *sc)
1083{
1084	struct wpi_rx_ring *ring = &sc->rxq;
1085
1086	if (ring->update != 0) {
1087		/* Wait for INT_WAKEUP event. */
1088		return;
1089	}
1090
1091	WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
1092	if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) {
1093		DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n",
1094		    __func__);
1095		ring->update = 1;
1096	} else {
1097		wpi_update_rx_ring(sc);
1098		WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
1099	}
1100}
1101
1102static void
1103wpi_reset_rx_ring(struct wpi_softc *sc)
1104{
1105	struct wpi_rx_ring *ring = &sc->rxq;
1106	int ntries;
1107
1108	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
1109
1110	if (wpi_nic_lock(sc) == 0) {
1111		WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0);
1112		for (ntries = 0; ntries < 1000; ntries++) {
1113			if (WPI_READ(sc, WPI_FH_RX_STATUS) &
1114			    WPI_FH_RX_STATUS_IDLE)
1115				break;
1116			DELAY(10);
1117		}
1118		wpi_nic_unlock(sc);
1119	}
1120
1121	ring->cur = 0;
1122	ring->update = 0;
1123}
1124
1125static void
1126wpi_free_rx_ring(struct wpi_softc *sc)
1127{
1128	struct wpi_rx_ring *ring = &sc->rxq;
1129	int i;
1130
1131	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
1132
1133	wpi_dma_contig_free(&ring->desc_dma);
1134
1135	for (i = 0; i < WPI_RX_RING_COUNT; i++) {
1136		struct wpi_rx_data *data = &ring->data[i];
1137
1138		if (data->m != NULL) {
1139			bus_dmamap_sync(ring->data_dmat, data->map,
1140			    BUS_DMASYNC_POSTREAD);
1141			bus_dmamap_unload(ring->data_dmat, data->map);
1142			m_freem(data->m);
1143			data->m = NULL;
1144		}
1145		if (data->map != NULL)
1146			bus_dmamap_destroy(ring->data_dmat, data->map);
1147	}
1148	if (ring->data_dmat != NULL) {
1149		bus_dma_tag_destroy(ring->data_dmat);
1150		ring->data_dmat = NULL;
1151	}
1152}
1153
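/*
 * Allocate a TX ring: descriptors, command buffers (EDCA and command
 * queues only) and per-entry DMA maps.
 */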
1154static int
1155wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, int qid)
1156{
1157	bus_addr_t paddr;
1158	bus_size_t size;
1159	int i, error;
1160
1161	ring->qid = qid;
1162	ring->queued = 0;
1163	ring->cur = 0;
1164	ring->update = 0;
1165
1166	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1167
1168	/* Allocate TX descriptors (16KB aligned.) */
1169	size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc);
1170	error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1171	    size, WPI_RING_DMA_ALIGN);
1172	if (error != 0) {
1173		device_printf(sc->sc_dev,
1174		    "%s: could not allocate TX ring DMA memory, error %d\n",
1175		    __func__, error);
1176		goto fail;
1177	}
1178
1179	/* Update shared area with ring physical address. */
1180	sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr);
1181	bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map,
1182	    BUS_DMASYNC_PREWRITE);
1183
1184	/*
1185	 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need
1186	 * to allocate command space for other rings.
1187	 * XXX Do we really need to allocate descriptors for other rings?
1188	 */
1189	if (qid > WPI_CMD_QUEUE_NUM) {
1190		DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
1191		return 0;
1192	}
1193
1194	size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd);
1195	error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
1196	    size, 4);
1197	if (error != 0) {
1198		device_printf(sc->sc_dev,
1199		    "%s: could not allocate TX cmd DMA memory, error %d\n",
1200		    __func__, error);
1201		goto fail;
1202	}
1203
1204	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1205	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1206	    WPI_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
1207	    &ring->data_dmat);
1208	if (error != 0) {
1209		device_printf(sc->sc_dev,
1210		    "%s: could not create TX buf DMA tag, error %d\n",
1211		    __func__, error);
1212		goto fail;
1213	}
1214
1215	paddr = ring->cmd_dma.paddr;
1216	for (i = 0; i < WPI_TX_RING_COUNT; i++) {
1217		struct wpi_tx_data *data = &ring->data[i];
1218
1219		data->cmd_paddr = paddr;
1220		paddr += sizeof (struct wpi_tx_cmd);
1221
1222		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1223		if (error != 0) {
1224			device_printf(sc->sc_dev,
1225			    "%s: could not create TX buf DMA map, error %d\n",
1226			    __func__, error);
1227			goto fail;
1228		}
1229	}
1230
1231	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
1232
1233	return 0;
1234
1235fail:	wpi_free_tx_ring(sc, ring);
1236	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1237	return error;
1238}
1239
1240static void
1241wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
1242{
1243	WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
1244}
1245
1246static void
1247wpi_update_tx_ring_ps(struct wpi_softc *sc, struct wpi_tx_ring *ring)
1248{
1249
1250	if (ring->update != 0) {
1251		/* Wait for INT_WAKEUP event. */
1252		return;
1253	}
1254
1255	WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
1256	if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) {
1257		DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n",
1258		    __func__, ring->qid);
1259		ring->update = 1;
1260	} else {
1261		wpi_update_tx_ring(sc, ring);
1262		WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
1263	}
1264}
1265
1266static void
1267wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
1268{
1269	int i;
1270
1271	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
1272
1273	for (i = 0; i < WPI_TX_RING_COUNT; i++) {
1274		struct wpi_tx_data *data = &ring->data[i];
1275
1276		if (data->m != NULL) {
1277			bus_dmamap_sync(ring->data_dmat, data->map,
1278			    BUS_DMASYNC_POSTWRITE);
1279			bus_dmamap_unload(ring->data_dmat, data->map);
1280			m_freem(data->m);
1281			data->m = NULL;
1282		}
1283		if (data->ni != NULL) {
1284			ieee80211_free_node(data->ni);
1285			data->ni = NULL;
1286		}
1287	}
1288	/* Clear TX descriptors. */
1289	memset(ring->desc, 0, ring->desc_dma.size);
1290	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1291	    BUS_DMASYNC_PREWRITE);
1292	ring->queued = 0;
1293	ring->cur = 0;
1294	ring->update = 0;
1295}
1296
1297static void
1298wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
1299{
1300	int i;
1301
1302	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
1303
1304	wpi_dma_contig_free(&ring->desc_dma);
1305	wpi_dma_contig_free(&ring->cmd_dma);
1306
1307	for (i = 0; i < WPI_TX_RING_COUNT; i++) {
1308		struct wpi_tx_data *data = &ring->data[i];
1309
1310		if (data->m != NULL) {
1311			bus_dmamap_sync(ring->data_dmat, data->map,
1312			    BUS_DMASYNC_POSTWRITE);
1313			bus_dmamap_unload(ring->data_dmat, data->map);
1314			m_freem(data->m);
1315		}
1316		if (data->map != NULL)
1317			bus_dmamap_destroy(ring->data_dmat, data->map);
1318	}
1319	if (ring->data_dmat != NULL) {
1320		bus_dma_tag_destroy(ring->data_dmat);
1321		ring->data_dmat = NULL;
1322	}
1323}
1324
1325/*
1326 * Extract various information from EEPROM.
1327 */
1328static int
1329wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
1330{
1331#define WPI_CHK(res) do {		\
1332	if ((error = res) != 0)		\
1333		goto fail;		\
1334} while (0)
1335	int error, i;
1336
1337	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1338
1339	/* Adapter has to be powered on for EEPROM access to work. */
1340	if ((error = wpi_apm_init(sc)) != 0) {
1341		device_printf(sc->sc_dev,
1342		    "%s: could not power ON adapter, error %d\n", __func__,
1343		    error);
1344		return error;
1345	}
1346
1347	if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) {
1348		device_printf(sc->sc_dev, "bad EEPROM signature\n");
1349		error = EIO;
1350		goto fail;
1351	}
1352	/* Clear HW ownership of EEPROM. */
1353	WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER);
1354
1355	/* Read the hardware capabilities, revision and SKU type. */
1356	WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap,
1357	    sizeof(sc->cap)));
1358	WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev,
1359	    sizeof(sc->rev)));
1360	WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type,
1361	    sizeof(sc->type)));
1362
1363	sc->rev = le16toh(sc->rev);
1364	DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap,
1365	    sc->rev, sc->type);
1366
1367	/* Read the regulatory domain (4 ASCII characters.) */
1368	WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain,
1369	    sizeof(sc->domain)));
1370
1371	/* Read MAC address. */
1372	WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr,
1373	    IEEE80211_ADDR_LEN));
1374
1375	/* Read the list of authorized channels. */
1376	for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++)
1377		WPI_CHK(wpi_read_eeprom_channels(sc, i));
1378
1379	/* Read the list of TX power groups. */
1380	for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++)
1381		WPI_CHK(wpi_read_eeprom_group(sc, i));
1382
1383fail:	wpi_apm_stop(sc);	/* Power OFF adapter. */
1384
1385	DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END,
1386	    __func__);
1387
1388	return error;
1389#undef WPI_CHK
1390}
1391
1392/*
1393 * Translate EEPROM flags to net80211.
1394 */
1395static uint32_t
1396wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel)
1397{
1398	uint32_t nflags;
1399
1400	nflags = 0;
1401	if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0)
1402		nflags |= IEEE80211_CHAN_PASSIVE;
1403	if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0)
1404		nflags |= IEEE80211_CHAN_NOADHOC;
1405	if (channel->flags & WPI_EEPROM_CHAN_RADAR) {
1406		nflags |= IEEE80211_CHAN_DFS;
1407		/* XXX apparently IBSS may still be marked */
1408		nflags |= IEEE80211_CHAN_NOADHOC;
1409	}
1410
1411	/* XXX HOSTAP uses WPI_MODE_IBSS */
1412	if (nflags & IEEE80211_CHAN_NOADHOC)
1413		nflags |= IEEE80211_CHAN_NOHOSTAP;
1414
1415	return nflags;
1416}
1417
1418static void
1419wpi_read_eeprom_band(struct wpi_softc *sc, int n)
1420{
1421	struct ieee80211com *ic = &sc->sc_ic;
1422	struct wpi_eeprom_chan *channels = sc->eeprom_channels[n];
1423	const struct wpi_chan_band *band = &wpi_bands[n];
1424	struct ieee80211_channel *c;
1425	uint8_t chan;
1426	int i, nflags;
1427
1428	for (i = 0; i < band->nchan; i++) {
1429		if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) {
1430			DPRINTF(sc, WPI_DEBUG_EEPROM,
1431			    "Channel Not Valid: %d, band %d\n",
1432			     band->chan[i],n);
1433			continue;
1434		}
1435
1436		chan = band->chan[i];
1437		nflags = wpi_eeprom_channel_flags(&channels[i]);
1438
1439		c = &ic->ic_channels[ic->ic_nchans++];
1440		c->ic_ieee = chan;
1441		c->ic_maxregpower = channels[i].maxpwr;
1442		c->ic_maxpower = 2*c->ic_maxregpower;
1443
1444		if (n == 0) {	/* 2GHz band */
1445			c->ic_freq = ieee80211_ieee2mhz(chan,
1446			    IEEE80211_CHAN_G);
1447
1448			/* G implies that B is also supported */
1449			c->ic_flags = IEEE80211_CHAN_B | nflags;
1450			c = &ic->ic_channels[ic->ic_nchans++];
1451			c[0] = c[-1];
1452			c->ic_flags = IEEE80211_CHAN_G | nflags;
1453		} else {	/* 5GHz band */
1454			c->ic_freq = ieee80211_ieee2mhz(chan,
1455			    IEEE80211_CHAN_A);
1456
1457			c->ic_flags = IEEE80211_CHAN_A | nflags;
1458		}
1459
1460		/* Save maximum allowed TX power for this channel. */
1461		sc->maxpwr[chan] = channels[i].maxpwr;
1462
1463		DPRINTF(sc, WPI_DEBUG_EEPROM,
1464		    "adding chan %d (%dMHz) flags=0x%x maxpwr=%d passive=%d,"
1465		    " offset %d\n", chan, c->ic_freq,
1466		    channels[i].flags, sc->maxpwr[chan],
1467		    IEEE80211_IS_CHAN_PASSIVE(c), ic->ic_nchans);
1468	}
1469}
1470
1471/**
1472 * Read the eeprom to find out what channels are valid for the given
1473 * band and update net80211 with what we find.
1474 */
1475static int
1476wpi_read_eeprom_channels(struct wpi_softc *sc, int n)
1477{
1478	struct ieee80211com *ic = &sc->sc_ic;
1479	const struct wpi_chan_band *band = &wpi_bands[n];
1480	int error;
1481
1482	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1483
1484	error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n],
1485	    band->nchan * sizeof (struct wpi_eeprom_chan));
1486	if (error != 0) {
1487		DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1488		return error;
1489	}
1490
1491	wpi_read_eeprom_band(sc, n);
1492
1493	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
1494
1495	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
1496
1497	return 0;
1498}
1499
1500static struct wpi_eeprom_chan *
1501wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c)
1502{
1503	int i, j;
1504
1505	for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++)
1506		for (i = 0; i < wpi_bands[j].nchan; i++)
1507			if (wpi_bands[j].chan[i] == c->ic_ieee)
1508				return &sc->eeprom_channels[j][i];
1509
1510	return NULL;
1511}
1512
1513/*
1514 * Enforce flags read from EEPROM.
1515 */
1516static int
1517wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
1518    int nchan, struct ieee80211_channel chans[])
1519{
1520	struct wpi_softc *sc = ic->ic_softc;
1521	int i;
1522
1523	for (i = 0; i < nchan; i++) {
1524		struct ieee80211_channel *c = &chans[i];
1525		struct wpi_eeprom_chan *channel;
1526
1527		channel = wpi_find_eeprom_channel(sc, c);
1528		if (channel == NULL) {
1529			ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n",
1530			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
1531			return EINVAL;
1532		}
1533		c->ic_flags |= wpi_eeprom_channel_flags(channel);
1534	}
1535
1536	return 0;
1537}
1538
1539static int
1540wpi_read_eeprom_group(struct wpi_softc *sc, int n)
1541{
1542	struct wpi_power_group *group = &sc->groups[n];
1543	struct wpi_eeprom_group rgroup;
1544	int i, error;
1545
1546	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1547
1548	if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32,
1549	    &rgroup, sizeof rgroup)) != 0) {
1550		DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1551		return error;
1552	}
1553
1554	/* Save TX power group information. */
1555	group->chan   = rgroup.chan;
1556	group->maxpwr = rgroup.maxpwr;
1557	/* Retrieve temperature at which the samples were taken. */
1558	group->temp   = (int16_t)le16toh(rgroup.temp);
1559
1560	DPRINTF(sc, WPI_DEBUG_EEPROM,
1561	    "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan,
1562	    group->maxpwr, group->temp);
1563
1564	for (i = 0; i < WPI_SAMPLES_COUNT; i++) {
1565		group->samples[i].index = rgroup.samples[i].index;
1566		group->samples[i].power = rgroup.samples[i].power;
1567
1568		DPRINTF(sc, WPI_DEBUG_EEPROM,
1569		    "\tsample %d: index=%d power=%d\n", i,
1570		    group->samples[i].index, group->samples[i].power);
1571	}
1572
1573	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
1574
1575	return 0;
1576}
1577
1578static int
1579wpi_add_node_entry_adhoc(struct wpi_softc *sc)
1580{
1581	int newid = WPI_ID_IBSS_MIN;
1582
1583	for (; newid <= WPI_ID_IBSS_MAX; newid++) {
1584		if ((sc->nodesmsk & (1 << newid)) == 0) {
1585			sc->nodesmsk |= 1 << newid;
1586			return newid;
1587		}
1588	}
1589
1590	return WPI_ID_UNDEFINED;
1591}
1592
1593static __inline int
1594wpi_add_node_entry_sta(struct wpi_softc *sc)
1595{
1596	sc->nodesmsk |= 1 << WPI_ID_BSS;
1597
1598	return WPI_ID_BSS;
1599}
1600
1601static __inline int
1602wpi_check_node_entry(struct wpi_softc *sc, uint8_t id)
1603{
1604	if (id == WPI_ID_UNDEFINED)
1605		return 0;
1606
1607	return (sc->nodesmsk >> id) & 1;
1608}
1609
1610static __inline void
1611wpi_clear_node_table(struct wpi_softc *sc)
1612{
1613	sc->nodesmsk = 0;
1614}
1615
1616static __inline void
1617wpi_del_node_entry(struct wpi_softc *sc, uint8_t id)
1618{
1619	sc->nodesmsk &= ~(1 << id);
1620}
1621
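/*
 * Allocate a driver node; the firmware node id is assigned later, when
 * the node is added to the firmware node table.
 */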
1622static struct ieee80211_node *
1623wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
1624{
1625	struct wpi_node *wn;
1626
1627	wn = malloc(sizeof (struct wpi_node), M_80211_NODE,
1628	    M_NOWAIT | M_ZERO);
1629
1630	if (wn == NULL)
1631		return NULL;
1632
1633	wn->id = WPI_ID_UNDEFINED;
1634
1635	return &wn->ni;
1636}
1637
1638static void
1639wpi_node_free(struct ieee80211_node *ni)
1640{
1641	struct wpi_softc *sc = ni->ni_ic->ic_softc;
1642	struct wpi_node *wn = WPI_NODE(ni);
1643
1644	if (wn->id != WPI_ID_UNDEFINED) {
1645		WPI_NT_LOCK(sc);
1646		if (wpi_check_node_entry(sc, wn->id)) {
1647			wpi_del_node_entry(sc, wn->id);
1648			wpi_del_node(sc, ni);
1649		}
1650		WPI_NT_UNLOCK(sc);
1651	}
1652
1653	sc->sc_node_free(ni);
1654}
1655
1656static __inline int
1657wpi_check_bss_filter(struct wpi_softc *sc)
1658{
1659	return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0;
1660}
1661
1662static void
1663wpi_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype,
1664    const struct ieee80211_rx_stats *rxs,
1665    int rssi, int nf)
1666{
1667	struct ieee80211vap *vap = ni->ni_vap;
1668	struct wpi_softc *sc = vap->iv_ic->ic_softc;
1669	struct wpi_vap *wvp = WPI_VAP(vap);
1670	uint64_t ni_tstamp, rx_tstamp;
1671
1672	wvp->wv_recv_mgmt(ni, m, subtype, rxs, rssi, nf);
1673
1674	if (vap->iv_opmode == IEEE80211_M_IBSS &&
1675	    vap->iv_state == IEEE80211_S_RUN &&
1676	    (subtype == IEEE80211_FC0_SUBTYPE_BEACON ||
1677	    subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) {
1678		ni_tstamp = le64toh(ni->ni_tstamp.tsf);
1679		rx_tstamp = le64toh(sc->rx_tstamp);
1680
1681		if (ni_tstamp >= rx_tstamp) {
1682			DPRINTF(sc, WPI_DEBUG_STATE,
1683			    "ibss merge, tsf %ju tstamp %ju\n",
1684			    (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp);
1685			(void) ieee80211_ibss_merge(ni);
1686		}
1687	}
1688}
1689
1690static void
1691wpi_restore_node(void *arg, struct ieee80211_node *ni)
1692{
1693	struct wpi_softc *sc = arg;
1694	struct wpi_node *wn = WPI_NODE(ni);
1695	int error;
1696
1697	WPI_NT_LOCK(sc);
1698	if (wn->id != WPI_ID_UNDEFINED) {
1699		wn->id = WPI_ID_UNDEFINED;
1700		if ((error = wpi_add_ibss_node(sc, ni)) != 0) {
1701			device_printf(sc->sc_dev,
1702			    "%s: could not add IBSS node, error %d\n",
1703			    __func__, error);
1704		}
1705	}
1706	WPI_NT_UNLOCK(sc);
1707}
1708
1709static void
1710wpi_restore_node_table(struct wpi_softc *sc, struct wpi_vap *wvp)
1711{
1712	struct ieee80211com *ic = &sc->sc_ic;
1713
1714	/* Set group keys once. */
1715	WPI_NT_LOCK(sc);
1716	wvp->wv_gtk = 0;
1717	WPI_NT_UNLOCK(sc);
1718
1719	ieee80211_iterate_nodes(&ic->ic_sta, wpi_restore_node, sc);
1720	ieee80211_crypto_reload_keys(ic);
1721}
1722
1723/**
1724 * Called by net80211 whenever there is a change to the 802.11 state machine.
1725 */
1726static int
1727wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
1728{
1729	struct wpi_vap *wvp = WPI_VAP(vap);
1730	struct ieee80211com *ic = vap->iv_ic;
1731	struct wpi_softc *sc = ic->ic_softc;
1732	int error = 0;
1733
1734	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1735
1736	WPI_TXQ_LOCK(sc);
1737	if (nstate > IEEE80211_S_INIT && sc->sc_running == 0) {
1738		DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1739		WPI_TXQ_UNLOCK(sc);
1740
1741		return ENXIO;
1742	}
1743	WPI_TXQ_UNLOCK(sc);
1744
1745	DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__,
1746		ieee80211_state_name[vap->iv_state],
1747		ieee80211_state_name[nstate]);
1748
1749	if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) {
1750		if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) {
1751			device_printf(sc->sc_dev,
1752			    "%s: could not set power saving level\n",
1753			    __func__);
1754			return error;
1755		}
1756
1757		wpi_set_led(sc, WPI_LED_LINK, 1, 0);
1758	}
1759
1760	switch (nstate) {
1761	case IEEE80211_S_SCAN:
1762		WPI_RXON_LOCK(sc);
1763		if (wpi_check_bss_filter(sc) != 0) {
1764			sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
1765			if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
1766				device_printf(sc->sc_dev,
1767				    "%s: could not send RXON\n", __func__);
1768			}
1769		}
1770		WPI_RXON_UNLOCK(sc);
1771		break;
1772
1773	case IEEE80211_S_ASSOC:
1774		if (vap->iv_state != IEEE80211_S_RUN)
1775			break;
1776		/* FALLTHROUGH */
1777	case IEEE80211_S_AUTH:
1778		/*
1779		 * NB: do not optimize AUTH -> AUTH state transitions -
1780		 * this will break powersave with non-QoS AP!
1781		 */
1782
1783		/*
1784		 * The node must be registered in the firmware before auth.
1785		 * Also the associd must be cleared on RUN -> ASSOC
1786		 * transitions.
1787		 */
1788		if ((error = wpi_auth(sc, vap)) != 0) {
1789			device_printf(sc->sc_dev,
1790			    "%s: could not move to AUTH state, error %d\n",
1791			    __func__, error);
1792		}
1793		break;
1794
1795	case IEEE80211_S_RUN:
1796		/*
1797		 * RUN -> RUN transition:
1798		 * STA mode: Just restart the timers.
1799		 * IBSS mode: Process IBSS merge.
1800		 */
1801		if (vap->iv_state == IEEE80211_S_RUN) {
1802			if (vap->iv_opmode != IEEE80211_M_IBSS) {
1803				WPI_RXON_LOCK(sc);
1804				wpi_calib_timeout(sc);
1805				WPI_RXON_UNLOCK(sc);
1806				break;
1807			} else {
1808				/*
1809				 * Drop the BSS_FILTER bit
1810				 * (there is no other way to change the bssid).
1811				 */
1812				WPI_RXON_LOCK(sc);
1813				sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
1814				if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
1815					device_printf(sc->sc_dev,
1816					    "%s: could not send RXON\n",
1817					    __func__);
1818				}
1819				WPI_RXON_UNLOCK(sc);
1820
1821				/* Restore everything that was lost. */
1822				wpi_restore_node_table(sc, wvp);
1823
1824				/* XXX set conditionally? */
1825				wpi_updateedca(ic);
1826			}
1827		}
1828
1829		/*
1830		 * !RUN -> RUN requires setting the association id
1831		 * which is done with a firmware cmd.  We also defer
1832		 * starting the timers until that work is done.
1833		 */
1834		if ((error = wpi_run(sc, vap)) != 0) {
1835			device_printf(sc->sc_dev,
1836			    "%s: could not move to RUN state\n", __func__);
1837		}
1838		break;
1839
1840	default:
1841		break;
1842	}
1843	if (error != 0) {
1844		DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1845		return error;
1846	}
1847
1848	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
1849
1850	return wvp->wv_newstate(vap, nstate, arg);
1851}
1852
1853static void
1854wpi_calib_timeout(void *arg)
1855{
1856	struct wpi_softc *sc = arg;
1857
1858	if (wpi_check_bss_filter(sc) == 0)
1859		return;
1860
1861	wpi_power_calibration(sc);
1862
1863	callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc);
1864}
1865
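/*
 * Convert a net80211 rate (in 500 kb/s units) to the PLCP code used by the
 * firmware: OFDM rates map to the 802.11a SIGNAL rate codes, while CCK rates
 * map to the rate expressed in 100 kb/s units.
 */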
1866static __inline uint8_t
1867rate2plcp(const uint8_t rate)
1868{
1869	switch (rate) {
1870	case 12:	return 0xd;
1871	case 18:	return 0xf;
1872	case 24:	return 0x5;
1873	case 36:	return 0x7;
1874	case 48:	return 0x9;
1875	case 72:	return 0xb;
1876	case 96:	return 0x1;
1877	case 108:	return 0x3;
1878	case 2:		return 10;
1879	case 4:		return 20;
1880	case 11:	return 55;
1881	case 22:	return 110;
1882	default:	return 0;
1883	}
1884}
1885
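/* Inverse of rate2plcp(): convert a firmware PLCP code back to a rate. */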
1886static __inline uint8_t
1887plcp2rate(const uint8_t plcp)
1888{
1889	switch (plcp) {
1890	case 0xd:	return 12;
1891	case 0xf:	return 18;
1892	case 0x5:	return 24;
1893	case 0x7:	return 36;
1894	case 0x9:	return 48;
1895	case 0xb:	return 72;
1896	case 0x1:	return 96;
1897	case 0x3:	return 108;
1898	case 10:	return 2;
1899	case 20:	return 4;
1900	case 55:	return 11;
1901	case 110:	return 22;
1902	default:	return 0;
1903	}
1904}
1905
1906/* Quickly determine if a given rate is CCK or OFDM. */
1907#define WPI_RATE_IS_OFDM(rate)	((rate) >= 12 && (rate) != 22)
1908
1909static void
1910wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc,
1911    struct wpi_rx_data *data)
1912{
1913	struct ieee80211com *ic = &sc->sc_ic;
1914	struct wpi_rx_ring *ring = &sc->rxq;
1915	struct wpi_rx_stat *stat;
1916	struct wpi_rx_head *head;
1917	struct wpi_rx_tail *tail;
1918	struct ieee80211_frame *wh;
1919	struct ieee80211_node *ni;
1920	struct mbuf *m, *m1;
1921	bus_addr_t paddr;
1922	uint32_t flags;
1923	uint16_t len;
1924	int error;
1925
1926	stat = (struct wpi_rx_stat *)(desc + 1);
1927
1928	if (__predict_false(stat->len > WPI_STAT_MAXLEN)) {
1929		device_printf(sc->sc_dev, "invalid RX statistic header\n");
1930		goto fail1;
1931	}
1932
1933	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
1934	head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len);
1935	len = le16toh(head->len);
1936	tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len);
1937	flags = le32toh(tail->flags);
1938
1939	DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d"
1940	    " rate %x chan %d tstamp %ju\n", __func__, ring->cur,
1941	    le32toh(desc->len), len, (int8_t)stat->rssi,
1942	    head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp));
1943
1944	/* Discard frames with a bad FCS early. */
1945	if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) {
1946		DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n",
1947		    __func__, flags);
1948		goto fail1;
1949	}
1950	/* Discard frames that are too short. */
1951	if (len < sizeof (struct ieee80211_frame_ack)) {
1952		DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n",
1953		    __func__, len);
1954		goto fail1;
1955	}
1956
1957	m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
1958	if (__predict_false(m1 == NULL)) {
1959		DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n",
1960		    __func__);
1961		goto fail1;
1962	}
1963	bus_dmamap_unload(ring->data_dmat, data->map);
1964
1965	error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
1966	    MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
1967	if (__predict_false(error != 0 && error != EFBIG)) {
1968		device_printf(sc->sc_dev,
1969		    "%s: bus_dmamap_load failed, error %d\n", __func__, error);
1970		m_freem(m1);
1971
1972		/* Try to reload the old mbuf. */
1973		error = bus_dmamap_load(ring->data_dmat, data->map,
1974		    mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr,
1975		    &paddr, BUS_DMA_NOWAIT);
1976		if (error != 0 && error != EFBIG) {
1977			panic("%s: could not load old RX mbuf", __func__);
1978		}
1979		/* Physical address may have changed. */
1980		ring->desc[ring->cur] = htole32(paddr);
1981		bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1982		    BUS_DMASYNC_PREWRITE);
1983		goto fail1;
1984	}
1985
1986	m = data->m;
1987	data->m = m1;
1988	/* Update RX descriptor. */
1989	ring->desc[ring->cur] = htole32(paddr);
1990	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1991	    BUS_DMASYNC_PREWRITE);
1992
1993	/* Finalize mbuf. */
1994	m->m_data = (caddr_t)(head + 1);
1995	m->m_pkthdr.len = m->m_len = len;
1996
1997	/* Grab a reference to the source node. */
1998	wh = mtod(m, struct ieee80211_frame *);
1999
2000	if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
2001	    (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) {
2002		/* Check whether decryption was successful or not. */
2003		if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) {
2004			DPRINTF(sc, WPI_DEBUG_RECV,
2005			    "CCMP decryption failed 0x%x\n", flags);
2006			goto fail2;
2007		}
2008		m->m_flags |= M_WEP;
2009	}
2010
2011	if (len >= sizeof(struct ieee80211_frame_min))
2012		ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2013	else
2014		ni = NULL;
2015
2016	sc->rx_tstamp = tail->tstamp;
2017
2018	if (ieee80211_radiotap_active(ic)) {
2019		struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap;
2020
2021		tap->wr_flags = 0;
2022		if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE))
2023			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2024		tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET);
2025		tap->wr_dbm_antnoise = WPI_RSSI_OFFSET;
2026		tap->wr_tsft = tail->tstamp;
2027		tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf;
2028		tap->wr_rate = plcp2rate(head->plcp);
2029	}
2030
2031	WPI_UNLOCK(sc);
2032
2033	/* Send the frame to the 802.11 layer. */
2034	if (ni != NULL) {
2035		(void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET);
2036		/* Node is no longer needed. */
2037		ieee80211_free_node(ni);
2038	} else
2039		(void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET);
2040
2041	WPI_LOCK(sc);
2042
2043	return;
2044
2045fail2:	m_freem(m);
2046
2047fail1:	counter_u64_add(ic->ic_ierrors, 1);
2048}
2049
2050static void
2051wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc,
2052    struct wpi_rx_data *data)
2053{
2054	/* Ignore */
2055}
2056
2057static void
2058wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc)
2059{
2060	struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3];
2061	struct wpi_tx_data *data = &ring->data[desc->idx];
2062	struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1);
2063	struct mbuf *m;
2064	struct ieee80211_node *ni;
2065	struct ieee80211vap *vap;
2066	struct ieee80211com *ic;
2067	uint32_t status = le32toh(stat->status);
2068	int ackfailcnt = stat->ackfailcnt / WPI_NTRIES_DEFAULT;
2069
2070	KASSERT(data->ni != NULL, ("no node"));
2071	KASSERT(data->m != NULL, ("no mbuf"));
2072
2073	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
2074
2075	DPRINTF(sc, WPI_DEBUG_XMIT, "%s: "
2076	    "qid %d idx %d retries %d btkillcnt %d rate %x duration %d "
2077	    "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt,
2078	    stat->btkillcnt, stat->rate, le32toh(stat->duration), status);
2079
2080	/* Unmap and free mbuf. */
2081	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
2082	bus_dmamap_unload(ring->data_dmat, data->map);
2083	m = data->m, data->m = NULL;
2084	ni = data->ni, data->ni = NULL;
2085	vap = ni->ni_vap;
2086	ic = vap->iv_ic;
2087
2088	/*
2089	 * Update rate control statistics for the node.
2090	 */
2091	if (status & WPI_TX_STATUS_FAIL) {
2092		ieee80211_ratectl_tx_complete(vap, ni,
2093		    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2094	} else
2095		ieee80211_ratectl_tx_complete(vap, ni,
2096		    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2097
2098	ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0);
2099
2100	WPI_TXQ_STATE_LOCK(sc);
2101	if (--ring->queued > 0)
2102		callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc);
2103	else
2104		callout_stop(&sc->tx_timeout);
2105	WPI_TXQ_STATE_UNLOCK(sc);
2106
2107	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
2108}
2109
2110/*
2111 * Process a "command done" firmware notification.  This is where we wake up
2112 * processes waiting for a synchronous command to complete.
2113 */
2114static void
2115wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc)
2116{
2117	struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM];
2118	struct wpi_tx_data *data;
2119	struct wpi_tx_cmd *cmd;
2120
2121	DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x "
2122				   "type %s len %d\n", desc->qid, desc->idx,
2123				   desc->flags, wpi_cmd_str(desc->type),
2124				   le32toh(desc->len));
2125
2126	if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM)
2127		return;	/* Not a command ack. */
2128
2129	KASSERT(ring->queued == 0, ("ring->queued must be 0"));
2130
2131	data = &ring->data[desc->idx];
2132	cmd = &ring->cmd[desc->idx];
2133
2134	/* If the command was mapped in an mbuf, free it. */
2135	if (data->m != NULL) {
2136		bus_dmamap_sync(ring->data_dmat, data->map,
2137		    BUS_DMASYNC_POSTWRITE);
2138		bus_dmamap_unload(ring->data_dmat, data->map);
2139		m_freem(data->m);
2140		data->m = NULL;
2141	}
2142
2143	wakeup(cmd);
2144
2145	if (desc->type == WPI_CMD_SET_POWER_MODE) {
2146		struct wpi_pmgt_cmd *pcmd = (struct wpi_pmgt_cmd *)cmd->data;
2147
2148		bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
2149		    BUS_DMASYNC_POSTREAD);
2150
2151		WPI_TXQ_LOCK(sc);
2152		if (le16toh(pcmd->flags) & WPI_PS_ALLOW_SLEEP) {
2153			sc->sc_update_rx_ring = wpi_update_rx_ring_ps;
2154			sc->sc_update_tx_ring = wpi_update_tx_ring_ps;
2155		} else {
2156			sc->sc_update_rx_ring = wpi_update_rx_ring;
2157			sc->sc_update_tx_ring = wpi_update_tx_ring;
2158		}
2159		WPI_TXQ_UNLOCK(sc);
2160	}
2161}
2162
2163static void
2164wpi_notif_intr(struct wpi_softc *sc)
2165{
2166	struct ieee80211com *ic = &sc->sc_ic;
2167	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2168	uint32_t hw;
2169
2170	bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map,
2171	    BUS_DMASYNC_POSTREAD);
2172
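	/*
	 * The shared area holds the index of the RX slot the firmware will
	 * fill next; process every entry up to (but not including) that slot.
	 */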
2173	hw = le32toh(sc->shared->next) & 0xfff;
2174	hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1;
2175
2176	while (sc->rxq.cur != hw) {
2177		sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT;
2178
2179		struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2180		struct wpi_rx_desc *desc;
2181
2182		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2183		    BUS_DMASYNC_POSTREAD);
2184		desc = mtod(data->m, struct wpi_rx_desc *);
2185
2186		DPRINTF(sc, WPI_DEBUG_NOTIFY,
2187		    "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n",
2188		    __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags,
2189		    desc->type, wpi_cmd_str(desc->type), le32toh(desc->len));
2190
2191		if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) {
2192			/* Reply to a command. */
2193			wpi_cmd_done(sc, desc);
2194		}
2195
2196		switch (desc->type) {
2197		case WPI_RX_DONE:
2198			/* An 802.11 frame has been received. */
2199			wpi_rx_done(sc, desc, data);
2200
2201			if (__predict_false(sc->sc_running == 0)) {
2202				/* wpi_stop() was called. */
2203				return;
2204			}
2205
2206			break;
2207
2208		case WPI_TX_DONE:
2209			/* An 802.11 frame has been transmitted. */
2210			wpi_tx_done(sc, desc);
2211			break;
2212
2213		case WPI_RX_STATISTICS:
2214		case WPI_BEACON_STATISTICS:
2215			wpi_rx_statistics(sc, desc, data);
2216			break;
2217
2218		case WPI_BEACON_MISSED:
2219		{
2220			struct wpi_beacon_missed *miss =
2221			    (struct wpi_beacon_missed *)(desc + 1);
2222			uint32_t expected, misses, received, threshold;
2223
2224			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2225			    BUS_DMASYNC_POSTREAD);
2226
2227			misses = le32toh(miss->consecutive);
2228			expected = le32toh(miss->expected);
2229			received = le32toh(miss->received);
2230			threshold = MAX(2, vap->iv_bmissthreshold);
2231
2232			DPRINTF(sc, WPI_DEBUG_BMISS,
2233			    "%s: beacons missed %u(%u) (received %u/%u)\n",
2234			    __func__, misses, le32toh(miss->total), received,
2235			    expected);
2236
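			/*
			 * Once the miss threshold is reached, abort any scan
			 * that is still in progress and, if we are in RUN
			 * state and not scanning, let net80211 handle the
			 * beacon miss.
			 */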
2237			if (misses >= threshold ||
2238			    (received == 0 && expected >= threshold)) {
2239				WPI_RXON_LOCK(sc);
2240				if (callout_pending(&sc->scan_timeout)) {
2241					wpi_cmd(sc, WPI_CMD_SCAN_ABORT, NULL,
2242					    0, 1);
2243				}
2244				WPI_RXON_UNLOCK(sc);
2245				if (vap->iv_state == IEEE80211_S_RUN &&
2246				    (ic->ic_flags & IEEE80211_F_SCAN) == 0)
2247					ieee80211_beacon_miss(ic);
2248			}
2249
2250			break;
2251		}
2252#ifdef WPI_DEBUG
2253		case WPI_BEACON_SENT:
2254		{
2255			struct wpi_tx_stat *stat =
2256			    (struct wpi_tx_stat *)(desc + 1);
2257			uint64_t *tsf = (uint64_t *)(stat + 1);
2258			uint32_t *mode = (uint32_t *)(tsf + 1);
2259
2260			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2261			    BUS_DMASYNC_POSTREAD);
2262
2263			DPRINTF(sc, WPI_DEBUG_BEACON,
2264			    "beacon sent: rts %u, ack %u, btkill %u, rate %u, "
2265			    "duration %u, status %x, tsf %ju, mode %x\n",
2266			    stat->rtsfailcnt, stat->ackfailcnt,
2267			    stat->btkillcnt, stat->rate, le32toh(stat->duration),
2268			    le32toh(stat->status), *tsf, *mode);
2269
2270			break;
2271		}
2272#endif
2273		case WPI_UC_READY:
2274		{
2275			struct wpi_ucode_info *uc =
2276			    (struct wpi_ucode_info *)(desc + 1);
2277
2278			/* The microcontroller is ready. */
2279			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2280			    BUS_DMASYNC_POSTREAD);
2281			DPRINTF(sc, WPI_DEBUG_RESET,
2282			    "microcode alive notification version=%d.%d "
2283			    "subtype=%x alive=%x\n", uc->major, uc->minor,
2284			    uc->subtype, le32toh(uc->valid));
2285
2286			if (le32toh(uc->valid) != 1) {
2287				device_printf(sc->sc_dev,
2288				    "microcontroller initialization failed\n");
2289				wpi_stop_locked(sc);
2290				return;
2291			}
2292			/* Save the address of the error log in SRAM. */
2293			sc->errptr = le32toh(uc->errptr);
2294			break;
2295		}
2296		case WPI_STATE_CHANGED:
2297		{
2298			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2299			    BUS_DMASYNC_POSTREAD);
2300
2301			uint32_t *status = (uint32_t *)(desc + 1);
2302
2303			DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n",
2304			    le32toh(*status));
2305
2306			if (le32toh(*status) & 1) {
2307				WPI_NT_LOCK(sc);
2308				wpi_clear_node_table(sc);
2309				WPI_NT_UNLOCK(sc);
2310				taskqueue_enqueue(sc->sc_tq,
2311				    &sc->sc_radiooff_task);
2312				return;
2313			}
2314			break;
2315		}
2316#ifdef WPI_DEBUG
2317		case WPI_START_SCAN:
2318		{
2319			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2320			    BUS_DMASYNC_POSTREAD);
2321
2322			struct wpi_start_scan *scan =
2323			    (struct wpi_start_scan *)(desc + 1);
2324			DPRINTF(sc, WPI_DEBUG_SCAN,
2325			    "%s: scanning channel %d status %x\n",
2326			    __func__, scan->chan, le32toh(scan->status));
2327
2328			break;
2329		}
2330#endif
2331		case WPI_STOP_SCAN:
2332		{
2333			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2334			    BUS_DMASYNC_POSTREAD);
2335
2336			struct wpi_stop_scan *scan =
2337			    (struct wpi_stop_scan *)(desc + 1);
2338
2339			DPRINTF(sc, WPI_DEBUG_SCAN,
2340			    "scan finished nchan=%d status=%d chan=%d\n",
2341			    scan->nchan, scan->status, scan->chan);
2342
2343			WPI_RXON_LOCK(sc);
2344			callout_stop(&sc->scan_timeout);
2345			WPI_RXON_UNLOCK(sc);
2346			if (scan->status == WPI_SCAN_ABORTED)
2347				ieee80211_cancel_scan(vap);
2348			else
2349				ieee80211_scan_next(vap);
2350			break;
2351		}
2352		}
2353
2354		if (sc->rxq.cur % 8 == 0) {
2355			/* Tell the firmware what we have processed. */
2356			sc->sc_update_rx_ring(sc);
2357		}
2358	}
2359}
2360
2361/*
2362 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
2363 * from power-down sleep mode.
2364 */
2365static void
2366wpi_wakeup_intr(struct wpi_softc *sc)
2367{
2368	int qid;
2369
2370	DPRINTF(sc, WPI_DEBUG_PWRSAVE,
2371	    "%s: ucode wakeup from power-down sleep\n", __func__);
2372
2373	/* Wakeup RX and TX rings. */
2374	if (sc->rxq.update) {
2375		sc->rxq.update = 0;
2376		wpi_update_rx_ring(sc);
2377	}
2378	WPI_TXQ_LOCK(sc);
2379	for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) {
2380		struct wpi_tx_ring *ring = &sc->txq[qid];
2381
2382		if (ring->update) {
2383			ring->update = 0;
2384			wpi_update_tx_ring(sc, ring);
2385		}
2386	}
2387	WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
2388	WPI_TXQ_UNLOCK(sc);
2389}
2390
2391/*
2392 * This function prints the firmware registers.
2393 */
2394#ifdef WPI_DEBUG
2395static void
2396wpi_debug_registers(struct wpi_softc *sc)
2397{
2398	size_t i;
2399	static const uint32_t csr_tbl[] = {
2400		WPI_HW_IF_CONFIG,
2401		WPI_INT,
2402		WPI_INT_MASK,
2403		WPI_FH_INT,
2404		WPI_GPIO_IN,
2405		WPI_RESET,
2406		WPI_GP_CNTRL,
2407		WPI_EEPROM,
2408		WPI_EEPROM_GP,
2409		WPI_GIO,
2410		WPI_UCODE_GP1,
2411		WPI_UCODE_GP2,
2412		WPI_GIO_CHICKEN,
2413		WPI_ANA_PLL,
2414		WPI_DBG_HPET_MEM,
2415	};
2416	static const uint32_t prph_tbl[] = {
2417		WPI_APMG_CLK_CTRL,
2418		WPI_APMG_PS,
2419		WPI_APMG_PCI_STT,
2420		WPI_APMG_RFKILL,
2421	};
2422
2423	DPRINTF(sc, WPI_DEBUG_REGISTER, "%s", "\n");
2424
2425	for (i = 0; i < nitems(csr_tbl); i++) {
2426		DPRINTF(sc, WPI_DEBUG_REGISTER, "  %-18s: 0x%08x ",
2427		    wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i]));
2428
2429		if ((i + 1) % 2 == 0)
2430			DPRINTF(sc, WPI_DEBUG_REGISTER, "\n");
2431	}
2432	DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n");
2433
2434	if (wpi_nic_lock(sc) == 0) {
2435		for (i = 0; i < nitems(prph_tbl); i++) {
2436			DPRINTF(sc, WPI_DEBUG_REGISTER, "  %-18s: 0x%08x ",
2437			    wpi_get_prph_string(prph_tbl[i]),
2438			    wpi_prph_read(sc, prph_tbl[i]));
2439
2440			if ((i + 1) % 2 == 0)
2441				DPRINTF(sc, WPI_DEBUG_REGISTER, "\n");
2442		}
2443		DPRINTF(sc, WPI_DEBUG_REGISTER, "\n");
2444		wpi_nic_unlock(sc);
2445	} else {
2446		DPRINTF(sc, WPI_DEBUG_REGISTER,
2447		    "Cannot access internal registers.\n");
2448	}
2449}
2450#endif
2451
2452/*
2453 * Dump the error log of the firmware when a firmware panic occurs.  Although
2454 * we can't debug the firmware because it is neither open source nor free, it
2455 * can help us to identify certain classes of problems.
2456 */
2457static void
2458wpi_fatal_intr(struct wpi_softc *sc)
2459{
2460	struct wpi_fw_dump dump;
2461	uint32_t i, offset, count;
2462
2463	/* Check that the error log address is valid. */
2464	if (sc->errptr < WPI_FW_DATA_BASE ||
2465	    sc->errptr + sizeof (dump) >
2466	    WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) {
2467		printf("%s: bad firmware error log address 0x%08x\n", __func__,
2468		    sc->errptr);
2469		return;
2470	}
2471	if (wpi_nic_lock(sc) != 0) {
2472		printf("%s: could not read firmware error log\n", __func__);
2473		return;
2474	}
2475	/* Read number of entries in the log. */
2476	count = wpi_mem_read(sc, sc->errptr);
2477	if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) {
2478		printf("%s: invalid count field (count = %u)\n", __func__,
2479		    count);
2480		wpi_nic_unlock(sc);
2481		return;
2482	}
2483	/* Skip "count" field. */
2484	offset = sc->errptr + sizeof (uint32_t);
2485	printf("firmware error log (count = %u):\n", count);
2486	for (i = 0; i < count; i++) {
2487		wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump,
2488		    sizeof (dump) / sizeof (uint32_t));
2489
2490		printf("  error type = \"%s\" (0x%08X)\n",
2491		    (dump.desc < nitems(wpi_fw_errmsg)) ?
2492		        wpi_fw_errmsg[dump.desc] : "UNKNOWN",
2493		    dump.desc);
2494		printf("  error data      = 0x%08X\n",
2495		    dump.data);
2496		printf("  branch link     = 0x%08X%08X\n",
2497		    dump.blink[0], dump.blink[1]);
2498		printf("  interrupt link  = 0x%08X%08X\n",
2499		    dump.ilink[0], dump.ilink[1]);
2500		printf("  time            = %u\n", dump.time);
2501
2502		offset += sizeof (dump);
2503	}
2504	wpi_nic_unlock(sc);
2505	/* Dump driver status (TX and RX rings) while we're here. */
2506	printf("driver status:\n");
2507	WPI_TXQ_LOCK(sc);
2508	for (i = 0; i < WPI_DRV_NTXQUEUES; i++) {
2509		struct wpi_tx_ring *ring = &sc->txq[i];
2510		printf("  tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
2511		    i, ring->qid, ring->cur, ring->queued);
2512	}
2513	WPI_TXQ_UNLOCK(sc);
2514	printf("  rx ring: cur=%d\n", sc->rxq.cur);
2515}
2516
2517static void
2518wpi_intr(void *arg)
2519{
2520	struct wpi_softc *sc = arg;
2521	uint32_t r1, r2;
2522
2523	WPI_LOCK(sc);
2524
2525	/* Disable interrupts. */
2526	WPI_WRITE(sc, WPI_INT_MASK, 0);
2527
2528	r1 = WPI_READ(sc, WPI_INT);
2529
2530	if (__predict_false(r1 == 0xffffffff ||
2531			   (r1 & 0xfffffff0) == 0xa5a5a5a0))
2532		goto end;	/* Hardware gone! */
2533
2534	r2 = WPI_READ(sc, WPI_FH_INT);
2535
2536	DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__,
2537	    r1, r2);
2538
2539	if (r1 == 0 && r2 == 0)
2540		goto done;	/* Interrupt not for us. */
2541
2542	/* Acknowledge interrupts. */
2543	WPI_WRITE(sc, WPI_INT, r1);
2544	WPI_WRITE(sc, WPI_FH_INT, r2);
2545
2546	if (__predict_false(r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR))) {
2547		device_printf(sc->sc_dev, "fatal firmware error\n");
2548#ifdef WPI_DEBUG
2549		wpi_debug_registers(sc);
2550#endif
2551		wpi_fatal_intr(sc);
2552		DPRINTF(sc, WPI_DEBUG_HW,
2553		    "%s\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" :
2554		    "(Hardware Error)");
2555		taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask);
2556		goto end;
2557	}
2558
2559	if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) ||
2560	    (r2 & WPI_FH_INT_RX))
2561		wpi_notif_intr(sc);
2562
2563	if (r1 & WPI_INT_ALIVE)
2564		wakeup(sc);	/* Firmware is alive. */
2565
2566	if (r1 & WPI_INT_WAKEUP)
2567		wpi_wakeup_intr(sc);
2568
2569done:
2570	/* Re-enable interrupts. */
2571	if (__predict_true(sc->sc_running))
2572		WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF);
2573
2574end:	WPI_UNLOCK(sc);
2575}
2576
2577static int
2578wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf)
2579{
2580	struct ieee80211_frame *wh;
2581	struct wpi_tx_cmd *cmd;
2582	struct wpi_tx_data *data;
2583	struct wpi_tx_desc *desc;
2584	struct wpi_tx_ring *ring;
2585	struct mbuf *m1;
2586	bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER];
2587	int error, i, hdrlen, nsegs, totlen, pad;
2588
2589	WPI_TXQ_LOCK(sc);
2590
2591	KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow"));
2592
2593	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
2594
2595	if (__predict_false(sc->sc_running == 0)) {
2596		/* wpi_stop() was called */
2597		error = ENETDOWN;
2598		goto fail;
2599	}
2600
2601	wh = mtod(buf->m, struct ieee80211_frame *);
2602	hdrlen = ieee80211_anyhdrsize(wh);
2603	totlen = buf->m->m_pkthdr.len;
2604
2605	if (hdrlen & 3) {
2606		/* First segment length must be a multiple of 4. */
2607		pad = 4 - (hdrlen & 3);
2608	} else
2609		pad = 0;
2610
2611	ring = &sc->txq[buf->ac];
2612	desc = &ring->desc[ring->cur];
2613	data = &ring->data[ring->cur];
2614
2615	/* Prepare TX firmware command. */
2616	cmd = &ring->cmd[ring->cur];
2617	cmd->code = buf->code;
2618	cmd->flags = 0;
2619	cmd->qid = ring->qid;
2620	cmd->idx = ring->cur;
2621
2622	memcpy(cmd->data, buf->data, buf->size);
2623
2624	/* Save and trim IEEE802.11 header. */
2625	memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen);
2626	m_adj(buf->m, hdrlen);
2627
2628	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m,
2629	    segs, &nsegs, BUS_DMA_NOWAIT);
2630	if (error != 0 && error != EFBIG) {
2631		device_printf(sc->sc_dev,
2632		    "%s: can't map mbuf (error %d)\n", __func__, error);
2633		goto fail;
2634	}
2635	if (error != 0) {
2636		/* Too many DMA segments, linearize mbuf. */
2637		m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1);
2638		if (m1 == NULL) {
2639			device_printf(sc->sc_dev,
2640			    "%s: could not defrag mbuf\n", __func__);
2641			error = ENOBUFS;
2642			goto fail;
2643		}
2644		buf->m = m1;
2645
2646		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map,
2647		    buf->m, segs, &nsegs, BUS_DMA_NOWAIT);
2648		if (__predict_false(error != 0)) {
2649			device_printf(sc->sc_dev,
2650			    "%s: can't map mbuf (error %d)\n", __func__,
2651			    error);
2652			goto fail;
2653		}
2654	}
2655
2656	KASSERT(nsegs < WPI_MAX_SCATTER,
2657	    ("too many DMA segments, nsegs (%d) should be less than %d",
2658	     nsegs, WPI_MAX_SCATTER));
2659
2660	data->m = buf->m;
2661	data->ni = buf->ni;
2662
2663	DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
2664	    __func__, ring->qid, ring->cur, totlen, nsegs);
2665
2666	/* Fill TX descriptor. */
2667	desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs);
2668	/* First DMA segment is used by the TX command. */
2669	desc->segs[0].addr = htole32(data->cmd_paddr);
2670	desc->segs[0].len  = htole32(4 + buf->size + hdrlen + pad);
2671	/* Other DMA segments are for data payload. */
2672	seg = &segs[0];
2673	for (i = 1; i <= nsegs; i++) {
2674		desc->segs[i].addr = htole32(seg->ds_addr);
2675		desc->segs[i].len  = htole32(seg->ds_len);
2676		seg++;
2677	}
2678
2679	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
2680	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
2681	    BUS_DMASYNC_PREWRITE);
2682	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2683	    BUS_DMASYNC_PREWRITE);
2684
2685	/* Kick TX ring. */
2686	ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT;
2687	sc->sc_update_tx_ring(sc, ring);
2688
2689	if (ring->qid < WPI_CMD_QUEUE_NUM) {
2690		WPI_TXQ_STATE_LOCK(sc);
2691		ring->queued++;
2692		callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc);
2693		WPI_TXQ_STATE_UNLOCK(sc);
2694	}
2695
2696	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
2697
2698	WPI_TXQ_UNLOCK(sc);
2699
2700	return 0;
2701
2702fail:	m_freem(buf->m);
2703
2704	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
2705
2706	WPI_TXQ_UNLOCK(sc);
2707
2708	return error;
2709}
2710
2711/*
2712 * Construct the data packet for a transmit buffer.
2713 */
2714static int
2715wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
2716{
2717	const struct ieee80211_txparam *tp;
2718	struct ieee80211vap *vap = ni->ni_vap;
2719	struct ieee80211com *ic = ni->ni_ic;
2720	struct wpi_node *wn = WPI_NODE(ni);
2721	struct ieee80211_channel *chan;
2722	struct ieee80211_frame *wh;
2723	struct ieee80211_key *k = NULL;
2724	struct wpi_buf tx_data;
2725	struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data;
2726	uint32_t flags;
2727	uint16_t qos;
2728	uint8_t tid, type;
2729	int ac, error, swcrypt, rate, ismcast, totlen;
2730
2731	wh = mtod(m, struct ieee80211_frame *);
2732	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2733	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
2734
2735	/* Select EDCA Access Category and TX ring for this frame. */
2736	if (IEEE80211_QOS_HAS_SEQ(wh)) {
2737		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
2738		tid = qos & IEEE80211_QOS_TID;
2739	} else {
2740		qos = 0;
2741		tid = 0;
2742	}
2743	ac = M_WME_GETAC(m);
2744
2745	chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ?
2746		ni->ni_chan : ic->ic_curchan;
2747	tp = &vap->iv_txparms[ieee80211_chan2mode(chan)];
2748
2749	/* Choose a TX rate index. */
2750	if (type == IEEE80211_FC0_TYPE_MGT)
2751		rate = tp->mgmtrate;
2752	else if (ismcast)
2753		rate = tp->mcastrate;
2754	else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
2755		rate = tp->ucastrate;
2756	else if (m->m_flags & M_EAPOL)
2757		rate = tp->mgmtrate;
2758	else {
2759		/* XXX pass pktlen */
2760		(void) ieee80211_ratectl_rate(ni, NULL, 0);
2761		rate = ni->ni_txrate;
2762	}
2763
2764	/* Encrypt the frame if need be. */
2765	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
2766		/* Retrieve key for TX. */
2767		k = ieee80211_crypto_encap(ni, m);
2768		if (k == NULL) {
2769			error = ENOBUFS;
2770			goto fail;
2771		}
2772		swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT;
2773
2774		/* 802.11 header may have moved. */
2775		wh = mtod(m, struct ieee80211_frame *);
2776	}
2777	totlen = m->m_pkthdr.len;
2778
2779	if (ieee80211_radiotap_active_vap(vap)) {
2780		struct wpi_tx_radiotap_header *tap = &sc->sc_txtap;
2781
2782		tap->wt_flags = 0;
2783		tap->wt_rate = rate;
2784		if (k != NULL)
2785			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2786
2787		ieee80211_radiotap_tx(vap, m);
2788	}
2789
2790	flags = 0;
2791	if (!ismcast) {
2792		/* Unicast frame, check if an ACK is expected. */
2793		if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
2794		    IEEE80211_QOS_ACKPOLICY_NOACK)
2795			flags |= WPI_TX_NEED_ACK;
2796	}
2797
2798	if (!IEEE80211_QOS_HAS_SEQ(wh))
2799		flags |= WPI_TX_AUTO_SEQ;
2800	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
2801		flags |= WPI_TX_MORE_FRAG;	/* Cannot happen yet. */
2802
2803	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
2804	if (!ismcast) {
2805		/* NB: Group frames are sent using CCK in 802.11b/g. */
2806		if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
2807			flags |= WPI_TX_NEED_RTS;
2808		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
2809		    WPI_RATE_IS_OFDM(rate)) {
2810			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
2811				flags |= WPI_TX_NEED_CTS;
2812			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
2813				flags |= WPI_TX_NEED_RTS;
2814		}
2815
2816		if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS))
2817			flags |= WPI_TX_FULL_TXOP;
2818	}
2819
2820	memset(tx, 0, sizeof (struct wpi_cmd_data));
2821	if (type == IEEE80211_FC0_TYPE_MGT) {
2822		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2823
2824		/* Tell HW to set timestamp in probe responses. */
2825		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2826			flags |= WPI_TX_INSERT_TSTAMP;
2827		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
2828		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
2829			tx->timeout = htole16(3);
2830		else
2831			tx->timeout = htole16(2);
2832	}
2833
2834	if (ismcast || type != IEEE80211_FC0_TYPE_DATA)
2835		tx->id = WPI_ID_BROADCAST;
2836	else {
2837		if (wn->id == WPI_ID_UNDEFINED) {
2838			device_printf(sc->sc_dev,
2839			    "%s: undefined node id\n", __func__);
2840			error = EINVAL;
2841			goto fail;
2842		}
2843
2844		tx->id = wn->id;
2845	}
2846
2847	if (k != NULL && !swcrypt) {
2848		switch (k->wk_cipher->ic_cipher) {
2849		case IEEE80211_CIPHER_AES_CCM:
2850			tx->security = WPI_CIPHER_CCMP;
2851			break;
2852
2853		default:
2854			break;
2855		}
2856
2857		memcpy(tx->key, k->wk_key, k->wk_keylen);
2858	}
2859
2860	tx->len = htole16(totlen);
2861	tx->flags = htole32(flags);
2862	tx->plcp = rate2plcp(rate);
2863	tx->tid = tid;
2864	tx->lifetime = htole32(WPI_LIFETIME_INFINITE);
2865	tx->ofdm_mask = 0xff;
2866	tx->cck_mask = 0x0f;
2867	tx->rts_ntries = 7;
2868	tx->data_ntries = tp->maxretry;
2869
2870	tx_data.ni = ni;
2871	tx_data.m = m;
2872	tx_data.size = sizeof(struct wpi_cmd_data);
2873	tx_data.code = WPI_CMD_TX_DATA;
2874	tx_data.ac = ac;
2875
2876	return wpi_cmd2(sc, &tx_data);
2877
2878fail:	m_freem(m);
2879	return error;
2880}
2881
2882static int
2883wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m,
2884    struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
2885{
2886	struct ieee80211vap *vap = ni->ni_vap;
2887	struct ieee80211_key *k = NULL;
2888	struct ieee80211_frame *wh;
2889	struct wpi_buf tx_data;
2890	struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data;
2891	uint32_t flags;
2892	uint8_t type;
2893	int ac, rate, swcrypt, totlen;
2894
2895	wh = mtod(m, struct ieee80211_frame *);
2896	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2897
2898	ac = params->ibp_pri & 3;
2899
2900	/* Choose a TX rate index. */
2901	rate = params->ibp_rate0;
2902
2903	flags = 0;
2904	if (!IEEE80211_QOS_HAS_SEQ(wh))
2905		flags |= WPI_TX_AUTO_SEQ;
2906	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
2907		flags |= WPI_TX_NEED_ACK;
2908	if (params->ibp_flags & IEEE80211_BPF_RTS)
2909		flags |= WPI_TX_NEED_RTS;
2910	if (params->ibp_flags & IEEE80211_BPF_CTS)
2911		flags |= WPI_TX_NEED_CTS;
2912	if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS))
2913		flags |= WPI_TX_FULL_TXOP;
2914
2915	/* Encrypt the frame if need be. */
2916	if (params->ibp_flags & IEEE80211_BPF_CRYPTO) {
2917		/* Retrieve key for TX. */
2918		k = ieee80211_crypto_encap(ni, m);
2919		if (k == NULL) {
2920			m_freem(m);
2921			return ENOBUFS;
2922		}
2923		swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT;
2924
2925		/* 802.11 header may have moved. */
2926		wh = mtod(m, struct ieee80211_frame *);
2927	}
2928	totlen = m->m_pkthdr.len;
2929
2930	if (ieee80211_radiotap_active_vap(vap)) {
2931		struct wpi_tx_radiotap_header *tap = &sc->sc_txtap;
2932
2933		tap->wt_flags = 0;
2934		tap->wt_rate = rate;
2935		if (params->ibp_flags & IEEE80211_BPF_CRYPTO)
2936			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2937
2938		ieee80211_radiotap_tx(vap, m);
2939	}
2940
2941	memset(tx, 0, sizeof (struct wpi_cmd_data));
2942	if (type == IEEE80211_FC0_TYPE_MGT) {
2943		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2944
2945		/* Tell HW to set timestamp in probe responses. */
2946		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2947			flags |= WPI_TX_INSERT_TSTAMP;
2948		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
2949		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
2950			tx->timeout = htole16(3);
2951		else
2952			tx->timeout = htole16(2);
2953	}
2954
2955	if (k != NULL && !swcrypt) {
2956		switch (k->wk_cipher->ic_cipher) {
2957		case IEEE80211_CIPHER_AES_CCM:
2958			tx->security = WPI_CIPHER_CCMP;
2959			break;
2960
2961		default:
2962			break;
2963		}
2964
2965		memcpy(tx->key, k->wk_key, k->wk_keylen);
2966	}
2967
2968	tx->len = htole16(totlen);
2969	tx->flags = htole32(flags);
2970	tx->plcp = rate2plcp(rate);
2971	tx->id = WPI_ID_BROADCAST;
2972	tx->lifetime = htole32(WPI_LIFETIME_INFINITE);
2973	tx->rts_ntries = params->ibp_try1;
2974	tx->data_ntries = params->ibp_try0;
2975
2976	tx_data.ni = ni;
2977	tx_data.m = m;
2978	tx_data.size = sizeof(struct wpi_cmd_data);
2979	tx_data.code = WPI_CMD_TX_DATA;
2980	tx_data.ac = ac;
2981
2982	return wpi_cmd2(sc, &tx_data);
2983}
2984
2985static __inline int
2986wpi_tx_ring_is_full(struct wpi_softc *sc, int ac)
2987{
2988	struct wpi_tx_ring *ring = &sc->txq[ac];
2989	int retval;
2990
2991	WPI_TXQ_STATE_LOCK(sc);
2992	retval = (ring->queued > WPI_TX_RING_HIMARK);
2993	WPI_TXQ_STATE_UNLOCK(sc);
2994
2995	return retval;
2996}
2997
2998static __inline void
2999wpi_handle_tx_failure(struct ieee80211_node *ni)
3000{
3001	/* NB: m is reclaimed on tx failure */
3002	if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
3003	ieee80211_free_node(ni);
3004}
3005
3006static int
3007wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3008    const struct ieee80211_bpf_params *params)
3009{
3010	struct ieee80211com *ic = ni->ni_ic;
3011	struct wpi_softc *sc = ic->ic_softc;
3012	int ac, error = 0;
3013
3014	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
3015
3016	ac = M_WME_GETAC(m);
3017
3018	WPI_TX_LOCK(sc);
3019
3020	if (sc->sc_running == 0 || wpi_tx_ring_is_full(sc, ac)) {
3021		m_freem(m);
3022		error = sc->sc_running ? ENOBUFS : ENETDOWN;
3023		goto unlock;
3024	}
3025
3026	if (params == NULL) {
3027		/*
3028		 * Legacy path; interpret frame contents to decide
3029		 * precisely how to send the frame.
3030		 */
3031		error = wpi_tx_data(sc, m, ni);
3032	} else {
3033		/*
3034		 * Caller supplied explicit parameters to use in
3035		 * sending the frame.
3036		 */
3037		error = wpi_tx_data_raw(sc, m, ni, params);
3038	}
3039
3040unlock:	WPI_TX_UNLOCK(sc);
3041
3042	if (error != 0) {
3043		wpi_handle_tx_failure(ni);
3044		DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
3045
3046		return error;
3047	}
3048
3049	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
3050
3051	return 0;
3052}
3053
3054static int
3055wpi_transmit(struct ieee80211com *ic, struct mbuf *m)
3056{
3057	struct wpi_softc *sc = ic->ic_softc;
3058	struct ieee80211_node *ni;
3059	int ac, error;
3060
3061	WPI_TX_LOCK(sc);
3062	DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__);
3063
3064	/* Check if interface is up & running. */
3065	if (__predict_false(sc->sc_running == 0)) {
3066		error = ENXIO;
3067		goto unlock;
3068	}
3069
3070	/* Check for available space. */
3071	ac = M_WME_GETAC(m);
3072	if (wpi_tx_ring_is_full(sc, ac)) {
3073		error = ENOBUFS;
3074		goto unlock;
3075	}
3076
3077	error = 0;
3078	ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3079	if (wpi_tx_data(sc, m, ni) != 0) {
3080		wpi_handle_tx_failure(ni);
3081	}
3082
3083	DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__);
3084
3085unlock:	WPI_TX_UNLOCK(sc);
3086
3087	return (error);
3088}
3089
3090static void
3091wpi_watchdog_rfkill(void *arg)
3092{
3093	struct wpi_softc *sc = arg;
3094	struct ieee80211com *ic = &sc->sc_ic;
3095
3096	DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n");
3097
3098	/* No need to lock firmware memory. */
3099	if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) {
3100		/* Radio kill switch is still off. */
3101		callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill,
3102		    sc);
3103	} else
3104		ieee80211_runtask(ic, &sc->sc_radioon_task);
3105}
3106
3107static void
3108wpi_scan_timeout(void *arg)
3109{
3110	struct wpi_softc *sc = arg;
3111	struct ieee80211com *ic = &sc->sc_ic;
3112
3113	ic_printf(ic, "scan timeout\n");
3114	taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask);
3115}
3116
3117static void
3118wpi_tx_timeout(void *arg)
3119{
3120	struct wpi_softc *sc = arg;
3121	struct ieee80211com *ic = &sc->sc_ic;
3122
3123	ic_printf(ic, "device timeout\n");
3124	taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask);
3125}
3126
3127static void
3128wpi_parent(struct ieee80211com *ic)
3129{
3130	struct wpi_softc *sc = ic->ic_softc;
3131	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3132
3133	if (ic->ic_nrunning > 0) {
3134		if (wpi_init(sc) == 0) {
3135			ieee80211_notify_radio(ic, 1);
3136			ieee80211_start_all(ic);
3137		} else {
3138			ieee80211_notify_radio(ic, 0);
3139			ieee80211_stop(vap);
3140		}
3141	} else
3142		wpi_stop(sc);
3143}
3144
3145/*
3146 * Send a command to the firmware.
3147 */
3148static int
3149wpi_cmd(struct wpi_softc *sc, int code, const void *buf, size_t size,
3150    int async)
3151{
3152	struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM];
3153	struct wpi_tx_desc *desc;
3154	struct wpi_tx_data *data;
3155	struct wpi_tx_cmd *cmd;
3156	struct mbuf *m;
3157	bus_addr_t paddr;
3158	int totlen, error;
3159
3160	WPI_TXQ_LOCK(sc);
3161
3162	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
3163
3164	if (__predict_false(sc->sc_running == 0)) {
3165		/* wpi_stop() was called */
3166		if (code == WPI_CMD_SCAN)
3167			error = ENETDOWN;
3168		else
3169			error = 0;
3170
3171		goto fail;
3172	}
3173
3174	if (async == 0)
3175		WPI_LOCK_ASSERT(sc);
3176
3177	DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %zu async %d\n",
3178	    __func__, wpi_cmd_str(code), size, async);
3179
3180	desc = &ring->desc[ring->cur];
3181	data = &ring->data[ring->cur];
3182	totlen = 4 + size;
3183
3184	if (size > sizeof cmd->data) {
3185		/* Command is too large to fit in a descriptor. */
3186		if (totlen > MCLBYTES) {
3187			error = EINVAL;
3188			goto fail;
3189		}
3190		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
3191		if (m == NULL) {
3192			error = ENOMEM;
3193			goto fail;
3194		}
3195		cmd = mtod(m, struct wpi_tx_cmd *);
3196		error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
3197		    totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
3198		if (error != 0) {
3199			m_freem(m);
3200			goto fail;
3201		}
3202		data->m = m;
3203	} else {
3204		cmd = &ring->cmd[ring->cur];
3205		paddr = data->cmd_paddr;
3206	}
3207
3208	cmd->code = code;
3209	cmd->flags = 0;
3210	cmd->qid = ring->qid;
3211	cmd->idx = ring->cur;
3212	memcpy(cmd->data, buf, size);
3213
3214	desc->nsegs = 1 + (WPI_PAD32(size) << 4);
3215	desc->segs[0].addr = htole32(paddr);
3216	desc->segs[0].len  = htole32(totlen);
3217
3218	if (size > sizeof cmd->data) {
3219		bus_dmamap_sync(ring->data_dmat, data->map,
3220		    BUS_DMASYNC_PREWRITE);
3221	} else {
3222		bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3223		    BUS_DMASYNC_PREWRITE);
3224	}
3225	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3226	    BUS_DMASYNC_PREWRITE);
3227
3228	/* Kick command ring. */
3229	ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT;
3230	sc->sc_update_tx_ring(sc, ring);
3231
3232	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
3233
3234	WPI_TXQ_UNLOCK(sc);
3235
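	/*
	 * For synchronous commands, sleep until wpi_cmd_done() wakes us up
	 * or the one second timeout expires.
	 */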
3236	return async ? 0 : mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz);
3237
3238fail:	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
3239
3240	WPI_TXQ_UNLOCK(sc);
3241
3242	return error;
3243}
3244
3245/*
3246 * Configure HW multi-rate retries.
3247 */
3248static int
3249wpi_mrr_setup(struct wpi_softc *sc)
3250{
3251	struct ieee80211com *ic = &sc->sc_ic;
3252	struct wpi_mrr_setup mrr;
3253	int i, error;
3254
3255	/* CCK rates (not used with 802.11a). */
3256	for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) {
3257		mrr.rates[i].flags = 0;
3258		mrr.rates[i].plcp = wpi_ridx_to_plcp[i];
3259		/* Fall back to the next lower CCK rate (if any). */
3260		mrr.rates[i].next =
3261		    (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1;
3262		/* Try twice at this rate before falling back to "next". */
3263		mrr.rates[i].ntries = WPI_NTRIES_DEFAULT;
3264	}
3265	/* OFDM rates (not used with 802.11b). */
3266	for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) {
3267		mrr.rates[i].flags = 0;
3268		mrr.rates[i].plcp = wpi_ridx_to_plcp[i];
3269		/* Fall back to the next lower rate (if any). */
3270		/* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */
3271		mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ?
3272		    ((ic->ic_curmode == IEEE80211_MODE_11A) ?
3273			WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) :
3274		    i - 1;
3275		/* Try twice at this rate before falling back to "next". */
3276		mrr.rates[i].ntries = WPI_NTRIES_DEFAULT;
3277	}
3278	/* Setup MRR for control frames. */
3279	mrr.which = htole32(WPI_MRR_CTL);
3280	error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0);
3281	if (error != 0) {
3282		device_printf(sc->sc_dev,
3283		    "could not setup MRR for control frames\n");
3284		return error;
3285	}
3286	/* Setup MRR for data frames. */
3287	mrr.which = htole32(WPI_MRR_DATA);
3288	error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0);
3289	if (error != 0) {
3290		device_printf(sc->sc_dev,
3291		    "could not setup MRR for data frames\n");
3292		return error;
3293	}
3294	return 0;
3295}
3296
3297static int
3298wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni)
3299{
3300	struct ieee80211com *ic = ni->ni_ic;
3301	struct wpi_vap *wvp = WPI_VAP(ni->ni_vap);
3302	struct wpi_node *wn = WPI_NODE(ni);
3303	struct wpi_node_info node;
3304	int error;
3305
3306	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3307
3308	if (wn->id == WPI_ID_UNDEFINED)
3309		return EINVAL;
3310
3311	memset(&node, 0, sizeof node);
3312	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
3313	node.id = wn->id;
3314	node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ?
3315	    wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1];
3316	node.action = htole32(WPI_ACTION_SET_RATE);
3317	node.antenna = WPI_ANTENNA_BOTH;
3318
3319	DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__,
3320	    wn->id, ether_sprintf(ni->ni_macaddr));
3321
3322	error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
3323	if (error != 0) {
3324		device_printf(sc->sc_dev,
3325		    "%s: wpi_cmd() call failed with error code %d\n", __func__,
3326		    error);
3327		return error;
3328	}
3329
3330	if (wvp->wv_gtk != 0) {
3331		error = wpi_set_global_keys(ni);
3332		if (error != 0) {
3333			device_printf(sc->sc_dev,
3334			    "%s: error while setting global keys\n", __func__);
3335			return ENXIO;
3336		}
3337	}
3338
3339	return 0;
3340}
3341
3342/*
3343 * The broadcast node is used to send group-addressed and management frames.
3344 */
3345static int
3346wpi_add_broadcast_node(struct wpi_softc *sc, int async)
3347{
3348	struct ieee80211com *ic = &sc->sc_ic;
3349	struct wpi_node_info node;
3350
3351	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3352
3353	memset(&node, 0, sizeof node);
3354	IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr);
3355	node.id = WPI_ID_BROADCAST;
3356	node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ?
3357	    wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1];
3358	node.action = htole32(WPI_ACTION_SET_RATE);
3359	node.antenna = WPI_ANTENNA_BOTH;
3360
3361	DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__);
3362
3363	return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async);
3364}
3365
3366static int
3367wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni)
3368{
3369	struct wpi_node *wn = WPI_NODE(ni);
3370	int error;
3371
3372	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3373
3374	wn->id = wpi_add_node_entry_sta(sc);
3375
3376	if ((error = wpi_add_node(sc, ni)) != 0) {
3377		wpi_del_node_entry(sc, wn->id);
3378		wn->id = WPI_ID_UNDEFINED;
3379		return error;
3380	}
3381
3382	return 0;
3383}
3384
3385static int
3386wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni)
3387{
3388	struct wpi_node *wn = WPI_NODE(ni);
3389	int error;
3390
3391	KASSERT(wn->id == WPI_ID_UNDEFINED,
3392	    ("the node %d was added before", wn->id));
3393
3394	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3395
3396	if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) {
3397		device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__);
3398		return ENOMEM;
3399	}
3400
3401	if ((error = wpi_add_node(sc, ni)) != 0) {
3402		wpi_del_node_entry(sc, wn->id);
3403		wn->id = WPI_ID_UNDEFINED;
3404		return error;
3405	}
3406
3407	return 0;
3408}
3409
3410static void
3411wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni)
3412{
3413	struct wpi_node *wn = WPI_NODE(ni);
3414	struct wpi_cmd_del_node node;
3415	int error;
3416
3417	KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed"));
3418
3419	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3420
3421	memset(&node, 0, sizeof node);
3422	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
3423	node.count = 1;
3424
3425	DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__,
3426	    wn->id, ether_sprintf(ni->ni_macaddr));
3427
3428	error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1);
3429	if (error != 0) {
3430		device_printf(sc->sc_dev,
3431		    "%s: could not delete node %u, error %d\n", __func__,
3432		    wn->id, error);
3433	}
3434}
3435
3436static int
3437wpi_updateedca(struct ieee80211com *ic)
3438{
3439#define WPI_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
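/* e.g. an ECWmin of 4 gives WPI_EXP2(4) == 15, i.e. a CWmin of 15 slots. */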
3440	struct wpi_softc *sc = ic->ic_softc;
3441	struct wpi_edca_params cmd;
3442	int aci, error;
3443
3444	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
3445
3446	memset(&cmd, 0, sizeof cmd);
3447	cmd.flags = htole32(WPI_EDCA_UPDATE);
3448	for (aci = 0; aci < WME_NUM_AC; aci++) {
3449		const struct wmeParams *ac =
3450		    &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
3451		cmd.ac[aci].aifsn = ac->wmep_aifsn;
3452		cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin));
3453		cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax));
3454		cmd.ac[aci].txoplimit =
3455		    htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
3456
3457		DPRINTF(sc, WPI_DEBUG_EDCA,
3458		    "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d "
3459		    "txoplimit=%d\n", aci, cmd.ac[aci].aifsn,
3460		    cmd.ac[aci].cwmin, cmd.ac[aci].cwmax,
3461		    cmd.ac[aci].txoplimit);
3462	}
3463	error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
3464
3465	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
3466
3467	return error;
3468#undef WPI_EXP2
3469}
3470
3471static void
3472wpi_set_promisc(struct wpi_softc *sc)
3473{
3474	struct ieee80211com *ic = &sc->sc_ic;
3475	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3476	uint32_t promisc_filter;
3477
3478	promisc_filter = WPI_FILTER_CTL;
3479	if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP)
3480		promisc_filter |= WPI_FILTER_PROMISC;
3481
3482	if (ic->ic_promisc > 0)
3483		sc->rxon.filter |= htole32(promisc_filter);
3484	else
3485		sc->rxon.filter &= ~htole32(promisc_filter);
3486}
3487
3488static void
3489wpi_update_promisc(struct ieee80211com *ic)
3490{
3491	struct wpi_softc *sc = ic->ic_softc;
3492
3493	WPI_RXON_LOCK(sc);
3494	wpi_set_promisc(sc);
3495
3496	if (wpi_send_rxon(sc, 1, 1) != 0) {
3497		device_printf(sc->sc_dev, "%s: could not send RXON\n",
3498		    __func__);
3499	}
3500	WPI_RXON_UNLOCK(sc);
3501}
3502
3503static void
3504wpi_update_mcast(struct ieee80211com *ic)
3505{
3506	/* Ignore */
3507}
3508
3509static void
3510wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on)
3511{
3512	struct wpi_cmd_led led;
3513
3514	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3515
3516	led.which = which;
3517	led.unit = htole32(100000);	/* on/off in units of 100ms */
3518	led.off = off;
3519	led.on = on;
3520	(void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1);
3521}
3522
3523static int
3524wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni)
3525{
3526	struct wpi_cmd_timing cmd;
3527	uint64_t val, mod;
3528
3529	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3530
3531	memset(&cmd, 0, sizeof cmd);
3532	memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
3533	cmd.bintval = htole16(ni->ni_intval);
3534	cmd.lintval = htole16(10);
3535
3536	/* Compute remaining time until next beacon. */
3537	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
3538	mod = le64toh(cmd.tstamp) % val;
3539	cmd.binitval = htole32((uint32_t)(val - mod));
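	/*
	 * e.g. a 100 TU beacon interval gives val = 102400us; if the TSF is
	 * 30000us past the last expected beacon (TBTT), binitval is 72400us.
	 */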
3540
3541	DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
3542	    ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
3543
3544	return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1);
3545}
3546
3547/*
3548 * This function is called periodically (every 60 seconds) to adjust output
3549 * power to temperature changes.
3550 */
3551static void
3552wpi_power_calibration(struct wpi_softc *sc)
3553{
3554	int temp;
3555
3556	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3557
3558	/* Update sensor data. */
3559	temp = (int)WPI_READ(sc, WPI_UCODE_GP2);
3560	DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp);
3561
3562	/* Sanity-check read value. */
3563	if (temp < -260 || temp > 25) {
3564		/* This can't be correct, ignore. */
3565		DPRINTF(sc, WPI_DEBUG_TEMP,
3566		    "out-of-range temperature reported: %d\n", temp);
3567		return;
3568	}
3569
3570	DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp);
3571
3572	/* Adjust Tx power if need be. */
3573	if (abs(temp - sc->temp) <= 6)
3574		return;
3575
3576	sc->temp = temp;
3577
3578	if (wpi_set_txpower(sc, 1) != 0) {
3579		/* just warn, too bad for the automatic calibration... */
3580		device_printf(sc->sc_dev,"could not adjust Tx power\n");
3581		device_printf(sc->sc_dev, "could not adjust Tx power\n");
3582}
3583
3584/*
3585 * Set TX power for current channel.
3586 */
3587static int
3588wpi_set_txpower(struct wpi_softc *sc, int async)
3589{
3590	struct wpi_power_group *group;
3591	struct wpi_cmd_txpower cmd;
3592	uint8_t chan;
3593	int idx, is_chan_5ghz, i;
3594
3595	/* Retrieve current channel from last RXON. */
3596	chan = sc->rxon.chan;
3597	is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0;
3598
3599	/* Find the TX power group to which this channel belongs. */
3600	if (is_chan_5ghz) {
3601		for (group = &sc->groups[1]; group < &sc->groups[4]; group++)
3602			if (chan <= group->chan)
3603				break;
3604	} else
3605		group = &sc->groups[0];
3606
3607	memset(&cmd, 0, sizeof cmd);
3608	cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ;
3609	cmd.chan = htole16(chan);
3610
3611	/* Set TX power for all OFDM and CCK rates. */
3612	for (i = 0; i <= WPI_RIDX_MAX ; i++) {
3613		/* Retrieve TX power for this channel/rate. */
3614		idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i);
3615
3616		cmd.rates[i].plcp = wpi_ridx_to_plcp[i];
3617
3618		if (is_chan_5ghz) {
3619			cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx];
3620			cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx];
3621		} else {
3622			cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx];
3623			cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx];
3624		}
3625		DPRINTF(sc, WPI_DEBUG_TEMP,
3626		    "chan %d/ridx %d: power index %d\n", chan, i, idx);
3627	}
3628
3629	return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async);
3630}
3631
3632/*
3633 * Determine Tx power index for a given channel/rate combination.
3634 * This takes into account the regulatory information from EEPROM and the
3635 * current temperature.
3636 */
3637static int
3638wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group,
3639    uint8_t chan, int is_chan_5ghz, int ridx)
3640{
3641/* Fixed-point arithmetic division using an n-bit fractional part. */
3642#define fdivround(a, b, n)	\
3643	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
3644
3645/* Linear interpolation. */
3646#define interpolate(x, x1, y1, x2, y2, n)	\
3647	((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
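/* e.g. interpolate(12, 10, 20, 14, 28, 19) == 24 (midway between 20 and 28). */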
3648
3649	struct wpi_power_sample *sample;
3650	int pwr, idx;
3651
3652	/* Default TX power is group maximum TX power minus 3dB. */
3653	pwr = group->maxpwr / 2;
3654
3655	/* Decrease TX power for highest OFDM rates to reduce distortion. */
3656	switch (ridx) {
3657	case WPI_RIDX_OFDM36:
3658		pwr -= is_chan_5ghz ?  5 : 0;
3659		break;
3660	case WPI_RIDX_OFDM48:
3661		pwr -= is_chan_5ghz ? 10 : 7;
3662		break;
3663	case WPI_RIDX_OFDM54:
3664		pwr -= is_chan_5ghz ? 12 : 9;
3665		break;
3666	}
3667
3668	/* Never exceed the channel maximum allowed TX power. */
3669	pwr = min(pwr, sc->maxpwr[chan]);
3670
3671	/* Retrieve TX power index into gain tables from samples. */
3672	for (sample = group->samples; sample < &group->samples[3]; sample++)
3673		if (pwr > sample[1].power)
3674			break;
3675	/* Fixed-point linear interpolation using a 19-bit fractional part. */
3676	idx = interpolate(pwr, sample[0].power, sample[0].index,
3677	    sample[1].power, sample[1].index, 19);
3678
3679	/*-
3680	 * Adjust power index based on current temperature:
3681	 * - if cooler than factory-calibrated: decrease output power
3682	 * - if warmer than factory-calibrated: increase output power
3683	 */
3684	idx -= (sc->temp - group->temp) * 11 / 100;
3685
3686	/* Decrease TX power for CCK rates (-5dB). */
3687	if (ridx >= WPI_RIDX_CCK1)
3688		idx += 10;
3689
3690	/* Make sure idx stays in a valid range. */
3691	if (idx < 0)
3692		return 0;
3693	if (idx > WPI_MAX_PWR_INDEX)
3694		return WPI_MAX_PWR_INDEX;
3695	return idx;
3696
3697#undef interpolate
3698#undef fdivround
3699}
3700
3701/*
3702 * Set STA mode power saving level (between 0 and 5).
3703 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
3704 */
3705static int
3706wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async)
3707{
3708	struct wpi_pmgt_cmd cmd;
3709	const struct wpi_pmgt *pmgt;
3710	uint32_t max, skip_dtim;
3711	uint32_t reg;
3712	int i;
3713
3714	DPRINTF(sc, WPI_DEBUG_PWRSAVE,
3715	    "%s: dtim=%d, level=%d, async=%d\n",
3716	    __func__, dtim, level, async);
3717
3718	/* Select which PS parameters to use. */
3719	if (dtim <= 10)
3720		pmgt = &wpi_pmgt[0][level];
3721	else
3722		pmgt = &wpi_pmgt[1][level];
3723
3724	memset(&cmd, 0, sizeof cmd);
3725	if (level != 0)	/* not CAM */
3726		cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP);
3727	/* Retrieve PCIe Active State Power Management (ASPM). */
3728	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
3729	if (!(reg & 0x1))	/* L0s Entry disabled. */
3730		cmd.flags |= htole16(WPI_PS_PCI_PMGT);
3731
3732	cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU);
3733	cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU);
3734
3735	if (dtim == 0) {
3736		dtim = 1;
3737		skip_dtim = 0;
3738	} else
3739		skip_dtim = pmgt->skip_dtim;
3740
3741	if (skip_dtim != 0) {
3742		cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM);
3743		max = pmgt->intval[4];
3744		if (max == (uint32_t)-1)
3745			max = dtim * (skip_dtim + 1);
3746		else if (max > dtim)
3747			max = (max / dtim) * dtim;
3748	} else
3749		max = dtim;
3750
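	/*
	 * The power management command carries five sleep intervals; clamp
	 * each one so that none exceeds the limit computed above.
	 */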
3751	for (i = 0; i < 5; i++)
3752		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
3753
3754	return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
3755}
3756
3757static int
3758wpi_send_btcoex(struct wpi_softc *sc)
3759{
3760	struct wpi_bluetooth cmd;
3761
3762	memset(&cmd, 0, sizeof cmd);
3763	cmd.flags = WPI_BT_COEX_MODE_4WIRE;
3764	cmd.lead_time = WPI_BT_LEAD_TIME_DEF;
3765	cmd.max_kill = WPI_BT_MAX_KILL_DEF;
3766	DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
3767	    __func__);
3768	return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
3769}
3770
3771static int
3772wpi_send_rxon(struct wpi_softc *sc, int assoc, int async)
3773{
3774	int error;
3775
3776	if (async)
3777		WPI_RXON_LOCK_ASSERT(sc);
3778
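	/*
	 * While the BSS filter is set (i.e. we are associated), use the
	 * lighter RXON_ASSOC command, which updates the flags, filter and
	 * rate masks without disturbing the current association.  Otherwise
	 * issue a full RXON, which resets the firmware node table and
	 * requires the broadcast node to be re-added.
	 */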
3779	if (assoc && wpi_check_bss_filter(sc) != 0) {
3780		struct wpi_assoc rxon_assoc;
3781
3782		rxon_assoc.flags = sc->rxon.flags;
3783		rxon_assoc.filter = sc->rxon.filter;
3784		rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask;
3785		rxon_assoc.cck_mask = sc->rxon.cck_mask;
3786		rxon_assoc.reserved = 0;
3787
3788		error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc,
3789		    sizeof (struct wpi_assoc), async);
3790		if (error != 0) {
3791			device_printf(sc->sc_dev,
3792			    "RXON_ASSOC command failed, error %d\n", error);
3793			return error;
3794		}
3795	} else {
3796		if (async) {
3797			WPI_NT_LOCK(sc);
3798			error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon,
3799			    sizeof (struct wpi_rxon), async);
3800			if (error == 0)
3801				wpi_clear_node_table(sc);
3802			WPI_NT_UNLOCK(sc);
3803		} else {
3804			error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon,
3805			    sizeof (struct wpi_rxon), async);
3806			if (error == 0)
3807				wpi_clear_node_table(sc);
3808		}
3809
3810		if (error != 0) {
3811			device_printf(sc->sc_dev,
3812			    "RXON command failed, error %d\n", error);
3813			return error;
3814		}
3815
3816		/* Add broadcast node. */
3817		error = wpi_add_broadcast_node(sc, async);
3818		if (error != 0) {
3819			device_printf(sc->sc_dev,
3820			    "could not add broadcast node, error %d\n", error);
3821			return error;
3822		}
3823	}
3824
3825	/* Configuration has changed, set Tx power accordingly. */
3826	if ((error = wpi_set_txpower(sc, async)) != 0) {
3827		device_printf(sc->sc_dev,
3828		    "%s: could not set TX power, error %d\n", __func__, error);
3829		return error;
3830	}
3831
3832	return 0;
3833}
3834
3835/**
3836 * Configure the card to listen to a particular channel; this transitions
3837 * the card into being able to receive frames from remote devices.
3838 */
3839static int
3840wpi_config(struct wpi_softc *sc)
3841{
3842	struct ieee80211com *ic = &sc->sc_ic;
3843	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3844	struct ieee80211_channel *c = ic->ic_curchan;
3845	int error;
3846
3847	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
3848
3849	/* Set power saving level to CAM during initialization. */
3850	if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) {
3851		device_printf(sc->sc_dev,
3852		    "%s: could not set power saving level\n", __func__);
3853		return error;
3854	}
3855
3856	/* Configure bluetooth coexistence. */
3857	if ((error = wpi_send_btcoex(sc)) != 0) {
3858		device_printf(sc->sc_dev,
3859		    "could not configure bluetooth coexistence\n");
3860		return error;
3861	}
3862
3863	/* Configure adapter. */
3864	memset(&sc->rxon, 0, sizeof (struct wpi_rxon));
3865	IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr);
3866
3867	/* Set default channel. */
3868	sc->rxon.chan = ieee80211_chan2ieee(ic, c);
3869	sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
3870	if (IEEE80211_IS_CHAN_2GHZ(c))
3871		sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
3872
3873	sc->rxon.filter = WPI_FILTER_MULTICAST;
3874	switch (ic->ic_opmode) {
3875	case IEEE80211_M_STA:
3876		sc->rxon.mode = WPI_MODE_STA;
3877		break;
3878	case IEEE80211_M_IBSS:
3879		sc->rxon.mode = WPI_MODE_IBSS;
3880		sc->rxon.filter |= WPI_FILTER_BEACON;
3881		break;
3882	case IEEE80211_M_HOSTAP:
3883		/* XXX workaround for beaconing */
3884		sc->rxon.mode = WPI_MODE_IBSS;
3885		sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC;
3886		break;
3887	case IEEE80211_M_AHDEMO:
3888		sc->rxon.mode = WPI_MODE_HOSTAP;
3889		break;
3890	case IEEE80211_M_MONITOR:
3891		sc->rxon.mode = WPI_MODE_MONITOR;
3892		break;
3893	default:
3894		device_printf(sc->sc_dev, "unknown opmode %d\n",
3895		    ic->ic_opmode);
3896		return EINVAL;
3897	}
3898	sc->rxon.filter = htole32(sc->rxon.filter);
3899	wpi_set_promisc(sc);
3900	sc->rxon.cck_mask  = 0x0f;	/* not yet negotiated */
3901	sc->rxon.ofdm_mask = 0xff;	/* not yet negotiated */
3902
3903	if ((error = wpi_send_rxon(sc, 0, 0)) != 0) {
3904		device_printf(sc->sc_dev, "%s: could not send RXON\n",
3905		    __func__);
3906		return error;
3907	}
3908
3909	/* Set up rate scaling. */
3910	if ((error = wpi_mrr_setup(sc)) != 0) {
3911		device_printf(sc->sc_dev, "could not setup MRR, error %d\n",
3912		    error);
3913		return error;
3914	}
3915
3916	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
3917
3918	return 0;
3919}
3920
3921static uint16_t
3922wpi_get_active_dwell_time(struct wpi_softc *sc,
3923    struct ieee80211_channel *c, uint8_t n_probes)
3924{
3925	/* No channel? Default to 2GHz settings. */
3926	if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
3927		return (WPI_ACTIVE_DWELL_TIME_2GHZ +
3928		WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
3929	}
3930
3931	/* 5GHz dwell time. */
3932	return (WPI_ACTIVE_DWELL_TIME_5GHZ +
3933	    WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1));
3934}
3935
3936/*
3937 * Limit the total dwell time.
3938 *
3939 * Returns the dwell time in milliseconds.
3940 */
3941static uint16_t
3942wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time)
3943{
3944	struct ieee80211com *ic = &sc->sc_ic;
3945	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3946	int bintval = 0;
3947
3948	/* bintval is in TU (1.024 ms). */
3949	if (vap != NULL)
3950		bintval = vap->iv_bss->ni_intval;
3951
3952	/*
3953	 * If the beacon interval is non-zero, clamp the dwell time to the
3954	 * beacon interval minus twice the channel tune time.
3955	 *
3956	 * XXX The math should take into account that bintval is in units
3957	 * of 1.024 ms, not 1 ms.
3958	 */
3959	if (bintval > 0) {
3960		DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__,
3961		    bintval);
3962		return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2));
3963	}
3964
3965	/* No association context? Default. */
3966	return dwell_time;
3967}
3968
3969static uint16_t
3970wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c)
3971{
3972	uint16_t passive;
3973
3974	if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c))
3975		passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ;
3976	else
3977		passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ;
3978
3979	/* Clamp to the beacon interval if we're associated. */
3980	return (wpi_limit_dwell(sc, passive));
3981}
3982
3983static uint32_t
3984wpi_get_scan_pause_time(uint32_t time, uint16_t bintval)
3985{
3986	uint32_t mod = (time % bintval) * IEEE80211_DUR_TU;
3987	uint32_t nbeacons = time / bintval;
3988
3989	if (mod > WPI_PAUSE_MAX_TIME)
3990		mod = WPI_PAUSE_MAX_TIME;
3991
3992	return WPI_PAUSE_SCAN(nbeacons, mod);
3993}
3994
3995/*
3996 * Send a scan request to the firmware.
3997 */
3998static int
3999wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c)
4000{
4001	struct ieee80211com *ic = &sc->sc_ic;
4002	struct ieee80211_scan_state *ss = ic->ic_scan;
4003	struct ieee80211vap *vap = ss->ss_vap;
4004	struct wpi_scan_hdr *hdr;
4005	struct wpi_cmd_data *tx;
4006	struct wpi_scan_essid *essids;
4007	struct wpi_scan_chan *chan;
4008	struct ieee80211_frame *wh;
4009	struct ieee80211_rateset *rs;
4010	uint16_t dwell_active, dwell_passive;
4011	uint8_t *buf, *frm;
4012	int bgscan, bintval, buflen, error, i, nssid;
4013
4014	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
4015
4016	/*
4017	 * We are absolutely not allowed to send a scan command when another
4018	 * scan command is pending.
4019	 */
4020	if (callout_pending(&sc->scan_timeout)) {
4021		device_printf(sc->sc_dev, "%s: called whilst scanning!\n",
4022		    __func__);
4023		error = EAGAIN;
4024		goto fail;
4025	}
4026
4027	bgscan = wpi_check_bss_filter(sc);
4028	bintval = vap->iv_bss->ni_intval;
4029	if (bgscan != 0 &&
4030	    bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) {
4031		error = EOPNOTSUPP;
4032		goto fail;
4033	}
4034
4035	buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
4036	if (buf == NULL) {
4037		device_printf(sc->sc_dev,
4038		    "%s: could not allocate buffer for scan command\n",
4039		    __func__);
4040		error = ENOMEM;
4041		goto fail;
4042	}
4043	hdr = (struct wpi_scan_hdr *)buf;
4044
4045	/*
4046	 * Move to the next channel if no packets are received within 10 msecs
4047	 * after sending the probe request.
4048	 */
4049	hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT);
4050	hdr->quiet_threshold = htole16(1);
4051
4052	if (bgscan != 0) {
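	/*
	 * For a background scan the firmware periodically returns to the
	 * serving channel: max_svc bounds the time spent away from it and
	 * pause_svc how long to stay on it between channel visits (see
	 * wpi_get_scan_pause_time()).
	 */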
4053		/*
4054		 * Max needs to be greater than active and passive and quiet!
4055		 * It's also in microseconds!
4056		 */
4057		hdr->max_svc = htole32(250 * IEEE80211_DUR_TU);
4058		hdr->pause_svc = htole32(wpi_get_scan_pause_time(100,
4059		    bintval));
4060	}
4061
4062	hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON);
4063
4064	tx = (struct wpi_cmd_data *)(hdr + 1);
4065	tx->flags = htole32(WPI_TX_AUTO_SEQ);
4066	tx->id = WPI_ID_BROADCAST;
4067	tx->lifetime = htole32(WPI_LIFETIME_INFINITE);
4068
4069	if (IEEE80211_IS_CHAN_5GHZ(c)) {
4070		/* Send probe requests at 6Mbps. */
4071		tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6];
4072		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
4073	} else {
4074		hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO);
4075		/* Send probe requests at 1Mbps. */
4076		tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1];
4077		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
4078	}
4079
4080	essids = (struct wpi_scan_essid *)(tx + 1);
4081	nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS);
4082	for (i = 0; i < nssid; i++) {
4083		essids[i].id = IEEE80211_ELEMID_SSID;
4084		essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN);
4085		memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len);
4086#ifdef WPI_DEBUG
4087		if (sc->sc_debug & WPI_DEBUG_SCAN) {
4088			printf("Scanning Essid: ");
4089			ieee80211_print_essid(essids[i].data, essids[i].len);
4090			printf("\n");
4091		}
4092#endif
4093	}
4094
4095	/*
4096	 * Build a probe request frame.  Most of the following code is a
4097	 * copy & paste of what is done in net80211.
4098	 */
4099	wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS);
4100	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4101		IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4102	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4103	IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr);
4104	IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
4105	IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr);
4106
4107	frm = (uint8_t *)(wh + 1);
4108	frm = ieee80211_add_ssid(frm, NULL, 0);
4109	frm = ieee80211_add_rates(frm, rs);
4110	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4111		frm = ieee80211_add_xrates(frm, rs);
4112
4113	/* Set length of probe request. */
4114	tx->len = htole16(frm - (uint8_t *)wh);
4115
4116	/*
4117	 * Construct information about the channel that we
4118	 * want to scan. The firmware expects this to be directly
4119	 * after the scan probe request.
4120	 */
4121	chan = (struct wpi_scan_chan *)frm;
4122	chan->chan = htole16(ieee80211_chan2ieee(ic, c));
4123	chan->flags = 0;
4124	if (nssid) {
4125		hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT;
4126		chan->flags |= WPI_CHAN_NPBREQS(nssid);
4127	} else
4128		hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER;
4129
4130	if (!IEEE80211_IS_CHAN_PASSIVE(c))
4131		chan->flags |= WPI_CHAN_ACTIVE;
4132
4133	/*
4134	 * Calculate the active/passive dwell times.
4135	 */
4136	dwell_active = wpi_get_active_dwell_time(sc, c, nssid);
4137	dwell_passive = wpi_get_passive_dwell_time(sc, c);
4138
4139	/* The active dwell time must not exceed the passive dwell time. */
4140	if (dwell_active > dwell_passive)
4141		dwell_active = dwell_passive;
4142
4143	chan->active = htole16(dwell_active);
4144	chan->passive = htole16(dwell_passive);
4145
4146	chan->dsp_gain = 0x6e;  /* Default level */
4147
4148	if (IEEE80211_IS_CHAN_5GHZ(c))
4149		chan->rf_gain = 0x3b;
4150	else
4151		chan->rf_gain = 0x28;
4152
4153	DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n",
4154	    chan->chan, IEEE80211_IS_CHAN_PASSIVE(c));
4155
4156	hdr->nchan++;
4157
4158	if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) {
4159		/* XXX Force probe request transmission. */
4160		memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan));
4161
4162		chan++;
4163
4164		/* Reduce unnecessary delay. */
4165		chan->flags = 0;
4166		chan->passive = chan->active = hdr->quiet_time;
4167
4168		hdr->nchan++;
4169	}
4170
4171	chan++;
4172
4173	buflen = (uint8_t *)chan - buf;
4174	hdr->len = htole16(buflen);
4175
4176	DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n",
4177	    hdr->nchan);
4178	error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1);
4179	free(buf, M_DEVBUF);
4180
4181	if (error != 0)
4182		goto fail;
4183
4184	callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc);
4185
4186	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
4187
4188	return 0;
4189
4190fail:	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
4191
4192	return error;
4193}
4194
4195static int
4196wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap)
4197{
4198	struct ieee80211com *ic = vap->iv_ic;
4199	struct ieee80211_node *ni = vap->iv_bss;
4200	struct ieee80211_channel *c = ni->ni_chan;
4201	int error;
4202
4203	WPI_RXON_LOCK(sc);
4204
4205	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
4206
4207	/* Update adapter configuration. */
4208	sc->rxon.associd = 0;
4209	sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
4210	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
4211	sc->rxon.chan = ieee80211_chan2ieee(ic, c);
4212	sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
4213	if (IEEE80211_IS_CHAN_2GHZ(c))
4214		sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
4215	if (ic->ic_flags & IEEE80211_F_SHSLOT)
4216		sc->rxon.flags |= htole32(WPI_RXON_SHSLOT);
4217	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4218		sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE);
4219	if (IEEE80211_IS_CHAN_A(c)) {
4220		sc->rxon.cck_mask  = 0;
4221		sc->rxon.ofdm_mask = 0x15;
4222	} else if (IEEE80211_IS_CHAN_B(c)) {
4223		sc->rxon.cck_mask  = 0x03;
4224		sc->rxon.ofdm_mask = 0;
4225	} else {
4226		/* Assume 802.11b/g. */
4227		sc->rxon.cck_mask  = 0x0f;
4228		sc->rxon.ofdm_mask = 0x15;
4229	}
4230
4231	DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
4232	    sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask,
4233	    sc->rxon.ofdm_mask);
4234
4235	if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
4236		device_printf(sc->sc_dev, "%s: could not send RXON\n",
4237		    __func__);
4238	}
4239
4240	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
4241
4242	WPI_RXON_UNLOCK(sc);
4243
4244	return error;
4245}
4246
4247static int
4248wpi_config_beacon(struct wpi_vap *wvp)
4249{
4250	struct ieee80211vap *vap = &wvp->wv_vap;
4251	struct ieee80211com *ic = vap->iv_ic;
4252	struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off;
4253	struct wpi_buf *bcn = &wvp->wv_bcbuf;
4254	struct wpi_softc *sc = ic->ic_softc;
4255	struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data;
4256	struct ieee80211_tim_ie *tie;
4257	struct mbuf *m;
4258	uint8_t *ptr;
4259	int error;
4260
4261	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4262
4263	WPI_VAP_LOCK_ASSERT(wvp);
4264
4265	cmd->len = htole16(bcn->m->m_pkthdr.len);
4266	cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ?
4267	    wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1];
4268
4269	/* XXX seems to be unused */
4270	if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) {
4271		tie = (struct ieee80211_tim_ie *) bo->bo_tim;
4272		ptr = mtod(bcn->m, uint8_t *);
4273
4274		cmd->tim = htole16(bo->bo_tim - ptr);
4275		cmd->timsz = tie->tim_len;
4276	}
4277
4278	/* Necessary for recursion in ieee80211_beacon_update(). */
4279	m = bcn->m;
4280	bcn->m = m_dup(m, M_NOWAIT);
4281	if (bcn->m == NULL) {
4282		device_printf(sc->sc_dev,
4283		    "%s: could not copy beacon frame\n", __func__);
4284		error = ENOMEM;
4285		goto end;
4286	}
4287
4288	if ((error = wpi_cmd2(sc, bcn)) != 0) {
4289		device_printf(sc->sc_dev,
4290		    "%s: could not update beacon frame, error %d", __func__,
4291		    error);
4292	}
4293
4294	/* Restore mbuf. */
4295end:	bcn->m = m;
4296
4297	return error;
4298}
4299
4300static int
4301wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni)
4302{
4303	struct ieee80211vap *vap = ni->ni_vap;
4304	struct wpi_vap *wvp = WPI_VAP(vap);
4305	struct wpi_buf *bcn = &wvp->wv_bcbuf;
4306	struct mbuf *m;
4307	int error;
4308
4309	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4310
4311	if (ni->ni_chan == IEEE80211_CHAN_ANYC)
4312		return EINVAL;
4313
4314	m = ieee80211_beacon_alloc(ni);
4315	if (m == NULL) {
4316		device_printf(sc->sc_dev,
4317		    "%s: could not allocate beacon frame\n", __func__);
4318		return ENOMEM;
4319	}
4320
4321	WPI_VAP_LOCK(wvp);
4322	if (bcn->m != NULL)
4323		m_freem(bcn->m);
4324
4325	bcn->m = m;
4326
4327	error = wpi_config_beacon(wvp);
4328	WPI_VAP_UNLOCK(wvp);
4329
4330	return error;
4331}
4332
4333static void
4334wpi_update_beacon(struct ieee80211vap *vap, int item)
4335{
4336	struct wpi_softc *sc = vap->iv_ic->ic_softc;
4337	struct wpi_vap *wvp = WPI_VAP(vap);
4338	struct wpi_buf *bcn = &wvp->wv_bcbuf;
4339	struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off;
4340	struct ieee80211_node *ni = vap->iv_bss;
4341	int mcast = 0;
4342
4343	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
4344
4345	WPI_VAP_LOCK(wvp);
4346	if (bcn->m == NULL) {
4347		bcn->m = ieee80211_beacon_alloc(ni);
4348		if (bcn->m == NULL) {
4349			device_printf(sc->sc_dev,
4350			    "%s: could not allocate beacon frame\n", __func__);
4351
4352			DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR,
4353			    __func__);
4354
4355			WPI_VAP_UNLOCK(wvp);
4356			return;
4357		}
4358	}
4359	WPI_VAP_UNLOCK(wvp);
4360
4361	if (item == IEEE80211_BEACON_TIM)
4362		mcast = 1;	/* TODO */
4363
4364	setbit(bo->bo_flags, item);
4365	ieee80211_beacon_update(ni, bcn->m, mcast);
4366
4367	WPI_VAP_LOCK(wvp);
4368	wpi_config_beacon(wvp);
4369	WPI_VAP_UNLOCK(wvp);
4370
4371	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
4372}
4373
4374static void
4375wpi_newassoc(struct ieee80211_node *ni, int isnew)
4376{
4377	struct ieee80211vap *vap = ni->ni_vap;
4378	struct wpi_softc *sc = ni->ni_ic->ic_softc;
4379	struct wpi_node *wn = WPI_NODE(ni);
4380	int error;
4381
4382	WPI_NT_LOCK(sc);
4383
4384	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4385
4386	if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) {
4387		if ((error = wpi_add_ibss_node(sc, ni)) != 0) {
4388			device_printf(sc->sc_dev,
4389			    "%s: could not add IBSS node, error %d\n",
4390			    __func__, error);
4391		}
4392	}
4393	WPI_NT_UNLOCK(sc);
4394}
4395
4396static int
4397wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap)
4398{
4399	struct ieee80211com *ic = vap->iv_ic;
4400	struct ieee80211_node *ni = vap->iv_bss;
4401	struct ieee80211_channel *c = ni->ni_chan;
4402	int error;
4403
4404	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
4405
4406	if (vap->iv_opmode == IEEE80211_M_MONITOR) {
4407		/* Link LED blinks while monitoring. */
4408		wpi_set_led(sc, WPI_LED_LINK, 5, 5);
4409		return 0;
4410	}
4411
4412	/* XXX kernel panic workaround */
4413	if (c == IEEE80211_CHAN_ANYC) {
4414		device_printf(sc->sc_dev, "%s: incomplete configuration\n",
4415		    __func__);
4416		return EINVAL;
4417	}
4418
4419	if ((error = wpi_set_timing(sc, ni)) != 0) {
4420		device_printf(sc->sc_dev,
4421		    "%s: could not set timing, error %d\n", __func__, error);
4422		return error;
4423	}
4424
4425	/* Update adapter configuration. */
4426	WPI_RXON_LOCK(sc);
4427	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
4428	sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni));
4429	sc->rxon.chan = ieee80211_chan2ieee(ic, c);
4430	sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
4431	if (IEEE80211_IS_CHAN_2GHZ(c))
4432		sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
4433	if (ic->ic_flags & IEEE80211_F_SHSLOT)
4434		sc->rxon.flags |= htole32(WPI_RXON_SHSLOT);
4435	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4436		sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE);
4437	if (IEEE80211_IS_CHAN_A(c)) {
4438		sc->rxon.cck_mask  = 0;
4439		sc->rxon.ofdm_mask = 0x15;
4440	} else if (IEEE80211_IS_CHAN_B(c)) {
4441		sc->rxon.cck_mask  = 0x03;
4442		sc->rxon.ofdm_mask = 0;
4443	} else {
4444		/* Assume 802.11b/g. */
4445		sc->rxon.cck_mask  = 0x0f;
4446		sc->rxon.ofdm_mask = 0x15;
4447	}
4448	sc->rxon.filter |= htole32(WPI_FILTER_BSS);
4449
4450	DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n",
4451	    sc->rxon.chan, sc->rxon.flags);
4452
4453	if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
4454		device_printf(sc->sc_dev, "%s: could not send RXON\n",
4455		    __func__);
4456		return error;
4457	}
4458
4459	/* Start periodic calibration timer. */
4460	callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc);
4461
4462	WPI_RXON_UNLOCK(sc);
4463
4464	if (vap->iv_opmode == IEEE80211_M_IBSS ||
4465	    vap->iv_opmode == IEEE80211_M_HOSTAP) {
4466		if ((error = wpi_setup_beacon(sc, ni)) != 0) {
4467			device_printf(sc->sc_dev,
4468			    "%s: could not setup beacon, error %d\n", __func__,
4469			    error);
4470			return error;
4471		}
4472	}
4473
4474	if (vap->iv_opmode == IEEE80211_M_STA) {
4475		/* Add BSS node. */
4476		WPI_NT_LOCK(sc);
4477		error = wpi_add_sta_node(sc, ni);
4478		WPI_NT_UNLOCK(sc);
4479		if (error != 0) {
4480			device_printf(sc->sc_dev,
4481			    "%s: could not add BSS node, error %d\n", __func__,
4482			    error);
4483			return error;
4484		}
4485	}
4486
4487	/* Link LED always on while associated. */
4488	wpi_set_led(sc, WPI_LED_LINK, 0, 1);
4489
4490	/* Enable power-saving mode if requested by user. */
4491	if ((vap->iv_flags & IEEE80211_F_PMGTON) &&
4492	    vap->iv_opmode != IEEE80211_M_IBSS)
4493		(void)wpi_set_pslevel(sc, 0, 3, 1);
4494
4495	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
4496
4497	return 0;
4498}
4499
4500static int
4501wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k)
4502{
4503	const struct ieee80211_cipher *cip = k->wk_cipher;
4504	struct ieee80211vap *vap = ni->ni_vap;
4505	struct wpi_softc *sc = ni->ni_ic->ic_softc;
4506	struct wpi_node *wn = WPI_NODE(ni);
4507	struct wpi_node_info node;
4508	uint16_t kflags;
4509	int error;
4510
4511	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4512
4513	if (wpi_check_node_entry(sc, wn->id) == 0) {
4514		device_printf(sc->sc_dev, "%s: node does not exist\n",
4515		    __func__);
4516		return 0;
4517	}
4518
4519	switch (cip->ic_cipher) {
4520	case IEEE80211_CIPHER_AES_CCM:
4521		kflags = WPI_KFLAG_CCMP;
4522		break;
4523
4524	default:
4525		device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__,
4526		    cip->ic_cipher);
4527		return 0;
4528	}
4529
4530	kflags |= WPI_KFLAG_KID(k->wk_keyix);
4531	if (k->wk_flags & IEEE80211_KEY_GROUP)
4532		kflags |= WPI_KFLAG_MULTICAST;
4533
4534	memset(&node, 0, sizeof node);
4535	node.id = wn->id;
4536	node.control = WPI_NODE_UPDATE;
4537	node.flags = WPI_FLAG_KEY_SET;
4538	node.kflags = htole16(kflags);
4539	memcpy(node.key, k->wk_key, k->wk_keylen);
4540again:
4541	DPRINTF(sc, WPI_DEBUG_KEY,
4542	    "%s: setting %s key id %d for node %d (%s)\n", __func__,
4543	    (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix,
4544	    node.id, ether_sprintf(ni->ni_macaddr));
4545
4546	error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
4547	if (error != 0) {
4548		device_printf(sc->sc_dev, "can't update node info, error %d\n",
4549		    error);
4550		return !error;
4551	}
4552
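	/*
	 * A key taken from the global (vap) key slots also protects
	 * group-addressed frames, so install it a second time with the
	 * multicast flag set.
	 */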
4553	if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k &&
4554	    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
4555		kflags |= WPI_KFLAG_MULTICAST;
4556		node.kflags = htole16(kflags);
4557
4558		goto again;
4559	}
4560
4561	return 1;
4562}
4563
4564static void
4565wpi_load_key_cb(void *arg, struct ieee80211_node *ni)
4566{
4567	const struct ieee80211_key *k = arg;
4568	struct ieee80211vap *vap = ni->ni_vap;
4569	struct wpi_softc *sc = ni->ni_ic->ic_softc;
4570	struct wpi_node *wn = WPI_NODE(ni);
4571	int error;
4572
4573	if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED)
4574		return;
4575
4576	WPI_NT_LOCK(sc);
4577	error = wpi_load_key(ni, k);
4578	WPI_NT_UNLOCK(sc);
4579
4580	if (error == 0) {
4581		device_printf(sc->sc_dev, "%s: error while setting key\n",
4582		    __func__);
4583	}
4584}
4585
4586static int
4587wpi_set_global_keys(struct ieee80211_node *ni)
4588{
4589	struct ieee80211vap *vap = ni->ni_vap;
4590	struct ieee80211_key *wk = &vap->iv_nw_keys[0];
4591	int error = 1;
4592
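	/* wpi_load_key() returns 1 on success, so stop at the first failure. */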
4593	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++)
4594		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4595			error = wpi_load_key(ni, wk);
4596
4597	return !error;
4598}
4599
4600static int
4601wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k)
4602{
4603	struct ieee80211vap *vap = ni->ni_vap;
4604	struct wpi_softc *sc = ni->ni_ic->ic_softc;
4605	struct wpi_node *wn = WPI_NODE(ni);
4606	struct wpi_node_info node;
4607	uint16_t kflags;
4608	int error;
4609
4610	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4611
4612	if (wpi_check_node_entry(sc, wn->id) == 0) {
4613		DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__);
4614		return 1;	/* Nothing to do. */
4615	}
4616
4617	kflags = WPI_KFLAG_KID(k->wk_keyix);
4618	if (k->wk_flags & IEEE80211_KEY_GROUP)
4619		kflags |= WPI_KFLAG_MULTICAST;
4620
4621	memset(&node, 0, sizeof node);
4622	node.id = wn->id;
4623	node.control = WPI_NODE_UPDATE;
4624	node.flags = WPI_FLAG_KEY_SET;
4625	node.kflags = htole16(kflags);
4626again:
4627	DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n",
4628	    __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast",
4629	    k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr));
4630
4631	error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
4632	if (error != 0) {
4633		device_printf(sc->sc_dev, "can't update node info, error %d\n",
4634		    error);
4635		return !error;
4636	}
4637
4638	if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k &&
4639	    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
4640		kflags |= WPI_KFLAG_MULTICAST;
4641		node.kflags = htole16(kflags);
4642
4643		goto again;
4644	}
4645
4646	return 1;
4647}
4648
4649static void
4650wpi_del_key_cb(void *arg, struct ieee80211_node *ni)
4651{
4652	const struct ieee80211_key *k = arg;
4653	struct ieee80211vap *vap = ni->ni_vap;
4654	struct wpi_softc *sc = ni->ni_ic->ic_softc;
4655	struct wpi_node *wn = WPI_NODE(ni);
4656	int error;
4657
4658	if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED)
4659		return;
4660
4661	WPI_NT_LOCK(sc);
4662	error = wpi_del_key(ni, k);
4663	WPI_NT_UNLOCK(sc);
4664
4665	if (error == 0) {
4666		device_printf(sc->sc_dev, "%s: error while deleting key\n",
4667		    __func__);
4668	}
4669}
4670
4671static int
4672wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k,
4673    int set)
4674{
4675	struct ieee80211com *ic = vap->iv_ic;
4676	struct wpi_softc *sc = ic->ic_softc;
4677	struct wpi_vap *wvp = WPI_VAP(vap);
4678	struct ieee80211_node *ni;
4679	int error, ni_ref = 0;
4680
4681	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4682
4683	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
4684		/* Not for us. */
4685		return 1;
4686	}
4687
4688	if (!(k->wk_flags & IEEE80211_KEY_RECV)) {
4689		/* XMIT keys are handled in wpi_tx_data(). */
4690		return 1;
4691	}
4692
4693	/* Handle group keys. */
4694	if (&vap->iv_nw_keys[0] <= k &&
4695	    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
4696		WPI_NT_LOCK(sc);
4697		if (set)
4698			wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix);
4699		else
4700			wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix);
4701		WPI_NT_UNLOCK(sc);
4702
4703		if (vap->iv_state == IEEE80211_S_RUN) {
4704			ieee80211_iterate_nodes(&ic->ic_sta,
4705			    set ? wpi_load_key_cb : wpi_del_key_cb,
4706			    __DECONST(void *, k));
4707		}
4708
4709		return 1;
4710	}
4711
4712	switch (vap->iv_opmode) {
4713	case IEEE80211_M_STA:
4714		ni = vap->iv_bss;
4715		break;
4716
4717	case IEEE80211_M_IBSS:
4718	case IEEE80211_M_AHDEMO:
4719	case IEEE80211_M_HOSTAP:
4720		ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr);
4721		if (ni == NULL)
4722			return 0;	/* should not happen */
4723
4724		ni_ref = 1;
4725		break;
4726
4727	default:
4728		device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__,
4729		    vap->iv_opmode);
4730		return 0;
4731	}
4732
4733	WPI_NT_LOCK(sc);
4734	if (set)
4735		error = wpi_load_key(ni, k);
4736	else
4737		error = wpi_del_key(ni, k);
4738	WPI_NT_UNLOCK(sc);
4739
4740	if (ni_ref)
4741		ieee80211_node_decref(ni);
4742
4743	return error;
4744}
4745
4746static int
4747wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
4748{
4749	return wpi_process_key(vap, k, 1);
4750}
4751
4752static int
4753wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
4754{
4755	return wpi_process_key(vap, k, 0);
4756}
4757
4758/*
4759 * This function is called after the runtime firmware notifies us of its
4760 * readiness (called in a process context).
4761 */
4762static int
4763wpi_post_alive(struct wpi_softc *sc)
4764{
4765	int ntries, error;
4766
4767	/* Check (again) that the radio is not disabled. */
4768	if ((error = wpi_nic_lock(sc)) != 0)
4769		return error;
4770
4771	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4772
4773	/* NB: Runtime firmware must be up and running. */
4774	if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) {
4775		device_printf(sc->sc_dev,
4776		    "RF switch: radio disabled (%s)\n", __func__);
4777		wpi_nic_unlock(sc);
4778		return EPERM;   /* :-) */
4779	}
4780	wpi_nic_unlock(sc);
4781
4782	/* Wait for thermal sensor to calibrate. */
4783	for (ntries = 0; ntries < 1000; ntries++) {
4784		if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0)
4785			break;
4786		DELAY(10);
4787	}
4788
4789	if (ntries == 1000) {
4790		device_printf(sc->sc_dev,
4791		    "timeout waiting for thermal sensor calibration\n");
4792		return ETIMEDOUT;
4793	}
4794
4795	DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp);
4796	return 0;
4797}
4798
4799/*
4800 * The firmware boot code is small and is intended to be copied directly into
4801 * the NIC internal memory (no DMA transfer).
4802 */
4803static int
4804wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, int size)
4805{
4806	int error, ntries;
4807
4808	DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size);
4809
4810	size /= sizeof (uint32_t);
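	/* The BSM transfers the boot code as 32-bit words. */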
4811
4812	if ((error = wpi_nic_lock(sc)) != 0)
4813		return error;
4814
4815	/* Copy microcode image into NIC memory. */
4816	wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE,
4817	    (const uint32_t *)ucode, size);
4818
4819	wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0);
4820	wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE);
4821	wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size);
4822
4823	/* Start boot load now. */
4824	wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START);
4825
4826	/* Wait for transfer to complete. */
4827	for (ntries = 0; ntries < 1000; ntries++) {
4828		uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS);
4829		DPRINTF(sc, WPI_DEBUG_HW,
4830		    "firmware status=0x%x, val=0x%x, result=0x%x\n", status,
4831		    WPI_FH_TX_STATUS_IDLE(6),
4832		    status & WPI_FH_TX_STATUS_IDLE(6));
4833		if (status & WPI_FH_TX_STATUS_IDLE(6)) {
4834			DPRINTF(sc, WPI_DEBUG_HW,
4835			    "Status Match! - ntries = %d\n", ntries);
4836			break;
4837		}
4838		DELAY(10);
4839	}
4840	if (ntries == 1000) {
4841		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
4842		    __func__);
4843		wpi_nic_unlock(sc);
4844		return ETIMEDOUT;
4845	}
4846
4847	/* Enable boot after power up. */
4848	wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN);
4849
4850	wpi_nic_unlock(sc);
4851	return 0;
4852}
4853
4854static int
4855wpi_load_firmware(struct wpi_softc *sc)
4856{
4857	struct wpi_fw_info *fw = &sc->fw;
4858	struct wpi_dma_info *dma = &sc->fw_dma;
4859	int error;
4860
4861	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4862
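	/*
	 * The firmware DMA area holds a data section at offset 0 and a text
	 * section at offset WPI_FW_DATA_MAXSZ; both the copies and the BSM
	 * pointers programmed below follow this layout.
	 */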
4863	/* Copy initialization sections into pre-allocated DMA-safe memory. */
4864	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
4865	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
4866	memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz);
4867	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
4868
4869	/* Tell adapter where to find initialization sections. */
4870	if ((error = wpi_nic_lock(sc)) != 0)
4871		return error;
4872	wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr);
4873	wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz);
4874	wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR,
4875	    dma->paddr + WPI_FW_DATA_MAXSZ);
4876	wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
4877	wpi_nic_unlock(sc);
4878
4879	/* Load firmware boot code. */
4880	error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
4881	if (error != 0) {
4882		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
4883		    __func__);
4884		return error;
4885	}
4886
4887	/* Now press "execute". */
4888	WPI_WRITE(sc, WPI_RESET, 0);
4889
4890	/* Wait at most one second for first alive notification. */
4891	if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) {
4892		device_printf(sc->sc_dev,
4893		    "%s: timeout waiting for adapter to initialize, error %d\n",
4894		    __func__, error);
4895		return error;
4896	}
4897
4898	/* Copy runtime sections into pre-allocated DMA-safe memory. */
4899	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
4900	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
4901	memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz);
4902	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
4903
4904	/* Tell adapter where to find runtime sections. */
4905	if ((error = wpi_nic_lock(sc)) != 0)
4906		return error;
4907	wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr);
4908	wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz);
4909	wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR,
4910	    dma->paddr + WPI_FW_DATA_MAXSZ);
4911	wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE,
4912	    WPI_FW_UPDATED | fw->main.textsz);
4913	wpi_nic_unlock(sc);
4914
4915	return 0;
4916}
4917
4918static int
4919wpi_read_firmware(struct wpi_softc *sc)
4920{
4921	const struct firmware *fp;
4922	struct wpi_fw_info *fw = &sc->fw;
4923	const struct wpi_firmware_hdr *hdr;
4924	int error;
4925
4926	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4927
4928	DPRINTF(sc, WPI_DEBUG_FIRMWARE,
4929	    "Attempting to load firmware from %s module\n", WPI_FW_NAME);
4930
4931	WPI_UNLOCK(sc);
4932	fp = firmware_get(WPI_FW_NAME);
4933	WPI_LOCK(sc);
4934
4935	if (fp == NULL) {
4936		device_printf(sc->sc_dev,
4937		    "could not load firmware image '%s'\n", WPI_FW_NAME);
4938		return EINVAL;
4939	}
4940
4941	sc->fw_fp = fp;
4942
4943	if (fp->datasize < sizeof (struct wpi_firmware_hdr)) {
4944		device_printf(sc->sc_dev,
4945		    "firmware file too short: %zu bytes\n", fp->datasize);
4946		error = EINVAL;
4947		goto fail;
4948	}
4949
4950	fw->size = fp->datasize;
4951	fw->data = (const uint8_t *)fp->data;
4952
4953	/* Extract firmware header information. */
4954	hdr = (const struct wpi_firmware_hdr *)fw->data;
4955
4956	/*     |  RUNTIME FIRMWARE   |    INIT FIRMWARE    | BOOT FW  |
4957	   |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */
4958
4959	fw->main.textsz = le32toh(hdr->rtextsz);
4960	fw->main.datasz = le32toh(hdr->rdatasz);
4961	fw->init.textsz = le32toh(hdr->itextsz);
4962	fw->init.datasz = le32toh(hdr->idatasz);
4963	fw->boot.textsz = le32toh(hdr->btextsz);
4964	fw->boot.datasz = 0;
4965
4966	/* Sanity-check firmware header. */
4967	if (fw->main.textsz > WPI_FW_TEXT_MAXSZ ||
4968	    fw->main.datasz > WPI_FW_DATA_MAXSZ ||
4969	    fw->init.textsz > WPI_FW_TEXT_MAXSZ ||
4970	    fw->init.datasz > WPI_FW_DATA_MAXSZ ||
4971	    fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ ||
4972	    (fw->boot.textsz & 3) != 0) {
4973		device_printf(sc->sc_dev, "invalid firmware header\n");
4974		error = EINVAL;
4975		goto fail;
4976	}
4977
4978	/* Check that all firmware sections fit. */
4979	if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz +
4980	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
4981		device_printf(sc->sc_dev,
4982		    "firmware file too short: %zu bytes\n", fw->size);
4983		error = EINVAL;
4984		goto fail;
4985	}
4986
4987	/* Get pointers to firmware sections. */
4988	fw->main.text = (const uint8_t *)(hdr + 1);
4989	fw->main.data = fw->main.text + fw->main.textsz;
4990	fw->init.text = fw->main.data + fw->main.datasz;
4991	fw->init.data = fw->init.text + fw->init.textsz;
4992	fw->boot.text = fw->init.data + fw->init.datasz;
4993
4994	DPRINTF(sc, WPI_DEBUG_FIRMWARE,
4995	    "Firmware Version: Major %d, Minor %d, Driver %d, \n"
4996	    "runtime (text: %u, data: %u) init (text: %u, data %u) "
4997	    "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver),
4998	    fw->main.textsz, fw->main.datasz,
4999	    fw->init.textsz, fw->init.datasz, fw->boot.textsz);
5000
5001	DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text);
5002	DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data);
5003	DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text);
5004	DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data);
5005	DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text);
5006
5007	return 0;
5008
5009fail:	wpi_unload_firmware(sc);
5010	return error;
5011}
5012
5013/**
5014 * Free the referenced firmware image.
5015 */
5016static void
5017wpi_unload_firmware(struct wpi_softc *sc)
5018{
5019	if (sc->fw_fp != NULL) {
5020		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
5021		sc->fw_fp = NULL;
5022	}
5023}
5024
5025static int
5026wpi_clock_wait(struct wpi_softc *sc)
5027{
5028	int ntries;
5029
5030	/* Set "initialization complete" bit. */
5031	WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE);
5032
5033	/* Wait for clock stabilization. */
5034	for (ntries = 0; ntries < 2500; ntries++) {
5035		if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY)
5036			return 0;
5037		DELAY(100);
5038	}
5039	device_printf(sc->sc_dev,
5040	    "%s: timeout waiting for clock stabilization\n", __func__);
5041
5042	return ETIMEDOUT;
5043}
5044
5045static int
5046wpi_apm_init(struct wpi_softc *sc)
5047{
5048	uint32_t reg;
5049	int error;
5050
5051	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5052
5053	/* Disable L0s exit timer (NMI bug workaround). */
5054	WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER);
5055	/* Don't wait for ICH L0s (ICH bug workaround). */
5056	WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX);
5057
5058	/* Set FH wait threshold to max (HW bug under stress workaround). */
5059	WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000);
5060
5061	/* Retrieve PCIe Active State Power Management (ASPM). */
5062	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
5063	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
5064	if (reg & 0x02)	/* L1 Entry enabled. */
5065		WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA);
5066	else
5067		WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA);
5068
5069	WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT);
5070
5071	/* Wait for clock stabilization before accessing prph. */
5072	if ((error = wpi_clock_wait(sc)) != 0)
5073		return error;
5074
5075	if ((error = wpi_nic_lock(sc)) != 0)
5076		return error;
5077	/* Cleanup. */
5078	wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400);
5079	wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200);
5080
5081	/* Enable DMA and BSM (Bootstrap State Machine). */
5082	wpi_prph_write(sc, WPI_APMG_CLK_EN,
5083	    WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT);
5084	DELAY(20);
5085	/* Disable L1-Active. */
5086	wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS);
5087	wpi_nic_unlock(sc);
5088
5089	return 0;
5090}
5091
5092static void
5093wpi_apm_stop_master(struct wpi_softc *sc)
5094{
5095	int ntries;
5096
5097	/* Stop busmaster DMA activity. */
5098	WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER);
5099
5100	if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) ==
5101	    WPI_GP_CNTRL_MAC_PS)
5102		return; /* Already asleep. */
5103
5104	for (ntries = 0; ntries < 100; ntries++) {
5105		if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED)
5106			return;
5107		DELAY(10);
5108	}
5109	device_printf(sc->sc_dev, "%s: timeout waiting for master\n",
5110	    __func__);
5111}
5112
5113static void
5114wpi_apm_stop(struct wpi_softc *sc)
5115{
5116	wpi_apm_stop_master(sc);
5117
5118	/* Reset the entire device. */
5119	WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW);
5120	DELAY(10);
5121	/* Clear "initialization complete" bit. */
5122	WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE);
5123}
5124
5125static void
5126wpi_nic_config(struct wpi_softc *sc)
5127{
5128	uint32_t rev;
5129
5130	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5131
5132	/* voodoo from the Linux "driver".. */
5133	rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1);
5134	if ((rev & 0xc0) == 0x40)
5135		WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB);
5136	else if (!(rev & 0x80))
5137		WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM);
5138
5139	if (sc->cap == 0x80)
5140		WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC);
5141
5142	if ((sc->rev & 0xf0) == 0xd0)
5143		WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D);
5144	else
5145		WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D);
5146
5147	if (sc->type > 1)
5148		WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B);
5149}
5150
5151static int
5152wpi_hw_init(struct wpi_softc *sc)
5153{
5154	int chnl, ntries, error;
5155
5156	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
5157
5158	/* Clear pending interrupts. */
5159	WPI_WRITE(sc, WPI_INT, 0xffffffff);
5160
5161	if ((error = wpi_apm_init(sc)) != 0) {
5162		device_printf(sc->sc_dev,
5163		    "%s: could not power ON adapter, error %d\n", __func__,
5164		    error);
5165		return error;
5166	}
5167
5168	/* Select VMAIN power source. */
5169	if ((error = wpi_nic_lock(sc)) != 0)
5170		return error;
5171	wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK);
5172	wpi_nic_unlock(sc);
5173	/* Spin until VMAIN gets selected. */
5174	for (ntries = 0; ntries < 5000; ntries++) {
5175		if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN)
5176			break;
5177		DELAY(10);
5178	}
5179	if (ntries == 5000) {
5180		device_printf(sc->sc_dev, "timeout selecting power source\n");
5181		return ETIMEDOUT;
5182	}
5183
5184	/* Perform adapter initialization. */
5185	wpi_nic_config(sc);
5186
5187	/* Initialize RX ring. */
5188	if ((error = wpi_nic_lock(sc)) != 0)
5189		return error;
5190	/* Set physical address of RX ring. */
5191	WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr);
5192	/* Set physical address of RX read pointer. */
5193	WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr +
5194	    offsetof(struct wpi_shared, next));
5195	WPI_WRITE(sc, WPI_FH_RX_WPTR, 0);
5196	/* Enable RX. */
5197	WPI_WRITE(sc, WPI_FH_RX_CONFIG,
5198	    WPI_FH_RX_CONFIG_DMA_ENA |
5199	    WPI_FH_RX_CONFIG_RDRBD_ENA |
5200	    WPI_FH_RX_CONFIG_WRSTATUS_ENA |
5201	    WPI_FH_RX_CONFIG_MAXFRAG |
5202	    WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) |
5203	    WPI_FH_RX_CONFIG_IRQ_DST_HOST |
5204	    WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1));
5205	(void)WPI_READ(sc, WPI_FH_RSSR_TBL);	/* barrier */
5206	wpi_nic_unlock(sc);
5207	WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7);
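	/*
	 * Tell the firmware which RX descriptors are available; the write
	 * pointer is kept at a multiple of 8.
	 */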
5208
5209	/* Initialize TX rings. */
5210	if ((error = wpi_nic_lock(sc)) != 0)
5211		return error;
5212	wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2);	/* bypass mode */
5213	wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1);	/* enable RA0 */
5214	/* Enable all 6 TX rings. */
5215	wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f);
5216	wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000);
5217	wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002);
5218	wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4);
5219	wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5);
5220	/* Set physical address of TX rings. */
5221	WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr);
5222	WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5);
5223
5224	/* Enable all DMA channels. */
5225	for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) {
5226		WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0);
5227		WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0);
5228		WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008);
5229	}
5230	wpi_nic_unlock(sc);
5231	(void)WPI_READ(sc, WPI_FH_TX_BASE);	/* barrier */
5232
5233	/* Clear "radio off" and "commands blocked" bits. */
5234	WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);
5235	WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED);
5236
5237	/* Clear pending interrupts. */
5238	WPI_WRITE(sc, WPI_INT, 0xffffffff);
5239	/* Enable interrupts. */
5240	WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF);
5241
5242	/* _Really_ make sure "radio off" bit is cleared! */
5243	WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);
5244	WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);
5245
5246	if ((error = wpi_load_firmware(sc)) != 0) {
5247		device_printf(sc->sc_dev,
5248		    "%s: could not load firmware, error %d\n", __func__,
5249		    error);
5250		return error;
5251	}
5252	/* Wait at most one second for firmware alive notification. */
5253	if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) {
5254		device_printf(sc->sc_dev,
5255		    "%s: timeout waiting for adapter to initialize, error %d\n",
5256		    __func__, error);
5257		return error;
5258	}
5259
5260	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
5261
5262	/* Do post-firmware initialization. */
5263	return wpi_post_alive(sc);
5264}
5265
5266static void
5267wpi_hw_stop(struct wpi_softc *sc)
5268{
5269	int chnl, qid, ntries;
5270
5271	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5272
5273	if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP)
5274		wpi_nic_lock(sc);
5275
5276	WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO);
5277
5278	/* Disable interrupts. */
5279	WPI_WRITE(sc, WPI_INT_MASK, 0);
5280	WPI_WRITE(sc, WPI_INT, 0xffffffff);
5281	WPI_WRITE(sc, WPI_FH_INT, 0xffffffff);
5282
5283	/* Make sure we no longer hold the NIC lock. */
5284	wpi_nic_unlock(sc);
5285
5286	if (wpi_nic_lock(sc) == 0) {
5287		/* Stop TX scheduler. */
5288		wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0);
5289		wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0);
5290
5291		/* Stop all DMA channels. */
5292		for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) {
5293			WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0);
5294			for (ntries = 0; ntries < 200; ntries++) {
5295				if (WPI_READ(sc, WPI_FH_TX_STATUS) &
5296				    WPI_FH_TX_STATUS_IDLE(chnl))
5297					break;
5298				DELAY(10);
5299			}
5300		}
5301		wpi_nic_unlock(sc);
5302	}
5303
5304	/* Stop RX ring. */
5305	wpi_reset_rx_ring(sc);
5306
5307	/* Reset all TX rings. */
5308	for (qid = 0; qid < WPI_NTXQUEUES; qid++)
5309		wpi_reset_tx_ring(sc, &sc->txq[qid]);
5310
5311	if (wpi_nic_lock(sc) == 0) {
5312		wpi_prph_write(sc, WPI_APMG_CLK_DIS,
5313		    WPI_APMG_CLK_CTRL_DMA_CLK_RQT);
5314		wpi_nic_unlock(sc);
5315	}
5316	DELAY(5);
5317	/* Power OFF adapter. */
5318	wpi_apm_stop(sc);
5319}
5320
5321static void
5322wpi_radio_on(void *arg0, int pending)
5323{
5324	struct wpi_softc *sc = arg0;
5325	struct ieee80211com *ic = &sc->sc_ic;
5326	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5327
5328	device_printf(sc->sc_dev, "RF switch: radio enabled\n");
5329
5330	WPI_LOCK(sc);
5331	callout_stop(&sc->watchdog_rfkill);
5332	WPI_UNLOCK(sc);
5333
5334	if (vap != NULL)
5335		ieee80211_init(vap);
5336}
5337
5338static void
5339wpi_radio_off(void *arg0, int pending)
5340{
5341	struct wpi_softc *sc = arg0;
5342	struct ieee80211com *ic = &sc->sc_ic;
5343	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5344
5345	device_printf(sc->sc_dev, "RF switch: radio disabled\n");
5346
5347	ieee80211_notify_radio(ic, 0);
5348	wpi_stop(sc);
5349	if (vap != NULL)
5350		ieee80211_stop(vap);
5351
5352	WPI_LOCK(sc);
5353	callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc);
5354	WPI_UNLOCK(sc);
5355}
5356
5357static int
5358wpi_init(struct wpi_softc *sc)
5359{
5360	int error = 0;
5361
5362	WPI_LOCK(sc);
5363
5364	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
5365
5366	if (sc->sc_running != 0)
5367		goto end;
5368
5369	/* Check that the radio is not disabled by hardware switch. */
5370	if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) {
5371		device_printf(sc->sc_dev,
5372		    "RF switch: radio disabled (%s)\n", __func__);
5373		callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill,
5374		    sc);
5375		error = EINPROGRESS;
5376		goto end;
5377	}
5378
5379	/* Read firmware images from the filesystem. */
5380	if ((error = wpi_read_firmware(sc)) != 0) {
5381		device_printf(sc->sc_dev,
5382		    "%s: could not read firmware, error %d\n", __func__,
5383		    error);
5384		goto end;
5385	}
5386
5387	sc->sc_running = 1;
5388
5389	/* Initialize hardware and upload firmware. */
5390	error = wpi_hw_init(sc);
5391	wpi_unload_firmware(sc);
5392	if (error != 0) {
5393		device_printf(sc->sc_dev,
5394		    "%s: could not initialize hardware, error %d\n", __func__,
5395		    error);
5396		goto fail;
5397	}
5398
5399	/* Configure adapter now that it is ready. */
5400	if ((error = wpi_config(sc)) != 0) {
5401		device_printf(sc->sc_dev,
5402		    "%s: could not configure device, error %d\n", __func__,
5403		    error);
5404		goto fail;
5405	}
5406
5407	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
5408
5409	WPI_UNLOCK(sc);
5410
5411	return 0;
5412
5413fail:	wpi_stop_locked(sc);
5414
5415end:	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
5416	WPI_UNLOCK(sc);
5417
5418	return error;
5419}
5420
5421static void
5422wpi_stop_locked(struct wpi_softc *sc)
5423{
5424
5425	WPI_LOCK_ASSERT(sc);
5426
5427	if (sc->sc_running == 0)
5428		return;
5429
5430	WPI_TX_LOCK(sc);
5431	WPI_TXQ_LOCK(sc);
5432	sc->sc_running = 0;
5433	WPI_TXQ_UNLOCK(sc);
5434	WPI_TX_UNLOCK(sc);
5435
5436	WPI_TXQ_STATE_LOCK(sc);
5437	callout_stop(&sc->tx_timeout);
5438	WPI_TXQ_STATE_UNLOCK(sc);
5439
5440	WPI_RXON_LOCK(sc);
5441	callout_stop(&sc->scan_timeout);
5442	callout_stop(&sc->calib_to);
5443	WPI_RXON_UNLOCK(sc);
5444
5445	/* Power OFF hardware. */
5446	wpi_hw_stop(sc);
5447}
5448
5449static void
5450wpi_stop(struct wpi_softc *sc)
5451{
5452	WPI_LOCK(sc);
5453	wpi_stop_locked(sc);
5454	WPI_UNLOCK(sc);
5455}
5456
5457/*
5458 * Callback from net80211 to start a scan.
5459 */
5460static void
5461wpi_scan_start(struct ieee80211com *ic)
5462{
5463	struct wpi_softc *sc = ic->ic_softc;
5464
5465	wpi_set_led(sc, WPI_LED_LINK, 20, 2);
5466}
5467
5468/*
5469 * Callback from net80211 to terminate a scan.
5470 */
5471static void
5472wpi_scan_end(struct ieee80211com *ic)
5473{
5474	struct wpi_softc *sc = ic->ic_softc;
5475	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5476
5477	if (vap->iv_state == IEEE80211_S_RUN)
5478		wpi_set_led(sc, WPI_LED_LINK, 0, 1);
5479}
5480
5481/**
5482 * Called by the net80211 framework to indicate to the driver
5483 * that the channel should be changed.
5484 */
5485static void
5486wpi_set_channel(struct ieee80211com *ic)
5487{
5488	const struct ieee80211_channel *c = ic->ic_curchan;
5489	struct wpi_softc *sc = ic->ic_softc;
5490	int error;
5491
5492	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5493
5494	WPI_LOCK(sc);
5495	sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
5496	sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
5497	WPI_UNLOCK(sc);
5498	WPI_TX_LOCK(sc);
5499	sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
5500	sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
5501	WPI_TX_UNLOCK(sc);
5502
5503	/*
5504	 * Only need to set the channel in Monitor mode. AP scanning and auth
5505	 * are already taken care of by their respective firmware commands.
5506	 */
5507	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5508		WPI_RXON_LOCK(sc);
5509		sc->rxon.chan = ieee80211_chan2ieee(ic, c);
5510		if (IEEE80211_IS_CHAN_2GHZ(c)) {
5511			sc->rxon.flags |= htole32(WPI_RXON_AUTO |
5512			    WPI_RXON_24GHZ);
5513		} else {
5514			sc->rxon.flags &= ~htole32(WPI_RXON_AUTO |
5515			    WPI_RXON_24GHZ);
5516		}
5517		if ((error = wpi_send_rxon(sc, 0, 1)) != 0)
5518			device_printf(sc->sc_dev,
5519			    "%s: error %d setting channel\n", __func__,
5520			    error);
5521		WPI_RXON_UNLOCK(sc);
5522	}
5523}
5524
5525/**
5526 * Called by net80211 to indicate that we need to scan the current
5527 * channel. The channel has previously been set via the wpi_set_channel
5528 * callback.
5529 */
5530static void
5531wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
5532{
5533	struct ieee80211vap *vap = ss->ss_vap;
5534	struct ieee80211com *ic = vap->iv_ic;
5535	struct wpi_softc *sc = ic->ic_softc;
5536	int error;
5537
5538	WPI_RXON_LOCK(sc);
5539	error = wpi_scan(sc, ic->ic_curchan);
5540	WPI_RXON_UNLOCK(sc);
5541	if (error != 0)
5542		ieee80211_cancel_scan(vap);
5543}
5544
5545/**
5546 * Called by the net80211 framework to indicate
5547 * that the minimum dwell time has been met and the scan may be terminated.
5548 * We don't actually terminate the scan as the firmware will notify
5549 * us when it's finished and we have no way to interrupt it.
5550 */
5551static void
5552wpi_scan_mindwell(struct ieee80211_scan_state *ss)
5553{
5554	/* NB: don't try to abort scan; wait for firmware to finish */
5555}
5556
5557static void
5558wpi_hw_reset(void *arg, int pending)
5559{
5560	struct wpi_softc *sc = arg;
5561	struct ieee80211com *ic = &sc->sc_ic;
5562	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5563
5564	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5565
5566	ieee80211_notify_radio(ic, 0);
5567	if (vap != NULL && (ic->ic_flags & IEEE80211_F_SCAN))
5568		ieee80211_cancel_scan(vap);
5569
5570	wpi_stop(sc);
5571	if (vap != NULL) {
5572		ieee80211_stop(vap);
5573		ieee80211_init(vap);
5574	}
5575}
5576