1/*-
2 * Copyright (c) 2007-2009
3 *	Damien Bergamini <damien.bergamini@free.fr>
4 * Copyright (c) 2008
5 *	Benjamin Close <benjsc@FreeBSD.org>
6 * Copyright (c) 2008 Sam Leffler, Errno Consulting
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21/*
22 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
23 * adapters.
24 */
25
26#include <sys/cdefs.h>
27__FBSDID("$FreeBSD$");
28
29#include <sys/param.h>
30#include <sys/sockio.h>
31#include <sys/sysctl.h>
32#include <sys/mbuf.h>
33#include <sys/kernel.h>
34#include <sys/socket.h>
35#include <sys/systm.h>
36#include <sys/malloc.h>
37#include <sys/bus.h>
38#include <sys/rman.h>
39#include <sys/endian.h>
40#include <sys/firmware.h>
41#include <sys/limits.h>
42#include <sys/module.h>
43#include <sys/queue.h>
44#include <sys/taskqueue.h>
45
46#include <machine/bus.h>
47#include <machine/resource.h>
48#include <machine/clock.h>
49
50#include <dev/pci/pcireg.h>
51#include <dev/pci/pcivar.h>
52
53#include <net/bpf.h>
54#include <net/if.h>
55#include <net/if_arp.h>
56#include <net/ethernet.h>
57#include <net/if_dl.h>
58#include <net/if_media.h>
59#include <net/if_types.h>
60
61#include <netinet/in.h>
62#include <netinet/in_systm.h>
63#include <netinet/in_var.h>
64#include <netinet/if_ether.h>
65#include <netinet/ip.h>
66
67#include <net80211/ieee80211_var.h>
68#include <net80211/ieee80211_radiotap.h>
69#include <net80211/ieee80211_regdomain.h>
70#include <net80211/ieee80211_ratectl.h>
71
72#include <dev/iwn/if_iwnreg.h>
73#include <dev/iwn/if_iwnvar.h>
74
75struct iwn_ident {
76	uint16_t	vendor;
77	uint16_t	device;
78	const char	*name;
79};
80
81static const struct iwn_ident iwn_ident_table[] = {
82	{ 0x8086, 0x0082, "Intel Centrino Advanced-N 6205"		},
83	{ 0x8086, 0x0083, "Intel Centrino Wireless-N 1000"		},
84	{ 0x8086, 0x0084, "Intel Centrino Wireless-N 1000"		},
85	{ 0x8086, 0x0085, "Intel Centrino Advanced-N 6205"		},
86	{ 0x8086, 0x0087, "Intel Centrino Advanced-N + WiMAX 6250"	},
87	{ 0x8086, 0x0089, "Intel Centrino Advanced-N + WiMAX 6250"	},
88	{ 0x8086, 0x008a, "Intel Centrino Wireless-N 1030"		},
89	{ 0x8086, 0x008b, "Intel Centrino Wireless-N 1030"		},
90	{ 0x8086, 0x0090, "Intel Centrino Advanced-N 6230"		},
91	{ 0x8086, 0x0091, "Intel Centrino Advanced-N 6230"		},
92	{ 0x8086, 0x0885, "Intel Centrino Wireless-N + WiMAX 6150"	},
93	{ 0x8086, 0x0886, "Intel Centrino Wireless-N + WiMAX 6150"	},
94	{ 0x8086, 0x0896, "Intel Centrino Wireless-N 130"		},
95	{ 0x8086, 0x0887, "Intel Centrino Wireless-N 130"		},
96	{ 0x8086, 0x08ae, "Intel Centrino Wireless-N 100"		},
97	{ 0x8086, 0x08af, "Intel Centrino Wireless-N 100"		},
98	{ 0x8086, 0x4229, "Intel Wireless WiFi Link 4965"		},
99	{ 0x8086, 0x422b, "Intel Centrino Ultimate-N 6300"		},
100	{ 0x8086, 0x422c, "Intel Centrino Advanced-N 6200"		},
101	{ 0x8086, 0x422d, "Intel Wireless WiFi Link 4965"		},
102	{ 0x8086, 0x4230, "Intel Wireless WiFi Link 4965"		},
103	{ 0x8086, 0x4232, "Intel WiFi Link 5100"			},
104	{ 0x8086, 0x4233, "Intel Wireless WiFi Link 4965"		},
105	{ 0x8086, 0x4235, "Intel Ultimate N WiFi Link 5300"		},
106	{ 0x8086, 0x4236, "Intel Ultimate N WiFi Link 5300"		},
107	{ 0x8086, 0x4237, "Intel WiFi Link 5100"			},
108	{ 0x8086, 0x4238, "Intel Centrino Ultimate-N 6300"		},
109	{ 0x8086, 0x4239, "Intel Centrino Advanced-N 6200"		},
110	{ 0x8086, 0x423a, "Intel WiMAX/WiFi Link 5350"			},
111	{ 0x8086, 0x423b, "Intel WiMAX/WiFi Link 5350"			},
112	{ 0x8086, 0x423c, "Intel WiMAX/WiFi Link 5150"			},
113	{ 0x8086, 0x423d, "Intel WiMAX/WiFi Link 5150"			},
114	{ 0, 0, NULL }
115};
116
117static int	iwn_probe(device_t);
118static int	iwn_attach(device_t);
119static int	iwn4965_attach(struct iwn_softc *, uint16_t);
120static int	iwn5000_attach(struct iwn_softc *, uint16_t);
121static void	iwn_radiotap_attach(struct iwn_softc *);
122static void	iwn_sysctlattach(struct iwn_softc *);
123static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
124		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
125		    const uint8_t [IEEE80211_ADDR_LEN],
126		    const uint8_t [IEEE80211_ADDR_LEN]);
127static void	iwn_vap_delete(struct ieee80211vap *);
128static int	iwn_detach(device_t);
129static int	iwn_shutdown(device_t);
130static int	iwn_suspend(device_t);
131static int	iwn_resume(device_t);
132static int	iwn_nic_lock(struct iwn_softc *);
133static int	iwn_eeprom_lock(struct iwn_softc *);
134static int	iwn_init_otprom(struct iwn_softc *);
135static int	iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
136static void	iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
137static int	iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
138		    void **, bus_size_t, bus_size_t);
139static void	iwn_dma_contig_free(struct iwn_dma_info *);
140static int	iwn_alloc_sched(struct iwn_softc *);
141static void	iwn_free_sched(struct iwn_softc *);
142static int	iwn_alloc_kw(struct iwn_softc *);
143static void	iwn_free_kw(struct iwn_softc *);
144static int	iwn_alloc_ict(struct iwn_softc *);
145static void	iwn_free_ict(struct iwn_softc *);
146static int	iwn_alloc_fwmem(struct iwn_softc *);
147static void	iwn_free_fwmem(struct iwn_softc *);
148static int	iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
149static void	iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
150static void	iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
151static int	iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
152		    int);
153static void	iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
154static void	iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
155static void	iwn5000_ict_reset(struct iwn_softc *);
156static int	iwn_read_eeprom(struct iwn_softc *,
157		    uint8_t macaddr[IEEE80211_ADDR_LEN]);
158static void	iwn4965_read_eeprom(struct iwn_softc *);
159static void	iwn4965_print_power_group(struct iwn_softc *, int);
160static void	iwn5000_read_eeprom(struct iwn_softc *);
161static uint32_t	iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
162static void	iwn_read_eeprom_band(struct iwn_softc *, int);
163static void	iwn_read_eeprom_ht40(struct iwn_softc *, int);
164static void	iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
165static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
166		    struct ieee80211_channel *);
167static int	iwn_setregdomain(struct ieee80211com *,
168		    struct ieee80211_regdomain *, int,
169		    struct ieee80211_channel[]);
170static void	iwn_read_eeprom_enhinfo(struct iwn_softc *);
171static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
172		    const uint8_t mac[IEEE80211_ADDR_LEN]);
173static void	iwn_newassoc(struct ieee80211_node *, int);
174static int	iwn_media_change(struct ifnet *);
175static int	iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
176static void	iwn_calib_timeout(void *);
177static void	iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
178		    struct iwn_rx_data *);
179static void	iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
180		    struct iwn_rx_data *);
181static void	iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
182		    struct iwn_rx_data *);
183static void	iwn5000_rx_calib_results(struct iwn_softc *,
184		    struct iwn_rx_desc *, struct iwn_rx_data *);
185static void	iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
186		    struct iwn_rx_data *);
187static void	iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
188		    struct iwn_rx_data *);
189static void	iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
190		    struct iwn_rx_data *);
191static void	iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
192		    uint8_t);
193static void	iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, void *);
194static void	iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
195static void	iwn_notif_intr(struct iwn_softc *);
196static void	iwn_wakeup_intr(struct iwn_softc *);
197static void	iwn_rftoggle_intr(struct iwn_softc *);
198static void	iwn_fatal_intr(struct iwn_softc *);
199static void	iwn_intr(void *);
200static void	iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
201		    uint16_t);
202static void	iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
203		    uint16_t);
204#ifdef notyet
205static void	iwn5000_reset_sched(struct iwn_softc *, int, int);
206#endif
207static int	iwn_tx_data(struct iwn_softc *, struct mbuf *,
208		    struct ieee80211_node *);
209static int	iwn_tx_data_raw(struct iwn_softc *, struct mbuf *,
210		    struct ieee80211_node *,
211		    const struct ieee80211_bpf_params *params);
212static int	iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
213		    const struct ieee80211_bpf_params *);
214static void	iwn_start(struct ifnet *);
215static void	iwn_start_locked(struct ifnet *);
216static void	iwn_watchdog(void *);
217static int	iwn_ioctl(struct ifnet *, u_long, caddr_t);
218static int	iwn_cmd(struct iwn_softc *, int, const void *, int, int);
219static int	iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
220		    int);
221static int	iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
222		    int);
223static int	iwn_set_link_quality(struct iwn_softc *,
224		    struct ieee80211_node *);
225static int	iwn_add_broadcast_node(struct iwn_softc *, int);
226static int	iwn_updateedca(struct ieee80211com *);
227static void	iwn_update_mcast(struct ifnet *);
228static void	iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
229static int	iwn_set_critical_temp(struct iwn_softc *);
230static int	iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
231static void	iwn4965_power_calibration(struct iwn_softc *, int);
232static int	iwn4965_set_txpower(struct iwn_softc *,
233		    struct ieee80211_channel *, int);
234static int	iwn5000_set_txpower(struct iwn_softc *,
235		    struct ieee80211_channel *, int);
236static int	iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
237static int	iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
238static int	iwn_get_noise(const struct iwn_rx_general_stats *);
239static int	iwn4965_get_temperature(struct iwn_softc *);
240static int	iwn5000_get_temperature(struct iwn_softc *);
241static int	iwn_init_sensitivity(struct iwn_softc *);
242static void	iwn_collect_noise(struct iwn_softc *,
243		    const struct iwn_rx_general_stats *);
244static int	iwn4965_init_gains(struct iwn_softc *);
245static int	iwn5000_init_gains(struct iwn_softc *);
246static int	iwn4965_set_gains(struct iwn_softc *);
247static int	iwn5000_set_gains(struct iwn_softc *);
248static void	iwn_tune_sensitivity(struct iwn_softc *,
249		    const struct iwn_rx_stats *);
250static int	iwn_send_sensitivity(struct iwn_softc *);
251static int	iwn_set_pslevel(struct iwn_softc *, int, int, int);
252static int	iwn_send_btcoex(struct iwn_softc *);
253static int	iwn_send_advanced_btcoex(struct iwn_softc *);
254static int	iwn5000_runtime_calib(struct iwn_softc *);
255static int	iwn_config(struct iwn_softc *);
256static uint8_t	*ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int);
257static int	iwn_scan(struct iwn_softc *);
258static int	iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
259static int	iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
260static int	iwn_ampdu_rx_start(struct ieee80211_node *,
261		    struct ieee80211_rx_ampdu *, int, int, int);
262static void	iwn_ampdu_rx_stop(struct ieee80211_node *,
263		    struct ieee80211_rx_ampdu *);
264static int	iwn_addba_request(struct ieee80211_node *,
265		    struct ieee80211_tx_ampdu *, int, int, int);
266static int	iwn_addba_response(struct ieee80211_node *,
267		    struct ieee80211_tx_ampdu *, int, int, int);
268static int	iwn_ampdu_tx_start(struct ieee80211com *,
269		    struct ieee80211_node *, uint8_t);
270static void	iwn_ampdu_tx_stop(struct ieee80211_node *,
271		    struct ieee80211_tx_ampdu *);
272static void	iwn4965_ampdu_tx_start(struct iwn_softc *,
273		    struct ieee80211_node *, int, uint8_t, uint16_t);
274static void	iwn4965_ampdu_tx_stop(struct iwn_softc *, int,
275		    uint8_t, uint16_t);
276static void	iwn5000_ampdu_tx_start(struct iwn_softc *,
277		    struct ieee80211_node *, int, uint8_t, uint16_t);
278static void	iwn5000_ampdu_tx_stop(struct iwn_softc *, int,
279		    uint8_t, uint16_t);
280static int	iwn5000_query_calibration(struct iwn_softc *);
281static int	iwn5000_send_calibration(struct iwn_softc *);
282static int	iwn5000_send_wimax_coex(struct iwn_softc *);
283static int	iwn5000_crystal_calib(struct iwn_softc *);
284static int	iwn5000_temp_offset_calib(struct iwn_softc *);
285static int	iwn4965_post_alive(struct iwn_softc *);
286static int	iwn5000_post_alive(struct iwn_softc *);
287static int	iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
288		    int);
289static int	iwn4965_load_firmware(struct iwn_softc *);
290static int	iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
291		    const uint8_t *, int);
292static int	iwn5000_load_firmware(struct iwn_softc *);
293static int	iwn_read_firmware_leg(struct iwn_softc *,
294		    struct iwn_fw_info *);
295static int	iwn_read_firmware_tlv(struct iwn_softc *,
296		    struct iwn_fw_info *, uint16_t);
297static int	iwn_read_firmware(struct iwn_softc *);
298static int	iwn_clock_wait(struct iwn_softc *);
299static int	iwn_apm_init(struct iwn_softc *);
300static void	iwn_apm_stop_master(struct iwn_softc *);
301static void	iwn_apm_stop(struct iwn_softc *);
302static int	iwn4965_nic_config(struct iwn_softc *);
303static int	iwn5000_nic_config(struct iwn_softc *);
304static int	iwn_hw_prepare(struct iwn_softc *);
305static int	iwn_hw_init(struct iwn_softc *);
306static void	iwn_hw_stop(struct iwn_softc *);
307static void	iwn_radio_on(void *, int);
308static void	iwn_radio_off(void *, int);
309static void	iwn_init_locked(struct iwn_softc *);
310static void	iwn_init(void *);
311static void	iwn_stop_locked(struct iwn_softc *);
312static void	iwn_stop(struct iwn_softc *);
313static void	iwn_scan_start(struct ieee80211com *);
314static void	iwn_scan_end(struct ieee80211com *);
315static void	iwn_set_channel(struct ieee80211com *);
316static void	iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
317static void	iwn_scan_mindwell(struct ieee80211_scan_state *);
318static void	iwn_hw_reset(void *, int);
319
320#define IWN_DEBUG
321#ifdef IWN_DEBUG
322enum {
323	IWN_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
324	IWN_DEBUG_RECV		= 0x00000002,	/* basic recv operation */
325	IWN_DEBUG_STATE		= 0x00000004,	/* 802.11 state transitions */
326	IWN_DEBUG_TXPOW		= 0x00000008,	/* tx power processing */
327	IWN_DEBUG_RESET		= 0x00000010,	/* reset processing */
328	IWN_DEBUG_OPS		= 0x00000020,	/* iwn_ops processing */
329	IWN_DEBUG_BEACON 	= 0x00000040,	/* beacon handling */
330	IWN_DEBUG_WATCHDOG 	= 0x00000080,	/* watchdog timeout */
331	IWN_DEBUG_INTR		= 0x00000100,	/* ISR */
332	IWN_DEBUG_CALIBRATE	= 0x00000200,	/* periodic calibration */
333	IWN_DEBUG_NODE		= 0x00000400,	/* node management */
334	IWN_DEBUG_LED		= 0x00000800,	/* led management */
335	IWN_DEBUG_CMD		= 0x00001000,	/* cmd submission */
336	IWN_DEBUG_FATAL		= 0x80000000,	/* fatal errors */
337	IWN_DEBUG_ANY		= 0xffffffff
338};
339
340#define DPRINTF(sc, m, fmt, ...) do {			\
341	if (sc->sc_debug & (m))				\
342		printf(fmt, __VA_ARGS__);		\
343} while (0)
344
345static const char *
346iwn_intr_str(uint8_t cmd)
347{
348	switch (cmd) {
349	/* Notifications */
350	case IWN_UC_READY:		return "UC_READY";
351	case IWN_ADD_NODE_DONE:		return "ADD_NODE_DONE";
352	case IWN_TX_DONE:		return "TX_DONE";
353	case IWN_START_SCAN:		return "START_SCAN";
354	case IWN_STOP_SCAN:		return "STOP_SCAN";
355	case IWN_RX_STATISTICS:		return "RX_STATS";
356	case IWN_BEACON_STATISTICS:	return "BEACON_STATS";
357	case IWN_STATE_CHANGED:		return "STATE_CHANGED";
358	case IWN_BEACON_MISSED:		return "BEACON_MISSED";
359	case IWN_RX_PHY:		return "RX_PHY";
360	case IWN_MPDU_RX_DONE:		return "MPDU_RX_DONE";
361	case IWN_RX_DONE:		return "RX_DONE";
362
363	/* Command Notifications */
364	case IWN_CMD_RXON:		return "IWN_CMD_RXON";
365	case IWN_CMD_RXON_ASSOC:	return "IWN_CMD_RXON_ASSOC";
366	case IWN_CMD_EDCA_PARAMS:	return "IWN_CMD_EDCA_PARAMS";
367	case IWN_CMD_TIMING:		return "IWN_CMD_TIMING";
368	case IWN_CMD_LINK_QUALITY:	return "IWN_CMD_LINK_QUALITY";
369	case IWN_CMD_SET_LED:		return "IWN_CMD_SET_LED";
370	case IWN5000_CMD_WIMAX_COEX:	return "IWN5000_CMD_WIMAX_COEX";
371	case IWN5000_CMD_CALIB_CONFIG:	return "IWN5000_CMD_CALIB_CONFIG";
372	case IWN5000_CMD_CALIB_RESULT:	return "IWN5000_CMD_CALIB_RESULT";
373	case IWN5000_CMD_CALIB_COMPLETE: return "IWN5000_CMD_CALIB_COMPLETE";
374	case IWN_CMD_SET_POWER_MODE:	return "IWN_CMD_SET_POWER_MODE";
375	case IWN_CMD_SCAN:		return "IWN_CMD_SCAN";
376	case IWN_CMD_SCAN_RESULTS:	return "IWN_CMD_SCAN_RESULTS";
377	case IWN_CMD_TXPOWER:		return "IWN_CMD_TXPOWER";
378	case IWN_CMD_TXPOWER_DBM:	return "IWN_CMD_TXPOWER_DBM";
379	case IWN5000_CMD_TX_ANT_CONFIG:	return "IWN5000_CMD_TX_ANT_CONFIG";
380	case IWN_CMD_BT_COEX:		return "IWN_CMD_BT_COEX";
381	case IWN_CMD_SET_CRITICAL_TEMP:	return "IWN_CMD_SET_CRITICAL_TEMP";
382	case IWN_CMD_SET_SENSITIVITY:	return "IWN_CMD_SET_SENSITIVITY";
383	case IWN_CMD_PHY_CALIB:		return "IWN_CMD_PHY_CALIB";
384	}
385	return "UNKNOWN INTR NOTIF/CMD";
386}
387#else
388#define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0)
389#endif
390
391static device_method_t iwn_methods[] = {
392	/* Device interface */
393	DEVMETHOD(device_probe,		iwn_probe),
394	DEVMETHOD(device_attach,	iwn_attach),
395	DEVMETHOD(device_detach,	iwn_detach),
396	DEVMETHOD(device_shutdown,	iwn_shutdown),
397	DEVMETHOD(device_suspend,	iwn_suspend),
398	DEVMETHOD(device_resume,	iwn_resume),
399
400	DEVMETHOD_END
401};
402
403static driver_t iwn_driver = {
404	"iwn",
405	iwn_methods,
406	sizeof(struct iwn_softc)
407};
408static devclass_t iwn_devclass;
409
410DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, NULL, NULL);
411
412MODULE_VERSION(iwn, 1);
413
414MODULE_DEPEND(iwn, firmware, 1, 1, 1);
415MODULE_DEPEND(iwn, pci, 1, 1, 1);
416MODULE_DEPEND(iwn, wlan, 1, 1, 1);
417
418static int
419iwn_probe(device_t dev)
420{
421	const struct iwn_ident *ident;
422
423	for (ident = iwn_ident_table; ident->name != NULL; ident++) {
424		if (pci_get_vendor(dev) == ident->vendor &&
425		    pci_get_device(dev) == ident->device) {
426			device_set_desc(dev, ident->name);
427			return (BUS_PROBE_DEFAULT);
428		}
429	}
430	return ENXIO;
431}
432
433static int
434iwn_attach(device_t dev)
435{
436	struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev);
437	struct ieee80211com *ic;
438	struct ifnet *ifp;
439	int i, error, rid;
440	uint8_t macaddr[IEEE80211_ADDR_LEN];
441
442	sc->sc_dev = dev;
443
444	/*
445	 * Get the offset of the PCI Express Capability Structure in PCI
446	 * Configuration Space.
447	 */
448	error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
449	if (error != 0) {
450		device_printf(dev, "PCIe capability structure not found!\n");
451		return error;
452	}
453
454	/* Clear device-specific "PCI retry timeout" register (41h). */
455	pci_write_config(dev, 0x41, 0, 1);
456
457	/* Enable bus-mastering. */
458	pci_enable_busmaster(dev);
459
460	rid = PCIR_BAR(0);
461	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
462	    RF_ACTIVE);
463	if (sc->mem == NULL) {
464		device_printf(dev, "can't map mem space\n");
465		error = ENOMEM;
466		return error;
467	}
468	sc->sc_st = rman_get_bustag(sc->mem);
469	sc->sc_sh = rman_get_bushandle(sc->mem);
470
471	i = 1;
472	rid = 0;
473	if (pci_alloc_msi(dev, &i) == 0)
474		rid = 1;
475	/* Install interrupt handler. */
476	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
477	    (rid != 0 ? 0 : RF_SHAREABLE));
478	if (sc->irq == NULL) {
479		device_printf(dev, "can't map interrupt\n");
480		error = ENOMEM;
481		goto fail;
482	}
483
484	IWN_LOCK_INIT(sc);
485
486	/* Read hardware revision and attach. */
487	sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf;
488	if (sc->hw_type == IWN_HW_REV_TYPE_4965)
489		error = iwn4965_attach(sc, pci_get_device(dev));
490	else
491		error = iwn5000_attach(sc, pci_get_device(dev));
492	if (error != 0) {
493		device_printf(dev, "could not attach device, error %d\n",
494		    error);
495		goto fail;
496	}
497
498	if ((error = iwn_hw_prepare(sc)) != 0) {
499		device_printf(dev, "hardware not ready, error %d\n", error);
500		goto fail;
501	}
502
503	/* Allocate DMA memory for firmware transfers. */
504	if ((error = iwn_alloc_fwmem(sc)) != 0) {
505		device_printf(dev,
506		    "could not allocate memory for firmware, error %d\n",
507		    error);
508		goto fail;
509	}
510
511	/* Allocate "Keep Warm" page. */
512	if ((error = iwn_alloc_kw(sc)) != 0) {
513		device_printf(dev,
514		    "could not allocate keep warm page, error %d\n", error);
515		goto fail;
516	}
517
518	/* Allocate ICT table for 5000 Series. */
519	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
520	    (error = iwn_alloc_ict(sc)) != 0) {
521		device_printf(dev, "could not allocate ICT table, error %d\n",
522		    error);
523		goto fail;
524	}
525
526	/* Allocate TX scheduler "rings". */
527	if ((error = iwn_alloc_sched(sc)) != 0) {
528		device_printf(dev,
529		    "could not allocate TX scheduler rings, error %d\n", error);
530		goto fail;
531	}
532
533	/* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
534	for (i = 0; i < sc->ntxqs; i++) {
535		if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
536			device_printf(dev,
537			    "could not allocate TX ring %d, error %d\n", i,
538			    error);
539			goto fail;
540		}
541	}
542
543	/* Allocate RX ring. */
544	if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
545		device_printf(dev, "could not allocate RX ring, error %d\n",
546		    error);
547		goto fail;
548	}
549
550	/* Clear pending interrupts. */
551	IWN_WRITE(sc, IWN_INT, 0xffffffff);
552
553	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
554	if (ifp == NULL) {
555		device_printf(dev, "can not allocate ifnet structure\n");
556		goto fail;
557	}
558
559	ic = ifp->if_l2com;
560	ic->ic_ifp = ifp;
561	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
562	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
563
564	/* Set device capabilities. */
565	ic->ic_caps =
566		  IEEE80211_C_STA		/* station mode supported */
567		| IEEE80211_C_MONITOR		/* monitor mode supported */
568		| IEEE80211_C_BGSCAN		/* background scanning */
569		| IEEE80211_C_TXPMGT		/* tx power management */
570		| IEEE80211_C_SHSLOT		/* short slot time supported */
571		| IEEE80211_C_WPA
572		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
573#if 0
574		| IEEE80211_C_IBSS		/* ibss/adhoc mode */
575#endif
576		| IEEE80211_C_WME		/* WME */
577		;
578
579	/* Read MAC address, channels, etc from EEPROM. */
580	if ((error = iwn_read_eeprom(sc, macaddr)) != 0) {
581		device_printf(dev, "could not read EEPROM, error %d\n",
582		    error);
583		goto fail;
584	}
585
586	/* Count the number of available chains. */
587	sc->ntxchains =
588	    ((sc->txchainmask >> 2) & 1) +
589	    ((sc->txchainmask >> 1) & 1) +
590	    ((sc->txchainmask >> 0) & 1);
591	sc->nrxchains =
592	    ((sc->rxchainmask >> 2) & 1) +
593	    ((sc->rxchainmask >> 1) & 1) +
594	    ((sc->rxchainmask >> 0) & 1);
595	if (bootverbose) {
596		device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n",
597		    sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
598		    macaddr, ":");
599	}
600
601	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
602		ic->ic_rxstream = sc->nrxchains;
603		ic->ic_txstream = sc->ntxchains;
604		ic->ic_htcaps =
605			  IEEE80211_HTCAP_SMPS_OFF	/* SMPS mode disabled */
606			| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
607			| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width*/
608			| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
609#ifdef notyet
610			| IEEE80211_HTCAP_GREENFIELD
611#if IWN_RBUF_SIZE == 8192
612			| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
613#else
614			| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
615#endif
616#endif
617			/* s/w capabilities */
618			| IEEE80211_HTC_HT		/* HT operation */
619			| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
620#ifdef notyet
621			| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
622#endif
623			;
624	}
625
626	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
627	ifp->if_softc = sc;
628	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
629	ifp->if_init = iwn_init;
630	ifp->if_ioctl = iwn_ioctl;
631	ifp->if_start = iwn_start;
632	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
633	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
634	IFQ_SET_READY(&ifp->if_snd);
635
636	ieee80211_ifattach(ic, macaddr);
637	ic->ic_vap_create = iwn_vap_create;
638	ic->ic_vap_delete = iwn_vap_delete;
639	ic->ic_raw_xmit = iwn_raw_xmit;
640	ic->ic_node_alloc = iwn_node_alloc;
641	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
642	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
643	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
644	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
645	sc->sc_addba_request = ic->ic_addba_request;
646	ic->ic_addba_request = iwn_addba_request;
647	sc->sc_addba_response = ic->ic_addba_response;
648	ic->ic_addba_response = iwn_addba_response;
649	sc->sc_addba_stop = ic->ic_addba_stop;
650	ic->ic_addba_stop = iwn_ampdu_tx_stop;
651	ic->ic_newassoc = iwn_newassoc;
652	ic->ic_wme.wme_update = iwn_updateedca;
653	ic->ic_update_mcast = iwn_update_mcast;
654	ic->ic_scan_start = iwn_scan_start;
655	ic->ic_scan_end = iwn_scan_end;
656	ic->ic_set_channel = iwn_set_channel;
657	ic->ic_scan_curchan = iwn_scan_curchan;
658	ic->ic_scan_mindwell = iwn_scan_mindwell;
659	ic->ic_setregdomain = iwn_setregdomain;
660
661	iwn_radiotap_attach(sc);
662
663	callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0);
664	callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
665	TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc);
666	TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc);
667	TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc);
668
669	iwn_sysctlattach(sc);
670
671	/*
672	 * Hook our interrupt after all initialization is complete.
673	 */
674	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
675	    NULL, iwn_intr, sc, &sc->sc_ih);
676	if (error != 0) {
677		device_printf(dev, "can't establish interrupt, error %d\n",
678		    error);
679		goto fail;
680	}
681
682	if (bootverbose)
683		ieee80211_announce(ic);
684	return 0;
685fail:
686	iwn_detach(dev);
687	return error;
688}
689
690static int
691iwn4965_attach(struct iwn_softc *sc, uint16_t pid)
692{
693	struct iwn_ops *ops = &sc->ops;
694
695	ops->load_firmware = iwn4965_load_firmware;
696	ops->read_eeprom = iwn4965_read_eeprom;
697	ops->post_alive = iwn4965_post_alive;
698	ops->nic_config = iwn4965_nic_config;
699	ops->update_sched = iwn4965_update_sched;
700	ops->get_temperature = iwn4965_get_temperature;
701	ops->get_rssi = iwn4965_get_rssi;
702	ops->set_txpower = iwn4965_set_txpower;
703	ops->init_gains = iwn4965_init_gains;
704	ops->set_gains = iwn4965_set_gains;
705	ops->add_node = iwn4965_add_node;
706	ops->tx_done = iwn4965_tx_done;
707	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
708	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
709	sc->ntxqs = IWN4965_NTXQUEUES;
710	sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE;
711	sc->ndmachnls = IWN4965_NDMACHNLS;
712	sc->broadcast_id = IWN4965_ID_BROADCAST;
713	sc->rxonsz = IWN4965_RXONSZ;
714	sc->schedsz = IWN4965_SCHEDSZ;
715	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
716	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
717	sc->fwsz = IWN4965_FWSZ;
718	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
719	sc->limits = &iwn4965_sensitivity_limits;
720	sc->fwname = "iwn4965fw";
721	/* Override chains masks, ROM is known to be broken. */
722	sc->txchainmask = IWN_ANT_AB;
723	sc->rxchainmask = IWN_ANT_ABC;
724
725	return 0;
726}
727
728static int
729iwn5000_attach(struct iwn_softc *sc, uint16_t pid)
730{
731	struct iwn_ops *ops = &sc->ops;
732
733	ops->load_firmware = iwn5000_load_firmware;
734	ops->read_eeprom = iwn5000_read_eeprom;
735	ops->post_alive = iwn5000_post_alive;
736	ops->nic_config = iwn5000_nic_config;
737	ops->update_sched = iwn5000_update_sched;
738	ops->get_temperature = iwn5000_get_temperature;
739	ops->get_rssi = iwn5000_get_rssi;
740	ops->set_txpower = iwn5000_set_txpower;
741	ops->init_gains = iwn5000_init_gains;
742	ops->set_gains = iwn5000_set_gains;
743	ops->add_node = iwn5000_add_node;
744	ops->tx_done = iwn5000_tx_done;
745	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
746	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
747	sc->ntxqs = IWN5000_NTXQUEUES;
748	sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE;
749	sc->ndmachnls = IWN5000_NDMACHNLS;
750	sc->broadcast_id = IWN5000_ID_BROADCAST;
751	sc->rxonsz = IWN5000_RXONSZ;
752	sc->schedsz = IWN5000_SCHEDSZ;
753	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
754	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
755	sc->fwsz = IWN5000_FWSZ;
756	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
757	sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
758	sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;
759
760	switch (sc->hw_type) {
761	case IWN_HW_REV_TYPE_5100:
762		sc->limits = &iwn5000_sensitivity_limits;
763		sc->fwname = "iwn5000fw";
764		/* Override chains masks, ROM is known to be broken. */
765		sc->txchainmask = IWN_ANT_B;
766		sc->rxchainmask = IWN_ANT_AB;
767		break;
768	case IWN_HW_REV_TYPE_5150:
769		sc->limits = &iwn5150_sensitivity_limits;
770		sc->fwname = "iwn5150fw";
771		break;
772	case IWN_HW_REV_TYPE_5300:
773	case IWN_HW_REV_TYPE_5350:
774		sc->limits = &iwn5000_sensitivity_limits;
775		sc->fwname = "iwn5000fw";
776		break;
777	case IWN_HW_REV_TYPE_1000:
778		sc->limits = &iwn1000_sensitivity_limits;
779		sc->fwname = "iwn1000fw";
780		break;
781	case IWN_HW_REV_TYPE_6000:
782		sc->limits = &iwn6000_sensitivity_limits;
783		sc->fwname = "iwn6000fw";
784		if (pid == 0x422c || pid == 0x4239) {
785			sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
786			/* Override chains masks, ROM is known to be broken. */
787			sc->txchainmask = IWN_ANT_BC;
788			sc->rxchainmask = IWN_ANT_BC;
789		}
790		break;
791	case IWN_HW_REV_TYPE_6050:
792		sc->limits = &iwn6000_sensitivity_limits;
793		sc->fwname = "iwn6050fw";
794		/* Override chains masks, ROM is known to be broken. */
795		sc->txchainmask = IWN_ANT_AB;
796		sc->rxchainmask = IWN_ANT_AB;
797		break;
798	case IWN_HW_REV_TYPE_6005:
799		sc->limits = &iwn6000_sensitivity_limits;
800		if (pid != 0x0082 && pid != 0x0085) {
801			sc->fwname = "iwn6000g2bfw";
802			sc->sc_flags |= IWN_FLAG_ADV_BTCOEX;
803		} else
804			sc->fwname = "iwn6000g2afw";
805		break;
806	default:
807		device_printf(sc->sc_dev, "adapter type %d not supported\n",
808		    sc->hw_type);
809		return ENOTSUP;
810	}
811	return 0;
812}
813
814/*
815 * Attach the interface to 802.11 radiotap.
816 */
817static void
818iwn_radiotap_attach(struct iwn_softc *sc)
819{
820	struct ifnet *ifp = sc->sc_ifp;
821	struct ieee80211com *ic = ifp->if_l2com;
822
823	ieee80211_radiotap_attach(ic,
824	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
825		IWN_TX_RADIOTAP_PRESENT,
826	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
827		IWN_RX_RADIOTAP_PRESENT);
828}
829
830static void
831iwn_sysctlattach(struct iwn_softc *sc)
832{
833	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
834	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
835
836#ifdef IWN_DEBUG
837	sc->sc_debug = 0;
838	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
839	    "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
840#endif
841}
842
843static struct ieee80211vap *
844iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
845    enum ieee80211_opmode opmode, int flags,
846    const uint8_t bssid[IEEE80211_ADDR_LEN],
847    const uint8_t mac[IEEE80211_ADDR_LEN])
848{
849	struct iwn_vap *ivp;
850	struct ieee80211vap *vap;
851
852	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
853		return NULL;
854	ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap),
855	    M_80211_VAP, M_NOWAIT | M_ZERO);
856	if (ivp == NULL)
857		return NULL;
858	vap = &ivp->iv_vap;
859	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
860	vap->iv_bmissthreshold = 10;		/* override default */
861	/* Override with driver methods. */
862	ivp->iv_newstate = vap->iv_newstate;
863	vap->iv_newstate = iwn_newstate;
864
865	ieee80211_ratectl_init(vap);
866	/* Complete setup. */
867	ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status);
868	ic->ic_opmode = opmode;
869	return vap;
870}
871
872static void
873iwn_vap_delete(struct ieee80211vap *vap)
874{
875	struct iwn_vap *ivp = IWN_VAP(vap);
876
877	ieee80211_ratectl_deinit(vap);
878	ieee80211_vap_detach(vap);
879	free(ivp, M_80211_VAP);
880}
881
882static int
883iwn_detach(device_t dev)
884{
885	struct iwn_softc *sc = device_get_softc(dev);
886	struct ifnet *ifp = sc->sc_ifp;
887	struct ieee80211com *ic;
888	int qid;
889
890	if (ifp != NULL) {
891		ic = ifp->if_l2com;
892
893		ieee80211_draintask(ic, &sc->sc_reinit_task);
894		ieee80211_draintask(ic, &sc->sc_radioon_task);
895		ieee80211_draintask(ic, &sc->sc_radiooff_task);
896
897		iwn_stop(sc);
898		callout_drain(&sc->watchdog_to);
899		callout_drain(&sc->calib_to);
900		ieee80211_ifdetach(ic);
901	}
902
903	/* Uninstall interrupt handler. */
904	if (sc->irq != NULL) {
905		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
906		bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq),
907		    sc->irq);
908		pci_release_msi(dev);
909	}
910
911	/* Free DMA resources. */
912	iwn_free_rx_ring(sc, &sc->rxq);
913	for (qid = 0; qid < sc->ntxqs; qid++)
914		iwn_free_tx_ring(sc, &sc->txq[qid]);
915	iwn_free_sched(sc);
916	iwn_free_kw(sc);
917	if (sc->ict != NULL)
918		iwn_free_ict(sc);
919	iwn_free_fwmem(sc);
920
921	if (sc->mem != NULL)
922		bus_release_resource(dev, SYS_RES_MEMORY,
923		    rman_get_rid(sc->mem), sc->mem);
924
925	if (ifp != NULL)
926		if_free(ifp);
927
928	IWN_LOCK_DESTROY(sc);
929	return 0;
930}
931
932static int
933iwn_shutdown(device_t dev)
934{
935	struct iwn_softc *sc = device_get_softc(dev);
936
937	iwn_stop(sc);
938	return 0;
939}
940
941static int
942iwn_suspend(device_t dev)
943{
944	struct iwn_softc *sc = device_get_softc(dev);
945	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
946
947	ieee80211_suspend_all(ic);
948	return 0;
949}
950
951static int
952iwn_resume(device_t dev)
953{
954	struct iwn_softc *sc = device_get_softc(dev);
955	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
956
957	/* Clear device-specific "PCI retry timeout" register (41h). */
958	pci_write_config(dev, 0x41, 0, 1);
959
960	ieee80211_resume_all(ic);
961	return 0;
962}
963
964static int
965iwn_nic_lock(struct iwn_softc *sc)
966{
967	int ntries;
968
969	/* Request exclusive access to NIC. */
970	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
971
972	/* Spin until we actually get the lock. */
973	for (ntries = 0; ntries < 1000; ntries++) {
974		if ((IWN_READ(sc, IWN_GP_CNTRL) &
975		     (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
976		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
977			return 0;
978		DELAY(10);
979	}
980	return ETIMEDOUT;
981}
982
983static __inline void
984iwn_nic_unlock(struct iwn_softc *sc)
985{
986	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
987}
988
989static __inline uint32_t
990iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
991{
992	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
993	IWN_BARRIER_READ_WRITE(sc);
994	return IWN_READ(sc, IWN_PRPH_RDATA);
995}
996
997static __inline void
998iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
999{
1000	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
1001	IWN_BARRIER_WRITE(sc);
1002	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
1003}
1004
1005static __inline void
1006iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1007{
1008	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
1009}
1010
1011static __inline void
1012iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1013{
1014	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
1015}
1016
1017static __inline void
1018iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
1019    const uint32_t *data, int count)
1020{
1021	for (; count > 0; count--, data++, addr += 4)
1022		iwn_prph_write(sc, addr, *data);
1023}
1024
1025static __inline uint32_t
1026iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
1027{
1028	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
1029	IWN_BARRIER_READ_WRITE(sc);
1030	return IWN_READ(sc, IWN_MEM_RDATA);
1031}
1032
1033static __inline void
1034iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1035{
1036	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
1037	IWN_BARRIER_WRITE(sc);
1038	IWN_WRITE(sc, IWN_MEM_WDATA, data);
1039}
1040
1041static __inline void
1042iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
1043{
1044	uint32_t tmp;
1045
1046	tmp = iwn_mem_read(sc, addr & ~3);
1047	if (addr & 3)
1048		tmp = (tmp & 0x0000ffff) | data << 16;
1049	else
1050		tmp = (tmp & 0xffff0000) | data;
1051	iwn_mem_write(sc, addr & ~3, tmp);
1052}
1053
1054static __inline void
1055iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
1056    int count)
1057{
1058	for (; count > 0; count--, addr += 4)
1059		*data++ = iwn_mem_read(sc, addr);
1060}
1061
1062static __inline void
1063iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
1064    int count)
1065{
1066	for (; count > 0; count--, addr += 4)
1067		iwn_mem_write(sc, addr, val);
1068}
1069
1070static int
1071iwn_eeprom_lock(struct iwn_softc *sc)
1072{
1073	int i, ntries;
1074
1075	for (i = 0; i < 100; i++) {
1076		/* Request exclusive access to EEPROM. */
1077		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
1078		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1079
1080		/* Spin until we actually get the lock. */
1081		for (ntries = 0; ntries < 100; ntries++) {
1082			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
1083			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
1084				return 0;
1085			DELAY(10);
1086		}
1087	}
1088	return ETIMEDOUT;
1089}
1090
1091static __inline void
1092iwn_eeprom_unlock(struct iwn_softc *sc)
1093{
1094	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1095}
1096
1097/*
1098 * Initialize access by host to One Time Programmable ROM.
1099 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
1100 */
1101static int
1102iwn_init_otprom(struct iwn_softc *sc)
1103{
1104	uint16_t prev, base, next;
1105	int count, error;
1106
1107	/* Wait for clock stabilization before accessing prph. */
1108	if ((error = iwn_clock_wait(sc)) != 0)
1109		return error;
1110
1111	if ((error = iwn_nic_lock(sc)) != 0)
1112		return error;
1113	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1114	DELAY(5);
1115	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1116	iwn_nic_unlock(sc);
1117
1118	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1119	if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
1120		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1121		    IWN_RESET_LINK_PWR_MGMT_DIS);
1122	}
1123	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1124	/* Clear ECC status. */
1125	IWN_SETBITS(sc, IWN_OTP_GP,
1126	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1127
1128	/*
1129	 * Find the block before last block (contains the EEPROM image)
1130	 * for HW without OTP shadow RAM.
1131	 */
1132	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
1133		/* Switch to absolute addressing mode. */
1134		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1135		base = prev = 0;
1136		for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
1137			error = iwn_read_prom_data(sc, base, &next, 2);
1138			if (error != 0)
1139				return error;
1140			if (next == 0)	/* End of linked-list. */
1141				break;
1142			prev = base;
1143			base = le16toh(next);
1144		}
1145		if (count == 0 || count == IWN1000_OTP_NBLOCKS)
1146			return EIO;
1147		/* Skip "next" word. */
1148		sc->prom_base = prev + 1;
1149	}
1150	return 0;
1151}
1152
1153static int
1154iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1155{
1156	uint8_t *out = data;
1157	uint32_t val, tmp;
1158	int ntries;
1159
1160	addr += sc->prom_base;
1161	for (; count > 0; count -= 2, addr++) {
1162		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1163		for (ntries = 0; ntries < 10; ntries++) {
1164			val = IWN_READ(sc, IWN_EEPROM);
1165			if (val & IWN_EEPROM_READ_VALID)
1166				break;
1167			DELAY(5);
1168		}
1169		if (ntries == 10) {
1170			device_printf(sc->sc_dev,
1171			    "timeout reading ROM at 0x%x\n", addr);
1172			return ETIMEDOUT;
1173		}
1174		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1175			/* OTPROM, check for ECC errors. */
1176			tmp = IWN_READ(sc, IWN_OTP_GP);
1177			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1178				device_printf(sc->sc_dev,
1179				    "OTPROM ECC error at 0x%x\n", addr);
1180				return EIO;
1181			}
1182			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1183				/* Correctable ECC error, clear bit. */
1184				IWN_SETBITS(sc, IWN_OTP_GP,
1185				    IWN_OTP_GP_ECC_CORR_STTS);
1186			}
1187		}
1188		*out++ = val >> 16;
1189		if (count > 1)
1190			*out++ = val >> 24;
1191	}
1192	return 0;
1193}
1194
1195static void
1196iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1197{
1198	if (error != 0)
1199		return;
1200	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1201	*(bus_addr_t *)arg = segs[0].ds_addr;
1202}
1203
1204static int
1205iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
1206    void **kvap, bus_size_t size, bus_size_t alignment)
1207{
1208	int error;
1209
1210	dma->tag = NULL;
1211	dma->size = size;
1212
1213	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
1214	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1215	    1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
1216	if (error != 0)
1217		goto fail;
1218
1219	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
1220	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
1221	if (error != 0)
1222		goto fail;
1223
1224	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
1225	    iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
1226	if (error != 0)
1227		goto fail;
1228
1229	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
1230
1231	if (kvap != NULL)
1232		*kvap = dma->vaddr;
1233
1234	return 0;
1235
1236fail:	iwn_dma_contig_free(dma);
1237	return error;
1238}
1239
1240static void
1241iwn_dma_contig_free(struct iwn_dma_info *dma)
1242{
1243	if (dma->map != NULL) {
1244		if (dma->vaddr != NULL) {
1245			bus_dmamap_sync(dma->tag, dma->map,
1246			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1247			bus_dmamap_unload(dma->tag, dma->map);
1248			bus_dmamem_free(dma->tag, &dma->vaddr, dma->map);
1249			dma->vaddr = NULL;
1250		}
1251		bus_dmamap_destroy(dma->tag, dma->map);
1252		dma->map = NULL;
1253	}
1254	if (dma->tag != NULL) {
1255		bus_dma_tag_destroy(dma->tag);
1256		dma->tag = NULL;
1257	}
1258}
1259
1260static int
1261iwn_alloc_sched(struct iwn_softc *sc)
1262{
1263	/* TX scheduler rings must be aligned on a 1KB boundary. */
1264	return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched,
1265	    sc->schedsz, 1024);
1266}
1267
1268static void
1269iwn_free_sched(struct iwn_softc *sc)
1270{
1271	iwn_dma_contig_free(&sc->sched_dma);
1272}
1273
1274static int
1275iwn_alloc_kw(struct iwn_softc *sc)
1276{
1277	/* "Keep Warm" page must be aligned on a 4KB boundary. */
1278	return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096);
1279}
1280
1281static void
1282iwn_free_kw(struct iwn_softc *sc)
1283{
1284	iwn_dma_contig_free(&sc->kw_dma);
1285}
1286
1287static int
1288iwn_alloc_ict(struct iwn_softc *sc)
1289{
1290	/* ICT table must be aligned on a 4KB boundary. */
1291	return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict,
1292	    IWN_ICT_SIZE, 4096);
1293}
1294
1295static void
1296iwn_free_ict(struct iwn_softc *sc)
1297{
1298	iwn_dma_contig_free(&sc->ict_dma);
1299}
1300
1301static int
1302iwn_alloc_fwmem(struct iwn_softc *sc)
1303{
1304	/* Must be aligned on a 16-byte boundary. */
1305	return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16);
1306}
1307
1308static void
1309iwn_free_fwmem(struct iwn_softc *sc)
1310{
1311	iwn_dma_contig_free(&sc->fw_dma);
1312}
1313
1314static int
1315iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1316{
1317	bus_size_t size;
1318	int i, error;
1319
1320	ring->cur = 0;
1321
1322	/* Allocate RX descriptors (256-byte aligned). */
1323	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1324	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1325	    size, 256);
1326	if (error != 0) {
1327		device_printf(sc->sc_dev,
1328		    "%s: could not allocate RX ring DMA memory, error %d\n",
1329		    __func__, error);
1330		goto fail;
1331	}
1332
1333	/* Allocate RX status area (16-byte aligned). */
1334	error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat,
1335	    sizeof (struct iwn_rx_status), 16);
1336	if (error != 0) {
1337		device_printf(sc->sc_dev,
1338		    "%s: could not allocate RX status DMA memory, error %d\n",
1339		    __func__, error);
1340		goto fail;
1341	}
1342
1343	/* Create RX buffer DMA tag. */
1344	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1345	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1346	    IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL,
1347	    &ring->data_dmat);
1348	if (error != 0) {
1349		device_printf(sc->sc_dev,
1350		    "%s: could not create RX buf DMA tag, error %d\n",
1351		    __func__, error);
1352		goto fail;
1353	}
1354
1355	/*
1356	 * Allocate and map RX buffers.
1357	 */
1358	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1359		struct iwn_rx_data *data = &ring->data[i];
1360		bus_addr_t paddr;
1361
1362		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1363		if (error != 0) {
1364			device_printf(sc->sc_dev,
1365			    "%s: could not create RX buf DMA map, error %d\n",
1366			    __func__, error);
1367			goto fail;
1368		}
1369
1370		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1371		    IWN_RBUF_SIZE);
1372		if (data->m == NULL) {
1373			device_printf(sc->sc_dev,
1374			    "%s: could not allocate RX mbuf\n", __func__);
1375			error = ENOBUFS;
1376			goto fail;
1377		}
1378
1379		error = bus_dmamap_load(ring->data_dmat, data->map,
1380		    mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
1381		    &paddr, BUS_DMA_NOWAIT);
1382		if (error != 0 && error != EFBIG) {
1383			device_printf(sc->sc_dev,
1384			    "%s: can't not map mbuf, error %d\n", __func__,
1385			    error);
1386			goto fail;
1387		}
1388
1389		/* Set physical address of RX buffer (256-byte aligned). */
1390		ring->desc[i] = htole32(paddr >> 8);
1391	}
1392
1393	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1394	    BUS_DMASYNC_PREWRITE);
1395
1396	return 0;
1397
1398fail:	iwn_free_rx_ring(sc, ring);
1399	return error;
1400}
1401
1402static void
1403iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1404{
1405	int ntries;
1406
1407	if (iwn_nic_lock(sc) == 0) {
1408		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1409		for (ntries = 0; ntries < 1000; ntries++) {
1410			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1411			    IWN_FH_RX_STATUS_IDLE)
1412				break;
1413			DELAY(10);
1414		}
1415		iwn_nic_unlock(sc);
1416	}
1417	ring->cur = 0;
1418	sc->last_rx_valid = 0;
1419}
1420
1421static void
1422iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1423{
1424	int i;
1425
1426	iwn_dma_contig_free(&ring->desc_dma);
1427	iwn_dma_contig_free(&ring->stat_dma);
1428
1429	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1430		struct iwn_rx_data *data = &ring->data[i];
1431
1432		if (data->m != NULL) {
1433			bus_dmamap_sync(ring->data_dmat, data->map,
1434			    BUS_DMASYNC_POSTREAD);
1435			bus_dmamap_unload(ring->data_dmat, data->map);
1436			m_freem(data->m);
1437			data->m = NULL;
1438		}
1439		if (data->map != NULL)
1440			bus_dmamap_destroy(ring->data_dmat, data->map);
1441	}
1442	if (ring->data_dmat != NULL) {
1443		bus_dma_tag_destroy(ring->data_dmat);
1444		ring->data_dmat = NULL;
1445	}
1446}
1447
1448static int
1449iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1450{
1451	bus_addr_t paddr;
1452	bus_size_t size;
1453	int i, error;
1454
1455	ring->qid = qid;
1456	ring->queued = 0;
1457	ring->cur = 0;
1458
1459	/* Allocate TX descriptors (256-byte aligned). */
1460	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
1461	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1462	    size, 256);
1463	if (error != 0) {
1464		device_printf(sc->sc_dev,
1465		    "%s: could not allocate TX ring DMA memory, error %d\n",
1466		    __func__, error);
1467		goto fail;
1468	}
1469
1470	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
1471	error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
1472	    size, 4);
1473	if (error != 0) {
1474		device_printf(sc->sc_dev,
1475		    "%s: could not allocate TX cmd DMA memory, error %d\n",
1476		    __func__, error);
1477		goto fail;
1478	}
1479
1480	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1481	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1482	    IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
1483	    &ring->data_dmat);
1484	if (error != 0) {
1485		device_printf(sc->sc_dev,
1486		    "%s: could not create TX buf DMA tag, error %d\n",
1487		    __func__, error);
1488		goto fail;
1489	}
1490
1491	paddr = ring->cmd_dma.paddr;
1492	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1493		struct iwn_tx_data *data = &ring->data[i];
1494
1495		data->cmd_paddr = paddr;
1496		data->scratch_paddr = paddr + 12;
1497		paddr += sizeof (struct iwn_tx_cmd);
1498
1499		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1500		if (error != 0) {
1501			device_printf(sc->sc_dev,
1502			    "%s: could not create TX buf DMA map, error %d\n",
1503			    __func__, error);
1504			goto fail;
1505		}
1506	}
1507	return 0;
1508
1509fail:	iwn_free_tx_ring(sc, ring);
1510	return error;
1511}
1512
1513static void
1514iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1515{
1516	int i;
1517
1518	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1519		struct iwn_tx_data *data = &ring->data[i];
1520
1521		if (data->m != NULL) {
1522			bus_dmamap_sync(ring->data_dmat, data->map,
1523			    BUS_DMASYNC_POSTWRITE);
1524			bus_dmamap_unload(ring->data_dmat, data->map);
1525			m_freem(data->m);
1526			data->m = NULL;
1527		}
1528	}
1529	/* Clear TX descriptors. */
1530	memset(ring->desc, 0, ring->desc_dma.size);
1531	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1532	    BUS_DMASYNC_PREWRITE);
1533	sc->qfullmsk &= ~(1 << ring->qid);
1534	ring->queued = 0;
1535	ring->cur = 0;
1536}
1537
1538static void
1539iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1540{
1541	int i;
1542
1543	iwn_dma_contig_free(&ring->desc_dma);
1544	iwn_dma_contig_free(&ring->cmd_dma);
1545
1546	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1547		struct iwn_tx_data *data = &ring->data[i];
1548
1549		if (data->m != NULL) {
1550			bus_dmamap_sync(ring->data_dmat, data->map,
1551			    BUS_DMASYNC_POSTWRITE);
1552			bus_dmamap_unload(ring->data_dmat, data->map);
1553			m_freem(data->m);
1554		}
1555		if (data->map != NULL)
1556			bus_dmamap_destroy(ring->data_dmat, data->map);
1557	}
1558	if (ring->data_dmat != NULL) {
1559		bus_dma_tag_destroy(ring->data_dmat);
1560		ring->data_dmat = NULL;
1561	}
1562}
1563
1564static void
1565iwn5000_ict_reset(struct iwn_softc *sc)
1566{
1567	/* Disable interrupts. */
1568	IWN_WRITE(sc, IWN_INT_MASK, 0);
1569
1570	/* Reset ICT table. */
1571	memset(sc->ict, 0, IWN_ICT_SIZE);
1572	sc->ict_cur = 0;
1573
1574	/* Set physical address of ICT table (4KB aligned). */
1575	DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
1576	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
1577	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
1578
1579	/* Enable periodic RX interrupt. */
1580	sc->int_mask |= IWN_INT_RX_PERIODIC;
1581	/* Switch to ICT interrupt mode in driver. */
1582	sc->sc_flags |= IWN_FLAG_USE_ICT;
1583
1584	/* Re-enable interrupts. */
1585	IWN_WRITE(sc, IWN_INT, 0xffffffff);
1586	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
1587}
1588
1589static int
1590iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
1591{
1592	struct iwn_ops *ops = &sc->ops;
1593	uint16_t val;
1594	int error;
1595
1596	/* Check whether adapter has an EEPROM or an OTPROM. */
1597	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
1598	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
1599		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
1600	DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
1601	    (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
1602
1603	/* Adapter has to be powered on for EEPROM access to work. */
1604	if ((error = iwn_apm_init(sc)) != 0) {
1605		device_printf(sc->sc_dev,
1606		    "%s: could not power ON adapter, error %d\n", __func__,
1607		    error);
1608		return error;
1609	}
1610
1611	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
1612		device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
1613		return EIO;
1614	}
1615	if ((error = iwn_eeprom_lock(sc)) != 0) {
1616		device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
1617		    __func__, error);
1618		return error;
1619	}
1620	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1621		if ((error = iwn_init_otprom(sc)) != 0) {
1622			device_printf(sc->sc_dev,
1623			    "%s: could not initialize OTPROM, error %d\n",
1624			    __func__, error);
1625			return error;
1626		}
1627	}
1628
1629	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
1630	DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
1631	/* Check if HT support is bonded out. */
1632	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
1633		sc->sc_flags |= IWN_FLAG_HAS_11N;
1634
1635	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
1636	sc->rfcfg = le16toh(val);
1637	DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
1638	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
1639	if (sc->txchainmask == 0)
1640		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
1641	if (sc->rxchainmask == 0)
1642		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
1643
1644	/* Read MAC address. */
1645	iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
1646
1647	/* Read adapter-specific information from EEPROM. */
1648	ops->read_eeprom(sc);
1649
1650	iwn_apm_stop(sc);	/* Power OFF adapter. */
1651
1652	iwn_eeprom_unlock(sc);
1653	return 0;
1654}
1655
1656static void
1657iwn4965_read_eeprom(struct iwn_softc *sc)
1658{
1659	uint32_t addr;
1660	uint16_t val;
1661	int i;
1662
1663	/* Read regulatory domain (4 ASCII characters). */
1664	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
1665
1666	/* Read the list of authorized channels (20MHz ones only). */
1667	for (i = 0; i < 7; i++) {
1668		addr = iwn4965_regulatory_bands[i];
1669		iwn_read_eeprom_channels(sc, i, addr);
1670	}
1671
1672	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
1673	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
1674	sc->maxpwr2GHz = val & 0xff;
1675	sc->maxpwr5GHz = val >> 8;
1676	/* Check that EEPROM values are within valid range. */
1677	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
1678		sc->maxpwr5GHz = 38;
1679	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
1680		sc->maxpwr2GHz = 38;
1681	DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
1682	    sc->maxpwr2GHz, sc->maxpwr5GHz);
1683
1684	/* Read samples for each TX power group. */
1685	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
1686	    sizeof sc->bands);
1687
1688	/* Read voltage at which samples were taken. */
1689	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
1690	sc->eeprom_voltage = (int16_t)le16toh(val);
1691	DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
1692	    sc->eeprom_voltage);
1693
1694#ifdef IWN_DEBUG
1695	/* Print samples. */
1696	if (sc->sc_debug & IWN_DEBUG_ANY) {
1697		for (i = 0; i < IWN_NBANDS; i++)
1698			iwn4965_print_power_group(sc, i);
1699	}
1700#endif
1701}
1702
1703#ifdef IWN_DEBUG
1704static void
1705iwn4965_print_power_group(struct iwn_softc *sc, int i)
1706{
1707	struct iwn4965_eeprom_band *band = &sc->bands[i];
1708	struct iwn4965_eeprom_chan_samples *chans = band->chans;
1709	int j, c;
1710
1711	printf("===band %d===\n", i);
1712	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
1713	printf("chan1 num=%d\n", chans[0].num);
1714	for (c = 0; c < 2; c++) {
1715		for (j = 0; j < IWN_NSAMPLES; j++) {
1716			printf("chain %d, sample %d: temp=%d gain=%d "
1717			    "power=%d pa_det=%d\n", c, j,
1718			    chans[0].samples[c][j].temp,
1719			    chans[0].samples[c][j].gain,
1720			    chans[0].samples[c][j].power,
1721			    chans[0].samples[c][j].pa_det);
1722		}
1723	}
1724	printf("chan2 num=%d\n", chans[1].num);
1725	for (c = 0; c < 2; c++) {
1726		for (j = 0; j < IWN_NSAMPLES; j++) {
1727			printf("chain %d, sample %d: temp=%d gain=%d "
1728			    "power=%d pa_det=%d\n", c, j,
1729			    chans[1].samples[c][j].temp,
1730			    chans[1].samples[c][j].gain,
1731			    chans[1].samples[c][j].power,
1732			    chans[1].samples[c][j].pa_det);
1733		}
1734	}
1735}
1736#endif
1737
1738static void
1739iwn5000_read_eeprom(struct iwn_softc *sc)
1740{
1741	struct iwn5000_eeprom_calib_hdr hdr;
1742	int32_t volt;
1743	uint32_t base, addr;
1744	uint16_t val;
1745	int i;
1746
1747	/* Read regulatory domain (4 ASCII characters). */
1748	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1749	base = le16toh(val);
1750	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
1751	    sc->eeprom_domain, 4);
1752
1753	/* Read the list of authorized channels (20MHz ones only). */
1754	for (i = 0; i < 7; i++) {
1755		if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1756			addr = base + iwn6000_regulatory_bands[i];
1757		else
1758			addr = base + iwn5000_regulatory_bands[i];
1759		iwn_read_eeprom_channels(sc, i, addr);
1760	}
1761
1762	/* Read enhanced TX power information for 6000 Series. */
1763	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1764		iwn_read_eeprom_enhinfo(sc);
1765
1766	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
1767	base = le16toh(val);
1768	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
1769	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
1770	    "%s: calib version=%u pa type=%u voltage=%u\n", __func__,
1771	    hdr.version, hdr.pa_type, le16toh(hdr.volt));
1772	sc->calib_ver = hdr.version;
1773
1774	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
1775		/* Compute temperature offset. */
1776		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1777		sc->eeprom_temp = le16toh(val);
1778		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
1779		volt = le16toh(val);
1780		sc->temp_off = sc->eeprom_temp - (volt / -5);
1781		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
1782		    sc->eeprom_temp, volt, sc->temp_off);
1783	} else {
1784		/* Read crystal calibration. */
1785		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
1786		    &sc->eeprom_crystal, sizeof (uint32_t));
1787		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
1788		    le32toh(sc->eeprom_crystal));
1789	}
1790}
1791
1792/*
1793 * Translate EEPROM flags to net80211.
1794 */
1795static uint32_t
1796iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
1797{
1798	uint32_t nflags;
1799
1800	nflags = 0;
1801	if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
1802		nflags |= IEEE80211_CHAN_PASSIVE;
1803	if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
1804		nflags |= IEEE80211_CHAN_NOADHOC;
1805	if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
1806		nflags |= IEEE80211_CHAN_DFS;
1807		/* XXX apparently IBSS may still be marked */
1808		nflags |= IEEE80211_CHAN_NOADHOC;
1809	}
1810
1811	return nflags;
1812}
1813
1814static void
1815iwn_read_eeprom_band(struct iwn_softc *sc, int n)
1816{
1817	struct ifnet *ifp = sc->sc_ifp;
1818	struct ieee80211com *ic = ifp->if_l2com;
1819	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1820	const struct iwn_chan_band *band = &iwn_bands[n];
1821	struct ieee80211_channel *c;
1822	uint8_t chan;
1823	int i, nflags;
1824
1825	for (i = 0; i < band->nchan; i++) {
1826		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
1827			DPRINTF(sc, IWN_DEBUG_RESET,
1828			    "skip chan %d flags 0x%x maxpwr %d\n",
1829			    band->chan[i], channels[i].flags,
1830			    channels[i].maxpwr);
1831			continue;
1832		}
1833		chan = band->chan[i];
1834		nflags = iwn_eeprom_channel_flags(&channels[i]);
1835
1836		c = &ic->ic_channels[ic->ic_nchans++];
1837		c->ic_ieee = chan;
1838		c->ic_maxregpower = channels[i].maxpwr;
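		/* ic_maxpower is in half-dBm units, hence the factor of 2. */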
1839		c->ic_maxpower = 2*c->ic_maxregpower;
1840
1841		if (n == 0) {	/* 2GHz band */
1842			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G);
1843			/* G implies B is supported. */
1844			c->ic_flags = IEEE80211_CHAN_B | nflags;
1845			c = &ic->ic_channels[ic->ic_nchans++];
1846			c[0] = c[-1];
1847			c->ic_flags = IEEE80211_CHAN_G | nflags;
1848		} else {	/* 5GHz band */
1849			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A);
1850			c->ic_flags = IEEE80211_CHAN_A | nflags;
1851		}
1852
1853		/* Save maximum allowed TX power for this channel. */
1854		sc->maxpwr[chan] = channels[i].maxpwr;
1855
1856		DPRINTF(sc, IWN_DEBUG_RESET,
1857		    "add chan %d flags 0x%x maxpwr %d\n", chan,
1858		    channels[i].flags, channels[i].maxpwr);
1859
1860		if (sc->sc_flags & IWN_FLAG_HAS_11N) {
1861			/* Add HT20; HT40 channels are added separately. */
1862			c = &ic->ic_channels[ic->ic_nchans++];
1863			c[0] = c[-1];
1864			c->ic_flags |= IEEE80211_CHAN_HT20;
1865		}
1866	}
1867}
1868
1869static void
1870iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
1871{
1872	struct ifnet *ifp = sc->sc_ifp;
1873	struct ieee80211com *ic = ifp->if_l2com;
1874	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1875	const struct iwn_chan_band *band = &iwn_bands[n];
1876	struct ieee80211_channel *c, *cent, *extc;
1877	uint8_t chan;
1878	int i, nflags;
1879
1880	if (!(sc->sc_flags & IWN_FLAG_HAS_11N))
1881		return;
1882
1883	for (i = 0; i < band->nchan; i++) {
1884		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
1885			DPRINTF(sc, IWN_DEBUG_RESET,
1886			    "skip chan %d flags 0x%x maxpwr %d\n",
1887			    band->chan[i], channels[i].flags,
1888			    channels[i].maxpwr);
1889			continue;
1890		}
1891		chan = band->chan[i];
1892		nflags = iwn_eeprom_channel_flags(&channels[i]);
1893
1894		/*
1895		 * Each entry defines an HT40 channel pair; find the
1896		 * center channel, then the extension channel above.
1897		 */
1898		cent = ieee80211_find_channel_byieee(ic, chan,
1899		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
1900		if (cent == NULL) {	/* XXX shouldn't happen */
1901			device_printf(sc->sc_dev,
1902			    "%s: no entry for channel %d\n", __func__, chan);
1903			continue;
1904		}
1905		extc = ieee80211_find_channel(ic, cent->ic_freq+20,
1906		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
1907		if (extc == NULL) {
1908			DPRINTF(sc, IWN_DEBUG_RESET,
1909			    "%s: skip chan %d, extension channel not found\n",
1910			    __func__, chan);
1911			continue;
1912		}
1913
1914		DPRINTF(sc, IWN_DEBUG_RESET,
1915		    "add ht40 chan %d flags 0x%x maxpwr %d\n",
1916		    chan, channels[i].flags, channels[i].maxpwr);
1917
1918		c = &ic->ic_channels[ic->ic_nchans++];
1919		c[0] = cent[0];
1920		c->ic_extieee = extc->ic_ieee;
1921		c->ic_flags &= ~IEEE80211_CHAN_HT;
1922		c->ic_flags |= IEEE80211_CHAN_HT40U | nflags;
1923		c = &ic->ic_channels[ic->ic_nchans++];
1924		c[0] = extc[0];
1925		c->ic_extieee = cent->ic_ieee;
1926		c->ic_flags &= ~IEEE80211_CHAN_HT;
1927		c->ic_flags |= IEEE80211_CHAN_HT40D | nflags;
1928	}
1929}
1930
1931static void
1932iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
1933{
1934	struct ifnet *ifp = sc->sc_ifp;
1935	struct ieee80211com *ic = ifp->if_l2com;
1936
1937	iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
1938	    iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));
1939
1940	if (n < 5)
1941		iwn_read_eeprom_band(sc, n);
1942	else
1943		iwn_read_eeprom_ht40(sc, n);
1944	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
1945}
1946
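/*
 * Map a net80211 channel back to its EEPROM entry.  Bands 0-4 hold the
 * 20MHz channel groups; bands 5 and 6 hold the HT40 channel pairs for
 * the 2GHz and 5GHz bands respectively (see iwn_read_eeprom_ht40()).
 */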
1947static struct iwn_eeprom_chan *
1948iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
1949{
1950	int band, chan, i, j;
1951
1952	if (IEEE80211_IS_CHAN_HT40(c)) {
1953		band = IEEE80211_IS_CHAN_5GHZ(c) ? 6 : 5;
1954		if (IEEE80211_IS_CHAN_HT40D(c))
1955			chan = c->ic_extieee;
1956		else
1957			chan = c->ic_ieee;
1958		for (i = 0; i < iwn_bands[band].nchan; i++) {
1959			if (iwn_bands[band].chan[i] == chan)
1960				return &sc->eeprom_channels[band][i];
1961		}
1962	} else {
1963		for (j = 0; j < 5; j++) {
1964			for (i = 0; i < iwn_bands[j].nchan; i++) {
1965				if (iwn_bands[j].chan[i] == c->ic_ieee)
1966					return &sc->eeprom_channels[j][i];
1967			}
1968		}
1969	}
1970	return NULL;
1971}
1972
1973/*
1974 * Enforce flags read from EEPROM.
1975 */
1976static int
1977iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
1978    int nchan, struct ieee80211_channel chans[])
1979{
1980	struct iwn_softc *sc = ic->ic_ifp->if_softc;
1981	int i;
1982
1983	for (i = 0; i < nchan; i++) {
1984		struct ieee80211_channel *c = &chans[i];
1985		struct iwn_eeprom_chan *channel;
1986
1987		channel = iwn_find_eeprom_channel(sc, c);
1988		if (channel == NULL) {
1989			if_printf(ic->ic_ifp,
1990			    "%s: invalid channel %u freq %u/0x%x\n",
1991			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
1992			return EINVAL;
1993		}
1994		c->ic_flags |= iwn_eeprom_channel_flags(channel);
1995	}
1996
1997	return 0;
1998}
1999
2000static void
2001iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
2002{
2003	struct iwn_eeprom_enhinfo enhinfo[35];
2004	struct ifnet *ifp = sc->sc_ifp;
2005	struct ieee80211com *ic = ifp->if_l2com;
2006	struct ieee80211_channel *c;
2007	uint16_t val, base;
2008	int8_t maxpwr;
2009	uint8_t flags;
2010	int i, j;
2011
2012	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2013	base = le16toh(val);
2014	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
2015	    enhinfo, sizeof enhinfo);
2016
2017	for (i = 0; i < nitems(enhinfo); i++) {
2018		flags = enhinfo[i].flags;
2019		if (!(flags & IWN_ENHINFO_VALID))
2020			continue;	/* Skip invalid entries. */
2021
2022		maxpwr = 0;
2023		if (sc->txchainmask & IWN_ANT_A)
2024			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
2025		if (sc->txchainmask & IWN_ANT_B)
2026			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
2027		if (sc->txchainmask & IWN_ANT_C)
2028			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
2029		if (sc->ntxchains == 2)
2030			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
2031		else if (sc->ntxchains == 3)
2032			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
2033
2034		for (j = 0; j < ic->ic_nchans; j++) {
2035			c = &ic->ic_channels[j];
2036			if ((flags & IWN_ENHINFO_5GHZ)) {
2037				if (!IEEE80211_IS_CHAN_A(c))
2038					continue;
2039			} else if ((flags & IWN_ENHINFO_OFDM)) {
2040				if (!IEEE80211_IS_CHAN_G(c))
2041					continue;
2042			} else if (!IEEE80211_IS_CHAN_B(c))
2043				continue;
2044			if ((flags & IWN_ENHINFO_HT40)) {
2045				if (!IEEE80211_IS_CHAN_HT40(c))
2046					continue;
2047			} else {
2048				if (IEEE80211_IS_CHAN_HT40(c))
2049					continue;
2050			}
2051			if (enhinfo[i].chan != 0 &&
2052			    enhinfo[i].chan != c->ic_ieee)
2053				continue;
2054
2055			DPRINTF(sc, IWN_DEBUG_RESET,
2056			    "channel %d(%x), maxpwr %d\n", c->ic_ieee,
2057			    c->ic_flags, maxpwr / 2);
2058			c->ic_maxregpower = maxpwr / 2;
2059			c->ic_maxpower = maxpwr;
2060		}
2061	}
2062}
2063
2064static struct ieee80211_node *
2065iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2066{
2067	return malloc(sizeof (struct iwn_node), M_80211_NODE, M_NOWAIT | M_ZERO);
2068}
2069
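/*
 * Map a net80211 rate (in 0.5Mb/s units) to the PLCP code expected by the
 * firmware: the standard OFDM signal rate codes for 6-54Mb/s and the CCK
 * rate in 100kb/s units for 1-11Mb/s.
 */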
2070static __inline int
2071rate2plcp(int rate)
2072{
2073	switch (rate & 0xff) {
2074	case 12:	return 0xd;
2075	case 18:	return 0xf;
2076	case 24:	return 0x5;
2077	case 36:	return 0x7;
2078	case 48:	return 0x9;
2079	case 72:	return 0xb;
2080	case 96:	return 0x1;
2081	case 108:	return 0x3;
2082	case 2:		return 10;
2083	case 4:		return 20;
2084	case 11:	return 55;
2085	case 22:	return 110;
2086	}
2087	return 0;
2088}
2089
2090static void
2091iwn_newassoc(struct ieee80211_node *ni, int isnew)
2092{
2093#define	RV(v)	((v) & IEEE80211_RATE_VAL)
2094	struct ieee80211com *ic = ni->ni_ic;
2095	struct iwn_softc *sc = ic->ic_ifp->if_softc;
2096	struct iwn_node *wn = (void *)ni;
2097	uint8_t txant1, txant2;
2098	int i, plcp, rate, ridx;
2099
2100	/* Use the first valid TX antenna. */
2101	txant1 = IWN_LSB(sc->txchainmask);
2102	txant2 = IWN_LSB(sc->txchainmask & ~txant1);
2103
2104	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
2105		ridx = ni->ni_rates.rs_nrates - 1;
2106		for (i = ni->ni_htrates.rs_nrates - 1; i >= 0; i--) {
2107			plcp = RV(ni->ni_htrates.rs_rates[i]) | IWN_RFLAG_MCS;
2108			if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
2109				plcp |= IWN_RFLAG_HT40;
2110				if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
2111					plcp |= IWN_RFLAG_SGI;
2112			} else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20)
2113				plcp |= IWN_RFLAG_SGI;
2114			if (RV(ni->ni_htrates.rs_rates[i]) > 7)
2115				plcp |= IWN_RFLAG_ANT(txant1 | txant2);
2116			else
2117				plcp |= IWN_RFLAG_ANT(txant1);
2118			if (ridx >= 0) {
2119				rate = RV(ni->ni_rates.rs_rates[ridx]);
2120				wn->ridx[rate] = plcp;
2121			}
2122			wn->ridx[IEEE80211_RATE_MCS | i] = plcp;
2123			ridx--;
2124		}
2125	} else {
2126		for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
2127			rate = RV(ni->ni_rates.rs_rates[i]);
2128			plcp = rate2plcp(rate);
2129			ridx = ic->ic_rt->rateCodeToIndex[rate];
2130			if (ridx < IWN_RIDX_OFDM6 &&
2131			    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
2132				plcp |= IWN_RFLAG_CCK;
2133			plcp |= IWN_RFLAG_ANT(txant1);
2134			wn->ridx[rate] = htole32(plcp);
2135		}
2136	}
2137#undef	RV
2138}
2139
2140static int
2141iwn_media_change(struct ifnet *ifp)
2142{
2143	int error;
2144
2145	error = ieee80211_media_change(ifp);
2146	/* NB: only the fixed rate can change and that doesn't need a reset */
2147	return (error == ENETRESET ? 0 : error);
2148}
2149
2150static int
2151iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
2152{
2153	struct iwn_vap *ivp = IWN_VAP(vap);
2154	struct ieee80211com *ic = vap->iv_ic;
2155	struct iwn_softc *sc = ic->ic_ifp->if_softc;
2156	int error = 0;
2157
2158	DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
2159	    ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]);
2160
2161	IEEE80211_UNLOCK(ic);
2162	IWN_LOCK(sc);
2163	callout_stop(&sc->calib_to);
2164
2165	switch (nstate) {
2166	case IEEE80211_S_ASSOC:
2167		if (vap->iv_state != IEEE80211_S_RUN)
2168			break;
2169		/* FALLTHROUGH */
2170	case IEEE80211_S_AUTH:
2171		if (vap->iv_state == IEEE80211_S_AUTH)
2172			break;
2173
2174		/*
2175		 * !AUTH -> AUTH transition requires state reset to handle
2176		 * reassociations correctly.
2177		 */
2178		sc->rxon.associd = 0;
2179		sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
2180		sc->calib.state = IWN_CALIB_STATE_INIT;
2181
2182		if ((error = iwn_auth(sc, vap)) != 0) {
2183			device_printf(sc->sc_dev,
2184			    "%s: could not move to auth state\n", __func__);
2185		}
2186		break;
2187
2188	case IEEE80211_S_RUN:
2189		/*
2190		 * RUN -> RUN transition: just restart the timers.
2191		 */
2192		if (vap->iv_state == IEEE80211_S_RUN) {
2193			sc->calib_cnt = 0;
2194			break;
2195		}
2196
2197		/*
2198		 * !RUN -> RUN requires setting the association id
2199		 * which is done with a firmware cmd.  We also defer
2200		 * starting the timers until that work is done.
2201		 */
2202		if ((error = iwn_run(sc, vap)) != 0) {
2203			device_printf(sc->sc_dev,
2204			    "%s: could not move to run state\n", __func__);
2205		}
2206		break;
2207
2208	case IEEE80211_S_INIT:
2209		sc->calib.state = IWN_CALIB_STATE_INIT;
2210		break;
2211
2212	default:
2213		break;
2214	}
2215	IWN_UNLOCK(sc);
2216	IEEE80211_LOCK(ic);
2217	if (error != 0)
2218		return error;
2219	return ivp->iv_newstate(vap, nstate, arg);
2220}
2221
2222static void
2223iwn_calib_timeout(void *arg)
2224{
2225	struct iwn_softc *sc = arg;
2226
2227	IWN_LOCK_ASSERT(sc);
2228
2229	/* Force automatic TX power calibration every 60 secs. */
2230	if (++sc->calib_cnt >= 120) {
2231		uint32_t flags = 0;
2232
2233		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
2234		    "sending request for statistics");
2235		(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
2236		    sizeof flags, 1);
2237		sc->calib_cnt = 0;
2238	}
2239	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
2240	    sc);
2241}
2242
2243/*
2244 * Process an RX_PHY firmware notification.  This is usually immediately
2245 * followed by an MPDU_RX_DONE notification.
2246 */
2247static void
2248iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2249    struct iwn_rx_data *data)
2250{
2251	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
2252
2253	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
2254	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2255
2256	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
2257	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
2258	sc->last_rx_valid = 1;
2259}
2260
2261/*
2262 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
2263 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2264 */
2265static void
2266iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2267    struct iwn_rx_data *data)
2268{
2269	struct iwn_ops *ops = &sc->ops;
2270	struct ifnet *ifp = sc->sc_ifp;
2271	struct ieee80211com *ic = ifp->if_l2com;
2272	struct iwn_rx_ring *ring = &sc->rxq;
2273	struct ieee80211_frame *wh;
2274	struct ieee80211_node *ni;
2275	struct mbuf *m, *m1;
2276	struct iwn_rx_stat *stat;
2277	caddr_t head;
2278	bus_addr_t paddr;
2279	uint32_t flags;
2280	int error, len, rssi, nf;
2281
2282	if (desc->type == IWN_MPDU_RX_DONE) {
2283		/* Check for prior RX_PHY notification. */
2284		if (!sc->last_rx_valid) {
2285			DPRINTF(sc, IWN_DEBUG_ANY,
2286			    "%s: missing RX_PHY\n", __func__);
2287			return;
2288		}
2289		stat = &sc->last_rx_stat;
2290	} else
2291		stat = (struct iwn_rx_stat *)(desc + 1);
2292
2293	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2294
2295	if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2296		device_printf(sc->sc_dev,
2297		    "%s: invalid RX statistic header, len %d\n", __func__,
2298		    stat->cfg_phy_len);
2299		return;
2300	}
2301	if (desc->type == IWN_MPDU_RX_DONE) {
2302		struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
2303		head = (caddr_t)(mpdu + 1);
2304		len = le16toh(mpdu->len);
2305	} else {
2306		head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
2307		len = le16toh(stat->len);
2308	}
2309
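	/* RX status flags follow the frame payload as a 32-bit word. */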
2310	flags = le32toh(*(uint32_t *)(head + len));
2311
2312	/* Discard frames with a bad FCS early. */
2313	if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
2314		DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n",
2315		    __func__, flags);
2316		ifp->if_ierrors++;
2317		return;
2318	}
2319	/* Discard frames that are too short. */
2320	if (len < sizeof (*wh)) {
2321		DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
2322		    __func__, len);
2323		ifp->if_ierrors++;
2324		return;
2325	}
2326
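	/*
	 * Allocate a replacement cluster before passing the current one up
	 * the stack so the RX ring slot is never left without a buffer; if
	 * allocation or DMA loading fails, drop the frame and keep the old
	 * buffer.
	 */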
2327	m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE);
2328	if (m1 == NULL) {
2329		DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
2330		    __func__);
2331		ifp->if_ierrors++;
2332		return;
2333	}
2334	bus_dmamap_unload(ring->data_dmat, data->map);
2335
2336	error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
2337	    IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
2338	if (error != 0 && error != EFBIG) {
2339		device_printf(sc->sc_dev,
2340		    "%s: bus_dmamap_load failed, error %d\n", __func__, error);
2341		m_freem(m1);
2342
2343		/* Try to reload the old mbuf. */
2344		error = bus_dmamap_load(ring->data_dmat, data->map,
2345		    mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
2346		    &paddr, BUS_DMA_NOWAIT);
2347		if (error != 0 && error != EFBIG) {
2348			panic("%s: could not load old RX mbuf", __func__);
2349		}
2350		/* Physical address may have changed. */
2351		ring->desc[ring->cur] = htole32(paddr >> 8);
2352		bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2353		    BUS_DMASYNC_PREWRITE);
2354		ifp->if_ierrors++;
2355		return;
2356	}
2357
2358	m = data->m;
2359	data->m = m1;
2360	/* Update RX descriptor. */
2361	ring->desc[ring->cur] = htole32(paddr >> 8);
2362	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2363	    BUS_DMASYNC_PREWRITE);
2364
2365	/* Finalize mbuf. */
2366	m->m_pkthdr.rcvif = ifp;
2367	m->m_data = head;
2368	m->m_pkthdr.len = m->m_len = len;
2369
2370	/* Grab a reference to the source node. */
2371	wh = mtod(m, struct ieee80211_frame *);
2372	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2373	nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
2374	    (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;
2375
2376	rssi = ops->get_rssi(sc, stat);
2377
2378	if (ieee80211_radiotap_active(ic)) {
2379		struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
2380
2381		tap->wr_flags = 0;
2382		if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
2383			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2384		tap->wr_dbm_antsignal = (int8_t)rssi;
2385		tap->wr_dbm_antnoise = (int8_t)nf;
2386		tap->wr_tsft = stat->tstamp;
2387		switch (stat->rate) {
2388		/* CCK rates. */
2389		case  10: tap->wr_rate =   2; break;
2390		case  20: tap->wr_rate =   4; break;
2391		case  55: tap->wr_rate =  11; break;
2392		case 110: tap->wr_rate =  22; break;
2393		/* OFDM rates. */
2394		case 0xd: tap->wr_rate =  12; break;
2395		case 0xf: tap->wr_rate =  18; break;
2396		case 0x5: tap->wr_rate =  24; break;
2397		case 0x7: tap->wr_rate =  36; break;
2398		case 0x9: tap->wr_rate =  48; break;
2399		case 0xb: tap->wr_rate =  72; break;
2400		case 0x1: tap->wr_rate =  96; break;
2401		case 0x3: tap->wr_rate = 108; break;
2402		/* Unknown rate: should not happen. */
2403		default:  tap->wr_rate =   0;
2404		}
2405	}
2406
2407	IWN_UNLOCK(sc);
2408
2409	/* Send the frame to the 802.11 layer. */
2410	if (ni != NULL) {
2411		if (ni->ni_flags & IEEE80211_NODE_HT)
2412			m->m_flags |= M_AMPDU;
2413		(void)ieee80211_input(ni, m, rssi - nf, nf);
2414		/* Node is no longer needed. */
2415		ieee80211_free_node(ni);
2416	} else
2417		(void)ieee80211_input_all(ic, m, rssi - nf, nf);
2418
2419	IWN_LOCK(sc);
2420}
2421
2422/* Process an incoming Compressed BlockAck. */
2423static void
2424iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2425    struct iwn_rx_data *data)
2426{
2427	struct iwn_ops *ops = &sc->ops;
2428	struct ifnet *ifp = sc->sc_ifp;
2429	struct iwn_node *wn;
2430	struct ieee80211_node *ni;
2431	struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
2432	struct iwn_tx_ring *txq;
2433	struct iwn_tx_data *txdata;
2434	struct ieee80211_tx_ampdu *tap;
2435	struct mbuf *m;
2436	uint64_t bitmap;
2437	uint16_t ssn;
2438	uint8_t tid;
2439	int ackfailcnt = 0, i, lastidx, qid, *res, shift;
2440
2441	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2442
2443	qid = le16toh(ba->qid);
2444	txq = &sc->txq[ba->qid];
2445	tap = sc->qid2tap[ba->qid];
2446	tid = WME_AC_TO_TID(tap->txa_ac);
2447	wn = (void *)tap->txa_ni;
2448
2449	res = NULL;
2450	ssn = 0;
2451	if (!IEEE80211_AMPDU_RUNNING(tap)) {
2452		res = tap->txa_private;
2453		ssn = tap->txa_start & 0xfff;
2454	}
2455
2456	for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) {
2457		txdata = &txq->data[txq->read];
2458
2459		/* Unmap and free mbuf. */
2460		bus_dmamap_sync(txq->data_dmat, txdata->map,
2461		    BUS_DMASYNC_POSTWRITE);
2462		bus_dmamap_unload(txq->data_dmat, txdata->map);
2463		m = txdata->m, txdata->m = NULL;
2464		ni = txdata->ni, txdata->ni = NULL;
2465
2466		KASSERT(ni != NULL, ("no node"));
2467		KASSERT(m != NULL, ("no mbuf"));
2468
2469		if (m->m_flags & M_TXCB)
2470			ieee80211_process_callback(ni, m, 1);
2471
2472		m_freem(m);
2473		ieee80211_free_node(ni);
2474
2475		txq->queued--;
2476		txq->read = (txq->read + 1) % IWN_TX_RING_COUNT;
2477	}
2478
2479	if (txq->queued == 0 && res != NULL) {
2480		iwn_nic_lock(sc);
2481		ops->ampdu_tx_stop(sc, qid, tid, ssn);
2482		iwn_nic_unlock(sc);
2483		sc->qid2tap[qid] = NULL;
2484		free(res, M_DEVBUF);
2485		return;
2486	}
2487
2488	if (wn->agg[tid].bitmap == 0)
2489		return;
2490
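	/*
	 * Align the BlockAck bitmap reported by the firmware with the bitmap
	 * recorded at TX completion time; indices wrap modulo 256.
	 */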
2491	shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff);
2492	if (shift < 0)
2493		shift += 0x100;
2494
2495	if (wn->agg[tid].nframes > (64 - shift))
2496		return;
2497
2498	ni = tap->txa_ni;
2499	bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap;
2500	for (i = 0; bitmap; i++) {
2501		if ((bitmap & 1) == 0) {
2502			ifp->if_oerrors++;
2503			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
2504			    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2505		} else {
2506			ifp->if_opackets++;
2507			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
2508			    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2509		}
2510		bitmap >>= 1;
2511	}
2512}
2513
2514/*
2515 * Process a CALIBRATION_RESULT notification sent by the initialization
2516 * firmware in response to a CMD_CALIB_CONFIG command (5000 only).
2517 */
2518static void
2519iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2520    struct iwn_rx_data *data)
2521{
2522	struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
2523	int len, idx = -1;
2524
2525	/* Runtime firmware should not send such a notification. */
2526	if (sc->sc_flags & IWN_FLAG_CALIB_DONE)
2527		return;
2528
2529	len = (le32toh(desc->len) & 0x3fff) - 4;
2530	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2531
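	/*
	 * Keep each calibration result in a fixed slot of sc->calibcmd[];
	 * results that do not apply to this hardware are ignored (idx stays
	 * -1).  The saved results are presumably replayed to the runtime
	 * firmware later.
	 */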
2532	switch (calib->code) {
2533	case IWN5000_PHY_CALIB_DC:
2534		if ((sc->sc_flags & IWN_FLAG_INTERNAL_PA) == 0 &&
2535		    (sc->hw_type == IWN_HW_REV_TYPE_5150 ||
2536		     sc->hw_type >= IWN_HW_REV_TYPE_6000) &&
2537		     sc->hw_type != IWN_HW_REV_TYPE_6050)
2538			idx = 0;
2539		break;
2540	case IWN5000_PHY_CALIB_LO:
2541		idx = 1;
2542		break;
2543	case IWN5000_PHY_CALIB_TX_IQ:
2544		idx = 2;
2545		break;
2546	case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
2547		if (sc->hw_type < IWN_HW_REV_TYPE_6000 &&
2548		    sc->hw_type != IWN_HW_REV_TYPE_5150)
2549			idx = 3;
2550		break;
2551	case IWN5000_PHY_CALIB_BASE_BAND:
2552		idx = 4;
2553		break;
2554	}
2555	if (idx == -1)	/* Ignore other results. */
2556		return;
2557
2558	/* Save calibration result. */
2559	if (sc->calibcmd[idx].buf != NULL)
2560		free(sc->calibcmd[idx].buf, M_DEVBUF);
2561	sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
2562	if (sc->calibcmd[idx].buf == NULL) {
2563		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2564		    "not enough memory for calibration result %d\n",
2565		    calib->code);
2566		return;
2567	}
2568	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2569	    "saving calibration result code=%d len=%d\n", calib->code, len);
2570	sc->calibcmd[idx].len = len;
2571	memcpy(sc->calibcmd[idx].buf, calib, len);
2572}
2573
2574/*
2575 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
2576 * The latter is sent by the firmware after each received beacon.
2577 */
2578static void
2579iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2580    struct iwn_rx_data *data)
2581{
2582	struct iwn_ops *ops = &sc->ops;
2583	struct ifnet *ifp = sc->sc_ifp;
2584	struct ieee80211com *ic = ifp->if_l2com;
2585	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2586	struct iwn_calib_state *calib = &sc->calib;
2587	struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
2588	int temp;
2589
2590	/* Ignore statistics received during a scan. */
2591	if (vap->iv_state != IEEE80211_S_RUN ||
2592	    (ic->ic_flags & IEEE80211_F_SCAN))
2593		return;
2594
2595	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2596
2597	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received statistics, cmd %d\n",
2598	    __func__, desc->type);
2599	sc->calib_cnt = 0;	/* Reset TX power calibration timeout. */
2600
2601	/* Test if temperature has changed. */
2602	if (stats->general.temp != sc->rawtemp) {
2603		/* Convert "raw" temperature to degC. */
2604		sc->rawtemp = stats->general.temp;
2605		temp = ops->get_temperature(sc);
2606		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
2607		    __func__, temp);
2608
2609		/* Update TX power if need be (4965AGN only). */
2610		if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2611			iwn4965_power_calibration(sc, temp);
2612	}
2613
2614	if (desc->type != IWN_BEACON_STATISTICS)
2615		return;	/* Reply to a statistics request. */
2616
2617	sc->noise = iwn_get_noise(&stats->rx.general);
2618	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);
2619
2620	/* Test that RSSI and noise are present in stats report. */
2621	if (le32toh(stats->rx.general.flags) != 1) {
2622		DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
2623		    "received statistics without RSSI");
2624		return;
2625	}
2626
2627	if (calib->state == IWN_CALIB_STATE_ASSOC)
2628		iwn_collect_noise(sc, &stats->rx.general);
2629	else if (calib->state == IWN_CALIB_STATE_RUN)
2630		iwn_tune_sensitivity(sc, &stats->rx);
2631}
2632
2633/*
2634 * Process a TX_DONE firmware notification.  Unfortunately, the 4965AGN
2635 * and 5000 adapters use different, incompatible TX status formats.
2636 */
2637static void
2638iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2639    struct iwn_rx_data *data)
2640{
2641	struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2642	struct iwn_tx_ring *ring;
2643	int qid;
2644
2645	qid = desc->qid & 0xf;
2646	ring = &sc->txq[qid];
2647
2648	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2649	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2650	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
2651	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
2652	    le32toh(stat->status));
2653
2654	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2655	if (qid >= sc->firstaggqueue) {
2656		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
2657		    &stat->status);
2658	} else {
2659		iwn_tx_done(sc, desc, stat->ackfailcnt,
2660		    le32toh(stat->status) & 0xff);
2661	}
2662}
2663
2664static void
2665iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2666    struct iwn_rx_data *data)
2667{
2668	struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2669	struct iwn_tx_ring *ring;
2670	int qid;
2671
2672	qid = desc->qid & 0xf;
2673	ring = &sc->txq[qid];
2674
2675	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2676	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2677	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
2678	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
2679	    le32toh(stat->status));
2680
2681#ifdef notyet
2682	/* Reset TX scheduler slot. */
2683	iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
2684#endif
2685
2686	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2687	if (qid >= sc->firstaggqueue) {
2688		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
2689		    &stat->status);
2690	} else {
2691		iwn_tx_done(sc, desc, stat->ackfailcnt,
2692		    le16toh(stat->status) & 0xff);
2693	}
2694}
2695
2696/*
2697 * Adapter-independent backend for TX_DONE firmware notifications.
2698 */
2699static void
2700iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
2701    uint8_t status)
2702{
2703	struct ifnet *ifp = sc->sc_ifp;
2704	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2705	struct iwn_tx_data *data = &ring->data[desc->idx];
2706	struct mbuf *m;
2707	struct ieee80211_node *ni;
2708	struct ieee80211vap *vap;
2709
2710	KASSERT(data->ni != NULL, ("no node"));
2711
2712	/* Unmap and free mbuf. */
2713	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
2714	bus_dmamap_unload(ring->data_dmat, data->map);
2715	m = data->m, data->m = NULL;
2716	ni = data->ni, data->ni = NULL;
2717	vap = ni->ni_vap;
2718
2719	if (m->m_flags & M_TXCB) {
2720		/*
2721		 * Channels marked for "radar" require traffic to be received
2722		 * to unlock before we can transmit.  Until traffic is seen
2723		 * any attempt to transmit is returned immediately with status
2724		 * set to IWN_TX_FAIL_TX_LOCKED.  Unfortunately this can easily
2725		 * happen on first authenticate after scanning.  To work around
2726		 * this we ignore a failure of this sort in AUTH state so the
2727		 * 802.11 layer will fall back to using a timeout to wait for
2728		 * the AUTH reply.  This allows the firmware time to see
2729		 * traffic so a subsequent retry of AUTH succeeds.  It's
2730		 * unclear why the firmware does not maintain state for
2731		 * channels recently visited as this would allow immediate
2732		 * use of the channel after a scan (where we see traffic).
2733		 */
2734		if (status == IWN_TX_FAIL_TX_LOCKED &&
2735		    ni->ni_vap->iv_state == IEEE80211_S_AUTH)
2736			ieee80211_process_callback(ni, m, 0);
2737		else
2738			ieee80211_process_callback(ni, m,
2739			    (status & IWN_TX_FAIL) != 0);
2740	}
2741
2742	/*
2743	 * Update rate control statistics for the node.
2744	 */
2745	if (status & IWN_TX_FAIL) {
2746		ifp->if_oerrors++;
2747		ieee80211_ratectl_tx_complete(vap, ni,
2748		    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2749	} else {
2750		ifp->if_opackets++;
2751		ieee80211_ratectl_tx_complete(vap, ni,
2752		    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2753	}
2754	m_freem(m);
2755	ieee80211_free_node(ni);
2756
2757	sc->sc_tx_timer = 0;
2758	if (--ring->queued < IWN_TX_RING_LOMARK) {
2759		sc->qfullmsk &= ~(1 << ring->qid);
2760		if (sc->qfullmsk == 0 &&
2761		    (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
2762			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2763			iwn_start_locked(ifp);
2764		}
2765	}
2766}
2767
2768/*
2769 * Process a "command done" firmware notification.  This is where we wake up
2770 * processes waiting for a synchronous command completion.
2771 */
2772static void
2773iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2774{
2775	struct iwn_tx_ring *ring = &sc->txq[4];
2776	struct iwn_tx_data *data;
2777
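	/* Host commands are always submitted on TX ring 4. */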
2778	if ((desc->qid & 0xf) != 4)
2779		return;	/* Not a command ack. */
2780
2781	data = &ring->data[desc->idx];
2782
2783	/* If the command was mapped in an mbuf, free it. */
2784	if (data->m != NULL) {
2785		bus_dmamap_sync(ring->data_dmat, data->map,
2786		    BUS_DMASYNC_POSTWRITE);
2787		bus_dmamap_unload(ring->data_dmat, data->map);
2788		m_freem(data->m);
2789		data->m = NULL;
2790	}
2791	wakeup(&ring->desc[desc->idx]);
2792}
2793
2794static void
2795iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes,
2796    void *stat)
2797{
2798	struct iwn_ops *ops = &sc->ops;
2799	struct ifnet *ifp = sc->sc_ifp;
2800	struct iwn_tx_ring *ring = &sc->txq[qid];
2801	struct iwn_tx_data *data;
2802	struct mbuf *m;
2803	struct iwn_node *wn;
2804	struct ieee80211_node *ni;
2805	struct ieee80211_tx_ampdu *tap;
2806	uint64_t bitmap;
2807	uint32_t *status = stat;
2808	uint16_t *aggstatus = stat;
2809	uint16_t ssn;
2810	uint8_t tid;
2811	int bit, i, lastidx, *res, seqno, shift, start;
2812
2813#ifdef NOT_YET
2814	if (nframes == 1) {
2815		if ((*status & 0xff) != 1 && (*status & 0xff) != 2)
2816			printf("ieee80211_send_bar()\n");
2817	}
2818#endif
2819
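	/*
	 * Build a bitmap of the frames in this aggregate, indexed relative
	 * to the first scheduler slot (entries with status bits 0xc set are
	 * skipped); it is matched later against the compressed BlockAck in
	 * iwn_rx_compressed_ba().
	 */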
2820	bitmap = 0;
2821	start = idx;
2822	for (i = 0; i < nframes; i++) {
2823		if (le16toh(aggstatus[i * 2]) & 0xc)
2824			continue;
2825
2826		idx = le16toh(aggstatus[2*i + 1]) & 0xff;
2827		bit = idx - start;
2828		shift = 0;
2829		if (bit >= 64) {
2830			shift = 0x100 - idx + start;
2831			bit = 0;
2832			start = idx;
2833		} else if (bit <= -64)
2834			bit = 0x100 - start + idx;
2835		else if (bit < 0) {
2836			shift = start - idx;
2837			start = idx;
2838			bit = 0;
2839		}
2840		bitmap = bitmap << shift;
2841		bitmap |= 1ULL << bit;
2842	}
2843	tap = sc->qid2tap[qid];
2844	tid = WME_AC_TO_TID(tap->txa_ac);
2845	wn = (void *)tap->txa_ni;
2846	wn->agg[tid].bitmap = bitmap;
2847	wn->agg[tid].startidx = start;
2848	wn->agg[tid].nframes = nframes;
2849
2850	res = NULL;
2851	ssn = 0;
2852	if (!IEEE80211_AMPDU_RUNNING(tap)) {
2853		res = tap->txa_private;
2854		ssn = tap->txa_start & 0xfff;
2855	}
2856
2857	seqno = le32toh(*(status + nframes)) & 0xfff;
2858	for (lastidx = (seqno & 0xff); ring->read != lastidx;) {
2859		data = &ring->data[ring->read];
2860
2861		/* Unmap and free mbuf. */
2862		bus_dmamap_sync(ring->data_dmat, data->map,
2863		    BUS_DMASYNC_POSTWRITE);
2864		bus_dmamap_unload(ring->data_dmat, data->map);
2865		m = data->m, data->m = NULL;
2866		ni = data->ni, data->ni = NULL;
2867
2868		KASSERT(ni != NULL, ("no node"));
2869		KASSERT(m != NULL, ("no mbuf"));
2870
2871		if (m->m_flags & M_TXCB)
2872			ieee80211_process_callback(ni, m, 1);
2873
2874		m_freem(m);
2875		ieee80211_free_node(ni);
2876
2877		ring->queued--;
2878		ring->read = (ring->read + 1) % IWN_TX_RING_COUNT;
2879	}
2880
2881	if (ring->queued == 0 && res != NULL) {
2882		iwn_nic_lock(sc);
2883		ops->ampdu_tx_stop(sc, qid, tid, ssn);
2884		iwn_nic_unlock(sc);
2885		sc->qid2tap[qid] = NULL;
2886		free(res, M_DEVBUF);
2887		return;
2888	}
2889
2890	sc->sc_tx_timer = 0;
2891	if (ring->queued < IWN_TX_RING_LOMARK) {
2892		sc->qfullmsk &= ~(1 << ring->qid);
2893		if (sc->qfullmsk == 0 &&
2894		    (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
2895			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2896			iwn_start_locked(ifp);
2897		}
2898	}
2899}
2900
2901/*
2902 * Process an INT_FH_RX or INT_SW_RX interrupt.
2903 */
2904static void
2905iwn_notif_intr(struct iwn_softc *sc)
2906{
2907	struct iwn_ops *ops = &sc->ops;
2908	struct ifnet *ifp = sc->sc_ifp;
2909	struct ieee80211com *ic = ifp->if_l2com;
2910	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2911	uint16_t hw;
2912
2913	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
2914	    BUS_DMASYNC_POSTREAD);
2915
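	/*
	 * closed_count in the shared RX status area is updated by the
	 * firmware; process notifications until our read index catches up.
	 */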
2916	hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
2917	while (sc->rxq.cur != hw) {
2918		struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2919		struct iwn_rx_desc *desc;
2920
2921		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2922		    BUS_DMASYNC_POSTREAD);
2923		desc = mtod(data->m, struct iwn_rx_desc *);
2924
2925		DPRINTF(sc, IWN_DEBUG_RECV,
2926		    "%s: qid %x idx %d flags %x type %d(%s) len %d\n",
2927		    __func__, desc->qid & 0xf, desc->idx, desc->flags,
2928		    desc->type, iwn_intr_str(desc->type),
2929		    le16toh(desc->len));
2930
2931		if (!(desc->qid & 0x80))	/* Reply to a command. */
2932			iwn_cmd_done(sc, desc);
2933
2934		switch (desc->type) {
2935		case IWN_RX_PHY:
2936			iwn_rx_phy(sc, desc, data);
2937			break;
2938
2939		case IWN_RX_DONE:		/* 4965AGN only. */
2940		case IWN_MPDU_RX_DONE:
2941			/* An 802.11 frame has been received. */
2942			iwn_rx_done(sc, desc, data);
2943			break;
2944
2945		case IWN_RX_COMPRESSED_BA:
2946			/* A Compressed BlockAck has been received. */
2947			iwn_rx_compressed_ba(sc, desc, data);
2948			break;
2949
2950		case IWN_TX_DONE:
2951			/* An 802.11 frame has been transmitted. */
2952			ops->tx_done(sc, desc, data);
2953			break;
2954
2955		case IWN_RX_STATISTICS:
2956		case IWN_BEACON_STATISTICS:
2957			iwn_rx_statistics(sc, desc, data);
2958			break;
2959
2960		case IWN_BEACON_MISSED:
2961		{
2962			struct iwn_beacon_missed *miss =
2963			    (struct iwn_beacon_missed *)(desc + 1);
2964			int misses;
2965
2966			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2967			    BUS_DMASYNC_POSTREAD);
2968			misses = le32toh(miss->consecutive);
2969
2970			DPRINTF(sc, IWN_DEBUG_STATE,
2971			    "%s: beacons missed %d/%d\n", __func__,
2972			    misses, le32toh(miss->total));
2973			/*
2974			 * If more than 5 consecutive beacons are missed,
2975			 * reinitialize the sensitivity state machine.
2976			 */
2977			if (vap->iv_state == IEEE80211_S_RUN &&
2978			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
2979				if (misses > 5)
2980					(void)iwn_init_sensitivity(sc);
2981				if (misses >= vap->iv_bmissthreshold) {
2982					IWN_UNLOCK(sc);
2983					ieee80211_beacon_miss(ic);
2984					IWN_LOCK(sc);
2985				}
2986			}
2987			break;
2988		}
2989		case IWN_UC_READY:
2990		{
2991			struct iwn_ucode_info *uc =
2992			    (struct iwn_ucode_info *)(desc + 1);
2993
2994			/* The microcontroller is ready. */
2995			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2996			    BUS_DMASYNC_POSTREAD);
2997			DPRINTF(sc, IWN_DEBUG_RESET,
2998			    "microcode alive notification version=%d.%d "
2999			    "subtype=%x alive=%x\n", uc->major, uc->minor,
3000			    uc->subtype, le32toh(uc->valid));
3001
3002			if (le32toh(uc->valid) != 1) {
3003				device_printf(sc->sc_dev,
3004				    "microcontroller initialization failed");
3005				break;
3006			}
3007			if (uc->subtype == IWN_UCODE_INIT) {
3008				/* Save microcontroller report. */
3009				memcpy(&sc->ucode_info, uc, sizeof (*uc));
3010			}
3011			/* Save the address of the error log in SRAM. */
3012			sc->errptr = le32toh(uc->errptr);
3013			break;
3014		}
3015		case IWN_STATE_CHANGED:
3016		{
3017			uint32_t *status = (uint32_t *)(desc + 1);
3018
3019			/*
3020			 * A state change lets us note hardware RF switch
3021			 * changes.  However, we handle this in iwn_intr(), as
3022			 * we get both the enable and disable interrupts there.
3023			 */
3024			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3025			    BUS_DMASYNC_POSTREAD);
3026			DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n",
3027			    le32toh(*status));
3028			break;
3029		}
3030		case IWN_START_SCAN:
3031		{
3032			struct iwn_start_scan *scan =
3033			    (struct iwn_start_scan *)(desc + 1);
3034
3035			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3036			    BUS_DMASYNC_POSTREAD);
3037			DPRINTF(sc, IWN_DEBUG_ANY,
3038			    "%s: scanning channel %d status %x\n",
3039			    __func__, scan->chan, le32toh(scan->status));
3040			break;
3041		}
3042		case IWN_STOP_SCAN:
3043		{
3044			struct iwn_stop_scan *scan =
3045			    (struct iwn_stop_scan *)(desc + 1);
3046
3047			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3048			    BUS_DMASYNC_POSTREAD);
3049			DPRINTF(sc, IWN_DEBUG_STATE,
3050			    "scan finished nchan=%d status=%d chan=%d\n",
3051			    scan->nchan, scan->status, scan->chan);
3052
3053			IWN_UNLOCK(sc);
3054			ieee80211_scan_next(vap);
3055			IWN_LOCK(sc);
3056			break;
3057		}
3058		case IWN5000_CALIBRATION_RESULT:
3059			iwn5000_rx_calib_results(sc, desc, data);
3060			break;
3061
3062		case IWN5000_CALIBRATION_DONE:
3063			sc->sc_flags |= IWN_FLAG_CALIB_DONE;
3064			wakeup(sc);
3065			break;
3066		}
3067
3068		sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
3069	}
3070
3071	/* Tell the firmware what we have processed. */
3072	hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
3073	IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
3074}
3075
3076/*
3077 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
3078 * from power-down sleep mode.
3079 */
3080static void
3081iwn_wakeup_intr(struct iwn_softc *sc)
3082{
3083	int qid;
3084
3085	DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
3086	    __func__);
3087
3088	/* Wakeup RX and TX rings. */
3089	IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
3090	for (qid = 0; qid < sc->ntxqs; qid++) {
3091		struct iwn_tx_ring *ring = &sc->txq[qid];
3092		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
3093	}
3094}
3095
3096static void
3097iwn_rftoggle_intr(struct iwn_softc *sc)
3098{
3099	struct ifnet *ifp = sc->sc_ifp;
3100	struct ieee80211com *ic = ifp->if_l2com;
3101	uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);
3102
3103	IWN_LOCK_ASSERT(sc);
3104
3105	device_printf(sc->sc_dev, "RF switch: radio %s\n",
3106	    (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
3107	if (tmp & IWN_GP_CNTRL_RFKILL)
3108		ieee80211_runtask(ic, &sc->sc_radioon_task);
3109	else
3110		ieee80211_runtask(ic, &sc->sc_radiooff_task);
3111}
3112
3113/*
3114 * Dump the error log of the firmware when a firmware panic occurs.  Although
3115 * we can't debug the firmware because it is neither open source nor free,
3116 * the log can still help us identify certain classes of problems.
3117 */
3118static void
3119iwn_fatal_intr(struct iwn_softc *sc)
3120{
3121	struct iwn_fw_dump dump;
3122	int i;
3123
3124	IWN_LOCK_ASSERT(sc);
3125
3126	/* Force a complete recalibration on next init. */
3127	sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
3128
3129	/* Check that the error log address is valid. */
3130	if (sc->errptr < IWN_FW_DATA_BASE ||
3131	    sc->errptr + sizeof (dump) >
3132	    IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
3133		printf("%s: bad firmware error log address 0x%08x\n", __func__,
3134		    sc->errptr);
3135		return;
3136	}
3137	if (iwn_nic_lock(sc) != 0) {
3138		printf("%s: could not read firmware error log\n", __func__);
3139		return;
3140	}
3141	/* Read firmware error log from SRAM. */
3142	iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
3143	    sizeof (dump) / sizeof (uint32_t));
3144	iwn_nic_unlock(sc);
3145
3146	if (dump.valid == 0) {
3147		printf("%s: firmware error log is empty\n", __func__);
3148		return;
3149	}
3150	printf("firmware error log:\n");
3151	printf("  error type      = \"%s\" (0x%08X)\n",
3152	    (dump.id < nitems(iwn_fw_errmsg)) ?
3153		iwn_fw_errmsg[dump.id] : "UNKNOWN",
3154	    dump.id);
3155	printf("  program counter = 0x%08X\n", dump.pc);
3156	printf("  source line     = 0x%08X\n", dump.src_line);
3157	printf("  error data      = 0x%08X%08X\n",
3158	    dump.error_data[0], dump.error_data[1]);
3159	printf("  branch link     = 0x%08X%08X\n",
3160	    dump.branch_link[0], dump.branch_link[1]);
3161	printf("  interrupt link  = 0x%08X%08X\n",
3162	    dump.interrupt_link[0], dump.interrupt_link[1]);
3163	printf("  time            = %u\n", dump.time[0]);
3164
3165	/* Dump driver status (TX and RX rings) while we're here. */
3166	printf("driver status:\n");
3167	for (i = 0; i < sc->ntxqs; i++) {
3168		struct iwn_tx_ring *ring = &sc->txq[i];
3169		printf("  tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
3170		    i, ring->qid, ring->cur, ring->queued);
3171	}
3172	printf("  rx ring: cur=%d\n", sc->rxq.cur);
3173}
3174
3175static void
3176iwn_intr(void *arg)
3177{
3178	struct iwn_softc *sc = arg;
3179	struct ifnet *ifp = sc->sc_ifp;
3180	uint32_t r1, r2, tmp;
3181
3182	IWN_LOCK(sc);
3183
3184	/* Disable interrupts. */
3185	IWN_WRITE(sc, IWN_INT_MASK, 0);
3186
3187	/* Read interrupts from ICT (fast) or from registers (slow). */
3188	if (sc->sc_flags & IWN_FLAG_USE_ICT) {
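		/*
		 * With ICT the device writes interrupt causes into a table
		 * in host memory; collect and acknowledge all pending
		 * entries, then rebuild the equivalent IWN_INT bits in r1.
		 */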
3189		tmp = 0;
3190		while (sc->ict[sc->ict_cur] != 0) {
3191			tmp |= sc->ict[sc->ict_cur];
3192			sc->ict[sc->ict_cur] = 0;	/* Acknowledge. */
3193			sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
3194		}
3195		tmp = le32toh(tmp);
3196		if (tmp == 0xffffffff)	/* Shouldn't happen. */
3197			tmp = 0;
3198		else if (tmp & 0xc0000)	/* Workaround a HW bug. */
3199			tmp |= 0x8000;
3200		r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
3201		r2 = 0;	/* Unused. */
3202	} else {
3203		r1 = IWN_READ(sc, IWN_INT);
3204		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
3205			return;	/* Hardware gone! */
3206		r2 = IWN_READ(sc, IWN_FH_INT);
3207	}
3208
3209	DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=%x reg2=%x\n", r1, r2);
3210
3211	if (r1 == 0 && r2 == 0)
3212		goto done;	/* Interrupt not for us. */
3213
3214	/* Acknowledge interrupts. */
3215	IWN_WRITE(sc, IWN_INT, r1);
3216	if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
3217		IWN_WRITE(sc, IWN_FH_INT, r2);
3218
3219	if (r1 & IWN_INT_RF_TOGGLED) {
3220		iwn_rftoggle_intr(sc);
3221		goto done;
3222	}
3223	if (r1 & IWN_INT_CT_REACHED) {
3224		device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
3225		    __func__);
3226	}
3227	if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
3228		device_printf(sc->sc_dev, "%s: fatal firmware error\n",
3229		    __func__);
3230		/* Dump firmware error log and stop. */
3231		iwn_fatal_intr(sc);
3232		ifp->if_flags &= ~IFF_UP;
3233		iwn_stop_locked(sc);
3234		goto done;
3235	}
3236	if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
3237	    (r2 & IWN_FH_INT_RX)) {
3238		if (sc->sc_flags & IWN_FLAG_USE_ICT) {
3239			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
3240				IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
3241			IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3242			    IWN_INT_PERIODIC_DIS);
3243			iwn_notif_intr(sc);
3244			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
3245				IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3246				    IWN_INT_PERIODIC_ENA);
3247			}
3248		} else
3249			iwn_notif_intr(sc);
3250	}
3251
3252	if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
3253		if (sc->sc_flags & IWN_FLAG_USE_ICT)
3254			IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
3255		wakeup(sc);	/* FH DMA transfer completed. */
3256	}
3257
3258	if (r1 & IWN_INT_ALIVE)
3259		wakeup(sc);	/* Firmware is alive. */
3260
3261	if (r1 & IWN_INT_WAKEUP)
3262		iwn_wakeup_intr(sc);
3263
3264done:
3265	/* Re-enable interrupts. */
3266	if (ifp->if_flags & IFF_UP)
3267		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
3268
3269	IWN_UNLOCK(sc);
3270}
3271
3272/*
3273 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
3274 * 5000 adapters use a slightly different format).
3275 */
3276static void
3277iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3278    uint16_t len)
3279{
3280	uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
3281
3282	*w = htole16(len + 8);
3283	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3284	    BUS_DMASYNC_PREWRITE);
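	/*
	 * The first IWN_SCHED_WINSZ entries appear to be mirrored past the
	 * end of the ring so the scheduler can read a contiguous window
	 * across the wrap; keep the copy in sync.
	 */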
3285	if (idx < IWN_SCHED_WINSZ) {
3286		*(w + IWN_TX_RING_COUNT) = *w;
3287		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3288		    BUS_DMASYNC_PREWRITE);
3289	}
3290}
3291
3292static void
3293iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3294    uint16_t len)
3295{
3296	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
3297
3298	*w = htole16(id << 12 | (len + 8));
3299	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3300	    BUS_DMASYNC_PREWRITE);
3301	if (idx < IWN_SCHED_WINSZ) {
3302		*(w + IWN_TX_RING_COUNT) = *w;
3303		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3304		    BUS_DMASYNC_PREWRITE);
3305	}
3306}
3307
3308#ifdef notyet
3309static void
3310iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
3311{
3312	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
3313
3314	*w = (*w & htole16(0xf000)) | htole16(1);
3315	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3316	    BUS_DMASYNC_PREWRITE);
3317	if (idx < IWN_SCHED_WINSZ) {
3318		*(w + IWN_TX_RING_COUNT) = *w;
3319		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3320		    BUS_DMASYNC_PREWRITE);
3321	}
3322}
3323#endif
3324
3325static int
3326iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
3327{
3328	struct iwn_ops *ops = &sc->ops;
3329	const struct ieee80211_txparam *tp;
3330	struct ieee80211vap *vap = ni->ni_vap;
3331	struct ieee80211com *ic = ni->ni_ic;
3332	struct iwn_node *wn = (void *)ni;
3333	struct iwn_tx_ring *ring;
3334	struct iwn_tx_desc *desc;
3335	struct iwn_tx_data *data;
3336	struct iwn_tx_cmd *cmd;
3337	struct iwn_cmd_data *tx;
3338	struct ieee80211_frame *wh;
3339	struct ieee80211_key *k = NULL;
3340	struct mbuf *m1;
3341	uint32_t flags;
3342	uint16_t qos;
3343	u_int hdrlen;
3344	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
3345	uint8_t tid, ridx, txant, type;
3346	int ac, i, totlen, error, pad, nsegs = 0, rate;
3347
3348	IWN_LOCK_ASSERT(sc);
3349
3350	wh = mtod(m, struct ieee80211_frame *);
3351	hdrlen = ieee80211_anyhdrsize(wh);
3352	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3353
3354	/* Select EDCA Access Category and TX ring for this frame. */
3355	if (IEEE80211_QOS_HAS_SEQ(wh)) {
3356		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
3357		tid = qos & IEEE80211_QOS_TID;
3358	} else {
3359		qos = 0;
3360		tid = 0;
3361	}
3362	ac = M_WME_GETAC(m);
3363	if (m->m_flags & M_AMPDU_MPDU) {
3364		struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];
3365
3366		if (!IEEE80211_AMPDU_RUNNING(tap)) {
3367			m_freem(m);
3368			return EINVAL;
3369		}
3370
3371		ac = *(int *)tap->txa_private;
3372		*(uint16_t *)wh->i_seq =
3373		    htole16(ni->ni_txseqs[tid] << IEEE80211_SEQ_SEQ_SHIFT);
3374		ni->ni_txseqs[tid]++;
3375	}
3376	ring = &sc->txq[ac];
3377	desc = &ring->desc[ring->cur];
3378	data = &ring->data[ring->cur];
3379
3380	/* Choose a TX rate index. */
3381	tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
3382	if (type == IEEE80211_FC0_TYPE_MGT)
3383		rate = tp->mgmtrate;
3384	else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
3385		rate = tp->mcastrate;
3386	else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
3387		rate = tp->ucastrate;
3388	else {
3389		/* XXX pass pktlen */
3390		(void) ieee80211_ratectl_rate(ni, NULL, 0);
3391		rate = ni->ni_txrate;
3392	}
3393	ridx = ic->ic_rt->rateCodeToIndex[rate];
3394
3395	/* Encrypt the frame if need be. */
3396	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
3397		/* Retrieve key for TX. */
3398		k = ieee80211_crypto_encap(ni, m);
3399		if (k == NULL) {
3400			m_freem(m);
3401			return ENOBUFS;
3402		}
3403		/* 802.11 header may have moved. */
3404		wh = mtod(m, struct ieee80211_frame *);
3405	}
3406	totlen = m->m_pkthdr.len;
3407
3408	if (ieee80211_radiotap_active_vap(vap)) {
3409		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3410
3411		tap->wt_flags = 0;
3412		tap->wt_rate = rate;
3413		if (k != NULL)
3414			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3415
3416		ieee80211_radiotap_tx(vap, m);
3417	}
3418
3419	/* Prepare TX firmware command. */
3420	cmd = &ring->cmd[ring->cur];
3421	cmd->code = IWN_CMD_TX_DATA;
3422	cmd->flags = 0;
3423	cmd->qid = ring->qid;
3424	cmd->idx = ring->cur;
3425
3426	tx = (struct iwn_cmd_data *)cmd->data;
3427	/* NB: No need to clear tx, all fields are reinitialized here. */
3428	tx->scratch = 0;	/* clear "scratch" area */
3429
3430	flags = 0;
3431	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3432		/* Unicast frame, check if an ACK is expected. */
3433		if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
3434		    IEEE80211_QOS_ACKPOLICY_NOACK)
3435			flags |= IWN_TX_NEED_ACK;
3436	}
3437	if ((wh->i_fc[0] &
3438	    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
3439	    (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
3440		flags |= IWN_TX_IMM_BA;		/* Cannot happen yet. */
3441
3442	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
3443		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */
3444
3445	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
3446	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3447		/* NB: Group frames are sent using CCK in 802.11b/g. */
3448		if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
3449			flags |= IWN_TX_NEED_RTS;
3450		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
3451		    ridx >= IWN_RIDX_OFDM6) {
3452			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
3453				flags |= IWN_TX_NEED_CTS;
3454			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
3455				flags |= IWN_TX_NEED_RTS;
3456		}
3457		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
3458			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3459				/* 5000 autoselects RTS/CTS or CTS-to-self. */
3460				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
3461				flags |= IWN_TX_NEED_PROTECTION;
3462			} else
3463				flags |= IWN_TX_FULL_TXOP;
3464		}
3465	}
3466
3467	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3468	    type != IEEE80211_FC0_TYPE_DATA)
3469		tx->id = sc->broadcast_id;
3470	else
3471		tx->id = wn->id;
3472
3473	if (type == IEEE80211_FC0_TYPE_MGT) {
3474		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3475
3476		/* Tell HW to set timestamp in probe responses. */
3477		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3478			flags |= IWN_TX_INSERT_TSTAMP;
3479		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3480		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3481			tx->timeout = htole16(3);
3482		else
3483			tx->timeout = htole16(2);
3484	} else
3485		tx->timeout = htole16(0);
3486
3487	if (hdrlen & 3) {
3488		/* First segment length must be a multiple of 4. */
3489		flags |= IWN_TX_NEED_PADDING;
3490		pad = 4 - (hdrlen & 3);
3491	} else
3492		pad = 0;
3493
3494	tx->len = htole16(totlen);
3495	tx->tid = tid;
3496	tx->rts_ntries = 60;
3497	tx->data_ntries = 15;
3498	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3499	tx->rate = wn->ridx[rate];
3500	if (tx->id == sc->broadcast_id) {
3501		/* Group or management frame. */
3502		tx->linkq = 0;
3503		/* XXX Alternate between antenna A and B? */
3504		txant = IWN_LSB(sc->txchainmask);
3505		tx->rate |= htole32(IWN_RFLAG_ANT(txant));
3506	} else {
3507		tx->linkq = ni->ni_rates.rs_nrates - ridx - 1;
3508		flags |= IWN_TX_LINKQ;	/* enable MRR */
3509	}
3510	/* Set physical address of "scratch area". */
3511	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3512	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3513
3514	/* Copy 802.11 header in TX command. */
3515	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3516
3517	/* Trim 802.11 header. */
3518	m_adj(m, hdrlen);
3519	tx->security = 0;
3520	tx->flags = htole32(flags);
3521
3522	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
3523	    &nsegs, BUS_DMA_NOWAIT);
3524	if (error != 0) {
3525		if (error != EFBIG) {
3526			device_printf(sc->sc_dev,
3527			    "%s: can't map mbuf (error %d)\n", __func__, error);
3528			m_freem(m);
3529			return error;
3530		}
3531		/* Too many DMA segments, linearize mbuf. */
3532		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
3533		if (m1 == NULL) {
3534			device_printf(sc->sc_dev,
3535			    "%s: could not defrag mbuf\n", __func__);
3536			m_freem(m);
3537			return ENOBUFS;
3538		}
3539		m = m1;
3540
3541		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3542		    segs, &nsegs, BUS_DMA_NOWAIT);
3543		if (error != 0) {
3544			device_printf(sc->sc_dev,
3545			    "%s: can't map mbuf (error %d)\n", __func__, error);
3546			m_freem(m);
3547			return error;
3548		}
3549	}
3550
3551	data->m = m;
3552	data->ni = ni;
3553
3554	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3555	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3556
3557	/* Fill TX descriptor. */
3558	desc->nsegs = 1;
3559	if (m->m_len != 0)
3560		desc->nsegs += nsegs;
3561	/* First DMA segment is used by the TX command. */
3562	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3563	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
3564	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
3565	/* Other DMA segments are for data payload. */
3566	seg = &segs[0];
3567	for (i = 1; i <= nsegs; i++) {
3568		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
3569		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
3570		    seg->ds_len << 4);
3571		seg++;
3572	}
3573
3574	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3575	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3576	    BUS_DMASYNC_PREWRITE);
3577	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3578	    BUS_DMASYNC_PREWRITE);
3579
3580	/* Update TX scheduler. */
3581	if (ring->qid >= sc->firstaggqueue)
3582		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3583
3584	/* Kick TX ring. */
3585	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3586	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3587
3588	/* Mark TX ring as full if we reach a certain threshold. */
3589	if (++ring->queued > IWN_TX_RING_HIMARK)
3590		sc->qfullmsk |= 1 << ring->qid;
3591
3592	return 0;
3593}
3594
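/*
 * Raw frame transmission: send a frame using the rate, retry counts and
 * protection flags supplied by the caller (ieee80211_bpf_params) instead
 * of the driver's rate control state.  Frames go out through the
 * broadcast station entry.
 */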
3595static int
3596iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
3597    struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
3598{
3599	struct iwn_ops *ops = &sc->ops;
3600	struct ifnet *ifp = sc->sc_ifp;
3601	struct ieee80211vap *vap = ni->ni_vap;
3602	struct ieee80211com *ic = ifp->if_l2com;
3603	struct iwn_tx_cmd *cmd;
3604	struct iwn_cmd_data *tx;
3605	struct ieee80211_frame *wh;
3606	struct iwn_tx_ring *ring;
3607	struct iwn_tx_desc *desc;
3608	struct iwn_tx_data *data;
3609	struct mbuf *m1;
3610	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
3611	uint32_t flags;
3612	u_int hdrlen;
3613	int ac, totlen, error, pad, nsegs = 0, i, rate;
3614	uint8_t ridx, type, txant;
3615
3616	IWN_LOCK_ASSERT(sc);
3617
3618	wh = mtod(m, struct ieee80211_frame *);
3619	hdrlen = ieee80211_anyhdrsize(wh);
3620	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3621
3622	ac = params->ibp_pri & 3;
3623
3624	ring = &sc->txq[ac];
3625	desc = &ring->desc[ring->cur];
3626	data = &ring->data[ring->cur];
3627
3628	/* Choose a TX rate index. */
3629	rate = params->ibp_rate0;
3630	ridx = ic->ic_rt->rateCodeToIndex[rate];
3631	if (ridx == (uint8_t)-1) {
3632		/* XXX fall back to mcast/mgmt rate? */
3633		m_freem(m);
3634		return EINVAL;
3635	}
3636
3637	totlen = m->m_pkthdr.len;
3638
3639	/* Prepare TX firmware command. */
3640	cmd = &ring->cmd[ring->cur];
3641	cmd->code = IWN_CMD_TX_DATA;
3642	cmd->flags = 0;
3643	cmd->qid = ring->qid;
3644	cmd->idx = ring->cur;
3645
3646	tx = (struct iwn_cmd_data *)cmd->data;
3647	/* NB: No need to clear tx, all fields are reinitialized here. */
3648	tx->scratch = 0;	/* clear "scratch" area */
3649
3650	flags = 0;
3651	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
3652		flags |= IWN_TX_NEED_ACK;
3653	if (params->ibp_flags & IEEE80211_BPF_RTS) {
3654		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3655			/* 5000 autoselects RTS/CTS or CTS-to-self. */
3656			flags &= ~IWN_TX_NEED_RTS;
3657			flags |= IWN_TX_NEED_PROTECTION;
3658		} else
3659			flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
3660	}
3661	if (params->ibp_flags & IEEE80211_BPF_CTS) {
3662		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3663			/* 5000 autoselects RTS/CTS or CTS-to-self. */
3664			flags &= ~IWN_TX_NEED_CTS;
3665			flags |= IWN_TX_NEED_PROTECTION;
3666		} else
3667			flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
3668	}
3669	if (type == IEEE80211_FC0_TYPE_MGT) {
3670		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3671
3672		/* Tell HW to set timestamp in probe responses. */
3673		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3674			flags |= IWN_TX_INSERT_TSTAMP;
3675
3676		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3677		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3678			tx->timeout = htole16(3);
3679		else
3680			tx->timeout = htole16(2);
3681	} else
3682		tx->timeout = htole16(0);
3683
3684	if (hdrlen & 3) {
3685		/* First segment length must be a multiple of 4. */
3686		flags |= IWN_TX_NEED_PADDING;
3687		pad = 4 - (hdrlen & 3);
3688	} else
3689		pad = 0;
3690
3691	if (ieee80211_radiotap_active_vap(vap)) {
3692		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3693
3694		tap->wt_flags = 0;
3695		tap->wt_rate = rate;
3696
3697		ieee80211_radiotap_tx(vap, m);
3698	}
3699
3700	tx->len = htole16(totlen);
3701	tx->tid = 0;
3702	tx->id = sc->broadcast_id;
3703	tx->rts_ntries = params->ibp_try1;
3704	tx->data_ntries = params->ibp_try0;
3705	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3706	tx->rate = htole32(rate2plcp(rate));
3707	if (ridx < IWN_RIDX_OFDM6 &&
3708	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
3709		tx->rate |= htole32(IWN_RFLAG_CCK);
3710	/* Group or management frame. */
3711	tx->linkq = 0;
3712	txant = IWN_LSB(sc->txchainmask);
3713	tx->rate |= htole32(IWN_RFLAG_ANT(txant));
3714	/* Set physical address of "scratch area". */
3715	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3716	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3717
3718	/* Copy 802.11 header in TX command. */
3719	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3720
3721	/* Trim 802.11 header. */
3722	m_adj(m, hdrlen);
3723	tx->security = 0;
3724	tx->flags = htole32(flags);
3725
3726	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
3727	    &nsegs, BUS_DMA_NOWAIT);
3728	if (error != 0) {
3729		if (error != EFBIG) {
3730			device_printf(sc->sc_dev,
3731			    "%s: can't map mbuf (error %d)\n", __func__, error);
3732			m_freem(m);
3733			return error;
3734		}
3735		/* Too many DMA segments, linearize mbuf. */
3736		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
3737		if (m1 == NULL) {
3738			device_printf(sc->sc_dev,
3739			    "%s: could not defrag mbuf\n", __func__);
3740			m_freem(m);
3741			return ENOBUFS;
3742		}
3743		m = m1;
3744
3745		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3746		    segs, &nsegs, BUS_DMA_NOWAIT);
3747		if (error != 0) {
3748			device_printf(sc->sc_dev,
3749			    "%s: can't map mbuf (error %d)\n", __func__, error);
3750			m_freem(m);
3751			return error;
3752		}
3753	}
3754
3755	data->m = m;
3756	data->ni = ni;
3757
3758	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3759	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3760
3761	/* Fill TX descriptor. */
3762	desc->nsegs = 1;
3763	if (m->m_len != 0)
3764		desc->nsegs += nsegs;
3765	/* First DMA segment is used by the TX command. */
3766	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3767	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
3768	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
3769	/* Other DMA segments are for data payload. */
3770	seg = &segs[0];
3771	for (i = 1; i <= nsegs; i++) {
3772		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
3773		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
3774		    seg->ds_len << 4);
3775		seg++;
3776	}
3777
3778	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3779	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3780	    BUS_DMASYNC_PREWRITE);
3781	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3782	    BUS_DMASYNC_PREWRITE);
3783
3784	/* Update TX scheduler. */
3785	if (ring->qid >= sc->firstaggqueue)
3786		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3787
3788	/* Kick TX ring. */
3789	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3790	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3791
3792	/* Mark TX ring as full if we reach a certain threshold. */
3793	if (++ring->queued > IWN_TX_RING_HIMARK)
3794		sc->qfullmsk |= 1 << ring->qid;
3795
3796	return 0;
3797}
3798
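/*
 * net80211 raw transmit entry point.  Frames without bpf parameters take
 * the regular iwn_tx_data() path; otherwise the caller-supplied parameters
 * are honored by iwn_tx_data_raw().
 */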
3799static int
3800iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3801    const struct ieee80211_bpf_params *params)
3802{
3803	struct ieee80211com *ic = ni->ni_ic;
3804	struct ifnet *ifp = ic->ic_ifp;
3805	struct iwn_softc *sc = ifp->if_softc;
3806	int error = 0;
3807
3808	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3809		ieee80211_free_node(ni);
3810		m_freem(m);
3811		return ENETDOWN;
3812	}
3813
3814	IWN_LOCK(sc);
3815	if (params == NULL) {
3816		/*
3817		 * Legacy path; interpret frame contents to decide
3818		 * precisely how to send the frame.
3819		 */
3820		error = iwn_tx_data(sc, m, ni);
3821	} else {
3822		/*
3823		 * Caller supplied explicit parameters to use in
3824		 * sending the frame.
3825		 */
3826		error = iwn_tx_data_raw(sc, m, ni, params);
3827	}
3828	if (error != 0) {
3829		/* NB: m is reclaimed on tx failure */
3830		ieee80211_free_node(ni);
3831		ifp->if_oerrors++;
3832	}
3833	sc->sc_tx_timer = 5;
3834
3835	IWN_UNLOCK(sc);
3836	return error;
3837}
3838
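/*
 * ifnet start callback: drain the interface send queue and hand frames to
 * iwn_tx_data() until the TX rings fill up.  The real work is done in
 * iwn_start_locked() with the driver lock held.
 */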
3839static void
3840iwn_start(struct ifnet *ifp)
3841{
3842	struct iwn_softc *sc = ifp->if_softc;
3843
3844	IWN_LOCK(sc);
3845	iwn_start_locked(ifp);
3846	IWN_UNLOCK(sc);
3847}
3848
3849static void
3850iwn_start_locked(struct ifnet *ifp)
3851{
3852	struct iwn_softc *sc = ifp->if_softc;
3853	struct ieee80211_node *ni;
3854	struct mbuf *m;
3855
3856	IWN_LOCK_ASSERT(sc);
3857
3858	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
3859	    (ifp->if_drv_flags & IFF_DRV_OACTIVE))
3860		return;
3861
3862	for (;;) {
3863		if (sc->qfullmsk != 0) {
3864			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3865			break;
3866		}
3867		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
3868		if (m == NULL)
3869			break;
3870		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3871		if (iwn_tx_data(sc, m, ni) != 0) {
3872			ieee80211_free_node(ni);
3873			ifp->if_oerrors++;
3874			continue;
3875		}
3876		sc->sc_tx_timer = 5;
3877	}
3878}
3879
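/*
 * Per-second watchdog callout: decrement the TX timer armed by the
 * transmit paths and schedule a firmware reinitialization if it expires.
 */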
3880static void
3881iwn_watchdog(void *arg)
3882{
3883	struct iwn_softc *sc = arg;
3884	struct ifnet *ifp = sc->sc_ifp;
3885	struct ieee80211com *ic = ifp->if_l2com;
3886
3887	IWN_LOCK_ASSERT(sc);
3888
3889	KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));
3890
3891	if (sc->sc_tx_timer > 0) {
3892		if (--sc->sc_tx_timer == 0) {
3893			if_printf(ifp, "device timeout\n");
3894			ieee80211_runtask(ic, &sc->sc_reinit_task);
3895			return;
3896		}
3897	}
3898	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
3899}
3900
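/*
 * ifnet ioctl handler: bring the interface up or down on SIOCSIFFLAGS and
 * hand address and media requests to the generic helpers.
 */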
3901static int
3902iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
3903{
3904	struct iwn_softc *sc = ifp->if_softc;
3905	struct ieee80211com *ic = ifp->if_l2com;
3906	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3907	struct ifreq *ifr = (struct ifreq *) data;
3908	int error = 0, startall = 0, stop = 0;
3909
3910	switch (cmd) {
3911	case SIOCGIFADDR:
3912		error = ether_ioctl(ifp, cmd, data);
3913		break;
3914	case SIOCSIFFLAGS:
3915		IWN_LOCK(sc);
3916		if (ifp->if_flags & IFF_UP) {
3917			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3918				iwn_init_locked(sc);
3919				if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
3920					startall = 1;
3921				else
3922					stop = 1;
3923			}
3924		} else {
3925			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3926				iwn_stop_locked(sc);
3927		}
3928		IWN_UNLOCK(sc);
3929		if (startall)
3930			ieee80211_start_all(ic);
3931		else if (vap != NULL && stop)
3932			ieee80211_stop(vap);
3933		break;
3934	case SIOCGIFMEDIA:
3935		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
3936		break;
3937	default:
3938		error = EINVAL;
3939		break;
3940	}
3941	return error;
3942}
3943
3944/*
3945 * Send a command to the firmware.
3946 */
3947static int
3948iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
3949{
3950	struct iwn_tx_ring *ring = &sc->txq[4];
3951	struct iwn_tx_desc *desc;
3952	struct iwn_tx_data *data;
3953	struct iwn_tx_cmd *cmd;
3954	struct mbuf *m;
3955	bus_addr_t paddr;
3956	int totlen, error;
3957
3958	if (async == 0)
3959		IWN_LOCK_ASSERT(sc);
3960
3961	desc = &ring->desc[ring->cur];
3962	data = &ring->data[ring->cur];
3963	totlen = 4 + size;
3964
3965	if (size > sizeof cmd->data) {
3966		/* Command is too large to fit in a descriptor. */
3967		if (totlen > MCLBYTES)
3968			return EINVAL;
3969		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
3970		if (m == NULL)
3971			return ENOMEM;
3972		cmd = mtod(m, struct iwn_tx_cmd *);
3973		error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
3974		    totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
3975		if (error != 0) {
3976			m_freem(m);
3977			return error;
3978		}
3979		data->m = m;
3980	} else {
3981		cmd = &ring->cmd[ring->cur];
3982		paddr = data->cmd_paddr;
3983	}
3984
3985	cmd->code = code;
3986	cmd->flags = 0;
3987	cmd->qid = ring->qid;
3988	cmd->idx = ring->cur;
3989	memcpy(cmd->data, buf, size);
3990
3991	desc->nsegs = 1;
3992	desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
3993	desc->segs[0].len  = htole16(IWN_HIADDR(paddr) | totlen << 4);
3994
3995	DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
3996	    __func__, iwn_intr_str(cmd->code), cmd->code,
3997	    cmd->flags, cmd->qid, cmd->idx);
3998
3999	if (size > sizeof cmd->data) {
4000		bus_dmamap_sync(ring->data_dmat, data->map,
4001		    BUS_DMASYNC_PREWRITE);
4002	} else {
4003		bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
4004		    BUS_DMASYNC_PREWRITE);
4005	}
4006	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4007	    BUS_DMASYNC_PREWRITE);
4008
4009	/* Kick command ring. */
4010	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
4011	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4012
4013	return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
4014}
4015
4016static int
4017iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
4018{
4019	struct iwn4965_node_info hnode;
4020	caddr_t src, dst;
4021
4022	/*
4023	 * We use the node structure for 5000 Series internally (it is
4024	 * a superset of the one for 4965AGN). We thus copy the common
4025	 * fields before sending the command.
4026	 */
4027	src = (caddr_t)node;
4028	dst = (caddr_t)&hnode;
4029	memcpy(dst, src, 48);
4030	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
4031	memcpy(dst + 48, src + 72, 20);
4032	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
4033}
4034
4035static int
4036iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
4037{
4038	/* Direct mapping. */
4039	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
4040}
4041
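/*
 * Build and send the firmware link quality (rate retry) table for a node,
 * starting at the highest negotiated bit-rate and falling back to lower
 * rates on successive retries.
 */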
4042static int
4043iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
4044{
4045#define	RV(v)	((v) & IEEE80211_RATE_VAL)
4046	struct iwn_node *wn = (void *)ni;
4047	struct ieee80211_rateset *rs = &ni->ni_rates;
4048	struct iwn_cmd_link_quality linkq;
4049	uint8_t txant;
4050	int i, rate, txrate;
4051
4052	/* Use the first valid TX antenna. */
4053	txant = IWN_LSB(sc->txchainmask);
4054
4055	memset(&linkq, 0, sizeof linkq);
4056	linkq.id = wn->id;
4057	linkq.antmsk_1stream = txant;
4058	linkq.antmsk_2stream = IWN_ANT_AB;
4059	linkq.ampdu_max = 64;
4060	linkq.ampdu_threshold = 3;
4061	linkq.ampdu_limit = htole16(4000);	/* 4ms */
4062
4063	/* Start at highest available bit-rate. */
4064	if (IEEE80211_IS_CHAN_HT(ni->ni_chan))
4065		txrate = ni->ni_htrates.rs_nrates - 1;
4066	else
4067		txrate = rs->rs_nrates - 1;
4068	for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
4069		if (IEEE80211_IS_CHAN_HT(ni->ni_chan))
4070			rate = IEEE80211_RATE_MCS | txrate;
4071		else
4072			rate = RV(rs->rs_rates[txrate]);
4073		linkq.retry[i] = wn->ridx[rate];
4074
4075		if ((le32toh(wn->ridx[rate]) & IWN_RFLAG_MCS) &&
4076		    RV(le32toh(wn->ridx[rate])) > 7)
4077			linkq.mimo = i + 1;
4078
4079		/* Next retry at the immediately lower bit-rate. */
4080		if (txrate > 0)
4081			txrate--;
4082	}
4083	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
4084#undef	RV
4085}
4086
4087/*
4088 * Broadcast node is used to send group-addressed and management frames.
4089 */
4090static int
4091iwn_add_broadcast_node(struct iwn_softc *sc, int async)
4092{
4093	struct iwn_ops *ops = &sc->ops;
4094	struct ifnet *ifp = sc->sc_ifp;
4095	struct ieee80211com *ic = ifp->if_l2com;
4096	struct iwn_node_info node;
4097	struct iwn_cmd_link_quality linkq;
4098	uint8_t txant;
4099	int i, error;
4100
4101	memset(&node, 0, sizeof node);
4102	IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
4103	node.id = sc->broadcast_id;
4104	DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
4105	if ((error = ops->add_node(sc, &node, async)) != 0)
4106		return error;
4107
4108	/* Use the first valid TX antenna. */
4109	txant = IWN_LSB(sc->txchainmask);
4110
4111	memset(&linkq, 0, sizeof linkq);
4112	linkq.id = sc->broadcast_id;
4113	linkq.antmsk_1stream = txant;
4114	linkq.antmsk_2stream = IWN_ANT_AB;
4115	linkq.ampdu_max = 64;
4116	linkq.ampdu_threshold = 3;
4117	linkq.ampdu_limit = htole16(4000);	/* 4ms */
4118
4119	/* Use lowest mandatory bit-rate. */
4120	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
4121		linkq.retry[0] = htole32(0xd);
4122	else
4123		linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK);
4124	linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant));
4125	/* Use same bit-rate for all TX retries. */
4126	for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
4127		linkq.retry[i] = linkq.retry[0];
4128	}
4129	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
4130}
4131
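/*
 * net80211 EDCA update callback: translate the current WME parameters
 * into an EDCA firmware command.
 */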
4132static int
4133iwn_updateedca(struct ieee80211com *ic)
4134{
4135#define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
4136	struct iwn_softc *sc = ic->ic_ifp->if_softc;
4137	struct iwn_edca_params cmd;
4138	int aci;
4139
4140	memset(&cmd, 0, sizeof cmd);
4141	cmd.flags = htole32(IWN_EDCA_UPDATE);
4142	for (aci = 0; aci < WME_NUM_AC; aci++) {
4143		const struct wmeParams *ac =
4144		    &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
4145		cmd.ac[aci].aifsn = ac->wmep_aifsn;
4146		cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
4147		cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
4148		cmd.ac[aci].txoplimit =
4149		    htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
4150	}
4151	IEEE80211_UNLOCK(ic);
4152	IWN_LOCK(sc);
4153	(void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
4154	IWN_UNLOCK(sc);
4155	IEEE80211_LOCK(ic);
4156	return 0;
4157#undef IWN_EXP2
4158}
4159
4160static void
4161iwn_update_mcast(struct ifnet *ifp)
4162{
4163	/* Ignore */
4164}
4165
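/*
 * Program an LED blink pattern (on/off times) through the SET_LED
 * firmware command.
 */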
4166static void
4167iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
4168{
4169	struct iwn_cmd_led led;
4170
4171	/* Clear microcode LED ownership. */
4172	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
4173
4174	led.which = which;
4175	led.unit = htole32(10000);	/* on/off in unit of 100ms */
4176	led.off = off;
4177	led.on = on;
4178	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
4179}
4180
4181/*
4182 * Set the critical temperature at which the firmware will stop the radio
4183 * and notify us.
4184 */
4185static int
4186iwn_set_critical_temp(struct iwn_softc *sc)
4187{
4188	struct iwn_critical_temp crit;
4189	int32_t temp;
4190
4191	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
4192
4193	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
4194		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
4195	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
4196		temp = IWN_CTOK(110);
4197	else
4198		temp = 110;
4199	memset(&crit, 0, sizeof crit);
4200	crit.tempR = htole32(temp);
4201	DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp);
4202	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
4203}
4204
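/*
 * Configure association timing: pass the AP timestamp, the beacon interval
 * and the remaining time until the next beacon to the firmware.
 */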
4205static int
4206iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
4207{
4208	struct iwn_cmd_timing cmd;
4209	uint64_t val, mod;
4210
4211	memset(&cmd, 0, sizeof cmd);
4212	memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
4213	cmd.bintval = htole16(ni->ni_intval);
4214	cmd.lintval = htole16(10);
4215
4216	/* Compute remaining time until next beacon. */
4217	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
4218	mod = le64toh(cmd.tstamp) % val;
4219	cmd.binitval = htole32((uint32_t)(val - mod));
4220
4221	DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
4222	    ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
4223
4224	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
4225}
4226
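/*
 * Recalibrate TX power when the temperature has drifted by 3 degC or more
 * since the last calibration (4965AGN only).
 */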
4227static void
4228iwn4965_power_calibration(struct iwn_softc *sc, int temp)
4229{
4230	struct ifnet *ifp = sc->sc_ifp;
4231	struct ieee80211com *ic = ifp->if_l2com;
4232
4233	/* Adjust TX power if need be (delta >= 3 degC). */
4234	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
4235	    __func__, sc->temp, temp);
4236	if (abs(temp - sc->temp) >= 3) {
4237		/* Record temperature of last calibration. */
4238		sc->temp = temp;
4239		(void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
4240	}
4241}
4242
4243/*
4244 * Set TX power for current channel (each rate has its own power settings).
4245 * This function takes into account the regulatory information from EEPROM,
4246 * the current temperature and the current voltage.
4247 */
4248static int
4249iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
4250    int async)
4251{
4252/* Fixed-point arithmetic division using a n-bit fractional part. */
4253#define fdivround(a, b, n)	\
4254	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
4255/* Linear interpolation. */
4256#define interpolate(x, x1, y1, x2, y2, n)	\
4257	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
4258
4259	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
4260	struct iwn_ucode_info *uc = &sc->ucode_info;
4261	struct iwn4965_cmd_txpower cmd;
4262	struct iwn4965_eeprom_chan_samples *chans;
4263	const uint8_t *rf_gain, *dsp_gain;
4264	int32_t vdiff, tdiff;
4265	int i, c, grp, maxpwr;
4266	uint8_t chan;
4267
4268	/* Retrieve current channel from last RXON. */
4269	chan = sc->rxon.chan;
4270	DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
4271	    chan);
4272
4273	memset(&cmd, 0, sizeof cmd);
4274	cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
4275	cmd.chan = chan;
4276
4277	if (IEEE80211_IS_CHAN_5GHZ(ch)) {
4278		maxpwr   = sc->maxpwr5GHz;
4279		rf_gain  = iwn4965_rf_gain_5ghz;
4280		dsp_gain = iwn4965_dsp_gain_5ghz;
4281	} else {
4282		maxpwr   = sc->maxpwr2GHz;
4283		rf_gain  = iwn4965_rf_gain_2ghz;
4284		dsp_gain = iwn4965_dsp_gain_2ghz;
4285	}
4286
4287	/* Compute voltage compensation. */
4288	vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
4289	if (vdiff > 0)
4290		vdiff *= 2;
4291	if (abs(vdiff) > 2)
4292		vdiff = 0;
4293	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4294	    "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
4295	    __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);
4296
4297	/* Get channel attenuation group. */
4298	if (chan <= 20)		/* 1-20 */
4299		grp = 4;
4300	else if (chan <= 43)	/* 34-43 */
4301		grp = 0;
4302	else if (chan <= 70)	/* 44-70 */
4303		grp = 1;
4304	else if (chan <= 124)	/* 71-124 */
4305		grp = 2;
4306	else			/* 125-200 */
4307		grp = 3;
4308	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4309	    "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);
4310
4311	/* Get channel sub-band. */
4312	for (i = 0; i < IWN_NBANDS; i++)
4313		if (sc->bands[i].lo != 0 &&
4314		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
4315			break;
4316	if (i == IWN_NBANDS)	/* Should not happen in practice. */
4317		return EINVAL;
4318	chans = sc->bands[i].chans;
4319	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4320	    "%s: chan %d sub-band=%d\n", __func__, chan, i);
4321
4322	for (c = 0; c < 2; c++) {
4323		uint8_t power, gain, temp;
4324		int maxchpwr, pwr, ridx, idx;
4325
4326		power = interpolate(chan,
4327		    chans[0].num, chans[0].samples[c][1].power,
4328		    chans[1].num, chans[1].samples[c][1].power, 1);
4329		gain  = interpolate(chan,
4330		    chans[0].num, chans[0].samples[c][1].gain,
4331		    chans[1].num, chans[1].samples[c][1].gain, 1);
4332		temp  = interpolate(chan,
4333		    chans[0].num, chans[0].samples[c][1].temp,
4334		    chans[1].num, chans[1].samples[c][1].temp, 1);
4335		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4336		    "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
4337		    __func__, c, power, gain, temp);
4338
4339		/* Compute temperature compensation. */
4340		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
4341		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4342		    "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
4343		    __func__, tdiff, sc->temp, temp);
4344
4345		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
4346			/* Convert dBm to half-dBm. */
4347			maxchpwr = sc->maxpwr[chan] * 2;
4348			if ((ridx / 8) & 1)
4349				maxchpwr -= 6;	/* MIMO 2T: -3dB */
4350
4351			pwr = maxpwr;
4352
4353			/* Adjust TX power based on rate. */
4354			if ((ridx % 8) == 5)
4355				pwr -= 15;	/* OFDM48: -7.5dB */
4356			else if ((ridx % 8) == 6)
4357				pwr -= 17;	/* OFDM54: -8.5dB */
4358			else if ((ridx % 8) == 7)
4359				pwr -= 20;	/* OFDM60: -10dB */
4360			else
4361				pwr -= 10;	/* Others: -5dB */
4362
4363			/* Do not exceed channel max TX power. */
4364			if (pwr > maxchpwr)
4365				pwr = maxchpwr;
4366
4367			idx = gain - (pwr - power) - tdiff - vdiff;
4368			if ((ridx / 8) & 1)	/* MIMO */
4369				idx += (int32_t)le32toh(uc->atten[grp][c]);
4370
4371			if (cmd.band == 0)
4372				idx += 9;	/* 5GHz */
4373			if (ridx == IWN_RIDX_MAX)
4374				idx += 5;	/* CCK */
4375
4376			/* Make sure idx stays in a valid range. */
4377			if (idx < 0)
4378				idx = 0;
4379			else if (idx > IWN4965_MAX_PWR_INDEX)
4380				idx = IWN4965_MAX_PWR_INDEX;
4381
4382			DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4383			    "%s: Tx chain %d, rate idx %d: power=%d\n",
4384			    __func__, c, ridx, idx);
4385			cmd.power[ridx].rf_gain[c] = rf_gain[idx];
4386			cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
4387		}
4388	}
4389
4390	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4391	    "%s: set tx power for chan %d\n", __func__, chan);
4392	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
4393
4394#undef interpolate
4395#undef fdivround
4396}
4397
4398static int
4399iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
4400    int async)
4401{
4402	struct iwn5000_cmd_txpower cmd;
4403
4404	/*
4405	 * TX power calibration is handled automatically by the firmware
4406	 * for 5000 Series.
4407	 */
4408	memset(&cmd, 0, sizeof cmd);
4409	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
4410	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
4411	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
4412	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__);
4413	return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
4414}
4415
4416/*
4417 * Retrieve the maximum RSSI (in dBm) among receivers.
4418 */
4419static int
4420iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
4421{
4422	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
4423	uint8_t mask, agc;
4424	int rssi;
4425
4426	mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
4427	agc  = (le16toh(phy->agc) >> 7) & 0x7f;
4428
4429	rssi = 0;
4430	if (mask & IWN_ANT_A)
4431		rssi = MAX(rssi, phy->rssi[0]);
4432	if (mask & IWN_ANT_B)
4433		rssi = MAX(rssi, phy->rssi[2]);
4434	if (mask & IWN_ANT_C)
4435		rssi = MAX(rssi, phy->rssi[4]);
4436
4437	DPRINTF(sc, IWN_DEBUG_RECV,
4438	    "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc,
4439	    mask, phy->rssi[0], phy->rssi[2], phy->rssi[4],
4440	    rssi - agc - IWN_RSSI_TO_DBM);
4441	return rssi - agc - IWN_RSSI_TO_DBM;
4442}
4443
4444static int
4445iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
4446{
4447	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
4448	uint8_t agc;
4449	int rssi;
4450
4451	agc = (le32toh(phy->agc) >> 9) & 0x7f;
4452
4453	rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
4454		   le16toh(phy->rssi[1]) & 0xff);
4455	rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
4456
4457	DPRINTF(sc, IWN_DEBUG_RECV,
4458	    "%s: agc %d rssi %d %d %d result %d\n", __func__, agc,
4459	    phy->rssi[0], phy->rssi[1], phy->rssi[2],
4460	    rssi - agc - IWN_RSSI_TO_DBM);
4461	return rssi - agc - IWN_RSSI_TO_DBM;
4462}
4463
4464/*
4465 * Retrieve the average noise (in dBm) among receivers.
4466 */
4467static int
4468iwn_get_noise(const struct iwn_rx_general_stats *stats)
4469{
4470	int i, total, nbant, noise;
4471
4472	total = nbant = 0;
4473	for (i = 0; i < 3; i++) {
4474		if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
4475			continue;
4476		total += noise;
4477		nbant++;
4478	}
4479	/* There should be at least one antenna but check anyway. */
4480	return (nbant == 0) ? -127 : (total / nbant) - 107;
4481}
4482
4483/*
4484 * Compute temperature (in degC) from last received statistics.
4485 */
4486static int
4487iwn4965_get_temperature(struct iwn_softc *sc)
4488{
4489	struct iwn_ucode_info *uc = &sc->ucode_info;
4490	int32_t r1, r2, r3, r4, temp;
4491
4492	r1 = le32toh(uc->temp[0].chan20MHz);
4493	r2 = le32toh(uc->temp[1].chan20MHz);
4494	r3 = le32toh(uc->temp[2].chan20MHz);
4495	r4 = le32toh(sc->rawtemp);
4496
4497	if (r1 == r3)	/* Prevents division by 0 (should not happen). */
4498		return 0;
4499
4500	/* Sign-extend 23-bit R4 value to 32-bit. */
4501	r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
4502	/* Compute temperature in Kelvin. */
4503	temp = (259 * (r4 - r2)) / (r3 - r1);
4504	temp = (temp * 97) / 100 + 8;
4505
4506	DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
4507	    IWN_KTOC(temp));
4508	return IWN_KTOC(temp);
4509}
4510
4511static int
4512iwn5000_get_temperature(struct iwn_softc *sc)
4513{
4514	int32_t temp;
4515
4516	/*
4517	 * Temperature is not used by the driver for 5000 Series because
4518	 * TX power calibration is handled by firmware.
4519	 */
4520	temp = le32toh(sc->rawtemp);
4521	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
4522		temp = (temp / -5) + sc->temp_off;
4523		temp = IWN_KTOC(temp);
4524	}
4525	return temp;
4526}
4527
4528/*
4529 * Initialize sensitivity calibration state machine.
4530 */
4531static int
4532iwn_init_sensitivity(struct iwn_softc *sc)
4533{
4534	struct iwn_ops *ops = &sc->ops;
4535	struct iwn_calib_state *calib = &sc->calib;
4536	uint32_t flags;
4537	int error;
4538
4539	/* Reset calibration state machine. */
4540	memset(calib, 0, sizeof (*calib));
4541	calib->state = IWN_CALIB_STATE_INIT;
4542	calib->cck_state = IWN_CCK_STATE_HIFA;
4543	/* Set initial correlation values. */
4544	calib->ofdm_x1     = sc->limits->min_ofdm_x1;
4545	calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
4546	calib->ofdm_x4     = sc->limits->min_ofdm_x4;
4547	calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
4548	calib->cck_x4      = 125;
4549	calib->cck_mrc_x4  = sc->limits->min_cck_mrc_x4;
4550	calib->energy_cck  = sc->limits->energy_cck;
4551
4552	/* Write initial sensitivity. */
4553	if ((error = iwn_send_sensitivity(sc)) != 0)
4554		return error;
4555
4556	/* Write initial gains. */
4557	if ((error = ops->init_gains(sc)) != 0)
4558		return error;
4559
4560	/* Request statistics at each beacon interval. */
4561	flags = 0;
4562	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n",
4563	    __func__);
4564	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
4565}
4566
4567/*
4568 * Collect noise and RSSI statistics for the first 20 beacons received
4569 * after association and use them to determine connected antennas and
4570 * to set differential gains.
4571 */
4572static void
4573iwn_collect_noise(struct iwn_softc *sc,
4574    const struct iwn_rx_general_stats *stats)
4575{
4576	struct iwn_ops *ops = &sc->ops;
4577	struct iwn_calib_state *calib = &sc->calib;
4578	uint32_t val;
4579	int i;
4580
4581	/* Accumulate RSSI and noise for all 3 antennas. */
4582	for (i = 0; i < 3; i++) {
4583		calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
4584		calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
4585	}
4586	/* NB: We update differential gains only once after 20 beacons. */
4587	if (++calib->nbeacons < 20)
4588		return;
4589
4590	/* Determine highest average RSSI. */
4591	val = MAX(calib->rssi[0], calib->rssi[1]);
4592	val = MAX(calib->rssi[2], val);
4593
4594	/* Determine which antennas are connected. */
4595	sc->chainmask = sc->rxchainmask;
4596	for (i = 0; i < 3; i++)
4597		if (val - calib->rssi[i] > 15 * 20)
4598			sc->chainmask &= ~(1 << i);
4599	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4600	    "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
4601	    __func__, sc->rxchainmask, sc->chainmask);
4602
4603	/* If none of the TX antennas are connected, keep at least one. */
4604	if ((sc->chainmask & sc->txchainmask) == 0)
4605		sc->chainmask |= IWN_LSB(sc->txchainmask);
4606
4607	(void)ops->set_gains(sc);
4608	calib->state = IWN_CALIB_STATE_RUN;
4609
4610#ifdef notyet
4611	/* XXX Disable RX chains with no antennas connected. */
4612	sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
4613	(void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
4614#endif
4615
4616#if 0
4617	/* XXX: not yet */
4618	/* Enable power-saving mode if requested by user. */
4619	if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON)
4620		(void)iwn_set_pslevel(sc, 0, 3, 1);
4621#endif
4622}
4623
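/*
 * Reset differential gains to 0 on all antennas before collecting noise
 * statistics (4965AGN).
 */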
4624static int
4625iwn4965_init_gains(struct iwn_softc *sc)
4626{
4627	struct iwn_phy_calib_gain cmd;
4628
4629	memset(&cmd, 0, sizeof cmd);
4630	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4631	/* Differential gains initially set to 0 for all 3 antennas. */
4632	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4633	    "%s: setting initial differential gains\n", __func__);
4634	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4635}
4636
4637static int
4638iwn5000_init_gains(struct iwn_softc *sc)
4639{
4640	struct iwn_phy_calib cmd;
4641
4642	memset(&cmd, 0, sizeof cmd);
4643	cmd.code = sc->reset_noise_gain;
4644	cmd.ngroups = 1;
4645	cmd.isvalid = 1;
4646	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4647	    "%s: setting initial differential gains\n", __func__);
4648	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4649}
4650
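/*
 * Compute and send differential gain corrections for the connected
 * antennas, based on the noise statistics collected over the last
 * 20 beacons (4965AGN).
 */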
4651static int
4652iwn4965_set_gains(struct iwn_softc *sc)
4653{
4654	struct iwn_calib_state *calib = &sc->calib;
4655	struct iwn_phy_calib_gain cmd;
4656	int i, delta, noise;
4657
4658	/* Get minimal noise among connected antennas. */
4659	noise = INT_MAX;	/* NB: There's at least one antenna. */
4660	for (i = 0; i < 3; i++)
4661		if (sc->chainmask & (1 << i))
4662			noise = MIN(calib->noise[i], noise);
4663
4664	memset(&cmd, 0, sizeof cmd);
4665	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4666	/* Set differential gains for connected antennas. */
4667	for (i = 0; i < 3; i++) {
4668		if (sc->chainmask & (1 << i)) {
4669			/* Compute attenuation (in unit of 1.5dB). */
4670			delta = (noise - (int32_t)calib->noise[i]) / 30;
4671			/* NB: delta <= 0 */
4672			/* Limit to [-4.5dB,0]. */
4673			cmd.gain[i] = MIN(abs(delta), 3);
4674			if (delta < 0)
4675				cmd.gain[i] |= 1 << 2;	/* sign bit */
4676		}
4677	}
4678	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4679	    "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
4680	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
4681	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4682}
4683
4684static int
4685iwn5000_set_gains(struct iwn_softc *sc)
4686{
4687	struct iwn_calib_state *calib = &sc->calib;
4688	struct iwn_phy_calib_gain cmd;
4689	int i, ant, div, delta;
4690
4691	/* We collected 20 beacons; non-6050 adapters need an extra 1.5 factor. */
4692	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
4693
4694	memset(&cmd, 0, sizeof cmd);
4695	cmd.code = sc->noise_gain;
4696	cmd.ngroups = 1;
4697	cmd.isvalid = 1;
4698	/* Use the first available RX antenna as the reference. */
4699	ant = IWN_LSB(sc->rxchainmask);
4700	/* Set differential gains for other antennas. */
4701	for (i = ant + 1; i < 3; i++) {
4702		if (sc->chainmask & (1 << i)) {
4703			/* The delta is relative to antenna "ant". */
4704			delta = ((int32_t)calib->noise[ant] -
4705			    (int32_t)calib->noise[i]) / div;
4706			/* Limit to [-4.5dB,+4.5dB]. */
4707			cmd.gain[i - 1] = MIN(abs(delta), 3);
4708			if (delta < 0)
4709				cmd.gain[i - 1] |= 1 << 2;	/* sign bit */
4710		}
4711	}
4712	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4713	    "setting differential gains Ant B/C: %x/%x (%x)\n",
4714	    cmd.gain[0], cmd.gain[1], sc->chainmask);
4715	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4716}
4717
4718/*
4719 * Tune RF RX sensitivity based on the number of false alarms detected
4720 * during the last beacon period.
4721 */
4722static void
4723iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
4724{
4725#define inc(val, inc, max)			\
4726	if ((val) < (max)) {			\
4727		if ((val) < (max) - (inc))	\
4728			(val) += (inc);		\
4729		else				\
4730			(val) = (max);		\
4731		needs_update = 1;		\
4732	}
4733#define dec(val, dec, min)			\
4734	if ((val) > (min)) {			\
4735		if ((val) > (min) + (dec))	\
4736			(val) -= (dec);		\
4737		else				\
4738			(val) = (min);		\
4739		needs_update = 1;		\
4740	}
4741
4742	const struct iwn_sensitivity_limits *limits = sc->limits;
4743	struct iwn_calib_state *calib = &sc->calib;
4744	uint32_t val, rxena, fa;
4745	uint32_t energy[3], energy_min;
4746	uint8_t noise[3], noise_ref;
4747	int i, needs_update = 0;
4748
4749	/* Check that we've been enabled long enough. */
4750	if ((rxena = le32toh(stats->general.load)) == 0)
4751		return;
4752
4753	/* Compute number of false alarms since last call for OFDM. */
4754	fa  = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
4755	fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
4756	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */
4757
4758	/* Save counters values for next call. */
4759	calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp);
4760	calib->fa_ofdm = le32toh(stats->ofdm.fa);
4761
4762	if (fa > 50 * rxena) {
4763		/* High false alarm count, decrease sensitivity. */
4764		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4765		    "%s: OFDM high false alarm count: %u\n", __func__, fa);
4766		inc(calib->ofdm_x1,     1, limits->max_ofdm_x1);
4767		inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
4768		inc(calib->ofdm_x4,     1, limits->max_ofdm_x4);
4769		inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
4770
4771	} else if (fa < 5 * rxena) {
4772		/* Low false alarm count, increase sensitivity. */
4773		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4774		    "%s: OFDM low false alarm count: %u\n", __func__, fa);
4775		dec(calib->ofdm_x1,     1, limits->min_ofdm_x1);
4776		dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
4777		dec(calib->ofdm_x4,     1, limits->min_ofdm_x4);
4778		dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
4779	}
4780
4781	/* Compute maximum noise among 3 receivers. */
4782	for (i = 0; i < 3; i++)
4783		noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
4784	val = MAX(noise[0], noise[1]);
4785	val = MAX(noise[2], val);
4786	/* Insert it into our samples table. */
4787	calib->noise_samples[calib->cur_noise_sample] = val;
4788	calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
4789
4790	/* Compute maximum noise among last 20 samples. */
4791	noise_ref = calib->noise_samples[0];
4792	for (i = 1; i < 20; i++)
4793		noise_ref = MAX(noise_ref, calib->noise_samples[i]);
4794
4795	/* Compute maximum energy among 3 receivers. */
4796	for (i = 0; i < 3; i++)
4797		energy[i] = le32toh(stats->general.energy[i]);
4798	val = MIN(energy[0], energy[1]);
4799	val = MIN(energy[2], val);
4800	/* Insert it into our samples table. */
4801	calib->energy_samples[calib->cur_energy_sample] = val;
4802	calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
4803
4804	/* Compute minimum energy among last 10 samples. */
4805	energy_min = calib->energy_samples[0];
4806	for (i = 1; i < 10; i++)
4807		energy_min = MAX(energy_min, calib->energy_samples[i]);
4808	energy_min += 6;
4809
4810	/* Compute number of false alarms since last call for CCK. */
4811	fa  = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
4812	fa += le32toh(stats->cck.fa) - calib->fa_cck;
4813	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */
4814
4815	/* Save counters values for next call. */
4816	calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp);
4817	calib->fa_cck = le32toh(stats->cck.fa);
4818
4819	if (fa > 50 * rxena) {
4820		/* High false alarm count, decrease sensitivity. */
4821		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4822		    "%s: CCK high false alarm count: %u\n", __func__, fa);
4823		calib->cck_state = IWN_CCK_STATE_HIFA;
4824		calib->low_fa = 0;
4825
4826		if (calib->cck_x4 > 160) {
4827			calib->noise_ref = noise_ref;
4828			if (calib->energy_cck > 2)
4829				dec(calib->energy_cck, 2, energy_min);
4830		}
4831		if (calib->cck_x4 < 160) {
4832			calib->cck_x4 = 161;
4833			needs_update = 1;
4834		} else
4835			inc(calib->cck_x4, 3, limits->max_cck_x4);
4836
4837		inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
4838
4839	} else if (fa < 5 * rxena) {
4840		/* Low false alarm count, increase sensitivity. */
4841		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4842		    "%s: CCK low false alarm count: %u\n", __func__, fa);
4843		calib->cck_state = IWN_CCK_STATE_LOFA;
4844		calib->low_fa++;
4845
4846		if (calib->cck_state != IWN_CCK_STATE_INIT &&
4847		    (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
4848		     calib->low_fa > 100)) {
4849			inc(calib->energy_cck, 2, limits->min_energy_cck);
4850			dec(calib->cck_x4,     3, limits->min_cck_x4);
4851			dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
4852		}
4853	} else {
4854		/* Not worth increasing or decreasing sensitivity. */
4855		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4856		    "%s: CCK normal false alarm count: %u\n", __func__, fa);
4857		calib->low_fa = 0;
4858		calib->noise_ref = noise_ref;
4859
4860		if (calib->cck_state == IWN_CCK_STATE_HIFA) {
4861			/* Previous interval had many false alarms. */
4862			dec(calib->energy_cck, 8, energy_min);
4863		}
4864		calib->cck_state = IWN_CCK_STATE_INIT;
4865	}
4866
4867	if (needs_update)
4868		(void)iwn_send_sensitivity(sc);
4869#undef dec
4870#undef inc
4871}
4872
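/*
 * Upload the current OFDM/CCK sensitivity thresholds to the firmware,
 * using the enhanced command layout when the adapter supports it.
 */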
4873static int
4874iwn_send_sensitivity(struct iwn_softc *sc)
4875{
4876	struct iwn_calib_state *calib = &sc->calib;
4877	struct iwn_enhanced_sensitivity_cmd cmd;
4878	int len;
4879
4880	memset(&cmd, 0, sizeof cmd);
4881	len = sizeof (struct iwn_sensitivity_cmd);
4882	cmd.which = IWN_SENSITIVITY_WORKTBL;
4883	/* OFDM modulation. */
4884	cmd.corr_ofdm_x1       = htole16(calib->ofdm_x1);
4885	cmd.corr_ofdm_mrc_x1   = htole16(calib->ofdm_mrc_x1);
4886	cmd.corr_ofdm_x4       = htole16(calib->ofdm_x4);
4887	cmd.corr_ofdm_mrc_x4   = htole16(calib->ofdm_mrc_x4);
4888	cmd.energy_ofdm        = htole16(sc->limits->energy_ofdm);
4889	cmd.energy_ofdm_th     = htole16(62);
4890	/* CCK modulation. */
4891	cmd.corr_cck_x4        = htole16(calib->cck_x4);
4892	cmd.corr_cck_mrc_x4    = htole16(calib->cck_mrc_x4);
4893	cmd.energy_cck         = htole16(calib->energy_cck);
4894	/* Barker modulation: use default values. */
4895	cmd.corr_barker        = htole16(190);
4896	cmd.corr_barker_mrc    = htole16(390);
4897
4898	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4899	    "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
4900	    calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
4901	    calib->ofdm_mrc_x4, calib->cck_x4,
4902	    calib->cck_mrc_x4, calib->energy_cck);
4903
4904	if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
4905		goto send;
4906	/* Enhanced sensitivity settings. */
4907	len = sizeof (struct iwn_enhanced_sensitivity_cmd);
4908	cmd.ofdm_det_slope_mrc = htole16(668);
4909	cmd.ofdm_det_icept_mrc = htole16(4);
4910	cmd.ofdm_det_slope     = htole16(486);
4911	cmd.ofdm_det_icept     = htole16(37);
4912	cmd.cck_det_slope_mrc  = htole16(853);
4913	cmd.cck_det_icept_mrc  = htole16(4);
4914	cmd.cck_det_slope      = htole16(476);
4915	cmd.cck_det_icept      = htole16(99);
4916send:
4917	return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
4918}
4919
4920/*
4921 * Set STA mode power saving level (between 0 and 5).
4922 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
4923 */
4924static int
4925iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
4926{
4927	struct iwn_pmgt_cmd cmd;
4928	const struct iwn_pmgt *pmgt;
4929	uint32_t max, skip_dtim;
4930	uint32_t reg;
4931	int i;
4932
4933	/* Select which PS parameters to use. */
4934	if (dtim <= 2)
4935		pmgt = &iwn_pmgt[0][level];
4936	else if (dtim <= 10)
4937		pmgt = &iwn_pmgt[1][level];
4938	else
4939		pmgt = &iwn_pmgt[2][level];
4940
4941	memset(&cmd, 0, sizeof cmd);
4942	if (level != 0)	/* not CAM */
4943		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
4944	if (level == 5)
4945		cmd.flags |= htole16(IWN_PS_FAST_PD);
4946	/* Retrieve PCIe Active State Power Management (ASPM). */
4947	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
4948	if (!(reg & 0x1))	/* L0s Entry disabled. */
4949		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
4950	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
4951	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
4952
4953	if (dtim == 0) {
4954		dtim = 1;
4955		skip_dtim = 0;
4956	} else
4957		skip_dtim = pmgt->skip_dtim;
4958	if (skip_dtim != 0) {
4959		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
4960		max = pmgt->intval[4];
4961		if (max == (uint32_t)-1)
4962			max = dtim * (skip_dtim + 1);
4963		else if (max > dtim)
4964			max = (max / dtim) * dtim;
4965	} else
4966		max = dtim;
4967	for (i = 0; i < 5; i++)
4968		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
4969
4970	DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
4971	    level);
4972	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
4973}
4974
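/*
 * Configure basic bluetooth coexistence with default parameters.
 */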
4975static int
4976iwn_send_btcoex(struct iwn_softc *sc)
4977{
4978	struct iwn_bluetooth cmd;
4979
4980	memset(&cmd, 0, sizeof cmd);
4981	cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
4982	cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
4983	cmd.max_kill = IWN_BT_MAX_KILL_DEF;
4984	DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
4985	    __func__);
4986	return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
4987}
4988
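/*
 * Configure advanced bluetooth coexistence (adapters with the
 * IWN_FLAG_ADV_BTCOEX capability): upload the 3-wire lookup table and the
 * priority table, then toggle the protection state machine.
 */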
4989static int
4990iwn_send_advanced_btcoex(struct iwn_softc *sc)
4991{
4992	static const uint32_t btcoex_3wire[12] = {
4993		0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
4994		0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
4995		0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
4996	};
4997	struct iwn6000_btcoex_config btconfig;
4998	struct iwn_btcoex_priotable btprio;
4999	struct iwn_btcoex_prot btprot;
5000	int error, i;
5001
5002	memset(&btconfig, 0, sizeof btconfig);
5003	btconfig.flags = 145;
5004	btconfig.max_kill = 5;
5005	btconfig.bt3_t7_timer = 1;
5006	btconfig.kill_ack = htole32(0xffff0000);
5007	btconfig.kill_cts = htole32(0xffff0000);
5008	btconfig.sample_time = 2;
5009	btconfig.bt3_t2_timer = 0xc;
5010	for (i = 0; i < 12; i++)
5011		btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
5012	btconfig.valid = htole16(0xff);
5013	btconfig.prio_boost = 0xf0;
5014	DPRINTF(sc, IWN_DEBUG_RESET,
5015	    "%s: configuring advanced bluetooth coexistence\n", __func__);
5016	error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, sizeof(btconfig), 1);
5017	if (error != 0)
5018		return error;
5019
5020	memset(&btprio, 0, sizeof btprio);
5021	btprio.calib_init1 = 0x6;
5022	btprio.calib_init2 = 0x7;
5023	btprio.calib_periodic_low1 = 0x2;
5024	btprio.calib_periodic_low2 = 0x3;
5025	btprio.calib_periodic_high1 = 0x4;
5026	btprio.calib_periodic_high2 = 0x5;
5027	btprio.dtim = 0x6;
5028	btprio.scan52 = 0x8;
5029	btprio.scan24 = 0xa;
5030	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
5031	    1);
5032	if (error != 0)
5033		return error;
5034
5035	/* Force BT state machine change. */
5036	memset(&btprot, 0, sizeof btprot);
5037	btprot.open = 1;
5038	btprot.type = 1;
5039	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
5040	if (error != 0)
5041		return error;
5042	btprot.open = 0;
5043	return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
5044}
5045
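/*
 * Ask the firmware to perform DC calibration at runtime.
 */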
5046static int
5047iwn5000_runtime_calib(struct iwn_softc *sc)
5048{
5049	struct iwn5000_calib_config cmd;
5050
5051	memset(&cmd, 0, sizeof cmd);
5052	cmd.ucode.once.enable = 0xffffffff;
5053	cmd.ucode.once.start = IWN5000_CALIB_DC;
5054	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5055	    "%s: configuring runtime calibration\n", __func__);
5056	return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
5057}
5058
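/*
 * Perform adapter-wide configuration: valid TX chains, bluetooth
 * coexistence, RXON (mode, channel, RX filter), broadcast node, TX power,
 * critical temperature and power saving level.
 */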
5059static int
5060iwn_config(struct iwn_softc *sc)
5061{
5062	struct iwn_ops *ops = &sc->ops;
5063	struct ifnet *ifp = sc->sc_ifp;
5064	struct ieee80211com *ic = ifp->if_l2com;
5065	uint32_t txmask;
5066	uint16_t rxchain;
5067	int error;
5068
5069	if (sc->hw_type == IWN_HW_REV_TYPE_6005) {
5070		/* Set radio temperature sensor offset. */
5071		error = iwn5000_temp_offset_calib(sc);
5072		if (error != 0) {
5073			device_printf(sc->sc_dev,
5074			    "%s: could not set temperature offset\n", __func__);
5075			return error;
5076		}
5077	}
5078
5079	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
5080		/* Configure runtime DC calibration. */
5081		error = iwn5000_runtime_calib(sc);
5082		if (error != 0) {
5083			device_printf(sc->sc_dev,
5084			    "%s: could not configure runtime calibration\n",
5085			    __func__);
5086			return error;
5087		}
5088	}
5089
5090	/* Configure valid TX chains for >=5000 Series. */
5091	if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
5092		txmask = htole32(sc->txchainmask);
5093		DPRINTF(sc, IWN_DEBUG_RESET,
5094		    "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
5095		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
5096		    sizeof txmask, 0);
5097		if (error != 0) {
5098			device_printf(sc->sc_dev,
5099			    "%s: could not configure valid TX chains, "
5100			    "error %d\n", __func__, error);
5101			return error;
5102		}
5103	}
5104
5105	/* Configure bluetooth coexistence. */
5106	if (sc->sc_flags & IWN_FLAG_ADV_BTCOEX)
5107		error = iwn_send_advanced_btcoex(sc);
5108	else
5109		error = iwn_send_btcoex(sc);
5110	if (error != 0) {
5111		device_printf(sc->sc_dev,
5112		    "%s: could not configure bluetooth coexistence, error %d\n",
5113		    __func__, error);
5114		return error;
5115	}
5116
5117	/* Set mode, channel, RX filter and enable RX. */
5118	memset(&sc->rxon, 0, sizeof (struct iwn_rxon));
5119	IEEE80211_ADDR_COPY(sc->rxon.myaddr, IF_LLADDR(ifp));
5120	IEEE80211_ADDR_COPY(sc->rxon.wlap, IF_LLADDR(ifp));
5121	sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
5122	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5123	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
5124		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5125	switch (ic->ic_opmode) {
5126	case IEEE80211_M_STA:
5127		sc->rxon.mode = IWN_MODE_STA;
5128		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
5129		break;
5130	case IEEE80211_M_MONITOR:
5131		sc->rxon.mode = IWN_MODE_MONITOR;
5132		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST |
5133		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
5134		break;
5135	default:
5136		/* Should not get here. */
5137		break;
5138	}
5139	sc->rxon.cck_mask  = 0x0f;	/* not yet negotiated */
5140	sc->rxon.ofdm_mask = 0xff;	/* not yet negotiated */
5141	sc->rxon.ht_single_mask = 0xff;
5142	sc->rxon.ht_dual_mask = 0xff;
5143	sc->rxon.ht_triple_mask = 0xff;
5144	rxchain =
5145	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
5146	    IWN_RXCHAIN_MIMO_COUNT(2) |
5147	    IWN_RXCHAIN_IDLE_COUNT(2);
5148	sc->rxon.rxchain = htole16(rxchain);
5149	DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__);
5150	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0);
5151	if (error != 0) {
5152		device_printf(sc->sc_dev, "%s: RXON command failed\n",
5153		    __func__);
5154		return error;
5155	}
5156
5157	if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
5158		device_printf(sc->sc_dev, "%s: could not add broadcast node\n",
5159		    __func__);
5160		return error;
5161	}
5162
5163	/* Configuration has changed, set TX power accordingly. */
5164	if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) {
5165		device_printf(sc->sc_dev, "%s: could not set TX power\n",
5166		    __func__);
5167		return error;
5168	}
5169
5170	if ((error = iwn_set_critical_temp(sc)) != 0) {
5171		device_printf(sc->sc_dev,
5172		    "%s: could not set critical temperature\n", __func__);
5173		return error;
5174	}
5175
5176	/* Set power saving level to CAM during initialization. */
5177	if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
5178		device_printf(sc->sc_dev,
5179		    "%s: could not set power saving level\n", __func__);
5180		return error;
5181	}
5182	return 0;
5183}
5184
5185/*
5186 * Add an ssid element to a frame.
5187 */
5188static uint8_t *
5189ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len)
5190{
5191	*frm++ = IEEE80211_ELEMID_SSID;
5192	*frm++ = len;
5193	memcpy(frm, ssid, len);
5194	return frm + len;
5195}
5196
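/*
 * Build and send a firmware scan command for the current channel,
 * including a broadcast probe request for active scanning.
 */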
5197static int
5198iwn_scan(struct iwn_softc *sc)
5199{
5200	struct ifnet *ifp = sc->sc_ifp;
5201	struct ieee80211com *ic = ifp->if_l2com;
5202	struct ieee80211_scan_state *ss = ic->ic_scan;	/*XXX*/
5203	struct ieee80211_node *ni = ss->ss_vap->iv_bss;
5204	struct iwn_scan_hdr *hdr;
5205	struct iwn_cmd_data *tx;
5206	struct iwn_scan_essid *essid;
5207	struct iwn_scan_chan *chan;
5208	struct ieee80211_frame *wh;
5209	struct ieee80211_rateset *rs;
5210	struct ieee80211_channel *c;
5211	uint8_t *buf, *frm;
5212	uint16_t rxchain;
5213	uint8_t txant;
5214	int buflen, error;
5215
5216	buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
5217	if (buf == NULL) {
5218		device_printf(sc->sc_dev,
5219		    "%s: could not allocate buffer for scan command\n",
5220		    __func__);
5221		return ENOMEM;
5222	}
5223	hdr = (struct iwn_scan_hdr *)buf;
5224	/*
5225	 * Move to the next channel if no frames are received within 10ms
5226	 * after sending the probe request.
5227	 */
5228	hdr->quiet_time = htole16(10);		/* timeout in milliseconds */
5229	hdr->quiet_threshold = htole16(1);	/* min # of packets */
5230
5231	/* Select antennas for scanning. */
5232	rxchain =
5233	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
5234	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
5235	    IWN_RXCHAIN_DRIVER_FORCE;
5236	if (IEEE80211_IS_CHAN_A(ic->ic_curchan) &&
5237	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
5238		/* Ant A must be avoided in 5GHz because of an HW bug. */
5239		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
5240	} else	/* Use all available RX antennas. */
5241		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
5242	hdr->rxchain = htole16(rxchain);
5243	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
5244
5245	tx = (struct iwn_cmd_data *)(hdr + 1);
5246	tx->flags = htole32(IWN_TX_AUTO_SEQ);
5247	tx->id = sc->broadcast_id;
5248	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
5249
5250	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) {
5251		/* Send probe requests at 6Mbps. */
5252		tx->rate = htole32(0xd);
5253		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5254	} else {
5255		hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
5256		if (sc->hw_type == IWN_HW_REV_TYPE_4965 &&
5257		    sc->rxon.associd && sc->rxon.chan > 14)
5258			tx->rate = htole32(0xd);
5259		else {
5260			/* Send probe requests at 1Mbps. */
5261			tx->rate = htole32(10 | IWN_RFLAG_CCK);
5262		}
5263		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5264	}
5265	/* Use the first valid TX antenna. */
5266	txant = IWN_LSB(sc->txchainmask);
5267	tx->rate |= htole32(IWN_RFLAG_ANT(txant));
5268
5269	essid = (struct iwn_scan_essid *)(tx + 1);
5270	if (ss->ss_ssid[0].len != 0) {
5271		essid[0].id = IEEE80211_ELEMID_SSID;
5272		essid[0].len = ss->ss_ssid[0].len;
5273		memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
5274	}
5275	/*
5276	 * Build a probe request frame.  Most of the following code is a
5277	 * copy & paste of what is done in net80211.
5278	 */
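	/* The probe request template follows the array of 20 ESSID entries. */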
5279	wh = (struct ieee80211_frame *)(essid + 20);
5280	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5281	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5282	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5283	IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
5284	IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp));
5285	IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
5286	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
5287	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
5288
5289	frm = (uint8_t *)(wh + 1);
5290	frm = ieee80211_add_ssid(frm, NULL, 0);
5291	frm = ieee80211_add_rates(frm, rs);
5292	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5293		frm = ieee80211_add_xrates(frm, rs);
5294	if (ic->ic_htcaps & IEEE80211_HTC_HT)
5295		frm = ieee80211_add_htcap(frm, ni);
5296
5297	/* Set length of probe request. */
5298	tx->len = htole16(frm - (uint8_t *)wh);
5299
5300	c = ic->ic_curchan;
5301	chan = (struct iwn_scan_chan *)frm;
5302	chan->chan = htole16(ieee80211_chan2ieee(ic, c));
5303	chan->flags = 0;
5304	if (ss->ss_nssid > 0)
5305		chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
5306	chan->dsp_gain = 0x6e;
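	/*
	 * The active/passive values below are per-channel dwell times in
	 * milliseconds; shorter passive dwells are used while associated
	 * so that scanning does not keep us off-channel for too long.
	 */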
5307	if (IEEE80211_IS_CHAN_5GHZ(c) &&
5308	    !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
5309		chan->rf_gain = 0x3b;
5310		chan->active  = htole16(24);
5311		chan->passive = htole16(110);
5312		chan->flags |= htole32(IWN_CHAN_ACTIVE);
5313	} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
5314		chan->rf_gain = 0x3b;
5315		chan->active  = htole16(24);
5316		if (sc->rxon.associd)
5317			chan->passive = htole16(78);
5318		else
5319			chan->passive = htole16(110);
5320		hdr->crc_threshold = 0xffff;
5321	} else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
5322		chan->rf_gain = 0x28;
5323		chan->active  = htole16(36);
5324		chan->passive = htole16(120);
5325		chan->flags |= htole32(IWN_CHAN_ACTIVE);
5326	} else {
5327		chan->rf_gain = 0x28;
5328		chan->active  = htole16(36);
5329		if (sc->rxon.associd)
5330			chan->passive = htole16(88);
5331		else
5332			chan->passive = htole16(120);
5333		hdr->crc_threshold = 0xffff;
5334	}
5335
5336	DPRINTF(sc, IWN_DEBUG_STATE,
5337	    "%s: chan %u flags 0x%x rf_gain 0x%x "
5338	    "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__,
5339	    chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
5340	    chan->active, chan->passive);
5341
5342	hdr->nchan++;
5343	chan++;
5344	buflen = (uint8_t *)chan - buf;
5345	hdr->len = htole16(buflen);
5346
5347	DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
5348	    hdr->nchan);
5349	error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
5350	free(buf, M_DEVBUF);
5351	return error;
5352}
5353
5354static int
5355iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
5356{
5357	struct iwn_ops *ops = &sc->ops;
5358	struct ifnet *ifp = sc->sc_ifp;
5359	struct ieee80211com *ic = ifp->if_l2com;
5360	struct ieee80211_node *ni = vap->iv_bss;
5361	int error;
5362
5363	/* Update adapter configuration. */
5364	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
5365	sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
5366	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5367	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
5368		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5369	if (ic->ic_flags & IEEE80211_F_SHSLOT)
5370		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
5371	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5372		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
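	/*
	 * cck_mask/ofdm_mask select the basic rates: each bit of ofdm_mask
	 * maps to an OFDM rate starting at 6Mbps, so 0x15 enables 6, 12 and
	 * 24Mbps, while 0x0f in cck_mask enables 1, 2, 5.5 and 11Mbps.
	 */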
5373	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
5374		sc->rxon.cck_mask  = 0;
5375		sc->rxon.ofdm_mask = 0x15;
5376	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
5377		sc->rxon.cck_mask  = 0x03;
5378		sc->rxon.ofdm_mask = 0;
5379	} else {
5380		/* Assume 802.11b/g. */
5381		sc->rxon.cck_mask  = 0x0f;
5382		sc->rxon.ofdm_mask = 0x15;
5383	}
5384	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
5385	    sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask,
5386	    sc->rxon.ofdm_mask);
5387	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5388	if (error != 0) {
5389		device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n",
5390		    __func__, error);
5391		return error;
5392	}
5393
5394	/* Configuration has changed, set TX power accordingly. */
5395	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
5396		device_printf(sc->sc_dev,
5397		    "%s: could not set TX power, error %d\n", __func__, error);
5398		return error;
5399	}
5400	/*
5401	 * Reconfiguring RXON clears the firmware nodes table so we must
5402	 * add the broadcast node again.
5403	 */
5404	if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
5405		device_printf(sc->sc_dev,
5406		    "%s: could not add broadcast node, error %d\n", __func__,
5407		    error);
5408		return error;
5409	}
5410	return 0;
5411}
5412
5413static int
5414iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
5415{
5416	struct iwn_ops *ops = &sc->ops;
5417	struct ifnet *ifp = sc->sc_ifp;
5418	struct ieee80211com *ic = ifp->if_l2com;
5419	struct ieee80211_node *ni = vap->iv_bss;
5420	struct iwn_node_info node;
5421	uint32_t htflags = 0;
5422	int error;
5423
5424	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5425		/* Link LED blinks while monitoring. */
5426		iwn_set_led(sc, IWN_LED_LINK, 5, 5);
5427		return 0;
5428	}
5429	if ((error = iwn_set_timing(sc, ni)) != 0) {
5430		device_printf(sc->sc_dev,
5431		    "%s: could not set timing, error %d\n", __func__, error);
5432		return error;
5433	}
5434
5435	/* Update adapter configuration. */
5436	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
5437	sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd));
5438	sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
5439	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5440	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
5441		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5442	if (ic->ic_flags & IEEE80211_F_SHSLOT)
5443		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
5444	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5445		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
5446	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
5447		sc->rxon.cck_mask  = 0;
5448		sc->rxon.ofdm_mask = 0x15;
5449	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
5450		sc->rxon.cck_mask  = 0x03;
5451		sc->rxon.ofdm_mask = 0;
5452	} else {
5453		/* Assume 802.11b/g. */
5454		sc->rxon.cck_mask  = 0x0f;
5455		sc->rxon.ofdm_mask = 0x15;
5456	}
5457	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
5458		htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode);
5459		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
5460			switch (ic->ic_curhtprotmode) {
5461			case IEEE80211_HTINFO_OPMODE_HT20PR:
5462				htflags |= IWN_RXON_HT_MODEPURE40;
5463				break;
5464			default:
5465				htflags |= IWN_RXON_HT_MODEMIXED;
5466				break;
5467			}
5468		}
5469		if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
5470			htflags |= IWN_RXON_HT_HT40MINUS;
5471	}
5472	sc->rxon.flags |= htole32(htflags);
5473	sc->rxon.filter |= htole32(IWN_FILTER_BSS);
5474	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x\n",
5475	    sc->rxon.chan, sc->rxon.flags);
5476	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5477	if (error != 0) {
5478		device_printf(sc->sc_dev,
5479		    "%s: could not update configuration, error %d\n", __func__,
5480		    error);
5481		return error;
5482	}
5483
5484	/* Configuration has changed, set TX power accordingly. */
5485	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
5486		device_printf(sc->sc_dev,
5487		    "%s: could not set TX power, error %d\n", __func__, error);
5488		return error;
5489	}
5490
5491	/* Fake a join to initialize the TX rate. */
5492	((struct iwn_node *)ni)->id = IWN_ID_BSS;
5493	iwn_newassoc(ni, 1);
5494
5495	/* Add BSS node. */
5496	memset(&node, 0, sizeof node);
5497	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
5498	node.id = IWN_ID_BSS;
5499	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
5500		switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) {
5501		case IEEE80211_HTCAP_SMPS_ENA:
5502			node.htflags |= htole32(IWN_SMPS_MIMO_DIS);
5503			break;
5504		case IEEE80211_HTCAP_SMPS_DYNAMIC:
5505			node.htflags |= htole32(IWN_SMPS_MIMO_PROT);
5506			break;
5507		}
5508		node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) |
5509		    IWN_AMDPU_DENSITY(5));	/* 4us */
5510		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
5511			node.htflags |= htole32(IWN_NODE_HT40);
5512	}
5513	DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__);
5514	error = ops->add_node(sc, &node, 1);
5515	if (error != 0) {
5516		device_printf(sc->sc_dev,
5517		    "%s: could not add BSS node, error %d\n", __func__, error);
5518		return error;
5519	}
5520	DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n",
5521	    __func__, node.id);
5522	if ((error = iwn_set_link_quality(sc, ni)) != 0) {
5523		device_printf(sc->sc_dev,
5524		    "%s: could not set up link quality for node %d, error %d\n",
5525		    __func__, node.id, error);
5526		return error;
5527	}
5528
5529	if ((error = iwn_init_sensitivity(sc)) != 0) {
5530		device_printf(sc->sc_dev,
5531		    "%s: could not set sensitivity, error %d\n", __func__,
5532		    error);
5533		return error;
5534	}
5535	/* Start periodic calibration timer. */
5536	sc->calib.state = IWN_CALIB_STATE_ASSOC;
5537	sc->calib_cnt = 0;
5538	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
5539	    sc);
5540
5541	/* Link LED always on while associated. */
5542	iwn_set_led(sc, IWN_LED_LINK, 0, 1);
5543	return 0;
5544}
5545
5546/*
5547 * This function is called by the upper layer when an ADDBA request is
5548 * received from another STA, before the ADDBA response is sent.
5549 */
5550static int
5551iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
5552    int baparamset, int batimeout, int baseqctl)
5553{
5554#define MS(_v, _f)	(((_v) & _f) >> _f##_S)
5555	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5556	struct iwn_ops *ops = &sc->ops;
5557	struct iwn_node *wn = (void *)ni;
5558	struct iwn_node_info node;
5559	uint16_t ssn;
5560	uint8_t tid;
5561	int error;
5562
5563	tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID);
5564	ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START);
5565
5566	memset(&node, 0, sizeof node);
5567	node.id = wn->id;
5568	node.control = IWN_NODE_UPDATE;
5569	node.flags = IWN_FLAG_SET_ADDBA;
5570	node.addba_tid = tid;
5571	node.addba_ssn = htole16(ssn);
5572	DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
5573	    wn->id, tid, ssn);
5574	error = ops->add_node(sc, &node, 1);
5575	if (error != 0)
5576		return error;
5577	return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
5578#undef MS
5579}
5580
5581/*
5582 * This function is called by the upper layer on teardown of an HT-immediate
5583 * Block Ack agreement (e.g. upon receipt of a DELBA frame).
5584 */
5585static void
5586iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
5587{
5588	struct ieee80211com *ic = ni->ni_ic;
5589	struct iwn_softc *sc = ic->ic_ifp->if_softc;
5590	struct iwn_ops *ops = &sc->ops;
5591	struct iwn_node *wn = (void *)ni;
5592	struct iwn_node_info node;
5593	uint8_t tid;
5594
5595	/* XXX: tid as an argument */
5596	for (tid = 0; tid < WME_NUM_TID; tid++) {
5597		if (&ni->ni_rx_ampdu[tid] == rap)
5598			break;
5599	}
5600
5601	memset(&node, 0, sizeof node);
5602	node.id = wn->id;
5603	node.control = IWN_NODE_UPDATE;
5604	node.flags = IWN_FLAG_SET_DELBA;
5605	node.delba_tid = tid;
5606	DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
5607	(void)ops->add_node(sc, &node, 1);
5608	sc->sc_ampdu_rx_stop(ni, rap);
5609}
5610
5611static int
5612iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5613    int dialogtoken, int baparamset, int batimeout)
5614{
5615	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5616	int qid;
5617
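	/* Look for a free aggregation-capable TX queue. */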
5618	for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) {
5619		if (sc->qid2tap[qid] == NULL)
5620			break;
5621	}
5622	if (qid == sc->ntxqs) {
5623		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: no free aggregation queue\n",
5624		    __func__);
5625		return 0;
5626	}
5627	tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
5628	if (tap->txa_private == NULL) {
5629		device_printf(sc->sc_dev,
5630		    "%s: failed to alloc TX aggregation structure\n", __func__);
5631		return 0;
5632	}
5633	sc->qid2tap[qid] = tap;
5634	*(int *)tap->txa_private = qid;
5635	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5636	    batimeout);
5637}
5638
5639static int
5640iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5641    int code, int baparamset, int batimeout)
5642{
5643	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5644	int qid = *(int *)tap->txa_private;
5645	uint8_t tid = WME_AC_TO_TID(tap->txa_ac);
5646	int ret;
5647
5648	if (code == IEEE80211_STATUS_SUCCESS) {
5649		ni->ni_txseqs[tid] = tap->txa_start & 0xfff;
5650		ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid);
5651		if (ret != 1)
5652			return ret;
5653	} else {
5654		sc->qid2tap[qid] = NULL;
5655		free(tap->txa_private, M_DEVBUF);
5656		tap->txa_private = NULL;
5657	}
5658	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
5659}
5660
5661/*
5662 * This function is called by the upper layer when an ADDBA response is
5663 * received from another STA.
5664 */
5665static int
5666iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
5667    uint8_t tid)
5668{
5669	struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[TID_TO_WME_AC(tid)];
5670	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5671	struct iwn_ops *ops = &sc->ops;
5672	struct iwn_node *wn = (void *)ni;
5673	struct iwn_node_info node;
5674	int error, qid;
5675
5676	/* Enable TX for the specified RA/TID. */
5677	wn->disable_tid &= ~(1 << tid);
5678	memset(&node, 0, sizeof node);
5679	node.id = wn->id;
5680	node.control = IWN_NODE_UPDATE;
5681	node.flags = IWN_FLAG_SET_DISABLE_TID;
5682	node.disable_tid = htole16(wn->disable_tid);
5683	error = ops->add_node(sc, &node, 1);
5684	if (error != 0)
5685		return 0;
5686
5687	if ((error = iwn_nic_lock(sc)) != 0)
5688		return 0;
5689	qid = *(int *)tap->txa_private;
5690	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n",
5691	    __func__, wn->id, tid, tap->txa_start, qid);
5692	ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff);
5693	iwn_nic_unlock(sc);
5694
5695	iwn_set_link_quality(sc, ni);
5696	return 1;
5697}
5698
5699static void
5700iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
5701{
5702	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5703	struct iwn_ops *ops = &sc->ops;
5704	uint8_t tid = WME_AC_TO_TID(tap->txa_ac);
5705	int qid;
5706
5707	sc->sc_addba_stop(ni, tap);
5708
5709	if (tap->txa_private == NULL)
5710		return;
5711
5712	qid = *(int *)tap->txa_private;
5713	if (sc->txq[qid].queued != 0)
5714		return;
5715	if (iwn_nic_lock(sc) != 0)
5716		return;
5717	ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff);
5718	iwn_nic_unlock(sc);
5719	sc->qid2tap[qid] = NULL;
5720	free(tap->txa_private, M_DEVBUF);
5721	tap->txa_private = NULL;
5722}
5723
5724static void
5725iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5726    int qid, uint8_t tid, uint16_t ssn)
5727{
5728	struct iwn_node *wn = (void *)ni;
5729
5730	/* Stop TX scheduler while we're changing its configuration. */
5731	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5732	    IWN4965_TXQ_STATUS_CHGACT);
5733
5734	/* Assign RA/TID translation to the queue. */
5735	iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
5736	    wn->id << 4 | tid);
5737
5738	/* Enable chain-building mode for the queue. */
5739	iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
5740
5741	/* Set starting sequence number from the ADDBA request. */
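	/*
	 * The TX ring has 256 entries, so only the low 8 bits of the SSN
	 * are used as ring index; the scheduler gets the full SSN below.
	 */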
5742	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
5743	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5744	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5745
5746	/* Set scheduler window size. */
5747	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
5748	    IWN_SCHED_WINSZ);
5749	/* Set scheduler frame limit. */
5750	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5751	    IWN_SCHED_LIMIT << 16);
5752
5753	/* Enable interrupts for the queue. */
5754	iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5755
5756	/* Mark the queue as active. */
5757	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5758	    IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
5759	    iwn_tid2fifo[tid] << 1);
5760}
5761
5762static void
5763iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
5764{
5765	/* Stop TX scheduler while we're changing its configuration. */
5766	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5767	    IWN4965_TXQ_STATUS_CHGACT);
5768
5769	/* Set starting sequence number from the ADDBA request. */
5770	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5771	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5772
5773	/* Disable interrupts for the queue. */
5774	iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5775
5776	/* Mark the queue as inactive. */
5777	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5778	    IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
5779}
5780
5781static void
5782iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5783    int qid, uint8_t tid, uint16_t ssn)
5784{
5785	struct iwn_node *wn = (void *)ni;
5786
5787	/* Stop TX scheduler while we're changing its configuration. */
5788	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5789	    IWN5000_TXQ_STATUS_CHGACT);
5790
5791	/* Assign RA/TID translation to the queue. */
5792	iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
5793	    wn->id << 4 | tid);
5794
5795	/* Enable chain-building mode for the queue. */
5796	iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
5797
5798	/* Enable aggregation for the queue. */
5799	iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5800
5801	/* Set starting sequence number from the ADDBA request. */
5802	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
5803	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5804	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5805
5806	/* Set scheduler window size and frame limit. */
5807	iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5808	    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5809
5810	/* Enable interrupts for the queue. */
5811	iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5812
5813	/* Mark the queue as active. */
5814	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5815	    IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
5816}
5817
5818static void
5819iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
5820{
5821	/* Stop TX scheduler while we're changing its configuration. */
5822	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5823	    IWN5000_TXQ_STATUS_CHGACT);
5824
5825	/* Disable aggregation for the queue. */
5826	iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5827
5828	/* Set starting sequence number from the ADDBA request. */
5829	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5830	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5831
5832	/* Disable interrupts for the queue. */
5833	iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5834
5835	/* Mark the queue as inactive. */
5836	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5837	    IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
5838}
5839
5840/*
5841 * Query calibration tables from the initialization firmware.  We do this
5842 * only once at first boot.  Called from a process context.
5843 */
5844static int
5845iwn5000_query_calibration(struct iwn_softc *sc)
5846{
5847	struct iwn5000_calib_config cmd;
5848	int error;
5849
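	/*
	 * Setting all bits in the enable/start/send masks requests every
	 * calibration the initialization firmware supports.
	 */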
5850	memset(&cmd, 0, sizeof cmd);
5851	cmd.ucode.once.enable = 0xffffffff;
5852	cmd.ucode.once.start  = 0xffffffff;
5853	cmd.ucode.once.send   = 0xffffffff;
5854	cmd.ucode.flags       = 0xffffffff;
5855	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n",
5856	    __func__);
5857	error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
5858	if (error != 0)
5859		return error;
5860
5861	/* Wait at most two seconds for calibration to complete. */
5862	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
5863		error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz);
5864	return error;
5865}
5866
5867/*
5868 * Send calibration results to the runtime firmware.  These results were
5869 * obtained on first boot from the initialization firmware.
5870 */
5871static int
5872iwn5000_send_calibration(struct iwn_softc *sc)
5873{
5874	int idx, error;
5875
5876	for (idx = 0; idx < 5; idx++) {
5877		if (sc->calibcmd[idx].buf == NULL)
5878			continue;	/* No results available. */
5879		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5880		    "send calibration result idx=%d len=%d\n", idx,
5881		    sc->calibcmd[idx].len);
5882		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
5883		    sc->calibcmd[idx].len, 0);
5884		if (error != 0) {
5885			device_printf(sc->sc_dev,
5886			    "%s: could not send calibration result, error %d\n",
5887			    __func__, error);
5888			return error;
5889		}
5890	}
5891	return 0;
5892}
5893
5894static int
5895iwn5000_send_wimax_coex(struct iwn_softc *sc)
5896{
5897	struct iwn5000_wimax_coex wimax;
5898
5899#ifdef notyet
5900	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
5901		/* Enable WiMAX coexistence for combo adapters. */
5902		wimax.flags =
5903		    IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
5904		    IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
5905		    IWN_WIMAX_COEX_STA_TABLE_VALID |
5906		    IWN_WIMAX_COEX_ENABLE;
5907		memcpy(wimax.events, iwn6050_wimax_events,
5908		    sizeof iwn6050_wimax_events);
5909	} else
5910#endif
5911	{
5912		/* Disable WiMAX coexistence. */
5913		wimax.flags = 0;
5914		memset(wimax.events, 0, sizeof wimax.events);
5915	}
5916	DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
5917	    __func__);
5918	return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
5919}
5920
5921static int
5922iwn5000_crystal_calib(struct iwn_softc *sc)
5923{
5924	struct iwn5000_phy_calib_crystal cmd;
5925
5926	memset(&cmd, 0, sizeof cmd);
5927	cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
5928	cmd.ngroups = 1;
5929	cmd.isvalid = 1;
5930	cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
5931	cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
5932	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n",
5933	    cmd.cap_pin[0], cmd.cap_pin[1]);
5934	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
5935}
5936
5937static int
5938iwn5000_temp_offset_calib(struct iwn_softc *sc)
5939{
5940	struct iwn5000_phy_calib_temp_offset cmd;
5941
5942	memset(&cmd, 0, sizeof cmd);
5943	cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
5944	cmd.ngroups = 1;
5945	cmd.isvalid = 1;
5946	if (sc->eeprom_temp != 0)
5947		cmd.offset = htole16(sc->eeprom_temp);
5948	else
5949		cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
5950	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n",
5951	    le16toh(cmd.offset));
5952	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
5953}
5954
5955/*
5956 * This function is called after the runtime firmware notifies us of its
5957 * readiness (called in a process context).
5958 */
5959static int
5960iwn4965_post_alive(struct iwn_softc *sc)
5961{
5962	int error, qid;
5963
5964	if ((error = iwn_nic_lock(sc)) != 0)
5965		return error;
5966
5967	/* Clear TX scheduler state in SRAM. */
5968	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5969	iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
5970	    IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
5971
5972	/* Set physical address of TX scheduler rings (1KB aligned). */
5973	iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5974
5975	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5976
5977	/* Disable chain mode for all our 16 queues. */
5978	iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
5979
5980	for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
5981		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
5982		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5983
5984		/* Set scheduler window size. */
5985		iwn_mem_write(sc, sc->sched_base +
5986		    IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
5987		/* Set scheduler frame limit. */
5988		iwn_mem_write(sc, sc->sched_base +
5989		    IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5990		    IWN_SCHED_LIMIT << 16);
5991	}
5992
5993	/* Enable interrupts for all our 16 queues. */
5994	iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
5995	/* Identify TX FIFO rings (0-7). */
5996	iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
5997
5998	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
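	/*
	 * qid2fifo below maps each of these queues to its TX FIFO;
	 * queue 4 is the command queue.
	 */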
5999	for (qid = 0; qid < 7; qid++) {
6000		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
6001		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6002		    IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
6003	}
6004	iwn_nic_unlock(sc);
6005	return 0;
6006}
6007
6008/*
6009 * This function is called after the initialization or runtime firmware
6010 * notifies us of its readiness (called in a process context).
6011 */
6012static int
6013iwn5000_post_alive(struct iwn_softc *sc)
6014{
6015	int error, qid;
6016
6017	/* Switch to using ICT interrupt mode. */
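	/*
	 * The ICT (interrupt cause table) is a table in host memory into
	 * which the device DMAs its interrupt causes; once enabled it is
	 * read instead of the legacy interrupt register.
	 */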
6018	iwn5000_ict_reset(sc);
6019
6020	if ((error = iwn_nic_lock(sc)) != 0)
6021		return error;
6022
6023	/* Clear TX scheduler state in SRAM. */
6024	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
6025	iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
6026	    IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
6027
6028	/* Set physical address of TX scheduler rings (1KB aligned). */
6029	iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
6030
6031	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
6032
6033	/* Enable chain mode for all queues, except command queue. */
6034	iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
6035	iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
6036
6037	for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
6038		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
6039		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
6040
6041		iwn_mem_write(sc, sc->sched_base +
6042		    IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
6043		/* Set scheduler window size and frame limit. */
6044		iwn_mem_write(sc, sc->sched_base +
6045		    IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
6046		    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
6047	}
6048
6049	/* Enable interrupts for all our 20 queues. */
6050	iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
6051	/* Identify TX FIFO rings (0-7). */
6052	iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
6053
6054	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
6055	for (qid = 0; qid < 7; qid++) {
6056		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
6057		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6058		    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
6059	}
6060	iwn_nic_unlock(sc);
6061
6062	/* Configure WiMAX coexistence for combo adapters. */
6063	error = iwn5000_send_wimax_coex(sc);
6064	if (error != 0) {
6065		device_printf(sc->sc_dev,
6066		    "%s: could not configure WiMAX coexistence, error %d\n",
6067		    __func__, error);
6068		return error;
6069	}
6070	if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
6071		/* Perform crystal calibration. */
6072		error = iwn5000_crystal_calib(sc);
6073		if (error != 0) {
6074			device_printf(sc->sc_dev,
6075			    "%s: crystal calibration failed, error %d\n",
6076			    __func__, error);
6077			return error;
6078		}
6079	}
6080	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
6081		/* Query calibration from the initialization firmware. */
6082		if ((error = iwn5000_query_calibration(sc)) != 0) {
6083			device_printf(sc->sc_dev,
6084			    "%s: could not query calibration, error %d\n",
6085			    __func__, error);
6086			return error;
6087		}
6088		/*
6089		 * We have the calibration results now, reboot with the
6090		 * runtime firmware (call ourselves recursively!)
6091		 */
6092		iwn_hw_stop(sc);
6093		error = iwn_hw_init(sc);
6094	} else {
6095		/* Send calibration results to runtime firmware. */
6096		error = iwn5000_send_calibration(sc);
6097	}
6098	return error;
6099}
6100
6101/*
6102 * The firmware boot code is small and is intended to be copied directly into
6103 * the NIC internal memory (no DMA transfer).
6104 */
6105static int
6106iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
6107{
6108	int error, ntries;
6109
6110	size /= sizeof (uint32_t);
6111
6112	if ((error = iwn_nic_lock(sc)) != 0)
6113		return error;
6114
6115	/* Copy microcode image into NIC memory. */
6116	iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
6117	    (const uint32_t *)ucode, size);
6118
6119	iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
6120	iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
6121	iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
6122
6123	/* Start boot load now. */
6124	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
6125
6126	/* Wait for transfer to complete. */
6127	for (ntries = 0; ntries < 1000; ntries++) {
6128		if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
6129		    IWN_BSM_WR_CTRL_START))
6130			break;
6131		DELAY(10);
6132	}
6133	if (ntries == 1000) {
6134		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
6135		    __func__);
6136		iwn_nic_unlock(sc);
6137		return ETIMEDOUT;
6138	}
6139
6140	/* Enable boot after power up. */
6141	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
6142
6143	iwn_nic_unlock(sc);
6144	return 0;
6145}
6146
6147static int
6148iwn4965_load_firmware(struct iwn_softc *sc)
6149{
6150	struct iwn_fw_info *fw = &sc->fw;
6151	struct iwn_dma_info *dma = &sc->fw_dma;
6152	int error;
6153
6154	/* Copy initialization sections into pre-allocated DMA-safe memory. */
6155	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
6156	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6157	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6158	    fw->init.text, fw->init.textsz);
6159	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6160
6161	/* Tell adapter where to find initialization sections. */
6162	if ((error = iwn_nic_lock(sc)) != 0)
6163		return error;
6164	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6165	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
6166	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6167	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6168	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
6169	iwn_nic_unlock(sc);
6170
6171	/* Load firmware boot code. */
6172	error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
6173	if (error != 0) {
6174		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
6175		    __func__);
6176		return error;
6177	}
6178	/* Now press "execute". */
6179	IWN_WRITE(sc, IWN_RESET, 0);
6180
6181	/* Wait at most one second for first alive notification. */
6182	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
6183		device_printf(sc->sc_dev,
6184		    "%s: timeout waiting for adapter to initialize, error %d\n",
6185		    __func__, error);
6186		return error;
6187	}
6188
6189	/* Retrieve current temperature for initial TX power calibration. */
6190	sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
6191	sc->temp = iwn4965_get_temperature(sc);
6192
6193	/* Copy runtime sections into pre-allocated DMA-safe memory. */
6194	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
6195	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6196	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6197	    fw->main.text, fw->main.textsz);
6198	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6199
6200	/* Tell adapter where to find runtime sections. */
6201	if ((error = iwn_nic_lock(sc)) != 0)
6202		return error;
6203	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6204	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
6205	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6206	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6207	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
6208	    IWN_FW_UPDATED | fw->main.textsz);
6209	iwn_nic_unlock(sc);
6210
6211	return 0;
6212}
6213
6214static int
6215iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
6216    const uint8_t *section, int size)
6217{
6218	struct iwn_dma_info *dma = &sc->fw_dma;
6219	int error;
6220
6221	/* Copy firmware section into pre-allocated DMA-safe memory. */
6222	memcpy(dma->vaddr, section, size);
6223	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6224
6225	if ((error = iwn_nic_lock(sc)) != 0)
6226		return error;
6227
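	/*
	 * Pause the service DMA channel, describe the section with a single
	 * transmit buffer descriptor and let the Flow Handler copy it from
	 * host memory into NIC SRAM at address 'dst'.
	 */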
6228	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6229	    IWN_FH_TX_CONFIG_DMA_PAUSE);
6230
6231	IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
6232	IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
6233	    IWN_LOADDR(dma->paddr));
6234	IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
6235	    IWN_HIADDR(dma->paddr) << 28 | size);
6236	IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
6237	    IWN_FH_TXBUF_STATUS_TBNUM(1) |
6238	    IWN_FH_TXBUF_STATUS_TBIDX(1) |
6239	    IWN_FH_TXBUF_STATUS_TFBD_VALID);
6240
6241	/* Kick Flow Handler to start DMA transfer. */
6242	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6243	    IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
6244
6245	iwn_nic_unlock(sc);
6246
6247	/* Wait at most five seconds for FH DMA transfer to complete. */
6248	return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz);
6249}
6250
6251static int
6252iwn5000_load_firmware(struct iwn_softc *sc)
6253{
6254	struct iwn_fw_part *fw;
6255	int error;
6256
6257	/* Load the initialization firmware on first boot only. */
6258	fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
6259	    &sc->fw.main : &sc->fw.init;
6260
6261	error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
6262	    fw->text, fw->textsz);
6263	if (error != 0) {
6264		device_printf(sc->sc_dev,
6265		    "%s: could not load firmware %s section, error %d\n",
6266		    __func__, ".text", error);
6267		return error;
6268	}
6269	error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
6270	    fw->data, fw->datasz);
6271	if (error != 0) {
6272		device_printf(sc->sc_dev,
6273		    "%s: could not load firmware %s section, error %d\n",
6274		    __func__, ".data", error);
6275		return error;
6276	}
6277
6278	/* Now press "execute". */
6279	IWN_WRITE(sc, IWN_RESET, 0);
6280	return 0;
6281}
6282
6283/*
6284 * Extract text and data sections from a legacy firmware image.
6285 */
6286static int
6287iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
6288{
6289	const uint32_t *ptr;
6290	size_t hdrlen = 24;
6291	uint32_t rev;
6292
6293	ptr = (const uint32_t *)fw->data;
6294	rev = le32toh(*ptr++);
6295
6296	/* Check firmware API version. */
6297	if (IWN_FW_API(rev) <= 1) {
6298		device_printf(sc->sc_dev,
6299		    "%s: bad firmware, need API version >=2\n", __func__);
6300		return EINVAL;
6301	}
6302	if (IWN_FW_API(rev) >= 3) {
6303		/* Skip build number (version 2 header). */
6304		hdrlen += 4;
6305		ptr++;
6306	}
6307	if (fw->size < hdrlen) {
6308		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6309		    __func__, fw->size);
6310		return EINVAL;
6311	}
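	/*
	 * The header is followed by the sizes of the five sections, in this
	 * order: runtime text/data, init text/data and boot text; the
	 * sections themselves come right after.
	 */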
6312	fw->main.textsz = le32toh(*ptr++);
6313	fw->main.datasz = le32toh(*ptr++);
6314	fw->init.textsz = le32toh(*ptr++);
6315	fw->init.datasz = le32toh(*ptr++);
6316	fw->boot.textsz = le32toh(*ptr++);
6317
6318	/* Check that all firmware sections fit. */
6319	if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
6320	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
6321		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6322		    __func__, fw->size);
6323		return EINVAL;
6324	}
6325
6326	/* Get pointers to firmware sections. */
6327	fw->main.text = (const uint8_t *)ptr;
6328	fw->main.data = fw->main.text + fw->main.textsz;
6329	fw->init.text = fw->main.data + fw->main.datasz;
6330	fw->init.data = fw->init.text + fw->init.textsz;
6331	fw->boot.text = fw->init.data + fw->init.datasz;
6332	return 0;
6333}
6334
6335/*
6336 * Extract text and data sections from a TLV firmware image.
6337 */
6338static int
6339iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
6340    uint16_t alt)
6341{
6342	const struct iwn_fw_tlv_hdr *hdr;
6343	const struct iwn_fw_tlv *tlv;
6344	const uint8_t *ptr, *end;
6345	uint64_t altmask;
6346	uint32_t len, tmp;
6347
6348	if (fw->size < sizeof (*hdr)) {
6349		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6350		    __func__, fw->size);
6351		return EINVAL;
6352	}
6353	hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
6354	if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
6355		device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n",
6356		    __func__, le32toh(hdr->signature));
6357		return EINVAL;
6358	}
6359	DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr,
6360	    le32toh(hdr->build));
6361
6362	/*
6363	 * Select the closest supported alternative that is less than
6364	 * or equal to the specified one.
6365	 */
6366	altmask = le64toh(hdr->altmask);
6367	while (alt > 0 && !(altmask & (1ULL << alt)))
6368		alt--;	/* Downgrade. */
6369	DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt);
6370
6371	ptr = (const uint8_t *)(hdr + 1);
6372	end = (const uint8_t *)(fw->data + fw->size);
6373
6374	/* Parse type-length-value fields. */
6375	while (ptr + sizeof (*tlv) <= end) {
6376		tlv = (const struct iwn_fw_tlv *)ptr;
6377		len = le32toh(tlv->len);
6378
6379		ptr += sizeof (*tlv);
6380		if (ptr + len > end) {
6381			device_printf(sc->sc_dev,
6382			    "%s: firmware too short: %zu bytes\n", __func__,
6383			    fw->size);
6384			return EINVAL;
6385		}
6386		/* Skip other alternatives. */
6387		if (tlv->alt != 0 && tlv->alt != htole16(alt))
6388			goto next;
6389
6390		switch (le16toh(tlv->type)) {
6391		case IWN_FW_TLV_MAIN_TEXT:
6392			fw->main.text = ptr;
6393			fw->main.textsz = len;
6394			break;
6395		case IWN_FW_TLV_MAIN_DATA:
6396			fw->main.data = ptr;
6397			fw->main.datasz = len;
6398			break;
6399		case IWN_FW_TLV_INIT_TEXT:
6400			fw->init.text = ptr;
6401			fw->init.textsz = len;
6402			break;
6403		case IWN_FW_TLV_INIT_DATA:
6404			fw->init.data = ptr;
6405			fw->init.datasz = len;
6406			break;
6407		case IWN_FW_TLV_BOOT_TEXT:
6408			fw->boot.text = ptr;
6409			fw->boot.textsz = len;
6410			break;
6411		case IWN_FW_TLV_ENH_SENS:
6412			if (!len)
6413				sc->sc_flags |= IWN_FLAG_ENH_SENS;
6414			break;
6415		case IWN_FW_TLV_PHY_CALIB:
6416			tmp = htole32(*ptr);
6417			if (tmp < 253) {
6418				sc->reset_noise_gain = tmp;
6419				sc->noise_gain = tmp + 1;
6420			}
6421			break;
6422		default:
6423			DPRINTF(sc, IWN_DEBUG_RESET,
6424			    "TLV type %d not handled\n", le16toh(tlv->type));
6425			break;
6426		}
6427 next:		/* TLV fields are 32-bit aligned. */
6428		ptr += (len + 3) & ~3;
6429	}
6430	return 0;
6431}
6432
6433static int
6434iwn_read_firmware(struct iwn_softc *sc)
6435{
6436	struct iwn_fw_info *fw = &sc->fw;
6437	int error;
6438
6439	IWN_UNLOCK(sc);
6440
6441	memset(fw, 0, sizeof (*fw));
6442
6443	/* Read firmware image from filesystem. */
6444	sc->fw_fp = firmware_get(sc->fwname);
6445	if (sc->fw_fp == NULL) {
6446		device_printf(sc->sc_dev, "%s: could not read firmware %s\n",
6447		    __func__, sc->fwname);
6448		IWN_LOCK(sc);
6449		return EINVAL;
6450	}
6451	IWN_LOCK(sc);
6452
6453	fw->size = sc->fw_fp->datasize;
6454	fw->data = (const uint8_t *)sc->fw_fp->data;
6455	if (fw->size < sizeof (uint32_t)) {
6456		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6457		    __func__, fw->size);
6458		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6459		sc->fw_fp = NULL;
6460		return EINVAL;
6461	}
6462
6463	/* Retrieve text and data sections. */
6464	if (*(const uint32_t *)fw->data != 0)	/* Legacy image. */
6465		error = iwn_read_firmware_leg(sc, fw);
6466	else
6467		error = iwn_read_firmware_tlv(sc, fw, 1);
6468	if (error != 0) {
6469		device_printf(sc->sc_dev,
6470		    "%s: could not read firmware sections, error %d\n",
6471		    __func__, error);
6472		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6473		sc->fw_fp = NULL;
6474		return error;
6475	}
6476
6477	/* Make sure text and data sections fit in hardware memory. */
6478	if (fw->main.textsz > sc->fw_text_maxsz ||
6479	    fw->main.datasz > sc->fw_data_maxsz ||
6480	    fw->init.textsz > sc->fw_text_maxsz ||
6481	    fw->init.datasz > sc->fw_data_maxsz ||
6482	    fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
6483	    (fw->boot.textsz & 3) != 0) {
6484		device_printf(sc->sc_dev, "%s: firmware sections too large\n",
6485		    __func__);
6486		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6487		sc->fw_fp = NULL;
6488		return EINVAL;
6489	}
6490
6491	/* We can proceed with loading the firmware. */
6492	return 0;
6493}
6494
6495static int
6496iwn_clock_wait(struct iwn_softc *sc)
6497{
6498	int ntries;
6499
6500	/* Set "initialization complete" bit. */
6501	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6502
6503	/* Wait for clock stabilization. */
6504	for (ntries = 0; ntries < 2500; ntries++) {
6505		if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
6506			return 0;
6507		DELAY(10);
6508	}
6509	device_printf(sc->sc_dev,
6510	    "%s: timeout waiting for clock stabilization\n", __func__);
6511	return ETIMEDOUT;
6512}
6513
6514static int
6515iwn_apm_init(struct iwn_softc *sc)
6516{
6517	uint32_t reg;
6518	int error;
6519
6520	/* Disable L0s exit timer (NMI bug workaround). */
6521	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
6522	/* Don't wait for ICH L0s (ICH bug workaround). */
6523	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
6524
6525	/* Set FH wait threshold to max (HW bug under stress workaround). */
6526	IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
6527
6528	/* Enable HAP INTA to move adapter from L1a to L0s. */
6529	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
6530
6531	/* Retrieve PCIe Active State Power Management (ASPM). */
6532	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
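	/*
	 * Offset 0x10 into the PCIe capability is the Link Control register;
	 * bit 1 of its ASPM Control field enables L1 Entry.
	 */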
6533	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
6534	if (reg & 0x02)	/* L1 Entry enabled. */
6535		IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6536	else
6537		IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6538
6539	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
6540	    sc->hw_type <= IWN_HW_REV_TYPE_1000)
6541		IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);
6542
6543	/* Wait for clock stabilization before accessing prph. */
6544	if ((error = iwn_clock_wait(sc)) != 0)
6545		return error;
6546
6547	if ((error = iwn_nic_lock(sc)) != 0)
6548		return error;
6549	if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
6550		/* Enable DMA and BSM (Bootstrap State Machine). */
6551		iwn_prph_write(sc, IWN_APMG_CLK_EN,
6552		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
6553		    IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
6554	} else {
6555		/* Enable DMA. */
6556		iwn_prph_write(sc, IWN_APMG_CLK_EN,
6557		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6558	}
6559	DELAY(20);
6560	/* Disable L1-Active. */
6561	iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
6562	iwn_nic_unlock(sc);
6563
6564	return 0;
6565}
6566
6567static void
6568iwn_apm_stop_master(struct iwn_softc *sc)
6569{
6570	int ntries;
6571
6572	/* Stop busmaster DMA activity. */
6573	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
6574	for (ntries = 0; ntries < 100; ntries++) {
6575		if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
6576			return;
6577		DELAY(10);
6578	}
6579	device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__);
6580}
6581
6582static void
6583iwn_apm_stop(struct iwn_softc *sc)
6584{
6585	iwn_apm_stop_master(sc);
6586
6587	/* Reset the entire device. */
6588	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
6589	DELAY(10);
6590	/* Clear "initialization complete" bit. */
6591	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6592}
6593
6594static int
6595iwn4965_nic_config(struct iwn_softc *sc)
6596{
6597	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
6598		/*
6599		 * I don't believe this to be correct but this is what the
6600		 * vendor driver is doing. Probably the bits should not be
6601		 * shifted in IWN_RFCFG_*.
6602		 */
6603		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6604		    IWN_RFCFG_TYPE(sc->rfcfg) |
6605		    IWN_RFCFG_STEP(sc->rfcfg) |
6606		    IWN_RFCFG_DASH(sc->rfcfg));
6607	}
6608	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6609	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6610	return 0;
6611}
6612
6613static int
6614iwn5000_nic_config(struct iwn_softc *sc)
6615{
6616	uint32_t tmp;
6617	int error;
6618
6619	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
6620		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6621		    IWN_RFCFG_TYPE(sc->rfcfg) |
6622		    IWN_RFCFG_STEP(sc->rfcfg) |
6623		    IWN_RFCFG_DASH(sc->rfcfg));
6624	}
6625	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6626	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6627
6628	if ((error = iwn_nic_lock(sc)) != 0)
6629		return error;
6630	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
6631
6632	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
6633		/*
6634		 * Select first Switching Voltage Regulator (1.32V) to
6635		 * solve a stability issue related to noisy DC2DC line
6636		 * in the silicon of 1000 Series.
6637		 */
6638		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
6639		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
6640		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
6641		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
6642	}
6643	iwn_nic_unlock(sc);
6644
6645	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
6646		/* Use internal power amplifier only. */
6647		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
6648	}
6649	if ((sc->hw_type == IWN_HW_REV_TYPE_6050 ||
6650	     sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) {
6651		/* Indicate that ROM calibration version is >=6. */
6652		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
6653	}
6654	if (sc->hw_type == IWN_HW_REV_TYPE_6005)
6655		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2);
6656	return 0;
6657}
6658
6659/*
6660 * Take ownership of the NIC from Intel Active Management Technology (AMT).
6661 */
6662static int
6663iwn_hw_prepare(struct iwn_softc *sc)
6664{
6665	int ntries;
6666
6667	/* Check if hardware is ready. */
6668	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6669	for (ntries = 0; ntries < 5; ntries++) {
6670		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6671		    IWN_HW_IF_CONFIG_NIC_READY)
6672			return 0;
6673		DELAY(10);
6674	}
6675
6676	/* Hardware not ready, force into ready state. */
6677	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
6678	for (ntries = 0; ntries < 15000; ntries++) {
6679		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
6680		    IWN_HW_IF_CONFIG_PREPARE_DONE))
6681			break;
6682		DELAY(10);
6683	}
6684	if (ntries == 15000)
6685		return ETIMEDOUT;
6686
6687	/* Hardware should be ready now. */
6688	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6689	for (ntries = 0; ntries < 5; ntries++) {
6690		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6691		    IWN_HW_IF_CONFIG_NIC_READY)
6692			return 0;
6693		DELAY(10);
6694	}
6695	return ETIMEDOUT;
6696}
6697
6698static int
6699iwn_hw_init(struct iwn_softc *sc)
6700{
6701	struct iwn_ops *ops = &sc->ops;
6702	int error, chnl, qid;
6703
6704	/* Clear pending interrupts. */
6705	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6706
6707	if ((error = iwn_apm_init(sc)) != 0) {
6708		device_printf(sc->sc_dev,
6709		    "%s: could not power ON adapter, error %d\n", __func__,
6710		    error);
6711		return error;
6712	}
6713
6714	/* Select VMAIN power source. */
6715	if ((error = iwn_nic_lock(sc)) != 0)
6716		return error;
6717	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
6718	iwn_nic_unlock(sc);
6719
6720	/* Perform adapter-specific initialization. */
6721	if ((error = ops->nic_config(sc)) != 0)
6722		return error;
6723
6724	/* Initialize RX ring. */
6725	if ((error = iwn_nic_lock(sc)) != 0)
6726		return error;
6727	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
6728	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
6729	/* Set physical address of RX ring (256-byte aligned). */
6730	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
6731	/* Set physical address of RX status (16-byte aligned). */
6732	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
6733	/* Enable RX. */
6734	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
6735	    IWN_FH_RX_CONFIG_ENA           |
6736	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
6737	    IWN_FH_RX_CONFIG_IRQ_DST_HOST  |
6738	    IWN_FH_RX_CONFIG_SINGLE_FRAME  |
6739	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
6740	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
6741	iwn_nic_unlock(sc);
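	/* The RX write pointer must be a multiple of 8. */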
6742	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
6743
6744	if ((error = iwn_nic_lock(sc)) != 0)
6745		return error;
6746
6747	/* Initialize TX scheduler. */
6748	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
6749
6750	/* Set physical address of "keep warm" page (16-byte aligned). */
6751	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
6752
6753	/* Initialize TX rings. */
6754	for (qid = 0; qid < sc->ntxqs; qid++) {
6755		struct iwn_tx_ring *txq = &sc->txq[qid];
6756
6757		/* Set physical address of TX ring (256-byte aligned). */
6758		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
6759		    txq->desc_dma.paddr >> 8);
6760	}
6761	iwn_nic_unlock(sc);
6762
6763	/* Enable DMA channels. */
6764	for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
6765		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
6766		    IWN_FH_TX_CONFIG_DMA_ENA |
6767		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
6768	}
6769
6770	/* Clear "radio off" and "commands blocked" bits. */
6771	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6772	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
6773
6774	/* Clear pending interrupts. */
6775	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6776	/* Enable interrupt coalescing. */
6777	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
6778	/* Enable interrupts. */
6779	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6780
6781	/* _Really_ make sure "radio off" bit is cleared! */
6782	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6783	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6784
6785	/* Enable shadow registers. */
6786	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
6787		IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);
6788
6789	if ((error = ops->load_firmware(sc)) != 0) {
6790		device_printf(sc->sc_dev,
6791		    "%s: could not load firmware, error %d\n", __func__,
6792		    error);
6793		return error;
6794	}
6795	/* Wait at most one second for firmware alive notification. */
6796	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
6797		device_printf(sc->sc_dev,
6798		    "%s: timeout waiting for adapter to initialize, error %d\n",
6799		    __func__, error);
6800		return error;
6801	}
6802	/* Do post-firmware initialization. */
6803	return ops->post_alive(sc);
6804}
6805
6806static void
6807iwn_hw_stop(struct iwn_softc *sc)
6808{
6809	int chnl, qid, ntries;
6810
6811	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
6812
6813	/* Disable interrupts. */
6814	IWN_WRITE(sc, IWN_INT_MASK, 0);
6815	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6816	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
6817	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6818
6819	/* Make sure we no longer hold the NIC lock. */
6820	iwn_nic_unlock(sc);
6821
6822	/* Stop TX scheduler. */
6823	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
6824
6825	/* Stop all DMA channels. */
6826	if (iwn_nic_lock(sc) == 0) {
6827		for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
6828			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
6829			for (ntries = 0; ntries < 200; ntries++) {
6830				if (IWN_READ(sc, IWN_FH_TX_STATUS) &
6831				    IWN_FH_TX_STATUS_IDLE(chnl))
6832					break;
6833				DELAY(10);
6834			}
6835		}
6836		iwn_nic_unlock(sc);
6837	}
6838
6839	/* Stop RX ring. */
6840	iwn_reset_rx_ring(sc, &sc->rxq);
6841
6842	/* Reset all TX rings. */
6843	for (qid = 0; qid < sc->ntxqs; qid++)
6844		iwn_reset_tx_ring(sc, &sc->txq[qid]);
6845
6846	if (iwn_nic_lock(sc) == 0) {
6847		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
6848		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6849		iwn_nic_unlock(sc);
6850	}
6851	DELAY(5);
6852	/* Power OFF adapter. */
6853	iwn_apm_stop(sc);
6854}
6855
6856static void
6857iwn_radio_on(void *arg0, int pending)
6858{
6859	struct iwn_softc *sc = arg0;
6860	struct ifnet *ifp = sc->sc_ifp;
6861	struct ieee80211com *ic = ifp->if_l2com;
6862	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6863
6864	if (vap != NULL) {
6865		iwn_init(sc);
6866		ieee80211_init(vap);
6867	}
6868}
6869
6870static void
6871iwn_radio_off(void *arg0, int pending)
6872{
6873	struct iwn_softc *sc = arg0;
6874	struct ifnet *ifp = sc->sc_ifp;
6875	struct ieee80211com *ic = ifp->if_l2com;
6876	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6877
6878	iwn_stop(sc);
6879	if (vap != NULL)
6880		ieee80211_stop(vap);
6881
6882	/* Enable interrupts to get RF toggle notification. */
6883	IWN_LOCK(sc);
6884	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6885	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6886	IWN_UNLOCK(sc);
6887}
6888
6889static void
6890iwn_init_locked(struct iwn_softc *sc)
6891{
6892	struct ifnet *ifp = sc->sc_ifp;
6893	int error;
6894
6895	IWN_LOCK_ASSERT(sc);
6896
6897	if ((error = iwn_hw_prepare(sc)) != 0) {
6898		device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n",
6899		    __func__, error);
6900		goto fail;
6901	}
6902
6903	/* Initialize interrupt mask to default value. */
6904	sc->int_mask = IWN_INT_MASK_DEF;
6905	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6906
6907	/* Check that the radio is not disabled by hardware switch. */
6908	if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
6909		device_printf(sc->sc_dev,
6910		    "radio is disabled by hardware switch\n");
6911		/* Enable interrupts to get RF toggle notifications. */
6912		IWN_WRITE(sc, IWN_INT, 0xffffffff);
6913		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6914		return;
6915	}
6916
6917	/* Read firmware images from the filesystem. */
6918	if ((error = iwn_read_firmware(sc)) != 0) {
6919		device_printf(sc->sc_dev,
6920		    "%s: could not read firmware, error %d\n", __func__,
6921		    error);
6922		goto fail;
6923	}
6924
6925	/* Initialize hardware and upload firmware. */
6926	error = iwn_hw_init(sc);
6927	firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6928	sc->fw_fp = NULL;
6929	if (error != 0) {
6930		device_printf(sc->sc_dev,
6931		    "%s: could not initialize hardware, error %d\n", __func__,
6932		    error);
6933		goto fail;
6934	}
6935
6936	/* Configure adapter now that it is ready. */
6937	if ((error = iwn_config(sc)) != 0) {
6938		device_printf(sc->sc_dev,
6939		    "%s: could not configure device, error %d\n", __func__,
6940		    error);
6941		goto fail;
6942	}
6943
6944	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6945	ifp->if_drv_flags |= IFF_DRV_RUNNING;
6946
6947	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
6948	return;
6949
6950fail:	iwn_stop_locked(sc);
6951}
6952
6953static void
6954iwn_init(void *arg)
6955{
6956	struct iwn_softc *sc = arg;
6957	struct ifnet *ifp = sc->sc_ifp;
6958	struct ieee80211com *ic = ifp->if_l2com;
6959
6960	IWN_LOCK(sc);
6961	iwn_init_locked(sc);
6962	IWN_UNLOCK(sc);
6963
6964	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
6965		ieee80211_start_all(ic);
6966}
6967
6968static void
6969iwn_stop_locked(struct iwn_softc *sc)
6970{
6971	struct ifnet *ifp = sc->sc_ifp;
6972
6973	IWN_LOCK_ASSERT(sc);
6974
6975	sc->sc_tx_timer = 0;
6976	callout_stop(&sc->watchdog_to);
6977	callout_stop(&sc->calib_to);
6978	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
6979
6980	/* Power OFF hardware. */
6981	iwn_hw_stop(sc);
6982}
6983
6984static void
6985iwn_stop(struct iwn_softc *sc)
6986{
6987	IWN_LOCK(sc);
6988	iwn_stop_locked(sc);
6989	IWN_UNLOCK(sc);
6990}
6991
6992/*
6993 * Callback from net80211 to start a scan.
6994 */
6995static void
6996iwn_scan_start(struct ieee80211com *ic)
6997{
6998	struct ifnet *ifp = ic->ic_ifp;
6999	struct iwn_softc *sc = ifp->if_softc;
7000
7001	IWN_LOCK(sc);
7002	/* make the link LED blink while we're scanning */
7003	iwn_set_led(sc, IWN_LED_LINK, 20, 2);
7004	IWN_UNLOCK(sc);
7005}
7006
7007/*
7008 * Callback from net80211 to terminate a scan.
7009 */
7010static void
7011iwn_scan_end(struct ieee80211com *ic)
7012{
7013	struct ifnet *ifp = ic->ic_ifp;
7014	struct iwn_softc *sc = ifp->if_softc;
7015	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
7016
7017	IWN_LOCK(sc);
7018	if (vap->iv_state == IEEE80211_S_RUN) {
7019		/* Set link LED to ON status if we are associated */
7020		iwn_set_led(sc, IWN_LED_LINK, 0, 1);
7021	}
7022	IWN_UNLOCK(sc);
7023}
7024
7025/*
7026 * Callback from net80211 to force a channel change.
7027 */
7028static void
7029iwn_set_channel(struct ieee80211com *ic)
7030{
7031	const struct ieee80211_channel *c = ic->ic_curchan;
7032	struct ifnet *ifp = ic->ic_ifp;
7033	struct iwn_softc *sc = ifp->if_softc;
7034	int error;
7035
7036	IWN_LOCK(sc);
7037	sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
7038	sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
7039	sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
7040	sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
7041
7042	/*
7043	 * Only need to set the channel in Monitor mode. AP scanning and auth
7044	 * are already taken care of by their respective firmware commands.
7045	 */
7046	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7047		error = iwn_config(sc);
7048		if (error != 0)
7049			device_printf(sc->sc_dev,
7050			    "%s: error %d setting channel\n", __func__, error);
7051	}
7052	IWN_UNLOCK(sc);
7053}
7054
7055/*
7056 * Callback from net80211 to start scanning of the current channel.
7057 */
7058static void
7059iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
7060{
7061	struct ieee80211vap *vap = ss->ss_vap;
7062	struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc;
7063	int error;
7064
7065	IWN_LOCK(sc);
7066	error = iwn_scan(sc);
7067	IWN_UNLOCK(sc);
7068	if (error != 0)
7069		ieee80211_cancel_scan(vap);
7070}
7071
7072/*
7073 * Callback from net80211 to handle the minimum dwell time being met.
7074 * The intent is to terminate the scan but we just let the firmware
7075 * notify us when it's finished as we have no safe way to abort it.
7076 */
7077static void
7078iwn_scan_mindwell(struct ieee80211_scan_state *ss)
7079{
7080	/* NB: don't try to abort scan; wait for firmware to finish */
7081}
7082
7083static void
7084iwn_hw_reset(void *arg0, int pending)
7085{
7086	struct iwn_softc *sc = arg0;
7087	struct ifnet *ifp = sc->sc_ifp;
7088	struct ieee80211com *ic = ifp->if_l2com;
7089
7090	iwn_stop(sc);
7091	iwn_init(sc);
7092	ieee80211_notify_radio(ic, 1);
7093}
7094