if_iwn.c revision 262007
1/*-
2 * Copyright (c) 2013 Cedric GROSS <c.gross@kreiz-it.fr>
3 * Copyright (c) 2011 Intel Corporation
4 * Copyright (c) 2007-2009
5 *	Damien Bergamini <damien.bergamini@free.fr>
6 * Copyright (c) 2008
7 *	Benjamin Close <benjsc@FreeBSD.org>
8 * Copyright (c) 2008 Sam Leffler, Errno Consulting
9 *
10 * Permission to use, copy, modify, and distribute this software for any
11 * purpose with or without fee is hereby granted, provided that the above
12 * copyright notice and this permission notice appear in all copies.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
15 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
16 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
17 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
18 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 */
22
23/*
24 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
25 * adapters.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: stable/10/sys/dev/iwn/if_iwn.c 262007 2014-02-17 01:36:53Z kevlo $");
30
31#include "opt_wlan.h"
32#include "opt_iwn.h"
33
34#include <sys/param.h>
35#include <sys/sockio.h>
36#include <sys/sysctl.h>
37#include <sys/mbuf.h>
38#include <sys/kernel.h>
39#include <sys/socket.h>
40#include <sys/systm.h>
41#include <sys/malloc.h>
42#include <sys/bus.h>
43#include <sys/rman.h>
44#include <sys/endian.h>
45#include <sys/firmware.h>
46#include <sys/limits.h>
47#include <sys/module.h>
48#include <sys/queue.h>
49#include <sys/taskqueue.h>
50
51#include <machine/bus.h>
52#include <machine/resource.h>
53#include <machine/clock.h>
54
55#include <dev/pci/pcireg.h>
56#include <dev/pci/pcivar.h>
57
58#include <net/bpf.h>
59#include <net/if.h>
60#include <net/if_arp.h>
61#include <net/ethernet.h>
62#include <net/if_dl.h>
63#include <net/if_media.h>
64#include <net/if_types.h>
65
66#include <netinet/in.h>
67#include <netinet/in_systm.h>
68#include <netinet/in_var.h>
69#include <netinet/if_ether.h>
70#include <netinet/ip.h>
71
72#include <net80211/ieee80211_var.h>
73#include <net80211/ieee80211_radiotap.h>
74#include <net80211/ieee80211_regdomain.h>
75#include <net80211/ieee80211_ratectl.h>
76
77#include <dev/iwn/if_iwnreg.h>
78#include <dev/iwn/if_iwnvar.h>
79#include <dev/iwn/if_iwn_devid.h>
80
81struct iwn_ident {
82	uint16_t	vendor;
83	uint16_t	device;
84	const char	*name;
85};
86
87static const struct iwn_ident iwn_ident_table[] = {
88	{ 0x8086, IWN_DID_6x05_1, "Intel Centrino Advanced-N 6205"		},
89	{ 0x8086, IWN_DID_1000_1, "Intel Centrino Wireless-N 1000"		},
90	{ 0x8086, IWN_DID_1000_2, "Intel Centrino Wireless-N 1000"		},
91	{ 0x8086, IWN_DID_6x05_2, "Intel Centrino Advanced-N 6205"		},
92	{ 0x8086, IWN_DID_6050_1, "Intel Centrino Advanced-N + WiMAX 6250"	},
93	{ 0x8086, IWN_DID_6050_2, "Intel Centrino Advanced-N + WiMAX 6250"	},
94	{ 0x8086, IWN_DID_x030_1, "Intel Centrino Wireless-N 1030"		},
95	{ 0x8086, IWN_DID_x030_2, "Intel Centrino Wireless-N 1030"		},
96	{ 0x8086, IWN_DID_x030_3, "Intel Centrino Advanced-N 6230"		},
97	{ 0x8086, IWN_DID_x030_4, "Intel Centrino Advanced-N 6230"		},
98	{ 0x8086, IWN_DID_6150_1, "Intel Centrino Wireless-N + WiMAX 6150"	},
99	{ 0x8086, IWN_DID_6150_2, "Intel Centrino Wireless-N + WiMAX 6150"	},
100	{ 0x8086, IWN_DID_2x30_1, "Intel Centrino Wireless-N 2230"		},
101	{ 0x8086, IWN_DID_2x30_2, "Intel Centrino Wireless-N 2230"		},
102	{ 0x8086, IWN_DID_130_1, "Intel Centrino Wireless-N 130"		},
103	{ 0x8086, IWN_DID_130_2, "Intel Centrino Wireless-N 130"		},
104	{ 0x8086, IWN_DID_100_1, "Intel Centrino Wireless-N 100"		},
105	{ 0x8086, IWN_DID_100_2, "Intel Centrino Wireless-N 100"		},
106	{ 0x8086, IWN_DID_4965_1, "Intel Wireless WiFi Link 4965"		},
107	{ 0x8086, IWN_DID_6x00_1, "Intel Centrino Ultimate-N 6300"		},
108	{ 0x8086, IWN_DID_6x00_2, "Intel Centrino Advanced-N 6200"		},
109	{ 0x8086, IWN_DID_4965_2, "Intel Wireless WiFi Link 4965"		},
110	{ 0x8086, IWN_DID_4965_3, "Intel Wireless WiFi Link 4965"		},
111	{ 0x8086, IWN_DID_5x00_1, "Intel WiFi Link 5100"			},
112	{ 0x8086, IWN_DID_4965_4, "Intel Wireless WiFi Link 4965"		},
113	{ 0x8086, IWN_DID_5x00_3, "Intel Ultimate N WiFi Link 5300"		},
114	{ 0x8086, IWN_DID_5x00_4, "Intel Ultimate N WiFi Link 5300"		},
115	{ 0x8086, IWN_DID_5x00_2, "Intel WiFi Link 5100"			},
116	{ 0x8086, IWN_DID_6x00_3, "Intel Centrino Ultimate-N 6300"		},
117	{ 0x8086, IWN_DID_6x00_4, "Intel Centrino Advanced-N 6200"		},
118	{ 0x8086, IWN_DID_5x50_1, "Intel WiMAX/WiFi Link 5350"			},
119	{ 0x8086, IWN_DID_5x50_2, "Intel WiMAX/WiFi Link 5350"			},
120	{ 0x8086, IWN_DID_5x50_3, "Intel WiMAX/WiFi Link 5150"			},
121	{ 0x8086, IWN_DID_5x50_4, "Intel WiMAX/WiFi Link 5150"			},
122	{ 0, 0, NULL }
123};
124
125static int	iwn_probe(device_t);
126static int	iwn_attach(device_t);
127static int	iwn4965_attach(struct iwn_softc *, uint16_t);
128static int	iwn5000_attach(struct iwn_softc *, uint16_t);
129static void	iwn_radiotap_attach(struct iwn_softc *);
130static void	iwn_sysctlattach(struct iwn_softc *);
131static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
132		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
133		    const uint8_t [IEEE80211_ADDR_LEN],
134		    const uint8_t [IEEE80211_ADDR_LEN]);
135static void	iwn_vap_delete(struct ieee80211vap *);
136static int	iwn_detach(device_t);
137static int	iwn_shutdown(device_t);
138static int	iwn_suspend(device_t);
139static int	iwn_resume(device_t);
140static int	iwn_nic_lock(struct iwn_softc *);
141static int	iwn_eeprom_lock(struct iwn_softc *);
142static int	iwn_init_otprom(struct iwn_softc *);
143static int	iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
144static void	iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
145static int	iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
146		    void **, bus_size_t, bus_size_t);
147static void	iwn_dma_contig_free(struct iwn_dma_info *);
148static int	iwn_alloc_sched(struct iwn_softc *);
149static void	iwn_free_sched(struct iwn_softc *);
150static int	iwn_alloc_kw(struct iwn_softc *);
151static void	iwn_free_kw(struct iwn_softc *);
152static int	iwn_alloc_ict(struct iwn_softc *);
153static void	iwn_free_ict(struct iwn_softc *);
154static int	iwn_alloc_fwmem(struct iwn_softc *);
155static void	iwn_free_fwmem(struct iwn_softc *);
156static int	iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
157static void	iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
158static void	iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
159static int	iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
160		    int);
161static void	iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
162static void	iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
163static void	iwn5000_ict_reset(struct iwn_softc *);
164static int	iwn_read_eeprom(struct iwn_softc *,
165		    uint8_t macaddr[IEEE80211_ADDR_LEN]);
166static void	iwn4965_read_eeprom(struct iwn_softc *);
167#ifdef	IWN_DEBUG
168static void	iwn4965_print_power_group(struct iwn_softc *, int);
169#endif
170static void	iwn5000_read_eeprom(struct iwn_softc *);
171static uint32_t	iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
172static void	iwn_read_eeprom_band(struct iwn_softc *, int);
173static void	iwn_read_eeprom_ht40(struct iwn_softc *, int);
174static void	iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
175static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
176		    struct ieee80211_channel *);
177static int	iwn_setregdomain(struct ieee80211com *,
178		    struct ieee80211_regdomain *, int,
179		    struct ieee80211_channel[]);
180static void	iwn_read_eeprom_enhinfo(struct iwn_softc *);
181static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
182		    const uint8_t mac[IEEE80211_ADDR_LEN]);
183static void	iwn_newassoc(struct ieee80211_node *, int);
184static int	iwn_media_change(struct ifnet *);
185static int	iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
186static void	iwn_calib_timeout(void *);
187static void	iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
188		    struct iwn_rx_data *);
189static void	iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
190		    struct iwn_rx_data *);
191static void	iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
192		    struct iwn_rx_data *);
193static void	iwn5000_rx_calib_results(struct iwn_softc *,
194		    struct iwn_rx_desc *, struct iwn_rx_data *);
195static void	iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
196		    struct iwn_rx_data *);
197static void	iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
198		    struct iwn_rx_data *);
199static void	iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
200		    struct iwn_rx_data *);
201static void	iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
202		    uint8_t);
203static void	iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, void *);
204static void	iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
205static void	iwn_notif_intr(struct iwn_softc *);
206static void	iwn_wakeup_intr(struct iwn_softc *);
207static void	iwn_rftoggle_intr(struct iwn_softc *);
208static void	iwn_fatal_intr(struct iwn_softc *);
209static void	iwn_intr(void *);
210static void	iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
211		    uint16_t);
212static void	iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
213		    uint16_t);
214#ifdef notyet
215static void	iwn5000_reset_sched(struct iwn_softc *, int, int);
216#endif
217static int	iwn_tx_data(struct iwn_softc *, struct mbuf *,
218		    struct ieee80211_node *);
219static int	iwn_tx_data_raw(struct iwn_softc *, struct mbuf *,
220		    struct ieee80211_node *,
221		    const struct ieee80211_bpf_params *params);
222static int	iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
223		    const struct ieee80211_bpf_params *);
224static void	iwn_start(struct ifnet *);
225static void	iwn_start_locked(struct ifnet *);
226static void	iwn_watchdog(void *);
227static int	iwn_ioctl(struct ifnet *, u_long, caddr_t);
228static int	iwn_cmd(struct iwn_softc *, int, const void *, int, int);
229static int	iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
230		    int);
231static int	iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
232		    int);
233static int	iwn_set_link_quality(struct iwn_softc *,
234		    struct ieee80211_node *);
235static int	iwn_add_broadcast_node(struct iwn_softc *, int);
236static int	iwn_updateedca(struct ieee80211com *);
237static void	iwn_update_mcast(struct ifnet *);
238static void	iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
239static int	iwn_set_critical_temp(struct iwn_softc *);
240static int	iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
241static void	iwn4965_power_calibration(struct iwn_softc *, int);
242static int	iwn4965_set_txpower(struct iwn_softc *,
243		    struct ieee80211_channel *, int);
244static int	iwn5000_set_txpower(struct iwn_softc *,
245		    struct ieee80211_channel *, int);
246static int	iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
247static int	iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
248static int	iwn_get_noise(const struct iwn_rx_general_stats *);
249static int	iwn4965_get_temperature(struct iwn_softc *);
250static int	iwn5000_get_temperature(struct iwn_softc *);
251static int	iwn_init_sensitivity(struct iwn_softc *);
252static void	iwn_collect_noise(struct iwn_softc *,
253		    const struct iwn_rx_general_stats *);
254static int	iwn4965_init_gains(struct iwn_softc *);
255static int	iwn5000_init_gains(struct iwn_softc *);
256static int	iwn4965_set_gains(struct iwn_softc *);
257static int	iwn5000_set_gains(struct iwn_softc *);
258static void	iwn_tune_sensitivity(struct iwn_softc *,
259		    const struct iwn_rx_stats *);
260static int	iwn_send_sensitivity(struct iwn_softc *);
261static int	iwn_set_pslevel(struct iwn_softc *, int, int, int);
262static int	iwn_send_btcoex(struct iwn_softc *);
263static int	iwn_send_advanced_btcoex(struct iwn_softc *);
264static int	iwn5000_runtime_calib(struct iwn_softc *);
265static int	iwn_config(struct iwn_softc *);
266static uint8_t	*ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int);
267static int	iwn_scan(struct iwn_softc *);
268static int	iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
269static int	iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
270static int	iwn_ampdu_rx_start(struct ieee80211_node *,
271		    struct ieee80211_rx_ampdu *, int, int, int);
272static void	iwn_ampdu_rx_stop(struct ieee80211_node *,
273		    struct ieee80211_rx_ampdu *);
274static int	iwn_addba_request(struct ieee80211_node *,
275		    struct ieee80211_tx_ampdu *, int, int, int);
276static int	iwn_addba_response(struct ieee80211_node *,
277		    struct ieee80211_tx_ampdu *, int, int, int);
278static int	iwn_ampdu_tx_start(struct ieee80211com *,
279		    struct ieee80211_node *, uint8_t);
280static void	iwn_ampdu_tx_stop(struct ieee80211_node *,
281		    struct ieee80211_tx_ampdu *);
282static void	iwn4965_ampdu_tx_start(struct iwn_softc *,
283		    struct ieee80211_node *, int, uint8_t, uint16_t);
284static void	iwn4965_ampdu_tx_stop(struct iwn_softc *, int,
285		    uint8_t, uint16_t);
286static void	iwn5000_ampdu_tx_start(struct iwn_softc *,
287		    struct ieee80211_node *, int, uint8_t, uint16_t);
288static void	iwn5000_ampdu_tx_stop(struct iwn_softc *, int,
289		    uint8_t, uint16_t);
290static int	iwn5000_query_calibration(struct iwn_softc *);
291static int	iwn5000_send_calibration(struct iwn_softc *);
292static int	iwn5000_send_wimax_coex(struct iwn_softc *);
293static int	iwn5000_crystal_calib(struct iwn_softc *);
294static int	iwn5000_temp_offset_calib(struct iwn_softc *);
295static int	iwn4965_post_alive(struct iwn_softc *);
296static int	iwn5000_post_alive(struct iwn_softc *);
297static int	iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
298		    int);
299static int	iwn4965_load_firmware(struct iwn_softc *);
300static int	iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
301		    const uint8_t *, int);
302static int	iwn5000_load_firmware(struct iwn_softc *);
303static int	iwn_read_firmware_leg(struct iwn_softc *,
304		    struct iwn_fw_info *);
305static int	iwn_read_firmware_tlv(struct iwn_softc *,
306		    struct iwn_fw_info *, uint16_t);
307static int	iwn_read_firmware(struct iwn_softc *);
308static int	iwn_clock_wait(struct iwn_softc *);
309static int	iwn_apm_init(struct iwn_softc *);
310static void	iwn_apm_stop_master(struct iwn_softc *);
311static void	iwn_apm_stop(struct iwn_softc *);
312static int	iwn4965_nic_config(struct iwn_softc *);
313static int	iwn5000_nic_config(struct iwn_softc *);
314static int	iwn_hw_prepare(struct iwn_softc *);
315static int	iwn_hw_init(struct iwn_softc *);
316static void	iwn_hw_stop(struct iwn_softc *);
317static void	iwn_radio_on(void *, int);
318static void	iwn_radio_off(void *, int);
319static void	iwn_init_locked(struct iwn_softc *);
320static void	iwn_init(void *);
321static void	iwn_stop_locked(struct iwn_softc *);
322static void	iwn_stop(struct iwn_softc *);
323static void	iwn_scan_start(struct ieee80211com *);
324static void	iwn_scan_end(struct ieee80211com *);
325static void	iwn_set_channel(struct ieee80211com *);
326static void	iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
327static void	iwn_scan_mindwell(struct ieee80211_scan_state *);
328static void	iwn_hw_reset(void *, int);
329#ifdef	IWN_DEBUG
330static char	*iwn_get_csr_string(int);
331static void	iwn_debug_register(struct iwn_softc *);
332#endif
333
334#ifdef	IWN_DEBUG
335enum {
336	IWN_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
337	IWN_DEBUG_RECV		= 0x00000002,	/* basic recv operation */
338	IWN_DEBUG_STATE		= 0x00000004,	/* 802.11 state transitions */
339	IWN_DEBUG_TXPOW		= 0x00000008,	/* tx power processing */
340	IWN_DEBUG_RESET		= 0x00000010,	/* reset processing */
341	IWN_DEBUG_OPS		= 0x00000020,	/* iwn_ops processing */
342	IWN_DEBUG_BEACON 	= 0x00000040,	/* beacon handling */
343	IWN_DEBUG_WATCHDOG 	= 0x00000080,	/* watchdog timeout */
344	IWN_DEBUG_INTR		= 0x00000100,	/* ISR */
345	IWN_DEBUG_CALIBRATE	= 0x00000200,	/* periodic calibration */
346	IWN_DEBUG_NODE		= 0x00000400,	/* node management */
347	IWN_DEBUG_LED		= 0x00000800,	/* led management */
348	IWN_DEBUG_CMD		= 0x00001000,	/* cmd submission */
349	IWN_DEBUG_TXRATE	= 0x00002000,	/* TX rate debugging */
350	IWN_DEBUG_PWRSAVE	= 0x00004000,	/* Power save operations */
351	IWN_DEBUG_REGISTER	= 0x20000000,	/* print chipset register */
352	IWN_DEBUG_TRACE		= 0x40000000,	/* Print begin and start driver function */
353	IWN_DEBUG_FATAL		= 0x80000000,	/* fatal errors */
354	IWN_DEBUG_ANY		= 0xffffffff
355};
356
357#define DPRINTF(sc, m, fmt, ...) do {			\
358	if (sc->sc_debug & (m))				\
359		printf(fmt, __VA_ARGS__);		\
360} while (0)
361
362static const char *
363iwn_intr_str(uint8_t cmd)
364{
365	switch (cmd) {
366	/* Notifications */
367	case IWN_UC_READY:		return "UC_READY";
368	case IWN_ADD_NODE_DONE:		return "ADD_NODE_DONE";
369	case IWN_TX_DONE:		return "TX_DONE";
370	case IWN_START_SCAN:		return "START_SCAN";
371	case IWN_STOP_SCAN:		return "STOP_SCAN";
372	case IWN_RX_STATISTICS:		return "RX_STATS";
373	case IWN_BEACON_STATISTICS:	return "BEACON_STATS";
374	case IWN_STATE_CHANGED:		return "STATE_CHANGED";
375	case IWN_BEACON_MISSED:		return "BEACON_MISSED";
376	case IWN_RX_PHY:		return "RX_PHY";
377	case IWN_MPDU_RX_DONE:		return "MPDU_RX_DONE";
378	case IWN_RX_DONE:		return "RX_DONE";
379
380	/* Command Notifications */
381	case IWN_CMD_RXON:		return "IWN_CMD_RXON";
382	case IWN_CMD_RXON_ASSOC:	return "IWN_CMD_RXON_ASSOC";
383	case IWN_CMD_EDCA_PARAMS:	return "IWN_CMD_EDCA_PARAMS";
384	case IWN_CMD_TIMING:		return "IWN_CMD_TIMING";
385	case IWN_CMD_LINK_QUALITY:	return "IWN_CMD_LINK_QUALITY";
386	case IWN_CMD_SET_LED:		return "IWN_CMD_SET_LED";
387	case IWN5000_CMD_WIMAX_COEX:	return "IWN5000_CMD_WIMAX_COEX";
388	case IWN5000_CMD_CALIB_CONFIG:	return "IWN5000_CMD_CALIB_CONFIG";
389	case IWN5000_CMD_CALIB_RESULT:	return "IWN5000_CMD_CALIB_RESULT";
390	case IWN5000_CMD_CALIB_COMPLETE: return "IWN5000_CMD_CALIB_COMPLETE";
391	case IWN_CMD_SET_POWER_MODE:	return "IWN_CMD_SET_POWER_MODE";
392	case IWN_CMD_SCAN:		return "IWN_CMD_SCAN";
393	case IWN_CMD_SCAN_RESULTS:	return "IWN_CMD_SCAN_RESULTS";
394	case IWN_CMD_TXPOWER:		return "IWN_CMD_TXPOWER";
395	case IWN_CMD_TXPOWER_DBM:	return "IWN_CMD_TXPOWER_DBM";
396	case IWN5000_CMD_TX_ANT_CONFIG:	return "IWN5000_CMD_TX_ANT_CONFIG";
397	case IWN_CMD_BT_COEX:		return "IWN_CMD_BT_COEX";
398	case IWN_CMD_SET_CRITICAL_TEMP:	return "IWN_CMD_SET_CRITICAL_TEMP";
399	case IWN_CMD_SET_SENSITIVITY:	return "IWN_CMD_SET_SENSITIVITY";
400	case IWN_CMD_PHY_CALIB:		return "IWN_CMD_PHY_CALIB";
401	}
402	return "UNKNOWN INTR NOTIF/CMD";
403}
404#else
405#define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0)
406#endif
407
408static device_method_t iwn_methods[] = {
409	/* Device interface */
410	DEVMETHOD(device_probe,		iwn_probe),
411	DEVMETHOD(device_attach,	iwn_attach),
412	DEVMETHOD(device_detach,	iwn_detach),
413	DEVMETHOD(device_shutdown,	iwn_shutdown),
414	DEVMETHOD(device_suspend,	iwn_suspend),
415	DEVMETHOD(device_resume,	iwn_resume),
416	{ 0, 0 }
417};
418
419static driver_t iwn_driver = {
420	"iwn",
421	iwn_methods,
422	sizeof(struct iwn_softc)
423};
424static devclass_t iwn_devclass;
425
426DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, 0, 0);
427
428MODULE_VERSION(iwn, 1);
429
430MODULE_DEPEND(iwn, firmware, 1, 1, 1);
431MODULE_DEPEND(iwn, pci, 1, 1, 1);
432MODULE_DEPEND(iwn, wlan, 1, 1, 1);
433
434static int
435iwn_probe(device_t dev)
436{
437	const struct iwn_ident *ident;
438
439	for (ident = iwn_ident_table; ident->name != NULL; ident++) {
440		if (pci_get_vendor(dev) == ident->vendor &&
441		    pci_get_device(dev) == ident->device) {
442			device_set_desc(dev, ident->name);
443			return 0;
444		}
445	}
446	return ENXIO;
447}
448
449static int
450iwn_attach(device_t dev)
451{
452	struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev);
453	struct ieee80211com *ic;
454	struct ifnet *ifp;
455	uint32_t reg;
456	int i, error, result;
457	uint8_t macaddr[IEEE80211_ADDR_LEN];
458
459	sc->sc_dev = dev;
460
461#ifdef	IWN_DEBUG
462	error = resource_int_value(device_get_name(sc->sc_dev),
463	    device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug));
464	if (error != 0)
465		sc->sc_debug = 0;
466#else
467	sc->sc_debug = 0;
468#endif
469
470	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: begin\n",__func__);
471
472	/*
473	 * Get the offset of the PCI Express Capability Structure in PCI
474	 * Configuration Space.
475	 */
476	error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
477	if (error != 0) {
478		device_printf(dev, "PCIe capability structure not found!\n");
479		return error;
480	}
481
482	/* Clear device-specific "PCI retry timeout" register (41h). */
483	pci_write_config(dev, 0x41, 0, 1);
484
485	/* Hardware bug workaround. */
486	reg = pci_read_config(dev, PCIR_COMMAND, 2);
487	if (reg & PCIM_CMD_INTxDIS) {
488		DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n",
489		    __func__);
490		reg &= ~PCIM_CMD_INTxDIS;
491		pci_write_config(dev, PCIR_COMMAND, reg, 2);
492	}
493
494	/* Enable bus-mastering. */
495	pci_enable_busmaster(dev);
496
497	sc->mem_rid = PCIR_BAR(0);
498	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
499	    RF_ACTIVE);
500	if (sc->mem == NULL) {
501		device_printf(dev, "can't map mem space\n");
502		error = ENOMEM;
503		return error;
504	}
505	sc->sc_st = rman_get_bustag(sc->mem);
506	sc->sc_sh = rman_get_bushandle(sc->mem);
507
508	sc->irq_rid = 0;
509	if ((result = pci_msi_count(dev)) == 1 &&
510	    pci_alloc_msi(dev, &result) == 0)
511		sc->irq_rid = 1;
512	/* Install interrupt handler. */
513	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
514	    RF_ACTIVE | RF_SHAREABLE);
515	if (sc->irq == NULL) {
516		device_printf(dev, "can't map interrupt\n");
517		error = ENOMEM;
518		goto fail;
519	}
520
521	IWN_LOCK_INIT(sc);
522
523	/* Read hardware revision and attach. */
524	sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> IWN_HW_REV_TYPE_SHIFT)
525	    & IWN_HW_REV_TYPE_MASK;
526	sc->subdevice_id = pci_get_subdevice(dev);
527	if (sc->hw_type == IWN_HW_REV_TYPE_4965)
528		error = iwn4965_attach(sc, pci_get_device(dev));
529	else
530		error = iwn5000_attach(sc, pci_get_device(dev));
531	if (error != 0) {
532		device_printf(dev, "could not attach device, error %d\n",
533		    error);
534		goto fail;
535	}
536
537	if ((error = iwn_hw_prepare(sc)) != 0) {
538		device_printf(dev, "hardware not ready, error %d\n", error);
539		goto fail;
540	}
541
542	/* Allocate DMA memory for firmware transfers. */
543	if ((error = iwn_alloc_fwmem(sc)) != 0) {
544		device_printf(dev,
545		    "could not allocate memory for firmware, error %d\n",
546		    error);
547		goto fail;
548	}
549
550	/* Allocate "Keep Warm" page. */
551	if ((error = iwn_alloc_kw(sc)) != 0) {
552		device_printf(dev,
553		    "could not allocate keep warm page, error %d\n", error);
554		goto fail;
555	}
556
557	/* Allocate ICT table for 5000 Series. */
558	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
559	    (error = iwn_alloc_ict(sc)) != 0) {
560		device_printf(dev, "could not allocate ICT table, error %d\n",
561		    error);
562		goto fail;
563	}
564
565	/* Allocate TX scheduler "rings". */
566	if ((error = iwn_alloc_sched(sc)) != 0) {
567		device_printf(dev,
568		    "could not allocate TX scheduler rings, error %d\n", error);
569		goto fail;
570	}
571
572	/* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
573	for (i = 0; i < sc->ntxqs; i++) {
574		if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
575			device_printf(dev,
576			    "could not allocate TX ring %d, error %d\n", i,
577			    error);
578			goto fail;
579		}
580	}
581
582	/* Allocate RX ring. */
583	if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
584		device_printf(dev, "could not allocate RX ring, error %d\n",
585		    error);
586		goto fail;
587	}
588
589	/* Clear pending interrupts. */
590	IWN_WRITE(sc, IWN_INT, 0xffffffff);
591
592	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
593	if (ifp == NULL) {
594		device_printf(dev, "can not allocate ifnet structure\n");
595		goto fail;
596	}
597
598	ic = ifp->if_l2com;
599	ic->ic_ifp = ifp;
600	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
601	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
602
603	/* Set device capabilities. */
604	ic->ic_caps =
605		  IEEE80211_C_STA		/* station mode supported */
606		| IEEE80211_C_MONITOR		/* monitor mode supported */
607		| IEEE80211_C_BGSCAN		/* background scanning */
608		| IEEE80211_C_TXPMGT		/* tx power management */
609		| IEEE80211_C_SHSLOT		/* short slot time supported */
610		| IEEE80211_C_WPA
611		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
612#if 0
613		| IEEE80211_C_IBSS		/* ibss/adhoc mode */
614#endif
615		| IEEE80211_C_WME		/* WME */
616		| IEEE80211_C_PMGT		/* Station-side power mgmt */
617		;
618
619	/* Read MAC address, channels, etc from EEPROM. */
620	if ((error = iwn_read_eeprom(sc, macaddr)) != 0) {
621		device_printf(dev, "could not read EEPROM, error %d\n",
622		    error);
623		goto fail;
624	}
625
626	/* Count the number of available chains. */
627	sc->ntxchains =
628	    ((sc->txchainmask >> 2) & 1) +
629	    ((sc->txchainmask >> 1) & 1) +
630	    ((sc->txchainmask >> 0) & 1);
631	sc->nrxchains =
632	    ((sc->rxchainmask >> 2) & 1) +
633	    ((sc->rxchainmask >> 1) & 1) +
634	    ((sc->rxchainmask >> 0) & 1);
635	if (bootverbose) {
636		device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n",
637		    sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
638		    macaddr, ":");
639	}
640
641	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
642		ic->ic_rxstream = sc->nrxchains;
643		ic->ic_txstream = sc->ntxchains;
644
645		/*
646		 * The NICs we currently support cap out at 2x2 support
647		 * separate from the chains being used.
648		 *
649		 * This is a total hack to work around that until some
650		 * per-device method is implemented to return the
651		 * actual stream support.
652		 */
653		if (ic->ic_rxstream > 2)
654			ic->ic_rxstream = 2;
655		if (ic->ic_txstream > 2)
656			ic->ic_txstream = 2;
657
658		ic->ic_htcaps =
659			  IEEE80211_HTCAP_SMPS_OFF	/* SMPS mode disabled */
660			| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
661			| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width*/
662			| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
663#ifdef notyet
664			| IEEE80211_HTCAP_GREENFIELD
665#if IWN_RBUF_SIZE == 8192
666			| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
667#else
668			| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
669#endif
670#endif
671			/* s/w capabilities */
672			| IEEE80211_HTC_HT		/* HT operation */
673			| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
674#ifdef notyet
675			| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
676#endif
677			;
678	}
679
680	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
681	ifp->if_softc = sc;
682	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
683	ifp->if_init = iwn_init;
684	ifp->if_ioctl = iwn_ioctl;
685	ifp->if_start = iwn_start;
686	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
687	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
688	IFQ_SET_READY(&ifp->if_snd);
689
690	ieee80211_ifattach(ic, macaddr);
691	ic->ic_vap_create = iwn_vap_create;
692	ic->ic_vap_delete = iwn_vap_delete;
693	ic->ic_raw_xmit = iwn_raw_xmit;
694	ic->ic_node_alloc = iwn_node_alloc;
695	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
696	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
697	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
698	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
699	sc->sc_addba_request = ic->ic_addba_request;
700	ic->ic_addba_request = iwn_addba_request;
701	sc->sc_addba_response = ic->ic_addba_response;
702	ic->ic_addba_response = iwn_addba_response;
703	sc->sc_addba_stop = ic->ic_addba_stop;
704	ic->ic_addba_stop = iwn_ampdu_tx_stop;
705	ic->ic_newassoc = iwn_newassoc;
706	ic->ic_wme.wme_update = iwn_updateedca;
707	ic->ic_update_mcast = iwn_update_mcast;
708	ic->ic_scan_start = iwn_scan_start;
709	ic->ic_scan_end = iwn_scan_end;
710	ic->ic_set_channel = iwn_set_channel;
711	ic->ic_scan_curchan = iwn_scan_curchan;
712	ic->ic_scan_mindwell = iwn_scan_mindwell;
713	ic->ic_setregdomain = iwn_setregdomain;
714
715	iwn_radiotap_attach(sc);
716
717	callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0);
718	callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
719	TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc);
720	TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc);
721	TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc);
722
723	iwn_sysctlattach(sc);
724
725	/*
726	 * Hook our interrupt after all initialization is complete.
727	 */
728	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
729	    NULL, iwn_intr, sc, &sc->sc_ih);
730	if (error != 0) {
731		device_printf(dev, "can't establish interrupt, error %d\n",
732		    error);
733		goto fail;
734	}
735
736	if (bootverbose)
737		ieee80211_announce(ic);
738	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
739	return 0;
740fail:
741	iwn_detach(dev);
742	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
743	return error;
744}
745
746static int
747iwn4965_attach(struct iwn_softc *sc, uint16_t pid)
748{
749	struct iwn_ops *ops = &sc->ops;
750
751	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
752	ops->load_firmware = iwn4965_load_firmware;
753	ops->read_eeprom = iwn4965_read_eeprom;
754	ops->post_alive = iwn4965_post_alive;
755	ops->nic_config = iwn4965_nic_config;
756	ops->update_sched = iwn4965_update_sched;
757	ops->get_temperature = iwn4965_get_temperature;
758	ops->get_rssi = iwn4965_get_rssi;
759	ops->set_txpower = iwn4965_set_txpower;
760	ops->init_gains = iwn4965_init_gains;
761	ops->set_gains = iwn4965_set_gains;
762	ops->add_node = iwn4965_add_node;
763	ops->tx_done = iwn4965_tx_done;
764	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
765	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
766	sc->ntxqs = IWN4965_NTXQUEUES;
767	sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE;
768	sc->ndmachnls = IWN4965_NDMACHNLS;
769	sc->broadcast_id = IWN4965_ID_BROADCAST;
770	sc->rxonsz = IWN4965_RXONSZ;
771	sc->schedsz = IWN4965_SCHEDSZ;
772	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
773	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
774	sc->fwsz = IWN4965_FWSZ;
775	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
776	sc->limits = &iwn4965_sensitivity_limits;
777	sc->fwname = "iwn4965fw";
778	/* Override chains masks, ROM is known to be broken. */
779	sc->txchainmask = IWN_ANT_AB;
780	sc->rxchainmask = IWN_ANT_ABC;
781
782	DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__);
783
784	return 0;
785}
786
787static int
788iwn5000_attach(struct iwn_softc *sc, uint16_t pid)
789{
790	struct iwn_ops *ops = &sc->ops;
791
792	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
793
794	ops->load_firmware = iwn5000_load_firmware;
795	ops->read_eeprom = iwn5000_read_eeprom;
796	ops->post_alive = iwn5000_post_alive;
797	ops->nic_config = iwn5000_nic_config;
798	ops->update_sched = iwn5000_update_sched;
799	ops->get_temperature = iwn5000_get_temperature;
800	ops->get_rssi = iwn5000_get_rssi;
801	ops->set_txpower = iwn5000_set_txpower;
802	ops->init_gains = iwn5000_init_gains;
803	ops->set_gains = iwn5000_set_gains;
804	ops->add_node = iwn5000_add_node;
805	ops->tx_done = iwn5000_tx_done;
806	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
807	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
808	sc->ntxqs = IWN5000_NTXQUEUES;
809	sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE;
810	sc->ndmachnls = IWN5000_NDMACHNLS;
811	sc->broadcast_id = IWN5000_ID_BROADCAST;
812	sc->rxonsz = IWN5000_RXONSZ;
813	sc->schedsz = IWN5000_SCHEDSZ;
814	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
815	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
816	sc->fwsz = IWN5000_FWSZ;
817	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
818	sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
819	sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;
820
821	switch (sc->hw_type) {
822	case IWN_HW_REV_TYPE_5100:
823		sc->limits = &iwn5000_sensitivity_limits;
824		sc->fwname = "iwn5000fw";
825		/* Override chains masks, ROM is known to be broken. */
826		sc->txchainmask = IWN_ANT_B;
827		sc->rxchainmask = IWN_ANT_AB;
828		break;
829	case IWN_HW_REV_TYPE_5150:
830		sc->limits = &iwn5150_sensitivity_limits;
831		sc->fwname = "iwn5150fw";
832		break;
833	case IWN_HW_REV_TYPE_5300:
834	case IWN_HW_REV_TYPE_5350:
835		sc->limits = &iwn5000_sensitivity_limits;
836		sc->fwname = "iwn5000fw";
837		break;
838	case IWN_HW_REV_TYPE_1000:
839		sc->limits = &iwn1000_sensitivity_limits;
840		sc->fwname = "iwn1000fw";
841		break;
842	case IWN_HW_REV_TYPE_6000:
843		sc->limits = &iwn6000_sensitivity_limits;
844		sc->fwname = "iwn6000fw";
845		if (pid == 0x422c || pid == 0x4239) {
846			sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
847			/* Override chains masks, ROM is known to be broken. */
848			sc->txchainmask = IWN_ANT_BC;
849			sc->rxchainmask = IWN_ANT_BC;
850		}
851		break;
852	case IWN_HW_REV_TYPE_6050:
853		sc->limits = &iwn6000_sensitivity_limits;
854		sc->fwname = "iwn6050fw";
855		/* Override chains masks, ROM is known to be broken. */
856		sc->txchainmask = IWN_ANT_AB;
857		sc->rxchainmask = IWN_ANT_AB;
858		break;
859	case IWN_HW_REV_TYPE_6005:
860		sc->limits = &iwn6000_sensitivity_limits;
861		if (pid != 0x0082 && pid != 0x0085) {
862			sc->fwname = "iwn6000g2bfw";
863			sc->sc_flags |= IWN_FLAG_ADV_BTCOEX;
864		} else
865			sc->fwname = "iwn6000g2afw";
866		break;
867	default:
868		device_printf(sc->sc_dev, "adapter type %d not supported\n",
869		    sc->hw_type);
870		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
871		return ENOTSUP;
872	}
873	return 0;
874}
875
876/*
877 * Attach the interface to 802.11 radiotap.
878 */
879static void
880iwn_radiotap_attach(struct iwn_softc *sc)
881{
882	struct ifnet *ifp = sc->sc_ifp;
883	struct ieee80211com *ic = ifp->if_l2com;
884	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
885	ieee80211_radiotap_attach(ic,
886	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
887		IWN_TX_RADIOTAP_PRESENT,
888	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
889		IWN_RX_RADIOTAP_PRESENT);
890	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
891}
892
893static void
894iwn_sysctlattach(struct iwn_softc *sc)
895{
896#ifdef	IWN_DEBUG
897	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
898	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
899
900	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
901	    "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug,
902		"control debugging printfs");
903#endif
904}
905
906static struct ieee80211vap *
907iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
908    enum ieee80211_opmode opmode, int flags,
909    const uint8_t bssid[IEEE80211_ADDR_LEN],
910    const uint8_t mac[IEEE80211_ADDR_LEN])
911{
912	struct iwn_vap *ivp;
913	struct ieee80211vap *vap;
914	uint8_t mac1[IEEE80211_ADDR_LEN];
915	struct iwn_softc *sc = ic->ic_ifp->if_softc;
916
917	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
918		return NULL;
919
920	IEEE80211_ADDR_COPY(mac1, mac);
921
922	ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap),
923	    M_80211_VAP, M_NOWAIT | M_ZERO);
924	if (ivp == NULL)
925		return NULL;
926	vap = &ivp->iv_vap;
927	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac1);
928	ivp->ctx = IWN_RXON_BSS_CTX;
929	IEEE80211_ADDR_COPY(ivp->macaddr, mac1);
930	vap->iv_bmissthreshold = 10;		/* override default */
931	/* Override with driver methods. */
932	ivp->iv_newstate = vap->iv_newstate;
933	vap->iv_newstate = iwn_newstate;
934	sc->ivap[IWN_RXON_BSS_CTX] = vap;
935
936	ieee80211_ratectl_init(vap);
937	/* Complete setup. */
938	ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status);
939	ic->ic_opmode = opmode;
940	return vap;
941}
942
943static void
944iwn_vap_delete(struct ieee80211vap *vap)
945{
946	struct iwn_vap *ivp = IWN_VAP(vap);
947
948	ieee80211_ratectl_deinit(vap);
949	ieee80211_vap_detach(vap);
950	free(ivp, M_80211_VAP);
951}
952
953static int
954iwn_detach(device_t dev)
955{
956	struct iwn_softc *sc = device_get_softc(dev);
957	struct ifnet *ifp = sc->sc_ifp;
958	struct ieee80211com *ic;
959	int qid;
960
961	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
962
963	if (ifp != NULL) {
964		ic = ifp->if_l2com;
965
966		ieee80211_draintask(ic, &sc->sc_reinit_task);
967		ieee80211_draintask(ic, &sc->sc_radioon_task);
968		ieee80211_draintask(ic, &sc->sc_radiooff_task);
969
970		iwn_stop(sc);
971		callout_drain(&sc->watchdog_to);
972		callout_drain(&sc->calib_to);
973		ieee80211_ifdetach(ic);
974	}
975
976	/* Uninstall interrupt handler. */
977	if (sc->irq != NULL) {
978		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
979		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
980		if (sc->irq_rid == 1)
981			pci_release_msi(dev);
982	}
983
984	/* Free DMA resources. */
985	iwn_free_rx_ring(sc, &sc->rxq);
986	for (qid = 0; qid < sc->ntxqs; qid++)
987		iwn_free_tx_ring(sc, &sc->txq[qid]);
988	iwn_free_sched(sc);
989	iwn_free_kw(sc);
990	if (sc->ict != NULL)
991		iwn_free_ict(sc);
992	iwn_free_fwmem(sc);
993
994	if (sc->mem != NULL)
995		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
996
997	if (ifp != NULL)
998		if_free(ifp);
999
1000	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__);
1001	IWN_LOCK_DESTROY(sc);
1002	return 0;
1003}
1004
1005static int
1006iwn_shutdown(device_t dev)
1007{
1008	struct iwn_softc *sc = device_get_softc(dev);
1009
1010	iwn_stop(sc);
1011	return 0;
1012}
1013
1014static int
1015iwn_suspend(device_t dev)
1016{
1017	struct iwn_softc *sc = device_get_softc(dev);
1018	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
1019
1020	ieee80211_suspend_all(ic);
1021	return 0;
1022}
1023
1024static int
1025iwn_resume(device_t dev)
1026{
1027	struct iwn_softc *sc = device_get_softc(dev);
1028	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
1029
1030	/* Clear device-specific "PCI retry timeout" register (41h). */
1031	pci_write_config(dev, 0x41, 0, 1);
1032
1033	ieee80211_resume_all(ic);
1034	return 0;
1035}
1036
1037static int
1038iwn_nic_lock(struct iwn_softc *sc)
1039{
1040	int ntries;
1041
1042	/* Request exclusive access to NIC. */
1043	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1044
1045	/* Spin until we actually get the lock. */
1046	for (ntries = 0; ntries < 1000; ntries++) {
1047		if ((IWN_READ(sc, IWN_GP_CNTRL) &
1048		     (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
1049		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
1050			return 0;
1051		DELAY(10);
1052	}
1053	return ETIMEDOUT;
1054}
1055
1056static __inline void
1057iwn_nic_unlock(struct iwn_softc *sc)
1058{
1059	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1060}
1061
1062static __inline uint32_t
1063iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
1064{
1065	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
1066	IWN_BARRIER_READ_WRITE(sc);
1067	return IWN_READ(sc, IWN_PRPH_RDATA);
1068}
1069
1070static __inline void
1071iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1072{
1073	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
1074	IWN_BARRIER_WRITE(sc);
1075	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
1076}
1077
1078static __inline void
1079iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1080{
1081	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
1082}
1083
1084static __inline void
1085iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1086{
1087	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
1088}
1089
1090static __inline void
1091iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
1092    const uint32_t *data, int count)
1093{
1094	for (; count > 0; count--, data++, addr += 4)
1095		iwn_prph_write(sc, addr, *data);
1096}
1097
1098static __inline uint32_t
1099iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
1100{
1101	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
1102	IWN_BARRIER_READ_WRITE(sc);
1103	return IWN_READ(sc, IWN_MEM_RDATA);
1104}
1105
1106static __inline void
1107iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1108{
1109	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
1110	IWN_BARRIER_WRITE(sc);
1111	IWN_WRITE(sc, IWN_MEM_WDATA, data);
1112}
1113
1114static __inline void
1115iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
1116{
1117	uint32_t tmp;
1118
1119	tmp = iwn_mem_read(sc, addr & ~3);
1120	if (addr & 3)
1121		tmp = (tmp & 0x0000ffff) | data << 16;
1122	else
1123		tmp = (tmp & 0xffff0000) | data;
1124	iwn_mem_write(sc, addr & ~3, tmp);
1125}
1126
1127static __inline void
1128iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
1129    int count)
1130{
1131	for (; count > 0; count--, addr += 4)
1132		*data++ = iwn_mem_read(sc, addr);
1133}
1134
1135static __inline void
1136iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
1137    int count)
1138{
1139	for (; count > 0; count--, addr += 4)
1140		iwn_mem_write(sc, addr, val);
1141}
1142
1143static int
1144iwn_eeprom_lock(struct iwn_softc *sc)
1145{
1146	int i, ntries;
1147
1148	for (i = 0; i < 100; i++) {
1149		/* Request exclusive access to EEPROM. */
1150		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
1151		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1152
1153		/* Spin until we actually get the lock. */
1154		for (ntries = 0; ntries < 100; ntries++) {
1155			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
1156			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
1157				return 0;
1158			DELAY(10);
1159		}
1160	}
1161	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__);
1162	return ETIMEDOUT;
1163}
1164
1165static __inline void
1166iwn_eeprom_unlock(struct iwn_softc *sc)
1167{
1168	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1169}
1170
1171/*
1172 * Initialize access by host to One Time Programmable ROM.
1173 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
1174 */
1175static int
1176iwn_init_otprom(struct iwn_softc *sc)
1177{
1178	uint16_t prev, base, next;
1179	int count, error;
1180
1181	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1182
1183	/* Wait for clock stabilization before accessing prph. */
1184	if ((error = iwn_clock_wait(sc)) != 0)
1185		return error;
1186
1187	if ((error = iwn_nic_lock(sc)) != 0)
1188		return error;
1189	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1190	DELAY(5);
1191	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1192	iwn_nic_unlock(sc);
1193
1194	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1195	if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
1196		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1197		    IWN_RESET_LINK_PWR_MGMT_DIS);
1198	}
1199	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1200	/* Clear ECC status. */
1201	IWN_SETBITS(sc, IWN_OTP_GP,
1202	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1203
1204	/*
1205	 * Find the block before last block (contains the EEPROM image)
1206	 * for HW without OTP shadow RAM.
1207	 */
1208	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
1209		/* Switch to absolute addressing mode. */
1210		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1211		base = prev = 0;
1212		for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
1213			error = iwn_read_prom_data(sc, base, &next, 2);
1214			if (error != 0)
1215				return error;
1216			if (next == 0)	/* End of linked-list. */
1217				break;
1218			prev = base;
1219			base = le16toh(next);
1220		}
1221		if (count == 0 || count == IWN1000_OTP_NBLOCKS)
1222			return EIO;
1223		/* Skip "next" word. */
1224		sc->prom_base = prev + 1;
1225	}
1226
1227	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1228
1229	return 0;
1230}
1231
1232static int
1233iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1234{
1235	uint8_t *out = data;
1236	uint32_t val, tmp;
1237	int ntries;
1238
1239	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1240
1241	addr += sc->prom_base;
1242	for (; count > 0; count -= 2, addr++) {
1243		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1244		for (ntries = 0; ntries < 10; ntries++) {
1245			val = IWN_READ(sc, IWN_EEPROM);
1246			if (val & IWN_EEPROM_READ_VALID)
1247				break;
1248			DELAY(5);
1249		}
1250		if (ntries == 10) {
1251			device_printf(sc->sc_dev,
1252			    "timeout reading ROM at 0x%x\n", addr);
1253			return ETIMEDOUT;
1254		}
1255		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1256			/* OTPROM, check for ECC errors. */
1257			tmp = IWN_READ(sc, IWN_OTP_GP);
1258			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1259				device_printf(sc->sc_dev,
1260				    "OTPROM ECC error at 0x%x\n", addr);
1261				return EIO;
1262			}
1263			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1264				/* Correctable ECC error, clear bit. */
1265				IWN_SETBITS(sc, IWN_OTP_GP,
1266				    IWN_OTP_GP_ECC_CORR_STTS);
1267			}
1268		}
1269		*out++ = val >> 16;
1270		if (count > 1)
1271			*out++ = val >> 24;
1272	}
1273
1274	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1275
1276	return 0;
1277}
1278
1279static void
1280iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1281{
1282	if (error != 0)
1283		return;
1284	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1285	*(bus_addr_t *)arg = segs[0].ds_addr;
1286}
1287
1288static int
1289iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
1290    void **kvap, bus_size_t size, bus_size_t alignment)
1291{
1292	int error;
1293
1294	dma->tag = NULL;
1295	dma->size = size;
1296
1297	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
1298	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1299	    1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
1300	if (error != 0)
1301		goto fail;
1302
1303	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
1304	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
1305	if (error != 0)
1306		goto fail;
1307
1308	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
1309	    iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
1310	if (error != 0)
1311		goto fail;
1312
1313	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
1314
1315	if (kvap != NULL)
1316		*kvap = dma->vaddr;
1317
1318	return 0;
1319
1320fail:	iwn_dma_contig_free(dma);
1321	return error;
1322}
1323
1324static void
1325iwn_dma_contig_free(struct iwn_dma_info *dma)
1326{
1327	if (dma->map != NULL) {
1328		if (dma->vaddr != NULL) {
1329			bus_dmamap_sync(dma->tag, dma->map,
1330			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1331			bus_dmamap_unload(dma->tag, dma->map);
1332			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
1333			dma->vaddr = NULL;
1334		}
1335		bus_dmamap_destroy(dma->tag, dma->map);
1336		dma->map = NULL;
1337	}
1338	if (dma->tag != NULL) {
1339		bus_dma_tag_destroy(dma->tag);
1340		dma->tag = NULL;
1341	}
1342}
1343
1344static int
1345iwn_alloc_sched(struct iwn_softc *sc)
1346{
1347	/* TX scheduler rings must be aligned on a 1KB boundary. */
1348	return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched,
1349	    sc->schedsz, 1024);
1350}
1351
1352static void
1353iwn_free_sched(struct iwn_softc *sc)
1354{
1355	iwn_dma_contig_free(&sc->sched_dma);
1356}
1357
1358static int
1359iwn_alloc_kw(struct iwn_softc *sc)
1360{
1361	/* "Keep Warm" page must be aligned on a 4KB boundary. */
1362	return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096);
1363}
1364
1365static void
1366iwn_free_kw(struct iwn_softc *sc)
1367{
1368	iwn_dma_contig_free(&sc->kw_dma);
1369}
1370
1371static int
1372iwn_alloc_ict(struct iwn_softc *sc)
1373{
1374	/* ICT table must be aligned on a 4KB boundary. */
1375	return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict,
1376	    IWN_ICT_SIZE, 4096);
1377}
1378
1379static void
1380iwn_free_ict(struct iwn_softc *sc)
1381{
1382	iwn_dma_contig_free(&sc->ict_dma);
1383}
1384
1385static int
1386iwn_alloc_fwmem(struct iwn_softc *sc)
1387{
1388	/* Must be aligned on a 16-byte boundary. */
1389	return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16);
1390}
1391
1392static void
1393iwn_free_fwmem(struct iwn_softc *sc)
1394{
1395	iwn_dma_contig_free(&sc->fw_dma);
1396}
1397
1398static int
1399iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1400{
1401	bus_size_t size;
1402	int i, error;
1403
1404	ring->cur = 0;
1405
1406	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1407
1408	/* Allocate RX descriptors (256-byte aligned). */
1409	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1410	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1411	    size, 256);
1412	if (error != 0) {
1413		device_printf(sc->sc_dev,
1414		    "%s: could not allocate RX ring DMA memory, error %d\n",
1415		    __func__, error);
1416		goto fail;
1417	}
1418
1419	/* Allocate RX status area (16-byte aligned). */
1420	error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat,
1421	    sizeof (struct iwn_rx_status), 16);
1422	if (error != 0) {
1423		device_printf(sc->sc_dev,
1424		    "%s: could not allocate RX status DMA memory, error %d\n",
1425		    __func__, error);
1426		goto fail;
1427	}
1428
1429	/* Create RX buffer DMA tag. */
1430	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1431	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1432	    IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL,
1433	    &ring->data_dmat);
1434	if (error != 0) {
1435		device_printf(sc->sc_dev,
1436		    "%s: could not create RX buf DMA tag, error %d\n",
1437		    __func__, error);
1438		goto fail;
1439	}
1440
1441	/*
1442	 * Allocate and map RX buffers.
1443	 */
1444	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1445		struct iwn_rx_data *data = &ring->data[i];
1446		bus_addr_t paddr;
1447
1448		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1449		if (error != 0) {
1450			device_printf(sc->sc_dev,
1451			    "%s: could not create RX buf DMA map, error %d\n",
1452			    __func__, error);
1453			goto fail;
1454		}
1455
1456		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1457		    IWN_RBUF_SIZE);
1458		if (data->m == NULL) {
1459			device_printf(sc->sc_dev,
1460			    "%s: could not allocate RX mbuf\n", __func__);
1461			error = ENOBUFS;
1462			goto fail;
1463		}
1464
1465		error = bus_dmamap_load(ring->data_dmat, data->map,
1466		    mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
1467		    &paddr, BUS_DMA_NOWAIT);
1468		if (error != 0 && error != EFBIG) {
1469			device_printf(sc->sc_dev,
1470			    "%s: can't not map mbuf, error %d\n", __func__,
1471			    error);
1472			goto fail;
1473		}
1474
1475		/* Set physical address of RX buffer (256-byte aligned). */
1476		ring->desc[i] = htole32(paddr >> 8);
1477	}
1478
1479	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1480	    BUS_DMASYNC_PREWRITE);
1481
1482	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
1483
1484	return 0;
1485
1486fail:	iwn_free_rx_ring(sc, ring);
1487
1488	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
1489
1490	return error;
1491}
1492
1493static void
1494iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1495{
1496	int ntries;
1497
1498	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
1499
1500	if (iwn_nic_lock(sc) == 0) {
1501		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1502		for (ntries = 0; ntries < 1000; ntries++) {
1503			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1504			    IWN_FH_RX_STATUS_IDLE)
1505				break;
1506			DELAY(10);
1507		}
1508		iwn_nic_unlock(sc);
1509	}
1510	ring->cur = 0;
1511	sc->last_rx_valid = 0;
1512}
1513
1514static void
1515iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1516{
1517	int i;
1518
1519	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);
1520
1521	iwn_dma_contig_free(&ring->desc_dma);
1522	iwn_dma_contig_free(&ring->stat_dma);
1523
1524	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1525		struct iwn_rx_data *data = &ring->data[i];
1526
1527		if (data->m != NULL) {
1528			bus_dmamap_sync(ring->data_dmat, data->map,
1529			    BUS_DMASYNC_POSTREAD);
1530			bus_dmamap_unload(ring->data_dmat, data->map);
1531			m_freem(data->m);
1532			data->m = NULL;
1533		}
1534		if (data->map != NULL)
1535			bus_dmamap_destroy(ring->data_dmat, data->map);
1536	}
1537	if (ring->data_dmat != NULL) {
1538		bus_dma_tag_destroy(ring->data_dmat);
1539		ring->data_dmat = NULL;
1540	}
1541}
1542
1543static int
1544iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1545{
1546	bus_addr_t paddr;
1547	bus_size_t size;
1548	int i, error;
1549
1550	ring->qid = qid;
1551	ring->queued = 0;
1552	ring->cur = 0;
1553
1554	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1555
1556	/* Allocate TX descriptors (256-byte aligned). */
1557	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
1558	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1559	    size, 256);
1560	if (error != 0) {
1561		device_printf(sc->sc_dev,
1562		    "%s: could not allocate TX ring DMA memory, error %d\n",
1563		    __func__, error);
1564		goto fail;
1565	}
1566
1567	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
1568	error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
1569	    size, 4);
1570	if (error != 0) {
1571		device_printf(sc->sc_dev,
1572		    "%s: could not allocate TX cmd DMA memory, error %d\n",
1573		    __func__, error);
1574		goto fail;
1575	}
1576
1577	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1578	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1579	    IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
1580	    &ring->data_dmat);
1581	if (error != 0) {
1582		device_printf(sc->sc_dev,
1583		    "%s: could not create TX buf DMA tag, error %d\n",
1584		    __func__, error);
1585		goto fail;
1586	}
1587
1588	paddr = ring->cmd_dma.paddr;
1589	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1590		struct iwn_tx_data *data = &ring->data[i];
1591
1592		data->cmd_paddr = paddr;
1593		data->scratch_paddr = paddr + 12;
1594		paddr += sizeof (struct iwn_tx_cmd);
1595
1596		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1597		if (error != 0) {
1598			device_printf(sc->sc_dev,
1599			    "%s: could not create TX buf DMA map, error %d\n",
1600			    __func__, error);
1601			goto fail;
1602		}
1603	}
1604
1605	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1606
1607	return 0;
1608
1609fail:	iwn_free_tx_ring(sc, ring);
1610	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
1611	return error;
1612}
1613
1614static void
1615iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1616{
1617	int i;
1618
1619	DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__);
1620
1621	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1622		struct iwn_tx_data *data = &ring->data[i];
1623
1624		if (data->m != NULL) {
1625			bus_dmamap_sync(ring->data_dmat, data->map,
1626			    BUS_DMASYNC_POSTWRITE);
1627			bus_dmamap_unload(ring->data_dmat, data->map);
1628			m_freem(data->m);
1629			data->m = NULL;
1630		}
1631	}
1632	/* Clear TX descriptors. */
1633	memset(ring->desc, 0, ring->desc_dma.size);
1634	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1635	    BUS_DMASYNC_PREWRITE);
1636	sc->qfullmsk &= ~(1 << ring->qid);
1637	ring->queued = 0;
1638	ring->cur = 0;
1639}
1640
1641static void
1642iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1643{
1644	int i;
1645
1646	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);
1647
1648	iwn_dma_contig_free(&ring->desc_dma);
1649	iwn_dma_contig_free(&ring->cmd_dma);
1650
1651	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1652		struct iwn_tx_data *data = &ring->data[i];
1653
1654		if (data->m != NULL) {
1655			bus_dmamap_sync(ring->data_dmat, data->map,
1656			    BUS_DMASYNC_POSTWRITE);
1657			bus_dmamap_unload(ring->data_dmat, data->map);
1658			m_freem(data->m);
1659		}
1660		if (data->map != NULL)
1661			bus_dmamap_destroy(ring->data_dmat, data->map);
1662	}
1663	if (ring->data_dmat != NULL) {
1664		bus_dma_tag_destroy(ring->data_dmat);
1665		ring->data_dmat = NULL;
1666	}
1667}
1668
1669static void
1670iwn5000_ict_reset(struct iwn_softc *sc)
1671{
1672	/* Disable interrupts. */
1673	IWN_WRITE(sc, IWN_INT_MASK, 0);
1674
1675	/* Reset ICT table. */
1676	memset(sc->ict, 0, IWN_ICT_SIZE);
1677	sc->ict_cur = 0;
1678
1679	/* Set physical address of ICT table (4KB aligned). */
1680	DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
1681	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
1682	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
1683
1684	/* Enable periodic RX interrupt. */
1685	sc->int_mask |= IWN_INT_RX_PERIODIC;
1686	/* Switch to ICT interrupt mode in driver. */
1687	sc->sc_flags |= IWN_FLAG_USE_ICT;
1688
1689	/* Re-enable interrupts. */
1690	IWN_WRITE(sc, IWN_INT, 0xffffffff);
1691	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
1692}
1693
1694static int
1695iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
1696{
1697	struct iwn_ops *ops = &sc->ops;
1698	uint16_t val;
1699	int error;
1700
1701	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1702
1703	/* Check whether adapter has an EEPROM or an OTPROM. */
1704	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
1705	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
1706		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
1707	DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
1708	    (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
1709
1710	/* Adapter has to be powered on for EEPROM access to work. */
1711	if ((error = iwn_apm_init(sc)) != 0) {
1712		device_printf(sc->sc_dev,
1713		    "%s: could not power ON adapter, error %d\n", __func__,
1714		    error);
1715		return error;
1716	}
1717
1718	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
1719		device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
1720		return EIO;
1721	}
1722	if ((error = iwn_eeprom_lock(sc)) != 0) {
1723		device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
1724		    __func__, error);
1725		return error;
1726	}
1727	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1728		if ((error = iwn_init_otprom(sc)) != 0) {
1729			device_printf(sc->sc_dev,
1730			    "%s: could not initialize OTPROM, error %d\n",
1731			    __func__, error);
1732			return error;
1733		}
1734	}
1735
1736	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
1737	DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
1738	/* Check if HT support is bonded out. */
1739	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
1740		sc->sc_flags |= IWN_FLAG_HAS_11N;
1741
1742	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
1743	sc->rfcfg = le16toh(val);
1744	DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
1745	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
1746	if (sc->txchainmask == 0)
1747		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
1748	if (sc->rxchainmask == 0)
1749		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
1750
1751	/* Read MAC address. */
1752	iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
1753
1754	/* Read adapter-specific information from EEPROM. */
1755	ops->read_eeprom(sc);
1756
1757	iwn_apm_stop(sc);	/* Power OFF adapter. */
1758
1759	iwn_eeprom_unlock(sc);
1760
1761	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1762
1763	return 0;
1764}
1765
1766static void
1767iwn4965_read_eeprom(struct iwn_softc *sc)
1768{
1769	uint32_t addr;
1770	uint16_t val;
1771	int i;
1772
1773	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1774
1775	/* Read regulatory domain (4 ASCII characters). */
1776	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
1777
1778	/* Read the list of authorized channels (20MHz ones only). */
1779	for (i = 0; i < 7; i++) {
1780		addr = iwn4965_regulatory_bands[i];
1781		iwn_read_eeprom_channels(sc, i, addr);
1782	}
1783
1784	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
1785	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
1786	sc->maxpwr2GHz = val & 0xff;
1787	sc->maxpwr5GHz = val >> 8;
1788	/* Check that EEPROM values are within valid range. */
1789	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
1790		sc->maxpwr5GHz = 38;
1791	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
1792		sc->maxpwr2GHz = 38;
1793	DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
1794	    sc->maxpwr2GHz, sc->maxpwr5GHz);
1795
1796	/* Read samples for each TX power group. */
1797	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
1798	    sizeof sc->bands);
1799
1800	/* Read voltage at which samples were taken. */
1801	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
1802	sc->eeprom_voltage = (int16_t)le16toh(val);
1803	DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
1804	    sc->eeprom_voltage);
1805
1806#ifdef IWN_DEBUG
1807	/* Print samples. */
1808	if (sc->sc_debug & IWN_DEBUG_ANY) {
1809		for (i = 0; i < IWN_NBANDS; i++)
1810			iwn4965_print_power_group(sc, i);
1811	}
1812#endif
1813
1814	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1815}
1816
1817#ifdef IWN_DEBUG
1818static void
1819iwn4965_print_power_group(struct iwn_softc *sc, int i)
1820{
1821	struct iwn4965_eeprom_band *band = &sc->bands[i];
1822	struct iwn4965_eeprom_chan_samples *chans = band->chans;
1823	int j, c;
1824
1825	printf("===band %d===\n", i);
1826	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
1827	printf("chan1 num=%d\n", chans[0].num);
1828	for (c = 0; c < 2; c++) {
1829		for (j = 0; j < IWN_NSAMPLES; j++) {
1830			printf("chain %d, sample %d: temp=%d gain=%d "
1831			    "power=%d pa_det=%d\n", c, j,
1832			    chans[0].samples[c][j].temp,
1833			    chans[0].samples[c][j].gain,
1834			    chans[0].samples[c][j].power,
1835			    chans[0].samples[c][j].pa_det);
1836		}
1837	}
1838	printf("chan2 num=%d\n", chans[1].num);
1839	for (c = 0; c < 2; c++) {
1840		for (j = 0; j < IWN_NSAMPLES; j++) {
1841			printf("chain %d, sample %d: temp=%d gain=%d "
1842			    "power=%d pa_det=%d\n", c, j,
1843			    chans[1].samples[c][j].temp,
1844			    chans[1].samples[c][j].gain,
1845			    chans[1].samples[c][j].power,
1846			    chans[1].samples[c][j].pa_det);
1847		}
1848	}
1849}
1850#endif
1851
1852static void
1853iwn5000_read_eeprom(struct iwn_softc *sc)
1854{
1855	struct iwn5000_eeprom_calib_hdr hdr;
1856	int32_t volt;
1857	uint32_t base, addr;
1858	uint16_t val;
1859	int i;
1860
1861	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1862
1863	/* Read regulatory domain (4 ASCII characters). */
1864	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1865	base = le16toh(val);
1866	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
1867	    sc->eeprom_domain, 4);
1868
1869	/* Read the list of authorized channels (20MHz ones only). */
1870	for (i = 0; i < 7; i++) {
1871		if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1872			addr = base + iwn6000_regulatory_bands[i];
1873		else
1874			addr = base + iwn5000_regulatory_bands[i];
1875		iwn_read_eeprom_channels(sc, i, addr);
1876	}
1877
1878	/* Read enhanced TX power information for 6000 Series. */
1879	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1880		iwn_read_eeprom_enhinfo(sc);
1881
1882	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
1883	base = le16toh(val);
1884	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
1885	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
1886	    "%s: calib version=%u pa type=%u voltage=%u\n", __func__,
1887	    hdr.version, hdr.pa_type, le16toh(hdr.volt));
1888	sc->calib_ver = hdr.version;
1889
1890	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
1891		/* Compute temperature offset. */
1892		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1893		sc->eeprom_temp = le16toh(val);
1894		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
1895		volt = le16toh(val);
1896		sc->temp_off = sc->eeprom_temp - (volt / -5);
1897		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
1898		    sc->eeprom_temp, volt, sc->temp_off);
1899	} else {
1900		/* Read crystal calibration. */
1901		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
1902		    &sc->eeprom_crystal, sizeof (uint32_t));
1903		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
1904		    le32toh(sc->eeprom_crystal));
1905	}
1906
1907	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1908
1909}
1910
1911/*
1912 * Translate EEPROM flags to net80211.
1913 */
1914static uint32_t
1915iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
1916{
1917	uint32_t nflags;
1918
1919	nflags = 0;
1920	if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
1921		nflags |= IEEE80211_CHAN_PASSIVE;
1922	if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
1923		nflags |= IEEE80211_CHAN_NOADHOC;
1924	if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
1925		nflags |= IEEE80211_CHAN_DFS;
1926		/* XXX apparently IBSS may still be marked */
1927		nflags |= IEEE80211_CHAN_NOADHOC;
1928	}
1929
1930	return nflags;
1931}
1932
1933static void
1934iwn_read_eeprom_band(struct iwn_softc *sc, int n)
1935{
1936	struct ifnet *ifp = sc->sc_ifp;
1937	struct ieee80211com *ic = ifp->if_l2com;
1938	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1939	const struct iwn_chan_band *band = &iwn_bands[n];
1940	struct ieee80211_channel *c;
1941	uint8_t chan;
1942	int i, nflags;
1943
1944	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1945
1946	for (i = 0; i < band->nchan; i++) {
1947		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
1948			DPRINTF(sc, IWN_DEBUG_RESET,
1949			    "skip chan %d flags 0x%x maxpwr %d\n",
1950			    band->chan[i], channels[i].flags,
1951			    channels[i].maxpwr);
1952			continue;
1953		}
1954		chan = band->chan[i];
1955		nflags = iwn_eeprom_channel_flags(&channels[i]);
1956
1957		c = &ic->ic_channels[ic->ic_nchans++];
1958		c->ic_ieee = chan;
1959		c->ic_maxregpower = channels[i].maxpwr;
1960		c->ic_maxpower = 2*c->ic_maxregpower;
1961
1962		if (n == 0) {	/* 2GHz band */
1963			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G);
1964			/* G =>'s B is supported */
1965			c->ic_flags = IEEE80211_CHAN_B | nflags;
1966			c = &ic->ic_channels[ic->ic_nchans++];
1967			c[0] = c[-1];
1968			c->ic_flags = IEEE80211_CHAN_G | nflags;
1969		} else {	/* 5GHz band */
1970			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A);
1971			c->ic_flags = IEEE80211_CHAN_A | nflags;
1972		}
1973
1974		/* Save maximum allowed TX power for this channel. */
1975		sc->maxpwr[chan] = channels[i].maxpwr;
1976
1977		DPRINTF(sc, IWN_DEBUG_RESET,
1978		    "add chan %d flags 0x%x maxpwr %d\n", chan,
1979		    channels[i].flags, channels[i].maxpwr);
1980
1981		if (sc->sc_flags & IWN_FLAG_HAS_11N) {
1982			/* add HT20, HT40 added separately */
1983			c = &ic->ic_channels[ic->ic_nchans++];
1984			c[0] = c[-1];
1985			c->ic_flags |= IEEE80211_CHAN_HT20;
1986		}
1987	}
1988
1989	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1990
1991}
1992
1993static void
1994iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
1995{
1996	struct ifnet *ifp = sc->sc_ifp;
1997	struct ieee80211com *ic = ifp->if_l2com;
1998	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1999	const struct iwn_chan_band *band = &iwn_bands[n];
2000	struct ieee80211_channel *c, *cent, *extc;
2001	uint8_t chan;
2002	int i, nflags;
2003
2004	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__);
2005
2006	if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) {
2007		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__);
2008		return;
2009	}
2010
2011	for (i = 0; i < band->nchan; i++) {
2012		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
2013			DPRINTF(sc, IWN_DEBUG_RESET,
2014			    "skip chan %d flags 0x%x maxpwr %d\n",
2015			    band->chan[i], channels[i].flags,
2016			    channels[i].maxpwr);
2017			continue;
2018		}
2019		chan = band->chan[i];
2020		nflags = iwn_eeprom_channel_flags(&channels[i]);
2021
2022		/*
2023		 * Each entry defines an HT40 channel pair; find the
2024		 * center channel, then the extension channel above.
2025		 */
2026		cent = ieee80211_find_channel_byieee(ic, chan,
2027		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
2028		if (cent == NULL) {	/* XXX shouldn't happen */
2029			device_printf(sc->sc_dev,
2030			    "%s: no entry for channel %d\n", __func__, chan);
2031			continue;
2032		}
2033		extc = ieee80211_find_channel(ic, cent->ic_freq+20,
2034		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
2035		if (extc == NULL) {
2036			DPRINTF(sc, IWN_DEBUG_RESET,
2037			    "%s: skip chan %d, extension channel not found\n",
2038			    __func__, chan);
2039			continue;
2040		}
2041
2042		DPRINTF(sc, IWN_DEBUG_RESET,
2043		    "add ht40 chan %d flags 0x%x maxpwr %d\n",
2044		    chan, channels[i].flags, channels[i].maxpwr);
2045
2046		c = &ic->ic_channels[ic->ic_nchans++];
2047		c[0] = cent[0];
2048		c->ic_extieee = extc->ic_ieee;
2049		c->ic_flags &= ~IEEE80211_CHAN_HT;
2050		c->ic_flags |= IEEE80211_CHAN_HT40U | nflags;
2051		c = &ic->ic_channels[ic->ic_nchans++];
2052		c[0] = extc[0];
2053		c->ic_extieee = cent->ic_ieee;
2054		c->ic_flags &= ~IEEE80211_CHAN_HT;
2055		c->ic_flags |= IEEE80211_CHAN_HT40D | nflags;
2056	}
2057
2058	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2059
2060}
2061
2062static void
2063iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
2064{
2065	struct ifnet *ifp = sc->sc_ifp;
2066	struct ieee80211com *ic = ifp->if_l2com;
2067
2068	iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
2069	    iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));
2070
2071	if (n < 5)
2072		iwn_read_eeprom_band(sc, n);
2073	else
2074		iwn_read_eeprom_ht40(sc, n);
2075	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
2076}
2077
2078static struct iwn_eeprom_chan *
2079iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
2080{
2081	int band, chan, i, j;
2082
2083	if (IEEE80211_IS_CHAN_HT40(c)) {
2084		band = IEEE80211_IS_CHAN_5GHZ(c) ? 6 : 5;
2085		if (IEEE80211_IS_CHAN_HT40D(c))
2086			chan = c->ic_extieee;
2087		else
2088			chan = c->ic_ieee;
2089		for (i = 0; i < iwn_bands[band].nchan; i++) {
2090			if (iwn_bands[band].chan[i] == chan)
2091				return &sc->eeprom_channels[band][i];
2092		}
2093	} else {
2094		for (j = 0; j < 5; j++) {
2095			for (i = 0; i < iwn_bands[j].nchan; i++) {
2096				if (iwn_bands[j].chan[i] == c->ic_ieee)
2097					return &sc->eeprom_channels[j][i];
2098			}
2099		}
2100	}
2101	return NULL;
2102}
2103
2104/*
2105 * Enforce flags read from EEPROM.
2106 */
2107static int
2108iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
2109    int nchan, struct ieee80211_channel chans[])
2110{
2111	struct iwn_softc *sc = ic->ic_ifp->if_softc;
2112	int i;
2113
2114	for (i = 0; i < nchan; i++) {
2115		struct ieee80211_channel *c = &chans[i];
2116		struct iwn_eeprom_chan *channel;
2117
2118		channel = iwn_find_eeprom_channel(sc, c);
2119		if (channel == NULL) {
2120			if_printf(ic->ic_ifp,
2121			    "%s: invalid channel %u freq %u/0x%x\n",
2122			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
2123			return EINVAL;
2124		}
2125		c->ic_flags |= iwn_eeprom_channel_flags(channel);
2126	}
2127
2128	return 0;
2129}
2130
2131static void
2132iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
2133{
2134	struct iwn_eeprom_enhinfo enhinfo[35];
2135	struct ifnet *ifp = sc->sc_ifp;
2136	struct ieee80211com *ic = ifp->if_l2com;
2137	struct ieee80211_channel *c;
2138	uint16_t val, base;
2139	int8_t maxpwr;
2140	uint8_t flags;
2141	int i, j;
2142
2143	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2144
2145	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2146	base = le16toh(val);
2147	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
2148	    enhinfo, sizeof enhinfo);
2149
2150	for (i = 0; i < nitems(enhinfo); i++) {
2151		flags = enhinfo[i].flags;
2152		if (!(flags & IWN_ENHINFO_VALID))
2153			continue;	/* Skip invalid entries. */
2154
2155		maxpwr = 0;
2156		if (sc->txchainmask & IWN_ANT_A)
2157			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
2158		if (sc->txchainmask & IWN_ANT_B)
2159			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
2160		if (sc->txchainmask & IWN_ANT_C)
2161			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
2162		if (sc->ntxchains == 2)
2163			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
2164		else if (sc->ntxchains == 3)
2165			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
2166
2167		for (j = 0; j < ic->ic_nchans; j++) {
2168			c = &ic->ic_channels[j];
2169			if ((flags & IWN_ENHINFO_5GHZ)) {
2170				if (!IEEE80211_IS_CHAN_A(c))
2171					continue;
2172			} else if ((flags & IWN_ENHINFO_OFDM)) {
2173				if (!IEEE80211_IS_CHAN_G(c))
2174					continue;
2175			} else if (!IEEE80211_IS_CHAN_B(c))
2176				continue;
2177			if ((flags & IWN_ENHINFO_HT40)) {
2178				if (!IEEE80211_IS_CHAN_HT40(c))
2179					continue;
2180			} else {
2181				if (IEEE80211_IS_CHAN_HT40(c))
2182					continue;
2183			}
2184			if (enhinfo[i].chan != 0 &&
2185			    enhinfo[i].chan != c->ic_ieee)
2186				continue;
2187
2188			DPRINTF(sc, IWN_DEBUG_RESET,
2189			    "channel %d(%x), maxpwr %d\n", c->ic_ieee,
2190			    c->ic_flags, maxpwr / 2);
2191			c->ic_maxregpower = maxpwr / 2;
2192			c->ic_maxpower = maxpwr;
2193		}
2194	}
2195
2196	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2197
2198}
2199
2200static struct ieee80211_node *
2201iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2202{
2203	return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO);
2204}
2205
2206static __inline int
2207rate2plcp(int rate)
2208{
2209	switch (rate & 0xff) {
2210	case 12:	return 0xd;
2211	case 18:	return 0xf;
2212	case 24:	return 0x5;
2213	case 36:	return 0x7;
2214	case 48:	return 0x9;
2215	case 72:	return 0xb;
2216	case 96:	return 0x1;
2217	case 108:	return 0x3;
2218	case 2:		return 10;
2219	case 4:		return 20;
2220	case 11:	return 55;
2221	case 22:	return 110;
2222	}
2223	return 0;
2224}
2225
2226/*
2227 * Calculate the required PLCP value from the given rate,
2228 * to the given node.
2229 *
2230 * This will take the node configuration (eg 11n, rate table
2231 * setup, etc) into consideration.
2232 */
2233static uint32_t
2234iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni,
2235    uint8_t rate)
2236{
2237#define	RV(v)	((v) & IEEE80211_RATE_VAL)
2238	struct ieee80211com *ic = ni->ni_ic;
2239	uint8_t txant1, txant2;
2240	uint32_t plcp = 0;
2241	int ridx;
2242
2243	/* Use the first valid TX antenna. */
2244	txant1 = IWN_LSB(sc->txchainmask);
2245	txant2 = IWN_LSB(sc->txchainmask & ~txant1);
2246
2247	/*
2248	 * If it's an MCS rate, let's set the plcp correctly
2249	 * and set the relevant flags based on the node config.
2250	 */
2251	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
2252		/*
2253		 * Set the initial PLCP value to be between 0->31 for
2254		 * MCS 0 -> MCS 31, then set the "I'm an MCS rate!"
2255		 * flag.
2256		 */
2257		plcp = RV(rate) | IWN_RFLAG_MCS;
2258
2259		/*
2260		 * XXX the following should only occur if both
2261		 * the local configuration _and_ the remote node
2262		 * advertise these capabilities.  Thus this code
2263		 * may need fixing!
2264		 */
2265
2266		/*
2267		 * Set the channel width and guard interval.
2268		 */
2269		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
2270			plcp |= IWN_RFLAG_HT40;
2271			if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
2272				plcp |= IWN_RFLAG_SGI;
2273		} else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
2274			plcp |= IWN_RFLAG_SGI;
2275		}
2276
2277		/*
2278		 * If it's a two stream rate, enable TX on both
2279		 * antennas.
2280		 *
2281		 * XXX three stream rates?
2282		 */
2283		if (rate > 0x87)
2284			plcp |= IWN_RFLAG_ANT(txant1 | txant2);
2285		else
2286			plcp |= IWN_RFLAG_ANT(txant1);
2287	} else {
2288		/*
2289		 * Set the initial PLCP - fine for both
2290		 * OFDM and CCK rates.
2291		 */
2292		plcp = rate2plcp(rate);
2293
2294		/* Set CCK flag if it's CCK */
2295
2296		/* XXX It would be nice to have a method
2297		 * to map the ridx -> phy table entry
2298		 * so we could just query that, rather than
2299		 * this hack to check against IWN_RIDX_OFDM6.
2300		 */
2301		ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
2302		    rate & IEEE80211_RATE_VAL);
2303		if (ridx < IWN_RIDX_OFDM6 &&
2304		    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
2305			plcp |= IWN_RFLAG_CCK;
2306
2307		/* Set antenna configuration */
2308		plcp |= IWN_RFLAG_ANT(txant1);
2309	}
2310
2311	DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n",
2312	    __func__,
2313	    rate,
2314	    plcp);
2315
2316	return (htole32(plcp));
2317#undef	RV
2318}
2319
2320static void
2321iwn_newassoc(struct ieee80211_node *ni, int isnew)
2322{
2323	/* Doesn't do anything at the moment */
2324}
2325
2326static int
2327iwn_media_change(struct ifnet *ifp)
2328{
2329	int error;
2330
2331	error = ieee80211_media_change(ifp);
2332	/* NB: only the fixed rate can change and that doesn't need a reset */
2333	return (error == ENETRESET ? 0 : error);
2334}
2335
2336static int
2337iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
2338{
2339	struct iwn_vap *ivp = IWN_VAP(vap);
2340	struct ieee80211com *ic = vap->iv_ic;
2341	struct iwn_softc *sc = ic->ic_ifp->if_softc;
2342	int error = 0;
2343
2344	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2345
2346	DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
2347	    ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]);
2348
2349	IEEE80211_UNLOCK(ic);
2350	IWN_LOCK(sc);
2351	callout_stop(&sc->calib_to);
2352
2353	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
2354
2355	switch (nstate) {
2356	case IEEE80211_S_ASSOC:
2357		if (vap->iv_state != IEEE80211_S_RUN)
2358			break;
2359		/* FALLTHROUGH */
2360	case IEEE80211_S_AUTH:
2361		if (vap->iv_state == IEEE80211_S_AUTH)
2362			break;
2363
2364		/*
2365		 * !AUTH -> AUTH transition requires state reset to handle
2366		 * reassociations correctly.
2367		 */
2368		sc->rxon->associd = 0;
2369		sc->rxon->filter &= ~htole32(IWN_FILTER_BSS);
2370		sc->calib.state = IWN_CALIB_STATE_INIT;
2371
2372		if ((error = iwn_auth(sc, vap)) != 0) {
2373			device_printf(sc->sc_dev,
2374			    "%s: could not move to auth state\n", __func__);
2375		}
2376		break;
2377
2378	case IEEE80211_S_RUN:
2379		/*
2380		 * RUN -> RUN transition; Just restart the timers.
2381		 */
2382		if (vap->iv_state == IEEE80211_S_RUN) {
2383			sc->calib_cnt = 0;
2384			break;
2385		}
2386
2387		/*
2388		 * !RUN -> RUN requires setting the association id
2389		 * which is done with a firmware cmd.  We also defer
2390		 * starting the timers until that work is done.
2391		 */
2392		if ((error = iwn_run(sc, vap)) != 0) {
2393			device_printf(sc->sc_dev,
2394			    "%s: could not move to run state\n", __func__);
2395		}
2396		break;
2397
2398	case IEEE80211_S_INIT:
2399		sc->calib.state = IWN_CALIB_STATE_INIT;
2400		break;
2401
2402	default:
2403		break;
2404	}
2405	IWN_UNLOCK(sc);
2406	IEEE80211_LOCK(ic);
2407	if (error != 0){
2408		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
2409		return error;
2410	}
2411
2412	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
2413
2414	return ivp->iv_newstate(vap, nstate, arg);
2415}
2416
2417static void
2418iwn_calib_timeout(void *arg)
2419{
2420	struct iwn_softc *sc = arg;
2421
2422	IWN_LOCK_ASSERT(sc);
2423
2424	/* Force automatic TX power calibration every 60 secs. */
2425	if (++sc->calib_cnt >= 120) {
2426		uint32_t flags = 0;
2427
2428		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
2429		    "sending request for statistics");
2430		(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
2431		    sizeof flags, 1);
2432		sc->calib_cnt = 0;
2433	}
2434	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
2435	    sc);
2436}
2437
2438/*
2439 * Process an RX_PHY firmware notification.  This is usually immediately
2440 * followed by an MPDU_RX_DONE notification.
2441 */
2442static void
2443iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2444    struct iwn_rx_data *data)
2445{
2446	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
2447
2448	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
2449	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2450
2451	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
2452	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
2453	sc->last_rx_valid = 1;
2454}
2455
2456/*
2457 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
2458 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2459 */
2460static void
2461iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2462    struct iwn_rx_data *data)
2463{
2464	struct iwn_ops *ops = &sc->ops;
2465	struct ifnet *ifp = sc->sc_ifp;
2466	struct ieee80211com *ic = ifp->if_l2com;
2467	struct iwn_rx_ring *ring = &sc->rxq;
2468	struct ieee80211_frame *wh;
2469	struct ieee80211_node *ni;
2470	struct mbuf *m, *m1;
2471	struct iwn_rx_stat *stat;
2472	caddr_t head;
2473	bus_addr_t paddr;
2474	uint32_t flags;
2475	int error, len, rssi, nf;
2476
2477	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2478
2479	if (desc->type == IWN_MPDU_RX_DONE) {
2480		/* Check for prior RX_PHY notification. */
2481		if (!sc->last_rx_valid) {
2482			DPRINTF(sc, IWN_DEBUG_ANY,
2483			    "%s: missing RX_PHY\n", __func__);
2484			return;
2485		}
2486		stat = &sc->last_rx_stat;
2487	} else
2488		stat = (struct iwn_rx_stat *)(desc + 1);
2489
2490	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2491
2492	if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2493		device_printf(sc->sc_dev,
2494		    "%s: invalid RX statistic header, len %d\n", __func__,
2495		    stat->cfg_phy_len);
2496		return;
2497	}
2498	if (desc->type == IWN_MPDU_RX_DONE) {
2499		struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
2500		head = (caddr_t)(mpdu + 1);
2501		len = le16toh(mpdu->len);
2502	} else {
2503		head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
2504		len = le16toh(stat->len);
2505	}
2506
2507	flags = le32toh(*(uint32_t *)(head + len));
2508
2509	/* Discard frames with a bad FCS early. */
2510	if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
2511		DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n",
2512		    __func__, flags);
2513		ifp->if_ierrors++;
2514		return;
2515	}
2516	/* Discard frames that are too short. */
2517	if (len < sizeof (*wh)) {
2518		DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
2519		    __func__, len);
2520		ifp->if_ierrors++;
2521		return;
2522	}
2523
2524	m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE);
2525	if (m1 == NULL) {
2526		DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
2527		    __func__);
2528		ifp->if_ierrors++;
2529		return;
2530	}
2531	bus_dmamap_unload(ring->data_dmat, data->map);
2532
2533	error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
2534	    IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
2535	if (error != 0 && error != EFBIG) {
2536		device_printf(sc->sc_dev,
2537		    "%s: bus_dmamap_load failed, error %d\n", __func__, error);
2538		m_freem(m1);
2539
2540		/* Try to reload the old mbuf. */
2541		error = bus_dmamap_load(ring->data_dmat, data->map,
2542		    mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
2543		    &paddr, BUS_DMA_NOWAIT);
2544		if (error != 0 && error != EFBIG) {
2545			panic("%s: could not load old RX mbuf", __func__);
2546		}
2547		/* Physical address may have changed. */
2548		ring->desc[ring->cur] = htole32(paddr >> 8);
2549		bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map,
2550		    BUS_DMASYNC_PREWRITE);
2551		ifp->if_ierrors++;
2552		return;
2553	}
2554
2555	m = data->m;
2556	data->m = m1;
2557	/* Update RX descriptor. */
2558	ring->desc[ring->cur] = htole32(paddr >> 8);
2559	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2560	    BUS_DMASYNC_PREWRITE);
2561
2562	/* Finalize mbuf. */
2563	m->m_pkthdr.rcvif = ifp;
2564	m->m_data = head;
2565	m->m_pkthdr.len = m->m_len = len;
2566
2567	/* Grab a reference to the source node. */
2568	wh = mtod(m, struct ieee80211_frame *);
2569	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2570	nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
2571	    (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;
2572
2573	rssi = ops->get_rssi(sc, stat);
2574
2575	if (ieee80211_radiotap_active(ic)) {
2576		struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
2577
2578		tap->wr_flags = 0;
2579		if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
2580			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2581		tap->wr_dbm_antsignal = (int8_t)rssi;
2582		tap->wr_dbm_antnoise = (int8_t)nf;
2583		tap->wr_tsft = stat->tstamp;
2584		switch (stat->rate) {
2585		/* CCK rates. */
2586		case  10: tap->wr_rate =   2; break;
2587		case  20: tap->wr_rate =   4; break;
2588		case  55: tap->wr_rate =  11; break;
2589		case 110: tap->wr_rate =  22; break;
2590		/* OFDM rates. */
2591		case 0xd: tap->wr_rate =  12; break;
2592		case 0xf: tap->wr_rate =  18; break;
2593		case 0x5: tap->wr_rate =  24; break;
2594		case 0x7: tap->wr_rate =  36; break;
2595		case 0x9: tap->wr_rate =  48; break;
2596		case 0xb: tap->wr_rate =  72; break;
2597		case 0x1: tap->wr_rate =  96; break;
2598		case 0x3: tap->wr_rate = 108; break;
2599		/* Unknown rate: should not happen. */
2600		default:  tap->wr_rate =   0;
2601		}
2602	}
2603
2604	IWN_UNLOCK(sc);
2605
2606	/* Send the frame to the 802.11 layer. */
2607	if (ni != NULL) {
2608		if (ni->ni_flags & IEEE80211_NODE_HT)
2609			m->m_flags |= M_AMPDU;
2610		(void)ieee80211_input(ni, m, rssi - nf, nf);
2611		/* Node is no longer needed. */
2612		ieee80211_free_node(ni);
2613	} else
2614		(void)ieee80211_input_all(ic, m, rssi - nf, nf);
2615
2616	IWN_LOCK(sc);
2617
2618	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
2619
2620}
2621
2622/* Process an incoming Compressed BlockAck. */
2623static void
2624iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2625    struct iwn_rx_data *data)
2626{
2627	struct iwn_ops *ops = &sc->ops;
2628	struct ifnet *ifp = sc->sc_ifp;
2629	struct iwn_node *wn;
2630	struct ieee80211_node *ni;
2631	struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
2632	struct iwn_tx_ring *txq;
2633	struct iwn_tx_data *txdata;
2634	struct ieee80211_tx_ampdu *tap;
2635	struct mbuf *m;
2636	uint64_t bitmap;
2637	uint16_t ssn;
2638	uint8_t tid;
2639	int ackfailcnt = 0, i, lastidx, qid, *res, shift;
2640
2641	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2642
2643	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2644
2645	qid = le16toh(ba->qid);
2646	txq = &sc->txq[ba->qid];
2647	tap = sc->qid2tap[ba->qid];
2648	tid = tap->txa_tid;
2649	wn = (void *)tap->txa_ni;
2650
2651	res = NULL;
2652	ssn = 0;
2653	if (!IEEE80211_AMPDU_RUNNING(tap)) {
2654		res = tap->txa_private;
2655		ssn = tap->txa_start & 0xfff;
2656	}
2657
2658	for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) {
2659		txdata = &txq->data[txq->read];
2660
2661		/* Unmap and free mbuf. */
2662		bus_dmamap_sync(txq->data_dmat, txdata->map,
2663		    BUS_DMASYNC_POSTWRITE);
2664		bus_dmamap_unload(txq->data_dmat, txdata->map);
2665		m = txdata->m, txdata->m = NULL;
2666		ni = txdata->ni, txdata->ni = NULL;
2667
2668		KASSERT(ni != NULL, ("no node"));
2669		KASSERT(m != NULL, ("no mbuf"));
2670
2671		ieee80211_tx_complete(ni, m, 1);
2672
2673		txq->queued--;
2674		txq->read = (txq->read + 1) % IWN_TX_RING_COUNT;
2675	}
2676
2677	if (txq->queued == 0 && res != NULL) {
2678		iwn_nic_lock(sc);
2679		ops->ampdu_tx_stop(sc, qid, tid, ssn);
2680		iwn_nic_unlock(sc);
2681		sc->qid2tap[qid] = NULL;
2682		free(res, M_DEVBUF);
2683		return;
2684	}
2685
2686	if (wn->agg[tid].bitmap == 0)
2687		return;
2688
2689	shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff);
2690	if (shift < 0)
2691		shift += 0x100;
2692
2693	if (wn->agg[tid].nframes > (64 - shift))
2694		return;
2695
2696	ni = tap->txa_ni;
2697	bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap;
2698	for (i = 0; bitmap; i++) {
2699		if ((bitmap & 1) == 0) {
2700			ifp->if_oerrors++;
2701			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
2702			    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2703		} else {
2704			ifp->if_opackets++;
2705			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
2706			    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2707		}
2708		bitmap >>= 1;
2709	}
2710
2711	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
2712
2713}
2714
2715/*
2716 * Process a CALIBRATION_RESULT notification sent by the initialization
2717 * firmware on response to a CMD_CALIB_CONFIG command (5000 only).
2718 */
2719static void
2720iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2721    struct iwn_rx_data *data)
2722{
2723	struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
2724	int len, idx = -1;
2725
2726	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2727
2728	/* Runtime firmware should not send such a notification. */
2729	if (sc->sc_flags & IWN_FLAG_CALIB_DONE){
2730		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received after clib done\n",
2731	    __func__);
2732		return;
2733	}
2734	len = (le32toh(desc->len) & 0x3fff) - 4;
2735	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2736
2737	switch (calib->code) {
2738	case IWN5000_PHY_CALIB_DC:
2739		if ((sc->sc_flags & IWN_FLAG_INTERNAL_PA) == 0 &&
2740		    (sc->hw_type == IWN_HW_REV_TYPE_5150 ||
2741		     sc->hw_type >= IWN_HW_REV_TYPE_6000) &&
2742		     sc->hw_type != IWN_HW_REV_TYPE_6050)
2743			idx = 0;
2744		break;
2745	case IWN5000_PHY_CALIB_LO:
2746		idx = 1;
2747		break;
2748	case IWN5000_PHY_CALIB_TX_IQ:
2749		idx = 2;
2750		break;
2751	case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
2752		if (sc->hw_type < IWN_HW_REV_TYPE_6000 &&
2753		    sc->hw_type != IWN_HW_REV_TYPE_5150)
2754			idx = 3;
2755		break;
2756	case IWN5000_PHY_CALIB_BASE_BAND:
2757		idx = 4;
2758		break;
2759	}
2760	if (idx == -1)	/* Ignore other results. */
2761		return;
2762
2763	/* Save calibration result. */
2764	if (sc->calibcmd[idx].buf != NULL)
2765		free(sc->calibcmd[idx].buf, M_DEVBUF);
2766	sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
2767	if (sc->calibcmd[idx].buf == NULL) {
2768		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2769		    "not enough memory for calibration result %d\n",
2770		    calib->code);
2771		return;
2772	}
2773	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2774	    "saving calibration result code=%d len=%d\n", calib->code, len);
2775	sc->calibcmd[idx].len = len;
2776	memcpy(sc->calibcmd[idx].buf, calib, len);
2777}
2778
2779/*
2780 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
2781 * The latter is sent by the firmware after each received beacon.
2782 */
2783static void
2784iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2785    struct iwn_rx_data *data)
2786{
2787	struct iwn_ops *ops = &sc->ops;
2788	struct ifnet *ifp = sc->sc_ifp;
2789	struct ieee80211com *ic = ifp->if_l2com;
2790	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2791	struct iwn_calib_state *calib = &sc->calib;
2792	struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
2793	int temp;
2794
2795	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2796
2797	/* Ignore statistics received during a scan. */
2798	if (vap->iv_state != IEEE80211_S_RUN ||
2799	    (ic->ic_flags & IEEE80211_F_SCAN)){
2800		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n",
2801	    __func__);
2802		return;
2803	}
2804
2805	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2806
2807	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received statistics, cmd %d\n",
2808	    __func__, desc->type);
2809	sc->calib_cnt = 0;	/* Reset TX power calibration timeout. */
2810
2811	/* Test if temperature has changed. */
2812	if (stats->general.temp != sc->rawtemp) {
2813		/* Convert "raw" temperature to degC. */
2814		sc->rawtemp = stats->general.temp;
2815		temp = ops->get_temperature(sc);
2816		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
2817		    __func__, temp);
2818
2819		/* Update TX power if need be (4965AGN only). */
2820		if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2821			iwn4965_power_calibration(sc, temp);
2822	}
2823
2824	if (desc->type != IWN_BEACON_STATISTICS)
2825		return;	/* Reply to a statistics request. */
2826
2827	sc->noise = iwn_get_noise(&stats->rx.general);
2828	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);
2829
2830	/* Test that RSSI and noise are present in stats report. */
2831	if (le32toh(stats->rx.general.flags) != 1) {
2832		DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
2833		    "received statistics without RSSI");
2834		return;
2835	}
2836
2837	if (calib->state == IWN_CALIB_STATE_ASSOC)
2838		iwn_collect_noise(sc, &stats->rx.general);
2839	else if (calib->state == IWN_CALIB_STATE_RUN)
2840		iwn_tune_sensitivity(sc, &stats->rx);
2841
2842	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
2843}
2844
2845/*
2846 * Process a TX_DONE firmware notification.  Unfortunately, the 4965AGN
2847 * and 5000 adapters have different incompatible TX status formats.
2848 */
2849static void
2850iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2851    struct iwn_rx_data *data)
2852{
2853	struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2854	struct iwn_tx_ring *ring;
2855	int qid;
2856
2857	qid = desc->qid & 0xf;
2858	ring = &sc->txq[qid];
2859
2860	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2861	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2862	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
2863	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
2864	    le32toh(stat->status));
2865
2866	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2867	if (qid >= sc->firstaggqueue) {
2868		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
2869		    &stat->status);
2870	} else {
2871		iwn_tx_done(sc, desc, stat->ackfailcnt,
2872		    le32toh(stat->status) & 0xff);
2873	}
2874}
2875
2876static void
2877iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2878    struct iwn_rx_data *data)
2879{
2880	struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2881	struct iwn_tx_ring *ring;
2882	int qid;
2883
2884	qid = desc->qid & 0xf;
2885	ring = &sc->txq[qid];
2886
2887	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2888	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2889	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
2890	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
2891	    le32toh(stat->status));
2892
2893#ifdef notyet
2894	/* Reset TX scheduler slot. */
2895	iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
2896#endif
2897
2898	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2899	if (qid >= sc->firstaggqueue) {
2900		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
2901		    &stat->status);
2902	} else {
2903		iwn_tx_done(sc, desc, stat->ackfailcnt,
2904		    le16toh(stat->status) & 0xff);
2905	}
2906}
2907
2908/*
2909 * Adapter-independent backend for TX_DONE firmware notifications.
2910 */
2911static void
2912iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
2913    uint8_t status)
2914{
2915	struct ifnet *ifp = sc->sc_ifp;
2916	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2917	struct iwn_tx_data *data = &ring->data[desc->idx];
2918	struct mbuf *m;
2919	struct ieee80211_node *ni;
2920	struct ieee80211vap *vap;
2921
2922	KASSERT(data->ni != NULL, ("no node"));
2923
2924	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2925
2926	/* Unmap and free mbuf. */
2927	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
2928	bus_dmamap_unload(ring->data_dmat, data->map);
2929	m = data->m, data->m = NULL;
2930	ni = data->ni, data->ni = NULL;
2931	vap = ni->ni_vap;
2932
2933	/*
2934	 * Update rate control statistics for the node.
2935	 */
2936	if (status & IWN_TX_FAIL) {
2937		ifp->if_oerrors++;
2938		ieee80211_ratectl_tx_complete(vap, ni,
2939		    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2940	} else {
2941		ifp->if_opackets++;
2942		ieee80211_ratectl_tx_complete(vap, ni,
2943		    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2944	}
2945
2946	/*
2947	 * Channels marked for "radar" require traffic to be received
2948	 * to unlock before we can transmit.  Until traffic is seen
2949	 * any attempt to transmit is returned immediately with status
2950	 * set to IWN_TX_FAIL_TX_LOCKED.  Unfortunately this can easily
2951	 * happen on first authenticate after scanning.  To workaround
2952	 * this we ignore a failure of this sort in AUTH state so the
2953	 * 802.11 layer will fall back to using a timeout to wait for
2954	 * the AUTH reply.  This allows the firmware time to see
2955	 * traffic so a subsequent retry of AUTH succeeds.  It's
2956	 * unclear why the firmware does not maintain state for
2957	 * channels recently visited as this would allow immediate
2958	 * use of the channel after a scan (where we see traffic).
2959	 */
2960	if (status == IWN_TX_FAIL_TX_LOCKED &&
2961	    ni->ni_vap->iv_state == IEEE80211_S_AUTH)
2962		ieee80211_tx_complete(ni, m, 0);
2963	else
2964		ieee80211_tx_complete(ni, m,
2965		    (status & IWN_TX_FAIL) != 0);
2966
2967	sc->sc_tx_timer = 0;
2968	if (--ring->queued < IWN_TX_RING_LOMARK) {
2969		sc->qfullmsk &= ~(1 << ring->qid);
2970		if (sc->qfullmsk == 0 &&
2971		    (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
2972			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2973			iwn_start_locked(ifp);
2974		}
2975	}
2976
2977	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
2978
2979}
2980
2981/*
2982 * Process a "command done" firmware notification.  This is where we wakeup
2983 * processes waiting for a synchronous command completion.
2984 */
2985static void
2986iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2987{
2988	struct iwn_tx_ring *ring = &sc->txq[4];
2989	struct iwn_tx_data *data;
2990
2991	if ((desc->qid & 0xf) != 4)
2992		return;	/* Not a command ack. */
2993
2994	data = &ring->data[desc->idx];
2995
2996	/* If the command was mapped in an mbuf, free it. */
2997	if (data->m != NULL) {
2998		bus_dmamap_sync(ring->data_dmat, data->map,
2999		    BUS_DMASYNC_POSTWRITE);
3000		bus_dmamap_unload(ring->data_dmat, data->map);
3001		m_freem(data->m);
3002		data->m = NULL;
3003	}
3004	wakeup(&ring->desc[desc->idx]);
3005}
3006
3007static void
3008iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes,
3009    void *stat)
3010{
3011	struct iwn_ops *ops = &sc->ops;
3012	struct ifnet *ifp = sc->sc_ifp;
3013	struct iwn_tx_ring *ring = &sc->txq[qid];
3014	struct iwn_tx_data *data;
3015	struct mbuf *m;
3016	struct iwn_node *wn;
3017	struct ieee80211_node *ni;
3018	struct ieee80211_tx_ampdu *tap;
3019	uint64_t bitmap;
3020	uint32_t *status = stat;
3021	uint16_t *aggstatus = stat;
3022	uint16_t ssn;
3023	uint8_t tid;
3024	int bit, i, lastidx, *res, seqno, shift, start;
3025
3026	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3027
3028#ifdef NOT_YET
3029	if (nframes == 1) {
3030		if ((*status & 0xff) != 1 && (*status & 0xff) != 2)
3031			printf("ieee80211_send_bar()\n");
3032	}
3033#endif
3034
3035	bitmap = 0;
3036	start = idx;
3037	for (i = 0; i < nframes; i++) {
3038		if (le16toh(aggstatus[i * 2]) & 0xc)
3039			continue;
3040
3041		idx = le16toh(aggstatus[2*i + 1]) & 0xff;
3042		bit = idx - start;
3043		shift = 0;
3044		if (bit >= 64) {
3045			shift = 0x100 - idx + start;
3046			bit = 0;
3047			start = idx;
3048		} else if (bit <= -64)
3049			bit = 0x100 - start + idx;
3050		else if (bit < 0) {
3051			shift = start - idx;
3052			start = idx;
3053			bit = 0;
3054		}
3055		bitmap = bitmap << shift;
3056		bitmap |= 1ULL << bit;
3057	}
3058	tap = sc->qid2tap[qid];
3059	tid = tap->txa_tid;
3060	wn = (void *)tap->txa_ni;
3061	wn->agg[tid].bitmap = bitmap;
3062	wn->agg[tid].startidx = start;
3063	wn->agg[tid].nframes = nframes;
3064
3065	res = NULL;
3066	ssn = 0;
3067	if (!IEEE80211_AMPDU_RUNNING(tap)) {
3068		res = tap->txa_private;
3069		ssn = tap->txa_start & 0xfff;
3070	}
3071
3072	seqno = le32toh(*(status + nframes)) & 0xfff;
3073	for (lastidx = (seqno & 0xff); ring->read != lastidx;) {
3074		data = &ring->data[ring->read];
3075
3076		/* Unmap and free mbuf. */
3077		bus_dmamap_sync(ring->data_dmat, data->map,
3078		    BUS_DMASYNC_POSTWRITE);
3079		bus_dmamap_unload(ring->data_dmat, data->map);
3080		m = data->m, data->m = NULL;
3081		ni = data->ni, data->ni = NULL;
3082
3083		KASSERT(ni != NULL, ("no node"));
3084		KASSERT(m != NULL, ("no mbuf"));
3085
3086		ieee80211_tx_complete(ni, m, 1);
3087
3088		ring->queued--;
3089		ring->read = (ring->read + 1) % IWN_TX_RING_COUNT;
3090	}
3091
3092	if (ring->queued == 0 && res != NULL) {
3093		iwn_nic_lock(sc);
3094		ops->ampdu_tx_stop(sc, qid, tid, ssn);
3095		iwn_nic_unlock(sc);
3096		sc->qid2tap[qid] = NULL;
3097		free(res, M_DEVBUF);
3098		return;
3099	}
3100
3101	sc->sc_tx_timer = 0;
3102	if (ring->queued < IWN_TX_RING_LOMARK) {
3103		sc->qfullmsk &= ~(1 << ring->qid);
3104		if (sc->qfullmsk == 0 &&
3105		    (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
3106			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3107			iwn_start_locked(ifp);
3108		}
3109	}
3110
3111	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3112
3113}
3114
3115/*
3116 * Process an INT_FH_RX or INT_SW_RX interrupt.
3117 */
3118static void
3119iwn_notif_intr(struct iwn_softc *sc)
3120{
3121	struct iwn_ops *ops = &sc->ops;
3122	struct ifnet *ifp = sc->sc_ifp;
3123	struct ieee80211com *ic = ifp->if_l2com;
3124	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3125	uint16_t hw;
3126
3127	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
3128	    BUS_DMASYNC_POSTREAD);
3129
3130	hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
3131	while (sc->rxq.cur != hw) {
3132		struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
3133		struct iwn_rx_desc *desc;
3134
3135		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3136		    BUS_DMASYNC_POSTREAD);
3137		desc = mtod(data->m, struct iwn_rx_desc *);
3138
3139		DPRINTF(sc, IWN_DEBUG_RECV,
3140		    "%s: qid %x idx %d flags %x type %d(%s) len %d\n",
3141		    __func__, desc->qid & 0xf, desc->idx, desc->flags,
3142		    desc->type, iwn_intr_str(desc->type),
3143		    le16toh(desc->len));
3144
3145		if (!(desc->qid & 0x80))	/* Reply to a command. */
3146			iwn_cmd_done(sc, desc);
3147
3148		switch (desc->type) {
3149		case IWN_RX_PHY:
3150			iwn_rx_phy(sc, desc, data);
3151			break;
3152
3153		case IWN_RX_DONE:		/* 4965AGN only. */
3154		case IWN_MPDU_RX_DONE:
3155			/* An 802.11 frame has been received. */
3156			iwn_rx_done(sc, desc, data);
3157			break;
3158
3159		case IWN_RX_COMPRESSED_BA:
3160			/* A Compressed BlockAck has been received. */
3161			iwn_rx_compressed_ba(sc, desc, data);
3162			break;
3163
3164		case IWN_TX_DONE:
3165			/* An 802.11 frame has been transmitted. */
3166			ops->tx_done(sc, desc, data);
3167			break;
3168
3169		case IWN_RX_STATISTICS:
3170		case IWN_BEACON_STATISTICS:
3171			iwn_rx_statistics(sc, desc, data);
3172			break;
3173
3174		case IWN_BEACON_MISSED:
3175		{
3176			struct iwn_beacon_missed *miss =
3177			    (struct iwn_beacon_missed *)(desc + 1);
3178			int misses;
3179
3180			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3181			    BUS_DMASYNC_POSTREAD);
3182			misses = le32toh(miss->consecutive);
3183
3184			DPRINTF(sc, IWN_DEBUG_STATE,
3185			    "%s: beacons missed %d/%d\n", __func__,
3186			    misses, le32toh(miss->total));
3187			/*
3188			 * If more than 5 consecutive beacons are missed,
3189			 * reinitialize the sensitivity state machine.
3190			 */
3191			if (vap->iv_state == IEEE80211_S_RUN &&
3192			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
3193				if (misses > 5)
3194					(void)iwn_init_sensitivity(sc);
3195				if (misses >= vap->iv_bmissthreshold) {
3196					IWN_UNLOCK(sc);
3197					ieee80211_beacon_miss(ic);
3198					IWN_LOCK(sc);
3199				}
3200			}
3201			break;
3202		}
3203		case IWN_UC_READY:
3204		{
3205			struct iwn_ucode_info *uc =
3206			    (struct iwn_ucode_info *)(desc + 1);
3207
3208			/* The microcontroller is ready. */
3209			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3210			    BUS_DMASYNC_POSTREAD);
3211			DPRINTF(sc, IWN_DEBUG_RESET,
3212			    "microcode alive notification version=%d.%d "
3213			    "subtype=%x alive=%x\n", uc->major, uc->minor,
3214			    uc->subtype, le32toh(uc->valid));
3215
3216			if (le32toh(uc->valid) != 1) {
3217				device_printf(sc->sc_dev,
3218				    "microcontroller initialization failed");
3219				break;
3220			}
3221			if (uc->subtype == IWN_UCODE_INIT) {
3222				/* Save microcontroller report. */
3223				memcpy(&sc->ucode_info, uc, sizeof (*uc));
3224			}
3225			/* Save the address of the error log in SRAM. */
3226			sc->errptr = le32toh(uc->errptr);
3227			break;
3228		}
3229		case IWN_STATE_CHANGED:
3230		{
3231			/*
3232			 * State change allows hardware switch change to be
3233			 * noted. However, we handle this in iwn_intr as we
3234			 * get both the enable/disble intr.
3235			 */
3236			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3237			    BUS_DMASYNC_POSTREAD);
3238#ifdef	IWN_DEBUG
3239			uint32_t *status = (uint32_t *)(desc + 1);
3240			DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n",
3241			    le32toh(*status));
3242#endif
3243			break;
3244		}
3245		case IWN_START_SCAN:
3246		{
3247			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3248			    BUS_DMASYNC_POSTREAD);
3249#ifdef	IWN_DEBUG
3250			struct iwn_start_scan *scan =
3251			    (struct iwn_start_scan *)(desc + 1);
3252			DPRINTF(sc, IWN_DEBUG_ANY,
3253			    "%s: scanning channel %d status %x\n",
3254			    __func__, scan->chan, le32toh(scan->status));
3255#endif
3256			break;
3257		}
3258		case IWN_STOP_SCAN:
3259		{
3260			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3261			    BUS_DMASYNC_POSTREAD);
3262#ifdef	IWN_DEBUG
3263			struct iwn_stop_scan *scan =
3264			    (struct iwn_stop_scan *)(desc + 1);
3265			DPRINTF(sc, IWN_DEBUG_STATE,
3266			    "scan finished nchan=%d status=%d chan=%d\n",
3267			    scan->nchan, scan->status, scan->chan);
3268#endif
3269
3270			IWN_UNLOCK(sc);
3271			ieee80211_scan_next(vap);
3272			IWN_LOCK(sc);
3273			break;
3274		}
3275		case IWN5000_CALIBRATION_RESULT:
3276			iwn5000_rx_calib_results(sc, desc, data);
3277			break;
3278
3279		case IWN5000_CALIBRATION_DONE:
3280			sc->sc_flags |= IWN_FLAG_CALIB_DONE;
3281			wakeup(sc);
3282			break;
3283		}
3284
3285		sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
3286	}
3287
3288	/* Tell the firmware what we have processed. */
3289	hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
3290	IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
3291}
3292
3293/*
3294 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
3295 * from power-down sleep mode.
3296 */
3297static void
3298iwn_wakeup_intr(struct iwn_softc *sc)
3299{
3300	int qid;
3301
3302	DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
3303	    __func__);
3304
3305	/* Wakeup RX and TX rings. */
3306	IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
3307	for (qid = 0; qid < sc->ntxqs; qid++) {
3308		struct iwn_tx_ring *ring = &sc->txq[qid];
3309		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
3310	}
3311}
3312
3313static void
3314iwn_rftoggle_intr(struct iwn_softc *sc)
3315{
3316	struct ifnet *ifp = sc->sc_ifp;
3317	struct ieee80211com *ic = ifp->if_l2com;
3318	uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);
3319
3320	IWN_LOCK_ASSERT(sc);
3321
3322	device_printf(sc->sc_dev, "RF switch: radio %s\n",
3323	    (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
3324	if (tmp & IWN_GP_CNTRL_RFKILL)
3325		ieee80211_runtask(ic, &sc->sc_radioon_task);
3326	else
3327		ieee80211_runtask(ic, &sc->sc_radiooff_task);
3328}
3329
3330/*
3331 * Dump the error log of the firmware when a firmware panic occurs.  Although
3332 * we can't debug the firmware because it is neither open source nor free, it
3333 * can help us to identify certain classes of problems.
3334 */
3335static void
3336iwn_fatal_intr(struct iwn_softc *sc)
3337{
3338	struct iwn_fw_dump dump;
3339	int i;
3340
3341	IWN_LOCK_ASSERT(sc);
3342
3343	/* Force a complete recalibration on next init. */
3344	sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
3345
3346	/* Check that the error log address is valid. */
3347	if (sc->errptr < IWN_FW_DATA_BASE ||
3348	    sc->errptr + sizeof (dump) >
3349	    IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
3350		printf("%s: bad firmware error log address 0x%08x\n", __func__,
3351		    sc->errptr);
3352		return;
3353	}
3354	if (iwn_nic_lock(sc) != 0) {
3355		printf("%s: could not read firmware error log\n", __func__);
3356		return;
3357	}
3358	/* Read firmware error log from SRAM. */
3359	iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
3360	    sizeof (dump) / sizeof (uint32_t));
3361	iwn_nic_unlock(sc);
3362
3363	if (dump.valid == 0) {
3364		printf("%s: firmware error log is empty\n", __func__);
3365		return;
3366	}
3367	printf("firmware error log:\n");
3368	printf("  error type      = \"%s\" (0x%08X)\n",
3369	    (dump.id < nitems(iwn_fw_errmsg)) ?
3370		iwn_fw_errmsg[dump.id] : "UNKNOWN",
3371	    dump.id);
3372	printf("  program counter = 0x%08X\n", dump.pc);
3373	printf("  source line     = 0x%08X\n", dump.src_line);
3374	printf("  error data      = 0x%08X%08X\n",
3375	    dump.error_data[0], dump.error_data[1]);
3376	printf("  branch link     = 0x%08X%08X\n",
3377	    dump.branch_link[0], dump.branch_link[1]);
3378	printf("  interrupt link  = 0x%08X%08X\n",
3379	    dump.interrupt_link[0], dump.interrupt_link[1]);
3380	printf("  time            = %u\n", dump.time[0]);
3381
3382	/* Dump driver status (TX and RX rings) while we're here. */
3383	printf("driver status:\n");
3384	for (i = 0; i < sc->ntxqs; i++) {
3385		struct iwn_tx_ring *ring = &sc->txq[i];
3386		printf("  tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
3387		    i, ring->qid, ring->cur, ring->queued);
3388	}
3389	printf("  rx ring: cur=%d\n", sc->rxq.cur);
3390}
3391
3392static void
3393iwn_intr(void *arg)
3394{
3395	struct iwn_softc *sc = arg;
3396	struct ifnet *ifp = sc->sc_ifp;
3397	uint32_t r1, r2, tmp;
3398
3399	IWN_LOCK(sc);
3400
3401	/* Disable interrupts. */
3402	IWN_WRITE(sc, IWN_INT_MASK, 0);
3403
3404	/* Read interrupts from ICT (fast) or from registers (slow). */
3405	if (sc->sc_flags & IWN_FLAG_USE_ICT) {
3406		tmp = 0;
3407		while (sc->ict[sc->ict_cur] != 0) {
3408			tmp |= sc->ict[sc->ict_cur];
3409			sc->ict[sc->ict_cur] = 0;	/* Acknowledge. */
3410			sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
3411		}
3412		tmp = le32toh(tmp);
3413		if (tmp == 0xffffffff)	/* Shouldn't happen. */
3414			tmp = 0;
3415		else if (tmp & 0xc0000)	/* Workaround a HW bug. */
3416			tmp |= 0x8000;
3417		r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
3418		r2 = 0;	/* Unused. */
3419	} else {
3420		r1 = IWN_READ(sc, IWN_INT);
3421		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
3422			return;	/* Hardware gone! */
3423		r2 = IWN_READ(sc, IWN_FH_INT);
3424	}
3425
3426	DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n"
3427    , r1, r2);
3428
3429	if (r1 == 0 && r2 == 0)
3430		goto done;	/* Interrupt not for us. */
3431
3432	/* Acknowledge interrupts. */
3433	IWN_WRITE(sc, IWN_INT, r1);
3434	if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
3435		IWN_WRITE(sc, IWN_FH_INT, r2);
3436
3437	if (r1 & IWN_INT_RF_TOGGLED) {
3438		iwn_rftoggle_intr(sc);
3439		goto done;
3440	}
3441	if (r1 & IWN_INT_CT_REACHED) {
3442		device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
3443		    __func__);
3444	}
3445	if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
3446		device_printf(sc->sc_dev, "%s: fatal firmware error\n",
3447		    __func__);
3448#ifdef	IWN_DEBUG
3449		iwn_debug_register(sc);
3450#endif
3451		/* Dump firmware error log and stop. */
3452		iwn_fatal_intr(sc);
3453		ifp->if_flags &= ~IFF_UP;
3454		iwn_stop_locked(sc);
3455		goto done;
3456	}
3457	if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
3458	    (r2 & IWN_FH_INT_RX)) {
3459		if (sc->sc_flags & IWN_FLAG_USE_ICT) {
3460			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
3461				IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
3462			IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3463			    IWN_INT_PERIODIC_DIS);
3464			iwn_notif_intr(sc);
3465			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
3466				IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3467				    IWN_INT_PERIODIC_ENA);
3468			}
3469		} else
3470			iwn_notif_intr(sc);
3471	}
3472
3473	if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
3474		if (sc->sc_flags & IWN_FLAG_USE_ICT)
3475			IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
3476		wakeup(sc);	/* FH DMA transfer completed. */
3477	}
3478
3479	if (r1 & IWN_INT_ALIVE)
3480		wakeup(sc);	/* Firmware is alive. */
3481
3482	if (r1 & IWN_INT_WAKEUP)
3483		iwn_wakeup_intr(sc);
3484
3485done:
3486	/* Re-enable interrupts. */
3487	if (ifp->if_flags & IFF_UP)
3488		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
3489
3490	IWN_UNLOCK(sc);
3491}
3492
3493/*
3494 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
3495 * 5000 adapters use a slightly different format).
3496 */
3497static void
3498iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3499    uint16_t len)
3500{
3501	uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
3502
3503	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
3504
3505	*w = htole16(len + 8);
3506	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3507	    BUS_DMASYNC_PREWRITE);
3508	if (idx < IWN_SCHED_WINSZ) {
3509		*(w + IWN_TX_RING_COUNT) = *w;
3510		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3511		    BUS_DMASYNC_PREWRITE);
3512	}
3513}
3514
3515static void
3516iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3517    uint16_t len)
3518{
3519	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
3520
3521	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
3522
3523	*w = htole16(id << 12 | (len + 8));
3524	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3525	    BUS_DMASYNC_PREWRITE);
3526	if (idx < IWN_SCHED_WINSZ) {
3527		*(w + IWN_TX_RING_COUNT) = *w;
3528		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3529		    BUS_DMASYNC_PREWRITE);
3530	}
3531}
3532
3533#ifdef notyet
3534static void
3535iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
3536{
3537	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
3538
3539	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
3540
3541	*w = (*w & htole16(0xf000)) | htole16(1);
3542	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3543	    BUS_DMASYNC_PREWRITE);
3544	if (idx < IWN_SCHED_WINSZ) {
3545		*(w + IWN_TX_RING_COUNT) = *w;
3546		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3547		    BUS_DMASYNC_PREWRITE);
3548	}
3549}
3550#endif
3551
3552static int
3553iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
3554{
3555	struct iwn_ops *ops = &sc->ops;
3556	const struct ieee80211_txparam *tp;
3557	struct ieee80211vap *vap = ni->ni_vap;
3558	struct ieee80211com *ic = ni->ni_ic;
3559	struct iwn_node *wn = (void *)ni;
3560	struct iwn_tx_ring *ring;
3561	struct iwn_tx_desc *desc;
3562	struct iwn_tx_data *data;
3563	struct iwn_tx_cmd *cmd;
3564	struct iwn_cmd_data *tx;
3565	struct ieee80211_frame *wh;
3566	struct ieee80211_key *k = NULL;
3567	struct mbuf *m1;
3568	uint32_t flags;
3569	uint16_t qos;
3570	u_int hdrlen;
3571	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
3572	uint8_t tid, ridx, txant, type;
3573	int ac, i, totlen, error, pad, nsegs = 0, rate;
3574
3575	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3576
3577	IWN_LOCK_ASSERT(sc);
3578
3579	wh = mtod(m, struct ieee80211_frame *);
3580	hdrlen = ieee80211_anyhdrsize(wh);
3581	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3582
3583	/* Select EDCA Access Category and TX ring for this frame. */
3584	if (IEEE80211_QOS_HAS_SEQ(wh)) {
3585		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
3586		tid = qos & IEEE80211_QOS_TID;
3587	} else {
3588		qos = 0;
3589		tid = 0;
3590	}
3591	ac = M_WME_GETAC(m);
3592	if (m->m_flags & M_AMPDU_MPDU) {
3593		struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];
3594
3595		if (!IEEE80211_AMPDU_RUNNING(tap)) {
3596			m_freem(m);
3597			return EINVAL;
3598		}
3599
3600		ac = *(int *)tap->txa_private;
3601		*(uint16_t *)wh->i_seq =
3602		    htole16(ni->ni_txseqs[tid] << IEEE80211_SEQ_SEQ_SHIFT);
3603		ni->ni_txseqs[tid]++;
3604	}
3605	ring = &sc->txq[ac];
3606	desc = &ring->desc[ring->cur];
3607	data = &ring->data[ring->cur];
3608
3609	/* Choose a TX rate index. */
3610	tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
3611	if (type == IEEE80211_FC0_TYPE_MGT)
3612		rate = tp->mgmtrate;
3613	else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
3614		rate = tp->mcastrate;
3615	else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
3616		rate = tp->ucastrate;
3617	else {
3618		/* XXX pass pktlen */
3619		(void) ieee80211_ratectl_rate(ni, NULL, 0);
3620		rate = ni->ni_txrate;
3621	}
3622	ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
3623	    rate & IEEE80211_RATE_VAL);
3624
3625	/* Encrypt the frame if need be. */
3626	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3627		/* Retrieve key for TX. */
3628		k = ieee80211_crypto_encap(ni, m);
3629		if (k == NULL) {
3630			m_freem(m);
3631			return ENOBUFS;
3632		}
3633		/* 802.11 header may have moved. */
3634		wh = mtod(m, struct ieee80211_frame *);
3635	}
3636	totlen = m->m_pkthdr.len;
3637
3638	if (ieee80211_radiotap_active_vap(vap)) {
3639		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3640
3641		tap->wt_flags = 0;
3642		tap->wt_rate = rate;
3643		if (k != NULL)
3644			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3645
3646		ieee80211_radiotap_tx(vap, m);
3647	}
3648
3649	/* Prepare TX firmware command. */
3650	cmd = &ring->cmd[ring->cur];
3651	cmd->code = IWN_CMD_TX_DATA;
3652	cmd->flags = 0;
3653	cmd->qid = ring->qid;
3654	cmd->idx = ring->cur;
3655
3656	tx = (struct iwn_cmd_data *)cmd->data;
3657	/* NB: No need to clear tx, all fields are reinitialized here. */
3658	tx->scratch = 0;	/* clear "scratch" area */
3659
3660	flags = 0;
3661	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3662		/* Unicast frame, check if an ACK is expected. */
3663		if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
3664		    IEEE80211_QOS_ACKPOLICY_NOACK)
3665			flags |= IWN_TX_NEED_ACK;
3666	}
3667	if ((wh->i_fc[0] &
3668	    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
3669	    (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
3670		flags |= IWN_TX_IMM_BA;		/* Cannot happen yet. */
3671
3672	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
3673		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */
3674
3675	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
3676	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3677		/* NB: Group frames are sent using CCK in 802.11b/g. */
3678		if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
3679			flags |= IWN_TX_NEED_RTS;
3680		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
3681		    ridx >= IWN_RIDX_OFDM6) {
3682			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
3683				flags |= IWN_TX_NEED_CTS;
3684			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
3685				flags |= IWN_TX_NEED_RTS;
3686		}
3687		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
3688			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3689				/* 5000 autoselects RTS/CTS or CTS-to-self. */
3690				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
3691				flags |= IWN_TX_NEED_PROTECTION;
3692			} else
3693				flags |= IWN_TX_FULL_TXOP;
3694		}
3695	}
3696
3697	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3698	    type != IEEE80211_FC0_TYPE_DATA)
3699		tx->id = sc->broadcast_id;
3700	else
3701		tx->id = wn->id;
3702
3703	if (type == IEEE80211_FC0_TYPE_MGT) {
3704		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3705
3706		/* Tell HW to set timestamp in probe responses. */
3707		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3708			flags |= IWN_TX_INSERT_TSTAMP;
3709		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3710		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3711			tx->timeout = htole16(3);
3712		else
3713			tx->timeout = htole16(2);
3714	} else
3715		tx->timeout = htole16(0);
3716
3717	if (hdrlen & 3) {
3718		/* First segment length must be a multiple of 4. */
3719		flags |= IWN_TX_NEED_PADDING;
3720		pad = 4 - (hdrlen & 3);
3721	} else
3722		pad = 0;
3723
3724	tx->len = htole16(totlen);
3725	tx->tid = tid;
3726	tx->rts_ntries = 60;
3727	tx->data_ntries = 15;
3728	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3729	tx->rate = iwn_rate_to_plcp(sc, ni, rate);
3730	if (tx->id == sc->broadcast_id) {
3731		/* Group or management frame. */
3732		tx->linkq = 0;
3733		/* XXX Alternate between antenna A and B? */
3734		txant = IWN_LSB(sc->txchainmask);
3735		tx->rate |= htole32(IWN_RFLAG_ANT(txant));
3736	} else {
3737		tx->linkq = ni->ni_rates.rs_nrates - ridx - 1;
3738		flags |= IWN_TX_LINKQ;	/* enable MRR */
3739	}
3740	/* Set physical address of "scratch area". */
3741	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3742	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3743
3744	/* Copy 802.11 header in TX command. */
3745	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3746
3747	/* Trim 802.11 header. */
3748	m_adj(m, hdrlen);
3749	tx->security = 0;
3750	tx->flags = htole32(flags);
3751
3752	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
3753	    &nsegs, BUS_DMA_NOWAIT);
3754	if (error != 0) {
3755		if (error != EFBIG) {
3756			device_printf(sc->sc_dev,
3757			    "%s: can't map mbuf (error %d)\n", __func__, error);
3758			m_freem(m);
3759			return error;
3760		}
3761		/* Too many DMA segments, linearize mbuf. */
3762		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
3763		if (m1 == NULL) {
3764			device_printf(sc->sc_dev,
3765			    "%s: could not defrag mbuf\n", __func__);
3766			m_freem(m);
3767			return ENOBUFS;
3768		}
3769		m = m1;
3770
3771		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3772		    segs, &nsegs, BUS_DMA_NOWAIT);
3773		if (error != 0) {
3774			device_printf(sc->sc_dev,
3775			    "%s: can't map mbuf (error %d)\n", __func__, error);
3776			m_freem(m);
3777			return error;
3778		}
3779	}
3780
3781	data->m = m;
3782	data->ni = ni;
3783
3784	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3785	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3786
3787	/* Fill TX descriptor. */
3788	desc->nsegs = 1;
3789	if (m->m_len != 0)
3790		desc->nsegs += nsegs;
3791	/* First DMA segment is used by the TX command. */
3792	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3793	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
3794	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
3795	/* Other DMA segments are for data payload. */
3796	seg = &segs[0];
3797	for (i = 1; i <= nsegs; i++) {
3798		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
3799		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
3800		    seg->ds_len << 4);
3801		seg++;
3802	}
3803
3804	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3805	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3806	    BUS_DMASYNC_PREWRITE);
3807	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3808	    BUS_DMASYNC_PREWRITE);
3809
3810	/* Update TX scheduler. */
3811	if (ring->qid >= sc->firstaggqueue)
3812		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3813
3814	/* Kick TX ring. */
3815	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3816	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3817
3818	/* Mark TX ring as full if we reach a certain threshold. */
3819	if (++ring->queued > IWN_TX_RING_HIMARK)
3820		sc->qfullmsk |= 1 << ring->qid;
3821
3822	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3823
3824	return 0;
3825}
3826
3827static int
3828iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
3829    struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
3830{
3831	struct iwn_ops *ops = &sc->ops;
3832	struct ifnet *ifp = sc->sc_ifp;
3833	struct ieee80211vap *vap = ni->ni_vap;
3834	struct ieee80211com *ic = ifp->if_l2com;
3835	struct iwn_tx_cmd *cmd;
3836	struct iwn_cmd_data *tx;
3837	struct ieee80211_frame *wh;
3838	struct iwn_tx_ring *ring;
3839	struct iwn_tx_desc *desc;
3840	struct iwn_tx_data *data;
3841	struct mbuf *m1;
3842	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
3843	uint32_t flags;
3844	u_int hdrlen;
3845	int ac, totlen, error, pad, nsegs = 0, i, rate;
3846	uint8_t ridx, type, txant;
3847
3848	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3849
3850	IWN_LOCK_ASSERT(sc);
3851
3852	wh = mtod(m, struct ieee80211_frame *);
3853	hdrlen = ieee80211_anyhdrsize(wh);
3854	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3855
3856	ac = params->ibp_pri & 3;
3857
3858	ring = &sc->txq[ac];
3859	desc = &ring->desc[ring->cur];
3860	data = &ring->data[ring->cur];
3861
3862	/* Choose a TX rate index. */
3863	rate = params->ibp_rate0;
3864	ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
3865	    rate & IEEE80211_RATE_VAL);
3866	if (ridx == (uint8_t)-1) {
3867		/* XXX fall back to mcast/mgmt rate? */
3868		m_freem(m);
3869		return EINVAL;
3870	}
3871
3872	totlen = m->m_pkthdr.len;
3873
3874	/* Prepare TX firmware command. */
3875	cmd = &ring->cmd[ring->cur];
3876	cmd->code = IWN_CMD_TX_DATA;
3877	cmd->flags = 0;
3878	cmd->qid = ring->qid;
3879	cmd->idx = ring->cur;
3880
3881	tx = (struct iwn_cmd_data *)cmd->data;
3882	/* NB: No need to clear tx, all fields are reinitialized here. */
3883	tx->scratch = 0;	/* clear "scratch" area */
3884
3885	flags = 0;
3886	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
3887		flags |= IWN_TX_NEED_ACK;
3888	if (params->ibp_flags & IEEE80211_BPF_RTS) {
3889		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3890			/* 5000 autoselects RTS/CTS or CTS-to-self. */
3891			flags &= ~IWN_TX_NEED_RTS;
3892			flags |= IWN_TX_NEED_PROTECTION;
3893		} else
3894			flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
3895	}
3896	if (params->ibp_flags & IEEE80211_BPF_CTS) {
3897		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3898			/* 5000 autoselects RTS/CTS or CTS-to-self. */
3899			flags &= ~IWN_TX_NEED_CTS;
3900			flags |= IWN_TX_NEED_PROTECTION;
3901		} else
3902			flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
3903	}
3904	if (type == IEEE80211_FC0_TYPE_MGT) {
3905		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3906
3907		/* Tell HW to set timestamp in probe responses. */
3908		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3909			flags |= IWN_TX_INSERT_TSTAMP;
3910
3911		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3912		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3913			tx->timeout = htole16(3);
3914		else
3915			tx->timeout = htole16(2);
3916	} else
3917		tx->timeout = htole16(0);
3918
3919	if (hdrlen & 3) {
3920		/* First segment length must be a multiple of 4. */
3921		flags |= IWN_TX_NEED_PADDING;
3922		pad = 4 - (hdrlen & 3);
3923	} else
3924		pad = 0;
3925
3926	if (ieee80211_radiotap_active_vap(vap)) {
3927		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3928
3929		tap->wt_flags = 0;
3930		tap->wt_rate = rate;
3931
3932		ieee80211_radiotap_tx(vap, m);
3933	}
3934
3935	tx->len = htole16(totlen);
3936	tx->tid = 0;
3937	tx->id = sc->broadcast_id;
3938	tx->rts_ntries = params->ibp_try1;
3939	tx->data_ntries = params->ibp_try0;
3940	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3941
3942	/* XXX should just use  iwn_rate_to_plcp() */
3943	tx->rate = htole32(rate2plcp(rate));
3944	if (ridx < IWN_RIDX_OFDM6 &&
3945	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
3946		tx->rate |= htole32(IWN_RFLAG_CCK);
3947
3948	/* Group or management frame. */
3949	tx->linkq = 0;
3950	txant = IWN_LSB(sc->txchainmask);
3951	tx->rate |= htole32(IWN_RFLAG_ANT(txant));
3952
3953	/* Set physical address of "scratch area". */
3954	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3955	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3956
3957	/* Copy 802.11 header in TX command. */
3958	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3959
3960	/* Trim 802.11 header. */
3961	m_adj(m, hdrlen);
3962	tx->security = 0;
3963	tx->flags = htole32(flags);
3964
3965	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
3966	    &nsegs, BUS_DMA_NOWAIT);
3967	if (error != 0) {
3968		if (error != EFBIG) {
3969			device_printf(sc->sc_dev,
3970			    "%s: can't map mbuf (error %d)\n", __func__, error);
3971			m_freem(m);
3972			return error;
3973		}
3974		/* Too many DMA segments, linearize mbuf. */
3975		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
3976		if (m1 == NULL) {
3977			device_printf(sc->sc_dev,
3978			    "%s: could not defrag mbuf\n", __func__);
3979			m_freem(m);
3980			return ENOBUFS;
3981		}
3982		m = m1;
3983
3984		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3985		    segs, &nsegs, BUS_DMA_NOWAIT);
3986		if (error != 0) {
3987			device_printf(sc->sc_dev,
3988			    "%s: can't map mbuf (error %d)\n", __func__, error);
3989			m_freem(m);
3990			return error;
3991		}
3992	}
3993
3994	data->m = m;
3995	data->ni = ni;
3996
3997	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3998	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3999
4000	/* Fill TX descriptor. */
4001	desc->nsegs = 1;
4002	if (m->m_len != 0)
4003		desc->nsegs += nsegs;
4004	/* First DMA segment is used by the TX command. */
4005	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
4006	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
4007	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
4008	/* Other DMA segments are for data payload. */
4009	seg = &segs[0];
4010	for (i = 1; i <= nsegs; i++) {
4011		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
4012		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
4013		    seg->ds_len << 4);
4014		seg++;
4015	}
4016
4017	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
4018	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
4019	    BUS_DMASYNC_PREWRITE);
4020	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4021	    BUS_DMASYNC_PREWRITE);
4022
4023	/* Update TX scheduler. */
4024	if (ring->qid >= sc->firstaggqueue)
4025		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
4026
4027	/* Kick TX ring. */
4028	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
4029	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4030
4031	/* Mark TX ring as full if we reach a certain threshold. */
4032	if (++ring->queued > IWN_TX_RING_HIMARK)
4033		sc->qfullmsk |= 1 << ring->qid;
4034
4035	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4036
4037	return 0;
4038}
4039
4040static int
4041iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
4042    const struct ieee80211_bpf_params *params)
4043{
4044	struct ieee80211com *ic = ni->ni_ic;
4045	struct ifnet *ifp = ic->ic_ifp;
4046	struct iwn_softc *sc = ifp->if_softc;
4047	int error = 0;
4048
4049	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4050
4051	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4052		ieee80211_free_node(ni);
4053		m_freem(m);
4054		return ENETDOWN;
4055	}
4056
4057	IWN_LOCK(sc);
4058	if (params == NULL) {
4059		/*
4060		 * Legacy path; interpret frame contents to decide
4061		 * precisely how to send the frame.
4062		 */
4063		error = iwn_tx_data(sc, m, ni);
4064	} else {
4065		/*
4066		 * Caller supplied explicit parameters to use in
4067		 * sending the frame.
4068		 */
4069		error = iwn_tx_data_raw(sc, m, ni, params);
4070	}
4071	if (error != 0) {
4072		/* NB: m is reclaimed on tx failure */
4073		ieee80211_free_node(ni);
4074		ifp->if_oerrors++;
4075	}
4076	sc->sc_tx_timer = 5;
4077
4078	IWN_UNLOCK(sc);
4079
4080	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4081
4082	return error;
4083}
4084
4085static void
4086iwn_start(struct ifnet *ifp)
4087{
4088	struct iwn_softc *sc = ifp->if_softc;
4089
4090	IWN_LOCK(sc);
4091	iwn_start_locked(ifp);
4092	IWN_UNLOCK(sc);
4093}
4094
4095static void
4096iwn_start_locked(struct ifnet *ifp)
4097{
4098	struct iwn_softc *sc = ifp->if_softc;
4099	struct ieee80211_node *ni;
4100	struct mbuf *m;
4101
4102	IWN_LOCK_ASSERT(sc);
4103
4104	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
4105	    (ifp->if_drv_flags & IFF_DRV_OACTIVE))
4106		return;
4107
4108	for (;;) {
4109		if (sc->qfullmsk != 0) {
4110			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4111			break;
4112		}
4113		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
4114		if (m == NULL)
4115			break;
4116		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4117		if (iwn_tx_data(sc, m, ni) != 0) {
4118			ieee80211_free_node(ni);
4119			ifp->if_oerrors++;
4120			continue;
4121		}
4122		sc->sc_tx_timer = 5;
4123	}
4124}
4125
4126static void
4127iwn_watchdog(void *arg)
4128{
4129	struct iwn_softc *sc = arg;
4130	struct ifnet *ifp = sc->sc_ifp;
4131	struct ieee80211com *ic = ifp->if_l2com;
4132
4133	IWN_LOCK_ASSERT(sc);
4134
4135	KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));
4136
4137	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4138
4139	if (sc->sc_tx_timer > 0) {
4140		if (--sc->sc_tx_timer == 0) {
4141			if_printf(ifp, "device timeout\n");
4142			ieee80211_runtask(ic, &sc->sc_reinit_task);
4143			return;
4144		}
4145	}
4146	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
4147}
4148
4149static int
4150iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
4151{
4152	struct iwn_softc *sc = ifp->if_softc;
4153	struct ieee80211com *ic = ifp->if_l2com;
4154	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4155	struct ifreq *ifr = (struct ifreq *) data;
4156	int error = 0, startall = 0, stop = 0;
4157
4158	switch (cmd) {
4159	case SIOCGIFADDR:
4160		error = ether_ioctl(ifp, cmd, data);
4161		break;
4162	case SIOCSIFFLAGS:
4163		IWN_LOCK(sc);
4164		if (ifp->if_flags & IFF_UP) {
4165			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4166				iwn_init_locked(sc);
4167				if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
4168					startall = 1;
4169				else
4170					stop = 1;
4171			}
4172		} else {
4173			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4174				iwn_stop_locked(sc);
4175		}
4176		IWN_UNLOCK(sc);
4177		if (startall)
4178			ieee80211_start_all(ic);
4179		else if (vap != NULL && stop)
4180			ieee80211_stop(vap);
4181		break;
4182	case SIOCGIFMEDIA:
4183		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
4184		break;
4185	default:
4186		error = EINVAL;
4187		break;
4188	}
4189	return error;
4190}
4191
4192/*
4193 * Send a command to the firmware.
4194 */
4195static int
4196iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
4197{
4198	struct iwn_tx_ring *ring = &sc->txq[4];
4199	struct iwn_tx_desc *desc;
4200	struct iwn_tx_data *data;
4201	struct iwn_tx_cmd *cmd;
4202	struct mbuf *m;
4203	bus_addr_t paddr;
4204	int totlen, error;
4205
4206	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4207
4208	if (async == 0)
4209		IWN_LOCK_ASSERT(sc);
4210
4211	desc = &ring->desc[ring->cur];
4212	data = &ring->data[ring->cur];
4213	totlen = 4 + size;
4214
4215	if (size > sizeof cmd->data) {
4216		/* Command is too large to fit in a descriptor. */
4217		if (totlen > MCLBYTES)
4218			return EINVAL;
4219		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
4220		if (m == NULL)
4221			return ENOMEM;
4222		cmd = mtod(m, struct iwn_tx_cmd *);
4223		error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
4224		    totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
4225		if (error != 0) {
4226			m_freem(m);
4227			return error;
4228		}
4229		data->m = m;
4230	} else {
4231		cmd = &ring->cmd[ring->cur];
4232		paddr = data->cmd_paddr;
4233	}
4234
4235	cmd->code = code;
4236	cmd->flags = 0;
4237	cmd->qid = ring->qid;
4238	cmd->idx = ring->cur;
4239	memcpy(cmd->data, buf, size);
4240
4241	desc->nsegs = 1;
4242	desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
4243	desc->segs[0].len  = htole16(IWN_HIADDR(paddr) | totlen << 4);
4244
4245	DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
4246	    __func__, iwn_intr_str(cmd->code), cmd->code,
4247	    cmd->flags, cmd->qid, cmd->idx);
4248
4249	if (size > sizeof cmd->data) {
4250		bus_dmamap_sync(ring->data_dmat, data->map,
4251		    BUS_DMASYNC_PREWRITE);
4252	} else {
4253		bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
4254		    BUS_DMASYNC_PREWRITE);
4255	}
4256	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4257	    BUS_DMASYNC_PREWRITE);
4258
4259	/* Kick command ring. */
4260	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
4261	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4262
4263	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4264
4265	return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
4266}
4267
4268static int
4269iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
4270{
4271	struct iwn4965_node_info hnode;
4272	caddr_t src, dst;
4273
4274	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4275
4276	/*
4277	 * We use the node structure for 5000 Series internally (it is
4278	 * a superset of the one for 4965AGN). We thus copy the common
4279	 * fields before sending the command.
4280	 */
4281	src = (caddr_t)node;
4282	dst = (caddr_t)&hnode;
4283	memcpy(dst, src, 48);
4284	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
4285	memcpy(dst + 48, src + 72, 20);
4286	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
4287}
4288
4289static int
4290iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
4291{
4292
4293	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4294
4295	/* Direct mapping. */
4296	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
4297}
4298
4299static int
4300iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
4301{
4302#define	RV(v)	((v) & IEEE80211_RATE_VAL)
4303	struct iwn_node *wn = (void *)ni;
4304	struct ieee80211_rateset *rs = &ni->ni_rates;
4305	struct iwn_cmd_link_quality linkq;
4306	uint8_t txant;
4307	int i, rate, txrate;
4308
4309	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4310
4311	/* Use the first valid TX antenna. */
4312	txant = IWN_LSB(sc->txchainmask);
4313
4314	memset(&linkq, 0, sizeof linkq);
4315	linkq.id = wn->id;
4316	linkq.antmsk_1stream = txant;
4317	linkq.antmsk_2stream = IWN_ANT_AB;
4318	linkq.ampdu_max = 64;
4319	linkq.ampdu_threshold = 3;
4320	linkq.ampdu_limit = htole16(4000);	/* 4ms */
4321
4322	/* Start at highest available bit-rate. */
4323	if (IEEE80211_IS_CHAN_HT(ni->ni_chan))
4324		txrate = ni->ni_htrates.rs_nrates - 1;
4325	else
4326		txrate = rs->rs_nrates - 1;
4327	for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
4328		uint32_t plcp;
4329
4330		if (IEEE80211_IS_CHAN_HT(ni->ni_chan))
4331			rate = IEEE80211_RATE_MCS | txrate;
4332		else
4333			rate = RV(rs->rs_rates[txrate]);
4334
4335		/* Do rate -> PLCP config mapping */
4336		plcp = iwn_rate_to_plcp(sc, ni, rate);
4337		linkq.retry[i] = plcp;
4338
4339		/* Special case for dual-stream rates? */
4340		if ((le32toh(plcp) & IWN_RFLAG_MCS) &&
4341		    RV(le32toh(plcp)) > 7)
4342			linkq.mimo = i + 1;
4343
4344		/* Next retry at immediate lower bit-rate. */
4345		if (txrate > 0)
4346			txrate--;
4347	}
4348
4349	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4350
4351	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
4352#undef	RV
4353}
4354
4355/*
4356 * Broadcast node is used to send group-addressed and management frames.
4357 */
4358static int
4359iwn_add_broadcast_node(struct iwn_softc *sc, int async)
4360{
4361	struct iwn_ops *ops = &sc->ops;
4362	struct ifnet *ifp = sc->sc_ifp;
4363	struct ieee80211com *ic = ifp->if_l2com;
4364	struct iwn_node_info node;
4365	struct iwn_cmd_link_quality linkq;
4366	uint8_t txant;
4367	int i, error;
4368
4369	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4370
4371	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
4372
4373	memset(&node, 0, sizeof node);
4374	IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
4375	node.id = sc->broadcast_id;
4376	DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
4377	if ((error = ops->add_node(sc, &node, async)) != 0)
4378		return error;
4379
4380	/* Use the first valid TX antenna. */
4381	txant = IWN_LSB(sc->txchainmask);
4382
4383	memset(&linkq, 0, sizeof linkq);
4384	linkq.id = sc->broadcast_id;
4385	linkq.antmsk_1stream = txant;
4386	linkq.antmsk_2stream = IWN_ANT_AB;
4387	linkq.ampdu_max = 64;
4388	linkq.ampdu_threshold = 3;
4389	linkq.ampdu_limit = htole16(4000);	/* 4ms */
4390
4391	/* Use lowest mandatory bit-rate. */
4392	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
4393		linkq.retry[0] = htole32(0xd);
4394	else
4395		linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK);
4396	linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant));
4397	/* Use same bit-rate for all TX retries. */
4398	for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
4399		linkq.retry[i] = linkq.retry[0];
4400	}
4401
4402	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4403
4404	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
4405}
4406
4407static int
4408iwn_updateedca(struct ieee80211com *ic)
4409{
4410#define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
4411	struct iwn_softc *sc = ic->ic_ifp->if_softc;
4412	struct iwn_edca_params cmd;
4413	int aci;
4414
4415	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4416
4417	memset(&cmd, 0, sizeof cmd);
4418	cmd.flags = htole32(IWN_EDCA_UPDATE);
4419	for (aci = 0; aci < WME_NUM_AC; aci++) {
4420		const struct wmeParams *ac =
4421		    &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
4422		cmd.ac[aci].aifsn = ac->wmep_aifsn;
4423		cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
4424		cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
4425		cmd.ac[aci].txoplimit =
4426		    htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
4427	}
4428	IEEE80211_UNLOCK(ic);
4429	IWN_LOCK(sc);
4430	(void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
4431	IWN_UNLOCK(sc);
4432	IEEE80211_LOCK(ic);
4433
4434	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4435
4436	return 0;
4437#undef IWN_EXP2
4438}
4439
4440static void
4441iwn_update_mcast(struct ifnet *ifp)
4442{
4443	/* Ignore */
4444}
4445
4446static void
4447iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
4448{
4449	struct iwn_cmd_led led;
4450
4451	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4452
4453	/* Clear microcode LED ownership. */
4454	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
4455
4456	led.which = which;
4457	led.unit = htole32(10000);	/* on/off in unit of 100ms */
4458	led.off = off;
4459	led.on = on;
4460	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
4461}
4462
4463/*
4464 * Set the critical temperature at which the firmware will stop the radio
4465 * and notify us.
4466 */
4467static int
4468iwn_set_critical_temp(struct iwn_softc *sc)
4469{
4470	struct iwn_critical_temp crit;
4471	int32_t temp;
4472
4473	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4474
4475	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
4476
4477	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
4478		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
4479	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
4480		temp = IWN_CTOK(110);
4481	else
4482		temp = 110;
4483	memset(&crit, 0, sizeof crit);
4484	crit.tempR = htole32(temp);
4485	DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp);
4486	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
4487}
4488
4489static int
4490iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
4491{
4492	struct iwn_cmd_timing cmd;
4493	uint64_t val, mod;
4494
4495	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4496
4497	memset(&cmd, 0, sizeof cmd);
4498	memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
4499	cmd.bintval = htole16(ni->ni_intval);
4500	cmd.lintval = htole16(10);
4501
4502	/* Compute remaining time until next beacon. */
4503	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
4504	mod = le64toh(cmd.tstamp) % val;
4505	cmd.binitval = htole32((uint32_t)(val - mod));
4506
4507	DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
4508	    ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
4509
4510	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
4511}
4512
4513static void
4514iwn4965_power_calibration(struct iwn_softc *sc, int temp)
4515{
4516	struct ifnet *ifp = sc->sc_ifp;
4517	struct ieee80211com *ic = ifp->if_l2com;
4518
4519	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4520
4521	/* Adjust TX power if need be (delta >= 3 degC). */
4522	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
4523	    __func__, sc->temp, temp);
4524	if (abs(temp - sc->temp) >= 3) {
4525		/* Record temperature of last calibration. */
4526		sc->temp = temp;
4527		(void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
4528	}
4529}
4530
4531/*
4532 * Set TX power for current channel (each rate has its own power settings).
4533 * This function takes into account the regulatory information from EEPROM,
4534 * the current temperature and the current voltage.
4535 */
4536static int
4537iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
4538    int async)
4539{
4540/* Fixed-point arithmetic division using a n-bit fractional part. */
4541#define fdivround(a, b, n)	\
4542	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
4543/* Linear interpolation. */
4544#define interpolate(x, x1, y1, x2, y2, n)	\
4545	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
4546
4547	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
4548	struct iwn_ucode_info *uc = &sc->ucode_info;
4549	struct iwn4965_cmd_txpower cmd;
4550	struct iwn4965_eeprom_chan_samples *chans;
4551	const uint8_t *rf_gain, *dsp_gain;
4552	int32_t vdiff, tdiff;
4553	int i, c, grp, maxpwr;
4554	uint8_t chan;
4555
4556	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
4557	/* Retrieve current channel from last RXON. */
4558	chan = sc->rxon->chan;
4559	DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
4560	    chan);
4561
4562	memset(&cmd, 0, sizeof cmd);
4563	cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
4564	cmd.chan = chan;
4565
4566	if (IEEE80211_IS_CHAN_5GHZ(ch)) {
4567		maxpwr   = sc->maxpwr5GHz;
4568		rf_gain  = iwn4965_rf_gain_5ghz;
4569		dsp_gain = iwn4965_dsp_gain_5ghz;
4570	} else {
4571		maxpwr   = sc->maxpwr2GHz;
4572		rf_gain  = iwn4965_rf_gain_2ghz;
4573		dsp_gain = iwn4965_dsp_gain_2ghz;
4574	}
4575
4576	/* Compute voltage compensation. */
4577	vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
4578	if (vdiff > 0)
4579		vdiff *= 2;
4580	if (abs(vdiff) > 2)
4581		vdiff = 0;
4582	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4583	    "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
4584	    __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);
4585
4586	/* Get channel attenuation group. */
4587	if (chan <= 20)		/* 1-20 */
4588		grp = 4;
4589	else if (chan <= 43)	/* 34-43 */
4590		grp = 0;
4591	else if (chan <= 70)	/* 44-70 */
4592		grp = 1;
4593	else if (chan <= 124)	/* 71-124 */
4594		grp = 2;
4595	else			/* 125-200 */
4596		grp = 3;
4597	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4598	    "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);
4599
4600	/* Get channel sub-band. */
4601	for (i = 0; i < IWN_NBANDS; i++)
4602		if (sc->bands[i].lo != 0 &&
4603		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
4604			break;
4605	if (i == IWN_NBANDS)	/* Can't happen in real-life. */
4606		return EINVAL;
4607	chans = sc->bands[i].chans;
4608	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4609	    "%s: chan %d sub-band=%d\n", __func__, chan, i);
4610
4611	for (c = 0; c < 2; c++) {
4612		uint8_t power, gain, temp;
4613		int maxchpwr, pwr, ridx, idx;
4614
4615		power = interpolate(chan,
4616		    chans[0].num, chans[0].samples[c][1].power,
4617		    chans[1].num, chans[1].samples[c][1].power, 1);
4618		gain  = interpolate(chan,
4619		    chans[0].num, chans[0].samples[c][1].gain,
4620		    chans[1].num, chans[1].samples[c][1].gain, 1);
4621		temp  = interpolate(chan,
4622		    chans[0].num, chans[0].samples[c][1].temp,
4623		    chans[1].num, chans[1].samples[c][1].temp, 1);
4624		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4625		    "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
4626		    __func__, c, power, gain, temp);
4627
4628		/* Compute temperature compensation. */
4629		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
4630		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4631		    "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
4632		    __func__, tdiff, sc->temp, temp);
4633
4634		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
4635			/* Convert dBm to half-dBm. */
4636			maxchpwr = sc->maxpwr[chan] * 2;
4637			if ((ridx / 8) & 1)
4638				maxchpwr -= 6;	/* MIMO 2T: -3dB */
4639
4640			pwr = maxpwr;
4641
4642			/* Adjust TX power based on rate. */
4643			if ((ridx % 8) == 5)
4644				pwr -= 15;	/* OFDM48: -7.5dB */
4645			else if ((ridx % 8) == 6)
4646				pwr -= 17;	/* OFDM54: -8.5dB */
4647			else if ((ridx % 8) == 7)
4648				pwr -= 20;	/* OFDM60: -10dB */
4649			else
4650				pwr -= 10;	/* Others: -5dB */
4651
4652			/* Do not exceed channel max TX power. */
4653			if (pwr > maxchpwr)
4654				pwr = maxchpwr;
4655
4656			idx = gain - (pwr - power) - tdiff - vdiff;
4657			if ((ridx / 8) & 1)	/* MIMO */
4658				idx += (int32_t)le32toh(uc->atten[grp][c]);
4659
4660			if (cmd.band == 0)
4661				idx += 9;	/* 5GHz */
4662			if (ridx == IWN_RIDX_MAX)
4663				idx += 5;	/* CCK */
4664
4665			/* Make sure idx stays in a valid range. */
4666			if (idx < 0)
4667				idx = 0;
4668			else if (idx > IWN4965_MAX_PWR_INDEX)
4669				idx = IWN4965_MAX_PWR_INDEX;
4670
4671			DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4672			    "%s: Tx chain %d, rate idx %d: power=%d\n",
4673			    __func__, c, ridx, idx);
4674			cmd.power[ridx].rf_gain[c] = rf_gain[idx];
4675			cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
4676		}
4677	}
4678
4679	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4680	    "%s: set tx power for chan %d\n", __func__, chan);
4681	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
4682
4683#undef interpolate
4684#undef fdivround
4685}
4686
4687static int
4688iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
4689    int async)
4690{
4691	struct iwn5000_cmd_txpower cmd;
4692
4693	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4694
4695	/*
4696	 * TX power calibration is handled automatically by the firmware
4697	 * for 5000 Series.
4698	 */
4699	memset(&cmd, 0, sizeof cmd);
4700	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
4701	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
4702	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
4703	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__);
4704	return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
4705}
4706
4707/*
4708 * Retrieve the maximum RSSI (in dBm) among receivers.
4709 */
4710static int
4711iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
4712{
4713	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
4714	uint8_t mask, agc;
4715	int rssi;
4716
4717	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4718
4719	mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
4720	agc  = (le16toh(phy->agc) >> 7) & 0x7f;
4721
4722	rssi = 0;
4723	if (mask & IWN_ANT_A)
4724		rssi = MAX(rssi, phy->rssi[0]);
4725	if (mask & IWN_ANT_B)
4726		rssi = MAX(rssi, phy->rssi[2]);
4727	if (mask & IWN_ANT_C)
4728		rssi = MAX(rssi, phy->rssi[4]);
4729
4730	DPRINTF(sc, IWN_DEBUG_RECV,
4731	    "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc,
4732	    mask, phy->rssi[0], phy->rssi[2], phy->rssi[4],
4733	    rssi - agc - IWN_RSSI_TO_DBM);
4734	return rssi - agc - IWN_RSSI_TO_DBM;
4735}
4736
4737static int
4738iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
4739{
4740	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
4741	uint8_t agc;
4742	int rssi;
4743
4744	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4745
4746	agc = (le32toh(phy->agc) >> 9) & 0x7f;
4747
4748	rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
4749		   le16toh(phy->rssi[1]) & 0xff);
4750	rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
4751
4752	DPRINTF(sc, IWN_DEBUG_RECV,
4753	    "%s: agc %d rssi %d %d %d result %d\n", __func__, agc,
4754	    phy->rssi[0], phy->rssi[1], phy->rssi[2],
4755	    rssi - agc - IWN_RSSI_TO_DBM);
4756	return rssi - agc - IWN_RSSI_TO_DBM;
4757}
4758
4759/*
4760 * Retrieve the average noise (in dBm) among receivers.
4761 */
4762static int
4763iwn_get_noise(const struct iwn_rx_general_stats *stats)
4764{
4765	int i, total, nbant, noise;
4766
4767	total = nbant = 0;
4768	for (i = 0; i < 3; i++) {
4769		if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
4770			continue;
4771		total += noise;
4772		nbant++;
4773	}
4774	/* There should be at least one antenna but check anyway. */
4775	return (nbant == 0) ? -127 : (total / nbant) - 107;
4776}
4777
4778/*
4779 * Compute temperature (in degC) from last received statistics.
4780 */
4781static int
4782iwn4965_get_temperature(struct iwn_softc *sc)
4783{
4784	struct iwn_ucode_info *uc = &sc->ucode_info;
4785	int32_t r1, r2, r3, r4, temp;
4786
4787	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4788
4789	r1 = le32toh(uc->temp[0].chan20MHz);
4790	r2 = le32toh(uc->temp[1].chan20MHz);
4791	r3 = le32toh(uc->temp[2].chan20MHz);
4792	r4 = le32toh(sc->rawtemp);
4793
4794	if (r1 == r3)	/* Prevents division by 0 (should not happen). */
4795		return 0;
4796
4797	/* Sign-extend 23-bit R4 value to 32-bit. */
4798	r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
4799	/* Compute temperature in Kelvin. */
4800	temp = (259 * (r4 - r2)) / (r3 - r1);
4801	temp = (temp * 97) / 100 + 8;
4802
4803	DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
4804	    IWN_KTOC(temp));
4805	return IWN_KTOC(temp);
4806}
4807
4808static int
4809iwn5000_get_temperature(struct iwn_softc *sc)
4810{
4811	int32_t temp;
4812
4813	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4814
4815	/*
4816	 * Temperature is not used by the driver for 5000 Series because
4817	 * TX power calibration is handled by firmware.
4818	 */
4819	temp = le32toh(sc->rawtemp);
4820	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
4821		temp = (temp / -5) + sc->temp_off;
4822		temp = IWN_KTOC(temp);
4823	}
4824	return temp;
4825}
4826
4827/*
4828 * Initialize sensitivity calibration state machine.
4829 */
4830static int
4831iwn_init_sensitivity(struct iwn_softc *sc)
4832{
4833	struct iwn_ops *ops = &sc->ops;
4834	struct iwn_calib_state *calib = &sc->calib;
4835	uint32_t flags;
4836	int error;
4837
4838	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4839
4840	/* Reset calibration state machine. */
4841	memset(calib, 0, sizeof (*calib));
4842	calib->state = IWN_CALIB_STATE_INIT;
4843	calib->cck_state = IWN_CCK_STATE_HIFA;
4844	/* Set initial correlation values. */
4845	calib->ofdm_x1     = sc->limits->min_ofdm_x1;
4846	calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
4847	calib->ofdm_x4     = sc->limits->min_ofdm_x4;
4848	calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
4849	calib->cck_x4      = 125;
4850	calib->cck_mrc_x4  = sc->limits->min_cck_mrc_x4;
4851	calib->energy_cck  = sc->limits->energy_cck;
4852
4853	/* Write initial sensitivity. */
4854	if ((error = iwn_send_sensitivity(sc)) != 0)
4855		return error;
4856
4857	/* Write initial gains. */
4858	if ((error = ops->init_gains(sc)) != 0)
4859		return error;
4860
4861	/* Request statistics at each beacon interval. */
4862	flags = 0;
4863	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n",
4864	    __func__);
4865	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
4866}
4867
4868/*
4869 * Collect noise and RSSI statistics for the first 20 beacons received
4870 * after association and use them to determine connected antennas and
4871 * to set differential gains.
4872 */
4873static void
4874iwn_collect_noise(struct iwn_softc *sc,
4875    const struct iwn_rx_general_stats *stats)
4876{
4877	struct iwn_ops *ops = &sc->ops;
4878	struct iwn_calib_state *calib = &sc->calib;
4879	struct ifnet *ifp = sc->sc_ifp;
4880	struct ieee80211com *ic = ifp->if_l2com;
4881	uint32_t val;
4882	int i;
4883
4884	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4885
4886	/* Accumulate RSSI and noise for all 3 antennas. */
4887	for (i = 0; i < 3; i++) {
4888		calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
4889		calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
4890	}
4891	/* NB: We update differential gains only once after 20 beacons. */
4892	if (++calib->nbeacons < 20)
4893		return;
4894
4895	/* Determine highest average RSSI. */
4896	val = MAX(calib->rssi[0], calib->rssi[1]);
4897	val = MAX(calib->rssi[2], val);
4898
4899	/* Determine which antennas are connected. */
4900	sc->chainmask = sc->rxchainmask;
4901	for (i = 0; i < 3; i++)
4902		if (val - calib->rssi[i] > 15 * 20)
4903			sc->chainmask &= ~(1 << i);
4904	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4905	    "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
4906	    __func__, sc->rxchainmask, sc->chainmask);
4907
4908	/* If none of the TX antennas are connected, keep at least one. */
4909	if ((sc->chainmask & sc->txchainmask) == 0)
4910		sc->chainmask |= IWN_LSB(sc->txchainmask);
4911
4912	(void)ops->set_gains(sc);
4913	calib->state = IWN_CALIB_STATE_RUN;
4914
4915#ifdef notyet
4916	/* XXX Disable RX chains with no antennas connected. */
4917	sc->rxon->rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
4918	(void)iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
4919#endif
4920
4921	/* Enable power-saving mode if requested by user. */
4922	if (ic->ic_flags & IEEE80211_F_PMGTON)
4923		(void)iwn_set_pslevel(sc, 0, 3, 1);
4924
4925	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4926
4927}
4928
4929static int
4930iwn4965_init_gains(struct iwn_softc *sc)
4931{
4932	struct iwn_phy_calib_gain cmd;
4933
4934	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4935
4936	memset(&cmd, 0, sizeof cmd);
4937	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4938	/* Differential gains initially set to 0 for all 3 antennas. */
4939	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4940	    "%s: setting initial differential gains\n", __func__);
4941	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4942}
4943
4944static int
4945iwn5000_init_gains(struct iwn_softc *sc)
4946{
4947	struct iwn_phy_calib cmd;
4948
4949	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4950
4951	memset(&cmd, 0, sizeof cmd);
4952	cmd.code = sc->reset_noise_gain;
4953	cmd.ngroups = 1;
4954	cmd.isvalid = 1;
4955	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4956	    "%s: setting initial differential gains\n", __func__);
4957	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4958}
4959
4960static int
4961iwn4965_set_gains(struct iwn_softc *sc)
4962{
4963	struct iwn_calib_state *calib = &sc->calib;
4964	struct iwn_phy_calib_gain cmd;
4965	int i, delta, noise;
4966
4967	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4968
4969	/* Get minimal noise among connected antennas. */
4970	noise = INT_MAX;	/* NB: There's at least one antenna. */
4971	for (i = 0; i < 3; i++)
4972		if (sc->chainmask & (1 << i))
4973			noise = MIN(calib->noise[i], noise);
4974
4975	memset(&cmd, 0, sizeof cmd);
4976	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4977	/* Set differential gains for connected antennas. */
4978	for (i = 0; i < 3; i++) {
4979		if (sc->chainmask & (1 << i)) {
4980			/* Compute attenuation (in unit of 1.5dB). */
4981			delta = (noise - (int32_t)calib->noise[i]) / 30;
4982			/* NB: delta <= 0 */
4983			/* Limit to [-4.5dB,0]. */
4984			cmd.gain[i] = MIN(abs(delta), 3);
4985			if (delta < 0)
4986				cmd.gain[i] |= 1 << 2;	/* sign bit */
4987		}
4988	}
4989	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4990	    "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
4991	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
4992	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4993}
4994
4995static int
4996iwn5000_set_gains(struct iwn_softc *sc)
4997{
4998	struct iwn_calib_state *calib = &sc->calib;
4999	struct iwn_phy_calib_gain cmd;
5000	int i, ant, div, delta;
5001
5002	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5003
5004	/* We collected 20 beacons and !=6050 need a 1.5 factor. */
5005	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
5006
5007	memset(&cmd, 0, sizeof cmd);
5008	cmd.code = sc->noise_gain;
5009	cmd.ngroups = 1;
5010	cmd.isvalid = 1;
5011	/* Get first available RX antenna as referential. */
5012	ant = IWN_LSB(sc->rxchainmask);
5013	/* Set differential gains for other antennas. */
5014	for (i = ant + 1; i < 3; i++) {
5015		if (sc->chainmask & (1 << i)) {
5016			/* The delta is relative to antenna "ant". */
5017			delta = ((int32_t)calib->noise[ant] -
5018			    (int32_t)calib->noise[i]) / div;
5019			/* Limit to [-4.5dB,+4.5dB]. */
5020			cmd.gain[i - 1] = MIN(abs(delta), 3);
5021			if (delta < 0)
5022				cmd.gain[i - 1] |= 1 << 2;	/* sign bit */
5023		}
5024	}
5025	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5026	    "setting differential gains Ant B/C: %x/%x (%x)\n",
5027	    cmd.gain[0], cmd.gain[1], sc->chainmask);
5028	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5029}
5030
5031/*
5032 * Tune RF RX sensitivity based on the number of false alarms detected
5033 * during the last beacon period.
5034 */
5035static void
5036iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
5037{
5038#define inc(val, inc, max)			\
5039	if ((val) < (max)) {			\
5040		if ((val) < (max) - (inc))	\
5041			(val) += (inc);		\
5042		else				\
5043			(val) = (max);		\
5044		needs_update = 1;		\
5045	}
5046#define dec(val, dec, min)			\
5047	if ((val) > (min)) {			\
5048		if ((val) > (min) + (dec))	\
5049			(val) -= (dec);		\
5050		else				\
5051			(val) = (min);		\
5052		needs_update = 1;		\
5053	}
5054
5055	const struct iwn_sensitivity_limits *limits = sc->limits;
5056	struct iwn_calib_state *calib = &sc->calib;
5057	uint32_t val, rxena, fa;
5058	uint32_t energy[3], energy_min;
5059	uint8_t noise[3], noise_ref;
5060	int i, needs_update = 0;
5061
5062	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5063
5064	/* Check that we've been enabled long enough. */
5065	if ((rxena = le32toh(stats->general.load)) == 0){
5066		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end not so long\n", __func__);
5067		return;
5068	}
5069
5070	/* Compute number of false alarms since last call for OFDM. */
5071	fa  = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
5072	fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
5073	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */
5074
5075	/* Save counters values for next call. */
5076	calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp);
5077	calib->fa_ofdm = le32toh(stats->ofdm.fa);
5078
5079	if (fa > 50 * rxena) {
5080		/* High false alarm count, decrease sensitivity. */
5081		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5082		    "%s: OFDM high false alarm count: %u\n", __func__, fa);
5083		inc(calib->ofdm_x1,     1, limits->max_ofdm_x1);
5084		inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
5085		inc(calib->ofdm_x4,     1, limits->max_ofdm_x4);
5086		inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
5087
5088	} else if (fa < 5 * rxena) {
5089		/* Low false alarm count, increase sensitivity. */
5090		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5091		    "%s: OFDM low false alarm count: %u\n", __func__, fa);
5092		dec(calib->ofdm_x1,     1, limits->min_ofdm_x1);
5093		dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
5094		dec(calib->ofdm_x4,     1, limits->min_ofdm_x4);
5095		dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
5096	}
5097
5098	/* Compute maximum noise among 3 receivers. */
5099	for (i = 0; i < 3; i++)
5100		noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
5101	val = MAX(noise[0], noise[1]);
5102	val = MAX(noise[2], val);
5103	/* Insert it into our samples table. */
5104	calib->noise_samples[calib->cur_noise_sample] = val;
5105	calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
5106
5107	/* Compute maximum noise among last 20 samples. */
5108	noise_ref = calib->noise_samples[0];
5109	for (i = 1; i < 20; i++)
5110		noise_ref = MAX(noise_ref, calib->noise_samples[i]);
5111
5112	/* Compute maximum energy among 3 receivers. */
5113	for (i = 0; i < 3; i++)
5114		energy[i] = le32toh(stats->general.energy[i]);
5115	val = MIN(energy[0], energy[1]);
5116	val = MIN(energy[2], val);
5117	/* Insert it into our samples table. */
5118	calib->energy_samples[calib->cur_energy_sample] = val;
5119	calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
5120
5121	/* Compute minimum energy among last 10 samples. */
5122	energy_min = calib->energy_samples[0];
5123	for (i = 1; i < 10; i++)
5124		energy_min = MAX(energy_min, calib->energy_samples[i]);
5125	energy_min += 6;
5126
5127	/* Compute number of false alarms since last call for CCK. */
5128	fa  = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
5129	fa += le32toh(stats->cck.fa) - calib->fa_cck;
5130	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */
5131
5132	/* Save counters values for next call. */
5133	calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp);
5134	calib->fa_cck = le32toh(stats->cck.fa);
5135
5136	if (fa > 50 * rxena) {
5137		/* High false alarm count, decrease sensitivity. */
5138		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5139		    "%s: CCK high false alarm count: %u\n", __func__, fa);
5140		calib->cck_state = IWN_CCK_STATE_HIFA;
5141		calib->low_fa = 0;
5142
5143		if (calib->cck_x4 > 160) {
5144			calib->noise_ref = noise_ref;
5145			if (calib->energy_cck > 2)
5146				dec(calib->energy_cck, 2, energy_min);
5147		}
5148		if (calib->cck_x4 < 160) {
5149			calib->cck_x4 = 161;
5150			needs_update = 1;
5151		} else
5152			inc(calib->cck_x4, 3, limits->max_cck_x4);
5153
5154		inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
5155
5156	} else if (fa < 5 * rxena) {
5157		/* Low false alarm count, increase sensitivity. */
5158		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5159		    "%s: CCK low false alarm count: %u\n", __func__, fa);
5160		calib->cck_state = IWN_CCK_STATE_LOFA;
5161		calib->low_fa++;
5162
5163		if (calib->cck_state != IWN_CCK_STATE_INIT &&
5164		    (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
5165		     calib->low_fa > 100)) {
5166			inc(calib->energy_cck, 2, limits->min_energy_cck);
5167			dec(calib->cck_x4,     3, limits->min_cck_x4);
5168			dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
5169		}
5170	} else {
5171		/* Not worth to increase or decrease sensitivity. */
5172		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5173		    "%s: CCK normal false alarm count: %u\n", __func__, fa);
5174		calib->low_fa = 0;
5175		calib->noise_ref = noise_ref;
5176
5177		if (calib->cck_state == IWN_CCK_STATE_HIFA) {
5178			/* Previous interval had many false alarms. */
5179			dec(calib->energy_cck, 8, energy_min);
5180		}
5181		calib->cck_state = IWN_CCK_STATE_INIT;
5182	}
5183
5184	if (needs_update)
5185		(void)iwn_send_sensitivity(sc);
5186
5187	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5188
5189#undef dec
5190#undef inc
5191}
5192
5193static int
5194iwn_send_sensitivity(struct iwn_softc *sc)
5195{
5196	struct iwn_calib_state *calib = &sc->calib;
5197	struct iwn_enhanced_sensitivity_cmd cmd;
5198	int len;
5199
5200	memset(&cmd, 0, sizeof cmd);
5201	len = sizeof (struct iwn_sensitivity_cmd);
5202	cmd.which = IWN_SENSITIVITY_WORKTBL;
5203	/* OFDM modulation. */
5204	cmd.corr_ofdm_x1       = htole16(calib->ofdm_x1);
5205	cmd.corr_ofdm_mrc_x1   = htole16(calib->ofdm_mrc_x1);
5206	cmd.corr_ofdm_x4       = htole16(calib->ofdm_x4);
5207	cmd.corr_ofdm_mrc_x4   = htole16(calib->ofdm_mrc_x4);
5208	cmd.energy_ofdm        = htole16(sc->limits->energy_ofdm);
5209	cmd.energy_ofdm_th     = htole16(62);
5210	/* CCK modulation. */
5211	cmd.corr_cck_x4        = htole16(calib->cck_x4);
5212	cmd.corr_cck_mrc_x4    = htole16(calib->cck_mrc_x4);
5213	cmd.energy_cck         = htole16(calib->energy_cck);
5214	/* Barker modulation: use default values. */
5215	cmd.corr_barker        = htole16(190);
5216	cmd.corr_barker_mrc    = htole16(390);
5217
5218	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5219	    "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
5220	    calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
5221	    calib->ofdm_mrc_x4, calib->cck_x4,
5222	    calib->cck_mrc_x4, calib->energy_cck);
5223
5224	if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
5225		goto send;
5226	/* Enhanced sensitivity settings. */
5227	len = sizeof (struct iwn_enhanced_sensitivity_cmd);
5228	cmd.ofdm_det_slope_mrc = htole16(668);
5229	cmd.ofdm_det_icept_mrc = htole16(4);
5230	cmd.ofdm_det_slope     = htole16(486);
5231	cmd.ofdm_det_icept     = htole16(37);
5232	cmd.cck_det_slope_mrc  = htole16(853);
5233	cmd.cck_det_icept_mrc  = htole16(4);
5234	cmd.cck_det_slope      = htole16(476);
5235	cmd.cck_det_icept      = htole16(99);
5236send:
5237	return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
5238}
5239
5240/*
5241 * Set STA mode power saving level (between 0 and 5).
5242 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
5243 */
5244static int
5245iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
5246{
5247	struct iwn_pmgt_cmd cmd;
5248	const struct iwn_pmgt *pmgt;
5249	uint32_t max, skip_dtim;
5250	uint32_t reg;
5251	int i;
5252
5253	DPRINTF(sc, IWN_DEBUG_PWRSAVE,
5254	    "%s: dtim=%d, level=%d, async=%d\n",
5255	    __func__,
5256	    dtim,
5257	    level,
5258	    async);
5259
5260	/* Select which PS parameters to use. */
5261	if (dtim <= 2)
5262		pmgt = &iwn_pmgt[0][level];
5263	else if (dtim <= 10)
5264		pmgt = &iwn_pmgt[1][level];
5265	else
5266		pmgt = &iwn_pmgt[2][level];
5267
5268	memset(&cmd, 0, sizeof cmd);
5269	if (level != 0)	/* not CAM */
5270		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
5271	if (level == 5)
5272		cmd.flags |= htole16(IWN_PS_FAST_PD);
5273	/* Retrieve PCIe Active State Power Management (ASPM). */
5274	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
5275	if (!(reg & 0x1))	/* L0s Entry disabled. */
5276		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
5277	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
5278	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
5279
5280	if (dtim == 0) {
5281		dtim = 1;
5282		skip_dtim = 0;
5283	} else
5284		skip_dtim = pmgt->skip_dtim;
5285	if (skip_dtim != 0) {
5286		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
5287		max = pmgt->intval[4];
5288		if (max == (uint32_t)-1)
5289			max = dtim * (skip_dtim + 1);
5290		else if (max > dtim)
5291			max = (max / dtim) * dtim;
5292	} else
5293		max = dtim;
5294	for (i = 0; i < 5; i++)
5295		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
5296
5297	DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
5298	    level);
5299	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
5300}
5301
5302static int
5303iwn_send_btcoex(struct iwn_softc *sc)
5304{
5305	struct iwn_bluetooth cmd;
5306
5307	memset(&cmd, 0, sizeof cmd);
5308	cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
5309	cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
5310	cmd.max_kill = IWN_BT_MAX_KILL_DEF;
5311	DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
5312	    __func__);
5313	return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
5314}
5315
5316static int
5317iwn_send_advanced_btcoex(struct iwn_softc *sc)
5318{
5319	static const uint32_t btcoex_3wire[12] = {
5320		0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
5321		0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
5322		0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
5323	};
5324	struct iwn6000_btcoex_config btconfig;
5325	struct iwn_btcoex_priotable btprio;
5326	struct iwn_btcoex_prot btprot;
5327	int error, i;
5328
5329	memset(&btconfig, 0, sizeof btconfig);
5330	btconfig.flags = 145;
5331	btconfig.max_kill = 5;
5332	btconfig.bt3_t7_timer = 1;
5333	btconfig.kill_ack = htole32(0xffff0000);
5334	btconfig.kill_cts = htole32(0xffff0000);
5335	btconfig.sample_time = 2;
5336	btconfig.bt3_t2_timer = 0xc;
5337	for (i = 0; i < 12; i++)
5338		btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
5339	btconfig.valid = htole16(0xff);
5340	btconfig.prio_boost = 0xf0;
5341	DPRINTF(sc, IWN_DEBUG_RESET,
5342	    "%s: configuring advanced bluetooth coexistence\n", __func__);
5343	error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, sizeof(btconfig), 1);
5344	if (error != 0)
5345		return error;
5346
5347	memset(&btprio, 0, sizeof btprio);
5348	btprio.calib_init1 = 0x6;
5349	btprio.calib_init2 = 0x7;
5350	btprio.calib_periodic_low1 = 0x2;
5351	btprio.calib_periodic_low2 = 0x3;
5352	btprio.calib_periodic_high1 = 0x4;
5353	btprio.calib_periodic_high2 = 0x5;
5354	btprio.dtim = 0x6;
5355	btprio.scan52 = 0x8;
5356	btprio.scan24 = 0xa;
5357	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
5358	    1);
5359	if (error != 0)
5360		return error;
5361
5362	/* Force BT state machine change. */
5363	memset(&btprot, 0, sizeof btprot);
5364	btprot.open = 1;
5365	btprot.type = 1;
5366	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
5367	if (error != 0)
5368		return error;
5369	btprot.open = 0;
5370	return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
5371}
5372
5373static int
5374iwn5000_runtime_calib(struct iwn_softc *sc)
5375{
5376	struct iwn5000_calib_config cmd;
5377
5378	memset(&cmd, 0, sizeof cmd);
5379	cmd.ucode.once.enable = 0xffffffff;
5380	cmd.ucode.once.start = IWN5000_CALIB_DC;
5381	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5382	    "%s: configuring runtime calibration\n", __func__);
5383	return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
5384}
5385
5386static int
5387iwn_config(struct iwn_softc *sc)
5388{
5389	struct iwn_ops *ops = &sc->ops;
5390	struct ifnet *ifp = sc->sc_ifp;
5391	struct ieee80211com *ic = ifp->if_l2com;
5392	uint32_t txmask;
5393	uint16_t rxchain;
5394	int error;
5395
5396	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5397
5398	if (sc->hw_type == IWN_HW_REV_TYPE_6005) {
5399		/* Set radio temperature sensor offset. */
5400		error = iwn5000_temp_offset_calib(sc);
5401		if (error != 0) {
5402			device_printf(sc->sc_dev,
5403			    "%s: could not set temperature offset\n", __func__);
5404			return error;
5405		}
5406	}
5407
5408	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
5409		/* Configure runtime DC calibration. */
5410		error = iwn5000_runtime_calib(sc);
5411		if (error != 0) {
5412			device_printf(sc->sc_dev,
5413			    "%s: could not configure runtime calibration\n",
5414			    __func__);
5415			return error;
5416		}
5417	}
5418
5419	/* Configure valid TX chains for >=5000 Series. */
5420	if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
5421		txmask = htole32(sc->txchainmask);
5422		DPRINTF(sc, IWN_DEBUG_RESET,
5423		    "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
5424		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
5425		    sizeof txmask, 0);
5426		if (error != 0) {
5427			device_printf(sc->sc_dev,
5428			    "%s: could not configure valid TX chains, "
5429			    "error %d\n", __func__, error);
5430			return error;
5431		}
5432	}
5433
5434	/* Configure bluetooth coexistence. */
5435	if (sc->sc_flags & IWN_FLAG_ADV_BTCOEX)
5436		error = iwn_send_advanced_btcoex(sc);
5437	else
5438		error = iwn_send_btcoex(sc);
5439	if (error != 0) {
5440		device_printf(sc->sc_dev,
5441		    "%s: could not configure bluetooth coexistence, error %d\n",
5442		    __func__, error);
5443		return error;
5444	}
5445
5446	/* Set mode, channel, RX filter and enable RX. */
5447	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
5448	memset(sc->rxon, 0, sizeof (struct iwn_rxon));
5449	IEEE80211_ADDR_COPY(sc->rxon->myaddr, IF_LLADDR(ifp));
5450	IEEE80211_ADDR_COPY(sc->rxon->wlap, IF_LLADDR(ifp));
5451	sc->rxon->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
5452	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5453	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
5454		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5455	switch (ic->ic_opmode) {
5456	case IEEE80211_M_STA:
5457		sc->rxon->mode = IWN_MODE_STA;
5458		sc->rxon->filter = htole32(IWN_FILTER_MULTICAST);
5459		break;
5460	case IEEE80211_M_MONITOR:
5461		sc->rxon->mode = IWN_MODE_MONITOR;
5462		sc->rxon->filter = htole32(IWN_FILTER_MULTICAST |
5463		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
5464		break;
5465	default:
5466		/* Should not get there. */
5467		break;
5468	}
5469	sc->rxon->cck_mask  = 0x0f;	/* not yet negotiated */
5470	sc->rxon->ofdm_mask = 0xff;	/* not yet negotiated */
5471	sc->rxon->ht_single_mask = 0xff;
5472	sc->rxon->ht_dual_mask = 0xff;
5473	sc->rxon->ht_triple_mask = 0xff;
5474	rxchain =
5475	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
5476	    IWN_RXCHAIN_MIMO_COUNT(2) |
5477	    IWN_RXCHAIN_IDLE_COUNT(2);
5478	sc->rxon->rxchain = htole16(rxchain);
5479	DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__);
5480	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 0);
5481	if (error != 0) {
5482		device_printf(sc->sc_dev, "%s: RXON command failed\n",
5483		    __func__);
5484		return error;
5485	}
5486
5487	if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
5488		device_printf(sc->sc_dev, "%s: could not add broadcast node\n",
5489		    __func__);
5490		return error;
5491	}
5492
5493	/* Configuration has changed, set TX power accordingly. */
5494	if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) {
5495		device_printf(sc->sc_dev, "%s: could not set TX power\n",
5496		    __func__);
5497		return error;
5498	}
5499
5500	if ((error = iwn_set_critical_temp(sc)) != 0) {
5501		device_printf(sc->sc_dev,
5502		    "%s: could not set critical temperature\n", __func__);
5503		return error;
5504	}
5505
5506	/* Set power saving level to CAM during initialization. */
5507	if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
5508		device_printf(sc->sc_dev,
5509		    "%s: could not set power saving level\n", __func__);
5510		return error;
5511	}
5512
5513	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5514
5515	return 0;
5516}
5517
5518/*
5519 * Add an ssid element to a frame.
5520 */
5521static uint8_t *
5522ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len)
5523{
5524	*frm++ = IEEE80211_ELEMID_SSID;
5525	*frm++ = len;
5526	memcpy(frm, ssid, len);
5527	return frm + len;
5528}
5529
5530static int
5531iwn_scan(struct iwn_softc *sc)
5532{
5533	struct ifnet *ifp = sc->sc_ifp;
5534	struct ieee80211com *ic = ifp->if_l2com;
5535	struct ieee80211_scan_state *ss = ic->ic_scan;	/*XXX*/
5536	struct ieee80211_node *ni = ss->ss_vap->iv_bss;
5537	struct iwn_scan_hdr *hdr;
5538	struct iwn_cmd_data *tx;
5539	struct iwn_scan_essid *essid;
5540	struct iwn_scan_chan *chan;
5541	struct ieee80211_frame *wh;
5542	struct ieee80211_rateset *rs;
5543	struct ieee80211_channel *c;
5544	uint8_t *buf, *frm;
5545	uint16_t rxchain;
5546	uint8_t txant;
5547	int buflen, error;
5548
5549	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5550
5551	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
5552	buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
5553	if (buf == NULL) {
5554		device_printf(sc->sc_dev,
5555		    "%s: could not allocate buffer for scan command\n",
5556		    __func__);
5557		return ENOMEM;
5558	}
5559	hdr = (struct iwn_scan_hdr *)buf;
5560	/*
5561	 * Move to the next channel if no frames are received within 10ms
5562	 * after sending the probe request.
5563	 */
5564	hdr->quiet_time = htole16(10);		/* timeout in milliseconds */
5565	hdr->quiet_threshold = htole16(1);	/* min # of packets */
5566
5567	/* Select antennas for scanning. */
5568	rxchain =
5569	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
5570	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
5571	    IWN_RXCHAIN_DRIVER_FORCE;
5572	if (IEEE80211_IS_CHAN_A(ic->ic_curchan) &&
5573	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
5574		/* Ant A must be avoided in 5GHz because of an HW bug. */
5575		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
5576	} else	/* Use all available RX antennas. */
5577		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
5578	hdr->rxchain = htole16(rxchain);
5579	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
5580
5581	tx = (struct iwn_cmd_data *)(hdr + 1);
5582	tx->flags = htole32(IWN_TX_AUTO_SEQ);
5583	tx->id = sc->broadcast_id;
5584	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
5585
5586	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) {
5587		/* Send probe requests at 6Mbps. */
5588		tx->rate = htole32(0xd);
5589		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5590	} else {
5591		hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
5592		if (sc->hw_type == IWN_HW_REV_TYPE_4965 &&
5593		    sc->rxon->associd && sc->rxon->chan > 14)
5594			tx->rate = htole32(0xd);
5595		else {
5596			/* Send probe requests at 1Mbps. */
5597			tx->rate = htole32(10 | IWN_RFLAG_CCK);
5598		}
5599		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5600	}
5601	/* Use the first valid TX antenna. */
5602	txant = IWN_LSB(sc->txchainmask);
5603	tx->rate |= htole32(IWN_RFLAG_ANT(txant));
5604
5605	essid = (struct iwn_scan_essid *)(tx + 1);
5606	if (ss->ss_ssid[0].len != 0) {
5607		essid[0].id = IEEE80211_ELEMID_SSID;
5608		essid[0].len = ss->ss_ssid[0].len;
5609		memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
5610	}
5611	/*
5612	 * Build a probe request frame.  Most of the following code is a
5613	 * copy & paste of what is done in net80211.
5614	 */
5615	wh = (struct ieee80211_frame *)(essid + 20);
5616	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5617	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5618	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5619	IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
5620	IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp));
5621	IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
5622	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
5623	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
5624
5625	frm = (uint8_t *)(wh + 1);
5626	frm = ieee80211_add_ssid(frm, NULL, 0);
5627	frm = ieee80211_add_rates(frm, rs);
5628	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5629		frm = ieee80211_add_xrates(frm, rs);
5630	if (ic->ic_htcaps & IEEE80211_HTC_HT)
5631		frm = ieee80211_add_htcap(frm, ni);
5632
5633	/* Set length of probe request. */
5634	tx->len = htole16(frm - (uint8_t *)wh);
5635
5636	c = ic->ic_curchan;
5637	chan = (struct iwn_scan_chan *)frm;
5638	chan->chan = htole16(ieee80211_chan2ieee(ic, c));
5639	chan->flags = 0;
5640	if (ss->ss_nssid > 0)
5641		chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
5642	chan->dsp_gain = 0x6e;
5643	if (IEEE80211_IS_CHAN_5GHZ(c) &&
5644	    !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
5645		chan->rf_gain = 0x3b;
5646		chan->active  = htole16(24);
5647		chan->passive = htole16(110);
5648		chan->flags |= htole32(IWN_CHAN_ACTIVE);
5649	} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
5650		chan->rf_gain = 0x3b;
5651		chan->active  = htole16(24);
5652		if (sc->rxon->associd)
5653			chan->passive = htole16(78);
5654		else
5655			chan->passive = htole16(110);
5656		hdr->crc_threshold = 0xffff;
5657	} else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
5658		chan->rf_gain = 0x28;
5659		chan->active  = htole16(36);
5660		chan->passive = htole16(120);
5661		chan->flags |= htole32(IWN_CHAN_ACTIVE);
5662	} else {
5663		chan->rf_gain = 0x28;
5664		chan->active  = htole16(36);
5665		if (sc->rxon->associd)
5666			chan->passive = htole16(88);
5667		else
5668			chan->passive = htole16(120);
5669		hdr->crc_threshold = 0xffff;
5670	}
5671
5672	DPRINTF(sc, IWN_DEBUG_STATE,
5673	    "%s: chan %u flags 0x%x rf_gain 0x%x "
5674	    "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__,
5675	    chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
5676	    chan->active, chan->passive);
5677
5678	hdr->nchan++;
5679	chan++;
5680	buflen = (uint8_t *)chan - buf;
5681	hdr->len = htole16(buflen);
5682
5683	DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
5684	    hdr->nchan);
5685	error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
5686	free(buf, M_DEVBUF);
5687
5688	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5689
5690	return error;
5691}
5692
5693static int
5694iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
5695{
5696	struct iwn_ops *ops = &sc->ops;
5697	struct ifnet *ifp = sc->sc_ifp;
5698	struct ieee80211com *ic = ifp->if_l2com;
5699	struct ieee80211_node *ni = vap->iv_bss;
5700	int error;
5701
5702	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5703
5704	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
5705	/* Update adapter configuration. */
5706	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
5707	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
5708	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5709	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
5710		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5711	if (ic->ic_flags & IEEE80211_F_SHSLOT)
5712		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
5713	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5714		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
5715	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
5716		sc->rxon->cck_mask  = 0;
5717		sc->rxon->ofdm_mask = 0x15;
5718	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
5719		sc->rxon->cck_mask  = 0x03;
5720		sc->rxon->ofdm_mask = 0;
5721	} else {
5722		/* Assume 802.11b/g. */
5723		sc->rxon->cck_mask  = 0x0f;
5724		sc->rxon->ofdm_mask = 0x15;
5725	}
5726	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
5727	    sc->rxon->chan, sc->rxon->flags, sc->rxon->cck_mask,
5728	    sc->rxon->ofdm_mask);
5729	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
5730	if (error != 0) {
5731		device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n",
5732		    __func__, error);
5733		return error;
5734	}
5735
5736	/* Configuration has changed, set TX power accordingly. */
5737	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
5738		device_printf(sc->sc_dev,
5739		    "%s: could not set TX power, error %d\n", __func__, error);
5740		return error;
5741	}
5742	/*
5743	 * Reconfiguring RXON clears the firmware nodes table so we must
5744	 * add the broadcast node again.
5745	 */
5746	if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
5747		device_printf(sc->sc_dev,
5748		    "%s: could not add broadcast node, error %d\n", __func__,
5749		    error);
5750		return error;
5751	}
5752
5753	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5754
5755	return 0;
5756}
5757
5758static int
5759iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
5760{
5761	struct iwn_ops *ops = &sc->ops;
5762	struct ifnet *ifp = sc->sc_ifp;
5763	struct ieee80211com *ic = ifp->if_l2com;
5764	struct ieee80211_node *ni = vap->iv_bss;
5765	struct iwn_node_info node;
5766	uint32_t htflags = 0;
5767	int error;
5768
5769	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5770
5771	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
5772	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5773		/* Link LED blinks while monitoring. */
5774		iwn_set_led(sc, IWN_LED_LINK, 5, 5);
5775		return 0;
5776	}
5777	if ((error = iwn_set_timing(sc, ni)) != 0) {
5778		device_printf(sc->sc_dev,
5779		    "%s: could not set timing, error %d\n", __func__, error);
5780		return error;
5781	}
5782
5783	/* Update adapter configuration. */
5784	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
5785	sc->rxon->associd = htole16(IEEE80211_AID(ni->ni_associd));
5786	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
5787	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5788	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
5789		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5790	if (ic->ic_flags & IEEE80211_F_SHSLOT)
5791		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
5792	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5793		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
5794	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
5795		sc->rxon->cck_mask  = 0;
5796		sc->rxon->ofdm_mask = 0x15;
5797	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
5798		sc->rxon->cck_mask  = 0x03;
5799		sc->rxon->ofdm_mask = 0;
5800	} else {
5801		/* Assume 802.11b/g. */
5802		sc->rxon->cck_mask  = 0x0f;
5803		sc->rxon->ofdm_mask = 0x15;
5804	}
5805	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
5806		htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode);
5807		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
5808			switch (ic->ic_curhtprotmode) {
5809			case IEEE80211_HTINFO_OPMODE_HT20PR:
5810				htflags |= IWN_RXON_HT_MODEPURE40;
5811				break;
5812			default:
5813				htflags |= IWN_RXON_HT_MODEMIXED;
5814				break;
5815			}
5816		}
5817		if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
5818			htflags |= IWN_RXON_HT_HT40MINUS;
5819	}
5820	sc->rxon->flags |= htole32(htflags);
5821	sc->rxon->filter |= htole32(IWN_FILTER_BSS);
5822	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x\n",
5823	    sc->rxon->chan, sc->rxon->flags);
5824	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
5825	if (error != 0) {
5826		device_printf(sc->sc_dev,
5827		    "%s: could not update configuration, error %d\n", __func__,
5828		    error);
5829		return error;
5830	}
5831
5832	/* Configuration has changed, set TX power accordingly. */
5833	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
5834		device_printf(sc->sc_dev,
5835		    "%s: could not set TX power, error %d\n", __func__, error);
5836		return error;
5837	}
5838
5839	/* Fake a join to initialize the TX rate. */
5840	((struct iwn_node *)ni)->id = IWN_ID_BSS;
5841	iwn_newassoc(ni, 1);
5842
5843	/* Add BSS node. */
5844	memset(&node, 0, sizeof node);
5845	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
5846	node.id = IWN_ID_BSS;
5847	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
5848		switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) {
5849		case IEEE80211_HTCAP_SMPS_ENA:
5850			node.htflags |= htole32(IWN_SMPS_MIMO_DIS);
5851			break;
5852		case IEEE80211_HTCAP_SMPS_DYNAMIC:
5853			node.htflags |= htole32(IWN_SMPS_MIMO_PROT);
5854			break;
5855		}
5856		node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) |
5857		    IWN_AMDPU_DENSITY(5));	/* 4us */
5858		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
5859			node.htflags |= htole32(IWN_NODE_HT40);
5860	}
5861	DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__);
5862	error = ops->add_node(sc, &node, 1);
5863	if (error != 0) {
5864		device_printf(sc->sc_dev,
5865		    "%s: could not add BSS node, error %d\n", __func__, error);
5866		return error;
5867	}
5868	DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n",
5869	    __func__, node.id);
5870	if ((error = iwn_set_link_quality(sc, ni)) != 0) {
5871		device_printf(sc->sc_dev,
5872		    "%s: could not setup link quality for node %d, error %d\n",
5873		    __func__, node.id, error);
5874		return error;
5875	}
5876
5877	if ((error = iwn_init_sensitivity(sc)) != 0) {
5878		device_printf(sc->sc_dev,
5879		    "%s: could not set sensitivity, error %d\n", __func__,
5880		    error);
5881		return error;
5882	}
5883	/* Start periodic calibration timer. */
5884	sc->calib.state = IWN_CALIB_STATE_ASSOC;
5885	sc->calib_cnt = 0;
5886	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
5887	    sc);
5888
5889	/* Link LED always on while associated. */
5890	iwn_set_led(sc, IWN_LED_LINK, 0, 1);
5891
5892	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5893
5894	return 0;
5895}
5896
5897/*
5898 * This function is called by upper layer when an ADDBA request is received
5899 * from another STA and before the ADDBA response is sent.
5900 */
5901static int
5902iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
5903    int baparamset, int batimeout, int baseqctl)
5904{
5905#define MS(_v, _f)	(((_v) & _f) >> _f##_S)
5906	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5907	struct iwn_ops *ops = &sc->ops;
5908	struct iwn_node *wn = (void *)ni;
5909	struct iwn_node_info node;
5910	uint16_t ssn;
5911	uint8_t tid;
5912	int error;
5913
5914	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5915
5916	tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID);
5917	ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START);
5918
5919	memset(&node, 0, sizeof node);
5920	node.id = wn->id;
5921	node.control = IWN_NODE_UPDATE;
5922	node.flags = IWN_FLAG_SET_ADDBA;
5923	node.addba_tid = tid;
5924	node.addba_ssn = htole16(ssn);
5925	DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
5926	    wn->id, tid, ssn);
5927	error = ops->add_node(sc, &node, 1);
5928	if (error != 0)
5929		return error;
5930	return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
5931#undef MS
5932}
5933
5934/*
5935 * This function is called by upper layer on teardown of an HT-immediate
5936 * Block Ack agreement (eg. uppon receipt of a DELBA frame).
5937 */
5938static void
5939iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
5940{
5941	struct ieee80211com *ic = ni->ni_ic;
5942	struct iwn_softc *sc = ic->ic_ifp->if_softc;
5943	struct iwn_ops *ops = &sc->ops;
5944	struct iwn_node *wn = (void *)ni;
5945	struct iwn_node_info node;
5946	uint8_t tid;
5947
5948	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5949
5950	/* XXX: tid as an argument */
5951	for (tid = 0; tid < WME_NUM_TID; tid++) {
5952		if (&ni->ni_rx_ampdu[tid] == rap)
5953			break;
5954	}
5955
5956	memset(&node, 0, sizeof node);
5957	node.id = wn->id;
5958	node.control = IWN_NODE_UPDATE;
5959	node.flags = IWN_FLAG_SET_DELBA;
5960	node.delba_tid = tid;
5961	DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
5962	(void)ops->add_node(sc, &node, 1);
5963	sc->sc_ampdu_rx_stop(ni, rap);
5964}
5965
5966static int
5967iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5968    int dialogtoken, int baparamset, int batimeout)
5969{
5970	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5971	int qid;
5972
5973	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5974
5975	for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) {
5976		if (sc->qid2tap[qid] == NULL)
5977			break;
5978	}
5979	if (qid == sc->ntxqs) {
5980		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n",
5981		    __func__);
5982		return 0;
5983	}
5984	tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
5985	if (tap->txa_private == NULL) {
5986		device_printf(sc->sc_dev,
5987		    "%s: failed to alloc TX aggregation structure\n", __func__);
5988		return 0;
5989	}
5990	sc->qid2tap[qid] = tap;
5991	*(int *)tap->txa_private = qid;
5992	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5993	    batimeout);
5994}
5995
5996static int
5997iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5998    int code, int baparamset, int batimeout)
5999{
6000	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
6001	int qid = *(int *)tap->txa_private;
6002	uint8_t tid = tap->txa_tid;
6003	int ret;
6004
6005	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6006
6007	if (code == IEEE80211_STATUS_SUCCESS) {
6008		ni->ni_txseqs[tid] = tap->txa_start & 0xfff;
6009		ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid);
6010		if (ret != 1)
6011			return ret;
6012	} else {
6013		sc->qid2tap[qid] = NULL;
6014		free(tap->txa_private, M_DEVBUF);
6015		tap->txa_private = NULL;
6016	}
6017	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
6018}
6019
6020/*
6021 * This function is called by upper layer when an ADDBA response is received
6022 * from another STA.
6023 */
6024static int
6025iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
6026    uint8_t tid)
6027{
6028	struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
6029	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
6030	struct iwn_ops *ops = &sc->ops;
6031	struct iwn_node *wn = (void *)ni;
6032	struct iwn_node_info node;
6033	int error, qid;
6034
6035	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6036
6037	/* Enable TX for the specified RA/TID. */
6038	wn->disable_tid &= ~(1 << tid);
6039	memset(&node, 0, sizeof node);
6040	node.id = wn->id;
6041	node.control = IWN_NODE_UPDATE;
6042	node.flags = IWN_FLAG_SET_DISABLE_TID;
6043	node.disable_tid = htole16(wn->disable_tid);
6044	error = ops->add_node(sc, &node, 1);
6045	if (error != 0)
6046		return 0;
6047
6048	if ((error = iwn_nic_lock(sc)) != 0)
6049		return 0;
6050	qid = *(int *)tap->txa_private;
6051	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n",
6052	    __func__, wn->id, tid, tap->txa_start, qid);
6053	ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff);
6054	iwn_nic_unlock(sc);
6055
6056	iwn_set_link_quality(sc, ni);
6057	return 1;
6058}
6059
6060static void
6061iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
6062{
6063	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
6064	struct iwn_ops *ops = &sc->ops;
6065	uint8_t tid = tap->txa_tid;
6066	int qid;
6067
6068	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6069
6070	sc->sc_addba_stop(ni, tap);
6071
6072	if (tap->txa_private == NULL)
6073		return;
6074
6075	qid = *(int *)tap->txa_private;
6076	if (sc->txq[qid].queued != 0)
6077		return;
6078	if (iwn_nic_lock(sc) != 0)
6079		return;
6080	ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff);
6081	iwn_nic_unlock(sc);
6082	sc->qid2tap[qid] = NULL;
6083	free(tap->txa_private, M_DEVBUF);
6084	tap->txa_private = NULL;
6085}
6086
6087static void
6088iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
6089    int qid, uint8_t tid, uint16_t ssn)
6090{
6091	struct iwn_node *wn = (void *)ni;
6092
6093	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6094
6095	/* Stop TX scheduler while we're changing its configuration. */
6096	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6097	    IWN4965_TXQ_STATUS_CHGACT);
6098
6099	/* Assign RA/TID translation to the queue. */
6100	iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
6101	    wn->id << 4 | tid);
6102
6103	/* Enable chain-building mode for the queue. */
6104	iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
6105
6106	/* Set starting sequence number from the ADDBA request. */
6107	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
6108	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
6109	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
6110
6111	/* Set scheduler window size. */
6112	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
6113	    IWN_SCHED_WINSZ);
6114	/* Set scheduler frame limit. */
6115	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
6116	    IWN_SCHED_LIMIT << 16);
6117
6118	/* Enable interrupts for the queue. */
6119	iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
6120
6121	/* Mark the queue as active. */
6122	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6123	    IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
6124	    iwn_tid2fifo[tid] << 1);
6125}
6126
6127static void
6128iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
6129{
6130	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6131
6132	/* Stop TX scheduler while we're changing its configuration. */
6133	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6134	    IWN4965_TXQ_STATUS_CHGACT);
6135
6136	/* Set starting sequence number from the ADDBA request. */
6137	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
6138	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
6139
6140	/* Disable interrupts for the queue. */
6141	iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
6142
6143	/* Mark the queue as inactive. */
6144	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6145	    IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
6146}
6147
6148static void
6149iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
6150    int qid, uint8_t tid, uint16_t ssn)
6151{
6152	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6153
6154	struct iwn_node *wn = (void *)ni;
6155
6156	/* Stop TX scheduler while we're changing its configuration. */
6157	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6158	    IWN5000_TXQ_STATUS_CHGACT);
6159
6160	/* Assign RA/TID translation to the queue. */
6161	iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
6162	    wn->id << 4 | tid);
6163
6164	/* Enable chain-building mode for the queue. */
6165	iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
6166
6167	/* Enable aggregation for the queue. */
6168	iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
6169
6170	/* Set starting sequence number from the ADDBA request. */
6171	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
6172	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
6173	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
6174
6175	/* Set scheduler window size and frame limit. */
6176	iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
6177	    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
6178
6179	/* Enable interrupts for the queue. */
6180	iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
6181
6182	/* Mark the queue as active. */
6183	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6184	    IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
6185}
6186
6187static void
6188iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
6189{
6190	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6191
6192	/* Stop TX scheduler while we're changing its configuration. */
6193	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6194	    IWN5000_TXQ_STATUS_CHGACT);
6195
6196	/* Disable aggregation for the queue. */
6197	iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
6198
6199	/* Set starting sequence number from the ADDBA request. */
6200	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
6201	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
6202
6203	/* Disable interrupts for the queue. */
6204	iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
6205
6206	/* Mark the queue as inactive. */
6207	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6208	    IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
6209}
6210
6211/*
6212 * Query calibration tables from the initialization firmware.  We do this
6213 * only once at first boot.  Called from a process context.
6214 */
6215static int
6216iwn5000_query_calibration(struct iwn_softc *sc)
6217{
6218	struct iwn5000_calib_config cmd;
6219	int error;
6220
6221	memset(&cmd, 0, sizeof cmd);
6222	cmd.ucode.once.enable = 0xffffffff;
6223	cmd.ucode.once.start  = 0xffffffff;
6224	cmd.ucode.once.send   = 0xffffffff;
6225	cmd.ucode.flags       = 0xffffffff;
6226	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n",
6227	    __func__);
6228	error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
6229	if (error != 0)
6230		return error;
6231
6232	/* Wait at most two seconds for calibration to complete. */
6233	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
6234		error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz);
6235	return error;
6236}
6237
6238/*
6239 * Send calibration results to the runtime firmware.  These results were
6240 * obtained on first boot from the initialization firmware.
6241 */
6242static int
6243iwn5000_send_calibration(struct iwn_softc *sc)
6244{
6245	int idx, error;
6246
6247	for (idx = 0; idx < 5; idx++) {
6248		if (sc->calibcmd[idx].buf == NULL)
6249			continue;	/* No results available. */
6250		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6251		    "send calibration result idx=%d len=%d\n", idx,
6252		    sc->calibcmd[idx].len);
6253		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
6254		    sc->calibcmd[idx].len, 0);
6255		if (error != 0) {
6256			device_printf(sc->sc_dev,
6257			    "%s: could not send calibration result, error %d\n",
6258			    __func__, error);
6259			return error;
6260		}
6261	}
6262	return 0;
6263}
6264
6265static int
6266iwn5000_send_wimax_coex(struct iwn_softc *sc)
6267{
6268	struct iwn5000_wimax_coex wimax;
6269
6270#ifdef notyet
6271	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
6272		/* Enable WiMAX coexistence for combo adapters. */
6273		wimax.flags =
6274		    IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
6275		    IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
6276		    IWN_WIMAX_COEX_STA_TABLE_VALID |
6277		    IWN_WIMAX_COEX_ENABLE;
6278		memcpy(wimax.events, iwn6050_wimax_events,
6279		    sizeof iwn6050_wimax_events);
6280	} else
6281#endif
6282	{
6283		/* Disable WiMAX coexistence. */
6284		wimax.flags = 0;
6285		memset(wimax.events, 0, sizeof wimax.events);
6286	}
6287	DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
6288	    __func__);
6289	return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
6290}
6291
6292static int
6293iwn5000_crystal_calib(struct iwn_softc *sc)
6294{
6295	struct iwn5000_phy_calib_crystal cmd;
6296
6297	memset(&cmd, 0, sizeof cmd);
6298	cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
6299	cmd.ngroups = 1;
6300	cmd.isvalid = 1;
6301	cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
6302	cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
6303	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n",
6304	    cmd.cap_pin[0], cmd.cap_pin[1]);
6305	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6306}
6307
6308static int
6309iwn5000_temp_offset_calib(struct iwn_softc *sc)
6310{
6311	struct iwn5000_phy_calib_temp_offset cmd;
6312
6313	memset(&cmd, 0, sizeof cmd);
6314	cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
6315	cmd.ngroups = 1;
6316	cmd.isvalid = 1;
6317	if (sc->eeprom_temp != 0)
6318		cmd.offset = htole16(sc->eeprom_temp);
6319	else
6320		cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
6321	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n",
6322	    le16toh(cmd.offset));
6323	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6324}
6325
6326/*
6327 * This function is called after the runtime firmware notifies us of its
6328 * readiness (called in a process context).
6329 */
6330static int
6331iwn4965_post_alive(struct iwn_softc *sc)
6332{
6333	int error, qid;
6334
6335	if ((error = iwn_nic_lock(sc)) != 0)
6336		return error;
6337
6338	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6339
6340	/* Clear TX scheduler state in SRAM. */
6341	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
6342	iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
6343	    IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
6344
6345	/* Set physical address of TX scheduler rings (1KB aligned). */
6346	iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
6347
6348	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
6349
6350	/* Disable chain mode for all our 16 queues. */
6351	iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
6352
6353	for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
6354		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
6355		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
6356
6357		/* Set scheduler window size. */
6358		iwn_mem_write(sc, sc->sched_base +
6359		    IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
6360		/* Set scheduler frame limit. */
6361		iwn_mem_write(sc, sc->sched_base +
6362		    IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
6363		    IWN_SCHED_LIMIT << 16);
6364	}
6365
6366	/* Enable interrupts for all our 16 queues. */
6367	iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
6368	/* Identify TX FIFO rings (0-7). */
6369	iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
6370
6371	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
6372	for (qid = 0; qid < 7; qid++) {
6373		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
6374		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6375		    IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
6376	}
6377	iwn_nic_unlock(sc);
6378	return 0;
6379}
6380
6381/*
6382 * This function is called after the initialization or runtime firmware
6383 * notifies us of its readiness (called in a process context).
6384 */
6385static int
6386iwn5000_post_alive(struct iwn_softc *sc)
6387{
6388	int error, qid;
6389
6390	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
6391
6392	/* Switch to using ICT interrupt mode. */
6393	iwn5000_ict_reset(sc);
6394
6395	if ((error = iwn_nic_lock(sc)) != 0){
6396		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
6397		return error;
6398	}
6399
6400	/* Clear TX scheduler state in SRAM. */
6401	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
6402	iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
6403	    IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
6404
6405	/* Set physical address of TX scheduler rings (1KB aligned). */
6406	iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
6407
6408	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
6409
6410	/* Enable chain mode for all queues, except command queue. */
6411	iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
6412	iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
6413
6414	for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
6415		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
6416		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
6417
6418		iwn_mem_write(sc, sc->sched_base +
6419		    IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
6420		/* Set scheduler window size and frame limit. */
6421		iwn_mem_write(sc, sc->sched_base +
6422		    IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
6423		    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
6424	}
6425
6426	/* Enable interrupts for all our 20 queues. */
6427	iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
6428	/* Identify TX FIFO rings (0-7). */
6429	iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
6430
6431	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
6432	for (qid = 0; qid < 7; qid++) {
6433		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
6434		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6435		    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
6436	}
6437	iwn_nic_unlock(sc);
6438
6439	/* Configure WiMAX coexistence for combo adapters. */
6440	error = iwn5000_send_wimax_coex(sc);
6441	if (error != 0) {
6442		device_printf(sc->sc_dev,
6443		    "%s: could not configure WiMAX coexistence, error %d\n",
6444		    __func__, error);
6445		return error;
6446	}
6447	if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
6448		/* Perform crystal calibration. */
6449		error = iwn5000_crystal_calib(sc);
6450		if (error != 0) {
6451			device_printf(sc->sc_dev,
6452			    "%s: crystal calibration failed, error %d\n",
6453			    __func__, error);
6454			return error;
6455		}
6456	}
6457	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
6458		/* Query calibration from the initialization firmware. */
6459		if ((error = iwn5000_query_calibration(sc)) != 0) {
6460			device_printf(sc->sc_dev,
6461			    "%s: could not query calibration, error %d\n",
6462			    __func__, error);
6463			return error;
6464		}
6465		/*
6466		 * We have the calibration results now, reboot with the
6467		 * runtime firmware (call ourselves recursively!)
6468		 */
6469		iwn_hw_stop(sc);
6470		error = iwn_hw_init(sc);
6471	} else {
6472		/* Send calibration results to runtime firmware. */
6473		error = iwn5000_send_calibration(sc);
6474	}
6475
6476	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
6477
6478	return error;
6479}
6480
6481/*
6482 * The firmware boot code is small and is intended to be copied directly into
6483 * the NIC internal memory (no DMA transfer).
6484 */
6485static int
6486iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
6487{
6488	int error, ntries;
6489
6490	size /= sizeof (uint32_t);
6491
6492	if ((error = iwn_nic_lock(sc)) != 0)
6493		return error;
6494
6495	/* Copy microcode image into NIC memory. */
6496	iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
6497	    (const uint32_t *)ucode, size);
6498
6499	iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
6500	iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
6501	iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
6502
6503	/* Start boot load now. */
6504	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
6505
6506	/* Wait for transfer to complete. */
6507	for (ntries = 0; ntries < 1000; ntries++) {
6508		if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
6509		    IWN_BSM_WR_CTRL_START))
6510			break;
6511		DELAY(10);
6512	}
6513	if (ntries == 1000) {
6514		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
6515		    __func__);
6516		iwn_nic_unlock(sc);
6517		return ETIMEDOUT;
6518	}
6519
6520	/* Enable boot after power up. */
6521	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
6522
6523	iwn_nic_unlock(sc);
6524	return 0;
6525}
6526
6527static int
6528iwn4965_load_firmware(struct iwn_softc *sc)
6529{
6530	struct iwn_fw_info *fw = &sc->fw;
6531	struct iwn_dma_info *dma = &sc->fw_dma;
6532	int error;
6533
6534	/* Copy initialization sections into pre-allocated DMA-safe memory. */
6535	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
6536	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6537	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6538	    fw->init.text, fw->init.textsz);
6539	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6540
6541	/* Tell adapter where to find initialization sections. */
6542	if ((error = iwn_nic_lock(sc)) != 0)
6543		return error;
6544	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6545	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
6546	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6547	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6548	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
6549	iwn_nic_unlock(sc);
6550
6551	/* Load firmware boot code. */
6552	error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
6553	if (error != 0) {
6554		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
6555		    __func__);
6556		return error;
6557	}
6558	/* Now press "execute". */
6559	IWN_WRITE(sc, IWN_RESET, 0);
6560
6561	/* Wait at most one second for first alive notification. */
6562	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
6563		device_printf(sc->sc_dev,
6564		    "%s: timeout waiting for adapter to initialize, error %d\n",
6565		    __func__, error);
6566		return error;
6567	}
6568
6569	/* Retrieve current temperature for initial TX power calibration. */
6570	sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
6571	sc->temp = iwn4965_get_temperature(sc);
6572
6573	/* Copy runtime sections into pre-allocated DMA-safe memory. */
6574	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
6575	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6576	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6577	    fw->main.text, fw->main.textsz);
6578	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6579
6580	/* Tell adapter where to find runtime sections. */
6581	if ((error = iwn_nic_lock(sc)) != 0)
6582		return error;
6583	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6584	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
6585	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6586	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6587	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
6588	    IWN_FW_UPDATED | fw->main.textsz);
6589	iwn_nic_unlock(sc);
6590
6591	return 0;
6592}
6593
6594static int
6595iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
6596    const uint8_t *section, int size)
6597{
6598	struct iwn_dma_info *dma = &sc->fw_dma;
6599	int error;
6600
6601	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6602
6603	/* Copy firmware section into pre-allocated DMA-safe memory. */
6604	memcpy(dma->vaddr, section, size);
6605	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6606
6607	if ((error = iwn_nic_lock(sc)) != 0)
6608		return error;
6609
6610	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6611	    IWN_FH_TX_CONFIG_DMA_PAUSE);
6612
6613	IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
6614	IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
6615	    IWN_LOADDR(dma->paddr));
6616	IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
6617	    IWN_HIADDR(dma->paddr) << 28 | size);
6618	IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
6619	    IWN_FH_TXBUF_STATUS_TBNUM(1) |
6620	    IWN_FH_TXBUF_STATUS_TBIDX(1) |
6621	    IWN_FH_TXBUF_STATUS_TFBD_VALID);
6622
6623	/* Kick Flow Handler to start DMA transfer. */
6624	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6625	    IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
6626
6627	iwn_nic_unlock(sc);
6628
6629	/* Wait at most five seconds for FH DMA transfer to complete. */
6630	return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz);
6631}
6632
6633static int
6634iwn5000_load_firmware(struct iwn_softc *sc)
6635{
6636	struct iwn_fw_part *fw;
6637	int error;
6638
6639	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6640
6641	/* Load the initialization firmware on first boot only. */
6642	fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
6643	    &sc->fw.main : &sc->fw.init;
6644
6645	error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
6646	    fw->text, fw->textsz);
6647	if (error != 0) {
6648		device_printf(sc->sc_dev,
6649		    "%s: could not load firmware %s section, error %d\n",
6650		    __func__, ".text", error);
6651		return error;
6652	}
6653	error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
6654	    fw->data, fw->datasz);
6655	if (error != 0) {
6656		device_printf(sc->sc_dev,
6657		    "%s: could not load firmware %s section, error %d\n",
6658		    __func__, ".data", error);
6659		return error;
6660	}
6661
6662	/* Now press "execute". */
6663	IWN_WRITE(sc, IWN_RESET, 0);
6664	return 0;
6665}
6666
6667/*
6668 * Extract text and data sections from a legacy firmware image.
6669 */
6670static int
6671iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
6672{
6673	const uint32_t *ptr;
6674	size_t hdrlen = 24;
6675	uint32_t rev;
6676
6677	ptr = (const uint32_t *)fw->data;
6678	rev = le32toh(*ptr++);
6679
6680	/* Check firmware API version. */
6681	if (IWN_FW_API(rev) <= 1) {
6682		device_printf(sc->sc_dev,
6683		    "%s: bad firmware, need API version >=2\n", __func__);
6684		return EINVAL;
6685	}
6686	if (IWN_FW_API(rev) >= 3) {
6687		/* Skip build number (version 2 header). */
6688		hdrlen += 4;
6689		ptr++;
6690	}
6691	if (fw->size < hdrlen) {
6692		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6693		    __func__, fw->size);
6694		return EINVAL;
6695	}
6696	fw->main.textsz = le32toh(*ptr++);
6697	fw->main.datasz = le32toh(*ptr++);
6698	fw->init.textsz = le32toh(*ptr++);
6699	fw->init.datasz = le32toh(*ptr++);
6700	fw->boot.textsz = le32toh(*ptr++);
6701
6702	/* Check that all firmware sections fit. */
6703	if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
6704	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
6705		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6706		    __func__, fw->size);
6707		return EINVAL;
6708	}
6709
6710	/* Get pointers to firmware sections. */
6711	fw->main.text = (const uint8_t *)ptr;
6712	fw->main.data = fw->main.text + fw->main.textsz;
6713	fw->init.text = fw->main.data + fw->main.datasz;
6714	fw->init.data = fw->init.text + fw->init.textsz;
6715	fw->boot.text = fw->init.data + fw->init.datasz;
6716	return 0;
6717}
6718
6719/*
6720 * Extract text and data sections from a TLV firmware image.
6721 */
6722static int
6723iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
6724    uint16_t alt)
6725{
6726	const struct iwn_fw_tlv_hdr *hdr;
6727	const struct iwn_fw_tlv *tlv;
6728	const uint8_t *ptr, *end;
6729	uint64_t altmask;
6730	uint32_t len, tmp;
6731
6732	if (fw->size < sizeof (*hdr)) {
6733		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6734		    __func__, fw->size);
6735		return EINVAL;
6736	}
6737	hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
6738	if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
6739		device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n",
6740		    __func__, le32toh(hdr->signature));
6741		return EINVAL;
6742	}
6743	DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr,
6744	    le32toh(hdr->build));
6745
6746	/*
6747	 * Select the closest supported alternative that is less than
6748	 * or equal to the specified one.
6749	 */
6750	altmask = le64toh(hdr->altmask);
6751	while (alt > 0 && !(altmask & (1ULL << alt)))
6752		alt--;	/* Downgrade. */
6753	DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt);
6754
6755	ptr = (const uint8_t *)(hdr + 1);
6756	end = (const uint8_t *)(fw->data + fw->size);
6757
6758	/* Parse type-length-value fields. */
6759	while (ptr + sizeof (*tlv) <= end) {
6760		tlv = (const struct iwn_fw_tlv *)ptr;
6761		len = le32toh(tlv->len);
6762
6763		ptr += sizeof (*tlv);
6764		if (ptr + len > end) {
6765			device_printf(sc->sc_dev,
6766			    "%s: firmware too short: %zu bytes\n", __func__,
6767			    fw->size);
6768			return EINVAL;
6769		}
6770		/* Skip other alternatives. */
6771		if (tlv->alt != 0 && tlv->alt != htole16(alt))
6772			goto next;
6773
6774		switch (le16toh(tlv->type)) {
6775		case IWN_FW_TLV_MAIN_TEXT:
6776			fw->main.text = ptr;
6777			fw->main.textsz = len;
6778			break;
6779		case IWN_FW_TLV_MAIN_DATA:
6780			fw->main.data = ptr;
6781			fw->main.datasz = len;
6782			break;
6783		case IWN_FW_TLV_INIT_TEXT:
6784			fw->init.text = ptr;
6785			fw->init.textsz = len;
6786			break;
6787		case IWN_FW_TLV_INIT_DATA:
6788			fw->init.data = ptr;
6789			fw->init.datasz = len;
6790			break;
6791		case IWN_FW_TLV_BOOT_TEXT:
6792			fw->boot.text = ptr;
6793			fw->boot.textsz = len;
6794			break;
6795		case IWN_FW_TLV_ENH_SENS:
6796			if (!len)
6797				sc->sc_flags |= IWN_FLAG_ENH_SENS;
6798			break;
6799		case IWN_FW_TLV_PHY_CALIB:
6800			tmp = htole32(*ptr);
6801			if (tmp < 253) {
6802				sc->reset_noise_gain = tmp;
6803				sc->noise_gain = tmp + 1;
6804			}
6805			break;
6806		case IWN_FW_TLV_PAN:
6807			sc->sc_flags |= IWN_FLAG_PAN_SUPPORT;
6808			DPRINTF(sc, IWN_DEBUG_RESET,
6809			    "PAN Support found: %d\n", 1);
6810			break;
6811		case IWN_FW_TLV_FLAGS :
6812			sc->tlv_feature_flags = htole32(*ptr);
6813			break;
6814		case IWN_FW_TLV_PBREQ_MAXLEN:
6815		case IWN_FW_TLV_RUNT_EVTLOG_PTR:
6816		case IWN_FW_TLV_RUNT_EVTLOG_SIZE:
6817		case IWN_FW_TLV_RUNT_ERRLOG_PTR:
6818		case IWN_FW_TLV_INIT_EVTLOG_PTR:
6819		case IWN_FW_TLV_INIT_EVTLOG_SIZE:
6820		case IWN_FW_TLV_INIT_ERRLOG_PTR:
6821		case IWN_FW_TLV_WOWLAN_INST:
6822		case IWN_FW_TLV_WOWLAN_DATA:
6823			DPRINTF(sc, IWN_DEBUG_RESET,
6824			    "TLV type %d reconized but not handled\n",
6825			    le16toh(tlv->type));
6826			break;
6827		default:
6828			DPRINTF(sc, IWN_DEBUG_RESET,
6829			    "TLV type %d not handled\n", le16toh(tlv->type));
6830			break;
6831		}
6832 next:		/* TLV fields are 32-bit aligned. */
6833		ptr += (len + 3) & ~3;
6834	}
6835	return 0;
6836}
6837
6838static int
6839iwn_read_firmware(struct iwn_softc *sc)
6840{
6841	struct iwn_fw_info *fw = &sc->fw;
6842	int error;
6843
6844	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6845
6846	IWN_UNLOCK(sc);
6847
6848	memset(fw, 0, sizeof (*fw));
6849
6850	/* Read firmware image from filesystem. */
6851	sc->fw_fp = firmware_get(sc->fwname);
6852	if (sc->fw_fp == NULL) {
6853		device_printf(sc->sc_dev, "%s: could not read firmware %s\n",
6854		    __func__, sc->fwname);
6855		IWN_LOCK(sc);
6856		return EINVAL;
6857	}
6858	IWN_LOCK(sc);
6859
6860	fw->size = sc->fw_fp->datasize;
6861	fw->data = (const uint8_t *)sc->fw_fp->data;
6862	if (fw->size < sizeof (uint32_t)) {
6863		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6864		    __func__, fw->size);
6865		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6866		sc->fw_fp = NULL;
6867		return EINVAL;
6868	}
6869
6870	/* Retrieve text and data sections. */
6871	if (*(const uint32_t *)fw->data != 0)	/* Legacy image. */
6872		error = iwn_read_firmware_leg(sc, fw);
6873	else
6874		error = iwn_read_firmware_tlv(sc, fw, 1);
6875	if (error != 0) {
6876		device_printf(sc->sc_dev,
6877		    "%s: could not read firmware sections, error %d\n",
6878		    __func__, error);
6879		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6880		sc->fw_fp = NULL;
6881		return error;
6882	}
6883
6884	/* Make sure text and data sections fit in hardware memory. */
6885	if (fw->main.textsz > sc->fw_text_maxsz ||
6886	    fw->main.datasz > sc->fw_data_maxsz ||
6887	    fw->init.textsz > sc->fw_text_maxsz ||
6888	    fw->init.datasz > sc->fw_data_maxsz ||
6889	    fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
6890	    (fw->boot.textsz & 3) != 0) {
6891		device_printf(sc->sc_dev, "%s: firmware sections too large\n",
6892		    __func__);
6893		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6894		sc->fw_fp = NULL;
6895		return EINVAL;
6896	}
6897
6898	/* We can proceed with loading the firmware. */
6899	return 0;
6900}
6901
6902static int
6903iwn_clock_wait(struct iwn_softc *sc)
6904{
6905	int ntries;
6906
6907	/* Set "initialization complete" bit. */
6908	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6909
6910	/* Wait for clock stabilization. */
6911	for (ntries = 0; ntries < 2500; ntries++) {
6912		if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
6913			return 0;
6914		DELAY(10);
6915	}
6916	device_printf(sc->sc_dev,
6917	    "%s: timeout waiting for clock stabilization\n", __func__);
6918	return ETIMEDOUT;
6919}
6920
6921static int
6922iwn_apm_init(struct iwn_softc *sc)
6923{
6924	uint32_t reg;
6925	int error;
6926
6927	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6928
6929	/* Disable L0s exit timer (NMI bug workaround). */
6930	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
6931	/* Don't wait for ICH L0s (ICH bug workaround). */
6932	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
6933
6934	/* Set FH wait threshold to max (HW bug under stress workaround). */
6935	IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
6936
6937	/* Enable HAP INTA to move adapter from L1a to L0s. */
6938	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
6939
6940	/* Retrieve PCIe Active State Power Management (ASPM). */
6941	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
6942	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
6943	if (reg & 0x02)	/* L1 Entry enabled. */
6944		IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6945	else
6946		IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6947
6948	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
6949	    sc->hw_type <= IWN_HW_REV_TYPE_1000)
6950		IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);
6951
6952	/* Wait for clock stabilization before accessing prph. */
6953	if ((error = iwn_clock_wait(sc)) != 0)
6954		return error;
6955
6956	if ((error = iwn_nic_lock(sc)) != 0)
6957		return error;
6958	if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
6959		/* Enable DMA and BSM (Bootstrap State Machine). */
6960		iwn_prph_write(sc, IWN_APMG_CLK_EN,
6961		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
6962		    IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
6963	} else {
6964		/* Enable DMA. */
6965		iwn_prph_write(sc, IWN_APMG_CLK_EN,
6966		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6967	}
6968	DELAY(20);
6969	/* Disable L1-Active. */
6970	iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
6971	iwn_nic_unlock(sc);
6972
6973	return 0;
6974}
6975
6976static void
6977iwn_apm_stop_master(struct iwn_softc *sc)
6978{
6979	int ntries;
6980
6981	/* Stop busmaster DMA activity. */
6982	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
6983	for (ntries = 0; ntries < 100; ntries++) {
6984		if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
6985			return;
6986		DELAY(10);
6987	}
6988	device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__);
6989}
6990
6991static void
6992iwn_apm_stop(struct iwn_softc *sc)
6993{
6994	iwn_apm_stop_master(sc);
6995
6996	/* Reset the entire device. */
6997	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
6998	DELAY(10);
6999	/* Clear "initialization complete" bit. */
7000	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
7001}
7002
7003static int
7004iwn4965_nic_config(struct iwn_softc *sc)
7005{
7006	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7007
7008	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
7009		/*
7010		 * I don't believe this to be correct but this is what the
7011		 * vendor driver is doing. Probably the bits should not be
7012		 * shifted in IWN_RFCFG_*.
7013		 */
7014		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
7015		    IWN_RFCFG_TYPE(sc->rfcfg) |
7016		    IWN_RFCFG_STEP(sc->rfcfg) |
7017		    IWN_RFCFG_DASH(sc->rfcfg));
7018	}
7019	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
7020	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
7021	return 0;
7022}
7023
7024static int
7025iwn5000_nic_config(struct iwn_softc *sc)
7026{
7027	uint32_t tmp;
7028	int error;
7029
7030	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7031
7032	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
7033		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
7034		    IWN_RFCFG_TYPE(sc->rfcfg) |
7035		    IWN_RFCFG_STEP(sc->rfcfg) |
7036		    IWN_RFCFG_DASH(sc->rfcfg));
7037	}
7038	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
7039	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
7040
7041	if ((error = iwn_nic_lock(sc)) != 0)
7042		return error;
7043	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
7044
7045	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
7046		/*
7047		 * Select first Switching Voltage Regulator (1.32V) to
7048		 * solve a stability issue related to noisy DC2DC line
7049		 * in the silicon of 1000 Series.
7050		 */
7051		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
7052		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
7053		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
7054		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
7055	}
7056	iwn_nic_unlock(sc);
7057
7058	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
7059		/* Use internal power amplifier only. */
7060		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
7061	}
7062	if ((sc->hw_type == IWN_HW_REV_TYPE_6050 ||
7063	     sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) {
7064		/* Indicate that ROM calibration version is >=6. */
7065		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
7066	}
7067	if (sc->hw_type == IWN_HW_REV_TYPE_6005)
7068		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2);
7069	return 0;
7070}
7071
7072/*
7073 * Take NIC ownership over Intel Active Management Technology (AMT).
7074 */
7075static int
7076iwn_hw_prepare(struct iwn_softc *sc)
7077{
7078	int ntries;
7079
7080	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7081
7082	/* Check if hardware is ready. */
7083	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
7084	for (ntries = 0; ntries < 5; ntries++) {
7085		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
7086		    IWN_HW_IF_CONFIG_NIC_READY)
7087			return 0;
7088		DELAY(10);
7089	}
7090
7091	/* Hardware not ready, force into ready state. */
7092	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
7093	for (ntries = 0; ntries < 15000; ntries++) {
7094		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
7095		    IWN_HW_IF_CONFIG_PREPARE_DONE))
7096			break;
7097		DELAY(10);
7098	}
7099	if (ntries == 15000)
7100		return ETIMEDOUT;
7101
7102	/* Hardware should be ready now. */
7103	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
7104	for (ntries = 0; ntries < 5; ntries++) {
7105		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
7106		    IWN_HW_IF_CONFIG_NIC_READY)
7107			return 0;
7108		DELAY(10);
7109	}
7110	return ETIMEDOUT;
7111}
7112
7113static int
7114iwn_hw_init(struct iwn_softc *sc)
7115{
7116	struct iwn_ops *ops = &sc->ops;
7117	int error, chnl, qid;
7118
7119	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
7120
7121	/* Clear pending interrupts. */
7122	IWN_WRITE(sc, IWN_INT, 0xffffffff);
7123
7124	if ((error = iwn_apm_init(sc)) != 0) {
7125		device_printf(sc->sc_dev,
7126		    "%s: could not power ON adapter, error %d\n", __func__,
7127		    error);
7128		return error;
7129	}
7130
7131	/* Select VMAIN power source. */
7132	if ((error = iwn_nic_lock(sc)) != 0)
7133		return error;
7134	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
7135	iwn_nic_unlock(sc);
7136
7137	/* Perform adapter-specific initialization. */
7138	if ((error = ops->nic_config(sc)) != 0)
7139		return error;
7140
7141	/* Initialize RX ring. */
7142	if ((error = iwn_nic_lock(sc)) != 0)
7143		return error;
7144	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
7145	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
7146	/* Set physical address of RX ring (256-byte aligned). */
7147	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
7148	/* Set physical address of RX status (16-byte aligned). */
7149	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
7150	/* Enable RX. */
7151	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
7152	    IWN_FH_RX_CONFIG_ENA           |
7153	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
7154	    IWN_FH_RX_CONFIG_IRQ_DST_HOST  |
7155	    IWN_FH_RX_CONFIG_SINGLE_FRAME  |
7156	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
7157	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
7158	iwn_nic_unlock(sc);
7159	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
7160
7161	if ((error = iwn_nic_lock(sc)) != 0)
7162		return error;
7163
7164	/* Initialize TX scheduler. */
7165	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
7166
7167	/* Set physical address of "keep warm" page (16-byte aligned). */
7168	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
7169
7170	/* Initialize TX rings. */
7171	for (qid = 0; qid < sc->ntxqs; qid++) {
7172		struct iwn_tx_ring *txq = &sc->txq[qid];
7173
7174		/* Set physical address of TX ring (256-byte aligned). */
7175		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
7176		    txq->desc_dma.paddr >> 8);
7177	}
7178	iwn_nic_unlock(sc);
7179
7180	/* Enable DMA channels. */
7181	for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
7182		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
7183		    IWN_FH_TX_CONFIG_DMA_ENA |
7184		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
7185	}
7186
7187	/* Clear "radio off" and "commands blocked" bits. */
7188	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7189	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
7190
7191	/* Clear pending interrupts. */
7192	IWN_WRITE(sc, IWN_INT, 0xffffffff);
7193	/* Enable interrupt coalescing. */
7194	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
7195	/* Enable interrupts. */
7196	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
7197
7198	/* _Really_ make sure "radio off" bit is cleared! */
7199	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7200	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7201
7202	/* Enable shadow registers. */
7203	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
7204		IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);
7205
7206	if ((error = ops->load_firmware(sc)) != 0) {
7207		device_printf(sc->sc_dev,
7208		    "%s: could not load firmware, error %d\n", __func__,
7209		    error);
7210		return error;
7211	}
7212	/* Wait at most one second for firmware alive notification. */
7213	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
7214		device_printf(sc->sc_dev,
7215		    "%s: timeout waiting for adapter to initialize, error %d\n",
7216		    __func__, error);
7217		return error;
7218	}
7219	/* Do post-firmware initialization. */
7220
7221	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
7222
7223	return ops->post_alive(sc);
7224}
7225
7226static void
7227iwn_hw_stop(struct iwn_softc *sc)
7228{
7229	int chnl, qid, ntries;
7230
7231	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7232
7233	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
7234
7235	/* Disable interrupts. */
7236	IWN_WRITE(sc, IWN_INT_MASK, 0);
7237	IWN_WRITE(sc, IWN_INT, 0xffffffff);
7238	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
7239	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
7240
7241	/* Make sure we no longer hold the NIC lock. */
7242	iwn_nic_unlock(sc);
7243
7244	/* Stop TX scheduler. */
7245	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
7246
7247	/* Stop all DMA channels. */
7248	if (iwn_nic_lock(sc) == 0) {
7249		for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
7250			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
7251			for (ntries = 0; ntries < 200; ntries++) {
7252				if (IWN_READ(sc, IWN_FH_TX_STATUS) &
7253				    IWN_FH_TX_STATUS_IDLE(chnl))
7254					break;
7255				DELAY(10);
7256			}
7257		}
7258		iwn_nic_unlock(sc);
7259	}
7260
7261	/* Stop RX ring. */
7262	iwn_reset_rx_ring(sc, &sc->rxq);
7263
7264	/* Reset all TX rings. */
7265	for (qid = 0; qid < sc->ntxqs; qid++)
7266		iwn_reset_tx_ring(sc, &sc->txq[qid]);
7267
7268	if (iwn_nic_lock(sc) == 0) {
7269		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
7270		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
7271		iwn_nic_unlock(sc);
7272	}
7273	DELAY(5);
7274	/* Power OFF adapter. */
7275	iwn_apm_stop(sc);
7276}
7277
7278static void
7279iwn_radio_on(void *arg0, int pending)
7280{
7281	struct iwn_softc *sc = arg0;
7282	struct ifnet *ifp = sc->sc_ifp;
7283	struct ieee80211com *ic = ifp->if_l2com;
7284	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
7285
7286	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7287
7288	if (vap != NULL) {
7289		iwn_init(sc);
7290		ieee80211_init(vap);
7291	}
7292}
7293
7294static void
7295iwn_radio_off(void *arg0, int pending)
7296{
7297	struct iwn_softc *sc = arg0;
7298	struct ifnet *ifp = sc->sc_ifp;
7299	struct ieee80211com *ic = ifp->if_l2com;
7300	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
7301
7302	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7303
7304	iwn_stop(sc);
7305	if (vap != NULL)
7306		ieee80211_stop(vap);
7307
7308	/* Enable interrupts to get RF toggle notification. */
7309	IWN_LOCK(sc);
7310	IWN_WRITE(sc, IWN_INT, 0xffffffff);
7311	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
7312	IWN_UNLOCK(sc);
7313}
7314
7315static void
7316iwn_init_locked(struct iwn_softc *sc)
7317{
7318	struct ifnet *ifp = sc->sc_ifp;
7319	int error;
7320
7321	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
7322
7323	IWN_LOCK_ASSERT(sc);
7324
7325	if ((error = iwn_hw_prepare(sc)) != 0) {
7326		device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n",
7327		    __func__, error);
7328		goto fail;
7329	}
7330
7331	/* Initialize interrupt mask to default value. */
7332	sc->int_mask = IWN_INT_MASK_DEF;
7333	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
7334
7335	/* Check that the radio is not disabled by hardware switch. */
7336	if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
7337		device_printf(sc->sc_dev,
7338		    "radio is disabled by hardware switch\n");
7339		/* Enable interrupts to get RF toggle notifications. */
7340		IWN_WRITE(sc, IWN_INT, 0xffffffff);
7341		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
7342		return;
7343	}
7344
7345	/* Read firmware images from the filesystem. */
7346	if ((error = iwn_read_firmware(sc)) != 0) {
7347		device_printf(sc->sc_dev,
7348		    "%s: could not read firmware, error %d\n", __func__,
7349		    error);
7350		goto fail;
7351	}
7352
7353	/* Initialize hardware and upload firmware. */
7354	error = iwn_hw_init(sc);
7355	firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
7356	sc->fw_fp = NULL;
7357	if (error != 0) {
7358		device_printf(sc->sc_dev,
7359		    "%s: could not initialize hardware, error %d\n", __func__,
7360		    error);
7361		goto fail;
7362	}
7363
7364	/* Configure adapter now that it is ready. */
7365	if ((error = iwn_config(sc)) != 0) {
7366		device_printf(sc->sc_dev,
7367		    "%s: could not configure device, error %d\n", __func__,
7368		    error);
7369		goto fail;
7370	}
7371
7372	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
7373	ifp->if_drv_flags |= IFF_DRV_RUNNING;
7374
7375	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
7376
7377	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
7378
7379	return;
7380
7381fail:	iwn_stop_locked(sc);
7382	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
7383}
7384
7385static void
7386iwn_init(void *arg)
7387{
7388	struct iwn_softc *sc = arg;
7389	struct ifnet *ifp = sc->sc_ifp;
7390	struct ieee80211com *ic = ifp->if_l2com;
7391
7392	IWN_LOCK(sc);
7393	iwn_init_locked(sc);
7394	IWN_UNLOCK(sc);
7395
7396	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
7397		ieee80211_start_all(ic);
7398}
7399
7400static void
7401iwn_stop_locked(struct iwn_softc *sc)
7402{
7403	struct ifnet *ifp = sc->sc_ifp;
7404
7405	IWN_LOCK_ASSERT(sc);
7406
7407	sc->sc_tx_timer = 0;
7408	callout_stop(&sc->watchdog_to);
7409	callout_stop(&sc->calib_to);
7410	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
7411
7412	/* Power OFF hardware. */
7413	iwn_hw_stop(sc);
7414}
7415
7416static void
7417iwn_stop(struct iwn_softc *sc)
7418{
7419	IWN_LOCK(sc);
7420	iwn_stop_locked(sc);
7421	IWN_UNLOCK(sc);
7422}
7423
7424/*
7425 * Callback from net80211 to start a scan.
7426 */
7427static void
7428iwn_scan_start(struct ieee80211com *ic)
7429{
7430	struct ifnet *ifp = ic->ic_ifp;
7431	struct iwn_softc *sc = ifp->if_softc;
7432
7433	IWN_LOCK(sc);
7434	/* make the link LED blink while we're scanning */
7435	iwn_set_led(sc, IWN_LED_LINK, 20, 2);
7436	IWN_UNLOCK(sc);
7437}
7438
7439/*
7440 * Callback from net80211 to terminate a scan.
7441 */
7442static void
7443iwn_scan_end(struct ieee80211com *ic)
7444{
7445	struct ifnet *ifp = ic->ic_ifp;
7446	struct iwn_softc *sc = ifp->if_softc;
7447	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
7448
7449	IWN_LOCK(sc);
7450	if (vap->iv_state == IEEE80211_S_RUN) {
7451		/* Set link LED to ON status if we are associated */
7452		iwn_set_led(sc, IWN_LED_LINK, 0, 1);
7453	}
7454	IWN_UNLOCK(sc);
7455}
7456
7457/*
7458 * Callback from net80211 to force a channel change.
7459 */
7460static void
7461iwn_set_channel(struct ieee80211com *ic)
7462{
7463	const struct ieee80211_channel *c = ic->ic_curchan;
7464	struct ifnet *ifp = ic->ic_ifp;
7465	struct iwn_softc *sc = ifp->if_softc;
7466	int error;
7467
7468	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7469
7470	IWN_LOCK(sc);
7471	sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
7472	sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
7473	sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
7474	sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
7475
7476	/*
7477	 * Only need to set the channel in Monitor mode. AP scanning and auth
7478	 * are already taken care of by their respective firmware commands.
7479	 */
7480	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7481		error = iwn_config(sc);
7482		if (error != 0)
7483		device_printf(sc->sc_dev,
7484		    "%s: error %d settting channel\n", __func__, error);
7485	}
7486	IWN_UNLOCK(sc);
7487}
7488
7489/*
7490 * Callback from net80211 to start scanning of the current channel.
7491 */
7492static void
7493iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
7494{
7495	struct ieee80211vap *vap = ss->ss_vap;
7496	struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc;
7497	int error;
7498
7499	IWN_LOCK(sc);
7500	error = iwn_scan(sc);
7501	IWN_UNLOCK(sc);
7502	if (error != 0)
7503		ieee80211_cancel_scan(vap);
7504}
7505
7506/*
7507 * Callback from net80211 to handle the minimum dwell time being met.
7508 * The intent is to terminate the scan but we just let the firmware
7509 * notify us when it's finished as we have no safe way to abort it.
7510 */
7511static void
7512iwn_scan_mindwell(struct ieee80211_scan_state *ss)
7513{
7514	/* NB: don't try to abort scan; wait for firmware to finish */
7515}
7516
7517static void
7518iwn_hw_reset(void *arg0, int pending)
7519{
7520	struct iwn_softc *sc = arg0;
7521	struct ifnet *ifp = sc->sc_ifp;
7522	struct ieee80211com *ic = ifp->if_l2com;
7523
7524	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7525
7526	iwn_stop(sc);
7527	iwn_init(sc);
7528	ieee80211_notify_radio(ic, 1);
7529}
7530#ifdef	IWN_DEBUG
7531#define	IWN_DESC(x) case x:	return #x
7532#define	COUNTOF(array) (sizeof(array) / sizeof(array[0]))
7533
7534/*
7535 * Translate CSR code to string
7536 */
7537static char *iwn_get_csr_string(int csr)
7538{
7539	switch (csr) {
7540		IWN_DESC(IWN_HW_IF_CONFIG);
7541		IWN_DESC(IWN_INT_COALESCING);
7542		IWN_DESC(IWN_INT);
7543		IWN_DESC(IWN_INT_MASK);
7544		IWN_DESC(IWN_FH_INT);
7545		IWN_DESC(IWN_GPIO_IN);
7546		IWN_DESC(IWN_RESET);
7547		IWN_DESC(IWN_GP_CNTRL);
7548		IWN_DESC(IWN_HW_REV);
7549		IWN_DESC(IWN_EEPROM);
7550		IWN_DESC(IWN_EEPROM_GP);
7551		IWN_DESC(IWN_OTP_GP);
7552		IWN_DESC(IWN_GIO);
7553		IWN_DESC(IWN_GP_UCODE);
7554		IWN_DESC(IWN_GP_DRIVER);
7555		IWN_DESC(IWN_UCODE_GP1);
7556		IWN_DESC(IWN_UCODE_GP2);
7557		IWN_DESC(IWN_LED);
7558		IWN_DESC(IWN_DRAM_INT_TBL);
7559		IWN_DESC(IWN_GIO_CHICKEN);
7560		IWN_DESC(IWN_ANA_PLL);
7561		IWN_DESC(IWN_HW_REV_WA);
7562		IWN_DESC(IWN_DBG_HPET_MEM);
7563	default:
7564		return "UNKNOWN CSR";
7565	}
7566}
7567
7568/*
7569 * This function print firmware register
7570 */
7571static void
7572iwn_debug_register(struct iwn_softc *sc)
7573{
7574	int i;
7575	static const uint32_t csr_tbl[] = {
7576		IWN_HW_IF_CONFIG,
7577		IWN_INT_COALESCING,
7578		IWN_INT,
7579		IWN_INT_MASK,
7580		IWN_FH_INT,
7581		IWN_GPIO_IN,
7582		IWN_RESET,
7583		IWN_GP_CNTRL,
7584		IWN_HW_REV,
7585		IWN_EEPROM,
7586		IWN_EEPROM_GP,
7587		IWN_OTP_GP,
7588		IWN_GIO,
7589		IWN_GP_UCODE,
7590		IWN_GP_DRIVER,
7591		IWN_UCODE_GP1,
7592		IWN_UCODE_GP2,
7593		IWN_LED,
7594		IWN_DRAM_INT_TBL,
7595		IWN_GIO_CHICKEN,
7596		IWN_ANA_PLL,
7597		IWN_HW_REV_WA,
7598		IWN_DBG_HPET_MEM,
7599	};
7600	DPRINTF(sc, IWN_DEBUG_REGISTER,
7601	    "CSR values: (2nd byte of IWN_INT_COALESCING is IWN_INT_PERIODIC)%s",
7602	    "\n");
7603	for (i = 0; i <  COUNTOF(csr_tbl); i++){
7604		DPRINTF(sc, IWN_DEBUG_REGISTER,"  %10s: 0x%08x ",
7605			iwn_get_csr_string(csr_tbl[i]), IWN_READ(sc, csr_tbl[i]));
7606		if ((i+1) % 3 == 0)
7607			DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n");
7608	}
7609	DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n");
7610}
7611#endif
7612