/*-
 * Copyright (c) 2007-2009
 *	Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2008
 *	Benjamin Close <benjsc@FreeBSD.org>
 * Copyright (c) 2008 Sam Leffler, Errno Consulting
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
 * adapters.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/iwn/if_iwn.c 216237 2010-12-06 18:28:39Z bschmidt $");

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/clock.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>

#include <dev/iwn/if_iwnreg.h>
#include <dev/iwn/if_iwnvar.h>

static int	iwn_probe(device_t);
static int	iwn_attach(device_t);
static const struct iwn_hal *iwn_hal_attach(struct iwn_softc *);
static void	iwn_radiotap_attach(struct iwn_softc *);
static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
		    const char name[IFNAMSIZ], int unit, int opmode,
		    int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
		    const uint8_t mac[IEEE80211_ADDR_LEN]);
static void	iwn_vap_delete(struct ieee80211vap *);
static int	iwn_cleanup(device_t);
static int	iwn_detach(device_t);
static int	iwn_nic_lock(struct iwn_softc *);
static int	iwn_eeprom_lock(struct iwn_softc *);
static int	iwn_init_otprom(struct iwn_softc *);
static int	iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
static void	iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int	iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
		    void **, bus_size_t, bus_size_t, int);
static void	iwn_dma_contig_free(struct iwn_dma_info *);
static int	iwn_alloc_sched(struct iwn_softc *);
static void	iwn_free_sched(struct iwn_softc *);
static int	iwn_alloc_kw(struct iwn_softc *);
static void	iwn_free_kw(struct iwn_softc *);
static int	iwn_alloc_ict(struct iwn_softc *);
static void	iwn_free_ict(struct iwn_softc *);
static int	iwn_alloc_fwmem(struct iwn_softc *);
static void	iwn_free_fwmem(struct iwn_softc *);
static int	iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void	iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void	iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static int	iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
		    int);
static void	iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void	iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void	iwn5000_ict_reset(struct iwn_softc *);
static int	iwn_read_eeprom(struct iwn_softc *,
		    uint8_t macaddr[IEEE80211_ADDR_LEN]);
static void	iwn4965_read_eeprom(struct iwn_softc *);
static void	iwn4965_print_power_group(struct iwn_softc *, int);
static void	iwn5000_read_eeprom(struct iwn_softc *);
static uint32_t	iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
static void	iwn_read_eeprom_band(struct iwn_softc *, int);
#if 0	/* HT */
static void	iwn_read_eeprom_ht40(struct iwn_softc *, int);
#endif
static void	iwn_read_eeprom_channels(struct iwn_softc *, int,
		    uint32_t);
static void	iwn_read_eeprom_enhinfo(struct iwn_softc *);
static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
		    const uint8_t mac[IEEE80211_ADDR_LEN]);
static void	iwn_newassoc(struct ieee80211_node *, int);
static int	iwn_media_change(struct ifnet *);
static int	iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_timer_timeout(void *);
static void	iwn_calib_reset(struct iwn_softc *);
static void	iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
#if 0	/* HT */
static void	iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
#endif
static void	iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
		    uint8_t);
static void	iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
static void	iwn_notif_intr(struct iwn_softc *);
static void	iwn_wakeup_intr(struct iwn_softc *);
static void	iwn_rftoggle_intr(struct iwn_softc *);
static void	iwn_fatal_intr(struct iwn_softc *);
static void	iwn_intr(void *);
static void	iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
static void	iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
#ifdef notyet
static void	iwn5000_reset_sched(struct iwn_softc *, int, int);
#endif
static uint8_t	iwn_plcp_signal(int);
static int	iwn_tx_data(struct iwn_softc *, struct mbuf *,
		    struct ieee80211_node *, struct iwn_tx_ring *);
static int	iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
		    const struct ieee80211_bpf_params *);
static void	iwn_start(struct ifnet *);
static void	iwn_start_locked(struct ifnet *);
static void	iwn_watchdog(struct iwn_softc *sc);
static int	iwn_ioctl(struct ifnet *, u_long, caddr_t);
static int	iwn_cmd(struct iwn_softc *, int, const void *, int, int);
static int	iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
static int	iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
static int	iwn_set_link_quality(struct iwn_softc *, uint8_t, int);
static int	iwn_add_broadcast_node(struct iwn_softc *, int);
static int	iwn_wme_update(struct ieee80211com *);
static void	iwn_update_mcast(struct ifnet *);
static void	iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
static int	iwn_set_critical_temp(struct iwn_softc *);
static int	iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
static void	iwn4965_power_calibration(struct iwn_softc *, int);
static int	iwn4965_set_txpower(struct iwn_softc *,
		    struct ieee80211_channel *, int);
static int	iwn5000_set_txpower(struct iwn_softc *,
		    struct ieee80211_channel *, int);
static int	iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
static int	iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
static int	iwn_get_noise(const struct iwn_rx_general_stats *);
static int	iwn4965_get_temperature(struct iwn_softc *);
static int	iwn5000_get_temperature(struct iwn_softc *);
static int	iwn_init_sensitivity(struct iwn_softc *);
static void	iwn_collect_noise(struct iwn_softc *,
		    const struct iwn_rx_general_stats *);
static int	iwn4965_init_gains(struct iwn_softc *);
static int	iwn5000_init_gains(struct iwn_softc *);
static int	iwn4965_set_gains(struct iwn_softc *);
static int	iwn5000_set_gains(struct iwn_softc *);
static void	iwn_tune_sensitivity(struct iwn_softc *,
		    const struct iwn_rx_stats *);
static int	iwn_send_sensitivity(struct iwn_softc *);
static int	iwn_set_pslevel(struct iwn_softc *, int, int, int);
static int	iwn_config(struct iwn_softc *);
static int	iwn_scan(struct iwn_softc *);
static int	iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
static int	iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
#if 0	/* HT */
static int	iwn_ampdu_rx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwn_ampdu_rx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static int	iwn_ampdu_tx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwn_ampdu_tx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwn4965_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, uint8_t, uint16_t);
static void	iwn4965_ampdu_tx_stop(struct iwn_softc *, uint8_t, uint16_t);
static void	iwn5000_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, uint8_t, uint16_t);
static void	iwn5000_ampdu_tx_stop(struct iwn_softc *, uint8_t, uint16_t);
#endif
static int	iwn5000_send_calib_results(struct iwn_softc *);
static int	iwn5000_save_calib_result(struct iwn_softc *,
		    struct iwn_phy_calib *, int, int);
static void	iwn5000_free_calib_results(struct iwn_softc *);
static int	iwn5000_chrystal_calib(struct iwn_softc *);
static int	iwn5000_send_calib_query(struct iwn_softc *, uint32_t);
static int	iwn5000_rx_calib_result(struct iwn_softc *,
		    struct iwn_rx_desc *, struct iwn_rx_data *);
static int	iwn5000_send_wimax_coex(struct iwn_softc *);
static int	iwn4965_post_alive(struct iwn_softc *);
static int	iwn5000_post_alive(struct iwn_softc *);
static int	iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
		    int);
static int	iwn4965_load_firmware(struct iwn_softc *);
static int	iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
		    const uint8_t *, int);
static int	iwn5000_load_firmware(struct iwn_softc *);
static int	iwn_read_firmware_leg(struct iwn_softc *,
		    struct iwn_fw_info *);
static int	iwn_read_firmware_tlv(struct iwn_softc *,
		    struct iwn_fw_info *, uint16_t);
static int	iwn_read_firmware(struct iwn_softc *);
static int	iwn_clock_wait(struct iwn_softc *);
static int	iwn_apm_init(struct iwn_softc *);
static void	iwn_apm_stop_master(struct iwn_softc *);
static void	iwn_apm_stop(struct iwn_softc *);
static int	iwn4965_nic_config(struct iwn_softc *);
static int	iwn5000_nic_config(struct iwn_softc *);
static int	iwn_hw_prepare(struct iwn_softc *);
static int	iwn_hw_init(struct iwn_softc *);
static void	iwn_hw_stop(struct iwn_softc *);
static void	iwn_init_locked(struct iwn_softc *);
static void	iwn_init(void *);
static void	iwn_stop_locked(struct iwn_softc *);
static void	iwn_stop(struct iwn_softc *);
static void	iwn_scan_start(struct ieee80211com *);
static void	iwn_scan_end(struct ieee80211com *);
static void	iwn_set_channel(struct ieee80211com *);
static void	iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwn_scan_mindwell(struct ieee80211_scan_state *);
static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
		    struct ieee80211_channel *);
static int	iwn_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel []);
static void	iwn_hw_reset(void *, int);
static void	iwn_radio_on(void *, int);
static void	iwn_radio_off(void *, int);
static void	iwn_sysctlattach(struct iwn_softc *);
static int	iwn_shutdown(device_t);
static int	iwn_suspend(device_t);
static int	iwn_resume(device_t);

#define IWN_DEBUG
#ifdef IWN_DEBUG
enum {
	IWN_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	IWN_DEBUG_RECV		= 0x00000002,	/* basic recv operation */
	IWN_DEBUG_STATE		= 0x00000004,	/* 802.11 state transitions */
	IWN_DEBUG_TXPOW		= 0x00000008,	/* tx power processing */
	IWN_DEBUG_RESET		= 0x00000010,	/* reset processing */
	IWN_DEBUG_OPS		= 0x00000020,	/* iwn_ops processing */
	IWN_DEBUG_BEACON 	= 0x00000040,	/* beacon handling */
	IWN_DEBUG_WATCHDOG 	= 0x00000080,	/* watchdog timeout */
	IWN_DEBUG_INTR		= 0x00000100,	/* ISR */
	IWN_DEBUG_CALIBRATE	= 0x00000200,	/* periodic calibration */
	IWN_DEBUG_NODE		= 0x00000400,	/* node management */
	IWN_DEBUG_LED		= 0x00000800,	/* led management */
	IWN_DEBUG_CMD		= 0x00001000,	/* cmd submission */
	IWN_DEBUG_FATAL		= 0x80000000,	/* fatal errors */
	IWN_DEBUG_ANY		= 0xffffffff
};

#define DPRINTF(sc, m, fmt, ...) do {			\
	if (sc->sc_debug & (m))				\
		printf(fmt, __VA_ARGS__);		\
} while (0)

static const char *iwn_intr_str(uint8_t);
#else
#define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0)
#endif
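/*
 * sc_debug is a bitwise OR of the IWN_DEBUG_* flags above; it is normally
 * tuned at runtime through the debug sysctl created in iwn_sysctlattach().
 */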
303
304struct iwn_ident {
305	uint16_t	vendor;
306	uint16_t	device;
307	const char	*name;
308};
309
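/*
 * PCI vendor/device IDs recognized by this driver; the list is terminated
 * by a NULL name entry and is walked by iwn_probe().
 */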
310static const struct iwn_ident iwn_ident_table [] = {
311	{ 0x8086, 0x4229, "Intel(R) PRO/Wireless 4965BGN" },
312	{ 0x8086, 0x422D, "Intel(R) PRO/Wireless 4965BGN" },
313	{ 0x8086, 0x4230, "Intel(R) PRO/Wireless 4965BGN" },
314	{ 0x8086, 0x4233, "Intel(R) PRO/Wireless 4965BGN" },
315	{ 0x8086, 0x4232, "Intel(R) PRO/Wireless 5100" },
316	{ 0x8086, 0x4237, "Intel(R) PRO/Wireless 5100" },
317	{ 0x8086, 0x423C, "Intel(R) PRO/Wireless 5150" },
318	{ 0x8086, 0x423D, "Intel(R) PRO/Wireless 5150" },
319	{ 0x8086, 0x4235, "Intel(R) PRO/Wireless 5300" },
320	{ 0x8086, 0x4236, "Intel(R) PRO/Wireless 5300" },
321	{ 0x8086, 0x423A, "Intel(R) PRO/Wireless 5350" },
322	{ 0x8086, 0x423B, "Intel(R) PRO/Wireless 5350" },
323	{ 0x8086, 0x0083, "Intel(R) PRO/Wireless 1000" },
324	{ 0x8086, 0x0084, "Intel(R) PRO/Wireless 1000" },
325	{ 0x8086, 0x008D, "Intel(R) PRO/Wireless 6000" },
326	{ 0x8086, 0x008E, "Intel(R) PRO/Wireless 6000" },
327	{ 0x8086, 0x4238, "Intel(R) PRO/Wireless 6000" },
328	{ 0x8086, 0x4239, "Intel(R) PRO/Wireless 6000" },
329	{ 0x8086, 0x422B, "Intel(R) PRO/Wireless 6000" },
330	{ 0x8086, 0x422C, "Intel(R) PRO/Wireless 6000" },
331	{ 0x8086, 0x0087, "Intel(R) PRO/Wireless 6250" },
332	{ 0x8086, 0x0089, "Intel(R) PRO/Wireless 6250" },
333	{ 0x8086, 0x0082, "Intel(R) PRO/Wireless 6205a" },
334	{ 0x8086, 0x0085, "Intel(R) PRO/Wireless 6205a" },
335#ifdef notyet
336	{ 0x8086, 0x008a, "Intel(R) PRO/Wireless 6205b" },
337	{ 0x8086, 0x008b, "Intel(R) PRO/Wireless 6205b" },
338	{ 0x8086, 0x008f, "Intel(R) PRO/Wireless 6205b" },
339	{ 0x8086, 0x0090, "Intel(R) PRO/Wireless 6205b" },
340	{ 0x8086, 0x0091, "Intel(R) PRO/Wireless 6205b" },
341#endif
342	{ 0, 0, NULL }
343};
344
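/*
 * Per-generation HALs: function pointers and constants that differ between
 * the 4965 and the 5000/6000 family.  iwn_hal_attach() selects one of these
 * based on the hardware revision read from IWN_HW_REV.
 */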
345static const struct iwn_hal iwn4965_hal = {
346	iwn4965_load_firmware,
347	iwn4965_read_eeprom,
348	iwn4965_post_alive,
349	iwn4965_nic_config,
350	iwn4965_update_sched,
351	iwn4965_get_temperature,
352	iwn4965_get_rssi,
353	iwn4965_set_txpower,
354	iwn4965_init_gains,
355	iwn4965_set_gains,
356	iwn4965_add_node,
357	iwn4965_tx_done,
358#if 0	/* HT */
359	iwn4965_ampdu_tx_start,
360	iwn4965_ampdu_tx_stop,
361#endif
362	IWN4965_NTXQUEUES,
363	IWN4965_NDMACHNLS,
364	IWN4965_ID_BROADCAST,
365	IWN4965_RXONSZ,
366	IWN4965_SCHEDSZ,
367	IWN4965_FW_TEXT_MAXSZ,
368	IWN4965_FW_DATA_MAXSZ,
369	IWN4965_FWSZ,
370	IWN4965_SCHED_TXFACT
371};
372
373static const struct iwn_hal iwn5000_hal = {
374	iwn5000_load_firmware,
375	iwn5000_read_eeprom,
376	iwn5000_post_alive,
377	iwn5000_nic_config,
378	iwn5000_update_sched,
379	iwn5000_get_temperature,
380	iwn5000_get_rssi,
381	iwn5000_set_txpower,
382	iwn5000_init_gains,
383	iwn5000_set_gains,
384	iwn5000_add_node,
385	iwn5000_tx_done,
386#if 0	/* HT */
387	iwn5000_ampdu_tx_start,
388	iwn5000_ampdu_tx_stop,
389#endif
390	IWN5000_NTXQUEUES,
391	IWN5000_NDMACHNLS,
392	IWN5000_ID_BROADCAST,
393	IWN5000_RXONSZ,
394	IWN5000_SCHEDSZ,
395	IWN5000_FW_TEXT_MAXSZ,
396	IWN5000_FW_DATA_MAXSZ,
397	IWN5000_FWSZ,
398	IWN5000_SCHED_TXFACT
399};
400
401static int
402iwn_probe(device_t dev)
403{
404	const struct iwn_ident *ident;
405
406	for (ident = iwn_ident_table; ident->name != NULL; ident++) {
407		if (pci_get_vendor(dev) == ident->vendor &&
408		    pci_get_device(dev) == ident->device) {
409			device_set_desc(dev, ident->name);
410			return 0;
411		}
412	}
413	return ENXIO;
414}
415
416static int
417iwn_attach(device_t dev)
418{
419	struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev);
420	struct ieee80211com *ic;
421	struct ifnet *ifp;
422	const struct iwn_hal *hal;
423	uint32_t tmp;
424	int i, error, result;
425	uint8_t macaddr[IEEE80211_ADDR_LEN];
426
427	sc->sc_dev = dev;
428
429	/*
430	 * Get the offset of the PCI Express Capability Structure in PCI
431	 * Configuration Space.
432	 */
433	error = pci_find_extcap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
434	if (error != 0) {
435		device_printf(dev, "PCIe capability structure not found!\n");
436		return error;
437	}
438
439	/* Clear device-specific "PCI retry timeout" register (41h). */
440	pci_write_config(dev, 0x41, 0, 1);
441
442	/* Hardware bug workaround. */
443	tmp = pci_read_config(dev, PCIR_COMMAND, 1);
444	if (tmp & PCIM_CMD_INTxDIS) {
445		DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n",
446		    __func__);
447		tmp &= ~PCIM_CMD_INTxDIS;
448		pci_write_config(dev, PCIR_COMMAND, tmp, 1);
449	}
450
451	/* Enable bus-mastering. */
452	pci_enable_busmaster(dev);
453
454	sc->mem_rid = PCIR_BAR(0);
455	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
456	    RF_ACTIVE);
457	if (sc->mem == NULL ) {
458		device_printf(dev, "could not allocate memory resources\n");
459		error = ENOMEM;
460		return error;
461	}
462
463	sc->sc_st = rman_get_bustag(sc->mem);
464	sc->sc_sh = rman_get_bushandle(sc->mem);
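	/*
	 * Use a single MSI vector when the device and bus support it;
	 * otherwise fall back to a shared legacy INTx interrupt (rid 0).
	 */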
465	sc->irq_rid = 0;
466	if ((result = pci_msi_count(dev)) == 1 &&
467	    pci_alloc_msi(dev, &result) == 0)
468		sc->irq_rid = 1;
469	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
470	    RF_ACTIVE | RF_SHAREABLE);
471	if (sc->irq == NULL) {
472		device_printf(dev, "could not allocate interrupt resource\n");
473		error = ENOMEM;
474		goto fail;
475	}
476
477	IWN_LOCK_INIT(sc);
478	callout_init_mtx(&sc->sc_timer_to, &sc->sc_mtx, 0);
479	TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc );
480	TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc );
481	TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc );
482
483	/* Attach Hardware Abstraction Layer. */
484	hal = iwn_hal_attach(sc);
485	if (hal == NULL) {
486		error = ENXIO;	/* XXX: Wrong error code? */
487		goto fail;
488	}
489
490	error = iwn_hw_prepare(sc);
491	if (error != 0) {
492		device_printf(dev, "hardware not ready, error %d\n", error);
493		goto fail;
494	}
495
496	/* Allocate DMA memory for firmware transfers. */
497	error = iwn_alloc_fwmem(sc);
498	if (error != 0) {
499		device_printf(dev,
500		    "could not allocate memory for firmware, error %d\n",
501		    error);
502		goto fail;
503	}
504
505	/* Allocate "Keep Warm" page. */
506	error = iwn_alloc_kw(sc);
507	if (error != 0) {
508		device_printf(dev,
509		    "could not allocate \"Keep Warm\" page, error %d\n", error);
510		goto fail;
511	}
512
513	/* Allocate ICT table for 5000 Series. */
514	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
515	    (error = iwn_alloc_ict(sc)) != 0) {
516		device_printf(dev,
517		    "%s: could not allocate ICT table, error %d\n",
518		    __func__, error);
519		goto fail;
520	}
521
522	/* Allocate TX scheduler "rings". */
523	error = iwn_alloc_sched(sc);
524	if (error != 0) {
525		device_printf(dev,
526		    "could not allocate TX scheduler rings, error %d\n",
527		    error);
528		goto fail;
529	}
530
531	/* Allocate TX rings (16 on 4965AGN, 20 on 5000). */
532	for (i = 0; i < hal->ntxqs; i++) {
533		error = iwn_alloc_tx_ring(sc, &sc->txq[i], i);
534		if (error != 0) {
535			device_printf(dev,
536			    "could not allocate Tx ring %d, error %d\n",
537			    i, error);
538			goto fail;
539		}
540	}
541
542	/* Allocate RX ring. */
543	error = iwn_alloc_rx_ring(sc, &sc->rxq);
544	if (error != 0 ){
545		device_printf(dev,
546		    "could not allocate Rx ring, error %d\n", error);
547		goto fail;
548	}
549
550	/* Clear pending interrupts. */
551	IWN_WRITE(sc, IWN_INT, 0xffffffff);
552
553	/* Count the number of available chains. */
554	sc->ntxchains =
555	    ((sc->txchainmask >> 2) & 1) +
556	    ((sc->txchainmask >> 1) & 1) +
557	    ((sc->txchainmask >> 0) & 1);
558	sc->nrxchains =
559	    ((sc->rxchainmask >> 2) & 1) +
560	    ((sc->rxchainmask >> 1) & 1) +
561	    ((sc->rxchainmask >> 0) & 1);
562
563	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
564	if (ifp == NULL) {
565		device_printf(dev, "can not allocate ifnet structure\n");
566		goto fail;
567	}
568	ic = ifp->if_l2com;
569
570	ic->ic_ifp = ifp;
571	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
572	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
573
574	/* Set device capabilities. */
575	ic->ic_caps =
576		  IEEE80211_C_STA		/* station mode supported */
577		| IEEE80211_C_MONITOR		/* monitor mode supported */
578		| IEEE80211_C_TXPMGT		/* tx power management */
579		| IEEE80211_C_SHSLOT		/* short slot time supported */
580		| IEEE80211_C_WPA
581		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
582		| IEEE80211_C_BGSCAN		/* background scanning */
583#if 0
584		| IEEE80211_C_IBSS		/* ibss/adhoc mode */
585#endif
586		| IEEE80211_C_WME		/* WME */
587		;
588#if 0	/* HT */
589	/* XXX disable until HT channel setup works */
590	ic->ic_htcaps =
591		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
592		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
593		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
594		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
595		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
596		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
597		/* s/w capabilities */
598		| IEEE80211_HTC_HT		/* HT operation */
599		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
600		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
601		;
602
603	/* Set HT capabilities. */
604	ic->ic_htcaps =
605#if IWN_RBUF_SIZE == 8192
606	    IEEE80211_HTCAP_AMSDU7935 |
607#endif
608	    IEEE80211_HTCAP_CBW20_40 |
609	    IEEE80211_HTCAP_SGI20 |
610	    IEEE80211_HTCAP_SGI40;
611	if (sc->hw_type != IWN_HW_REV_TYPE_4965)
612		ic->ic_htcaps |= IEEE80211_HTCAP_GF;
613	if (sc->hw_type == IWN_HW_REV_TYPE_6050)
614		ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN;
615	else
616		ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS;
617#endif
618
619	/* Read MAC address, channels, etc from EEPROM. */
620	error = iwn_read_eeprom(sc, macaddr);
621	if (error != 0) {
622		device_printf(dev, "could not read EEPROM, error %d\n",
623		    error);
624		goto fail;
625	}
626
627	device_printf(sc->sc_dev, "MIMO %dT%dR, %.4s, address %6D\n",
628	    sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
629	    macaddr, ":");
630
631#if 0	/* HT */
632	/* Set supported HT rates. */
633	ic->ic_sup_mcs[0] = 0xff;
634	if (sc->nrxchains > 1)
635		ic->ic_sup_mcs[1] = 0xff;
636	if (sc->nrxchains > 2)
637		ic->ic_sup_mcs[2] = 0xff;
638#endif
639
640	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
641	ifp->if_softc = sc;
642	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
643	ifp->if_init = iwn_init;
644	ifp->if_ioctl = iwn_ioctl;
645	ifp->if_start = iwn_start;
646	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
647	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
648	IFQ_SET_READY(&ifp->if_snd);
649
650	ieee80211_ifattach(ic, macaddr);
651	ic->ic_vap_create = iwn_vap_create;
652	ic->ic_vap_delete = iwn_vap_delete;
653	ic->ic_raw_xmit = iwn_raw_xmit;
654	ic->ic_node_alloc = iwn_node_alloc;
655	ic->ic_newassoc = iwn_newassoc;
656	ic->ic_wme.wme_update = iwn_wme_update;
657	ic->ic_update_mcast = iwn_update_mcast;
658	ic->ic_scan_start = iwn_scan_start;
659	ic->ic_scan_end = iwn_scan_end;
660	ic->ic_set_channel = iwn_set_channel;
661	ic->ic_scan_curchan = iwn_scan_curchan;
662	ic->ic_scan_mindwell = iwn_scan_mindwell;
663	ic->ic_setregdomain = iwn_setregdomain;
664#if 0	/* HT */
665	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
666	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
667	ic->ic_ampdu_tx_start = iwn_ampdu_tx_start;
668	ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop;
669#endif
670
671	iwn_radiotap_attach(sc);
672	iwn_sysctlattach(sc);
673
674	/*
675	 * Hook our interrupt after all initialization is complete.
676	 */
677	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
678	    NULL, iwn_intr, sc, &sc->sc_ih);
679	if (error != 0) {
680		device_printf(dev, "could not set up interrupt, error %d\n",
681		    error);
682		goto fail;
683	}
684
685	ieee80211_announce(ic);
686	return 0;
687fail:
688	iwn_cleanup(dev);
689	return error;
690}
691
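/*
 * Pick the HAL matching the hardware revision and record the chip-specific
 * firmware image name, sensitivity limits, antenna chain masks and
 * calibration requirements.
 */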
692static const struct iwn_hal *
693iwn_hal_attach(struct iwn_softc *sc)
694{
695	sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf;
696
697	switch (sc->hw_type) {
698	case IWN_HW_REV_TYPE_4965:
699		sc->sc_hal = &iwn4965_hal;
700		sc->limits = &iwn4965_sensitivity_limits;
701		sc->fwname = "iwn4965fw";
702		sc->txchainmask = IWN_ANT_AB;
703		sc->rxchainmask = IWN_ANT_ABC;
704		break;
705	case IWN_HW_REV_TYPE_5100:
706		sc->sc_hal = &iwn5000_hal;
707		sc->limits = &iwn5000_sensitivity_limits;
708		sc->fwname = "iwn5000fw";
709		sc->txchainmask = IWN_ANT_B;
710		sc->rxchainmask = IWN_ANT_AB;
711		sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO |
712		    IWN_CALIB_TX_IQ | IWN_CALIB_TX_IQ_PERIODIC |
713		    IWN_CALIB_BASE_BAND;
714		break;
715	case IWN_HW_REV_TYPE_5150:
716		sc->sc_hal = &iwn5000_hal;
717		sc->limits = &iwn5150_sensitivity_limits;
718		sc->fwname = "iwn5150fw";
719		sc->txchainmask = IWN_ANT_A;
720		sc->rxchainmask = IWN_ANT_AB;
721		sc->calib_init = IWN_CALIB_DC | IWN_CALIB_LO |
722		    IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND;
723		break;
724	case IWN_HW_REV_TYPE_5300:
725	case IWN_HW_REV_TYPE_5350:
726		sc->sc_hal = &iwn5000_hal;
727		sc->limits = &iwn5000_sensitivity_limits;
728		sc->fwname = "iwn5000fw";
729		sc->txchainmask = IWN_ANT_ABC;
730		sc->rxchainmask = IWN_ANT_ABC;
731		sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO |
732		    IWN_CALIB_TX_IQ | IWN_CALIB_TX_IQ_PERIODIC |
733		    IWN_CALIB_BASE_BAND;
734		break;
735	case IWN_HW_REV_TYPE_1000:
736		sc->sc_hal = &iwn5000_hal;
737		sc->limits = &iwn1000_sensitivity_limits;
738		sc->fwname = "iwn1000fw";
739		sc->txchainmask = IWN_ANT_A;
740		sc->rxchainmask = IWN_ANT_AB;
741		sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO |
742		    IWN_CALIB_TX_IQ | IWN_CALIB_TX_IQ_PERIODIC |
743		    IWN_CALIB_BASE_BAND;
744		break;
745	case IWN_HW_REV_TYPE_6000:
746		sc->sc_hal = &iwn5000_hal;
747		sc->limits = &iwn6000_sensitivity_limits;
748		sc->fwname = "iwn6000fw";
749		switch (pci_get_device(sc->sc_dev)) {
750		case 0x422C:
751		case 0x4239:
752			sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
753			sc->txchainmask = IWN_ANT_BC;
754			sc->rxchainmask = IWN_ANT_BC;
755			break;
756		default:
757			sc->txchainmask = IWN_ANT_ABC;
758			sc->rxchainmask = IWN_ANT_ABC;
759			sc->calib_runtime = IWN_CALIB_DC;
760			break;
761		}
762		sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO |
763		    IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND;
764		break;
765	case IWN_HW_REV_TYPE_6050:
766		sc->sc_hal = &iwn5000_hal;
767		sc->limits = &iwn6000_sensitivity_limits;
768		sc->fwname = "iwn6050fw";
769		sc->txchainmask = IWN_ANT_AB;
770		sc->rxchainmask = IWN_ANT_AB;
771		sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO |
772		    IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND;
773		sc->calib_runtime = IWN_CALIB_DC;
774		break;
775	case IWN_HW_REV_TYPE_6005:
776		sc->sc_hal = &iwn5000_hal;
777		sc->limits = &iwn6000_sensitivity_limits;
778		sc->fwname = "iwn6005fw";
779		sc->txchainmask = IWN_ANT_AB;
780		sc->rxchainmask = IWN_ANT_AB;
781		sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO |
782		    IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND;
783		sc->calib_runtime = IWN_CALIB_DC;
784		break;
785	default:
786		device_printf(sc->sc_dev, "adapter type %d not supported\n",
787		    sc->hw_type);
788		return NULL;
789	}
790	return sc->sc_hal;
791}
792
793/*
794 * Attach the interface to 802.11 radiotap.
795 */
796static void
797iwn_radiotap_attach(struct iwn_softc *sc)
798{
799	struct ifnet *ifp = sc->sc_ifp;
800	struct ieee80211com *ic = ifp->if_l2com;
801
802	ieee80211_radiotap_attach(ic,
803	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
804		IWN_TX_RADIOTAP_PRESENT,
805	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
806		IWN_RX_RADIOTAP_PRESENT);
807}
808
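/*
 * Only a single vap is supported; creation fails if one already exists.
 */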
809static struct ieee80211vap *
810iwn_vap_create(struct ieee80211com *ic,
811	const char name[IFNAMSIZ], int unit, int opmode, int flags,
812	const uint8_t bssid[IEEE80211_ADDR_LEN],
813	const uint8_t mac[IEEE80211_ADDR_LEN])
814{
815	struct iwn_vap *ivp;
816	struct ieee80211vap *vap;
817
818	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
819		return NULL;
820	ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap),
821	    M_80211_VAP, M_NOWAIT | M_ZERO);
822	if (ivp == NULL)
823		return NULL;
824	vap = &ivp->iv_vap;
825	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
826	vap->iv_bmissthreshold = 10;		/* override default */
827	/* Override with driver methods. */
828	ivp->iv_newstate = vap->iv_newstate;
829	vap->iv_newstate = iwn_newstate;
830
831	ieee80211_ratectl_init(vap);
832	/* Complete setup. */
833	ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status);
834	ic->ic_opmode = opmode;
835	return vap;
836}
837
838static void
839iwn_vap_delete(struct ieee80211vap *vap)
840{
841	struct iwn_vap *ivp = IWN_VAP(vap);
842
843	ieee80211_ratectl_deinit(vap);
844	ieee80211_vap_detach(vap);
845	free(ivp, M_80211_VAP);
846}
847
848static int
849iwn_cleanup(device_t dev)
850{
851	struct iwn_softc *sc = device_get_softc(dev);
852	struct ifnet *ifp = sc->sc_ifp;
853	struct ieee80211com *ic;
854	int i;
855
856	if (ifp != NULL) {
857		ic = ifp->if_l2com;
858
859		ieee80211_draintask(ic, &sc->sc_reinit_task);
860		ieee80211_draintask(ic, &sc->sc_radioon_task);
861		ieee80211_draintask(ic, &sc->sc_radiooff_task);
862
863		iwn_stop(sc);
864		callout_drain(&sc->sc_timer_to);
865		ieee80211_ifdetach(ic);
866	}
867
868	iwn5000_free_calib_results(sc);
869
870	/* Free DMA resources. */
871	iwn_free_rx_ring(sc, &sc->rxq);
872	if (sc->sc_hal != NULL)
873		for (i = 0; i < sc->sc_hal->ntxqs; i++)
874			iwn_free_tx_ring(sc, &sc->txq[i]);
875	iwn_free_sched(sc);
876	iwn_free_kw(sc);
877	if (sc->ict != NULL)
878		iwn_free_ict(sc);
879	iwn_free_fwmem(sc);
880
881	if (sc->irq != NULL) {
882		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
883		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
884		if (sc->irq_rid == 1)
885			pci_release_msi(dev);
886	}
887
888	if (sc->mem != NULL)
889		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
890
891	if (ifp != NULL)
892		if_free(ifp);
893
894	IWN_LOCK_DESTROY(sc);
895	return 0;
896}
897
898static int
899iwn_detach(device_t dev)
900{
901	iwn_cleanup(dev);
902	return 0;
903}
904
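/*
 * Acquire exclusive access to the NIC ("MAC access").  Asserting the
 * request bit wakes the device if it was sleeping (the loop below waits
 * for the sleep bit to clear); iwn_nic_unlock() drops the request again.
 */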
905static int
906iwn_nic_lock(struct iwn_softc *sc)
907{
908	int ntries;
909
910	/* Request exclusive access to NIC. */
911	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
912
913	/* Spin until we actually get the lock. */
914	for (ntries = 0; ntries < 1000; ntries++) {
915		if ((IWN_READ(sc, IWN_GP_CNTRL) &
916		    (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
917		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
918			return 0;
919		DELAY(10);
920	}
921	return ETIMEDOUT;
922}
923
924static __inline void
925iwn_nic_unlock(struct iwn_softc *sc)
926{
927	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
928}
929
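/*
 * Peripheral ("prph") registers and internal device memory are reached
 * indirectly through address/data register pairs; the inline helpers below
 * wrap those accesses.  Callers normally hold the NIC lock around them.
 */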
930static __inline uint32_t
931iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
932{
933	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
934	IWN_BARRIER_READ_WRITE(sc);
935	return IWN_READ(sc, IWN_PRPH_RDATA);
936}
937
938static __inline void
939iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
940{
941	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
942	IWN_BARRIER_WRITE(sc);
943	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
944}
945
946static __inline void
947iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
948{
949	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
950}
951
952static __inline void
953iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
954{
955	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
956}
957
958static __inline void
959iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
960    const uint32_t *data, int count)
961{
962	for (; count > 0; count--, data++, addr += 4)
963		iwn_prph_write(sc, addr, *data);
964}
965
966static __inline uint32_t
967iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
968{
969	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
970	IWN_BARRIER_READ_WRITE(sc);
971	return IWN_READ(sc, IWN_MEM_RDATA);
972}
973
974static __inline void
975iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
976{
977	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
978	IWN_BARRIER_WRITE(sc);
979	IWN_WRITE(sc, IWN_MEM_WDATA, data);
980}
981
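/* Write a 16-bit value by read-modify-write of the enclosing 32-bit word. */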
982static __inline void
983iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
984{
985	uint32_t tmp;
986
987	tmp = iwn_mem_read(sc, addr & ~3);
988	if (addr & 3)
989		tmp = (tmp & 0x0000ffff) | data << 16;
990	else
991		tmp = (tmp & 0xffff0000) | data;
992	iwn_mem_write(sc, addr & ~3, tmp);
993}
994
995static __inline void
996iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
997    int count)
998{
999	for (; count > 0; count--, addr += 4)
1000		*data++ = iwn_mem_read(sc, addr);
1001}
1002
1003static __inline void
1004iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
1005    int count)
1006{
1007	for (; count > 0; count--, addr += 4)
1008		iwn_mem_write(sc, addr, val);
1009}
1010
1011static int
1012iwn_eeprom_lock(struct iwn_softc *sc)
1013{
1014	int i, ntries;
1015
1016	for (i = 0; i < 100; i++) {
1017		/* Request exclusive access to EEPROM. */
1018		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
1019		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1020
1021		/* Spin until we actually get the lock. */
1022		for (ntries = 0; ntries < 100; ntries++) {
1023			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
1024			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
1025				return 0;
1026			DELAY(10);
1027		}
1028	}
1029	return ETIMEDOUT;
1030}
1031
1032static __inline void
1033iwn_eeprom_unlock(struct iwn_softc *sc)
1034{
1035	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1036}
1037
1038/*
1039 * Initialize access by host to One Time Programmable ROM.
1040 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
1041 */
1042static int
1043iwn_init_otprom(struct iwn_softc *sc)
1044{
1045	uint16_t prev, base, next;
1046	int count, error;
1047
1048	/* Wait for clock stabilization before accessing prph. */
1049	error = iwn_clock_wait(sc);
1050	if (error != 0)
1051		return error;
1052
1053	error = iwn_nic_lock(sc);
1054	if (error != 0)
1055		return error;
1056	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1057	DELAY(5);
1058	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1059	iwn_nic_unlock(sc);
1060
1061	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1062	if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
1063		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1064		    IWN_RESET_LINK_PWR_MGMT_DIS);
1065	}
1066	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1067	/* Clear ECC status. */
1068	IWN_SETBITS(sc, IWN_OTP_GP,
1069	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1070
1071	/*
1072	 * Find the block before last block (contains the EEPROM image)
1073	 * for HW without OTP shadow RAM.
1074	 */
1075	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
1076		/* Switch to absolute addressing mode. */
1077		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1078		base = prev = 0;
1079		for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
1080			error = iwn_read_prom_data(sc, base, &next, 2);
1081			if (error != 0)
1082				return error;
1083			if (next == 0)	/* End of linked-list. */
1084				break;
1085			prev = base;
1086			base = le16toh(next);
1087		}
1088		if (count == 0 || count == IWN1000_OTP_NBLOCKS)
1089			return EIO;
1090		/* Skip "next" word. */
1091		sc->prom_base = prev + 1;
1092	}
1093	return 0;
1094}
1095
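/*
 * Read `count' bytes from the EEPROM/OTPROM at offset `addr'.  Data is
 * fetched 16 bits per access through the IWN_EEPROM register; on OTPROM
 * parts the ECC status bits are checked after every read.
 */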
1096static int
1097iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1098{
1099	uint32_t val, tmp;
1100	int ntries;
1101	uint8_t *out = data;
1102
1103	addr += sc->prom_base;
1104	for (; count > 0; count -= 2, addr++) {
1105		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1106		for (ntries = 0; ntries < 10; ntries++) {
1107			val = IWN_READ(sc, IWN_EEPROM);
1108			if (val & IWN_EEPROM_READ_VALID)
1109				break;
1110			DELAY(5);
1111		}
1112		if (ntries == 10) {
1113			device_printf(sc->sc_dev,
1114			    "timeout reading ROM at 0x%x\n", addr);
1115			return ETIMEDOUT;
1116		}
1117		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1118			/* OTPROM, check for ECC errors. */
1119			tmp = IWN_READ(sc, IWN_OTP_GP);
1120			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1121				device_printf(sc->sc_dev,
1122				    "OTPROM ECC error at 0x%x\n", addr);
1123				return EIO;
1124			}
1125			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1126				/* Correctable ECC error, clear bit. */
1127				IWN_SETBITS(sc, IWN_OTP_GP,
1128				    IWN_OTP_GP_ECC_CORR_STTS);
1129			}
1130		}
1131		*out++ = val >> 16;
1132		if (count > 1)
1133			*out++ = val >> 24;
1134	}
1135	return 0;
1136}
1137
1138static void
1139iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1140{
1141	if (error != 0)
1142		return;
1143	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1144	*(bus_addr_t *)arg = segs[0].ds_addr;
1145}
1146
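/*
 * Allocate a DMA-safe, physically contiguous buffer: a single segment below
 * 4GB with the requested size and alignment.  On success the bus address is
 * left in dma->paddr and the kernel address is optionally returned through
 * `kvap'.
 */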
1147static int
1148iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
1149	void **kvap, bus_size_t size, bus_size_t alignment, int flags)
1150{
1151	int error;
1152
1153	dma->size = size;
1154	dma->tag = NULL;
1155
1156	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
1157	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1158	    1, size, flags, NULL, NULL, &dma->tag);
1159	if (error != 0) {
1160		device_printf(sc->sc_dev,
1161		    "%s: bus_dma_tag_create failed, error %d\n",
1162		    __func__, error);
1163		goto fail;
1164	}
1165	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
1166	    flags | BUS_DMA_ZERO, &dma->map);
1167	if (error != 0) {
1168		device_printf(sc->sc_dev,
1169		    "%s: bus_dmamem_alloc failed, error %d\n", __func__, error);
1170		goto fail;
1171	}
1172	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
1173	    size, iwn_dma_map_addr, &dma->paddr, flags);
1174	if (error != 0) {
1175		device_printf(sc->sc_dev,
1176		    "%s: bus_dmamap_load failed, error %d\n", __func__, error);
1177		goto fail;
1178	}
1179
1180	if (kvap != NULL)
1181		*kvap = dma->vaddr;
1182	return 0;
1183fail:
1184	iwn_dma_contig_free(dma);
1185	return error;
1186}
1187
1188static void
1189iwn_dma_contig_free(struct iwn_dma_info *dma)
1190{
1191	if (dma->tag != NULL) {
1192		if (dma->map != NULL) {
1193			if (dma->paddr != 0) {
1194				bus_dmamap_sync(dma->tag, dma->map,
1195				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1196				bus_dmamap_unload(dma->tag, dma->map);
1197			}
1198			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
1199		}
1200		bus_dma_tag_destroy(dma->tag);
1201	}
1202}
1203
1204static int
1205iwn_alloc_sched(struct iwn_softc *sc)
1206{
1207	/* TX scheduler rings must be aligned on a 1KB boundary. */
1208	return iwn_dma_contig_alloc(sc, &sc->sched_dma,
1209	    (void **)&sc->sched, sc->sc_hal->schedsz, 1024, BUS_DMA_NOWAIT);
1210}
1211
1212static void
1213iwn_free_sched(struct iwn_softc *sc)
1214{
1215	iwn_dma_contig_free(&sc->sched_dma);
1216}
1217
1218static int
1219iwn_alloc_kw(struct iwn_softc *sc)
1220{
1221	/* "Keep Warm" page must be aligned on a 4KB boundary. */
1222	return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096,
1223	    BUS_DMA_NOWAIT);
1224}
1225
1226static void
1227iwn_free_kw(struct iwn_softc *sc)
1228{
1229	iwn_dma_contig_free(&sc->kw_dma);
1230}
1231
1232static int
1233iwn_alloc_ict(struct iwn_softc *sc)
1234{
1235	/* ICT table must be aligned on a 4KB boundary. */
1236	return iwn_dma_contig_alloc(sc, &sc->ict_dma,
1237	    (void **)&sc->ict, IWN_ICT_SIZE, 4096, BUS_DMA_NOWAIT);
1238}
1239
1240static void
1241iwn_free_ict(struct iwn_softc *sc)
1242{
1243	iwn_dma_contig_free(&sc->ict_dma);
1244}
1245
1246static int
1247iwn_alloc_fwmem(struct iwn_softc *sc)
1248{
1249	/* Must be aligned on a 16-byte boundary. */
1250	return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL,
1251	    sc->sc_hal->fwsz, 16, BUS_DMA_NOWAIT);
1252}
1253
1254static void
1255iwn_free_fwmem(struct iwn_softc *sc)
1256{
1257	iwn_dma_contig_free(&sc->fw_dma);
1258}
1259
1260static int
1261iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1262{
1263	bus_size_t size;
1264	int i, error;
1265
1266	ring->cur = 0;
1267
1268	/* Allocate RX descriptors (256-byte aligned). */
1269	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1270	error = iwn_dma_contig_alloc(sc, &ring->desc_dma,
1271	    (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT);
1272	if (error != 0) {
1273		device_printf(sc->sc_dev,
1274		    "%s: could not allocate Rx ring DMA memory, error %d\n",
1275		    __func__, error);
1276		goto fail;
1277	}
1278
1279	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1280	    BUS_SPACE_MAXADDR_32BIT,
1281	    BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, 1,
1282	    MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat);
1283	if (error != 0) {
1284		device_printf(sc->sc_dev,
1285		    "%s: bus_dma_tag_create failed, error %d\n",
1286		    __func__, error);
1287		goto fail;
1288	}
1289
1290	/* Allocate RX status area (16-byte aligned). */
1291	error = iwn_dma_contig_alloc(sc, &ring->stat_dma,
1292	    (void **)&ring->stat, sizeof (struct iwn_rx_status),
1293	    16, BUS_DMA_NOWAIT);
1294	if (error != 0) {
1295		device_printf(sc->sc_dev,
1296		    "%s: could not allocate Rx status DMA memory, error %d\n",
1297		    __func__, error);
1298		goto fail;
1299	}
1300
1301	/*
1302	 * Allocate and map RX buffers.
1303	 */
1304	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1305		struct iwn_rx_data *data = &ring->data[i];
1306		bus_addr_t paddr;
1307
1308		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1309		if (error != 0) {
1310			device_printf(sc->sc_dev,
1311			    "%s: bus_dmamap_create failed, error %d\n",
1312			    __func__, error);
1313			goto fail;
1314		}
1315
1316		data->m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
1317		if (data->m == NULL) {
1318			device_printf(sc->sc_dev,
1319			    "%s: could not allocate rx mbuf\n", __func__);
1320			error = ENOMEM;
1321			goto fail;
1322		}
1323
1324		/* Map page. */
1325		error = bus_dmamap_load(ring->data_dmat, data->map,
1326		    mtod(data->m, caddr_t), MJUMPAGESIZE,
1327		    iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
1328		if (error != 0 && error != EFBIG) {
1329			device_printf(sc->sc_dev,
1330			    "%s: bus_dmamap_load failed, error %d\n",
1331			    __func__, error);
1332			m_freem(data->m);
1333			error = ENOMEM;	/* XXX unique code */
1334			goto fail;
1335		}
1336		bus_dmamap_sync(ring->data_dmat, data->map,
1337		    BUS_DMASYNC_PREWRITE);
1338
1339		/* Set physical address of RX buffer (256-byte aligned). */
1340		ring->desc[i] = htole32(paddr >> 8);
1341	}
1342	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1343	    BUS_DMASYNC_PREWRITE);
1344	return 0;
1345fail:
1346	iwn_free_rx_ring(sc, ring);
1347	return error;
1348}
1349
1350static void
1351iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1352{
1353	int ntries;
1354
1355	if (iwn_nic_lock(sc) == 0) {
1356		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1357		for (ntries = 0; ntries < 1000; ntries++) {
1358			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1359			    IWN_FH_RX_STATUS_IDLE)
1360				break;
1361			DELAY(10);
1362		}
1363		iwn_nic_unlock(sc);
1364#ifdef IWN_DEBUG
1365		if (ntries == 1000)
1366			DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
1367			    "timeout resetting Rx ring");
1368#endif
1369	}
1370	ring->cur = 0;
1371	sc->last_rx_valid = 0;
1372}
1373
1374static void
1375iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1376{
1377	int i;
1378
1379	iwn_dma_contig_free(&ring->desc_dma);
1380	iwn_dma_contig_free(&ring->stat_dma);
1381
1382	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1383		struct iwn_rx_data *data = &ring->data[i];
1384
1385		if (data->m != NULL) {
1386			bus_dmamap_sync(ring->data_dmat, data->map,
1387			    BUS_DMASYNC_POSTREAD);
1388			bus_dmamap_unload(ring->data_dmat, data->map);
1389			m_freem(data->m);
1390		}
1391		if (data->map != NULL)
1392			bus_dmamap_destroy(ring->data_dmat, data->map);
1393	}
1394}
1395
1396static int
1397iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1398{
1399	bus_size_t size;
1400	bus_addr_t paddr;
1401	int i, error;
1402
1403	ring->qid = qid;
1404	ring->queued = 0;
1405	ring->cur = 0;
1406
1407	/* Allocate TX descriptors (256-byte aligned.) */
1408	size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_desc);
1409	error = iwn_dma_contig_alloc(sc, &ring->desc_dma,
1410	    (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT);
1411	if (error != 0) {
1412		device_printf(sc->sc_dev,
1413		    "%s: could not allocate TX ring DMA memory, error %d\n",
1414		    __func__, error);
1415		goto fail;
1416	}
1417
1418	/*
1419	 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need
1420	 * to allocate command space for other rings.
1421	 */
1422	if (qid > 4)
1423		return 0;
1424
1425	size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_cmd);
1426	error = iwn_dma_contig_alloc(sc, &ring->cmd_dma,
1427	    (void **)&ring->cmd, size, 4, BUS_DMA_NOWAIT);
1428	if (error != 0) {
1429		device_printf(sc->sc_dev,
1430		    "%s: could not allocate TX cmd DMA memory, error %d\n",
1431		    __func__, error);
1432		goto fail;
1433	}
1434
1435	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1436	    BUS_SPACE_MAXADDR_32BIT,
1437	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IWN_MAX_SCATTER - 1,
1438	    MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat);
1439	if (error != 0) {
1440		device_printf(sc->sc_dev,
1441		    "%s: bus_dma_tag_create failed, error %d\n",
1442		    __func__, error);
1443		goto fail;
1444	}
1445
1446	paddr = ring->cmd_dma.paddr;
1447	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1448		struct iwn_tx_data *data = &ring->data[i];
1449
1450		data->cmd_paddr = paddr;
1451		data->scratch_paddr = paddr + 12;
1452		paddr += sizeof (struct iwn_tx_cmd);
1453
1454		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1455		if (error != 0) {
1456			device_printf(sc->sc_dev,
1457			    "%s: bus_dmamap_create failed, error %d\n",
1458			    __func__, error);
1459			goto fail;
1460		}
1461		bus_dmamap_sync(ring->data_dmat, data->map,
1462		    BUS_DMASYNC_PREWRITE);
1463	}
1464	return 0;
1465fail:
1466	iwn_free_tx_ring(sc, ring);
1467	return error;
1468}
1469
1470static void
1471iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1472{
1473	int i;
1474
1475	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1476		struct iwn_tx_data *data = &ring->data[i];
1477
1478		if (data->m != NULL) {
1479			bus_dmamap_unload(ring->data_dmat, data->map);
1480			m_freem(data->m);
1481			data->m = NULL;
1482		}
1483	}
1484	/* Clear TX descriptors. */
1485	memset(ring->desc, 0, ring->desc_dma.size);
1486	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1487	    BUS_DMASYNC_PREWRITE);
1488	sc->qfullmsk &= ~(1 << ring->qid);
1489	ring->queued = 0;
1490	ring->cur = 0;
1491}
1492
1493static void
1494iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1495{
1496	int i;
1497
1498	iwn_dma_contig_free(&ring->desc_dma);
1499	iwn_dma_contig_free(&ring->cmd_dma);
1500
1501	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1502		struct iwn_tx_data *data = &ring->data[i];
1503
1504		if (data->m != NULL) {
1505			bus_dmamap_sync(ring->data_dmat, data->map,
1506			    BUS_DMASYNC_POSTWRITE);
1507			bus_dmamap_unload(ring->data_dmat, data->map);
1508			m_freem(data->m);
1509		}
1510		if (data->map != NULL)
1511			bus_dmamap_destroy(ring->data_dmat, data->map);
1512	}
1513}
1514
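/*
 * Reset and re-enable the Interrupt Cause Table used by 5000 Series and
 * newer parts: the hardware posts interrupt causes into this DMA table
 * (IWN_ICT_SIZE bytes, 4KB-aligned), which the interrupt handler scans.
 */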
1515static void
1516iwn5000_ict_reset(struct iwn_softc *sc)
1517{
1518	/* Disable interrupts. */
1519	IWN_WRITE(sc, IWN_INT_MASK, 0);
1520
1521	/* Reset ICT table. */
1522	memset(sc->ict, 0, IWN_ICT_SIZE);
1523	sc->ict_cur = 0;
1524
1525	/* Set physical address of ICT table (4KB aligned.) */
1526	DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
1527	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
1528	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
1529
1530	/* Enable periodic RX interrupt. */
1531	sc->int_mask |= IWN_INT_RX_PERIODIC;
1532	/* Switch to ICT interrupt mode in driver. */
1533	sc->sc_flags |= IWN_FLAG_USE_ICT;
1534
1535	/* Re-enable interrupts. */
1536	IWN_WRITE(sc, IWN_INT, 0xffffffff);
1537	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
1538}
1539
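/*
 * Pull the adapter configuration out of the (OTP)ROM: power the device on
 * through APM, detect EEPROM vs. OTPROM, read the radio configuration and
 * MAC address, hand off to the chip-specific read_eeprom method, then power
 * the device back off.
 */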
1540static int
1541iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
1542{
1543	const struct iwn_hal *hal = sc->sc_hal;
1544	int error;
1545	uint16_t val;
1546
1547	/* Check whether adapter has an EEPROM or an OTPROM. */
1548	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
1549	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
1550		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
1551	DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
1552	    (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
1553
1554	/* Adapter has to be powered on for EEPROM access to work. */
1555	error = iwn_apm_init(sc);
1556	if (error != 0) {
1557		device_printf(sc->sc_dev,
1558		    "%s: could not power ON adapter, error %d\n",
1559		    __func__, error);
1560		return error;
1561	}
1562
1563	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
1564		device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
1565		return EIO;
1566	}
1567	error = iwn_eeprom_lock(sc);
1568	if (error != 0) {
1569		device_printf(sc->sc_dev,
1570		    "%s: could not lock ROM, error %d\n",
1571		    __func__, error);
1572		return error;
1573	}
1574
1575	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1576		error = iwn_init_otprom(sc);
1577		if (error != 0) {
1578			device_printf(sc->sc_dev,
1579			    "%s: could not initialize OTPROM, error %d\n",
1580			    __func__, error);
1581			return error;
1582		}
1583	}
1584
1585	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
1586	sc->rfcfg = le16toh(val);
1587	DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
1588
1589	/* Read MAC address. */
1590	iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
1591
1592	/* Read adapter-specific information from EEPROM. */
1593	hal->read_eeprom(sc);
1594
1595	iwn_apm_stop(sc);	/* Power OFF adapter. */
1596
1597	iwn_eeprom_unlock(sc);
1598	return 0;
1599}
1600
1601static void
1602iwn4965_read_eeprom(struct iwn_softc *sc)
1603{
1604	uint32_t addr;
1605	int i;
1606	uint16_t val;
1607
1608	/* Read regulatory domain (4 ASCII characters.) */
1609	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
1610
1611	/* Read the list of authorized channels (20MHz ones only.) */
1612	for (i = 0; i < 5; i++) {
1613		addr = iwn4965_regulatory_bands[i];
1614		iwn_read_eeprom_channels(sc, i, addr);
1615	}
1616
1617	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
1618	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
1619	sc->maxpwr2GHz = val & 0xff;
1620	sc->maxpwr5GHz = val >> 8;
1621	/* Check that EEPROM values are within valid range. */
1622	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
1623		sc->maxpwr5GHz = 38;
1624	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
1625		sc->maxpwr2GHz = 38;
1626	DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
1627	    sc->maxpwr2GHz, sc->maxpwr5GHz);
1628
1629	/* Read samples for each TX power group. */
1630	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
1631	    sizeof sc->bands);
1632
1633	/* Read voltage at which samples were taken. */
1634	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
1635	sc->eeprom_voltage = (int16_t)le16toh(val);
1636	DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
1637	    sc->eeprom_voltage);
1638
1639#ifdef IWN_DEBUG
1640	/* Print samples. */
1641	if (sc->sc_debug & IWN_DEBUG_ANY) {
1642		for (i = 0; i < IWN_NBANDS; i++)
1643			iwn4965_print_power_group(sc, i);
1644	}
1645#endif
1646}
1647
1648#ifdef IWN_DEBUG
1649static void
1650iwn4965_print_power_group(struct iwn_softc *sc, int i)
1651{
1652	struct iwn4965_eeprom_band *band = &sc->bands[i];
1653	struct iwn4965_eeprom_chan_samples *chans = band->chans;
1654	int j, c;
1655
1656	printf("===band %d===\n", i);
1657	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
1658	printf("chan1 num=%d\n", chans[0].num);
1659	for (c = 0; c < 2; c++) {
1660		for (j = 0; j < IWN_NSAMPLES; j++) {
1661			printf("chain %d, sample %d: temp=%d gain=%d "
1662			    "power=%d pa_det=%d\n", c, j,
1663			    chans[0].samples[c][j].temp,
1664			    chans[0].samples[c][j].gain,
1665			    chans[0].samples[c][j].power,
1666			    chans[0].samples[c][j].pa_det);
1667		}
1668	}
1669	printf("chan2 num=%d\n", chans[1].num);
1670	for (c = 0; c < 2; c++) {
1671		for (j = 0; j < IWN_NSAMPLES; j++) {
1672			printf("chain %d, sample %d: temp=%d gain=%d "
1673			    "power=%d pa_det=%d\n", c, j,
1674			    chans[1].samples[c][j].temp,
1675			    chans[1].samples[c][j].gain,
1676			    chans[1].samples[c][j].power,
1677			    chans[1].samples[c][j].pa_det);
1678		}
1679	}
1680}
1681#endif
1682
1683static void
1684iwn5000_read_eeprom(struct iwn_softc *sc)
1685{
1686	struct iwn5000_eeprom_calib_hdr hdr;
1687	int32_t temp, volt;
1688	uint32_t addr, base;
1689	int i;
1690	uint16_t val;
1691
1692	/* Read regulatory domain (4 ASCII characters.) */
1693	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1694	base = le16toh(val);
1695	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
1696	    sc->eeprom_domain, 4);
1697
1698	/* Read the list of authorized channels (20MHz ones only.) */
1699	for (i = 0; i < 5; i++) {
1700		addr = base + iwn5000_regulatory_bands[i];
1701		iwn_read_eeprom_channels(sc, i, addr);
1702	}
1703
1704	/* Read enhanced TX power information for 6000 Series. */
1705	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1706		iwn_read_eeprom_enhinfo(sc);
1707
1708	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
1709	base = le16toh(val);
1710	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
1711	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
1712	    "%s: calib version=%u pa type=%u voltage=%u\n",
1713	    __func__, hdr.version, hdr.pa_type, le16toh(hdr.volt));
1714	sc->calib_ver = hdr.version;
1715
1716	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
1717		/* Compute temperature offset. */
1718		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1719		temp = le16toh(val);
1720		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
1721		volt = le16toh(val);
1722		sc->temp_off = temp - (volt / -5);
1723		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
1724		    temp, volt, sc->temp_off);
1725	}
1726}
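
/*
 * Worked example (hypothetical raw values, for illustration only) of the
 * 5150 temperature offset computed above: with temp = 1000 and volt = 2000
 * as read from the EEPROM, temp_off = temp - (volt / -5) =
 * 1000 - (-400) = 1400.
 */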
1727
1728/*
1729 * Translate EEPROM flags to net80211.
1730 */
1731static uint32_t
1732iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
1733{
1734	uint32_t nflags;
1735
1736	nflags = 0;
1737	if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
1738		nflags |= IEEE80211_CHAN_PASSIVE;
1739	if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
1740		nflags |= IEEE80211_CHAN_NOADHOC;
1741	if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
1742		nflags |= IEEE80211_CHAN_DFS;
1743		/* XXX apparently IBSS may still be marked */
1744		nflags |= IEEE80211_CHAN_NOADHOC;
1745	}
1746
1747	return nflags;
1748}
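
#if 0	/* Illustrative sketch only; not compiled. */
/*
 * A hedged example of the translation above; the helper name is made up.
 * An EEPROM entry that is valid and radar-detect only (IWN_EEPROM_CHAN_ACTIVE
 * and IWN_EEPROM_CHAN_IBSS clear, IWN_EEPROM_CHAN_RADAR set) maps to a
 * passive, no-adhoc, DFS channel.
 */
static void
iwn_eeprom_channel_flags_example(void)
{
	struct iwn_eeprom_chan chan;

	memset(&chan, 0, sizeof chan);
	chan.flags = IWN_EEPROM_CHAN_VALID | IWN_EEPROM_CHAN_RADAR;
	/*
	 * Expected result: IEEE80211_CHAN_PASSIVE | IEEE80211_CHAN_NOADHOC |
	 * IEEE80211_CHAN_DFS.
	 */
	(void)iwn_eeprom_channel_flags(&chan);
}
#endif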
1749
1750static void
1751iwn_read_eeprom_band(struct iwn_softc *sc, int n)
1752{
1753	struct ifnet *ifp = sc->sc_ifp;
1754	struct ieee80211com *ic = ifp->if_l2com;
1755	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1756	const struct iwn_chan_band *band = &iwn_bands[n];
1757	struct ieee80211_channel *c;
1758	int i, chan, nflags;
1759
1760	for (i = 0; i < band->nchan; i++) {
1761		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
1762			DPRINTF(sc, IWN_DEBUG_RESET,
1763			    "skip chan %d flags 0x%x maxpwr %d\n",
1764			    band->chan[i], channels[i].flags,
1765			    channels[i].maxpwr);
1766			continue;
1767		}
1768		chan = band->chan[i];
1769		nflags = iwn_eeprom_channel_flags(&channels[i]);
1770
1771		DPRINTF(sc, IWN_DEBUG_RESET,
1772		    "add chan %d flags 0x%x maxpwr %d\n",
1773		    chan, channels[i].flags, channels[i].maxpwr);
1774
1775		c = &ic->ic_channels[ic->ic_nchans++];
1776		c->ic_ieee = chan;
1777		c->ic_maxregpower = channels[i].maxpwr;
1778		c->ic_maxpower = 2*c->ic_maxregpower;
1779
1780		/* Save maximum allowed TX power for this channel. */
1781		sc->maxpwr[chan] = channels[i].maxpwr;
1782
1783		if (n == 0) {	/* 2GHz band */
1784			c->ic_freq = ieee80211_ieee2mhz(chan,
1785			    IEEE80211_CHAN_G);
1786
			/* G implies B is supported. */
1788			c->ic_flags = IEEE80211_CHAN_B | nflags;
1789
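			/* Duplicate the 11b entry just added for 11g. */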
1790			c = &ic->ic_channels[ic->ic_nchans++];
1791			c[0] = c[-1];
1792			c->ic_flags = IEEE80211_CHAN_G | nflags;
1793		} else {	/* 5GHz band */
1794			c->ic_freq = ieee80211_ieee2mhz(chan,
1795			    IEEE80211_CHAN_A);
1796			c->ic_flags = IEEE80211_CHAN_A | nflags;
1797			sc->sc_flags |= IWN_FLAG_HAS_5GHZ;
1798		}
1799#if 0	/* HT */
1800		/* XXX no constraints on using HT20 */
1801		/* add HT20, HT40 added separately */
1802		c = &ic->ic_channels[ic->ic_nchans++];
1803		c[0] = c[-1];
1804		c->ic_flags |= IEEE80211_CHAN_HT20;
1805		/* XXX NARROW =>'s 1/2 and 1/4 width? */
1806#endif
1807	}
1808}
1809
1810#if 0	/* HT */
1811static void
1812iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
1813{
1814	struct ifnet *ifp = sc->sc_ifp;
1815	struct ieee80211com *ic = ifp->if_l2com;
1816	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1817	const struct iwn_chan_band *band = &iwn_bands[n];
1818	struct ieee80211_channel *c, *cent, *extc;
1819	int i;
1820
1821	for (i = 0; i < band->nchan; i++) {
1822		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID) ||
1823		    !(channels[i].flags & IWN_EEPROM_CHAN_WIDE)) {
1824			DPRINTF(sc, IWN_DEBUG_RESET,
1825			    "skip chan %d flags 0x%x maxpwr %d\n",
1826			    band->chan[i], channels[i].flags,
1827			    channels[i].maxpwr);
1828			continue;
1829		}
1830		/*
1831		 * Each entry defines an HT40 channel pair; find the
1832		 * center channel, then the extension channel above.
1833		 */
1834		cent = ieee80211_find_channel_byieee(ic, band->chan[i],
1835		    band->flags & ~IEEE80211_CHAN_HT);
1836		if (cent == NULL) {	/* XXX shouldn't happen */
1837			device_printf(sc->sc_dev,
1838			    "%s: no entry for channel %d\n",
1839			    __func__, band->chan[i]);
1840			continue;
1841		}
1842		extc = ieee80211_find_channel(ic, cent->ic_freq+20,
1843		    band->flags & ~IEEE80211_CHAN_HT);
1844		if (extc == NULL) {
1845			DPRINTF(sc, IWN_DEBUG_RESET,
1846			    "skip chan %d, extension channel not found\n",
1847			    band->chan[i]);
1848			continue;
1849		}
1850
1851		DPRINTF(sc, IWN_DEBUG_RESET,
1852		    "add ht40 chan %d flags 0x%x maxpwr %d\n",
1853		    band->chan[i], channels[i].flags, channels[i].maxpwr);
1854
1855		c = &ic->ic_channels[ic->ic_nchans++];
1856		c[0] = cent[0];
1857		c->ic_extieee = extc->ic_ieee;
1858		c->ic_flags &= ~IEEE80211_CHAN_HT;
1859		c->ic_flags |= IEEE80211_CHAN_HT40U;
1860		c = &ic->ic_channels[ic->ic_nchans++];
1861		c[0] = extc[0];
1862		c->ic_extieee = cent->ic_ieee;
1863		c->ic_flags &= ~IEEE80211_CHAN_HT;
1864		c->ic_flags |= IEEE80211_CHAN_HT40D;
1865	}
1866}
1867#endif
1868
1869static void
1870iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
1871{
1872	struct ifnet *ifp = sc->sc_ifp;
1873	struct ieee80211com *ic = ifp->if_l2com;
1874
1875	iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
1876	    iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));
1877
1878	if (n < 5)
1879		iwn_read_eeprom_band(sc, n);
1880#if 0	/* HT */
1881	else
1882		iwn_read_eeprom_ht40(sc, n);
1883#endif
1884	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
1885}
1886
1887#define nitems(_a)	(sizeof((_a)) / sizeof((_a)[0]))
1888
1889static void
1890iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
1891{
1892	struct iwn_eeprom_enhinfo enhinfo[35];
1893	uint16_t val, base;
1894	int8_t maxpwr;
1895	int i;
1896
1897	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1898	base = le16toh(val);
1899	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
1900	    enhinfo, sizeof enhinfo);
1901
1902	memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr);
1903	for (i = 0; i < nitems(enhinfo); i++) {
1904		if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0)
1905			continue;	/* Skip invalid entries. */
1906
1907		maxpwr = 0;
1908		if (sc->txchainmask & IWN_ANT_A)
1909			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
1910		if (sc->txchainmask & IWN_ANT_B)
1911			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
1912		if (sc->txchainmask & IWN_ANT_C)
1913			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
1914		if (sc->ntxchains == 2)
1915			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
1916		else if (sc->ntxchains == 3)
1917			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
1918		maxpwr /= 2;	/* Convert half-dBm to dBm. */
1919
1920		DPRINTF(sc, IWN_DEBUG_RESET, "enhinfo %d, maxpwr=%d\n", i,
1921		    maxpwr);
1922		sc->enh_maxpwr[i] = maxpwr;
1923	}
1924}
1925
1926static struct ieee80211_node *
1927iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
1928{
	return malloc(sizeof (struct iwn_node), M_80211_NODE,
	    M_NOWAIT | M_ZERO);
1930}
1931
1932static void
1933iwn_newassoc(struct ieee80211_node *ni, int isnew)
1934{
1935	/* XXX move */
1936	ieee80211_ratectl_node_init(ni);
1937}
1938
1939static int
1940iwn_media_change(struct ifnet *ifp)
1941{
1942	int error = ieee80211_media_change(ifp);
1943	/* NB: only the fixed rate can change and that doesn't need a reset */
1944	return (error == ENETRESET ? 0 : error);
1945}
1946
1947static int
1948iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
1949{
1950	struct iwn_vap *ivp = IWN_VAP(vap);
1951	struct ieee80211com *ic = vap->iv_ic;
1952	struct iwn_softc *sc = ic->ic_ifp->if_softc;
1953	int error;
1954
1955	DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
1956		ieee80211_state_name[vap->iv_state],
1957		ieee80211_state_name[nstate]);
1958
1959	IEEE80211_UNLOCK(ic);
1960	IWN_LOCK(sc);
1961	callout_stop(&sc->sc_timer_to);
1962
1963	switch (nstate) {
1964	case IEEE80211_S_ASSOC:
1965		if (vap->iv_state != IEEE80211_S_RUN)
1966			break;
1967		/* FALLTHROUGH */
1968	case IEEE80211_S_AUTH:
1969		if (vap->iv_state == IEEE80211_S_AUTH)
1970			break;
1971
1972		/*
1973		 * !AUTH -> AUTH transition requires state reset to handle
1974		 * reassociations correctly.
1975		 */
1976		sc->rxon.associd = 0;
1977		sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
1978		iwn_calib_reset(sc);
1979		error = iwn_auth(sc, vap);
1980		break;
1981
1982	case IEEE80211_S_RUN:
1983		/*
		 * RUN -> RUN transition; just restart the timers.
1985		 */
1986		if (vap->iv_state == IEEE80211_S_RUN &&
1987		    vap->iv_opmode != IEEE80211_M_MONITOR) {
1988			iwn_calib_reset(sc);
1989			break;
1990		}
1991
1992		/*
1993		 * !RUN -> RUN requires setting the association id
1994		 * which is done with a firmware cmd.  We also defer
1995		 * starting the timers until that work is done.
1996		 */
1997		error = iwn_run(sc, vap);
1998		break;
1999
2000	default:
2001		break;
2002	}
2003	IWN_UNLOCK(sc);
2004	IEEE80211_LOCK(ic);
2005	return ivp->iv_newstate(vap, nstate, arg);
2006}
2007
2008/*
2009 * Process an RX_PHY firmware notification.  This is usually immediately
2010 * followed by an MPDU_RX_DONE notification.
2011 */
2012static void
2013iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2014    struct iwn_rx_data *data)
2015{
2016	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
2017
2018	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
2019	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2020
2021	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
2022	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
2023	sc->last_rx_valid = 1;
2024}
2025
2026static void
2027iwn_timer_timeout(void *arg)
2028{
2029	struct iwn_softc *sc = arg;
2030	uint32_t flags = 0;
2031
2032	IWN_LOCK_ASSERT(sc);
2033
2034	if (sc->calib_cnt && --sc->calib_cnt == 0) {
2035		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
2036		    "send statistics request");
2037		(void) iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
2038		    sizeof flags, 1);
2039		sc->calib_cnt = 60;	/* do calibration every 60s */
2040	}
2041	iwn_watchdog(sc);		/* NB: piggyback tx watchdog */
2042	callout_reset(&sc->sc_timer_to, hz, iwn_timer_timeout, sc);
2043}
2044
2045static void
2046iwn_calib_reset(struct iwn_softc *sc)
2047{
2048	callout_reset(&sc->sc_timer_to, hz, iwn_timer_timeout, sc);
2049	sc->calib_cnt = 60;		/* do calibration every 60s */
2050}
2051
2052/*
2053 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
2054 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2055 */
2056static void
2057iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2058    struct iwn_rx_data *data)
2059{
2060	const struct iwn_hal *hal = sc->sc_hal;
2061	struct ifnet *ifp = sc->sc_ifp;
2062	struct ieee80211com *ic = ifp->if_l2com;
2063	struct iwn_rx_ring *ring = &sc->rxq;
2064	struct ieee80211_frame *wh;
2065	struct ieee80211_node *ni;
2066	struct mbuf *m, *m1;
2067	struct iwn_rx_stat *stat;
2068	caddr_t head;
2069	bus_addr_t paddr;
2070	uint32_t flags;
2071	int error, len, rssi, nf;
2072
2073	if (desc->type == IWN_MPDU_RX_DONE) {
2074		/* Check for prior RX_PHY notification. */
2075		if (!sc->last_rx_valid) {
2076			DPRINTF(sc, IWN_DEBUG_ANY,
2077			    "%s: missing RX_PHY\n", __func__);
2078			ifp->if_ierrors++;
2079			return;
2080		}
2081		sc->last_rx_valid = 0;
2082		stat = &sc->last_rx_stat;
2083	} else
2084		stat = (struct iwn_rx_stat *)(desc + 1);
2085
2086	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2087
2088	if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2089		device_printf(sc->sc_dev,
2090		    "%s: invalid rx statistic header, len %d\n",
2091		    __func__, stat->cfg_phy_len);
2092		ifp->if_ierrors++;
2093		return;
2094	}
2095	if (desc->type == IWN_MPDU_RX_DONE) {
2096		struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
2097		head = (caddr_t)(mpdu + 1);
2098		len = le16toh(mpdu->len);
2099	} else {
2100		head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
2101		len = le16toh(stat->len);
2102	}
2103
2104	flags = le32toh(*(uint32_t *)(head + len));
2105
2106	/* Discard frames with a bad FCS early. */
2107	if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
2108		DPRINTF(sc, IWN_DEBUG_RECV, "%s: rx flags error %x\n",
2109		    __func__, flags);
2110		ifp->if_ierrors++;
2111		return;
2112	}
2113	/* Discard frames that are too short. */
2114	if (len < sizeof (*wh)) {
2115		DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
2116		    __func__, len);
2117		ifp->if_ierrors++;
2118		return;
2119	}
2120
2121	/* XXX don't need mbuf, just dma buffer */
2122	m1 = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
2123	if (m1 == NULL) {
2124		DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
2125		    __func__);
2126		ifp->if_ierrors++;
2127		return;
2128	}
2129	bus_dmamap_unload(ring->data_dmat, data->map);
2130
2131	error = bus_dmamap_load(ring->data_dmat, data->map,
2132	    mtod(m1, caddr_t), MJUMPAGESIZE,
2133	    iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
2134	if (error != 0 && error != EFBIG) {
2135		device_printf(sc->sc_dev,
2136		    "%s: bus_dmamap_load failed, error %d\n", __func__, error);
2137		m_freem(m1);
2138		ifp->if_ierrors++;
2139		return;
2140	}
2141
2142	m = data->m;
2143	data->m = m1;
2144	/* Update RX descriptor. */
2145	ring->desc[ring->cur] = htole32(paddr >> 8);
2146	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2147	    BUS_DMASYNC_PREWRITE);
2148
2149	/* Finalize mbuf. */
2150	m->m_pkthdr.rcvif = ifp;
2151	m->m_data = head;
2152	m->m_pkthdr.len = m->m_len = len;
2153
2154	rssi = hal->get_rssi(sc, stat);
2155
2156	/* Grab a reference to the source node. */
2157	wh = mtod(m, struct ieee80211_frame *);
2158	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2159	nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
2160	    (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;
2161
2162	if (ieee80211_radiotap_active(ic)) {
2163		struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
2164
2165		tap->wr_tsft = htole64(stat->tstamp);
2166		tap->wr_flags = 0;
2167		if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
2168			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2169		switch (stat->rate) {
2170		/* CCK rates. */
2171		case  10: tap->wr_rate =   2; break;
2172		case  20: tap->wr_rate =   4; break;
2173		case  55: tap->wr_rate =  11; break;
2174		case 110: tap->wr_rate =  22; break;
2175		/* OFDM rates. */
2176		case 0xd: tap->wr_rate =  12; break;
2177		case 0xf: tap->wr_rate =  18; break;
2178		case 0x5: tap->wr_rate =  24; break;
2179		case 0x7: tap->wr_rate =  36; break;
2180		case 0x9: tap->wr_rate =  48; break;
2181		case 0xb: tap->wr_rate =  72; break;
2182		case 0x1: tap->wr_rate =  96; break;
2183		case 0x3: tap->wr_rate = 108; break;
2184		/* Unknown rate: should not happen. */
2185		default:  tap->wr_rate =   0;
2186		}
2187		tap->wr_dbm_antsignal = rssi;
2188		tap->wr_dbm_antnoise = nf;
2189	}
2190
2191	IWN_UNLOCK(sc);
2192
2193	/* Send the frame to the 802.11 layer. */
2194	if (ni != NULL) {
2195		(void) ieee80211_input(ni, m, rssi - nf, nf);
2196		/* Node is no longer needed. */
2197		ieee80211_free_node(ni);
2198	} else
2199		(void) ieee80211_input_all(ic, m, rssi - nf, nf);
2200
2201	IWN_LOCK(sc);
2202}
2203
2204#if 0	/* HT */
2205/* Process an incoming Compressed BlockAck. */
2206static void
2207iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2208    struct iwn_rx_data *data)
2209{
2210	struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
2211	struct iwn_tx_ring *txq;
2212
2213	txq = &sc->txq[letoh16(ba->qid)];
2214	/* XXX TBD */
2215}
2216#endif
2217
2218/*
2219 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
2220 * The latter is sent by the firmware after each received beacon.
2221 */
2222static void
2223iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2224    struct iwn_rx_data *data)
2225{
2226	const struct iwn_hal *hal = sc->sc_hal;
2227	struct ifnet *ifp = sc->sc_ifp;
2228	struct ieee80211com *ic = ifp->if_l2com;
2229	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2230	struct iwn_calib_state *calib = &sc->calib;
2231	struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
2232	int temp;
2233
2234	/* Beacon stats are meaningful only when associated and not scanning. */
2235	if (vap->iv_state != IEEE80211_S_RUN ||
2236	    (ic->ic_flags & IEEE80211_F_SCAN))
2237		return;
2238
2239	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2240	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: cmd %d\n", __func__, desc->type);
2241	iwn_calib_reset(sc);	/* Reset TX power calibration timeout. */
2242
2243	/* Test if temperature has changed. */
2244	if (stats->general.temp != sc->rawtemp) {
2245		/* Convert "raw" temperature to degC. */
2246		sc->rawtemp = stats->general.temp;
2247		temp = hal->get_temperature(sc);
2248		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
2249		    __func__, temp);
2250
2251		/* Update TX power if need be (4965AGN only.) */
2252		if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2253			iwn4965_power_calibration(sc, temp);
2254	}
2255
2256	if (desc->type != IWN_BEACON_STATISTICS)
2257		return;	/* Reply to a statistics request. */
2258
2259	sc->noise = iwn_get_noise(&stats->rx.general);
2260	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);
2261
2262	/* Test that RSSI and noise are present in stats report. */
2263	if (le32toh(stats->rx.general.flags) != 1) {
2264		DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
2265		    "received statistics without RSSI");
2266		return;
2267	}
2268
2269	if (calib->state == IWN_CALIB_STATE_ASSOC)
2270		iwn_collect_noise(sc, &stats->rx.general);
2271	else if (calib->state == IWN_CALIB_STATE_RUN)
2272		iwn_tune_sensitivity(sc, &stats->rx);
2273}
2274
2275/*
2276 * Process a TX_DONE firmware notification.  Unfortunately, the 4965AGN
 * and 5000 adapters have different, incompatible TX status formats.
2278 */
2279static void
2280iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2281    struct iwn_rx_data *data)
2282{
2283	struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2284	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2285
2286	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2287	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2288	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
2289	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
2290	    le32toh(stat->status));
2291
2292	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2293	iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff);
2294}
2295
2296static void
2297iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2298    struct iwn_rx_data *data)
2299{
2300	struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2301	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2302
2303	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2304	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2305	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
2306	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
2307	    le32toh(stat->status));
2308
2309#ifdef notyet
2310	/* Reset TX scheduler slot. */
2311	iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
2312#endif
2313
2314	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2315	iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff);
2316}
2317
2318/*
2319 * Adapter-independent backend for TX_DONE firmware notifications.
2320 */
2321static void
2322iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
2323    uint8_t status)
2324{
2325	struct ifnet *ifp = sc->sc_ifp;
2326	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2327	struct iwn_tx_data *data = &ring->data[desc->idx];
2328	struct mbuf *m;
2329	struct ieee80211_node *ni;
2330	struct ieee80211vap *vap;
2331
2332	KASSERT(data->ni != NULL, ("no node"));
2333
2334	/* Unmap and free mbuf. */
2335	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
2336	bus_dmamap_unload(ring->data_dmat, data->map);
2337	m = data->m, data->m = NULL;
2338	ni = data->ni, data->ni = NULL;
2339	vap = ni->ni_vap;
2340
2341	if (m->m_flags & M_TXCB) {
2342		/*
2343		 * Channels marked for "radar" require traffic to be received
2344		 * to unlock before we can transmit.  Until traffic is seen
2345		 * any attempt to transmit is returned immediately with status
2346		 * set to IWN_TX_FAIL_TX_LOCKED.  Unfortunately this can easily
		 * happen on first authenticate after scanning.  To work around
2348		 * this we ignore a failure of this sort in AUTH state so the
2349		 * 802.11 layer will fall back to using a timeout to wait for
2350		 * the AUTH reply.  This allows the firmware time to see
2351		 * traffic so a subsequent retry of AUTH succeeds.  It's
2352		 * unclear why the firmware does not maintain state for
2353		 * channels recently visited as this would allow immediate
2354		 * use of the channel after a scan (where we see traffic).
2355		 */
2356		if (status == IWN_TX_FAIL_TX_LOCKED &&
2357		    ni->ni_vap->iv_state == IEEE80211_S_AUTH)
2358			ieee80211_process_callback(ni, m, 0);
2359		else
2360			ieee80211_process_callback(ni, m,
2361			    (status & IWN_TX_FAIL) != 0);
2362	}
2363
2364	/*
2365	 * Update rate control statistics for the node.
2366	 */
2367	if (status & 0x80) {
2368		ifp->if_oerrors++;
2369		ieee80211_ratectl_tx_complete(vap, ni,
2370		    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2371	} else {
2372		ieee80211_ratectl_tx_complete(vap, ni,
2373		    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2374	}
2375	m_freem(m);
2376	ieee80211_free_node(ni);
2377
2378	sc->sc_tx_timer = 0;
2379	if (--ring->queued < IWN_TX_RING_LOMARK) {
2380		sc->qfullmsk &= ~(1 << ring->qid);
2381		if (sc->qfullmsk == 0 &&
2382		    (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
2383			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2384			iwn_start_locked(ifp);
2385		}
2386	}
2387}
2388
2389/*
2390 * Process a "command done" firmware notification.  This is where we wakeup
2391 * processes waiting for a synchronous command completion.
2392 */
2393static void
2394iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2395{
2396	struct iwn_tx_ring *ring = &sc->txq[4];
2397	struct iwn_tx_data *data;
2398
2399	if ((desc->qid & 0xf) != 4)
2400		return;	/* Not a command ack. */
2401
2402	data = &ring->data[desc->idx];
2403
2404	/* If the command was mapped in an mbuf, free it. */
2405	if (data->m != NULL) {
2406		bus_dmamap_unload(ring->data_dmat, data->map);
2407		m_freem(data->m);
2408		data->m = NULL;
2409	}
2410	wakeup(&ring->desc[desc->idx]);
2411}
2412
2413/*
2414 * Process an INT_FH_RX or INT_SW_RX interrupt.
2415 */
2416static void
2417iwn_notif_intr(struct iwn_softc *sc)
2418{
2419	struct ifnet *ifp = sc->sc_ifp;
2420	struct ieee80211com *ic = ifp->if_l2com;
2421	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2422	uint16_t hw;
2423
2424	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
2425	    BUS_DMASYNC_POSTREAD);
2426
2427	hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
2428	while (sc->rxq.cur != hw) {
2429		struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2430		struct iwn_rx_desc *desc;
2431
2432		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2433		    BUS_DMASYNC_POSTREAD);
2434		desc = mtod(data->m, struct iwn_rx_desc *);
2435
2436		DPRINTF(sc, IWN_DEBUG_RECV,
2437		    "%s: qid %x idx %d flags %x type %d(%s) len %d\n",
2438		    __func__, desc->qid & 0xf, desc->idx, desc->flags,
2439		    desc->type, iwn_intr_str(desc->type),
2440		    le16toh(desc->len));
2441
2442		if (!(desc->qid & 0x80))	/* Reply to a command. */
2443			iwn_cmd_done(sc, desc);
2444
2445		switch (desc->type) {
2446		case IWN_RX_PHY:
2447			iwn_rx_phy(sc, desc, data);
2448			break;
2449
2450		case IWN_RX_DONE:		/* 4965AGN only. */
2451		case IWN_MPDU_RX_DONE:
2452			/* An 802.11 frame has been received. */
2453			iwn_rx_done(sc, desc, data);
2454			break;
2455
2456#if 0	/* HT */
2457		case IWN_RX_COMPRESSED_BA:
2458			/* A Compressed BlockAck has been received. */
2459			iwn_rx_compressed_ba(sc, desc, data);
2460			break;
2461#endif
2462
2463		case IWN_TX_DONE:
2464			/* An 802.11 frame has been transmitted. */
2465			sc->sc_hal->tx_done(sc, desc, data);
2466			break;
2467
2468		case IWN_RX_STATISTICS:
2469		case IWN_BEACON_STATISTICS:
2470			iwn_rx_statistics(sc, desc, data);
2471			break;
2472
2473		case IWN_BEACON_MISSED:
2474		{
2475			struct iwn_beacon_missed *miss =
2476			    (struct iwn_beacon_missed *)(desc + 1);
2477			int misses;
2478
2479			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2480			    BUS_DMASYNC_POSTREAD);
2481			misses = le32toh(miss->consecutive);
2482
2483			/* XXX not sure why we're notified w/ zero */
2484			if (misses == 0)
2485				break;
2486			DPRINTF(sc, IWN_DEBUG_STATE,
2487			    "%s: beacons missed %d/%d\n", __func__,
2488			    misses, le32toh(miss->total));
2489
2490			/*
2491			 * If more than 5 consecutive beacons are missed,
2492			 * reinitialize the sensitivity state machine.
2493			 */
2494			if (vap->iv_state == IEEE80211_S_RUN && misses > 5)
2495				(void) iwn_init_sensitivity(sc);
2496			if (misses >= vap->iv_bmissthreshold) {
2497				IWN_UNLOCK(sc);
2498				ieee80211_beacon_miss(ic);
2499				IWN_LOCK(sc);
2500			}
2501			break;
2502		}
2503		case IWN_UC_READY:
2504		{
2505			struct iwn_ucode_info *uc =
2506			    (struct iwn_ucode_info *)(desc + 1);
2507
2508			/* The microcontroller is ready. */
2509			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2510			    BUS_DMASYNC_POSTREAD);
2511			DPRINTF(sc, IWN_DEBUG_RESET,
2512			    "microcode alive notification version=%d.%d "
2513			    "subtype=%x alive=%x\n", uc->major, uc->minor,
2514			    uc->subtype, le32toh(uc->valid));
2515
2516			if (le32toh(uc->valid) != 1) {
2517				device_printf(sc->sc_dev,
				    "microcontroller initialization failed\n");
2519				break;
2520			}
2521			if (uc->subtype == IWN_UCODE_INIT) {
2522				/* Save microcontroller report. */
2523				memcpy(&sc->ucode_info, uc, sizeof (*uc));
2524			}
2525			/* Save the address of the error log in SRAM. */
2526			sc->errptr = le32toh(uc->errptr);
2527			break;
2528		}
2529		case IWN_STATE_CHANGED:
2530		{
2531			uint32_t *status = (uint32_t *)(desc + 1);
2532
2533			/*
			 * A state change lets us note a hardware switch
			 * change.  However, we handle this in iwn_intr()
			 * since we get both the enable and disable
			 * interrupts there.
2537			 */
2538			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2539			    BUS_DMASYNC_POSTREAD);
2540			DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n",
2541			    le32toh(*status));
2542			break;
2543		}
2544		case IWN_START_SCAN:
2545		{
2546			struct iwn_start_scan *scan =
2547			    (struct iwn_start_scan *)(desc + 1);
2548
2549			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2550			    BUS_DMASYNC_POSTREAD);
2551			DPRINTF(sc, IWN_DEBUG_ANY,
2552			    "%s: scanning channel %d status %x\n",
2553			    __func__, scan->chan, le32toh(scan->status));
2554			break;
2555		}
2556		case IWN_STOP_SCAN:
2557		{
2558			struct iwn_stop_scan *scan =
2559			    (struct iwn_stop_scan *)(desc + 1);
2560
2561			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2562			    BUS_DMASYNC_POSTREAD);
2563			DPRINTF(sc, IWN_DEBUG_STATE,
2564			    "scan finished nchan=%d status=%d chan=%d\n",
2565			    scan->nchan, scan->status, scan->chan);
2566
2567			IWN_UNLOCK(sc);
2568			ieee80211_scan_next(vap);
2569			IWN_LOCK(sc);
2570			break;
2571		}
2572		case IWN5000_CALIBRATION_RESULT:
2573			iwn5000_rx_calib_result(sc, desc, data);
2574			break;
2575
2576		case IWN5000_CALIBRATION_DONE:
2577			sc->sc_flags |= IWN_FLAG_CALIB_DONE;
2578			wakeup(sc);
2579			break;
2580		}
2581
2582		sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
2583	}
2584
2585	/* Tell the firmware what we have processed. */
2586	hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
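	/* NB: the write pointer apparently must stay 8-aligned for the FH. */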
2587	IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
2588}
2589
2590/*
2591 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
2592 * from power-down sleep mode.
2593 */
2594static void
2595iwn_wakeup_intr(struct iwn_softc *sc)
2596{
2597	int qid;
2598
2599	DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
2600	    __func__);
2601
2602	/* Wakeup RX and TX rings. */
2603	IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
2604	for (qid = 0; qid < sc->sc_hal->ntxqs; qid++) {
2605		struct iwn_tx_ring *ring = &sc->txq[qid];
2606		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
2607	}
2608}
2609
2610static void
2611iwn_rftoggle_intr(struct iwn_softc *sc)
2612{
2613	struct ifnet *ifp = sc->sc_ifp;
2614	struct ieee80211com *ic = ifp->if_l2com;
2615	uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);
2616
2617	IWN_LOCK_ASSERT(sc);
2618
2619	device_printf(sc->sc_dev, "RF switch: radio %s\n",
2620	    (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
2621	if (tmp & IWN_GP_CNTRL_RFKILL)
2622		ieee80211_runtask(ic, &sc->sc_radioon_task);
2623	else
2624		ieee80211_runtask(ic, &sc->sc_radiooff_task);
2625}
2626
2627/*
 * Dump the firmware error log when a firmware panic occurs.  Although we
 * can't debug the firmware because it is neither open source nor free, the
 * log can help us identify certain classes of problems.
2631 */
2632static void
2633iwn_fatal_intr(struct iwn_softc *sc)
2634{
2635	const struct iwn_hal *hal = sc->sc_hal;
2636	struct iwn_fw_dump dump;
2637	int i;
2638
2639	IWN_LOCK_ASSERT(sc);
2640
2641	/* Force a complete recalibration on next init. */
2642	sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
2643
2644	/* Check that the error log address is valid. */
2645	if (sc->errptr < IWN_FW_DATA_BASE ||
2646	    sc->errptr + sizeof (dump) >
2647	    IWN_FW_DATA_BASE + hal->fw_data_maxsz) {
2648		printf("%s: bad firmware error log address 0x%08x\n",
2649		    __func__, sc->errptr);
2650		return;
2651	}
2652	if (iwn_nic_lock(sc) != 0) {
2653		printf("%s: could not read firmware error log\n",
2654		    __func__);
2655		return;
2656	}
2657	/* Read firmware error log from SRAM. */
2658	iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
2659	    sizeof (dump) / sizeof (uint32_t));
2660	iwn_nic_unlock(sc);
2661
2662	if (dump.valid == 0) {
2663		printf("%s: firmware error log is empty\n",
2664		    __func__);
2665		return;
2666	}
2667	printf("firmware error log:\n");
2668	printf("  error type      = \"%s\" (0x%08X)\n",
2669	    (dump.id < nitems(iwn_fw_errmsg)) ?
2670		iwn_fw_errmsg[dump.id] : "UNKNOWN",
2671	    dump.id);
2672	printf("  program counter = 0x%08X\n", dump.pc);
2673	printf("  source line     = 0x%08X\n", dump.src_line);
2674	printf("  error data      = 0x%08X%08X\n",
2675	    dump.error_data[0], dump.error_data[1]);
2676	printf("  branch link     = 0x%08X%08X\n",
2677	    dump.branch_link[0], dump.branch_link[1]);
2678	printf("  interrupt link  = 0x%08X%08X\n",
2679	    dump.interrupt_link[0], dump.interrupt_link[1]);
2680	printf("  time            = %u\n", dump.time[0]);
2681
2682	/* Dump driver status (TX and RX rings) while we're here. */
2683	printf("driver status:\n");
2684	for (i = 0; i < hal->ntxqs; i++) {
2685		struct iwn_tx_ring *ring = &sc->txq[i];
2686		printf("  tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
2687		    i, ring->qid, ring->cur, ring->queued);
2688	}
2689	printf("  rx ring: cur=%d\n", sc->rxq.cur);
2690}
2691
2692static void
2693iwn_intr(void *arg)
2694{
2695	struct iwn_softc *sc = arg;
2696	struct ifnet *ifp = sc->sc_ifp;
2697	uint32_t r1, r2, tmp;
2698
2699	IWN_LOCK(sc);
2700
2701	/* Disable interrupts. */
2702	IWN_WRITE(sc, IWN_INT_MASK, 0);
2703
2704	/* Read interrupts from ICT (fast) or from registers (slow). */
2705	if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2706		tmp = 0;
2707		while (sc->ict[sc->ict_cur] != 0) {
2708			tmp |= sc->ict[sc->ict_cur];
2709			sc->ict[sc->ict_cur] = 0;	/* Acknowledge. */
2710			sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
2711		}
2712		tmp = le32toh(tmp);
2713		if (tmp == 0xffffffff)	/* Shouldn't happen. */
2714			tmp = 0;
2715		else if (tmp & 0xc0000)	/* Workaround a HW bug. */
2716			tmp |= 0x8000;
2717		r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
2718		r2 = 0;	/* Unused. */
2719	} else {
2720		r1 = IWN_READ(sc, IWN_INT);
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) {
			/* Hardware gone!  Drop the lock taken above. */
			IWN_UNLOCK(sc);
			return;
		}
2723		r2 = IWN_READ(sc, IWN_FH_INT);
2724	}
2725
2726	DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=%x reg2=%x\n", r1, r2);
2727
2728	if (r1 == 0 && r2 == 0)
2729		goto done;	/* Interrupt not for us. */
2730
2731	/* Acknowledge interrupts. */
2732	IWN_WRITE(sc, IWN_INT, r1);
2733	if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
2734		IWN_WRITE(sc, IWN_FH_INT, r2);
2735
2736	if (r1 & IWN_INT_RF_TOGGLED) {
2737		iwn_rftoggle_intr(sc);
2738		goto done;
2739	}
2740	if (r1 & IWN_INT_CT_REACHED) {
2741		device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
2742		    __func__);
2743	}
2744	if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
2745		iwn_fatal_intr(sc);
2746		ifp->if_flags &= ~IFF_UP;
2747		iwn_stop_locked(sc);
2748		goto done;
2749	}
2750	if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
2751	    (r2 & IWN_FH_INT_RX)) {
2752		if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2753			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
2754				IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
2755			IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2756			    IWN_INT_PERIODIC_DIS);
2757			iwn_notif_intr(sc);
2758			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
2759				IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2760				    IWN_INT_PERIODIC_ENA);
2761			}
2762		} else
2763			iwn_notif_intr(sc);
2764	}
2765
2766	if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
2767		if (sc->sc_flags & IWN_FLAG_USE_ICT)
2768			IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
2769		wakeup(sc);	/* FH DMA transfer completed. */
2770	}
2771
2772	if (r1 & IWN_INT_ALIVE)
2773		wakeup(sc);	/* Firmware is alive. */
2774
2775	if (r1 & IWN_INT_WAKEUP)
2776		iwn_wakeup_intr(sc);
2777
2778done:
2779	/* Re-enable interrupts. */
2780	if (ifp->if_flags & IFF_UP)
2781		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
2782
2783	IWN_UNLOCK(sc);
2784}
2785
2786/*
2787 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
2788 * 5000 adapters use a slightly different format.)
2789 */
2790static void
2791iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
2792    uint16_t len)
2793{
2794	uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
2795
2796	*w = htole16(len + 8);
2797	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2798	    BUS_DMASYNC_PREWRITE);
2799	if (idx < IWN_SCHED_WINSZ) {
2800		*(w + IWN_TX_RING_COUNT) = *w;
2801		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2802		    BUS_DMASYNC_PREWRITE);
2803	}
2804}
2805
2806static void
2807iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
2808    uint16_t len)
2809{
2810	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
2811
2812	*w = htole16(id << 12 | (len + 8));
2813
2814	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2815	    BUS_DMASYNC_PREWRITE);
2816	if (idx < IWN_SCHED_WINSZ) {
2817		*(w + IWN_TX_RING_COUNT) = *w;
2818		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2819		    BUS_DMASYNC_PREWRITE);
2820	}
2821}
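
/*
 * Worked example (hypothetical values, for illustration only) of the 5000
 * scheduler entry written above: for station id 5 and a 100-byte frame,
 * the entry is htole16(5 << 12 | (100 + 8)) = htole16(0x506c), and because
 * entries with idx < IWN_SCHED_WINSZ are mirrored, the same value is also
 * stored IWN_TX_RING_COUNT slots further into the byte-count array.
 */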
2822
2823#ifdef notyet
2824static void
2825iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
2826{
2827	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
2828
2829	*w = (*w & htole16(0xf000)) | htole16(1);
2830	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2831	    BUS_DMASYNC_PREWRITE);
2832	if (idx < IWN_SCHED_WINSZ) {
2833		*(w + IWN_TX_RING_COUNT) = *w;
2834		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2835		    BUS_DMASYNC_PREWRITE);
2836	}
2837}
2838#endif
2839
2840static uint8_t
iwn_plcp_signal(int rate)
{
2842	int i;
2843
2844	for (i = 0; i < IWN_RIDX_MAX + 1; i++) {
2845		if (rate == iwn_rates[i].rate)
2846			return i;
2847	}
2848
2849	return 0;
2850}
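
/*
 * For example, assuming iwn_rates[] lists rates in the same 500 kb/s units
 * net80211 uses (2 = 1 Mb/s CCK, 12 = 6 Mb/s OFDM, ...), iwn_plcp_signal(2)
 * returns the index of the 1 Mb/s entry and iwn_plcp_signal(108) that of
 * the 54 Mb/s entry; unknown rates fall back to index 0.
 */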
2851
2852static int
2853iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
2854    struct iwn_tx_ring *ring)
2855{
2856	const struct iwn_hal *hal = sc->sc_hal;
2857	const struct ieee80211_txparam *tp;
2858	const struct iwn_rate *rinfo;
2859	struct ieee80211vap *vap = ni->ni_vap;
2860	struct ieee80211com *ic = ni->ni_ic;
2861	struct iwn_node *wn = (void *)ni;
2862	struct iwn_tx_desc *desc;
2863	struct iwn_tx_data *data;
2864	struct iwn_tx_cmd *cmd;
2865	struct iwn_cmd_data *tx;
2866	struct ieee80211_frame *wh;
2867	struct ieee80211_key *k = NULL;
2868	struct mbuf *mnew;
2869	bus_dma_segment_t segs[IWN_MAX_SCATTER];
2870	uint32_t flags;
2871	u_int hdrlen;
2872	int totlen, error, pad, nsegs = 0, i, rate;
2873	uint8_t ridx, type, txant;
2874
2875	IWN_LOCK_ASSERT(sc);
2876
2877	wh = mtod(m, struct ieee80211_frame *);
2878	hdrlen = ieee80211_anyhdrsize(wh);
2879	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2880
2881	desc = &ring->desc[ring->cur];
2882	data = &ring->data[ring->cur];
2883
2884	/* Choose a TX rate index. */
2885	tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
2886	if (type == IEEE80211_FC0_TYPE_MGT)
2887		rate = tp->mgmtrate;
2888	else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
2889		rate = tp->mcastrate;
2890	else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
2891		rate = tp->ucastrate;
2892	else {
2893		/* XXX pass pktlen */
2894		(void) ieee80211_ratectl_rate(ni, NULL, 0);
2895		rate = ni->ni_txrate;
2896	}
2897	ridx = iwn_plcp_signal(rate);
2898	rinfo = &iwn_rates[ridx];
2899
2900	/* Encrypt the frame if need be. */
2901	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2902		k = ieee80211_crypto_encap(ni, m);
2903		if (k == NULL) {
2904			m_freem(m);
2905			return ENOBUFS;
2906		}
2907		/* Packet header may have moved, reset our local pointer. */
2908		wh = mtod(m, struct ieee80211_frame *);
2909	}
2910	totlen = m->m_pkthdr.len;
2911
2912	if (ieee80211_radiotap_active_vap(vap)) {
2913		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
2914
2915		tap->wt_flags = 0;
2916		tap->wt_rate = rinfo->rate;
2917		if (k != NULL)
2918			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2919
2920		ieee80211_radiotap_tx(vap, m);
2921	}
2922
2923	/* Prepare TX firmware command. */
2924	cmd = &ring->cmd[ring->cur];
2925	cmd->code = IWN_CMD_TX_DATA;
2926	cmd->flags = 0;
2927	cmd->qid = ring->qid;
2928	cmd->idx = ring->cur;
2929
2930	tx = (struct iwn_cmd_data *)cmd->data;
2931	/* NB: No need to clear tx, all fields are reinitialized here. */
2932	tx->scratch = 0;	/* clear "scratch" area */
2933
2934	flags = 0;
2935	if (!IEEE80211_IS_MULTICAST(wh->i_addr1))
2936		flags |= IWN_TX_NEED_ACK;
2937	if ((wh->i_fc[0] &
2938	    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
2939	    (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
2940		flags |= IWN_TX_IMM_BA;		/* Cannot happen yet. */
2941
2942	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
2943		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */
2944
2945	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
2946	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2947		/* NB: Group frames are sent using CCK in 802.11b/g. */
2948		if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
2949			flags |= IWN_TX_NEED_RTS;
2950		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
2951		    ridx >= IWN_RIDX_OFDM6) {
2952			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
2953				flags |= IWN_TX_NEED_CTS;
2954			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
2955				flags |= IWN_TX_NEED_RTS;
2956		}
2957		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
2958			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
2959				/* 5000 autoselects RTS/CTS or CTS-to-self. */
2960				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
2961				flags |= IWN_TX_NEED_PROTECTION;
2962			} else
2963				flags |= IWN_TX_FULL_TXOP;
2964		}
2965	}
2966
2967	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
2968	    type != IEEE80211_FC0_TYPE_DATA)
2969		tx->id = hal->broadcast_id;
2970	else
2971		tx->id = wn->id;
2972
2973	if (type == IEEE80211_FC0_TYPE_MGT) {
2974		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2975
2976		/* Tell HW to set timestamp in probe responses. */
2977		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2978			flags |= IWN_TX_INSERT_TSTAMP;
2979
2980		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
2981		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
2982			tx->timeout = htole16(3);
2983		else
2984			tx->timeout = htole16(2);
2985	} else
2986		tx->timeout = htole16(0);
2987
2988	if (hdrlen & 3) {
2989		/* First segment length must be a multiple of 4. */
2990		flags |= IWN_TX_NEED_PADDING;
2991		pad = 4 - (hdrlen & 3);
2992	} else
2993		pad = 0;
2994
2995	tx->len = htole16(totlen);
2996	tx->tid = 0;
2997	tx->rts_ntries = 60;
2998	tx->data_ntries = 15;
2999	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3000	tx->plcp = rinfo->plcp;
3001	tx->rflags = rinfo->flags;
3002	if (tx->id == hal->broadcast_id) {
3003		/* Group or management frame. */
3004		tx->linkq = 0;
3005		/* XXX Alternate between antenna A and B? */
3006		txant = IWN_LSB(sc->txchainmask);
3007		tx->rflags |= IWN_RFLAG_ANT(txant);
3008	} else {
3009		tx->linkq = IWN_RIDX_OFDM54 - ridx;
3010		flags |= IWN_TX_LINKQ;	/* enable MRR */
3011	}
3012
3013	/* Set physical address of "scratch area". */
3014	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3015	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3016
3017	/* Copy 802.11 header in TX command. */
3018	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3019
3020	/* Trim 802.11 header. */
3021	m_adj(m, hdrlen);
3022	tx->security = 0;
3023	tx->flags = htole32(flags);
3024
3025	if (m->m_len > 0) {
3026		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map,
3027		    m, segs, &nsegs, BUS_DMA_NOWAIT);
3028		if (error == EFBIG) {
3029			/* too many fragments, linearize */
3030			mnew = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER);
3031			if (mnew == NULL) {
3032				device_printf(sc->sc_dev,
3033				    "%s: could not defrag mbuf\n", __func__);
3034				m_freem(m);
3035				return ENOBUFS;
3036			}
3037			m = mnew;
3038			error = bus_dmamap_load_mbuf_sg(ring->data_dmat,
3039			    data->map, m, segs, &nsegs, BUS_DMA_NOWAIT);
3040		}
3041		if (error != 0) {
3042			device_printf(sc->sc_dev,
3043			    "%s: bus_dmamap_load_mbuf_sg failed, error %d\n",
3044			    __func__, error);
3045			m_freem(m);
3046			return error;
3047		}
3048	}
3049
3050	data->m = m;
3051	data->ni = ni;
3052
3053	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3054	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3055
3056	/* Fill TX descriptor. */
3057	desc->nsegs = 1 + nsegs;
3058	/* First DMA segment is used by the TX command. */
3059	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3060	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
3061	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
3062	/* Other DMA segments are for data payload. */
3063	for (i = 1; i <= nsegs; i++) {
3064		desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr));
3065		desc->segs[i].len  = htole16(IWN_HIADDR(segs[i - 1].ds_addr) |
3066		    segs[i - 1].ds_len << 4);
3067	}
3068
3069	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3070	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3071	    BUS_DMASYNC_PREWRITE);
3072	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3073	    BUS_DMASYNC_PREWRITE);
3074
3075#ifdef notyet
3076	/* Update TX scheduler. */
3077	hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3078#endif
3079
3080	/* Kick TX ring. */
3081	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3082	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3083
3084	/* Mark TX ring as full if we reach a certain threshold. */
3085	if (++ring->queued > IWN_TX_RING_HIMARK)
3086		sc->qfullmsk |= 1 << ring->qid;
3087
3088	return 0;
3089}
3090
3091static int
3092iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
3093    struct ieee80211_node *ni, struct iwn_tx_ring *ring,
3094    const struct ieee80211_bpf_params *params)
3095{
3096	const struct iwn_hal *hal = sc->sc_hal;
3097	const struct iwn_rate *rinfo;
3098	struct ifnet *ifp = sc->sc_ifp;
3099	struct ieee80211vap *vap = ni->ni_vap;
3100	struct ieee80211com *ic = ifp->if_l2com;
3101	struct iwn_tx_cmd *cmd;
3102	struct iwn_cmd_data *tx;
3103	struct ieee80211_frame *wh;
3104	struct iwn_tx_desc *desc;
3105	struct iwn_tx_data *data;
3106	struct mbuf *mnew;
3107	bus_addr_t paddr;
3108	bus_dma_segment_t segs[IWN_MAX_SCATTER];
3109	uint32_t flags;
3110	u_int hdrlen;
3111	int totlen, error, pad, nsegs = 0, i, rate;
3112	uint8_t ridx, type, txant;
3113
3114	IWN_LOCK_ASSERT(sc);
3115
3116	wh = mtod(m, struct ieee80211_frame *);
3117	hdrlen = ieee80211_anyhdrsize(wh);
3118	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3119
3120	desc = &ring->desc[ring->cur];
3121	data = &ring->data[ring->cur];
3122
3123	/* Choose a TX rate index. */
3124	rate = params->ibp_rate0;
3125	if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
3126		/* XXX fall back to mcast/mgmt rate? */
3127		m_freem(m);
3128		return EINVAL;
3129	}
3130	ridx = iwn_plcp_signal(rate);
3131	rinfo = &iwn_rates[ridx];
3132
3133	totlen = m->m_pkthdr.len;
3134
3135	/* Prepare TX firmware command. */
3136	cmd = &ring->cmd[ring->cur];
3137	cmd->code = IWN_CMD_TX_DATA;
3138	cmd->flags = 0;
3139	cmd->qid = ring->qid;
3140	cmd->idx = ring->cur;
3141
3142	tx = (struct iwn_cmd_data *)cmd->data;
3143	/* NB: No need to clear tx, all fields are reinitialized here. */
3144	tx->scratch = 0;	/* clear "scratch" area */
3145
3146	flags = 0;
3147	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
3148		flags |= IWN_TX_NEED_ACK;
3149	if (params->ibp_flags & IEEE80211_BPF_RTS) {
3150		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3151			/* 5000 autoselects RTS/CTS or CTS-to-self. */
3152			flags &= ~IWN_TX_NEED_RTS;
3153			flags |= IWN_TX_NEED_PROTECTION;
3154		} else
3155			flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
3156	}
3157	if (params->ibp_flags & IEEE80211_BPF_CTS) {
3158		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3159			/* 5000 autoselects RTS/CTS or CTS-to-self. */
3160			flags &= ~IWN_TX_NEED_CTS;
3161			flags |= IWN_TX_NEED_PROTECTION;
3162		} else
3163			flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
3164	}
3165	if (type == IEEE80211_FC0_TYPE_MGT) {
3166		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3167
3168		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3169			flags |= IWN_TX_INSERT_TSTAMP;
3170
3171		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3172		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3173			tx->timeout = htole16(3);
3174		else
3175			tx->timeout = htole16(2);
3176	} else
3177		tx->timeout = htole16(0);
3178
3179	if (hdrlen & 3) {
3180		/* First segment length must be a multiple of 4. */
3181		flags |= IWN_TX_NEED_PADDING;
3182		pad = 4 - (hdrlen & 3);
3183	} else
3184		pad = 0;
3185
3186	if (ieee80211_radiotap_active_vap(vap)) {
3187		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3188
3189		tap->wt_flags = 0;
3190		tap->wt_rate = rate;
3191
3192		ieee80211_radiotap_tx(vap, m);
3193	}
3194
3195	tx->len = htole16(totlen);
3196	tx->tid = 0;
3197	tx->id = hal->broadcast_id;
3198	tx->rts_ntries = params->ibp_try1;
3199	tx->data_ntries = params->ibp_try0;
3200	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3201	tx->plcp = rinfo->plcp;
3202	tx->rflags = rinfo->flags;
3203	/* Group or management frame. */
3204	tx->linkq = 0;
3205	txant = IWN_LSB(sc->txchainmask);
3206	tx->rflags |= IWN_RFLAG_ANT(txant);
3207	/* Set physical address of "scratch area". */
3208	paddr = ring->cmd_dma.paddr + ring->cur * sizeof (struct iwn_tx_cmd);
3209	tx->loaddr = htole32(IWN_LOADDR(paddr));
3210	tx->hiaddr = IWN_HIADDR(paddr);
3211
3212	/* Copy 802.11 header in TX command. */
3213	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3214
3215	/* Trim 802.11 header. */
3216	m_adj(m, hdrlen);
3217	tx->security = 0;
3218	tx->flags = htole32(flags);
3219
3220	if (m->m_len > 0) {
3221		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map,
3222		    m, segs, &nsegs, BUS_DMA_NOWAIT);
3223		if (error == EFBIG) {
3224			/* Too many fragments, linearize. */
3225			mnew = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER);
3226			if (mnew == NULL) {
3227				device_printf(sc->sc_dev,
3228				    "%s: could not defrag mbuf\n", __func__);
3229				m_freem(m);
3230				return ENOBUFS;
3231			}
3232			m = mnew;
3233			error = bus_dmamap_load_mbuf_sg(ring->data_dmat,
3234			    data->map, m, segs, &nsegs, BUS_DMA_NOWAIT);
3235		}
3236		if (error != 0) {
3237			device_printf(sc->sc_dev,
3238			    "%s: bus_dmamap_load_mbuf_sg failed, error %d\n",
3239			    __func__, error);
3240			m_freem(m);
3241			return error;
3242		}
3243	}
3244
3245	data->m = m;
3246	data->ni = ni;
3247
3248	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3249	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3250
3251	/* Fill TX descriptor. */
3252	desc->nsegs = 1 + nsegs;
3253	/* First DMA segment is used by the TX command. */
3254	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3255	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
3256	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
3257	/* Other DMA segments are for data payload. */
3258	for (i = 1; i <= nsegs; i++) {
3259		desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr));
3260		desc->segs[i].len  = htole16(IWN_HIADDR(segs[i - 1].ds_addr) |
3261		    segs[i - 1].ds_len << 4);
3262	}
3263
3264	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3265	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3266	    BUS_DMASYNC_PREWRITE);
3267	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3268	    BUS_DMASYNC_PREWRITE);
3269
3270#ifdef notyet
3271	/* Update TX scheduler. */
3272	hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3273#endif
3274
3275	/* Kick TX ring. */
3276	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3277	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3278
3279	/* Mark TX ring as full if we reach a certain threshold. */
3280	if (++ring->queued > IWN_TX_RING_HIMARK)
3281		sc->qfullmsk |= 1 << ring->qid;
3282
3283	return 0;
3284}
3285
3286static int
3287iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3288	const struct ieee80211_bpf_params *params)
3289{
3290	struct ieee80211com *ic = ni->ni_ic;
3291	struct ifnet *ifp = ic->ic_ifp;
3292	struct iwn_softc *sc = ifp->if_softc;
3293	struct iwn_tx_ring *txq;
3294	int error = 0;
3295
3296	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3297		ieee80211_free_node(ni);
3298		m_freem(m);
3299		return ENETDOWN;
3300	}
3301
3302	IWN_LOCK(sc);
3303	if (params == NULL)
3304		txq = &sc->txq[M_WME_GETAC(m)];
3305	else
3306		txq = &sc->txq[params->ibp_pri & 3];
3307
3308	if (params == NULL) {
3309		/*
3310		 * Legacy path; interpret frame contents to decide
3311		 * precisely how to send the frame.
3312		 */
3313		error = iwn_tx_data(sc, m, ni, txq);
3314	} else {
3315		/*
3316		 * Caller supplied explicit parameters to use in
3317		 * sending the frame.
3318		 */
3319		error = iwn_tx_data_raw(sc, m, ni, txq, params);
3320	}
3321	if (error != 0) {
3322		/* NB: m is reclaimed on tx failure */
3323		ieee80211_free_node(ni);
3324		ifp->if_oerrors++;
3325	}
3326	IWN_UNLOCK(sc);
3327	return error;
3328}
3329
3330static void
3331iwn_start(struct ifnet *ifp)
3332{
3333	struct iwn_softc *sc = ifp->if_softc;
3334
3335	IWN_LOCK(sc);
3336	iwn_start_locked(ifp);
3337	IWN_UNLOCK(sc);
3338}
3339
3340static void
3341iwn_start_locked(struct ifnet *ifp)
3342{
3343	struct iwn_softc *sc = ifp->if_softc;
3344	struct ieee80211_node *ni;
3345	struct iwn_tx_ring *txq;
3346	struct mbuf *m;
3347	int pri;
3348
3349	IWN_LOCK_ASSERT(sc);
3350
3351	for (;;) {
3352		if (sc->qfullmsk != 0) {
3353			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3354			break;
3355		}
3356		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
3357		if (m == NULL)
3358			break;
3359		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3360		pri = M_WME_GETAC(m);
3361		txq = &sc->txq[pri];
3362		if (iwn_tx_data(sc, m, ni, txq) != 0) {
3363			ifp->if_oerrors++;
3364			ieee80211_free_node(ni);
3365			break;
3366		}
3367		sc->sc_tx_timer = 5;
3368	}
3369}
3370
3371static void
3372iwn_watchdog(struct iwn_softc *sc)
3373{
3374	if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) {
3375		struct ifnet *ifp = sc->sc_ifp;
3376		struct ieee80211com *ic = ifp->if_l2com;
3377
3378		if_printf(ifp, "device timeout\n");
3379		ieee80211_runtask(ic, &sc->sc_reinit_task);
3380	}
3381}
3382
3383static int
3384iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
3385{
3386	struct iwn_softc *sc = ifp->if_softc;
3387	struct ieee80211com *ic = ifp->if_l2com;
3388	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3389	struct ifreq *ifr = (struct ifreq *) data;
3390	int error = 0, startall = 0, stop = 0;
3391
3392	switch (cmd) {
3393	case SIOCSIFFLAGS:
3394		IWN_LOCK(sc);
3395		if (ifp->if_flags & IFF_UP) {
3396			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3397				iwn_init_locked(sc);
3398				if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
3399					startall = 1;
3400				else
3401					stop = 1;
3402			}
3403		} else {
3404			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3405				iwn_stop_locked(sc);
3406		}
3407		IWN_UNLOCK(sc);
3408		if (startall)
3409			ieee80211_start_all(ic);
3410		else if (vap != NULL && stop)
3411			ieee80211_stop(vap);
3412		break;
3413	case SIOCGIFMEDIA:
3414		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
3415		break;
3416	case SIOCGIFADDR:
3417		error = ether_ioctl(ifp, cmd, data);
3418		break;
3419	default:
3420		error = EINVAL;
3421		break;
3422	}
3423	return error;
3424}
3425
3426/*
3427 * Send a command to the firmware.
3428 */
3429static int
3430iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
3431{
3432	struct iwn_tx_ring *ring = &sc->txq[4];
3433	struct iwn_tx_desc *desc;
3434	struct iwn_tx_data *data;
3435	struct iwn_tx_cmd *cmd;
3436	struct mbuf *m;
3437	bus_addr_t paddr;
3438	int totlen, error;
3439
3440	IWN_LOCK_ASSERT(sc);
3441
3442	desc = &ring->desc[ring->cur];
3443	data = &ring->data[ring->cur];
3444	totlen = 4 + size;
3445
3446	if (size > sizeof cmd->data) {
3447		/* Command is too large to fit in a descriptor. */
3448		if (totlen > MCLBYTES)
3449			return EINVAL;
3450		m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
3451		if (m == NULL)
3452			return ENOMEM;
3453		cmd = mtod(m, struct iwn_tx_cmd *);
3454		error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
3455		    totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
3456		if (error != 0) {
3457			m_freem(m);
3458			return error;
3459		}
3460		data->m = m;
3461	} else {
3462		cmd = &ring->cmd[ring->cur];
3463		paddr = data->cmd_paddr;
3464	}
3465
3466	cmd->code = code;
3467	cmd->flags = 0;
3468	cmd->qid = ring->qid;
3469	cmd->idx = ring->cur;
3470	memcpy(cmd->data, buf, size);
3471
3472	desc->nsegs = 1;
3473	desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
3474	desc->segs[0].len  = htole16(IWN_HIADDR(paddr) | totlen << 4);
3475
3476	DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
3477	    __func__, iwn_intr_str(cmd->code), cmd->code,
3478	    cmd->flags, cmd->qid, cmd->idx);
3479
3480	if (size > sizeof cmd->data) {
3481		bus_dmamap_sync(ring->data_dmat, data->map,
3482		    BUS_DMASYNC_PREWRITE);
3483	} else {
3484		bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3485		    BUS_DMASYNC_PREWRITE);
3486	}
3487	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3488	    BUS_DMASYNC_PREWRITE);
3489
3490#ifdef notyet
3491	/* Update TX scheduler. */
3492	sc->sc_hal->update_sched(sc, ring->qid, ring->cur, 0, 0);
3493#endif
3494
3495	/* Kick command ring. */
3496	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3497	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3498
3499	return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
3500}
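
#if 0	/* Illustrative sketch only; not compiled. */
/*
 * A minimal usage sketch of iwn_cmd(), modeled on the statistics request
 * issued from iwn_timer_timeout() above; the helper name is made up.
 */
static int
iwn_request_statistics_sketch(struct iwn_softc *sc)
{
	uint32_t flags = 0;

	IWN_LOCK_ASSERT(sc);
	/* Asynchronous: the reply shows up later as IWN_RX_STATISTICS. */
	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
}
#endif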
3501
3502static int
3503iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3504{
3505	struct iwn4965_node_info hnode;
3506	caddr_t src, dst;
3507
3508	/*
3509	 * We use the node structure for 5000 Series internally (it is
3510	 * a superset of the one for 4965AGN). We thus copy the common
3511	 * fields before sending the command.
3512	 */
3513	src = (caddr_t)node;
3514	dst = (caddr_t)&hnode;
3515	memcpy(dst, src, 48);
3516	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
3517	memcpy(dst + 48, src + 72, 20);
3518	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
3519}
3520
3521static int
3522iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3523{
3524	/* Direct mapping. */
3525	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
3526}
3527
3528#if 0	/* HT */
3529static const uint8_t iwn_ridx_to_plcp[] = {
3530	10, 20, 55, 110, /* CCK */
3531	0xd, 0xf, 0x5, 0x7, 0x9, 0xb, 0x1, 0x3, 0x3 /* OFDM R1-R4 */
3532};
3533static const uint8_t iwn_siso_mcs_to_plcp[] = {
3534	0, 0, 0, 0, 			/* CCK */
3535	0, 0, 1, 2, 3, 4, 5, 6, 7	/* HT */
3536};
3537static const uint8_t iwn_mimo_mcs_to_plcp[] = {
3538	0, 0, 0, 0, 			/* CCK */
3539	8, 8, 9, 10, 11, 12, 13, 14, 15	/* HT */
3540};
3541#endif
3542static const uint8_t iwn_prev_ridx[] = {
3543	/* NB: allow fallback from CCK11 to OFDM9 and from OFDM6 to CCK5 */
3544	0, 0, 1, 5,			/* CCK */
3545	2, 4, 3, 6, 7, 8, 9, 10, 10	/* OFDM */
3546};
3547
3548/*
3549 * Configure hardware link parameters for the specified
3550 * node operating on the specified channel.
3551 */
3552static int
3553iwn_set_link_quality(struct iwn_softc *sc, uint8_t id, int async)
3554{
3555	struct ifnet *ifp = sc->sc_ifp;
3556	struct ieee80211com *ic = ifp->if_l2com;
3557	struct iwn_cmd_link_quality linkq;
3558	const struct iwn_rate *rinfo;
3559	int i;
3560	uint8_t txant, ridx;
3561
3562	/* Use the first valid TX antenna. */
3563	txant = IWN_LSB(sc->txchainmask);
3564
3565	memset(&linkq, 0, sizeof linkq);
3566	linkq.id = id;
3567	linkq.antmsk_1stream = txant;
3568	linkq.antmsk_2stream = IWN_ANT_AB;
3569	linkq.ampdu_max = 31;
3570	linkq.ampdu_threshold = 3;
3571	linkq.ampdu_limit = htole16(4000);	/* 4ms */
3572
3573#if 0	/* HT */
3574	if (IEEE80211_IS_CHAN_HT(c))
3575		linkq.mimo = 1;
3576#endif
3577
3578	if (id == IWN_ID_BSS)
3579		ridx = IWN_RIDX_OFDM54;
3580	else if (IEEE80211_IS_CHAN_A(ic->ic_curchan))
3581		ridx = IWN_RIDX_OFDM6;
3582	else
3583		ridx = IWN_RIDX_CCK1;
3584
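	/* Build the rate retry table from the start rate down, stepping through iwn_prev_ridx. */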
3585	for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
3586		rinfo = &iwn_rates[ridx];
3587#if 0	/* HT */
3588		if (IEEE80211_IS_CHAN_HT40(c)) {
3589			linkq.retry[i].plcp = iwn_mimo_mcs_to_plcp[ridx]
3590					 | IWN_RIDX_MCS;
3591			linkq.retry[i].rflags = IWN_RFLAG_HT
3592					 | IWN_RFLAG_HT40;
3593			/* XXX shortGI */
3594		} else if (IEEE80211_IS_CHAN_HT(c)) {
3595			linkq.retry[i].plcp = iwn_siso_mcs_to_plcp[ridx]
3596					 | IWN_RIDX_MCS;
3597			linkq.retry[i].rflags = IWN_RFLAG_HT;
3598			/* XXX shortGI */
3599		} else
3600#endif
3601		{
3602			linkq.retry[i].plcp = rinfo->plcp;
3603			linkq.retry[i].rflags = rinfo->flags;
3604		}
3605		linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
3606		ridx = iwn_prev_ridx[ridx];
3607	}
3608#ifdef IWN_DEBUG
3609	if (sc->sc_debug & IWN_DEBUG_STATE) {
3610		printf("%s: set link quality for node %d, mimo %d ssmask %d\n",
3611		    __func__, id, linkq.mimo, linkq.antmsk_1stream);
3612		printf("%s:", __func__);
3613		for (i = 0; i < IWN_MAX_TX_RETRIES; i++)
3614			printf(" %d:%x", linkq.retry[i].plcp,
3615			    linkq.retry[i].rflags);
3616		printf("\n");
3617	}
3618#endif
3619	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
3620}
3621
3622/*
3623 * Broadcast node is used to send group-addressed and management frames.
3624 */
3625static int
3626iwn_add_broadcast_node(struct iwn_softc *sc, int async)
3627{
3628	const struct iwn_hal *hal = sc->sc_hal;
3629	struct ifnet *ifp = sc->sc_ifp;
3630	struct iwn_node_info node;
3631	int error;
3632
3633	memset(&node, 0, sizeof node);
3634	IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
3635	node.id = hal->broadcast_id;
3636	DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
3637	error = hal->add_node(sc, &node, async);
3638	if (error != 0)
3639		return error;
3640
3641	error = iwn_set_link_quality(sc, hal->broadcast_id, async);
3642	return error;
3643}
3644
3645static int
3646iwn_wme_update(struct ieee80211com *ic)
3647{
3648#define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
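/* NB: wmep_txopLimit is expressed in units of 32 microseconds. */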
3649#define	IWN_TXOP_TO_US(v)		(v<<5)
3650	struct iwn_softc *sc = ic->ic_ifp->if_softc;
3651	struct iwn_edca_params cmd;
3652	int i;
3653
3654	memset(&cmd, 0, sizeof cmd);
3655	cmd.flags = htole32(IWN_EDCA_UPDATE);
3656	for (i = 0; i < WME_NUM_AC; i++) {
3657		const struct wmeParams *wmep =
3658		    &ic->ic_wme.wme_chanParams.cap_wmeParams[i];
3659		cmd.ac[i].aifsn = wmep->wmep_aifsn;
3660		cmd.ac[i].cwmin = htole16(IWN_EXP2(wmep->wmep_logcwmin));
3661		cmd.ac[i].cwmax = htole16(IWN_EXP2(wmep->wmep_logcwmax));
3662		cmd.ac[i].txoplimit =
3663		    htole16(IWN_TXOP_TO_US(wmep->wmep_txopLimit));
3664	}
3665	IEEE80211_UNLOCK(ic);
3666	IWN_LOCK(sc);
3667	(void) iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1 /*async*/);
3668	IWN_UNLOCK(sc);
3669	IEEE80211_LOCK(ic);
3670	return 0;
3671#undef IWN_TXOP_TO_US
3672#undef IWN_EXP2
3673}
3674
3675static void
3676iwn_update_mcast(struct ifnet *ifp)
3677{
3678	/* Ignore */
3679}
3680
3681static void
3682iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
3683{
3684	struct iwn_cmd_led led;
3685
3686	/* Clear microcode LED ownership. */
3687	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
3688
3689	led.which = which;
3690	led.unit = htole32(10000);	/* on/off in unit of 100ms */
3691	led.off = off;
3692	led.on = on;
3693	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
3694}
3695
3696/*
3697 * Set the critical temperature at which the firmware will stop the radio
3698 * and notify us.
3699 */
3700static int
3701iwn_set_critical_temp(struct iwn_softc *sc)
3702{
3703	struct iwn_critical_temp crit;
3704	int32_t temp;
3705
3706	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
3707
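	/* Express the 110 degC threshold in the units used by the chip's temperature sensor. */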
3708	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
3709		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
3710	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
3711		temp = IWN_CTOK(110);
3712	else
3713		temp = 110;
3714	memset(&crit, 0, sizeof crit);
3715	crit.tempR = htole32(temp);
3716	DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n",
3717	    temp);
3718	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
3719}
3720
3721static int
3722iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
3723{
3724	struct iwn_cmd_timing cmd;
3725	uint64_t val, mod;
3726
3727	memset(&cmd, 0, sizeof cmd);
3728	memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
3729	cmd.bintval = htole16(ni->ni_intval);
3730	cmd.lintval = htole16(10);
3731
3732	/* Compute remaining time until next beacon. */
3733	val = (uint64_t)ni->ni_intval * 1024;	/* TU -> usecs */
3734	mod = le64toh(cmd.tstamp) % val;
3735	cmd.binitval = htole32((uint32_t)(val - mod));
3736
3737	DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
3738	    ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
3739
3740	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
3741}
3742
3743static void
3744iwn4965_power_calibration(struct iwn_softc *sc, int temp)
3745{
3746	struct ifnet *ifp = sc->sc_ifp;
3747	struct ieee80211com *ic = ifp->if_l2com;
3748
3749	/* Adjust TX power if need be (delta >= 3 degC.) */
3750	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
3751	    __func__, sc->temp, temp);
3752	if (abs(temp - sc->temp) >= 3) {
3753		/* Record temperature of last calibration. */
3754		sc->temp = temp;
3755		(void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
3756	}
3757}
3758
3759/*
3760 * Set TX power for current channel (each rate has its own power settings).
3761 * This function takes into account the regulatory information from EEPROM,
3762 * the current temperature and the current voltage.
3763 */
3764static int
3765iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
3766    int async)
3767{
3768/* Fixed-point arithmetic division using an n-bit fractional part. */
3769#define fdivround(a, b, n)	\
3770	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
3771/* Linear interpolation. */
3772#define interpolate(x, x1, y1, x2, y2, n)	\
3773	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
3774
3775	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
3776	struct ifnet *ifp = sc->sc_ifp;
3777	struct ieee80211com *ic = ifp->if_l2com;
3778	struct iwn_ucode_info *uc = &sc->ucode_info;
3779	struct iwn4965_cmd_txpower cmd;
3780	struct iwn4965_eeprom_chan_samples *chans;
3781	int32_t vdiff, tdiff;
3782	int i, c, grp, maxpwr;
3783	const uint8_t *rf_gain, *dsp_gain;
3784	uint8_t chan;
3785
3786	/* Retrieve channel number. */
3787	chan = ieee80211_chan2ieee(ic, ch);
3788	DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
3789	    chan);
3790
3791	memset(&cmd, 0, sizeof cmd);
3792	cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
3793	cmd.chan = chan;
3794
3795	if (IEEE80211_IS_CHAN_5GHZ(ch)) {
3796		maxpwr   = sc->maxpwr5GHz;
3797		rf_gain  = iwn4965_rf_gain_5ghz;
3798		dsp_gain = iwn4965_dsp_gain_5ghz;
3799	} else {
3800		maxpwr   = sc->maxpwr2GHz;
3801		rf_gain  = iwn4965_rf_gain_2ghz;
3802		dsp_gain = iwn4965_dsp_gain_2ghz;
3803	}
3804
3805	/* Compute voltage compensation. */
3806	vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
3807	if (vdiff > 0)
3808		vdiff *= 2;
3809	if (abs(vdiff) > 2)
3810		vdiff = 0;
3811	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3812	    "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
3813	    __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);
3814
3815	/* Get channel attenuation group. */
3816	if (chan <= 20)		/* 1-20 */
3817		grp = 4;
3818	else if (chan <= 43)	/* 34-43 */
3819		grp = 0;
3820	else if (chan <= 70)	/* 44-70 */
3821		grp = 1;
3822	else if (chan <= 124)	/* 71-124 */
3823		grp = 2;
3824	else			/* 125-200 */
3825		grp = 3;
3826	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3827	    "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);
3828
3829	/* Get channel sub-band. */
3830	for (i = 0; i < IWN_NBANDS; i++)
3831		if (sc->bands[i].lo != 0 &&
3832		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
3833			break;
3834	if (i == IWN_NBANDS)	/* Can't happen in real life. */
3835		return EINVAL;
3836	chans = sc->bands[i].chans;
3837	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3838	    "%s: chan %d sub-band=%d\n", __func__, chan, i);
3839
3840	for (c = 0; c < 2; c++) {
3841		uint8_t power, gain, temp;
3842		int maxchpwr, pwr, ridx, idx;
3843
3844		power = interpolate(chan,
3845		    chans[0].num, chans[0].samples[c][1].power,
3846		    chans[1].num, chans[1].samples[c][1].power, 1);
3847		gain  = interpolate(chan,
3848		    chans[0].num, chans[0].samples[c][1].gain,
3849		    chans[1].num, chans[1].samples[c][1].gain, 1);
3850		temp  = interpolate(chan,
3851		    chans[0].num, chans[0].samples[c][1].temp,
3852		    chans[1].num, chans[1].samples[c][1].temp, 1);
3853		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3854		    "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
3855		    __func__, c, power, gain, temp);
3856
3857		/* Compute temperature compensation. */
3858		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
3859		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3860		    "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
3861		    __func__, tdiff, sc->temp, temp);
3862
3863		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
3864			/* Convert dBm to half-dBm. */
3865			maxchpwr = sc->maxpwr[chan] * 2;
3866			if ((ridx / 8) & 1)
3867				maxchpwr -= 6;	/* MIMO 2T: -3dB */
3868
3869			pwr = maxpwr;
3870
3871			/* Adjust TX power based on rate. */
3872			if ((ridx % 8) == 5)
3873				pwr -= 15;	/* OFDM48: -7.5dB */
3874			else if ((ridx % 8) == 6)
3875				pwr -= 17;	/* OFDM54: -8.5dB */
3876			else if ((ridx % 8) == 7)
3877				pwr -= 20;	/* OFDM60: -10dB */
3878			else
3879				pwr -= 10;	/* Others: -5dB */
3880
3881			/* Do not exceed channel max TX power. */
3882			if (pwr > maxchpwr)
3883				pwr = maxchpwr;
3884
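			/* Power index: calibrated gain reduced by the requested backoff and by the temperature and voltage compensation. */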
3885			idx = gain - (pwr - power) - tdiff - vdiff;
3886			if ((ridx / 8) & 1)	/* MIMO */
3887				idx += (int32_t)le32toh(uc->atten[grp][c]);
3888
3889			if (cmd.band == 0)
3890				idx += 9;	/* 5GHz */
3891			if (ridx == IWN_RIDX_MAX)
3892				idx += 5;	/* CCK */
3893
3894			/* Make sure idx stays in a valid range. */
3895			if (idx < 0)
3896				idx = 0;
3897			else if (idx > IWN4965_MAX_PWR_INDEX)
3898				idx = IWN4965_MAX_PWR_INDEX;
3899
3900			DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3901			    "%s: Tx chain %d, rate idx %d: power=%d\n",
3902			    __func__, c, ridx, idx);
3903			cmd.power[ridx].rf_gain[c] = rf_gain[idx];
3904			cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
3905		}
3906	}
3907
3908	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3909	    "%s: set tx power for chan %d\n", __func__, chan);
3910	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
3911
3912#undef interpolate
3913#undef fdivround
3914}
3915
3916static int
3917iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
3918    int async)
3919{
3920	struct iwn5000_cmd_txpower cmd;
3921
3922	/*
3923	 * TX power calibration is handled automatically by the firmware
3924	 * for 5000 Series.
3925	 */
3926	memset(&cmd, 0, sizeof cmd);
3927	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
3928	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
3929	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
3930	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__);
3931	return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
3932}
3933
3934/*
3935 * Retrieve the maximum RSSI (in dBm) among receivers.
3936 */
3937static int
3938iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
3939{
3940	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
3941	uint8_t mask, agc;
3942	int rssi;
3943
3944	mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
3945	agc  = (le16toh(phy->agc) >> 7) & 0x7f;
3946
3947	rssi = 0;
3948#if 0
3949	if (mask & IWN_ANT_A)	/* Ant A */
3950		rssi = max(rssi, phy->rssi[0]);
3951	if (mask & IWN_ANT_B)	/* Ant B */
3952		rssi = max(rssi, phy->rssi[2]);
3953	if (mask & IWN_ANT_C)	/* Ant C */
3954		rssi = max(rssi, phy->rssi[4]);
3955#else
3956	rssi = max(rssi, phy->rssi[0]);
3957	rssi = max(rssi, phy->rssi[2]);
3958	rssi = max(rssi, phy->rssi[4]);
3959#endif
3960
3961	DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d mask 0x%x rssi %d %d %d "
3962	    "result %d\n", __func__, agc, mask,
3963	    phy->rssi[0], phy->rssi[2], phy->rssi[4],
3964	    rssi - agc - IWN_RSSI_TO_DBM);
3965	return rssi - agc - IWN_RSSI_TO_DBM;
3966}
3967
3968static int
3969iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
3970{
3971	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
3972	int rssi;
3973	uint8_t agc;
3974
3975	agc = (le32toh(phy->agc) >> 9) & 0x7f;
3976
3977	rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
3978		   le16toh(phy->rssi[1]) & 0xff);
3979	rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
3980
3981	DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d rssi %d %d %d "
3982	    "result %d\n", __func__, agc,
3983	    phy->rssi[0], phy->rssi[1], phy->rssi[2],
3984	    rssi - agc - IWN_RSSI_TO_DBM);
3985	return rssi - agc - IWN_RSSI_TO_DBM;
3986}
3987
3988/*
3989 * Retrieve the average noise (in dBm) among receivers.
3990 */
3991static int
3992iwn_get_noise(const struct iwn_rx_general_stats *stats)
3993{
3994	int i, total, nbant, noise;
3995
3996	total = nbant = 0;
3997	for (i = 0; i < 3; i++) {
3998		if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
3999			continue;
4000		total += noise;
4001		nbant++;
4002	}
4003	/* There should be at least one antenna but check anyway. */
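	/* NB: the reported value is offset; subtracting 107 yields dBm. */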
4004	return (nbant == 0) ? -127 : (total / nbant) - 107;
4005}
4006
4007/*
4008 * Compute temperature (in degC) from last received statistics.
4009 */
4010static int
4011iwn4965_get_temperature(struct iwn_softc *sc)
4012{
4013	struct iwn_ucode_info *uc = &sc->ucode_info;
4014	int32_t r1, r2, r3, r4, temp;
4015
4016	r1 = le32toh(uc->temp[0].chan20MHz);
4017	r2 = le32toh(uc->temp[1].chan20MHz);
4018	r3 = le32toh(uc->temp[2].chan20MHz);
4019	r4 = le32toh(sc->rawtemp);
4020
4021	if (r1 == r3)	/* Prevents division by 0 (should not happen.) */
4022		return 0;
4023
4024	/* Sign-extend 23-bit R4 value to 32-bit. */
4025	r4 = (r4 << 8) >> 8;
4026	/* Compute temperature in Kelvin. */
4027	temp = (259 * (r4 - r2)) / (r3 - r1);
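	/* Apply a small linear correction (x97/100, +8). */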
4028	temp = (temp * 97) / 100 + 8;
4029
4030	DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
4031	    IWN_KTOC(temp));
4032	return IWN_KTOC(temp);
4033}
4034
4035static int
4036iwn5000_get_temperature(struct iwn_softc *sc)
4037{
4038	int32_t temp;
4039
4040	/*
4041	 * Temperature is not used by the driver for 5000 Series because
4042	 * TX power calibration is handled by firmware.  We export it to
4043	 * users through the sensor framework though.
4044	 */
4045	temp = le32toh(sc->rawtemp);
4046	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
4047		temp = (temp / -5) + sc->temp_off;
4048		temp = IWN_KTOC(temp);
4049	}
4050	return temp;
4051}
4052
4053/*
4054 * Initialize sensitivity calibration state machine.
4055 */
4056static int
4057iwn_init_sensitivity(struct iwn_softc *sc)
4058{
4059	const struct iwn_hal *hal = sc->sc_hal;
4060	struct iwn_calib_state *calib = &sc->calib;
4061	uint32_t flags;
4062	int error;
4063
4064	/* Reset calibration state machine. */
4065	memset(calib, 0, sizeof (*calib));
4066	calib->state = IWN_CALIB_STATE_INIT;
4067	calib->cck_state = IWN_CCK_STATE_HIFA;
4068	/* Set initial correlation values. */
4069	calib->ofdm_x1     = sc->limits->min_ofdm_x1;
4070	calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
4071	calib->ofdm_x4     = sc->limits->min_ofdm_x4;
4072	calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
4073	calib->cck_x4      = 125;
4074	calib->cck_mrc_x4  = sc->limits->min_cck_mrc_x4;
4075	calib->energy_cck  = sc->limits->energy_cck;
4076
4077	/* Write initial sensitivity. */
4078	error = iwn_send_sensitivity(sc);
4079	if (error != 0)
4080		return error;
4081
4082	/* Write initial gains. */
4083	error = hal->init_gains(sc);
4084	if (error != 0)
4085		return error;
4086
4087	/* Request statistics at each beacon interval. */
4088	flags = 0;
4089	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: calibrate phy\n", __func__);
4090	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
4091}
4092
4093/*
4094 * Collect noise and RSSI statistics for the first 20 beacons received
4095 * after association and use them to determine connected antennas and
4096 * to set differential gains.
4097 */
4098static void
4099iwn_collect_noise(struct iwn_softc *sc,
4100    const struct iwn_rx_general_stats *stats)
4101{
4102	const struct iwn_hal *hal = sc->sc_hal;
4103	struct iwn_calib_state *calib = &sc->calib;
4104	uint32_t val;
4105	int i;
4106
4107	/* Accumulate RSSI and noise for all 3 antennas. */
4108	for (i = 0; i < 3; i++) {
4109		calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
4110		calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
4111	}
4112	/* NB: We update differential gains only once after 20 beacons. */
4113	if (++calib->nbeacons < 20)
4114		return;
4115
4116	/* Determine highest average RSSI. */
4117	val = MAX(calib->rssi[0], calib->rssi[1]);
4118	val = MAX(calib->rssi[2], val);
4119
4120	/* Determine which antennas are connected. */
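	/* NB: RSSI was accumulated over 20 beacons, hence the 15dB threshold scaled by 20. */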
4121	sc->chainmask = sc->rxchainmask;
4122	for (i = 0; i < 3; i++)
4123		if (val - calib->rssi[i] > 15 * 20)
4124			sc->chainmask &= ~(1 << i);
4125	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4126	    "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
4127	    __func__, sc->rxchainmask, sc->chainmask);
4128
4129	/* If none of the TX antennas are connected, keep at least one. */
4130	if ((sc->chainmask & sc->txchainmask) == 0)
4131		sc->chainmask |= IWN_LSB(sc->txchainmask);
4132
4133	(void)hal->set_gains(sc);
4134	calib->state = IWN_CALIB_STATE_RUN;
4135
4136#ifdef notyet
4137	/* XXX Disable RX chains with no antennas connected. */
4138	sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
4139	(void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1);
4140#endif
4141
4142#if 0
4143	/* XXX: not yet */
4144	/* Enable power-saving mode if requested by user. */
4145	if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON)
4146		(void)iwn_set_pslevel(sc, 0, 3, 1);
4147#endif
4148}
4149
4150static int
4151iwn4965_init_gains(struct iwn_softc *sc)
4152{
4153	struct iwn_phy_calib_gain cmd;
4154
4155	memset(&cmd, 0, sizeof cmd);
4156	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4157	/* Differential gains initially set to 0 for all 3 antennas. */
4158	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4159	    "%s: setting initial differential gains\n", __func__);
4160	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4161}
4162
4163static int
4164iwn5000_init_gains(struct iwn_softc *sc)
4165{
4166	struct iwn_phy_calib cmd;
4167
4168	memset(&cmd, 0, sizeof cmd);
4169	cmd.code = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
4170	cmd.ngroups = 1;
4171	cmd.isvalid = 1;
4172	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4173	    "%s: setting initial differential gains\n", __func__);
4174	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4175}
4176
4177static int
4178iwn4965_set_gains(struct iwn_softc *sc)
4179{
4180	struct iwn_calib_state *calib = &sc->calib;
4181	struct iwn_phy_calib_gain cmd;
4182	int i, delta, noise;
4183
4184	/* Get minimal noise among connected antennas. */
4185	noise = INT_MAX;	/* NB: There's at least one antenna. */
4186	for (i = 0; i < 3; i++)
4187		if (sc->chainmask & (1 << i))
4188			noise = MIN(calib->noise[i], noise);
4189
4190	memset(&cmd, 0, sizeof cmd);
4191	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4192	/* Set differential gains for connected antennas. */
4193	for (i = 0; i < 3; i++) {
4194		if (sc->chainmask & (1 << i)) {
4195			/* Compute attenuation (in unit of 1.5dB). */
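			/* (30 = 20 accumulated samples x 1.5dB per step.) */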
4196			delta = (noise - (int32_t)calib->noise[i]) / 30;
4197			/* NB: delta <= 0 */
4198			/* Limit to [-4.5dB,0]. */
4199			cmd.gain[i] = MIN(abs(delta), 3);
4200			if (delta < 0)
4201				cmd.gain[i] |= 1 << 2;	/* sign bit */
4202		}
4203	}
4204	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4205	    "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
4206	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
4207	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4208}
4209
4210static int
4211iwn5000_set_gains(struct iwn_softc *sc)
4212{
4213	struct iwn_calib_state *calib = &sc->calib;
4214	struct iwn_phy_calib_gain cmd;
4215	int i, ant, delta, div;
4216
4217	/* Noise was accumulated over 20 beacons; non-6050 chips need an extra 1.5 factor (hence the divisor 30). */
4218	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
4219
4220	memset(&cmd, 0, sizeof cmd);
4221	cmd.code = IWN5000_PHY_CALIB_NOISE_GAIN;
4222	cmd.ngroups = 1;
4223	cmd.isvalid = 1;
4224	/* Use the first available RX antenna as the reference. */
4225	ant = IWN_LSB(sc->rxchainmask);
4226	/* Set differential gains for other antennas. */
4227	for (i = ant + 1; i < 3; i++) {
4228		if (sc->chainmask & (1 << i)) {
4229			/* The delta is relative to antenna "ant". */
4230			delta = ((int32_t)calib->noise[ant] -
4231			    (int32_t)calib->noise[i]) / div;
4232			/* Limit to [-4.5dB,+4.5dB]. */
4233			cmd.gain[i - 1] = MIN(abs(delta), 3);
4234			if (delta < 0)
4235				cmd.gain[i - 1] |= 1 << 2;	/* sign bit */
4236		}
4237	}
4238	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4239	    "setting differential gains Ant B/C: %x/%x (%x)\n",
4240	    cmd.gain[0], cmd.gain[1], sc->chainmask);
4241	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4242}
4243
4244/*
4245 * Tune RF RX sensitivity based on the number of false alarms detected
4246 * during the last beacon period.
4247 */
4248static void
4249iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
4250{
4251#define inc(val, inc, max)			\
4252	if ((val) < (max)) {			\
4253		if ((val) < (max) - (inc))	\
4254			(val) += (inc);		\
4255		else				\
4256			(val) = (max);		\
4257		needs_update = 1;		\
4258	}
4259#define dec(val, dec, min)			\
4260	if ((val) > (min)) {			\
4261		if ((val) > (min) + (dec))	\
4262			(val) -= (dec);		\
4263		else				\
4264			(val) = (min);		\
4265		needs_update = 1;		\
4266	}
4267
4268	const struct iwn_sensitivity_limits *limits = sc->limits;
4269	struct iwn_calib_state *calib = &sc->calib;
4270	uint32_t val, rxena, fa;
4271	uint32_t energy[3], energy_min;
4272	uint8_t noise[3], noise_ref;
4273	int i, needs_update = 0;
4274
4275	/* Check that we've been enabled long enough. */
4276	rxena = le32toh(stats->general.load);
4277	if (rxena == 0)
4278		return;
4279
4280	/* Compute number of false alarms since last call for OFDM. */
4281	fa  = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
4282	fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
4283	fa *= 200 * 1024;	/* 200TU */
4284
4285	/* Save counters values for next call. */
4286	calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp);
4287	calib->fa_ofdm = le32toh(stats->ofdm.fa);
4288
4289	if (fa > 50 * rxena) {
4290		/* High false alarm count, decrease sensitivity. */
4291		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4292		    "%s: OFDM high false alarm count: %u\n", __func__, fa);
4293		inc(calib->ofdm_x1,     1, limits->max_ofdm_x1);
4294		inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
4295		inc(calib->ofdm_x4,     1, limits->max_ofdm_x4);
4296		inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
4297
4298	} else if (fa < 5 * rxena) {
4299		/* Low false alarm count, increase sensitivity. */
4300		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4301		    "%s: OFDM low false alarm count: %u\n", __func__, fa);
4302		dec(calib->ofdm_x1,     1, limits->min_ofdm_x1);
4303		dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
4304		dec(calib->ofdm_x4,     1, limits->min_ofdm_x4);
4305		dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
4306	}
4307
4308	/* Compute maximum noise among 3 receivers. */
4309	for (i = 0; i < 3; i++)
4310		noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
4311	val = MAX(noise[0], noise[1]);
4312	val = MAX(noise[2], val);
4313	/* Insert it into our samples table. */
4314	calib->noise_samples[calib->cur_noise_sample] = val;
4315	calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
4316
4317	/* Compute maximum noise among last 20 samples. */
4318	noise_ref = calib->noise_samples[0];
4319	for (i = 1; i < 20; i++)
4320		noise_ref = MAX(noise_ref, calib->noise_samples[i]);
4321
4322	/* Compute maximum energy among 3 receivers. */
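	/* NB: energy is reported on an inverted scale, hence MIN here (and MAX below). */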
4323	for (i = 0; i < 3; i++)
4324		energy[i] = le32toh(stats->general.energy[i]);
4325	val = MIN(energy[0], energy[1]);
4326	val = MIN(energy[2], val);
4327	/* Insert it into our samples table. */
4328	calib->energy_samples[calib->cur_energy_sample] = val;
4329	calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
4330
4331	/* Compute minimum energy among last 10 samples. */
4332	energy_min = calib->energy_samples[0];
4333	for (i = 1; i < 10; i++)
4334		energy_min = MAX(energy_min, calib->energy_samples[i]);
4335	energy_min += 6;
4336
4337	/* Compute number of false alarms since last call for CCK. */
4338	fa  = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
4339	fa += le32toh(stats->cck.fa) - calib->fa_cck;
4340	fa *= 200 * 1024;	/* 200TU */
4341
4342	/* Save counters values for next call. */
4343	calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp);
4344	calib->fa_cck = le32toh(stats->cck.fa);
4345
4346	if (fa > 50 * rxena) {
4347		/* High false alarm count, decrease sensitivity. */
4348		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4349		    "%s: CCK high false alarm count: %u\n", __func__, fa);
4350		calib->cck_state = IWN_CCK_STATE_HIFA;
4351		calib->low_fa = 0;
4352
4353		if (calib->cck_x4 > 160) {
4354			calib->noise_ref = noise_ref;
4355			if (calib->energy_cck > 2)
4356				dec(calib->energy_cck, 2, energy_min);
4357		}
4358		if (calib->cck_x4 < 160) {
4359			calib->cck_x4 = 161;
4360			needs_update = 1;
4361		} else
4362			inc(calib->cck_x4, 3, limits->max_cck_x4);
4363
4364		inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
4365
4366	} else if (fa < 5 * rxena) {
4367		/* Low false alarm count, increase sensitivity. */
4368		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4369		    "%s: CCK low false alarm count: %u\n", __func__, fa);
4370		calib->cck_state = IWN_CCK_STATE_LOFA;
4371		calib->low_fa++;
4372
4373		if (calib->cck_state != IWN_CCK_STATE_INIT &&
4374		    (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
4375		    calib->low_fa > 100)) {
4376			inc(calib->energy_cck, 2, limits->min_energy_cck);
4377			dec(calib->cck_x4,     3, limits->min_cck_x4);
4378			dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
4379		}
4380	} else {
4381		/* Not worth increasing or decreasing sensitivity. */
4382		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4383		    "%s: CCK normal false alarm count: %u\n", __func__, fa);
4384		calib->low_fa = 0;
4385		calib->noise_ref = noise_ref;
4386
4387		if (calib->cck_state == IWN_CCK_STATE_HIFA) {
4388			/* Previous interval had many false alarms. */
4389			dec(calib->energy_cck, 8, energy_min);
4390		}
4391		calib->cck_state = IWN_CCK_STATE_INIT;
4392	}
4393
4394	if (needs_update)
4395		(void)iwn_send_sensitivity(sc);
4396#undef dec
4397#undef inc
4398}
4399
4400static int
4401iwn_send_sensitivity(struct iwn_softc *sc)
4402{
4403	struct iwn_calib_state *calib = &sc->calib;
4404	struct iwn_sensitivity_cmd cmd;
4405
4406	memset(&cmd, 0, sizeof cmd);
4407	cmd.which = IWN_SENSITIVITY_WORKTBL;
4408	/* OFDM modulation. */
4409	cmd.corr_ofdm_x1     = htole16(calib->ofdm_x1);
4410	cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
4411	cmd.corr_ofdm_x4     = htole16(calib->ofdm_x4);
4412	cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
4413	cmd.energy_ofdm      = htole16(sc->limits->energy_ofdm);
4414	cmd.energy_ofdm_th   = htole16(62);
4415	/* CCK modulation. */
4416	cmd.corr_cck_x4      = htole16(calib->cck_x4);
4417	cmd.corr_cck_mrc_x4  = htole16(calib->cck_mrc_x4);
4418	cmd.energy_cck       = htole16(calib->energy_cck);
4419	/* Barker modulation: use default values. */
4420	cmd.corr_barker      = htole16(190);
4421	cmd.corr_barker_mrc  = htole16(390);
4422
4423	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4424	    "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
4425	    calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
4426	    calib->ofdm_mrc_x4, calib->cck_x4,
4427	    calib->cck_mrc_x4, calib->energy_cck);
4428	return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, sizeof cmd, 1);
4429}
4430
4431/*
4432 * Set STA mode power saving level (between 0 and 5).
4433 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
4434 */
4435static int
4436iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
4437{
4438	const struct iwn_pmgt *pmgt;
4439	struct iwn_pmgt_cmd cmd;
4440	uint32_t max, skip_dtim;
4441	uint32_t tmp;
4442	int i;
4443
4444	/* Select which PS parameters to use. */
4445	if (dtim <= 2)
4446		pmgt = &iwn_pmgt[0][level];
4447	else if (dtim <= 10)
4448		pmgt = &iwn_pmgt[1][level];
4449	else
4450		pmgt = &iwn_pmgt[2][level];
4451
4452	memset(&cmd, 0, sizeof cmd);
4453	if (level != 0)	/* not CAM */
4454		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
4455	if (level == 5)
4456		cmd.flags |= htole16(IWN_PS_FAST_PD);
4457	/* Retrieve PCIe Active State Power Management (ASPM). */
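	/* Link Control register, at offset 0x10 of the PCI Express capability. */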
4458	tmp = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
4459	if (!(tmp & 0x1))	/* L0s Entry disabled. */
4460		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
4461	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
4462	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
4463
4464	if (dtim == 0) {
4465		dtim = 1;
4466		skip_dtim = 0;
4467	} else
4468		skip_dtim = pmgt->skip_dtim;
4469	if (skip_dtim != 0) {
4470		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
4471		max = pmgt->intval[4];
4472		if (max == (uint32_t)-1)
4473			max = dtim * (skip_dtim + 1);
4474		else if (max > dtim)
4475			max = (max / dtim) * dtim;
4476	} else
4477		max = dtim;
4478	for (i = 0; i < 5; i++)
4479		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
4480
4481	DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
4482	    level);
4483	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
4484}
4485
4486static int
4487iwn_config(struct iwn_softc *sc)
4488{
4489	const struct iwn_hal *hal = sc->sc_hal;
4490	struct ifnet *ifp = sc->sc_ifp;
4491	struct ieee80211com *ic = ifp->if_l2com;
4492	struct iwn_bluetooth bluetooth;
4493	uint32_t txmask;
4494	int error;
4495	uint16_t rxchain;
4496
4497	/* Configure valid TX chains for 5000 Series. */
4498	if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4499		txmask = htole32(sc->txchainmask);
4500		DPRINTF(sc, IWN_DEBUG_RESET,
4501		    "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
4502		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
4503		    sizeof txmask, 0);
4504		if (error != 0) {
4505			device_printf(sc->sc_dev,
4506			    "%s: could not configure valid TX chains, "
4507			    "error %d\n", __func__, error);
4508			return error;
4509		}
4510	}
4511
4512	/* Configure bluetooth coexistence. */
4513	memset(&bluetooth, 0, sizeof bluetooth);
4514	bluetooth.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
4515	bluetooth.lead_time = IWN_BT_LEAD_TIME_DEF;
4516	bluetooth.max_kill = IWN_BT_MAX_KILL_DEF;
4517	DPRINTF(sc, IWN_DEBUG_RESET, "%s: config bluetooth coexistence\n",
4518	    __func__);
4519	error = iwn_cmd(sc, IWN_CMD_BT_COEX, &bluetooth, sizeof bluetooth, 0);
4520	if (error != 0) {
4521		device_printf(sc->sc_dev,
4522		    "%s: could not configure bluetooth coexistence, error %d\n",
4523		    __func__, error);
4524		return error;
4525	}
4526
4527	/* Set mode, channel, RX filter and enable RX. */
4528	memset(&sc->rxon, 0, sizeof (struct iwn_rxon));
4529	IEEE80211_ADDR_COPY(sc->rxon.myaddr, IF_LLADDR(ifp));
4530	IEEE80211_ADDR_COPY(sc->rxon.wlap, IF_LLADDR(ifp));
4531	sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
4532	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4533	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
4534		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4535	switch (ic->ic_opmode) {
4536	case IEEE80211_M_STA:
4537		sc->rxon.mode = IWN_MODE_STA;
4538		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
4539		break;
4540	case IEEE80211_M_MONITOR:
4541		sc->rxon.mode = IWN_MODE_MONITOR;
4542		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST |
4543		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
4544		break;
4545	default:
4546		/* Should not get here. */
4547		break;
4548	}
4549	sc->rxon.cck_mask  = 0x0f;	/* not yet negotiated */
4550	sc->rxon.ofdm_mask = 0xff;	/* not yet negotiated */
4551	sc->rxon.ht_single_mask = 0xff;
4552	sc->rxon.ht_dual_mask = 0xff;
4553	sc->rxon.ht_triple_mask = 0xff;
4554	rxchain =
4555	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
4556	    IWN_RXCHAIN_MIMO_COUNT(2) |
4557	    IWN_RXCHAIN_IDLE_COUNT(2);
4558	sc->rxon.rxchain = htole16(rxchain);
4559	DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__);
4560	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 0);
4561	if (error != 0) {
4562		device_printf(sc->sc_dev,
4563		    "%s: RXON command failed\n", __func__);
4564		return error;
4565	}
4566
4567	error = iwn_add_broadcast_node(sc, 0);
4568	if (error != 0) {
4569		device_printf(sc->sc_dev,
4570		    "%s: could not add broadcast node\n", __func__);
4571		return error;
4572	}
4573
4574	/* Configuration has changed, set TX power accordingly. */
4575	error = hal->set_txpower(sc, ic->ic_curchan, 0);
4576	if (error != 0) {
4577		device_printf(sc->sc_dev,
4578		    "%s: could not set TX power\n", __func__);
4579		return error;
4580	}
4581
4582	error = iwn_set_critical_temp(sc);
4583	if (error != 0) {
4584		device_printf(sc->sc_dev,
4585		    "%s: could not set critical temperature\n", __func__);
4586		return error;
4587	}
4588
4589	/* Set power saving level to CAM during initialization. */
4590	error = iwn_set_pslevel(sc, 0, 0, 0);
4591	if (error != 0) {
4592		device_printf(sc->sc_dev,
4593		    "%s: could not set power saving level\n", __func__);
4594		return error;
4595	}
4596	return 0;
4597}
4598
4599static int
4600iwn_scan(struct iwn_softc *sc)
4601{
4602	struct ifnet *ifp = sc->sc_ifp;
4603	struct ieee80211com *ic = ifp->if_l2com;
4604	struct ieee80211_scan_state *ss = ic->ic_scan;	/*XXX*/
4605	struct iwn_scan_hdr *hdr;
4606	struct iwn_cmd_data *tx;
4607	struct iwn_scan_essid *essid;
4608	struct iwn_scan_chan *chan;
4609	struct ieee80211_frame *wh;
4610	struct ieee80211_rateset *rs;
4611	struct ieee80211_channel *c;
4612	int buflen, error, nrates;
4613	uint16_t rxchain;
4614	uint8_t *buf, *frm, txant;
4615
4616	buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
4617	if (buf == NULL) {
4618		device_printf(sc->sc_dev,
4619		    "%s: could not allocate buffer for scan command\n",
4620		    __func__);
4621		return ENOMEM;
4622	}
4623	hdr = (struct iwn_scan_hdr *)buf;
4624
4625	/*
4626	 * Move to the next channel if no frames are received within 10ms
4627	 * after sending the probe request.
4628	 */
4629	hdr->quiet_time = htole16(10);		/* timeout in milliseconds */
4630	hdr->quiet_threshold = htole16(1);	/* min # of packets */
4631
4632	/* Select antennas for scanning. */
4633	rxchain =
4634	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
4635	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
4636	    IWN_RXCHAIN_DRIVER_FORCE;
4637	if (IEEE80211_IS_CHAN_A(ic->ic_curchan) &&
4638	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
4639		/* Ant A must be avoided in 5GHz because of an HW bug. */
4640		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC);
4641	} else	/* Use all available RX antennas. */
4642		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
4643	hdr->rxchain = htole16(rxchain);
4644	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
4645
4646	tx = (struct iwn_cmd_data *)(hdr + 1);
4647	tx->flags = htole32(IWN_TX_AUTO_SEQ);
4648	tx->id = sc->sc_hal->broadcast_id;
4649	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
4650
4651	if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) {
4652		/* Send probe requests at 6Mbps. */
4653		tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp;
4654		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
4655	} else {
4656		hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
4657		/* Send probe requests at 1Mbps. */
4658		tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp;
4659		tx->rflags = IWN_RFLAG_CCK;
4660		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
4661	}
4662	/* Use the first valid TX antenna. */
4663	txant = IWN_LSB(sc->txchainmask);
4664	tx->rflags |= IWN_RFLAG_ANT(txant);
4665
4666	essid = (struct iwn_scan_essid *)(tx + 1);
4667	if (ss->ss_ssid[0].len != 0) {
4668		essid[0].id = IEEE80211_ELEMID_SSID;
4669		essid[0].len = ss->ss_ssid[0].len;
4670		memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
4671	}
4672
4673	/*
4674	 * Build a probe request frame.  Most of the following code is a
4675	 * copy & paste of what is done in net80211.
4676	 */
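	/* The scan command reserves room for 20 ESSID entries; the frame template follows. */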
4677	wh = (struct ieee80211_frame *)(essid + 20);
4678	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4679	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4680	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4681	IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
4682	IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp));
4683	IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
4684	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
4685	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
4686
4687	frm = (uint8_t *)(wh + 1);
4688
4689	/* Add SSID IE. */
4690	*frm++ = IEEE80211_ELEMID_SSID;
4691	*frm++ = ss->ss_ssid[0].len;
4692	memcpy(frm, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
4693	frm += ss->ss_ssid[0].len;
4694
4695	/* Add supported rates IE. */
4696	*frm++ = IEEE80211_ELEMID_RATES;
4697	nrates = rs->rs_nrates;
4698	if (nrates > IEEE80211_RATE_SIZE)
4699		nrates = IEEE80211_RATE_SIZE;
4700	*frm++ = nrates;
4701	memcpy(frm, rs->rs_rates, nrates);
4702	frm += nrates;
4703
4704	/* Add supported xrates IE. */
4705	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
4706		nrates = rs->rs_nrates - IEEE80211_RATE_SIZE;
4707		*frm++ = IEEE80211_ELEMID_XRATES;
4708		*frm++ = (uint8_t)nrates;
4709		memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates);
4710		frm += nrates;
4711	}
4712
4713	/* Set length of probe request. */
4714	tx->len = htole16(frm - (uint8_t *)wh);
4715
4716	c = ic->ic_curchan;
4717	chan = (struct iwn_scan_chan *)frm;
4718	chan->chan = htole16(ieee80211_chan2ieee(ic, c));
4719	chan->flags = 0;
4720	if (ss->ss_nssid > 0)
4721		chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
4722	chan->dsp_gain = 0x6e;
4723	if (IEEE80211_IS_CHAN_5GHZ(c) &&
4724	    !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
4725		chan->rf_gain = 0x3b;
4726		chan->active  = htole16(24);
4727		chan->passive = htole16(110);
4728		chan->flags |= htole32(IWN_CHAN_ACTIVE);
4729	} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4730		chan->rf_gain = 0x3b;
4731		chan->active  = htole16(24);
4732		if (sc->rxon.associd)
4733			chan->passive = htole16(78);
4734		else
4735			chan->passive = htole16(110);
4736		hdr->crc_threshold = 0xffff;
4737	} else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
4738		chan->rf_gain = 0x28;
4739		chan->active  = htole16(36);
4740		chan->passive = htole16(120);
4741		chan->flags |= htole32(IWN_CHAN_ACTIVE);
4742	} else {
4743		chan->rf_gain = 0x28;
4744		chan->active  = htole16(36);
4745		if (sc->rxon.associd)
4746			chan->passive = htole16(88);
4747		else
4748			chan->passive = htole16(120);
4749		hdr->crc_threshold = 0xffff;
4750	}
4751
4752	DPRINTF(sc, IWN_DEBUG_STATE,
4753	    "%s: chan %u flags 0x%x rf_gain 0x%x "
4754	    "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__,
4755	    chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
4756	    chan->active, chan->passive);
4757
4758	hdr->nchan++;
4759	chan++;
4760	buflen = (uint8_t *)chan - buf;
4761	hdr->len = htole16(buflen);
4762
4763	DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
4764	    hdr->nchan);
4765	error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
4766	free(buf, M_DEVBUF);
4767	return error;
4768}
4769
4770static int
4771iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
4772{
4773	const struct iwn_hal *hal = sc->sc_hal;
4774	struct ifnet *ifp = sc->sc_ifp;
4775	struct ieee80211com *ic = ifp->if_l2com;
4776	struct ieee80211_node *ni = vap->iv_bss;
4777	int error;
4778
4779	sc->calib.state = IWN_CALIB_STATE_INIT;
4780
4781	/* Update adapter configuration. */
4782	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
4783	sc->rxon.chan = htole16(ieee80211_chan2ieee(ic, ni->ni_chan));
4784	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4785	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
4786		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4787	if (ic->ic_flags & IEEE80211_F_SHSLOT)
4788		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
4789	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4790		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
4791	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
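		/* 11a: no CCK rates; 0x15 selects the 6, 12 and 24Mbps OFDM basic rates. */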
4792		sc->rxon.cck_mask  = 0;
4793		sc->rxon.ofdm_mask = 0x15;
4794	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
4795		sc->rxon.cck_mask  = 0x03;
4796		sc->rxon.ofdm_mask = 0;
4797	} else {
4798		/* XXX assume 802.11b/g */
4799		sc->rxon.cck_mask  = 0x0f;
4800		sc->rxon.ofdm_mask = 0x15;
4801	}
4802	DPRINTF(sc, IWN_DEBUG_STATE,
4803	    "%s: config chan %d mode %d flags 0x%x cck 0x%x ofdm 0x%x "
4804	    "ht_single 0x%x ht_dual 0x%x rxchain 0x%x "
4805	    "myaddr %6D wlap %6D bssid %6D associd %d filter 0x%x\n",
4806	    __func__,
4807	    le16toh(sc->rxon.chan), sc->rxon.mode, le32toh(sc->rxon.flags),
4808	    sc->rxon.cck_mask, sc->rxon.ofdm_mask,
4809	    sc->rxon.ht_single_mask, sc->rxon.ht_dual_mask,
4810	    le16toh(sc->rxon.rxchain),
4811	    sc->rxon.myaddr, ":", sc->rxon.wlap, ":", sc->rxon.bssid, ":",
4812	    le16toh(sc->rxon.associd), le32toh(sc->rxon.filter));
4813	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1);
4814	if (error != 0) {
4815		device_printf(sc->sc_dev,
4816		    "%s: RXON command failed, error %d\n", __func__, error);
4817		return error;
4818	}
4819
4820	/* Configuration has changed, set TX power accordingly. */
4821	error = hal->set_txpower(sc, ni->ni_chan, 1);
4822	if (error != 0) {
4823		device_printf(sc->sc_dev,
4824		    "%s: could not set Tx power, error %d\n", __func__, error);
4825		return error;
4826	}
4827	/*
4828	 * Reconfiguring RXON clears the firmware nodes table so we must
4829	 * add the broadcast node again.
4830	 */
4831	error = iwn_add_broadcast_node(sc, 1);
4832	if (error != 0) {
4833		device_printf(sc->sc_dev,
4834		    "%s: could not add broadcast node, error %d\n",
4835		    __func__, error);
4836		return error;
4837	}
4838	return 0;
4839}
4840
4841/*
4842 * Configure the adapter for associated state.
4843 */
4844static int
4845iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
4846{
4847#define	MS(v,x)	(((v) & x) >> x##_S)
4848	const struct iwn_hal *hal = sc->sc_hal;
4849	struct ifnet *ifp = sc->sc_ifp;
4850	struct ieee80211com *ic = ifp->if_l2com;
4851	struct ieee80211_node *ni = vap->iv_bss;
4852	struct iwn_node_info node;
4853	int error;
4854
4855	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4856		/* Link LED blinks while monitoring. */
4857		iwn_set_led(sc, IWN_LED_LINK, 20, 20);
4858		return 0;
4859	}
4860	error = iwn_set_timing(sc, ni);
4861	if (error != 0) {
4862		device_printf(sc->sc_dev,
4863		    "%s: could not set timing, error %d\n", __func__, error);
4864		return error;
4865	}
4866
4867	/* Update adapter configuration. */
4868	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
4869	sc->rxon.chan = htole16(ieee80211_chan2ieee(ic, ni->ni_chan));
4870	sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd));
4871	/* Short preamble and slot time are negotiated when associating. */
4872	sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT);
4873	sc->rxon.flags |= htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4874	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
4875		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4876	else
4877		sc->rxon.flags &= ~htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4878	if (ic->ic_flags & IEEE80211_F_SHSLOT)
4879		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
4880	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4881		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
4882	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
4883		sc->rxon.cck_mask  = 0;
4884		sc->rxon.ofdm_mask = 0x15;
4885	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
4886		sc->rxon.cck_mask  = 0x03;
4887		sc->rxon.ofdm_mask = 0;
4888	} else {
4889		/* XXX assume 802.11b/g */
4890		sc->rxon.cck_mask  = 0x0f;
4891		sc->rxon.ofdm_mask = 0x15;
4892	}
4893#if 0	/* HT */
4894	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
4895		sc->rxon.flags &= ~htole32(IWN_RXON_HT);
4896		if (IEEE80211_IS_CHAN_HT40U(ni->ni_chan))
4897			sc->rxon.flags |= htole32(IWN_RXON_HT40U);
4898		else if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
4899			sc->rxon.flags |= htole32(IWN_RXON_HT40D);
4900		else
4901			sc->rxon.flags |= htole32(IWN_RXON_HT20);
4902		sc->rxon.rxchain = htole16(
4903			  IWN_RXCHAIN_VALID(3)
4904			| IWN_RXCHAIN_MIMO_COUNT(3)
4905			| IWN_RXCHAIN_IDLE_COUNT(1)
4906			| IWN_RXCHAIN_MIMO_FORCE);
4907
4908		maxrxampdu = MS(ni->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU);
4909		ampdudensity = MS(ni->ni_htparam, IEEE80211_HTCAP_MPDUDENSITY);
4910	} else
4911		maxrxampdu = ampdudensity = 0;
4912#endif
4913	sc->rxon.filter |= htole32(IWN_FILTER_BSS);
4914
4915	DPRINTF(sc, IWN_DEBUG_STATE,
4916	    "%s: config chan %d mode %d flags 0x%x cck 0x%x ofdm 0x%x "
4917	    "ht_single 0x%x ht_dual 0x%x rxchain 0x%x "
4918	    "myaddr %6D wlap %6D bssid %6D associd %d filter 0x%x\n",
4919	    __func__,
4920	    le16toh(sc->rxon.chan), sc->rxon.mode, le32toh(sc->rxon.flags),
4921	    sc->rxon.cck_mask, sc->rxon.ofdm_mask,
4922	    sc->rxon.ht_single_mask, sc->rxon.ht_dual_mask,
4923	    le16toh(sc->rxon.rxchain),
4924	    sc->rxon.myaddr, ":", sc->rxon.wlap, ":", sc->rxon.bssid, ":",
4925	    le16toh(sc->rxon.associd), le32toh(sc->rxon.filter));
4926	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1);
4927	if (error != 0) {
4928		device_printf(sc->sc_dev,
4929		    "%s: could not update configuration, error %d\n",
4930		    __func__, error);
4931		return error;
4932	}
4933
4934	/* Configuration has changed, set TX power accordingly. */
4935	error = hal->set_txpower(sc, ni->ni_chan, 1);
4936	if (error != 0) {
4937		device_printf(sc->sc_dev,
4938		    "%s: could not set Tx power, error %d\n", __func__, error);
4939		return error;
4940	}
4941
4942	/* Add BSS node. */
4943	memset(&node, 0, sizeof node);
4944	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
4945	node.id = IWN_ID_BSS;
4946#ifdef notyet
4947	node.htflags = htole32(IWN_AMDPU_SIZE_FACTOR(3) |
4948	    IWN_AMDPU_DENSITY(5));	/* 2us */
4949#endif
4950	DPRINTF(sc, IWN_DEBUG_STATE, "%s: add BSS node, id %d htflags 0x%x\n",
4951	    __func__, node.id, le32toh(node.htflags));
4952	error = hal->add_node(sc, &node, 1);
4953	if (error != 0) {
4954		device_printf(sc->sc_dev, "could not add BSS node\n");
4955		return error;
4956	}
4957	DPRINTF(sc, IWN_DEBUG_STATE, "setting link quality for node %d\n",
4958	    node.id);
4959	error = iwn_set_link_quality(sc, node.id, 1);
4960	if (error != 0) {
4961		device_printf(sc->sc_dev,
4962		    "%s: could not setup MRR for node %d, error %d\n",
4963		    __func__, node.id, error);
4964		return error;
4965	}
4966
4967	error = iwn_init_sensitivity(sc);
4968	if (error != 0) {
4969		device_printf(sc->sc_dev,
4970		    "%s: could not set sensitivity, error %d\n",
4971		    __func__, error);
4972		return error;
4973	}
4974
4975	/* Start periodic calibration timer. */
4976	sc->calib.state = IWN_CALIB_STATE_ASSOC;
4977	iwn_calib_reset(sc);
4978
4979	/* Link LED always on while associated. */
4980	iwn_set_led(sc, IWN_LED_LINK, 0, 1);
4981
4982	return 0;
4983#undef MS
4984}
4985
4986#if 0	/* HT */
4987/*
4988 * This function is called by the upper layer when an ADDBA request is received
4989 * from another STA and before the ADDBA response is sent.
4990 */
4991static int
4992iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
4993    uint8_t tid)
4994{
4995	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
4996	struct iwn_softc *sc = ic->ic_softc;
4997	struct iwn_node *wn = (void *)ni;
4998	struct iwn_node_info node;
4999
5000	memset(&node, 0, sizeof node);
5001	node.id = wn->id;
5002	node.control = IWN_NODE_UPDATE;
5003	node.flags = IWN_FLAG_SET_ADDBA;
5004	node.addba_tid = tid;
5005	node.addba_ssn = htole16(ba->ba_winstart);
5006	DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
5007	    wn->id, tid, ba->ba_winstart);
5008	return sc->sc_hal->add_node(sc, &node, 1);
5009}
5010
5011/*
5012 * This function is called by the upper layer on teardown of an HT-immediate
5013 * Block Ack agreement (e.g. upon receipt of a DELBA frame).
5014 */
5015static void
5016iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
5017    uint8_t tid)
5018{
5019	struct iwn_softc *sc = ic->ic_softc;
5020	struct iwn_node *wn = (void *)ni;
5021	struct iwn_node_info node;
5022
5023	memset(&node, 0, sizeof node);
5024	node.id = wn->id;
5025	node.control = IWN_NODE_UPDATE;
5026	node.flags = IWN_FLAG_SET_DELBA;
5027	node.delba_tid = tid;
5028	DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
5029	(void)sc->sc_hal->add_node(sc, &node, 1);
5030}
5031
5032/*
5033 * This function is called by the upper layer when an ADDBA response is received
5034 * from another STA.
5035 */
5036static int
5037iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
5038    uint8_t tid)
5039{
5040	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
5041	struct iwn_softc *sc = ic->ic_softc;
5042	const struct iwn_hal *hal = sc->sc_hal;
5043	struct iwn_node *wn = (void *)ni;
5044	struct iwn_node_info node;
5045	int error;
5046
5047	/* Enable TX for the specified RA/TID. */
5048	wn->disable_tid &= ~(1 << tid);
5049	memset(&node, 0, sizeof node);
5050	node.id = wn->id;
5051	node.control = IWN_NODE_UPDATE;
5052	node.flags = IWN_FLAG_SET_DISABLE_TID;
5053	node.disable_tid = htole16(wn->disable_tid);
5054	error = hal->add_node(sc, &node, 1);
5055	if (error != 0)
5056		return error;
5057
5058	if ((error = iwn_nic_lock(sc)) != 0)
5059		return error;
5060	hal->ampdu_tx_start(sc, ni, tid, ba->ba_winstart);
5061	iwn_nic_unlock(sc);
5062	return 0;
5063}
5064
5065static void
5066iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
5067    uint8_t tid)
5068{
5069	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
5070	struct iwn_softc *sc = ic->ic_softc;
5071	int error;
5072
5073	error = iwn_nic_lock(sc);
5074	if (error != 0)
5075		return;
5076	sc->sc_hal->ampdu_tx_stop(sc, tid, ba->ba_winstart);
5077	iwn_nic_unlock(sc);
5078}
5079
5080static void
5081iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5082    uint8_t tid, uint16_t ssn)
5083{
5084	struct iwn_node *wn = (void *)ni;
5085	int qid = 7 + tid;
5086
5087	/* Stop TX scheduler while we're changing its configuration. */
5088	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5089	    IWN4965_TXQ_STATUS_CHGACT);
5090
5091	/* Assign RA/TID translation to the queue. */
5092	iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
5093	    wn->id << 4 | tid);
5094
5095	/* Enable chain-building mode for the queue. */
5096	iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
5097
5098	/* Set starting sequence number from the ADDBA request. */
5099	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5100	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5101
5102	/* Set scheduler window size. */
5103	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
5104	    IWN_SCHED_WINSZ);
5105	/* Set scheduler frame limit. */
5106	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5107	    IWN_SCHED_LIMIT << 16);
5108
5109	/* Enable interrupts for the queue. */
5110	iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5111
5112	/* Mark the queue as active. */
5113	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5114	    IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
5115	    iwn_tid2fifo[tid] << 1);
5116}
5117
5118static void
5119iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
5120{
5121	int qid = 7 + tid;
5122
5123	/* Stop TX scheduler while we're changing its configuration. */
5124	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5125	    IWN4965_TXQ_STATUS_CHGACT);
5126
5127	/* Set starting sequence number from the ADDBA request. */
5128	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5129	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5130
5131	/* Disable interrupts for the queue. */
5132	iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5133
5134	/* Mark the queue as inactive. */
5135	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5136	    IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
5137}
5138
5139static void
5140iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5141    uint8_t tid, uint16_t ssn)
5142{
5143	struct iwn_node *wn = (void *)ni;
5144	int qid = 10 + tid;
5145
5146	/* Stop TX scheduler while we're changing its configuration. */
5147	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5148	    IWN5000_TXQ_STATUS_CHGACT);
5149
5150	/* Assign RA/TID translation to the queue. */
5151	iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
5152	    wn->id << 4 | tid);
5153
5154	/* Enable chain-building mode for the queue. */
5155	iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
5156
5157	/* Enable aggregation for the queue. */
5158	iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5159
5160	/* Set starting sequence number from the ADDBA request. */
5161	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5162	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5163
5164	/* Set scheduler window size and frame limit. */
5165	iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5166	    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5167
5168	/* Enable interrupts for the queue. */
5169	iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5170
5171	/* Mark the queue as active. */
5172	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5173	    IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
5174}
5175
5176static void
5177iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
5178{
5179	int qid = 10 + tid;
5180
5181	/* Stop TX scheduler while we're changing its configuration. */
5182	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5183	    IWN5000_TXQ_STATUS_CHGACT);
5184
5185	/* Disable aggregation for the queue. */
5186	iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5187
5188	/* Set starting sequence number from the ADDBA request. */
5189	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5190	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5191
5192	/* Disable interrupts for the queue. */
5193	iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5194
5195	/* Mark the queue as inactive. */
5196	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5197	    IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
5198}
5199#endif
5200
5201/*
5202 * Send calibration results to the runtime firmware.  These results were
5203 * obtained on first boot from the initialization firmware, or by reading
5204 * the EEPROM for crystal calibration.
5205 */
5206static int
5207iwn5000_send_calib_results(struct iwn_softc *sc)
5208{
5209	struct iwn_calib_info *calib_result;
5210	int idx, error;
5211
5212	for (idx = 0; idx < IWN_CALIB_NUM; idx++) {
5213		calib_result = &sc->calib_results[idx];
5214
5215		/* No support for this type of calibration. */
5216		if ((sc->calib_init & (1 << idx)) == 0)
5217			continue;
5218
5219		/* No calibration result available. */
5220		if (calib_result->buf == NULL)
5221			continue;
5222
5223		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5224		    "%s: send calibration result idx=%d, len=%d\n",
5225		    __func__, idx, calib_result->len);
5226
5227		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, calib_result->buf,
5228		    calib_result->len, 0);
5229		if (error != 0) {
5230			device_printf(sc->sc_dev,
5231			    "%s: could not send calibration result "
5232			    "idx=%d, error=%d\n",
5233			    __func__, idx, error);
5234			return error;
5235		}
5236	}
5237	return 0;
5238}
5239
5240/*
5241 * Save calibration result at the given index.  The index determines
5242 * in which order the results are sent to the runtime firmware.
5243 */
5244static int
5245iwn5000_save_calib_result(struct iwn_softc *sc, struct iwn_phy_calib *calib,
5246    int len, int idx)
5247{
5248	struct iwn_calib_info *calib_result = &sc->calib_results[idx];
5249
5250	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5251	    "%s: saving calibration result code=%d, idx=%d, len=%d\n",
5252	    __func__, calib->code, idx, len);
5253
5254	if (calib_result->buf != NULL)
5255		free(calib_result->buf, M_DEVBUF);
5256
5257	calib_result->buf = malloc(len, M_DEVBUF, M_NOWAIT);
5258	if (calib_result->buf == NULL) {
5259		device_printf(sc->sc_dev,
5260		    "%s: not enough memory for calibration result "
5261		    "code=%d, len=%d\n", __func__, calib->code, len);
5262		return ENOMEM;
5263	}
5264
5265	calib_result->len = len;
5266	memcpy(calib_result->buf, calib, len);
5267	return 0;
5268}
5269
5270static void
5271iwn5000_free_calib_results(struct iwn_softc *sc)
5272{
5273	struct iwn_calib_info *calib_result;
5274	int idx;
5275
5276	for (idx = 0; idx < IWN_CALIB_NUM; idx++) {
5277		calib_result = &sc->calib_results[idx];
5278
5279		if (calib_result->buf != NULL)
5280			free(calib_result->buf, M_DEVBUF);
5281
5282		calib_result->buf = NULL;
5283		calib_result->len = 0;
5284	}
5285}
5286
5287/*
5288 * Obtain the crystal calibration result from the EEPROM.
5289 */
5290static int
5291iwn5000_chrystal_calib(struct iwn_softc *sc)
5292{
5293	struct iwn5000_phy_calib_crystal cmd;
5294	uint32_t base, crystal;
5295	uint16_t val;
5296
5297	/* Read crystal calibration. */
5298	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
5299	base = le16toh(val);
5300	iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL, &crystal,
5301	    sizeof(uint32_t));
5302	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: crystal calibration=0x%08x\n",
5303	    __func__, le32toh(crystal));
5304
5305	memset(&cmd, 0, sizeof cmd);
5306	cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
5307	cmd.ngroups = 1;
5308	cmd.isvalid = 1;
5309	cmd.cap_pin[0] = le32toh(crystal) & 0xff;
5310	cmd.cap_pin[1] = (le32toh(crystal) >> 16) & 0xff;
5311
5312	return iwn5000_save_calib_result(sc, (struct iwn_phy_calib *)&cmd,
5313	    sizeof cmd, IWN_CALIB_IDX_XTAL);
5314}
5315
5316/*
5317 * Query calibration results from the initialization firmware.  We do this
5318 * only once at first boot.
5319 */
5320static int
5321iwn5000_send_calib_query(struct iwn_softc *sc, uint32_t cfg)
5322{
5323#define	CALIB_INIT_CFG	0xffffffff
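	/*
	 * CALIB_INIT_CFG (all bits set) appears to request every calibration
	 * type the initialization firmware supports; when a non-zero cfg is
	 * passed in, only the "start" mask is overridden below.
	 */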
5324	struct iwn5000_calib_config cmd;
5325	int error;
5326
5327	memset(&cmd, 0, sizeof cmd);
5328	cmd.ucode.once.enable = CALIB_INIT_CFG;
5329	if (cfg == 0) {
5330		cmd.ucode.once.start  = CALIB_INIT_CFG;
5331		cmd.ucode.once.send   = CALIB_INIT_CFG;
5332		cmd.ucode.flags       = CALIB_INIT_CFG;
5333	} else
5334		cmd.ucode.once.start  = cfg;
5335
5336	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5337	    "%s: query calibration results, cfg %x\n", __func__, cfg);
5338
5339	error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
5340	if (error != 0)
5341		return error;
5342
5343	/* Wait at most two seconds for calibration to complete. */
5344	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
5345		error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 2 * hz);
5346
5347	return error;
5348#undef	CALIB_INIT_CFG
5349}
5350
5351/*
5352 * Process a CALIBRATION_RESULT notification sent by the initialization
5353 * firmware in response to a CMD_CALIB_CONFIG command.
5354 */
5355static int
5356iwn5000_rx_calib_result(struct iwn_softc *sc, struct iwn_rx_desc *desc,
5357    struct iwn_rx_data *data)
5358{
5359#define	FRAME_SIZE_MASK		0x3fff
5360	struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
5361	int len, idx;
5362
5363	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
5364	len = (le32toh(desc->len) & FRAME_SIZE_MASK);
5365
5366	/* Remove length field itself. */
5367	len -= 4;
5368
5369	/*
5370	 * Determine the order in which the results will be sent to the
5371	 * runtime firmware.
5372	 */
5373	switch (calib->code) {
5374	case IWN5000_PHY_CALIB_DC:
5375		idx = IWN_CALIB_IDX_DC;
5376		break;
5377	case IWN5000_PHY_CALIB_LO:
5378		idx = IWN_CALIB_IDX_LO;
5379		break;
5380	case IWN5000_PHY_CALIB_TX_IQ:
5381		idx = IWN_CALIB_IDX_TX_IQ;
5382		break;
5383	case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
5384		idx = IWN_CALIB_IDX_TX_IQ_PERIODIC;
5385		break;
5386	case IWN5000_PHY_CALIB_BASE_BAND:
5387		idx = IWN_CALIB_IDX_BASE_BAND;
5388		break;
5389	default:
5390		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5391		   "%s: unknown calibration code=%d\n", __func__, calib->code);
5392		return EINVAL;
5393	}
5394	return iwn5000_save_calib_result(sc, calib, len, idx);
5395#undef	FRAME_SIZE_MASK
5396}
5397
5398static int
5399iwn5000_send_wimax_coex(struct iwn_softc *sc)
5400{
5401	struct iwn5000_wimax_coex wimax;
5402
5403#ifdef notyet
5404	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
5405		/* Enable WiMAX coexistence for combo adapters. */
5406		wimax.flags =
5407		    IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
5408		    IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
5409		    IWN_WIMAX_COEX_STA_TABLE_VALID |
5410		    IWN_WIMAX_COEX_ENABLE;
5411		memcpy(wimax.events, iwn6050_wimax_events,
5412		    sizeof iwn6050_wimax_events);
5413	} else
5414#endif
5415	{
5416		/* Disable WiMAX coexistence. */
5417		wimax.flags = 0;
5418		memset(wimax.events, 0, sizeof wimax.events);
5419	}
5420	DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
5421	    __func__);
5422	return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
5423}
5424
5425/*
5426 * This function is called after the runtime firmware notifies us of its
5427 * readiness (called in a process context.)
5428 */
5429static int
5430iwn4965_post_alive(struct iwn_softc *sc)
5431{
5432	int error, qid;
5433
5434	if ((error = iwn_nic_lock(sc)) != 0)
5435		return error;
5436
5437	/* Clear TX scheduler state in SRAM. */
5438	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5439	iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
5440	    IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
5441
5442	/* Set physical address of TX scheduler rings (1KB aligned.) */
5443	iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5444
5445	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5446
5447	/* Disable chain mode for all our 16 queues. */
5448	iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
5449
5450	for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
5451		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
5452		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5453
5454		/* Set scheduler window size. */
5455		iwn_mem_write(sc, sc->sched_base +
5456		    IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
5457		/* Set scheduler frame limit. */
5458		iwn_mem_write(sc, sc->sched_base +
5459		    IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5460		    IWN_SCHED_LIMIT << 16);
5461	}
5462
5463	/* Enable interrupts for all our 16 queues. */
5464	iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
5465	/* Identify TX FIFO rings (0-7). */
5466	iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
5467
5468	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
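	/*
	 * The qid2fifo table below presumably maps the four EDCA rings to
	 * FIFOs 3-0, the command ring to FIFO 4 and the HCCA rings to
	 * FIFOs 5-6.
	 */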
5469	for (qid = 0; qid < 7; qid++) {
5470		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
5471		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5472		    IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
5473	}
5474	iwn_nic_unlock(sc);
5475	return 0;
5476}
5477
5478/*
5479 * This function is called after the initialization or runtime firmware
5480 * notifies us of its readiness (called in a process context.)
5481 */
5482static int
5483iwn5000_post_alive(struct iwn_softc *sc)
5484{
5485	int error, qid;
5486
5487	/* Switch to using ICT interrupt mode. */
5488	iwn5000_ict_reset(sc);
5489
5490	error = iwn_nic_lock(sc);
5491	if (error != 0)
5492		return error;
5493
5494	/* Clear TX scheduler state in SRAM. */
5495	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5496	iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
5497	    IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
5498
5499	/* Set physical address of TX scheduler rings (1KB aligned.) */
5500	iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5501
5502	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5503
5504	/* Enable chain mode for all queues except the command queue (bit 4). */
5505	iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
5506	iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
5507
5508	for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
5509		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
5510		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5511
5512		iwn_mem_write(sc, sc->sched_base +
5513		    IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
5514		/* Set scheduler window size and frame limit. */
5515		iwn_mem_write(sc, sc->sched_base +
5516		    IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5517		    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5518	}
5519
5520	/* Enable interrupts for all our 20 queues. */
5521	iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
5522	/* Identify TX FIFO rings (0-7). */
5523	iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
5524
5525	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
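	/*
	 * Same qid2fifo mapping as on the 4965, except that the command
	 * ring presumably uses FIFO 7 instead of FIFO 4 here.
	 */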
5526	for (qid = 0; qid < 7; qid++) {
5527		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
5528		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5529		    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
5530	}
5531	iwn_nic_unlock(sc);
5532
5533	/* Configure WiMAX coexistence for combo adapters. */
5534	error = iwn5000_send_wimax_coex(sc);
5535	if (error != 0) {
5536		device_printf(sc->sc_dev,
5537		    "%s: could not configure WiMAX coexistence, error %d\n",
5538		    __func__, error);
5539		return error;
5540	}
5541
5542	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
5543		/*
5544		 * Start calibration by saving and sending the crystal
5545		 * calibration result first; this must be done before we
5546		 * are able to query the other calibration results.
5547		 */
5548		error = iwn5000_chrystal_calib(sc);
5549		if (error != 0) {
5550			device_printf(sc->sc_dev,
5551			    "%s: could not set chrystal calibration, "
5552			    "error=%d\n", __func__, error);
5553			return error;
5554		}
5555		error = iwn5000_send_calib_results(sc);
5556		if (error != 0) {
5557			device_printf(sc->sc_dev,
5558			    "%s: could not send chrystal calibration, "
5559			    "error=%d\n", __func__, error);
5560			return error;
5561		}
5562
5563		/*
5564		 * Query other calibration results from the initialization
5565		 * firmware.
5566		 */
5567		error = iwn5000_send_calib_query(sc, 0);
5568		if (error != 0) {
5569			device_printf(sc->sc_dev,
5570			    "%s: could not query calibration, error=%d\n",
5571			    __func__, error);
5572			return error;
5573		}
5574
5575		/*
5576		 * We have the calibration results now, reboot with the
5577		 * runtime firmware (call ourselves recursively!)
5578		 */
5579		iwn_hw_stop(sc);
5580		error = iwn_hw_init(sc);
5581	} else {
5582		/*
5583		 * Send calibration results obtained from the initialization
5584		 * firmware to the runtime firmware.
5585		 */
5586		error = iwn5000_send_calib_results(sc);
5587
5588		/*
5589		 * Tell the runtime firmware to do certain calibration types.
5590		 */
5591		if (sc->calib_runtime != 0) {
5592			error = iwn5000_send_calib_query(sc, sc->calib_runtime);
5593			if (error != 0) {
5594				device_printf(sc->sc_dev,
5595				    "%s: could not send query calibration, "
5596				    "error=%d, cfg=%x\n", __func__, error,
5597				    sc->calib_runtime);
5598			}
5599		}
5600	}
5601	return error;
5602}
5603
5604/*
5605 * The firmware boot code is small and is intended to be copied directly into
5606 * the NIC internal memory (no DMA transfer.)
5607 */
5608static int
5609iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
5610{
5611	int error, ntries;
5612
5613	size /= sizeof (uint32_t);
5614
5615	error = iwn_nic_lock(sc);
5616	if (error != 0)
5617		return error;
5618
5619	/* Copy microcode image into NIC memory. */
5620	iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
5621	    (const uint32_t *)ucode, size);
5622
5623	iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
5624	iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
5625	iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
5626
5627	/* Start boot load now. */
5628	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
5629
5630	/* Wait for transfer to complete. */
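	/* (Polls up to 1000 times with 10us delays, i.e. roughly 10ms.) */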
5631	for (ntries = 0; ntries < 1000; ntries++) {
5632		if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
5633		    IWN_BSM_WR_CTRL_START))
5634			break;
5635		DELAY(10);
5636	}
5637	if (ntries == 1000) {
5638		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
5639		    __func__);
5640		iwn_nic_unlock(sc);
5641		return ETIMEDOUT;
5642	}
5643
5644	/* Enable boot after power up. */
5645	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
5646
5647	iwn_nic_unlock(sc);
5648	return 0;
5649}
5650
5651static int
5652iwn4965_load_firmware(struct iwn_softc *sc)
5653{
5654	struct iwn_fw_info *fw = &sc->fw;
5655	struct iwn_dma_info *dma = &sc->fw_dma;
5656	int error;
5657
5658	/* Copy initialization sections into pre-allocated DMA-safe memory. */
5659	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
5660	bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE);
5661	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
5662	    fw->init.text, fw->init.textsz);
5663	bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE);
5664
5665	/* Tell adapter where to find initialization sections. */
5666	error = iwn_nic_lock(sc);
5667	if (error != 0)
5668		return error;
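	/*
	 * The BSM DRAM address registers appear to take addresses in
	 * 16-byte units, hence the >> 4 shifts on the physical addresses
	 * below.
	 */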
5669	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
5670	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
5671	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
5672	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
5673	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
5674	iwn_nic_unlock(sc);
5675
5676	/* Load firmware boot code. */
5677	error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
5678	if (error != 0) {
5679		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
5680		    __func__);
5681		return error;
5682	}
5683	/* Now press "execute". */
5684	IWN_WRITE(sc, IWN_RESET, 0);
5685
5686	/* Wait at most one second for first alive notification. */
5687	error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz);
5688	if (error) {
5689		device_printf(sc->sc_dev,
5690		    "%s: timeout waiting for adapter to initialize, error %d\n",
5691		    __func__, error);
5692		return error;
5693	}
5694
5695	/* Retrieve current temperature for initial TX power calibration. */
5696	sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
5697	sc->temp = iwn4965_get_temperature(sc);
5698
5699	/* Copy runtime sections into pre-allocated DMA-safe memory. */
5700	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
5701	bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE);
5702	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
5703	    fw->main.text, fw->main.textsz);
5704	bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE);
5705
5706	/* Tell adapter where to find runtime sections. */
5707	error = iwn_nic_lock(sc);
5708	if (error != 0)
5709		return error;
5710
5711	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
5712	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
5713	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
5714	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
5715	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
5716	    IWN_FW_UPDATED | fw->main.textsz);
5717	iwn_nic_unlock(sc);
5718
5719	return 0;
5720}
5721
5722static int
5723iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
5724    const uint8_t *section, int size)
5725{
5726	struct iwn_dma_info *dma = &sc->fw_dma;
5727	int error;
5728
5729	/* Copy firmware section into pre-allocated DMA-safe memory. */
5730	memcpy(dma->vaddr, section, size);
5731	bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE);
5732
5733	error = iwn_nic_lock(sc);
5734	if (error != 0)
5735		return error;
5736
5737	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
5738	    IWN_FH_TX_CONFIG_DMA_PAUSE);
5739
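	/*
	 * The TFBD control registers appear to split the DMA address:
	 * CTRL0 takes the low 32 bits while CTRL1 packs the upper address
	 * bits together with the transfer size.
	 */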
5740	IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
5741	IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
5742	    IWN_LOADDR(dma->paddr));
5743	IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
5744	    IWN_HIADDR(dma->paddr) << 28 | size);
5745	IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
5746	    IWN_FH_TXBUF_STATUS_TBNUM(1) |
5747	    IWN_FH_TXBUF_STATUS_TBIDX(1) |
5748	    IWN_FH_TXBUF_STATUS_TFBD_VALID);
5749
5750	/* Kick Flow Handler to start DMA transfer. */
5751	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
5752	    IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
5753
5754	iwn_nic_unlock(sc);
5755
5756	/* Wait at most one second for the FH DMA transfer to complete. */
5757	return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz);
5758}
5759
5760static int
5761iwn5000_load_firmware(struct iwn_softc *sc)
5762{
5763	struct iwn_fw_part *fw;
5764	int error;
5765
5766	/* Load the initialization firmware on first boot only. */
5767	fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
5768	    &sc->fw.main : &sc->fw.init;
5769
5770	error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
5771	    fw->text, fw->textsz);
5772	if (error != 0) {
5773		device_printf(sc->sc_dev,
5774		    "%s: could not load firmware %s section, error %d\n",
5775		    __func__, ".text", error);
5776		return error;
5777	}
5778	error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
5779	    fw->data, fw->datasz);
5780	if (error != 0) {
5781		device_printf(sc->sc_dev,
5782		    "%s: could not load firmware %s section, error %d\n",
5783		    __func__, ".data", error);
5784		return error;
5785	}
5786
5787	/* Now press "execute". */
5788	IWN_WRITE(sc, IWN_RESET, 0);
5789	return 0;
5790}
5791
5792/*
5793 * Extract text and data sections from a legacy firmware image.
5794 */
5795static int
5796iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
5797{
5798	const uint32_t *ptr;
5799	size_t hdrlen = 24;
5800	uint32_t rev;
5801
5802	ptr = (const uint32_t *)sc->fw_fp->data;
5803	rev = le32toh(*ptr++);
5804
5805	/* Check firmware API version. */
5806	if (IWN_FW_API(rev) <= 1) {
5807		device_printf(sc->sc_dev,
5808		    "%s: bad firmware, need API version >=2\n", __func__);
5809		return EINVAL;
5810	}
5811	if (IWN_FW_API(rev) >= 3) {
5812		/* Skip build number (version 2 header). */
5813		hdrlen += 4;
5814		ptr++;
5815	}
5816	if (fw->size < hdrlen) {
5817		device_printf(sc->sc_dev,
5818		    "%s: firmware file too short: %zu bytes\n",
5819		    __func__, fw->size);
5820		return EINVAL;
5821	}
5822	fw->main.textsz = le32toh(*ptr++);
5823	fw->main.datasz = le32toh(*ptr++);
5824	fw->init.textsz = le32toh(*ptr++);
5825	fw->init.datasz = le32toh(*ptr++);
5826	fw->boot.textsz = le32toh(*ptr++);
5827
5828	/* Check that all firmware sections fit. */
5829	if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
5830	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
5831		device_printf(sc->sc_dev,
5832		    "%s: firmware file too short: %zu bytes\n",
5833		    __func__, fw->size);
5834		return EINVAL;
5835	}
5836
5837	/* Get pointers to firmware sections. */
5838	fw->main.text = (const uint8_t *)ptr;
5839	fw->main.data = fw->main.text + fw->main.textsz;
5840	fw->init.text = fw->main.data + fw->main.datasz;
5841	fw->init.data = fw->init.text + fw->init.textsz;
5842	fw->boot.text = fw->init.data + fw->init.datasz;
5843
5844	return 0;
5845}
5846
5847/*
5848 * Extract text and data sections from a TLV firmware image.
5849 */
5850int
5851iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
5852    uint16_t alt)
5853{
5854	const struct iwn_fw_tlv_hdr *hdr;
5855	const struct iwn_fw_tlv *tlv;
5856	const uint8_t *ptr, *end;
5857	uint64_t altmask;
5858	uint32_t len;
5859
5860	if (fw->size < sizeof (*hdr)) {
5861		device_printf(sc->sc_dev,
5862		    "%s: firmware file too short: %zu bytes\n",
5863		    __func__, fw->size);
5864		return EINVAL;
5865	}
5866	hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
5867	if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
5868		device_printf(sc->sc_dev,
5869		    "%s: bad firmware file signature 0x%08x\n",
5870		    __func__, le32toh(hdr->signature));
5871		return EINVAL;
5872	}
5873
5874	/*
5875	 * Select the closest supported alternative that is less than
5876	 * or equal to the specified one.
5877	 */
5878	altmask = le64toh(hdr->altmask);
5879	while (alt > 0 && !(altmask & (1ULL << alt)))
5880		alt--;	/* Downgrade. */
5881
5882	ptr = (const uint8_t *)(hdr + 1);
5883	end = (const uint8_t *)(fw->data + fw->size);
5884
5885	/* Parse type-length-value fields. */
5886	while (ptr + sizeof (*tlv) <= end) {
5887		tlv = (const struct iwn_fw_tlv *)ptr;
5888		len = le32toh(tlv->len);
5889
5890		ptr += sizeof (*tlv);
5891		if (ptr + len > end) {
5892			device_printf(sc->sc_dev,
5893			    "%s: firmware file too short: %zu bytes\n",
5894			    __func__, fw->size);
5895			return EINVAL;
5896		}
5897		/* Skip other alternatives. */
5898		if (tlv->alt != 0 && tlv->alt != htole16(alt))
5899			goto next;
5900
5901		switch (le16toh(tlv->type)) {
5902		case IWN_FW_TLV_MAIN_TEXT:
5903			fw->main.text = ptr;
5904			fw->main.textsz = len;
5905			break;
5906		case IWN_FW_TLV_MAIN_DATA:
5907			fw->main.data = ptr;
5908			fw->main.datasz = len;
5909			break;
5910		case IWN_FW_TLV_INIT_TEXT:
5911			fw->init.text = ptr;
5912			fw->init.textsz = len;
5913			break;
5914		case IWN_FW_TLV_INIT_DATA:
5915			fw->init.data = ptr;
5916			fw->init.datasz = len;
5917			break;
5918		case IWN_FW_TLV_BOOT_TEXT:
5919			fw->boot.text = ptr;
5920			fw->boot.textsz = len;
5921			break;
5922		default:
5923			DPRINTF(sc, IWN_DEBUG_RESET,
5924			    "%s: TLV type %d not handled\n",
5925			    __func__, le16toh(tlv->type));
5926			break;
5927		}
5928next:		/* TLV fields are 32-bit aligned. */
5929		ptr += (len + 3) & ~3;
5930	}
5931	return 0;
5932}
5933
5934static int
5935iwn_read_firmware(struct iwn_softc *sc)
5936{
5937	const struct iwn_hal *hal = sc->sc_hal;
5938	struct iwn_fw_info *fw = &sc->fw;
5939	int error;
5940
5941	IWN_UNLOCK(sc);
5942
5943	memset(fw, 0, sizeof (*fw));
5944
5945	/* Read firmware image from filesystem. */
5946	sc->fw_fp = firmware_get(sc->fwname);
5947	if (sc->fw_fp == NULL) {
5948		device_printf(sc->sc_dev,
5949		    "%s: could not load firmare image \"%s\"\n", __func__,
5950		    sc->fwname);
5951		IWN_LOCK(sc);
5952		return EINVAL;
5953	}
5954	IWN_LOCK(sc);
5955
5956	fw->size = sc->fw_fp->datasize;
5957	fw->data = (const uint8_t *)sc->fw_fp->data;
5958	if (fw->size < sizeof (uint32_t)) {
5959		device_printf(sc->sc_dev,
5960		    "%s: firmware file too short: %zu bytes\n",
5961		    __func__, fw->size);
5962		return EINVAL;
5963	}
5964
5965	/* Retrieve text and data sections. */
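	/*
	 * A legacy image starts with a non-zero API version word, while a
	 * TLV image presumably begins with a zero dword ahead of its
	 * signature, hence the test below.
	 */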
5966	if (*(const uint32_t *)fw->data != 0)	/* Legacy image. */
5967		error = iwn_read_firmware_leg(sc, fw);
5968	else
5969		error = iwn_read_firmware_tlv(sc, fw, 1);
5970	if (error != 0) {
5971		device_printf(sc->sc_dev,
5972		    "%s: could not read firmware sections\n", __func__);
5973		return error;
5974	}
5975
5976	/* Make sure text and data sections fit in hardware memory. */
5977	if (fw->main.textsz > hal->fw_text_maxsz ||
5978	    fw->main.datasz > hal->fw_data_maxsz ||
5979	    fw->init.textsz > hal->fw_text_maxsz ||
5980	    fw->init.datasz > hal->fw_data_maxsz ||
5981	    fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
5982	    (fw->boot.textsz & 3) != 0) {
5983		device_printf(sc->sc_dev,
5984		    "%s: firmware sections too large\n", __func__);
5985		return EINVAL;
5986	}
5987
5988	/* We can proceed with loading the firmware. */
5989	return 0;
5990}
5991
5992static int
5993iwn_clock_wait(struct iwn_softc *sc)
5994{
5995	int ntries;
5996
5997	/* Set "initialization complete" bit. */
5998	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
5999
6000	/* Wait for clock stabilization. */
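	/* (Up to 2500 polls of 10us each, i.e. about 25ms.) */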
6001	for (ntries = 0; ntries < 2500; ntries++) {
6002		if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
6003			return 0;
6004		DELAY(10);
6005	}
6006	device_printf(sc->sc_dev,
6007	    "%s: timeout waiting for clock stabilization\n", __func__);
6008	return ETIMEDOUT;
6009}
6010
6011static int
6012iwn_apm_init(struct iwn_softc *sc)
6013{
6014	uint32_t tmp;
6015	int error;
6016
6017	/* Disable L0s exit timer (NMI bug workaround.) */
6018	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
6019	/* Don't wait for ICH L0s (ICH bug workaround.) */
6020	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
6021
6022	/* Set FH wait threshold to max (HW bug under stress workaround.) */
6023	IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
6024
6025	/* Enable HAP INTA to move adapter from L1a to L0s. */
6026	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
6027
6028	/* Retrieve PCIe Active State Power Management (ASPM). */
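	/*
	 * Offset 0x10 into the PCIe capability is assumed to be the Link
	 * Control register; bit 1 set means ASPM L1 entry is enabled,
	 * which the test below checks.
	 */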
6029	tmp = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
6030	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
6031	if (tmp & 0x02)	/* L1 Entry enabled. */
6032		IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6033	else
6034		IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6035
6036	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
6037	    sc->hw_type <= IWN_HW_REV_TYPE_1000)
6038		IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);
6039
6040	/* Wait for clock stabilization before accessing prph. */
6041	error = iwn_clock_wait(sc);
6042	if (error != 0)
6043		return error;
6044
6045	error = iwn_nic_lock(sc);
6046	if (error != 0)
6047		return error;
6048
6049	if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
6050		/* Enable DMA and BSM (Bootstrap State Machine.) */
6051		iwn_prph_write(sc, IWN_APMG_CLK_EN,
6052		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
6053		    IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
6054	} else {
6055		/* Enable DMA. */
6056		iwn_prph_write(sc, IWN_APMG_CLK_EN,
6057		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6058	}
6059	DELAY(20);
6060
6061	/* Disable L1-Active. */
6062	iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
6063	iwn_nic_unlock(sc);
6064
6065	return 0;
6066}
6067
6068static void
6069iwn_apm_stop_master(struct iwn_softc *sc)
6070{
6071	int ntries;
6072
6073	/* Stop busmaster DMA activity. */
6074	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
6075	for (ntries = 0; ntries < 100; ntries++) {
6076		if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
6077			return;
6078		DELAY(10);
6079	}
6080	device_printf(sc->sc_dev, "%s: timeout waiting for master\n",
6081	    __func__);
6082}
6083
6084static void
6085iwn_apm_stop(struct iwn_softc *sc)
6086{
6087	iwn_apm_stop_master(sc);
6088
6089	/* Reset the entire device. */
6090	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
6091	DELAY(10);
6092	/* Clear "initialization complete" bit. */
6093	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6094}
6095
6096static int
6097iwn4965_nic_config(struct iwn_softc *sc)
6098{
6099	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
6100		/*
6101		 * I don't believe this to be correct but this is what the
6102		 * vendor driver is doing. Probably the bits should not be
6103		 * shifted in IWN_RFCFG_*.
6104		 */
6105		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6106		    IWN_RFCFG_TYPE(sc->rfcfg) |
6107		    IWN_RFCFG_STEP(sc->rfcfg) |
6108		    IWN_RFCFG_DASH(sc->rfcfg));
6109	}
6110	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6111	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6112	return 0;
6113}
6114
6115static int
6116iwn5000_nic_config(struct iwn_softc *sc)
6117{
6118	uint32_t tmp;
6119	int error;
6120
6121	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
6122		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6123		    IWN_RFCFG_TYPE(sc->rfcfg) |
6124		    IWN_RFCFG_STEP(sc->rfcfg) |
6125		    IWN_RFCFG_DASH(sc->rfcfg));
6126	}
6127	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6128	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6129
6130	error = iwn_nic_lock(sc);
6131	if (error != 0)
6132		return error;
6133	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
6134
6135	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
6136		/*
6137		 * Select first Switching Voltage Regulator (1.32V) to
6138		 * solve a stability issue related to noisy DC2DC line
6139		 * in the silicon of 1000 Series.
6140		 */
6141		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
6142		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
6143		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
6144		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
6145	}
6146	iwn_nic_unlock(sc);
6147
6148	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
6149		/* Use internal power amplifier only. */
6150		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
6151	}
6152	if (sc->hw_type == IWN_HW_REV_TYPE_6050 && sc->calib_ver >= 6) {
6153		/* Indicate that ROM calibration version is >=6. */
6154		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
6155	}
6156	return 0;
6157}
6158
6159/*
6160 * Take ownership of the NIC from Intel Active Management Technology (AMT).
6161 */
6162static int
6163iwn_hw_prepare(struct iwn_softc *sc)
6164{
6165	int ntries;
6166
6167	/* Check if hardware is ready. */
6168	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6169	for (ntries = 0; ntries < 5; ntries++) {
6170		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6171		    IWN_HW_IF_CONFIG_NIC_READY)
6172			return 0;
6173		DELAY(10);
6174	}
6175
6176	/* Hardware not ready, force into ready state. */
6177	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
6178	for (ntries = 0; ntries < 15000; ntries++) {
6179		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
6180		    IWN_HW_IF_CONFIG_PREPARE_DONE))
6181			break;
6182		DELAY(10);
6183	}
6184	if (ntries == 15000)
6185		return ETIMEDOUT;
6186
6187	/* Hardware should be ready now. */
6188	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6189	for (ntries = 0; ntries < 5; ntries++) {
6190		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6191		    IWN_HW_IF_CONFIG_NIC_READY)
6192			return 0;
6193		DELAY(10);
6194	}
6195	return ETIMEDOUT;
6196}
6197
6198static int
6199iwn_hw_init(struct iwn_softc *sc)
6200{
6201	const struct iwn_hal *hal = sc->sc_hal;
6202	int error, chnl, qid;
6203
6204	/* Clear pending interrupts. */
6205	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6206
6207	error = iwn_apm_init(sc);
6208	if (error != 0) {
6209		device_printf(sc->sc_dev,
6210		    "%s: could not power ON adapter, error %d\n",
6211		    __func__, error);
6212		return error;
6213	}
6214
6215	/* Select VMAIN power source. */
6216	error = iwn_nic_lock(sc);
6217	if (error != 0)
6218		return error;
6219	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
6220	iwn_nic_unlock(sc);
6221
6222	/* Perform adapter-specific initialization. */
6223	error = hal->nic_config(sc);
6224	if (error != 0)
6225		return error;
6226
6227	/* Initialize RX ring. */
6228	error = iwn_nic_lock(sc);
6229	if (error != 0)
6230		return error;
6231	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
6232	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
6233	/* Set physical address of RX ring (256-byte aligned.) */
6234	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
6235	/* Set physical address of RX status (16-byte aligned.) */
6236	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
6237	/* Enable RX. */
6238	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
6239	    IWN_FH_RX_CONFIG_ENA           |
6240	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
6241	    IWN_FH_RX_CONFIG_IRQ_DST_HOST  |
6242	    IWN_FH_RX_CONFIG_SINGLE_FRAME  |
6243	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
6244	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
6245	iwn_nic_unlock(sc);
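	/*
	 * The RX write pointer appears to be required to be a multiple of
	 * 8, hence the rounding of IWN_RX_RING_COUNT - 1 below.
	 */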
6246	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
6247
6248	error = iwn_nic_lock(sc);
6249	if (error != 0)
6250		return error;
6251
6252	/* Initialize TX scheduler. */
6253	iwn_prph_write(sc, hal->sched_txfact_addr, 0);
6254
6255	/* Set physical address of "keep warm" page (16-byte aligned.) */
6256	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
6257
6258	/* Initialize TX rings. */
6259	for (qid = 0; qid < hal->ntxqs; qid++) {
6260		struct iwn_tx_ring *txq = &sc->txq[qid];
6261
6262		/* Set physical address of TX ring (256-byte aligned.) */
6263		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
6264		    txq->desc_dma.paddr >> 8);
6265	}
6266	iwn_nic_unlock(sc);
6267
6268	/* Enable DMA channels. */
6269	for (chnl = 0; chnl < hal->ndmachnls; chnl++) {
6270		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
6271		    IWN_FH_TX_CONFIG_DMA_ENA |
6272		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
6273	}
6274
6275	/* Clear "radio off" and "commands blocked" bits. */
6276	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6277	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
6278
6279	/* Clear pending interrupts. */
6280	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6281	/* Enable interrupt coalescing. */
6282	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
6283	/* Enable interrupts. */
6284	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6285
6286	/* _Really_ make sure "radio off" bit is cleared! */
6287	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6288	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6289
6290	error = hal->load_firmware(sc);
6291	if (error != 0) {
6292		device_printf(sc->sc_dev,
6293		    "%s: could not load firmware, error %d\n",
6294		    __func__, error);
6295		return error;
6296	}
6297	/* Wait at most one second for firmware alive notification. */
6298	error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz);
6299	if (error != 0) {
6300		device_printf(sc->sc_dev,
6301		    "%s: timeout waiting for adapter to initialize, error %d\n",
6302		    __func__, error);
6303		return error;
6304	}
6305	/* Do post-firmware initialization. */
6306	return hal->post_alive(sc);
6307}
6308
6309static void
6310iwn_hw_stop(struct iwn_softc *sc)
6311{
6312	const struct iwn_hal *hal = sc->sc_hal;
6313	uint32_t tmp;
6314	int chnl, qid, ntries;
6315
6316	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
6317
6318	/* Disable interrupts. */
6319	IWN_WRITE(sc, IWN_INT_MASK, 0);
6320	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6321	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
6322	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6323
6324	/* Make sure we no longer hold the NIC lock. */
6325	iwn_nic_unlock(sc);
6326
6327	/* Stop TX scheduler. */
6328	iwn_prph_write(sc, hal->sched_txfact_addr, 0);
6329
6330	/* Stop all DMA channels. */
6331	if (iwn_nic_lock(sc) == 0) {
6332		for (chnl = 0; chnl < hal->ndmachnls; chnl++) {
6333			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
6334			for (ntries = 0; ntries < 200; ntries++) {
6335				tmp = IWN_READ(sc, IWN_FH_TX_STATUS);
6336				if ((tmp & IWN_FH_TX_STATUS_IDLE(chnl)) ==
6337				    IWN_FH_TX_STATUS_IDLE(chnl))
6338					break;
6339				DELAY(10);
6340			}
6341		}
6342		iwn_nic_unlock(sc);
6343	}
6344
6345	/* Stop RX ring. */
6346	iwn_reset_rx_ring(sc, &sc->rxq);
6347
6348	/* Reset all TX rings. */
6349	for (qid = 0; qid < hal->ntxqs; qid++)
6350		iwn_reset_tx_ring(sc, &sc->txq[qid]);
6351
6352	if (iwn_nic_lock(sc) == 0) {
6353		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
6354		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6355		iwn_nic_unlock(sc);
6356	}
6357	DELAY(5);
6358
6359	/* Power OFF adapter. */
6360	iwn_apm_stop(sc);
6361}
6362
6363static void
6364iwn_init_locked(struct iwn_softc *sc)
6365{
6366	struct ifnet *ifp = sc->sc_ifp;
6367	int error;
6368
6369	IWN_LOCK_ASSERT(sc);
6370
6371	error = iwn_hw_prepare(sc);
6372	if (error != 0) {
6373		device_printf(sc->sc_dev, "%s: hardware not ready, eror %d\n",
6374		    __func__, error);
6375		goto fail;
6376	}
6377
6378	/* Initialize interrupt mask to default value. */
6379	sc->int_mask = IWN_INT_MASK_DEF;
6380	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6381
6382	/* Check that the radio is not disabled by hardware switch. */
6383	if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
6384		device_printf(sc->sc_dev,
6385		    "radio is disabled by hardware switch\n");
6386
6387		/* Enable interrupts to get RF toggle notifications. */
6388		IWN_WRITE(sc, IWN_INT, 0xffffffff);
6389		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6390		return;
6391	}
6392
6393	/* Read firmware images from the filesystem. */
6394	error = iwn_read_firmware(sc);
6395	if (error != 0) {
6396		device_printf(sc->sc_dev,
6397		    "%s: could not read firmware, error %d\n",
6398		    __func__, error);
6399		goto fail;
6400	}
6401
6402	/* Initialize hardware and upload firmware. */
6403	error = iwn_hw_init(sc);
6404	firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6405	sc->fw_fp = NULL;
6406	if (error != 0) {
6407		device_printf(sc->sc_dev,
6408		    "%s: could not initialize hardware, error %d\n",
6409		    __func__, error);
6410		goto fail;
6411	}
6412
6413	/* Configure adapter now that it is ready. */
6414	error = iwn_config(sc);
6415	if (error != 0) {
6416		device_printf(sc->sc_dev,
6417		    "%s: could not configure device, error %d\n",
6418		    __func__, error);
6419		goto fail;
6420	}
6421
6422	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6423	ifp->if_drv_flags |= IFF_DRV_RUNNING;
6424
6425	return;
6426
6427fail:
6428	iwn_stop_locked(sc);
6429}
6430
6431static void
6432iwn_init(void *arg)
6433{
6434	struct iwn_softc *sc = arg;
6435	struct ifnet *ifp = sc->sc_ifp;
6436	struct ieee80211com *ic = ifp->if_l2com;
6437
6438	IWN_LOCK(sc);
6439	iwn_init_locked(sc);
6440	IWN_UNLOCK(sc);
6441
6442	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
6443		ieee80211_start_all(ic);
6444}
6445
6446static void
6447iwn_stop_locked(struct iwn_softc *sc)
6448{
6449	struct ifnet *ifp = sc->sc_ifp;
6450
6451	IWN_LOCK_ASSERT(sc);
6452
6453	sc->sc_tx_timer = 0;
6454	callout_stop(&sc->sc_timer_to);
6455	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
6456
6457	/* Power OFF hardware. */
6458	iwn_hw_stop(sc);
6459}
6460
6461static void
6462iwn_stop(struct iwn_softc *sc)
6463{
6464	IWN_LOCK(sc);
6465	iwn_stop_locked(sc);
6466	IWN_UNLOCK(sc);
6467}
6468
6469/*
6470 * Callback from net80211 to start a scan.
6471 */
6472static void
6473iwn_scan_start(struct ieee80211com *ic)
6474{
6475	struct ifnet *ifp = ic->ic_ifp;
6476	struct iwn_softc *sc = ifp->if_softc;
6477
6478	IWN_LOCK(sc);
6479	/* make the link LED blink while we're scanning */
6480	iwn_set_led(sc, IWN_LED_LINK, 20, 2);
6481	IWN_UNLOCK(sc);
6482}
6483
6484/*
6485 * Callback from net80211 to terminate a scan.
6486 */
6487static void
6488iwn_scan_end(struct ieee80211com *ic)
6489{
6490	struct ifnet *ifp = ic->ic_ifp;
6491	struct iwn_softc *sc = ifp->if_softc;
6492	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6493
6494	IWN_LOCK(sc);
6495	if (vap->iv_state == IEEE80211_S_RUN) {
6496		/* Set link LED to ON status if we are associated */
6497		iwn_set_led(sc, IWN_LED_LINK, 0, 1);
6498	}
6499	IWN_UNLOCK(sc);
6500}
6501
6502/*
6503 * Callback from net80211 to force a channel change.
6504 */
6505static void
6506iwn_set_channel(struct ieee80211com *ic)
6507{
6508	const struct ieee80211_channel *c = ic->ic_curchan;
6509	struct ifnet *ifp = ic->ic_ifp;
6510	struct iwn_softc *sc = ifp->if_softc;
6511
6512	IWN_LOCK(sc);
6513	sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
6514	sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
6515	sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
6516	sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
6517	IWN_UNLOCK(sc);
6518}
6519
6520/*
6521 * Callback from net80211 to start scanning of the current channel.
6522 */
6523static void
6524iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6525{
6526	struct ieee80211vap *vap = ss->ss_vap;
6527	struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc;
6528	int error;
6529
6530	IWN_LOCK(sc);
6531	error = iwn_scan(sc);
6532	IWN_UNLOCK(sc);
6533	if (error != 0)
6534		ieee80211_cancel_scan(vap);
6535}
6536
6537/*
6538 * Callback from net80211 to handle the minimum dwell time being met.
6539 * The intent is to terminate the scan but we just let the firmware
6540 * notify us when it's finished as we have no safe way to abort it.
6541 */
6542static void
6543iwn_scan_mindwell(struct ieee80211_scan_state *ss)
6544{
6545	/* NB: don't try to abort scan; wait for firmware to finish */
6546}
6547
6548static struct iwn_eeprom_chan *
6549iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
6550{
6551	int i, j;
6552
6553	for (j = 0; j < 7; j++) {
6554		for (i = 0; i < iwn_bands[j].nchan; i++) {
6555			if (iwn_bands[j].chan[i] == c->ic_ieee)
6556				return &sc->eeprom_channels[j][i];
6557		}
6558	}
6559
6560	return NULL;
6561}
6562
6563/*
6564 * Enforce flags read from EEPROM.
6565 */
6566static int
6567iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
6568    int nchan, struct ieee80211_channel chans[])
6569{
6570	struct iwn_softc *sc = ic->ic_ifp->if_softc;
6571	int i;
6572
6573	for (i = 0; i < nchan; i++) {
6574		struct ieee80211_channel *c = &chans[i];
6575		struct iwn_eeprom_chan *channel;
6576
6577		channel = iwn_find_eeprom_channel(sc, c);
6578		if (channel == NULL) {
6579			if_printf(ic->ic_ifp,
6580			    "%s: invalid channel %u freq %u/0x%x\n",
6581			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
6582			return EINVAL;
6583		}
6584		c->ic_flags |= iwn_eeprom_channel_flags(channel);
6585	}
6586
6587	return 0;
6588}
6589
6590static void
6591iwn_hw_reset(void *arg0, int pending)
6592{
6593	struct iwn_softc *sc = arg0;
6594	struct ifnet *ifp = sc->sc_ifp;
6595	struct ieee80211com *ic = ifp->if_l2com;
6596
6597	iwn_stop(sc);
6598	iwn_init(sc);
6599	ieee80211_notify_radio(ic, 1);
6600}
6601
6602static void
6603iwn_radio_on(void *arg0, int pending)
6604{
6605	struct iwn_softc *sc = arg0;
6606	struct ifnet *ifp = sc->sc_ifp;
6607	struct ieee80211com *ic = ifp->if_l2com;
6608	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6609
6610	if (vap != NULL) {
6611		iwn_init(sc);
6612		ieee80211_init(vap);
6613	}
6614}
6615
6616static void
6617iwn_radio_off(void *arg0, int pending)
6618{
6619	struct iwn_softc *sc = arg0;
6620	struct ifnet *ifp = sc->sc_ifp;
6621	struct ieee80211com *ic = ifp->if_l2com;
6622	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6623
6624	iwn_stop(sc);
6625	if (vap != NULL)
6626		ieee80211_stop(vap);
6627
6628	/* Enable interrupts to get RF toggle notification. */
6629	IWN_LOCK(sc);
6630	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6631	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6632	IWN_UNLOCK(sc);
6633}
6634
6635static void
6636iwn_sysctlattach(struct iwn_softc *sc)
6637{
6638	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
6639	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
6640
6641#ifdef IWN_DEBUG
6642	sc->sc_debug = 0;
6643	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6644	    "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
6645#endif
6646}
6647
6648static int
6649iwn_shutdown(device_t dev)
6650{
6651	struct iwn_softc *sc = device_get_softc(dev);
6652
6653	iwn_stop(sc);
6654	return 0;
6655}
6656
6657static int
6658iwn_suspend(device_t dev)
6659{
6660	struct iwn_softc *sc = device_get_softc(dev);
6661	struct ifnet *ifp = sc->sc_ifp;
6662	struct ieee80211com *ic = ifp->if_l2com;
6663	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6664
6665	iwn_stop(sc);
6666	if (vap != NULL)
6667		ieee80211_stop(vap);
6668	return 0;
6669}
6670
6671static int
6672iwn_resume(device_t dev)
6673{
6674	struct iwn_softc *sc = device_get_softc(dev);
6675	struct ifnet *ifp = sc->sc_ifp;
6676	struct ieee80211com *ic = ifp->if_l2com;
6677	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6678
6679	/* Clear device-specific "PCI retry timeout" register (41h). */
6680	pci_write_config(dev, 0x41, 0, 1);
6681
6682	if (ifp->if_flags & IFF_UP) {
6683		iwn_init(sc);
6684		if (vap != NULL)
6685			ieee80211_init(vap);
6686		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
6687			iwn_start(ifp);
6688	}
6689	return 0;
6690}
6691
6692#ifdef IWN_DEBUG
6693static const char *
6694iwn_intr_str(uint8_t cmd)
6695{
6696	switch (cmd) {
6697	/* Notifications */
6698	case IWN_UC_READY:		return "UC_READY";
6699	case IWN_ADD_NODE_DONE:		return "ADD_NODE_DONE";
6700	case IWN_TX_DONE:		return "TX_DONE";
6701	case IWN_START_SCAN:		return "START_SCAN";
6702	case IWN_STOP_SCAN:		return "STOP_SCAN";
6703	case IWN_RX_STATISTICS:		return "RX_STATS";
6704	case IWN_BEACON_STATISTICS:	return "BEACON_STATS";
6705	case IWN_STATE_CHANGED:		return "STATE_CHANGED";
6706	case IWN_BEACON_MISSED:		return "BEACON_MISSED";
6707	case IWN_RX_PHY:		return "RX_PHY";
6708	case IWN_MPDU_RX_DONE:		return "MPDU_RX_DONE";
6709	case IWN_RX_DONE:		return "RX_DONE";
6710
6711	/* Command Notifications */
6712	case IWN_CMD_RXON:		return "IWN_CMD_RXON";
6713	case IWN_CMD_RXON_ASSOC:	return "IWN_CMD_RXON_ASSOC";
6714	case IWN_CMD_EDCA_PARAMS:	return "IWN_CMD_EDCA_PARAMS";
6715	case IWN_CMD_TIMING:		return "IWN_CMD_TIMING";
6716	case IWN_CMD_LINK_QUALITY:	return "IWN_CMD_LINK_QUALITY";
6717	case IWN_CMD_SET_LED:		return "IWN_CMD_SET_LED";
6718	case IWN5000_CMD_WIMAX_COEX:	return "IWN5000_CMD_WIMAX_COEX";
6719	case IWN5000_CMD_CALIB_CONFIG:	return "IWN5000_CMD_CALIB_CONFIG";
6720	case IWN5000_CMD_CALIB_RESULT:	return "IWN5000_CMD_CALIB_RESULT";
6721	case IWN5000_CMD_CALIB_COMPLETE: return "IWN5000_CMD_CALIB_COMPLETE";
6722	case IWN_CMD_SET_POWER_MODE:	return "IWN_CMD_SET_POWER_MODE";
6723	case IWN_CMD_SCAN:		return "IWN_CMD_SCAN";
6724	case IWN_CMD_SCAN_RESULTS:	return "IWN_CMD_SCAN_RESULTS";
6725	case IWN_CMD_TXPOWER:		return "IWN_CMD_TXPOWER";
6726	case IWN_CMD_TXPOWER_DBM:	return "IWN_CMD_TXPOWER_DBM";
6727	case IWN5000_CMD_TX_ANT_CONFIG:	return "IWN5000_CMD_TX_ANT_CONFIG";
6728	case IWN_CMD_BT_COEX:		return "IWN_CMD_BT_COEX";
6729	case IWN_CMD_SET_CRITICAL_TEMP:	return "IWN_CMD_SET_CRITICAL_TEMP";
6730	case IWN_CMD_SET_SENSITIVITY:	return "IWN_CMD_SET_SENSITIVITY";
6731	case IWN_CMD_PHY_CALIB:		return "IWN_CMD_PHY_CALIB";
6732	}
6733	return "UNKNOWN INTR NOTIF/CMD";
6734}
6735#endif /* IWN_DEBUG */
6736
6737static device_method_t iwn_methods[] = {
6738	/* Device interface */
6739	DEVMETHOD(device_probe,		iwn_probe),
6740	DEVMETHOD(device_attach,	iwn_attach),
6741	DEVMETHOD(device_detach,	iwn_detach),
6742	DEVMETHOD(device_shutdown,	iwn_shutdown),
6743	DEVMETHOD(device_suspend,	iwn_suspend),
6744	DEVMETHOD(device_resume,	iwn_resume),
6745	{ 0, 0 }
6746};
6747
6748static driver_t iwn_driver = {
6749	"iwn",
6750	iwn_methods,
6751	sizeof (struct iwn_softc)
6752};
6753static devclass_t iwn_devclass;
6754
6755DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, 0, 0);
6756MODULE_DEPEND(iwn, pci, 1, 1, 1);
6757MODULE_DEPEND(iwn, firmware, 1, 1, 1);
6758MODULE_DEPEND(iwn, wlan, 1, 1, 1);
6759